diff --git a/0001-LoongArch-Reimplement-multilib-build-option-handling.patch b/0001-LoongArch-Reimplement-multilib-build-option-handling.patch new file mode 100644 index 0000000000000000000000000000000000000000..c3d8f9fd307e3d3b464aba5c6cc9d305759a4d28 --- /dev/null +++ b/0001-LoongArch-Reimplement-multilib-build-option-handling.patch @@ -0,0 +1,464 @@ +From d394a9ac68674b40e0d2b436c09e23dd29d8b5d0 Mon Sep 17 00:00:00 2001 +From: Yang Yujie +Date: Wed, 13 Sep 2023 17:52:14 +0800 +Subject: [PATCH 001/188] LoongArch: Reimplement multilib build option + handling. + +Library build options from --with-multilib-list used to be processed with +*self_spec, which missed the driver's initial canonicalization. This +caused limitations on CFLAGS override and the use of driver-only options +like -m[no]-lsx. + +The problem is solved by promoting the injection rules of --with-multilib-list +options to the first element of DRIVER_SELF_SPECS, to make them execute before +the canonialization. The library-build options are also hard-coded in +the driver and can be used conveniently by the builders of other non-gcc +libraries via the use of -fmultiflags. + +Bootstrapped and tested on loongarch64-linux-gnu. + +ChangeLog: + + * config-ml.in: Remove unneeded loongarch clause. + * configure.ac: Register custom makefile fragments mt-loongarch-* + for loongarch targets. + * configure: Regenerate. + +config/ChangeLog: + + * mt-loongarch-mlib: New file. Pass -fmultiflags when building + target libraries (FLAGS_FOR_TARGET). + * mt-loongarch-elf: New file. + * mt-loongarch-gnu: New file. + +gcc/ChangeLog: + + * config.gcc: Pass the default ABI via TM_MULTILIB_CONFIG. + * config/loongarch/loongarch-driver.h: Invoke MLIB_SELF_SPECS + before the driver canonicalization routines. + * config/loongarch/loongarch.h: Move definitions of CC1_SPEC etc. + to loongarch-driver.h + * config/loongarch/t-linux: Move multilib-related definitions to + t-multilib. + * config/loongarch/t-multilib: New file. 
Inject library build + options obtained from --with-multilib-list. + * config/loongarch/t-loongarch: Same. +--- + config-ml.in | 10 ---- + config/mt-loongarch-elf | 1 + + config/mt-loongarch-gnu | 2 + + config/mt-loongarch-mlib | 1 + + configure | 6 +++ + configure.ac | 6 +++ + gcc/config.gcc | 6 +-- + gcc/config/loongarch/loongarch-driver.h | 42 +++++++++++++++ + gcc/config/loongarch/loongarch.h | 50 ------------------ + gcc/config/loongarch/t-linux | 66 +++--------------------- + gcc/config/loongarch/t-loongarch | 2 +- + gcc/config/loongarch/t-multilib | 68 +++++++++++++++++++++++++ + 12 files changed, 137 insertions(+), 123 deletions(-) + create mode 100644 config/mt-loongarch-elf + create mode 100644 config/mt-loongarch-gnu + create mode 100644 config/mt-loongarch-mlib + create mode 100644 gcc/config/loongarch/t-multilib + +diff --git a/config-ml.in b/config-ml.in +index ad0db0781..68854a4f1 100644 +--- a/config-ml.in ++++ b/config-ml.in +@@ -301,16 +301,6 @@ arm-*-*) + done + fi + ;; +-loongarch*-*) +- old_multidirs="${multidirs}" +- multidirs="" +- for x in ${old_multidirs}; do +- case "$x" in +- `${CC-gcc} --print-multi-directory`) : ;; +- *) multidirs="${multidirs} ${x}" ;; +- esac +- done +- ;; + m68*-*-*) + if [ x$enable_softfloat = xno ] + then +diff --git a/config/mt-loongarch-elf b/config/mt-loongarch-elf +new file mode 100644 +index 000000000..bbf29bb57 +--- /dev/null ++++ b/config/mt-loongarch-elf +@@ -0,0 +1 @@ ++include $(srcdir)/config/mt-loongarch-mlib +diff --git a/config/mt-loongarch-gnu b/config/mt-loongarch-gnu +new file mode 100644 +index 000000000..dfefb44ed +--- /dev/null ++++ b/config/mt-loongarch-gnu +@@ -0,0 +1,2 @@ ++include $(srcdir)/config/mt-gnu ++include $(srcdir)/config/mt-loongarch-mlib +diff --git a/config/mt-loongarch-mlib b/config/mt-loongarch-mlib +new file mode 100644 +index 000000000..4cfe568f1 +--- /dev/null ++++ b/config/mt-loongarch-mlib +@@ -0,0 +1 @@ ++FLAGS_FOR_TARGET += -fmultiflags +diff --git a/configure 
b/configure +index aff62c464..81b4a3cec 100755 +--- a/configure ++++ b/configure +@@ -9548,6 +9548,12 @@ case "${target}" in + spu-*-*) + target_makefile_frag="config/mt-spu" + ;; ++ loongarch*-*linux* | loongarch*-*gnu*) ++ target_makefile_frag="config/mt-loongarch-gnu" ++ ;; ++ loongarch*-*elf*) ++ target_makefile_frag="config/mt-loongarch-elf" ++ ;; + mips*-sde-elf* | mips*-mti-elf* | mips*-img-elf*) + target_makefile_frag="config/mt-sde" + ;; +diff --git a/configure.ac b/configure.ac +index f310d75ca..9f8dbd319 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -2729,6 +2729,12 @@ case "${target}" in + spu-*-*) + target_makefile_frag="config/mt-spu" + ;; ++ loongarch*-*linux* | loongarch*-*gnu*) ++ target_makefile_frag="config/mt-loongarch-gnu" ++ ;; ++ loongarch*-*elf*) ++ target_makefile_frag="config/mt-loongarch-elf" ++ ;; + mips*-sde-elf* | mips*-mti-elf* | mips*-img-elf*) + target_makefile_frag="config/mt-sde" + ;; +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 3f870e966..e34a5fbb9 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -2510,7 +2510,7 @@ loongarch*-*-linux*) + tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}" + tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h" + extra_options="${extra_options} linux-android.opt" +- tmake_file="${tmake_file} loongarch/t-linux" ++ tmake_file="${tmake_file} loongarch/t-multilib loongarch/t-linux" + gnu_ld=yes + gas=yes + +@@ -2522,7 +2522,7 @@ loongarch*-*-linux*) + loongarch*-*-elf*) + tm_file="elfos.h newlib-stdint.h ${tm_file}" + tm_file="${tm_file} loongarch/elf.h loongarch/linux.h" +- tmake_file="${tmake_file} loongarch/t-linux" ++ tmake_file="${tmake_file} loongarch/t-multilib loongarch/t-linux" + gnu_ld=yes + gas=yes + +@@ -5241,7 +5241,7 @@ case "${target}" in + loongarch_multilib_list_sane=no + + # This one goes to TM_MULTILIB_CONFIG, for use in t-linux. 
+- loongarch_multilib_list_make="" ++ loongarch_multilib_list_make="${abi_base}," + + # This one goes to tm_defines, for use in loongarch-driver.c. + loongarch_multilib_list_c="" +diff --git a/gcc/config/loongarch/loongarch-driver.h b/gcc/config/loongarch/loongarch-driver.h +index 6cfe0efb5..e7d083677 100644 +--- a/gcc/config/loongarch/loongarch-driver.h ++++ b/gcc/config/loongarch/loongarch-driver.h +@@ -23,6 +23,39 @@ along with GCC; see the file COPYING3. If not see + + #include "loongarch-str.h" + ++#ifndef SUBTARGET_CPP_SPEC ++#define SUBTARGET_CPP_SPEC "" ++#endif ++ ++#ifndef SUBTARGET_CC1_SPEC ++#define SUBTARGET_CC1_SPEC "" ++#endif ++ ++#ifndef SUBTARGET_ASM_SPEC ++#define SUBTARGET_ASM_SPEC "" ++#endif ++ ++#define EXTRA_SPECS \ ++ {"early_self_spec", ""}, \ ++ {"subtarget_cc1_spec", SUBTARGET_CC1_SPEC}, \ ++ {"subtarget_cpp_spec", SUBTARGET_CPP_SPEC}, \ ++ {"subtarget_asm_spec", SUBTARGET_ASM_SPEC}, ++ ++ ++#undef CPP_SPEC ++#define CPP_SPEC \ ++ "%(subtarget_cpp_spec)" ++ ++#undef CC1_SPEC ++#define CC1_SPEC \ ++ "%{G*} %{,ada:-gnatea %{mabi=*} -gnatez} " \ ++ "%(subtarget_cc1_spec)" ++ ++#undef ASM_SPEC ++#define ASM_SPEC \ ++ "%{mabi=*} %(subtarget_asm_spec)" ++ ++ + extern const char* + la_driver_init (int argc, const char **argv); + +@@ -45,7 +78,16 @@ driver_get_normalized_m_opts (int argc, const char **argv); + #define LA_SET_PARM_SPEC(NAME) \ + " %{m" OPTSTR_##NAME "=*: %:set_m_parm(" OPTSTR_##NAME " %*)}" \ + ++/* For MLIB_SELF_SPECS. 
*/ ++#include "loongarch-multilib.h" ++ ++#ifndef MLIB_SELF_SPECS ++#define MLIB_SELF_SPECS "" ++#endif ++ + #define DRIVER_HANDLE_MACHINE_OPTIONS \ ++ " %(early_self_spec)", \ ++ MLIB_SELF_SPECS \ + " %:driver_init()" \ + " %{c|S|E|nostdlib: %:set_no_link()}" \ + " %{nostartfiles: %{nodefaultlibs: %:set_no_link()}}" \ +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index c7e91a06d..a443a6427 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -64,56 +64,6 @@ along with GCC; see the file COPYING3. If not see + #define NM_FLAGS "-Bn" + #endif + +-/* SUBTARGET_ASM_SPEC is always passed to the assembler. It may be +- overridden by subtargets. */ +- +-#ifndef SUBTARGET_ASM_SPEC +-#define SUBTARGET_ASM_SPEC "" +-#endif +- +-#undef ASM_SPEC +-#define ASM_SPEC "%{mabi=*} %{subtarget_asm_spec}" +- +-/* Extra switches sometimes passed to the linker. */ +- +-#ifndef LINK_SPEC +-#define LINK_SPEC "" +-#endif /* LINK_SPEC defined */ +- +-/* Specs for the compiler proper. */ +- +-/* CC1_SPEC is the set of arguments to pass to the compiler proper. */ +- +-#undef CC1_SPEC +-#define CC1_SPEC "%{,ada:-gnatea} %{m*} \ +-%{G*} \ +-%(subtarget_cc1_spec) %{,ada:-gnatez}" +- +-/* Preprocessor specs. */ +- +-/* SUBTARGET_CPP_SPEC is passed to the preprocessor. It may be +- overridden by subtargets. */ +-#ifndef SUBTARGET_CPP_SPEC +-#define SUBTARGET_CPP_SPEC "" +-#endif +- +-#define CPP_SPEC "%(subtarget_cpp_spec)" +- +-/* This macro defines names of additional specifications to put in the specs +- that can be used in various specifications like CC1_SPEC. Its definition +- is an initializer with a subgrouping for each command option. +- +- Each subgrouping contains a string constant, that defines the +- specification name, and a string constant that used by the GCC driver +- program. +- +- Do not define this macro if it does not need to do anything. 
*/ +- +-#define EXTRA_SPECS \ +- {"subtarget_cc1_spec", SUBTARGET_CC1_SPEC}, \ +- {"subtarget_cpp_spec", SUBTARGET_CPP_SPEC}, \ +- {"subtarget_asm_spec", SUBTARGET_ASM_SPEC}, +- + /* Registers may have a prefix which can be ignored when matching + user asm and register definitions. */ + #ifndef REGISTER_PREFIX +diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux +index 62a870b66..7cd7cde25 100644 +--- a/gcc/config/loongarch/t-linux ++++ b/gcc/config/loongarch/t-linux +@@ -16,68 +16,16 @@ + # along with GCC; see the file COPYING3. If not see + # . + +-# Multilib +-MULTILIB_OPTIONS = mabi=lp64d/mabi=lp64f/mabi=lp64s +-MULTILIB_DIRNAMES = base/lp64d base/lp64f base/lp64s +- +-# The GCC driver always gets all abi-related options on the command line. +-# (see loongarch-driver.c:driver_get_normalized_m_opts) +-comma=, +-MULTILIB_REQUIRED = $(foreach mlib,$(subst $(comma), ,$(TM_MULTILIB_CONFIG)),\ +- $(firstword $(subst /, ,$(mlib)))) +- +-SPECS = specs.install +- +-# temporary self_spec when building libraries (e.g. libgcc) +-gen_mlib_spec = $(if $(word 2,$1),\ +- %{$(firstword $1):$(patsubst %,-%,$(wordlist 2,$(words $1),$1))}) +- +-# clean up the result of DRIVER_SELF_SPEC to avoid conflict +-lib_build_self_spec = % $@ +- +-# Do some preparation before regression tests: +-# remove lib-build-specs / make symlinks for the toplevel multilib variant +- +-LA_DEFAULT_MULTISUBDIR = $(shell $(GCC_FOR_TARGET) --print-multi-dir) +-.PHONY: remove-lib-specs +-check check-host check-target $(CHECK_TARGETS) $(lang_checks): remove-lib-specs +-remove-lib-specs: +- -mv -f specs.install specs 2>/dev/null +- -mv $(LA_DEFAULT_MULTISUBDIR)/* ./ +- -mkdir -p ../$(target_noncanonical)/`dirname $(LA_DEFAULT_MULTISUBDIR)` +- -$(LN_S) .. ../$(target_noncanonical)/$(LA_DEFAULT_MULTISUBDIR) +- +-# Multiarch +-ifneq ($(call if_multiarch,yes),yes) +- # Define LA_DISABLE_MULTIARCH if multiarch is disabled. 
+- tm_defines += LA_DISABLE_MULTIARCH +-else +- # Only define MULTIARCH_DIRNAME when multiarch is enabled, +- # or it would always introduce ${target} into the search path. +- MULTIARCH_DIRNAME = $(LA_MULTIARCH_TRIPLET) +-endif ++MULTIOSDIR_lp64d := ../lib64$(call if_multiarch,:loongarch64-linux-gnu) ++MULTIOSDIR_lp64f := ../lib64/f32$(call if_multiarch,:loongarch64-linux-gnuf32) ++MULTIOSDIR_lp64s := ../lib64/sf$(call if_multiarch,:loongarch64-linux-gnusf) + + # Don't define MULTILIB_OSDIRNAMES if multilib is disabled. + ifeq ($(filter LA_DISABLE_MULTILIB,$(tm_defines)),) + +- MULTILIB_OSDIRNAMES = \ +- mabi.lp64d=../lib64$\ +- $(call if_multiarch,:loongarch64-linux-gnu) +- +- MULTILIB_OSDIRNAMES += \ +- mabi.lp64f=../lib64/f32$\ +- $(call if_multiarch,:loongarch64-linux-gnuf32) +- +- MULTILIB_OSDIRNAMES += \ +- mabi.lp64s=../lib64/sf$\ +- $(call if_multiarch,:loongarch64-linux-gnusf) ++ MULTILIB_OSDIRNAMES = .=$(MULTIOSDIR_$(mlib_default)) ++ MULTILIB_OSDIRNAMES += mabi.lp64d=$(MULTIOSDIR_lp64d) ++ MULTILIB_OSDIRNAMES += mabi.lp64f=$(MULTIOSDIR_lp64f) ++ MULTILIB_OSDIRNAMES += mabi.lp64s=$(MULTIOSDIR_lp64s) + + endif +diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch +index e73f4f437..28cfb49df 100644 +--- a/gcc/config/loongarch/t-loongarch ++++ b/gcc/config/loongarch/t-loongarch +@@ -16,7 +16,7 @@ + # along with GCC; see the file COPYING3. If not see + # . + +-TM_H += $(srcdir)/config/loongarch/loongarch-driver.h ++TM_H += loongarch-multilib.h $(srcdir)/config/loongarch/loongarch-driver.h + OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h \ + $(srcdir)/config/loongarch/loongarch-tune.h + +diff --git a/gcc/config/loongarch/t-multilib b/gcc/config/loongarch/t-multilib +new file mode 100644 +index 000000000..bf6c18298 +--- /dev/null ++++ b/gcc/config/loongarch/t-multilib +@@ -0,0 +1,68 @@ ++# Copyright (C) 2023 Free Software Foundation, Inc. ++# ++# This file is part of GCC. 
++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++# Helper definitions ++comma=, ++null := ++space := $(null) # ++exclude_1st = $(wordlist 2,$(words $1),$1) ++ ++# Common definitions ++mlib_all := lp64d lp64f lp64s ++$(foreach i,$(mlib_all),$(eval MULTISUBDIR_$i := base/$i)) ++ ++mlib_default := $(firstword $(subst $(comma), ,$(TM_MULTILIB_CONFIG))) ++mlib_all := $(filter-out $(mlib_default),$(mlib_all)) ++ ++MULTILIB_OPTIONS := $(subst $(space),/,$(foreach i,$(mlib_all),mabi=$(i))) ++MULTILIB_DIRNAMES := $(foreach i,$(mlib_all),$(MULTISUBDIR_$(i))) ++ ++# Customize builds with --with-multilib-list ++MULTILIB_REQUIRED := $(foreach i,$(call exclude_1st,\ ++ $(subst $(comma), ,$(TM_MULTILIB_CONFIG))),\ ++ $(firstword $(subst /, ,$(i)))) ++ ++## spec rules for building libraries, triggered by -fmultiflags ++gen_mlib_spec = $(if $(word 2,$1),\ ++ %{$(firstword $1):$(patsubst %,-%,$(call exclude_1st,$1)})) ++ ++lib_build_spec = $(foreach mlib,\ ++ $(call exclude_1st,$(subst $(comma), ,$(TM_MULTILIB_CONFIG))),\ ++ $(call gen_mlib_spec,$(subst /, ,$(mlib)))) ++ ++default_mlib_spec := %{fmultiflags:%{!mabi=*:-mabi=$(mlib_default)}} ++lib_build_spec := %{fmultiflags:$(lib_build_spec)} ++ ++ifneq ($(TM_MULTILIB_CONFIG),) ++loongarch-multilib.h: ++ @echo "#define MLIB_SELF_SPECS" \ ++ "\"$(default_mlib_spec)\"," \ ++ "\"$(lib_build_spec)\"," > $@ ++else ++loongarch-multilib.h: ; @touch $@ ++endif ++ ++# Multiarch 
++ifneq ($(call if_multiarch,yes),yes) ++ # Define LA_DISABLE_MULTIARCH if multiarch is disabled. ++ tm_defines += LA_DISABLE_MULTIARCH ++else ++ # Only define MULTIARCH_DIRNAME when multiarch is enabled, ++ # or it would always introduce ${target} into the search path. ++ MULTIARCH_DIRNAME = $(LA_MULTIARCH_TRIPLET) ++endif +-- +2.43.0 + diff --git a/0001-Sw64-Port-add-configure-support-for-sw64.patch b/0001-Sw64-Port-add-configure-support-for-sw64.patch new file mode 100644 index 0000000000000000000000000000000000000000..a2e9f8638cec5d99d63b70d4955a795ba3986505 --- /dev/null +++ b/0001-Sw64-Port-add-configure-support-for-sw64.patch @@ -0,0 +1,703 @@ +From 64050ef082f7f3af78cc136c17c995d62cec14b5 Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:25:58 +0800 +Subject: [PATCH 01/16] Sw64 Port: add configure support for sw64 + +--- + Makefile.in | 1 + + Makefile.tpl | 1 + + config.guess | 12 ++++ + config.sub | 1 + + config/intdiv0.m4 | 2 +- + config/mt-sw_64ieee | 3 + + config/tcl.m4 | 6 ++ + configure | 16 ++++- + configure.ac | 19 +++++- + contrib/config-list.mk | 1 + + gcc/config.gcc | 35 +++++++++++ + gcc/config.host | 8 +++ + gcc/config.in | 17 +++++ + gcc/config/host-linux.cc | 2 + + gcc/configure | 131 ++++++++++++++++++++++++++++++++++++++- + gcc/configure.ac | 84 ++++++++++++++++++++++++- + gcc/doc/install.texi | 9 +++ + 17 files changed, 342 insertions(+), 6 deletions(-) + create mode 100644 config/mt-sw_64ieee + +diff --git a/Makefile.in b/Makefile.in +index 7785b3d9a..283c16c4e 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -707,6 +707,7 @@ all: + @target_makefile_frag@ + @alphaieee_frag@ + @ospace_frag@ ++@sw_64ieee_frag@ + @host_makefile_frag@ + ### + +diff --git a/Makefile.tpl b/Makefile.tpl +index ef58fac2b..d629bca8b 100644 +--- a/Makefile.tpl ++++ b/Makefile.tpl +@@ -630,6 +630,7 @@ all: + @target_makefile_frag@ + @alphaieee_frag@ + @ospace_frag@ ++@sw_64ieee_frag@ + @host_makefile_frag@ + ### + +diff --git a/config.guess 
b/config.guess +index 1972fda8e..0275a0ef8 100755 +--- a/config.guess ++++ b/config.guess +@@ -1101,6 +1101,18 @@ EOF + sparc:Linux:*:* | sparc64:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; ++ sw_64:Linux:*:*) ++ case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in ++ SW6) UNAME_MACHINE=sw_64sw6 ;; ++ SW6A) UNAME_MACHINE=sw_64sw6a ;; ++ SW6B) UNAME_MACHINE=sw_64sw6b ;; ++ SW8A) UNAME_MACHINE=sw_64sw8a ;; ++ SW) UNAME_MACHINE=sw_64 ;; ++ esac ++ objdump --private-headers /bin/sh | grep -q ld.so.1 ++ if test "$?" = 0 ; then LIBC=gnulibc1 ; fi ++ echo ${UNAME_MACHINE}-unknown-linux-${LIBC} ++ exit ;; + tile*:Linux:*:*) + echo "$UNAME_MACHINE"-unknown-linux-"$LIBC" + exit ;; +diff --git a/config.sub b/config.sub +index 38f3d037a..70ff2e6e4 100755 +--- a/config.sub ++++ b/config.sub +@@ -1262,6 +1262,7 @@ case $cpu-$vendor in + | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v | sv1 | sx* \ + | spu \ ++ | sw_64 | sw_64sw6a | sw_64sw6b | sw_64sw8a \ + | tahoe \ + | thumbv7* \ + | tic30 | tic4x | tic54x | tic55x | tic6x | tic80 \ +diff --git a/config/intdiv0.m4 b/config/intdiv0.m4 +index 55dddcf1c..bfe1bdcdf 100644 +--- a/config/intdiv0.m4 ++++ b/config/intdiv0.m4 +@@ -56,7 +56,7 @@ int main () + [ + # Guess based on the CPU. 
+ case "$host_cpu" in +- alpha* | i[34567]86 | m68k | s390*) ++ alpha* | i[34567]86 | m68k | s390* | sw_64*) + gt_cv_int_divbyzero_sigfpe="guessing yes";; + *) + gt_cv_int_divbyzero_sigfpe="guessing no";; +diff --git a/config/mt-sw_64ieee b/config/mt-sw_64ieee +new file mode 100644 +index 000000000..80c17cdc6 +--- /dev/null ++++ b/config/mt-sw_64ieee +@@ -0,0 +1,3 @@ ++CFLAGS_FOR_TARGET += -mieee ++CXXFLAGS_FOR_TARGET += -mieee ++GOCFLAGS_FOR_TARGET += -mieee +diff --git a/config/tcl.m4 b/config/tcl.m4 +index 4542a4b23..c58bf5343 100644 +--- a/config/tcl.m4 ++++ b/config/tcl.m4 +@@ -1368,6 +1368,9 @@ dnl AC_CHECK_TOOL(AR, ar) + if test "`uname -m`" = "alpha" ; then + CFLAGS="$CFLAGS -mieee" + fi ++ if test "`uname -m`" = "sw_64" ; then ++ CFLAGS="$CFLAGS -mieee" ++ fi + if test $do64bit = yes; then + AC_CACHE_CHECK([if compiler accepts -m64 flag], tcl_cv_cc_m64, [ + hold_cflags=$CFLAGS +@@ -1418,6 +1421,9 @@ dnl AC_CHECK_TOOL(AR, ar) + if test "`uname -m`" = "alpha" ; then + CFLAGS="$CFLAGS -mieee" + fi ++ if test "`uname -m`" = "sw_64" ; then ++ CFLAGS="$CFLAGS -mieee" ++ fi + ;; + Lynx*) + SHLIB_CFLAGS="-fPIC" +diff --git a/configure b/configure +index aff62c464..1b7c11292 100755 +--- a/configure ++++ b/configure +@@ -789,6 +789,7 @@ ac_subst_files='serialization_dependencies + host_makefile_frag + target_makefile_frag + alphaieee_frag ++sw_64ieee_frag + ospace_frag' + ac_user_opts=' + enable_option_checking +@@ -4016,6 +4017,10 @@ case "${target}" in + use_gnu_ld=no + fi + ;; ++ sw_64*-*-*) ++ # newlib is not 64 bit ready ++ noconfigdirs="$noconfigdirs target-newlib target-libgloss" ++ ;; + tic6x-*-*) + noconfigdirs="$noconfigdirs sim" + ;; +@@ -9584,6 +9589,15 @@ case $target in + ;; + esac + ++sw_64ieee_frag=/dev/null ++case $target in ++ sw_64*-*-*) ++ # This just makes sure to use the -mieee option to build target libs. ++ # This should probably be set individually by each library. 
++ sw_64ieee_frag="config/mt-sw_64ieee" ++ ;; ++esac ++ + # If --enable-target-optspace always use -Os instead of -O2 to build + # the target libraries, similarly if it is not specified, use -Os + # on selected platforms. +@@ -10299,7 +10313,7 @@ case "${target}" in + esac + + # Makefile fragments. +-for frag in host_makefile_frag target_makefile_frag alphaieee_frag ospace_frag; ++for frag in host_makefile_frag target_makefile_frag alphaieee_frag sw_64ieee_frag ospace_frag; + do + eval fragval=\$$frag + if test $fragval != /dev/null; then +diff --git a/configure.ac b/configure.ac +index f310d75ca..c4a65a49d 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -1271,6 +1271,10 @@ case "${target}" in + use_gnu_ld=no + fi + ;; ++ sw_64*-*-*) ++ # newlib is not 64 bit ready ++ noconfigdirs="$noconfigdirs target-newlib target-libgloss" ++ ;; + tic6x-*-*) + noconfigdirs="$noconfigdirs sim" + ;; +@@ -1335,6 +1339,9 @@ case "${host}" in + rs6000-*-aix*) + host_makefile_frag="config/mh-ppc-aix" + ;; ++ sw_64*-linux*) ++ host_makefile_frag="config/mh-sw_64-linux" ++ ;; + esac + fi + +@@ -2765,6 +2772,15 @@ case $target in + ;; + esac + ++sw_64ieee_frag=/dev/null ++case $target in ++ sw_64*-*-*) ++ # This just makes sure to use the -mieee option to build target libs. ++ # This should probably be set individually by each library. ++ sw_64ieee_frag="config/mt-sw_64ieee" ++ ;; ++esac ++ + # If --enable-target-optspace always use -Os instead of -O2 to build + # the target libraries, similarly if it is not specified, use -Os + # on selected platforms. +@@ -3475,7 +3491,7 @@ case "${target}" in + esac + + # Makefile fragments. 
+-for frag in host_makefile_frag target_makefile_frag alphaieee_frag ospace_frag; ++for frag in host_makefile_frag target_makefile_frag alphaieee_frag ospace_frag sw_64ieee_frag; + do + eval fragval=\$$frag + if test $fragval != /dev/null; then +@@ -3486,6 +3502,7 @@ AC_SUBST_FILE(host_makefile_frag) + AC_SUBST_FILE(target_makefile_frag) + AC_SUBST_FILE(alphaieee_frag) + AC_SUBST_FILE(ospace_frag) ++AC_SUBST_FILE(sw_64ieee_frag) + + # Miscellanea: directories, flags, etc. + AC_SUBST(RPATH_ENVVAR) +diff --git a/contrib/config-list.mk b/contrib/config-list.mk +index e04210556..730a49e64 100644 +--- a/contrib/config-list.mk ++++ b/contrib/config-list.mk +@@ -96,6 +96,7 @@ LIST = aarch64-elf aarch64-linux-gnu aarch64-rtems \ + sparc64-sun-solaris2.11OPT-with-gnu-ldOPT-with-gnu-asOPT-enable-threads=posix \ + sparc-wrs-vxworks sparc64-elf sparc64-rtems sparc64-linux sparc64-freebsd6 \ + sparc64-netbsd sparc64-openbsd \ ++ sw_64-linux-gnu sw_64-netbsd sw_64-openbsd \ + tilegx-linux-gnuOPT-enable-obsolete tilegxbe-linux-gnuOPT-enable-obsolete \ + tilepro-linux-gnuOPT-enable-obsolete \ + v850e1-elf v850e-elf v850-elf v850-rtems vax-linux-gnu \ +diff --git a/gcc/config.gcc b/gcc/config.gcc +index 23c5bee2b..d55645381 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -547,6 +547,10 @@ sh[123456789lbe]*-*-* | sh-*-*) + extra_options="${extra_options} fused-madd.opt" + extra_objs="${extra_objs} sh_treg_combine.o sh-mem.o sh_optimize_sett_clrt.o" + ;; ++sw_64*-*-*) ++ cpu_type=sw_64 ++ extra_options="${extra_options} g.opt" ++ ;; + v850*-*-*) + cpu_type=v850 + ;; +@@ -3471,6 +3475,11 @@ sparc64-*-openbsd*) + with_cpu=ultrasparc + tmake_file="${tmake_file} sparc/t-sparc" + ;; ++sw_64*-*-linux*) ++ tm_file="elfos.h ${tm_file} sw_64/gnu-user.h sw_64/elf.h sw_64/linux.h sw_64/linux-elf.h glibc-stdint.h" ++ tmake_file="${tmake_file} sw_64/t-linux sw_64/t-sw_64" ++ extra_options="${extra_options} sw_64/elf.opt" ++ ;; + tic6x-*-elf) + tm_file="elfos.h ${tm_file} c6x/elf-common.h 
c6x/elf.h" + tm_file="${tm_file} tm-dwarf2.h newlib-stdint.h" +@@ -3999,6 +4008,15 @@ if test x$with_cpu = x ; then + ;; + esac + ;; ++ sw_64sw6a*-*-*) ++ with_cpu=sw6a ++ ;; ++ sw_64sw6b*-*-*) ++ with_cpu=sw6b ++ ;; ++ sw_64sw8a*-*-*) ++ with_cpu=sw8a ++ ;; + visium-*-*) + with_cpu=gr5 + ;; +@@ -5571,6 +5589,23 @@ case "${target}" in + esac + ;; + ++ sw_64*-*-*) ++ supported_defaults="cpu tune" ++ for which in cpu tune; do ++ eval "val=\$with_$which" ++ case "$val" in ++ "" \ ++ | sw6 | sw6a | sw6b | sw8a \ ++ | sw6c) ++ ;; ++ *) ++ echo "Unknown CPU used in --with-$which=$val" 1>&2 ++ exit 1 ++ ;; ++ esac ++ done ++ ;; ++ + tic6x-*-*) + supported_defaults="arch" + +diff --git a/gcc/config.host b/gcc/config.host +index bf7dcb4cc..081ce29c2 100644 +--- a/gcc/config.host ++++ b/gcc/config.host +@@ -198,6 +198,14 @@ case ${host} in + ;; + esac + ;; ++ sw_64*-*-linux*) ++ case ${target} in ++ sw_64*-*-linux*) ++ host_extra_gcc_objs="driver-sw_64.o" ++ host_xmake_file="${host_xmake_file} sw_64/x-sw_64" ++ ;; ++ esac ++ ;; + esac + + # Machine-specific settings. +diff --git a/gcc/config.in b/gcc/config.in +index 91328572b..caf0d6492 100644 +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -205,6 +205,11 @@ + #undef ENABLE_LD_BUILDID + #endif + ++/* Define if gcc should always pass --no-relax to linker for sw_64. */ ++#ifndef USED_FOR_TARGET ++#undef ENABLE_LD_NORELAX ++#endif ++ + + /* Define to 1 to enable libquadmath support */ + #ifndef USED_FOR_TARGET +@@ -422,6 +427,10 @@ + #undef HAVE_AS_EXPLICIT_RELOCS + #endif + ++/* Define if your assembler supports explicit relocations. */ ++#ifndef USED_FOR_TARGET ++#undef SW_64_ENABLE_ASAN ++#endif + + /* Define if your assembler supports FMAF, HPC, and VIS 3.0 instructions. 
*/ + #ifndef USED_FOR_TARGET +@@ -2726,3 +2735,11 @@ + #undef vfork + #endif + ++/* Define only sw64 target */ ++#undef FLAG_SW64_ATOMIC ++#undef FLAG_SW64_90139 ++#undef FLAG_SW64_PREFETCH ++#undef FLAG_SW64_PROTECT ++#undef FLAG_SW64_INC_DEC ++#undef FLAG_SW64_DELNOP ++#undef FLAG_SW64_FM +diff --git a/gcc/config/host-linux.cc b/gcc/config/host-linux.cc +index 817d3c087..a65468272 100644 +--- a/gcc/config/host-linux.cc ++++ b/gcc/config/host-linux.cc +@@ -100,6 +100,8 @@ + # define TRY_EMPTY_VM_SPACE 0x1000000000 + #elif defined(__loongarch__) && defined(__LP64__) + # define TRY_EMPTY_VM_SPACE 0x8000000000 ++#elif defined(__sw_64) ++#define TRY_EMPTY_VM_SPACE 0x10000000000 + #else + # define TRY_EMPTY_VM_SPACE 0 + #endif +diff --git a/gcc/configure b/gcc/configure +index ef0449edd..4a70b7c66 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -25775,6 +25775,29 @@ foo: .long 25 + xor %l1, %tle_lox10(foo), %o5 + ld [%g7 + %o5], %o1" + ;; ++ sw_64*-*-*) ++ conftest_s=' ++ .section ".tdata","awT",@progbits ++foo: .long 25 ++ .text ++ ldl $27,__tls_get_addr($29) !literal!1 ++ ldi $16,foo($29) !tlsgd!1 ++ call $26,($27),__tls_get_addr !lituse_tlsgd!1 ++ ldl $27,__tls_get_addr($29) !literal!2 ++ ldi $16,foo($29) !tlsldm!2 ++ call $26,($27),__tls_get_addr !lituse_tlsldm!2 ++ ldl $1,foo($29) !gotdtprel ++ ldih $2,foo($29) !dtprelhi ++ ldi $3,foo($2) !dtprello ++ ldi $4,foo($29) !dtprel ++ ldl $1,foo($29) !gottprel ++ ldih $2,foo($29) !tprelhi ++ ldi $3,foo($2) !tprello ++ ldi $4,foo($29) !tprel' ++ tls_first_major=2 ++ tls_first_minor=13 ++ tls_as_opt=--fatal-warnings ++ ;; + tilepro*-*-*) + conftest_s=' + .section ".tdata","awT",@progbits +@@ -26313,6 +26336,101 @@ fi + + ;; + ++ sw_64*-*-linux* | sw_64*-*-*bsd*) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for explicit relocation support" >&5 ++$as_echo_n "checking assembler for explicit relocation support... 
" >&6; } ++if ${gcc_cv_as_sw_64_explicit_relocs+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ gcc_cv_as_sw_64_explicit_relocs=no ++ if test x$gcc_cv_as != x; then ++ $as_echo ' .set nomacro ++ .text ++ ext0b $3, $2, $3 !lituse_bytoff!1 ++ ldl $2, a($29) !literal!1 ++ ldl $4, b($29) !literal!2 ++ ldl_u $3, 0($2) !lituse_base!1 ++ ldl $27, f($29) !literal!5 ++ call $26, ($27), f !lituse_jsr!5 ++ ldih $29, 0($26) !gpdisp!3 ++ ldi $0, c($29) !gprel ++ ldih $1, d($29) !gprelhigh ++ ldi $1, d($1) !gprellow ++ ldi $29, 0($29) !gpdisp!3' > conftest.s ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_sw_64_explicit_relocs=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_sw_64_explicit_relocs" >&5 ++$as_echo "$gcc_cv_as_sw_64_explicit_relocs" >&6; } ++if test $gcc_cv_as_sw_64_explicit_relocs = yes; then ++ ++$as_echo "#define HAVE_AS_EXPLICIT_RELOCS 1" >>confdefs.h ++ ++fi ++ ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for jsrdirect relocation support" >&5 ++$as_echo_n "checking assembler for jsrdirect relocation support... 
" >&6; } ++if ${gcc_cv_as_sw_64_jsrdirect_relocs+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ gcc_cv_as_sw_64_jsrdirect_relocs=no ++ if test $in_tree_gas = yes; then ++ if test $gcc_cv_gas_vers -ge `expr \( \( 2 \* 1000 \) + 16 \) \* 1000 + 90` ++ then gcc_cv_as_sw_64_jsrdirect_relocs=yes ++fi ++#trouble# ++ elif test x$gcc_cv_as != x; then ++ $as_echo ' .set nomacro ++ .text ++ ldl $27, a($29) !literal!1 ++ call $26, ($27), a !lituse_jsrdirect!1' > conftest.s ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_sw_64_jsrdirect_relocs=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_sw_64_jsrdirect_relocs" >&5 ++$as_echo "$gcc_cv_as_sw_64_jsrdirect_relocs" >&6; } ++if test $gcc_cv_as_sw_64_jsrdirect_relocs = yes; then ++ ++$as_echo "#define HAVE_AS_JSRDIRECT_RELOCS 1" >>confdefs.h ++ ++fi ++cat >> confdefs.h <<_ACEOF ++#define FLAG_SW64_ATOMIC 1 ++#define FLAG_SW64_90139 1 ++#define FLAG_SW64_PREFETCH 1 ++#define FLAG_SW64_PROTECT 1 ++#define FLAG_SW64_INC_DEC 1 ++#define FLAG_SW64_DELNOP 1 ++#define FLAG_SW64_FM 1 ++_ACEOF ++ ++ ;; ++ + avr-*-*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for --mlink-relax option" >&5 + $as_echo_n "checking assembler for --mlink-relax option... 
" >&6; } +@@ -29377,7 +29495,7 @@ esac + case "$cpu_type" in + aarch64 | alpha | arc | arm | avr | bfin | cris | csky | i386 | loongarch | m32c \ + | m68k | microblaze | mips | nds32 | nios2 | pa | riscv | rs6000 | score | sparc \ +- | tilegx | tilepro | visium | xstormy16 | xtensa) ++ | sw_64 | tilegx | tilepro | visium | xstormy16 | xtensa) + insn="nop" + ;; + ia64 | s390) +@@ -31151,6 +31269,17 @@ $as_echo "$as_me: WARNING: --build-id is not supported by your linker; --enable- + fi + fi + ++# sw_64 add --enable-linker-no-relax to support linker -Wl,-no-relax ++# Check whether --enable-linker-no-relax was given. ++if test "${enable_linker_no_relax+set}" = set; then : ++ enableval=$enable_linker_no_relax; ++else ++ enable_linker_no_relax=no ++fi ++ ++if test x"$enable_linker_no_relax" = xyes; then ++ $as_echo "#define ENABLE_LD_NORELAX 1" >>confdefs.h ++fi + # In binutils 2.21, GNU ld gained support for new emulations fully + # supporting the Solaris 2 ABI. Detect their presence in the linker used. 
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking linker *_sol2 emulation support" >&5 +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 708ec3fd3..9683ab156 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -23,6 +23,7 @@ + # Initialization and sanity checks + # -------------------------------- + ++AC_PREREQ(2.64) + AC_INIT + AC_CONFIG_SRCDIR(tree.cc) + AC_CONFIG_HEADER(auto-host.h:config.in) +@@ -4076,6 +4077,29 @@ foo: .long 25 + xor %l1, %tle_lox10(foo), %o5 + ld [%g7 + %o5], %o1" + ;; ++ sw_64*-*-*) ++ conftest_s=' ++ .section ".tdata","awT",@progbits ++foo: .long 25 ++ .text ++ ldl $27,__tls_get_addr($29) !literal!1 ++ ldi $16,foo($29) !tlsgd!1 ++ call $26,($27),__tls_get_addr !lituse_tlsgd!1 ++ ldl $27,__tls_get_addr($29) !literal!2 ++ ldi $16,foo($29) !tlsldm!2 ++ call $26,($27),__tls_get_addr !lituse_tlsldm!2 ++ ldl $1,foo($29) !gotdtprel ++ ldih $2,foo($29) !dtprelhi ++ ldi $3,foo($2) !dtprello ++ ldi $4,foo($29) !dtprel ++ ldl $1,foo($29) !gottprel ++ ldih $2,foo($29) !tprelhi ++ ldi $3,foo($2) !tprello ++ ldi $4,foo($29) !tprel' ++ tls_first_major=2 ++ tls_first_minor=13 ++ tls_as_opt=--fatal-warnings ++ ;; + tilepro*-*-*) + conftest_s=' + .section ".tdata","awT",@progbits +@@ -4677,6 +4701,36 @@ foo: + [Define if your assembler supports LEON instructions.])]) + ;; + ++ sw_64*-*-linux* | sw_64*-*-*bsd*) ++ gcc_GAS_CHECK_FEATURE([explicit relocation support], ++ gcc_cv_as_sw_64_explicit_relocs, [2,12,0],, ++[ .set nomacro ++ .text ++ ext0b $3, $2, $3 !lituse_bytoff!1 ++ ldl $2, a($29) !literal!1 ++ ldl $4, b($29) !literal!2 ++ ldl_u $3, 0($2) !lituse_base!1 ++ ldl $27, f($29) !literal!5 ++ call $26, ($27), f !lituse_jsr!5 ++ ldih $29, 0($26) !gpdisp!3 ++ ldi $0, c($29) !gprel ++ ldih $1, d($29) !gprelhigh ++ ldi $1, d($1) !gprellow ++ ldi $29, 0($29) !gpdisp!3],, ++ [AC_DEFINE(HAVE_AS_EXPLICIT_RELOCS, 1, ++ [Define if your assembler supports explicit relocations.])]) ++ gcc_GAS_CHECK_FEATURE([jsrdirect relocation support], ++ 
gcc_cv_as_sw_64_jsrdirect_relocs,,
++[	.set nomacro
++	.text
++	ldl $27, a($29)	!literal!1
++	call $26, ($27), a	!lituse_jsrdirect!1],,
++      [AC_DEFINE(HAVE_AS_JSRDIRECT_RELOCS, 1,
++		[Define if your assembler supports the lituse_jsrdirect relocation.])])
++#      [AC_DEFINE(SW_64_ENABLE_ASAN, 1,
++#		[Define if your target fully enable asan supports.])])
++    ;;
++
+ changequote(,)dnl
+   i[34567]86-*-* | x86_64-*-*)
+ changequote([,])dnl
+@@ -5505,7 +5559,7 @@ esac
+ # version to the per-target configury.
+ case "$cpu_type" in
+   aarch64 | alpha | arc | arm | avr | bfin | cris | csky | i386 | loongarch | m32c \
+-  | m68k | microblaze | mips | nds32 | nios2 | pa | riscv | rs6000 | score | sparc \
++  | m68k | microblaze | mips | nds32 | nios2 | pa | riscv | rs6000 | score | sparc | sw_64 \
+   | tilegx | tilepro | visium | xstormy16 | xtensa)
+     insn="nop"
+     ;;
+@@ -6758,6 +6812,18 @@ if test x"$enable_linker_build_id" = xyes; then
+   fi
+ fi
+ 
++# --no-relax
++AC_ARG_ENABLE(linker-no-relax,
++[AS_HELP_STRING([--enable-linker-no-relax],
++ [compiler will always pass --no-relax to linker])],
++[],
++enable_linker_no_relax=no)
++
++if test x"$enable_linker_no_relax" = xyes; then
++  AC_DEFINE(ENABLE_LD_NORELAX, 1,
++    [Define if gcc should always pass --no-relax to linker.])
++fi
++
+ # In binutils 2.21, GNU ld gained support for new emulations fully
+ # supporting the Solaris 2 ABI. Detect their presence in the linker used.
+ AC_CACHE_CHECK(linker *_sol2 emulation support,
+@@ -6930,7 +7009,8 @@ case "$target" in
+     powerpc*-*-linux* | \
+     sparc*-*-linux* | \
+     s390*-*-linux* | \
+-    alpha*-*-linux*)
++    alpha*-*-linux* | \
++    sw_64*-*-linux*)
+       AC_ARG_WITH(long-double-128,
+         [AS_HELP_STRING([--with-long-double-128],
+           [use 128-bit long double by default])],
+diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi
+index a650f60c7..40dc7ae75 100644
+--- a/gcc/doc/install.texi
++++ b/gcc/doc/install.texi
+@@ -5005,6 +5005,15 @@ on a Solaris 11 system:
+ @heading sparcv9-*-solaris2*
+ This is a synonym for @samp{sparc64-*-solaris2*}.
+ 
++@html
++<hr />
++@end html ++@anchor{sw_64-x-x} ++@heading sw_64*-*-* ++This section contains general configuration information for all ++SW64-based platforms using ELF@. In addition to reading this ++section, please read all other sections that match your target. ++ + @html +
+ @end html +-- +2.25.1 + diff --git a/0001-Version-Set-version-to-12.3.1.patch b/0001-Version-Set-version-to-12.3.1.patch new file mode 100644 index 0000000000000000000000000000000000000000..e5e920ecd05a42f5aae0d33ecc568fe25dcf25e7 --- /dev/null +++ b/0001-Version-Set-version-to-12.3.1.patch @@ -0,0 +1,19 @@ +From 73ee6351353b036f466ba1aab9a9e7d7865bf972 Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Tue, 11 Jul 2023 16:07:51 +0800 +Subject: [PATCH 01/22] [Version] Set version to 12.3.1 + +--- + gcc/BASE-VER | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/BASE-VER b/gcc/BASE-VER +index 4d23cb8e0..9c028e25d 100644 +--- a/gcc/BASE-VER ++++ b/gcc/BASE-VER +@@ -1 +1 @@ +-12.3.0 ++12.3.1 +-- +2.33.0 + diff --git a/0002-LoongArch-Check-whether-binutils-supports-the-relax-.patch b/0002-LoongArch-Check-whether-binutils-supports-the-relax-.patch new file mode 100644 index 0000000000000000000000000000000000000000..9281f74026640fe62107af93cd04cb1943a0f9fc --- /dev/null +++ b/0002-LoongArch-Check-whether-binutils-supports-the-relax-.patch @@ -0,0 +1,192 @@ +From 13c33536900709bf1f33171d5ae2b2af97789601 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Fri, 15 Sep 2023 10:22:49 +0800 +Subject: [PATCH 002/188] LoongArch: Check whether binutils supports the relax + function. If supported, explicit relocs are turned off by default. + +gcc/ChangeLog: + + * config.in: Regenerate. + * config/loongarch/genopts/loongarch.opt.in: Add compilation option + mrelax. And set the initial value of explicit-relocs according to the + detection status. + * config/loongarch/gnu-user.h: When compiling with -mno-relax, pass the + --no-relax option to the linker. + * config/loongarch/loongarch-driver.h (ASM_SPEC): When compiling with + -mno-relax, pass the -mno-relax option to the assembler. + * config/loongarch/loongarch-opts.h (HAVE_AS_MRELAX_OPTION): Define macro. + * config/loongarch/loongarch.opt: Regenerate. + * configure: Regenerate. 
+ * configure.ac: Add detection of support for binutils relax function. +--- + gcc/config.in | 6 ++++ + gcc/config/loongarch/genopts/loongarch.opt.in | 7 ++++- + gcc/config/loongarch/gnu-user.h | 3 +- + gcc/config/loongarch/loongarch-driver.h | 2 +- + gcc/config/loongarch/loongarch-opts.h | 4 +++ + gcc/config/loongarch/loongarch.opt | 7 ++++- + gcc/configure | 31 +++++++++++++++++++ + gcc/configure.ac | 4 +++ + 8 files changed, 60 insertions(+), 4 deletions(-) + +diff --git a/gcc/config.in b/gcc/config.in +index 0dff36199..0c55e67e7 100644 +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -637,6 +637,12 @@ + #endif + + ++/* Define if your assembler supports -mrelax option. */ ++#ifndef USED_FOR_TARGET ++#undef HAVE_AS_MRELAX_OPTION ++#endif ++ ++ + /* Define if your assembler supports .mspabi_attribute. */ + #ifndef USED_FOR_TARGET + #undef HAVE_AS_MSPABI_ATTRIBUTE +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index 2ef1b1e3b..f18733c24 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -181,7 +181,7 @@ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. + + mexplicit-relocs +-Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS) ++Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS & !HAVE_AS_MRELAX_OPTION) + Use %reloc() assembly operators. + + ; The code model option names for -mcmodel. +@@ -214,3 +214,8 @@ Specify the code model. + mdirect-extern-access + Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0) + Avoid using the GOT to access external symbols. ++ ++mrelax ++Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) ++Take advantage of linker relaxations to reduce the number of instructions ++required to materialize symbol addresses. 
+diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h +index 44e4f2575..60ef75601 100644 +--- a/gcc/config/loongarch/gnu-user.h ++++ b/gcc/config/loongarch/gnu-user.h +@@ -48,7 +48,8 @@ along with GCC; see the file COPYING3. If not see + "%{!shared: %{static} " \ + "%{!static: %{!static-pie: %{rdynamic:-export-dynamic} " \ + "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}} " \ +- "%{static-pie: -static -pie --no-dynamic-linker -z text}}" ++ "%{static-pie: -static -pie --no-dynamic-linker -z text}}" \ ++ "%{mno-relax: --no-relax}" + + + /* Similar to standard Linux, but adding -ffast-math support. */ +diff --git a/gcc/config/loongarch/loongarch-driver.h b/gcc/config/loongarch/loongarch-driver.h +index e7d083677..59fa3263d 100644 +--- a/gcc/config/loongarch/loongarch-driver.h ++++ b/gcc/config/loongarch/loongarch-driver.h +@@ -53,7 +53,7 @@ along with GCC; see the file COPYING3. If not see + + #undef ASM_SPEC + #define ASM_SPEC \ +- "%{mabi=*} %(subtarget_asm_spec)" ++ "%{mabi=*} %{mno-relax} %(subtarget_asm_spec)" + + + extern const char* +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +index 624e246bb..f2b59abe6 100644 +--- a/gcc/config/loongarch/loongarch-opts.h ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -99,4 +99,8 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target, + #define HAVE_AS_EXPLICIT_RELOCS 0 + #endif + ++#ifndef HAVE_AS_MRELAX_OPTION ++#define HAVE_AS_MRELAX_OPTION 0 ++#endif ++ + #endif /* LOONGARCH_OPTS_H */ +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index f2d21c9f3..78f2baf3a 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -188,7 +188,7 @@ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. 
+ + mexplicit-relocs +-Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS) ++Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS & !HAVE_AS_MRELAX_OPTION) + Use %reloc() assembly operators. + + ; The code model option names for -mcmodel. +@@ -221,3 +221,8 @@ Specify the code model. + mdirect-extern-access + Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0) + Avoid using the GOT to access external symbols. ++ ++mrelax ++Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) ++Take advantage of linker relaxations to reduce the number of instructions ++required to materialize symbol addresses. +diff --git a/gcc/configure b/gcc/configure +index 2a5d3aaf3..8ae8a924a 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -28830,6 +28830,37 @@ if test $gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support = yes; then + + $as_echo "#define HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT 1" >>confdefs.h + ++fi ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for -mrelax option" >&5 ++$as_echo_n "checking assembler for -mrelax option... " >&6; } ++if ${gcc_cv_as_loongarch_relax+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ gcc_cv_as_loongarch_relax=no ++ if test x$gcc_cv_as != x; then ++ $as_echo '.text' > conftest.s ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -mrelax -o conftest.o conftest.s >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_loongarch_relax=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_relax" >&5 ++$as_echo "$gcc_cv_as_loongarch_relax" >&6; } ++if test $gcc_cv_as_loongarch_relax = yes; then ++ ++$as_echo "#define HAVE_AS_MRELAX_OPTION 1" >>confdefs.h ++ + fi + + ;; +diff --git a/gcc/configure.ac b/gcc/configure.ac +index ba2bf1ffc..f7161e66e 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -5322,6 +5322,10 @@ x: + .cfi_endproc],, + [AC_DEFINE(HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT, 1, + [Define if your assembler supports eh_frame pcrel encoding.])]) ++ gcc_GAS_CHECK_FEATURE([-mrelax option], gcc_cv_as_loongarch_relax, ++ [-mrelax], [.text],, ++ [AC_DEFINE(HAVE_AS_MRELAX_OPTION, 1, ++ [Define if your assembler supports -mrelax option.])]) + ;; + s390*-*-*) + gcc_GAS_CHECK_FEATURE([.gnu_attribute support], +-- +2.43.0 + diff --git a/0002-RISCV-Backport-inline-subword-atomic-patches.patch b/0002-RISCV-Backport-inline-subword-atomic-patches.patch new file mode 100644 index 0000000000000000000000000000000000000000..d3d2c33b749dcd48b72572ce510e55aac28c65f2 --- /dev/null +++ b/0002-RISCV-Backport-inline-subword-atomic-patches.patch @@ -0,0 +1,2042 @@ +From 123615a0aac59a731516ef11c1fe433d39b6573a Mon Sep 17 00:00:00 2001 +From: laokz +Date: Tue, 11 Jul 2023 21:03:14 +0800 +Subject: [PATCH 02/22] [RISCV] Backport inline subword atomic patches + +Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=f797260adaf52bee0ec0e16190bbefbe1bfc3692 + +203f3060dd363361b172f7295f42bb6bf5ac0b3b +--- + gcc/config/riscv/linux.h | 10 - + gcc/config/riscv/riscv-protos.h | 2 + + gcc/config/riscv/riscv.cc | 49 ++ + gcc/config/riscv/riscv.opt | 4 + + gcc/config/riscv/sync.md | 301 +++++++++ + gcc/doc/invoke.texi | 10 +- + .../gcc.target/riscv/inline-atomics-1.c | 18 + + 
.../gcc.target/riscv/inline-atomics-2.c | 9 + + .../gcc.target/riscv/inline-atomics-3.c | 569 ++++++++++++++++++ + .../gcc.target/riscv/inline-atomics-4.c | 566 +++++++++++++++++ + .../gcc.target/riscv/inline-atomics-5.c | 87 +++ + .../gcc.target/riscv/inline-atomics-6.c | 87 +++ + .../gcc.target/riscv/inline-atomics-7.c | 69 +++ + .../gcc.target/riscv/inline-atomics-8.c | 69 +++ + libgcc/config/riscv/atomic.c | 2 + + 15 files changed, 1841 insertions(+), 11 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-1.c + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-2.c + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-3.c + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-4.c + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-5.c + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-6.c + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-7.c + create mode 100644 gcc/testsuite/gcc.target/riscv/inline-atomics-8.c + +diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h +index 38803723b..b5c6c5027 100644 +--- a/gcc/config/riscv/linux.h ++++ b/gcc/config/riscv/linux.h +@@ -35,16 +35,6 @@ along with GCC; see the file COPYING3. If not see + #undef MUSL_DYNAMIC_LINKER + #define MUSL_DYNAMIC_LINKER "/lib/ld-musl-riscv" XLEN_SPEC MUSL_ABI_SUFFIX ".so.1" + +-/* Because RISC-V only has word-sized atomics, it requries libatomic where +- others do not. So link libatomic by default, as needed. 
*/ +-#undef LIB_SPEC +-#ifdef LD_AS_NEEDED_OPTION +-#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC \ +- " %{pthread:" LD_AS_NEEDED_OPTION " -latomic " LD_NO_AS_NEEDED_OPTION "}" +-#else +-#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC " -latomic " +-#endif +- + #define ICACHE_FLUSH_FUNC "__riscv_flush_icache" + + #define CPP_SPEC "%{pthread:-D_REENTRANT}" +diff --git a/gcc/config/riscv/riscv-protos.h b/gcc/config/riscv/riscv-protos.h +index 65bb85f55..3b039e00d 100644 +--- a/gcc/config/riscv/riscv-protos.h ++++ b/gcc/config/riscv/riscv-protos.h +@@ -74,6 +74,8 @@ extern bool riscv_expand_block_move (rtx, rtx, rtx); + extern bool riscv_store_data_bypass_p (rtx_insn *, rtx_insn *); + extern rtx riscv_gen_gpr_save_insn (struct riscv_frame_info *); + extern bool riscv_gpr_save_operation_p (rtx); ++extern void riscv_subword_address (rtx, rtx *, rtx *, rtx *, rtx *); ++extern void riscv_lshift_subword (machine_mode, rtx, rtx, rtx *); + + /* Routines implemented in riscv-c.cc. */ + void riscv_cpu_cpp_builtins (cpp_reader *); +diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc +index 4939d9964..9cf79beba 100644 +--- a/gcc/config/riscv/riscv.cc ++++ b/gcc/config/riscv/riscv.cc +@@ -5605,6 +5605,55 @@ riscv_asan_shadow_offset (void) + return TARGET_64BIT ? (HOST_WIDE_INT_1 << 29) : 0; + } + ++/* Given memory reference MEM, expand code to compute the aligned ++ memory address, shift and mask values and store them into ++ *ALIGNED_MEM, *SHIFT, *MASK and *NOT_MASK. */ ++ ++void ++riscv_subword_address (rtx mem, rtx *aligned_mem, rtx *shift, rtx *mask, ++ rtx *not_mask) ++{ ++ /* Align the memory address to a word. */ ++ rtx addr = force_reg (Pmode, XEXP (mem, 0)); ++ ++ rtx addr_mask = gen_int_mode (-4, Pmode); ++ ++ rtx aligned_addr = gen_reg_rtx (Pmode); ++ emit_move_insn (aligned_addr, gen_rtx_AND (Pmode, addr, addr_mask)); ++ ++ *aligned_mem = change_address (mem, SImode, aligned_addr); ++ ++ /* Calculate the shift amount. 
*/ ++ emit_move_insn (*shift, gen_rtx_AND (SImode, gen_lowpart (SImode, addr), ++ gen_int_mode (3, SImode))); ++ emit_move_insn (*shift, gen_rtx_ASHIFT (SImode, *shift, ++ gen_int_mode (3, SImode))); ++ ++ /* Calculate the mask. */ ++ int unshifted_mask = GET_MODE_MASK (GET_MODE (mem)); ++ ++ emit_move_insn (*mask, gen_int_mode (unshifted_mask, SImode)); ++ ++ emit_move_insn (*mask, gen_rtx_ASHIFT (SImode, *mask, ++ gen_lowpart (QImode, *shift))); ++ ++ emit_move_insn (*not_mask, gen_rtx_NOT(SImode, *mask)); ++} ++ ++/* Leftshift a subword within an SImode register. */ ++ ++void ++riscv_lshift_subword (machine_mode mode, rtx value, rtx shift, ++ rtx *shifted_value) ++{ ++ rtx value_reg = gen_reg_rtx (SImode); ++ emit_move_insn (value_reg, simplify_gen_subreg (SImode, value, ++ mode, 0)); ++ ++ emit_move_insn(*shifted_value, gen_rtx_ASHIFT (SImode, value_reg, ++ gen_lowpart (QImode, shift))); ++} ++ + /* Initialize the GCC target structure. */ + #undef TARGET_ASM_ALIGNED_HI_OP + #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t" +diff --git a/gcc/config/riscv/riscv.opt b/gcc/config/riscv/riscv.opt +index 492aad123..328d848d6 100644 +--- a/gcc/config/riscv/riscv.opt ++++ b/gcc/config/riscv/riscv.opt +@@ -225,3 +225,7 @@ Enum(isa_spec_class) String(20191213) Value(ISA_SPEC_CLASS_20191213) + misa-spec= + Target RejectNegative Joined Enum(isa_spec_class) Var(riscv_isa_spec) Init(TARGET_DEFAULT_ISA_SPEC) + Set the version of RISC-V ISA spec. ++ ++minline-atomics ++Target Var(TARGET_INLINE_SUBWORD_ATOMIC) Init(1) ++Always inline subword atomic operations. 
+diff --git a/gcc/config/riscv/sync.md b/gcc/config/riscv/sync.md +index 86b41e6b0..9c4fbabc6 100644 +--- a/gcc/config/riscv/sync.md ++++ b/gcc/config/riscv/sync.md +@@ -21,8 +21,11 @@ + + (define_c_enum "unspec" [ + UNSPEC_COMPARE_AND_SWAP ++ UNSPEC_COMPARE_AND_SWAP_SUBWORD + UNSPEC_SYNC_OLD_OP ++ UNSPEC_SYNC_OLD_OP_SUBWORD + UNSPEC_SYNC_EXCHANGE ++ UNSPEC_SYNC_EXCHANGE_SUBWORD + UNSPEC_ATOMIC_STORE + UNSPEC_MEMORY_BARRIER + ]) +@@ -92,6 +95,135 @@ + "%F3amo.%A3 %0,%z2,%1" + [(set (attr "length") (const_int 8))]) + ++(define_insn "subword_atomic_fetch_strong_" ++ [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem ++ (match_operand:SI 1 "memory_operand" "+A")) ;; mem location ++ (set (match_dup 1) ++ (unspec_volatile:SI ++ [(any_atomic:SI (match_dup 1) ++ (match_operand:SI 2 "register_operand" "rI")) ;; value for op ++ (match_operand:SI 3 "register_operand" "rI")] ;; mask ++ UNSPEC_SYNC_OLD_OP_SUBWORD)) ++ (match_operand:SI 4 "register_operand" "rI") ;; not_mask ++ (clobber (match_scratch:SI 5 "=&r")) ;; tmp_1 ++ (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_2 ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++ { ++ return "1:\;" ++ "lr.w.aq\t%0, %1\;" ++ "\t%5, %0, %2\;" ++ "and\t%5, %5, %3\;" ++ "and\t%6, %0, %4\;" ++ "or\t%6, %6, %5\;" ++ "sc.w.rl\t%5, %6, %1\;" ++ "bnez\t%5, 1b"; ++ } ++ [(set (attr "length") (const_int 28))]) ++ ++(define_expand "atomic_fetch_nand" ++ [(match_operand:SHORT 0 "register_operand") ;; old value at mem ++ (not:SHORT (and:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location ++ (match_operand:SHORT 2 "reg_or_0_operand"))) ;; value for op ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++{ ++ /* We have no QImode/HImode atomics, so form a mask, then use ++ subword_atomic_fetch_strong_nand to implement a LR/SC version of the ++ operation. 
*/ ++ ++ /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining ++ is disabled */ ++ ++ rtx old = gen_reg_rtx (SImode); ++ rtx mem = operands[1]; ++ rtx value = operands[2]; ++ rtx aligned_mem = gen_reg_rtx (SImode); ++ rtx shift = gen_reg_rtx (SImode); ++ rtx mask = gen_reg_rtx (SImode); ++ rtx not_mask = gen_reg_rtx (SImode); ++ ++ riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask); ++ ++ rtx shifted_value = gen_reg_rtx (SImode); ++ riscv_lshift_subword (mode, value, shift, &shifted_value); ++ ++ emit_insn (gen_subword_atomic_fetch_strong_nand (old, aligned_mem, ++ shifted_value, ++ mask, not_mask)); ++ ++ emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old, ++ gen_lowpart (QImode, shift))); ++ ++ emit_move_insn (operands[0], gen_lowpart (mode, old)); ++ ++ DONE; ++}) ++ ++(define_insn "subword_atomic_fetch_strong_nand" ++ [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem ++ (match_operand:SI 1 "memory_operand" "+A")) ;; mem location ++ (set (match_dup 1) ++ (unspec_volatile:SI ++ [(not:SI (and:SI (match_dup 1) ++ (match_operand:SI 2 "register_operand" "rI"))) ;; value for op ++ (match_operand:SI 3 "register_operand" "rI")] ;; mask ++ UNSPEC_SYNC_OLD_OP_SUBWORD)) ++ (match_operand:SI 4 "register_operand" "rI") ;; not_mask ++ (clobber (match_scratch:SI 5 "=&r")) ;; tmp_1 ++ (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_2 ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++ { ++ return "1:\;" ++ "lr.w.aq\t%0, %1\;" ++ "and\t%5, %0, %2\;" ++ "not\t%5, %5\;" ++ "and\t%5, %5, %3\;" ++ "and\t%6, %0, %4\;" ++ "or\t%6, %6, %5\;" ++ "sc.w.rl\t%5, %6, %1\;" ++ "bnez\t%5, 1b"; ++ } ++ [(set (attr "length") (const_int 32))]) ++ ++(define_expand "atomic_fetch_" ++ [(match_operand:SHORT 0 "register_operand") ;; old value at mem ++ (any_atomic:SHORT (match_operand:SHORT 1 "memory_operand") ;; mem location ++ (match_operand:SHORT 2 "reg_or_0_operand")) ;; value for op ++ (match_operand:SI 3 "const_int_operand")] ;; model 
++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++{ ++ /* We have no QImode/HImode atomics, so form a mask, then use ++ subword_atomic_fetch_strong_ to implement a LR/SC version of the ++ operation. */ ++ ++ /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining ++ is disabled */ ++ ++ rtx old = gen_reg_rtx (SImode); ++ rtx mem = operands[1]; ++ rtx value = operands[2]; ++ rtx aligned_mem = gen_reg_rtx (SImode); ++ rtx shift = gen_reg_rtx (SImode); ++ rtx mask = gen_reg_rtx (SImode); ++ rtx not_mask = gen_reg_rtx (SImode); ++ ++ riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask); ++ ++ rtx shifted_value = gen_reg_rtx (SImode); ++ riscv_lshift_subword (mode, value, shift, &shifted_value); ++ ++ emit_insn (gen_subword_atomic_fetch_strong_ (old, aligned_mem, ++ shifted_value, ++ mask, not_mask)); ++ ++ emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old, ++ gen_lowpart (QImode, shift))); ++ ++ emit_move_insn (operands[0], gen_lowpart (mode, old)); ++ ++ DONE; ++}) ++ + (define_insn "atomic_exchange" + [(set (match_operand:GPR 0 "register_operand" "=&r") + (unspec_volatile:GPR +@@ -104,6 +236,56 @@ + "%F3amoswap.%A3 %0,%z2,%1" + [(set (attr "length") (const_int 8))]) + ++(define_expand "atomic_exchange" ++ [(match_operand:SHORT 0 "register_operand") ;; old value at mem ++ (match_operand:SHORT 1 "memory_operand") ;; mem location ++ (match_operand:SHORT 2 "register_operand") ;; value ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++{ ++ rtx old = gen_reg_rtx (SImode); ++ rtx mem = operands[1]; ++ rtx value = operands[2]; ++ rtx aligned_mem = gen_reg_rtx (SImode); ++ rtx shift = gen_reg_rtx (SImode); ++ rtx mask = gen_reg_rtx (SImode); ++ rtx not_mask = gen_reg_rtx (SImode); ++ ++ riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask); ++ ++ rtx shifted_value = gen_reg_rtx (SImode); ++ riscv_lshift_subword (mode, value, shift, &shifted_value); ++ ++ emit_insn 
(gen_subword_atomic_exchange_strong (old, aligned_mem, ++ shifted_value, not_mask)); ++ ++ emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old, ++ gen_lowpart (QImode, shift))); ++ ++ emit_move_insn (operands[0], gen_lowpart (mode, old)); ++ DONE; ++}) ++ ++(define_insn "subword_atomic_exchange_strong" ++ [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem ++ (match_operand:SI 1 "memory_operand" "+A")) ;; mem location ++ (set (match_dup 1) ++ (unspec_volatile:SI ++ [(match_operand:SI 2 "reg_or_0_operand" "rI") ;; value ++ (match_operand:SI 3 "reg_or_0_operand" "rI")] ;; not_mask ++ UNSPEC_SYNC_EXCHANGE_SUBWORD)) ++ (clobber (match_scratch:SI 4 "=&r"))] ;; tmp_1 ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++ { ++ return "1:\;" ++ "lr.w.aq\t%0, %1\;" ++ "and\t%4, %0, %3\;" ++ "or\t%4, %4, %2\;" ++ "sc.w.rl\t%4, %4, %1\;" ++ "bnez\t%4, 1b"; ++ } ++ [(set (attr "length") (const_int 20))]) ++ + (define_insn "atomic_cas_value_strong" + [(set (match_operand:GPR 0 "register_operand" "=&r") + (match_operand:GPR 1 "memory_operand" "+A")) +@@ -152,6 +334,125 @@ + DONE; + }) + ++(define_expand "atomic_compare_and_swap" ++ [(match_operand:SI 0 "register_operand") ;; bool output ++ (match_operand:SHORT 1 "register_operand") ;; val output ++ (match_operand:SHORT 2 "memory_operand") ;; memory ++ (match_operand:SHORT 3 "reg_or_0_operand") ;; expected value ++ (match_operand:SHORT 4 "reg_or_0_operand") ;; desired value ++ (match_operand:SI 5 "const_int_operand") ;; is_weak ++ (match_operand:SI 6 "const_int_operand") ;; mod_s ++ (match_operand:SI 7 "const_int_operand")] ;; mod_f ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++{ ++ emit_insn (gen_atomic_cas_value_strong (operands[1], operands[2], ++ operands[3], operands[4], ++ operands[6], operands[7])); ++ ++ rtx val = gen_reg_rtx (SImode); ++ if (operands[1] != const0_rtx) ++ emit_move_insn (val, gen_rtx_SIGN_EXTEND (SImode, operands[1])); ++ else ++ emit_move_insn (val, const0_rtx); ++ ++ rtx exp = 
gen_reg_rtx (SImode); ++ if (operands[3] != const0_rtx) ++ emit_move_insn (exp, gen_rtx_SIGN_EXTEND (SImode, operands[3])); ++ else ++ emit_move_insn (exp, const0_rtx); ++ ++ rtx compare = val; ++ if (exp != const0_rtx) ++ { ++ rtx difference = gen_rtx_MINUS (SImode, val, exp); ++ compare = gen_reg_rtx (SImode); ++ emit_move_insn (compare, difference); ++ } ++ ++ if (word_mode != SImode) ++ { ++ rtx reg = gen_reg_rtx (word_mode); ++ emit_move_insn (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)); ++ compare = reg; ++ } ++ ++ emit_move_insn (operands[0], gen_rtx_EQ (SImode, compare, const0_rtx)); ++ DONE; ++}) ++ ++(define_expand "atomic_cas_value_strong" ++ [(match_operand:SHORT 0 "register_operand") ;; val output ++ (match_operand:SHORT 1 "memory_operand") ;; memory ++ (match_operand:SHORT 2 "reg_or_0_operand") ;; expected value ++ (match_operand:SHORT 3 "reg_or_0_operand") ;; desired value ++ (match_operand:SI 4 "const_int_operand") ;; mod_s ++ (match_operand:SI 5 "const_int_operand") ;; mod_f ++ (match_scratch:SHORT 6)] ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++{ ++ /* We have no QImode/HImode atomics, so form a mask, then use ++ subword_atomic_cas_strong to implement a LR/SC version of the ++ operation. 
*/ ++ ++ /* Logic duplicated in gcc/libgcc/config/riscv/atomic.c for use when inlining ++ is disabled */ ++ ++ rtx old = gen_reg_rtx (SImode); ++ rtx mem = operands[1]; ++ rtx aligned_mem = gen_reg_rtx (SImode); ++ rtx shift = gen_reg_rtx (SImode); ++ rtx mask = gen_reg_rtx (SImode); ++ rtx not_mask = gen_reg_rtx (SImode); ++ ++ riscv_subword_address (mem, &aligned_mem, &shift, &mask, ¬_mask); ++ ++ rtx o = operands[2]; ++ rtx n = operands[3]; ++ rtx shifted_o = gen_reg_rtx (SImode); ++ rtx shifted_n = gen_reg_rtx (SImode); ++ ++ riscv_lshift_subword (mode, o, shift, &shifted_o); ++ riscv_lshift_subword (mode, n, shift, &shifted_n); ++ ++ emit_move_insn (shifted_o, gen_rtx_AND (SImode, shifted_o, mask)); ++ emit_move_insn (shifted_n, gen_rtx_AND (SImode, shifted_n, mask)); ++ ++ emit_insn (gen_subword_atomic_cas_strong (old, aligned_mem, ++ shifted_o, shifted_n, ++ mask, not_mask)); ++ ++ emit_move_insn (old, gen_rtx_ASHIFTRT (SImode, old, ++ gen_lowpart (QImode, shift))); ++ ++ emit_move_insn (operands[0], gen_lowpart (mode, old)); ++ ++ DONE; ++}) ++ ++(define_insn "subword_atomic_cas_strong" ++ [(set (match_operand:SI 0 "register_operand" "=&r") ;; old value at mem ++ (match_operand:SI 1 "memory_operand" "+A")) ;; mem location ++ (set (match_dup 1) ++ (unspec_volatile:SI [(match_operand:SI 2 "reg_or_0_operand" "rJ") ;; expected value ++ (match_operand:SI 3 "reg_or_0_operand" "rJ")] ;; desired value ++ UNSPEC_COMPARE_AND_SWAP_SUBWORD)) ++ (match_operand:SI 4 "register_operand" "rI") ;; mask ++ (match_operand:SI 5 "register_operand" "rI") ;; not_mask ++ (clobber (match_scratch:SI 6 "=&r"))] ;; tmp_1 ++ "TARGET_ATOMIC && TARGET_INLINE_SUBWORD_ATOMIC" ++ { ++ return "1:\;" ++ "lr.w.aq\t%0, %1\;" ++ "and\t%6, %0, %4\;" ++ "bne\t%6, %z2, 1f\;" ++ "and\t%6, %0, %5\;" ++ "or\t%6, %6, %3\;" ++ "sc.w.rl\t%6, %6, %1\;" ++ "bnez\t%6, 1b\;" ++ "1:"; ++ } ++ [(set (attr "length") (const_int 28))]) ++ + (define_expand "atomic_test_and_set" + [(match_operand:QI 0 
"register_operand" "") ;; bool output + (match_operand:QI 1 "memory_operand" "+A") ;; memory +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index cb83dd8a1..ff8cd032f 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -1210,7 +1210,8 @@ See RS/6000 and PowerPC Options. + -malign-data=@var{type} @gol + -mbig-endian -mlittle-endian @gol + -mstack-protector-guard=@var{guard} -mstack-protector-guard-reg=@var{reg} @gol +--mstack-protector-guard-offset=@var{offset}} ++-mstack-protector-guard-offset=@var{offset} ++-minline-atomics -mno-inline-atomics} + + @emph{RL78 Options} + @gccoptlist{-msim -mmul=none -mmul=g13 -mmul=g14 -mallregs @gol +@@ -28035,6 +28036,13 @@ Do or don't use smaller but slower prologue and epilogue code that uses + library function calls. The default is to use fast inline prologues and + epilogues. + ++@opindex minline-atomics ++@item -minline-atomics ++@itemx -mno-inline-atomics ++Do or don't use smaller but slower subword atomic emulation code that uses ++libatomic function calls. The default is to use fast inline subword atomics ++that do not require libatomic. 
++ + @item -mshorten-memrefs + @itemx -mno-shorten-memrefs + @opindex mshorten-memrefs +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-1.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-1.c +new file mode 100644 +index 000000000..5c5623d9b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-1.c +@@ -0,0 +1,18 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mno-inline-atomics" } */ ++/* { dg-message "note: '__sync_fetch_and_nand' changed semantics in GCC 4.4" "fetch_and_nand" { target *-*-* } 0 } */ ++/* { dg-final { scan-assembler "\tcall\t__sync_fetch_and_add_1" } } */ ++/* { dg-final { scan-assembler "\tcall\t__sync_fetch_and_nand_1" } } */ ++/* { dg-final { scan-assembler "\tcall\t__sync_bool_compare_and_swap_1" } } */ ++ ++char foo; ++char bar; ++char baz; ++ ++int ++main () ++{ ++ __sync_fetch_and_add(&foo, 1); ++ __sync_fetch_and_nand(&bar, 1); ++ __sync_bool_compare_and_swap (&baz, 1, 2); ++} +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-2.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-2.c +new file mode 100644 +index 000000000..01b439086 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-2.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* Verify that subword atomics do not generate calls. 
*/ ++/* { dg-options "-minline-atomics" } */ ++/* { dg-message "note: '__sync_fetch_and_nand' changed semantics in GCC 4.4" "fetch_and_nand" { target *-*-* } 0 } */ ++/* { dg-final { scan-assembler-not "\tcall\t__sync_fetch_and_add_1" } } */ ++/* { dg-final { scan-assembler-not "\tcall\t__sync_fetch_and_nand_1" } } */ ++/* { dg-final { scan-assembler-not "\tcall\t__sync_bool_compare_and_swap_1" } } */ ++ ++#include "inline-atomics-1.c" +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-3.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-3.c +new file mode 100644 +index 000000000..709f37343 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-3.c +@@ -0,0 +1,569 @@ ++/* Check all char alignments. */ ++/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-op-1.c */ ++/* Test __atomic routines for existence and proper execution on 1 byte ++ values with each valid memory model. */ ++/* { dg-do run } */ ++/* { dg-options "-minline-atomics -Wno-address-of-packed-member" } */ ++ ++/* Test the execution of the __atomic_*OP builtin routines for a char. */ ++ ++extern void abort(void); ++ ++char count, res; ++const char init = ~0; ++ ++struct A ++{ ++ char a; ++ char b; ++ char c; ++ char d; ++} __attribute__ ((packed)) A; ++ ++/* The fetch_op routines return the original value before the operation. 
*/ ++ ++void ++test_fetch_add (char* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_fetch_add (v, count, __ATOMIC_RELAXED) != 0) ++ abort (); ++ ++ if (__atomic_fetch_add (v, 1, __ATOMIC_CONSUME) != 1) ++ abort (); ++ ++ if (__atomic_fetch_add (v, count, __ATOMIC_ACQUIRE) != 2) ++ abort (); ++ ++ if (__atomic_fetch_add (v, 1, __ATOMIC_RELEASE) != 3) ++ abort (); ++ ++ if (__atomic_fetch_add (v, count, __ATOMIC_ACQ_REL) != 4) ++ abort (); ++ ++ if (__atomic_fetch_add (v, 1, __ATOMIC_SEQ_CST) != 5) ++ abort (); ++} ++ ++ ++void ++test_fetch_sub (char* v) ++{ ++ *v = res = 20; ++ count = 0; ++ ++ if (__atomic_fetch_sub (v, count + 1, __ATOMIC_RELAXED) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, 1, __ATOMIC_CONSUME) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQUIRE) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, 1, __ATOMIC_RELEASE) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQ_REL) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, 1, __ATOMIC_SEQ_CST) != res--) ++ abort (); ++} ++ ++void ++test_fetch_and (char* v) ++{ ++ *v = init; ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_fetch_and (v, init, __ATOMIC_CONSUME) != 0) ++ abort (); ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ *v = ~*v; ++ if (__atomic_fetch_and (v, init, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST) != 0) ++ abort (); ++} ++ ++void ++test_fetch_nand (char* v) ++{ ++ *v = init; ++ ++ if (__atomic_fetch_nand (v, 0, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, init, __ATOMIC_CONSUME) != init) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, 0, __ATOMIC_ACQUIRE) != 0 ) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, init, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if 
(__atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL) != 0) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, 0, __ATOMIC_SEQ_CST) != init) ++ abort (); ++} ++ ++void ++test_fetch_xor (char* v) ++{ ++ *v = init; ++ count = 0; ++ ++ if (__atomic_fetch_xor (v, count, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME) != init) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE) != 0) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, ~count, __ATOMIC_SEQ_CST) != init) ++ abort (); ++} ++ ++void ++test_fetch_or (char* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_fetch_or (v, count, __ATOMIC_RELAXED) != 0) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, 2, __ATOMIC_CONSUME) != 1) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, count, __ATOMIC_ACQUIRE) != 3) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, 8, __ATOMIC_RELEASE) != 7) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, count, __ATOMIC_ACQ_REL) != 15) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, count, __ATOMIC_SEQ_CST) != 31) ++ abort (); ++} ++ ++/* The OP_fetch routines return the new value after the operation. 
*/ ++ ++void ++test_add_fetch (char* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_RELAXED) != 1) ++ abort (); ++ ++ if (__atomic_add_fetch (v, 1, __ATOMIC_CONSUME) != 2) ++ abort (); ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_ACQUIRE) != 3) ++ abort (); ++ ++ if (__atomic_add_fetch (v, 1, __ATOMIC_RELEASE) != 4) ++ abort (); ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_ACQ_REL) != 5) ++ abort (); ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_SEQ_CST) != 6) ++ abort (); ++} ++ ++ ++void ++test_sub_fetch (char* v) ++{ ++ *v = res = 20; ++ count = 0; ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, 1, __ATOMIC_CONSUME) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQUIRE) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, 1, __ATOMIC_RELEASE) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_SEQ_CST) != --res) ++ abort (); ++} ++ ++void ++test_and_fetch (char* v) ++{ ++ *v = init; ++ ++ if (__atomic_and_fetch (v, 0, __ATOMIC_RELAXED) != 0) ++ abort (); ++ ++ *v = init; ++ if (__atomic_and_fetch (v, init, __ATOMIC_CONSUME) != init) ++ abort (); ++ ++ if (__atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ *v = ~*v; ++ if (__atomic_and_fetch (v, init, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if (__atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL) != 0) ++ abort (); ++ ++ *v = ~*v; ++ if (__atomic_and_fetch (v, 0, __ATOMIC_SEQ_CST) != 0) ++ abort (); ++} ++ ++void ++test_nand_fetch (char* v) ++{ ++ *v = init; ++ ++ if (__atomic_nand_fetch (v, 0, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, init, __ATOMIC_CONSUME) != 0) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE) != init) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, init, __ATOMIC_RELEASE) != 
0) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, init, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST) != init) ++ abort (); ++} ++ ++ ++ ++void ++test_xor_fetch (char* v) ++{ ++ *v = init; ++ count = 0; ++ ++ if (__atomic_xor_fetch (v, count, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, ~count, __ATOMIC_CONSUME) != 0) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, ~count, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST) != 0) ++ abort (); ++} ++ ++void ++test_or_fetch (char* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_or_fetch (v, count, __ATOMIC_RELAXED) != 1) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, 2, __ATOMIC_CONSUME) != 3) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, count, __ATOMIC_ACQUIRE) != 7) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, 8, __ATOMIC_RELEASE) != 15) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, count, __ATOMIC_ACQ_REL) != 31) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, count, __ATOMIC_SEQ_CST) != 63) ++ abort (); ++} ++ ++ ++/* Test the OP routines with a result which isn't used. Use both variations ++ within each function. 
*/ ++ ++void ++test_add (char* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ __atomic_add_fetch (v, count, __ATOMIC_RELAXED); ++ if (*v != 1) ++ abort (); ++ ++ __atomic_fetch_add (v, count, __ATOMIC_CONSUME); ++ if (*v != 2) ++ abort (); ++ ++ __atomic_add_fetch (v, 1 , __ATOMIC_ACQUIRE); ++ if (*v != 3) ++ abort (); ++ ++ __atomic_fetch_add (v, 1, __ATOMIC_RELEASE); ++ if (*v != 4) ++ abort (); ++ ++ __atomic_add_fetch (v, count, __ATOMIC_ACQ_REL); ++ if (*v != 5) ++ abort (); ++ ++ __atomic_fetch_add (v, count, __ATOMIC_SEQ_CST); ++ if (*v != 6) ++ abort (); ++} ++ ++ ++void ++test_sub (char* v) ++{ ++ *v = res = 20; ++ count = 0; ++ ++ __atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_fetch_sub (v, count + 1, __ATOMIC_CONSUME); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_sub_fetch (v, 1, __ATOMIC_ACQUIRE); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_fetch_sub (v, 1, __ATOMIC_RELEASE); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_fetch_sub (v, count + 1, __ATOMIC_SEQ_CST); ++ if (*v != --res) ++ abort (); ++} ++ ++void ++test_and (char* v) ++{ ++ *v = init; ++ ++ __atomic_and_fetch (v, 0, __ATOMIC_RELAXED); ++ if (*v != 0) ++ abort (); ++ ++ *v = init; ++ __atomic_fetch_and (v, init, __ATOMIC_CONSUME); ++ if (*v != init) ++ abort (); ++ ++ __atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE); ++ if (*v != 0) ++ abort (); ++ ++ *v = ~*v; ++ __atomic_fetch_and (v, init, __ATOMIC_RELEASE); ++ if (*v != init) ++ abort (); ++ ++ __atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL); ++ if (*v != 0) ++ abort (); ++ ++ *v = ~*v; ++ __atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST); ++ if (*v != 0) ++ abort (); ++} ++ ++void ++test_nand (char* v) ++{ ++ *v = init; ++ ++ __atomic_fetch_nand (v, 0, __ATOMIC_RELAXED); ++ if (*v != init) ++ abort (); ++ ++ __atomic_fetch_nand (v, init, __ATOMIC_CONSUME); ++ if (*v != 0) ++ abort (); ++ ++ 
__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE); ++ if (*v != init) ++ abort (); ++ ++ __atomic_nand_fetch (v, init, __ATOMIC_RELEASE); ++ if (*v != 0) ++ abort (); ++ ++ __atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL); ++ if (*v != init) ++ abort (); ++ ++ __atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST); ++ if (*v != init) ++ abort (); ++} ++ ++ ++ ++void ++test_xor (char* v) ++{ ++ *v = init; ++ count = 0; ++ ++ __atomic_xor_fetch (v, count, __ATOMIC_RELAXED); ++ if (*v != init) ++ abort (); ++ ++ __atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME); ++ if (*v != 0) ++ abort (); ++ ++ __atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE); ++ if (*v != 0) ++ abort (); ++ ++ __atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE); ++ if (*v != init) ++ abort (); ++ ++ __atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL); ++ if (*v != init) ++ abort (); ++ ++ __atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST); ++ if (*v != 0) ++ abort (); ++} ++ ++void ++test_or (char* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ __atomic_or_fetch (v, count, __ATOMIC_RELAXED); ++ if (*v != 1) ++ abort (); ++ ++ count *= 2; ++ __atomic_fetch_or (v, count, __ATOMIC_CONSUME); ++ if (*v != 3) ++ abort (); ++ ++ count *= 2; ++ __atomic_or_fetch (v, 4, __ATOMIC_ACQUIRE); ++ if (*v != 7) ++ abort (); ++ ++ count *= 2; ++ __atomic_fetch_or (v, 8, __ATOMIC_RELEASE); ++ if (*v != 15) ++ abort (); ++ ++ count *= 2; ++ __atomic_or_fetch (v, count, __ATOMIC_ACQ_REL); ++ if (*v != 31) ++ abort (); ++ ++ count *= 2; ++ __atomic_fetch_or (v, count, __ATOMIC_SEQ_CST); ++ if (*v != 63) ++ abort (); ++} ++ ++int ++main () ++{ ++ char* V[] = {&A.a, &A.b, &A.c, &A.d}; ++ ++ for (int i = 0; i < 4; i++) { ++ test_fetch_add (V[i]); ++ test_fetch_sub (V[i]); ++ test_fetch_and (V[i]); ++ test_fetch_nand (V[i]); ++ test_fetch_xor (V[i]); ++ test_fetch_or (V[i]); ++ ++ test_add_fetch (V[i]); ++ test_sub_fetch (V[i]); ++ test_and_fetch (V[i]); ++ test_nand_fetch (V[i]); ++ test_xor_fetch (V[i]); ++ test_or_fetch (V[i]); ++ ++ test_add (V[i]); ++ test_sub 
(V[i]); ++ test_and (V[i]); ++ test_nand (V[i]); ++ test_xor (V[i]); ++ test_or (V[i]); ++ } ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-4.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-4.c +new file mode 100644 +index 000000000..eecfaae5c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-4.c +@@ -0,0 +1,566 @@ ++/* Check all short alignments. */ ++/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-op-2.c */ ++/* Test __atomic routines for existence and proper execution on 2 byte ++ values with each valid memory model. */ ++/* { dg-do run } */ ++/* { dg-options "-minline-atomics -Wno-address-of-packed-member" } */ ++ ++/* Test the execution of the __atomic_*OP builtin routines for a short. */ ++ ++extern void abort(void); ++ ++short count, res; ++const short init = ~0; ++ ++struct A ++{ ++ short a; ++ short b; ++} __attribute__ ((packed)) A; ++ ++/* The fetch_op routines return the original value before the operation. */ ++ ++void ++test_fetch_add (short* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_fetch_add (v, count, __ATOMIC_RELAXED) != 0) ++ abort (); ++ ++ if (__atomic_fetch_add (v, 1, __ATOMIC_CONSUME) != 1) ++ abort (); ++ ++ if (__atomic_fetch_add (v, count, __ATOMIC_ACQUIRE) != 2) ++ abort (); ++ ++ if (__atomic_fetch_add (v, 1, __ATOMIC_RELEASE) != 3) ++ abort (); ++ ++ if (__atomic_fetch_add (v, count, __ATOMIC_ACQ_REL) != 4) ++ abort (); ++ ++ if (__atomic_fetch_add (v, 1, __ATOMIC_SEQ_CST) != 5) ++ abort (); ++} ++ ++ ++void ++test_fetch_sub (short* v) ++{ ++ *v = res = 20; ++ count = 0; ++ ++ if (__atomic_fetch_sub (v, count + 1, __ATOMIC_RELAXED) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, 1, __ATOMIC_CONSUME) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQUIRE) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, 1, __ATOMIC_RELEASE) != res--) ++ abort (); ++ ++ if (__atomic_fetch_sub (v, count + 1, __ATOMIC_ACQ_REL) != res--) ++ 
abort (); ++ ++ if (__atomic_fetch_sub (v, 1, __ATOMIC_SEQ_CST) != res--) ++ abort (); ++} ++ ++void ++test_fetch_and (short* v) ++{ ++ *v = init; ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_fetch_and (v, init, __ATOMIC_CONSUME) != 0) ++ abort (); ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ *v = ~*v; ++ if (__atomic_fetch_and (v, init, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST) != 0) ++ abort (); ++} ++ ++void ++test_fetch_nand (short* v) ++{ ++ *v = init; ++ ++ if (__atomic_fetch_nand (v, 0, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, init, __ATOMIC_CONSUME) != init) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, 0, __ATOMIC_ACQUIRE) != 0 ) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, init, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL) != 0) ++ abort (); ++ ++ if (__atomic_fetch_nand (v, 0, __ATOMIC_SEQ_CST) != init) ++ abort (); ++} ++ ++void ++test_fetch_xor (short* v) ++{ ++ *v = init; ++ count = 0; ++ ++ if (__atomic_fetch_xor (v, count, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME) != init) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE) != 0) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_fetch_xor (v, ~count, __ATOMIC_SEQ_CST) != init) ++ abort (); ++} ++ ++void ++test_fetch_or (short* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_fetch_or (v, count, __ATOMIC_RELAXED) != 0) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, 2, __ATOMIC_CONSUME) != 1) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, count, __ATOMIC_ACQUIRE) != 3) ++ 
abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, 8, __ATOMIC_RELEASE) != 7) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, count, __ATOMIC_ACQ_REL) != 15) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_fetch_or (v, count, __ATOMIC_SEQ_CST) != 31) ++ abort (); ++} ++ ++/* The OP_fetch routines return the new value after the operation. */ ++ ++void ++test_add_fetch (short* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_RELAXED) != 1) ++ abort (); ++ ++ if (__atomic_add_fetch (v, 1, __ATOMIC_CONSUME) != 2) ++ abort (); ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_ACQUIRE) != 3) ++ abort (); ++ ++ if (__atomic_add_fetch (v, 1, __ATOMIC_RELEASE) != 4) ++ abort (); ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_ACQ_REL) != 5) ++ abort (); ++ ++ if (__atomic_add_fetch (v, count, __ATOMIC_SEQ_CST) != 6) ++ abort (); ++} ++ ++ ++void ++test_sub_fetch (short* v) ++{ ++ *v = res = 20; ++ count = 0; ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, 1, __ATOMIC_CONSUME) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQUIRE) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, 1, __ATOMIC_RELEASE) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL) != --res) ++ abort (); ++ ++ if (__atomic_sub_fetch (v, count + 1, __ATOMIC_SEQ_CST) != --res) ++ abort (); ++} ++ ++void ++test_and_fetch (short* v) ++{ ++ *v = init; ++ ++ if (__atomic_and_fetch (v, 0, __ATOMIC_RELAXED) != 0) ++ abort (); ++ ++ *v = init; ++ if (__atomic_and_fetch (v, init, __ATOMIC_CONSUME) != init) ++ abort (); ++ ++ if (__atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ *v = ~*v; ++ if (__atomic_and_fetch (v, init, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if (__atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL) != 0) ++ abort (); ++ ++ *v = ~*v; ++ if (__atomic_and_fetch (v, 0, __ATOMIC_SEQ_CST) != 0) ++ 
abort (); ++} ++ ++void ++test_nand_fetch (short* v) ++{ ++ *v = init; ++ ++ if (__atomic_nand_fetch (v, 0, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, init, __ATOMIC_CONSUME) != 0) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE) != init) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, init, __ATOMIC_RELEASE) != 0) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, init, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST) != init) ++ abort (); ++} ++ ++ ++ ++void ++test_xor_fetch (short* v) ++{ ++ *v = init; ++ count = 0; ++ ++ if (__atomic_xor_fetch (v, count, __ATOMIC_RELAXED) != init) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, ~count, __ATOMIC_CONSUME) != 0) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE) != 0) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, ~count, __ATOMIC_RELEASE) != init) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, 0, __ATOMIC_ACQ_REL) != init) ++ abort (); ++ ++ if (__atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST) != 0) ++ abort (); ++} ++ ++void ++test_or_fetch (short* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ if (__atomic_or_fetch (v, count, __ATOMIC_RELAXED) != 1) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, 2, __ATOMIC_CONSUME) != 3) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, count, __ATOMIC_ACQUIRE) != 7) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, 8, __ATOMIC_RELEASE) != 15) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, count, __ATOMIC_ACQ_REL) != 31) ++ abort (); ++ ++ count *= 2; ++ if (__atomic_or_fetch (v, count, __ATOMIC_SEQ_CST) != 63) ++ abort (); ++} ++ ++ ++/* Test the OP routines with a result which isn't used. Use both variations ++ within each function. 
*/ ++ ++void ++test_add (short* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ __atomic_add_fetch (v, count, __ATOMIC_RELAXED); ++ if (*v != 1) ++ abort (); ++ ++ __atomic_fetch_add (v, count, __ATOMIC_CONSUME); ++ if (*v != 2) ++ abort (); ++ ++ __atomic_add_fetch (v, 1 , __ATOMIC_ACQUIRE); ++ if (*v != 3) ++ abort (); ++ ++ __atomic_fetch_add (v, 1, __ATOMIC_RELEASE); ++ if (*v != 4) ++ abort (); ++ ++ __atomic_add_fetch (v, count, __ATOMIC_ACQ_REL); ++ if (*v != 5) ++ abort (); ++ ++ __atomic_fetch_add (v, count, __ATOMIC_SEQ_CST); ++ if (*v != 6) ++ abort (); ++} ++ ++ ++void ++test_sub (short* v) ++{ ++ *v = res = 20; ++ count = 0; ++ ++ __atomic_sub_fetch (v, count + 1, __ATOMIC_RELAXED); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_fetch_sub (v, count + 1, __ATOMIC_CONSUME); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_sub_fetch (v, 1, __ATOMIC_ACQUIRE); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_fetch_sub (v, 1, __ATOMIC_RELEASE); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_sub_fetch (v, count + 1, __ATOMIC_ACQ_REL); ++ if (*v != --res) ++ abort (); ++ ++ __atomic_fetch_sub (v, count + 1, __ATOMIC_SEQ_CST); ++ if (*v != --res) ++ abort (); ++} ++ ++void ++test_and (short* v) ++{ ++ *v = init; ++ ++ __atomic_and_fetch (v, 0, __ATOMIC_RELAXED); ++ if (*v != 0) ++ abort (); ++ ++ *v = init; ++ __atomic_fetch_and (v, init, __ATOMIC_CONSUME); ++ if (*v != init) ++ abort (); ++ ++ __atomic_and_fetch (v, 0, __ATOMIC_ACQUIRE); ++ if (*v != 0) ++ abort (); ++ ++ *v = ~*v; ++ __atomic_fetch_and (v, init, __ATOMIC_RELEASE); ++ if (*v != init) ++ abort (); ++ ++ __atomic_and_fetch (v, 0, __ATOMIC_ACQ_REL); ++ if (*v != 0) ++ abort (); ++ ++ *v = ~*v; ++ __atomic_fetch_and (v, 0, __ATOMIC_SEQ_CST); ++ if (*v != 0) ++ abort (); ++} ++ ++void ++test_nand (short* v) ++{ ++ *v = init; ++ ++ __atomic_fetch_nand (v, 0, __ATOMIC_RELAXED); ++ if (*v != init) ++ abort (); ++ ++ __atomic_fetch_nand (v, init, __ATOMIC_CONSUME); ++ if (*v != 0) ++ abort (); ++ ++ 
__atomic_nand_fetch (v, 0, __ATOMIC_ACQUIRE); ++ if (*v != init) ++ abort (); ++ ++ __atomic_nand_fetch (v, init, __ATOMIC_RELEASE); ++ if (*v != 0) ++ abort (); ++ ++ __atomic_fetch_nand (v, init, __ATOMIC_ACQ_REL); ++ if (*v != init) ++ abort (); ++ ++ __atomic_nand_fetch (v, 0, __ATOMIC_SEQ_CST); ++ if (*v != init) ++ abort (); ++} ++ ++ ++ ++void ++test_xor (short* v) ++{ ++ *v = init; ++ count = 0; ++ ++ __atomic_xor_fetch (v, count, __ATOMIC_RELAXED); ++ if (*v != init) ++ abort (); ++ ++ __atomic_fetch_xor (v, ~count, __ATOMIC_CONSUME); ++ if (*v != 0) ++ abort (); ++ ++ __atomic_xor_fetch (v, 0, __ATOMIC_ACQUIRE); ++ if (*v != 0) ++ abort (); ++ ++ __atomic_fetch_xor (v, ~count, __ATOMIC_RELEASE); ++ if (*v != init) ++ abort (); ++ ++ __atomic_fetch_xor (v, 0, __ATOMIC_ACQ_REL); ++ if (*v != init) ++ abort (); ++ ++ __atomic_xor_fetch (v, ~count, __ATOMIC_SEQ_CST); ++ if (*v != 0) ++ abort (); ++} ++ ++void ++test_or (short* v) ++{ ++ *v = 0; ++ count = 1; ++ ++ __atomic_or_fetch (v, count, __ATOMIC_RELAXED); ++ if (*v != 1) ++ abort (); ++ ++ count *= 2; ++ __atomic_fetch_or (v, count, __ATOMIC_CONSUME); ++ if (*v != 3) ++ abort (); ++ ++ count *= 2; ++ __atomic_or_fetch (v, 4, __ATOMIC_ACQUIRE); ++ if (*v != 7) ++ abort (); ++ ++ count *= 2; ++ __atomic_fetch_or (v, 8, __ATOMIC_RELEASE); ++ if (*v != 15) ++ abort (); ++ ++ count *= 2; ++ __atomic_or_fetch (v, count, __ATOMIC_ACQ_REL); ++ if (*v != 31) ++ abort (); ++ ++ count *= 2; ++ __atomic_fetch_or (v, count, __ATOMIC_SEQ_CST); ++ if (*v != 63) ++ abort (); ++} ++ ++int ++main () { ++ short* V[] = {&A.a, &A.b}; ++ ++ for (int i = 0; i < 2; i++) { ++ test_fetch_add (V[i]); ++ test_fetch_sub (V[i]); ++ test_fetch_and (V[i]); ++ test_fetch_nand (V[i]); ++ test_fetch_xor (V[i]); ++ test_fetch_or (V[i]); ++ ++ test_add_fetch (V[i]); ++ test_sub_fetch (V[i]); ++ test_and_fetch (V[i]); ++ test_nand_fetch (V[i]); ++ test_xor_fetch (V[i]); ++ test_or_fetch (V[i]); ++ ++ test_add (V[i]); ++ test_sub (V[i]); ++ 
test_and (V[i]); ++ test_nand (V[i]); ++ test_xor (V[i]); ++ test_or (V[i]); ++ } ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-5.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-5.c +new file mode 100644 +index 000000000..52093894a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-5.c +@@ -0,0 +1,87 @@ ++/* Test __atomic routines for existence and proper execution on 1 byte ++ values with each valid memory model. */ ++/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-compare-exchange-1.c */ ++/* { dg-do run } */ ++/* { dg-options "-minline-atomics" } */ ++ ++/* Test the execution of the __atomic_compare_exchange_n builtin for a char. */ ++ ++extern void abort(void); ++ ++char v = 0; ++char expected = 0; ++char max = ~0; ++char desired = ~0; ++char zero = 0; ++ ++#define STRONG 0 ++#define WEAK 1 ++ ++int ++main () ++{ ++ ++ if (!__atomic_compare_exchange_n (&v, &expected, max, STRONG , __ATOMIC_RELAXED, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != max) ++ abort (); ++ ++ if (!__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != max) ++ abort (); ++ if (v != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange_n (&v, &expected, desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (!__atomic_compare_exchange_n (&v, &expected, desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ if (v != max) ++ abort (); ++ ++ /* Now test the generic version. 
*/ ++ ++ v = 0; ++ ++ if (!__atomic_compare_exchange (&v, &expected, &max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != max) ++ abort (); ++ ++ if (!__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != max) ++ abort (); ++ if (v != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange (&v, &expected, &desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (!__atomic_compare_exchange (&v, &expected, &desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ if (v != max) ++ abort (); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-6.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-6.c +new file mode 100644 +index 000000000..8fee8c448 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-6.c +@@ -0,0 +1,87 @@ ++/* Test __atomic routines for existence and proper execution on 2 byte ++ values with each valid memory model. */ ++/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-compare-exchange-2.c */ ++/* { dg-do run } */ ++/* { dg-options "-minline-atomics" } */ ++ ++/* Test the execution of the __atomic_compare_exchange_n builtin for a short. 
*/ ++ ++extern void abort(void); ++ ++short v = 0; ++short expected = 0; ++short max = ~0; ++short desired = ~0; ++short zero = 0; ++ ++#define STRONG 0 ++#define WEAK 1 ++ ++int ++main () ++{ ++ ++ if (!__atomic_compare_exchange_n (&v, &expected, max, STRONG , __ATOMIC_RELAXED, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != max) ++ abort (); ++ ++ if (!__atomic_compare_exchange_n (&v, &expected, 0, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != max) ++ abort (); ++ if (v != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange_n (&v, &expected, desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (!__atomic_compare_exchange_n (&v, &expected, desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ if (v != max) ++ abort (); ++ ++ /* Now test the generic version. 
*/ ++ ++ v = 0; ++ ++ if (!__atomic_compare_exchange (&v, &expected, &max, STRONG, __ATOMIC_RELAXED, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) ++ abort (); ++ if (expected != max) ++ abort (); ++ ++ if (!__atomic_compare_exchange (&v, &expected, &zero, STRONG , __ATOMIC_RELEASE, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != max) ++ abort (); ++ if (v != 0) ++ abort (); ++ ++ if (__atomic_compare_exchange (&v, &expected, &desired, WEAK, __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ ++ if (!__atomic_compare_exchange (&v, &expected, &desired, STRONG , __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) ++ abort (); ++ if (expected != 0) ++ abort (); ++ if (v != max) ++ abort (); ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-7.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-7.c +new file mode 100644 +index 000000000..24c344c0c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-7.c +@@ -0,0 +1,69 @@ ++/* Test __atomic routines for existence and proper execution on 1 byte ++ values with each valid memory model. */ ++/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-exchange-1.c */ ++/* { dg-do run } */ ++/* { dg-options "-minline-atomics" } */ ++ ++/* Test the execution of the __atomic_exchange_n builtin for a char. 
*/ ++ ++extern void abort(void); ++ ++char v, count, ret; ++ ++int ++main () ++{ ++ v = 0; ++ count = 0; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELAXED) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQUIRE) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELEASE) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQ_REL) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_SEQ_CST) != count) ++ abort (); ++ count++; ++ ++ /* Now test the generic version. */ ++ ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_RELAXED); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQUIRE); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_RELEASE); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQ_REL); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_SEQ_CST); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.target/riscv/inline-atomics-8.c b/gcc/testsuite/gcc.target/riscv/inline-atomics-8.c +new file mode 100644 +index 000000000..edc212df0 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/riscv/inline-atomics-8.c +@@ -0,0 +1,69 @@ ++/* Test __atomic routines for existence and proper execution on 2 byte ++ values with each valid memory model. */ ++/* Duplicate logic as libatomic/testsuite/libatomic.c/atomic-exchange-2.c */ ++/* { dg-do run } */ ++/* { dg-options "-minline-atomics" } */ ++ ++/* Test the execution of the __atomic_X builtin for a short. 
*/ ++ ++extern void abort(void); ++ ++short v, count, ret; ++ ++int ++main () ++{ ++ v = 0; ++ count = 0; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELAXED) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQUIRE) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_RELEASE) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_ACQ_REL) != count) ++ abort (); ++ count++; ++ ++ if (__atomic_exchange_n (&v, count + 1, __ATOMIC_SEQ_CST) != count) ++ abort (); ++ count++; ++ ++ /* Now test the generic version. */ ++ ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_RELAXED); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQUIRE); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_RELEASE); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_ACQ_REL); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ __atomic_exchange (&v, &count, &ret, __ATOMIC_SEQ_CST); ++ if (ret != count - 1 || v != count) ++ abort (); ++ count++; ++ ++ return 0; ++} +diff --git a/libgcc/config/riscv/atomic.c b/libgcc/config/riscv/atomic.c +index 7007e7a20..a29909b97 100644 +--- a/libgcc/config/riscv/atomic.c ++++ b/libgcc/config/riscv/atomic.c +@@ -30,6 +30,8 @@ see the files COPYING3 and COPYING.RUNTIME respectively. 
If not, see + #define INVERT "not %[tmp1], %[tmp1]\n\t" + #define DONT_INVERT "" + ++/* Logic duplicated in gcc/gcc/config/riscv/sync.md for use when inlining is enabled */ ++ + #define GENERATE_FETCH_AND_OP(type, size, opname, insn, invert, cop) \ + type __sync_fetch_and_ ## opname ## _ ## size (type *p, type v) \ + { \ +-- +2.33.0 + diff --git a/0002-Sw64-Port-add-gcc-compiler.patch b/0002-Sw64-Port-add-gcc-compiler.patch new file mode 100644 index 0000000000000000000000000000000000000000..3432307f050fe3b06a182063580a20c62047b46e --- /dev/null +++ b/0002-Sw64-Port-add-gcc-compiler.patch @@ -0,0 +1,22899 @@ +From 98573a01187b7eadd9d632c8ac3c536e50f4cb97 Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:30:44 +0800 +Subject: [PATCH 02/16] Sw64 Port: add gcc compiler + +--- + gcc/ada/libgnarl/s-linux__sw_64.ads | 138 + + gcc/ada/libgnat/system-linux-sw_64.ads | 146 + + gcc/auto-inc-dec.cc | 15 +- + gcc/common/config/sw_64/sw_64-common.cc | 115 + + gcc/config/sw_64/constraints.md | 123 + + gcc/config/sw_64/driver-sw_64.cc | 105 + + gcc/config/sw_64/elf.h | 190 + + gcc/config/sw_64/elf.opt | 29 + + gcc/config/sw_64/freebsd.h | 69 + + gcc/config/sw_64/gnu-user.h | 177 + + gcc/config/sw_64/linux-elf.h | 54 + + gcc/config/sw_64/linux.h | 109 + + gcc/config/sw_64/netbsd.h | 68 + + gcc/config/sw_64/openbsd.h | 75 + + gcc/config/sw_64/predicates.md | 653 ++ + gcc/config/sw_64/sw6.md | 180 + + gcc/config/sw_64/sw8.md | 181 + + gcc/config/sw_64/sw_64-modes.def | 27 + + gcc/config/sw_64/sw_64-passes.def | 21 + + gcc/config/sw_64/sw_64-protos.h | 152 + + gcc/config/sw_64/sw_64.cc | 10076 ++++++++++++++++++++++ + gcc/config/sw_64/sw_64.h | 1001 +++ + gcc/config/sw_64/sw_64.md | 7814 +++++++++++++++++ + gcc/config/sw_64/sw_64.opt | 326 + + gcc/config/sw_64/sync.md | 495 ++ + gcc/config/sw_64/t-linux | 1 + + gcc/config/sw_64/t-sw_64 | 19 + + gcc/config/sw_64/x-sw_64 | 3 + + gcc/emit-rtl.cc | 35 + + gcc/explow.cc | 4 + + gcc/final.cc | 12 + + gcc/flags.h | 4 + + 
gcc/fortran/interface.cc | 11 +- + gcc/gcc.cc | 6 + + gcc/gimple-match-head.cc | 5 + + gcc/optabs.cc | 10 + + include/longlong.h | 59 + + 37 files changed, 22506 insertions(+), 2 deletions(-) + create mode 100644 gcc/ada/libgnarl/s-linux__sw_64.ads + create mode 100644 gcc/ada/libgnat/system-linux-sw_64.ads + create mode 100644 gcc/common/config/sw_64/sw_64-common.cc + create mode 100644 gcc/config/sw_64/constraints.md + create mode 100644 gcc/config/sw_64/driver-sw_64.cc + create mode 100644 gcc/config/sw_64/elf.h + create mode 100644 gcc/config/sw_64/elf.opt + create mode 100644 gcc/config/sw_64/freebsd.h + create mode 100644 gcc/config/sw_64/gnu-user.h + create mode 100644 gcc/config/sw_64/linux-elf.h + create mode 100644 gcc/config/sw_64/linux.h + create mode 100644 gcc/config/sw_64/netbsd.h + create mode 100644 gcc/config/sw_64/openbsd.h + create mode 100644 gcc/config/sw_64/predicates.md + create mode 100644 gcc/config/sw_64/sw6.md + create mode 100644 gcc/config/sw_64/sw8.md + create mode 100644 gcc/config/sw_64/sw_64-modes.def + create mode 100644 gcc/config/sw_64/sw_64-passes.def + create mode 100644 gcc/config/sw_64/sw_64-protos.h + create mode 100644 gcc/config/sw_64/sw_64.cc + create mode 100644 gcc/config/sw_64/sw_64.h + create mode 100644 gcc/config/sw_64/sw_64.md + create mode 100644 gcc/config/sw_64/sw_64.opt + create mode 100644 gcc/config/sw_64/sync.md + create mode 100644 gcc/config/sw_64/t-linux + create mode 100644 gcc/config/sw_64/t-sw_64 + create mode 100644 gcc/config/sw_64/x-sw_64 + +diff --git a/gcc/ada/libgnarl/s-linux__sw_64.ads b/gcc/ada/libgnarl/s-linux__sw_64.ads +new file mode 100644 +index 000000000..b63d4cc46 +--- /dev/null ++++ b/gcc/ada/libgnarl/s-linux__sw_64.ads +@@ -0,0 +1,138 @@ ++------------------------------------------------------------------------------ ++-- -- ++-- GNU ADA RUN-TIME LIBRARY (GNARL) COMPONENTS -- ++-- -- ++-- S Y S T E M . 
L I N U X -- ++-- -- ++-- S p e c -- ++-- -- ++-- Copyright (C) 2009-2022, Free Software Foundation, Inc. -- ++-- -- ++-- GNARL is free software; you can redistribute it and/or modify it under -- ++-- terms of the GNU General Public License as published by the Free Soft- -- ++-- ware Foundation; either version 3, or (at your option) any later ver- -- ++-- sion. GNAT is distributed in the hope that it will be useful, but WITH- -- ++-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -- ++-- or FITNESS FOR A PARTICULAR PURPOSE. -- ++-- -- ++-- As a special exception under Section 7 of GPL version 3, you are granted -- ++-- additional permissions described in the GCC Runtime Library Exception, -- ++-- version 3.1, as published by the Free Software Foundation. -- ++-- -- ++-- You should have received a copy of the GNU General Public License and -- ++-- a copy of the GCC Runtime Library Exception along with this program; -- ++-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -- ++-- . -- ++-- -- ++-- -- ++------------------------------------------------------------------------------ ++ ++-- This is the alpha version of this package ++ ++-- This package encapsulates cpu specific differences between implementations ++-- of GNU/Linux, in order to share s-osinte-linux.ads. ++ ++-- PLEASE DO NOT add any with-clauses to this package or remove the pragma ++-- Preelaborate. This package is designed to be a bottom-level (leaf) package. ++ ++with Interfaces.C; ++with System.Parameters; ++ ++package System.Linux is ++ pragma Preelaborate; ++ ++ ---------- ++ -- Time -- ++ ---------- ++ ++ subtype long is Interfaces.C.long; ++ subtype suseconds_t is Interfaces.C.long; ++ type time_t is range -2 ** (System.Parameters.time_t_bits - 1) ++ .. 
2 ** (System.Parameters.time_t_bits - 1) - 1; ++ subtype clockid_t is Interfaces.C.int; ++ ++ type timespec is record ++ tv_sec : time_t; ++ tv_nsec : long; ++ end record; ++ pragma Convention (C, timespec); ++ ++ type timeval is record ++ tv_sec : time_t; ++ tv_usec : suseconds_t; ++ end record; ++ pragma Convention (C, timeval); ++ ++ ----------- ++ -- Errno -- ++ ----------- ++ ++ EAGAIN : constant := 35; ++ EINTR : constant := 4; ++ EINVAL : constant := 22; ++ ENOMEM : constant := 12; ++ EPERM : constant := 1; ++ ETIMEDOUT : constant := 60; ++ ++ ------------- ++ -- Signals -- ++ ------------- ++ ++ SIGHUP : constant := 1; -- hangup ++ SIGINT : constant := 2; -- interrupt (rubout) ++ SIGQUIT : constant := 3; -- quit (ASCD FS) ++ SIGILL : constant := 4; -- illegal instruction (not reset) ++ SIGTRAP : constant := 5; -- trace trap (not reset) ++ SIGIOT : constant := 6; -- IOT instruction ++ SIGABRT : constant := 6; -- used by abort, replace SIGIOT in the future ++ SIGFPE : constant := 8; -- floating point exception ++ SIGKILL : constant := 9; -- kill (cannot be caught or ignored) ++ SIGBUS : constant := 10; -- bus error ++ SIGSEGV : constant := 11; -- segmentation violation ++ SIGSYS : constant := 12; -- bad system call ++ SIGPIPE : constant := 13; -- write on a pipe with no one to read it ++ SIGALRM : constant := 14; -- alarm clock ++ SIGTERM : constant := 15; -- software termination signal from kill ++ SIGURG : constant := 16; -- urgent condition on IO channel ++ SIGSTOP : constant := 17; -- stop (cannot be caught or ignored) ++ SIGTSTP : constant := 18; -- user stop requested from tty ++ SIGCONT : constant := 19; -- stopped process has been continued ++ SIGCLD : constant := 20; -- alias for SIGCHLD ++ SIGCHLD : constant := 20; -- child status change ++ SIGTTIN : constant := 21; -- background tty read attempted ++ SIGTTOU : constant := 22; -- background tty write attempted ++ SIGIO : constant := 23; -- I/O now possible (4.2 BSD) ++ SIGPOLL : constant := 23; -- 
pollable event occurred ++ SIGXCPU : constant := 24; -- CPU time limit exceeded ++ SIGXFSZ : constant := 25; -- filesize limit exceeded ++ SIGVTALRM : constant := 26; -- virtual timer expired ++ SIGPROF : constant := 27; -- profiling timer expired ++ SIGWINCH : constant := 28; -- window size change ++ SIGPWR : constant := 29; -- power-fail restart ++ SIGUSR1 : constant := 30; -- user defined signal 1 ++ SIGUSR2 : constant := 31; -- user defined signal 2 ++ ++ SIG32 : constant := 32; -- glibc internal signal ++ SIG33 : constant := 33; -- glibc internal signal ++ SIG34 : constant := 34; -- glibc internal signal ++ ++ SIGADAABORT : constant := SIGABRT; ++ -- Change this if you want to use another signal for task abort. ++ -- SIGTERM might be a good one. ++ ++ SIGUNUSED : constant := 0; ++ SIGSTKFLT : constant := 0; ++ SIGLOST : constant := 0; ++ -- These don't exist for Linux/Alpha. The constants are present ++ -- so that we can continue to use a-intnam-linux.ads. ++ ++ -- struct_sigaction offsets ++ ++ sa_handler_pos : constant := 0; ++ sa_mask_pos : constant := Standard'Address_Size / 8; ++ sa_flags_pos : constant := 128 + sa_mask_pos; ++ ++ SA_SIGINFO : constant := 16#40#; ++ SA_ONSTACK : constant := 16#01#; ++ ++end System.Linux; +diff --git a/gcc/ada/libgnat/system-linux-sw_64.ads b/gcc/ada/libgnat/system-linux-sw_64.ads +new file mode 100644 +index 000000000..b6f15501e +--- /dev/null ++++ b/gcc/ada/libgnat/system-linux-sw_64.ads +@@ -0,0 +1,146 @@ ++------------------------------------------------------------------------------ ++-- -- ++-- GNAT RUN-TIME COMPONENTS -- ++-- -- ++-- S Y S T E M -- ++-- -- ++-- S p e c -- ++-- (GNU-Linux/alpha Version) -- ++-- -- ++-- Copyright (C) 1992-2022, Free Software Foundation, Inc. -- ++-- -- ++-- This specification is derived from the Ada Reference Manual for use with -- ++-- GNAT. 
The copyright notice above, and the license provisions that follow -- ++-- apply solely to the contents of the part following the private keyword. -- ++-- -- ++-- GNAT is free software; you can redistribute it and/or modify it under -- ++-- terms of the GNU General Public License as published by the Free Soft- -- ++-- ware Foundation; either version 3, or (at your option) any later ver- -- ++-- sion. GNAT is distributed in the hope that it will be useful, but WITH- -- ++-- OUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -- ++-- or FITNESS FOR A PARTICULAR PURPOSE. -- ++-- -- ++-- As a special exception under Section 7 of GPL version 3, you are granted -- ++-- additional permissions described in the GCC Runtime Library Exception, -- ++-- version 3.1, as published by the Free Software Foundation. -- ++-- -- ++-- You should have received a copy of the GNU General Public License and -- ++-- a copy of the GCC Runtime Library Exception along with this program; -- ++-- see the files COPYING3 and COPYING.RUNTIME respectively. If not, see -- ++-- . -- ++-- -- ++-- GNAT was originally developed by the GNAT team at New York University. -- ++-- Extensive contributions were provided by Ada Core Technologies Inc. -- ++-- -- ++------------------------------------------------------------------------------ ++ ++package System is ++ pragma Pure; ++ -- Note that we take advantage of the implementation permission to make ++ -- this unit Pure instead of Preelaborable; see RM 13.7.1(15). In Ada ++ -- 2005, this is Pure in any case (AI-362). 
++ ++ pragma No_Elaboration_Code_All; ++ -- Allow the use of that restriction in units that WITH this unit ++ ++ type Name is (SYSTEM_NAME_GNAT); ++ System_Name : constant Name := SYSTEM_NAME_GNAT; ++ ++ -- System-Dependent Named Numbers ++ ++ Min_Int : constant := -2 ** (Standard'Max_Integer_Size - 1); ++ Max_Int : constant := 2 ** (Standard'Max_Integer_Size - 1) - 1; ++ ++ Max_Binary_Modulus : constant := 2 ** Standard'Max_Integer_Size; ++ Max_Nonbinary_Modulus : constant := 2 ** Integer'Size - 1; ++ ++ Max_Base_Digits : constant := Long_Long_Float'Digits; ++ Max_Digits : constant := Long_Long_Float'Digits; ++ ++ Max_Mantissa : constant := Standard'Max_Integer_Size - 1; ++ Fine_Delta : constant := 2.0 ** (-Max_Mantissa); ++ ++ Tick : constant := 1.0 / 1024.0; ++ ++ -- Storage-related Declarations ++ ++ type Address is private; ++ pragma Preelaborable_Initialization (Address); ++ Null_Address : constant Address; ++ ++ Storage_Unit : constant := 8; ++ Word_Size : constant := 64; ++ Memory_Size : constant := 2 ** 64; ++ ++ -- Address comparison ++ ++ function "<" (Left, Right : Address) return Boolean; ++ function "<=" (Left, Right : Address) return Boolean; ++ function ">" (Left, Right : Address) return Boolean; ++ function ">=" (Left, Right : Address) return Boolean; ++ function "=" (Left, Right : Address) return Boolean; ++ ++ pragma Import (Intrinsic, "<"); ++ pragma Import (Intrinsic, "<="); ++ pragma Import (Intrinsic, ">"); ++ pragma Import (Intrinsic, ">="); ++ pragma Import (Intrinsic, "="); ++ ++ -- Other System-Dependent Declarations ++ ++ type Bit_Order is (High_Order_First, Low_Order_First); ++ Default_Bit_Order : constant Bit_Order := Low_Order_First; ++ pragma Warnings (Off, Default_Bit_Order); -- kill constant condition warning ++ ++ -- Priority-related Declarations (RM D.1) ++ ++ Max_Priority : constant Positive := 30; ++ Max_Interrupt_Priority : constant Positive := 31; ++ ++ subtype Any_Priority is Integer range 0 .. 
31; ++ subtype Priority is Any_Priority range 0 .. 30; ++ subtype Interrupt_Priority is Any_Priority range 31 .. 31; ++ ++ Default_Priority : constant Priority := 15; ++ ++private ++ ++ type Address is mod Memory_Size; ++ Null_Address : constant Address := 0; ++ ++ -------------------------------------- ++ -- System Implementation Parameters -- ++ -------------------------------------- ++ ++ -- These parameters provide information about the target that is used ++ -- by the compiler. They are in the private part of System, where they ++ -- can be accessed using the special circuitry in the Targparm unit ++ -- whose source should be consulted for more detailed descriptions ++ -- of the individual switch values. ++ ++ Backend_Divide_Checks : constant Boolean := False; ++ Backend_Overflow_Checks : constant Boolean := True; ++ Command_Line_Args : constant Boolean := True; ++ Configurable_Run_Time : constant Boolean := False; ++ Denorm : constant Boolean := True; ++ Duration_32_Bits : constant Boolean := False; ++ Exit_Status_Supported : constant Boolean := True; ++ Machine_Overflows : constant Boolean := False; ++ Machine_Rounds : constant Boolean := True; ++ Preallocated_Stacks : constant Boolean := False; ++ Signed_Zeros : constant Boolean := True; ++ Stack_Check_Default : constant Boolean := False; ++ Stack_Check_Probes : constant Boolean := True; ++ Stack_Check_Limits : constant Boolean := False; ++ Support_Aggregates : constant Boolean := True; ++ Support_Atomic_Primitives : constant Boolean := True; ++ Support_Composite_Assign : constant Boolean := True; ++ Support_Composite_Compare : constant Boolean := True; ++ Support_Long_Shifts : constant Boolean := True; ++ Always_Compatible_Rep : constant Boolean := False; ++ Suppress_Standard_Library : constant Boolean := False; ++ Use_Ada_Main_Program_Name : constant Boolean := False; ++ Frontend_Exceptions : constant Boolean := False; ++ ZCX_By_Default : constant Boolean := True; ++ ++end System; +diff --git 
a/gcc/auto-inc-dec.cc b/gcc/auto-inc-dec.cc +index 85fe9d149..277eda070 100644 +--- a/gcc/auto-inc-dec.cc ++++ b/gcc/auto-inc-dec.cc +@@ -892,6 +892,10 @@ parse_add_or_inc (rtx_insn *insn, bool before_mem) + inc_insn.reg1_val = -INTVAL (XEXP (SET_SRC (pat), 1)); + inc_insn.reg1 = GEN_INT (inc_insn.reg1_val); + } ++#ifdef FLAG_SW64_INC_DEC ++ if (inc_insn.reg1_val > 2047 || inc_insn.reg1_val < -2048) ++ return false; ++#endif + return true; + } + else if ((HAVE_PRE_MODIFY_REG || HAVE_POST_MODIFY_REG) +@@ -1369,6 +1373,10 @@ find_mem (rtx *address_of_x) + mem_insn.reg1_is_const = true; + /* Match with *(reg0 + c) where c is a const. */ + mem_insn.reg1_val = INTVAL (reg1); ++#ifdef FLAG_SW64_INC_DEC ++ if (mem_insn.reg1_val > 2047 || mem_insn.reg1_val < -2048) ++ return false; ++#endif + if (find_inc (true)) + return true; + } +@@ -1696,8 +1704,13 @@ public: + { + if (!AUTO_INC_DEC) + return false; +- ++#ifdef FLAG_SW64_INC_DEC ++ return ((optimize > 0 && flag_auto_inc_dec && (!TARGET_SW8A)) ++ || (optimize > 0 && flag_auto_inc_dec && (TARGET_SW8A) ++ && flag_sw_auto_inc_dec)); ++#else + return (optimize > 0 && flag_auto_inc_dec); ++#endif + } + + +diff --git a/gcc/common/config/sw_64/sw_64-common.cc b/gcc/common/config/sw_64/sw_64-common.cc +new file mode 100644 +index 000000000..b3d34dad5 +--- /dev/null ++++ b/gcc/common/config/sw_64/sw_64-common.cc +@@ -0,0 +1,115 @@ ++/* Common hooks for Sw64. ++ Copyright (C) 1992-2022 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. 
++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "diagnostic-core.h" ++#include "tm.h" ++#include "common/common-target.h" ++#include "common/common-target-def.h" ++#include "opts.h" ++#include "flags.h" ++ ++int flag_fpcr_set; ++ ++/* Implement TARGET_OPTION_OPTIMIZATION_TABLE. */ ++static const struct default_options sw_64_option_optimization_table[] = { ++ /* Enable redundant extension instructions removal at -O2 and higher. */ ++ {OPT_LEVELS_2_PLUS, OPT_free, NULL, 1}, ++ {OPT_LEVELS_NONE, 0, NULL, 0}}; ++ ++/* Implement TARGET_OPTION_INIT_STRUCT. */ ++ ++static void ++sw_64_option_init_struct (struct gcc_options *opts ATTRIBUTE_UNUSED) ++{ ++ opts->x_target_flags |= MASK_IEEE; ++ global_options.x_flag_prefetch_loop_arrays = 1; ++} ++ ++/* Implement TARGET_HANDLE_OPTION. */ ++ ++static bool ++sw_64_handle_option (struct gcc_options *opts, ++ struct gcc_options *opts_set ATTRIBUTE_UNUSED, ++ const struct cl_decoded_option *decoded, location_t loc) ++{ ++ size_t code = decoded->opt_index; ++ const char *arg = decoded->arg; ++ int value = decoded->value; ++ ++ switch (code) ++ { ++ case OPT_mfp_regs: ++ if (value == 0) ++ opts->x_target_flags |= MASK_SOFT_FP; ++ break; ++ ++ case OPT_mieee: ++ case OPT_mieee_with_inexact: ++ /* add mieee for sw_64 */ ++ case OPT_mieee_main: ++ if (code == OPT_mieee) ++ flag_fpcr_set = 1; ++ else if (code == OPT_mieee_with_inexact) ++ flag_fpcr_set = 3; ++ else if (code == OPT_mieee_main) ++ flag_fpcr_set = 4; ++ opts->x_target_flags |= MASK_IEEE_CONFORMANT; ++ break; ++ ++ case OPT_mtls_size_: ++ if (value != 16 && value != 32 && value != 64) ++ error_at (loc, "bad value %qs for %<-mtls-size%> switch", arg); ++ break; ++ ++ case OPT_mtls_tlsgd_: ++ if (value != 16 && value != 32) ++ error_at (loc, "bad value %qs for -mtls-tlsgd switch", arg); ++ break; ++ ++ case 
OPT_mtls_tlsldm_: ++ if (value != 16 && value != 32) ++ error_at (loc, "bad value %qs for -mtls-tlsldm switch", arg); ++ break; ++ ++ // 2021-12-06 add by wangw, to support mgprel-size option ++ case OPT_mgprel_size_: ++ if (value != 16 && value != 32) ++ error_at (loc, "bad value %qs for -mgprel-size switch", arg); ++ break; ++ } ++ ++ return true; ++} ++ ++#undef TARGET_DEFAULT_TARGET_FLAGS ++#define TARGET_DEFAULT_TARGET_FLAGS \ ++ (TARGET_DEFAULT | TARGET_CPU_DEFAULT | TARGET_DEFAULT_EXPLICIT_RELOCS) ++#undef TARGET_HANDLE_OPTION ++#define TARGET_HANDLE_OPTION sw_64_handle_option ++ ++#undef TARGET_OPTION_INIT_STRUCT ++#define TARGET_OPTION_INIT_STRUCT sw_64_option_init_struct ++ ++#undef TARGET_OPTION_OPTIMIZATION_TABLE ++#define TARGET_OPTION_OPTIMIZATION_TABLE sw_64_option_optimization_table ++ ++struct gcc_targetm_common targetm_common = TARGETM_COMMON_INITIALIZER; +diff --git a/gcc/config/sw_64/constraints.md b/gcc/config/sw_64/constraints.md +new file mode 100644 +index 000000000..6d06f0e13 +--- /dev/null ++++ b/gcc/config/sw_64/constraints.md +@@ -0,0 +1,123 @@ ++;; Constraint definitions for Sw64. ++;; Copyright (C) 2007-2022 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++;;; Unused letters: ++;;; ABCDEF H V YZ ++;;; de ghijkl pq tu wxyz ++ ++;; Integer register constraints. 
++ ++(define_register_constraint "a" "R24_REG" ++ "General register 24, input to division routine") ++ ++(define_register_constraint "b" "R25_REG" ++ "General register 24, input to division routine") ++ ++(define_register_constraint "c" "R27_REG" ++ "General register 27, function call address") ++ ++(define_register_constraint "f" "TARGET_FPREGS ? FLOAT_REGS : NO_REGS" ++ "Any floating-point register") ++ ++(define_register_constraint "v" "R0_REG" ++ "General register 0, function value return address") ++ ++(define_memory_constraint "w" ++ "A memory whose address is only a register" ++ (match_operand 0 "mem_noofs_operand")) ++ ++;; Integer constant constraints. ++(define_constraint "I" ++ "An unsigned 8 bit constant" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (ival, 0, 255)"))) ++ ++(define_constraint "J" ++ "The constant zero" ++ (and (match_code "const_int") ++ (match_test "ival == 0"))) ++ ++(define_constraint "K" ++ "Signed 16-bit integer constant" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (ival, -32768, 32767)"))) ++ ++(define_constraint "L" ++ "A shifted signed 16-bit constant appropriate for LDAH" ++ (and (match_code "const_int") ++ (match_test "(ival & 0xffff) == 0 ++ && (ival >> 31 == -1 || ival >> 31 == 0)"))) ++ ++(define_constraint "M" ++ "A valid operand of a ZAP insn" ++ (and (match_code "const_int") ++ (match_test "zap_mask (ival) != 0"))) ++ ++(define_constraint "N" ++ "A complemented unsigned 8-bit constant" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (~ival, 0, 255)"))) ++ ++(define_constraint "O" ++ "A negated unsigned 8-bit constant" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (-ival, 0, 255)"))) ++ ++(define_constraint "P" ++ "The constant 1, 2 or 3" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (ival, 1, 3)"))) ++ ++;; Floating-point constant constraints. 
++(define_constraint "G" ++ "The floating point zero constant" ++ (and (match_code "const_double") ++ (match_test "op == CONST0_RTX (mode)"))) ++ ++;; "Extra" constraints. ++ ++;; A memory location that is not a reference ++;; (using an AND) to an unaligned location. ++(define_memory_constraint "Q" ++ "@internal A normal_memory_operand" ++ (and (match_code "mem") ++ (not (match_code "and" "0")))) ++ ++(define_constraint "R" ++ "@internal A direct_call_operand" ++ (match_operand:DI 0 "direct_call_operand")) ++ ++(define_constraint "S" ++ "An unsigned 6-bit constant" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (ival, 0, 63)"))) ++ ++(define_constraint "T" ++ "@internal A high-part symbol" ++ (match_code "high")) ++ ++(define_constraint "W" ++ "A vector zero constant" ++ (and (match_code "const_vector") ++ (match_test "op == CONST0_RTX (mode)"))) ++ ++(define_constraint "Y" ++ "An unsigned 5-bit constant" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (ival, 0, 31)"))) +diff --git a/gcc/config/sw_64/driver-sw_64.cc b/gcc/config/sw_64/driver-sw_64.cc +new file mode 100644 +index 000000000..a3e50aba1 +--- /dev/null ++++ b/gcc/config/sw_64/driver-sw_64.cc +@@ -0,0 +1,105 @@ ++/* Subroutines for the gcc driver. ++ Copyright (C) 2009-2022 Free Software Foundation, Inc. ++ Contributed by Arthur Loiret ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. 
*/ ++ ++#define IN_TARGET_CODE 1 ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++ ++/* Chip family type IDs, returned by implver instruction. */ ++#define IMPLVER_SW6_FAMILY 2 /* SW6 */ ++#define IMPLVER_SW8_FAMILY 4 /* SW8 */ ++ ++/* Bit defines for amask instruction. */ ++#define AMASK_BWX 0x1 /* byte/word extension. */ ++#define AMASK_FIX \ ++ 0x2 /* sqrt and f <-> i conversions \ ++ extension. */ ++#define AMASK_CIX 0x4 /* count extension. */ ++#define AMASK_MVI 0x100 /* multimedia extension. */ ++#define AMASK_PRECISE 0x200 /* Precise arithmetic traps. */ ++#define AMASK_LOCKPFTCHOK \ ++ 0x1000 /* Safe to prefetch lock cache \ ++ block. */ ++#define AMASK_SW6A (1U << 16) ++#define AMASK_SW6B (1U << 17) ++#define AMASK_SW4D (1U << 18) ++#define AMASK_SW8A (1U << 19) ++ ++/* This will be called by the spec parser in gcc.c when it sees ++ a %:local_cpu_detect(args) construct. Currently it will be called ++ with either "cpu" or "tune" as argument depending on if -mcpu=native ++ or -mtune=native is to be substituted. ++ ++ It returns a string containing new command line parameters to be ++ put at the place of the above two options, depending on what CPU ++ this is executed. E.g. "-mcpu=sw6" on an Sw_64 for ++ -mcpu=native. If the routine can't detect a known processor, ++ the -mcpu or -mtune option is discarded. ++ ++ ARGC and ARGV are set depending on the actual arguments given ++ in the spec. 
*/ ++const char * ++host_detect_local_cpu (int argc, const char **argv) ++{ ++ static const struct cpu_types ++ { ++ long implver; ++ long amask; ++ const char *const cpu; ++ } cpu_types[] = {{IMPLVER_SW6_FAMILY, ++ AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_SW6B, "sw6b"}, ++ {IMPLVER_SW6_FAMILY, ++ AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_SW6A, "sw6a"}, ++ {IMPLVER_SW6_FAMILY, ++ AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_SW4D, "sw4d"}, ++ {IMPLVER_SW8_FAMILY, ++ AMASK_BWX | AMASK_FIX | AMASK_CIX | AMASK_SW8A, "sw8a"}, ++ {0, 0, NULL}}; ++ long implver; ++ long amask; ++ const char *cpu; ++ int i; ++ ++ if (argc < 1) ++ return NULL; ++ ++ if (strcmp (argv[0], "cpu") && strcmp (argv[0], "tune")) ++ return NULL; ++ ++ implver = __builtin_sw_64_implver (); ++ amask = __builtin_sw_64_amask (~0L); ++ cpu = NULL; ++ ++ for (i = 0; cpu_types[i].cpu != NULL; i++) ++ if (implver == cpu_types[i].implver ++ && (~amask & cpu_types[i].amask) == cpu_types[i].amask) ++ { ++ cpu = cpu_types[i].cpu; ++ break; ++ } ++ ++ if (cpu == NULL) ++ return NULL; ++ ++ return concat ("-m", argv[0], "=", cpu, NULL); ++} +diff --git a/gcc/config/sw_64/elf.h b/gcc/config/sw_64/elf.h +new file mode 100644 +index 000000000..d3809508b +--- /dev/null ++++ b/gcc/config/sw_64/elf.h +@@ -0,0 +1,190 @@ ++/* Definitions of target machine for GNU compiler, for Sw64 w/ELF. ++ Copyright (C) 1996-2022 Free Software Foundation, Inc. ++ Contributed by Richard Henderson (rth@tamu.edu). ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. 
++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#undef CC1_SPEC ++#define CC1_SPEC "%{G*}" ++ ++#undef ASM_SPEC ++#define ASM_SPEC "%{G*} %{relax:-relax} %{mcpu=*:-m%*}" ++ ++/* Do not output a .file directive at the beginning of the input file. */ ++ ++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE ++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true ++ ++/* This is how to output an assembler line ++ that says to advance the location counter ++ to a multiple of 2**LOG bytes. */ ++ ++#define ASM_OUTPUT_ALIGN(FILE, LOG) \ ++ if ((LOG) != 0) \ ++ fprintf (FILE, "\t.align %d\n", LOG); ++ ++/* This says how to output assembler code to declare an ++ uninitialized internal linkage data object. Under SVR4, ++ the linker seems to want the alignment of data objects ++ to depend on their types. We do exactly that here. */ ++ ++#undef ASM_OUTPUT_ALIGNED_LOCAL ++#define ASM_OUTPUT_ALIGNED_LOCAL(FILE, NAME, SIZE, ALIGN) \ ++ do \ ++ { \ ++ if ((SIZE) <= (unsigned HOST_WIDE_INT) g_switch_value) \ ++ switch_to_section (sbss_section); \ ++ else \ ++ switch_to_section (bss_section); \ ++ ASM_OUTPUT_TYPE_DIRECTIVE (FILE, NAME, "object"); \ ++ if (!flag_inhibit_size_directive) \ ++ ASM_OUTPUT_SIZE_DIRECTIVE (FILE, NAME, SIZE); \ ++ ASM_OUTPUT_ALIGN ((FILE), exact_log2 ((ALIGN) / BITS_PER_UNIT)); \ ++ ASM_OUTPUT_LABEL (FILE, NAME); \ ++ ASM_OUTPUT_SKIP ((FILE), (SIZE) ? (SIZE) : 1); \ ++ } while (0) ++ ++/* This says how to output assembler code to declare an ++ uninitialized external linkage data object. 
*/ ++ ++#undef ASM_OUTPUT_ALIGNED_BSS ++#define ASM_OUTPUT_ALIGNED_BSS(FILE, DECL, NAME, SIZE, ALIGN) \ ++ do \ ++ { \ ++ ASM_OUTPUT_ALIGNED_LOCAL (FILE, NAME, SIZE, ALIGN); \ ++ } while (0) ++ ++#undef BSS_SECTION_ASM_OP ++#define BSS_SECTION_ASM_OP "\t.section\t.bss" ++#undef SBSS_SECTION_ASM_OP ++#define SBSS_SECTION_ASM_OP "\t.section\t.sbss,\"aw\"" ++#undef SDATA_SECTION_ASM_OP ++#define SDATA_SECTION_ASM_OP "\t.section\t.sdata,\"aw\"" ++ ++/* This is how we tell the assembler that two symbols have the same value. */ ++ ++#undef ASM_OUTPUT_DEF ++#define ASM_OUTPUT_DEF(FILE, ALIAS, NAME) \ ++ do \ ++ { \ ++ assemble_name (FILE, ALIAS); \ ++ fputs (" = ", FILE); \ ++ assemble_name (FILE, NAME); \ ++ fputc ('\n', FILE); \ ++ } while (0) ++ ++#undef ASM_OUTPUT_DEF_FROM_DECLS ++#define ASM_OUTPUT_DEF_FROM_DECLS(FILE, DECL, TARGET) \ ++ do \ ++ { \ ++ const char *alias = XSTR (XEXP (DECL_RTL (DECL), 0), 0); \ ++ const char *name = IDENTIFIER_POINTER (TARGET); \ ++ if (TREE_CODE (DECL) == FUNCTION_DECL) \ ++ { \ ++ fputc ('$', FILE); \ ++ assemble_name (FILE, alias); \ ++ fputs ("..ng = $", FILE); \ ++ assemble_name (FILE, name); \ ++ fputs ("..ng\n", FILE); \ ++ } \ ++ ASM_OUTPUT_DEF (FILE, alias, name); \ ++ } while (0) ++ ++/* Provide a STARTFILE_SPEC appropriate for ELF. Here we add the ++ (even more) magical crtbegin.o file which provides part of the ++ support for getting C++ file-scope static object constructed ++ before entering `main'. */ ++ ++#undef STARTFILE_SPEC ++#ifdef HAVE_LD_PIE ++#define STARTFILE_SPEC \ ++ "%{!shared: %{pg|p:gcrt1.o%s;pie:Scrt1.o%s;:crt1.o%s}}\ ++ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}" ++#else ++#define STARTFILE_SPEC \ ++ "%{!shared: %{pg|p:gcrt1.o%s;:crt1.o%s}}\ ++ crti.o%s %{static:crtbeginT.o%s;shared|pie:crtbeginS.o%s;:crtbegin.o%s}" ++#endif ++ ++/* Provide a ENDFILE_SPEC appropriate for ELF. 
Here we tack on the ++ magical crtend.o file which provides part of the support for ++ getting C++ file-scope static object constructed before entering ++ `main', followed by a normal ELF "finalizer" file, `crtn.o'. */ ++ ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC \ ++ "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \ ++ %{shared|pie:crtendS.o%s;:crtend.o%s} crtn.o%s" ++ ++/* This variable should be set to 'true' if the target ABI requires ++ unwinding tables even when exceptions are not used. */ ++#define TARGET_UNWIND_TABLES_DEFAULT true ++ ++/* Select a format to encode pointers in exception handling data. CODE ++ is 0 for data, 1 for code labels, 2 for function pointers. GLOBAL is ++ true if the symbol may be affected by dynamic relocations. ++ ++ Since application size is already constrained to <2GB by the form of ++ the ldgp relocation, we can use a 32-bit pc-relative relocation to ++ static data. Dynamic data is accessed indirectly to allow for read ++ only EH sections. */ ++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \ ++ (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4) ++ ++/* If defined, a C statement to be executed just prior to the output of ++ assembler code for INSN. */ ++#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \ ++ (sw_64_this_literal_sequence_number = 0, \ ++ sw_64_this_gpdisp_sequence_number = 0) ++extern int sw_64_this_literal_sequence_number; ++extern int sw_64_this_gpdisp_sequence_number; ++ ++/* Since the bits of the _init and _fini function is spread across ++ many object files, each potentially with its own GP, we must assume ++ we need to load our GP. Further, the .init/.fini section can ++ easily be more than 4MB away from the function to call so we can't ++ use bsr. 
*/ ++// jsr->call ++#ifdef __sw_64_sw8a__ ++#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ ++ asm (SECTION_OP "\n" \ ++ " addpi 0, $29\n" \ ++ " ldgp $29,0($29)\n" \ ++ " unop\n" \ ++ " call $26," USER_LABEL_PREFIX #FUNC "\n" \ ++ " .align 3\n" \ ++ " .previous"); ++#else ++#define CRT_CALL_STATIC_FUNCTION(SECTION_OP, FUNC) \ ++ asm (SECTION_OP "\n" \ ++ " br $29,1f\n" \ ++ "1: ldgp $29,0($29)\n" \ ++ " unop\n" \ ++ " call $26," USER_LABEL_PREFIX #FUNC "\n" \ ++ " .align 3\n" \ ++ " .previous"); ++#endif ++/* If we have the capability create headers for efficient EH lookup. ++ As of Jan 2002, only glibc 2.2.4 can actually make use of this, but ++ I imagine that other systems will catch up. In the meantime, it ++ doesn't harm to make sure that the data exists to be used later. */ ++#if defined(HAVE_LD_EH_FRAME_HDR) ++#define LINK_EH_SPEC "%{!static|static-pie:--eh-frame-hdr} " ++#endif ++ ++#undef ASM_DECLARE_OBJECT_NAME ++#define ASM_DECLARE_OBJECT_NAME sw_64_declare_object_name +diff --git a/gcc/config/sw_64/elf.opt b/gcc/config/sw_64/elf.opt +new file mode 100644 +index 000000000..3cdc21f4a +--- /dev/null ++++ b/gcc/config/sw_64/elf.opt +@@ -0,0 +1,29 @@ ++; Sw_64 ELF options. ++ ++; Copyright (C) 2011-2022 Free Software Foundation, Inc. ++; ++; This file is part of GCC. ++; ++; GCC is free software; you can redistribute it and/or modify it under ++; the terms of the GNU General Public License as published by the Free ++; Software Foundation; either version 3, or (at your option) any later ++; version. ++; ++; GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++; WARRANTY; without even the implied warranty of MERCHANTABILITY or ++; FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++; for more details. ++; ++; You should have received a copy of the GNU General Public License ++; along with GCC; see the file COPYING3. If not see ++; . 
++ ++; See the GCC internals manual (options.texi) for a description of ++; this file's format. ++ ++; Please try to keep this file in ASCII collating order. ++ ++relax ++Driver ++ ++; This comment is to ensure we retain the blank line above. +diff --git a/gcc/config/sw_64/freebsd.h b/gcc/config/sw_64/freebsd.h +new file mode 100644 +index 000000000..a866fa817 +--- /dev/null ++++ b/gcc/config/sw_64/freebsd.h +@@ -0,0 +1,69 @@ ++/* Definitions for Sw64 running FreeBSD using the ELF format ++ Copyright (C) 2000-2022 Free Software Foundation, Inc. ++ Contributed by David E. O'Brien and BSDi. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#undef EXTRA_SPECS ++#define EXTRA_SPECS \ ++ { \ ++ "fbsd_dynamic_linker", FBSD_DYNAMIC_LINKER \ ++ } ++ ++/* Provide a CPP_SPEC appropriate for FreeBSD/sw_64 -- dealing with ++ the GCC option `-posix'. 
*/ ++ ++#undef CPP_SPEC ++#define CPP_SPEC "%{posix:-D_POSIX_SOURCE}" ++ ++#define LINK_SPEC \ ++ "%{G*} %{relax:-relax} \ ++ %{p:%nconsider using '-pg' instead of '-p' with gprof(1)} \ ++ %{assert*} %{R*} %{rpath*} %{defsym*} \ ++ %{shared:-Bshareable %{h*} %{soname*}} \ ++ %{!shared: \ ++ %{!static: \ ++ %{rdynamic:-export-dynamic} \ ++ -dynamic-linker %(fbsd_dynamic_linker) } \ ++ %{static:-Bstatic}} \ ++ %{symbolic:-Bsymbolic}" ++ ++/************************[ Target stuff ]***********************************/ ++ ++/* Define the actual types of some ANSI-mandated types. ++ Needs to agree with . GCC defaults come from c-decl.c, ++ c-common.c, and config//.h. */ ++ ++/* sw_64.h gets this wrong for FreeBSD. We use the GCC defaults instead. */ ++#undef WCHAR_TYPE ++ ++#undef WCHAR_TYPE_SIZE ++#define WCHAR_TYPE_SIZE 32 ++ ++#define TARGET_ELF 1 ++ ++#undef HAS_INIT_SECTION ++ ++/* Show that we need a GP when profiling. */ ++#undef TARGET_PROFILING_NEEDS_GP ++#define TARGET_PROFILING_NEEDS_GP 1 ++ ++/* Don't default to pcc-struct-return, we want to retain compatibility with ++ older FreeBSD releases AND pcc-struct-return may not be reentrant. */ ++ ++#undef DEFAULT_PCC_STRUCT_RETURN ++#define DEFAULT_PCC_STRUCT_RETURN 0 +diff --git a/gcc/config/sw_64/gnu-user.h b/gcc/config/sw_64/gnu-user.h +new file mode 100644 +index 000000000..539f0573a +--- /dev/null ++++ b/gcc/config/sw_64/gnu-user.h +@@ -0,0 +1,177 @@ ++/* Definitions for systems using, at least optionally, a GNU ++ (glibc-based) userspace or other userspace with libc derived from ++ glibc (e.g. uClibc) or for which similar specs are appropriate. ++ Copyright (C) 1995-2022 Free Software Foundation, Inc. ++ Contributed by Eric Youngdale. ++ Modified for stabs-in-ELF by H.J. Lu (hjl@lucon.org). ++ ++This file is part of GCC. 
++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++Under Section 7 of GPL version 3, you are granted additional ++permissions described in the GCC Runtime Library Exception, version ++3.1, as published by the Free Software Foundation. ++ ++You should have received a copy of the GNU General Public License and ++a copy of the GCC Runtime Library Exception along with this program; ++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++. */ ++ ++/* Don't assume anything about the header files. */ ++//#define SYSTEM_IMPLICIT_EXTERN_C ++/* ++#undef ASM_APP_ON ++#define ASM_APP_ON "#APP\n" ++ ++#undef ASM_APP_OFF ++#define ASM_APP_OFF "#NO_APP\n" ++*/ ++#if ENABLE_OFFLOADING == 1 ++#define CRTOFFLOADBEGIN "%{fopenacc|fopenmp:crtoffloadbegin%O%s}" ++#define CRTOFFLOADEND "%{fopenacc|fopenmp:crtoffloadend%O%s}" ++#else ++#define CRTOFFLOADBEGIN "" ++#define CRTOFFLOADEND "" ++#endif ++ ++/* Provide a STARTFILE_SPEC appropriate for GNU userspace. Here we add ++ the GNU userspace magical crtbegin.o file (see crtstuff.c) which ++ provides part of the support for getting C++ file-scope static ++ object constructed before entering `main'. 
*/ ++ ++#if defined HAVE_LD_PIE ++#define GNU_USER_TARGET_STARTFILE_SPEC \ ++ "%{shared:; \ ++ pg|p|profile:%{static-pie:grcrt1.o%s;:gcrt1.o%s}; \ ++ static:crt1.o%s; \ ++ static-pie:rcrt1.o%s; \ ++ " PIE_SPEC ":Scrt1.o%s; \ ++ :crt1.o%s} \ ++ crti.o%s \ ++ %{static:crtbeginT.o%s; \ ++ shared|static-pie|" PIE_SPEC ":crtbeginS.o%s; \ ++ :crtbegin.o%s} \ ++ %{fvtable-verify=none:%s; \ ++ fvtable-verify=preinit:vtv_start_preinit.o%s; \ ++ fvtable-verify=std:vtv_start.o%s} \ ++ " CRTOFFLOADBEGIN ++#else ++#define GNU_USER_TARGET_STARTFILE_SPEC \ ++ "%{shared:; \ ++ pg|p|profile:gcrt1.o%s; \ ++ :crt1.o%s} \ ++ crti.o%s \ ++ %{static:crtbeginT.o%s; \ ++ shared|pie|static-pie:crtbeginS.o%s; \ ++ :crtbegin.o%s} \ ++ %{fvtable-verify=none:%s; \ ++ fvtable-verify=preinit:vtv_start_preinit.o%s; \ ++ fvtable-verify=std:vtv_start.o%s} \ ++ " CRTOFFLOADBEGIN ++#endif ++#undef STARTFILE_SPEC ++#define STARTFILE_SPEC GNU_USER_TARGET_STARTFILE_SPEC ++ ++/* Provide a ENDFILE_SPEC appropriate for GNU userspace. Here we tack on ++ the GNU userspace magical crtend.o file (see crtstuff.c) which ++ provides part of the support for getting C++ file-scope static ++ object constructed before entering `main', followed by a normal ++ GNU userspace "finalizer" file, `crtn.o'. 
*/ ++ ++#if defined HAVE_LD_PIE ++#define GNU_USER_TARGET_ENDFILE_SPEC \ ++ "%{fvtable-verify=none:%s; \ ++ fvtable-verify=preinit:vtv_end_preinit.o%s; \ ++ fvtable-verify=std:vtv_end.o%s} \ ++ %{static:crtend.o%s; \ ++ shared|static-pie|" PIE_SPEC ":crtendS.o%s; \ ++ :crtend.o%s} \ ++ crtn.o%s \ ++ " CRTOFFLOADEND ++#else ++#define GNU_USER_TARGET_ENDFILE_SPEC \ ++ "%{fvtable-verify=none:%s; \ ++ fvtable-verify=preinit:vtv_end_preinit.o%s; \ ++ fvtable-verify=std:vtv_end.o%s} \ ++ %{static:crtend.o%s; \ ++ shared|pie|static-pie:crtendS.o%s; \ ++ :crtend.o%s} \ ++ crtn.o%s \ ++ " CRTOFFLOADEND ++#endif ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC GNU_USER_TARGET_ENDFILE_SPEC ++ ++/* This is for -profile to use -lc_p instead of -lc. */ ++#define GNU_USER_TARGET_CC1_SPEC "%{profile:-p}" ++#ifndef CC1_SPEC ++#define CC1_SPEC GNU_USER_TARGET_CC1_SPEC ++#endif ++ ++/* The GNU C++ standard library requires that these macros be defined. */ ++#undef CPLUSPLUS_CPP_SPEC ++#define CPLUSPLUS_CPP_SPEC "-D_GNU_SOURCE %(cpp)" ++ ++#define GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC \ ++ "%{shared:-lc} \ ++ %{!shared:%{profile:-lc_p}%{!profile:-lc}}" ++ ++#define GNU_USER_TARGET_LIB_SPEC \ ++ "%{pthread:-lpthread} " GNU_USER_TARGET_NO_PTHREADS_LIB_SPEC ++ ++#undef LIB_SPEC ++#define LIB_SPEC GNU_USER_TARGET_LIB_SPEC ++ ++#if defined(HAVE_LD_EH_FRAME_HDR) ++#define LINK_EH_SPEC "%{!static|static-pie:--eh-frame-hdr} " ++#endif ++ ++#undef LINK_GCC_C_SEQUENCE_SPEC ++#define LINK_GCC_C_SEQUENCE_SPEC \ ++ "%{static|static-pie:--start-group} %G %L \ ++ %{static|static-pie:--end-group}%{!static:%{!static-pie:%G}}" ++ ++/* Use --as-needed -lgcc_s for eh support. */ ++#ifdef HAVE_LD_AS_NEEDED ++#define USE_LD_AS_NEEDED 1 ++#endif ++ ++#define TARGET_POSIX_IO ++ ++#undef TARGET_LIBC_HAS_FUNCTION ++#define TARGET_LIBC_HAS_FUNCTION gnu_libc_has_function ++ ++/* Link -lasan early on the command line. 
For -static-libasan, don't link ++ it for -shared link, the executable should be compiled with -static-libasan ++ in that case, and for executable link with --{,no-}whole-archive around ++ it to force everything into the executable. And similarly for -ltsan ++ and -llsan. */ ++#if defined(HAVE_LD_STATIC_DYNAMIC) ++#undef LIBASAN_EARLY_SPEC ++#define LIBASAN_EARLY_SPEC \ ++ "%{!shared:libasan_preinit%O%s} " \ ++ "%{static-libasan:%{!shared:" LD_STATIC_OPTION \ ++ " --whole-archive -lasan --no-whole-archive " LD_DYNAMIC_OPTION \ ++ "}}%{!static-libasan:-lasan}" ++#undef LIBTSAN_EARLY_SPEC ++#define LIBTSAN_EARLY_SPEC \ ++ "%{!shared:libtsan_preinit%O%s} " \ ++ "%{static-libtsan:%{!shared:" LD_STATIC_OPTION \ ++ " --whole-archive -ltsan --no-whole-archive " LD_DYNAMIC_OPTION \ ++ "}}%{!static-libtsan:-ltsan}" ++#undef LIBLSAN_EARLY_SPEC ++#define LIBLSAN_EARLY_SPEC \ ++ "%{!shared:liblsan_preinit%O%s} " \ ++ "%{static-liblsan:%{!shared:" LD_STATIC_OPTION \ ++ " --whole-archive -llsan --no-whole-archive " LD_DYNAMIC_OPTION \ ++ "}}%{!static-liblsan:-llsan}" ++#endif +diff --git a/gcc/config/sw_64/linux-elf.h b/gcc/config/sw_64/linux-elf.h +new file mode 100644 +index 000000000..79543910b +--- /dev/null ++++ b/gcc/config/sw_64/linux-elf.h +@@ -0,0 +1,54 @@ ++/* Definitions of target machine for GNU compiler ++ for Sw_64 Linux-based GNU systems using ELF. ++ Copyright (C) 1996-2022 Free Software Foundation, Inc. ++ Contributed by Richard Henderson. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. 
++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#undef EXTRA_SPECS ++#define EXTRA_SPECS {"elf_dynamic_linker", ELF_DYNAMIC_LINKER}, ++ ++#define GLIBC_DYNAMIC_LINKER "/lib/ld-linux.so.2" ++#define UCLIBC_DYNAMIC_LINKER "/lib/ld-uClibc.so.0" ++#if DEFAULT_LIBC == LIBC_UCLIBC ++#define CHOOSE_DYNAMIC_LINKER(G, U) "%{mglibc:" G ";:" U "}" ++#elif DEFAULT_LIBC == LIBC_GLIBC ++#define CHOOSE_DYNAMIC_LINKER(G, U) "%{muclibc:" U ";:" G "}" ++#else ++#error "Unsupported DEFAULT_LIBC" ++#endif ++#define GNU_USER_DYNAMIC_LINKER \ ++ CHOOSE_DYNAMIC_LINKER (GLIBC_DYNAMIC_LINKER, UCLIBC_DYNAMIC_LINKER) ++ ++#define ELF_DYNAMIC_LINKER GNU_USER_DYNAMIC_LINKER ++ ++#define LINK_SPEC \ ++ "-m elf64sw_64 %{G*} %{relax:-relax} \ ++ %{O*:-O3} %{!O*:-O1} \ ++ %{shared:-shared} \ ++ %{!shared: \ ++ %{!static: \ ++ %{rdynamic:-export-dynamic} \ ++ -dynamic-linker %(elf_dynamic_linker)} \ ++ %{static:-static}}" ++ ++#undef LIB_SPEC ++#define LIB_SPEC \ ++ "%{pthread:-lpthread} " \ ++ "%{shared:-lc}%{!shared:%{profile:-lc_p}%{!profile:-lc}} " ++ ++#define TARGET_ASM_FILE_END file_end_indicate_exec_stack +diff --git a/gcc/config/sw_64/linux.h b/gcc/config/sw_64/linux.h +new file mode 100644 +index 000000000..38b93ed18 +--- /dev/null ++++ b/gcc/config/sw_64/linux.h +@@ -0,0 +1,109 @@ ++/* Definitions of target machine for GNU compiler, ++ for Sw_64 Linux-based GNU systems. ++ Copyright (C) 1996-2022 Free Software Foundation, Inc. ++ Contributed by Richard Henderson. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#define TARGET_OS_CPP_BUILTINS() \ ++ do \ ++ { \ ++ builtin_define ("__gnu_linux__"); \ ++ builtin_define ("_LONGLONG"); \ ++ builtin_define_std ("linux"); \ ++ builtin_define_std ("unix"); \ ++ builtin_assert ("system=linux"); \ ++ builtin_assert ("system=unix"); \ ++ builtin_assert ("system=posix"); \ ++ /* The GNU C++ standard library requires this. */ \ ++ if (c_dialect_cxx ()) \ ++ builtin_define ("_GNU_SOURCE"); \ ++ } while (0) ++ ++#undef LIB_SPEC ++#define LIB_SPEC \ ++ "%{pthread:-lpthread} \ ++ %{shared:-lc} \ ++ %{!shared: %{profile:-lc_p}%{!profile:-lc}}" ++ ++#undef CPP_SPEC ++#define CPP_SPEC "%{posix:-D_POSIX_SOURCE} %{pthread:-D_REENTRANT}" ++ ++/* Show that we need a GP when profiling. */ ++#undef TARGET_PROFILING_NEEDS_GP ++#define TARGET_PROFILING_NEEDS_GP 1 ++ ++/* Don't care about faults in the prologue. 
*/ ++#undef TARGET_CAN_FAULT_IN_PROLOGUE ++#define TARGET_CAN_FAULT_IN_PROLOGUE 1 ++ ++#undef WCHAR_TYPE ++#define WCHAR_TYPE "int" ++ ++#ifdef SINGLE_LIBC ++#define OPTION_GLIBC_P(opts) (DEFAULT_LIBC == LIBC_GLIBC) ++#define OPTION_UCLIBC_P(opts) (DEFAULT_LIBC == LIBC_UCLIBC) ++#define OPTION_BIONIC_P(opts) (DEFAULT_LIBC == LIBC_BIONIC) ++#undef OPTION_MUSL_P ++#define OPTION_MUSL_P(opts) (DEFAULT_LIBC == LIBC_MUSL) ++#else ++#define OPTION_GLIBC_P(opts) (linux_libc == LIBC_GLIBC) ++#define OPTION_UCLIBC_P(opts) (linux_libc == LIBC_UCLIBC) ++#define OPTION_BIONIC_P(opts) (linux_libc == LIBC_BIONIC) ++#undef OPTION_MUSL_P ++#define OPTION_MUSL_P(opts) (linux_libc == LIBC_MUSL) ++#endif ++#define OPTION_GLIBC OPTION_GLIBC_P (&global_options) ++#define OPTION_UCLIBC OPTION_UCLIBC_P (&global_options) ++#define OPTION_BIONIC OPTION_BIONIC_P (&global_options) ++#undef OPTION_MUSL ++#define OPTION_MUSL OPTION_MUSL_P (&global_options) ++ ++/* Determine what functions are present at the runtime; ++ this includes full c99 runtime and sincos. */ ++#undef TARGET_LIBC_HAS_FUNCTION ++#define TARGET_LIBC_HAS_FUNCTION linux_libc_has_function ++ ++#define TARGET_POSIX_IO ++ ++#define LINK_GCC_C_SEQUENCE_SPEC \ ++ "%{static|static-pie:--start-group} %G %L \ ++ %{static|static-pie:--end-group}%{!static:%{!static-pie:%G}}" ++ ++/* Use --as-needed -lgcc_s for eh support. */ ++#ifdef HAVE_LD_AS_NEEDED ++#define USE_LD_AS_NEEDED 1 ++#endif ++ ++/* Define if long doubles should be mangled as 'g'. */ ++#define TARGET_ALTERNATE_LONG_DOUBLE_MANGLING ++ ++/* -mcpu=native handling only makes sense with compiler running on ++ an Sw_64 chip. */ ++#if defined(__sw_64__) || defined(__sw_64) ++extern const char * ++host_detect_local_cpu (int argc, const char **argv); ++#define EXTRA_SPEC_FUNCTIONS {"local_cpu_detect", host_detect_local_cpu}, ++ ++#define MCPU_MTUNE_NATIVE_SPECS \ ++ " %{mcpu=native:%. 
*/ ++ ++#define TARGET_OS_CPP_BUILTINS() \ ++ do \ ++ { \ ++ NETBSD_OS_CPP_BUILTINS_ELF (); \ ++ } while (0) ++ ++/* NetBSD doesn't use the LANGUAGE* built-ins. */ ++#undef SUBTARGET_LANGUAGE_CPP_BUILTINS ++#define SUBTARGET_LANGUAGE_CPP_BUILTINS() /* nothing */ ++ ++/* Show that we need a GP when profiling. */ ++#undef TARGET_PROFILING_NEEDS_GP ++#define TARGET_PROFILING_NEEDS_GP 1 ++ ++/* Provide a CPP_SPEC appropriate for NetBSD/sw_64. We use ++ this to pull in CPP specs that all NetBSD configurations need. */ ++ ++#undef CPP_SPEC ++#define CPP_SPEC NETBSD_CPP_SPEC ++ ++#undef EXTRA_SPECS ++#define EXTRA_SPECS \ ++ {"netbsd_link_spec", NETBSD_LINK_SPEC_ELF}, \ ++ {"netbsd_entry_point", NETBSD_ENTRY_POINT}, \ ++ {"netbsd_endfile_spec", NETBSD_ENDFILE_SPEC}, ++ ++/* Provide a LINK_SPEC appropriate for a NetBSD/sw_64 ELF target. */ ++ ++#undef LINK_SPEC ++#define LINK_SPEC \ ++ "%{G*} %{relax:-relax} \ ++ %{O*:-O3} %{!O*:-O1} \ ++ %(netbsd_link_spec)" ++ ++#define NETBSD_ENTRY_POINT "__start" ++ ++/* Provide an ENDFILE_SPEC appropriate for NetBSD/sw_64 ELF. Here we ++ add crtend.o, which provides part of the support for getting ++ C++ file-scope static objects deconstructed after exiting "main". ++ ++ We also need to handle the GCC option `-ffast-math'. */ ++ ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC \ ++ "%{Ofast|ffast-math|funsafe-math-optimizations:crtfm%O%s} \ ++ %(netbsd_endfile_spec)" ++ ++#define HAVE_ENABLE_EXECUTE_STACK +diff --git a/gcc/config/sw_64/openbsd.h b/gcc/config/sw_64/openbsd.h +new file mode 100644 +index 000000000..6fca63ab4 +--- /dev/null ++++ b/gcc/config/sw_64/openbsd.h +@@ -0,0 +1,75 @@ ++/* Configuration file for an sw_64 OpenBSD target. ++ Copyright (C) 1999-2022 Free Software Foundation, Inc. ++ ++This file is part of GCC. 
++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Controlling the compilation driver. */ ++#undef TARGET_DEFAULT ++#define TARGET_DEFAULT (MASK_FPREGS | MASK_IEEE | MASK_IEEE_CONFORMANT) ++ ++#define LINK_SPEC \ ++ "%{!shared:%{!nostdlib:%{!r*:%{!e*:-e __start}}}} \ ++ %{shared:-shared} %{R*} \ ++ %{static:-Bstatic} \ ++ %{!static:-Bdynamic} \ ++ %{rdynamic:-export-dynamic} \ ++ %{assert*} \ ++ %{!dynamic-linker:-dynamic-linker /usr/libexec/ld.so}" ++ ++/* As an elf system, we need crtbegin/crtend stuff. */ ++#undef STARTFILE_SPEC ++#define STARTFILE_SPEC \ ++ "\ ++ %{!shared: %{pg:gcrt0%O%s} %{!pg:%{p:gcrt0%O%s} \ ++ %{!p:%{!static:crt0%O%s} %{static:%{nopie:crt0%O%s} \ ++ %{!nopie:rcrt0%O%s}}}} crtbegin%O%s} %{shared:crtbeginS%O%s}" ++#undef ENDFILE_SPEC ++#define ENDFILE_SPEC "%{!shared:crtend%O%s} %{shared:crtendS%O%s}" ++ ++/* run-time target specifications */ ++#define TARGET_OS_CPP_BUILTINS() \ ++ do \ ++ { \ ++ OPENBSD_OS_CPP_BUILTINS_ELF (); \ ++ OPENBSD_OS_CPP_BUILTINS_LP64 (); \ ++ } while (0) ++ ++/* Layout of source language data types. 
*/ ++ ++/* This must agree with */ ++#undef SIZE_TYPE ++#define SIZE_TYPE "long unsigned int" ++ ++#undef PTRDIFF_TYPE ++#define PTRDIFF_TYPE "long int" ++ ++#undef INTMAX_TYPE ++#define INTMAX_TYPE "long long int" ++ ++#undef UINTMAX_TYPE ++#define UINTMAX_TYPE "long long unsigned int" ++ ++#undef WCHAR_TYPE ++#define WCHAR_TYPE "int" ++ ++#undef WCHAR_TYPE_SIZE ++#define WCHAR_TYPE_SIZE 32 ++ ++#undef WINT_TYPE ++#define WINT_TYPE "int" ++ ++#define LOCAL_LABEL_PREFIX "." +diff --git a/gcc/config/sw_64/predicates.md b/gcc/config/sw_64/predicates.md +new file mode 100644 +index 000000000..b22086aa9 +--- /dev/null ++++ b/gcc/config/sw_64/predicates.md +@@ -0,0 +1,653 @@ ++;; Predicate definitions for Sw64. ++;; Copyright (C) 2004-2022 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++;; Return 1 if OP is the zero constant for MODE. ++(define_predicate "const0_operand" ++ (and (match_code "const_int,const_wide_int,const_double,const_vector") ++ (match_test "op == CONST0_RTX (mode)"))) ++ ++;; Returns true if OP is either the constant zero or a register. ++(define_predicate "reg_or_0_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const0_operand"))) ++ ++;; Return 1 if OP is a constant in the range of 0-63 (for a shift) or ++;; any register. 
++(define_predicate "reg_or_6bit_operand" ++ (if_then_else (match_code "const_int") ++ (match_test "INTVAL (op) >= 0 && INTVAL (op) < 64") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if OP is a constant in the range of 0-31 (for a shift) or ++;; any register. ++(define_predicate "reg_or_5bit_operand" ++ (if_then_else (match_code "const_int") ++ (match_test "INTVAL (op) >= 0 && INTVAL (op) < 32") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if OP is an 8-bit constant. ++(define_predicate "cint8_operand" ++ (and (match_code "const_int") ++ (match_test "INTVAL (op) >= 0 && INTVAL (op) < 256"))) ++ ++;; Return 1 if OP is an 8-bit constant or any register. ++(define_predicate "reg_or_8bit_operand" ++ (if_then_else (match_code "const_int") ++ (match_test "INTVAL (op) >= 0 && INTVAL (op) < 256") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if OP is a constant or any register. ++(define_predicate "reg_or_cint_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "const_int_operand"))) ++ ++;; Return 1 if the operand is a valid second operand to an add insn. ++(define_predicate "add_operand" ++ (if_then_else (match_code "const_int") ++ (match_test "satisfies_constraint_K (op) || satisfies_constraint_L (op)") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if the operand is a valid second operand to a ++;; sign-extending add insn. ++(define_predicate "sext_add_operand" ++ (if_then_else (match_code "const_int") ++ (match_test "satisfies_constraint_I (op) || satisfies_constraint_O (op)") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if the operand is a non-symbolic constant operand that ++;; does not satisfy add_operand. ++(define_predicate "non_add_const_operand" ++ (and (match_code "const_int,const_wide_int,const_double,const_vector") ++ (not (match_operand 0 "add_operand")))) ++ ++;; Return 1 if the operand is a non-symbolic, nonzero constant operand. 
++(define_predicate "non_zero_const_operand" ++ (and (match_code "const_int,const_wide_int,const_double,const_vector") ++ (not (match_test "op == CONST0_RTX (mode)")))) ++ ++;; Return 1 if OP is the constant 1, 2 or 3. ++(define_predicate "const123_operand" ++ (and (match_code "const_int") ++ (match_test "IN_RANGE (INTVAL (op), 1, 3)"))) ++ ++;; Return 1 if OP is the constant 2 or 3. ++(define_predicate "const23_operand" ++ (and (match_code "const_int") ++ (match_test "INTVAL (op) == 2 || INTVAL (op) == 3"))) ++ ++;; Return 1 if OP is the constant 4 or 8. ++(define_predicate "const48_operand" ++ (and (match_code "const_int") ++ (match_test "INTVAL (op) == 4 || INTVAL (op) == 8"))) ++ ++;; Return 1 if OP is a valid first operand to an AND insn. ++(define_predicate "and_operand" ++ (if_then_else (match_code "const_int") ++ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 0x100 ++ || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100 ++ || zap_mask (INTVAL (op))") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if OP is a valid first operand to an IOR or XOR insn. ++(define_predicate "or_operand" ++ (if_then_else (match_code "const_int") ++ (match_test "(unsigned HOST_WIDE_INT) INTVAL (op) < 0x100 ++ || (unsigned HOST_WIDE_INT) ~ INTVAL (op) < 0x100") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if OP is a constant that is the width, in bits, of an integral ++;; mode not larger than DImode. ++(define_predicate "mode_width_operand" ++ (match_code "const_int") ++{ ++ HOST_WIDE_INT i = INTVAL (op); ++ return i == 8 || i == 16 || i == 32 || i == 64; ++}) ++ ++;; Return 1 if OP is a constant that is a mask of ones of width of an ++;; integral machine mode not larger than DImode. 
++(define_predicate "mode_mask_operand" ++ (match_code "const_int") ++{ ++ HOST_WIDE_INT value = INTVAL (op); ++ ++ if (value == 0xff) ++ return 1; ++ if (value == 0xffff) ++ return 1; ++ if (value == 0xffffffff) ++ return 1; ++ if (value == -1) ++ return 1; ++ ++ return 0; ++}) ++ ++;; Return 1 if OP is a multiple of 8 less than 64. ++(define_predicate "mul8_operand" ++ (match_code "const_int") ++{ ++ unsigned HOST_WIDE_INT i = INTVAL (op); ++ return i < 64 && i % 8 == 0; ++}) ++ ++;; Return 1 if OP is a hard floating-point register. ++(define_predicate "hard_fp_register_operand" ++ (match_operand 0 "register_operand") ++{ ++ if (SUBREG_P (op)) ++ op = SUBREG_REG (op); ++ return REGNO_REG_CLASS (REGNO (op)) == FLOAT_REGS; ++}) ++ ++;; Return 1 if OP is a hard general register. ++(define_predicate "hard_int_register_operand" ++ (match_operand 0 "register_operand") ++{ ++ if (SUBREG_P (op)) ++ op = SUBREG_REG (op); ++ return REGNO_REG_CLASS (REGNO (op)) == GENERAL_REGS; ++}) ++ ++;; Return 1 if OP is a valid operand for the source of a move insn. ++(define_predicate "input_operand" ++ (match_operand 0 "general_operand") ++{ ++ switch (GET_CODE (op)) ++ { ++ case LABEL_REF: ++ case SYMBOL_REF: ++ case CONST: ++ if (TARGET_EXPLICIT_RELOCS) ++ { ++ /* We don't split symbolic operands into something unintelligable ++ until after reload, but we do not wish non-small, non-global ++ symbolic operands to be reconstructed from their high/lo_sum ++ form. */ ++ return (small_symbolic_operand (op, mode) ++ || global_symbolic_operand (op, mode) ++ || gotdtp_symbolic_operand (op, mode) ++ || gottp_symbolic_operand (op, mode)); ++ } ++ /* VMS still has a 32-bit mode. 
*/ ++ return mode == ptr_mode || mode == Pmode; ++ ++ case HIGH: ++ return (TARGET_EXPLICIT_RELOCS ++ && local_symbolic_operand (XEXP (op, 0), mode)); ++ ++ case REG: ++ return 1; ++ ++ case SUBREG: ++ if (register_operand (op, mode)) ++ return 1; ++ /* fall through */ ++ case MEM: ++ return ((TARGET_BWX || (mode != HImode && mode != QImode)) ++ && general_operand (op, mode)); ++ ++ case CONST_WIDE_INT: ++ case CONST_DOUBLE: ++ return op == CONST0_RTX (mode); ++ ++ case CONST_VECTOR: ++ if (reload_in_progress || reload_completed) ++ return sw_64_legitimate_constant_p (mode, op); ++ return op == CONST0_RTX (mode); ++ ++ case CONST_INT: ++ if (mode == QImode || mode == HImode) ++ return true; ++ if (reload_in_progress || reload_completed) ++ return sw_64_legitimate_constant_p (mode, op); ++ return add_operand (op, mode); ++ ++ default: ++ gcc_unreachable (); ++ } ++ return 0; ++}) ++ ++;; Return 1 if OP is a SYMBOL_REF for a function known to be in this ++;; file, and in the same section as the current function. ++ ++(define_predicate "samegp_function_operand" ++ (match_code "symbol_ref") ++{ ++ /* Easy test for recursion. */ ++ if (op == XEXP (DECL_RTL (current_function_decl), 0)) ++ return true; ++ ++ /* Functions that are not local can be overridden, and thus may ++ not share the same gp. */ ++ if (! SYMBOL_REF_LOCAL_P (op)) ++ return false; ++ ++ /* If -msmall-data is in effect, assume that there is only one GP ++ for the module, and so any local symbol has this property. We ++ need explicit relocations to be able to enforce this for symbols ++ not defined in this unit of translation, however. */ ++ if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA) ++ return true; ++ ++ /* Functions that are not external are defined in this UoT, ++ and thus must share the same gp. */ ++ return ! SYMBOL_REF_EXTERNAL_P (op); ++}) ++ ++;; Return 1 if OP is a SYMBOL_REF for which we can make a call via bsr. 
++(define_predicate "direct_call_operand" ++ (match_operand 0 "samegp_function_operand") ++{ ++ /* If profiling is implemented via linker tricks, we can't jump ++ to the nogp alternate entry point. Note that crtl->profile ++ would not be correct, since that doesn't indicate if the target ++ function uses profiling. */ ++ /* ??? TARGET_PROFILING_NEEDS_GP isn't really the right test, ++ but is approximately correct for the SYSV ABIs. Don't know ++ what to do for VMS, NT, or UMK. */ ++ if (!TARGET_PROFILING_NEEDS_GP && profile_flag) ++ return false; ++ ++ /* Must be a function. In some cases folks create thunks in static ++ data structures and then make calls to them. If we allow the ++ direct call, we'll get an error from the linker about !samegp reloc ++ against a symbol without a .prologue directive. */ ++ if (!SYMBOL_REF_FUNCTION_P (op)) ++ return false; ++ ++ /* Must be "near" so that the branch is assumed to reach. With ++ -msmall-text, this is assumed true of all local symbols. Since ++ we've already checked samegp, locality is already assured. */ ++ if (TARGET_SMALL_TEXT) ++ return true; ++ ++ return false; ++}) ++ ++;; Return 1 if OP is a valid operand for the MEM of a CALL insn. ++;; ++;; For TARGET_ABI_SYSV, we want to restrict to R27 or a pseudo. ++ ++(define_predicate "call_operand" ++ (ior (match_code "symbol_ref") ++ (and (match_code "reg") ++ (ior (not (match_test "TARGET_ABI_OSF")) ++ (not (match_test "HARD_REGISTER_P (op)")) ++ (match_test "REGNO (op) == R27_REG"))))) ++ ++;; Return true if OP is a LABEL_REF, or SYMBOL_REF or CONST referencing ++;; a (non-tls) variable known to be defined in this file. 
++(define_predicate "local_symbolic_operand" ++ (match_code "label_ref,const,symbol_ref") ++{ ++ if (GET_CODE (op) == CONST ++ && GET_CODE (XEXP (op, 0)) == PLUS ++ && CONST_INT_P (XEXP (XEXP (op, 0), 1))) ++ op = XEXP (XEXP (op, 0), 0); ++ ++ if (GET_CODE (op) == LABEL_REF) ++ return 1; ++ ++ if (GET_CODE (op) != SYMBOL_REF) ++ return 0; ++ ++ return (SYMBOL_REF_LOCAL_P (op) ++ && !SYMBOL_REF_WEAK (op) ++ && !SYMBOL_REF_TLS_MODEL (op)); ++}) ++ ++;; Return true if OP is a SYMBOL_REF or CONST referencing a variable ++;; known to be defined in this file in the small data area. ++(define_predicate "small_symbolic_operand" ++ (match_code "const,symbol_ref") ++{ ++ HOST_WIDE_INT ofs = 0, max_ofs = 0; ++ ++ if (! TARGET_SMALL_DATA) ++ return false; ++ ++ if (GET_CODE (op) == CONST ++ && GET_CODE (XEXP (op, 0)) == PLUS ++ && CONST_INT_P (XEXP (XEXP (op, 0), 1))) ++ { ++ ofs = INTVAL (XEXP (XEXP (op, 0), 1)); ++ op = XEXP (XEXP (op, 0), 0); ++ } ++ ++ if (GET_CODE (op) != SYMBOL_REF) ++ return false; ++ ++ /* ??? There's no encode_section_info equivalent for the rtl ++ constant pool, so SYMBOL_FLAG_SMALL never gets set. */ ++ if (CONSTANT_POOL_ADDRESS_P (op)) ++ { ++ max_ofs = GET_MODE_SIZE (get_pool_mode (op)); ++ if (max_ofs > g_switch_value) ++ return false; ++ } ++ else if (SYMBOL_REF_LOCAL_P (op) ++ && SYMBOL_REF_SMALL_P (op) ++ && !SYMBOL_REF_WEAK (op) ++ && !SYMBOL_REF_TLS_MODEL (op)) ++ { ++ if (SYMBOL_REF_DECL (op)) ++ max_ofs = tree_to_uhwi (DECL_SIZE_UNIT (SYMBOL_REF_DECL (op))); ++ } ++ else ++ return false; ++ ++ /* Given that we know that the GP is always 8 byte aligned, we can ++ always adjust by 7 without overflowing. */ ++ if (max_ofs < 8) ++ max_ofs = 8; ++ ++ /* Since we know this is an object in a small data section, we know the ++ entire section is addressable via GP. We don't know where the section ++ boundaries are, but we know the entire object is within. */ ++ /* support -mgprel-size option. 
*/ ++ /*return IN_RANGE (ofs, 0, max_ofs - 1);*/ ++ ++ if (sw_64_gprel_size == 16) ++ return IN_RANGE (ofs, 0, max_ofs - 1); ++ if (sw_64_gprel_size == 32) ++ return false; ++}) ++ ++;; Return true if OP is a SYMBOL_REF or CONST referencing a variable ++;; not known (or known not) to be defined in this file. ++(define_predicate "global_symbolic_operand" ++ (match_code "const,symbol_ref") ++{ ++ if (GET_CODE (op) == CONST ++ && GET_CODE (XEXP (op, 0)) == PLUS ++ && CONST_INT_P (XEXP (XEXP (op, 0), 1))) ++ op = XEXP (XEXP (op, 0), 0); ++ ++ if (GET_CODE (op) != SYMBOL_REF) ++ return 0; ++ ++ return ((!SYMBOL_REF_LOCAL_P (op) || SYMBOL_REF_WEAK (op)) ++ && !SYMBOL_REF_TLS_MODEL (op)); ++}) ++ ++;; Returns 1 if OP is a symbolic operand, i.e. a symbol_ref or a label_ref, ++;; possibly with an offset. ++(define_predicate "symbolic_operand" ++ (ior (match_code "symbol_ref,label_ref") ++ (and (match_code "const") ++ (match_code "plus" "0") ++ (match_code "symbol_ref,label_ref" "00") ++ (match_code "const_int" "01")))) ++ ++;; Return true if OP is valid for 16-bit DTP relative relocations. ++(define_predicate "dtp16_symbolic_operand" ++ (and (match_code "const") ++ (match_test "tls_symbolic_operand_1 (op, 16, UNSPEC_DTPREL)"))) ++ ++;; Return true if OP is valid for 32-bit DTP relative relocations. ++(define_predicate "dtp32_symbolic_operand" ++ (and (match_code "const") ++ (match_test "tls_symbolic_operand_1 (op, 32, UNSPEC_DTPREL)"))) ++ ++;; Return true if OP is valid for 64-bit DTP relative relocations. ++(define_predicate "gotdtp_symbolic_operand" ++ (and (match_code "const") ++ (match_test "tls_symbolic_operand_1 (op, 64, UNSPEC_DTPREL)"))) ++ ++;; Return true if OP is valid for 16-bit TP relative relocations. ++(define_predicate "tp16_symbolic_operand" ++ (and (match_code "const") ++ (match_test "tls_symbolic_operand_1 (op, 16, UNSPEC_TPREL)"))) ++ ++;; Return true if OP is valid for 32-bit TP relative relocations. 
++(define_predicate "tp32_symbolic_operand" ++ (and (match_code "const") ++ (match_test "tls_symbolic_operand_1 (op, 32, UNSPEC_TPREL)"))) ++ ++;; Return true if OP is valid for 64-bit TP relative relocations. ++(define_predicate "gottp_symbolic_operand" ++ (and (match_code "const") ++ (match_test "tls_symbolic_operand_1 (op, 64, UNSPEC_TPREL)"))) ++ ++;; Return 1 if this memory address is a known aligned register plus ++;; a constant. It must be a valid address. This means that we can do ++;; this as an aligned reference plus some offset. ++;; ++;; Take into account what reload will do. Oh god this is awful. ++;; The horrible comma-operator construct below is to prevent genrecog ++;; from thinking that this predicate accepts REG and SUBREG. We don't ++;; use recog during reload, so pretending these codes are accepted ++;; pessimizes things a tad. ++ ++(define_special_predicate "aligned_memory_operand" ++ (ior (match_test "op = resolve_reload_operand (op), 0") ++ (match_code "mem")) ++{ ++ rtx base; ++ int offset; ++ ++ if (MEM_ALIGN (op) >= 32) ++ return 1; ++ ++ op = XEXP (op, 0); ++ ++ /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo) ++ sorts of constructs. Dig for the real base register. */ ++ if (reload_in_progress ++ && GET_CODE (op) == PLUS ++ && GET_CODE (XEXP (op, 0)) == PLUS) ++ { ++ base = XEXP (XEXP (op, 0), 0); ++ offset = INTVAL (XEXP (op, 1)); ++ } ++ else ++ { ++ if (! memory_address_p (mode, op)) ++ return 0; ++ if (GET_CODE (op) == PLUS) ++ { ++ base = XEXP (op, 0); ++ offset = INTVAL (XEXP (op, 1)); ++ } ++ else ++ { ++ base = op; ++ offset = 0; ++ } ++ } ++ ++ if (offset % GET_MODE_SIZE (mode)) ++ return 0; ++ ++ return (REG_P (base) && REGNO_POINTER_ALIGN (REGNO (base)) >= 32); ++}) ++ ++;; Similar, but return 1 if OP is a MEM which is not alignable. 
++ ++(define_special_predicate "unaligned_memory_operand" ++ (ior (match_test "op = resolve_reload_operand (op), 0") ++ (match_code "mem")) ++{ ++ rtx base; ++ int offset; ++ ++ if (MEM_ALIGN (op) >= 32) ++ return 0; ++ ++ op = XEXP (op, 0); ++ ++ /* LEGITIMIZE_RELOAD_ADDRESS creates (plus (plus reg const_hi) const_lo) ++ sorts of constructs. Dig for the real base register. */ ++ if (reload_in_progress ++ && GET_CODE (op) == PLUS ++ && GET_CODE (XEXP (op, 0)) == PLUS) ++ { ++ base = XEXP (XEXP (op, 0), 0); ++ offset = INTVAL (XEXP (op, 1)); ++ } ++ else ++ { ++ if (! memory_address_p (mode, op)) ++ return 0; ++ if (GET_CODE (op) == PLUS) ++ { ++ base = XEXP (op, 0); ++ offset = INTVAL (XEXP (op, 1)); ++ } ++ else ++ { ++ base = op; ++ offset = 0; ++ } ++ } ++ ++ if (offset % GET_MODE_SIZE (mode)) ++ return 1; ++ ++ return (REG_P (base) && REGNO_POINTER_ALIGN (REGNO (base)) < 32); ++}) ++ ++;; Return 1 if OP is any memory location. During reload a pseudo matches. ++(define_special_predicate "any_memory_operand" ++ (match_code "mem,reg,subreg") ++{ ++ if (SUBREG_P (op)) ++ op = SUBREG_REG (op); ++ ++ if (MEM_P (op)) ++ return true; ++ if (reload_in_progress && REG_P (op)) ++ { ++ unsigned regno = REGNO (op); ++ if (HARD_REGISTER_NUM_P (regno)) ++ return false; ++ else ++ return reg_renumber[regno] < 0; ++ } ++ ++ return false; ++}) ++ ++;; Returns 1 if OP is not an eliminable register. ++;; ++;; This exists to cure a pathological failure in the s8addq (et al) patterns, ++;; ++;; long foo () { long t; bar(); return (long) &t * 26107; } ++;; ++;; which run afoul of a hack in reload to cure a (presumably) similar ++;; problem with lea-type instructions on other targets. But there is ++;; one of us and many of them, so work around the problem by selectively ++;; preventing combine from making the optimization. 
++ ++(define_predicate "reg_not_elim_operand" ++ (match_operand 0 "register_operand") ++{ ++ if (SUBREG_P (op)) ++ op = SUBREG_REG (op); ++ return op != frame_pointer_rtx && op != arg_pointer_rtx; ++}) ++ ++;; Accept a register, but not a subreg of any kind. This allows us to ++;; avoid pathological cases in reload wrt data movement common in ++;; int->fp conversion. */ ++(define_predicate "reg_no_subreg_operand" ++ (and (match_code "reg") ++ (match_operand 0 "register_operand"))) ++ ++;; Return 1 if OP is a valid Sw_64 comparison operator for "cbranch" ++;; instructions. ++(define_predicate "sw_64_cbranch_operator" ++ (ior (match_operand 0 "ordered_comparison_operator") ++ (match_code "ordered,unordered"))) ++ ++;; Return 1 if OP is a valid Sw_64 comparison operator for "cmp" style ++;; instructions. ++(define_predicate "sw_64_comparison_operator" ++ (match_code "eq,le,lt,leu,ltu")) ++ ++;; Similarly, but with swapped operands. ++(define_predicate "sw_64_swapped_comparison_operator" ++ (match_code "eq,ge,gt,gtu")) ++ ++;; Return 1 if OP is a valid Sw_64 comparison operator against zero ++;; for "bcc" style instructions. ++(define_predicate "sw_64_zero_comparison_operator" ++ (match_code "eq,ne,le,lt,leu,ltu")) ++ ++;; Return 1 if OP is a signed comparison operation. ++(define_predicate "signed_comparison_operator" ++ (match_code "eq,ne,le,lt,ge,gt")) ++ ++;; Return 1 if OP is a valid Sw_64 floating point comparison operator. ++(define_predicate "sw_64_fp_comparison_operator" ++ (match_code "eq,le,lt,unordered")) ++ ++;; Return 1 if this is a divide or modulus operator. ++(define_predicate "divmod_operator" ++ (match_code "div,mod,udiv,umod")) ++ ++;; Return 1 if this is a float->int conversion operator. ++(define_predicate "fix_operator" ++ (match_code "fix,unsigned_fix")) ++ ++;; Recognize an addition operation that includes a constant. Used to ++;; convince reload to canonize (plus (plus reg c1) c2) during register ++;; elimination. 
++ ++(define_predicate "addition_operation" ++ (and (match_code "plus") ++ (match_test "register_operand (XEXP (op, 0), mode) ++ && satisfies_constraint_K (XEXP (op, 1))"))) ++ ++;; For TARGET_EXPLICIT_RELOCS, we don't obfuscate a SYMBOL_REF to a ++;; small symbolic operand until after reload. At which point we need ++;; to replace (mem (symbol_ref)) with (mem (lo_sum $29 symbol_ref)) ++;; so that sched2 has the proper dependency information. */ ++(define_predicate "some_small_symbolic_operand" ++ (match_code "set,parallel,prefetch,unspec,unspec_volatile") ++{ ++ /* Avoid search unless necessary. */ ++ if (!TARGET_EXPLICIT_RELOCS || !reload_completed) ++ return false; ++ return some_small_symbolic_operand_int (op); ++}) ++ ++;; Accept a register, or a memory if BWX is enabled. ++(define_predicate "reg_or_bwx_memory_operand" ++ (ior (match_operand 0 "register_operand") ++ (and (match_test "TARGET_BWX") ++ (match_operand 0 "memory_operand")))) ++ ++;; Accept a memory whose address is only a register. ++(define_predicate "mem_noofs_operand" ++ (and (match_code "mem") ++ (match_code "reg" "0"))) ++ ++;; Accept a register, or any immediate. ++(define_predicate "reg_or_immediate_operand" ++ (ior (match_operand 0 "register_operand") ++ (match_operand 0 "immediate_operand"))) ++ ++(define_predicate "sw_64_branch_combination" ++ (match_code "eq,ne,le,lt,ge,gt,leu,ltu,geu,gtu")) ++ ++(define_predicate "sw_64_swapped_branch_combination" ++ (match_code "ne,ge,gt,geu,gtu")) +diff --git a/gcc/config/sw_64/sw6.md b/gcc/config/sw_64/sw6.md +new file mode 100644 +index 000000000..c8971ec34 +--- /dev/null ++++ b/gcc/config/sw_64/sw6.md +@@ -0,0 +1,180 @@ ++;; Scheduling description for Sw64 SW6. ++;; Copyright (C) 2002-2022 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. 
++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++; SW6 can issue 4 insns per clock. It's out-of-order, so this isn't ++; expected to help over-much, but a precise description can be important ++; for software pipelining. ++; ++; SW6 has two symmetric pairs ("clusters") of two asymmetric integer ++; units ("upper" and "lower"), yielding pipe names U0, U1, L0, L1. ++; ++; ??? The clusters have independent register files that are re-synced ++; every cycle. Thus there is one additional cycle of latency between ++; insns issued on different clusters. Possibly model that by duplicating ++; all EBOX insn_reservations that can issue to either cluster, increasing ++; all latencies by one, and adding bypasses within the cluster. ++; ++; ??? In addition, instruction order affects cluster issue. ++ ++(define_automaton "sw6_0,sw6_1") ++(define_cpu_unit "sw6_u0,sw6_u1,sw6_l0,sw6_l1" "sw6_0") ++(define_reservation "sw6_u" "sw6_u0|sw6_u1") ++(define_reservation "sw6_l" "sw6_l0|sw6_l1") ++(define_reservation "sw6_ebox" "sw6_u|sw6_l") ++ ++(define_cpu_unit "sw6_fa" "sw6_1") ++(define_cpu_unit "sw6_fm,sw6_fst0,sw6_fst1" "sw6_0") ++(define_reservation "sw6_fst" "sw6_fst0|sw6_fst1") ++ ++; Assume type "multi" single issues. 
++(define_insn_reservation "sw6_multi" 1 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "multi")) ++ "sw6_u0+sw6_u1+sw6_l0+sw6_l1+sw6_fa+sw6_fm+sw6_fst0+sw6_fst1") ++ ++; Integer loads take at least 3 clocks, and only issue to lower units. ++; adjust_cost still factors in user-specified memory latency, so return 1 here. ++(define_insn_reservation "sw6_ild" 4 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "ild,ldsym,ld_l")) ++ "sw6_l") ++ ++(define_insn_reservation "sw6_ist" 4 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "ist,st_c")) ++ "sw6_l") ++ ++(define_insn_reservation "sw6_mb" 1 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "mb")) ++ "sw6_l1") ++ ++; FP loads take at least 4 clocks. adjust_cost still factors ++; in user-specified memory latency, so return 2 here. ++(define_insn_reservation "sw6_fld" 2 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "fld")) ++ "sw6_l") ++ ++; The FPU communicates with memory and the integer register file ++; via two fp store units. We need a slot in the fst immediately, and ++; a slot in LOW after the operand data is ready. At which point the ++; data may be moved either to the store queue or the integer register ++; file and the insn retired. ++ ++(define_insn_reservation "sw6_fst" 3 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "fst")) ++ "sw6_fst,nothing,sw6_l") ++ ++; Arithmetic goes anywhere. ++(define_insn_reservation "sw6_arith" 1 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "iadd,ilog,icmp")) ++ "sw6_ebox") ++ ++; Motion video insns also issue only to U0, and take three ticks. ++(define_insn_reservation "sw6_mvi" 3 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "mvi")) ++ "sw6_u0") ++ ++; Shifts issue to upper units. ++(define_insn_reservation "sw6_shift" 1 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "shift")) ++ "sw6_u") ++ ++; Multiplies issue only to U1, and all take 4 ticks. 
++(define_insn_reservation "sw6_imul" 4 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "imul")) ++ "sw6_u1") ++ ++; Conditional moves decompose into two independent primitives, each taking ++; one cycle. Since sw6 is out-of-order, we can't see anything but two cycles. ++(define_insn_reservation "sw6_icmov" 1 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "icmov")) ++ "sw6_ebox,sw6_ebox") ++ ++; Integer branches issue to upper units ++(define_insn_reservation "sw6_ibr" 1 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "ibr,callpal")) ++ "sw6_u") ++ ++; Calls only issue to L0. ++(define_insn_reservation "sw6_jsr" 1 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "call")) ++ "sw6_l0") ++ ++; Ftoi/itof only issue to lower pipes. ++(define_insn_reservation "sw6_itof" 4 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "itof")) ++ "sw6_l") ++ ++(define_insn_reservation "sw6_ftoi" 4 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "ftoi")) ++ "sw6_fst,nothing,sw6_l") ++ ++(define_insn_reservation "sw6_fmul" 6 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "fmul")) ++ "sw6_fm") ++ ++(define_insn_reservation "sw6_fadd" 6 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "fadd,fcpys,fbr")) ++ "sw6_fa") ++ ++(define_bypass 8 "sw6_fmul,sw6_fadd" "sw6_fst,sw6_ftoi") ++ ++(define_insn_reservation "sw6_fcmov" 2 ++ (and (eq_attr "tune" "sw6") ++ (eq_attr "type" "fcmov")) ++ "sw6_fa,nothing*3,sw6_fa") ++ ++(define_bypass 4 "sw6_fcmov" "sw6_fst,sw6_ftoi") ++ ++(define_insn_reservation "sw6_fdivsf" 19 ++ (and (eq_attr "tune" "sw6") ++ (and (eq_attr "type" "fdiv") ++ (eq_attr "opsize" "si"))) ++ "sw6_fa*9") ++ ++(define_insn_reservation "sw6_fdivdf" 34 ++ (and (eq_attr "tune" "sw6") ++ (and (eq_attr "type" "fdiv") ++ (eq_attr "opsize" "di"))) ++ "sw6_fa*12") ++(define_insn_reservation "sw6_sqrtsf" 19 ++ (and (eq_attr "tune" "sw6") ++ (and (eq_attr "type" "fsqrt") ++ (eq_attr "opsize" "si"))) ++ "sw6_fa*15") ++ ++(define_insn_reservation "sw6_sqrtdf" 33 ++ (and (eq_attr 
"tune" "sw6") ++ (and (eq_attr "type" "fsqrt") ++ (eq_attr "opsize" "di"))) ++ "sw6_fa*30") +diff --git a/gcc/config/sw_64/sw8.md b/gcc/config/sw_64/sw8.md +new file mode 100644 +index 000000000..7946cdddd +--- /dev/null ++++ b/gcc/config/sw_64/sw8.md +@@ -0,0 +1,181 @@ ++;; Scheduling description for Sw64 SW8. ++;; Copyright (C) 2002-2022 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++; SW6 can issue 4 insns per clock. It's out-of-order, so this isn't ++; expected to help over-much, but a precise description can be important ++; for software pipelining. ++; ++; SW6 has two symmetric pairs ("clusters") of two asymmetric integer ++; units ("upper" and "lower"), yielding pipe names U0, U1, L0, L1. ++; ++; ??? The clusters have independent register files that are re-synced ++; every cycle. Thus there is one additional cycle of latency between ++; insns issued on different clusters. Possibly model that by duplicating ++; all EBOX insn_reservations that can issue to either cluster, increasing ++; all latencies by one, and adding bypasses within the cluster. ++; ++; ??? In addition, instruction order affects cluster issue. 
++ ++(define_automaton "sw8_0,sw8_1") ++(define_cpu_unit "sw8_u0,sw8_u1,sw8_l0,sw8_l1" "sw8_0") ++(define_reservation "sw8_u" "sw8_u0|sw8_u1") ++(define_reservation "sw8_l" "sw8_l0|sw8_l1") ++(define_reservation "sw8_ebox" "sw8_u|sw8_l") ++ ++(define_cpu_unit "sw8_fa" "sw8_1") ++(define_cpu_unit "sw8_fm,sw8_fst0,sw8_fst1" "sw8_0") ++(define_reservation "sw8_fst" "sw8_fst0|sw8_fst1") ++ ++; Assume type "multi" single issues. ++(define_insn_reservation "sw8_multi" 1 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "multi")) ++ "sw8_u0+sw8_u1+sw8_l0+sw8_l1+sw8_fa+sw8_fm+sw8_fst0+sw8_fst1") ++ ++; Integer loads take at least 3 clocks, and only issue to lower units. ++; adjust_cost still factors in user-specified memory latency, so return 1 here. ++(define_insn_reservation "sw8_ild" 4 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "ild,ldsym,ld_l")) ++ "sw8_l") ++ ++(define_insn_reservation "sw8_ist" 4 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "ist,st_c")) ++ "sw8_l") ++ ++(define_insn_reservation "sw8_mb" 1 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "mb")) ++ "sw8_l1") ++ ++; FP loads take at least 4 clocks. adjust_cost still factors ++; in user-specified memory latency, so return 2 here. ++(define_insn_reservation "sw8_fld" 2 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "fld")) ++ "sw8_l") ++ ++; The FPU communicates with memory and the integer register file ++; via two fp store units. We need a slot in the fst immediately, and ++; a slot in LOW after the operand data is ready. At which point the ++; data may be moved either to the store queue or the integer register ++; file and the insn retired. ++ ++(define_insn_reservation "sw8_fst" 3 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "fst")) ++ "sw8_fst,nothing,sw8_l") ++ ++; Arithmetic goes anywhere. 
++(define_insn_reservation "sw8_arith" 1 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "iadd,ilog,icmp")) ++ "sw8_ebox") ++ ++; Motion video insns also issue only to U0, and take three ticks. ++(define_insn_reservation "sw8_mvi" 3 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "mvi")) ++ "sw8_u0") ++ ++; Shifts issue to upper units. ++(define_insn_reservation "sw8_shift" 1 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "shift")) ++ "sw8_u") ++ ++; Multiplies issue only to U1, and all take 7 ticks. ++(define_insn_reservation "sw8_imul" 7 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "imul")) ++ "sw8_u1") ++ ++; Conditional moves decompose into two independent primitives, each taking ++; one cycle. Since sw8 is out-of-order, we can't see anything but two cycles. ++(define_insn_reservation "sw8_icmov" 2 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "icmov")) ++ "sw8_ebox,sw8_ebox") ++ ++; Integer branches issue to upper units ++(define_insn_reservation "sw8_ibr" 1 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "ibr,callpal")) ++ "sw8_u") ++ ++; Calls only issue to L0. ++(define_insn_reservation "sw8_jsr" 1 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "call")) ++ "sw8_l0") ++ ++; Ftoi/itof only issue to lower pipes. 
++(define_insn_reservation "sw8_itof" 3 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "itof")) ++ "sw8_l") ++ ++(define_insn_reservation "sw8_ftoi" 3 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "ftoi")) ++ "sw8_fst,nothing,sw8_l") ++ ++(define_insn_reservation "sw8_fmul" 4 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "fmul")) ++ "sw8_fm") ++ ++(define_insn_reservation "sw8_fadd" 4 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "fadd,fcpys,fbr")) ++ "sw8_fa") ++ ++(define_bypass 6 "sw8_fmul,sw8_fadd" "sw8_fst,sw8_ftoi") ++ ++(define_insn_reservation "sw8_fcmov" 8 ++ (and (eq_attr "tune" "sw8") ++ (eq_attr "type" "fcmov")) ++ "sw8_fa,nothing*3,sw8_fa") ++ ++(define_bypass 10 "sw8_fcmov" "sw8_fst,sw8_ftoi") ++ ++(define_insn_reservation "sw8_fdivsf" 12 ++ (and (eq_attr "tune" "sw8") ++ (and (eq_attr "type" "fdiv") ++ (eq_attr "opsize" "si"))) ++ "sw8_fa*9") ++ ++(define_insn_reservation "sw8_fdivdf" 15 ++ (and (eq_attr "tune" "sw8") ++ (and (eq_attr "type" "fdiv") ++ (eq_attr "opsize" "di"))) ++ "sw8_fa*12") ++ ++(define_insn_reservation "sw8_sqrtsf" 18 ++ (and (eq_attr "tune" "sw8") ++ (and (eq_attr "type" "fsqrt") ++ (eq_attr "opsize" "si"))) ++ "sw8_fa*15") ++ ++(define_insn_reservation "sw8_sqrtdf" 33 ++ (and (eq_attr "tune" "sw8") ++ (and (eq_attr "type" "fsqrt") ++ (eq_attr "opsize" "di"))) ++ "sw8_fa*30") +diff --git a/gcc/config/sw_64/sw_64-modes.def b/gcc/config/sw_64/sw_64-modes.def +new file mode 100644 +index 000000000..ce55aca9a +--- /dev/null ++++ b/gcc/config/sw_64/sw_64-modes.def +@@ -0,0 +1,27 @@ ++/* Sw_64 extra machine modes. ++ Copyright (C) 2003-2022 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. 
++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* 128-bit floating point. This gets reset in sw_64_option_override ++ if VAX float format is in use. */ ++FLOAT_MODE (TF, 16, ieee_quad_format); ++ ++/* Vector modes. */ ++VECTOR_MODES (INT, 8); /* V8QI V4HI V2SI */ ++VECTOR_MODE (INT, QI, 4); /* V4QI */ ++VECTOR_MODE (INT, QI, 2); /* V2QI */ +diff --git a/gcc/config/sw_64/sw_64-passes.def b/gcc/config/sw_64/sw_64-passes.def +new file mode 100644 +index 000000000..36c384c6f +--- /dev/null ++++ b/gcc/config/sw_64/sw_64-passes.def +@@ -0,0 +1,21 @@ ++/* Description of target passes for Sw64 ++ Copyright (C) 2016-2022 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. 
*/ ++ ++ INSERT_PASS_AFTER (pass_convert_to_eh_region_ranges, 1, pass_handle_trap_shadows); ++ INSERT_PASS_BEFORE (pass_shorten_branches, 1, pass_align_insns); +diff --git a/gcc/config/sw_64/sw_64-protos.h b/gcc/config/sw_64/sw_64-protos.h +new file mode 100644 +index 000000000..42cc6be0d +--- /dev/null ++++ b/gcc/config/sw_64/sw_64-protos.h +@@ -0,0 +1,152 @@ ++/* Prototypes for sw_64.c functions used in the md file & elsewhere. ++ Copyright (C) 1999-2022 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. 
*/ ++ ++extern int sw_64_next_sequence_number; ++ ++extern void ++literal_section (void); ++extern int zap_mask (HOST_WIDE_INT); ++extern bool ++direct_return (void); ++ ++extern HOST_WIDE_INT ++sw_64_initial_elimination_offset (unsigned int, unsigned int); ++extern void ++sw_64_expand_prologue (void); ++extern void ++sw_64_expand_epilogue (void); ++extern void ++sw_64_output_filename (FILE *, const char *); ++ ++extern bool sw_64_legitimate_constant_p (machine_mode, rtx); ++extern rtx ++sw_64_legitimize_reload_address (rtx, machine_mode, int, int, int); ++ ++extern rtx split_small_symbolic_operand (rtx); ++ ++extern void ++get_aligned_mem (rtx, rtx *, rtx *); ++extern rtx get_unaligned_address (rtx); ++extern rtx get_unaligned_offset (rtx, HOST_WIDE_INT); ++extern enum reg_class sw_64_preferred_reload_class (rtx, enum reg_class); ++ ++extern void sw_64_set_memflags (rtx, rtx); ++extern bool ++sw_64_split_const_mov (machine_mode, rtx *); ++extern bool ++sw_64_expand_mov (machine_mode, rtx *); ++extern bool ++sw_64_expand_mov_nobwx (machine_mode, rtx *); ++extern void ++sw_64_expand_movmisalign (machine_mode, rtx *); ++extern void sw_64_emit_floatuns (rtx[]); ++extern rtx sw_64_emit_conditional_move (rtx, machine_mode); ++extern void ++sw_64_split_tmode_pair (rtx[], machine_mode, bool); ++extern void sw_64_split_tfmode_frobsign (rtx[], rtx (*) (rtx, rtx, rtx)); ++extern void ++sw_64_expand_unaligned_load (rtx, rtx, HOST_WIDE_INT, HOST_WIDE_INT, int); ++extern void sw_64_expand_unaligned_store (rtx, rtx, HOST_WIDE_INT, ++ HOST_WIDE_INT); ++extern int sw_64_expand_block_move (rtx[]); ++extern int sw_64_expand_block_clear (rtx[]); ++extern rtx sw_64_expand_zap_mask (HOST_WIDE_INT); ++extern void sw_64_expand_builtin_vector_binop (rtx (*) (rtx, rtx, rtx), ++ machine_mode, rtx, rtx, rtx); ++ ++extern rtx ++sw_64_return_addr (int, rtx); ++extern rtx ++sw_64_gp_save_rtx (void); ++extern void ++sw_64_initialize_trampoline (rtx, rtx, rtx, int, int, int); ++ ++extern rtx 
sw_64_va_arg (tree, tree); ++ ++extern void ++sw_64_start_function (FILE *, const char *, tree); ++extern void ++sw_64_end_function (FILE *, const char *, tree); ++ ++extern bool sw_64_find_lo_sum_using_gp (rtx); ++ ++extern void ++sw_64_emit_rsqrt (rtx, rtx, bool); ++ ++#ifdef REAL_VALUE_TYPE ++extern int ++check_float_value (machine_mode, REAL_VALUE_TYPE *, int); ++#endif ++ ++#ifdef RTX_CODE ++extern void sw_64_emit_conditional_branch (rtx[], machine_mode); ++extern bool sw_64_emit_setcc (rtx[], machine_mode); ++extern int sw_64_split_conditional_move (enum rtx_code, rtx, rtx, rtx, rtx); ++extern void sw_64_emit_xfloating_arith (enum rtx_code, rtx[]); ++extern void sw_64_emit_xfloating_cvt (enum rtx_code, rtx[]); ++extern void sw_64_split_atomic_op (enum rtx_code, rtx, rtx, rtx, rtx, rtx, ++ enum memmodel); ++extern void ++sw_64_split_compare_and_swap (rtx op[]); ++extern void ++sw_64_expand_compare_and_swap_12 (rtx op[]); ++extern void ++sw_64_split_compare_and_swap_12 (rtx op[]); ++extern void ++sw_64_split_atomic_exchange (rtx op[]); ++extern void ++sw_64_expand_atomic_exchange_12 (rtx op[]); ++extern void ++sw_64_split_atomic_exchange_12 (rtx op[]); ++#endif ++ ++extern void ++sw_64_split_atomic_cas (rtx op[]); ++extern void ++sw_64_split_atomic_cas_12 (rtx op[]); ++ ++extern rtx ++sw_64_use_linkage (rtx, bool, bool); ++ ++extern rtx unicosmk_add_call_info_word (rtx); ++ ++extern bool some_small_symbolic_operand_int (rtx); ++extern int ++tls_symbolic_operand_1 (rtx, int, int); ++extern rtx resolve_reload_operand (rtx); ++ ++namespace gcc { ++class context; ++} ++class rtl_opt_pass; ++ ++extern rtl_opt_pass * ++make_pass_handle_trap_shadows (gcc::context *); ++extern rtl_opt_pass * ++make_pass_align_insns (gcc::context *); ++ ++extern void ++sw_64_emit_swdiv (rtx, rtx, rtx, bool); ++extern rtx gen_move_reg (rtx); ++ ++extern void ++sw_64_declare_object (FILE *, const char *, const char *, const char *, ++ ...) 
ATTRIBUTE_PRINTF_4; ++extern void ++sw_64_declare_object_name (FILE *, const char *, tree); +diff --git a/gcc/config/sw_64/sw_64.cc b/gcc/config/sw_64/sw_64.cc +new file mode 100644 +index 000000000..d2387100e +--- /dev/null ++++ b/gcc/config/sw_64/sw_64.cc +@@ -0,0 +1,10076 @@ ++/* Subroutines used for code generation on the Sw64. ++ Copyright (C) 1992-2022 Free Software Foundation, Inc. ++ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) ++ ++ This file is part of GCC. ++ ++ GCC is free software; you can redistribute it and/or modify ++ it under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3, or (at your option) ++ any later version. ++ ++ GCC is distributed in the hope that it will be useful, ++ but WITHOUT ANY WARRANTY; without even the implied warranty of ++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ GNU General Public License for more details. ++ ++ You should have received a copy of the GNU General Public License ++ along with GCC; see the file COPYING3. If not see ++ . 
*/ ++ ++#define IN_TARGET_CODE 1 ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "backend.h" ++#include "target.h" ++#include "rtl.h" ++#include "tree.h" ++#include "stringpool.h" ++#include "attribs.h" ++#include "memmodel.h" ++#include "gimple.h" ++#include "df.h" ++#include "predict.h" ++#include "tm_p.h" ++#include "ssa.h" ++#include "expmed.h" ++#include "optabs.h" ++#include "regs.h" ++#include "emit-rtl.h" ++#include "recog.h" ++#include "diagnostic-core.h" ++#include "alias.h" ++#include "fold-const.h" ++#include "stor-layout.h" ++#include "calls.h" ++#include "varasm.h" ++#include "output.h" ++#include "insn-attr.h" ++#include "explow.h" ++#include "expr.h" ++#include "reload.h" ++#include "except.h" ++#include "common/common-target.h" ++#include "debug.h" ++#include "langhooks.h" ++#include "cfgrtl.h" ++#include "tree-pass.h" ++#include "context.h" ++#include "gimple-iterator.h" ++#include "gimplify.h" ++#include "tree-stdarg.h" ++#include "tm-constrs.h" ++#include "libfuncs.h" ++#include "opts.h" ++#include "builtins.h" ++#include "rtl-iter.h" ++#include "asan.h" ++ ++#include "flags.h" ++/* This file should be included last. */ ++#include "target-def.h" ++ ++/* Specify which cpu to schedule for. */ ++enum processor_type sw_64_tune; ++ ++/* Which cpu we're generating code for. */ ++enum processor_type sw_64_cpu; ++ ++static const char *const sw_64_cpu_name[] = { ++ "sw6", "sw8a" ++}; ++ ++/* Specify how accurate floating-point traps need to be. */ ++ ++enum sw_64_trap_precision sw_64_tp; ++ ++/* Specify the floating-point rounding mode. */ ++ ++enum sw_64_fp_rounding_mode sw_64_fprm; ++ ++/* Specify which things cause traps. */ ++ ++enum sw_64_fp_trap_mode sw_64_fptm; ++ ++/* Nonzero if inside of a function, because the Sw_64 asm can't ++ handle .files inside of functions. */ ++ ++static int inside_function = FALSE; ++ ++/* The number of cycles of latency we should assume on memory reads. 
*/ ++ ++static int sw_64_memory_latency = 3; ++ ++/* Whether the function needs the GP. */ ++ ++static int sw_64_function_needs_gp; ++ ++/* The assembler name of the current function. */ ++ ++static const char *sw_64_fnname; ++ ++/* The next explicit relocation sequence number. */ ++extern GTY (()) int sw_64_next_sequence_number; ++int sw_64_next_sequence_number = 1; ++ ++int stfp3_flag; ++extern int flag_fpcr_set; ++/* The literal and gpdisp sequence numbers for this insn, as printed ++ by %# and %* respectively. */ ++extern GTY (()) int sw_64_this_literal_sequence_number; ++extern GTY (()) int sw_64_this_gpdisp_sequence_number; ++int sw_64_this_literal_sequence_number; ++int sw_64_this_gpdisp_sequence_number; ++ ++/* Costs of various operations on the different architectures. */ ++ ++struct sw_64_rtx_cost_data ++{ ++ unsigned char fp_add; ++ unsigned char fp_mult; ++ unsigned char fp_div_sf; ++ unsigned char fp_div_df; ++ unsigned char int_mult_si; ++ unsigned char int_mult_di; ++ unsigned char int_shift; ++ unsigned char int_cmov; ++ unsigned short int_div; ++}; ++static struct sw_64_rtx_cost_data const sw_64_rtx_cost_data[PROCESSOR_MAX + 1] = ++ { ++ { ++ /* sw6a */ ++ COSTS_N_INSNS (6), /* fp_add */ ++ COSTS_N_INSNS (6), /* fp_mult */ ++ COSTS_N_INSNS (19), /* fp_div_sf */ ++ COSTS_N_INSNS (19), /* fp_div_df */ ++ COSTS_N_INSNS (4), /* int_mult_si */ ++ COSTS_N_INSNS (4), /* int_mult_di */ ++ COSTS_N_INSNS (1), /* int_shift */ ++ COSTS_N_INSNS (1), /* int_cmov */ ++ COSTS_N_INSNS (83), /* int_div */ ++ }, ++ { ++ /* sw8a */ ++ COSTS_N_INSNS (6), /* fp_add */ ++ COSTS_N_INSNS (6), /* fp_mult */ ++ COSTS_N_INSNS (19), /* fp_div_sf */ ++ COSTS_N_INSNS (19), /* fp_div_df */ ++ COSTS_N_INSNS (4), /* int_mult_si */ ++ COSTS_N_INSNS (4), /* int_mult_di */ ++ COSTS_N_INSNS (1), /* int_shift */ ++ COSTS_N_INSNS (1), /* int_cmov */ ++ COSTS_N_INSNS (20), /* int_div */ ++ }, ++ }; ++ ++/* Similar but tuned for code size instead of execution latency. 
The ++ extra +N is fractional cost tuning based on latency. It's used to ++ encourage use of cheaper insns like shift, but only if there's just ++ one of them. */ ++ ++static struct sw_64_rtx_cost_data const sw_64_rtx_cost_size = { ++ COSTS_N_INSNS (1), /* fp_add */ ++ COSTS_N_INSNS (1), /* fp_mult */ ++ COSTS_N_INSNS (1), /* fp_div_sf */ ++ COSTS_N_INSNS (1) + 1, /* fp_div_df */ ++ COSTS_N_INSNS (1) + 1, /* int_mult_si */ ++ COSTS_N_INSNS (1) + 2, /* int_mult_di */ ++ COSTS_N_INSNS (1), /* int_shift */ ++ COSTS_N_INSNS (1), /* int_cmov */ ++ COSTS_N_INSNS (6), /* int_div */ ++}; ++ ++/* Get the number of args of a function in one of two ways. */ ++#define NUM_ARGS crtl->args.info ++ ++#define REG_PV 27 ++#define REG_RA 26 ++ ++/* Declarations of static functions. */ ++static struct machine_function * ++sw_64_init_machine_status (void); ++static rtx ++sw_64_emit_xfloating_compare (enum rtx_code *, rtx, rtx); ++static void ++sw_64_handle_trap_shadows (void); ++static void ++sw_64_align_insns (void); ++static void ++sw_64_override_options_after_change (void); ++ ++static unsigned int ++rest_of_handle_trap_shadows (void) ++{ ++ sw_64_handle_trap_shadows (); ++ return 0; ++} ++ ++namespace { ++ ++const pass_data pass_data_handle_trap_shadows = { ++ RTL_PASS, ++ "trap_shadows", /* name */ ++ OPTGROUP_NONE, /* optinfo_flags */ ++ TV_NONE, /* tv_id */ ++ 0, /* properties_required */ ++ 0, /* properties_provided */ ++ 0, /* properties_destroyed */ ++ 0, /* todo_flags_start */ ++ TODO_df_finish, /* todo_flags_finish */ ++}; ++ ++class pass_handle_trap_shadows : public rtl_opt_pass ++{ ++public: ++ pass_handle_trap_shadows (gcc::context *ctxt) ++ : rtl_opt_pass (pass_data_handle_trap_shadows, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *) ++ { ++ return sw_64_tp != SW_64_TP_PROG || flag_exceptions; ++ } ++ ++ virtual unsigned int execute (function *) ++ { ++ return rest_of_handle_trap_shadows (); ++ } ++ ++}; // class pass_handle_trap_shadows ++ 
++} // namespace ++ ++rtl_opt_pass * ++make_pass_handle_trap_shadows (gcc::context *ctxt) ++{ ++ return new pass_handle_trap_shadows (ctxt); ++} ++ ++static unsigned int ++rest_of_align_insns (void) ++{ ++ sw_64_align_insns (); ++ return 0; ++} ++ ++namespace { ++ ++const pass_data pass_data_align_insns = { ++ RTL_PASS, ++ "align_insns", /* name */ ++ OPTGROUP_NONE, /* optinfo_flags */ ++ TV_NONE, /* tv_id */ ++ 0, /* properties_required */ ++ 0, /* properties_provided */ ++ 0, /* properties_destroyed */ ++ 0, /* todo_flags_start */ ++ TODO_df_finish, /* todo_flags_finish */ ++}; ++ ++class pass_align_insns : public rtl_opt_pass ++{ ++public: ++ pass_align_insns (gcc::context *ctxt) ++ : rtl_opt_pass (pass_data_align_insns, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *) ++ { ++ /* Due to the number of extra memb insns, don't bother fixing up ++ alignment when trap precision is instruction. Moreover, we can ++ only do our job when sched2 is run. */ ++ return ((sw_64_tune != PROCESSOR_SW6 && sw_64_tune != PROCESSOR_SW8) ++ && optimize && !optimize_size && sw_64_tp != SW_64_TP_INSN ++ && flag_schedule_insns_after_reload); ++ } ++ ++ virtual unsigned int execute (function *) ++ { ++ return rest_of_align_insns (); ++ } ++ ++}; // class pass_align_insns ++ ++} // namespace ++ ++rtl_opt_pass * ++make_pass_align_insns (gcc::context *ctxt) ++{ ++ return new pass_align_insns (ctxt); ++} ++ ++#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING ++/* Implement TARGET_MANGLE_TYPE. */ ++ ++static const char * ++sw_64_mangle_type (const_tree type) ++{ ++ if (TYPE_MAIN_VARIANT (type) == long_double_type_node ++ && TARGET_LONG_DOUBLE_128) ++ return "g"; ++ ++ /* For all other types, use normal C++ mangling. */ ++ return NULL; ++} ++#endif ++ ++/* Parse target option strings. 
*/ ++ ++static void ++sw_64_option_override (void) ++{ ++ static const struct cpu_table ++ { ++ const char *const name; ++ const enum processor_type processor; ++ const int flags; ++ const unsigned short line_size; /* in bytes */ ++ const unsigned short l1_size; /* in kb. */ ++ const unsigned short l2_size; /* in kb. */ ++ } cpu_table[] = { ++ /* SW6/LCA45 had 8k L1 caches; SW6f had 16k L1 caches. ++ * SW6/SW6a had 128k to 16M 32-byte direct Bcache. LCA45 ++ * had 64k to 8M 8-byte direct Bcache. */ ++ {"sw6a", PROCESSOR_SW6, MASK_BWX | MASK_CIX | MASK_FIX | MASK_SW6A, 128, 32, ++ 512}, ++ {"sw6b", PROCESSOR_SW6, MASK_BWX | MASK_CIX | MASK_FIX | MASK_SW6B, 128, 32, ++ 512}, ++ {"sw4d", PROCESSOR_SW6, MASK_BWX | MASK_CIX | MASK_FIX | MASK_SW4D, 128, 32, ++ 512}, ++ {"sw8a", PROCESSOR_SW8, MASK_BWX | MASK_CIX | MASK_FIX | MASK_SW8A, 128, 32, ++ 512}, ++ }; ++ ++ int const ct_size = ARRAY_SIZE (cpu_table); ++ int line_size = 0, l1_size = 0, l2_size = 0; ++ int i; ++ ++#ifdef SUBTARGET_OVERRIDE_OPTIONS ++ SUBTARGET_OVERRIDE_OPTIONS; ++#endif ++ ++ /* Default to full IEEE compliance mode for Go language. 
*/ ++ if (strcmp (lang_hooks.name, "GNU Go") == 0 ++ && !(target_flags_explicit & MASK_IEEE)) ++ target_flags |= MASK_IEEE; ++ ++ sw_64_fprm = SW_64_FPRM_NORM; ++ sw_64_tp = SW_64_TP_PROG; ++ sw_64_fptm = SW_64_FPTM_N; ++ ++ if (TARGET_IEEE) ++ { ++ sw_64_tp = SW_64_TP_INSN; ++ sw_64_fptm = SW_64_FPTM_SU; ++ } ++ if (TARGET_IEEE_WITH_INEXACT) ++ { ++ sw_64_tp = SW_64_TP_INSN; ++ sw_64_fptm = SW_64_FPTM_SUI; ++ } ++ if (TARGET_IEEE_MAIN) ++ { ++ sw_64_tp = SW_64_TP_INSN; ++ sw_64_fptm = SW_64_FPTM_SU; ++ } ++ ++ if (sw_64_tp_string) ++ { ++ if (!strcmp (sw_64_tp_string, "p")) ++ sw_64_tp = SW_64_TP_PROG; ++ else if (!strcmp (sw_64_tp_string, "f")) ++ sw_64_tp = SW_64_TP_FUNC; ++ else if (!strcmp (sw_64_tp_string, "i")) ++ sw_64_tp = SW_64_TP_INSN; ++ else ++ error ("bad value %qs for %<-mtrap-precision%> switch", ++ sw_64_tp_string); ++ } ++ ++ if (sw_64_fprm_string) ++ { ++ if (!strcmp (sw_64_fprm_string, "n")) ++ sw_64_fprm = SW_64_FPRM_NORM; ++ else if (!strcmp (sw_64_fprm_string, "m")) ++ sw_64_fprm = SW_64_FPRM_MINF; ++ else if (!strcmp (sw_64_fprm_string, "c")) ++ sw_64_fprm = SW_64_FPRM_CHOP; ++ else if (!strcmp (sw_64_fprm_string, "d")) ++ sw_64_fprm = SW_64_FPRM_DYN; ++ else ++ error ("bad value %qs for %<-mfp-rounding-mode%> switch", ++ sw_64_fprm_string); ++ } ++ ++ if (sw_64_fptm_string) ++ { ++ if (strcmp (sw_64_fptm_string, "n") == 0) ++ sw_64_fptm = SW_64_FPTM_N; ++ else if (strcmp (sw_64_fptm_string, "u") == 0) ++ sw_64_fptm = SW_64_FPTM_U; ++ else if (strcmp (sw_64_fptm_string, "su") == 0) ++ sw_64_fptm = SW_64_FPTM_SU; ++ else if (strcmp (sw_64_fptm_string, "sui") == 0) ++ sw_64_fptm = SW_64_FPTM_SUI; ++ else ++ error ("bad value %qs for %<-mfp-trap-mode%> switch", ++ sw_64_fptm_string); ++ } ++ ++ if (sw_64_cpu_string) ++ { ++ for (i = 0; i < ct_size; i++) ++ if (!strcmp (sw_64_cpu_string, cpu_table[i].name)) ++ { ++ sw_64_tune = sw_64_cpu = cpu_table[i].processor; ++ line_size = cpu_table[i].line_size; ++ l1_size = cpu_table[i].l1_size; ++ 
l2_size = cpu_table[i].l2_size; ++ target_flags &= ~(MASK_BWX | MASK_MAX | MASK_FIX | MASK_CIX ++ | MASK_SW6A | MASK_SW6B | MASK_SW4D | MASK_SW8A); ++ target_flags |= cpu_table[i].flags; ++ break; ++ } ++ if (i == ct_size) ++ error ("bad value %qs for %<-mcpu%> switch", sw_64_cpu_string); ++ } ++ ++ if (sw_64_tune_string) ++ { ++ for (i = 0; i < ct_size; i++) ++ if (!strcmp (sw_64_tune_string, cpu_table[i].name)) ++ { ++ sw_64_tune = cpu_table[i].processor; ++ line_size = cpu_table[i].line_size; ++ l1_size = cpu_table[i].l1_size; ++ l2_size = cpu_table[i].l2_size; ++ break; ++ } ++ if (i == ct_size) ++ error ("bad value %qs for %<-mtune%> switch", sw_64_tune_string); ++ } ++ if (line_size) ++ SET_OPTION_IF_UNSET (&global_options, &global_options_set, ++ param_l1_cache_line_size, line_size); ++ if (l1_size) ++ SET_OPTION_IF_UNSET (&global_options, &global_options_set, ++ param_l1_cache_size, l1_size); ++ if (l2_size) ++ SET_OPTION_IF_UNSET (&global_options, &global_options_set, ++ param_l2_cache_size, l2_size); ++ ++ // generate prefetch for cases like stream add ++ if (flag_sw_prefetch_add == 1) ++ SET_OPTION_IF_UNSET (&global_options, &global_options_set, ++ param_prefetch_min_insn_to_mem_ratio, 2); ++ /* This cannot reside in s390_option_optimization_table since HAVE_prefetch ++ requires the arch flags to be evaluated already. Since prefetching ++ is beneficial on s390, we enable it if available. */ ++ if (flag_prefetch_loop_arrays < 0 && HAVE_prefetch) ++ flag_prefetch_loop_arrays = 1; ++ ++ /* set simultaneous prefetches and latency for sw ++ * * need add some conditions to decide what the cpu kind */ ++ SET_OPTION_IF_UNSET (&global_options, &global_options_set, ++ param_simultaneous_prefetches, 8); ++ ++ if (flag_sw_non_temporal == 1) ++ /* set this param from default 3 to 2 to turn on prefetch and ++ * non-temporal for cases like c[i] = a[i] + b[i] in stream. 
*/ ++ SET_OPTION_IF_UNSET (&global_options, &global_options_set, ++ param_prefetch_min_insn_to_mem_ratio, 2); ++ ++ if (flag_sw_prefetch_unroll == 1) ++ { ++ SET_OPTION_IF_UNSET (&global_options, &global_options_set, ++ param_max_unrolled_insns, 400); ++ } ++ /* Do some sanity checks on the above options. */ ++ ++ if ((sw_64_fptm == SW_64_FPTM_SU || sw_64_fptm == SW_64_FPTM_SUI) ++ && sw_64_tp != SW_64_TP_INSN && sw_64_cpu != PROCESSOR_SW6 ++ && sw_64_cpu != PROCESSOR_SW8) ++ { ++ warning (0, "fp software completion requires %<-mtrap-precision=i%>"); ++ sw_64_tp = SW_64_TP_INSN; ++ } ++ ++ if (sw_64_cpu == PROCESSOR_SW6 || sw_64_cpu == PROCESSOR_SW8) ++ { ++ /* Except for SW6 pass 1 (not released), we always have precise ++ arithmetic traps. Which means we can do software completion ++ without minding trap shadows. */ ++ sw_64_tp = SW_64_TP_PROG; ++ } ++ ++ if (TARGET_FLOAT_VAX) ++ { ++ if (sw_64_fprm == SW_64_FPRM_MINF || sw_64_fprm == SW_64_FPRM_DYN) ++ { ++ warning (0, "rounding mode not supported for VAX floats"); ++ sw_64_fprm = SW_64_FPRM_NORM; ++ } ++ if (sw_64_fptm == SW_64_FPTM_SUI) ++ { ++ warning (0, "trap mode not supported for VAX floats"); ++ sw_64_fptm = SW_64_FPTM_SU; ++ } ++ if (target_flags_explicit & MASK_LONG_DOUBLE_128) ++ warning (0, "128-bit long double not supported for VAX floats"); ++ target_flags &= ~MASK_LONG_DOUBLE_128; ++ } ++ ++ { ++ char *end; ++ int lat; ++ ++ if (!sw_64_mlat_string) ++ sw_64_mlat_string = "L1"; ++ ++ if (ISDIGIT ((unsigned char) sw_64_mlat_string[0]) ++ && (lat = strtol (sw_64_mlat_string, &end, 10), *end == '\0')) ++ ; ++ else if ((sw_64_mlat_string[0] == 'L' || sw_64_mlat_string[0] == 'l') ++ && ISDIGIT ((unsigned char) sw_64_mlat_string[1]) ++ && sw_64_mlat_string[2] == '\0') ++ { ++ static int cache_latency[][4] = { ++ {3, 12, 30}, /* sw6 -- Bcache from DS20 LMbench. */ ++ {4, 15, 90}, /* sw6b -- Bcache from DS20 LMbench. */ ++ {3, 7, 11}, /* sw8a -- Bcache from DS20 LMbench. 
*/ ++ }; ++ if (flag_sw_rtx_cost) ++ { ++ cache_latency[sw_64_tune][0] = 3; ++ cache_latency[sw_64_tune][1] = 7; ++ cache_latency[sw_64_tune][2] = 11; ++ } ++ lat = sw_64_mlat_string[1] - '0'; ++ if (lat <= 0 || lat > 3 || cache_latency[sw_64_tune][lat - 1] == -1) ++ { ++ warning (0, "L%d cache latency unknown for %s", lat, ++ sw_64_cpu_name[sw_64_tune]); ++ lat = 3; ++ } ++ else ++ lat = cache_latency[sw_64_tune][lat - 1]; ++ } ++ else if (!strcmp (sw_64_mlat_string, "main")) ++ { ++ /* Most current memories have about 370ns latency. This is ++ a reasonable guess for a fast cpu. */ ++ lat = 150; ++ } ++ else ++ { ++ warning (0, "bad value %qs for %<-mmemory-latency%>", ++ sw_64_mlat_string); ++ lat = 3; ++ } ++ ++ sw_64_memory_latency = lat; ++ } ++ ++ /* Default the definition of "small data" to 8 bytes. */ ++ if (!OPTION_SET_P (g_switch_value)) ++ g_switch_value = 8; ++ ++ /* Infer TARGET_SMALL_DATA from -fpic/-fPIC. */ ++ if (flag_pic == 1) ++ target_flags |= MASK_SMALL_DATA; ++ else if (flag_pic == 2) ++ target_flags &= ~MASK_SMALL_DATA; ++ ++ sw_64_override_options_after_change (); ++ ++ /* Register variables and functions with the garbage collector. */ ++ ++ /* Set up function hooks. */ ++ init_machine_status = sw_64_init_machine_status; ++ ++ /* Tell the compiler when we're using VAX floating point. */ ++ if (TARGET_FLOAT_VAX) ++ { ++ REAL_MODE_FORMAT (SFmode) = &vax_f_format; ++ REAL_MODE_FORMAT (DFmode) = &vax_g_format; ++ REAL_MODE_FORMAT (TFmode) = NULL; ++ } ++ ++#ifdef TARGET_DEFAULT_LONG_DOUBLE_128 ++ if (!(target_flags_explicit & MASK_LONG_DOUBLE_128)) ++ target_flags |= MASK_LONG_DOUBLE_128; ++#endif ++} ++ ++/* Implement targetm.override_options_after_change. */ ++ ++static void ++sw_64_override_options_after_change (void) ++{ ++ /* Align labels and loops for optimal branching. */ ++ /* ??? Kludge these by not doing anything if we don't optimize. 
*/ ++ if (optimize > 0) ++ { ++ if (flag_align_loops && !str_align_loops) ++ str_align_loops = "16"; ++ if (flag_align_jumps && !str_align_jumps) ++ str_align_jumps = "16"; ++ } ++ if (flag_align_functions && !str_align_functions) ++ str_align_functions = "16"; ++} ++ ++/* Returns 1 if VALUE is a mask that contains full bytes of zero or ones. */ ++ ++int ++zap_mask (HOST_WIDE_INT value) ++{ ++ int i; ++ ++ for (i = 0; i < HOST_BITS_PER_WIDE_INT / HOST_BITS_PER_CHAR; i++, value >>= 8) ++ if ((value & 0xff) != 0 && (value & 0xff) != 0xff) ++ return 0; ++ ++ return 1; ++} ++ ++/* Return true if OP is valid for a particular TLS relocation. ++ We are already guaranteed that OP is a CONST. */ ++ ++int ++tls_symbolic_operand_1 (rtx op, int size, int unspec) ++{ ++ op = XEXP (op, 0); ++ ++ if (GET_CODE (op) != UNSPEC || XINT (op, 1) != unspec) ++ return 0; ++ op = XVECEXP (op, 0, 0); ++ ++ if (GET_CODE (op) != SYMBOL_REF) ++ return 0; ++ ++ switch (SYMBOL_REF_TLS_MODEL (op)) ++ { ++ case TLS_MODEL_LOCAL_DYNAMIC: ++ return unspec == UNSPEC_DTPREL && size == sw_64_tls_size; ++ case TLS_MODEL_INITIAL_EXEC: ++ return unspec == UNSPEC_TPREL && size == 64; ++ case TLS_MODEL_LOCAL_EXEC: ++ return unspec == UNSPEC_TPREL && size == sw_64_tls_size; ++ default: ++ gcc_unreachable (); ++ } ++} ++ ++/* Used by aligned_memory_operand and unaligned_memory_operand to ++ resolve what reload is going to do with OP if it's a register. */ ++ ++rtx ++resolve_reload_operand (rtx op) ++{ ++ if (reload_in_progress) ++ { ++ rtx tmp = op; ++ if (SUBREG_P (tmp)) ++ tmp = SUBREG_REG (tmp); ++ if (REG_P (tmp) && REGNO (tmp) >= FIRST_PSEUDO_REGISTER) ++ { ++ op = reg_equiv_memory_loc (REGNO (tmp)); ++ if (op == 0) ++ return 0; ++ } ++ } ++ return op; ++} ++ ++/* The scalar modes supported differs from the default check-what-c-supports ++ version in that sometimes TFmode is available even when long double ++ indicates only DFmode. 
*/ ++ ++static bool ++sw_64_scalar_mode_supported_p (scalar_mode mode) ++{ ++ switch (mode) ++ { ++ case E_QImode: ++ case E_HImode: ++ case E_SImode: ++ case E_DImode: ++ case E_TImode: /* via optabs.cc */ ++ return true; ++ ++ case E_SFmode: ++ case E_DFmode: ++ return true; ++ ++ case E_TFmode: ++ return TARGET_HAS_XFLOATING_LIBS; ++ ++ default: ++ return false; ++ } ++} ++ ++/* Sw_64 implements a couple of integer vector mode operations when ++ TARGET_MAX is enabled. We do not check TARGET_MAX here, however, ++ which allows the vectorizer to operate on e.g. move instructions, ++ or when expand_vector_operations can do something useful. */ ++ ++static bool ++sw_64_vector_mode_supported_p (machine_mode mode) ++{ ++ return mode == V8QImode || mode == V4HImode || mode == V2SImode; ++} ++ ++/* Return the TLS model to use for SYMBOL. */ ++ ++static enum tls_model ++tls_symbolic_operand_type (rtx symbol) ++{ ++ enum tls_model model; ++ ++ if (GET_CODE (symbol) != SYMBOL_REF) ++ return TLS_MODEL_NONE; ++ model = SYMBOL_REF_TLS_MODEL (symbol); ++ ++ /* Local-exec with a 64-bit size is the same code as initial-exec. */ ++ if (model == TLS_MODEL_LOCAL_EXEC && sw_64_tls_size == 64) ++ model = TLS_MODEL_INITIAL_EXEC; ++ ++ return model; ++} ++ ++/* Return true if the function DECL will share the same GP as any ++ function in the current unit of translation. */ ++ ++static bool ++decl_has_samegp (const_tree decl) ++{ ++ /* Functions that are not local can be overridden, and thus may ++ not share the same gp. */ ++ if (!(*targetm.binds_local_p) (decl)) ++ return false; ++ ++ /* If -msmall-data is in effect, assume that there is only one GP ++ for the module, and so any local symbol has this property. We ++ need explicit relocations to be able to enforce this for symbols ++ not defined in this unit of translation, however. */ ++ if (TARGET_EXPLICIT_RELOCS && TARGET_SMALL_DATA) ++ return true; ++ ++ /* Functions that are not external are defined in this UoT. */ ++ /* ??? 
Irritatingly, static functions not yet emitted are still ++ marked "external". Apply this to non-static functions only. */ ++ return !TREE_PUBLIC (decl) || !DECL_EXTERNAL (decl); ++} ++ ++/* Return true if EXP should be placed in the small data section. */ ++ ++static bool ++sw_64_in_small_data_p (const_tree exp) ++{ ++ /* We want to merge strings, so we never consider them small data. */ ++ if (TREE_CODE (exp) == STRING_CST) ++ return false; ++ ++ /* Functions are never in the small data area. Duh. */ ++ if (TREE_CODE (exp) == FUNCTION_DECL) ++ return false; ++ ++ /* COMMON symbols are never small data. */ ++ if (TREE_CODE (exp) == VAR_DECL && DECL_COMMON (exp)) ++ return false; ++ ++ if (TREE_CODE (exp) == VAR_DECL && DECL_SECTION_NAME (exp)) ++ { ++ const char *section = DECL_SECTION_NAME (exp); ++ if (strcmp (section, ".sdata") == 0 || strcmp (section, ".sbss") == 0) ++ return true; ++ } ++ else ++ { ++ HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (exp)); ++ ++ /* If this is an incomplete type with size 0, then we can't put it ++ in sdata because it might be too big when completed. */ ++ if (size > 0 && size <= g_switch_value) ++ return true; ++ } ++ ++ return false; ++} ++ ++/* legitimate_address_p recognizes an RTL expression that is a valid ++ memory address for an instruction. The MODE argument is the ++ machine mode for the MEM expression that wants to use this address. ++ ++ For Sw_64, we have either a constant address or the sum of a ++ register and a constant address, or just a register. For DImode, ++ any of those forms can be surrounded with an AND that clear the ++ low-order three bits; this is an "unaligned" access. */ ++ ++static bool ++sw_64_legitimate_address_p (machine_mode mode, rtx x, bool strict) ++{ ++ /* If this is an ldl_u type address, discard the outer AND. */ ++ if (mode == DImode && GET_CODE (x) == AND && CONST_INT_P (XEXP (x, 1)) ++ && INTVAL (XEXP (x, 1)) == -8) ++ x = XEXP (x, 0); ++ ++ /* Discard non-paradoxical subregs. 
*/ ++ if (SUBREG_P (x) ++ && (GET_MODE_SIZE (GET_MODE (x)) ++ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) ++ x = SUBREG_REG (x); ++ ++ /* Unadorned general registers are valid. */ ++ if (REG_P (x) ++ && (strict ? STRICT_REG_OK_FOR_BASE_P (x) ++ : NONSTRICT_REG_OK_FOR_BASE_P (x))) ++ return true; ++ ++ /* Constant addresses (i.e. +/- 32k) are valid. */ ++ if (CONSTANT_ADDRESS_P (x)) ++ return true; ++ ++ if ((GET_CODE (x) == POST_INC || GET_CODE (x) == POST_DEC ++ || GET_CODE (x) == POST_MODIFY) ++ && (TARGET_SW8A) ++ && (strict ? STRICT_REG_OK_FOR_BASE_P (XEXP (x, 0)) ++ : NONSTRICT_REG_OK_FOR_BASE_P (XEXP (x, 0)))) ++ return true; ++ /* Register plus a small constant offset is valid. */ ++ if (GET_CODE (x) == PLUS) ++ { ++ rtx ofs = XEXP (x, 1); ++ x = XEXP (x, 0); ++ ++ /* Discard non-paradoxical subregs. */ ++ if (SUBREG_P (x) ++ && (GET_MODE_SIZE (GET_MODE (x)) ++ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) ++ x = SUBREG_REG (x); ++ ++ if (REG_P (x)) ++ { ++ if (!strict && NONSTRICT_REG_OK_FP_BASE_P (x) && CONST_INT_P (ofs)) ++ return true; ++ if ((strict ? STRICT_REG_OK_FOR_BASE_P (x) ++ : NONSTRICT_REG_OK_FOR_BASE_P (x)) ++ && CONSTANT_ADDRESS_P (ofs)) ++ return true; ++ } ++ } ++ ++ /* If we're managing explicit relocations, LO_SUM is valid, as are small ++ data symbols. Avoid explicit relocations of modes larger than word ++ mode since i.e. $LC0+8($1) can fold around +/- 32k offset. */ ++ else if (TARGET_EXPLICIT_RELOCS && GET_MODE_SIZE (mode) <= UNITS_PER_WORD) ++ { ++ if (small_symbolic_operand (x, Pmode)) ++ return true; ++ ++ if (GET_CODE (x) == LO_SUM) ++ { ++ rtx ofs = XEXP (x, 1); ++ x = XEXP (x, 0); ++ ++ /* Discard non-paradoxical subregs. */ ++ if (SUBREG_P (x) ++ && (GET_MODE_SIZE (GET_MODE (x)) ++ < GET_MODE_SIZE (GET_MODE (SUBREG_REG (x))))) ++ x = SUBREG_REG (x); ++ ++ /* Must have a valid base register. */ ++ if (!(REG_P (x) ++ && (strict ? 
STRICT_REG_OK_FOR_BASE_P (x) ++ : NONSTRICT_REG_OK_FOR_BASE_P (x)))) ++ return false; ++ ++ /* The symbol must be local. */ ++ if (local_symbolic_operand (ofs, Pmode) ++ || dtp32_symbolic_operand (ofs, Pmode) ++ || tp32_symbolic_operand (ofs, Pmode)) ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++/* Build the SYMBOL_REF for __tls_get_addr. */ ++ ++static GTY (()) rtx tls_get_addr_libfunc; ++ ++static rtx ++get_tls_get_addr (void) ++{ ++ if (!tls_get_addr_libfunc) ++ tls_get_addr_libfunc = init_one_libfunc ("__tls_get_addr"); ++ return tls_get_addr_libfunc; ++} ++ ++/* Try machine-dependent ways of modifying an illegitimate address ++ to be legitimate. If we find one, return the new, valid address. */ ++ ++static rtx ++sw_64_legitimize_address_1 (rtx x, rtx scratch, machine_mode mode) ++{ ++ HOST_WIDE_INT addend; ++ ++ /* If the address is (plus reg const_int) and the CONST_INT is not a ++ valid offset, compute the high part of the constant and add it to ++ the register. Then our address is (plus temp low-part-const). */ ++ if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0)) && CONST_INT_P (XEXP (x, 1)) ++ && !CONSTANT_ADDRESS_P (XEXP (x, 1))) ++ { ++ addend = INTVAL (XEXP (x, 1)); ++ x = XEXP (x, 0); ++ goto split_addend; ++ } ++ ++ /* If the address is (const (plus FOO const_int)), find the low-order ++ part of the CONST_INT. Then load FOO plus any high-order part of the ++ CONST_INT into a register. Our address is (plus reg low-part-const). ++ This is done to reduce the number of GOT entries. */ ++ if (can_create_pseudo_p () && GET_CODE (x) == CONST ++ && GET_CODE (XEXP (x, 0)) == PLUS && CONST_INT_P (XEXP (XEXP (x, 0), 1))) ++ { ++ addend = INTVAL (XEXP (XEXP (x, 0), 1)); ++ x = force_reg (Pmode, XEXP (XEXP (x, 0), 0)); ++ goto split_addend; ++ } ++ ++ /* If we have a (plus reg const), emit the load as in (2), then add ++ the two registers, and finally generate (plus reg low-part-const) as ++ our address. 
*/ ++ if (can_create_pseudo_p () && GET_CODE (x) == PLUS && REG_P (XEXP (x, 0)) ++ && GET_CODE (XEXP (x, 1)) == CONST ++ && GET_CODE (XEXP (XEXP (x, 1), 0)) == PLUS ++ && CONST_INT_P (XEXP (XEXP (XEXP (x, 1), 0), 1))) ++ { ++ addend = INTVAL (XEXP (XEXP (XEXP (x, 1), 0), 1)); ++ x = expand_simple_binop (Pmode, PLUS, XEXP (x, 0), ++ XEXP (XEXP (XEXP (x, 1), 0), 0), NULL_RTX, 1, ++ OPTAB_LIB_WIDEN); ++ goto split_addend; ++ } ++ ++ /* If this is a local symbol, split the address into HIGH/LO_SUM parts. ++ Avoid modes larger than word mode since i.e. $LC0+8($1) can fold ++ around +/- 32k offset. */ ++ if (TARGET_EXPLICIT_RELOCS && GET_MODE_SIZE (mode) <= UNITS_PER_WORD ++ && symbolic_operand (x, Pmode)) ++ { ++ rtx r0, r16, eqv, tga, tp, dest, seq; ++ rtx_insn *insn; ++ ++ switch (tls_symbolic_operand_type (x)) ++ { ++ case TLS_MODEL_NONE: ++ break; ++ ++ case TLS_MODEL_GLOBAL_DYNAMIC: { ++ start_sequence (); ++ ++ r0 = gen_rtx_REG (Pmode, 0); ++ r16 = gen_rtx_REG (Pmode, 16); ++ tga = get_tls_get_addr (); ++ dest = gen_reg_rtx (Pmode); ++ seq = GEN_INT (sw_64_next_sequence_number++); ++ if (sw_64_tls_gd == 16) ++ { ++ emit_insn ( ++ gen_movdi_er_tlsgd (r16, pic_offset_table_rtx, x, seq)); ++ } ++ else if (sw_64_tls_gd == 32) ++ { ++ eqv ++ = gen_rtx_UNSPEC (Pmode, ++ gen_rtvec (3, pic_offset_table_rtx, x, seq), ++ UNSPEC_TLSRELGOT); ++ // eqv = gen_rtx_CONST (Pmode, eqv); ++ emit_insn (gen_rtx_SET (r16, eqv)); ++ emit_insn (gen_movdi_er_tlsgd (r16, r16, x, seq)); ++ } ++ rtx val = gen_call_value_osf_tlsgd (r0, tga, seq); ++ insn = emit_call_insn (val); ++ RTL_CONST_CALL_P (insn) = 1; ++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16); ++ ++ insn = get_insns (); ++ end_sequence (); ++ ++ emit_libcall_block (insn, dest, r0, x); ++ return dest; ++ } ++ ++ case TLS_MODEL_LOCAL_DYNAMIC: { ++ start_sequence (); ++ ++ r0 = gen_rtx_REG (Pmode, 0); ++ r16 = gen_rtx_REG (Pmode, 16); ++ tga = get_tls_get_addr (); ++ scratch = gen_reg_rtx (Pmode); ++ seq = GEN_INT 
(sw_64_next_sequence_number++); ++ if (sw_64_tls_ldm == 16) ++ { ++ emit_insn ( ++ gen_movdi_er_tlsldm (r16, pic_offset_table_rtx, seq)); ++ } ++ else if (sw_64_tls_ldm == 32) ++ { ++ eqv ++ = gen_rtx_UNSPEC (Pmode, ++ gen_rtvec (3, pic_offset_table_rtx, x, seq), ++ UNSPEC_TLSRELGOT); ++ // eqv = gen_rtx_CONST (Pmode, eqv); ++ emit_insn (gen_rtx_SET (r16, eqv)); ++ emit_insn (gen_movdi_er_tlsldm (r16, r16, seq)); ++ } ++ rtx val = gen_call_value_osf_tlsldm (r0, tga, seq); ++ insn = emit_call_insn (val); ++ RTL_CONST_CALL_P (insn) = 1; ++ use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r16); ++ ++ insn = get_insns (); ++ end_sequence (); ++ ++ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx), ++ UNSPEC_TLSLDM_CALL); ++ emit_libcall_block (insn, scratch, r0, eqv); ++ ++ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_DTPREL); ++ eqv = gen_rtx_CONST (Pmode, eqv); ++ ++ if (sw_64_tls_size == 64) ++ { ++ if (sw_64_tls_gotdtprel == 16) ++ { ++ dest = gen_reg_rtx (Pmode); ++ emit_insn (gen_rtx_SET (dest, eqv)); ++ emit_insn (gen_adddi3 (dest, dest, scratch)); ++ } ++ else if (sw_64_tls_gotdtprel == 32) ++ { ++ seq = GEN_INT (sw_64_next_sequence_number++); ++ eqv = gen_rtx_UNSPEC (Pmode, ++ gen_rtvec (3, pic_offset_table_rtx, x, ++ seq), ++ UNSPEC_TLSRELGOT); ++ // eqv = gen_rtx_CONST (Pmode, eqv); ++ dest = gen_reg_rtx (Pmode); ++ emit_insn (gen_rtx_SET (dest, eqv)); ++ ++ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, dest, x, seq), ++ UNSPEC_GOTDTPREL); ++ // eqv = gen_rtx_CONST (Pmode, eqv); ++ emit_insn (gen_rtx_SET (dest, eqv)); ++ ++ emit_insn (gen_adddi3 (dest, dest, scratch)); ++ } ++ return dest; ++ } ++ if (sw_64_tls_size == 32) ++ { ++ rtx temp = gen_rtx_HIGH (Pmode, eqv); ++ temp = gen_rtx_PLUS (Pmode, scratch, temp); ++ scratch = gen_reg_rtx (Pmode); ++ emit_insn (gen_rtx_SET (scratch, temp)); ++ } ++ return gen_rtx_LO_SUM (Pmode, scratch, eqv); ++ } ++ ++ case TLS_MODEL_INITIAL_EXEC: ++ { ++ if (sw_64_tls_gottprel == 16) ++ { ++ eqv = gen_rtx_UNSPEC 
(Pmode, gen_rtvec (1, x), UNSPEC_TPREL); ++ eqv = gen_rtx_CONST (Pmode, eqv); ++ tp = gen_reg_rtx (Pmode); ++ scratch = gen_reg_rtx (Pmode); ++ dest = gen_reg_rtx (Pmode); ++ ++ emit_insn (gen_get_thread_pointerdi (tp)); ++ emit_insn (gen_rtx_SET (scratch, eqv)); ++ emit_insn (gen_adddi3 (dest, tp, scratch)); ++ } ++ else if (sw_64_tls_gottprel == 32) ++ { ++ seq = GEN_INT (sw_64_next_sequence_number++); ++ tp = gen_reg_rtx (Pmode); ++ emit_insn (gen_get_thread_pointerdi (tp)); ++ ++ scratch = gen_reg_rtx (Pmode); ++ eqv ++ = gen_rtx_UNSPEC (Pmode, ++ gen_rtvec (3, pic_offset_table_rtx, x, seq), ++ UNSPEC_TLSRELGOT); ++ // eqv = gen_rtx_CONST (Pmode, eqv); ++ emit_insn (gen_rtx_SET (scratch, eqv)); ++ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (3, scratch, x, seq), ++ UNSPEC_TPREL); ++ // eqv = gen_rtx_CONST (Pmode, eqv); ++ emit_insn (gen_rtx_SET (scratch, eqv)); ++ ++ dest = gen_reg_rtx (Pmode); ++ emit_insn (gen_adddi3 (dest, tp, scratch)); ++ } ++ return dest; ++ } ++ ++ case TLS_MODEL_LOCAL_EXEC: ++ eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, x), UNSPEC_TPREL); ++ eqv = gen_rtx_CONST (Pmode, eqv); ++ tp = gen_reg_rtx (Pmode); ++ ++ emit_insn (gen_get_thread_pointerdi (tp)); ++ if (sw_64_tls_size == 32) ++ { ++ rtx temp = gen_rtx_HIGH (Pmode, eqv); ++ temp = gen_rtx_PLUS (Pmode, tp, temp); ++ tp = gen_reg_rtx (Pmode); ++ emit_insn (gen_rtx_SET (tp, temp)); ++ } ++ return gen_rtx_LO_SUM (Pmode, tp, eqv); ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (local_symbolic_operand (x, Pmode)) ++ { ++ if (small_symbolic_operand (x, Pmode)) ++ return x; ++ else ++ { ++ if (can_create_pseudo_p ()) ++ scratch = gen_reg_rtx (Pmode); ++ emit_insn (gen_rtx_SET (scratch, gen_rtx_HIGH (Pmode, x))); ++ return gen_rtx_LO_SUM (Pmode, scratch, x); ++ } ++ } ++ } ++ ++ return NULL; ++ ++ split_addend : { ++ HOST_WIDE_INT low, high; ++ ++ low = ((addend & 0xffff) ^ 0x8000) - 0x8000; ++ addend -= low; ++ high = ((addend & 0xffffffff) ^ 0x80000000) - 0x80000000; ++ addend -= high; ++ 
++ if (addend) ++ x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (addend), ++ (!can_create_pseudo_p () ? scratch : NULL_RTX), ++ 1, OPTAB_LIB_WIDEN); ++ if (high) ++ x = expand_simple_binop (Pmode, PLUS, x, GEN_INT (high), ++ (!can_create_pseudo_p () ? scratch : NULL_RTX), ++ 1, OPTAB_LIB_WIDEN); ++ ++ return plus_constant (Pmode, x, low); ++ } ++} ++ ++/* Try machine-dependent ways of modifying an illegitimate address ++ to be legitimate. Return X or the new, valid address. */ ++ ++static rtx ++sw_64_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED, machine_mode mode) ++{ ++ rtx new_x = sw_64_legitimize_address_1 (x, NULL_RTX, mode); ++ return new_x ? new_x : x; ++} ++ ++/* Return true if ADDR has an effect that depends on the machine mode it ++ is used for. On the Sw_64 this is true only for the unaligned modes. ++ We can simplify the test since we know that the address must be valid. */ ++ ++static bool ++sw_64_mode_dependent_address_p (const_rtx addr, ++ addr_space_t as ATTRIBUTE_UNUSED) ++{ ++ return GET_CODE (addr) == AND; ++} ++ ++/* Primarily this is required for TLS symbols, but given that our move ++ patterns *ought* to be able to handle any symbol at any time, we ++ should never be spilling symbolic operands to the constant pool, ever. */ ++ ++static bool ++sw_64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x) ++{ ++ enum rtx_code code = GET_CODE (x); ++ return code == SYMBOL_REF || code == LABEL_REF || code == CONST; ++} ++ ++/* We do not allow indirect calls to be optimized into sibling calls, nor ++ can we allow a call to a function with a different GP to be optimized ++ into a sibcall. */ ++ ++static bool ++sw_64_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED) ++{ ++ /* Can't do indirect tail calls, since we don't know if the target ++ uses the same GP. */ ++ if (!decl) ++ return false; ++ ++ /* Otherwise, we can make a tail call if the target function shares ++ the same GP. 
*/ ++ return decl_has_samegp (decl); ++} ++ ++bool ++some_small_symbolic_operand_int (rtx x) ++{ ++ subrtx_var_iterator::array_type array; ++ FOR_EACH_SUBRTX_VAR (iter, array, x, ALL) ++ { ++ rtx x = *iter; ++ /* Don't re-split. */ ++ if (GET_CODE (x) == LO_SUM) ++ iter.skip_subrtxes (); ++ else if (small_symbolic_operand (x, Pmode)) ++ return true; ++ } ++ return false; ++} ++ ++rtx ++split_small_symbolic_operand (rtx x) ++{ ++ x = copy_insn (x); ++ subrtx_ptr_iterator::array_type array; ++ FOR_EACH_SUBRTX_PTR (iter, array, &x, ALL) ++ { ++ rtx *ptr = *iter; ++ rtx x = *ptr; ++ /* Don't re-split. */ ++ if (GET_CODE (x) == LO_SUM) ++ iter.skip_subrtxes (); ++ else if (small_symbolic_operand (x, Pmode)) ++ { ++ *ptr = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, x); ++ iter.skip_subrtxes (); ++ } ++ } ++ return x; ++} ++ ++/* Indicate that INSN cannot be duplicated. This is true for any insn ++ that we've marked with gpdisp relocs, since those have to stay in ++ 1-1 correspondence with one another. ++ ++ Technically we could copy them if we could set up a mapping from one ++ sequence number to another, across the set of insns to be duplicated. ++ This seems overly complicated and error-prone since interblock motion ++ from sched-ebb could move one of the pair of insns to a different block. ++ ++ Also cannot allow call insns to be duplicated. If they throw exceptions, ++ then they'll be in a different block from their ldgp. Which could lead ++ the bb reorder code to think that it would be ok to copy just the block ++ containing the call and branch to the block containing the ldgp. */ ++ ++static bool ++sw_64_cannot_copy_insn_p (rtx_insn *insn) ++{ ++ if (!reload_completed || !TARGET_EXPLICIT_RELOCS) ++ return false; ++ if (recog_memoized (insn) >= 0) ++ return get_attr_cannot_copy (insn); ++ else ++ return false; ++} ++ ++/* Try a machine-dependent way of reloading an illegitimate address ++ operand. If we find one, push the reload and return the new rtx. 
*/ ++ ++rtx ++sw_64_legitimize_reload_address (rtx x, machine_mode mode ATTRIBUTE_UNUSED, ++ int opnum, int type, ++ int ind_levels ATTRIBUTE_UNUSED) ++{ ++ /* We must recognize output that we have already generated ourselves. */ ++ if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 0)) == PLUS ++ && REG_P (XEXP (XEXP (x, 0), 0)) && CONST_INT_P (XEXP (XEXP (x, 0), 1)) ++ && CONST_INT_P (XEXP (x, 1))) ++ { ++ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, BASE_REG_CLASS, ++ GET_MODE (x), VOIDmode, 0, 0, opnum, ++ (enum reload_type) type); ++ return x; ++ } ++ ++ /* We wish to handle large displacements off a base register by ++ splitting the addend across an ldih and the mem insn. This ++ cuts number of extra insns needed from 3 to 1. */ ++ if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0)) ++ && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER ++ && REGNO_OK_FOR_BASE_P (REGNO (XEXP (x, 0))) && CONST_INT_P (XEXP (x, 1))) ++ { ++ HOST_WIDE_INT val = INTVAL (XEXP (x, 1)); ++ HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT high ++ = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000; ++ ++ /* Check for 32-bit overflow. */ ++ if (high + low != val) ++ return NULL_RTX; ++ ++ /* Reload the high part into a base reg; leave the low part ++ in the mem directly. */ ++ x = gen_rtx_PLUS (GET_MODE (x), ++ gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0), ++ GEN_INT (high)), ++ GEN_INT (low)); ++ ++ push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL, BASE_REG_CLASS, ++ GET_MODE (x), VOIDmode, 0, 0, opnum, ++ (enum reload_type) type); ++ return x; ++ } ++ ++ return NULL_RTX; ++} ++ ++/* Return the cost of moving between registers of various classes. Moving ++ between FLOAT_REGS and anything else except float regs is expensive. ++ In fact, we make it quite expensive because we really don't want to ++ do these moves unless it is clearly worth it. Optimizations may ++ reduce the impact of not being able to allocate a pseudo to a ++ hard register. 
*/ ++ ++static int ++sw_64_register_move_cost (machine_mode mode, reg_class_t from_i, ++ reg_class_t to_i) ++{ ++ enum reg_class from = (enum reg_class) from_i; ++ enum reg_class to = (enum reg_class) to_i; ++ if (!flag_sw_rtx_cost) ++ { ++ if ((from == FLOAT_REGS) == (to == FLOAT_REGS)) ++ return 2; ++ if (TARGET_FIX) ++ return (from == FLOAT_REGS) ? 6 : 8; ++ return 4 + 2 * sw_64_memory_latency; ++ } ++ if (from == R0_REG || from == R24_REG || from == R25_REG || from == R27_REG) ++ from = GENERAL_REGS; ++ if (to == R0_REG || to == R24_REG || to == R25_REG || to == R27_REG) ++ to = GENERAL_REGS; ++ if (GET_MODE_SIZE (mode) == 32) ++ { ++ if (from == GENERAL_REGS && to == GENERAL_REGS) ++ return 1; ++ else if (from == GENERAL_REGS) ++ return 16; ++ else if (to == GENERAL_REGS) ++ return 16; ++ if (!TARGET_SW_SIMD) ++ return 34; ++ return 2; ++ } ++ if (from == GENERAL_REGS && to == GENERAL_REGS) ++ return 1; ++ else if (from == GENERAL_REGS) ++ return 4; ++ else if (to == GENERAL_REGS) ++ return 4; ++ return 2; ++} ++/* Return the cost of moving data of MODE from a register to ++ or from memory. On the Sw_64, bump this up a bit. */ ++ ++static int ++sw_64_memory_move_cost (machine_mode /*mode*/, reg_class_t /*regclass*/, ++ bool /*in*/) ++{ ++ if (flag_sw_rtx_cost) ++ return sw_64_memory_latency; ++ return 2 * sw_64_memory_latency; ++} ++ ++/* Compute a (partial) cost for rtx X. Return true if the complete ++ cost has been computed, and false if subexpressions should be ++ scanned. In either case, *TOTAL contains the cost result. 
*/ ++ ++static bool ++sw_64_rtx_costs (rtx x, machine_mode mode, int outer_code, int opno, int *total, ++ bool speed) ++{ ++ int code = GET_CODE (x); ++ bool float_mode_p = FLOAT_MODE_P (mode); ++ const struct sw_64_rtx_cost_data *cost_data; ++ ++ if (!speed) ++ cost_data = &sw_64_rtx_cost_size; ++ else ++ if (flag_sw_rtx_cost) ++ cost_data = &sw_64_rtx_cost_data[1]; ++ else ++ cost_data = &sw_64_rtx_cost_data[sw_64_tune]; ++ ++ switch (code) ++ { ++ case CONST_INT: ++ /* If this is an 8-bit constant, return zero since it can be used ++ nearly anywhere with no cost. If it is a valid operand for an ++ ADD or AND, likewise return 0 if we know it will be used in that ++ context. Otherwise, return 2 since it might be used there later. ++ All other constants take at least two insns. */ ++ if (INTVAL (x) >= 0 && INTVAL (x) < 256) ++ { ++ *total = 0; ++ return true; ++ } ++ /* FALLTHRU */ ++ ++ case CONST_DOUBLE: ++ case CONST_WIDE_INT: ++ if (x == CONST0_RTX (mode)) ++ *total = 0; ++ else if ((outer_code == PLUS && add_operand (x, VOIDmode)) ++ || (outer_code == AND && and_operand (x, VOIDmode))) ++ *total = 0; ++ else if (add_operand (x, VOIDmode) || and_operand (x, VOIDmode)) ++ *total = 2; ++ else ++ *total = COSTS_N_INSNS (2); ++ return true; ++ ++ case CONST: ++ case SYMBOL_REF: ++ case LABEL_REF: ++ if (TARGET_EXPLICIT_RELOCS && small_symbolic_operand (x, VOIDmode)) ++ *total = COSTS_N_INSNS (outer_code != MEM); ++ else if (TARGET_EXPLICIT_RELOCS && local_symbolic_operand (x, VOIDmode)) ++ *total = COSTS_N_INSNS (1 + (outer_code != MEM)); ++ else if (tls_symbolic_operand_type (x)) ++ /* ??? How many insns do we emit here? More than one... */ ++ *total = COSTS_N_INSNS (15); ++ else ++ /* Otherwise we do a load from the GOT. */ ++ *total = COSTS_N_INSNS (!speed ? 1 : sw_64_memory_latency); ++ return true; ++ ++ case HIGH: ++ /* This is effectively an add_operand. 
*/ ++ *total = 2; ++ return true; ++ ++ case PLUS: ++ case MINUS: ++ if (float_mode_p) ++ *total = cost_data->fp_add; ++ else if (GET_CODE (XEXP (x, 0)) == ASHIFT ++ && const48_operand (XEXP (XEXP (x, 0), 1), VOIDmode)) ++ { ++ *total = (rtx_cost (XEXP (XEXP (x, 0), 0), mode, ++ (enum rtx_code) outer_code, opno, speed) ++ + rtx_cost (XEXP (x, 1), mode, (enum rtx_code) outer_code, ++ opno, speed) ++ + COSTS_N_INSNS (1)); ++ return true; ++ } ++ return false; ++ ++ case MULT: ++ if (float_mode_p) ++ *total = cost_data->fp_mult; ++ else if (mode == DImode) ++ *total = cost_data->int_mult_di; ++ else ++ *total = cost_data->int_mult_si; ++ return false; ++ ++ case ASHIFT: ++ if (CONST_INT_P (XEXP (x, 1)) && INTVAL (XEXP (x, 1)) <= 3) ++ { ++ *total = COSTS_N_INSNS (1); ++ return false; ++ } ++ /* FALLTHRU */ ++ ++ case ASHIFTRT: ++ case LSHIFTRT: ++ *total = cost_data->int_shift; ++ return false; ++ ++ case IF_THEN_ELSE: ++ if (float_mode_p) ++ *total = cost_data->fp_add; ++ else ++ *total = cost_data->int_cmov; ++ if (flag_sw_rtx_cost && float_mode_p) ++ *total = COSTS_N_INSNS (2); ++ return false; ++ ++ case DIV: ++ case UDIV: ++ case MOD: ++ case UMOD: ++ if (!float_mode_p) ++ *total = cost_data->int_div; ++ else if (mode == SFmode) ++ *total = cost_data->fp_div_sf; ++ else ++ *total = cost_data->fp_div_df; ++ return false; ++ ++ case MEM: ++ *total = COSTS_N_INSNS (!speed ? 
1 : sw_64_memory_latency); ++ return true; ++ ++ case NEG: ++ if (!float_mode_p) ++ { ++ *total = COSTS_N_INSNS (1); ++ return false; ++ } ++ if (flag_sw_rtx_cost) ++ { ++ *total = COSTS_N_INSNS (2); ++ return false; ++ } ++ /* FALLTHRU */ ++ ++ case ABS: ++ if (!float_mode_p) ++ { ++ *total = COSTS_N_INSNS (1) + cost_data->int_cmov; ++ return false; ++ } ++ /* FALLTHRU */ ++ ++ case FLOAT: ++ case UNSIGNED_FLOAT: ++ case FIX: ++ case UNSIGNED_FIX: ++ if (flag_sw_rtx_cost) ++ { ++ *total = COSTS_N_INSNS (4); ++ return false; ++ } ++ case FLOAT_TRUNCATE: ++ *total = cost_data->fp_add; ++ return false; ++ ++ case FLOAT_EXTEND: ++ if (MEM_P (XEXP (x, 0))) ++ *total = 0; ++ else ++ *total = cost_data->fp_add; ++ return false; ++ ++ default: ++ return false; ++ } ++} ++ ++/* REF is an alignable memory location. Place an aligned SImode ++ reference into *PALIGNED_MEM and the number of bits to shift into ++ *PBITNUM. SCRATCH is a free register for use in reloading out ++ of range stack slots. */ ++ ++void ++get_aligned_mem (rtx ref, rtx *paligned_mem, rtx *pbitnum) ++{ ++ rtx base; ++ HOST_WIDE_INT disp, offset; ++ ++ gcc_assert (MEM_P (ref)); ++ ++ if (reload_in_progress) ++ { ++ base = find_replacement (&XEXP (ref, 0)); ++ gcc_assert (memory_address_p (GET_MODE (ref), base)); ++ } ++ else ++ base = XEXP (ref, 0); ++ ++ if (GET_CODE (base) == PLUS) ++ disp = INTVAL (XEXP (base, 1)), base = XEXP (base, 0); ++ else ++ disp = 0; ++ ++ /* Find the byte offset within an aligned word. If the memory itself is ++ claimed to be aligned, believe it. Otherwise, aligned_memory_operand ++ will have examined the base register and determined it is aligned, and ++ thus displacements from it are naturally alignable. */ ++ if (MEM_ALIGN (ref) >= 32) ++ offset = 0; ++ else ++ offset = disp & 3; ++ ++ /* The location should not cross aligned word boundary. */ ++ gcc_assert (offset + GET_MODE_SIZE (GET_MODE (ref)) ++ <= GET_MODE_SIZE (SImode)); ++ ++ /* Access the entire aligned word. 
*/ ++ *paligned_mem = widen_memory_access (ref, SImode, -offset); ++ ++ /* Convert the byte offset within the word to a bit offset. */ ++ offset *= BITS_PER_UNIT; ++ *pbitnum = GEN_INT (offset); ++} ++ ++/* Similar, but just get the address. Handle the two reload cases. ++ Add EXTRA_OFFSET to the address we return. */ ++ ++rtx ++get_unaligned_address (rtx ref) ++{ ++ rtx base; ++ HOST_WIDE_INT offset = 0; ++ ++ gcc_assert (MEM_P (ref)); ++ ++ if (reload_in_progress) ++ { ++ base = find_replacement (&XEXP (ref, 0)); ++ gcc_assert (memory_address_p (GET_MODE (ref), base)); ++ } ++ else ++ base = XEXP (ref, 0); ++ ++ if (GET_CODE (base) == PLUS) ++ offset += INTVAL (XEXP (base, 1)), base = XEXP (base, 0); ++ ++ return plus_constant (Pmode, base, offset); ++} ++ ++/* Compute a value X, such that X & 7 == (ADDR + OFS) & 7. ++ X is always returned in a register. */ ++ ++rtx ++get_unaligned_offset (rtx addr, HOST_WIDE_INT ofs) ++{ ++ if (GET_CODE (addr) == PLUS) ++ { ++ ofs += INTVAL (XEXP (addr, 1)); ++ addr = XEXP (addr, 0); ++ } ++ ++ return expand_simple_binop (Pmode, PLUS, addr, GEN_INT (ofs & 7), NULL_RTX, 1, ++ OPTAB_LIB_WIDEN); ++} ++ ++/* On the Sw_64, all (non-symbolic) constants except zero go into ++ a floating-point register via memory. Note that we cannot ++ return anything that is not a subset of RCLASS, and that some ++ symbolic constants cannot be dropped to memory. */ ++ ++enum reg_class ++sw_64_preferred_reload_class (rtx x, enum reg_class rclass) ++{ ++ /* Zero is present in any register class. */ ++ if (x == CONST0_RTX (GET_MODE (x))) ++ return rclass; ++ ++ /* These sorts of constants we can easily drop to memory. 
*/ ++ if (CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) ++ || GET_CODE (x) == CONST_VECTOR) ++ { ++ if (rclass == FLOAT_REGS) ++ return NO_REGS; ++ if (rclass == ALL_REGS) ++ return GENERAL_REGS; ++ return rclass; ++ } ++ ++ /* All other kinds of constants should not (and in the case of HIGH ++ cannot) be dropped to memory -- instead we use a GENERAL_REGS ++ secondary reload. */ ++ if (CONSTANT_P (x)) ++ return (rclass == ALL_REGS ? GENERAL_REGS : rclass); ++ ++ return rclass; ++} ++ ++/* Inform reload about cases where moving X with a mode MODE to a register in ++ RCLASS requires an extra scratch or immediate register. Return the class ++ needed for the immediate register. */ ++ ++static reg_class_t ++sw_64_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i, ++ machine_mode mode, secondary_reload_info *sri) ++{ ++ enum reg_class rclass = (enum reg_class) rclass_i; ++ ++ /* Loading and storing HImode or QImode values to and from memory ++ usually requires a scratch register. */ ++ if (!TARGET_BWX && (mode == QImode || mode == HImode || mode == CQImode)) ++ { ++ if (any_memory_operand (x, mode)) ++ { ++ if (in_p) ++ { ++ if (!aligned_memory_operand (x, mode)) ++ sri->icode = direct_optab_handler (reload_in_optab, mode); ++ } ++ else ++ sri->icode = direct_optab_handler (reload_out_optab, mode); ++ return NO_REGS; ++ } ++ } ++ ++ /* We also cannot do integral arithmetic into FP regs, as might result ++ from register elimination into a DImode fp register. */ ++ if (rclass == FLOAT_REGS) ++ { ++ if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND) ++ return GENERAL_REGS; ++ if (in_p && INTEGRAL_MODE_P (mode) && !MEM_P (x) && !REG_P (x) ++ && !CONST_INT_P (x)) ++ return GENERAL_REGS; ++ } ++ ++ return NO_REGS; ++} ++ ++/* Implement TARGET_SECONDARY_MEMORY_NEEDED. ++ ++ If we are copying between general and FP registers, we need a memory ++ location unless the FIX extension is available. 
*/ ++ ++static bool ++sw_64_secondary_memory_needed (machine_mode, reg_class_t class1, ++ reg_class_t class2) ++{ ++ return (!TARGET_FIX ++ && ((class1 == FLOAT_REGS && class2 != FLOAT_REGS) ++ || (class2 == FLOAT_REGS && class1 != FLOAT_REGS))); ++} ++ ++/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE. If MODE is ++ floating-point, use it. Otherwise, widen to a word like the default. ++ This is needed because we always store integers in FP registers in ++ quadword format. This whole area is very tricky! */ ++ ++static machine_mode ++sw_64_secondary_memory_needed_mode (machine_mode mode) ++{ ++ if (GET_MODE_CLASS (mode) == MODE_FLOAT) ++ return mode; ++ if (GET_MODE_SIZE (mode) >= 4) ++ return mode; ++ return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require (); ++} ++ ++/* Given SEQ, which is an INSN list, look for any MEMs in either ++ a SET_DEST or a SET_SRC and copy the in-struct, unchanging, and ++ volatile flags from REF into each of the MEMs found. If REF is not ++ a MEM, don't do anything. */ ++ ++void ++sw_64_set_memflags (rtx seq, rtx ref) ++{ ++ rtx_insn *insn; ++ ++ if (!MEM_P (ref)) ++ return; ++ ++ /* This is only called from sw_64.md, after having had something ++ generated from one of the insn patterns. So if everything is ++ zero, the pattern is already up-to-date. */ ++ if (!MEM_VOLATILE_P (ref) && !MEM_NOTRAP_P (ref) && !MEM_READONLY_P (ref)) ++ return; ++ ++ subrtx_var_iterator::array_type array; ++ for (insn = as_a (seq); insn; insn = NEXT_INSN (insn)) ++ if (INSN_P (insn)) ++ FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST) ++ { ++ rtx x = *iter; ++ if (MEM_P (x)) ++ { ++ MEM_VOLATILE_P (x) = MEM_VOLATILE_P (ref); ++ MEM_NOTRAP_P (x) = MEM_NOTRAP_P (ref); ++ MEM_READONLY_P (x) = MEM_READONLY_P (ref); ++ /* Sadly, we cannot use alias sets because the extra ++ aliasing produced by the AND interferes. 
Given that ++ two-byte quantities are the only thing we would be ++ able to differentiate anyway, there does not seem to ++ be any point in convoluting the early out of the ++ alias check. */ ++ iter.skip_subrtxes (); ++ } ++ } ++ else ++ gcc_unreachable (); ++} ++ ++static rtx ++sw_64_emit_set_const (rtx, machine_mode, HOST_WIDE_INT, int, bool); ++ ++/* Internal routine for sw_64_emit_set_const to check for N or below insns. ++ If NO_OUTPUT is true, then we only check to see if N insns are possible, ++ and return pc_rtx if successful. */ ++ ++static rtx ++sw_64_emit_set_const_1 (rtx target, machine_mode mode, HOST_WIDE_INT c, int n, ++ bool no_output) ++{ ++ HOST_WIDE_INT new_const; ++ int i, bits; ++ /* Use a pseudo if highly optimizing and still generating RTL. */ ++ rtx subtarget ++ = (flag_expensive_optimizations && can_create_pseudo_p () ? 0 : target); ++ rtx temp, insn; ++ ++ /* If this is a sign-extended 32-bit constant, we can do this in at most ++ three insns, so do it if we have enough insns left. */ ++ ++ if (c >> 31 == -1 || c >> 31 == 0) ++ { ++ HOST_WIDE_INT low = ((c & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT tmp1 = c - low; ++ HOST_WIDE_INT high = (((tmp1 >> 16) & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT extra = 0; ++ ++ /* If HIGH will be interpreted as negative but the constant is ++ positive, we must adjust it to do two ldha insns. */ ++ ++ if ((high & 0x8000) != 0 && c >= 0) ++ { ++ extra = 0x4000; ++ tmp1 -= 0x40000000; ++ high = ((tmp1 >> 16) & 0xffff) - 2 * ((tmp1 >> 16) & 0x8000); ++ } ++ ++ if (c == low || (low == 0 && extra == 0)) ++ { ++ /* We used to use copy_to_suggested_reg (GEN_INT (c), target, mode) ++ but that meant that we can't handle INT_MIN on 32-bit machines ++ (like NT/Sw_64), because we recurse indefinitely through ++ emit_move_insn to gen_movdi. So instead, since we know exactly ++ what we want, create it explicitly. 
*/ ++ ++ if (no_output) ++ return pc_rtx; ++ if (target == NULL) ++ target = gen_reg_rtx (mode); ++ emit_insn (gen_rtx_SET (target, GEN_INT (c))); ++ return target; ++ } ++ else if (n >= 2 + (extra != 0)) ++ { ++ if (no_output) ++ return pc_rtx; ++ if (!can_create_pseudo_p ()) ++ { ++ emit_insn (gen_rtx_SET (target, GEN_INT (high << 16))); ++ temp = target; ++ } ++ else ++ temp ++ = copy_to_suggested_reg (GEN_INT (high << 16), subtarget, mode); ++ ++ /* As of 2002-02-23, addsi3 is only available when not optimizing. ++ This means that if we go through expand_binop, we'll try to ++ generate extensions, etc, which will require new pseudos, which ++ will fail during some split phases. The SImode add patterns ++ still exist, but are not named. So build the insns by hand. */ ++ ++ if (extra != 0) ++ { ++ if (!subtarget) ++ subtarget = gen_reg_rtx (mode); ++ insn = gen_rtx_PLUS (mode, temp, GEN_INT (extra << 16)); ++ insn = gen_rtx_SET (subtarget, insn); ++ emit_insn (insn); ++ temp = subtarget; ++ } ++ ++ if (target == NULL) ++ target = gen_reg_rtx (mode); ++ insn = gen_rtx_PLUS (mode, temp, GEN_INT (low)); ++ insn = gen_rtx_SET (target, insn); ++ emit_insn (insn); ++ return target; ++ } ++ } ++ ++ /* If we couldn't do it that way, try some other methods. But if we have ++ no instructions left, don't bother. Likewise, if this is SImode and ++ we can't make pseudos, we can't do anything since the expand_binop ++ and expand_unop calls will widen and try to make pseudos. */ ++ ++ if (n == 1 || (mode == SImode && !can_create_pseudo_p ())) ++ return 0; ++ ++ /* Next, see if we can load a related constant and then shift and possibly ++ negate it to get the constant we want. Try this once each increasing ++ numbers of insns. */ ++ ++ for (i = 1; i < n; i++) ++ { ++ /* First, see if minus some low bits, we've an easy load of ++ high bits. 
*/ ++ ++ new_const = ((c & 0xffff) ^ 0x8000) - 0x8000; ++ if (new_const != 0) ++ { ++ temp = sw_64_emit_set_const (subtarget, mode, c - new_const, i, ++ no_output); ++ if (temp) ++ { ++ if (no_output) ++ return temp; ++ return expand_binop (mode, add_optab, temp, GEN_INT (new_const), ++ target, 0, OPTAB_WIDEN); ++ } ++ } ++ ++ /* Next try complementing. */ ++ temp = sw_64_emit_set_const (subtarget, mode, ~c, i, no_output); ++ if (temp) ++ { ++ if (no_output) ++ return temp; ++ return expand_unop (mode, one_cmpl_optab, temp, target, 0); ++ } ++ ++ /* Next try to form a constant and do a left shift. We can do this ++ if some low-order bits are zero; the exact_log2 call below tells ++ us that information. The bits we are shifting out could be any ++ value, but here we'll just try the 0- and sign-extended forms of ++ the constant. To try to increase the chance of having the same ++ constant in more than one insn, start at the highest number of ++ bits to shift, but try all possibilities in case a ZAPNOT will ++ be useful. */ ++ ++ bits = exact_log2 (c & -c); ++ if (bits > 0) ++ for (; bits > 0; bits--) ++ { ++ new_const = c >> bits; ++ temp ++ = sw_64_emit_set_const (subtarget, mode, new_const, i, no_output); ++ if (!temp && c < 0) ++ { ++ new_const = (unsigned HOST_WIDE_INT) c >> bits; ++ temp = sw_64_emit_set_const (subtarget, mode, new_const, i, ++ no_output); ++ } ++ if (temp) ++ { ++ if (no_output) ++ return temp; ++ return expand_binop (mode, ashl_optab, temp, GEN_INT (bits), ++ target, 0, OPTAB_WIDEN); ++ } ++ } ++ ++ /* Now try high-order zero bits. Here we try the shifted-in bits as ++ all zero and all ones. Be careful to avoid shifting outside the ++ mode and to avoid shifting outside the host wide int size. 
*/ ++ ++ bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8) ++ - floor_log2 (c) - 1); ++ if (bits > 0) ++ for (; bits > 0; bits--) ++ { ++ new_const = c << bits; ++ temp ++ = sw_64_emit_set_const (subtarget, mode, new_const, i, no_output); ++ if (!temp) ++ { ++ new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1); ++ temp = sw_64_emit_set_const (subtarget, mode, new_const, i, ++ no_output); ++ } ++ if (temp) ++ { ++ if (no_output) ++ return temp; ++ return expand_binop (mode, lshr_optab, temp, GEN_INT (bits), ++ target, 1, OPTAB_WIDEN); ++ } ++ } ++ ++ /* Now try high-order 1 bits. We get that with a sign-extension. ++ But one bit isn't enough here. Be careful to avoid shifting outside ++ the mode and to avoid shifting outside the host wide int size. */ ++ ++ bits = (MIN (HOST_BITS_PER_WIDE_INT, GET_MODE_SIZE (mode) * 8) ++ - floor_log2 (~c) - 2); ++ if (bits > 0) ++ for (; bits > 0; bits--) ++ { ++ new_const = c << bits; ++ temp ++ = sw_64_emit_set_const (subtarget, mode, new_const, i, no_output); ++ if (!temp) ++ { ++ new_const = (c << bits) | ((HOST_WIDE_INT_1U << bits) - 1); ++ temp = sw_64_emit_set_const (subtarget, mode, new_const, i, ++ no_output); ++ } ++ if (temp) ++ { ++ if (no_output) ++ return temp; ++ return expand_binop (mode, ashr_optab, temp, GEN_INT (bits), ++ target, 0, OPTAB_WIDEN); ++ } ++ } ++ } ++ ++ /* Finally, see if can load a value into the target that is the same as the ++ constant except that all bytes that are 0 are changed to be 0xff. If we ++ can, then we can do a ZAPNOT to obtain the desired constant. */ ++ ++ new_const = c; ++ for (i = 0; i < 64; i += 8) ++ if ((new_const & ((HOST_WIDE_INT) 0xff << i)) == 0) ++ new_const |= (HOST_WIDE_INT) 0xff << i; ++ ++ /* We are only called for SImode and DImode. If this is SImode, ensure that ++ we are sign extended to a full word. 
*/ ++ ++ if (mode == SImode) ++ new_const = ((new_const & 0xffffffff) ^ 0x80000000) - 0x80000000; ++ ++ if (new_const != c) ++ { ++ temp ++ = sw_64_emit_set_const (subtarget, mode, new_const, n - 1, no_output); ++ if (temp) ++ { ++ if (no_output) ++ return temp; ++ return expand_binop (mode, and_optab, temp, GEN_INT (c | ~new_const), ++ target, 0, OPTAB_WIDEN); ++ } ++ } ++ ++ return 0; ++} ++ ++/* Try to output insns to set TARGET equal to the constant C if it can be ++ done in less than N insns. Do all computations in MODE. Returns the place ++ where the output has been placed if it can be done and the insns have been ++ emitted. If it would take more than N insns, zero is returned and no ++ insns and emitted. */ ++ ++static rtx ++sw_64_emit_set_const (rtx target, machine_mode mode, HOST_WIDE_INT c, int n, ++ bool no_output) ++{ ++ machine_mode orig_mode = mode; ++ rtx orig_target = target; ++ rtx result = 0; ++ int i; ++ ++ /* If we can't make any pseudos, TARGET is an SImode hard register, we ++ can't load this constant in one insn, do this in DImode. */ ++ if (!can_create_pseudo_p () && mode == SImode && REG_P (target) ++ && REGNO (target) < FIRST_PSEUDO_REGISTER) ++ { ++ result = sw_64_emit_set_const_1 (target, mode, c, 1, no_output); ++ if (result) ++ return result; ++ ++ target = no_output ? NULL : gen_lowpart (DImode, target); ++ mode = DImode; ++ } ++ else if (mode == V8QImode || mode == V4HImode || mode == V2SImode) ++ { ++ target = no_output ? NULL : gen_lowpart (DImode, target); ++ mode = DImode; ++ } ++ ++ /* Try 1 insn, then 2, then up to N. 
*/ ++ for (i = 1; i <= n; i++) ++ { ++ result = sw_64_emit_set_const_1 (target, mode, c, i, no_output); ++ if (result) ++ { ++ rtx_insn *insn; ++ rtx set; ++ ++ if (no_output) ++ return result; ++ ++ insn = get_last_insn (); ++ set = single_set (insn); ++ if (!CONSTANT_P (SET_SRC (set))) ++ set_unique_reg_note (get_last_insn (), REG_EQUAL, GEN_INT (c)); ++ break; ++ } ++ } ++ ++ /* Allow for the case where we changed the mode of TARGET. */ ++ if (result) ++ { ++ if (result == target) ++ result = orig_target; ++ else if (mode != orig_mode) ++ result = gen_lowpart (orig_mode, result); ++ } ++ ++ return result; ++} ++ ++/* Having failed to find a 3 insn sequence in sw_64_emit_set_const, ++ fall back to a straight forward decomposition. We do this to avoid ++ exponential run times encountered when looking for longer sequences ++ with sw_64_emit_set_const. */ ++ ++static rtx ++sw_64_emit_set_long_const (rtx target, HOST_WIDE_INT c1) ++{ ++ HOST_WIDE_INT d1, d2, d3, d4; ++ ++ /* Decompose the entire word */ ++ ++ d1 = ((c1 & 0xffff) ^ 0x8000) - 0x8000; ++ c1 -= d1; ++ d2 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000; ++ c1 = (c1 - d2) >> 32; ++ d3 = ((c1 & 0xffff) ^ 0x8000) - 0x8000; ++ c1 -= d3; ++ d4 = ((c1 & 0xffffffff) ^ 0x80000000) - 0x80000000; ++ gcc_assert (c1 == d4); ++ ++ /* Construct the high word */ ++ if (d4) ++ { ++ emit_move_insn (target, GEN_INT (d4)); ++ if (d3) ++ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d3))); ++ } ++ else ++ emit_move_insn (target, GEN_INT (d3)); ++ ++ /* Shift it into place */ ++ emit_move_insn (target, gen_rtx_ASHIFT (DImode, target, GEN_INT (32))); ++ ++ /* Add in the low bits. */ ++ if (d2) ++ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d2))); ++ if (d1) ++ emit_move_insn (target, gen_rtx_PLUS (DImode, target, GEN_INT (d1))); ++ ++ return target; ++} ++ ++/* Given an integral CONST_INT or CONST_VECTOR, return the low 64 bits. 
*/ ++ ++static HOST_WIDE_INT ++sw_64_extract_integer (rtx x) ++{ ++ if (GET_CODE (x) == CONST_VECTOR) ++ x = simplify_subreg (DImode, x, GET_MODE (x), 0); ++ ++ gcc_assert (CONST_INT_P (x)); ++ ++ return INTVAL (x); ++} ++ ++/* -fsw-rsqrt function BEGIN */ ++/************************************************* ++*** THE ALGORITHM : ++*** float fast_inverse_sqrt (float x) ++*** { ++*** float xhalf = 0.5f * x; ++*** int i = *(int *)&x ; ++*** i = 0x5f3759df - (i >> 1); // 0x5f3759df is a Magic Number ++*** x = *(float *)&i; ++*** x = x *(1.5f - xhalf * x *x); ++*** x = x *(1.5f - xhalf * x *x); // SPEC2006 435 need this ++*** return x; ++*** } ++*** ++******************************************************/ ++/* Load up a constant. all of the vector elements. */ ++static rtx ++sw_64_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst) ++{ ++ rtx reg; ++ ++ if (mode == SFmode || mode == DFmode) ++ { ++ rtx d = const_double_from_real_value (dconst, mode); ++ reg = force_reg (mode, d); ++ } ++ else ++ gcc_unreachable (); ++ ++ return reg; ++} ++ ++void ++sw_64_emit_rsqrt (rtx dst, rtx x, bool note_p) ++{ ++ machine_mode mode = GET_MODE (dst); ++ rtx one, xhalf, mhalf, i, magical, x0, x1, x2; ++ ++ enum insn_code code = optab_handler (smul_optab, mode); ++ insn_gen_fn gen_mul = GEN_FCN (code); ++ gcc_assert (code != CODE_FOR_nothing); ++ ++ enum insn_code code1 = optab_handler (sub_optab, SImode); ++ insn_gen_fn gen_sub = GEN_FCN (code1); ++ gcc_assert (code1 != CODE_FOR_nothing); ++ ++ enum insn_code code2 = optab_handler (fnma_optab, mode); ++ insn_gen_fn gen_fnma = GEN_FCN (code2); ++ gcc_assert (code2 != CODE_FOR_nothing); ++ ++ enum insn_code code3 = optab_handler (add_optab, mode); ++ insn_gen_fn gen_add = GEN_FCN (code3); ++ gcc_assert (code3 != CODE_FOR_nothing); ++ ++ one = sw_64_load_constant_and_splat (mode, dconst1); ++ mhalf = sw_64_load_constant_and_splat (mode, dconsthalf); ++ ++ /* xhalf = 0.5f * x */ ++ xhalf = gen_reg_rtx (mode); ++ 
emit_insn (gen_mul (xhalf, mhalf, x)); ++ ++ if (x == CONST0_RTX (mode)) ++ gcc_unreachable (); ++ ++ /* int i = *(int *)&x */ ++ rtx vreg = gen_rtx_REG (SFmode, 28); ++ emit_insn ( ++ gen_rtx_SET (vreg, gen_rtx_UNSPEC (mode, gen_rtvec (1, x), UNSPEC_FIMOVS))); ++ ++ /* i = i >> 1 */ ++ i = gen_reg_rtx (DImode); ++ rtx subreg = gen_rtx_SUBREG (SImode, vreg, 0); ++ emit_insn (gen_extendsidi2 (i, subreg)); ++ emit_insn (gen_ashrdi3 (i, i, const1_rtx)); ++ ++ /* magical number: 0x5f3759df */ ++ magical = gen_reg_rtx (SImode); ++ emit_insn (gen_rtx_SET (magical, GEN_INT (0x5f370000))); ++ emit_insn ( ++ gen_rtx_SET (magical, gen_rtx_PLUS (SImode, magical, GEN_INT (0x59df)))); ++ ++ /* x0 = 0x5f3759df - i */ ++ subreg = gen_rtx_SUBREG (SImode, i, 0); ++ x0 = gen_reg_rtx (SImode); ++ emit_insn (gen_sub (x0, magical, subreg)); ++ ++ /* x = *(float *)&x0 */ ++ x = gen_rtx_REG (mode, 60); ++ x0 = gen_rtx_SUBREG (SFmode, x0, 0); ++ emit_insn (gen_rtx_SET (x, x0)); ++ ++ /* x= x *(1.5f - xhalf * x *x) */ ++ rtx number = gen_reg_rtx (mode); ++ emit_insn (gen_add (number, one, mhalf)); ++ ++ x1 = gen_reg_rtx (mode); ++ emit_insn (gen_mul (x1, x, x)); ++ emit_insn (gen_fnma (x1, x1, xhalf, number)); ++ emit_insn (gen_mul (x1, x1, x)); ++ ++ /* second iteration, SPEC2006 435 need this */ ++ x2 = gen_reg_rtx (mode); ++ emit_insn (gen_mul (x2, x1, x1)); ++ emit_insn (gen_fnma (x2, x2, xhalf, number)); ++ emit_insn (gen_mul (dst, x2, x1)); ++} ++/* -fsw-rsqrt function END */ ++ ++/* Implement TARGET_LEGITIMATE_CONSTANT_P. This is all constants for which ++ we are willing to load the value into a register via a move pattern. ++ Normally this is all symbolic constants, integral constants that ++ take three or fewer instructions, and floating-point zero. 
*/ ++ ++bool ++sw_64_legitimate_constant_p (machine_mode mode, rtx x) ++{ ++ HOST_WIDE_INT i0; ++ ++ switch (GET_CODE (x)) ++ { ++ case LABEL_REF: ++ case HIGH: ++ return true; ++ ++ case CONST: ++ if (GET_CODE (XEXP (x, 0)) == PLUS && CONST_INT_P (XEXP (XEXP (x, 0), 1))) ++ x = XEXP (XEXP (x, 0), 0); ++ else ++ return true; ++ ++ if (GET_CODE (x) != SYMBOL_REF) ++ return true; ++ /* FALLTHRU */ ++ ++ case SYMBOL_REF: ++ /* TLS symbols are never valid. */ ++ return SYMBOL_REF_TLS_MODEL (x) == 0; ++ ++ case CONST_WIDE_INT: ++ if (TARGET_BUILD_CONSTANTS) ++ return true; ++ if (x == CONST0_RTX (mode)) ++ return true; ++ mode = DImode; ++ gcc_assert (CONST_WIDE_INT_NUNITS (x) == 2); ++ i0 = CONST_WIDE_INT_ELT (x, 1); ++ if (sw_64_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) == NULL) ++ return false; ++ i0 = CONST_WIDE_INT_ELT (x, 0); ++ goto do_integer; ++ ++ case CONST_DOUBLE: ++ if (x == CONST0_RTX (mode)) ++ return true; ++ return false; ++ ++ case CONST_VECTOR: ++ if (x == CONST0_RTX (mode)) ++ return true; ++ if (GET_MODE_CLASS (mode) != MODE_VECTOR_INT) ++ return false; ++ if (GET_MODE_SIZE (mode) != 8) ++ return false; ++ /* FALLTHRU */ ++ ++ case CONST_INT: ++ if (TARGET_BUILD_CONSTANTS) ++ return true; ++ i0 = sw_64_extract_integer (x); ++ do_integer: ++ return sw_64_emit_set_const_1 (NULL_RTX, mode, i0, 3, true) != NULL; ++ ++ default: ++ return false; ++ } ++} ++ ++/* Operand 1 is known to be a constant, and should require more than one ++ instruction to load. Emit that multi-part load. 
*/ ++ ++bool ++sw_64_split_const_mov (machine_mode mode, rtx *operands) ++{ ++ HOST_WIDE_INT i0; ++ rtx temp = NULL_RTX; ++ ++ i0 = sw_64_extract_integer (operands[1]); ++ ++ temp = sw_64_emit_set_const (operands[0], mode, i0, 3, false); ++ ++ if (!temp && TARGET_BUILD_CONSTANTS) ++ temp = sw_64_emit_set_long_const (operands[0], i0); ++ ++ if (temp) ++ { ++ if (!rtx_equal_p (operands[0], temp)) ++ emit_move_insn (operands[0], temp); ++ return true; ++ } ++ ++ return false; ++} ++ ++/* Expand a move instruction; return true if all work is done. ++ We don't handle non-bwx subword loads here. */ ++ ++bool ++sw_64_expand_mov (machine_mode mode, rtx *operands) ++{ ++ rtx tmp; ++ ++ /* If the output is not a register, the input must be. */ ++ if (MEM_P (operands[0]) && !reg_or_0_operand (operands[1], mode)) ++ operands[1] = force_reg (mode, operands[1]); ++ ++ /* Allow legitimize_address to perform some simplifications. */ ++ if (mode == Pmode && symbolic_operand (operands[1], mode)) ++ { ++ tmp = sw_64_legitimize_address_1 (operands[1], operands[0], mode); ++ if (tmp) ++ { ++ if (tmp == operands[0]) ++ return true; ++ operands[1] = tmp; ++ return false; ++ } ++ } ++ ++ /* Early out for non-constants and valid constants. */ ++ if (!CONSTANT_P (operands[1]) || input_operand (operands[1], mode)) ++ return false; ++ ++ /* Split large integers. */ ++ if (CONST_INT_P (operands[1]) || GET_CODE (operands[1]) == CONST_VECTOR) ++ { ++ if (sw_64_split_const_mov (mode, operands)) ++ return true; ++ } ++ ++ /* Otherwise we've nothing left but to drop the thing to memory. */ ++ tmp = force_const_mem (mode, operands[1]); ++ ++ if (tmp == NULL_RTX) ++ return false; ++ ++ if (reload_in_progress) ++ { ++ emit_move_insn (operands[0], XEXP (tmp, 0)); ++ operands[1] = replace_equiv_address (tmp, operands[0]); ++ } ++ else ++ operands[1] = validize_mem (tmp); ++ return false; ++} ++ ++/* Expand a non-bwx QImode or HImode move instruction; ++ return true if all work is done. 
*/ ++ ++bool ++sw_64_expand_mov_nobwx (machine_mode mode, rtx *operands) ++{ ++ rtx seq; ++ ++ /* If the output is not a register, the input must be. */ ++ if (MEM_P (operands[0])) ++ operands[1] = force_reg (mode, operands[1]); ++ ++ /* Handle four memory cases, unaligned and aligned for either the input ++ or the output. The only case where we can be called during reload is ++ for aligned loads; all other cases require temporaries. */ ++ ++ if (any_memory_operand (operands[1], mode)) ++ { ++ if (aligned_memory_operand (operands[1], mode)) ++ { ++ if (reload_in_progress) ++ { ++ seq = gen_reload_in_aligned (mode, operands[0], operands[1]); ++ emit_insn (seq); ++ } ++ else ++ { ++ rtx aligned_mem, bitnum; ++ rtx scratch = gen_reg_rtx (SImode); ++ rtx subtarget; ++ bool copyout; ++ ++ get_aligned_mem (operands[1], &aligned_mem, &bitnum); ++ ++ subtarget = operands[0]; ++ if (REG_P (subtarget)) ++ subtarget = gen_lowpart (DImode, subtarget), copyout = false; ++ else ++ subtarget = gen_reg_rtx (DImode), copyout = true; ++ ++ if (mode == QImode) ++ seq = gen_aligned_loadqi (subtarget, aligned_mem, bitnum, ++ scratch); ++ else ++ seq = gen_aligned_loadhi (subtarget, aligned_mem, bitnum, ++ scratch); ++ emit_insn (seq); ++ ++ if (copyout) ++ emit_move_insn (operands[0], gen_lowpart (mode, subtarget)); ++ } ++ } ++ else ++ { ++ /* Don't pass these as parameters since that makes the generated ++ code depend on parameter evaluation order which will cause ++ bootstrap failures. 
*/ ++ ++ rtx temp1, temp2, subtarget, ua; ++ bool copyout; ++ ++ temp1 = gen_reg_rtx (DImode); ++ temp2 = gen_reg_rtx (DImode); ++ ++ subtarget = operands[0]; ++ if (REG_P (subtarget)) ++ subtarget = gen_lowpart (DImode, subtarget), copyout = false; ++ else ++ subtarget = gen_reg_rtx (DImode), copyout = true; ++ ++ ua = get_unaligned_address (operands[1]); ++ if (mode == QImode) ++ seq = gen_unaligned_loadqi (subtarget, ua, temp1, temp2); ++ else ++ seq = gen_unaligned_loadhi (subtarget, ua, temp1, temp2); ++ ++ sw_64_set_memflags (seq, operands[1]); ++ emit_insn (seq); ++ ++ if (copyout) ++ emit_move_insn (operands[0], gen_lowpart (mode, subtarget)); ++ } ++ return true; ++ } ++ ++ if (any_memory_operand (operands[0], mode)) ++ { ++ if (aligned_memory_operand (operands[0], mode)) ++ { ++ rtx aligned_mem, bitnum; ++ rtx temp1 = gen_reg_rtx (SImode); ++ rtx temp2 = gen_reg_rtx (SImode); ++ ++ get_aligned_mem (operands[0], &aligned_mem, &bitnum); ++ ++ emit_insn ( ++ gen_aligned_store (aligned_mem, operands[1], bitnum, temp1, temp2)); ++ } ++ else ++ { ++ rtx temp1 = gen_reg_rtx (DImode); ++ rtx temp2 = gen_reg_rtx (DImode); ++ rtx temp3 = gen_reg_rtx (DImode); ++ rtx ua = get_unaligned_address (operands[0]); ++ ++ seq ++ = gen_unaligned_store (mode, ua, operands[1], temp1, temp2, temp3); ++ ++ sw_64_set_memflags (seq, operands[0]); ++ emit_insn (seq); ++ } ++ return true; ++ } ++ ++ return false; ++} ++ ++/* Implement the movmisalign patterns. One of the operands is a memory ++ that is not naturally aligned. Emit instructions to load it. */ ++ ++void ++sw_64_expand_movmisalign (machine_mode mode, rtx *operands) ++{ ++ /* Honor misaligned loads, for those we promised to do so. 
*/ ++ if (MEM_P (operands[1])) ++ { ++ rtx tmp; ++ ++ if (register_operand (operands[0], mode)) ++ tmp = operands[0]; ++ else ++ tmp = gen_reg_rtx (mode); ++ ++ sw_64_expand_unaligned_load (tmp, operands[1], 8, 0, 0); ++ if (tmp != operands[0]) ++ emit_move_insn (operands[0], tmp); ++ } ++ else if (MEM_P (operands[0])) ++ { ++ if (!reg_or_0_operand (operands[1], mode)) ++ operands[1] = force_reg (mode, operands[1]); ++ sw_64_expand_unaligned_store (operands[0], operands[1], 8, 0); ++ } ++ else ++ gcc_unreachable (); ++} ++ ++/* Generate an unsigned DImode to FP conversion. This is the same code ++ optabs would emit if we didn't have TFmode patterns. ++ ++ For SFmode, this is the only construction I've found that can pass ++ gcc.c-torture/execute/ieee/rbug.c. No scenario that uses DFmode ++ intermediates will work, because you'll get intermediate rounding ++ that ruins the end result. Some of this could be fixed by turning ++ on round-to-positive-infinity, but that requires diddling the fpsr, ++ which kills performance. I tried turning this around and converting ++ to a negative number, so that I could turn on /m, but either I did ++ it wrong or there's something else cause I wound up with the exact ++ same single-bit error. There is a branch-less form of this same code: ++ ++ srl $16,1,$1 ++ and $16,1,$2 ++ cmplt $16,0,$3 ++ or $1,$2,$2 ++ cmovge $16,$16,$2 ++ ifmovd $3,$f10 ++ ifmovd $2,$f11 ++ cvtqs $f11,$f11 ++ adds $f11,$f11,$f0 ++ fcmoveq $f10,$f11,$f0 ++ ++ I'm not using it because it's the same number of instructions as ++ this branch-full form, and it has more serialized long latency ++ instructions on the critical path. 
++ ++ For DFmode, we can avoid rounding errors by breaking up the word ++ into two pieces, converting them separately, and adding them back: ++ ++LC0: .long 0,0x5f800000 ++ ++ifmovd $16,$f11 ++ldi $2,LC0 ++cmplt $16,0,$1 ++cpyse $f11,$f31,$f10 ++cpyse $f31,$f11,$f11 ++s4addl $1,$2,$1 ++lds $f12,0($1) ++cvtqt $f10,$f10 ++cvtqt $f11,$f11 ++addt $f12,$f10,$f0 ++addt $f0,$f11,$f0 ++ ++This doesn't seem to be a clear-cut win over the optabs form. ++It probably all depends on the distribution of numbers being ++converted -- in the optabs form, all but high-bit-set has a ++much lower minimum execution time. */ ++ ++void ++sw_64_emit_floatuns (rtx operands[2]) ++{ ++ rtx neglab, donelab, i0, i1, f0, in, out; ++ machine_mode mode; ++ ++ out = operands[0]; ++ in = force_reg (DImode, operands[1]); ++ mode = GET_MODE (out); ++ neglab = gen_label_rtx (); ++ donelab = gen_label_rtx (); ++ i0 = gen_reg_rtx (DImode); ++ i1 = gen_reg_rtx (DImode); ++ f0 = gen_reg_rtx (mode); ++ ++ emit_cmp_and_jump_insns (in, const0_rtx, LT, const0_rtx, DImode, 0, neglab); ++ ++ emit_insn (gen_rtx_SET (out, gen_rtx_FLOAT (mode, in))); ++ emit_jump_insn (gen_jump (donelab)); ++ emit_barrier (); ++ ++ emit_label (neglab); ++ ++ emit_insn (gen_lshrdi3 (i0, in, const1_rtx)); ++ emit_insn (gen_anddi3 (i1, in, const1_rtx)); ++ emit_insn (gen_iordi3 (i0, i0, i1)); ++ emit_insn (gen_rtx_SET (f0, gen_rtx_FLOAT (mode, i0))); ++ emit_insn (gen_rtx_SET (out, gen_rtx_PLUS (mode, f0, f0))); ++ ++ emit_label (donelab); ++} ++ ++/* Generate the comparison for a conditional branch. 
*/ ++ ++void ++sw_64_emit_conditional_branch (rtx operands[], machine_mode cmp_mode) ++{ ++ enum rtx_code cmp_code, branch_code; ++ machine_mode branch_mode = VOIDmode; ++ enum rtx_code code = GET_CODE (operands[0]); ++ rtx op0 = operands[1], op1 = operands[2]; ++ rtx tem; ++ ++ if (cmp_mode == TFmode) ++ { ++ op0 = sw_64_emit_xfloating_compare (&code, op0, op1); ++ op1 = const0_rtx; ++ cmp_mode = DImode; ++ } ++ ++ /* The general case: fold the comparison code to the types of compares ++ that we have, choosing the branch as necessary. */ ++ switch (code) ++ { ++ case EQ: ++ case LE: ++ case LT: ++ case LEU: ++ case LTU: ++ case UNORDERED: ++ /* We have these compares. */ ++ cmp_code = code, branch_code = NE; ++ break; ++ ++ case NE: ++ case ORDERED: ++ /* These must be reversed. */ ++ cmp_code = reverse_condition (code), branch_code = EQ; ++ break; ++ ++ case GE: ++ case GT: ++ case GEU: ++ case GTU: ++ /* For FP, we swap them, for INT, we reverse them. */ ++ if (cmp_mode == DFmode || (cmp_mode == SFmode && flag_sw_sf_cmpsel)) ++ { ++ cmp_code = swap_condition (code); ++ branch_code = NE; ++ std::swap (op0, op1); ++ } ++ else ++ { ++ cmp_code = reverse_condition (code); ++ branch_code = EQ; ++ } ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (cmp_mode == DFmode) ++ { ++ if (flag_unsafe_math_optimizations && cmp_code != UNORDERED) ++ { ++ /* When we are not as concerned about non-finite values, and we ++ are comparing against zero, we can branch directly. */ ++ if (op1 == CONST0_RTX (DFmode)) ++ cmp_code = UNKNOWN, branch_code = code; ++ else if (op0 == CONST0_RTX (DFmode)) ++ { ++ /* Undo the swap we probably did just above. */ ++ std::swap (op0, op1); ++ branch_code = swap_condition (cmp_code); ++ cmp_code = UNKNOWN; ++ } ++ } ++ else ++ { ++ /* ??? We mark the branch mode to be CCmode to prevent the ++ compare and branch from being combined, since the compare ++ insn follows IEEE rules that the branch does not. 
*/ ++ branch_mode = CCmode; ++ } ++ } ++ else if (cmp_mode == SFmode && flag_sw_sf_cmpsel) ++ { ++ if (flag_unsafe_math_optimizations && cmp_code != UNORDERED) ++ { ++ /* When we are not as concerned about non-finite values, and we ++ are comparing against zero, we can branch directly. */ ++ if (op1 == CONST0_RTX (SFmode)) ++ cmp_code = UNKNOWN, branch_code = code; ++ else if (op0 == CONST0_RTX (SFmode)) ++ { ++ /* Undo the swap we probably did just above. */ ++ std::swap (op0, op1); ++ branch_code = swap_condition (cmp_code); ++ cmp_code = UNKNOWN; ++ } ++ } ++ ++ else ++ { ++ /* ??? We mark the branch mode to be CCmode to prevent the ++ compare and branch from being combined, since the compare ++ insn follows IEEE rules that the branch does not. */ ++ branch_mode = CCmode; ++ } ++ } ++ ++ else ++ { ++ /* The following optimizations are only for signed compares. */ ++ if (code != LEU && code != LTU && code != GEU && code != GTU) ++ { ++ /* Whee. Compare and branch against 0 directly. */ ++ if (op1 == const0_rtx) ++ cmp_code = UNKNOWN, branch_code = code; ++ ++ /* If the constants doesn't fit into an immediate, but can ++ be generated by ldi/ldih, we adjust the argument and ++ compare against zero, so we can use beq/bne directly. */ ++ /* ??? Don't do this when comparing against symbols, otherwise ++ we'll reduce (&x == 0x1234) to (&x-0x1234 == 0), which will ++ be declared false out of hand (at least for non-weak). 
*/ ++ else if (CONST_INT_P (op1) && (code == EQ || code == NE) ++ && !(symbolic_operand (op0, VOIDmode) ++ || (REG_P (op0) && REG_POINTER (op0)))) ++ { ++ rtx n_op1 = GEN_INT (-INTVAL (op1)); ++ ++ if (!satisfies_constraint_I (op1) ++ && (satisfies_constraint_K (n_op1) ++ || satisfies_constraint_L (n_op1))) ++ cmp_code = PLUS, branch_code = code, op1 = n_op1; ++ } ++ } ++ ++ if (!reg_or_0_operand (op0, DImode)) ++ op0 = force_reg (DImode, op0); ++ if (cmp_code != PLUS && !reg_or_8bit_operand (op1, DImode)) ++ op1 = force_reg (DImode, op1); ++ } ++ ++ /* Emit an initial compare instruction, if necessary. */ ++ tem = op0; ++ if (cmp_code != UNKNOWN) ++ { ++ tem = gen_reg_rtx (cmp_mode); ++ emit_move_insn (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1)); ++ } ++ ++ /* Emit the branch instruction. */ ++ tem = gen_rtx_SET ( ++ pc_rtx, ++ gen_rtx_IF_THEN_ELSE (VOIDmode, ++ gen_rtx_fmt_ee (branch_code, branch_mode, tem, ++ CONST0_RTX (cmp_mode)), ++ gen_rtx_LABEL_REF (VOIDmode, operands[3]), pc_rtx)); ++ emit_jump_insn (tem); ++} ++ ++/* Certain simplifications can be done to make invalid setcc operations ++ valid. Return the final comparison, or NULL if we can't work. */ ++ ++bool ++sw_64_emit_setcc (rtx operands[], machine_mode cmp_mode) ++{ ++ enum rtx_code cmp_code; ++ enum rtx_code code = GET_CODE (operands[1]); ++ rtx op0 = operands[2], op1 = operands[3]; ++ rtx tmp; ++ ++ if (cmp_mode == TFmode) ++ { ++ op0 = sw_64_emit_xfloating_compare (&code, op0, op1); ++ op1 = const0_rtx; ++ cmp_mode = DImode; ++ } ++ ++ if (cmp_mode == DFmode && !TARGET_FIX) ++ return 0; ++ ++ /* The general case: fold the comparison code to the types of compares ++ that we have, choosing the branch as necessary. */ ++ ++ cmp_code = UNKNOWN; ++ switch (code) ++ { ++ case EQ: ++ case LE: ++ case LT: ++ case LEU: ++ case LTU: ++ case UNORDERED: ++ /* We have these compares. 
*/ ++ if (cmp_mode == DFmode) ++ cmp_code = code, code = NE; ++ break; ++ ++ case NE: ++ if (cmp_mode == DImode && op1 == const0_rtx) ++ break; ++ /* FALLTHRU */ ++ ++ case ORDERED: ++ cmp_code = reverse_condition (code); ++ code = EQ; ++ break; ++ ++ case GE: ++ case GT: ++ case GEU: ++ case GTU: ++ /* These normally need swapping, but for integer zero we have ++ special patterns that recognize swapped operands. */ ++ if (cmp_mode == DImode && op1 == const0_rtx) ++ break; ++ code = swap_condition (code); ++ if (cmp_mode == DFmode) ++ cmp_code = code, code = NE; ++ std::swap (op0, op1); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (cmp_mode == DImode) ++ { ++ if (!register_operand (op0, DImode)) ++ op0 = force_reg (DImode, op0); ++ if (!reg_or_8bit_operand (op1, DImode)) ++ op1 = force_reg (DImode, op1); ++ } ++ ++ /* Emit an initial compare instruction, if necessary. */ ++ if (cmp_code != UNKNOWN) ++ { ++ tmp = gen_reg_rtx (cmp_mode); ++ emit_insn ( ++ gen_rtx_SET (tmp, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1))); ++ ++ op0 = cmp_mode != DImode ? gen_lowpart (DImode, tmp) : tmp; ++ op1 = const0_rtx; ++ } ++ ++ /* Emit the setcc instruction. */ ++ emit_insn ( ++ gen_rtx_SET (operands[0], gen_rtx_fmt_ee (code, DImode, op0, op1))); ++ return true; ++} ++ ++/* Rewrite a comparison against zero CMP of the form ++ (CODE (cc0) (const_int 0)) so it can be written validly in ++ a conditional move (if_then_else CMP ...). ++ If both of the operands that set cc0 are nonzero we must emit ++ an insn to perform the compare (it can't be done within ++ the conditional move). */ ++ ++rtx ++sw_64_emit_conditional_move (rtx cmp, machine_mode mode) ++{ ++ enum rtx_code code = GET_CODE (cmp); ++ enum rtx_code cmov_code = NE; ++ rtx op0 = XEXP (cmp, 0); ++ rtx op1 = XEXP (cmp, 1); ++ machine_mode cmp_mode ++ = (GET_MODE (op0) == VOIDmode ? 
DImode : GET_MODE (op0)); ++ machine_mode cmov_mode = VOIDmode; ++ int local_fast_math = flag_unsafe_math_optimizations; ++ rtx tem; ++ ++ if (cmp_mode == TFmode) ++ { ++ op0 = sw_64_emit_xfloating_compare (&code, op0, op1); ++ op1 = const0_rtx; ++ cmp_mode = DImode; ++ } ++ ++ gcc_assert (cmp_mode == DFmode || cmp_mode == DImode || cmp_mode == SFmode); ++ ++ if (FLOAT_MODE_P (cmp_mode) != FLOAT_MODE_P (mode)) ++ { ++ enum rtx_code cmp_code; ++ ++ if (!TARGET_FIX) ++ return 0; ++ ++ /* If we have fp<->int register move instructions, do a cmov by ++ performing the comparison in fp registers, and move the ++ zero/nonzero value to integer registers, where we can then ++ use a normal cmov, or vice-versa. */ ++ ++ switch (code) ++ { ++ case EQ: ++ case LE: ++ case LT: ++ case LEU: ++ case LTU: ++ case UNORDERED: ++ /* We have these compares. */ ++ cmp_code = code, code = NE; ++ break; ++ ++ case NE: ++ case ORDERED: ++ /* These must be reversed. */ ++ cmp_code = reverse_condition (code), code = EQ; ++ break; ++ ++ case GE: ++ case GT: ++ case GEU: ++ case GTU: ++ /* These normally need swapping, but for integer zero we have ++ special patterns that recognize swapped operands. */ ++ if (cmp_mode == DImode && op1 == const0_rtx) ++ cmp_code = code, code = NE; ++ else ++ { ++ cmp_code = swap_condition (code); ++ code = NE; ++ std::swap (op0, op1); ++ } ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (cmp_mode == DImode) ++ { ++ if (!reg_or_0_operand (op0, DImode)) ++ op0 = force_reg (DImode, op0); ++ if (!reg_or_8bit_operand (op1, DImode)) ++ op1 = force_reg (DImode, op1); ++ } ++ ++ tem = gen_reg_rtx (cmp_mode); ++ emit_insn ( ++ gen_rtx_SET (tem, gen_rtx_fmt_ee (cmp_code, cmp_mode, op0, op1))); ++ ++ cmp_mode = cmp_mode == DImode ? 
E_DFmode : E_DImode; ++ op0 = gen_lowpart (cmp_mode, tem); ++ op1 = CONST0_RTX (cmp_mode); ++ cmp = gen_rtx_fmt_ee (code, VOIDmode, op0, op1); ++ local_fast_math = 1; ++ } ++ ++ if (cmp_mode == DImode) ++ { ++ if (!reg_or_0_operand (op0, DImode)) ++ op0 = force_reg (DImode, op0); ++ if (!reg_or_8bit_operand (op1, DImode)) ++ op1 = force_reg (DImode, op1); ++ } ++ ++ /* We may be able to use a conditional move directly. ++ This avoids emitting spurious compares. */ ++ if (signed_comparison_operator (cmp, VOIDmode) ++ && (cmp_mode == DImode || local_fast_math) ++ && (op0 == CONST0_RTX (cmp_mode) || op1 == CONST0_RTX (cmp_mode))) ++ return gen_rtx_fmt_ee (code, VOIDmode, op0, op1); ++ ++ /* We can't put the comparison inside the conditional move; ++ emit a compare instruction and put that inside the ++ conditional move. Make sure we emit only comparisons we have; ++ swap or reverse as necessary. */ ++ ++ if (!can_create_pseudo_p ()) ++ return NULL_RTX; ++ ++ switch (code) ++ { ++ case EQ: ++ case LE: ++ case LT: ++ case LEU: ++ case LTU: ++ case UNORDERED: ++ /* We have these compares: */ ++ break; ++ ++ case NE: ++ case ORDERED: ++ /* These must be reversed. */ ++ code = reverse_condition (code); ++ cmov_code = EQ; ++ break; ++ ++ case GE: ++ case GT: ++ case GEU: ++ case GTU: ++ /* These normally need swapping, but for integer zero we have ++ special patterns that recognize swapped operands. */ ++ if (cmp_mode == DImode && op1 == const0_rtx) ++ break; ++ code = swap_condition (code); ++ std::swap (op0, op1); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (cmp_mode == DImode) ++ { ++ if (!reg_or_0_operand (op0, DImode)) ++ op0 = force_reg (DImode, op0); ++ if (!reg_or_8bit_operand (op1, DImode)) ++ op1 = force_reg (DImode, op1); ++ } ++ ++ /* ??? We mark the branch mode to be CCmode to prevent the compare ++ and cmov from being combined, since the compare insn follows IEEE ++ rules that the cmov does not. 
*/ ++ if (cmp_mode == DFmode && !local_fast_math) ++ cmov_mode = CCmode; ++ ++ tem = gen_reg_rtx (cmp_mode); ++ emit_move_insn (tem, gen_rtx_fmt_ee (code, cmp_mode, op0, op1)); ++ return gen_rtx_fmt_ee (cmov_code, cmov_mode, tem, CONST0_RTX (cmp_mode)); ++} ++ ++/* Simplify a conditional move of two constants into a setcc with ++ arithmetic. This is done with a splitter since combine would ++ just undo the work if done during code generation. It also catches ++ cases we wouldn't have before cse. */ ++ ++int ++sw_64_split_conditional_move (enum rtx_code code, rtx dest, rtx cond, rtx t_rtx, ++ rtx f_rtx) ++{ ++ HOST_WIDE_INT t, f, diff; ++ machine_mode mode; ++ rtx target, subtarget, tmp; ++ ++ mode = GET_MODE (dest); ++ t = INTVAL (t_rtx); ++ f = INTVAL (f_rtx); ++ diff = t - f; ++ ++ if (((code == NE || code == EQ) && diff < 0) || (code == GE || code == GT)) ++ { ++ code = reverse_condition (code); ++ std::swap (t, f); ++ diff = -diff; ++ } ++ ++ subtarget = target = dest; ++ if (mode != DImode) ++ { ++ target = gen_lowpart (DImode, dest); ++ if (can_create_pseudo_p ()) ++ subtarget = gen_reg_rtx (DImode); ++ else ++ subtarget = target; ++ } ++ /* Below, we must be careful to use copy_rtx on target and subtarget ++ in intermediate insns, as they may be a subreg rtx, which may not ++ be shared. 
*/ ++ ++ if (f == 0 && exact_log2 (diff) > 0 ++ && (diff <= 8 || sw_64_tune == PROCESSOR_SW6 ++ || sw_64_tune == PROCESSOR_SW8)) ++ { ++ tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx); ++ emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp)); ++ ++ tmp = gen_rtx_ASHIFT (DImode, copy_rtx (subtarget), ++ GEN_INT (exact_log2 (t))); ++ emit_insn (gen_rtx_SET (target, tmp)); ++ } ++ else if (f == 0 && t == -1) ++ { ++ tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx); ++ emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp)); ++ ++ emit_insn (gen_negdi2 (target, copy_rtx (subtarget))); ++ } ++ else if (diff == 1 || diff == 4 || diff == 8) ++ { ++ rtx add_op; ++ ++ tmp = gen_rtx_fmt_ee (code, DImode, cond, const0_rtx); ++ emit_insn (gen_rtx_SET (copy_rtx (subtarget), tmp)); ++ ++ if (diff == 1) ++ emit_insn (gen_adddi3 (target, copy_rtx (subtarget), GEN_INT (f))); ++ else ++ { ++ add_op = GEN_INT (f); ++ if (sext_add_operand (add_op, mode)) ++ { ++ // in sw_64 sxsubw is ra*x + rb ++ tmp = gen_rtx_MULT (DImode, copy_rtx (subtarget), GEN_INT (diff)); ++ tmp = gen_rtx_PLUS (DImode, tmp, add_op); ++ emit_insn (gen_rtx_SET (target, tmp)); ++ } ++ else ++ return 0; ++ } ++ } ++ else ++ return 0; ++ ++ return 1; ++} ++ ++/* Look up the function X_floating library function name for the ++ given operation. 
*/ ++ ++struct GTY (()) xfloating_op ++{ ++ const enum rtx_code code; ++ const char *const GTY ((skip)) osf_func; ++ const char *const GTY ((skip)) vms_func; ++ rtx libcall; ++}; ++ ++static GTY (()) struct xfloating_op xfloating_ops[] ++ = {{PLUS, "_OtsAddX", "OTS$ADD_X", 0}, ++ {MINUS, "_OtsSubX", "OTS$SUB_X", 0}, ++ {MULT, "_OtsMulX", "OTS$MUL_X", 0}, ++ {DIV, "_OtsDivX", "OTS$DIV_X", 0}, ++ {EQ, "_OtsEqlX", "OTS$EQL_X", 0}, ++ {NE, "_OtsNeqX", "OTS$NEQ_X", 0}, ++ {LT, "_OtsLssX", "OTS$LSS_X", 0}, ++ {LE, "_OtsLeqX", "OTS$LEQ_X", 0}, ++ {GT, "_OtsGtrX", "OTS$GTR_X", 0}, ++ {GE, "_OtsGeqX", "OTS$GEQ_X", 0}, ++ {FIX, "_OtsCvtXQ", "OTS$CVTXQ", 0}, ++ {FLOAT, "_OtsCvtQX", "OTS$CVTQX", 0}, ++ {UNSIGNED_FLOAT, "_OtsCvtQUX", "OTS$CVTQUX", 0}, ++ {FLOAT_EXTEND, "_OtsConvertFloatTX", "OTS$CVT_FLOAT_T_X", 0}, ++ {FLOAT_TRUNCATE, "_OtsConvertFloatXT", "OTS$CVT_FLOAT_X_T", 0}}; ++ ++static GTY (()) struct xfloating_op vax_cvt_ops[] ++ = {{FLOAT_EXTEND, "_OtsConvertFloatGX", "OTS$CVT_FLOAT_G_X", 0}, ++ {FLOAT_TRUNCATE, "_OtsConvertFloatXG", "OTS$CVT_FLOAT_X_G", 0}}; ++ ++static rtx ++sw_64_lookup_xfloating_lib_func (enum rtx_code code) ++{ ++ struct xfloating_op *ops = xfloating_ops; ++ long n = ARRAY_SIZE (xfloating_ops); ++ long i; ++ ++ gcc_assert (TARGET_HAS_XFLOATING_LIBS); ++ ++ /* How irritating. Nothing to key off for the main table. */ ++ if (TARGET_FLOAT_VAX && (code == FLOAT_EXTEND || code == FLOAT_TRUNCATE)) ++ { ++ ops = vax_cvt_ops; ++ n = ARRAY_SIZE (vax_cvt_ops); ++ } ++ ++ for (i = 0; i < n; ++i, ++ops) ++ if (ops->code == code) ++ { ++ rtx func = ops->libcall; ++ if (!func) ++ { ++ func = init_one_libfunc (ops->osf_func); ++ ops->libcall = func; ++ } ++ return func; ++ } ++ ++ gcc_unreachable (); ++} ++ ++/* Most X_floating operations take the rounding mode as an argument. ++ Compute that here. 
*/ ++ ++static int ++sw_64_compute_xfloating_mode_arg (enum rtx_code code, ++ enum sw_64_fp_rounding_mode round) ++{ ++ int mode; ++ ++ switch (round) ++ { ++ case SW_64_FPRM_NORM: ++ mode = 2; ++ break; ++ case SW_64_FPRM_MINF: ++ mode = 1; ++ break; ++ case SW_64_FPRM_CHOP: ++ mode = 0; ++ break; ++ case SW_64_FPRM_DYN: ++ mode = 4; ++ break; ++ default: ++ gcc_unreachable (); ++ ++ /* XXX For reference, round to +inf is mode = 3. */ ++ } ++ ++ if (code == FLOAT_TRUNCATE && sw_64_fptm == SW_64_FPTM_N) ++ mode |= 0x10000; ++ ++ return mode; ++} ++ ++/* Emit an X_floating library function call. ++ ++ Note that these functions do not follow normal calling conventions: ++ TFmode arguments are passed in two integer registers (as opposed to ++ indirect); TFmode return values appear in R16+R17. ++ ++ FUNC is the function to call. ++ TARGET is where the output belongs. ++ OPERANDS are the inputs. ++ NOPERANDS is the count of inputs. ++ EQUIV is the expression equivalent for the function. ++ */ ++ ++static void ++sw_64_emit_xfloating_libcall (rtx func, rtx target, rtx operands[], ++ int noperands, rtx equiv) ++{ ++ rtx usage = NULL_RTX, reg; ++ int regno = 16, i; ++ ++ start_sequence (); ++ ++ for (i = 0; i < noperands; ++i) ++ { ++ switch (GET_MODE (operands[i])) ++ { ++ case E_TFmode: ++ reg = gen_rtx_REG (TFmode, regno); ++ regno += 2; ++ break; ++ ++ case E_DFmode: ++ reg = gen_rtx_REG (DFmode, regno + 32); ++ regno += 1; ++ break; ++ ++ case E_VOIDmode: ++ gcc_assert (CONST_INT_P (operands[i])); ++ /* FALLTHRU */ ++ case E_DImode: ++ reg = gen_rtx_REG (DImode, regno); ++ regno += 1; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ emit_move_insn (reg, operands[i]); ++ use_reg (&usage, reg); ++ } ++ ++ switch (GET_MODE (target)) ++ { ++ case E_TFmode: ++ reg = gen_rtx_REG (TFmode, 16); ++ break; ++ case E_DFmode: ++ reg = gen_rtx_REG (DFmode, 32); ++ break; ++ case E_DImode: ++ reg = gen_rtx_REG (DImode, 0); ++ break; ++ default: ++ gcc_unreachable (); ++ } 
++ ++ rtx mem = gen_rtx_MEM (QImode, func); ++ rtx_insn *tmp = emit_call_insn ( ++ gen_call_value (reg, mem, const0_rtx, const0_rtx, const0_rtx)); ++ CALL_INSN_FUNCTION_USAGE (tmp) = usage; ++ RTL_CONST_CALL_P (tmp) = 1; ++ ++ tmp = get_insns (); ++ end_sequence (); ++ ++ emit_libcall_block (tmp, target, reg, equiv); ++} ++ ++/* Emit an X_floating library function call for arithmetic (+,-,*,/). */ ++ ++void ++sw_64_emit_xfloating_arith (enum rtx_code code, rtx operands[]) ++{ ++ rtx func; ++ int mode; ++ rtx out_operands[3]; ++ ++ func = sw_64_lookup_xfloating_lib_func (code); ++ mode = sw_64_compute_xfloating_mode_arg (code, sw_64_fprm); ++ ++ out_operands[0] = operands[1]; ++ out_operands[1] = operands[2]; ++ out_operands[2] = GEN_INT (mode); ++ sw_64_emit_xfloating_libcall (func, operands[0], out_operands, 3, ++ gen_rtx_fmt_ee (code, TFmode, operands[1], ++ operands[2])); ++} ++ ++/* Emit an X_floating library function call for a comparison. */ ++ ++static rtx ++sw_64_emit_xfloating_compare (enum rtx_code *pcode, rtx op0, rtx op1) ++{ ++ enum rtx_code cmp_code, res_code; ++ rtx func, out, operands[2], note; ++ ++ /* X_floating library comparison functions return ++ -1 unordered ++ 0 false ++ 1 true ++ Convert the compare against the raw return value. */ ++ ++ cmp_code = *pcode; ++ switch (cmp_code) ++ { ++ case UNORDERED: ++ cmp_code = EQ; ++ res_code = LT; ++ break; ++ case ORDERED: ++ cmp_code = EQ; ++ res_code = GE; ++ break; ++ case NE: ++ res_code = NE; ++ break; ++ case EQ: ++ case LT: ++ case GT: ++ case LE: ++ case GE: ++ res_code = GT; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ *pcode = res_code; ++ ++ func = sw_64_lookup_xfloating_lib_func (cmp_code); ++ ++ operands[0] = op0; ++ operands[1] = op1; ++ out = gen_reg_rtx (DImode); ++ ++ /* What's actually returned is -1,0,1, not a proper boolean value. 
*/ ++ note = gen_rtx_fmt_ee (cmp_code, VOIDmode, op0, op1); ++ note = gen_rtx_UNSPEC (DImode, gen_rtvec (1, note), UNSPEC_XFLT_COMPARE); ++ sw_64_emit_xfloating_libcall (func, out, operands, 2, note); ++ ++ return out; ++} ++ ++/* Emit an X_floating library function call for a conversion. */ ++ ++void ++sw_64_emit_xfloating_cvt (enum rtx_code orig_code, rtx operands[]) ++{ ++ int noperands = 1, mode; ++ rtx out_operands[2]; ++ rtx func; ++ enum rtx_code code = orig_code; ++ ++ if (code == UNSIGNED_FIX) ++ code = FIX; ++ ++ func = sw_64_lookup_xfloating_lib_func (code); ++ ++ out_operands[0] = operands[1]; ++ ++ switch (code) ++ { ++ case FIX: ++ mode = sw_64_compute_xfloating_mode_arg (code, SW_64_FPRM_CHOP); ++ out_operands[1] = GEN_INT (mode); ++ noperands = 2; ++ break; ++ case FLOAT_TRUNCATE: ++ mode = sw_64_compute_xfloating_mode_arg (code, sw_64_fprm); ++ out_operands[1] = GEN_INT (mode); ++ noperands = 2; ++ break; ++ default: ++ break; ++ } ++ ++ sw_64_emit_xfloating_libcall (func, operands[0], out_operands, noperands, ++ gen_rtx_fmt_e (orig_code, ++ GET_MODE (operands[0]), ++ operands[1])); ++} ++ ++/* Split a TImode or TFmode move from OP[1] to OP[0] into a pair of ++ DImode moves from OP[2,3] to OP[0,1]. If FIXUP_OVERLAP is true, ++ guarantee that the sequence ++ set (OP[0] OP[2]) ++ set (OP[1] OP[3]) ++ is valid. Naturally, output operand ordering is little-endian. ++ This is used by *movtf_internal and *movti_internal. 
*/ ++ ++void ++sw_64_split_tmode_pair (rtx operands[4], machine_mode mode, bool fixup_overlap) ++{ ++ switch (GET_CODE (operands[1])) ++ { ++ case REG: ++ operands[3] = gen_rtx_REG (DImode, REGNO (operands[1]) + 1); ++ operands[2] = gen_rtx_REG (DImode, REGNO (operands[1])); ++ break; ++ ++ case MEM: ++ operands[3] = adjust_address (operands[1], DImode, 8); ++ operands[2] = adjust_address (operands[1], DImode, 0); ++ break; ++ ++ CASE_CONST_SCALAR_INT: ++ case CONST_DOUBLE: ++ gcc_assert (operands[1] == CONST0_RTX (mode)); ++ operands[2] = operands[3] = const0_rtx; ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ switch (GET_CODE (operands[0])) ++ { ++ case REG: ++ operands[1] = gen_rtx_REG (DImode, REGNO (operands[0]) + 1); ++ operands[0] = gen_rtx_REG (DImode, REGNO (operands[0])); ++ break; ++ ++ case MEM: ++ operands[1] = adjust_address (operands[0], DImode, 8); ++ operands[0] = adjust_address (operands[0], DImode, 0); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (fixup_overlap && reg_overlap_mentioned_p (operands[0], operands[3])) ++ { ++ std::swap (operands[0], operands[1]); ++ std::swap (operands[2], operands[3]); ++ } ++} ++ ++/* Implement negtf2 or abstf2. Op0 is destination, op1 is source, ++ op2 is a register containing the sign bit, operation is the ++ logical operation to be performed. */ ++ ++void ++sw_64_split_tfmode_frobsign (rtx operands[3], rtx (*operation) (rtx, rtx, rtx)) ++{ ++ rtx high_bit = operands[2]; ++ rtx scratch; ++ int move; ++ ++ sw_64_split_tmode_pair (operands, TFmode, false); ++ ++ /* Detect three flavors of operand overlap. */ ++ move = 1; ++ if (rtx_equal_p (operands[0], operands[2])) ++ move = 0; ++ else if (rtx_equal_p (operands[1], operands[2])) ++ { ++ if (rtx_equal_p (operands[0], high_bit)) ++ move = 2; ++ else ++ move = -1; ++ } ++ ++ if (move < 0) ++ emit_move_insn (operands[0], operands[2]); ++ ++ /* ??? 
If the destination overlaps both source tf and high_bit, then ++ assume source tf is dead in its entirety and use the other half ++ for a scratch register. Otherwise "scratch" is just the proper ++ destination register. */ ++ scratch = operands[move < 2 ? 1 : 3]; ++ ++ emit_insn ((*operation) (scratch, high_bit, operands[3])); ++ ++ if (move > 0) ++ { ++ emit_move_insn (operands[0], operands[2]); ++ if (move > 1) ++ emit_move_insn (operands[1], scratch); ++ } ++} ++ ++/* Use ext[wlq][lh] as the Architecture Handbook describes for extracting ++ unaligned data: ++ ++ unsigned: signed: ++ word: ldl_u r1,X(r11) ldl_u r1,X(r11) ++ ldl_u r2,X+1(r11) ldl_u r2,X+1(r11) ++ ldi r3,X(r11) ldi r3,X+2(r11) ++ exthl r1,r3,r1 extll r1,r3,r1 ++ exthh r2,r3,r2 extlh r2,r3,r2 ++ or r1.r2.r1 or r1,r2,r1 ++ sra r1,48,r1 ++ ++ long: ldl_u r1,X(r11) ldq_u r1,X(r11) ++ ldl_u r2,X+3(r11) ldq_u r2,X+3(r11) ++ ldi r3,X(r11) lda r3,X(r11) ++ extll r1,r3,r1 extll r1,r3,r1 ++ extlh r2,r3,r2 extlh r2,r3,r2 ++ or r1.r2.r1 addl r1,r2,r1 ++ ++ quad: ldl_u r1,X(r11) ++ ldl_u r2,X+7(r11) ++ ldi r3,X(r11) ++ extll r1,r3,r1 ++ extlh r2,r3,r2 ++ or r1.r2.r1 ++ */ ++ ++void ++sw_64_expand_unaligned_load (rtx tgt, rtx mem, HOST_WIDE_INT size, ++ HOST_WIDE_INT ofs, int sign) ++{ ++ rtx meml, memh, addr, extl, exth, tmp, mema; ++ machine_mode mode; ++ ++ if (TARGET_BWX && size == 2) ++ { ++ meml = adjust_address (mem, QImode, ofs); ++ memh = adjust_address (mem, QImode, ofs + 1); ++ extl = gen_reg_rtx (DImode); ++ exth = gen_reg_rtx (DImode); ++ emit_insn (gen_zero_extendqidi2 (extl, meml)); ++ emit_insn (gen_zero_extendqidi2 (exth, memh)); ++ exth = expand_simple_binop (DImode, ASHIFT, exth, GEN_INT (8), NULL, 1, ++ OPTAB_LIB_WIDEN); ++ addr = expand_simple_binop (DImode, IOR, extl, exth, NULL, 1, ++ OPTAB_LIB_WIDEN); ++ ++ if (sign && GET_MODE (tgt) != HImode) ++ { ++ addr = gen_lowpart (HImode, addr); ++ emit_insn (gen_extend_insn (tgt, addr, GET_MODE (tgt), HImode, 0)); ++ } ++ else ++ { ++ if 
(GET_MODE (tgt) != DImode) ++ addr = gen_lowpart (GET_MODE (tgt), addr); ++ emit_move_insn (tgt, addr); ++ } ++ return; ++ } ++ ++ meml = gen_reg_rtx (DImode); ++ memh = gen_reg_rtx (DImode); ++ addr = gen_reg_rtx (DImode); ++ extl = gen_reg_rtx (DImode); ++ exth = gen_reg_rtx (DImode); ++ ++ mema = XEXP (mem, 0); ++ rtx mema_const, mema_ptr; ++ if (GET_CODE (mema) == LO_SUM) ++ mema = force_reg (Pmode, mema); ++ ++ // TODO: split const ptr ++ if (GET_CODE (mema) == PLUS) ++ { ++ mema_ptr = XEXP (mema, 0); ++ mema_const = XEXP (mema, 1); ++ } ++ /* AND addresses cannot be in any alias set, since they may implicitly ++ alias surrounding code. Ideally we'd have some alias set that ++ covered all types except those with alignment 8 or higher. */ ++ ++ tmp = change_address (mem, DImode, ++ gen_rtx_AND (DImode, plus_constant (DImode, mema, ofs), ++ GEN_INT (-8))); ++ set_mem_alias_set (tmp, 0); ++ emit_move_insn (meml, tmp); ++ ++ tmp ++ = change_address (mem, DImode, ++ gen_rtx_AND (DImode, ++ plus_constant (DImode, mema, ofs + size - 1), ++ GEN_INT (-8))); ++ set_mem_alias_set (tmp, 0); ++ emit_move_insn (memh, tmp); ++ ++ if (sign && size == 2) ++ { ++ emit_move_insn (addr, plus_constant (Pmode, mema, ofs + 2)); ++ ++ emit_insn (gen_extql (extl, meml, addr)); ++ emit_insn (gen_extqh (exth, memh, addr)); ++ ++ /* We must use tgt here for the target. Sw_64 port fails if we use ++ addr for the target, because addr is marked as a pointer and combine ++ knows that pointers are always sign-extended 32-bit values. 
*/ ++ addr = expand_binop (DImode, ior_optab, extl, exth, tgt, 1, OPTAB_WIDEN); ++ addr = expand_binop (DImode, ashr_optab, addr, GEN_INT (48), addr, 1, ++ OPTAB_WIDEN); ++ } ++ else ++ { ++ if (GET_CODE (mema) == PLUS && CONST_INT_P (mema_const) ++ && (!add_operand (mema_const, VOIDmode))) ++ { ++ rtx tmpreg = gen_reg_rtx (DImode); ++ tmpreg = sw_64_emit_set_const ( ++ tmpreg, DImode, INTVAL (plus_constant (Pmode, mema_const, ofs)), 2, ++ false); ++ emit_insn (gen_adddi3 (addr, mema_ptr, tmpreg)); ++ } ++ else ++ { ++ emit_move_insn (addr, plus_constant (Pmode, mema, ofs)); ++ } ++ emit_insn (gen_extxl (extl, meml, GEN_INT (size * 8), addr)); ++ switch ((int) size) ++ { ++ case 2: ++ emit_insn (gen_extwh (exth, memh, addr)); ++ mode = HImode; ++ break; ++ case 4: ++ emit_insn (gen_extlh (exth, memh, addr)); ++ mode = SImode; ++ break; ++ case 8: ++ emit_insn (gen_extqh (exth, memh, addr)); ++ mode = DImode; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ ++ addr = expand_binop (mode, ior_optab, gen_lowpart (mode, extl), ++ gen_lowpart (mode, exth), gen_lowpart (mode, tgt), ++ sign, OPTAB_WIDEN); ++ } ++ ++ if (addr != tgt) ++ emit_move_insn (tgt, gen_lowpart (GET_MODE (tgt), addr)); ++} ++ ++/* Similarly, use ins and msk instructions to perform unaligned stores. 
*/ ++ ++void ++sw_64_expand_unaligned_store (rtx dst, rtx src, HOST_WIDE_INT size, ++ HOST_WIDE_INT ofs) ++{ ++ rtx dstl, dsth, addr, insl, insh, meml, memh, dsta; ++ ++ if (TARGET_BWX && size == 2) ++ { ++ if (src != const0_rtx) ++ { ++ dstl = gen_lowpart (QImode, src); ++ dsth = expand_simple_binop (DImode, LSHIFTRT, src, GEN_INT (8), NULL, ++ 1, OPTAB_LIB_WIDEN); ++ dsth = gen_lowpart (QImode, dsth); ++ } ++ else ++ dstl = dsth = const0_rtx; ++ ++ meml = adjust_address (dst, QImode, ofs); ++ memh = adjust_address (dst, QImode, ofs + 1); ++ ++ emit_move_insn (meml, dstl); ++ emit_move_insn (memh, dsth); ++ return; ++ } ++ ++ dstl = gen_reg_rtx (DImode); ++ dsth = gen_reg_rtx (DImode); ++ insl = gen_reg_rtx (DImode); ++ insh = gen_reg_rtx (DImode); ++ ++ dsta = XEXP (dst, 0); ++ if (GET_CODE (dsta) == LO_SUM) ++ dsta = force_reg (Pmode, dsta); ++ ++ /* AND addresses cannot be in any alias set, since they may implicitly ++ alias surrounding code. Ideally we'd have some alias set that ++ covered all types except those with alignment 8 or higher. 
*/ ++ ++ meml = change_address (dst, DImode, ++ gen_rtx_AND (DImode, plus_constant (DImode, dsta, ofs), ++ GEN_INT (-8))); ++ set_mem_alias_set (meml, 0); ++ ++ memh ++ = change_address (dst, DImode, ++ gen_rtx_AND (DImode, ++ plus_constant (DImode, dsta, ofs + size - 1), ++ GEN_INT (-8))); ++ set_mem_alias_set (memh, 0); ++ ++ emit_move_insn (dsth, memh); ++ emit_move_insn (dstl, meml); ++ ++ addr = copy_addr_to_reg (plus_constant (Pmode, dsta, ofs)); ++ ++ if (src != CONST0_RTX (GET_MODE (src))) ++ { ++ emit_insn ( ++ gen_insxh (insh, gen_lowpart (DImode, src), GEN_INT (size * 8), addr)); ++ ++ switch ((int) size) ++ { ++ case 2: ++ emit_insn (gen_inswl (insl, gen_lowpart (HImode, src), addr)); ++ break; ++ case 4: ++ emit_insn (gen_insll (insl, gen_lowpart (SImode, src), addr)); ++ break; ++ case 8: ++ emit_insn (gen_insql (insl, gen_lowpart (DImode, src), addr)); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ ++ emit_insn (gen_mskxh (dsth, dsth, GEN_INT (size * 8), addr)); ++ ++ switch ((int) size) ++ { ++ case 2: ++ emit_insn (gen_mskwl (dstl, dstl, addr)); ++ break; ++ case 4: ++ emit_insn (gen_mskll (dstl, dstl, addr)); ++ break; ++ case 8: ++ emit_insn (gen_mskql (dstl, dstl, addr)); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (src != CONST0_RTX (GET_MODE (src))) ++ { ++ dsth = expand_binop (DImode, ior_optab, insh, dsth, dsth, 0, OPTAB_WIDEN); ++ dstl = expand_binop (DImode, ior_optab, insl, dstl, dstl, 0, OPTAB_WIDEN); ++ } ++ ++ /* Must store high before low for degenerate case of aligned. */ ++ emit_move_insn (memh, dsth); ++ emit_move_insn (meml, dstl); ++} ++ ++/* The block move code tries to maximize speed by separating loads and ++ stores at the expense of register pressure: we load all of the data ++ before we store it back out. There are two secondary effects worth ++ mentioning, that this speeds copying to/from aligned and unaligned ++ buffers, and that it makes the code significantly easier to write. 
*/ ++ ++#define MAX_MOVE_WORDS 8 ++ ++/* Load an integral number of consecutive unaligned quadwords. */ ++ ++static void ++sw_64_expand_unaligned_load_words (rtx *out_regs, rtx smem, HOST_WIDE_INT words, ++ HOST_WIDE_INT ofs) ++{ ++ rtx const im8 = GEN_INT (-8); ++ rtx ext_tmps[MAX_MOVE_WORDS], data_regs[MAX_MOVE_WORDS + 1]; ++ rtx sreg, areg, tmp, smema; ++ HOST_WIDE_INT i; ++ ++ smema = XEXP (smem, 0); ++ if (GET_CODE (smema) == LO_SUM) ++ smema = force_reg (Pmode, smema); ++ ++ /* Generate all the tmp registers we need. */ ++ for (i = 0; i < words; ++i) ++ { ++ data_regs[i] = out_regs[i]; ++ ext_tmps[i] = gen_reg_rtx (DImode); ++ } ++ data_regs[words] = gen_reg_rtx (DImode); ++ ++ if (ofs != 0) ++ smem = adjust_address (smem, GET_MODE (smem), ofs); ++ ++ /* Load up all of the source data. */ ++ for (i = 0; i < words; ++i) ++ { ++ tmp = change_address (smem, DImode, ++ gen_rtx_AND (DImode, ++ plus_constant (DImode, smema, 8 * i), ++ im8)); ++ set_mem_alias_set (tmp, 0); ++ emit_move_insn (data_regs[i], tmp); ++ } ++ ++ tmp = change_address ( ++ smem, DImode, ++ gen_rtx_AND (DImode, plus_constant (DImode, smema, 8 * words - 1), im8)); ++ set_mem_alias_set (tmp, 0); ++ emit_move_insn (data_regs[words], tmp); ++ ++ /* Extract the half-word fragments. Unfortunately DEC decided to make ++ extxh with offset zero a noop instead of zeroing the register, so ++ we must take care of that edge condition ourselves with cmov. */ ++ ++ sreg = copy_addr_to_reg (smema); ++ areg ++ = expand_binop (DImode, and_optab, sreg, GEN_INT (7), NULL, 1, OPTAB_WIDEN); ++ for (i = 0; i < words; ++i) ++ { ++ emit_insn (gen_extql (data_regs[i], data_regs[i], sreg)); ++ emit_insn (gen_extqh (ext_tmps[i], data_regs[i + 1], sreg)); ++ emit_insn (gen_rtx_SET ( ++ ext_tmps[i], ++ gen_rtx_IF_THEN_ELSE (DImode, gen_rtx_EQ (DImode, areg, const0_rtx), ++ const0_rtx, ext_tmps[i]))); ++ } ++ ++ /* Merge the half-words into whole words. 
*/ ++ for (i = 0; i < words; ++i) ++ { ++ out_regs[i] = expand_binop (DImode, ior_optab, data_regs[i], ext_tmps[i], ++ data_regs[i], 1, OPTAB_WIDEN); ++ } ++} ++ ++/* Store an integral number of consecutive unaligned quadwords. DATA_REGS ++ may be NULL to store zeros. */ ++ ++static void ++sw_64_expand_unaligned_store_words (rtx *data_regs, rtx dmem, ++ HOST_WIDE_INT words, HOST_WIDE_INT ofs) ++{ ++ rtx const im8 = GEN_INT (-8); ++ rtx ins_tmps[MAX_MOVE_WORDS]; ++ rtx st_tmp_1, st_tmp_2, dreg; ++ rtx st_addr_1, st_addr_2, dmema; ++ HOST_WIDE_INT i; ++ ++ dmema = XEXP (dmem, 0); ++ if (GET_CODE (dmema) == LO_SUM) ++ dmema = force_reg (Pmode, dmema); ++ ++ /* Generate all the tmp registers we need. */ ++ if (data_regs != NULL) ++ for (i = 0; i < words; ++i) ++ ins_tmps[i] = gen_reg_rtx (DImode); ++ st_tmp_1 = gen_reg_rtx (DImode); ++ st_tmp_2 = gen_reg_rtx (DImode); ++ ++ if (ofs != 0) ++ dmem = adjust_address (dmem, GET_MODE (dmem), ofs); ++ ++ st_addr_2 = change_address ( ++ dmem, DImode, ++ gen_rtx_AND (DImode, plus_constant (DImode, dmema, words * 8 - 1), im8)); ++ set_mem_alias_set (st_addr_2, 0); ++ ++ st_addr_1 = change_address (dmem, DImode, gen_rtx_AND (DImode, dmema, im8)); ++ set_mem_alias_set (st_addr_1, 0); ++ ++ /* Load up the destination end bits. */ ++ emit_move_insn (st_tmp_2, st_addr_2); ++ emit_move_insn (st_tmp_1, st_addr_1); ++ ++ /* Shift the input data into place. */ ++ dreg = copy_addr_to_reg (dmema); ++ if (data_regs != NULL) ++ { ++ for (i = words - 1; i >= 0; --i) ++ { ++ emit_insn (gen_insqh (ins_tmps[i], data_regs[i], dreg)); ++ emit_insn (gen_insql (data_regs[i], data_regs[i], dreg)); ++ } ++ for (i = words - 1; i > 0; --i) ++ { ++ ins_tmps[i - 1] ++ = expand_binop (DImode, ior_optab, data_regs[i], ins_tmps[i - 1], ++ ins_tmps[i - 1], 1, OPTAB_WIDEN); ++ } ++ } ++ ++ /* Split and merge the ends with the destination data. 
*/ ++ emit_insn (gen_mskqh (st_tmp_2, st_tmp_2, dreg)); ++ emit_insn (gen_mskql (st_tmp_1, st_tmp_1, dreg)); ++ ++ if (data_regs != NULL) ++ { ++ st_tmp_2 = expand_binop (DImode, ior_optab, st_tmp_2, ins_tmps[words - 1], ++ st_tmp_2, 1, OPTAB_WIDEN); ++ st_tmp_1 = expand_binop (DImode, ior_optab, st_tmp_1, data_regs[0], ++ st_tmp_1, 1, OPTAB_WIDEN); ++ } ++ ++ /* Store it all. */ ++ emit_move_insn (st_addr_2, st_tmp_2); ++ for (i = words - 1; i > 0; --i) ++ { ++ rtx tmp = change_address ( ++ dmem, DImode, ++ gen_rtx_AND (DImode, plus_constant (DImode, dmema, i * 8), im8)); ++ set_mem_alias_set (tmp, 0); ++ emit_move_insn (tmp, data_regs ? ins_tmps[i - 1] : const0_rtx); ++ } ++ emit_move_insn (st_addr_1, st_tmp_1); ++} ++ ++/* Expand string/block move operations. ++ ++ operands[0] is the pointer to the destination. ++ operands[1] is the pointer to the source. ++ operands[2] is the number of bytes to move. ++ operands[3] is the alignment. */ ++ ++int ++sw_64_expand_block_move (rtx operands[]) ++{ ++ rtx bytes_rtx = operands[2]; ++ rtx align_rtx = operands[3]; ++ HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx); ++ HOST_WIDE_INT bytes = orig_bytes; ++ HOST_WIDE_INT src_align = INTVAL (align_rtx) * BITS_PER_UNIT; ++ HOST_WIDE_INT dst_align = src_align; ++ rtx orig_src = operands[1]; ++ rtx orig_dst = operands[0]; ++ rtx data_regs[2 * MAX_MOVE_WORDS + 16]; ++ rtx tmp; ++ unsigned int i, words, ofs, nregs = 0; ++ ++ if (orig_bytes <= 0) ++ return 1; ++ else if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD) ++ return 0; ++ ++ /* Look for additional alignment information from recorded register info. 
*/ ++ ++ tmp = XEXP (orig_src, 0); ++ if (REG_P (tmp)) ++ src_align = MAX (src_align, REGNO_POINTER_ALIGN (REGNO (tmp))); ++ else if (GET_CODE (tmp) == PLUS && REG_P (XEXP (tmp, 0)) ++ && CONST_INT_P (XEXP (tmp, 1))) ++ { ++ unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1)); ++ unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0))); ++ ++ if (a > src_align) ++ { ++ if (a >= 64 && c % 8 == 0) ++ src_align = 64; ++ else if (a >= 32 && c % 4 == 0) ++ src_align = 32; ++ else if (a >= 16 && c % 2 == 0) ++ src_align = 16; ++ } ++ } ++ ++ tmp = XEXP (orig_dst, 0); ++ if (REG_P (tmp)) ++ dst_align = MAX (dst_align, REGNO_POINTER_ALIGN (REGNO (tmp))); ++ else if (GET_CODE (tmp) == PLUS && REG_P (XEXP (tmp, 0)) ++ && CONST_INT_P (XEXP (tmp, 1))) ++ { ++ unsigned HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1)); ++ unsigned int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0))); ++ ++ if (a > dst_align) ++ { ++ if (a >= 64 && c % 8 == 0) ++ dst_align = 64; ++ else if (a >= 32 && c % 4 == 0) ++ dst_align = 32; ++ else if (a >= 16 && c % 2 == 0) ++ dst_align = 16; ++ } ++ } ++ ++ ofs = 0; ++ if (src_align >= 64 && bytes >= 8) ++ { ++ words = bytes / 8; ++ ++ for (i = 0; i < words; ++i) ++ data_regs[nregs + i] = gen_reg_rtx (DImode); ++ ++ for (i = 0; i < words; ++i) ++ emit_move_insn (data_regs[nregs + i], ++ adjust_address (orig_src, DImode, ofs + i * 8)); ++ ++ nregs += words; ++ bytes -= words * 8; ++ ofs += words * 8; ++ } ++ ++ if (src_align >= 32 && bytes >= 4) ++ { ++ words = bytes / 4; ++ ++ for (i = 0; i < words; ++i) ++ data_regs[nregs + i] = gen_reg_rtx (SImode); ++ ++ for (i = 0; i < words; ++i) ++ emit_move_insn (data_regs[nregs + i], ++ adjust_address (orig_src, SImode, ofs + i * 4)); ++ ++ nregs += words; ++ bytes -= words * 4; ++ ofs += words * 4; ++ } ++ ++ if (bytes >= 8) ++ { ++ words = bytes / 8; ++ ++ for (i = 0; i < words + 1; ++i) ++ data_regs[nregs + i] = gen_reg_rtx (DImode); ++ ++ sw_64_expand_unaligned_load_words (data_regs + nregs, orig_src, words, ++ 
ofs); ++ ++ nregs += words; ++ bytes -= words * 8; ++ ofs += words * 8; ++ } ++ ++ if (!TARGET_BWX && bytes >= 4) ++ { ++ data_regs[nregs++] = tmp = gen_reg_rtx (SImode); ++ sw_64_expand_unaligned_load (tmp, orig_src, 4, ofs, 0); ++ bytes -= 4; ++ ofs += 4; ++ } ++ ++ if (bytes >= 2) ++ { ++ if (src_align >= 16) ++ { ++ do ++ { ++ data_regs[nregs++] = tmp = gen_reg_rtx (HImode); ++ emit_move_insn (tmp, adjust_address (orig_src, HImode, ofs)); ++ bytes -= 2; ++ ofs += 2; ++ } while (bytes >= 2); ++ } ++ else if (!TARGET_BWX) ++ { ++ data_regs[nregs++] = tmp = gen_reg_rtx (HImode); ++ sw_64_expand_unaligned_load (tmp, orig_src, 2, ofs, 0); ++ bytes -= 2; ++ ofs += 2; ++ } ++ } ++ ++ while (bytes > 0) ++ { ++ data_regs[nregs++] = tmp = gen_reg_rtx (QImode); ++ emit_move_insn (tmp, adjust_address (orig_src, QImode, ofs)); ++ bytes -= 1; ++ ofs += 1; ++ } ++ ++ gcc_assert (nregs <= ARRAY_SIZE (data_regs)); ++ ++ /* Now save it back out again. */ ++ ++ i = 0, ofs = 0; ++ ++ /* Write out the data in whatever chunks reading the source allowed. */ ++ if (dst_align >= 64) ++ { ++ while (i < nregs && GET_MODE (data_regs[i]) == DImode) ++ { ++ emit_move_insn (adjust_address (orig_dst, DImode, ofs), data_regs[i]); ++ ofs += 8; ++ i++; ++ } ++ } ++ ++ if (dst_align >= 32) ++ { ++ /* If the source has remaining DImode regs, write them out in ++ two pieces. 
*/ ++ while (i < nregs && GET_MODE (data_regs[i]) == DImode) ++ { ++ tmp = expand_binop (DImode, lshr_optab, data_regs[i], GEN_INT (32), ++ NULL_RTX, 1, OPTAB_WIDEN); ++ ++ emit_move_insn (adjust_address (orig_dst, SImode, ofs), ++ gen_lowpart (SImode, data_regs[i])); ++ emit_move_insn (adjust_address (orig_dst, SImode, ofs + 4), ++ gen_lowpart (SImode, tmp)); ++ ofs += 8; ++ i++; ++ } ++ ++ while (i < nregs && GET_MODE (data_regs[i]) == SImode) ++ { ++ emit_move_insn (adjust_address (orig_dst, SImode, ofs), data_regs[i]); ++ ofs += 4; ++ i++; ++ } ++ } ++ ++ if (i < nregs && GET_MODE (data_regs[i]) == DImode) ++ { ++ /* Write out a remaining block of words using unaligned methods. */ ++ ++ for (words = 1; i + words < nregs; words++) ++ if (GET_MODE (data_regs[i + words]) != DImode) ++ break; ++ ++ if (words == 1) ++ sw_64_expand_unaligned_store (orig_dst, data_regs[i], 8, ofs); ++ else ++ sw_64_expand_unaligned_store_words (data_regs + i, orig_dst, words, ++ ofs); ++ ++ i += words; ++ ofs += words * 8; ++ } ++ ++ /* Due to the above, this won't be aligned. */ ++ /* ??? If we have more than one of these, consider constructing full ++ words in registers and using sw_64_expand_unaligned_store_words. */ ++ while (i < nregs && GET_MODE (data_regs[i]) == SImode) ++ { ++ sw_64_expand_unaligned_store (orig_dst, data_regs[i], 4, ofs); ++ ofs += 4; ++ i++; ++ } ++ ++ if (dst_align >= 16) ++ while (i < nregs && GET_MODE (data_regs[i]) == HImode) ++ { ++ emit_move_insn (adjust_address (orig_dst, HImode, ofs), data_regs[i]); ++ i++; ++ ofs += 2; ++ } ++ else ++ while (i < nregs && GET_MODE (data_regs[i]) == HImode) ++ { ++ sw_64_expand_unaligned_store (orig_dst, data_regs[i], 2, ofs); ++ i++; ++ ofs += 2; ++ } ++ ++ /* The remainder must be byte copies. 
*/ ++ while (i < nregs) ++ { ++ gcc_assert (GET_MODE (data_regs[i]) == QImode); ++ emit_move_insn (adjust_address (orig_dst, QImode, ofs), data_regs[i]); ++ i++; ++ ofs += 1; ++ } ++ ++ return 1; ++} ++ ++int ++sw_64_expand_block_clear (rtx operands[]) ++{ ++ rtx bytes_rtx = operands[1]; ++ rtx align_rtx = operands[3]; ++ HOST_WIDE_INT orig_bytes = INTVAL (bytes_rtx); ++ HOST_WIDE_INT bytes = orig_bytes; ++ HOST_WIDE_INT align = INTVAL (align_rtx) * BITS_PER_UNIT; ++ HOST_WIDE_INT alignofs = 0; ++ rtx orig_dst = operands[0]; ++ rtx tmp; ++ int i, words, ofs = 0; ++ ++ if (orig_bytes <= 0) ++ return 1; ++ if (orig_bytes > MAX_MOVE_WORDS * UNITS_PER_WORD) ++ return 0; ++ ++ /* Look for stricter alignment. */ ++ tmp = XEXP (orig_dst, 0); ++ if (REG_P (tmp)) ++ align = MAX (align, REGNO_POINTER_ALIGN (REGNO (tmp))); ++ else if (GET_CODE (tmp) == PLUS && REG_P (XEXP (tmp, 0)) ++ && CONST_INT_P (XEXP (tmp, 1))) ++ { ++ HOST_WIDE_INT c = INTVAL (XEXP (tmp, 1)); ++ int a = REGNO_POINTER_ALIGN (REGNO (XEXP (tmp, 0))); ++ ++ if (a > align) ++ { ++ if (a >= 64) ++ align = a, alignofs = 8 - c % 8; ++ else if (a >= 32) ++ align = a, alignofs = 4 - c % 4; ++ else if (a >= 16) ++ align = a, alignofs = 2 - c % 2; ++ } ++ } ++ ++ /* Handle an unaligned prefix first. */ ++ ++ if (alignofs > 0) ++ { ++ /* Given that alignofs is bounded by align, the only time BWX could ++ generate three stores is for a 7 byte fill. Prefer two individual ++ stores over a load/mask/store sequence. */ ++ if ((!TARGET_BWX || alignofs == 7) && align >= 32 ++ && !(alignofs == 4 && bytes >= 4)) ++ { ++ machine_mode mode = (align >= 64 ? DImode : SImode); ++ int inv_alignofs = (align >= 64 ? 
8 : 4) - alignofs; ++ rtx mem, tmp; ++ HOST_WIDE_INT mask; ++ ++ mem = adjust_address (orig_dst, mode, ofs - inv_alignofs); ++ set_mem_alias_set (mem, 0); ++ ++ mask = ~(HOST_WIDE_INT_M1U << (inv_alignofs * 8)); ++ if (bytes < alignofs) ++ { ++ mask |= HOST_WIDE_INT_M1U << ((inv_alignofs + bytes) * 8); ++ ofs += bytes; ++ bytes = 0; ++ } ++ else ++ { ++ bytes -= alignofs; ++ ofs += alignofs; ++ } ++ alignofs = 0; ++ ++ tmp = expand_binop (mode, and_optab, mem, GEN_INT (mask), NULL_RTX, 1, ++ OPTAB_WIDEN); ++ ++ emit_move_insn (mem, tmp); ++ } ++ ++ if (TARGET_BWX && (alignofs & 1) && bytes >= 1) ++ { ++ emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx); ++ bytes -= 1; ++ ofs += 1; ++ alignofs -= 1; ++ } ++ if (TARGET_BWX && align >= 16 && (alignofs & 3) == 2 && bytes >= 2) ++ { ++ emit_move_insn (adjust_address (orig_dst, HImode, ofs), const0_rtx); ++ bytes -= 2; ++ ofs += 2; ++ alignofs -= 2; ++ } ++ if (alignofs == 4 && bytes >= 4) ++ { ++ emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx); ++ bytes -= 4; ++ ofs += 4; ++ alignofs = 0; ++ } ++ ++ /* If we've not used the extra lead alignment information by now, ++ we won't be able to. Downgrade align to match what's left over. */ ++ if (alignofs > 0) ++ { ++ alignofs = alignofs & -alignofs; ++ align = MIN (align, alignofs * BITS_PER_UNIT); ++ } ++ } ++ ++ /* Handle a block of contiguous long-words. */ ++ ++ if (align >= 64 && bytes >= 8) ++ { ++ words = bytes / 8; ++ ++ for (i = 0; i < words; ++i) ++ emit_move_insn (adjust_address (orig_dst, DImode, ofs + i * 8), ++ const0_rtx); ++ ++ bytes -= words * 8; ++ ofs += words * 8; ++ } ++ ++ /* If the block is large and appropriately aligned, emit a single ++ store followed by a sequence of stl_u insns. 
*/ ++ ++ if (align >= 32 && bytes > 16) ++ { ++ rtx orig_dsta; ++ ++ emit_move_insn (adjust_address (orig_dst, SImode, ofs), const0_rtx); ++ bytes -= 4; ++ ofs += 4; ++ ++ orig_dsta = XEXP (orig_dst, 0); ++ if (GET_CODE (orig_dsta) == LO_SUM) ++ orig_dsta = force_reg (Pmode, orig_dsta); ++ ++ words = bytes / 8; ++ for (i = 0; i < words; ++i) ++ { ++ rtx mem = change_address ( ++ orig_dst, DImode, ++ gen_rtx_AND (DImode, plus_constant (DImode, orig_dsta, ofs + i * 8), ++ GEN_INT (-8))); ++ set_mem_alias_set (mem, 0); ++ emit_move_insn (mem, const0_rtx); ++ } ++ ++ /* Depending on the alignment, the first stl_u may have overlapped ++ with the initial stl, which means that the last stl_u didn't ++ write as much as it would appear. Leave those questionable bytes ++ unaccounted for. */ ++ bytes -= words * 8 - 4; ++ ofs += words * 8 - 4; ++ } ++ ++ /* Handle a smaller block of aligned words. */ ++ ++ if ((align >= 64 && bytes == 4) || (align == 32 && bytes >= 4)) ++ { ++ words = bytes / 4; ++ ++ for (i = 0; i < words; ++i) ++ emit_move_insn (adjust_address (orig_dst, SImode, ofs + i * 4), ++ const0_rtx); ++ ++ bytes -= words * 4; ++ ofs += words * 4; ++ } ++ ++ /* An unaligned block uses stl_u stores for as many as possible. */ ++ ++ if (bytes >= 8) ++ { ++ words = bytes / 8; ++ ++ sw_64_expand_unaligned_store_words (NULL, orig_dst, words, ofs); ++ ++ bytes -= words * 8; ++ ofs += words * 8; ++ } ++ ++ /* Next clean up any trailing pieces. */ ++ ++ /* Count the number of bits in BYTES for which aligned stores could ++ be emitted. */ ++ words = 0; ++ for (i = (TARGET_BWX ? 1 : 4); i * BITS_PER_UNIT <= align; i <<= 1) ++ if (bytes & i) ++ words += 1; ++ ++ /* If we have appropriate alignment (and it wouldn't take too many ++ instructions otherwise), mask out the bytes we need. */ ++ if (TARGET_BWX ? 
words > 2 : bytes > 0) ++ { ++ if (align >= 64) ++ { ++ rtx mem, tmp; ++ HOST_WIDE_INT mask; ++ ++ mem = adjust_address (orig_dst, DImode, ofs); ++ set_mem_alias_set (mem, 0); ++ ++ mask = HOST_WIDE_INT_M1U << (bytes * 8); ++ ++ tmp = expand_binop (DImode, and_optab, mem, GEN_INT (mask), NULL_RTX, ++ 1, OPTAB_WIDEN); ++ ++ emit_move_insn (mem, tmp); ++ return 1; ++ } ++ else if (align >= 32 && bytes < 4) ++ { ++ rtx mem, tmp; ++ HOST_WIDE_INT mask; ++ ++ mem = adjust_address (orig_dst, SImode, ofs); ++ set_mem_alias_set (mem, 0); ++ ++ mask = HOST_WIDE_INT_M1U << (bytes * 8); ++ ++ tmp = expand_binop (SImode, and_optab, mem, GEN_INT (mask), NULL_RTX, ++ 1, OPTAB_WIDEN); ++ ++ emit_move_insn (mem, tmp); ++ return 1; ++ } ++ } ++ ++ if (!TARGET_BWX && bytes >= 4) ++ { ++ sw_64_expand_unaligned_store (orig_dst, const0_rtx, 4, ofs); ++ bytes -= 4; ++ ofs += 4; ++ } ++ ++ if (bytes >= 2) ++ { ++ if (align >= 16) ++ { ++ do ++ { ++ emit_move_insn (adjust_address (orig_dst, HImode, ofs), ++ const0_rtx); ++ bytes -= 2; ++ ofs += 2; ++ } while (bytes >= 2); ++ } ++ else if (!TARGET_BWX) ++ { ++ sw_64_expand_unaligned_store (orig_dst, const0_rtx, 2, ofs); ++ bytes -= 2; ++ ofs += 2; ++ } ++ } ++ ++ while (bytes > 0) ++ { ++ emit_move_insn (adjust_address (orig_dst, QImode, ofs), const0_rtx); ++ bytes -= 1; ++ ofs += 1; ++ } ++ ++ return 1; ++} ++ ++/* Returns a mask so that zap (x, value) == x & mask. 
*/ ++ ++rtx ++sw_64_expand_zap_mask (HOST_WIDE_INT value) ++{ ++ rtx result; ++ int i; ++ HOST_WIDE_INT mask = 0; ++ ++ for (i = 7; i >= 0; --i) ++ { ++ mask <<= 8; ++ if (!((value >> i) & 1)) ++ mask |= 0xff; ++ } ++ ++ result = gen_int_mode (mask, DImode); ++ return result; ++} ++ ++void ++sw_64_expand_builtin_vector_binop (rtx (*gen) (rtx, rtx, rtx), ++ machine_mode mode, rtx op0, rtx op1, rtx op2) ++{ ++ op0 = gen_lowpart (mode, op0); ++ ++ if (op1 == const0_rtx) ++ op1 = CONST0_RTX (mode); ++ else ++ op1 = gen_lowpart (mode, op1); ++ ++ if (op2 == const0_rtx) ++ op2 = CONST0_RTX (mode); ++ else ++ op2 = gen_lowpart (mode, op2); ++ ++ emit_insn ((*gen) (op0, op1, op2)); ++} ++ ++/* A subroutine of the atomic operation splitters. Jump to LABEL if ++ COND is true. Mark the jump as unlikely to be taken. */ ++ ++static void ++emit_unlikely_jump (rtx cond, rtx label) ++{ ++ rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx); ++ rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x)); ++ add_reg_br_prob_note (insn, profile_probability::very_unlikely ()); ++} ++ ++/* Subroutines of the atomic operation splitters. Emit barriers ++ as needed for the memory MODEL. */ ++ ++static void ++sw_64_pre_atomic_barrier (enum memmodel model) ++{ ++ if (need_atomic_barrier_p (model, true)) ++ emit_insn (gen_memory_barrier ()); ++} ++ ++static void ++sw_64_post_atomic_barrier (enum memmodel model) ++{ ++ if (need_atomic_barrier_p (model, false)) ++ emit_insn (gen_memory_barrier ()); ++} ++ ++/* A subroutine of the atomic operation splitters. Emit an insxl ++ instruction in MODE. 
*/ ++ ++static rtx ++emit_insxl (machine_mode mode, rtx op1, rtx op2) ++{ ++ rtx ret = gen_reg_rtx (DImode); ++ rtx (*fn) (rtx, rtx, rtx); ++ ++ switch (mode) ++ { ++ case E_QImode: ++ fn = gen_insbl; ++ break; ++ case E_HImode: ++ fn = gen_inswl; ++ break; ++ case E_SImode: ++ fn = gen_insll; ++ break; ++ case E_DImode: ++ fn = gen_insql; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ ++ op1 = force_reg (mode, op1); ++ emit_insn (fn (ret, op1, op2)); ++ ++ return ret; ++} ++ ++rtx ++gen_move_reg (rtx x) ++{ ++ rtx temp = gen_reg_rtx (GET_MODE (x)); ++ emit_move_insn (temp, x); ++ return temp; ++} ++ ++/* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P, ++ * * add a reg_note saying that this was a division. Support both scalar ++ * and ++ * * vector divide. Assumes no trapping math and finite arguments. */ ++ ++void ++sw_64_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p) ++{ ++ machine_mode mode = GET_MODE (dst); ++ rtx one, x0, e0, x1, x2, xprev, eprev, xnext, enext, u, v; ++ int i; ++ ++ int passes = flag_sw_recip_precision ? 2 : 1; ++ if (mode == DFmode) ++ passes += 2; ++ ++ enum insn_code code = optab_handler (smul_optab, mode); ++ insn_gen_fn gen_mul = GEN_FCN (code); ++ gcc_assert (code != CODE_FOR_nothing); ++ ++ enum insn_code code1 = optab_handler (fma_optab, mode); ++ insn_gen_fn gen_fma = GEN_FCN (code1); ++ gcc_assert (code1 != CODE_FOR_nothing); ++ ++ enum insn_code code2 = optab_handler (fnma_optab, mode); ++ insn_gen_fn gen_fnma = GEN_FCN (code2); ++ gcc_assert (code2 != CODE_FOR_nothing); ++ ++ one = sw_64_load_constant_and_splat (mode, dconst1); ++ ++ /* x0 = 1./d estimate */ ++ x0 = gen_reg_rtx (mode); ++ emit_insn ( ++ gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d), UNSPEC_FRECX))); ++ ++ /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */ ++ if (passes > 1) ++ { ++ /* e0 = 1. 
- d * x0 */ ++ e0 = gen_reg_rtx (mode); ++ emit_insn (gen_fnma (e0, d, x0, one)); ++ ++ /* x1 = x0 + e0 * x0 */ ++ x1 = gen_reg_rtx (mode); ++ emit_insn (gen_fma (x1, x0, e0, x0)); ++ ++ for (i = 0, xprev = x1, eprev = e0; i < passes - 2; ++ ++i, xprev = xnext, eprev = enext) ++ { ++ /* enext = eprev * eprev */ ++ enext = gen_reg_rtx (mode); ++ emit_insn (gen_mul (enext, eprev, eprev)); ++ ++ /* xnext = xprev + enext * xprev */ ++ xnext = gen_reg_rtx (mode); ++ emit_insn (gen_fma (xnext, xprev, enext, xprev)); ++ } ++ } ++ else ++ xprev = x0; ++ ++ /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */ ++ /* u = n * xprev */ ++ u = gen_reg_rtx (mode); ++ emit_insn (gen_mul (u, n, xprev)); ++ ++ /* v = n - (d * u) */ ++ v = gen_reg_rtx (mode); ++ emit_insn (gen_fnma (v, d, u, n)); ++ ++ /* dst = (v * xprev) + u */ ++ emit_insn (gen_fma (dst, v, xprev, u)); ++} ++ ++/* Expand an atomic fetch-and-operate pattern. CODE is the binary operation ++ to perform. MEM is the memory on which to operate. VAL is the second ++ operand of the binary operator. BEFORE and AFTER are optional locations to ++ return the value of MEM either before of after the operation. SCRATCH is ++ a scratch register. 
*/ ++ ++void ++sw_64_split_atomic_op (enum rtx_code code, rtx mem, rtx val, rtx before, ++ rtx after, rtx scratch, enum memmodel model) ++{ ++ machine_mode mode = GET_MODE (mem); ++ rtx label, x, cond = gen_rtx_REG (DImode, REGNO (scratch)); ++ ++ if (flag_sw_delmemb == 0) ++ sw_64_pre_atomic_barrier (model); ++ label = gen_label_rtx (); ++ emit_label (label); ++ label = gen_rtx_LABEL_REF (DImode, label); ++ ++ if (before == NULL) ++ before = scratch; ++ emit_insn (gen_load_locked (mode, before, mem)); ++ if (!TARGET_SW8A) ++ { ++ if (after) ++ { ++ rtx cond1 = gen_rtx_REG (DImode, REGNO (after)); ++ emit_insn (gen_rtx_SET (cond1, const1_rtx)); ++ emit_insn (gen_builtin_wr_f (cond1)); ++ } ++ else ++ { ++ rtx cond2 = gen_rtx_REG (DImode, 28); ++ emit_insn (gen_rtx_SET (cond2, const1_rtx)); ++ emit_insn (gen_builtin_wr_f (cond2)); ++ } ++ } ++ if (code == NOT) ++ { ++ x = gen_rtx_AND (mode, before, val); ++ emit_insn (gen_rtx_SET (val, x)); ++ ++ x = gen_rtx_NOT (mode, val); ++ } ++ else ++ x = gen_rtx_fmt_ee (code, mode, before, val); ++ if (after) ++ emit_insn (gen_rtx_SET (after, copy_rtx (x))); ++ emit_insn (gen_rtx_SET (scratch, x)); ++ ++ emit_insn (gen_store_conditional (mode, cond, mem, scratch)); ++ if (!TARGET_SW8A) ++ emit_insn (gen_builtin_rd_f (cond)); ++ ++ ++ x = gen_rtx_EQ (DImode, cond, const0_rtx); ++ emit_unlikely_jump (x, label); ++} ++ ++/* Expand a compare and swap operation. 
*/ ++ ++void ++sw_64_split_compare_and_swap (rtx operands[]) ++{ ++ rtx cond, retval, mem, oldval, newval; ++ bool is_weak; ++ enum memmodel mod_s, mod_f; ++ machine_mode mode; ++ rtx label1, label2, x; ++ ++ rtx imust = operands[8]; ++ cond = operands[0]; ++ retval = operands[1]; ++ mem = operands[2]; ++ oldval = operands[3]; ++ newval = operands[4]; ++ is_weak = (operands[5] != const0_rtx); ++ mod_s = memmodel_from_int (INTVAL (operands[6])); ++ mod_f = memmodel_from_int (INTVAL (operands[7])); ++ mode = GET_MODE (mem); ++ ++ if (flag_sw_delmemb == 0) ++ sw_64_pre_atomic_barrier (mod_s); ++ ++ label1 = NULL_RTX; ++ if (!is_weak) ++ { ++ label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); ++ emit_label (XEXP (label1, 0)); ++ } ++ label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); ++ ++ emit_insn (gen_load_locked (mode, retval, mem)); ++ x = gen_lowpart (DImode, retval); ++ rtx imust1; ++ if (TARGET_SW8A) ++ { ++ if (oldval == const0_rtx) ++ { ++ emit_move_insn (cond, const0_rtx); ++ x = gen_rtx_NE (DImode, x, const0_rtx); ++ } ++ else ++ { ++ x = gen_rtx_EQ (DImode, x, oldval); ++ emit_insn (gen_rtx_SET (cond, x)); ++ x = gen_rtx_EQ (DImode, cond, const0_rtx); ++ } ++ emit_unlikely_jump (x, label2); ++ } ++ else ++ { ++ x = gen_rtx_EQ (DImode, x, oldval); ++ imust1 = gen_lowpart (DImode, imust); ++ emit_insn (gen_rtx_SET (imust1, x)); ++ emit_insn (gen_builtin_wr_f (imust1)); ++ } ++ ++ emit_move_insn (cond, newval); ++ emit_insn (gen_store_conditional (mode, cond, mem, gen_lowpart (mode, cond))); ++ ++ if (!TARGET_SW8A) ++ { ++ emit_insn (gen_builtin_rd_f (cond)); ++ imust1 = gen_rtx_EQ (DImode, imust1, const0_rtx); ++ emit_unlikely_jump (imust1, label2); ++ } ++ if (!is_weak) ++ { ++ x = gen_rtx_EQ (DImode, cond, const0_rtx); ++ emit_unlikely_jump (x, label1); ++ } ++ ++ if (!is_mm_relaxed (mod_f)) ++ emit_label (XEXP (label2, 0)); ++ ++ if (is_mm_relaxed (mod_f)) ++ emit_label (XEXP (label2, 0)); ++} ++ ++void ++sw_64_expand_compare_and_swap_12 (rtx 
operands[]) ++{ ++ rtx cond, dst, mem, oldval, newval, is_weak, mod_s, mod_f; ++ machine_mode mode; ++ rtx addr, align, wdst; ++ rtx imust; ++ ++ cond = operands[0]; ++ dst = operands[1]; ++ mem = operands[2]; ++ oldval = operands[3]; ++ newval = operands[4]; ++ is_weak = operands[5]; ++ mod_s = operands[6]; ++ mod_f = operands[7]; ++ mode = GET_MODE (mem); ++ bool use_cas = GET_MODE_SIZE (mode) >= 32 && flag_sw_use_cas; ++ if (!use_cas) ++ imust = operands[8]; ++ ++ /* We forced the address into a register via mem_noofs_operand. */ ++ addr = XEXP (mem, 0); ++ gcc_assert (register_operand (addr, DImode)); ++ ++ align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8), NULL_RTX, 1, ++ OPTAB_DIRECT); ++ ++ if (oldval != const0_rtx && TARGET_SW8A && use_cas) ++ oldval = emit_insxl (mode, oldval, addr); ++ oldval = convert_modes (DImode, mode, oldval, 1); ++ ++ if (newval != const0_rtx) ++ newval = emit_insxl (mode, newval, addr); ++ ++ wdst = gen_reg_rtx (DImode); ++ if (TARGET_SW8A && use_cas) ++ emit_insn (gen_atomic_compare_and_swap_1_target_sw8a ( ++ mode, cond, wdst, mem, oldval, newval, align, is_weak, mod_s, mod_f)); ++ else ++ emit_insn (gen_atomic_compare_and_swap_1 (mode, cond, wdst, mem, oldval, ++ newval, align, is_weak, mod_s, ++ mod_f, imust)); ++ emit_move_insn (dst, gen_lowpart (mode, wdst)); ++} ++ ++void ++sw_64_split_compare_and_swap_12 (rtx operands[]) ++{ ++ rtx cond, dest, orig_mem, oldval, newval, align, scratch; ++ machine_mode mode; ++ bool is_weak; ++ enum memmodel mod_s, mod_f; ++ rtx label1, label2, mem, addr, width, mask, x; ++ rtx imust; ++ ++ cond = operands[0]; ++ dest = operands[1]; ++ orig_mem = operands[2]; ++ oldval = operands[3]; ++ newval = operands[4]; ++ align = operands[5]; ++ is_weak = (operands[6] != const0_rtx); ++ mod_s = memmodel_from_int (INTVAL (operands[7])); ++ mod_f = memmodel_from_int (INTVAL (operands[8])); ++ imust = operands[9]; ++ scratch = operands[10]; ++ mode = GET_MODE (orig_mem); ++ addr = XEXP (orig_mem, 
0); ++ ++ mem = gen_rtx_MEM (DImode, align); ++ MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem); ++ if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER) ++ set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); ++ ++ if (flag_sw_delmemb == 0) ++ sw_64_pre_atomic_barrier (mod_s); ++ ++ label1 = NULL_RTX; ++ if (!is_weak) ++ { ++ label1 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); ++ emit_label (XEXP (label1, 0)); ++ } ++ label2 = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); ++ ++ emit_insn (gen_load_locked (DImode, scratch, mem)); ++ ++ width = GEN_INT (GET_MODE_BITSIZE (mode)); ++ mask = GEN_INT (mode == QImode ? 0xff : 0xffff); ++ emit_insn (gen_extxl (dest, scratch, width, addr)); ++ ++ rtx imust1; ++ if (TARGET_SW8A) ++ { ++ if (oldval == const0_rtx) ++ { ++ emit_move_insn (cond, const0_rtx); ++ x = gen_rtx_NE (DImode, dest, const0_rtx); ++ } ++ else ++ { ++ x = gen_rtx_EQ (DImode, dest, oldval); ++ emit_insn (gen_rtx_SET (cond, x)); ++ x = gen_rtx_EQ (DImode, cond, const0_rtx); ++ } ++ emit_unlikely_jump (x, label2); ++ } ++ else ++ { ++ x = gen_rtx_EQ (DImode, dest, oldval); ++ imust1 = gen_lowpart (DImode, imust); ++ emit_insn (gen_rtx_SET (imust1, x)); ++ emit_insn (gen_builtin_wr_f (imust1)); ++ } ++ emit_insn (gen_mskxl (cond, scratch, mask, addr)); ++ ++ if (newval != const0_rtx) ++ emit_insn (gen_iordi3 (cond, cond, newval)); ++ ++ emit_insn (gen_store_conditional (DImode, cond, mem, cond)); ++ if (!TARGET_SW8A) ++ { ++ emit_insn (gen_builtin_rd_f (cond)); ++ imust1 = gen_rtx_EQ (DImode, imust1, const0_rtx); ++ emit_unlikely_jump (imust1, label2); ++ } ++ ++ if (!is_weak) ++ { ++ x = gen_rtx_EQ (DImode, cond, const0_rtx); ++ emit_unlikely_jump (x, label1); ++ } ++ ++ if (!is_mm_relaxed (mod_f)) ++ emit_label (XEXP (label2, 0)); ++ ++ if (is_mm_relaxed (mod_f)) ++ emit_label (XEXP (label2, 0)); ++} ++ ++/* Expand an atomic exchange operation. 
*/ ++ ++void ++sw_64_split_atomic_exchange (rtx operands[]) ++{ ++ rtx retval, mem, val, scratch; ++ enum memmodel model; ++ machine_mode mode; ++ rtx label, x, cond; ++ ++ retval = operands[0]; ++ mem = operands[1]; ++ val = operands[2]; ++ model = (enum memmodel) INTVAL (operands[3]); ++ scratch = operands[4]; ++ mode = GET_MODE (mem); ++ cond = gen_lowpart (DImode, scratch); ++ ++ if (flag_sw_delmemb == 0) ++ sw_64_pre_atomic_barrier (model); ++ label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); ++ emit_label (XEXP (label, 0)); ++ ++ emit_insn (gen_load_locked (mode, retval, mem)); ++ if (!TARGET_SW8A) ++ { ++ emit_insn (gen_rtx_SET (cond, const1_rtx)); ++ emit_insn (gen_builtin_wr_f (cond)); ++ } ++ emit_move_insn (scratch, val); ++ emit_insn (gen_store_conditional (mode, cond, mem, scratch)); ++ if (!TARGET_SW8A) ++ emit_insn (gen_builtin_rd_f (cond)); ++ x = gen_rtx_EQ (DImode, cond, const0_rtx); ++ emit_unlikely_jump (x, label); ++} ++ ++/* Emit an atomic compare-and-swap operation. 
SI and larger modes */ ++void ++sw_64_split_atomic_cas (rtx operands[]) ++{ ++ rtx cond, retval, mem, oldval, newval; ++ rtx (*gen) (rtx, rtx, rtx); ++ enum memmodel mod_s; ++ machine_mode mode; ++ ++ cond = operands[0]; ++ retval = operands[1]; ++ mem = operands[2]; ++ oldval = operands[3]; ++ newval = operands[4]; ++ ++ mod_s = memmodel_from_int (INTVAL (operands[6])); ++ mode = GET_MODE (mem); ++ ++ if (GET_MODE (mem) == SImode && GET_MODE (oldval) == DImode ++ && GET_MODE (newval) == DImode) ++ { ++ oldval = gen_rtx_REG (SImode, REGNO (oldval)); ++ newval = gen_rtx_REG (SImode, REGNO (newval)); ++ } ++ ++ switch (mode) ++ { ++ case E_SImode: ++ gen = gen_sw_64_atomic_cassi; ++ break; ++ case E_DImode: ++ gen = gen_sw_64_atomic_casdi; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ ++ emit_insn (gen_rtx_SET (retval, newval)); ++ emit_insn (gen (oldval, mem, retval)); ++ ++ rtx x = gen_lowpart (DImode, retval); ++ rtx x1 = gen_lowpart (DImode, oldval); ++ x = gen_rtx_EQ (DImode, x, x1); ++ emit_insn (gen_rtx_SET (cond, x)); ++} ++ ++/* Emit an atomic compare-and-swap operation. 
HI and smaller modes */ ++void ++sw_64_split_atomic_cas_12 (rtx operands[]) ++{ ++ rtx cond, dest, orig_mem, oldval, newval, align, scratch; ++ machine_mode mode; ++ bool is_weak; ++ enum memmodel mod_s, mod_f; ++ rtx label1, label2, mem, addr, width, mask, x; ++ ++ cond = operands[0]; ++ dest = operands[1]; ++ orig_mem = operands[2]; ++ oldval = operands[3]; ++ newval = operands[4]; ++ align = operands[5]; ++ is_weak = (operands[6] != const0_rtx); ++ mod_s = memmodel_from_int (INTVAL (operands[7])); ++ mod_f = memmodel_from_int (INTVAL (operands[8])); ++ scratch = operands[9]; ++ mode = GET_MODE (orig_mem); ++ addr = XEXP (orig_mem, 0); ++ ++ mem = gen_rtx_MEM (DImode, align); ++ MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem); ++ if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER) ++ set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); ++ ++ emit_move_insn (scratch, mem); ++ ++ width = GEN_INT (GET_MODE_BITSIZE (mode)); ++ mask = GEN_INT (mode == QImode ? 0xff : 0xffff); ++ emit_insn (gen_extxl (dest, scratch, width, addr)); ++ emit_insn (gen_mskxl (cond, scratch, mask, addr)); ++ ++ rtx scratch2 = operands[10]; ++ if (newval != const0_rtx) ++ emit_insn (gen_iordi3 (scratch2, cond, newval)); ++ ++ if (oldval == const0_rtx) ++ { ++ emit_move_insn (cond, const0_rtx); ++ x = gen_rtx_NE (DImode, dest, const0_rtx); ++ } ++ else ++ { ++ emit_insn (gen_iordi3 (scratch, cond, oldval)); ++ emit_insn (gen_sw_64_atomic_casdi (scratch, mem, scratch2)); ++ ++ x = gen_rtx_EQ (DImode, scratch2, scratch); ++ emit_insn (gen_rtx_SET (cond, x)); ++ x = gen_rtx_EQ (DImode, cond, const0_rtx); ++ } ++} ++ ++void ++sw_64_expand_atomic_exchange_12 (rtx operands[]) ++{ ++ rtx dst, mem, val, model; ++ machine_mode mode; ++ rtx addr, align, wdst; ++ ++ dst = operands[0]; ++ mem = operands[1]; ++ val = operands[2]; ++ model = operands[3]; ++ mode = GET_MODE (mem); ++ ++ /* We forced the address into a register via mem_noofs_operand. 
*/ ++ addr = XEXP (mem, 0); ++ gcc_assert (register_operand (addr, DImode)); ++ ++ align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-8), NULL_RTX, 1, ++ OPTAB_DIRECT); ++ ++ /* Insert val into the correct byte location within the word. */ ++ if (val != const0_rtx) ++ val = emit_insxl (mode, val, addr); ++ ++ wdst = gen_reg_rtx (DImode); ++ emit_insn (gen_atomic_exchange_1 (mode, wdst, mem, val, align, model)); ++ emit_move_insn (dst, gen_lowpart (mode, wdst)); ++} ++ ++void ++sw_64_split_atomic_exchange_12 (rtx operands[]) ++{ ++ rtx dest, orig_mem, addr, val, align, scratch; ++ rtx label, mem, width, mask, x; ++ machine_mode mode; ++ enum memmodel model; ++ ++ dest = operands[0]; ++ orig_mem = operands[1]; ++ val = operands[2]; ++ align = operands[3]; ++ model = (enum memmodel) INTVAL (operands[4]); ++ scratch = operands[5]; ++ mode = GET_MODE (orig_mem); ++ addr = XEXP (orig_mem, 0); ++ ++ mem = gen_rtx_MEM (DImode, align); ++ MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem); ++ if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER) ++ set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER); ++ ++ if (flag_sw_delmemb == 0) ++ sw_64_pre_atomic_barrier (model); ++ label = gen_rtx_LABEL_REF (DImode, gen_label_rtx ()); ++ emit_label (XEXP (label, 0)); ++ ++ emit_insn (gen_load_locked (DImode, scratch, mem)); ++ if (!TARGET_SW8A) ++ { ++ emit_insn (gen_rtx_SET (dest, const1_rtx)); ++ emit_insn (gen_builtin_wr_f (dest)); ++ } ++ ++ width = GEN_INT (GET_MODE_BITSIZE (mode)); ++ mask = GEN_INT (mode == QImode ? 
0xff : 0xffff); ++ emit_insn (gen_extxl (dest, scratch, width, addr)); ++ emit_insn (gen_mskxl (scratch, scratch, mask, addr)); ++ if (val != const0_rtx) ++ emit_insn (gen_iordi3 (scratch, scratch, val)); ++ ++ emit_insn (gen_store_conditional (DImode, scratch, mem, scratch)); ++ if (!TARGET_SW8A) ++ emit_insn (gen_builtin_rd_f (scratch)); ++ ++ x = gen_rtx_EQ (DImode, scratch, const0_rtx); ++ emit_unlikely_jump (x, label); ++} ++ ++/* Adjust the cost of a scheduling dependency. Return the new cost of ++ a dependency LINK or INSN on DEP_INSN. COST is the current cost. */ ++ ++static int ++sw_64_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost, ++ unsigned int) ++{ ++ enum attr_type dep_insn_type; ++ ++ /* If the dependence is an anti-dependence, there is no cost. For an ++ output dependence, there is sometimes a cost, but it doesn't seem ++ worth handling those few cases. */ ++ if (dep_type != 0) ++ return cost; ++ ++ /* If we can't recognize the insns, we can't really do anything. */ ++ if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0) ++ return cost; ++ ++ dep_insn_type = get_attr_type (dep_insn); ++ ++ /* Bring in the user-defined memory latency. */ ++ if (dep_insn_type == TYPE_ILD || dep_insn_type == TYPE_FLD ++ || dep_insn_type == TYPE_LDSYM) ++ cost += sw_64_memory_latency - 1; ++ ++ /* Everything else handled in DFA bypasses now. */ ++ ++ return cost; ++} ++ ++/* The number of instructions that can be issued per cycle. */ ++ ++static int ++sw_64_issue_rate (void) ++{ ++ return ((sw_64_tune == PROCESSOR_SW6 || sw_64_tune == PROCESSOR_SW8) ? 4 : 2); ++} ++ ++/* How many alternative schedules to try. This should be as wide as the ++ scheduling freedom in the DFA, but no wider. Making this value too ++ large results extra work for the scheduler.*/ ++ ++static int ++sw_64_multipass_dfa_lookahead (void) ++{ ++ return ((sw_64_tune == PROCESSOR_SW6 || sw_64_tune == PROCESSOR_SW8) ? 
4 : 2); ++} ++ ++/* Machine-specific function data. */ ++ ++struct GTY (()) sw_64_links; ++ ++/* Information about a function's frame layout. */ ++struct GTY (()) sw_64_frame_info ++{ ++ /* The size of the frame in bytes. */ ++ HOST_WIDE_INT frame_size; ++ ++ /* Bit X is set if the function saves or restores GPR X. */ ++ unsigned HOST_WIDE_INT sa_mask; ++ ++ /* The size of the saved callee-save int/FP registers. */ ++ HOST_WIDE_INT saved_regs_size; ++ ++ /* The number of extra stack bytes taken up by register varargs. */ ++ HOST_WIDE_INT saved_varargs_size; ++ ++ /* Offset of virtual frame pointer from stack pointer/frame bottom */ ++ HOST_WIDE_INT callee_offset; ++ ++ /* Offset of hard frame pointer from stack pointer/frame bottom */ ++ HOST_WIDE_INT hard_frame_pointer_offset; ++ ++ HOST_WIDE_INT local_offset; ++ ++ /* The offset of arg_pointer_rtx from the bottom of the frame. */ ++ HOST_WIDE_INT arg_pointer_offset; ++ ++ bool emit_frame_pointer; ++}; ++ ++struct GTY (()) machine_function ++{ ++ unsigned HOST_WIDE_INT sa_mask; ++ HOST_WIDE_INT sa_size; ++ HOST_WIDE_INT frame_size; ++ ++ /* For flag_reorder_blocks_and_partition. */ ++ rtx gp_save_rtx; ++ ++ bool uses_condition_handler; ++ ++ struct sw_64_frame_info frame; ++ ++ /* Linkage entries. */ ++ hash_map *links; ++}; ++ ++/* How to allocate a 'struct machine_function'. */ ++ ++static struct machine_function * ++sw_64_init_machine_status (void) ++{ ++ return ggc_cleared_alloc (); ++} ++ ++/* Start the ball rolling with RETURN_ADDR_RTX. */ ++ ++rtx ++sw_64_return_addr (int count, rtx frame ATTRIBUTE_UNUSED) ++{ ++ if (count != 0) ++ return const0_rtx; ++ ++ return get_hard_reg_initial_val (Pmode, REG_RA); ++} ++ ++/* Return or create a memory slot containing the gp value for the current ++ function. Needed only if TARGET_LD_BUGGY_LDGP. 
*/ ++ ++rtx ++sw_64_gp_save_rtx (void) ++{ ++ rtx_insn *seq; ++ rtx m = cfun->machine->gp_save_rtx; ++ ++ if (m == NULL) ++ { ++ start_sequence (); ++ ++ m = assign_stack_local (DImode, UNITS_PER_WORD, BITS_PER_WORD); ++ m = validize_mem (m); ++ emit_move_insn (m, pic_offset_table_rtx); ++ ++ seq = get_insns (); ++ end_sequence (); ++ ++ /* We used to simply emit the sequence after entry_of_function. ++ However this breaks the CFG if the first instruction in the ++ first block is not the NOTE_INSN_BASIC_BLOCK, for example a ++ label. Emit the sequence properly on the edge. We are only ++ invoked from dw2_build_landing_pads and finish_eh_generation ++ will call commit_edge_insertions thanks to a kludge. */ ++ insert_insn_on_edge (seq, ++ single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))); ++ ++ cfun->machine->gp_save_rtx = m; ++ } ++ ++ return m; ++} ++ ++static void ++sw_64_instantiate_decls (void) ++{ ++ if (cfun->machine->gp_save_rtx != NULL_RTX) ++ instantiate_decl_rtl (cfun->machine->gp_save_rtx); ++} ++ ++static int ++sw_64_ra_ever_killed (void) ++{ ++ rtx_insn *top; ++ ++ if (!has_hard_reg_initial_val (Pmode, REG_RA)) ++ return (int) df_regs_ever_live_p (REG_RA); ++ ++ push_topmost_sequence (); ++ top = get_insns (); ++ pop_topmost_sequence (); ++ ++ return reg_set_between_p (gen_rtx_REG (Pmode, REG_RA), top, NULL); ++} ++ ++/* Return the trap mode suffix applicable to the current ++ instruction, or NULL. 
*/ ++ ++static const char * ++get_trap_mode_suffix (void) ++{ ++ enum attr_trap_suffix s = get_attr_trap_suffix (current_output_insn); ++ ++ switch (s) ++ { ++ case TRAP_SUFFIX_NONE: ++ return NULL; ++ ++ case TRAP_SUFFIX_SU: ++ if (sw_64_fptm >= SW_64_FPTM_SU) ++ return "su"; ++ return NULL; ++ ++ case TRAP_SUFFIX_SUI: ++ if (sw_64_fptm >= SW_64_FPTM_SUI) ++ return "sui"; ++ return NULL; ++ ++ case TRAP_SUFFIX_V_SV: ++ switch (sw_64_fptm) ++ { ++ case SW_64_FPTM_N: ++ return NULL; ++ case SW_64_FPTM_U: ++ return "v"; ++ case SW_64_FPTM_SU: ++ case SW_64_FPTM_SUI: ++ return "sv"; ++ default: ++ gcc_unreachable (); ++ } ++ ++ case TRAP_SUFFIX_V_SV_SVI: ++ switch (sw_64_fptm) ++ { ++ case SW_64_FPTM_N: ++ return NULL; ++ case SW_64_FPTM_U: ++ return "v"; ++ case SW_64_FPTM_SU: ++ return "sv"; ++ case SW_64_FPTM_SUI: ++ return "svi"; ++ default: ++ gcc_unreachable (); ++ } ++ break; ++ ++ case TRAP_SUFFIX_U_SU_SUI: ++ switch (sw_64_fptm) ++ { ++ case SW_64_FPTM_N: ++ return NULL; ++ case SW_64_FPTM_U: ++ return "u"; ++ case SW_64_FPTM_SU: ++ return "su"; ++ case SW_64_FPTM_SUI: ++ return "sui"; ++ default: ++ gcc_unreachable (); ++ } ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ gcc_unreachable (); ++} ++ ++/* Return the rounding mode suffix applicable to the current ++ instruction, or NULL. */ ++ ++static const char * ++get_round_mode_suffix (void) ++{ ++ enum attr_round_suffix s = get_attr_round_suffix (current_output_insn); ++ ++ switch (s) ++ { ++ case ROUND_SUFFIX_NONE: ++ return NULL; ++ case ROUND_SUFFIX_NORMAL: ++ switch (sw_64_fprm) ++ { ++ case SW_64_FPRM_NORM: ++ return NULL; ++ case SW_64_FPRM_MINF: ++ return "m"; ++ case SW_64_FPRM_CHOP: ++ return "c"; ++ case SW_64_FPRM_DYN: ++ return "d"; ++ default: ++ gcc_unreachable (); ++ } ++ break; ++ ++ case ROUND_SUFFIX_C: ++ return "c"; ++ ++ default: ++ gcc_unreachable (); ++ } ++ gcc_unreachable (); ++} ++ ++/* Implement TARGET_PRINT_OPERAND_PUNCT_VALID_P. 
*/ ++ ++static bool ++sw_64_print_operand_punct_valid_p (unsigned char code) ++{ ++ return (code == '/' || code == ',' || code == '-' || code == '~' ++ || code == '#' || code == '*' || code == '&'); ++} ++ ++/* Implement TARGET_PRINT_OPERAND. The sw_64-specific ++ operand codes are documented below. */ ++ ++static const char * ++get_round_mode_suffix_sw (void) ++{ ++ enum attr_round_suffix s = get_attr_round_suffix (current_output_insn); ++ ++ switch (s) ++ { ++ case ROUND_SUFFIX_NONE: ++ return NULL; ++ case ROUND_SUFFIX_NORMAL: ++ switch (sw_64_fprm) ++ { ++ case SW_64_FPRM_NORM: ++ return "_g"; ++ case SW_64_FPRM_MINF: ++ return "_p"; ++ case SW_64_FPRM_CHOP: ++ return "_z"; ++ case SW_64_FPRM_DYN: ++ return "_n"; ++ default: ++ gcc_unreachable (); ++ } ++ break; ++ ++ case ROUND_SUFFIX_C: ++ return "_z"; ++ ++ default: ++ gcc_unreachable (); ++ } ++ gcc_unreachable (); ++} ++static void ++sw_64_print_operand (FILE *file, rtx x, int code) ++{ ++ int i; ++ ++ switch (code) ++ { ++ case '~': ++ /* Print the assembler name of the current function. */ ++ assemble_name (file, sw_64_fnname); ++ break; ++ ++ case '&': ++ if (const char *name = get_some_local_dynamic_name ()) ++ assemble_name (file, name); ++ else ++ output_operand_lossage ("'%%&' used without any " ++ "local dynamic TLS references"); ++ break; ++ ++ case '/': ++ /* Generates the instruction suffix. The TRAP_SUFFIX and ROUND_SUFFIX ++ attributes are examined to determine what is appropriate. */ ++ { ++ const char *trap = get_trap_mode_suffix (); ++ const char *round = get_round_mode_suffix (); ++ ++ break; ++ } ++ ++ case 'T': { ++ const char *round_sw = get_round_mode_suffix_sw (); ++ ++ if (round_sw) ++ fprintf (file, "%s", (round_sw ? round_sw : "")); ++ break; ++ } ++ case ',': ++ /* Generates single precision suffix for floating point ++ instructions (s for IEEE, f for VAX). */ ++ fputc ((TARGET_FLOAT_VAX ? 
'f' : 's'), file); ++ break; ++ ++ case '-': ++ /* Generates double precision suffix for floating point ++ instructions (t for IEEE, g for VAX). */ ++ fputc ((TARGET_FLOAT_VAX ? 'g' : 'd'), file); ++ break; ++ ++ case '#': ++ if (sw_64_this_literal_sequence_number == 0) ++ sw_64_this_literal_sequence_number = sw_64_next_sequence_number++; ++ fprintf (file, "%d", sw_64_this_literal_sequence_number); ++ break; ++ ++ case '*': ++ if (sw_64_this_gpdisp_sequence_number == 0) ++ sw_64_this_gpdisp_sequence_number = sw_64_next_sequence_number++; ++ fprintf (file, "%d", sw_64_this_gpdisp_sequence_number); ++ break; ++ ++ case 'J': { ++ const char *lituse; ++ ++ if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSGD_CALL) ++ { ++ x = XVECEXP (x, 0, 0); ++ lituse = "lituse_tlsgd"; ++ } ++ else if (GET_CODE (x) == UNSPEC && XINT (x, 1) == UNSPEC_TLSLDM_CALL) ++ { ++ x = XVECEXP (x, 0, 0); ++ lituse = "lituse_tlsldm"; ++ } ++ else if (CONST_INT_P (x)) ++ lituse = "lituse_jsr"; ++ else ++ { ++ output_operand_lossage ("invalid %%J value"); ++ break; ++ } ++ ++ if (x != const0_rtx) ++ fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x)); ++ } ++ break; ++ ++ case 'j': { ++ const char *lituse; ++ ++#ifdef HAVE_AS_JSRDIRECT_RELOCS ++ lituse = "lituse_jsrdirect"; ++#else ++ lituse = "lituse_jsr"; ++#endif ++ ++ gcc_assert (INTVAL (x) != 0); ++ fprintf (file, "\t\t!%s!%d", lituse, (int) INTVAL (x)); ++ } ++ break; ++ case 'r': ++ /* If this operand is the constant zero, write it as "$31". */ ++ if (REG_P (x)) ++ fprintf (file, "%s", reg_names[REGNO (x)]); ++ else if (x == CONST0_RTX (GET_MODE (x))) ++ fprintf (file, "$31"); ++ else ++ output_operand_lossage ("invalid %%r value"); ++ break; ++ ++ case 'R': ++ /* Similar, but for floating-point. 
*/ ++ if (REG_P (x)) ++ fprintf (file, "%s", reg_names[REGNO (x)]); ++ else if (x == CONST0_RTX (GET_MODE (x))) ++ fprintf (file, "$f31"); ++ else ++ output_operand_lossage ("invalid %%R value"); ++ break; ++ ++ case 'N': ++ /* Write the 1's complement of a constant. */ ++ if (!CONST_INT_P (x)) ++ output_operand_lossage ("invalid %%N value"); ++ ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~INTVAL (x)); ++ break; ++ ++ case 'P': ++ /* Write 1 << C, for a constant C. */ ++ if (!CONST_INT_P (x)) ++ output_operand_lossage ("invalid %%P value"); ++ ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, HOST_WIDE_INT_1 << INTVAL (x)); ++ break; ++ ++ case 'h': ++ /* Write the high-order 16 bits of a constant, sign-extended. */ ++ if (!CONST_INT_P (x)) ++ output_operand_lossage ("invalid %%h value"); ++ ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) >> 16); ++ break; ++ ++ case 'L': ++ /* Write the low-order 16 bits of a constant, sign-extended. */ ++ if (!CONST_INT_P (x)) ++ output_operand_lossage ("invalid %%L value"); ++ ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, ++ (INTVAL (x) & 0xffff) - 2 * (INTVAL (x) & 0x8000)); ++ break; ++ ++ case 'm': ++ /* Write mask for ZAP insn. */ ++ if (CONST_INT_P (x)) ++ { ++ HOST_WIDE_INT mask = 0, value = INTVAL (x); ++ ++ for (i = 0; i < 8; i++, value >>= 8) ++ if (value & 0xff) ++ mask |= (1 << i); ++ ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, mask); ++ } ++ else ++ output_operand_lossage ("invalid %%m value"); ++ break; ++ ++ case 'M': ++ /* 'b', 'w', 'l', or 'q' as the value of the constant. */ ++ if (!mode_width_operand (x, VOIDmode)) ++ output_operand_lossage ("invalid %%M value"); ++ ++ fprintf (file, "%s", ++ (INTVAL (x) == 8 ? "b" ++ : INTVAL (x) == 16 ? "w" ++ : INTVAL (x) == 32 ? "l" ++ : "q")); ++ break; ++ ++ case 'U': ++ /* Similar, except do it from the mask. 
*/ ++ if (CONST_INT_P (x)) ++ { ++ HOST_WIDE_INT value = INTVAL (x); ++ ++ if (value == 0xff) ++ { ++ fputc ('b', file); ++ break; ++ } ++ if (value == 0xffff) ++ { ++ fputc ('w', file); ++ break; ++ } ++ if (value == 0xffffffff) ++ { ++ fputc ('l', file); ++ break; ++ } ++ if (value == -1) ++ { ++ fputc ('q', file); ++ break; ++ } ++ } ++ /* Write "_a" for AUTO_INC_DEC access. */ ++ if (MEM_P (x) ++ && (GET_CODE (XEXP (x, 0)) == POST_INC ++ || GET_CODE (XEXP (x, 0)) == POST_DEC ++ || GET_CODE (XEXP (x, 0)) == POST_MODIFY)) ++ { ++ fprintf (file, "_a"); ++ break; ++ } ++ break; ++ ++ case 's': ++ /* Write the constant value divided by 8. */ ++ if (!CONST_INT_P (x) || (unsigned HOST_WIDE_INT) INTVAL (x) >= 64 ++ || (INTVAL (x) & 7) != 0) ++ output_operand_lossage ("invalid %%s value"); ++ ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) / 8); ++ break; ++ ++ case 'C': ++ case 'D': ++ case 'c': ++ case 'd': ++ /* Write out comparison name. */ ++ { ++ enum rtx_code c = GET_CODE (x); ++ ++ if (!COMPARISON_P (x)) ++ output_operand_lossage ("invalid %%C value"); ++ ++ else if (code == 'D') ++ c = reverse_condition (c); ++ else if (code == 'c') ++ c = swap_condition (c); ++ else if (code == 'd') ++ c = swap_condition (reverse_condition (c)); ++ ++ if (c == LEU) ++ fprintf (file, "ule"); ++ else if (c == LTU) ++ fprintf (file, "ult"); ++ else if (c == UNORDERED) ++ fprintf (file, "un"); ++ else ++ fprintf (file, "%s", GET_RTX_NAME (c)); ++ } ++ break; ++ ++ case 'E': ++ /* Write the divide or modulus operator. */ ++ switch (GET_CODE (x)) ++ { ++ case DIV: ++ fprintf (file, "div%s", GET_MODE (x) == SImode ? "w" : "l"); ++ break; ++ case UDIV: ++ fprintf (file, "div%su", GET_MODE (x) == SImode ? "w" : "l"); ++ break; ++ case MOD: ++ fprintf (file, "rem%s", GET_MODE (x) == SImode ? "w" : "l"); ++ break; ++ case UMOD: ++ fprintf (file, "rem%su", GET_MODE (x) == SImode ? 
"w" : "l"); ++ break; ++ default: ++ output_operand_lossage ("invalid %%E value"); ++ break; ++ } ++ break; ++ ++ case 'A': ++ /* Write "_u" for unaligned access. */ ++ if (MEM_P (x) && GET_CODE (XEXP (x, 0)) == AND) ++ fprintf (file, "_u"); ++ break; ++ ++ case 0: ++ if (REG_P (x)) ++ fprintf (file, "%s", reg_names[REGNO (x)]); ++ else if (MEM_P (x)) ++ { ++ if (GET_CODE (XEXP (x, 0)) == POST_INC) ++ fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)), ++ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]); ++ else if (GET_CODE (XEXP (x, 0)) == POST_DEC) ++ fprintf (file, "%d(%s)", -GET_MODE_SIZE (GET_MODE (x)), ++ reg_names[REGNO (XEXP (XEXP (x, 0), 0))]); ++ else if (GET_CODE (XEXP (x, 0)) == POST_MODIFY) ++ output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1)); ++ else ++ output_address (GET_MODE (x), XEXP (x, 0)); ++ } ++ else if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == UNSPEC) ++ { ++ switch (XINT (XEXP (x, 0), 1)) ++ { ++ case UNSPEC_DTPREL: ++ case UNSPEC_TPREL: ++ output_addr_const (file, XVECEXP (XEXP (x, 0), 0, 0)); ++ break; ++ default: ++ output_operand_lossage ("unknown relocation unspec"); ++ break; ++ } ++ } ++ else ++ output_addr_const (file, x); ++ break; ++ ++ default: ++ output_operand_lossage ("invalid %%xn code"); ++ } ++} ++ ++/* Implement TARGET_PRINT_OPERAND_ADDRESS. */ ++ ++static void ++sw_64_print_operand_address (FILE *file, machine_mode /*mode*/, rtx addr) ++{ ++ int basereg = 31; ++ HOST_WIDE_INT offset = 0; ++ ++ if (GET_CODE (addr) == AND) ++ addr = XEXP (addr, 0); ++ ++ if (GET_CODE (addr) == PLUS && CONST_INT_P (XEXP (addr, 1))) ++ { ++ offset = INTVAL (XEXP (addr, 1)); ++ addr = XEXP (addr, 0); ++ } ++ ++ if (GET_CODE (addr) == LO_SUM) ++ { ++ const char *reloc16, *reloclo; ++ rtx op1 = XEXP (addr, 1); ++ ++ if (GET_CODE (op1) == CONST && GET_CODE (XEXP (op1, 0)) == UNSPEC) ++ { ++ op1 = XEXP (op1, 0); ++ switch (XINT (op1, 1)) ++ { ++ case UNSPEC_DTPREL: ++ reloc16 = NULL; ++ reloclo = (sw_64_tls_size == 16 ? 
"dtprel" : "dtprello"); ++ break; ++ case UNSPEC_TPREL: ++ reloc16 = NULL; ++ reloclo = (sw_64_tls_size == 16 ? "tprel" : "tprello"); ++ break; ++ default: ++ output_operand_lossage ("unknown relocation unspec"); ++ return; ++ } ++ ++ output_addr_const (file, XVECEXP (op1, 0, 0)); ++ } ++ else ++ { ++ reloc16 = "gprel"; ++ reloclo = "gprellow"; ++ output_addr_const (file, op1); ++ } ++ ++ if (offset) ++ fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset); ++ ++ addr = XEXP (addr, 0); ++ switch (GET_CODE (addr)) ++ { ++ case REG: ++ basereg = REGNO (addr); ++ break; ++ ++ case SUBREG: ++ basereg = subreg_regno (addr); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ fprintf (file, "($%d)\t\t!%s", basereg, ++ (basereg == 29 ? reloc16 : reloclo)); ++ return; ++ } ++ ++ switch (GET_CODE (addr)) ++ { ++ case REG: ++ basereg = REGNO (addr); ++ break; ++ ++ case SUBREG: ++ basereg = subreg_regno (addr); ++ break; ++ ++ case CONST_INT: ++ offset = INTVAL (addr); ++ break; ++ ++ case SYMBOL_REF: ++ gcc_assert (this_is_asm_operands); ++ fprintf (file, "%s", XSTR (addr, 0)); ++ return; ++ ++ case CONST: ++ gcc_assert (this_is_asm_operands); ++ gcc_assert (GET_CODE (XEXP (addr, 0)) == PLUS ++ && GET_CODE (XEXP (XEXP (addr, 0), 0)) == SYMBOL_REF); ++ fprintf (file, "%s+" HOST_WIDE_INT_PRINT_DEC, ++ XSTR (XEXP (XEXP (addr, 0), 0), 0), ++ INTVAL (XEXP (XEXP (addr, 0), 1))); ++ return; ++ ++ default: ++ output_operand_lossage ("invalid operand address"); ++ return; ++ } ++ ++ fprintf (file, HOST_WIDE_INT_PRINT_DEC "($%d)", offset, basereg); ++} ++ ++/* Emit RTL insns to initialize the variable parts of a trampoline at ++ M_TRAMP. FNDECL is target function's decl. CHAIN_VALUE is an rtx ++ for the static chain value for the function. 
*/ ++ ++static void ++sw_64_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value) ++{ ++ rtx fnaddr, mem, word1, word2; ++ ++ fnaddr = XEXP (DECL_RTL (fndecl), 0); ++ ++#ifdef POINTERS_EXTEND_UNSIGNED ++ fnaddr = convert_memory_address (Pmode, fnaddr); ++ chain_value = convert_memory_address (Pmode, chain_value); ++#endif ++ ++ /* These 4 instructions are: ++ ldq $1,24($27) ++ ldq $27,16($27) ++ jmp $31,($27),0 ++ nop ++ We don't bother setting the HINT field of the jump; the nop ++ is merely there for padding. */ ++ word1 = GEN_INT (HOST_WIDE_INT_C (0x8f7b00108c3b0018)); ++ word2 = GEN_INT (HOST_WIDE_INT_C (0x43ff075f0ffb0000)); ++ ++ /* Store the first two words, as computed above. */ ++ mem = adjust_address (m_tramp, DImode, 0); ++ emit_move_insn (mem, word1); ++ mem = adjust_address (m_tramp, DImode, 8); ++ emit_move_insn (mem, word2); ++ ++ /* Store function address and static chain value. */ ++ mem = adjust_address (m_tramp, Pmode, 16); ++ emit_move_insn (mem, fnaddr); ++ mem = adjust_address (m_tramp, Pmode, 24); ++ emit_move_insn (mem, chain_value); ++ ++ emit_insn (gen_imb ()); ++#ifdef HAVE_ENABLE_EXECUTE_STACK ++ emit_library_call (init_one_libfunc ("__enable_execute_stack"), ++ LCT_NORMAL, VOIDmode, XEXP (m_tramp, 0), Pmode); ++#endif ++} ++ ++/* Determine where to put an argument to a function. ++ Value is zero to push the argument on the stack, ++ or a hard register in which to store the argument. ++ ++ CUM is a variable of type CUMULATIVE_ARGS which gives info about ++ the preceding args and about the function being called. ++ ++ ARG is a description of the argument. ++ On Sw_64 the first 6 words of args are normally in registers ++ and the rest are pushed. */ ++ ++static rtx ++sw_64_function_arg (cumulative_args_t cum_v, const function_arg_info &arg) ++{ ++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); ++ int basereg; ++ int num_args; ++ ++ /* Don't get confused and pass small structures in FP registers. 
*/ ++ if (arg.aggregate_type_p ()) ++ basereg = 16; ++ else ++ { ++ /* With sw_64_split_complex_arg, we shouldn't see any raw complex ++ values here. */ ++ gcc_checking_assert (!COMPLEX_MODE_P (arg.mode)); ++ ++ /* Set up defaults for FP operands passed in FP registers, and ++ integral operands passed in integer registers. */ ++ if (TARGET_FPREGS && GET_MODE_CLASS (arg.mode) == MODE_FLOAT) ++ basereg = 32 + 16; ++ else ++ basereg = 16; ++ } ++ ++ /* ??? Irritatingly, the definition of CUMULATIVE_ARGS is different for ++ the two platforms, so we can't avoid conditional compilation. */ ++ { ++ if (*cum >= 6) ++ return NULL_RTX; ++ num_args = *cum; ++ ++ if (arg.end_marker_p ()) ++ basereg = 16; ++ else if (targetm.calls.must_pass_in_stack (arg)) ++ return NULL_RTX; ++ } ++ ++ return gen_rtx_REG (arg.mode, num_args + basereg); ++} ++ ++/* Update the data in CUM to advance over an argument ARG. */ ++ ++static void ++sw_64_function_arg_advance (cumulative_args_t cum_v, ++ const function_arg_info &arg) ++{ ++ CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v); ++ bool onstack = targetm.calls.must_pass_in_stack (arg); ++ int increment = onstack ? 6 : SW_64_ARG_SIZE (arg.mode, arg.type); ++ ++ *cum += increment; ++} ++ ++static int ++sw_64_arg_partial_bytes (cumulative_args_t cum_v, const function_arg_info &arg) ++{ ++ int words = 0; ++ CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED = get_cumulative_args (cum_v); ++ ++ if (*cum < 6 && 6 < *cum + SW_64_ARG_SIZE (arg.mode, arg.type)) ++ words = 6 - *cum; ++ ++ return words * UNITS_PER_WORD; ++} ++ ++/* Return true if ARG must be returned in memory, instead of in registers. 
*/ ++ ++static bool ++sw_64_return_in_memory (const_tree type, const_tree fndecl ATTRIBUTE_UNUSED) ++{ ++ machine_mode mode = VOIDmode; ++ int size; ++ ++ if (type) ++ { ++ mode = TYPE_MODE (type); ++ ++ if (AGGREGATE_TYPE_P (type)) ++ return true; ++ } ++ ++ size = GET_MODE_SIZE (mode); ++ switch (GET_MODE_CLASS (mode)) ++ { ++ case MODE_VECTOR_FLOAT: ++ /* Pass all float vectors in memory, like an aggregate. */ ++ return true; ++ ++ case MODE_COMPLEX_FLOAT: ++ /* We judge complex floats on the size of their element, ++ not the size of the whole type. */ ++ size = GET_MODE_UNIT_SIZE (mode); ++ break; ++ ++ case MODE_INT: ++ case MODE_FLOAT: ++ case MODE_COMPLEX_INT: ++ case MODE_VECTOR_INT: ++ break; ++ ++ default: ++ /* ??? We get called on all sorts of random stuff from ++ aggregate_value_p. We must return something, but it's not ++ clear what's safe to return. Pretend it's a struct I ++ guess. */ ++ return true; ++ } ++ ++ /* Otherwise types must fit in one register. */ ++ return size > UNITS_PER_WORD; ++} ++ ++/* Return true if TYPE should be passed by invisible reference. */ ++ ++static bool ++sw_64_pass_by_reference (cumulative_args_t, const function_arg_info &arg) ++{ ++ /* Pass float and _Complex float variable arguments by reference. ++ This avoids 64-bit store from a FP register to a pretend args save area ++ and subsequent 32-bit load from the saved location to a FP register. ++ ++ Note that 32-bit loads and stores to/from a FP register on sw_64 reorder ++ bits to form a canonical 64-bit value in the FP register. This fact ++ invalidates compiler assumption that 32-bit FP value lives in the lower ++ 32-bits of the passed 64-bit FP value, so loading the 32-bit value from ++ the stored 64-bit location using 32-bit FP load is invalid on sw_64. 
++ ++ This introduces sort of ABI incompatibility, but until _Float32 was ++ introduced, C-family languages promoted 32-bit float variable arg to ++ a 64-bit double, and it was not allowed to pass float as a varible ++ argument. Passing _Complex float as a variable argument never ++ worked on sw_64. Thus, we have no backward compatibility issues ++ to worry about, and passing unpromoted _Float32 and _Complex float ++ as a variable argument will actually work in the future. */ ++ ++ if (arg.mode == SFmode || arg.mode == SCmode) ++ return !arg.named; ++ ++ return arg.mode == TFmode || arg.mode == TCmode; ++} ++ ++/* Define how to find the value returned by a function. VALTYPE is the ++ data type of the value (as a tree). If the precise function being ++ called is known, FUNC is its FUNCTION_DECL; otherwise, FUNC is 0. ++ MODE is set instead of VALTYPE for libcalls. ++ ++ On Sw_64 the value is found in $0 for integer functions and ++ $f0 for floating-point functions. */ ++ ++static rtx ++sw_64_function_value_1 (const_tree valtype, const_tree func ATTRIBUTE_UNUSED, ++ machine_mode mode) ++{ ++ unsigned int regnum, dummy ATTRIBUTE_UNUSED; ++ enum mode_class mclass; ++ ++ gcc_assert (!valtype || !sw_64_return_in_memory (valtype, func)); ++ ++ if (valtype) ++ mode = TYPE_MODE (valtype); ++ ++ mclass = GET_MODE_CLASS (mode); ++ switch (mclass) ++ { ++ case MODE_INT: ++ /* Do the same thing as PROMOTE_MODE except for libcalls, ++ where we have them returning both SImode and DImode. 
*/ ++ PROMOTE_MODE (mode, dummy, valtype); ++ /* FALLTHRU */ ++ ++ case MODE_COMPLEX_INT: ++ case MODE_VECTOR_INT: ++ regnum = 0; ++ break; ++ ++ case MODE_FLOAT: ++ regnum = 32; ++ break; ++ ++ case MODE_COMPLEX_FLOAT: { ++ machine_mode cmode = GET_MODE_INNER (mode); ++ ++ return gen_rtx_PARALLEL ( ++ VOIDmode, ++ gen_rtvec (2, ++ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 32), ++ const0_rtx), ++ gen_rtx_EXPR_LIST (VOIDmode, gen_rtx_REG (cmode, 33), ++ GEN_INT (GET_MODE_SIZE (cmode))))); ++ } ++ ++ case MODE_RANDOM: ++ default: ++ gcc_unreachable (); ++ } ++ ++ return gen_rtx_REG (mode, regnum); ++} ++ ++/* Implement TARGET_FUNCTION_VALUE. */ ++ ++static rtx ++sw_64_function_value (const_tree valtype, const_tree fn_decl_or_type, ++ bool /*outgoing*/) ++{ ++ return sw_64_function_value_1 (valtype, fn_decl_or_type, VOIDmode); ++} ++ ++/* Implement TARGET_LIBCALL_VALUE. */ ++ ++static rtx ++sw_64_libcall_value (machine_mode mode, const_rtx /*fun*/) ++{ ++ return sw_64_function_value_1 (NULL_TREE, NULL_TREE, mode); ++} ++ ++/* Implement TARGET_FUNCTION_VALUE_REGNO_P. ++ ++ On the Sw_64, $0 $1 and $f0 $f1 are the only register thus used. */ ++ ++static bool ++sw_64_function_value_regno_p (const unsigned int regno) ++{ ++ return (regno == 0 || regno == 1 || regno == 32 || regno == 33); ++} ++ ++/* TCmode complex values are passed by invisible reference. We ++ should not split these values. */ ++ ++static bool ++sw_64_split_complex_arg (const_tree type) ++{ ++ return TYPE_MODE (type) != TCmode; ++} ++ ++static tree ++sw_64_build_builtin_va_list (void) ++{ ++ tree base, ofs, space, record, type_decl; ++ ++ record = (*lang_hooks.types.make_type) (RECORD_TYPE); ++ type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL, ++ get_identifier ("__va_list_tag"), record); ++ TYPE_STUB_DECL (record) = type_decl; ++ TYPE_NAME (record) = type_decl; ++ ++ /* C++? SET_IS_AGGR_TYPE (record, 1); */ ++ ++ /* Dummy field to prevent alignment warnings. 
*/ ++ space ++ = build_decl (BUILTINS_LOCATION, FIELD_DECL, NULL_TREE, integer_type_node); ++ DECL_FIELD_CONTEXT (space) = record; ++ DECL_ARTIFICIAL (space) = 1; ++ DECL_IGNORED_P (space) = 1; ++ ++ ofs = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("__offset"), ++ integer_type_node); ++ DECL_FIELD_CONTEXT (ofs) = record; ++ DECL_CHAIN (ofs) = space; ++ ++ base = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("__base"), ++ ptr_type_node); ++ DECL_FIELD_CONTEXT (base) = record; ++ DECL_CHAIN (base) = ofs; ++ ++ TYPE_FIELDS (record) = base; ++ layout_type (record); ++ ++ va_list_gpr_counter_field = ofs; ++ return record; ++} ++ ++/* Helper function for sw_64_stdarg_optimize_hook. Skip over casts ++ and constant additions. */ ++ ++static gimple * ++va_list_skip_additions (tree lhs) ++{ ++ gimple *stmt; ++ ++ for (;;) ++ { ++ enum tree_code code; ++ ++ stmt = SSA_NAME_DEF_STMT (lhs); ++ ++ if (gimple_code (stmt) == GIMPLE_PHI) ++ return stmt; ++ ++ if (!is_gimple_assign (stmt) || gimple_assign_lhs (stmt) != lhs) ++ return NULL; ++ ++ if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME) ++ return stmt; ++ code = gimple_assign_rhs_code (stmt); ++ if (!CONVERT_EXPR_CODE_P (code) ++ && ((code != PLUS_EXPR && code != POINTER_PLUS_EXPR) ++ || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST ++ || !tree_fits_uhwi_p (gimple_assign_rhs2 (stmt)))) ++ return stmt; ++ ++ lhs = gimple_assign_rhs1 (stmt); ++ } ++} ++ ++/* Check if LHS = RHS statement is ++ LHS = *(ap.__base + ap.__offset + cst) ++ or ++ LHS = *(ap.__base ++ + ((ap.__offset + cst <= 47) ++ ? ap.__offset + cst - 48 : ap.__offset + cst) + cst2). ++ If the former, indicate that GPR registers are needed, ++ if the latter, indicate that FPR registers are needed. ++ ++ Also look for LHS = (*ptr).field, where ptr is one of the forms ++ listed above. 
++ ++ On sw_64, cfun->va_list_gpr_size is used as size of the needed ++ regs and cfun->va_list_fpr_size is a bitmask, bit 0 set if GPR ++ registers are needed and bit 1 set if FPR registers are needed. ++ Return true if va_list references should not be scanned for the ++ current statement. */ ++ ++static bool ++sw_64_stdarg_optimize_hook (struct stdarg_info *si, const gimple *stmt) ++{ ++ tree base, offset, rhs; ++ int offset_arg = 1; ++ gimple *base_stmt; ++ ++ if (get_gimple_rhs_class (gimple_assign_rhs_code (stmt)) != GIMPLE_SINGLE_RHS) ++ return false; ++ ++ rhs = gimple_assign_rhs1 (stmt); ++ while (handled_component_p (rhs)) ++ rhs = TREE_OPERAND (rhs, 0); ++ if (TREE_CODE (rhs) != MEM_REF ++ || TREE_CODE (TREE_OPERAND (rhs, 0)) != SSA_NAME) ++ return false; ++ ++ stmt = va_list_skip_additions (TREE_OPERAND (rhs, 0)); ++ if (stmt == NULL || !is_gimple_assign (stmt) ++ || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR) ++ return false; ++ ++ base = gimple_assign_rhs1 (stmt); ++ if (TREE_CODE (base) == SSA_NAME) ++ { ++ base_stmt = va_list_skip_additions (base); ++ if (base_stmt && is_gimple_assign (base_stmt) ++ && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF) ++ base = gimple_assign_rhs1 (base_stmt); ++ } ++ ++ if (TREE_CODE (base) != COMPONENT_REF ++ || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node)) ++ { ++ base = gimple_assign_rhs2 (stmt); ++ if (TREE_CODE (base) == SSA_NAME) ++ { ++ base_stmt = va_list_skip_additions (base); ++ if (base_stmt && is_gimple_assign (base_stmt) ++ && gimple_assign_rhs_code (base_stmt) == COMPONENT_REF) ++ base = gimple_assign_rhs1 (base_stmt); ++ } ++ ++ if (TREE_CODE (base) != COMPONENT_REF ++ || TREE_OPERAND (base, 1) != TYPE_FIELDS (va_list_type_node)) ++ return false; ++ ++ offset_arg = 0; ++ } ++ ++ base = get_base_address (base); ++ if (TREE_CODE (base) != VAR_DECL ++ || !bitmap_bit_p (si->va_list_vars, DECL_UID (base) + num_ssa_names)) ++ return false; ++ ++ offset = gimple_op (stmt, 1 + 
offset_arg); ++ if (TREE_CODE (offset) == SSA_NAME) ++ { ++ gimple *offset_stmt = va_list_skip_additions (offset); ++ ++ if (offset_stmt && gimple_code (offset_stmt) == GIMPLE_PHI) ++ { ++ HOST_WIDE_INT sub; ++ gimple *arg1_stmt, *arg2_stmt; ++ tree arg1, arg2; ++ enum tree_code code1, code2; ++ ++ if (gimple_phi_num_args (offset_stmt) != 2) ++ goto escapes; ++ ++ arg1_stmt ++ = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 0)); ++ arg2_stmt ++ = va_list_skip_additions (gimple_phi_arg_def (offset_stmt, 1)); ++ if (arg1_stmt == NULL || !is_gimple_assign (arg1_stmt) ++ || arg2_stmt == NULL || !is_gimple_assign (arg2_stmt)) ++ goto escapes; ++ ++ code1 = gimple_assign_rhs_code (arg1_stmt); ++ code2 = gimple_assign_rhs_code (arg2_stmt); ++ if (code1 == COMPONENT_REF ++ && (code2 == MINUS_EXPR || code2 == PLUS_EXPR)) ++ /* Do nothing. */; ++ else if (code2 == COMPONENT_REF ++ && (code1 == MINUS_EXPR || code1 == PLUS_EXPR)) ++ { ++ std::swap (arg1_stmt, arg2_stmt); ++ code2 = code1; ++ } ++ else ++ goto escapes; ++ ++ if (!tree_fits_shwi_p (gimple_assign_rhs2 (arg2_stmt))) ++ goto escapes; ++ ++ sub = tree_to_shwi (gimple_assign_rhs2 (arg2_stmt)); ++ if (code2 == MINUS_EXPR) ++ sub = -sub; ++ if (sub < -48 || sub > -32) ++ goto escapes; ++ ++ arg1 = gimple_assign_rhs1 (arg1_stmt); ++ arg2 = gimple_assign_rhs1 (arg2_stmt); ++ if (TREE_CODE (arg2) == SSA_NAME) ++ { ++ arg2_stmt = va_list_skip_additions (arg2); ++ if (arg2_stmt == NULL || !is_gimple_assign (arg2_stmt) ++ || gimple_assign_rhs_code (arg2_stmt) != COMPONENT_REF) ++ goto escapes; ++ arg2 = gimple_assign_rhs1 (arg2_stmt); ++ } ++ if (arg1 != arg2) ++ goto escapes; ++ ++ if (TREE_CODE (arg1) != COMPONENT_REF ++ || TREE_OPERAND (arg1, 1) != va_list_gpr_counter_field ++ || get_base_address (arg1) != base) ++ goto escapes; ++ ++ /* Need floating point regs. 
*/ ++ cfun->va_list_fpr_size |= 2; ++ return false; ++ } ++ if (offset_stmt && is_gimple_assign (offset_stmt) ++ && gimple_assign_rhs_code (offset_stmt) == COMPONENT_REF) ++ offset = gimple_assign_rhs1 (offset_stmt); ++ } ++ if (TREE_CODE (offset) != COMPONENT_REF ++ || TREE_OPERAND (offset, 1) != va_list_gpr_counter_field ++ || get_base_address (offset) != base) ++ goto escapes; ++ else ++ /* Need general regs. */ ++ cfun->va_list_fpr_size |= 1; ++ return false; ++ ++escapes: ++ si->va_list_escapes = true; ++ return false; ++} ++ ++/* Perform any needed actions needed for a function that is receiving a ++ variable number of arguments. */ ++ ++static void ++sw_64_setup_incoming_varargs (cumulative_args_t pcum, ++ const function_arg_info &arg, int *pretend_size, ++ int no_rtl) ++{ ++ CUMULATIVE_ARGS cum = *get_cumulative_args (pcum); ++ ++ /* Skip the current argument. */ ++ targetm.calls.function_arg_advance (pack_cumulative_args (&cum), arg); ++ ++ /* On SYSV and friends, we allocate space for all 12 arg registers, but ++ only push those that are remaining. However, if NO registers need to ++ be saved, don't allocate any space. This is not only because we won't ++ need the space, but because AP includes the current_pretend_args_size ++ and we don't want to mess up any ap-relative addresses already made. ++ ++ If we are not to use the floating-point registers, save the integer ++ registers where we would put the floating-point registers. This is ++ not the most efficient way to implement varargs with just one register ++ class, but it isn't worth doing anything more efficient in this rare ++ case. */ ++ if (cum >= 6) ++ return; ++ ++ if (!no_rtl) ++ { ++ int count; ++ alias_set_type set = get_varargs_alias_set (); ++ rtx tmp; ++ ++ count = cfun->va_list_gpr_size / UNITS_PER_WORD; ++ if (count > 6 - cum) ++ count = 6 - cum; ++ ++ /* Detect whether integer registers or floating-point registers ++ are needed by the detected va_arg statements. 
See above for ++ how these values are computed. Note that the "escape" value ++ is VA_LIST_MAX_FPR_SIZE, which is 255, which has both of ++ these bits set. */ ++ gcc_assert ((VA_LIST_MAX_FPR_SIZE & 3) == 3); ++ ++ if (cfun->va_list_fpr_size & 1) ++ { ++ tmp = gen_rtx_MEM (BLKmode, ++ plus_constant (Pmode, virtual_incoming_args_rtx, ++ (cum + 6) * UNITS_PER_WORD)); ++ MEM_NOTRAP_P (tmp) = 1; ++ set_mem_alias_set (tmp, set); ++ move_block_from_reg (16 + cum, tmp, count); ++ } ++ ++ if (cfun->va_list_fpr_size & 2) ++ { ++ tmp = gen_rtx_MEM (BLKmode, ++ plus_constant (Pmode, virtual_incoming_args_rtx, ++ cum * UNITS_PER_WORD)); ++ MEM_NOTRAP_P (tmp) = 1; ++ set_mem_alias_set (tmp, set); ++ move_block_from_reg (16 + cum + TARGET_FPREGS * 32, tmp, count); ++ } ++ } ++#ifdef SW_64_ENABLE_FULL_ASAN ++ cfun->machine->frame.saved_varargs_size = 12 * UNITS_PER_WORD; ++#else ++ *pretend_size = 12 * UNITS_PER_WORD; ++#endif ++} ++ ++static void ++sw_64_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED) ++{ ++ HOST_WIDE_INT offset; ++ tree t, offset_field, base_field; ++ ++ if (TREE_CODE (TREE_TYPE (valist)) == ERROR_MARK) ++ return; ++ ++ /* For Unix, TARGET_SETUP_INCOMING_VARARGS moves the starting address base ++ up by 48, storing fp arg registers in the first 48 bytes, and the ++ integer arg registers in the next 48 bytes. This is only done, ++ however, if any integer registers need to be stored. ++ ++ If no integer registers need be stored, then we must subtract 48 ++ in order to account for the integer arg registers which are counted ++ in argsize above, but which are not actually stored on the stack. ++ Must further be careful here about structures straddling the last ++ integer argument register; that futzes with pretend_args_size, ++ which changes the meaning of AP. 
*/ ++ ++ if (NUM_ARGS < 6) ++ offset = 6 * UNITS_PER_WORD; ++ else ++#ifdef SW_64_ENABLE_FULL_ASAN ++ offset = -6 * UNITS_PER_WORD + cfun->machine->frame.saved_varargs_size ++ + crtl->args.pretend_args_size; ++#else ++ offset = -6 * UNITS_PER_WORD + crtl->args.pretend_args_size; ++#endif ++ ++ base_field = TYPE_FIELDS (TREE_TYPE (valist)); ++ offset_field = DECL_CHAIN (base_field); ++ ++ base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field), valist, ++ base_field, NULL_TREE); ++ offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field), valist, ++ offset_field, NULL_TREE); ++ ++ t = make_tree (ptr_type_node, virtual_incoming_args_rtx); ++ t = fold_build_pointer_plus_hwi (t, offset); ++ t = build2 (MODIFY_EXPR, TREE_TYPE (base_field), base_field, t); ++ TREE_SIDE_EFFECTS (t) = 1; ++ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); ++ ++ t = build_int_cst (NULL_TREE, NUM_ARGS * UNITS_PER_WORD); ++ t = build2 (MODIFY_EXPR, TREE_TYPE (offset_field), offset_field, t); ++ TREE_SIDE_EFFECTS (t) = 1; ++ expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL); ++} ++ ++static tree ++sw_64_gimplify_va_arg_1 (tree type, tree base, tree offset, gimple_seq *pre_p) ++{ ++ tree type_size, ptr_type, addend, t, addr; ++ gimple_seq internal_post; ++ ++ /* If the type could not be passed in registers, skip the block ++ reserved for the registers. */ ++ if (must_pass_va_arg_in_stack (type)) ++ { ++ t = build_int_cst (TREE_TYPE (offset), 6 * 8); ++ gimplify_assign (offset, build2 (MAX_EXPR, TREE_TYPE (offset), offset, t), ++ pre_p); ++ } ++ ++ addend = offset; ++ ptr_type = build_pointer_type_for_mode (type, ptr_mode, true); ++ ++ if (TREE_CODE (type) == COMPLEX_TYPE) ++ { ++ tree real_part, imag_part, real_temp; ++ ++ real_part ++ = sw_64_gimplify_va_arg_1 (TREE_TYPE (type), base, offset, pre_p); ++ ++ /* Copy the value into a new temporary, lest the formal temporary ++ be reused out from under us. 
*/ ++ real_temp = get_initialized_tmp_var (real_part, pre_p, NULL); ++ ++ imag_part ++ = sw_64_gimplify_va_arg_1 (TREE_TYPE (type), base, offset, pre_p); ++ ++ return build2 (COMPLEX_EXPR, type, real_temp, imag_part); ++ } ++ else if (TREE_CODE (type) == REAL_TYPE) ++ { ++ tree fpaddend, cond, fourtyeight; ++ ++ fourtyeight = build_int_cst (TREE_TYPE (addend), 6 * 8); ++ fpaddend ++ = fold_build2 (MINUS_EXPR, TREE_TYPE (addend), addend, fourtyeight); ++ cond = fold_build2 (LT_EXPR, boolean_type_node, addend, fourtyeight); ++ addend ++ = fold_build3 (COND_EXPR, TREE_TYPE (addend), cond, fpaddend, addend); ++ } ++ ++ /* Build the final address and force that value into a temporary. */ ++ addr = fold_build_pointer_plus (fold_convert (ptr_type, base), addend); ++ internal_post = NULL; ++ gimplify_expr (&addr, pre_p, &internal_post, is_gimple_val, fb_rvalue); ++ gimple_seq_add_seq (pre_p, internal_post); ++ ++ /* Update the offset field. */ ++ type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type)); ++ if (type_size == NULL || TREE_OVERFLOW (type_size)) ++ t = size_zero_node; ++ else ++ { ++ t = size_binop (PLUS_EXPR, type_size, size_int (7)); ++ t = size_binop (TRUNC_DIV_EXPR, t, size_int (8)); ++ t = size_binop (MULT_EXPR, t, size_int (8)); ++ } ++ t = fold_convert (TREE_TYPE (offset), t); ++ gimplify_assign (offset, build2 (PLUS_EXPR, TREE_TYPE (offset), offset, t), ++ pre_p); ++ ++ return build_va_arg_indirect_ref (addr); ++} ++ ++static tree ++sw_64_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p, ++ gimple_seq *post_p) ++{ ++ tree offset_field, base_field, offset, base, t, r; ++ bool indirect; ++ ++ base_field = TYPE_FIELDS (va_list_type_node); ++ offset_field = DECL_CHAIN (base_field); ++ base_field = build3 (COMPONENT_REF, TREE_TYPE (base_field), valist, ++ base_field, NULL_TREE); ++ offset_field = build3 (COMPONENT_REF, TREE_TYPE (offset_field), valist, ++ offset_field, NULL_TREE); ++ ++ /* Pull the fields of the structure out into temporaries. 
Since we never ++ modify the base field, we can use a formal temporary. Sign-extend the ++ offset field so that it's the proper width for pointer arithmetic. */ ++ base = get_formal_tmp_var (base_field, pre_p); ++ ++ t = fold_convert (build_nonstandard_integer_type (64, 0), offset_field); ++ offset = get_initialized_tmp_var (t, pre_p, NULL); ++ ++ indirect = pass_va_arg_by_reference (type); ++ ++ if (indirect) ++ { ++ if (TREE_CODE (type) == COMPLEX_TYPE ++ && targetm.calls.split_complex_arg (type)) ++ { ++ tree real_part, imag_part, real_temp; ++ ++ tree ptr_type ++ = build_pointer_type_for_mode (TREE_TYPE (type), ptr_mode, true); ++ ++ real_part = sw_64_gimplify_va_arg_1 (ptr_type, base, offset, pre_p); ++ real_part = build_va_arg_indirect_ref (real_part); ++ ++ /* Copy the value into a new temporary, lest the formal temporary ++ be reused out from under us. */ ++ real_temp = get_initialized_tmp_var (real_part, pre_p, NULL); ++ ++ imag_part = sw_64_gimplify_va_arg_1 (ptr_type, base, offset, pre_p); ++ imag_part = build_va_arg_indirect_ref (imag_part); ++ ++ r = build2 (COMPLEX_EXPR, type, real_temp, imag_part); ++ ++ /* Stuff the offset temporary back into its field. */ ++ gimplify_assign (unshare_expr (offset_field), ++ fold_convert (TREE_TYPE (offset_field), offset), ++ pre_p); ++ return r; ++ } ++ else ++ type = build_pointer_type_for_mode (type, ptr_mode, true); ++ } ++ ++ /* Find the value. Note that this will be a stable indirection, or ++ a composite of stable indirections in the case of complex. */ ++ r = sw_64_gimplify_va_arg_1 (type, base, offset, pre_p); ++ ++ /* Stuff the offset temporary back into its field. */ ++ gimplify_assign (unshare_expr (offset_field), ++ fold_convert (TREE_TYPE (offset_field), offset), pre_p); ++ ++ if (indirect) ++ r = build_va_arg_indirect_ref (r); ++ ++ return r; ++} ++ ++/* Builtins. 
*/ ++ ++enum sw_64_builtin ++{ ++ SW_64_BUILTIN_CMPBGE, ++ SW_64_BUILTIN_EXTBL, ++ SW_64_BUILTIN_EXTWL, ++ SW_64_BUILTIN_EXTLL, ++ SW_64_BUILTIN_EXTQL, ++ SW_64_BUILTIN_EXTWH, ++ SW_64_BUILTIN_EXTLH, ++ SW_64_BUILTIN_EXTQH, ++ SW_64_BUILTIN_INSBL, ++ SW_64_BUILTIN_INSWL, ++ SW_64_BUILTIN_INSLL, ++ SW_64_BUILTIN_INSQL, ++ SW_64_BUILTIN_INSWH, ++ SW_64_BUILTIN_INSLH, ++ SW_64_BUILTIN_INSQH, ++ SW_64_BUILTIN_MSKBL, ++ SW_64_BUILTIN_MSKWL, ++ SW_64_BUILTIN_MSKLL, ++ SW_64_BUILTIN_MSKQL, ++ SW_64_BUILTIN_MSKWH, ++ SW_64_BUILTIN_MSKLH, ++ SW_64_BUILTIN_MSKQH, ++ SW_64_BUILTIN_UMULH, ++ SW_64_BUILTIN_ZAP, ++ SW_64_BUILTIN_ZAPNOT, ++ SW_64_BUILTIN_AMASK, ++ SW_64_BUILTIN_IMPLVER, ++ SW_64_BUILTIN_RPCC, ++ ++ /* TARGET_MAX */ ++ SW_64_BUILTIN_MINUB8, ++ SW_64_BUILTIN_MINSB8, ++ SW_64_BUILTIN_MINUW4, ++ SW_64_BUILTIN_MINSW4, ++ SW_64_BUILTIN_MAXUB8, ++ SW_64_BUILTIN_MAXSB8, ++ SW_64_BUILTIN_MAXUW4, ++ SW_64_BUILTIN_MAXSW4, ++ SW_64_BUILTIN_PERR, ++ SW_64_BUILTIN_PKLB, ++ SW_64_BUILTIN_PKWB, ++ SW_64_BUILTIN_UNPKBL, ++ SW_64_BUILTIN_UNPKBW, ++ ++ /* TARGET_CIX */ ++ SW_64_BUILTIN_CTTZ, ++ SW_64_BUILTIN_CTLZ, ++ SW_64_BUILTIN_CTPOP, ++ ++ SW_64_BUILTIN_max ++}; ++ ++static enum insn_code const code_for_builtin[SW_64_BUILTIN_max] ++ = {CODE_FOR_builtin_cmpbge, CODE_FOR_extbl, CODE_FOR_extwl, CODE_FOR_extll, ++ CODE_FOR_extql, CODE_FOR_extwh, CODE_FOR_extlh, CODE_FOR_extqh, ++ CODE_FOR_builtin_insbl, CODE_FOR_builtin_inswl, CODE_FOR_builtin_insll, ++ CODE_FOR_insql, CODE_FOR_inswh, CODE_FOR_inslh, CODE_FOR_insqh, ++ CODE_FOR_mskbl, CODE_FOR_mskwl, CODE_FOR_mskll, CODE_FOR_mskql, ++ CODE_FOR_mskwh, CODE_FOR_msklh, CODE_FOR_mskqh, CODE_FOR_umuldi3_highpart, ++ CODE_FOR_builtin_zap, CODE_FOR_builtin_zapnot, CODE_FOR_builtin_amask, ++ CODE_FOR_builtin_implver, CODE_FOR_builtin_rpcc, ++ ++ /* TARGET_MAX */ ++ CODE_FOR_builtin_minub8, CODE_FOR_builtin_minsb8, CODE_FOR_builtin_minuw4, ++ CODE_FOR_builtin_minsw4, CODE_FOR_builtin_maxub8, CODE_FOR_builtin_maxsb8, ++ 
CODE_FOR_builtin_maxuw4, CODE_FOR_builtin_maxsw4, CODE_FOR_builtin_perr, ++ CODE_FOR_builtin_pklb, CODE_FOR_builtin_pkwb, CODE_FOR_builtin_unpkbl, ++ CODE_FOR_builtin_unpkbw, ++ ++ /* TARGET_CIX */ ++ CODE_FOR_ctzdi2, CODE_FOR_clzdi2, CODE_FOR_popcountdi2}; ++ ++struct sw_64_builtin_def ++{ ++ const char *name; ++ enum sw_64_builtin code; ++ unsigned int target_mask; ++ bool is_const; ++}; ++ ++static struct sw_64_builtin_def const zero_arg_builtins[] ++ = {{"__builtin_sw_64_implver", SW_64_BUILTIN_IMPLVER, 0, true}, ++ {"__builtin_sw_64_rpcc", SW_64_BUILTIN_RPCC, 0, false}}; ++ ++static struct sw_64_builtin_def const one_arg_builtins[] ++ = {{"__builtin_sw_64_amask", SW_64_BUILTIN_AMASK, 0, true}, ++ {"__builtin_sw_64_pklb", SW_64_BUILTIN_PKLB, MASK_MAX, true}, ++ {"__builtin_sw_64_pkwb", SW_64_BUILTIN_PKWB, MASK_MAX, true}, ++ {"__builtin_sw_64_unpkbl", SW_64_BUILTIN_UNPKBL, MASK_MAX, true}, ++ {"__builtin_sw_64_unpkbw", SW_64_BUILTIN_UNPKBW, MASK_MAX, true}, ++ {"__builtin_sw_64_cttz", SW_64_BUILTIN_CTTZ, MASK_CIX, true}, ++ {"__builtin_sw_64_ctlz", SW_64_BUILTIN_CTLZ, MASK_CIX, true}, ++ {"__builtin_sw_64_ctpop", SW_64_BUILTIN_CTPOP, MASK_CIX, true}}; ++ ++static struct sw_64_builtin_def const two_arg_builtins[] ++ = {{"__builtin_sw_64_cmpbge", SW_64_BUILTIN_CMPBGE, 0, true}, ++ {"__builtin_sw_64_extbl", SW_64_BUILTIN_EXTBL, 0, true}, ++ {"__builtin_sw_64_extwl", SW_64_BUILTIN_EXTWL, 0, true}, ++ {"__builtin_sw_64_extll", SW_64_BUILTIN_EXTLL, 0, true}, ++ {"__builtin_sw_64_extql", SW_64_BUILTIN_EXTQL, 0, true}, ++ {"__builtin_sw_64_extwh", SW_64_BUILTIN_EXTWH, 0, true}, ++ {"__builtin_sw_64_extlh", SW_64_BUILTIN_EXTLH, 0, true}, ++ {"__builtin_sw_64_extqh", SW_64_BUILTIN_EXTQH, 0, true}, ++ {"__builtin_sw_64_insbl", SW_64_BUILTIN_INSBL, 0, true}, ++ {"__builtin_sw_64_inswl", SW_64_BUILTIN_INSWL, 0, true}, ++ {"__builtin_sw_64_insll", SW_64_BUILTIN_INSLL, 0, true}, ++ {"__builtin_sw_64_insql", SW_64_BUILTIN_INSQL, 0, true}, ++ {"__builtin_sw_64_inswh", 
SW_64_BUILTIN_INSWH, 0, true}, ++ {"__builtin_sw_64_inslh", SW_64_BUILTIN_INSLH, 0, true}, ++ {"__builtin_sw_64_insqh", SW_64_BUILTIN_INSQH, 0, true}, ++ {"__builtin_sw_64_mskbl", SW_64_BUILTIN_MSKBL, 0, true}, ++ {"__builtin_sw_64_mskwl", SW_64_BUILTIN_MSKWL, 0, true}, ++ {"__builtin_sw_64_mskll", SW_64_BUILTIN_MSKLL, 0, true}, ++ {"__builtin_sw_64_mskql", SW_64_BUILTIN_MSKQL, 0, true}, ++ {"__builtin_sw_64_mskwh", SW_64_BUILTIN_MSKWH, 0, true}, ++ {"__builtin_sw_64_msklh", SW_64_BUILTIN_MSKLH, 0, true}, ++ {"__builtin_sw_64_mskqh", SW_64_BUILTIN_MSKQH, 0, true}, ++ {"__builtin_sw_64_umulh", SW_64_BUILTIN_UMULH, 0, true}, ++ {"__builtin_sw_64_zap", SW_64_BUILTIN_ZAP, 0, true}, ++ {"__builtin_sw_64_zapnot", SW_64_BUILTIN_ZAPNOT, 0, true}, ++ {"__builtin_sw_64_minub8", SW_64_BUILTIN_MINUB8, MASK_MAX, true}, ++ {"__builtin_sw_64_minsb8", SW_64_BUILTIN_MINSB8, MASK_MAX, true}, ++ {"__builtin_sw_64_minuw4", SW_64_BUILTIN_MINUW4, MASK_MAX, true}, ++ {"__builtin_sw_64_minsw4", SW_64_BUILTIN_MINSW4, MASK_MAX, true}, ++ {"__builtin_sw_64_maxub8", SW_64_BUILTIN_MAXUB8, MASK_MAX, true}, ++ {"__builtin_sw_64_maxsb8", SW_64_BUILTIN_MAXSB8, MASK_MAX, true}, ++ {"__builtin_sw_64_maxuw4", SW_64_BUILTIN_MAXUW4, MASK_MAX, true}, ++ {"__builtin_sw_64_maxsw4", SW_64_BUILTIN_MAXSW4, MASK_MAX, true}, ++ {"__builtin_sw_64_perr", SW_64_BUILTIN_PERR, MASK_MAX, true}}; ++ ++static GTY (()) tree sw_64_dimode_u; ++static GTY (()) tree sw_64_v8qi_u; ++static GTY (()) tree sw_64_v8qi_s; ++static GTY (()) tree sw_64_v4hi_u; ++static GTY (()) tree sw_64_v4hi_s; ++ ++static GTY (()) tree sw_64_builtins[(int) SW_64_BUILTIN_max]; ++ ++/* Return the sw_64 builtin for CODE. */ ++ ++static tree ++sw_64_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED) ++{ ++ if (code >= SW_64_BUILTIN_max) ++ return error_mark_node; ++ return sw_64_builtins[code]; ++} ++ ++/* Helper function of sw_64_init_builtins. Add the built-in specified ++ by NAME, TYPE, CODE, and ECF. 
*/ ++ ++static void ++sw_64_builtin_function (const char *name, tree ftype, enum sw_64_builtin code, ++ unsigned ecf) ++{ ++ tree decl = add_builtin_function (name, ftype, (int) code, BUILT_IN_MD, NULL, ++ NULL_TREE); ++ ++ if (ecf & ECF_CONST) ++ TREE_READONLY (decl) = 1; ++ if (ecf & ECF_NOTHROW) ++ TREE_NOTHROW (decl) = 1; ++ ++ sw_64_builtins[(int) code] = decl; ++} ++ ++/* Helper function of sw_64_init_builtins. Add the COUNT built-in ++ functions pointed to by P, with function type FTYPE. */ ++ ++static void ++sw_64_add_builtins (const struct sw_64_builtin_def *p, size_t count, tree ftype) ++{ ++ size_t i; ++ ++ for (i = 0; i < count; ++i, ++p) ++ if ((target_flags & p->target_mask) == p->target_mask) ++ sw_64_builtin_function (p->name, ftype, p->code, ++ (p->is_const ? ECF_CONST : 0) | ECF_NOTHROW); ++} ++ ++static void ++sw_64_init_builtins (void) ++{ ++ tree ftype; ++ ++ sw_64_dimode_u = lang_hooks.types.type_for_mode (DImode, 1); ++ sw_64_v8qi_u = build_vector_type (unsigned_intQI_type_node, 8); ++ sw_64_v8qi_s = build_vector_type (intQI_type_node, 8); ++ sw_64_v4hi_u = build_vector_type (unsigned_intHI_type_node, 4); ++ sw_64_v4hi_s = build_vector_type (intHI_type_node, 4); ++ ++ ftype = build_function_type_list (sw_64_dimode_u, NULL_TREE); ++ sw_64_add_builtins (zero_arg_builtins, ARRAY_SIZE (zero_arg_builtins), ftype); ++ ++ ftype = build_function_type_list (sw_64_dimode_u, sw_64_dimode_u, NULL_TREE); ++ sw_64_add_builtins (one_arg_builtins, ARRAY_SIZE (one_arg_builtins), ftype); ++ ++ ftype = build_function_type_list (sw_64_dimode_u, sw_64_dimode_u, ++ sw_64_dimode_u, NULL_TREE); ++ sw_64_add_builtins (two_arg_builtins, ARRAY_SIZE (two_arg_builtins), ftype); ++ ++} ++ ++/* Expand an expression EXP that calls a built-in function, ++ with result going to TARGET if that's convenient ++ (and in mode MODE if that's convenient). ++ SUBTARGET may be used as the target for computing one of EXP's operands. ++ IGNORE is nonzero if the value is to be ignored. 
*/ ++ ++static rtx ++sw_64_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED, ++ machine_mode mode ATTRIBUTE_UNUSED, ++ int ignore ATTRIBUTE_UNUSED) ++{ ++#define MAX_ARGS 2 ++ ++ tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0); ++ unsigned int fcode = DECL_MD_FUNCTION_CODE (fndecl); ++ tree arg; ++ call_expr_arg_iterator iter; ++ enum insn_code icode; ++ rtx op[MAX_ARGS], pat; ++ int arity; ++ bool nonvoid; ++ ++ if (fcode >= SW_64_BUILTIN_max) ++ internal_error ("bad builtin fcode"); ++ icode = code_for_builtin[fcode]; ++ if (icode == 0) ++ internal_error ("bad builtin fcode"); ++ ++ nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node; ++ ++ arity = 0; ++ FOR_EACH_CALL_EXPR_ARG (arg, iter, exp) ++ { ++ const struct insn_operand_data *insn_op; ++ ++ if (arg == error_mark_node) ++ return NULL_RTX; ++ if (arity > MAX_ARGS) ++ return NULL_RTX; ++ ++ insn_op = &insn_data[icode].operand[arity + nonvoid]; ++ ++ op[arity] = expand_expr (arg, NULL_RTX, insn_op->mode, EXPAND_NORMAL); ++ ++ if (!(*insn_op->predicate) (op[arity], insn_op->mode)) ++ op[arity] = copy_to_mode_reg (insn_op->mode, op[arity]); ++ arity++; ++ } ++ ++ if (nonvoid) ++ { ++ machine_mode tmode = insn_data[icode].operand[0].mode; ++ if (!target || GET_MODE (target) != tmode ++ || !(*insn_data[icode].operand[0].predicate) (target, tmode)) ++ target = gen_reg_rtx (tmode); ++ } ++ ++ switch (arity) ++ { ++ case 0: ++ pat = GEN_FCN (icode) (target); ++ break; ++ case 1: ++ if (nonvoid) ++ pat = GEN_FCN (icode) (target, op[0]); ++ else ++ pat = GEN_FCN (icode) (op[0]); ++ break; ++ case 2: ++ pat = GEN_FCN (icode) (target, op[0], op[1]); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ if (!pat) ++ return NULL_RTX; ++ emit_insn (pat); ++ ++ if (nonvoid) ++ return target; ++ else ++ return const0_rtx; ++} ++ ++/* Fold the builtin for the CMPBGE instruction. This is a vector comparison ++ with an 8-bit output vector. 
OPINT contains the integer operands; bit N ++ of OP_CONST is set if OPINT[N] is valid. */ ++ ++static tree ++sw_64_fold_builtin_cmpbge (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ if (op_const == 3) ++ { ++ int i, val; ++ for (i = 0, val = 0; i < 8; ++i) ++ { ++ unsigned HOST_WIDE_INT c0 = (opint[0] >> (i * 8)) & 0xff; ++ unsigned HOST_WIDE_INT c1 = (opint[1] >> (i * 8)) & 0xff; ++ if (c0 >= c1) ++ val |= 1 << i; ++ } ++ return build_int_cst (sw_64_dimode_u, val); ++ } ++ else if (op_const == 2 && opint[1] == 0) ++ return build_int_cst (sw_64_dimode_u, 0xff); ++ return NULL; ++} ++ ++/* Fold the builtin for the ZAPNOT instruction. This is essentially a ++ specialized form of an AND operation. Other byte manipulation instructions ++ are defined in terms of this instruction, so this is also used as a ++ subroutine for other builtins. ++ ++ OP contains the tree operands; OPINT contains the extracted integer values. ++ Bit N of OP_CONST it set if OPINT[N] is valid. OP may be null if only ++ OPINT may be considered. */ ++ ++static tree ++sw_64_fold_builtin_zapnot (tree *op, unsigned HOST_WIDE_INT opint[], ++ long op_const) ++{ ++ if (op_const & 2) ++ { ++ unsigned HOST_WIDE_INT mask = 0; ++ int i; ++ ++ for (i = 0; i < 8; ++i) ++ if ((opint[1] >> i) & 1) ++ mask |= (unsigned HOST_WIDE_INT) 0xff << (i * 8); ++ ++ if (op_const & 1) ++ return build_int_cst (sw_64_dimode_u, opint[0] & mask); ++ ++ if (op) ++ return fold_build2 (BIT_AND_EXPR, sw_64_dimode_u, op[0], ++ build_int_cst (sw_64_dimode_u, mask)); ++ } ++ else if ((op_const & 1) && opint[0] == 0) ++ return build_int_cst (sw_64_dimode_u, 0); ++ return NULL; ++} ++ ++/* Fold the builtins for the EXT family of instructions. 
*/ ++ ++static tree ++sw_64_fold_builtin_extxx (tree op[], unsigned HOST_WIDE_INT opint[], ++ long op_const, unsigned HOST_WIDE_INT bytemask, ++ bool is_high) ++{ ++ long zap_const = 2; ++ tree *zap_op = NULL; ++ ++ if (op_const & 2) ++ { ++ unsigned HOST_WIDE_INT loc; ++ ++ loc = opint[1] & 7; ++ loc *= BITS_PER_UNIT; ++ ++ if (loc != 0) ++ { ++ if (op_const & 1) ++ { ++ unsigned HOST_WIDE_INT temp = opint[0]; ++ if (is_high) ++ temp <<= loc; ++ else ++ temp >>= loc; ++ opint[0] = temp; ++ zap_const = 3; ++ } ++ } ++ else ++ zap_op = op; ++ } ++ ++ opint[1] = bytemask; ++ return sw_64_fold_builtin_zapnot (zap_op, opint, zap_const); ++} ++ ++/* Fold the builtins for the INS family of instructions. */ ++ ++static tree ++sw_64_fold_builtin_insxx (tree op[], unsigned HOST_WIDE_INT opint[], ++ long op_const, unsigned HOST_WIDE_INT bytemask, ++ bool is_high) ++{ ++ if ((op_const & 1) && opint[0] == 0) ++ return build_int_cst (sw_64_dimode_u, 0); ++ ++ if (op_const & 2) ++ { ++ unsigned HOST_WIDE_INT temp, loc, byteloc; ++ tree *zap_op = NULL; ++ ++ loc = opint[1] & 7; ++ bytemask <<= loc; ++ ++ temp = opint[0]; ++ if (is_high) ++ { ++ byteloc = (64 - (loc * 8)) & 0x3f; ++ if (byteloc == 0) ++ zap_op = op; ++ else ++ temp >>= byteloc; ++ bytemask >>= 8; ++ } ++ else ++ { ++ byteloc = loc * 8; ++ if (byteloc == 0) ++ zap_op = op; ++ else ++ temp <<= byteloc; ++ } ++ ++ opint[0] = temp; ++ opint[1] = bytemask; ++ return sw_64_fold_builtin_zapnot (zap_op, opint, op_const); ++ } ++ ++ return NULL; ++} ++ ++static tree ++sw_64_fold_builtin_mskxx (tree op[], unsigned HOST_WIDE_INT opint[], ++ long op_const, unsigned HOST_WIDE_INT bytemask, ++ bool is_high) ++{ ++ if (op_const & 2) ++ { ++ unsigned HOST_WIDE_INT loc; ++ ++ loc = opint[1] & 7; ++ bytemask <<= loc; ++ ++ if (is_high) ++ bytemask >>= 8; ++ ++ opint[1] = bytemask ^ 0xff; ++ } ++ ++ return sw_64_fold_builtin_zapnot (op, opint, op_const); ++} ++ ++static tree ++sw_64_fold_vector_minmax (enum tree_code code, tree 
op[], tree vtype) ++{ ++ tree op0 = fold_convert (vtype, op[0]); ++ tree op1 = fold_convert (vtype, op[1]); ++ tree val = fold_build2 (code, vtype, op0, op1); ++ return fold_build1 (VIEW_CONVERT_EXPR, sw_64_dimode_u, val); ++} ++ ++static tree ++sw_64_fold_builtin_perr (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp = 0; ++ int i; ++ ++ if (op_const != 3) ++ return NULL; ++ ++ for (i = 0; i < 8; ++i) ++ { ++ unsigned HOST_WIDE_INT a = (opint[0] >> (i * 8)) & 0xff; ++ unsigned HOST_WIDE_INT b = (opint[1] >> (i * 8)) & 0xff; ++ if (a >= b) ++ temp += a - b; ++ else ++ temp += b - a; ++ } ++ ++ return build_int_cst (sw_64_dimode_u, temp); ++} ++ ++static tree ++sw_64_fold_builtin_pklb (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp; ++ ++ if (op_const == 0) ++ return NULL; ++ ++ temp = opint[0] & 0xff; ++ temp |= (opint[0] >> 24) & 0xff00; ++ ++ return build_int_cst (sw_64_dimode_u, temp); ++} ++ ++static tree ++sw_64_fold_builtin_pkwb (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp; ++ ++ if (op_const == 0) ++ return NULL; ++ ++ temp = opint[0] & 0xff; ++ temp |= (opint[0] >> 8) & 0xff00; ++ temp |= (opint[0] >> 16) & 0xff0000; ++ temp |= (opint[0] >> 24) & 0xff000000; ++ ++ return build_int_cst (sw_64_dimode_u, temp); ++} ++ ++static tree ++sw_64_fold_builtin_unpkbl (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp; ++ ++ if (op_const == 0) ++ return NULL; ++ ++ temp = opint[0] & 0xff; ++ temp |= (opint[0] & 0xff00) << 24; ++ ++ return build_int_cst (sw_64_dimode_u, temp); ++} ++ ++static tree ++sw_64_fold_builtin_unpkbw (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp; ++ ++ if (op_const == 0) ++ return NULL; ++ ++ temp = opint[0] & 0xff; ++ temp |= (opint[0] & 0x0000ff00) << 8; ++ temp |= (opint[0] & 0x00ff0000) << 16; ++ temp |= (opint[0] & 0xff000000) << 24; ++ ++ return build_int_cst 
(sw_64_dimode_u, temp); ++} ++ ++static tree ++sw_64_fold_builtin_cttz (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp; ++ ++ if (op_const == 0) ++ return NULL; ++ ++ if (opint[0] == 0) ++ temp = 64; ++ else ++ temp = exact_log2 (opint[0] & -opint[0]); ++ ++ return build_int_cst (sw_64_dimode_u, temp); ++} ++ ++static tree ++sw_64_fold_builtin_ctlz (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp; ++ ++ if (op_const == 0) ++ return NULL; ++ ++ if (opint[0] == 0) ++ temp = 64; ++ else ++ temp = 64 - floor_log2 (opint[0]) - 1; ++ ++ return build_int_cst (sw_64_dimode_u, temp); ++} ++ ++static tree ++sw_64_fold_builtin_ctpop (unsigned HOST_WIDE_INT opint[], long op_const) ++{ ++ unsigned HOST_WIDE_INT temp, op; ++ ++ if (op_const == 0) ++ return NULL; ++ ++ op = opint[0]; ++ temp = 0; ++ while (op) ++ temp++, op &= op - 1; ++ ++ return build_int_cst (sw_64_dimode_u, temp); ++} ++ ++/* Fold one of our builtin functions. */ ++ ++static tree ++sw_64_fold_builtin (tree fndecl, int n_args, tree *op, ++ bool ignore ATTRIBUTE_UNUSED) ++{ ++ unsigned HOST_WIDE_INT opint[MAX_ARGS]; ++ long op_const = 0; ++ int i; ++ ++ if (n_args > MAX_ARGS) ++ return NULL; ++ ++ for (i = 0; i < n_args; i++) ++ { ++ tree arg = op[i]; ++ if (arg == error_mark_node) ++ return NULL; ++ ++ opint[i] = 0; ++ if (TREE_CODE (arg) == INTEGER_CST) ++ { ++ op_const |= 1L << i; ++ opint[i] = int_cst_value (arg); ++ } ++ } ++ ++ switch (DECL_MD_FUNCTION_CODE (fndecl)) ++ { ++ case SW_64_BUILTIN_CMPBGE: ++ return sw_64_fold_builtin_cmpbge (opint, op_const); ++ ++ case SW_64_BUILTIN_EXTBL: ++ return sw_64_fold_builtin_extxx (op, opint, op_const, 0x01, false); ++ case SW_64_BUILTIN_EXTWL: ++ return sw_64_fold_builtin_extxx (op, opint, op_const, 0x03, false); ++ case SW_64_BUILTIN_EXTLL: ++ return sw_64_fold_builtin_extxx (op, opint, op_const, 0x0f, false); ++ case SW_64_BUILTIN_EXTQL: ++ return sw_64_fold_builtin_extxx (op, opint, op_const, 
0xff, false); ++ case SW_64_BUILTIN_EXTWH: ++ return sw_64_fold_builtin_extxx (op, opint, op_const, 0x03, true); ++ case SW_64_BUILTIN_EXTLH: ++ return sw_64_fold_builtin_extxx (op, opint, op_const, 0x0f, true); ++ case SW_64_BUILTIN_EXTQH: ++ return sw_64_fold_builtin_extxx (op, opint, op_const, 0xff, true); ++ ++ case SW_64_BUILTIN_INSBL: ++ return sw_64_fold_builtin_insxx (op, opint, op_const, 0x01, false); ++ case SW_64_BUILTIN_INSWL: ++ return sw_64_fold_builtin_insxx (op, opint, op_const, 0x03, false); ++ case SW_64_BUILTIN_INSLL: ++ return sw_64_fold_builtin_insxx (op, opint, op_const, 0x0f, false); ++ case SW_64_BUILTIN_INSQL: ++ return sw_64_fold_builtin_insxx (op, opint, op_const, 0xff, false); ++ case SW_64_BUILTIN_INSWH: ++ return sw_64_fold_builtin_insxx (op, opint, op_const, 0x03, true); ++ case SW_64_BUILTIN_INSLH: ++ return sw_64_fold_builtin_insxx (op, opint, op_const, 0x0f, true); ++ case SW_64_BUILTIN_INSQH: ++ return sw_64_fold_builtin_insxx (op, opint, op_const, 0xff, true); ++ ++ case SW_64_BUILTIN_MSKBL: ++ return sw_64_fold_builtin_mskxx (op, opint, op_const, 0x01, false); ++ case SW_64_BUILTIN_MSKWL: ++ return sw_64_fold_builtin_mskxx (op, opint, op_const, 0x03, false); ++ case SW_64_BUILTIN_MSKLL: ++ return sw_64_fold_builtin_mskxx (op, opint, op_const, 0x0f, false); ++ case SW_64_BUILTIN_MSKQL: ++ return sw_64_fold_builtin_mskxx (op, opint, op_const, 0xff, false); ++ case SW_64_BUILTIN_MSKWH: ++ return sw_64_fold_builtin_mskxx (op, opint, op_const, 0x03, true); ++ case SW_64_BUILTIN_MSKLH: ++ return sw_64_fold_builtin_mskxx (op, opint, op_const, 0x0f, true); ++ case SW_64_BUILTIN_MSKQH: ++ return sw_64_fold_builtin_mskxx (op, opint, op_const, 0xff, true); ++ ++ case SW_64_BUILTIN_ZAP: ++ opint[1] ^= 0xff; ++ /* FALLTHRU */ ++ case SW_64_BUILTIN_ZAPNOT: ++ return sw_64_fold_builtin_zapnot (op, opint, op_const); ++ ++ case SW_64_BUILTIN_MINUB8: ++ return sw_64_fold_vector_minmax (MIN_EXPR, op, sw_64_v8qi_u); ++ case SW_64_BUILTIN_MINSB8: ++ 
return sw_64_fold_vector_minmax (MIN_EXPR, op, sw_64_v8qi_s); ++ case SW_64_BUILTIN_MINUW4: ++ return sw_64_fold_vector_minmax (MIN_EXPR, op, sw_64_v4hi_u); ++ case SW_64_BUILTIN_MINSW4: ++ return sw_64_fold_vector_minmax (MIN_EXPR, op, sw_64_v4hi_s); ++ case SW_64_BUILTIN_MAXUB8: ++ return sw_64_fold_vector_minmax (MAX_EXPR, op, sw_64_v8qi_u); ++ case SW_64_BUILTIN_MAXSB8: ++ return sw_64_fold_vector_minmax (MAX_EXPR, op, sw_64_v8qi_s); ++ case SW_64_BUILTIN_MAXUW4: ++ return sw_64_fold_vector_minmax (MAX_EXPR, op, sw_64_v4hi_u); ++ case SW_64_BUILTIN_MAXSW4: ++ return sw_64_fold_vector_minmax (MAX_EXPR, op, sw_64_v4hi_s); ++ ++ case SW_64_BUILTIN_PERR: ++ return sw_64_fold_builtin_perr (opint, op_const); ++ case SW_64_BUILTIN_PKLB: ++ return sw_64_fold_builtin_pklb (opint, op_const); ++ case SW_64_BUILTIN_PKWB: ++ return sw_64_fold_builtin_pkwb (opint, op_const); ++ case SW_64_BUILTIN_UNPKBL: ++ return sw_64_fold_builtin_unpkbl (opint, op_const); ++ case SW_64_BUILTIN_UNPKBW: ++ return sw_64_fold_builtin_unpkbw (opint, op_const); ++ ++ case SW_64_BUILTIN_CTTZ: ++ return sw_64_fold_builtin_cttz (opint, op_const); ++ case SW_64_BUILTIN_CTLZ: ++ return sw_64_fold_builtin_ctlz (opint, op_const); ++ case SW_64_BUILTIN_CTPOP: ++ return sw_64_fold_builtin_ctpop (opint, op_const); ++ ++ case SW_64_BUILTIN_AMASK: ++ case SW_64_BUILTIN_IMPLVER: ++ case SW_64_BUILTIN_RPCC: ++ /* None of these are foldable at compile-time. 
*/ ++ default: ++ return NULL; ++ } ++} ++ ++bool ++sw_64_gimple_fold_builtin (gimple_stmt_iterator *gsi) ++{ ++ bool changed = false; ++ gimple *stmt = gsi_stmt (*gsi); ++ tree call = gimple_call_fn (stmt); ++ gimple *new_stmt = NULL; ++ ++ if (call) ++ { ++ tree fndecl = gimple_call_fndecl (stmt); ++ ++ if (fndecl) ++ { ++ tree arg0, arg1; ++ ++ switch (DECL_MD_FUNCTION_CODE (fndecl)) ++ { ++ case SW_64_BUILTIN_UMULH: ++ arg0 = gimple_call_arg (stmt, 0); ++ arg1 = gimple_call_arg (stmt, 1); ++ ++ new_stmt = gimple_build_assign (gimple_call_lhs (stmt), ++ MULT_HIGHPART_EXPR, arg0, arg1); ++ break; ++ default: ++ break; ++ } ++ } ++ } ++ ++ if (new_stmt) ++ { ++ gsi_replace (gsi, new_stmt, true); ++ changed = true; ++ } ++ ++ return changed; ++} ++ ++/* This page contains routines that are used to determine what the function ++ prologue and epilogue code will do and write them out. */ ++ ++/* Compute the size of the save area in the stack. */ ++ ++/* These variables are used for communication between the following functions. ++ They indicate various things about the current function being compiled ++ that are used to tell what kind of prologue, epilogue and procedure ++ descriptor to generate. */ ++ ++/* Nonzero if we need a stack procedure. */ ++enum sw_64_procedure_types ++{ ++ PT_NULL = 0, ++ PT_REGISTER = 1, ++ PT_STACK = 2 ++}; ++static enum sw_64_procedure_types sw_64_procedure_type; ++ ++/* Compute register masks for saved registers, register save area size, ++ and total frame size. */ ++static void ++sw_64_compute_frame_layout (void) ++{ ++ unsigned HOST_WIDE_INT sa_mask = 0; ++ HOST_WIDE_INT frame_size; ++ int sa_size; ++ ++ /* When outputting a thunk, we don't have valid register life info, ++ but assemble_start_function wants to output .frame and .mask ++ directives. */ ++ if (!cfun->is_thunk) ++ { ++ /* One for every register we have to save. 
*/ ++ for (unsigned i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ if (!call_used_or_fixed_reg_p (i) && df_regs_ever_live_p (i) ++ && i != REG_RA) ++ sa_mask |= HOST_WIDE_INT_1U << i; ++ ++ /* We need to restore these for the handler. */ ++ if (crtl->calls_eh_return) ++ { ++ for (unsigned i = 0;; ++i) ++ { ++ unsigned regno = EH_RETURN_DATA_REGNO (i); ++ if (regno == INVALID_REGNUM) ++ break; ++ sa_mask |= HOST_WIDE_INT_1U << regno; ++ } ++ } ++ /* If any register spilled, then spill the return address also. */ ++ /* ??? This is required by the Digital stack unwind specification ++ and isn't needed if we're doing Dwarf2 unwinding. */ ++ if (sa_mask || sw_64_ra_ever_killed ()) ++ sa_mask |= HOST_WIDE_INT_1U << REG_RA; ++ } ++ sa_size = popcount_hwi (sa_mask); ++ frame_size = get_frame_size (); ++ ++ /* Our size must be even (multiple of 16 bytes). */ ++ if (sa_size & 1) ++ sa_size++; ++ sa_size *= 8; ++ ++ frame_size = (SW_64_ROUND (crtl->outgoing_args_size) + sa_size ++ + SW_64_ROUND (frame_size + crtl->args.pretend_args_size)); ++ ++ cfun->machine->sa_mask = sa_mask; ++ cfun->machine->sa_size = sa_size; ++ cfun->machine->frame_size = frame_size; ++} ++ ++#undef TARGET_COMPUTE_FRAME_LAYOUT ++#define TARGET_COMPUTE_FRAME_LAYOUT sw_64_layout_frame ++ ++/* Return 1 if this function can directly return via $26. */ ++ ++bool ++direct_return (void) ++{ ++ return (reload_completed && cfun->machine->frame_size == 0); ++} ++ ++bool ++sw_64_find_lo_sum_using_gp (rtx insn) ++{ ++ subrtx_iterator::array_type array; ++ FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST) ++ { ++ const_rtx x = *iter; ++ if (GET_CODE (x) == LO_SUM && XEXP (x, 0) == pic_offset_table_rtx) ++ return true; ++ } ++ return false; ++} ++ ++static int ++sw_64_does_function_need_gp (void) ++{ ++ rtx_insn *insn; ++ ++ /* We need the gp to load the address of __mcount. */ ++ if (TARGET_PROFILING_NEEDS_GP && crtl->profile) ++ return 1; ++ ++ /* The code emitted by sw_64_output_mi_thunk_sysv uses the gp. 
*/ ++ if (cfun->is_thunk) ++ return 1; ++ ++ /* The nonlocal receiver pattern assumes that the gp is valid for ++ the nested function. Reasonable because it's almost always set ++ correctly already. For the cases where that's wrong, make sure ++ the nested function loads its gp on entry. */ ++ if (crtl->has_nonlocal_goto) ++ return 1; ++ ++ /* If we need a GP (we have a LDSYM insn or a CALL_INSN), load it first. ++ Even if we are a static function, we still need to do this in case ++ our address is taken and passed to something like qsort. */ ++ ++ push_topmost_sequence (); ++ insn = get_insns (); ++ pop_topmost_sequence (); ++ ++ for (; insn; insn = NEXT_INSN (insn)) ++ if (NONDEBUG_INSN_P (insn) && GET_CODE (PATTERN (insn)) != USE ++ && GET_CODE (PATTERN (insn)) != CLOBBER && get_attr_usegp (insn)) ++ return 1; ++ ++ return 0; ++} ++ ++/* Helper function to set RTX_FRAME_RELATED_P on instructions, including ++ sequences. */ ++ ++static rtx_insn * ++set_frame_related_p (void) ++{ ++ rtx_insn *seq = get_insns (); ++ rtx_insn *insn; ++ ++ end_sequence (); ++ ++ if (!seq) ++ return NULL; ++ ++ if (INSN_P (seq)) ++ { ++ insn = seq; ++ while (insn != NULL_RTX) ++ { ++ RTX_FRAME_RELATED_P (insn) = 1; ++ insn = NEXT_INSN (insn); ++ } ++ seq = emit_insn (seq); ++ } ++ else ++ { ++ seq = emit_insn (seq); ++ RTX_FRAME_RELATED_P (seq) = 1; ++ } ++ return seq; ++} ++ ++#define FRP(exp) (start_sequence (), exp, set_frame_related_p ()) ++ ++/* Generates a store with the proper unwind info attached. VALUE is ++ stored at BASE_REG+BASE_OFS. If FRAME_BIAS is nonzero, then BASE_REG ++ contains SP+FRAME_BIAS, and that is the unwind info that should be ++ generated. If FRAME_REG != VALUE, then VALUE is being stored on ++ behalf of FRAME_REG, and FRAME_REG should be present in the unwind. 
*/ ++ ++static void ++emit_frame_store_1 (rtx value, rtx base_reg, HOST_WIDE_INT frame_bias, ++ HOST_WIDE_INT base_ofs, rtx frame_reg) ++{ ++ rtx addr, mem; ++ rtx_insn *insn; ++ ++ addr = plus_constant (Pmode, base_reg, base_ofs); ++ mem = gen_frame_mem (DImode, addr); ++ ++ insn = emit_move_insn (mem, value); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ ++ if (frame_bias || value != frame_reg) ++ { ++ if (frame_bias) ++ { ++ addr ++ = plus_constant (Pmode, stack_pointer_rtx, frame_bias + base_ofs); ++ mem = gen_rtx_MEM (DImode, addr); ++ } ++ ++ add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_SET (mem, frame_reg)); ++ } ++} ++ ++static void ++emit_frame_store (unsigned int regno, rtx base_reg, HOST_WIDE_INT frame_bias, ++ HOST_WIDE_INT base_ofs) ++{ ++ rtx reg = gen_rtx_REG (DImode, regno); ++ emit_frame_store_1 (reg, base_reg, frame_bias, base_ofs, reg); ++} ++ ++static void ++sw64_add_cfa_expression (rtx_insn *insn, unsigned int reg, rtx base, ++ poly_int64 offset) ++{ ++ rtx mem = gen_frame_mem (DImode, plus_constant (Pmode, base, offset)); ++ add_reg_note (insn, REG_CFA_EXPRESSION, ++ gen_rtx_SET (mem, regno_reg_rtx[reg])); ++} ++ ++/* Write function prologue. */ ++ ++void ++sw_64_expand_prologue (void) ++{ ++ /* Registers to save. */ ++ unsigned HOST_WIDE_INT sa_mask = cfun->machine->frame.sa_mask; ++ /* Stack space needed for pushing registers clobbered by us. */ ++ HOST_WIDE_INT sa_size = cfun->machine->frame.saved_regs_size; ++ /* Complete stack size needed. */ ++ HOST_WIDE_INT frame_size = cfun->machine->frame.frame_size; ++ /* Probed stack size; it additionally includes the size of ++ the "reserve region" if any. */ ++ HOST_WIDE_INT probed_size, sa_bias; ++ /* Offset from base reg to register save area. 
*/ ++ HOST_WIDE_INT reg_offset; ++ rtx sa_reg; ++ bool fp_flag = false; ++ ++ if (flag_stack_usage_info) ++ current_function_static_stack_size = frame_size; ++ ++#ifdef SW_64_ENABLE_FULL_ASAN ++ reg_offset = aligned_upper_bound (crtl->outgoing_args_size, ++ STACK_BOUNDARY / BITS_PER_UNIT); ++#else ++ reg_offset = SW_64_ROUND (crtl->outgoing_args_size); ++#endif ++ ++ /* Emit an insn to reload GP, if needed. */ ++ sw_64_function_needs_gp = sw_64_does_function_need_gp (); ++ if (sw_64_function_needs_gp) ++ emit_insn (gen_prologue_ldgp ()); ++ if (strcmp ("main", lang_hooks.decl_printable_name (cfun->decl, 1)) == 0 ++ && (TARGET_SW_32ALIGN || TARGET_SW_SIMD)) ++ { ++ rtx const16 = gen_rtx_REG (DImode, 7); ++ sw_64_emit_set_const (const16, DImode, 16, 3, false); ++ emit_insn (gen_anddi3 (const16, const16, stack_pointer_rtx)); ++ emit_insn (gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, const16)); ++ ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-32))); ++ rtx mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (0)); ++ rtx tmp7 = gen_rtx_MEM (Pmode, mem_address); ++ emit_move_insn (tmp7, gen_rtx_REG (DImode, 7)); ++ } ++ /* TARGET_PROFILING_NEEDS_GP actually implies that we need to insert ++ the call to mcount ourselves, rather than having the linker do it ++ magically in response to -pg. Since _mcount has special linkage, ++ don't represent the call as a call. 
*/ ++ if (TARGET_PROFILING_NEEDS_GP && crtl->profile) ++ emit_insn (gen_prologue_mcount ()); ++ ++ if (strcmp ("main", lang_hooks.decl_printable_name (cfun->decl, 1)) == 0 ++ && flag_sw_hardware_prefetch) ++ { ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-256))); ++ rtx mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (8)); ++ rtx tmp16 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (16)); ++ rtx tmp17 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (24)); ++ rtx tmp18 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (32)); ++ rtx tmp19 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (40)); ++ rtx tmp26 = gen_rtx_MEM (Pmode, mem_address); ++ ++ emit_move_insn (tmp16, gen_rtx_REG (DImode, 16)); ++ emit_move_insn (tmp17, gen_rtx_REG (DImode, 17)); ++ emit_move_insn (tmp18, gen_rtx_REG (DImode, 18)); ++ emit_move_insn (tmp19, gen_rtx_REG (DImode, 19)); ++ emit_move_insn (tmp26, gen_rtx_REG (DImode, 26)); ++ ++ rtx tmp_clt = gen_rtx_REG (DImode, 7); ++ rtx tmp_cnt = gen_rtx_REG (DImode, 8); ++ rtx op = gen_rtx_REG (DImode, 17); ++ ++ unsigned long clt1, clt2, clt3; ++ unsigned long cnt1, cnt2, cnt3; ++ clt1 = flag_hardware_prefetch_clt % 2; ++ clt2 = (flag_hardware_prefetch_clt >> 1) % 2; ++ clt3 = (flag_hardware_prefetch_clt >> 2) % 2; ++ cnt1 = flag_hardware_prefetch_cnt_l1; ++ cnt2 = flag_hardware_prefetch_cnt_l2; ++ cnt3 = flag_hardware_prefetch_cnt_l3; ++ sw_64_emit_set_const (op, DImode, 0x10, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt1, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x11, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt2, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ 
sw_64_emit_set_const (op, DImode, 0x12, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt3, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x1, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt1, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x4, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt2, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x8, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt3, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ emit_move_insn (gen_rtx_REG (DImode, 16), tmp16); ++ emit_move_insn (gen_rtx_REG (DImode, 17), tmp17); ++ emit_move_insn (gen_rtx_REG (DImode, 18), tmp18); ++ emit_move_insn (gen_rtx_REG (DImode, 19), tmp19); ++ emit_move_insn (gen_rtx_REG (DImode, 26), tmp26); ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (256))); ++ } ++ if (strcmp ("exit", lang_hooks.decl_printable_name (cfun->decl, 1)) == 0 ++ && flag_sw_hardware_prefetch_exit) ++ { ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-256))); ++ rtx mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (8)); ++ rtx tmp16 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (16)); ++ rtx tmp17 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (24)); ++ rtx tmp18 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (32)); ++ rtx tmp19 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (40)); ++ rtx tmp26 = gen_rtx_MEM (Pmode, mem_address); ++ ++ emit_move_insn (tmp16, gen_rtx_REG (DImode, 16)); ++ emit_move_insn (tmp17, gen_rtx_REG (DImode, 17)); ++ 
emit_move_insn (tmp18, gen_rtx_REG (DImode, 18)); ++ emit_move_insn (tmp19, gen_rtx_REG (DImode, 19)); ++ emit_move_insn (tmp26, gen_rtx_REG (DImode, 26)); ++ ++ rtx tmp_clt = gen_rtx_REG (DImode, 7); ++ rtx tmp_cnt = gen_rtx_REG (DImode, 8); ++ rtx op = gen_rtx_REG (DImode, 17); ++ ++ unsigned long clt1, clt2, clt3; ++ unsigned long cnt1, cnt2, cnt3; ++ clt1 = 1; ++ clt2 = 0; ++ clt3 = 1; ++ cnt1 = 0; ++ cnt2 = 0; ++ cnt3 = 5; ++ sw_64_emit_set_const (op, DImode, 0x10, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt1, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x11, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt2, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x12, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt3, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x1, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt1, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x4, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt2, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x8, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt3, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ emit_move_insn (gen_rtx_REG (DImode, 16), tmp16); ++ emit_move_insn (gen_rtx_REG (DImode, 17), tmp17); ++ emit_move_insn (gen_rtx_REG (DImode, 18), tmp18); ++ emit_move_insn (gen_rtx_REG (DImode, 19), tmp19); ++ emit_move_insn (gen_rtx_REG (DImode, 26), tmp26); ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (256))); ++ } ++ ++ /* Adjust the stack by the frame size. 
If the frame size is > 4096 ++ bytes, we need to be sure we probe somewhere in the first and last ++ 4096 bytes (we can probably get away without the latter test) and ++ every 8192 bytes in between. If the frame size is > 32768, we ++ do this in a loop. Otherwise, we generate the explicit probe ++ instructions. ++ ++ Note that we are only allowed to adjust sp once in the prologue. */ ++ ++ probed_size = frame_size; ++ if (flag_stack_check || flag_stack_clash_protection) ++ probed_size += get_stack_check_protect (); ++ ++ if (probed_size <= 32768) ++ { ++ if (probed_size > 4096) ++ { ++ int probed; ++ ++ for (probed = 4096; probed < probed_size; probed += 8192) ++ emit_insn (gen_stack_probe_internal (GEN_INT (-probed))); ++ ++ /* We only have to do this probe if we aren't saving registers or ++ if we are probing beyond the frame because of -fstack-check. */ ++ if ((sa_size == 0 && probed_size > probed - 4096) || flag_stack_check ++ || flag_stack_clash_protection) ++ emit_insn (gen_stack_probe_internal (GEN_INT (-probed_size))); ++ } ++ ++ if (frame_size != 0) ++ FRP (emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (-frame_size)))); ++ } ++ else ++ { ++ /* Here we generate code to set R22 to SP + 4096 and set R23 to the ++ number of 8192 byte blocks to probe. We then probe each block ++ in the loop and then set SP to the proper location. If the ++ amount remaining is > 4096, we have to do one more probe if we ++ are not saving any registers or if we are probing beyond the ++ frame because of -fstack-check. 
*/ ++ ++ HOST_WIDE_INT blocks = (probed_size + 4096) / 8192; ++ HOST_WIDE_INT leftover = probed_size + 4096 - blocks * 8192; ++ rtx ptr = gen_rtx_REG (DImode, 22); ++ rtx count = gen_rtx_REG (DImode, 23); ++ rtx seq; ++ ++ emit_move_insn (count, GEN_INT (blocks)); ++ emit_insn (gen_adddi3 (ptr, stack_pointer_rtx, GEN_INT (4096))); ++ ++ /* Because of the difficulty in emitting a new basic block this ++ late in the compilation, generate the loop as a single insn. */ ++ emit_insn (gen_prologue_stack_probe_loop (count, ptr)); ++ ++ if ((leftover > 4096 && sa_size == 0) || flag_stack_check ++ || flag_stack_clash_protection) ++ { ++ rtx last ++ = gen_rtx_MEM (DImode, plus_constant (Pmode, ptr, -leftover)); ++ MEM_VOLATILE_P (last) = 1; ++ emit_move_insn (last, const0_rtx); ++ } ++ ++ if (flag_stack_check || flag_stack_clash_protection) ++ { ++ /* If -fstack-check is specified we have to load the entire ++ constant into a register and subtract from the sp in one go, ++ because the probed stack size is not equal to the frame size. */ ++ HOST_WIDE_INT lo, hi; ++ lo = ((frame_size & 0xffff) ^ 0x8000) - 0x8000; ++ hi = frame_size - lo; ++ ++ emit_move_insn (ptr, GEN_INT (hi)); ++ emit_insn (gen_adddi3 (ptr, ptr, GEN_INT (lo))); ++ seq = emit_insn ( ++ gen_subdi3 (stack_pointer_rtx, stack_pointer_rtx, ptr)); ++ } ++ else ++ { ++ seq = emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, ptr, GEN_INT (-leftover))); ++ } ++ ++ /* This alternative is special, because the DWARF code cannot ++ possibly intuit through the loop above. So we invent this ++ note it looks at instead. */ ++ RTX_FRAME_RELATED_P (seq) = 1; ++ add_reg_note (seq, REG_FRAME_RELATED_EXPR, ++ gen_rtx_SET (stack_pointer_rtx, ++ plus_constant (Pmode, stack_pointer_rtx, ++ -frame_size))); ++ } ++ ++ /* Cope with very large offsets to the register save area. 
*/ ++ sa_bias = 0; ++ sa_reg = stack_pointer_rtx; ++ if (reg_offset + sa_size > 0x8000) ++ { ++ int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000; ++ rtx sa_bias_rtx; ++ ++ if (low + sa_size <= 0x8000) ++ sa_bias = reg_offset - low, reg_offset = low; ++ else ++ sa_bias = reg_offset, reg_offset = 0; ++ ++ sa_reg = gen_rtx_REG (DImode, 24); ++ sa_bias_rtx = GEN_INT (sa_bias); ++ ++ if (add_operand (sa_bias_rtx, DImode)) ++ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_bias_rtx)); ++ else ++ { ++ emit_move_insn (sa_reg, sa_bias_rtx); ++ emit_insn (gen_adddi3 (sa_reg, stack_pointer_rtx, sa_reg)); ++ } ++ } ++ ++ /* Save register RA next, followed by any other registers ++ that need to be saved. */ ++ for (unsigned i = REG_RA; sa_mask != 0; i = ctz_hwi (sa_mask)) ++ { ++ /* if we need a frame pointer, set it from the stack pointer. */ ++ if (frame_pointer_needed && i != REG_RA && fp_flag == false) ++ { ++ emit_frame_store (HARD_FRAME_POINTER_REGNUM, sa_reg, sa_bias, ++ reg_offset); ++ sa_mask &= ~(HOST_WIDE_INT_1U << HARD_FRAME_POINTER_REGNUM); ++ reg_offset += 8; ++ fp_flag = true; ++ } ++ else ++ { ++ emit_frame_store (i, sa_reg, sa_bias, reg_offset); ++ reg_offset += 8; ++ sa_mask &= ~(HOST_WIDE_INT_1U << i); ++ } ++ } ++ ++ /* If we need a frame pointer, set it from the stack pointer. 
*/ ++ if (frame_pointer_needed) ++ { ++ if (TARGET_CAN_FAULT_IN_PROLOGUE) ++ { ++ unsigned reg2 = 15; // FP ++ unsigned reg1 = 26; // R26 ++ long adj_size = SW_64_ROUND (crtl->outgoing_args_size); ++ if (adj_size > 0xFFFFFFFF) ++ { ++ int low = ((adj_size & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT bias; ++ ++ if (low <= 0x8000) ++ bias = adj_size - low, adj_size = low; ++ else ++ bias = adj_size, adj_size = 0; ++ ++ rtx fp_move; ++ rtx tmp_reg = gen_rtx_REG (DImode, 28); ++ rtx tmp = gen_int_mode (bias, DImode); ++ emit_move_insn (tmp_reg, tmp); ++ rtx adden = gen_movdi (tmp_reg, tmp); ++ ++ if (adj_size != 0) ++ fp_move = gen_adddi3 (hard_frame_pointer_rtx, ++ hard_frame_pointer_rtx, tmp_reg); ++ emit_insn (fp_move); ++ } ++ else if (adj_size > 0x8000) ++ { ++ int low = ((adj_size & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT bias; ++ ++ if (low <= 0x8000) ++ bias = adj_size - low, adj_size = low; ++ else ++ bias = adj_size, adj_size = 0; ++ ++ rtx fp_move; ++ rtx sa_reg_exp ++ = plus_constant (Pmode, stack_pointer_rtx, bias); ++ emit_move_insn (hard_frame_pointer_rtx, sa_reg_exp); ++ if (adj_size != 0) ++ fp_move ++ = gen_adddi3 (hard_frame_pointer_rtx, ++ hard_frame_pointer_rtx, GEN_INT (adj_size)); ++ ++ if ((void *) fp_move == NULL) ++ printf ("unable gen add3"); ++ emit_insn (fp_move); ++ } ++ else ++ { ++ rtx fp_move ++ = gen_add3_insn (hard_frame_pointer_rtx, stack_pointer_rtx, ++ GEN_INT ( ++ SW_64_ROUND (crtl->outgoing_args_size))); ++ FRP (emit_insn (fp_move)); ++ } ++ rtx_insn *insn = get_last_insn (); ++ if (!find_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX)) ++ { ++ rtx src ++ = plus_constant (Pmode, stack_pointer_rtx, ++ SW_64_ROUND (crtl->outgoing_args_size)); ++ add_reg_note (insn, REG_CFA_ADJUST_CFA, ++ gen_rtx_SET (hard_frame_pointer_rtx, src)); ++ } ++ ++ emit_insn ( ++ gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx)); ++ } ++ else ++ /* This must always be the last instruction in the ++ prologue, thus we emit a special move + 
clobber. */ ++ FRP (emit_insn ( ++ gen_init_fp (hard_frame_pointer_rtx, stack_pointer_rtx, sa_reg))); ++ } ++ ++ /* The ABIs for VMS and SYSV say that while we can schedule insns into ++ the prologue, for exception handling reasons, we cannot do this for ++ any insn that might fault. We could prevent this for mems with a ++ (clobber:BLK (scratch)), but this doesn't work for fp insns. So we ++ have to prevent all such scheduling with a blockage. ++ ++ Linux, on the other hand, never bothered to implement SYSV's ++ exception handling, and so doesn't care about such things. Anyone ++ planning to use dwarf2 frame-unwind info can also omit the blockage. */ ++ ++ if (!TARGET_CAN_FAULT_IN_PROLOGUE) ++ emit_insn (gen_blockage ()); ++} ++ ++/* Count the number of .file directives, so that .loc is up to date. */ ++int num_source_filenames = 0; ++ ++/* Output the textual info surrounding the prologue. */ ++ ++void ++sw_64_start_function (FILE *file, const char *fnname, ++ tree decl ATTRIBUTE_UNUSED) ++{ ++ unsigned long imask, fmask; ++ /* Complete stack size needed. */ ++ HOST_WIDE_INT frame_size = cfun->machine->frame.frame_size; ++ /* The maximum debuggable frame size. */ ++ const HOST_WIDE_INT max_frame_size = HOST_WIDE_INT_1 << 31; ++ /* Offset from base reg to register save area. */ ++ HOST_WIDE_INT reg_offset; ++ char *entry_label = (char *) alloca (strlen (fnname) + 6); ++ char *tramp_label = (char *) alloca (strlen (fnname) + 6); ++ int i; ++ ++ sw_64_fnname = fnname; ++ const char *main = "main"; ++ if (flag_fpcr_set == 4 && strcmp (fnname, main) == 0) ++ stfp3_flag = 1; ++ else ++ stfp3_flag = 0; ++ ++ reg_offset = SW_64_ROUND (crtl->outgoing_args_size); ++ ++ imask = cfun->machine->frame.sa_mask & 0xffffffffu; ++ fmask = cfun->machine->frame.sa_mask >> 32; ++ /* Issue function start and label. 
*/ ++ if (!flag_inhibit_size_directive) ++ { ++ fputs ("\t.ent ", file); ++ assemble_name (file, fnname); ++ putc ('\n', file); ++ ++ /* If the function needs GP, we'll write the "..ng" label there. ++ Otherwise, do it here. */ ++ if (!sw_64_function_needs_gp && !cfun->is_thunk) ++ { ++ putc ('$', file); ++ assemble_name (file, fnname); ++ fputs ("..ng:\n", file); ++ } ++ } ++ /* Nested functions on VMS that are potentially called via trampoline ++ get a special transfer entry point that loads the called functions ++ procedure descriptor and static chain. */ ++ strcpy (entry_label, fnname); ++ ++ ASM_OUTPUT_LABEL (file, entry_label); ++ inside_function = TRUE; ++ ++ if (TARGET_IEEE_CONFORMANT && !flag_inhibit_size_directive) ++ { ++ /* Set flags in procedure descriptor to request IEEE-conformant ++ math-library routines. The value we set it to is PDSC_EXC_IEEE ++ (/usr/include/pdsc.h). */ ++ fputs ("\t.eflag 48\n", file); ++ } ++ ++ /* Set up offsets to sw_64 virtual arg/local debugging pointer. */ ++ sw_64_auto_offset = -frame_size + cfun->machine->frame.saved_varargs_size ++ + crtl->args.pretend_args_size; ++ sw_64_arg_offset = -frame_size + 48; ++ ++ /* Describe our frame. If the frame size is larger than an integer, ++ print it as zero to avoid an assembler error. We won't be ++ properly describing such a frame, but that's the best we can do. */ ++ if (!flag_inhibit_size_directive) ++ fprintf (file, "\t.frame $%d," HOST_WIDE_INT_PRINT_DEC ",$26,%d\n", ++ (frame_pointer_needed ? HARD_FRAME_POINTER_REGNUM ++ : STACK_POINTER_REGNUM), ++ frame_size >= max_frame_size ? 0 : frame_size, ++ crtl->args.pretend_args_size); ++ ++ /* Describe which registers were spilled. */ ++ if (!flag_inhibit_size_directive) ++ { ++ if (imask) ++ { ++ fprintf (file, "\t.mask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", imask, ++ frame_size >= max_frame_size ? 
0 : reg_offset - frame_size); ++ ++ for (i = 0; i < 32; ++i) ++ if (imask & (1UL << i)) ++ reg_offset += 8; ++ } ++ ++ if (fmask) ++ fprintf (file, "\t.fmask 0x%lx," HOST_WIDE_INT_PRINT_DEC "\n", fmask, ++ frame_size >= max_frame_size ? 0 : reg_offset - frame_size); ++ } ++} ++ ++/* Emit the .prologue note at the scheduled end of the prologue. */ ++ ++static void ++sw_64_output_function_end_prologue (FILE *file) ++{ ++ if (!flag_inhibit_size_directive) ++ fprintf (file, "\t.prologue %d\n", ++ sw_64_function_needs_gp || cfun->is_thunk); ++} ++ ++/* Write function epilogue. */ ++ ++void ++sw_64_expand_epilogue (void) ++{ ++ /* Registers to save. */ ++ unsigned HOST_WIDE_INT sa_mask = cfun->machine->frame.sa_mask; ++ /* Stack space needed for pushing registers clobbered by us. */ ++ HOST_WIDE_INT sa_size = cfun->machine->frame.saved_regs_size; ++ /* Complete stack size needed. */ ++ HOST_WIDE_INT frame_size = cfun->machine->frame.frame_size; ++ /* Offset from base reg to register save area. 
*/ ++ HOST_WIDE_INT reg_offset; ++ int fp_is_frame_pointer, fp_offset; ++ rtx sa_reg, sa_reg_exp = NULL; ++ rtx sp_adj1, sp_adj2, mem, reg, insn; ++ rtx eh_ofs; ++ rtx cfa_restores = NULL_RTX; ++ bool fp_flag = false; ++ ++#ifdef SW_64_ENABLE_FULL_ASAN ++ reg_offset = aligned_upper_bound (crtl->outgoing_args_size, ++ STACK_BOUNDARY / BITS_PER_UNIT); ++#else ++ reg_offset = SW_64_ROUND (crtl->outgoing_args_size); ++#endif ++ ++ if (strcmp ("main", lang_hooks.decl_printable_name (cfun->decl, 1)) == 0 ++ && flag_sw_hardware_prefetch) ++ { ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (-256))); ++ rtx mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (8)); ++ rtx tmp16 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (16)); ++ rtx tmp17 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (24)); ++ rtx tmp18 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (32)); ++ rtx tmp19 = gen_rtx_MEM (Pmode, mem_address); ++ mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (40)); ++ rtx tmp26 = gen_rtx_MEM (Pmode, mem_address); ++ ++ emit_move_insn (tmp16, gen_rtx_REG (DImode, 16)); ++ emit_move_insn (tmp17, gen_rtx_REG (DImode, 17)); ++ emit_move_insn (tmp18, gen_rtx_REG (DImode, 18)); ++ emit_move_insn (tmp19, gen_rtx_REG (DImode, 19)); ++ emit_move_insn (tmp26, gen_rtx_REG (DImode, 26)); ++ ++ rtx tmp_clt = gen_rtx_REG (DImode, 7); ++ rtx tmp_cnt = gen_rtx_REG (DImode, 8); ++ rtx op = gen_rtx_REG (DImode, 17); ++ ++ unsigned long clt1, clt2, clt3; ++ unsigned long cnt1, cnt2, cnt3; ++ clt1 = 1; ++ clt2 = 0; ++ clt3 = 1; ++ cnt1 = 0; ++ cnt2 = 0; ++ cnt3 = 5; ++ sw_64_emit_set_const (op, DImode, 0x10, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt1, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 
0x11, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt2, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x12, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, clt3, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x1, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt1, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x4, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt2, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ sw_64_emit_set_const (op, DImode, 0x8, 3, false); ++ sw_64_emit_set_const (tmp_clt, DImode, cnt3, 3, false); ++ emit_insn (gen_hardware_prefetch_use_syscall (tmp_clt, op)); ++ ++ emit_move_insn (gen_rtx_REG (DImode, 16), tmp16); ++ emit_move_insn (gen_rtx_REG (DImode, 17), tmp17); ++ emit_move_insn (gen_rtx_REG (DImode, 18), tmp18); ++ emit_move_insn (gen_rtx_REG (DImode, 19), tmp19); ++ emit_move_insn (gen_rtx_REG (DImode, 26), tmp26); ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (256))); ++ } ++ ++ fp_is_frame_pointer = frame_pointer_needed; ++ fp_offset = 0; ++ sa_reg = stack_pointer_rtx; ++ ++ if (crtl->calls_eh_return) ++ eh_ofs = EH_RETURN_STACKADJ_RTX; ++ else ++ eh_ofs = NULL_RTX; ++ ++ if (sa_size) ++ { ++ /* If we have a frame pointer, restore SP from it. 
*/ ++ if (frame_pointer_needed) ++ { ++ long adj_size = SW_64_ROUND (crtl->outgoing_args_size); ++ if (adj_size > 0xFFFFFFFF) ++ { ++ int low = ((adj_size & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT bias; ++ ++ if (low <= 0x8000) ++ bias = adj_size - low, adj_size = low; ++ else ++ bias = adj_size, adj_size = 0; ++ ++ rtx sa_reg = stack_pointer_rtx; ++ rtx tmp_reg = gen_rtx_REG (DImode, 28); ++ rtx tmp = gen_int_mode (bias, DImode); ++ emit_move_insn (tmp_reg, tmp); ++ rtx adden = gen_movdi (tmp_reg, tmp); ++ sa_reg_exp = gen_adddi3 (sa_reg, hard_frame_pointer_rtx, tmp_reg); ++ emit_insn (sa_reg_exp); ++ if (adj_size != 0) ++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (-adj_size))); ++ } ++ else if (adj_size > 0x8000) ++ { ++ int low = ((adj_size & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT bias; ++ ++ if (low <= 0x8000) ++ bias = adj_size - low, adj_size = low; ++ else ++ bias = adj_size, adj_size = 0; ++ ++ rtx sa_reg = stack_pointer_rtx; ++ rtx sa_reg_exp ++ = plus_constant (Pmode, hard_frame_pointer_rtx, -bias); ++ emit_move_insn (sa_reg, sa_reg_exp); ++ if (adj_size != 0) ++ emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx, ++ GEN_INT (-adj_size))); ++ } ++ else ++ { ++ emit_insn ( ++ gen_stack_tie (stack_pointer_rtx, hard_frame_pointer_rtx)); ++ rtx insn ++ = gen_add3_insn (stack_pointer_rtx, hard_frame_pointer_rtx, ++ GEN_INT ( ++ -SW_64_ROUND (crtl->outgoing_args_size))); ++ emit_insn (insn); ++ } ++ } ++ // emit_move_insn (stack_pointer_rtx, ++ //hard_frame_pointer_rtx); ++ ++ /* Cope with very large offsets to the register save area. 
*/ ++ if (reg_offset + sa_size > 0x7FFFFFFFFFFFUL) ++ { ++ int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT bias; ++ ++ if (low + sa_size <= 0x8000) ++ bias = reg_offset - low, reg_offset = low; ++ else ++ bias = reg_offset, reg_offset = 0; ++ ++ sa_reg = gen_rtx_REG (DImode, 22); ++ rtx tmp_reg = gen_rtx_REG (DImode, 28); ++ rtx tmp = gen_int_mode (bias, DImode); ++ emit_move_insn (tmp_reg, tmp); ++ rtx adden = gen_movdi (tmp_reg, tmp); ++ sa_reg_exp = gen_adddi3 (sa_reg, stack_pointer_rtx, tmp_reg); ++ ++ emit_insn (sa_reg_exp); ++ } ++ else if (reg_offset + sa_size > 0x8000) ++ { ++ int low = ((reg_offset & 0xffff) ^ 0x8000) - 0x8000; ++ HOST_WIDE_INT bias; ++ ++ if (low + sa_size <= 0x8000) ++ bias = reg_offset - low, reg_offset = low; ++ else ++ bias = reg_offset, reg_offset = 0; ++ ++ sa_reg = gen_rtx_REG (DImode, 22); ++ sa_reg_exp = plus_constant (Pmode, stack_pointer_rtx, bias); ++ ++ emit_move_insn (sa_reg, sa_reg_exp); ++ } ++ ++ /* Restore registers in order, excepting a true frame pointer. 
*/ ++ for (unsigned i = REG_RA; sa_mask != 0; i = ctz_hwi (sa_mask)) ++ { ++ if (fp_is_frame_pointer && i != REG_RA && fp_flag == false) ++ { ++ emit_insn (gen_blockage ()); ++ mem = gen_frame_mem (DImode, ++ plus_constant (Pmode, sa_reg, reg_offset)); ++ emit_move_insn (hard_frame_pointer_rtx, mem); ++ cfa_restores ++ = alloc_reg_note (REG_CFA_RESTORE, hard_frame_pointer_rtx, ++ cfa_restores); ++ sa_mask &= ~(1UL << HARD_FRAME_POINTER_REGNUM); ++ reg_offset += 8; ++ fp_offset = reg_offset; ++ fp_flag = true; ++ } ++ else ++ { ++ mem = gen_frame_mem (DImode, ++ plus_constant (Pmode, sa_reg, reg_offset)); ++ reg = gen_rtx_REG (DImode, i); ++ emit_move_insn (reg, mem); ++ cfa_restores ++ = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores); ++ reg_offset += 8; ++ sa_mask &= ~(HOST_WIDE_INT_1U << i); ++ } ++ } ++ } ++ ++ if (frame_size || eh_ofs) ++ { ++ sp_adj1 = stack_pointer_rtx; ++ ++ if (eh_ofs) ++ { ++ sp_adj1 = gen_rtx_REG (DImode, 23); ++ emit_move_insn (sp_adj1, ++ gen_rtx_PLUS (Pmode, stack_pointer_rtx, eh_ofs)); ++ } ++ ++ /* If the stack size is large, begin computation into a temporary ++ register so as not to interfere with a potential fp restore, ++ which must be consecutive with an SP restore. */ ++ if (frame_size < 32768 && !cfun->calls_alloca) ++ sp_adj2 = GEN_INT (frame_size); ++ else if (frame_size < 0x40007fffL) ++ { ++ int low = ((frame_size & 0xffff) ^ 0x8000) - 0x8000; ++ ++ sp_adj2 = plus_constant (Pmode, sp_adj1, frame_size - low); ++ if (sa_reg_exp && rtx_equal_p (sa_reg_exp, sp_adj2)) ++ sp_adj1 = sa_reg; ++ else ++ { ++ sp_adj1 = gen_rtx_REG (DImode, 23); ++ emit_move_insn (sp_adj1, sp_adj2); ++ } ++ sp_adj2 = GEN_INT (low); ++ } ++ else ++ { ++ rtx tmp = gen_rtx_REG (DImode, 23); ++ sp_adj2 = sw_64_emit_set_const (tmp, DImode, frame_size, 3, false); ++ if (!sp_adj2) ++ { ++ /* We can't drop new things to memory this late, afaik, ++ so build it up by pieces. 
*/ ++ sp_adj2 = sw_64_emit_set_long_const (tmp, frame_size); ++ gcc_assert (sp_adj2); ++ } ++ } ++ ++ /* Restore the stack pointer. */ ++ emit_insn (gen_blockage ()); ++ if (sp_adj2 == const0_rtx) ++ insn = emit_move_insn (stack_pointer_rtx, sp_adj1); ++ else ++ insn = emit_move_insn (stack_pointer_rtx, ++ gen_rtx_PLUS (DImode, sp_adj1, sp_adj2)); ++ REG_NOTES (insn) = cfa_restores; ++ add_reg_note (insn, REG_CFA_DEF_CFA, stack_pointer_rtx); ++ RTX_FRAME_RELATED_P (insn) = 1; ++ } ++ else ++ { ++ gcc_assert (cfa_restores == NULL); ++ } ++ if (strcmp ("main", lang_hooks.decl_printable_name (cfun->decl, 1)) == 0 ++ && (TARGET_SW_32ALIGN || TARGET_SW_SIMD)) ++ { ++ rtx mem_address = gen_rtx_PLUS (Pmode, stack_pointer_rtx, GEN_INT (0)); ++ rtx tmp7 = gen_rtx_MEM (Pmode, mem_address); ++ emit_move_insn (gen_rtx_REG (DImode, 7), tmp7); ++ rtx const16 = gen_rtx_REG (DImode, 7); ++ emit_insn ( ++ gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, GEN_INT (32))); ++ emit_insn (gen_adddi3 (stack_pointer_rtx, stack_pointer_rtx, const16)); ++ } ++} ++ ++/* Output the rest of the textual info surrounding the epilogue. */ ++ ++void ++sw_64_end_function (FILE *file, const char *fnname, tree decl ATTRIBUTE_UNUSED) ++{ ++ rtx_insn *insn; ++ ++ /* We output a nop after noreturn calls at the very end of the function to ++ ensure that the return address always remains in the caller's code range, ++ as not doing so might confuse unwinding engines. */ ++ insn = get_last_insn (); ++ if (!INSN_P (insn)) ++ insn = prev_active_insn (insn); ++ if (insn && CALL_P (insn)) ++ output_asm_insn (get_insn_template (CODE_FOR_nop, NULL), NULL); ++ ++ /* End the function. */ ++ if (!flag_inhibit_size_directive) ++ { ++ fputs ("\t.end ", file); ++ assemble_name (file, fnname); ++ putc ('\n', file); ++ } ++ inside_function = FALSE; ++} ++ ++/* Emit a tail call to FUNCTION after adjusting THIS by DELTA. 
++ ++ In order to avoid the hordes of differences between generated code ++ with and without TARGET_EXPLICIT_RELOCS, and to avoid duplicating ++ lots of code loading up large constants, generate rtl and emit it ++ instead of going straight to text. ++ ++ Not sure why this idea hasn't been explored before... */ ++ ++static void ++sw_64_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED, ++ HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset, ++ tree function) ++{ ++ const char *fnname = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (thunk_fndecl)); ++ HOST_WIDE_INT hi, lo; ++ rtx this_rtx, funexp; ++ rtx_insn *insn; ++ ++ /* We always require a valid GP. */ ++ emit_insn (gen_prologue_ldgp ()); ++ emit_note (NOTE_INSN_PROLOGUE_END); ++ ++ /* Find the "this" pointer. If the function returns a structure, ++ the structure return pointer is in $16. */ ++ if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function)) ++ this_rtx = gen_rtx_REG (Pmode, 17); ++ else ++ this_rtx = gen_rtx_REG (Pmode, 16); ++ ++ /* Add DELTA. When possible we use ldih+ldi. Otherwise load the ++ entire constant for the add. */ ++ lo = ((delta & 0xffff) ^ 0x8000) - 0x8000; ++ hi = (((delta - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000; ++ if (hi + lo == delta) ++ { ++ if (hi) ++ emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (hi))); ++ if (lo) ++ emit_insn (gen_adddi3 (this_rtx, this_rtx, GEN_INT (lo))); ++ } ++ else ++ { ++ rtx tmp = sw_64_emit_set_long_const (gen_rtx_REG (Pmode, 0), delta); ++ emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp)); ++ } ++ ++ /* Add a delta stored in the vtable at VCALL_OFFSET. 
*/ ++ if (vcall_offset) ++ { ++ rtx tmp, tmp2; ++ ++ tmp = gen_rtx_REG (Pmode, 0); ++ emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx)); ++ ++ lo = ((vcall_offset & 0xffff) ^ 0x8000) - 0x8000; ++ hi = (((vcall_offset - lo) & 0xffffffff) ^ 0x80000000) - 0x80000000; ++ if (hi + lo == vcall_offset) ++ { ++ if (hi) ++ emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (hi))); ++ } ++ else ++ { ++ tmp2 ++ = sw_64_emit_set_long_const (gen_rtx_REG (Pmode, 1), vcall_offset); ++ emit_insn (gen_adddi3 (tmp, tmp, tmp2)); ++ lo = 0; ++ } ++ if (lo) ++ tmp2 = gen_rtx_PLUS (Pmode, tmp, GEN_INT (lo)); ++ else ++ tmp2 = tmp; ++ emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp2)); ++ ++ emit_insn (gen_adddi3 (this_rtx, this_rtx, tmp)); ++ } ++ ++ /* Generate a tail call to the target function. */ ++ if (!TREE_USED (function)) ++ { ++ assemble_external (function); ++ TREE_USED (function) = 1; ++ } ++ funexp = XEXP (DECL_RTL (function), 0); ++ funexp = gen_rtx_MEM (FUNCTION_MODE, funexp); ++ insn = emit_call_insn (gen_sibcall (funexp, const0_rtx)); ++ SIBLING_CALL_P (insn) = 1; ++ ++ /* Run just enough of rest_of_compilation to get the insns emitted. ++ There's not really enough bulk here to make other passes such as ++ instruction scheduling worth while. */ ++ insn = get_insns (); ++ shorten_branches (insn); ++ assemble_start_function (thunk_fndecl, fnname); ++ final_start_function (insn, file, 1); ++ final (insn, file, 1); ++ final_end_function (); ++ assemble_end_function (thunk_fndecl, fnname); ++} ++ ++/* Name of the file containing the current function. */ ++ ++static const char *current_function_file = ""; ++ ++/* Offsets to sw_64 virtual arg/local debugging pointers. */ ++ ++long sw_64_arg_offset; ++long sw_64_auto_offset; ++ ++/* Emit a new filename to a stream. 
*/ ++ ++void ++sw_64_output_filename (FILE *stream, const char *name) ++{ ++ static int first_time = TRUE; ++ ++ if (first_time) ++ { ++ first_time = FALSE; ++ ++num_source_filenames; ++ current_function_file = name; ++ fprintf (stream, "\t.file\t "); ++ output_quoted_string (stream, name); ++ fprintf (stream, "\n"); ++ } ++ ++ else if (name != current_function_file ++ && strcmp (name, current_function_file) != 0) ++ { ++ ++num_source_filenames; ++ current_function_file = name; ++ fprintf (stream, "\t.file\t "); ++ ++ output_quoted_string (stream, name); ++ fprintf (stream, "\n"); ++ } ++} ++ ++/* Structure to show the current status of registers and memory. */ ++ ++struct shadow_summary ++{ ++ struct ++ { ++ unsigned int i : 31; /* Mask of int regs */ ++ unsigned int fp : 31; /* Mask of fp regs */ ++ unsigned int mem : 1; /* mem == imem | fpmem */ ++ } used, defd; ++}; ++ ++/* Summary the effects of expression X on the machine. Update SUM, a pointer ++ to the summary structure. SET is nonzero if the insn is setting the ++ object, otherwise zero. */ ++ ++static void ++summarize_insn (rtx x, struct shadow_summary *sum, int set) ++{ ++ const char *format_ptr; ++ int i, j; ++ ++ if (x == 0) ++ return; ++ ++ switch (GET_CODE (x)) ++ { ++ /* ??? Note that this case would be incorrect if the Sw_64 had a ++ ZERO_EXTRACT in SET_DEST. 
*/ ++ case SET: ++ summarize_insn (SET_SRC (x), sum, 0); ++ summarize_insn (SET_DEST (x), sum, 1); ++ break; ++ ++ case CLOBBER: ++ summarize_insn (XEXP (x, 0), sum, 1); ++ break; ++ ++ case USE: ++ summarize_insn (XEXP (x, 0), sum, 0); ++ break; ++ ++ case ASM_OPERANDS: ++ for (i = ASM_OPERANDS_INPUT_LENGTH (x) - 1; i >= 0; i--) ++ summarize_insn (ASM_OPERANDS_INPUT (x, i), sum, 0); ++ break; ++ ++ case PARALLEL: ++ for (i = XVECLEN (x, 0) - 1; i >= 0; i--) ++ summarize_insn (XVECEXP (x, 0, i), sum, 0); ++ break; ++ ++ case SUBREG: ++ summarize_insn (SUBREG_REG (x), sum, 0); ++ break; ++ ++ case REG: { ++ int regno = REGNO (x); ++ unsigned long mask = ((unsigned long) 1) << (regno % 32); ++ ++ if (regno == 31 || regno == 63) ++ break; ++ ++ if (set) ++ { ++ if (regno < 32) ++ sum->defd.i |= mask; ++ else ++ sum->defd.fp |= mask; ++ } ++ else ++ { ++ if (regno < 32) ++ sum->used.i |= mask; ++ else ++ sum->used.fp |= mask; ++ } ++ } ++ break; ++ ++ case MEM: ++ if (set) ++ sum->defd.mem = 1; ++ else ++ sum->used.mem = 1; ++ ++ /* Find the regs used in memory address computation: */ ++ summarize_insn (XEXP (x, 0), sum, 0); ++ break; ++ ++ case CONST_INT: ++ case CONST_WIDE_INT: ++ case CONST_DOUBLE: ++ case SYMBOL_REF: ++ case LABEL_REF: ++ case CONST: ++ case SCRATCH: ++ case ASM_INPUT: ++ break; ++ ++ /* Handle common unary and binary ops for efficiency. 
*/ ++ case COMPARE: ++ case PLUS: ++ case MINUS: ++ case MULT: ++ case DIV: ++ case MOD: ++ case UDIV: ++ case UMOD: ++ case AND: ++ case IOR: ++ case XOR: ++ case ASHIFT: ++ case ROTATE: ++ case ASHIFTRT: ++ case LSHIFTRT: ++ case ROTATERT: ++ case SMIN: ++ case SMAX: ++ case UMIN: ++ case UMAX: ++ case NE: ++ case EQ: ++ case GE: ++ case GT: ++ case LE: ++ case LT: ++ case GEU: ++ case GTU: ++ case LEU: ++ case LTU: ++ summarize_insn (XEXP (x, 0), sum, 0); ++ summarize_insn (XEXP (x, 1), sum, 0); ++ break; ++ ++ case NEG: ++ case NOT: ++ case SIGN_EXTEND: ++ case ZERO_EXTEND: ++ case TRUNCATE: ++ case FLOAT_EXTEND: ++ case FLOAT_TRUNCATE: ++ case FLOAT: ++ case FIX: ++ case UNSIGNED_FLOAT: ++ case UNSIGNED_FIX: ++ case ABS: ++ case SQRT: ++ case FFS: ++ summarize_insn (XEXP (x, 0), sum, 0); ++ break; ++ ++ default: ++ format_ptr = GET_RTX_FORMAT (GET_CODE (x)); ++ for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--) ++ switch (format_ptr[i]) ++ { ++ case 'e': ++ summarize_insn (XEXP (x, i), sum, 0); ++ break; ++ ++ case 'E': ++ for (j = XVECLEN (x, i) - 1; j >= 0; j--) ++ summarize_insn (XVECEXP (x, i, j), sum, 0); ++ break; ++ ++ case 'i': ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++} ++ ++/* Ensure a sufficient number of `memb' insns are in the code when ++ the user requests code with a trap precision of functions or ++ instructions. ++ ++ In naive mode, when the user requests a trap-precision of ++ "instruction", a memb is needed after every instruction that may ++ generate a trap. This ensures that the code is resumption safe but ++ it is also slow. ++ ++ When optimizations are turned on, we delay issuing a memb as long ++ as possible. In this context, a trap shadow is the sequence of ++ instructions that starts with a (potentially) trap generating ++ instruction and extends to the next memb. 
We can delay (and ++ therefore sometimes omit) a memb subject to the following ++conditions: ++ ++(a) On entry to the trap shadow, if any Sw_64 register or memory ++location contains a value that is used as an operand value by some ++instruction in the trap shadow (live on entry), then no instruction ++in the trap shadow may modify the register or memory location. ++ ++(b) Within the trap shadow, the computation of the base register ++for a memory load or store instruction may not involve using the ++result of an instruction that might generate an UNPREDICTABLE ++result. ++ ++(c) Within the trap shadow, no register may be used more than once ++as a destination register. (This is to make life easier for the ++trap-handler.) ++ ++(d) The trap shadow may not include any branch instructions. */ ++ ++static void ++sw_64_handle_trap_shadows (void) ++{ ++ struct shadow_summary shadow; ++ int trap_pending, exception_nesting; ++ rtx_insn *i, *n; ++ ++ trap_pending = 0; ++ exception_nesting = 0; ++ shadow.used.i = 0; ++ shadow.used.fp = 0; ++ shadow.used.mem = 0; ++ shadow.defd = shadow.used; ++ ++ for (i = get_insns (); i; i = NEXT_INSN (i)) ++ { ++ if (NOTE_P (i)) ++ { ++ switch (NOTE_KIND (i)) ++ { ++ case NOTE_INSN_EH_REGION_BEG: ++ exception_nesting++; ++ if (trap_pending) ++ goto close_shadow; ++ break; ++ ++ case NOTE_INSN_EH_REGION_END: ++ exception_nesting--; ++ if (trap_pending) ++ goto close_shadow; ++ break; ++ ++ case NOTE_INSN_EPILOGUE_BEG: ++ if (trap_pending && sw_64_tp >= SW_64_TP_FUNC) ++ goto close_shadow; ++ break; ++ } ++ } ++ else if (trap_pending) ++ { ++ if (sw_64_tp == SW_64_TP_FUNC) ++ { ++ if (JUMP_P (i) && GET_CODE (PATTERN (i)) == RETURN) ++ goto close_shadow; ++ } ++ else if (sw_64_tp == SW_64_TP_INSN) ++ { ++ if (optimize > 0) ++ { ++ struct shadow_summary sum; ++ ++ sum.used.i = 0; ++ sum.used.fp = 0; ++ sum.used.mem = 0; ++ sum.defd = sum.used; ++ ++ switch (GET_CODE (i)) ++ { ++ case INSN: ++ /* Annoyingly, get_attr_trap will die on these. 
*/ ++ if (GET_CODE (PATTERN (i)) == USE ++ || GET_CODE (PATTERN (i)) == CLOBBER) ++ break; ++ ++ summarize_insn (PATTERN (i), &sum, 0); ++ ++ if ((sum.defd.i & shadow.defd.i) ++ || (sum.defd.fp & shadow.defd.fp)) ++ { ++ /* (c) would be violated */ ++ goto close_shadow; ++ } ++ ++ /* Combine shadow with summary of current insn: */ ++ shadow.used.i |= sum.used.i; ++ shadow.used.fp |= sum.used.fp; ++ shadow.used.mem |= sum.used.mem; ++ shadow.defd.i |= sum.defd.i; ++ shadow.defd.fp |= sum.defd.fp; ++ shadow.defd.mem |= sum.defd.mem; ++ ++ if ((sum.defd.i & shadow.used.i) ++ || (sum.defd.fp & shadow.used.fp) ++ || (sum.defd.mem & shadow.used.mem)) ++ { ++ /* (a) would be violated (also takes care of (b)) */ ++ gcc_assert (get_attr_trap (i) != TRAP_YES ++ || (!(sum.defd.i & sum.used.i) ++ && !(sum.defd.fp & sum.used.fp))); ++ ++ goto close_shadow; ++ } ++ break; ++ ++ case BARRIER: ++ /* __builtin_unreachable can expand to no code at all, ++ leaving (barrier) RTXes in the instruction stream. */ ++ goto close_shadow_notrapb; ++ ++ case JUMP_INSN: ++ case CALL_INSN: ++ case CODE_LABEL: ++ goto close_shadow; ++ ++ case DEBUG_INSN: ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ close_shadow: ++ n = emit_insn_before (gen_trapb (), i); ++ PUT_MODE (n, TImode); ++ PUT_MODE (i, TImode); ++ close_shadow_notrapb: ++ trap_pending = 0; ++ shadow.used.i = 0; ++ shadow.used.fp = 0; ++ shadow.used.mem = 0; ++ shadow.defd = shadow.used; ++ } ++ } ++ } ++ ++ if ((exception_nesting > 0 || sw_64_tp >= SW_64_TP_FUNC) ++ && NONJUMP_INSN_P (i) && GET_CODE (PATTERN (i)) != USE ++ && GET_CODE (PATTERN (i)) != CLOBBER && get_attr_trap (i) == TRAP_YES) ++ { ++ if (optimize && !trap_pending) ++ summarize_insn (PATTERN (i), &shadow, 0); ++ trap_pending = 1; ++ } ++ } ++} ++ ++/* Sw_64 can only issue instruction groups simultaneously if they are ++ suitably aligned. This is very processor-specific. */ ++ ++/* The instruction group alignment main loop. 
*/ ++ ++static void ++sw_64_align_insns_1 (unsigned int max_align, ++ rtx_insn *(*next_group) (rtx_insn *, int *, int *), ++ rtx (*next_nop) (int *)) ++{ ++ /* ALIGN is the known alignment for the insn group. */ ++ unsigned int align; ++ /* OFS is the offset of the current insn in the insn group. */ ++ int ofs; ++ int prev_in_use, in_use, len, ldgp; ++ rtx_insn *i, *next; ++ ++ /* Let shorten branches care for assigning alignments to code labels. */ ++ shorten_branches (get_insns ()); ++ ++ unsigned int option_alignment = align_functions.levels[0].get_value (); ++ if (option_alignment < 4) ++ align = 4; ++ else if ((unsigned int) option_alignment < max_align) ++ align = option_alignment; ++ else ++ align = max_align; ++ ++ ofs = prev_in_use = 0; ++ i = get_insns (); ++ if (NOTE_P (i)) ++ i = next_nonnote_insn (i); ++ ++ ldgp = sw_64_function_needs_gp ? 8 : 0; ++ ++ while (i) ++ { ++ next = (*next_group) (i, &in_use, &len); ++ ++ /* When we see a label, resync alignment etc. */ ++ if (LABEL_P (i)) ++ { ++ unsigned int new_align ++ = label_to_alignment (i).levels[0].get_value (); ++ if (new_align >= align) ++ { ++ align = new_align < max_align ? new_align : max_align; ++ ofs = 0; ++ } ++ ++ else if (ofs & (new_align - 1)) ++ ofs = (ofs | (new_align - 1)) + 1; ++ gcc_assert (!len); ++ } ++ ++ /* Handle complex instructions special. */ ++ else if (in_use == 0) ++ { ++ /* Asms will have length < 0. This is a signal that we have ++ lost alignment knowledge. Assume, however, that the asm ++ will not mis-align instructions. */ ++ if (len < 0) ++ { ++ ofs = 0; ++ align = 4; ++ len = 0; ++ } ++ } ++ ++ /* If the known alignment is smaller than the recognized insn group, ++ realign the output. */ ++ else if ((int) align < len) ++ { ++ unsigned int new_log_align = len > 8 ? 4 : 3; ++ rtx_insn *prev, *where; ++ ++ where = prev = prev_nonnote_insn (i); ++ if (!where || !LABEL_P (where)) ++ where = i; ++ ++ /* Can't realign between a call and its gp reload. 
*/ ++ if (!(TARGET_EXPLICIT_RELOCS && prev && CALL_P (prev))) ++ { ++ emit_insn_before (gen_realign (GEN_INT (new_log_align)), where); ++ align = 1 << new_log_align; ++ ofs = 0; ++ } ++ } ++ ++ /* We may not insert padding inside the initial ldgp sequence. */ ++ else if (ldgp > 0) ++ ldgp -= len; ++ ++ /* If the group won't fit in the same INT16 as the previous, ++ we need to add padding to keep the group together. Rather ++ than simply leaving the insn filling to the assembler, we ++ can make use of the knowledge of what sorts of instructions ++ were issued in the previous group to make sure that all of ++ the added nops are really free. */ ++ else if (ofs + len > (int) align) ++ { ++ int nop_count = (align - ofs) / 4; ++ rtx_insn *where; ++ ++ /* Insert nops before labels, branches, and calls to truly merge ++ the execution of the nops with the previous instruction group. */ ++ where = prev_nonnote_insn (i); ++ if (where) ++ { ++ if (LABEL_P (where)) ++ { ++ rtx_insn *where2 = prev_nonnote_insn (where); ++ if (where2 && JUMP_P (where2)) ++ where = where2; ++ } ++ else if (NONJUMP_INSN_P (where)) ++ where = i; ++ } ++ else ++ where = i; ++ ++ do ++ emit_insn_before ((*next_nop) (&prev_in_use), where); ++ while (--nop_count); ++ ofs = 0; ++ } ++ ++ ofs = (ofs + len) & (align - 1); ++ prev_in_use = in_use; ++ i = next; ++ } ++} ++ ++static void ++sw_64_align_insns (void) ++{ ++ gcc_unreachable (); ++} ++ ++/* Insert an unop between sibcall or noreturn function call and GP load. 
*/ ++ ++static void ++sw_64_pad_function_end (void) ++{ ++ rtx_insn *insn, *next; ++ ++ for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) ++ { ++ if (!CALL_P (insn) ++ || !(SIBLING_CALL_P (insn) ++ || find_reg_note (insn, REG_NORETURN, NULL_RTX))) ++ continue; ++ ++ next = next_active_insn (insn); ++ if (next) ++ { ++ rtx pat = PATTERN (next); ++ ++ if (GET_CODE (pat) == SET ++ && GET_CODE (SET_SRC (pat)) == UNSPEC_VOLATILE ++ && XINT (SET_SRC (pat), 1) == UNSPECV_LDGP1) ++ emit_insn_after (gen_unop (), insn); ++ } ++ } ++} ++ ++/* Machine dependent reorg pass. */ ++ ++static void ++sw_64_reorg (void) ++{ ++ /* Workaround for a linker error that triggers when an exception ++ handler immediatelly follows a sibcall or a noreturn function. ++ ++ In the sibcall case: ++ ++ The instruction stream from an object file: ++ ++1d8: 00 00 fb 6b jmp (t12) ++1dc: 00 00 ba 27 ldih gp,0(ra) ++1e0: 00 00 bd 23 ldi gp,0(gp) ++1e4: 00 00 7d a7 ldl t12,0(gp) ++1e8: 00 40 5b 6b call ra,(t12),1ec <__funcZ+0x1ec> ++ ++was converted in the final link pass to: ++ ++12003aa88: 67 fa ff c3 br 120039428 <...> ++12003aa8c: 00 00 fe 2f unop ++12003aa90: 00 00 fe 2f unop ++12003aa94: 48 83 7d a7 ldl t12,-31928(gp) ++12003aa98: 00 40 5b 6b call ra,(t12),12003aa9c <__func+0x1ec> ++ ++And in the noreturn case: ++ ++The instruction stream from an object file: ++ ++54: 00 40 5b 6b call ra,(t12),58 <__func+0x58> ++58: 00 00 ba 27 ldih gp,0(ra) ++5c: 00 00 bd 23 ldi gp,0(gp) ++60: 00 00 7d a7 ldl t12,0(gp) ++64: 00 40 5b 6b call ra,(t12),68 <__func+0x68> ++ ++was converted in the final link pass to: ++ ++fdb24: a0 03 40 d3 bsr ra,fe9a8 <_called_func+0x8> ++fdb28: 00 00 fe 2f unop ++fdb2c: 00 00 fe 2f unop ++fdb30: 30 82 7d a7 ldl t12,-32208(gp) ++fdb34: 00 40 5b 6b call ra,(t12),fdb38 <__func+0x68> ++ ++GP load instructions were wrongly cleared by the linker relaxation ++pass. 
This workaround prevents removal of GP loads by inserting ++an unop instruction between a sibcall or noreturn function call and ++exception handler prologue. */ ++ ++ if (current_function_has_exception_handlers ()) ++ sw_64_pad_function_end (); ++} ++ ++static void ++sw_64_file_start (void) ++{ ++ default_file_start (); ++ ++ fputs ("\t.set noreorder\n", asm_out_file); ++ fputs ("\t.set volatile\n", asm_out_file); ++ fputs ("\t.set noat\n", asm_out_file); ++ if (TARGET_EXPLICIT_RELOCS) ++ fputs ("\t.set nomacro\n", asm_out_file); ++ if (TARGET_SUPPORT_ARCH | TARGET_BWX | TARGET_MAX | TARGET_FIX | TARGET_CIX ++ | TARGET_SW6A | TARGET_SW6B | TARGET_SW4D | TARGET_SW8A) ++ { ++ const char *arch; ++ ++ if (sw_64_cpu == PROCESSOR_SW6 || TARGET_FIX || TARGET_CIX || TARGET_SW8A) ++ { ++ if (TARGET_SW6A) ++ arch = "sw6a"; ++ else if (TARGET_SW6B) ++ arch = "sw6b"; ++ else if (TARGET_SW4D) ++ arch = "sw4d"; ++ else if (TARGET_SW8A) ++ arch = "sw8a"; ++ } ++ else if (TARGET_MAX) ++ arch = "pca56"; ++ else ++ arch = "sw6b"; ++ ++ fprintf (asm_out_file, "\t.arch %s\n", arch); ++ } ++} ++ ++/* Since we don't have a .dynbss section, we should not allow global ++ relocations in the .rodata section. */ ++ ++static int ++sw_64_elf_reloc_rw_mask (void) ++{ ++ return flag_pic ? 3 : 2; ++} ++ ++/* Return a section for X. The only special thing we do here is to ++ honor small data. */ ++ ++static section * ++sw_64_elf_select_rtx_section (machine_mode mode, rtx x, ++ unsigned HOST_WIDE_INT align) ++{ ++ if (TARGET_SMALL_DATA && GET_MODE_SIZE (mode) <= g_switch_value) ++ /* ??? Consider using mergeable sdata sections. 
*/ ++ return sdata_section; ++ else ++ return default_elf_select_rtx_section (mode, x, align); ++} ++ ++static unsigned int ++sw_64_elf_section_type_flags (tree decl, const char *name, int reloc) ++{ ++ unsigned int flags = 0; ++ ++ if (strcmp (name, ".sdata") == 0 || startswith (name, ".sdata.") ++ || startswith (name, ".gnu.linkonce.s.") || strcmp (name, ".sbss") == 0 ++ || startswith (name, ".sbss.") || startswith (name, ".gnu.linkonce.sb.")) ++ flags = SECTION_SMALL; ++ ++ flags |= default_section_type_flags (decl, name, reloc); ++ return flags; ++} ++ ++/* Structure to collect function names for final output in link section. */ ++/* Note that items marked with GTY can't be ifdef'ed out. */ ++ ++enum reloc_kind ++{ ++ KIND_LINKAGE, ++ KIND_CODEADDR ++}; ++ ++struct GTY (()) sw_64_links ++{ ++ rtx func; ++ rtx linkage; ++ enum reloc_kind rkind; ++}; ++ ++rtx ++sw_64_use_linkage (rtx func ATTRIBUTE_UNUSED, bool lflag ATTRIBUTE_UNUSED, ++ bool rflag ATTRIBUTE_UNUSED) ++{ ++ return NULL_RTX; ++} ++ ++/* On the Sw_64, we use this to disable the floating-point registers ++ when they don't exist. */ ++ ++static void ++sw_64_conditional_register_usage (void) ++{ ++ int i; ++ if (!TARGET_FPREGS) ++ for (i = 32; i < 63; i++) ++ fixed_regs[i] = call_used_regs[i] = 1; ++} ++ ++/* Canonicalize a comparison from one we don't have to one we do have. */ ++ ++static void ++sw_64_canonicalize_comparison (int *code, rtx *op0, rtx *op1, ++ bool op0_preserve_value) ++{ ++ if (!op0_preserve_value ++ && (*code == GE || *code == GT || *code == GEU || *code == GTU) ++ && (REG_P (*op1) || *op1 == const0_rtx)) ++ { ++ std::swap (*op0, *op1); ++ *code = (int) swap_condition ((enum rtx_code) * code); ++ } ++ ++ if ((*code == LT || *code == LTU) && CONST_INT_P (*op1) ++ && INTVAL (*op1) == 256) ++ { ++ *code = *code == LT ? LE : LEU; ++ *op1 = GEN_INT (255); ++ } ++} ++ ++/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV. 
*/ ++ ++static void ++sw_64_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update) ++{ ++ const unsigned HOST_WIDE_INT SWCR_STATUS_MASK = (0x3fUL << 17); ++ ++ tree fenv_var, get_fpscr, set_fpscr, mask, ld_fenv, masked_fenv; ++ tree new_fenv_var, reload_fenv, restore_fnenv; ++ tree update_call, atomic_feraiseexcept, hold_fnclex; ++ ++ /* Generate the equivalent of : ++ unsigned long fenv_var; ++ fenv_var = __ieee_get_fp_control (); ++ ++ unsigned long masked_fenv; ++ masked_fenv = fenv_var & mask; ++ ++ __ieee_set_fp_control (masked_fenv); */ ++ ++ fenv_var = create_tmp_var_raw (long_unsigned_type_node); ++ get_fpscr ++ = build_fn_decl ("__ieee_get_fp_control", ++ build_function_type_list (long_unsigned_type_node, NULL)); ++ set_fpscr = build_fn_decl ("__ieee_set_fp_control", ++ build_function_type_list (void_type_node, NULL)); ++ mask = build_int_cst (long_unsigned_type_node, ~SWCR_STATUS_MASK); ++ ld_fenv = build4 (TARGET_EXPR, long_unsigned_type_node, fenv_var, ++ build_call_expr (get_fpscr, 0), NULL_TREE, NULL_TREE); ++ masked_fenv = build2 (BIT_AND_EXPR, long_unsigned_type_node, fenv_var, mask); ++ hold_fnclex = build_call_expr (set_fpscr, 1, masked_fenv); ++ *hold = build2 (COMPOUND_EXPR, void_type_node, ++ build2 (COMPOUND_EXPR, void_type_node, masked_fenv, ld_fenv), ++ hold_fnclex); ++ ++ /* Store the value of masked_fenv to clear the exceptions: ++ __ieee_set_fp_control (masked_fenv); */ ++ ++ *clear = build_call_expr (set_fpscr, 1, masked_fenv); ++ ++ /* Generate the equivalent of : ++ unsigned long new_fenv_var; ++ new_fenv_var = __ieee_get_fp_control (); ++ ++ __ieee_set_fp_control (fenv_var); ++ ++ __atomic_feraiseexcept (new_fenv_var); */ ++ ++ new_fenv_var = create_tmp_var_raw (long_unsigned_type_node); ++ reload_fenv = build4 (TARGET_EXPR, long_unsigned_type_node, new_fenv_var, ++ build_call_expr (get_fpscr, 0), NULL_TREE, NULL_TREE); ++ restore_fnenv = build_call_expr (set_fpscr, 1, fenv_var); ++ atomic_feraiseexcept = 
builtin_decl_implicit (BUILT_IN_ATOMIC_FERAISEEXCEPT); ++ update_call ++ = build_call_expr (atomic_feraiseexcept, 1, ++ fold_convert (integer_type_node, new_fenv_var)); ++ *update = build2 (COMPOUND_EXPR, void_type_node, ++ build2 (COMPOUND_EXPR, void_type_node, reload_fenv, ++ restore_fnenv), ++ update_call); ++} ++ ++/* Implement TARGET_HARD_REGNO_MODE_OK. On Sw_64, the integer registers ++ can hold any mode. The floating-point registers can hold 64-bit ++ integers as well, but not smaller values. */ ++ ++static bool ++sw_64_hard_regno_mode_ok (unsigned int regno, machine_mode mode) ++{ ++ if (IN_RANGE (regno, 32, 62)) ++ return (mode == SFmode || mode == DFmode || mode == DImode || mode == SCmode ++ || mode == DCmode); ++ return true; ++} ++ ++/* Implement TARGET_MODES_TIEABLE_P. This asymmetric test is true when ++ MODE1 could be put in an FP register but MODE2 could not. */ ++ ++static bool ++sw_64_modes_tieable_p (machine_mode mode1, machine_mode mode2) ++{ ++ return (sw_64_hard_regno_mode_ok (32, mode1) ++ ? sw_64_hard_regno_mode_ok (32, mode2) ++ : true); ++} ++ ++int ++enable_asan_check_stack () ++{ ++ return asan_sanitize_stack_p (); ++} ++ ++/* Implement TARGET_CAN_CHANGE_MODE_CLASS. */ ++ ++static bool ++sw_64_can_change_mode_class (machine_mode from, machine_mode to, ++ reg_class_t rclass) ++{ ++ return (GET_MODE_SIZE (from) == GET_MODE_SIZE (to) ++ || !reg_classes_intersect_p (FLOAT_REGS, rclass)); ++} ++ ++#ifdef ASM_OUTPUT_SIZE_DIRECTIVE ++ ++/* Emit either a label, .comm, or .lcomm directive. When using assembler ++ * macros, mark the symbol as written so that sw_64_asm_output_external ++ * won't emit an .extern for it. STREAM is the output file, NAME is the ++ * name of the symbol, INIT_STRING is the string that should be written ++ * before the symbol and FINAL_STRING is the string that should be ++ * written after it. FINAL_STRING is a printf format that consumes the ++ * remaining arguments. 
*/ ++ ++void ++sw_64_declare_object (FILE *stream, const char *name, const char *init_string, ++ const char *final_string, ...) ++{ ++ va_list ap; ++ ++ fputs (init_string, stream); ++ assemble_name (stream, name); ++ va_start (ap, final_string); ++ vfprintf (stream, final_string, ap); ++ va_end (ap); ++ ++ if (!TARGET_EXPLICIT_RELOCS) ++ { ++ tree name_tree = get_identifier (name); ++ TREE_ASM_WRITTEN (name_tree) = 1; ++ } ++} ++ ++extern int size_directive_output; ++ ++/* Implement ASM_DECLARE_OBJECT_NAME. This is like most of the standard ELF ++ * definitions except that it uses sw_64_declare_object to emit the label. */ ++ ++void ++sw_64_declare_object_name (FILE *stream, const char *name, ++ tree decl ATTRIBUTE_UNUSED) ++{ ++#ifdef ASM_OUTPUT_TYPE_DIRECTIVE ++#ifdef USE_GNU_UNIQUE_OBJECT ++ /* As in elfos.h. */ ++ if (USE_GNU_UNIQUE_OBJECT && DECL_ONE_ONLY (decl) ++ && (!DECL_ARTIFICIAL (decl) || !TREE_READONLY (decl))) ++ ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "gnu_unique_object"); ++ else ++#endif ++ ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object"); ++#endif ++ ++ size_directive_output = 0; ++ if (!flag_inhibit_size_directive && DECL_SIZE (decl)) ++ { ++ HOST_WIDE_INT size; ++ ++ size_directive_output = 1; ++ size = int_size_in_bytes (TREE_TYPE (decl)); ++ ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size); ++ } ++ ++ sw_64_declare_object (stream, name, "", ":\n"); ++} ++#endif ++ ++bool ++sw_64_slow_unaligned_access (machine_mode mode, unsigned int align) ++{ ++ return STRICT_ALIGNMENT; ++} ++ ++static bool ++sw_64_macro_fusion_p () ++{ ++ return (flag_sw_branch_fusion == 1); ++} ++ ++static bool ++sw_64_macro_fusion_pair_p (rtx_insn *condgen, rtx_insn *condjmp) ++{ ++ rtx src, dest; ++ enum rtx_code ccode; ++ rtx compare_set = NULL_RTX, test_if, cond; ++ rtx alu_set = NULL_RTX, addr = NULL_RTX; ++ if (get_attr_type (condjmp) != TYPE_IBR) ++ return false; ++ if (get_attr_type (condgen) != TYPE_ICMP) ++ return false; ++ compare_set = single_set (condgen); ++ 
if (compare_set == NULL_RTX) ++ { ++ int i; ++ rtx pat = PATTERN (condgen); ++ for (i = 0; i < XVECLEN (pat, 0); i++) ++ if (GET_CODE (XVECEXP (pat, 0, i)) == SET) ++ { ++ rtx set_src = SET_SRC (XVECEXP (pat, 0, i)); ++ alu_set = XVECEXP (pat, 0, i); ++ } ++ } ++ if (compare_set == NULL_RTX) ++ return false; ++ src = SET_SRC (compare_set); ++ if (GET_CODE (src) == UNSPEC) ++ return false; ++ test_if = SET_SRC (pc_set (condjmp)); ++ cond = XEXP (test_if, 0); ++ ccode = GET_CODE (cond); ++ return true; ++} ++ ++/* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */ ++static unsigned HOST_WIDE_INT ++sw_64_asan_shadow_offset (void) ++{ ++ return (HOST_WIDE_INT_1 << 49); ++} ++ ++static void ++sw_64_sa_mask (unsigned long *imaskP, unsigned long *fmaskP) ++{ ++ unsigned long imask = 0; ++ unsigned long fmask = 0; ++ unsigned int i; ++ ++ /* When outputting a thunk, we don't have valid register life info, ++ * but assemble_start_function wants to output .frame and .mask ++ * directives. */ ++ if (cfun->is_thunk) ++ { ++ *imaskP = 0; ++ *fmaskP = 0; ++ return; ++ } ++ ++#ifdef SW_64_ENABLE_FULL_ASAN ++ if (frame_pointer_needed) ++ imask |= (1UL << HARD_FRAME_POINTER_REGNUM); ++#endif ++ ++ /* One for every register we have to save. */ ++ for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ if (!fixed_regs[i] && !call_used_regs[i] && df_regs_ever_live_p (i) ++ && i != REG_RA) ++ { ++ if (i < 32) ++ imask |= (1UL << i); ++ else ++ fmask |= (1UL << (i - 32)); ++ } ++ ++ /* We need to restore these for the handler. */ ++ if (crtl->calls_eh_return) ++ { ++ for (i = 0;; ++i) ++ { ++ unsigned regno = EH_RETURN_DATA_REGNO (i); ++ if (regno == INVALID_REGNUM) ++ break; ++ imask |= 1UL << regno; ++ } ++ } ++ ++ /* If any register spilled, then spill the return address also. */ ++ /* ??? This is required by the Digital stack unwind specification ++ and isn't needed if we're doing Dwarf2 unwinding. 
*/ ++ if (imask || fmask || sw_64_ra_ever_killed ()) ++ imask |= (1UL << REG_RA); ++ ++ *imaskP = imask; ++ *fmaskP = fmask; ++} ++ ++int ++sw_64_sa_size (void) ++{ ++ unsigned long mask[2]; ++ int sa_size = 0; ++ int i, j; ++ ++ sw_64_sa_mask (&mask[0], &mask[1]); ++ ++ for (j = 0; j < 2; ++j) ++ for (i = 0; i < 32; ++i) ++ if ((mask[j] >> i) & 1) ++ sa_size++; ++ ++ /* Our size must be even (multiple of 16 bytes). */ ++ if (sa_size & 1) ++ sa_size++; ++ return sa_size * 8; ++} ++ ++/* Sw64 stack frames generated by this compiler look like: ++ ++ +-------------------------------+ ++ | | ++ | incoming stack arguments | ++ | | ++ +-------------------------------+ ++ | | <-- incoming stack pointer (aligned) ++ | callee-allocated save area | ++ | for register varargs | ++ | | ++ +-------------------------------+ ++ | | ++ +-------------------------------+ ++ | local variables | <-- frame_pointer_rtx ++ | | ++ +-------------------------------+ ++ | padding | ++ +-------------------------------+ ++ | callee-saved registers | frame.saved_regs_size ++ +-------------------------------+ ++ | FP' | ++ +-------------------------------+ ++ | RA' | ++ +-------------------------------+ <- hard_frame_pointer_rtx (aligned) ++ | padding | ++ +-------------------------------+ ++ | outgoing stack arguments | <-- arg_pointer ++ | | ++ +-------------------------------+ ++ | | <-- stack_pointer_rtx (aligned) ++ ++ The following registers are reserved during frame layout and should not be ++ used for any other purpose: ++ ++ TODO: add other register purpose ++ - r26(RA), r15(FP): Used by standard frame layout. ++ ++ These registers must be avoided in frame layout related code unless the ++ explicit intention is to interact with one of the features listed above. 
*/ ++ ++static void ++sw_64_layout_frame (void) ++{ ++ poly_int64 offset = 0; ++ ++ cfun->machine->frame.emit_frame_pointer ++ = frame_pointer_needed || crtl->calls_eh_return; ++ ++ unsigned HOST_WIDE_INT sa_mask = 0; ++ int sa_size; ++ ++ /* When outputting a thunk, we don't have valid register life info, ++ but assemble_start_function wants to output .frame and .mask ++ directives. */ ++ if (!cfun->is_thunk) ++ { ++ /* One for every register we have to save. */ ++ for (unsigned i = 0; i < FIRST_PSEUDO_REGISTER; i++) ++ if (!call_used_or_fixed_reg_p (i) && df_regs_ever_live_p (i) ++ && i != REG_RA) ++ sa_mask |= HOST_WIDE_INT_1U << i; ++ ++ /* We need to restore these for the handler. */ ++ if (crtl->calls_eh_return) ++ { ++ for (unsigned i = 0;; ++i) ++ { ++ unsigned regno = EH_RETURN_DATA_REGNO (i); ++ if (regno == INVALID_REGNUM) ++ break; ++ sa_mask |= HOST_WIDE_INT_1U << regno; ++ } ++ } ++ /* If any register spilled, then spill the return address also. */ ++ /* ??? This is required by the Digital stack unwind specification ++ and isn't needed if we're doing Dwarf2 unwinding. */ ++ if (sa_mask || sw_64_ra_ever_killed ()) ++ sa_mask |= HOST_WIDE_INT_1U << REG_RA; ++ } ++ sa_size = popcount_hwi (sa_mask); ++ poly_int64 frame_size = get_frame_size (); ++ ++ /* Our size must be even (multiple of 16 bytes). 
*/ ++ if (sa_size & 1) ++ sa_size++; ++ sa_size *= 8; ++ ++ poly_int64 varargs_and_saved_regs_size ++ = sa_size + cfun->machine->frame.saved_varargs_size ++ + crtl->args.pretend_args_size; ++ ++ poly_int64 varargs_size ++ = cfun->machine->frame.saved_varargs_size + crtl->args.pretend_args_size; ++ ++ HOST_WIDE_INT extra_alignment ++ = SW_64_ROUND (frame_size + cfun->machine->frame.saved_varargs_size) ++ - cfun->machine->frame.saved_varargs_size; ++ ++ poly_int64 outgoing_args = SW_64_ROUND (crtl->outgoing_args_size); ++ ++ cfun->machine->frame.local_offset ++ = cfun->machine->frame.saved_varargs_size + crtl->args.pretend_args_size; ++ ++ poly_int64 total_size ++ = aligned_upper_bound (varargs_and_saved_regs_size + frame_size, ++ STACK_BOUNDARY / BITS_PER_UNIT) ++ + outgoing_args; ++ ++ cfun->machine->frame.hard_frame_pointer_offset ++ = aligned_upper_bound (varargs_and_saved_regs_size + frame_size, ++ STACK_BOUNDARY / BITS_PER_UNIT); ++ ++ // TODO: does sw_64 need this feild? ++ cfun->machine->frame.callee_offset ++ = cfun->machine->frame.hard_frame_pointer_offset; ++ cfun->machine->frame.arg_pointer_offset = total_size - varargs_size; ++ cfun->machine->frame.sa_mask = sa_mask; ++ cfun->machine->frame.saved_regs_size = sa_size; ++ cfun->machine->frame.frame_size = total_size; ++} ++ ++/* Define the offset between two registers, one to be eliminated, ++ and the other its replacement, at the start of a routine. */ ++ ++HOST_WIDE_INT ++sw_64_initial_elimination_offset (unsigned int from, ++ unsigned int to ATTRIBUTE_UNUSED) ++{ ++ HOST_WIDE_INT ret; ++#ifdef SW_64_ENABLE_FULL_ASAN ++ if (to == HARD_FRAME_POINTER_REGNUM) ++ { ++ if (from == ARG_POINTER_REGNUM) ++ { ++ // TODO: in sw64 variable arguments processing, all regs ++ // and pretending arguments offset a passive, so we have ++ // to minus varargs size. May be fix it is a better way? 
++ return cfun->machine->frame.hard_frame_pointer_offset ++ - cfun->machine->frame.local_offset; ++ } ++ ++ if (from == FRAME_POINTER_REGNUM) ++ { ++ return cfun->machine->frame.hard_frame_pointer_offset ++ - cfun->machine->frame.local_offset; ++ } ++ } ++ ++ if (to == STACK_POINTER_REGNUM) ++ { ++ if (from == ARG_POINTER_REGNUM) ++ { ++ // TODO: same as HARD_FRAME_POINTER_REGNUM; ++ return cfun->machine->frame.arg_pointer_offset; ++ } ++ if (from == FRAME_POINTER_REGNUM) ++ { ++ return cfun->machine->frame.arg_pointer_offset; ++ } ++ } ++ ++ return cfun->machine->frame.frame_size; ++#else ++ ret = sw_64_sa_size (); ++ if (!frame_pointer_needed) ++ ret += SW_64_ROUND (crtl->outgoing_args_size); ++ ++ switch (from) ++ { ++ case FRAME_POINTER_REGNUM: ++ break; ++ ++ case ARG_POINTER_REGNUM: ++ ret += (SW_64_ROUND (get_frame_size () + crtl->args.pretend_args_size) ++ - crtl->args.pretend_args_size); ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ return ret; ++#endif ++} ++ ++/* Compute the frame size. SIZE is the size of the "naked" frame ++ * and SA_SIZE is the size of the register save area. */ ++ ++static HOST_WIDE_INT ++compute_frame_size (HOST_WIDE_INT size, HOST_WIDE_INT sa_size) ++{ ++#ifdef SW_64_ENABLE_FULL_ASAN ++ // sw_64_layout_frame (); ++ return cfun->machine->frame.frame_size; ++#else ++ return SW_64_ROUND (crtl->outgoing_args_size) + sa_size ++ + SW_64_ROUND (size + crtl->args.pretend_args_size); ++#endif ++} ++ ++/* Initialize the GCC target structure. */ ++#undef TARGET_IN_SMALL_DATA_P ++#define TARGET_IN_SMALL_DATA_P sw_64_in_small_data_p ++ ++#undef TARGET_ASM_ALIGNED_HI_OP ++#define TARGET_ASM_ALIGNED_HI_OP "\t.word\t" ++#undef TARGET_ASM_ALIGNED_DI_OP ++#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t" ++ ++/* Default unaligned ops are provided for ELF systems. To get unaligned ++ data for non-ELF systems, we have to turn off auto alignment. 
*/ ++#undef TARGET_ASM_RELOC_RW_MASK ++#define TARGET_ASM_RELOC_RW_MASK sw_64_elf_reloc_rw_mask ++#undef TARGET_ASM_SELECT_RTX_SECTION ++#define TARGET_ASM_SELECT_RTX_SECTION sw_64_elf_select_rtx_section ++#undef TARGET_SECTION_TYPE_FLAGS ++#define TARGET_SECTION_TYPE_FLAGS sw_64_elf_section_type_flags ++ ++#undef TARGET_ASM_FUNCTION_END_PROLOGUE ++#define TARGET_ASM_FUNCTION_END_PROLOGUE sw_64_output_function_end_prologue ++ ++#undef TARGET_LEGITIMIZE_ADDRESS ++#define TARGET_LEGITIMIZE_ADDRESS sw_64_legitimize_address ++#undef TARGET_MODE_DEPENDENT_ADDRESS_P ++#define TARGET_MODE_DEPENDENT_ADDRESS_P sw_64_mode_dependent_address_p ++ ++#undef TARGET_ASM_FILE_START ++#define TARGET_ASM_FILE_START sw_64_file_start ++ ++#undef TARGET_SCHED_ADJUST_COST ++#define TARGET_SCHED_ADJUST_COST sw_64_adjust_cost ++#undef TARGET_SCHED_ISSUE_RATE ++#define TARGET_SCHED_ISSUE_RATE sw_64_issue_rate ++#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD ++#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \ ++ sw_64_multipass_dfa_lookahead ++ ++#undef TARGET_HAVE_TLS ++#define TARGET_HAVE_TLS HAVE_AS_TLS ++ ++#undef TARGET_BUILTIN_DECL ++#define TARGET_BUILTIN_DECL sw_64_builtin_decl ++#undef TARGET_INIT_BUILTINS ++#define TARGET_INIT_BUILTINS sw_64_init_builtins ++#undef TARGET_EXPAND_BUILTIN ++#define TARGET_EXPAND_BUILTIN sw_64_expand_builtin ++#undef TARGET_FOLD_BUILTIN ++#define TARGET_FOLD_BUILTIN sw_64_fold_builtin ++#undef TARGET_GIMPLE_FOLD_BUILTIN ++#define TARGET_GIMPLE_FOLD_BUILTIN sw_64_gimple_fold_builtin ++ ++#undef TARGET_FUNCTION_OK_FOR_SIBCALL ++#define TARGET_FUNCTION_OK_FOR_SIBCALL sw_64_function_ok_for_sibcall ++#undef TARGET_CANNOT_COPY_INSN_P ++#define TARGET_CANNOT_COPY_INSN_P sw_64_cannot_copy_insn_p ++#undef TARGET_LEGITIMATE_CONSTANT_P ++#define TARGET_LEGITIMATE_CONSTANT_P sw_64_legitimate_constant_p ++#undef TARGET_CANNOT_FORCE_CONST_MEM ++#define TARGET_CANNOT_FORCE_CONST_MEM sw_64_cannot_force_const_mem ++ ++#undef TARGET_ASM_OUTPUT_MI_THUNK 
++#define TARGET_ASM_OUTPUT_MI_THUNK sw_64_output_mi_thunk_osf ++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK ++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \ ++ hook_bool_const_tree_hwi_hwi_const_tree_true ++#undef TARGET_STDARG_OPTIMIZE_HOOK ++#define TARGET_STDARG_OPTIMIZE_HOOK sw_64_stdarg_optimize_hook ++ ++#undef TARGET_PRINT_OPERAND ++#define TARGET_PRINT_OPERAND sw_64_print_operand ++#undef TARGET_PRINT_OPERAND_ADDRESS ++#define TARGET_PRINT_OPERAND_ADDRESS sw_64_print_operand_address ++#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P ++#define TARGET_PRINT_OPERAND_PUNCT_VALID_P sw_64_print_operand_punct_valid_p ++ ++/* Use 16-bits anchor. */ ++#undef TARGET_MIN_ANCHOR_OFFSET ++#define TARGET_MIN_ANCHOR_OFFSET -0x7fff - 1 ++#undef TARGET_MAX_ANCHOR_OFFSET ++#define TARGET_MAX_ANCHOR_OFFSET 0x7fff ++#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P ++#define TARGET_USE_BLOCKS_FOR_CONSTANT_P hook_bool_mode_const_rtx_true ++ ++#undef TARGET_REGISTER_MOVE_COST ++#define TARGET_REGISTER_MOVE_COST sw_64_register_move_cost ++#undef TARGET_MEMORY_MOVE_COST ++#define TARGET_MEMORY_MOVE_COST sw_64_memory_move_cost ++#undef TARGET_RTX_COSTS ++#define TARGET_RTX_COSTS sw_64_rtx_costs ++#undef TARGET_ADDRESS_COST ++#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0 ++ ++#undef TARGET_MACHINE_DEPENDENT_REORG ++#define TARGET_MACHINE_DEPENDENT_REORG sw_64_reorg ++ ++#undef TARGET_PROMOTE_FUNCTION_MODE ++#define TARGET_PROMOTE_FUNCTION_MODE \ ++ default_promote_function_mode_always_promote ++#undef TARGET_PROMOTE_PROTOTYPES ++#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_false ++ ++#undef TARGET_FUNCTION_VALUE ++#define TARGET_FUNCTION_VALUE sw_64_function_value ++#undef TARGET_LIBCALL_VALUE ++#define TARGET_LIBCALL_VALUE sw_64_libcall_value ++#undef TARGET_FUNCTION_VALUE_REGNO_P ++#define TARGET_FUNCTION_VALUE_REGNO_P sw_64_function_value_regno_p ++#undef TARGET_RETURN_IN_MEMORY ++#define TARGET_RETURN_IN_MEMORY sw_64_return_in_memory ++#undef TARGET_PASS_BY_REFERENCE ++#define 
TARGET_PASS_BY_REFERENCE sw_64_pass_by_reference ++#undef TARGET_SETUP_INCOMING_VARARGS ++#define TARGET_SETUP_INCOMING_VARARGS sw_64_setup_incoming_varargs ++#undef TARGET_STRICT_ARGUMENT_NAMING ++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true ++#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED ++#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true ++#undef TARGET_SPLIT_COMPLEX_ARG ++#define TARGET_SPLIT_COMPLEX_ARG sw_64_split_complex_arg ++#undef TARGET_GIMPLIFY_VA_ARG_EXPR ++#define TARGET_GIMPLIFY_VA_ARG_EXPR sw_64_gimplify_va_arg ++#undef TARGET_ARG_PARTIAL_BYTES ++#define TARGET_ARG_PARTIAL_BYTES sw_64_arg_partial_bytes ++#undef TARGET_FUNCTION_ARG ++#define TARGET_FUNCTION_ARG sw_64_function_arg ++#undef TARGET_FUNCTION_ARG_ADVANCE ++#define TARGET_FUNCTION_ARG_ADVANCE sw_64_function_arg_advance ++#undef TARGET_TRAMPOLINE_INIT ++#define TARGET_TRAMPOLINE_INIT sw_64_trampoline_init ++ ++#undef TARGET_INSTANTIATE_DECLS ++#define TARGET_INSTANTIATE_DECLS sw_64_instantiate_decls ++ ++#undef TARGET_SECONDARY_RELOAD ++#define TARGET_SECONDARY_RELOAD sw_64_secondary_reload ++#undef TARGET_SECONDARY_MEMORY_NEEDED ++#define TARGET_SECONDARY_MEMORY_NEEDED sw_64_secondary_memory_needed ++#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE ++#define TARGET_SECONDARY_MEMORY_NEEDED_MODE sw_64_secondary_memory_needed_mode ++ ++#undef TARGET_SCALAR_MODE_SUPPORTED_P ++#define TARGET_SCALAR_MODE_SUPPORTED_P sw_64_scalar_mode_supported_p ++#undef TARGET_VECTOR_MODE_SUPPORTED_P ++#define TARGET_VECTOR_MODE_SUPPORTED_P sw_64_vector_mode_supported_p ++ ++#undef TARGET_BUILD_BUILTIN_VA_LIST ++#define TARGET_BUILD_BUILTIN_VA_LIST sw_64_build_builtin_va_list ++ ++#undef TARGET_EXPAND_BUILTIN_VA_START ++#define TARGET_EXPAND_BUILTIN_VA_START sw_64_va_start ++ ++#undef TARGET_OPTION_OVERRIDE ++#define TARGET_OPTION_OVERRIDE sw_64_option_override ++ ++#undef TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE ++#define TARGET_OVERRIDE_OPTIONS_AFTER_CHANGE 
sw_64_override_options_after_change ++ ++#ifdef TARGET_ALTERNATE_LONG_DOUBLE_MANGLING ++#undef TARGET_MANGLE_TYPE ++#define TARGET_MANGLE_TYPE sw_64_mangle_type ++#endif ++ ++#undef TARGET_LRA_P ++#define TARGET_LRA_P hook_bool_void_false ++ ++#undef TARGET_LEGITIMATE_ADDRESS_P ++#define TARGET_LEGITIMATE_ADDRESS_P sw_64_legitimate_address_p ++ ++#undef TARGET_CONDITIONAL_REGISTER_USAGE ++#define TARGET_CONDITIONAL_REGISTER_USAGE sw_64_conditional_register_usage ++ ++#undef TARGET_CANONICALIZE_COMPARISON ++#define TARGET_CANONICALIZE_COMPARISON sw_64_canonicalize_comparison ++ ++#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV ++#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV sw_64_atomic_assign_expand_fenv ++ ++#undef TARGET_HARD_REGNO_MODE_OK ++#define TARGET_HARD_REGNO_MODE_OK sw_64_hard_regno_mode_ok ++ ++#undef TARGET_SLOW_UNALIGNED_ACCESS ++#define TARGET_SLOW_UNALIGNED_ACCESS sw_64_slow_unaligned_access ++ ++#undef TARGET_MODES_TIEABLE_P ++#define TARGET_MODES_TIEABLE_P sw_64_modes_tieable_p ++ ++#undef TARGET_CAN_CHANGE_MODE_CLASS ++#define TARGET_CAN_CHANGE_MODE_CLASS sw_64_can_change_mode_class ++ ++#undef TARGET_SCHED_MACRO_FUSION_P ++#define TARGET_SCHED_MACRO_FUSION_P sw_64_macro_fusion_p ++ ++#undef TARGET_SCHED_MACRO_FUSION_PAIR_P ++#define TARGET_SCHED_MACRO_FUSION_PAIR_P sw_64_macro_fusion_pair_p ++#undef TARGET_ASAN_SHADOW_OFFSET ++#define TARGET_ASAN_SHADOW_OFFSET sw_64_asan_shadow_offset ++ ++struct gcc_target targetm = TARGET_INITIALIZER; ++ ++#include "gt-sw-64.h" +diff --git a/gcc/config/sw_64/sw_64.h b/gcc/config/sw_64/sw_64.h +new file mode 100644 +index 000000000..fdf61ba04 +--- /dev/null ++++ b/gcc/config/sw_64/sw_64.h +@@ -0,0 +1,1001 @@ ++/* Definitions of target machine for GNU compiler, for Sw64. ++ Copyright (C) 1992-2022 Free Software Foundation, Inc. ++ Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) ++ ++This file is part of GCC. 
++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Target CPU builtins. */ ++#define TARGET_CPU_CPP_BUILTINS() \ ++ do \ ++ { \ ++ builtin_define ("__sw_64"); \ ++ builtin_define ("__sw_64__"); \ ++ builtin_assert ("cpu=sw_64"); \ ++ builtin_assert ("machine=sw_64"); \ ++ if (TARGET_CIX) \ ++ { \ ++ builtin_define ("__sw_64_cix__"); \ ++ builtin_assert ("cpu=cix"); \ ++ } \ ++ if (TARGET_FIX) \ ++ { \ ++ builtin_define ("__sw_64_fix__"); \ ++ builtin_assert ("cpu=fix"); \ ++ } \ ++ if (TARGET_BWX) \ ++ { \ ++ builtin_define ("__sw_64_bwx__"); \ ++ builtin_assert ("cpu=bwx"); \ ++ } \ ++ if (TARGET_MAX) \ ++ { \ ++ builtin_define ("__sw_64_max__"); \ ++ builtin_assert ("cpu=max"); \ ++ } \ ++ if (sw_64_cpu_string) \ ++ { \ ++ if (strcmp (sw_64_cpu_string, "sw6a") == 0) \ ++ { \ ++ builtin_define ("__sw_64_sw6a__"); \ ++ builtin_assert ("cpu=sw6a"); \ ++ } \ ++ else if (strcmp (sw_64_cpu_string, "sw6b") == 0) \ ++ { \ ++ builtin_define ("__sw_64_sw6b__"); \ ++ builtin_assert ("cpu=sw6b"); \ ++ } \ ++ else if (strcmp (sw_64_cpu_string, "sw4d") == 0) \ ++ { \ ++ builtin_define ("__sw_64_sw4d__"); \ ++ builtin_assert ("cpu=sw4d"); \ ++ } \ ++ else if (strcmp (sw_64_cpu_string, "sw8a") == 0) \ ++ { \ ++ builtin_define ("__sw_64_sw8a__"); \ ++ builtin_assert ("cpu=sw8a"); \ ++ } \ ++ } \ ++ else /* Presumably sw6b. 
*/ \ ++ { \ ++ builtin_define ("__sw_64_sw6b__"); \ ++ builtin_assert ("cpu=sw6b"); \ ++ } \ ++ if (TARGET_IEEE || TARGET_IEEE_WITH_INEXACT) \ ++ builtin_define ("_IEEE_FP"); \ ++ if (TARGET_IEEE_WITH_INEXACT) \ ++ builtin_define ("_IEEE_FP_INEXACT"); \ ++ if (TARGET_LONG_DOUBLE_128) \ ++ builtin_define ("__LONG_DOUBLE_128__"); \ ++ \ ++ /* Macros dependent on the C dialect. */ \ ++ SUBTARGET_LANGUAGE_CPP_BUILTINS (); \ ++ } while (0) ++ ++#ifndef SUBTARGET_LANGUAGE_CPP_BUILTINS ++#define SUBTARGET_LANGUAGE_CPP_BUILTINS() \ ++ do \ ++ { \ ++ if (preprocessing_asm_p ()) \ ++ builtin_define_std ("LANGUAGE_ASSEMBLY"); \ ++ else if (c_dialect_cxx ()) \ ++ { \ ++ builtin_define ("__LANGUAGE_C_PLUS_PLUS"); \ ++ builtin_define ("__LANGUAGE_C_PLUS_PLUS__"); \ ++ } \ ++ else \ ++ builtin_define_std ("LANGUAGE_C"); \ ++ if (c_dialect_objc ()) \ ++ { \ ++ builtin_define ("__LANGUAGE_OBJECTIVE_C"); \ ++ builtin_define ("__LANGUAGE_OBJECTIVE_C__"); \ ++ } \ ++ } while (0) ++#endif ++ ++/* Run-time compilation parameters selecting different hardware subsets. */ ++ ++/* Which processor to schedule for. The cpu attribute defines a list that ++ mirrors this list, so changes to sw_64.md must be made at the same time. */ ++ ++enum processor_type ++{ ++ PROCESSOR_SW6, /* SW6 */ ++ PROCESSOR_SW8, /* SW8 */ ++ PROCESSOR_MAX ++}; ++ ++extern enum processor_type sw_64_cpu; ++extern enum processor_type sw_64_tune; ++ ++enum sw_64_trap_precision ++{ ++ SW_64_TP_PROG, /* No precision (default). */ ++ SW_64_TP_FUNC, /* Trap contained within originating function. */ ++ SW_64_TP_INSN /* Instruction accuracy and code is resumption safe. */ ++}; ++ ++enum sw_64_fp_rounding_mode ++{ ++ SW_64_FPRM_NORM, /* Normal rounding mode. */ ++ SW_64_FPRM_MINF, /* Round towards minus-infinity. */ ++ SW_64_FPRM_CHOP, /* Chopped rounding mode (towards 0). */ ++ SW_64_FPRM_DYN /* Dynamic rounding mode. */ ++}; ++ ++enum sw_64_fp_trap_mode ++{ ++ SW_64_FPTM_N, /* Normal trap mode. 
*/ ++ SW_64_FPTM_U, /* Underflow traps enabled. */ ++ SW_64_FPTM_SU, /* Software completion, w/underflow traps */ ++ SW_64_FPTM_SUI /* Software completion, w/underflow & inexact traps */ ++}; ++ ++extern enum sw_64_trap_precision sw_64_tp; ++extern enum sw_64_fp_rounding_mode sw_64_fprm; ++extern enum sw_64_fp_trap_mode sw_64_fptm; ++ ++/* Invert the easy way to make options work. */ ++#define TARGET_FP (!TARGET_SOFT_FP) ++ ++/* Macros to silence warnings about numbers being signed in traditional ++ * C and unsigned in ISO C when compiled on 32-bit hosts. */ ++ ++#define BITMASK_HIGH (((unsigned long) 1) << 31) /* 0x80000000 */ ++ ++/* These are for target os support and cannot be changed at runtime. */ ++#define TARGET_ABI_OSF 1 ++ ++#ifndef TARGET_CAN_FAULT_IN_PROLOGUE ++#define TARGET_CAN_FAULT_IN_PROLOGUE 0 ++#endif ++#ifndef TARGET_HAS_XFLOATING_LIBS ++#define TARGET_HAS_XFLOATING_LIBS TARGET_LONG_DOUBLE_128 ++#endif ++#ifndef TARGET_PROFILING_NEEDS_GP ++#define TARGET_PROFILING_NEEDS_GP 0 ++#endif ++#ifndef HAVE_AS_TLS ++#define HAVE_AS_TLS 0 ++#endif ++ ++#define TARGET_DEFAULT MASK_FPREGS ++ ++#ifndef TARGET_CPU_DEFAULT ++#define TARGET_CPU_DEFAULT 0 ++#endif ++ ++#ifndef TARGET_DEFAULT_EXPLICIT_RELOCS ++#ifdef HAVE_AS_EXPLICIT_RELOCS ++#define TARGET_DEFAULT_EXPLICIT_RELOCS MASK_EXPLICIT_RELOCS ++#define TARGET_SUPPORT_ARCH 1 ++#else ++#define TARGET_DEFAULT_EXPLICIT_RELOCS 0 ++#endif ++#endif ++ ++#ifndef TARGET_SUPPORT_ARCH ++#define TARGET_SUPPORT_ARCH 0 ++#endif ++ ++/* Support for a compile-time default CPU, et cetera. The rules are: ++ --with-cpu is ignored if -mcpu is specified. ++ --with-tune is ignored if -mtune is specified. */ ++#define OPTION_DEFAULT_SPECS \ ++ {"cpu", "%{!mcpu=*:-mcpu=%(VALUE)}"}, \ ++ { \ ++ "tune", "%{!mtune=*:-mtune=%(VALUE)}" \ ++ } ++ ++/* target machine storage layout */ ++ ++/* Define the size of `int'. The default is the same as the word size. */ ++#define INT_TYPE_SIZE 32 ++ ++/* Define the size of `long long'. 
The default is the twice the word size. */ ++#define LONG_LONG_TYPE_SIZE 64 ++ ++/* The two floating-point formats we support are S-floating, which is ++ 4 bytes, and T-floating, which is 8 bytes. `float' is S and `double' ++ and `long double' are T. */ ++ ++#define FLOAT_TYPE_SIZE 32 ++#define DOUBLE_TYPE_SIZE 64 ++#define LONG_DOUBLE_TYPE_SIZE (TARGET_LONG_DOUBLE_128 ? 128 : 64) ++ ++/* Work around target_flags dependency in ada/targtyps.c. */ ++#define WIDEST_HARDWARE_FP_SIZE 64 ++ ++#define WCHAR_TYPE "unsigned int" ++#define WCHAR_TYPE_SIZE 32 ++ ++/* Define this macro if it is advisable to hold scalars in registers ++ in a wider mode than that declared by the program. In such cases, ++ the value is constrained to be within the bounds of the declared ++ type, but kept valid in the wider mode. The signedness of the ++ extension may differ from that of the type. ++ ++ For Sw_64, we always store objects in a full register. 32-bit integers ++ are always sign-extended, but smaller objects retain their signedness. ++ ++ Note that small vector types can get mapped onto integer modes at the ++ whim of not appearing in sw_64-modes.def. We never promoted these ++ values before; don't do so now that we've trimmed the set of modes to ++ those actually implemented in the backend. */ ++ ++#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \ ++ if (GET_MODE_CLASS (MODE) == MODE_INT \ ++ && (TYPE == NULL || TREE_CODE (TYPE) != VECTOR_TYPE) \ ++ && GET_MODE_SIZE (MODE) < UNITS_PER_WORD) \ ++ { \ ++ if ((MODE) == SImode) \ ++ (UNSIGNEDP) = 0; \ ++ (MODE) = DImode; \ ++ } ++ ++/* Define this if most significant bit is lowest numbered ++ in instructions that operate on numbered bit-fields. ++ ++ There are no such instructions on the Sw_64, but the documentation ++ is little endian. */ ++#define BITS_BIG_ENDIAN 0 ++ ++/* Define this if most significant byte of a word is the lowest numbered. ++ This is false on the Sw_64. 
*/ ++#define BYTES_BIG_ENDIAN 0 ++ ++/* Define this if most significant word of a multiword number is lowest ++ numbered. ++ ++ For Sw_64 we can decide arbitrarily since there are no machine instructions ++ for them. Might as well be consistent with bytes. */ ++#define WORDS_BIG_ENDIAN 0 ++ ++/* Width of a word, in units (bytes). */ ++#define UNITS_PER_WORD 8 ++ ++/* Width in bits of a pointer. ++ See also the macro `Pmode' defined below. */ ++#define POINTER_SIZE 64 ++ ++/* Allocation boundary (in *bits*) for storing arguments in argument list. */ ++#define PARM_BOUNDARY 64 ++ ++/* Boundary (in *bits*) on which stack pointer should be aligned. */ ++#define STACK_BOUNDARY (TARGET_SW_32ALIGN ? 256 : 128) ++ ++/* Allocation boundary (in *bits*) for the code of a function. */ ++#define FUNCTION_BOUNDARY 32 ++ ++/* Alignment of field after `int : 0' in a structure. */ ++#define EMPTY_FIELD_BOUNDARY 64 ++ ++/* Every structure's size must be a multiple of this. */ ++#define STRUCTURE_SIZE_BOUNDARY 8 ++ ++/* A bit-field declared as `int' forces `int' alignment for the struct. */ ++#undef PCC_BITFILED_TYPE_MATTERS ++#define PCC_BITFIELD_TYPE_MATTERS 1 ++ ++/* No data type wants to be aligned rounder than this. */ ++#define BIGGEST_ALIGNMENT (TARGET_SW_32ALIGN ? 256 : 128) ++ ++/* For atomic access to objects, must have at least 32-bit alignment ++ unless the machine has byte operations. */ ++#define MINIMUM_ATOMIC_ALIGNMENT ((unsigned int) (TARGET_BWX ? 8 : 32)) ++ ++/* Align all constants and variables to at least a word boundary so ++ we can pick up pieces of them faster. */ ++/* ??? Only if block-move stuff knows about different source/destination ++ alignment. */ ++#if 0 ++#define DATA_ALIGNMENT(EXP, ALIGN) MAX ((ALIGN), BITS_PER_WORD) ++#endif ++ ++/* Set this nonzero if move instructions will actually fail to work ++ when given unaligned data. ++ ++ Since we get an error message when we do one, call them invalid. 
*/ ++ ++#define STRICT_ALIGNMENT (flag_sw_unalign_byte != 1 || TARGET_SW8A == 0) ++ ++#define SW64_EXPAND_ALIGNMENT(COND, EXP, ALIGN) \ ++ (((COND) && ((ALIGN) < BITS_PER_WORD) \ ++ && (TREE_CODE (EXP) == ARRAY_TYPE || TREE_CODE (EXP) == UNION_TYPE \ ++ || TREE_CODE (EXP) == RECORD_TYPE)) \ ++ ? BITS_PER_WORD \ ++ : (ALIGN)) ++ ++/* Similarly, make sure that objects on the stack are sensibly aligned. */ ++#define LOCAL_ALIGNMENT(EXP, ALIGN) \ ++ SW64_EXPAND_ALIGNMENT (!flag_conserve_stack, EXP, ALIGN) ++ ++/* Standard register usage. */ ++ ++/* Number of actual hardware registers. ++ The hardware registers are assigned numbers for the compiler ++ from 0 to just below FIRST_PSEUDO_REGISTER. ++ All registers that the compiler knows about must be given numbers, ++ even those that are not normally considered general registers. ++ ++ We define all 32 integer registers, even though $31 is always zero, ++ and all 32 floating-point registers, even though $f31 is also ++ always zero. We do not bother defining the FP status register and ++ there are no other registers. ++ ++ Since $31 is always zero, we will use register number 31 as the ++ argument pointer. It will never appear in the generated code ++ because we will always be eliminating it in favor of the stack ++ pointer or hardware frame pointer. ++ ++ Likewise, we use $f31 for the frame pointer, which will always ++ be eliminated in favor of the hardware frame pointer or the ++ stack pointer. */ ++ ++#define FIRST_PSEUDO_REGISTER 64 ++ ++/* 1 for registers that have pervasive standard uses ++ and are not available for the register allocator. */ ++ ++#define FIXED_REGISTERS \ ++ { \ ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, \ ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 \ ++ } ++ ++/* 1 for registers not available across function calls. 
++ These must include the FIXED_REGISTERS and also any ++ registers that can be used without being saved. ++ The latter must include the registers where values are returned ++ and the register where structure-value addresses are passed. ++ Aside from that, you can include as many other registers as you like. */ ++#define CALL_USED_REGISTERS \ ++ { \ ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, \ ++ 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, \ ++ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1 \ ++ } ++ ++/* List the order in which to allocate registers. Each register must be ++ listed once, even those in FIXED_REGISTERS. */ ++ ++#define REG_ALLOC_ORDER \ ++ { \ ++ 1, 2, 3, 4, 5, 6, 7, 8, /* nonsaved integer registers */ \ ++ 22, 23, 24, 25, 28, /* likewise */ \ ++ 0, /* likewise, but return value */ \ ++ 21, 20, 19, 18, 17, 16, /* likewise, but input args */ \ ++ 27, /* likewise, but SYSV procedure value */ \ ++ \ ++ 42, 43, 44, 45, 46, 47, /* nonsaved floating-point registers */ \ ++ 54, 55, 56, 57, 58, 59, /* likewise */ \ ++ 60, 61, 62, /* likewise */ \ ++ 32, 33, /* likewise, but return values */ \ ++ 53, 52, 51, 50, 49, 48, /* likewise, but input args */ \ ++ \ ++ 9, 10, 11, 12, 13, 14, /* saved integer registers */ \ ++ 26, /* return address */ \ ++ 15, /* hard frame pointer */ \ ++ \ ++ 34, 35, 36, 37, 38, 39, /* saved floating-point registers */ \ ++ 40, 41, /* likewise */ \ ++ \ ++ 29, 30, 31, 63 /* gp, sp, ap, sfp */ \ ++ } ++ ++/* Specify the registers used for certain standard purposes. ++ The values of these macros are register numbers. */ ++ ++/* Sw_64 pc isn't overloaded on a register that the compiler knows about. */ ++/* #define PC_REGNUM */ ++ ++/* Register to use for pushing function arguments. */ ++#define STACK_POINTER_REGNUM 30 ++ ++/* Base register for access to local variables of the function. 
*/ ++#define HARD_FRAME_POINTER_REGNUM 15 ++ ++/* Base register for access to arguments of the function. */ ++#define ARG_POINTER_REGNUM 31 ++ ++/* Base register for access to local variables of function. */ ++#define FRAME_POINTER_REGNUM 63 ++ ++/* Register in which static-chain is passed to a function. ++ ++ For the Sw_64, this is based on an example; the calling sequence ++ doesn't seem to specify this. */ ++#define STATIC_CHAIN_REGNUM 1 ++ ++/* The register number of the register used to address a table of ++ static data addresses in memory. */ ++#define PIC_OFFSET_TABLE_REGNUM 29 ++ ++/* Define this macro if the register defined by `PIC_OFFSET_TABLE_REGNUM' ++ is clobbered by calls. */ ++/* ??? It is and it isn't. It's required to be valid for a given ++ function when the function returns. It isn't clobbered by ++ current_file functions. Moreover, we do not expose the ldgp ++ until after reload, so we're probably safe. */ ++/* #define PIC_OFFSET_TABLE_REG_CALL_CLOBBERED */ ++ ++/* Define the classes of registers for register constraints in the ++ machine description. Also define ranges of constants. ++ ++ One of the classes must always be named ALL_REGS and include all hard regs. ++ If there is more than one class, another class must be named NO_REGS ++ and contain no registers. ++ ++ The name GENERAL_REGS must be the name of a class (or an alias for ++ another name such as ALL_REGS). This is the class of registers ++ that is allowed by "g" or "r" in a register constraint. ++ Also, registers outside this class are allocated only when ++ instructions express preferences for them. ++ ++ The classes must be numbered in nondecreasing order; that is, ++ a larger-numbered class must never be contained completely ++ in a smaller-numbered class. ++ ++ For any two classes, it is very desirable that there be another ++ class that represents their union. 
*/ ++ ++enum reg_class ++{ ++ NO_REGS, ++ R0_REG, ++ R24_REG, ++ R25_REG, ++ R27_REG, ++ GENERAL_REGS, ++ FLOAT_REGS, ++ ALL_REGS, ++ LIM_REG_CLASSES ++}; ++ ++#define N_REG_CLASSES (int) LIM_REG_CLASSES ++ ++/* Give names of register classes as strings for dump file. */ ++ ++#define REG_CLASS_NAMES \ ++ { \ ++ "NO_REGS", "R0_REG", "R24_REG", "R25_REG", "R27_REG", "GENERAL_REGS", \ ++ "FLOAT_REGS", "ALL_REGS" \ ++ } ++ ++/* Define which registers fit in which classes. ++ This is an initializer for a vector of HARD_REG_SET ++ of length N_REG_CLASSES. */ ++ ++#define REG_CLASS_CONTENTS \ ++ { \ ++ {0x00000000, 0x00000000}, /* NO_REGS */ \ ++ {0x00000001, 0x00000000}, /* R0_REG */ \ ++ {0x01000000, 0x00000000}, /* R24_REG */ \ ++ {0x02000000, 0x00000000}, /* R25_REG */ \ ++ {0x08000000, 0x00000000}, /* R27_REG */ \ ++ {0xffffffff, 0x80000000}, /* GENERAL_REGS */ \ ++ {0x00000000, 0x7fffffff}, /* FLOAT_REGS */ \ ++ { \ ++ 0xffffffff, 0xffffffff \ ++ } \ ++ } ++ ++/* The same information, inverted: ++ Return the class number of the smallest class containing ++ reg number REGNO. This could be a conditional expression ++ or could index an array. */ ++ ++#define REGNO_REG_CLASS(REGNO) \ ++ ((REGNO) == 0 ? R0_REG \ ++ : (REGNO) == 24 ? R24_REG \ ++ : (REGNO) == 25 ? R25_REG \ ++ : (REGNO) == 27 ? R27_REG \ ++ : IN_RANGE ((REGNO), 32, 62) ? FLOAT_REGS \ ++ : GENERAL_REGS) ++ ++/* The class value for index registers, and the one for base regs. */ ++#define INDEX_REG_CLASS NO_REGS ++#define BASE_REG_CLASS GENERAL_REGS ++ ++/* Given an rtx X being reloaded into a reg required to be ++ in class CLASS, return the class of reg to actually use. ++ In general this is just CLASS; but on some machines ++ in some cases it is preferable to use a more restrictive class. */ ++ ++#define PREFERRED_RELOAD_CLASS sw_64_preferred_reload_class ++ ++/* Provide the cost of a branch. Exact meaning under development. 
*/ ++#define BRANCH_COST(speed_p, predictable_p) 5 ++ ++/* Stack layout; function entry, exit and calling. */ ++ ++/* Define this if pushing a word on the stack ++ makes the stack pointer a smaller address. */ ++#define STACK_GROWS_DOWNWARD 1 ++ ++/* Define this to nonzero if the nominal address of the stack frame ++ is at the high-address end of the local variables; ++ that is, each additional local variable allocated ++ goes at a more negative offset in the frame. */ ++//#define FRAME_GROWS_DOWNWARD SW_64_ENABLE_ASAN ++#define FRAME_GROWS_DOWNWARD 1 ++ ++/* If we generate an insn to push BYTES bytes, ++ this says how many the stack pointer really advances by. ++ On Sw_64, don't define this because there are no push insns. */ ++/* #define PUSH_ROUNDING(BYTES) */ ++ ++/* Define this to be nonzero if stack checking is built into the ABI. */ ++#define STACK_CHECK_BUILTIN 1 ++ ++/* Define this if the maximum size of all the outgoing args is to be ++ accumulated and pushed during the prologue. The amount can be ++ found in the variable crtl->outgoing_args_size. */ ++#define ACCUMULATE_OUTGOING_ARGS 1 ++ ++/* Offset of first parameter from the argument pointer register value. */ ++ ++#define FIRST_PARM_OFFSET(FNDECL) 0 ++ ++/* Definitions for register eliminations. ++ ++ We have two registers that can be eliminated on the Sw_64. First, the ++ frame pointer register can often be eliminated in favor of the stack ++ pointer register. Secondly, the argument pointer register can always be ++ eliminated; it is replaced with either the stack or frame pointer. */ ++ ++/* This is an array of structures. Each structure initializes one pair ++ of eliminable registers. The "from" register number is given first, ++ followed by "to". Eliminations of the same "from" register are listed ++ in order of preference. 
*/ ++ ++#define ELIMINABLE_REGS \ ++ { \ ++ {ARG_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ ++ {ARG_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM}, \ ++ {FRAME_POINTER_REGNUM, STACK_POINTER_REGNUM}, \ ++ { \ ++ FRAME_POINTER_REGNUM, HARD_FRAME_POINTER_REGNUM \ ++ } \ ++ } ++ ++/* Round up to a multiple of 16 bytes. */ ++#define SW_64_ROUND(X) \ ++ (TARGET_SW_32ALIGN ? ROUND_UP ((X), 32) : ROUND_UP ((X), 16)) ++ ++/* Define the offset between two registers, one to be eliminated, and the other ++ its replacement, at the start of a routine. */ ++#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \ ++ ((OFFSET) = sw_64_initial_elimination_offset (FROM, TO)) ++ ++/* Define this if stack space is still allocated for a parameter passed ++ in a register. */ ++/* #define REG_PARM_STACK_SPACE */ ++ ++/* 1 if N is a possible register number for function argument passing. ++ On Sw_64, these are $16-$21 and $f16-$f21. */ ++ ++#define FUNCTION_ARG_REGNO_P(N) \ ++ (IN_RANGE ((N), 16, 21) || ((N) >= 16 + 32 && (N) <= 21 + 32)) ++ ++/* Define a data type for recording info about an argument list ++ during the scan of that argument list. This data type should ++ hold all necessary information about the function itself ++ and about the args processed so far, enough to enable macros ++ such as FUNCTION_ARG to determine where the next arg should go. ++ ++ On Sw_64, this is a single integer, which is a number of words ++ of arguments scanned so far. ++ Thus 6 or more means all following args should go on the stack. */ ++ ++#define CUMULATIVE_ARGS int ++ ++/* Initialize a variable CUM of type CUMULATIVE_ARGS ++ for a call to a function whose data type is FNTYPE. ++ For a library call, FNTYPE is 0. */ ++ ++#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, INDIRECT, N_NAMED_ARGS) \ ++ (CUM) = 0 ++ ++/* Define intermediate macro to compute ++ the size (in registers) of an argument. */ ++ ++#define SW_64_ARG_SIZE(MODE, TYPE) \ ++ ((MODE) == TFmode || (MODE) == TCmode \ ++ ? 
1 \ ++ : CEIL (((MODE) == BLKmode ? int_size_in_bytes (TYPE) \ ++ : GET_MODE_SIZE (MODE)), \ ++ UNITS_PER_WORD)) ++ ++/* Make (or fake) .linkage entry for function call. ++ IS_LOCAL is 0 if name is used in call, 1 if name is used in definition. */ ++ ++/* This macro defines the start of an assembly comment. */ ++ ++#define ASM_COMMENT_START " #" ++ ++/* This macro produces the initial definition of a function. */ ++ ++#undef ASM_DECLARE_FUNCTION_NAME ++#define ASM_DECLARE_FUNCTION_NAME(FILE, NAME, DECL) \ ++ sw_64_start_function (FILE, NAME, DECL); ++ ++/* This macro closes up a function definition for the assembler. */ ++ ++#undef ASM_DECLARE_FUNCTION_SIZE ++#define ASM_DECLARE_FUNCTION_SIZE(FILE, NAME, DECL) \ ++ sw_64_end_function (FILE, NAME, DECL) ++ ++/* Output any profiling code before the prologue. */ ++ ++#define PROFILE_BEFORE_PROLOGUE 1 ++ ++/* Never use profile counters. */ ++ ++#define NO_PROFILE_COUNTERS 1 ++ ++/* Output assembler code to FILE to increment profiler label # LABELNO ++ for profiling a function entry. Under SYSV, profiling is enabled ++ by simply passing -pg to the assembler and linker. */ ++ ++#define FUNCTION_PROFILER(FILE, LABELNO) ++ ++/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function, ++ the stack pointer does not matter. The value is tested only in ++ functions that have frame pointers. ++ No definition is equivalent to always zero. */ ++ ++#define EXIT_IGNORE_STACK 1 ++ ++/* Define registers used by the epilogue and return instruction. */ ++ ++#define EPILOGUE_USES(REGNO) ((REGNO) == 26) ++ ++/* Length in units of the trampoline for entering a nested function. */ ++ ++#define TRAMPOLINE_SIZE 32 ++ ++/* The alignment of a trampoline, in bits. */ ++ ++#define TRAMPOLINE_ALIGNMENT 64 ++ ++/* A C expression whose value is RTL representing the value of the return ++ address for the frame COUNT steps up from the current frame. 
++ FRAMEADDR is the frame pointer of the COUNT frame, or the frame pointer of ++ the COUNT-1 frame if RETURN_ADDR_IN_PREVIOUS_FRAME is defined. */ ++ ++#define RETURN_ADDR_RTX sw_64_return_addr ++ ++/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders ++ can use DWARF_ALT_FRAME_RETURN_COLUMN defined below. This is just the same ++ as the default definition in dwarf2out.c. */ ++#undef DWARF_FRAME_REGNUM ++#define DWARF_FRAME_REGNUM(REG) DBX_REGISTER_NUMBER (REG) ++ ++/* Before the prologue, RA lives in $26. */ ++#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, 26) ++#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (26) ++#define DWARF_ALT_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (64) ++#define DWARF_ZERO_REG 31 ++ ++/* Describe how we implement __builtin_eh_return. */ ++#define EH_RETURN_DATA_REGNO(N) ((N) < 4 ? (N) + 16 : INVALID_REGNUM) ++#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, 28) ++#define EH_RETURN_HANDLER_RTX \ ++ gen_rtx_MEM (Pmode, plus_constant (Pmode, stack_pointer_rtx, \ ++ crtl->outgoing_args_size)) ++ ++/* Addressing modes, and classification of registers for them. */ ++ ++/* Macros to check register numbers against specific register classes. */ ++ ++/* These assume that REGNO is a hard or pseudo reg number. ++ They give nonzero only if REGNO is a hard reg of the suitable class ++ or a pseudo reg currently allocated to a suitable hard reg. ++ Since they use reg_renumber, they are safe only once reg_renumber ++ has been allocated, which happens in reginfo.c during register ++ allocation. */ ++ ++#define REGNO_OK_FOR_INDEX_P(REGNO) 0 ++#define REGNO_OK_FOR_BASE_P(REGNO) \ ++ ((REGNO) < 32 || (unsigned) reg_renumber[REGNO] < 32 || (REGNO) == 63 \ ++ || reg_renumber[REGNO] == 63) ++ ++/* Maximum number of registers that can appear in a valid memory address. */ ++#define MAX_REGS_PER_ADDRESS 1 ++ ++/* Recognize any constant value that is a valid address. 
For the Sw_64, ++ there are only constants none since we want to use LDI to load any ++ symbolic addresses into registers. */ ++ ++#define CONSTANT_ADDRESS_P(X) \ ++ (CONST_INT_P (X) && ((UINTVAL (X) + 0x8000) < 0x10000)) ++ ++/* The macros REG_OK_FOR..._P assume that the arg is a REG rtx ++ and check its validity for a certain class. ++ We have two alternate definitions for each of them. ++ The usual definition accepts all pseudo regs; the other rejects ++ them unless they have been allocated suitable hard regs. ++ The symbol REG_OK_STRICT causes the latter definition to be used. ++ ++ Most source files want to accept pseudo regs in the hope that ++ they will get allocated to the class that the insn wants them to be in. ++ Source files for reload pass need to be strict. ++ After reload, it makes no difference, since pseudo regs have ++ been eliminated by then. */ ++ ++/* Nonzero if X is a hard reg that can be used as an index ++ or if it is a pseudo reg. */ ++#define REG_OK_FOR_INDEX_P(X) 0 ++ ++/* Nonzero if X is a hard reg that can be used as a base reg ++ or if it is a pseudo reg. */ ++#define NONSTRICT_REG_OK_FOR_BASE_P(X) \ ++ (REGNO (X) < 32 || REGNO (X) == 63 || REGNO (X) >= FIRST_PSEUDO_REGISTER) ++ ++/* ??? Nonzero if X is the frame pointer, or some virtual register ++ that may eliminate to the frame pointer. These will be allowed to ++ have offsets greater than 32K. This is done because register ++ elimination offsets will change the hi/lo split, and if we split ++ before reload, we will require additional instructions. */ ++#define NONSTRICT_REG_OK_FP_BASE_P(X) \ ++ (REGNO (X) == 31 || REGNO (X) == 63 \ ++ || (REGNO (X) >= FIRST_PSEUDO_REGISTER \ ++ && REGNO (X) < LAST_VIRTUAL_POINTER_REGISTER)) ++ ++/* Nonzero if X is a hard reg that can be used as a base reg. 
*/ ++#define STRICT_REG_OK_FOR_BASE_P(X) REGNO_OK_FOR_BASE_P (REGNO (X)) ++ ++#ifdef REG_OK_STRICT ++#define REG_OK_FOR_BASE_P(X) STRICT_REG_OK_FOR_BASE_P (X) ++#else ++#define REG_OK_FOR_BASE_P(X) NONSTRICT_REG_OK_FOR_BASE_P (X) ++#endif ++ ++/* Try a machine-dependent way of reloading an illegitimate address ++ operand. If we find one, push the reload and jump to WIN. This ++ macro is used in only one place: `find_reloads_address' in reload.c. */ ++ ++#define LEGITIMIZE_RELOAD_ADDRESS(X, MODE, OPNUM, TYPE, IND_L, WIN) \ ++ do \ ++ { \ ++ rtx new_x \ ++ = sw_64_legitimize_reload_address (X, MODE, OPNUM, TYPE, IND_L); \ ++ if (new_x) \ ++ { \ ++ X = new_x; \ ++ goto WIN; \ ++ } \ ++ } while (0) ++ ++/* Specify the machine mode that this machine uses ++ for the index in the tablejump instruction. */ ++#define CASE_VECTOR_MODE SImode ++ ++/* Define as C expression which evaluates to nonzero if the tablejump ++ instruction expects the table to contain offsets from the address of the ++ table. ++ ++ Do not define this if the table should contain absolute addresses. ++ On the Sw_64, the table is really GP-relative, not relative to the PC ++ of the table, but we pretend that it is PC-relative; this should be OK, ++ but we should try to find some better way sometime. */ ++#define CASE_VECTOR_PC_RELATIVE 1 ++ ++/* Define this as 1 if `char' should by default be signed; else as 0. */ ++#define DEFAULT_SIGNED_CHAR 1 ++ ++/* Max number of bytes we can move to or from memory ++ in one reasonably fast instruction. */ ++ ++#define MOVE_MAX 8 ++ ++/* If a memory-to-memory move would take MOVE_RATIO or more simple ++ move-instruction pairs, we will do a movmem or libcall instead. ++ ++ Without byte/word accesses, we want no more than four instructions; ++ with, several single byte accesses are better. */ ++ ++#define MOVE_RATIO(speed) (TARGET_BWX ? 7 : 2) ++ ++/* Largest number of bytes of an object that can be placed in a register. 
++ On the Sw_64 we have plenty of registers, so use TImode. */ ++#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode) ++ ++/* Nonzero if access to memory by bytes is no faster than for words. ++ Also nonzero if doing byte operations (specifically shifts) in registers ++ is undesirable. ++ ++ On the Sw_64, we want to not use the byte operation and instead use ++ masking operations to access fields; these will save instructions. */ ++ ++#define SLOW_BYTE_ACCESS 1 ++ ++/* Define if operations between registers always perform the operation ++ on the full register even if a narrower mode is specified. */ ++#define WORD_REGISTER_OPERATIONS 1 ++ ++/* Define if loading in MODE, an integral mode narrower than BITS_PER_WORD ++ will either zero-extend or sign-extend. The value of this macro should ++ be the code that says which one of the two operations is implicitly ++ done, UNKNOWN if none. */ ++#define LOAD_EXTEND_OP(MODE) ((MODE) == SImode ? SIGN_EXTEND : ZERO_EXTEND) ++ ++/* Define if loading short immediate values into registers sign extends. */ ++#define SHORT_IMMEDIATES_SIGN_EXTEND 1 ++ ++/* The CIX ctlz and cttz instructions return 64 for zero. */ ++#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ ++ ((VALUE) = 64, TARGET_CIX ? 1 : 0) ++#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \ ++ ((VALUE) = 64, TARGET_CIX ? 1 : 0) ++ ++/* Define the value returned by a floating-point comparison instruction. */ ++ ++#define FLOAT_STORE_FLAG_VALUE(MODE) \ ++ REAL_VALUE_ATOF ((TARGET_FLOAT_VAX ? "0.5" : "2.0"), (MODE)) ++ ++/* Specify the machine mode that pointers have. ++ After generation of rtl, the compiler makes no further distinction ++ between pointers and any other objects of this machine mode. */ ++#define Pmode DImode ++ ++/* Mode of a function address in a call instruction (for indexing purposes). */ ++ ++#define FUNCTION_MODE Pmode ++ ++/* Define this if addresses of constant functions ++ shouldn't be put through pseudo regs where they can be cse'd. 
++ Desirable on machines where ordinary constants are expensive ++ but a CALL with constant address is cheap. ++ ++ We define this on the Sw_64 so that gen_call and gen_call_value ++ get to see the SYMBOL_REF (for the hint field of the jsr). It will ++ then copy it into a register, thus actually letting the address be ++ cse'ed. */ ++ ++#define NO_FUNCTION_CSE 1 ++ ++/* Define this to be nonzero if shift instructions ignore all but the low-order ++ few bits. */ ++#define SHIFT_COUNT_TRUNCATED 1 ++ ++/* Control the assembler format that we output. */ ++ ++/* Output to assembler file text saying following lines ++ may contain character constants, extra white space, comments, etc. */ ++#define ASM_APP_ON (TARGET_EXPLICIT_RELOCS ? "\t.set\tmacro\n" : "") ++ ++/* Output to assembler file text saying following lines ++ no longer contain unusual constructs. */ ++#define ASM_APP_OFF (TARGET_EXPLICIT_RELOCS ? "\t.set\tnomacro\n" : "") ++ ++#define TEXT_SECTION_ASM_OP "\t.text" ++ ++/* Output before writable data. */ ++ ++#define DATA_SECTION_ASM_OP "\t.data" ++ ++/* How to refer to registers in assembler output. ++ This sequence is indexed by compiler's hard-register-number (see above). */ ++ ++#define REGISTER_NAMES \ ++ { \ ++ "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$9", "$10", "$11", \ ++ "$12", "$13", "$14", "$15", "$16", "$17", "$18", "$19", "$20", "$21", \ ++ "$22", "$23", "$24", "$25", "$26", "$27", "$28", "$29", "$30", "AP", \ ++ "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7", "$f8", "$f9", \ ++ "$f10", "$f11", "$f12", "$f13", "$f14", "$f15", "$f16", "$f17", "$f18", \ ++ "$f19", "$f20", "$f21", "$f22", "$f23", "$f24", "$f25", "$f26", "$f27", \ ++ "$f28", "$f29", "$f30", "FP" \ ++ } ++ ++/* Strip name encoding when emitting labels. 
*/ ++ ++#define ASM_OUTPUT_LABELREF(STREAM, NAME) \ ++ do \ ++ { \ ++ const char *name_ = NAME; \ ++ if (*name_ == '@' || *name_ == '%') \ ++ name_ += 2; \ ++ if (*name_ == '*') \ ++ name_++; \ ++ else \ ++ fputs (user_label_prefix, STREAM); \ ++ fputs (name_, STREAM); \ ++ } while (0) ++ ++/* Globalizing directive for a label. */ ++#define GLOBAL_ASM_OP "\t.globl " ++ ++/* Use dollar signs rather than periods in special g++ assembler names. */ ++ ++#undef NO_DOLLAR_IN_LABEL ++ ++/* This is how to store into the string LABEL ++ the symbol_ref name of an internal numbered label where ++ PREFIX is the class of label and NUM is the number within the class. ++ This is suitable for output with `assemble_name'. */ ++ ++#undef ASM_GENERATE_INTERNAL_LABEL ++#define ASM_GENERATE_INTERNAL_LABEL(LABEL, PREFIX, NUM) \ ++ sprintf ((LABEL), "*$%s%ld", (PREFIX), (long) (NUM)) ++ ++/* This is how to output an element of a case-vector that is relative. */ ++ ++#define ASM_OUTPUT_ADDR_DIFF_ELT(FILE, BODY, VALUE, REL) \ ++ fprintf (FILE, "\t.gprel32 $L%d\n", (VALUE)) ++ ++/* If we use NM, pass -g to it so it only lists globals. */ ++#define NM_FLAGS "-pg" ++ ++/* Definitions for debugging. */ ++ ++/* Correct the offset of automatic variables and arguments. Note that ++ the Sw_64 debug format wants all automatic variables and arguments ++ to be in terms of two different offsets from the virtual frame pointer, ++ which is the stack pointer before any adjustment in the function. ++ The offset for the argument pointer is fixed for the native compiler, ++ it is either zero (for the no arguments case) or large enough to hold ++ all argument registers. ++ The offset for the auto pointer is the fourth argument to the .frame ++ directive (local_offset). ++ To stay compatible with the native tools we use the same offsets ++ from the virtual frame pointer and adjust the debugger arg/auto offsets ++ accordingly. These debugger offsets are set up in output_prolog. 
*/ ++ ++extern long sw_64_arg_offset; ++extern long sw_64_auto_offset; ++#define DEBUGGER_AUTO_OFFSET(X) \ ++ ((GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0) + sw_64_auto_offset) ++#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET + sw_64_arg_offset) ++ ++#define ASM_OUTPUT_SOURCE_FILENAME(STREAM, NAME) \ ++ sw_64_output_filename (STREAM, NAME) ++ ++/* By default, turn on GDB extensions. */ ++#define DEFAULT_GDB_EXTENSIONS 1 ++ ++/* This version don't define SYSTEM_IMPLICIT_EXTERN_C Replace ++ * NO_IMPLICIT_EXTERN_C with SYSTEM_IMPLICIT_EXTERN_C.*/ ++/* The system headers under Sw_64 systems are generally C++-aware. */ ++/*#define NO_IMPLICIT_EXTERN_C*/ ++ ++#define TARGET_SUPPORTS_WIDE_INT 1 ++#define SW64_TARGET_SUPPORT_FPCR 1 ++ ++/* Fence to use after loop using storent. */ ++#define FENCE_FOLLOWING_MOVNT \ ++ (builtin_decl_implicit (BUILT_IN_SYNC_SYNCHRONIZE)) ++ ++#undef ASM_DECLARE_OBJECT_NAME ++#define ASM_DECLARE_OBJECT_NAME(STREAM, NAME, DECL) \ ++ sw_64_declare_object (STREAM, NAME, "", ":\n") ++ ++#define HAVE_POST_INCREMENT (TARGET_SW8A ? 1 : 0) ++#define HAVE_POST_DECREMENT (TARGET_SW8A ? 1 : 0) ++#define HAVE_POST_MODIFY_DISP (TARGET_SW8A ? 1 : 0) ++#define USE_LOAD_POST_INCREMENT(MODE) 0 ++#define USE_STORE_POST_INCREMENT(MODE) 0 ++int ++enable_asan_check_stack (); ++#ifndef SW_64_ENABLE_ASAN ++#define SW_64_ENABLE_FULL_ASAN 1 ++#else ++#undef SW_64_ENABLE_FULL_ASAN ++#define SW_64_ENABLE_ASAN 0 ++#endif +diff --git a/gcc/config/sw_64/sw_64.md b/gcc/config/sw_64/sw_64.md +new file mode 100644 +index 000000000..a3751466a +--- /dev/null ++++ b/gcc/config/sw_64/sw_64.md +@@ -0,0 +1,7814 @@ ++;; Machine description for Sw64 for GNU C compiler ++;; Copyright (C) 1992-2022 Free Software Foundation, Inc. ++;; Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu) ++;; ++;; This file is part of GCC. 
++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . ++ ++;;- See file "rtl.def" for documentation on define_insn, match_*, et. al. ++ ++;; Uses of UNSPEC in this file: ++ ++(define_c_enum "unspec" [ ++ UNSPEC_XFLT_COMPARE ++ UNSPEC_ARG_HOME ++ UNSPEC_LDGP1 ++ UNSPEC_INSXH ++ UNSPEC_MSKXH ++ UNSPEC_CVTQL ++ UNSPEC_CVTLQ ++ UNSPEC_LDGP2 ++ UNSPEC_LITERAL ++ UNSPEC_LITUSE ++ UNSPEC_SIBCALL ++ UNSPEC_SYMBOL ++ ++ UNSPEC_FRINTZ ++ UNSPEC_FRINTP ++ UNSPEC_FRINTG ++ UNSPEC_FRINTN ++ UNSPEC_FRINTI ++ ++ UNSPEC_FRECX ++ ++ ;; TLS Support ++ UNSPEC_TLSGD_CALL ++ UNSPEC_TLSLDM_CALL ++ UNSPEC_TLSGD ++ UNSPEC_TLSLDM ++ UNSPEC_DTPREL ++ UNSPEC_TPREL ++ UNSPEC_TP ++ UNSPEC_TLSRELGOT ++ UNSPEC_GOTDTPREL ++ ++ ;; Builtins ++ UNSPEC_CMPBGE ++ UNSPEC_ZAP ++ UNSPEC_AMASK ++ UNSPEC_IMPLVER ++ UNSPEC_PERR ++ UNSPEC_COPYSIGN ++ ++ ;; Atomic operations ++ UNSPEC_MB ++ UNSPEC_ATOMIC ++ UNSPEC_CMPXCHG ++ UNSPEC_XCHG ++ ++ UNSPECV_HARDWARE_PREFETCH_CNT ++ UNSPEC_PFSC ++ UNSPEC_PFTC ++ ++ UNSPEC_NTSI ++ UNSPEC_NTDI ++ UNSPEC_NTDF ++ UNSPEC_NTVEC ++ ++ UNSPEC_FIMOVS ++ ;;UNSPEC_FIMOVD ++]) ++ ++;; UNSPEC_VOLATILE: ++ ++(define_c_enum "unspecv" [ ++ UNSPECV_IMB ++ UNSPECV_BLOCKAGE ++ UNSPECV_SPECULATION_BARRIER ++ UNSPECV_SETJMPR ; builtin_setjmp_receiver ++ UNSPECV_LONGJMP ; builtin_longjmp ++ UNSPECV_TRAPB ++ UNSPECV_PSPL ; prologue_stack_probe_loop ++ UNSPECV_REALIGN ++ UNSPECV_EHR ; exception_receiver ++ UNSPECV_MCOUNT ++ 
UNSPECV_FORCE_MOV ++ UNSPECV_LDGP1 ++ UNSPECV_PLDGP2 ; prologue ldgp ++ UNSPECV_SET_TP ++ UNSPECV_RPCC ++ UNSPECV_SETJMPR_ER ; builtin_setjmp_receiver fragment ++ UNSPECV_LL ; load-locked ++ UNSPECV_SC ; store-conditional ++ UNSPECV_CMPXCHG ++ UNSPECV_LDGP2 ++ UNSPEC_TIE ;; TIE ++]) ++ ++;; CQImode must be handled the similarly to HImode ++;; when generating reloads. ++(define_mode_iterator RELOAD12 [QI HI CQI]) ++(define_mode_attr reloadmode [(QI "qi") (HI "hi") (CQI "hi")]) ++ ++;; Other mode iterators ++(define_mode_iterator IMODE [QI HI SI DI]) ++(define_mode_iterator I12MODE [QI HI]) ++(define_mode_iterator I124MODE [QI HI SI]) ++(define_mode_iterator I24MODE [HI SI]) ++(define_mode_iterator I248MODE [HI SI DI]) ++(define_mode_iterator I48MODE [SI DI]) ++ ++(define_mode_attr DWI [(SI "DI") (DI "TI")]) ++(define_mode_attr modesuffix [(QI "b") (HI "h") (SI "w") (DI "l") ++ (V8QI "b8") (V4HI "w4") ++ (SF "%,") (DF "%-")]) ++(define_mode_attr vecmodesuffix [(QI "b8") (HI "w4")]) ++ ++(define_code_iterator any_maxmin [smax smin umax umin]) ++ ++(define_code_attr maxmin [(smax "maxs") (smin "mins") ++ (umax "maxu") (umin "minu")]) ++ ++(define_mode_iterator SFDF [SF DF]) ++(define_mode_attr SD [(SF "s") (DF "d")]) ++(define_int_iterator FRINT [UNSPEC_FRINTZ UNSPEC_FRINTP UNSPEC_FRINTN ++ UNSPEC_FRINTG UNSPEC_FRINTI]) ++ ++;; Standard pattern names for floating-point rounding instructions. ++(define_int_attr frint_pattern [(UNSPEC_FRINTZ "btrunc") ++ (UNSPEC_FRINTP "ceil") ++ (UNSPEC_FRINTN "floor") ++ (UNSPEC_FRINTI "nearbyint") ++ (UNSPEC_FRINTG "round")]) ++ ++;; frint suffix for floating-point rounding instructions. ++(define_int_attr frint_suffix [(UNSPEC_FRINTZ "_z") ++ (UNSPEC_FRINTP "_p") ++ (UNSPEC_FRINTN "_n") ++ (UNSPEC_FRINTG "_g") ++ (UNSPEC_FRINTI "")]) ++ ++;; Where necessary, the suffixes _le and _be are used to distinguish between ++;; little-endian and big-endian patterns. 
++;; ++;; Note that the Unicos/Mk assembler does not support the following ++;; opcodes: mov, fmov, nop, fnop, unop. ++ ++;; Processor type -- this attribute must exactly match the processor_type ++;; enumeration in sw_64.h. ++ ++(define_attr "tune" "sw6,sw8" ++ (const (symbol_ref "((enum attr_tune) sw_64_tune)"))) ++ ++;; Define an insn type attribute. This is used in function unit delay ++;; computations, among other purposes. For the most part, we use the names ++;; defined in the documentation, but add a few that we have to know about ++;; separately. ++ ++(define_attr "type" ++ "ild,fld,ldsym,ist,fst,ibr,callpal,fbr,call,iadd,ilog,shift,icmov,fcmov, ++ icmp,imul,fadd,fmul,fmadd,fcpys,fdiv,fsqrt,misc,mvi,ftoi,itof,mb,ld_l,st_c, ++ multi,none,frint,fp" ++ (const_string "iadd")) ++ ++;; Describe a user's asm statement. ++(define_asm_attributes ++ [(set_attr "type" "multi")]) ++ ++;; Define the operand size an insn operates on. Used primarily by mul ++;; and div operations that have size dependent timings. ++ ++(define_attr "opsize" "si,di,udi" ++ (const_string "di")) ++ ++;; The TRAP attribute marks instructions that may generate traps ++;; (which are imprecise and may need a trapb if software completion ++;; is desired). ++ ++(define_attr "trap" "no,yes" ++ (const_string "no")) ++ ++;; The ROUND_SUFFIX attribute marks which instructions require a ++;; rounding-mode suffix. The value NONE indicates no suffix, ++;; the value NORMAL indicates a suffix controlled by sw_64_fprm. ++ ++(define_attr "round_suffix" "none,normal,c" ++ (const_string "none")) ++ ++;; The TRAP_SUFFIX attribute marks instructions requiring a trap-mode suffix: ++;; NONE no suffix ++;; SU accepts only /su (cmpt et al) ++;; SUI accepts only /sui (cvtqt and cvtqs) ++;; V_SV accepts /v and /sv (cvtql only) ++;; V_SV_SVI accepts /v, /sv and /svi (cvttq only) ++;; U_SU_SUI accepts /u, /su and /sui (most fp instructions) ++;; ++;; The actual suffix emitted is controlled by sw_64_fptm. 
++ ++(define_attr "trap_suffix" "none,su,sui,v_sv,v_sv_svi,u_su_sui" ++ (const_string "none")) ++ ++;; The length of an instruction sequence in bytes. ++ ++(define_attr "length" "" ++ (const_int 4)) ++ ++;; The USEGP attribute marks instructions that have relocations that use ++;; the GP. ++ ++(define_attr "usegp" "no,yes" ++ (cond [(eq_attr "type" "ldsym,call") ++ (const_string "yes") ++ (eq_attr "type" "ild,fld,ist,fst") ++ (symbol_ref "((enum attr_usegp) sw_64_find_lo_sum_using_gp (insn))") ++ ] ++ (const_string "no"))) ++ ++;; The CANNOT_COPY attribute marks instructions with relocations that ++;; cannot easily be duplicated. This includes insns with gpdisp relocs ++;; since they have to stay in 1-1 correspondence with one another. This ++;; also includes call insns, since they must stay in correspondence with ++;; the immediately following gpdisp instructions. ++ ++(define_attr "cannot_copy" "false,true" ++ (const_string "false")) ++ ++;; Used to control the "enabled" attribute on a per-instruction basis. ++;; For convenience, conflate ABI issues re loading of addresses with ++;; an "isa". ++(define_attr "isa" "base,bwx,max,fix,cix,vms,ner,er,sw6a,sw6b,sw4d,sw8a" ++ (const_string "base")) ++ ++(define_attr "enabled" "" ++ (cond [(eq_attr "isa" "bwx") (symbol_ref "TARGET_BWX") ++ (eq_attr "isa" "max") (symbol_ref "TARGET_MAX") ++ (eq_attr "isa" "fix") (symbol_ref "TARGET_FIX") ++ (eq_attr "isa" "cix") (symbol_ref "TARGET_CIX") ++ (eq_attr "isa" "vms") (symbol_ref "!TARGET_ABI_OSF") ++ (eq_attr "isa" "ner") (symbol_ref "!TARGET_EXPLICIT_RELOCS") ++ (eq_attr "isa" "er") (symbol_ref "TARGET_EXPLICIT_RELOCS") ++ (eq_attr "isa" "sw6a") (symbol_ref "TARGET_SW6A") ++ (eq_attr "isa" "sw6b") (symbol_ref "TARGET_SW6B") ++ (eq_attr "isa" "sw4d") (symbol_ref "TARGET_SW4D") ++ (eq_attr "isa" "sw8a") (symbol_ref "TARGET_SW8A") ++ ] ++ (const_int 1))) ++ ++;; Include scheduling descriptions. 
++ ++(include "sw6.md") ++(include "sw8.md") ++ ++ ++;; Operand and operator predicates and constraints ++ ++(include "predicates.md") ++(include "constraints.md") ++ ++ ++;; First define the arithmetic insns. Note that the 32-bit forms also ++;; sign-extend. ++ ++;; Handle 32-64 bit extension from memory to a floating point register ++;; specially, since this occurs frequently in int->double conversions. ++;; ++;; Note that while we must retain the =f case in the insn for reload's ++;; benefit, it should be eliminated after reload, so we should never emit ++;; code for that case. But we don't reject the possibility. ++ ++(define_expand "extendsidi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand")))]) ++ ++(define_insn "*cvtlq" ++ [(set (match_operand:DI 0 "register_operand" "=f") ++ (unspec:DI [(match_operand:SF 1 "reg_or_0_operand" "fG")] ++ UNSPEC_CVTLQ))] ++ "" ++ "fcvtwl %1,%0" ++ [(set_attr "type" "fadd")]) ++ ++(define_insn "*extendsidi2_1" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,!*f") ++ (sign_extend:DI ++ (match_operand:SI 1 "nonimmediate_operand" "r,m,m")))] ++ "" ++;; "@ ++;; addw $31,%1,%0 ++;; ldw %0,%1 ++;; flds %0,%1\;fcvtwl %0,%0" ++;; else ++ "@ ++ addw $31,%1,%0 ++ ldw%U1 %0,%1 ++ flds %0,%1\;fcvtwl %0,%0" ++ [(set_attr "type" "iadd,ild,fld") ++ (set_attr "length" "*,*,8")]) ++ ++(define_split ++ [(set (match_operand:DI 0 "hard_fp_register_operand") ++ (sign_extend:DI (match_operand:SI 1 "memory_operand")))] ++ "reload_completed" ++ [(set (match_dup 2) (match_dup 1)) ++ (set (match_dup 0) (unspec:DI [(match_dup 2)] UNSPEC_CVTLQ))] ++{ ++ operands[1] = adjust_address (operands[1], SFmode, 0); ++ operands[2] = gen_rtx_REG (SFmode, REGNO (operands[0])); ++}) ++ ++;; Optimize sign-extension of SImode loads. This shows up in the wake of ++;; reload when converting fp->int. 
++ ++(define_peephole2 ++ [(set (match_operand:SI 0 "hard_int_register_operand") ++ (match_operand:SI 1 "memory_operand")) ++ (set (match_operand:DI 2 "hard_int_register_operand") ++ (sign_extend:DI (match_dup 0)))] ++ "true_regnum (operands[0]) == true_regnum (operands[2]) ++ || peep2_reg_dead_p (2, operands[0])" ++ [(set (match_dup 2) ++ (sign_extend:DI (match_dup 1)))]) ++ ++(define_peephole2 ++[ ++(set (match_operand:DF 0 "register_operand") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 2 "register_operand") ++ (match_operand:DF 3 "const0_operand")])) ++(set (match_operand:DF 4 "register_operand") ++ (match_operator:DF 5 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 6 "reg_or_0_operand") ++ (match_operand:DF 7 "reg_or_0_operand")])) ++(set (match_operand:SFDF 8 "register_operand") ++ (if_then_else:SFDF ++ (match_operand 9 "comparison_operator") ++ (match_operand:SFDF 10 "reg_or_8bit_operand") ++ (match_operand:SFDF 11 "reg_or_8bit_operand"))) ++] ++"(GET_CODE (operands[1])==LE || GET_CODE (operands[1])==LT) ++ && GET_CODE (operands[5])==EQ && GET_CODE (operands[9])==NE && flag_sw_fselect" ++ ++[ ++(set (match_operand:SFDF 8 "reg_or_0_operand") ++ (if_then_else:SFDF ++ (match_operator 1 "sw_64_fp_comparison_operator" ++ [(match_operand:SFDF 2 "reg_or_0_operand") ++ (match_operand:SFDF 3 "const0_operand")]) ++ (match_operand:SFDF 11 "reg_or_0_operand") ++ (match_operand:SFDF 10 "reg_or_0_operand"))) ++] ++) ++ ++(define_peephole2 ++[ ++(set (match_operand:DF 0 "register_operand") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 2 "const0_operand") ++ (match_operand:DF 3 "reg_or_0_operand")])) ++(set (match_operand:DF 4 "register_operand") ++ (match_operator:DF 5 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 6 "reg_or_0_operand") ++ (match_operand:DF 7 "reg_or_0_operand")])) ++(set (match_operand:SFDF 8 "register_operand") ++ (if_then_else:SFDF ++ (match_operand 9 "comparison_operator") ++ 
(match_operand:SFDF 10 "reg_or_8bit_operand") ++ (match_operand:SFDF 11 "reg_or_8bit_operand"))) ++] ++"(GET_CODE (operands[1])==LE || GET_CODE (operands[1])==LT) ++ && GET_CODE (operands[5])==EQ && GET_CODE (operands[9])==NE && flag_sw_fselect" ++ ++[ ++(set (match_operand:SFDF 8 "reg_or_0_operand") ++ (if_then_else:SFDF ++ (match_operator 1 "sw_64_fp_comparison_operator" ++ [(match_operand:SFDF 3 "reg_or_0_operand") ++ (match_operand:SFDF 2 "const0_operand")]) ++ (match_operand:SFDF 10 "reg_or_0_operand") ++ (match_operand:SFDF 11 "reg_or_0_operand"))) ++] ++) ++ ++(define_peephole2 ++[ ++(set (match_operand:DF 0 "register_operand") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 2 "register_operand") ++ (match_operand:DF 3 "const0_operand")])) ++(set (match_operand:DF 4 "register_operand") ++ (match_operator:DF 5 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 6 "register_operand") ++ (match_operand:DF 7 "const0_operand")])) ++(set (match_operand:SFDF 8 "register_operand") ++ (if_then_else:SFDF ++ (match_operand 9 "comparison_operator") ++ (match_operand:SFDF 10 "reg_or_8bit_operand") ++ (match_operand:SFDF 11 "reg_or_8bit_operand"))) ++] ++"GET_CODE (operands[1])==EQ && GET_CODE (operands[5])==EQ && ++ (GET_CODE (operands[9])==NE || GET_CODE (operands[9])==EQ)&& ++ (operands[0] == operands[6]) && flag_sw_fselect" ++[ ++(set (match_operand:SFDF 8 "reg_or_0_operand") ++ (if_then_else:SFDF ++ (match_operator 9 "sw_64_fp_comparison_operator" ++ [(match_operand:SFDF 2 "reg_or_0_operand") ++ (match_operand:SFDF 3 "const0_operand")]) ++ (match_operand:SFDF 10 "reg_or_0_operand") ++ (match_operand:SFDF 11 "reg_or_0_operand"))) ++] ++) ++ ++(define_insn "addsi3" ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r,r") ++ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ,rJ,rJ") ++ (match_operand:SI 2 "add_operand" "rI,O,K,L")))] ++ "" ++ "@ ++ addw %r1,%2,%0 ++ subw %r1,%n2,%0 ++ ldi %0,%2(%r1) ++ ldih %0,%h2(%r1)") ++ 
++(define_split ++ [(set (match_operand:SI 0 "register_operand") ++ (plus:SI (match_operand:SI 1 "register_operand") ++ (match_operand:SI 2 "const_int_operand")))] ++ "! add_operand (operands[2], SImode)" ++ [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3))) ++ (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))] ++{ ++ HOST_WIDE_INT val = INTVAL (operands[2]); ++ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000); ++ HOST_WIDE_INT rest = val - low; ++ ++ operands[3] = GEN_INT (rest); ++ operands[4] = GEN_INT (low); ++}) ++ ++(define_insn "*addsi_se" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (sign_extend:DI ++ (plus:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ") ++ (match_operand:SI 2 "sext_add_operand" "rI,O"))))] ++ "" ++ "@ ++ addw %r1,%2,%0 ++ subw %r1,%n2,%0") ++ ++(define_insn "*addsi_se2" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (sign_extend:DI ++ (subreg:SI (plus:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ") ++ (match_operand:DI 2 "sext_add_operand" "rI,O")) ++ 0)))] ++ "" ++ "@ ++ addw %r1,%2,%0 ++ subw %r1,%n2,%0") ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (sign_extend:DI ++ (plus:SI (match_operand:SI 1 "reg_not_elim_operand") ++ (match_operand:SI 2 "const_int_operand")))) ++ (clobber (match_operand:SI 3 "reg_not_elim_operand"))] ++ "! 
sext_add_operand (operands[2], SImode) && INTVAL (operands[2]) > 0 ++ && INTVAL (operands[2]) % 4 == 0" ++ [(set (match_dup 3) (match_dup 4)) ++ (set (match_dup 0) (sign_extend:DI (plus:SI (mult:SI (match_dup 3) ++ (match_dup 5)) ++ (match_dup 1))))] ++{ ++ HOST_WIDE_INT val = INTVAL (operands[2]) / 4; ++ int mult = 4; ++ ++ if (val % 2 == 0) ++ val /= 2, mult = 8; ++ ++ operands[4] = GEN_INT (val); ++ operands[5] = GEN_INT (mult); ++}) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (sign_extend:DI ++ (plus:SI (match_operator:SI 1 "comparison_operator" ++ [(match_operand 2) ++ (match_operand 3)]) ++ (match_operand:SI 4 "add_operand")))) ++ (clobber (match_operand:DI 5 "register_operand"))] ++ "" ++ [(set (match_dup 5) (match_dup 6)) ++ (set (match_dup 0) (sign_extend:DI (plus:SI (match_dup 7) (match_dup 4))))] ++{ ++ operands[6] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode, ++ operands[2], operands[3]); ++ operands[7] = gen_lowpart (SImode, operands[5]); ++}) ++ ++(define_expand "adddi3" ++ [(set (match_operand:DI 0 "register_operand") ++ (plus:DI (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "add_operand")))]) ++ ++(define_insn "*adddi_er_lo16_dtp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (lo_sum:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "dtp16_symbolic_operand")))] ++ "HAVE_AS_TLS" ++ "ldi %0,%2(%1)\t\t!dtprel") ++ ++(define_insn "*adddi_er_hi32_dtp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (high:DI (match_operand:DI 2 "dtp32_symbolic_operand"))))] ++ "HAVE_AS_TLS" ++ "ldih %0,%2(%1)\t\t!dtprelhi") ++ ++(define_insn "*adddi_er_lo32_dtp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (lo_sum:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "dtp32_symbolic_operand")))] ++ "HAVE_AS_TLS" ++ "ldi %0,%2(%1)\t\t!dtprello") ++ ++(define_insn "*adddi_er_lo16_tp" ++ [(set 
(match_operand:DI 0 "register_operand" "=r") ++ (lo_sum:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "tp16_symbolic_operand")))] ++ "HAVE_AS_TLS" ++ "ldi %0,%2(%1)\t\t!tprel") ++ ++(define_insn "*adddi_er_hi32_tp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (high:DI (match_operand:DI 2 "tp32_symbolic_operand"))))] ++ "HAVE_AS_TLS" ++ "ldih %0,%2(%1)\t\t!tprelhi") ++ ++(define_insn "*adddi_er_lo32_tp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (lo_sum:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "tp32_symbolic_operand")))] ++ "HAVE_AS_TLS" ++ "ldi %0,%2(%1)\t\t!tprello") ++ ++(define_insn "*adddi_er_high_l" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (plus:DI (match_operand:DI 1 "register_operand" "r") ++ (high:DI (match_operand:DI 2 "local_symbolic_operand"))))] ++ "TARGET_EXPLICIT_RELOCS && reload_completed" ++ "ldih %0,%2(%1)\t\t!gprelhigh" ++ [(set_attr "usegp" "yes")]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (high:DI (match_operand:DI 1 "local_symbolic_operand")))] ++ "TARGET_EXPLICIT_RELOCS && reload_completed" ++ [(set (match_dup 0) ++ (plus:DI (match_dup 2) (high:DI (match_dup 1))))] ++ "operands[2] = pic_offset_table_rtx;") ++ ++;; We used to expend quite a lot of effort choosing addl/subl/ldi. ++;; With complications like ++;; ++;; The NT stack unwind code can't handle a subl to adjust the stack ++;; (that's a bug, but not one we can do anything about). As of NT4.0 SP3, ++;; the exception handling code will loop if a subl is used and an ++;; exception occurs. ++;; ++;; The 19980616 change to emit prologues as RTL also confused some ++;; versions of GDB, which also interprets prologues. This has been ++;; fixed as of GDB 4.18, but it does not harm to unconditionally ++;; use ldi here. 
++;; ++;; and the fact that the three insns schedule exactly the same, it's ++;; just not worth the effort. ++ ++(define_insn "*adddi_internal" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r") ++ (plus:DI (match_operand:DI 1 "register_operand" "%r,r,r") ++ (match_operand:DI 2 "add_operand" "r,K,L")))] ++ "" ++ "@ ++ addl %1,%2,%0 ++ ldi %0,%2(%1) ++ ldih %0,%h2(%1)") ++ ++;; ??? Allow large constants when basing off the frame pointer or some ++;; virtual register that may eliminate to the frame pointer. This is ++;; done because register elimination offsets will change the hi/lo split, ++;; and if we split before reload, we will require additional instructions. ++ ++(define_insn "*adddi_fp_hack" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r") ++ (plus:DI (match_operand:DI 1 "reg_no_subreg_operand" "r,r,r") ++ (match_operand:DI 2 "const_int_operand" "K,L,n")))] ++ "NONSTRICT_REG_OK_FP_BASE_P (operands[1]) ++ && INTVAL (operands[2]) >= 0 ++ /* This is the largest constant an ldi+ldih pair can add, minus ++ an upper bound on the displacement between SP and AP during ++ register elimination. See INITIAL_ELIMINATION_OFFSET. */ ++ && INTVAL (operands[2]) ++ < (0x7fff8000 ++ - FIRST_PSEUDO_REGISTER * UNITS_PER_WORD ++ - SW_64_ROUND (crtl->outgoing_args_size) ++ - (SW_64_ROUND (get_frame_size () ++ + max_reg_num () * UNITS_PER_WORD ++ + crtl->args.pretend_args_size) ++ - crtl->args.pretend_args_size))" ++ "@ ++ ldi %0,%2(%1) ++ ldih %0,%h2(%1) ++ #") ++ ++;; Don't do this if we are adjusting SP since we don't want to do it ++;; in two steps. Don't split FP sources for the reason listed above. ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (plus:DI (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "const_int_operand")))] ++ "! 
add_operand (operands[2], DImode) ++ && operands[0] != stack_pointer_rtx ++ && operands[1] != frame_pointer_rtx ++ && operands[1] != arg_pointer_rtx" ++ [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3))) ++ (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))] ++{ ++ HOST_WIDE_INT val = INTVAL (operands[2]); ++ HOST_WIDE_INT low = (val & 0xffff) - 2 * (val & 0x8000); ++ HOST_WIDE_INT rest = val - low; ++ rtx rest_rtx = GEN_INT (rest); ++ ++ operands[4] = GEN_INT (low); ++ if (satisfies_constraint_L (rest_rtx)) ++ operands[3] = rest_rtx; ++ else if (can_create_pseudo_p ()) ++ { ++ operands[3] = gen_reg_rtx (DImode); ++ emit_move_insn (operands[3], operands[2]); ++ emit_insn (gen_adddi3 (operands[0], operands[1], operands[3])); ++ DONE; ++ } ++ else ++ FAIL; ++}) ++ ++; *sadd->*saddl/*saddq ++(define_insn "*saddl" ++ [(set (match_operand:SI 0 "register_operand" "=r,r") ++ (plus:SI ++ (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r,r") ++ (match_operand:SI 2 "const48_operand" "I,I")) ++ (match_operand:SI 3 "sext_add_operand" "rI,O")))] ++ "" ++ "@ ++ s%2addw %1,%3,%0 ++ s%2subw %1,%n3,%0") ++ ++(define_insn "*saddq" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (plus:DI ++ (mult:DI (match_operand:DI 1 "reg_not_elim_operand" "r,r") ++ (match_operand:DI 2 "const48_operand" "I,I")) ++ (match_operand:DI 3 "sext_add_operand" "rI,O")))] ++ "" ++ "@ ++ s%2addl %1,%3,%0 ++ s%2subl %1,%n3,%0") ++ ++(define_insn "*saddl_se" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (sign_extend:DI ++ (plus:SI ++ (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r,r") ++ (match_operand:SI 2 "const48_operand" "I,I")) ++ (match_operand:SI 3 "sext_add_operand" "rI,O"))))] ++ "" ++ "@ ++ s%2addw %1,%3,%0 ++ s%2subw %1,%n3,%0") ++ ++(define_insn "*sxaddw" ++ [(set (match_operand:SI 0 "register_operand" "=r,r") ++ (plus:SI ++ (subreg:SI ++ (ashift:DI ++ (subreg:DI (match_operand:SI 1 "reg_not_elim_operand" "r,r") 0) ++ (match_operand:DI 2 
"const_int_operand" "I,I")) ++ 0) ++ (match_operand:SI 3 "sext_add_operand" "rI,O")))] ++ "flag_sw_sxaddl==1 && (INTVAL (operands[2])==3 || INTVAL (operands[2])==2)" ++ { ++ switch (which_alternative) ++ { ++ case 0: ++ if (INTVAL (operands[2]) == 3) ++ return "s8addw %1,%3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4addw %1,%3,%0"; ++ case 1: ++ if (INTVAL (operands[2]) == 3) ++ return "s8subw %1,%n3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4subw %1,%n3,%0"; ++ default: ++ gcc_unreachable (); ++ } ++ }) ++ ++(define_insn "*sxsubw" ++ [(set (match_operand:SI 0 "register_operand" "=r,r") ++ (minus:SI ++ (subreg:SI ++ (ashift:DI ++ (subreg:DI(match_operand:SI 1 "reg_not_elim_operand" "r,r") 0) ++ (match_operand:DI 2 "const_int_operand" "I,I")) ++ 0) ++ (match_operand:SI 3 "sext_add_operand" "rI,O")))] ++ "flag_sw_sxaddl==1 && (INTVAL (operands[2])==3 || INTVAL (operands[2])==2)" ++ { ++ switch (which_alternative) ++ { ++ case 0: ++ if (INTVAL (operands[2]) == 3) ++ return "s8subw %1,%3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4subw %1,%3,%0"; ++ case 1: ++ if (INTVAL (operands[2]) == 3) ++ return "s8addw %1,%n3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4addw %1,%n3,%0"; ++ default: ++ gcc_unreachable (); ++ } ++ }) ++ ++(define_insn "*sxaddl" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (plus:DI ++ (ashift:DI (match_operand:DI 1 "reg_not_elim_operand" "r,r") ++ (match_operand:DI 2 "const_int_operand" "I,I")) ++ (match_operand:DI 3 "sext_add_operand" "rI,O")))] ++ "flag_sw_sxaddl==1 && (INTVAL (operands[2])==3 || INTVAL (operands[2])==2)" ++ { ++ switch (which_alternative) ++ { ++ case 0: ++ if (INTVAL (operands[2]) == 3) ++ return "s8addl %1,%3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4addl %1,%3,%0"; ++ case 1: ++ if (INTVAL (operands[2]) == 3) ++ return "s8subl %1,%n3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4subl %1,%n3,%0"; ++ default: ++ gcc_unreachable (); ++ } ++ }) ++ ++ ++ ++(define_insn 
"*sxsubl" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (minus:DI ++ (ashift:DI (match_operand:DI 1 "reg_not_elim_operand" "r,r") ++ (match_operand:DI 2 "const_int_operand" "I,I")) ++ (match_operand:DI 3 "sext_add_operand" "rI,O")))] ++ "flag_sw_sxaddl==1 && (INTVAL (operands[2])==3 || INTVAL (operands[2])==2)" ++ { ++ switch (which_alternative) ++ { ++ case 0: ++ if (INTVAL (operands[2]) == 3) ++ return "s8subl %1,%3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4subl %1,%3,%0"; ++ case 1: ++ if (INTVAL (operands[2]) == 3) ++ return "s8addl %1,%n3,%0"; ++ if (INTVAL (operands[2]) == 2) ++ return "s4addl %1,%n3,%0"; ++ default: ++ gcc_unreachable (); ++ } ++ }) ++ ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (sign_extend:DI ++ (plus:SI (mult:SI (match_operator:SI 1 "comparison_operator" ++ [(match_operand 2) ++ (match_operand 3)]) ++ (match_operand:SI 4 "const48_operand")) ++ (match_operand:SI 5 "sext_add_operand")))) ++ (clobber (match_operand:DI 6 "reg_not_elim_operand"))] ++ "" ++ [(set (match_dup 6) (match_dup 7)) ++ (set (match_dup 0) ++ (sign_extend:DI (plus:SI (mult:SI (match_dup 8) (match_dup 4)) ++ (match_dup 5))))] ++{ ++ operands[7] = gen_rtx_fmt_ee (GET_CODE (operands[1]), DImode, ++ operands[2], operands[3]); ++ operands[8] = gen_lowpart (SImode, operands[6]); ++}) ++ ++(define_insn "neg2" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r") ++ (neg:I48MODE (match_operand:I48MODE 1 "reg_or_8bit_operand" "rI")))] ++ "" ++ "sub $31,%1,%0") ++ ++(define_insn "*negsi_se" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI (neg:SI ++ (match_operand:SI 1 "reg_or_8bit_operand" "rI"))))] ++ "" ++ "subw $31,%1,%0") ++ ++(define_insn "sub3" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r") ++ (minus:I48MODE (match_operand:I48MODE 1 "reg_or_0_operand" "rJ") ++ (match_operand:I48MODE 2 "reg_or_8bit_operand" "rI")))] ++ "" ++ "sub %r1,%2,%0") ++ ++(define_insn "*subsi_se" ++ [(set 
(match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") ++ (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))] ++ "" ++ "subw %r1,%2,%0") ++ ++(define_insn "*subsi_se2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (subreg:SI (minus:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "reg_or_8bit_operand" "rI")) ++ 0)))] ++ "" ++ "subw %r1,%2,%0") ++ ++(define_insn "*ssubl" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (minus:SI ++ (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r") ++ (match_operand:SI 2 "const48_operand" "I")) ++ (match_operand:SI 3 "reg_or_8bit_operand" "rI")))] ++ "" ++ "s%2subw %1,%3,%0") ++ ++(define_insn "*ssubq" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (minus:DI ++ (mult:DI (match_operand:DI 1 "reg_not_elim_operand" "r") ++ (match_operand:DI 2 "const48_operand" "I")) ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI")))] ++ "" ++ "s%2subl %1,%3,%0") ++ ++(define_insn "*ssubl_se" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (minus:SI ++ (mult:SI (match_operand:SI 1 "reg_not_elim_operand" "r") ++ (match_operand:SI 2 "const48_operand" "I")) ++ (match_operand:SI 3 "reg_or_8bit_operand" "rI"))))] ++ "" ++ "s%2subw %1,%3,%0") ++ ++(define_insn "mul3" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r") ++ (mult:I48MODE (match_operand:I48MODE 1 "reg_or_0_operand" "%rJ") ++ (match_operand:I48MODE 2 "reg_or_8bit_operand" "rI")))] ++ "" ++ "mul %r1,%2,%0" ++ [(set_attr "type" "imul") ++ (set_attr "opsize" "")]) ++ ++(define_insn "*mulsi_se" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (mult:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ") ++ (match_operand:SI 2 "reg_or_8bit_operand" "rI"))))] ++ "" ++ "mulw %r1,%2,%0" ++ [(set_attr "type" "imul") ++ (set_attr "opsize" "si")]) ++ ++(define_expand "umuldi3_highpart" ++ [(set (match_operand:DI 
0 "register_operand") ++ (truncate:DI ++ (lshiftrt:TI ++ (mult:TI (zero_extend:TI ++ (match_operand:DI 1 "register_operand")) ++ (match_operand:DI 2 "reg_or_8bit_operand")) ++ (const_int 64))))] ++ "" ++{ ++ if (REG_P (operands[2])) ++ operands[2] = gen_rtx_ZERO_EXTEND (TImode, operands[2]); ++}) ++ ++(define_insn "*umuldi3_highpart_reg" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (truncate:DI ++ (lshiftrt:TI ++ (mult:TI (zero_extend:TI ++ (match_operand:DI 1 "register_operand" "r")) ++ (zero_extend:TI ++ (match_operand:DI 2 "register_operand" "r"))) ++ (const_int 64))))] ++ "" ++ "umulh %1,%2,%0" ++ [(set_attr "type" "imul") ++ (set_attr "opsize" "udi")]) ++ ++(define_insn "*umuldi3_highpart_const" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (truncate:DI ++ (lshiftrt:TI ++ (mult:TI (zero_extend:TI (match_operand:DI 1 "register_operand" "r")) ++ (match_operand:TI 2 "cint8_operand" "I")) ++ (const_int 64))))] ++ "" ++ "umulh %1,%2,%0" ++ [(set_attr "type" "imul") ++ (set_attr "opsize" "udi")]) ++ ++(define_expand "umulditi3" ++ [(set (match_operand:TI 0 "register_operand") ++ (mult:TI ++ (zero_extend:TI (match_operand:DI 1 "reg_no_subreg_operand")) ++ (zero_extend:TI (match_operand:DI 2 "reg_no_subreg_operand"))))] ++ "" ++{ ++ rtx l = gen_reg_rtx (DImode), h = gen_reg_rtx (DImode); ++ emit_insn (gen_muldi3 (l, operands[1], operands[2])); ++ emit_insn (gen_umuldi3_highpart (h, operands[1], operands[2])); ++ emit_move_insn (gen_lowpart (DImode, operands[0]), l); ++ emit_move_insn (gen_highpart (DImode, operands[0]), h); ++ DONE; ++}) ++ ++;; The divide and remainder operations take their inputs from r24 and ++;; r25, put their output in r27, and clobber r23 and r28 on all systems. ++;; ++;; ??? Force sign-extension here because some versions of SYSV and ++;; Interix/NT don't do the right thing if the inputs are not properly ++;; sign-extended. But Linux, for instance, does not have this ++;; problem. 
Is it worth the complication here to eliminate the sign ++;; extension? ++ ++(define_code_iterator any_divmod [div mod udiv umod]) ++ ++(define_expand "si3" ++ [(set (match_dup 3) ++ (sign_extend:DI (match_operand:SI 1 "nonimmediate_operand"))) ++ (set (match_dup 4) ++ (sign_extend:DI (match_operand:SI 2 "nonimmediate_operand"))) ++ (parallel [(set (match_dup 5) ++ (sign_extend:DI ++ (any_divmod:SI (match_dup 3) (match_dup 4)))) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))]) ++ (set (match_operand:SI 0 "nonimmediate_operand") ++ (subreg:SI (match_dup 5) 0))] ++ "" ++{ ++ operands[3] = gen_reg_rtx (DImode); ++ operands[4] = gen_reg_rtx (DImode); ++ operands[5] = gen_reg_rtx (DImode); ++}) ++ ++(define_expand "di3" ++ [(parallel [(set (match_operand:DI 0 "register_operand") ++ (any_divmod:DI ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "register_operand"))) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))])] ++ "") ++ ++(define_insn "int_div_use_float_si" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (sign_extend:DI (match_operator:SI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")]))) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DF 56)) ++ (clobber (reg:DF 60))] ++ "flag_sw_int_div_opt == 1 ++ &&(GET_CODE (operands[3])==DIV)" ++ "ifmovd %1,$f23 ++ fcvtld $f23,$f28 ++ fcpys $f28,$f28,$f23 ++ ifmovd %2,$f24 ++ fcvtld $f24,$f28 ++ fdivd $f23,$f28,$f24 ++ fcvtdl_z $f24,$f23 ++ fimovd $f23,%0" ++ [(set_attr "type" "fdiv")]) ++ ++(define_insn "int_divu_use_float_si" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (sign_extend:DI (match_operator:SI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")]))) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DF 56)) ++ (clobber (reg:DF 60))] ++ "flag_sw_int_div_opt == 1 ++ &&(GET_CODE (operands[3])==UDIV)" ++ "zap %1,240,%1 ++ zap %2,240,%2 ++ ifmovd %1,$f23 ++ 
fcvtld $f23,$f28 ++ fcpys $f28,$f28,$f23 ++ ifmovd %2,$f24 ++ fcvtld $f24,$f28 ++ fdivd $f23,$f28,$f24 ++ fcvtdl_z $f24,$f23 ++ fimovd $f23,%0" ++ [(set_attr "type" "fdiv")]) ++ ++(define_insn "int_rem_use_float_si" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (sign_extend:DI (match_operator:SI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")]))) ++ (clobber (reg:DF 54)) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DF 56)) ++ (clobber (reg:DF 60))] ++ "flag_sw_int_div_opt == 1 ++ &&(GET_CODE (operands[3])==MOD)" ++ "ifmovd %1,$f24 ++ fcvtld $f24,$f28 ++ fcpys $f28,$f28,$f24 ++ ifmovd %2,$f23 ++ fcvtld $f23,$f28 ++ fdivd $f24,$f28,$f22 ++ fcvtdl_z $f22,$f23 ++ fcvtld $f23,$f22 ++ fnmad $f22,$f28,$f24,$f23 ++ fcvtdl_z $f23,$f22 ++ fimovd $f22,%0" ++ [(set_attr "type" "fdiv")]) ++ ++(define_insn "int_remu_use_float_si" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (sign_extend:DI (match_operator:SI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")]))) ++ (clobber (reg:DF 54)) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DF 56)) ++ (clobber (reg:DF 60))] ++ "flag_sw_int_div_opt == 1 ++ &&(GET_CODE (operands[3])==UMOD)" ++ "zap %1,240,%1 ++ zap %2,240,%2 ++ ifmovd %1,$f22 ++ fcvtld $f22,$f24 ++ ifmovd %2,$f22 ++ fcvtld $f22,$f28 ++ fdivd $f24,$f28,$f23 ++ fcvtdl_z $f23,$f22 ++ fcvtld $f22,$f23 ++ fnmad $f23,$f28,$f24,$f22 ++ fcvtdl_z $f22,$f23 ++ fimovd $f23,%0" ++ [(set_attr "type" "fdiv")]) ++ ++(define_insn_and_split "*divmodsi_internal_er" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (sign_extend:DI (match_operator:SI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")]))) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))] ++ "TARGET_EXPLICIT_RELOCS && !(TARGET_SW8A && flag_sw_int_divmod)" ++ "#" ++ "&& reload_completed" ++ [(parallel [(set 
(match_dup 0) ++ (sign_extend:DI (match_dup 3))) ++ (use (match_dup 0)) ++ (use (match_dup 4)) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))])] ++{ ++ if (flag_sw_int_div_opt) ++ { ++ const char *str; ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: ++ emit_insn (gen_int_div_use_float_si (operands[0], operands[1], operands[2], operands[3])); ++ break; ++ case UDIV: ++ emit_insn (gen_int_divu_use_float_si (operands[0], operands[1], operands[2], operands[3])); ++ break; ++ case MOD: ++ emit_insn (gen_int_rem_use_float_si (operands[0], operands[1], operands[2], operands[3])); ++ break; ++ case UMOD: ++ emit_insn (gen_int_remu_use_float_si (operands[0], operands[1], operands[2], operands[3])); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ const char *str; ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: ++ str = "__divw"; ++ break; ++ case UDIV: ++ str = "__divwu"; ++ break; ++ case MOD: ++ str = "__remw"; ++ break; ++ case UMOD: ++ str = "__remwu"; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ emit_insn (gen_movdi_er_high_g (operands[0], pic_offset_table_rtx, ++ gen_rtx_SYMBOL_REF (DImode, str), ++ operands[4])); ++ } ++} ++ [(set_attr "type" "call") ++ (set_attr "length" "8")]) ++ ++(define_insn "*divmodsi_internal_er_1" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (sign_extend:DI (match_operator:SI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")]))) ++ (use (match_operand:DI 4 "register_operand" "c")) ++ (use (match_operand 5 "const_int_operand")) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))] ++ "TARGET_EXPLICIT_RELOCS && !(TARGET_SW8A && flag_sw_int_divmod)" ++ { ++ if (flag_sw_int_div_opt) ++ { ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: ++ case UDIV: ++ case MOD: ++ case UMOD: ++ return ""; ++ } ++ } ++ else ++ { ++ 
return "call $23,($27),__%E3%j5"; ++ } ++ } ++ [(set_attr "type" "call") ++ (set_attr "length" "4")]) ++ ++(define_insn "*divmodsi_internal" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI (match_operator:SI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "register_operand" "r")]))) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))] ++ "TARGET_SW8A && flag_sw_int_divmod" ++ { ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: return "divw %1,%2,%0"; ++ case UDIV: return "udivw %1,%2,%0"; ++ case MOD: return "remw %1,%2,%0"; ++ case UMOD: return "uremw %1,%2,%0"; ++ } ++ } ++ [(set_attr "length" "4")]) ++ ++(define_insn "int_div_use_float_di" ++[(set (match_operand:DI 0 "register_operand" "=c") ++ (match_operator:DI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")])) ++ (use (match_operand:DI 4 "register_operand" "r")) ++ (use (match_operand:DI 5 "symbolic_operand")) ++ (use (match_operand 6 "const_int_operand")) ++ (use (label_ref:DI (match_operand 7))) ++ (use (label_ref:DI (match_operand 8))) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DI 27)) ++ (clobber (reg:DI 28)) ++ (clobber (reg:DF 59)) ++ (clobber (reg:DF 60))] ++ "TARGET_EXPLICIT_RELOCS && flag_sw_int_div_opt == 1 &&(GET_CODE (operands[3])==DIV)" ++ "srl %1,52,$28 ++ srl %2,52,$27 ++ bis $28,$27,$28 ++ bne $28,%l7 ++ ifmovd %1,$f23 ++ fcvtld $f23,$f27 ++ ifmovd %2,$f28 ++ fcvtld $f28,$f23 ++ fdivd $f27,$f23,$f28 ++ fcvtdl_z $f28,$f23 ++ fimovd $f23,%0 ++ br %l8 ++%l7: ++ ldl %0,%5(%4)\t\t!literal!%6 ++ call $23,($27),__%E3%j6 ++%l8:" ++ [(set_attr "cannot_copy" "true") ++ (set_attr "type" "fdiv")]) ++ ++(define_insn "int_divu_use_float_di" ++[(set (match_operand:DI 0 "register_operand" "=c") ++ (match_operator:DI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")])) ++ (use (match_operand:DI 4 
"register_operand" "r")) ++ (use (match_operand:DI 5 "symbolic_operand")) ++ (use (match_operand 6 "const_int_operand")) ++ (use (label_ref:DI (match_operand 7))) ++ (use (label_ref:DI (match_operand 8))) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DI 27)) ++ (clobber (reg:DI 28)) ++ (clobber (reg:DF 59)) ++ (clobber (reg:DF 60))] ++ "TARGET_EXPLICIT_RELOCS && flag_sw_int_div_opt == 1 ++ &&(GET_CODE (operands[3])==UDIV)" ++ "srl %1,52,$28 ++ srl %2,52,$27 ++ bis $28,$27,$28 ++ bne $28,%l7 ++ ifmovd %1,$f23 ++ fcvtld $f23,$f27 ++ ifmovd %2,$f28 ++ fcvtld $f28,$f23 ++ fdivd $f27,$f23,$f28 ++ fcvtdl_z $f28,$f23 ++ fimovd $f23,%0 ++ br %l8 ++%l7: ++ ldl %0,%5(%4)\t\t!literal!%6 ++ call $23,($27),__%E3%j6 ++%l8:" ++ [(set_attr "cannot_copy" "true") ++ (set_attr "type" "fdiv")]) ++ ++(define_insn "int_rem_use_float_di" ++[(set (match_operand:DI 0 "register_operand" "=c") ++ (match_operator:DI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")])) ++ (use (match_operand:DI 4 "register_operand" "r")) ++ (use (match_operand:DI 5 "symbolic_operand")) ++ (use (match_operand 6 "const_int_operand")) ++ (use (label_ref:DI (match_operand 7))) ++ (use (label_ref:DI (match_operand 8))) ++ (clobber (reg:DF 54)) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DI 27)) ++ (clobber (reg:DI 28)) ++ (clobber (reg:DF 56)) ++ (clobber (reg:DF 60))] ++ "TARGET_EXPLICIT_RELOCS && flag_sw_int_div_opt == 1 ++ &&(GET_CODE (operands[3])==MOD)" ++ "srl %1,52,$28 ++ srl %2,52,$27 ++ bis $28,$27,$28 ++ bne $28,%l7 ++ ifmovd %1,$f22 ++ fcvtld $f22,$f24 ++ ifmovd %2,$f22 ++ fcvtld $f22,$f28 ++ fdivd $f24,$f28,$f22 ++ fcvtdl_z $f22,$f23 ++ fcvtld $f23,$f22 ++ fnmad $f22,$f28,$f24,$f23 ++ fcvtdl_z $f23,$f22 ++ fimovd $f22,%0 ++ br %l8 ++%l7: ++ ldl %0,%5(%4)\t\t!literal!%6 ++ call $23,($27),__%E3%j6 ++%l8:" ++ [(set_attr "cannot_copy" "true") ++ (set_attr "type" "fdiv")]) ++ ++(define_insn "int_remu_use_float_di" ++[(set (match_operand:DI 0 
"register_operand" "=c") ++ (match_operator:DI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")])) ++ (use (match_operand:DI 4 "register_operand" "r")) ++ (use (match_operand:DI 5 "symbolic_operand")) ++ (use (match_operand 6 "const_int_operand")) ++ (use (label_ref:DI (match_operand 7))) ++ (use (label_ref:DI (match_operand 8))) ++ (clobber (reg:DF 54)) ++ (clobber (reg:DF 55)) ++ (clobber (reg:DI 27)) ++ (clobber (reg:DI 28)) ++ (clobber (reg:DF 56)) ++ (clobber (reg:DF 60))] ++ "TARGET_EXPLICIT_RELOCS && flag_sw_int_div_opt == 1 ++ &&(GET_CODE (operands[3])==UMOD)" ++ " srl %1,52,$28 ++ srl %2,52,$27 ++ bis $28,$27,$28 ++ bne $28,%l7 ++ ifmovd %1,$f22 ++ fcvtld $f22,$f24 ++ ifmovd %2,$f22 ++ fcvtld $f22,$f28 ++ fdivd $f24,$f28,$f23 ++ fcvtdl_z $f23,$f22 ++ fcvtld $f22,$f23 ++ fnmad $f23,$f28,$f24,$f22 ++ fcvtdl_z $f22,$f23 ++ fimovd $f23,%0 ++ br %l8 ++%l7: ++ ldl %0,%5(%4)\t\t!literal!%6 ++ call $23,($27),__%E3%j6 ++%l8:" ++ [(set_attr "cannot_copy" "true") ++ (set_attr "type" "fdiv")]) ++ ++(define_insn_and_split "*divmoddi_internal_er" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (match_operator:DI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")])) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))] ++ "TARGET_EXPLICIT_RELOCS && !(TARGET_SW8A && flag_sw_int_divmod)" ++ "#" ++ "&& reload_completed" ++ [(parallel [(set (match_dup 0) (match_dup 3)) ++ (use (match_dup 0)) ++ (use (match_dup 4)) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))])] ++{ ++ if (flag_sw_int_div_opt) ++ { ++ const char *str; ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ operands[7] = gen_label_rtx (); ++ operands[8] = gen_label_rtx (); ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: ++ str = "__divl"; ++ emit_insn (gen_int_div_use_float_di (operands[0],operands[1],operands[2],operands[3],pic_offset_table_rtx 
,gen_rtx_SYMBOL_REF (DImode, str),operands[4],operands[7],operands[8])); ++ break; ++ case UDIV: ++ str = "__divlu"; ++ emit_insn (gen_int_divu_use_float_di (operands[0],operands[1],operands[2],operands[3],pic_offset_table_rtx ,gen_rtx_SYMBOL_REF (DImode, str),operands[4],operands[7],operands[8])); ++ break; ++ case MOD: ++ str = "__reml"; ++ emit_insn (gen_int_rem_use_float_di (operands[0],operands[1],operands[2],operands[3],pic_offset_table_rtx ,gen_rtx_SYMBOL_REF (DImode, str),operands[4],operands[7],operands[8])); ++ break; ++ case UMOD: ++ str = "__remlu"; ++ emit_insn (gen_int_remu_use_float_di (operands[0],operands[1],operands[2],operands[3],pic_offset_table_rtx ,gen_rtx_SYMBOL_REF (DImode, str),operands[4],operands[7],operands[8])); ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ else ++ { ++ const char *str; ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: ++ str = "__divl"; ++ break; ++ case UDIV: ++ str = "__divlu"; ++ break; ++ case MOD: ++ str = "__reml"; ++ break; ++ case UMOD: ++ str = "__remlu"; ++ break; ++ default: ++ gcc_unreachable (); ++ } ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ emit_insn (gen_movdi_er_high_g (operands[0], pic_offset_table_rtx, ++ gen_rtx_SYMBOL_REF (DImode, str), ++ operands[4])); ++ } ++} ++ [(set_attr "type" "call") ++ (set_attr "length" "8")]) ++ ++(define_insn "*divmoddi_internal_er_1" ++ [(set (match_operand:DI 0 "register_operand" "=c") ++ (match_operator:DI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "a") ++ (match_operand:DI 2 "register_operand" "b")])) ++ (use (match_operand:DI 4 "register_operand" "c")) ++ (use (match_operand 5 "const_int_operand")) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))] ++ "TARGET_EXPLICIT_RELOCS && !(TARGET_SW8A && flag_sw_int_divmod)" ++ { ++ if (flag_sw_int_div_opt) ++ { ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: ++ case UDIV: ++ case MOD: ++ case UMOD: ++ return ""; ++ } ++ } ++ else ++ { ++ return "call 
$23,($27),__%E3%j5"; ++ } ++ } ++ [(set_attr "type" "call") ++ (set_attr "length" "4")]) ++ ++(define_insn "*divmoddi_internal" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (match_operator:DI 3 "divmod_operator" ++ [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "register_operand" "r")])) ++ (clobber (reg:DI 23)) ++ (clobber (reg:DI 28))] ++ "TARGET_SW8A && flag_sw_int_divmod" ++ { ++ switch (GET_CODE (operands[3])) ++ { ++ case DIV: return "divl %1,%2,%0"; ++ case UDIV: return "udivl %1,%2,%0"; ++ case MOD: return "reml %1,%2,%0"; ++ case UMOD: return "ureml %1,%2,%0"; ++ } ++ } ++ [(set_attr "length" "4")]) ++ ++;; Next are the basic logical operations. We only expose the DImode operations ++;; to the rtl expanders, but SImode versions exist for combine as well as for ++;; the atomic operation splitters. ++ ++(define_insn "*andsi_internal" ++ [(set (match_operand:SI 0 "register_operand" "=r,r,r") ++ (and:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ,rJ") ++ (match_operand:SI 2 "and_operand" "rI,N,M")))] ++ "" ++ "@ ++ and %r1,%2,%0 ++ bic %r1,%N2,%0 ++ zapnot %r1,%m2,%0" ++ [(set_attr "type" "ilog,ilog,shift")]) ++ ++(define_insn "anddi3" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r") ++ (and:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ,rJ") ++ (match_operand:DI 2 "and_operand" "rI,N,M")))] ++ "" ++ "@ ++ and %r1,%2,%0 ++ bic %r1,%N2,%0 ++ zapnot %r1,%m2,%0" ++ [(set_attr "type" "ilog,ilog,shift")]) ++ ++;; There are times when we can split an AND into two AND insns. This occurs ++;; when we can first clear any bytes and then clear anything else. For ++;; example "I & 0xffff07" is "(I & 0xffffff) & 0xffffffffffffff07". ++;; Only do this when running on 64-bit host since the computations are ++;; too messy otherwise. ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (and:DI (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "const_int_operand")))] ++ "! 
and_operand (operands[2], DImode)" ++ [(set (match_dup 0) (and:DI (match_dup 1) (match_dup 3))) ++ (set (match_dup 0) (and:DI (match_dup 0) (match_dup 4)))] ++{ ++ unsigned HOST_WIDE_INT mask1 = INTVAL (operands[2]); ++ unsigned HOST_WIDE_INT mask2 = mask1; ++ int i; ++ ++ /* For each byte that isn't all zeros, make it all ones. */ ++ for (i = 0; i < 64; i += 8) ++ if ((mask1 & ((HOST_WIDE_INT) 0xff << i)) != 0) ++ mask1 |= (HOST_WIDE_INT) 0xff << i; ++ ++ /* Now turn on any bits we've just turned off. */ ++ mask2 |= ~ mask1; ++ ++ operands[3] = GEN_INT (mask1); ++ operands[4] = GEN_INT (mask2); ++}) ++ ++(define_insn "zero_extendqi2" ++ [(set (match_operand:I248MODE 0 "register_operand" "=r,r") ++ (zero_extend:I248MODE ++ (match_operand:QI 1 "reg_or_bwx_memory_operand" "r,m")))] ++ "" ++ "@ ++ and %1,0xff,%0 ++ ldbu%U1 %0,%1" ++ [(set_attr "type" "ilog,ild") ++ (set_attr "isa" "*,bwx")]) ++ ++(define_insn "zero_extendhi2" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r,r") ++ (zero_extend:I48MODE ++ (match_operand:HI 1 "reg_or_bwx_memory_operand" "r,m")))] ++ "" ++ "@ ++ zapnot %1,3,%0 ++ ldhu%U1 %0,%1" ++ [(set_attr "type" "shift,ild") ++ (set_attr "isa" "*,bwx")]) ++ ++(define_insn "zero_extendsidi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (zero_extend:DI (match_operand:SI 1 "register_operand" "r")))] ++ "" ++ "zapnot %1,15,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "andnot3" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r") ++ (and:I48MODE ++ (not:I48MODE (match_operand:I48MODE 1 "reg_or_8bit_operand" "rI")) ++ (match_operand:I48MODE 2 "reg_or_0_operand" "rJ")))] ++ "" ++ "bic %r2,%1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*iorsi_internal" ++ [(set (match_operand:SI 0 "register_operand" "=r,r") ++ (ior:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ") ++ (match_operand:SI 2 "or_operand" "rI,N")))] ++ "" ++ "@ ++ bis %r1,%2,%0 ++ ornot %r1,%N2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn 
"iordi3" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (ior:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ") ++ (match_operand:DI 2 "or_operand" "rI,N")))] ++ "" ++ "@ ++ bis %r1,%2,%0 ++ ornot %r1,%N2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*one_cmplsi_internal" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (not:SI (match_operand:SI 1 "reg_or_8bit_operand" "rI")))] ++ "" ++ "ornot $31,%1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "one_cmpldi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (not:DI (match_operand:DI 1 "reg_or_8bit_operand" "rI")))] ++ "" ++ "ornot $31,%1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*iornot3" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r") ++ (ior:I48MODE ++ (not:I48MODE (match_operand:I48MODE 1 "reg_or_8bit_operand" "rI")) ++ (match_operand:I48MODE 2 "reg_or_0_operand" "rJ")))] ++ "" ++ "ornot %r2,%1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*xorsi_internal" ++ [(set (match_operand:SI 0 "register_operand" "=r,r") ++ (xor:SI (match_operand:SI 1 "reg_or_0_operand" "%rJ,rJ") ++ (match_operand:SI 2 "or_operand" "rI,N")))] ++ "" ++ "@ ++ xor %r1,%2,%0 ++ eqv %r1,%N2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "xordi3" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (xor:DI (match_operand:DI 1 "reg_or_0_operand" "%rJ,rJ") ++ (match_operand:DI 2 "or_operand" "rI,N")))] ++ "" ++ "@ ++ xor %r1,%2,%0 ++ eqv %r1,%N2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*xornot3" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r") ++ (not:I48MODE (xor:I48MODE ++ (match_operand:I48MODE 1 "register_operand" "%rJ") ++ (match_operand:I48MODE 2 "register_operand" "rI"))))] ++ "" ++ "eqv %r1,%2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_expand "ffsdi2" ++ [(set (match_dup 2) ++ (ctz:DI (match_operand:DI 1 "register_operand"))) ++ (set (match_dup 3) ++ (plus:DI (match_dup 2) (const_int 1))) ++ (set (match_operand:DI 0 
"register_operand") ++ (if_then_else:DI (eq (match_dup 1) (const_int 0)) ++ (const_int 0) (match_dup 3)))] ++ "" ++{ ++ operands[2] = gen_reg_rtx (DImode); ++ operands[3] = gen_reg_rtx (DImode); ++}) ++ ++(define_insn "clzdi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (clz:DI (match_operand:DI 1 "register_operand" "r")))] ++ "" ++ "ctlz %1,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_insn "ctzdi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ctz:DI (match_operand:DI 1 "register_operand" "r")))] ++ "" ++ "cttz %1,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_insn "popcountdi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (popcount:DI (match_operand:DI 1 "register_operand" "r")))] ++ "" ++ "ctpop %1,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_insn "popcountsi2" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (popcount:SI (match_operand:SI 1 "register_operand" "r")))] ++ "" ++ "zapnot %1,15,%0\;ctpop %0,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_insn "bswapsi2_internal" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (bswap:SI (match_operand:SI 1 "register_operand" "r")))] ++ "TARGET_SW8A && flag_sw_rev == 1" ++ "revbw %1,%0" ++ [(set_attr "isa" "sw8a")]) ++ ++(define_expand "bswapsi2" ++ [(set (match_operand:SI 0 "register_operand") ++ (bswap:SI (match_operand:SI 1 "register_operand")))] ++ "!optimize_size" ++{ ++ if (TARGET_SW8A == 0 || flag_sw_rev != 1) ++ { ++ rtx t0, t1; ++ ++ t0 = gen_reg_rtx (DImode); ++ t1 = gen_reg_rtx (DImode); ++ ++ emit_insn (gen_inslh (t0, gen_lowpart (DImode, operands[1]), GEN_INT (7))); ++ emit_insn (gen_inswl_const (t1, gen_lowpart (HImode, operands[1]), ++ GEN_INT (24))); ++ emit_insn (gen_iordi3 (t1, t0, t1)); ++ emit_insn (gen_lshrdi3 (t0, t1, GEN_INT (16))); ++ emit_insn (gen_anddi3 (t1, t1, sw_64_expand_zap_mask (0x5))); ++ emit_insn (gen_anddi3 (t0, t0, sw_64_expand_zap_mask (0xa))); ++ emit_insn (gen_addsi3 (operands[0], gen_lowpart (SImode, t0), ++ gen_lowpart 
(SImode, t1))); ++ DONE; ++ } ++ else ++ { ++ emit_insn (gen_bswapsi2_internal (operands[0], operands[1])); ++ DONE; ++ } ++}) ++ ++;; Next come the shifts and the various extract and insert operations. ++ ++(define_insn "bswapdi2_internal" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (bswap:DI (match_operand:DI 1 "register_operand" "r")))] ++ "TARGET_SW8A && flag_sw_rev == 1" ++ "revbl %1,%0" ++ [(set_attr "isa" "sw8a")]) ++ ++(define_expand "bswapdi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (bswap:DI (match_operand:DI 1 "register_operand")))] ++ "!optimize_size" ++{ ++ if (TARGET_SW8A == 0 || flag_sw_rev != 1) ++ { ++ rtx t0, t1; ++ ++ t0 = gen_reg_rtx (DImode); ++ t1 = gen_reg_rtx (DImode); ++ ++ /* This method of shifting and masking is not specific to Sw_64, but ++ is only profitable on Sw_64 because of our handy byte zap insn. */ ++ ++ emit_insn (gen_lshrdi3 (t0, operands[1], GEN_INT (32))); ++ emit_insn (gen_ashldi3 (t1, operands[1], GEN_INT (32))); ++ emit_insn (gen_iordi3 (t1, t0, t1)); ++ ++ emit_insn (gen_lshrdi3 (t0, t1, GEN_INT (16))); ++ emit_insn (gen_ashldi3 (t1, t1, GEN_INT (16))); ++ emit_insn (gen_anddi3 (t0, t0, sw_64_expand_zap_mask (0xcc))); ++ emit_insn (gen_anddi3 (t1, t1, sw_64_expand_zap_mask (0x33))); ++ emit_insn (gen_iordi3 (t1, t0, t1)); ++ ++ emit_insn (gen_lshrdi3 (t0, t1, GEN_INT (8))); ++ emit_insn (gen_ashldi3 (t1, t1, GEN_INT (8))); ++ emit_insn (gen_anddi3 (t0, t0, sw_64_expand_zap_mask (0xaa))); ++ emit_insn (gen_anddi3 (t1, t1, sw_64_expand_zap_mask (0x55))); ++ emit_insn (gen_iordi3 (operands[0], t0, t1)); ++ DONE; ++ } ++ else ++ { ++ emit_insn (gen_bswapdi2_internal (operands[0], operands[1])); ++ DONE; ++ } ++}) ++ ++(define_insn "lshrsi3" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (lshiftrt:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") ++ (match_operand:SI 2 "reg_or_5bit_operand" "rY")))] ++ "TARGET_SW8A && flag_sw_shift_word == 1" ++ "srlw %r1,%2,%0" ++ [(set_attr "type" "shift") 
++ (set_attr "isa" "sw8a")]) ++ ++(define_insn "ashrsi3" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (ashiftrt:SI (match_operand:SI 1 "register_operand" "r") ++ (match_operand:SI 2 "register_operand" "r")))] ++ "TARGET_SW8A && flag_sw_shift_word == 1" ++ "sraw %r1,%2,%0" ++ [(set_attr "type" "shift") ++ (set_attr "isa" "sw8a")]) ++ ++(define_insn "rotlsi3" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (subreg:SI ++ (zero_extend:DI (rotate:SI (match_operand:SI 1 "reg_or_0_operand" "rJ") ++ (match_operand:SI 2 "reg_or_5bit_operand" "rY"))) 0))] ++ "TARGET_SW8A && flag_sw_shift_word == 1" ++ "rolw %r1,%2,%0" ++ [(set_attr "type" "shift") ++ (set_attr "isa" "sw8a")]) ++ ++(define_insn "rotldi3" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (rotate:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "reg_or_6bit_operand" "rS")))] ++ "TARGET_SW8A && flag_sw_shift_word == 1" ++ "roll %r1,%2,%0" ++ [(set_attr "type" "shift") ++ (set_attr "isa" "sw8a")]) ++ ++(define_insn "ashldi3" ++ [(set (match_operand:DI 0 "register_operand" "=r,r") ++ (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ,rJ") ++ (match_operand:DI 2 "reg_or_6bit_operand" "P,rS")))] ++ "" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ if (operands[2] == const1_rtx) ++ return "addl %r1,%r1,%0"; ++ else ++ return "sll %r1,%2,%0"; ++ case 1: ++ if (TARGET_SW8A == 0 || flag_sw_shift_word != 1) ++ return "sll %r1,%2,%0"; ++ else ++ return "slll %r1,%2,%0"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "iadd,shift")]) ++ ++(define_expand "ashlsi3" ++ [(set (match_operand:SI 0 "register_operand") ++ (ashift:SI (match_operand:SI 1 "reg_or_0_operand") ++ (match_operand:SI 2 "reg_or_5bit_operand")))]) ++ ++(define_insn "*ashlsi3_sll" ++ [(set (match_operand:SI 0 "register_operand" "=r,&r") ++ (ashift:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,rJ") ++ (match_operand:SI 2 "reg_or_5bit_operand" "P,rS")))] ++ "TARGET_SW8A == 0 || 
flag_sw_shift_word != 1" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ if (operands[2] == const1_rtx) ++ return "addw %r1,%r1,%0"; ++ else ++ return "s%P2addw %r1,0,%0"; ++ case 1: ++ if (REG_P (operands[2])) ++ return "and %2,31,%0\;sll %r1,%0,%0"; ++ else ++ return "sll %r1,%2,%0"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "iadd,shift")]) ++ ++(define_insn "*ashlsi3_sllw" ++ [(set (match_operand:SI 0 "register_operand" "=r,r") ++ (ashift:SI (match_operand:SI 1 "reg_or_0_operand" "rJ,rJ") ++ (match_operand:SI 2 "reg_or_5bit_operand" "P,rY")))] ++ "TARGET_SW8A && flag_sw_shift_word == 1" ++{ ++ switch (which_alternative) ++ { ++ case 0: ++ if (operands[2] == const1_rtx) ++ return "addw %r1,%r1,%0"; ++ else ++ return "s%P2addw %r1,0,%0"; ++ case 1: ++ return "sllw %r1,%2,%0"; ++ default: ++ gcc_unreachable (); ++ } ++} ++ [(set_attr "type" "iadd,shift") ++ (set_attr "isa" "*,sw8a")]) ++ ++(define_insn "*ashldi_se" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (subreg:SI (ashift:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "const_int_operand" "P")) ++ 0)))] ++ "IN_RANGE (INTVAL (operands[2]), 1, 3)" ++{ ++ if (operands[2] == const1_rtx) ++ return "addw %r1,%r1,%0"; ++ else ++ return "s%P2addw %r1,0,%0"; ++} ++ [(set_attr "type" "iadd")]) ++ ++(define_insn "lshrdi3" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "reg_or_6bit_operand" "rS")))] ++ "" ++{ ++ if (TARGET_SW8A == 0 || flag_sw_shift_word != 1) ++ return "srl %r1,%2,%0"; ++ else ++ return "srll %r1,%2,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++(define_insn "ashrdi3" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "reg_or_6bit_operand" "rS")))] ++ "" ++{ ++ if (TARGET_SW8A == 0 || flag_sw_shift_word != 1) ++ return "sra %r1,%2,%0"; ++ else ++ 
return "sral %r1,%2,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++(define_insn "extendqi2" ++ [(set (match_operand:I24MODE 0 "register_operand" "=r") ++ (sign_extend:I24MODE ++ (match_operand:QI 1 "register_operand" "r")))] ++ "" ++ "sextb %1,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_expand "extendqidi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (sign_extend:DI (match_operand:QI 1 "general_operand")))] ++ "" ++{ ++ operands[1] = force_reg (QImode, operands[1]); ++}) ++ ++(define_insn "*extendqidi2_bwx" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI (match_operand:QI 1 "register_operand" "r")))] ++ "" ++ "sextb %1,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "extendhisi2" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (sign_extend:SI (match_operand:HI 1 "register_operand" "r")))] ++ "" ++ "sexth %1,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_expand "extendhidi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (sign_extend:DI (match_operand:HI 1 "general_operand")))] ++ "" ++{ ++ operands[1] = force_reg (HImode, operands[1]); ++}) ++ ++(define_insn "*extendhidi2_bwx" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI (match_operand:HI 1 "register_operand" "r")))] ++ "" ++ "sexth %1,%0" ++ [(set_attr "type" "shift")]) ++ ++;; Here's how we sign extend an unaligned byte and halfword. Doing this ++;; as a pattern saves one instruction. The code is similar to that for ++;; the unaligned loads (see below). ++;; ++;; Operand 1 is the address, operand 0 is the result. 
++ ++(define_expand "unaligned_extendqidi" ++ [(set (match_dup 3) ++ (mem:DI (and:DI (match_operand:DI 1 "address_operand") (const_int -8)))) ++ (set (match_dup 4) ++ (ashift:DI (match_dup 3) ++ (minus:DI (const_int 64) ++ (ashift:DI ++ (and:DI (match_dup 2) (const_int 7)) ++ (const_int 3))))) ++ (set (match_operand:QI 0 "register_operand") ++ (ashiftrt:DI (match_dup 4) (const_int 56)))] ++ "" ++{ ++ operands[0] = gen_lowpart (DImode, operands[0]); ++ operands[2] = get_unaligned_offset (operands[1], 1); ++ operands[3] = gen_reg_rtx (DImode); ++ operands[4] = gen_reg_rtx (DImode); ++}) ++ ++(define_expand "unaligned_extendhidi" ++ [(set (match_dup 3) ++ (mem:DI (and:DI (match_operand:DI 1 "address_operand") (const_int -8)))) ++ (set (match_dup 4) ++ (ashift:DI (match_dup 3) ++ (minus:DI (const_int 64) ++ (ashift:DI ++ (and:DI (match_dup 2) (const_int 7)) ++ (const_int 3))))) ++ (set (match_operand:HI 0 "register_operand") ++ (ashiftrt:DI (match_dup 4) (const_int 48)))] ++ "" ++{ ++ operands[0] = gen_lowpart (DImode, operands[0]); ++ operands[2] = get_unaligned_offset (operands[1], 2); ++ operands[3] = gen_reg_rtx (DImode); ++ operands[4] = gen_reg_rtx (DImode); ++}) ++ ++(define_insn "*extxl_const" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "mode_width_operand" "n") ++ (match_operand:DI 3 "mul8_operand" "I")))] ++ "" ++{ ++ if (INTVAL (operands[2])==8) ++ return "extlb %r1,%s3,%0"; ++ else if (INTVAL (operands[2])==16) ++ return "extlh %r1,%s3,%0"; ++ else if (INTVAL (operands[2])==32) ++ return "extlw %r1,%s3,%0"; ++ else if (INTVAL (operands[2])==64) ++ return "extll %r1,%s3,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++(define_insn "extxl" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (zero_extract:DI ++ (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "mode_width_operand" "n") ++ (ashift:DI (match_operand:DI 3 "reg_or_8bit_operand" 
"rI") ++ (const_int 3))))] ++ "" ++{ ++ if (INTVAL (operands[2])==8) ++ return "extlb %r1,%3,%0"; ++ else if (INTVAL (operands[2])==16) ++ return "extlh %r1,%3,%0"; ++ else if (INTVAL (operands[2])==32) ++ return "extlw %r1,%3,%0"; ++ else if (INTVAL (operands[2])==64) ++ return "extll %r1,%3,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++;; Combine has some strange notion of preserving existing undefined behavior ++;; in shifts larger than a word size. So capture these patterns that it ++;; should have turned into zero_extracts. ++(define_insn "*extxl_1" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (and:DI (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 3))) ++ (match_operand:DI 3 "mode_mask_operand" "n")))] ++ "" ++{ ++ if (INTVAL (operands[3]) == 0xff) ++ return "extlb %r1,%2,%0"; ++ else if (INTVAL (operands[3]) == 0xffff) ++ return "extlh %r1,%2,%0"; ++ else if (INTVAL (operands[3]) == 0xffffffff) ++ return "extlw %r1,%2,%0"; ++ else if (INTVAL (operands[3]) == -1) ++ return "extll %r1,%2,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++(define_insn "*extql_2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (lshiftrt:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 3))))] ++ "" ++ "extll %1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "extqh" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI ++ (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (minus:DI (const_int 64) ++ (ashift:DI ++ (and:DI ++ (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 7)) ++ (const_int 3)))))] ++ "" ++ "exthl %r1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "extwh" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI ++ (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (const_int 65535)) ++ (minus:DI (const_int 64) ++ (ashift:DI ++ 
(and:DI ++ (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 7)) ++ (const_int 3)))))] ++ "" ++ "exthh %r1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "extlh" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI ++ (and:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (const_int 2147483647)) ++ (minus:DI (const_int 64) ++ (ashift:DI ++ (and:DI ++ (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 7)) ++ (const_int 3)))))] ++ "" ++ "exthw %r1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++;; This converts an extXl into an extXh with an appropriate adjustment ++;; to the address calculation. ++(define_insn "insbl_const" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI (zero_extend:DI ++ (match_operand:QI 1 "register_operand" "r")) ++ (match_operand:DI 2 "mul8_operand" "I")))] ++ "" ++ "inslb %1,%s2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "inswl_const" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI (zero_extend:DI ++ (match_operand:HI 1 "register_operand" "r")) ++ (match_operand:DI 2 "mul8_operand" "I")))] ++ "" ++ "inslh %1,%s2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "insll_const" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI (zero_extend:DI ++ (match_operand:SI 1 "register_operand" "r")) ++ (match_operand:DI 2 "mul8_operand" "I")))] ++ "" ++ "inslw %1,%s2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "insbl" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI (zero_extend:DI ++ (match_operand:QI 1 "register_operand" "r")) ++ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 3))))] ++ "" ++ "inslb %1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "inswl" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI (zero_extend:DI ++ (match_operand:HI 1 "register_operand" "r")) ++ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 3))))] ++ "" ++ 
"inslh %1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "insll" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI (zero_extend:DI ++ (match_operand:SI 1 "register_operand" "r")) ++ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 3))))] ++ "" ++ "inslw %1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "insql" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (ashift:DI (match_operand:DI 1 "register_operand" "r") ++ (ashift:DI (match_operand:DI 2 "reg_or_8bit_operand" "rI") ++ (const_int 3))))] ++ "" ++ "insll %1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++;; Combine has this sometimes habit of moving the and outside of the ++;; shift, making life more interesting. ++(define_insn "*insxl" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (and:DI (ashift:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "mul8_operand" "I")) ++ (match_operand:DI 3 "const_int_operand" "i")))] ++ "((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2]) ++ == (unsigned HOST_WIDE_INT) INTVAL (operands[3])) ++ || ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2]) ++ == (unsigned HOST_WIDE_INT) INTVAL (operands[3])) ++ || ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2]) ++ == (unsigned HOST_WIDE_INT) INTVAL (operands[3]))" ++{ ++#if HOST_BITS_PER_WIDE_INT == 64 ++ if ((unsigned HOST_WIDE_INT) 0xff << INTVAL (operands[2]) ++ == (unsigned HOST_WIDE_INT) INTVAL (operands[3])) ++ return "inslb %1,%s2,%0"; ++ if ((unsigned HOST_WIDE_INT) 0xffff << INTVAL (operands[2]) ++ == (unsigned HOST_WIDE_INT) INTVAL (operands[3])) ++ return "inslh %1,%s2,%0"; ++ if ((unsigned HOST_WIDE_INT) 0xffffffff << INTVAL (operands[2]) ++ == (unsigned HOST_WIDE_INT) INTVAL (operands[3])) ++ return "inslw %1,%s2,%0"; ++#endif ++ gcc_unreachable (); ++} ++ [(set_attr "type" "shift")]) ++ ++;; We do not include the insXh insns because they are complex to express ++;; and it does not appear that we would ever 
want to generate them. ++;; ++;; Since we need them for block moves, though, cop out and use unspec. ++(define_insn "insxh" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "mode_width_operand" "n") ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI")] ++ UNSPEC_INSXH))] ++ "" ++{ ++ if (INTVAL (operands[2])==16) ++ return "inshh %r1,%3,%0"; ++ else if (INTVAL (operands[2])==32) ++ return "inshw %r1,%3,%0"; ++ else if (INTVAL (operands[2])==64) ++ return "inshl %r1,%3,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++(define_insn "mskxl" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (and:DI (not:DI (ashift:DI ++ (match_operand:DI 2 "mode_mask_operand" "n") ++ (ashift:DI ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI") ++ (const_int 3)))) ++ (match_operand:DI 1 "reg_or_0_operand" "rJ")))] ++ "" ++{ ++ if (INTVAL (operands[2]) == 0xff) ++ return "masklb %r1,%3,%0"; ++ else if (INTVAL (operands[2]) == 0xffff) ++ return "masklh %r1,%3,%0"; ++ else if (INTVAL (operands[2]) == 0xffffffff) ++ return "masklw %r1,%3,%0"; ++ else if (INTVAL (operands[2]) == -1) ++ return "maskll %r1,%3,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++;; We do not include the mskXh insns because it does not appear we would ++;; ever generate one. ++;; ++;; Again, we do for block moves and we use unspec again. 
++ ++(define_insn "mskxh" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "mode_width_operand" "n") ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI")] ++ UNSPEC_MSKXH))] ++ "" ++{ ++ if (INTVAL (operands[2])==16) ++ return "maskhh %r1,%3,%0"; ++ else if (INTVAL (operands[2])==32) ++ return "maskhw %r1,%3,%0"; ++ else if (INTVAL (operands[2])==64) ++ return "maskhl %r1,%3,%0"; ++} ++ [(set_attr "type" "shift")]) ++ ++(define_insn_and_split "*ze_and_ne" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (const_int 1) ++ (match_operand 2 "const_int_operand" "I")))] ++ "(unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 8" ++ "#" ++ "(unsigned HOST_WIDE_INT) INTVAL (operands[2]) < 8" ++ [(set (match_dup 0) ++ (and:DI (match_dup 1) (match_dup 3))) ++ (set (match_dup 0) ++ (ne:DI (match_dup 0) (const_int 0)))] ++ "operands[3] = GEN_INT (1 << INTVAL (operands[2]));") ++ ++;; Floating-point operations. All the double-precision insns can extend ++;; from single, so indicate that. The exception are the ones that simply ++;; play with the sign bits; it's not clear what to do there. 
++ ++(define_mode_iterator FMODE [SF DF]) ++ ++(define_mode_attr opmode [(SF "si") (DF "di")]) ++ ++(define_insn "abs2" ++ [(set (match_operand:FMODE 0 "register_operand" "=f") ++ (abs:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG")))] ++ "TARGET_FP" ++ "fcpys $f31,%R1,%0" ++ [(set_attr "type" "fcpys")]) ++ ++(define_insn "*nabs2" ++ [(set (match_operand:FMODE 0 "register_operand" "=f") ++ (neg:FMODE ++ (abs:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP" ++ "fcpysn $f31,%R1,%0" ++ [(set_attr "type" "fadd")]) ++ ++(define_expand "abstf2" ++ [(parallel [(set (match_operand:TF 0 "register_operand") ++ (abs:TF (match_operand:TF 1 "reg_or_0_operand"))) ++ (use (match_dup 2))])] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "operands[2] = force_reg (DImode, GEN_INT (HOST_WIDE_INT_1U << 63));") ++ ++(define_insn_and_split "*abstf_internal" ++ [(set (match_operand:TF 0 "register_operand" "=r") ++ (abs:TF (match_operand:TF 1 "reg_or_0_operand" "rG"))) ++ (use (match_operand:DI 2 "register_operand" "r"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "#" ++ "&& reload_completed" ++ [(const_int 0)] ++ "sw_64_split_tfmode_frobsign (operands, gen_andnotdi3); DONE;") ++ ++(define_insn "neg2" ++ [(set (match_operand:FMODE 0 "register_operand" "=f") ++ (neg:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG")))] ++ "TARGET_FP" ++ "fcpysn %R1,%R1,%0" ++ [(set_attr "type" "fadd")]) ++ ++(define_expand "negtf2" ++ [(parallel [(set (match_operand:TF 0 "register_operand") ++ (neg:TF (match_operand:TF 1 "reg_or_0_operand"))) ++ (use (match_dup 2))])] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "operands[2] = force_reg (DImode, GEN_INT ((HOST_WIDE_INT) 1 << 63));") ++ ++(define_insn_and_split "*negtf_internal" ++ [(set (match_operand:TF 0 "register_operand" "=r") ++ (neg:TF (match_operand:TF 1 "reg_or_0_operand" "rG"))) ++ (use (match_operand:DI 2 "register_operand" "r"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "#" ++ "&& reload_completed" ++ [(const_int 0)] ++ "sw_64_split_tfmode_frobsign 
(operands, gen_xordi3); DONE;") ++ ++(define_insn "copysign3" ++ [(set (match_operand:FMODE 0 "register_operand" "=f") ++ (unspec:FMODE [(match_operand:FMODE 1 "reg_or_0_operand" "fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG")] ++ UNSPEC_COPYSIGN))] ++ "TARGET_FP" ++ "fcpys %R2,%R1,%0" ++ [(set_attr "type" "fadd")]) ++ ++(define_insn "*ncopysign3" ++ [(set (match_operand:FMODE 0 "register_operand" "=f") ++ (neg:FMODE ++ (unspec:FMODE [(match_operand:FMODE 1 "reg_or_0_operand" "fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG")] ++ UNSPEC_COPYSIGN)))] ++ "TARGET_FP" ++ "fcpysn %R2,%R1,%0" ++ [(set_attr "type" "fadd")]) ++ ++(define_insn "*add3" ++ [(set (match_operand:FMODE 0 "register_operand" "=&f,&f") ++ (plus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 0" ++ "fadd%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*add3_same" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (plus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 1" ++ "fadd%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "add3" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (plus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP" ++ "" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ 
(set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++ ++(define_insn "*fmasf4" ++ [(set (match_operand:SF 0 "register_operand" "=&f") ++ (fma:SF (match_operand:SF 1 "register_operand" "f") ++ (match_operand:SF 2 "register_operand" "f") ++ (match_operand:SF 3 "register_operand" "f")))] ++ "flag_sw_sdsame == 0" ++ "fmas %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fmasf4_same" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (fma:SF (match_operand:SF 1 "register_operand" "f") ++ (match_operand:SF 2 "register_operand" "f") ++ (match_operand:SF 3 "register_operand" "f")))] ++ "flag_sw_sdsame == 1" ++ "fmas %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_expand "fmasf4" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (fma:SF (match_operand:SF 1 "register_operand" "f") ++ (match_operand:SF 2 "register_operand" "f") ++ (match_operand:SF 3 "register_operand" "f")))] ++ "flag_sw_fma==1 && TARGET_FP" ++ "" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fmadf4" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (fma:DF (match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f") ++ (match_operand:DF 3 "register_operand" "f")))] ++ "flag_sw_sdsame== 0" ++ "fmad %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fmadf4_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF 
(match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f") ++ (match_operand:DF 3 "register_operand" "f")))] ++ "flag_sw_sdsame == 1" ++ "fmad %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_expand "fmadf4" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF (match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f") ++ (match_operand:DF 3 "register_operand" "f")))] ++ "flag_sw_fma==1 && TARGET_FP" ++ "" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fmssf4" ++ [(set (match_operand:SF 0 "register_operand" "=&f") ++ (fma:SF ++ (match_operand:SF 1 "register_operand" "f") ++ (match_operand:SF 2 "register_operand" "f") ++ (neg:SF (match_operand:SF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 0" ++ "fmss %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*fmssf4_same" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (fma:SF ++ (match_operand:SF 1 "register_operand" "f") ++ (match_operand:SF 2 "register_operand" "f") ++ (neg:SF (match_operand:SF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 1" ++ "fmss %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fmsdf4" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (fma:DF ++ (match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f") ++ (neg:DF (match_operand:DF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 0" ++ "fmsd %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ 
(set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*fmsdf4_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF ++ (match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f") ++ (neg:DF (match_operand:DF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 1" ++ "fmsd %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_expand "fmsdf4" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF ++ (match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f") ++ (neg:DF (match_operand:DF 3 "register_operand" "f"))))] ++ "flag_sw_fma==1 && TARGET_FP" ++ "" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fnmasf4" ++ [(set (match_operand:SF 0 "register_operand" "=&f") ++ (fma:SF ++ (neg:SF (match_operand:SF 1 "register_operand" "f")) ++ (match_operand:SF 2 "register_operand" "f") ++ (match_operand:SF 3 "register_operand" "f")))] ++ "flag_sw_sdsame == 0" ++ "fnmas %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fnmasf4_same" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (fma:SF ++ (neg:SF (match_operand:SF 1 "register_operand" "f")) ++ (match_operand:SF 2 "register_operand" "f") ++ (match_operand:SF 3 "register_operand" "f")))] ++ "flag_sw_sdsame == 1" ++ "fnmas %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_expand "fnmasf4" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (fma:SF ++ (neg:SF (match_operand:SF 1 "register_operand" "f")) ++ (match_operand:SF 2 "register_operand" "f") ++ (match_operand:SF 3 
"register_operand" "f")))] ++ "flag_sw_fma==1 && TARGET_FP" ++ "" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*fnmadf4" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (fma:DF ++ (neg:DF (match_operand:DF 1 "register_operand" "f")) ++ (match_operand:DF 2 "register_operand" "f") ++ (match_operand:DF 3 "register_operand" "f")))] ++ "flag_sw_sdsame == 0" ++ "fnmad %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fnmadf4_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF ++ (neg:DF (match_operand:DF 1 "register_operand" "f")) ++ (match_operand:DF 2 "register_operand" "f") ++ (match_operand:DF 3 "register_operand" "f")))] ++ "flag_sw_sdsame == 1" ++ "fnmad %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_expand "fnmadf4" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF ++ (neg:DF (match_operand:DF 1 "register_operand" "f")) ++ (match_operand:DF 2 "register_operand" "f") ++ (match_operand:DF 3 "register_operand" "f")))] ++ "flag_sw_fma==1 && TARGET_FP" ++ "" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*fnmssf4" ++ [(set (match_operand:SF 0 "register_operand" "=&f") ++ (fma:SF ++ (neg:SF (match_operand:SF 1 "register_operand" "f")) ++ (match_operand:SF 2 "register_operand" "f") ++ (neg:SF (match_operand:SF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 0" ++ "fnmss %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fnmssf4_same" ++ [(set (match_operand:SF 0 
"register_operand" "=f") ++ (fma:SF ++ (neg:SF (match_operand:SF 1 "register_operand" "f")) ++ (match_operand:SF 2 "register_operand" "f") ++ (neg:SF (match_operand:SF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 1" ++ "fnmss %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_expand "fnmssf4" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (fma:SF ++ (neg:SF (match_operand:SF 1 "register_operand" "f")) ++ (match_operand:SF 2 "register_operand" "f") ++ (neg:SF (match_operand:SF 3 "register_operand" "f"))))] ++ "flag_sw_fma==1 && TARGET_FP" ++ "" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*fnmsdf4" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (fma:DF ++ (neg:DF (match_operand:DF 1 "register_operand" "f")) ++ (match_operand:DF 2 "register_operand" "f") ++ (neg:DF (match_operand:DF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 0" ++ "fnmsd %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*fnmsdf4_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF ++ (neg:DF (match_operand:DF 1 "register_operand" "f")) ++ (match_operand:DF 2 "register_operand" "f") ++ (neg:DF (match_operand:DF 3 "register_operand" "f"))))] ++ "flag_sw_sdsame == 1" ++ "fnmsd %R1,%R2,%R3,%0" ++ [(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_expand "fnmsdf4" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (fma:DF ++ (neg:DF (match_operand:DF 1 "register_operand" "f")) ++ (match_operand:DF 2 "register_operand" "f") ++ (neg:DF (match_operand:DF 3 "register_operand" "f"))))] ++ "flag_sw_fma==1 && TARGET_FP" ++ "" ++ 
[(set_attr "type" "fmadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++ ++(define_insn "*adddf_ext1" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (plus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fadd%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*adddf_ext1_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (plus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fadd%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*adddf_ext2" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (plus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "%fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fadd%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*adddf_ext2_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (plus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "%fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fadd%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_expand "addtf3" ++ [(use 
(match_operand:TF 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand")) ++ (use (match_operand:TF 2 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_arith (PLUS, operands); DONE;") ++ ++(define_insn "*sub3" ++ [(set (match_operand:FMODE 0 "register_operand" "=&f,&f") ++ (minus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 0" ++ "fsub%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*sub3_same" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (minus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 1" ++ "fsub%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "sub3" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (minus:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP" ++ "" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*subdf_ext1" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (minus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 
"reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fsub%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*subdf_ext1_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (minus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fsub%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*subdf_ext2" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (minus:DF (match_operand:DF 1 "reg_or_0_operand" "fG") ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fsub%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*subdf_ext2_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (minus:DF (match_operand:DF 1 "reg_or_0_operand" "fG") ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fsub%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*subdf_ext3" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (minus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fsub%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr 
"round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*subdf_ext3_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (minus:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fsub%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_expand "subtf3" ++ [(use (match_operand:TF 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand")) ++ (use (match_operand:TF 2 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_arith (MINUS, operands); DONE;") ++ ++(define_insn "*mul3" ++ [(set (match_operand:FMODE 0 "register_operand" "=&f,&f") ++ (mult:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 0" ++ "fmul%/ %R1,%R2,%0" ++ [(set_attr "type" "fmul") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*mul3_same" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (mult:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "%fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 1" ++ "fmul%/ %R1,%R2,%0" ++ [(set_attr "type" "fmul") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "mul3" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (mult:FMODE (match_operand:FMODE 1 
"reg_or_0_operand" "%fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP" ++ "" ++ [(set_attr "type" "fmul") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++ ++(define_insn "*muldf_ext1" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (mult:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fmul%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fmul") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*muldf_ext1_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (mult:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fmul%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fmul") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*muldf_ext2" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (mult:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "%fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fmul%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fmul") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*muldf_ext2_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (mult:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "%fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && 
sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fmul%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fmul") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_expand "multf3" ++ [(use (match_operand:TF 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand")) ++ (use (match_operand:TF 2 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_arith (MULT, operands); DONE;") ++ ++(define_insn "*div3" ++ [(set (match_operand:FMODE 0 "register_operand" "=&f,&f") ++ (div:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 0" ++ "fdiv%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "opsize" "") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*div3_same" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (div:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 1" ++ "fdiv%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "opsize" "") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++;; Floating point reciprocal approximation ++(define_insn "fre" ++ [(set (match_operand:SFDF 0 "register_operand" "=f") ++ (unspec:SFDF [(match_operand:SFDF 1 "register_operand" "f")] ++ UNSPEC_FRECX))] ++ "(flag_sw_recip || flag_sw_recip_precision) && flag_reciprocal_math && TARGET_SW8A" ++ "frec %1,%0" ++ [(set_attr "type" "fp")]) ++ ++(define_expand "div3" ++ [(set (match_operand:FMODE 0 
"register_operand" "=f,f") ++ (div:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP" ++ { ++ if ((flag_sw_recip || flag_sw_recip_precision) && flag_reciprocal_math && TARGET_SW8A) ++ { ++ if (operands[1] == CONST0_RTX (mode)) ++ operands[1] = gen_move_reg (operands[1]); ++ ++ if (operands[2] == CONST0_RTX (mode)) ++ operands[2] = gen_move_reg (operands[2]); ++ ++ sw_64_emit_swdiv (operands[0], operands[1], operands[2], true); ++ DONE; ++ } ++ }) ++ ++(define_insn "*div3_fpr" ++ [(set (match_operand:FMODE 0 "register_operand" "=f") ++ (div:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG") ++ (match_operand:FMODE 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP" ++ "fdiv%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "opsize" "") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*divdf_ext1" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (div:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fdiv%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*divdf_ext1_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (div:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (match_operand:DF 2 "reg_or_0_operand" "fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fdiv%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*divdf_ext2" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (div:DF (match_operand:DF 1 "reg_or_0_operand" "fG") ++ (float_extend:DF ++ (match_operand:SF 2 
"reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fdiv%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*divdf_ext2_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (div:DF (match_operand:DF 1 "reg_or_0_operand" "fG") ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fdiv%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_insn "*divdf_ext3" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (div:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fdiv%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++(define_insn "*divdf_ext3_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (div:DF (float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG")) ++ (float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG"))))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fdiv%-%/ %R1,%R2,%0" ++ [(set_attr "type" "fdiv") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui")]) ++ ++(define_expand "divtf3" ++ [(use (match_operand:TF 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand")) ++ (use (match_operand:TF 2 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_arith (DIV, operands); DONE;") ++ ++(define_insn "2" ++ [(set (match_operand:SFDF 0 "register_operand" "=f") ++ (unspec:SFDF 
[(match_operand:SFDF 1 "register_operand" "f")] ++ FRINT))] ++ "TARGET_SW8A && flag_sw_fprnd == 1" ++ "fri %1, %0" ++ [(set_attr "type" "frint")]) ++ ++(define_insn "*sqrt2" ++ [(set (match_operand:FMODE 0 "register_operand" "=&f,&f") ++ (sqrt:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 0" ++ "fsqrt%/ %R1,%0" ++ [(set_attr "type" "fsqrt") ++ (set_attr "opsize" "") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*sqrt2_same" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (sqrt:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 1" ++ "fsqrt%/ %R1,%0" ++ [(set_attr "type" "fsqrt") ++ (set_attr "opsize" "") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "sqrt2" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (sqrt:FMODE (match_operand:FMODE 1 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP && TARGET_FIX" ++ "" ++ [(set_attr "type" "fsqrt") ++ (set_attr "opsize" "") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++;; Define conversion operators between DFmode and SImode, using the cvtql ++;; instruction. To allow combine et al to do useful things, we keep the ++;; operation as a unit until after reload, at which point we split the ++;; instructions. 
++;; ++;; Note that we (attempt to) only consider this optimization when the ++;; ultimate destination is memory. If we will be doing further integer ++;; processing, it is cheaper to do the truncation in the int regs. ++ ++(define_insn "*cvtql" ++ [(set (match_operand:SF 0 "register_operand" "=&f") ++ (unspec:SF [(match_operand:DI 1 "reg_or_0_operand" "fG")] ++ UNSPEC_CVTQL))] ++ "TARGET_FP && flag_sw_sdsame == 0" ++ "fcvtlw%/ %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "v_sv")]) ++(define_insn "*cvtql_same" ++ [(set (match_operand:SF 0 "register_operand" "=f") ++ (unspec:SF [(match_operand:DI 1 "reg_or_0_operand" "fG")] ++ UNSPEC_CVTQL))] ++ "TARGET_FP && flag_sw_sdsame == 1" ++ "fcvtlw%/ %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "v_sv")]) ++ ++(define_insn_and_split "*fix_truncdfsi_ieee" ++ [(set (match_operand:SI 0 "memory_operand" "=m") ++ (subreg:SI ++ (match_operator:DI 4 "fix_operator" ++ [(match_operand:DF 1 "reg_or_0_operand" "fG")]) 0)) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && sw_64_fptm >= SW_64_FPTM_SU && flag_sw_cmov == 0" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 2) (match_op_dup 4 [(match_dup 1)])) ++ (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) ++ (set (match_dup 5) (match_dup 3))] ++{ ++ operands[5] = adjust_address (operands[0], SFmode, 0); ++} ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes")]) ++ ++(define_insn "fix_truncdfsi2_8a" ++ [(set (match_operand:SI 0 "reg_no_subreg_operand" "=&r,&r") ++ (fix:SI ++ (match_operand:DF 1 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-w%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ 
] ++ (symbol_ref "true")))]) ++(define_expand "fix_truncdfsi2" ++ [(set (match_operand:SI 0 "reg_no_subreg_operand") ++ (fix:SI (match_operand:DF 1 "reg_or_0_operand")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1") ++ ++(define_insn "*fixuns_truncdfsi2" ++ [(set (match_operand:SI 0 "reg_no_subreg_operand" "=&r,&r") ++ (unsigned_fix:SI ++ (match_operand:DF 1 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-wu%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++(define_expand "fixuns_truncdfsi2" ++ [(set (match_operand:SI 0 "reg_no_subreg_operand") ++ (unsigned_fix:SI (match_operand:DF 1 "reg_or_0_operand")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1") ++ ++(define_insn "*wdfsi2" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (fix:SI ++ (unspec:DF [(match_operand:DF 1 "register_operand" "fG")] ++ FRINT)))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-w %1, %0" ++ [(set_attr "type" "frint")]) ++ ++(define_insn "*wudfsi2" ++ [(set (match_operand:SI 0 "register_operand" "=&r,&r") ++ (unsigned_fix:SI ++ (unspec:DF [(match_operand:DF 1 "register_operand" "fG,fG")] ++ FRINT)))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-wu %1, %0" ++ [(set_attr "type" "frint")]) ++ ++(define_insn "ldfdi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (fix:DI ++ (unspec:DF [(match_operand:DF 1 "register_operand" "fG")] ++ FRINT)))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-l %1, %0" ++ [(set_attr "type" "frint")]) ++ ++(define_insn "ludfdi2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unsigned_fix:DI ++ (unspec:DF [(match_operand:DF 1 "register_operand" "fG")] ++ FRINT)))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-lu %1, %0" ++ [(set_attr "type" 
"frint")]) ++ ++(define_insn "fix_truncdfdi2_8a" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&r,&r") ++ (fix:DI ++ (match_operand:DF 1 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-l%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "fixuns_truncdfdi2_internal" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&r,&r") ++ (unsigned_fix:DI ++ (match_operand:DF 1 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmov%-lu%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn_and_split "*fix_truncdfsi_internal" ++ [(set (match_operand:SI 0 "memory_operand" "=m") ++ (subreg:SI ++ (match_operator:DI 4 "fix_operator" ++ [(match_operand:DF 1 "reg_or_0_operand" "fG")]) 0)) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 2) (match_op_dup 4 [(match_dup 1)])) ++ (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) ++ (set (match_dup 5) (match_dup 3))] ++{ ++ //operands[4] = gen_rtx_REG (SFmode, REGNO (operands[2])); ++ operands[5] = adjust_address (operands[0], SFmode, 0); ++} ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes")]) ++ ++(define_insn "*fix_truncdfdi2" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f,&f") ++ (match_operator:DI 2 "fix_operator" ++ [(match_operand:DF 1 "reg_or_0_operand" "fG,fG")]))] ++ "TARGET_FP && ((flag_sw_sdsame == 0 && 
flag_sw_cmov == 0) || TARGET_SW6B)" ++ "fcvt%-l%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*fix_truncdfdi2_same" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f,f") ++ (match_operator:DI 2 "fix_operator" ++ [(match_operand:DF 1 "reg_or_0_operand" "fG,fG")]))] ++ "TARGET_FP && ((flag_sw_sdsame == 1 && flag_sw_cmov == 0) || TARGET_SW6B)" ++ "fcvt%-l%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "fix_truncdfdi2" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand") ++ (fix:DI (match_operand:DF 1 "reg_or_0_operand")))] ++ "TARGET_FP") ++ ++(define_expand "fixuns_truncdfdi2" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand") ++ (unsigned_fix:DI (match_operand:DF 1 "reg_or_0_operand")))] ++ "TARGET_FP" ++{ ++ if ((TARGET_SW8A == 1 && flag_sw_cmov != 1) || TARGET_SW6B) ++ { ++ rtx reg1 = gen_reg_rtx (DFmode); ++ rtx reg2 = gen_reg_rtx (DFmode); ++ rtx reg3 = gen_reg_rtx (DImode); ++ rtx_code_label *label1 = gen_label_rtx (); ++ rtx_code_label *label2 = gen_label_rtx (); ++ rtx test; ++ REAL_VALUE_TYPE offset; ++ ++ real_2expN (&offset, 63, DFmode); ++ ++ emit_move_insn (reg1, const_double_from_real_value (offset, DFmode)); ++ do_pending_stack_adjust (); ++ ++ test = gen_rtx_GE (VOIDmode, operands[1], reg1); ++ emit_jump_insn (gen_cbranchdf4 (test, operands[1], reg1, label1)); ++ ++ emit_insn (gen_fix_truncdfdi2 (operands[0], operands[1])); ++ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); ++ emit_barrier (); ++ ++ emit_label 
(label1); ++ emit_move_insn (reg2, gen_rtx_MINUS (DFmode, operands[1], reg1)); ++ emit_move_insn (reg3, GEN_INT (BITMASK_HIGH)); ++ emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32))); ++ ++ emit_insn (gen_fix_truncdfdi2 (operands[0], reg2)); ++ emit_insn (gen_iordi3 (operands[0], operands[0], reg3)); ++ ++ emit_label (label2); ++ ++ /* Allow REG_NOTES to be set on last insn (labels don't have enough ++ fields, and can't be used for REG_NOTES anyway). */ ++ emit_use (stack_pointer_rtx); ++ DONE; ++ } ++ else ++ { ++ emit_insn (gen_fixuns_truncdfdi2_internal (operands[0], operands[1])); ++ DONE; ++ } ++}) ++ ++ ++;; Likewise between SFmode and SImode. ++ ++(define_insn_and_split "*fix_truncsfsi_ieee" ++ [(set (match_operand:SI 0 "memory_operand" "=m") ++ (subreg:SI ++ (match_operator:DI 4 "fix_operator" ++ [(float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG"))]) 0)) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && ((sw_64_fptm >= SW_64_FPTM_SU && flag_sw_cmov == 0) || TARGET_SW6B)" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 2) (match_op_dup 4 [(float_extend:DF (match_dup 1))])) ++ (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) ++ (set (match_dup 5) (match_dup 3))] ++ "operands[5] = adjust_address (operands[0], SFmode, 0);" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes")]) ++ ++(define_insn_and_split "*fix_truncsfsi_internal" ++ [(set (match_operand:SI 0 "memory_operand" "=m") ++ (subreg:SI ++ (match_operator:DI 4 "fix_operator" ++ [(float_extend:DF ++ (match_operand:SF 1 "reg_or_0_operand" "fG"))]) 0)) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && ((sw_64_fptm < SW_64_FPTM_SU && flag_sw_cmov == 0) || TARGET_SW6B)" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 2) (match_op_dup 4 [(float_extend:DF (match_dup 1))])) ++ (set (match_dup 3) (unspec:SF [(match_dup 2)] UNSPEC_CVTQL)) ++ (set (match_dup 5) (match_dup 3))] ++{ ++ 
// operands[4] = gen_rtx_REG (SFmode, REGNO (operands[2])); ++ operands[5] = adjust_address (operands[0], SFmode, 0); ++} ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes")]) ++ ++(define_insn "*fix_truncsfdi2" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=&f,&f") ++ (match_operator:DI 2 "fix_operator" ++ [(float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,fG"))]))] ++ "TARGET_FP && ((flag_sw_sdsame == 0 && flag_sw_cmov == 0) || TARGET_SW6B)" ++ "fcvt%-l%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*fix_truncsfdi2_same" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand" "=f,f") ++ (match_operator:DI 2 "fix_operator" ++ [(float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,fG"))]))] ++ "TARGET_FP && ((flag_sw_sdsame == 1 && flag_sw_cmov == 0) || TARGET_SW6B)" ++ "fcvt%-l%T2 %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "c") ++ (set_attr "trap_suffix" "v_sv_svi") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "fix_truncsfdi2" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand") ++ (fix:DI (float_extend:DF (match_operand:SF 1 "reg_or_0_operand"))))] ++ "TARGET_FP && flag_sw_cmov == 0") ++ ++(define_expand "fixuns_truncsfdi2" ++ [(set (match_operand:DI 0 "reg_no_subreg_operand") ++ (unsigned_fix:DI ++ (float_extend:DF (match_operand:SF 1 "reg_or_0_operand"))))] ++ "TARGET_FP" ++{ ++ if ((TARGET_SW8A == 1 && flag_sw_cmov != 1) || TARGET_SW6B) ++ { ++ rtx reg1 = gen_reg_rtx (SFmode); ++ rtx reg2 = gen_reg_rtx (DFmode); ++ rtx reg3 = gen_reg_rtx (DImode); ++ rtx reg4 = gen_reg_rtx (DFmode); ++ rtx reg5 = gen_reg_rtx (DFmode); ++ 
rtx_code_label *label1 = gen_label_rtx (); ++ rtx_code_label *label2 = gen_label_rtx (); ++ rtx test; ++ REAL_VALUE_TYPE offset; ++ ++ real_2expN (&offset, 63, SFmode); ++ ++ emit_move_insn (reg1, const_double_from_real_value (offset, SFmode)); ++ do_pending_stack_adjust (); ++ ++ test = gen_rtx_GE (SFmode, operands[1], reg1); ++ emit_insn (gen_extendsfdf2 (reg4, reg1)); ++ emit_insn (gen_extendsfdf2 (reg2, operands[1])); ++ emit_jump_insn (gen_cbranchdf4 (test, reg2, reg4, label1)); ++ ++ emit_insn (gen_fix_truncdfdi2 (operands[0], reg2)); ++ emit_jump_insn (gen_rtx_SET (pc_rtx, gen_rtx_LABEL_REF (VOIDmode, label2))); ++ emit_barrier (); ++ ++ emit_label (label1); ++ emit_move_insn (reg5, gen_rtx_MINUS (DFmode, reg2, reg4)); ++ emit_move_insn (reg3, GEN_INT (BITMASK_HIGH)); ++ emit_insn (gen_ashldi3 (reg3, reg3, GEN_INT (32))); ++ ++ emit_insn (gen_fix_truncdfdi2 (operands[0], reg5)); ++ emit_insn (gen_iordi3 (operands[0], operands[0], reg3)); ++ ++ emit_label (label2); ++ ++ /* Allow REG_NOTES to be set on last insn (labels don't have enough ++ fields, and can't be used for REG_NOTES anyway). 
*/ ++ emit_use (stack_pointer_rtx); ++ DONE; ++ } ++ else ++ { ++ rtx reg2 = gen_reg_rtx (DFmode); ++ emit_insn (gen_extendsfdf2 (reg2, operands[1])); ++ emit_insn (gen_fixuns_truncdfdi2_internal (operands[0], reg2)); ++ DONE; ++ } ++}) ++ ++ ++ ++(define_expand "fix_trunctfdi2" ++ [(use (match_operand:DI 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_cvt (FIX, operands); DONE;") ++ ++(define_expand "fixuns_trunctfdi2" ++ [(use (match_operand:DI 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_cvt (UNSIGNED_FIX, operands); DONE;") ++ ++(define_insn "floatdisf2_8a" ++ [(set (match_operand:SF 0 "register_operand" "=&f,&f") ++ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "r,r")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovl%,%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*floatdisf2" ++ [(set (match_operand:SF 0 "register_operand" "=&f,&f") ++ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f,f")))] ++ "(flag_sw_cmov == 0 && flag_sw_sdsame == 0) || TARGET_SW6B" ++ "fcvtl%,%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*floatdisf2_same" ++ [(set (match_operand:SF 0 "register_operand" "=f,f") ++ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f,f")))] ++ "(flag_sw_sdsame == 1 && flag_sw_cmov == 0) || TARGET_SW6B" ++ "fcvtl%,%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" 
"yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "floatdisf2" ++ [(set (match_operand:SF 0 "register_operand" "=f,f") ++ (float:SF (match_operand:DI 1 "reg_no_subreg_operand" "f,f")))] ++ "TARGET_FP" ++ "" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn_and_split "*floatsisf2_ieee" ++ [(set (match_operand:SF 0 "register_operand" "=&f") ++ (float:SF (match_operand:SI 1 "memory_operand" "m"))) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && sw_64_fptm >= SW_64_FPTM_SU && flag_sw_cmov == 0" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 3) (match_dup 1)) ++ (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ)) ++ (set (match_dup 0) (float:SF (match_dup 2)))] ++ "operands[1] = adjust_address (operands[1], SFmode, 0);") ++ ++ (define_insn "floatsisf2" ++ [(set (match_operand:SF 0 "register_operand" "=&f,&f") ++ (float:SF (match_operand:SI 1 "reg_no_subreg_operand" "r,r")))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovws %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn_and_split "*floatsisf2" ++ [(set (match_operand:SF 0 "register_operand" "=&f") ++ (float:SF (match_operand:SI 1 "memory_operand" "m"))) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && flag_sw_cmov == 0" ++ "#" ++ "&& reload_completed" 
++ [(set (match_dup 3) (match_dup 1)) ++ (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ)) ++ (set (match_dup 0) (float:SF (match_dup 2)))] ++{ ++ operands[1] = adjust_address (operands[1], SFmode, 0); ++ //operands[2] = gen_rtx_REG (DImode, REGNO (operands[0])); ++}) ++ ++(define_insn "floatdidf2_8a" ++ [(set (match_operand:DF 0 "register_operand" "=&f,&f") ++ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "r,r")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovl%-%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*floatdidf2" ++ [(set (match_operand:DF 0 "register_operand" "=&f,&f") ++ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f,f")))] ++ "flag_sw_sdsame == 0" ++ "fcvtl%-%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*floatdidf2_same" ++ [(set (match_operand:DF 0 "register_operand" "=f,f") ++ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f,f")))] ++ "flag_sw_sdsame == 1" ++ "fcvtl%-%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "floatdidf2" ++ [(set (match_operand:DF 0 "register_operand" "=f,f") ++ (float:DF (match_operand:DI 1 "reg_no_subreg_operand" "f,f")))] ++ "TARGET_FP" ++ "" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ 
(set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++ (define_insn "floatsidf2" ++ [(set (match_operand:DF 0 "register_operand" "=&f,&f") ++ (float:DF (match_operand:SI 1 "reg_no_subreg_operand" "r,r")))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovwd %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++(define_insn_and_split "*floatsidf2_ieee" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (float:DF (match_operand:SI 1 "memory_operand" "m"))) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && sw_64_fptm >= SW_64_FPTM_SU && flag_sw_cmov == 0" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 3) (match_dup 1)) ++ (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ)) ++ (set (match_dup 0) (float:DF (match_dup 2)))] ++ "operands[1] = adjust_address (operands[1], SFmode, 0);") ++ ++(define_insn_and_split "*floatsidf2" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (float:DF (match_operand:SI 1 "memory_operand" "m"))) ++ (clobber (match_scratch:DI 2 "=&f")) ++ (clobber (match_scratch:SF 3 "=&f"))] ++ "TARGET_FP && flag_sw_cmov == 0" ++ "#" ++ "&& reload_completed" ++ [(set (match_dup 3) (match_dup 1)) ++ (set (match_dup 2) (unspec:DI [(match_dup 3)] UNSPEC_CVTLQ)) ++ (set (match_dup 0) (float:DF (match_dup 2)))] ++{ ++ operands[1] = adjust_address (operands[1], SFmode, 0); ++// operands[2] = gen_rtx_REG (DImode, REGNO (operands[0])); ++ // operands[3] = gen_rtx_REG (SFmode, REGNO (operands[0])); ++}) ++ ++(define_expand "floatditf2" ++ [(use (match_operand:TF 0 "register_operand")) ++ (use (match_operand:DI 1 "general_operand"))] ++ 
"TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_cvt (FLOAT, operands); DONE;") ++ ++(define_insn "floatunsdisf2" ++ [(set (match_operand:SF 0 "register_operand" "=&f,&f") ++ (unsigned_float:SF (match_operand:DI 1 "reg_no_subreg_operand" "r,r")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovul%,%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "floatunsdidf2" ++ [(set (match_operand:DF 0 "register_operand" "=&f,&f") ++ (unsigned_float:DF (match_operand:DI 1 "reg_no_subreg_operand" "r,r")))] ++ "TARGET_FP && TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovul%-%/ %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "floatunssidf2" ++ [(set (match_operand:DF 0 "register_operand" "=&f,&f") ++ (unsigned_float:DF (match_operand:SI 1 "register_operand" "r,r")))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovuwd %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ (define_insn "floatunssisf2" ++ [(set (match_operand:SF 0 "register_operand" "=&f,&f") ++ (unsigned_float:SF (match_operand:SI 1 "reg_no_subreg_operand" "r,r")))] ++ "TARGET_SW8A && flag_sw_cmov == 1" ++ "cmovuws %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" 
"0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_expand "floatunsditf2" ++ [(use (match_operand:TF 0 "register_operand")) ++ (use (match_operand:DI 1 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_cvt (UNSIGNED_FLOAT, operands); DONE;") ++ ++(define_expand "extendsfdf2" ++ [(set (match_operand:DF 0 "register_operand") ++ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand")))] ++ "TARGET_FP" ++{ ++ if (sw_64_fptm >= SW_64_FPTM_SU) ++ operands[1] = force_reg (SFmode, operands[1]); ++}) ++ ++;; The Unicos/Mk assembler doesn't support cvtst, but we've already ++;; asserted that sw_64_fptm == SW_64_FPTM_N. ++ ++(define_insn "*cmpsf_internal" ++ [(set (match_operand:SF 0 "register_operand" "=&f,&f") ++ (match_operator:SF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:SF 2 "reg_or_0_operand" "fG,fG") ++ (match_operand:SF 3 "reg_or_0_operand" "fG,fG")]))] ++ "TARGET_FP && flag_sw_sdsame == 0 && flag_sw_sf_cmpsel" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++(define_insn "*cmpsf_internal_same" ++ [(set (match_operand:SF 0 "register_operand" "=f,f") ++ (match_operator:SF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:SF 2 "reg_or_0_operand" "fG,fG") ++ (match_operand:SF 3 "reg_or_0_operand" "fG,fG")]))] ++ "TARGET_FP && flag_sw_sdsame == 1 && flag_sw_sf_cmpsel" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*extendsfdf2_ieee" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] ++ 
"TARGET_FP && sw_64_fptm >= SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fcvtsd %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes")]) ++(define_insn "*extendsfdf2_ieee_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (float_extend:DF (match_operand:SF 1 "register_operand" "f")))] ++ "TARGET_FP && sw_64_fptm >= SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fcvtsd %1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes")]) ++ ++(define_insn "*extendsfdf2_internal_1" ++ [(set (match_operand:DF 0 "register_operand" "=&f,f,m") ++ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m,f")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_nofcpys == 1" ++ "@ ++ fcvtsd %1,%0 ++ fld%,%U1 %0,%1 ++ fst%-%U0 %1,%0" ++ [(set_attr "type" "fcpys,fld,fst")]) ++ ++(define_insn "*extendsfdf2_internal_2" ++ [(set (match_operand:DF 0 "register_operand" "=&f,f,m") ++ (float_extend:DF (match_operand:SF 1 "nonimmediate_operand" "f,m,f")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_nofcpys == 0" ++ "@ ++ fcvtsd %1,%0 \;fcpys %0,%0,%0 ++ fld%, %0,%1 ++ fst%- %1,%0" ++ [(set_attr "type" "fcpys,fld,fst")]) ++ ++;; Use register_operand for operand 1 to prevent compress_float_constant ++;; from doing something silly. When optimizing we'll put things back ++;; together anyway. 
++(define_expand "extendsftf2" ++ [(use (match_operand:TF 0 "register_operand")) ++ (use (match_operand:SF 1 "register_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++{ ++ rtx tmp = gen_reg_rtx (DFmode); ++ emit_insn (gen_extendsfdf2 (tmp, operands[1])); ++ emit_insn (gen_extenddftf2 (operands[0], tmp)); ++ DONE; ++}) ++ ++(define_expand "extenddftf2" ++ [(use (match_operand:TF 0 "register_operand")) ++ (use (match_operand:DF 1 "register_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_cvt (FLOAT_EXTEND, operands); DONE;") ++ ++(define_insn "*truncdfsf2" ++ [(set (match_operand:SF 0 "register_operand" "=&f,&f") ++ (float_truncate:SF (match_operand:DF 1 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 0" ++ "fcvt%-%,%/ %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*truncdfsf2_same" ++ [(set (match_operand:SF 0 "register_operand" "=f,f") ++ (float_truncate:SF (match_operand:DF 1 "reg_or_0_operand" "fG,fG")))] ++ "flag_sw_sdsame == 1" ++ "fcvt%-%,%/ %R1,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++(define_expand "truncdfsf2" ++ [(set (match_operand:SF 0 "register_operand" "=f,f") ++ (float_truncate:SF (match_operand:DF 1 "reg_or_0_operand" "fG,fG")))] ++ "TARGET_FP" ++ "" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "round_suffix" "normal") ++ (set_attr "trap_suffix" "u_su_sui") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++ ++(define_expand 
"trunctfdf2" ++ [(use (match_operand:DF 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_xfloating_cvt (FLOAT_TRUNCATE, operands); DONE;") ++ ++(define_expand "trunctfsf2" ++ [(use (match_operand:SF 0 "register_operand")) ++ (use (match_operand:TF 1 "general_operand"))] ++ "TARGET_FP && TARGET_HAS_XFLOATING_LIBS" ++{ ++ rtx tmpf, sticky, arg, lo, hi; ++ ++ tmpf = gen_reg_rtx (DFmode); ++ sticky = gen_reg_rtx (DImode); ++ arg = copy_to_mode_reg (TFmode, operands[1]); ++ lo = gen_lowpart (DImode, arg); ++ hi = gen_highpart (DImode, arg); ++ ++ /* Convert the low word of the TFmode value into a sticky rounding bit, ++ then or it into the low bit of the high word. This leaves the sticky ++ bit at bit 48 of the fraction, which is representable in DFmode, ++ which prevents rounding error in the final conversion to SFmode. */ ++ ++ emit_insn (gen_rtx_SET (sticky, gen_rtx_NE (DImode, lo, const0_rtx))); ++ emit_insn (gen_iordi3 (hi, hi, sticky)); ++ emit_insn (gen_trunctfdf2 (tmpf, arg)); ++ emit_insn (gen_truncdfsf2 (operands[0], tmpf)); ++ DONE; ++}) ++ ++;; Next are all the integer comparisons, and conditional moves and branches ++;; and some of the related define_expand's and define_split's. ++ ++(define_insn "*setcc_internal" ++ [(set (match_operand 0 "register_operand" "=r") ++ (match_operator 1 "sw_64_comparison_operator" ++ [(match_operand:DI 2 "register_operand" "r") ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI")]))] ++ "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT ++ && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8 ++ && GET_MODE (operands[0]) == GET_MODE (operands[1])" ++ "cmp%C1 %2,%3,%0" ++ [(set_attr "type" "icmp")]) ++ ++;; Yes, we can technically support reg_or_8bit_operand in operand 2, ++;; but that's non-canonical rtl and allowing that causes inefficiencies ++;; from cse on. 
++(define_insn "*setcc_swapped_internal" ++ [(set (match_operand 0 "register_operand" "=r") ++ (match_operator 1 "sw_64_swapped_comparison_operator" ++ [(match_operand:DI 2 "register_operand" "r") ++ (match_operand:DI 3 "reg_or_0_operand" "rJ")]))] ++ "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT ++ && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8 ++ && GET_MODE (operands[0]) == GET_MODE (operands[1])" ++ "cmp%c1 %r3,%2,%0" ++ [(set_attr "type" "icmp")]) ++ ++;; Use match_operator rather than ne directly so that we can match ++;; multiple integer modes. ++(define_insn "*setne_internal" ++ [(set (match_operand 0 "register_operand" "=r") ++ (match_operator 1 "signed_comparison_operator" ++ [(match_operand:DI 2 "register_operand" "r") ++ (const_int 0)]))] ++ "GET_MODE_CLASS (GET_MODE (operands[0])) == MODE_INT ++ && GET_MODE_SIZE (GET_MODE (operands[0])) <= 8 ++ && GET_CODE (operands[1]) == NE ++ && GET_MODE (operands[0]) == GET_MODE (operands[1])" ++ "cmpult $31,%2,%0" ++ [(set_attr "type" "icmp")]) ++ ++;; The mode folding trick can't be used with const_int operands, since ++;; reload needs to know the proper mode. ++;; ++;; Use add_operand instead of the more seemingly natural reg_or_8bit_operand ++;; in order to create more pairs of constants. As long as we're allowing ++;; two constants at the same time, and will have to reload one of them... 
++ ++(define_insn "*movcc_internal" ++ [(set (match_operand:IMODE 0 "register_operand" "=r,r,r,r") ++ (if_then_else:IMODE ++ (match_operator 2 "signed_comparison_operator" ++ [(match_operand:DI 3 "reg_or_0_operand" "rJ,rJ,J,J") ++ (match_operand:DI 4 "reg_or_0_operand" "J,J,rJ,rJ")]) ++ (match_operand:IMODE 1 "add_operand" "rI,0,rI,0") ++ (match_operand:IMODE 5 "add_operand" "0,rI,0,rI")))] ++ "(operands[3] == const0_rtx) ^ (operands[4] == const0_rtx)" ++ "@ ++ sel%C2 %r3,%1,%0,%0 ++ sel%D2 %r3,%5,%0,%0 ++ sel%c2 %r4,%1,%0,%0 ++ sel%d2 %r4,%5,%0,%0" ++ [(set_attr "type" "icmov")]) ++ ++(define_insn "*movcc_lbc" ++ [(set (match_operand:IMODE 0 "register_operand" "=r,r") ++ (if_then_else:IMODE ++ (eq (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ") ++ (const_int 1) ++ (const_int 0)) ++ (const_int 0)) ++ (match_operand:IMODE 1 "reg_or_8bit_operand" "rI,0") ++ (match_operand:IMODE 3 "reg_or_8bit_operand" "0,rI")))] ++ "" ++ "@ ++ sellbc %r2,%1,%0,%0 ++ sellbs %r2,%3,%0,%0" ++ [(set_attr "type" "icmov")]) ++ ++(define_insn "*movcc_lbs" ++ [(set (match_operand:IMODE 0 "register_operand" "=r,r") ++ (if_then_else:IMODE ++ (ne (zero_extract:DI (match_operand:DI 2 "reg_or_0_operand" "rJ,rJ") ++ (const_int 1) ++ (const_int 0)) ++ (const_int 0)) ++ (match_operand:IMODE 1 "reg_or_8bit_operand" "rI,0") ++ (match_operand:IMODE 3 "reg_or_8bit_operand" "0,rI")))] ++ "" ++ "@ ++ sellbs %r2,%1,%0,%0 ++ sellbc %r2,%3,%0,%0" ++ [(set_attr "type" "icmov")]) ++ ++;; For ABS, we have two choices, depending on whether the input and output ++;; registers are the same or not. 
++(define_expand "absdi2" ++ [(set (match_operand:DI 0 "register_operand") ++ (abs:DI (match_operand:DI 1 "register_operand")))] ++ "" ++{ ++ if (rtx_equal_p (operands[0], operands[1])) ++ emit_insn (gen_absdi2_same (operands[0], gen_reg_rtx (DImode))); ++ else ++ emit_insn (gen_absdi2_diff (operands[0], operands[1])); ++ DONE; ++}) ++ ++(define_expand "absdi2_same" ++ [(set (match_operand:DI 1 "register_operand") ++ (neg:DI (match_operand:DI 0 "register_operand"))) ++ (set (match_dup 0) ++ (if_then_else:DI (ge (match_dup 0) (const_int 0)) ++ (match_dup 0) ++ (match_dup 1)))]) ++ ++(define_expand "absdi2_diff" ++ [(set (match_operand:DI 0 "register_operand") ++ (neg:DI (match_operand:DI 1 "register_operand"))) ++ (set (match_dup 0) ++ (if_then_else:DI (lt (match_dup 1) (const_int 0)) ++ (match_dup 0) ++ (match_dup 1)))]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (abs:DI (match_dup 0))) ++ (clobber (match_operand:DI 1 "register_operand"))] ++ "" ++ [(set (match_dup 1) (neg:DI (match_dup 0))) ++ (set (match_dup 0) (if_then_else:DI (ge (match_dup 0) (const_int 0)) ++ (match_dup 0) (match_dup 1)))]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (abs:DI (match_operand:DI 1 "register_operand")))] ++ "! rtx_equal_p (operands[0], operands[1])" ++ [(set (match_dup 0) (neg:DI (match_dup 1))) ++ (set (match_dup 0) (if_then_else:DI (lt (match_dup 1) (const_int 0)) ++ (match_dup 0) (match_dup 1)))]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (neg:DI (abs:DI (match_dup 0)))) ++ (clobber (match_operand:DI 1 "register_operand"))] ++ "" ++ [(set (match_dup 1) (neg:DI (match_dup 0))) ++ (set (match_dup 0) (if_then_else:DI (le (match_dup 0) (const_int 0)) ++ (match_dup 0) (match_dup 1)))]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (neg:DI (abs:DI (match_operand:DI 1 "register_operand"))))] ++ "! 
rtx_equal_p (operands[0], operands[1])" ++ [(set (match_dup 0) (neg:DI (match_dup 1))) ++ (set (match_dup 0) (if_then_else:DI (gt (match_dup 1) (const_int 0)) ++ (match_dup 0) (match_dup 1)))]) ++ ++(define_insn "3" ++ [(set (match_operand:I12MODE 0 "register_operand" "=r") ++ (any_maxmin:I12MODE ++ (match_operand:I12MODE 1 "reg_or_0_operand" "%rJ") ++ (match_operand:I12MODE 2 "reg_or_8bit_operand" "rI")))] ++ "TARGET_MAX" ++ " %r1,%2,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_expand "smaxdi3" ++ [(set (match_dup 3) ++ (le:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (set (match_operand:DI 0 "register_operand") ++ (if_then_else:DI (eq (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))] ++ "" ++ "operands[3] = gen_reg_rtx (DImode);") ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (smax:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (clobber (match_operand:DI 3 "register_operand"))] ++ "operands[2] != const0_rtx" ++ [(set (match_dup 3) (le:DI (match_dup 1) (match_dup 2))) ++ (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))]) ++ ++(define_insn "*smax_const0" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (smax:DI (match_operand:DI 1 "register_operand" "0") ++ (const_int 0)))] ++ "" ++ "sellt %0,0,%0,%0" ++ [(set_attr "type" "icmov")]) ++ ++(define_expand "smindi3" ++ [(set (match_dup 3) ++ (lt:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (set (match_operand:DI 0 "register_operand") ++ (if_then_else:DI (ne (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))] ++ "" ++ "operands[3] = gen_reg_rtx (DImode);") ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (smin:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (clobber (match_operand:DI 3 
"register_operand"))] ++ "operands[2] != const0_rtx" ++ [(set (match_dup 3) (lt:DI (match_dup 1) (match_dup 2))) ++ (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))]) ++ ++(define_insn "*smin_const0" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (smin:DI (match_operand:DI 1 "register_operand" "0") ++ (const_int 0)))] ++ "" ++ "selgt %0,0,%0,%0" ++ [(set_attr "type" "icmov")]) ++ ++(define_expand "umaxdi3" ++ [(set (match_dup 3) ++ (leu:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (set (match_operand:DI 0 "register_operand") ++ (if_then_else:DI (eq (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))] ++ "" ++ "operands[3] = gen_reg_rtx (DImode);") ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (umax:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (clobber (match_operand:DI 3 "register_operand"))] ++ "operands[2] != const0_rtx" ++ [(set (match_dup 3) (leu:DI (match_dup 1) (match_dup 2))) ++ (set (match_dup 0) (if_then_else:DI (eq (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))]) ++ ++(define_expand "umindi3" ++ [(set (match_dup 3) ++ (ltu:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (set (match_operand:DI 0 "register_operand") ++ (if_then_else:DI (ne (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))] ++ "" ++ "operands[3] = gen_reg_rtx (DImode);") ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (umin:DI (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand"))) ++ (clobber (match_operand:DI 3 "register_operand"))] ++ "operands[2] != const0_rtx" ++ [(set (match_dup 3) (ltu:DI (match_dup 1) (match_dup 2))) ++ (set (match_dup 0) (if_then_else:DI (ne (match_dup 3) (const_int 0)) ++ (match_dup 1) (match_dup 2)))]) ++ ++(define_insn "*bcc_normal" ++ [(set 
(pc) ++ (if_then_else ++ (match_operator 1 "signed_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (const_int 0)]) ++ (label_ref (match_operand 0)) ++ (pc)))] ++ "" ++ "b%C1 %r2,%0" ++ [(set_attr "type" "ibr")]) ++ ++(define_insn "*bcc_reverse" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "signed_comparison_operator" ++ [(match_operand:DI 2 "register_operand" "r") ++ (const_int 0)]) ++ ++ (pc) ++ (label_ref (match_operand 0))))] ++ "" ++ "b%c1 %2,%0" ++ [(set_attr "type" "ibr")]) ++ ++(define_insn_and_split "*branchcombine" ++ [(set (pc) ++ (if_then_else (match_operator 1 "sw_64_branch_combination" ++ [(match_operand:DI 2 "register_operand") ++ (match_operand:DI 3 "reg_or_8bit_operand")]) ++ (label_ref (match_operand 0)) ++ (pc)))] ++"flag_sw_branch_combination==1 ++ && (can_create_pseudo_p ()) && operands[3]!=CONST0_RTX (DImode)" ++"#" ++"&& 1" ++ [(parallel ++ [(set (pc) ++ (if_then_else ++ (match_op_dup 1 ++ [(match_dup 2) ++ (match_dup 3)]) ++ (label_ref (match_dup 0)) ++ (pc))) ++ (clobber (match_dup 4))])] ++{ ++ operands[4]=gen_reg_rtx (DImode); ++}) ++ ++(define_insn "bcc_ne" ++ [(parallel ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "sw_64_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI")]) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_operand:DI 4 "register_operand" "=r"))])] ++ "flag_sw_branch_combination==1" ++ "cmp%C1 %r2,%3,%r4 ++ bne %r4,%0" ++ [(set_attr "type" "ibr")]) ++ ++(define_insn "bcc_eq" ++ [(parallel ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "sw_64_swapped_branch_combination" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI")]) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_operand:DI 4 "register_operand" "=r"))])] ++ "flag_sw_branch_combination==1" ++ "cmp%D1 %r2,%3,%r4 ++ beq %r4,%0" ++ [(set_attr "type" "ibr")]) ++ ++(define_insn 
"*blbs_normal" ++ [(set (pc) ++ (if_then_else ++ (ne (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (const_int 1) ++ (const_int 0)) ++ (const_int 0)) ++ (label_ref (match_operand 0)) ++ (pc)))] ++ "" ++ "blbs %r1,%0" ++ [(set_attr "type" "ibr")]) ++ ++(define_insn "*blbc_normal" ++ [(set (pc) ++ (if_then_else ++ (eq (zero_extract:DI (match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (const_int 1) ++ (const_int 0)) ++ (const_int 0)) ++ (label_ref (match_operand 0)) ++ (pc)))] ++ "" ++ "blbc %r1,%0" ++ [(set_attr "type" "ibr")]) ++ ++(define_split ++ [(parallel ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "comparison_operator" ++ [(zero_extract:DI (match_operand:DI 2 "register_operand") ++ (const_int 1) ++ (match_operand:DI 3 "const_int_operand")) ++ (const_int 0)]) ++ (label_ref (match_operand 0)) ++ (pc))) ++ (clobber (match_operand:DI 4 "register_operand"))])] ++ "INTVAL (operands[3]) != 0" ++ [(set (match_dup 4) ++ (lshiftrt:DI (match_dup 2) (match_dup 3))) ++ (set (pc) ++ (if_then_else (match_op_dup 1 ++ [(zero_extract:DI (match_dup 4) ++ (const_int 1) ++ (const_int 0)) ++ (const_int 0)]) ++ (label_ref (match_dup 0)) ++ (pc)))] ++) ++ ++;; The following are the corresponding floating-point insns. Recall ++;; we need to have variants that expand the arguments from SFmode ++;; to DFmode. 
++ ++(define_insn "*cmpdf_internal" ++ [(set (match_operand:DF 0 "register_operand" "=&f,&f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 2 "reg_or_0_operand" "fG,fG") ++ (match_operand:DF 3 "reg_or_0_operand" "fG,fG")]))] ++ "TARGET_FP && flag_sw_sdsame == 0" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*cmpdf_internal_same" ++ [(set (match_operand:DF 0 "register_operand" "=f,f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 2 "reg_or_0_operand" "fG,fG") ++ (match_operand:DF 3 "reg_or_0_operand" "fG,fG")]))] ++ "TARGET_FP && flag_sw_sdsame == 1" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su") ++ (set (attr "enabled") ++ (cond [(eq_attr "alternative" "0") ++ (symbol_ref "sw_64_fptm < SW_64_FPTM_SU") ++ ] ++ (symbol_ref "true")))]) ++ ++(define_insn "*cmpdf_ext1" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG")) ++ (match_operand:DF 3 "reg_or_0_operand" "fG")]))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su")]) ++(define_insn "*cmpdf_ext1_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG")) ++ (match_operand:DF 3 "reg_or_0_operand" "fG")]))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su")]) 
++ ++(define_insn "*cmpdf_ext2" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 2 "reg_or_0_operand" "fG") ++ (float_extend:DF ++ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su")]) ++(define_insn "*cmpdf_ext2_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(match_operand:DF 2 "reg_or_0_operand" "fG") ++ (float_extend:DF ++ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su")]) ++ ++(define_insn "*cmpdf_ext3" ++ [(set (match_operand:DF 0 "register_operand" "=&f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG")) ++ (float_extend:DF ++ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 0" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su")]) ++(define_insn "*cmpdf_ext3_same" ++ [(set (match_operand:DF 0 "register_operand" "=f") ++ (match_operator:DF 1 "sw_64_fp_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG")) ++ (float_extend:DF ++ (match_operand:SF 3 "reg_or_0_operand" "fG"))]))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU && flag_sw_sdsame == 1" ++ "fcmp%C1%/ %R2,%R3,%0" ++ [(set_attr "type" "fadd") ++ (set_attr "trap" "yes") ++ (set_attr "trap_suffix" "su")]) ++ ++ ++(define_insn "*movcc_internal" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (if_then_else:FMODE ++ (match_operator 3 
"signed_comparison_operator" ++ [(match_operand:DF 4 "reg_or_0_operand" "fG,fG") ++ (match_operand:DF 2 "const0_operand" "G,G")]) ++ (match_operand:FMODE 1 "reg_or_0_operand" "fG,0") ++ (match_operand:FMODE 5 "reg_or_0_operand" "0,fG")))] ++ "TARGET_FP" ++ "@ ++ fsel%C3 %R4,%R1,%0,%0 ++ fsel%D3 %R4,%R5,%0,%0" ++ [(set_attr "type" "fcmov")]) ++ ++(define_insn "*movdfcc_ext1" ++ [(set (match_operand:DF 0 "register_operand" "=f,f") ++ (if_then_else:DF ++ (match_operator 3 "signed_comparison_operator" ++ [(match_operand:DF 4 "reg_or_0_operand" "fG,fG") ++ (match_operand:DF 2 "const0_operand" "G,G")]) ++ (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,0")) ++ (match_operand:DF 5 "reg_or_0_operand" "0,fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU" ++ "@ ++ fsel%C3 %R4,%R1,%0,%0 ++ fsel%D3 %R4,%R5,%0,%0" ++ [(set_attr "type" "fcmov")]) ++ ++(define_insn "*movdfcc_ext2" ++ [(set (match_operand:DF 0 "register_operand" "=f,f") ++ (if_then_else:DF ++ (match_operator 3 "signed_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 4 "reg_or_0_operand" "fG,fG")) ++ (match_operand:DF 2 "const0_operand" "G,G")]) ++ (match_operand:DF 1 "reg_or_0_operand" "fG,0") ++ (match_operand:DF 5 "reg_or_0_operand" "0,fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU" ++ "@ ++ fsel%C3 %R4,%R1,%0,%0 ++ fsel%D3 %R4,%R5,%0,%0" ++ [(set_attr "type" "fcmov")]) ++ ++(define_insn "*movdfcc_ext3" ++ [(set (match_operand:SF 0 "register_operand" "=f,f") ++ (if_then_else:SF ++ (match_operator 3 "signed_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 4 "reg_or_0_operand" "fG,fG")) ++ (match_operand:DF 2 "const0_operand" "G,G")]) ++ (match_operand:SF 1 "reg_or_0_operand" "fG,0") ++ (match_operand:SF 5 "reg_or_0_operand" "0,fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU" ++ "@ ++ fsel%C3 %R4,%R1,%0,%0 ++ fsel%D3 %R4,%R5,%0,%0" ++ [(set_attr "type" "fcmov")]) ++ ++(define_insn "*movdfcc_ext4" ++ [(set (match_operand:DF 0 "register_operand" "=f,f") ++ 
(if_then_else:DF ++ (match_operator 3 "signed_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 4 "reg_or_0_operand" "fG,fG")) ++ (match_operand:DF 2 "const0_operand" "G,G")]) ++ (float_extend:DF (match_operand:SF 1 "reg_or_0_operand" "fG,0")) ++ (match_operand:DF 5 "reg_or_0_operand" "0,fG")))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU" ++ "@ ++ fsel%C3 %R4,%R1,%0,%0 ++ fsel%D3 %R4,%R5,%0,%0" ++ [(set_attr "type" "fcmov")]) ++ ++(define_expand "smaxdf3" ++ [(set (match_dup 3) ++ (le:DF (match_operand:DF 1 "reg_or_0_operand") ++ (match_operand:DF 2 "reg_or_0_operand"))) ++ (set (match_operand:DF 0 "register_operand") ++ (if_then_else:DF (eq (match_dup 3) (match_dup 4)) ++ (match_dup 1) (match_dup 2)))] ++ "TARGET_FP" ++{ ++ operands[3] = gen_reg_rtx (DFmode); ++ operands[4] = CONST0_RTX (DFmode); ++}) ++ ++(define_expand "smindf3" ++ [(set (match_dup 3) ++ (lt:DF (match_operand:DF 1 "reg_or_0_operand") ++ (match_operand:DF 2 "reg_or_0_operand"))) ++ (set (match_operand:DF 0 "register_operand") ++ (if_then_else:DF (ne (match_dup 3) (match_dup 4)) ++ (match_dup 1) (match_dup 2)))] ++ "TARGET_FP" ++{ ++ operands[3] = gen_reg_rtx (DFmode); ++ operands[4] = CONST0_RTX (DFmode); ++}) ++ ++(define_expand "smaxsf3" ++ [(set (match_dup 3) ++ (le:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand")) ++ (float_extend:DF (match_operand:SF 2 "reg_or_0_operand")))) ++ (set (match_operand:SF 0 "register_operand") ++ (if_then_else:SF (eq (match_dup 3) (match_dup 4)) ++ (match_dup 1) (match_dup 2)))] ++ "TARGET_FP && sw_64_fptm < SW_64_FPTM_SU" ++{ ++ operands[3] = gen_reg_rtx (DFmode); ++ operands[4] = CONST0_RTX (DFmode); ++}) ++ ++(define_expand "sminsf3" ++ [(set (match_dup 3) ++ (lt:DF (float_extend:DF (match_operand:SF 1 "reg_or_0_operand")) ++ (float_extend:DF (match_operand:SF 2 "reg_or_0_operand")))) ++ (set (match_operand:SF 0 "register_operand") ++ (if_then_else:SF (ne (match_dup 3) (match_dup 4)) ++ (match_dup 1) (match_dup 2)))] ++ "TARGET_FP && 
sw_64_fptm < SW_64_FPTM_SU" ++{ ++ operands[3] = gen_reg_rtx (DFmode); ++ operands[4] = CONST0_RTX (DFmode); ++}) ++ ++(define_insn "*fbcc_normal" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "signed_comparison_operator" ++ [(match_operand:DF 2 "reg_or_0_operand" "fG") ++ (match_operand:DF 3 "const0_operand" "G")]) ++ (label_ref (match_operand 0)) ++ (pc)))] ++ "TARGET_FP" ++ "fb%C1 %R2,%0" ++ [(set_attr "type" "fbr")]) ++ ++(define_insn "*fbcc_ext_normal" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "signed_comparison_operator" ++ [(float_extend:DF ++ (match_operand:SF 2 "reg_or_0_operand" "fG")) ++ (match_operand:DF 3 "const0_operand" "G")]) ++ (label_ref (match_operand 0)) ++ (pc)))] ++ "TARGET_FP" ++ "fb%C1 %R2,%0" ++ [(set_attr "type" "fbr")]) ++ ++;; These are the main define_expand's used to make conditional branches ++;; and compares. ++ ++(define_expand "cbranchsf4" ++ [(use (match_operator 0 "sw_64_cbranch_operator" ++ [(match_operand:SF 1 "reg_or_0_operand") ++ (match_operand:SF 2 "reg_or_0_operand")])) ++ (use (match_operand 3))] ++ "TARGET_FP && flag_sw_sf_cmpsel" ++ "sw_64_emit_conditional_branch (operands, SFmode); DONE;") ++ ++(define_insn "*sfbcc_normal" ++ [(set (pc) ++ (if_then_else ++ (match_operator 1 "signed_comparison_operator" ++ [(match_operand:SF 2 "reg_or_0_operand" "fG") ++ (match_operand:SF 3 "const0_operand" "G")]) ++ (label_ref (match_operand 0)) ++ (pc)))] ++ "TARGET_FP && flag_sw_sf_cmpsel" ++ "fb%C1 %R2,%0" ++ [(set_attr "type" "fbr")]) ++ ++(define_insn "*movsfcc_internal" ++ [(set (match_operand:FMODE 0 "register_operand" "=f,f") ++ (if_then_else:FMODE ++ (match_operator 3 "signed_comparison_operator" ++ [(match_operand:SF 4 "reg_or_0_operand" "fG,fG") ++ (match_operand:SF 2 "const0_operand" "G,G")]) ++ (match_operand:FMODE 1 "reg_or_0_operand" "fG,0") ++ (match_operand:FMODE 5 "reg_or_0_operand" "0,fG")))] ++ "TARGET_FP && flag_sw_sf_cmpsel" ++ "@ ++ fsel%C3 %R4,%R1,%0,%0 ++ fsel%D3 %R4,%R5,%0,%0" ++ [(set_attr 
"type" "fcmov")]) ++ ++ ++(define_expand "cbranchdf4" ++ [(use (match_operator 0 "sw_64_cbranch_operator" ++ [(match_operand:DF 1 "reg_or_0_operand") ++ (match_operand:DF 2 "reg_or_0_operand")])) ++ (use (match_operand 3))] ++ "TARGET_FP" ++ "sw_64_emit_conditional_branch (operands, DFmode); DONE;") ++ ++(define_expand "cbranchtf4" ++ [(use (match_operator 0 "sw_64_cbranch_operator" ++ [(match_operand:TF 1 "general_operand") ++ (match_operand:TF 2 "general_operand")])) ++ (use (match_operand 3))] ++ "TARGET_HAS_XFLOATING_LIBS" ++ "sw_64_emit_conditional_branch (operands, TFmode); DONE;") ++ ++(define_expand "cbranchdi4" ++ [(use (match_operator 0 "sw_64_cbranch_operator" ++ [(match_operand:DI 1 "general_operand") ++ (match_operand:DI 2 "general_operand")])) ++ (use (match_operand 3))] ++ "" ++ "sw_64_emit_conditional_branch (operands, DImode); DONE;") ++ ++(define_expand "cstoredf4" ++ [(use (match_operator:DI 1 "sw_64_cbranch_operator" ++ [(match_operand:DF 2 "reg_or_0_operand") ++ (match_operand:DF 3 "reg_or_0_operand")])) ++ (clobber (match_operand:DI 0 "register_operand"))] ++ "TARGET_FP" ++{ ++ if (sw_64_emit_setcc (operands, DFmode)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_expand "cstoretf4" ++ [(use (match_operator:DI 1 "sw_64_cbranch_operator" ++ [(match_operand:TF 2 "general_operand") ++ (match_operand:TF 3 "general_operand")])) ++ (clobber (match_operand:DI 0 "register_operand"))] ++ "TARGET_HAS_XFLOATING_LIBS" ++{ ++ if (sw_64_emit_setcc (operands, TFmode)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_expand "cstoredi4" ++ [(use (match_operator:DI 1 "sw_64_cbranch_operator" ++ [(match_operand:DI 2 "general_operand") ++ (match_operand:DI 3 "general_operand")])) ++ (clobber (match_operand:DI 0 "register_operand"))] ++ "" ++{ ++ if (sw_64_emit_setcc (operands, DImode)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++;; These are the main define_expand's used to make conditional moves. 
++ ++(define_expand "movcc" ++ [(set (match_operand:I48MODE 0 "register_operand") ++ (if_then_else:I48MODE ++ (match_operand 1 "comparison_operator") ++ (match_operand:I48MODE 2 "reg_or_8bit_operand") ++ (match_operand:I48MODE 3 "reg_or_8bit_operand")))] ++ "" ++{ ++ operands[1] = sw_64_emit_conditional_move (operands[1], mode); ++ if (operands[1] == 0) ++ FAIL; ++}) ++ ++(define_expand "movcc" ++ [(set (match_operand:FMODE 0 "register_operand") ++ (if_then_else:FMODE ++ (match_operand 1 "comparison_operator") ++ (match_operand:FMODE 2 "reg_or_8bit_operand") ++ (match_operand:FMODE 3 "reg_or_8bit_operand")))] ++ "" ++{ ++ operands[1] = sw_64_emit_conditional_move (operands[1], mode); ++ if (operands[1] == 0) ++ FAIL; ++}) ++ ++;; These define_split definitions are used in cases when comparisons have ++;; not be stated in the correct way and we need to reverse the second ++;; comparison. For example, x >= 7 has to be done as x < 6 with the ++;; comparison that tests the result being reversed. We have one define_split ++;; for each use of a comparison. They do not match valid insns and need ++;; not generate valid insns. ++;; ++;; We can also handle equality comparisons (and inequality comparisons in ++;; cases where the resulting add cannot overflow) by doing an add followed by ++;; a comparison with zero. This is faster since the addition takes one ++;; less cycle than a compare when feeding into a conditional move. ++;; For this case, we also have an SImode pattern since we can merge the add ++;; and sign extend and the order doesn't matter. ++;; ++;; We do not do this for floating-point, since it isn't clear how the "wrong" ++;; operation could have been generated. 
++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (if_then_else:DI ++ (match_operator 1 "comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand") ++ (match_operand:DI 3 "reg_or_cint_operand")]) ++ (match_operand:DI 4 "reg_or_cint_operand") ++ (match_operand:DI 5 "reg_or_cint_operand"))) ++ (clobber (match_operand:DI 6 "register_operand"))] ++ "operands[3] != const0_rtx" ++ [(set (match_dup 6) (match_dup 7)) ++ (set (match_dup 0) ++ (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))] ++{ ++ enum rtx_code code = GET_CODE (operands[1]); ++ int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU); ++ ++ /* If we are comparing for equality with a constant and that constant ++ appears in the arm when the register equals the constant, use the ++ register since that is more likely to match (and to produce better code ++ if both would). */ ++ ++ if (code == EQ && CONST_INT_P (operands[3]) ++ && rtx_equal_p (operands[4], operands[3])) ++ operands[4] = operands[2]; ++ ++ else if (code == NE && CONST_INT_P (operands[3]) ++ && rtx_equal_p (operands[5], operands[3])) ++ operands[5] = operands[2]; ++ ++ if (code == NE || code == EQ ++ || (extended_count (operands[2], DImode, unsignedp) >= 1 ++ && extended_count (operands[3], DImode, unsignedp) >= 1)) ++ { ++ if (CONST_INT_P (operands[3])) ++ operands[7] = gen_rtx_PLUS (DImode, operands[2], ++ GEN_INT (- INTVAL (operands[3]))); ++ else ++ operands[7] = gen_rtx_MINUS (DImode, operands[2], operands[3]); ++ ++ operands[8] = gen_rtx_fmt_ee (code, VOIDmode, operands[6], const0_rtx); ++ } ++ ++ else if (code == EQ || code == LE || code == LT ++ || code == LEU || code == LTU) ++ { ++ operands[7] = gen_rtx_fmt_ee (code, DImode, operands[2], operands[3]); ++ operands[8] = gen_rtx_NE (VOIDmode, operands[6], const0_rtx); ++ } ++ else ++ { ++ operands[7] = gen_rtx_fmt_ee (reverse_condition (code), DImode, ++ operands[2], operands[3]); ++ operands[8] = gen_rtx_EQ (VOIDmode, operands[6], 
const0_rtx); ++ } ++}) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (if_then_else:DI ++ (match_operator 1 "comparison_operator" ++ [(match_operand:SI 2 "reg_or_0_operand") ++ (match_operand:SI 3 "reg_or_cint_operand")]) ++ (match_operand:DI 4 "reg_or_8bit_operand") ++ (match_operand:DI 5 "reg_or_8bit_operand"))) ++ (clobber (match_operand:DI 6 "register_operand"))] ++ "operands[3] != const0_rtx ++ && (GET_CODE (operands[1]) == EQ || GET_CODE (operands[1]) == NE)" ++ [(set (match_dup 6) (match_dup 7)) ++ (set (match_dup 0) ++ (if_then_else:DI (match_dup 8) (match_dup 4) (match_dup 5)))] ++{ ++ enum rtx_code code = GET_CODE (operands[1]); ++ int unsignedp = (code == GEU || code == LEU || code == GTU || code == LTU); ++ rtx tem; ++ ++ if ((code != NE && code != EQ ++ && ! (extended_count (operands[2], DImode, unsignedp) >= 1 ++ && extended_count (operands[3], DImode, unsignedp) >= 1))) ++ FAIL; ++ ++ if (CONST_INT_P (operands[3])) ++ tem = gen_rtx_PLUS (SImode, operands[2], ++ GEN_INT (- INTVAL (operands[3]))); ++ else ++ tem = gen_rtx_MINUS (SImode, operands[2], operands[3]); ++ ++ operands[7] = gen_rtx_SIGN_EXTEND (DImode, tem); ++ operands[8] = gen_rtx_fmt_ee (GET_CODE (operands[1]), VOIDmode, ++ operands[6], const0_rtx); ++}) ++ ++;; Prefer to use cmp and arithmetic when possible instead of a cmove. ++ ++(define_split ++ [(set (match_operand 0 "register_operand") ++ (if_then_else (match_operator 1 "signed_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand") ++ (const_int 0)]) ++ (match_operand 3 "const_int_operand") ++ (match_operand 4 "const_int_operand")))] ++ "" ++ [(const_int 0)] ++{ ++ if (sw_64_split_conditional_move (GET_CODE (operands[1]), operands[0], ++ operands[2], operands[3], operands[4])) ++ DONE; ++ else ++ FAIL; ++}) ++ ++;; ??? Why combine is allowed to create such non-canonical rtl, I don't know. ++;; Oh well, we match it in movcc, so it must be partially our fault. 
++(define_split ++ [(set (match_operand 0 "register_operand") ++ (if_then_else (match_operator 1 "signed_comparison_operator" ++ [(const_int 0) ++ (match_operand:DI 2 "reg_or_0_operand")]) ++ (match_operand 3 "const_int_operand") ++ (match_operand 4 "const_int_operand")))] ++ "" ++ [(const_int 0)] ++{ ++ if (sw_64_split_conditional_move (swap_condition (GET_CODE (operands[1])), ++ operands[0], operands[2], operands[3], ++ operands[4])) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_insn_and_split "*cmp_sadd_di" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (plus:DI (if_then_else:DI ++ (match_operator 1 "sw_64_zero_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (const_int 0)]) ++ (match_operand:DI 3 "const48_operand" "I") ++ (const_int 0)) ++ (match_operand:DI 4 "sext_add_operand" "rIO"))) ++ (clobber (match_scratch:DI 5 "=r"))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 5) ++ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) ++ (set (match_dup 0) ++ (plus:DI (mult:DI (match_dup 5) (match_dup 3)) ++ (match_dup 4)))] ++{ ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (DImode); ++ else if (reg_overlap_mentioned_p (operands[5], operands[4])) ++ operands[5] = operands[0]; ++}) ++ ++(define_insn_and_split "*cmp_sadd_si" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (plus:SI (if_then_else:SI ++ (match_operator 1 "sw_64_zero_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (const_int 0)]) ++ (match_operand:SI 3 "const48_operand" "I") ++ (const_int 0)) ++ (match_operand:SI 4 "sext_add_operand" "rIO"))) ++ (clobber (match_scratch:DI 5 "=r"))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 5) ++ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) ++ (set (match_dup 0) ++ (plus:SI (mult:SI (match_dup 6) (match_dup 3)) ++ (match_dup 4)))] ++{ ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (DImode); ++ else if (reg_overlap_mentioned_p (operands[5], operands[4])) ++ operands[5] = gen_lowpart 
(DImode, operands[0]); ++ ++ operands[6] = gen_lowpart (SImode, operands[5]); ++}) ++ ++(define_insn_and_split "*cmp_sadd_sidi" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (plus:SI (if_then_else:SI ++ (match_operator 1 "sw_64_zero_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (const_int 0)]) ++ (match_operand:SI 3 "const48_operand" "I") ++ (const_int 0)) ++ (match_operand:SI 4 "sext_add_operand" "rIO")))) ++ (clobber (match_scratch:DI 5 "=r"))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 5) ++ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) ++ (set (match_dup 0) ++ (sign_extend:DI (plus:SI (mult:SI (match_dup 6) (match_dup 3)) ++ (match_dup 4))))] ++{ ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (DImode); ++ else if (reg_overlap_mentioned_p (operands[5], operands[4])) ++ operands[5] = operands[0]; ++ ++ operands[6] = gen_lowpart (SImode, operands[5]); ++}) ++ ++(define_insn_and_split "*cmp_ssub_di" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (minus:DI (if_then_else:DI ++ (match_operator 1 "sw_64_zero_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (const_int 0)]) ++ (match_operand:DI 3 "const48_operand" "I") ++ (const_int 0)) ++ (match_operand:DI 4 "reg_or_8bit_operand" "rI"))) ++ (clobber (match_scratch:DI 5 "=r"))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 5) ++ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) ++ (set (match_dup 0) ++ (minus:DI (mult:DI (match_dup 5) (match_dup 3)) ++ (match_dup 4)))] ++{ ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (DImode); ++ else if (reg_overlap_mentioned_p (operands[5], operands[4])) ++ operands[5] = operands[0]; ++}) ++ ++(define_insn_and_split "*cmp_ssub_si" ++ [(set (match_operand:SI 0 "register_operand" "=r") ++ (minus:SI (if_then_else:SI ++ (match_operator 1 "sw_64_zero_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (const_int 0)]) ++ (match_operand:SI 3 "const48_operand" 
"I") ++ (const_int 0)) ++ (match_operand:SI 4 "reg_or_8bit_operand" "rI"))) ++ (clobber (match_scratch:DI 5 "=r"))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 5) ++ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) ++ (set (match_dup 0) ++ (minus:SI (mult:SI (match_dup 6) (match_dup 3)) ++ (match_dup 4)))] ++{ ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (DImode); ++ else if (reg_overlap_mentioned_p (operands[5], operands[4])) ++ operands[5] = gen_lowpart (DImode, operands[0]); ++ ++ operands[6] = gen_lowpart (SImode, operands[5]); ++}) ++ ++(define_insn_and_split "*cmp_ssub_sidi" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (sign_extend:DI ++ (minus:SI (if_then_else:SI ++ (match_operator 1 "sw_64_zero_comparison_operator" ++ [(match_operand:DI 2 "reg_or_0_operand" "rJ") ++ (const_int 0)]) ++ (match_operand:SI 3 "const48_operand" "I") ++ (const_int 0)) ++ (match_operand:SI 4 "reg_or_8bit_operand" "rI")))) ++ (clobber (match_scratch:DI 5 "=r"))] ++ "" ++ "#" ++ "" ++ [(set (match_dup 5) ++ (match_op_dup:DI 1 [(match_dup 2) (const_int 0)])) ++ (set (match_dup 0) ++ (sign_extend:DI (minus:SI (mult:SI (match_dup 6) (match_dup 3)) ++ (match_dup 4))))] ++{ ++ if (can_create_pseudo_p ()) ++ operands[5] = gen_reg_rtx (DImode); ++ else if (reg_overlap_mentioned_p (operands[5], operands[4])) ++ operands[5] = operands[0]; ++ ++ operands[6] = gen_lowpart (SImode, operands[5]); ++}) ++ ++(define_expand "call" ++ [(use (match_operand:DI 0)) ++ (use (match_operand 1)) ++ (use (match_operand 2)) ++ (use (match_operand 3))] ++ "" ++{ ++ emit_call_insn (gen_call_osf (operands[0], operands[1])); ++ DONE; ++}) ++ ++(define_expand "sibcall" ++ [(parallel [(call (mem:DI (match_operand 0)) ++ (match_operand 1)) ++ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)])] ++ "" ++{ ++ gcc_assert (MEM_P (operands[0])); ++ operands[0] = XEXP (operands[0], 0); ++}) ++ ++(define_expand "call_osf" ++ [(parallel [(call (mem:DI (match_operand 0)) ++ (match_operand 1)) ++ (use (reg:DI 29)) 
++ (clobber (reg:DI 26))])] ++ "" ++{ ++ gcc_assert (MEM_P (operands[0])); ++ ++ operands[0] = XEXP (operands[0], 0); ++ if (! call_operand (operands[0], Pmode)) ++ operands[0] = copy_to_mode_reg (Pmode, operands[0]); ++}) ++ ++ ++(define_expand "call_value" ++ [(use (match_operand 0)) ++ (use (match_operand:DI 1)) ++ (use (match_operand 2)) ++ (use (match_operand 3)) ++ (use (match_operand 4))] ++ "" ++{ ++ emit_call_insn (gen_call_value_osf (operands[0], operands[1], ++ operands[2])); ++ DONE; ++}) ++ ++(define_expand "sibcall_value" ++ [(parallel [(set (match_operand 0) ++ (call (mem:DI (match_operand 1)) ++ (match_operand 2))) ++ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)])] ++ "" ++{ ++ gcc_assert (MEM_P (operands[1])); ++ operands[1] = XEXP (operands[1], 0); ++}) ++ ++(define_expand "call_value_osf" ++ [(parallel [(set (match_operand 0) ++ (call (mem:DI (match_operand 1)) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))])] ++ "" ++{ ++ gcc_assert (MEM_P (operands[1])); ++ ++ operands[1] = XEXP (operands[1], 0); ++ if (! 
call_operand (operands[1], Pmode)) ++ operands[1] = copy_to_mode_reg (Pmode, operands[1]); ++}) ++ ++ ++(define_insn "*call_osf_1_er_noreturn" ++ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) ++ (match_operand 1)) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS ++ && find_reg_note (insn, REG_NORETURN, NULL_RTX)" ++ "@ ++ call $26,($27),0 ++ bsr $26,%0\t\t!samegp ++ ldl $27,%0($29)\t\t!literal!%#\;call $26,($27),%0\t\t!lituse_jsr!%#" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,*,8")]) ++ ++(define_insn "*call_osf_1_er_setfpec0" ++ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) ++ (match_operand 1)) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 3" ++ "@ ++ call $26,(%0),0\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%* ++ bsr $26,%0\t\t!samegp ++ ldl $27,%0($29)\t\t!literal!%#\;call $26,($27),%0\t\t!lituse_jsr!%#\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*call_osf_1_er_setfpec1" ++ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) ++ (match_operand 1)) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 1 " ++ "@ ++ call $26,(%0),0\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%* ++ bsr $26,%0\t\t!samegp ++ ldl $27,%0($29)\t\t!literal!%#\;call $26,($27),%0\t\t!lituse_jsr!%#\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*call_osf_1_er" ++ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) ++ (match_operand 1)) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS" ++ "@ ++ call $26,(%0),0\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%* ++ bsr $26,%0\t\t!samegp ++ ldl $27,%0($29)\t\t!literal!%#\;call $26,($27),%0\t\t!lituse_jsr!%#\;ldih 
$29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*call_osf_2_er_nogp" ++ [(call (mem:DI (match_operand:DI 0 "register_operand" "c")) ++ (match_operand 1)) ++ (use (reg:DI 29)) ++ (use (match_operand 2)) ++ (use (match_operand 3 "const_int_operand")) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS" ++ "call $26,(%0),%2%J3" ++ [(set_attr "type" "call")]) ++ ++ ++(define_insn "*call_osf_2_er_setfpec0" ++ [(call (mem:DI (match_operand:DI 0 "register_operand" "c")) ++ (match_operand 1)) ++ (set (reg:DI 29) ++ (unspec:DI [(reg:DI 29) (match_operand 4 "const_int_operand")] ++ UNSPEC_LDGP1)) ++ (use (match_operand 2)) ++ (use (match_operand 3 "const_int_operand")) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 3 " ++ "call $26,(%0),%2%J3\;ldih $29,0($26)\t\t!gpdisp!%4" ++ [(set_attr "type" "call") ++ (set_attr "cannot_copy" "true") ++ (set_attr "length" "8")]) ++ ++(define_insn "*call_osf_2_er_setfpec1" ++ [(call (mem:DI (match_operand:DI 0 "register_operand" "c")) ++ (match_operand 1)) ++ (set (reg:DI 29) ++ (unspec:DI [(reg:DI 29) (match_operand 4 "const_int_operand")] ++ UNSPEC_LDGP1)) ++ (use (match_operand 2)) ++ (use (match_operand 3 "const_int_operand")) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 1 " ++ "call $26,(%0),%2%J3\;ldih $29,0($26)\t\t!gpdisp!%4" ++ [(set_attr "type" "call") ++ (set_attr "cannot_copy" "true") ++ (set_attr "length" "8")]) ++ ++(define_insn "*call_osf_2_er" ++ [(call (mem:DI (match_operand:DI 0 "register_operand" "c")) ++ (match_operand 1)) ++ (set (reg:DI 29) ++ (unspec:DI [(reg:DI 29) (match_operand 4 "const_int_operand")] ++ UNSPEC_LDGP1)) ++ (use (match_operand 2)) ++ (use (match_operand 3 "const_int_operand")) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS" ++ "call $26,(%0),%2%J3\;ldih $29,0($26)\t\t!gpdisp!%4" ++ [(set_attr "type" "call") ++ (set_attr "cannot_copy" "true") ++ 
(set_attr "length" "8")]) ++ ++(define_insn "*call_osf_1_noreturn" ++ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) ++ (match_operand 1)) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "! TARGET_EXPLICIT_RELOCS ++ && find_reg_note (insn, REG_NORETURN, NULL_RTX)" ++ "@ ++ call $26,($27),0 ++ bsr $26,$%0..ng ++ call $26,%0" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,*,8")]) ++ ++(define_insn "*call_osf_1" ++ [(call (mem:DI (match_operand:DI 0 "call_operand" "c,R,s")) ++ (match_operand 1)) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "! TARGET_EXPLICIT_RELOCS" ++ "@ ++ call $26,($27),0\;ldgp $29,0($26) ++ bsr $26,$%0..ng ++ call $26,%0\;ldgp $29,0($26)" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*sibcall_osf_1_er" ++ [(call (mem:DI (match_operand:DI 0 "symbolic_operand" "R,s")) ++ (match_operand 1)) ++ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] ++ "TARGET_EXPLICIT_RELOCS" ++ "@ ++ br $31,%0\t\t!samegp ++ ldl $27,%0($29)\t\t!literal!%#\;jmp $31,($27),%0\t\t!lituse_jsr!%#" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,8")]) ++ ++(define_insn "*sibcall_osf_1" ++ [(call (mem:DI (match_operand:DI 0 "symbolic_operand" "R,s")) ++ (match_operand 1)) ++ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] ++ "! 
TARGET_EXPLICIT_RELOCS" ++ "@ ++ br $31,$%0..ng ++ ldi $27,%0\;jmp $31,($27),%0" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,8")]) ++ ++(define_expand "rsqrtsf2" ++ [(match_operand:SF 0 "register_operand" "") ++ (match_operand:SF 1 "register_operand" "")] ++ "TARGET_FP && flag_reciprocal_math == 1 && flag_sw_rsqrt == 1" ++ { ++ sw_64_emit_rsqrt (operands[0], operands[1], 1); ++ DONE; ++ }) ++ ++(define_insn "*movsf2" ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=r") ++ (unspec:SF [(match_operand:SF 1 "input_operand" "f")] ++ UNSPEC_FIMOVS))] ++ "TARGET_FP && flag_reciprocal_math == 1 && flag_sw_rsqrt == 1" ++ "fimovs %1,%0" ++ [(set_attr "type" "ldsym")]) ++ ++(define_expand "untyped_call" ++ [(parallel [(call (match_operand 0) ++ (const_int 0)) ++ (match_operand 1) ++ (match_operand 2)])] ++ "" ++{ ++ int i; ++ ++ emit_call_insn (gen_call (operands[0], const0_rtx, NULL, const0_rtx)); ++ ++ for (i = 0; i < XVECLEN (operands[2], 0); i++) ++ { ++ rtx set = XVECEXP (operands[2], 0, i); ++ emit_move_insn (SET_DEST (set), SET_SRC (set)); ++ } ++ ++ /* The optimizer does not know that the call sets the function value ++ registers we stored in the result block. We avoid problems by ++ claiming that all hard registers are used and clobbered at this ++ point. */ ++ emit_insn (gen_blockage ()); ++ ++ DONE; ++}) ++ ++;; UNSPEC_VOLATILE is considered to use and clobber all hard registers and ++;; all of memory. This blocks insns from being moved across this point. 
++ ++(define_insn "blockage" ++ [(unspec_volatile [(const_int 0)] UNSPECV_BLOCKAGE)] ++ "" ++ "" ++ [(set_attr "length" "0") ++ (set_attr "type" "none")]) ++ ++(define_insn "jump" ++ [(set (pc) ++ (label_ref (match_operand 0)))] ++ "" ++ "br $31,%l0" ++ [(set_attr "type" "ibr")]) ++ ++(define_expand "return" ++ [(return)] ++ "direct_return ()") ++ ++(define_insn "*return_internal" ++ [(return)] ++ "reload_completed" ++{ ++ return "ret $31,($26),1"; ++} ++ ++ [(set_attr "type" "ibr")]) ++ ++(define_insn "indirect_jump" ++ [(set (pc) (match_operand:DI 0 "register_operand" "r"))] ++ "" ++ "jmp $31,(%0),0" ++ [(set_attr "type" "ibr")]) ++ ++(define_expand "tablejump" ++ [(parallel [(set (pc) ++ (match_operand 0 "register_operand")) ++ (use (label_ref:DI (match_operand 1)))])] ++ "" ++{ ++ rtx dest = gen_reg_rtx (DImode); ++ emit_insn (gen_extendsidi2 (dest, operands[0])); ++ emit_insn (gen_adddi3 (dest, pic_offset_table_rtx, dest)); ++ operands[0] = dest; ++}) ++ ++(define_insn "*tablejump_internal" ++ [(set (pc) ++ (match_operand:DI 0 "register_operand" "r")) ++ (use (label_ref (match_operand 1)))] ++ "" ++ "jmp $31,(%0),0" ++ [(set_attr "type" "ibr")]) ++ ++(define_insn "imb" ++ [(unspec_volatile [(const_int 0)] UNSPECV_IMB)] ++ "" ++ "sys_call 0x86" ++ [(set_attr "type" "callpal")]) ++ ++(define_expand "clear_cache" ++ [(match_operand:DI 0) ; region start ++ (match_operand:DI 1)] ; region end ++ "" ++{ ++ emit_insn (gen_imb ()); ++ DONE; ++}) ++ ++(define_insn "trap" ++ [(trap_if (const_int 1) (const_int 0)) ++ (use (reg:DI 29))] ++ "" ++ "sys_call 0x80" ++ [(set_attr "type" "callpal")]) ++ ++;; For userland, we load the thread pointer from the TCB. ++;; For the kernel, we load the per-cpu private value. 
++ ++(define_insn "get_thread_pointerdi" ++ [(set (match_operand:DI 0 "register_operand" "=v") ++ (unspec:DI [(const_int 0)] UNSPEC_TP))] ++ "" ++{ ++ if (TARGET_TLS_KERNEL) ++ return "sys_call 0x32"; ++ else if (flag_sw_rtid == 1) ++ return "rtid %0"; ++ else ++ return "sys_call 0x9e"; ++} ++ [(set_attr "type" "callpal")]) ++ ++;; For completeness, and possibly a __builtin function, here's how to ++;; set the thread pointer. Since we don't describe enough of this ++;; quantity for CSE, we have to use a volatile unspec, and then there's ++;; not much point in creating an R16_REG register class. ++ ++(define_expand "set_thread_pointerdi" ++ [(set (reg:DI 16) (match_operand:DI 0 "input_operand")) ++ (unspec_volatile [(reg:DI 16)] UNSPECV_SET_TP)] ++ "") ++ ++(define_insn "*set_tp" ++ [(unspec_volatile [(reg:DI 16)] UNSPECV_SET_TP)] ++ "" ++{ ++ if (TARGET_TLS_KERNEL) ++ return "sys_call 0x31"; ++ else ++ return "sys_call 0x9f"; ++} ++ [(set_attr "type" "callpal")]) ++ ++;; Finally, we have the basic data motion insns. The byte and word insns ++;; are done via define_expand. Start with the floating-point insns, since ++;; they are simpler. ++ ++(define_expand "movsf" ++ [(set (match_operand:SF 0 "nonimmediate_operand") ++ (match_operand:SF 1 "general_operand"))] ++ "" ++{ ++ if (MEM_P (operands[0]) ++ && ! 
reg_or_0_operand (operands[1], SFmode)) ++ operands[1] = force_reg (SFmode, operands[1]); ++}) ++ ++(define_insn "*movsf" ++ [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m,f,*r") ++ (match_operand:SF 1 "input_operand" "fG,m,*rG,m,fG,*r,*r,f"))] ++ "register_operand (operands[0], SFmode) ++ || reg_or_0_operand (operands[1], SFmode)" ++ "@ ++ fcpys %R1,%R1,%0 ++ fld%,%U1 %0,%1 ++ bis $31,%r1,%0 ++ ldw %0,%1 ++ fst%,%U0 %R1,%0 ++ stw %r1,%0 ++ ifmovs %1,%0 ++ fimovs %1,%0" ++ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi") ++ (set_attr "isa" "*,*,*,*,*,*,fix,fix")]) ++ ++(define_expand "movdf" ++ [(set (match_operand:DF 0 "nonimmediate_operand") ++ (match_operand:DF 1 "general_operand"))] ++ "" ++{ ++ if (MEM_P (operands[0]) ++ && ! reg_or_0_operand (operands[1], DFmode)) ++ operands[1] = force_reg (DFmode, operands[1]); ++}) ++(define_insn "*movdf" ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,*r,*r,m,m,f,*r") ++ (match_operand:DF 1 "input_operand" "fG,m,*rG,m,fG,*r,*r,f"))] ++ "register_operand (operands[0], DFmode) ++ || reg_or_0_operand (operands[1], DFmode)" ++ "@ ++ fcpys %R1,%R1,%0 ++ fld%-%U1 %0,%1 ++ bis $31,%r1,%0 ++ ldl %0,%1 ++ fst%-%U0 %R1,%0 ++ stl %r1,%0 ++ ifmovd %1,%0 ++ fimovd %1,%0" ++ [(set_attr "type" "fcpys,fld,ilog,ild,fst,ist,itof,ftoi") ++ (set_attr "isa" "*,*,*,*,*,*,fix,fix")]) ++ ++(define_expand "storentdf" ++ [(set (match_operand:DF 0 "nonimmediate_operand") ++ (unspec:DF [(match_operand:DF 1 "general_operand")] UNSPEC_NTDF))] ++ "flag_sw_non_temporal == 1" ++{ ++ if (MEM_P (operands[0]) ++ && ! 
reg_or_0_operand (operands[1], DFmode)) ++ operands[1] = force_reg (DFmode, operands[1]); ++}) ++ ++(define_insn "*storentdf" ++ [(set (match_operand:DF 0 "nonimmediate_operand" "=m") ++ (unspec:DF [(match_operand:DF 1 "input_operand" "fG")] UNSPEC_NTDF))] ++ "register_operand (operands[0], DFmode) ++ || reg_or_0_operand (operands[1], DFmode)" ++ "std_nc %R1,%0" ++ [(set_attr "type" "fst") ++ (set_attr "isa" "*")]) ++ ++;; Subregs suck for register allocation. Pretend we can move TFmode ++;; data between general registers until after reload. ++;; ??? Is this still true now that we have the lower-subreg pass? ++ ++(define_expand "movtf" ++ [(set (match_operand:TF 0 "nonimmediate_operand") ++ (match_operand:TF 1 "general_operand"))] ++ "" ++{ ++ if (MEM_P (operands[0]) ++ && ! reg_or_0_operand (operands[1], TFmode)) ++ operands[1] = force_reg (TFmode, operands[1]); ++}) ++ ++(define_insn_and_split "*movtf_internal" ++ [(set (match_operand:TF 0 "nonimmediate_operand" "=r,m") ++ (match_operand:TF 1 "input_operand" "rmG,rG"))] ++ "register_operand (operands[0], TFmode) ++ || reg_or_0_operand (operands[1], TFmode)" ++ "#" ++ "reload_completed" ++ [(set (match_dup 0) (match_dup 2)) ++ (set (match_dup 1) (match_dup 3))] ++ "sw_64_split_tmode_pair (operands, TFmode, true);") ++ ++(define_expand "movsi" ++ [(set (match_operand:SI 0 "nonimmediate_operand") ++ (match_operand:SI 1 "general_operand"))] ++ "" ++{ ++ if (sw_64_expand_mov (SImode, operands)) ++ DONE; ++}) ++ ++(define_insn "*movsi" ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,r,r,m,r") ++ (match_operand:SI 1 "input_operand" "rJ,K,L,n,m,rJ,s"))] ++ "register_operand (operands[0], SImode) ++ || reg_or_0_operand (operands[1], SImode)" ++ "@ ++ bis $31,%r1,%0 ++ ldi %0,%1($31) ++ ldih %0,%h1($31) ++ # ++ ldw%U1 %0,%1 ++ stw%U0 %r1,%0 ++ ldi %0,%1" ++ [(set_attr "type" "ilog,iadd,iadd,multi,ild,ist,ldsym") ++ (set_attr "isa" "*,*,*,*,*,*,vms")]) ++ ++(define_expand "storentsi" ++ [(set (match_operand:SI 0 
"nonimmediate_operand") ++ (unspec:SI [(match_operand:SI 1 "input_operand")] UNSPEC_NTSI))] ++ "flag_sw_non_temporal == 1" ++{ ++ if (sw_64_expand_mov (SImode, operands)) ++ DONE; ++}) ++ ++(define_insn "*storentsi" ++ [(set (match_operand:SI 0 "nonimmediate_operand" "=m") ++ (unspec:SI [(match_operand:SI 1 "input_operand" "rJ")] UNSPEC_NTSI))] ++ "register_operand (operands[0], SImode) ++ || reg_or_0_operand (operands[1], SImode)" ++ "stw_nc %r1,%0" ++ [(set_attr "type" "ist") ++ (set_attr "isa" "*")]) ++ ++;; Split a load of a large constant into the appropriate two-insn ++;; sequence. ++ ++(define_split ++ [(set (match_operand:SI 0 "register_operand") ++ (match_operand:SI 1 "non_add_const_operand"))] ++ "" ++ [(const_int 0)] ++{ ++ if (sw_64_split_const_mov (SImode, operands)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_insn "*movdi_er_low_l" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (lo_sum:DI (match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "local_symbolic_operand")))] ++ "TARGET_EXPLICIT_RELOCS" ++{ ++ if (true_regnum (operands[1]) == 29) ++ return "ldi %0,%2(%1)\t\t!gprel"; ++ else ++ return "ldi %0,%2(%1)\t\t!gprellow"; ++} ++ [(set_attr "usegp" "yes")]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "small_symbolic_operand"))] ++ "TARGET_EXPLICIT_RELOCS && reload_completed" ++ [(set (match_dup 0) ++ (lo_sum:DI (match_dup 2) (match_dup 1)))] ++ "operands[2] = pic_offset_table_rtx;") ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "local_symbolic_operand"))] ++ "TARGET_EXPLICIT_RELOCS && reload_completed" ++ [(set (match_dup 0) ++ (plus:DI (match_dup 2) (high:DI (match_dup 1)))) ++ (set (match_dup 0) ++ (lo_sum:DI (match_dup 0) (match_dup 1)))] ++ "operands[2] = pic_offset_table_rtx;") ++ ++(define_split ++ [(match_operand 0 "some_small_symbolic_operand")] ++ "" ++ [(match_dup 0)] ++ "operands[0] = split_small_symbolic_operand 
(operands[0]);") ++ ++(define_insn "movdi_er_high_g" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand") ++ (match_operand 3 "const_int_operand")] ++ UNSPEC_LITERAL))] ++ "TARGET_EXPLICIT_RELOCS" ++{ ++ if (INTVAL (operands[3]) == 0) ++ return "ldl %0,%2(%1)\t\t!literal"; ++ else ++ return "ldl %0,%2(%1)\t\t!literal!%3"; ++} ++ [(set_attr "type" "ldsym")]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "global_symbolic_operand"))] ++ "TARGET_EXPLICIT_RELOCS && reload_completed" ++ [(set (match_dup 0) ++ (unspec:DI [(match_dup 2) ++ (match_dup 1) ++ (const_int 0)] UNSPEC_LITERAL))] ++ "operands[2] = pic_offset_table_rtx;") ++ ++(define_insn "movdi_er_tlsgd" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand") ++ (match_operand 3 "const_int_operand")] ++ UNSPEC_TLSGD))] ++ "HAVE_AS_TLS" ++{ ++ if (INTVAL (operands[3]) == 0) ++ return "ldi %0,%2(%1)\t\t!tlsgd"; ++ else ++ return "ldi %0,%2(%1)\t\t!tlsgd!%3"; ++} ++[(set_attr "cannot_copy" "true")]) ++ ++(define_insn "*movdi_er_tlsrelgot" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand") ++ (match_operand 3 "const_int_operand")] ++ UNSPEC_TLSRELGOT))] ++ "HAVE_AS_TLS" ++{ ++ if (INTVAL (operands[3]) == 0) ++ return "ldih %0,%2(%1)\t\t!tlsrel_got"; ++ else ++ return "ldih %0,%2(%1)\t\t!tlsrel_got!%3"; ++} ++[(set_attr "cannot_copy" "true")]) ++ ++(define_insn "movdi_er_tlsldm" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand")] ++ UNSPEC_TLSLDM))] ++ "HAVE_AS_TLS" ++{ ++ if (INTVAL (operands[2]) == 0) ++ return "ldi %0,%&(%1)\t\t!tlsldm"; ++ else ++ return "ldi 
%0,%&(%1)\t\t!tlsldm!%2"; ++} ++[(set_attr "cannot_copy" "true")]) ++ ++(define_insn "*movdi_er_gotdtprel" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand") ++ (match_operand 3 "const_int_operand")] ++ UNSPEC_GOTDTPREL))] ++ "HAVE_AS_TLS" ++{ ++ if (INTVAL (operands[3]) == 0) ++ return "ldl %0,%2(%1)\t\t!gotdtprel"; ++ else ++ return "ldl %0,%2(%1)\t\t!gotdtprel!%3"; ++} ++[(set_attr "type" "ild") ++ (set_attr "usegp" "yes")]) ++ ++(define_insn "*movdi_er_gotdtp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand")] ++ UNSPEC_DTPREL))] ++ "HAVE_AS_TLS" ++ "ldl %0,%2(%1)\t\t!gotdtprel" ++ [(set_attr "type" "ild") ++ (set_attr "usegp" "yes")]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "gotdtp_symbolic_operand"))] ++ "HAVE_AS_TLS && reload_completed" ++ [(set (match_dup 0) ++ (unspec:DI [(match_dup 2) ++ (match_dup 1)] UNSPEC_DTPREL))] ++{ ++ operands[1] = XVECEXP (XEXP (operands[1], 0), 0, 0); ++ operands[2] = pic_offset_table_rtx; ++}) ++ ++(define_insn "*movdi_er_gottprel" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand") ++ (match_operand 3 "const_int_operand")] ++ UNSPEC_TPREL))] ++ "HAVE_AS_TLS" ++{ ++ if (INTVAL (operands[3]) == 0) ++ return "ldl %0,%2(%1)\t\t!gottprel"; ++ else ++ return "ldl %0,%2(%1)\t\t!gottprel!%3"; ++} ++[(set_attr "type" "ild") ++ (set_attr "usegp" "yes")]) ++ ++(define_insn "*movdi_er_gottp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand:DI 2 "symbolic_operand")] ++ UNSPEC_TPREL))] ++ "HAVE_AS_TLS" ++ "ldl %0,%2(%1)\t\t!gottprel" ++ [(set_attr "type" "ild") ++ (set_attr "usegp" "yes")]) ++ 
++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "gottp_symbolic_operand"))] ++ "HAVE_AS_TLS && reload_completed" ++ [(set (match_dup 0) ++ (unspec:DI [(match_dup 2) ++ (match_dup 1)] UNSPEC_TPREL))] ++{ ++ operands[1] = XVECEXP (XEXP (operands[1], 0), 0, 0); ++ operands[2] = pic_offset_table_rtx; ++}) ++ ++(define_insn "*movdi" ++ [(set (match_operand:DI 0 "nonimmediate_operand" ++ "=r,r,r,r,r,r,r,r, m, *f,*f, Q, r,*f") ++ (match_operand:DI 1 "input_operand" ++ "rJ,K,L,T,s,n,s,m,rJ,*fJ, Q,*f,*f, r"))] ++ "register_operand (operands[0], DImode) ++ || reg_or_0_operand (operands[1], DImode)" ++ "@ ++ mov %r1,%0 ++ ldi %0,%1($31) ++ ldih %0,%h1($31) ++ # ++ # ++ # ++ ldi %0,%1 ++ ldl%A1%U1 %0,%1 ++ stl%A0%U0 %r1,%0 ++ fmov %R1,%0 ++ fldd%U1 %0,%1 ++ fstd%U0 %R1,%0 ++ fimovd %1,%0 ++ ifmovd %1,%0" ++ [(set_attr "type" "ilog,iadd,iadd,iadd,ldsym,multi,ldsym,ild,ist,fcpys,fld,fst,ftoi,itof") ++ (set_attr "isa" "*,*,*,er,er,*,ner,*,*,*,*,*,fix,fix") ++ (set_attr "usegp" "*,*,*,yes,*,*,*,*,*,*,*,*,*,*")]) ++ ++(define_expand "storentdi" ++ [(set (match_operand:DI 0 "nonimmediate_operand") ++ (unspec:DI [(match_operand:DI 1 "input_operand")] UNSPEC_NTDI))] ++ "flag_sw_non_temporal == 1" ++{ ++ if (sw_64_expand_mov (DImode, operands)) ++ DONE; ++}) ++ ++(define_insn "*storentdi" ++ [(set (match_operand:DI 0 "nonimmediate_operand" "=m") ++ (unspec:DI [(match_operand:DI 1 "input_operand" "rJ")] UNSPEC_NTDI))] ++ "register_operand (operands[0], DImode) ++ || reg_or_0_operand (operands[1], DImode)" ++ "stl_nc %r1,%0" ++ [(set_attr "type" "ist") ++ (set_attr "isa" "*") ++ (set_attr "usegp" "*")]) ++ ++(define_insn "force_movdi" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r")] ++ UNSPECV_FORCE_MOV))] ++ "" ++ "mov %1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_expand "movdi" ++ [(set (match_operand:DI 0 "nonimmediate_operand") ++ (match_operand:DI 1 
"general_operand"))] ++ "" ++{ ++ if (sw_64_expand_mov (DImode, operands)) ++ DONE; ++}) ++ ++;; Split a load of a large constant into the appropriate two-insn ++;; sequence. ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "non_add_const_operand"))] ++ "" ++ [(const_int 0)] ++{ ++ if (sw_64_split_const_mov (DImode, operands)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++;; We need to prevent reload from splitting TImode moves, because it ++;; might decide to overwrite a pointer with the value it points to. ++;; In that case we have to do the loads in the appropriate order so ++;; that the pointer is not destroyed too early. ++ ++(define_insn_and_split "*movti_internal" ++ [(set (match_operand:TI 0 "nonimmediate_operand" "=r,m") ++ (match_operand:TI 1 "input_operand" "rmJ,rJ"))] ++ "(register_operand (operands[0], TImode) ++ /* Prevent rematerialization of constants. */ ++ && ! CONSTANT_P (operands[1])) ++ || reg_or_0_operand (operands[1], TImode)" ++ "#" ++ "reload_completed" ++ [(set (match_dup 0) (match_dup 2)) ++ (set (match_dup 1) (match_dup 3))] ++ "sw_64_split_tmode_pair (operands, TImode, true);") ++ ++(define_expand "movti" ++ [(set (match_operand:TI 0 "nonimmediate_operand") ++ (match_operand:TI 1 "general_operand"))] ++ "" ++{ ++ if (MEM_P (operands[0]) ++ && ! reg_or_0_operand (operands[1], TImode)) ++ operands[1] = force_reg (TImode, operands[1]); ++ ++ if (operands[1] == const0_rtx) ++ ; ++ /* We must put 64-bit constants in memory. We could keep the ++ 32-bit constants in TImode and rely on the splitter, but ++ this doesn't seem to be worth the pain. 
*/ ++ else if (CONST_SCALAR_INT_P (operands[1])) ++ { ++ rtx in[2], out[2], target; ++ ++ gcc_assert (can_create_pseudo_p ()); ++ ++ split_double (operands[1], &in[0], &in[1]); ++ ++ if (in[0] == const0_rtx) ++ out[0] = const0_rtx; ++ else ++ { ++ out[0] = gen_reg_rtx (DImode); ++ emit_insn (gen_movdi (out[0], in[0])); ++ } ++ ++ if (in[1] == const0_rtx) ++ out[1] = const0_rtx; ++ else ++ { ++ out[1] = gen_reg_rtx (DImode); ++ emit_insn (gen_movdi (out[1], in[1])); ++ } ++ ++ if (!REG_P (operands[0])) ++ target = gen_reg_rtx (TImode); ++ else ++ target = operands[0]; ++ ++ emit_insn (gen_movdi (operand_subword (target, 0, 0, TImode), out[0])); ++ emit_insn (gen_movdi (operand_subword (target, 1, 0, TImode), out[1])); ++ ++ if (target != operands[0]) ++ emit_insn (gen_rtx_SET (operands[0], target)); ++ ++ DONE; ++ } ++}) ++ ++;; These are the partial-word cases. ++;; ++;; First we have the code to load an aligned word. Operand 0 is the register ++;; in which to place the result. It's mode is QImode or HImode. Operand 1 ++;; is an SImode MEM at the low-order byte of the proper word. Operand 2 is the ++;; number of bits within the word that the value is. Operand 3 is an SImode ++;; scratch register. If operand 0 is a hard register, operand 3 may be the ++;; same register. It is allowed to conflict with operand 1 as well. 
++ ++(define_expand "aligned_loadqi" ++ [(set (match_operand:SI 3 "register_operand") ++ (match_operand:SI 1 "memory_operand")) ++ (set (match_operand:DI 0 "register_operand") ++ (zero_extract:DI (subreg:DI (match_dup 3) 0) ++ (const_int 8) ++ (match_operand:DI 2 "const_int_operand")))]) ++ ++(define_expand "aligned_loadhi" ++ [(set (match_operand:SI 3 "register_operand") ++ (match_operand:SI 1 "memory_operand")) ++ (set (match_operand:DI 0 "register_operand") ++ (zero_extract:DI (subreg:DI (match_dup 3) 0) ++ (const_int 16) ++ (match_operand:DI 2 "const_int_operand")))]) ++ ++;; Similar for unaligned loads, where we use the sequence from the ++;; Sw_64 Architecture manual. We have to distinguish between little-endian ++;; and big-endian systems as the sequences are different. ++;; ++;; Operand 1 is the address. Operands 2 and 3 are temporaries, where ++;; operand 3 can overlap the input and output registers. ++ ++(define_expand "unaligned_loadqi" ++ [(set (match_operand:DI 2 "register_operand") ++ (mem:DI (and:DI (match_operand:DI 1 "address_operand") ++ (const_int -8)))) ++ (set (match_operand:DI 3 "register_operand") ++ (match_dup 1)) ++ (set (match_operand:DI 0 "register_operand") ++ (zero_extract:DI (match_dup 2) ++ (const_int 8) ++ (ashift:DI (match_dup 3) (const_int 3))))]) ++ ++(define_expand "unaligned_loadhi" ++ [(set (match_operand:DI 2 "register_operand") ++ (mem:DI (and:DI (match_operand:DI 1 "address_operand") ++ (const_int -8)))) ++ (set (match_operand:DI 3 "register_operand") ++ (match_dup 1)) ++ (set (match_operand:DI 0 "register_operand") ++ (zero_extract:DI (match_dup 2) ++ (const_int 16) ++ (ashift:DI (match_dup 3) (const_int 3))))]) ++ ++;; Storing an aligned byte or word requires two temporaries. Operand 0 is the ++;; aligned SImode MEM. Operand 1 is the register containing the ++;; byte or word to store. Operand 2 is the number of bits within the word that ++;; the value should be placed. Operands 3 and 4 are SImode temporaries. 
++ ++(define_expand "aligned_store" ++ [(set (match_operand:SI 3 "register_operand") ++ (match_operand:SI 0 "memory_operand")) ++ (set (subreg:DI (match_dup 3) 0) ++ (and:DI (subreg:DI (match_dup 3) 0) (match_dup 5))) ++ (set (subreg:DI (match_operand:SI 4 "register_operand") 0) ++ (ashift:DI (zero_extend:DI (match_operand 1 "register_operand")) ++ (match_operand:DI 2 "const_int_operand"))) ++ (set (subreg:DI (match_dup 4) 0) ++ (ior:DI (subreg:DI (match_dup 4) 0) (subreg:DI (match_dup 3) 0))) ++ (set (match_dup 0) (match_dup 4))] ++ "" ++{ ++ operands[5] = GEN_INT (~ (GET_MODE_MASK (GET_MODE (operands[1])) ++ << INTVAL (operands[2]))); ++}) ++ ++;; For the unaligned byte and halfword cases, we use code similar to that ++;; in the ;; Architecture book, but reordered to lower the number of registers ++;; required. Operand 0 is the address. Operand 1 is the data to store. ++;; Operands 2, 3, and 4 are DImode temporaries, where operands 2 and 4 may ++;; be the same temporary, if desired. If the address is in a register, ++;; operand 2 can be that register. ++ ++(define_expand "@unaligned_store" ++ [(set (match_operand:DI 3 "register_operand") ++ (mem:DI (and:DI (match_operand:DI 0 "address_operand") ++ (const_int -8)))) ++ (set (match_operand:DI 2 "register_operand") ++ (match_dup 0)) ++ (set (match_dup 3) ++ (and:DI (not:DI (ashift:DI (match_dup 5) ++ (ashift:DI (match_dup 2) (const_int 3)))) ++ (match_dup 3))) ++ (set (match_operand:DI 4 "register_operand") ++ (ashift:DI (zero_extend:DI ++ (match_operand:I12MODE 1 "register_operand")) ++ (ashift:DI (match_dup 2) (const_int 3)))) ++ (set (match_dup 4) (ior:DI (match_dup 4) (match_dup 3))) ++ (set (mem:DI (and:DI (match_dup 0) (const_int -8))) ++ (match_dup 4))] ++ "" ++ "operands[5] = GEN_INT (GET_MODE_MASK (mode));") ++ ++;; Here are the define_expand's for QI and HI moves that use the above ++;; patterns. We have the normal sets, plus the ones that need scratch ++;; registers for reload. 
++ ++(define_expand "mov" ++ [(set (match_operand:I12MODE 0 "nonimmediate_operand") ++ (match_operand:I12MODE 1 "general_operand"))] ++ "" ++{ ++ if (sw_64_expand_mov (mode, operands)) ++ DONE; ++}) ++ ++(define_insn "*movqi" ++ [(set (match_operand:QI 0 "nonimmediate_operand" "=r,r,r,m") ++ (match_operand:QI 1 "input_operand" "rJ,n,m,rJ"))] ++ "register_operand (operands[0], QImode) ++ || reg_or_0_operand (operands[1], QImode)" ++ "@ ++ bis $31,%r1,%0 ++ ldi %0,%L1($31) ++ ldbu%U1 %0,%1 ++ stb%U0 %r1,%0" ++ [(set_attr "type" "ilog,iadd,ild,ist") ++ (set_attr "isa" "*,*,bwx,bwx")]) ++ ++(define_insn "*movhi" ++ [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,m") ++ (match_operand:HI 1 "input_operand" "rJ,n,m,rJ"))] ++ "register_operand (operands[0], HImode) ++ || reg_or_0_operand (operands[1], HImode)" ++ "@ ++ bis $31,%r1,%0 ++ ldi %0,%L1($31) ++ ldhu%U1 %0,%1 ++ sth%U0 %r1,%0" ++ [(set_attr "type" "ilog,iadd,ild,ist") ++ (set_attr "isa" "*,*,bwx,bwx")]) ++ ++;; Helpers for the above. The way reload is structured, we can't ++;; always get a proper address for a stack slot during reload_foo ++;; expansion, so we must delay our address manipulations until after. 
++ ++(define_insn_and_split "@reload_in_aligned" ++ [(set (match_operand:I12MODE 0 "register_operand" "=r") ++ (match_operand:I12MODE 1 "memory_operand" "m"))] ++ "!TARGET_BWX && (reload_in_progress || reload_completed)" ++ "#" ++ "!TARGET_BWX && reload_completed" ++ [(const_int 0)] ++{ ++ rtx aligned_mem, bitnum; ++ get_aligned_mem (operands[1], &aligned_mem, &bitnum); ++ emit_insn (gen_aligned_load ++ (gen_lowpart (DImode, operands[0]), aligned_mem, bitnum, ++ gen_rtx_REG (SImode, REGNO (operands[0])))); ++ DONE; ++}) ++ ++ ++(define_mode_iterator VEC [V8QI V4HI V2SI]) ++(define_mode_iterator VEC12 [V8QI V4HI]) ++ ++(define_expand "mov" ++ [(set (match_operand:VEC 0 "nonimmediate_operand") ++ (match_operand:VEC 1 "general_operand"))] ++ "" ++{ ++ if (sw_64_expand_mov (mode, operands)) ++ DONE; ++}) ++ ++(define_split ++ [(set (match_operand:VEC 0 "register_operand") ++ (match_operand:VEC 1 "non_zero_const_operand"))] ++ "" ++ [(const_int 0)] ++{ ++ if (sw_64_split_const_mov (mode, operands)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_expand "movmisalign" ++ [(set (match_operand:VEC 0 "nonimmediate_operand") ++ (match_operand:VEC 1 "general_operand"))] ++ "flag_sw_unalign_byte != 1 || !TARGET_SW8A" ++{ ++ sw_64_expand_movmisalign (mode, operands); ++ DONE; ++}) ++ ++(define_insn "*mov_fix" ++ [(set (match_operand:VEC 0 "nonimmediate_operand" "=r,r,r,m,*f,*f,m,r,*f") ++ (match_operand:VEC 1 "input_operand" "rW,i,m,rW,*fW,m,*f,*f,r"))] ++ "register_operand (operands[0], mode) ++ || reg_or_0_operand (operands[1], mode)" ++ "@ ++ bis $31,%r1,%0 ++ # ++ ldl%A1%U1 %0,%1 ++ stl%A0%U0 %r1,%0 ++ fcpys %R1,%R1,%0 ++ fldd%U1 %0,%1 ++ fstd%U0 %R1,%0 ++ fimovd %1,%0 ++ ifmovd %1,%0" ++ [(set_attr "type" "ilog,multi,ild,ist,fcpys,fld,fst,ftoi,itof") ++ (set_attr "isa" "*,*,*,*,*,*,*,fix,fix")]) ++ ++(define_insn "3" ++ [(set (match_operand:VEC12 0 "register_operand" "=r") ++ (any_maxmin:VEC12 ++ (match_operand:VEC12 1 "reg_or_0_operand" "rW") ++ (match_operand:VEC12 2 
"reg_or_0_operand" "rW")))] ++ "TARGET_MAX" ++ " %r1,%r2,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_insn "one_cmpl2" ++ [(set (match_operand:VEC 0 "register_operand" "=r") ++ (not:VEC (match_operand:VEC 1 "register_operand" "r")))] ++ "" ++ "ornot $31,%1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "and3" ++ [(set (match_operand:VEC 0 "register_operand" "=r") ++ (and:VEC (match_operand:VEC 1 "register_operand" "r") ++ (match_operand:VEC 2 "register_operand" "r")))] ++ "" ++ "and %1,%2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*andnot3" ++ [(set (match_operand:VEC 0 "register_operand" "=r") ++ (and:VEC (not:VEC (match_operand:VEC 1 "register_operand" "r")) ++ (match_operand:VEC 2 "register_operand" "r")))] ++ "" ++ "bic %2,%1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "ior3" ++ [(set (match_operand:VEC 0 "register_operand" "=r") ++ (ior:VEC (match_operand:VEC 1 "register_operand" "r") ++ (match_operand:VEC 2 "register_operand" "r")))] ++ "" ++ "bis %1,%2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*iornot3" ++ [(set (match_operand:VEC 0 "register_operand" "=r") ++ (ior:VEC (not:DI (match_operand:VEC 1 "register_operand" "r")) ++ (match_operand:VEC 2 "register_operand" "r")))] ++ "" ++ "ornot %2,%1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "xor3" ++ [(set (match_operand:VEC 0 "register_operand" "=r") ++ (xor:VEC (match_operand:VEC 1 "register_operand" "r") ++ (match_operand:VEC 2 "register_operand" "r")))] ++ "" ++ "xor %1,%2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "*xornot3" ++ [(set (match_operand:VEC 0 "register_operand" "=r") ++ (not:VEC (xor:VEC (match_operand:VEC 1 "register_operand" "r") ++ (match_operand:VEC 2 "register_operand" "r"))))] ++ "" ++ "eqv %1,%2,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_expand "vec_shl_" ++ [(set (match_operand:VEC 0 "register_operand") ++ (ashift:DI (match_operand:VEC 1 "register_operand") ++ (match_operand:DI 2 "reg_or_6bit_operand")))] ++ "" ++{ ++ 
operands[0] = gen_lowpart (DImode, operands[0]); ++ operands[1] = gen_lowpart (DImode, operands[1]); ++}) ++ ++(define_expand "vec_shr_" ++ [(set (match_operand:VEC 0 "register_operand") ++ (lshiftrt:DI (match_operand:VEC 1 "register_operand") ++ (match_operand:DI 2 "reg_or_6bit_operand")))] ++ "" ++{ ++ operands[0] = gen_lowpart (DImode, operands[0]); ++ operands[1] = gen_lowpart (DImode, operands[1]); ++}) ++ ++ ++(define_expand "extvmisaligndi" ++ [(set (match_operand:DI 0 "register_operand") ++ (sign_extract:DI (match_operand:BLK 1 "memory_operand") ++ (match_operand:DI 2 "const_int_operand") ++ (match_operand:DI 3 "const_int_operand")))] ++ "" ++{ ++ /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries. */ ++ if (INTVAL (operands[3]) % 8 != 0 ++ || (INTVAL (operands[2]) != 16 ++ && INTVAL (operands[2]) != 32 ++ && INTVAL (operands[2]) != 64)) ++ FAIL; ++ ++ sw_64_expand_unaligned_load (operands[0], operands[1], ++ INTVAL (operands[2]) / 8, ++ INTVAL (operands[3]) / 8, 1); ++ DONE; ++}) ++ ++(define_expand "extzvdi" ++ [(set (match_operand:DI 0 "register_operand") ++ (zero_extract:DI (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "const_int_operand") ++ (match_operand:DI 3 "const_int_operand")))] ++ "" ++{ ++ /* We can do 8, 16, 32 and 64 bit fields, if aligned on byte boundaries. */ ++ if (INTVAL (operands[3]) % 8 != 0 ++ || (INTVAL (operands[2]) != 8 ++ && INTVAL (operands[2]) != 16 ++ && INTVAL (operands[2]) != 32 ++ && INTVAL (operands[2]) != 64)) ++ FAIL; ++}) ++ ++(define_expand "extzvmisaligndi" ++ [(set (match_operand:DI 0 "register_operand") ++ (zero_extract:DI (match_operand:BLK 1 "memory_operand") ++ (match_operand:DI 2 "const_int_operand") ++ (match_operand:DI 3 "const_int_operand")))] ++ "" ++{ ++ /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries. ++ We fail 8-bit fields, falling back on a simple byte load. 
*/ ++ if (INTVAL (operands[3]) % 8 != 0 ++ || (INTVAL (operands[2]) != 16 ++ && INTVAL (operands[2]) != 32 ++ && INTVAL (operands[2]) != 64)) ++ FAIL; ++ ++ sw_64_expand_unaligned_load (operands[0], operands[1], ++ INTVAL (operands[2]) / 8, ++ INTVAL (operands[3]) / 8, 0); ++ DONE; ++}) ++ ++(define_expand "insvmisaligndi" ++ [(set (zero_extract:DI (match_operand:BLK 0 "memory_operand") ++ (match_operand:DI 1 "const_int_operand") ++ (match_operand:DI 2 "const_int_operand")) ++ (match_operand:DI 3 "register_operand"))] ++ "" ++{ ++ /* We can do 16, 32 and 64 bit fields, if aligned on byte boundaries. */ ++ if (INTVAL (operands[2]) % 8 != 0 ++ || (INTVAL (operands[1]) != 16 ++ && INTVAL (operands[1]) != 32 ++ && INTVAL (operands[1]) != 64)) ++ FAIL; ++ ++ sw_64_expand_unaligned_store (operands[0], operands[3], ++ INTVAL (operands[1]) / 8, ++ INTVAL (operands[2]) / 8); ++ DONE; ++}) ++ ++;; Block move/clear, see sw_64.c for more details. ++;; Argument 0 is the destination ++;; Argument 1 is the source ++;; Argument 2 is the length ++;; Argument 3 is the alignment ++ ++(define_expand "cpymemqi" ++ [(parallel [(set (match_operand:BLK 0 "memory_operand") ++ (match_operand:BLK 1 "memory_operand")) ++ (use (match_operand:DI 2 "immediate_operand")) ++ (use (match_operand:DI 3 "immediate_operand"))])] ++ "flag_sw_unalign_byte != 1 || !TARGET_SW8A" ++{ ++ if (sw_64_expand_block_move (operands)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++(define_expand "setmemqi" ++ [(parallel [(set (match_operand:BLK 0 "memory_operand") ++ (match_operand 2 "const_int_operand")) ++ (use (match_operand:DI 1 "immediate_operand")) ++ (use (match_operand:DI 3 "immediate_operand"))])] ++ "" ++{ ++ /* If value to set is not zero, use the library routine. */ ++ if (operands[2] != const0_rtx) ++ FAIL; ++ ++ if (sw_64_expand_block_clear (operands)) ++ DONE; ++ else ++ FAIL; ++}) ++ ++;; Subroutine of stack space allocation. Perform a stack probe. 
++(define_expand "stack_probe_internal" ++ [(set (match_dup 1) (match_operand:DI 0 "const_int_operand"))] ++ "" ++{ ++ operands[1] = gen_rtx_MEM (DImode, plus_constant (Pmode, stack_pointer_rtx, ++ INTVAL (operands[0]))); ++ MEM_VOLATILE_P (operands[1]) = 1; ++ ++ operands[0] = const0_rtx; ++}) ++ ++;; This is how we allocate stack space. If we are allocating a ++;; constant amount of space and we know it is less than 4096 ++;; bytes, we need do nothing. ++;; ++;; If it is more than 4096 bytes, we need to probe the stack ++;; periodically. ++(define_expand "allocate_stack" ++ [(set (reg:DI 30) ++ (plus:DI (reg:DI 30) ++ (match_operand:DI 1 "reg_or_cint_operand"))) ++ (set (match_operand:DI 0 "register_operand" "=r") ++ (match_dup 2))] ++ "" ++{ ++ if (CONST_INT_P (operands[1]) ++ && INTVAL (operands[1]) < 32768) ++ { ++ if (INTVAL (operands[1]) >= 4096) ++ { ++ /* We do this the same way as in the prologue and generate explicit ++ probes. Then we update the stack by the constant. */ ++ ++ int probed = 4096; ++ ++ emit_insn (gen_stack_probe_internal (GEN_INT (- probed))); ++ while (probed + 8192 < INTVAL (operands[1])) ++ emit_insn (gen_stack_probe_internal ++ (GEN_INT (- (probed += 8192)))); ++ ++ if (probed + 4096 < INTVAL (operands[1])) ++ emit_insn (gen_stack_probe_internal ++ (GEN_INT (- INTVAL (operands[1])))); ++ } ++ ++ operands[1] = GEN_INT (- INTVAL (operands[1])); ++ operands[2] = virtual_stack_dynamic_rtx; ++ } ++ else ++ { ++ rtx_code_label *out_label = 0; ++ rtx_code_label *loop_label = gen_label_rtx (); ++ rtx want = gen_reg_rtx (Pmode); ++ rtx tmp = gen_reg_rtx (Pmode); ++ rtx memref, test; ++ ++ emit_insn (gen_subdi3 (want, stack_pointer_rtx, ++ force_reg (Pmode, operands[1]))); ++ ++ if (!CONST_INT_P (operands[1])) ++ { ++ rtx limit = GEN_INT (4096); ++ out_label = gen_label_rtx (); ++ test = gen_rtx_LTU (VOIDmode, operands[1], limit); ++ emit_jump_insn ++ (gen_cbranchdi4 (test, operands[1], limit, out_label)); ++ } ++ ++ emit_insn (gen_adddi3 
(tmp, stack_pointer_rtx, GEN_INT (-4096))); ++ emit_label (loop_label); ++ memref = gen_rtx_MEM (DImode, tmp); ++ MEM_VOLATILE_P (memref) = 1; ++ emit_move_insn (memref, const0_rtx); ++ emit_insn (gen_adddi3 (tmp, tmp, GEN_INT (-8192))); ++ test = gen_rtx_GTU (VOIDmode, tmp, want); ++ emit_jump_insn (gen_cbranchdi4 (test, tmp, want, loop_label)); ++ ++ memref = gen_rtx_MEM (DImode, want); ++ MEM_VOLATILE_P (memref) = 1; ++ emit_move_insn (memref, const0_rtx); ++ ++ if (out_label) ++ emit_label (out_label); ++ ++ emit_move_insn (stack_pointer_rtx, want); ++ emit_move_insn (operands[0], virtual_stack_dynamic_rtx); ++ DONE; ++ } ++}) ++ ++;; This is used by sw_64_expand_prolog to do the same thing as above, ++;; except we cannot at that time generate new basic blocks, so we hide ++;; the loop in this one insn. ++ ++(define_insn "prologue_stack_probe_loop" ++ [(unspec_volatile [(match_operand:DI 0 "register_operand" "r") ++ (match_operand:DI 1 "register_operand" "r")] ++ UNSPECV_PSPL)] ++ "" ++{ ++ operands[2] = gen_label_rtx (); ++ (*targetm.asm_out.internal_label) (asm_out_file, "L", ++ CODE_LABEL_NUMBER (operands[2])); ++ ++ return "stl $31,-8192(%1)\;subl %0,1,%0\;ldi %1,-8192(%1)\;bne %0,%l2"; ++} ++ [(set_attr "length" "16") ++ (set_attr "type" "multi")]) ++ ++(define_expand "prologue" ++ [(const_int 0)] ++ "" ++{ ++ sw_64_expand_prologue (); ++ DONE; ++}) ++ ++;; These take care of emitting the ldgp insn in the prologue. This will be ++;; an ldi/ldih pair and we want to align them properly. So we have two ++;; unspec_volatile insns, the first of which emits the ldgp assembler macro ++;; and the second of which emits nothing. However, both are marked as type ++;; IADD (the default) so the alignment code in sw_64.c does the right thing ++;; with them. 
++ ++(define_expand "prologue_ldgp" ++ [(set (match_dup 0) ++ (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1)) ++ (set (match_dup 0) ++ (unspec_volatile:DI [(match_dup 0) (match_dup 2)] UNSPECV_PLDGP2))] ++ "" ++{ ++ operands[0] = pic_offset_table_rtx; ++ operands[1] = gen_rtx_REG (Pmode, 27); ++ operands[2] = (TARGET_EXPLICIT_RELOCS ++ ? GEN_INT (sw_64_next_sequence_number++) ++ : const0_rtx); ++}) ++ ++(define_insn "*ldgp_er_1" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand")] ++ UNSPECV_LDGP1))] ++ "TARGET_EXPLICIT_RELOCS" ++ "ldih %0,0(%1)\t\t!gpdisp!%2" ++ [(set_attr "cannot_copy" "true")]) ++ ++(define_insn "*ldgp_er_2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand")] ++ UNSPEC_LDGP2))] ++ "TARGET_EXPLICIT_RELOCS" ++ "ldi %0,0(%1)\t\t!gpdisp!%2" ++ [(set_attr "cannot_copy" "true")]) ++ ++(define_insn "*exc_ldgp_er_2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand")] ++ UNSPECV_LDGP2))] ++ "TARGET_EXPLICIT_RELOCS" ++ "ldi %0,0(%1)\t\t!gpdisp!%2" ++ [(set_attr "cannot_copy" "true")]) ++ ++(define_insn "*prologue_ldgp_er_2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand")] ++ UNSPECV_PLDGP2))] ++ "TARGET_EXPLICIT_RELOCS" ++{ ++ return "ldi %0,0(%1)\t\t!gpdisp!%2\n$%~..ng:"; ++} ++ [(set_attr "cannot_copy" "true")]) ++ ++(define_insn "*prologue_ldgp_1" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand")] ++ UNSPECV_LDGP1))] ++ "" ++{ ++ return "ldgp %0,0(%1)\n$%~..ng:"; ++} ++ [(set_attr 
"cannot_copy" "true")]) ++ ++(define_insn "*prologue_ldgp_2" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(match_operand:DI 1 "register_operand" "r") ++ (match_operand 2 "const_int_operand")] ++ UNSPECV_PLDGP2))] ++ "" ++) ++ ++(define_insn "hardware_prefetch_use_syscall" ++[(unspec_volatile [ ++(match_operand:DI 0 "register_operand" "=r") ++(match_operand:DI 1 "register_operand" "=r") ++] UNSPECV_HARDWARE_PREFETCH_CNT)] ++"" ++{ ++ return "ldi $16,109($31)\;ldi $18,1($31)\;ldi $19,120($30)\;\ ++stl %0,120($30)\;\ ++ldl $27,syscall($29)\t\t!literal!%#\;call $26,($27),syscall\t\t!lituse_jsr!%#\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*\;" ++ ; ++} ++[(set_attr "type" "multi") ++ (set_attr "length" "8")]) ++;; The _mcount profiling hook has special calling conventions, and ++;; does not clobber all the registers that a normal call would. So ++;; hide the fact this is a call at all. ++ ++(define_insn "prologue_mcount" ++ [(unspec_volatile [(const_int 0)] UNSPECV_MCOUNT)] ++ "" ++{ ++ if (TARGET_EXPLICIT_RELOCS) ++ /* Note that we cannot use a lituse_jsr reloc, since _mcount ++ cannot be called via the PLT. 
*/ ++ return "ldl $28,_mcount($29)\t\t!literal\;call $28,($28),_mcount"; ++ else ++ return "ldi $28,_mcount\;call $28,($28),_mcount"; ++} ++ [(set_attr "type" "multi") ++ (set_attr "length" "8")]) ++ ++(define_insn "init_fp" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (match_operand:DI 1 "register_operand" "r")) ++ (clobber (mem:BLK (match_operand:DI 2 "register_operand" "=r")))] ++ "" ++ "bis $31,%1,%0") ++ ++(define_expand "epilogue" ++ [(return)] ++ "" ++ "sw_64_expand_epilogue ();") ++ ++(define_expand "sibcall_epilogue" ++ [(return)] ++ "" ++{ ++ sw_64_expand_epilogue (); ++ DONE; ++}) ++ ++(define_expand "builtin_longjmp" ++ [(use (match_operand:DI 0 "register_operand" "r"))] ++ "" ++{ ++ /* The elements of the buffer are, in order: */ ++ rtx fp = gen_rtx_MEM (Pmode, operands[0]); ++ rtx lab = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 8)); ++ rtx stack = gen_rtx_MEM (Pmode, plus_constant (Pmode, operands[0], 16)); ++ rtx pv = gen_rtx_REG (Pmode, 27); ++ ++ /* This bit is the same as expand_builtin_longjmp. */ ++ emit_move_insn (pv, lab); ++ emit_stack_restore (SAVE_NONLOCAL, stack); ++ emit_use (hard_frame_pointer_rtx); ++ emit_use (stack_pointer_rtx); ++ ++ emit_move_insn (hard_frame_pointer_rtx, fp); ++ /* Load the label we are jumping through into $27 so that we know ++ where to look for it when we get back to setjmp's function for ++ restoring the gp. */ ++ emit_jump_insn (gen_builtin_longjmp_internal (pv)); ++ emit_barrier (); ++ DONE; ++}) ++ ++;; This is effectively a copy of indirect_jump, but constrained such ++;; that register renaming cannot foil our cunning plan with $27. 
++(define_insn "builtin_longjmp_internal" ++ [(set (pc) ++ (unspec_volatile [(match_operand:DI 0 "register_operand" "c")] ++ UNSPECV_LONGJMP))] ++ "" ++ "jmp $31,(%0),0" ++ [(set_attr "type" "ibr")]) ++ ++(define_expand "builtin_setjmp_receiver" ++ [(unspec_volatile [(label_ref (match_operand 0))] UNSPECV_SETJMPR)] ++ "") ++ ++(define_insn_and_split "*builtin_setjmp_receiver_1" ++ [(unspec_volatile [(match_operand 0)] UNSPECV_SETJMPR)] ++ "" ++{ ++ if (TARGET_EXPLICIT_RELOCS) ++ return "#"; ++ else ++ return "br $27,$LSJ%=\n$LSJ%=:\;ldgp $29,0($27)"; ++} ++ "&& TARGET_EXPLICIT_RELOCS && reload_completed" ++ [(set (match_dup 1) ++ (unspec_volatile:DI [(match_dup 2) (match_dup 3)] UNSPECV_LDGP1)) ++ (set (match_dup 1) ++ (unspec:DI [(match_dup 1) (match_dup 3)] UNSPEC_LDGP2))] ++{ ++ if (prev_nonnote_insn (curr_insn) != XEXP (operands[0], 0)) ++ emit_insn (gen_rtx_UNSPEC_VOLATILE (VOIDmode, gen_rtvec (1, operands[0]), ++ UNSPECV_SETJMPR_ER)); ++ operands[1] = pic_offset_table_rtx; ++ operands[2] = gen_rtx_REG (Pmode, 27); ++ operands[3] = GEN_INT (sw_64_next_sequence_number++); ++} ++ [(set_attr "length" "12") ++ (set_attr "type" "multi")]) ++ ++(define_insn "*builtin_setjmp_receiver_er_sl_1" ++ [(unspec_volatile [(match_operand 0)] UNSPECV_SETJMPR_ER)] ++ "TARGET_EXPLICIT_RELOCS" ++ "ldi $27,$LSJ%=-%l0($27)\n$LSJ%=:") ++ ++;; When flag_reorder_blocks_and_partition is in effect, compiler puts ++;; exception landing pads in a cold section. To prevent inter-section offset ++;; calculation, a jump to original landing pad is emitted in the place of the ++;; original landing pad. Since landing pad is moved, RA-relative GP ++;; calculation in the prologue of landing pad breaks. To solve this problem, ++;; we use alternative GP load approach. 
++ ++(define_expand "exception_receiver" ++ [(unspec_volatile [(match_dup 0)] UNSPECV_EHR)] ++ "" ++{ ++ if (flag_reorder_blocks_and_partition) ++ operands[0] = copy_rtx (sw_64_gp_save_rtx ()); ++ else ++ operands[0] = const0_rtx; ++}) ++ ++(define_insn "*exception_receiver_2" ++ [(unspec_volatile [(match_operand:DI 0 "memory_operand" "m")] UNSPECV_EHR)] ++ "flag_reorder_blocks_and_partition" ++ "ldl $29,%0" ++ [(set_attr "type" "ild")]) ++ ++(define_insn_and_split "*exception_receiver_1" ++ [(unspec_volatile [(const_int 0)] UNSPECV_EHR)] ++ "" ++{ ++ if (TARGET_EXPLICIT_RELOCS) ++ return "#"; ++ else ++ return "ldgp $29,0($26)"; ++} ++ "&& TARGET_EXPLICIT_RELOCS && reload_completed" ++ [(set (match_dup 0) ++ (unspec_volatile:DI [(match_dup 1) (match_dup 2)] UNSPECV_LDGP1)) ++ (set (match_dup 0) ++ (unspec:DI [(match_dup 0) (match_dup 2)] UNSPEC_LDGP2))] ++{ ++ operands[0] = pic_offset_table_rtx; ++ operands[1] = gen_rtx_REG (Pmode, 26); ++ operands[2] = GEN_INT (sw_64_next_sequence_number++); ++} ++ [(set_attr "length" "8") ++ (set_attr "type" "multi")]) ++ ++;; Prefetch data. ++;; On SW6, these become official prefetch instructions. ++ ++(define_insn "prefetch" ++ [(prefetch (match_operand:DI 0 "address_operand" "p") ++ (match_operand:DI 1 "const_int_operand" "n") ++ (match_operand:DI 2 "const_int_operand" "n"))] ++ "sw_64_cpu == PROCESSOR_SW6 || sw_64_cpu == PROCESSOR_SW8" ++{ ++ /* Interpret "no temporal locality" as this data should be evicted once ++ it is used. The "evict next" alternatives load the data into the cache ++ and leave the LRU eviction counter pointing to that block. 
*/ ++ static const char * alt[2][2] ; ++ if (flag_sw_prefetch_l1) ++ { ++ alt[0][0] = "fillcs_e %a0" ; /* read, evict next */ ++ alt[0][1] = "fillcs %a0" ; /* read, evict next */ ++ alt[1][0] = "fillde_e %a0" ; /* write, evict next */ ++ alt[1][1] = "fillde %a0" ; /* write, evict next */ ++ ++ } ++ else ++ { ++ alt[0][0] = "s_fillde %a0" ; /* read, evict next */ ++ alt[0][1] = "s_fillcs %a0" ; /* read, evict next */ ++ alt[1][0] = "fillde_e %a0" ; /* write, evict next */ ++ alt[1][1] = "fillde %a0" ; /* write, evict next */ ++ } ++ ++ bool write = INTVAL (operands[1]) != 0; ++ bool lru = INTVAL (operands[2]) != 0; ++ ++ return alt[write][lru]; ++} ++ [(set_attr "type" "ild")]) ++ ++(define_expand "prefetch_sc" ++ [(prefetch (match_operand:DI 0 "address_operand") ++ (match_operand:DI 1 "const_int_operand") ++ (unspec: DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_PFSC))] ++ "sw_64_cpu == PROCESSOR_SW6 || sw_64_cpu == PROCESSOR_SW8") ++ ++(define_insn "prefetch_sc_internal" ++ [(prefetch (match_operand:DI 0 "address_operand" "p") ++ (match_operand:DI 1 "const_int_operand" "n") ++ (unspec: DI [(match_operand:DI 2 "const_int_operand" "n")] UNSPEC_PFSC))] ++ "sw_64_cpu == PROCESSOR_SW6 || sw_64_cpu == PROCESSOR_SW8" ++{ ++ static const char * alt[2]; ++ ++ alt[0] = "s_fillcs %a0"; /* L2 read */ ++ alt[1] = "s_fillde %a0"; /* L2 write */ ++ ++ bool write = INTVAL (operands[1]) != 0; ++ ++ return alt[write]; ++} ++ [(set_attr "type" "ild")]) ++ ++(define_expand "prefetch_tc" ++ [(prefetch (match_operand:DI 0 "address_operand") ++ (match_operand:DI 1 "const_int_operand") ++ (unspec: DI [(match_operand:DI 2 "const_int_operand")] UNSPEC_PFTC))] ++ "sw_64_cpu == PROCESSOR_SW6 || sw_64_cpu == PROCESSOR_SW8") ++ ++(define_insn "prefetch_tc_internal" ++ [(prefetch (match_operand:DI 0 "address_operand" "p") ++ (match_operand:DI 1 "const_int_operand" "n") ++ (unspec: DI [(match_operand:DI 2 "const_int_operand" "n")] UNSPEC_PFTC))] ++ "sw_64_cpu == PROCESSOR_SW6 || sw_64_cpu 
== PROCESSOR_SW8" ++{ ++ static const char * alt[2]; ++ ++ alt[0] = "e_fillcs %a0"; /* L3 read */ ++ alt[1] = "e_fillde %a0"; /* L3 write */ ++ ++ bool write = INTVAL (operands[1]) != 0; ++ ++ return alt[write]; ++} ++ [(set_attr "type" "ild")]) ++ ++(define_insn "trapb" ++ [(unspec_volatile [(const_int 0)] UNSPECV_TRAPB)] ++ "" ++ "memb" ++ [(set_attr "type" "misc")]) ++ ++(define_insn "nop" ++ [(const_int 0)] ++ "" ++ "nop" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "fnop" ++ [(const_int 1)] ++ "TARGET_FP" ++ "fcpys $f31,$f31,$f31" ++ [(set_attr "type" "fcpys")]) ++ ++(define_insn "unop" ++ [(const_int 2)] ++ "" ++{ ++ return "ldl_u $31,0($30)"; ++} ++) ++ ++(define_insn "realign" ++ [(unspec_volatile [(match_operand 0 "immediate_operand" "i")] ++ UNSPECV_REALIGN)] ++ "" ++ ".align %0 #realign") ++ ++(define_insn "builtin_cmpbge" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "reg_or_0_operand" "rJ") ++ (match_operand:DI 2 "reg_or_8bit_operand" "rI")] ++ UNSPEC_CMPBGE))] ++ "" ++ "cmpgeb %r1,%2,%0" ++ [(set_attr "type" "icmp")]) ++ ++(define_expand "extbl" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (8), operands[2])); ++ DONE; ++}) ++ ++(define_expand "extwl" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (16), operands[2])); ++ DONE; ++}) ++ ++(define_expand "extll" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (32), operands[2])); ++ DONE; ++}) ++ ++(define_expand "extql" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 
"reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_extxl (operands[0], operands[1], GEN_INT (64), operands[2])); ++ DONE; ++}) ++ ++(define_expand "builtin_insbl" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ operands[1] = gen_lowpart (QImode, operands[1]); ++ emit_insn (gen_insbl (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "builtin_inswl" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ operands[1] = gen_lowpart (HImode, operands[1]); ++ emit_insn (gen_inswl (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "builtin_insll" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ operands[1] = gen_lowpart (SImode, operands[1]); ++ emit_insn (gen_insll (operands[0], operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "inswh" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (16), operands[2])); ++ DONE; ++}) ++ ++(define_expand "inslh" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (32), operands[2])); ++ DONE; ++}) ++ ++(define_expand "insqh" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_insxh (operands[0], operands[1], GEN_INT (64), operands[2])); ++ DONE; ++}) ++ ++(define_expand "mskbl" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 
"reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ rtx mask = GEN_INT (0xff); ++ emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); ++ DONE; ++}) ++ ++(define_expand "mskwl" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ rtx mask = GEN_INT (0xffff); ++ emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); ++ DONE; ++}) ++ ++(define_expand "mskll" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ rtx mask = gen_int_mode (0xffffffff, DImode); ++ emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); ++ DONE; ++}) ++ ++(define_expand "mskql" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ rtx mask = constm1_rtx; ++ emit_insn (gen_mskxl (operands[0], operands[1], mask, operands[2])); ++ DONE; ++}) ++ ++(define_expand "mskwh" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (16), operands[2])); ++ DONE; ++}) ++ ++(define_expand "msklh" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (32), operands[2])); ++ DONE; ++}) ++ ++(define_expand "mskqh" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "register_operand") ++ (match_operand:DI 2 "reg_or_8bit_operand")] ++ "" ++{ ++ emit_insn (gen_mskxh (operands[0], operands[1], GEN_INT (64), operands[2])); ++ DONE; ++}) ++ ++(define_expand "builtin_zap" ++ [(set (match_operand:DI 0 "register_operand") ++ (and:DI (unspec:DI ++ [(match_operand:DI 
2 "reg_or_cint_operand")] ++ UNSPEC_ZAP) ++ (match_operand:DI 1 "reg_or_cint_operand")))] ++ "" ++{ ++ if (CONST_INT_P (operands[2])) ++ { ++ rtx mask = sw_64_expand_zap_mask (INTVAL (operands[2])); ++ ++ if (mask == const0_rtx) ++ { ++ emit_move_insn (operands[0], const0_rtx); ++ DONE; ++ } ++ if (mask == constm1_rtx) ++ { ++ emit_move_insn (operands[0], operands[1]); ++ DONE; ++ } ++ ++ operands[1] = force_reg (DImode, operands[1]); ++ emit_insn (gen_anddi3 (operands[0], operands[1], mask)); ++ DONE; ++ } ++ ++ operands[1] = force_reg (DImode, operands[1]); ++ operands[2] = gen_lowpart (QImode, operands[2]); ++}) ++ ++(define_insn "*builtin_zap_1" ++ [(set (match_operand:DI 0 "register_operand" "=r,r,r,r") ++ (and:DI (unspec:DI ++ [(match_operand:QI 2 "reg_or_cint_operand" "n,n,r,r")] ++ UNSPEC_ZAP) ++ (match_operand:DI 1 "reg_or_cint_operand" "n,r,J,r")))] ++ "" ++ "@ ++ # ++ # ++ bis $31,$31,%0 ++ zap %r1,%2,%0" ++ [(set_attr "type" "shift,shift,ilog,shift")]) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (and:DI (unspec:DI ++ [(match_operand:QI 2 "const_int_operand")] ++ UNSPEC_ZAP) ++ (match_operand:DI 1 "const_int_operand")))] ++ "" ++ [(const_int 0)] ++{ ++ rtx mask = sw_64_expand_zap_mask (INTVAL (operands[2])); ++ ++ operands[1] = gen_int_mode (INTVAL (operands[1]) & INTVAL (mask), DImode); ++ emit_move_insn (operands[0], operands[1]); ++ DONE; ++}) ++ ++(define_split ++ [(set (match_operand:DI 0 "register_operand") ++ (and:DI (unspec:DI ++ [(match_operand:QI 2 "const_int_operand")] ++ UNSPEC_ZAP) ++ (match_operand:DI 1 "register_operand")))] ++ "" ++ [(set (match_dup 0) ++ (and:DI (match_dup 1) (match_dup 2)))] ++{ ++ operands[2] = sw_64_expand_zap_mask (INTVAL (operands[2])); ++ if (operands[2] == const0_rtx) ++ { ++ emit_move_insn (operands[0], const0_rtx); ++ DONE; ++ } ++ if (operands[2] == constm1_rtx) ++ { ++ emit_move_insn (operands[0], operands[1]); ++ DONE; ++ } ++}) ++ ++(define_expand "builtin_zapnot" ++ [(set 
(match_operand:DI 0 "register_operand") ++ (and:DI (unspec:DI ++ [(not:QI (match_operand:DI 2 "reg_or_cint_operand"))] ++ UNSPEC_ZAP) ++ (match_operand:DI 1 "reg_or_cint_operand")))] ++ "" ++{ ++ if (CONST_INT_P (operands[2])) ++ { ++ rtx mask = sw_64_expand_zap_mask (~ INTVAL (operands[2])); ++ ++ if (mask == const0_rtx) ++ { ++ emit_move_insn (operands[0], const0_rtx); ++ DONE; ++ } ++ if (mask == constm1_rtx) ++ { ++ emit_move_insn (operands[0], operands[1]); ++ DONE; ++ } ++ ++ operands[1] = force_reg (DImode, operands[1]); ++ emit_insn (gen_anddi3 (operands[0], operands[1], mask)); ++ DONE; ++ } ++ ++ operands[1] = force_reg (DImode, operands[1]); ++ operands[2] = gen_lowpart (QImode, operands[2]); ++}) ++ ++(define_insn "*builtin_zapnot_1" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (and:DI (unspec:DI ++ [(not:QI (match_operand:QI 2 "register_operand" "r"))] ++ UNSPEC_ZAP) ++ (match_operand:DI 1 "reg_or_0_operand" "rJ")))] ++ "" ++ "zapnot %r1,%2,%0" ++ [(set_attr "type" "shift")]) ++ ++(define_insn "builtin_amask" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "reg_or_8bit_operand" "rI")] ++ UNSPEC_AMASK))] ++ "" ++ "amask %1,%0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "builtin_implver" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(const_int 0)] UNSPEC_IMPLVER))] ++ "" ++ "implver %0" ++ [(set_attr "type" "ilog")]) ++ ++(define_insn "builtin_rpcc" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_RPCC))] ++ "" ++ "rtc %0" ++ [(set_attr "type" "ilog")]) ++ ++(define_expand "builtin_minub8" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ sw_64_expand_builtin_vector_binop (gen_uminv8qi3, V8QImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_expand "builtin_minsb8" ++ [(match_operand:DI 0 
"register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ sw_64_expand_builtin_vector_binop (gen_sminv8qi3, V8QImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_expand "builtin_minuw4" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ sw_64_expand_builtin_vector_binop (gen_uminv4hi3, V4HImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_expand "builtin_minsw4" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ sw_64_expand_builtin_vector_binop (gen_sminv4hi3, V4HImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_expand "builtin_maxub8" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ sw_64_expand_builtin_vector_binop (gen_umaxv8qi3, V8QImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_expand "builtin_maxsb8" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ sw_64_expand_builtin_vector_binop (gen_smaxv8qi3, V8QImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_expand "builtin_maxuw4" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ sw_64_expand_builtin_vector_binop (gen_umaxv4hi3, V4HImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_expand "builtin_maxsw4" ++ [(match_operand:DI 0 "register_operand") ++ (match_operand:DI 1 "reg_or_0_operand") ++ (match_operand:DI 2 "reg_or_0_operand")] ++ "TARGET_MAX" ++{ ++ 
sw_64_expand_builtin_vector_binop (gen_smaxv4hi3, V4HImode, operands[0], ++ operands[1], operands[2]); ++ DONE; ++}) ++ ++(define_insn "builtin_perr" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec:DI [(match_operand:DI 1 "reg_or_0_operand" "%rJ") ++ (match_operand:DI 2 "reg_or_8bit_operand" "rJ")] ++ UNSPEC_PERR))] ++ "TARGET_MAX" ++ "perr %r1,%r2,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_expand "builtin_pklb" ++ [(set (match_operand:DI 0 "register_operand") ++ (vec_concat:V8QI ++ (vec_concat:V4QI ++ (truncate:V2QI (match_operand:DI 1 "register_operand")) ++ (match_dup 2)) ++ (match_dup 3)))] ++ "TARGET_MAX" ++{ ++ operands[0] = gen_lowpart (V8QImode, operands[0]); ++ operands[1] = gen_lowpart (V2SImode, operands[1]); ++ operands[2] = CONST0_RTX (V2QImode); ++ operands[3] = CONST0_RTX (V4QImode); ++}) ++ ++(define_insn "*pklb" ++ [(set (match_operand:V8QI 0 "register_operand" "=r") ++ (vec_concat:V8QI ++ (vec_concat:V4QI ++ (truncate:V2QI (match_operand:V2SI 1 "register_operand" "r")) ++ (match_operand:V2QI 2 "const0_operand")) ++ (match_operand:V4QI 3 "const0_operand")))] ++ "TARGET_MAX" ++ "pklb %r1,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_expand "builtin_pkwb" ++ [(set (match_operand:DI 0 "register_operand") ++ (vec_concat:V8QI ++ (truncate:V4QI (match_operand:DI 1 "register_operand")) ++ (match_dup 2)))] ++ "TARGET_MAX" ++{ ++ operands[0] = gen_lowpart (V8QImode, operands[0]); ++ operands[1] = gen_lowpart (V4HImode, operands[1]); ++ operands[2] = CONST0_RTX (V4QImode); ++}) ++ ++(define_insn "*pkwb" ++ [(set (match_operand:V8QI 0 "register_operand" "=r") ++ (vec_concat:V8QI ++ (truncate:V4QI (match_operand:V4HI 1 "register_operand" "r")) ++ (match_operand:V4QI 2 "const0_operand")))] ++ "TARGET_MAX" ++ "pkwb %r1,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_expand "builtin_unpkbl" ++ [(set (match_operand:DI 0 "register_operand") ++ (zero_extend:V2SI ++ (vec_select:V2QI (match_operand:DI 1 "register_operand") ++ (parallel [(const_int 
0) (const_int 1)]))))] ++ "TARGET_MAX" ++{ ++ operands[0] = gen_lowpart (V2SImode, operands[0]); ++ operands[1] = gen_lowpart (V8QImode, operands[1]); ++}) ++ ++(define_insn "*unpkbl" ++ [(set (match_operand:V2SI 0 "register_operand" "=r") ++ (zero_extend:V2SI ++ (vec_select:V2QI (match_operand:V8QI 1 "reg_or_0_operand" "rW") ++ (parallel [(const_int 0) (const_int 1)]))))] ++ "TARGET_MAX" ++ "unpkbl %r1,%0" ++ [(set_attr "type" "mvi")]) ++ ++(define_expand "builtin_unpkbw" ++ [(set (match_operand:DI 0 "register_operand") ++ (zero_extend:V4HI ++ (vec_select:V4QI (match_operand:DI 1 "register_operand") ++ (parallel [(const_int 0) ++ (const_int 1) ++ (const_int 2) ++ (const_int 3)]))))] ++ "TARGET_MAX" ++{ ++ operands[0] = gen_lowpart (V4HImode, operands[0]); ++ operands[1] = gen_lowpart (V8QImode, operands[1]); ++}) ++ ++(define_insn "*unpkbw" ++ [(set (match_operand:V4HI 0 "register_operand" "=r") ++ (zero_extend:V4HI ++ (vec_select:V4QI (match_operand:V8QI 1 "reg_or_0_operand" "rW") ++ (parallel [(const_int 0) ++ (const_int 1) ++ (const_int 2) ++ (const_int 3)]))))] ++ "TARGET_MAX" ++ "unpkbw %r1,%0" ++ [(set_attr "type" "mvi")]) ++ ++(include "sync.md") ++ ++;; The call patterns are at the end of the file because their ++;; wildcard operand0 interferes with nice recognition. 
++ ++(define_insn "*call_value_osf_1_er_noreturn" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS ++ && find_reg_note (insn, REG_NORETURN, NULL_RTX)" ++ "@ ++ call $26,($27),0 ++ bsr $26,%1\t\t!samegp ++ ldl $27,%1($29)\t\t!literal!%#\;call $26,($27),%1\t\t!lituse_jsr!%#" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,*,8")]) ++ ++(define_insn "*call_value_osf_1_er_setfpec0" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 3 " ++ "@ ++ call $26,(%1),0\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%* ++ bsr $26,%1\t\t!samegp ++ ldl $27,%1($29)\t\t!literal!%#\;call $26,($27),0\t\t!lituse_jsr!%#\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*call_value_osf_1_er_setfpec1" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 1" ++ "@ ++ call $26,(%1),0\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%* ++ bsr $26,%1\t\t!samegp ++ ldl $27,%1($29)\t\t!literal!%#\;call $26,($27),0\t\t!lituse_jsr!%#\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*call_value_osf_1_er" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS" ++ "@ ++ call $26,(%1),0\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%* ++ bsr $26,%1\t\t!samegp ++ ldl $27,%1($29)\t\t!literal!%#\;call 
$26,($27),0\t\t!lituse_jsr!%#\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++;; We must use peep2 instead of a split because we need accurate life ++;; information for $gp. Consider the case of { bar(); while (1); }. ++(define_peephole2 ++ [(parallel [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))])] ++ "TARGET_EXPLICIT_RELOCS && reload_completed ++ && ! samegp_function_operand (operands[1], Pmode) ++ && (peep2_regno_dead_p (1, 29) ++ || find_reg_note (insn, REG_NORETURN, NULL_RTX))" ++ [(parallel [(set (match_dup 0) ++ (call (mem:DI (match_dup 3)) ++ (match_dup 2))) ++ (use (reg:DI 29)) ++ (use (match_dup 1)) ++ (use (match_dup 4)) ++ (clobber (reg:DI 26))])] ++{ ++ if (CONSTANT_P (operands[1])) ++ { ++ operands[3] = gen_rtx_REG (Pmode, 27); ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ emit_insn (gen_movdi_er_high_g (operands[3], pic_offset_table_rtx, ++ operands[1], operands[4])); ++ } ++ else ++ { ++ operands[3] = operands[1]; ++ operands[1] = const0_rtx; ++ operands[4] = const0_rtx; ++ } ++}) ++(define_peephole2 ++ [(parallel [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))])] ++ "TARGET_EXPLICIT_RELOCS && reload_completed ++ && ! samegp_function_operand (operands[1], Pmode) ++ && ! 
(peep2_regno_dead_p (1, 29) ++ || find_reg_note (insn, REG_NORETURN, NULL_RTX)) ++ && !enable_asan_check_stack ()" ++ [(parallel [(set (match_dup 0) ++ (call (mem:DI (match_dup 3)) ++ (match_dup 2))) ++ (set (match_dup 6) ++ (unspec:DI [(match_dup 6) (match_dup 4)] UNSPEC_LDGP1)) ++ (use (match_dup 1)) ++ (use (match_dup 5)) ++ (clobber (reg:DI 26))]) ++ (set (match_dup 6) ++ (unspec:DI [(match_dup 6) (match_dup 4)] UNSPEC_LDGP2))] ++{ ++ if (CONSTANT_P (operands[1])) ++ { ++ operands[3] = gen_rtx_REG (Pmode, 27); ++ operands[5] = GEN_INT (sw_64_next_sequence_number++); ++ emit_insn (gen_movdi_er_high_g (operands[3], pic_offset_table_rtx, ++ operands[1], operands[5])); ++ } ++ else ++ { ++ operands[3] = operands[1]; ++ operands[1] = const0_rtx; ++ operands[5] = const0_rtx; ++ } ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ operands[6] = pic_offset_table_rtx; ++}) ++ ++(define_insn "*call_value_osf_2_er_nogp" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "register_operand" "c")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (use (match_operand 3)) ++ (use (match_operand 4)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS" ++ "call $26,(%1),%3%J4" ++ [(set_attr "type" "call")]) ++ ++(define_insn "*call_value_osf_2_er" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "register_operand" "c")) ++ (match_operand 2))) ++ (set (reg:DI 29) ++ (unspec:DI [(reg:DI 29) (match_operand 5 "const_int_operand")] ++ UNSPEC_LDGP1)) ++ (use (match_operand 3)) ++ (use (match_operand 4)) ++ (clobber (reg:DI 26))] ++ "TARGET_EXPLICIT_RELOCS" ++ { ++ return "call $26,(%1),%3%J4\;ldih $29,0($26)\t\t!gpdisp!%5"; ++ } ++ [(set_attr "type" "call") ++ (set_attr "cannot_copy" "true") ++ (set_attr "length" "8")]) ++ ++(define_insn "*call_value_osf_1_noreturn" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "! 
TARGET_EXPLICIT_RELOCS ++ && find_reg_note (insn, REG_NORETURN, NULL_RTX)" ++ "@ ++ call $26,($27),0 ++ bsr $26,$%1..ng ++ call $26,%1" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,*,8")]) ++ ++(define_int_iterator TLS_CALL ++ [UNSPEC_TLSGD_CALL ++ UNSPEC_TLSLDM_CALL]) ++ ++(define_int_attr tls ++ [(UNSPEC_TLSGD_CALL "tlsgd") ++ (UNSPEC_TLSLDM_CALL "tlsldm")]) ++ ++(define_insn "call_value_osf_" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "symbolic_operand")) ++ (const_int 0))) ++ (unspec [(match_operand:DI 2 "const_int_operand")] TLS_CALL) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "HAVE_AS_TLS" ++ "ldl $27,%1($29)\t\t!literal!%2\;call $26,($27),%1\t\t!lituse_!%2\;ldih $29,0($26)\t\t!gpdisp!%*\;ldi $29,0($29)\t\t!gpdisp!%*" ++ [(set_attr "type" "call") ++ (set_attr "cannot_copy" "true") ++ (set_attr "length" "16")]) ++ ++;; We must use peep2 instead of a split because we need accurate life ++;; information for $gp. ++(define_peephole2 ++ [(parallel ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "symbolic_operand")) ++ (const_int 0))) ++ (unspec [(match_operand:DI 2 "const_int_operand")] TLS_CALL) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))])] ++ "HAVE_AS_TLS && reload_completed ++ && peep2_regno_dead_p (1, 29)" ++ [(set (match_dup 3) ++ (unspec:DI [(match_dup 5) ++ (match_dup 1) ++ (match_dup 2)] UNSPEC_LITERAL)) ++ (parallel [(set (match_dup 0) ++ (call (mem:DI (match_dup 3)) ++ (const_int 0))) ++ (use (match_dup 5)) ++ (use (match_dup 1)) ++ (use (unspec [(match_dup 2)] TLS_CALL)) ++ (clobber (reg:DI 26))]) ++ (set (match_dup 5) ++ (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP2))] ++{ ++ operands[3] = gen_rtx_REG (Pmode, 27); ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ operands[5] = pic_offset_table_rtx; ++}) ++ ++(define_peephole2 ++ [(parallel ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "symbolic_operand")) ++ (const_int 0))) ++ (unspec [(match_operand:DI 2 
"const_int_operand")] TLS_CALL) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))])] ++ "HAVE_AS_TLS && reload_completed ++ && !peep2_regno_dead_p (1, 29) ++ && !find_reg_note (insn, REG_EH_REGION, NULL_RTX)" ++ [(set (match_dup 3) ++ (unspec:DI [(match_dup 5) ++ (match_dup 1) ++ (match_dup 2)] UNSPEC_LITERAL)) ++ (parallel [(set (match_dup 0) ++ (call (mem:DI (match_dup 3)) ++ (const_int 0))) ++ (set (match_dup 5) ++ (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP1)) ++ (use (match_dup 1)) ++ (use (unspec [(match_dup 2)] TLS_CALL)) ++ (clobber (reg:DI 26))]) ++ (set (match_dup 5) ++ (unspec:DI [(match_dup 5) (match_dup 4)] UNSPEC_LDGP2))] ++{ ++ operands[3] = gen_rtx_REG (Pmode, 27); ++ operands[4] = GEN_INT (sw_64_next_sequence_number++); ++ operands[5] = pic_offset_table_rtx; ++}) ++ ++ ++(define_insn "*call_value_osf_1_setfpec0" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "! TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 3" ++ "@ ++ call $26,($27),0\;ldgp $29,0($26) ++ bsr $26,$%1..ng ++ call $26,%1\;ldgp $29,0($26)" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*call_value_osf_1_setfpec1" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "! TARGET_EXPLICIT_RELOCS && flag_fpcr_set == 1" ++ "@ ++ call $26,($27),0\;ldgp $29,0($26) ++ bsr $26,$%1..ng ++ call $26,%1\;ldgp $29,0($26)" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*call_value_osf_1" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "call_operand" "c,R,s")) ++ (match_operand 2))) ++ (use (reg:DI 29)) ++ (clobber (reg:DI 26))] ++ "! 
TARGET_EXPLICIT_RELOCS" ++ "@ ++ call $26,($27),0\;ldgp $29,0($26) ++ bsr $26,$%1..ng ++ call $26,%1\;ldgp $29,0($26)" ++ [(set_attr "type" "call") ++ (set_attr "length" "12,*,16")]) ++ ++(define_insn "*sibcall_value_osf_1_er" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "symbolic_operand" "R,s")) ++ (match_operand 2))) ++ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] ++ "TARGET_EXPLICIT_RELOCS" ++ "@ ++ br $31,%1\t\t!samegp ++ ldl $27,%1($29)\t\t!literal!%#\;jmp $31,($27),%1\t\t!lituse_jsr!%#" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,8")]) ++ ++(define_insn "*sibcall_value_osf_1" ++ [(set (match_operand 0) ++ (call (mem:DI (match_operand:DI 1 "symbolic_operand" "R,s")) ++ (match_operand 2))) ++ (unspec [(reg:DI 29)] UNSPEC_SIBCALL)] ++ "! TARGET_EXPLICIT_RELOCS" ++ "@ ++ br $31,$%1..ng ++ ldi $27,%1\;jmp $31,($27),%1" ++ [(set_attr "type" "call") ++ (set_attr "length" "*,8")]) ++ ++(define_insn "speculation_barrier" ++ [(unspec_volatile [(const_int 0)] UNSPECV_SPECULATION_BARRIER)] ++"" ++"imemb" ++[(set_attr "type" "misc")]) ++ ++(define_insn "bswaphi2" ++ [(set (match_operand:HI 0 "register_operand" "=r") ++ (bswap:HI (match_operand:HI 1 "register_operand" "r")))] ++ "TARGET_SW8A && flag_sw_rev == 1" ++ "revbh %1,%0" ++ [(set_attr "isa" "sw8a")]) ++ ++(define_insn "stack_tie" ++ [(set (mem:BLK (scratch)) ++ (unspec:BLK [(match_operand:DI 0 "register_operand" "r") ++ (match_operand:DI 1 "register_operand" "r")] ++ UNSPEC_TIE))] ++ "" ++ "" ++ [(set_attr "length" "0")] ++) +diff --git a/gcc/config/sw_64/sw_64.opt b/gcc/config/sw_64/sw_64.opt +new file mode 100644 +index 000000000..c818dff40 +--- /dev/null ++++ b/gcc/config/sw_64/sw_64.opt +@@ -0,0 +1,326 @@ ++; Options for the Sw64 port of the compiler ++; ++; Copyright (C) 2005-2022 Free Software Foundation, Inc. ++; ++; This file is part of GCC. 
++; ++; GCC is free software; you can redistribute it and/or modify it under ++; the terms of the GNU General Public License as published by the Free ++; Software Foundation; either version 3, or (at your option) any later ++; version. ++; ++; GCC is distributed in the hope that it will be useful, but WITHOUT ++; ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++; or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public ++; License for more details. ++; ++; You should have received a copy of the GNU General Public License ++; along with GCC; see the file COPYING3. If not see ++; . ++msw-use-32align ++C C++ Fortran LTO Driver Target Mask(SW_32ALIGN) Save ++Use or not use 32align. ++ ++fsw-sf-cmpsel ++Target Var(flag_sw_sf_cmpsel) Init(0) ++use or not use SF cmp/br/selcet instructions. ++ ++fsw-hardware-prefetch ++Target Var(flag_sw_hardware_prefetch) Init(0) ++set hardware_prefetch registers:PFH_CTL,PFH_CNT. ++ ++fsw-hardware-prefetch-exit ++Target Var(flag_sw_hardware_prefetch_exit) Init(0) ++set hardware_prefetch default value in the function exit. ++ ++fsw-hardware-prefetch-clt= ++Common Joined RejectNegative UInteger Var(flag_hardware_prefetch_clt) Init(5) Optimization ++ ++fsw-hardware-prefetch-cnt-l1= ++Common Joined RejectNegative UInteger Var(flag_hardware_prefetch_cnt_l1) Init(0) Optimization ++ ++fsw-hardware-prefetch-cnt-l2= ++Common Joined RejectNegative UInteger Var(flag_hardware_prefetch_cnt_l2) Init(0) Optimization ++ ++fsw-hardware-prefetch-cnt-l3= ++Common Joined RejectNegative UInteger Var(flag_hardware_prefetch_cnt_l3) Init(5) Optimization ++ ++ ++fsw-branch-fusion ++Target Var(flag_sw_branch_fusion) Init(1) ++fuse the cbranch instructions. ++ ++fsw-branch-combination ++Target Var(flag_sw_branch_combination) Init(0) ++combine the cbranch instructions. ++ ++fsw-fselect ++Target Var(flag_sw_fselect) Init(0) ++Use or not use less instructions for sel/fsel. 
++ ++fsw-prefetch-unroll ++Target Var(flag_sw_prefetch_unroll) Init(0) ++Optimize loop unroll in the prefetch pass. ++ ++fsw-delmemb ++Target Var(flag_sw_delmemb) Init(1) ++Use or not use memb. ++ ++fsw-non-temporal ++Target Var(flag_sw_non_temporal) Init(0) ++Use or not use stw_nc/stl_nc/std_nc. ++ ++fsw-prefetch-tc ++Target Var(flag_sw_prefetch_tc) Init(0) ++Generate L3 level prefetch instruction with multi-level method. ++ ++fsw-prefetch-sc ++Target Var(flag_sw_prefetch_sc) Init(0) ++Generate L2 level prefetch instruction with multi-level method. ++ ++fsw-rsqrt ++Target Var(flag_sw_rsqrt) Init(0) ++Fast calculation of 1/sqrt(x). ++ ++fsw-int-divmod ++Target Var(flag_sw_int_divmod) Init(1) ++Use or not use int div/mod instructions. ++;;;;;;;;;;;;;;;;;;;;;;;;;;;; ++ ++fsw-unalign-byte ++Target Var(flag_sw_unalign_byte) Init(0) ++Not use or use ldl_u/stl_u instructions. ++ ++fsw-auto-inc-dec ++Target Var(flag_sw_auto_inc_dec) Init(0) ++Use or not use int auto-inc-dec load/store instructions. ++ ++fsw-nofcpys ++Target Var(flag_sw_nofcpys) Init(1) ++delete fcpys after fcvtsd instruction. ++ ++fsw-sdsame ++Target Var(flag_sw_sdsame) Init(0) ++For des and src same. ++ ++fsw-fast-math ++Target Var(flag_sw_fast_math) Init(0) ++For 628 fast-math. ++ ++fsw-fma ++Target Var(flag_sw_fma) Init(1) ++Add fma option. ++ ++fsw-prefetch-dc ++Target Var(flag_sw_prefetch_dc) Init(1) ++Generate L1 level prefetch instruction with multi-level method. ++ ++fsw-rtx-cost ++Target Var(flag_sw_rtx_cost) Init(1) ++Adjust the rtx-cost. ++ ++fsw-delnop ++Target Var(flag_sw_delnop) Init(1) ++Delete the nop instruction. ++ ++fsw-sxaddl ++Target Var(flag_sw_sxaddl) Init(1) ++ ++fsw-use-cas ++Target Var(flag_sw_use_cas) Init(0) ++Use or no use compare and swap instruction. ++ ++fsw-fprnd ++Target Var(flag_sw_fprnd) Init(0) ++Use fp rounding instructions. ++ ++fsw-recip ++Target Var(flag_sw_recip) Init(0) ++Use ISA fp reciprocal instructions. 
++ ++fsw-recip-precision ++Target Var(flag_sw_recip_precision) Init(0) ++Use ISA fp reciprocal instructions to provide more accuracy. ++ ++fsw-rtid ++Target Var(flag_sw_rtid) Init(1) ++Use rtid instead of syscall 0x9e. ++ ++fsw-int-div-opt ++Target Var(flag_sw_int_div_opt) Init(0) ++SW div opt. ++ ++fsw-prefetch-l1 ++Target Var(flag_sw_prefetch_l1) Init(1) ++Use l1 load prefetch instead of L2. ++ ++fsw-prefetch-add ++Target Var(flag_sw_prefetch_add) Init(1) ++generate prefetch for cases like stream add. ++ ++msoft-float ++Target Mask(SOFT_FP) ++Do not use hardware fp. ++ ++mfp-regs ++Target Mask(FPREGS) ++Use fp registers. ++ ++mgas ++Target Ignore ++Does nothing. Preserved for backward compatibility. ++ ++mieee-conformant ++Target RejectNegative Mask(IEEE_CONFORMANT) ++Request IEEE-conformant math library routines (SYSV). ++ ++mieee ++Target RejectNegative Mask(IEEE) ++Emit IEEE-conformant code, without inexact exceptions. ++ ++mieee-main ++Target RejectNegative Mask(IEEE_MAIN) ++Emit IEEE-conformant code, without inexact exceptions. ++ ++mieee-with-inexact ++Target RejectNegative Mask(IEEE_WITH_INEXACT) ++ ++mbuild-constants ++Target Mask(BUILD_CONSTANTS) ++Do not emit complex integer constants to read-only memory. ++ ++mfloat-vax ++Target RejectNegative Mask(FLOAT_VAX) ++Use VAX fp. ++ ++mfloat-ieee ++Target RejectNegative InverseMask(FLOAT_VAX) ++Do not use VAX fp. ++ ++mbwx ++Target Mask(BWX) ++Emit code for the byte/word ISA extension. ++ ++mmax ++Target Mask(MAX) ++Emit code for the motion video ISA extension. ++ ++mfix ++Target Mask(FIX) ++Emit code for the fp move and sqrt ISA extension. ++ ++mcix ++Target Mask(CIX) ++Emit code for the counting ISA extension. ++ ++msw6a ++Target Mask(SW6A) ++Emit code for the SW6A ISA extension. ++ ++msw6b ++Target Mask(SW6B) ++Emit code for the SW6B ISA extension. ++ ++msw4d ++Target Mask(SW4D) ++Emit code for the SW4D ISA extension. ++ ++msw8a ++Target Mask(SW8A) ++Emit code for the SW8A ISA extension. 
++ ++mexplicit-relocs ++Target Mask(EXPLICIT_RELOCS) ++Emit code using explicit relocation directives. ++ ++msmall-data ++Target RejectNegative Mask(SMALL_DATA) ++Emit 16-bit relocations to the small data areas. ++ ++mlarge-data ++Target RejectNegative InverseMask(SMALL_DATA) ++Emit 32-bit relocations to the small data areas. ++ ++msmall-text ++Target RejectNegative Mask(SMALL_TEXT) ++Emit direct branches to local functions. ++ ++mlarge-text ++Target RejectNegative InverseMask(SMALL_TEXT) ++Emit indirect branches to local functions. ++ ++mtls-kernel ++Target Mask(TLS_KERNEL) ++Emit rdval for thread pointer. ++ ++mlong-double-128 ++Target RejectNegative Mask(LONG_DOUBLE_128) ++Use 128-bit long double. ++ ++mlong-double-64 ++Target RejectNegative InverseMask(LONG_DOUBLE_128) ++Use 64-bit long double. ++ ++mcpu= ++Target RejectNegative Joined Var(sw_64_cpu_string) ++Use features of and schedule given CPU. ++ ++mtune= ++Target RejectNegative Joined Var(sw_64_tune_string) ++Schedule given CPU. ++ ++mfp-rounding-mode= ++Target RejectNegative Joined Var(sw_64_fprm_string) ++Control the generated fp rounding mode. ++ ++mfp-trap-mode= ++Target RejectNegative Joined Var(sw_64_fptm_string) ++Control the IEEE trap mode. ++ ++mtrap-precision= ++Target RejectNegative Joined Var(sw_64_tp_string) ++Control the precision given to fp exceptions. ++ ++mmemory-latency= ++Target RejectNegative Joined Var(sw_64_mlat_string) ++Tune expected memory latency. ++ ++mtls-size= ++Target RejectNegative Joined UInteger Var(sw_64_tls_size) Init(32) ++Specify bit size of immediate TLS offsets. ++ ++msimd ++C C++ Fortran Driver Target Mask(SW_SIMD) Save ++Support SW SIMD built-in functions and code generation. ++ ++fsw-cmov ++Target Var(flag_sw_cmov) Init(1) ++Use added floating-point integer conversion instruction. ++ ++fsw-shift-word ++Target Var(flag_sw_shift_word) Init(1) ++Control: Rotate Left 64/32, Logical Shift Left/Right 64/32, Algorithmetic Shift Right 64/32. 
++ ++fsw-rev ++Target Var(flag_sw_rev) Init(1) ++Control: Reverse 16/32/64. ++ ++mgprel-size= ++Target RejectNegative Joined UInteger Var(sw_64_gprel_size) Init(32) ++Specify bit size of gprel relocation offsets. ++ ++mtls-tlsgd= ++Target RejectNegative Joined UInteger Var(sw_64_tls_gd) Init(16) ++Specify the bitsize of tlsgd relocation offset relative GP. ++ ++mtls-tlsldm= ++Target RejectNegative Joined UInteger Var(sw_64_tls_ldm) Init(16) ++Specify the bitsize of tlsldm relocation offset relative GP. ++ ++mtls-gotdtprel= ++Target RejectNegative Joined UInteger Var(sw_64_tls_gotdtprel) Init(16) ++Specify the bitsize of gotdtprel relocation offset relative GP. ++ ++mtls-gottprel= ++Target RejectNegative Joined UInteger Var(sw_64_tls_gottprel) Init(16) ++Specify the bitsize of gottprel relocation offset relative GP. +diff --git a/gcc/config/sw_64/sync.md b/gcc/config/sw_64/sync.md +new file mode 100644 +index 000000000..3297bda25 +--- /dev/null ++++ b/gcc/config/sw_64/sync.md +@@ -0,0 +1,495 @@ ++;; GCC machine description for Sw64 synchronization instructions. ++;; Copyright (C) 2005-2022 Free Software Foundation, Inc. ++;; ++;; This file is part of GCC. ++;; ++;; GCC is free software; you can redistribute it and/or modify ++;; it under the terms of the GNU General Public License as published by ++;; the Free Software Foundation; either version 3, or (at your option) ++;; any later version. ++;; ++;; GCC is distributed in the hope that it will be useful, ++;; but WITHOUT ANY WARRANTY; without even the implied warranty of ++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++;; GNU General Public License for more details. ++;; ++;; You should have received a copy of the GNU General Public License ++;; along with GCC; see the file COPYING3. If not see ++;; . 
++ ++(define_code_iterator FETCHOP [plus minus ior xor and]) ++(define_code_attr fetchop_name ++ [(plus "add") (minus "sub") (ior "or") (xor "xor") (and "and")]) ++(define_code_attr fetchop_pred ++ [(plus "add_operand") (minus "reg_or_8bit_operand") ++ (ior "or_operand") (xor "or_operand") (and "and_operand")]) ++(define_code_attr fetchop_constr ++ [(plus "rKL") (minus "rI") (ior "rIN") (xor "rIN") (and "rINM")]) ++ ++ ++(define_expand "memory_barrier" ++ [(set (match_dup 0) ++ (unspec:BLK [(match_dup 0)] UNSPEC_MB))] ++ "" ++{ ++ operands[0] = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode)); ++ MEM_VOLATILE_P (operands[0]) = 1; ++}) ++ ++;; mb-> memb ++(define_insn "*memory_barrier" ++ [(set (match_operand:BLK 0) ++ (unspec:BLK [(match_dup 0)] UNSPEC_MB))] ++ "" ++ "memb" ++ [(set_attr "type" "mb")]) ++ ++;; "ld_l %0,%1" ++(define_insn "@load_locked_" ++ [(set (match_operand:I48MODE 0 "register_operand" "=r") ++ (unspec_volatile:I48MODE ++ [(match_operand:I48MODE 1 "memory_operand" "m")] ++ UNSPECV_LL))] ++ "" ++ { ++ switch ('') ++ { ++ case 'w': ++ return "ldi %0,%1\;lldw %0,0(%0)"; ++ case 'l': ++ return "ldi %0,%1\;lldl %0,0(%0)"; ++ default: ++ return "ld_l %0,%1"; ++ } ++ } ++ [(set_attr "type" "ld_l")]) ++ ++;; "st_c %0,%1" ++(define_insn "@store_conditional_" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_SC)) ++ (set (match_operand:I48MODE 1 "memory_operand" "=m") ++ (match_operand:I48MODE 2 "reg_or_0_operand" "0")) ++ (clobber (reg:DI 28))] ++ "" ++ { ++ switch ('') ++ { ++ case 'w': ++ return "ldi $28,%1\;lstw %0,0($28)"; ++ case 'l': ++ return "ldi $28,%1\;lstl %0,0($28)"; ++ default: ++ return "st_c %0,%1"; ++ } ++ } ++ [(set_attr "type" "st_c")]) ++ ++ (define_insn "builtin_rd_f" ++ [(set (match_operand:DI 0 "register_operand" "=r") ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_SC))] ++ "" ++ "rd_f %0" ++ [(set_attr "type" "st_c")]) ++ ++ (define_insn "builtin_wr_f" ++ [(match_operand:DI 0 
"register_operand" "r") ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_LL)] ++ "" ++ "wr_f %0" ++ [(set_attr "type" "ld_l")]) ++ ++;; The Sw_64 Architecture Handbook says that it is UNPREDICTABLE whether ++;; the lock is cleared by a normal load or store. This means we cannot ++;; expand a ll/sc sequence before reload, lest a register spill is ++;; inserted inside the sequence. It is also UNPREDICTABLE whether the ++;; lock is cleared by a TAKEN branch. This means that we can not expand ++;; a ll/sc sequence containing a branch (i.e. compare-and-swap) until after ++;; the final basic-block reordering pass. ++ ++(define_expand "atomic_compare_and_swap" ++ [(parallel ++ [(set (match_operand:DI 0 "register_operand") ;; bool out ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) ++ (set (match_operand:I48MODE 1 "register_operand") ;; val out ++ (unspec_volatile:I48MODE [(const_int 0)] UNSPECV_CMPXCHG)) ++ (set (match_operand:I48MODE 2 "memory_operand") ;; memory ++ (unspec_volatile:I48MODE ++ [(match_dup 2) ++ (match_operand:I48MODE 3 "reg_or_8bit_operand") ;; expected ++ (match_operand:I48MODE 4 "add_operand") ;; desired ++ (match_operand:SI 5 "const_int_operand") ;; is_weak ++ (match_operand:SI 6 "const_int_operand") ;; succ model ++ (match_operand:SI 7 "const_int_operand") ;; fail model ++ (match_operand:DI 8 "register_operand")] ++ UNSPECV_CMPXCHG)) ++ (clobber (reg:DI 28))])] ++ "" ++{ ++ if (mode == SImode) ++ { ++ operands[3] = convert_modes (DImode, SImode, operands[3], 0); ++ operands[4] = convert_modes (DImode, SImode, operands[4], 0); ++ } ++ if (TARGET_SW8A) ++ { ++ if (flag_sw_use_cas) ++ { ++ if (CONST_INT_P (operands[3])) ++ operands[3] = force_reg (DImode, operands[3]); ++ ++ if (CONST_INT_P (operands[4])) ++ operands[4] = force_reg (DImode, operands[4]); ++ ++ emit_insn (gen_atomic_compare_and_swap_target_sw8a (operands[0], ++ operands[1], ++ operands[2], ++ operands[3], ++ operands[4], ++ operands[5], ++ operands[6], ++ operands[7])); ++ DONE; 
++ } ++ } ++}) ++ ++(define_insn_and_split "*atomic_compare_and_swap" ++ [(set (match_operand:DI 0 "register_operand" "=&r") ;; bool out ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) ++ (set (match_operand:I48MODE 1 "register_operand" "=&r") ;; val out ++ (unspec_volatile:I48MODE [(const_int 0)] UNSPECV_CMPXCHG)) ++ (set (match_operand:I48MODE 2 "memory_operand" "+m") ;; memory ++ (unspec_volatile:I48MODE ++ [(match_dup 2) ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI") ;; expected ++ (match_operand:DI 4 "add_operand" "rKL") ;; desired ++ (match_operand:SI 5 "const_int_operand") ;; is_weak ++ (match_operand:SI 6 "const_int_operand") ;; succ model ++ (match_operand:SI 7 "const_int_operand") ;; fail model ++ (match_operand:DI 8 "register_operand" "r")] ++ UNSPECV_CMPXCHG)) ++ (clobber (reg:DI 28))] ++ ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_compare_and_swap (operands); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_expand "atomic_compare_and_swap" ++ [(match_operand:DI 0 "register_operand") ;; bool out ++ (match_operand:I12MODE 1 "register_operand") ;; val out ++ (match_operand:I12MODE 2 "mem_noofs_operand") ;; memory ++ (match_operand:I12MODE 3 "register_operand") ;; expected ++ (match_operand:I12MODE 4 "add_operand") ;; desired ++ (match_operand:SI 5 "const_int_operand") ;; is_weak ++ (match_operand:SI 6 "const_int_operand") ;; succ model ++ (match_operand:SI 7 "const_int_operand") ;; fail model ++ (match_operand:DI 8 "register_operand")] ++ "" ++{ ++ if ( (TARGET_SW8A) && flag_sw_use_cas) ++ { ++ if (CONST_INT_P (operands[3])) ++ operands[3] = force_reg (mode, operands[3]); ++ ++ if (CONST_INT_P (operands[4])) ++ operands[4] = force_reg (mode, operands[4]); ++ } ++ sw_64_expand_compare_and_swap_12 (operands); ++ DONE; ++}) ++ ++(define_insn_and_split "@atomic_compare_and_swap_1" ++ [(set (match_operand:DI 0 "register_operand" "=&r") ;; bool out ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) ++ 
(set (match_operand:DI 1 "register_operand" "=&r") ;; val out ++ (zero_extend:DI ++ (unspec_volatile:I12MODE [(const_int 0)] UNSPECV_CMPXCHG))) ++ (set (match_operand:I12MODE 2 "mem_noofs_operand" "+w") ;; memory ++ (unspec_volatile:I12MODE ++ [(match_dup 2) ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI") ;; expected ++ (match_operand:DI 4 "reg_or_0_operand" "rJ") ;; desired ++ (match_operand:DI 5 "register_operand" "r") ;; align ++ (match_operand:SI 6 "const_int_operand") ;; is_weak ++ (match_operand:SI 7 "const_int_operand") ;; succ model ++ (match_operand:SI 8 "const_int_operand") ;; fail model ++ (match_operand:DI 9 "register_operand" "r")] ++ UNSPECV_CMPXCHG)) ++ (clobber (match_scratch:DI 10 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_compare_and_swap_12 (operands); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "atomic_compare_and_swap_target_sw8a" ++ [(set (match_operand:DI 0 "register_operand" "=&r") ;; bool out ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) ++ (set (match_operand:I48MODE 1 "register_operand" "=&r") ;; val out ++ (unspec_volatile:I48MODE [(const_int 0)] UNSPECV_CMPXCHG)) ++ (set (match_operand:I48MODE 2 "memory_operand" "+m") ;; memory ++ (unspec_volatile:I48MODE ++ [(match_dup 2) ++ (match_operand:DI 3 "reg_or_8bit_operand" "r") ;; expected ++ (match_operand:DI 4 "add_operand" "r") ;; desired ++ (match_operand:SI 5 "const_int_operand") ;; is_weak ++ (match_operand:SI 6 "const_int_operand") ;; succ model ++ (match_operand:SI 7 "const_int_operand")] ;; fail model ++ UNSPECV_CMPXCHG)) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ if ( TARGET_SW8A && flag_sw_use_cas) ++ sw_64_split_atomic_cas (operands); ++ else ++ sw_64_split_compare_and_swap (operands); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "@atomic_compare_and_swap_1_target_sw8a" ++ [(set (match_operand:DI 0 
"register_operand" "=&r") ;; bool out ++ (unspec_volatile:DI [(const_int 0)] UNSPECV_CMPXCHG)) ++ (set (match_operand:DI 1 "register_operand" "=&r") ;; val out ++ (zero_extend:DI ++ (unspec_volatile:I12MODE [(const_int 0)] UNSPECV_CMPXCHG))) ++ (set (match_operand:I12MODE 2 "mem_noofs_operand" "+w") ;; memory ++ (unspec_volatile:I12MODE ++ [(match_dup 2) ++ (match_operand:DI 3 "reg_or_8bit_operand" "rI") ;; expected ++ (match_operand:DI 4 "register_operand" "r") ;; desired ++ (match_operand:DI 5 "register_operand" "r") ;; align ++ (match_operand:SI 6 "const_int_operand") ;; is_weak ++ (match_operand:SI 7 "const_int_operand") ;; succ model ++ (match_operand:SI 8 "const_int_operand")] ;; fail model ++ UNSPECV_CMPXCHG)) ++ (clobber (match_scratch:DI 9 "=&r")) ++ (clobber (match_scratch:DI 10 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_compare_and_swap_12 (operands); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn "sw_64_atomic_cas" ++ [(set (match_operand:I48MODE 0 "register_operand" "") ;; out ++ (match_operand:I48MODE 1 "memory_operand" "")) ;; memory. ++ (set (match_dup 1) ++ (unspec_volatile:I48MODE ++ [(match_dup 0) ++ (match_operand:I48MODE 2 "register_operand" "")] ;; value. 
++ UNSPECV_CMPXCHG)) ++ (clobber (reg:DI 28))] ++ "TARGET_SW8A && flag_sw_use_cas" ++ "ldi $28,%1\;cas %0,$28,%2") ++;; endif ++ ++(define_insn_and_split "atomic_exchange" ++ [(set (match_operand:I48MODE 0 "register_operand" "=&r") ;; output ++ (match_operand:I48MODE 1 "memory_operand" "+m")) ;; memory ++ (set (match_dup 1) ++ (unspec:I48MODE ++ [(match_operand:I48MODE 2 "add_operand" "rKL") ;; input ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ UNSPEC_XCHG)) ++ (clobber (match_scratch:I48MODE 4 "=&r"))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_exchange (operands); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_expand "atomic_exchange" ++ [(match_operand:I12MODE 0 "register_operand") ;; output ++ (match_operand:I12MODE 1 "mem_noofs_operand") ;; memory ++ (match_operand:I12MODE 2 "reg_or_0_operand") ;; input ++ (match_operand:SI 3 "const_int_operand")] ;; model ++ "" ++{ ++ sw_64_expand_atomic_exchange_12 (operands); ++ DONE; ++}) ++ ++(define_insn_and_split "@atomic_exchange_1" ++ [(set (match_operand:DI 0 "register_operand" "=&r") ;; output ++ (zero_extend:DI ++ (match_operand:I12MODE 1 "mem_noofs_operand" "+w"))) ;; memory ++ (set (match_dup 1) ++ (unspec:I12MODE ++ [(match_operand:DI 2 "reg_or_8bit_operand" "rI") ;; input ++ (match_operand:DI 3 "register_operand" "r") ;; align ++ (match_operand:SI 4 "const_int_operand")] ;; model ++ UNSPEC_XCHG)) ++ (clobber (match_scratch:DI 5 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_exchange_12 (operands); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "atomic_" ++ [(set (match_operand:I48MODE 0 "memory_operand" "+m") ++ (unspec:I48MODE ++ [(FETCHOP:I48MODE (match_dup 0) ++ (match_operand:I48MODE 1 "" "")) ++ (match_operand:SI 2 "const_int_operand")] ++ UNSPEC_ATOMIC)) ++ (clobber (match_scratch:I48MODE 3 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ 
"epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_op (, operands[0], operands[1], ++ NULL, NULL, operands[3], ++ (enum memmodel) INTVAL (operands[2])); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "atomic_nand" ++ [(set (match_operand:I48MODE 0 "memory_operand" "+m") ++ (unspec:I48MODE ++ [(not:I48MODE ++ (and:I48MODE (match_dup 0) ++ (match_operand:I48MODE 1 "register_operand" "r"))) ++ (match_operand:SI 2 "const_int_operand")] ++ UNSPEC_ATOMIC)) ++ (clobber (match_scratch:I48MODE 3 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_op (NOT, operands[0], operands[1], ++ NULL, NULL, operands[3], ++ (enum memmodel) INTVAL (operands[2])); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "atomic_fetch_" ++ [(set (match_operand:I48MODE 0 "register_operand" "=&r") ++ (match_operand:I48MODE 1 "memory_operand" "+m")) ++ (set (match_dup 1) ++ (unspec:I48MODE ++ [(FETCHOP:I48MODE (match_dup 1) ++ (match_operand:I48MODE 2 "" "")) ++ (match_operand:SI 3 "const_int_operand")] ++ UNSPEC_ATOMIC)) ++ (clobber (match_scratch:I48MODE 4 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_op (, operands[1], operands[2], ++ operands[0], NULL, operands[4], ++ (enum memmodel) INTVAL (operands[3])); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "atomic_fetch_nand" ++ [(set (match_operand:I48MODE 0 "register_operand" "=&r") ++ (match_operand:I48MODE 1 "memory_operand" "+m")) ++ (set (match_dup 1) ++ (unspec:I48MODE ++ [(not:I48MODE ++ (and:I48MODE (match_dup 1) ++ (match_operand:I48MODE 2 "register_operand" "r"))) ++ (match_operand:SI 3 "const_int_operand")] ++ UNSPEC_ATOMIC)) ++ (clobber (match_scratch:I48MODE 4 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_op (NOT, operands[1], operands[2], ++ 
operands[0], NULL, operands[4], ++ (enum memmodel) INTVAL (operands[3])); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "atomic__fetch" ++ [(set (match_operand:I48MODE 0 "register_operand" "=&r") ++ (FETCHOP:I48MODE ++ (match_operand:I48MODE 1 "memory_operand" "+m") ++ (match_operand:I48MODE 2 "" ""))) ++ (set (match_dup 1) ++ (unspec:I48MODE ++ [(FETCHOP:I48MODE (match_dup 1) (match_dup 2)) ++ (match_operand:SI 3 "const_int_operand")] ++ UNSPEC_ATOMIC)) ++ (clobber (match_scratch:I48MODE 4 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_op (, operands[1], operands[2], ++ NULL, operands[0], operands[4], ++ (enum memmodel) INTVAL (operands[3])); ++ DONE; ++} ++ [(set_attr "type" "multi")]) ++ ++(define_insn_and_split "atomic_nand_fetch" ++ [(set (match_operand:I48MODE 0 "register_operand" "=&r") ++ (not:I48MODE ++ (and:I48MODE (match_operand:I48MODE 1 "memory_operand" "+m") ++ (match_operand:I48MODE 2 "register_operand" "r")))) ++ (set (match_dup 1) ++ (unspec:I48MODE ++ [(not:I48MODE (and:I48MODE (match_dup 1) (match_dup 2))) ++ (match_operand:SI 3 "const_int_operand")] ++ UNSPEC_ATOMIC)) ++ (clobber (match_scratch:I48MODE 4 "=&r")) ++ (clobber (reg:DI 28))] ++ "" ++ "#" ++ "epilogue_completed" ++ [(const_int 0)] ++{ ++ sw_64_split_atomic_op (NOT, operands[1], operands[2], ++ NULL, operands[0], operands[4], ++ (enum memmodel) INTVAL (operands[3])); ++ DONE; ++} ++ [(set_attr "type" "multi")]) +diff --git a/gcc/config/sw_64/t-linux b/gcc/config/sw_64/t-linux +new file mode 100644 +index 000000000..d78ef47df +--- /dev/null ++++ b/gcc/config/sw_64/t-linux +@@ -0,0 +1 @@ ++MULTIARCH_DIRNAME = $(call if_multiarch,sw_64-linux-gnu) +diff --git a/gcc/config/sw_64/t-sw_64 b/gcc/config/sw_64/t-sw_64 +new file mode 100644 +index 000000000..c9b4b6267 +--- /dev/null ++++ b/gcc/config/sw_64/t-sw_64 +@@ -0,0 +1,19 @@ ++# Copyright (C) 2016-2022 Free Software Foundation, Inc. 
++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++PASSES_EXTRA += $(srcdir)/config/sw_64/sw_64-passes.def +diff --git a/gcc/config/sw_64/x-sw_64 b/gcc/config/sw_64/x-sw_64 +new file mode 100644 +index 000000000..ab615cd1b +--- /dev/null ++++ b/gcc/config/sw_64/x-sw_64 +@@ -0,0 +1,3 @@ ++driver-sw_64.o: $(srcdir)/config/sw_64/driver-sw_64.cc ++ $(COMPILE) $< ++ $(POSTCOMPILE) +diff --git a/gcc/emit-rtl.cc b/gcc/emit-rtl.cc +index 2df5ff414..e500b583a 100644 +--- a/gcc/emit-rtl.cc ++++ b/gcc/emit-rtl.cc +@@ -2409,6 +2409,41 @@ adjust_address_1 (rtx memref, machine_mode mode, poly_int64 offset, + addr = gen_rtx_ZERO_EXTEND (address_mode, + plus_constant (pointer_mode, + XEXP (addr, 0), offset)); ++#endif ++#ifdef FLAG_SW64_INC_DEC ++ else if (GET_CODE (addr) == POST_INC) ++ ; ++ else if (GET_CODE (addr) == POST_DEC) ++ { ++ rtx term; ++ rtx reg = XEXP (addr, 0); ++ if (known_eq (offset, 0)) ++ term = GEN_INT (8); ++ else ++ term = GEN_INT (-24); ++ addr ++ = gen_rtx_POST_MODIFY (mode, reg, gen_rtx_PLUS (mode, reg, term)); ++ } ++ else if (GET_CODE (addr) == POST_MODIFY) ++ { ++ if (GET_CODE (XEXP (addr, 1)) == PLUS) ++ { ++ if (CONSTANT_P (XEXP (XEXP (addr, 1), 1))) ++ { ++ rtx term; ++ rtx reg = XEXP (XEXP (addr, 1), 0); ++ if (known_eq (offset, 0)) ++ term = GEN_INT (8); ++ else ++ term = plus_constant (mode, XEXP (XEXP (addr, 1), 1), -8); ++ if (term == const0_rtx) ++ XEXP 
(addr, 1) = XEXP (XEXP (addr, 1), 0); ++ else ++ addr = gen_rtx_POST_MODIFY (mode, reg, ++ gen_rtx_PLUS (mode, reg, term)); ++ } ++ } ++ } + #endif + else + addr = plus_constant (address_mode, addr, offset); +diff --git a/gcc/explow.cc b/gcc/explow.cc +index 124e8f49e..c7a3570b3 100644 +--- a/gcc/explow.cc ++++ b/gcc/explow.cc +@@ -1275,7 +1275,11 @@ get_dynamic_stack_size (rtx *psize, unsigned size_align, + in SIZE for the hole that might result from the alignment operation. */ + + unsigned known_align = REGNO_POINTER_ALIGN (VIRTUAL_STACK_DYNAMIC_REGNUM); ++#ifndef FLAG_SW64_90139 ++ // it change from 710 extra = (required_align - BITS_PER_UNIT) ++ // / BITS_PER_UNIT; see the test pr20210303 + if (known_align == 0) ++#endif + known_align = BITS_PER_UNIT; + if (required_align > known_align) + { +diff --git a/gcc/final.cc b/gcc/final.cc +index e4bfceabc..3b3f3d415 100644 +--- a/gcc/final.cc ++++ b/gcc/final.cc +@@ -1830,7 +1830,12 @@ profile_function (FILE *file ATTRIBUTE_UNUSED) + { + int align = MIN (BIGGEST_ALIGNMENT, LONG_TYPE_SIZE); + switch_to_section (data_section); ++#ifdef FLAG_SW64_DELNOP ++ if (flag_sw_delnop == 0) ++ ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT)); ++#else + ASM_OUTPUT_ALIGN (file, floor_log2 (align / BITS_PER_UNIT)); ++#endif + targetm.asm_out.internal_label (file, "LP", current_function_funcdef_no); + assemble_integer (const0_rtx, LONG_TYPE_SIZE / BITS_PER_UNIT, align, 1); + } +@@ -2414,6 +2419,8 @@ final_scan_insn_1 (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED, + #ifdef ASM_OUTPUT_ALIGN_WITH_NOP + ASM_OUTPUT_ALIGN_WITH_NOP (file, alignment.levels[0].log); + #else ++ if (flag_sw_delnop == 0) ++ ASM_OUTPUT_ALIGN (file, alignment.levels[0].log); + ASM_OUTPUT_ALIGN (file, alignment.levels[0].log); + #endif + #endif +@@ -2450,7 +2457,12 @@ final_scan_insn_1 (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED, + #else + log_align = exact_log2 (BIGGEST_ALIGNMENT / BITS_PER_UNIT); + #endif ++#ifdef 
FLAG_SW64_DELNOP ++ if (flag_sw_delnop == 0) ++ ASM_OUTPUT_ALIGN (file, log_align); ++#else + ASM_OUTPUT_ALIGN (file, log_align); ++#endif + } + else + switch_to_section (current_function_section ()); +diff --git a/gcc/flags.h b/gcc/flags.h +index 212e357a0..7d025346b 100644 +--- a/gcc/flags.h ++++ b/gcc/flags.h +@@ -76,6 +76,10 @@ extern bool fast_math_flags_struct_set_p (struct cl_optimization *); + /* True if printing into -fdump-final-insns= dump. */ + + extern bool final_insns_dump_p; ++#ifdef SW64_TARGET_SUPPORT_FPCR ++extern int flag_fpcr_set; ++extern int stfp3_flag; ++#endif + + + /* Other basic status info about current function. */ +diff --git a/gcc/fortran/interface.cc b/gcc/fortran/interface.cc +index 874acb914..26ab11a39 100644 +--- a/gcc/fortran/interface.cc ++++ b/gcc/fortran/interface.cc +@@ -3355,10 +3355,19 @@ gfc_compare_actual_formal (gfc_actual_arglist **ap, gfc_formal_arglist *formal, + "at %L", f->sym->name, actual_size, + formal_size, &a->expr->where); + else +- gfc_error_now ("Actual argument contains too few " ++#ifdef FLAG_SW64_90139 // close this for it will cause speccpu 416 build err ++ gfc_warning (OPT_Wargument_mismatch, ++ "Actual argument contains too few " ++ "elements for dummy argument %qs (%lu/%lu) " ++ "at %L.Please add -std=legacy options", ++ f->sym->name, actual_size, formal_size, ++ &a->expr->where); ++#else ++ gfc_error_now ("Actual argument contains too few " + "elements for dummy argument %qs (%lu/%lu) " + "at %L", f->sym->name, actual_size, + formal_size, &a->expr->where); ++#endif + } + ok = false; + goto match; +diff --git a/gcc/gcc.cc b/gcc/gcc.cc +index b37b50be2..01889f0d4 100644 +--- a/gcc/gcc.cc ++++ b/gcc/gcc.cc +@@ -1953,6 +1953,12 @@ init_spec (void) + } + #endif + ++/* --no-relax for sw_64 */ ++#ifdef ENABLE_LD_NORELAX ++#define LINK_NORELAX_SPEC "%{!r:--no-relax} " ++ obstack_grow (&obstack, LINK_NORELAX_SPEC, sizeof (LINK_NORELAX_SPEC) - 1); ++#endif ++ + #if defined LINK_EH_SPEC || defined 
LINK_BUILDID_SPEC || \ + defined LINKER_HASH_STYLE + # ifdef LINK_BUILDID_SPEC +diff --git a/gcc/gimple-match-head.cc b/gcc/gimple-match-head.cc +index 1c74d3808..ec97284dd 100644 +--- a/gcc/gimple-match-head.cc ++++ b/gcc/gimple-match-head.cc +@@ -1243,6 +1243,11 @@ optimize_pow_to_exp (tree arg0, tree arg1) + case PLUS_EXPR: + case MINUS_EXPR: + break; ++#ifdef FLAG_SW64_FM ++ case PAREN_EXPR: ++ if (flag_sw_fast_math == 1) ++ return false; ++#endif + default: + return true; + } +diff --git a/gcc/optabs.cc b/gcc/optabs.cc +index b441137de..72b7c1513 100644 +--- a/gcc/optabs.cc ++++ b/gcc/optabs.cc +@@ -6988,7 +6988,12 @@ expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval, + enum memmodel fail_model) + { + machine_mode mode = GET_MODE (mem); ++#ifdef FLAG_SW64_ATOMIC ++ class expand_operand ops[9]; ++ rtx imust = gen_reg_rtx (DImode); ++#else + class expand_operand ops[8]; ++#endif + enum insn_code icode; + rtx target_oval, target_bool = NULL_RTX; + rtx libfunc; +@@ -7037,7 +7042,12 @@ expand_atomic_compare_and_swap (rtx *ptarget_bool, rtx *ptarget_oval, + create_integer_operand (&ops[5], is_weak); + create_integer_operand (&ops[6], succ_model); + create_integer_operand (&ops[7], fail_model); ++#ifdef FLAG_SW64_ATOMIC ++ create_fixed_operand (&ops[8], imust); ++ if (maybe_expand_insn (icode, 9, ops)) ++#else + if (maybe_expand_insn (icode, 8, ops)) ++#endif + { + /* Return success/failure. */ + target_bool = ops[0].value; +diff --git a/include/longlong.h b/include/longlong.h +index 64a7b10f9..9f65d6eca 100644 +--- a/include/longlong.h ++++ b/include/longlong.h +@@ -1458,6 +1458,65 @@ extern UDItype __umulsidi3 (USItype, USItype); + #define UDIV_TIME 230 + #endif /* sparc64 */ + ++#if defined(__sw_64) && W_TYPE_SIZE == 64 ++/* There is a bug in g++ before version 5 that ++ errors on __builtin_sw_64_umulh. 
*/ ++#if !defined(__cplusplus) || __GNUC__ >= 5 ++#define umul_ppmm(ph, pl, m0, m1) \ ++ do \ ++ { \ ++ UDItype __m0 = (m0), __m1 = (m1); \ ++ (ph) = __builtin_sw_64_umulh (__m0, __m1); \ ++ (pl) = __m0 * __m1; \ ++ } while (0) ++#define UMUL_TIME 46 ++#endif /* !c++ */ ++#ifndef LONGLONG_STANDALONE ++#define udiv_qrnnd(q, r, n1, n0, d) \ ++ do \ ++ { \ ++ UDItype __r; \ ++ (q) = __udiv_qrnnd (&__r, (n1), (n0), (d)); \ ++ (r) = __r; \ ++ } while (0) ++extern UDItype ++__udiv_qrnnd (UDItype *, UDItype, UDItype, UDItype); ++#define UDIV_TIME 220 ++#endif /* LONGLONG_STANDALONE */ ++#ifdef __sw_64_cix__ ++#define count_leading_zeros(COUNT, X) ((COUNT) = __builtin_clzl (X)) ++#define count_trailing_zeros(COUNT, X) ((COUNT) = __builtin_ctzl (X)) ++#define COUNT_LEADING_ZEROS_0 64 ++#else ++#define count_leading_zeros(COUNT, X) \ ++ do \ ++ { \ ++ UDItype __xr = (X), __t, __a; \ ++ __t = __builtin_sw_64_cmpbge (0, __xr); \ ++ __a = __clz_tab[__t ^ 0xff] - 1; \ ++ __t = __builtin_sw_64_extbl (__xr, __a); \ ++ (COUNT) = 64 - (__clz_tab[__t] + __a * 8); \ ++ } while (0) ++#define count_trailing_zeros(COUNT, X) \ ++ do \ ++ { \ ++ UDItype __xr = (X), __t, __a; \ ++ __t = __builtin_sw_64_cmpbge (0, __xr); \ ++ __t = ~__t & -~__t; \ ++ __a = ((__t & 0xCC) != 0) * 2; \ ++ __a += ((__t & 0xF0) != 0) * 4; \ ++ __a += ((__t & 0xAA) != 0); \ ++ __t = __builtin_sw_64_extbl (__xr, __a); \ ++ __a <<= 3; \ ++ __t &= -__t; \ ++ __a += ((__t & 0xCC) != 0) * 2; \ ++ __a += ((__t & 0xF0) != 0) * 4; \ ++ __a += ((__t & 0xAA) != 0); \ ++ (COUNT) = __a; \ ++ } while (0) ++#endif /* __sw_64_cix__ */ ++#endif /* __sw_64 */ ++ + #if defined (__vax__) && W_TYPE_SIZE == 32 + #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ + __asm__ ("addl2 %5,%1\n\tadwc %3,%0" \ +-- +2.25.1 + diff --git a/0003-CONFIG-Regenerate-configure-file.patch b/0003-CONFIG-Regenerate-configure-file.patch new file mode 100644 index 0000000000000000000000000000000000000000..c31c887035066ed0964d7a8e79f31f2cce44c43d --- /dev/null 
+++ b/0003-CONFIG-Regenerate-configure-file.patch @@ -0,0 +1,45 @@ +From 37ef787e743d98f9f6e53005d99709fb8e284964 Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Fri, 14 Jul 2023 11:07:05 +0800 +Subject: [PATCH 03/22] [CONFIG] Regenerate configure file + +Regenerate configure file under libquadmath directory +since it is out of date. +--- + libquadmath/configure | 6 +++--- + 1 file changed, 3 insertions(+), 3 deletions(-) + +diff --git a/libquadmath/configure b/libquadmath/configure +index b3ee64f9c..603f2f131 100755 +--- a/libquadmath/configure ++++ b/libquadmath/configure +@@ -10806,7 +10806,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10819 "configure" ++#line 10809 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -10912,7 +10912,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10925 "configure" ++#line 10915 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -13031,7 +13031,7 @@ case "$host" in + case "$enable_cet" in + auto) + # Check if target supports multi-byte NOPs +- # and if assembler supports CET insn. ++ # and if compiler and assembler support CET insn. 
+ cet_save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -fcf-protection" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +-- +2.33.0 + diff --git a/0003-Modify-gas-uleb128-support-test.patch b/0003-Modify-gas-uleb128-support-test.patch new file mode 100644 index 0000000000000000000000000000000000000000..4151ed70748311c8ced344edaa269aebe068b5c7 --- /dev/null +++ b/0003-Modify-gas-uleb128-support-test.patch @@ -0,0 +1,115 @@ +From 38c338555e64da83fd35c608a1a89d738e1ca356 Mon Sep 17 00:00:00 2001 +From: mengqinggang +Date: Fri, 15 Sep 2023 12:04:04 +0800 +Subject: [PATCH 003/188] Modify gas uleb128 support test + +Some assemblers (GNU as for LoongArch) generates relocations for leb128 +symbol arithmetic for relaxation, we need to disable relaxation probing +leb128 support then. + +gcc/ChangeLog: + + * configure: Regenerate. + * configure.ac: Checking assembler for -mno-relax support. + Disable relaxation when probing leb128 support. + +co-authored-by: Xi Ruoyao +--- + gcc/configure | 42 +++++++++++++++++++++++++++++++++++++++++- + gcc/configure.ac | 17 ++++++++++++++++- + 2 files changed, 57 insertions(+), 2 deletions(-) + +diff --git a/gcc/configure b/gcc/configure +index 8ae8a924a..430d44dc3 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -24441,6 +24441,46 @@ _ACEOF + + + ++# Some assemblers (GNU as for LoongArch) generates relocations for ++# leb128 symbol arithmetic for relaxation, we need to disable relaxation ++# probing leb128 support then. ++case $target in ++ loongarch*-*-*) ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for -mno-relax support" >&5 ++$as_echo_n "checking assembler for -mno-relax support... 
" >&6; } ++if ${gcc_cv_as_mno_relax+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ gcc_cv_as_mno_relax=no ++ if test x$gcc_cv_as != x; then ++ $as_echo '.text' > conftest.s ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -mno-relax -o conftest.o conftest.s >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_mno_relax=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_mno_relax" >&5 ++$as_echo "$gcc_cv_as_mno_relax" >&6; } ++if test $gcc_cv_as_mno_relax = yes; then ++ check_leb128_asflags=-mno-relax ++fi ++ ++ ;; ++ *) ++ check_leb128_asflags= ++ ;; ++esac ++ + # Check if we have .[us]leb128, and support symbol arithmetic with it. + # Older versions of GAS and some non-GNU assemblers, have a bugs handling + # these directives, even when they appear to accept them. +@@ -24459,7 +24499,7 @@ L1: + L2: + .uleb128 0x8000000000000000 + ' > conftest.s +- if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5' ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags $check_leb128_asflags -o conftest.o conftest.s >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 + (eval $ac_try) 2>&5 + ac_status=$? +diff --git a/gcc/configure.ac b/gcc/configure.ac +index f7161e66e..4b24db190 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -3185,10 +3185,25 @@ AC_MSG_RESULT($gcc_cv_ld_ro_rw_mix) + + gcc_AC_INITFINI_ARRAY + ++# Some assemblers (GNU as for LoongArch) generates relocations for ++# leb128 symbol arithmetic for relaxation, we need to disable relaxation ++# probing leb128 support then. 
++case $target in ++ loongarch*-*-*) ++ gcc_GAS_CHECK_FEATURE([-mno-relax support], ++ gcc_cv_as_mno_relax,[-mno-relax],[.text],, ++ [check_leb128_asflags=-mno-relax]) ++ ;; ++ *) ++ check_leb128_asflags= ++ ;; ++esac ++ + # Check if we have .[us]leb128, and support symbol arithmetic with it. + # Older versions of GAS and some non-GNU assemblers, have a bugs handling + # these directives, even when they appear to accept them. +-gcc_GAS_CHECK_FEATURE([.sleb128 and .uleb128], gcc_cv_as_leb128,, ++gcc_GAS_CHECK_FEATURE([.sleb128 and .uleb128], gcc_cv_as_leb128, ++[$check_leb128_asflags], + [ .data + .uleb128 L2 - L1 + L1: +-- +2.43.0 + diff --git a/0003-Sw64-Port-add-multi-prefetch-support-for-sw64.patch b/0003-Sw64-Port-add-multi-prefetch-support-for-sw64.patch new file mode 100644 index 0000000000000000000000000000000000000000..33bf800101a837556452c5e9db39ac8552942d9e --- /dev/null +++ b/0003-Sw64-Port-add-multi-prefetch-support-for-sw64.patch @@ -0,0 +1,528 @@ +From bfa51c2eda3e40fdfd64601f5e7df19049a006cd Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:33:10 +0800 +Subject: [PATCH 03/16] Sw64 Port: add multi-prefetch support for sw64 + +--- + gcc/builtins.cc | 161 +++++++++++++++++++++++++++++++++- + gcc/builtins.def | 2 + + gcc/ipa-pure-const.cc | 2 + + gcc/opt-functions.awk | 4 +- + gcc/params.opt | 12 +++ + gcc/target-insns.def | 3 + + gcc/tree-ssa-loop-prefetch.cc | 155 +++++++++++++++++++++++++++++++- + 7 files changed, 336 insertions(+), 3 deletions(-) + +diff --git a/gcc/builtins.cc b/gcc/builtins.cc +index 57929a42b..c2589f316 100644 +--- a/gcc/builtins.cc ++++ b/gcc/builtins.cc +@@ -109,6 +109,8 @@ static int apply_args_size (void); + static int apply_result_size (void); + static rtx result_vector (int, rtx); + static void expand_builtin_prefetch (tree); ++static void expand_builtin_prefetch_sc (tree); ++static void expand_builtin_prefetch_tc (tree); + static rtx expand_builtin_apply_args (void); + static rtx 
expand_builtin_apply_args_1 (void); + static rtx expand_builtin_apply (rtx, rtx, rtx); +@@ -1352,6 +1354,156 @@ expand_builtin_prefetch (tree exp) + emit_insn (op0); + } + ++static void ++expand_builtin_prefetch_sc (tree exp) ++{ ++ tree arg0, arg1, arg2; ++ int nargs; ++ rtx op0, op1, op2; ++ ++ if (!validate_arglist (exp, POINTER_TYPE, 0)) ++ return; ++ ++ arg0 = CALL_EXPR_ARG (exp, 0); ++ ++ /* Arguments 1 and 2 are optional; argument 1 (read/write) defaults to ++ * zero (read) and argument 2 (locality) defaults to 3 (high degree of ++ * locality). */ ++ nargs = call_expr_nargs (exp); ++ if (nargs > 1) ++ arg1 = CALL_EXPR_ARG (exp, 1); ++ else ++ arg1 = integer_zero_node; ++ if (nargs > 2) ++ arg2 = CALL_EXPR_ARG (exp, 2); ++ else ++ arg2 = integer_three_node; ++ ++ /* Argument 0 is an address. */ ++ op0 = expand_expr (arg0, NULL_RTX, Pmode, EXPAND_NORMAL); ++ ++ /* Argument 1 (read/write flag) must be a compile-time constant int. */ ++ if (TREE_CODE (arg1) != INTEGER_CST) ++ { ++ error ("second argument to %<__builtin_prefetch_sc%> must be a constant"); ++ arg1 = integer_zero_node; ++ } ++ op1 = expand_normal (arg1); ++ /* Argument 1 must be either zero or one. */ ++ if (INTVAL (op1) != 0 && INTVAL (op1) != 1) ++ { ++ warning (0, "invalid second argument to %<__builtin_prefetch_sc%>;" ++ " using zero"); ++ op1 = const0_rtx; ++ } ++ ++ /* Argument 2 (locality) must be a compile-time constant int. */ ++ if (TREE_CODE (arg2) != INTEGER_CST) ++ { ++ error ("third argument to %<__builtin_prefetch_sc%> must be a constant"); ++ arg2 = integer_zero_node; ++ } ++ op2 = expand_normal (arg2); ++ /* Argument 2 must be 0, 1, 2, or 3. 
*/ ++ if (INTVAL (op2) < 0 || INTVAL (op2) > 3) ++ { ++ warning ( ++ 0, "invalid third argument to %<__builtin_prefetch_sc%>; using zero"); ++ op2 = const0_rtx; ++ } ++ ++ if (targetm.have_prefetch ()) ++ { ++ class expand_operand ops[3]; ++ ++ create_address_operand (&ops[0], op0); ++ create_integer_operand (&ops[1], INTVAL (op1)); ++ create_integer_operand (&ops[2], INTVAL (op2)); ++ if (maybe_expand_insn (targetm.code_for_prefetch_sc, 3, ops)) ++ return; ++ } ++ ++ /* Don't do anything with direct references to volatile memory, but ++ * generate code to handle other side effects. */ ++ if (!MEM_P (op0) && side_effects_p (op0)) ++ emit_insn (op0); ++} ++ ++static void ++expand_builtin_prefetch_tc (tree exp) ++{ ++ tree arg0, arg1, arg2; ++ int nargs; ++ rtx op0, op1, op2; ++ ++ if (!validate_arglist (exp, POINTER_TYPE, 0)) ++ return; ++ ++ arg0 = CALL_EXPR_ARG (exp, 0); ++ ++ /* Arguments 1 and 2 are optional; argument 1 (read/write) defaults to ++ * zero (read) and argument 2 (locality) defaults to 3 (high degree of ++ * locality). */ ++ nargs = call_expr_nargs (exp); ++ if (nargs > 1) ++ arg1 = CALL_EXPR_ARG (exp, 1); ++ else ++ arg1 = integer_zero_node; ++ if (nargs > 2) ++ arg2 = CALL_EXPR_ARG (exp, 2); ++ else ++ arg2 = integer_three_node; ++ ++ /* Argument 0 is an address. */ ++ op0 = expand_expr (arg0, NULL_RTX, Pmode, EXPAND_NORMAL); ++ ++ /* Argument 1 (read/write flag) must be a compile-time constant int. */ ++ if (TREE_CODE (arg1) != INTEGER_CST) ++ { ++ error ("second argument to %<__builtin_prefetch%> must be a constant"); ++ arg1 = integer_zero_node; ++ } ++ op1 = expand_normal (arg1); ++ /* Argument 1 must be either zero or one. */ ++ if (INTVAL (op1) != 0 && INTVAL (op1) != 1) ++ { ++ warning (0, "invalid second argument to %<__builtin_prefetch%>;" ++ " using zero"); ++ op1 = const0_rtx; ++ } ++ ++ /* Argument 2 (locality) must be a compile-time constant int. 
*/ ++ if (TREE_CODE (arg2) != INTEGER_CST) ++ { ++ error ("third argument to %<__builtin_prefetch%> must be a constant"); ++ arg2 = integer_zero_node; ++ } ++ op2 = expand_normal (arg2); ++ /* Argument 2 must be 0, 1, 2, or 3. */ ++ if (INTVAL (op2) < 0 || INTVAL (op2) > 3) ++ { ++ warning (0, ++ "invalid third argument to %<__builtin_prefetch%>; using zero"); ++ op2 = const0_rtx; ++ } ++ ++ if (targetm.have_prefetch ()) ++ { ++ class expand_operand ops[3]; ++ ++ create_address_operand (&ops[0], op0); ++ create_integer_operand (&ops[1], INTVAL (op1)); ++ create_integer_operand (&ops[2], INTVAL (op2)); ++ if (maybe_expand_insn (targetm.code_for_prefetch_tc, 3, ops)) ++ return; ++ } ++ ++ /* Don't do anything with direct references to volatile memory, but ++ * generate code to handle other side effects. */ ++ if (!MEM_P (op0) && side_effects_p (op0)) ++ emit_insn (op0); ++} ++ + /* Get a MEM rtx for expression EXP which is the address of an operand + to be used in a string instruction (cmpstrsi, cpymemsi, ..). 
LEN is + the maximum length of the block of memory that might be accessed or +@@ -7598,7 +7750,12 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode, + case BUILT_IN_PREFETCH: + expand_builtin_prefetch (exp); + return const0_rtx; +- ++ case BUILT_IN_PREFETCH_SC: ++ expand_builtin_prefetch_sc (exp); ++ return const0_rtx; ++ case BUILT_IN_PREFETCH_TC: ++ expand_builtin_prefetch_tc (exp); ++ return const0_rtx; + case BUILT_IN_INIT_TRAMPOLINE: + return expand_builtin_init_trampoline (exp, true); + case BUILT_IN_INIT_HEAP_TRAMPOLINE: +@@ -10989,6 +11146,8 @@ is_inexpensive_builtin (tree decl) + case BUILT_IN_LABS: + case BUILT_IN_LLABS: + case BUILT_IN_PREFETCH: ++ case BUILT_IN_PREFETCH_SC: ++ case BUILT_IN_PREFETCH_TC: + case BUILT_IN_ACC_ON_DEVICE: + return true; + +diff --git a/gcc/builtins.def b/gcc/builtins.def +index 005976f34..983de293e 100644 +--- a/gcc/builtins.def ++++ b/gcc/builtins.def +@@ -924,6 +924,8 @@ DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTL, "popcountl", BT_FN_INT_ULONG, ATTR_C + DEF_GCC_BUILTIN (BUILT_IN_POPCOUNTLL, "popcountll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST) + DEF_EXT_LIB_BUILTIN (BUILT_IN_POSIX_MEMALIGN, "posix_memalign", BT_FN_INT_PTRPTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF) + DEF_GCC_BUILTIN (BUILT_IN_PREFETCH, "prefetch", BT_FN_VOID_CONST_PTR_VAR, ATTR_NOVOPS_LEAF_LIST) ++DEF_GCC_BUILTIN (BUILT_IN_PREFETCH_SC, "prefetch_sc", BT_FN_VOID_CONST_PTR_VAR, ATTR_NOVOPS_LEAF_LIST) ++DEF_GCC_BUILTIN (BUILT_IN_PREFETCH_TC, "prefetch_tc", BT_FN_VOID_CONST_PTR_VAR, ATTR_NOVOPS_LEAF_LIST) + DEF_LIB_BUILTIN (BUILT_IN_REALLOC, "realloc", BT_FN_PTR_PTR_SIZE, ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LEAF_LIST) + DEF_GCC_BUILTIN (BUILT_IN_RETURN, "return", BT_FN_VOID_PTR, ATTR_NORETURN_NOTHROW_LEAF_LIST) + DEF_GCC_BUILTIN (BUILT_IN_RETURN_ADDRESS, "return_address", BT_FN_PTR_UINT, ATTR_LEAF_LIST) +diff --git a/gcc/ipa-pure-const.cc b/gcc/ipa-pure-const.cc +index 2642df91e..89a950966 100644 +--- a/gcc/ipa-pure-const.cc 
++++ b/gcc/ipa-pure-const.cc +@@ -534,6 +534,8 @@ builtin_safe_for_const_function_p (bool *looping, tree callee) + *looping = false; + return true; + case BUILT_IN_PREFETCH: ++ case BUILT_IN_PREFETCH_SC: ++ case BUILT_IN_PREFETCH_TC: + *looping = true; + return true; + default: +diff --git a/gcc/opt-functions.awk b/gcc/opt-functions.awk +index 2aee0b9f1..0dabde89d 100644 +--- a/gcc/opt-functions.awk ++++ b/gcc/opt-functions.awk +@@ -247,6 +247,8 @@ function var_type(flags) + return "HOST_WIDE_INT " + else if (flag_set_p("UInteger", flags)) + return "int " ++ else if (flag_set_p("UInteger", flags)) ++ return "int " + else + return "const char *" + } +@@ -256,7 +258,7 @@ function var_type(flags) + # type instead of int to save space. + function var_type_struct(flags) + { +- if (flag_set_p("UInteger", flags)) { ++ if (flag_set_p("UInteger", flags)) { + if (host_wide_int[var_name(flags)] == "yes") + return "HOST_WIDE_INT "; + if (flag_set_p("ByteSize", flags)) +diff --git a/gcc/params.opt b/gcc/params.opt +index 3ddfaf5b2..5abc8ce82 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -385,6 +385,18 @@ The size of L1 cache. + Common Joined UInteger Var(param_l2_cache_size) Init(512) Param Optimization + The size of L2 cache. + ++-param=pf1= ++Common Joined UInteger Var(PF1) Init(0) IntegerRange(0,200) Param Optimization ++The number of Cache lines add to L1 prefetch delta. ++ ++-param=pf2= ++Common Joined UInteger Var(PF2) Init(0) IntegerRange(0,200) Param Optimization ++The number of Cache lines add to L2 prefetch delta. ++ ++-param=pf3= ++Common Joined UInteger Var(PF3) Init(0) IntegerRange(0,200) Param Optimization ++The number of Cache lines add to L3 prefetch delta. ++ + -param=large-function-growth= + Common Joined UInteger Var(param_large_function_growth) Optimization Init(100) Param + Maximal growth due to inlining of large function (in percent). 
+diff --git a/gcc/target-insns.def b/gcc/target-insns.def +index de8c0092f..8b4da8bc4 100644 +--- a/gcc/target-insns.def ++++ b/gcc/target-insns.def +@@ -77,6 +77,9 @@ DEF_TARGET_INSN (omp_simt_vote_any, (rtx x0, rtx x1)) + DEF_TARGET_INSN (omp_simt_xchg_bfly, (rtx x0, rtx x1, rtx x2)) + DEF_TARGET_INSN (omp_simt_xchg_idx, (rtx x0, rtx x1, rtx x2)) + DEF_TARGET_INSN (prefetch, (rtx x0, rtx x1, rtx x2)) ++DEF_TARGET_INSN (prefetch_sc, (rtx x0, rtx x1, rtx x2)) ++DEF_TARGET_INSN (prefetch_tc, (rtx x0, rtx x1, rtx x2)) ++/*********************/ + DEF_TARGET_INSN (probe_stack, (rtx x0)) + DEF_TARGET_INSN (probe_stack_address, (rtx x0)) + DEF_TARGET_INSN (prologue, (void)) +diff --git a/gcc/tree-ssa-loop-prefetch.cc b/gcc/tree-ssa-loop-prefetch.cc +index aebd7c920..6aa242260 100644 +--- a/gcc/tree-ssa-loop-prefetch.cc ++++ b/gcc/tree-ssa-loop-prefetch.cc +@@ -193,6 +193,9 @@ along with GCC; see the file COPYING3. If not see + #define L1_CACHE_SIZE_BYTES ((unsigned) (param_l1_cache_size * 1024)) + #define L2_CACHE_SIZE_BYTES ((unsigned) (param_l2_cache_size * 1024)) + ++#ifdef FLAG_SW64_PREFETCH ++#define L1_CACHE_LINE_SIZE ((unsigned) (param_l1_cache_line_size)) ++#endif + /* We consider a memory access nontemporal if it is not reused sooner than + after L2_CACHE_SIZE_BYTES of memory are accessed. However, we ignore + accesses closer than L1_CACHE_SIZE_BYTES / NONTEMPORAL_FRACTION, +@@ -1057,7 +1060,11 @@ schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor, + + /* At most param_simultaneous_prefetches should be running + at the same time. */ ++#ifdef FLAG_SW64_PREFETCH ++ remaining_prefetch_slots = param_simultaneous_prefetches * 5; ++#else + remaining_prefetch_slots = param_simultaneous_prefetches; ++#endif + + /* The prefetch will run for AHEAD iterations of the original loop, i.e., + AHEAD / UNROLL_FACTOR iterations of the unrolled loop. 
In each iteration, +@@ -1081,8 +1088,10 @@ schedule_prefetches (struct mem_ref_group *groups, unsigned unroll_factor, + /* The loop is far from being sufficiently unrolled for this + prefetch. Do not generate prefetch to avoid many redudant + prefetches. */ +- if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO) ++#ifndef FLAG_SW64_PREFETCH ++ if (ref->prefetch_mod / unroll_factor > PREFETCH_MOD_TO_UNROLL_FACTOR_RATIO) + continue; ++#endif + + /* If we need to prefetch the reference each PREFETCH_MOD iterations, + and we unroll the loop UNROLL_FACTOR times, we need to insert +@@ -1153,6 +1162,19 @@ estimate_prefetch_count (struct mem_ref_group *groups, unsigned unroll_factor) + return prefetch_count; + } + ++#ifdef FLAG_SW64_PREFETCH ++/*Due to the need for SW to dynamically adjust the value of PF during ++ * prefetching,PF needs to handle negative values.However ,since Common Joined ++ * UInteger Var(PFX) is used, the function needs to convert unsigned (0-200) to ++ * (-100,100)*/ ++int ++convert_default_to_sw (unsigned int pf_value) ++{ ++ if (pf_value > 100) ++ return 100 - (int) pf_value; ++ return pf_value; ++} ++#endif + /* Issue prefetches for the reference REF into loop as decided before. + HEAD is the number of iterations to prefetch ahead. UNROLL_FACTOR + is the factor by which LOOP was unrolled. */ +@@ -1184,11 +1206,21 @@ issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead) + + for (ap = 0; ap < n_prefetches; ap++) + { ++#ifdef FLAG_SW64_PREFETCH ++ if (flag_sw_prefetch_dc == 1) ++ { ++#endif + if (cst_and_fits_in_hwi (ref->group->step)) + { + /* Determine the address to prefetch. 
*/ ++#ifdef FLAG_SW64_PREFETCH ++ delta = (ahead + ap * ref->prefetch_mod) * ++ int_cst_value (ref->group->step) * 2 ++ + convert_default_to_sw (PF1) * L1_CACHE_LINE_SIZE; ++#else + delta = (ahead + ap * ref->prefetch_mod) * + int_cst_value (ref->group->step); ++#endif + addr = fold_build_pointer_plus_hwi (addr_base, delta); + addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, + NULL, true, GSI_SAME_STMT); +@@ -1220,6 +1252,86 @@ issue_prefetch_ref (struct mem_ref *ref, unsigned unroll_factor, unsigned ahead) + prefetch = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH), + 3, addr, write_p, local); + gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT); ++#ifdef FLAG_SW64_PREFETCH ++ } ++ /* Generate L2 prefetch if the option is open. */ ++ if (flag_sw_prefetch_sc == 1) ++ { ++ if (cst_and_fits_in_hwi (ref->group->step)) ++ { ++ delta = (ahead + ap * ref->prefetch_mod) * ++ int_cst_value (ref->group->step) * 2 ++ + (4 + convert_default_to_sw (PF2)) * L1_CACHE_LINE_SIZE; ++ ++ addr = fold_build_pointer_plus_hwi (addr_base, delta); ++ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, ++ NULL, true, GSI_SAME_STMT); ++ } ++ else ++ { ++ ahead += (unsigned) (convert_default_to_sw (PF2) ++ - convert_default_to_sw (PF1)); ++ forward = fold_build2 (MULT_EXPR, sizetype, ++ fold_convert (sizetype, ref->group->step), ++ fold_convert (sizetype, size_int (ahead))); ++ addr = fold_build_pointer_plus (addr_base, forward); ++ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, ++ NULL, true, GSI_SAME_STMT); ++ } ++ ++ if (addr_base != addr && TREE_CODE (addr_base) == SSA_NAME ++ && TREE_CODE (addr) == SSA_NAME) ++ { ++ duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base)); ++ if (SSA_NAME_PTR_INFO (addr)) ++ mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr)); ++ } ++ ++ /* Create the L2 prefetch instruction. 
*/ ++ prefetch ++ = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH_SC), ++ 3, addr, write_p, local); ++ gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT); ++ } ++ /* Generate L3 prefetch if the option is open. */ ++ if (flag_sw_prefetch_tc == 1) ++ { ++ if (cst_and_fits_in_hwi (ref->group->step)) ++ { ++ delta = (ahead + ap * ref->prefetch_mod) * ++ int_cst_value (ref->group->step) * 2 ++ + (10 + convert_default_to_sw (PF3)) * L1_CACHE_LINE_SIZE; ++ ++ addr = fold_build_pointer_plus_hwi (addr_base, delta); ++ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, ++ NULL, true, GSI_SAME_STMT); ++ } ++ else ++ { ++ ahead += (unsigned) (convert_default_to_sw (PF3) ++ - convert_default_to_sw (PF1)); ++ forward = fold_build2 (MULT_EXPR, sizetype, ++ fold_convert (sizetype, ref->group->step), ++ fold_convert (sizetype, size_int (ahead))); ++ addr = fold_build_pointer_plus (addr_base, forward); ++ addr = force_gimple_operand_gsi (&bsi, unshare_expr (addr), true, ++ NULL, true, GSI_SAME_STMT); ++ } ++ ++ if (addr_base != addr && TREE_CODE (addr_base) == SSA_NAME ++ && TREE_CODE (addr) == SSA_NAME) ++ { ++ duplicate_ssa_name_ptr_info (addr, SSA_NAME_PTR_INFO (addr_base)); ++ if (SSA_NAME_PTR_INFO (addr)) ++ mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (addr)); ++ } ++ /* Create the L3 prefetch instruction. */ ++ prefetch ++ = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH_TC), ++ 3, addr, write_p, local); ++ gsi_insert_before (&bsi, prefetch, GSI_SAME_STMT); ++ } ++#endif + } + } + +@@ -1375,9 +1487,22 @@ should_unroll_loop_p (class loop *loop, class tree_niter_desc *desc, + as well; but the unrolling/prefetching is usually more profitable for + loops consisting of a single basic block, and we want to limit the + code growth. 
*/ ++#ifdef FLAG_SW64_PREFETCH ++ if (flag_sw_prefetch_unroll == 1) ++ { ++ if (loop->num_nodes > 7) ++ return false; ++ } ++ else ++ { ++ if (loop->num_nodes > 2) ++ return false; ++ } ++#else + if (loop->num_nodes > 2) + return false; + ++#endif + return true; + } + +@@ -1422,6 +1547,12 @@ determine_unroll_factor (class loop *loop, struct mem_ref_group *refs, + if (should_issue_prefetch_p (ref)) + { + mod_constraint = ref->prefetch_mod; ++#ifdef FLAG_SW64_PREFETCH ++ /* TODO: mod_constraint is set to 4 by experience, but we should do it ++ * with precision. */ ++ if (mod_constraint > upper_bound) ++ mod_constraint = 4; ++#endif + nfactor = least_common_multiple (mod_constraint, factor); + if (nfactor <= upper_bound) + factor = nfactor; +@@ -2022,6 +2153,28 @@ tree_ssa_prefetch_arrays (void) + DECL_IS_NOVOPS (decl) = true; + set_builtin_decl (BUILT_IN_PREFETCH, decl, false); + } ++#ifdef FLAG_SW64_PREFETCH ++ if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH_SC)) ++ { ++ tree type = build_function_type_list (void_type_node, const_ptr_type_node, ++ NULL_TREE); ++ tree decl = add_builtin_function ("__builtin_prefetch_sc", type, ++ BUILT_IN_PREFETCH_SC, BUILT_IN_NORMAL, ++ NULL, NULL_TREE); ++ DECL_IS_NOVOPS (decl) = true; ++ set_builtin_decl (BUILT_IN_PREFETCH_SC, decl, false); ++ } ++ if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH_TC)) ++ { ++ tree type = build_function_type_list (void_type_node, const_ptr_type_node, ++ NULL_TREE); ++ tree decl = add_builtin_function ("__builtin_prefetch_tc", type, ++ BUILT_IN_PREFETCH_TC, BUILT_IN_NORMAL, ++ NULL, NULL_TREE); ++ DECL_IS_NOVOPS (decl) = true; ++ set_builtin_decl (BUILT_IN_PREFETCH_TC, decl, false); ++ } ++#endif + + for (auto loop : loops_list (cfun, LI_FROM_INNERMOST)) + { +-- +2.25.1 + diff --git a/0004-LoongArch-Optimizations-of-vector-construction.patch b/0004-LoongArch-Optimizations-of-vector-construction.patch new file mode 100644 index 
0000000000000000000000000000000000000000..6a2c0226828b548bfa4b001d7a63317cb5d7f9e5 --- /dev/null +++ b/0004-LoongArch-Optimizations-of-vector-construction.patch @@ -0,0 +1,1310 @@ +From b74895b8b723a64bc136c4c560661abed81e013a Mon Sep 17 00:00:00 2001 +From: Guo Jie +Date: Thu, 21 Sep 2023 09:19:18 +0800 +Subject: [PATCH 004/188] LoongArch: Optimizations of vector construction. + +gcc/ChangeLog: + + * config/loongarch/lasx.md (lasx_vecinit_merge_): New + pattern for vector construction. + (vec_set_internal): Ditto. + (lasx_xvinsgr2vr__internal): Ditto. + (lasx_xvilvl__internal): Ditto. + * config/loongarch/loongarch.cc (loongarch_expand_vector_init): + Optimized the implementation of vector construction. + (loongarch_expand_vector_init_same): New function. + * config/loongarch/lsx.md (lsx_vilvl__internal): New + pattern for vector construction. + (lsx_vreplvei_mirror_): New pattern for vector + construction. + (vec_concatv2df): Ditto. + (vec_concatv4sf): Ditto. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-vec-construct-opt.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vec-construct-opt.c: New test. 
+--- + gcc/config/loongarch/lasx.md | 69 ++ + gcc/config/loongarch/loongarch.cc | 716 +++++++++--------- + gcc/config/loongarch/lsx.md | 134 ++++ + .../vector/lasx/lasx-vec-construct-opt.c | 102 +++ + .../vector/lsx/lsx-vec-construct-opt.c | 85 +++ + 5 files changed, 732 insertions(+), 374 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-construct-opt.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vec-construct-opt.c + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 8111c8bb7..2bc5d47ed 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -186,6 +186,9 @@ + UNSPEC_LASX_XVLDI + UNSPEC_LASX_XVLDX + UNSPEC_LASX_XVSTX ++ UNSPEC_LASX_VECINIT_MERGE ++ UNSPEC_LASX_VEC_SET_INTERNAL ++ UNSPEC_LASX_XVILVL_INTERNAL + ]) + + ;; All vector modes with 256 bits. +@@ -255,6 +258,15 @@ + [(V8SF "V4SF") + (V4DF "V2DF")]) + ++;; The attribute gives half int/float modes for vector modes. ++(define_mode_attr VHMODE256_ALL ++ [(V32QI "V16QI") ++ (V16HI "V8HI") ++ (V8SI "V4SI") ++ (V4DI "V2DI") ++ (V8SF "V4SF") ++ (V4DF "V2DF")]) ++ + ;; The attribute gives double modes for vector modes in LASX. + (define_mode_attr VDMODE256 + [(V8SI "V4DI") +@@ -312,6 +324,11 @@ + (V4DI "v4df") + (V8SI "v8sf")]) + ++;; This attribute gives V32QI mode and V16HI mode with half size. ++(define_mode_attr mode256_i_half ++ [(V32QI "v16qi") ++ (V16HI "v8hi")]) ++ + ;; This attribute gives suffix for LASX instructions. HOW? + (define_mode_attr lasxfmt + [(V4DF "d") +@@ -756,6 +773,20 @@ + [(set_attr "type" "simd_splat") + (set_attr "mode" "")]) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Support a LSX-mode input op2. 
++(define_insn "lasx_vecinit_merge_" ++ [(set (match_operand:LASX 0 "register_operand" "=f") ++ (unspec:LASX ++ [(match_operand:LASX 1 "register_operand" "0") ++ (match_operand: 2 "register_operand" "f") ++ (match_operand 3 "const_uimm8_operand")] ++ UNSPEC_LASX_VECINIT_MERGE))] ++ "ISA_HAS_LASX" ++ "xvpermi.q\t%u0,%u2,%3" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ + (define_insn "lasx_xvpickve2gr_d" + [(set (match_operand:DI 0 "register_operand" "=r") + (any_extend:DI +@@ -779,6 +810,33 @@ + DONE; + }) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Simulate missing instructions xvinsgr2vr.b and xvinsgr2vr.h. ++(define_expand "vec_set_internal" ++ [(match_operand:ILASX_HB 0 "register_operand") ++ (match_operand: 1 "reg_or_0_operand") ++ (match_operand 2 "const__operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx index = GEN_INT (1 << INTVAL (operands[2])); ++ emit_insn (gen_lasx_xvinsgr2vr__internal ++ (operands[0], operands[1], operands[0], index)); ++ DONE; ++}) ++ ++(define_insn "lasx_xvinsgr2vr__internal" ++ [(set (match_operand:ILASX_HB 0 "register_operand" "=f") ++ (unspec:ILASX_HB [(match_operand: 1 "reg_or_0_operand" "rJ") ++ (match_operand:ILASX_HB 2 "register_operand" "0") ++ (match_operand 3 "const__operand" "")] ++ UNSPEC_LASX_VEC_SET_INTERNAL))] ++ "ISA_HAS_LASX" ++{ ++ return "vinsgr2vr.\t%w0,%z1,%y3"; ++} ++ [(set_attr "type" "simd_insert") ++ (set_attr "mode" "")]) ++ + (define_expand "vec_set" + [(match_operand:FLASX 0 "register_operand") + (match_operand: 1 "reg_or_0_operand") +@@ -1567,6 +1625,17 @@ + [(set_attr "type" "simd_flog2") + (set_attr "mode" "")]) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Merge two scalar floating-point op1 and op2 into a LASX op0. 
++(define_insn "lasx_xvilvl__internal" ++ [(set (match_operand:FLASX 0 "register_operand" "=f") ++ (unspec:FLASX [(match_operand: 1 "register_operand" "f") ++ (match_operand: 2 "register_operand" "f")] ++ UNSPEC_LASX_XVILVL_INTERNAL))] ++ "ISA_HAS_LASX" ++ "xvilvl.\t%u0,%u2,%u1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) + + (define_insn "smax3" + [(set (match_operand:FLASX 0 "register_operand" "=f") +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index f2e796a6b..760b12268 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -10193,300 +10193,344 @@ loongarch_expand_vector_group_init (rtx target, rtx vals) + ops[1]))); + } + ++/* Expand initialization of a vector which has all same elements. */ ++ + void +-loongarch_expand_vector_init (rtx target, rtx vals) ++loongarch_expand_vector_init_same (rtx target, rtx vals, unsigned nvar) + { + machine_mode vmode = GET_MODE (target); + machine_mode imode = GET_MODE_INNER (vmode); +- unsigned i, nelt = GET_MODE_NUNITS (vmode); +- unsigned nvar = 0; +- bool all_same = true; +- rtx x; ++ rtx same = XVECEXP (vals, 0, 0); ++ rtx temp, temp2; + +- for (i = 0; i < nelt; ++i) ++ if (CONST_INT_P (same) && nvar == 0 ++ && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) ++ { ++ switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); ++ emit_move_insn (target, temp); ++ return; ++ default: ++ gcc_unreachable (); ++ } ++ } ++ temp = gen_reg_rtx (imode); ++ if (imode == GET_MODE (same)) ++ temp2 = same; ++ else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) + { +- x = XVECEXP (vals, 0, i); +- if (!loongarch_constant_elt_p (x)) +- nvar++; +- if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0))) +- all_same = false; ++ if (GET_CODE (same) == MEM) ++ { ++ rtx 
reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0); ++ } ++ else ++ temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0); + } +- +- if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32) ++ else + { +- if (all_same) ++ if (GET_CODE (same) == MEM) + { +- rtx same = XVECEXP (vals, 0, 0); +- rtx temp, temp2; ++ rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); ++ loongarch_emit_move (reg_tmp, same); ++ temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); ++ } ++ else ++ temp2 = lowpart_subreg (imode, same, GET_MODE (same)); ++ } ++ emit_move_insn (temp, temp2); + +- if (CONST_INT_P (same) && nvar == 0 +- && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) +- { +- switch (vmode) +- { +- case E_V32QImode: +- case E_V16HImode: +- case E_V8SImode: +- case E_V4DImode: +- temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); +- emit_move_insn (target, temp); +- return; ++ switch (vmode) ++ { ++ case E_V32QImode: ++ case E_V16HImode: ++ case E_V8SImode: ++ case E_V4DImode: ++ case E_V16QImode: ++ case E_V8HImode: ++ case E_V4SImode: ++ case E_V2DImode: ++ loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); ++ break; + +- default: +- gcc_unreachable (); +- } +- } ++ case E_V8SFmode: ++ emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp)); ++ break; + +- temp = gen_reg_rtx (imode); +- if (imode == GET_MODE (same)) +- temp2 = same; +- else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) +- { +- if (GET_CODE (same) == MEM) +- { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = simplify_gen_subreg (imode, reg_tmp, +- GET_MODE (reg_tmp), 0); +- } +- else +- temp2 = simplify_gen_subreg (imode, same, +- GET_MODE (same), 0); +- } +- else +- { +- if (GET_CODE (same) == MEM) +- { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = lowpart_subreg (imode, reg_tmp, +- 
GET_MODE (reg_tmp)); +- } +- else +- temp2 = lowpart_subreg (imode, same, GET_MODE (same)); +- } +- emit_move_insn (temp, temp2); ++ case E_V4DFmode: ++ emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp)); ++ break; + +- switch (vmode) +- { +- case E_V32QImode: +- case E_V16HImode: +- case E_V8SImode: +- case E_V4DImode: +- loongarch_emit_move (target, +- gen_rtx_VEC_DUPLICATE (vmode, temp)); +- break; ++ case E_V4SFmode: ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); ++ break; + +- case E_V8SFmode: +- emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp)); +- break; ++ case E_V2DFmode: ++ emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); ++ break; + +- case E_V4DFmode: +- emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp)); +- break; ++ default: ++ gcc_unreachable (); ++ } ++} + +- default: +- gcc_unreachable (); +- } +- } +- else +- { +- rtvec vec = shallow_copy_rtvec (XVEC (vals, 0)); ++/* Expand a vector initialization. */ + +- for (i = 0; i < nelt; ++i) +- RTVEC_ELT (vec, i) = CONST0_RTX (imode); ++void ++loongarch_expand_vector_init (rtx target, rtx vals) ++{ ++ machine_mode vmode = GET_MODE (target); ++ machine_mode imode = GET_MODE_INNER (vmode); ++ unsigned i, nelt = GET_MODE_NUNITS (vmode); ++ /* VALS is divided into high and low half-part. */ ++ /* Number of non constant elements in corresponding parts of VALS. */ ++ unsigned nvar = 0, hi_nvar = 0, lo_nvar = 0; ++ /* all_same : true if all elements of VALS are the same. ++ hi_same : true if all elements of the high half-part are the same. ++ lo_same : true if all elements of the low half-part are the same. ++ half_same : true if the high half-part is the same as the low one. */ ++ bool all_same = false, hi_same = true, lo_same = true, half_same = true; ++ rtx val[32], val_hi[32], val_lo[16]; ++ rtx x, op0, op1; ++ /* Copy one element of vals to per element of target vector. 
*/ ++ typedef rtx (*loongarch_vec_repl1_fn) (rtx, rtx); ++ /* Copy two elements of vals to target vector. */ ++ typedef rtx (*loongarch_vec_repl2_fn) (rtx, rtx, rtx); ++ /* Insert scalar operands into the specified position of the vector. */ ++ typedef rtx (*loongarch_vec_set_fn) (rtx, rtx, rtx); ++ /* Copy 64bit lowpart to highpart. */ ++ typedef rtx (*loongarch_vec_mirror_fn) (rtx, rtx, rtx); ++ /* Merge lowpart and highpart into target. */ ++ typedef rtx (*loongarch_vec_merge_fn) (rtx, rtx, rtx, rtx); ++ ++ loongarch_vec_repl1_fn loongarch_vec_repl1_128 = NULL, ++ loongarch_vec_repl1_256 = NULL; ++ loongarch_vec_repl2_fn loongarch_vec_repl2_128 = NULL, ++ loongarch_vec_repl2_256 = NULL; ++ loongarch_vec_set_fn loongarch_vec_set128 = NULL, loongarch_vec_set256 = NULL; ++ loongarch_vec_mirror_fn loongarch_vec_mirror = NULL; ++ loongarch_vec_merge_fn loongarch_lasx_vecinit_merge = NULL; ++ machine_mode half_mode = VOIDmode; ++ ++ /* Check whether elements of each part are the same. */ ++ for (i = 0; i < nelt / 2; ++i) ++ { ++ val_hi[i] = val_hi[i + nelt / 2] = val[i + nelt / 2] ++ = XVECEXP (vals, 0, i + nelt / 2); ++ val_lo[i] = val[i] = XVECEXP (vals, 0, i); ++ if (!loongarch_constant_elt_p (val_hi[i])) ++ hi_nvar++; ++ if (!loongarch_constant_elt_p (val_lo[i])) ++ lo_nvar++; ++ if (i > 0 && !rtx_equal_p (val_hi[i], val_hi[0])) ++ hi_same = false; ++ if (i > 0 && !rtx_equal_p (val_lo[i], val_lo[0])) ++ lo_same = false; ++ if (!rtx_equal_p (val_hi[i], val_lo[i])) ++ half_same = false; ++ } ++ ++ /* If all elements are the same, set all_same true. */ ++ if (hi_same && lo_same && half_same) ++ all_same = true; ++ ++ nvar = hi_nvar + lo_nvar; + +- emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec)); ++ switch (vmode) ++ { ++ case E_V32QImode: ++ half_mode = E_V16QImode; ++ loongarch_vec_set256 = gen_vec_setv32qi_internal; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_b; ++ loongarch_lasx_vecinit_merge ++ = half_same ? 
gen_lasx_xvpermi_q_v32qi : gen_lasx_vecinit_merge_v32qi; ++ /* FALLTHRU. */ ++ case E_V16QImode: ++ loongarch_vec_set128 = gen_vec_setv16qi; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_b; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_b; ++ break; + +- machine_mode half_mode = VOIDmode; +- rtx target_hi, target_lo; ++ case E_V16HImode: ++ half_mode = E_V8HImode; ++ loongarch_vec_set256 = gen_vec_setv16hi_internal; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_h; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v16hi : gen_lasx_vecinit_merge_v16hi; ++ /* FALLTHRU. */ ++ case E_V8HImode: ++ loongarch_vec_set128 = gen_vec_setv8hi; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_h; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_h; ++ break; + +- switch (vmode) +- { +- case E_V32QImode: +- half_mode=E_V16QImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- if (i == 0) +- { +- emit_insn (gen_lsx_vreplvei_b_scalar (target_hi, +- temp_hi)); +- emit_insn (gen_lsx_vreplvei_b_scalar (target_lo, +- temp_lo)); +- } +- else +- { +- emit_insn (gen_vec_setv16qi (target_hi, temp_hi, +- GEN_INT (i))); +- emit_insn (gen_vec_setv16qi (target_lo, temp_lo, +- GEN_INT (i))); +- } +- } +- emit_insn (gen_rtx_SET (target, +- gen_rtx_VEC_CONCAT (vmode, target_hi, +- target_lo))); +- break; ++ case E_V8SImode: ++ half_mode = V4SImode; ++ loongarch_vec_set256 = gen_vec_setv8si; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_w; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v8si : gen_lasx_vecinit_merge_v8si; ++ /* FALLTHRU. 
*/ ++ case E_V4SImode: ++ loongarch_vec_set128 = gen_vec_setv4si; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_w; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_w; ++ break; + +- case E_V16HImode: +- half_mode=E_V8HImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- if (i == 0) +- { +- emit_insn (gen_lsx_vreplvei_h_scalar (target_hi, +- temp_hi)); +- emit_insn (gen_lsx_vreplvei_h_scalar (target_lo, +- temp_lo)); +- } +- else +- { +- emit_insn (gen_vec_setv8hi (target_hi, temp_hi, +- GEN_INT (i))); +- emit_insn (gen_vec_setv8hi (target_lo, temp_lo, +- GEN_INT (i))); +- } +- } +- emit_insn (gen_rtx_SET (target, +- gen_rtx_VEC_CONCAT (vmode, target_hi, +- target_lo))); +- break; ++ case E_V4DImode: ++ half_mode = E_V2DImode; ++ loongarch_vec_set256 = gen_vec_setv4di; ++ loongarch_vec_repl1_256 = gen_lasx_xvreplgr2vr_d; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v4di : gen_lasx_vecinit_merge_v4di; ++ /* FALLTHRU. 
*/ ++ case E_V2DImode: ++ loongarch_vec_set128 = gen_vec_setv2di; ++ loongarch_vec_repl1_128 = gen_lsx_vreplgr2vr_d; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_d; ++ break; + +- case E_V8SImode: +- half_mode=V4SImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- if (i == 0) +- { +- emit_insn (gen_lsx_vreplvei_w_scalar (target_hi, +- temp_hi)); +- emit_insn (gen_lsx_vreplvei_w_scalar (target_lo, +- temp_lo)); +- } +- else +- { +- emit_insn (gen_vec_setv4si (target_hi, temp_hi, +- GEN_INT (i))); +- emit_insn (gen_vec_setv4si (target_lo, temp_lo, +- GEN_INT (i))); +- } +- } +- emit_insn (gen_rtx_SET (target, +- gen_rtx_VEC_CONCAT (vmode, target_hi, +- target_lo))); +- break; ++ case E_V8SFmode: ++ half_mode = E_V4SFmode; ++ loongarch_vec_set256 = gen_vec_setv8sf; ++ loongarch_vec_repl1_128 = gen_lsx_vreplvei_w_f_scalar; ++ loongarch_vec_repl2_256 = gen_lasx_xvilvl_w_f_internal; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v8sf : gen_lasx_vecinit_merge_v8sf; ++ /* FALLTHRU. 
*/ ++ case E_V4SFmode: ++ loongarch_vec_set128 = gen_vec_setv4sf; ++ loongarch_vec_repl2_128 = gen_lsx_vilvl_w_f_internal; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_w_f; ++ break; + +- case E_V4DImode: +- half_mode=E_V2DImode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) +- { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- if (i == 0) +- { +- emit_insn (gen_lsx_vreplvei_d_scalar (target_hi, +- temp_hi)); +- emit_insn (gen_lsx_vreplvei_d_scalar (target_lo, +- temp_lo)); +- } +- else +- { +- emit_insn (gen_vec_setv2di (target_hi, temp_hi, +- GEN_INT (i))); +- emit_insn (gen_vec_setv2di (target_lo, temp_lo, +- GEN_INT (i))); +- } +- } +- emit_insn (gen_rtx_SET (target, +- gen_rtx_VEC_CONCAT (vmode, target_hi, +- target_lo))); +- break; ++ case E_V4DFmode: ++ half_mode = E_V2DFmode; ++ loongarch_vec_set256 = gen_vec_setv4df; ++ loongarch_vec_repl1_128 = gen_lsx_vreplvei_d_f_scalar; ++ loongarch_vec_repl2_256 = gen_lasx_xvilvl_d_f_internal; ++ loongarch_lasx_vecinit_merge ++ = half_same ? gen_lasx_xvpermi_q_v4df : gen_lasx_vecinit_merge_v4df; ++ /* FALLTHRU. */ ++ case E_V2DFmode: ++ loongarch_vec_set128 = gen_vec_setv2df; ++ loongarch_vec_repl2_128 = gen_lsx_vilvl_d_f_internal; ++ loongarch_vec_mirror = gen_lsx_vreplvei_mirror_d_f; ++ break; + +- case E_V8SFmode: +- half_mode=E_V4SFmode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) ++ default: ++ gcc_unreachable (); ++ } ++ ++ if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32) ++ { ++ /* If all elements are the same, just do a broadcost. */ ++ if (all_same) ++ loongarch_expand_vector_init_same (target, vals, nvar); ++ else ++ { ++ gcc_assert (nelt >= 4); ++ ++ rtx target_hi, target_lo; ++ /* Write elements of high half-part in target directly. 
*/ ++ target_hi = target; ++ target_lo = gen_reg_rtx (half_mode); ++ ++ /* If all elements of high half-part are the same, ++ just do a broadcost. Also applicable to low half-part. */ ++ if (hi_same) ++ { ++ rtx vtmp = gen_rtx_PARALLEL (vmode, gen_rtvec_v (nelt, val_hi)); ++ loongarch_expand_vector_init_same (target_hi, vtmp, hi_nvar); ++ } ++ if (lo_same) ++ { ++ rtx vtmp ++ = gen_rtx_PARALLEL (half_mode, gen_rtvec_v (nelt / 2, val_lo)); ++ loongarch_expand_vector_init_same (target_lo, vtmp, lo_nvar); ++ } ++ ++ for (i = 0; i < nelt / 2; ++i) ++ { ++ if (!hi_same) + { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); +- if (i == 0) ++ if (vmode == E_V8SFmode || vmode == E_V4DFmode) + { +- emit_insn (gen_lsx_vreplvei_w_f_scalar (target_hi, +- temp_hi)); +- emit_insn (gen_lsx_vreplvei_w_f_scalar (target_lo, +- temp_lo)); ++ /* Using xvilvl to load lowest 2 elements simultaneously ++ to reduce the number of instructions. */ ++ if (i == 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[0]); ++ op1 = gen_reg_rtx (imode); ++ emit_move_insn (op1, val_hi[1]); ++ emit_insn ( ++ loongarch_vec_repl2_256 (target_hi, op0, op1)); ++ } ++ else if (i > 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[i]); ++ emit_insn ( ++ loongarch_vec_set256 (target_hi, op0, GEN_INT (i))); ++ } + } + else + { +- emit_insn (gen_vec_setv4sf (target_hi, temp_hi, +- GEN_INT (i))); +- emit_insn (gen_vec_setv4sf (target_lo, temp_lo, +- GEN_INT (i))); ++ /* Assign the lowest element of val_hi to all elements ++ of target_hi. 
*/ ++ if (i == 0) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[0]); ++ emit_insn (loongarch_vec_repl1_256 (target_hi, op0)); ++ } ++ else if (!rtx_equal_p (val_hi[i], val_hi[0])) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_hi[i]); ++ emit_insn ( ++ loongarch_vec_set256 (target_hi, op0, GEN_INT (i))); ++ } + } + } +- emit_insn (gen_rtx_SET (target, +- gen_rtx_VEC_CONCAT (vmode, target_hi, +- target_lo))); +- break; +- +- case E_V4DFmode: +- half_mode=E_V2DFmode; +- target_hi = gen_reg_rtx (half_mode); +- target_lo = gen_reg_rtx (half_mode); +- for (i = 0; i < nelt/2; ++i) ++ if (!lo_same && !half_same) + { +- rtx temp_hi = gen_reg_rtx (imode); +- rtx temp_lo = gen_reg_rtx (imode); +- emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2)); +- emit_move_insn (temp_lo, XVECEXP (vals, 0, i)); ++ /* Assign the lowest element of val_lo to all elements ++ of target_lo. */ + if (i == 0) + { +- emit_insn (gen_lsx_vreplvei_d_f_scalar (target_hi, +- temp_hi)); +- emit_insn (gen_lsx_vreplvei_d_f_scalar (target_lo, +- temp_lo)); ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_lo[0]); ++ emit_insn (loongarch_vec_repl1_128 (target_lo, op0)); + } +- else ++ else if (!rtx_equal_p (val_lo[i], val_lo[0])) + { +- emit_insn (gen_vec_setv2df (target_hi, temp_hi, +- GEN_INT (i))); +- emit_insn (gen_vec_setv2df (target_lo, temp_lo, +- GEN_INT (i))); ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val_lo[i]); ++ emit_insn ( ++ loongarch_vec_set128 (target_lo, op0, GEN_INT (i))); + } + } +- emit_insn (gen_rtx_SET (target, +- gen_rtx_VEC_CONCAT (vmode, target_hi, +- target_lo))); +- break; +- +- default: +- gcc_unreachable (); + } +- ++ if (half_same) ++ { ++ emit_insn (loongarch_lasx_vecinit_merge (target, target_hi, ++ target_hi, const0_rtx)); ++ return; ++ } ++ emit_insn (loongarch_lasx_vecinit_merge (target, target_hi, target_lo, ++ GEN_INT (0x20))); + } + return; + } +@@ -10494,130 +10538,54 @@ loongarch_expand_vector_init (rtx 
target, rtx vals) + if (ISA_HAS_LSX) + { + if (all_same) ++ loongarch_expand_vector_init_same (target, vals, nvar); ++ else + { +- rtx same = XVECEXP (vals, 0, 0); +- rtx temp, temp2; +- +- if (CONST_INT_P (same) && nvar == 0 +- && loongarch_signed_immediate_p (INTVAL (same), 10, 0)) +- { +- switch (vmode) +- { +- case E_V16QImode: +- case E_V8HImode: +- case E_V4SImode: +- case E_V2DImode: +- temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)); +- emit_move_insn (target, temp); +- return; +- +- default: +- gcc_unreachable (); +- } +- } +- temp = gen_reg_rtx (imode); +- if (imode == GET_MODE (same)) +- temp2 = same; +- else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD) +- { +- if (GET_CODE (same) == MEM) +- { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = simplify_gen_subreg (imode, reg_tmp, +- GET_MODE (reg_tmp), 0); +- } +- else +- temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0); +- } +- else ++ for (i = 0; i < nelt; ++i) + { +- if (GET_CODE (same) == MEM) ++ if (vmode == E_V4SFmode || vmode == E_V2DFmode) + { +- rtx reg_tmp = gen_reg_rtx (GET_MODE (same)); +- loongarch_emit_move (reg_tmp, same); +- temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp)); ++ /* Using vilvl to load lowest 2 elements simultaneously to ++ reduce the number of instructions. 
*/ ++ if (i == 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[0]); ++ op1 = gen_reg_rtx (imode); ++ emit_move_insn (op1, val[1]); ++ emit_insn (loongarch_vec_repl2_128 (target, op0, op1)); ++ } ++ else if (i > 1) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[i]); ++ emit_insn ( ++ loongarch_vec_set128 (target, op0, GEN_INT (i))); ++ } + } + else +- temp2 = lowpart_subreg (imode, same, GET_MODE (same)); +- } +- emit_move_insn (temp, temp2); +- +- switch (vmode) +- { +- case E_V16QImode: +- case E_V8HImode: +- case E_V4SImode: +- case E_V2DImode: +- loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp)); +- break; +- +- case E_V4SFmode: +- emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); +- break; +- +- case E_V2DFmode: +- emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); +- break; +- +- default: +- gcc_unreachable (); +- } +- } +- else +- { +- emit_move_insn (target, CONST0_RTX (vmode)); +- +- for (i = 0; i < nelt; ++i) +- { +- rtx temp = gen_reg_rtx (imode); +- emit_move_insn (temp, XVECEXP (vals, 0, i)); +- switch (vmode) + { +- case E_V16QImode: +- if (i == 0) +- emit_insn (gen_lsx_vreplvei_b_scalar (target, temp)); +- else +- emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i))); +- break; +- +- case E_V8HImode: +- if (i == 0) +- emit_insn (gen_lsx_vreplvei_h_scalar (target, temp)); +- else +- emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i))); +- break; +- +- case E_V4SImode: +- if (i == 0) +- emit_insn (gen_lsx_vreplvei_w_scalar (target, temp)); +- else +- emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i))); +- break; +- +- case E_V2DImode: +- if (i == 0) +- emit_insn (gen_lsx_vreplvei_d_scalar (target, temp)); +- else +- emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i))); +- break; +- +- case E_V4SFmode: +- if (i == 0) +- emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp)); +- else +- emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i))); +- break; +- +- case E_V2DFmode: ++ if 
(half_same && i == nelt / 2) ++ { ++ emit_insn ( ++ loongarch_vec_mirror (target, target, const0_rtx)); ++ return; ++ } ++ /* Assign the lowest element of val to all elements of ++ target. */ + if (i == 0) +- emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp)); +- else +- emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i))); +- break; +- +- default: +- gcc_unreachable (); ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[0]); ++ emit_insn (loongarch_vec_repl1_128 (target, op0)); ++ } ++ else if (!rtx_equal_p (val[i], val[0])) ++ { ++ op0 = gen_reg_rtx (imode); ++ emit_move_insn (op0, val[i]); ++ emit_insn ( ++ loongarch_vec_set128 (target, op0, GEN_INT (i))); ++ } + } + } + } +@@ -10634,8 +10602,8 @@ loongarch_expand_vector_init (rtx target, rtx vals) + /* For two-part initialization, always use CONCAT. */ + if (nelt == 2) + { +- rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0)); +- rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1)); ++ rtx op0 = force_reg (imode, val[0]); ++ rtx op1 = force_reg (imode, val[1]); + x = gen_rtx_VEC_CONCAT (vmode, op0, op1); + emit_insn (gen_rtx_SET (target, x)); + return; +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index fb4d228ba..075f6ba56 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -176,6 +176,8 @@ + UNSPEC_LSX_VSSRARNI + UNSPEC_LSX_VSSRARNI2 + UNSPEC_LSX_VPERMI ++ UNSPEC_LSX_VILVL_INTERNAL ++ UNSPEC_LSX_VREPLVEI_MIRROR + ]) + + ;; This attribute gives suffix for integers in VHMODE. +@@ -1551,6 +1553,18 @@ + [(set_attr "type" "simd_flog2") + (set_attr "mode" "")]) + ++;; Only for loongarch_expand_vector_init in loongarch.cc. ++;; Merge two scalar floating-point op1 and op2 into a LSX op0. 
++(define_insn "lsx_vilvl__internal" ++ [(set (match_operand:FLSX 0 "register_operand" "=f") ++ (unspec:FLSX [(match_operand: 1 "register_operand" "f") ++ (match_operand: 2 "register_operand" "f")] ++ UNSPEC_LSX_VILVL_INTERNAL))] ++ "ISA_HAS_LSX" ++ "vilvl.\t%w0,%w2,%w1" ++ [(set_attr "type" "simd_permute") ++ (set_attr "mode" "")]) ++ + (define_insn "smax3" + [(set (match_operand:FLSX 0 "register_operand" "=f") + (smax:FLSX (match_operand:FLSX 1 "register_operand" "f") +@@ -2289,6 +2303,16 @@ + [(set_attr "type" "simd_splat") + (set_attr "mode" "")]) + ++(define_insn "lsx_vreplvei_mirror_" ++ [(set (match_operand:LSX 0 "register_operand" "=f") ++ (unspec: LSX [(match_operand:LSX 1 "register_operand" "f") ++ (match_operand 2 "const__operand" "")] ++ UNSPEC_LSX_VREPLVEI_MIRROR))] ++ "ISA_HAS_LSX" ++ "vreplvei.d\t%w0,%w1,%2" ++ [(set_attr "type" "simd_splat") ++ (set_attr "mode" "")]) ++ + (define_insn "lsx_vreplvei_" + [(set (match_operand:LSX 0 "register_operand" "=f") + (vec_duplicate:LSX +@@ -2450,6 +2474,99 @@ + DONE; + }) + ++;; Implement vec_concatv2df by vilvl.d. ++(define_insn_and_split "vec_concatv2df" ++ [(set (match_operand:V2DF 0 "register_operand" "=f") ++ (vec_concat:V2DF ++ (match_operand:DF 1 "register_operand" "f") ++ (match_operand:DF 2 "register_operand" "f")))] ++ "ISA_HAS_LSX" ++ "" ++ "&& reload_completed" ++ [(const_int 0)] ++{ ++ emit_insn (gen_lsx_vilvl_d_f (operands[0], ++ gen_rtx_REG (V2DFmode, REGNO (operands[1])), ++ gen_rtx_REG (V2DFmode, REGNO (operands[2])))); ++ DONE; ++} ++ [(set_attr "mode" "V2DF")]) ++ ++;; Implement vec_concatv4sf. ++;; Optimize based on hardware register allocation of operands. 
++(define_insn_and_split "vec_concatv4sf" ++ [(set (match_operand:V4SF 0 "register_operand" "=f") ++ (vec_concat:V4SF ++ (vec_concat:V2SF ++ (match_operand:SF 1 "register_operand" "f") ++ (match_operand:SF 2 "register_operand" "f")) ++ (vec_concat:V2SF ++ (match_operand:SF 3 "register_operand" "f") ++ (match_operand:SF 4 "register_operand" "f"))))] ++ "ISA_HAS_LSX" ++ "" ++ "&& reload_completed" ++ [(const_int 0)] ++{ ++ operands[5] = GEN_INT (1); ++ operands[6] = GEN_INT (2); ++ operands[7] = GEN_INT (4); ++ operands[8] = GEN_INT (8); ++ ++ /* If all input are same, use vreplvei.w to broadcast. */ ++ if (REGNO (operands[1]) == REGNO (operands[2]) ++ && REGNO (operands[1]) == REGNO (operands[3]) ++ && REGNO (operands[1]) == REGNO (operands[4])) ++ { ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (operands[0], operands[1])); ++ } ++ /* If op0 is equal to op3, use vreplvei.w to set each element of op0 as op3. ++ If other input is different from op3, use vextrins.w to insert. */ ++ else if (REGNO (operands[0]) == REGNO (operands[3])) ++ { ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (operands[0], operands[3])); ++ if (REGNO (operands[1]) != REGNO (operands[3])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[1], ++ operands[0], operands[5])); ++ if (REGNO (operands[2]) != REGNO (operands[3])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[2], ++ operands[0], operands[6])); ++ if (REGNO (operands[4]) != REGNO (operands[3])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[4], ++ operands[0], operands[8])); ++ } ++ /* If op0 is equal to op4, use vreplvei.w to set each element of op0 as op4. ++ If other input is different from op4, use vextrins.w to insert. 
*/ ++ else if (REGNO (operands[0]) == REGNO (operands[4])) ++ { ++ emit_insn (gen_lsx_vreplvei_w_f_scalar (operands[0], operands[4])); ++ if (REGNO (operands[1]) != REGNO (operands[4])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[1], ++ operands[0], operands[5])); ++ if (REGNO (operands[2]) != REGNO (operands[4])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[2], ++ operands[0], operands[6])); ++ if (REGNO (operands[3]) != REGNO (operands[4])) ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[3], ++ operands[0], operands[7])); ++ } ++ /* Otherwise, use vilvl.w to merge op1 and op2 first. ++ If op3 is different from op1, use vextrins.w to insert. ++ If op4 is different from op2, use vextrins.w to insert. */ ++ else ++ { ++ emit_insn ( ++ gen_lsx_vilvl_w_f (operands[0], ++ gen_rtx_REG (V4SFmode, REGNO (operands[1])), ++ gen_rtx_REG (V4SFmode, REGNO (operands[2])))); ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[3], ++ operands[0], operands[7])); ++ emit_insn (gen_lsx_vextrins_w_f_scalar (operands[0], operands[4], ++ operands[0], operands[8])); ++ } ++ DONE; ++} ++ [(set_attr "mode" "V4SF")]) + + (define_insn "vandn3" + [(set (match_operand:LSX 0 "register_operand" "=f") +@@ -4465,3 +4582,20 @@ + "vpermi.w\t%w0,%w2,%3" + [(set_attr "type" "simd_bit") + (set_attr "mode" "V4SI")]) ++ ++;; Delete one of two instructions that exactly play the same role. 
++(define_peephole2 ++ [(set (match_operand:V2DI 0 "register_operand") ++ (vec_duplicate:V2DI (match_operand:DI 1 "register_operand"))) ++ (set (match_operand:V2DI 2 "register_operand") ++ (vec_merge:V2DI ++ (vec_duplicate:V2DI (match_operand:DI 3 "register_operand")) ++ (match_operand:V2DI 4 "register_operand") ++ (match_operand 5 "const_int_operand")))] ++ "operands[0] == operands[2] && ++ operands[1] == operands[3] && ++ operands[2] == operands[4] && ++ INTVAL (operands[5]) == 2" ++ [(set (match_dup 0) ++ (vec_duplicate:V2DI (match_dup 1)))] ++ "") +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-construct-opt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-construct-opt.c +new file mode 100644 +index 000000000..487816a48 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-construct-opt.c +@@ -0,0 +1,102 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mlasx -O3" } */ ++ ++#include ++ ++extern long long *x_di; ++extern int *x_si; ++extern short int *x_hi; ++extern char *x_qi; ++extern double *y_df; ++extern float *y_sf; ++ ++/* Remove some unnecessary vinsgr2vr.d as the corresponding elements ++ have already been set. */ ++/* { dg-final { scan-assembler-not "v4i64:.*\tvinsgr2vr\\.d.*v4i64" } } */ ++/* { dg-final { scan-assembler-times "v4i64:.*\txvldrepl\\.d.*v4i64" 1 } } */ ++v4i64 ++vec_construct_v4i64 () ++{ ++ v4i64 res = ++ { x_di[0], x_di[0], x_di[1], x_di[1] } ++ ; ++ return res; ++} ++ ++/* Remove some unnecessary vinsgr2vr.w as the corresponding elements ++ have already been set. */ ++/* { dg-final { scan-assembler-not "v8i32:.*\tvinsgr2vr\\.w.*v8i32" } } */ ++/* { dg-final { scan-assembler-times "v8i32:.*\txvreplgr2vr\\.w.*v8i32" 1 } } */ ++v8i32 ++vec_construct_v8i32 () ++{ ++ v8i32 res = ++ { x_si[0], x_si[0], x_si[0], x_si[0], ++ x_si[0], x_si[2], x_si[0], x_si[0] } ++ ; ++ return res; ++} ++ ++/* Remove some unnecessary vinsgr2vr.h as the corresponding elements ++ have already been set. 
*/ ++/* { dg-final { scan-assembler-not "v16i16:.*\tvori\\.b.*v16i16" } } */ ++/* { dg-final { scan-assembler-times "v16i16:.*\txvreplgr2vr\\.h.*v16i1" 1 } } */ ++v16i16 ++vec_construct_v16i16 () ++{ ++ v16i16 res = ++ { x_hi[1], x_hi[2], x_hi[1], x_hi[1], ++ x_hi[1], x_hi[1], x_hi[1], x_hi[1], ++ x_hi[1], x_hi[1], x_hi[1], x_hi[1], ++ x_hi[1], x_hi[1], x_hi[1], x_hi[2] } ++ ; ++ return res; ++} ++ ++/* Remove some unnecessary vinsgr2vr.b as the corresponding elements ++ have already been set. */ ++/* { dg-final { scan-assembler-not "v32i8:.*\tvori\\.b.*v32i8" } } */ ++/* { dg-final { scan-assembler-times "v32i8:.*\txvreplgr2vr\\.b.*v32i8" 1 } } */ ++v32i8 ++vec_construct_v32i8 () ++{ ++ v32i8 res = ++ { x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[2], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[0], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[3] } ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.d ++ and reducing more vextrins.d. */ ++/* { dg-final { scan-assembler-not "v4f64:.*\tvori\\.b.*v4f64" } } */ ++/* { dg-final { scan-assembler-not "v4f64:.*\tvextrins\\.d.*v4f64" } } */ ++/* { dg-final { scan-assembler-times "v4f64:.*\tvilvl\\.d.*v4f64" 1 } } */ ++v4f64 ++vec_construct_v4f64 () ++{ ++ v4f64 res = ++ { y_df[0], y_df[2], y_df[0], y_df[0]} ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.w ++ and reducing more vextrins.w. 
*/ ++/* { dg-final { scan-assembler-not "v8f32:.*\tvextrins\\.w.*v8f32" } } */ ++/* { dg-final { scan-assembler-times "v8f32:.*\txvilvl\\.w.*v8f32" 1 } } */ ++v8f32 ++vec_construct_v8f32 () ++{ ++ v8f32 res = ++ { y_sf[2], y_sf[1], y_sf[2], y_sf[3], ++ y_sf[2], y_sf[1], y_sf[2], y_sf[3] } ++ ; ++ return res; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vec-construct-opt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vec-construct-opt.c +new file mode 100644 +index 000000000..92da1c8af +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vec-construct-opt.c +@@ -0,0 +1,85 @@ ++/* { dg-do compile } */ ++/* { dg-options "-mlsx -O3" } */ ++ ++#include ++ ++extern long long *x_di; ++extern int *x_si; ++extern short int *x_hi; ++extern char *x_qi; ++extern double *y_df; ++extern float *y_sf; ++ ++/* No change for V2DI mode. */ ++v2i64 ++vec_construct_v2i64 () ++{ ++ v2i64 res = ++ { x_di[1], x_di[0]} ++ ; ++ return res; ++} ++ ++/* Only load the lowest 2 elements and directly copy them to high half-part, ++ reducing more vinsgr2vr.w. */ ++/* { dg-final { scan-assembler-times "v4i32:.*\tvreplvei\\.d.*v4i32" 1 } } */ ++v4i32 ++vec_construct_v4i32 () ++{ ++ v4i32 res = ++ { x_si[0], x_si[1], x_si[0], x_si[1]} ++ ; ++ return res; ++} ++ ++/* Only load the lowest 4 elements and directly copy them to high half-part, ++ reducing more vinsgr2vr.h. */ ++/* { dg-final { scan-assembler-times "v8i16:.*\tvreplvei\\.d.*v8i16" 1 } } */ ++v8i16 ++vec_construct_v8i16 () ++{ ++ v8i16 res = ++ { x_hi[0], x_hi[0], x_hi[0], x_hi[1], ++ x_hi[0], x_hi[0], x_hi[0], x_hi[1] } ++ ; ++ return res; ++} ++ ++/* Only load the lowest 8 elements and directly copy them to high half-part, ++ reducing more vinsgr2vr.b. 
*/ ++/* { dg-final { scan-assembler-times "v16i8:.*\tvreplvei\\.d.*v16i8" 1 } } */ ++v16i8 ++vec_construct_v16i8 () ++{ ++ v16i8 res = ++ { x_qi[0], x_qi[1], x_qi[0], x_qi[2], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[3], ++ x_qi[0], x_qi[1], x_qi[0], x_qi[2], ++ x_qi[0], x_qi[0], x_qi[0], x_qi[3] } ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.d. */ ++/* { dg-final { scan-assembler-not "v2f64:.*\tvextrins\\.d.*v2f64" } } */ ++/* { dg-final { scan-assembler-times "v2f64:.*\tvilvl\\.d.*v2f64" 1 } } */ ++v2f64 ++vec_construct_v2f64 () ++{ ++ v2f64 res = ++ { y_df[0], y_df[2] } ++ ; ++ return res; ++} ++ ++/* Set 2 elements of a vector simultaneously by vilvl.w ++ and reducing more vextrins.w. */ ++/* { dg-final { scan-assembler-times "v4f32:.*\tvilvl\\.w.*v4f32" 1 } } */ ++v4f32 ++vec_construct_v4f32 () ++{ ++ v4f32 res = ++ { y_sf[0], y_sf[1], y_sf[0], y_sf[0] } ++ ; ++ return res; ++} +-- +2.43.0 + diff --git a/0004-Sw64-Port-update-gcc-testsuite-for-sw64.patch b/0004-Sw64-Port-update-gcc-testsuite-for-sw64.patch new file mode 100644 index 0000000000000000000000000000000000000000..e097fa3cb9ca7164faedc50b22e42b0296cb1d6c --- /dev/null +++ b/0004-Sw64-Port-update-gcc-testsuite-for-sw64.patch @@ -0,0 +1,688 @@ +From f0e14563ae35b0e0c52bed8f091a750028a42e67 Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:44:09 +0800 +Subject: [PATCH 04/16] Sw64 Port: update gcc/testsuite for sw64 + +--- + contrib/compare-all-tests | 3 +- + .../c-c++-common/torture/asm-inline.c | 2 +- + gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C | 2 +- + .../g++.dg/no-stack-protector-attr-3.C | 4 +-- + gcc/testsuite/g++.dg/opt/devirt2.C | 3 +- + gcc/testsuite/g++.dg/pr49718.C | 2 +- + .../gcc.c-torture/execute/20101011-1.c | 3 ++ + gcc/testsuite/gcc.dg/20020312-2.c | 2 ++ + .../gcc.dg/atomic/c11-atomic-exec-5.c | 2 +- + gcc/testsuite/gcc.dg/attr-alloc_size-11.c | 4 +-- + gcc/testsuite/gcc.dg/cpp/assert4.c | 4 +-- + gcc/testsuite/gcc.dg/pr44194-1.c | 2 
+- + gcc/testsuite/gcc.dg/stack-usage-1.c | 2 ++ + gcc/testsuite/gcc.dg/torture/restrict-8.c | 2 +- + gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c | 2 +- + gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c | 2 +- + gcc/testsuite/gcc.dg/tree-ssa/stdarg-2.c | 30 +++++++++---------- + gcc/testsuite/gcc.dg/tree-ssa/stdarg-3.c | 22 +++++++------- + gcc/testsuite/gcc.dg/tree-ssa/stdarg-4.c | 8 ++--- + gcc/testsuite/gcc.dg/tree-ssa/stdarg-5.c | 14 ++++----- + gcc/testsuite/go.test/go-test.exp | 3 ++ + gcc/testsuite/lib/target-supports.exp | 11 ++++++- + 22 files changed, 75 insertions(+), 54 deletions(-) + +diff --git a/contrib/compare-all-tests b/contrib/compare-all-tests +index 502cc64f5..02519a1f3 100644 +--- a/contrib/compare-all-tests ++++ b/contrib/compare-all-tests +@@ -33,8 +33,9 @@ ppc_opts='-m32 -m64' + s390_opts='-m31 -m31/-mzarch -m64' + sh_opts='-m3 -m3e -m4 -m4a -m4al -m4/-mieee -m1 -m1/-mno-cbranchdi -m2a -m2a/-mieee -m2e -m2e/-mieee' + sparc_opts='-mcpu=v8/-m32 -mcpu=v9/-m32 -m64' ++sw_64_opts='-mlong-double-64/-mieee -mlong-double-64 -mlong-double-128/-mieee -mlong-double-128' + +-all_targets='alpha arm avr bfin cris fr30 frv h8300 ia64 iq2000 m32c m32r m68k mcore mips mmix mn10300 pa pdp11 ppc sh sparc v850 vax xstormy16 xtensa' # e500 ++all_targets='alpha arm avr bfin cris fr30 frv h8300 ia64 iq2000 m32c m32r m68k mcore mips mmix mn10300 pa pdp11 ppc sh sparc sw_64 v850 vax xstormy16 xtensa' # e500 + + test_one_file () + { +diff --git a/gcc/testsuite/c-c++-common/torture/asm-inline.c b/gcc/testsuite/c-c++-common/torture/asm-inline.c +index dea89658b..f860b3a7b 100644 +--- a/gcc/testsuite/c-c++-common/torture/asm-inline.c ++++ b/gcc/testsuite/c-c++-common/torture/asm-inline.c +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* -O0 does no inlining, and -O3 does it too aggressively for this test: */ +-/* { dg-skip-if "" { *-*-* } { "-O0" "-O3" } { "" } } ++/* { dg-skip-if "" { *-*-* } { "-O0" "-O3" "-Og -g" } { "" } } + /* The normal asm is not inlined: */ + /* { 
dg-final { scan-assembler-times "w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w.w" 2 } } */ + /* But the asm inline is inlined: */ +diff --git a/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C b/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C +index 424979a60..37c539a54 100644 +--- a/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C ++++ b/gcc/testsuite/g++.dg/cpp0x/constexpr-rom.C +@@ -1,6 +1,6 @@ + // PR c++/49673: check that test_data goes into .rodata + // { dg-do compile { target c++11 } } +-// { dg-additional-options -G0 { target { { alpha*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* loongarch*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } } ++// { dg-additional-options -G0 { target { { alpha*-*-* sw_64*-*-* frv*-*-* ia64-*-* lm32*-*-* m32r*-*-* microblaze*-*-* mips*-*-* loongarch*-*-* nios2-*-* powerpc*-*-* rs6000*-*-* } && { ! { *-*-darwin* *-*-aix* alpha*-*-*vms* } } } } } + // { dg-final { scan-assembler "\\.rdata" { target mips*-*-* } } } + // { dg-final { scan-assembler "rodata" { target { { *-*-linux-gnu *-*-gnu* *-*-elf } && { ! { mips*-*-* riscv*-*-* } } } } } } + +diff --git a/gcc/testsuite/g++.dg/no-stack-protector-attr-3.C b/gcc/testsuite/g++.dg/no-stack-protector-attr-3.C +index 76a5ec086..982dd9856 100644 +--- a/gcc/testsuite/g++.dg/no-stack-protector-attr-3.C ++++ b/gcc/testsuite/g++.dg/no-stack-protector-attr-3.C +@@ -20,5 +20,5 @@ int __attribute__((stack_protect)) bar() + return 0; + } + +-/* { dg-final { scan-assembler-times "stack_chk_fail" 1 { target { ! mips*-*-* } } } }*/ +-/* { dg-final { scan-assembler-times "stack_chk_fail" 2 { target { mips*-*-* } } } }*/ ++/* { dg-final { scan-assembler-times "stack_chk_fail" 1 { target { ! 
{ mips*-*-* sw_64*-*-* } } } } }*/ ++/* { dg-final { scan-assembler-times "stack_chk_fail" 2 { target { mips*-*-* sw_64*-*-* } } } }*/ +diff --git a/gcc/testsuite/g++.dg/opt/devirt2.C b/gcc/testsuite/g++.dg/opt/devirt2.C +index cf4842bd4..c6b5a19fa 100644 +--- a/gcc/testsuite/g++.dg/opt/devirt2.C ++++ b/gcc/testsuite/g++.dg/opt/devirt2.C +@@ -5,7 +5,7 @@ + // { dg-additional-options "-mshort-calls" {target epiphany-*-*} } + // Using -mno-abicalls avoids a R_MIPS_JALR .reloc. + // { dg-additional-options "-mno-abicalls" { target mips*-*-* } } +-// { dg-final { scan-assembler-times "xyzzy" 2 { target { ! { alpha*-*-* hppa*-*-* ia64*-*-hpux* sparc*-*-* *-*-mingw* } } } } } ++// { dg-final { scan-assembler-times "xyzzy" 2 { target { ! { alpha*-*-* sw_64*-*-* hppa*-*-* ia64*-*-hpux* sparc*-*-* *-*-mingw* } } } } } + // For *-*-mingw* there is additionally one .def match + // { dg-final { scan-assembler-times "xyzzy" 3 { target *-*-mingw* } } } + // The IA64 and HPPA compilers generate external declarations in addition +@@ -15,6 +15,7 @@ + // If assembler supports explicit relocation directives, the alpha compiler generates + // literal/lituse_jsr pairs, so the scans need to be more specific. + // { dg-final { scan-assembler-times "jsr\[^\n\]*xyzzy" 2 { target alpha*-*-* } } } ++// { dg-final { scan-assembler-times "call\[^\n\]*xyzzy" 2 { target sw_64*-*-* } } } + // Unless the assembler supports -relax, the 32-bit SPARC compiler generates + // sethi/jmp instead of just call, so the scans need to be more specific. 
+ // With subexpressions, Tcl regexp -inline -all returns both the complete +diff --git a/gcc/testsuite/g++.dg/pr49718.C b/gcc/testsuite/g++.dg/pr49718.C +index b1cc5deb7..13c661642 100644 +--- a/gcc/testsuite/g++.dg/pr49718.C ++++ b/gcc/testsuite/g++.dg/pr49718.C +@@ -1,6 +1,6 @@ + /* { dg-do compile } */ + /* { dg-options "-O2 -finstrument-functions" } */ +-/* { dg-additional-options "-mno-explicit-relocs" { target alpha*-*-* } } */ ++/* { dg-additional-options "-mno-explicit-relocs" { target alpha*-*-* sw_64*-*-* } } */ + /* { dg-additional-options "-mno-relax-pic-calls" { target mips*-*-* } } */ + /* { dg-final { scan-assembler-times "__cyg_profile_func_enter" 1 { target { ! { hppa*-*-hpux* } } } } } */ + /* { dg-final { scan-assembler-times "__cyg_profile_func_enter,%r" 1 { target hppa*-*-hpux* } } } */ +diff --git a/gcc/testsuite/gcc.c-torture/execute/20101011-1.c b/gcc/testsuite/gcc.c-torture/execute/20101011-1.c +index d2c0f9ab7..878be5eab 100644 +--- a/gcc/testsuite/gcc.c-torture/execute/20101011-1.c ++++ b/gcc/testsuite/gcc.c-torture/execute/20101011-1.c +@@ -29,6 +29,9 @@ + #elif defined (__aarch64__) + /* On AArch64 integer division by zero does not trap. */ + # define DO_TEST 0 ++#elif defined (__sw_64__) ++ /* On sw_64 integer division by zero does not trap. */ ++# define DO_TEST 0 + #elif defined (__TMS320C6X__) + /* On TI C6X division by zero does not trap. */ + # define DO_TEST 0 +diff --git a/gcc/testsuite/gcc.dg/20020312-2.c b/gcc/testsuite/gcc.dg/20020312-2.c +index 92bc150df..292964e02 100644 +--- a/gcc/testsuite/gcc.dg/20020312-2.c ++++ b/gcc/testsuite/gcc.dg/20020312-2.c +@@ -15,6 +15,8 @@ extern void abort (void); + + #if defined(__alpha__) + /* PIC register is $29, but is used even without -fpic. */ ++#elif defined(__sw_64__) ++/* PIC register is $29, but is used even without -fpic. 
*/ + #elif defined(__arc__) + # define PIC_REG "26" + #elif defined(__arm__) +diff --git a/gcc/testsuite/gcc.dg/atomic/c11-atomic-exec-5.c b/gcc/testsuite/gcc.dg/atomic/c11-atomic-exec-5.c +index 692c64ad2..2f5457645 100644 +--- a/gcc/testsuite/gcc.dg/atomic/c11-atomic-exec-5.c ++++ b/gcc/testsuite/gcc.dg/atomic/c11-atomic-exec-5.c +@@ -24,7 +24,7 @@ + | FE_OVERFLOW \ + | FE_UNDERFLOW) + +-#if defined __alpha__ || defined __aarch64__ ++#if defined __alpha__ || defined __aarch64__ || defined __sw_64__ + #define ITER_COUNT 100 + #else + #define ITER_COUNT 10000 +diff --git a/gcc/testsuite/gcc.dg/attr-alloc_size-11.c b/gcc/testsuite/gcc.dg/attr-alloc_size-11.c +index 8332b3993..3d5a2e28a 100644 +--- a/gcc/testsuite/gcc.dg/attr-alloc_size-11.c ++++ b/gcc/testsuite/gcc.dg/attr-alloc_size-11.c +@@ -47,8 +47,8 @@ typedef __SIZE_TYPE__ size_t; + + /* The following tests fail because of missing range information. The xfail + exclusions are PR79356. */ +-TEST (signed char, SCHAR_MIN + 2, ALLOC_MAX); /* { dg-warning "argument 1 range \\\[13, \[0-9\]+\\\] exceeds maximum object size 12" "missing range info for signed char" { xfail { ! { aarch64*-*-* arm*-*-* avr-*-* alpha*-*-* ia64-*-* mips*-*-* or1k*-*-* pdp11*-*-* powerpc*-*-* sparc*-*-* s390*-*-* visium-*-* msp430-*-* nvptx*-*-*} } } } */ +-TEST (short, SHRT_MIN + 2, ALLOC_MAX); /* { dg-warning "argument 1 range \\\[13, \[0-9\]+\\\] exceeds maximum object size 12" "missing range info for short" { xfail { ! { aarch64*-*-* arm*-*-* alpha*-*-* avr-*-* ia64-*-* mips*-*-* or1k*-*-* pdp11*-*-* powerpc*-*-* sparc*-*-* s390x-*-* visium-*-* msp430-*-* nvptx*-*-* } } } } */ ++TEST (signed char, SCHAR_MIN + 2, ALLOC_MAX); /* { dg-warning "argument 1 range \\\[13, \[0-9\]+\\\] exceeds maximum object size 12" "missing range info for signed char" { xfail { ! 
{ aarch64*-*-* arm*-*-* avr-*-* alpha*-*-* sw_64*-*-* ia64-*-* mips*-*-* or1k*-*-* pdp11*-*-* powerpc*-*-* sparc*-*-* s390*-*-* visium-*-* msp430-*-* nvptx*-*-*} } } } */ ++TEST (short, SHRT_MIN + 2, ALLOC_MAX); /* { dg-warning "argument 1 range \\\[13, \[0-9\]+\\\] exceeds maximum object size 12" "missing range info for short" { xfail { ! { aarch64*-*-* arm*-*-* alpha*-*-* sw_64*-*-* avr-*-* ia64-*-* mips*-*-* or1k*-*-* pdp11*-*-* powerpc*-*-* sparc*-*-* s390x-*-* visium-*-* msp430-*-* nvptx*-*-* } } } } */ + TEST (int, INT_MIN + 2, ALLOC_MAX); /* { dg-warning "argument 1 range \\\[13, \[0-9\]+\\\] exceeds maximum object size 12" } */ + TEST (int, -3, ALLOC_MAX); /* { dg-warning "argument 1 range \\\[13, \[0-9\]+\\\] exceeds maximum object size 12" } */ + TEST (int, -2, ALLOC_MAX); /* { dg-warning "argument 1 range \\\[13, \[0-9\]+\\\] exceeds maximum object size 12" } */ +diff --git a/gcc/testsuite/gcc.dg/cpp/assert4.c b/gcc/testsuite/gcc.dg/cpp/assert4.c +index 92e3dba5c..1b40ddeb6 100644 +--- a/gcc/testsuite/gcc.dg/cpp/assert4.c ++++ b/gcc/testsuite/gcc.dg/cpp/assert4.c +@@ -151,8 +151,8 @@ + || (!defined __alpha_ev4__ && #cpu(ev4)) + # error + # endif +-#elif #cpu(alpha) || #machine(alpha) || #cpu(cix) || #cpu(fix) || #cpu(bwx) \ +- || #cpu(max) || #cpu(ev6) || #cpu(ev5) || #cpu(ev4) ++#elif (#cpu(alpha) || #machine(alpha) || #cpu(cix) || #cpu(fix) || #cpu(bwx) \ ++ || #cpu(max) || #cpu(ev6) || #cpu(ev5) || #cpu(ev4)) && !#cpu(sw_64) + # error + #endif + +diff --git a/gcc/testsuite/gcc.dg/pr44194-1.c b/gcc/testsuite/gcc.dg/pr44194-1.c +index a38270b79..13709e727 100644 +--- a/gcc/testsuite/gcc.dg/pr44194-1.c ++++ b/gcc/testsuite/gcc.dg/pr44194-1.c +@@ -1,4 +1,4 @@ +-/* { dg-do compile { target { { { { { { { { { { i?86-*-* x86_64-*-* } && x32 } || lp64 } && { ! s390*-*-* } } && { ! hppa*64*-*-* } } && { ! alpha*-*-* } } && { { ! powerpc*-*-linux* } || powerpc_elfv2 } } && { ! 
nvptx-*-* } } } } } } */ ++/* { dg-do compile { target { { { { { { { { { { i?86-*-* x86_64-*-* } && x32 } || lp64 } && { ! s390*-*-* } } && { ! hppa*64*-*-* } } && { ! alpha*-*-* } } && { ! sw_64*-*-* } } && { { ! powerpc*-*-linux* } || powerpc_elfv2 } } && { ! nvptx-*-* } } } } } */ + /* { dg-skip-if "returns that struct in memory" { mmix-*-* } } */ + /* { dg-options "-O2 -fdump-rtl-dse1 -fdump-rtl-final" } */ + +diff --git a/gcc/testsuite/gcc.dg/stack-usage-1.c b/gcc/testsuite/gcc.dg/stack-usage-1.c +index 21cce0f44..618be5603 100644 +--- a/gcc/testsuite/gcc.dg/stack-usage-1.c ++++ b/gcc/testsuite/gcc.dg/stack-usage-1.c +@@ -31,6 +31,8 @@ + # define SIZE 192 + #elif defined (__alpha__) + # define SIZE 240 ++#elif defined (__sw_64__) ++# define SIZE 240 + #elif defined (__ia64__) + # define SIZE 272 + #elif defined(__mips__) +diff --git a/gcc/testsuite/gcc.dg/torture/restrict-8.c b/gcc/testsuite/gcc.dg/torture/restrict-8.c +index 0118de013..7cb50980c 100644 +--- a/gcc/testsuite/gcc.dg/torture/restrict-8.c ++++ b/gcc/testsuite/gcc.dg/torture/restrict-8.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-skip-if "" { *-*-* } { "-O0" } { "" } } */ ++/* { dg-skip-if "" { *-*-* } { "-O0" "-Og -g" } { "" } } */ + /* { dg-options "-fdump-tree-fre1" } */ + + struct S { int i; void *p; int j; }; +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c b/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c +index b9f8fd21a..ba487e689 100644 +--- a/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c ++++ b/gcc/testsuite/gcc.dg/tree-ssa/20040204-1.c +@@ -33,4 +33,4 @@ void test55 (int x, int y) + that the && should be emitted (based on BRANCH_COST). Fix this + by teaching dom to look through && and register all components + as true. */ +-/* { dg-final { scan-tree-dump-times "link_error" 0 "optimized" { xfail { ! 
"alpha*-*-* arm*-*-* aarch64*-*-* powerpc*-*-* cris-*-* hppa*-*-* i?86-*-* mmix-*-* mips*-*-* m68k*-*-* moxie-*-* nds32*-*-* s390*-*-* sh*-*-* sparc*-*-* visium-*-* x86_64-*-* riscv*-*-* or1k*-*-* msp430-*-* pru*-*-* nvptx*-*-*" } } } } */ ++/* { dg-final { scan-tree-dump-times "link_error" 0 "optimized" { xfail { ! "alpha*-*-* sw_64*-*-* arm*-*-* aarch64*-*-* powerpc*-*-* cris-*-* hppa*-*-* i?86-*-* mmix-*-* mips*-*-* m68k*-*-* moxie-*-* nds32*-*-* s390*-*-* sh*-*-* sparc*-*-* visium-*-* x86_64-*-* riscv*-*-* or1k*-*-* msp430-*-* pru*-*-* nvptx*-*-*" } } } } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c +index a879d3059..8e9391c11 100644 +--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c ++++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dom-cse-2.c +@@ -27,4 +27,4 @@ foo () + but the loop reads only one element at a time, and DOM cannot resolve these. + The same happens on powerpc depending on the SIMD support available. */ + +-/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* hppa*64*-*-* nvptx*-*-* mmix-knuth-mmixware } || { { { lp64 && { powerpc*-*-* sparc*-*-* riscv*-*-* } } || aarch64_sve } || { arm*-*-* && { ! arm_neon } } } } } } } */ ++/* { dg-final { scan-tree-dump "return 28;" "optimized" { xfail { { alpha*-*-* sw_64*-*-* hppa*64*-*-* nvptx*-*-* mmix-knuth-mmixware } || { { { lp64 && { powerpc*-*-* sparc*-*-* riscv*-*-* } } || aarch64_sve } || { arm*-*-* && { ! arm_neon } } } } } } } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-2.c b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-2.c +index 0224997f1..d6bda1658 100644 +--- a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-2.c ++++ b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-2.c +@@ -23,7 +23,7 @@ f1 (int i, ...) + } + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -44,7 +44,7 @@ f2 (int i, ...) + architecture or bytes on 64-bit architecture. */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save \[148\] GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save \[148\] GPR units and 0 FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 8 GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 8 GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 1 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 8 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save \[148\] GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -63,7 +63,7 @@ f3 (int i, ...) + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 0 GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { powerpc*-*-linux* && { powerpc_fprs && ilp32 } } } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 0 GPR units and 1 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 0 GPR units and 16 FPR units" "stdarg" { target aarch64*-*-* } } } */ +-/* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 8 GPR units and 2" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 8 GPR units and 2" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save \[1-9\]\[0-9\]* GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save \[1-9\]\[0-9\]* GPR units" "stdarg" { target ia64-*-* } } } */ + /* { 
dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save \[1-9\]\[0-9\]* GPR units" "stdarg" { target { powerpc*-*-* && lp64 } } } } */ +@@ -79,7 +79,7 @@ f4 (int i, ...) + } + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -98,7 +98,7 @@ f5 (int i, ...) + } + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -119,7 +119,7 @@ f6 (int i, ...) + } + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save (3|12|24) GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save (3|12|24) GPR units and 0 FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 24 GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 24 GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 3 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 24 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save (3|12|24) GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -137,7 +137,7 @@ f7 (int i, ...) + } + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -157,7 +157,7 @@ f8 (int i, ...) + } + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -175,7 +175,7 @@ f9 (int i, ...) + } + /* { dg-final { scan-tree-dump "f9: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f9: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f9: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f9: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f9: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f9: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f9: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -195,7 +195,7 @@ f10 (int i, ...) + } + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -216,7 +216,7 @@ f11 (int i, ...) + } + /* { dg-final { scan-tree-dump "f11: va_list escapes 0, needs to save (3|12|24) GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 0, needs to save (3|12|24) GPR units and 0 FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f11: va_list escapes 0, needs to save 24 GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f11: va_list escapes 0, needs to save 24 GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 0, needs to save 3 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 0, needs to save 24 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 0, needs to save (3|12|24) GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -237,7 +237,7 @@ f12 (int i, ...) + } + /* { dg-final { scan-tree-dump "f12: va_list escapes 0, needs to save 0 GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 0, needs to save 0 GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { powerpc*-*-linux* && { powerpc_fprs && ilp32 } } } } } */ +-/* { dg-final { scan-tree-dump "f12: va_list escapes 0, needs to save 24 GPR units and 2" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f12: va_list escapes 0, needs to save 24 GPR units and 2" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 0, needs to save 0 GPR units and 3 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 0, needs to save 0 GPR units and 48 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 0, needs to save \[1-9]\[0-9\]* GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -258,7 +258,7 @@ f13 (int i, ...) + } + /* { dg-final { scan-tree-dump "f13: va_list escapes 0, needs to save 0 GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f13: va_list escapes 0, needs to save 0 GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { powerpc*-*-linux* && { powerpc_fprs && ilp32 } } } } } */ +-/* { dg-final { scan-tree-dump "f13: va_list escapes 0, needs to save 24 GPR units and 2" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f13: va_list escapes 0, needs to save 24 GPR units and 2" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f13: va_list escapes 0, needs to save 0 GPR units and 3 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f13: va_list escapes 0, needs to save 0 GPR units and 48 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f13: va_list escapes 0, needs to save \[1-9]\[0-9\]* GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -279,7 +279,7 @@ f14 (int i, ...) + } + /* { dg-final { scan-tree-dump "f14: va_list escapes 0, needs to save \[148\] GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f14: va_list escapes 0, needs to save \[148\] GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { powerpc*-*-linux* && { powerpc_fprs && ilp32 } } } } } */ +-/* { dg-final { scan-tree-dump "f14: va_list escapes 0, needs to save 24 GPR units and 3" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f14: va_list escapes 0, needs to save 24 GPR units and 3" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f14: va_list escapes 0, needs to save 1 GPR units and 2 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f14: va_list escapes 0, needs to save 8 GPR units and 32 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f14: va_list escapes 0, needs to save \[1-9]\[0-9\]* GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -308,7 +308,7 @@ f15 (int i, ...) + /* { dg-final { scan-tree-dump "f15: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + + /* We may be able to improve upon this after fixing PR66010/PR66013. 
*/ +-/* { dg-final { scan-tree-dump "f15: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f15: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + + /* { dg-final { scan-tree-dump-not "f15: va_list escapes 0, needs to save 0 GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ + /* { dg-final { scan-tree-dump-not "f15: va_list escapes 0, needs to save 0 GPR units" "stdarg" { target ia64-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-3.c b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-3.c +index d044654e0..d92290bb0 100644 +--- a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-3.c ++++ b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-3.c +@@ -22,7 +22,7 @@ f1 (int i, ...) + } + /* { dg-final { scan-tree-dump "f1: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f1: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f1: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -38,7 +38,7 @@ f2 (int i, ...) 
+ } + /* { dg-final { scan-tree-dump "f2: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f2: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f2: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -57,7 +57,7 @@ f3 (int i, ...) + } + /* { dg-final { scan-tree-dump "f3: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f3: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f3: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -74,7 +74,7 @@ f4 (int i, ...) + } + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -91,7 +91,7 @@ f5 (int i, ...) + } + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -110,7 +110,7 @@ f6 (int i, ...) + } + /* { dg-final { scan-tree-dump "f6: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f6: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f6: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -127,7 +127,7 @@ f7 (int i, ...) + } + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -144,7 +144,7 @@ f8 (int i, ...) + } + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f8: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -161,7 +161,7 @@ f10 (int i, ...) + } + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f10: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -178,7 +178,7 @@ f11 (int i, ...) + } + /* { dg-final { scan-tree-dump "f11: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f11: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f11: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f11: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -195,7 +195,7 @@ f12 (int i, ...) + } + /* { dg-final { scan-tree-dump "f12: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 } } } } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f12: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f12: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 1, needs to save all GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f12: va_list escapes 1, needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-4.c b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-4.c +index 1a637d6ef..8b2f38929 100644 +--- a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-4.c ++++ b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-4.c +@@ -25,7 +25,7 @@ f1 (int i, ...) + } + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { x32 || { ! 
{ ia32 || llp64 } } } } } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes \[01\], needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -43,7 +43,7 @@ f2 (int i, ...) + } + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 0 GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { x32 || { ! 
{ ia32 || llp64 } } } } } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 0 GPR units and all FPR units" "stdarg" { target { powerpc*-*-linux* && { powerpc_fprs && ilp32 } } } } } */ +-/* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save all GPR units and 2" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save all GPR units and 2" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 0 GPR units and all FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save 0 GPR units and all FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes \[01\], needs to save all GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -67,7 +67,7 @@ f3 (int i, ...) + } + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save \[148\] GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save \[148\] GPR units and 0 FPR units" "stdarg" { target { powerpc*-*-linux* && ilp32 } } } } */ +-/* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 8 GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 8 GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 1 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 8 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save \[148\] GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +@@ -89,7 +89,7 @@ f4 (int i, ...) + } + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 0 GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 0 GPR units and \[1-9\]\[0-9\]* FPR units" "stdarg" { target { powerpc*-*-linux* && { powerpc_fprs && ilp32 } } } } } */ +-/* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 8 GPR units and 2" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 8 GPR units and 2" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 0 GPR units and 1 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 0 GPR units and 16 FPR units" "stdarg" { target aarch64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save \[148\] GPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && ia32 } } } } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-5.c b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-5.c +index c8ad4fe32..c3eba1e21 100644 +--- a/gcc/testsuite/gcc.dg/tree-ssa/stdarg-5.c ++++ b/gcc/testsuite/gcc.dg/tree-ssa/stdarg-5.c +@@ -23,7 +23,7 @@ f1 (int i, ...) + va_end (ap); + } + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ +-/* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f1: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + +@@ -37,7 +37,7 @@ f2 (int i, ...) + va_end (ap); + } + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save all GPR units and all FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 || llp64 } } } } } } */ +-/* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save all GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save all GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f2: va_list escapes 0, needs to save all GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + +@@ -56,7 +56,7 @@ f3 (int i, ...) + } + } + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 0 GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ +-/* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 32 GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 32 GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 1 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f3: va_list escapes 0, needs to save 8 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + +@@ -75,7 +75,7 @@ f4 (int i, ...) + } + } + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 16 GPR units and 16 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 || llp64 } } } } } } */ +-/* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 24 GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 24 GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 2 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f4: va_list escapes 0, needs to save 24 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + +@@ -90,7 +90,7 @@ f5 (int i, ...) + bar (__real__ ci + __imag__ ci); + } + /* { dg-final { scan-tree-dump "f5: va_list escapes 0, needs to save 16 GPR units and 0 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ +-/* { dg-final { scan-tree-dump "f5: va_list escapes 0, needs to save 32 GPR units and 1" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f5: va_list escapes 0, needs to save 32 GPR units and 1" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 0, needs to save (4|2) GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f5: va_list escapes 0, needs to save 16 GPR units and 0 FPR units" "stdarg" { target aarch64*-*-* } } } */ + +@@ -105,7 +105,7 @@ f6 (int i, ...) + bar (__real__ ci + __imag__ cd); + } + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 8 GPR units and 32 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 || llp64 } } } } } } */ +-/* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 32 GPR units and 3" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 32 GPR units and 3" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save (3|2) GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f6: va_list escapes 0, needs to save 8 GPR units and 32 FPR units" "stdarg" { target aarch64*-*-* } } } */ + +@@ -120,6 +120,6 @@ f7 (int i, ...) + bar (__real__ cd + __imag__ cd); + } + /* { dg-final { scan-tree-dump "f7: va_list escapes 0, needs to save 0 GPR units and 64 FPR units" "stdarg" { target { { i?86-*-* x86_64-*-* } && { ! 
{ ia32 || llp64 } } } } } } */ +-/* { dg-final { scan-tree-dump "f7: va_list escapes 0, needs to save 32 GPR units and 2" "stdarg" { target alpha*-*-linux* } } } */ ++/* { dg-final { scan-tree-dump "f7: va_list escapes 0, needs to save 32 GPR units and 2" "stdarg" { target alpha*-*-linux* sw_64*-*-* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 0, needs to save 2 GPR units and 0 FPR units" "stdarg" { target s390*-*-linux* } } } */ + /* { dg-final { scan-tree-dump "f7: va_list escapes 0, needs to save 0 GPR units and 64 FPR units" "stdarg" { target aarch64*-*-* } } } */ +diff --git a/gcc/testsuite/go.test/go-test.exp b/gcc/testsuite/go.test/go-test.exp +index 11c178ad7..d405316e1 100644 +--- a/gcc/testsuite/go.test/go-test.exp ++++ b/gcc/testsuite/go.test/go-test.exp +@@ -174,6 +174,9 @@ proc go-set-goarch { } { + "alpha*-*-*" { + set goarch "alpha" + } ++ "sw_64*-*-*" { ++ set goarch "sw_64" ++ } + "arm*-*-*" - + "ep9312*-*-*" - + "strongarm*-*-*" - +diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp +index 292737dae..534405aa7 100644 +--- a/gcc/testsuite/lib/target-supports.exp ++++ b/gcc/testsuite/lib/target-supports.exp +@@ -3526,6 +3526,7 @@ proc check_effective_target_vect_cmdline_needed { } { + return [check_cached_effective_target vect_cmdline_needed { + if { [istarget alpha*-*-*] + || [istarget ia64-*-*] ++ || [istarget sw_64-*-*] + || (([istarget i?86-*-*] || [istarget x86_64-*-*]) + && ![is-effective-target ia32]) + || ([istarget powerpc*-*-*] +@@ -3554,6 +3555,7 @@ proc check_effective_target_vect_int { } { + || [istarget amdgcn-*-*] + || [istarget sparc*-*-*] + || [istarget alpha*-*-*] ++ || [istarget sw_64*-*-*] + || [istarget ia64-*-*] + || [istarget aarch64*-*-*] + || [is-effective-target arm_neon] +@@ -7019,6 +7021,7 @@ proc check_effective_target_vect_no_int_min_max { } { + return [check_cached_effective_target_indexed vect_no_int_min_max { + expr { [istarget sparc*-*-*] + || [istarget 
alpha*-*-*] ++ || [istarget sw_64*-*-*] + || ([istarget mips*-*-*] + && [et-is-effective-target mips_loongson_mmi]) }}] + } +@@ -7031,7 +7034,7 @@ proc check_effective_target_vect_no_int_min_max { } { + proc check_effective_target_vect_no_int_add { } { + # Alpha only supports vector add on V8QI and V4HI. + return [check_cached_effective_target_indexed vect_no_int_add { +- expr { [istarget alpha*-*-*] }}] ++ expr { [istarget alpha*-*-*] || [istarget sw_64*-*-*] }}] + } + + # Return 1 if the target plus current options does not support vector +@@ -8551,6 +8554,7 @@ proc check_effective_target_sync_long_long { } { + || [istarget aarch64*-*-*] + || [istarget arm*-*-*] + || [istarget alpha*-*-*] ++ || [istarget sw_64*-*-*] + || ([istarget sparc*-*-*] && [check_effective_target_lp64]) + || [istarget s390*-*-*] } { + return 1 +@@ -8632,6 +8636,7 @@ proc check_effective_target_sync_long_long_runtime { } { + } + } "" ]) + || [istarget alpha*-*-*] ++ || [istarget sw_64*-*-*] + || ([istarget sparc*-*-*] + && [check_effective_target_lp64] + && [check_effective_target_ultrasparc_hw]) +@@ -8648,6 +8653,7 @@ proc check_effective_target_bswap { } { + return [check_cached_effective_target bswap { + expr { [istarget aarch64*-*-*] + || [istarget alpha*-*-*] ++ || [istarget sw_64*-*-*] + || [istarget i?86-*-*] || [istarget x86_64-*-*] + || [istarget m68k-*-*] + || [istarget powerpc*-*-*] +@@ -8672,6 +8678,7 @@ proc check_effective_target_sync_int_long { } { + || [istarget i?86-*-*] || [istarget x86_64-*-*] + || [istarget aarch64*-*-*] + || [istarget alpha*-*-*] ++ || [istarget sw_64*-*-*] + || [istarget arm*-*-linux-*] + || [istarget arm*-*-uclinuxfdpiceabi] + || ([istarget arm*-*-*] +@@ -8708,6 +8715,7 @@ proc check_effective_target_sync_char_short { } { + || [istarget ia64-*-*] + || [istarget i?86-*-*] || [istarget x86_64-*-*] + || [istarget alpha*-*-*] ++ || [istarget sw_64*-*-*] + || [istarget arm*-*-linux-*] + || [istarget arm*-*-uclinuxfdpiceabi] + || ([istarget arm*-*-*] +@@ 
-9149,6 +9157,7 @@ proc check_effective_target_fd_truncate { } { + + proc add_options_for_ieee { flags } { + if { [istarget alpha*-*-*] ++ || [istarget sw_64*-*-*] + || [istarget sh*-*-*] } { + return "$flags -mieee" + } +-- +2.25.1 + diff --git a/0004-libquadmath-Enable-libquadmath-on-kunpeng.patch b/0004-libquadmath-Enable-libquadmath-on-kunpeng.patch new file mode 100644 index 0000000000000000000000000000000000000000..0bd784e9c1c5f0e48005abdd3f309b4c5b833677 --- /dev/null +++ b/0004-libquadmath-Enable-libquadmath-on-kunpeng.patch @@ -0,0 +1,197 @@ +From 52a810b4d8a725a7edb2988f6c3813a9938362a5 Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Fri, 14 Jul 2023 11:10:24 +0800 +Subject: [PATCH 04/22] [libquadmath] Enable libquadmath on kunpeng + +This enable libquadmath on kunpeng platform to convenient +users that migrating from x86 platform. libquadmath uses "__float128" +as quad precision floating point type and with math functions with "q" +suffix like "cosq". For those who do not need to adapt to x86 platform, +you can use "long double" as quad precision floating point type and math +functions with "l" suffix like "cosl" in libm for quad precision math. +--- + libquadmath/Makefile.am | 4 ++++ + libquadmath/Makefile.in | 3 ++- + libquadmath/configure | 28 ++++++++++++++++++++++++++-- + libquadmath/configure.ac | 7 +++++++ + libquadmath/quadmath.h | 13 +++++++++++-- + 5 files changed, 50 insertions(+), 5 deletions(-) + +diff --git a/libquadmath/Makefile.am b/libquadmath/Makefile.am +index 35dffb46f..bf0398d9c 100644 +--- a/libquadmath/Makefile.am ++++ b/libquadmath/Makefile.am +@@ -2,6 +2,10 @@ + + AUTOMAKE_OPTIONS = foreign info-in-builddir + ++if ARCH_AARCH64 ++DEFS += -D__float128="long double" ++endif ++ + ## Skip over everything if the quadlib is not available: + if BUILD_LIBQUADMATH + ACLOCAL_AMFLAGS = -I .. 
-I ../config +diff --git a/libquadmath/Makefile.in b/libquadmath/Makefile.in +index 8c0112122..449cc8a06 100644 +--- a/libquadmath/Makefile.in ++++ b/libquadmath/Makefile.in +@@ -90,6 +90,7 @@ POST_UNINSTALL = : + build_triplet = @build@ + host_triplet = @host@ + target_triplet = @target@ ++@ARCH_AARCH64_TRUE@am__append_1 = -D__float128="long double" + @BUILD_LIBQUADMATH_FALSE@libquadmath_la_DEPENDENCIES = + subdir = . + ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +@@ -337,7 +338,7 @@ CFLAGS = @CFLAGS@ + CPP = @CPP@ + CPPFLAGS = @CPPFLAGS@ + CYGPATH_W = @CYGPATH_W@ +-DEFS = @DEFS@ ++DEFS = @DEFS@ $(am__append_1) + DEPDIR = @DEPDIR@ + DSYMUTIL = @DSYMUTIL@ + DUMPBIN = @DUMPBIN@ +diff --git a/libquadmath/configure b/libquadmath/configure +index 603f2f131..13a9088fb 100755 +--- a/libquadmath/configure ++++ b/libquadmath/configure +@@ -633,6 +633,8 @@ am__EXEEXT_TRUE + LTLIBOBJS + LIBOBJS + get_gcc_base_ver ++ARCH_AARCH64_FALSE ++ARCH_AARCH64_TRUE + GENINSRC_FALSE + GENINSRC_TRUE + XCFLAGS +@@ -10806,7 +10808,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10809 "configure" ++#line 10811 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -10912,7 +10914,7 @@ else + lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2 + lt_status=$lt_dlunknown + cat > conftest.$ac_ext <<_LT_EOF +-#line 10915 "configure" ++#line 10917 "configure" + #include "confdefs.h" + + #if HAVE_DLFCN_H +@@ -12705,6 +12707,11 @@ else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ + ++ #if defined(__aarch64__) ++ typedef long double __float128; ++ #define __builtin_huge_valq() (__extension__ 0x1.0p32767Q) ++ #endif ++ + #if (!defined(_ARCH_PPC)) || defined(__LONG_DOUBLE_IEEE128__) + typedef _Complex float __attribute__((mode(TC))) __complex128; + #else +@@ -12756,6 +12763,11 @@ fi + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. 
*/ + ++ #if defined(__aarch64__) ++ typedef long double __float128; ++ #define __builtin_huge_valq() (__extension__ 0x1.0p32767Q) ++ #endif ++ + #if (!defined(_ARCH_PPC)) || defined(__LONG_DOUBLE_IEEE128__) + typedef _Complex float __attribute__((mode(TC))) __complex128; + #else +@@ -13242,6 +13254,14 @@ else + GENINSRC_FALSE= + fi + ++ if expr "$target_cpu" : "aarch64.*" > /dev/null; then ++ ARCH_AARCH64_TRUE= ++ ARCH_AARCH64_FALSE='#' ++else ++ ARCH_AARCH64_TRUE='#' ++ ARCH_AARCH64_FALSE= ++fi ++ + + # Determine what GCC version number to use in filesystem paths. + +@@ -13425,6 +13445,10 @@ if test -z "${GENINSRC_TRUE}" && test -z "${GENINSRC_FALSE}"; then + as_fn_error $? "conditional \"GENINSRC\" was never defined. + Usually this means the macro was only invoked conditionally." "$LINENO" 5 + fi ++if test -z "${ARCH_AARCH64_TRUE}" && test -z "${ARCH_AARCH64_FALSE}"; then ++ as_fn_error $? "conditional \"ARCH_AARCH64\" was never defined. ++Usually this means the macro was only invoked conditionally." 
"$LINENO" 5 ++fi + + : "${CONFIG_STATUS=./config.status}" + ac_write_fail=0 +diff --git a/libquadmath/configure.ac b/libquadmath/configure.ac +index eec4084a4..507c247d6 100644 +--- a/libquadmath/configure.ac ++++ b/libquadmath/configure.ac +@@ -218,6 +218,11 @@ AM_CONDITIONAL(LIBQUAD_USE_SYMVER_SUN, [test "x$quadmath_use_symver" = xsun]) + + AC_CACHE_CHECK([whether __float128 is supported], [libquad_cv_have_float128], + [GCC_TRY_COMPILE_OR_LINK([ ++ #if defined(__aarch64__) ++ typedef long double __float128; ++ #define __builtin_huge_valq() (__extension__ 0x1.0p32767Q) ++ #endif ++ + #if (!defined(_ARCH_PPC)) || defined(__LONG_DOUBLE_IEEE128__) + typedef _Complex float __attribute__((mode(TC))) __complex128; + #else +@@ -393,6 +398,8 @@ AS_HELP_STRING([--enable-generated-files-in-srcdir], + [enable_generated_files_in_srcdir=no]) + AC_MSG_RESULT($enable_generated_files_in_srcdir) + AM_CONDITIONAL(GENINSRC, test "$enable_generated_files_in_srcdir" = yes) ++AM_CONDITIONAL(ARCH_AARCH64, ++ [expr "$target_cpu" : "aarch64.*" > /dev/null]) + + # Determine what GCC version number to use in filesystem paths. + GCC_BASE_VER +diff --git a/libquadmath/quadmath.h b/libquadmath/quadmath.h +index 81eb957d2..bb1b49df6 100644 +--- a/libquadmath/quadmath.h ++++ b/libquadmath/quadmath.h +@@ -27,6 +27,12 @@ Boston, MA 02110-1301, USA. 
*/ + extern "C" { + #endif + ++#if defined(__aarch64__) ++#ifndef __float128 ++typedef long double __float128; ++#endif ++#endif ++ + /* Define the complex type corresponding to __float128 + ("_Complex __float128" is not allowed) */ + #if (!defined(_ARCH_PPC)) || defined(__LONG_DOUBLE_IEEE128__) +@@ -160,10 +166,13 @@ extern int quadmath_snprintf (char *str, size_t size, + #define FLT128_MAX_10_EXP 4932 + + +-#define HUGE_VALQ __builtin_huge_valq() ++#if defined(__aarch64__) + /* The following alternative is valid, but brings the warning: + (floating constant exceeds range of ‘__float128’) */ +-/* #define HUGE_VALQ (__extension__ 0x1.0p32767Q) */ ++# define HUGE_VALQ (__extension__ 0x1.0p32767Q) ++#else ++# define HUGE_VALQ __builtin_huge_valq() ++#endif + + #define M_Eq 2.718281828459045235360287471352662498Q /* e */ + #define M_LOG2Eq 1.442695040888963407359924681001892137Q /* log_2 e */ +-- +2.33.0 + diff --git a/0005-LoongArch-Replace-UNSPEC_FCOPYSIGN-with-copysign-RTL.patch b/0005-LoongArch-Replace-UNSPEC_FCOPYSIGN-with-copysign-RTL.patch new file mode 100644 index 0000000000000000000000000000000000000000..1660b3b65b82a2340a432a709ed33e189243ad6f --- /dev/null +++ b/0005-LoongArch-Replace-UNSPEC_FCOPYSIGN-with-copysign-RTL.patch @@ -0,0 +1,51 @@ +From 9b2cbf361e38ea1ad672c2b8c8cf1dda4f6f7d72 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Mon, 2 Oct 2023 18:51:00 +0800 +Subject: [PATCH 005/188] LoongArch: Replace UNSPEC_FCOPYSIGN with copysign RTL + +When I added copysign support for LoongArch (r13-3702), we did not have +a copysign RTL insn, so I had to use UNSPEC to represent the copysign +instruction. Now the copysign RTX code has been added in r14-1586, so +this patch removes those UNSPECs, and it uses the native RTL copysign +insn. + +Inspired by rs6000 patch "Cleanup: Replace UNSPEC_COPYSIGN with copysign +RTL" [1] from Michael Meissner. 
+ +[1]: https://gcc.gnu.org/pipermail/gcc-patches/2023-September/631701.html + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (UNSPEC_FCOPYSIGN): Delete. + (copysign3): Use copysign RTL instead of UNSPEC. +--- + gcc/config/loongarch/loongarch.md | 6 ++---- + 1 file changed, 2 insertions(+), 4 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 63ff32e75..73e2cbe0b 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -37,7 +37,6 @@ + UNSPEC_FCLASS + UNSPEC_FMAX + UNSPEC_FMIN +- UNSPEC_FCOPYSIGN + UNSPEC_FTINT + UNSPEC_FTINTRM + UNSPEC_FTINTRP +@@ -1129,9 +1128,8 @@ + + (define_insn "copysign3" + [(set (match_operand:ANYF 0 "register_operand" "=f") +- (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f") +- (match_operand:ANYF 2 "register_operand" "f")] +- UNSPEC_FCOPYSIGN))] ++ (copysign:ANYF (match_operand:ANYF 1 "register_operand" "f") ++ (match_operand:ANYF 2 "register_operand" "f")))] + "TARGET_HARD_FLOAT" + "fcopysign.\t%0,%1,%2" + [(set_attr "type" "fcopysign") +-- +2.43.0 + diff --git a/0005-Sw64-Port-libatomic.patch b/0005-Sw64-Port-libatomic.patch new file mode 100644 index 0000000000000000000000000000000000000000..182b959e68fc6803f1afe118ee6e06a6a8fa4ff8 --- /dev/null +++ b/0005-Sw64-Port-libatomic.patch @@ -0,0 +1,29 @@ +From 76693fb016acae2a7a1e130e196a5793f2b2f23b Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:50:36 +0800 +Subject: [PATCH 05/16] Sw64 Port: libatomic + +--- + libatomic/configure.tgt | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/libatomic/configure.tgt b/libatomic/configure.tgt +index 33f8c91ce..f9dbd2d35 100644 +--- a/libatomic/configure.tgt ++++ b/libatomic/configure.tgt +@@ -81,6 +81,12 @@ case "${target_cpu}" in + ARCH=sparc + ;; + ++ sw_64*) ++ # fenv.c needs this option to generate inexact exceptions. 
++ XCFLAGS="${XCFLAGS} -mfp-trap-mode=sui" ++ ARCH=sw_64 ++ ;; ++ + i[3456]86 | x86_64) + cat > conftestx.c < +Date: Wed, 11 Oct 2023 17:59:53 +0800 +Subject: [PATCH 006/188] LoongArch: Adjust makefile dependency for loongarch + headers. + +gcc/ChangeLog: + + * config.gcc: Add loongarch-driver.h to tm_files. + * config/loongarch/loongarch.h: Do not include loongarch-driver.h. + * config/loongarch/t-loongarch: Append loongarch-multilib.h to $(GTM_H) + instead of $(TM_H) for building generator programs. +--- + gcc/config.gcc | 4 ++-- + gcc/config/loongarch/loongarch.h | 3 --- + gcc/config/loongarch/t-loongarch | 3 ++- + 3 files changed, 4 insertions(+), 6 deletions(-) + +diff --git a/gcc/config.gcc b/gcc/config.gcc +index e34a5fbb9..11ab620d0 100644 +--- a/gcc/config.gcc ++++ b/gcc/config.gcc +@@ -2508,7 +2508,7 @@ riscv*-*-freebsd*) + + loongarch*-*-linux*) + tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}" +- tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h" ++ tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h loongarch/loongarch-driver.h" + extra_options="${extra_options} linux-android.opt" + tmake_file="${tmake_file} loongarch/t-multilib loongarch/t-linux" + gnu_ld=yes +@@ -2521,7 +2521,7 @@ loongarch*-*-linux*) + + loongarch*-*-elf*) + tm_file="elfos.h newlib-stdint.h ${tm_file}" +- tm_file="${tm_file} loongarch/elf.h loongarch/linux.h" ++ tm_file="${tm_file} loongarch/elf.h loongarch/linux.h loongarch/loongarch-driver.h" + tmake_file="${tmake_file} loongarch/t-multilib loongarch/t-linux" + gnu_ld=yes + gas=yes +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index a443a6427..a2dc4ba8c 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -49,9 +49,6 @@ along with GCC; see the file COPYING3. If not see + + #define TARGET_LIBGCC_SDATA_SECTION ".sdata" + +-/* Driver native functions for SPEC processing in the GCC driver. 
*/ +-#include "loongarch-driver.h" +- + /* This definition replaces the formerly used 'm' constraint with a + different constraint letter in order to avoid changing semantics of + the 'm' constraint when accepting new address formats in +diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch +index 28cfb49df..12734c37b 100644 +--- a/gcc/config/loongarch/t-loongarch ++++ b/gcc/config/loongarch/t-loongarch +@@ -16,7 +16,8 @@ + # along with GCC; see the file COPYING3. If not see + # . + +-TM_H += loongarch-multilib.h $(srcdir)/config/loongarch/loongarch-driver.h ++ ++GTM_H += loongarch-multilib.h + OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h \ + $(srcdir)/config/loongarch/loongarch-tune.h + +-- +2.43.0 + diff --git a/0006-MULL64-1-3-Add-A-B-op-CST-B-match-and-simplify-optim.patch b/0006-MULL64-1-3-Add-A-B-op-CST-B-match-and-simplify-optim.patch new file mode 100644 index 0000000000000000000000000000000000000000..35dc803b530e6ec424d5f05bf577556455561968 --- /dev/null +++ b/0006-MULL64-1-3-Add-A-B-op-CST-B-match-and-simplify-optim.patch @@ -0,0 +1,89 @@ +From e7013d2640d82e928ebdaf830b6833051ac65296 Mon Sep 17 00:00:00 2001 +From: zhongyunde +Date: Sat, 5 Nov 2022 13:22:33 +0800 +Subject: [PATCH 06/22] [MULL64 1/3] Add A ? B op CST : B match and simplify + optimizations + + Refer to commit b6bdd7a4, use pattern match to simple + A ? B op CST : B (where CST is power of 2) simplifications. + Fixes the 1st issue of https://gitee.com/openeuler/gcc/issues/I5TSG0?from=project-issue. + + gcc/ + * match.pd (A ? B op CST : B): Add simplifcations for A ? B op POW2 : B + + gcc/testsuite/ + * gcc.dg/pr107190.c: New test. 
+--- + gcc/match.pd | 21 +++++++++++++++++++++ + gcc/testsuite/gcc.dg/pr107190.c | 27 +++++++++++++++++++++++++++ + 2 files changed, 48 insertions(+) + create mode 100644 gcc/testsuite/gcc.dg/pr107190.c + +diff --git a/gcc/match.pd b/gcc/match.pd +index fc2833bbd..fd0857fc9 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -4280,6 +4280,27 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) + ) + #endif + ++#if GIMPLE ++(if (canonicalize_math_p ()) ++/* These patterns are mostly used by PHIOPT to move some operations outside of ++ the if statements. They should be done late because it gives jump threading ++ and few other passes to reduce what is going on. */ ++/* a ? x op C : x -> x op (a << log2(C)) when C is power of 2. */ ++ (for op (plus minus bit_ior bit_xor lshift rshift lrotate rrotate) ++ (simplify ++ (cond @0 (op:s @1 integer_pow2p@2) @1) ++ /* powerof2cst */ ++ (if (INTEGRAL_TYPE_P (type)) ++ (with { ++ tree shift = build_int_cst (integer_type_node, tree_log2 (@2)); ++ } ++ (op @1 (lshift (convert (convert:boolean_type_node @0)) { shift; }))) ++ ) ++ ) ++ ) ++) ++#endif ++ + /* Simplification moved from fold_cond_expr_with_comparison. It may also + be extended. 
*/ + /* This pattern implements two kinds simplification: +diff --git a/gcc/testsuite/gcc.dg/pr107190.c b/gcc/testsuite/gcc.dg/pr107190.c +new file mode 100644 +index 000000000..235b2761a +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/pr107190.c +@@ -0,0 +1,27 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fexpensive-optimizations -fdump-tree-phiopt2-details" } */ ++ ++# define BN_BITS4 32 ++# define BN_MASK2 (0xffffffffffffffffL) ++# define BN_MASK2l (0xffffffffL) ++# define BN_MASK2h (0xffffffff00000000L) ++# define BN_MASK2h1 (0xffffffff80000000L) ++# define LBITS(a) ((a)&BN_MASK2l) ++# define HBITS(a) (((a)>>BN_BITS4)&BN_MASK2l) ++# define L2HBITS(a) (((a)< +Date: Mon, 25 Nov 2024 16:51:15 +0800 +Subject: [PATCH 06/16] Sw64 Port: libgcc + +--- + libgcc/config.host | 18 +++ + libgcc/config/sw_64/crtfastmath.c | 36 +++++ + libgcc/config/sw_64/libgcc-sw_64-ldbl.ver | 50 ++++++ + libgcc/config/sw_64/linux-unwind.h | 103 ++++++++++++ + libgcc/config/sw_64/qrnnd.S | 181 ++++++++++++++++++++++ + libgcc/config/sw_64/t-ieee | 2 + + libgcc/config/sw_64/t-linux | 1 + + libgcc/config/sw_64/t-sw_64 | 6 + + libgcc/libgcc2.c | 2 +- + 9 files changed, 398 insertions(+), 1 deletion(-) + create mode 100644 libgcc/config/sw_64/crtfastmath.c + create mode 100644 libgcc/config/sw_64/libgcc-sw_64-ldbl.ver + create mode 100644 libgcc/config/sw_64/linux-unwind.h + create mode 100644 libgcc/config/sw_64/qrnnd.S + create mode 100644 libgcc/config/sw_64/t-ieee + create mode 100644 libgcc/config/sw_64/t-linux + create mode 100644 libgcc/config/sw_64/t-sw_64 + +diff --git a/libgcc/config.host b/libgcc/config.host +index 8c56fcae5..01cb28d2e 100644 +--- a/libgcc/config.host ++++ b/libgcc/config.host +@@ -212,6 +212,9 @@ s390*-*-*) + sh[123456789lbe]*-*-*) + cpu_type=sh + ;; ++sw_64*-*-*) ++ cpu_type=sw_64 ++ ;; + tilegx*-*-*) + cpu_type=tilegx + ;; +@@ -1467,6 +1470,21 @@ sparc64-*-linux*) # 64-bit SPARC's running GNU/Linux + ;; + sparc64-*-netbsd*) + ;; ++sw_64*-*-linux*) ++ 
tmake_file="${tmake_file} sw_64/t-sw_64 sw_64/t-ieee t-crtfm sw_64/t-linux" ++ extra_parts="$extra_parts crtfastmath.o" ++ md_unwind_header=sw_64/linux-unwind.h ++ ;; ++sw_64*-*-freebsd*) ++ tmake_file="${tmake_file} sw_64/t-sw_64 sw_64/t-ieee t-crtfm" ++ extra_parts="$extra_parts crtbeginT.o crtfastmath.o" ++ ;; ++sw_64*-*-netbsd*) ++ tmake_file="${tmake_file} sw_64/t-sw_64 sw_64/t-ieee" ++ ;; ++sw_64*-*-openbsd*) ++ tmake_file="${tmake_file} sw_64/t-sw_64 sw_64/t-ieee" ++ ;; + tic6x-*-uclinux) + tmake_file="${tmake_file} t-softfp-sfdf t-softfp-excl t-softfp \ + c6x/t-elf c6x/t-uclinux t-crtstuff-pic t-libgcc-pic \ +diff --git a/libgcc/config/sw_64/crtfastmath.c b/libgcc/config/sw_64/crtfastmath.c +new file mode 100644 +index 000000000..aec92c819 +--- /dev/null ++++ b/libgcc/config/sw_64/crtfastmath.c +@@ -0,0 +1,36 @@ ++/* ++ * Copyright (C) 2001-2022 Free Software Foundation, Inc. ++ * Contributed by Richard Henderson (rth@redhat.com) ++ * ++ * This file is free software; you can redistribute it and/or modify it ++ * under the terms of the GNU General Public License as published by the ++ * Free Software Foundation; either version 3, or (at your option) any ++ * later version. ++ * ++ * This file is distributed in the hope that it will be useful, but ++ * WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ++ * General Public License for more details. ++ * ++ * Under Section 7 of GPL version 3, you are granted additional ++ * permissions described in the GCC Runtime Library Exception, version ++ * 3.1, as published by the Free Software Foundation. ++ * ++ * You should have received a copy of the GNU General Public License and ++ * a copy of the GCC Runtime Library Exception along with this program; ++ * see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ * . ++ */ ++ ++/* Assume SYSV/1 compatible interfaces. 
*/ ++ ++extern void ++__ieee_set_fp_control (unsigned long int); ++ ++#define IEEE_MAP_DMZ (1UL << 12) /* Map denorm inputs to zero */ ++#define IEEE_MAP_UMZ (1UL << 13) /* Map underflowed outputs to zero */ ++ ++static void __attribute__ ((constructor)) set_fast_math (void) ++{ ++ __ieee_set_fp_control (IEEE_MAP_DMZ | IEEE_MAP_UMZ); ++} +diff --git a/libgcc/config/sw_64/libgcc-sw_64-ldbl.ver b/libgcc/config/sw_64/libgcc-sw_64-ldbl.ver +new file mode 100644 +index 000000000..6666bc639 +--- /dev/null ++++ b/libgcc/config/sw_64/libgcc-sw_64-ldbl.ver +@@ -0,0 +1,50 @@ ++# Copyright (C) 2006-2019 Free Software Foundation, Inc. ++# ++# This file is part of GCC. ++# ++# GCC is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3, or (at your option) ++# any later version. ++# ++# GCC is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++%ifdef __LONG_DOUBLE_128__ ++ ++# long double 128 bit support in libgcc_s.so.1 is only available ++# when configured with --with-long-double-128. Make sure all the ++# symbols are available at @@GCC_LDBL_* versions to make it clear ++# there is a configurable symbol set. 
++ ++%exclude { ++ __fixtfdi ++ __fixunstfdi ++ __floatditf ++ ++ __divtc3 ++ __multc3 ++ __powitf2 ++} ++ ++%inherit GCC_LDBL_3.0 GCC_3.0 ++GCC_LDBL_3.0 { ++ __fixtfdi ++ __fixunstfdi ++ __floatditf ++} ++ ++%inherit GCC_LDBL_4.0.0 GCC_4.0.0 ++GCC_LDBL_4.0.0 { ++ __divtc3 ++ __multc3 ++ __powitf2 ++} ++ ++%endif +diff --git a/libgcc/config/sw_64/linux-unwind.h b/libgcc/config/sw_64/linux-unwind.h +new file mode 100644 +index 000000000..d446c123f +--- /dev/null ++++ b/libgcc/config/sw_64/linux-unwind.h +@@ -0,0 +1,103 @@ ++/* DWARF2 EH unwinding support for Sw_64 Linux. ++ Copyright (C) 2004-2022 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify ++it under the terms of the GNU General Public License as published by ++the Free Software Foundation; either version 3, or (at your option) ++any later version. ++ ++GCC is distributed in the hope that it will be useful, ++but WITHOUT ANY WARRANTY; without even the implied warranty of ++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++GNU General Public License for more details. ++ ++Under Section 7 of GPL version 3, you are granted additional ++permissions described in the GCC Runtime Library Exception, version ++3.1, as published by the Free Software Foundation. ++ ++You should have received a copy of the GNU General Public License and ++a copy of the GCC Runtime Library Exception along with this program; ++see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++. */ ++ ++#ifndef inhibit_libc ++/* Do code reading to identify a signal frame, and set the frame ++ state data appropriately. See unwind-dw2.c for the structs. 
*/ ++ ++#include ++#include ++ ++#define MD_FALLBACK_FRAME_STATE_FOR sw_64_fallback_frame_state ++ ++static _Unwind_Reason_Code ++sw_64_fallback_frame_state (struct _Unwind_Context *context, ++ _Unwind_FrameState *fs) ++{ ++ unsigned int *pc = context->ra; ++ struct sigcontext *sc; ++ long new_cfa; ++ int i; ++ ++ if (pc[0] != 0x47fe0410 /* mov $30,$16 */ ++ || pc[2] != 0x00000083) /* callsys */ ++ return _URC_END_OF_STACK; ++ if (context->cfa == 0) ++ return _URC_END_OF_STACK; ++ if (pc[1] == 0x201f0067) /* lda $0,NR_sigreturn */ ++ sc = context->cfa; ++ else if (pc[1] == 0x201f015f) /* lda $0,NR_rt_sigreturn */ ++ { ++ struct rt_sigframe ++ { ++ siginfo_t info; ++ ucontext_t uc; ++ } *rt_ = context->cfa; ++ /* The void * cast is necessary to avoid an aliasing warning. ++ The aliasing warning is correct, but should not be a problem ++ because it does not alias anything. */ ++ sc = (struct sigcontext *) (void *) &rt_->uc.uc_mcontext; ++ } ++ else ++ return _URC_END_OF_STACK; ++ ++ new_cfa = sc->sc_regs[30]; ++ fs->regs.cfa_how = CFA_REG_OFFSET; ++ fs->regs.cfa_reg = 30; ++ fs->regs.cfa_offset = new_cfa - (long) context->cfa; ++ for (i = 0; i < 30; ++i) ++ { ++ fs->regs.reg[i].how = REG_SAVED_OFFSET; ++ fs->regs.reg[i].loc.offset = (long) &sc->sc_regs[i] - new_cfa; ++ } ++ for (i = 0; i < 31; ++i) ++ { ++ fs->regs.reg[i + 32].how = REG_SAVED_OFFSET; ++ fs->regs.reg[i + 32].loc.offset = (long) &sc->sc_fpregs[i] - new_cfa; ++ } ++ fs->regs.reg[64].how = REG_SAVED_OFFSET; ++ fs->regs.reg[64].loc.offset = (long) &sc->sc_pc - new_cfa; ++ fs->retaddr_column = 64; ++ fs->signal_frame = 1; ++ ++ return _URC_NO_REASON; ++} ++ ++#define MD_FROB_UPDATE_CONTEXT sw_64_frob_update_context ++ ++/* Fix up for signal handlers that don't have S flag set. 
*/ ++ ++static void ++sw_64_frob_update_context (struct _Unwind_Context *context, ++ _Unwind_FrameState *fs ATTRIBUTE_UNUSED) ++{ ++ unsigned int *pc = context->ra; ++ ++ if (pc[0] == 0x47fe0410 /* mov $30,$16 */ ++ && pc[2] == 0x00000083 /* callsys */ ++ && (pc[1] == 0x201f0067 /* lda $0,NR_sigreturn */ ++ || pc[1] == 0x201f015f)) /* lda $0,NR_rt_sigreturn */ ++ _Unwind_SetSignalFrame (context, 1); ++} ++#endif +diff --git a/libgcc/config/sw_64/qrnnd.S b/libgcc/config/sw_64/qrnnd.S +new file mode 100644 +index 000000000..ab2e3d0bc +--- /dev/null ++++ b/libgcc/config/sw_64/qrnnd.S +@@ -0,0 +1,181 @@ ++ # Sw_64 __udiv_qrnnd ++ # Copyright (C) 1992-2022 Free Software Foundation, Inc. ++ ++ # This file is part of GCC. ++ ++ # The GNU MP Library is free software; you can redistribute it and/or modify ++ # it under the terms of the GNU General Public License as published by ++ # the Free Software Foundation; either version 3 of the License, or (at your ++ # option) any later version. ++ ++ # This file is distributed in the hope that it will be useful, but ++ # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY ++ # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Library General Public ++ # License for more details. ++ ++ # Under Section 7 of GPL version 3, you are granted additional ++ # permissions described in the GCC Runtime Library Exception, version ++ # 3.1, as published by the Free Software Foundation. ++ ++ # You should have received a copy of the GNU General Public License and ++ # a copy of the GCC Runtime Library Exception along with this program; ++ # see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ # . 
++ ++#ifdef __ELF__ ++.section .note.GNU-stack,"" ++#endif ++ ++ .set noreorder ++ .set noat ++ ++ .text ++ ++ .globl __udiv_qrnnd ++ .ent __udiv_qrnnd ++#ifdef __VMS__ ++__udiv_qrnnd..en: ++ .frame $29,0,$26,0 ++ .prologue ++#else ++__udiv_qrnnd: ++ .frame $30,0,$26,0 ++ .prologue 0 ++#endif ++/* ++ ldiq -> ldi ++ addq->addl ++ subq->subl ++ cmovne qb,tmp,n1->selne qb,tmp,n1,n1 ++ stq ->stl ++ cmoveq tmp,AT,n1(n0)->seleq tmp,AT,n1,n1(n0,n0) */ ++#define cnt $2 ++#define tmp $3 ++#define rem_ptr $16 ++#define n1 $17 ++#define n0 $18 ++#define d $19 ++#define qb $20 ++#define AT $at ++ ++ ldi cnt,16 ++ blt d,$largedivisor ++ ++$loop1: cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule d,n1,qb ++ subl n1,d,tmp ++ selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule d,n1,qb ++ subl n1,d,tmp ++ selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule d,n1,qb ++ subl n1,d,tmp ++ selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule d,n1,qb ++ subl n1,d,tmp ++ selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ subl cnt,1,cnt ++ bgt cnt,$loop1 ++ stl n1,0(rem_ptr) ++ bis $31,n0,$0 ++ ret $31,($26),1 ++ ++$largedivisor: ++ and n0,1,$4 ++ ++ srl n0,1,n0 ++ sll n1,63,tmp ++ or tmp,n0,n0 ++ srl n1,1,n1 ++ ++ and d,1,$6 ++ srl d,1,$5 ++ addl $5,$6,$5 ++ ++$loop2: cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule $5,n1,qb ++ subl n1,$5,tmp ++ selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule $5,n1,qb ++ subl n1,$5,tmp ++ selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule $5,n1,qb ++ subl n1,$5,tmp ++ selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ cmplt n0,0,tmp ++ addl n1,n1,n1 ++ bis n1,tmp,n1 ++ addl n0,n0,n0 ++ cmpule $5,n1,qb ++ subl n1,$5,tmp ++ 
selne qb,tmp,n1,n1 ++ bis n0,qb,n0 ++ subl cnt,1,cnt ++ bgt cnt,$loop2 ++ ++ addl n1,n1,n1 ++ addl $4,n1,n1 ++ bne $6,$Odd ++ stl n1,0(rem_ptr) ++ bis $31,n0,$0 ++ ret $31,($26),1 ++ ++$Odd: ++ /* q' in n0. r' in n1 */ ++ addl n1,n0,n1 ++ ++ cmpult n1,n0,tmp # tmp := carry from addl ++ subl n1,d,AT ++ addl n0,tmp,n0 ++ selne tmp,AT,n1,n1 ++ ++ cmpult n1,d,tmp ++ addl n0,1,AT ++ seleq tmp,AT,n0,n0 ++ subl n1,d,AT ++ seleq tmp,AT,n1,n1 ++ ++ stl n1,0(rem_ptr) ++ bis $31,n0,$0 ++ ret $31,($26),1 ++ ++#ifdef __VMS__ ++ .link ++ .align 3 ++__udiv_qrnnd: ++ .pdesc __udiv_qrnnd..en,null ++#endif ++ .end __udiv_qrnnd +diff --git a/libgcc/config/sw_64/t-ieee b/libgcc/config/sw_64/t-ieee +new file mode 100644 +index 000000000..9b66e50ac +--- /dev/null ++++ b/libgcc/config/sw_64/t-ieee +@@ -0,0 +1,2 @@ ++# All sw_64s get an IEEE complaint set of libraries. ++#HOST_LIBGCC2_CFLAGS += -mieee +diff --git a/libgcc/config/sw_64/t-linux b/libgcc/config/sw_64/t-linux +new file mode 100644 +index 000000000..fe9d20e9a +--- /dev/null ++++ b/libgcc/config/sw_64/t-linux +@@ -0,0 +1 @@ ++SHLIB_MAPFILES += $(srcdir)/config/sw_64/libgcc-sw_64-ldbl.ver +diff --git a/libgcc/config/sw_64/t-sw_64 b/libgcc/config/sw_64/t-sw_64 +new file mode 100644 +index 000000000..dffba8ee7 +--- /dev/null ++++ b/libgcc/config/sw_64/t-sw_64 +@@ -0,0 +1,6 @@ ++# This is a support routine for longlong.h, used by libgcc2.c. ++LIB2ADD += $(srcdir)/config/sw_64/qrnnd.S ++ ++# When GAS-generated unwind tables are created, they get created ++# after the __FRAME_END__ terminator, which causes an ld error. 
++CRTSTUFF_T_CFLAGS = -fno-unwind-tables +diff --git a/libgcc/libgcc2.c b/libgcc/libgcc2.c +index 3ebfcc83f..f01a150c4 100644 +--- a/libgcc/libgcc2.c ++++ b/libgcc/libgcc2.c +@@ -2280,7 +2280,7 @@ int mprotect (char *,int, int); + int + getpagesize (void) + { +-#ifdef _ALPHA_ ++#ifdef _ALPHA_ || defined _SW_64_ + return 8192; + #else + return 4096; +-- +2.25.1 + diff --git a/0007-LoongArch-Enable-vect.exp-for-LoongArch.-PR111424.patch b/0007-LoongArch-Enable-vect.exp-for-LoongArch.-PR111424.patch new file mode 100644 index 0000000000000000000000000000000000000000..6cc726406052cacfc8a98b9145398b31d3e00596 --- /dev/null +++ b/0007-LoongArch-Enable-vect.exp-for-LoongArch.-PR111424.patch @@ -0,0 +1,65 @@ +From b75f00086e863ac7e9e1ee37f8107b199cf62550 Mon Sep 17 00:00:00 2001 +From: Chenghui Pan +Date: Fri, 25 Oct 2024 00:58:01 +0000 +Subject: [PATCH 007/188] LoongArch: Enable vect.exp for LoongArch. [PR111424] + +gcc/testsuite/ChangeLog: + + PR target/111424 + * lib/target-supports.exp: Enable vect.exp for LoongArch. 
+--- + gcc/testsuite/lib/target-supports.exp | 31 +++++++++++++++++++++++++++ + 1 file changed, 31 insertions(+) + +diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp +index 192e0aded..bbe145c1c 100644 +--- a/gcc/testsuite/lib/target-supports.exp ++++ b/gcc/testsuite/lib/target-supports.exp +@@ -10535,6 +10535,13 @@ proc check_vect_support_and_set_flags { } { + } + } elseif [istarget amdgcn-*-*] { + set dg-do-what-default run ++ } elseif [istarget loongarch*-*-*] { ++ lappend DEFAULT_VECTCFLAGS "-mdouble-float" "-mlasx" ++ if [check_effective_target_loongarch_asx_hw] { ++ set dg-do-what-default run ++ } else { ++ set dg-do-what-default compile ++ } + } else { + return 0 + } +@@ -10542,6 +10549,30 @@ proc check_vect_support_and_set_flags { } { + return 1 + } + ++proc check_effective_target_loongarch_sx_hw { } { ++ return [check_runtime loongarch_sx_hw { ++ #include ++ int main (void) ++ { ++ __m128i a, b, c; ++ c = __lsx_vand_v (a, b); ++ return 0; ++ } ++ } "-mlsx"] ++} ++ ++proc check_effective_target_loongarch_asx_hw { } { ++ return [check_runtime loongarch_asx_hw { ++ #include ++ int main (void) ++ { ++ __m256i a, b, c; ++ c = __lasx_xvand_v (a, b); ++ return 0; ++ } ++ } "-mlasx"] ++} ++ + # Return 1 if the target does *not* require strict alignment. 
+ + proc check_effective_target_non_strict_align {} { +-- +2.43.0 + diff --git a/0007-MULL64-2-3-Fold-series-of-instructions-into-mul.patch b/0007-MULL64-2-3-Fold-series-of-instructions-into-mul.patch new file mode 100644 index 0000000000000000000000000000000000000000..efdd318cb4e13cc0e6f651c625904bc78cb036b7 --- /dev/null +++ b/0007-MULL64-2-3-Fold-series-of-instructions-into-mul.patch @@ -0,0 +1,130 @@ +From 547ab9b3e073ef389e5fd89d961bb1e3e6934ae9 Mon Sep 17 00:00:00 2001 +From: zhongyunde +Date: Wed, 9 Nov 2022 17:04:13 +0800 +Subject: [PATCH 07/22] [MULL64 2/3] Fold series of instructions into mul + + Merge the low part of series instructions into mul + + gcc/ + * match.pd: Add simplifcations for low part of mul + * common.opt: Add new option fmerge-mull enable with -O2 + * opts.c: default_options_table + + gcc/testsuite/ + * g++.dg/tree-ssa/mull64.C: New test. +--- + gcc/common.opt | 4 +++ + gcc/match.pd | 27 ++++++++++++++++++++ + gcc/opts.cc | 1 + + gcc/testsuite/g++.dg/tree-ssa/mull64.C | 34 ++++++++++++++++++++++++++ + 4 files changed, 66 insertions(+) + create mode 100644 gcc/testsuite/g++.dg/tree-ssa/mull64.C + +diff --git a/gcc/common.opt b/gcc/common.opt +index 8a0dafc52..e365a48bc 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -2126,6 +2126,10 @@ fmerge-debug-strings + Common Var(flag_merge_debug_strings) Init(1) + Attempt to merge identical debug strings across compilation units. + ++fmerge-mull ++Common Var(flag_merge_mull) Init(0) Optimization ++Attempt to merge series instructions into mul. ++ + fmessage-length= + Common RejectNegative Joined UInteger + -fmessage-length= Limit diagnostics to characters per line. 0 suppresses line-wrapping. +diff --git a/gcc/match.pd b/gcc/match.pd +index fd0857fc9..2092e6959 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -4301,6 +4301,33 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) + ) + #endif + ++#if GIMPLE ++/* These patterns are mostly used by FORWPROP1 to fold some operations into more ++ simple IR. 
The following scenario should be matched: ++ In0Lo = In0(D) & 4294967295; ++ In0Hi = In0(D) >> 32; ++ In1Lo = In1(D) & 4294967295; ++ In1Hi = In1(D) >> 32; ++ Addc = In0Lo * In1Hi + In0Hi * In1Lo; ++ addc32 = Addc << 32; ++ ResLo = In0Lo * In1Lo + addc32 */ ++(simplify ++ (plus:c (mult @4 @5) ++ (lshift ++ (plus:c ++ (mult (bit_and@4 SSA_NAME@0 @2) (rshift SSA_NAME@1 @3)) ++ (mult (rshift SSA_NAME@0 @3) (bit_and@5 SSA_NAME@1 INTEGER_CST@2))) ++ INTEGER_CST@3 ++ ) ++ ) ++ (if (flag_merge_mull && INTEGRAL_TYPE_P (type) ++ && INTEGRAL_TYPE_P (TREE_TYPE (@0)) && types_match (@0, @1) ++ && TYPE_PRECISION (type) == 64) ++ (mult (convert:type @0) (convert:type @1)) ++ ) ++) ++#endif ++ + /* Simplification moved from fold_cond_expr_with_comparison. It may also + be extended. */ + /* This pattern implements two kinds simplification: +diff --git a/gcc/opts.cc b/gcc/opts.cc +index a97630d1c..eae71ed20 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -647,6 +647,7 @@ static const struct default_options default_options_table[] = + VECT_COST_MODEL_VERY_CHEAP }, + { OPT_LEVELS_2_PLUS, OPT_finline_functions, NULL, 1 }, + { OPT_LEVELS_2_PLUS, OPT_ftree_loop_distribute_patterns, NULL, 1 }, ++ { OPT_LEVELS_2_PLUS, OPT_fmerge_mull, NULL, 1 }, + + /* -O2 and above optimizations, but not -Os or -Og. 
*/ + { OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_falign_functions, NULL, 1 }, +diff --git a/gcc/testsuite/g++.dg/tree-ssa/mull64.C b/gcc/testsuite/g++.dg/tree-ssa/mull64.C +new file mode 100644 +index 000000000..2a3b74604 +--- /dev/null ++++ b/gcc/testsuite/g++.dg/tree-ssa/mull64.C +@@ -0,0 +1,34 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -Wno-psabi -fmerge-mull -fdump-tree-forwprop1-details" } */ ++ ++# define BN_BITS4 32 ++# define BN_MASK2 (0xffffffffffffffffL) ++# define BN_MASK2l (0xffffffffL) ++# define BN_MASK2h (0xffffffff00000000L) ++# define BN_MASK2h1 (0xffffffff80000000L) ++# define LBITS(a) ((a)&BN_MASK2l) ++# define HBITS(a) (((a)>>BN_BITS4)&BN_MASK2l) ++# define L2HBITS(a) (((a)< +Date: Mon, 25 Nov 2024 16:51:39 +0800 +Subject: [PATCH 07/16] Sw64 Port: libffi + +--- + libffi/Makefile.in | 23 +- + libffi/configure.host | 7 + + libffi/src/sw_64/ffi.c | 516 +++++++++++++++++++++++++++++++++++ + libffi/src/sw_64/ffitarget.h | 60 ++++ + libffi/src/sw_64/internal.h | 23 ++ + libffi/src/sw_64/sysv.S | 281 +++++++++++++++++++ + libffi/src/types.c | 4 +- + 7 files changed, 910 insertions(+), 4 deletions(-) + create mode 100644 libffi/src/sw_64/ffi.c + create mode 100644 libffi/src/sw_64/ffitarget.h + create mode 100644 libffi/src/sw_64/internal.h + create mode 100644 libffi/src/sw_64/sysv.S + +diff --git a/libffi/Makefile.in b/libffi/Makefile.in +index 5524a6a57..217ce305e 100644 +--- a/libffi/Makefile.in ++++ b/libffi/Makefile.in +@@ -548,6 +548,7 @@ noinst_HEADERS = src/aarch64/ffitarget.h src/aarch64/internal.h \ + src/s390/ffitarget.h src/s390/internal.h src/sh/ffitarget.h \ + src/sh64/ffitarget.h src/sparc/ffitarget.h \ + src/sparc/internal.h src/tile/ffitarget.h src/vax/ffitarget.h \ ++ src/sw_64/ffitarget.h src/sw_64/internal.h \ + src/x86/ffitarget.h src/x86/internal.h src/x86/internal64.h \ + src/x86/asmnames.h src/xtensa/ffitarget.h src/dlmalloc.c \ + src/kvx/ffitarget.h +@@ -576,6 +577,7 @@ EXTRA_libffi_la_SOURCES = src/aarch64/ffi.c 
src/aarch64/sysv.S \ + src/s390/sysv.S src/sh/ffi.c src/sh/sysv.S src/sh64/ffi.c \ + src/sh64/sysv.S src/sparc/ffi.c src/sparc/ffi64.c \ + src/sparc/v8.S src/sparc/v9.S src/tile/ffi.c src/tile/tile.S \ ++ src/sw_64/ffi.c src/sw_64/sysv.S \ + src/vax/ffi.c src/vax/elfbsd.S src/x86/ffi.c src/x86/sysv.S \ + src/x86/ffiw64.c src/x86/win64.S src/x86/ffi64.c \ + src/x86/unix64.S src/x86/sysv_intel.S src/x86/win64_intel.S \ +@@ -1012,6 +1014,16 @@ src/sparc/v8.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) + src/sparc/v9.lo: src/sparc/$(am__dirstamp) \ + src/sparc/$(DEPDIR)/$(am__dirstamp) ++src/sw_64/$(am__dirstamp): ++ @$(MKDIR_P) src/sw_64 ++ @: > src/sw_64/$(am__dirstamp) ++src/sw_64/$(DEPDIR)/$(am__dirstamp): ++ @$(MKDIR_P) src/sw_64/$(DEPDIR) ++ @: > src/sw_64/$(DEPDIR)/$(am__dirstamp) ++src/sw_64/ffi.lo: src/sw_64/$(am__dirstamp) \ ++ src/sw_64/$(DEPDIR)/$(am__dirstamp) ++src/sw_64/sysv.lo: src/sw_64/$(am__dirstamp) \ ++ src/sw_64/$(DEPDIR)/$(am__dirstamp) + src/tile/$(am__dirstamp): + @$(MKDIR_P) src/tile + @: > src/tile/$(am__dirstamp) +@@ -1139,6 +1151,8 @@ mostlyclean-compile: + -rm -f src/sh64/*.lo + -rm -f src/sparc/*.$(OBJEXT) + -rm -f src/sparc/*.lo ++ -rm -f src/sw_64/*.$(OBJEXT) ++ -rm -f src/sw_64/*.lo + -rm -f src/tile/*.$(OBJEXT) + -rm -f src/tile/*.lo + -rm -f src/vax/*.$(OBJEXT) +@@ -1228,6 +1242,8 @@ distclean-compile: + @AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/ffi64.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/v8.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@src/sparc/$(DEPDIR)/v9.Plo@am__quote@ ++@AMDEP_TRUE@@am__include@ @am__quote@src/sw_64/$(DEPDIR)/ffi.Plo@am__quote@ ++@AMDEP_TRUE@@am__include@ @am__quote@src/sw_64/$(DEPDIR)/sysv.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@src/tile/$(DEPDIR)/ffi.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ @am__quote@src/tile/$(DEPDIR)/tile.Plo@am__quote@ + @AMDEP_TRUE@@am__include@ 
@am__quote@src/vax/$(DEPDIR)/elfbsd.Plo@am__quote@ +@@ -1324,6 +1340,7 @@ clean-libtool: + -rm -rf src/sh/.libs src/sh/_libs + -rm -rf src/sh64/.libs src/sh64/_libs + -rm -rf src/sparc/.libs src/sparc/_libs ++ -rm -rf src/sw_64/.libs src/sw_64/_libs + -rm -rf src/tile/.libs src/tile/_libs + -rm -rf src/vax/.libs src/vax/_libs + -rm -rf src/x86/.libs src/x86/_libs +@@ -1690,6 +1707,8 @@ distclean-generic: + -rm -f src/sh64/$(am__dirstamp) + -rm -f src/sparc/$(DEPDIR)/$(am__dirstamp) + -rm -f src/sparc/$(am__dirstamp) ++ -rm -f src/sw_64/$(DEPDIR)/$(am__dirstamp) ++ -rm -f src/sw_64/$(am__dirstamp) + -rm -f src/tile/$(DEPDIR)/$(am__dirstamp) + -rm -f src/tile/$(am__dirstamp) + -rm -f src/vax/$(DEPDIR)/$(am__dirstamp) +@@ -1712,7 +1731,7 @@ clean-am: clean-aminfo clean-generic clean-libtool clean-local \ + + distclean: distclean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) +- -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/csky/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/kvx/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) ++ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/csky/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/kvx/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) 
src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/sw_64/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile + distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-libtool distclean-local distclean-tags +@@ -1851,7 +1870,7 @@ installcheck-am: + maintainer-clean: maintainer-clean-recursive + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache +- -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/csky/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/kvx/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) ++ -rm -rf src/$(DEPDIR) src/aarch64/$(DEPDIR) src/alpha/$(DEPDIR) src/arc/$(DEPDIR) src/arm/$(DEPDIR) src/avr32/$(DEPDIR) src/bfin/$(DEPDIR) src/cris/$(DEPDIR) src/csky/$(DEPDIR) src/frv/$(DEPDIR) src/ia64/$(DEPDIR) src/kvx/$(DEPDIR) src/m32r/$(DEPDIR) src/m68k/$(DEPDIR) src/m88k/$(DEPDIR) src/metag/$(DEPDIR) src/microblaze/$(DEPDIR) src/mips/$(DEPDIR) src/moxie/$(DEPDIR) src/nios2/$(DEPDIR) src/or1k/$(DEPDIR) src/pa/$(DEPDIR) src/powerpc/$(DEPDIR) src/riscv/$(DEPDIR) src/s390/$(DEPDIR) src/sh/$(DEPDIR) src/sh64/$(DEPDIR) src/sparc/$(DEPDIR) src/sw_64/$(DEPDIR) src/tile/$(DEPDIR) src/vax/$(DEPDIR) src/x86/$(DEPDIR) src/xtensa/$(DEPDIR) + -rm -f Makefile + maintainer-clean-am: distclean-am maintainer-clean-aminfo \ + maintainer-clean-generic maintainer-clean-local \ +diff --git a/libffi/configure.host b/libffi/configure.host +index 268267183..200f2d415 100644 +--- 
a/libffi/configure.host ++++ b/libffi/configure.host +@@ -247,6 +247,13 @@ case "${host}" in + SOURCES="ffi.c ffi64.c v8.S v9.S" + ;; + ++ sw_64*-*-*) ++ TARGET=SW_64; TARGETDIR=sw_64; ++ # Support 128-bit long double, changeable via command-line switch. ++ HAVE_LONG_DOUBLE='defined(__LONG_DOUBLE_128__)' ++ SOURCES="ffi.c sysv.S" ++ ;; ++ + tile*-*) + TARGET=TILE; TARGETDIR=tile + SOURCES="ffi.c tile.S" +diff --git a/libffi/src/sw_64/ffi.c b/libffi/src/sw_64/ffi.c +new file mode 100644 +index 000000000..2accc48ad +--- /dev/null ++++ b/libffi/src/sw_64/ffi.c +@@ -0,0 +1,516 @@ ++/* ----------------------------------------------------------------------- ++ ffi.c - Copyright (c) 2012 Anthony Green ++ Copyright (c) 1998, 2001, 2007, 2008 Red Hat, Inc. ++ Copyright (c) 2023, Wxiat ++ Sunway Foreign Function Interface ++ ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ ``Software''), to deal in the Software without restriction, including ++ without limitation the rights to use, copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ ++ The above copyright notice and this permission notice shall be included ++ in all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. 
++ ----------------------------------------------------------------------- */ ++ ++#include ++#include ++#include ++#include "internal.h" ++ ++/* Force FFI_TYPE_LONGDOUBLE to be different than FFI_TYPE_DOUBLE; ++ all further uses in this file will refer to the 128-bit type. */ ++#if defined(__LONG_DOUBLE_128__) ++#if FFI_TYPE_LONGDOUBLE != 4 ++#error FFI_TYPE_LONGDOUBLE out of date ++#endif ++#else ++#undef FFI_TYPE_LONGDOUBLE ++#define FFI_TYPE_LONGDOUBLE 4 ++#endif ++ ++extern void ++ffi_call_sysv (void *stack, void *frame, unsigned flags, void *raddr, ++ void (*fn) (void), void *closure) FFI_HIDDEN; ++extern void ++ffi_closure_sysv (void) FFI_HIDDEN; ++extern void ++ffi_go_closure_sysv (void) FFI_HIDDEN; ++ ++/* Promote a float value to its in-register double representation. ++ Unlike actually casting to double, this does not trap on NaN. */ ++static inline UINT64 ++lds (void *ptr) ++{ ++ UINT64 ret; ++ asm ("flds %0,%1" : "=f"(ret) : "m"(*(UINT32 *) ptr)); ++ return ret; ++} ++ ++/* And the reverse. */ ++static inline void ++sts (void *ptr, UINT64 val) ++{ ++ asm ("fsts %1,%0" : "=m"(*(UINT32 *) ptr) : "f"(val)); ++} ++ ++ffi_status FFI_HIDDEN ++ffi_prep_cif_machdep (ffi_cif *cif) ++{ ++ size_t bytes = 0; ++ int flags, i, avn; ++ ffi_type *rtype, *itype; ++ ++ if (cif->abi != FFI_OSF) ++ return FFI_BAD_ABI; ++ ++ /* Compute the size of the argument area. */ ++ for (i = 0, avn = cif->nargs; i < avn; i++) ++ { ++ itype = cif->arg_types[i]; ++ switch (itype->type) ++ { ++ case FFI_TYPE_INT: ++ case FFI_TYPE_SINT8: ++ case FFI_TYPE_UINT8: ++ case FFI_TYPE_SINT16: ++ case FFI_TYPE_UINT16: ++ case FFI_TYPE_SINT32: ++ case FFI_TYPE_UINT32: ++ case FFI_TYPE_SINT64: ++ case FFI_TYPE_UINT64: ++ case FFI_TYPE_POINTER: ++ case FFI_TYPE_FLOAT: ++ case FFI_TYPE_DOUBLE: ++ case FFI_TYPE_LONGDOUBLE: ++ /* All take one 8 byte slot. */ ++ bytes += 8; ++ break; ++ ++ case FFI_TYPE_VOID: ++ case FFI_TYPE_STRUCT: ++ /* Passed by value in N slots. 
*/ ++ bytes += FFI_ALIGN (itype->size, FFI_SIZEOF_ARG); ++ break; ++ ++ case FFI_TYPE_COMPLEX: ++ /* _Complex long double passed by reference; others in 2 slots. */ ++ if (itype->elements[0]->type == FFI_TYPE_LONGDOUBLE) ++ bytes += 8; ++ else ++ bytes += 16; ++ break; ++ ++ default: ++ abort (); ++ } ++ } ++ ++ /* Set the return type flag */ ++ rtype = cif->rtype; ++ switch (rtype->type) ++ { ++ case FFI_TYPE_VOID: ++ flags = SW_64_FLAGS (SW_64_ST_VOID, SW_64_LD_VOID); ++ break; ++ case FFI_TYPE_INT: ++ case FFI_TYPE_UINT32: ++ case FFI_TYPE_SINT32: ++ flags = SW_64_FLAGS (SW_64_ST_INT, SW_64_LD_INT32); ++ break; ++ case FFI_TYPE_FLOAT: ++ flags = SW_64_FLAGS (SW_64_ST_FLOAT, SW_64_LD_FLOAT); ++ break; ++ case FFI_TYPE_DOUBLE: ++ flags = SW_64_FLAGS (SW_64_ST_DOUBLE, SW_64_LD_DOUBLE); ++ break; ++ case FFI_TYPE_UINT8: ++ flags = SW_64_FLAGS (SW_64_ST_INT, SW_64_LD_UINT8); ++ break; ++ case FFI_TYPE_SINT8: ++ flags = SW_64_FLAGS (SW_64_ST_INT, SW_64_LD_SINT8); ++ break; ++ case FFI_TYPE_UINT16: ++ flags = SW_64_FLAGS (SW_64_ST_INT, SW_64_LD_UINT16); ++ break; ++ case FFI_TYPE_SINT16: ++ flags = SW_64_FLAGS (SW_64_ST_INT, SW_64_LD_SINT16); ++ break; ++ case FFI_TYPE_UINT64: ++ case FFI_TYPE_SINT64: ++ case FFI_TYPE_POINTER: ++ flags = SW_64_FLAGS (SW_64_ST_INT, SW_64_LD_INT64); ++ break; ++ case FFI_TYPE_LONGDOUBLE: ++ case FFI_TYPE_STRUCT: ++ /* Passed in memory, with a hidden pointer. */ ++ flags = SW_64_RET_IN_MEM; ++ break; ++ case FFI_TYPE_COMPLEX: ++ itype = rtype->elements[0]; ++ switch (itype->type) ++ { ++ case FFI_TYPE_FLOAT: ++ flags = SW_64_FLAGS (SW_64_ST_CPLXF, SW_64_LD_CPLXF); ++ break; ++ case FFI_TYPE_DOUBLE: ++ flags = SW_64_FLAGS (SW_64_ST_CPLXD, SW_64_LD_CPLXD); ++ break; ++ default: ++ if (rtype->size <= 8) ++ flags = SW_64_FLAGS (SW_64_ST_INT, SW_64_LD_INT64); ++ else ++ flags = SW_64_RET_IN_MEM; ++ break; ++ } ++ break; ++ default: ++ abort (); ++ } ++ cif->flags = flags; ++ ++ /* Include the hidden structure pointer in args requirement. 
*/ ++ if (flags == SW_64_RET_IN_MEM) ++ bytes += 8; ++ /* Minimum size is 6 slots, so that ffi_call_sysv can pop them. */ ++ if (bytes < 6 * 8) ++ bytes = 6 * 8; ++ cif->bytes = bytes; ++ ++ return FFI_OK; ++} ++ ++static unsigned long ++extend_basic_type (void *valp, int type, int argn) ++{ ++ switch (type) ++ { ++ case FFI_TYPE_SINT8: ++ return *(SINT8 *) valp; ++ case FFI_TYPE_UINT8: ++ return *(UINT8 *) valp; ++ case FFI_TYPE_SINT16: ++ return *(SINT16 *) valp; ++ case FFI_TYPE_UINT16: ++ return *(UINT16 *) valp; ++ ++ case FFI_TYPE_FLOAT: ++ if (argn < 6) ++ return lds (valp); ++ /* FALLTHRU */ ++ ++ case FFI_TYPE_INT: ++ case FFI_TYPE_SINT32: ++ case FFI_TYPE_UINT32: ++ /* Note that unsigned 32-bit quantities are sign extended. */ ++ return *(SINT32 *) valp; ++ ++ case FFI_TYPE_SINT64: ++ case FFI_TYPE_UINT64: ++ case FFI_TYPE_POINTER: ++ case FFI_TYPE_DOUBLE: ++ return *(UINT64 *) valp; ++ ++ default: ++ abort (); ++ } ++} ++ ++static void ++ffi_call_int (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, ++ void *closure) ++{ ++ unsigned long *argp; ++ long i, avn, argn, flags = cif->flags; ++ ffi_type **arg_types; ++ void *frame; ++ ++ /* If the return value is a struct and we don't have a return ++ value address then we need to make one. */ ++ if (rvalue == NULL && flags == SW_64_RET_IN_MEM) ++ rvalue = alloca (cif->rtype->size); ++ ++ /* Allocate the space for the arguments, plus 4 words of temp ++ space for ffi_call_sysv. 
*/ ++ argp = frame = alloca (cif->bytes + 4 * FFI_SIZEOF_ARG); ++ frame += cif->bytes; ++ ++ argn = 0; ++ if (flags == SW_64_RET_IN_MEM) ++ argp[argn++] = (unsigned long) rvalue; ++ ++ avn = cif->nargs; ++ arg_types = cif->arg_types; ++ ++ for (i = 0, avn = cif->nargs; i < avn; i++) ++ { ++ ffi_type *ty = arg_types[i]; ++ void *valp = avalue[i]; ++ int type = ty->type; ++ size_t size; ++ ++ switch (type) ++ { ++ case FFI_TYPE_INT: ++ case FFI_TYPE_SINT8: ++ case FFI_TYPE_UINT8: ++ case FFI_TYPE_SINT16: ++ case FFI_TYPE_UINT16: ++ case FFI_TYPE_SINT32: ++ case FFI_TYPE_UINT32: ++ case FFI_TYPE_SINT64: ++ case FFI_TYPE_UINT64: ++ case FFI_TYPE_POINTER: ++ case FFI_TYPE_FLOAT: ++ case FFI_TYPE_DOUBLE: ++ argp[argn] = extend_basic_type (valp, type, argn); ++ argn++; ++ break; ++ ++ case FFI_TYPE_LONGDOUBLE: ++ by_reference: ++ /* Note that 128-bit long double is passed by reference. */ ++ argp[argn++] = (unsigned long) valp; ++ break; ++ ++ case FFI_TYPE_VOID: ++ case FFI_TYPE_STRUCT: ++ size = ty->size; ++ memcpy (argp + argn, valp, size); ++ argn += FFI_ALIGN (size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; ++ break; ++ ++ case FFI_TYPE_COMPLEX: ++ type = ty->elements[0]->type; ++ if (type == FFI_TYPE_LONGDOUBLE) ++ goto by_reference; ++ ++ /* Most complex types passed as two separate arguments. 
*/ ++ size = ty->elements[0]->size; ++ argp[argn] = extend_basic_type (valp, type, argn); ++ argp[argn + 1] = extend_basic_type (valp + size, type, argn + 1); ++ argn += 2; ++ break; ++ ++ default: ++ abort (); ++ } ++ } ++ ++ flags = (flags >> SW_64_ST_SHIFT) & 0xff; ++ ffi_call_sysv (argp, frame, flags, rvalue, fn, closure); ++} ++ ++void ++ffi_call (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue) ++{ ++ ffi_call_int (cif, fn, rvalue, avalue, NULL); ++} ++ ++void ++ffi_call_go (ffi_cif *cif, void (*fn) (void), void *rvalue, void **avalue, ++ void *closure) ++{ ++ ffi_call_int (cif, fn, rvalue, avalue, closure); ++} ++ ++ffi_status ++ffi_prep_closure_loc (ffi_closure *closure, ffi_cif *cif, ++ void (*fun) (ffi_cif *, void *, void **, void *), ++ void *user_data, void *codeloc) ++{ ++ unsigned int *tramp; ++ ++ if (cif->abi != FFI_OSF) ++ return FFI_BAD_ABI; ++ ++ tramp = (unsigned int *) &closure->tramp[0]; ++ tramp[0] = 0x43fb0741; /* mov $27,$1 */ ++ tramp[1] = 0x8f7b0010; /* ldl $27,16($27) */ ++ tramp[2] = 0x0ffb0000; /* jmp $31,($27),0 */ ++ tramp[3] = 0x43ff075f; /* nop */ ++ *(void **) &tramp[4] = ffi_closure_sysv; ++ ++ closure->cif = cif; ++ closure->fun = fun; ++ closure->user_data = user_data; ++ ++ /* Flush the Icache. 0x86 is PAL_imb in Tru64 UNIX . 
*/ ++ asm volatile ("sys_call 0x86" : : : "memory"); ++ ++ return FFI_OK; ++} ++ ++ffi_status ++ffi_prep_go_closure (ffi_go_closure *closure, ffi_cif *cif, ++ void (*fun) (ffi_cif *, void *, void **, void *)) ++{ ++ if (cif->abi != FFI_OSF) ++ return FFI_BAD_ABI; ++ ++ closure->tramp = (void *) ffi_go_closure_sysv; ++ closure->cif = cif; ++ closure->fun = fun; ++ ++ return FFI_OK; ++} ++ ++long FFI_HIDDEN ++ffi_closure_sysv_inner (ffi_cif *cif, ++ void (*fun) (ffi_cif *, void *, void **, void *), ++ void *user_data, void *rvalue, unsigned long *argp) ++{ ++ void **avalue; ++ ffi_type **arg_types; ++ long i, avn, argn, flags; ++ ++ avalue = alloca (cif->nargs * sizeof (void *)); ++ flags = cif->flags; ++ argn = 0; ++ ++ /* Copy the caller's structure return address to that the closure ++ returns the data directly to the caller. */ ++ if (flags == SW_64_RET_IN_MEM) ++ { ++ rvalue = (void *) argp[0]; ++ argn = 1; ++ } ++ ++ arg_types = cif->arg_types; ++ ++ /* Grab the addresses of the arguments from the stack frame. */ ++ for (i = 0, avn = cif->nargs; i < avn; i++) ++ { ++ ffi_type *ty = arg_types[i]; ++ int type = ty->type; ++ void *valp = &argp[argn]; ++ size_t size; ++ ++ switch (type) ++ { ++ case FFI_TYPE_INT: ++ case FFI_TYPE_SINT8: ++ case FFI_TYPE_UINT8: ++ case FFI_TYPE_SINT16: ++ case FFI_TYPE_UINT16: ++ case FFI_TYPE_SINT32: ++ case FFI_TYPE_UINT32: ++ case FFI_TYPE_SINT64: ++ case FFI_TYPE_UINT64: ++ case FFI_TYPE_POINTER: ++ argn += 1; ++ break; ++ ++ case FFI_TYPE_VOID: ++ case FFI_TYPE_STRUCT: ++ size = ty->size; ++ argn += FFI_ALIGN (size, FFI_SIZEOF_ARG) / FFI_SIZEOF_ARG; ++ break; ++ ++ case FFI_TYPE_FLOAT: ++ /* Floats coming from registers need conversion from double ++ back to float format. 
*/ ++ if (argn < 6) ++ { ++ valp = &argp[argn - 6]; ++ sts (valp, argp[argn - 6]); ++ } ++ argn += 1; ++ break; ++ ++ case FFI_TYPE_DOUBLE: ++ if (argn < 6) ++ valp = &argp[argn - 6]; ++ argn += 1; ++ break; ++ ++ case FFI_TYPE_LONGDOUBLE: ++ by_reference: ++ /* 128-bit long double is passed by reference. */ ++ valp = (void *) argp[argn]; ++ argn += 1; ++ break; ++ ++ case FFI_TYPE_COMPLEX: ++ type = ty->elements[0]->type; ++ switch (type) ++ { ++ case FFI_TYPE_SINT64: ++ case FFI_TYPE_UINT64: ++ /* Passed as separate arguments, but they wind up sequential. */ ++ break; ++ ++ case FFI_TYPE_INT: ++ case FFI_TYPE_SINT8: ++ case FFI_TYPE_UINT8: ++ case FFI_TYPE_SINT16: ++ case FFI_TYPE_UINT16: ++ case FFI_TYPE_SINT32: ++ case FFI_TYPE_UINT32: ++ /* Passed as separate arguments. Disjoint, but there's room ++ enough in one slot to hold the pair. */ ++ size = ty->elements[0]->size; ++ memcpy (valp + size, valp + 8, size); ++ break; ++ ++ case FFI_TYPE_FLOAT: ++ /* Passed as separate arguments. Disjoint, and each piece ++ may need conversion back to float. */ ++ if (argn < 6) ++ { ++ valp = &argp[argn - 6]; ++ sts (valp, argp[argn - 6]); ++ } ++ if (argn + 1 < 6) ++ sts (valp + 4, argp[argn + 1 - 6]); ++ else ++ *(UINT32 *) (valp + 4) = argp[argn + 1]; ++ break; ++ ++ case FFI_TYPE_DOUBLE: ++ /* Passed as separate arguments. Only disjoint if one part ++ is in fp regs and the other is on the stack. */ ++ if (argn < 5) ++ valp = &argp[argn - 6]; ++ else if (argn == 5) ++ { ++ valp = alloca (16); ++ ((UINT64 *) valp)[0] = argp[5 - 6]; ++ ((UINT64 *) valp)[1] = argp[6]; ++ } ++ break; ++ ++ case FFI_TYPE_LONGDOUBLE: ++ goto by_reference; ++ ++ default: ++ abort (); ++ } ++ argn += 2; ++ break; ++ ++ default: ++ abort (); ++ } ++ ++ avalue[i] = valp; ++ } ++ ++ /* Invoke the closure. */ ++ fun (cif, rvalue, avalue, user_data); ++ ++ /* Tell ffi_closure_sysv how to perform return type promotions. 
*/ ++ return (flags >> SW_64_LD_SHIFT) & 0xff; ++} +diff --git a/libffi/src/sw_64/ffitarget.h b/libffi/src/sw_64/ffitarget.h +new file mode 100644 +index 000000000..4ea1493c5 +--- /dev/null ++++ b/libffi/src/sw_64/ffitarget.h +@@ -0,0 +1,60 @@ ++/* -----------------------------------------------------------------*-C-*- ++ ffitarget.h - Copyright (c) 2012 Anthony Green ++ Copyright (c) 1996-2003 Red Hat, Inc. ++ Copyright (c) 2023, Wxiat ++ Target configuration macros for Sunway. ++ ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ ``Software''), to deal in the Software without restriction, including ++ without limitation the rights to use, copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ ++ The above copyright notice and this permission notice shall be included ++ in all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. ++ ++ ----------------------------------------------------------------------- */ ++ ++#ifndef LIBFFI_TARGET_H ++#define LIBFFI_TARGET_H ++ ++#ifndef LIBFFI_H ++#error \ ++ "Please do not include ffitarget.h directly into your source. Use ffi.h instead." 
++#endif ++ ++#ifndef LIBFFI_ASM ++typedef unsigned long ffi_arg; ++typedef signed long ffi_sarg; ++ ++typedef enum ffi_abi ++{ ++ FFI_FIRST_ABI = 0, ++ FFI_OSF, ++ FFI_LAST_ABI, ++ FFI_DEFAULT_ABI = FFI_OSF ++} ffi_abi; ++#endif ++ ++#define FFI_TARGET_SPECIFIC_STACK_SPACE_ALLOCATION ++#define FFI_TARGET_HAS_COMPLEX_TYPE ++ ++/* ---- Definitions for closures ----------------------------------------- */ ++ ++#define FFI_CLOSURES 1 ++#define FFI_GO_CLOSURES 1 ++#define FFI_TRAMPOLINE_SIZE 24 ++#define FFI_NATIVE_RAW_API 0 ++ ++#endif +diff --git a/libffi/src/sw_64/internal.h b/libffi/src/sw_64/internal.h +new file mode 100644 +index 000000000..92ad32179 +--- /dev/null ++++ b/libffi/src/sw_64/internal.h +@@ -0,0 +1,23 @@ ++#define SW_64_ST_VOID 0 ++#define SW_64_ST_INT 1 ++#define SW_64_ST_FLOAT 2 ++#define SW_64_ST_DOUBLE 3 ++#define SW_64_ST_CPLXF 4 ++#define SW_64_ST_CPLXD 5 ++ ++#define SW_64_LD_VOID 0 ++#define SW_64_LD_INT64 1 ++#define SW_64_LD_INT32 2 ++#define SW_64_LD_UINT16 3 ++#define SW_64_LD_SINT16 4 ++#define SW_64_LD_UINT8 5 ++#define SW_64_LD_SINT8 6 ++#define SW_64_LD_FLOAT 7 ++#define SW_64_LD_DOUBLE 8 ++#define SW_64_LD_CPLXF 9 ++#define SW_64_LD_CPLXD 10 ++ ++#define SW_64_ST_SHIFT 0 ++#define SW_64_LD_SHIFT 8 ++#define SW_64_RET_IN_MEM 0x10000 ++#define SW_64_FLAGS(S, L) (((L) << SW_64_LD_SHIFT) | (S)) +diff --git a/libffi/src/sw_64/sysv.S b/libffi/src/sw_64/sysv.S +new file mode 100644 +index 000000000..2c31400a4 +--- /dev/null ++++ b/libffi/src/sw_64/sysv.S +@@ -0,0 +1,281 @@ ++/* ----------------------------------------------------------------------- ++ sysv.S - Copyright (c) 1998, 2001, 2007, 2008, 2011, 2014 Red Hat ++ Copyright (c) 2023, Wxiat ++ Sunway/SYSV Foreign Function Interface ++ ++ Permission is hereby granted, free of charge, to any person obtaining ++ a copy of this software and associated documentation files (the ++ ``Software''), to deal in the Software without restriction, including ++ without limitation the rights to use, 
copy, modify, merge, publish, ++ distribute, sublicense, and/or sell copies of the Software, and to ++ permit persons to whom the Software is furnished to do so, subject to ++ the following conditions: ++ ++ The above copyright notice and this permission notice shall be included ++ in all copies or substantial portions of the Software. ++ ++ THE SOFTWARE IS PROVIDED ``AS IS'', WITHOUT WARRANTY OF ANY KIND, ++ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ++ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND ++ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT ++ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, ++ WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, ++ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER ++ DEALINGS IN THE SOFTWARE. ++ ----------------------------------------------------------------------- */ ++#define LIBFFI_ASM ++#include ++#include ++#include ++#include "internal.h" ++ ++ .arch sw6a ++ .text ++ ++/* Aid in building a direct addressed jump table, 4 insns per entry. */ ++.macro E index ++ .align 4 ++ .org 99b + \index * 16 ++.endm ++ ++/* ffi_call_sysv (void *stack, void *frame, unsigned flags, ++ void *raddr, void (*fnaddr)(void), void *closure) ++ ++ Bit o trickiness here -- FRAME is the base of the stack frame ++ for this function. This has been allocated by ffi_call. We also ++ deallocate some of the stack that has been alloca'd. 
*/ ++ ++ .align 4 ++ .globl ffi_call_sysv ++ .ent ffi_call_sysv ++ FFI_HIDDEN(ffi_call_sysv) ++ ++ffi_call_sysv: ++ cfi_startproc ++ cfi_def_cfa($17, 32) ++ mov $16, $30 ++ stl $26, 0($17) ++ stl $15, 8($17) ++ mov $17, $15 ++ .prologue 0 ++ cfi_def_cfa_register($15) ++ cfi_rel_offset($26, 0) ++ cfi_rel_offset($15, 8) ++ ++ stl $18, 16($17) # save flags into frame ++ stl $19, 24($17) # save rvalue into frame ++ mov $20, $27 # fn into place for call ++ mov $21, $1 # closure into static chain ++ ++ # Load up all of the (potential) argument registers. ++ ldl $16, 0($30) ++ fldd $f16, 0($30) ++ fldd $f17, 8($30) ++ ldl $17, 8($30) ++ fldd $f18, 16($30) ++ ldl $18, 16($30) ++ fldd $f19, 24($30) ++ ldl $19, 24($30) ++ fldd $f20, 32($30) ++ ldl $20, 32($30) ++ fldd $f21, 40($30) ++ ldl $21, 40($30) ++ ++ # Deallocate the register argument area. ++ ldi $30, 48($30) ++ ++ call $26, ($27), 0 ++0: ++ ldih $29, 0($26) !gpdisp!1 ++ ldl $2, 24($15) # reload rvalue ++ ldi $29, 0($29) !gpdisp!1 ++ ldl $3, 16($15) # reload flags ++ ldi $1, 99f-0b($26) ++ ldl $26, 0($15) ++ ldl $15, 8($15) ++ cfi_restore($26) ++ cfi_restore($15) ++ cfi_def_cfa($sp, 0) ++ seleq $2, 0, $3 # mash null rvalue to void ++ addl $3, $3, $3 ++ s8addl $3, $1, $1 # 99f + stcode * 16 ++ jmp $31, ($1), $st_int ++ ++ .align 4 ++99: ++E 0 ++ ret ++E 1 ++$st_int: ++ stl $0, 0($2) ++ ret ++E 2 ++ fsts $f0, 0($2) ++ ret ++E 4 ++ fstd $f0, 0($2) ++ ret ++E 6 ++ fsts $f0, 0($2) ++ fsts $f1, 4($2) ++ ret ++E 10 ++ fstd $f0, 0($2) ++ fstd $f1, 8($2) ++ ret ++ ++ cfi_endproc ++ .end ffi_call_sysv ++ ++/* ffi_closure_sysv(...) ++ ++ Receives the closure argument in $1. 
*/ ++ ++#define CLOSURE_FS (16*8) ++ ++ .align 4 ++ .globl ffi_go_closure_sysv ++ .ent ffi_go_closure_sysv ++ FFI_HIDDEN(ffi_go_closure_sysv) ++ ++ffi_go_closure_sysv: ++ cfi_startproc ++ ldgp $29, 0($27) ++ subl $30, CLOSURE_FS, $30 ++ cfi_adjust_cfa_offset(CLOSURE_FS) ++ stl $26, 0($30) ++ .prologue 1 ++ cfi_rel_offset($26, 0) ++ ++ stl $16, 10*8($30) ++ stl $17, 11*8($30) ++ stl $18, 12*8($30) ++ ++ ldl $16, 8($1) # load cif ++ ldl $17, 16($1) # load fun ++ mov $1, $18 # closure is user_data ++ br $do_closure ++ ++ cfi_endproc ++ .end ffi_go_closure_sysv ++ ++ .align 4 ++ .globl ffi_closure_sysv ++ .ent ffi_closure_sysv ++ FFI_HIDDEN(ffi_closure_sysv) ++ ++ffi_closure_sysv: ++ cfi_startproc ++ ldgp $29, 0($27) ++ subl $30, CLOSURE_FS, $30 ++ cfi_adjust_cfa_offset(CLOSURE_FS) ++ stl $26, 0($30) ++ .prologue 1 ++ cfi_rel_offset($26, 0) ++ ++ # Store all of the potential argument registers in va_list format. ++ stl $16, 10*8($30) ++ stl $17, 11*8($30) ++ stl $18, 12*8($30) ++ ++ ldl $16, 24($1) # load cif ++ ldl $17, 32($1) # load fun ++ ldl $18, 40($1) # load user_data ++ ++$do_closure: ++ stl $19, 13*8($30) ++ stl $20, 14*8($30) ++ stl $21, 15*8($30) ++ fstd $f16, 4*8($30) ++ fstd $f17, 5*8($30) ++ fstd $f18, 6*8($30) ++ fstd $f19, 7*8($30) ++ fstd $f20, 8*8($30) ++ fstd $f21, 9*8($30) ++ ++ # Call ffi_closure_sysv_inner to do the bulk of the work. 
++ ldi $19, 2*8($30) ++ ldi $20, 10*8($30) ++ call $26, ffi_closure_sysv_inner ++0: ++ ldih $29, 0($26) !gpdisp!2 ++ ldi $2, 99f-0b($26) ++ s4addl $0, 0, $1 # ldcode * 4 ++ ldl $0, 16($30) # preload return value ++ s4addl $1, $2, $1 # 99f + ldcode * 16 ++ ldi $29, 0($29) !gpdisp!2 ++ ldl $26, 0($30) ++ cfi_restore($26) ++ jmp $31, ($1), $load_32 ++ ++.macro epilogue ++ addl $30, CLOSURE_FS, $30 ++ cfi_adjust_cfa_offset(-CLOSURE_FS) ++ ret ++ .align 4 ++ cfi_adjust_cfa_offset(CLOSURE_FS) ++.endm ++ ++ .align 4 ++99: ++E 0 ++ epilogue ++ ++E 1 ++ epilogue ++ ++E 2 ++$load_32: ++ sextl $0, $0 ++ epilogue ++ ++E 3 ++ zapnot $0, 3, $0 ++ epilogue ++ ++E 4 ++#ifdef __sw_64_bwx__ ++ sexth $0, $0 ++#else ++ sll $0, 48, $0 ++ sra $0, 48, $0 ++#endif ++ epilogue ++ ++E 5 ++ and $0, 0xff, $0 ++ epilogue ++ ++E 6 ++#ifdef __sw_64_bwx__ ++ sextb $0, $0 ++#else ++ sll $0, 56, $0 ++ sra $0, 56, $0 ++#endif ++ epilogue ++ ++E 7 ++ flds $f0, 16($sp) ++ epilogue ++ ++E 8 ++ fldd $f0, 16($sp) ++ epilogue ++ ++E 9 ++ flds $f0, 16($sp) ++ flds $f1, 20($sp) ++ epilogue ++ ++E 10 ++ fldd $f0, 16($sp) ++ fldd $f1, 24($sp) ++ epilogue ++ ++ cfi_endproc ++ .end ffi_closure_sysv ++ ++#if defined __ELF__ && defined __linux__ ++ .section .note.GNU-stack,"",@progbits ++#endif +diff --git a/libffi/src/types.c b/libffi/src/types.c +index 9ec27f6cf..6a31d380e 100644 +--- a/libffi/src/types.c ++++ b/libffi/src/types.c +@@ -80,13 +80,13 @@ FFI_TYPEDEF(pointer, void*, FFI_TYPE_POINTER, const); + FFI_TYPEDEF(float, float, FFI_TYPE_FLOAT, const); + FFI_TYPEDEF(double, double, FFI_TYPE_DOUBLE, const); + +-#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__ ++#if !defined HAVE_LONG_DOUBLE_VARIANT || defined __alpha__ || defined __sw_64__ + #define FFI_LDBL_CONST const + #else + #define FFI_LDBL_CONST + #endif + +-#ifdef __alpha__ ++#ifdef __alpha__ || defined __sw_64__ + /* Even if we're not configured to default to 128-bit long double, + maintain binary compatibility, as -mlong-double-128 can be 
used + at any time. */ +-- +2.25.1 + diff --git a/0008-LoongArch-Delete-macro-definition-ASM_OUTPUT_ALIGN_W.patch b/0008-LoongArch-Delete-macro-definition-ASM_OUTPUT_ALIGN_W.patch new file mode 100644 index 0000000000000000000000000000000000000000..837cdfd1751ebb3e70ede6bca0fa238d6fc90b0a --- /dev/null +++ b/0008-LoongArch-Delete-macro-definition-ASM_OUTPUT_ALIGN_W.patch @@ -0,0 +1,48 @@ +From 3829ad1963a92526201b42233d2bb4facf7ba8d4 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Fri, 15 Sep 2023 11:56:01 +0800 +Subject: [PATCH 008/188] LoongArch: Delete macro definition + ASM_OUTPUT_ALIGN_WITH_NOP. + +There are two reasons for removing this macro definition: +1. The default in the assembler is to use the nop instruction for filling. +2. For assembly directives: .align [abs-expr[, abs-expr[, abs-expr]]] + The third expression it is the maximum number of bytes that should be + skipped by this alignment directive. + Therefore, it will affect the display of the specified alignment rules + and affect the operating efficiency. + +This modification relies on binutils commit 1fb3cdd87ec61715a5684925fb6d6a6cf53bb97c. +(Since the assembler will add nop based on the .align information when doing relax, +it will cause the conditional branch to go out of bounds during the assembly process. +This submission of binutils solves this problem.) + +gcc/ChangeLog: + + * config/loongarch/loongarch.h (ASM_OUTPUT_ALIGN_WITH_NOP): + Delete. + +Co-authored-by: Chenghua Xu +--- + gcc/config/loongarch/loongarch.h | 5 ----- + 1 file changed, 5 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index a2dc4ba8c..572b538be 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1058,11 +1058,6 @@ typedef struct { + + #define ASM_OUTPUT_ALIGN(STREAM, LOG) fprintf (STREAM, "\t.align\t%d\n", (LOG)) + +-/* "nop" instruction 54525952 (andi $r0,$r0,0) is +- used for padding. 
*/ +-#define ASM_OUTPUT_ALIGN_WITH_NOP(STREAM, LOG) \ +- fprintf (STREAM, "\t.align\t%d,54525952,4\n", (LOG)) +- + /* This is how to output an assembler line to advance the location + counter by SIZE bytes. */ + +-- +2.43.0 + diff --git a/0008-MULL64-3-3-Fold-series-of-instructions-into-umulh.patch b/0008-MULL64-3-3-Fold-series-of-instructions-into-umulh.patch new file mode 100644 index 0000000000000000000000000000000000000000..cac0b3947d8cdcec182185340d8fdeecff3e19d5 --- /dev/null +++ b/0008-MULL64-3-3-Fold-series-of-instructions-into-umulh.patch @@ -0,0 +1,105 @@ +From 4e536dbb4a08925cea259be13962969efcc0f3c1 Mon Sep 17 00:00:00 2001 +From: zhongyunde +Date: Fri, 11 Nov 2022 11:30:37 +0800 +Subject: [PATCH 08/22] [MULL64 3/3] Fold series of instructions into umulh + + Merge the high part of series instructions into umulh + + gcc/ + * match.pd: Add simplifcations for high part of umulh + + gcc/testsuite/ + * g++.dg/tree-ssa/mull64.C: Add checking of tree pass forwprop4 +--- + gcc/match.pd | 56 ++++++++++++++++++++++++++ + gcc/testsuite/g++.dg/tree-ssa/mull64.C | 5 ++- + 2 files changed, 59 insertions(+), 2 deletions(-) + +diff --git a/gcc/match.pd b/gcc/match.pd +index 2092e6959..b7e3588e8 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -4301,6 +4301,62 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) + ) + #endif + ++#if GIMPLE ++/* These patterns are mostly used by FORWPROP4 to move some operations outside of ++ the if statements. They should be done late because it gives jump threading ++ and few other passes to reduce what is going on. */ ++/* Mul64 is defined as a multiplication algorithm which compute two 64-bit ++ integers to one 128-bit integer. Try to match the high part of mul pattern ++ after the low part of mul pattern is simplified. 
The following scenario ++ should be matched: ++ (i64 ResLo, i64 ResHi) = Mul64(i64 In0, i64 In1) { ++ In0Lo = In0(D) & 4294967295; -- bit_and@4 SSA_NAME@0 @2 ++ In0Hi = In0(D) >> 32; -- rshift@5 SSA_NAME@0 @3 ++ In1Lo = In1(D) & 4294967295; -- bit_and@6 SSA_NAME@1 INTEGER_CST@2 ++ In1Hi = In1(D) >> 32; -- rshift@7 SSA_NAME@1 INTEGER_CST@3 ++ Mull_01 = In0Hi * In1Lo; -- mult@8 @5 @6 ++ Addc = In0Lo * In1Hi + Mull_01; -- plus@9 (mult (@4 @7) @8 ++ AddH = (Addc >> 32) + In0Hi * In1Hi -- (plus@11 (rshift @9 @3) (mult @5 @7)) ++ addc32 = Addc << 32; -- lshift@10 @9 @3 ++ ResLo = In0(D) * In1(D); -- mult @0 @1 ++ ResHi = ((long unsigned int) (addc32 > ResLo)) + ++ (((long unsigned int) (Mull_01 > Addc)) << 32) + AddH; ++ } */ ++(simplify ++ (plus:c ++ (plus:c ++ (convert ++ (gt (lshift@10 @9 @3) ++ (mult:c @0 @1))) ++ (lshift ++ (convert ++ (gt @8 @9)) ++ @3)) ++ (plus:c@11 ++ (rshift ++ (plus:c@9 ++ (mult:c (bit_and@4 SSA_NAME@0 @2) @7) ++ (mult:c@8 @5 (bit_and@6 SSA_NAME@1 INTEGER_CST@2))) ++ @3) ++ (mult:c (rshift@5 SSA_NAME@0 @3) ++ (rshift@7 SSA_NAME@1 INTEGER_CST@3)) ++ ) ++ ) ++ (if (flag_merge_mull && INTEGRAL_TYPE_P (type) ++ && INTEGRAL_TYPE_P (TREE_TYPE (@0)) && types_match (@0, @1) ++ && TYPE_PRECISION (type) == 64) ++ (with { ++ tree i128_type = build_nonstandard_integer_type (128, TYPE_UNSIGNED (type)); ++ tree shift = build_int_cst (integer_type_node, 64); ++ } ++ (convert:type (rshift ++ (mult (convert:i128_type @0) ++ (convert:i128_type @1)) ++ { shift; }))) ++ ) ++) ++#endif ++ + #if GIMPLE + /* These patterns are mostly used by FORWPROP1 to fold some operations into more + simple IR. 
The following scenario should be matched: +diff --git a/gcc/testsuite/g++.dg/tree-ssa/mull64.C b/gcc/testsuite/g++.dg/tree-ssa/mull64.C +index 2a3b74604..f61cf5e6f 100644 +--- a/gcc/testsuite/g++.dg/tree-ssa/mull64.C ++++ b/gcc/testsuite/g++.dg/tree-ssa/mull64.C +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-O2 -Wno-psabi -fmerge-mull -fdump-tree-forwprop1-details" } */ ++/* { dg-options "-O2 -Wno-psabi -fdump-tree-forwprop1-details -fdump-tree-forwprop4-details" } */ + + # define BN_BITS4 32 + # define BN_MASK2 (0xffffffffffffffffL) +@@ -31,4 +31,5 @@ void mul64(unsigned long in0, unsigned long in1, + retHi = m11; + } + +-/* { dg-final { scan-tree-dump "gimple_simplified to low_18 = in0_4" "forwprop1" } } */ ++/* { dg-final { scan-tree-dump "gimple_simplified to" "forwprop1" } } */ ++/* { dg-final { scan-tree-dump-times "gimple_simplified to" 1 "forwprop4" } } */ +-- +2.33.0 + diff --git a/0008-Sw64-Port-libgfortran.patch b/0008-Sw64-Port-libgfortran.patch new file mode 100644 index 0000000000000000000000000000000000000000..66d2c7d8167dbcf323c66a8c877cfedbac75a713 --- /dev/null +++ b/0008-Sw64-Port-libgfortran.patch @@ -0,0 +1,57 @@ +From 5920c7d65bf452ddec031bfcbe610404324a38bc Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:52:12 +0800 +Subject: [PATCH 08/16] Sw64 Port: libgfortran + +--- + libgfortran/config/fpu-glibc.h | 6 +++--- + libgfortran/configure.host | 2 ++ + 2 files changed, 5 insertions(+), 3 deletions(-) + +diff --git a/libgfortran/config/fpu-glibc.h b/libgfortran/config/fpu-glibc.h +index 265ef6938..8a5eb3c99 100644 +--- a/libgfortran/config/fpu-glibc.h ++++ b/libgfortran/config/fpu-glibc.h +@@ -446,7 +446,7 @@ set_fpu_state (void *state) + int + support_fpu_underflow_control (int kind __attribute__((unused))) + { +-#if defined(__alpha__) && defined(FE_MAP_UMZ) ++#if (defined(__alpha__) || defined(__sw_64__)) && defined(FE_MAP_UMZ) + return (kind == 4 || kind == 8) ? 
1 : 0; + #else + return 0; +@@ -457,7 +457,7 @@ support_fpu_underflow_control (int kind __attribute__((unused))) + int + get_fpu_underflow_mode (void) + { +-#if defined(__alpha__) && defined(FE_MAP_UMZ) ++#if (defined(__alpha__) || defined(__sw_64__)) && defined(FE_MAP_UMZ) + + fenv_t state = __ieee_get_fp_control (); + +@@ -475,7 +475,7 @@ get_fpu_underflow_mode (void) + void + set_fpu_underflow_mode (int gradual __attribute__((unused))) + { +-#if defined(__alpha__) && defined(FE_MAP_UMZ) ++#if (defined(__alpha__) || defined(__sw_64__)) && defined(FE_MAP_UMZ) + + fenv_t state = __ieee_get_fp_control (); + +diff --git a/libgfortran/configure.host b/libgfortran/configure.host +index 3d6c2db77..ddd24ac12 100644 +--- a/libgfortran/configure.host ++++ b/libgfortran/configure.host +@@ -71,6 +71,8 @@ case "${host_cpu}" in + ieee_flags="-mieee" ;; + sh*) + ieee_flags="-mieee" ;; ++ sw_64*) ++ ieee_flags="-mieee" ;; + esac + + tmake_file= +-- +2.25.1 + diff --git a/0009-LoongArch-Fix-vec_initv32qiv16qi-template-to-avoid-I.patch b/0009-LoongArch-Fix-vec_initv32qiv16qi-template-to-avoid-I.patch new file mode 100644 index 0000000000000000000000000000000000000000..cbb626626e1913ed99b65701e3aee6ced09a458b --- /dev/null +++ b/0009-LoongArch-Fix-vec_initv32qiv16qi-template-to-avoid-I.patch @@ -0,0 +1,105 @@ +From aa947bf395b5722a23f2edd9d6302e220473d900 Mon Sep 17 00:00:00 2001 +From: Chenghui Pan +Date: Wed, 11 Oct 2023 16:41:25 +0800 +Subject: [PATCH 009/188] LoongArch: Fix vec_initv32qiv16qi template to avoid + ICE. 
+MIME-Version: 1.0 +Content-Type: text/plain; charset=UTF-8 +Content-Transfer-Encoding: 8bit + +Following test code triggers unrecognized insn ICE on LoongArch target +with "-O3 -mlasx": + +void +foo (unsigned char *dst, unsigned char *src) +{ + for (int y = 0; y < 16; y++) + { + for (int x = 0; x < 16; x++) + dst[x] = src[x] + 1; + dst += 32; + src += 32; + } +} + +ICE info: +./test.c: In function ‘foo’: +./test.c:8:1: error: unrecognizable insn: + 8 | } + | ^ +(insn 15 14 16 4 (set (reg:V32QI 185 [ vect__24.7 ]) + (vec_concat:V32QI (reg:V16QI 186) + (const_vector:V16QI [ + (const_int 0 [0]) repeated x16 + ]))) "./test.c":4:19 -1 + (nil)) +during RTL pass: vregs +./test.c:8:1: internal compiler error: in extract_insn, at recog.cc:2791 +0x12028023b _fatal_insn(char const*, rtx_def const*, char const*, int, char const*) + /home/panchenghui/upstream/gcc/gcc/rtl-error.cc:108 +0x12028026f _fatal_insn_not_found(rtx_def const*, char const*, int, char const*) + /home/panchenghui/upstream/gcc/gcc/rtl-error.cc:116 +0x120a03c5b extract_insn(rtx_insn*) + /home/panchenghui/upstream/gcc/gcc/recog.cc:2791 +0x12067ff73 instantiate_virtual_regs_in_insn + /home/panchenghui/upstream/gcc/gcc/function.cc:1610 +0x12067ff73 instantiate_virtual_regs + /home/panchenghui/upstream/gcc/gcc/function.cc:1983 +0x12067ff73 execute + /home/panchenghui/upstream/gcc/gcc/function.cc:2030 + +This RTL is generated inside loongarch_expand_vector_group_init function (related +to vec_initv32qiv16qi template). Original impl doesn't ensure all vec_concat arguments +are register type. This patch adds force_reg() to the vec_concat argument generation. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_expand_vector_group_init): + fix impl related to vec_initv32qiv16qi template to avoid ICE. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c: New test. 
+--- + gcc/config/loongarch/loongarch.cc | 3 ++- + .../loongarch/vector/lasx/lasx-vec-init-1.c | 14 ++++++++++++++ + 2 files changed, 16 insertions(+), 1 deletion(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 760b12268..9a629a999 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -10188,7 +10188,8 @@ loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val) + void + loongarch_expand_vector_group_init (rtx target, rtx vals) + { +- rtx ops[2] = { XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1) }; ++ rtx ops[2] = { force_reg (E_V16QImode, XVECEXP (vals, 0, 0)), ++ force_reg (E_V16QImode, XVECEXP (vals, 0, 1)) }; + emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (E_V32QImode, ops[0], + ops[1]))); + } +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c +new file mode 100644 +index 000000000..28be32982 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-1.c +@@ -0,0 +1,14 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3" } */ ++ ++void ++foo (unsigned char *dst, unsigned char *src) ++{ ++ for (int y = 0; y < 16; y++) ++ { ++ for (int x = 0; x < 16; x++) ++ dst[x] = src[x] + 1; ++ dst += 32; ++ src += 32; ++ } ++} +-- +2.43.0 + diff --git a/0009-MULL64-Disable-mull64-transformation-by-default.patch b/0009-MULL64-Disable-mull64-transformation-by-default.patch new file mode 100644 index 0000000000000000000000000000000000000000..347ba0aeca65c80056fc86ba0bcc32a7b2a1fe2f --- /dev/null +++ b/0009-MULL64-Disable-mull64-transformation-by-default.patch @@ -0,0 +1,66 @@ +From 7c1f4425c680ea144d29bc55a1283d46444a2691 Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Wed, 7 Dec 2022 09:43:15 +0800 +Subject: [PATCH 09/22] [MULL64] Disable mull64 transformation by default + +This 
commit disables mull64 transformation by default since +it shows some runtime failure in workloads. + +This is a workaround fix for https://gitee.com/src-openeuler/gcc/issues/I64UQH +--- + gcc/match.pd | 2 +- + gcc/opts.cc | 1 - + gcc/testsuite/g++.dg/tree-ssa/mull64.C | 2 +- + gcc/testsuite/gcc.dg/pr107190.c | 2 +- + 4 files changed, 3 insertions(+), 4 deletions(-) + +diff --git a/gcc/match.pd b/gcc/match.pd +index b7e3588e8..6f24d5079 100644 +--- a/gcc/match.pd ++++ b/gcc/match.pd +@@ -4290,7 +4290,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT) + (simplify + (cond @0 (op:s @1 integer_pow2p@2) @1) + /* powerof2cst */ +- (if (INTEGRAL_TYPE_P (type)) ++ (if (flag_merge_mull && INTEGRAL_TYPE_P (type)) + (with { + tree shift = build_int_cst (integer_type_node, tree_log2 (@2)); + } +diff --git a/gcc/opts.cc b/gcc/opts.cc +index eae71ed20..a97630d1c 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -647,7 +647,6 @@ static const struct default_options default_options_table[] = + VECT_COST_MODEL_VERY_CHEAP }, + { OPT_LEVELS_2_PLUS, OPT_finline_functions, NULL, 1 }, + { OPT_LEVELS_2_PLUS, OPT_ftree_loop_distribute_patterns, NULL, 1 }, +- { OPT_LEVELS_2_PLUS, OPT_fmerge_mull, NULL, 1 }, + + /* -O2 and above optimizations, but not -Os or -Og. 
*/ + { OPT_LEVELS_2_PLUS_SPEED_ONLY, OPT_falign_functions, NULL, 1 }, +diff --git a/gcc/testsuite/g++.dg/tree-ssa/mull64.C b/gcc/testsuite/g++.dg/tree-ssa/mull64.C +index f61cf5e6f..cad891e62 100644 +--- a/gcc/testsuite/g++.dg/tree-ssa/mull64.C ++++ b/gcc/testsuite/g++.dg/tree-ssa/mull64.C +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-O2 -Wno-psabi -fdump-tree-forwprop1-details -fdump-tree-forwprop4-details" } */ ++/* { dg-options "-O2 -fmerge-mull -Wno-psabi -fdump-tree-forwprop1-details -fdump-tree-forwprop4-details" } */ + + # define BN_BITS4 32 + # define BN_MASK2 (0xffffffffffffffffL) +diff --git a/gcc/testsuite/gcc.dg/pr107190.c b/gcc/testsuite/gcc.dg/pr107190.c +index 235b2761a..d1e72e5df 100644 +--- a/gcc/testsuite/gcc.dg/pr107190.c ++++ b/gcc/testsuite/gcc.dg/pr107190.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-O2 -fexpensive-optimizations -fdump-tree-phiopt2-details" } */ ++/* { dg-options "-O2 -fmerge-mull -fexpensive-optimizations -fdump-tree-phiopt2-details" } */ + + # define BN_BITS4 32 + # define BN_MASK2 (0xffffffffffffffffL) +-- +2.33.0 + diff --git a/0009-Sw64-Port-libgo.patch b/0009-Sw64-Port-libgo.patch new file mode 100644 index 0000000000000000000000000000000000000000..9eba652e63cdc781c9b491ba9286a37eabd895ed --- /dev/null +++ b/0009-Sw64-Port-libgo.patch @@ -0,0 +1,629 @@ +From 9e32a64afd05cb18a5dcb09a27322e243cd245f4 Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:52:50 +0800 +Subject: [PATCH 09/16] Sw64 Port: libgo + +--- + libgo/configure | 7 +- + libgo/configure.ac | 7 +- + libgo/go/cmd/cgo/main.go | 2 + + libgo/go/cmd/internal/sys/arch.go | 11 +++ + libgo/go/debug/elf/elf.go | 72 ++++++++++++++++++ + libgo/go/debug/elf/elf_test.go | 1 + + libgo/go/debug/elf/file.go | 47 ++++++++++++ + libgo/go/encoding/xml/xml.go | 1 + + libgo/go/go/build/syslist.go | 2 +- + .../syscall/unix/getrandom_linux_sw_64.go | 9 +++ + .../syscall/unix/sysnum_linux_sw_64.go | 10 +++ + 
libgo/go/net/listen_test.go | 2 +- + libgo/go/regexp/testdata/basic.dat | 1 + + libgo/go/runtime/hash64.go | 2 +- + libgo/go/runtime/lfstack_64bit.go | 2 +- + libgo/go/runtime/mpagealloc_64bit.go | 2 +- + libgo/go/syscall/endian_little.go | 2 +- + libgo/go/syscall/libcall_linux_sw_64.go | 13 ++++ + libgo/go/syscall/syscall_linux_sw_64.go | 25 ++++++ + libgo/goarch.sh | 5 ++ + libgo/mksysinfo.sh | 5 ++ + libgo/runtime/go-signal.c | 76 ++++++++++--------- + 22 files changed, 257 insertions(+), 47 deletions(-) + create mode 100644 libgo/go/internal/syscall/unix/getrandom_linux_sw_64.go + create mode 100644 libgo/go/internal/syscall/unix/sysnum_linux_sw_64.go + create mode 100644 libgo/go/syscall/libcall_linux_sw_64.go + create mode 100644 libgo/go/syscall/syscall_linux_sw_64.go + +diff --git a/libgo/configure b/libgo/configure +index ffe17c9be..b90dd9dae 100755 +--- a/libgo/configure ++++ b/libgo/configure +@@ -14124,10 +14124,10 @@ esac + # - libgo/go/syscall/endian_XX.go + # - possibly others + # - possibly update files in libgo/go/internal/syscall/unix +-ALLGOARCH="386 alpha amd64 amd64p32 arm armbe arm64 arm64be ia64 m68k mips mipsle mips64 mips64le mips64p32 mips64p32le nios2 ppc ppc64 ppc64le riscv riscv64 s390 s390x sh shbe sparc sparc64 wasm" ++ALLGOARCH="386 alpha amd64 amd64p32 arm armbe arm64 arm64be ia64 m68k mips mipsle mips64 mips64le mips64p32 mips64p32le nios2 ppc ppc64 ppc64le riscv riscv64 s390 s390x sh shbe sparc sparc64 sw_64 wasm" + + # All known GOARCH family values. 
+-ALLGOARCHFAMILY="I386 ALPHA AMD64 ARM ARM64 IA64 M68K MIPS MIPS64 NIOS2 PPC PPC64 RISCV RISCV64 S390 S390X SH SPARC SPARC64 WASM" ++ALLGOARCHFAMILY="I386 ALPHA AMD64 ARM ARM64 IA64 M68K MIPS MIPS64 NIOS2 PPC PPC64 RISCV RISCV64 S390 S390X SH SPARC SPARC64 SW_64 WASM" + + GOARCH=unknown + case ${host} in +@@ -14323,6 +14323,9 @@ else + fi + rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ;; ++ sw_64*-*-*) ++ GOARCH=sw_64 ++ ;; + esac + + +diff --git a/libgo/configure.ac b/libgo/configure.ac +index 7e2b98ba6..9f903c64e 100644 +--- a/libgo/configure.ac ++++ b/libgo/configure.ac +@@ -239,10 +239,10 @@ AC_SUBST(USE_DEJAGNU) + # - libgo/go/syscall/endian_XX.go + # - possibly others + # - possibly update files in libgo/go/internal/syscall/unix +-ALLGOARCH="386 alpha amd64 amd64p32 arm armbe arm64 arm64be ia64 m68k mips mipsle mips64 mips64le mips64p32 mips64p32le nios2 ppc ppc64 ppc64le riscv riscv64 s390 s390x sh shbe sparc sparc64 wasm" ++ALLGOARCH="386 alpha amd64 amd64p32 arm armbe arm64 arm64be ia64 m68k mips mipsle mips64 mips64le mips64p32 mips64p32le nios2 ppc ppc64 ppc64le riscv riscv64 s390 s390x sh shbe sparc sparc64 sw_64 wasm" + + # All known GOARCH family values. 
+-ALLGOARCHFAMILY="I386 ALPHA AMD64 ARM ARM64 IA64 M68K MIPS MIPS64 NIOS2 PPC PPC64 RISCV RISCV64 S390 S390X SH SPARC SPARC64 WASM" ++ALLGOARCHFAMILY="I386 ALPHA SW_64 AMD64 ARM ARM64 IA64 M68K MIPS MIPS64 NIOS2 PPC PPC64 RISCV RISCV64 S390 S390X SH SPARC SPARC64 SW_64 WASM" + + GOARCH=unknown + case ${host} in +@@ -370,6 +370,9 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + [GOARCH=sparc], + [GOARCH=sparc64]) + ;; ++ sw_64*-*-*) ++ GOARCH=sw_64 ++ ;; + esac + AC_SUBST(GOARCH) + AC_SUBST(ALLGOARCH) +diff --git a/libgo/go/cmd/cgo/main.go b/libgo/go/cmd/cgo/main.go +index 58477e470..842237774 100644 +--- a/libgo/go/cmd/cgo/main.go ++++ b/libgo/go/cmd/cgo/main.go +@@ -194,6 +194,7 @@ var ptrSizeMap = map[string]int64{ + "shbe": 4, + "sparc": 4, + "sparc64": 8, ++ "sw_64": 8, + } + + var intSizeMap = map[string]int64{ +@@ -221,6 +222,7 @@ var intSizeMap = map[string]int64{ + "shbe": 4, + "sparc": 4, + "sparc64": 8, ++ "sw_64": 8, + } + + var cPrefix string +diff --git a/libgo/go/cmd/internal/sys/arch.go b/libgo/go/cmd/internal/sys/arch.go +index 97d0ac9bb..dea328a34 100644 +--- a/libgo/go/cmd/internal/sys/arch.go ++++ b/libgo/go/cmd/internal/sys/arch.go +@@ -12,6 +12,7 @@ type ArchFamily byte + + const ( + NoArch ArchFamily = iota ++ SW_64 + AMD64 + ARM + ARM64 +@@ -229,7 +230,17 @@ var ArchWasm = &Arch{ + CanMergeLoads: false, + } + ++/*TODO*/ ++var ArchSW_64 = &Arch{ ++ Name: "sw_64", ++ Family: SW_64, ++ ByteOrder: binary.LittleEndian, ++ PtrSize: 8, ++ RegSize: 8, ++ MinLC: 1, ++} + var Archs = [...]*Arch{ ++ ArchSW_64, + Arch386, + ArchAMD64, + ArchARM, +diff --git a/libgo/go/debug/elf/elf.go b/libgo/go/debug/elf/elf.go +index 4c51bc4de..1899a4245 100644 +--- a/libgo/go/debug/elf/elf.go ++++ b/libgo/go/debug/elf/elf.go +@@ -6,6 +6,7 @@ + * $FreeBSD: src/sys/sys/elf64.h,v 1.10.14.1 2005/12/30 22:13:58 marcel Exp $ + * $FreeBSD: src/sys/sys/elf_common.h,v 1.15.8.1 2005/12/30 22:13:58 marcel Exp $ + * $FreeBSD: src/sys/alpha/include/elf.h,v 1.14 2003/09/25 01:10:22 peter 
Exp $ ++ * $FreeBSD: src/sys/sw_64/include/elf.h,v 1.14 2003/09/25 01:10:22 peter Exp $ + * $FreeBSD: src/sys/amd64/include/elf.h,v 1.18 2004/08/03 08:21:48 dfr Exp $ + * $FreeBSD: src/sys/arm/include/elf.h,v 1.5.2.1 2006/06/30 21:42:52 cognet Exp $ + * $FreeBSD: src/sys/i386/include/elf.h,v 1.16 2004/08/02 19:12:17 dfr Exp $ +@@ -390,6 +391,8 @@ const ( + EM_MIPS_RS4_BE Machine = 10 /* MIPS R4000 Big-Endian */ + EM_ALPHA_STD Machine = 41 /* Digital Alpha (standard value). */ + EM_ALPHA Machine = 0x9026 /* Alpha (written in the absence of an ABI) */ ++ EM_SW_64_STD Machine = 41 /* Digital Sw_64 (standard value). */ ++ EM_SW_64 Machine = 0x9916 /* Mieee-opt Sw_64 (written in the absence of an ABI) */ + ) + + var machineStrings = []intName{ +@@ -581,6 +584,8 @@ var machineStrings = []intName{ + {10, "EM_MIPS_RS4_BE"}, + {41, "EM_ALPHA_STD"}, + {0x9026, "EM_ALPHA"}, ++ {41, "EM_SW_64_STD"}, ++ {0x9916, "EM_SW_64"}, + } + + func (i Machine) String() string { return stringName(uint32(i), machineStrings, false) } +@@ -1645,6 +1650,73 @@ var ralphaStrings = []intName{ + func (i R_ALPHA) String() string { return stringName(uint32(i), ralphaStrings, false) } + func (i R_ALPHA) GoString() string { return stringName(uint32(i), ralphaStrings, true) } + ++// Relocation types for SW_64. 
++type R_SW_64 int ++ ++const ( ++ R_SW_64_NONE R_SW_64 = 0 /* No reloc */ ++ R_SW_64_REFLONG R_SW_64 = 1 /* Direct 32 bit */ ++ R_SW_64_REFQUAD R_SW_64 = 2 /* Direct 64 bit */ ++ R_SW_64_GPREL32 R_SW_64 = 3 /* GP relative 32 bit */ ++ R_SW_64_LITERAL R_SW_64 = 4 /* GP relative 16 bit w/optimization */ ++ R_SW_64_LITUSE R_SW_64 = 5 /* Optimization hint for LITERAL */ ++ R_SW_64_GPDISP R_SW_64 = 6 /* Add displacement to GP */ ++ R_SW_64_BRADDR R_SW_64 = 7 /* PC+4 relative 23 bit shifted */ ++ R_SW_64_HINT R_SW_64 = 8 /* PC+4 relative 16 bit shifted */ ++ R_SW_64_SREL16 R_SW_64 = 9 /* PC relative 16 bit */ ++ R_SW_64_SREL32 R_SW_64 = 10 /* PC relative 32 bit */ ++ R_SW_64_SREL64 R_SW_64 = 11 /* PC relative 64 bit */ ++ R_SW_64_OP_PUSH R_SW_64 = 12 /* OP stack push */ ++ R_SW_64_OP_STORE R_SW_64 = 13 /* OP stack pop and store */ ++ R_SW_64_OP_PSUB R_SW_64 = 14 /* OP stack subtract */ ++ R_SW_64_OP_PRSHIFT R_SW_64 = 15 /* OP stack right shift */ ++ R_SW_64_GPVALUE R_SW_64 = 16 ++ R_SW_64_GPRELHIGH R_SW_64 = 17 ++ R_SW_64_GPRELLOW R_SW_64 = 18 ++ R_SW_64_IMMED_GP_16 R_SW_64 = 19 ++ R_SW_64_IMMED_GP_HI32 R_SW_64 = 20 ++ R_SW_64_IMMED_SCN_HI32 R_SW_64 = 21 ++ R_SW_64_IMMED_BR_HI32 R_SW_64 = 22 ++ R_SW_64_IMMED_LO32 R_SW_64 = 23 ++ R_SW_64_COPY R_SW_64 = 24 /* Copy sympol at runtime */ ++ R_SW_64_GLOB_DAT R_SW_64 = 25 /* Create GOT entry */ ++ R_SW_64_JMP_SLOT R_SW_64 = 26 /* Create PLT entry */ ++ R_SW_64_RELATIVE R_SW_64 = 27 /* Adjust by program base */ ++) ++var rsw_64Strings = []intName{ ++ {0, "R_SW_64_NONE"}, ++ {1, "R_SW_64_REFLONG"}, ++ {2, "R_SW_64_REFQUAD"}, ++ {3, "R_SW_64_GPREL32"}, ++ {4, "R_SW_64_LITERAL"}, ++ {5, "R_SW_64_LITUSE"}, ++ {6, "R_SW_64_GPDISP"}, ++ {7, "R_SW_64_BRADDR"}, ++ {8, "R_SW_64_HINT"}, ++ {9, "R_SW_64_SREL16"}, ++ {10, "R_SW_64_SREL32"}, ++ {11, "R_SW_64_SREL64"}, ++ {12, "R_SW_64_OP_PUSH"}, ++ {13, "R_SW_64_OP_STORE"}, ++ {14, "R_SW_64_OP_PSUB"}, ++ {15, "R_SW_64_OP_PRSHIFT"}, ++ {16, "R_SW_64_GPVALUE"}, ++ {17, "R_SW_64_GPRELHIGH"}, 
++ {18, "R_SW_64_GPRELLOW"}, ++ {19, "R_SW_64_IMMED_GP_16"}, ++ {20, "R_SW_64_IMMED_GP_HI32"}, ++ {21, "R_SW_64_IMMED_SCN_HI32"}, ++ {22, "R_SW_64_IMMED_BR_HI32"}, ++ {23, "R_SW_64_IMMED_LO32"}, ++ {24, "R_SW_64_COPY"}, ++ {25, "R_SW_64_GLOB_DAT"}, ++ {26, "R_SW_64_JMP_SLOT"}, ++ {27, "R_SW_64_RELATIVE"}, ++} ++ ++func (i R_SW_64) String() string { return stringName(uint32(i), rsw_64Strings, false) } ++func (i R_SW_64) GoString() string { return stringName(uint32(i), rsw_64Strings, true) } ++ + // Relocation types for ARM. + type R_ARM int + +diff --git a/libgo/go/debug/elf/elf_test.go b/libgo/go/debug/elf/elf_test.go +index b8c310dba..940af9c51 100644 +--- a/libgo/go/debug/elf/elf_test.go ++++ b/libgo/go/debug/elf/elf_test.go +@@ -31,6 +31,7 @@ var nameTests = []nameTest{ + {STV_HIDDEN, "STV_HIDDEN"}, + {R_X86_64_PC32, "R_X86_64_PC32"}, + {R_ALPHA_OP_PUSH, "R_ALPHA_OP_PUSH"}, ++ {R_SW_64_OP_PUSH, "R_SW_64_OP_PUSH"}, + {R_ARM_THM_ABS5, "R_ARM_THM_ABS5"}, + {R_386_GOT32, "R_386_GOT32"}, + {R_PPC_GOT16_HI, "R_PPC_GOT16_HI"}, +diff --git a/libgo/go/debug/elf/file.go b/libgo/go/debug/elf/file.go +index 60d2788c9..53f34d78c 100644 +--- a/libgo/go/debug/elf/file.go ++++ b/libgo/go/debug/elf/file.go +@@ -632,6 +632,8 @@ func (f *File) applyRelocations(dst []byte, rels []byte) error { + return f.applyRelocationsSPARC64(dst, rels) + case f.Class == ELFCLASS64 && f.Machine == EM_ALPHA: + return f.applyRelocationsALPHA(dst, rels) ++ case f.Class == ELFCLASS64 && f.Machine == EM_SW_64: ++ return f.applyRelocationsSW_64(dst, rels) + default: + return errors.New("applyRelocations: not implemented") + } +@@ -1266,6 +1268,51 @@ func (f *File) applyRelocationsALPHA(dst []byte, rels []byte) error { + return nil + } + ++func (f *File) applyRelocationsSW_64(dst []byte, rels []byte) error { ++ // 24 is the size of Rela64. 
++ if len(rels)%24 != 0 { ++ return errors.New("length of relocation section is not a multiple of 24") ++ } ++ ++ symbols, _, err := f.getSymbols(SHT_SYMTAB) ++ if err != nil { ++ return err ++ } ++ ++ b := bytes.NewReader(rels) ++ var rela Rela64 ++ for b.Len() > 0 { ++ binary.Read(b, f.ByteOrder, &rela) ++ symNo := rela.Info >> 32 ++ t := R_SW_64(rela.Info & 0xffff) ++ ++ if symNo == 0 || symNo > uint64(len(symbols)) { ++ continue ++ } ++ sym := &symbols[symNo-1] ++ if SymType(sym.Info&0xf) != STT_SECTION { ++ // We don't handle non-section relocations for now. ++ continue ++ } ++ ++ // There are relocations, so this must be a normal ++ // object file, and we only look at section symbols, ++ // so we assume that the symbol value is 0. ++ switch t { ++ case R_SW_64_REFQUAD: ++ if rela.Off+8 >= uint64(len(dst)) || rela.Addend < 0 { ++ continue ++ } ++ f.ByteOrder.PutUint64(dst[rela.Off:rela.Off+8], uint64(rela.Addend)) ++ case R_SW_64_REFLONG: ++ if rela.Off+4 >= uint64(len(dst)) || rela.Addend < 0 { ++ } ++ f.ByteOrder.PutUint32(dst[rela.Off:rela.Off+4], uint32(rela.Addend)) ++ } ++ } ++ return nil ++} ++ + func (f *File) DWARF() (*dwarf.Data, error) { + dwarfSuffix := func(s *Section) string { + switch { +diff --git a/libgo/go/encoding/xml/xml.go b/libgo/go/encoding/xml/xml.go +index 8a0a9c253..f40099a1b 100644 +--- a/libgo/go/encoding/xml/xml.go ++++ b/libgo/go/encoding/xml/xml.go +@@ -1727,6 +1727,7 @@ var htmlEntity = map[string]string{ + "Psi": "\u03A8", + "Omega": "\u03A9", + "alpha": "\u03B1", ++ "sw_64": "\u03B1", + "beta": "\u03B2", + "gamma": "\u03B3", + "delta": "\u03B4", +diff --git a/libgo/go/go/build/syslist.go b/libgo/go/go/build/syslist.go +index 1b11365f5..74d7fec11 100644 +--- a/libgo/go/go/build/syslist.go ++++ b/libgo/go/go/build/syslist.go +@@ -8,4 +8,4 @@ package build + // Do not remove from this list, as these are used for go/build filename matching. 
+ + const goosList = "aix android darwin dragonfly freebsd hurd illumos ios js linux nacl netbsd openbsd plan9 solaris windows zos " +-const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be loong64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv riscv64 s390 s390x sparc sparc64 wasm alpha m68k nios2 sh shbe " ++const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be loong64 mips mipsle mips64 mips64le ppc ppc64 ppc64le riscv riscv64 s390 s390x sparc sparc64 wasm alpha m68k nios2 sh shbe sw_64" +diff --git a/libgo/go/internal/syscall/unix/getrandom_linux_sw_64.go b/libgo/go/internal/syscall/unix/getrandom_linux_sw_64.go +new file mode 100644 +index 000000000..9587b5aa4 +--- /dev/null ++++ b/libgo/go/internal/syscall/unix/getrandom_linux_sw_64.go +@@ -0,0 +1,9 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++package unix ++ ++// Linux getrandom system call number. ++// See GetRandom in getrandom_linux.go. ++const randomTrap uintptr = 511 +diff --git a/libgo/go/internal/syscall/unix/sysnum_linux_sw_64.go b/libgo/go/internal/syscall/unix/sysnum_linux_sw_64.go +new file mode 100644 +index 000000000..c40bc8488 +--- /dev/null ++++ b/libgo/go/internal/syscall/unix/sysnum_linux_sw_64.go +@@ -0,0 +1,10 @@ ++// Copyright 2016 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package unix ++ ++const ( ++ getrandomTrap uintptr = 511 ++ copyFileRangeTrap uintptr = 519 ++) +diff --git a/libgo/go/net/listen_test.go b/libgo/go/net/listen_test.go +index 59c011212..d61055a04 100644 +--- a/libgo/go/net/listen_test.go ++++ b/libgo/go/net/listen_test.go +@@ -673,7 +673,7 @@ func multicastRIBContains(ip IP) (bool, error) { + case "aix", "dragonfly", "netbsd", "openbsd", "plan9", "solaris", "illumos", "windows": + return true, nil // not implemented yet + case "linux": +- if runtime.GOARCH == "arm" || runtime.GOARCH == "alpha" { ++ if runtime.GOARCH == "arm" || runtime.GOARCH == "alpha" || runtime.GOARCH == "sw_64" { + return true, nil // not implemented yet + } + } +diff --git a/libgo/go/regexp/testdata/basic.dat b/libgo/go/regexp/testdata/basic.dat +index 1776b1ff9..b53926812 100644 +--- a/libgo/go/regexp/testdata/basic.dat ++++ b/libgo/go/regexp/testdata/basic.dat +@@ -153,6 +153,7 @@ E a[bcd]*dcdcde adcdcde (0,7) + E (ab|a)b*c abc (0,3)(0,2) + E ((a)(b)c)(d) abcd (0,4)(0,3)(0,1)(1,2)(3,4) + BE [A-Za-z_][A-Za-z0-9_]* alpha (0,5) ++BE [A-Za-z_][A-Za-z0-9_]* sw_64 (0,5) + E ^a(bc+|b[eh])g|.h$ abh (1,3) + E (bc+d$|ef*g.|h?i(j|k)) effgz (0,5)(0,5) + E (bc+d$|ef*g.|h?i(j|k)) ij (0,2)(0,2)(1,2) +diff --git a/libgo/go/runtime/hash64.go b/libgo/go/runtime/hash64.go +index a1d2529e7..ee793552c 100644 +--- a/libgo/go/runtime/hash64.go ++++ b/libgo/go/runtime/hash64.go +@@ -5,7 +5,7 @@ + // Hashing algorithm inspired by + // wyhash: https://github.com/wangyi-fudan/wyhash + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm || alpha || arm64be || ia64 || sparc64 ++//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm || alpha || sw_64 || arm64be || ia64 || sparc64 + + package runtime + +diff --git a/libgo/go/runtime/lfstack_64bit.go b/libgo/go/runtime/lfstack_64bit.go +index 8e0883094..0e87c5059 100644 +--- a/libgo/go/runtime/lfstack_64bit.go ++++ 
b/libgo/go/runtime/lfstack_64bit.go +@@ -2,7 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm || arm64be || alpha || sparc64 || ia64 ++//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || wasm || arm64be || alpha || sw_64 || sparc64 || ia64 + + package runtime + +diff --git a/libgo/go/runtime/mpagealloc_64bit.go b/libgo/go/runtime/mpagealloc_64bit.go +index 3d0d4c608..aca127d7c 100644 +--- a/libgo/go/runtime/mpagealloc_64bit.go ++++ b/libgo/go/runtime/mpagealloc_64bit.go +@@ -2,7 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. + +-//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || arm64be || alpha || sparc64 || ia64 ++//go:build amd64 || arm64 || mips64 || mips64le || ppc64 || ppc64le || riscv64 || s390x || arm64be || alpha || sw_64 || sparc64 || ia64 + + package runtime + +diff --git a/libgo/go/syscall/endian_little.go b/libgo/go/syscall/endian_little.go +index 63e46d8b1..37af34bce 100644 +--- a/libgo/go/syscall/endian_little.go ++++ b/libgo/go/syscall/endian_little.go +@@ -2,7 +2,7 @@ + // Use of this source code is governed by a BSD-style + // license that can be found in the LICENSE file. 
+ +-//go:build 386 || alpha || amd64 || amd64p32 || arm || arm64 || ia64 || ppc64le || mips64le || mipsle || mips64p32le || nios2 || riscv || riscv64 || sh || wasm ++//go:build 386 || alpha || sw_64 || amd64 || amd64p32 || arm || arm64 || ia64 || ppc64le || mips64le || mipsle || mips64p32le || nios2 || riscv || riscv64 || sh || wasm + + package syscall + +diff --git a/libgo/go/syscall/libcall_linux_sw_64.go b/libgo/go/syscall/libcall_linux_sw_64.go +new file mode 100644 +index 000000000..13ccf05a6 +--- /dev/null ++++ b/libgo/go/syscall/libcall_linux_sw_64.go +@@ -0,0 +1,13 @@ ++// Copyright 2012 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. ++ ++// GNU/Linux library calls Alpha specific. ++ ++package syscall ++ ++//sys Ioperm(from int, num int, on int) (err error) ++//ioperm(from _C_long, num _C_long, on _C_int) _C_int ++ ++//sys Iopl(level int) (err error) ++//iopl(level _C_int) _C_int +diff --git a/libgo/go/syscall/syscall_linux_sw_64.go b/libgo/go/syscall/syscall_linux_sw_64.go +new file mode 100644 +index 000000000..5115b9b7c +--- /dev/null ++++ b/libgo/go/syscall/syscall_linux_sw_64.go +@@ -0,0 +1,25 @@ ++// syscall_linux_alpha.go -- GNU/Linux ALPHA specific support ++ ++// Copyright 2011 The Go Authors. All rights reserved. ++// Use of this source code is governed by a BSD-style ++// license that can be found in the LICENSE file. 
++ ++package syscall ++ ++import "unsafe" ++ ++func (r *PtraceRegs) PC() uint64 { ++ return r.Pc ++} ++ ++func (r *PtraceRegs) SetPC(pc uint64) { ++ r.Pc = pc ++} ++ ++func PtraceGetRegs(pid int, regsout *PtraceRegs) (err error) { ++ return ptrace(PTRACE_GETREGS, pid, 0, uintptr(unsafe.Pointer(regsout))) ++} ++ ++func PtraceSetRegs(pid int, regs *PtraceRegs) (err error) { ++ return ptrace(PTRACE_SETREGS, pid, 0, uintptr(unsafe.Pointer(regs))) ++} +diff --git a/libgo/goarch.sh b/libgo/goarch.sh +index 977f318b3..a0cdcf17e 100755 +--- a/libgo/goarch.sh ++++ b/libgo/goarch.sh +@@ -54,6 +54,11 @@ case $goarch in + defaultphyspagesize=8192 + pcquantum=4 + ;; ++ sw_64) ++ family=SW_64 ++ defaultphyspagesize=8192 ++ pcquantum=4 ++ ;; + amd64) + family=AMD64 + ;; +diff --git a/libgo/mksysinfo.sh b/libgo/mksysinfo.sh +index 0c52ea5d7..11031f5a0 100755 +--- a/libgo/mksysinfo.sh ++++ b/libgo/mksysinfo.sh +@@ -377,7 +377,12 @@ if test "$regs" = ""; then + # mips* + regs=`grep '^type _pt_regs struct' gen-sysinfo.go || true` + fi ++if test "$regs" = ""; then ++ # sw_64* ++ regs=`grep '^type _user_pt_regs struct' gen-sysinfo.go || true` ++fi + if test "$regs" != ""; then ++ regs=`echo $regs | sed -e 's/type _user_pt_regs struct//'` + regs=`echo $regs | sed -e 's/type _pt_regs struct//'` + regs=`echo $regs | + sed -e 's/type __*user_regs_struct struct //' -e 's/[{}]//g'` +diff --git a/libgo/runtime/go-signal.c b/libgo/runtime/go-signal.c +index 528d9b6d9..20e6947b5 100644 +--- a/libgo/runtime/go-signal.c ++++ b/libgo/runtime/go-signal.c +@@ -230,6 +230,8 @@ getSiginfo(siginfo_t *info, void *context __attribute__((unused))) + ret.sigpc = ((ucontext_t*)(context))->uc_mcontext.gregs[REG_EIP]; + #elif defined(__alpha__) && defined(__linux__) + ret.sigpc = ((ucontext_t*)(context))->uc_mcontext.sc_pc; ++#elif defined(__sw_64__) && defined(__linux__) ++ ret.sigpc = ((ucontext_t *) (context))->uc_mcontext.sc_pc; + #elif defined(__PPC64__) && defined(__linux__) + ret.sigpc = 
((ucontext_t*)(context))->uc_mcontext.gp_regs[32]; + #elif defined(__PPC__) && defined(__linux__) +@@ -311,43 +313,43 @@ dumpregs(siginfo_t *info __attribute__((unused)), void *context __attribute__((u + runtime_printf("fs %x\n", m->gregs[REG_FS]); + runtime_printf("gs %x\n", m->gregs[REG_GS]); + } +-#elif defined(__alpha__) && defined(__linux__) +- { +- mcontext_t *m = &((ucontext_t*)(context))->uc_mcontext; +- +- runtime_printf("v0 %X\n", m->sc_regs[0]); +- runtime_printf("t0 %X\n", m->sc_regs[1]); +- runtime_printf("t1 %X\n", m->sc_regs[2]); +- runtime_printf("t2 %X\n", m->sc_regs[3]); +- runtime_printf("t3 %X\n", m->sc_regs[4]); +- runtime_printf("t4 %X\n", m->sc_regs[5]); +- runtime_printf("t5 %X\n", m->sc_regs[6]); +- runtime_printf("t6 %X\n", m->sc_regs[7]); +- runtime_printf("t7 %X\n", m->sc_regs[8]); +- runtime_printf("s0 %X\n", m->sc_regs[9]); +- runtime_printf("s1 %X\n", m->sc_regs[10]); +- runtime_printf("s2 %X\n", m->sc_regs[11]); +- runtime_printf("s3 %X\n", m->sc_regs[12]); +- runtime_printf("s4 %X\n", m->sc_regs[13]); +- runtime_printf("s5 %X\n", m->sc_regs[14]); +- runtime_printf("fp %X\n", m->sc_regs[15]); +- runtime_printf("a0 %X\n", m->sc_regs[16]); +- runtime_printf("a1 %X\n", m->sc_regs[17]); +- runtime_printf("a2 %X\n", m->sc_regs[18]); +- runtime_printf("a3 %X\n", m->sc_regs[19]); +- runtime_printf("a4 %X\n", m->sc_regs[20]); +- runtime_printf("a5 %X\n", m->sc_regs[21]); +- runtime_printf("t8 %X\n", m->sc_regs[22]); +- runtime_printf("t9 %X\n", m->sc_regs[23]); +- runtime_printf("t10 %X\n", m->sc_regs[24]); +- runtime_printf("t11 %X\n", m->sc_regs[25]); +- runtime_printf("ra %X\n", m->sc_regs[26]); +- runtime_printf("t12 %X\n", m->sc_regs[27]); +- runtime_printf("at %X\n", m->sc_regs[28]); +- runtime_printf("gp %X\n", m->sc_regs[29]); +- runtime_printf("sp %X\n", m->sc_regs[30]); +- runtime_printf("pc %X\n", m->sc_pc); +- } ++#elif (defined(__alpha__) || defined(__sw_64__)) && defined(__linux__) ++ { ++ mcontext_t *m = &((ucontext_t *) 
(context))->uc_mcontext; ++ ++ runtime_printf ("v0 %X\n", m->sc_regs[0]); ++ runtime_printf ("t0 %X\n", m->sc_regs[1]); ++ runtime_printf ("t1 %X\n", m->sc_regs[2]); ++ runtime_printf ("t2 %X\n", m->sc_regs[3]); ++ runtime_printf ("t3 %X\n", m->sc_regs[4]); ++ runtime_printf ("t4 %X\n", m->sc_regs[5]); ++ runtime_printf ("t5 %X\n", m->sc_regs[6]); ++ runtime_printf ("t6 %X\n", m->sc_regs[7]); ++ runtime_printf ("t7 %X\n", m->sc_regs[8]); ++ runtime_printf ("s0 %X\n", m->sc_regs[9]); ++ runtime_printf ("s1 %X\n", m->sc_regs[10]); ++ runtime_printf ("s2 %X\n", m->sc_regs[11]); ++ runtime_printf ("s3 %X\n", m->sc_regs[12]); ++ runtime_printf ("s4 %X\n", m->sc_regs[13]); ++ runtime_printf ("s5 %X\n", m->sc_regs[14]); ++ runtime_printf ("fp %X\n", m->sc_regs[15]); ++ runtime_printf ("a0 %X\n", m->sc_regs[16]); ++ runtime_printf ("a1 %X\n", m->sc_regs[17]); ++ runtime_printf ("a2 %X\n", m->sc_regs[18]); ++ runtime_printf ("a3 %X\n", m->sc_regs[19]); ++ runtime_printf ("a4 %X\n", m->sc_regs[20]); ++ runtime_printf ("a5 %X\n", m->sc_regs[21]); ++ runtime_printf ("t8 %X\n", m->sc_regs[22]); ++ runtime_printf ("t9 %X\n", m->sc_regs[23]); ++ runtime_printf ("t10 %X\n", m->sc_regs[24]); ++ runtime_printf ("t11 %X\n", m->sc_regs[25]); ++ runtime_printf ("ra %X\n", m->sc_regs[26]); ++ runtime_printf ("t12 %X\n", m->sc_regs[27]); ++ runtime_printf ("at %X\n", m->sc_regs[28]); ++ runtime_printf ("gp %X\n", m->sc_regs[29]); ++ runtime_printf ("sp %X\n", m->sc_regs[30]); ++ runtime_printf ("pc %X\n", m->sc_pc); ++ } + #elif defined(__PPC__) && defined(__linux__) + { + int i; +-- +2.25.1 + diff --git a/0010-LoongArch-Use-fcmp.caf.s-instead-of-movgr2cf-for-zer.patch b/0010-LoongArch-Use-fcmp.caf.s-instead-of-movgr2cf-for-zer.patch new file mode 100644 index 0000000000000000000000000000000000000000..45fba29a8e30489acf01894b88c693bfadc48647 --- /dev/null +++ b/0010-LoongArch-Use-fcmp.caf.s-instead-of-movgr2cf-for-zer.patch @@ -0,0 +1,35 @@ +From 35bce671a97b27a41c425109ba92b24ab87ff35b 
Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Tue, 17 Oct 2023 21:55:05 +0800 +Subject: [PATCH 010/188] LoongArch: Use fcmp.caf.s instead of movgr2cf for + zeroing a fcc + +During the review of an LLVM change [1], on LA464 we found that zeroing +an fcc with fcmp.caf.s is much faster than a movgr2cf from $r0. + +[1]: https://github.com/llvm/llvm-project/pull/69300 + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (movfcc): Use fcmp.caf.s for + zeroing a fcc. +--- + gcc/config/loongarch/loongarch.md | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 73e2cbe0b..5f9e63d66 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -2150,7 +2150,7 @@ + [(set (match_operand:FCC 0 "register_operand" "=z") + (const_int 0))] + "" +- "movgr2cf\t%0,$r0") ++ "fcmp.caf.s\t%0,$f0,$f0") + + ;; Conditional move instructions. + +-- +2.43.0 + diff --git a/0010-Sw64-Port-libgomp.patch b/0010-Sw64-Port-libgomp.patch new file mode 100644 index 0000000000000000000000000000000000000000..fb71443d57b37c3abd4583c6b1e0f2feedc6a1cc --- /dev/null +++ b/0010-Sw64-Port-libgomp.patch @@ -0,0 +1,165 @@ +From 6ecc701c02c54cd1af013e70aef7ccf768f42da2 Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:53:22 +0800 +Subject: [PATCH 10/16] Sw64 Port: libgomp + +--- + libgomp/config/linux/sw_64/futex.h | 102 +++++++++++++++++++++++++++++ + libgomp/configure | 6 ++ + libgomp/configure.tgt | 4 ++ + libgomp/libgomp.spec.in | 2 +- + 4 files changed, 113 insertions(+), 1 deletion(-) + create mode 100644 libgomp/config/linux/sw_64/futex.h + +diff --git a/libgomp/config/linux/sw_64/futex.h b/libgomp/config/linux/sw_64/futex.h +new file mode 100644 +index 000000000..258f38289 +--- /dev/null ++++ b/libgomp/config/linux/sw_64/futex.h +@@ -0,0 +1,102 @@ ++/* Copyright (C) 2005-2022 Free Software Foundation, Inc. ++ Contributed by Richard Henderson . 
++ ++ This file is part of the GNU Offloading and Multi Processing Library ++ (libgomp). ++ ++ Libgomp is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3, or (at your option) ++ any later version. ++ ++ Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++/* Provide target-specific access to the futex system call. 
*/ ++ ++#ifndef SYS_futex ++#define SYS_futex 394 ++#endif ++ ++static inline void ++futex_wait (int *addr, int val) ++{ ++ register long sc_0 __asm__("$0"); ++ register long sc_16 __asm__("$16"); ++ register long sc_17 __asm__("$17"); ++ register long sc_18 __asm__("$18"); ++ register long sc_19 __asm__("$19"); ++ ++ sc_0 = SYS_futex; ++ sc_16 = (long) addr; ++ sc_17 = gomp_futex_wait; ++ sc_18 = val; ++ sc_19 = 0; ++ __asm volatile ("callsys" ++ : "=r"(sc_0), "=r"(sc_19) ++ : "0"(sc_0), "r"(sc_16), "r"(sc_17), "r"(sc_18), "1"(sc_19) ++ : "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$22", "$23", ++ "$24", "$25", "$27", "$28", "memory"); ++ if (__builtin_expect (sc_19, 0) && sc_0 == ENOSYS) ++ { ++ gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG; ++ gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG; ++ sc_0 = SYS_futex; ++ sc_17 &= ~FUTEX_PRIVATE_FLAG; ++ sc_19 = 0; ++ __asm volatile ("callsys" ++ : "=r"(sc_0), "=r"(sc_19) ++ : "0"(sc_0), "r"(sc_16), "r"(sc_17), "r"(sc_18), "1"(sc_19) ++ : "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$22", ++ "$23", "$24", "$25", "$27", "$28", "memory"); ++ } ++} ++ ++static inline void ++futex_wake (int *addr, int count) ++{ ++ register long sc_0 __asm__("$0"); ++ register long sc_16 __asm__("$16"); ++ register long sc_17 __asm__("$17"); ++ register long sc_18 __asm__("$18"); ++ register long sc_19 __asm__("$19"); ++ ++ sc_0 = SYS_futex; ++ sc_16 = (long) addr; ++ sc_17 = gomp_futex_wake; ++ sc_18 = count; ++ __asm volatile ("callsys" ++ : "=r"(sc_0), "=r"(sc_19) ++ : "0"(sc_0), "r"(sc_16), "r"(sc_17), "r"(sc_18) ++ : "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$22", "$23", ++ "$24", "$25", "$27", "$28", "memory"); ++ if (__builtin_expect (sc_19, 0) && sc_0 == ENOSYS) ++ { ++ gomp_futex_wait &= ~FUTEX_PRIVATE_FLAG; ++ gomp_futex_wake &= ~FUTEX_PRIVATE_FLAG; ++ sc_0 = SYS_futex; ++ sc_17 &= ~FUTEX_PRIVATE_FLAG; ++ __asm volatile ("callsys" ++ : "=r"(sc_0), "=r"(sc_19) ++ : "0"(sc_0), "r"(sc_16), "r"(sc_17), "r"(sc_18) ++ : "$1", "$2", 
"$3", "$4", "$5", "$6", "$7", "$8", "$22", ++ "$23", "$24", "$25", "$27", "$28", "memory"); ++ } ++} ++ ++static inline void ++cpu_relax (void) ++{ ++ __asm volatile ("" : : : "memory"); ++} +diff --git a/libgomp/configure b/libgomp/configure +index 471c957b7..a1df23705 100755 +--- a/libgomp/configure ++++ b/libgomp/configure +@@ -11841,6 +11841,12 @@ case `echo $GFORTRAN` in + FC=no + fi ;; + esac ++case "${target}" in ++ sw_64-*-*) ++ FC="$GFORTRAN" ++ ;; ++*) ++esac + ac_ext=${ac_fc_srcext-f} + ac_compile='$FC -c $FCFLAGS $ac_fcflags_srcext conftest.$ac_ext >&5' + ac_link='$FC -o conftest$ac_exeext $FCFLAGS $LDFLAGS $ac_fcflags_srcext conftest.$ac_ext $LIBS >&5' +diff --git a/libgomp/configure.tgt b/libgomp/configure.tgt +index f924e9f98..a8023d0f2 100644 +--- a/libgomp/configure.tgt ++++ b/libgomp/configure.tgt +@@ -87,6 +87,10 @@ if test x$enable_linux_futex = xyes; then + config_path="linux/s390 linux posix" + ;; + ++ sw_64*-*-linux*) ++ config_path="linux/sw_64 linux posix" ++ ;; ++ + tile*-*-linux*) + config_path="linux/tile linux posix" + ;; +diff --git a/libgomp/libgomp.spec.in b/libgomp/libgomp.spec.in +index 5651603f4..8442e6313 100644 +--- a/libgomp/libgomp.spec.in ++++ b/libgomp/libgomp.spec.in +@@ -1,3 +1,3 @@ + # This spec file is read by gcc when linking. It is used to specify the + # standard libraries we need in order to link with libgomp. 
+-*link_gomp: @link_gomp@ ++*link_gomp: @link_gomp@ --whole-archive -lpthread --no-whole-archive +-- +2.25.1 + diff --git a/0010-Version-Clear-DATESTAMP_s.patch b/0010-Version-Clear-DATESTAMP_s.patch new file mode 100644 index 0000000000000000000000000000000000000000..e4c480fb2d3dfcde7221013970d7936360d8da5d --- /dev/null +++ b/0010-Version-Clear-DATESTAMP_s.patch @@ -0,0 +1,26 @@ +From 8e8f783b02df155e3aafa94af6cc1f66604e08eb Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Fri, 21 Jul 2023 14:45:27 +0800 +Subject: [PATCH 10/22] [Version] Clear DATESTAMP_s + +--- + gcc/Makefile.in | 3 +-- + 1 file changed, 1 insertion(+), 2 deletions(-) + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 31ff95500..db2a0e1bd 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -897,8 +897,7 @@ PATCHLEVEL_c := \ + # significant - do not remove it. + BASEVER_s := "\"$(BASEVER_c)\"" + DEVPHASE_s := "\"$(if $(DEVPHASE_c), ($(DEVPHASE_c)))\"" +-DATESTAMP_s := \ +- "\"$(if $(DEVPHASE_c)$(filter-out 0,$(PATCHLEVEL_c)), $(DATESTAMP_c))\"" ++DATESTAMP_s := "\"\"" + PKGVERSION_s:= "\"@PKGVERSION@\"" + BUGURL_s := "\"@REPORT_BUGS_TO@\"" + +-- +2.33.0 + diff --git a/backport-Add-attribute-hot-judgement-for-INLINE_HINT_known_ho.patch b/0011-Add-attribute-hot-judgement-for-INLINE_HINT_known_ho.patch similarity index 94% rename from backport-Add-attribute-hot-judgement-for-INLINE_HINT_known_ho.patch rename to 0011-Add-attribute-hot-judgement-for-INLINE_HINT_known_ho.patch index 3e70f0c2f07c3f4243599d4c9963eb29af67e297..f3add4c20e6f983b6e92db3cf4ddbca71ed27f9b 100644 --- a/backport-Add-attribute-hot-judgement-for-INLINE_HINT_known_ho.patch +++ b/0011-Add-attribute-hot-judgement-for-INLINE_HINT_known_ho.patch @@ -1,7 +1,8 @@ -From 1b9a5cc9ec08e9f239dd2096edcc447b7a72f64a Mon Sep 17 00:00:00 2001 +From 355eb8e20327242442d139fb052d3a3befde3dd7 Mon Sep 17 00:00:00 2001 From: "Cui,Lili" Date: Tue, 1 Nov 2022 09:16:49 +0800 -Subject: [PATCH] Add attribute hot judgement for INLINE_HINT_known_hot 
hint. +Subject: [PATCH 11/22] Add attribute hot judgement for INLINE_HINT_known_hot + hint. We set up INLINE_HINT_known_hot hint only when we have profile feedback, now add function attribute judgement for it, when both caller and callee @@ -30,7 +31,7 @@ gcc/testsuite/ChangeLog: create mode 100644 gcc/testsuite/gcc.dg/ipa/inlinehint-6.c diff --git a/gcc/ipa-inline-analysis.cc b/gcc/ipa-inline-analysis.cc -index 1ca685d1b0e..7bd29c36590 100644 +index 11d8d09ee..16ac24cfc 100644 --- a/gcc/ipa-inline-analysis.cc +++ b/gcc/ipa-inline-analysis.cc @@ -48,6 +48,7 @@ along with GCC; see the file COPYING3. If not see @@ -67,7 +68,7 @@ index 1ca685d1b0e..7bd29c36590 100644 gcc_checking_assert (size >= 0); diff --git a/gcc/testsuite/gcc.dg/ipa/inlinehint-6.c b/gcc/testsuite/gcc.dg/ipa/inlinehint-6.c new file mode 100644 -index 00000000000..1f3be641c6d +index 000000000..1f3be641c --- /dev/null +++ b/gcc/testsuite/gcc.dg/ipa/inlinehint-6.c @@ -0,0 +1,47 @@ @@ -119,5 +120,5 @@ index 00000000000..1f3be641c6d +/* { dg-final { scan-ipa-dump "known_hot" "inline" } } */ + -- -2.18.2 +2.33.0 diff --git a/0011-LoongArch-Implement-avg-and-sad-standard-names.patch b/0011-LoongArch-Implement-avg-and-sad-standard-names.patch new file mode 100644 index 0000000000000000000000000000000000000000..ee1917db82c9eb43b0e74e3526f44df9ffa508f6 --- /dev/null +++ b/0011-LoongArch-Implement-avg-and-sad-standard-names.patch @@ -0,0 +1,389 @@ +From 159dd069968fae895f1f663ebda6f53970ec34b1 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Wed, 18 Oct 2023 17:36:12 +0800 +Subject: [PATCH 011/188] LoongArch:Implement avg and sad standard names. + +gcc/ChangeLog: + + * config/loongarch/lasx.md + (avg3_ceil): New patterns. + (uavg3_ceil): Ditto. + (avg3_floor): Ditto. + (uavg3_floor): Ditto. + (usadv32qi): Ditto. + (ssadv32qi): Ditto. + * config/loongarch/lsx.md + (avg3_ceil): New patterns. + (uavg3_ceil): Ditto. + (avg3_floor): Ditto. + (uavg3_floor): Ditto. + (usadv16qi): Ditto. + (ssadv16qi): Ditto. 
+ +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/avg-ceil-lasx.c: New test. + * gcc.target/loongarch/avg-ceil-lsx.c: New test. + * gcc.target/loongarch/avg-floor-lasx.c: New test. + * gcc.target/loongarch/avg-floor-lsx.c: New test. + * gcc.target/loongarch/sad-lasx.c: New test. + * gcc.target/loongarch/sad-lsx.c: New test. +--- + gcc/config/loongarch/lasx.md | 78 +++++++++++++++++++ + gcc/config/loongarch/lsx.md | 78 +++++++++++++++++++ + .../gcc.target/loongarch/avg-ceil-lasx.c | 22 ++++++ + .../gcc.target/loongarch/avg-ceil-lsx.c | 22 ++++++ + .../gcc.target/loongarch/avg-floor-lasx.c | 22 ++++++ + .../gcc.target/loongarch/avg-floor-lsx.c | 22 ++++++ + gcc/testsuite/gcc.target/loongarch/sad-lasx.c | 20 +++++ + gcc/testsuite/gcc.target/loongarch/sad-lsx.c | 20 +++++ + 8 files changed, 284 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/avg-ceil-lasx.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/avg-ceil-lsx.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/avg-floor-lasx.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/avg-floor-lsx.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/sad-lasx.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/sad-lsx.c + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 2bc5d47ed..c7496d68a 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -5171,3 +5171,81 @@ + const0_rtx)); + DONE; + }) ++ ++(define_expand "avg3_ceil" ++ [(match_operand:ILASX_WHB 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand") ++ (match_operand:ILASX_WHB 2 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvavgr_s_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "uavg3_ceil" ++ [(match_operand:ILASX_WHB 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand") ++ (match_operand:ILASX_WHB 2 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn 
(gen_lasx_xvavgr_u_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "avg3_floor" ++ [(match_operand:ILASX_WHB 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand") ++ (match_operand:ILASX_WHB 2 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvavg_s_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "uavg3_floor" ++ [(match_operand:ILASX_WHB 0 "register_operand") ++ (match_operand:ILASX_WHB 1 "register_operand") ++ (match_operand:ILASX_WHB 2 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ emit_insn (gen_lasx_xvavg_u_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "usadv32qi" ++ [(match_operand:V8SI 0 "register_operand") ++ (match_operand:V32QI 1 "register_operand") ++ (match_operand:V32QI 2 "register_operand") ++ (match_operand:V8SI 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx t1 = gen_reg_rtx (V32QImode); ++ rtx t2 = gen_reg_rtx (V16HImode); ++ rtx t3 = gen_reg_rtx (V8SImode); ++ emit_insn (gen_lasx_xvabsd_u_bu (t1, operands[1], operands[2])); ++ emit_insn (gen_lasx_xvhaddw_h_b (t2, t1, t1)); ++ emit_insn (gen_lasx_xvhaddw_w_h (t3, t2, t2)); ++ emit_insn (gen_addv8si3 (operands[0], t3, operands[3])); ++ DONE; ++}) ++ ++(define_expand "ssadv32qi" ++ [(match_operand:V8SI 0 "register_operand") ++ (match_operand:V32QI 1 "register_operand") ++ (match_operand:V32QI 2 "register_operand") ++ (match_operand:V8SI 3 "register_operand")] ++ "ISA_HAS_LASX" ++{ ++ rtx t1 = gen_reg_rtx (V32QImode); ++ rtx t2 = gen_reg_rtx (V16HImode); ++ rtx t3 = gen_reg_rtx (V8SImode); ++ emit_insn (gen_lasx_xvabsd_s_b (t1, operands[1], operands[2])); ++ emit_insn (gen_lasx_xvhaddw_h_b (t2, t1, t1)); ++ emit_insn (gen_lasx_xvhaddw_w_h (t3, t2, t2)); ++ emit_insn (gen_addv8si3 (operands[0], t3, operands[3])); ++ DONE; ++}) +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index 075f6ba56..b4e92ae9c 100644 +--- a/gcc/config/loongarch/lsx.md 
++++ b/gcc/config/loongarch/lsx.md +@@ -3581,6 +3581,84 @@ + DONE; + }) + ++(define_expand "avg3_ceil" ++ [(match_operand:ILSX_WHB 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand") ++ (match_operand:ILSX_WHB 2 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vavgr_s_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "uavg3_ceil" ++ [(match_operand:ILSX_WHB 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand") ++ (match_operand:ILSX_WHB 2 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vavgr_u_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "avg3_floor" ++ [(match_operand:ILSX_WHB 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand") ++ (match_operand:ILSX_WHB 2 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vavg_s_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "uavg3_floor" ++ [(match_operand:ILSX_WHB 0 "register_operand") ++ (match_operand:ILSX_WHB 1 "register_operand") ++ (match_operand:ILSX_WHB 2 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ emit_insn (gen_lsx_vavg_u_ (operands[0], ++ operands[1], operands[2])); ++ DONE; ++}) ++ ++(define_expand "usadv16qi" ++ [(match_operand:V4SI 0 "register_operand") ++ (match_operand:V16QI 1 "register_operand") ++ (match_operand:V16QI 2 "register_operand") ++ (match_operand:V4SI 3 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx t1 = gen_reg_rtx (V16QImode); ++ rtx t2 = gen_reg_rtx (V8HImode); ++ rtx t3 = gen_reg_rtx (V4SImode); ++ emit_insn (gen_lsx_vabsd_u_bu (t1, operands[1], operands[2])); ++ emit_insn (gen_lsx_vhaddw_h_b (t2, t1, t1)); ++ emit_insn (gen_lsx_vhaddw_w_h (t3, t2, t2)); ++ emit_insn (gen_addv4si3 (operands[0], t3, operands[3])); ++ DONE; ++}) ++ ++(define_expand "ssadv16qi" ++ [(match_operand:V4SI 0 "register_operand") ++ (match_operand:V16QI 1 "register_operand") ++ (match_operand:V16QI 2 "register_operand") ++ 
(match_operand:V4SI 3 "register_operand")] ++ "ISA_HAS_LSX" ++{ ++ rtx t1 = gen_reg_rtx (V16QImode); ++ rtx t2 = gen_reg_rtx (V8HImode); ++ rtx t3 = gen_reg_rtx (V4SImode); ++ emit_insn (gen_lsx_vabsd_s_b (t1, operands[1], operands[2])); ++ emit_insn (gen_lsx_vhaddw_h_b (t2, t1, t1)); ++ emit_insn (gen_lsx_vhaddw_w_h (t3, t2, t2)); ++ emit_insn (gen_addv4si3 (operands[0], t3, operands[3])); ++ DONE; ++}) ++ + (define_insn "lsx_vwev_d_w" + [(set (match_operand:V2DI 0 "register_operand" "=f") + (addsubmul:V2DI +diff --git a/gcc/testsuite/gcc.target/loongarch/avg-ceil-lasx.c b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lasx.c +new file mode 100644 +index 000000000..16db7bf72 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lasx.c +@@ -0,0 +1,22 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler "xvavgr.b" } } */ ++/* { dg-final { scan-assembler "xvavgr.bu" } } */ ++/* { dg-final { scan-assembler "xvavgr.hu" } } */ ++/* { dg-final { scan-assembler "xvavgr.h" } } */ ++ ++#define N 1024 ++ ++#define TEST(TYPE, NAME) \ ++ TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ ++ void f_##NAME (void) \ ++ { \ ++ int i; \ ++ for (i = 0; i < N; i++) \ ++ a_##NAME[i] = (b_##NAME[i] + c_##NAME[i] + 1) >> 1; \ ++ } ++ ++TEST(char, 1); ++TEST(short, 2); ++TEST(unsigned char, 3); ++TEST(unsigned short, 4); +diff --git a/gcc/testsuite/gcc.target/loongarch/avg-ceil-lsx.c b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lsx.c +new file mode 100644 +index 000000000..94119c23b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/avg-ceil-lsx.c +@@ -0,0 +1,22 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlsx" } */ ++/* { dg-final { scan-assembler "vavgr.b" } } */ ++/* { dg-final { scan-assembler "vavgr.bu" } } */ ++/* { dg-final { scan-assembler "vavgr.hu" } } */ ++/* { dg-final { scan-assembler "vavgr.h" } } */ ++ ++#define N 1024 ++ ++#define TEST(TYPE, NAME) \ ++ TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ ++ void 
f_##NAME (void) \ ++ { \ ++ int i; \ ++ for (i = 0; i < N; i++) \ ++ a_##NAME[i] = (b_##NAME[i] + c_##NAME[i] + 1) >> 1; \ ++ } ++ ++TEST(char, 1); ++TEST(short, 2); ++TEST(unsigned char, 3); ++TEST(unsigned short, 4); +diff --git a/gcc/testsuite/gcc.target/loongarch/avg-floor-lasx.c b/gcc/testsuite/gcc.target/loongarch/avg-floor-lasx.c +new file mode 100644 +index 000000000..da6896531 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/avg-floor-lasx.c +@@ -0,0 +1,22 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler "xvavg.b" } } */ ++/* { dg-final { scan-assembler "xvavg.bu" } } */ ++/* { dg-final { scan-assembler "xvavg.hu" } } */ ++/* { dg-final { scan-assembler "xvavg.h" } } */ ++ ++#define N 1024 ++ ++#define TEST(TYPE, NAME) \ ++ TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ ++ void f_##NAME (void) \ ++ { \ ++ int i; \ ++ for (i = 0; i < N; i++) \ ++ a_##NAME[i] = (b_##NAME[i] + c_##NAME[i]) >> 1; \ ++ } ++ ++TEST(char, 1); ++TEST(short, 2); ++TEST(unsigned char, 3); ++TEST(unsigned short, 4); +diff --git a/gcc/testsuite/gcc.target/loongarch/avg-floor-lsx.c b/gcc/testsuite/gcc.target/loongarch/avg-floor-lsx.c +new file mode 100644 +index 000000000..bbb9db527 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/avg-floor-lsx.c +@@ -0,0 +1,22 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlsx" } */ ++/* { dg-final { scan-assembler "vavg.b" } } */ ++/* { dg-final { scan-assembler "vavg.bu" } } */ ++/* { dg-final { scan-assembler "vavg.hu" } } */ ++/* { dg-final { scan-assembler "vavg.h" } } */ ++ ++#define N 1024 ++ ++#define TEST(TYPE, NAME) \ ++ TYPE a_##NAME[N], b_##NAME[N], c_##NAME[N]; \ ++ void f_##NAME (void) \ ++ { \ ++ int i; \ ++ for (i = 0; i < N; i++) \ ++ a_##NAME[i] = (b_##NAME[i] + c_##NAME[i]) >> 1; \ ++ } ++ ++TEST(char, 1); ++TEST(short, 2); ++TEST(unsigned char, 3); ++TEST(unsigned short, 4); +diff --git a/gcc/testsuite/gcc.target/loongarch/sad-lasx.c 
b/gcc/testsuite/gcc.target/loongarch/sad-lasx.c +new file mode 100644 +index 000000000..6c0cdfd97 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/sad-lasx.c +@@ -0,0 +1,20 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++ ++#define N 1024 ++ ++#define TEST(SIGN) \ ++ SIGN char a_##SIGN[N], b_##SIGN[N]; \ ++ int f_##SIGN (void) \ ++ { \ ++ int i, sum = 0; \ ++ for (i = 0; i < N; i++) \ ++ sum += __builtin_abs (a_##SIGN[i] - b_##SIGN[i]);; \ ++ return sum; \ ++ } ++ ++TEST(signed); ++TEST(unsigned); ++ ++/* { dg-final { scan-assembler {\txvabsd.bu\t} } } */ ++/* { dg-final { scan-assembler {\txvabsd.b\t} } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/sad-lsx.c b/gcc/testsuite/gcc.target/loongarch/sad-lsx.c +new file mode 100644 +index 000000000..b92110a8b +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/sad-lsx.c +@@ -0,0 +1,20 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlsx" } */ ++ ++#define N 1024 ++ ++#define TEST(SIGN) \ ++ SIGN char a_##SIGN[N], b_##SIGN[N]; \ ++ int f_##SIGN (void) \ ++ { \ ++ int i, sum = 0; \ ++ for (i = 0; i < N; i++) \ ++ sum += __builtin_abs (a_##SIGN[i] - b_##SIGN[i]);; \ ++ return sum; \ ++ } ++ ++TEST(signed); ++TEST(unsigned); ++ ++/* { dg-final { scan-assembler {\tvabsd.bu\t} } } */ ++/* { dg-final { scan-assembler {\tvabsd.b\t} } } */ +-- +2.43.0 + diff --git a/0011-Sw64-Port-libitm.patch b/0011-Sw64-Port-libitm.patch new file mode 100644 index 0000000000000000000000000000000000000000..52a527ed7afccf0718e9fd9594c1f93a5ef706e4 --- /dev/null +++ b/0011-Sw64-Port-libitm.patch @@ -0,0 +1,260 @@ +From c506f4995a68274efbd31ede3751b14dc0fa0718 Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:53:43 +0800 +Subject: [PATCH 11/16] Sw64 Port: libitm + +--- + libitm/config/linux/sw_64/futex_bits.h | 56 +++++++++++++ + libitm/config/sw_64/sjlj.S | 112 +++++++++++++++++++++++++ + libitm/config/sw_64/target.h | 44 ++++++++++ + libitm/configure.tgt | 1 + + 4 files changed, 
213 insertions(+) + create mode 100644 libitm/config/linux/sw_64/futex_bits.h + create mode 100644 libitm/config/sw_64/sjlj.S + create mode 100644 libitm/config/sw_64/target.h + +diff --git a/libitm/config/linux/sw_64/futex_bits.h b/libitm/config/linux/sw_64/futex_bits.h +new file mode 100644 +index 000000000..478a3078a +--- /dev/null ++++ b/libitm/config/linux/sw_64/futex_bits.h +@@ -0,0 +1,56 @@ ++/* Copyright (C) 2008-2022 Free Software Foundation, Inc. ++ Contributed by Richard Henderson . ++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++/* Provide target-specific access to the futex system call. 
*/ ++ ++#ifndef SYS_futex ++#define SYS_futex 394 ++#endif ++ ++static inline long ++sys_futex0 (std::atomic *addr, int op, int val) ++{ ++ register long sc_0 __asm__("$0"); ++ register long sc_16 __asm__("$16"); ++ register long sc_17 __asm__("$17"); ++ register long sc_18 __asm__("$18"); ++ register long sc_19 __asm__("$19"); ++ long res; ++ ++ sc_0 = SYS_futex; ++ sc_16 = (long) addr; ++ sc_17 = op; ++ sc_18 = val; ++ sc_19 = 0; ++ __asm volatile("callsys" ++ : "=r"(sc_0), "=r"(sc_19) ++ : "0"(sc_0), "r"(sc_16), "r"(sc_17), "r"(sc_18), "1"(sc_19) ++ : "$1", "$2", "$3", "$4", "$5", "$6", "$7", "$8", "$22", "$23", ++ "$24", "$25", "$27", "$28", "memory"); ++ ++ res = sc_0; ++ if (__builtin_expect (sc_19, 0)) ++ res = -res; ++ return res; ++} +diff --git a/libitm/config/sw_64/sjlj.S b/libitm/config/sw_64/sjlj.S +new file mode 100644 +index 000000000..c4b74d76b +--- /dev/null ++++ b/libitm/config/sw_64/sjlj.S +@@ -0,0 +1,112 @@ ++/* Copyright (C) 2009-2022 Free Software Foundation, Inc. ++ Contributed by Richard Henderson . ++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. 
++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++ .text ++ .align 4 ++ .globl _ITM_beginTransaction ++ .ent _ITM_beginTransaction ++ ++#define FRAME 144 ++ ++_ITM_beginTransaction: ++ ldgp $29, 0($27) ++ subl $30, FRAME, $30 ++ .frame $30, FRAME, $26, 0 ++ .mask 0x04000000, 0 ++ stl $26, 0($30) ++ .prologue 1 ++ ++ stl $9, 8($30) ++ stl $10, 16($30) ++ addl $30, FRAME, $0 ++ stl $11, 24($30) ++ ++ stl $12, 32($30) ++ stl $13, 40($30) ++ stl $14, 48($30) ++ stl $15, 56($30) ++ ++ stl $0, 64($30) ++ fstd $f2, 72($30) ++ fstd $f3, 80($30) ++ fstd $f4, 88($30) ++ ++ fstd $f5, 96($30) ++ fstd $f6, 104($30) ++ fstd $f7, 112($30) ++ fstd $f8, 120($30) ++ ++ fstd $f9, 128($30) ++ mov $30, $17 ++#ifdef __PIC__ ++ unop ++ bsr $26, GTM_begin_transaction !samegp ++#else ++ call $26, GTM_begin_transaction ++ ldgp $29, 0($26) ++#endif ++ ++ ldl $26, 0($30) ++ addl $30, FRAME, $30 ++ ret ++.end _ITM_beginTransaction ++ ++ .align 4 ++ .globl GTM_longjmp ++#ifdef __ELF__ ++ .hidden GTM_longjmp ++#endif ++ .ent GTM_longjmp ++ ++GTM_longjmp: ++ .prologue 0 ++ ldl $26, 0($17) ++ ldl $9, 8($17) ++ ldl $10, 16($17) ++ ldl $11, 24($17) ++ ++ ldl $12, 32($17) ++ ldl $13, 40($17) ++ ldl $14, 48($17) ++ ldl $15, 56($17) ++ ++ ldl $1, 64($17) ++ fldd $f2, 72($17) ++ fldd $f3, 80($17) ++ fldd $f4, 88($17) ++ ++ fldd $f5, 96($17) ++ fldd $f6, 104($17) ++ fldd $f7, 112($17) ++ fldd $f8, 120($17) ++ ++ fldd $f9, 128($17) ++ mov $16, $0 ++ mov $1, $30 ++ ret ++.end GTM_longjmp ++ ++#ifdef __linux__ ++.section .note.GNU-stack, "", @progbits ++#endif +diff --git a/libitm/config/sw_64/target.h b/libitm/config/sw_64/target.h +new file mode 100644 +index 000000000..7c3f1e3bd +--- /dev/null ++++ b/libitm/config/sw_64/target.h +@@ -0,0 +1,44 @@ ++/* Copyright (C) 2009-2022 Free Software Foundation, Inc. 
++ Contributed by Richard Henderson . ++ ++ This file is part of the GNU Transactional Memory Library (libitm). ++ ++ Libitm is free software; you can redistribute it and/or modify it ++ under the terms of the GNU General Public License as published by ++ the Free Software Foundation; either version 3 of the License, or ++ (at your option) any later version. ++ ++ Libitm is distributed in the hope that it will be useful, but WITHOUT ANY ++ WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS ++ FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ Under Section 7 of GPL version 3, you are granted additional ++ permissions described in the GCC Runtime Library Exception, version ++ 3.1, as published by the Free Software Foundation. ++ ++ You should have received a copy of the GNU General Public License and ++ a copy of the GCC Runtime Library Exception along with this program; ++ see the files COPYING3 and COPYING.RUNTIME respectively. If not, see ++ . */ ++ ++namespace GTM HIDDEN { ++ ++typedef struct gtm_jmpbuf ++{ ++ unsigned long pc; ++ unsigned long s[7]; ++ void *cfa; ++ unsigned long f[8]; ++} gtm_jmpbuf; ++ ++/* The size of one line in hardware caches (in bytes). */ ++#define HW_CACHELINE_SIZE 64 ++ ++static inline void ++cpu_relax (void) ++{ ++ __asm volatile ("" : : : "memory"); ++} ++ ++} // namespace HIDDEN +diff --git a/libitm/configure.tgt b/libitm/configure.tgt +index 06e90973e..d831da5b0 100644 +--- a/libitm/configure.tgt ++++ b/libitm/configure.tgt +@@ -118,6 +118,7 @@ EOF + *) + ARCH="${target_cpu}" + ;; ++ sw_64*) ARCH=sw_64 ;; + esac + + # For the benefit of top-level configure, determine if the cpu is supported. 
+-- +2.25.1 + diff --git a/backport-Enable-small-loop-unrolling-for-O2.patch b/0012-Enable-small-loop-unrolling-for-O2.patch similarity index 89% rename from backport-Enable-small-loop-unrolling-for-O2.patch rename to 0012-Enable-small-loop-unrolling-for-O2.patch index b16171bcc35c283372b6f5360c44d5c152defa36..97ca890efb6887ce2287099dc432e2df4f0f9cf4 100644 --- a/backport-Enable-small-loop-unrolling-for-O2.patch +++ b/0012-Enable-small-loop-unrolling-for-O2.patch @@ -1,7 +1,7 @@ -From 6c977a4e458eab0dd7684b143baf72240b96fda8 Mon Sep 17 00:00:00 2001 +From 1070bc24f53e851cae55320e26715cc594efcd2f Mon Sep 17 00:00:00 2001 From: Hongyu Wang Date: Thu, 8 Sep 2022 16:52:02 +0800 -Subject: [PATCH 4/5] Enable small loop unrolling for O2 +Subject: [PATCH 12/22] Enable small loop unrolling for O2 Modern processors has multiple way instruction decoders For x86, icelake/zen3 has 5 uops, so for small loop with <= 4 @@ -51,16 +51,16 @@ gcc/testsuite/ChangeLog: gcc/config/i386/i386.cc | 18 ++++++++ gcc/config/i386/i386.h | 5 +++ gcc/config/i386/i386.opt | 4 ++ - gcc/config/i386/x86-tune-costs.h | 56 +++++++++++++++++++++++++ + gcc/config/i386/x86-tune-costs.h | 58 +++++++++++++++++++++++++ gcc/doc/invoke.texi | 11 ++++- gcc/loop-init.cc | 10 +++-- gcc/testsuite/gcc.dg/guality/loop-1.c | 2 + gcc/testsuite/gcc.target/i386/pr86270.c | 2 +- gcc/testsuite/gcc.target/i386/pr93002.c | 2 +- - 10 files changed, 105 insertions(+), 6 deletions(-) + 10 files changed, 107 insertions(+), 6 deletions(-) diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc -index 07fdd045f30..e1c1fb07d8a 100644 +index e2594cae4..cdd5caa55 100644 --- a/gcc/common/config/i386/i386-common.cc +++ b/gcc/common/config/i386/i386-common.cc @@ -1687,6 +1687,7 @@ static const struct default_options ix86_option_optimization_table[] = @@ -72,10 +72,10 @@ index 07fdd045f30..e1c1fb07d8a 100644 problem with not enough registers even worse. 
*/ { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 }, diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc -index b16df5b183e..39b2468799c 100644 +index 9a9ff3b34..e56004300 100644 --- a/gcc/config/i386/i386.cc +++ b/gcc/config/i386/i386.cc -@@ -23561,6 +23561,24 @@ ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop) +@@ -23570,6 +23570,24 @@ ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop) unsigned i; unsigned mem_count = 0; @@ -101,7 +101,7 @@ index b16df5b183e..39b2468799c 100644 return nunroll; diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h -index a61c32b8957..421801111a7 100644 +index fce0b3564..688aaabd3 100644 --- a/gcc/config/i386/i386.h +++ b/gcc/config/i386/i386.h @@ -219,6 +219,11 @@ struct processor_costs { @@ -117,7 +117,7 @@ index a61c32b8957..421801111a7 100644 extern const struct processor_costs *ix86_cost; diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt -index a6b0e28f238..3d369647bf7 100644 +index a3675e515..fc1b944ac 100644 --- a/gcc/config/i386/i386.opt +++ b/gcc/config/i386/i386.opt @@ -1214,3 +1214,7 @@ Do not use GOT to access external symbols. @@ -129,7 +129,7 @@ index a6b0e28f238..3d369647bf7 100644 +Target Var(ix86_unroll_only_small_loops) Init(0) Save +Enable conservative small loop unrolling. diff --git a/gcc/config/i386/x86-tune-costs.h b/gcc/config/i386/x86-tune-costs.h -index 017ffa69958..b4303e4e971 100644 +index f105d57ca..db4c2da34 100644 --- a/gcc/config/i386/x86-tune-costs.h +++ b/gcc/config/i386/x86-tune-costs.h @@ -135,6 +135,8 @@ struct processor_costs ix86_size_cost = {/* costs for tuning for size */ @@ -266,8 +266,17 @@ index 017ffa69958..b4303e4e971 100644 + 2, /* Small unroll factor. */ }; + /* This table currently replicates znver3_cost table. */ +@@ -1952,6 +1982,8 @@ struct processor_costs znver4_cost = { + "16", /* Jump alignment. */ + "0:0:8", /* Label alignment. */ + "16", /* Func alignment. */ ++ 4, /* Small unroll limit. */ ++ 2, /* Small unroll factor. 
*/ + }; + /* skylake_cost should produce code tuned for Skylake familly of CPUs. */ -@@ -1942,6 +1972,8 @@ struct processor_costs skylake_cost = { +@@ -2076,6 +2108,8 @@ struct processor_costs skylake_cost = { "16:11:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -276,7 +285,7 @@ index 017ffa69958..b4303e4e971 100644 }; /* icelake_cost should produce code tuned for Icelake family of CPUs. -@@ -2068,6 +2100,8 @@ struct processor_costs icelake_cost = { +@@ -2202,6 +2236,8 @@ struct processor_costs icelake_cost = { "16:11:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -285,7 +294,7 @@ index 017ffa69958..b4303e4e971 100644 }; /* alderlake_cost should produce code tuned for alderlake family of CPUs. */ -@@ -2188,6 +2222,8 @@ struct processor_costs alderlake_cost = { +@@ -2322,6 +2358,8 @@ struct processor_costs alderlake_cost = { "16:11:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -294,7 +303,7 @@ index 017ffa69958..b4303e4e971 100644 }; /* BTVER1 has optimized REP instruction for medium sized blocks, but for -@@ -2301,6 +2337,8 @@ const struct processor_costs btver1_cost = { +@@ -2435,6 +2473,8 @@ const struct processor_costs btver1_cost = { "16:8:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "11", /* Func alignment. */ @@ -303,7 +312,7 @@ index 017ffa69958..b4303e4e971 100644 }; static stringop_algs btver2_memcpy[2] = { -@@ -2411,6 +2449,8 @@ const struct processor_costs btver2_cost = { +@@ -2545,6 +2585,8 @@ const struct processor_costs btver2_cost = { "16:8:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "11", /* Func alignment. */ @@ -312,7 +321,7 @@ index 017ffa69958..b4303e4e971 100644 }; static stringop_algs pentium4_memcpy[2] = { -@@ -2520,6 +2560,8 @@ struct processor_costs pentium4_cost = { +@@ -2654,6 +2696,8 @@ struct processor_costs pentium4_cost = { NULL, /* Jump alignment. */ NULL, /* Label alignment. 
*/ NULL, /* Func alignment. */ @@ -321,7 +330,7 @@ index 017ffa69958..b4303e4e971 100644 }; static stringop_algs nocona_memcpy[2] = { -@@ -2632,6 +2674,8 @@ struct processor_costs nocona_cost = { +@@ -2766,6 +2810,8 @@ struct processor_costs nocona_cost = { NULL, /* Jump alignment. */ NULL, /* Label alignment. */ NULL, /* Func alignment. */ @@ -330,7 +339,7 @@ index 017ffa69958..b4303e4e971 100644 }; static stringop_algs atom_memcpy[2] = { -@@ -2742,6 +2786,8 @@ struct processor_costs atom_cost = { +@@ -2876,6 +2922,8 @@ struct processor_costs atom_cost = { "16:8:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -339,7 +348,7 @@ index 017ffa69958..b4303e4e971 100644 }; static stringop_algs slm_memcpy[2] = { -@@ -2852,6 +2898,8 @@ struct processor_costs slm_cost = { +@@ -2986,6 +3034,8 @@ struct processor_costs slm_cost = { "16:8:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -348,7 +357,7 @@ index 017ffa69958..b4303e4e971 100644 }; static stringop_algs tremont_memcpy[2] = { -@@ -2976,6 +3024,8 @@ struct processor_costs tremont_cost = { +@@ -3110,6 +3160,8 @@ struct processor_costs tremont_cost = { "16:11:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -357,7 +366,7 @@ index 017ffa69958..b4303e4e971 100644 }; static stringop_algs intel_memcpy[2] = { -@@ -3086,6 +3136,8 @@ struct processor_costs intel_cost = { +@@ -3220,6 +3272,8 @@ struct processor_costs intel_cost = { "16:8:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -366,7 +375,7 @@ index 017ffa69958..b4303e4e971 100644 }; /* Generic should produce code tuned for Core-i7 (and newer chips) -@@ -3205,6 +3257,8 @@ struct processor_costs generic_cost = { +@@ -3339,6 +3393,8 @@ struct processor_costs generic_cost = { "16:11:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. 
*/ @@ -375,7 +384,7 @@ index 017ffa69958..b4303e4e971 100644 }; /* core_cost should produce code tuned for Core familly of CPUs. */ -@@ -3331,5 +3385,7 @@ struct processor_costs core_cost = { +@@ -3465,5 +3521,7 @@ struct processor_costs core_cost = { "16:11:8", /* Jump alignment. */ "0:0:8", /* Label alignment. */ "16", /* Func alignment. */ @@ -384,10 +393,10 @@ index 017ffa69958..b4303e4e971 100644 }; diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi -index 9ac7f89ebb1..1961cafa2bb 100644 +index ff8cd032f..16f4b367e 100644 --- a/gcc/doc/invoke.texi +++ b/gcc/doc/invoke.texi -@@ -1448,7 +1448,8 @@ See RS/6000 and PowerPC Options. +@@ -1449,7 +1449,8 @@ See RS/6000 and PowerPC Options. -mgeneral-regs-only -mcall-ms2sysv-xlogues -mrelax-cmpxchg-loop @gol -mindirect-branch=@var{choice} -mfunction-return=@var{choice} @gol -mindirect-branch-register -mharden-sls=@var{choice} @gol @@ -397,7 +406,7 @@ index 9ac7f89ebb1..1961cafa2bb 100644 @emph{x86 Windows Options} @gccoptlist{-mconsole -mcygwin -mno-cygwin -mdll @gol -@@ -33157,6 +33158,14 @@ treat access to protected symbols as local symbols. The default is +@@ -33183,6 +33184,14 @@ treat access to protected symbols as local symbols. The default is @option{-mno-direct-extern-access} and executable compiled with @option{-mdirect-extern-access} may not be binary compatible if protected symbols are used in shared libraries and executable. 
@@ -413,7 +422,7 @@ index 9ac7f89ebb1..1961cafa2bb 100644 @node x86 Windows Options diff --git a/gcc/loop-init.cc b/gcc/loop-init.cc -index 1e4f6cfd7fb..84336865ef7 100644 +index 1e4f6cfd7..f1c717041 100644 --- a/gcc/loop-init.cc +++ b/gcc/loop-init.cc @@ -565,9 +565,12 @@ public: @@ -421,7 +430,7 @@ index 1e4f6cfd7fb..84336865ef7 100644 /* opt_pass methods: */ - virtual bool gate (function *) -+ virtual bool gate (function * fun) ++ virtual bool gate (function *fun) { - return (flag_unroll_loops || flag_unroll_all_loops || cfun->has_unroll); + return (flag_unroll_loops || flag_unroll_all_loops || cfun->has_unroll @@ -442,7 +451,7 @@ index 1e4f6cfd7fb..84336865ef7 100644 if (flag_unroll_all_loops) flags |= UAP_UNROLL_ALL; diff --git a/gcc/testsuite/gcc.dg/guality/loop-1.c b/gcc/testsuite/gcc.dg/guality/loop-1.c -index 1b1f6d32271..a32ea445a3f 100644 +index 1b1f6d322..a32ea445a 100644 --- a/gcc/testsuite/gcc.dg/guality/loop-1.c +++ b/gcc/testsuite/gcc.dg/guality/loop-1.c @@ -1,5 +1,7 @@ @@ -454,7 +463,7 @@ index 1b1f6d32271..a32ea445a3f 100644 #include "../nop.h" diff --git a/gcc/testsuite/gcc.target/i386/pr86270.c b/gcc/testsuite/gcc.target/i386/pr86270.c -index 81841ef5bd7..cbc9fbb0450 100644 +index 81841ef5b..cbc9fbb04 100644 --- a/gcc/testsuite/gcc.target/i386/pr86270.c +++ b/gcc/testsuite/gcc.target/i386/pr86270.c @@ -1,5 +1,5 @@ @@ -465,7 +474,7 @@ index 81841ef5bd7..cbc9fbb0450 100644 int *a; long len; diff --git a/gcc/testsuite/gcc.target/i386/pr93002.c b/gcc/testsuite/gcc.target/i386/pr93002.c -index 0248fcc00a5..f75a847f75d 100644 +index 0248fcc00..f75a847f7 100644 --- a/gcc/testsuite/gcc.target/i386/pr93002.c +++ b/gcc/testsuite/gcc.target/i386/pr93002.c @@ -1,6 +1,6 @@ @@ -477,5 +486,5 @@ index 0248fcc00a5..f75a847f75d 100644 volatile int sink; -- -2.18.2 +2.33.0 diff --git a/0012-LoongArch-Implement-vec_widen-standard-names.patch b/0012-LoongArch-Implement-vec_widen-standard-names.patch new file mode 100644 index 
0000000000000000000000000000000000000000..dc19fc29bbe5b190312cfa8b7caa2458b03488ae --- /dev/null +++ b/0012-LoongArch-Implement-vec_widen-standard-names.patch @@ -0,0 +1,403 @@ +From 81e2e22979d9f9d170b1c30ec27e30e1f25aec35 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Wed, 18 Oct 2023 17:39:40 +0800 +Subject: [PATCH 012/188] LoongArch:Implement vec_widen standard names. + +Add support for vec_widen lo/hi patterns. These do not directly +match on Loongarch lasx instructions but can be emulated with +even/odd + vector merge. + +gcc/ChangeLog: + + * config/loongarch/lasx.md + (vec_widen_mult_even_v8si): New patterns. + (vec_widen_add_hi_): Ditto. + (vec_widen_add_lo_): Ditto. + (vec_widen_sub_hi_): Ditto. + (vec_widen_sub_lo_): Ditto. + (vec_widen_mult_hi_): Ditto. + (vec_widen_mult_lo_): Ditto. + * config/loongarch/loongarch.md (u_bool): New iterator. + * config/loongarch/loongarch-protos.h + (loongarch_expand_vec_widen_hilo): New prototype. + * config/loongarch/loongarch.cc + (loongarch_expand_vec_interleave): New function. + (loongarch_expand_vec_widen_hilo): New function. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vect-widen-add.c: New test. + * gcc.target/loongarch/vect-widen-mul.c: New test. + * gcc.target/loongarch/vect-widen-sub.c: New test. 
+--- + gcc/config/loongarch/lasx.md | 82 ++++++++--- + gcc/config/loongarch/loongarch-protos.h | 1 + + gcc/config/loongarch/loongarch.cc | 137 ++++++++++++++++++ + gcc/config/loongarch/loongarch.md | 2 + + .../gcc.target/loongarch/vect-widen-add.c | 24 +++ + .../gcc.target/loongarch/vect-widen-mul.c | 24 +++ + .../gcc.target/loongarch/vect-widen-sub.c | 24 +++ + 7 files changed, 277 insertions(+), 17 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-widen-add.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-widen-mul.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-widen-sub.c + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index c7496d68a..442fda246 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -5048,23 +5048,71 @@ + [(set_attr "type" "simd_store") + (set_attr "mode" "DI")]) + +-(define_insn "vec_widen_mult_even_v8si" +- [(set (match_operand:V4DI 0 "register_operand" "=f") +- (mult:V4DI +- (any_extend:V4DI +- (vec_select:V4SI +- (match_operand:V8SI 1 "register_operand" "%f") +- (parallel [(const_int 0) (const_int 2) +- (const_int 4) (const_int 6)]))) +- (any_extend:V4DI +- (vec_select:V4SI +- (match_operand:V8SI 2 "register_operand" "f") +- (parallel [(const_int 0) (const_int 2) +- (const_int 4) (const_int 6)])))))] +- "ISA_HAS_LASX" +- "xvmulwev.d.w\t%u0,%u1,%u2" +- [(set_attr "type" "simd_int_arith") +- (set_attr "mode" "V4DI")]) ++(define_expand "vec_widen_add_hi_" ++ [(match_operand: 0 "register_operand") ++ (any_extend: (match_operand:ILASX_HB 1 "register_operand")) ++ (any_extend: (match_operand:ILASX_HB 2 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], ++ , true, "add"); ++ DONE; ++}) ++ ++(define_expand "vec_widen_add_lo_" ++ [(match_operand: 0 "register_operand") ++ (any_extend: (match_operand:ILASX_HB 1 "register_operand")) ++ (any_extend: (match_operand:ILASX_HB 2 
"register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], ++ , false, "add"); ++ DONE; ++}) ++ ++(define_expand "vec_widen_sub_hi_" ++ [(match_operand: 0 "register_operand") ++ (any_extend: (match_operand:ILASX_HB 1 "register_operand")) ++ (any_extend: (match_operand:ILASX_HB 2 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], ++ , true, "sub"); ++ DONE; ++}) ++ ++(define_expand "vec_widen_sub_lo_" ++ [(match_operand: 0 "register_operand") ++ (any_extend: (match_operand:ILASX_HB 1 "register_operand")) ++ (any_extend: (match_operand:ILASX_HB 2 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], ++ , false, "sub"); ++ DONE; ++}) ++ ++(define_expand "vec_widen_mult_hi_" ++ [(match_operand: 0 "register_operand") ++ (any_extend: (match_operand:ILASX_HB 1 "register_operand")) ++ (any_extend: (match_operand:ILASX_HB 2 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], ++ , true, "mult"); ++ DONE; ++}) ++ ++(define_expand "vec_widen_mult_lo_" ++ [(match_operand: 0 "register_operand") ++ (any_extend: (match_operand:ILASX_HB 1 "register_operand")) ++ (any_extend: (match_operand:ILASX_HB 2 "register_operand"))] ++ "ISA_HAS_LASX" ++{ ++ loongarch_expand_vec_widen_hilo (operands[0], operands[1], operands[2], ++ , false, "mult"); ++ DONE; ++}) + + ;; Vector reduction operation + (define_expand "reduc_plus_scal_v4di" +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index ea61cf567..163162598 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -205,6 +205,7 @@ extern void loongarch_register_frame_header_opt (void); + extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *); + extern void 
loongarch_expand_vec_cond_mask_expr (machine_mode, machine_mode, + rtx *); ++extern void loongarch_expand_vec_widen_hilo (rtx, rtx, rtx, bool, bool, const char *); + + /* Routines implemented in loongarch-c.c. */ + void loongarch_cpu_cpp_builtins (cpp_reader *); +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 9a629a999..c0f58f9a9 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -8028,6 +8028,143 @@ loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *d) + return loongarch_expand_vec_perm_even_odd_1 (d, odd); + } + ++static void ++loongarch_expand_vec_interleave (rtx target, rtx op0, rtx op1, bool high_p) ++{ ++ struct expand_vec_perm_d d; ++ unsigned i, nelt, base; ++ bool ok; ++ ++ d.target = target; ++ d.op0 = op0; ++ d.op1 = op1; ++ d.vmode = GET_MODE (target); ++ d.nelt = nelt = GET_MODE_NUNITS (d.vmode); ++ d.one_vector_p = false; ++ d.testing_p = false; ++ ++ base = high_p ? nelt / 2 : 0; ++ for (i = 0; i < nelt / 2; ++i) ++ { ++ d.perm[i * 2] = i + base; ++ d.perm[i * 2 + 1] = i + base + nelt; ++ } ++ ++ ok = loongarch_expand_vec_perm_interleave (&d); ++ gcc_assert (ok); ++} ++ ++/* The loongarch lasx instructions xvmulwev and xvmulwod return the even or odd ++ parts of the double sized result elements in the corresponding elements of ++ the target register. That's NOT what the vec_widen_umult_lo/hi patterns are ++ expected to do. We emulate the widening lo/hi multiplies with the even/odd ++ versions followed by a vector merge. 
*/ ++ ++void ++loongarch_expand_vec_widen_hilo (rtx dest, rtx op1, rtx op2, ++ bool uns_p, bool high_p, const char *optab) ++{ ++ machine_mode wmode = GET_MODE (dest); ++ machine_mode mode = GET_MODE (op1); ++ rtx t1, t2, t3; ++ ++ t1 = gen_reg_rtx (wmode); ++ t2 = gen_reg_rtx (wmode); ++ t3 = gen_reg_rtx (wmode); ++ switch (mode) ++ { ++ case V16HImode: ++ if (!strcmp (optab, "add")) ++ { ++ if (!uns_p) ++ { ++ emit_insn (gen_lasx_xvaddwev_w_h (t1, op1, op2)); ++ emit_insn (gen_lasx_xvaddwod_w_h (t2, op1, op2)); ++ } ++ else ++ { ++ emit_insn (gen_lasx_xvaddwev_w_hu (t1, op1, op2)); ++ emit_insn (gen_lasx_xvaddwod_w_hu (t2, op1, op2)); ++ } ++ } ++ else if (!strcmp (optab, "mult")) ++ { ++ if (!uns_p) ++ { ++ emit_insn (gen_lasx_xvmulwev_w_h (t1, op1, op2)); ++ emit_insn (gen_lasx_xvmulwod_w_h (t2, op1, op2)); ++ } ++ else ++ { ++ emit_insn (gen_lasx_xvmulwev_w_hu (t1, op1, op2)); ++ emit_insn (gen_lasx_xvmulwod_w_hu (t2, op1, op2)); ++ } ++ } ++ else if (!strcmp (optab, "sub")) ++ { ++ if (!uns_p) ++ { ++ emit_insn (gen_lasx_xvsubwev_w_h (t1, op1, op2)); ++ emit_insn (gen_lasx_xvsubwod_w_h (t2, op1, op2)); ++ } ++ else ++ { ++ emit_insn (gen_lasx_xvsubwev_w_hu (t1, op1, op2)); ++ emit_insn (gen_lasx_xvsubwod_w_hu (t2, op1, op2)); ++ } ++ } ++ break; ++ ++ case V32QImode: ++ if (!strcmp (optab, "add")) ++ { ++ if (!uns_p) ++ { ++ emit_insn (gen_lasx_xvaddwev_h_b (t1, op1, op2)); ++ emit_insn (gen_lasx_xvaddwod_h_b (t2, op1, op2)); ++ } ++ else ++ { ++ emit_insn (gen_lasx_xvaddwev_h_bu (t1, op1, op2)); ++ emit_insn (gen_lasx_xvaddwod_h_bu (t2, op1, op2)); ++ } ++ } ++ else if (!strcmp (optab, "mult")) ++ { ++ if (!uns_p) ++ { ++ emit_insn (gen_lasx_xvmulwev_h_b (t1, op1, op2)); ++ emit_insn (gen_lasx_xvmulwod_h_b (t2, op1, op2)); ++ } ++ else ++ { ++ emit_insn (gen_lasx_xvmulwev_h_bu (t1, op1, op2)); ++ emit_insn (gen_lasx_xvmulwod_h_bu (t2, op1, op2)); ++ } ++ } ++ else if (!strcmp (optab, "sub")) ++ { ++ if (!uns_p) ++ { ++ emit_insn (gen_lasx_xvsubwev_h_b (t1, 
op1, op2)); ++ emit_insn (gen_lasx_xvsubwod_h_b (t2, op1, op2)); ++ } ++ else ++ { ++ emit_insn (gen_lasx_xvsubwev_h_bu (t1, op1, op2)); ++ emit_insn (gen_lasx_xvsubwod_h_bu (t2, op1, op2)); ++ } ++ } ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++ ++ loongarch_expand_vec_interleave (t3, t1, t2, high_p); ++ emit_move_insn (dest, gen_lowpart (wmode, t3)); ++} ++ + /* Expand a variable vector permutation for LASX. */ + + void +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 5f9e63d66..29ac950bf 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -509,6 +509,8 @@ + ;; is like , but the signed form expands to "s" rather than "". + (define_code_attr su [(sign_extend "s") (zero_extend "u")]) + ++(define_code_attr u_bool [(sign_extend "false") (zero_extend "true")]) ++ + ;; expands to the name of the optab for a particular code. + (define_code_attr optab [(ashift "ashl") + (ashiftrt "ashr") +diff --git a/gcc/testsuite/gcc.target/loongarch/vect-widen-add.c b/gcc/testsuite/gcc.target/loongarch/vect-widen-add.c +new file mode 100644 +index 000000000..0bf832d0e +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vect-widen-add.c +@@ -0,0 +1,24 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler "xvaddwev.w.h" } } */ ++/* { dg-final { scan-assembler "xvaddwod.w.h" } } */ ++/* { dg-final { scan-assembler "xvaddwev.w.hu" } } */ ++/* { dg-final { scan-assembler "xvaddwod.w.hu" } } */ ++ ++#include ++ ++#define SIZE 1024 ++ ++void ++wide_uadd (uint32_t *foo, uint16_t *a, uint16_t *b) ++{ ++ for ( int i = 0; i < SIZE; i++) ++ foo[i] = a[i] + b[i]; ++} ++ ++void ++wide_sadd (int32_t *foo, int16_t *a, int16_t *b) ++{ ++ for ( int i = 0; i < SIZE; i++) ++ foo[i] = a[i] + b[i]; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vect-widen-mul.c b/gcc/testsuite/gcc.target/loongarch/vect-widen-mul.c +new file mode 100644 +index 
000000000..84b020eea +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vect-widen-mul.c +@@ -0,0 +1,24 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler "xvmulwev.w.h" } } */ ++/* { dg-final { scan-assembler "xvmulwod.w.h" } } */ ++/* { dg-final { scan-assembler "xvmulwev.w.hu" } } */ ++/* { dg-final { scan-assembler "xvmulwod.w.hu" } } */ ++ ++#include ++ ++#define SIZE 1024 ++ ++void ++wide_umul (uint32_t *foo, uint16_t *a, uint16_t *b) ++{ ++ for ( int i = 0; i < SIZE; i++) ++ foo[i] = a[i] * b[i]; ++} ++ ++void ++wide_smul (int32_t *foo, int16_t *a, int16_t *b) ++{ ++ for ( int i = 0; i < SIZE; i++) ++ foo[i] = a[i] * b[i]; ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/vect-widen-sub.c b/gcc/testsuite/gcc.target/loongarch/vect-widen-sub.c +new file mode 100644 +index 000000000..69fc3a517 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vect-widen-sub.c +@@ -0,0 +1,24 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -mlasx" } */ ++/* { dg-final { scan-assembler "xvsubwev.w.h" } } */ ++/* { dg-final { scan-assembler "xvsubwod.w.h" } } */ ++/* { dg-final { scan-assembler "xvsubwev.w.hu" } } */ ++/* { dg-final { scan-assembler "xvsubwod.w.hu" } } */ ++ ++#include ++ ++#define SIZE 1024 ++ ++void ++wide_usub (uint32_t *foo, uint16_t *a, uint16_t *b) ++{ ++ for ( int i = 0; i < SIZE; i++) ++ foo[i] = a[i] - b[i]; ++} ++ ++void ++wide_ssub (int32_t *foo, int16_t *a, int16_t *b) ++{ ++ for ( int i = 0; i < SIZE; i++) ++ foo[i] = a[i] - b[i]; ++} +-- +2.43.0 + diff --git a/0012-Sw64-Port-libstdc.patch b/0012-Sw64-Port-libstdc.patch new file mode 100644 index 0000000000000000000000000000000000000000..fbce1f8bc3040c32eecf3fa8f256bf6737e4b4e1 --- /dev/null +++ b/0012-Sw64-Port-libstdc.patch @@ -0,0 +1,6237 @@ +From f8ed3d1bb787694d2ad6cd4e46bedea0c6ab6a3b Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:54:30 +0800 +Subject: [PATCH 12/16] Sw64 Port: libstdc++ + +--- + 
libstdc++-v3/acinclude.m4 | 2 +- + .../post/sw_64-linux-gnu/baseline_symbols.txt | 6154 +++++++++++++++++ + libstdc++-v3/configure | 3 +- + libstdc++-v3/configure.ac | 1 + + libstdc++-v3/configure.host | 3 + + 5 files changed, 6161 insertions(+), 2 deletions(-) + create mode 100644 libstdc++-v3/config/abi/post/sw_64-linux-gnu/baseline_symbols.txt + +diff --git a/libstdc++-v3/acinclude.m4 b/libstdc++-v3/acinclude.m4 +index 04f2153fc..a7d7698bd 100644 +--- a/libstdc++-v3/acinclude.m4 ++++ b/libstdc++-v3/acinclude.m4 +@@ -4846,7 +4846,7 @@ AC_DEFUN([GLIBCXX_CHECK_EXCEPTION_PTR_SYMVER], [ + AC_MSG_CHECKING([for first version to support std::exception_ptr]) + case ${target} in + aarch64-*-* | alpha-*-* | hppa*-*-* | i?86-*-* | x86_64-*-* | \ +- m68k-*-* | powerpc*-*-* | s390*-*-* | *-*-solaris* ) ++ m68k-*-* | powerpc*-*-* | s390*-*-* | sw_64-*-* | *-*-solaris* ) + ac_exception_ptr_since_gcc46=yes + ;; + *) +diff --git a/libstdc++-v3/config/abi/post/sw_64-linux-gnu/baseline_symbols.txt b/libstdc++-v3/config/abi/post/sw_64-linux-gnu/baseline_symbols.txt +new file mode 100644 +index 000000000..0ca027cc9 +--- /dev/null ++++ b/libstdc++-v3/config/abi/post/sw_64-linux-gnu/baseline_symbols.txt +@@ -0,0 +1,6154 @@ ++FUNC:_ZGTtNKSt11logic_error4whatEv@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNKSt13bad_exception4whatEv@@CXXABI_1.3.10 ++FUNC:_ZGTtNKSt13bad_exceptionD1Ev@@CXXABI_1.3.10 ++FUNC:_ZGTtNKSt13runtime_error4whatEv@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNKSt9exception4whatEv@@CXXABI_1.3.10 ++FUNC:_ZGTtNKSt9exceptionD1Ev@@CXXABI_1.3.10 ++FUNC:_ZGTtNSt11logic_errorC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11logic_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11logic_errorC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11logic_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11logic_errorD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11logic_errorD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11logic_errorD2Ev@@GLIBCXX_3.4.22 
++FUNC:_ZGTtNSt11range_errorC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11range_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11range_errorC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11range_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11range_errorD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11range_errorD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt11range_errorD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12domain_errorC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12domain_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12domain_errorC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12domain_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12domain_errorD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12domain_errorD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12domain_errorD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12length_errorC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12length_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12length_errorC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12length_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12length_errorD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12length_errorD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12length_errorD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12out_of_rangeC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12out_of_rangeC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12out_of_rangeC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12out_of_rangeC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12out_of_rangeD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12out_of_rangeD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt12out_of_rangeD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt13runtime_errorC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt13runtime_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 
++FUNC:_ZGTtNSt13runtime_errorC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt13runtime_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt13runtime_errorD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt13runtime_errorD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt13runtime_errorD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt14overflow_errorC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt14overflow_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt14overflow_errorC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt14overflow_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt14overflow_errorD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt14overflow_errorD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt14overflow_errorD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt15underflow_errorC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt15underflow_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt15underflow_errorC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt15underflow_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt15underflow_errorD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt15underflow_errorD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt15underflow_errorD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt16invalid_argumentC1EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt16invalid_argumentC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt16invalid_argumentC2EPKc@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt16invalid_argumentC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt16invalid_argumentD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt16invalid_argumentD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZGTtNSt16invalid_argumentD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZN10__cxxabiv116__enum_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv116__enum_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv116__enum_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__array_type_infoD0Ev@@CXXABI_1.3 
++FUNC:_ZN10__cxxabiv117__array_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__array_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__class_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__class_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__class_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__pbase_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__pbase_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv117__pbase_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv119__pointer_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv119__pointer_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv119__pointer_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv120__function_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv120__function_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv120__function_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv120__si_class_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv120__si_class_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv120__si_class_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv121__vmi_class_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv121__vmi_class_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv121__vmi_class_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv123__fundamental_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv123__fundamental_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv123__fundamental_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv129__pointer_to_member_type_infoD0Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv129__pointer_to_member_type_infoD1Ev@@CXXABI_1.3 ++FUNC:_ZN10__cxxabiv129__pointer_to_member_type_infoD2Ev@@CXXABI_1.3 ++FUNC:_ZN10__gnu_norm15_List_node_base4hookEPS0_@@GLIBCXX_3.4 ++FUNC:_ZN10__gnu_norm15_List_node_base4swapERS0_S1_@@GLIBCXX_3.4 ++FUNC:_ZN10__gnu_norm15_List_node_base6unhookEv@@GLIBCXX_3.4 ++FUNC:_ZN10__gnu_norm15_List_node_base7reverseEv@@GLIBCXX_3.4 ++FUNC:_ZN10__gnu_norm15_List_node_base8transferEPS0_S1_@@GLIBCXX_3.4 ++FUNC:_ZN11__gnu_debug19_Safe_iterator_base12_M_get_mutexEv@@GLIBCXX_3.4.9 
++FUNC:_ZN11__gnu_debug19_Safe_iterator_base16_M_attach_singleEPNS_19_Safe_sequence_baseEb@@GLIBCXX_3.4.9 ++FUNC:_ZN11__gnu_debug19_Safe_iterator_base16_M_detach_singleEv@@GLIBCXX_3.4.9 ++FUNC:_ZN11__gnu_debug19_Safe_iterator_base9_M_attachEPNS_19_Safe_sequence_baseEb@@GLIBCXX_3.4 ++FUNC:_ZN11__gnu_debug19_Safe_iterator_base9_M_detachEv@@GLIBCXX_3.4 ++FUNC:_ZN11__gnu_debug19_Safe_sequence_base12_M_get_mutexEv@@GLIBCXX_3.4.9 ++FUNC:_ZN11__gnu_debug19_Safe_sequence_base13_M_detach_allEv@@GLIBCXX_3.4 ++FUNC:_ZN11__gnu_debug19_Safe_sequence_base18_M_detach_singularEv@@GLIBCXX_3.4 ++FUNC:_ZN11__gnu_debug19_Safe_sequence_base22_M_revalidate_singularEv@@GLIBCXX_3.4 ++FUNC:_ZN11__gnu_debug19_Safe_sequence_base7_M_swapERS0_@@GLIBCXX_3.4 ++FUNC:_ZN11__gnu_debug25_Safe_local_iterator_base16_M_attach_singleEPNS_19_Safe_sequence_baseEb@@GLIBCXX_3.4.26 ++FUNC:_ZN11__gnu_debug25_Safe_local_iterator_base9_M_attachEPNS_19_Safe_sequence_baseEb@@GLIBCXX_3.4.17 ++FUNC:_ZN11__gnu_debug25_Safe_local_iterator_base9_M_detachEv@@GLIBCXX_3.4.17 ++FUNC:_ZN11__gnu_debug30_Safe_unordered_container_base13_M_detach_allEv@@GLIBCXX_3.4.17 ++FUNC:_ZN11__gnu_debug30_Safe_unordered_container_base7_M_swapERS0_@@GLIBCXX_3.4.17 ++FUNC:_ZN14__gnu_parallel9_Settings3getEv@@GLIBCXX_3.4.10 ++FUNC:_ZN14__gnu_parallel9_Settings3setERS0_@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx12__atomic_addEPVii@@GLIBCXX_3.4 ++FUNC:_ZN9__gnu_cxx17__pool_alloc_base12_M_get_mutexEv@@GLIBCXX_3.4.2 ++FUNC:_ZN9__gnu_cxx17__pool_alloc_base16_M_get_free_listEm@@GLIBCXX_3.4.2 ++FUNC:_ZN9__gnu_cxx17__pool_alloc_base9_M_refillEm@@GLIBCXX_3.4.2 ++FUNC:_ZN9__gnu_cxx18__exchange_and_addEPVii@@GLIBCXX_3.4 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE4fileEv@@GLIBCXX_3.4.2 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE4syncEv@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE5uflowEv@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE6xsgetnEPcl@@GLIBCXX_3.4.10 
++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE6xsputnEPKcl@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE8overflowEi@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE9pbackfailEi@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEE9underflowEv@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEC1EP8_IO_FILE@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEC2EP8_IO_FILE@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE4fileEv@@GLIBCXX_3.4.2 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE4syncEv@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE5uflowEv@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE6xsgetnEPwl@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE6xsputnEPKwl@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE8overflowEj@@GLIBCXX_3.4.10 
++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE9pbackfailEj@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEE9underflowEv@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEC1EP8_IO_FILE@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEC2EP8_IO_FILE@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4.10 ++FUNC:_ZN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZN9__gnu_cxx27__verbose_terminate_handlerEv@@CXXABI_1.3 ++FUNC:_ZN9__gnu_cxx6__poolILb0EE10_M_destroyEv@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb0EE13_M_initializeEv@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb0EE16_M_reclaim_blockEPcm@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb0EE16_M_reserve_blockEmm@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb1EE10_M_destroyEv@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb1EE13_M_initializeEPFvPvE@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb1EE13_M_initializeEv@@GLIBCXX_3.4.6 ++FUNC:_ZN9__gnu_cxx6__poolILb1EE16_M_get_thread_idEv@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb1EE16_M_reclaim_blockEPcm@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb1EE16_M_reserve_blockEmm@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx6__poolILb1EE21_M_destroy_thread_keyEPv@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx9__freeresEv@@CXXABI_1.3.10 ++FUNC:_ZN9__gnu_cxx9free_list6_M_getEm@@GLIBCXX_3.4.4 ++FUNC:_ZN9__gnu_cxx9free_list8_M_clearEv@@GLIBCXX_3.4.4 ++FUNC:_ZNK10__cxxabiv117__class_type_info10__do_catchEPKSt9type_infoPPvj@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv117__class_type_info11__do_upcastEPKS0_PKvRNS0_15__upcast_resultE@@CXXABI_1.3 
++FUNC:_ZNK10__cxxabiv117__class_type_info11__do_upcastEPKS0_PPv@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv117__class_type_info12__do_dyncastElNS0_10__sub_kindEPKS0_PKvS3_S5_RNS0_16__dyncast_resultE@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv117__class_type_info20__do_find_public_srcElPKvPKS0_S2_@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv117__pbase_type_info10__do_catchEPKSt9type_infoPPvj@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv117__pbase_type_info15__pointer_catchEPKS0_PPvj@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv119__pointer_type_info14__is_pointer_pEv@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv119__pointer_type_info15__pointer_catchEPKNS_17__pbase_type_infoEPPvj@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv120__function_type_info15__is_function_pEv@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv120__si_class_type_info11__do_upcastEPKNS_17__class_type_infoEPKvRNS1_15__upcast_resultE@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv120__si_class_type_info12__do_dyncastElNS_17__class_type_info10__sub_kindEPKS1_PKvS4_S6_RNS1_16__dyncast_resultE@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv120__si_class_type_info20__do_find_public_srcElPKvPKNS_17__class_type_infoES2_@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv121__vmi_class_type_info11__do_upcastEPKNS_17__class_type_infoEPKvRNS1_15__upcast_resultE@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv121__vmi_class_type_info12__do_dyncastElNS_17__class_type_info10__sub_kindEPKS1_PKvS4_S6_RNS1_16__dyncast_resultE@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv121__vmi_class_type_info20__do_find_public_srcElPKvPKNS_17__class_type_infoES2_@@CXXABI_1.3 ++FUNC:_ZNK10__cxxabiv129__pointer_to_member_type_info15__pointer_catchEPKNS_17__pbase_type_infoEPPvj@@CXXABI_1.3 ++FUNC:_ZNK11__gnu_debug16_Error_formatter10_M_messageENS_13_Debug_msg_idE@@GLIBCXX_3.4 ++FUNC:_ZNK11__gnu_debug16_Error_formatter10_Parameter14_M_print_fieldEPKS0_PKc@@GLIBCXX_3.4 ++FUNC:_ZNK11__gnu_debug16_Error_formatter10_Parameter20_M_print_descriptionEPKS0_@@GLIBCXX_3.4 ++FUNC:_ZNK11__gnu_debug16_Error_formatter13_M_print_wordEPKc@@GLIBCXX_3.4 
++FUNC:_ZNK11__gnu_debug16_Error_formatter15_M_print_stringEPKc@@GLIBCXX_3.4 ++FUNC:_ZNK11__gnu_debug16_Error_formatter17_M_get_max_lengthEv@@GLIBCXX_3.4.10 ++FUNC:_ZNK11__gnu_debug16_Error_formatter8_M_errorEv@@GLIBCXX_3.4 ++FUNC:_ZNK11__gnu_debug19_Safe_iterator_base11_M_singularEv@@GLIBCXX_3.4 ++FUNC:_ZNK11__gnu_debug19_Safe_iterator_base14_M_can_compareERKS0_@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE11_M_disjunctEPKw@@GLIBCXX_3.4.5 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE11_M_disjunctEPKw@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE12find_last_ofEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE12find_last_ofEPKwmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE12find_last_ofERKS2_m@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE12find_last_ofEwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE13find_first_ofEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE13find_first_ofEPKwmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE13find_first_ofERKS2_m@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE13find_first_ofEwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE13get_allocatorEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE15_M_check_lengthEmmPKc@@GLIBCXX_3.4.5 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE15_M_check_lengthEmmPKc@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE16find_last_not_ofEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE16find_last_not_ofEPKwmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE16find_last_not_ofERKS2_m@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE16find_last_not_ofEwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE17find_first_not_ofEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE17find_first_not_ofEPKwmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE17find_first_not_ofERKS2_m@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE17find_first_not_ofEwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE2atEm@@GLIBCXX_3.4 
++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE3endEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4_Rep12_M_is_leakedEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4_Rep12_M_is_sharedEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4backEv@@GLIBCXX_3.4.15 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4cendEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4copyEPwmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4dataEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4findEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4findEPKwmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4findERKS2_m@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4findEwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4rendEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE4sizeEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5beginEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5c_strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5crendEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5emptyEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5frontEv@@GLIBCXX_3.4.15 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5rfindEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5rfindEPKwmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5rfindERKS2_m@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE5rfindEwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE6_M_repEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE6cbeginEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE6lengthEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE6rbeginEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE6substrEmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7_M_dataEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7_M_iendEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7compareEPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7compareERKS2_@@GLIBCXX_3.4 
++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7compareEmmPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7compareEmmPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7compareEmmRKS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7compareEmmRKS2_mm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE7crbeginEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE8_M_checkEmPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE8_M_limitEmm@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE8capacityEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE8max_sizeEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEE9_M_ibeginEv@@GLIBCXX_3.4 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEEcvSt17basic_string_viewIwS0_EEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSbIwSt11char_traitsIwESaIwEEixEm@@GLIBCXX_3.4 ++FUNC:_ZNKSi6gcountEv@@GLIBCXX_3.4 ++FUNC:_ZNKSi6sentrycvbEv@@GLIBCXX_3.4 ++FUNC:_ZNKSo6sentrycvbEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs11_M_disjunctEPKc@@GLIBCXX_3.4.5 ++FUNC:_ZNKSs11_M_disjunctEPKc@GLIBCXX_3.4 ++FUNC:_ZNKSs12find_last_ofEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs12find_last_ofEPKcmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs12find_last_ofERKSsm@@GLIBCXX_3.4 ++FUNC:_ZNKSs12find_last_ofEcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs13find_first_ofEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs13find_first_ofEPKcmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs13find_first_ofERKSsm@@GLIBCXX_3.4 ++FUNC:_ZNKSs13find_first_ofEcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs13get_allocatorEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs15_M_check_lengthEmmPKc@@GLIBCXX_3.4.5 ++FUNC:_ZNKSs15_M_check_lengthEmmPKc@GLIBCXX_3.4 ++FUNC:_ZNKSs16find_last_not_ofEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs16find_last_not_ofEPKcmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs16find_last_not_ofERKSsm@@GLIBCXX_3.4 ++FUNC:_ZNKSs16find_last_not_ofEcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs17find_first_not_ofEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs17find_first_not_ofEPKcmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs17find_first_not_ofERKSsm@@GLIBCXX_3.4 ++FUNC:_ZNKSs17find_first_not_ofEcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs2atEm@@GLIBCXX_3.4 ++FUNC:_ZNKSs3endEv@@GLIBCXX_3.4 
++FUNC:_ZNKSs4_Rep12_M_is_leakedEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs4_Rep12_M_is_sharedEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs4backEv@@GLIBCXX_3.4.15 ++FUNC:_ZNKSs4cendEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSs4copyEPcmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs4dataEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs4findEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs4findEPKcmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs4findERKSsm@@GLIBCXX_3.4 ++FUNC:_ZNKSs4findEcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs4rendEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs4sizeEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs5beginEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs5c_strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs5crendEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSs5emptyEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs5frontEv@@GLIBCXX_3.4.15 ++FUNC:_ZNKSs5rfindEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs5rfindEPKcmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs5rfindERKSsm@@GLIBCXX_3.4 ++FUNC:_ZNKSs5rfindEcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs6_M_repEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs6cbeginEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSs6lengthEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs6rbeginEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs6substrEmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs7_M_dataEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs7_M_iendEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs7compareEPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSs7compareERKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSs7compareEmmPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSs7compareEmmPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSs7compareEmmRKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSs7compareEmmRKSsmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs7crbeginEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSs8_M_checkEmPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSs8_M_limitEmm@@GLIBCXX_3.4 ++FUNC:_ZNKSs8capacityEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs8max_sizeEv@@GLIBCXX_3.4 ++FUNC:_ZNKSs9_M_ibeginEv@@GLIBCXX_3.4 ++FUNC:_ZNKSscvSt17basic_string_viewIcSt11char_traitsIcEEEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSsixEm@@GLIBCXX_3.4 ++FUNC:_ZNKSt10bad_typeid4whatEv@@GLIBCXX_3.4.9 ++FUNC:_ZNKSt10error_code23default_error_conditionEv@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt10filesystem16filesystem_error4whatEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem16filesystem_error5path1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem16filesystem_error5path2Ev@@GLIBCXX_3.4.26 
++FUNC:_ZNKSt10filesystem18directory_iteratordeEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem28recursive_directory_iterator17recursion_pendingEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem28recursive_directory_iterator5depthEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem28recursive_directory_iterator7optionsEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem28recursive_directory_iteratordeEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path11parent_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path12has_filenameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path13has_root_nameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path13has_root_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path13relative_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path14root_directoryEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path15has_parent_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path16lexically_normalEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path17_M_find_extensionEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path17has_relative_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path18has_root_directoryEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path18lexically_relativeERKS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path19lexically_proximateERKS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path5_List13_Impl_deleterclEPNS1_5_ImplE@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path5_List3endEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path5_List5beginEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path7compareERKS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path7compareESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path9root_nameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem4path9root_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx1116filesystem_error4whatEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx1116filesystem_error5path1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx1116filesystem_error5path2Ev@@GLIBCXX_3.4.26 
++FUNC:_ZNKSt10filesystem7__cxx1118directory_iteratordeEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx1128recursive_directory_iterator17recursion_pendingEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx1128recursive_directory_iterator5depthEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx1128recursive_directory_iterator7optionsEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx1128recursive_directory_iteratordeEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path11parent_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path12has_filenameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path13has_root_nameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path13has_root_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path13relative_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path14root_directoryEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path15has_parent_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path16lexically_normalEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path17_M_find_extensionEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path17has_relative_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path18has_root_directoryEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path18lexically_relativeERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path19lexically_proximateERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path5_List13_Impl_deleterclEPNS2_5_ImplE@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path5_List3endEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path5_List5beginEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path7compareERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path7compareESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path9root_nameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10filesystem7__cxx114path9root_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt10istrstream5rdbufEv@@GLIBCXX_3.4 
++FUNC:_ZNKSt10lock_error4whatEv@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt10moneypunctIcLb0EE10neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE10pos_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE11curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE11do_groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE11frac_digitsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE13decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE13do_neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE13do_pos_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE13negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE13positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE13thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE14do_curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE14do_frac_digitsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE16do_decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE16do_negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE16do_positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE16do_thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb0EE8groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE10neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE10pos_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE11curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE11do_groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE11frac_digitsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE13decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE13do_neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE13do_pos_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE13negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE13positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE13thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE14do_curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE14do_frac_digitsEv@@GLIBCXX_3.4 
++FUNC:_ZNKSt10moneypunctIcLb1EE16do_decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE16do_negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE16do_positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE16do_thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIcLb1EE8groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE10neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE10pos_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE11curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE11do_groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE11frac_digitsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE13decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE13do_neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE13do_pos_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE13negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE13positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE13thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE14do_curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE14do_frac_digitsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE16do_decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE16do_negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE16do_positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE16do_thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb0EE8groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE10neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE10pos_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE11curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE11do_groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE11frac_digitsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE13decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE13do_neg_formatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE13do_pos_formatEv@@GLIBCXX_3.4 
++FUNC:_ZNKSt10moneypunctIwLb1EE13negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE13positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE13thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE14do_curr_symbolEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE14do_frac_digitsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE16do_decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE16do_negative_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE16do_positive_signEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE16do_thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10moneypunctIwLb1EE8groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10ostrstream5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt10ostrstream6pcountEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE15_M_am_pm_formatEPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE15_M_date_formatsEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE15_M_time_formatsEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE19_M_days_abbreviatedEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE20_M_date_time_formatsEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE21_M_months_abbreviatedEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE6_M_putEPcmPKcPK2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE7_M_daysEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE8_M_am_pmEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIcE9_M_monthsEPPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE15_M_am_pm_formatEPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE15_M_date_formatsEPPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE15_M_time_formatsEPPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE19_M_days_abbreviatedEPPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE20_M_date_time_formatsEPPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE21_M_months_abbreviatedEPPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE6_M_putEPwmPKwPK2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE7_M_daysEPPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11__timepunctIwE8_M_am_pmEPPKw@@GLIBCXX_3.4 
++FUNC:_ZNKSt11__timepunctIwE9_M_monthsEPPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt11logic_error4whatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt12__basic_fileIcE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt12bad_weak_ptr4whatEv@@GLIBCXX_3.4.15 ++FUNC:_ZNKSt12future_error4whatEv@@GLIBCXX_3.4.14 ++FUNC:_ZNKSt12strstreambuf6pcountEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13bad_exception4whatEv@@GLIBCXX_3.4.9 ++FUNC:_ZNKSt13basic_filebufIcSt11char_traitsIcEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_filebufIwSt11char_traitsIwEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_fstreamIcSt11char_traitsIcEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_fstreamIcSt11char_traitsIcEE7is_openEv@@GLIBCXX_3.4.5 ++FUNC:_ZNKSt13basic_fstreamIcSt11char_traitsIcEE7is_openEv@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_fstreamIwSt11char_traitsIwEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_fstreamIwSt11char_traitsIwEE7is_openEv@@GLIBCXX_3.4.5 ++FUNC:_ZNKSt13basic_fstreamIwSt11char_traitsIwEE7is_openEv@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_istreamIwSt11char_traitsIwEE6gcountEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_istreamIwSt11char_traitsIwEE6sentrycvbEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13basic_ostreamIwSt11char_traitsIwEE6sentrycvbEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt13random_device13_M_getentropyEv@@GLIBCXX_3.4.25 ++FUNC:_ZNKSt13runtime_error4whatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt14basic_ifstreamIcSt11char_traitsIcEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt14basic_ifstreamIcSt11char_traitsIcEE7is_openEv@@GLIBCXX_3.4.5 ++FUNC:_ZNKSt14basic_ifstreamIcSt11char_traitsIcEE7is_openEv@GLIBCXX_3.4 ++FUNC:_ZNKSt14basic_ifstreamIwSt11char_traitsIwEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt14basic_ifstreamIwSt11char_traitsIwEE7is_openEv@@GLIBCXX_3.4.5 ++FUNC:_ZNKSt14basic_ifstreamIwSt11char_traitsIwEE7is_openEv@GLIBCXX_3.4 ++FUNC:_ZNKSt14basic_ofstreamIcSt11char_traitsIcEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt14basic_ofstreamIcSt11char_traitsIcEE7is_openEv@@GLIBCXX_3.4.5 ++FUNC:_ZNKSt14basic_ofstreamIcSt11char_traitsIcEE7is_openEv@GLIBCXX_3.4 
++FUNC:_ZNKSt14basic_ofstreamIwSt11char_traitsIwEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt14basic_ofstreamIwSt11char_traitsIwEE7is_openEv@@GLIBCXX_3.4.5 ++FUNC:_ZNKSt14basic_ofstreamIwSt11char_traitsIwEE7is_openEv@GLIBCXX_3.4 ++FUNC:_ZNKSt14error_category10equivalentERKSt10error_codei@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt14error_category10equivalentEiRKSt15error_condition@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt14error_category23default_error_conditionEi@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt15__exception_ptr13exception_ptr20__cxa_exception_typeEv@@CXXABI_1.3.3 ++FUNC:_ZNKSt15__exception_ptr13exception_ptrcvMS0_FvvEEv@@CXXABI_1.3.3 ++FUNC:_ZNKSt15__exception_ptr13exception_ptrntEv@@CXXABI_1.3.3 ++FUNC:_ZNKSt15basic_streambufIcSt11char_traitsIcEE4gptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIcSt11char_traitsIcEE4pptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIcSt11char_traitsIcEE5ebackEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIcSt11char_traitsIcEE5egptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIcSt11char_traitsIcEE5epptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIcSt11char_traitsIcEE5pbaseEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIcSt11char_traitsIcEE6getlocEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIwSt11char_traitsIwEE4gptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIwSt11char_traitsIwEE4pptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIwSt11char_traitsIwEE5ebackEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIwSt11char_traitsIwEE5egptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIwSt11char_traitsIwEE5epptrEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIwSt11char_traitsIwEE5pbaseEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_streambufIwSt11char_traitsIwEE6getlocEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_stringbufIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt15basic_stringbufIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt16bad_array_length4whatEv@@CXXABI_1.3.8 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIjEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIlEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intImEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intItEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIxEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIyEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE16_M_extract_floatES4_S4_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8__do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIjEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIlEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intImEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intItEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIxEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIyEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE16_M_extract_floatES4_S4_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8__do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE12_M_group_intEPKcmcRSt8ios_basePcSA_Ri@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intIlEES4_S4_RSt8ios_basecT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intImEES4_S4_RSt8ios_basecT_@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intIxEES4_S4_RSt8ios_basecT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intIyEES4_S4_RSt8ios_basecT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE14_M_group_floatEPKcmcS7_PcS8_Ri@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE15_M_insert_floatIdEES4_S4_RSt8ios_baseccT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE15_M_insert_floatIgEES4_S4_RSt8ios_baseccT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecPKv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecb@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecl@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_RSt8ios_basecy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6_M_padEclRSt8ios_basePcPKcRi@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecPKv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecb@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecl@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_RSt8ios_basecy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE8__do_putES4_RSt8ios_basecd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE12_M_group_intEPKcmwRSt8ios_basePwSA_Ri@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intIlEES4_S4_RSt8ios_basewT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intImEES4_S4_RSt8ios_basewT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intIxEES4_S4_RSt8ios_basewT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intIyEES4_S4_RSt8ios_basewT_@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE14_M_group_floatEPKcmwPKwPwSA_Ri@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE15_M_insert_floatIdEES4_S4_RSt8ios_basewcT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE15_M_insert_floatIgEES4_S4_RSt8ios_basewcT_@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewPKv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewb@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewl@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_RSt8ios_basewy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6_M_padEwlRSt8ios_basePwPKwRi@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewPKv@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewb@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewd@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewl@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewm@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewx@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_RSt8ios_basewy@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE8__do_putES4_RSt8ios_basewd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10_M_extractILb0EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10_M_extractILb1EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8__do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10_M_extractILb0EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10_M_extractILb1EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRSbIwS3_SaIwEE@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRSbIwS3_SaIwEE@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8__do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_bRSt8ios_basecRKSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_bRSt8ios_basecg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_bRSt8ios_basecRKSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_bRSt8ios_basecg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE8__do_putES4_bRSt8ios_basecd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE9_M_insertILb0EEES4_S4_RSt8ios_basecRKSs@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE9_M_insertILb1EEES4_S4_RSt8ios_basecRKSs@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_bRSt8ios_basewRKSbIwS3_SaIwEE@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_bRSt8ios_basewg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_bRSt8ios_basewRKSbIwS3_SaIwEE@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_bRSt8ios_basewg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE8__do_putES4_bRSt8ios_basewd@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE9_M_insertILb0EEES4_S4_RSt8ios_basewRKSbIwS3_SaIwEE@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE9_M_insertILb1EEES4_S4_RSt8ios_basewRKSbIwS3_SaIwEE@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNKSt17bad_function_call4whatEv@@GLIBCXX_3.4.18 ++FUNC:_ZNKSt18basic_stringstreamIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt18basic_stringstreamIcSt11char_traitsIcESaIcEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt18basic_stringstreamIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt18basic_stringstreamIwSt11char_traitsIwESaIwEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDiE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDiE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDiE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDiE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDiE5do_inER11__mbstate_tPKcS4_RS4_PDiS6_RS6_@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt19__codecvt_utf8_baseIDiE6do_outER11__mbstate_tPKDiS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDiE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDsE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDsE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDsE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDsE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDsE5do_inER11__mbstate_tPKcS4_RS4_PDsS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDsE6do_outER11__mbstate_tPKDsS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIDsE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIwE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIwE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIwE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIwE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIwE5do_inER11__mbstate_tPKcS4_RS4_PwS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIwE6do_outER11__mbstate_tPKwS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19__codecvt_utf8_baseIwE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt19basic_istringstreamIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19basic_istringstreamIcSt11char_traitsIcESaIcEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19basic_istringstreamIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19basic_istringstreamIwSt11char_traitsIwESaIwEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19basic_ostringstreamIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt19basic_ostringstreamIwSt11char_traitsIwESaIwEE5rdbufEv@@GLIBCXX_3.4 
++FUNC:_ZNKSt20__codecvt_utf16_baseIDiE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDiE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDiE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDiE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDiE5do_inER11__mbstate_tPKcS4_RS4_PDiS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDiE6do_outER11__mbstate_tPKDiS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDiE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDsE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDsE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDsE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDsE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDsE5do_inER11__mbstate_tPKcS4_RS4_PDsS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDsE6do_outER11__mbstate_tPKDsS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIDsE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIwE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIwE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIwE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIwE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIwE5do_inER11__mbstate_tPKcS4_RS4_PwS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIwE6do_outER11__mbstate_tPKwS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20__codecvt_utf16_baseIwE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt20bad_array_new_length4whatEv@@CXXABI_1.3.8 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDiE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDiE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDiE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDiE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDiE5do_inER11__mbstate_tPKcS4_RS4_PDiS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDiE6do_outER11__mbstate_tPKDiS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDiE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDsE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDsE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDsE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDsE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDsE5do_inER11__mbstate_tPKcS4_RS4_PDsS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDsE6do_outER11__mbstate_tPKDsS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIDsE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIwE10do_unshiftER11__mbstate_tPcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIwE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIwE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIwE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIwE5do_inER11__mbstate_tPKcS4_RS4_PwS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIwE6do_outER11__mbstate_tPKwS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt25__codecvt_utf8_utf16_baseIwE9do_lengthER11__mbstate_tPKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt3_V214error_category10_M_messageB5cxx11Ei@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt3_V214error_category10_M_messageEi@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt3_V214error_category10equivalentERKSt10error_codei@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt3_V214error_category10equivalentEiRKSt15error_condition@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt3_V214error_category23default_error_conditionEi@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt3tr14hashINSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEEclES6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt3tr14hashINSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEEEclES6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt3tr14hashIRKSbIwSt11char_traitsIwESaIwEEEclES6_@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt3tr14hashIRKSsEclES2_@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt3tr14hashISbIwSt11char_traitsIwESaIwEEEclES4_@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt3tr14hashISsEclESs@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt3tr14hashIeEclEe@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt3tr14hashIgEclEg@@GLIBCXX_LDBL_3.4.10 ++FUNC:_ZNKSt4hashIRKSbIwSt11char_traitsIwESaIwEEEclES5_@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt4hashIRKSsEclES1_@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt4hashISbIwSt11char_traitsIwESaIwEEEclES3_@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt4hashISsEclESs@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt4hashISt10error_codeEclES0_@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt4hashIeEclEe@@GLIBCXX_3.4.10 ++FUNC:_ZNKSt4hashIgEclEg@@GLIBCXX_LDBL_3.4.10 ++FUNC:_ZNKSt5ctypeIcE10do_tolowerEPcPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIcE10do_tolowerEc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIcE10do_toupperEPcPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIcE10do_toupperEc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIcE13_M_widen_initEv@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt5ctypeIcE14_M_narrow_initEv@@GLIBCXX_3.4.11 ++FUNC:_ZNKSt5ctypeIcE8do_widenEPKcS2_Pc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIcE8do_widenEc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIcE9do_narrowEPKcS2_cPc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIcE9do_narrowEcc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE10do_scan_isEtPKwS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE10do_tolowerEPwPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE10do_tolowerEw@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE10do_toupperEPwPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE10do_toupperEw@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE11do_scan_notEtPKwS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE19_M_convert_to_wmaskEt@@GLIBCXX_3.4 
++FUNC:_ZNKSt5ctypeIwE5do_isEPKwS2_Pt@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE5do_isEtw@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE8do_widenEPKcS2_Pw@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE8do_widenEc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE9do_narrowEPKwS2_cPc@@GLIBCXX_3.4 ++FUNC:_ZNKSt5ctypeIwE9do_narrowEwc@@GLIBCXX_3.4 ++FUNC:_ZNKSt6locale2id5_M_idEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt6locale4nameB5cxx11Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt6locale4nameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt6localeeqERKS_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE10neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE10pos_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE11curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE11do_groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE11frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE13decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE13do_neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE13do_pos_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE13negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE13positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE13thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE14do_curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE14do_frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE16do_decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE16do_negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE16do_positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE16do_thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb0EE8groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE10neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE10pos_formatEv@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE11curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE11do_groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE11frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE13decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE13do_neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE13do_pos_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE13negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE13positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE13thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE14do_curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE14do_frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE16do_decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE16do_negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE16do_positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE16do_thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIcLb1EE8groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE10neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE10pos_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE11curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE11do_groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE11frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE13decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE13do_neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE13do_pos_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE13negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE13positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE13thousands_sepEv@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE14do_curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE14do_frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE16do_decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE16do_negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE16do_positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE16do_thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb0EE8groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE10neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE10pos_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE11curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE11do_groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE11frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE13decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE13do_neg_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE13do_pos_formatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE13negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE13positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE13thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE14do_curr_symbolEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE14do_frac_digitsEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE16do_decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE16do_negative_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE16do_positive_signEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE16do_thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1110moneypunctIwLb1EE8groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE11_M_disjunctEPKc@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE11_M_is_localEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12find_last_ofEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12find_last_ofEPKcmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12find_last_ofERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12find_last_ofEcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13_M_local_dataEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13find_first_ofEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13find_first_ofEPKcmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13find_first_ofERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13find_first_ofEcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13get_allocatorEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE15_M_check_lengthEmmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE16_M_get_allocatorEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE16find_last_not_ofEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE16find_last_not_ofEPKcmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE16find_last_not_ofERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE16find_last_not_ofEcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE17find_first_not_ofEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE17find_first_not_ofEPKcmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE17find_first_not_ofERKS4_m@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE17find_first_not_ofEcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE2atEm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE3endEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4backEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4cendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4copyEPcmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4dataEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4findEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4findEPKcmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4findERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4findEcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4rendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4sizeEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5beginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5c_strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5crendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5emptyEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5frontEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5rfindEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5rfindEPKcmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5rfindERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5rfindEcm@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6cbeginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6rbeginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6substrEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7_M_dataEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEmmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEmmPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEmmRKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEmmRKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7crbeginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE8_M_checkEmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE8_M_limitEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE8capacityEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE8max_sizeEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEcvSt17basic_string_viewIcS2_EEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEixEm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE11_M_disjunctEPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE11_M_is_localEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12find_last_ofEPKwm@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12find_last_ofEPKwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12find_last_ofERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12find_last_ofEwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13_M_local_dataEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13find_first_ofEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13find_first_ofEPKwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13find_first_ofERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13find_first_ofEwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13get_allocatorEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE15_M_check_lengthEmmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE16_M_get_allocatorEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE16find_last_not_ofEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE16find_last_not_ofEPKwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE16find_last_not_ofERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE16find_last_not_ofEwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE17find_first_not_ofEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE17find_first_not_ofEPKwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE17find_first_not_ofERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE17find_first_not_ofEwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE2atEm@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE3endEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4backEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4cendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4copyEPwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4dataEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4findEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4findEPKwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4findERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4findEwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4rendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4sizeEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5beginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5c_strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5crendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5emptyEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5frontEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5rfindEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5rfindEPKwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5rfindERKS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5rfindEwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6cbeginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6rbeginEv@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6substrEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7_M_dataEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7compareEPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7compareERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7compareEmmPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7compareEmmPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7compareEmmRKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7compareEmmRKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7crbeginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE8_M_checkEmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE8_M_limitEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE8capacityEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE8max_sizeEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEcvSt17basic_string_viewIwS2_EEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEixEm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEE5rdbufEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEE5rdbufEv@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEE5rdbufEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEE5rdbufEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEE5rdbufEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEE3strEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEE5rdbufEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE10_M_compareEPKcS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE10do_compareEPKcS3_S3_S3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE12_M_transformEPcPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE12do_transformEPKcS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE4hashEPKcS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE7compareEPKcS3_S3_S3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE7do_hashEPKcS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIcE9transformEPKcS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE10_M_compareEPKwS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE10do_compareEPKwS3_S3_S3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE12_M_transformEPwPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE12do_transformEPKwS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE4hashEPKwS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE7compareEPKwS3_S3_S3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE7do_hashEPKwS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx117collateIwE9transformEPKwS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE18_M_convert_to_charERKNS_12basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx118messagesIcE20_M_convert_from_charEPc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE3getEiiiRKNS_12basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE4openERKNS_12basic_stringIcSt11char_traitsIcESaIcEEERKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE4openERKNS_12basic_stringIcSt11char_traitsIcESaIcEEERKSt6localePKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE5closeEi@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE6do_getEiiiRKNS_12basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE7do_openERKNS_12basic_stringIcSt11char_traitsIcESaIcEEERKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIcE8do_closeEi@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE18_M_convert_to_charERKNS_12basic_stringIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE20_M_convert_from_charEPc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE3getEiiiRKNS_12basic_stringIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE4openERKNS_12basic_stringIcSt11char_traitsIcESaIcEEERKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE4openERKNS_12basic_stringIcSt11char_traitsIcESaIcEEERKSt6localePKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE5closeEi@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE6do_getEiiiRKNS_12basic_stringIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE7do_openERKNS_12basic_stringIcSt11char_traitsIcESaIcEEERKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118messagesIwE8do_closeEi@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE11do_groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE11do_truenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE12do_falsenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE13decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE13thousands_sepEv@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx118numpunctIcE16do_decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE16do_thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE8groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE8truenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIcE9falsenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE11do_groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE11do_truenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE12do_falsenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE13decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE13thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE16do_decimal_pointEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE16do_thousands_sepEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE8groupingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE8truenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118numpunctIwE9falsenameEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10date_orderEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_dateES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_timeES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_yearES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11get_weekdayES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE13do_date_orderEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE13get_monthnameES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_numES4_S4_RiiimRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14do_get_weekdayES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE15_M_extract_nameES4_S4_RiPPKcmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE16do_get_monthnameES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE21_M_extract_via_formatES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE24_M_extract_wday_or_monthES4_S4_RiPPKcmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmPKcSD_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8get_dateES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8get_timeES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8get_yearES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10date_orderEv@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_dateES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_timeES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_yearES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11get_weekdayES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE13do_date_orderEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE13get_monthnameES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_numES4_S4_RiiimRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14do_get_weekdayES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE15_M_extract_nameES4_S4_RiPPKwmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE16do_get_monthnameES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE21_M_extract_via_formatES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE24_M_extract_wday_or_monthES4_S4_RiPPKwmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmPKwSD_@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8get_dateES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8get_timeES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8get_yearES4_S4_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10_M_extractILb0EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10_M_extractILb1EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10_M_extractILb0EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIcS2_IcESaIcEEE@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10_M_extractILb1EEES4_S4_S4_RSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIcS2_IcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIwS3_SaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRNS_12basic_stringIwS3_SaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES4_S4_bRSt8ios_baseRSt12_Ios_IostateRg@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_bRSt8ios_basecRKNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES4_bRSt8ios_basecg@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_bRSt8ios_basecRKNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES4_bRSt8ios_basecg@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE9_M_insertILb0EEES4_S4_RSt8ios_basecRKNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE9_M_insertILb1EEES4_S4_RSt8ios_basecRKNS_12basic_stringIcS3_SaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_bRSt8ios_basewRKNS_12basic_stringIwS3_SaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES4_bRSt8ios_basewg@@GLIBCXX_3.4.21 
++FUNC:_ZNKSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_bRSt8ios_basewRKNS_12basic_stringIwS3_SaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES4_bRSt8ios_basewg@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE9_M_insertILb0EEES4_S4_RSt8ios_basewRKNS_12basic_stringIwS3_SaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE9_M_insertILb1EEES4_S4_RSt8ios_basewRKNS_12basic_stringIwS3_SaIwEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDiDu11__mbstate_tE10do_unshiftERS0_PDuS3_RS3_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDiDu11__mbstate_tE11do_encodingEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDiDu11__mbstate_tE13do_max_lengthEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDiDu11__mbstate_tE16do_always_noconvEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDiDu11__mbstate_tE5do_inERS0_PKDuS4_RS4_PDiS6_RS6_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDiDu11__mbstate_tE6do_outERS0_PKDiS4_RS4_PDuS6_RS6_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDiDu11__mbstate_tE9do_lengthERS0_PKDuS4_m@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDic11__mbstate_tE10do_unshiftERS0_PcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDic11__mbstate_tE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDic11__mbstate_tE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDic11__mbstate_tE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDic11__mbstate_tE5do_inERS0_PKcS4_RS4_PDiS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDic11__mbstate_tE6do_outERS0_PKDiS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDic11__mbstate_tE9do_lengthERS0_PKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDsDu11__mbstate_tE10do_unshiftERS0_PDuS3_RS3_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDsDu11__mbstate_tE11do_encodingEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDsDu11__mbstate_tE13do_max_lengthEv@@GLIBCXX_3.4.26 
++FUNC:_ZNKSt7codecvtIDsDu11__mbstate_tE16do_always_noconvEv@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDsDu11__mbstate_tE5do_inERS0_PKDuS4_RS4_PDsS6_RS6_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDsDu11__mbstate_tE6do_outERS0_PKDsS4_RS4_PDuS6_RS6_@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDsDu11__mbstate_tE9do_lengthERS0_PKDuS4_m@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt7codecvtIDsc11__mbstate_tE10do_unshiftERS0_PcS3_RS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDsc11__mbstate_tE11do_encodingEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDsc11__mbstate_tE13do_max_lengthEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDsc11__mbstate_tE16do_always_noconvEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDsc11__mbstate_tE5do_inERS0_PKcS4_RS4_PDsS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDsc11__mbstate_tE6do_outERS0_PKDsS4_RS4_PcS6_RS6_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIDsc11__mbstate_tE9do_lengthERS0_PKcS4_m@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt7codecvtIcc11__mbstate_tE10do_unshiftERS0_PcS3_RS3_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIcc11__mbstate_tE11do_encodingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIcc11__mbstate_tE13do_max_lengthEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIcc11__mbstate_tE16do_always_noconvEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIcc11__mbstate_tE5do_inERS0_PKcS4_RS4_PcS6_RS6_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIcc11__mbstate_tE6do_outERS0_PKcS4_RS4_PcS6_RS6_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIcc11__mbstate_tE9do_lengthERS0_PKcS4_m@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIwc11__mbstate_tE10do_unshiftERS0_PcS3_RS3_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIwc11__mbstate_tE11do_encodingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIwc11__mbstate_tE13do_max_lengthEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIwc11__mbstate_tE16do_always_noconvEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIwc11__mbstate_tE5do_inERS0_PKcS4_RS4_PwS6_RS6_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIwc11__mbstate_tE6do_outERS0_PKwS4_RS4_PcS6_RS6_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7codecvtIwc11__mbstate_tE9do_lengthERS0_PKcS4_m@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIcE10_M_compareEPKcS2_@@GLIBCXX_3.4 
++FUNC:_ZNKSt7collateIcE10do_compareEPKcS2_S2_S2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIcE12_M_transformEPcPKcm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIcE12do_transformEPKcS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIcE4hashEPKcS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIcE7compareEPKcS2_S2_S2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIcE7do_hashEPKcS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIcE9transformEPKcS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE10_M_compareEPKwS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE10do_compareEPKwS2_S2_S2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE12_M_transformEPwPKwm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE12do_transformEPKwS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE4hashEPKwS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE7compareEPKwS2_S2_S2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE7do_hashEPKwS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7collateIwE9transformEPKwS2_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIjEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIlEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intImEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intItEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIxEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_intIyEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE16_M_extract_floatES3_S3_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_3.4 
++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_3.4 
++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIjEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIlEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intImEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intItEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIxEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_intIyEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE16_M_extract_floatES3_S3_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_3.4 
++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRPv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRb@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRf@@GLIBCXX_3.4 
++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRj@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRt@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateRy@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE12_M_group_intEPKcmcRSt8ios_basePcS9_Ri@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intIlEES3_S3_RSt8ios_basecT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intImEES3_S3_RSt8ios_basecT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intIxEES3_S3_RSt8ios_basecT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE13_M_insert_intIyEES3_S3_RSt8ios_basecT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE14_M_group_floatEPKcmcS6_PcS7_Ri@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE15_M_insert_floatIdEES3_S3_RSt8ios_baseccT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE15_M_insert_floatIeEES3_S3_RSt8ios_baseccT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecPKv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecb@@GLIBCXX_3.4 
++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basece@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecy@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6_M_padEclRSt8ios_basePcPKcRi@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecPKv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecb@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basece@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecy@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE12_M_group_intEPKcmwRSt8ios_basePwS9_Ri@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intIlEES3_S3_RSt8ios_basewT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intImEES3_S3_RSt8ios_basewT_@@GLIBCXX_3.4 
++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intIxEES3_S3_RSt8ios_basewT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE13_M_insert_intIyEES3_S3_RSt8ios_basewT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE14_M_group_floatEPKcmwPKwPwS9_Ri@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE15_M_insert_floatIdEES3_S3_RSt8ios_basewcT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE15_M_insert_floatIeEES3_S3_RSt8ios_basewcT_@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewPKv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewb@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewe@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewy@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6_M_padEwlRSt8ios_basePwPKwRi@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewPKv@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewb@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewd@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewe@@GLIBCXX_3.4 
++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewl@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewm@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewx@@GLIBCXX_3.4 ++FUNC:_ZNKSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewy@@GLIBCXX_3.4 ++FUNC:_ZNKSt8__detail20_Prime_rehash_policy11_M_next_bktEm@@GLIBCXX_3.4.18 ++FUNC:_ZNKSt8__detail20_Prime_rehash_policy14_M_need_rehashEmmm@@GLIBCXX_3.4.18 ++FUNC:_ZNKSt8bad_cast4whatEv@@GLIBCXX_3.4.9 ++FUNC:_ZNKSt8ios_base7failure4whatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8ios_base7failureB5cxx114whatEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt8messagesIcE18_M_convert_to_charERKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE20_M_convert_from_charEPc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE3getEiiiRKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE4openERKSsRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE4openERKSsRKSt6localePKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE5closeEi@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE6do_getEiiiRKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE7do_openERKSsRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIcE8do_closeEi@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE18_M_convert_to_charERKSbIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE20_M_convert_from_charEPc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE3getEiiiRKSbIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE4openERKSsRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE4openERKSsRKSt6localePKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE5closeEi@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE6do_getEiiiRKSbIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE7do_openERKSsRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNKSt8messagesIwE8do_closeEi@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE11do_groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE11do_truenameEv@@GLIBCXX_3.4 
++FUNC:_ZNKSt8numpunctIcE12do_falsenameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE13decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE13thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE16do_decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE16do_thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE8groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE8truenameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIcE9falsenameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE11do_groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE11do_truenameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE12do_falsenameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE13decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE13thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE16do_decimal_pointEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE16do_thousands_sepEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE8groupingEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE8truenameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8numpunctIwE9falsenameEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10date_orderEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_dateES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_timeES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11do_get_yearES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE11get_weekdayES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE13do_date_orderEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE13get_monthnameES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14_M_extract_numES3_S3_RiiimRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4 
++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE14do_get_weekdayES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE15_M_extract_nameES3_S3_RiPPKcmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE16do_get_monthnameES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE21_M_extract_via_formatES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmPKc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE24_M_extract_wday_or_monthES3_S3_RiPPKcmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.14 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmPKcSC_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8get_dateES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8get_timeES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE8get_yearES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10date_orderEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_dateES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_timeES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 
++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11do_get_yearES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE11get_weekdayES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE13do_date_orderEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE13get_monthnameES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14_M_extract_numES3_S3_RiiimRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE14do_get_weekdayES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE15_M_extract_nameES3_S3_RiPPKwmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE16do_get_monthnameES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE21_M_extract_via_formatES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmPKw@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE24_M_extract_wday_or_monthES3_S3_RiPPKwmRSt8ios_baseRSt12_Ios_Iostate@@GLIBCXX_3.4.14 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmPKwSC_@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.26 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tmcc@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8get_dateES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 
++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8get_timeES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE8get_yearES3_S3_RSt8ios_baseRSt12_Ios_IostateP2tm@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecPK2tmPKcSB_@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_RSt8ios_basecPK2tmcc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_RSt8ios_basecPK2tmcc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewPK2tmPKwSB_@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_RSt8ios_basewPK2tmcc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_RSt8ios_basewPK2tmcc@@GLIBCXX_3.4 ++FUNC:_ZNKSt8valarrayImE4sizeEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9bad_alloc4whatEv@@GLIBCXX_3.4.9 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE10exceptionsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE3badEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE3eofEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE3tieEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE4failEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE4fillEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE4goodEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE5widenEc@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE6narrowEcc@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEE7rdstateEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEEcvPvEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEEcvbEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt9basic_iosIcSt11char_traitsIcEEntEv@@GLIBCXX_3.4 
++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE10exceptionsEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE3badEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE3eofEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE3tieEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE4failEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE4fillEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE4goodEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE5widenEc@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE6narrowEwc@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEE7rdstateEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEEcvPvEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEEcvbEv@@GLIBCXX_3.4.21 ++FUNC:_ZNKSt9basic_iosIwSt11char_traitsIwEEntEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9exception4whatEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10_M_extractILb0EEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE10_M_extractILb1EEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE3getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE6do_getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10_M_extractILb0EEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 
++FUNC:_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE10_M_extractILb1EEES3_S3_S3_RSt8ios_baseRSt12_Ios_IostateRSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRSbIwS2_SaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE3getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRSbIwS2_SaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE6do_getES3_S3_bRSt8ios_baseRSt12_Ios_IostateRe@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_bRSt8ios_basecRKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE3putES3_bRSt8ios_basece@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_bRSt8ios_basecRKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE6do_putES3_bRSt8ios_basece@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE9_M_insertILb0EEES3_S3_RSt8ios_basecRKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE9_M_insertILb1EEES3_S3_RSt8ios_basecRKSs@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_bRSt8ios_basewRKSbIwS2_SaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE3putES3_bRSt8ios_basewe@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_bRSt8ios_basewRKSbIwS2_SaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE6do_putES3_bRSt8ios_basewe@@GLIBCXX_3.4 ++FUNC:_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE9_M_insertILb0EEES3_S3_RSt8ios_basewRKSbIwS2_SaIwEE@@GLIBCXX_3.4 
++FUNC:_ZNKSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE9_M_insertILb1EEES3_S3_RSt8ios_basewRKSbIwS2_SaIwEE@@GLIBCXX_3.4 ++FUNC:_ZNKSt9strstream5rdbufEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9strstream6pcountEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9type_info10__do_catchEPKS_PPvj@@GLIBCXX_3.4 ++FUNC:_ZNKSt9type_info11__do_upcastEPKN10__cxxabiv117__class_type_infoEPPv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9type_info14__is_pointer_pEv@@GLIBCXX_3.4 ++FUNC:_ZNKSt9type_info15__is_function_pEv@@GLIBCXX_3.4 ++FUNC:_ZNSaIcEC1ERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSaIcEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSaIcEC2ERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSaIcEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSaIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSaIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSaIwEC1ERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSaIwEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSaIwEC2ERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSaIwEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSaIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSaIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE10_S_compareEmm@@GLIBCXX_3.4.16 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_Alloc_hiderC1EPwRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_Alloc_hiderC2EPwRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_M_leak_hardEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_S_constructEmwRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_S_constructIN9__gnu_cxx17__normal_iteratorIPwS2_EEEES6_T_S8_RKS1_St20forward_iterator_tag@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_S_constructIPKwEEPwT_S7_RKS1_St20forward_iterator_tag@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_S_constructIPwEES4_T_S5_RKS1_St20forward_iterator_tag@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12_S_empty_repEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12__sv_wrapperC1ESt17basic_string_viewIwS0_E@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE12__sv_wrapperC2ESt17basic_string_viewIwS0_E@@GLIBCXX_3.4.26 
++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwN9__gnu_cxx17__normal_iteratorIPKwS2_EES8_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwN9__gnu_cxx17__normal_iteratorIS3_S2_EES6_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwPKwS5_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwS3_S3_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE13shrink_to_fitEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE14_M_replace_auxEmmmw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE15_M_replace_safeEmmPKwm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE17_S_to_string_viewESt17basic_string_viewIwS0_E@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE18_S_construct_aux_2EmwRKS1_@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE2atEm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE3endEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep10_M_destroyERKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep10_M_disposeERKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep10_M_refcopyEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep10_M_refdataEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep12_S_empty_repEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep13_M_set_leakedEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep15_M_set_sharableEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep26_M_set_length_and_sharableEm@@GLIBCXX_3.4.5 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep26_M_set_length_and_sharableEm@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep7_M_grabERKS1_S5_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep8_M_cloneERKS1_m@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep9_S_createEmmRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4backEv@@GLIBCXX_3.4.15 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4dataEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4rendEv@@GLIBCXX_3.4 
++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE4swapERS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE5beginEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE5clearEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE5eraseEN9__gnu_cxx17__normal_iteratorIPwS2_EE@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE5eraseEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE5eraseEmm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE5frontEv@@GLIBCXX_3.4.15 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6appendEPKw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6appendEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6appendERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6appendERKS2_mm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6appendESt16initializer_listIwE@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6appendEmw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6assignEOS2_@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6assignEPKw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6assignEPKwm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6assignERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6assignERKS2_mm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6assignESt16initializer_listIwE@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6assignEmw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPwS2_EESt16initializer_listIwE@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPwS2_EEmw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPwS2_EEw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEmPKw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEmPKwm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEmRKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEmRKS2_mm@@GLIBCXX_3.4 
++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6insertEmmw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6rbeginEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6resizeEm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE6resizeEmw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7_M_copyEPwPKwm@@GLIBCXX_3.4.5 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7_M_copyEPwPKwm@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7_M_dataEPw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7_M_leakEv@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7_M_moveEPwPKwm@@GLIBCXX_3.4.5 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7_M_moveEPwPKwm@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_NS4_IPKwS2_EES9_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_PKw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_PKwS8_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_PKwm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_RKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_S5_S5_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_S6_S6_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_St16initializer_listIwE@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS2_EES6_mw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEmmPKw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEmmPKwm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEmmRKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEmmRKS2_mm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7replaceEmmmw@@GLIBCXX_3.4 
++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE7reserveEm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE8pop_backEv@@GLIBCXX_3.4.17 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE9_M_assignEPwmw@@GLIBCXX_3.4.5 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE9_M_assignEPwmw@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE9_M_mutateEmmm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEE9push_backEw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ENS2_12__sv_wrapperERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1EOS2_@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1EOS2_RKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1EPKwRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1EPKwmRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ERKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ERKS2_RKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ERKS2_mRKS1_@@GLIBCXX_3.4.23 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ERKS2_mm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ERKS2_mmRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1ESt16initializer_listIwERKS1_@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1EmwRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1IN9__gnu_cxx17__normal_iteratorIPwS2_EEEET_S8_RKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1IPKwEET_S6_RKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC1IPwEET_S5_RKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ENS2_12__sv_wrapperERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2EOS2_@@GLIBCXX_3.4.15 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2EOS2_RKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2EPKwRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2EPKwmRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ERKS1_@@GLIBCXX_3.4 
++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ERKS2_RKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ERKS2_mRKS1_@@GLIBCXX_3.4.23 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ERKS2_mm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ERKS2_mmRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2ESt16initializer_listIwERKS1_@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2EmwRKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2IN9__gnu_cxx17__normal_iteratorIPwS2_EEEET_S8_RKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2IPKwEET_S6_RKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEC2IPwEET_S5_RKS1_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEaSEOS2_@@GLIBCXX_3.4.14 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEaSEPKw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEaSERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEaSESt16initializer_listIwE@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEaSEw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEixEm@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEpLEPKw@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEpLERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEpLESt16initializer_listIwE@@GLIBCXX_3.4.11 ++FUNC:_ZNSbIwSt11char_traitsIwESaIwEEpLEw@@GLIBCXX_3.4 ++FUNC:_ZNSd4swapERSd@@GLIBCXX_3.4.21 ++FUNC:_ZNSdC1EOSd@@GLIBCXX_3.4.21 ++FUNC:_ZNSdC1EPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSdC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSdC2EOSd@@GLIBCXX_3.4.21 ++FUNC:_ZNSdC2EPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSdC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSdD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSdD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSdD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSdaSEOSd@@GLIBCXX_3.4.21 ++FUNC:_ZNSi10_M_extractIPvEERSiRT_@@GLIBCXX_3.4.9 
++FUNC:_ZNSi10_M_extractIbEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractIdEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractIeEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractIfEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractIgEERSiRT_@@GLIBCXX_LDBL_3.4.7 ++FUNC:_ZNSi10_M_extractIjEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractIlEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractImEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractItEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractIxEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi10_M_extractIyEERSiRT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSi3getEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSi3getEPclc@@GLIBCXX_3.4 ++FUNC:_ZNSi3getERSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSi3getERSt15basic_streambufIcSt11char_traitsIcEEc@@GLIBCXX_3.4 ++FUNC:_ZNSi3getERc@@GLIBCXX_3.4 ++FUNC:_ZNSi3getEv@@GLIBCXX_3.4 ++FUNC:_ZNSi4peekEv@@GLIBCXX_3.4 ++FUNC:_ZNSi4readEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSi4swapERSi@@GLIBCXX_3.4.21 ++FUNC:_ZNSi4syncEv@@GLIBCXX_3.4 ++FUNC:_ZNSi5seekgESt4fposI11__mbstate_tE@@GLIBCXX_3.4 ++FUNC:_ZNSi5seekgElSt12_Ios_Seekdir@@GLIBCXX_3.4 ++FUNC:_ZNSi5tellgEv@@GLIBCXX_3.4 ++FUNC:_ZNSi5ungetEv@@GLIBCXX_3.4 ++FUNC:_ZNSi6ignoreEl@@GLIBCXX_3.4.5 ++FUNC:_ZNSi6ignoreEl@GLIBCXX_3.4 ++FUNC:_ZNSi6ignoreEli@@GLIBCXX_3.4 ++FUNC:_ZNSi6ignoreEv@@GLIBCXX_3.4.5 ++FUNC:_ZNSi6ignoreEv@GLIBCXX_3.4 ++FUNC:_ZNSi6sentryC1ERSib@@GLIBCXX_3.4 ++FUNC:_ZNSi6sentryC2ERSib@@GLIBCXX_3.4 ++FUNC:_ZNSi7getlineEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSi7getlineEPclc@@GLIBCXX_3.4 ++FUNC:_ZNSi7putbackEc@@GLIBCXX_3.4 ++FUNC:_ZNSi8readsomeEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSiC1EOSi@@GLIBCXX_3.4.21 ++FUNC:_ZNSiC1EPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSiC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSiC2EOSi@@GLIBCXX_3.4.21 ++FUNC:_ZNSiC2EPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSiC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSiD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSiD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSiD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSiaSEOSi@@GLIBCXX_3.4.21 ++FUNC:_ZNSirsEPFRSiS_E@@GLIBCXX_3.4 
++FUNC:_ZNSirsEPFRSt8ios_baseS0_E@@GLIBCXX_3.4 ++FUNC:_ZNSirsEPFRSt9basic_iosIcSt11char_traitsIcEES3_E@@GLIBCXX_3.4 ++FUNC:_ZNSirsEPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSirsERPv@@GLIBCXX_3.4 ++FUNC:_ZNSirsERb@@GLIBCXX_3.4 ++FUNC:_ZNSirsERd@@GLIBCXX_3.4 ++FUNC:_ZNSirsERe@@GLIBCXX_3.4 ++FUNC:_ZNSirsERf@@GLIBCXX_3.4 ++FUNC:_ZNSirsERg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSirsERi@@GLIBCXX_3.4 ++FUNC:_ZNSirsERj@@GLIBCXX_3.4 ++FUNC:_ZNSirsERl@@GLIBCXX_3.4 ++FUNC:_ZNSirsERm@@GLIBCXX_3.4 ++FUNC:_ZNSirsERs@@GLIBCXX_3.4 ++FUNC:_ZNSirsERt@@GLIBCXX_3.4 ++FUNC:_ZNSirsERx@@GLIBCXX_3.4 ++FUNC:_ZNSirsERy@@GLIBCXX_3.4 ++FUNC:_ZNSo3putEc@@GLIBCXX_3.4 ++FUNC:_ZNSo4swapERSo@@GLIBCXX_3.4.21 ++FUNC:_ZNSo5flushEv@@GLIBCXX_3.4 ++FUNC:_ZNSo5seekpESt4fposI11__mbstate_tE@@GLIBCXX_3.4 ++FUNC:_ZNSo5seekpElSt12_Ios_Seekdir@@GLIBCXX_3.4 ++FUNC:_ZNSo5tellpEv@@GLIBCXX_3.4 ++FUNC:_ZNSo5writeEPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSo6sentryC1ERSo@@GLIBCXX_3.4 ++FUNC:_ZNSo6sentryC2ERSo@@GLIBCXX_3.4 ++FUNC:_ZNSo6sentryD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSo6sentryD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSo8_M_writeEPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSo9_M_insertIPKvEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSo9_M_insertIbEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSo9_M_insertIdEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSo9_M_insertIeEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSo9_M_insertIgEERSoT_@@GLIBCXX_LDBL_3.4.7 ++FUNC:_ZNSo9_M_insertIlEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSo9_M_insertImEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSo9_M_insertIxEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSo9_M_insertIyEERSoT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSoC1EOSo@@GLIBCXX_3.4.21 ++FUNC:_ZNSoC1EPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSoC1ERSd@@GLIBCXX_3.4.21 ++FUNC:_ZNSoC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSoC2EOSo@@GLIBCXX_3.4.21 ++FUNC:_ZNSoC2EPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSoC2ERSd@@GLIBCXX_3.4.21 ++FUNC:_ZNSoC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSoD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSoD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSoD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSoaSEOSo@@GLIBCXX_3.4.21 
++FUNC:_ZNSolsEDn@@GLIBCXX_3.4.26 ++FUNC:_ZNSolsEPFRSoS_E@@GLIBCXX_3.4 ++FUNC:_ZNSolsEPFRSt8ios_baseS0_E@@GLIBCXX_3.4 ++FUNC:_ZNSolsEPFRSt9basic_iosIcSt11char_traitsIcEES3_E@@GLIBCXX_3.4 ++FUNC:_ZNSolsEPKv@@GLIBCXX_3.4 ++FUNC:_ZNSolsEPSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++FUNC:_ZNSolsEb@@GLIBCXX_3.4 ++FUNC:_ZNSolsEd@@GLIBCXX_3.4 ++FUNC:_ZNSolsEe@@GLIBCXX_3.4 ++FUNC:_ZNSolsEf@@GLIBCXX_3.4 ++FUNC:_ZNSolsEg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSolsEi@@GLIBCXX_3.4 ++FUNC:_ZNSolsEj@@GLIBCXX_3.4 ++FUNC:_ZNSolsEl@@GLIBCXX_3.4 ++FUNC:_ZNSolsEm@@GLIBCXX_3.4 ++FUNC:_ZNSolsEs@@GLIBCXX_3.4 ++FUNC:_ZNSolsEt@@GLIBCXX_3.4 ++FUNC:_ZNSolsEx@@GLIBCXX_3.4 ++FUNC:_ZNSolsEy@@GLIBCXX_3.4 ++FUNC:_ZNSs10_S_compareEmm@@GLIBCXX_3.4.16 ++FUNC:_ZNSs12_Alloc_hiderC1EPcRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSs12_Alloc_hiderC2EPcRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSs12_M_leak_hardEv@@GLIBCXX_3.4 ++FUNC:_ZNSs12_S_constructEmcRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSs12_S_constructIN9__gnu_cxx17__normal_iteratorIPcSsEEEES2_T_S4_RKSaIcESt20forward_iterator_tag@@GLIBCXX_3.4.14 ++FUNC:_ZNSs12_S_constructIPKcEEPcT_S3_RKSaIcESt20forward_iterator_tag@@GLIBCXX_3.4.14 ++FUNC:_ZNSs12_S_constructIPcEES0_T_S1_RKSaIcESt20forward_iterator_tag@@GLIBCXX_3.4.14 ++FUNC:_ZNSs12_S_empty_repEv@@GLIBCXX_3.4 ++FUNC:_ZNSs12__sv_wrapperC1ESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSs12__sv_wrapperC2ESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSs13_S_copy_charsEPcN9__gnu_cxx17__normal_iteratorIPKcSsEES4_@@GLIBCXX_3.4 ++FUNC:_ZNSs13_S_copy_charsEPcN9__gnu_cxx17__normal_iteratorIS_SsEES2_@@GLIBCXX_3.4 ++FUNC:_ZNSs13_S_copy_charsEPcPKcS1_@@GLIBCXX_3.4 ++FUNC:_ZNSs13_S_copy_charsEPcS_S_@@GLIBCXX_3.4 ++FUNC:_ZNSs13shrink_to_fitEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSs14_M_replace_auxEmmmc@@GLIBCXX_3.4 ++FUNC:_ZNSs15_M_replace_safeEmmPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSs17_S_to_string_viewESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSs18_S_construct_aux_2EmcRKSaIcE@@GLIBCXX_3.4.14 
++FUNC:_ZNSs2atEm@@GLIBCXX_3.4 ++FUNC:_ZNSs3endEv@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep10_M_destroyERKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep10_M_disposeERKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep10_M_refcopyEv@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep10_M_refdataEv@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep12_S_empty_repEv@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep13_M_set_leakedEv@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep15_M_set_sharableEv@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep26_M_set_length_and_sharableEm@@GLIBCXX_3.4.5 ++FUNC:_ZNSs4_Rep26_M_set_length_and_sharableEm@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep7_M_grabERKSaIcES2_@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep8_M_cloneERKSaIcEm@@GLIBCXX_3.4 ++FUNC:_ZNSs4_Rep9_S_createEmmRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSs4backEv@@GLIBCXX_3.4.15 ++FUNC:_ZNSs4dataEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSs4rendEv@@GLIBCXX_3.4 ++FUNC:_ZNSs4swapERSs@@GLIBCXX_3.4 ++FUNC:_ZNSs5beginEv@@GLIBCXX_3.4 ++FUNC:_ZNSs5clearEv@@GLIBCXX_3.4 ++FUNC:_ZNSs5eraseEN9__gnu_cxx17__normal_iteratorIPcSsEE@@GLIBCXX_3.4 ++FUNC:_ZNSs5eraseEN9__gnu_cxx17__normal_iteratorIPcSsEES2_@@GLIBCXX_3.4 ++FUNC:_ZNSs5eraseEmm@@GLIBCXX_3.4 ++FUNC:_ZNSs5frontEv@@GLIBCXX_3.4.15 ++FUNC:_ZNSs6appendEPKc@@GLIBCXX_3.4 ++FUNC:_ZNSs6appendEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSs6appendERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSs6appendERKSsmm@@GLIBCXX_3.4 ++FUNC:_ZNSs6appendESt16initializer_listIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSs6appendEmc@@GLIBCXX_3.4 ++FUNC:_ZNSs6assignEOSs@@GLIBCXX_3.4.14 ++FUNC:_ZNSs6assignEPKc@@GLIBCXX_3.4 ++FUNC:_ZNSs6assignEPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSs6assignERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSs6assignERKSsmm@@GLIBCXX_3.4 ++FUNC:_ZNSs6assignESt16initializer_listIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSs6assignEmc@@GLIBCXX_3.4 ++FUNC:_ZNSs6insertEN9__gnu_cxx17__normal_iteratorIPcSsEESt16initializer_listIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSs6insertEN9__gnu_cxx17__normal_iteratorIPcSsEEc@@GLIBCXX_3.4 ++FUNC:_ZNSs6insertEN9__gnu_cxx17__normal_iteratorIPcSsEEmc@@GLIBCXX_3.4 ++FUNC:_ZNSs6insertEmPKc@@GLIBCXX_3.4 ++FUNC:_ZNSs6insertEmPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSs6insertEmRKSs@@GLIBCXX_3.4 
++FUNC:_ZNSs6insertEmRKSsmm@@GLIBCXX_3.4 ++FUNC:_ZNSs6insertEmmc@@GLIBCXX_3.4 ++FUNC:_ZNSs6rbeginEv@@GLIBCXX_3.4 ++FUNC:_ZNSs6resizeEm@@GLIBCXX_3.4 ++FUNC:_ZNSs6resizeEmc@@GLIBCXX_3.4 ++FUNC:_ZNSs7_M_copyEPcPKcm@@GLIBCXX_3.4.5 ++FUNC:_ZNSs7_M_copyEPcPKcm@GLIBCXX_3.4 ++FUNC:_ZNSs7_M_dataEPc@@GLIBCXX_3.4 ++FUNC:_ZNSs7_M_leakEv@@GLIBCXX_3.4 ++FUNC:_ZNSs7_M_moveEPcPKcm@@GLIBCXX_3.4.5 ++FUNC:_ZNSs7_M_moveEPcPKcm@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_NS0_IPKcSsEES5_@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_PKc@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_PKcS4_@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_PKcm@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_RKSs@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_S1_S1_@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_S2_S2_@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_St16initializer_listIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSs7replaceEN9__gnu_cxx17__normal_iteratorIPcSsEES2_mc@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEmmPKc@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEmmPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEmmRKSs@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEmmRKSsmm@@GLIBCXX_3.4 ++FUNC:_ZNSs7replaceEmmmc@@GLIBCXX_3.4 ++FUNC:_ZNSs7reserveEm@@GLIBCXX_3.4 ++FUNC:_ZNSs8pop_backEv@@GLIBCXX_3.4.17 ++FUNC:_ZNSs9_M_assignEPcmc@@GLIBCXX_3.4.5 ++FUNC:_ZNSs9_M_assignEPcmc@GLIBCXX_3.4 ++FUNC:_ZNSs9_M_mutateEmmm@@GLIBCXX_3.4 ++FUNC:_ZNSs9push_backEc@@GLIBCXX_3.4 ++FUNC:_ZNSsC1ENSs12__sv_wrapperERKSaIcE@@GLIBCXX_3.4.26 ++FUNC:_ZNSsC1EOSs@@GLIBCXX_3.4.14 ++FUNC:_ZNSsC1EOSsRKSaIcE@@GLIBCXX_3.4.26 ++FUNC:_ZNSsC1EPKcRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC1EPKcmRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC1ERKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSsC1ERKSsRKSaIcE@@GLIBCXX_3.4.26 ++FUNC:_ZNSsC1ERKSsmRKSaIcE@@GLIBCXX_3.4.23 
++FUNC:_ZNSsC1ERKSsmm@@GLIBCXX_3.4 ++FUNC:_ZNSsC1ERKSsmmRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC1ESt16initializer_listIcERKSaIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSsC1EmcRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSsC1IN9__gnu_cxx17__normal_iteratorIPcSsEEEET_S4_RKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC1IPKcEET_S2_RKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC1IPcEET_S1_RKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2ENSs12__sv_wrapperERKSaIcE@@GLIBCXX_3.4.26 ++FUNC:_ZNSsC2EOSs@@GLIBCXX_3.4.15 ++FUNC:_ZNSsC2EOSsRKSaIcE@@GLIBCXX_3.4.26 ++FUNC:_ZNSsC2EPKcRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2EPKcmRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2ERKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSsC2ERKSsRKSaIcE@@GLIBCXX_3.4.26 ++FUNC:_ZNSsC2ERKSsmRKSaIcE@@GLIBCXX_3.4.23 ++FUNC:_ZNSsC2ERKSsmm@@GLIBCXX_3.4 ++FUNC:_ZNSsC2ERKSsmmRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2ESt16initializer_listIcERKSaIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSsC2EmcRKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSsC2IN9__gnu_cxx17__normal_iteratorIPcSsEEEET_S4_RKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2IPKcEET_S2_RKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsC2IPcEET_S1_RKSaIcE@@GLIBCXX_3.4 ++FUNC:_ZNSsD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSsD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSsaSEOSs@@GLIBCXX_3.4.14 ++FUNC:_ZNSsaSEPKc@@GLIBCXX_3.4 ++FUNC:_ZNSsaSERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSsaSESt16initializer_listIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSsaSEc@@GLIBCXX_3.4 ++FUNC:_ZNSsixEm@@GLIBCXX_3.4 ++FUNC:_ZNSspLEPKc@@GLIBCXX_3.4 ++FUNC:_ZNSspLERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSspLESt16initializer_listIcE@@GLIBCXX_3.4.11 ++FUNC:_ZNSspLEc@@GLIBCXX_3.4 ++FUNC:_ZNSt10_Sp_lockerC1EPKv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt10_Sp_lockerC1EPKvS1_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt10_Sp_lockerC2EPKv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt10_Sp_lockerC2EPKvS1_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt10_Sp_lockerD1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt10_Sp_lockerD2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt10__num_base15_S_format_floatERKSt8ios_basePcc@@GLIBCXX_3.4 ++FUNC:_ZNSt10bad_typeidD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10bad_typeidD1Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt10bad_typeidD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10filesystem10equivalentERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10equivalentERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10equivalentERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10equivalentERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10hash_valueERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10remove_allERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10remove_allERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10remove_allERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem10remove_allERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11permissionsERKNS_4pathENS_5permsENS_12perm_optionsE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11permissionsERKNS_4pathENS_5permsENS_12perm_optionsERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11permissionsERKNS_7__cxx114pathENS_5permsENS_12perm_optionsE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11permissionsERKNS_7__cxx114pathENS_5permsENS_12perm_optionsERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11resize_fileERKNS_4pathEm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11resize_fileERKNS_4pathEmRSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11resize_fileERKNS_7__cxx114pathEm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem11resize_fileERKNS_7__cxx114pathEmRSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12copy_symlinkERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12copy_symlinkERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12copy_symlinkERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12copy_symlinkERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12current_pathB5cxx11ERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12current_pathB5cxx11Ev@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem12current_pathERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12current_pathERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12current_pathERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12current_pathERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12current_pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12current_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12read_symlinkERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12read_symlinkERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12read_symlinkERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem12read_symlinkERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14create_symlinkERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14create_symlinkERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14create_symlinkERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14create_symlinkERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14symlink_statusERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14symlink_statusERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14symlink_statusERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem14symlink_statusERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15hard_link_countERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15hard_link_countERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15hard_link_countERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15hard_link_countERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15last_write_timeERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15last_write_timeERKNS_4pathENSt6chrono10time_pointINS_12__file_clockENS3_8durationIlSt5ratioILl1ELl1000000000EEEEEE@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem15last_write_timeERKNS_4pathENSt6chrono10time_pointINS_12__file_clockENS3_8durationIlSt5ratioILl1ELl1000000000EEEEEERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15last_write_timeERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15last_write_timeERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15last_write_timeERKNS_7__cxx114pathENSt6chrono10time_pointINS_12__file_clockENS4_8durationIlSt5ratioILl1ELl1000000000EEEEEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15last_write_timeERKNS_7__cxx114pathENSt6chrono10time_pointINS_12__file_clockENS4_8durationIlSt5ratioILl1ELl1000000000EEEEEERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem15last_write_timeERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_directoryERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_hard_linkERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_hard_linkERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_hard_linkERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16create_hard_linkERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorC1ERKSsRKNS_4pathES5_St10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorC1ERKSsRKNS_4pathESt10error_code@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem16filesystem_errorC1ERKSsSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorC2ERKSsRKNS_4pathES5_St10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorC2ERKSsRKNS_4pathESt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorC2ERKSsSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorD0Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorD1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16filesystem_errorD2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16weakly_canonicalERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16weakly_canonicalERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16weakly_canonicalERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem16weakly_canonicalERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18create_directoriesERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18create_directoriesERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18create_directoriesERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18create_directoriesERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18directory_iterator9incrementERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18directory_iteratorC1ERKNS_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18directory_iteratorC2ERKNS_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem18directory_iteratorppEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem19temp_directory_pathB5cxx11ERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem19temp_directory_pathB5cxx11Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem19temp_directory_pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem19temp_directory_pathEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem24create_directory_symlinkERKNS_4pathES2_@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem24create_directory_symlinkERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem24create_directory_symlinkERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem24create_directory_symlinkERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iterator25disable_recursion_pendingEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iterator3popERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iterator3popEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iterator9incrementERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iteratorC1ERKNS_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iteratorC2ERKNS_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iteratorD1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iteratorD2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iteratoraSEOS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem28recursive_directory_iteratoraSERKS0_@@GLIBCXX_3.4.27 ++FUNC:_ZNSt10filesystem28recursive_directory_iteratorppEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4copyERKNS_4pathES2_NS_12copy_optionsE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4copyERKNS_4pathES2_NS_12copy_optionsERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4copyERKNS_7__cxx114pathES3_NS_12copy_optionsE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4copyERKNS_7__cxx114pathES3_NS_12copy_optionsERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path14_M_split_cmptsEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path14_S_convert_locEPKcS2_RKSt6locale@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path15remove_filenameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path16replace_filenameERKS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path17replace_extensionERKS0_@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem4path5_ListC1ERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path5_ListC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path9_M_appendESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4path9_M_concatESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4pathaSERKS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4pathdVERKS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem4pathpLERKS0_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem5spaceERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem5spaceERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem5spaceERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem5spaceERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6removeERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6removeERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6removeERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6removeERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6renameERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6renameERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6renameERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6renameERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6statusERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6statusERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6statusERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem6statusERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1110hash_valueERKNS0_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS0_4pathESC_St10error_code@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS0_4pathESt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS0_4pathESC_St10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKNS0_4pathESt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEESt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorD0Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorD1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1116filesystem_errorD2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1118directory_iterator9incrementERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1118directory_iteratorC1ERKNS0_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1118directory_iteratorC2ERKNS0_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1118directory_iteratorppEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iterator25disable_recursion_pendingEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iterator3popERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iterator3popEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iterator9incrementERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iteratorC1ERKNS0_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iteratorC2ERKNS0_4pathENS_17directory_optionsEPSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iteratorD1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iteratorD2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iteratoraSEOS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iteratoraSERKS1_@@GLIBCXX_3.4.27 ++FUNC:_ZNSt10filesystem7__cxx1128recursive_directory_iteratorppEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path14_M_split_cmptsEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path14_S_convert_locEPKcS3_RKSt6locale@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path15remove_filenameEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path16replace_filenameERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path17replace_extensionERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path5_ListC1ERKS2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path5_ListC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path9_M_appendESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114path9_M_concatESt17basic_string_viewIcSt11char_traitsIcEE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114pathaSERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114pathdVERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem7__cxx114pathpLERKS1_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8absoluteERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8absoluteERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8absoluteERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8absoluteERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8is_emptyERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8is_emptyERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8is_emptyERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 
++FUNC:_ZNSt10filesystem8is_emptyERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8relativeERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8relativeERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8relativeERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem8relativeERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9canonicalERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9canonicalERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9canonicalERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9canonicalERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9copy_fileERKNS_4pathES2_NS_12copy_optionsE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9copy_fileERKNS_4pathES2_NS_12copy_optionsERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9copy_fileERKNS_7__cxx114pathES3_NS_12copy_optionsE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9copy_fileERKNS_7__cxx114pathES3_NS_12copy_optionsERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9file_sizeERKNS_4pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9file_sizeERKNS_4pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9file_sizeERKNS_7__cxx114pathE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9file_sizeERKNS_7__cxx114pathERSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9proximateERKNS_4pathES2_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9proximateERKNS_4pathES2_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9proximateERKNS_7__cxx114pathES3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10filesystem9proximateERKNS_7__cxx114pathES3_RSt10error_code@@GLIBCXX_3.4.26 ++FUNC:_ZNSt10istrstream3strEv@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamC1EPKc@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamC1EPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamC1EPc@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamC1EPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamC2EPKc@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamC2EPKcl@@GLIBCXX_3.4 
++FUNC:_ZNSt10istrstreamC2EPc@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamC2EPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10istrstreamD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10money_base20_S_construct_patternEccc@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EEC1EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EEC1EPSt18__moneypunct_cacheIcLb0EEm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EEC2EPSt18__moneypunct_cacheIcLb0EEm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb0EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EEC1EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EEC1EPSt18__moneypunct_cacheIcLb1EEm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EEC2EPSt18__moneypunct_cacheIcLb1EEm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIcLb1EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EEC1EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EEC1EPSt18__moneypunct_cacheIwLb0EEm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EEC2EPSt18__moneypunct_cacheIwLb0EEm@@GLIBCXX_3.4 
++FUNC:_ZNSt10moneypunctIwLb0EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb0EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EEC1EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EEC1EPSt18__moneypunct_cacheIwLb1EEm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EEC2EPSt18__moneypunct_cacheIwLb1EEm@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10moneypunctIwLb1EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstream3strEv@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstream6freezeEb@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstreamC1EPciSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstreamC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstreamC2EPciSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstreamC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstreamD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstreamD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt10ostrstreamD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcE23_M_initialize_timepunctEP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcEC1EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcEC1EPSt17__timepunct_cacheIcEm@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcEC2EPSt17__timepunct_cacheIcEm@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwE23_M_initialize_timepunctEP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwEC1EP15__locale_structPKcm@@GLIBCXX_3.4 
++FUNC:_ZNSt11__timepunctIwEC1EPSt17__timepunct_cacheIwEm@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwEC2EPSt17__timepunct_cacheIwEm@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11__timepunctIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11char_traitsIcE2eqERKcS2_@@GLIBCXX_3.4.5 ++FUNC:_ZNSt11char_traitsIcE2eqERKcS2_@GLIBCXX_3.4 ++FUNC:_ZNSt11char_traitsIwE2eqERKwS2_@@GLIBCXX_3.4.5 ++FUNC:_ZNSt11char_traitsIwE2eqERKwS2_@GLIBCXX_3.4 ++FUNC:_ZNSt11logic_errorC1EOS_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt11logic_errorC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11logic_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11logic_errorC1ERKS_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11logic_errorC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt11logic_errorC2EOS_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt11logic_errorC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11logic_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11logic_errorC2ERKS_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11logic_errorC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt11logic_errorD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11logic_errorD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11logic_errorD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11logic_erroraSEOS_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt11logic_erroraSERKS_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11range_errorC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11range_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11range_errorC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt11range_errorC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11range_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11range_errorC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt11range_errorD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11range_errorD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt11range_errorD2Ev@@GLIBCXX_3.4.15 
++FUNC:_ZNSt11regex_errorC1ENSt15regex_constants10error_typeE@@GLIBCXX_3.4.20 ++FUNC:_ZNSt11regex_errorC2ENSt15regex_constants10error_typeE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt11regex_errorD0Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt11regex_errorD1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt11regex_errorD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt11this_thread11__sleep_forENSt6chrono8durationIlSt5ratioILl1ELl1EEEENS1_IlS2_ILl1ELl1000000000EEEE@@GLIBCXX_3.4.18 ++FUNC:_ZNSt12__basic_fileIcE2fdEv@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE4fileEv@@GLIBCXX_3.4.1 ++FUNC:_ZNSt12__basic_fileIcE4openEPKcSt13_Ios_Openmodei@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE4syncEv@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE6xsgetnEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE6xsputnEPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE7seekoffElSt12_Ios_Seekdir@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE8sys_openEP8_IO_FILESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE8sys_openEiSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE8xsputn_2EPKclS2_l@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcE9showmanycEv@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcEC1EP15pthread_mutex_t@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcEC2EP15pthread_mutex_t@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12__basic_fileIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem28recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC1EOS5_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem28recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem28recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC2EOS5_@@GLIBCXX_3.4.28 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem28recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC2Ev@@GLIBCXX_3.4.27 
++FUNC:_ZNSt12__shared_ptrINSt10filesystem4_DirELN9__gnu_cxx12_Lock_policyE2EEC1EOS4_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem4_DirELN9__gnu_cxx12_Lock_policyE2EEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem4_DirELN9__gnu_cxx12_Lock_policyE2EEC2EOS4_@@GLIBCXX_3.4.28 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem4_DirELN9__gnu_cxx12_Lock_policyE2EEC2Ev@@GLIBCXX_3.4.27 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem4_DirELN9__gnu_cxx12_Lock_policyE2EEaSEOS4_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx1128recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC1EOS6_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx1128recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx1128recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC2EOS6_@@GLIBCXX_3.4.28 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx1128recursive_directory_iterator10_Dir_stackELN9__gnu_cxx12_Lock_policyE2EEC2Ev@@GLIBCXX_3.4.27 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx114_DirELN9__gnu_cxx12_Lock_policyE2EEC1EOS5_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx114_DirELN9__gnu_cxx12_Lock_policyE2EEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx114_DirELN9__gnu_cxx12_Lock_policyE2EEC2EOS5_@@GLIBCXX_3.4.28 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx114_DirELN9__gnu_cxx12_Lock_policyE2EEC2Ev@@GLIBCXX_3.4.27 ++FUNC:_ZNSt12__shared_ptrINSt10filesystem7__cxx114_DirELN9__gnu_cxx12_Lock_policyE2EEaSEOS5_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt12bad_weak_ptrD0Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt12bad_weak_ptrD1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt12bad_weak_ptrD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt12ctype_bynameIcEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIcEC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12ctype_bynameIcEC1ERKSsm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt12ctype_bynameIcEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIcEC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12ctype_bynameIcEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12ctype_bynameIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIwEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIwEC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12ctype_bynameIwEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12ctype_bynameIwEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIwEC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12ctype_bynameIwEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12ctype_bynameIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12ctype_bynameIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12domain_errorC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12domain_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12domain_errorC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt12domain_errorC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12domain_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12domain_errorC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt12domain_errorD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12domain_errorD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12domain_errorD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt12future_errorD0Ev@@GLIBCXX_3.4.14 ++FUNC:_ZNSt12future_errorD1Ev@@GLIBCXX_3.4.14 ++FUNC:_ZNSt12future_errorD2Ev@@GLIBCXX_3.4.14 ++FUNC:_ZNSt12length_errorC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12length_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12length_errorC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt12length_errorC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12length_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12length_errorC2ERKSs@@GLIBCXX_3.4 
++FUNC:_ZNSt12length_errorD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12length_errorD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12length_errorD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt12out_of_rangeC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12out_of_rangeC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12out_of_rangeC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt12out_of_rangeC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12out_of_rangeC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt12out_of_rangeC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt12out_of_rangeD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12out_of_rangeD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12out_of_rangeD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt12strstreambuf3strEv@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf6freezeEb@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf6setbufEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf7_M_freeEPc@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf8_M_allocEm@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf8_M_setupEPcS0_l@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf8overflowEi@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf9pbackfailEi@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambuf9underflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1EPFPvmEPFvS0_E@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1EPKal@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1EPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1EPKhl@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1EPalS0_@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1EPclS0_@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1EPhlS0_@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC1El@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC2EPFPvmEPFvS0_E@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC2EPKal@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC2EPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC2EPKhl@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC2EPalS0_@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC2EPclS0_@@GLIBCXX_3.4 
++FUNC:_ZNSt12strstreambufC2EPhlS0_@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufC2El@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12strstreambufD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt12system_errorD0Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt12system_errorD1Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt12system_errorD2Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt13__future_base11_State_baseD0Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base11_State_baseD1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base11_State_baseD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base12_Result_baseC1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base12_Result_baseC2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base12_Result_baseD0Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base12_Result_baseD1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base12_Result_baseD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13__future_base13_State_baseV211_Make_ready6_M_setEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13__future_base19_Async_state_commonD0Ev@@GLIBCXX_3.4.17 ++FUNC:_ZNSt13__future_base19_Async_state_commonD1Ev@@GLIBCXX_3.4.17 ++FUNC:_ZNSt13__future_base19_Async_state_commonD2Ev@@GLIBCXX_3.4.17 ++FUNC:_ZNSt13bad_exceptionD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13bad_exceptionD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13bad_exceptionD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE13_M_set_bufferEl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE14_M_get_ext_posER11__mbstate_t@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE15_M_create_pbackEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE16_M_destroy_pbackEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE19_M_terminate_outputEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE22_M_convert_to_externalEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE26_M_destroy_internal_bufferEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE27_M_allocate_internal_bufferEv@@GLIBCXX_3.4 
++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE4openERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE4syncEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE5imbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE6setbufEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE6xsgetnEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE6xsputnEPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE7_M_seekElSt12_Ios_Seekdir11__mbstate_t@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE8overflowEi@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE9pbackfailEi@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE9showmanycEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEE9underflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIcSt11char_traitsIcEEaSEOS2_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE13_M_set_bufferEl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE14_M_get_ext_posER11__mbstate_t@@GLIBCXX_3.4.15 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE15_M_create_pbackEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE16_M_destroy_pbackEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE19_M_terminate_outputEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE22_M_convert_to_externalEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE26_M_destroy_internal_bufferEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE27_M_allocate_internal_bufferEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE4openERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE4syncEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE5imbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE6setbufEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE6xsgetnEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE6xsputnEPKwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE7_M_seekElSt12_Ios_Seekdir11__mbstate_t@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE8overflowEj@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE9pbackfailEj@@GLIBCXX_3.4 
++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE9showmanycEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEE9underflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_filebufIwSt11char_traitsIwEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEE4openERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC1ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC2EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC2ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 
++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIcSt11char_traitsIcEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEE4openERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC1EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC1ERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC2EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC2ERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_fstreamIwSt11char_traitsIwEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIPvEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIbEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIdEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIeEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIfEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIgEERS2_RT_@@GLIBCXX_LDBL_3.4.7 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIjEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIlEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractImEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractItEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIxEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE10_M_extractIyEERS2_RT_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE3getEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE3getEPwlw@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE3getERSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE3getERSt15basic_streambufIwS1_Ew@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE3getERw@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE3getEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE4peekEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE4readEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE4syncEv@@GLIBCXX_3.4 
++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE5seekgESt4fposI11__mbstate_tE@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE5seekgElSt12_Ios_Seekdir@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE5tellgEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE5ungetEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE6ignoreEl@@GLIBCXX_3.4.5 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE6ignoreEl@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE6ignoreElj@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE6ignoreEv@@GLIBCXX_3.4.5 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE6ignoreEv@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE6sentryC1ERS2_b@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE6sentryC2ERS2_b@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE7getlineEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE7getlineEPwlw@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE7putbackEw@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEE8readsomeEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEEC1EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEEC2EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsEPFRS2_S3_E@@GLIBCXX_3.4 
++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsEPFRSt8ios_baseS4_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsEPFRSt9basic_iosIwS1_ES5_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsEPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERPv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERb@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERd@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERe@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERf@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERi@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERj@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERm@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERs@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERt@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERx@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_istreamIwSt11char_traitsIwEErsERy@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE3putEw@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE5flushEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE5seekpESt4fposI11__mbstate_tE@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE5seekpElSt12_Ios_Seekdir@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE5tellpEv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE5writeEPKwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE6sentryC1ERS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE6sentryC2ERS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE6sentryD1Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE6sentryD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE8_M_writeEPKwl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIPKvEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIbEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIdEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIeEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIgEERS2_T_@@GLIBCXX_LDBL_3.4.7 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIlEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertImEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIxEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEE9_M_insertIyEERS2_T_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC1EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC1ERSt14basic_iostreamIwS1_E@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC2EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC2ERSt14basic_iostreamIwS1_E@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEDn@@GLIBCXX_3.4.26 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEPFRS2_S3_E@@GLIBCXX_3.4 
++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEPFRSt8ios_baseS4_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEPFRSt9basic_iosIwS1_ES5_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEPKv@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEb@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEd@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEe@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEf@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEg@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEi@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEj@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEl@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEm@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEs@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEt@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEx@@GLIBCXX_3.4 ++FUNC:_ZNSt13basic_ostreamIwSt11char_traitsIwEElsEy@@GLIBCXX_3.4 ++FUNC:_ZNSt13random_device14_M_init_pretr1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13random_device14_M_init_pretr1ERKSs@@GLIBCXX_3.4.18 ++FUNC:_ZNSt13random_device16_M_getval_pretr1Ev@@GLIBCXX_3.4.18 ++FUNC:_ZNSt13random_device7_M_finiEv@@GLIBCXX_3.4.18 ++FUNC:_ZNSt13random_device7_M_initERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13random_device7_M_initERKSs@@GLIBCXX_3.4.18 ++FUNC:_ZNSt13random_device9_M_getvalEv@@GLIBCXX_3.4.18 ++FUNC:_ZNSt13runtime_errorC1EOS_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt13runtime_errorC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13runtime_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13runtime_errorC1ERKS_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt13runtime_errorC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt13runtime_errorC2EOS_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt13runtime_errorC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13runtime_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13runtime_errorC2ERKS_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt13runtime_errorC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt13runtime_errorD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13runtime_errorD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13runtime_errorD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt13runtime_erroraSEOS_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt13runtime_erroraSERKS_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEE4openERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC2EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC2ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEC2Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIcSt11char_traitsIcEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEE4openERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC1EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC1ERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC2EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC2ERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ifstreamIwSt11char_traitsIwEEaSEOS2_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEEC1EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEEC2EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_iostreamIwSt11char_traitsIwEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEE4openERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC2EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 
++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC2ERKNSt7__cxx1112basic_stringIcS1_SaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIcSt11char_traitsIcEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEE4openEPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEE4openERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEE4openERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEE5closeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEE7is_openEv@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC1EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC1EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC1ERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC2EOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC2EPKcSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC2ERKNSt7__cxx1112basic_stringIcS0_IcESaIcEEESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4.13 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14basic_ofstreamIwSt11char_traitsIwEEaSEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tEC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tEC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIcc11__mbstate_tED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tEC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tEC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14codecvt_bynameIwc11__mbstate_tED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIcEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIcEC1ERKSsm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt14collate_bynameIcEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIcEC2ERKSsm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt14collate_bynameIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIcED1Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt14collate_bynameIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIwEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIwEC1ERKSsm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt14collate_bynameIwEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIwEC2ERKSsm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt14collate_bynameIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14collate_bynameIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14error_categoryC1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt14error_categoryC2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt14error_categoryD0Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt14error_categoryD1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt14error_categoryD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt14overflow_errorC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14overflow_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14overflow_errorC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt14overflow_errorC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14overflow_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt14overflow_errorC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt14overflow_errorD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14overflow_errorD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt14overflow_errorD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt15_List_node_base10_M_reverseEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSt15_List_node_base11_M_transferEPS_S0_@@GLIBCXX_3.4.14 ++FUNC:_ZNSt15_List_node_base4hookEPS_@@GLIBCXX_3.4 ++FUNC:_ZNSt15_List_node_base4swapERS_S0_@@GLIBCXX_3.4 ++FUNC:_ZNSt15_List_node_base6unhookEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15_List_node_base7_M_hookEPS_@@GLIBCXX_3.4.14 ++FUNC:_ZNSt15_List_node_base7reverseEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15_List_node_base8transferEPS_S0_@@GLIBCXX_3.4 ++FUNC:_ZNSt15_List_node_base9_M_unhookEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSt15__exception_ptr13exception_ptr4swapERS0_@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptrC1EMS0_FvvE@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptrC1EPv@@CXXABI_1.3.11 ++FUNC:_ZNSt15__exception_ptr13exception_ptrC1ERKS0_@@CXXABI_1.3.3 
++FUNC:_ZNSt15__exception_ptr13exception_ptrC1Ev@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptrC2EMS0_FvvE@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptrC2ERKS0_@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptrC2Ev@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptrD1Ev@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptrD2Ev@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptr13exception_ptraSERKS0_@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptreqERKNS_13exception_ptrES2_@@CXXABI_1.3.3 ++FUNC:_ZNSt15__exception_ptrneERKNS_13exception_ptrES2_@@CXXABI_1.3.3 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE10pubseekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE10pubseekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE12__safe_gbumpEl@@GLIBCXX_3.4.16 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE12__safe_pbumpEl@@GLIBCXX_3.4.16 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE4setgEPcS3_S3_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE4setpEPcS3_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE4syncEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5gbumpEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5imbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5pbumpEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5sgetcEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5sgetnEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5sputcEc@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5sputnEPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE5uflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE6sbumpcEv@@GLIBCXX_3.4 
++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE6setbufEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE6snextcEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE6stosscEv@@GLIBCXX_3.4.10 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE6xsgetnEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE6xsputnEPKcl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE7pubsyncEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE7sungetcEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE8in_availEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE8overflowEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE8pubimbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE9pbackfailEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE9pubsetbufEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE9showmanycEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE9sputbackcEc@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEE9underflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEEC1ERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEEC2ERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIcSt11char_traitsIcEEaSERKS2_@@GLIBCXX_3.4 
++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE10pubseekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE10pubseekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE12__safe_gbumpEl@@GLIBCXX_3.4.16 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE12__safe_pbumpEl@@GLIBCXX_3.4.16 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE4setgEPwS3_S3_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE4setpEPwS3_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE4syncEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5gbumpEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5imbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5pbumpEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5sgetcEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5sgetnEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5sputcEw@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5sputnEPKwl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE5uflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE6sbumpcEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE6setbufEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE6snextcEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE6stosscEv@@GLIBCXX_3.4.10 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE6xsgetnEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE6xsputnEPKwl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE7pubsyncEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 
++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE7sungetcEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE8in_availEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE8overflowEj@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE8pubimbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE9pbackfailEj@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE9pubsetbufEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE9showmanycEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE9sputbackcEw@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEE9underflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEEC1ERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEEC2ERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_streambufIwSt11char_traitsIwEEaSERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE15_M_update_egptrEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE17_M_stringbuf_initESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE3strERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE6setbufEPcl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE7_M_syncEPcmm@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 
++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE8_M_pbumpEPcS4_l@@GLIBCXX_3.4.16 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE8overflowEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE9pbackfailEi@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE9showmanycEv@@GLIBCXX_3.4.6 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEE9underflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIcSt11char_traitsIcESaIcEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE15_M_update_egptrEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE17_M_stringbuf_initESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE3strERKSbIwS1_S2_E@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE6setbufEPwl@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE7_M_syncEPwmm@@GLIBCXX_3.4 
++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE8_M_pbumpEPwS4_l@@GLIBCXX_3.4.16 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE8overflowEj@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE9pbackfailEj@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE9showmanycEv@@GLIBCXX_3.4.6 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEE9underflowEv@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC1ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC2ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15basic_stringbufIwSt11char_traitsIwESaIwEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15messages_bynameIcEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIcEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15messages_bynameIcEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIcEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15messages_bynameIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIwEC1EPKcm@@GLIBCXX_3.4 
++FUNC:_ZNSt15messages_bynameIwEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15messages_bynameIwEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIwEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15messages_bynameIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15messages_bynameIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIcEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIcEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15numpunct_bynameIcEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIcEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15numpunct_bynameIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIwEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIwEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15numpunct_bynameIwEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIwEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15numpunct_bynameIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15numpunct_bynameIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1ERKSsm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1ERKNSt7__cxx1112basic_stringIcS2_SaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2ERKNSt7__cxx1112basic_stringIcS2_SaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15underflow_errorC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15underflow_errorC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15underflow_errorC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt15underflow_errorC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15underflow_errorC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt15underflow_errorC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt15underflow_errorD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15underflow_errorD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt15underflow_errorD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt16__numpunct_cacheIcE8_M_cacheERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIcEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIcEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIwE8_M_cacheERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIwEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIwEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt16__numpunct_cacheIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt16bad_array_lengthD0Ev@@CXXABI_1.3.8 ++FUNC:_ZNSt16bad_array_lengthD1Ev@@CXXABI_1.3.8 ++FUNC:_ZNSt16bad_array_lengthD2Ev@@CXXABI_1.3.8 ++FUNC:_ZNSt16invalid_argumentC1EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt16invalid_argumentC1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt16invalid_argumentC1ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt16invalid_argumentC2EPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt16invalid_argumentC2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt16invalid_argumentC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt16invalid_argumentD0Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt16invalid_argumentD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt16invalid_argumentD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt16nested_exceptionD0Ev@@CXXABI_1.3.5 ++FUNC:_ZNSt16nested_exceptionD1Ev@@CXXABI_1.3.5 ++FUNC:_ZNSt16nested_exceptionD2Ev@@CXXABI_1.3.5 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_LDBL_3.4 
++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_LDBL_3.4 ++FUNC:_ZNSt17__timepunct_cacheIcEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIcEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIwEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIwEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17__timepunct_cacheIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17bad_function_callD0Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt17bad_function_callD1Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt17bad_function_callD2Ev@@GLIBCXX_3.4.15 ++FUNC:_ZNSt17moneypunct_bynameIcLb0EEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb0EEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt17moneypunct_bynameIcLb0EEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb0EEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt17moneypunct_bynameIcLb0EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb0EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb0EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb1EEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb1EEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt17moneypunct_bynameIcLb1EEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb1EEC2ERKSsm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt17moneypunct_bynameIcLb1EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb1EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIcLb1EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb0EEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb0EEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt17moneypunct_bynameIwLb0EEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb0EEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt17moneypunct_bynameIwLb0EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb0EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb0EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb1EEC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb1EEC1ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt17moneypunct_bynameIwLb1EEC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb1EEC2ERKSsm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt17moneypunct_bynameIwLb1EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb1EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt17moneypunct_bynameIwLb1EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb0EE8_M_cacheERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb0EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb0EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb0EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb0EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb0EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb1EE8_M_cacheERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb1EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb1EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb1EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb1EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIcLb1EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb0EE8_M_cacheERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb0EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb0EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb0EED0Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt18__moneypunct_cacheIwLb0EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb0EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb1EE8_M_cacheERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb1EEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb1EEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb1EED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb1EED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18__moneypunct_cacheIwLb1EED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEE3strERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIcSt11char_traitsIcESaIcEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEE3strERKSbIwS1_S2_E@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC1EOS3_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC1ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC2ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt18basic_stringstreamIwSt11char_traitsIwESaIwEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt18condition_variable10notify_allEv@@GLIBCXX_3.4.11 ++FUNC:_ZNSt18condition_variable10notify_oneEv@@GLIBCXX_3.4.11 ++FUNC:_ZNSt18condition_variable4waitERSt11unique_lockISt5mutexE@@GLIBCXX_3.4.11 ++FUNC:_ZNSt18condition_variableC1Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt18condition_variableC2Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt18condition_variableD1Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt18condition_variableD2Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt19_Sp_make_shared_tag5_S_eqERKSt9type_info@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19__codecvt_utf8_baseIDiED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIDiED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIDiED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIDsED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIDsED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIDsED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19__codecvt_utf8_baseIwED2Ev@@GLIBCXX_3.4.21 
++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEE3strERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIcSt11char_traitsIcESaIcEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEE3strERKSbIwS1_S2_E@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC1ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC2ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 
++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_istringstreamIwSt11char_traitsIwESaIwEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE3strERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC1EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC1ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC2ERKSsSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIcSt11char_traitsIcESaIcEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEE3strERKSbIwS1_S2_E@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEE4swapERS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC1EOS3_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC1ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC2EOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC2ERKSbIwS1_S2_ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt19basic_ostringstreamIwSt11char_traitsIwESaIwEEaSEOS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt19istreambuf_iteratorIcSt11char_traitsIcEEppEv@@GLIBCXX_3.4.5 ++FUNC:_ZNSt19istreambuf_iteratorIcSt11char_traitsIcEEppEv@GLIBCXX_3.4 ++FUNC:_ZNSt19istreambuf_iteratorIwSt11char_traitsIwEEppEv@@GLIBCXX_3.4.5 ++FUNC:_ZNSt19istreambuf_iteratorIwSt11char_traitsIwEEppEv@GLIBCXX_3.4 ++FUNC:_ZNSt20__codecvt_utf16_baseIDiED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIDiED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIDiED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIDsED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIDsED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIDsED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20__codecvt_utf16_baseIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt20bad_array_new_lengthD0Ev@@CXXABI_1.3.8 ++FUNC:_ZNSt20bad_array_new_lengthD1Ev@@CXXABI_1.3.8 ++FUNC:_ZNSt20bad_array_new_lengthD2Ev@@CXXABI_1.3.8 ++FUNC:_ZNSt22condition_variable_anyC1Ev@@GLIBCXX_3.4.11 
++FUNC:_ZNSt22condition_variable_anyC2Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt22condition_variable_anyD1Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt22condition_variable_anyD2Ev@@GLIBCXX_3.4.11 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIDiED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIDiED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIDiED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIDsED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIDsED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIDsED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt25__codecvt_utf8_utf16_baseIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt28__atomic_futex_unsigned_base19_M_futex_notify_allEPj@@GLIBCXX_3.4.21 ++FUNC:_ZNSt28__atomic_futex_unsigned_base19_M_futex_wait_untilEPjjbNSt6chrono8durationIlSt5ratioILl1ELl1EEEENS2_IlS3_ILl1ELl1000000000EEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt3_V214error_categoryD0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt3_V214error_categoryD1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt3_V214error_categoryD2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt3_V215system_categoryEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt3_V216generic_categoryEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt3pmr19new_delete_resourceEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr20get_default_resourceEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr20null_memory_resourceEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr20set_default_resourceEPNS_15memory_resourceE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr25monotonic_buffer_resource13_M_new_bufferEmm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr25monotonic_buffer_resource18_M_release_buffersEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr26synchronized_pool_resource11do_allocateEmm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr26synchronized_pool_resource13do_deallocateEPvmm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr26synchronized_pool_resource7releaseEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr26synchronized_pool_resourceC1ERKNS_12pool_optionsEPNS_15memory_resourceE@@GLIBCXX_3.4.26 
++FUNC:_ZNSt3pmr26synchronized_pool_resourceC2ERKNS_12pool_optionsEPNS_15memory_resourceE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr26synchronized_pool_resourceD1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr26synchronized_pool_resourceD2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr28unsynchronized_pool_resource11do_allocateEmm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr28unsynchronized_pool_resource13do_deallocateEPvmm@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr28unsynchronized_pool_resource7releaseEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr28unsynchronized_pool_resourceC1ERKNS_12pool_optionsEPNS_15memory_resourceE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr28unsynchronized_pool_resourceC2ERKNS_12pool_optionsEPNS_15memory_resourceE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr28unsynchronized_pool_resourceD1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt3pmr28unsynchronized_pool_resourceD2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt5ctypeIcE13classic_tableEv@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIcEC1EP15__locale_structPKtbm@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIcEC1EPKtbm@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIcEC2EP15__locale_structPKtbm@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIcEC2EPKtbm@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwE19_M_initialize_ctypeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwEC1EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwEC2EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt5ctypeIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6__norm15_List_node_base10_M_reverseEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSt6__norm15_List_node_base11_M_transferEPS0_S1_@@GLIBCXX_3.4.14 ++FUNC:_ZNSt6__norm15_List_node_base4hookEPS0_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt6__norm15_List_node_base4swapERS0_S1_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt6__norm15_List_node_base6unhookEv@@GLIBCXX_3.4.9 ++FUNC:_ZNSt6__norm15_List_node_base7_M_hookEPS0_@@GLIBCXX_3.4.14 
++FUNC:_ZNSt6__norm15_List_node_base7reverseEv@@GLIBCXX_3.4.9 ++FUNC:_ZNSt6__norm15_List_node_base8transferEPS0_S1_@@GLIBCXX_3.4.9 ++FUNC:_ZNSt6__norm15_List_node_base9_M_unhookEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSt6chrono12system_clock3nowEv@@GLIBCXX_3.4.11 ++FUNC:_ZNSt6chrono3_V212steady_clock3nowEv@@GLIBCXX_3.4.19 ++FUNC:_ZNSt6chrono3_V212system_clock3nowEv@@GLIBCXX_3.4.19 ++FUNC:_ZNSt6gslice8_IndexerC1EmRKSt8valarrayImES4_@@GLIBCXX_3.4 ++FUNC:_ZNSt6gslice8_IndexerC2EmRKSt8valarrayImES4_@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale11_M_coalesceERKS_S1_i@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale21_S_normalize_categoryEi@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_Impl16_M_install_cacheEPKNS_5facetEm@@GLIBCXX_3.4.7 ++FUNC:_ZNSt6locale5_Impl16_M_install_facetEPKNS_2idEPKNS_5facetE@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_Impl16_M_replace_facetEPKS0_PKNS_2idE@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_Impl19_M_replace_categoryEPKS0_PKPKNS_2idE@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_Impl21_M_replace_categoriesEPKS0_i@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplC1EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplC1ERKS0_m@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplC2EPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplC2ERKS0_m@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5_ImplD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5facet13_S_get_c_nameEv@@GLIBCXX_3.4.6 ++FUNC:_ZNSt6locale5facet15_S_get_c_localeEv@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5facet17_S_clone_c_localeERP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5facet18_S_create_c_localeERP15__locale_structPKcS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5facet19_S_destroy_c_localeERP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5facetD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5facetD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale5facetD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale6globalERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSt6locale7classicEv@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC1EPKc@@GLIBCXX_3.4 
++FUNC:_ZNSt6localeC1EPNS_5_ImplE@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC1ERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC1ERKS_PKci@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC1ERKS_S1_i@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC2EPKc@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC2EPNS_5_ImplE@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC2ERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC2ERKS_PKci@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC2ERKS_S1_i@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt6localeaSERKS_@@GLIBCXX_3.4 ++FUNC:_ZNSt6thread15_M_start_threadESt10shared_ptrINS_10_Impl_baseEE@@GLIBCXX_3.4.11 ++FUNC:_ZNSt6thread15_M_start_threadESt10shared_ptrINS_10_Impl_baseEEPFvvE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt6thread15_M_start_threadESt10unique_ptrINS_6_StateESt14default_deleteIS1_EEPFvvE@@GLIBCXX_3.4.22 ++FUNC:_ZNSt6thread20hardware_concurrencyEv@@GLIBCXX_3.4.17 ++FUNC:_ZNSt6thread4joinEv@@GLIBCXX_3.4.11 ++FUNC:_ZNSt6thread6_StateD0Ev@@GLIBCXX_3.4.22 ++FUNC:_ZNSt6thread6_StateD1Ev@@GLIBCXX_3.4.22 ++FUNC:_ZNSt6thread6_StateD2Ev@@GLIBCXX_3.4.22 ++FUNC:_ZNSt6thread6detachEv@@GLIBCXX_3.4.11 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EEC1EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EEC1EPSt18__moneypunct_cacheIcLb0EEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EEC2EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EEC2EPSt18__moneypunct_cacheIcLb0EEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb0EED2Ev@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EEC1EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EEC1EPSt18__moneypunct_cacheIcLb1EEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EEC2EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EEC2EPSt18__moneypunct_cacheIcLb1EEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIcLb1EED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EEC1EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EEC1EPSt18__moneypunct_cacheIwLb0EEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EEC2EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EEC2EPSt18__moneypunct_cacheIwLb0EEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb0EED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EE24_M_initialize_moneypunctEP15__locale_structPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EEC1EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EEC1EPSt18__moneypunct_cacheIwLb1EEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EEC2EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EEC2EPSt18__moneypunct_cacheIwLb1EEm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1110moneypunctIwLb1EED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_destroyEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_disposeEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_M_replaceEmmPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE10_S_compareEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE11_M_capacityEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_Alloc_hiderC1EPcOS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_Alloc_hiderC1EPcRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_Alloc_hiderC2EPcOS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_Alloc_hiderC2EPcRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructEmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIN9__gnu_cxx17__normal_iteratorIPKcS4_EEEEvT_SB_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIN9__gnu_cxx17__normal_iteratorIPcS4_EEEEvT_SA_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPKcEEvT_S8_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12_M_constructIPcEEvT_S7_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12__sv_wrapperC1ESt17basic_string_viewIcS2_E@@GLIBCXX_3.4.26 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE12__sv_wrapperC2ESt17basic_string_viewIcS2_E@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13_M_local_dataEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13_M_set_lengthEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13_S_copy_charsEPcN9__gnu_cxx17__normal_iteratorIPKcS4_EESA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13_S_copy_charsEPcN9__gnu_cxx17__normal_iteratorIS5_S4_EES8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13_S_copy_charsEPcPKcS7_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13_S_copy_charsEPcS5_S5_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE13shrink_to_fitEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE14_M_replace_auxEmmmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE16_M_get_allocatorEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE17_S_to_string_viewESt17basic_string_viewIcS2_E@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE18_M_construct_aux_2Emc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE2atEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE3endEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4backEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4dataEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4rendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5beginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5clearEv@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5eraseEN9__gnu_cxx17__normal_iteratorIPKcS4_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5eraseEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5eraseEN9__gnu_cxx17__normal_iteratorIPcS4_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5eraseEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5eraseEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE5frontEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6appendEPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6appendEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6appendERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6appendERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6appendESt16initializer_listIcE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6appendEmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6assignEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6assignEPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6assignEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6assignERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6assignERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6assignESt16initializer_listIcE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6assignEmc@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEN9__gnu_cxx17__normal_iteratorIPKcS4_EESt16initializer_listIcE@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEN9__gnu_cxx17__normal_iteratorIPKcS4_EEc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEN9__gnu_cxx17__normal_iteratorIPKcS4_EEmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEN9__gnu_cxx17__normal_iteratorIPcS4_EESt16initializer_listIcE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEN9__gnu_cxx17__normal_iteratorIPcS4_EEc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEN9__gnu_cxx17__normal_iteratorIPcS4_EEmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEmPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEmRKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEmRKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertEmmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6insertIN9__gnu_cxx17__normal_iteratorIPcS4_EEEEvS9_T_SA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6rbeginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6resizeEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE6resizeEmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7_M_dataEPc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7_S_copyEPcPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7_S_moveEPcPKcm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_NS6_IPcS4_EESB_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_PcSA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_RKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_S8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_S8_S8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_S8_m@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_S9_S9_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_St16initializer_listIcE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPKcS4_EES9_mc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_NS6_IPKcS4_EESB_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_PKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_PKcSA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_PKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_RKS4_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_S7_S7_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_S8_S8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEN9__gnu_cxx17__normal_iteratorIPcS4_EES8_mc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEmmPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEmmPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEmmRKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEmmRKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7replaceEmmmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7reserveEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE8_M_eraseEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE8pop_backEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_appendEPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_assignERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_createERmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_lengthEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_M_mutateEmmPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9_S_assignEPcmc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE9push_backEc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ENS4_12__sv_wrapperERKS3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EOS4_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EOS4_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EPKcRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EPKcmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_mRKS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ERKS4_mmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1ESt16initializer_listIcERKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1EmcRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IN9__gnu_cxx17__normal_iteratorIPcS4_EEvEET_SA_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IPKcvEET_S8_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC1IPcvEET_S7_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ENS4_12__sv_wrapperERKS3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2EOS4_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2EPKcRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2EPKcmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ERKS3_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ERKS4_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ERKS4_mRKS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ERKS4_mmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2ESt16initializer_listIcERKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2EmcRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IN9__gnu_cxx17__normal_iteratorIPcS4_EEvEET_SA_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IPKcvEET_S8_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEC2IPcvEET_S7_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEaSEPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEaSERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEaSESt16initializer_listIcE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEaSEc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEixEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEpLEPKc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEpLERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEpLESt16initializer_listIcE@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEpLEc@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE10_M_destroyEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE10_M_disposeEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE10_M_replaceEmmPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE10_S_compareEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE11_M_capacityEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_Alloc_hiderC1EPwOS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_Alloc_hiderC1EPwRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_Alloc_hiderC2EPwOS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_Alloc_hiderC2EPwRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_M_constructEmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_M_constructIN9__gnu_cxx17__normal_iteratorIPKwS4_EEEEvT_SB_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_M_constructIN9__gnu_cxx17__normal_iteratorIPwS4_EEEEvT_SA_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_M_constructIPKwEEvT_S8_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12_M_constructIPwEEvT_S7_St20forward_iterator_tag@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12__sv_wrapperC1ESt17basic_string_viewIwS2_E@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE12__sv_wrapperC2ESt17basic_string_viewIwS2_E@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13_M_local_dataEv@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13_M_set_lengthEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwN9__gnu_cxx17__normal_iteratorIPKwS4_EESA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwN9__gnu_cxx17__normal_iteratorIS5_S4_EES8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwPKwS7_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13_S_copy_charsEPwS5_S5_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE13shrink_to_fitEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE14_M_replace_auxEmmmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE16_M_get_allocatorEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE17_S_to_string_viewESt17basic_string_viewIwS2_E@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE18_M_construct_aux_2Emw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE2atEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE3endEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4backEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4dataEv@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4rendEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5beginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5clearEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5eraseEN9__gnu_cxx17__normal_iteratorIPKwS4_EE@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5eraseEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5eraseEN9__gnu_cxx17__normal_iteratorIPwS4_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5eraseEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5eraseEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE5frontEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6appendEPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6appendEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6appendERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6appendERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6appendESt16initializer_listIwE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6appendEmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6assignEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6assignEPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6assignEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6assignERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6assignERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6assignESt16initializer_listIwE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6assignEmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPKwS4_EESt16initializer_listIwE@@GLIBCXX_3.4.26 
++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPKwS4_EEmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPKwS4_EEw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPwS4_EESt16initializer_listIwE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPwS4_EEmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEN9__gnu_cxx17__normal_iteratorIPwS4_EEw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEmPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEmPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEmRKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEmRKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertEmmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6insertIN9__gnu_cxx17__normal_iteratorIPwS4_EEEEvS9_T_SA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6rbeginEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6resizeEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE6resizeEmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7_M_dataEPw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7_S_copyEPwPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7_S_moveEPwPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_NS6_IPwS4_EESB_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_PwSA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_RKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_S8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_S8_S8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_S8_m@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_S9_S9_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_St16initializer_listIwE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPKwS4_EES9_mw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_NS6_IPKwS4_EESB_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_PKw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_PKwSA_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_PKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_RKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_S7_S7_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_S8_S8_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEN9__gnu_cxx17__normal_iteratorIPwS4_EES8_mw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEmmPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEmmPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEmmRKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEmmRKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7replaceEmmmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE7reserveEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE8_M_eraseEmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE8pop_backEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE9_M_appendEPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE9_M_assignERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE9_M_createERmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE9_M_lengthEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE9_M_mutateEmmPKwm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE9_S_assignEPwmw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE9push_backEw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ENS4_12__sv_wrapperERKS3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1EOS4_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1EPKwRKS3_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1EPKwmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ERKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ERKS4_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ERKS4_mRKS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ERKS4_mmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1ESt16initializer_listIwERKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1EmwRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1IN9__gnu_cxx17__normal_iteratorIPwS4_EEvEET_SA_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1IPKwvEET_S8_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC1IPwvEET_S7_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ENS4_12__sv_wrapperERKS3_@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2EOS4_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2EPKwRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2EPKwmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ERKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ERKS4_RKS3_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ERKS4_mRKS3_@@GLIBCXX_3.4.23 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ERKS4_mm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ERKS4_mmRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2ESt16initializer_listIwERKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2EmwRKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2IN9__gnu_cxx17__normal_iteratorIPwS4_EEvEET_SA_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2IPKwvEET_S8_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEC2IPwvEET_S7_RKS3_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEaSEPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEaSERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEaSESt16initializer_listIwE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEaSEw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEixEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEpLEPKw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEpLERKS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEpLESt16initializer_listIwE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEEpLEw@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIcEC1EPKcm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1114collate_bynameIcEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIcEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIcEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIcED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIcED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIcED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIwEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIwEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIwEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIwEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1114collate_bynameIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE14__xfer_bufptrsC1ERKS4_PS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE14__xfer_bufptrsC2ERKS4_PS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE14__xfer_bufptrsD1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE14__xfer_bufptrsD2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE15_M_update_egptrEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE17_M_stringbuf_initESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE3strERKNS_12basic_stringIcS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE6setbufEPcl@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE7_M_syncEPcmm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE8_M_pbumpEPcS5_l@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE8overflowEi@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE9pbackfailEi@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE9showmanycEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEE9underflowEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC1EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC1EOS4_ONS4_14__xfer_bufptrsE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC1ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC2EOS4_ONS4_14__xfer_bufptrsE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC2ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE14__xfer_bufptrsC1ERKS4_PS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE14__xfer_bufptrsC2ERKS4_PS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE14__xfer_bufptrsD1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE14__xfer_bufptrsD2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE15_M_update_egptrEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE17_M_stringbuf_initESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE3strERKNS_12basic_stringIwS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE6setbufEPwl@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE7_M_syncEPwmm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE7seekoffElSt12_Ios_SeekdirSt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE7seekposESt4fposI11__mbstate_tESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE8_M_pbumpEPwS5_l@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE8overflowEj@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE9pbackfailEj@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE9showmanycEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEE9underflowEv@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC1EOS4_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC1EOS4_ONS4_14__xfer_bufptrsE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC1ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC2EOS4_ONS4_14__xfer_bufptrsE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC2ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIcEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIcEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIcEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIcEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIcED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIcED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIcED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIwEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIwEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1115messages_bynameIwEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIwEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115messages_bynameIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIcEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIcEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIcEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIcEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIcED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIcED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIcED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIwEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIwEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIwEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIwEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115numpunct_bynameIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1ERKNS_12basic_stringIcS3_SaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2ERKNS_12basic_stringIcS3_SaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1ERKNS_12basic_stringIcS2_IcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2ERKNS_12basic_stringIcS2_IcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb0EEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb0EEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb0EEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb0EEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb0EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb0EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb0EED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb1EEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb1EEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb1EEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb1EEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb1EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb1EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIcLb1EED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb0EEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb0EEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb0EEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb0EEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb0EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb0EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb0EED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb1EEC1EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb1EEC1ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb1EEC2EPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb1EEC2ERKNS_12basic_stringIcSt11char_traitsIcESaIcEEEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb1EED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb1EED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1117moneypunct_bynameIwLb1EED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEE3strERKNS_12basic_stringIcS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC1EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC1ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 
++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC2ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEE3strERKNS_12basic_stringIwS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC1EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC1ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC2ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEE3strERKNS_12basic_stringIcS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC1EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC1ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC2ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEE3strERKNS_12basic_stringIwS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC1EOS4_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC1ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC2ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEE3strERKNS_12basic_stringIcS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC1EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC1ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC2ERKNS_12basic_stringIcS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEaSEOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEE3strERKNS_12basic_stringIwS2_S3_EE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEE4swapERS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC1EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC1ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC1ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC2EOS4_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC2ERKNS_12basic_stringIwS2_S3_EESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC2ESt13_Ios_Openmode@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEC2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEaSEOS4_@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx117collateIcEC1EP15__locale_structm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIcEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIcEC2EP15__locale_structm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIcEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIcED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIcED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIcED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIwEC1EP15__locale_structm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIwEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIwEC2EP15__locale_structm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIwEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx117collateIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIcEC1EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIcEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIcEC2EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIcEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIcED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIcED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIcED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIwEC1EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIwEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIwEC2EP15__locale_structPKcm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIwEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118messagesIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcE22_M_initialize_numpunctEP15__locale_struct@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcEC1EP15__locale_structm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcEC1EPSt16__numpunct_cacheIcEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcEC2EP15__locale_structm@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx118numpunctIcEC2EPSt16__numpunct_cacheIcEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIcED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwE22_M_initialize_numpunctEP15__locale_struct@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwEC1EP15__locale_structm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwEC1EPSt16__numpunct_cacheIwEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwEC2EP15__locale_structm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwEC2EPSt16__numpunct_cacheIwEm@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118numpunctIwED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4.21 
++FUNC:_ZNSt7codecvtIDiDu11__mbstate_tED0Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7codecvtIDiDu11__mbstate_tED1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7codecvtIDiDu11__mbstate_tED2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7codecvtIDic11__mbstate_tED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7codecvtIDic11__mbstate_tED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7codecvtIDic11__mbstate_tED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7codecvtIDsDu11__mbstate_tED0Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7codecvtIDsDu11__mbstate_tED1Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7codecvtIDsDu11__mbstate_tED2Ev@@GLIBCXX_3.4.26 ++FUNC:_ZNSt7codecvtIDsc11__mbstate_tED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7codecvtIDsc11__mbstate_tED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7codecvtIDsc11__mbstate_tED2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt7codecvtIcc11__mbstate_tEC1EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIcc11__mbstate_tEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIcc11__mbstate_tEC2EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIcc11__mbstate_tEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIcc11__mbstate_tED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIcc11__mbstate_tED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIcc11__mbstate_tED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIwc11__mbstate_tEC1EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIwc11__mbstate_tEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIwc11__mbstate_tEC2EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIwc11__mbstate_tEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIwc11__mbstate_tED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIwc11__mbstate_tED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7codecvtIwc11__mbstate_tED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIcEC1EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIcEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIcEC2EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIcEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIwEC1EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIwEC1Em@@GLIBCXX_3.4 
++FUNC:_ZNSt7collateIwEC2EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIwEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7collateIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8__detail15_List_node_base10_M_reverseEv@@GLIBCXX_3.4.15 
++FUNC:_ZNSt8__detail15_List_node_base11_M_transferEPS0_S1_@@GLIBCXX_3.4.15 ++FUNC:_ZNSt8__detail15_List_node_base4swapERS0_S1_@@GLIBCXX_3.4.15 ++FUNC:_ZNSt8__detail15_List_node_base7_M_hookEPS0_@@GLIBCXX_3.4.15 ++FUNC:_ZNSt8__detail15_List_node_base9_M_unhookEv@@GLIBCXX_3.4.15 ++FUNC:_ZNSt8bad_castD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8bad_castD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8bad_castD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base13_M_grow_wordsEib@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base15sync_with_stdioEb@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base17_M_call_callbacksENS_5eventE@@GLIBCXX_3.4.6 ++FUNC:_ZNSt8ios_base17register_callbackEPFvNS_5eventERS_iEi@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base20_M_dispose_callbacksEv@@GLIBCXX_3.4.6 ++FUNC:_ZNSt8ios_base4InitC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base4InitC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base4InitD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base4InitD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base5imbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base6xallocEv@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base7_M_initEv@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base7_M_moveERS_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7_M_swapERS_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11C1EPKcRKSt10error_code@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11C1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11C1ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt10error_code@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11C2EPKcRKSt10error_code@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11C2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11C2ERKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEERKSt10error_code@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11D0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11D1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureB5cxx11D2Ev@@GLIBCXX_3.4.21 ++FUNC:_ZNSt8ios_base7failureC1ERKSs@@GLIBCXX_3.4 
++FUNC:_ZNSt8ios_base7failureC2ERKSs@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base7failureD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base7failureD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_base7failureD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_baseC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_baseC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_baseD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_baseD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8ios_baseD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIcEC1EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIcEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIcEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIcEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIwEC1EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIwEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIwEC2EP15__locale_structPKcm@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIwEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8messagesIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcE22_M_initialize_numpunctEP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcEC1EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcEC1EPSt16__numpunct_cacheIcEm@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcEC2EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcEC2EPSt16__numpunct_cacheIcEm@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIcED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwE22_M_initialize_numpunctEP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwEC1EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwEC1EPSt16__numpunct_cacheIwEm@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwEC2EP15__locale_structm@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwEC2EPSt16__numpunct_cacheIwEm@@GLIBCXX_3.4 
++FUNC:_ZNSt8numpunctIwEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8numpunctIwED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8valarrayImEC1ERKS0_@@GLIBCXX_3.4 ++FUNC:_ZNSt8valarrayImEC1Em@@GLIBCXX_3.4 
++FUNC:_ZNSt8valarrayImEC2ERKS0_@@GLIBCXX_3.4 ++FUNC:_ZNSt8valarrayImEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt8valarrayImED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8valarrayImED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt8valarrayImEixEm@@GLIBCXX_3.4 ++FUNC:_ZNSt9__atomic011atomic_flag12test_and_setESt12memory_order@@GLIBCXX_3.4.14 ++FUNC:_ZNSt9__atomic011atomic_flag5clearESt12memory_order@@GLIBCXX_3.4.14 ++FUNC:_ZNSt9__cxx199815_List_node_base10_M_reverseEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSt9__cxx199815_List_node_base11_M_transferEPS0_S1_@@GLIBCXX_3.4.14 ++FUNC:_ZNSt9__cxx199815_List_node_base4hookEPS0_@@GLIBCXX_3.4.10 ++FUNC:_ZNSt9__cxx199815_List_node_base4swapERS0_S1_@@GLIBCXX_3.4.10 ++FUNC:_ZNSt9__cxx199815_List_node_base6unhookEv@@GLIBCXX_3.4.10 ++FUNC:_ZNSt9__cxx199815_List_node_base7_M_hookEPS0_@@GLIBCXX_3.4.14 ++FUNC:_ZNSt9__cxx199815_List_node_base7reverseEv@@GLIBCXX_3.4.10 ++FUNC:_ZNSt9__cxx199815_List_node_base8transferEPS0_S1_@@GLIBCXX_3.4.10 ++FUNC:_ZNSt9__cxx199815_List_node_base9_M_unhookEv@@GLIBCXX_3.4.14 ++FUNC:_ZNSt9bad_allocD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9bad_allocD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9bad_allocD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE10exceptionsESt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE11_M_setstateESt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE15_M_cache_localeERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE3tieEPSo@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE4fillEc@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE4initEPSt15basic_streambufIcS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE4moveEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE4moveERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE5clearESt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE5imbueERKSt6locale@@GLIBCXX_3.4 
++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE5rdbufEPSt15basic_streambufIcS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE7copyfmtERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE8setstateESt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEE9set_rdbufEPSt15basic_streambufIcS1_E@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEEC1EPSt15basic_streambufIcS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEEC2EPSt15basic_streambufIcS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIcSt11char_traitsIcEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE10exceptionsESt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE11_M_setstateESt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE15_M_cache_localeERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE3tieEPSt13basic_ostreamIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE4fillEw@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE4initEPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE4moveEOS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE4moveERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE4swapERS2_@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE5clearESt12_Ios_Iostate@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE5imbueERKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE5rdbufEPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE7copyfmtERKS2_@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE8setstateESt12_Ios_Iostate@@GLIBCXX_3.4 
++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEE9set_rdbufEPSt15basic_streambufIwS1_E@@GLIBCXX_3.4.21 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEEC1EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEEC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEEC2EPSt15basic_streambufIwS1_E@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEEC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9basic_iosIwSt11char_traitsIwEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9exceptionD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9exceptionD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9exceptionD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEED2Ev@@GLIBCXX_3.4 
++FUNC:_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC1Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEC2Em@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEED2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstream3strEv@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstream6freezeEb@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstreamC1EPciSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstreamC1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstreamC2EPciSt13_Ios_Openmode@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstreamC2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstreamD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstreamD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9strstreamD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9type_infoD0Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9type_infoD1Ev@@GLIBCXX_3.4 ++FUNC:_ZNSt9type_infoD2Ev@@GLIBCXX_3.4 ++FUNC:_ZNVSt9__atomic011atomic_flag12test_and_setESt12memory_order@@GLIBCXX_3.4.11 ++FUNC:_ZNVSt9__atomic011atomic_flag5clearESt12memory_order@@GLIBCXX_3.4.11 ++FUNC:_ZSt10unexpectedv@@GLIBCXX_3.4 ++FUNC:_ZSt11_Hash_bytesPKvmm@@CXXABI_1.3.5 ++FUNC:_ZSt13get_terminatev@@GLIBCXX_3.4.20 ++FUNC:_ZSt13set_terminatePFvvE@@GLIBCXX_3.4 ++FUNC:_ZSt14__convert_to_vIdEvPKcRT_RSt12_Ios_IostateRKP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZSt14__convert_to_vIeEvPKcRT_RSt12_Ios_IostateRKP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZSt14__convert_to_vIfEvPKcRT_RSt12_Ios_IostateRKP15__locale_struct@@GLIBCXX_3.4 ++FUNC:_ZSt14__convert_to_vIgEvPKcRT_RSt12_Ios_IostateRKP15__locale_struct@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt14get_unexpectedv@@GLIBCXX_3.4.20 ++FUNC:_ZSt14set_unexpectedPFvvE@@GLIBCXX_3.4 ++FUNC:_ZSt15_Fnv_hash_bytesPKvmm@@CXXABI_1.3.5 ++FUNC:_ZSt15future_categoryv@@GLIBCXX_3.4.15 ++FUNC:_ZSt15get_new_handlerv@@GLIBCXX_3.4.20 ++FUNC:_ZSt15set_new_handlerPFvvE@@GLIBCXX_3.4 ++FUNC:_ZSt15system_categoryv@@GLIBCXX_3.4.11 
++FUNC:_ZSt16__ostream_insertIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_PKS3_l@@GLIBCXX_3.4.9 ++FUNC:_ZSt16__ostream_insertIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_PKS3_l@@GLIBCXX_3.4.9 ++FUNC:_ZSt16__throw_bad_castv@@GLIBCXX_3.4 ++FUNC:_ZSt16generic_categoryv@@GLIBCXX_3.4.11 ++FUNC:_ZSt17__copy_streambufsIcSt11char_traitsIcEElPSt15basic_streambufIT_T0_ES6_@@GLIBCXX_3.4.8 ++FUNC:_ZSt17__copy_streambufsIwSt11char_traitsIwEElPSt15basic_streambufIT_T0_ES6_@@GLIBCXX_3.4.8 ++FUNC:_ZSt17__throw_bad_allocv@@GLIBCXX_3.4 ++FUNC:_ZSt17__verify_groupingPKcmRKNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++FUNC:_ZSt17__verify_groupingPKcmRKSs@@GLIBCXX_3.4.10 ++FUNC:_ZSt17current_exceptionv@@CXXABI_1.3.3 ++FUNC:_ZSt17iostream_categoryv@@GLIBCXX_3.4.21 ++FUNC:_ZSt17rethrow_exceptionNSt15__exception_ptr13exception_ptrE@@CXXABI_1.3.3 ++FUNC:_ZSt18_Rb_tree_decrementPKSt18_Rb_tree_node_base@@GLIBCXX_3.4 ++FUNC:_ZSt18_Rb_tree_decrementPSt18_Rb_tree_node_base@@GLIBCXX_3.4 ++FUNC:_ZSt18_Rb_tree_incrementPKSt18_Rb_tree_node_base@@GLIBCXX_3.4 ++FUNC:_ZSt18_Rb_tree_incrementPSt18_Rb_tree_node_base@@GLIBCXX_3.4 ++FUNC:_ZSt18__throw_bad_typeidv@@GLIBCXX_3.4 ++FUNC:_ZSt18uncaught_exceptionv@@GLIBCXX_3.4 ++FUNC:_ZSt19__throw_ios_failurePKc@@GLIBCXX_3.4 ++FUNC:_ZSt19__throw_ios_failurePKci@@GLIBCXX_3.4.26 ++FUNC:_ZSt19__throw_logic_errorPKc@@GLIBCXX_3.4 ++FUNC:_ZSt19__throw_range_errorPKc@@GLIBCXX_3.4 ++FUNC:_ZSt19__throw_regex_errorNSt15regex_constants10error_typeE@@GLIBCXX_3.4.15 ++FUNC:_ZSt19uncaught_exceptionsv@@GLIBCXX_3.4.22 ++FUNC:_ZSt20_Rb_tree_black_countPKSt18_Rb_tree_node_baseS1_@@GLIBCXX_3.4 ++FUNC:_ZSt20_Rb_tree_rotate_leftPSt18_Rb_tree_node_baseRS0_@@GLIBCXX_3.4 ++FUNC:_ZSt20__throw_domain_errorPKc@@GLIBCXX_3.4 ++FUNC:_ZSt20__throw_future_errori@@GLIBCXX_3.4.14 ++FUNC:_ZSt20__throw_length_errorPKc@@GLIBCXX_3.4 ++FUNC:_ZSt20__throw_out_of_rangePKc@@GLIBCXX_3.4 ++FUNC:_ZSt20__throw_system_errori@@GLIBCXX_3.4.11 
++FUNC:_ZSt21_Rb_tree_rotate_rightPSt18_Rb_tree_node_baseRS0_@@GLIBCXX_3.4 ++FUNC:_ZSt21__copy_streambufs_eofIcSt11char_traitsIcEElPSt15basic_streambufIT_T0_ES6_Rb@@GLIBCXX_3.4.9 ++FUNC:_ZSt21__copy_streambufs_eofIwSt11char_traitsIwEElPSt15basic_streambufIT_T0_ES6_Rb@@GLIBCXX_3.4.9 ++FUNC:_ZSt21__throw_bad_exceptionv@@GLIBCXX_3.4 ++FUNC:_ZSt21__throw_runtime_errorPKc@@GLIBCXX_3.4 ++FUNC:_ZSt22__throw_overflow_errorPKc@@GLIBCXX_3.4 ++FUNC:_ZSt23__throw_underflow_errorPKc@@GLIBCXX_3.4 ++FUNC:_ZSt24__throw_invalid_argumentPKc@@GLIBCXX_3.4 ++FUNC:_ZSt24__throw_out_of_range_fmtPKcz@@GLIBCXX_3.4.20 ++FUNC:_ZSt25__throw_bad_function_callv@@GLIBCXX_3.4.14 ++FUNC:_ZSt25notify_all_at_thread_exitRSt18condition_variableSt11unique_lockISt5mutexE@@GLIBCXX_3.4.21 ++FUNC:_ZSt28_Rb_tree_rebalance_for_erasePSt18_Rb_tree_node_baseRS_@@GLIBCXX_3.4 ++FUNC:_ZSt29_Rb_tree_insert_and_rebalancebPSt18_Rb_tree_node_baseS0_RS_@@GLIBCXX_3.4 ++FUNC:_ZSt2wsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt2wsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt4endlIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt4endlIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt4endsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt4endsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt5flushIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt5flushIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_@@GLIBCXX_3.4 ++FUNC:_ZSt7getlineIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE@@GLIBCXX_3.4.21 ++FUNC:_ZSt7getlineIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EES4_@@GLIBCXX_3.4.21 ++FUNC:_ZSt7getlineIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RSbIS4_S5_T1_E@@GLIBCXX_3.4 
++FUNC:_ZSt7getlineIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RSbIS4_S5_T1_ES4_@@GLIBCXX_3.4 ++FUNC:_ZSt7getlineIwSt11char_traitsIwESaIwEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE@@GLIBCXX_3.4.21 ++FUNC:_ZSt7getlineIwSt11char_traitsIwESaIwEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EES4_@@GLIBCXX_3.4.21 ++FUNC:_ZSt7getlineIwSt11char_traitsIwESaIwEERSt13basic_istreamIT_T0_ES7_RSbIS4_S5_T1_E@@GLIBCXX_3.4 ++FUNC:_ZSt7getlineIwSt11char_traitsIwESaIwEERSt13basic_istreamIT_T0_ES7_RSbIS4_S5_T1_ES4_@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEEbRKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9has_facetINSt7__cxx1110moneypunctIcLb0EEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx1110moneypunctIwLb0EEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx117collateIcEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx117collateIwEEEbRKSt6locale@@GLIBCXX_3.4.21 
++FUNC:_ZSt9has_facetINSt7__cxx118messagesIcEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx118messagesIwEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx118numpunctIcEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx118numpunctIwEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetINSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEEbRKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9has_facetISt10moneypunctIcLb0EEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt10moneypunctIwLb0EEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt11__timepunctIcEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt11__timepunctIwEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt5ctypeIcEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt5ctypeIwEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt7codecvtIcc11__mbstate_tEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt7codecvtIwc11__mbstate_tEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt7collateIcEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt7collateIwEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEbRKSt6locale@@GLIBCXX_3.4 
++FUNC:_ZSt9has_facetISt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8messagesIcEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8messagesIwEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8numpunctIcEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8numpunctIwEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9has_facetISt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEbRKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9terminatev@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 
++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9use_facetINSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEERKT_RKSt6locale@@GLIBCXX_LDBL_3.4 ++FUNC:_ZSt9use_facetINSt7__cxx1110moneypunctIcLb0EEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx1110moneypunctIcLb1EEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx1110moneypunctIwLb0EEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx1110moneypunctIwLb1EEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx117collateIcEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx117collateIwEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx118messagesIcEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx118messagesIwEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx118numpunctIcEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx118numpunctIwEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetINSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEEERKT_RKSt6locale@@GLIBCXX_3.4.21 
++FUNC:_ZSt9use_facetINSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEEERKT_RKSt6locale@@GLIBCXX_3.4.21 ++FUNC:_ZSt9use_facetISt10moneypunctIcLb0EEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt10moneypunctIcLb1EEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt10moneypunctIwLb0EEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt10moneypunctIwLb1EEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt11__timepunctIcEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt11__timepunctIwEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt5ctypeIcEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt5ctypeIwEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7codecvtIcc11__mbstate_tEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7codecvtIwc11__mbstate_tEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7collateIcEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7collateIwEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt8messagesIcEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt8messagesIwEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt8numpunctIcEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt8numpunctIwEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEERKT_RKSt6locale@@GLIBCXX_3.4 
++FUNC:_ZSt9use_facetISt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZSt9use_facetISt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEERKT_RKSt6locale@@GLIBCXX_3.4 ++FUNC:_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKa@@GLIBCXX_3.4 ++FUNC:_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKc@@GLIBCXX_3.4 ++FUNC:_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_PKh@@GLIBCXX_3.4 ++FUNC:_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_a@@GLIBCXX_3.4 ++FUNC:_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_c@@GLIBCXX_3.4 ++FUNC:_ZStlsISt11char_traitsIcEERSt13basic_ostreamIcT_ES5_h@@GLIBCXX_3.4 ++FUNC:_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St12_Setiosflags@@GLIBCXX_3.4 ++FUNC:_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St13_Setprecision@@GLIBCXX_3.4 ++FUNC:_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St14_Resetiosflags@@GLIBCXX_3.4 ++FUNC:_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St5_Setw@@GLIBCXX_3.4 ++FUNC:_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St8_Setbase@@GLIBCXX_3.4 ++FUNC:_ZStlsIcSt11char_traitsIcEERSt13basic_ostreamIT_T0_ES6_St8_SetfillIS3_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIcSt11char_traitsIcESaIcEERSt13basic_ostreamIT_T0_ES7_RKNSt7__cxx1112basic_stringIS4_S5_T1_EE@@GLIBCXX_3.4.21 ++FUNC:_ZStlsIcSt11char_traitsIcESaIcEERSt13basic_ostreamIT_T0_ES7_RKSbIS4_S5_T1_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIdcSt11char_traitsIcEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIdwSt11char_traitsIwEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_3.4 
++FUNC:_ZStlsIecSt11char_traitsIcEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIewSt11char_traitsIwEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIfcSt11char_traitsIcEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIfwSt11char_traitsIwEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIgcSt11char_traitsIcEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_LDBL_3.4 ++FUNC:_ZStlsIgwSt11char_traitsIwEERSt13basic_ostreamIT0_T1_ES6_RKSt7complexIT_E@@GLIBCXX_LDBL_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_PKS3_@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_PKc@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_S3_@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_St12_Setiosflags@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_St13_Setprecision@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_St14_Resetiosflags@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_St5_Setw@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_St8_Setbase@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_St8_SetfillIS3_E@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwEERSt13basic_ostreamIT_T0_ES6_c@@GLIBCXX_3.4 ++FUNC:_ZStlsIwSt11char_traitsIwESaIwEERSt13basic_ostreamIT_T0_ES7_RKNSt7__cxx1112basic_stringIS4_S5_T1_EE@@GLIBCXX_3.4.21 ++FUNC:_ZStlsIwSt11char_traitsIwESaIwEERSt13basic_ostreamIT_T0_ES7_RKSbIS4_S5_T1_E@@GLIBCXX_3.4 ++FUNC:_ZStplIcSt11char_traitsIcESaIcEENSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_RKS8_@@GLIBCXX_3.4.21 ++FUNC:_ZStplIcSt11char_traitsIcESaIcEENSt7__cxx1112basic_stringIT_T0_T1_EERKS8_SA_@@GLIBCXX_3.4.21 ++FUNC:_ZStplIcSt11char_traitsIcESaIcEENSt7__cxx1112basic_stringIT_T0_T1_EES5_RKS8_@@GLIBCXX_3.4.21 
++FUNC:_ZStplIcSt11char_traitsIcESaIcEESbIT_T0_T1_EPKS3_RKS6_@@GLIBCXX_3.4 ++FUNC:_ZStplIcSt11char_traitsIcESaIcEESbIT_T0_T1_ERKS6_S8_@@GLIBCXX_3.4 ++FUNC:_ZStplIcSt11char_traitsIcESaIcEESbIT_T0_T1_ES3_RKS6_@@GLIBCXX_3.4 ++FUNC:_ZStplIwSt11char_traitsIwESaIwEENSt7__cxx1112basic_stringIT_T0_T1_EEPKS5_RKS8_@@GLIBCXX_3.4.21 ++FUNC:_ZStplIwSt11char_traitsIwESaIwEENSt7__cxx1112basic_stringIT_T0_T1_EERKS8_SA_@@GLIBCXX_3.4.21 ++FUNC:_ZStplIwSt11char_traitsIwESaIwEENSt7__cxx1112basic_stringIT_T0_T1_EES5_RKS8_@@GLIBCXX_3.4.21 ++FUNC:_ZStplIwSt11char_traitsIwESaIwEESbIT_T0_T1_EPKS3_RKS6_@@GLIBCXX_3.4 ++FUNC:_ZStplIwSt11char_traitsIwESaIwEESbIT_T0_T1_ERKS6_S8_@@GLIBCXX_3.4 ++FUNC:_ZStplIwSt11char_traitsIwESaIwEESbIT_T0_T1_ES3_RKS6_@@GLIBCXX_3.4 ++FUNC:_ZStrsISt11char_traitsIcEERSt13basic_istreamIcT_ES5_Pa@@GLIBCXX_3.4 ++FUNC:_ZStrsISt11char_traitsIcEERSt13basic_istreamIcT_ES5_Ph@@GLIBCXX_3.4 ++FUNC:_ZStrsISt11char_traitsIcEERSt13basic_istreamIcT_ES5_Ra@@GLIBCXX_3.4 ++FUNC:_ZStrsISt11char_traitsIcEERSt13basic_istreamIcT_ES5_Rh@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_PS3_@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_RS3_@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_St12_Setiosflags@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_St13_Setprecision@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_St14_Resetiosflags@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_St5_Setw@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_St8_Setbase@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcEERSt13basic_istreamIT_T0_ES6_St8_SetfillIS3_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE@@GLIBCXX_3.4.21 ++FUNC:_ZStrsIcSt11char_traitsIcESaIcEERSt13basic_istreamIT_T0_ES7_RSbIS4_S5_T1_E@@GLIBCXX_3.4 
++FUNC:_ZStrsIdcSt11char_traitsIcEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIdwSt11char_traitsIwEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIecSt11char_traitsIcEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIewSt11char_traitsIwEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIfcSt11char_traitsIcEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIfwSt11char_traitsIwEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIgcSt11char_traitsIcEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_LDBL_3.4 ++FUNC:_ZStrsIgwSt11char_traitsIwEERSt13basic_istreamIT0_T1_ES6_RSt7complexIT_E@@GLIBCXX_LDBL_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_PS3_@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_RS3_@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_St12_Setiosflags@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_St13_Setprecision@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_St14_Resetiosflags@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_St5_Setw@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_St8_Setbase@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwEERSt13basic_istreamIT_T0_ES6_St8_SetfillIS3_E@@GLIBCXX_3.4 ++FUNC:_ZStrsIwSt11char_traitsIwESaIwEERSt13basic_istreamIT_T0_ES7_RNSt7__cxx1112basic_stringIS4_S5_T1_EE@@GLIBCXX_3.4.21 ++FUNC:_ZStrsIwSt11char_traitsIwESaIwEERSt13basic_istreamIT_T0_ES7_RSbIS4_S5_T1_E@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSdD0Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSdD1Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt13basic_fstreamIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt13basic_fstreamIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt13basic_fstreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 
++FUNC:_ZThn16_NSt13basic_fstreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt14basic_iostreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt14basic_iostreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt18basic_stringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt18basic_stringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt18basic_stringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt18basic_stringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZThn16_NSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZThn16_NSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZThn16_NSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZThn16_NSt9strstreamD0Ev@@GLIBCXX_3.4 ++FUNC:_ZThn16_NSt9strstreamD1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSdD0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSdD1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSiD0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSiD1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSoD0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSoD1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt10istrstreamD0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt10istrstreamD1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt10ostrstreamD0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt10ostrstreamD1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt13basic_fstreamIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt13basic_fstreamIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt13basic_fstreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt13basic_fstreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt13basic_istreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt13basic_istreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt13basic_ostreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 
++FUNC:_ZTv0_n24_NSt13basic_ostreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ifstreamIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ifstreamIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ifstreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ifstreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_iostreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_iostreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ofstreamIcSt11char_traitsIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ofstreamIcSt11char_traitsIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ofstreamIwSt11char_traitsIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt14basic_ofstreamIwSt11char_traitsIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt18basic_stringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt18basic_stringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt18basic_stringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt18basic_stringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_istringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_istringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_istringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_istringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_ostringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_ostringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_ostringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt19basic_ostringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 
++FUNC:_ZTv0_n24_NSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEED0Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEED1Ev@@GLIBCXX_3.4.21 ++FUNC:_ZTv0_n24_NSt9strstreamD0Ev@@GLIBCXX_3.4 ++FUNC:_ZTv0_n24_NSt9strstreamD1Ev@@GLIBCXX_3.4 ++FUNC:_ZdaPv@@GLIBCXX_3.4 ++FUNC:_ZdaPvRKSt9nothrow_t@@GLIBCXX_3.4 ++FUNC:_ZdaPvSt11align_val_t@@CXXABI_1.3.11 ++FUNC:_ZdaPvSt11align_val_tRKSt9nothrow_t@@CXXABI_1.3.11 ++FUNC:_ZdaPvm@@CXXABI_1.3.9 ++FUNC:_ZdaPvmSt11align_val_t@@CXXABI_1.3.11 ++FUNC:_ZdlPv@@GLIBCXX_3.4 ++FUNC:_ZdlPvRKSt9nothrow_t@@GLIBCXX_3.4 ++FUNC:_ZdlPvSt11align_val_t@@CXXABI_1.3.11 ++FUNC:_ZdlPvSt11align_val_tRKSt9nothrow_t@@CXXABI_1.3.11 ++FUNC:_ZdlPvm@@CXXABI_1.3.9 ++FUNC:_ZdlPvmSt11align_val_t@@CXXABI_1.3.11 ++FUNC:_Znam@@GLIBCXX_3.4 ++FUNC:_ZnamRKSt9nothrow_t@@GLIBCXX_3.4 ++FUNC:_ZnamSt11align_val_t@@CXXABI_1.3.11 ++FUNC:_ZnamSt11align_val_tRKSt9nothrow_t@@CXXABI_1.3.11 ++FUNC:_Znwm@@GLIBCXX_3.4 ++FUNC:_ZnwmRKSt9nothrow_t@@GLIBCXX_3.4 ++FUNC:_ZnwmSt11align_val_t@@CXXABI_1.3.11 ++FUNC:_ZnwmSt11align_val_tRKSt9nothrow_t@@CXXABI_1.3.11 
++FUNC:__atomic_flag_for_address@@GLIBCXX_3.4.11 ++FUNC:__atomic_flag_wait_explicit@@GLIBCXX_3.4.11 ++FUNC:__cxa_allocate_dependent_exception@@CXXABI_1.3.6 ++FUNC:__cxa_allocate_exception@@CXXABI_1.3 ++FUNC:__cxa_bad_cast@@CXXABI_1.3 ++FUNC:__cxa_bad_typeid@@CXXABI_1.3 ++FUNC:__cxa_begin_catch@@CXXABI_1.3 ++FUNC:__cxa_call_unexpected@@CXXABI_1.3 ++FUNC:__cxa_current_exception_type@@CXXABI_1.3 ++FUNC:__cxa_deleted_virtual@@CXXABI_1.3.6 ++FUNC:__cxa_demangle@@CXXABI_1.3 ++FUNC:__cxa_end_catch@@CXXABI_1.3 ++FUNC:__cxa_free_dependent_exception@@CXXABI_1.3.6 ++FUNC:__cxa_free_exception@@CXXABI_1.3 ++FUNC:__cxa_get_exception_ptr@@CXXABI_1.3.1 ++FUNC:__cxa_get_globals@@CXXABI_1.3 ++FUNC:__cxa_get_globals_fast@@CXXABI_1.3 ++FUNC:__cxa_guard_abort@@CXXABI_1.3 ++FUNC:__cxa_guard_acquire@@CXXABI_1.3 ++FUNC:__cxa_guard_release@@CXXABI_1.3 ++FUNC:__cxa_init_primary_exception@@CXXABI_1.3.11 ++FUNC:__cxa_pure_virtual@@CXXABI_1.3 ++FUNC:__cxa_rethrow@@CXXABI_1.3 ++FUNC:__cxa_thread_atexit@@CXXABI_1.3.7 ++FUNC:__cxa_throw@@CXXABI_1.3 ++FUNC:__cxa_throw_bad_array_length@@CXXABI_1.3.8 ++FUNC:__cxa_throw_bad_array_new_length@@CXXABI_1.3.8 ++FUNC:__cxa_tm_cleanup@@CXXABI_TM_1 ++FUNC:__cxa_vec_cctor@@CXXABI_1.3 ++FUNC:__cxa_vec_cleanup@@CXXABI_1.3 ++FUNC:__cxa_vec_ctor@@CXXABI_1.3 ++FUNC:__cxa_vec_delete2@@CXXABI_1.3 ++FUNC:__cxa_vec_delete3@@CXXABI_1.3 ++FUNC:__cxa_vec_delete@@CXXABI_1.3 ++FUNC:__cxa_vec_dtor@@CXXABI_1.3 ++FUNC:__cxa_vec_new2@@CXXABI_1.3 ++FUNC:__cxa_vec_new3@@CXXABI_1.3 ++FUNC:__cxa_vec_new@@CXXABI_1.3 ++FUNC:__dynamic_cast@@CXXABI_1.3 ++FUNC:__gxx_personality_v0@@CXXABI_1.3 ++FUNC:__once_proxy@@GLIBCXX_3.4.11 ++FUNC:acosl@GLIBCXX_3.4.3 ++FUNC:asinl@GLIBCXX_3.4.3 ++FUNC:atan2l@GLIBCXX_3.4 ++FUNC:atanl@GLIBCXX_3.4.3 ++FUNC:atomic_flag_clear_explicit@@GLIBCXX_3.4.11 ++FUNC:atomic_flag_test_and_set_explicit@@GLIBCXX_3.4.11 ++FUNC:ceill@GLIBCXX_3.4.3 ++FUNC:coshl@GLIBCXX_3.4 ++FUNC:cosl@GLIBCXX_3.4 ++FUNC:expl@GLIBCXX_3.4 ++FUNC:floorl@GLIBCXX_3.4.3 
++FUNC:fmodl@GLIBCXX_3.4.3 ++FUNC:frexpl@GLIBCXX_3.4.3 ++FUNC:hypotl@GLIBCXX_3.4 ++FUNC:ldexpl@GLIBCXX_3.4.3 ++FUNC:log10l@GLIBCXX_3.4 ++FUNC:logl@GLIBCXX_3.4 ++FUNC:modfl@GLIBCXX_3.4.3 ++FUNC:powl@GLIBCXX_3.4 ++FUNC:sinhl@GLIBCXX_3.4 ++FUNC:sinl@GLIBCXX_3.4 ++FUNC:sqrtl@GLIBCXX_3.4 ++FUNC:tanhl@GLIBCXX_3.4 ++FUNC:tanl@GLIBCXX_3.4 ++OBJECT:0:CXXABI_1.3 ++OBJECT:0:CXXABI_1.3.1 ++OBJECT:0:CXXABI_1.3.10 ++OBJECT:0:CXXABI_1.3.11 ++OBJECT:0:CXXABI_1.3.12 ++OBJECT:0:CXXABI_1.3.2 ++OBJECT:0:CXXABI_1.3.3 ++OBJECT:0:CXXABI_1.3.4 ++OBJECT:0:CXXABI_1.3.5 ++OBJECT:0:CXXABI_1.3.6 ++OBJECT:0:CXXABI_1.3.7 ++OBJECT:0:CXXABI_1.3.8 ++OBJECT:0:CXXABI_1.3.9 ++OBJECT:0:CXXABI_LDBL_1.3 ++OBJECT:0:CXXABI_TM_1 ++OBJECT:0:GLIBCXX_3.4 ++OBJECT:0:GLIBCXX_3.4.1 ++OBJECT:0:GLIBCXX_3.4.10 ++OBJECT:0:GLIBCXX_3.4.11 ++OBJECT:0:GLIBCXX_3.4.12 ++OBJECT:0:GLIBCXX_3.4.13 ++OBJECT:0:GLIBCXX_3.4.14 ++OBJECT:0:GLIBCXX_3.4.15 ++OBJECT:0:GLIBCXX_3.4.16 ++OBJECT:0:GLIBCXX_3.4.17 ++OBJECT:0:GLIBCXX_3.4.18 ++OBJECT:0:GLIBCXX_3.4.19 ++OBJECT:0:GLIBCXX_3.4.2 ++OBJECT:0:GLIBCXX_3.4.20 ++OBJECT:0:GLIBCXX_3.4.21 ++OBJECT:0:GLIBCXX_3.4.22 ++OBJECT:0:GLIBCXX_3.4.23 ++OBJECT:0:GLIBCXX_3.4.24 ++OBJECT:0:GLIBCXX_3.4.25 ++OBJECT:0:GLIBCXX_3.4.26 ++OBJECT:0:GLIBCXX_3.4.27 ++OBJECT:0:GLIBCXX_3.4.28 ++OBJECT:0:GLIBCXX_3.4.3 ++OBJECT:0:GLIBCXX_3.4.4 ++OBJECT:0:GLIBCXX_3.4.5 ++OBJECT:0:GLIBCXX_3.4.6 ++OBJECT:0:GLIBCXX_3.4.7 ++OBJECT:0:GLIBCXX_3.4.8 ++OBJECT:0:GLIBCXX_3.4.9 ++OBJECT:0:GLIBCXX_LDBL_3.4 ++OBJECT:0:GLIBCXX_LDBL_3.4.10 ++OBJECT:0:GLIBCXX_LDBL_3.4.21 ++OBJECT:0:GLIBCXX_LDBL_3.4.7 ++OBJECT:104:_ZTVNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:104:_ZTVNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:104:_ZTVNSt7__cxx1110moneypunctIcLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:104:_ZTVNSt7__cxx1110moneypunctIcLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:104:_ZTVNSt7__cxx1110moneypunctIwLb0EEE@@GLIBCXX_3.4.21 
++OBJECT:104:_ZTVNSt7__cxx1110moneypunctIwLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:104:_ZTVNSt7__cxx1117moneypunct_bynameIcLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:104:_ZTVNSt7__cxx1117moneypunct_bynameIcLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:104:_ZTVNSt7__cxx1117moneypunct_bynameIwLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:104:_ZTVNSt7__cxx1117moneypunct_bynameIwLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:104:_ZTVSt10moneypunctIcLb0EE@@GLIBCXX_3.4 ++OBJECT:104:_ZTVSt10moneypunctIcLb1EE@@GLIBCXX_3.4 ++OBJECT:104:_ZTVSt10moneypunctIwLb0EE@@GLIBCXX_3.4 ++OBJECT:104:_ZTVSt10moneypunctIwLb1EE@@GLIBCXX_3.4 ++OBJECT:104:_ZTVSt17moneypunct_bynameIcLb0EE@@GLIBCXX_3.4 ++OBJECT:104:_ZTVSt17moneypunct_bynameIcLb1EE@@GLIBCXX_3.4 ++OBJECT:104:_ZTVSt17moneypunct_bynameIwLb0EE@@GLIBCXX_3.4 ++OBJECT:104:_ZTVSt17moneypunct_bynameIwLb1EE@@GLIBCXX_3.4 ++OBJECT:112:_ZNSt17__timepunct_cacheIcE12_S_timezonesE@@GLIBCXX_3.4 ++OBJECT:112:_ZNSt17__timepunct_cacheIwE12_S_timezonesE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:120:_ZTVNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:120:_ZTVSd@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt13basic_fstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt13basic_fstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt14basic_iostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt18basic_stringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt18basic_stringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:120:_ZTVSt9strstream@@GLIBCXX_3.4 ++OBJECT:128:_ZTVN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEE@@GLIBCXX_3.4 
++OBJECT:128:_ZTVNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:128:_ZTVNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:128:_ZTVNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:128:_ZTVNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:128:_ZTVSt12ctype_bynameIwE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt12strstreambuf@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt13basic_filebufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt13basic_filebufIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt15basic_streambufIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt15basic_stringbufIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt15basic_stringbufIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt21__ctype_abstract_baseIcE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt21__ctype_abstract_baseIwE@@GLIBCXX_3.4 ++OBJECT:128:_ZTVSt5ctypeIwE@@GLIBCXX_3.4 ++OBJECT:12:_ZTSSt5ctypeIcE@@GLIBCXX_3.4 ++OBJECT:12:_ZTSSt5ctypeIwE@@GLIBCXX_3.4 ++OBJECT:12:_ZTSSt8bad_cast@@GLIBCXX_3.4 ++OBJECT:12:_ZTSSt8ios_base@@GLIBCXX_3.4 ++OBJECT:13:_ZTSSt9bad_alloc@@GLIBCXX_3.4 ++OBJECT:13:_ZTSSt9exception@@GLIBCXX_3.4 ++OBJECT:13:_ZTSSt9strstream@@GLIBCXX_3.4 ++OBJECT:13:_ZTSSt9time_base@@GLIBCXX_3.4 ++OBJECT:13:_ZTSSt9type_info@@GLIBCXX_3.4 ++OBJECT:14:_ZTSSt7collateIcE@@GLIBCXX_3.4 ++OBJECT:14:_ZTSSt7collateIwE@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt10bad_typeid@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt10ctype_base@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt10istrstream@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt10lock_error@@GLIBCXX_3.4.11 ++OBJECT:15:_ZTSSt10money_base@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt10ostrstream@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt8messagesIcE@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt8messagesIwE@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt8numpunctIcE@@GLIBCXX_3.4 ++OBJECT:15:_ZTSSt8numpunctIwE@@GLIBCXX_3.4 
++OBJECT:16:_ZTIDd@@CXXABI_1.3.4 ++OBJECT:16:_ZTIDe@@CXXABI_1.3.4 ++OBJECT:16:_ZTIDf@@CXXABI_1.3.4 ++OBJECT:16:_ZTIDi@@CXXABI_1.3.3 ++OBJECT:16:_ZTIDn@@CXXABI_1.3.5 ++OBJECT:16:_ZTIDs@@CXXABI_1.3.3 ++OBJECT:16:_ZTIDu@@CXXABI_1.3.12 ++OBJECT:16:_ZTIN10__cxxabiv115__forced_unwindE@@CXXABI_1.3.2 ++OBJECT:16:_ZTIN10__cxxabiv119__foreign_exceptionE@@CXXABI_1.3.2 ++OBJECT:16:_ZTINSt13__future_base11_State_baseE@@GLIBCXX_3.4.15 ++OBJECT:16:_ZTINSt13__future_base12_Result_baseE@@GLIBCXX_3.4.15 ++OBJECT:16:_ZTINSt3_V214error_categoryE@@GLIBCXX_3.4.21 ++OBJECT:16:_ZTINSt6locale5facetE@@GLIBCXX_3.4 ++OBJECT:16:_ZTINSt6thread6_StateE@@GLIBCXX_3.4.22 ++OBJECT:16:_ZTISt10ctype_base@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt10money_base@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt12codecvt_base@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt13messages_base@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt14error_category@@GLIBCXX_3.4.11 ++OBJECT:16:_ZTISt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt15basic_streambufIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt16nested_exception@@CXXABI_1.3.5 ++OBJECT:16:_ZTISt8ios_base@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt9exception@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt9time_base@@GLIBCXX_3.4 ++OBJECT:16:_ZTISt9type_info@@GLIBCXX_3.4 ++OBJECT:16:_ZTIa@@CXXABI_1.3 ++OBJECT:16:_ZTIb@@CXXABI_1.3 ++OBJECT:16:_ZTIc@@CXXABI_1.3 ++OBJECT:16:_ZTId@@CXXABI_1.3 ++OBJECT:16:_ZTIe@@CXXABI_1.3 ++OBJECT:16:_ZTIf@@CXXABI_1.3 ++OBJECT:16:_ZTIg@@CXXABI_LDBL_1.3 ++OBJECT:16:_ZTIh@@CXXABI_1.3 ++OBJECT:16:_ZTIi@@CXXABI_1.3 ++OBJECT:16:_ZTIj@@CXXABI_1.3 ++OBJECT:16:_ZTIl@@CXXABI_1.3 ++OBJECT:16:_ZTIm@@CXXABI_1.3 ++OBJECT:16:_ZTIn@@CXXABI_1.3.5 ++OBJECT:16:_ZTIo@@CXXABI_1.3.5 ++OBJECT:16:_ZTIs@@CXXABI_1.3 ++OBJECT:16:_ZTIt@@CXXABI_1.3 ++OBJECT:16:_ZTIv@@CXXABI_1.3 ++OBJECT:16:_ZTIw@@CXXABI_1.3 ++OBJECT:16:_ZTIx@@CXXABI_1.3 ++OBJECT:16:_ZTIy@@CXXABI_1.3 ++OBJECT:16:_ZTSSt11logic_error@@GLIBCXX_3.4 ++OBJECT:16:_ZTSSt11range_error@@GLIBCXX_3.4 ++OBJECT:16:_ZTTSi@@GLIBCXX_3.4 ++OBJECT:16:_ZTTSo@@GLIBCXX_3.4 
++OBJECT:16:_ZTTSt13basic_istreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:16:_ZTTSt13basic_ostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:17:_ZTSSt12codecvt_base@@GLIBCXX_3.4 ++OBJECT:17:_ZTSSt12domain_error@@GLIBCXX_3.4 ++OBJECT:17:_ZTSSt12future_error@@GLIBCXX_3.4.14 ++OBJECT:17:_ZTSSt12length_error@@GLIBCXX_3.4 ++OBJECT:17:_ZTSSt12out_of_range@@GLIBCXX_3.4 ++OBJECT:17:_ZTSSt12strstreambuf@@GLIBCXX_3.4 ++OBJECT:17:_ZTSSt12system_error@@GLIBCXX_3.4.11 ++OBJECT:18:_ZTSNSt6locale5facetE@@GLIBCXX_3.4 ++OBJECT:18:_ZTSSt13bad_exception@@GLIBCXX_3.4 ++OBJECT:18:_ZTSSt13messages_base@@GLIBCXX_3.4 ++OBJECT:18:_ZTSSt13runtime_error@@GLIBCXX_3.4 ++OBJECT:19:_ZTSNSt6thread6_StateE@@GLIBCXX_3.4.22 ++OBJECT:19:_ZTSSt11__timepunctIcE@@GLIBCXX_3.4 ++OBJECT:19:_ZTSSt11__timepunctIwE@@GLIBCXX_3.4 ++OBJECT:19:_ZTSSt14error_category@@GLIBCXX_3.4.11 ++OBJECT:19:_ZTSSt14overflow_error@@GLIBCXX_3.4 ++OBJECT:1:_ZNSs4_Rep11_S_terminalE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt10moneypunctIcLb0EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt10moneypunctIcLb1EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt10moneypunctIwLb0EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt10moneypunctIwLb1EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt12placeholders2_1E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_2E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_3E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_4E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_5E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_6E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_7E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_8E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders2_9E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_10E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_11E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_12E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_13E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_14E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_15E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_16E@@GLIBCXX_3.4.15 
++OBJECT:1:_ZNSt12placeholders3_17E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_18E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_19E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_20E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_21E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_22E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_23E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_24E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_25E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_26E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_27E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_28E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt12placeholders3_29E@@GLIBCXX_3.4.15 ++OBJECT:1:_ZNSt14numeric_limitsIDiE10is_boundedE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE10is_integerE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE12has_infinityE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE13has_quiet_NaNE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE14is_specializedE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE15has_denorm_lossE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE15tinyness_beforeE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE17has_signaling_NaNE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE5trapsE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE8is_exactE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE9is_iec559E@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE9is_moduloE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDiE9is_signedE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE10is_boundedE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE10is_integerE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE12has_infinityE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE13has_quiet_NaNE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE14is_specializedE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE15has_denorm_lossE@@GLIBCXX_3.4.11 
++OBJECT:1:_ZNSt14numeric_limitsIDsE15tinyness_beforeE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE17has_signaling_NaNE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE5trapsE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE8is_exactE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE9is_iec559E@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE9is_moduloE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDsE9is_signedE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt14numeric_limitsIDuE10is_boundedE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE10is_integerE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE12has_infinityE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE13has_quiet_NaNE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE14is_specializedE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE15has_denorm_lossE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE15tinyness_beforeE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE17has_signaling_NaNE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE5trapsE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE8is_exactE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE9is_iec559E@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE9is_moduloE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIDuE9is_signedE@@GLIBCXX_3.4.26 ++OBJECT:1:_ZNSt14numeric_limitsIaE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE8is_exactE@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt14numeric_limitsIaE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIaE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIbE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIcE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE10is_integerE@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt14numeric_limitsIdE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIdE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIeE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE15tinyness_beforeE@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt14numeric_limitsIfE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIfE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE10is_boundedE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE10is_integerE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE12has_infinityE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE13has_quiet_NaNE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE14is_specializedE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE15has_denorm_lossE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE15tinyness_beforeE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE17has_signaling_NaNE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE5trapsE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE8is_exactE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE9is_iec559E@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE9is_moduloE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIgE9is_signedE@@GLIBCXX_LDBL_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE9is_iec559E@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt14numeric_limitsIhE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIhE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIiE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIjE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE12has_infinityE@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt14numeric_limitsIlE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIlE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsImE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsInE10is_boundedE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE10is_integerE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE12has_infinityE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE13has_quiet_NaNE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE14is_specializedE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE15has_denorm_lossE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE15tinyness_beforeE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE17has_signaling_NaNE@@GLIBCXX_3.4.17 
++OBJECT:1:_ZNSt14numeric_limitsInE5trapsE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE8is_exactE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE9is_iec559E@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE9is_moduloE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsInE9is_signedE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE10is_boundedE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE10is_integerE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE12has_infinityE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE13has_quiet_NaNE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE14is_specializedE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE15has_denorm_lossE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE15tinyness_beforeE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE17has_signaling_NaNE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE5trapsE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE8is_exactE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE9is_iec559E@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE9is_moduloE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIoE9is_signedE@@GLIBCXX_3.4.17 ++OBJECT:1:_ZNSt14numeric_limitsIsE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIsE9is_signedE@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt14numeric_limitsItE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsItE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIwE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE14is_specializedE@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt14numeric_limitsIxE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIxE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE15has_denorm_lossE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt14numeric_limitsIyE9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt17moneypunct_bynameIcLb0EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt17moneypunct_bynameIcLb1EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt17moneypunct_bynameIwLb0EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt17moneypunct_bynameIwLb1EE4intlE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base10is_boundedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base10is_integerE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base12has_infinityE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base13has_quiet_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base14is_specializedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base15has_denorm_lossE@@GLIBCXX_3.4 
++OBJECT:1:_ZNSt21__numeric_limits_base15tinyness_beforeE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base17has_signaling_NaNE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base5trapsE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base8is_exactE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base9is_iec559E@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base9is_moduloE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt21__numeric_limits_base9is_signedE@@GLIBCXX_3.4 ++OBJECT:1:_ZNSt6chrono12system_clock12is_monotonicE@@GLIBCXX_3.4.11 ++OBJECT:1:_ZNSt6chrono3_V212steady_clock9is_steadyE@@GLIBCXX_3.4.19 ++OBJECT:1:_ZNSt6chrono3_V212system_clock9is_steadyE@@GLIBCXX_3.4.19 ++OBJECT:1:_ZNSt7__cxx1110moneypunctIcLb0EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZNSt7__cxx1110moneypunctIcLb1EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZNSt7__cxx1110moneypunctIwLb0EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZNSt7__cxx1110moneypunctIwLb1EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZNSt7__cxx1117moneypunct_bynameIcLb0EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZNSt7__cxx1117moneypunct_bynameIcLb1EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZNSt7__cxx1117moneypunct_bynameIwLb0EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZNSt7__cxx1117moneypunct_bynameIwLb1EE4intlE@@GLIBCXX_3.4.21 ++OBJECT:1:_ZSt10adopt_lock@@GLIBCXX_3.4.11 ++OBJECT:1:_ZSt10defer_lock@@GLIBCXX_3.4.11 ++OBJECT:1:_ZSt11try_to_lock@@GLIBCXX_3.4.11 ++OBJECT:1:_ZSt7nothrow@@GLIBCXX_3.4 ++OBJECT:20:_ZTSSt12ctype_bynameIcE@@GLIBCXX_3.4 ++OBJECT:20:_ZTSSt12ctype_bynameIwE@@GLIBCXX_3.4 ++OBJECT:20:_ZTSSt15underflow_error@@GLIBCXX_3.4 ++OBJECT:21:_ZTSSt16bad_array_length@@CXXABI_1.3.8 ++OBJECT:21:_ZTSSt16invalid_argument@@GLIBCXX_3.4 ++OBJECT:22:_ZTSNSt8ios_base7failureE@@GLIBCXX_3.4 ++OBJECT:22:_ZTSSt10moneypunctIcLb0EE@@GLIBCXX_3.4 ++OBJECT:22:_ZTSSt10moneypunctIcLb1EE@@GLIBCXX_3.4 ++OBJECT:22:_ZTSSt10moneypunctIwLb0EE@@GLIBCXX_3.4 ++OBJECT:22:_ZTSSt10moneypunctIwLb1EE@@GLIBCXX_3.4 ++OBJECT:22:_ZTSSt14collate_bynameIcE@@GLIBCXX_3.4 ++OBJECT:22:_ZTSSt14collate_bynameIwE@@GLIBCXX_3.4 
++OBJECT:23:_ZTSSt15messages_bynameIcE@@GLIBCXX_3.4 ++OBJECT:23:_ZTSSt15messages_bynameIwE@@GLIBCXX_3.4 ++OBJECT:23:_ZTSSt15numpunct_bynameIcE@@GLIBCXX_3.4 ++OBJECT:23:_ZTSSt15numpunct_bynameIwE@@GLIBCXX_3.4 ++OBJECT:2440:_ZNSt3tr18__detail12__prime_listE@@GLIBCXX_3.4.10 ++OBJECT:2440:_ZNSt8__detail12__prime_listE@@GLIBCXX_3.4.10 ++OBJECT:24:_ZTIN10__cxxabiv116__enum_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv117__array_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv117__class_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv117__pbase_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv119__pointer_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv120__function_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv120__si_class_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv121__vmi_class_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv123__fundamental_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN10__cxxabiv129__pointer_to_member_type_infoE@@CXXABI_1.3 ++OBJECT:24:_ZTIN9__gnu_cxx13stdio_filebufIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTIN9__gnu_cxx13stdio_filebufIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTIN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTIN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTINSt10filesystem16filesystem_errorE@@GLIBCXX_3.4.26 ++OBJECT:24:_ZTINSt10filesystem7__cxx1116filesystem_errorE@@GLIBCXX_3.4.26 ++OBJECT:24:_ZTINSt13__future_base19_Async_state_commonE@@GLIBCXX_3.4.17 ++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 
++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:24:_ZTINSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:24:_ZTINSt3pmr26synchronized_pool_resourceE@@GLIBCXX_3.4.26 ++OBJECT:24:_ZTINSt3pmr28unsynchronized_pool_resourceE@@GLIBCXX_3.4.26 ++OBJECT:24:_ZTINSt7__cxx1114collate_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1114collate_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115messages_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115messages_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115numpunct_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115numpunct_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1117moneypunct_bynameIcLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1117moneypunct_bynameIcLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1117moneypunct_bynameIwLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1117moneypunct_bynameIwLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 
++OBJECT:24:_ZTINSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx117collateIcEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx117collateIwEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx118numpunctIcEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx118numpunctIwEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt8ios_base7failureB5cxx11E@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTINSt8ios_base7failureE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt10bad_typeid@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt10istrstream@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt10lock_error@@GLIBCXX_3.4.11 ++OBJECT:24:_ZTISt10ostrstream@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt11__timepunctIcE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt11__timepunctIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt11logic_error@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt11range_error@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt11regex_error@@GLIBCXX_3.4.15 ++OBJECT:24:_ZTISt12bad_weak_ptr@@GLIBCXX_3.4.15 ++OBJECT:24:_ZTISt12ctype_bynameIcE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt12ctype_bynameIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt12domain_error@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt12future_error@@GLIBCXX_3.4.14 ++OBJECT:24:_ZTISt12length_error@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt12out_of_range@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt12strstreambuf@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt12system_error@@GLIBCXX_3.4.11 ++OBJECT:24:_ZTISt13bad_exception@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt13basic_filebufIcSt11char_traitsIcEE@@GLIBCXX_3.4 
++OBJECT:24:_ZTISt13basic_filebufIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt13basic_fstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt13basic_fstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt13runtime_error@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14basic_ifstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14basic_ifstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14basic_ofstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14basic_ofstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14codecvt_bynameIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14codecvt_bynameIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14collate_bynameIcE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14collate_bynameIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt14overflow_error@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15basic_stringbufIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15basic_stringbufIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15messages_bynameIcE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15messages_bynameIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15numpunct_bynameIcE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15numpunct_bynameIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt15underflow_error@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt16bad_array_length@@CXXABI_1.3.8 ++OBJECT:24:_ZTISt16invalid_argument@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt17bad_function_call@@GLIBCXX_3.4.15 ++OBJECT:24:_ZTISt17moneypunct_bynameIcLb0EE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt17moneypunct_bynameIcLb1EE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt17moneypunct_bynameIwLb0EE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt17moneypunct_bynameIwLb1EE@@GLIBCXX_3.4 
++OBJECT:24:_ZTISt18basic_stringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt18basic_stringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt19__codecvt_utf8_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt19__codecvt_utf8_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt19__codecvt_utf8_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt19basic_istringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt19basic_istringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt19basic_ostringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt19basic_ostringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt20__codecvt_utf16_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt20__codecvt_utf16_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt20__codecvt_utf16_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt20bad_array_new_length@@CXXABI_1.3.8 ++OBJECT:24:_ZTISt25__codecvt_utf8_utf16_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt25__codecvt_utf8_utf16_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt25__codecvt_utf8_utf16_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt5ctypeIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7codecvtIDiDu11__mbstate_tE@@GLIBCXX_3.4.26 ++OBJECT:24:_ZTISt7codecvtIDic11__mbstate_tE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt7codecvtIDsDu11__mbstate_tE@@GLIBCXX_3.4.26 ++OBJECT:24:_ZTISt7codecvtIDsc11__mbstate_tE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTISt7codecvtIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7codecvtIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7collateIcE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7collateIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt8bad_cast@@GLIBCXX_3.4 
++OBJECT:24:_ZTISt8numpunctIcE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt8numpunctIwE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9bad_alloc@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9basic_iosIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9basic_iosIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:24:_ZTISt9strstream@@GLIBCXX_3.4 ++OBJECT:24:_ZTSNSt7__cxx117collateIcEE@@GLIBCXX_3.4.21 ++OBJECT:24:_ZTSNSt7__cxx117collateIwEE@@GLIBCXX_3.4.21 ++OBJECT:25:_ZTSNSt7__cxx118messagesIcEE@@GLIBCXX_3.4.21 ++OBJECT:25:_ZTSNSt7__cxx118messagesIwEE@@GLIBCXX_3.4.21 ++OBJECT:25:_ZTSNSt7__cxx118numpunctIcEE@@GLIBCXX_3.4.21 ++OBJECT:25:_ZTSNSt7__cxx118numpunctIwEE@@GLIBCXX_3.4.21 ++OBJECT:25:_ZTSSt20bad_array_new_length@@CXXABI_1.3.8 ++OBJECT:272:_ZSt4cerr@@GLIBCXX_3.4 ++OBJECT:272:_ZSt4clog@@GLIBCXX_3.4 ++OBJECT:272:_ZSt4cout@@GLIBCXX_3.4 ++OBJECT:272:_ZSt5wcerr@@GLIBCXX_3.4 ++OBJECT:272:_ZSt5wclog@@GLIBCXX_3.4 ++OBJECT:272:_ZSt5wcout@@GLIBCXX_3.4 ++OBJECT:27:_ZTSSt19__codecvt_utf8_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:280:_ZSt3cin@@GLIBCXX_3.4 ++OBJECT:280:_ZSt4wcin@@GLIBCXX_3.4 ++OBJECT:28:_ZTSSt19__codecvt_utf8_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:28:_ZTSSt19__codecvt_utf8_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:28:_ZTSSt20__codecvt_utf16_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:28:_ZTSSt7codecvtIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:28:_ZTSSt7codecvtIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:29:_ZTSNSt8ios_base7failureB5cxx11E@@GLIBCXX_3.4.21 ++OBJECT:29:_ZTSSt17moneypunct_bynameIcLb0EE@@GLIBCXX_3.4 
++OBJECT:29:_ZTSSt17moneypunct_bynameIcLb1EE@@GLIBCXX_3.4 ++OBJECT:29:_ZTSSt17moneypunct_bynameIwLb0EE@@GLIBCXX_3.4 ++OBJECT:29:_ZTSSt17moneypunct_bynameIwLb1EE@@GLIBCXX_3.4 ++OBJECT:29:_ZTSSt20__codecvt_utf16_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:29:_ZTSSt20__codecvt_utf16_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:29:_ZTSSt21__ctype_abstract_baseIcE@@GLIBCXX_3.4 ++OBJECT:29:_ZTSSt21__ctype_abstract_baseIwE@@GLIBCXX_3.4 ++OBJECT:29:_ZTSSt7codecvtIDic11__mbstate_tE@@GLIBCXX_3.4.21 ++OBJECT:29:_ZTSSt7codecvtIDsc11__mbstate_tE@@GLIBCXX_3.4.21 ++OBJECT:2:_ZNSt10ctype_base5alnumE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5alphaE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5blankE@@GLIBCXX_3.4.21 ++OBJECT:2:_ZNSt10ctype_base5cntrlE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5digitE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5graphE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5lowerE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5printE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5punctE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5spaceE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base5upperE@@GLIBCXX_3.4 ++OBJECT:2:_ZNSt10ctype_base6xdigitE@@GLIBCXX_3.4 ++OBJECT:2:_ZTSa@@CXXABI_1.3 ++OBJECT:2:_ZTSb@@CXXABI_1.3 ++OBJECT:2:_ZTSc@@CXXABI_1.3 ++OBJECT:2:_ZTSd@@CXXABI_1.3 ++OBJECT:2:_ZTSe@@CXXABI_1.3 ++OBJECT:2:_ZTSf@@CXXABI_1.3 ++OBJECT:2:_ZTSg@@CXXABI_LDBL_1.3 ++OBJECT:2:_ZTSh@@CXXABI_1.3 ++OBJECT:2:_ZTSi@@CXXABI_1.3 ++OBJECT:2:_ZTSj@@CXXABI_1.3 ++OBJECT:2:_ZTSl@@CXXABI_1.3 ++OBJECT:2:_ZTSm@@CXXABI_1.3 ++OBJECT:2:_ZTSn@@CXXABI_1.3.9 ++OBJECT:2:_ZTSo@@CXXABI_1.3.9 ++OBJECT:2:_ZTSs@@CXXABI_1.3 ++OBJECT:2:_ZTSt@@CXXABI_1.3 ++OBJECT:2:_ZTSv@@CXXABI_1.3 ++OBJECT:2:_ZTSw@@CXXABI_1.3 ++OBJECT:2:_ZTSx@@CXXABI_1.3 ++OBJECT:2:_ZTSy@@CXXABI_1.3 ++OBJECT:30:_ZTSSt7codecvtIDiDu11__mbstate_tE@@GLIBCXX_3.4.26 ++OBJECT:30:_ZTSSt7codecvtIDsDu11__mbstate_tE@@GLIBCXX_3.4.26 ++OBJECT:32:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep20_S_empty_rep_storageE@@GLIBCXX_3.4 ++OBJECT:32:_ZNSs4_Rep20_S_empty_rep_storageE@@GLIBCXX_3.4 
++OBJECT:32:_ZTIPDd@@CXXABI_1.3.4 ++OBJECT:32:_ZTIPDe@@CXXABI_1.3.4 ++OBJECT:32:_ZTIPDf@@CXXABI_1.3.4 ++OBJECT:32:_ZTIPDi@@CXXABI_1.3.3 ++OBJECT:32:_ZTIPDn@@CXXABI_1.3.5 ++OBJECT:32:_ZTIPDs@@CXXABI_1.3.3 ++OBJECT:32:_ZTIPDu@@CXXABI_1.3.12 ++OBJECT:32:_ZTIPKDd@@CXXABI_1.3.4 ++OBJECT:32:_ZTIPKDe@@CXXABI_1.3.4 ++OBJECT:32:_ZTIPKDf@@CXXABI_1.3.4 ++OBJECT:32:_ZTIPKDi@@CXXABI_1.3.3 ++OBJECT:32:_ZTIPKDn@@CXXABI_1.3.5 ++OBJECT:32:_ZTIPKDs@@CXXABI_1.3.3 ++OBJECT:32:_ZTIPKDu@@CXXABI_1.3.12 ++OBJECT:32:_ZTIPKa@@CXXABI_1.3 ++OBJECT:32:_ZTIPKb@@CXXABI_1.3 ++OBJECT:32:_ZTIPKc@@CXXABI_1.3 ++OBJECT:32:_ZTIPKd@@CXXABI_1.3 ++OBJECT:32:_ZTIPKe@@CXXABI_1.3 ++OBJECT:32:_ZTIPKf@@CXXABI_1.3 ++OBJECT:32:_ZTIPKg@@CXXABI_LDBL_1.3 ++OBJECT:32:_ZTIPKh@@CXXABI_1.3 ++OBJECT:32:_ZTIPKi@@CXXABI_1.3 ++OBJECT:32:_ZTIPKj@@CXXABI_1.3 ++OBJECT:32:_ZTIPKl@@CXXABI_1.3 ++OBJECT:32:_ZTIPKm@@CXXABI_1.3 ++OBJECT:32:_ZTIPKn@@CXXABI_1.3.5 ++OBJECT:32:_ZTIPKo@@CXXABI_1.3.5 ++OBJECT:32:_ZTIPKs@@CXXABI_1.3 ++OBJECT:32:_ZTIPKt@@CXXABI_1.3 ++OBJECT:32:_ZTIPKv@@CXXABI_1.3 ++OBJECT:32:_ZTIPKw@@CXXABI_1.3 ++OBJECT:32:_ZTIPKx@@CXXABI_1.3 ++OBJECT:32:_ZTIPKy@@CXXABI_1.3 ++OBJECT:32:_ZTIPa@@CXXABI_1.3 ++OBJECT:32:_ZTIPb@@CXXABI_1.3 ++OBJECT:32:_ZTIPc@@CXXABI_1.3 ++OBJECT:32:_ZTIPd@@CXXABI_1.3 ++OBJECT:32:_ZTIPe@@CXXABI_1.3 ++OBJECT:32:_ZTIPf@@CXXABI_1.3 ++OBJECT:32:_ZTIPg@@CXXABI_LDBL_1.3 ++OBJECT:32:_ZTIPh@@CXXABI_1.3 ++OBJECT:32:_ZTIPi@@CXXABI_1.3 ++OBJECT:32:_ZTIPj@@CXXABI_1.3 ++OBJECT:32:_ZTIPl@@CXXABI_1.3 ++OBJECT:32:_ZTIPm@@CXXABI_1.3 ++OBJECT:32:_ZTIPn@@CXXABI_1.3.5 ++OBJECT:32:_ZTIPo@@CXXABI_1.3.5 ++OBJECT:32:_ZTIPs@@CXXABI_1.3 ++OBJECT:32:_ZTIPt@@CXXABI_1.3 ++OBJECT:32:_ZTIPv@@CXXABI_1.3 ++OBJECT:32:_ZTIPw@@CXXABI_1.3 ++OBJECT:32:_ZTIPx@@CXXABI_1.3 ++OBJECT:32:_ZTIPy@@CXXABI_1.3 ++OBJECT:32:_ZTSNSt7__cxx1110moneypunctIcLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTSNSt7__cxx1110moneypunctIcLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTSNSt7__cxx1110moneypunctIwLb0EEE@@GLIBCXX_3.4.21 
++OBJECT:32:_ZTSNSt7__cxx1110moneypunctIwLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTSNSt7__cxx1114collate_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTSNSt7__cxx1114collate_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTTNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTTNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTTNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTTNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:32:_ZTTSt10istrstream@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt10ostrstream@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt14basic_ifstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt14basic_ifstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt14basic_ofstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt14basic_ofstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt19basic_istringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt19basic_istringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTTSt19basic_ostringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTVNSt6locale5facetE@@GLIBCXX_3.4 ++OBJECT:32:_ZTVSt11__timepunctIcE@@GLIBCXX_3.4 ++OBJECT:32:_ZTVSt11__timepunctIwE@@GLIBCXX_3.4 ++OBJECT:32:_ZTVSt16nested_exception@@CXXABI_1.3.5 ++OBJECT:32:_ZTVSt8ios_base@@GLIBCXX_3.4 ++OBJECT:32:_ZTVSt9basic_iosIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:32:_ZTVSt9basic_iosIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:33:_ZTSN10__cxxabiv116__enum_type_infoE@@CXXABI_1.3 ++OBJECT:33:_ZTSNSt7__cxx1115messages_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:33:_ZTSNSt7__cxx1115messages_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:33:_ZTSNSt7__cxx1115numpunct_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:33:_ZTSNSt7__cxx1115numpunct_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:33:_ZTSSt25__codecvt_utf8_utf16_baseIwE@@GLIBCXX_3.4.21 
++OBJECT:34:_ZTSN10__cxxabiv117__array_type_infoE@@CXXABI_1.3 ++OBJECT:34:_ZTSN10__cxxabiv117__class_type_infoE@@CXXABI_1.3 ++OBJECT:34:_ZTSN10__cxxabiv117__pbase_type_infoE@@CXXABI_1.3 ++OBJECT:34:_ZTSSt25__codecvt_utf8_utf16_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:34:_ZTSSt25__codecvt_utf8_utf16_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:34:_ZTSSt9basic_iosIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:34:_ZTSSt9basic_iosIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:36:_ZTSN10__cxxabiv119__pointer_type_infoE@@CXXABI_1.3 ++OBJECT:36:_ZTSSt14codecvt_bynameIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:36:_ZTSSt14codecvt_bynameIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:37:_ZTSN10__cxxabiv120__function_type_infoE@@CXXABI_1.3 ++OBJECT:37:_ZTSN10__cxxabiv120__si_class_type_infoE@@CXXABI_1.3 ++OBJECT:38:_ZTSN10__cxxabiv121__vmi_class_type_infoE@@CXXABI_1.3 ++OBJECT:39:_ZTSNSt7__cxx1117moneypunct_bynameIcLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:39:_ZTSNSt7__cxx1117moneypunct_bynameIcLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:39:_ZTSNSt7__cxx1117moneypunct_bynameIwLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:39:_ZTSNSt7__cxx1117moneypunct_bynameIwLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:39:_ZTSSt13basic_filebufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:39:_ZTSSt13basic_filebufIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:39:_ZTSSt13basic_fstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:39:_ZTSSt13basic_fstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:39:_ZTSSt13basic_istreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:39:_ZTSSt13basic_ostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:3:_ZTSPa@@CXXABI_1.3 ++OBJECT:3:_ZTSPb@@CXXABI_1.3 ++OBJECT:3:_ZTSPc@@CXXABI_1.3 ++OBJECT:3:_ZTSPd@@CXXABI_1.3 ++OBJECT:3:_ZTSPe@@CXXABI_1.3 ++OBJECT:3:_ZTSPf@@CXXABI_1.3 ++OBJECT:3:_ZTSPg@@CXXABI_LDBL_1.3 ++OBJECT:3:_ZTSPh@@CXXABI_1.3 ++OBJECT:3:_ZTSPi@@CXXABI_1.3 ++OBJECT:3:_ZTSPj@@CXXABI_1.3 ++OBJECT:3:_ZTSPl@@CXXABI_1.3 ++OBJECT:3:_ZTSPm@@CXXABI_1.3 ++OBJECT:3:_ZTSPn@@CXXABI_1.3.9 ++OBJECT:3:_ZTSPo@@CXXABI_1.3.9 ++OBJECT:3:_ZTSPs@@CXXABI_1.3 
++OBJECT:3:_ZTSPt@@CXXABI_1.3 ++OBJECT:3:_ZTSPv@@CXXABI_1.3 ++OBJECT:3:_ZTSPw@@CXXABI_1.3 ++OBJECT:3:_ZTSPx@@CXXABI_1.3 ++OBJECT:3:_ZTSPy@@CXXABI_1.3 ++OBJECT:3:_ZTSSd@@GLIBCXX_3.4 ++OBJECT:3:_ZTSSi@@GLIBCXX_3.4 ++OBJECT:3:_ZTSSo@@GLIBCXX_3.4 ++OBJECT:40:_ZTISi@@GLIBCXX_3.4 ++OBJECT:40:_ZTISo@@GLIBCXX_3.4 ++OBJECT:40:_ZTISt13basic_istreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTISt13basic_ostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTSN10__cxxabiv123__fundamental_type_infoE@@CXXABI_1.3 ++OBJECT:40:_ZTSSt14basic_ifstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTSSt14basic_ifstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTSSt14basic_iostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTSSt14basic_ofstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTSSt14basic_ofstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTVNSt10filesystem16filesystem_errorE@@GLIBCXX_3.4.26 ++OBJECT:40:_ZTVNSt10filesystem7__cxx1116filesystem_errorE@@GLIBCXX_3.4.26 ++OBJECT:40:_ZTVNSt13__future_base11_State_baseE@@GLIBCXX_3.4.15 ++OBJECT:40:_ZTVNSt13__future_base12_Result_baseE@@GLIBCXX_3.4.15 ++OBJECT:40:_ZTVNSt13__future_base19_Async_state_commonE@@GLIBCXX_3.4.17 ++OBJECT:40:_ZTVNSt6thread6_StateE@@GLIBCXX_3.4.22 ++OBJECT:40:_ZTVNSt8ios_base7failureB5cxx11E@@GLIBCXX_3.4.21 ++OBJECT:40:_ZTVNSt8ios_base7failureE@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt10bad_typeid@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt10lock_error@@GLIBCXX_3.4.11 ++OBJECT:40:_ZTVSt11logic_error@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt11range_error@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt11regex_error@@GLIBCXX_3.4.15 ++OBJECT:40:_ZTVSt12bad_weak_ptr@@GLIBCXX_3.4.15 ++OBJECT:40:_ZTVSt12domain_error@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt12future_error@@GLIBCXX_3.4.14 ++OBJECT:40:_ZTVSt12length_error@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt12out_of_range@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt12system_error@@GLIBCXX_3.4.11 ++OBJECT:40:_ZTVSt13bad_exception@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt13runtime_error@@GLIBCXX_3.4 
++OBJECT:40:_ZTVSt14overflow_error@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt15underflow_error@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt16bad_array_length@@CXXABI_1.3.8 ++OBJECT:40:_ZTVSt16invalid_argument@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt17bad_function_call@@GLIBCXX_3.4.15 ++OBJECT:40:_ZTVSt20bad_array_new_length@@CXXABI_1.3.8 ++OBJECT:40:_ZTVSt8bad_cast@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt9bad_alloc@@GLIBCXX_3.4 ++OBJECT:40:_ZTVSt9exception@@GLIBCXX_3.4 ++OBJECT:41:_ZTSNSt13__future_base19_Async_state_commonE@@GLIBCXX_3.4.17 ++OBJECT:41:_ZTSSt15basic_streambufIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:41:_ZTSSt15basic_streambufIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:45:_ZTSSt23__codecvt_abstract_baseIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:45:_ZTSSt23__codecvt_abstract_baseIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:46:_ZTSN10__cxxabiv129__pointer_to_member_type_infoE@@CXXABI_1.3 ++OBJECT:46:_ZTSSt15basic_stringbufIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:46:_ZTSSt15basic_stringbufIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:48:_ZTVNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:48:_ZTVNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:48:_ZTVNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:48:_ZTVNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:48:_ZTVSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:48:_ZTVSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 
++OBJECT:48:_ZTVSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:48:_ZTVSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:49:_ZTSN9__gnu_cxx13stdio_filebufIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:49:_ZTSN9__gnu_cxx13stdio_filebufIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:49:_ZTSSt18basic_stringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:49:_ZTSSt18basic_stringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep11_S_terminalE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt10money_base18_S_default_patternE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIDiE10has_denormE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE11round_styleE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIDiE12max_exponentE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE12min_exponentE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE14max_exponent10E@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE14min_exponent10E@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE5radixE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE6digitsE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDiE8digits10E@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE10has_denormE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE11round_styleE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIDsE12max_exponentE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE12min_exponentE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE14max_exponent10E@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE14min_exponent10E@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE5radixE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE6digitsE@@GLIBCXX_3.4.11 ++OBJECT:4:_ZNSt14numeric_limitsIDsE8digits10E@@GLIBCXX_3.4.11 
++OBJECT:4:_ZNSt14numeric_limitsIDuE10has_denormE@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE11round_styleE@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE12max_exponentE@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE12min_exponentE@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE14max_exponent10E@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE14min_exponent10E@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE5radixE@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE6digitsE@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIDuE8digits10E@@GLIBCXX_3.4.26 ++OBJECT:4:_ZNSt14numeric_limitsIaE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIaE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIaE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIbE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIbE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE11round_styleE@@GLIBCXX_3.4 
++OBJECT:4:_ZNSt14numeric_limitsIcE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIcE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIcE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIdE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIdE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIeE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIfE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE12min_exponentE@@GLIBCXX_3.4 
++OBJECT:4:_ZNSt14numeric_limitsIfE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIfE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE10has_denormE@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE11round_styleE@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE12max_digits10E@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE12max_exponentE@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE12min_exponentE@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE14max_exponent10E@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE14min_exponent10E@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE5radixE@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE6digitsE@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIgE8digits10E@@GLIBCXX_LDBL_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIhE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIhE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIiE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE14max_exponent10E@@GLIBCXX_3.4 
++OBJECT:4:_ZNSt14numeric_limitsIiE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIiE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIjE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIjE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIlE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIlE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsImE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE5radixE@@GLIBCXX_3.4 
++OBJECT:4:_ZNSt14numeric_limitsImE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsImE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsInE10has_denormE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE11round_styleE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE12max_digits10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE12max_exponentE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE12min_exponentE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE14max_exponent10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE14min_exponent10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE5radixE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE6digitsE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsInE8digits10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE10has_denormE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE11round_styleE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE12max_digits10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE12max_exponentE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE12min_exponentE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE14max_exponent10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE14min_exponent10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE5radixE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE6digitsE@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIoE8digits10E@@GLIBCXX_3.4.17 ++OBJECT:4:_ZNSt14numeric_limitsIsE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIsE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIsE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIsE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIsE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIsE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIsE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIsE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIsE6digitsE@@GLIBCXX_3.4 
++OBJECT:4:_ZNSt14numeric_limitsIsE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsItE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsItE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIwE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIwE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIxE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIxE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE10has_denormE@@GLIBCXX_3.4 
++OBJECT:4:_ZNSt14numeric_limitsIyE11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt14numeric_limitsIyE12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt14numeric_limitsIyE8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base10has_denormE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base11round_styleE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base12max_digits10E@@GLIBCXX_3.4.14 ++OBJECT:4:_ZNSt21__numeric_limits_base12max_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base12min_exponentE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base14max_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base14min_exponent10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base5radixE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base6digitsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt21__numeric_limits_base8digits10E@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale3allE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale4noneE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale4timeE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale5ctypeE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale7collateE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale7numericE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale8messagesE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt6locale8monetaryE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base10floatfieldE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base10scientificE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base11adjustfieldE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base2inE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3appE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3ateE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3begE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3curE@@GLIBCXX_3.4 
++OBJECT:4:_ZNSt8ios_base3decE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3endE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3hexE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3octE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base3outE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base4leftE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base5fixedE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base5rightE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base5truncE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base6badbitE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base6binaryE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base6eofbitE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base6skipwsE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base7failbitE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base7goodbitE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base7showposE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base7unitbufE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base8internalE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base8showbaseE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base9basefieldE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base9boolalphaE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base9showpointE@@GLIBCXX_3.4 ++OBJECT:4:_ZNSt8ios_base9uppercaseE@@GLIBCXX_3.4 ++OBJECT:4:_ZTSPKa@@CXXABI_1.3 ++OBJECT:4:_ZTSPKb@@CXXABI_1.3 ++OBJECT:4:_ZTSPKc@@CXXABI_1.3 ++OBJECT:4:_ZTSPKd@@CXXABI_1.3 ++OBJECT:4:_ZTSPKe@@CXXABI_1.3 ++OBJECT:4:_ZTSPKf@@CXXABI_1.3 ++OBJECT:4:_ZTSPKg@@CXXABI_LDBL_1.3 ++OBJECT:4:_ZTSPKh@@CXXABI_1.3 ++OBJECT:4:_ZTSPKi@@CXXABI_1.3 ++OBJECT:4:_ZTSPKj@@CXXABI_1.3 ++OBJECT:4:_ZTSPKl@@CXXABI_1.3 ++OBJECT:4:_ZTSPKm@@CXXABI_1.3 ++OBJECT:4:_ZTSPKn@@CXXABI_1.3.9 ++OBJECT:4:_ZTSPKo@@CXXABI_1.3.9 ++OBJECT:4:_ZTSPKs@@CXXABI_1.3 ++OBJECT:4:_ZTSPKt@@CXXABI_1.3 ++OBJECT:4:_ZTSPKv@@CXXABI_1.3 ++OBJECT:4:_ZTSPKw@@CXXABI_1.3 ++OBJECT:4:_ZTSPKx@@CXXABI_1.3 ++OBJECT:4:_ZTSPKy@@CXXABI_1.3 ++OBJECT:50:_ZTSSt19basic_istringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:50:_ZTSSt19basic_istringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:50:_ZTSSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 
++OBJECT:50:_ZTSSt19basic_ostringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:54:_ZTSN9__gnu_cxx18stdio_sync_filebufIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:54:_ZTSN9__gnu_cxx18stdio_sync_filebufIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:56:_ZTINSt7__cxx1110moneypunctIcLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTINSt7__cxx1110moneypunctIcLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTINSt7__cxx1110moneypunctIwLb0EEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTINSt7__cxx1110moneypunctIwLb1EEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTINSt7__cxx118messagesIcEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTINSt7__cxx118messagesIwEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTINSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTINSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTISd@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt10moneypunctIcLb0EE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt10moneypunctIcLb1EE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt10moneypunctIwLb0EE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt10moneypunctIwLb1EE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt14basic_iostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt21__ctype_abstract_baseIcE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt21__ctype_abstract_baseIwE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt23__codecvt_abstract_baseIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt23__codecvt_abstract_baseIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt5ctypeIcE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt8messagesIcE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt8messagesIwE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:56:_ZTISt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:56:_ZTSNSt7__cxx1115basic_stringbufIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTSNSt7__cxx1115basic_stringbufIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTTSd@@GLIBCXX_3.4 ++OBJECT:56:_ZTTSt14basic_iostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 
++OBJECT:56:_ZTVNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:56:_ZTVNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:56:_ZTVNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:56:_ZTVNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:56:_ZTVNSt7__cxx1114collate_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVNSt7__cxx1114collate_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVNSt7__cxx1115messages_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVNSt7__cxx1115messages_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVNSt7__cxx117collateIcEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVNSt7__cxx117collateIwEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVNSt7__cxx118messagesIcEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVNSt7__cxx118messagesIwEE@@GLIBCXX_3.4.21 ++OBJECT:56:_ZTVSt14collate_bynameIcE@@GLIBCXX_3.4 ++OBJECT:56:_ZTVSt14collate_bynameIwE@@GLIBCXX_3.4 ++OBJECT:56:_ZTVSt15messages_bynameIcE@@GLIBCXX_3.4 ++OBJECT:56:_ZTVSt15messages_bynameIwE@@GLIBCXX_3.4 ++OBJECT:56:_ZTVSt7collateIcE@@GLIBCXX_3.4 ++OBJECT:56:_ZTVSt7collateIwE@@GLIBCXX_3.4 ++OBJECT:56:_ZTVSt8messagesIcE@@GLIBCXX_3.4 ++OBJECT:56:_ZTVSt8messagesIwE@@GLIBCXX_3.4 ++OBJECT:58:_ZTSSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:58:_ZTSSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:58:_ZTSSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:58:_ZTSSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:59:_ZTSNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:59:_ZTSNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:59:_ZTSSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 
++OBJECT:59:_ZTSSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:59:_ZTSSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:59:_ZTSSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:60:_ZTSNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:60:_ZTSNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:60:_ZTSNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:60:_ZTSNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:60:_ZTSSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:60:_ZTSSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:60:_ZTSSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:60:_ZTSSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:64:_ZTVN10__cxxabiv116__enum_type_infoE@@CXXABI_1.3 ++OBJECT:64:_ZTVN10__cxxabiv117__array_type_infoE@@CXXABI_1.3 ++OBJECT:64:_ZTVN10__cxxabiv120__function_type_infoE@@CXXABI_1.3 ++OBJECT:64:_ZTVN10__cxxabiv123__fundamental_type_infoE@@CXXABI_1.3 ++OBJECT:64:_ZTVSt9type_info@@GLIBCXX_3.4 ++OBJECT:67:_ZTSSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:67:_ZTSSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:67:_ZTSSt15time_put_bynameIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:67:_ZTSSt15time_put_bynameIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:69:_ZTSNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:69:_ZTSNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:70:_ZTSNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 
++OBJECT:70:_ZTSNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:70:_ZTSNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:70:_ZTSNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:72:_ZTVN10__cxxabiv117__pbase_type_infoE@@CXXABI_1.3 ++OBJECT:72:_ZTVN10__cxxabiv119__pointer_type_infoE@@CXXABI_1.3 ++OBJECT:72:_ZTVN10__cxxabiv129__pointer_to_member_type_infoE@@CXXABI_1.3 ++OBJECT:72:_ZTVNSt7__cxx1115numpunct_bynameIcEE@@GLIBCXX_3.4.21 ++OBJECT:72:_ZTVNSt7__cxx1115numpunct_bynameIwEE@@GLIBCXX_3.4.21 ++OBJECT:72:_ZTVNSt7__cxx118numpunctIcEE@@GLIBCXX_3.4.21 ++OBJECT:72:_ZTVNSt7__cxx118numpunctIwEE@@GLIBCXX_3.4.21 ++OBJECT:72:_ZTVSt14error_category@@GLIBCXX_3.4.11 ++OBJECT:72:_ZTVSt15numpunct_bynameIcE@@GLIBCXX_3.4 ++OBJECT:72:_ZTVSt15numpunct_bynameIwE@@GLIBCXX_3.4 ++OBJECT:72:_ZTVSt8numpunctIcE@@GLIBCXX_3.4 ++OBJECT:72:_ZTVSt8numpunctIwE@@GLIBCXX_3.4 ++OBJECT:77:_ZTSNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:77:_ZTSNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:79:_ZTSNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:79:_ZTSNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:79:_ZTSNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:79:_ZTSNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:80:_ZTTNSt7__cxx1118basic_stringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:80:_ZTTNSt7__cxx1118basic_stringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:80:_ZTTSt13basic_fstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTTSt13basic_fstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 
++OBJECT:80:_ZTTSt18basic_stringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTTSt18basic_stringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTTSt9strstream@@GLIBCXX_3.4 ++OBJECT:80:_ZTVNSt3_V214error_categoryE@@GLIBCXX_3.4.21 ++OBJECT:80:_ZTVNSt7__cxx1119basic_istringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:80:_ZTVNSt7__cxx1119basic_istringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:80:_ZTVNSt7__cxx1119basic_ostringstreamIcSt11char_traitsIcESaIcEEE@@GLIBCXX_3.4.21 ++OBJECT:80:_ZTVNSt7__cxx1119basic_ostringstreamIwSt11char_traitsIwESaIwEEE@@GLIBCXX_3.4.21 ++OBJECT:80:_ZTVSi@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSo@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt10istrstream@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt10ostrstream@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt13basic_istreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt13basic_ostreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt14basic_ifstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt14basic_ifstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt14basic_ofstreamIcSt11char_traitsIcEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt14basic_ofstreamIwSt11char_traitsIwEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt15time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt15time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt19basic_istringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt19basic_istringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt19basic_ostringstreamIcSt11char_traitsIcESaIcEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt19basic_ostringstreamIwSt11char_traitsIwESaIwEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:80:_ZTVSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 ++OBJECT:81:_ZTSNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 
++OBJECT:81:_ZTSNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:81:_ZTSNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:81:_ZTSNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_LDBL_3.4 ++OBJECT:88:_ZTVN10__cxxabiv117__class_type_infoE@@CXXABI_1.3 ++OBJECT:88:_ZTVN10__cxxabiv120__si_class_type_infoE@@CXXABI_1.3 ++OBJECT:88:_ZTVN10__cxxabiv121__vmi_class_type_infoE@@CXXABI_1.3 ++OBJECT:88:_ZTVNSt7__cxx1115time_get_bynameIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVNSt7__cxx1115time_get_bynameIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEEE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEEE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt14codecvt_bynameIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:88:_ZTVSt14codecvt_bynameIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:88:_ZTVSt19__codecvt_utf8_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt19__codecvt_utf8_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt19__codecvt_utf8_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt20__codecvt_utf16_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt20__codecvt_utf16_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt20__codecvt_utf16_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt23__codecvt_abstract_baseIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:88:_ZTVSt23__codecvt_abstract_baseIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:88:_ZTVSt25__codecvt_utf8_utf16_baseIDiE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt25__codecvt_utf8_utf16_baseIDsE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt25__codecvt_utf8_utf16_baseIwE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt7codecvtIDiDu11__mbstate_tE@@GLIBCXX_3.4.26 ++OBJECT:88:_ZTVSt7codecvtIDic11__mbstate_tE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt7codecvtIDsDu11__mbstate_tE@@GLIBCXX_3.4.26 
++OBJECT:88:_ZTVSt7codecvtIDsc11__mbstate_tE@@GLIBCXX_3.4.21 ++OBJECT:88:_ZTVSt7codecvtIcc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:88:_ZTVSt7codecvtIwc11__mbstate_tE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt10moneypunctIcLb0EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt10moneypunctIcLb1EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt10moneypunctIwLb0EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt10moneypunctIwLb1EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt11__timepunctIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt11__timepunctIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZGVNSt7__cxx1110moneypunctIcLb0EE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx1110moneypunctIcLb1EE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx1110moneypunctIwLb0EE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx1110moneypunctIwLb1EE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx117collateIcE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx117collateIwE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx118messagesIcE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx118messagesIwE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx118numpunctIcE2idE@@GLIBCXX_3.4.21 
++OBJECT:8:_ZGVNSt7__cxx118numpunctIwE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZGVNSt7collateIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt7collateIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8messagesIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8messagesIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8numpunctIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8numpunctIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZGVNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 
++OBJECT:8:_ZGVNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSbIwSt11char_traitsIwESaIwEE4_Rep11_S_max_sizeE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSbIwSt11char_traitsIwESaIwEE4nposE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSs4_Rep11_S_max_sizeE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSs4nposE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt10__num_base11_S_atoms_inE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt10__num_base12_S_atoms_outE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt10money_base8_S_atomsE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt10moneypunctIcLb0EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt10moneypunctIcLb1EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt10moneypunctIwLb0EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt10moneypunctIwLb1EE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt11__timepunctIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt11__timepunctIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1287num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1287num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1287num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1287num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1289money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1289money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1289money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt17__gnu_cxx_ldbl1289money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_LDBL_3.4 ++OBJECT:8:_ZNSt5ctypeIcE10table_sizeE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt5ctypeIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt5ctypeIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt7__cxx1110moneypunctIcLb0EE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx1110moneypunctIcLb1EE2idE@@GLIBCXX_3.4.21 
++OBJECT:8:_ZNSt7__cxx1110moneypunctIwLb0EE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx1110moneypunctIwLb1EE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE4nposE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx1112basic_stringIwSt11char_traitsIwESaIwEE4nposE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx117collateIcE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx117collateIwE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx118messagesIcE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx118messagesIwE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx118numpunctIcE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx118numpunctIwE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx118time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx118time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx119money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx119money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx119money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7__cxx119money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7codecvtIDiDu11__mbstate_tE2idE@@GLIBCXX_3.4.26 ++OBJECT:8:_ZNSt7codecvtIDic11__mbstate_tE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7codecvtIDsDu11__mbstate_tE2idE@@GLIBCXX_3.4.26 ++OBJECT:8:_ZNSt7codecvtIDsc11__mbstate_tE2idE@@GLIBCXX_3.4.21 ++OBJECT:8:_ZNSt7codecvtIcc11__mbstate_tE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt7codecvtIwc11__mbstate_tE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt7collateIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt7collateIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt7num_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt7num_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 
++OBJECT:8:_ZNSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8messagesIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8messagesIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8numpunctIcE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8numpunctIwE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8time_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8time_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8time_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt8time_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt9money_getIcSt19istreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt9money_getIwSt19istreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt9money_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZNSt9money_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE2idE@@GLIBCXX_3.4 ++OBJECT:8:_ZSt15future_category@@GLIBCXX_3.4.14 ++OBJECT:96:_ZTVSt12ctype_bynameIcE@@GLIBCXX_3.4 ++OBJECT:96:_ZTVSt5ctypeIcE@@GLIBCXX_3.4 ++OBJECT:96:_ZTVSt7num_putIcSt19ostreambuf_iteratorIcSt11char_traitsIcEEE@@GLIBCXX_3.4 ++OBJECT:96:_ZTVSt7num_putIwSt19ostreambuf_iteratorIwSt11char_traitsIwEEE@@GLIBCXX_3.4 +diff --git a/libstdc++-v3/configure b/libstdc++-v3/configure +index c07e27569..ffa72a1bb 100755 +--- a/libstdc++-v3/configure ++++ b/libstdc++-v3/configure +@@ -75269,6 +75269,7 @@ case "$target" in + powerpc*-*-linux* | \ + sparc*-*-linux* | \ + s390*-*-linux* | \ ++ sw_64*-*-linux* | \ + alpha*-*-linux*) + cat confdefs.h - <<_ACEOF >conftest.$ac_ext + /* end confdefs.h. */ +@@ -77752,7 +77753,7 @@ _ACEOF + $as_echo_n "checking for first version to support std::exception_ptr... 
" >&6; } + case ${target} in + aarch64-*-* | alpha-*-* | hppa*-*-* | i?86-*-* | x86_64-*-* | \ +- m68k-*-* | powerpc*-*-* | s390*-*-* | *-*-solaris* ) ++ m68k-*-* | powerpc*-*-* | s390*-*-* | sw_64-*-* | *-*-solaris* ) + ac_exception_ptr_since_gcc46=yes + ;; + *) +diff --git a/libstdc++-v3/configure.ac b/libstdc++-v3/configure.ac +index e59bcdb29..895240f4e 100644 +--- a/libstdc++-v3/configure.ac ++++ b/libstdc++-v3/configure.ac +@@ -416,6 +416,7 @@ case "$target" in + powerpc*-*-linux* | \ + sparc*-*-linux* | \ + s390*-*-linux* | \ ++ sw_64*-*-linux* | \ + alpha*-*-linux*) + AC_TRY_COMPILE(, [ + #if !defined __LONG_DOUBLE_128__ || (defined(__sparc__) && defined(__arch64__)) +diff --git a/libstdc++-v3/configure.host b/libstdc++-v3/configure.host +index ec32980aa..930eef4c5 100644 +--- a/libstdc++-v3/configure.host ++++ b/libstdc++-v3/configure.host +@@ -127,6 +127,9 @@ case "${host_cpu}" in + sparc* | ultrasparc) + try_cpu=sparc + ;; ++ sw_64*) ++ try_cpu=sw_64 ++ ;; + *) + if test -d ${glibcxx_srcdir}/config/cpu/${host_cpu}; then + try_cpu=${host_cpu} +-- +2.25.1 + diff --git a/0013-LoongArch-Implement-the-new-vector-cost-model-framew.patch b/0013-LoongArch-Implement-the-new-vector-cost-model-framew.patch new file mode 100644 index 0000000000000000000000000000000000000000..14e86839b1a38dd6265167f7747a9168317b9ba8 --- /dev/null +++ b/0013-LoongArch-Implement-the-new-vector-cost-model-framew.patch @@ -0,0 +1,354 @@ +From 472890b43d2848a46fa13945279308f0a21c55d9 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Wed, 18 Oct 2023 17:43:39 +0800 +Subject: [PATCH 013/188] LoongArch:Implement the new vector cost model + framework. + +This patch make loongarch use the new vector hooks and implements the costing +function determine_suggested_unroll_factor, to make it be able to suggest the +unroll factor for a given loop being vectorized base vec_ops analysis during +vector costing and the available issue information. Referring to aarch64 and +rs6000 port. 
+ +The patch also reduces the cost of unaligned stores, making it equal to the +cost of aligned ones in order to avoid odd alignment peeling. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_vector_costs): Inherit from + vector_costs. Add a constructor. + (loongarch_vector_costs::add_stmt_cost): Use adjust_cost_for_freq to + adjust the cost for inner loops. + (loongarch_vector_costs::count_operations): New function. + (loongarch_vector_costs::determine_suggested_unroll_factor): Ditto. + (loongarch_vector_costs::finish_cost): Ditto. + (loongarch_builtin_vectorization_cost): Adjust. + * config/loongarch/loongarch.opt (loongarch-vect-unroll-limit): New parameter. + (loongarcg-vect-issue-info): Ditto. + (mmemvec-cost): Delete. + * config/loongarch/genopts/loongarch.opt.in + (loongarch-vect-unroll-limit): Ditto. + (loongarcg-vect-issue-info): Ditto. + (mmemvec-cost): Delete. + * doc/invoke.texi (loongarcg-vect-unroll-limit): Document new option. +--- + gcc/config/loongarch/genopts/loongarch.opt.in | 15 +- + gcc/config/loongarch/loongarch.cc | 173 ++++++++++++++++-- + gcc/config/loongarch/loongarch.opt | 15 +- + gcc/doc/invoke.texi | 7 + + 4 files changed, 188 insertions(+), 22 deletions(-) + +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index f18733c24..74cf4a7f7 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -152,10 +152,6 @@ mbranch-cost= + Target RejectNegative Joined UInteger Var(loongarch_branch_cost) + -mbranch-cost=COST Set the cost of branches to roughly COST instructions. + +-mmemvec-cost= +-Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) +-mmemvec-cost=COST Set the cost of vector memory access instructions. +- + mcheck-zero-division + Target Mask(CHECK_ZERO_DIV) + Trap on integer divide by zero. 
+@@ -219,3 +215,14 @@ mrelax + Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) + Take advantage of linker relaxations to reduce the number of instructions + required to materialize symbol addresses. ++ ++-param=loongarch-vect-unroll-limit= ++Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param ++Used to limit unroll factor which indicates how much the autovectorizer may ++unroll a loop. The default value is 6. ++ ++-param=loongarch-vect-issue-info= ++Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) IntegerRange(1, 64) Param ++Indicate how many non memory access vector instructions can be issued per ++cycle, it's used in unroll factor determination for autovectorizer. The ++default value is 4. +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index c0f58f9a9..e22a64600 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -65,6 +65,8 @@ along with GCC; see the file COPYING3. If not see + #include "rtl-iter.h" + #include "opts.h" + #include "function-abi.h" ++#include "cfgloop.h" ++#include "tree-vectorizer.h" + + /* This file should be included last. */ + #include "target-def.h" +@@ -3841,8 +3843,6 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code, + } + } + +-/* Vectorizer cost model implementation. */ +- + /* Implement targetm.vectorize.builtin_vectorization_cost. */ + + static int +@@ -3861,36 +3861,182 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, + case vector_load: + case vec_to_scalar: + case scalar_to_vec: +- case cond_branch_not_taken: +- case vec_promote_demote: + case scalar_store: + case vector_store: + return 1; + ++ case vec_promote_demote: + case vec_perm: + return LASX_SUPPORTED_MODE_P (mode) + && !LSX_SUPPORTED_MODE_P (mode) ? 
2 : 1; + + case unaligned_load: +- case vector_gather_load: +- return 2; +- + case unaligned_store: +- case vector_scatter_store: +- return 10; ++ return 2; + + case cond_branch_taken: +- return 3; ++ return 4; ++ ++ case cond_branch_not_taken: ++ return 2; + + case vec_construct: + elements = TYPE_VECTOR_SUBPARTS (vectype); +- return elements / 2 + 1; ++ if (ISA_HAS_LASX) ++ return elements + 1; ++ else ++ return elements; + + default: + gcc_unreachable (); + } + } + ++class loongarch_vector_costs : public vector_costs ++{ ++public: ++ using vector_costs::vector_costs; ++ ++ unsigned int add_stmt_cost (int count, vect_cost_for_stmt kind, ++ stmt_vec_info stmt_info, slp_tree, tree vectype, ++ int misalign, ++ vect_cost_model_location where) override; ++ void finish_cost (const vector_costs *) override; ++ ++protected: ++ void count_operations (vect_cost_for_stmt, stmt_vec_info, ++ vect_cost_model_location, unsigned int); ++ unsigned int determine_suggested_unroll_factor (loop_vec_info); ++ /* The number of vectorized stmts in loop. */ ++ unsigned m_stmts = 0; ++ /* The number of load and store operations in loop. */ ++ unsigned m_loads = 0; ++ unsigned m_stores = 0; ++ /* Reduction factor for suggesting unroll factor. */ ++ unsigned m_reduc_factor = 0; ++ /* True if the loop contains an average operation. */ ++ bool m_has_avg =false; ++}; ++ ++/* Implement TARGET_VECTORIZE_CREATE_COSTS. 
*/ ++static vector_costs * ++loongarch_vectorize_create_costs (vec_info *vinfo, bool costing_for_scalar) ++{ ++ return new loongarch_vector_costs (vinfo, costing_for_scalar); ++} ++ ++void ++loongarch_vector_costs::count_operations (vect_cost_for_stmt kind, ++ stmt_vec_info stmt_info, ++ vect_cost_model_location where, ++ unsigned int count) ++{ ++ if (!m_costing_for_scalar ++ && is_a (m_vinfo) ++ && where == vect_body) ++ { ++ m_stmts += count; ++ ++ if (kind == scalar_load ++ || kind == vector_load ++ || kind == unaligned_load) ++ m_loads += count; ++ else if (kind == scalar_store ++ || kind == vector_store ++ || kind == unaligned_store) ++ m_stores += count; ++ else if ((kind == scalar_stmt ++ || kind == vector_stmt ++ || kind == vec_to_scalar) ++ && stmt_info && vect_is_reduction (stmt_info)) ++ { ++ tree lhs = gimple_get_lhs (stmt_info->stmt); ++ unsigned int base = FLOAT_TYPE_P (TREE_TYPE (lhs)) ? 2 : 1; ++ m_reduc_factor = MAX (base * count, m_reduc_factor); ++ } ++ } ++} ++ ++unsigned int ++loongarch_vector_costs::determine_suggested_unroll_factor (loop_vec_info loop_vinfo) ++{ ++ class loop *loop = LOOP_VINFO_LOOP (loop_vinfo); ++ ++ if (m_has_avg) ++ return 1; ++ ++ /* Don't unroll if it's specified explicitly not to be unrolled. */ ++ if (loop->unroll == 1 ++ || (OPTION_SET_P (flag_unroll_loops) && !flag_unroll_loops) ++ || (OPTION_SET_P (flag_unroll_all_loops) && !flag_unroll_all_loops)) ++ return 1; ++ ++ unsigned int nstmts_nonldst = m_stmts - m_loads - m_stores; ++ /* Don't unroll if no vector instructions excepting for memory access. */ ++ if (nstmts_nonldst == 0) ++ return 1; ++ ++ /* Use this simple hardware resource model that how many non vld/vst ++ vector instructions can be issued per cycle. */ ++ unsigned int issue_info = loongarch_vect_issue_info; ++ unsigned int reduc_factor = m_reduc_factor > 1 ? 
m_reduc_factor : 1; ++ unsigned int uf = CEIL (reduc_factor * issue_info, nstmts_nonldst); ++ uf = MIN ((unsigned int) loongarch_vect_unroll_limit, uf); ++ ++ return 1 << ceil_log2 (uf); ++} ++ ++unsigned ++loongarch_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind, ++ stmt_vec_info stmt_info, slp_tree, ++ tree vectype, int misalign, ++ vect_cost_model_location where) ++{ ++ unsigned retval = 0; ++ ++ if (flag_vect_cost_model) ++ { ++ int stmt_cost = loongarch_builtin_vectorization_cost (kind, vectype, ++ misalign); ++ retval = adjust_cost_for_freq (stmt_info, where, count * stmt_cost); ++ m_costs[where] += retval; ++ ++ count_operations (kind, stmt_info, where, count); ++ } ++ ++ if (stmt_info) ++ { ++ /* Detect the use of an averaging operation. */ ++ gimple *stmt = stmt_info->stmt; ++ if (is_gimple_call (stmt) ++ && gimple_call_internal_p (stmt)) ++ { ++ switch (gimple_call_internal_fn (stmt)) ++ { ++ case IFN_AVG_FLOOR: ++ case IFN_AVG_CEIL: ++ m_has_avg = true; ++ default: ++ break; ++ } ++ } ++ } ++ ++ return retval; ++} ++ ++void ++loongarch_vector_costs::finish_cost (const vector_costs *scalar_costs) ++{ ++ loop_vec_info loop_vinfo = dyn_cast (m_vinfo); ++ if (loop_vinfo) ++ { ++ m_suggested_unroll_factor = determine_suggested_unroll_factor (loop_vinfo); ++ } ++ ++ vector_costs::finish_cost (scalar_costs); ++} ++ + /* Implement TARGET_ADDRESS_COST. 
*/ + + static int +@@ -7261,9 +7407,6 @@ loongarch_option_override_internal (struct gcc_options *opts, + if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib) + error ("%qs cannot be used for compiling a shared library", + "-mdirect-extern-access"); +- if (loongarch_vector_access_cost == 0) +- loongarch_vector_access_cost = 5; +- + + switch (la_target.cmodel) + { +@@ -11275,6 +11418,8 @@ loongarch_builtin_support_vector_misalignment (machine_mode mode, + #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST + #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \ + loongarch_builtin_vectorization_cost ++#undef TARGET_VECTORIZE_CREATE_COSTS ++#define TARGET_VECTORIZE_CREATE_COSTS loongarch_vectorize_create_costs + + + #undef TARGET_IN_SMALL_DATA_P +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 78f2baf3a..34bd832bd 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -159,10 +159,6 @@ mbranch-cost= + Target RejectNegative Joined UInteger Var(loongarch_branch_cost) + -mbranch-cost=COST Set the cost of branches to roughly COST instructions. + +-mmemvec-cost= +-Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5) +-mmemvec-cost=COST Set the cost of vector memory access instructions. +- + mcheck-zero-division + Target Mask(CHECK_ZERO_DIV) + Trap on integer divide by zero. +@@ -226,3 +222,14 @@ mrelax + Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) + Take advantage of linker relaxations to reduce the number of instructions + required to materialize symbol addresses. ++ ++-param=loongarch-vect-unroll-limit= ++Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param ++Used to limit unroll factor which indicates how much the autovectorizer may ++unroll a loop. The default value is 6. 
++ ++-param=loongarch-vect-issue-info= ++Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) IntegerRange(1, 64) Param ++Indicate how many non memory access vector instructions can be issued per ++cycle, it's used in unroll factor determination for autovectorizer. The ++default value is 4. +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index 7eed77836..168f3d0db 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -24632,6 +24632,13 @@ environments where no dynamic link is performed, like firmwares, OS + kernels, executables linked with @option{-static} or @option{-static-pie}. + @option{-mdirect-extern-access} is not compatible with @option{-fPIC} or + @option{-fpic}. ++ ++@item loongarch-vect-unroll-limit ++The vectorizer will use available tuning information to determine whether it ++would be beneficial to unroll the main vectorized loop and by how much. This ++parameter set's the upper bound of how much the vectorizer will unroll the main ++loop. The default value is six. ++ + @end table + + @node M32C Options +-- +2.43.0 + diff --git a/0013-Sw64-Port-set-raise-FPE-when-DivbyZero-on-Sw_64.patch b/0013-Sw64-Port-set-raise-FPE-when-DivbyZero-on-Sw_64.patch new file mode 100644 index 0000000000000000000000000000000000000000..788d5c804009741aea8d2f493a3ac6c4956eb5f1 --- /dev/null +++ b/0013-Sw64-Port-set-raise-FPE-when-DivbyZero-on-Sw_64.patch @@ -0,0 +1,28 @@ +From e8813c5a4ba57493f92214f6d97433208ac30d9e Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:55:30 +0800 +Subject: [PATCH 13/16] Sw64 Port: set raise FPE when DivbyZero on Sw_64 + platform + +--- + intl/dcigettext.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/intl/dcigettext.c b/intl/dcigettext.c +index a8d4a14d2..a828f0419 100644 +--- a/intl/dcigettext.c ++++ b/intl/dcigettext.c +@@ -72,8 +72,8 @@ extern int errno; + #ifdef _LIBC + /* Guess whether integer division by zero raises signal SIGFPE. 
+ Set to 1 only if you know for sure. In case of doubt, set to 0. */ +-# if defined __alpha__ || defined __arm__ || defined __i386__ \ +- || defined __m68k__ || defined __s390__ ++#if defined __alpha__ || defined __arm__ || defined __i386__ \ ++ || defined __m68k__ || defined __s390__ || defined __sw_64__ + # define INTDIV0_RAISES_SIGFPE 1 + # else + # define INTDIV0_RAISES_SIGFPE 0 +-- +2.25.1 + diff --git a/backport-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch b/0013-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch similarity index 90% rename from backport-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch rename to 0013-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch index de3995fc2b996ceba0d834cfb4f8361f99b0bd63..6f89af0b59810287568674b79cd19e55140738aa 100644 --- a/backport-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch +++ b/0013-i386-Only-enable-small-loop-unrolling-in-backend-PR-.patch @@ -1,7 +1,7 @@ -From 5c07825ca0c34dd946a8cfc0325ddb452d7f65c5 Mon Sep 17 00:00:00 2001 +From 96898a9cd8c159625848247bd2f3a09e5c12fcfa Mon Sep 17 00:00:00 2001 From: Hongyu Wang Date: Sat, 19 Nov 2022 09:38:00 +0800 -Subject: [PATCH 5/5] i386: Only enable small loop unrolling in backend [PR +Subject: [PATCH 13/22] i386: Only enable small loop unrolling in backend [PR 107692] Followed by the discussion in pr107692, -munroll-only-small-loops @@ -44,14 +44,14 @@ gcc/testsuite/ChangeLog: gcc/common/config/i386/i386-common.cc | 8 ++++++ gcc/config/i386/i386-options.cc | 34 ++++++++++++++++++++++--- gcc/config/i386/i386.cc | 18 ++++--------- - gcc/loop-init.cc | 11 +++----- + gcc/loop-init.cc | 10 +++----- gcc/testsuite/gcc.dg/guality/loop-1.c | 2 -- gcc/testsuite/gcc.target/i386/pr86270.c | 2 +- gcc/testsuite/gcc.target/i386/pr93002.c | 2 +- - 7 files changed, 49 insertions(+), 28 deletions(-) + 7 files changed, 48 insertions(+), 28 deletions(-) diff --git a/gcc/common/config/i386/i386-common.cc 
b/gcc/common/config/i386/i386-common.cc -index e1c1fb07d8a..5e777849f91 100644 +index cdd5caa55..f650e255f 100644 --- a/gcc/common/config/i386/i386-common.cc +++ b/gcc/common/config/i386/i386-common.cc @@ -1687,7 +1687,15 @@ static const struct default_options ix86_option_optimization_table[] = @@ -71,7 +71,7 @@ index e1c1fb07d8a..5e777849f91 100644 problem with not enough registers even worse. */ { OPT_LEVELS_ALL, OPT_fschedule_insns, NULL, 0 }, diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc -index 32cc58a764b..b853ff55825 100644 +index 099cec4b6..ff44ad4e0 100644 --- a/gcc/config/i386/i386-options.cc +++ b/gcc/config/i386/i386-options.cc @@ -1816,8 +1816,37 @@ ix86_recompute_optlev_based_flags (struct gcc_options *opts, @@ -132,10 +132,10 @@ index 32cc58a764b..b853ff55825 100644 SET_OPTION_IF_UNSET (opts, opts_set, ix86_branch_cost, ix86_tune_cost->branch_cost); diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc -index 39b2468799c..000415c0e2e 100644 +index e56004300..462dce10e 100644 --- a/gcc/config/i386/i386.cc +++ b/gcc/config/i386/i386.cc -@@ -23563,20 +23563,12 @@ ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop) +@@ -23572,20 +23572,12 @@ ix86_loop_unroll_adjust (unsigned nunroll, class loop *loop) /* Unroll small size loop when unroll factor is not explicitly specified. 
*/ @@ -162,26 +162,25 @@ index 39b2468799c..000415c0e2e 100644 if (!TARGET_ADJUST_UNROLL) diff --git a/gcc/loop-init.cc b/gcc/loop-init.cc -index 84336865ef7..ed1b2f6ebab 100644 +index f1c717041..1e4f6cfd7 100644 --- a/gcc/loop-init.cc +++ b/gcc/loop-init.cc -@@ -565,12 +565,10 @@ public: +@@ -565,12 +565,9 @@ public: {} /* opt_pass methods: */ -- virtual bool gate (function * fun) +- virtual bool gate (function *fun) + virtual bool gate (function *) { - return (flag_unroll_loops || flag_unroll_all_loops || cfun->has_unroll - || (targetm.loop_unroll_adjust - && optimize >= 2 - && optimize_function_for_speed_p (fun))); -+ return (flag_unroll_loops || flag_unroll_all_loops -+ || cfun->has_unroll); ++ return (flag_unroll_loops || flag_unroll_all_loops || cfun->has_unroll); } virtual unsigned int execute (function *); -@@ -586,8 +584,7 @@ pass_rtl_unroll_loops::execute (function *fun) +@@ -586,8 +583,7 @@ pass_rtl_unroll_loops::execute (function *fun) if (dump_file) df_dump (dump_file); @@ -192,7 +191,7 @@ index 84336865ef7..ed1b2f6ebab 100644 if (flag_unroll_all_loops) flags |= UAP_UNROLL_ALL; diff --git a/gcc/testsuite/gcc.dg/guality/loop-1.c b/gcc/testsuite/gcc.dg/guality/loop-1.c -index a32ea445a3f..1b1f6d32271 100644 +index a32ea445a..1b1f6d322 100644 --- a/gcc/testsuite/gcc.dg/guality/loop-1.c +++ b/gcc/testsuite/gcc.dg/guality/loop-1.c @@ -1,7 +1,5 @@ @@ -204,7 +203,7 @@ index a32ea445a3f..1b1f6d32271 100644 #include "../nop.h" diff --git a/gcc/testsuite/gcc.target/i386/pr86270.c b/gcc/testsuite/gcc.target/i386/pr86270.c -index cbc9fbb0450..98b012caf23 100644 +index cbc9fbb04..98b012caf 100644 --- a/gcc/testsuite/gcc.target/i386/pr86270.c +++ b/gcc/testsuite/gcc.target/i386/pr86270.c @@ -1,5 +1,5 @@ @@ -215,7 +214,7 @@ index cbc9fbb0450..98b012caf23 100644 int *a; long len; diff --git a/gcc/testsuite/gcc.target/i386/pr93002.c b/gcc/testsuite/gcc.target/i386/pr93002.c -index f75a847f75d..7e2d869e17b 100644 +index f75a847f7..7e2d869e1 100644 --- 
a/gcc/testsuite/gcc.target/i386/pr93002.c +++ b/gcc/testsuite/gcc.target/i386/pr93002.c @@ -1,6 +1,6 @@ @@ -227,5 +226,5 @@ index f75a847f75d..7e2d869e17b 100644 volatile int sink; -- -2.18.2 +2.33.0 diff --git a/0014-Array-widen-compare-Add-a-new-optimization-for-array.patch b/0014-Array-widen-compare-Add-a-new-optimization-for-array.patch new file mode 100644 index 0000000000000000000000000000000000000000..182560e962a0d32b84c9aa42cf822b9c241b0eaa --- /dev/null +++ b/0014-Array-widen-compare-Add-a-new-optimization-for-array.patch @@ -0,0 +1,1981 @@ +From 5ef5f6c4ae806f56ff81450c759f36d59b5b23db Mon Sep 17 00:00:00 2001 +From: dingguangya +Date: Sat, 29 Jul 2023 17:45:01 +0800 +Subject: [PATCH 14/22] [Array-widen-compare] Add a new optimization for array + comparison scenarios + +Add option farray-widen-compare. +For an array pointer whose element is a single-byte type, +by changing the pointer type to a long-byte type, the elements +can be combined and compared after loading. +--- + gcc/Makefile.in | 1 + + gcc/common.opt | 5 + + gcc/doc/invoke.texi | 13 +- + gcc/passes.def | 1 + + .../gcc.dg/tree-ssa/awiden-compare-1.c | 19 + + .../gcc.dg/tree-ssa/awiden-compare-2.c | 90 + + .../gcc.dg/tree-ssa/awiden-compare-3.c | 22 + + .../gcc.dg/tree-ssa/awiden-compare-4.c | 22 + + .../gcc.dg/tree-ssa/awiden-compare-5.c | 19 + + .../gcc.dg/tree-ssa/awiden-compare-6.c | 19 + + .../gcc.dg/tree-ssa/awiden-compare-7.c | 22 + + .../gcc.dg/tree-ssa/awiden-compare-8.c | 24 + + gcc/timevar.def | 1 + + gcc/tree-pass.h | 1 + + gcc/tree-ssa-loop-array-widen-compare.cc | 1555 +++++++++++++++++ + 15 files changed, 1813 insertions(+), 1 deletion(-) + create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-1.c + create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-2.c + create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-3.c + create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-4.c + create mode 100644 
gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-5.c + create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-6.c + create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-7.c + create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-8.c + create mode 100644 gcc/tree-ssa-loop-array-widen-compare.cc + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 31ff95500..0aabc6ea3 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -1653,6 +1653,7 @@ OBJS = \ + tree-ssa-loop-ivopts.o \ + tree-ssa-loop-manip.o \ + tree-ssa-loop-niter.o \ ++ tree-ssa-loop-array-widen-compare.o \ + tree-ssa-loop-prefetch.o \ + tree-ssa-loop-split.o \ + tree-ssa-loop-unswitch.o \ +diff --git a/gcc/common.opt b/gcc/common.opt +index e365a48bc..4d91ce8cf 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1116,6 +1116,11 @@ fasynchronous-unwind-tables + Common Var(flag_asynchronous_unwind_tables) Optimization + Generate unwind tables that are exact at each instruction boundary. + ++farray-widen-compare ++Common Var(flag_array_widen_compare) Optimization ++Extends types for pointers to arrays to improve array comparsion performance. ++In some extreme situations this may result in unsafe behavior. ++ + fauto-inc-dec + Common Var(flag_auto_inc_dec) Init(1) Optimization + Generate auto-inc/dec instructions. +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index ff8cd032f..a11e2c24b 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -507,7 +507,7 @@ Objective-C and Objective-C++ Dialects}. 
+ -falign-loops[=@var{n}[:@var{m}:[@var{n2}[:@var{m2}]]]] @gol + -fno-allocation-dce -fallow-store-data-races @gol + -fassociative-math -fauto-profile -fauto-profile[=@var{path}] @gol +--fauto-inc-dec -fbranch-probabilities @gol ++-farray-widen-compare -fauto-inc-dec -fbranch-probabilities @gol + -fcaller-saves @gol + -fcombine-stack-adjustments -fconserve-stack @gol + -fcompare-elim -fcprop-registers -fcrossjumping @gol +@@ -11387,6 +11387,17 @@ This pass is always skipped on architectures that do not have + instructions to support this. Enabled by default at @option{-O1} and + higher on architectures that support this. + ++@item -farray-widen-compare ++@opindex farray-widen-compare ++In the narrow-byte array comparison scenario, the types of pointers ++pointing to array are extended so that elements of multiple bytes can ++be loaded at a time when a wide type is used to dereference an array, ++thereby improving the performance of this comparison scenario. In some ++extreme situations this may result in unsafe behavior. ++ ++This option may generate better or worse code; results are highly dependent ++on the structure of loops within the source code. ++ + @item -fdce + @opindex fdce + Perform dead code elimination (DCE) on RTL@. +diff --git a/gcc/passes.def b/gcc/passes.def +index 375d3d62d..8dbb7983e 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -94,6 +94,7 @@ along with GCC; see the file COPYING3. 
If not see + NEXT_PASS (pass_dse); + NEXT_PASS (pass_cd_dce, false /* update_address_taken_p */); + NEXT_PASS (pass_phiopt, true /* early_p */); ++ NEXT_PASS (pass_array_widen_compare); + NEXT_PASS (pass_tail_recursion); + NEXT_PASS (pass_if_to_switch); + NEXT_PASS (pass_convert_switch); +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-1.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-1.c +new file mode 100644 +index 000000000..e18ef5ec1 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-1.c +@@ -0,0 +1,19 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include ++ ++#define my_min(x, y) ((x) < (y) ? (x) : (y)) ++ ++uint32_t ++func (uint32_t len0, uint32_t len1, const uint32_t len_limit, const uint8_t *const pb, const uint8_t *const cur) ++{ ++ uint32_t len = my_min(len0, len1); ++ while (++len != len_limit) ++ if (pb[len] != cur[len]) ++ break; ++ return len; ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 1 "awiden_compare"} } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-2.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-2.c +new file mode 100644 +index 000000000..f4b20b43c +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-2.c +@@ -0,0 +1,90 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include ++ ++#define EMPTY_HASH_VALUE 0 ++#define my_min(x, y) ((x) < (y) ? 
(x) : (y)) ++#define true 1 ++ ++typedef struct { ++ uint32_t len; ++ uint32_t dist; ++} lzma_match; ++ ++ ++lzma_match * ++func ( ++ const uint32_t len_limit, ++ const uint32_t pos, ++ const uint8_t *const cur, ++ uint32_t cur_match, ++ uint32_t depth, ++ uint32_t *const son, ++ const uint32_t cyclic_pos, ++ const uint32_t cyclic_size, ++ lzma_match *matches, ++ uint32_t len_best) ++{ ++ uint32_t *ptr0 = son + (cyclic_pos << 1) + 1; ++ uint32_t *ptr1 = son + (cyclic_pos << 1); ++ ++ uint32_t len0 = 0; ++ uint32_t len1 = 0; ++ ++ while (true) ++ { ++ const uint32_t delta = pos - cur_match; ++ if (depth-- == 0 || delta >= cyclic_size) ++ { ++ *ptr0 = EMPTY_HASH_VALUE; ++ *ptr1 = EMPTY_HASH_VALUE; ++ return matches; ++ } ++ ++ uint32_t *const pair = son + ((cyclic_pos - delta + (delta > cyclic_pos ? cyclic_size : 0)) << 1); ++ ++ const uint8_t *const pb = cur -delta; ++ uint32_t len = my_min(len0, len1); ++ ++ if (pb[len] == cur[len]) ++ { ++ while (++len != len_limit) ++ if (pb[len] != cur[len]) ++ break; ++ ++ if (len_best < len) ++ { ++ len_best = len; ++ matches->len = len; ++ matches->dist = delta - 1; ++ ++matches; ++ ++ if (len == len_limit) ++ { ++ *ptr1 = pair[0]; ++ *ptr0 = pair[1]; ++ return matches; ++ } ++ } ++ } ++ ++ if (pb[len] < cur[len]) ++ { ++ *ptr1 = cur_match; ++ ptr1 = pair + 1; ++ cur_match = *ptr1; ++ len1 = len; ++ } ++ else ++ { ++ *ptr0 = cur_match; ++ ptr0 = pair; ++ cur_match = *ptr0; ++ len0 = len; ++ } ++ } ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 1 "awiden_compare"} } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-3.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-3.c +new file mode 100644 +index 000000000..86f5e7a1e +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-3.c +@@ -0,0 +1,22 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include 
++ ++#define my_min(x, y) ((x) < (y) ? (x) : (y)) ++ ++uint32_t ++func (uint32_t len0, uint32_t len1, const uint32_t len_limit, const uint8_t *const pb, const uint8_t *const cur) ++{ ++ uint32_t len = my_min(len0, len1); ++ while (len != len_limit) ++ { ++ if (pb[len] != cur[len]) ++ break; ++ len = len + 1; ++ } ++ return len; ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 1 "awiden_compare"} } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-4.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-4.c +new file mode 100644 +index 000000000..d66558699 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-4.c +@@ -0,0 +1,22 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include ++ ++#define my_min(x, y) ((x) < (y) ? (x) : (y)) ++ ++uint32_t ++func (uint32_t len0, uint32_t len1, const uint32_t len_limit, const uint8_t *const pb, const uint8_t *const cur) ++{ ++ uint32_t len = my_min(len0, len1); ++ while (len != len_limit) ++ { ++ if (pb[len] != cur[len]) ++ break; ++ len = len + 2; ++ } ++ return len; ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 0 "awiden_compare"} } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-5.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-5.c +new file mode 100644 +index 000000000..e3e12bca4 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-5.c +@@ -0,0 +1,19 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include ++ ++#define my_min(x, y) ((x) < (y) ? 
(x) : (y)) ++ ++uint32_t ++func (uint32_t len0, uint32_t len1, const uint32_t len_limit, const uint8_t *const pb, const uint8_t *const cur) ++{ ++ uint32_t len = my_min(len0, len1); ++ while (++len != len_limit) ++ if (pb[len] != cur[len-1]) ++ break; ++ return len; ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 0 "awiden_compare"} } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-6.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-6.c +new file mode 100644 +index 000000000..b8500735e +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-6.c +@@ -0,0 +1,19 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include ++ ++#define my_min(x, y) ((x) < (y) ? (x) : (y)) ++ ++uint32_t ++func (uint32_t len0, uint32_t len1, const uint32_t len_limit, const uint8_t *const pb, const uint8_t *const cur) ++{ ++ uint32_t len = my_min(len0, len1); ++ while (len++ != len_limit) ++ if (pb[len] != cur[len]) ++ break; ++ return len; ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 0 "awiden_compare"} } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-7.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-7.c +new file mode 100644 +index 000000000..977bf5685 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-7.c +@@ -0,0 +1,22 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include ++ ++#define my_min(x, y) ((x) < (y) ? 
(x) : (y)) ++ ++uint32_t ++func (uint32_t len0, uint32_t len1, const uint32_t len_limit, const uint8_t *const pb, const uint8_t *const cur) ++{ ++ uint32_t len = my_min(len0, len1); ++ while (len != len_limit) ++ { ++ len = len + 1; ++ if (pb[len] != cur[len]) ++ break; ++ } ++ return len; ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 0 "awiden_compare"} } */ +diff --git a/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-8.c b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-8.c +new file mode 100644 +index 000000000..386784c92 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/tree-ssa/awiden-compare-8.c +@@ -0,0 +1,24 @@ ++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */ ++/* { dg-options "-O3 -mabi=lp64 -farray-widen-compare -fdump-tree-awiden_compare-details" } */ ++ ++#include ++#include ++ ++#define my_min(x, y) ((x) < (y) ? (x) : (y)) ++ ++uint32_t ++func (uint32_t len0, uint32_t len1, const uint32_t len_limit, const uint8_t *const pb, const uint8_t *const cur) ++{ ++ uint32_t len = my_min(len0, len1); ++ while (++len != len_limit) ++ { ++ if (pb[len] != cur[len]) ++ { ++ len = len - 1; ++ break; ++ } ++ } ++ return len; ++} ++ ++/* { dg-final { scan-tree-dump-times "loop form is success" 0 "awiden_compare"} } */ +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 2dae5e1c7..794b8017d 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -216,6 +216,7 @@ DEFTIMEVAR (TV_TREE_NRV , "tree NRV optimization") + DEFTIMEVAR (TV_TREE_COPY_RENAME , "tree rename SSA copies") + DEFTIMEVAR (TV_TREE_SSA_VERIFY , "tree SSA verifier") + DEFTIMEVAR (TV_TREE_STMT_VERIFY , "tree STMT verifier") ++DEFTIMEVAR (TV_TREE_ARRAY_WIDEN_COMPARE, "tree array widen compare") + DEFTIMEVAR (TV_TREE_SWITCH_CONVERSION, "tree switch conversion") + DEFTIMEVAR (TV_TREE_SWITCH_LOWERING, "tree switch lowering") + DEFTIMEVAR (TV_TREE_RECIP , "gimple CSE reciprocals") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index 606d1d60b..55ee2fe7f 100644 +--- 
a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -453,6 +453,7 @@ extern gimple_opt_pass *make_pass_cselim (gcc::context *ctxt); + extern gimple_opt_pass *make_pass_phiopt (gcc::context *ctxt); + extern gimple_opt_pass *make_pass_forwprop (gcc::context *ctxt); + extern gimple_opt_pass *make_pass_phiprop (gcc::context *ctxt); ++extern gimple_opt_pass *make_pass_array_widen_compare (gcc::context *ctxt); + extern gimple_opt_pass *make_pass_tree_ifcombine (gcc::context *ctxt); + extern gimple_opt_pass *make_pass_dse (gcc::context *ctxt); + extern gimple_opt_pass *make_pass_nrv (gcc::context *ctxt); +diff --git a/gcc/tree-ssa-loop-array-widen-compare.cc b/gcc/tree-ssa-loop-array-widen-compare.cc +new file mode 100644 +index 000000000..ba6170fa0 +--- /dev/null ++++ b/gcc/tree-ssa-loop-array-widen-compare.cc +@@ -0,0 +1,1555 @@ ++/* Array widen compare. ++ Copyright (C) 2022-2023 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it ++under the terms of the GNU General Public License as published by the ++Free Software Foundation; either version 3, or (at your option) any ++later version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. 
*/ ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "backend.h" ++#include "target.h" ++#include "tree.h" ++#include "gimple.h" ++#include "tree-pass.h" ++#include "gimple-ssa.h" ++#include "tree-pretty-print.h" ++#include "fold-const.h" ++#include "gimplify.h" ++#include "gimple-iterator.h" ++#include "tree-ssa-loop-manip.h" ++#include "tree-ssa-loop.h" ++#include "ssa.h" ++#include "tree-into-ssa.h" ++#include "cfganal.h" ++#include "cfgloop.h" ++#include "gimple-pretty-print.h" ++#include "tree-cfg.h" ++#include "cgraph.h" ++#include "print-tree.h" ++#include "cfghooks.h" ++#include "gimple-fold.h" ++ ++/* This pass handles scenarios similar to the following: ++ ++ uint32_t ++ func (uint32_t len0, uint32_t len1, const uint32_t len_limit, ++ const uint8_t *const pb, const uint8_t *const cur) ++ { ++ uint32_t len = my_min (len0, len1); ++ while (++len != len_limit) ++ if (pb[len] != cur[len]) ++ break; ++ return len; ++ } ++ ++ Features of this type of loop: ++ 1) the loop has two exits; ++ 2) One of the exits comes from the comparison result of the array; ++ ++ From the source code point of view, the pass completes the conversion of the ++ above scenario into: ++ ++ uint32_t ++ func (uint32_t len0, uint32_t len1, const uint32_t len_limit, ++ const uint8_t *const pb, const uint8_t *const cur) ++ { ++ uint32_t len = my_min (len0, len1); ++ // align_loop ++ for(++len; len + sizeof(uint64_t) <= len_limit; len += sizeof (uint64_t)) ++ { ++ uint64_t a = *((uint64_t*)(cur+len)); ++ uint64_t b = *((uint64_t*)(pb+len)); ++ if (a != b) ++ { ++ int lz = __builtin_ctzll (a ^ b); ++ len += lz / 8; ++ return len; ++ } ++ } ++ // epilogue_loop ++ for (;len != len_limit; ++len) ++ if (pb[len] != cur[len]) ++ break; ++ return len; ++ } ++ ++ This pass is to complete the conversion of such scenarios from the internal ++ perspective of the compiler: ++ 1) determine_loop_form: The function completes the screening of such ++ scenarios; ++ 2) 
convert_to_new_loop: The function completes the conversion of ++ origin_loop to new loops, and removes origin_loop; ++ 3) origin_loop_info: The structure is used to record important information ++ of origin_loop: such as loop exit, growth step size ++ of loop induction variable, initial value ++ of induction variable, etc; ++ 4) create_new_loops: The function is used as the key content of the pass ++ to complete the creation of new loops. */ ++ ++/* The useful information of origin loop. */ ++ ++struct origin_loop_info ++{ ++ tree base; /* The initial index of the array in the old loop. */ ++ tree limit; /* The limit index of the array in the old loop. */ ++ tree arr1; /* Array 1 in the old loop. */ ++ tree arr2; /* Array 2 in the old loop. */ ++ edge entry_edge; /* The edge into the old loop. */ ++ basic_block exit_bb1; ++ basic_block exit_bb2; ++ edge exit_e1; ++ edge exit_e2; ++ gimple *cond_stmt1; ++ gimple *cond_stmt2; ++ gimple *update_stmt; ++ bool exist_prolog_assgin; ++ /* Whether the marker has an initial value assigned ++ to the array index. */ ++ unsigned HOST_WIDE_INT step; ++ /* The growth step of the loop induction variable. */ ++}; ++ ++typedef struct origin_loop_info origin_loop_info; ++ ++static origin_loop_info origin_loop; ++hash_map defs_map; ++ ++/* Dump the bb information in a loop. */ ++ ++static void ++dump_loop_bb (struct loop *loop) ++{ ++ basic_block *body = get_loop_body_in_dom_order (loop); ++ basic_block bb = NULL; ++ ++ for (unsigned i = 0; i < loop->num_nodes; i++) ++ { ++ bb = body[i]; ++ if (bb->loop_father != loop) ++ { ++ continue; ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "===== the %dth bb of loop ==========:\n", i); ++ gimple_dump_bb (dump_file, bb, 0, dump_flags); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ free (body); ++} ++ ++/* Return true if the loop has precisely one backedge. 
*/ ++ ++static bool ++loop_single_backedge_p (class loop *loop) ++{ ++ basic_block latch = loop->latch; ++ if (!single_succ_p (latch)) ++ return false; ++ ++ edge e = single_succ_edge (latch); ++ edge backedge = find_edge (latch, loop->header); ++ ++ if (e != backedge) ++ return false; ++ ++ return true; ++} ++ ++/* Return true if the loop has precisely one preheader BB. */ ++ ++static bool ++loop_single_preheader_bb (class loop *loop) ++{ ++ basic_block header = loop->header; ++ if (EDGE_COUNT (header->preds) != 2) ++ return false; ++ ++ edge e1 = EDGE_PRED (header, 0); ++ edge e2 = EDGE_PRED (header, 1); ++ ++ if ((e1->src == loop->latch && e2->src->loop_father != loop) ++ || (e2->src == loop->latch && e1->src->loop_father != loop)) ++ return true; ++ ++ return false; ++} ++ ++/* Initialize the origin_loop structure. */ ++static void ++init_origin_loop_structure () ++{ ++ origin_loop.base = NULL; ++ origin_loop.limit = NULL; ++ origin_loop.arr1 = NULL; ++ origin_loop.arr2 = NULL; ++ origin_loop.exit_e1 = NULL; ++ origin_loop.exit_e2 = NULL; ++ origin_loop.exit_bb1 = NULL; ++ origin_loop.exit_bb2 =NULL; ++ origin_loop.entry_edge = NULL; ++ origin_loop.cond_stmt1 = NULL; ++ origin_loop.cond_stmt2 = NULL; ++ origin_loop.update_stmt = NULL; ++ origin_loop.exist_prolog_assgin = false; ++ origin_loop.step = 0; ++} ++ ++/* Get the edge that first entered the loop. */ ++ ++static edge ++get_loop_preheader_edge (class loop *loop) ++{ ++ edge e; ++ edge_iterator ei; ++ ++ FOR_EACH_EDGE (e, ei, loop->header->preds) ++ if (e->src != loop->latch) ++ break; ++ ++ if (!e) ++ { ++ gcc_assert (!loop_outer (loop)); ++ return single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)); ++ } ++ ++ return e; ++} ++ ++/* Make sure the exit condition stmt satisfies a specific form. 
*/ ++ ++static bool ++check_cond_stmt (gimple *stmt) ++{ ++ if (!stmt) ++ return false; ++ if (gimple_code (stmt) != GIMPLE_COND) ++ return false; ++ ++ if (gimple_cond_code (stmt) != NE_EXPR && gimple_cond_code (stmt) != EQ_EXPR) ++ return false; ++ ++ tree lhs = gimple_cond_lhs (stmt); ++ tree rhs = gimple_cond_rhs (stmt); ++ ++ /* The parameter that does not support the cond statement is not SSA_NAME. ++ eg: if (len_1 != 100). */ ++ if (TREE_CODE (lhs) != SSA_NAME || TREE_CODE (rhs) != SSA_NAME) ++ return false; ++ ++ return true; ++} ++ ++/* Record the exit information in the original loop including exit edge, ++ exit bb block, exit condition stmt, ++ eg: exit_eX origin_exit_bbX cond_stmtX. */ ++ ++static bool ++record_origin_loop_exit_info (class loop *loop) ++{ ++ bool found = false; ++ edge e = NULL; ++ unsigned i = 0; ++ gimple *stmt; ++ ++ if (origin_loop.exit_e1 != NULL || origin_loop.exit_bb1 != NULL ++ || origin_loop.exit_e2 != NULL || origin_loop.exit_bb2 != NULL ++ || origin_loop.cond_stmt1 != NULL || origin_loop.cond_stmt2 != NULL) ++ return false; ++ ++ vec exit_edges = get_loop_exit_edges (loop); ++ if (exit_edges == vNULL) ++ return false; ++ ++ if (exit_edges.length () != 2) ++ goto fail; ++ ++ FOR_EACH_VEC_ELT (exit_edges, i, e) ++ { ++ if (e->src == loop->header) ++ { ++ origin_loop.exit_e1 = e; ++ origin_loop.exit_bb1 = e->dest; ++ stmt = gsi_stmt (gsi_last_bb (e->src)); ++ if (check_cond_stmt (stmt)) ++ origin_loop.cond_stmt1 = stmt; ++ } ++ else ++ { ++ origin_loop.exit_e2 = e; ++ origin_loop.exit_bb2 = e->dest; ++ stmt = gsi_stmt (gsi_last_bb (e->src)); ++ if (check_cond_stmt (stmt)) ++ origin_loop.cond_stmt2 = stmt; ++ } ++ } ++ ++ if (origin_loop.exit_e1 != NULL && origin_loop.exit_bb1 != NULL ++ && origin_loop.exit_e2 != NULL && origin_loop.exit_bb2 != NULL ++ && origin_loop.cond_stmt1 != NULL && origin_loop.cond_stmt2 != NULL) ++ found = true; ++ ++fail: ++ exit_edges.release (); ++ return found; ++} ++ ++/* Returns true if t is 
SSA_NAME and user variable exists. */ ++ ++static bool ++ssa_name_var_p (tree t) ++{ ++ if (!t || TREE_CODE (t) != SSA_NAME) ++ return false; ++ if (SSA_NAME_VAR (t)) ++ return true; ++ return false; ++} ++ ++/* Returns true if t1 and t2 are SSA_NAME and belong to the same variable. */ ++ ++static bool ++same_ssa_name_var_p (tree t1, tree t2) ++{ ++ if (!ssa_name_var_p (t1) || !ssa_name_var_p (t2)) ++ return false; ++ if (SSA_NAME_VAR (t1) == SSA_NAME_VAR (t2)) ++ return true; ++ return false; ++} ++ ++/* Get origin loop induction variable upper bound. */ ++ ++static bool ++get_iv_upper_bound (gimple *stmt) ++{ ++ if (origin_loop.limit != NULL) ++ return false; ++ ++ tree lhs = gimple_cond_lhs (stmt); ++ tree rhs = gimple_cond_rhs (stmt); ++ ++ if (TREE_CODE (TREE_TYPE (lhs)) != INTEGER_TYPE ++ || TREE_CODE (TREE_TYPE (rhs)) != INTEGER_TYPE) ++ return false; ++ ++ gimple *g = SSA_NAME_DEF_STMT (rhs); ++ ++ /* TODO: Currently, the input restrictions on lhs and rhs are implemented ++ through PARM_DECL. We may consider releasing the restrictions later, and ++ we need to consider the overall adaptation scenario and adding test ++ cases. */ ++ if (ssa_name_var_p (rhs) && TREE_CODE (SSA_NAME_VAR (rhs)) == PARM_DECL ++ && g && gimple_code (g) == GIMPLE_NOP ++ && (ssa_name_var_p (lhs) && TREE_CODE (SSA_NAME_VAR (lhs)) != PARM_DECL)) ++ { ++ origin_loop.limit = rhs; ++ } ++ else ++ return false; ++ ++ if (origin_loop.limit != NULL) ++ return true; ++ ++ return false; ++} ++ ++/* Returns true only when the expression on the rhs code of stmt is PLUS_EXPR, ++ rhs1 is SSA_NAME with the same var as origin_loop base, and rhs2 is ++ INTEGER_CST. 
*/ ++ ++static bool ++check_update_stmt (gimple *stmt) ++{ ++ if (!stmt) ++ return false; ++ ++ if (gimple_assign_rhs_code (stmt) == PLUS_EXPR) ++ { ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ if (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == INTEGER_CST ++ && same_ssa_name_var_p (rhs1, origin_loop.base)) ++ { ++ origin_loop.step = tree_to_uhwi (rhs2); ++ if (origin_loop.step == 1) ++ return true; ++ } ++ } ++ return false; ++} ++ ++/* Get origin loop induction variable initial value. */ ++ ++static bool ++get_iv_base (gimple *stmt) ++{ ++ tree lhs = gimple_cond_lhs (stmt); ++ if (origin_loop.base != NULL || origin_loop.update_stmt != NULL) ++ return false; ++ ++ basic_block header = gimple_bb (stmt); ++ ++ gphi_iterator gsi; ++ edge e; ++ edge_iterator ei; ++ tree iv_after; ++ ++ for (gsi = gsi_start_phis (header); !gsi_end_p (gsi); gsi_next (&gsi)) ++ { ++ gphi *phi = gsi.phi (); ++ tree res = gimple_phi_result (phi); ++ if (!same_ssa_name_var_p (res, lhs)) ++ continue; ++ tree base = PHI_ARG_DEF_FROM_EDGE (phi, origin_loop.entry_edge); ++ if (!same_ssa_name_var_p (base, lhs)) ++ return false; ++ origin_loop.base = base; ++ FOR_EACH_EDGE (e, ei, header->preds) ++ { ++ if (e != origin_loop.entry_edge) ++ { ++ iv_after = PHI_ARG_DEF_FROM_EDGE (phi, e); ++ gimple *update = SSA_NAME_DEF_STMT (iv_after); ++ if (!check_update_stmt (update)) ++ return false; ++ origin_loop.update_stmt = update; ++ if (gimple_bb (update) == header && iv_after == lhs) ++ origin_loop.exist_prolog_assgin = true; ++ } ++ } ++ } ++ ++ if (origin_loop.base != NULL && origin_loop.update_stmt != NULL) ++ return true; ++ ++ return false; ++} ++ ++/* Record the upper bound and initial value of the induction variable in the ++ original loop; When prolog_assign is present, make sure loop header is in ++ simple form; And the interpretation of prolog_assign is as follows: ++ eg: while (++len != limit) ++ ...... 
++ For such a loop, ++len will be processed before entering header_bb, and the ++ assign is regarded as the prolog_assign of the loop. */ ++ ++static bool ++record_origin_loop_header (class loop *loop) ++{ ++ basic_block header = loop->header; ++ ++ if (origin_loop.entry_edge != NULL || origin_loop.base != NULL ++ || origin_loop.update_stmt != NULL || origin_loop.limit != NULL) ++ return false; ++ origin_loop.entry_edge = get_loop_preheader_edge (loop); ++ ++ gimple_stmt_iterator gsi; ++ gimple *stmt; ++ ++ for (gsi = gsi_last_bb (header); !gsi_end_p (gsi); gsi_prev (&gsi)) ++ { ++ stmt = gsi_stmt (gsi); ++ if (stmt && is_gimple_debug (stmt)) ++ continue; ++ if (stmt && gimple_code (stmt) == GIMPLE_COND) ++ { ++ if (!get_iv_upper_bound (stmt)) ++ return false; ++ if (!get_iv_base (stmt)) ++ return false; ++ } ++ else if (stmt && gimple_code (stmt) == GIMPLE_ASSIGN) ++ { ++ if (stmt != origin_loop.update_stmt || !origin_loop.exist_prolog_assgin) ++ return false; ++ } ++ else ++ return false; ++ } ++ ++ if (origin_loop.entry_edge != NULL && origin_loop.base != NULL ++ && origin_loop.update_stmt != NULL && origin_loop.limit != NULL) ++ return true; ++ ++ return false; ++} ++ ++/* When prolog_assign does not exist, make sure that update_stmt exists in the ++ loop latch, and its form is a specific form, eg: ++ len_2 = len_1 + 1. */ ++ ++static bool ++record_origin_loop_latch (class loop *loop) ++{ ++ basic_block latch = loop->latch; ++ gimple_stmt_iterator gsi; ++ gimple *stmt; ++ ++ gsi = gsi_start_bb (latch); ++ ++ if (origin_loop.exist_prolog_assgin) ++ { ++ if (gsi_end_p (gsi)) ++ return true; ++ } ++ else ++ { ++ if (gsi_one_before_end_p (gsi)) ++ { ++ stmt = gsi_stmt (gsi); ++ if (stmt == origin_loop.update_stmt) ++ return true; ++ } ++ } ++ return false; ++} ++ ++/* Returns true when the DEF_STMT corresponding to arg0 of the mem_ref tree ++ satisfies the POINTER_PLUS_EXPR type. 
*/ ++ ++static bool ++check_body_mem_ref (tree mem_ref) ++{ ++ tree arg0 = TREE_OPERAND (mem_ref , 0); ++ tree arg1 = TREE_OPERAND (mem_ref , 1); ++ ++ if (TREE_CODE (TREE_TYPE (arg0)) == POINTER_TYPE ++ && TREE_CODE (arg1) == INTEGER_CST ++ && tree_to_uhwi (arg1) == 0) ++ { ++ gimple *tmp_g = SSA_NAME_DEF_STMT (arg0); ++ if (tmp_g && gimple_assign_rhs_code (tmp_g) == POINTER_PLUS_EXPR) ++ return true; ++ } ++ return false; ++} ++ ++/* Returns true if the rh2 of the current stmt comes from the base in the ++ original loop. */ ++ ++static bool ++check_body_pointer_plus (gimple *stmt, tree &tmp_index) ++{ ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ if (TREE_CODE (TREE_TYPE (rhs1)) == POINTER_TYPE) ++ { ++ gimple *g = SSA_NAME_DEF_STMT (rhs2); ++ if (g && gimple_assign_rhs_code (g) == NOP_EXPR) ++ { ++ tree nop_rhs = gimple_assign_rhs1 (g); ++ if (same_ssa_name_var_p (nop_rhs, origin_loop.base)) ++ { ++ if (!origin_loop.arr1) ++ { ++ origin_loop.arr1 = rhs1; ++ tmp_index = rhs2; ++ } ++ else if (!origin_loop.arr2) ++ { ++ origin_loop.arr2 = rhs1; ++ if (tmp_index != rhs2) ++ return false; ++ } ++ else ++ return false; ++ return true; ++ } ++ } ++ } ++ return false; ++} ++ ++/* Record the array comparison information in the original loop, while ensuring ++ that there are only statements related to cont_stmt in the loop body. 
*/ ++ ++static bool ++record_origin_loop_body (class loop *loop) ++{ ++ basic_block body = gimple_bb (origin_loop.cond_stmt2); ++ ++ if (origin_loop.arr1 != NULL || origin_loop.arr2 != NULL) ++ return false; ++ ++ gimple_stmt_iterator gsi; ++ for (gsi = gsi_start_bb (body); !gsi_end_p (gsi); gsi_next (&gsi)) ++ { ++ gimple_set_visited (gsi_stmt (gsi), false); ++ } ++ ++ tree cond_lhs = gimple_cond_lhs (origin_loop.cond_stmt2); ++ tree cond_rhs = gimple_cond_rhs (origin_loop.cond_stmt2); ++ if (TREE_CODE (TREE_TYPE (cond_lhs)) != INTEGER_TYPE ++ || TREE_CODE (TREE_TYPE (cond_rhs)) != INTEGER_TYPE) ++ return false; ++ ++ auto_vec stack; ++ tree tmp_index = NULL; ++ stack.safe_push (cond_lhs); ++ stack.safe_push (cond_rhs); ++ gimple_set_visited (origin_loop.cond_stmt2, true); ++ ++ while (!stack.is_empty ()) ++ { ++ tree op = stack.pop (); ++ gimple *g = SSA_NAME_DEF_STMT (op); ++ if (!g || gimple_bb (g) != body || !is_gimple_assign (g)) ++ continue; ++ gimple_set_visited (g, true); ++ if (gimple_assign_rhs_code (g) == MEM_REF) ++ { ++ tree mem_ref = gimple_assign_rhs1 (g); ++ if (!check_body_mem_ref (mem_ref)) ++ return false; ++ stack.safe_push (TREE_OPERAND (mem_ref , 0)); ++ } ++ else if (gimple_assign_rhs_code (g) == POINTER_PLUS_EXPR) ++ { ++ tree rhs2 = gimple_assign_rhs2 (g); ++ if (!check_body_pointer_plus (g, tmp_index)) ++ return false; ++ stack.safe_push (rhs2); ++ } ++ else if (gimple_assign_rhs_code (g) == NOP_EXPR) ++ { ++ tree rhs = gimple_assign_rhs1 (g); ++ if (!same_ssa_name_var_p (rhs, origin_loop.base)) ++ return false; ++ stack.safe_push (rhs); ++ } ++ else ++ return false; ++ } ++ bool allvisited = true; ++ for (gsi = gsi_start_bb (body); !gsi_end_p (gsi); gsi_next (&gsi)) ++ { ++ if (!gimple_visited_p (gsi_stmt (gsi)) ++ && !is_gimple_debug (gsi_stmt (gsi))) ++ allvisited = false; ++ } ++ if (allvisited) ++ { ++ if (origin_loop.arr1 != NULL && origin_loop.arr2 != NULL) ++ return true; ++ } ++ return false; ++} ++ ++/* Dump the original loop 
information to see if the origin loop ++ form matches. */ ++ ++static void ++dump_origin_loop_info () ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nThe origin loop info:\n"); ++ fprintf (dump_file, "\n the origin_loop.limit is:\n"); ++ print_node (dump_file, "", origin_loop.limit, 0); ++ fprintf (dump_file, "\n"); ++ fprintf (dump_file, "\n the origin_loop.base is:\n"); ++ print_node (dump_file, "", origin_loop.base, 0); ++ fprintf (dump_file, "\n"); ++ fprintf (dump_file, "\n the origin_loop.arr1 is:\n"); ++ print_node (dump_file, "", origin_loop.arr1, 0); ++ fprintf (dump_file, "\n"); ++ fprintf (dump_file, "\n the origin_loop.arr2 is:\n"); ++ print_node (dump_file, "", origin_loop.arr2, 0); ++ fprintf (dump_file, "\n"); ++ fprintf (dump_file, "\n the origin_loop.cond_stmt1 is:\n"); ++ print_gimple_stmt (dump_file, origin_loop.cond_stmt1, 0); ++ fprintf (dump_file, "\n"); ++ fprintf (dump_file, "\n the origin_loop.cond_stmt2 is:\n"); ++ print_gimple_stmt (dump_file, origin_loop.cond_stmt2, 0); ++ fprintf (dump_file, "\n"); ++ fprintf (dump_file, "\n the origin_loop.update_stmt is:\n"); ++ print_gimple_stmt (dump_file, origin_loop.update_stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++} ++ ++/* Returns true only if the exit bb of the original loop is unique and its phi ++ node parameter comes from the same variable. 
*/ ++ ++static bool ++check_exit_bb (class loop *loop) ++{ ++ if (origin_loop.exit_bb1 != origin_loop.exit_bb2 ++ || flow_bb_inside_loop_p (loop, origin_loop.exit_bb1)) ++ return false; ++ ++ gphi_iterator gsi; ++ for (gsi = gsi_start_phis (origin_loop.exit_bb1); !gsi_end_p (gsi); ++ gsi_next (&gsi)) ++ { ++ gphi *phi = gsi.phi (); ++ tree res = gimple_phi_result (phi); ++ if (!same_ssa_name_var_p (res, origin_loop.base)) ++ continue; ++ if (gimple_phi_num_args (phi) == 2) ++ { ++ tree arg0 = gimple_phi_arg_def (phi, 0); ++ tree arg1 = gimple_phi_arg_def (phi, 1); ++ if (arg0 == arg1) ++ return true; ++ } ++ } ++ return false; ++} ++ ++/* Make sure that the recorded origin_loop information meets the ++ relative requirements. */ ++ ++static bool ++check_origin_loop_info (class loop *loop) ++{ ++ dump_origin_loop_info (); ++ tree arr1_elem_size, arr2_elem_size; ++ ++ if (!check_exit_bb (loop)) ++ return false; ++ ++ if (TREE_CODE (origin_loop.base) != SSA_NAME) ++ return false; ++ ++ if (!TYPE_READONLY (TREE_TYPE (origin_loop.limit))) ++ return false; ++ ++ if (!TYPE_READONLY (TREE_TYPE (TREE_TYPE (origin_loop.arr1)))) ++ return false; ++ ++ if (!TYPE_READONLY (TREE_TYPE (TREE_TYPE (origin_loop.arr2)))) ++ return false; ++ ++ if (TREE_CODE (TREE_TYPE (origin_loop.arr1)) != POINTER_TYPE ++ || TREE_CODE (TREE_TYPE (origin_loop.arr2)) != POINTER_TYPE ++ || TREE_CODE (TREE_TYPE (TREE_TYPE (origin_loop.arr1))) != INTEGER_TYPE ++ || TREE_CODE (TREE_TYPE (TREE_TYPE (origin_loop.arr2))) != INTEGER_TYPE) ++ return false; ++ ++ arr1_elem_size = TYPE_SIZE (TREE_TYPE (TREE_TYPE (origin_loop.arr1))); ++ arr2_elem_size = TYPE_SIZE (TREE_TYPE (TREE_TYPE (origin_loop.arr2))); ++ ++ if (tree_to_uhwi (arr1_elem_size) != 8 || tree_to_uhwi (arr2_elem_size) != 8) ++ return false; ++ ++ return true; ++} ++ ++/* Record the useful information of the original loop and judge whether the ++ information meets the specified conditions. 
*/ ++ ++static bool ++check_record_loop_form (class loop *loop) ++{ ++ if (!record_origin_loop_exit_info (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nFailed to record loop exit information.\n"); ++ } ++ return false; ++ } ++ ++ if (!record_origin_loop_header (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nFailed to record loop header information.\n"); ++ } ++ return false; ++ } ++ ++ if (!record_origin_loop_latch (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nFailed to record loop latch information.\n"); ++ } ++ return false; ++ } ++ ++ if (!record_origin_loop_body (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nFailed to record loop body information.\n"); ++ } ++ return false; ++ } ++ ++ if (!check_origin_loop_info (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nFailed to check origin loop information.\n"); ++ } ++ return false; ++ } ++ ++ return true; ++} ++ ++/* The main entry for judging whether the loop meets some conditions. */ ++ ++static bool ++determine_loop_form (class loop *loop) ++{ ++ /* Currently only standard loops are processed, that is, only loop_header, ++ loop_latch, loop_body 3 bb blocks are included. */ ++ if (loop->inner || loop->num_nodes != 3) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nWrong loop form, there is inner loop or" ++ "redundant bb.\n"); ++ } ++ return false; ++ } ++ ++ if (single_exit (loop) || !loop->latch) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nWrong loop form, only one exit or loop_latch" ++ "does not exist.\n"); ++ } ++ return false; ++ } ++ ++ /* Support loop with only one backedge. 
*/ ++ if (!loop_single_backedge_p (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nWrong loop form, loop back edges are not" ++ "unique.\n"); ++ } ++ return false; ++ } ++ ++ /* Support loop with only one preheader BB. */ ++ if (!loop_single_preheader_bb (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nWrong loop form, loop preheader bb are not" ++ "unique.\n"); ++ } ++ return false; ++ } ++ ++ init_origin_loop_structure (); ++ if (!check_record_loop_form (loop)) ++ return false; ++ ++ return true; ++} ++ ++/* Create prolog bb for newly constructed loop; When prolog_assign exists in ++ the original loop, the corresponding assign needs to be added to prolog_bb; ++ eg: ++ len_16 = len_10 + 1 ++ Create simple copy statement when prolog_assign does not exist; ++ eg: ++ len_16 = len_10 ++ ++ The IR of bb is as above. */ ++ ++static void ++create_prolog_bb (basic_block &prolog_bb, basic_block after_bb, ++ basic_block dominator_bb, class loop *outer, edge entry_edge) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gimple *g; ++ tree lhs1; ++ ++ prolog_bb = create_empty_bb (after_bb); ++ add_bb_to_loop (prolog_bb, outer); ++ redirect_edge_and_branch (entry_edge, prolog_bb); ++ set_immediate_dominator (CDI_DOMINATORS, prolog_bb, dominator_bb); ++ gsi = gsi_last_bb (prolog_bb); ++ lhs1 = copy_ssa_name (origin_loop.base); ++ ++ if (origin_loop.exist_prolog_assgin) ++ g = gimple_build_assign (lhs1, PLUS_EXPR, origin_loop.base, ++ build_int_cst (TREE_TYPE (origin_loop.base), origin_loop.step)); ++ else ++ g = gimple_build_assign (lhs1, NOP_EXPR, origin_loop.base); ++ gimple_seq_add_stmt (&stmts, g); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ set_current_def (origin_loop.base, lhs1); ++ defs_map.put (prolog_bb, lhs1); ++} ++ ++/* Create preheader bb for new loop; In order to ensure the standard form of ++ the loop, add a preheader_bb before loop_header. 
*/ ++ ++static void ++create_loop_pred_bb (basic_block &loop_pred_bb, basic_block after_bb, ++ basic_block dominator_bb, class loop *outer) ++{ ++ loop_pred_bb = create_empty_bb (after_bb); ++ add_bb_to_loop (loop_pred_bb, outer); ++ set_immediate_dominator (CDI_DOMINATORS, loop_pred_bb, dominator_bb); ++ defs_map.put (loop_pred_bb, get_current_def (origin_loop.base)); ++} ++ ++/* Add phi_arg for bb with phi node. */ ++ ++static void ++rewrite_add_phi_arg (basic_block bb) ++{ ++ edge e; ++ edge_iterator ei; ++ gphi *phi; ++ gphi_iterator gsi; ++ tree res; ++ location_t loc; ++ ++ for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi)) ++ { ++ phi = gsi.phi (); ++ res = gimple_phi_result (phi); ++ ++ FOR_EACH_EDGE (e, ei, bb->preds) ++ { ++ if (PHI_ARG_DEF_FROM_EDGE (phi, e)) ++ continue; ++ tree var = *(defs_map.get (e->src)); ++ if (!same_ssa_name_var_p (var, res)) ++ continue; ++ if (virtual_operand_p (var)) ++ loc = UNKNOWN_LOCATION; ++ else ++ loc = gimple_location (SSA_NAME_DEF_STMT (var)); ++ add_phi_arg (phi, var, e, loc); ++ } ++ } ++} ++ ++/* Create loop_header BB for align_loop. ++ eg: ++ _18 = (long unsigned int) len_17; ++ _19 = _18 + 8; ++ _20 = (long unsigned int) len_limit_12 (D); ++ if (_19 <= _20) ++ ++ The IR of bb is as above. 
*/ ++ ++static void ++create_align_loop_header (basic_block &align_loop_header, basic_block after_bb, ++ basic_block dominator_bb, class loop *outer) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gcond *cond_stmt; ++ gphi *phi; ++ tree res; ++ ++ tree entry_node = get_current_def (origin_loop.base); ++ align_loop_header = create_empty_bb (after_bb); ++ add_bb_to_loop (align_loop_header, outer); ++ make_single_succ_edge (after_bb, align_loop_header, EDGE_FALLTHRU); ++ set_immediate_dominator (CDI_DOMINATORS, align_loop_header, dominator_bb); ++ gsi = gsi_last_bb (align_loop_header); ++ phi = create_phi_node (NULL_TREE, align_loop_header); ++ create_new_def_for (entry_node, phi, gimple_phi_result_ptr (phi)); ++ res = gimple_phi_result (phi); ++ ++ tree lhs1 = gimple_build (&stmts, NOP_EXPR, long_unsigned_type_node, res); ++ tree lhs2 = gimple_build (&stmts, PLUS_EXPR, TREE_TYPE (lhs1), lhs1, ++ build_int_cst (TREE_TYPE (lhs1), 8)); ++ tree lhs3 = gimple_build (&stmts, NOP_EXPR, long_unsigned_type_node, ++ origin_loop.limit); ++ cond_stmt = gimple_build_cond (LE_EXPR, lhs2, lhs3, NULL_TREE, NULL_TREE); ++ gimple_seq_add_stmt (&stmts, cond_stmt); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ ++ set_current_def (origin_loop.base, res); ++ defs_map.put (align_loop_header, res); ++} ++ ++/* Create loop body BB for align_loop. ++ eg: ++ _21 = (sizetype) len_17; ++ _22 = cur_15 (D) + _21; ++ _23 = MEM[(long unsigned int *)_22]; ++ _24 = pb_13 (D) + _21; ++ _25 = MEM[(long unsigned int *)_24]; ++ if (_23 != _25) ++ ++ The IR of bb is as above. 
*/ ++ ++static void ++create_align_loop_body_bb (basic_block &align_loop_body_bb, ++ basic_block after_bb, basic_block dominator_bb, ++ class loop *outer) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gimple *g; ++ gcond *cond_stmt; ++ tree lhs1, lhs2; ++ ++ align_loop_body_bb = create_empty_bb (after_bb); ++ add_bb_to_loop (align_loop_body_bb, outer); ++ make_edge (after_bb, align_loop_body_bb, EDGE_TRUE_VALUE); ++ set_immediate_dominator (CDI_DOMINATORS, align_loop_body_bb, dominator_bb); ++ gsi = gsi_last_bb (align_loop_body_bb); ++ ++ tree var = gimple_build (&stmts, NOP_EXPR, sizetype, ++ get_current_def (origin_loop.base)); ++ lhs1 = gimple_build (&stmts, POINTER_PLUS_EXPR, TREE_TYPE (origin_loop.arr2), ++ origin_loop.arr2, var); ++ g = gimple_build_assign (make_ssa_name (long_unsigned_type_node), ++ fold_build2 (MEM_REF, long_unsigned_type_node, lhs1, ++ build_int_cst (build_pointer_type (long_unsigned_type_node), 0))); ++ gimple_seq_add_stmt (&stmts, g); ++ lhs1 = gimple_assign_lhs (g); ++ lhs2 = gimple_build (&stmts, POINTER_PLUS_EXPR, TREE_TYPE (origin_loop.arr1), ++ origin_loop.arr1, var); ++ g = gimple_build_assign (make_ssa_name (long_unsigned_type_node), ++ fold_build2 (MEM_REF, long_unsigned_type_node, lhs2, ++ build_int_cst (build_pointer_type (long_unsigned_type_node), 0))); ++ gimple_seq_add_stmt (&stmts, g); ++ lhs2 = gimple_assign_lhs (g); ++ cond_stmt = gimple_build_cond (gimple_cond_code (origin_loop.cond_stmt2), ++ lhs1, lhs2, NULL_TREE, NULL_TREE); ++ gimple_seq_add_stmt (&stmts, cond_stmt); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++} ++ ++/* Create loop_latch BB for align_loop. ++ eg: ++ len_26 = len_17 + 8; ++ ++ The IR of bb is as above. 
*/ ++ ++static void ++create_align_loop_latch (basic_block &align_loop_latch, basic_block after_bb, ++ basic_block dominator_bb, class loop *outer) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gimple *g; ++ tree res; ++ ++ tree entry_node = get_current_def (origin_loop.base); ++ align_loop_latch = create_empty_bb (after_bb); ++ add_bb_to_loop (align_loop_latch, outer); ++ make_edge (after_bb, align_loop_latch, EDGE_FALSE_VALUE); ++ set_immediate_dominator (CDI_DOMINATORS, align_loop_latch, dominator_bb); ++ gsi = gsi_last_bb (align_loop_latch); ++ res = copy_ssa_name (entry_node); ++ g = gimple_build_assign (res, PLUS_EXPR, entry_node, ++ build_int_cst (TREE_TYPE (entry_node), 8)); ++ gimple_seq_add_stmt (&stmts, g); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ defs_map.put (align_loop_latch, res); ++} ++ ++/* Create a new loop and add it to outer_loop and return. */ ++ ++static class loop * ++init_new_loop (class loop *outer_loop, basic_block header, basic_block latch) ++{ ++ class loop *new_loop; ++ new_loop = alloc_loop (); ++ new_loop->header = header; ++ new_loop->latch = latch; ++ add_loop (new_loop, outer_loop); ++ ++ return new_loop; ++} ++ ++/* Create necessary exit BB for align_loop. ++ eg: ++ _27 = _23 ^ _25; ++ _28 = __builtin_ctzll (_27); ++ _29 = _28 >> 3; ++ len_30 = _29 + len_17; ++ ++ The IR of bb is as above. 
*/ ++ ++static void ++create_align_loop_exit_bb (basic_block &align_loop_exit_bb, ++ basic_block after_bb, basic_block dominator_bb, ++ class loop *outer) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gimple *g; ++ gimple *cond_stmt; ++ tree lhs1, lhs2; ++ tree cond_lhs, cond_rhs; ++ gcall *build_ctzll; ++ ++ tree entry_node = get_current_def (origin_loop.base); ++ align_loop_exit_bb = create_empty_bb (after_bb); ++ add_bb_to_loop (align_loop_exit_bb, outer); ++ make_edge (after_bb, align_loop_exit_bb, EDGE_TRUE_VALUE); ++ set_immediate_dominator (CDI_DOMINATORS, align_loop_exit_bb, dominator_bb); ++ gsi = gsi_last_bb (align_loop_exit_bb); ++ ++ cond_stmt = gsi_stmt (gsi_last_bb (after_bb)); ++ cond_lhs = gimple_cond_lhs (cond_stmt); ++ cond_rhs = gimple_cond_rhs (cond_stmt); ++ ++ lhs1 = gimple_build (&stmts, BIT_XOR_EXPR, TREE_TYPE (cond_lhs), cond_lhs, ++ cond_rhs); ++ build_ctzll = gimple_build_call (builtin_decl_explicit (BUILT_IN_CTZLL), 1, ++ lhs1); ++ lhs1 = make_ssa_name (integer_type_node); ++ gimple_call_set_lhs (build_ctzll, lhs1); ++ gimple_seq_add_stmt (&stmts, build_ctzll); ++ lhs2 = copy_ssa_name (lhs1); ++ g = gimple_build_assign (lhs2, RSHIFT_EXPR, lhs1, ++ build_int_cst (TREE_TYPE (lhs1), 3)); ++ gimple_seq_add_stmt (&stmts, g); ++ lhs1 = gimple_build (&stmts, NOP_EXPR, TREE_TYPE (entry_node), lhs2); ++ lhs2 = copy_ssa_name (entry_node); ++ g = gimple_build_assign (lhs2, PLUS_EXPR, lhs1, entry_node); ++ gimple_seq_add_stmt (&stmts, g); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ defs_map.put (align_loop_exit_bb, lhs2); ++} ++ ++/* Create loop_header BB for epilogue_loop. ++ eg: ++ # len_31 = PHI ++ if (len_31 != len_limit_12 (D)) ++ ++ The IR of bb is as above. 
*/ ++ ++static void ++create_epilogue_loop_header (basic_block &epilogue_loop_header, ++ basic_block after_bb, basic_block dominator_bb, ++ class loop *outer) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gcond *cond_stmt; ++ tree res; ++ gphi *phi; ++ ++ tree entry_node = get_current_def (origin_loop.base); ++ epilogue_loop_header = create_empty_bb (after_bb); ++ add_bb_to_loop (epilogue_loop_header, outer); ++ make_single_succ_edge (after_bb, epilogue_loop_header, EDGE_FALLTHRU); ++ set_immediate_dominator (CDI_DOMINATORS, epilogue_loop_header, dominator_bb); ++ gsi = gsi_last_bb (epilogue_loop_header); ++ phi = create_phi_node (NULL_TREE, epilogue_loop_header); ++ create_new_def_for (entry_node, phi, gimple_phi_result_ptr (phi)); ++ res = gimple_phi_result (phi); ++ cond_stmt = gimple_build_cond (gimple_cond_code (origin_loop.cond_stmt1), res, ++ origin_loop.limit, NULL_TREE, NULL_TREE); ++ gimple_seq_add_stmt (&stmts, cond_stmt); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ ++ set_current_def (origin_loop.base, res); ++ defs_map.put (epilogue_loop_header, res); ++} ++ ++/* Create loop body BB for epilogue_loop. ++ eg: ++ _32 = (sizetype) len_31; ++ _33 = pb_13 (D) + _32; ++ _34 = *_33; ++ _35 = cur_15 (D) + _32; ++ _36 = *_35; ++ if (_34 != _36) ++ ++ The IR of bb is as above. 
*/ ++ ++static void ++create_epilogue_loop_body_bb (basic_block &epilogue_loop_body_bb, ++ basic_block after_bb, basic_block dominator_bb, ++ class loop *outer) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gimple *g; ++ gcond *cond_stmt; ++ tree lhs1, lhs2, lhs3; ++ ++ tree entry_node = get_current_def (origin_loop.base); ++ epilogue_loop_body_bb = create_empty_bb (after_bb); ++ add_bb_to_loop (epilogue_loop_body_bb, outer); ++ make_edge (after_bb, epilogue_loop_body_bb, EDGE_TRUE_VALUE); ++ set_immediate_dominator (CDI_DOMINATORS, epilogue_loop_body_bb, dominator_bb); ++ gsi = gsi_last_bb (epilogue_loop_body_bb); ++ lhs1 = gimple_build (&stmts, NOP_EXPR, sizetype, entry_node); ++ lhs2 = gimple_build (&stmts, POINTER_PLUS_EXPR, TREE_TYPE (origin_loop.arr1), ++ origin_loop.arr1, lhs1); ++ g = gimple_build_assign (make_ssa_name (unsigned_char_type_node), ++ fold_build2 (MEM_REF, unsigned_char_type_node, lhs2, ++ build_int_cst (TREE_TYPE (lhs2), 0))); ++ gimple_seq_add_stmt (&stmts, g); ++ lhs2 = gimple_assign_lhs (g); ++ lhs3 = gimple_build (&stmts, POINTER_PLUS_EXPR, TREE_TYPE (origin_loop.arr2), ++ origin_loop.arr2, lhs1); ++ g = gimple_build_assign (make_ssa_name (unsigned_char_type_node), ++ fold_build2 (MEM_REF, unsigned_char_type_node, lhs3, ++ build_int_cst (TREE_TYPE (lhs3), 0))); ++ gimple_seq_add_stmt (&stmts, g); ++ lhs3 = gimple_assign_lhs (g); ++ cond_stmt = gimple_build_cond (gimple_cond_code (origin_loop.cond_stmt2), lhs2, ++ lhs3, NULL_TREE, NULL_TREE); ++ gimple_seq_add_stmt (&stmts, cond_stmt); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ defs_map.put (epilogue_loop_body_bb, get_current_def (origin_loop.base)); ++} ++ ++/* Create loop_latch BB for epilogue_loop. ++ eg: ++ len_37 = len_31 + 1; ++ ++ The IR of bb is as above. 
*/ ++ ++static void ++create_epilogue_loop_latch (basic_block &epilogue_loop_latch, ++ basic_block after_bb, basic_block dominator_bb, ++ class loop *outer) ++{ ++ gimple_seq stmts = NULL; ++ gimple_stmt_iterator gsi; ++ gimple *g; ++ tree res; ++ ++ tree entry_node = get_current_def (origin_loop.base); ++ epilogue_loop_latch = create_empty_bb (after_bb); ++ add_bb_to_loop (epilogue_loop_latch, outer); ++ make_edge (after_bb, epilogue_loop_latch, EDGE_FALSE_VALUE); ++ set_immediate_dominator (CDI_DOMINATORS, epilogue_loop_latch, dominator_bb); ++ gsi = gsi_last_bb (epilogue_loop_latch); ++ res = copy_ssa_name (entry_node); ++ g = gimple_build_assign (res, PLUS_EXPR, entry_node, ++ build_int_cst (TREE_TYPE (entry_node), origin_loop.step)); ++ gimple_seq_add_stmt (&stmts, g); ++ gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT); ++ defs_map.put (epilogue_loop_latch, res); ++} ++ ++/* convert_to_new_loop ++ | | ++ | | ++ | | entry_edge ++ | ______ | ++ | / V V ++ | | -----origin_loop_header--- ++ | | | | ++ | | -------------------------\ ++ | | | \ ++ | | V \___ ___ ___ ___ ___ ___ ___ ++ | | -----origin_loop_body----- | ++ | | | | | ++ | | -------------------------\ | ++ | | | \___ ___ ___ ___ | ++ | | V V V ++ | | -----origin_loop_latch---- -----exit_bb------ ++ | | | | | | ++ | | /-------------------------- ------------------ ++ | \ __ / ++ | ++ | | ++ | ====> |entry_edge ++ | V ++ | -------prolog_bb----- ++ | | | ++ | --------------------- ++ | | ++ | V ++ | -----align_loop_header---- ++ | /-----------------> | | ++ |/ -------------------------- ++ || / \ ++ || V V ++ || ---align_loop_body--- ---epilogue_loop_header-- ++ || | | -------| |<---| ++ || --------------------\ / ------------------------- | ++ || | \____ | | | ++ || V | | V | ++ || ---align_loop_latch--- | | ---epilogue_loop_body---- | ++ || | | | | ----| | | ++ || ---------------------- | | / ------------------------- | ++ || / __________/ | | | | ++ || / | | | V | ++ | \ __________/ | | | 
---epilogue_loop_latch--- | ++ | | | | | | | ++ | | | | ------------------------- / ++ | V | | | / ++ | -align_loop_exit_bb- | | \______________/ ++ | | | | | ++ | -------------------- | | ++ | | | | ++ | | V V ++ | | -----exit_bb------ ++ | |---->| | ++ | ------------------ ++ ++ The origin_loop conversion process starts from entry_edge and ends at ++ exit_bb; The execution logic of origin_loop is completely replaced by ++ align_loop + epilogue_loop: ++ 1) align_loop mainly implements the idea of ​​using wide-type dereference ++ and comparison on array elements, so as to achieve the effect of ++ acceleration; For the corresponding source code understanding, please ++ refer to the description of the pass at the beginning; ++ 2) epilogue_loop processes the previous loop remaining array element ++ comparison. */ ++ ++static void ++create_new_loops (edge entry_edge) ++{ ++ basic_block prolog_bb; ++ basic_block align_loop_header, align_loop_latch, align_loop_body_bb; ++ basic_block align_pred_bb, align_loop_exit_bb; ++ basic_block epilogue_loop_header, epilogue_loop_latch, epilogue_loop_body_bb; ++ basic_block epilogue_loop_pred_bb; ++ class loop *align_loop; ++ class loop *epilogue_loop; ++ ++ class loop *outer = entry_edge->src->loop_father; ++ ++ create_prolog_bb (prolog_bb, entry_edge->src, entry_edge->src, outer, ++ entry_edge); ++ ++ create_loop_pred_bb (align_pred_bb, prolog_bb, prolog_bb, outer); ++ make_single_succ_edge (prolog_bb, align_pred_bb, EDGE_FALLTHRU); ++ ++ create_align_loop_header (align_loop_header, align_pred_bb, ++ align_pred_bb, outer); ++ ++ create_align_loop_body_bb (align_loop_body_bb, align_loop_header, ++ align_loop_header, outer); ++ ++ create_align_loop_latch (align_loop_latch, align_loop_body_bb, ++ align_loop_body_bb, outer); ++ make_edge (align_loop_latch, align_loop_header, EDGE_FALLTHRU); ++ rewrite_add_phi_arg (align_loop_header); ++ ++ align_loop = init_new_loop (outer, align_loop_header, align_loop_latch); ++ if (dump_file && 
(dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nPrint byte align loop %d:\n", align_loop->num); ++ flow_loop_dump (align_loop, dump_file, NULL, 1); ++ fprintf (dump_file, "\n\n"); ++ } ++ ++ create_align_loop_exit_bb (align_loop_exit_bb, align_loop_body_bb, ++ align_loop_body_bb, outer); ++ ++ create_loop_pred_bb (epilogue_loop_pred_bb, align_loop_header, ++ align_loop_header, outer); ++ make_edge (align_loop_header, epilogue_loop_pred_bb, EDGE_FALSE_VALUE); ++ ++ create_epilogue_loop_header (epilogue_loop_header, epilogue_loop_pred_bb, ++ epilogue_loop_pred_bb, outer); ++ ++ create_epilogue_loop_body_bb (epilogue_loop_body_bb, epilogue_loop_header, ++ epilogue_loop_header, outer); ++ ++ create_epilogue_loop_latch (epilogue_loop_latch, epilogue_loop_body_bb, ++ epilogue_loop_body_bb, outer); ++ make_single_succ_edge (epilogue_loop_latch, epilogue_loop_header, ++ EDGE_FALLTHRU); ++ rewrite_add_phi_arg (epilogue_loop_header); ++ ++ epilogue_loop = init_new_loop (outer, epilogue_loop_header, ++ epilogue_loop_latch); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nPrint epilogue loop %d:\n", epilogue_loop->num); ++ flow_loop_dump (epilogue_loop, dump_file, NULL, 1); ++ fprintf (dump_file, "\n\n"); ++ } ++ make_single_succ_edge (align_loop_exit_bb, origin_loop.exit_bb1, ++ EDGE_FALLTHRU); ++ set_immediate_dominator (CDI_DOMINATORS, origin_loop.exit_bb1, ++ entry_edge->src); ++ make_edge (epilogue_loop_body_bb, origin_loop.exit_bb1, EDGE_TRUE_VALUE); ++ ++ make_edge (epilogue_loop_header, origin_loop.exit_bb2, EDGE_FALSE_VALUE); ++ set_immediate_dominator (CDI_DOMINATORS, origin_loop.exit_bb2, ++ entry_edge->src); ++ ++ rewrite_add_phi_arg (origin_loop.exit_bb1); ++ rewrite_add_phi_arg (origin_loop.exit_bb2); ++ ++ remove_edge (origin_loop.exit_e1); ++ remove_edge (origin_loop.exit_e2); ++} ++ ++/* Make sure that the dominance relationship of the newly inserted cfg ++ is not missing. 
*/ ++ ++static void ++update_loop_dominator (cdi_direction dir) ++{ ++ gcc_assert (dom_info_available_p (dir)); ++ ++ basic_block bb; ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ basic_block imm_bb = get_immediate_dominator (dir, bb); ++ if (!imm_bb || bb == origin_loop.exit_bb1) ++ { ++ set_immediate_dominator (CDI_DOMINATORS, bb, ++ recompute_dominator (CDI_DOMINATORS, bb)); ++ continue; ++ } ++ } ++} ++ ++/* Clear information about the original loop. */ ++ ++static void ++remove_origin_loop (class loop *loop) ++{ ++ basic_block *body; ++ ++ body = get_loop_body_in_dom_order (loop); ++ unsigned n = loop->num_nodes; ++ for (unsigned i = 0; i < n; i++) ++ { ++ delete_basic_block (body[i]); ++ } ++ free (body); ++ delete_loop (loop); ++} ++ ++/* Perform the conversion of origin_loop to new_loop. */ ++ ++static void ++convert_to_new_loop (class loop *loop) ++{ ++ create_new_loops (origin_loop.entry_edge); ++ remove_origin_loop (loop); ++ update_loop_dominator (CDI_DOMINATORS); ++ update_ssa (TODO_update_ssa); ++} ++ ++/* The main entry of array-widen-compare optimizes. 
*/ ++ ++static unsigned int ++tree_ssa_array_widen_compare () ++{ ++ unsigned int todo = 0; ++ class loop *loop; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ flow_loops_dump (dump_file, NULL, 1); ++ fprintf (dump_file, "\nConfirm which loop can be optimized using" ++ " array-widen-compare\n"); ++ } ++ ++ enum li_flags LI = LI_FROM_INNERMOST; ++ for (auto loop : loops_list (cfun, LI)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "======================================\n"); ++ fprintf (dump_file, "Processing loop %d:\n", loop->num); ++ fprintf (dump_file, "======================================\n"); ++ flow_loop_dump (loop, dump_file, NULL, 1); ++ fprintf (dump_file, "\n\n"); ++ } ++ ++ if (determine_loop_form (loop)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "The %dth loop form is success matched," ++ "and the loop can be optimized.\n", ++ loop->num); ++ dump_loop_bb (loop); ++ } ++ ++ convert_to_new_loop (loop); ++ } ++ } ++ ++ todo |= (TODO_update_ssa); ++ return todo; ++} ++ ++/* Array widen compare. */ ++ ++namespace { ++ ++const pass_data pass_data_tree_array_widen_compare = ++{ ++ GIMPLE_PASS, ++ "awiden_compare", ++ OPTGROUP_LOOP, ++ TV_TREE_ARRAY_WIDEN_COMPARE, ++ (PROP_cfg | PROP_ssa), ++ 0, ++ 0, ++ 0, ++ (TODO_update_ssa | TODO_verify_all) ++}; ++ ++class pass_array_widen_compare : public gimple_opt_pass ++{ ++public: ++ pass_array_widen_compare (gcc::context *ctxt) ++ : gimple_opt_pass (pass_data_tree_array_widen_compare, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *); ++ virtual unsigned int execute (function *); ++ ++}; // class pass_array_widen_compare ++ ++bool ++pass_array_widen_compare::gate (function *) ++{ ++ return (flag_array_widen_compare > 0 && optimize >= 3); ++} ++ ++unsigned int ++pass_array_widen_compare::execute (function *fun) ++{ ++ if (number_of_loops (fun) <= 1) ++ return 0; ++ ++ /* Only supports LP64 data mode. 
*/ ++ if (TYPE_PRECISION (long_integer_type_node) != 64 ++ || POINTER_SIZE != 64 || TYPE_PRECISION (integer_type_node) != 32) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "The current data mode is not supported," ++ "only the LP64 date mode is supported.\n"); ++ return 0; ++ } ++ ++ return tree_ssa_array_widen_compare (); ++} ++ ++} // anon namespace ++ ++gimple_opt_pass * ++make_pass_array_widen_compare (gcc::context *ctxt) ++{ ++ return new pass_array_widen_compare (ctxt); ++} +-- +2.33.0 + diff --git a/0014-LoongArch-Define-macro-CLEAR_INSN_CACHE.patch b/0014-LoongArch-Define-macro-CLEAR_INSN_CACHE.patch new file mode 100644 index 0000000000000000000000000000000000000000..5ff13dac987d3e0452a4863494f3341653d85695 --- /dev/null +++ b/0014-LoongArch-Define-macro-CLEAR_INSN_CACHE.patch @@ -0,0 +1,34 @@ +From 7f4a912da99f5787c88b275b83ee547c9e1aa3d7 Mon Sep 17 00:00:00 2001 +From: Lulu Cheng +Date: Mon, 23 Oct 2023 09:07:32 +0800 +Subject: [PATCH 014/188] LoongArch: Define macro CLEAR_INSN_CACHE. + +LoongArch's microstructure ensures cache consistency by hardware. +Due to out-of-order execution, "ibar" is required to ensure the visibility of the +store (invalidated icache) executed by this CPU before "ibar" (to the instance). +"ibar" will not invalidate the icache, so the start and end parameters are not Affect +"ibar" performance. + +gcc/ChangeLog: + + * config/loongarch/loongarch.h (CLEAR_INSN_CACHE): New definition. +--- + gcc/config/loongarch/loongarch.h | 5 +++++ + 1 file changed, 5 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index 572b538be..f7ddfc452 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1231,3 +1231,8 @@ struct GTY (()) machine_function + (TARGET_HARD_FLOAT_ABI ? (TARGET_DOUBLE_FLOAT_ABI ? 
8 : 4) : 0) + + #define FUNCTION_VALUE_REGNO_P(N) ((N) == GP_RETURN || (N) == FP_RETURN) ++ ++/* LoongArch maintains ICache/DCache coherency by hardware, ++ we just need "ibar" to avoid instruction hazard here. */ ++#undef CLEAR_INSN_CACHE ++#define CLEAR_INSN_CACHE(beg, end) __builtin_loongarch_ibar (0) +-- +2.43.0 + diff --git a/0014-Sw64-Port-add-lex-builtin-support-in-libcpp.patch b/0014-Sw64-Port-add-lex-builtin-support-in-libcpp.patch new file mode 100644 index 0000000000000000000000000000000000000000..71524b2f9ab5cd43300b63ce36d75e3c6fd618bd --- /dev/null +++ b/0014-Sw64-Port-add-lex-builtin-support-in-libcpp.patch @@ -0,0 +1,35 @@ +From b85fb9eca6a6a7612ae6451995f9908bfbbba72f Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 16:56:12 +0800 +Subject: [PATCH 14/16] Sw64 Port: add lex builtin support in libcpp + +--- + libcpp/lex.cc | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +diff --git a/libcpp/lex.cc b/libcpp/lex.cc +index fb1dfabb7..bc2541ef9 100644 +--- a/libcpp/lex.cc ++++ b/libcpp/lex.cc +@@ -168,6 +168,8 @@ acc_char_cmp (word_type val, word_type c) + /* We can get exact results using a compare-bytes instruction. + Get (val == c) via (0 >= (val ^ c)). */ + return __builtin_alpha_cmpbge (0, val ^ c); ++#elif defined(__GNUC__) && defined(__sw_64__) ++ return __builtin_sw_64_cmpbge (0, val ^ c); + #else + word_type magic = 0x7efefefeU; + if (sizeof(word_type) == 8) +@@ -186,7 +188,8 @@ static inline int + acc_char_index (word_type cmp ATTRIBUTE_UNUSED, + word_type val ATTRIBUTE_UNUSED) + { +-#if defined(__GNUC__) && defined(__alpha__) && !WORDS_BIGENDIAN ++#if defined(__GNUC__) && (defined(__alpha__) || defined(__sw_64__)) \ ++ && !WORDS_BIGENDIAN + /* The cmpbge instruction sets *bits* of the result corresponding to + matches in the bytes with no false positives. 
*/ + return __builtin_ctzl (cmp); +-- +2.25.1 + diff --git a/0015-Backport-Structure-reorganization-optimization.patch b/0015-Backport-Structure-reorganization-optimization.patch new file mode 100644 index 0000000000000000000000000000000000000000..d3809165b286a017c89eb1766ce79028ef683335 --- /dev/null +++ b/0015-Backport-Structure-reorganization-optimization.patch @@ -0,0 +1,6170 @@ +From 8631d4a39453bb262675bea9abb5c1b7d52af624 Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Wed, 19 Jul 2023 10:28:04 +0800 +Subject: [PATCH 15/22] [Backport] Structure reorganization optimization + +Reference: https://gcc.gnu.org/git/?p=gcc-old.git;a=commit;h=6e1bd1c900533c627b5e4fbbecb41dcd7974b522 + +Introduce structure reorganization optimization, that change C-like +structures layout in order to better utilize spatial locality. This +transformation is affective for programs containing arrays of structures. +--- + gcc/Makefile.in | 1 + + gcc/common.opt | 4 +- + gcc/configure | 2 +- + gcc/configure.ac | 2 +- + gcc/doc/invoke.texi | 23 + + gcc/gimple-ssa-warn-access.cc | 8 + + gcc/ipa-param-manipulation.cc | 3 +- + gcc/ipa-param-manipulation.h | 3 +- + gcc/ipa-struct-reorg/escapes.def | 60 + + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 4015 +++++++++++++++++ + gcc/ipa-struct-reorg/ipa-struct-reorg.h | 235 + + gcc/params.opt | 4 + + gcc/passes.def | 2 + + gcc/testsuite/gcc.dg/struct/struct-reorg.exp | 35 + + gcc/testsuite/gcc.dg/struct/struct_reorg-1.c | 24 + + gcc/testsuite/gcc.dg/struct/struct_reorg-2.c | 29 + + gcc/testsuite/gcc.dg/struct/struct_reorg-3.c | 23 + + gcc/testsuite/gcc.dg/struct/struct_reorg-4.c | 59 + + .../gcc.dg/struct/w_prof_global_array.c | 29 + + .../gcc.dg/struct/w_prof_global_var.c | 42 + + .../gcc.dg/struct/w_prof_local_array.c | 37 + + .../gcc.dg/struct/w_prof_local_var.c | 40 + + .../gcc.dg/struct/w_prof_single_str_global.c | 31 + + gcc/testsuite/gcc.dg/struct/w_prof_two_strs.c | 64 + + .../gcc.dg/struct/w_ratio_cold_str.c | 43 + + 
.../gcc.dg/struct/wo_prof_array_field.c | 26 + + .../struct/wo_prof_array_through_pointer.c | 38 + + .../gcc.dg/struct/wo_prof_double_malloc.c | 29 + + .../gcc.dg/struct/wo_prof_empty_str.c | 44 + + .../struct/wo_prof_escape_arg_to_local.c | 44 + + .../gcc.dg/struct/wo_prof_escape_return-1.c | 33 + + .../gcc.dg/struct/wo_prof_escape_return.c | 32 + + .../gcc.dg/struct/wo_prof_escape_str_init.c | 31 + + .../struct/wo_prof_escape_substr_array.c | 33 + + .../struct/wo_prof_escape_substr_pointer.c | 48 + + .../struct/wo_prof_escape_substr_value.c | 45 + + .../gcc.dg/struct/wo_prof_global_array.c | 32 + + .../gcc.dg/struct/wo_prof_global_var.c | 45 + + .../gcc.dg/struct/wo_prof_local_array.c | 40 + + .../gcc.dg/struct/wo_prof_local_var.c | 43 + + .../gcc.dg/struct/wo_prof_malloc_size_var-1.c | 47 + + .../gcc.dg/struct/wo_prof_malloc_size_var.c | 47 + + .../struct/wo_prof_mult_field_peeling.c | 42 + + .../gcc.dg/struct/wo_prof_single_str_global.c | 34 + + .../gcc.dg/struct/wo_prof_single_str_local.c | 34 + + .../struct/wo_prof_single_str_pointer.c | 38 + + .../gcc.dg/struct/wo_prof_two_strs.c | 67 + + gcc/timevar.def | 1 + + gcc/tree-pass.h | 1 + + 49 files changed, 5686 insertions(+), 6 deletions(-) + create mode 100644 gcc/ipa-struct-reorg/escapes.def + create mode 100644 gcc/ipa-struct-reorg/ipa-struct-reorg.cc + create mode 100644 gcc/ipa-struct-reorg/ipa-struct-reorg.h + create mode 100644 gcc/testsuite/gcc.dg/struct/struct-reorg.exp + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-1.c + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-2.c + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-3.c + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-4.c + create mode 100644 gcc/testsuite/gcc.dg/struct/w_prof_global_array.c + create mode 100644 gcc/testsuite/gcc.dg/struct/w_prof_global_var.c + create mode 100644 gcc/testsuite/gcc.dg/struct/w_prof_local_array.c + create mode 100644 gcc/testsuite/gcc.dg/struct/w_prof_local_var.c 
+ create mode 100644 gcc/testsuite/gcc.dg/struct/w_prof_single_str_global.c + create mode 100644 gcc/testsuite/gcc.dg/struct/w_prof_two_strs.c + create mode 100644 gcc/testsuite/gcc.dg/struct/w_ratio_cold_str.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_array_field.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_array_through_pointer.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_double_malloc.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_empty_str.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_arg_to_local.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_return-1.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_return.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_str_init.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_array.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_pointer.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_value.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_global_array.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_global_var.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_local_array.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_local_var.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var-1.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_mult_field_peeling.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_single_str_global.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_single_str_local.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_single_str_pointer.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_two_strs.c + +diff --git a/gcc/Makefile.in b/gcc/Makefile.in +index 31ff95500..c863ad992 100644 +--- a/gcc/Makefile.in ++++ b/gcc/Makefile.in +@@ -1451,6 
+1451,7 @@ OBJS = \ + incpath.o \ + init-regs.o \ + internal-fn.o \ ++ ipa-struct-reorg/ipa-struct-reorg.o \ + ipa-cp.o \ + ipa-sra.o \ + ipa-devirt.o \ +diff --git a/gcc/common.opt b/gcc/common.opt +index e365a48bc..b48fa3228 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1950,8 +1950,8 @@ Common Ignore + Does nothing. Preserved for backward compatibility. + + fipa-struct-reorg +-Common Ignore +-Does nothing. Preserved for backward compatibility. ++Common Var(flag_ipa_struct_reorg) Init(0) Optimization ++Perform structure layout optimizations. + + fipa-vrp + Common Var(flag_ipa_vrp) Optimization +diff --git a/gcc/configure b/gcc/configure +index c749ace01..98bbf0f85 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -34191,7 +34191,7 @@ $as_echo "$as_me: executing $ac_file commands" >&6;} + "depdir":C) $SHELL $ac_aux_dir/mkinstalldirs $DEPDIR ;; + "gccdepdir":C) + ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs build/$DEPDIR +- for lang in $subdirs c-family common analyzer rtl-ssa ++ for lang in $subdirs c-family common analyzer rtl-ssa ipa-struct-reorg + do + ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs $lang/$DEPDIR + done ;; +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 992a50e7b..c74f4b555 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -1340,7 +1340,7 @@ AC_CHECK_HEADERS(ext/hash_map) + ZW_CREATE_DEPDIR + AC_CONFIG_COMMANDS([gccdepdir],[ + ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs build/$DEPDIR +- for lang in $subdirs c-family common analyzer rtl-ssa ++ for lang in $subdirs c-family common analyzer rtl-ssa ipa-struct-reorg + do + ${CONFIG_SHELL-/bin/sh} $ac_aux_dir/mkinstalldirs $lang/$DEPDIR + done], [subdirs="$subdirs" ac_aux_dir=$ac_aux_dir DEPDIR=$DEPDIR]) +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index ff8cd032f..e37bae5b1 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -526,6 +526,7 @@ Objective-C and Objective-C++ Dialects}. 
+ -finline-functions -finline-functions-called-once -finline-limit=@var{n} @gol + -finline-small-functions -fipa-modref -fipa-cp -fipa-cp-clone @gol + -fipa-bit-cp -fipa-vrp -fipa-pta -fipa-profile -fipa-pure-const @gol ++-fipa-struct-reorg @gol + -fipa-reference -fipa-reference-addressable @gol + -fipa-stack-alignment -fipa-icf -fira-algorithm=@var{algorithm} @gol + -flive-patching=@var{level} @gol +@@ -11886,6 +11887,19 @@ higher. + Discover which functions are pure or constant. + Enabled by default at @option{-O1} and higher. + ++@item -fipa-struct-reorg ++@opindex fipa-struct-reorg ++Perform structure reorganization optimization, that change C-like structures ++layout in order to better utilize spatial locality. This transformation is ++affective for programs containing arrays of structures. Available in two ++compilation modes: profile-based (enabled with @option{-fprofile-generate}) ++or static (which uses built-in heuristics). It works only in whole program ++mode, so it requires @option{-fwhole-program} to be ++enabled. Structures considered @samp{cold} by this transformation are not ++affected (see @option{--param struct-reorg-cold-struct-ratio=@var{value}}). ++ ++With this flag, the program debug info reflects a new structure layout. ++ + @item -fipa-reference + @opindex fipa-reference + Discover which static variables do not escape the +@@ -13772,6 +13786,15 @@ In each case, the @var{value} is an integer. The following choices + of @var{name} are recognized for all targets: + + @table @gcctabopt ++@item struct-reorg-cold-struct-ratio ++The threshold ratio (as a percentage) between a structure frequency ++and the frequency of the hottest structure in the program. This parameter ++is used by struct-reorg optimization enabled by @option{-fipa-struct-reorg}. 
++We say that if the ratio of a structure frequency, calculated by profiling, ++to the hottest structure frequency in the program is less than this ++parameter, then structure reorganization is not applied to this structure. ++The default is 10. ++ + @item predictable-branch-outcome + When branch is predicted to be taken with probability lower than this threshold + (in percent), then it is considered well predictable. +diff --git a/gcc/gimple-ssa-warn-access.cc b/gcc/gimple-ssa-warn-access.cc +index 8d088ad33..a24645783 100644 +--- a/gcc/gimple-ssa-warn-access.cc ++++ b/gcc/gimple-ssa-warn-access.cc +@@ -2193,6 +2193,14 @@ pass_waccess::set_pass_param (unsigned int n, bool early) + bool + pass_waccess::gate (function *) + { ++ /* FIXME: In structure optimizations, some statements will be ++ rewritten and removed from the BB, leaving some unused SSA. ++ In pass waccess, it will traverse all SSA and cause ICE ++ when handling these unused SSA. So temporarily disable ++ pass waccess when enable structure optimizations. */ ++ if (flag_ipa_struct_reorg) ++ return false; ++ + return (warn_free_nonheap_object + || warn_mismatched_alloc + || warn_mismatched_new_delete); +diff --git a/gcc/ipa-param-manipulation.cc b/gcc/ipa-param-manipulation.cc +index 38328c3e8..f9e956008 100644 +--- a/gcc/ipa-param-manipulation.cc ++++ b/gcc/ipa-param-manipulation.cc +@@ -55,7 +55,8 @@ static const char *ipa_param_prefixes[IPA_PARAM_PREFIX_COUNT] + = {"SYNTH", + "ISRA", + "simd", +- "mask"}; ++ "mask", ++ "struct_reorg"}; + + /* Names of parameters for dumping. Keep in sync with enum ipa_parm_op. 
*/ + +diff --git a/gcc/ipa-param-manipulation.h b/gcc/ipa-param-manipulation.h +index a9ad2b216..71f4a0a2f 100644 +--- a/gcc/ipa-param-manipulation.h ++++ b/gcc/ipa-param-manipulation.h +@@ -126,6 +126,7 @@ enum ipa_param_name_prefix_indices + IPA_PARAM_PREFIX_ISRA, + IPA_PARAM_PREFIX_SIMD, + IPA_PARAM_PREFIX_MASK, ++ IPA_PARAM_PREFIX_REORG, + IPA_PARAM_PREFIX_COUNT + }; + +@@ -189,7 +190,7 @@ struct GTY(()) ipa_adjusted_param + + /* Index into ipa_param_prefixes specifying a prefix to be used with + DECL_NAMEs of newly synthesized parameters. */ +- unsigned param_prefix_index : 2; ++ unsigned param_prefix_index : 3; + + /* Storage order of the original parameter (for the cases when the new + parameter is a component of an original one). */ +diff --git a/gcc/ipa-struct-reorg/escapes.def b/gcc/ipa-struct-reorg/escapes.def +new file mode 100644 +index 000000000..c4c8e0739 +--- /dev/null ++++ b/gcc/ipa-struct-reorg/escapes.def +@@ -0,0 +1,60 @@ ++/* Copyright (C) 2016-2023 Free Software Foundation, Inc. ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* Before including this file, you should define a macro: ++ DEF_ESCAPE (ENUM, TEXT) ++ ++ This macro will be called once for each escape reason. The ++ ENUM will be of type "escape_type". The TEXT is describing ++ the reason for the escape. 
++*/ ++DEF_ESCAPE (escape_marked_as_used, "Type used in variable marked as used") ++DEF_ESCAPE (escape_via_global_var, "Type used via a external visible variable") ++DEF_ESCAPE (escape_via_global_init, "Type used via a global init of a variable") ++DEF_ESCAPE (escape_non_supported_allocator, "Type used by allocation which is not currently supported") ++DEF_ESCAPE (escape_dependent_type_escapes, "Type uses a type which escapes or is used by a type which escapes") ++DEF_ESCAPE (escape_var_arg_function, "Types escapes via a variable argument function") ++DEF_ESCAPE (escape_bitfields, "Types has bitfields") ++DEF_ESCAPE (escape_recusive_type, "Type has a recusive relationship") ++DEF_ESCAPE (escape_variable_sized_array, "Type has a variable sized type") ++DEF_ESCAPE (escape_external_function, "Type escapes via an external function call") ++DEF_ESCAPE (escape_visible_function, "Type escapes via expternally visible function call") ++DEF_ESCAPE (escape_pointer_function, "Type escapes via an function pointer call") ++DEF_ESCAPE (escape_unkown_field, "Type escapes via an unkown field accessed") ++DEF_ESCAPE (escape_union, "Type escapes via an union") ++DEF_ESCAPE (escape_inline_asm, "Type escapes via inline-asm") ++DEF_ESCAPE (escape_non_multiply_size, "Type escapes a pointer plus which is not a multiplicate of the size") ++DEF_ESCAPE (escape_cast_void, "Type escapes a cast to/from void*") ++DEF_ESCAPE (escape_cast_another_ptr, "Type escapes a cast to a different pointer") ++DEF_ESCAPE (escape_cast_int, "Type escapes a cast from/to intergral type") ++DEF_ESCAPE (escape_int_const, "Type escapes via integer constant") ++DEF_ESCAPE (escape_vce, "Type escapes via a VIEW_CONVERT_EXPR") ++DEF_ESCAPE (escape_array_access, "Type escapes via an array access") ++DEF_ESCAPE (escape_noclonable_function, "Type escapes via a non-clonable function") ++DEF_ESCAPE (escape_rescusive_type, "Recusive type") ++DEF_ESCAPE (escape_user_alignment, "Type has an user alignment set") ++DEF_ESCAPE 
(escape_volatile, "Type has an variable which is volatile") ++DEF_ESCAPE (escape_non_eq, "Type has a comparison other than equals or not equals") ++DEF_ESCAPE (escape_addr, "Type escapes via taking the address of field") ++DEF_ESCAPE (escape_cannot_change_signature, "Type used in a call that cannot change signature") ++DEF_ESCAPE (escape_non_optimize, "Type used by a function which turns off struct reorg") ++DEF_ESCAPE (escape_array, "Type is used in an array [not handled yet]") ++DEF_ESCAPE (escape_ptr_ptr, "Type is used in a pointer to a pointer [not handled yet]") ++DEF_ESCAPE (escape_return, "Type escapes via a return [not handled yet]") ++ ++#undef DEF_ESCAPE +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +new file mode 100644 +index 000000000..238530860 +--- /dev/null ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -0,0 +1,4015 @@ ++/* Struct-reorg optimizations. ++ Copyright (C) 2016-2023 Free Software Foundation, Inc. ++ Contributed by Andrew Pinski ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. ++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++/* This pass implements the structure reorganization organization ++ (struct-reorg). 
++ ++ Right now it handles just splitting off the hottest fields for a struct ++ of 2 fields: ++ struct s { ++ type1 field1; // Hot field ++ type2 field2; ++ }; ++ s *v; ++ into: ++ struct s_hot { ++ type1 field1; ++ }; ++ struct c_cold { ++ type2 field2; ++ }; ++ s_hot *v_hot; ++ s_cold *v_cold; ++ ++ TODO: This pass can be extended to more fields, and other alogrothims ++ like reordering. ++ ++ This pass operate in four stages: ++ 1. All of the field accesses, declarations (struct types and pointers ++ to that type) and struct types are scanned and recorded. This includes ++ global declarations. Also record all allocation and freeing sites; ++ this is needed for the rewriting phase. ++ ++ FIXME: If there is a top-level inline-asm, the pass immediately returns. ++ ++ 2. Prune out the types which are considered escaping. ++ Examples of types which are considered escaping: ++ a. A declaration has been marked as having the attribute used or ++ has user defined alignment (type too). ++ b. Accesses are via a BIT_FIELD_REF. ++ FIXME: Handle VECTOR_TYPE for this case. ++ c. The "allocation" site is not a known builtin function. ++ d. Casting to/from an integer. ++ ++ 3. Analyze the types for which optimization to do. ++ a. Split the fields into two different structs. ++ (FIXME: two field case handled only) ++ Look at all structs which contain two fields, if one of the fields ++ is hotter then split it and put it on the rewritting for accesses. ++ Allocations and freeing are marked to split into two functions; ++ all uses of that type will now be considered as two. ++ b. Reorder fields hottest to the coldest. TODO: Implement. ++ ++ 4. Rewrite each access and allocation and free whichis marked as ++ rewriting. 
++ ++*/ ++ ++#include "config.h" ++#include "system.h" ++#include "coretypes.h" ++#include "tm.h" ++#include "tree.h" ++#include "tree-pass.h" ++#include "cgraph.h" ++#include "diagnostic-core.h" ++#include "function.h" ++#include "basic-block.h" ++#include "gimple.h" ++#include "vec.h" ++#include "tree-pretty-print.h" ++#include "gimple-pretty-print.h" ++#include "gimple-iterator.h" ++#include "cfg.h" ++#include "ssa.h" ++#include "tree-dfa.h" ++#include "fold-const.h" ++#include "tree-inline.h" ++#include "stor-layout.h" ++#include "tree-into-ssa.h" ++#include "tree-cfg.h" ++#include "alloc-pool.h" ++#include "symbol-summary.h" ++#include "ipa-prop.h" ++#include "ipa-struct-reorg.h" ++#include "tree-eh.h" ++#include "bitmap.h" ++#include "tree-ssa-live.h" /* For remove_unused_locals. */ ++#include "ipa-param-manipulation.h" ++#include "gimplify-me.h" ++ ++namespace { ++ ++using namespace struct_reorg; ++ ++#define VOID_POINTER_P(type) \ ++ (POINTER_TYPE_P (type) && VOID_TYPE_P (TREE_TYPE (type))) ++ ++/* Return true iff TYPE is stdarg va_list type. */ ++ ++static inline bool ++is_va_list_type (tree type) ++{ ++ return TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (va_list_type_node); ++} ++ ++static const char * ++get_type_name (tree type) ++{ ++ const char *tname = NULL; ++ ++ if (type == NULL) ++ return NULL; ++ ++ if (TYPE_NAME (type) != NULL) ++ { ++ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) ++ tname = IDENTIFIER_POINTER (TYPE_NAME (type)); ++ else if (DECL_NAME (TYPE_NAME (type)) != NULL) ++ tname = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))); ++ } ++ return tname; ++} ++ ++/* Return the inner most type for arrays and pointers of TYPE. */ ++ ++static tree ++inner_type (tree type) ++{ ++ while (POINTER_TYPE_P (type) ++ || TREE_CODE (type) == ARRAY_TYPE) ++ type = TREE_TYPE (type); ++ return type; ++} ++ ++/* Return true if TYPE is a type which struct reorg should handled. 
*/ ++ ++static bool ++handled_type (tree type) ++{ ++ type = inner_type (type); ++ if (TREE_CODE (type) == RECORD_TYPE) ++ return !is_va_list_type (type); ++ return false; ++} ++ ++/* The gimplify_buildN API is moved to tree-vect-generic.c locally ++ at commit b972e036f40c12b106f9070c3e8adea0eb8a45fa. ++ ++ The gimplify_buildN API is copied from gcc 10 implementation. ++*/ ++ ++/* Build a binary operation and gimplify it. Emit code before GSI. ++ Return the gimple_val holding the result. */ ++ ++static tree ++gimplify_build2 (gimple_stmt_iterator *gsi, enum tree_code code, ++ tree type, tree a, tree b) ++{ ++ tree ret; ++ ++ ret = fold_build2_loc (gimple_location (gsi_stmt (*gsi)), code, type, a, b); ++ return force_gimple_operand_gsi (gsi, ret, true, NULL, true, ++ GSI_SAME_STMT); ++} ++ ++/* Build a unary operation and gimplify it. Emit code before GSI. ++ Return the gimple_val holding the result. */ ++ ++static tree ++gimplify_build1 (gimple_stmt_iterator *gsi, enum tree_code code, tree type, ++ tree a) ++{ ++ tree ret; ++ ++ ret = fold_build1_loc (gimple_location (gsi_stmt (*gsi)), code, type, a); ++ return force_gimple_operand_gsi (gsi, ret, true, NULL, true, ++ GSI_SAME_STMT); ++} ++ ++} // anon namespace ++ ++ ++namespace struct_reorg { ++ ++/* Constructor of srfunction. */ ++ ++srfunction::srfunction (cgraph_node *n) ++ : node (n), ++ old (NULL), ++ newnode (NULL), ++ newf (NULL) ++{} ++ ++/* Add an ARG to the list of arguments for the function. */ ++ ++void ++srfunction::add_arg (srdecl *arg) ++{ ++ args.safe_push (arg); ++} ++ ++/* Dump the SRFUNCTION to the file FILE. 
*/ ++ ++void ++srfunction::dump (FILE *file) ++{ ++ if (node) ++ { ++ fprintf (file, "function : "); ++ print_generic_expr (file, node->decl); ++ fprintf (file, " with arguments: "); ++ for (unsigned i = 0; i < args.length (); i++) ++ { ++ if (i == 0) ++ fprintf (file, "\n "); ++ else ++ fprintf (file, "\n, "); ++ args[i]->dump (file); ++ } ++ ++ fprintf (file, "\nuses globals: "); ++ for (unsigned i = 0; i < globals.length (); i++) ++ { ++ fprintf (file, "\n "); ++ globals[i]->dump (file); ++ } ++ ++ fprintf (file, "\ndecls: "); ++ } ++ else ++ fprintf (file, "globals : "); ++ ++ for (unsigned i = 0; i < decls.length (); i++) ++ { ++ fprintf (file, "\n "); ++ decls[i]->dump (file); ++ } ++} ++ ++/* Simple dump the SRFUNCTION to the file FILE; ++ used so it is not recusive. */ ++ ++void ++srfunction::simple_dump (FILE *file) ++{ ++ print_generic_expr (file, node->decl); ++} ++ ++/* Constructor of FIELD. */ ++ ++srfield::srfield (tree field, srtype *base) ++ : offset (int_byte_position (field)), ++ fieldtype (TREE_TYPE (field)), ++ fielddecl (field), ++ base (base), ++ type (NULL), ++ clusternum (0) ++{ ++ for (int i = 0; i < max_split; i++) ++ newfield[i] = NULL_TREE; ++} ++ ++/* Constructor of TYPE. */ ++ ++srtype::srtype (tree type) ++ : type (type), ++ chain_type (false), ++ escapes (does_not_escape), ++ visited (false) ++{ ++ for (int i = 0; i < max_split; i++) ++ newtype[i] = NULL_TREE; ++ ++ for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ { ++ if (TREE_CODE (field) == FIELD_DECL) ++ { ++ if (DECL_BIT_FIELD (field)) ++ { ++ escapes = escape_bitfields; ++ continue; ++ } ++ else if (!DECL_SIZE (field) ++ || TREE_CODE (DECL_SIZE (field)) != INTEGER_CST) ++ { ++ escapes = escape_variable_sized_array; ++ break; ++ } ++ srfield *t = new srfield (field, this); ++ fields.safe_push (t); ++ } ++ } ++} ++ ++/* Mark the type as escaping type E at statement STMT. 
*/ ++ ++void ++srtype::mark_escape (escape_type e, gimple *stmt) ++{ ++ /* Once the type has escaped, it should never ++ change back to non escaping. */ ++ gcc_assert (e != does_not_escape); ++ if (has_escaped ()) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nO type: "); ++ simple_dump (dump_file); ++ fprintf (dump_file, " has already escaped."); ++ fprintf (dump_file, " old = \"%s\" ", ++ escape_type_string[escapes - 1]); ++ fprintf (dump_file, " new = \"%s\"\n", escape_type_string[e - 1]); ++ if (stmt) ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ return; ++ } ++ escapes = e; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nN type: "); ++ simple_dump (dump_file); ++ fprintf (dump_file, " new = \"%s\"\n", escape_reason ()); ++ if (stmt) ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++} ++ ++/* Add FIELD to the list of fields that use this type. */ ++ ++void ++srtype::add_field_site (srfield *field) ++{ ++ field_sites.safe_push (field); ++} ++ ++/* Constructor of DECL. */ ++ ++srdecl::srdecl (srtype *tp, tree decl, int argnum) ++ : type (tp), ++ decl (decl), ++ func (NULL_TREE), ++ argumentnum (argnum), ++ visited (false) ++{ ++ if (TREE_CODE (decl) == SSA_NAME) ++ func = current_function_decl; ++ else if (!is_global_var (decl)) ++ func = DECL_CONTEXT (decl); ++ for (int i = 0; i < max_split; i++) ++ newdecl[i] = NULL_TREE; ++} ++ ++/* Find DECL in the function. */ ++ ++srdecl * ++srfunction::find_decl (tree decl) ++{ ++ for (unsigned i = 0; i < decls.length (); i++) ++ if (decls[i]->decl == decl) ++ return decls[i]; ++ return NULL; ++} ++ ++/* Record DECL of the TYPE with argument num ARG. */ ++ ++srdecl * ++srfunction::record_decl (srtype *type, tree decl, int arg) ++{ ++ // Search for the decl to see if it is already there. 
++ srdecl *decl1 = find_decl (decl); ++ ++ if (decl1) ++ return decl1; ++ ++ gcc_assert (type); ++ ++ decl1 = new srdecl (type, decl, arg); ++ decls.safe_push (decl1); ++ return decl1; ++} ++ ++/* Find the field at OFF offset. */ ++ ++srfield * ++srtype::find_field (unsigned HOST_WIDE_INT off) ++{ ++ unsigned int i; ++ srfield *field; ++ ++ /* FIXME: handle array/struct field inside the current struct. */ ++ /* NOTE This does not need to be fixed to handle libquatumn. */ ++ FOR_EACH_VEC_ELT (fields, i, field) ++ { ++ if (off == field->offset) ++ return field; ++ } ++ return NULL; ++} ++ ++/* Add the function FN to the list of functions if it ++ is there not already. */ ++ ++void ++srtype::add_function (srfunction *fn) ++{ ++ unsigned decluid; ++ unsigned i; ++ decluid = DECL_UID (fn->node->decl); ++ ++ srfunction *fn1; ++ // Search for the decl to see if it is already there. ++ FOR_EACH_VEC_ELT (functions, i, fn1) ++ { ++ if (DECL_UID (fn1->node->decl) == decluid) ++ return; ++ } ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Recording new function: %u.\n", decluid); ++ ++ functions.safe_push (fn); ++} ++ ++/* Dump out the type structure to FILE. 
*/ ++ ++void ++srtype::dump (FILE *f) ++{ ++ unsigned int i; ++ srfield *field; ++ srfunction *fn; ++ sraccess *access; ++ ++ if (chain_type) ++ fprintf (f, "chain decl "); ++ ++ fprintf (f, "type : "); ++ print_generic_expr (f, type); ++ fprintf (f, "(%d) { ", TYPE_UID (type)); ++ if (escapes != does_not_escape) ++ fprintf (f, " escapes = \"%s\"\n", escape_reason ()); ++ fprintf (f, " fields = { "); ++ FOR_EACH_VEC_ELT (fields, i, field) ++ { ++ if (i == 0) ++ fprintf (f, "\n "); ++ else ++ fprintf (f, "\n, "); ++ field->dump (f); ++ } ++ fprintf (f, " }\n "); ++ fprintf (f, "\n accesses = {"); ++ FOR_EACH_VEC_ELT (accesses, i, access) ++ { ++ fprintf (f, "\n"); ++ access->dump (f); ++ } ++ fprintf (f, " }\n "); ++ fprintf (f, "\n functions = {"); ++ FOR_EACH_VEC_ELT (functions, i, fn) ++ { ++ fprintf (f, " \n"); ++ fn->simple_dump (f); ++ } ++ fprintf (f, "\n }\n"); ++ fprintf (f, "\n field_sites = {"); ++ FOR_EACH_VEC_ELT (field_sites, i, field) ++ { ++ fprintf (f, " \n"); ++ field->simple_dump (f); ++ } ++ fprintf (f, "\n }\n"); ++ fprintf (f, "}\n"); ++} ++ ++/* A simplified dump out the type structure to FILE. */ ++ ++void ++srtype::simple_dump (FILE *f) ++{ ++ print_generic_expr (f, type); ++} ++ ++/* Analyze the type and decide what to be done with it. */ ++ ++void ++srtype::analyze (void) ++{ ++ /* Chain decl types can't be split ++ so don't try. */ ++ if (chain_type) ++ return; ++ ++ /* If there is only one field then there is nothing ++ to be done. */ ++ if (fields.length () == 1) ++ return; ++ ++ /* For now we unconditionally split only structures with 2 fields ++ into 2 different structures. In future we intend to add profile ++ info and/or static heuristics to differentiate splitting process. */ ++ if (fields.length () == 2) ++ fields[1]->clusternum = 1; ++ ++ /* Otherwise we do nothing. */ ++ if (fields.length () >= 3) ++ return; ++} ++ ++/* Create the new fields for this field. 
*/ ++ ++void ++srfield::create_new_fields (tree newtype[max_split], ++ tree newfields[max_split], ++ tree newlast[max_split]) ++{ ++ tree nt[max_split]; ++ ++ for (unsigned i = 0; i < max_split; i++) ++ nt[i] = NULL; ++ ++ if (type == NULL) ++ nt[0] = fieldtype; ++ else ++ memcpy (nt, type->newtype, sizeof (type->newtype)); ++ ++ for (unsigned i = 0; i < max_split && nt[i] != NULL; i++) ++ { ++ tree field = make_node (FIELD_DECL); ++ if (nt[1] != NULL && DECL_NAME (fielddecl)) ++ { ++ const char *tname = IDENTIFIER_POINTER (DECL_NAME (fielddecl)); ++ char id[10]; ++ char *name; ++ ++ sprintf (id, "%d", i); ++ name = concat (tname, ".reorg.", id, NULL); ++ DECL_NAME (field) = get_identifier (name); ++ free (name); ++ } ++ else ++ DECL_NAME (field) = DECL_NAME (fielddecl); ++ ++ TREE_TYPE (field) = reconstruct_complex_type ( ++ TREE_TYPE (fielddecl), nt[i]); ++ DECL_SOURCE_LOCATION (field) = DECL_SOURCE_LOCATION (fielddecl); ++ SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl)); ++ DECL_USER_ALIGN (field) = DECL_USER_ALIGN (fielddecl); ++ TREE_ADDRESSABLE (field) = TREE_ADDRESSABLE (fielddecl); ++ DECL_NONADDRESSABLE_P (field) = !TREE_ADDRESSABLE (fielddecl); ++ TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (fielddecl); ++ DECL_CONTEXT (field) = newtype[clusternum]; ++ ++ if (newfields[clusternum] == NULL) ++ newfields[clusternum] = newlast[clusternum] = field; ++ else ++ { ++ DECL_CHAIN (newlast[clusternum]) = field; ++ newlast[clusternum] = field; ++ } ++ newfield[i] = field; ++ } ++} ++ ++/* Create the new TYPE corresponding to THIS type. */ ++ ++bool ++srtype::create_new_type (void) ++{ ++ /* If the type has been visited, ++ then return if a new type was ++ created or not. */ ++ if (visited) ++ return has_new_type (); ++ ++ visited = true; ++ ++ if (escapes != does_not_escape) ++ { ++ newtype[0] = type; ++ return false; ++ } ++ ++ bool createnewtype = false; ++ unsigned maxclusters = 0; ++ ++ /* Create a new type for each field. 
*/ ++ for (unsigned i = 0; i < fields.length (); i++) ++ { ++ srfield *field = fields[i]; ++ if (field->type) ++ createnewtype |= field->type->create_new_type (); ++ if (field->clusternum > maxclusters) ++ maxclusters = field->clusternum; ++ } ++ ++ /* If the fields' types did have a change or ++ we are not splitting the struct into two clusters, ++ then just return false and don't change the type. */ ++ if (!createnewtype && maxclusters == 0) ++ { ++ newtype[0] = type; ++ return false; ++ } ++ ++ /* Should have at most max_split clusters. */ ++ gcc_assert (maxclusters < max_split); ++ ++ tree newfields[max_split]; ++ tree newlast[max_split]; ++ ++ maxclusters++; ++ ++ const char *tname = NULL; ++ ++ if (TYPE_NAME (type) != NULL) ++ { ++ if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) ++ tname = IDENTIFIER_POINTER (TYPE_NAME (type)); ++ else if (DECL_NAME (TYPE_NAME (type)) != NULL) ++ tname = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))); ++ } ++ ++ for (unsigned i = 0; i < maxclusters; i++) ++ { ++ newfields[i] = NULL_TREE; ++ newlast[i] = NULL_TREE; ++ newtype[i] = make_node (RECORD_TYPE); ++ ++ char *name = NULL; ++ char id[10]; ++ sprintf (id, "%d", i); ++ if (tname) ++ { ++ name = concat (tname, ".reorg.", id, NULL); ++ TYPE_NAME (newtype[i]) = get_identifier (name); ++ free (name); ++ } ++ } ++ ++ for (unsigned i = 0; i < fields.length (); i++) ++ { ++ srfield *f = fields[i]; ++ f->create_new_fields (newtype, newfields, newlast); ++ } ++ ++ /* No reason to warn about these structs since the warning would ++ have happened already. 
*/ ++ int save_warn_padded = warn_padded; ++ warn_padded = 0; ++ ++ for (unsigned i = 0; i < maxclusters; i++) ++ { ++ TYPE_FIELDS (newtype[i]) = newfields[i]; ++ layout_type (newtype[i]); ++ } ++ ++ warn_padded = save_warn_padded; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Created %d types:\n", maxclusters); ++ for (unsigned i = 0; i < maxclusters; i++) ++ { ++ print_generic_expr (dump_file, newtype[i]); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ ++ return true; ++} ++ ++/* Helper function to copy some attributes from ORIG_DECL to the NEW_DECL. */ ++ ++static inline void ++copy_var_attributes (tree new_decl, tree orig_decl) ++{ ++ DECL_ARTIFICIAL (new_decl) = 1; ++ DECL_EXTERNAL (new_decl) = DECL_EXTERNAL (orig_decl); ++ TREE_STATIC (new_decl) = TREE_STATIC (orig_decl); ++ TREE_PUBLIC (new_decl) = TREE_PUBLIC (orig_decl); ++ TREE_USED (new_decl) = TREE_USED (orig_decl); ++ DECL_CONTEXT (new_decl) = DECL_CONTEXT (orig_decl); ++ TREE_THIS_VOLATILE (new_decl) = TREE_THIS_VOLATILE (orig_decl); ++ TREE_ADDRESSABLE (new_decl) = TREE_ADDRESSABLE (orig_decl); ++ TREE_READONLY (new_decl) = TREE_READONLY (orig_decl); ++ if (is_global_var (orig_decl)) ++ set_decl_tls_model (new_decl, DECL_TLS_MODEL (orig_decl)); ++} ++ ++/* Create all of the new decls (SSA_NAMES included) for THIS function. */ ++ ++void ++srfunction::create_new_decls (void) ++{ ++ /* If this function has been cloned, we don't need to ++ create the new decls. */ ++ if (newnode) ++ return; ++ ++ if (node) ++ set_cfun (DECL_STRUCT_FUNCTION (node->decl)); ++ ++ for (unsigned i = 0; i < decls.length (); i++) ++ { ++ srdecl *decl = decls[i]; ++ srtype *type = decl->type; ++ /* If the type of the decl does not change, ++ then don't create a new decl. */ ++ if (!type->has_new_type ()) ++ { ++ decl->newdecl[0] = decl->decl; ++ continue; ++ } ++ ++ /* Handle SSA_NAMEs. 
*/ ++ if (TREE_CODE (decl->decl) == SSA_NAME) ++ { ++ tree newtype1[max_split]; ++ tree inner = SSA_NAME_VAR (decl->decl); ++ tree newinner[max_split]; ++ memset (newinner, 0, sizeof (newinner)); ++ for (unsigned j = 0; j < max_split && type->newtype[j]; j++) ++ newtype1[j] = reconstruct_complex_type (TREE_TYPE (decls[i]->decl), ++ type->newtype[j]); ++ if (inner) ++ { ++ srdecl *in = find_decl (inner); ++ gcc_assert (in); ++ memcpy (newinner, in->newdecl, sizeof (newinner)); ++ } ++ tree od = decls[i]->decl; ++ /* Create the new ssa names and copy some attributes ++ from the old one. */ ++ for (unsigned j = 0; j < max_split && type->newtype[j]; j++) ++ { ++ tree nd = make_ssa_name (newinner[j] ? newinner[j] ++ : newtype1[j]); ++ decl->newdecl[j] = nd; ++ /* If the old decl was a default definition, ++ handle it specially. */ ++ if (SSA_NAME_IS_DEFAULT_DEF (od)) ++ { ++ SSA_NAME_IS_DEFAULT_DEF (nd) = true; ++ SSA_NAME_DEF_STMT (nd) = gimple_build_nop (); ++ ++ /* Set the default definition for the ssaname if needed. 
*/ ++ if (inner) ++ { ++ gcc_assert (newinner[j]); ++ set_ssa_default_def (cfun, newinner[j], nd); ++ } ++ } ++ SSA_NAME_OCCURS_IN_ABNORMAL_PHI (nd) ++ = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (od); ++ statistics_counter_event (cfun, "Create new ssa_name", 1); ++ } ++ } ++ else if (TREE_CODE (decls[i]->decl) == VAR_DECL) ++ { ++ tree orig_var = decl->decl; ++ const char *tname = NULL; ++ if (DECL_NAME (orig_var)) ++ tname = IDENTIFIER_POINTER (DECL_NAME (orig_var)); ++ for (unsigned j = 0; j < max_split && type->newtype[j]; j++) ++ { ++ tree new_name = NULL; ++ char *name = NULL; ++ char id[10]; ++ sprintf (id, "%d", j); ++ if (tname) ++ { ++ name = concat (tname, ".reorg.", id, NULL); ++ new_name = get_identifier (name); ++ free (name); ++ } ++ tree newtype1 = reconstruct_complex_type (TREE_TYPE (orig_var), ++ type->newtype[j]); ++ decl->newdecl[j] = build_decl (DECL_SOURCE_LOCATION (orig_var), ++ VAR_DECL, new_name, newtype1); ++ copy_var_attributes (decl->newdecl[j], orig_var); ++ if (!is_global_var (orig_var)) ++ add_local_decl (cfun, decl->newdecl[j]); ++ else ++ varpool_node::add (decl->newdecl[j]); ++ statistics_counter_event (cfun, "Create new var decl", 1); ++ } ++ } ++ /* Paramater decls are already handled in create_new_functions. */ ++ else if (TREE_CODE (decls[i]->decl) == PARM_DECL) ++ ; ++ else ++ internal_error ("Unhandled declaration type stored"); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Created New decls for decl:\n"); ++ fprintf (dump_file, "\n"); ++ decls[i]->dump (dump_file); ++ fprintf (dump_file, "\n"); ++ for (unsigned j = 0; j < max_split && decls[i]->newdecl[j]; j++) ++ { ++ print_generic_expr (dump_file, decls[i]->newdecl[j]); ++ fprintf (dump_file, "\n"); ++ } ++ fprintf (dump_file, "\n"); ++ } ++ } ++ ++ set_cfun (NULL); ++} ++ ++/* Dump out the field structure to FILE. 
*/ ++ ++void ++srfield::dump (FILE *f) ++{ ++ fprintf (f, "field (%d) { ", DECL_UID (fielddecl)); ++ fprintf (f, "base = "); ++ base->simple_dump (f); ++ fprintf (f, ", offset = " HOST_WIDE_INT_PRINT_DEC, offset); ++ fprintf (f, ", type = "); ++ print_generic_expr (f, fieldtype); ++ if (type) ++ { ++ fprintf (f, "( srtype = "); ++ type->simple_dump (f); ++ fprintf (f, ")"); ++ } ++ fprintf (f, "\n}\n"); ++} ++ ++/* A simplified dump out the field structure to FILE. */ ++ ++void ++srfield::simple_dump (FILE *f) ++{ ++ fprintf (f, "field (%d)", DECL_UID (fielddecl)); ++} ++ ++/* Dump out the access structure to FILE. */ ++ ++void ++sraccess::dump (FILE *f) ++{ ++ fprintf (f, "access { "); ++ fprintf (f, "type = '("); ++ type->simple_dump (f); ++ fprintf (f, ")'"); ++ if (field) ++ { ++ fprintf (f, ", field = '("); ++ field->simple_dump (f); ++ fprintf (f, ")'"); ++ } ++ else ++ fprintf (f, ", whole type"); ++ fprintf (f, " in function: %s/%d", node->name (), node->order); ++ fprintf (f, ", stmt:\n"); ++ print_gimple_stmt (f, stmt, 0); ++ fprintf (f, "\n }\n"); ++} ++ ++/* Dump out the decl structure to FILE. 
*/ ++ ++void ++srdecl::dump (FILE *file) ++{ ++ if (!func) ++ fprintf (file, "global "); ++ if (argumentnum != -1) ++ fprintf (file, "argument(%d) ", argumentnum); ++ fprintf (file, "decl: "); ++ print_generic_expr (file, decl); ++ fprintf (file, " type: "); ++ type->simple_dump (file); ++} ++ ++} // namespace struct_reorg ++ ++ ++namespace { ++ ++struct ipa_struct_reorg ++{ ++public: ++ // Constructors ++ ipa_struct_reorg (void) ++ : current_function (NULL), ++ done_recording (false) ++ {} ++ ++ // Public methods ++ unsigned execute (void); ++ void mark_type_as_escape (tree type, escape_type, gimple *stmt = NULL); ++private: ++ // Fields ++ auto_vec_del types; ++ auto_vec_del functions; ++ srglobal globals; ++ srfunction *current_function; ++ ++ bool done_recording; ++ ++ // Private methods ++ void dump_types (FILE *f); ++ void dump_types_escaped (FILE *f); ++ void dump_functions (FILE *f); ++ void record_accesses (void); ++ void detect_cycles (void); ++ bool walk_field_for_cycles (srtype *); ++ void prune_escaped_types (void); ++ void propagate_escape (void); ++ void analyze_types (void); ++ void clear_visited (void); ++ bool create_new_types (void); ++ void restore_field_type (void); ++ void create_new_decls (void); ++ srdecl *find_decl (tree); ++ void create_new_functions (void); ++ void create_new_args (cgraph_node *new_node); ++ unsigned rewrite_functions (void); ++ srdecl *record_var (tree decl, ++ escape_type escapes = does_not_escape, ++ int arg = -1); ++ srfunction *record_function (cgraph_node *node); ++ srfunction *find_function (cgraph_node *node); ++ srtype *record_type (tree type); ++ void process_union (tree type); ++ srtype *find_type (tree type); ++ void maybe_record_stmt (cgraph_node *, gimple *); ++ void maybe_record_assign (cgraph_node *, gassign *); ++ void maybe_record_call (cgraph_node *, gcall *); ++ void maybe_record_allocation_site (cgraph_node *, gimple *); ++ void record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt); ++ void 
mark_expr_escape (tree, escape_type, gimple *stmt); ++ tree allocate_size (srtype *t, gimple *stmt); ++ ++ void mark_decls_in_as_not_needed (tree fn); ++ ++ bool rewrite_stmt (gimple *, gimple_stmt_iterator *); ++ bool rewrite_assign (gassign *, gimple_stmt_iterator *); ++ bool rewrite_call (gcall *, gimple_stmt_iterator *); ++ bool rewrite_cond (gcond *, gimple_stmt_iterator *); ++ bool rewrite_debug (gimple *, gimple_stmt_iterator *); ++ bool rewrite_phi (gphi *); ++ bool rewrite_expr (tree expr, ++ tree newexpr[max_split], ++ bool ignore_missing_decl = false); ++ bool rewrite_lhs_rhs (tree lhs, tree rhs, tree newlhs[max_split], ++ tree newrhs[max_split]); ++ bool get_type_field (tree expr, tree &base, bool &indirect, ++ srtype *&type, srfield *&field, ++ bool &realpart, bool &imagpart, ++ bool &address, bool should_create = false, ++ bool can_escape = false); ++ bool wholeaccess (tree expr, tree base, tree accesstype, srtype *t); ++ ++ void check_definition (srdecl *decl, vec &); ++ void check_uses (srdecl *decl, vec &); ++ void check_use (srdecl *decl, gimple *stmt, vec &); ++ void check_type_and_push (tree newdecl, srtype *type, ++ vec &worklist, gimple *stmt); ++ void check_other_side (srdecl *decl, tree other, gimple *stmt, ++ vec &worklist); ++ ++ void find_vars (gimple *stmt); ++ void find_var (tree expr, gimple *stmt); ++ void mark_types_asm (gasm *astmt); ++ ++ bool has_rewritten_type (srfunction *); ++ void maybe_mark_or_record_other_side (tree side, tree other, gimple *stmt); ++}; ++ ++/* Dump all of the recorded types to file F. */ ++ ++void ++ipa_struct_reorg::dump_types (FILE *f) ++{ ++ unsigned i; ++ srtype *type; ++ FOR_EACH_VEC_ELT (types, i, type) ++ { ++ type->dump (f); ++ } ++ fprintf (f, "\n"); ++} ++ ++/* Dump all of the recorded types to file F. 
*/ ++ ++void ++ipa_struct_reorg::dump_types_escaped (FILE *f) ++{ ++ unsigned i; ++ srtype *type; ++ FOR_EACH_VEC_ELT (types, i, type) ++ { ++ if (type->has_escaped ()) ++ { ++ type->simple_dump (f); ++ fprintf (f, " has escaped: \"%s\"\n", type->escape_reason ()); ++ } ++ } ++ fprintf (f, "\n"); ++} ++ ++/* Dump all of the record functions to file F. */ ++ ++void ++ipa_struct_reorg::dump_functions (FILE *f) ++{ ++ unsigned i; ++ srfunction *fn; ++ ++ fprintf (f, "\n\n"); ++ globals.dump (f); ++ fprintf (f, "\n\n"); ++ FOR_EACH_VEC_ELT (functions, i, fn) ++ { ++ fn->dump (f); ++ fprintf (f, "\n"); ++ } ++ fprintf (f, "\n\n"); ++} ++ ++/* Find the recorded srtype corresponding to TYPE. */ ++ ++srtype * ++ipa_struct_reorg::find_type (tree type) ++{ ++ unsigned i; ++ /* Get the main variant as we are going ++ to find that type only. */ ++ type = TYPE_MAIN_VARIANT (type); ++ ++ srtype *type1; ++ // Search for the type to see if it is already there. ++ FOR_EACH_VEC_ELT (types, i, type1) ++ { ++ if (types_compatible_p (type1->type, type)) ++ return type1; ++ } ++ return NULL; ++} ++ ++/* Is TYPE a volatile type or one which points ++ to a volatile type. */ ++ ++static bool ++isvolatile_type (tree type) ++{ ++ if (TYPE_VOLATILE (type)) ++ return true; ++ while (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE) ++ { ++ type = TREE_TYPE (type); ++ if (TYPE_VOLATILE (type)) ++ return true; ++ } ++ return false; ++} ++ ++/* Is TYPE an array type or points to an array type. */ ++ ++static bool ++isarraytype (tree type) ++{ ++ if (TREE_CODE (type) == ARRAY_TYPE) ++ return true; ++ while (POINTER_TYPE_P (type)) ++ { ++ type = TREE_TYPE (type); ++ if (TREE_CODE (type) == ARRAY_TYPE) ++ return true; ++ } ++ return false; ++} ++ ++/* Is TYPE a pointer to another pointer. 
*/ ++ ++static bool ++isptrptr (tree type) ++{ ++ bool firstptr = false; ++ while (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE) ++ { ++ if (POINTER_TYPE_P (type)) ++ { ++ if (firstptr) ++ return true; ++ firstptr = true; ++ } ++ type = TREE_TYPE (type); ++ } ++ return false; ++} ++ ++/* Return the escape type which corresponds to if ++ this is an volatile type, an array type or a pointer ++ to a pointer type. */ ++ ++static escape_type ++escape_type_volatile_array_or_ptrptr (tree type) ++{ ++ if (isvolatile_type (type)) ++ return escape_volatile; ++ if (isarraytype (type)) ++ return escape_array; ++ if (isptrptr (type)) ++ return escape_ptr_ptr; ++ return does_not_escape; ++} ++ ++/* Record TYPE if not already recorded. */ ++ ++srtype * ++ipa_struct_reorg::record_type (tree type) ++{ ++ unsigned typeuid; ++ ++ /* Get the main variant as we are going ++ to record that type only. */ ++ type = TYPE_MAIN_VARIANT (type); ++ typeuid = TYPE_UID (type); ++ ++ srtype *type1; ++ ++ type1 = find_type (type); ++ if (type1) ++ return type1; ++ ++ /* If already done recording just return NULL. */ ++ if (done_recording) ++ return NULL; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Recording new type: %u.\n", typeuid); ++ ++ type1 = new srtype (type); ++ types.safe_push (type1); ++ ++ /* If the type has an user alignment set, ++ that means the user most likely already setup the type. 
*/ ++ if (TYPE_USER_ALIGN (type)) ++ type1->mark_escape (escape_user_alignment, NULL); ++ ++ for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ { ++ if (TREE_CODE (field) == FIELD_DECL) ++ { ++ tree t = TREE_TYPE (field); ++ process_union (t); ++ if (TREE_CODE (inner_type (t)) == UNION_TYPE ++ || TREE_CODE (inner_type (t)) == QUAL_UNION_TYPE) ++ type1->mark_escape (escape_union, NULL); ++ if (isvolatile_type (t)) ++ type1->mark_escape (escape_volatile, NULL); ++ escape_type e = escape_type_volatile_array_or_ptrptr (t); ++ if (e != does_not_escape) ++ type1->mark_escape (e, NULL); ++ if (handled_type (t)) ++ { ++ srtype *t1 = record_type (inner_type (t)); ++ srfield *f = type1->find_field (int_byte_position (field)); ++ /* We might have an variable sized type which ++ we don't set the handle. */ ++ if (f) ++ { ++ f->type = t1; ++ t1->add_field_site (f); ++ } ++ if (t1 == type1) ++ type1->mark_escape (escape_rescusive_type, NULL); ++ } ++ } ++ } ++ ++ return type1; ++} ++ ++/* Mark TYPE as escaping with ESCAPES as the reason. */ ++ ++void ++ipa_struct_reorg::mark_type_as_escape (tree type, ++ escape_type escapes, ++ gimple *stmt) ++{ ++ if (handled_type (type)) ++ { ++ srtype *stype = record_type (inner_type (type)); ++ ++ if (!stype) ++ return; ++ ++ stype->mark_escape (escapes, stmt); ++ } ++} ++ ++/* Maybe process the union of type TYPE, such that marking all of the fields' ++ types as being escaping. */ ++ ++void ++ipa_struct_reorg::process_union (tree type) ++{ ++ static hash_set unions_recorded; ++ ++ type = inner_type (type); ++ if (TREE_CODE (type) != UNION_TYPE ++ && TREE_CODE (type) != QUAL_UNION_TYPE) ++ return; ++ ++ type = TYPE_MAIN_VARIANT (type); ++ ++ /* We already processed this type. 
*/ ++ if (unions_recorded.add (type)) ++ return; ++ ++ for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ { ++ if (TREE_CODE (field) == FIELD_DECL) ++ { ++ mark_type_as_escape (TREE_TYPE (field), escape_union); ++ process_union (TREE_TYPE (field)); ++ } ++ } ++} ++ ++/* Used by record_var function as a callback to walk_tree. ++ Mark the type as escaping if it has expressions which ++ cannot be converted for global initializations. */ ++ ++static tree ++record_init_types (tree *tp, int *walk_subtrees, void *data) ++{ ++ ipa_struct_reorg *c = (ipa_struct_reorg *)data; ++ switch (TREE_CODE (*tp)) ++ { ++ CASE_CONVERT: ++ case COMPONENT_REF: ++ case VIEW_CONVERT_EXPR: ++ case ARRAY_REF: ++ { ++ tree typeouter = TREE_TYPE (*tp); ++ tree typeinner = TREE_TYPE (TREE_OPERAND (*tp, 0)); ++ c->mark_type_as_escape (typeouter, escape_via_global_init); ++ c->mark_type_as_escape (typeinner, escape_via_global_init); ++ break; ++ } ++ case INTEGER_CST: ++ if (!integer_zerop (*tp)) ++ c->mark_type_as_escape (TREE_TYPE (*tp), escape_via_global_init); ++ break; ++ case VAR_DECL: ++ case PARM_DECL: ++ case FIELD_DECL: ++ c->mark_type_as_escape (TREE_TYPE (*tp), escape_via_global_init); ++ *walk_subtrees = false; ++ break; ++ default: ++ *walk_subtrees = true; ++ break; ++ } ++ return NULL_TREE; ++} ++ ++/* Record var DECL; optionally specify the escape reason and the argument ++ number in a function. 
*/ ++ ++srdecl * ++ipa_struct_reorg::record_var (tree decl, escape_type escapes, int arg) ++{ ++ srtype *type; ++ srdecl *sd = NULL; ++ ++ process_union (TREE_TYPE (decl)); ++ ++ if (handled_type (TREE_TYPE (decl))) ++ { ++ type = record_type (inner_type (TREE_TYPE (decl))); ++ escape_type e; ++ ++ if (done_recording && !type) ++ return NULL; ++ ++ gcc_assert (type); ++ if (TREE_CODE (decl) == VAR_DECL && is_global_var (decl)) ++ sd = globals.record_decl (type, decl, arg); ++ else ++ { ++ gcc_assert (current_function); ++ sd = current_function->record_decl (type, decl, arg); ++ } ++ ++ /* If the variable has the "used" attribute, ++ then treat the type as escaping. */ ++ if (escapes != does_not_escape) ++ e = escapes; ++ else if (TREE_CODE (decl) != SSA_NAME && DECL_PRESERVE_P (decl)) ++ e = escape_marked_as_used; ++ else if (TREE_THIS_VOLATILE (decl)) ++ e = escape_volatile; ++ else if (TREE_CODE (decl) != SSA_NAME && DECL_USER_ALIGN (decl)) ++ e = escape_user_alignment; ++ else if (TREE_CODE (decl) != SSA_NAME && TREE_STATIC (decl) ++ && TREE_PUBLIC (decl)) ++ e = escape_via_global_var; ++ /* We don't have an initlizer. */ ++ else if (TREE_CODE (decl) != SSA_NAME ++ && DECL_INITIAL (decl) == error_mark_node) ++ e = escape_via_global_var; ++ else ++ e = escape_type_volatile_array_or_ptrptr (TREE_TYPE (decl)); ++ ++ if (e != does_not_escape) ++ type->mark_escape (e, NULL); ++ } ++ ++ /* Record the initial usage of variables as types escapes. */ ++ if (TREE_CODE (decl) != SSA_NAME && TREE_STATIC (decl) ++ && DECL_INITIAL (decl)) ++ { ++ walk_tree_without_duplicates (&DECL_INITIAL (decl), ++ record_init_types, this); ++ if (!integer_zerop (DECL_INITIAL (decl)) ++ && DECL_INITIAL (decl) != error_mark_node) ++ mark_type_as_escape (TREE_TYPE (decl), escape_via_global_init); ++ } ++ return sd; ++} ++ ++/* Find void* ssa_names which are used inside MEM[] or if we have &a.c, ++ mark the type as escaping. 
*/ ++ ++void ++ipa_struct_reorg::find_var (tree expr, gimple *stmt) ++{ ++ /* If we have VCE mark the outer type as escaping and the inner one ++ Also mark the inner most operand. */ ++ if (TREE_CODE (expr) == VIEW_CONVERT_EXPR) ++ { ++ mark_type_as_escape (TREE_TYPE (expr), escape_vce, stmt); ++ mark_type_as_escape (TREE_TYPE (TREE_OPERAND (expr, 0)), ++ escape_vce, stmt); ++ } ++ ++ /* If we have &b.c then we need to mark the type of b ++ as escaping as tracking a will be hard. */ ++ if (TREE_CODE (expr) == ADDR_EXPR ++ || TREE_CODE (expr) == VIEW_CONVERT_EXPR) ++ { ++ tree r = TREE_OPERAND (expr, 0); ++ if (handled_component_p (r) ++ || TREE_CODE (r) == MEM_REF) ++ { ++ while (handled_component_p (r) ++ || TREE_CODE (r) == MEM_REF) ++ { ++ if (TREE_CODE (r) == VIEW_CONVERT_EXPR) ++ { ++ mark_type_as_escape (TREE_TYPE (r), escape_vce, stmt); ++ mark_type_as_escape (TREE_TYPE (TREE_OPERAND (r, 0)), ++ escape_vce, stmt); ++ } ++ if (TREE_CODE (r) == MEM_REF) ++ mark_type_as_escape (TREE_TYPE (TREE_OPERAND (r, 1)), ++ escape_addr, stmt); ++ r = TREE_OPERAND (r, 0); ++ } ++ mark_expr_escape (r, escape_addr, stmt); ++ } ++ } ++ ++ tree base; ++ bool indirect; ++ srtype *type; ++ srfield *field; ++ bool realpart, imagpart, address; ++ get_type_field (expr, base, indirect, type, field, ++ realpart, imagpart, address, true, true); ++} ++ ++void ++ipa_struct_reorg::find_vars (gimple *stmt) ++{ ++ gasm *astmt; ++ switch (gimple_code (stmt)) ++ { ++ case GIMPLE_ASSIGN: ++ if (gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS ++ || gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs = gimple_assign_rhs1 (stmt); ++ find_var (gimple_assign_lhs (stmt), stmt); ++ find_var (gimple_assign_rhs1 (stmt), stmt); ++ if (TREE_CODE (lhs) == SSA_NAME ++ && VOID_POINTER_P (TREE_TYPE (lhs)) ++ && handled_type (TREE_TYPE (rhs))) ++ { ++ srtype *t = find_type (inner_type (TREE_TYPE (rhs))); ++ srdecl *d = find_decl (lhs); ++ if (!d && 
t) ++ current_function->record_decl (t, lhs, -1); ++ } ++ if (TREE_CODE (rhs) == SSA_NAME ++ && VOID_POINTER_P (TREE_TYPE (rhs)) ++ && handled_type (TREE_TYPE (lhs))) ++ { ++ srtype *t = find_type (inner_type (TREE_TYPE (lhs))); ++ srdecl *d = find_decl (rhs); ++ if (!d && t) ++ current_function->record_decl (t, rhs, -1); ++ } ++ } ++ break; ++ ++ case GIMPLE_CALL: ++ if (gimple_call_lhs (stmt)) ++ find_var (gimple_call_lhs (stmt), stmt); ++ ++ if (gimple_call_chain (stmt)) ++ find_var (gimple_call_chain (stmt), stmt); ++ ++ for (unsigned i = 0; i < gimple_call_num_args (stmt); i++) ++ find_var (gimple_call_arg (stmt, i), stmt); ++ break; ++ ++ case GIMPLE_ASM: ++ astmt = as_a (stmt); ++ for (unsigned i = 0; i < gimple_asm_ninputs (astmt); i++) ++ find_var (TREE_VALUE (gimple_asm_input_op (astmt, i)), stmt); ++ for (unsigned i = 0; i < gimple_asm_noutputs (astmt); i++) ++ find_var (TREE_VALUE (gimple_asm_output_op (astmt, i)), stmt); ++ mark_types_asm (astmt); ++ break; ++ ++ case GIMPLE_RETURN: ++ { ++ tree expr = gimple_return_retval (as_a (stmt)); ++ if (expr) ++ find_var (expr, stmt); ++ /* return &a; should mark the type of a as escaping ++ through a return. */ ++ if (expr && TREE_CODE (expr) == ADDR_EXPR) ++ { ++ expr = TREE_OPERAND (expr, 0); ++ srdecl *d = find_decl (expr); ++ if (d) ++ d->type->mark_escape (escape_return, stmt); ++ } ++ } ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++/* Maybe record access of statement for further analaysis. */ ++ ++void ++ipa_struct_reorg::maybe_record_stmt (cgraph_node *node, gimple *stmt) ++{ ++ switch (gimple_code (stmt)) ++ { ++ case GIMPLE_ASSIGN: ++ maybe_record_assign (node, as_a (stmt)); ++ break; ++ case GIMPLE_CALL: ++ maybe_record_call (node, as_a (stmt)); ++ break; ++ case GIMPLE_DEBUG: ++ break; ++ case GIMPLE_GOTO: ++ case GIMPLE_SWITCH: ++ break; ++ default: ++ break; ++ } ++} ++ ++/* This function checks whether ARG is a result of multiplication ++ of some number by STRUCT_SIZE. 
If yes, the function returns true ++ and this number is filled into NUM. */ ++ ++static bool ++is_result_of_mult (tree arg, tree *num, tree struct_size) ++{ ++ if (!struct_size ++ || TREE_CODE (struct_size) != INTEGER_CST ++ || integer_zerop (struct_size)) ++ return false; ++ ++ /* If we have a integer, just check if it is a multiply of STRUCT_SIZE. */ ++ if (TREE_CODE (arg) == INTEGER_CST) ++ { ++ if (integer_zerop (size_binop (FLOOR_MOD_EXPR, arg, struct_size))) ++ { ++ *num = size_binop (FLOOR_DIV_EXPR, arg, struct_size); ++ return true; ++ } ++ return false; ++ } ++ gimple *size_def_stmt = SSA_NAME_DEF_STMT (arg); ++ ++ /* If the allocation statement was of the form ++ D.2229_10 = (D.2228_9); ++ then size_def_stmt can be D.2228_9 = num.3_8 * 8; */ ++ ++ while (size_def_stmt && is_gimple_assign (size_def_stmt)) ++ { ++ tree lhs = gimple_assign_lhs (size_def_stmt); ++ ++ /* We expect temporary here. */ ++ if (!is_gimple_reg (lhs)) ++ return false; ++ ++ // FIXME: this should handle SHIFT also. 
++ if (gimple_assign_rhs_code (size_def_stmt) == PLUS_EXPR) ++ { ++ tree num1, num2; ++ tree arg0 = gimple_assign_rhs1 (size_def_stmt); ++ tree arg1 = gimple_assign_rhs2 (size_def_stmt); ++ if (!is_result_of_mult (arg0, &num1, struct_size)) ++ return false; ++ if (!is_result_of_mult (arg1, &num2, struct_size)) ++ return false; ++ *num = size_binop (PLUS_EXPR, num1, num2); ++ return true; ++ } ++ else if (gimple_assign_rhs_code (size_def_stmt) == MULT_EXPR) ++ { ++ tree arg0 = gimple_assign_rhs1 (size_def_stmt); ++ tree arg1 = gimple_assign_rhs2 (size_def_stmt); ++ tree num1; ++ ++ if (is_result_of_mult (arg0, &num1, struct_size)) ++ { ++ *num = size_binop (MULT_EXPR, arg1, num1); ++ return true; ++ } ++ if (is_result_of_mult (arg1, &num1, struct_size)) ++ { ++ *num = size_binop (MULT_EXPR, arg0, num1); ++ return true; ++ } ++ ++ *num = NULL_TREE; ++ return false; ++ } ++ else if (gimple_assign_rhs_code (size_def_stmt) == SSA_NAME) ++ { ++ arg = gimple_assign_rhs1 (size_def_stmt); ++ size_def_stmt = SSA_NAME_DEF_STMT (arg); ++ } ++ else ++ { ++ *num = NULL_TREE; ++ return false; ++ } ++ } ++ ++ *num = NULL_TREE; ++ return false; ++} ++ ++/* Return TRUE if STMT is an allocation statement that is handled. */ ++ ++static bool ++handled_allocation_stmt (gimple *stmt) ++{ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) ++ return true; ++ return false; ++} ++ ++/* Returns the allocated size / T size for STMT. That is the number of ++ elements in the array allocated. 
*/ ++ ++tree ++ipa_struct_reorg::allocate_size (srtype *type, gimple *stmt) ++{ ++ if (!stmt ++ || gimple_code (stmt) != GIMPLE_CALL ++ || !handled_allocation_stmt (stmt)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nNot a allocate statment:\n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ return NULL; ++ } ++ ++ if (type->has_escaped ()) ++ return NULL; ++ ++ tree struct_size = TYPE_SIZE_UNIT (type->type); ++ ++ tree size = gimple_call_arg (stmt, 0); ++ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC)) ++ size = gimple_call_arg (stmt, 1); ++ else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) ++ { ++ tree arg1; ++ arg1 = gimple_call_arg (stmt, 1); ++ /* Check that second argument is a constant equal to ++ the size of structure. */ ++ if (operand_equal_p (arg1, struct_size, 0)) ++ return size; ++ /* Check that first argument is a constant equal to ++ the size of structure. 
*/ ++ if (operand_equal_p (size, struct_size, 0)) ++ return arg1; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\ncalloc the correct size:\n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ return NULL; ++ } ++ ++ tree num; ++ if (!is_result_of_mult (size, &num, struct_size)) ++ return NULL; ++ ++ return num; ++} ++ ++void ++ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other, ++ gimple *stmt) ++{ ++ gcc_assert (TREE_CODE (side) == SSA_NAME || TREE_CODE (side) == ADDR_EXPR); ++ srtype *type = NULL; ++ if (handled_type (TREE_TYPE (other))) ++ type = record_type (inner_type (TREE_TYPE (other))); ++ if (TREE_CODE (side) == ADDR_EXPR) ++ side = TREE_OPERAND (side, 0); ++ srdecl *d = find_decl (side); ++ if (!type) ++ { ++ if (!d) ++ return; ++ if (TREE_CODE (side) == SSA_NAME ++ && VOID_POINTER_P (TREE_TYPE (side))) ++ return; ++ d->type->mark_escape (escape_cast_another_ptr, stmt); ++ return; ++ } ++ ++ if (!d) ++ { ++ if (VOID_POINTER_P (TREE_TYPE (side)) ++ && TREE_CODE (side) == SSA_NAME) ++ current_function->record_decl (type, side, -1); ++ else ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ } ++ else if (type != d->type) ++ { ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ d->type->mark_escape (escape_cast_another_ptr, stmt); ++ } ++} ++ ++/* Record accesses in an assignment statement STMT. */ ++ ++void ++ipa_struct_reorg::maybe_record_assign (cgraph_node *node, gassign *stmt) ++{ ++ if (gimple_clobber_p (stmt)) ++ { ++ record_stmt_expr (gimple_assign_lhs (stmt), node, stmt); ++ return; ++ } ++ ++ if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree num; ++ if (!handled_type (TREE_TYPE (lhs))) ++ return; ++ /* Check if rhs2 is a multiplication of the size of the type. 
*/ ++ if (is_result_of_mult (rhs2, &num, ++ TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (lhs))))) ++ { ++ record_stmt_expr (lhs, node, stmt); ++ record_stmt_expr (rhs1, node, stmt); ++ } ++ else ++ { ++ mark_expr_escape (lhs, escape_non_multiply_size, stmt); ++ mark_expr_escape (rhs1, escape_non_multiply_size, stmt); ++ } ++ return; ++ } ++ /* Copies, References, Taking addresses. */ ++ if (gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs = gimple_assign_rhs1 (stmt); ++ /* If we have a = &b.c then we need to mark the type of b ++ as escaping as tracking a will be hard. */ ++ if (TREE_CODE (rhs) == ADDR_EXPR) ++ { ++ tree r = TREE_OPERAND (rhs, 0); ++ if (handled_component_p (r)) ++ { ++ while (handled_component_p (r)) ++ r = TREE_OPERAND (r, 0); ++ mark_expr_escape (r, escape_addr, stmt); ++ return; ++ } ++ } ++ if ((TREE_CODE (rhs) == SSA_NAME || TREE_CODE (rhs) == ADDR_EXPR)) ++ maybe_mark_or_record_other_side (rhs, lhs, stmt); ++ if (TREE_CODE (lhs) == SSA_NAME) ++ maybe_mark_or_record_other_side (lhs, rhs, stmt); ++ } ++} ++ ++static tree ++get_ref_base_and_offset (tree &e, HOST_WIDE_INT &offset, ++ bool &realpart, bool &imagpart, ++ tree &accesstype) ++{ ++ offset = 0; ++ realpart = false; ++ imagpart = false; ++ accesstype = NULL_TREE; ++ if (TREE_CODE (e) == REALPART_EXPR) ++ { ++ e = TREE_OPERAND (e, 0); ++ realpart = true; ++ } ++ if (TREE_CODE (e) == IMAGPART_EXPR) ++ { ++ e = TREE_OPERAND (e, 0); ++ imagpart = true; ++ } ++ tree expr = e; ++ while (true) ++ { ++ switch (TREE_CODE (expr)) ++ { ++ case COMPONENT_REF: ++ { ++ tree field = TREE_OPERAND (expr, 1); ++ tree field_off = byte_position (field); ++ if (TREE_CODE (field_off) != INTEGER_CST) ++ return NULL; ++ offset += tree_to_shwi (field_off); ++ expr = TREE_OPERAND (expr, 0); ++ accesstype = NULL; ++ break; ++ } ++ case MEM_REF: ++ { ++ tree field_off = TREE_OPERAND (expr, 1); ++ gcc_assert (TREE_CODE (field_off) == INTEGER_CST); ++ /* So we 
can mark the types as escaping if different. */ ++ accesstype = TREE_TYPE (field_off); ++ offset += tree_to_uhwi (field_off); ++ return TREE_OPERAND (expr, 0); ++ } ++ default: ++ return expr; ++ } ++ } ++} ++ ++/* Return true if EXPR was accessing the whole type T. */ ++ ++bool ++ipa_struct_reorg::wholeaccess (tree expr, tree base, ++ tree accesstype, srtype *t) ++{ ++ if (expr == base) ++ return true; ++ ++ if (TREE_CODE (expr) == ADDR_EXPR && TREE_OPERAND (expr, 0) == base) ++ return true; ++ ++ if (!accesstype) ++ return false; ++ ++ if (!types_compatible_p (TREE_TYPE (expr), TREE_TYPE (accesstype))) ++ return false; ++ ++ if (!handled_type (TREE_TYPE (expr))) ++ return false; ++ ++ srtype *other_type = find_type (inner_type (TREE_TYPE (expr))); ++ ++ if (t == other_type) ++ return true; ++ ++ return false; ++} ++ ++bool ++ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, ++ srtype *&type, srfield *&field, ++ bool &realpart, bool &imagpart, ++ bool &address, bool should_create, ++ bool can_escape) ++{ ++ HOST_WIDE_INT offset; ++ tree accesstype; ++ address = false; ++ bool mark_as_bit_field = false; ++ ++ if (TREE_CODE (expr) == BIT_FIELD_REF) ++ { ++ expr = TREE_OPERAND (expr, 0); ++ mark_as_bit_field = true; ++ } ++ ++ base = get_ref_base_and_offset (expr, offset, realpart, imagpart, ++ accesstype); ++ ++ /* Variable access, unkown type. 
*/ ++ if (base == NULL) ++ return false; ++ ++ if (TREE_CODE (base) == ADDR_EXPR) ++ { ++ address = true; ++ base = TREE_OPERAND (base, 0); ++ } ++ ++ if (offset != 0 && accesstype) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Non zero offset (%d) with MEM.\n", (int)offset); ++ print_generic_expr (dump_file, expr); ++ fprintf (dump_file, "\n"); ++ print_generic_expr (dump_file, base); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ ++ srdecl *d = find_decl (base); ++ srtype *t; ++ ++ if (integer_zerop (base)) ++ { ++ gcc_assert (!d); ++ if (!accesstype) ++ return false; ++ t = find_type (inner_type (inner_type (accesstype))); ++ if (!t && should_create && handled_type (accesstype)) ++ t = record_type (inner_type (accesstype)); ++ if (!t) ++ return false; ++ } ++ else if (!d && accesstype) ++ { ++ if (!should_create) ++ return false; ++ if (!handled_type (accesstype)) ++ return false; ++ t = find_type (inner_type (inner_type (accesstype))); ++ if (!t) ++ t = record_type (inner_type (accesstype)); ++ if (!t || t->has_escaped ()) ++ return false; ++ /* If base is not void* mark the type as escaping. 
*/ ++ if (!VOID_POINTER_P (TREE_TYPE (base))) ++ { ++ gcc_assert (can_escape); ++ t->mark_escape (escape_cast_another_ptr, NULL); ++ return false; ++ } ++ if (TREE_CODE (base) == SSA_NAME) ++ current_function->record_decl (t, base, -1); ++ } ++ else if (!d) ++ return false; ++ else ++ t = d->type; ++ ++ if (t->has_escaped ()) ++ return false; ++ ++ if (mark_as_bit_field) ++ { ++ gcc_assert (can_escape); ++ t->mark_escape (escape_bitfields, NULL); ++ return false; ++ } ++ ++ if (wholeaccess (expr, base, accesstype, t)) ++ { ++ field = NULL; ++ type = t; ++ indirect = accesstype != NULL; ++ return true; ++ } ++ ++ srfield *f = t->find_field (offset); ++ if (!f) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nunkown field\n"); ++ print_generic_expr (dump_file, expr); ++ fprintf (dump_file, "\n"); ++ print_generic_expr (dump_file, base); ++ fprintf (dump_file, "\n"); ++ } ++ gcc_assert (can_escape); ++ t->mark_escape (escape_unkown_field, NULL); ++ return false; ++ } ++ if (!types_compatible_p (f->fieldtype, TREE_TYPE (expr))) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nfieldtype = "); ++ print_generic_expr (dump_file, f->fieldtype); ++ fprintf (dump_file, "\naccess type = "); ++ print_generic_expr (dump_file, TREE_TYPE (expr)); ++ fprintf (dump_file, "original expr = "); ++ print_generic_expr (dump_file, expr); ++ fprintf (dump_file, "\n"); ++ } ++ gcc_assert (can_escape); ++ t->mark_escape (escape_unkown_field, NULL); ++ return false; ++ } ++ field = f; ++ type = t; ++ indirect = accesstype != NULL; ++ return true; ++} ++ ++/* Mark the type used in EXPR as escaping. 
*/ ++ ++void ++ipa_struct_reorg::mark_expr_escape (tree expr, escape_type escapes, ++ gimple *stmt) ++{ ++ tree base; ++ bool indirect; ++ srtype *type; ++ srfield *field; ++ bool realpart, imagpart, address; ++ if (!get_type_field (expr, base, indirect, type, field, ++ realpart, imagpart, address)) ++ return; ++ ++ type->mark_escape (escapes, stmt); ++} ++ ++/* Record accesses in a call statement STMT. */ ++ ++void ++ipa_struct_reorg::maybe_record_call (cgraph_node *node, gcall *stmt) ++{ ++ tree argtype; ++ tree fndecl; ++ escape_type escapes = does_not_escape; ++ bool free_or_realloc = gimple_call_builtin_p (stmt, BUILT_IN_FREE) ++ || gimple_call_builtin_p (stmt, BUILT_IN_REALLOC); ++ ++ /* We check allocation sites in a different location. */ ++ if (handled_allocation_stmt (stmt)) ++ return; ++ ++ /* A few cases here: ++ 1) assigned from the lhs ++ 2) Used in argument ++ If a function being called is global (or indirect) ++ then we reject the types as being escaping. */ ++ ++ if (tree chain = gimple_call_chain (stmt)) ++ record_stmt_expr (chain, node, stmt); ++ ++ /* Assigned from LHS. */ ++ if (tree lhs = gimple_call_lhs (stmt)) ++ { ++ /* FIXME: handle return types. */ ++ mark_type_as_escape (TREE_TYPE (lhs), escape_return); ++ } ++ ++ /* If we have an internal call, just record the stmt. */ ++ if (gimple_call_internal_p (stmt)) ++ { ++ for (unsigned i = 0; i < gimple_call_num_args (stmt); i++) ++ record_stmt_expr (gimple_call_arg (stmt, i), node, stmt); ++ return; ++ } ++ ++ fndecl = gimple_call_fndecl (stmt); ++ ++ /* If we have an indrect call, just mark the types as escape. */ ++ if (!fndecl) ++ escapes = escape_pointer_function; ++ /* Non local functions cause escape except for calls to free ++ and realloc. ++ FIXME: should support function annotations too. 
*/ ++ else if (!free_or_realloc ++ && !cgraph_node::local_info_node (fndecl)->local) ++ escapes = escape_external_function; ++ else if (!free_or_realloc ++ && !cgraph_node::local_info_node (fndecl)->can_change_signature) ++ escapes = escape_cannot_change_signature; ++ /* FIXME: we should be able to handle functions in other partitions. */ ++ else if (symtab_node::get (fndecl)->in_other_partition) ++ escapes = escape_external_function; ++ ++ if (escapes != does_not_escape) ++ { ++ for (unsigned i = 0; i < gimple_call_num_args (stmt); i++) ++ mark_type_as_escape (TREE_TYPE (gimple_call_arg (stmt, i)), ++ escapes); ++ return; ++ } ++ ++ argtype = TYPE_ARG_TYPES (gimple_call_fntype (stmt)); ++ for (unsigned i = 0; i < gimple_call_num_args (stmt); i++) ++ { ++ tree arg = gimple_call_arg (stmt, i); ++ if (argtype) ++ { ++ tree argtypet = TREE_VALUE (argtype); ++ if (!free_or_realloc ++ && VOID_POINTER_P (argtypet)) ++ mark_type_as_escape (TREE_TYPE (arg), escape_cast_void); ++ else ++ record_stmt_expr (arg, node, stmt); ++ } ++ else ++ mark_type_as_escape (TREE_TYPE (arg), escape_var_arg_function); ++ ++ argtype = argtype ? TREE_CHAIN (argtype) : NULL_TREE; ++ } ++} ++ ++void ++ipa_struct_reorg::record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt) ++{ ++ tree base; ++ bool indirect; ++ srtype *type; ++ srfield *field; ++ bool realpart, imagpart, address; ++ if (!get_type_field (expr, base, indirect, type, field, ++ realpart, imagpart, address)) ++ return; ++ ++ if (!opt_for_fn (current_function_decl, flag_ipa_struct_reorg)) ++ type->mark_escape (escape_non_optimize, stmt); ++ ++ /* Record it. */ ++ type->add_access (new sraccess (stmt, node, type, field)); ++} ++ ++/* Find function corresponding to NODE. 
*/ ++ ++srfunction * ++ipa_struct_reorg::find_function (cgraph_node *node) ++{ ++ for (unsigned i = 0; i < functions.length (); i++) ++ if (functions[i]->node == node) ++ return functions[i]; ++ return NULL; ++} ++ ++void ++ipa_struct_reorg::check_type_and_push (tree newdecl, srtype *type, ++ vec &worklist, ++ gimple *stmt) ++{ ++ if (integer_zerop (newdecl)) ++ return; ++ ++ if (TREE_CODE (newdecl) == ADDR_EXPR) ++ { ++ srdecl *d = find_decl (TREE_OPERAND (newdecl, 0)); ++ if (!d) ++ { ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ return; ++ } ++ if (d->type == type) ++ return; ++ ++ srtype *type1 = d->type; ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ type1->mark_escape (escape_cast_another_ptr, stmt); ++ return; ++ } ++ ++ srdecl *d = find_decl (newdecl); ++ if (!d) ++ { ++ if (TREE_CODE (newdecl) == INTEGER_CST) ++ { ++ type->mark_escape (escape_int_const, stmt); ++ return; ++ } ++ /* If we have a non void* or a decl (which is hard to track), ++ then mark the type as escaping. */ ++ if (!VOID_POINTER_P (TREE_TYPE (newdecl)) ++ || DECL_P (newdecl)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nunkown decl: "); ++ print_generic_expr (dump_file, newdecl); ++ fprintf (dump_file, " in type:\n"); ++ print_generic_expr (dump_file, TREE_TYPE (newdecl)); ++ fprintf (dump_file, "\n"); ++ } ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ return; ++ } ++ /* At this point there should only be unkown void* ssa names. */ ++ gcc_assert (TREE_CODE (newdecl) == SSA_NAME); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nrecording unkown decl: "); ++ print_generic_expr (dump_file, newdecl); ++ fprintf (dump_file, " as type:\n"); ++ type->simple_dump (dump_file); ++ fprintf (dump_file, "\n"); ++ } ++ d = current_function->record_decl (type, newdecl, -1); ++ worklist.safe_push (d); ++ return; ++ } ++ ++ /* Only add to the worklist if the decl is a SSA_NAME. 
*/ ++ if (TREE_CODE (newdecl) == SSA_NAME) ++ worklist.safe_push (d); ++ if (d->type == type) ++ return; ++ ++ srtype *type1 = d->type; ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ type1->mark_escape (escape_cast_another_ptr, stmt); ++} ++ ++/* ++ 2) Check SSA_NAMEs for non type usages (source or use) (worlist of srdecl) ++ a) if the SSA_NAME is sourced from a pointer plus, record the pointer and ++ check to make sure the addition was a multiple of the size. ++ check the pointer type too. ++ b) If the name is sourced from an allocation check the allocation ++ i) Add SSA_NAME (void*) to the worklist if allocated from realloc ++ c) if the name is from a param, make sure the param type was of the ++ original type ++ d) if the name is from a cast/assignment, make sure it is used as that ++ type or void* ++ i) If void* then push the ssa_name into worklist ++*/ ++void ++ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) ++{ ++ tree ssa_name = decl->decl; ++ srtype *type = decl->type; ++ ++ /* ++ c) if the name is from a param, make sure the param type was of the ++ original type. ++ */ ++ if (SSA_NAME_IS_DEFAULT_DEF (ssa_name)) ++ { ++ tree var = SSA_NAME_VAR (ssa_name); ++ if (var ++ && TREE_CODE (var) == PARM_DECL ++ && VOID_POINTER_P (TREE_TYPE (ssa_name))) ++ type->mark_escape (escape_cast_void, NULL); ++ return; ++ } ++ gimple *stmt = SSA_NAME_DEF_STMT (ssa_name); ++ ++ /* ++ b) If the name is sourced from an allocation check the allocation ++ i) Add SSA_NAME (void*) to the worklist if allocated from realloc ++ */ ++ if (gimple_code (stmt) == GIMPLE_CALL) ++ { ++ /* For realloc, check the type of the argument. 
*/ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) ++ check_type_and_push (gimple_call_arg (stmt, 0), type, worklist, stmt); ++ ++ if (!handled_allocation_stmt (stmt) ++ || !allocate_size (type, stmt)) ++ type->mark_escape (escape_return, stmt); ++ return; ++ } ++ /* If the SSA_NAME is sourced from an inline-asm, ++ just mark the type as escaping. */ ++ if (gimple_code (stmt) == GIMPLE_ASM) ++ { ++ type->mark_escape (escape_inline_asm, stmt); ++ return; ++ } ++ ++ /* If the SSA_NAME is sourced from a PHI check add ++ each name to the worklist and check to make sure ++ they are used correctly. */ ++ if (gimple_code (stmt) == GIMPLE_PHI) ++ { ++ for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++) ++ check_type_and_push (gimple_phi_arg_def (stmt, i), ++ type, worklist, stmt); ++ return; ++ } ++ ++ gcc_assert (gimple_code (stmt) == GIMPLE_ASSIGN); ++ /* ++ a) if the SSA_NAME is sourced from a pointer plus, record the pointer and ++ check to make sure the addition was a multiple of the size. ++ check the pointer type too. ++ */ ++ ++ tree rhs = gimple_assign_rhs1 (stmt); ++ if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ { ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree num; ++ if (!is_result_of_mult (rhs2, &num, TYPE_SIZE_UNIT (type->type))) ++ type->mark_escape (escape_non_multiply_size, stmt); ++ ++ if (TREE_CODE (rhs) == SSA_NAME) ++ check_type_and_push (rhs, type, worklist, stmt); ++ return; ++ } ++ ++ /* Casts between pointers and integer are escaping. */ ++ if (gimple_assign_cast_p (stmt)) ++ { ++ type->mark_escape (escape_cast_int, stmt); ++ return; ++ } ++ ++ /* ++ d) if the name is from a cast/assignment, make sure it is used as that ++ type or void* ++ i) If void* then push the ssa_name into worklist ++ */ ++ gcc_assert (gimple_assign_single_p (stmt)); ++ check_other_side (decl, rhs, stmt, worklist); ++} ++ ++/* Mark the types used by the inline-asm as escaping. ++ It is unkown what happens inside an inline-asm. 
*/ ++ ++void ++ipa_struct_reorg::mark_types_asm (gasm *astmt) ++{ ++ for (unsigned i = 0; i < gimple_asm_ninputs (astmt); i++) ++ { ++ tree v = TREE_VALUE (gimple_asm_input_op (astmt, i)); ++ /* If we have &b, just strip the & here. */ ++ if (TREE_CODE (v) == ADDR_EXPR) ++ v = TREE_OPERAND (v, 0); ++ mark_expr_escape (v, escape_inline_asm, astmt); ++ } ++ for (unsigned i = 0; i < gimple_asm_noutputs (astmt); i++) ++ { ++ tree v = TREE_VALUE (gimple_asm_output_op (astmt, i)); ++ /* If we have &b, just strip the & here. */ ++ if (TREE_CODE (v) == ADDR_EXPR) ++ v = TREE_OPERAND (v, 0); ++ mark_expr_escape (v, escape_inline_asm, astmt); ++ } ++} ++ ++void ++ipa_struct_reorg::check_other_side (srdecl *decl, tree other, gimple *stmt, ++ vec &worklist) ++{ ++ srtype *type = decl->type; ++ ++ if (TREE_CODE (other) == SSA_NAME ++ || DECL_P (other) ++ || TREE_CODE (other) == INTEGER_CST) ++ { ++ check_type_and_push (other, type, worklist, stmt); ++ return; ++ } ++ ++ tree t = TREE_TYPE (other); ++ if (!handled_type (t)) ++ { ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ return; ++ } ++ ++ srtype *t1 = find_type (inner_type (t)); ++ if (t1 == type) ++ { ++ tree base; ++ bool indirect; ++ srtype *type1; ++ srfield *field; ++ bool realpart, imagpart, address; ++ if (!get_type_field (other, base, indirect, type1, field, ++ realpart, imagpart, address)) ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ ++ return; ++ } ++ ++ if (t1) ++ t1->mark_escape (escape_cast_another_ptr, stmt); ++ ++ type->mark_escape (escape_cast_another_ptr, stmt); ++} ++ ++void ++ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, ++ vec &worklist) ++{ ++ srtype *type = decl->type; ++ ++ if (gimple_code (stmt) == GIMPLE_RETURN) ++ { ++ type->mark_escape (escape_return, stmt); ++ return; ++ } ++ /* If the SSA_NAME PHI check and add the src to the worklist and ++ check to make sure they are used correctly. 
*/ ++ if (gimple_code (stmt) == GIMPLE_PHI) ++ { ++ check_type_and_push (gimple_phi_result (stmt), type, worklist, stmt); ++ return; ++ } ++ ++ if (gimple_code (stmt) == GIMPLE_ASM) ++ { ++ mark_types_asm (as_a (stmt)); ++ return; ++ } ++ ++ if (gimple_code (stmt) == GIMPLE_COND) ++ { ++ tree rhs1 = gimple_cond_lhs (stmt); ++ tree rhs2 = gimple_cond_rhs (stmt); ++ tree orhs = rhs1; ++ if (gimple_cond_code (stmt) != EQ_EXPR ++ && gimple_cond_code (stmt) != NE_EXPR) ++ { ++ mark_expr_escape (rhs1, escape_non_eq, stmt); ++ mark_expr_escape (rhs2, escape_non_eq, stmt); ++ } ++ if (rhs1 == decl->decl) ++ orhs = rhs2; ++ if (integer_zerop (orhs)) ++ return; ++ if (TREE_CODE (orhs) != SSA_NAME) ++ mark_expr_escape (rhs1, escape_non_eq, stmt); ++ check_type_and_push (orhs, type, worklist, stmt); ++ return; ++ } ++ ++ /* Casts between pointers and integer are escaping. */ ++ if (gimple_assign_cast_p (stmt)) ++ { ++ type->mark_escape (escape_cast_int, stmt); ++ return; ++ } ++ ++ /* We might have a_1 = ptr_2 == ptr_3; */ ++ if (is_gimple_assign (stmt) ++ && TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) == tcc_comparison) ++ { ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree orhs = rhs1; ++ if (gimple_assign_rhs_code (stmt) != EQ_EXPR ++ && gimple_assign_rhs_code (stmt) != NE_EXPR) ++ { ++ mark_expr_escape (rhs1, escape_non_eq, stmt); ++ mark_expr_escape (rhs2, escape_non_eq, stmt); ++ } ++ if (rhs1 == decl->decl) ++ orhs = rhs2; ++ if (integer_zerop (orhs)) ++ return; ++ if (TREE_CODE (orhs) != SSA_NAME) ++ mark_expr_escape (rhs1, escape_non_eq, stmt); ++ check_type_and_push (orhs, type, worklist, stmt); ++ return; ++ } ++ ++ if (gimple_assign_single_p (stmt)) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs = gimple_assign_rhs1 (stmt); ++ /* Check if we have a_1 = b_2; that a_1 is in the correct type. 
*/ ++ if (decl->decl == rhs) ++ { ++ check_other_side (decl, lhs, stmt, worklist); ++ return; ++ } ++ } ++ ++ if (is_gimple_assign (stmt) ++ && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ { ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree lhs = gimple_assign_lhs (stmt); ++ tree num; ++ check_other_side (decl, lhs, stmt, worklist); ++ if (!is_result_of_mult (rhs2, &num, TYPE_SIZE_UNIT (type->type))) ++ type->mark_escape (escape_non_multiply_size, stmt); ++ } ++} ++ ++/* ++ 2) Check SSA_NAMEs for non type usages (source or use) (worlist of srdecl) ++ d) if the name is from a cast/assignment, make sure it is used as that ++ type or void* ++ i) If void* then push the ssa_name into worklist ++ e) if used in conditional check the other side ++ i) If the conditional is non NE/EQ then mark the type as non rejecting ++ f) Check if the use in a Pointer PLUS EXPR Is used by mulitplication ++ of its size ++ */ ++void ++ipa_struct_reorg::check_uses (srdecl *decl, vec &worklist) ++{ ++ tree ssa_name = decl->decl; ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ ++ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, ssa_name) ++ { ++ gimple *stmt = USE_STMT (use_p); ++ ++ if (is_gimple_debug (stmt)) ++ continue; ++ ++ check_use (decl, stmt, worklist); ++ } ++} ++ ++/* Record function corresponding to NODE. */ ++ ++srfunction * ++ipa_struct_reorg::record_function (cgraph_node *node) ++{ ++ function *fn; ++ tree parm, var; ++ unsigned int i; ++ srfunction *sfn; ++ escape_type escapes = does_not_escape; ++ ++ sfn = new srfunction (node); ++ functions.safe_push (sfn); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, ++ "\nRecording accesses and types from function: %s/%u\n", ++ node->name (), node->order); ++ ++ /* Nodes without a body are not interesting. Especially do not ++ visit clones at this point for now - we get duplicate decls ++ there for inline clones at least. 
*/ ++ if (!node->has_gimple_body_p () || node->inlined_to) ++ return sfn; ++ ++ node->get_body (); ++ fn = DECL_STRUCT_FUNCTION (node->decl); ++ ++ if (!fn) ++ return sfn; ++ ++ current_function = sfn; ++ ++ if (DECL_PRESERVE_P (node->decl)) ++ escapes = escape_marked_as_used; ++ else if (!node->local) ++ escapes = escape_visible_function; ++ else if (!node->can_change_signature) ++ escapes = escape_cannot_change_signature; ++ else if (!tree_versionable_function_p (node->decl)) ++ escapes = escape_noclonable_function; ++ else if (!opt_for_fn (node->decl, flag_ipa_struct_reorg)) ++ escapes = escape_non_optimize; ++ ++ basic_block bb; ++ gimple_stmt_iterator si; ++ ++ /* Record the static chain decl. */ ++ if (fn->static_chain_decl) ++ { ++ srdecl *sd = record_var (fn->static_chain_decl, ++ escapes, -2); ++ if (sd) ++ { ++ /* Specify that this type is used by the static ++ chain so it cannot be split. */ ++ sd->type->chain_type = true; ++ sfn->add_arg (sd); ++ sd->type->add_function (sfn); ++ } ++ } ++ ++ /* Record the arguments. */ ++ for (parm = DECL_ARGUMENTS (node->decl), i = 0; ++ parm; ++ parm = DECL_CHAIN (parm), i++) ++ { ++ srdecl *sd = record_var (parm, escapes, i); ++ if (sd) ++ { ++ sfn->add_arg (sd); ++ sd->type->add_function (sfn); ++ } ++ } ++ ++ /* Mark the return type as escaping. */ ++ { ++ tree return_type = TREE_TYPE (TREE_TYPE (node->decl)); ++ mark_type_as_escape (return_type, escape_return, NULL); ++ } ++ ++ /* If the cfg does not exist for the function, ++ don't process the function. */ ++ if (!fn->cfg) ++ { ++ current_function = NULL; ++ return sfn; ++ } ++ ++ /* The following order is done for recording stage: ++ 0) Record all variables/SSA_NAMES that are of struct type ++ 1) Record MEM_REF/COMPONENT_REFs ++ a) Record SSA_NAMEs (void*) and record that as the accessed type. 
++ */ ++ ++ push_cfun (fn); ++ ++ FOR_EACH_LOCAL_DECL (cfun, i, var) ++ { ++ if (TREE_CODE (var) != VAR_DECL) ++ continue; ++ ++ record_var (var); ++ } ++ ++ for (i = 1; i < num_ssa_names; ++i) ++ { ++ tree name = ssa_name (i); ++ if (!name ++ || has_zero_uses (name) ++ || virtual_operand_p (name)) ++ continue; ++ ++ record_var (name); ++ } ++ ++ /* Find the variables which are used via MEM_REF and are void* types. */ ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) ++ { ++ gimple *stmt = gsi_stmt (si); ++ find_vars (stmt); ++ } ++ } ++ ++ auto_vec worklist; ++ for (unsigned i = 0; i < current_function->decls.length (); i++) ++ { ++ srdecl *decl = current_function->decls[i]; ++ if (TREE_CODE (decl->decl) == SSA_NAME) ++ { ++ decl->visited = false; ++ worklist.safe_push (decl); ++ } ++ } ++ ++/* ++ 2) Check SSA_NAMEs for non type usages (source or use) (worlist of srdecl) ++ a) if the SSA_NAME is sourced from a pointer plus, record the pointer and ++ check to make sure the addition was a multiple of the size. ++ check the pointer type too. 
++ b) If the name is sourced from an allocation check the allocation ++ i) Add SSA_NAME (void*) to the worklist if allocated from realloc ++ c) if the name is from a param, make sure the param type was of the ++ original type ++ d) if the name is used in a cast/assignment, make sure it is used as that ++ type or void* ++ i) If void* then push the ssa_name into worklist ++ e) if used in conditional check the other side ++ i) If the conditional is non NE/EQ then mark the type as non rejecting ++ f) Check if the use in a POinter PLUS EXPR Is used by mulitplication ++ of its size ++*/ ++ ++ while (!worklist.is_empty ()) ++ { ++ srdecl *decl = worklist.pop (); ++ if (decl->visited) ++ continue; ++ decl->visited = true; ++ check_definition (decl, worklist); ++ check_uses (decl, worklist); ++ } ++ ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si)) ++ { ++ gimple *stmt = gsi_stmt (si); ++ maybe_record_stmt (node, stmt); ++ } ++ } ++ ++ pop_cfun (); ++ current_function = NULL; ++ return sfn; ++} ++ ++/* Record all accesses for all types including global variables. */ ++ ++void ++ipa_struct_reorg::record_accesses (void) ++{ ++ varpool_node *var; ++ cgraph_node *cnode; ++ ++ /* Record global (non-auto) variables first. */ ++ FOR_EACH_VARIABLE (var) ++ { ++ if (!var->real_symbol_p ()) ++ continue; ++ ++ /* Record all variables including the accesses inside a variable. 
*/ ++ escape_type escapes = does_not_escape; ++ if (var->externally_visible || !var->definition) ++ escapes = escape_via_global_var; ++ if (var->in_other_partition) ++ escapes = escape_via_global_var; ++ if (!var->externally_visible && var->definition) ++ var->get_constructor (); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Recording global variable: "); ++ print_generic_expr (dump_file, var->decl); ++ fprintf (dump_file, "\n"); ++ } ++ record_var (var->decl, escapes); ++ } ++ ++ FOR_EACH_FUNCTION (cnode) ++ { ++ if (!cnode->real_symbol_p ()) ++ continue; ++ ++ /* Record accesses inside a function. */ ++ if (cnode->definition) ++ record_function (cnode); ++ } ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "all types (before pruning):\n"); ++ dump_types (dump_file); ++ fprintf (dump_file, "all functions (before pruning):\n"); ++ dump_functions (dump_file); ++ } ++ done_recording = true; ++} ++ ++/* A helper function to detect cycles (recusive) types. ++ Return TRUE if TYPE was a rescusive type. */ ++ ++bool ++ipa_struct_reorg::walk_field_for_cycles (srtype *type) ++{ ++ unsigned i; ++ srfield *field; ++ ++ type->visited = true; ++ if (type->escaped_rescusive ()) ++ return true; ++ ++ if (type->has_escaped ()) ++ return false; ++ ++ FOR_EACH_VEC_ELT (type->fields, i, field) ++ { ++ if (!field->type) ++ ; ++ else if (field->type->visited ++ || walk_field_for_cycles (field->type)) ++ { ++ type->mark_escape (escape_rescusive_type, NULL); ++ return true; ++ } ++ } ++ ++ return false; ++} ++ ++/* Clear visited on all types. */ ++ ++void ++ipa_struct_reorg::clear_visited (void) ++{ ++ for (unsigned i = 0; i < types.length (); i++) ++ types[i]->visited = false; ++} ++ ++/* Detect recusive types and mark them as escaping. 
*/ ++ ++void ++ipa_struct_reorg::detect_cycles (void) ++{ ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ if (types[i]->has_escaped ()) ++ continue; ++ ++ clear_visited (); ++ walk_field_for_cycles (types[i]); ++ } ++} ++ ++/* Propagate escaping to depdenent types. */ ++ ++void ++ipa_struct_reorg::propagate_escape (void) ++{ ++ unsigned i; ++ srtype *type; ++ bool changed = false; ++ ++ do ++ { ++ changed = false; ++ FOR_EACH_VEC_ELT (types, i, type) ++ { ++ for (tree field = TYPE_FIELDS (type->type); ++ field; ++ field = DECL_CHAIN (field)) ++ { ++ if (TREE_CODE (field) == FIELD_DECL ++ && handled_type (TREE_TYPE (field))) ++ { ++ tree t = inner_type (TREE_TYPE (field)); ++ srtype *type1 = find_type (t); ++ if (!type1) ++ continue; ++ if (type1->has_escaped () ++ && !type->has_escaped ()) ++ { ++ type->mark_escape (escape_dependent_type_escapes, NULL); ++ changed = true; ++ } ++ if (type->has_escaped () ++ && !type1->has_escaped ()) ++ { ++ type1->mark_escape (escape_dependent_type_escapes, NULL); ++ changed = true; ++ } ++ } ++ } ++ } ++ } while (changed); ++} ++ ++/* Prune the escaped types and their decls from what was recorded. */ ++ ++void ++ipa_struct_reorg::prune_escaped_types (void) ++{ ++ detect_cycles (); ++ propagate_escape (); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "all types (after prop but before pruning):\n"); ++ dump_types (dump_file); ++ fprintf (dump_file, "all functions (after prop but before pruning):\n"); ++ dump_functions (dump_file); ++ } ++ ++ if (dump_file) ++ dump_types_escaped (dump_file); ++ ++ /* Prune the function arguments which escape ++ and functions which have no types as arguments. */ ++ for (unsigned i = 0; i < functions.length ();) ++ { ++ srfunction *function = functions[i]; ++ ++ /* Prune function arguments of types that escape. 
*/ ++ for (unsigned j = 0; j < function->args.length ();) ++ { ++ if (function->args[j]->type->has_escaped ()) ++ function->args.ordered_remove (j); ++ else ++ j++; ++ } ++ ++ /* Prune global variables that the function uses of types ++ that escape. */ ++ for (unsigned j = 0; j < function->globals.length ();) ++ { ++ if (function->globals[j]->type->has_escaped ()) ++ function->globals.ordered_remove (j); ++ else ++ j++; ++ } ++ ++ /* Prune variables that the function uses of types that escape. */ ++ for (unsigned j = 0; j < function->decls.length ();) ++ { ++ srdecl *decl = function->decls[j]; ++ if (decl->type->has_escaped ()) ++ { ++ function->decls.ordered_remove (j); ++ delete decl; ++ } ++ else ++ j++; ++ } ++ ++ /* Prune functions which don't refer to any variables any more. */ ++ if (function->args.is_empty () ++ && function->decls.is_empty () ++ && function->globals.is_empty ()) ++ { ++ delete function; ++ functions.ordered_remove (i); ++ } ++ else ++ i++; ++ } ++ ++ /* Prune globals of types that escape, all references to those decls ++ will have been removed in the first loop. */ ++ for (unsigned j = 0; j < globals.decls.length ();) ++ { ++ srdecl *decl = globals.decls[j]; ++ if (decl->type->has_escaped ()) ++ { ++ globals.decls.ordered_remove (j); ++ delete decl; ++ } ++ else ++ j++; ++ } ++ ++ /* Prune types that escape, all references to those types ++ will have been removed in the above loops. */ ++ for (unsigned i = 0; i < types.length ();) ++ { ++ srtype *type = types[i]; ++ if (type->has_escaped ()) ++ { ++ /* All references to this type should have been removed now. */ ++ delete type; ++ types.ordered_remove (i); ++ } ++ else ++ i++; ++ } ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "all types (after pruning):\n"); ++ dump_types (dump_file); ++ fprintf (dump_file, "all functions (after pruning):\n"); ++ dump_functions (dump_file); ++ } ++} ++ ++/* Analyze all of the types. 
*/ ++ ++void ++ipa_struct_reorg::analyze_types (void) ++{ ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ if (!types[i]->has_escaped ()) ++ types[i]->analyze (); ++ } ++} ++ ++/* When struct A has a struct B member, B's type info ++ is not stored in ++ TYPE_FIELDS (TREE_TYPE (TYPE_FIELDS (typeA))) ++ Try to restore B's type information. */ ++ ++void ++ipa_struct_reorg::restore_field_type (void) ++{ ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ for (unsigned j = 0; j < types[i]->fields.length (); j++) ++ { ++ srfield *field = types[i]->fields[j]; ++ if (TREE_CODE (inner_type (field->fieldtype)) == RECORD_TYPE) ++ { ++ /* If field type has TYPE_FIELDS information, ++ we do not need to do this. */ ++ if (TYPE_FIELDS (field->type->type) != NULL) ++ continue; ++ for (unsigned k = 0; k < types.length (); k++) ++ { ++ if (i == k) ++ continue; ++ const char *type1 = get_type_name (field->type->type); ++ const char *type2 = get_type_name (types[k]->type); ++ if (type1 == NULL || type2 == NULL) ++ continue; ++ if (type1 == type2 ++ && TYPE_FIELDS (types[k]->type)) ++ field->type = types[k]; ++ } ++ } ++ } ++ } ++} ++ ++/* Create all new types we want to create. */ ++ ++bool ++ipa_struct_reorg::create_new_types (void) ++{ ++ int newtypes = 0; ++ clear_visited (); ++ for (unsigned i = 0; i < types.length (); i++) ++ newtypes += types[i]->create_new_type (); ++ ++ if (dump_file) ++ { ++ if (newtypes) ++ fprintf (dump_file, "\nNumber of structures to transform is %d\n", ++ newtypes); ++ else ++ fprintf (dump_file, "\nNo structures to transform.\n"); ++ } ++ ++ return newtypes != 0; ++} ++ ++/* Create all the new decls except for the new arguments ++ which create_new_functions would have created. */ ++ ++void ++ipa_struct_reorg::create_new_decls (void) ++{ ++ globals.create_new_decls (); ++ for (unsigned i = 0; i < functions.length (); i++) ++ functions[i]->create_new_decls (); ++} ++ ++/* Create the new arguments for the function corresponding to NODE. 
*/ ++ ++void ++ipa_struct_reorg::create_new_args (cgraph_node *new_node) ++{ ++ tree decl = new_node->decl; ++ auto_vec params; ++ push_function_arg_decls (¶ms, decl); ++ vec *adjs = NULL; ++ vec_safe_reserve (adjs, params.length ()); ++ for (unsigned i = 0; i < params.length (); i++) ++ { ++ struct ipa_adjusted_param adj; ++ tree parm = params[i]; ++ memset (&adj, 0, sizeof (adj)); ++ adj.base_index = i; ++ adj.prev_clone_index = i; ++ srtype *t = find_type (inner_type (TREE_TYPE (parm))); ++ if (!t ++ || t->has_escaped () ++ || !t->has_new_type ()) ++ { ++ adj.op = IPA_PARAM_OP_COPY; ++ vec_safe_push (adjs, adj); ++ continue; ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Creating a new argument for: "); ++ print_generic_expr (dump_file, params[i]); ++ fprintf (dump_file, " in function: "); ++ print_generic_expr (dump_file, decl); ++ fprintf (dump_file, "\n"); ++ } ++ adj.op = IPA_PARAM_OP_NEW; ++ adj.param_prefix_index = IPA_PARAM_PREFIX_REORG; ++ for (unsigned j = 0; j < max_split && t->newtype[j]; j++) ++ { ++ adj.type = reconstruct_complex_type (TREE_TYPE (parm), ++ t->newtype[j]); ++ vec_safe_push (adjs, adj); ++ } ++ } ++ ipa_param_body_adjustments *adjustments ++ = new ipa_param_body_adjustments (adjs, decl); ++ adjustments->modify_formal_parameters (); ++ auto_vec new_params; ++ push_function_arg_decls (&new_params, decl); ++ unsigned veclen = vec_safe_length (adjs); ++ for (unsigned i = 0; i < veclen; i++) ++ { ++ if ((*adjs)[i].op != IPA_PARAM_OP_NEW) ++ continue; ++ tree decl = params[(*adjs)[i].base_index]; ++ srdecl *d = find_decl (decl); ++ if (!d) ++ continue; ++ unsigned j = 0; ++ while (j < max_split && d->newdecl[j]) ++ j++; ++ d->newdecl[j] = new_params[i]; ++ } ++ ++ function *fn = DECL_STRUCT_FUNCTION (decl); ++ ++ if (!fn->static_chain_decl) ++ return; ++ srdecl *chain = find_decl (fn->static_chain_decl); ++ if (!chain) ++ return; ++ ++ srtype *type = chain->type; ++ tree orig_var = chain->decl; ++ const 
char *tname = NULL; ++ if (DECL_NAME (orig_var)) ++ tname = IDENTIFIER_POINTER (DECL_NAME (orig_var)); ++ gcc_assert (!type->newtype[1]); ++ tree new_name = NULL; ++ char *name = NULL; ++ if (tname) ++ { ++ name = concat (tname, ".reorg.0", NULL); ++ new_name = get_identifier (name); ++ free (name); ++ } ++ tree newtype1 = reconstruct_complex_type (TREE_TYPE (orig_var), ++ type->newtype[0]); ++ chain->newdecl[0] = build_decl (DECL_SOURCE_LOCATION (orig_var), ++ PARM_DECL, new_name, newtype1); ++ copy_var_attributes (chain->newdecl[0], orig_var); ++ fn->static_chain_decl = chain->newdecl[0]; ++} ++ ++/* Find the refered DECL in the current function or globals. ++ If this is a global decl, record that as being used ++ in the current function. */ ++ ++srdecl * ++ipa_struct_reorg::find_decl (tree decl) ++{ ++ srdecl *d; ++ d = globals.find_decl (decl); ++ if (d) ++ { ++ /* Record the global usage in the current function. */ ++ if (!done_recording && current_function) ++ { ++ bool add = true; ++ /* No reason to add it to the current function if it is ++ already recorded as such. */ ++ for (unsigned i = 0; i < current_function->globals.length (); i++) ++ { ++ if (current_function->globals[i] == d) ++ { ++ add = false; ++ break; ++ } ++ } ++ if (add) ++ current_function->globals.safe_push (d); ++ } ++ return d; ++ } ++ if (current_function) ++ return current_function->find_decl (decl); ++ return NULL; ++} ++ ++/* Create new function clones for the cases where the arguments ++ need to be changed. 
*/ ++ ++void ++ipa_struct_reorg::create_new_functions (void) ++{ ++ for (unsigned i = 0; i < functions.length (); i++) ++ { ++ srfunction *f = functions[i]; ++ bool anyargchanges = false; ++ cgraph_node *new_node; ++ cgraph_node *node = f->node; ++ int newargs = 0; ++ if (f->old) ++ continue; ++ ++ if (f->args.length () == 0) ++ continue; ++ ++ for (unsigned j = 0; j < f->args.length (); j++) ++ { ++ srdecl *d = f->args[j]; ++ srtype *t = d->type; ++ if (t->has_new_type ()) ++ { ++ newargs += t->newtype[1] != NULL; ++ anyargchanges = true; ++ } ++ } ++ if (!anyargchanges) ++ continue; ++ ++ if (dump_file) ++ { ++ fprintf (dump_file, "Creating a clone of function: "); ++ f->simple_dump (dump_file); ++ fprintf (dump_file, "\n"); ++ } ++ statistics_counter_event (NULL, "Create new function", 1); ++ new_node = node->create_version_clone_with_body (vNULL, NULL, ++ NULL, NULL, NULL, ++ "struct_reorg"); ++ new_node->can_change_signature = node->can_change_signature; ++ new_node->make_local (); ++ f->newnode = new_node; ++ srfunction *n = record_function (new_node); ++ current_function = n; ++ n->old = f; ++ f->newf = n; ++ /* Create New arguments. */ ++ create_new_args (new_node); ++ current_function = NULL; ++ } ++} ++ ++bool ++ipa_struct_reorg::rewrite_lhs_rhs (tree lhs, tree rhs, ++ tree newlhs[max_split], ++ tree newrhs[max_split]) ++{ ++ bool l = rewrite_expr (lhs, newlhs); ++ bool r = rewrite_expr (rhs, newrhs); ++ ++ /* Handle NULL pointer specially. 
*/ ++ if (l && !r && integer_zerop (rhs)) ++ { ++ r = true; ++ for (unsigned i = 0; i < max_split && newlhs[i]; i++) ++ newrhs[i] = fold_convert (TREE_TYPE (newlhs[i]), rhs); ++ } ++ ++ return l || r; ++} ++ ++bool ++ipa_struct_reorg::rewrite_expr (tree expr, ++ tree newexpr[max_split], ++ bool ignore_missing_decl) ++{ ++ tree base; ++ bool indirect; ++ srtype *t; ++ srfield *f; ++ bool realpart, imagpart; ++ bool address; ++ ++ tree newbase[max_split]; ++ memset (newexpr, 0, sizeof (tree[max_split])); ++ ++ if (TREE_CODE (expr) == CONSTRUCTOR) ++ { ++ srtype *t = find_type (TREE_TYPE (expr)); ++ if (!t) ++ return false; ++ gcc_assert (CONSTRUCTOR_NELTS (expr) == 0); ++ if (!t->has_new_type ()) ++ return false; ++ for (unsigned i = 0; i < max_split && t->newtype[i]; i++) ++ newexpr[i] = build_constructor (t->newtype[i], NULL); ++ return true; ++ } ++ ++ if (!get_type_field (expr, base, indirect, t, f, ++ realpart, imagpart, address)) ++ return false; ++ ++ /* If the type is not changed, then just return false. */ ++ if (!t->has_new_type ()) ++ return false; ++ ++ /* NULL pointer handling is "special". 
*/ ++ if (integer_zerop (base)) ++ { ++ gcc_assert (indirect && !address); ++ for (unsigned i = 0; i < max_split && t->newtype[i]; i++) ++ { ++ tree newtype1 = reconstruct_complex_type (TREE_TYPE (base), ++ t->newtype[i]); ++ newbase[i] = fold_convert (newtype1, base); ++ } ++ } ++ else ++ { ++ srdecl *d = find_decl (base); ++ ++ if (!d && dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Can't find decl:\n"); ++ print_generic_expr (dump_file, base); ++ fprintf (dump_file, "\ntype:\n"); ++ t->dump (dump_file); ++ } ++ if (!d && ignore_missing_decl) ++ return true; ++ gcc_assert (d); ++ memcpy (newbase, d->newdecl, sizeof (d->newdecl)); ++ } ++ ++ if (f == NULL) ++ { ++ memcpy (newexpr, newbase, sizeof (newbase)); ++ for (unsigned i = 0; i < max_split && newexpr[i]; i++) ++ { ++ if (address) ++ newexpr[i] = build_fold_addr_expr (newexpr[i]); ++ if (indirect) ++ newexpr[i] = build_simple_mem_ref (newexpr[i]); ++ if (imagpart) ++ newexpr[i] = build1 (IMAGPART_EXPR, ++ TREE_TYPE (TREE_TYPE (newexpr[i])), ++ newexpr[i]); ++ if (realpart) ++ newexpr[i] = build1 (REALPART_EXPR, ++ TREE_TYPE (TREE_TYPE (newexpr[i])), ++ newexpr[i]); ++ } ++ return true; ++ } ++ ++ tree newdecl = newbase[f->clusternum]; ++ for (unsigned i = 0; i < max_split && f->newfield[i]; i++) ++ { ++ tree newbase1 = newdecl; ++ if (address) ++ newbase1 = build_fold_addr_expr (newbase1); ++ if (indirect) ++ newbase1 = build_simple_mem_ref (newbase1); ++ newexpr[i] = build3 (COMPONENT_REF, TREE_TYPE (f->newfield[i]), ++ newbase1, f->newfield[i], NULL_TREE); ++ if (imagpart) ++ newexpr[i] = build1 (IMAGPART_EXPR, ++ TREE_TYPE (TREE_TYPE (newexpr[i])), ++ newexpr[i]); ++ if (realpart) ++ newexpr[i] = build1 (REALPART_EXPR, ++ TREE_TYPE (TREE_TYPE (newexpr[i])), ++ newexpr[i]); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "cluster: %d. 
decl = ", (int)f->clusternum); ++ print_generic_expr (dump_file, newbase1); ++ fprintf (dump_file, "\nnewexpr = "); ++ print_generic_expr (dump_file, newexpr[i]); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ return true; ++} ++ ++bool ++ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) ++{ ++ bool remove = false; ++ if (gimple_clobber_p (stmt)) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree newlhs[max_split]; ++ if (!rewrite_expr (lhs, newlhs)) ++ return false; ++ for (unsigned i = 0; i < max_split && newlhs[i]; i++) ++ { ++ tree clobber = build_constructor (TREE_TYPE (newlhs[i]), NULL); ++ TREE_THIS_VOLATILE (clobber) = true; ++ gimple *newstmt = gimple_build_assign (newlhs[i], clobber); ++ gsi_insert_before (gsi, newstmt, GSI_SAME_STMT); ++ remove = true; ++ } ++ return remove; ++ } ++ ++ if (gimple_assign_rhs_code (stmt) == EQ_EXPR ++ || gimple_assign_rhs_code (stmt) == NE_EXPR) ++ { ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree newrhs1[max_split]; ++ tree newrhs2[max_split]; ++ tree_code rhs_code = gimple_assign_rhs_code (stmt); ++ tree_code code = rhs_code == EQ_EXPR ? 
BIT_AND_EXPR : BIT_IOR_EXPR; ++ if (!rewrite_lhs_rhs (rhs1, rhs2, newrhs1, newrhs2)) ++ return false; ++ tree newexpr = NULL_TREE; ++ for (unsigned i = 0; i < max_split && newrhs1[i]; i++) ++ { ++ tree expr = gimplify_build2 (gsi, rhs_code, boolean_type_node, ++ newrhs1[i], newrhs2[i]); ++ if (!newexpr) ++ newexpr = expr; ++ else ++ newexpr = gimplify_build2 (gsi, code, boolean_type_node, ++ newexpr, expr); ++ } ++ ++ if (newexpr) ++ { ++ newexpr = fold_convert (TREE_TYPE (gimple_assign_lhs (stmt)), ++ newexpr); ++ gimple_assign_set_rhs_from_tree (gsi, newexpr); ++ update_stmt (stmt); ++ } ++ return false; ++ } ++ ++ if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree newlhs[max_split]; ++ tree newrhs[max_split]; ++ ++ if (!rewrite_lhs_rhs (lhs, rhs1, newlhs, newrhs)) ++ return false; ++ tree size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (lhs))); ++ tree num; ++ /* Check if rhs2 is a multiplication of the size of the type. 
*/ ++ if (!is_result_of_mult (rhs2, &num, size)) ++ internal_error ( ++ "The rhs of pointer is not a multiplicate and it slips through"); ++ ++ num = gimplify_build1 (gsi, NOP_EXPR, sizetype, num); ++ for (unsigned i = 0; i < max_split && newlhs[i]; i++) ++ { ++ gimple *new_stmt; ++ ++ tree newsize = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (newlhs[i]))); ++ newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num, newsize); ++ new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR, ++ newrhs[i], newsize); ++ gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); ++ remove = true; ++ } ++ return remove; ++ } ++ if (gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs = gimple_assign_rhs1 (stmt); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "rewriting statement:\n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ tree newlhs[max_split]; ++ tree newrhs[max_split]; ++ if (!rewrite_lhs_rhs (lhs, rhs, newlhs, newrhs)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nDid nothing to statement.\n"); ++ return false; ++ } ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nreplaced with:\n"); ++ for (unsigned i = 0; i < max_split && (newlhs[i] || newrhs[i]); i++) ++ { ++ gimple *newstmt = gimple_build_assign (newlhs[i] ? newlhs[i] : lhs, ++ newrhs[i] ? newrhs[i] : rhs); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ print_gimple_stmt (dump_file, newstmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ gsi_insert_before (gsi, newstmt, GSI_SAME_STMT); ++ remove = true; ++ } ++ return remove; ++ } ++ ++ return remove; ++} ++ ++/* Rewrite function call statement STMT. Return TRUE if the statement ++ is to be removed. */ ++ ++bool ++ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) ++{ ++ /* Handled allocation calls are handled seperately from normal ++ function calls. 
*/ ++ if (handled_allocation_stmt (stmt)) ++ { ++ tree lhs = gimple_call_lhs (stmt); ++ tree newrhs1[max_split]; ++ srdecl *decl = find_decl (lhs); ++ if (!decl || !decl->type) ++ return false; ++ srtype *type = decl->type; ++ tree num = allocate_size (type, stmt); ++ gcc_assert (num); ++ memset (newrhs1, 0, sizeof (newrhs1)); ++ ++ /* The realloc call needs to have its first argument rewritten. */ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) ++ { ++ tree rhs1 = gimple_call_arg (stmt, 0); ++ if (integer_zerop (rhs1)) ++ { ++ for (unsigned i = 0; i < max_split; i++) ++ newrhs1[i] = rhs1; ++ } ++ else if (!rewrite_expr (rhs1, newrhs1)) ++ internal_error ("Rewrite failed for realloc"); ++ } ++ ++ /* Go through each new lhs. */ ++ for (unsigned i = 0; i < max_split && decl->newdecl[i]; i++) ++ { ++ tree newsize = TYPE_SIZE_UNIT (type->type); ++ gimple *g; ++ /* Every allocation except for calloc needs ++ the size multiplied out. */ ++ if (!gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) ++ newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num, newsize); ++ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA)) ++ g = gimple_build_call (gimple_call_fndecl (stmt), ++ 1, newsize); ++ else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) ++ g = gimple_build_call (gimple_call_fndecl (stmt), ++ 2, num, newsize); ++ else if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) ++ g = gimple_build_call (gimple_call_fndecl (stmt), ++ 2, newrhs1[i], newsize); ++ else ++ gcc_assert (false); ++ gimple_call_set_lhs (g, decl->newdecl[i]); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ } ++ return true; ++ } ++ ++ /* The function call free needs to be handled special. 
*/ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_FREE)) ++ { ++ tree expr = gimple_call_arg (stmt, 0); ++ tree newexpr[max_split]; ++ if (!rewrite_expr (expr, newexpr)) ++ return false; ++ ++ if (newexpr[1] == NULL) ++ { ++ gimple_call_set_arg (stmt, 0, newexpr[0]); ++ update_stmt (stmt); ++ return false; ++ } ++ ++ for (unsigned i = 0; i < max_split && newexpr[i]; i++) ++ { ++ gimple *g = gimple_build_call (gimple_call_fndecl (stmt), ++ 1, newexpr[i]); ++ gsi_insert_before (gsi, g, GSI_SAME_STMT); ++ } ++ return true; ++ } ++ ++ /* Otherwise, look up the function to see if we have cloned it ++ and rewrite the arguments. */ ++ tree fndecl = gimple_call_fndecl (stmt); ++ ++ /* Indirect calls are already marked as escaping so ignore. */ ++ if (!fndecl) ++ return false; ++ ++ cgraph_node *node = cgraph_node::get (fndecl); ++ gcc_assert (node); ++ srfunction *f = find_function (node); ++ ++ /* Did not find the function or had not cloned it return saying don't ++ change the function call. */ ++ if (!f || !f->newf) ++ return false; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Changing arguments for function call :\n"); ++ print_gimple_expr (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ ++ /* Move over to the new function. */ ++ f = f->newf; ++ ++ tree chain = gimple_call_chain (stmt); ++ unsigned nargs = gimple_call_num_args (stmt); ++ auto_vec vargs (nargs); ++ ++ if (chain) ++ { ++ tree newchains[max_split]; ++ if (rewrite_expr (chain, newchains)) ++ { ++ /* Chain decl's type cannot be split and but it can change. 
*/ ++ gcc_assert (newchains[1] == NULL); ++ chain = newchains[0]; ++ } ++ } ++ ++ for (unsigned i = 0; i < nargs; i++) ++ vargs.quick_push (gimple_call_arg (stmt, i)); ++ ++ int extraargs = 0; ++ ++ for (unsigned i = 0; i < f->args.length (); i++) ++ { ++ srdecl *d = f->args[i]; ++ if (d->argumentnum == -2) ++ continue; ++ gcc_assert (d->argumentnum != -1); ++ tree arg = vargs[d->argumentnum + extraargs]; ++ tree newargs[max_split]; ++ if (!rewrite_expr (arg, newargs)) ++ continue; ++ ++ /* If this ARG has a replacement handle the replacement. */ ++ for (unsigned j = 0; j < max_split && d->newdecl[j]; j++) ++ { ++ gcc_assert (newargs[j]); ++ /* If this is the first replacement of the arugment, ++ then just replace it. */ ++ if (j == 0) ++ vargs[d->argumentnum + extraargs] = newargs[j]; ++ else ++ { ++ /* More than one replacement, ++ we need to insert into the array. */ ++ extraargs++; ++ vargs.safe_insert (d->argumentnum + extraargs, newargs[j]); ++ } ++ } ++ } ++ ++ gcall *new_stmt; ++ ++ new_stmt = gimple_build_call_vec (f->node->decl, vargs); ++ ++ if (gimple_call_lhs (stmt)) ++ gimple_call_set_lhs (new_stmt, gimple_call_lhs (stmt)); ++ ++ gimple_set_vuse (new_stmt, gimple_vuse (stmt)); ++ gimple_set_vdef (new_stmt, gimple_vdef (stmt)); ++ ++ if (gimple_has_location (stmt)) ++ gimple_set_location (new_stmt, gimple_location (stmt)); ++ gimple_call_copy_flags (new_stmt, stmt); ++ gimple_call_set_chain (new_stmt, chain); ++ ++ gimple_set_modified (new_stmt, true); ++ ++ if (gimple_vdef (new_stmt) ++ && TREE_CODE (gimple_vdef (new_stmt)) == SSA_NAME) ++ SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt; ++ ++ gsi_replace (gsi, new_stmt, false); ++ ++ /* We need to defer cleaning EH info on the new statement to ++ fixup-cfg. We may not have dominator information at this point ++ and thus would end up with unreachable blocks and have no way ++ to communicate that we need to run CFG cleanup then. 
*/ ++ int lp_nr = lookup_stmt_eh_lp (stmt); ++ if (lp_nr != 0) ++ { ++ remove_stmt_from_eh_lp (stmt); ++ add_stmt_to_eh_lp (new_stmt, lp_nr); ++ } ++ ++ return false; ++} ++ ++/* Rewrite the conditional statement STMT. Return TRUE if the ++ old statement is to be removed. */ ++ ++bool ++ipa_struct_reorg::rewrite_cond (gcond *stmt, gimple_stmt_iterator *gsi) ++{ ++ tree_code rhs_code = gimple_cond_code (stmt); ++ ++ /* Handle only equals or not equals conditionals. */ ++ if (rhs_code != EQ_EXPR ++ && rhs_code != NE_EXPR) ++ return false; ++ tree rhs1 = gimple_cond_lhs (stmt); ++ tree rhs2 = gimple_cond_rhs (stmt); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "COND: Rewriting\n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ print_generic_expr (dump_file, rhs1); ++ fprintf (dump_file, "\n"); ++ print_generic_expr (dump_file, rhs2); ++ fprintf (dump_file, "\n"); ++ } ++ ++ tree newrhs1[max_split]; ++ tree newrhs2[max_split]; ++ tree_code code = rhs_code == EQ_EXPR ? BIT_AND_EXPR : BIT_IOR_EXPR; ++ if (!rewrite_lhs_rhs (rhs1, rhs2, newrhs1, newrhs2)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nDid nothing to statement.\n"); ++ return false; ++ } ++ ++ tree newexpr = NULL_TREE; ++ for (unsigned i = 0; i < max_split && newrhs1[i]; i++) ++ { ++ tree expr = gimplify_build2 (gsi, rhs_code, boolean_type_node, ++ newrhs1[i], newrhs2[i]); ++ if (!newexpr) ++ newexpr = expr; ++ else ++ newexpr = gimplify_build2 (gsi, code, boolean_type_node, ++ newexpr, expr); ++ } ++ ++ if (newexpr) ++ { ++ gimple_cond_set_lhs (stmt, newexpr); ++ gimple_cond_set_rhs (stmt, boolean_true_node); ++ update_stmt (stmt); ++ } ++ return false; ++} ++ ++/* Rewrite debug statments if possible. Return TRUE if the statement ++ should be removed. 
*/ ++ ++bool ++ipa_struct_reorg::rewrite_debug (gimple *stmt, gimple_stmt_iterator *) ++{ ++ bool remove = false; ++ if (gimple_debug_bind_p (stmt)) ++ { ++ tree var = gimple_debug_bind_get_var (stmt); ++ tree newvar[max_split]; ++ if (rewrite_expr (var, newvar, true)) ++ remove = true; ++ if (gimple_debug_bind_has_value_p (stmt)) ++ { ++ var = gimple_debug_bind_get_value (stmt); ++ if (TREE_CODE (var) == POINTER_PLUS_EXPR) ++ var = TREE_OPERAND (var, 0); ++ if (rewrite_expr (var, newvar, true)) ++ remove = true; ++ } ++ } ++ else if (gimple_debug_source_bind_p (stmt)) ++ { ++ tree var = gimple_debug_source_bind_get_var (stmt); ++ tree newvar[max_split]; ++ if (rewrite_expr (var, newvar, true)) ++ remove = true; ++ var = gimple_debug_source_bind_get_value (stmt); ++ if (TREE_CODE (var) == POINTER_PLUS_EXPR) ++ var = TREE_OPERAND (var, 0); ++ if (rewrite_expr (var, newvar, true)) ++ remove = true; ++ } ++ ++ return remove; ++} ++ ++/* Rewrite PHI nodes, return true if the PHI was replaced. 
*/ ++ ++bool ++ipa_struct_reorg::rewrite_phi (gphi *phi) ++{ ++ tree newlhs[max_split]; ++ gphi *newphi[max_split]; ++ tree result = gimple_phi_result (phi); ++ gphi_iterator gsi; ++ ++ memset (newphi, 0, sizeof (newphi)); ++ ++ if (!rewrite_expr (result, newlhs)) ++ return false; ++ ++ if (newlhs[0] == NULL) ++ return false; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nrewriting PHI:"); ++ print_gimple_stmt (dump_file, phi, 0); ++ } ++ ++ for (unsigned i = 0; i < max_split && newlhs[i]; i++) ++ newphi[i] = create_phi_node (newlhs[i], gimple_bb (phi)); ++ ++ for (unsigned i = 0; i < gimple_phi_num_args (phi); i++) ++ { ++ tree newrhs[max_split]; ++ phi_arg_d rhs = *gimple_phi_arg (phi, i); ++ rewrite_expr (rhs.def, newrhs); ++ for (unsigned j = 0; j < max_split && newlhs[j]; j++) ++ { ++ SET_PHI_ARG_DEF (newphi[j], i, newrhs[j]); ++ gimple_phi_arg_set_location (newphi[j], i, rhs.locus); ++ update_stmt (newphi[j]); ++ } ++ } ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\ninto\n:"); ++ for (unsigned i = 0; i < max_split && newlhs[i]; i++) ++ { ++ print_gimple_stmt (dump_file, newphi[i], 0); ++ fprintf (dump_file, "\n"); ++ } ++ } ++ ++ gsi = gsi_for_phi (phi); ++ remove_phi_node (&gsi, false); ++ ++ return true; ++} ++ ++/* Rewrite gimple statement STMT, return true if the STATEMENT ++ is to be removed. */ ++ ++bool ++ipa_struct_reorg::rewrite_stmt (gimple *stmt, gimple_stmt_iterator *gsi) ++{ ++ switch (gimple_code (stmt)) ++ { ++ case GIMPLE_ASSIGN: ++ return rewrite_assign (as_a (stmt), gsi); ++ case GIMPLE_CALL: ++ return rewrite_call (as_a (stmt), gsi); ++ case GIMPLE_COND: ++ return rewrite_cond (as_a (stmt), gsi); ++ break; ++ case GIMPLE_GOTO: ++ case GIMPLE_SWITCH: ++ break; ++ case GIMPLE_DEBUG: ++ case GIMPLE_ASM: ++ break; ++ default: ++ break; ++ } ++ return false; ++} ++ ++/* Does the function F uses any decl which has changed. 
*/ ++ ++bool ++ipa_struct_reorg::has_rewritten_type (srfunction *f) ++{ ++ for (unsigned i = 0; i < f->decls.length (); i++) ++ { ++ srdecl *d = f->decls[i]; ++ if (d->newdecl[0] != d->decl) ++ return true; ++ } ++ ++ for (unsigned i = 0; i < f->globals.length (); i++) ++ { ++ srdecl *d = f->globals[i]; ++ if (d->newdecl[0] != d->decl) ++ return true; ++ } ++ return false; ++} ++ ++/* Rewrite the functions if needed, return ++ the TODOs requested. */ ++ ++unsigned ++ipa_struct_reorg::rewrite_functions (void) ++{ ++ unsigned retval = 0; ++ ++ restore_field_type (); ++ /* Create new types, if we did not create any new types, ++ then don't rewrite any accesses. */ ++ if (!create_new_types ()) ++ return 0; ++ ++ if (functions.length ()) ++ { ++ retval = TODO_remove_functions; ++ create_new_functions (); ++ } ++ ++ create_new_decls (); ++ ++ for (unsigned i = 0; i < functions.length (); i++) ++ { ++ srfunction *f = functions[i]; ++ if (f->newnode) ++ continue; ++ ++ /* Function uses no rewriten types so don't cause a rewrite. */ ++ if (!has_rewritten_type (f)) ++ continue; ++ ++ cgraph_node *node = f->node; ++ basic_block bb; ++ ++ push_cfun (DECL_STRUCT_FUNCTION (node->decl)); ++ current_function = f; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nBefore rewrite:\n"); ++ dump_function_to_file (current_function_decl, dump_file, ++ dump_flags | TDF_VOPS); ++ } ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);) ++ { ++ if (rewrite_phi (si.phi ())) ++ si = gsi_start_phis (bb); ++ else ++ gsi_next (&si); ++ } ++ ++ for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);) ++ { ++ gimple *stmt = gsi_stmt (si); ++ if (rewrite_stmt (stmt, &si)) ++ gsi_remove (&si, true); ++ else ++ gsi_next (&si); ++ } ++ } ++ ++ /* Debug statements need to happen after all other statements ++ have changed. 
*/ ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);) ++ { ++ gimple *stmt = gsi_stmt (si); ++ if (gimple_code (stmt) == GIMPLE_DEBUG ++ && rewrite_debug (stmt, &si)) ++ gsi_remove (&si, true); ++ else ++ gsi_next (&si); ++ } ++ } ++ ++ /* Release the old SSA_NAMES for old arguments. */ ++ if (f->old) ++ { ++ for (unsigned i = 0; i < f->args.length (); i++) ++ { ++ srdecl *d = f->args[i]; ++ if (d->newdecl[0] != d->decl) ++ { ++ tree ssa_name = ssa_default_def (cfun, d->decl); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Found "); ++ print_generic_expr (dump_file, ssa_name); ++ fprintf (dump_file, " to be released.\n"); ++ } ++ release_ssa_name (ssa_name); ++ } ++ } ++ } ++ ++ update_ssa (TODO_update_ssa_only_virtuals); ++ ++ if (flag_tree_pta) ++ compute_may_aliases (); ++ ++ remove_unused_locals (); ++ ++ cgraph_edge::rebuild_edges (); ++ ++ free_dominance_info (CDI_DOMINATORS); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nAfter rewrite:\n"); ++ dump_function_to_file (current_function_decl, dump_file, ++ dump_flags | TDF_VOPS); ++ } ++ ++ pop_cfun (); ++ current_function = NULL; ++ } ++ ++ return retval | TODO_verify_all; ++} ++ ++unsigned int ++ipa_struct_reorg::execute (void) ++{ ++ /* FIXME: If there is a top-level inline-asm, ++ the pass immediately returns. 
*/ ++ if (symtab->first_asm_symbol ()) ++ return 0; ++ record_accesses (); ++ prune_escaped_types (); ++ analyze_types (); ++ ++ return rewrite_functions (); ++} ++ ++const pass_data pass_data_ipa_struct_reorg = ++{ ++ SIMPLE_IPA_PASS, // type ++ "struct_reorg", // name ++ OPTGROUP_NONE, // optinfo_flags ++ TV_IPA_STRUCT_REORG, // tv_id ++ 0, // properties_required ++ 0, // properties_provided ++ 0, // properties_destroyed ++ 0, // todo_flags_start ++ 0, // todo_flags_finish ++}; ++ ++class pass_ipa_struct_reorg : public simple_ipa_opt_pass ++{ ++public: ++ pass_ipa_struct_reorg (gcc::context *ctxt) ++ : simple_ipa_opt_pass (pass_data_ipa_struct_reorg, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *); ++ virtual unsigned int execute (function *) ++ { ++ return ipa_struct_reorg ().execute (); ++ } ++ ++}; // class pass_ipa_struct_reorg ++ ++bool ++pass_ipa_struct_reorg::gate (function *) ++{ ++ return (optimize ++ && flag_ipa_struct_reorg ++ /* Don't bother doing anything if the program has errors. */ ++ && !seen_error ()); ++} ++ ++} // anon namespace ++ ++ ++simple_ipa_opt_pass * ++make_pass_ipa_struct_reorg (gcc::context *ctxt) ++{ ++ return new pass_ipa_struct_reorg (ctxt); ++} +\ No newline at end of file +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +new file mode 100644 +index 000000000..a58794070 +--- /dev/null ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +@@ -0,0 +1,235 @@ ++/* Struct-reorg optimizations. ++ Copyright (C) 2016-2023 Free Software Foundation, Inc. ++ Contributed by Andrew Pinski ++ ++This file is part of GCC. ++ ++GCC is free software; you can redistribute it and/or modify it under ++the terms of the GNU General Public License as published by the Free ++Software Foundation; either version 3, or (at your option) any later ++version. 
++ ++GCC is distributed in the hope that it will be useful, but WITHOUT ANY ++WARRANTY; without even the implied warranty of MERCHANTABILITY or ++FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License ++for more details. ++ ++You should have received a copy of the GNU General Public License ++along with GCC; see the file COPYING3. If not see ++. */ ++ ++#ifndef IPA_STRUCT_REORG_H ++#define IPA_STRUCT_REORG_H ++ ++namespace struct_reorg { ++ ++const int max_split = 2; ++ ++template ++struct auto_vec_del : auto_vec ++{ ++ ~auto_vec_del (); ++}; ++ ++template ++auto_vec_del::~auto_vec_del (void) ++{ ++ unsigned i; ++ T *t; ++ FOR_EACH_VEC_ELT (*this, i, t) ++ { ++ delete t; ++ } ++} ++ ++enum escape_type ++{ ++ does_not_escape, ++#define DEF_ESCAPE(ENUM, TEXT) ENUM, ++#include "escapes.def" ++ escape_max_escape ++}; ++ ++const char *escape_type_string[escape_max_escape - 1] = ++{ ++#define DEF_ESCAPE(ENUM, TEXT) TEXT, ++#include "escapes.def" ++}; ++ ++struct srfield; ++struct srtype; ++struct sraccess; ++struct srdecl; ++struct srfunction; ++ ++struct srfunction ++{ ++ cgraph_node *node; ++ auto_vec args; ++ auto_vec globals; ++ auto_vec_del decls; ++ srdecl *record_decl (srtype *, tree, int arg); ++ ++ srfunction *old; ++ cgraph_node *newnode; ++ srfunction *newf; ++ ++ // Constructors ++ srfunction (cgraph_node *n); ++ ++ // Methods ++ void add_arg (srdecl *arg); ++ void dump (FILE *file); ++ void simple_dump (FILE *file); ++ ++ bool check_args (void); ++ void create_new_decls (void); ++ srdecl *find_decl (tree); ++}; ++ ++struct srglobal : private srfunction ++{ ++ srglobal () ++ : srfunction (NULL) ++ {} ++ ++ using srfunction::dump; ++ using srfunction::create_new_decls; ++ using srfunction::find_decl; ++ using srfunction::record_decl; ++ using srfunction::decls; ++}; ++ ++struct srtype ++{ ++ tree type; ++ auto_vec_del fields; ++ ++ // array of fields that use this type. 
++ auto_vec field_sites; ++ ++ // array of functions which use directly the type ++ auto_vec functions; ++ ++ auto_vec_del accesses; ++ bool chain_type; ++ ++private: ++ escape_type escapes; ++ ++public: ++ tree newtype[max_split]; ++ bool visited; ++ ++ // Constructors ++ srtype (tree type); ++ ++ // Methods ++ void dump (FILE *file); ++ void simple_dump (FILE *file); ++ void add_function (srfunction *); ++ void add_access (sraccess *a) ++ { ++ accesses.safe_push (a); ++ } ++ void add_field_site (srfield *); ++ ++ srfield *find_field (unsigned HOST_WIDE_INT offset); ++ ++ bool create_new_type (void); ++ void analyze (void); ++ void mark_escape (escape_type, gimple *stmt); ++ bool has_escaped (void) ++ { ++ return escapes != does_not_escape; ++ } ++ const char *escape_reason (void) ++ { ++ if (!has_escaped ()) ++ return NULL; ++ return escape_type_string[escapes - 1]; ++ } ++ bool escaped_rescusive (void) ++ { ++ return escapes == escape_rescusive_type; ++ } ++ bool has_new_type (void) ++ { ++ return newtype[0] && newtype[0] != type; ++ } ++}; ++ ++struct srfield ++{ ++ unsigned HOST_WIDE_INT offset; ++ tree fieldtype; ++ tree fielddecl; ++ srtype *base; ++ srtype *type; ++ ++ unsigned clusternum; ++ ++ tree newfield[max_split]; ++ ++ // Constructors ++ srfield (tree field, srtype *base); ++ ++ // Methods ++ void dump (FILE *file); ++ void simple_dump (FILE *file); ++ ++ void create_new_fields (tree newtype[max_split], ++ tree newfields[max_split], ++ tree newlast[max_split]); ++}; ++ ++struct sraccess ++{ ++ gimple *stmt; ++ cgraph_node *node; ++ ++ srtype *type; ++ // NULL field means the whole type is accessed ++ srfield *field; ++ ++ // Constructors ++ sraccess (gimple *s, cgraph_node *n, srtype *t, srfield *f = NULL) ++ : stmt (s), ++ node (n), ++ type (t), ++ field (f) ++ {} ++ ++ // Methods ++ void dump (FILE *file); ++}; ++ ++struct srdecl ++{ ++ srtype *type; ++ tree decl; ++ tree func; ++ /* -1 : not an argument ++ -2 : static chain ++ */ ++ int 
argumentnum; ++ ++ bool visited; ++ ++ tree newdecl[max_split]; ++ ++ // Constructors ++ srdecl (srtype *type, tree decl, int argumentnum = -1); ++ ++ // Methods ++ void dump (FILE *file); ++ bool has_new_decl (void) ++ { ++ return newdecl[0] && newdecl[0] != decl; ++ } ++}; ++ ++ ++} // namespace struct_reorg ++ ++#endif +diff --git a/gcc/params.opt b/gcc/params.opt +index e0ff9e210..1ddf1343f 100644 +--- a/gcc/params.opt ++++ b/gcc/params.opt +@@ -865,6 +865,10 @@ Enum(parloops_schedule_type) String(runtime) Value(PARLOOPS_SCHEDULE_RUNTIME) + Common Joined UInteger Var(param_partial_inlining_entry_probability) Init(70) Optimization IntegerRange(0, 100) Param + Maximum probability of the entry BB of split region (in percent relative to entry BB of the function) to make partial inlining happen. + ++-param=struct-reorg-cold-struct-ratio= ++Common Joined UInteger Var(param_struct_reorg_cold_struct_ratio) Init(10) IntegerRange(0, 100) Param Optimization ++The threshold ratio between current and hottest structure counts. ++ + -param=predictable-branch-outcome= + Common Joined UInteger Var(param_predictable_branch_outcome) Init(2) IntegerRange(0, 50) Param Optimization + Maximal estimated outcome of branch considered predictable. +diff --git a/gcc/passes.def b/gcc/passes.def +index 375d3d62d..1c1658c4a 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -177,6 +177,8 @@ along with GCC; see the file COPYING3. If not see + compiled unit. */ + INSERT_PASSES_AFTER (all_late_ipa_passes) + NEXT_PASS (pass_ipa_pta); ++ /* FIXME: this should be a normal IP pass. 
*/ ++ NEXT_PASS (pass_ipa_struct_reorg); + NEXT_PASS (pass_omp_simd_clone); + TERMINATE_PASS_LIST (all_late_ipa_passes) + +diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +new file mode 100644 +index 000000000..43913104e +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +@@ -0,0 +1,35 @@ ++# Copyright (C) 1997-2023 Free Software Foundation, Inc. ++ ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3 of the License, or ++# (at your option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. ++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++load_lib gcc-dg.exp ++load_lib torture-options.exp ++ ++# Initialize `dg'. ++dg-init ++torture-init ++ ++set STRUCT_REORG_TORTURE_OPTIONS [list \ ++ { -O3 } \ ++ { -Ofast } ] ++ ++set-torture-options $STRUCT_REORG_TORTURE_OPTIONS {{}} ++ ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c]] \ ++ "" "-fipa-struct-reorg -fdump-ipa-all -flto-partition=one -fwhole-program" ++ ++# All done. 
++torture-finish ++dg-finish +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-1.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-1.c +new file mode 100644 +index 000000000..6565fe8dd +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-1.c +@@ -0,0 +1,24 @@ ++// { dg-do compile } ++// { dg-options "-O3 -flto-partition=one -fipa-struct-reorg -fdump-ipa-all" } ++ ++struct a ++{ ++ int t, t1; ++}; ++ ++static struct a *b; ++ ++void *xmalloc(int); ++ ++ ++void f(void) ++{ ++ b = xmalloc (sizeof(*b)); ++} ++ ++int g(void) ++{ ++ return b->t; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform." "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-2.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-2.c +new file mode 100644 +index 000000000..44babd35b +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-2.c +@@ -0,0 +1,29 @@ ++// { dg-do run } ++ ++#include ++ ++struct a ++{ ++ int t; ++ int t1; ++}; ++ ++__attribute__((noinline)) int f(int i, int j) ++{ ++ struct a *t; ++ struct a t1 = {i, j}; ++ t = &t1; ++ auto int g(void) __attribute__((noinline)); ++ int g(void) ++ { ++ return t->t + t->t1; ++ } ++ return g(); ++} ++ ++int main() ++{ ++ assert (f(1, 2) == 3); ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-3.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-3.c +new file mode 100644 +index 000000000..5864ad46f +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-3.c +@@ -0,0 +1,23 @@ ++// { dg-do compile } ++// { dg-options "-O3 -flto-partition=one -fipa-struct-reorg -fdump-ipa-all" } ++ ++#include ++typedef struct { ++ long laststart_offset; ++ unsigned regnum; ++} compile_stack_elt_t; ++typedef struct { ++ compile_stack_elt_t *stack; ++ unsigned size; ++} compile_stack_type; ++void f (const char *p, const char *pend, int c) ++{ ++ compile_stack_type compile_stack; ++ while (p != pend) ++ if (c) ++ 
compile_stack.stack = realloc (compile_stack.stack, ++ (compile_stack.size << 1) ++ * sizeof (compile_stack_elt_t)); ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-4.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-4.c +new file mode 100644 +index 000000000..e5a8a6c84 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-4.c +@@ -0,0 +1,59 @@ ++/* { dg-do run } */ ++ ++extern void abort (void); ++ ++struct S ++{ ++ int b; ++ int *c; ++}; ++static int d, e; ++ ++static struct S s; ++ ++static int * ++__attribute__((noinline, const)) ++foo (void) ++{ ++ return &s.b; ++} ++ ++int * ++__attribute__((noinline)) ++bar (int **f) ++{ ++ s.c = &d; ++ *f = &e; ++ /* As nothing ever takes the address of any int * field in struct S, ++ the write to *f can't alias with the s.c field. */ ++ return s.c; ++} ++ ++int ++__attribute__((noinline)) ++baz (int *x) ++{ ++ s.b = 1; ++ *x = 4; ++ /* Function foo takes address of an int field in struct S, ++ so *x can alias with the s.b field (and it does in this testcase). */ ++ return s.b; ++} ++ ++int ++__attribute__((noinline)) ++t (void) ++{ ++ int *f = (int *) 0; ++ return 10 * (bar (&f) != &d) + baz (foo ()); ++} ++ ++int ++main (void) ++{ ++ if (t () != 4) ++ abort (); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform." 
"struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/w_prof_global_array.c b/gcc/testsuite/gcc.dg/struct/w_prof_global_array.c +new file mode 100644 +index 000000000..733413a94 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/w_prof_global_array.c +@@ -0,0 +1,29 @@ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#define N 1000 ++str_t A[N]; ++ ++int ++main () ++{ ++ int i; ++ ++ for (i = 0; i < N; i++) ++ { ++ A[i].a = 0; ++ } ++ ++ for (i = 0; i < N; i++) ++ if (A[i].a != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/w_prof_global_var.c b/gcc/testsuite/gcc.dg/struct/w_prof_global_var.c +new file mode 100644 +index 000000000..0ef686e74 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/w_prof_global_var.c +@@ -0,0 +1,42 @@ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++str_t *p; ++ ++int ++main () ++{ ++ int i, sum; ++ ++ p = malloc (N * sizeof (str_t)); ++ if (p == NULL) ++ return 0; ++ for (i = 0; i < N; i++) ++ p[i].b = i; ++ ++ for (i = 0; i < N; i++) ++ p[i].a = p[i].b + 1; ++ ++ for (i = 0; i < N; i++) ++ if (p[i].a != p[i].b + 1) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/w_prof_local_array.c b/gcc/testsuite/gcc.dg/struct/w_prof_local_array.c +new file mode 100644 +index 000000000..23a53be53 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/w_prof_local_array.c +@@ -0,0 +1,37 @@ ++#include ++typedef struct ++{ ++ int a; ++ float b; 
++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++main () ++{ ++ int i; ++ str_t A[N]; ++ ++ for (i = 0; i < N; i++) ++ { ++ A[i].a = 0; ++ } ++ ++ for (i = 0; i < N; i++) ++ if (A[i].a != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/w_prof_local_var.c b/gcc/testsuite/gcc.dg/struct/w_prof_local_var.c +new file mode 100644 +index 000000000..0cbb172f2 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/w_prof_local_var.c +@@ -0,0 +1,40 @@ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++main () ++{ ++ int i, sum; ++ ++ str_t * p = malloc (N * sizeof (str_t)); ++ if (p == NULL) ++ return 0; ++ for (i = 0; i < N; i++) ++ p[i].b = i; ++ ++ for (i = 0; i < N; i++) ++ p[i].a = p[i].b + 1; ++ ++ for (i = 0; i < N; i++) ++ if (p[i].a != p[i].b + 1) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/w_prof_single_str_global.c b/gcc/testsuite/gcc.dg/struct/w_prof_single_str_global.c +new file mode 100644 +index 000000000..f900b1349 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/w_prof_single_str_global.c +@@ -0,0 +1,31 @@ ++#include ++typedef struct ++{ ++ int a; ++ int b; ++}str_t; ++ ++#define N 3 ++ ++str_t str; ++ ++int ++main () ++{ ++ int i; ++ int res = 1<<(1< ++ ++typedef struct ++{ ++ int a; ++ float b; ++}str_t1; ++ ++typedef struct ++{ ++ int c; ++ float d; 
++}str_t2; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 16000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/16) ++#endif ++#else ++#define N 1000 ++#endif ++ ++str_t1 *p1; ++str_t2 *p2; ++int num; ++ ++void ++foo (void) ++{ ++ int i; ++ ++ for (i=0; i < num; i++) ++ p2[i].c = 2; ++} ++ ++int ++main () ++{ ++ int i, r; ++ ++ r = rand (); ++ num = r > N ? N : r; ++ p1 = malloc (num * sizeof (str_t1)); ++ p2 = malloc (num * sizeof (str_t2)); ++ ++ if (p1 == NULL || p2 == NULL) ++ return 0; ++ ++ for (i = 0; i < num; i++) ++ p1[i].a = 1; ++ ++ foo (); ++ ++ for (i = 0; i < num; i++) ++ if (p1[i].a != 1 || p2[i].c != 2) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/w_ratio_cold_str.c b/gcc/testsuite/gcc.dg/struct/w_ratio_cold_str.c +new file mode 100644 +index 000000000..dcc545964 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/w_ratio_cold_str.c +@@ -0,0 +1,43 @@ ++#include ++typedef struct ++{ ++ int a; ++ int b; ++}str_t1; ++ ++typedef struct ++{ ++ float a; ++ float b; ++}str_t2; ++ ++#define N1 1000 ++#define N2 100 ++str_t1 A1[N1]; ++str_t2 A2[N2]; ++ ++int ++main () ++{ ++ int i; ++ ++ for (i = 0; i < N1; i++) ++ A1[i].a = 0; ++ ++ for (i = 0; i < N2; i++) ++ A2[i].a = 0; ++ ++ for (i = 0; i < N1; i++) ++ if (A1[i].a != 0) ++ abort (); ++ ++ for (i = 0; i < N2; i++) ++ if (A2[i].a != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* Arrays are not handled. 
*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_array_field.c b/gcc/testsuite/gcc.dg/struct/wo_prof_array_field.c +new file mode 100644 +index 000000000..6d6375fc1 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_array_field.c +@@ -0,0 +1,26 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct basic ++{ ++ int a; ++ int b[10]; ++} type_struct; ++ ++type_struct *str1; ++ ++int main() ++{ ++ int i; ++ ++ str1 = malloc (10 * sizeof (type_struct)); ++ ++ for (i=0; i<=9; i++) ++ str1[i].a = str1[i].b[0]; ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_array_through_pointer.c b/gcc/testsuite/gcc.dg/struct/wo_prof_array_through_pointer.c +new file mode 100644 +index 000000000..9d3213408 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_array_through_pointer.c +@@ -0,0 +1,38 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++main () ++{ ++ int i; ++ str_t A[N]; ++ str_t *p = A; ++ ++ for (i = 0; i < N; i++) ++ p[i].a = 0; ++ ++ for (i = 0; i < N; i++) ++ if (p[i].a != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_double_malloc.c b/gcc/testsuite/gcc.dg/struct/wo_prof_double_malloc.c +new file mode 100644 +index 000000000..d79992a53 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_double_malloc.c +@@ -0,0 +1,29 @@ ++/* { dg-do 
compile } */ ++/* { dg-do run } */ ++ ++#include ++ ++typedef struct test_struct ++{ ++ int a; ++ int b; ++} type_struct; ++ ++typedef type_struct **struct_pointer2; ++ ++struct_pointer2 str1; ++ ++int main() ++{ ++ int i, j; ++ ++ str1 = malloc (2 * sizeof (type_struct *)); ++ ++ for (i = 0; i <= 1; i++) ++ str1[i] = malloc (2 * sizeof (type_struct)); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_empty_str.c b/gcc/testsuite/gcc.dg/struct/wo_prof_empty_str.c +new file mode 100644 +index 000000000..ee9b0d765 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_empty_str.c +@@ -0,0 +1,44 @@ ++/* { dg-do run } */ ++ ++#include ++ ++struct S { int a; struct V *b; }; ++typedef struct { int c; } T; ++typedef struct { int d; int e; } U; ++ ++void * ++fn (void *x) ++{ ++ return x; ++} ++ ++int ++foo (struct S *s) ++{ ++ T x; ++ ++ T y = *(T *)fn (&x); ++ return y.c; ++} ++ ++int ++bar (struct S *s) ++{ ++ U x; ++ ++ U y = *(U *)fn (&x); ++ return y.d + s->a; ++} ++ ++int ++main () ++{ ++ struct S s; ++ ++ foo(&s) + bar (&s); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "No structures to transform" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_arg_to_local.c b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_arg_to_local.c +new file mode 100644 +index 000000000..9ebb2b4cc +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_arg_to_local.c +@@ -0,0 +1,44 @@ ++/* { dg-do run } */ ++ ++#include ++struct str ++{ ++ int a; ++ float b; ++}; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++foo (struct str * p_str) ++{ ++ static int 
sum = 0; ++ ++ sum = sum + p_str->a; ++ return sum; ++} ++ ++int ++main () ++{ ++ int i, sum; ++ struct str * p = malloc (N * sizeof (struct str)); ++ if (p == NULL) ++ return 0; ++ for (i = 0; i < N; i++) ++ sum = foo (p+i); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ ++ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_return-1.c b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_return-1.c +new file mode 100644 +index 000000000..d0dce8b53 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_return-1.c +@@ -0,0 +1,33 @@ ++/* { dg-do run } */ ++/* { dg-additional-options "-fno-ipa-sra" } */ ++ ++#include ++ ++struct A { ++ int d; ++ int d1; ++}; ++ ++struct A a; ++ ++struct A *foo () __attribute__((noinline)); ++struct A *foo () ++{ ++ a.d = 5; ++ return &a; ++} ++ ++int ++main () ++{ ++ a.d = 0; ++ foo (); ++ ++ if (a.d != 5) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "has escaped. 
.Type escapes via a return" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_return.c b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_return.c +new file mode 100644 +index 000000000..71167182d +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_return.c +@@ -0,0 +1,32 @@ ++/* { dg-do run } */ ++/* { dg-additional-options "-fno-ipa-sra" } */ ++ ++#include ++ ++struct A { ++ int d; ++}; ++ ++struct A a; ++ ++struct A foo () __attribute__((noinline)); ++struct A foo () ++{ ++ a.d = 5; ++ return a; ++} ++ ++int ++main () ++{ ++ a.d = 0; ++ foo (); ++ ++ if (a.d != 5) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "has escaped: \"Type escapes via a return" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_str_init.c b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_str_init.c +new file mode 100644 +index 000000000..74fa11f39 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_str_init.c +@@ -0,0 +1,31 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ int b; ++}str_t; ++ ++#define N 2 ++ ++str_t A[2] = {{1,1},{2,2}}; ++ ++int ++main () ++{ ++ int i; ++ ++ for (i = 0; i < N; i++) ++ A[i].b = A[i].a; ++ ++ for (i = 0; i < N; i++) ++ if (A[i].b != A[i].a) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "No structures to transform." 
"struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_array.c b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_array.c +new file mode 100644 +index 000000000..60d2466e1 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_array.c +@@ -0,0 +1,33 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#define N 1000 ++ ++typedef struct ++{ ++ str_t A[N]; ++ int c; ++}str_with_substr_t; ++ ++str_with_substr_t a; ++ ++int ++main () ++{ ++ int i; ++ ++ for (i = 0; i < N; i++) ++ a.A[i].b = 0; ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_pointer.c b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_pointer.c +new file mode 100644 +index 000000000..baf617816 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_pointer.c +@@ -0,0 +1,48 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 16000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/16) ++#endif ++#else ++#define N 1000 ++#endif ++ ++typedef struct ++{ ++ str_t * sub_str; ++ int c; ++}str_with_substr_t; ++ ++int foo; ++ ++int ++main (void) ++{ ++ int i; ++ str_with_substr_t A[N]; ++ str_t a[N]; ++ ++ for (i=0; i < N; i++) ++ A[i].sub_str = &(a[i]); ++ ++ for (i=0; i < N; i++) ++ A[i].sub_str->a = 5; ++ ++ foo = A[56].sub_str->a; ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "has escaped...Type is used in an array" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_value.c 
b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_value.c +new file mode 100644 +index 000000000..33fce3b23 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_substr_value.c +@@ -0,0 +1,45 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++ ++typedef struct ++{ ++ str_t sub_str; ++ int c; ++}str_with_substr_t; ++ ++int ++main () ++{ ++ int i; ++ str_with_substr_t A[N]; ++ ++ for (i = 0; i < N; i++) ++ A[i].sub_str.a = 5; ++ ++ for (i = 0; i < N; i++) ++ if (A[i].sub_str.a != 5) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "has escaped...Type is used in an array" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_global_array.c b/gcc/testsuite/gcc.dg/struct/wo_prof_global_array.c +new file mode 100644 +index 000000000..1c5a3aa15 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_global_array.c +@@ -0,0 +1,32 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#define N 1000 ++str_t A[N]; ++ ++int ++main () ++{ ++ int i; ++ ++ for (i = 0; i < N; i++) ++ { ++ A[i].a = 0; ++ } ++ ++ for (i = 0; i < N; i++) ++ if (A[i].a != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_global_var.c b/gcc/testsuite/gcc.dg/struct/wo_prof_global_var.c +new file mode 100644 +index 000000000..a0d1467fe +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_global_var.c +@@ -0,0 +1,45 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ 
++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++str_t *p; ++ ++int ++main () ++{ ++ int i, sum; ++ ++ p = malloc (N * sizeof (str_t)); ++ if (p == NULL) ++ return 0; ++ for (i = 0; i < N; i++) ++ p[i].b = i; ++ ++ for (i = 0; i < N; i++) ++ p[i].b = p[i].a + 1; ++ ++ for (i = 0; i < N; i++) ++ if (p[i].b != p[i].a + 1) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_local_array.c b/gcc/testsuite/gcc.dg/struct/wo_prof_local_array.c +new file mode 100644 +index 000000000..6c24e1c8b +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_local_array.c +@@ -0,0 +1,40 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++main () ++{ ++ int i; ++ str_t A[N]; ++ ++ for (i = 0; i < N; i++) ++ { ++ A[i].a = 0; ++ } ++ ++ for (i = 0; i < N; i++) ++ if (A[i].a != 0) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" { xfail *-*-* } } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_local_var.c b/gcc/testsuite/gcc.dg/struct/wo_prof_local_var.c +new file mode 100644 +index 000000000..8f2f8143f +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_local_var.c +@@ -0,0 +1,43 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 
8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++main () ++{ ++ int i, sum; ++ ++ str_t * p = malloc (N * sizeof (str_t)); ++ if (p == NULL) ++ return 0; ++ for (i = 0; i < N; i++) ++ p[i].b = i; ++ ++ for (i = 0; i < N; i++) ++ p[i].b = p[i].a + 1; ++ ++ for (i = 0; i < N; i++) ++ if (p[i].b != p[i].a + 1) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var-1.c b/gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var-1.c +new file mode 100644 +index 000000000..98bf01a6d +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var-1.c +@@ -0,0 +1,47 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++main () ++{ ++ long i, num; ++ ++ num = rand(); ++ num = num > N ? 
N : num; ++ str_t * p = malloc (num * sizeof (str_t)); ++ ++ if (p == 0) ++ return 0; ++ ++ for (i = 1; i <= num; i++) ++ p[i-1].b = i; ++ ++ for (i = 1; i <= num; i++) ++ p[i-1].a = p[i-1].b + 1; ++ ++ for (i = 0; i < num; i++) ++ if (p[i].a != p[i].b + 1) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var.c b/gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var.c +new file mode 100644 +index 000000000..66b0f967c +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_malloc_size_var.c +@@ -0,0 +1,47 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 8000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/8) ++#endif ++#else ++#define N 1000 ++#endif ++ ++int ++main () ++{ ++ int i, num; ++ ++ num = rand(); ++ num = num > N ? 
N : num; ++ str_t * p = malloc (num * sizeof (str_t)); ++ ++ if (p == 0) ++ return 0; ++ ++ for (i = 0; i < num; i++) ++ p[i].b = i; ++ ++ for (i = 0; i < num; i++) ++ p[i].a = p[i].b + 1; ++ ++ for (i = 0; i < num; i++) ++ if (p[i].a != p[i].b + 1) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_mult_field_peeling.c b/gcc/testsuite/gcc.dg/struct/wo_prof_mult_field_peeling.c +new file mode 100644 +index 000000000..d28bcfb02 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_mult_field_peeling.c +@@ -0,0 +1,42 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ float b; ++ int c; ++ float d; ++}str_t; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 1600 ++#define N 100 ++#else ++#define N (STACK_SIZE/16) ++#endif ++#else ++#define N 100 ++#endif ++ ++int ++main () ++{ ++ int i; ++ str_t *p = malloc (N * sizeof (str_t)); ++ if (p == NULL) ++ return 0; ++ for (i = 0; i < N; i++) ++ p[i].a = 5; ++ ++ for (i = 0; i < N; i++) ++ if (p[i].a != 5) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* Two more fields structure is not splitted. */ ++/* { dg-final { scan-ipa-dump "No structures to transform." 
"struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_single_str_global.c b/gcc/testsuite/gcc.dg/struct/wo_prof_single_str_global.c +new file mode 100644 +index 000000000..37a6a43a8 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_single_str_global.c +@@ -0,0 +1,34 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++typedef struct ++{ ++ int a; ++ int b; ++}str_t; ++ ++#define N 3 ++ ++str_t str; ++ ++int ++main () ++{ ++ int i; ++ int res = 1<<(1< ++typedef struct ++{ ++ int a; ++ int b; ++}str_t; ++ ++#define N 3 ++ ++int ++main () ++{ ++ int i; ++ int res = 1<<(1< ++typedef struct ++{ ++ int a; ++ int *b; ++}str_t; ++ ++#define N 3 ++ ++str_t *p; ++ ++int ++main () ++{ ++ str_t str; ++ int i; ++ int res = 1 << (1 << N); ++ p = &str; ++ str.a = 2; ++ ++ p->b = &(p->a); ++ ++ for (i=0; i < N; i++) ++ p->a = *(p->b)*(*(p->b)); ++ ++ if (p->a != res) ++ abort (); ++ ++ /* POSIX ignores all but the 8 low-order bits, but other ++ environments may not. */ ++ return (p->a & 255); ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "has escaped...Type escapes a cast to a different" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_two_strs.c b/gcc/testsuite/gcc.dg/struct/wo_prof_two_strs.c +new file mode 100644 +index 000000000..cba92e995 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_two_strs.c +@@ -0,0 +1,67 @@ ++/* { dg-do compile } */ ++/* { dg-do run } */ ++ ++#include ++ ++typedef struct ++{ ++ int a; ++ float b; ++}str_t1; ++ ++typedef struct ++{ ++ int c; ++ float d; ++}str_t2; ++ ++#ifdef STACK_SIZE ++#if STACK_SIZE > 16000 ++#define N 1000 ++#else ++#define N (STACK_SIZE/16) ++#endif ++#else ++#define N 1000 ++#endif ++ ++str_t1 *p1; ++str_t2 *p2; ++int num; ++ ++void ++foo (void) ++{ ++ int i; ++ ++ for (i=0; i < num; i++) ++ p2[i].c = 2; ++} ++ ++int ++main () ++{ ++ int i, r; ++ ++ r = rand (); ++ num = r > N ? 
N : r; ++ p1 = malloc (num * sizeof (str_t1)); ++ p2 = malloc (num * sizeof (str_t2)); ++ ++ if (p1 == NULL || p2 == NULL) ++ return 0; ++ ++ for (i = 0; i < num; i++) ++ p1[i].a = 1; ++ ++ foo (); ++ ++ for (i = 0; i < num; i++) ++ if (p1[i].a != 1 || p2[i].c != 2) ++ abort (); ++ ++ return 0; ++} ++ ++/*--------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 2dae5e1c7..366118126 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -80,6 +80,7 @@ DEFTIMEVAR (TV_IPA_CONSTANT_PROP , "ipa cp") + DEFTIMEVAR (TV_IPA_INLINING , "ipa inlining heuristics") + DEFTIMEVAR (TV_IPA_FNSPLIT , "ipa function splitting") + DEFTIMEVAR (TV_IPA_COMDATS , "ipa comdats") ++DEFTIMEVAR (TV_IPA_STRUCT_REORG , "ipa struct reorg optimization") + DEFTIMEVAR (TV_IPA_OPT , "ipa various optimizations") + DEFTIMEVAR (TV_IPA_LTO_DECOMPRESS , "lto stream decompression") + DEFTIMEVAR (TV_IPA_LTO_COMPRESS , "lto stream compression") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index 606d1d60b..ec7be874c 100644 +--- a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -526,6 +526,7 @@ extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt); ++extern simple_ipa_opt_pass *make_pass_ipa_struct_reorg (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_target_clone (gcc::context *ctxt); +-- +2.33.0 + diff --git a/0015-LoongArch-Add-enum-style-mexplicit-relocs-option.patch b/0015-LoongArch-Add-enum-style-mexplicit-relocs-option.patch new file mode 100644 index 
0000000000000000000000000000000000000000..0786ad20d4e9e2abb784cc512f717cc90e3d8f58 --- /dev/null +++ b/0015-LoongArch-Add-enum-style-mexplicit-relocs-option.patch @@ -0,0 +1,233 @@ +From 56403837a7859f0a7ccbc56c055261c9adf22fb8 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Mon, 23 Oct 2023 15:23:11 +0800 +Subject: [PATCH 015/188] LoongArch: Add enum-style -mexplicit-relocs= option + +To take a better balance between scheduling and relaxation when -flto is +enabled, add three-way -mexplicit-relocs={auto,none,always} options. +The old -mexplicit-relocs and -mno-explicit-relocs options are still +supported, they are mapped to -mexplicit-relocs=always and +-mexplicit-relocs=none. + +The default choice is determined by probing assembler capabilities at +build time. If the assembler does not supports explicit relocs at all, +the default will be none; if it supports explicit relocs but not +relaxation, the default will be always; if both explicit relocs and +relaxation are supported, the default will be auto. + +Currently auto is same as none. We will make auto more clever in +following changes. + +gcc/ChangeLog: + + * config/loongarch/genopts/loongarch-strings: Add strings for + -mexplicit-relocs={auto,none,always}. + * config/loongarch/genopts/loongarch.opt.in: Add options for + -mexplicit-relocs={auto,none,always}. + * config/loongarch/loongarch-str.h: Regenerate. + * config/loongarch/loongarch.opt: Regenerate. + * config/loongarch/loongarch-def.h + (EXPLICIT_RELOCS_AUTO): Define. + (EXPLICIT_RELOCS_NONE): Define. + (EXPLICIT_RELOCS_ALWAYS): Define. + (N_EXPLICIT_RELOCS_TYPES): Define. + * config/loongarch/loongarch.cc + (loongarch_option_override_internal): Error out if the old-style + -m[no-]explicit-relocs option is used with + -mexplicit-relocs={auto,none,always} together. Map + -mno-explicit-relocs to -mexplicit-relocs=none and + -mexplicit-relocs to -mexplicit-relocs=always for backward + compatibility. 
Set a proper default for -mexplicit-relocs= + based on configure-time probed linker capability. Update a + diagnostic message to mention -mexplicit-relocs=always instead + of the old-style -mexplicit-relocs. + (loongarch_handle_model_attribute): Update a diagnostic message + to mention -mexplicit-relocs=always instead of the old-style + -mexplicit-relocs. + * config/loongarch/loongarch.h (TARGET_EXPLICIT_RELOCS): Define. +--- + .../loongarch/genopts/loongarch-strings | 6 +++++ + gcc/config/loongarch/genopts/loongarch.opt.in | 21 ++++++++++++++-- + gcc/config/loongarch/loongarch-def.h | 6 +++++ + gcc/config/loongarch/loongarch-str.h | 5 ++++ + gcc/config/loongarch/loongarch.cc | 24 +++++++++++++++++-- + gcc/config/loongarch/loongarch.h | 3 +++ + gcc/config/loongarch/loongarch.opt | 21 ++++++++++++++-- + 7 files changed, 80 insertions(+), 6 deletions(-) + +diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings +index eb5086fe3..6c8a42af2 100644 +--- a/gcc/config/loongarch/genopts/loongarch-strings ++++ b/gcc/config/loongarch/genopts/loongarch-strings +@@ -65,3 +65,9 @@ STR_CMODEL_TS tiny-static + STR_CMODEL_MEDIUM medium + STR_CMODEL_LARGE large + STR_CMODEL_EXTREME extreme ++ ++# -mexplicit-relocs ++OPTSTR_EXPLICIT_RELOCS explicit-relocs ++STR_EXPLICIT_RELOCS_AUTO auto ++STR_EXPLICIT_RELOCS_NONE none ++STR_EXPLICIT_RELOCS_ALWAYS always +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index 74cf4a7f7..e7df1964a 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -176,10 +176,27 @@ mmax-inline-memcpy-size= + Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. 
+ +-mexplicit-relocs +-Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS & !HAVE_AS_MRELAX_OPTION) ++Enum ++Name(explicit_relocs) Type(int) ++The code model option names for -mexplicit-relocs: ++ ++EnumValue ++Enum(explicit_relocs) String(@@STR_EXPLICIT_RELOCS_AUTO@@) Value(EXPLICIT_RELOCS_AUTO) ++ ++EnumValue ++Enum(explicit_relocs) String(@@STR_EXPLICIT_RELOCS_NONE@@) Value(EXPLICIT_RELOCS_NONE) ++ ++EnumValue ++Enum(explicit_relocs) String(@@STR_EXPLICIT_RELOCS_ALWAYS@@) Value(EXPLICIT_RELOCS_ALWAYS) ++ ++mexplicit-relocs= ++Target RejectNegative Joined Enum(explicit_relocs) Var(la_opt_explicit_relocs) Init(M_OPT_UNSET) + Use %reloc() assembly operators. + ++mexplicit-relocs ++Target Var(la_opt_explicit_relocs_backward) Init(M_OPT_UNSET) ++Use %reloc() assembly operators (for backward compatibility). ++ + ; The code model option names for -mcmodel. + Enum + Name(cmodel) Type(int) +diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h +index eb8e53b20..4757de14b 100644 +--- a/gcc/config/loongarch/loongarch-def.h ++++ b/gcc/config/loongarch/loongarch-def.h +@@ -100,6 +100,12 @@ extern const char* loongarch_cmodel_strings[]; + #define CMODEL_EXTREME 5 + #define N_CMODEL_TYPES 6 + ++/* enum explicit_relocs */ ++#define EXPLICIT_RELOCS_AUTO 0 ++#define EXPLICIT_RELOCS_NONE 1 ++#define EXPLICIT_RELOCS_ALWAYS 2 ++#define N_EXPLICIT_RELOCS_TYPES 3 ++ + /* The common default value for variables whose assignments + are triggered by command-line options. */ + +diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h +index ecfebf9db..037e9e583 100644 +--- a/gcc/config/loongarch/loongarch-str.h ++++ b/gcc/config/loongarch/loongarch-str.h +@@ -64,4 +64,9 @@ along with GCC; see the file COPYING3. 
If not see + #define STR_CMODEL_LARGE "large" + #define STR_CMODEL_EXTREME "extreme" + ++#define OPTSTR_EXPLICIT_RELOCS "explicit-relocs" ++#define STR_EXPLICIT_RELOCS_AUTO "auto" ++#define STR_EXPLICIT_RELOCS_NONE "none" ++#define STR_EXPLICIT_RELOCS_ALWAYS "always" ++ + #endif /* LOONGARCH_STR_H */ +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index e22a64600..3258c8655 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -7383,6 +7383,25 @@ loongarch_option_override_internal (struct gcc_options *opts, + loongarch_update_gcc_opt_status (&la_target, opts, opts_set); + loongarch_cpu_option_override (&la_target, opts, opts_set); + ++ if (la_opt_explicit_relocs != M_OPT_UNSET ++ && la_opt_explicit_relocs_backward != M_OPT_UNSET) ++ error ("do not use %qs (with %qs) and %qs (without %qs) together", ++ "-mexplicit-relocs=", "=", ++ la_opt_explicit_relocs_backward ? "-mexplicit-relocs" ++ : "-mno-explicit-relocs", "="); ++ ++ if (la_opt_explicit_relocs_backward != M_OPT_UNSET) ++ la_opt_explicit_relocs = (la_opt_explicit_relocs_backward ++ ? EXPLICIT_RELOCS_ALWAYS ++ : EXPLICIT_RELOCS_NONE); ++ ++ if (la_opt_explicit_relocs == M_OPT_UNSET) ++ la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS ++ ? (HAVE_AS_MRELAX_OPTION ++ ? 
EXPLICIT_RELOCS_AUTO ++ : EXPLICIT_RELOCS_ALWAYS) ++ : EXPLICIT_RELOCS_NONE); ++ + if (TARGET_ABI_LP64) + flag_pcc_struct_return = 0; + +@@ -7413,7 +7432,7 @@ loongarch_option_override_internal (struct gcc_options *opts, + case CMODEL_EXTREME: + if (!TARGET_EXPLICIT_RELOCS) + error ("code model %qs needs %s", +- "extreme", "-mexplicit-relocs"); ++ "extreme", "-mexplicit-relocs=always"); + + if (opts->x_flag_plt) + { +@@ -7717,7 +7736,8 @@ loongarch_handle_model_attribute (tree *node, tree name, tree arg, int, + if (!TARGET_EXPLICIT_RELOCS) + { + error_at (DECL_SOURCE_LOCATION (decl), +- "%qE attribute requires %s", name, "-mexplicit-relocs"); ++ "%qE attribute requires %s", name, ++ "-mexplicit-relocs=always"); + *no_add_attrs = true; + return NULL_TREE; + } +diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h +index f7ddfc452..6e8ac293a 100644 +--- a/gcc/config/loongarch/loongarch.h ++++ b/gcc/config/loongarch/loongarch.h +@@ -1236,3 +1236,6 @@ struct GTY (()) machine_function + we just need "ibar" to avoid instruction hazard here. */ + #undef CLEAR_INSN_CACHE + #define CLEAR_INSN_CACHE(beg, end) __builtin_loongarch_ibar (0) ++ ++#define TARGET_EXPLICIT_RELOCS \ ++ (la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS) +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 34bd832bd..44376fd77 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -183,10 +183,27 @@ mmax-inline-memcpy-size= + Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024) + -mmax-inline-memcpy-size=SIZE Set the max size of memcpy to inline, default is 1024. 
+ +-mexplicit-relocs +-Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS & !HAVE_AS_MRELAX_OPTION) ++Enum ++Name(explicit_relocs) Type(int) ++The code model option names for -mexplicit-relocs: ++ ++EnumValue ++Enum(explicit_relocs) String(auto) Value(EXPLICIT_RELOCS_AUTO) ++ ++EnumValue ++Enum(explicit_relocs) String(none) Value(EXPLICIT_RELOCS_NONE) ++ ++EnumValue ++Enum(explicit_relocs) String(always) Value(EXPLICIT_RELOCS_ALWAYS) ++ ++mexplicit-relocs= ++Target RejectNegative Joined Enum(explicit_relocs) Var(la_opt_explicit_relocs) Init(M_OPT_UNSET) + Use %reloc() assembly operators. + ++mexplicit-relocs ++Target Var(la_opt_explicit_relocs_backward) Init(M_OPT_UNSET) ++Use %reloc() assembly operators (for backward compatibility). ++ + ; The code model option names for -mcmodel. + Enum + Name(cmodel) Type(int) +-- +2.43.0 + diff --git a/0015-Sw64-Port-libsanitizer.patch b/0015-Sw64-Port-libsanitizer.patch new file mode 100644 index 0000000000000000000000000000000000000000..e0d86ba704a5894c3aa2e7993f71ccf7fe989435 --- /dev/null +++ b/0015-Sw64-Port-libsanitizer.patch @@ -0,0 +1,1290 @@ +From df516f16c04a4806fde50fefc45f9d4c518d6872 Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 17:14:17 +0800 +Subject: [PATCH 15/16] Sw64 Port: libsanitizer + +--- + libsanitizer/asan/asan_allocator.h | 5 + + libsanitizer/asan/asan_mapping.h | 3 + + libsanitizer/configure.tgt | 4 + + libsanitizer/lsan/lsan_allocator.cpp | 2 +- + libsanitizer/lsan/lsan_allocator.h | 3 +- + libsanitizer/lsan/lsan_common.cpp | 2 + + libsanitizer/lsan/lsan_common.h | 2 +- + .../sanitizer_common_interceptors.inc | 77 ++++-- + .../sanitizer_common_syscalls.inc | 4 +- + .../sanitizer_common/sanitizer_linux.cpp | 131 +++++++++- + .../sanitizer_common/sanitizer_linux.h | 2 +- + .../sanitizer_linux_libcdep.cpp | 6 +- + .../sanitizer_common/sanitizer_platform.h | 8 + + .../sanitizer_platform_interceptors.h | 6 +- + .../sanitizer_platform_limits_linux.cpp | 2 +- + 
.../sanitizer_platform_limits_posix.cpp | 16 +- + .../sanitizer_platform_limits_posix.h | 24 +- + .../sanitizer_common/sanitizer_stacktrace.h | 2 + + .../sanitizer_stoptheworld_linux_libcdep.cpp | 8 +- + .../sanitizer_symbolizer_libcdep.cpp | 2 + + libsanitizer/tsan/Makefile.am | 2 +- + libsanitizer/tsan/Makefile.in | 2 +- + libsanitizer/tsan/tsan_interceptors_posix.cpp | 14 +- + libsanitizer/tsan/tsan_platform.h | 41 +++ + libsanitizer/tsan/tsan_platform_linux.cpp | 4 + + libsanitizer/tsan/tsan_rtl.h | 2 +- + libsanitizer/tsan/tsan_rtl_sw64.S | 236 ++++++++++++++++++ + 27 files changed, 550 insertions(+), 60 deletions(-) + create mode 100644 libsanitizer/tsan/tsan_rtl_sw64.S + +diff --git a/libsanitizer/asan/asan_allocator.h b/libsanitizer/asan/asan_allocator.h +index 27d826fb6..8b7b22db6 100644 +--- a/libsanitizer/asan/asan_allocator.h ++++ b/libsanitizer/asan/asan_allocator.h +@@ -149,6 +149,11 @@ typedef DefaultSizeClassMap SizeClassMap; + const uptr kAllocatorSpace = ~(uptr)0; + const uptr kAllocatorSize = 0x8000000000ULL; // 500G + typedef DefaultSizeClassMap SizeClassMap; ++# elif SANITIZER_SW64 ++// If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically my mmap. ++const uptr kAllocatorSpace = ~(uptr)0; ++const uptr kAllocatorSize = 0x40000000000ULL; // 4T. ++typedef DefaultSizeClassMap SizeClassMap; + # else + const uptr kAllocatorSpace = 0x600000000000ULL; + const uptr kAllocatorSize = 0x40000000000ULL; // 4T. 
+diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h +index 4b0037fce..3b752d81d 100644 +--- a/libsanitizer/asan/asan_mapping.h ++++ b/libsanitizer/asan/asan_mapping.h +@@ -165,6 +165,7 @@ static const u64 kAArch64_ShadowOffset64 = 1ULL << 36; + static const u64 kRiscv64_ShadowOffset64 = 0xd55550000; + static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000; + static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37; ++static const u64 kSW64_ShadowOffset64 = 1ULL << 49; + static const u64 kPPC64_ShadowOffset64 = 1ULL << 41; + static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52; + static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000 +@@ -205,6 +206,8 @@ static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000 + # define SHADOW_OFFSET kAArch64_ShadowOffset64 + # elif defined(__powerpc64__) + # define SHADOW_OFFSET kPPC64_ShadowOffset64 ++# elif defined(__sw_64__) ++# define SHADOW_OFFSET kSW64_ShadowOffset64 + # elif defined(__s390x__) + # define SHADOW_OFFSET kSystemZ_ShadowOffset64 + # elif SANITIZER_FREEBSD +diff --git a/libsanitizer/configure.tgt b/libsanitizer/configure.tgt +index fb89df493..f62e59ef8 100644 +--- a/libsanitizer/configure.tgt ++++ b/libsanitizer/configure.tgt +@@ -54,6 +54,10 @@ case "${target}" in + ;; + arm*-*-linux*) + ;; ++ sw_64*-*-linux*) ++ TSAN_SUPPORTED=yes ++ LSAN_SUPPORTED=yes ++ ;; + mips*-*-linux*) + ;; + aarch64*-*-linux*) +diff --git a/libsanitizer/lsan/lsan_allocator.cpp b/libsanitizer/lsan/lsan_allocator.cpp +index 91e34ebb3..9895719fd 100644 +--- a/libsanitizer/lsan/lsan_allocator.cpp ++++ b/libsanitizer/lsan/lsan_allocator.cpp +@@ -28,7 +28,7 @@ extern "C" void *memset(void *ptr, int value, uptr num); + namespace __lsan { + #if defined(__i386__) || defined(__arm__) + static const uptr kMaxAllowedMallocSize = 1UL << 30; +-#elif defined(__mips64) || defined(__aarch64__) ++#elif defined(__mips64) || defined(__aarch64__) || defined(__sw_64__) + static const uptr 
kMaxAllowedMallocSize = 4UL << 30; + #else + static const uptr kMaxAllowedMallocSize = 8UL << 30; +diff --git a/libsanitizer/lsan/lsan_allocator.h b/libsanitizer/lsan/lsan_allocator.h +index 45c6ac406..a6ca2b250 100644 +--- a/libsanitizer/lsan/lsan_allocator.h ++++ b/libsanitizer/lsan/lsan_allocator.h +@@ -50,7 +50,8 @@ struct ChunkMetadata { + }; + + #if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \ +- defined(__arm__) || SANITIZER_RISCV64 || defined(__hexagon__) ++ defined(__arm__) || SANITIZER_RISCV64 || defined(__hexagon__) || \ ++ defined(__sw_64__) + template + struct AP32 { + static const uptr kSpaceBeg = 0; +diff --git a/libsanitizer/lsan/lsan_common.cpp b/libsanitizer/lsan/lsan_common.cpp +index 308dbb3e4..16b664b14 100644 +--- a/libsanitizer/lsan/lsan_common.cpp ++++ b/libsanitizer/lsan/lsan_common.cpp +@@ -163,6 +163,8 @@ static inline bool CanBeAHeapPointer(uptr p) { + return ((p >> 47) == 0); + #elif defined(__mips64) + return ((p >> 40) == 0); ++#elif defined(__sw_64__) ++ return ((p >> 52) == 0); + #elif defined(__aarch64__) + unsigned runtimeVMA = + (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1); +diff --git a/libsanitizer/lsan/lsan_common.h b/libsanitizer/lsan/lsan_common.h +index f9b55e4e8..6afff378c 100644 +--- a/libsanitizer/lsan/lsan_common.h ++++ b/libsanitizer/lsan/lsan_common.h +@@ -36,7 +36,7 @@ + #define CAN_SANITIZE_LEAKS 0 + #elif (SANITIZER_LINUX || SANITIZER_MAC) && (SANITIZER_WORDSIZE == 64) && \ + (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \ +- defined(__powerpc64__) || defined(__s390x__)) ++ defined(__powerpc64__) || defined(__s390x__) || defined(__sw_64__)) + #define CAN_SANITIZE_LEAKS 1 + #elif defined(__i386__) && (SANITIZER_LINUX || SANITIZER_MAC) + #define CAN_SANITIZE_LEAKS 1 +diff --git a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc +index abb38ccfa..2b6a7d612 100644 +--- 
a/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc ++++ b/libsanitizer/sanitizer_common/sanitizer_common_interceptors.inc +@@ -1532,6 +1532,16 @@ VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap) + + INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap) + VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap) ++ ++INTERCEPTOR(int, __isoc23_vscanf, const char *format, va_list ap) ++VSCANF_INTERCEPTOR_IMPL(__isoc23_vscanf, false, format, ap) ++ ++INTERCEPTOR(int, __isoc23_vsscanf, const char *str, const char *format, ++ va_list ap) ++VSCANF_INTERCEPTOR_IMPL(__isoc23_vsscanf, false, str, format, ap) ++ ++INTERCEPTOR(int, __isoc23_vfscanf, void *stream, const char *format, va_list ap) ++VSCANF_INTERCEPTOR_IMPL(__isoc23_vfscanf, false, stream, format, ap) + #endif // SANITIZER_INTERCEPT_ISOC99_SCANF + + INTERCEPTOR(int, scanf, const char *format, ...) +@@ -1552,6 +1562,15 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format) + + INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...) + FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format) ++ ++INTERCEPTOR(int, __isoc23_scanf, const char *format, ...) ++FORMAT_INTERCEPTOR_IMPL(__isoc23_scanf, __isoc23_vscanf, format) ++ ++INTERCEPTOR(int, __isoc23_fscanf, void *stream, const char *format, ...) ++FORMAT_INTERCEPTOR_IMPL(__isoc23_fscanf, __isoc23_vfscanf, stream, format) ++ ++INTERCEPTOR(int, __isoc23_sscanf, const char *str, const char *format, ...) 
++FORMAT_INTERCEPTOR_IMPL(__isoc23_sscanf, __isoc23_vsscanf, str, format) + #endif + + #endif +@@ -1575,7 +1594,13 @@ FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format) + COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \ + COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \ + COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \ +- COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); ++ COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf); \ ++ COMMON_INTERCEPT_FUNCTION(__isoc23_scanf); \ ++ COMMON_INTERCEPT_FUNCTION(__isoc23_sscanf); \ ++ COMMON_INTERCEPT_FUNCTION(__isoc23_fscanf); \ ++ COMMON_INTERCEPT_FUNCTION(__isoc23_vscanf); \ ++ COMMON_INTERCEPT_FUNCTION(__isoc23_vsscanf); \ ++ COMMON_INTERCEPT_FUNCTION(__isoc23_vfscanf); + #else + #define INIT_ISOC99_SCANF + #endif +@@ -3502,30 +3527,26 @@ UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr, + (real_endptr - nptr) + 1 : 0); + } + +- + #if SANITIZER_INTERCEPT_STRTOIMAX +-INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) { +- void *ctx; +- COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base); +- // FIXME: under ASan the call below may write to freed memory and corrupt +- // its metadata. See +- // https://github.com/google/sanitizers/issues/321. 
++template ++static ALWAYS_INLINE auto StrtoimaxImpl(void *ctx, Fn real, const char *nptr, ++ char **endptr, int base) ++ -> decltype(real(nullptr, nullptr, 0)) { + char *real_endptr; +- INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base); ++ auto res = real(nptr, &real_endptr, base); + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); + return res; + } + ++INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) { ++ void *ctx; ++ COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base); ++ return StrtoimaxImpl(ctx, REAL(strtoimax), nptr, endptr, base); ++} + INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) { + void *ctx; + COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base); +- // FIXME: under ASan the call below may write to freed memory and corrupt +- // its metadata. See +- // https://github.com/google/sanitizers/issues/321. +- char *real_endptr; +- UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base); +- StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); +- return res; ++ return StrtoimaxImpl(ctx, REAL(strtoumax), nptr, endptr, base); + } + + #define INIT_STRTOIMAX \ +@@ -3535,6 +3556,25 @@ INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) { + #define INIT_STRTOIMAX + #endif + ++#if SANITIZER_INTERCEPT_STRTOIMAX && SANITIZER_GLIBC ++INTERCEPTOR(INTMAX_T, __isoc23_strtoimax, const char *nptr, char **endptr, int base) { ++ void *ctx; ++ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoimax, nptr, endptr, base); ++ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoimax), nptr, endptr, base); ++} ++INTERCEPTOR(UINTMAX_T, __isoc23_strtoumax, const char *nptr, char **endptr, int base) { ++ void *ctx; ++ COMMON_INTERCEPTOR_ENTER(ctx, __isoc23_strtoumax, nptr, endptr, base); ++ return StrtoimaxImpl(ctx, REAL(__isoc23_strtoumax), nptr, endptr, base); ++} ++ ++# define INIT_STRTOIMAX_C23 \ ++ COMMON_INTERCEPT_FUNCTION(__isoc23_strtoimax); \ ++ 
COMMON_INTERCEPT_FUNCTION(__isoc23_strtoumax); ++#else ++# define INIT_STRTOIMAX_C23 ++#endif ++ + #if SANITIZER_INTERCEPT_MBSTOWCS + INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) { + void *ctx; +@@ -4708,7 +4748,11 @@ INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) { + } + return res; + } ++#ifdef SANITIZER_SW64 ++#define INIT_SHMCTL COMMON_INTERCEPT_FUNCTION_VER(shmctl, "GLIBC_2.2"); ++#else + #define INIT_SHMCTL COMMON_INTERCEPT_FUNCTION(shmctl); ++#endif + #else + #define INIT_SHMCTL + #endif +@@ -10325,6 +10369,7 @@ static void InitializeCommonInterceptors() { + INIT_GETCWD; + INIT_GET_CURRENT_DIR_NAME; + INIT_STRTOIMAX; ++ INIT_STRTOIMAX_C23; + INIT_MBSTOWCS; + INIT_MBSNRTOWCS; + INIT_WCSTOMBS; +diff --git a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc +index a38b13408..fd4e1e3b6 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc ++++ b/libsanitizer/sanitizer_common/sanitizer_common_syscalls.inc +@@ -2512,7 +2512,7 @@ PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) { + # if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ +- SANITIZER_RISCV64) ++ defined(__sw_64__) || SANITIZER_RISCV64) + if (data) { + if (request == ptrace_setregs) { + PRE_READ((void *)data, struct_user_regs_struct_sz); +@@ -2534,7 +2534,7 @@ POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) { + # if !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__) || \ +- SANITIZER_RISCV64) ++ defined(__sw_64__) || SANITIZER_RISCV64) + if (res >= 0 && data) { + // Note that this is different from the interceptor in + // sanitizer_common_interceptors.inc. 
+diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_linux.cpp +index aa59d9718..711079d86 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_linux.cpp ++++ b/libsanitizer/sanitizer_common/sanitizer_linux.cpp +@@ -50,6 +50,16 @@ + #undef stat + #endif + ++#if defined(__sw_64__) ++#define stat kernel_stat ++#define stat64 kernel_stat64 ++#include ++#undef stat ++#undef stat64 ++#include ++#include ++#endif ++ + #include + #include + #include +@@ -282,7 +292,7 @@ static void stat64_to_stat(struct stat64 *in, struct stat *out) { + } + #endif + +-#if defined(__mips64) ++#if defined(__mips64) || defined(__sw_64__) + // Undefine compatibility macros from + // so that they would not clash with the kernel_stat + // st_[a|m|c]time fields +@@ -312,6 +322,12 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) { + out->st_size = in->st_size; + out->st_blksize = in->st_blksize; + out->st_blocks = in->st_blocks; ++#if defined(__sw_64__) ++ // There's no nsecs in sw_64's struct stat ++ out->st_atim.tv_sec = in->st_atime; ++ out->st_mtim.tv_sec = in->st_mtime; ++ out->st_ctim.tv_sec = in->st_ctime; ++#else + #if defined(__USE_MISC) || \ + defined(__USE_XOPEN2K8) || \ + defined(SANITIZER_ANDROID) +@@ -329,6 +345,7 @@ static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) { + out->st_ctime = in->st_ctime; + out->st_atimensec = in->st_ctime_nsec; + #endif ++#endif + } + #endif + +@@ -339,8 +356,8 @@ uptr internal_stat(const char *path, void *buf) { + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + 0); + #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS +-# if defined(__mips64) +- // For mips64, stat syscall fills buffer in the format of kernel_stat ++# if defined(__mips64) || defined(__sw_64__) ++ // For mips64 and sw_64, stat syscall fills buffer in the format of kernel_stat + struct kernel_stat kbuf; + int res = internal_syscall(SYSCALL(stat), path, &kbuf); + 
kernel_stat_to_stat(&kbuf, (struct stat *)buf); +@@ -364,8 +381,8 @@ uptr internal_lstat(const char *path, void *buf) { + return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf, + AT_SYMLINK_NOFOLLOW); + #elif SANITIZER_LINUX_USES_64BIT_SYSCALLS +-# if SANITIZER_MIPS64 +- // For mips64, lstat syscall fills buffer in the format of kernel_stat ++# if SANITIZER_MIPS64 || SANITIZER_SW64 ++ // For mips64 and sw_64, lstat syscall fills buffer in the format of kernel_stat + struct kernel_stat kbuf; + int res = internal_syscall(SYSCALL(lstat), path, &kbuf); + kernel_stat_to_stat(&kbuf, (struct stat *)buf); +@@ -383,8 +400,8 @@ uptr internal_lstat(const char *path, void *buf) { + + uptr internal_fstat(fd_t fd, void *buf) { + #if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS +-#if SANITIZER_MIPS64 +- // For mips64, fstat syscall fills buffer in the format of kernel_stat ++#if (SANITIZER_MIPS64 || SANITIZER_SW64) ++ // For mips64 and sw_64, fstat syscall fills buffer in the format of kernel_stat + struct kernel_stat kbuf; + int res = internal_syscall(SYSCALL(fstat), fd, &kbuf); + kernel_stat_to_stat(&kbuf, (struct stat *)buf); +@@ -718,6 +735,19 @@ uptr internal_waitpid(int pid, int *status, int options) { + 0 /* rusage */); + } + ++#ifdef __sw_64__ ++uptr internal_getpid() { ++ return internal_syscall(SYSCALL(getxpid)); ++} ++ ++uptr internal_getppid() { ++ uptr ppid; ++ internal_syscall(SYSCALL(getxpid)); ++ asm("mov $20, %0\n" ++ :"=r"(ppid)); ++ return ppid; ++} ++#else + uptr internal_getpid() { + return internal_syscall(SYSCALL(getpid)); + } +@@ -725,6 +755,7 @@ uptr internal_getpid() { + uptr internal_getppid() { + return internal_syscall(SYSCALL(getppid)); + } ++#endif + + int internal_dlinfo(void *handle, int request, void *p) { + #if SANITIZER_FREEBSD +@@ -759,7 +790,7 @@ uptr internal_sigaltstack(const void *ss, void *oss) { + } + + int internal_fork() { +-#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS ++#if 
SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_SW64 + return internal_syscall(SYSCALL(clone), SIGCHLD, 0); + #else + return internal_syscall(SYSCALL(fork)); +@@ -826,7 +857,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { + // rt_sigaction, so we need to do the same (we'll need to reimplement the + // restorers; for x86_64 the restorer address can be obtained from + // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact). +-#if !SANITIZER_ANDROID || !SANITIZER_MIPS32 ++#if ( !SANITIZER_ANDROID || !SANITIZER_MIPS32 ) && !SANITIZER_SW64 + k_act.sa_restorer = u_act->sa_restorer; + #endif + } +@@ -842,7 +873,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact) { + internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask, + sizeof(__sanitizer_kernel_sigset_t)); + u_oldact->sa_flags = k_oldact.sa_flags; +-#if !SANITIZER_ANDROID || !SANITIZER_MIPS32 ++#if ( !SANITIZER_ANDROID || !SANITIZER_MIPS32 ) && !SANITIZER_SW64 + u_oldact->sa_restorer = k_oldact.sa_restorer; + #endif + } +@@ -1045,6 +1076,11 @@ uptr GetMaxVirtualAddress() { + return (1ULL << 38) - 1; + # elif defined(__mips64) + return (1ULL << 40) - 1; // 0x000000ffffffffffUL; ++# elif defined(__sw_64__) ++// SW64 has a 42-bit user address space(4TiB) ++// according to TASK_SIZE in kernel. ++// In sw6b PGTABLE is SW_4LEVEL. 
++ return (1ULL << 52) - 1; // 0x000fffffffffffffUL; + # elif defined(__s390x__) + return (1ULL << 53) - 1; // 0x001fffffffffffffUL; + #elif defined(__sparc__) +@@ -1377,6 +1413,71 @@ uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + : "memory"); + return res; + } ++#elif defined(__sw_64__) ++uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, ++ int *parent_tidptr, void *newtls, int *child_tidptr) { ++ long long res; ++ if (!fn || !child_stack) ++ return -EINVAL; ++ child_stack = (char *)child_stack - 4 * sizeof(unsigned long long); ++ ((unsigned long long *)child_stack)[0] = (uptr)fn; ++ ((unsigned long long *)child_stack)[1] = (uptr)arg; ++ ((unsigned long long *)child_stack)[2] = (uptr)flags; ++ ++ register void *r20 __asm__("$20") = newtls; ++ register int *r22 __asm__("$22") = child_tidptr; ++ ++ __asm__ __volatile__( ++ /* $v0 = syscall($v0 = __NR_clone, ++ * $a0 = flags, ++ * $a1 = child_stack, ++ * $a2 = parent_tidptr, ++ * $a3 = child_tidptr, ++ * $a4 = new_tls) ++ */ ++ "mov %[flag],$16\n" ++ "mov %[usp],$17\n" ++ "mov %[ptid],$18\n" ++ "ldl $19,0($sp)\n" ++ "mov %5,$20\n" ++ /* Store the fifth argument on stack ++ * if we are using 32-bit abi. ++ */ ++ "ldi $0,%[NR_clone];\n" ++ "sys_call 0x83;\n" ++ ++ /* if ($v0 != 0) ++ * return; ++ */ ++ "bne $0,1f;\n" ++ "mov $31,$15;\n" ++ /* Call "fn(arg)". */ ++ "ldl $27,0($sp);\n" ++ "ldl $16,8($sp);\n" ++ "ldi $sp,32($sp);\n" ++ ++ "call $26,($27),0;\n" ++ "ldgp $29, 0($26);\n" ++ ++ /* Call _exit($v0). */ ++ "mov $0,$16;\n" ++ "ldi $0,%[NR_exit];\n" ++ "sys_call 0x83;\n" ++ ++ /* Return to parent. 
*/ ++ "1:\n" ++ : "=r" (res) ++ : [flag]"r"(flags), ++ [usp]"r"(child_stack), ++ [ptid]"r"(parent_tidptr), ++ "r"(r20), ++ "r"(r22), ++ [NR_clone]"i"(__NR_clone), ++ [NR_exit]"i"(__NR_exit) ++ : "memory", "$30"); ++ ++ return res; ++} + #elif defined(__aarch64__) + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr) { +@@ -2026,6 +2127,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.regs[29]; + *sp = ucontext->uc_mcontext.sp; ++#elif defined(__sw_64__) ++ ucontext_t *ucontext = (ucontext_t*)context; ++ *pc = ucontext->uc_mcontext.sc_pc; ++ *bp = ucontext->uc_mcontext.sc_regs[15]; ++ *sp = ucontext->uc_mcontext.sc_regs[30]; + #elif defined(__hppa__) + ucontext_t *ucontext = (ucontext_t*)context; + *pc = ucontext->uc_mcontext.sc_iaoq[0]; +@@ -2103,6 +2209,11 @@ static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) { + *pc = ucontext->uc_mcontext.pc; + *bp = ucontext->uc_mcontext.gregs[30]; + *sp = ucontext->uc_mcontext.gregs[29]; ++#elif defined(__sw_64__) ++ ucontext_t *ucontext = (ucontext_t*)context; ++ *pc = ucontext->uc_mcontext.sc_pc; ++ *bp = ucontext->uc_mcontext.sc_regs[15]; ++ *sp = ucontext->uc_mcontext.sc_regs[30]; + #elif defined(__s390__) + ucontext_t *ucontext = (ucontext_t*)context; + # if defined(__s390x__) +diff --git a/libsanitizer/sanitizer_common/sanitizer_linux.h b/libsanitizer/sanitizer_common/sanitizer_linux.h +index 6a235db0e..a5d18e73d 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_linux.h ++++ b/libsanitizer/sanitizer_common/sanitizer_linux.h +@@ -73,7 +73,7 @@ int internal_sigaction_norestorer(int signum, const void *act, void *oldact); + void internal_sigdelset(__sanitizer_sigset_t *set, int signum); + #if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \ + defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \ +- 
defined(__arm__) || SANITIZER_RISCV64 ++ defined(__arm__) || defined(__sw_64__) || SANITIZER_RISCV64 + uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg, + int *parent_tidptr, void *newtls, int *child_tidptr); + #endif +diff --git a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp +index 4f22c78a1..ef8ab755f 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp ++++ b/libsanitizer/sanitizer_common/sanitizer_linux_libcdep.cpp +@@ -203,7 +203,7 @@ void InitTlsSize() { + g_use_dlpi_tls_data = + GetLibcVersion(&major, &minor, &patch) && major == 2 && minor >= 25; + +-#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) ++#if defined(__aarch64__) || defined(__x86_64__) || defined(__powerpc64__) || defined(__sw_64__) + void *get_tls_static_info = dlsym(RTLD_NEXT, "_dl_get_tls_static_info"); + size_t tls_align; + ((void (*)(size_t *, size_t *))get_tls_static_info)(&g_tls_size, &tls_align); +@@ -282,6 +282,8 @@ static uptr ThreadDescriptorSizeFallback() { + val = 1776; + #elif defined(__powerpc64__) + val = 1776; // from glibc.ppc64le 2.20-8.fc21 ++#elif defined(__sw_64__) ++ val = 1776; + #endif + return val; + } +@@ -471,7 +473,7 @@ static void GetTls(uptr *addr, uptr *size) { + *size = g_tls_size; + *addr -= *size; + *addr += ThreadDescriptorSize(); +-#elif SANITIZER_GLIBC && defined(__aarch64__) ++#elif SANITIZER_GLIBC && (defined(__aarch64__) || defined(__sw_64__)) + *addr = reinterpret_cast(__builtin_thread_pointer()) - + ThreadDescriptorSize(); + *size = g_tls_size + ThreadDescriptorSize(); +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform.h b/libsanitizer/sanitizer_common/sanitizer_platform.h +index 3153de34e..5cec9a7c0 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform.h ++++ b/libsanitizer/sanitizer_common/sanitizer_platform.h +@@ -159,6 +159,12 @@ + # define SANITIZER_MIPS64 0 + #endif + ++#if 
defined(__sw_64__) ++# define SANITIZER_SW64 1 ++#else ++# define SANITIZER_SW64 0 ++#endif ++ + #if defined(__s390__) + # define SANITIZER_S390 1 + # if defined(__s390x__) +@@ -264,6 +270,8 @@ + # endif + #elif defined(__sparc__) + #define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52) ++#elif defined(__sw_64__) ++# define SANITIZER_MMAP_RANGE_SIZE 1ULL << 52 + #else + # define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47) + #endif +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h +index 14610f2df..03cdf0a07 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_interceptors.h +@@ -234,7 +234,11 @@ + #define SANITIZER_INTERCEPT_GETITIMER SI_POSIX + #define SANITIZER_INTERCEPT_TIME SI_POSIX + #define SANITIZER_INTERCEPT_GLOB (SI_GLIBC || SI_SOLARIS) ++#if SANITIZER_SW64 ++#define SANITIZER_INTERCEPT_GLOB64 0 ++#else + #define SANITIZER_INTERCEPT_GLOB64 SI_GLIBC ++#endif + #define SANITIZER_INTERCEPT_POSIX_SPAWN SI_POSIX + #define SANITIZER_INTERCEPT_WAIT SI_POSIX + #define SANITIZER_INTERCEPT_INET SI_POSIX +@@ -271,7 +275,7 @@ + #if SI_LINUX_NOT_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || SANITIZER_RISCV64) ++ defined(__s390__) || defined(__sw_64__) || SANITIZER_RISCV64) + #define SANITIZER_INTERCEPT_PTRACE 1 + #else + #define SANITIZER_INTERCEPT_PTRACE 0 +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp +index 2b1a2f793..e7b20605f 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_linux.cpp +@@ -68,7 +68,7 @@ namespace __sanitizer { 
+ + # if !defined(__powerpc64__) && !defined(__x86_64__) && \ + !defined(__aarch64__) && !defined(__mips__) && !defined(__s390__) && \ +- !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) ++ !defined(__sparc__) && !defined(__riscv) && !defined(__hexagon__) && !defined(__sw_64__) + COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat)); + #endif + +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp +index c335f33dd..c96d9b043 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.cpp +@@ -94,7 +94,7 @@ + # include + # include + # if defined(__mips64) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__hexagon__) || SANITIZER_RISCV64 ++ defined(__hexagon__) || defined(__sw_64__) || SANITIZER_RISCV64 + # include + # ifdef __arm__ + typedef struct user_fpregs elf_fpregset_t; +@@ -141,7 +141,7 @@ typedef struct user_fpregs elf_fpregset_t; + #include + #include + #include +-#if defined(__mips64) ++#if defined(__mips64) || defined(__sw_64__) // for elf_gregset_t + # include + #endif + #include +@@ -242,7 +242,7 @@ namespace __sanitizer { + // has been removed from glibc 2.28. 
+ #if defined(__aarch64__) || defined(__s390x__) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) || \ +- defined(__x86_64__) || SANITIZER_RISCV64 ++ defined(__x86_64__) || SANITIZER_RISCV64 || defined(__sw_64__) + #define SIZEOF_STRUCT_USTAT 32 + # elif defined(__arm__) || defined(__i386__) || defined(__mips__) || \ + defined(__powerpc__) || defined(__s390__) || defined(__sparc__) || \ +@@ -322,14 +322,14 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); + #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || SANITIZER_RISCV64) ++ defined(__s390__) || SANITIZER_RISCV64 || defined(__sw_64__)) + #if defined(__mips64) || defined(__powerpc64__) || defined(__arm__) + unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t); + #elif SANITIZER_RISCV64 + unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct); + unsigned struct_user_fpregs_struct_sz = sizeof(struct __riscv_q_ext_state); +-#elif defined(__aarch64__) ++#elif defined(__aarch64__) || defined(__sw_64__) + unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs); + unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state); + #elif defined(__s390__) +@@ -341,12 +341,12 @@ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr); + #endif // __mips64 || __powerpc64__ || __aarch64__ + #if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \ + defined(__aarch64__) || defined(__arm__) || defined(__s390__) || \ +- SANITIZER_RISCV64 ++ defined(__sw_64__) || SANITIZER_RISCV64 + unsigned struct_user_fpxregs_struct_sz = 0; + #else + unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct); + #endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__ +-// || __s390__ ++// 
|| __s390__ || __sw_64__ + #ifdef __arm__ + unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE; + #else +@@ -1080,7 +1080,7 @@ CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask); + // didn't exist. + CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags); + #endif +-#if SANITIZER_LINUX && (!SANITIZER_ANDROID || !SANITIZER_MIPS32) ++#if SANITIZER_LINUX && (!SANITIZER_ANDROID || !SANITIZER_MIPS32) && !SANITIZER_SW64 + CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer); + #endif + +diff --git a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h +index da53b5abe..524a6b942 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h ++++ b/libsanitizer/sanitizer_common/sanitizer_platform_limits_posix.h +@@ -74,6 +74,9 @@ const unsigned struct_kernel_stat64_sz = 104; + #elif defined(__aarch64__) + const unsigned struct_kernel_stat_sz = 128; + const unsigned struct_kernel_stat64_sz = 104; ++#elif defined(__sw_64__) ++const unsigned struct_kernel_stat_sz = 80; ++const unsigned struct_kernel_stat64_sz = 136; + #elif defined(__powerpc__) && !defined(__powerpc64__) + const unsigned struct_kernel_stat_sz = 72; + const unsigned struct_kernel_stat64_sz = 104; +@@ -105,7 +108,10 @@ const unsigned struct_kernel_stat64_sz = 0; // RISCV64 does not use stat64 + # elif defined(__hexagon__) + const unsigned struct_kernel_stat_sz = 128; + const unsigned struct_kernel_stat64_sz = 0; +-# endif ++#elif defined(__sw_64__) ++const unsigned struct_kernel_stat_sz = 80; ++const unsigned struct_kernel_stat64_sz = 136; ++#endif + struct __sanitizer_perf_event_attr { + unsigned type; + unsigned size; +@@ -263,15 +269,15 @@ struct __sanitizer_shmid_ds { + u64 shm_ctime; + #else + uptr shm_atime; +-#if !defined(_LP64) && !defined(__mips__) ++#if !defined(_LP64) && !defined(__mips__) && !defined(__sw_64__) + uptr __unused1; + #endif + uptr shm_dtime; +-#if !defined(_LP64) && !defined(__mips__) ++#if 
!defined(_LP64) && !defined(__mips__) && !defined(__sw_64__) + uptr __unused2; + #endif + uptr shm_ctime; +-#if !defined(_LP64) && !defined(__mips__) ++#if !defined(_LP64) && !defined(__mips__) && !defined(__sw_64__) + uptr __unused3; + #endif + #endif +@@ -515,7 +521,7 @@ typedef int __sanitizer_clockid_t; + + #if SANITIZER_LINUX + # if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__) || \ +- defined(__mips__) || defined(__hexagon__) ++ defined(__mips__) || defined(__hexagon__) && !defined(__sw_64__) + typedef unsigned __sanitizer___kernel_uid_t; + typedef unsigned __sanitizer___kernel_gid_t; + #else +@@ -528,7 +534,7 @@ typedef long long __sanitizer___kernel_off_t; + typedef long __sanitizer___kernel_off_t; + #endif + +-#if defined(__powerpc__) || defined(__mips__) ++#if defined(__powerpc__) || defined(__mips__) && !defined(__sw_64__) + typedef unsigned int __sanitizer___kernel_old_uid_t; + typedef unsigned int __sanitizer___kernel_old_gid_t; + #else +@@ -640,7 +646,7 @@ struct __sanitizer_sigaction { + #endif + #endif + #endif +-#if SANITIZER_LINUX ++#if SANITIZER_LINUX && !defined(__sw_64__) + void (*sa_restorer)(); + #endif + #if defined(__mips__) && (SANITIZER_WORDSIZE == 32) +@@ -820,7 +826,7 @@ typedef void __sanitizer_FILE; + #if SANITIZER_LINUX && !SANITIZER_ANDROID && \ + (defined(__i386) || defined(__x86_64) || defined(__mips64) || \ + defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \ +- defined(__s390__) || SANITIZER_RISCV64) ++ defined(__s390__) || defined(__sw_64__) || SANITIZER_RISCV64) + extern unsigned struct_user_regs_struct_sz; + extern unsigned struct_user_fpregs_struct_sz; + extern unsigned struct_user_fpxregs_struct_sz; +@@ -906,7 +912,7 @@ struct __sanitizer_cookie_io_functions_t { + #define IOC_NRBITS 8 + #define IOC_TYPEBITS 8 + #if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \ +- defined(__sparc__) ++ defined(__sparc__) || defined(__sw_64__) + #define IOC_SIZEBITS 13 + #define 
IOC_DIRBITS 3 + #define IOC_NONE 1U +diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h +index 11c6154b0..35a4bfd16 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.h ++++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.h +@@ -24,6 +24,8 @@ static const u32 kStackTraceMax = 256; + + #if SANITIZER_LINUX && defined(__mips__) + # define SANITIZER_CAN_FAST_UNWIND 0 ++#elif defined(__sw_64__) ++# define SANITIZER_CAN_FAST_UNWIND 0 + #elif SANITIZER_WINDOWS + # define SANITIZER_CAN_FAST_UNWIND 0 + #else +diff --git a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +index 403bda117..574a82dc5 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp ++++ b/libsanitizer/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cpp +@@ -16,7 +16,7 @@ + #if SANITIZER_LINUX && \ + (defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) || \ + defined(__powerpc64__) || defined(__s390__) || defined(__i386__) || \ +- defined(__arm__) || SANITIZER_RISCV64) ++ defined(__arm__) || defined(__sw_64__) || SANITIZER_RISCV64) + + #include "sanitizer_stoptheworld.h" + +@@ -508,6 +508,12 @@ typedef struct user regs_struct; + # define REG_SP regs[EF_REG29] + # endif + ++#elif defined(__sw_64__) ++typedef struct user regs_struct; ++static constexpr uptr kExtraRegs[] = {0}; ++#define REG_SP regs[EF_SP] ++#define ARCH_IOVEC_FOR_GETREGSET ++ + #elif defined(__aarch64__) + typedef struct user_pt_regs regs_struct; + #define REG_SP sp +diff --git a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +index 3fc994fd3..8f1cba26f 100644 +--- a/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp ++++ b/libsanitizer/sanitizer_common/sanitizer_symbolizer_libcdep.cpp +@@ -273,6 +273,8 @@ class 
LLVMSymbolizerProcess final : public SymbolizerProcess { + const char* const kSymbolizerArch = "--default-arch=s390x"; + #elif defined(__s390__) + const char* const kSymbolizerArch = "--default-arch=s390"; ++#elif defined(__sw_64__) ++ const char* const kSymbolizerArch = "--default-arch=sw_64"; + #else + const char* const kSymbolizerArch = "--default-arch=unknown"; + #endif +diff --git a/libsanitizer/tsan/Makefile.am b/libsanitizer/tsan/Makefile.am +index ae588a67d..f0608f3d1 100644 +--- a/libsanitizer/tsan/Makefile.am ++++ b/libsanitizer/tsan/Makefile.am +@@ -50,7 +50,7 @@ tsan_files = \ + tsan_vector_clock.cpp + + libtsan_la_SOURCES = $(tsan_files) +-EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_s390x.S ++EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_s390x.S tsan_rtl_sw64.S + libtsan_la_LIBADD = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS) + libtsan_la_DEPENDENCIES = $(top_builddir)/sanitizer_common/libsanitizer_common.la $(top_builddir)/interception/libinterception.la $(TSAN_TARGET_DEPENDENT_OBJECTS) + if LIBBACKTRACE_SUPPORTED +diff --git a/libsanitizer/tsan/Makefile.in b/libsanitizer/tsan/Makefile.in +index 538d2e8eb..4c1be63a3 100644 +--- a/libsanitizer/tsan/Makefile.in ++++ b/libsanitizer/tsan/Makefile.in +@@ -456,7 +456,7 @@ tsan_files = \ + tsan_vector_clock.cpp + + libtsan_la_SOURCES = $(tsan_files) +-EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_s390x.S ++EXTRA_libtsan_la_SOURCES = tsan_rtl_amd64.S tsan_rtl_aarch64.S tsan_rtl_mips64.S tsan_rtl_ppc64.S tsan_rtl_s390x.S tsan_rtl_sw64.S + libtsan_la_LIBADD = \ + $(top_builddir)/sanitizer_common/libsanitizer_common.la \ + $(top_builddir)/interception/libinterception.la \ +diff --git a/libsanitizer/tsan/tsan_interceptors_posix.cpp 
b/libsanitizer/tsan/tsan_interceptors_posix.cpp +index 9a85ee00d..54532a0fd 100644 +--- a/libsanitizer/tsan/tsan_interceptors_posix.cpp ++++ b/libsanitizer/tsan/tsan_interceptors_posix.cpp +@@ -72,7 +72,7 @@ struct ucontext_t { + #endif + + #if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1 || \ +- defined(__s390x__) ++ defined(__s390x__) || defined(__sw_64__) + #define PTHREAD_ABI_BASE "GLIBC_2.3.2" + #elif defined(__aarch64__) || SANITIZER_PPC64V2 + #define PTHREAD_ABI_BASE "GLIBC_2.17" +@@ -141,7 +141,7 @@ typedef __sanitizer::u16 mode_t; + # define F_TLOCK 2 /* Test and lock a region for exclusive use. */ + # define F_TEST 3 /* Test a region for other processes locks. */ + +-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD ++#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || SANITIZER_SW64 + const int SA_SIGINFO = 0x40; + const int SIG_SETMASK = 3; + #elif defined(__mips__) +@@ -2487,7 +2487,7 @@ int sigaction_impl(int sig, const __sanitizer_sigaction *act, + sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags; + internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask, + sizeof(sigactions[sig].sa_mask)); +-#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD ++#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_SW64 + sigactions[sig].sa_restorer = act->sa_restorer; + #endif + internal_memcpy(&newact, act, sizeof(newact)); +@@ -2786,6 +2786,14 @@ void InitializeInterceptors() { + TSAN_INTERCEPT(pthread_timedjoin_np); + #endif + ++ #if SANITIZER_SW64 ++ // sw64 have two version of timer function, osf_xxx with @glibc2.0, ++ // which is 32bits syscall for old kernal. xxx with @glibc2.1 is 64bits ++ // syscall for new kernal, we use the new one. 
++ TSAN_INTERCEPT_VER(setitimer, "GLIBC_2.1"); ++ TSAN_INTERCEPT_VER(setitimer, "GLIBC_2.1"); ++ #endif ++ + TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE); + TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE); +diff --git a/libsanitizer/tsan/tsan_platform.h b/libsanitizer/tsan/tsan_platform.h +index 7ff0acace..7ae0c9f3b 100644 +--- a/libsanitizer/tsan/tsan_platform.h ++++ b/libsanitizer/tsan/tsan_platform.h +@@ -392,6 +392,44 @@ struct MappingS390x { + static const uptr kMidAppMemEnd = 0; + }; + ++// TODO(sw64_map): as sw64 kernal doesn't map such large space, we just map ++// it for test, for now it works will. ++// TODO(sw64_map_la): as sw64 map all space in low address, we set all user ++// space ++// in Lo address, perhaps there is some way to change it. ++/* ++C/C++ on linux/sw64 (52-bit VMA) ++0000 0000 0000 - 0001 2000 0000: modules and main thread stack ++0001 2000 0000 - 0008 0000 0000: main binary ++0400 0000 0000 - 0600 0000 0000: pie main binary (including heap) ++0600 0000 0000 - 4000 0000 0000: - ++4000 0000 0000 - 6000 0000 0000: shadow ++6000 0000 0000 - 7000 0000 0000: metainfo ++7000 0000 0000 - 7c00 0000 0000: trace ++*/ ++ ++struct MappingSW64_52 { ++ static const uptr kLoAppMemBeg = 0x0000000000000ull; ++ static const uptr kLoAppMemEnd = 0x0600000000000ull; ++ static const uptr kShadowBeg = 0x4000000000000ull; ++ static const uptr kShadowEnd = 0x6000000000000ull; ++ static const uptr kHiAppMemBeg = 0xfff0000000000ull; ++ static const uptr kHiAppMemEnd = 0xfff0000000000ull; ++ static const uptr kShadowMsk = 0x0000000000000ull; ++ //distans between lo address to shadow begin ++ static const uptr kShadowXor = 0x1000000000000ull; ++ static const uptr kShadowAdd = 0x0000000000000ull; ++ static const uptr kHeapMemBeg = 0xff00000000000ull; ++ static const uptr kHeapMemEnd = 0xff00000000000ull; ++ static const uptr kMetaShadowBeg = 0x6000000000000ull; ++ static const uptr 
kMetaShadowEnd = 0x7000000000000ull; ++ static const uptr kTraceMemBeg = 0x7000000000000ull; ++ static const uptr kTraceMemEnd = 0x7c00000000000ull; ++ static const uptr kVdsoBeg = 0x3c00000000000000ull; ++ static const uptr kMidAppMemBeg = 0; ++ static const uptr kMidAppMemEnd = 0; ++}; ++ + /* Go on linux, darwin and freebsd on x86_64 + 0000 0000 1000 - 0000 1000 0000: executable + 0000 1000 0000 - 00c0 0000 0000: - +@@ -674,6 +712,8 @@ ALWAYS_INLINE auto SelectMapping(Arg arg) { + return Func::template Apply(arg); + # elif defined(__s390x__) + return Func::template Apply(arg); ++# elif defined(__sw_64__) ++ return Func::template Apply(arg); + # else + # error "unsupported platform" + # endif +@@ -693,6 +733,7 @@ void ForEachMapping() { + Func::template Apply(); + Func::template Apply(); + Func::template Apply(); ++ Func::template Apply(); + Func::template Apply(); + Func::template Apply(); + Func::template Apply(); +diff --git a/libsanitizer/tsan/tsan_platform_linux.cpp b/libsanitizer/tsan/tsan_platform_linux.cpp +index 73ec14892..83dfa2391 100644 +--- a/libsanitizer/tsan/tsan_platform_linux.cpp ++++ b/libsanitizer/tsan/tsan_platform_linux.cpp +@@ -400,6 +400,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { + // tcbhead_t.stack_guard + uptr xor_key = ((uptr *)__builtin_thread_pointer())[5]; + return mangled_sp ^ xor_key; ++#elif defined(__sw_64__) ++ return mangled_sp; + #else + #error "Unknown platform" + #endif +@@ -422,6 +424,8 @@ static uptr UnmangleLongJmpSp(uptr mangled_sp) { + # define LONG_JMP_SP_ENV_SLOT 1 + # elif defined(__s390x__) + # define LONG_JMP_SP_ENV_SLOT 9 ++# elif defined(__sw_64__) ++# define LONG_JMP_SP_ENV_SLOT 8 + # else + # define LONG_JMP_SP_ENV_SLOT 6 + # endif +diff --git a/libsanitizer/tsan/tsan_rtl.h b/libsanitizer/tsan/tsan_rtl.h +index eab837042..d3ceae483 100644 +--- a/libsanitizer/tsan/tsan_rtl.h ++++ b/libsanitizer/tsan/tsan_rtl.h +@@ -55,7 +55,7 @@ namespace __tsan { + + #if !SANITIZER_GO + struct MapUnmapCallback; +-#if 
defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) ++#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__) || defined(__sw_64__) + + struct AP32 { + static const uptr kSpaceBeg = 0; +diff --git a/libsanitizer/tsan/tsan_rtl_sw64.S b/libsanitizer/tsan/tsan_rtl_sw64.S +new file mode 100644 +index 000000000..f74bfef8d +--- /dev/null ++++ b/libsanitizer/tsan/tsan_rtl_sw64.S +@@ -0,0 +1,236 @@ ++// The content of this file is sw64-only: ++#if defined(__sw_64__) ++ ++#include "sanitizer_common/sanitizer_asm.h" ++ ++.section .text ++.set noreorder ++ ++ASM_HIDDEN(__tsan_setjmp) ++.comm _ZN14__interception11real_setjmpE,8,8 ++.globl ASM_SYMBOL_INTERCEPTOR(setjmp) ++ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp)) ++ASM_SYMBOL_INTERCEPTOR(setjmp): ++ ldgp $r29, 0($r27) ++ CFI_STARTPROC ++ ++ // Save frame/link register ++ ldi $sp, -32($sp) ++ stl $r26, 0($sp) ++ stl $fp, 8($sp) ++ CFI_DEF_CFA_OFFSET (32) ++ CFI_OFFSET (26, -32) ++ CFI_OFFSET (15, -24) ++ ++ // Adjust the SP for previous frame ++ ldi $fp,0($sp) ++ CFI_DEF_CFA_REGISTER (15) ++ ++ // Save env parameter ++ stl $r16, 16($sp) ++ CFI_OFFSET (0, -16) ++ ++ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` ++ ldi $r16, 32($sp) ++ ++ // call tsan interceptor ++ //ldih $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprelhigh ++ //ldi $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprellow ++ ldl $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !literal ++ call $r26, ($r27), 0 ++ ldgp $r29, 0($r26) ++ ++ // Restore env parameter ++ ldl $r16, 16($sp) ++ CFI_RESTORE (0) ++ ++ // Restore frame/link register ++ ldl $fp, 8($sp) ++ ldl $r26, 0($sp) ++ CFI_RESTORE (15) ++ CFI_RESTORE (26) ++ CFI_DEF_CFA (31, 0) ++ ldi $sp, 32($sp) ++ ++ // tail jump to libc setjmp ++ ldl $r27, _ZN14__interception11real_setjmpE($r29) !literal ++ ldl $r27, 0($r27) ++ ++ jmp $r31, ($r27) ++ ++ CFI_ENDPROC ++ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp)) ++ ++ASM_HIDDEN(__tsan_setjmp) ++.comm _ZN14__interception12real__setjmpE,8,8 
++.globl ASM_SYMBOL_INTERCEPTOR(_setjmp) ++ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp)) ++ASM_SYMBOL_INTERCEPTOR(_setjmp): ++ ldgp $r29, 0($r27) ++ CFI_STARTPROC ++ ++ // Save frame/link register ++ ldi $sp, -32($sp) ++ stl $r26, 0($sp) ++ stl $fp, 8($sp) ++ CFI_DEF_CFA_OFFSET (32) ++ CFI_OFFSET (26, -32) ++ CFI_OFFSET (15, -24) ++ ++ // Adjust the SP for previous frame ++ ldi $fp,0($sp) ++ CFI_DEF_CFA_REGISTER (15) ++ ++ // Save env parameter ++ stl $r16, 16($sp) ++ CFI_OFFSET (0, -16) ++ ++ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` ++ ldi $r16, 32($sp) ++ ++ // call tsan interceptor ++ //ldih $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprelhigh ++ //ldi $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprellow ++ ldl $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !literal ++ call $r26, ($r27), 0 ++ ldgp $r29, 0($r26) ++ ++ // Restore env parameter ++ ldl $r16, 16($sp) ++ CFI_RESTORE (0) ++ ++ // Restore frame/link register ++ ldl $fp, 8($sp) ++ ldl $r26, 0($sp) ++ CFI_RESTORE (15) ++ CFI_RESTORE (26) ++ CFI_DEF_CFA (31, 0) ++ ldi $sp, 32($sp) ++ ++ // tail jump to libc setjmp ++ ldl $r27, _ZN14__interception12real__setjmpE($r29) !literal ++ ldl $r27, 0($r27) ++ ++ jmp $r31, ($r27) ++ ++ CFI_ENDPROC ++ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp)) ++ ++ASM_HIDDEN(__tsan_setjmp) ++.comm _ZN14__interception14real_sigsetjmpE,8,8 ++.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp) ++ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp)) ++ASM_SYMBOL_INTERCEPTOR(sigsetjmp): ++ ldgp $r29, 0($r27) ++ CFI_STARTPROC ++ ++ // Save frame/link register ++ ldi $sp, -32($sp) ++ stl $r26, 0($sp) ++ stl $fp, 8($sp) ++ CFI_DEF_CFA_OFFSET (32) ++ CFI_OFFSET (26, -32) ++ CFI_OFFSET (15, -24) ++ ++ // Adjust the SP for previous frame ++ ldi $fp,0($sp) ++ CFI_DEF_CFA_REGISTER (15) ++ ++ // Save env parameter ++ stl $r16, 16($sp) ++ stl $r17, 24($sp) ++ CFI_OFFSET (16, -16) ++ CFI_OFFSET (17, -8) ++ ++ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` ++ ldi $r16, 32($sp) ++ ++ // call 
tsan interceptor ++ //ldih $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprelhigh ++ //ldi $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprellow ++ ldl $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !literal ++ call $r26, ($r27), 0 ++ ldgp $r29, 0($r26) ++ ++ // Restore env parameter ++ ldl $r16, 16($sp) ++ ldl $r17, 24($sp) ++ CFI_RESTORE (0) ++ CFI_RESTORE (1) ++ ++ // Restore frame/link register ++ ldl $fp, 8($sp) ++ ldl $r26, 0($sp) ++ CFI_RESTORE (15) ++ CFI_RESTORE (26) ++ CFI_DEF_CFA (31, 0) ++ ldi $sp, 32($sp) ++ ++ // tail jump to libc setjmp ++ ldl $r27, _ZN14__interception14real_sigsetjmpE($r29) !literal ++ ldl $r27, 0($r27) ++ ++ jmp $r31, ($r27) ++ ++ CFI_ENDPROC ++ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp)) ++ ++ASM_HIDDEN(__tsan_setjmp) ++.comm _ZN14__interception16real___sigsetjmpE,8,8 ++.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp) ++ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)) ++ASM_SYMBOL_INTERCEPTOR(__sigsetjmp): ++ ldgp $r29, 0($r27) ++ CFI_STARTPROC ++ ++ // Save frame/link register ++ ldi $sp, -32($sp) ++ stl $r26, 0($sp) ++ stl $fp, 8($sp) ++ CFI_DEF_CFA_OFFSET (32) ++ CFI_OFFSET (26, -32) ++ CFI_OFFSET (15, -24) ++ ++ // Adjust the SP for previous frame ++ ldi $fp,0($sp) ++ CFI_DEF_CFA_REGISTER (15) ++ ++ // Save env parameter ++ stl $r16, 16($sp) ++ stl $r17, 24($sp) ++ CFI_OFFSET (16, -16) ++ CFI_OFFSET (17, -8) ++ ++ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)` ++ ldi $r16, 32($sp) ++ ++ // call tsan interceptor ++ //ldih $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprelhigh ++ //ldi $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !gprellow ++ ldl $r27, ASM_SYMBOL(__tsan_setjmp)($r29) !literal ++ call $r26, ($r27), 0 ++ ldgp $r29, 0($r26) ++ ++ // Restore env parameter ++ ldl $r16, 16($sp) ++ ldl $r17, 24($sp) ++ CFI_RESTORE (0) ++ CFI_RESTORE (1) ++ ++ // Restore frame/link register ++ ldl $fp, 8($sp) ++ ldl $r26, 0($sp) ++ CFI_RESTORE (15) ++ CFI_RESTORE (26) ++ CFI_DEF_CFA (31, 0) ++ ldi $sp, 32($sp) ++ ++ // tail jump to libc setjmp ++ ldl $r27, 
_ZN14__interception16real___sigsetjmpE($r29) !literal ++ ldl $r27, 0($r27) ++ jmp $r31, ($r27) ++ ++ CFI_ENDPROC ++ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)) ++ ++#endif +-- +2.25.1 + diff --git a/0016-CompleteStructRelayout-Complete-Structure-Relayout.patch b/0016-CompleteStructRelayout-Complete-Structure-Relayout.patch new file mode 100644 index 0000000000000000000000000000000000000000..37657ef0bc2eb3296dfd5c377d5c8463cc42ce2a --- /dev/null +++ b/0016-CompleteStructRelayout-Complete-Structure-Relayout.patch @@ -0,0 +1,2056 @@ +From 699caeaa2d89966e4af1d36bc96b53eb4dac0a09 Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Fri, 25 Aug 2023 09:59:39 +0800 +Subject: [PATCH 16/22] [CompleteStructRelayout] Complete Structure Relayout + +Introduce complete structure reorganization based on original +structure reorganization optimization, which change array of +structure to structure of array in order to better utilize +spatial locality. +--- + gcc/ipa-struct-reorg/escapes.def | 2 + + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 994 ++++++++++++++++-- + gcc/ipa-struct-reorg/ipa-struct-reorg.h | 33 + + .../g++.dg/struct/no-body-function.cpp | 18 + + .../g++.dg/struct/struct-reorg-1.cpp | 13 + + .../g++.dg/struct/struct-reorg-2.cpp | 17 + + .../g++.dg/struct/struct-reorg-3.cpp | 24 + + gcc/testsuite/g++.dg/struct/struct-reorg.exp | 26 + + gcc/testsuite/gcc.dg/struct/csr_1.c | 60 ++ + .../gcc.dg/struct/csr_allocation-1.c | 46 + + .../gcc.dg/struct/csr_allocation-2.c | 59 ++ + .../gcc.dg/struct/csr_allocation-3.c | 77 ++ + gcc/testsuite/gcc.dg/struct/csr_cast_int.c | 52 + + .../gcc.dg/struct/csr_separate_instance.c | 48 + + .../gcc.dg/struct/sr_address_of_field.c | 37 + + gcc/testsuite/gcc.dg/struct/sr_convert_mem.c | 23 + + gcc/testsuite/gcc.dg/struct/sr_maxmin_expr.c | 25 + + gcc/testsuite/gcc.dg/struct/sr_pointer_and.c | 17 + + .../gcc.dg/struct/sr_pointer_minus.c | 33 + + 19 files changed, 1539 insertions(+), 65 deletions(-) + create mode 100644 
gcc/testsuite/g++.dg/struct/no-body-function.cpp + create mode 100644 gcc/testsuite/g++.dg/struct/struct-reorg-1.cpp + create mode 100644 gcc/testsuite/g++.dg/struct/struct-reorg-2.cpp + create mode 100644 gcc/testsuite/g++.dg/struct/struct-reorg-3.cpp + create mode 100644 gcc/testsuite/g++.dg/struct/struct-reorg.exp + create mode 100644 gcc/testsuite/gcc.dg/struct/csr_1.c + create mode 100644 gcc/testsuite/gcc.dg/struct/csr_allocation-1.c + create mode 100644 gcc/testsuite/gcc.dg/struct/csr_allocation-2.c + create mode 100644 gcc/testsuite/gcc.dg/struct/csr_allocation-3.c + create mode 100644 gcc/testsuite/gcc.dg/struct/csr_cast_int.c + create mode 100644 gcc/testsuite/gcc.dg/struct/csr_separate_instance.c + create mode 100644 gcc/testsuite/gcc.dg/struct/sr_address_of_field.c + create mode 100644 gcc/testsuite/gcc.dg/struct/sr_convert_mem.c + create mode 100644 gcc/testsuite/gcc.dg/struct/sr_maxmin_expr.c + create mode 100644 gcc/testsuite/gcc.dg/struct/sr_pointer_and.c + create mode 100644 gcc/testsuite/gcc.dg/struct/sr_pointer_minus.c + +diff --git a/gcc/ipa-struct-reorg/escapes.def b/gcc/ipa-struct-reorg/escapes.def +index c4c8e0739..d825eb3e6 100644 +--- a/gcc/ipa-struct-reorg/escapes.def ++++ b/gcc/ipa-struct-reorg/escapes.def +@@ -56,5 +56,7 @@ DEF_ESCAPE (escape_non_optimize, "Type used by a function which turns off struct + DEF_ESCAPE (escape_array, "Type is used in an array [not handled yet]") + DEF_ESCAPE (escape_ptr_ptr, "Type is used in a pointer to a pointer [not handled yet]") + DEF_ESCAPE (escape_return, "Type escapes via a return [not handled yet]") ++DEF_ESCAPE (escape_separate_instance, "Type escapes via a separate instance") ++DEF_ESCAPE (escape_unhandled_rewrite, "Type escapes via a unhandled rewrite stmt") + + #undef DEF_ESCAPE +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +index 238530860..c8b975a92 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ 
b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -104,10 +104,12 @@ along with GCC; see the file COPYING3. If not see + #include "tree-ssa-live.h" /* For remove_unused_locals. */ + #include "ipa-param-manipulation.h" + #include "gimplify-me.h" ++#include "cfgloop.h" + + namespace { + + using namespace struct_reorg; ++using namespace struct_relayout; + + #define VOID_POINTER_P(type) \ + (POINTER_TYPE_P (type) && VOID_TYPE_P (TREE_TYPE (type))) +@@ -194,6 +196,14 @@ gimplify_build1 (gimple_stmt_iterator *gsi, enum tree_code code, tree type, + GSI_SAME_STMT); + } + ++enum srmode ++{ ++ NORMAL = 0, ++ COMPLETE_STRUCT_RELAYOUT ++}; ++ ++static bool is_result_of_mult (tree, tree *, tree); ++ + } // anon namespace + + +@@ -283,7 +293,8 @@ srtype::srtype (tree type) + : type (type), + chain_type (false), + escapes (does_not_escape), +- visited (false) ++ visited (false), ++ has_alloc_array (0) + { + for (int i = 0; i < max_split; i++) + newtype[i] = NULL_TREE; +@@ -483,13 +494,6 @@ srtype::dump (FILE *f) + fn->simple_dump (f); + } + fprintf (f, "\n }\n"); +- fprintf (f, "\n field_sites = {"); +- FOR_EACH_VEC_ELT (field_sites, i, field) +- { +- fprintf (f, " \n"); +- field->simple_dump (f); +- } +- fprintf (f, "\n }\n"); + fprintf (f, "}\n"); + } + +@@ -631,15 +635,7 @@ srtype::create_new_type (void) + + maxclusters++; + +- const char *tname = NULL; +- +- if (TYPE_NAME (type) != NULL) +- { +- if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE) +- tname = IDENTIFIER_POINTER (TYPE_NAME (type)); +- else if (DECL_NAME (TYPE_NAME (type)) != NULL) +- tname = IDENTIFIER_POINTER (DECL_NAME (TYPE_NAME (type))); +- } ++ const char *tname = get_type_name (type); + + for (unsigned i = 0; i < maxclusters; i++) + { +@@ -653,7 +649,10 @@ srtype::create_new_type (void) + if (tname) + { + name = concat (tname, ".reorg.", id, NULL); +- TYPE_NAME (newtype[i]) = get_identifier (name); ++ TYPE_NAME (newtype[i]) = build_decl (UNKNOWN_LOCATION, ++ TYPE_DECL, ++ get_identifier (name), ++ 
newtype[i]); + free (name); + } + } +@@ -673,6 +672,8 @@ srtype::create_new_type (void) + { + TYPE_FIELDS (newtype[i]) = newfields[i]; + layout_type (newtype[i]); ++ if (TYPE_NAME (newtype[i]) != NULL) ++ layout_decl (TYPE_NAME (newtype[i]), 0); + } + + warn_padded = save_warn_padded; +@@ -841,12 +842,6 @@ srfield::dump (FILE *f) + fprintf (f, ", offset = " HOST_WIDE_INT_PRINT_DEC, offset); + fprintf (f, ", type = "); + print_generic_expr (f, fieldtype); +- if (type) +- { +- fprintf (f, "( srtype = "); +- type->simple_dump (f); +- fprintf (f, ")"); +- } + fprintf (f, "\n}\n"); + } + +@@ -855,7 +850,8 @@ srfield::dump (FILE *f) + void + srfield::simple_dump (FILE *f) + { +- fprintf (f, "field (%d)", DECL_UID (fielddecl)); ++ if (fielddecl) ++ fprintf (f, "field (%d)", DECL_UID (fielddecl)); + } + + /* Dump out the access structure to FILE. */ +@@ -899,6 +895,92 @@ srdecl::dump (FILE *file) + } // namespace struct_reorg + + ++namespace struct_relayout { ++ ++/* Complete Structure Relayout Optimization. ++ It reorganizes all structure members, and puts same member together. ++ struct s { ++ long a; ++ int b; ++ struct s *c; ++ }; ++ Array looks like ++ abcabcabcabc... ++ will be transformed to ++ aaaa...bbbb...cccc... ++*/ ++ ++#define GPTR_SIZE(i) \ ++ TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (gptr[i]))) ++ ++unsigned transformed = 0; ++ ++unsigned ++csrtype::calculate_field_num (tree field_offset) ++{ ++ if (field_offset == NULL) ++ return 0; ++ ++ HOST_WIDE_INT off = int_byte_position (field_offset); ++ unsigned i = 1; ++ for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ { ++ if (off == int_byte_position (field)) ++ return i; ++ i++; ++ } ++ return 0; ++} ++ ++void ++csrtype::init_type_info (void) ++{ ++ if (!type) ++ return; ++ new_size = old_size = tree_to_uhwi (TYPE_SIZE_UNIT (type)); ++ ++ /* Close enough to pad to improve performance. 
++ 33~63 should pad to 64 but 33~48 (first half) are too far away, and ++ 65~127 should pad to 128 but 65~96 (first half) are too far away. */ ++ if (old_size > 48 && old_size < 64) ++ new_size = 64; ++ if (old_size > 96 && old_size < 128) ++ new_size = 128; ++ ++ /* For performance reasons, only allow structure size ++ that is a power of 2 and not too big. */ ++ if (new_size != 1 && new_size != 2 ++ && new_size != 4 && new_size != 8 ++ && new_size != 16 && new_size != 32 ++ && new_size != 64 && new_size != 128) ++ { ++ new_size = 0; ++ field_count = 0; ++ return; ++ } ++ ++ unsigned i = 0; ++ for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ if (TREE_CODE (field) == FIELD_DECL) ++ i++; ++ field_count = i; ++ ++ struct_size = build_int_cstu (TREE_TYPE (TYPE_SIZE_UNIT (type)), ++ new_size); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Type: "); ++ print_generic_expr (dump_file, type); ++ fprintf (dump_file, " has %d members.\n", field_count); ++ fprintf (dump_file, "Modify struct size from %ld to %ld.\n", ++ old_size, new_size); ++ } ++} ++ ++} // namespace struct_relayout ++ ++ + namespace { + + struct ipa_struct_reorg +@@ -907,13 +989,10 @@ public: + // Constructors + ipa_struct_reorg (void) + : current_function (NULL), +- done_recording (false) ++ done_recording (false), ++ current_mode (NORMAL) + {} + +- // Public methods +- unsigned execute (void); +- void mark_type_as_escape (tree type, escape_type, gimple *stmt = NULL); +-private: + // Fields + auto_vec_del types; + auto_vec_del functions; +@@ -921,8 +1000,13 @@ private: + srfunction *current_function; + + bool done_recording; ++ srmode current_mode; ++ ++ // Methods ++ unsigned execute (enum srmode mode); ++ void mark_type_as_escape (tree type, escape_type escapes, ++ gimple *stmt = NULL); + +- // Private methods + void dump_types (FILE *f); + void dump_types_escaped (FILE *f); + void dump_functions (FILE *f); +@@ -954,6 +1038,7 @@ private: + void 
maybe_record_allocation_site (cgraph_node *, gimple *); + void record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt); + void mark_expr_escape (tree, escape_type, gimple *stmt); ++ bool handled_allocation_stmt (gimple *stmt); + tree allocate_size (srtype *t, gimple *stmt); + + void mark_decls_in_as_not_needed (tree fn); +@@ -976,6 +1061,7 @@ private: + bool can_escape = false); + bool wholeaccess (tree expr, tree base, tree accesstype, srtype *t); + ++ void check_alloc_num (gimple *stmt, srtype *type); + void check_definition (srdecl *decl, vec &); + void check_uses (srdecl *decl, vec &); + void check_use (srdecl *decl, gimple *stmt, vec &); +@@ -990,8 +1076,591 @@ private: + + bool has_rewritten_type (srfunction *); + void maybe_mark_or_record_other_side (tree side, tree other, gimple *stmt); ++ ++ unsigned execute_struct_relayout (void); + }; + ++struct ipa_struct_relayout ++{ ++public: ++ // Fields ++ tree gptr[max_relayout_split + 1]; ++ csrtype ctype; ++ ipa_struct_reorg *sr; ++ cgraph_node *current_node; ++ ++ // Constructors ++ ipa_struct_relayout (tree type, ipa_struct_reorg *sr_) ++ { ++ ctype.type = type; ++ sr = sr_; ++ current_node = NULL; ++ for (int i = 0; i < max_relayout_split + 1; i++) ++ gptr[i] = NULL; ++ } ++ ++ // Methods ++ tree create_new_vars (tree type, const char *name); ++ void create_global_ptrs (void); ++ unsigned int rewrite (void); ++ void rewrite_stmt_in_function (void); ++ bool rewrite_debug (gimple *stmt, gimple_stmt_iterator *gsi); ++ bool rewrite_stmt (gimple *stmt, gimple_stmt_iterator *gsi); ++ bool handled_allocation_stmt (gcall *stmt); ++ void init_global_ptrs (gcall *stmt, gimple_stmt_iterator *gsi); ++ bool check_call_uses (gcall *stmt); ++ bool rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi); ++ tree create_ssa (tree node, gimple_stmt_iterator *gsi); ++ bool is_candidate (tree xhs); ++ tree rewrite_address (tree xhs, gimple_stmt_iterator *gsi); ++ tree rewrite_offset (tree offset, HOST_WIDE_INT num); ++ bool 
rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi); ++ bool maybe_rewrite_cst (tree cst, gimple_stmt_iterator *gsi, ++ HOST_WIDE_INT ×); ++ unsigned int execute (void); ++}; ++ ++} // anon namespace ++ ++namespace { ++ ++/* Methods for ipa_struct_relayout. */ ++ ++static void ++set_var_attributes (tree var) ++{ ++ if (!var) ++ return; ++ gcc_assert (TREE_CODE (var) == VAR_DECL); ++ ++ DECL_ARTIFICIAL (var) = 1; ++ DECL_EXTERNAL (var) = 0; ++ TREE_STATIC (var) = 1; ++ TREE_PUBLIC (var) = 0; ++ TREE_USED (var) = 1; ++ DECL_CONTEXT (var) = NULL; ++ TREE_THIS_VOLATILE (var) = 0; ++ TREE_ADDRESSABLE (var) = 0; ++ TREE_READONLY (var) = 0; ++ if (is_global_var (var)) ++ set_decl_tls_model (var, TLS_MODEL_NONE); ++} ++ ++tree ++ipa_struct_relayout::create_new_vars (tree type, const char *name) ++{ ++ gcc_assert (type); ++ tree new_type = build_pointer_type (type); ++ ++ tree new_name = NULL; ++ if (name) ++ new_name = get_identifier (name); ++ ++ tree new_var = build_decl (UNKNOWN_LOCATION, VAR_DECL, new_name, new_type); ++ ++ /* Set new_var's attributes. 
*/ ++ set_var_attributes (new_var); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Created new var: "); ++ print_generic_expr (dump_file, new_var); ++ fprintf (dump_file, "\n"); ++ } ++ return new_var; ++} ++ ++void ++ipa_struct_relayout::create_global_ptrs (void) ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "Create global gptrs: {\n"); ++ ++ char *gptr0_name = NULL; ++ const char *type_name = get_type_name (ctype.type); ++ ++ if (type_name) ++ gptr0_name = concat (type_name, "_gptr0", NULL); ++ tree var_gptr0 = create_new_vars (ctype.type, gptr0_name); ++ gptr[0] = var_gptr0; ++ varpool_node::add (var_gptr0); ++ ++ unsigned i = 1; ++ for (tree field = TYPE_FIELDS (ctype.type); field; ++ field = DECL_CHAIN (field)) ++ { ++ if (TREE_CODE (field) == FIELD_DECL) ++ { ++ tree type = TREE_TYPE (field); ++ ++ char *name = NULL; ++ char id[10] = {0}; ++ sprintf (id, "%d", i); ++ const char *decl_name = IDENTIFIER_POINTER (DECL_NAME (field)); ++ ++ if (type_name && decl_name) ++ name = concat (type_name, "_", decl_name, "_gptr", id, NULL); ++ tree var = create_new_vars (type, name); ++ ++ gptr[i] = var; ++ varpool_node::add (var); ++ i++; ++ } ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nTotally create %d gptrs. }\n\n", i); ++ gcc_assert (ctype.field_count == i - 1); ++} ++ ++void ++ipa_struct_relayout::rewrite_stmt_in_function (void) ++{ ++ gcc_assert (cfun); ++ ++ basic_block bb = NULL; ++ gimple_stmt_iterator si; ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ for (si = gsi_start_bb (bb); !gsi_end_p (si);) ++ { ++ gimple *stmt = gsi_stmt (si); ++ if (rewrite_stmt (stmt, &si)) ++ gsi_remove (&si, true); ++ else ++ gsi_next (&si); ++ } ++ } ++ ++ /* Debug statements need to happen after all other statements ++ have changed. 
*/ ++ FOR_EACH_BB_FN (bb, cfun) ++ { ++ for (si = gsi_start_bb (bb); !gsi_end_p (si);) ++ { ++ gimple *stmt = gsi_stmt (si); ++ if (gimple_code (stmt) == GIMPLE_DEBUG ++ && rewrite_debug (stmt, &si)) ++ gsi_remove (&si, true); ++ else ++ gsi_next (&si); ++ } ++ } ++} ++ ++unsigned int ++ipa_struct_relayout::rewrite (void) ++{ ++ cgraph_node *cnode = NULL; ++ function *fn = NULL; ++ FOR_EACH_FUNCTION (cnode) ++ { ++ if (!cnode->real_symbol_p () || !cnode->has_gimple_body_p ()) ++ continue; ++ if (cnode->definition) ++ { ++ fn = DECL_STRUCT_FUNCTION (cnode->decl); ++ if (fn == NULL) ++ continue; ++ ++ current_node = cnode; ++ push_cfun (fn); ++ ++ rewrite_stmt_in_function (); ++ ++ update_ssa (TODO_update_ssa_only_virtuals); ++ ++ if (flag_tree_pta) ++ compute_may_aliases (); ++ ++ remove_unused_locals (); ++ ++ cgraph_edge::rebuild_edges (); ++ ++ free_dominance_info (CDI_DOMINATORS); ++ ++ pop_cfun (); ++ current_node = NULL; ++ } ++ } ++ return TODO_verify_all; ++} ++ ++bool ++ipa_struct_relayout::rewrite_debug (gimple *stmt ATTRIBUTE_UNUSED, ++ gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED) ++{ ++ /* Delete debug gimple now. */ ++ return true; ++} ++ ++bool ++ipa_struct_relayout::rewrite_stmt (gimple *stmt, gimple_stmt_iterator *gsi) ++{ ++ switch (gimple_code (stmt)) ++ { ++ case GIMPLE_ASSIGN: ++ return rewrite_assign (as_a (stmt), gsi); ++ case GIMPLE_CALL: ++ return rewrite_call (as_a (stmt), gsi); ++ default: ++ break; ++ } ++ return false; ++} ++ ++bool ++ipa_struct_relayout::handled_allocation_stmt (gcall *stmt) ++{ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) ++ return true; ++ return false; ++} ++ ++void ++ipa_struct_relayout::init_global_ptrs (gcall *stmt, gimple_stmt_iterator *gsi) ++{ ++ gcc_assert (handled_allocation_stmt (stmt)); ++ ++ tree lhs = gimple_call_lhs (stmt); ++ ++ /* Case that gimple is at the end of bb. 
*/ ++ if (gsi_one_before_end_p (*gsi)) ++ { ++ gassign *gptr0 = gimple_build_assign (gptr[0], lhs); ++ gsi_insert_after (gsi, gptr0, GSI_SAME_STMT); ++ } ++ gsi_next (gsi); ++ ++ /* Emit gimple gptr0 = _X and gptr1 = _X. */ ++ gassign *gptr0 = gimple_build_assign (gptr[0], lhs); ++ gsi_insert_before (gsi, gptr0, GSI_SAME_STMT); ++ gassign *gptr1 = gimple_build_assign (gptr[1], lhs); ++ gsi_insert_before (gsi, gptr1, GSI_SAME_STMT); ++ ++ /* Emit gimple gptr_[i] = gptr_[i-1] + _Y[gap]. */ ++ for (unsigned i = 2; i <= ctype.field_count; i++) ++ { ++ gimple *new_stmt = NULL; ++ tree gptr_i_prev_ssa = create_ssa (gptr[i-1], gsi); ++ tree gptr_i_ssa = make_ssa_name (TREE_TYPE (gptr[i-1])); ++ ++ /* Emit gimple _Y[gap] = N * sizeof (member). */ ++ tree member_gap = gimplify_build2 (gsi, MULT_EXPR, ++ long_unsigned_type_node, ++ gimple_call_arg (stmt, 0), ++ GPTR_SIZE (i-1)); ++ ++ new_stmt = gimple_build_assign (gptr_i_ssa, POINTER_PLUS_EXPR, ++ gptr_i_prev_ssa, member_gap); ++ gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); ++ ++ gassign *gptr_i = gimple_build_assign (gptr[i], gptr_i_ssa); ++ gsi_insert_before (gsi, gptr_i, GSI_SAME_STMT); ++ } ++ gsi_prev (gsi); ++} ++ ++bool ++ipa_struct_relayout::check_call_uses (gcall *stmt) ++{ ++ gcc_assert (current_node); ++ srfunction *fn = sr->find_function (current_node); ++ tree lhs = gimple_call_lhs (stmt); ++ ++ if (fn == NULL) ++ return false; ++ ++ srdecl *d = fn->find_decl (lhs); ++ if (d == NULL) ++ return false; ++ if (types_compatible_p (d->type->type, ctype.type)) ++ return true; ++ ++ return false; ++} ++ ++bool ++ipa_struct_relayout::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) ++{ ++ if (handled_allocation_stmt (stmt)) ++ { ++ /* Rewrite stmt _X = calloc (N, sizeof (struct)). 
*/ ++ tree size = gimple_call_arg (stmt, 1); ++ if (TREE_CODE (size) != INTEGER_CST) ++ return false; ++ if (tree_to_uhwi (size) != ctype.old_size) ++ return false; ++ if (!check_call_uses (stmt)) ++ return false; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Rewrite allocation call:\n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "to\n"); ++ } ++ ++ /* Modify sizeof (struct). */ ++ gimple_call_set_arg (stmt, 1, ctype.struct_size); ++ update_stmt (stmt); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ ++ init_global_ptrs (stmt, gsi); ++ } ++ return false; ++} ++ ++tree ++ipa_struct_relayout::create_ssa (tree node, gimple_stmt_iterator *gsi) ++{ ++ gcc_assert (TREE_CODE (node) == VAR_DECL); ++ tree node_ssa = make_ssa_name (TREE_TYPE (node)); ++ gassign *stmt = gimple_build_assign (node_ssa, node); ++ gsi_insert_before (gsi, stmt, GSI_SAME_STMT); ++ return node_ssa; ++} ++ ++bool ++ipa_struct_relayout::is_candidate (tree xhs) ++{ ++ if (TREE_CODE (xhs) != COMPONENT_REF) ++ return false; ++ tree mem = TREE_OPERAND (xhs, 0); ++ if (TREE_CODE (mem) == MEM_REF) ++ { ++ tree type = TREE_TYPE (mem); ++ if (types_compatible_p (type, ctype.type)) ++ return true; ++ } ++ return false; ++} ++ ++tree ++ipa_struct_relayout::rewrite_address (tree xhs, gimple_stmt_iterator *gsi) ++{ ++ tree mem_ref = TREE_OPERAND (xhs, 0); ++ tree pointer = TREE_OPERAND (mem_ref, 0); ++ tree pointer_offset = TREE_OPERAND (mem_ref, 1); ++ tree field = TREE_OPERAND (xhs, 1); ++ ++ tree pointer_ssa = fold_convert (long_unsigned_type_node, pointer); ++ tree gptr0_ssa = fold_convert (long_unsigned_type_node, gptr[0]); ++ ++ /* Emit gimple _X1 = ptr - gptr0. */ ++ tree step1 = gimplify_build2 (gsi, MINUS_EXPR, long_unsigned_type_node, ++ pointer_ssa, gptr0_ssa); ++ ++ /* Emit gimple _X2 = _X1 / sizeof (struct). 
*/ ++ tree step2 = gimplify_build2 (gsi, TRUNC_DIV_EXPR, long_unsigned_type_node, ++ step1, ctype.struct_size); ++ ++ unsigned field_num = ctype.calculate_field_num (field); ++ gcc_assert (field_num > 0 && field_num <= ctype.field_count); ++ ++ /* Emit gimple _X3 = _X2 * sizeof (member). */ ++ tree step3 = gimplify_build2 (gsi, MULT_EXPR, long_unsigned_type_node, ++ step2, GPTR_SIZE (field_num)); ++ ++ /* Emit gimple _X4 = gptr[I]. */ ++ tree gptr_field_ssa = create_ssa (gptr[field_num], gsi); ++ tree new_address = make_ssa_name (TREE_TYPE (gptr[field_num])); ++ gassign *new_stmt = gimple_build_assign (new_address, POINTER_PLUS_EXPR, ++ gptr_field_ssa, step3); ++ gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); ++ ++ /* MEM_REF with nonzero offset like ++ MEM[ptr + sizeof (struct)] = 0B ++ should be transformed to ++ MEM[gptr + sizeof (member)] = 0B ++ */ ++ HOST_WIDE_INT size ++ = tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_address)))); ++ tree new_size = rewrite_offset (pointer_offset, size); ++ if (new_size) ++ TREE_OPERAND (mem_ref, 1) = new_size; ++ ++ /* Update mem_ref pointer. */ ++ TREE_OPERAND (mem_ref, 0) = new_address; ++ ++ /* Update mem_ref TREE_TYPE. */ ++ TREE_TYPE (mem_ref) = TREE_TYPE (TREE_TYPE (new_address)); ++ ++ return mem_ref; ++} ++ ++tree ++ipa_struct_relayout::rewrite_offset (tree offset, HOST_WIDE_INT num) ++{ ++ if (TREE_CODE (offset) == INTEGER_CST) ++ { ++ bool sign = false; ++ HOST_WIDE_INT off = TREE_INT_CST_LOW (offset); ++ if (off == 0) ++ return NULL; ++ if (off < 0) ++ { ++ off = -off; ++ sign = true; ++ } ++ if (off % ctype.old_size == 0) ++ { ++ HOST_WIDE_INT times = off / ctype.old_size; ++ times = sign ? 
-times : times; ++ return build_int_cst (TREE_TYPE (offset), num * times); ++ } ++ } ++ return NULL; ++} ++ ++#define REWRITE_ASSIGN_TREE_IN_STMT(node) \ ++do \ ++{ \ ++ tree node = gimple_assign_##node (stmt); \ ++ if (node && is_candidate (node)) \ ++ { \ ++ tree mem_ref = rewrite_address (node, gsi); \ ++ gimple_assign_set_##node (stmt, mem_ref); \ ++ update_stmt (stmt); \ ++ } \ ++} while (0) ++ ++/* COMPONENT_REF = exp => MEM_REF = exp ++ / \ / \ ++ MEM_REF field gptr offset ++ / \ ++ pointer offset ++*/ ++bool ++ipa_struct_relayout::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Maybe rewrite assign:\n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "to\n"); ++ } ++ ++ switch (gimple_num_ops (stmt)) ++ { ++ case 4: REWRITE_ASSIGN_TREE_IN_STMT (rhs3); // FALLTHRU ++ case 3: ++ { ++ REWRITE_ASSIGN_TREE_IN_STMT (rhs2); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ if (rhs2 && TREE_CODE (rhs2) == INTEGER_CST) ++ { ++ /* Handle pointer++ and pointer-- or ++ factor is euqal to struct size. 
*/ ++ HOST_WIDE_INT times = 1; ++ if (maybe_rewrite_cst (rhs2, gsi, times)) ++ { ++ tree tmp = build_int_cst ( ++ TREE_TYPE (TYPE_SIZE_UNIT (ctype.type)), ++ ctype.new_size * times); ++ gimple_assign_set_rhs2 (stmt, tmp); ++ update_stmt (stmt); ++ } ++ } ++ } // FALLTHRU ++ case 2: REWRITE_ASSIGN_TREE_IN_STMT (rhs1); // FALLTHRU ++ case 1: REWRITE_ASSIGN_TREE_IN_STMT (lhs); // FALLTHRU ++ case 0: break; ++ default: gcc_unreachable (); ++ } ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } ++ return false; ++} ++ ++bool ++ipa_struct_relayout::maybe_rewrite_cst (tree cst, gimple_stmt_iterator *gsi, ++ HOST_WIDE_INT ×) ++{ ++ bool ret = false; ++ gcc_assert (TREE_CODE (cst) == INTEGER_CST); ++ ++ gimple *stmt = gsi_stmt (*gsi); ++ if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ { ++ tree lhs = gimple_assign_lhs (stmt); ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ if (types_compatible_p (inner_type (TREE_TYPE (rhs1)), ctype.type) ++ || types_compatible_p (inner_type (TREE_TYPE (lhs)), ctype.type)) ++ { ++ tree num = NULL; ++ if (is_result_of_mult (cst, &num, TYPE_SIZE_UNIT (ctype.type))) ++ { ++ times = TREE_INT_CST_LOW (num); ++ return true; ++ } ++ } ++ } ++ ++ if (gimple_assign_rhs_code (stmt) == MULT_EXPR) ++ { ++ if (gsi_one_before_end_p (*gsi)) ++ return false; ++ gsi_next (gsi); ++ gimple *stmt2 = gsi_stmt (*gsi); ++ ++ if (gimple_code (stmt2) == GIMPLE_ASSIGN ++ && gimple_assign_rhs_code (stmt2) == POINTER_PLUS_EXPR) ++ { ++ tree lhs = gimple_assign_lhs (stmt2); ++ tree rhs1 = gimple_assign_rhs1 (stmt2); ++ if (types_compatible_p (inner_type (TREE_TYPE (rhs1)), ctype.type) ++ || types_compatible_p (inner_type (TREE_TYPE (lhs)), ctype.type)) ++ { ++ tree num = NULL; ++ if (is_result_of_mult (cst, &num, TYPE_SIZE_UNIT (ctype.type))) ++ { ++ times = TREE_INT_CST_LOW (num); ++ ret = true; ++ } ++ } ++ } ++ gsi_prev (gsi); ++ return ret; ++ } ++ return false; ++} ++ 
++unsigned int ++ipa_struct_relayout::execute (void) ++{ ++ ctype.init_type_info (); ++ if (ctype.field_count < min_relayout_split ++ || ctype.field_count > max_relayout_split) ++ return 0; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Complete Struct Relayout Type: "); ++ print_generic_expr (dump_file, ctype.type); ++ fprintf (dump_file, "\n"); ++ } ++ transformed++; ++ ++ create_global_ptrs (); ++ return rewrite (); ++} ++ ++} // anon namespace ++ ++ ++namespace { ++ ++/* Methods for ipa_struct_reorg. */ ++ + /* Dump all of the recorded types to file F. */ + + void +@@ -1189,7 +1858,7 @@ ipa_struct_reorg::record_type (tree type) + f->type = t1; + t1->add_field_site (f); + } +- if (t1 == type1) ++ if (t1 == type1 && current_mode != COMPLETE_STRUCT_RELAYOUT) + type1->mark_escape (escape_rescusive_type, NULL); + } + } +@@ -1331,6 +2000,12 @@ ipa_struct_reorg::record_var (tree decl, escape_type escapes, int arg) + else + e = escape_type_volatile_array_or_ptrptr (TREE_TYPE (decl)); + ++ /* Separate instance is hard to trace in complete struct ++ relayout optimization. 
*/ ++ if (current_mode == COMPLETE_STRUCT_RELAYOUT ++ && TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE) ++ e = escape_separate_instance; ++ + if (e != does_not_escape) + type->mark_escape (e, NULL); + } +@@ -1369,6 +2044,7 @@ ipa_struct_reorg::find_var (tree expr, gimple *stmt) + || TREE_CODE (expr) == VIEW_CONVERT_EXPR) + { + tree r = TREE_OPERAND (expr, 0); ++ tree orig_type = TREE_TYPE (expr); + if (handled_component_p (r) + || TREE_CODE (r) == MEM_REF) + { +@@ -1382,8 +2058,18 @@ ipa_struct_reorg::find_var (tree expr, gimple *stmt) + escape_vce, stmt); + } + if (TREE_CODE (r) == MEM_REF) +- mark_type_as_escape (TREE_TYPE (TREE_OPERAND (r, 1)), +- escape_addr, stmt); ++ { ++ mark_type_as_escape (TREE_TYPE (TREE_OPERAND (r, 1)), ++ escape_addr, stmt); ++ tree inner_type = TREE_TYPE (TREE_OPERAND (r, 0)); ++ if (orig_type != inner_type) ++ { ++ mark_type_as_escape (orig_type, ++ escape_cast_another_ptr, stmt); ++ mark_type_as_escape (inner_type, ++ escape_cast_another_ptr, stmt); ++ } ++ } + r = TREE_OPERAND (r, 0); + } + mark_expr_escape (r, escape_addr, stmt); +@@ -1407,7 +2093,8 @@ ipa_struct_reorg::find_vars (gimple *stmt) + { + case GIMPLE_ASSIGN: + if (gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS +- || gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ || gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR ++ || gimple_assign_rhs_code (stmt) == NOP_EXPR) + { + tree lhs = gimple_assign_lhs (stmt); + tree rhs = gimple_assign_rhs1 (stmt); +@@ -1432,6 +2119,32 @@ ipa_struct_reorg::find_vars (gimple *stmt) + current_function->record_decl (t, rhs, -1); + } + } ++ else ++ { ++ /* Because we won't handle these stmts in rewrite phase, ++ just mark these types as escaped. 
*/ ++ switch (gimple_num_ops (stmt)) ++ { ++ case 4: mark_type_as_escape ( ++ TREE_TYPE (gimple_assign_rhs3 (stmt)), ++ escape_unhandled_rewrite, stmt); ++ // FALLTHRU ++ case 3: mark_type_as_escape ( ++ TREE_TYPE (gimple_assign_rhs2 (stmt)), ++ escape_unhandled_rewrite, stmt); ++ // FALLTHRU ++ case 2: mark_type_as_escape ( ++ TREE_TYPE (gimple_assign_rhs1 (stmt)), ++ escape_unhandled_rewrite, stmt); ++ // FALLTHRU ++ case 1: mark_type_as_escape ( ++ TREE_TYPE (gimple_assign_lhs (stmt)), ++ escape_unhandled_rewrite, stmt); ++ // FALLTHRU ++ case 0: break; ++ default: gcc_unreachable (); ++ } ++ } + break; + + case GIMPLE_CALL: +@@ -1514,9 +2227,21 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + /* If we have a integer, just check if it is a multiply of STRUCT_SIZE. */ + if (TREE_CODE (arg) == INTEGER_CST) + { +- if (integer_zerop (size_binop (FLOOR_MOD_EXPR, arg, struct_size))) ++ bool sign = false; ++ HOST_WIDE_INT size = TREE_INT_CST_LOW (arg); ++ if (size < 0) ++ { ++ size = -size; ++ sign = true; ++ } ++ tree arg2 = build_int_cst (TREE_TYPE (arg), size); ++ if (integer_zerop (size_binop (FLOOR_MOD_EXPR, arg2, struct_size))) + { +- *num = size_binop (FLOOR_DIV_EXPR, arg, struct_size); ++ tree number = size_binop (FLOOR_DIV_EXPR, arg2, struct_size); ++ if (sign) ++ number = build_int_cst (TREE_TYPE (number), ++ -tree_to_shwi (number)); ++ *num = number; + return true; + } + return false; +@@ -1586,16 +2311,21 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + + /* Return TRUE if STMT is an allocation statement that is handled. 
*/ + +-static bool +-handled_allocation_stmt (gimple *stmt) ++bool ++ipa_struct_reorg::handled_allocation_stmt (gimple *stmt) + { +- if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA) +- || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) ++ if (current_mode == COMPLETE_STRUCT_RELAYOUT ++ && gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) + return true; ++ ++ if (current_mode != COMPLETE_STRUCT_RELAYOUT) ++ if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) ++ return true; + return false; + } + +@@ -1636,7 +2366,7 @@ ipa_struct_reorg::allocate_size (srtype *type, gimple *stmt) + the size of structure. */ + if (operand_equal_p (arg1, struct_size, 0)) + return size; +- /* Check that first argument is a constant equal to ++ /* ??? Check that first argument is a constant equal to + the size of structure. */ + if (operand_equal_p (size, struct_size, 0)) + return arg1; +@@ -1751,6 +2481,25 @@ ipa_struct_reorg::maybe_record_assign (cgraph_node *node, gassign *stmt) + } + } + ++static bool ++check_mem_ref_offset (tree expr) ++{ ++ tree num = NULL; ++ bool ret = false; ++ ++ if (TREE_CODE (expr) != MEM_REF) ++ return false; ++ ++ /* Try to find the structure size. 
*/ ++ tree field_off = TREE_OPERAND (expr, 1); ++ tree tmp = TREE_OPERAND (expr, 0); ++ if (TREE_CODE (tmp) == ADDR_EXPR) ++ tmp = TREE_OPERAND (tmp, 0); ++ tree size = TYPE_SIZE_UNIT (inner_type (TREE_TYPE (tmp))); ++ ret = is_result_of_mult (field_off, &num, size); ++ return ret; ++} ++ + static tree + get_ref_base_and_offset (tree &e, HOST_WIDE_INT &offset, + bool &realpart, bool &imagpart, +@@ -1792,7 +2541,8 @@ get_ref_base_and_offset (tree &e, HOST_WIDE_INT &offset, + gcc_assert (TREE_CODE (field_off) == INTEGER_CST); + /* So we can mark the types as escaping if different. */ + accesstype = TREE_TYPE (field_off); +- offset += tree_to_uhwi (field_off); ++ if (!check_mem_ref_offset (expr)) ++ offset += tree_to_uhwi (field_off); + return TREE_OPERAND (expr, 0); + } + default: +@@ -2176,6 +2926,31 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srtype *type, + type1->mark_escape (escape_cast_another_ptr, stmt); + } + ++void ++ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type) ++{ ++ if (current_mode == COMPLETE_STRUCT_RELAYOUT ++ && handled_allocation_stmt (stmt)) ++ { ++ tree arg0 = gimple_call_arg (stmt, 0); ++ basic_block bb = gimple_bb (stmt); ++ cgraph_node *node = current_function->node; ++ if (integer_onep (arg0)) ++ /* Actually NOT an array, but may ruin other array. */ ++ type->has_alloc_array = -1; ++ else if (bb->loop_father != NULL ++ && loop_outer (bb->loop_father) != NULL) ++ /* The allocation is in a loop. */ ++ type->has_alloc_array = -2; ++ else if (node->callers != NULL) ++ type->has_alloc_array = -3; ++ else ++ type->has_alloc_array = type->has_alloc_array < 0 ++ ? 
type->has_alloc_array ++ : type->has_alloc_array + 1; ++ } ++} ++ + /* + 2) Check SSA_NAMEs for non type usages (source or use) (worlist of srdecl) + a) if the SSA_NAME is sourced from a pointer plus, record the pointer and +@@ -2223,6 +2998,7 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) + if (!handled_allocation_stmt (stmt) + || !allocate_size (type, stmt)) + type->mark_escape (escape_return, stmt); ++ check_alloc_num (stmt, type); + return; + } + /* If the SSA_NAME is sourced from an inline-asm, +@@ -2264,6 +3040,20 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) + return; + } + ++ if (gimple_assign_rhs_code (stmt) == MAX_EXPR ++ || gimple_assign_rhs_code (stmt) == MIN_EXPR ++ || gimple_assign_rhs_code (stmt) == BIT_IOR_EXPR ++ || gimple_assign_rhs_code (stmt) == BIT_XOR_EXPR ++ || gimple_assign_rhs_code (stmt) == BIT_AND_EXPR) ++ { ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ if (TREE_CODE (rhs) == SSA_NAME) ++ check_type_and_push (rhs, type, worklist, stmt); ++ if (TREE_CODE (rhs2) == SSA_NAME) ++ check_type_and_push (rhs2, type, worklist, stmt); ++ return; ++ } ++ + /* Casts between pointers and integer are escaping. */ + if (gimple_assign_cast_p (stmt)) + { +@@ -2328,6 +3118,11 @@ ipa_struct_reorg::check_other_side (srdecl *decl, tree other, gimple *stmt, + srtype *t1 = find_type (inner_type (t)); + if (t1 == type) + { ++ /* In Complete Struct Relayout, if lhs type is the same ++ as rhs type, we could return without any harm. 
*/ ++ if (current_mode == COMPLETE_STRUCT_RELAYOUT) ++ return; ++ + tree base; + bool indirect; + srtype *type1; +@@ -2376,8 +3171,11 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + tree rhs1 = gimple_cond_lhs (stmt); + tree rhs2 = gimple_cond_rhs (stmt); + tree orhs = rhs1; +- if (gimple_cond_code (stmt) != EQ_EXPR +- && gimple_cond_code (stmt) != NE_EXPR) ++ enum tree_code code = gimple_cond_code (stmt); ++ if (code != EQ_EXPR && code != NE_EXPR ++ && (current_mode != COMPLETE_STRUCT_RELAYOUT ++ || (code != LT_EXPR && code != LE_EXPR ++ && code != GT_EXPR && code != GE_EXPR))) + { + mark_expr_escape (rhs1, escape_non_eq, stmt); + mark_expr_escape (rhs2, escape_non_eq, stmt); +@@ -2406,8 +3204,11 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + tree rhs1 = gimple_assign_rhs1 (stmt); + tree rhs2 = gimple_assign_rhs2 (stmt); + tree orhs = rhs1; +- if (gimple_assign_rhs_code (stmt) != EQ_EXPR +- && gimple_assign_rhs_code (stmt) != NE_EXPR) ++ enum tree_code code = gimple_assign_rhs_code (stmt); ++ if (code != EQ_EXPR && code != NE_EXPR ++ && (current_mode != COMPLETE_STRUCT_RELAYOUT ++ || (code != LT_EXPR && code != LE_EXPR ++ && code != GT_EXPR && code != GE_EXPR))) + { + mark_expr_escape (rhs1, escape_non_eq, stmt); + mark_expr_escape (rhs2, escape_non_eq, stmt); +@@ -2692,6 +3493,12 @@ ipa_struct_reorg::record_accesses (void) + /* Record accesses inside a function. 
*/ + if (cnode->definition) + record_function (cnode); ++ else ++ { ++ tree return_type = TREE_TYPE (TREE_TYPE (cnode->decl)); ++ mark_type_as_escape (return_type, escape_return, NULL); ++ } ++ + } + + if (dump_file && (dump_flags & TDF_DETAILS)) +@@ -2807,8 +3614,11 @@ ipa_struct_reorg::propagate_escape (void) + void + ipa_struct_reorg::prune_escaped_types (void) + { +- detect_cycles (); +- propagate_escape (); ++ if (current_mode != COMPLETE_STRUCT_RELAYOUT) ++ { ++ detect_cycles (); ++ propagate_escape (); ++ } + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +@@ -3954,17 +4764,66 @@ ipa_struct_reorg::rewrite_functions (void) + } + + unsigned int +-ipa_struct_reorg::execute (void) ++ipa_struct_reorg::execute_struct_relayout (void) + { +- /* FIXME: If there is a top-level inline-asm, +- the pass immediately returns. */ +- if (symtab->first_asm_symbol ()) +- return 0; +- record_accesses (); +- prune_escaped_types (); +- analyze_types (); ++ unsigned retval = 0; ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ tree type = types[i]->type; ++ if (TYPE_FIELDS (type) == NULL) ++ continue; ++ if (types[i]->has_alloc_array != 1) ++ continue; ++ if (types[i]->chain_type) ++ continue; ++ retval |= ipa_struct_relayout (type, this).execute (); ++ } ++ ++ if (dump_file) ++ { ++ if (transformed) ++ fprintf (dump_file, "\nNumber of structures to transform in " ++ "Complete Structure Relayout is %d\n", transformed); ++ else ++ fprintf (dump_file, "\nNo structures to transform in " ++ "Complete Structure Relayout.\n"); ++ } ++ ++ return retval; ++} ++ ++unsigned int ++ipa_struct_reorg::execute (enum srmode mode) ++{ ++ unsigned int ret = 0; ++ ++ if (mode == NORMAL) ++ { ++ current_mode = NORMAL; ++ /* FIXME: If there is a top-level inline-asm, ++ the pass immediately returns. 
*/ ++ if (symtab->first_asm_symbol ()) ++ return 0; ++ record_accesses (); ++ prune_escaped_types (); ++ analyze_types (); ++ ++ ret = rewrite_functions (); ++ } ++ else if (mode == COMPLETE_STRUCT_RELAYOUT) ++ { ++ if (dump_file) ++ fprintf (dump_file, "\n\nTry Complete Struct Relayout:\n"); ++ current_mode = COMPLETE_STRUCT_RELAYOUT; ++ if (symtab->first_asm_symbol ()) ++ return 0; ++ record_accesses (); ++ prune_escaped_types (); ++ ++ ret = execute_struct_relayout (); ++ } + +- return rewrite_functions (); ++ return ret; + } + + const pass_data pass_data_ipa_struct_reorg = +@@ -3991,7 +4850,11 @@ public: + virtual bool gate (function *); + virtual unsigned int execute (function *) + { +- return ipa_struct_reorg ().execute (); ++ unsigned int ret = 0; ++ ret = ipa_struct_reorg ().execute (NORMAL); ++ if (!ret) ++ ret = ipa_struct_reorg ().execute (COMPLETE_STRUCT_RELAYOUT); ++ return ret; + } + + }; // class pass_ipa_struct_reorg +@@ -3999,10 +4862,11 @@ public: + bool + pass_ipa_struct_reorg::gate (function *) + { +- return (optimize ++ return (optimize >= 3 + && flag_ipa_struct_reorg + /* Don't bother doing anything if the program has errors. */ +- && !seen_error ()); ++ && !seen_error () ++ && flag_lto_partition == LTO_PARTITION_ONE); + } + + } // anon namespace +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +index a58794070..ef7f4c780 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +@@ -120,6 +120,9 @@ private: + public: + tree newtype[max_split]; + bool visited; ++ /* Negative number means it has illegal allocated arrays ++ that we do not optimize. 
*/ ++ int has_alloc_array; + + // Constructors + srtype (tree type); +@@ -232,4 +235,34 @@ struct srdecl + + } // namespace struct_reorg + ++ ++namespace struct_relayout { ++ ++const int min_relayout_split = 8; ++const int max_relayout_split = 16; ++ ++struct csrtype ++{ ++ tree type; ++ unsigned HOST_WIDE_INT old_size; ++ unsigned HOST_WIDE_INT new_size; ++ unsigned field_count; ++ tree struct_size; ++ ++ // Constructors ++ csrtype () ++ : type (NULL), ++ old_size (0), ++ new_size (0), ++ field_count (0), ++ struct_size (NULL) ++ {} ++ ++ // Methods ++ unsigned calculate_field_num (tree field_offset); ++ void init_type_info (void); ++}; ++ ++} // namespace struct_relayout ++ + #endif +diff --git a/gcc/testsuite/g++.dg/struct/no-body-function.cpp b/gcc/testsuite/g++.dg/struct/no-body-function.cpp +new file mode 100644 +index 000000000..4e56e73fc +--- /dev/null ++++ b/gcc/testsuite/g++.dg/struct/no-body-function.cpp +@@ -0,0 +1,18 @@ ++/* { dg-do compile } */ ++/* { dg-options "-std=gnu++17 -Wno-builtin-declaration-mismatch -O3 -fwhole-program -flto-partition=one -fipa-struct-reorg -S" } */ ++ ++struct S { ++ int x; ++ double y; ++}; ++S f(); ++ ++const auto [x0, y0] = f(); ++const auto [x1, y1] = f(); ++ ++static union { ++int a; ++double b; ++}; ++ ++const auto [x2, y2] = f(); +diff --git a/gcc/testsuite/g++.dg/struct/struct-reorg-1.cpp b/gcc/testsuite/g++.dg/struct/struct-reorg-1.cpp +new file mode 100644 +index 000000000..6ab71abe1 +--- /dev/null ++++ b/gcc/testsuite/g++.dg/struct/struct-reorg-1.cpp +@@ -0,0 +1,13 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O3 -fwhole-program -flto-partition=one -fipa-struct-reorg -fdump-ipa-struct_reorg-details -S" } */ ++ ++struct Foo { int foo; int a; }; ++Foo& ignoreSetMutex = *(new Foo); ++ ++struct Goo { int goo; int a; }; ++ ++int main () ++{ ++ Goo* a; ++ return a->goo = 90; ++} +diff --git a/gcc/testsuite/g++.dg/struct/struct-reorg-2.cpp b/gcc/testsuite/g++.dg/struct/struct-reorg-2.cpp +new file mode 100644 +index 
000000000..72b7db8a9 +--- /dev/null ++++ b/gcc/testsuite/g++.dg/struct/struct-reorg-2.cpp +@@ -0,0 +1,17 @@ ++/* { dg-do run } */ ++/* { dg-options "-O3 -fwhole-program -flto-partition=one -fipa-struct-reorg -fdump-ipa-struct_reorg-details" } */ ++ ++#include ++ ++struct testg { ++ int b; ++ float c; ++}; ++ ++testg *testgvar; ++int main () ++{ ++ testgvar = (testg*) calloc(10, sizeof(testg)); ++ int b = testgvar->b; ++ return b; ++} +diff --git a/gcc/testsuite/g++.dg/struct/struct-reorg-3.cpp b/gcc/testsuite/g++.dg/struct/struct-reorg-3.cpp +new file mode 100644 +index 000000000..771164a96 +--- /dev/null ++++ b/gcc/testsuite/g++.dg/struct/struct-reorg-3.cpp +@@ -0,0 +1,24 @@ ++/* { dg-do run } */ ++/* { dg-options "-O3 -fwhole-program -flto-partition=one -fipa-struct-reorg -fdump-ipa-struct_reorg-details" } */ ++ ++#include ++ ++struct testg { ++ int b; ++ float c; ++ double d; ++ double e; ++ double f; ++ double h; ++ double i; ++ double j; ++ int k; ++}; ++ ++testg *testgvar; ++int main () ++{ ++ testgvar = (testg*) calloc(10, sizeof(testg)); ++ int b = testgvar->b; ++ return b; ++} +diff --git a/gcc/testsuite/g++.dg/struct/struct-reorg.exp b/gcc/testsuite/g++.dg/struct/struct-reorg.exp +new file mode 100644 +index 000000000..e3ffe1388 +--- /dev/null ++++ b/gcc/testsuite/g++.dg/struct/struct-reorg.exp +@@ -0,0 +1,26 @@ ++# Copyright (C) 2021-2023 Free Software Foundation, Inc. ++ ++# This program is free software; you can redistribute it and/or modify ++# it under the terms of the GNU General Public License as published by ++# the Free Software Foundation; either version 3 of the License, or ++# (at your option) any later version. ++# ++# This program is distributed in the hope that it will be useful, ++# but WITHOUT ANY WARRANTY; without even the implied warranty of ++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++# GNU General Public License for more details. 
++# ++# You should have received a copy of the GNU General Public License ++# along with GCC; see the file COPYING3. If not see ++# . ++ ++load_lib g++-dg.exp ++ ++# Initialize `dg'. ++dg-init ++ ++g++-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.cpp]] \ ++ "" "" ++ ++# All done. ++dg-finish +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/csr_1.c b/gcc/testsuite/gcc.dg/struct/csr_1.c +new file mode 100644 +index 000000000..811030bf1 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/csr_1.c +@@ -0,0 +1,60 @@ ++// { dg-do run } ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node* node_p; ++ ++struct node { ++ unsigned long a; ++ unsigned long b; ++ node_p c; ++ node_p d; ++ long e; ++ long f; ++ long g; ++ long h; ++ long i; ++ long j; ++ long k; ++ long l; ++ int m; ++ int n; ++}; ++ ++const int MAX = 10000; ++node_p n; ++ ++int ++main () ++{ ++ n = (node_p) calloc (MAX, sizeof (node_t)); ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ n[i].a = 100; ++ } ++ for (int i = 0; i < MAX; i++) ++ { ++ if (n[i].a != 100) ++ { ++ abort (); ++ } ++ } ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ n[i].l = n[i].a; ++ } ++ for (int i = 0; i < MAX; i++) ++ { ++ if (n[i].l != 100) ++ { ++ abort (); ++ } ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform in Complete Structure Relayout is 1" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/csr_allocation-1.c b/gcc/testsuite/gcc.dg/struct/csr_allocation-1.c +new file mode 100644 +index 000000000..63bb695ae +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/csr_allocation-1.c +@@ -0,0 +1,46 @@ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node* node_p; ++ ++struct node { ++ unsigned long a; ++ unsigned long b; ++ node_p c; ++ node_p d; ++ long e; ++ long f; ++ long g; ++ long h; ++ long i; ++ long j; ++ long k; ++ long l; ++ int m; ++ int n; ++}; ++ ++const int MAX = 1; ++node_p n; ++ ++int ++main () 
++{ ++ n = (node_p) calloc (MAX, sizeof (node_t)); ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ n[i].a = 100; ++ } ++ for (int i = 0; i < MAX; i++) ++ { ++ if (n[i].a != 100) ++ { ++ abort (); ++ } ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform in Complete Structure Relayout." "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/csr_allocation-2.c b/gcc/testsuite/gcc.dg/struct/csr_allocation-2.c +new file mode 100644 +index 000000000..0f75d5d12 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/csr_allocation-2.c +@@ -0,0 +1,59 @@ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node* node_p; ++ ++struct node { ++ unsigned long a; ++ unsigned long b; ++ node_p c; ++ node_p d; ++ long e; ++ long f; ++ long g; ++ long h; ++ long i; ++ long j; ++ long k; ++ long l; ++ int m; ++ int n; ++}; ++ ++const int MAX = 10; ++node_p n; ++node_p m; ++ ++int main() ++{ ++ int i; ++ for (i = 0; i < MAX / 5; i++) ++ { ++ n = (node_p) calloc(MAX, sizeof(node_t)); ++ if (i == 0) ++ { ++ m = n; ++ } ++ } ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ n[i].a = 100; ++ } ++ for (int i = 0; i < MAX; i++) ++ { ++ m[i].a = 50; ++ } ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ if (n[i].a != 100) ++ { ++ abort (); ++ } ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform in Complete Structure Relayout." 
"struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/csr_allocation-3.c b/gcc/testsuite/gcc.dg/struct/csr_allocation-3.c +new file mode 100644 +index 000000000..3dcb674c6 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/csr_allocation-3.c +@@ -0,0 +1,77 @@ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node* node_p; ++ ++struct node { ++ unsigned long a; ++ unsigned long b; ++ node_p c; ++ node_p d; ++ long e; ++ long f; ++ long g; ++ long h; ++ long i; ++ long j; ++ long k; ++ long l; ++ int m; ++ int n; ++}; ++ ++const int MAX = 10; ++node_p n; ++node_p m; ++ ++void test (int, int) __attribute__((noinline)); ++ ++void ++test (int num, int flag) ++{ ++ if (num <= 0) ++ { ++ return; ++ } ++ n = (node_p) calloc (num, sizeof (node_t)); ++ if (flag) ++ { ++ m = n; ++ } ++ return; ++} ++ ++int ++main () ++{ ++ test (MAX, 1); ++ test (MAX, 0); ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ n[i].a = 100; ++ } ++ for (int i = 0; i < MAX; i++) ++ { ++ m[i].a = 50; ++ } ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ if (n[i].a != 100) ++ { ++ abort (); ++ } ++ } ++ for (int i = 0; i < MAX; i++) ++ { ++ if (m[i].a != 50) ++ { ++ abort (); ++ } ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform in Complete Structure Relayout." 
"struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/csr_cast_int.c b/gcc/testsuite/gcc.dg/struct/csr_cast_int.c +new file mode 100644 +index 000000000..6907158c9 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/csr_cast_int.c +@@ -0,0 +1,52 @@ ++// { dg-do run } ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node* node_p; ++ ++struct node { ++ unsigned long a; ++ unsigned long b; ++ node_p c; ++ node_p d; ++ long e; ++ long f; ++ long g; ++ long h; ++ long i; ++ long j; ++ long k; ++ long l; ++ int m; ++ int n; ++}; ++ ++const int MAX = 100; ++node_p n; ++unsigned long y; ++ ++int ++main () ++{ ++ n = (node_p) calloc (MAX, sizeof (node_t)); ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ n[i].b = 50; ++ } ++ ++ node_p x = &n[5]; ++ y = (unsigned long) x; ++ y += 8; ++ ++ if (*((unsigned long*) y) != 50) ++ { ++ abort (); ++ } ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "struct node has escaped: \"Type escapes a cast from/to intergral type\"" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/csr_separate_instance.c b/gcc/testsuite/gcc.dg/struct/csr_separate_instance.c +new file mode 100644 +index 000000000..9e5e05838 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/csr_separate_instance.c +@@ -0,0 +1,48 @@ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node* node_p; ++ ++struct node { ++ unsigned long a; ++ unsigned long b; ++ node_p c; ++ node_p d; ++ long e; ++ long f; ++ long g; ++ long h; ++ long i; ++ long j; ++ long k; ++ long l; ++ int m; ++ int n; ++}; ++ ++const int MAX = 10000; ++node_p n; ++node_t t; ++ ++int ++main () ++{ ++ n = (node_p) calloc (MAX, sizeof (node_t)); ++ t.a = 100; ++ ++ for (int i = 0; i < MAX; i++) ++ { ++ n[i].a = t.a; ++ } ++ for (int i = 0; i < MAX; i++) ++ { ++ if (n[i].a != 100) ++ { ++ abort (); ++ } ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "struct node has escaped: \"Type escapes via a separate instance\"" "struct_reorg" } } 
*/ +diff --git a/gcc/testsuite/gcc.dg/struct/sr_address_of_field.c b/gcc/testsuite/gcc.dg/struct/sr_address_of_field.c +new file mode 100644 +index 000000000..9d58edab8 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/sr_address_of_field.c +@@ -0,0 +1,37 @@ ++/* { dg-do run } */ ++ ++static struct S { ++ int *p1; ++ int *p2; ++} s; ++ ++typedef __UINTPTR_TYPE__ uintptr_t; ++ ++int ++foo () ++{ ++ int i = 1; ++ int j = 2; ++ struct S s; ++ int **p; ++ s.p1 = &i; ++ s.p2 = &j; ++ p = &s.p1; ++ uintptr_t pi = (uintptr_t) p; ++ pi = pi + sizeof (int *); ++ p = (int **)pi; ++ **p = 3; ++ return j; ++} ++ ++int ++main () ++{ ++ if (foo () != 3) ++ { ++ __builtin_abort (); ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "struct S has escaped: \"Type escapes via taking the address of field\"" "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/sr_convert_mem.c b/gcc/testsuite/gcc.dg/struct/sr_convert_mem.c +new file mode 100644 +index 000000000..a99ee0de4 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/sr_convert_mem.c +@@ -0,0 +1,23 @@ ++/* { dg-do compile } */ ++ ++struct T1 { ++ long var1; ++ int var2; ++}; ++ ++struct T2 { ++ long var1; ++ int var2; ++}; ++ ++void test (void*); ++ ++__attribute__((used)) void ++foo (struct T2 *t2) ++{ ++ struct T1* t1 = (void *)(&t2[1]); ++ void* data = (void *)(&t1[1]); ++ ++ test(data); ++ return; ++} +diff --git a/gcc/testsuite/gcc.dg/struct/sr_maxmin_expr.c b/gcc/testsuite/gcc.dg/struct/sr_maxmin_expr.c +new file mode 100644 +index 000000000..fb135ef0b +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/sr_maxmin_expr.c +@@ -0,0 +1,25 @@ ++// { dg-do compile } ++ ++#include ++ ++struct S { ++ unsigned long a; ++ unsigned long b; ++}; ++ ++struct S* s; ++struct S* t = (struct S*) 1000; ++ ++int ++main () ++{ ++ s = (struct S*) calloc (1000, sizeof (struct S)); ++ s = s > t ? s : t; ++ if (s == 0) ++ { ++ abort (); ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform." 
"struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/sr_pointer_and.c b/gcc/testsuite/gcc.dg/struct/sr_pointer_and.c +new file mode 100644 +index 000000000..9a4b10d9a +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/sr_pointer_and.c +@@ -0,0 +1,17 @@ ++/* { dg-do compile } */ ++ ++struct test {long val; struct test* next; }; ++ ++unsigned long P_DATA; ++ ++void func (struct test*); ++ ++__attribute__((used)) static void ++foo (struct test* pt) ++{ ++ struct test t; ++ ++ t.next = (void *)((unsigned long)pt->next & P_DATA); ++ func(&t); ++ return; ++} +diff --git a/gcc/testsuite/gcc.dg/struct/sr_pointer_minus.c b/gcc/testsuite/gcc.dg/struct/sr_pointer_minus.c +new file mode 100644 +index 000000000..9a82da0d6 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/sr_pointer_minus.c +@@ -0,0 +1,33 @@ ++// { dg-do compile } ++ ++#include ++ ++typedef struct node node_t; ++typedef struct node* node_p; ++ ++struct node { ++ unsigned long a; ++ unsigned long b; ++}; ++ ++int max; ++int x; ++ ++node_p n; ++node_p z; ++ ++int ++main () ++{ ++ n = (node_p) calloc (max, sizeof (node_t)); ++ ++ node_p xp = &n[x]; ++ ++ if (xp - z == 10) ++ { ++ abort (); ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "struct node has escaped: \"Type escapes via a unhandled rewrite stmt\"" "struct_reorg" } } */ +-- +2.33.0 + diff --git a/0016-LoongArch-Use-explicit-relocs-for-GOT-access-when-me.patch b/0016-LoongArch-Use-explicit-relocs-for-GOT-access-when-me.patch new file mode 100644 index 0000000000000000000000000000000000000000..eccf1079c585209a386a755653c6663709c4c59b --- /dev/null +++ b/0016-LoongArch-Use-explicit-relocs-for-GOT-access-when-me.patch @@ -0,0 +1,212 @@ +From 8539e5560e7bf11473cc7c386043b7019264236a Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sat, 30 Sep 2023 18:46:28 +0800 +Subject: [PATCH 016/188] LoongArch: Use explicit relocs for GOT access when + -mexplicit-relocs=auto and LTO during a final link with linker plugin + +If we are performing LTO for a 
final link and linker plugin is enabled, +then we are sure any GOT access may resolve to a symbol out of the link +unit (otherwise the linker plugin will tell us the symbol should be +resolved locally and we'll use PC-relative access instead). + +Produce machine instructions with explicit relocs instead of la.global +for better scheduling. + +gcc/ChangeLog: + + * config/loongarch/loongarch-protos.h + (loongarch_explicit_relocs_p): Declare new function. + * config/loongarch/loongarch.cc (loongarch_explicit_relocs_p): + Implement. + (loongarch_symbol_insns): Call loongarch_explicit_relocs_p for + SYMBOL_GOT_DISP, instead of using TARGET_EXPLICIT_RELOCS. + (loongarch_split_symbol): Call loongarch_explicit_relocs_p for + deciding if return early, instead of using + TARGET_EXPLICIT_RELOCS. + (loongarch_output_move): CAll loongarch_explicit_relocs_p + instead of using TARGET_EXPLICIT_RELOCS. + * config/loongarch/loongarch.md (*low): Remove + TARGET_EXPLICIT_RELOCS from insn condition. + (@ld_from_got): Likewise. + * config/loongarch/predicates.md (move_operand): Call + loongarch_explicit_relocs_p instead of using + TARGET_EXPLICIT_RELOCS. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/explicit-relocs-auto-lto.c: New test. 
+--- + gcc/config/loongarch/loongarch-protos.h | 1 + + gcc/config/loongarch/loongarch.cc | 34 +++++++++++++++---- + gcc/config/loongarch/loongarch.md | 4 +-- + gcc/config/loongarch/predicates.md | 8 ++--- + .../loongarch/explicit-relocs-auto-lto.c | 26 ++++++++++++++ + 5 files changed, 59 insertions(+), 14 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-lto.c + +diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h +index 163162598..51d38177b 100644 +--- a/gcc/config/loongarch/loongarch-protos.h ++++ b/gcc/config/loongarch/loongarch-protos.h +@@ -220,4 +220,5 @@ extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int); + extern tree loongarch_build_builtin_va_list (void); + + extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool); ++extern bool loongarch_explicit_relocs_p (enum loongarch_symbol_type); + #endif /* ! GCC_LOONGARCH_PROTOS_H */ +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 3258c8655..1d20577e7 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -1922,6 +1922,29 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type) + gcc_unreachable (); + } + ++/* If -mexplicit-relocs=auto, we use machine operations with reloc hints ++ for cases where the linker is unable to relax so we can schedule the ++ machine operations, otherwise use an assembler pseudo-op so the ++ assembler will generate R_LARCH_RELAX. */ ++ ++bool ++loongarch_explicit_relocs_p (enum loongarch_symbol_type type) ++{ ++ if (la_opt_explicit_relocs != EXPLICIT_RELOCS_AUTO) ++ return la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS; ++ ++ /* If we are performing LTO for a final link, and we have the linker ++ plugin so we know the resolution of the symbols, then all GOT ++ references are binding to external symbols or preemptable symbols. ++ So the linker cannot relax them. 
*/ ++ return (in_lto_p ++ && !flag_incremental_link ++ && HAVE_LTO_PLUGIN == 2 ++ && (!global_options_set.x_flag_use_linker_plugin ++ || global_options.x_flag_use_linker_plugin) ++ && type == SYMBOL_GOT_DISP); ++} ++ + /* Returns the number of instructions necessary to reference a symbol. */ + + static int +@@ -1937,7 +1960,7 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode) + case SYMBOL_GOT_DISP: + /* The constant will have to be loaded from the GOT before it + is used in an address. */ +- if (!TARGET_EXPLICIT_RELOCS && mode != MAX_MACHINE_MODE) ++ if (!loongarch_explicit_relocs_p (type) && mode != MAX_MACHINE_MODE) + return 0; + + return 3; +@@ -3034,7 +3057,7 @@ loongarch_symbol_extreme_p (enum loongarch_symbol_type type) + If so, and if LOW_OUT is nonnull, emit the high part and store the + low part in *LOW_OUT. Leave *LOW_OUT unchanged otherwise. + +- Return false if build with '-mno-explicit-relocs'. ++ Return false if build with '-mexplicit-relocs=none'. + + TEMP is as for loongarch_force_temporary and is used to load the high + part into a register. +@@ -3048,12 +3071,9 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out) + { + enum loongarch_symbol_type symbol_type; + +- /* If build with '-mno-explicit-relocs', don't split symbol. 
*/ +- if (!TARGET_EXPLICIT_RELOCS) +- return false; +- + if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE) + || !loongarch_symbolic_constant_p (addr, &symbol_type) ++ || !loongarch_explicit_relocs_p (symbol_type) + || loongarch_symbol_insns (symbol_type, mode) == 0 + || !loongarch_split_symbol_type (symbol_type)) + return false; +@@ -4793,7 +4813,7 @@ loongarch_output_move (rtx dest, rtx src) + } + } + +- if (!TARGET_EXPLICIT_RELOCS ++ if (!loongarch_explicit_relocs_p (loongarch_classify_symbol (src)) + && dest_code == REG && symbolic_operand (src, VOIDmode)) + { + if (loongarch_classify_symbol (src) == SYMBOL_PCREL) +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 29ac950bf..81c97393b 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -2247,7 +2247,7 @@ + [(set (match_operand:P 0 "register_operand" "=r") + (lo_sum:P (match_operand:P 1 "register_operand" " r") + (match_operand:P 2 "symbolic_operand" "")))] +- "TARGET_EXPLICIT_RELOCS" ++ "" + "addi.\t%0,%1,%L2" + [(set_attr "type" "arith") + (set_attr "mode" "")]) +@@ -2275,7 +2275,7 @@ + (match_operand:P 1 "register_operand" "r") + (match_operand:P 2 "symbolic_operand")))] + UNSPEC_LOAD_FROM_GOT))] +- "TARGET_EXPLICIT_RELOCS" ++ "" + "ld.\t%0,%1,%L2" + [(set_attr "type" "move")] + ) +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index ad6cee5c4..6b50b3a4d 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -541,16 +541,14 @@ + case SYMBOL_REF: + case LABEL_REF: + return (loongarch_symbolic_constant_p (op, &symbol_type) +- && (!TARGET_EXPLICIT_RELOCS ++ && (!loongarch_explicit_relocs_p (symbol_type) + || !loongarch_split_symbol_type (symbol_type))); + + case HIGH: +- /* '-mno-explicit-relocs' don't generate high/low pairs. 
*/ +- if (!TARGET_EXPLICIT_RELOCS) +- return false; +- + op = XEXP (op, 0); ++ + return (loongarch_symbolic_constant_p (op, &symbol_type) ++ && loongarch_explicit_relocs_p (symbol_type) + && loongarch_split_symbol_type (symbol_type)); + + default: +diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-lto.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-lto.c +new file mode 100644 +index 000000000..f53b54689 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-lto.c +@@ -0,0 +1,26 @@ ++/* { dg-do link } */ ++/* { dg-require-effective-target lto } */ ++/* { dg-require-linker-plugin "" } */ ++/* { dg-options "-fpic -shared -O2 --save-temps -mexplicit-relocs=auto -flto -fuse-linker-plugin -flto-partition=one" } */ ++ ++int pcrel __attribute__ ((visibility ("hidden"))); ++int got __attribute__ ((visibility ("default"))); ++ ++int ++*addr_pcrel (void) ++{ ++ return &pcrel; ++} ++ ++int ++*addr_got (void) ++{ ++ return &got; ++} ++ ++/* With linker plugin we should use la.local (it can be relaxed to pcaddi), ++ but not la.global (we are pretty sure the linker cannot relax la.global ++ got). 
*/ ++/* { dg-final { scan-lto-assembler "la.local.*pcrel" } } */ ++/* { dg-final { scan-lto-assembler "pcalau12i.*%got_pc_hi20\\\(got\\\)" } } */ ++/* { dg-final { scan-lto-assembler "ld.*%got_pc_lo12\\\(got\\\)" } } */ +-- +2.43.0 + diff --git a/0016-libsanitizer-fix-isoc23-function-interception.patch b/0016-libsanitizer-fix-isoc23-function-interception.patch new file mode 100644 index 0000000000000000000000000000000000000000..7d4c70bef977b09e5646d37bc0e25d7b8f09c1ed --- /dev/null +++ b/0016-libsanitizer-fix-isoc23-function-interception.patch @@ -0,0 +1,104 @@ +From add6d92567cfdc16e0acfaf73fb4b8cbc213661f Mon Sep 17 00:00:00 2001 +From: swcompiler +Date: Mon, 25 Nov 2024 17:15:16 +0800 +Subject: [PATCH 16/16] libsanitizer: fix isoc23 function interception for + glibc-2.38 + +--- + libsanitizer/asan/asan_interceptors.cpp | 55 +++++++++++++++---------- + 1 file changed, 33 insertions(+), 22 deletions(-) + +diff --git a/libsanitizer/asan/asan_interceptors.cpp b/libsanitizer/asan/asan_interceptors.cpp +index b28909152..ae1c9bfcb 100644 +--- a/libsanitizer/asan/asan_interceptors.cpp ++++ b/libsanitizer/asan/asan_interceptors.cpp +@@ -41,6 +41,8 @@ + # define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" + # elif defined(__mips__) && SANITIZER_LINUX + # define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2" ++# elif defined(__sw_64__) ++# define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1" + # endif + + namespace __asan { +@@ -472,19 +474,32 @@ INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) { + return REAL(strncpy)(to, from, size); + } + +-INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) { +- void *ctx; +- ASAN_INTERCEPTOR_ENTER(ctx, strtol); +- ENSURE_ASAN_INITED(); +- if (!flags()->replace_str) { +- return REAL(strtol)(nptr, endptr, base); +- } ++template ++static ALWAYS_INLINE auto StrtolImpl(void *ctx, Fn real, const char *nptr, ++ char **endptr, int base) ++ -> decltype(real(nullptr, nullptr, 0)) { ++ if (!flags()->replace_str) ++ return 
real(nptr, endptr, base); + char *real_endptr; +- long result = REAL(strtol)(nptr, &real_endptr, base); ++ auto res = real(nptr, &real_endptr, base); + StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); +- return result; ++ return res; + } + ++# define INTERCEPTOR_STRTO_BASE(ret_type, func) \ ++ INTERCEPTOR(ret_type, func, const char *nptr, char **endptr, int base) { \ ++ void *ctx; \ ++ ASAN_INTERCEPTOR_ENTER(ctx, func); \ ++ ENSURE_ASAN_INITED(); \ ++ return StrtolImpl(ctx, REAL(func), nptr, endptr, base); \ ++ } ++ ++INTERCEPTOR_STRTO_BASE(long, strtol) ++ ++#if SANITIZER_GLIBC ++INTERCEPTOR_STRTO_BASE(long, __isoc23_strtol) ++#endif ++ + INTERCEPTOR(int, atoi, const char *nptr) { + void *ctx; + ASAN_INTERCEPTOR_ENTER(ctx, atoi); +@@ -524,18 +539,11 @@ INTERCEPTOR(long, atol, const char *nptr) { + } + + #if ASAN_INTERCEPT_ATOLL_AND_STRTOLL +-INTERCEPTOR(long long, strtoll, const char *nptr, char **endptr, int base) { +- void *ctx; +- ASAN_INTERCEPTOR_ENTER(ctx, strtoll); +- ENSURE_ASAN_INITED(); +- if (!flags()->replace_str) { +- return REAL(strtoll)(nptr, endptr, base); +- } +- char *real_endptr; +- long long result = REAL(strtoll)(nptr, &real_endptr, base); +- StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base); +- return result; +-} ++INTERCEPTOR_STRTO_BASE(long long, strtoll) ++ ++#if SANITIZER_GLIBC ++INTERCEPTOR_STRTO_BASE(long long, __isoc23_strtoll) ++#endif + + INTERCEPTOR(long long, atoll, const char *nptr) { + void *ctx; +@@ -639,7 +647,10 @@ void InitializeAsanInterceptors() { + ASAN_INTERCEPT_FUNC(atoll); + ASAN_INTERCEPT_FUNC(strtoll); + #endif +- ++#if SANITIZER_GLIBC ++ ASAN_INTERCEPT_FUNC(__isoc23_strtol); ++ ASAN_INTERCEPT_FUNC(__isoc23_strtoll); ++#endif + // Intecept jump-related functions. 
+ ASAN_INTERCEPT_FUNC(longjmp); + +-- +2.25.1 + diff --git a/0017-LoongArch-Use-explicit-relocs-for-TLS-access-with-me.patch b/0017-LoongArch-Use-explicit-relocs-for-TLS-access-with-me.patch new file mode 100644 index 0000000000000000000000000000000000000000..e4a1f37417b435993d3393c9c66cd91616eee279 --- /dev/null +++ b/0017-LoongArch-Use-explicit-relocs-for-TLS-access-with-me.patch @@ -0,0 +1,146 @@ +From 23b4166c6699a1a3063b11fa45497c1a1524bd48 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Mon, 2 Oct 2023 13:00:18 +0800 +Subject: [PATCH 017/188] LoongArch: Use explicit relocs for TLS access with + -mexplicit-relocs=auto + +The linker does not know how to relax TLS access for LoongArch, so let's +emit machine instructions with explicit relocs for TLS. + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc (loongarch_explicit_relocs_p): + Return true for TLS symbol types if -mexplicit-relocs=auto. + (loongarch_call_tls_get_addr): Replace TARGET_EXPLICIT_RELOCS + with la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE. + (loongarch_legitimize_tls_address): Likewise. + * config/loongarch/loongarch.md (@tls_low): Remove + TARGET_EXPLICIT_RELOCS from insn condition. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c: New + test. + * gcc.target/loongarch/explicit-relocs-auto-tls-le-ie.c: New + test. 
+--- + gcc/config/loongarch/loongarch.cc | 37 ++++++++++++------- + gcc/config/loongarch/loongarch.md | 2 +- + .../explicit-relocs-auto-tls-ld-gd.c | 9 +++++ + .../explicit-relocs-auto-tls-le-ie.c | 6 +++ + 4 files changed, 40 insertions(+), 14 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-le-ie.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 1d20577e7..fa5c14be6 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -1933,16 +1933,27 @@ loongarch_explicit_relocs_p (enum loongarch_symbol_type type) + if (la_opt_explicit_relocs != EXPLICIT_RELOCS_AUTO) + return la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS; + +- /* If we are performing LTO for a final link, and we have the linker +- plugin so we know the resolution of the symbols, then all GOT +- references are binding to external symbols or preemptable symbols. +- So the linker cannot relax them. */ +- return (in_lto_p +- && !flag_incremental_link +- && HAVE_LTO_PLUGIN == 2 +- && (!global_options_set.x_flag_use_linker_plugin +- || global_options.x_flag_use_linker_plugin) +- && type == SYMBOL_GOT_DISP); ++ switch (type) ++ { ++ case SYMBOL_TLS_IE: ++ case SYMBOL_TLS_LE: ++ case SYMBOL_TLSGD: ++ case SYMBOL_TLSLDM: ++ /* The linker don't know how to relax TLS accesses. */ ++ return true; ++ case SYMBOL_GOT_DISP: ++ /* If we are performing LTO for a final link, and we have the ++ linker plugin so we know the resolution of the symbols, then ++ all GOT references are binding to external symbols or ++ preemptable symbols. So the linker cannot relax them. 
*/ ++ return (in_lto_p ++ && !flag_incremental_link ++ && HAVE_LTO_PLUGIN == 2 ++ && (!global_options_set.x_flag_use_linker_plugin ++ || global_options.x_flag_use_linker_plugin)); ++ default: ++ return false; ++ } + } + + /* Returns the number of instructions necessary to reference a symbol. */ +@@ -2749,7 +2760,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0) + + start_sequence (); + +- if (TARGET_EXPLICIT_RELOCS) ++ if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE) + { + /* Split tls symbol to high and low. */ + rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc)); +@@ -2914,7 +2925,7 @@ loongarch_legitimize_tls_address (rtx loc) + tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM); + tmp1 = gen_reg_rtx (Pmode); + dest = gen_reg_rtx (Pmode); +- if (TARGET_EXPLICIT_RELOCS) ++ if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE) + { + tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_IE); + tmp3 = gen_reg_rtx (Pmode); +@@ -2951,7 +2962,7 @@ loongarch_legitimize_tls_address (rtx loc) + tmp1 = gen_reg_rtx (Pmode); + dest = gen_reg_rtx (Pmode); + +- if (TARGET_EXPLICIT_RELOCS) ++ if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE) + { + tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_LE); + tmp3 = gen_reg_rtx (Pmode); +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 81c97393b..3b836d535 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -2257,7 +2257,7 @@ + (unspec:P [(mem:P (lo_sum:P (match_operand:P 1 "register_operand" "r") + (match_operand:P 2 "symbolic_operand" "")))] + UNSPEC_TLS_LOW))] +- "TARGET_EXPLICIT_RELOCS" ++ "" + "addi.\t%0,%1,%L2" + [(set_attr "type" "arith") + (set_attr "mode" "")]) +diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c +new file mode 100644 +index 000000000..957ff98df +--- /dev/null ++++ 
b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c +@@ -0,0 +1,9 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto" } */ ++ ++__thread int a __attribute__((visibility("hidden"))); ++extern __thread int b __attribute__((visibility("default"))); ++ ++int test() { return a + b; } ++ ++/* { dg-final { scan-assembler-not "la.tls" { target tls_native } } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-le-ie.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-le-ie.c +new file mode 100644 +index 000000000..78898cfc6 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-le-ie.c +@@ -0,0 +1,6 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -mexplicit-relocs=auto" } */ ++ ++#include "explicit-relocs-auto-tls-ld-gd.c" ++ ++/* { dg-final { scan-assembler-not "la.tls" { target tls_native } } } */ +-- +2.43.0 + diff --git a/0017-StructReorg-Some-bugfix-for-structure-reorganization.patch b/0017-StructReorg-Some-bugfix-for-structure-reorganization.patch new file mode 100644 index 0000000000000000000000000000000000000000..2e06afe123ff731f5fc9f96f2e3779cac380ba17 --- /dev/null +++ b/0017-StructReorg-Some-bugfix-for-structure-reorganization.patch @@ -0,0 +1,489 @@ +From 2b4db34d3b21ff8597373e9e67858b3b60cc7dae Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Fri, 21 Jul 2023 11:20:51 +0800 +Subject: [PATCH 17/22] [StructReorg] Some bugfix for structure reorganization + +Some bugfix for structure reorganization, +1. disable type simplify in LTO within optimizations +2. only enable optimizations in C language +3. use new to initialize allocated memory in symbol-summary.h +4. 
cover escape scenarios not considered +--- + gcc/ipa-free-lang-data.cc | 11 ++ + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 101 +++++++++++-------- + gcc/symbol-summary.h | 13 ++- + gcc/testsuite/gcc.dg/struct/struct_reorg-5.c | 31 ++++++ + gcc/testsuite/gcc.dg/struct/struct_reorg-6.c | 54 ++++++++++ + gcc/testsuite/gcc.dg/struct/struct_reorg-7.c | 38 +++++++ + gcc/testsuite/gcc.dg/struct/struct_reorg-8.c | 25 +++++ + gcc/testsuite/gcc.dg/struct/struct_reorg-9.c | 54 ++++++++++ + 8 files changed, 283 insertions(+), 44 deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-5.c + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-6.c + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-7.c + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-8.c + create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-9.c + +diff --git a/gcc/ipa-free-lang-data.cc b/gcc/ipa-free-lang-data.cc +index a74215685..5450be9fe 100644 +--- a/gcc/ipa-free-lang-data.cc ++++ b/gcc/ipa-free-lang-data.cc +@@ -102,6 +102,12 @@ fld_worklist_push (tree t, class free_lang_data_d *fld) + static tree + fld_simplified_type_name (tree type) + { ++ /* Simplify type will cause that struct A and struct A within ++ struct B are different type pointers, so skip it in structure ++ optimizations. */ ++ if (flag_ipa_struct_reorg) ++ return TYPE_NAME (type); ++ + if (!TYPE_NAME (type) || TREE_CODE (TYPE_NAME (type)) != TYPE_DECL) + return TYPE_NAME (type); + /* Drop TYPE_DECLs in TYPE_NAME in favor of the identifier in the +@@ -340,6 +346,11 @@ fld_simplified_type (tree t, class free_lang_data_d *fld) + { + if (!t) + return t; ++ /* Simplify type will cause that struct A and struct A within ++ struct B are different type pointers, so skip it in structure ++ optimizations. */ ++ if (flag_ipa_struct_reorg) ++ return t; + if (POINTER_TYPE_P (t)) + return fld_incomplete_type_of (t, fld); + /* FIXME: This triggers verification error, see PR88140. 
*/ +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +index c8b975a92..9f790b28b 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -105,6 +105,7 @@ along with GCC; see the file COPYING3. If not see + #include "ipa-param-manipulation.h" + #include "gimplify-me.h" + #include "cfgloop.h" ++#include "langhooks.h" + + namespace { + +@@ -196,6 +197,39 @@ gimplify_build1 (gimple_stmt_iterator *gsi, enum tree_code code, tree type, + GSI_SAME_STMT); + } + ++/* Check whether in C language or LTO with only C language. */ ++ ++static bool ++lang_c_p (void) ++{ ++ const char *language_string = lang_hooks.name; ++ ++ if (!language_string) ++ return false; ++ ++ if (strcmp (language_string, "GNU GIMPLE") == 0) ++ { ++ unsigned i = 0; ++ tree t = NULL; ++ const char *unit_string = NULL; ++ ++ FOR_EACH_VEC_SAFE_ELT (all_translation_units, i, t) ++ { ++ unit_string = TRANSLATION_UNIT_LANGUAGE (t); ++ if (!unit_string ++ || (strncmp (unit_string, "GNU C", 5) != 0) ++ || (!ISDIGIT (unit_string[5]))) ++ return false; ++ } ++ return true; ++ } ++ else if (strncmp (language_string, "GNU C", 5) == 0 ++ && ISDIGIT (language_string[5])) ++ return true; ++ ++ return false; ++} ++ + enum srmode + { + NORMAL = 0, +@@ -1018,7 +1052,6 @@ public: + void analyze_types (void); + void clear_visited (void); + bool create_new_types (void); +- void restore_field_type (void); + void create_new_decls (void); + srdecl *find_decl (tree); + void create_new_functions (void); +@@ -2107,7 +2140,12 @@ ipa_struct_reorg::find_vars (gimple *stmt) + srtype *t = find_type (inner_type (TREE_TYPE (rhs))); + srdecl *d = find_decl (lhs); + if (!d && t) +- current_function->record_decl (t, lhs, -1); ++ { ++ current_function->record_decl (t, lhs, -1); ++ tree var = SSA_NAME_VAR (lhs); ++ if (var && VOID_POINTER_P (TREE_TYPE (var))) ++ current_function->record_decl (t, var, -1); ++ } + } + if (TREE_CODE (rhs) == 
SSA_NAME + && VOID_POINTER_P (TREE_TYPE (rhs)) +@@ -2116,7 +2154,12 @@ ipa_struct_reorg::find_vars (gimple *stmt) + srtype *t = find_type (inner_type (TREE_TYPE (lhs))); + srdecl *d = find_decl (rhs); + if (!d && t) +- current_function->record_decl (t, rhs, -1); ++ { ++ current_function->record_decl (t, rhs, -1); ++ tree var = SSA_NAME_VAR (rhs); ++ if (var && VOID_POINTER_P (TREE_TYPE (var))) ++ current_function->record_decl (t, var, -1); ++ } + } + } + else +@@ -2796,8 +2839,14 @@ ipa_struct_reorg::maybe_record_call (cgraph_node *node, gcall *stmt) + if (escapes != does_not_escape) + { + for (unsigned i = 0; i < gimple_call_num_args (stmt); i++) +- mark_type_as_escape (TREE_TYPE (gimple_call_arg (stmt, i)), +- escapes); ++ { ++ mark_type_as_escape (TREE_TYPE (gimple_call_arg (stmt, i)), ++ escapes); ++ srdecl *d = current_function->find_decl ( ++ gimple_call_arg (stmt, i)); ++ if (d) ++ d->type->mark_escape (escapes, stmt); ++ } + return; + } + +@@ -3731,42 +3780,6 @@ ipa_struct_reorg::analyze_types (void) + } + } + +-/* When struct A has a struct B member, B's type info +- is not stored in +- TYPE_FIELDS (TREE_TYPE (TYPE_FIELDS (typeA))) +- Try to restore B's type information. */ +- +-void +-ipa_struct_reorg::restore_field_type (void) +-{ +- for (unsigned i = 0; i < types.length (); i++) +- { +- for (unsigned j = 0; j < types[i]->fields.length (); j++) +- { +- srfield *field = types[i]->fields[j]; +- if (TREE_CODE (inner_type (field->fieldtype)) == RECORD_TYPE) +- { +- /* If field type has TYPE_FIELDS information, +- we do not need to do this. 
*/ +- if (TYPE_FIELDS (field->type->type) != NULL) +- continue; +- for (unsigned k = 0; k < types.length (); k++) +- { +- if (i == k) +- continue; +- const char *type1 = get_type_name (field->type->type); +- const char *type2 = get_type_name (types[k]->type); +- if (type1 == NULL || type2 == NULL) +- continue; +- if (type1 == type2 +- && TYPE_FIELDS (types[k]->type)) +- field->type = types[k]; +- } +- } +- } +- } +-} +- + /* Create all new types we want to create. */ + + bool +@@ -4647,7 +4660,6 @@ ipa_struct_reorg::rewrite_functions (void) + { + unsigned retval = 0; + +- restore_field_type (); + /* Create new types, if we did not create any new types, + then don't rewrite any accesses. */ + if (!create_new_types ()) +@@ -4866,7 +4878,10 @@ pass_ipa_struct_reorg::gate (function *) + && flag_ipa_struct_reorg + /* Don't bother doing anything if the program has errors. */ + && !seen_error () +- && flag_lto_partition == LTO_PARTITION_ONE); ++ && flag_lto_partition == LTO_PARTITION_ONE ++ /* Only enable struct optimizations in C since other ++ languages' grammar forbid. */ ++ && lang_c_p ()); + } + + } // anon namespace +diff --git a/gcc/symbol-summary.h b/gcc/symbol-summary.h +index c54d3084c..3fe64047c 100644 +--- a/gcc/symbol-summary.h ++++ b/gcc/symbol-summary.h +@@ -103,6 +103,12 @@ protected: + /* Allocates new data that are stored within map. */ + T* allocate_new () + { ++ /* In structure optimizatons, we call new to ensure that ++ the allocated memory is initialized to 0. */ ++ if (flag_ipa_struct_reorg) ++ return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T () ++ : new T (); ++ + /* Call gcc_internal_because we do not want to call finalizer for + a type T. We call dtor explicitly. */ + return is_ggc () ? 
new (ggc_internal_alloc (sizeof (T))) T () +@@ -115,7 +121,12 @@ protected: + if (is_ggc ()) + ggc_delete (item); + else +- m_allocator.remove (item); ++ { ++ if (flag_ipa_struct_reorg) ++ delete item; ++ else ++ m_allocator.remove (item); ++ } + } + + /* Unregister all call-graph hooks. */ +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-5.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-5.c +new file mode 100644 +index 000000000..273baa9a3 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-5.c +@@ -0,0 +1,31 @@ ++/* { dg-do compile } */ ++/* { dg-additional-options "-flto -fno-use-linker-plugin" } */ ++ ++struct D ++{ ++ int n; ++ int c [8]; ++}; ++ ++struct A ++{ ++ int i; ++ char *p; ++}; ++ ++struct B ++{ ++ struct A *a; ++ struct D *d; ++}; ++ ++int dtInsert1 (struct B *b) ++{ ++ struct A a = { 0, 0 }; ++ struct D *d; ++ b->a = &a; ++ d = b->d; ++ &d->c [d->n]; ++ return 0; ++} ++ +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-6.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-6.c +new file mode 100644 +index 000000000..455f9b501 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-6.c +@@ -0,0 +1,54 @@ ++/* { dg-do compile } */ ++/* { dg-additional-options "-flto -fno-use-linker-plugin" } */ ++ ++typedef struct basic_block_def *basic_block; ++typedef struct gimple_seq_node_d *gimple_seq_node; ++typedef struct gimple_seq_d *gimple_seq; ++typedef struct ++{ ++ gimple_seq_node ptr; ++ gimple_seq seq; ++ basic_block bb; ++} gimple_stmt_iterator; ++typedef void *gimple; ++extern void exit(int); ++struct gimple_seq_node_d ++{ ++ gimple stmt; ++ struct gimple_seq_node_d *next; ++}; ++struct gimple_seq_d ++{ ++}; ++static __inline__ gimple_stmt_iterator ++gsi_start (gimple_seq seq) ++{ ++ gimple_stmt_iterator i; ++ i.seq = seq; ++ return i; ++} ++static __inline__ unsigned char ++gsi_end_p (gimple_stmt_iterator i) ++{ ++ return i.ptr == ((void *)0); ++} ++static __inline__ void ++gsi_next (gimple_stmt_iterator *i) ++{ ++ i->ptr = 
i->ptr->next; ++} ++static __inline__ gimple ++gsi_stmt (gimple_stmt_iterator i) ++{ ++ return i.ptr->stmt; ++} ++void ++c_warn_unused_result (gimple_seq seq) ++{ ++ gimple_stmt_iterator i; ++ for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i)) ++ { ++ gimple g = gsi_stmt (i); ++ if (!g) exit(0); ++ } ++} +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-7.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-7.c +new file mode 100644 +index 000000000..afc0bd86c +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-7.c +@@ -0,0 +1,38 @@ ++/* { dg-do run } */ ++ ++#include ++#include ++ ++struct gki_elem { ++ char *key; ++ int idx; ++}; ++ ++typedef struct { ++ struct gki_elem *table; ++ ++ int primelevel; ++ int nhash; ++ int nkeys; ++} GKI; ++ ++void * ++sre_malloc(size_t size) ++{ ++ void *ptr = malloc (size); ++ return ptr; ++} ++ ++__attribute__((noinline)) int ++GKIStoreKey(GKI *hash) ++{ ++ hash->table = sre_malloc(sizeof(struct gki_elem)); ++} ++ ++int ++main () ++{ ++ GKI *hash = malloc (sizeof(GKI)); ++ GKIStoreKey(hash); ++ return 0; ++} +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-8.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-8.c +new file mode 100644 +index 000000000..9bcfaf368 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-8.c +@@ -0,0 +1,25 @@ ++/* { dg-do run } */ ++ ++#include ++#include ++#include ++ ++typedef struct { ++ unsigned char blue; ++ unsigned char green; ++} Pixel; ++ ++typedef struct { ++ unsigned short colormaplength; ++ Pixel *colormapdata; ++} TargaImage; ++ ++TargaImage *img; ++ ++int main() { ++ img = (TargaImage *) malloc( sizeof(TargaImage) ); ++ if (img->colormaplength > 0) { ++ img->colormapdata = (Pixel *) malloc(sizeof(Pixel) * img->colormaplength); ++ memset(img->colormapdata, 0, (sizeof(Pixel) * img->colormaplength) ); ++ } ++} +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-9.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-9.c +new file mode 100644 +index 000000000..052f4e3bd +--- 
/dev/null ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-9.c +@@ -0,0 +1,54 @@ ++/* { dg-do run } */ ++ ++extern void abort(void); ++ ++struct packed_ushort { ++ unsigned short ucs; ++} __attribute__((packed)); ++ ++struct source { ++ int pos, length; ++}; ++ ++static int flag; ++ ++static void __attribute__((noinline)) fetch(struct source *p) ++{ ++ p->length = 128; ++} ++ ++static struct packed_ushort __attribute__((noinline)) next(struct source *p) ++{ ++ struct packed_ushort rv; ++ ++ if (p->pos >= p->length) { ++ if (flag) { ++ flag = 0; ++ fetch(p); ++ return next(p); ++ } ++ flag = 1; ++ rv.ucs = 0xffff; ++ return rv; ++ } ++ rv.ucs = 0; ++ return rv; ++} ++ ++int main(void) ++{ ++ struct source s; ++ int i; ++ ++ s.pos = 0; ++ s.length = 0; ++ flag = 0; ++ ++ for (i = 0; i < 16; i++) { ++ struct packed_ushort rv = next(&s); ++ if ((i == 0 && rv.ucs != 0xffff) ++ || (i > 0 && rv.ucs != 0)) ++ abort(); ++ } ++ return 0; ++} +-- +2.33.0 + diff --git a/0017-Sw64-Port-Fix-target-explicit_mask.patch b/0017-Sw64-Port-Fix-target-explicit_mask.patch new file mode 100644 index 0000000000000000000000000000000000000000..44cf058045d7da5d8bb9617e2200ea9e90b9988a --- /dev/null +++ b/0017-Sw64-Port-Fix-target-explicit_mask.patch @@ -0,0 +1,14 @@ +diff --git a/gcc/config/sw_64/sw_64.opt b/gcc/config/sw_64/sw_64.opt +index c818dff40..22d0cdd5d 100644 +--- a/gcc/config/sw_64/sw_64.opt ++++ b/gcc/config/sw_64/sw_64.opt +@@ -21,6 +21,9 @@ msw-use-32align + C C++ Fortran LTO Driver Target Mask(SW_32ALIGN) Save + Use or not use 32align. + ++TargetVariable ++uint64_t sw_64_dummy_target = 0 ++ + fsw-sf-cmpsel + Target Var(flag_sw_sf_cmpsel) Init(0) + use or not use SF cmp/br/selcet instructions. 
diff --git a/0018-LoongArch-Use-explicit-relocs-for-addresses-only-use.patch b/0018-LoongArch-Use-explicit-relocs-for-addresses-only-use.patch new file mode 100644 index 0000000000000000000000000000000000000000..b2962b8c5cf8832986f017a6b6f964428cc40455 --- /dev/null +++ b/0018-LoongArch-Use-explicit-relocs-for-addresses-only-use.patch @@ -0,0 +1,245 @@ +From c29a4f4fb5ff24ef975ba27688a3da696aa7d006 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sun, 1 Oct 2023 11:14:29 +0800 +Subject: [PATCH 018/188] LoongArch: Use explicit relocs for addresses only + used for one load or store with -mexplicit-relocs=auto and + -mcmodel={normal,medium} + +In these cases, if we use explicit relocs, we end up with 2 +instructions: + + pcalau12i t0, %pc_hi20(x) + ld.d t0, t0, %pc_lo12(x) + +If we use la.local pseudo-op, in the best scenario (x is in +/- 2MiB +range) we still have 2 instructions: + + pcaddi t0, %pcrel_20(x) + ld.d t0, t0, 0 + +If x is out of the range we'll have 3 instructions. So for these cases +just emit machine instructions with explicit relocs. + +gcc/ChangeLog: + + * config/loongarch/predicates.md (symbolic_pcrel_operand): New + predicate. + * config/loongarch/loongarch.md (define_peephole2): Optimize + la.local + ld/st to pcalau12i + ld/st if the address is only used + once if -mexplicit-relocs=auto and -mcmodel=normal or medium. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/explicit-relocs-auto-single-load-store.c: + New test. + * gcc.target/loongarch/explicit-relocs-auto-single-load-store-no-anchor.c: + New test. 
+--- + gcc/config/loongarch/loongarch.md | 122 ++++++++++++++++++ + gcc/config/loongarch/predicates.md | 7 + + ...-relocs-auto-single-load-store-no-anchor.c | 6 + + .../explicit-relocs-auto-single-load-store.c | 14 ++ + 4 files changed, 149 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-no-anchor.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 3b836d535..c4c6baa60 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -65,6 +65,7 @@ + + UNSPEC_LOAD_FROM_GOT + UNSPEC_PCALAU12I ++ UNSPEC_PCALAU12I_GR + UNSPEC_ORI_L_LO12 + UNSPEC_LUI_L_HI20 + UNSPEC_LUI_H_LO20 +@@ -2297,6 +2298,16 @@ + "pcalau12i\t%0,%%pc_hi20(%1)" + [(set_attr "type" "move")]) + ++;; @pcalau12i may be used for sibcall so it has a strict constraint. This ++;; allows any general register as the operand. ++(define_insn "@pcalau12i_gr" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (unspec:P [(match_operand:P 1 "symbolic_operand" "")] ++ UNSPEC_PCALAU12I_GR))] ++ "" ++ "pcalau12i\t%0,%%pc_hi20(%1)" ++ [(set_attr "type" "move")]) ++ + (define_insn "@ori_l_lo12" + [(set (match_operand:P 0 "register_operand" "=r") + (unspec:P [(match_operand:P 1 "register_operand" "r") +@@ -3748,6 +3759,117 @@ + [(set_attr "type" "unknown") + (set_attr "mode" "")]) + ++;; With normal or medium code models, if the only use of a pc-relative ++;; address is for loading or storing a value, then relying on linker ++;; relaxation is not better than emitting the machine instruction directly. 
++;; Even if the la.local pseudo op can be relaxed, we get: ++;; ++;; pcaddi $t0, %pcrel_20(x) ++;; ld.d $t0, $t0, 0 ++;; ++;; There are still two instructions, same as using the machine instructions ++;; and explicit relocs: ++;; ++;; pcalau12i $t0, %pc_hi20(x) ++;; ld.d $t0, $t0, %pc_lo12(x) ++;; ++;; And if the pseudo op cannot be relaxed, we'll get a worse result (with ++;; 3 instructions). ++(define_peephole2 ++ [(set (match_operand:P 0 "register_operand") ++ (match_operand:P 1 "symbolic_pcrel_operand")) ++ (set (match_operand:GPR 2 "register_operand") ++ (mem:GPR (match_dup 0)))] ++ "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ ++ && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ ++ && (peep2_reg_dead_p (2, operands[0]) \ ++ || REGNO (operands[0]) == REGNO (operands[2]))" ++ [(set (match_dup 2) (mem:GPR (lo_sum:P (match_dup 0) (match_dup 1))))] ++ { ++ emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); ++ }) ++ ++(define_peephole2 ++ [(set (match_operand:P 0 "register_operand") ++ (match_operand:P 1 "symbolic_pcrel_operand")) ++ (set (match_operand:GPR 2 "register_operand") ++ (mem:GPR (plus (match_dup 0) ++ (match_operand 3 "const_int_operand"))))] ++ "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ ++ && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ ++ && (peep2_reg_dead_p (2, operands[0]) \ ++ || REGNO (operands[0]) == REGNO (operands[2]))" ++ [(set (match_dup 2) (mem:GPR (lo_sum:P (match_dup 0) (match_dup 1))))] ++ { ++ operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3])); ++ emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); ++ }) ++ ++(define_peephole2 ++ [(set (match_operand:P 0 "register_operand") ++ (match_operand:P 1 "symbolic_pcrel_operand")) ++ (set (match_operand:GPR 2 "register_operand") ++ (any_extend:GPR (mem:SUBDI (match_dup 0))))] ++ "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ ++ && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ ++ && (peep2_reg_dead_p (2, operands[0]) \ ++ || REGNO 
(operands[0]) == REGNO (operands[2]))" ++ [(set (match_dup 2) ++ (any_extend:GPR (mem:SUBDI (lo_sum:P (match_dup 0) ++ (match_dup 1)))))] ++ { ++ emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); ++ }) ++ ++(define_peephole2 ++ [(set (match_operand:P 0 "register_operand") ++ (match_operand:P 1 "symbolic_pcrel_operand")) ++ (set (match_operand:GPR 2 "register_operand") ++ (any_extend:GPR ++ (mem:SUBDI (plus (match_dup 0) ++ (match_operand 3 "const_int_operand")))))] ++ "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ ++ && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ ++ && (peep2_reg_dead_p (2, operands[0]) \ ++ || REGNO (operands[0]) == REGNO (operands[2]))" ++ [(set (match_dup 2) ++ (any_extend:GPR (mem:SUBDI (lo_sum:P (match_dup 0) ++ (match_dup 1)))))] ++ { ++ operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3])); ++ emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); ++ }) ++ ++(define_peephole2 ++ [(set (match_operand:P 0 "register_operand") ++ (match_operand:P 1 "symbolic_pcrel_operand")) ++ (set (mem:QHWD (match_dup 0)) ++ (match_operand:QHWD 2 "register_operand"))] ++ "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ ++ && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ ++ && (peep2_reg_dead_p (2, operands[0])) \ ++ && REGNO (operands[0]) != REGNO (operands[2])" ++ [(set (mem:QHWD (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))] ++ { ++ emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); ++ }) ++ ++(define_peephole2 ++ [(set (match_operand:P 0 "register_operand") ++ (match_operand:P 1 "symbolic_pcrel_operand")) ++ (set (mem:QHWD (plus (match_dup 0) ++ (match_operand 3 "const_int_operand"))) ++ (match_operand:QHWD 2 "register_operand"))] ++ "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ ++ && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ ++ && (peep2_reg_dead_p (2, operands[0])) \ ++ && REGNO (operands[0]) != REGNO (operands[2])" ++ [(set (mem:QHWD (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))] ++ 
{ ++ operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3])); ++ emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); ++ }) ++ + ;; Synchronization instructions. + + (include "sync.md") +diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md +index 6b50b3a4d..1d669f560 100644 +--- a/gcc/config/loongarch/predicates.md ++++ b/gcc/config/loongarch/predicates.md +@@ -563,6 +563,13 @@ + return loongarch_symbolic_constant_p (op, &type); + }) + ++(define_predicate "symbolic_pcrel_operand" ++ (match_code "const,symbol_ref,label_ref") ++{ ++ enum loongarch_symbol_type type; ++ return loongarch_symbolic_constant_p (op, &type) && type == SYMBOL_PCREL; ++}) ++ + (define_predicate "equality_operator" + (match_code "eq,ne")) + +diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-no-anchor.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-no-anchor.c +new file mode 100644 +index 000000000..fb03403d7 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-no-anchor.c +@@ -0,0 +1,6 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d -mexplicit-relocs=auto -fno-section-anchors" } */ ++ ++#include "explicit-relocs-auto-single-load-store.c" ++ ++/* { dg-final { scan-assembler-not "la.local" } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store.c +new file mode 100644 +index 000000000..0d53644cd +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store.c +@@ -0,0 +1,14 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d -mexplicit-relocs=auto" } */ ++ ++long a; ++int b; ++unsigned int c; ++ ++long load_a() { return a; } ++long load_b() { return b; } ++long load_c() { return c; } ++void store_a(long x) { a = x; } ++void 
store_b(int x) { b = x; } ++ ++/* { dg-final { scan-assembler-not "la.local" } } */ +-- +2.43.0 + diff --git a/0018-ccmp-Add-another-optimization-opportunity-for-ccmp-i.patch b/0018-ccmp-Add-another-optimization-opportunity-for-ccmp-i.patch new file mode 100644 index 0000000000000000000000000000000000000000..6f99e5cd8d50b9b5c17e50976412c7aab94aebbc --- /dev/null +++ b/0018-ccmp-Add-another-optimization-opportunity-for-ccmp-i.patch @@ -0,0 +1,342 @@ +From 19ded9dad06b22b9b7aa9e3902e3e7a38a2256ab Mon Sep 17 00:00:00 2001 +From: dingguangya +Date: Sat, 29 Jul 2023 18:27:10 +0800 +Subject: [PATCH 18/22] [ccmp] Add another optimization opportunity for ccmp + instruction + +Add flag -fccmp2. +Enables the use of the ccmp instruction by creating a new conflict +relationship for instances where temporary expressions replacement +cannot be effectively created. +--- + gcc/ccmp.cc | 33 ++++ + gcc/ccmp.h | 1 + + gcc/common.opt | 4 + + gcc/testsuite/gcc.target/aarch64/ccmp_3.c | 15 ++ + gcc/tree-ssa-coalesce.cc | 197 ++++++++++++++++++++++ + 5 files changed, 250 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/aarch64/ccmp_3.c + +diff --git a/gcc/ccmp.cc b/gcc/ccmp.cc +index 3db0a264e..e34f3bcc6 100644 +--- a/gcc/ccmp.cc ++++ b/gcc/ccmp.cc +@@ -37,6 +37,7 @@ along with GCC; see the file COPYING3. If not see + #include "cfgexpand.h" + #include "ccmp.h" + #include "predict.h" ++#include "gimple-iterator.h" + + /* Check whether T is a simple boolean variable or a SSA name + set by a comparison operator in the same basic block. */ +@@ -129,6 +130,38 @@ ccmp_candidate_p (gimple *g) + return false; + } + ++/* Check whether bb is a potential conditional compare candidate. 
*/ ++bool ++check_ccmp_candidate (basic_block bb) ++{ ++ gimple_stmt_iterator gsi; ++ gimple *bb_last_stmt, *stmt; ++ tree op0, op1; ++ ++ gsi = gsi_last_bb (bb); ++ bb_last_stmt = gsi_stmt (gsi); ++ ++ if (bb_last_stmt && gimple_code (bb_last_stmt) == GIMPLE_COND) ++ { ++ op0 = gimple_cond_lhs (bb_last_stmt); ++ op1 = gimple_cond_rhs (bb_last_stmt); ++ ++ if (TREE_CODE (op0) == SSA_NAME ++ && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE ++ && TREE_CODE (op1) == INTEGER_CST ++ && ((gimple_cond_code (bb_last_stmt) == NE_EXPR) ++ || (gimple_cond_code (bb_last_stmt) == EQ_EXPR))) ++ { ++ stmt = SSA_NAME_DEF_STMT (op0); ++ if (stmt && gimple_code (stmt) == GIMPLE_ASSIGN) ++ { ++ return ccmp_candidate_p (stmt); ++ } ++ } ++ } ++ return false; ++} ++ + /* Extract the comparison we want to do from the tree. */ + void + get_compare_parts (tree t, int *up, rtx_code *rcode, +diff --git a/gcc/ccmp.h b/gcc/ccmp.h +index 1799d5fed..efe3a1c14 100644 +--- a/gcc/ccmp.h ++++ b/gcc/ccmp.h +@@ -21,5 +21,6 @@ along with GCC; see the file COPYING3. If not see + #define GCC_CCMP_H + + extern rtx expand_ccmp_expr (gimple *, machine_mode); ++extern bool check_ccmp_candidate (basic_block bb); + + #endif /* GCC_CCMP_H */ +diff --git a/gcc/common.opt b/gcc/common.opt +index 4d91ce8cf..0aa516719 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -2017,6 +2017,10 @@ fira-verbose= + Common RejectNegative Joined UInteger Var(flag_ira_verbose) Init(5) + -fira-verbose= Control IRA's level of diagnostic messages. + ++fccmp2 ++Common Var(flag_ccmp2) Init(0) Optimization ++Optimize potential ccmp instruction in complex scenarios. ++ + fivopts + Common Var(flag_ivopts) Init(1) Optimization + Optimize induction variables on trees. 
+diff --git a/gcc/testsuite/gcc.target/aarch64/ccmp_3.c b/gcc/testsuite/gcc.target/aarch64/ccmp_3.c +new file mode 100644 +index 000000000..b509ba810 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/aarch64/ccmp_3.c +@@ -0,0 +1,15 @@ ++/* { dg-do compile { target { aarch64*-*-linux* } } } */ ++/* { dg-options "-O -fdump-rtl-expand-details -fccmp2" } */ ++ ++int func (int a, int b, int c) ++{ ++ while(1) ++ { ++ if(a-- == 0 || b >= c) ++ { ++ return 1; ++ } ++ } ++} ++ ++/* { dg-final { scan-assembler-times "\tccmp\t" 1} } */ +diff --git a/gcc/tree-ssa-coalesce.cc b/gcc/tree-ssa-coalesce.cc +index dccf41ab8..195e06428 100644 +--- a/gcc/tree-ssa-coalesce.cc ++++ b/gcc/tree-ssa-coalesce.cc +@@ -38,6 +38,9 @@ along with GCC; see the file COPYING3. If not see + #include "explow.h" + #include "tree-dfa.h" + #include "stor-layout.h" ++#include "ccmp.h" ++#include "target.h" ++#include "tree-outof-ssa.h" + + /* This set of routines implements a coalesce_list. This is an object which + is used to track pairs of ssa_names which are desirable to coalesce +@@ -854,6 +857,198 @@ live_track_clear_base_vars (live_track *ptr) + bitmap_clear (&ptr->live_base_var); + } + ++/* Return true if gimple is a copy assignment. */ ++ ++static inline bool ++gimple_is_assign_copy_p (gimple *gs) ++{ ++ return (is_gimple_assign (gs) && gimple_assign_copy_p (gs) ++ && TREE_CODE (gimple_assign_lhs (gs)) == SSA_NAME ++ && TREE_CODE (gimple_assign_rhs1 (gs)) == SSA_NAME); ++} ++ ++#define MAX_CCMP_CONFLICT_NUM 5 ++ ++/* Clear high-cost conflict graphs. */ ++ ++static void ++remove_high_cost_graph_for_ccmp (ssa_conflicts *conflict_graph) ++{ ++ unsigned x = 0; ++ int add_conflict_num = 0; ++ bitmap b; ++ FOR_EACH_VEC_ELT (conflict_graph->conflicts, x, b) ++ { ++ if (b) ++ { ++ add_conflict_num++; ++ } ++ } ++ if (add_conflict_num >= MAX_CCMP_CONFLICT_NUM) ++ { ++ conflict_graph->conflicts.release (); ++ } ++} ++ ++/* Adding a new conflict graph to the original graph. 
*/ ++ ++static void ++process_add_graph (live_track *live, basic_block bb, ++ ssa_conflicts *conflict_graph) ++{ ++ tree use, def; ++ ssa_op_iter iter; ++ gimple *first_visit_stmt = NULL; ++ for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi); ++ gsi_next (&gsi)) ++ { ++ if (gimple_visited_p (gsi_stmt (gsi))) ++ { ++ first_visit_stmt = gsi_stmt (gsi); ++ break; ++ } ++ } ++ if (!first_visit_stmt) ++ return; ++ ++ for (gimple_stmt_iterator gsi = gsi_last_bb (bb); ++ gsi_stmt (gsi) != first_visit_stmt; gsi_prev (&gsi)) ++ { ++ gimple *stmt = gsi_stmt (gsi); ++ if (gimple_visited_p (gsi_stmt (gsi)) && is_gimple_debug (stmt)) ++ { ++ continue; ++ } ++ if (gimple_is_assign_copy_p (stmt)) ++ { ++ live_track_clear_var (live, gimple_assign_rhs1 (stmt)); ++ } ++ FOR_EACH_SSA_TREE_OPERAND (def, stmt, iter, SSA_OP_DEF) ++ { ++ live_track_process_def (live, def, conflict_graph); ++ } ++ FOR_EACH_SSA_TREE_OPERAND (use, stmt, iter, SSA_OP_USE) ++ { ++ live_track_process_use (live, use); ++ } ++ } ++} ++ ++/* Build a conflict graph based on ccmp candidate. 
*/ ++ ++static void ++add_ccmp_conflict_graph (ssa_conflicts *conflict_graph, ++ tree_live_info_p liveinfo, var_map map, basic_block bb) ++{ ++ live_track *live; ++ tree use, def; ++ ssa_op_iter iter; ++ live = new_live_track (map); ++ live_track_init (live, live_on_exit (liveinfo, bb)); ++ ++ gimple *last_stmt = gsi_stmt (gsi_last_bb (bb)); ++ gcc_assert (gimple_cond_lhs (last_stmt)); ++ ++ auto_vec stack; ++ stack.safe_push (gimple_cond_lhs (last_stmt)); ++ while (!stack.is_empty ()) ++ { ++ tree op = stack.pop (); ++ gimple *op_stmt = SSA_NAME_DEF_STMT (op); ++ if (!op_stmt || gimple_bb (op_stmt) != bb ++ || !is_gimple_assign (op_stmt) ++ || !ssa_is_replaceable_p (op_stmt)) ++ { ++ continue; ++ } ++ if (gimple_is_assign_copy_p (op_stmt)) ++ { ++ live_track_clear_var (live, gimple_assign_rhs1 (op_stmt)); ++ } ++ gimple_set_visited (op_stmt, true); ++ FOR_EACH_SSA_TREE_OPERAND (def, op_stmt, iter, SSA_OP_DEF) ++ { ++ live_track_process_def (live, def, conflict_graph); ++ } ++ FOR_EACH_SSA_TREE_OPERAND (use, op_stmt, iter, SSA_OP_USE) ++ { ++ stack.safe_push (use); ++ live_track_process_use (live, use); ++ } ++ } ++ ++ process_add_graph (live, bb, conflict_graph); ++ delete_live_track (live); ++ remove_high_cost_graph_for_ccmp (conflict_graph); ++} ++ ++/* Determine whether the ccmp conflict graph can be added. ++ i.e, ++ ++ ;; basic block 3, loop depth 1 ++ ;; pred: 2 ++ ;; 3 ++ # ivtmp.5_10 = PHI ++ _7 = b_4 (D) >= c_5 (D); ++ _8 = ivtmp.5_10 == 0; ++ _9 = _7 | _8; ++ ivtmp.5_11 = ivtmp.5_10 - 1; ++ if (_9 != 0) ++ goto ; [10.70%] ++ else ++ goto ; [89.30%] ++ ++ In the above loop, the expression will be replaced: ++ ++ _7 replaced by b_4 (D) >= c_5 (D) ++ _8 replaced by ivtmp.5_10 == 0 ++ ++ If the current case want use the ccmp instruction, then ++ ++ _9 can replaced by _7 | _8 ++ ++ So this requires that ivtmp.5_11 and ivtmp.5_10 be divided into different ++ partitions. ++ ++ Now this function can achieve this ability. 
*/ ++ ++static void ++determine_add_ccmp_conflict_graph (basic_block bb, tree_live_info_p liveinfo, ++ var_map map, ssa_conflicts *graph) ++{ ++ if (!flag_ccmp2 || !targetm.gen_ccmp_first || !check_ccmp_candidate (bb)) ++ return; ++ for (gimple_stmt_iterator bsi = gsi_start_bb (bb); !gsi_end_p (bsi); ++ gsi_next (&bsi)) ++ { ++ gimple_set_visited (gsi_stmt (bsi), false); ++ } ++ ssa_conflicts *ccmp_conflict_graph; ++ ccmp_conflict_graph = ssa_conflicts_new (num_var_partitions (map)); ++ add_ccmp_conflict_graph (ccmp_conflict_graph, liveinfo, map, bb); ++ unsigned x; ++ bitmap b; ++ if (ccmp_conflict_graph) ++ { ++ FOR_EACH_VEC_ELT (ccmp_conflict_graph->conflicts, x, b) ++ { ++ if (!b) ++ continue; ++ unsigned y = bitmap_first_set_bit (b); ++ if (!graph->conflicts[x] || !bitmap_bit_p (graph->conflicts[x], y)) ++ { ++ ssa_conflicts_add (graph, x, y); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "potential ccmp: add additional " ++ "conflict-ssa : bb[%d] %d:%d\n", ++ bb->index, x, y); ++ } ++ } ++ } ++ } ++ ssa_conflicts_delete (ccmp_conflict_graph); ++} + + /* Build a conflict graph based on LIVEINFO. Any partitions which are in the + partition view of the var_map liveinfo is based on get entries in the +@@ -938,6 +1133,8 @@ build_ssa_conflict_graph (tree_live_info_p liveinfo) + live_track_process_use (live, var); + } + ++ determine_add_ccmp_conflict_graph (bb, liveinfo, map, graph); ++ + /* If result of a PHI is unused, looping over the statements will not + record any conflicts since the def was never live. Since the PHI node + is going to be translated out of SSA form, it will insert a copy. 
+-- +2.33.0 + diff --git a/0019-LoongArch-Implement-__builtin_thread_pointer-for-TLS.patch b/0019-LoongArch-Implement-__builtin_thread_pointer-for-TLS.patch new file mode 100644 index 0000000000000000000000000000000000000000..ec34040300493526cbeef3f1abc221c605cf6d7c --- /dev/null +++ b/0019-LoongArch-Implement-__builtin_thread_pointer-for-TLS.patch @@ -0,0 +1,84 @@ +From 619b6081064bf85a19f4659e278a361875e4f9fb Mon Sep 17 00:00:00 2001 +From: chenxiaolong +Date: Tue, 24 Oct 2023 14:40:14 +0800 +Subject: [PATCH 019/188] LoongArch: Implement __builtin_thread_pointer for + TLS. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (get_thread_pointer):Adds the + instruction template corresponding to the __builtin_thread_pointer + function. + * doc/extend.texi:Add the __builtin_thread_pointer function support + description to the documentation. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/builtin_thread_pointer.c: New test. +--- + gcc/config/loongarch/loongarch.md | 7 +++++++ + gcc/doc/extend.texi | 5 +++++ + .../gcc.target/loongarch/builtin_thread_pointer.c | 10 ++++++++++ + 3 files changed, 22 insertions(+) + create mode 100644 gcc/testsuite/gcc.target/loongarch/builtin_thread_pointer.c + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index c4c6baa60..80487488d 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -113,6 +113,7 @@ + + (define_constants + [(RETURN_ADDR_REGNUM 1) ++ (TP_REGNUM 2) + (T0_REGNUM 12) + (T1_REGNUM 13) + (S0_REGNUM 23) +@@ -3647,6 +3648,12 @@ + [(set_attr "length" "0") + (set_attr "type" "ghost")]) + ++;; Named pattern for expanding thread pointer reference. 
++(define_expand "get_thread_pointer" ++ [(set (match_operand:P 0 "register_operand" "=r") ++ (reg:P TP_REGNUM))] ++ "HAVE_AS_TLS" ++ {}) + + (define_split + [(match_operand 0 "small_data_pattern")] +diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi +index 1d1bac255..497c6de5f 100644 +--- a/gcc/doc/extend.texi ++++ b/gcc/doc/extend.texi +@@ -16257,6 +16257,11 @@ function you need to include @code{larchintrin.h}. + void __break (imm0_32767) + @end smallexample + ++Returns the value that is currently set in the @samp{tp} register. ++@smallexample ++ void * __builtin_thread_pointer (void) ++@end smallexample ++ + @node MIPS DSP Built-in Functions + @subsection MIPS DSP Built-in Functions + +diff --git a/gcc/testsuite/gcc.target/loongarch/builtin_thread_pointer.c b/gcc/testsuite/gcc.target/loongarch/builtin_thread_pointer.c +new file mode 100644 +index 000000000..541e3b143 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/builtin_thread_pointer.c +@@ -0,0 +1,10 @@ ++/* { dg-do compile } */ ++/* { dg-require-effective-target tls_native } */ ++/* { dg-options "-O2" } */ ++/* { dg-final { scan-assembler "or\t\\\$r4,\\\$r2,\\\$r0" } } */ ++ ++void * ++get_tp () ++{ ++ return __builtin_thread_pointer (); ++} +-- +2.43.0 + diff --git a/0019-fp-model-Enable-fp-model-on-kunpeng.patch b/0019-fp-model-Enable-fp-model-on-kunpeng.patch new file mode 100644 index 0000000000000000000000000000000000000000..46ea52c0e7de1144fac0353c3dbbdf5d19b5f1e4 --- /dev/null +++ b/0019-fp-model-Enable-fp-model-on-kunpeng.patch @@ -0,0 +1,405 @@ +From 8cdb316a3fe205a3089b9c17aec0442f4d5f75be Mon Sep 17 00:00:00 2001 +From: bule +Date: Sun, 27 Aug 2023 16:49:04 +0800 +Subject: [PATCH 19/22] [fp-model] Enable fp-model on kunpeng + +Enable fp-model options on kunpeng for precision control. 
+--- + gcc/common.opt | 26 +++++ + gcc/config/aarch64/aarch64-linux.h | 3 +- + gcc/flag-types.h | 9 ++ + gcc/fortran/options.cc | 8 ++ + gcc/opts-common.cc | 146 ++++++++++++++++++++++++++++- + gcc/opts.cc | 68 ++++++++++++++ + 6 files changed, 256 insertions(+), 4 deletions(-) + +diff --git a/gcc/common.opt b/gcc/common.opt +index 8a0dafc52..f5eef8a45 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1642,6 +1642,32 @@ ffp-int-builtin-inexact + Common Var(flag_fp_int_builtin_inexact) Init(1) Optimization + Allow built-in functions ceil, floor, round, trunc to raise \"inexact\" exceptions. + ++fftz ++Common Var(flag_ftz) Optimization ++Control fpcr register for flush to zero. ++ ++fp-model= ++Common Joined RejectNegative Enum(fp_model) Var(flag_fp_model) Init(FP_MODEL_NORMAL) Optimization ++-fp-model=[normal|fast|precise|except|strict] Perform floating-point precision control. ++ ++Enum ++Name(fp_model) Type(enum fp_model) UnknownError(unknown floating point precision model %qs) ++ ++EnumValue ++Enum(fp_model) String(normal) Value(FP_MODEL_NORMAL) ++ ++EnumValue ++Enum(fp_model) String(fast) Value(FP_MODEL_FAST) ++ ++EnumValue ++Enum(fp_model) String(precise) Value(FP_MODEL_PRECISE) ++ ++EnumValue ++Enum(fp_model) String(except) Value(FP_MODEL_EXCEPT) ++ ++EnumValue ++Enum(fp_model) String(strict) Value(FP_MODEL_STRICT) ++ + ; Nonzero means don't put addresses of constant functions in registers. + ; Used for compiling the Unix kernel, where strange substitutions are + ; done on the assembly output. 
+diff --git a/gcc/config/aarch64/aarch64-linux.h b/gcc/config/aarch64/aarch64-linux.h +index 5e4553d79..a5cba6391 100644 +--- a/gcc/config/aarch64/aarch64-linux.h ++++ b/gcc/config/aarch64/aarch64-linux.h +@@ -50,7 +50,8 @@ + #define LINK_SPEC LINUX_TARGET_LINK_SPEC AARCH64_ERRATA_LINK_SPEC + + #define GNU_USER_TARGET_MATHFILE_SPEC \ +- "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}" ++ "%{Ofast|ffast-math|funsafe-math-optimizations|fp-model=fast|fftz:\ ++ %{!fno-ftz:crtfastmath.o%s}}" + + #undef ENDFILE_SPEC + #define ENDFILE_SPEC \ +diff --git a/gcc/flag-types.h b/gcc/flag-types.h +index 2c8498169..64c64eb32 100644 +--- a/gcc/flag-types.h ++++ b/gcc/flag-types.h +@@ -260,6 +260,15 @@ enum fp_contract_mode { + FP_CONTRACT_FAST = 2 + }; + ++/* Floating-point precision mode. */ ++enum fp_model { ++ FP_MODEL_NORMAL = 0, ++ FP_MODEL_FAST = 1, ++ FP_MODEL_PRECISE = 2, ++ FP_MODEL_EXCEPT = 3, ++ FP_MODEL_STRICT = 4 ++}; ++ + /* Scalar storage order kind. */ + enum scalar_storage_order_kind { + SSO_NATIVE = 0, +diff --git a/gcc/fortran/options.cc b/gcc/fortran/options.cc +index d0fa634f1..3eb99a84a 100644 +--- a/gcc/fortran/options.cc ++++ b/gcc/fortran/options.cc +@@ -243,6 +243,7 @@ form_from_filename (const char *filename) + return f_form; + } + ++static void gfc_handle_fpe_option (const char *arg, bool trap); + + /* Finalize commandline options. */ + +@@ -286,6 +287,13 @@ gfc_post_options (const char **pfilename) + if (flag_protect_parens == -1) + flag_protect_parens = !optimize_fast; + ++ /* If fp-model=precise/strict, turn on all ffpe-trap and ffpe-summary. */ ++ if (flag_fp_model == FP_MODEL_EXCEPT || flag_fp_model == FP_MODEL_STRICT) ++ { ++ gfc_handle_fpe_option ("all", false); ++ gfc_handle_fpe_option ("invalid,zero,overflow,underflow", true); ++ } ++ + /* -Ofast sets implies -fstack-arrays unless an explicit size is set for + stack arrays. 
*/ + if (flag_stack_arrays == -1 && flag_max_stack_var_size == -2) +diff --git a/gcc/opts-common.cc b/gcc/opts-common.cc +index 7c07d5046..489a6e02a 100644 +--- a/gcc/opts-common.cc ++++ b/gcc/opts-common.cc +@@ -28,7 +28,8 @@ along with GCC; see the file COPYING3. If not see + #include "spellcheck.h" + #include "opts-jobserver.h" + +-static void prune_options (struct cl_decoded_option **, unsigned int *); ++static void prune_options (struct cl_decoded_option **, unsigned int *, ++ unsigned int); + + /* An option that is undocumented, that takes a joined argument, and + that doesn't fit any of the classes of uses (language/common, +@@ -1091,7 +1092,7 @@ decode_cmdline_options_to_array (unsigned int argc, const char **argv, + + *decoded_options = opt_array; + *decoded_options_count = num_decoded_options; +- prune_options (decoded_options, decoded_options_count); ++ prune_options (decoded_options, decoded_options_count, lang_mask); + } + + /* Return true if NEXT_OPT_IDX cancels OPT_IDX. Return false if the +@@ -1112,11 +1113,109 @@ cancel_option (int opt_idx, int next_opt_idx, int orig_next_opt_idx) + return false; + } + ++/* Check whether opt_idx exists in decoded_options array between index ++ start and end. If found, return its index in decoded_options, ++ else return end. */ ++static unsigned int ++find_opt_idx (const struct cl_decoded_option *decoded_options, ++ unsigned int decoded_options_count, ++ unsigned int start, unsigned int end, unsigned int opt_idx) ++{ ++ gcc_assert (end <= decoded_options_count); ++ gcc_assert (opt_idx < cl_options_count); ++ unsigned int k; ++ for (k = start; k < end; k++) ++ { ++ if (decoded_options[k].opt_index == opt_idx) ++ { ++ return k; ++ } ++ } ++ return k; ++} ++ ++/* remove the opt_index element from decoded_options array. 
*/ ++static unsigned int ++remove_option (struct cl_decoded_option *decoded_options, ++ unsigned int decoded_options_count, ++ unsigned int opt_index) ++{ ++ gcc_assert (opt_index < decoded_options_count); ++ unsigned int i; ++ for (i = opt_index; i < decoded_options_count - 1; i++) ++ { ++ decoded_options[i] = decoded_options[i + 1]; ++ } ++ return decoded_options_count - 1; ++} ++ ++/* Handle the priority between fp-model, Ofast, and ++ ffast-math. */ ++static unsigned int ++handle_fp_model_driver (struct cl_decoded_option *decoded_options, ++ unsigned int decoded_options_count, ++ unsigned int fp_model_index, ++ unsigned int lang_mask) ++{ ++ struct cl_decoded_option fp_model_opt = decoded_options[fp_model_index]; ++ enum fp_model model = (enum fp_model) fp_model_opt.value; ++ if (model == FP_MODEL_PRECISE || model == FP_MODEL_STRICT) ++ { ++ /* If found Ofast, override Ofast with O3. */ ++ unsigned int Ofast_index; ++ Ofast_index = find_opt_idx (decoded_options, decoded_options_count, ++ 0, decoded_options_count, OPT_Ofast); ++ while (Ofast_index != decoded_options_count) ++ { ++ const char *tmp_argv = "-O3"; ++ decode_cmdline_option (&tmp_argv, lang_mask, ++ &decoded_options[Ofast_index]); ++ warning (0, "%<-Ofast%> is degraded to %<-O3%> due to %qs", ++ fp_model_opt.orig_option_with_args_text); ++ Ofast_index = find_opt_idx (decoded_options, decoded_options_count, ++ 0, decoded_options_count, OPT_Ofast); ++ } ++ /* If found ffast-math before fp-model=precise/strict ++ it, cancel it. 
*/ ++ unsigned int ffast_math_index; ++ ffast_math_index ++ = find_opt_idx (decoded_options, decoded_options_count, 0, ++ fp_model_index, OPT_ffast_math); ++ if (ffast_math_index != fp_model_index) ++ { ++ decoded_options_count ++ = remove_option (decoded_options, decoded_options_count, ++ ffast_math_index); ++ warning (0, "%<-ffast-math%> before %qs is canceled", ++ fp_model_opt.orig_option_with_args_text); ++ } ++ } ++ if (model == FP_MODEL_FAST) ++ { ++ /* If found -fno-fast-math after fp-model=fast, cancel this one. */ ++ unsigned int fno_fast_math_index; ++ fno_fast_math_index ++ = find_opt_idx (decoded_options, decoded_options_count, fp_model_index, ++ decoded_options_count, OPT_ffast_math); ++ if (fno_fast_math_index != decoded_options_count ++ && decoded_options[fno_fast_math_index].value == 0) ++ { ++ decoded_options_count ++ = remove_option (decoded_options, decoded_options_count, ++ fp_model_index); ++ warning (0, ++ "%<-fp-model=fast%> before %<-fno-fast-math%> is canceled"); ++ } ++ } ++ return decoded_options_count; ++} ++ + /* Filter out options canceled by the ones after them. */ + + static void + prune_options (struct cl_decoded_option **decoded_options, +- unsigned int *decoded_options_count) ++ unsigned int *decoded_options_count, ++ unsigned int lang_mask) + { + unsigned int old_decoded_options_count = *decoded_options_count; + struct cl_decoded_option *old_decoded_options = *decoded_options; +@@ -1127,7 +1226,12 @@ prune_options (struct cl_decoded_option **decoded_options, + const struct cl_option *option; + unsigned int fdiagnostics_color_idx = 0; + ++ if (!diagnostic_ready_p ()) ++ diagnostic_initialize (global_dc, 0); ++ + /* Remove arguments which are negated by others after them. 
*/ ++ ++ unsigned int fp_model_index = old_decoded_options_count; + new_decoded_options_count = 0; + for (i = 0; i < old_decoded_options_count; i++) + { +@@ -1151,6 +1255,34 @@ prune_options (struct cl_decoded_option **decoded_options, + fdiagnostics_color_idx = i; + continue; + ++ case OPT_fp_model_: ++ /* Only the last fp-model option will take effect. */ ++ unsigned int next_fp_model_idx; ++ next_fp_model_idx = find_opt_idx (old_decoded_options, ++ old_decoded_options_count, ++ i + 1, ++ old_decoded_options_count, ++ OPT_fp_model_); ++ if (next_fp_model_idx != old_decoded_options_count) ++ { ++ /* Found more than one fp-model, cancel this one. */ ++ if (old_decoded_options[i].value ++ != old_decoded_options[next_fp_model_idx].value) ++ { ++ warning (0, "%qs is overrided by %qs", ++ old_decoded_options[i]. ++ orig_option_with_args_text, ++ old_decoded_options[next_fp_model_idx]. ++ orig_option_with_args_text); ++ } ++ break; ++ } ++ else ++ { ++ /* Found the last fp-model option. */ ++ fp_model_index = new_decoded_options_count; ++ } ++ /* FALLTHRU. 
*/ + default: + gcc_assert (opt_idx < cl_options_count); + option = &cl_options[opt_idx]; +@@ -1190,6 +1322,14 @@ keep: + break; + } + } ++ if (fp_model_index < new_decoded_options_count) ++ { ++ new_decoded_options_count ++ = handle_fp_model_driver (new_decoded_options, ++ new_decoded_options_count, ++ fp_model_index, ++ lang_mask); ++ } + + if (fdiagnostics_color_idx >= 1) + { +diff --git a/gcc/opts.cc b/gcc/opts.cc +index a97630d1c..b522ed7e2 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -328,6 +328,7 @@ static void set_debug_level (uint32_t dinfo, int extended, + struct gcc_options *opts_set, + location_t loc); + static void set_fast_math_flags (struct gcc_options *opts, int set); ++static void set_fp_model_flags (struct gcc_options *opts, int set); + static void decode_d_option (const char *arg, struct gcc_options *opts, + location_t loc, diagnostic_context *dc); + static void set_unsafe_math_optimizations_flags (struct gcc_options *opts, +@@ -2857,6 +2858,10 @@ common_handle_option (struct gcc_options *opts, + set_fast_math_flags (opts, value); + break; + ++ case OPT_fp_model_: ++ set_fp_model_flags (opts, value); ++ break; ++ + case OPT_funsafe_math_optimizations: + set_unsafe_math_optimizations_flags (opts, value); + break; +@@ -3266,6 +3271,69 @@ set_fast_math_flags (struct gcc_options *opts, int set) + } + } + ++/* Handle fp-model options. */ ++static void ++set_fp_model_flags (struct gcc_options *opts, int set) ++{ ++ enum fp_model model = (enum fp_model) set; ++ switch (model) ++ { ++ case FP_MODEL_FAST: ++ /* Equivalent to open ffast-math. */ ++ set_fast_math_flags (opts, 1); ++ break; ++ ++ case FP_MODEL_PRECISE: ++ /* Equivalent to close ffast-math. */ ++ set_fast_math_flags (opts, 0); ++ /* Turn on -frounding-math -fsignaling-nans. 
*/ ++ if (!opts->frontend_set_flag_signaling_nans) ++ opts->x_flag_signaling_nans = 1; ++ if (!opts->frontend_set_flag_rounding_math) ++ opts->x_flag_rounding_math = 1; ++ opts->x_flag_expensive_optimizations = 0; ++ opts->x_flag_code_hoisting = 0; ++ opts->x_flag_predictive_commoning = 0; ++ opts->x_flag_fp_contract_mode = FP_CONTRACT_OFF; ++ break; ++ ++ case FP_MODEL_EXCEPT: ++ if (!opts->frontend_set_flag_signaling_nans) ++ opts->x_flag_signaling_nans = 1; ++ if (!opts->frontend_set_flag_errno_math) ++ opts->x_flag_errno_math = 1; ++ if (!opts->frontend_set_flag_trapping_math) ++ opts->x_flag_trapping_math = 1; ++ opts->x_flag_fp_int_builtin_inexact = 1; ++ /* Also turn on ffpe-trap in fortran. */ ++ break; ++ ++ case FP_MODEL_STRICT: ++ /* Turn on both precise and except. */ ++ if (!opts->frontend_set_flag_signaling_nans) ++ opts->x_flag_signaling_nans = 1; ++ if (!opts->frontend_set_flag_rounding_math) ++ opts->x_flag_rounding_math = 1; ++ opts->x_flag_expensive_optimizations = 0; ++ opts->x_flag_code_hoisting = 0; ++ opts->x_flag_predictive_commoning = 0; ++ if (!opts->frontend_set_flag_errno_math) ++ opts->x_flag_errno_math = 1; ++ if (!opts->frontend_set_flag_trapping_math) ++ opts->x_flag_trapping_math = 1; ++ opts->x_flag_fp_int_builtin_inexact = 1; ++ opts->x_flag_fp_contract_mode = FP_CONTRACT_OFF; ++ break; ++ ++ case FP_MODEL_NORMAL: ++ /* Do nothing. */ ++ break; ++ ++ default: ++ gcc_unreachable (); ++ } ++} ++ + /* When -funsafe-math-optimizations is set the following + flags are set as well. 
*/ + static void +-- +2.33.0 + diff --git a/0020-LoongArch-Fix-vfrint-releated-comments-in-lsxintrin..patch b/0020-LoongArch-Fix-vfrint-releated-comments-in-lsxintrin..patch new file mode 100644 index 0000000000000000000000000000000000000000..549bee51c85240d259e0f531fad2aa9e2e6d4e71 --- /dev/null +++ b/0020-LoongArch-Fix-vfrint-releated-comments-in-lsxintrin..patch @@ -0,0 +1,189 @@ +From 9b29e6ba10716656ba9b32c33f021e920bb05f3d Mon Sep 17 00:00:00 2001 +From: Chenghui Pan +Date: Mon, 23 Oct 2023 10:13:24 +0800 +Subject: [PATCH 020/188] LoongArch: Fix vfrint-releated comments in + lsxintrin.h and lasxintrin.h + +The comment of vfrint-related intrinsic functions does not match the return +value type in definition. This patch fixes these comments. + +gcc/ChangeLog: + + * config/loongarch/lasxintrin.h (__lasx_xvftintrnel_l_s): Fix comments. + (__lasx_xvfrintrne_s): Ditto. + (__lasx_xvfrintrne_d): Ditto. + (__lasx_xvfrintrz_s): Ditto. + (__lasx_xvfrintrz_d): Ditto. + (__lasx_xvfrintrp_s): Ditto. + (__lasx_xvfrintrp_d): Ditto. + (__lasx_xvfrintrm_s): Ditto. + (__lasx_xvfrintrm_d): Ditto. + * config/loongarch/lsxintrin.h (__lsx_vftintrneh_l_s): Ditto. + (__lsx_vfrintrne_s): Ditto. + (__lsx_vfrintrne_d): Ditto. + (__lsx_vfrintrz_s): Ditto. + (__lsx_vfrintrz_d): Ditto. + (__lsx_vfrintrp_s): Ditto. + (__lsx_vfrintrp_d): Ditto. + (__lsx_vfrintrm_s): Ditto. + (__lsx_vfrintrm_d): Ditto. +--- + gcc/config/loongarch/lasxintrin.h | 16 ++++++++-------- + gcc/config/loongarch/lsxintrin.h | 16 ++++++++-------- + 2 files changed, 16 insertions(+), 16 deletions(-) + +diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h +index d39379927..7bce2c757 100644 +--- a/gcc/config/loongarch/lasxintrin.h ++++ b/gcc/config/loongarch/lasxintrin.h +@@ -3368,7 +3368,7 @@ __m256i __lasx_xvftintrnel_l_s (__m256 _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V8SI, V8SF. 
*/ ++/* Data types in instruction templates: V8SF, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256 __lasx_xvfrintrne_s (__m256 _1) + { +@@ -3376,7 +3376,7 @@ __m256 __lasx_xvfrintrne_s (__m256 _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V4DI, V4DF. */ ++/* Data types in instruction templates: V4DF, V4DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256d __lasx_xvfrintrne_d (__m256d _1) + { +@@ -3384,7 +3384,7 @@ __m256d __lasx_xvfrintrne_d (__m256d _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V8SI, V8SF. */ ++/* Data types in instruction templates: V8SF, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256 __lasx_xvfrintrz_s (__m256 _1) + { +@@ -3392,7 +3392,7 @@ __m256 __lasx_xvfrintrz_s (__m256 _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V4DI, V4DF. */ ++/* Data types in instruction templates: V4DF, V4DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256d __lasx_xvfrintrz_d (__m256d _1) + { +@@ -3400,7 +3400,7 @@ __m256d __lasx_xvfrintrz_d (__m256d _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V8SI, V8SF. */ ++/* Data types in instruction templates: V8SF, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256 __lasx_xvfrintrp_s (__m256 _1) + { +@@ -3408,7 +3408,7 @@ __m256 __lasx_xvfrintrp_s (__m256 _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V4DI, V4DF. */ ++/* Data types in instruction templates: V4DF, V4DF. 
*/ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256d __lasx_xvfrintrp_d (__m256d _1) + { +@@ -3416,7 +3416,7 @@ __m256d __lasx_xvfrintrp_d (__m256d _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V8SI, V8SF. */ ++/* Data types in instruction templates: V8SF, V8SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256 __lasx_xvfrintrm_s (__m256 _1) + { +@@ -3424,7 +3424,7 @@ __m256 __lasx_xvfrintrm_s (__m256 _1) + } + + /* Assembly instruction format: xd, xj. */ +-/* Data types in instruction templates: V4DI, V4DF. */ ++/* Data types in instruction templates: V4DF, V4DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m256d __lasx_xvfrintrm_d (__m256d _1) + { +diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h +index ec4206990..29553c093 100644 +--- a/gcc/config/loongarch/lsxintrin.h ++++ b/gcc/config/loongarch/lsxintrin.h +@@ -3412,7 +3412,7 @@ __m128i __lsx_vftintrneh_l_s (__m128 _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V4SI, V4SF. */ ++/* Data types in instruction templates: V4SF, V4SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128 __lsx_vfrintrne_s (__m128 _1) + { +@@ -3420,7 +3420,7 @@ __m128 __lsx_vfrintrne_s (__m128 _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V2DI, V2DF. */ ++/* Data types in instruction templates: V2DF, V2DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128d __lsx_vfrintrne_d (__m128d _1) + { +@@ -3428,7 +3428,7 @@ __m128d __lsx_vfrintrne_d (__m128d _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V4SI, V4SF. */ ++/* Data types in instruction templates: V4SF, V4SF. 
*/ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128 __lsx_vfrintrz_s (__m128 _1) + { +@@ -3436,7 +3436,7 @@ __m128 __lsx_vfrintrz_s (__m128 _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V2DI, V2DF. */ ++/* Data types in instruction templates: V2DF, V2DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128d __lsx_vfrintrz_d (__m128d _1) + { +@@ -3444,7 +3444,7 @@ __m128d __lsx_vfrintrz_d (__m128d _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V4SI, V4SF. */ ++/* Data types in instruction templates: V4SF, V4SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128 __lsx_vfrintrp_s (__m128 _1) + { +@@ -3452,7 +3452,7 @@ __m128 __lsx_vfrintrp_s (__m128 _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V2DI, V2DF. */ ++/* Data types in instruction templates: V2DF, V2DF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128d __lsx_vfrintrp_d (__m128d _1) + { +@@ -3460,7 +3460,7 @@ __m128d __lsx_vfrintrp_d (__m128d _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V4SI, V4SF. */ ++/* Data types in instruction templates: V4SF, V4SF. */ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128 __lsx_vfrintrm_s (__m128 _1) + { +@@ -3468,7 +3468,7 @@ __m128 __lsx_vfrintrm_s (__m128 _1) + } + + /* Assembly instruction format: vd, vj. */ +-/* Data types in instruction templates: V2DI, V2DF. */ ++/* Data types in instruction templates: V2DF, V2DF. 
*/ + extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__)) + __m128d __lsx_vfrintrm_d (__m128d _1) + { +-- +2.43.0 + diff --git a/0020-simdmath-Enable-simdmath-on-kunpeng.patch b/0020-simdmath-Enable-simdmath-on-kunpeng.patch new file mode 100644 index 0000000000000000000000000000000000000000..f6b7a48b75818dc25efe3b608c6940f95abea328 --- /dev/null +++ b/0020-simdmath-Enable-simdmath-on-kunpeng.patch @@ -0,0 +1,317 @@ +From 49ad10199dbdda2c36850a2617f5c985977939c5 Mon Sep 17 00:00:00 2001 +From: bule +Date: Sun, 27 Aug 2023 16:49:42 +0800 +Subject: [PATCH 20/22] [simdmath] Enable simdmath on kunpeng + +This enable simd math function supported by libmathlib on fortran/c/c++. +Use -fsimdmath to turn on the generation of simdmath function. The +supported functions can be found in simdmath.h. Add more simd declaration +if you need more kinds of math functions. -msimdmath-64 is used to turn +on 64-bit simd math functions which is not supported by libmathlib. +Therefore, this option is default to off. 
+--- + gcc/c-family/c-opts.cc | 4 ++ + gcc/common.opt | 4 ++ + gcc/config/aarch64/aarch64.cc | 9 ++++- + gcc/config/aarch64/aarch64.opt | 6 +++ + gcc/fortran/scanner.cc | 3 ++ + gcc/opts.cc | 17 ++++++++ + .../gcc.target/aarch64/simd_pcs_attribute-3.c | 2 +- + libgomp/Makefile.am | 4 +- + libgomp/Makefile.in | 10 +++-- + libgomp/configure | 4 +- + libgomp/configure.ac | 2 +- + libgomp/simdmath.h.in | 40 +++++++++++++++++++ + libgomp/simdmath_f.h.in | 11 +++++ + 13 files changed, 106 insertions(+), 10 deletions(-) + create mode 100644 libgomp/simdmath.h.in + create mode 100644 libgomp/simdmath_f.h.in + +diff --git a/gcc/c-family/c-opts.cc b/gcc/c-family/c-opts.cc +index a341a0617..5134f6128 100644 +--- a/gcc/c-family/c-opts.cc ++++ b/gcc/c-family/c-opts.cc +@@ -801,6 +801,10 @@ c_common_post_options (const char **pfilename) + if (cpp_opts->deps.style == DEPS_NONE) + check_deps_environment_vars (); + ++ if (flag_simdmath) ++ { ++ defer_opt (OPT_include, "simdmath.h"); ++ } + handle_deferred_opts (); + + sanitize_cpp_opts (); +diff --git a/gcc/common.opt b/gcc/common.opt +index f5eef8a45..e9d580957 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -2125,6 +2125,10 @@ fmath-errno + Common Var(flag_errno_math) Init(1) Optimization SetByCombined + Set errno after built-in math functions. + ++fsimdmath ++Common Var(flag_simdmath) Init(0) Optimization ++Enable auto-vectorize math functions for mathlib. This option will turn on -fno-math-errno and -fopenmp-simd. ++ + fmax-errors= + Common Joined RejectNegative UInteger Var(flag_max_errors) + -fmax-errors= Maximum number of errors to report. 
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc +index 226dc9dff..a3da4ca30 100644 +--- a/gcc/config/aarch64/aarch64.cc ++++ b/gcc/config/aarch64/aarch64.cc +@@ -26904,8 +26904,13 @@ aarch64_simd_clone_compute_vecsize_and_simdlen (struct cgraph_node *node, + elt_bits = GET_MODE_BITSIZE (SCALAR_TYPE_MODE (base_type)); + if (known_eq (clonei->simdlen, 0U)) + { +- count = 2; +- vec_bits = (num == 0 ? 64 : 128); ++ /* Currently mathlib or sleef hasn't provide function for V2SF mode ++ simdclone of single precision functions. (e.g._ZCVnN2v_expf) ++ Therefore this mode is disabled by default to avoid link error. ++ Use -msimdmath-64 option to enable this mode. */ ++ count = flag_simdmath_64 ? 2 : 1; ++ vec_bits = ((num == 0 && flag_simdmath_64) ? 64 : 128); ++ + clonei->simdlen = exact_div (vec_bits, elt_bits); + } + else +diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt +index 92220b26e..a64b927e9 100644 +--- a/gcc/config/aarch64/aarch64.opt ++++ b/gcc/config/aarch64/aarch64.opt +@@ -190,6 +190,12 @@ precision of square root results to about 16 bits for + single precision and to 32 bits for double precision. + If enabled, it implies -mlow-precision-recip-sqrt. + ++msimdmath-64 ++Target Var(flag_simdmath_64) Optimization ++Allow compiler to generate V2SF 64 bits simdclone of math functions, ++which is not currently supported in mathlib or sleef. ++Therefore this option is disabled by default. ++ + mlow-precision-div + Target Var(flag_mlow_precision_div) Optimization + Enable the division approximation. 
Enabling this reduces +diff --git a/gcc/fortran/scanner.cc b/gcc/fortran/scanner.cc +index 2dff25147..63e262f51 100644 +--- a/gcc/fortran/scanner.cc ++++ b/gcc/fortran/scanner.cc +@@ -2769,6 +2769,9 @@ gfc_new_file (void) + if (flag_pre_include != NULL) + load_file (flag_pre_include, NULL, false); + ++ if (flag_simdmath) ++ load_file ("simdmath_f.h", NULL, false); ++ + if (gfc_cpp_enabled ()) + { + gfc_cpp_preprocess (gfc_source_file); +diff --git a/gcc/opts.cc b/gcc/opts.cc +index b522ed7e2..c3cc2c169 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -322,6 +322,7 @@ static const char undocumented_msg[] = N_("This option lacks documentation."); + static const char use_diagnosed_msg[] = N_("Uses of this option are diagnosed."); + + typedef char *char_p; /* For DEF_VEC_P. */ ++static void set_simdmath_flags (struct gcc_options *opts, int set); + + static void set_debug_level (uint32_t dinfo, int extended, + const char *arg, struct gcc_options *opts, +@@ -2850,6 +2851,10 @@ common_handle_option (struct gcc_options *opts, + dc->min_margin_width = value; + break; + ++ case OPT_fsimdmath: ++ set_simdmath_flags (opts, value); ++ break; ++ + case OPT_fdump_: + /* Deferred. */ + break; +@@ -3227,6 +3232,18 @@ common_handle_option (struct gcc_options *opts, + return true; + } + ++/* The following routines are used to set -fno-math-errno and -fopenmp-simd ++ to enable vector mathlib. */ ++static void ++set_simdmath_flags (struct gcc_options *opts, int set) ++{ ++ if (set) ++ { ++ opts->x_flag_errno_math = 0; ++ opts->x_flag_openmp_simd = 1; ++ } ++} ++ + /* Used to set the level of strict aliasing warnings in OPTS, + when no level is specified (i.e., when -Wstrict-aliasing, and not + -Wstrict-aliasing=level was given). 
+diff --git a/gcc/testsuite/gcc.target/aarch64/simd_pcs_attribute-3.c b/gcc/testsuite/gcc.target/aarch64/simd_pcs_attribute-3.c +index 95f6a6803..e0e0efa9d 100644 +--- a/gcc/testsuite/gcc.target/aarch64/simd_pcs_attribute-3.c ++++ b/gcc/testsuite/gcc.target/aarch64/simd_pcs_attribute-3.c +@@ -1,5 +1,5 @@ + /* { dg-do compile } */ +-/* { dg-options "-Ofast" } */ ++/* { dg-options "-Ofast -msimdmath-64" } */ + + __attribute__ ((__simd__)) + __attribute__ ((__nothrow__ , __leaf__ , __const__)) +diff --git a/libgomp/Makefile.am b/libgomp/Makefile.am +index f8b2a06d6..8dfa160d6 100644 +--- a/libgomp/Makefile.am ++++ b/libgomp/Makefile.am +@@ -75,10 +75,10 @@ libgomp_la_SOURCES += openacc.f90 + endif + + nodist_noinst_HEADERS = libgomp_f.h +-nodist_libsubinclude_HEADERS = omp.h openacc.h acc_prof.h ++nodist_libsubinclude_HEADERS = omp.h openacc.h acc_prof.h simdmath.h + if USE_FORTRAN + nodist_finclude_HEADERS = omp_lib.h omp_lib.f90 omp_lib.mod omp_lib_kinds.mod \ +- openacc_lib.h openacc.f90 openacc.mod openacc_kinds.mod ++ openacc_lib.h openacc.f90 openacc.mod openacc_kinds.mod simdmath_f.h + endif + + LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) +diff --git a/libgomp/Makefile.in b/libgomp/Makefile.in +index 6f0cb7161..90fc326f0 100644 +--- a/libgomp/Makefile.in ++++ b/libgomp/Makefile.in +@@ -147,7 +147,7 @@ am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno + mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs + CONFIG_HEADER = config.h +-CONFIG_CLEAN_FILES = omp.h omp_lib.h omp_lib.f90 libgomp_f.h \ ++CONFIG_CLEAN_FILES = omp.h omp_lib.h simdmath.h simdmath_f.h omp_lib.f90 libgomp_f.h \ + libgomp.spec + CONFIG_CLEAN_VPATH_FILES = + am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +@@ -583,9 +583,9 @@ libgomp_la_SOURCES = alloc.c atomic.c barrier.c critical.c env.c \ + @PLUGIN_GCN_TRUE@libgomp_plugin_gcn_la_LIBADD = libgomp.la $(PLUGIN_GCN_LIBS) + 
@PLUGIN_GCN_TRUE@libgomp_plugin_gcn_la_LIBTOOLFLAGS = --tag=disable-static + nodist_noinst_HEADERS = libgomp_f.h +-nodist_libsubinclude_HEADERS = omp.h openacc.h acc_prof.h ++nodist_libsubinclude_HEADERS = omp.h openacc.h acc_prof.h simdmath.h + @USE_FORTRAN_TRUE@nodist_finclude_HEADERS = omp_lib.h omp_lib.f90 omp_lib.mod omp_lib_kinds.mod \ +-@USE_FORTRAN_TRUE@ openacc_lib.h openacc.f90 openacc.mod openacc_kinds.mod ++@USE_FORTRAN_TRUE@ openacc_lib.h openacc.f90 openacc.mod openacc_kinds.mod simdmath_f.h + + LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS)) + LINK = $(LIBTOOL) --tag CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=link \ +@@ -676,6 +676,10 @@ omp.h: $(top_builddir)/config.status $(srcdir)/omp.h.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + omp_lib.h: $(top_builddir)/config.status $(srcdir)/omp_lib.h.in + cd $(top_builddir) && $(SHELL) ./config.status $@ ++simdmath_f.h: $(top_builddir)/config.status $(srcdir)/simdmath_f.h.in ++ cd $(top_builddir) && $(SHELL) ./config.status $@ ++simdmath.h: $(top_builddir)/config.status $(srcdir)/simdmath.h.in ++ cd $(top_builddir) && $(SHELL) ./config.status $@ + omp_lib.f90: $(top_builddir)/config.status $(srcdir)/omp_lib.f90.in + cd $(top_builddir) && $(SHELL) ./config.status $@ + libgomp_f.h: $(top_builddir)/config.status $(srcdir)/libgomp_f.h.in +diff --git a/libgomp/configure b/libgomp/configure +index 85fdb4d3f..471c957b7 100755 +--- a/libgomp/configure ++++ b/libgomp/configure +@@ -17064,7 +17064,7 @@ fi + + + +-ac_config_files="$ac_config_files omp.h omp_lib.h omp_lib.f90 libgomp_f.h" ++ac_config_files="$ac_config_files omp.h omp_lib.h simdmath.h simdmath_f.h omp_lib.f90 libgomp_f.h" + + ac_config_files="$ac_config_files Makefile testsuite/Makefile libgomp.spec" + +@@ -18215,6 +18215,8 @@ do + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + "omp.h") CONFIG_FILES="$CONFIG_FILES omp.h" ;; + "omp_lib.h") CONFIG_FILES="$CONFIG_FILES omp_lib.h" ;; ++ "simdmath.h") 
CONFIG_FILES="$CONFIG_FILES simdmath.h" ;; ++ "simdmath_f.h") CONFIG_FILES="$CONFIG_FILES simdmath_f.h" ;; + "omp_lib.f90") CONFIG_FILES="$CONFIG_FILES omp_lib.f90" ;; + "libgomp_f.h") CONFIG_FILES="$CONFIG_FILES libgomp_f.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; +diff --git a/libgomp/configure.ac b/libgomp/configure.ac +index a9b1f3973..1f81a0d30 100644 +--- a/libgomp/configure.ac ++++ b/libgomp/configure.ac +@@ -472,7 +472,7 @@ CFLAGS="$save_CFLAGS" + # Determine what GCC version number to use in filesystem paths. + GCC_BASE_VER + +-AC_CONFIG_FILES(omp.h omp_lib.h omp_lib.f90 libgomp_f.h) ++AC_CONFIG_FILES(omp.h omp_lib.h simdmath.h simdmath_f.h omp_lib.f90 libgomp_f.h) + AC_CONFIG_FILES(Makefile testsuite/Makefile libgomp.spec) + AC_CONFIG_FILES([testsuite/libgomp-test-support.pt.exp:testsuite/libgomp-test-support.exp.in]) + AC_CONFIG_FILES([testsuite/libgomp-site-extra.exp]) +diff --git a/libgomp/simdmath.h.in b/libgomp/simdmath.h.in +new file mode 100644 +index 000000000..ab91a4ec3 +--- /dev/null ++++ b/libgomp/simdmath.h.in +@@ -0,0 +1,40 @@ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#pragma omp declare simd simdlen(2) notinbranch ++double cos (double x); ++ ++#pragma omp declare simd simdlen(4) notinbranch ++float cosf (float x); ++ ++#pragma omp declare simd simdlen(2) notinbranch ++double sin (double x); ++ ++#pragma omp declare simd simdlen(4) notinbranch ++float sinf (float x); ++ ++#pragma omp declare simd simdlen(2) notinbranch ++double exp (double x); ++ ++#pragma omp declare simd simdlen(4) notinbranch ++float expf (float x); ++ ++#pragma omp declare simd simdlen(2) notinbranch ++double log (double x); ++ ++#pragma omp declare simd simdlen(4) notinbranch ++float logf (float x); ++ ++#pragma omp declare simd simdlen(2) notinbranch ++double pow (double x, double y); ++ ++#pragma omp declare simd simdlen(4) notinbranch ++float powf (float x, float y); ++ ++#pragma omp declare simd simdlen(4) notinbranch ++float exp2f (float 
x); ++ ++#ifdef __cplusplus ++} // extern "C" ++#endif +diff --git a/libgomp/simdmath_f.h.in b/libgomp/simdmath_f.h.in +new file mode 100644 +index 000000000..550595015 +--- /dev/null ++++ b/libgomp/simdmath_f.h.in +@@ -0,0 +1,11 @@ ++!GCC$ builtin (cos) attributes simd (notinbranch) ++!GCC$ builtin (cosf) attributes simd (notinbranch) ++!GCC$ builtin (sin) attributes simd (notinbranch) ++!GCC$ builtin (sinf) attributes simd (notinbranch) ++!GCC$ builtin (exp) attributes simd (notinbranch) ++!GCC$ builtin (expf) attributes simd (notinbranch) ++!GCC$ builtin (exp2f) attributes simd (notinbranch) ++!GCC$ builtin (log) attributes simd (notinbranch) ++!GCC$ builtin (logf) attributes simd (notinbranch) ++!GCC$ builtin (pow) attributes simd (notinbranch) ++!GCC$ builtin (powf) attributes simd (notinbranch) +-- +2.33.0 + diff --git a/0021-LoongArch-Enable-vcond_mask_mn-expanders-for-SF-DF-m.patch b/0021-LoongArch-Enable-vcond_mask_mn-expanders-for-SF-DF-m.patch new file mode 100644 index 0000000000000000000000000000000000000000..1fd8d8baf03bf3acd19737390cb5b1b4107fc68d --- /dev/null +++ b/0021-LoongArch-Enable-vcond_mask_mn-expanders-for-SF-DF-m.patch @@ -0,0 +1,418 @@ +From 156d9451a5b20ac336370f1610a949db1bef7a26 Mon Sep 17 00:00:00 2001 +From: Jiahao Xu +Date: Thu, 26 Oct 2023 09:34:32 +0800 +Subject: [PATCH 021/188] LoongArch:Enable vcond_mask_mn expanders for SF/DF + modes. + +If the vcond_mask patterns don't support fp modes, the vector +FP comparison instructions will not be generated. + +gcc/ChangeLog: + + * config/loongarch/lasx.md (vcond_mask_): Change to + (vcond_mask_): this. + * config/loongarch/lsx.md (vcond_mask_): Change to + (vcond_mask_): this. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/vector/lasx/lasx-vcond-1.c: New test. + * gcc.target/loongarch/vector/lasx/lasx-vcond-2.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vcond-1.c: New test. + * gcc.target/loongarch/vector/lsx/lsx-vcond-2.c: New test. 
+--- + gcc/config/loongarch/lasx.md | 14 +-- + gcc/config/loongarch/lsx.md | 14 +-- + .../loongarch/vector/lasx/lasx-vcond-1.c | 64 ++++++++++++++ + .../loongarch/vector/lasx/lasx-vcond-2.c | 87 +++++++++++++++++++ + .../loongarch/vector/lsx/lsx-vcond-1.c | 64 ++++++++++++++ + .../loongarch/vector/lsx/lsx-vcond-2.c | 87 +++++++++++++++++++ + 6 files changed, 316 insertions(+), 14 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c + +diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md +index 442fda246..f0f2dd08d 100644 +--- a/gcc/config/loongarch/lasx.md ++++ b/gcc/config/loongarch/lasx.md +@@ -906,15 +906,15 @@ + }) + + ;; Same as vcond_ +-(define_expand "vcond_mask_" +- [(match_operand:ILASX 0 "register_operand") +- (match_operand:ILASX 1 "reg_or_m1_operand") +- (match_operand:ILASX 2 "reg_or_0_operand") +- (match_operand:ILASX 3 "register_operand")] ++(define_expand "vcond_mask_" ++ [(match_operand:LASX 0 "register_operand") ++ (match_operand:LASX 1 "reg_or_m1_operand") ++ (match_operand:LASX 2 "reg_or_0_operand") ++ (match_operand: 3 "register_operand")] + "ISA_HAS_LASX" + { +- loongarch_expand_vec_cond_mask_expr (mode, +- mode, operands); ++ loongarch_expand_vec_cond_mask_expr (mode, ++ mode, operands); + DONE; + }) + +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index b4e92ae9c..4af32c8df 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -644,15 +644,15 @@ + DONE; + }) + +-(define_expand "vcond_mask_" +- [(match_operand:ILSX 0 "register_operand") +- (match_operand:ILSX 1 "reg_or_m1_operand") +- (match_operand:ILSX 2 "reg_or_0_operand") +- (match_operand:ILSX 3 "register_operand")] ++(define_expand 
"vcond_mask_" ++ [(match_operand:LSX 0 "register_operand") ++ (match_operand:LSX 1 "reg_or_m1_operand") ++ (match_operand:LSX 2 "reg_or_0_operand") ++ (match_operand: 3 "register_operand")] + "ISA_HAS_LSX" + { +- loongarch_expand_vec_cond_mask_expr (mode, +- mode, operands); ++ loongarch_expand_vec_cond_mask_expr (mode, ++ mode, operands); + DONE; + }) + +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c +new file mode 100644 +index 000000000..ee9cb1a1f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c +@@ -0,0 +1,64 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -ftree-vectorize -fno-unroll-loops -fno-vect-cost-model -mlasx" } */ ++ ++#include ++ ++#define DEF_VCOND_VAR(DATA_TYPE, CMP_TYPE, COND, SUFFIX) \ ++ void __attribute__ ((noinline, noclone)) \ ++ vcond_var_##CMP_TYPE##_##SUFFIX (DATA_TYPE *__restrict__ r, \ ++ DATA_TYPE *__restrict__ x, \ ++ DATA_TYPE *__restrict__ y, \ ++ CMP_TYPE *__restrict__ a, \ ++ CMP_TYPE *__restrict__ b, \ ++ int n) \ ++ { \ ++ for (int i = 0; i < n; i++) \ ++ { \ ++ DATA_TYPE xval = x[i], yval = y[i]; \ ++ CMP_TYPE aval = a[i], bval = b[i]; \ ++ r[i] = aval COND bval ? 
xval : yval; \ ++ } \ ++ } ++ ++#define TEST_COND_VAR_SIGNED_ALL(T, COND, SUFFIX) \ ++ T (int8_t, int8_t, COND, SUFFIX) \ ++ T (int16_t, int16_t, COND, SUFFIX) \ ++ T (int32_t, int32_t, COND, SUFFIX) \ ++ T (int64_t, int64_t, COND, SUFFIX) \ ++ T (float, int32_t, COND, SUFFIX##_float) \ ++ T (double, int64_t, COND, SUFFIX##_double) ++ ++#define TEST_COND_VAR_UNSIGNED_ALL(T, COND, SUFFIX) \ ++ T (uint8_t, uint8_t, COND, SUFFIX) \ ++ T (uint16_t, uint16_t, COND, SUFFIX) \ ++ T (uint32_t, uint32_t, COND, SUFFIX) \ ++ T (uint64_t, uint64_t, COND, SUFFIX) \ ++ T (float, uint32_t, COND, SUFFIX##_float) \ ++ T (double, uint64_t, COND, SUFFIX##_double) ++ ++#define TEST_COND_VAR_ALL(T, COND, SUFFIX) \ ++ TEST_COND_VAR_SIGNED_ALL (T, COND, SUFFIX) \ ++ TEST_COND_VAR_UNSIGNED_ALL (T, COND, SUFFIX) ++ ++#define TEST_VAR_ALL(T) \ ++ TEST_COND_VAR_ALL (T, >, _gt) \ ++ TEST_COND_VAR_ALL (T, <, _lt) \ ++ TEST_COND_VAR_ALL (T, >=, _ge) \ ++ TEST_COND_VAR_ALL (T, <=, _le) \ ++ TEST_COND_VAR_ALL (T, ==, _eq) \ ++ TEST_COND_VAR_ALL (T, !=, _ne) ++ ++TEST_VAR_ALL (DEF_VCOND_VAR) ++ ++/* { dg-final { scan-assembler-times {\txvslt\.b} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvslt\.h} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvslt\.w} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvslt\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvsle\.b} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvsle\.h} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvsle\.w} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvsle\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvseq\.b} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvseq\.h} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvseq\.w} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvseq\.d} 4 } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c +new file mode 100644 +index 
000000000..5f40ed44c +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c +@@ -0,0 +1,87 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -ftree-vectorize -fno-vect-cost-model -fno-unroll-loops -mlasx" } */ ++ ++#include ++ ++#define eq(A, B) ((A) == (B)) ++#define ne(A, B) ((A) != (B)) ++#define olt(A, B) ((A) < (B)) ++#define ole(A, B) ((A) <= (B)) ++#define oge(A, B) ((A) >= (B)) ++#define ogt(A, B) ((A) > (B)) ++#define ordered(A, B) (!__builtin_isunordered (A, B)) ++#define unordered(A, B) (__builtin_isunordered (A, B)) ++#define ueq(A, B) (!__builtin_islessgreater (A, B)) ++#define ult(A, B) (__builtin_isless (A, B)) ++#define ule(A, B) (__builtin_islessequal (A, B)) ++#define uge(A, B) (__builtin_isgreaterequal (A, B)) ++#define ugt(A, B) (__builtin_isgreater (A, B)) ++#define nueq(A, B) (__builtin_islessgreater (A, B)) ++#define nult(A, B) (!__builtin_isless (A, B)) ++#define nule(A, B) (!__builtin_islessequal (A, B)) ++#define nuge(A, B) (!__builtin_isgreaterequal (A, B)) ++#define nugt(A, B) (!__builtin_isgreater (A, B)) ++ ++#define TEST_LOOP(TYPE1, TYPE2, CMP) \ ++ void __attribute__ ((noinline, noclone)) \ ++ test_##TYPE1##_##TYPE2##_##CMP##_var (TYPE1 *restrict dest, \ ++ TYPE1 *restrict src, \ ++ TYPE1 fallback, \ ++ TYPE2 *restrict a, \ ++ TYPE2 *restrict b, \ ++ int count) \ ++ { \ ++ for (int i = 0; i < count; ++i) \ ++ {\ ++ TYPE2 aval = a[i]; \ ++ TYPE2 bval = b[i]; \ ++ TYPE1 srcval = src[i]; \ ++ dest[i] = CMP (aval, bval) ? 
srcval : fallback; \ ++ }\ ++ } ++ ++#define TEST_CMP(CMP) \ ++ TEST_LOOP (int32_t, float, CMP) \ ++ TEST_LOOP (uint32_t, float, CMP) \ ++ TEST_LOOP (float, float, CMP) \ ++ TEST_LOOP (int64_t, double, CMP) \ ++ TEST_LOOP (uint64_t, double, CMP) \ ++ TEST_LOOP (double, double, CMP) ++ ++TEST_CMP (eq) ++TEST_CMP (ne) ++TEST_CMP (olt) ++TEST_CMP (ole) ++TEST_CMP (oge) ++TEST_CMP (ogt) ++TEST_CMP (ordered) ++TEST_CMP (unordered) ++TEST_CMP (ueq) ++TEST_CMP (ult) ++TEST_CMP (ule) ++TEST_CMP (uge) ++TEST_CMP (ugt) ++TEST_CMP (nueq) ++TEST_CMP (nult) ++TEST_CMP (nule) ++TEST_CMP (nuge) ++TEST_CMP (nugt) ++ ++/* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.slt\.s} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.slt\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.sle\.s} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.sle\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cor\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cor\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cun\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cun\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cueq\.s} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cueq\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cule\.s} 8 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cule\.d} 8 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cult\.s} 8 } } */ ++/* { dg-final { scan-assembler-times {\txvfcmp\.cult\.d} 8 } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c +new file mode 100644 +index 
000000000..138adccfa +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c +@@ -0,0 +1,64 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -ftree-vectorize -fno-unroll-loops -fno-vect-cost-model -mlsx" } */ ++ ++#include ++ ++#define DEF_VCOND_VAR(DATA_TYPE, CMP_TYPE, COND, SUFFIX) \ ++ void __attribute__ ((noinline, noclone)) \ ++ vcond_var_##CMP_TYPE##_##SUFFIX (DATA_TYPE *__restrict__ r, \ ++ DATA_TYPE *__restrict__ x, \ ++ DATA_TYPE *__restrict__ y, \ ++ CMP_TYPE *__restrict__ a, \ ++ CMP_TYPE *__restrict__ b, \ ++ int n) \ ++ { \ ++ for (int i = 0; i < n; i++) \ ++ { \ ++ DATA_TYPE xval = x[i], yval = y[i]; \ ++ CMP_TYPE aval = a[i], bval = b[i]; \ ++ r[i] = aval COND bval ? xval : yval; \ ++ } \ ++ } ++ ++#define TEST_COND_VAR_SIGNED_ALL(T, COND, SUFFIX) \ ++ T (int8_t, int8_t, COND, SUFFIX) \ ++ T (int16_t, int16_t, COND, SUFFIX) \ ++ T (int32_t, int32_t, COND, SUFFIX) \ ++ T (int64_t, int64_t, COND, SUFFIX) \ ++ T (float, int32_t, COND, SUFFIX##_float) \ ++ T (double, int64_t, COND, SUFFIX##_double) ++ ++#define TEST_COND_VAR_UNSIGNED_ALL(T, COND, SUFFIX) \ ++ T (uint8_t, uint8_t, COND, SUFFIX) \ ++ T (uint16_t, uint16_t, COND, SUFFIX) \ ++ T (uint32_t, uint32_t, COND, SUFFIX) \ ++ T (uint64_t, uint64_t, COND, SUFFIX) \ ++ T (float, uint32_t, COND, SUFFIX##_float) \ ++ T (double, uint64_t, COND, SUFFIX##_double) ++ ++#define TEST_COND_VAR_ALL(T, COND, SUFFIX) \ ++ TEST_COND_VAR_SIGNED_ALL (T, COND, SUFFIX) \ ++ TEST_COND_VAR_UNSIGNED_ALL (T, COND, SUFFIX) ++ ++#define TEST_VAR_ALL(T) \ ++ TEST_COND_VAR_ALL (T, >, _gt) \ ++ TEST_COND_VAR_ALL (T, <, _lt) \ ++ TEST_COND_VAR_ALL (T, >=, _ge) \ ++ TEST_COND_VAR_ALL (T, <=, _le) \ ++ TEST_COND_VAR_ALL (T, ==, _eq) \ ++ TEST_COND_VAR_ALL (T, !=, _ne) ++ ++TEST_VAR_ALL (DEF_VCOND_VAR) ++ ++/* { dg-final { scan-assembler-times {\tvslt\.b} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvslt\.h} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvslt\.w} 4 } } */ ++/* { dg-final { 
scan-assembler-times {\tvslt\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvsle\.b} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvsle\.h} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvsle\.w} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvsle\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvseq\.b} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvseq\.h} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvseq\.w} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvseq\.d} 4 } } */ +diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c +new file mode 100644 +index 000000000..e8fe31f8f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c +@@ -0,0 +1,87 @@ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -ftree-vectorize -fno-vect-cost-model -fno-unroll-loops -mlsx" } */ ++ ++#include ++ ++#define eq(A, B) ((A) == (B)) ++#define ne(A, B) ((A) != (B)) ++#define olt(A, B) ((A) < (B)) ++#define ole(A, B) ((A) <= (B)) ++#define oge(A, B) ((A) >= (B)) ++#define ogt(A, B) ((A) > (B)) ++#define ordered(A, B) (!__builtin_isunordered (A, B)) ++#define unordered(A, B) (__builtin_isunordered (A, B)) ++#define ueq(A, B) (!__builtin_islessgreater (A, B)) ++#define ult(A, B) (__builtin_isless (A, B)) ++#define ule(A, B) (__builtin_islessequal (A, B)) ++#define uge(A, B) (__builtin_isgreaterequal (A, B)) ++#define ugt(A, B) (__builtin_isgreater (A, B)) ++#define nueq(A, B) (__builtin_islessgreater (A, B)) ++#define nult(A, B) (!__builtin_isless (A, B)) ++#define nule(A, B) (!__builtin_islessequal (A, B)) ++#define nuge(A, B) (!__builtin_isgreaterequal (A, B)) ++#define nugt(A, B) (!__builtin_isgreater (A, B)) ++ ++#define TEST_LOOP(TYPE1, TYPE2, CMP) \ ++ void __attribute__ ((noinline, noclone)) \ ++ test_##TYPE1##_##TYPE2##_##CMP##_var (TYPE1 *restrict dest, \ ++ TYPE1 *restrict src, \ ++ TYPE1 fallback, \ ++ TYPE2 *restrict a, \ ++ TYPE2 
*restrict b, \ ++ int count) \ ++ { \ ++ for (int i = 0; i < count; ++i) \ ++ {\ ++ TYPE2 aval = a[i]; \ ++ TYPE2 bval = b[i]; \ ++ TYPE1 srcval = src[i]; \ ++ dest[i] = CMP (aval, bval) ? srcval : fallback; \ ++ }\ ++ } ++ ++#define TEST_CMP(CMP) \ ++ TEST_LOOP (int32_t, float, CMP) \ ++ TEST_LOOP (uint32_t, float, CMP) \ ++ TEST_LOOP (float, float, CMP) \ ++ TEST_LOOP (int64_t, double, CMP) \ ++ TEST_LOOP (uint64_t, double, CMP) \ ++ TEST_LOOP (double, double, CMP) ++ ++TEST_CMP (eq) ++TEST_CMP (ne) ++TEST_CMP (olt) ++TEST_CMP (ole) ++TEST_CMP (oge) ++TEST_CMP (ogt) ++TEST_CMP (ordered) ++TEST_CMP (unordered) ++TEST_CMP (ueq) ++TEST_CMP (ult) ++TEST_CMP (ule) ++TEST_CMP (uge) ++TEST_CMP (ugt) ++TEST_CMP (nueq) ++TEST_CMP (nult) ++TEST_CMP (nule) ++TEST_CMP (nuge) ++TEST_CMP (nugt) ++ ++/* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.slt\.s} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.slt\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.sle\.s} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.sle\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cor\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cor\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cun\.s} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cun\.d} 2 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cueq\.s} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cueq\.d} 4 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cule\.s} 8 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cule\.d} 8 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cult\.s} 8 } } */ ++/* { dg-final { scan-assembler-times {\tvfcmp\.cult\.d} 8 } } */ +-- 
+2.43.0 + diff --git a/0021-StructReorderFields-Structure-reorder-fields.patch b/0021-StructReorderFields-Structure-reorder-fields.patch new file mode 100644 index 0000000000000000000000000000000000000000..8324617e42c94ad2713d7de741f5fa75c6e0dfe9 --- /dev/null +++ b/0021-StructReorderFields-Structure-reorder-fields.patch @@ -0,0 +1,5739 @@ +From 6997c9ad8985f6f0bfc16cdb46e7386af299a226 Mon Sep 17 00:00:00 2001 +From: h00564365 +Date: Mon, 31 Jul 2023 22:01:56 +0800 +Subject: [PATCH 21/22] [StructReorderFields] Structure reorder fields + +Introduce structure fields reordering optimization, that change +fields ordering of C-like structures in order to better utilize spatial +locality. +--- + gcc/common.opt | 4 + + gcc/doc/invoke.texi | 1 + + gcc/gimple-ssa-warn-access.cc | 2 +- + gcc/ipa-free-lang-data.cc | 4 +- + gcc/ipa-struct-reorg/escapes.def | 3 + + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 2545 +++++++++++++---- + gcc/ipa-struct-reorg/ipa-struct-reorg.h | 14 +- + gcc/passes.def | 1 + + gcc/symbol-summary.h | 4 +- + .../struct/rf_DTE_struct_instance_field.c | 75 + + gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c | 94 + + .../gcc.dg/struct/rf_check_ptr_layers_bug.c | 24 + + .../gcc.dg/struct/rf_create_fields_bug.c | 82 + + .../gcc.dg/struct/rf_create_new_func_bug.c | 56 + + .../gcc.dg/struct/rf_ele_minus_verify.c | 60 + + .../gcc.dg/struct/rf_escape_by_base.c | 83 + + .../gcc.dg/struct/rf_external_func_types.c | 69 + + gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c | 72 + + .../gcc.dg/struct/rf_mem_ref_offset.c | 58 + + .../struct/rf_mul_layer_ptr_record_bug.c | 30 + + .../gcc.dg/struct/rf_pass_conflict.c | 109 + + gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c | 87 + + gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c | 71 + + .../gcc.dg/struct/rf_ptr_negate_expr.c | 55 + + gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c | 34 + + gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c | 55 + + gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c | 58 + + .../gcc.dg/struct/rf_rescusive_type.c | 57 + + 
.../struct/rf_rewrite_assign_more_cmp.c | 65 + + .../gcc.dg/struct/rf_rewrite_cond_bug.c | 72 + + .../gcc.dg/struct/rf_rewrite_cond_more_cmp.c | 58 + + .../gcc.dg/struct/rf_rewrite_phi_bug.c | 81 + + gcc/testsuite/gcc.dg/struct/rf_shwi.c | 23 + + gcc/testsuite/gcc.dg/struct/rf_visible_func.c | 92 + + .../gcc.dg/struct/rf_void_ptr_param_func.c | 54 + + gcc/testsuite/gcc.dg/struct/struct-reorg.exp | 15 +- + gcc/testsuite/gcc.dg/struct/struct_reorg-1.c | 8 +- + gcc/testsuite/gcc.dg/struct/struct_reorg-3.c | 9 +- + gcc/timevar.def | 1 + + gcc/tree-pass.h | 1 + + 40 files changed, 3796 insertions(+), 490 deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_external_func_types.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c + create mode 100644 
gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_shwi.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_visible_func.c + create mode 100644 gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index 0c7bd2f6c..98169de7c 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1954,6 +1954,10 @@ fipa-matrix-reorg + Common Ignore + Does nothing. Preserved for backward compatibility. + ++fipa-reorder-fields ++Common Var(flag_ipa_reorder_fields) Init(0) Optimization ++Perform structure fields reorder optimizations. ++ + fipa-struct-reorg + Common Var(flag_ipa_struct_reorg) Init(0) Optimization + Perform structure layout optimizations. +diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi +index 3485cc8af..2b376e0e9 100644 +--- a/gcc/doc/invoke.texi ++++ b/gcc/doc/invoke.texi +@@ -526,6 +526,7 @@ Objective-C and Objective-C++ Dialects}. + -finline-functions -finline-functions-called-once -finline-limit=@var{n} @gol + -finline-small-functions -fipa-modref -fipa-cp -fipa-cp-clone @gol + -fipa-bit-cp -fipa-vrp -fipa-pta -fipa-profile -fipa-pure-const @gol ++-fipa-reorder-fields @gol + -fipa-struct-reorg @gol + -fipa-reference -fipa-reference-addressable @gol + -fipa-stack-alignment -fipa-icf -fira-algorithm=@var{algorithm} @gol +diff --git a/gcc/gimple-ssa-warn-access.cc b/gcc/gimple-ssa-warn-access.cc +index a24645783..7f5c92c96 100644 +--- a/gcc/gimple-ssa-warn-access.cc ++++ b/gcc/gimple-ssa-warn-access.cc +@@ -2198,7 +2198,7 @@ pass_waccess::gate (function *) + In pass waccess, it will traverse all SSA and cause ICE + when handling these unused SSA. So temporarily disable + pass waccess when enable structure optimizations. 
*/ +- if (flag_ipa_struct_reorg) ++ if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) + return false; + + return (warn_free_nonheap_object +diff --git a/gcc/ipa-free-lang-data.cc b/gcc/ipa-free-lang-data.cc +index 5450be9fe..a88381ddb 100644 +--- a/gcc/ipa-free-lang-data.cc ++++ b/gcc/ipa-free-lang-data.cc +@@ -105,7 +105,7 @@ fld_simplified_type_name (tree type) + /* Simplify type will cause that struct A and struct A within + struct B are different type pointers, so skip it in structure + optimizations. */ +- if (flag_ipa_struct_reorg) ++ if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) + return TYPE_NAME (type); + + if (!TYPE_NAME (type) || TREE_CODE (TYPE_NAME (type)) != TYPE_DECL) +@@ -349,7 +349,7 @@ fld_simplified_type (tree t, class free_lang_data_d *fld) + /* Simplify type will cause that struct A and struct A within + struct B are different type pointers, so skip it in structure + optimizations. */ +- if (flag_ipa_struct_reorg) ++ if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) + return t; + if (POINTER_TYPE_P (t)) + return fld_incomplete_type_of (t, fld); +diff --git a/gcc/ipa-struct-reorg/escapes.def b/gcc/ipa-struct-reorg/escapes.def +index d825eb3e6..996a09bac 100644 +--- a/gcc/ipa-struct-reorg/escapes.def ++++ b/gcc/ipa-struct-reorg/escapes.def +@@ -58,5 +58,8 @@ DEF_ESCAPE (escape_ptr_ptr, "Type is used in a pointer to a pointer [not handled + DEF_ESCAPE (escape_return, "Type escapes via a return [not handled yet]") + DEF_ESCAPE (escape_separate_instance, "Type escapes via a separate instance") + DEF_ESCAPE (escape_unhandled_rewrite, "Type escapes via a unhandled rewrite stmt") ++DEF_ESCAPE (escape_via_orig_escape, "Type escapes via a original escape type") ++DEF_ESCAPE (escape_instance_field, "Type escapes via a field of instance") ++DEF_ESCAPE (escape_via_empty_no_orig, "Type escapes via empty and no original") + + #undef DEF_ESCAPE +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc 
+index 9f790b28b..3e5f9538b 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -207,50 +207,88 @@ lang_c_p (void) + if (!language_string) + return false; + +- if (strcmp (language_string, "GNU GIMPLE") == 0) ++ if (lang_GNU_C ()) ++ return true; ++ else if (strcmp (language_string, "GNU GIMPLE") == 0) // For LTO check + { + unsigned i = 0; +- tree t = NULL; +- const char *unit_string = NULL; ++ tree t = NULL_TREE; + + FOR_EACH_VEC_SAFE_ELT (all_translation_units, i, t) + { +- unit_string = TRANSLATION_UNIT_LANGUAGE (t); +- if (!unit_string +- || (strncmp (unit_string, "GNU C", 5) != 0) +- || (!ISDIGIT (unit_string[5]))) ++ language_string = TRANSLATION_UNIT_LANGUAGE (t); ++ if (language_string == NULL ++ || strncmp (language_string, "GNU C", 5) ++ || (language_string[5] != '\0' ++ && !(ISDIGIT (language_string[5])))) + return false; + } + return true; + } +- else if (strncmp (language_string, "GNU C", 5) == 0 +- && ISDIGIT (language_string[5])) +- return true; +- + return false; + } + ++/* Get the number of pointer layers. */ ++ ++int ++get_ptr_layers (tree expr) ++{ ++ int layers = 0; ++ while (POINTER_TYPE_P (expr) || TREE_CODE (expr) == ARRAY_TYPE) ++ { ++ layers++; ++ expr = TREE_TYPE (expr); ++ } ++ return layers; ++} ++ ++/* Comparison pointer layers. */ ++ ++bool ++cmp_ptr_layers (tree a, tree b) ++{ ++ return get_ptr_layers (a) == get_ptr_layers (b); ++} ++ ++/* Return true if the ssa_name comes from the void* parameter. 
*/ ++ ++bool ++is_from_void_ptr_parm (tree ssa_name) ++{ ++ gcc_assert (TREE_CODE (ssa_name) == SSA_NAME); ++ tree var = SSA_NAME_VAR (ssa_name); ++ return (var && TREE_CODE (var) == PARM_DECL ++ && VOID_POINTER_P (TREE_TYPE (ssa_name))); ++} ++ + enum srmode + { + NORMAL = 0, +- COMPLETE_STRUCT_RELAYOUT ++ COMPLETE_STRUCT_RELAYOUT, ++ STRUCT_REORDER_FIELDS + }; + +-static bool is_result_of_mult (tree, tree *, tree); ++static bool is_result_of_mult (tree arg, tree *num, tree struct_size); ++static bool isptrptr (tree type); + +-} // anon namespace ++srmode current_mode; + ++} // anon namespace + + namespace struct_reorg { + ++hash_map > fields_to_finish; ++ + /* Constructor of srfunction. */ + + srfunction::srfunction (cgraph_node *n) + : node (n), + old (NULL), + newnode (NULL), +- newf (NULL) +-{} ++ newf (NULL), ++ is_safe_func (false) ++{ ++} + + /* Add an ARG to the list of arguments for the function. */ + +@@ -400,12 +438,13 @@ srtype::add_field_site (srfield *field) + + /* Constructor of DECL. */ + +-srdecl::srdecl (srtype *tp, tree decl, int argnum) ++srdecl::srdecl (srtype *tp, tree decl, int argnum, tree orig_type) + : type (tp), + decl (decl), + func (NULL_TREE), + argumentnum (argnum), +- visited (false) ++ visited (false), ++ orig_type (orig_type) + { + if (TREE_CODE (decl) == SSA_NAME) + func = current_function_decl; +@@ -429,17 +468,23 @@ srfunction::find_decl (tree decl) + /* Record DECL of the TYPE with argument num ARG. */ + + srdecl * +-srfunction::record_decl (srtype *type, tree decl, int arg) ++srfunction::record_decl (srtype *type, tree decl, int arg, tree orig_type) + { + // Search for the decl to see if it is already there. + srdecl *decl1 = find_decl (decl); + + if (decl1) +- return decl1; ++ { ++ /* Added the orig_type information. 
*/ ++ if (!decl1->orig_type && orig_type && isptrptr (orig_type)) ++ decl1->orig_type = orig_type; ++ return decl1; ++ } + + gcc_assert (type); + +- decl1 = new srdecl (type, decl, arg); ++ orig_type = isptrptr (TREE_TYPE (decl)) ? TREE_TYPE (decl) : orig_type; ++ decl1 = new srdecl (type, decl, arg, isptrptr (orig_type) ? orig_type : NULL); + decls.safe_push (decl1); + return decl1; + } +@@ -503,31 +548,21 @@ srtype::dump (FILE *f) + print_generic_expr (f, type); + fprintf (f, "(%d) { ", TYPE_UID (type)); + if (escapes != does_not_escape) +- fprintf (f, " escapes = \"%s\"\n", escape_reason ()); +- fprintf (f, " fields = { "); ++ fprintf (f, "escapes = \"%s\"", escape_reason ()); ++ fprintf (f, "\nfields = {\n"); + FOR_EACH_VEC_ELT (fields, i, field) +- { +- if (i == 0) +- fprintf (f, "\n "); +- else +- fprintf (f, "\n, "); +- field->dump (f); +- } +- fprintf (f, " }\n "); +- fprintf (f, "\n accesses = {"); ++ field->dump (f); ++ fprintf (f, "}\n "); ++ ++ fprintf (f, "\naccesses = {\n"); + FOR_EACH_VEC_ELT (accesses, i, access) +- { +- fprintf (f, "\n"); +- access->dump (f); +- } +- fprintf (f, " }\n "); +- fprintf (f, "\n functions = {"); ++ access->dump (f); ++ fprintf (f, "}\n "); ++ ++ fprintf (f, "\nfunctions = {\n"); + FOR_EACH_VEC_ELT (functions, i, fn) +- { +- fprintf (f, " \n"); +- fn->simple_dump (f); +- } +- fprintf (f, "\n }\n"); ++ fn->simple_dump (f); ++ fprintf (f, "}\n"); + fprintf (f, "}\n"); + } + +@@ -537,6 +572,8 @@ void + srtype::simple_dump (FILE *f) + { + print_generic_expr (f, type); ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ fprintf (f, "(%d)", TYPE_UID (type)); + } + + /* Analyze the type and decide what to be done with it. 
*/ +@@ -572,6 +609,12 @@ srfield::create_new_fields (tree newtype[max_split], + tree newfields[max_split], + tree newlast[max_split]) + { ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ create_new_reorder_fields (newtype, newfields, newlast); ++ return; ++ } ++ + tree nt[max_split]; + + for (unsigned i = 0; i < max_split; i++) +@@ -620,6 +663,104 @@ srfield::create_new_fields (tree newtype[max_split], + } + } + ++/* Reorder fields. */ ++ ++void ++srfield::reorder_fields (tree newfields[max_split], tree newlast[max_split], ++ tree &field) ++{ ++ /* Reorder fields in descending. ++ newfields: always stores the first member of the chain ++ and with the largest size. ++ field: indicates the node to be inserted. */ ++ if (newfields[clusternum] == NULL) ++ { ++ newfields[clusternum] = field; ++ newlast[clusternum] = field; ++ } ++ else ++ { ++ tree tmp = newfields[clusternum]; ++ if (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))) ++ > tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tmp)))) ++ { ++ DECL_CHAIN (field) = tmp; ++ newfields[clusternum] = field; ++ } ++ else ++ { ++ while (DECL_CHAIN (tmp) ++ && (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field))) ++ <= tree_to_uhwi ( ++ TYPE_SIZE (TREE_TYPE (DECL_CHAIN (tmp)))))) ++ tmp = DECL_CHAIN (tmp); ++ ++ /* Now tmp size > field size ++ insert field: tmp -> xx ==> tmp -> field -> xx. */ ++ DECL_CHAIN (field) = DECL_CHAIN (tmp); // field -> xx ++ DECL_CHAIN (tmp) = field; // tmp -> field ++ } ++ } ++} ++ ++/* Create the new reorder fields for this field. ++ newtype[max_split]: srtype's member variable, ++ newfields[max_split]: created by create_new_type func, ++ newlast[max_split]: created by create_new_type func. */ ++ ++void ++srfield::create_new_reorder_fields (tree newtype[max_split], ++ tree newfields[max_split], ++ tree newlast[max_split]) ++{ ++ /* newtype, corresponding to newtype[max_split] in srtype. */ ++ tree nt = NULL_TREE; ++ if (type == NULL) ++ /* Common var. */ ++ nt = fieldtype; ++ else ++ { ++ /* RECORD_TYPE var. 
*/
++    if (type->has_escaped ())
++      nt = type->type;
++    else
++      nt = type->newtype[0];
++    }
++  tree field = make_node (FIELD_DECL);
++
++  /* Used for recursive types.
++     fields_to_finish: hash_map in the format of "type: {fieldA, fieldB}",
++     key  : indicates the original type,
++     value: field that needs to be updated to newtype.  */
++  if (nt == NULL)
++    {
++      nt = make_node (RECORD_TYPE);
++      auto_vec &fields
++	= fields_to_finish.get_or_insert (inner_type (type->type));
++      fields.safe_push (field);
++    }
++
++  DECL_NAME (field) = DECL_NAME (fielddecl);
++  if (type == NULL)
++    /* Common members do not need to reconstruct.
++       Otherwise, int* -> int** or void* -> void**.  */
++    TREE_TYPE (field) = nt;
++  else
++    TREE_TYPE (field) = reconstruct_complex_type (TREE_TYPE (fielddecl), nt);
++  DECL_SOURCE_LOCATION (field) = DECL_SOURCE_LOCATION (fielddecl);
++  SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl));
++  DECL_USER_ALIGN (field) = DECL_USER_ALIGN (fielddecl);
++  TREE_ADDRESSABLE (field) = TREE_ADDRESSABLE (fielddecl);
++  DECL_NONADDRESSABLE_P (field) = !TREE_ADDRESSABLE (fielddecl);
++  TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (fielddecl);
++  DECL_CONTEXT (field) = newtype[clusternum];
++
++  reorder_fields (newfields, newlast, field);
++
++  /* srfield member variable, which stores the new field decl.  */
++  newfield[0] = field;
++}
++
+ /* Create the new TYPE corresponding to THIS type. */
+ 
+ bool
+@@ -655,7 +796,8 @@ srtype::create_new_type (void)
+   /* If the fields' types did have a change or
+      we are not splitting the struct into two clusters,
+      then just return false and don't change the type.  */
+-  if (!createnewtype && maxclusters == 0)
++  if (!createnewtype && maxclusters == 0
++      && current_mode != STRUCT_REORDER_FIELDS)
+     {
+       newtype[0] = type;
+       return false;
+@@ -664,6 +806,7 @@ srtype::create_new_type (void)
+   /* Should have at most max_split clusters. */
+   gcc_assert (maxclusters < max_split);
+ 
++  /* Record the first member of the field chain. 
*/ + tree newfields[max_split]; + tree newlast[max_split]; + +@@ -682,7 +825,8 @@ srtype::create_new_type (void) + sprintf (id, "%d", i); + if (tname) + { +- name = concat (tname, ".reorg.", id, NULL); ++ name = concat (tname, current_mode == STRUCT_REORDER_FIELDS ++ ? ".reorder." : ".reorg.", id, NULL); + TYPE_NAME (newtype[i]) = build_decl (UNKNOWN_LOCATION, + TYPE_DECL, + get_identifier (name), +@@ -718,6 +862,7 @@ srtype::create_new_type (void) + for (unsigned i = 0; i < maxclusters; i++) + { + print_generic_expr (dump_file, newtype[i]); ++ fprintf (dump_file, "(%d)", TYPE_UID (newtype[i])); + fprintf (dump_file, "\n"); + } + } +@@ -776,8 +921,12 @@ srfunction::create_new_decls (void) + tree newinner[max_split]; + memset (newinner, 0, sizeof (newinner)); + for (unsigned j = 0; j < max_split && type->newtype[j]; j++) +- newtype1[j] = reconstruct_complex_type (TREE_TYPE (decls[i]->decl), +- type->newtype[j]); ++ { ++ newtype1[j] = reconstruct_complex_type ( ++ isptrptr (decls[i]->orig_type) ? decls[i]->orig_type ++ : TREE_TYPE (decls[i]->decl), ++ type->newtype[j]); ++ } + if (inner) + { + srdecl *in = find_decl (inner); +@@ -825,7 +974,8 @@ srfunction::create_new_decls (void) + sprintf (id, "%d", j); + if (tname) + { +- name = concat (tname, ".reorg.", id, NULL); ++ name = concat (tname, current_mode == STRUCT_REORDER_FIELDS ++ ? ".reorder." 
: ".reorg.", id, NULL); + new_name = get_identifier (name); + free (name); + } +@@ -850,7 +1000,6 @@ srfunction::create_new_decls (void) + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Created New decls for decl:\n"); +- fprintf (dump_file, "\n"); + decls[i]->dump (dump_file); + fprintf (dump_file, "\n"); + for (unsigned j = 0; j < max_split && decls[i]->newdecl[j]; j++) +@@ -876,7 +1025,7 @@ srfield::dump (FILE *f) + fprintf (f, ", offset = " HOST_WIDE_INT_PRINT_DEC, offset); + fprintf (f, ", type = "); + print_generic_expr (f, fieldtype); +- fprintf (f, "\n}\n"); ++ fprintf (f, "}\n"); + } + + /* A simplified dump out the field structure to FILE. */ +@@ -908,7 +1057,7 @@ sraccess::dump (FILE *f) + fprintf (f, " in function: %s/%d", node->name (), node->order); + fprintf (f, ", stmt:\n"); + print_gimple_stmt (f, stmt, 0); +- fprintf (f, "\n }\n"); ++ fprintf (f, "}\n"); + } + + /* Dump out the decl structure to FILE. */ +@@ -1023,8 +1172,7 @@ public: + // Constructors + ipa_struct_reorg (void) + : current_function (NULL), +- done_recording (false), +- current_mode (NORMAL) ++ done_recording (false) + {} + + // Fields +@@ -1032,9 +1180,10 @@ public: + auto_vec_del functions; + srglobal globals; + srfunction *current_function; ++ hash_set safe_functions; ++ auto_vec ext_func_types; + + bool done_recording; +- srmode current_mode; + + // Methods + unsigned execute (enum srmode mode); +@@ -1042,6 +1191,7 @@ public: + gimple *stmt = NULL); + + void dump_types (FILE *f); ++ void dump_newtypes (FILE *f); + void dump_types_escaped (FILE *f); + void dump_functions (FILE *f); + void record_accesses (void); +@@ -1049,6 +1199,9 @@ public: + bool walk_field_for_cycles (srtype *); + void prune_escaped_types (void); + void propagate_escape (void); ++ void propagate_escape_via_original (void); ++ void propagate_escape_via_empty_with_no_original (void); ++ void propagate_escape_via_ext_func_types (void); + void analyze_types (void); + void clear_visited 
(void); + bool create_new_types (void); +@@ -1060,8 +1213,11 @@ public: + srdecl *record_var (tree decl, + escape_type escapes = does_not_escape, + int arg = -1); ++ void record_safe_func_with_void_ptr_parm (void); + srfunction *record_function (cgraph_node *node); + srfunction *find_function (cgraph_node *node); ++ void record_field_type (tree field, srtype *base_srtype); ++ void record_struct_field_types (tree base_type, srtype *base_srtype); + srtype *record_type (tree type); + void process_union (tree type); + srtype *find_type (tree type); +@@ -1072,7 +1228,7 @@ public: + void record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt); + void mark_expr_escape (tree, escape_type, gimple *stmt); + bool handled_allocation_stmt (gimple *stmt); +- tree allocate_size (srtype *t, gimple *stmt); ++ tree allocate_size (srtype *t, srdecl *decl, gimple *stmt); + + void mark_decls_in_as_not_needed (tree fn); + +@@ -1087,21 +1243,23 @@ public: + bool ignore_missing_decl = false); + bool rewrite_lhs_rhs (tree lhs, tree rhs, tree newlhs[max_split], + tree newrhs[max_split]); +- bool get_type_field (tree expr, tree &base, bool &indirect, +- srtype *&type, srfield *&field, +- bool &realpart, bool &imagpart, +- bool &address, bool should_create = false, +- bool can_escape = false); ++ bool get_type_field (tree expr, tree &base, bool &indirect, srtype *&type, ++ srfield *&field, bool &realpart, bool &imagpart, ++ bool &address, bool &escape_from_base, ++ bool should_create = false, bool can_escape = false); + bool wholeaccess (tree expr, tree base, tree accesstype, srtype *t); + + void check_alloc_num (gimple *stmt, srtype *type); ++ void check_definition_assign (srdecl *decl, vec &worklist); ++ void check_definition_call (srdecl *decl, vec &worklist); + void check_definition (srdecl *decl, vec &); + void check_uses (srdecl *decl, vec &); + void check_use (srdecl *decl, gimple *stmt, vec &); +- void check_type_and_push (tree newdecl, srtype *type, ++ void check_type_and_push 
(tree newdecl, srdecl *decl, + vec &worklist, gimple *stmt); + void check_other_side (srdecl *decl, tree other, gimple *stmt, + vec &worklist); ++ void check_ptr_layers (tree a_expr, tree b_expr, gimple *stmt); + + void find_vars (gimple *stmt); + void find_var (tree expr, gimple *stmt); +@@ -1703,9 +1861,42 @@ ipa_struct_reorg::dump_types (FILE *f) + srtype *type; + FOR_EACH_VEC_ELT (types, i, type) + { ++ fprintf (f, "======= the %dth type: ======\n", i); + type->dump (f); ++ fprintf (f, "\n"); ++ } ++} ++ ++/* Dump all of the created newtypes to file F. */ ++ ++void ++ipa_struct_reorg::dump_newtypes (FILE *f) ++{ ++ unsigned i = 0; ++ srtype *type = NULL; ++ FOR_EACH_VEC_ELT (types, i, type) ++ { ++ if (type->has_escaped ()) ++ continue; ++ fprintf (f, "======= the %dth newtype: ======\n", i); ++ fprintf (f, "type : "); ++ print_generic_expr (f, type->newtype[0]); ++ fprintf (f, "(%d) ", TYPE_UID (type->newtype[0])); ++ fprintf (f, "{ "); ++ fprintf (f, "\nfields = {\n"); ++ ++ for (tree field = TYPE_FIELDS (TYPE_MAIN_VARIANT (type->newtype[0])); ++ field; field = DECL_CHAIN (field)) ++ { ++ fprintf (f, "field (%d) ", DECL_UID (field)); ++ fprintf (f, "{"); ++ fprintf (f, "type = "); ++ print_generic_expr (f, TREE_TYPE (field)); ++ fprintf (f, "}\n"); ++ } ++ fprintf (f, "}\n "); ++ fprintf (f, "\n"); + } +- fprintf (f, "\n"); + } + + /* Dump all of the recorded types to file F. */ +@@ -1803,6 +1994,8 @@ isarraytype (tree type) + static bool + isptrptr (tree type) + { ++ if (type == NULL) ++ return false; + bool firstptr = false; + while (POINTER_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE) + { +@@ -1817,154 +2010,740 @@ isptrptr (tree type) + return false; + } + +-/* Return the escape type which corresponds to if +- this is an volatile type, an array type or a pointer +- to a pointer type. */ ++/* Adding node to map and stack. 
*/ + +-static escape_type +-escape_type_volatile_array_or_ptrptr (tree type) ++bool ++add_node (tree node, int layers, hash_map &map, ++ auto_vec &stack) + { +- if (isvolatile_type (type)) +- return escape_volatile; +- if (isarraytype (type)) +- return escape_array; +- if (isptrptr (type)) +- return escape_ptr_ptr; +- return does_not_escape; ++ if (TREE_CODE (node) != SSA_NAME) ++ return false; ++ if (map.get (node) == NULL) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, " "); ++ fprintf (dump_file, "add node: \t\t"); ++ print_generic_expr (dump_file, node); ++ fprintf (dump_file, ",\t\tptr layers: %d: \n", layers); ++ } ++ map.put (node, layers); ++ stack.safe_push (node); ++ } ++ else if (*map.get (node) != layers) ++ return false; ++ return true; + } + +-/* Record TYPE if not already recorded. */ ++/* Check the number of pointer layers of the gimple phi in definition. */ + +-srtype * +-ipa_struct_reorg::record_type (tree type) ++bool ++check_def_phi (tree def_node, hash_map &ptr_layers) + { +- unsigned typeuid; +- +- /* Get the main variant as we are going +- to record that type only. */ +- type = TYPE_MAIN_VARIANT (type); +- typeuid = TYPE_UID (type); ++ bool res = true; ++ gimple *def_stmt = SSA_NAME_DEF_STMT (def_node); ++ for (unsigned j = 0; j < gimple_phi_num_args (def_stmt); j++) ++ { ++ tree phi_node = gimple_phi_arg_def (def_stmt, j); ++ if (integer_zerop (phi_node)) ++ continue; ++ if (ptr_layers.get (phi_node) == NULL) ++ return false; ++ res &= *ptr_layers.get (def_node) == *ptr_layers.get (phi_node); ++ } ++ return res; ++} + +- srtype *type1; ++/* Check the number of pointer layers of the gimple assign in definition. 
*/ + +- type1 = find_type (type); +- if (type1) +- return type1; ++bool ++check_def_assign (tree def_node, hash_map &ptr_layers) ++{ ++ bool res = true; ++ gimple *def_stmt = SSA_NAME_DEF_STMT (def_node); ++ gimple_rhs_class rhs_class = gimple_assign_rhs_class (def_stmt); ++ tree_code rhs_code = gimple_assign_rhs_code (def_stmt); ++ tree rhs1 = gimple_assign_rhs1 (def_stmt); ++ tree rhs1_base = TREE_CODE (rhs1) == MEM_REF ? TREE_OPERAND (rhs1, 0) : rhs1; ++ if (ptr_layers.get (rhs1_base) == NULL) ++ return false; ++ if (rhs_class == GIMPLE_SINGLE_RHS || rhs_class == GIMPLE_UNARY_RHS) ++ { ++ if (TREE_CODE (rhs1) == SSA_NAME) ++ res = *ptr_layers.get (def_node) == *ptr_layers.get (rhs1); ++ else if (TREE_CODE (rhs1) == MEM_REF) ++ res = *ptr_layers.get (def_node) ++ == *ptr_layers.get (TREE_OPERAND (rhs1, 0)); ++ else ++ { ++ return false; ++ } ++ } ++ else if (rhs_class == GIMPLE_BINARY_RHS) ++ { ++ if (rhs_code == POINTER_PLUS_EXPR) ++ res = *ptr_layers.get (def_node) == *ptr_layers.get (rhs1); ++ else if (rhs_code == BIT_AND_EXPR) ++ res = *ptr_layers.get (def_node) == *ptr_layers.get (rhs1); ++ else ++ return false; ++ } ++ else ++ return false; ++ return res; ++} + +- /* If already done recording just return NULL. */ +- if (done_recording) +- return NULL; ++/* Check node definition. */ + ++bool ++check_node_def (hash_map &ptr_layers) ++{ ++ bool res = true; + if (dump_file && (dump_flags & TDF_DETAILS)) +- fprintf (dump_file, "Recording new type: %u.\n", typeuid); +- +- type1 = new srtype (type); +- types.safe_push (type1); +- +- /* If the type has an user alignment set, +- that means the user most likely already setup the type. 
*/ +- if (TYPE_USER_ALIGN (type)) +- type1->mark_escape (escape_user_alignment, NULL); +- +- for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ fprintf (dump_file, "\n======== check node definition ========\n"); ++ for (unsigned i = 1; i < num_ssa_names; ++i) + { +- if (TREE_CODE (field) == FIELD_DECL) ++ tree name = ssa_name (i); ++ if (name && ptr_layers.get (name) != NULL) + { +- tree t = TREE_TYPE (field); +- process_union (t); +- if (TREE_CODE (inner_type (t)) == UNION_TYPE +- || TREE_CODE (inner_type (t)) == QUAL_UNION_TYPE) +- type1->mark_escape (escape_union, NULL); +- if (isvolatile_type (t)) +- type1->mark_escape (escape_volatile, NULL); +- escape_type e = escape_type_volatile_array_or_ptrptr (t); +- if (e != does_not_escape) +- type1->mark_escape (e, NULL); +- if (handled_type (t)) +- { +- srtype *t1 = record_type (inner_type (t)); +- srfield *f = type1->find_field (int_byte_position (field)); +- /* We might have an variable sized type which +- we don't set the handle. */ +- if (f) +- { +- f->type = t1; +- t1->add_field_site (f); +- } +- if (t1 == type1 && current_mode != COMPLETE_STRUCT_RELAYOUT) +- type1->mark_escape (escape_rescusive_type, NULL); +- } ++ gimple *def_stmt = SSA_NAME_DEF_STMT (name); ++ if (dump_file && (dump_flags & TDF_DETAILS) ++ && gimple_code (def_stmt) != GIMPLE_DEBUG) ++ print_gimple_stmt (dump_file, def_stmt, 0); ++ ++ if (gimple_code (def_stmt) == GIMPLE_PHI) ++ res = check_def_phi (name, ptr_layers); ++ else if (gimple_code (def_stmt) == GIMPLE_ASSIGN) ++ res = check_def_assign (name, ptr_layers); ++ else if (gimple_code (def_stmt) == GIMPLE_NOP) ++ continue; ++ else ++ return false; + } + } ++ return res; ++} + +- return type1; ++/* Check pointer usage. 
*/ ++ ++bool ++check_record_ptr_usage (gimple *use_stmt, tree ¤t_node, ++ hash_map &ptr_layers, ++ auto_vec &ssa_name_stack) ++{ ++ gimple_rhs_class rhs_class = gimple_assign_rhs_class (use_stmt); ++ tree rhs1 = gimple_assign_rhs1 (use_stmt); ++ tree lhs = gimple_assign_lhs (use_stmt); ++ if (rhs_class != GIMPLE_SINGLE_RHS ++ || (TREE_CODE (rhs1) != COMPONENT_REF && TREE_CODE (rhs1) != SSA_NAME) ++ || (TREE_CODE (lhs) != MEM_REF && TREE_CODE (lhs) != SSA_NAME)) ++ return false; ++ ++ bool res = true; ++ /* MEM[(long int *)a_1] = _1; (record). ++ If lhs is ssa_name, lhs cannot be the current node. ++ _2 = _1->flow; (No record). */ ++ if (TREE_CODE (rhs1) == SSA_NAME) ++ { ++ tree tmp = (rhs1 != current_node) ? rhs1 : lhs; ++ if (TREE_CODE (tmp) == MEM_REF) ++ res = add_node (TREE_OPERAND (tmp, 0), ++ *ptr_layers.get (current_node) + 1, ++ ptr_layers, ssa_name_stack); ++ else ++ res = add_node (tmp, *ptr_layers.get (current_node), ++ ptr_layers, ssa_name_stack); ++ } ++ else if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs1) == COMPONENT_REF) ++ res = !(POINTER_TYPE_P (TREE_TYPE (rhs1))); ++ else ++ res = false; ++ return res; + } + +-/* Mark TYPE as escaping with ESCAPES as the reason. */ ++/* Check and record a single node. 
*/ + +-void +-ipa_struct_reorg::mark_type_as_escape (tree type, +- escape_type escapes, +- gimple *stmt) ++bool ++check_record_single_node (gimple *use_stmt, tree ¤t_node, ++ hash_map &ptr_layers, ++ auto_vec &ssa_name_stack) + { +- if (handled_type (type)) +- { +- srtype *stype = record_type (inner_type (type)); ++ gimple_rhs_class rhs_class = gimple_assign_rhs_class (use_stmt); ++ tree rhs1 = gimple_assign_rhs1 (use_stmt); ++ tree lhs = gimple_assign_lhs (use_stmt); ++ gcc_assert (rhs_class == GIMPLE_SINGLE_RHS || rhs_class == GIMPLE_UNARY_RHS); + +- if (!stype) +- return; ++ if ((TREE_CODE (rhs1) != SSA_NAME && TREE_CODE (rhs1) != MEM_REF) ++ || (TREE_CODE (lhs) != SSA_NAME && TREE_CODE (lhs) != MEM_REF)) ++ return false; + +- stype->mark_escape (escapes, stmt); ++ bool res = true; ++ if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs1) == MEM_REF) ++ /* Add such as: _2 = MEM[(struct arc_t * *)_1]. */ ++ res = add_node (lhs, *ptr_layers.get (current_node) - 1, ++ ptr_layers, ssa_name_stack); ++ else if (TREE_CODE (lhs) == MEM_REF && TREE_CODE (rhs1) == SSA_NAME) ++ { ++ /* Add such as: MEM[(long int *)a_1] = _1. */ ++ if (rhs1 == current_node) ++ res = add_node (TREE_OPERAND (lhs, 0), ++ *ptr_layers.get (current_node) + 1, ++ ptr_layers, ssa_name_stack); ++ else ++ res = add_node (rhs1, *ptr_layers.get (current_node) - 1, ++ ptr_layers, ssa_name_stack); + } ++ else if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs1) == SSA_NAME) ++ res = add_node (lhs, *ptr_layers.get (current_node), ++ ptr_layers, ssa_name_stack); ++ else ++ res = false; ++ ++ return res; + } + +-/* Maybe process the union of type TYPE, such that marking all of the fields' +- types as being escaping. */ ++/* Check and record multiple nodes. 
*/ + +-void +-ipa_struct_reorg::process_union (tree type) ++bool ++check_record_mult_node (gimple *use_stmt, tree ¤t_node, ++ hash_map &ptr_layers, ++ auto_vec &ssa_name_stack) + { +- static hash_set unions_recorded; ++ gimple_rhs_class rhs_class = gimple_assign_rhs_class (use_stmt); ++ tree_code rhs_code = gimple_assign_rhs_code (use_stmt); ++ tree rhs1 = gimple_assign_rhs1 (use_stmt); ++ tree lhs = gimple_assign_lhs (use_stmt); ++ tree rhs2 = gimple_assign_rhs2 (use_stmt); ++ gcc_assert (rhs_class == GIMPLE_BINARY_RHS); ++ ++ if ((rhs_code != POINTER_PLUS_EXPR && rhs_code != POINTER_DIFF_EXPR ++ && rhs_code != BIT_AND_EXPR) ++ || (TREE_CODE (lhs) != SSA_NAME && TREE_CODE (rhs1) != SSA_NAME)) ++ return false; + +- type = inner_type (type); +- if (TREE_CODE (type) != UNION_TYPE +- && TREE_CODE (type) != QUAL_UNION_TYPE) +- return; ++ bool res = true; ++ if (rhs_code == POINTER_PLUS_EXPR) ++ res = add_node (lhs == current_node ? rhs1 : lhs, ++ *ptr_layers.get (current_node), ++ ptr_layers, ssa_name_stack); ++ else if (rhs_code == POINTER_DIFF_EXPR) ++ res = add_node (rhs1 != current_node ? rhs1 : rhs2, ++ *ptr_layers.get (current_node), ++ ptr_layers, ssa_name_stack); ++ else if (rhs_code == BIT_AND_EXPR) ++ { ++ if (TREE_CODE (rhs2) != INTEGER_CST) ++ return false; ++ res = add_node (lhs == current_node ? rhs1 : lhs, ++ *ptr_layers.get (current_node), ++ ptr_layers, ssa_name_stack); ++ } ++ return res; ++} + +- type = TYPE_MAIN_VARIANT (type); ++/* Check whether gimple assign is correctly used and record node. */ + +- /* We already processed this type. 
*/ +- if (unions_recorded.add (type)) +- return; ++bool ++check_record_assign (tree ¤t_node, gimple *use_stmt, ++ hash_map &ptr_layers, ++ auto_vec &ssa_name_stack) ++{ ++ gimple_rhs_class rhs_class = gimple_assign_rhs_class (use_stmt); ++ if (*ptr_layers.get (current_node) == 1) ++ return check_record_ptr_usage (use_stmt, current_node, ++ ptr_layers, ssa_name_stack); ++ else if (*ptr_layers.get (current_node) > 1) ++ { ++ if (rhs_class != GIMPLE_BINARY_RHS ++ && rhs_class != GIMPLE_UNARY_RHS ++ && rhs_class != GIMPLE_SINGLE_RHS) ++ return false; + +- for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ if (rhs_class == GIMPLE_SINGLE_RHS || rhs_class == GIMPLE_UNARY_RHS) ++ return check_record_single_node (use_stmt, current_node, ++ ptr_layers, ssa_name_stack); ++ else if (rhs_class == GIMPLE_BINARY_RHS) ++ return check_record_mult_node (use_stmt, current_node, ++ ptr_layers, ssa_name_stack); ++ } ++ else ++ return false; ++ ++ return true; ++} ++ ++/* Check whether gimple phi is correctly used and record node. */ ++ ++bool ++check_record_phi (tree ¤t_node, gimple *use_stmt, ++ hash_map &ptr_layers, ++ auto_vec &ssa_name_stack) ++{ ++ bool res = true; ++ res &= add_node (gimple_phi_result (use_stmt), *ptr_layers.get (current_node), ++ ptr_layers, ssa_name_stack); ++ ++ for (unsigned i = 0; i < gimple_phi_num_args (use_stmt); i++) + { +- if (TREE_CODE (field) == FIELD_DECL) +- { +- mark_type_as_escape (TREE_TYPE (field), escape_union); +- process_union (TREE_TYPE (field)); +- } ++ if (integer_zerop (gimple_phi_arg_def (use_stmt, i))) ++ continue; ++ res &= add_node (gimple_phi_arg_def (use_stmt, i), ++ *ptr_layers.get (current_node), ++ ptr_layers, ssa_name_stack); + } ++ return res; + } + +-/* Used by record_var function as a callback to walk_tree. +- Mark the type as escaping if it has expressions which +- cannot be converted for global initializations. */ ++/* Check the use of callee. 
*/ + +-static tree +-record_init_types (tree *tp, int *walk_subtrees, void *data) ++bool ++check_callee (cgraph_node *node, gimple *stmt, ++ hash_map &ptr_layers, int input_layers) + { +- ipa_struct_reorg *c = (ipa_struct_reorg *)data; +- switch (TREE_CODE (*tp)) ++ /* caller main () ++ { spec_qsort.constprop (_649, _651); } ++ def spec_qsort.constprop (void * a, size_t n) ++ { spec_qsort.constprop (a_1, _139); } */ ++ /* In safe functions, only call itself is allowed. */ ++ if (node->get_edge (stmt)->callee != node) ++ return false; ++ tree input_node = gimple_call_arg (stmt, 0); ++ if (ptr_layers.get (input_node) == NULL ++ || *ptr_layers.get (input_node) != input_layers) ++ return false; ++ if (SSA_NAME_VAR (input_node) != DECL_ARGUMENTS (node->decl)) ++ return false; ++ ++ for (unsigned i = 1; i < gimple_call_num_args (stmt); i++) + { +- CASE_CONVERT: +- case COMPONENT_REF: +- case VIEW_CONVERT_EXPR: +- case ARRAY_REF: +- { +- tree typeouter = TREE_TYPE (*tp); +- tree typeinner = TREE_TYPE (TREE_OPERAND (*tp, 0)); +- c->mark_type_as_escape (typeouter, escape_via_global_init); ++ if (ptr_layers.get (gimple_call_arg (stmt, i)) != NULL) ++ return false; ++ } ++ return true; ++} ++ ++/* Check the usage of input nodes and related nodes. */ ++ ++bool ++check_node_use (cgraph_node *node, tree current_node, ++ hash_map &ptr_layers, ++ auto_vec &ssa_name_stack, ++ int input_layers) ++{ ++ imm_use_iterator imm_iter; ++ gimple *use_stmt = NULL; ++ bool res = true; ++ /* Use FOR_EACH_IMM_USE_STMT as an indirect edge ++ to search for possible related nodes and push to stack. */ ++ FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, current_node) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS) ++ && gimple_code (use_stmt) != GIMPLE_DEBUG) ++ { ++ fprintf (dump_file, "%*s", 4, ""); ++ print_gimple_stmt (dump_file, use_stmt, 0); ++ } ++ /* For other types of gimple, do not record the node. 
*/ ++ if (res) ++ { ++ if (gimple_code (use_stmt) == GIMPLE_PHI) ++ res = check_record_phi (current_node, use_stmt, ++ ptr_layers, ssa_name_stack); ++ else if (gimple_code (use_stmt) == GIMPLE_ASSIGN) ++ res = check_record_assign (current_node, use_stmt, ++ ptr_layers, ssa_name_stack); ++ else if (gimple_code (use_stmt) == GIMPLE_CALL) ++ res = check_callee (node, use_stmt, ptr_layers, input_layers); ++ else if (gimple_code (use_stmt) == GIMPLE_RETURN) ++ res = false; ++ } ++ } ++ return res; ++} ++ ++/* Trace the pointer layers of void node. */ ++ ++bool ++get_void_node_ptr_layers (tree input, int &input_layers) ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "input type is void* node\n"); ++ imm_use_iterator imm_iter; ++ gimple *use_stmt = NULL; ++ FOR_EACH_IMM_USE_STMT (use_stmt, imm_iter, input) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ print_gimple_stmt (dump_file, use_stmt, 0); ++ if (gimple_code (use_stmt) == GIMPLE_ASSIGN ++ && gimple_assign_rhs_class (use_stmt) == GIMPLE_SINGLE_RHS) ++ { ++ tree rhs1 = gimple_assign_rhs1 (use_stmt); ++ tree lhs = gimple_assign_lhs (use_stmt); ++ if (TREE_CODE (lhs) == SSA_NAME && handled_type (TREE_TYPE (lhs))) ++ { ++ if (TREE_CODE (rhs1) == MEM_REF) ++ { ++ input_layers = get_ptr_layers (TREE_TYPE (lhs)) + 1; ++ return true; ++ } ++ } ++ } ++ } ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "end trace pointer layers of void* node\n"); ++ return false; ++} ++ ++/* Preparing the First Node for DFS. */ ++ ++bool ++set_init_node (cgraph_node *node, cgraph_edge *caller, ++ hash_map &ptr_layers, ++ auto_vec &ssa_name_stack, int &input_layers) ++{ ++ /* Set input_layer ++ caller spec_qsort.constprop (_649, _651) ++ |-- Obtains the actual ptr layer ++ from the input node. 
*/ ++ caller->caller->get_untransformed_body (); ++ if (caller->call_stmt == NULL ++ || gimple_call_num_args (caller->call_stmt) == 0) ++ return false; ++ tree input = gimple_call_arg (caller->call_stmt, 0); ++ if (!(POINTER_TYPE_P (TREE_TYPE (input)) ++ || TREE_CODE (TREE_TYPE (input)) == ARRAY_TYPE)) ++ return false; ++ if (handled_type (TREE_TYPE (input))) ++ input_layers = get_ptr_layers (TREE_TYPE (input)); ++ else ++ { ++ if (VOID_POINTER_P (TREE_TYPE (input))) ++ { ++ if (!get_void_node_ptr_layers (input, input_layers)) ++ return false; ++ } ++ } ++ ++ /* Set initial node ++ def spec_qsort.constprop (void * a, size_t n) ++ |-- Find the initial ssa_name ++ from the parameter node. */ ++ tree parm = DECL_ARGUMENTS (node->decl); ++ for (unsigned j = 1; j < num_ssa_names; ++j) ++ { ++ tree name = ssa_name (j); ++ if (!name || has_zero_uses (name) || virtual_operand_p (name)) ++ continue; ++ if (SSA_NAME_VAR (name) == parm ++ && gimple_code (SSA_NAME_DEF_STMT (name)) == GIMPLE_NOP) ++ { ++ if (!add_node (name, input_layers, ptr_layers, ssa_name_stack)) ++ return false; ++ } ++ } ++ return !ssa_name_stack.is_empty (); ++} ++ ++/* Check the usage of each call. 
*/ ++ ++bool ++check_each_call (cgraph_node *node, cgraph_edge *caller) ++{ ++ hash_map ptr_layers; ++ auto_vec ssa_name_stack; ++ int input_layers = 0; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "======== check each call : %s/%u ========\n", ++ node->name (), node->order); ++ if (!set_init_node (node, caller, ptr_layers, ssa_name_stack, input_layers)) ++ return false; ++ int i = 0; ++ while (!ssa_name_stack.is_empty ()) ++ { ++ tree current_node = ssa_name_stack.pop (); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\ncur node %d: \t", i++); ++ print_generic_expr (dump_file, current_node); ++ fprintf (dump_file, ",\t\tptr layers: %d: \n", ++ *ptr_layers.get (current_node)); ++ } ++ if (get_ptr_layers (TREE_TYPE (current_node)) ++ > *ptr_layers.get (current_node)) ++ return false; ++ if (!check_node_use (node, current_node, ptr_layers, ssa_name_stack, ++ input_layers)) ++ return false; ++ } ++ ++ if (!check_node_def (ptr_layers)) ++ return false; ++ return true; ++} ++ ++/* Filter out function: void func (void*, int n), ++ and the function has no static variable, no structure-related variable, ++ and no global variable is used. 
*/ ++ ++bool ++filter_func (cgraph_node *node) ++{ ++ tree parm = DECL_ARGUMENTS (node->decl); ++ if (!(parm && VOID_POINTER_P (TREE_TYPE (parm)) ++ && VOID_TYPE_P (TREE_TYPE (TREE_TYPE (node->decl))))) ++ return false; ++ ++ for (parm = DECL_CHAIN (parm); parm; parm = DECL_CHAIN (parm)) ++ { ++ if (TREE_CODE (TREE_TYPE (parm)) != INTEGER_TYPE) ++ return false; ++ } ++ ++ if (DECL_STRUCT_FUNCTION (node->decl)->static_chain_decl) ++ return false; ++ ++ tree var = NULL_TREE; ++ unsigned int i = 0; ++ bool res = true; ++ FOR_EACH_LOCAL_DECL (cfun, i, var) ++ { ++ if (TREE_CODE (var) == VAR_DECL && handled_type (TREE_TYPE (var))) ++ res = false; ++ } ++ if (!res) ++ return false; ++ ++ for (unsigned j = 1; j < num_ssa_names; ++j) ++ { ++ tree name = ssa_name (j); ++ if (!name || has_zero_uses (name) || virtual_operand_p (name)) ++ continue; ++ tree var = SSA_NAME_VAR (name); ++ if (var && TREE_CODE (var) == VAR_DECL && is_global_var (var)) ++ return false; ++ } ++ return true; ++} ++ ++/* Check whether the function with the void* parameter and uses the input node ++ safely. ++ In these functions only component_ref can be used to dereference the last ++ layer of the input structure pointer. The hack operation pointer offset ++ after type cast cannot be used. 
++*/ ++ ++bool ++is_safe_func_with_void_ptr_parm (cgraph_node *node) ++{ ++ if (!filter_func (node)) ++ return false; ++ ++ /* Distinguish Recursive Callers ++ normal_callers: main () ++ { spec_qsort.constprop (_649, _651); } ++ definition: spec_qsort.constprop (void * a, size_t n) ++ recursive_callers: { spec_qsort.constprop (a_1, _139); } */ ++ auto_vec callers = node->collect_callers (); ++ auto_vec normal_callers; ++ for (unsigned i = 0; i < callers.length (); i++) ++ { ++ if (callers[i]->caller != node) ++ normal_callers.safe_push (callers[i]); ++ } ++ if (normal_callers.length () == 0) ++ return false; ++ ++ for (unsigned i = 0; i < normal_callers.length (); i++) ++ { ++ if (!check_each_call (node, normal_callers[i])) ++ return false; ++ } ++ return true; ++} ++ ++/* Return the escape type which corresponds to if ++ this is an volatile type, an array type or a pointer ++ to a pointer type. */ ++ ++static escape_type ++escape_type_volatile_array_or_ptrptr (tree type) ++{ ++ if (isvolatile_type (type)) ++ return escape_volatile; ++ if (isarraytype (type)) ++ return escape_array; ++ if (isptrptr (type) && (current_mode != STRUCT_REORDER_FIELDS)) ++ return escape_ptr_ptr; ++ return does_not_escape; ++} ++ ++/* Record field type. */ ++ ++void ++ipa_struct_reorg::record_field_type (tree field, srtype *base_srtype) ++{ ++ tree field_type = TREE_TYPE (field); ++ /* The uid of the type in the structure is different ++ from that outside the structure. */ ++ srtype *field_srtype = record_type (inner_type (field_type)); ++ srfield *field_srfield = base_srtype->find_field (int_byte_position (field)); ++ /* We might have an variable sized type which we don't set the handle. 
*/
++  if (field_srfield)
++    {
++      field_srfield->type = field_srtype;
++      field_srtype->add_field_site (field_srfield);
++    }
++  if (field_srtype == base_srtype && current_mode != COMPLETE_STRUCT_RELAYOUT
++      && current_mode != STRUCT_REORDER_FIELDS)
++    base_srtype->mark_escape (escape_rescusive_type, NULL);
++  /* Types of non-pointer field are difficult to track the correctness
++     of the rewrite when it is used by the escaped type.  */
++  if (current_mode == STRUCT_REORDER_FIELDS
++      && TREE_CODE (field_type) == RECORD_TYPE)
++    field_srtype->mark_escape (escape_instance_field, NULL);
++}
++
++/* Record all field types in the structure.  */
++
++void
++ipa_struct_reorg::record_struct_field_types (tree base_type,
++					     srtype *base_srtype)
++{
++  for (tree field = TYPE_FIELDS (base_type); field; field = DECL_CHAIN (field))
++    {
++      if (TREE_CODE (field) == FIELD_DECL)
++	{
++	  tree field_type = TREE_TYPE (field);
++	  process_union (field_type);
++	  if (TREE_CODE (inner_type (field_type)) == UNION_TYPE
++	      || TREE_CODE (inner_type (field_type)) == QUAL_UNION_TYPE)
++	    base_srtype->mark_escape (escape_union, NULL);
++	  if (isvolatile_type (field_type))
++	    base_srtype->mark_escape (escape_volatile, NULL);
++	  escape_type e = escape_type_volatile_array_or_ptrptr (field_type);
++	  if (e != does_not_escape)
++	    base_srtype->mark_escape (e, NULL);
++	  /* Types of non-pointer field are difficult to track the correctness
++	     of the rewrite when it is used by the escaped type.  */
++	  if (current_mode == STRUCT_REORDER_FIELDS
++	      && TREE_CODE (field_type) == RECORD_TYPE)
++	    base_srtype->mark_escape (escape_instance_field, NULL);
++	  if (handled_type (field_type))
++	    record_field_type (field, base_srtype);
++	}
++    }
++}
++
++/* Record TYPE if not already recorded.  */
++
++srtype *
++ipa_struct_reorg::record_type (tree type)
++{
++  unsigned typeuid;
++
++  /* Get the main variant as we are going
++     to record that type only. 
*/ ++ type = TYPE_MAIN_VARIANT (type); ++ typeuid = TYPE_UID (type); ++ ++ srtype *type1; ++ ++ type1 = find_type (type); ++ if (type1) ++ return type1; ++ ++ /* If already done recording just return NULL. */ ++ if (done_recording) ++ return NULL; ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "Recording new type: %u.\n", typeuid); ++ const char *type_name = get_type_name (type); ++ if (type_name == NULL) ++ fprintf (dump_file, "Recording new type NULL name\n"); ++ else ++ fprintf (dump_file, "Recording new type name: %s.\n", type_name); ++ } ++ ++ type1 = new srtype (type); ++ types.safe_push (type1); ++ ++ /* If the type has an user alignment set, ++ that means the user most likely already setup the type. */ ++ if (TYPE_USER_ALIGN (type)) ++ type1->mark_escape (escape_user_alignment, NULL); ++ ++ record_struct_field_types (type, type1); ++ ++ return type1; ++} ++ ++/* Mark TYPE as escaping with ESCAPES as the reason. */ ++ ++void ++ipa_struct_reorg::mark_type_as_escape (tree type, ++ escape_type escapes, ++ gimple *stmt) ++{ ++ if (handled_type (type)) ++ { ++ srtype *stype = record_type (inner_type (type)); ++ ++ if (!stype) ++ return; ++ ++ stype->mark_escape (escapes, stmt); ++ } ++} ++ ++/* Maybe process the union of type TYPE, such that marking all of the fields' ++ types as being escaping. */ ++ ++void ++ipa_struct_reorg::process_union (tree type) ++{ ++ static hash_set unions_recorded; ++ ++ type = inner_type (type); ++ if (TREE_CODE (type) != UNION_TYPE ++ && TREE_CODE (type) != QUAL_UNION_TYPE) ++ return; ++ ++ type = TYPE_MAIN_VARIANT (type); ++ ++ /* We already processed this type. 
*/ ++ if (unions_recorded.add (type)) ++ return; ++ ++ for (tree field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field)) ++ { ++ if (TREE_CODE (field) == FIELD_DECL) ++ { ++ mark_type_as_escape (TREE_TYPE (field), escape_union); ++ process_union (TREE_TYPE (field)); ++ } ++ } ++} ++ ++/* Used by record_var function as a callback to walk_tree. ++ Mark the type as escaping if it has expressions which ++ cannot be converted for global initializations. */ ++ ++static tree ++record_init_types (tree *tp, int *walk_subtrees, void *data) ++{ ++ ipa_struct_reorg *c = (ipa_struct_reorg *)data; ++ switch (TREE_CODE (*tp)) ++ { ++ CASE_CONVERT: ++ case COMPONENT_REF: ++ case VIEW_CONVERT_EXPR: ++ case ARRAY_REF: ++ { ++ tree typeouter = TREE_TYPE (*tp); ++ tree typeinner = TREE_TYPE (TREE_OPERAND (*tp, 0)); ++ c->mark_type_as_escape (typeouter, escape_via_global_init); + c->mark_type_as_escape (typeinner, escape_via_global_init); + break; + } +@@ -1996,6 +2775,8 @@ ipa_struct_reorg::record_var (tree decl, escape_type escapes, int arg) + + process_union (TREE_TYPE (decl)); + ++ /* Only the structure type RECORD_TYPE is recorded. ++ Therefore, the void* type is filtered out. */ + if (handled_type (TREE_TYPE (decl))) + { + type = record_type (inner_type (TREE_TYPE (decl))); +@@ -2035,7 +2816,8 @@ ipa_struct_reorg::record_var (tree decl, escape_type escapes, int arg) + + /* Separate instance is hard to trace in complete struct + relayout optimization. 
*/ +- if (current_mode == COMPLETE_STRUCT_RELAYOUT ++ if ((current_mode == COMPLETE_STRUCT_RELAYOUT ++ || current_mode == STRUCT_REORDER_FIELDS) + && TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE) + e = escape_separate_instance; + +@@ -2078,11 +2860,9 @@ ipa_struct_reorg::find_var (tree expr, gimple *stmt) + { + tree r = TREE_OPERAND (expr, 0); + tree orig_type = TREE_TYPE (expr); +- if (handled_component_p (r) +- || TREE_CODE (r) == MEM_REF) ++ if (handled_component_p (r) || TREE_CODE (r) == MEM_REF) + { +- while (handled_component_p (r) +- || TREE_CODE (r) == MEM_REF) ++ while (handled_component_p (r) || TREE_CODE (r) == MEM_REF) + { + if (TREE_CODE (r) == VIEW_CONVERT_EXPR) + { +@@ -2114,8 +2894,10 @@ ipa_struct_reorg::find_var (tree expr, gimple *stmt) + srtype *type; + srfield *field; + bool realpart, imagpart, address; ++ bool escape_from_base = false; ++ /* The should_create flag is true, the declaration can be recorded. */ + get_type_field (expr, base, indirect, type, field, +- realpart, imagpart, address, true, true); ++ realpart, imagpart, address, escape_from_base, true, true); + } + + void +@@ -2132,36 +2914,79 @@ ipa_struct_reorg::find_vars (gimple *stmt) + tree lhs = gimple_assign_lhs (stmt); + tree rhs = gimple_assign_rhs1 (stmt); + find_var (gimple_assign_lhs (stmt), stmt); ++ /* _2 = MEM[(struct arc_t * *)_1]; ++ records the right value _1 declaration. */ + find_var (gimple_assign_rhs1 (stmt), stmt); +- if (TREE_CODE (lhs) == SSA_NAME ++ ++ /* Add a safe func mechanism. 
*/ ++ bool l_find = true; ++ bool r_find = true; ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ l_find = !(current_function->is_safe_func ++ && TREE_CODE (lhs) == SSA_NAME ++ && is_from_void_ptr_parm (lhs)); ++ r_find = !(current_function->is_safe_func ++ && TREE_CODE (rhs) == SSA_NAME ++ && is_from_void_ptr_parm (rhs)); ++ } ++ ++ if ((TREE_CODE (lhs) == SSA_NAME) + && VOID_POINTER_P (TREE_TYPE (lhs)) +- && handled_type (TREE_TYPE (rhs))) ++ && handled_type (TREE_TYPE (rhs)) && l_find) + { + srtype *t = find_type (inner_type (TREE_TYPE (rhs))); + srdecl *d = find_decl (lhs); + if (!d && t) + { +- current_function->record_decl (t, lhs, -1); ++ current_function->record_decl (t, lhs, -1, ++ isptrptr (TREE_TYPE (rhs)) ? TREE_TYPE (rhs) : NULL); + tree var = SSA_NAME_VAR (lhs); + if (var && VOID_POINTER_P (TREE_TYPE (var))) +- current_function->record_decl (t, var, -1); ++ current_function->record_decl (t, var, -1, ++ isptrptr (TREE_TYPE (rhs)) ? TREE_TYPE (rhs) : NULL); + } + } ++ /* Find void ssa_name such as: ++ void * _1; struct arc * _2; ++ _2 = _1 + _3; _1 = calloc (100, 40). */ + if (TREE_CODE (rhs) == SSA_NAME + && VOID_POINTER_P (TREE_TYPE (rhs)) +- && handled_type (TREE_TYPE (lhs))) ++ && handled_type (TREE_TYPE (lhs)) && r_find) + { + srtype *t = find_type (inner_type (TREE_TYPE (lhs))); + srdecl *d = find_decl (rhs); + if (!d && t) + { +- current_function->record_decl (t, rhs, -1); ++ current_function->record_decl (t, rhs, -1, ++ isptrptr (TREE_TYPE (lhs)) ? TREE_TYPE (lhs) : NULL); + tree var = SSA_NAME_VAR (rhs); + if (var && VOID_POINTER_P (TREE_TYPE (var))) +- current_function->record_decl (t, var, -1); ++ current_function->record_decl (t, var, -1, ++ isptrptr (TREE_TYPE (lhs)) ? 
TREE_TYPE (lhs) : NULL); + } + } + } ++ else if ((current_mode == STRUCT_REORDER_FIELDS) ++ && (gimple_assign_rhs_code (stmt) == LE_EXPR ++ || gimple_assign_rhs_code (stmt) == LT_EXPR ++ || gimple_assign_rhs_code (stmt) == GE_EXPR ++ || gimple_assign_rhs_code (stmt) == GT_EXPR)) ++ { ++ find_var (gimple_assign_lhs (stmt), stmt); ++ find_var (gimple_assign_rhs1 (stmt), stmt); ++ find_var (gimple_assign_rhs2 (stmt), stmt); ++ } ++ /* Find void ssa_name from stmt such as: _2 = _1 - old_arcs_1. */ ++ else if ((current_mode == STRUCT_REORDER_FIELDS) ++ && gimple_assign_rhs_code (stmt) == POINTER_DIFF_EXPR ++ && types_compatible_p ( ++ TYPE_MAIN_VARIANT (TREE_TYPE (gimple_assign_rhs1 (stmt))), ++ TYPE_MAIN_VARIANT (TREE_TYPE (gimple_assign_rhs2 (stmt))))) ++ { ++ find_var (gimple_assign_rhs1 (stmt), stmt); ++ find_var (gimple_assign_rhs2 (stmt), stmt); ++ } + else + { + /* Because we won't handle these stmts in rewrite phase, +@@ -2232,27 +3057,134 @@ ipa_struct_reorg::find_vars (gimple *stmt) + } + } + +-/* Maybe record access of statement for further analaysis. */ ++/* Maybe record access of statement for further analaysis. */ ++ ++void ++ipa_struct_reorg::maybe_record_stmt (cgraph_node *node, gimple *stmt) ++{ ++ switch (gimple_code (stmt)) ++ { ++ case GIMPLE_ASSIGN: ++ maybe_record_assign (node, as_a (stmt)); ++ break; ++ case GIMPLE_CALL: ++ maybe_record_call (node, as_a (stmt)); ++ break; ++ case GIMPLE_DEBUG: ++ break; ++ case GIMPLE_GOTO: ++ case GIMPLE_SWITCH: ++ break; ++ default: ++ break; ++ } ++} ++ ++/* Calculate the multiplier. 
*/ ++ ++static bool ++calculate_mult_num (tree arg, tree *num, tree struct_size) ++{ ++ gcc_assert (TREE_CODE (arg) == INTEGER_CST); ++ bool sign = false; ++ HOST_WIDE_INT size = TREE_INT_CST_LOW (arg); ++ if (size < 0) ++ { ++ size = -size; ++ sign = true; ++ } ++ tree arg2 = build_int_cst (TREE_TYPE (arg), size); ++ if (integer_zerop (size_binop (FLOOR_MOD_EXPR, arg2, struct_size))) ++ { ++ tree number = size_binop (FLOOR_DIV_EXPR, arg2, struct_size); ++ if (sign) ++ number = build_int_cst (TREE_TYPE (number), -tree_to_shwi (number)); ++ *num = number; ++ return true; ++ } ++ return false; ++} ++ ++/* Trace and calculate the multiplier of PLUS_EXPR. */ ++ ++static bool ++trace_calculate_plus (gimple *size_def_stmt, tree *num, tree struct_size) ++{ ++ gcc_assert (gimple_assign_rhs_code (size_def_stmt) == PLUS_EXPR); ++ ++ tree num1 = NULL_TREE; ++ tree num2 = NULL_TREE; ++ tree arg0 = gimple_assign_rhs1 (size_def_stmt); ++ tree arg1 = gimple_assign_rhs2 (size_def_stmt); ++ if (!is_result_of_mult (arg0, &num1, struct_size) || num1 == NULL_TREE) ++ return false; ++ if (!is_result_of_mult (arg1, &num2, struct_size) || num2 == NULL_TREE) ++ return false; ++ *num = size_binop (PLUS_EXPR, num1, num2); ++ return true; ++} ++ ++/* Trace and calculate the multiplier of MULT_EXPR. */ ++ ++static bool ++trace_calculate_mult (gimple *size_def_stmt, tree *num, tree struct_size) ++{ ++ gcc_assert (gimple_assign_rhs_code (size_def_stmt) == MULT_EXPR); ++ ++ tree arg0 = gimple_assign_rhs1 (size_def_stmt); ++ tree arg1 = gimple_assign_rhs2 (size_def_stmt); ++ tree num1 = NULL_TREE; ++ ++ if (is_result_of_mult (arg0, &num1, struct_size) && num1 != NULL_TREE) ++ { ++ *num = size_binop (MULT_EXPR, arg1, num1); ++ return true; ++ } ++ if (is_result_of_mult (arg1, &num1, struct_size) && num1 != NULL_TREE) ++ { ++ *num = size_binop (MULT_EXPR, arg0, num1); ++ return true; ++ } ++ *num = NULL_TREE; ++ return false; ++} ++ ++/* Trace and calculate the multiplier of NEGATE_EXPR. 
*/ ++ ++static bool ++trace_calculate_negate (gimple *size_def_stmt, tree *num, tree struct_size) ++{ ++ gcc_assert (gimple_assign_rhs_code (size_def_stmt) == NEGATE_EXPR); ++ ++ /* Support NEGATE_EXPR trace: _3 = -_2; _2 = _1 * 72. */ ++ tree num1 = NULL_TREE; ++ tree arg0 = gimple_assign_rhs1 (size_def_stmt); ++ if (!is_result_of_mult (arg0, &num1, struct_size) || num1 == NULL_TREE) ++ return false; ++ tree num0 = build_int_cst (TREE_TYPE (num1), -1); ++ *num = size_binop (MULT_EXPR, num0, num1); ++ return true; ++} ++ ++/* Trace and calculate the multiplier of POINTER_DIFF_EXPR. */ + +-void +-ipa_struct_reorg::maybe_record_stmt (cgraph_node *node, gimple *stmt) ++static bool ++trace_calculate_diff (gimple *size_def_stmt, tree *num) + { +- switch (gimple_code (stmt)) ++ gcc_assert (gimple_assign_rhs_code (size_def_stmt) == NOP_EXPR); ++ ++ /* Support POINTER_DIFF_EXPR trace: ++ _3 = (long unsigned int) _2; _2 = _1 - old_arcs_1. */ ++ tree arg = gimple_assign_rhs1 (size_def_stmt); ++ size_def_stmt = SSA_NAME_DEF_STMT (arg); ++ if (size_def_stmt && is_gimple_assign (size_def_stmt) ++ && gimple_assign_rhs_code (size_def_stmt) == POINTER_DIFF_EXPR) + { +- case GIMPLE_ASSIGN: +- maybe_record_assign (node, as_a (stmt)); +- break; +- case GIMPLE_CALL: +- maybe_record_call (node, as_a (stmt)); +- break; +- case GIMPLE_DEBUG: +- break; +- case GIMPLE_GOTO: +- case GIMPLE_SWITCH: +- break; +- default: +- break; ++ *num = NULL_TREE; ++ return true; + } ++ *num = NULL_TREE; ++ return false; + } + + /* This function checks whether ARG is a result of multiplication +@@ -2269,26 +3201,8 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + + /* If we have a integer, just check if it is a multiply of STRUCT_SIZE. 
*/ + if (TREE_CODE (arg) == INTEGER_CST) +- { +- bool sign = false; +- HOST_WIDE_INT size = TREE_INT_CST_LOW (arg); +- if (size < 0) +- { +- size = -size; +- sign = true; +- } +- tree arg2 = build_int_cst (TREE_TYPE (arg), size); +- if (integer_zerop (size_binop (FLOOR_MOD_EXPR, arg2, struct_size))) +- { +- tree number = size_binop (FLOOR_DIV_EXPR, arg2, struct_size); +- if (sign) +- number = build_int_cst (TREE_TYPE (number), +- -tree_to_shwi (number)); +- *num = number; +- return true; +- } +- return false; +- } ++ return calculate_mult_num (arg, num, struct_size); ++ + gimple *size_def_stmt = SSA_NAME_DEF_STMT (arg); + + /* If the allocation statement was of the form +@@ -2304,43 +3218,20 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + return false; + + // FIXME: this should handle SHIFT also. +- if (gimple_assign_rhs_code (size_def_stmt) == PLUS_EXPR) +- { +- tree num1, num2; +- tree arg0 = gimple_assign_rhs1 (size_def_stmt); +- tree arg1 = gimple_assign_rhs2 (size_def_stmt); +- if (!is_result_of_mult (arg0, &num1, struct_size)) +- return false; +- if (!is_result_of_mult (arg1, &num2, struct_size)) +- return false; +- *num = size_binop (PLUS_EXPR, num1, num2); +- return true; +- } +- else if (gimple_assign_rhs_code (size_def_stmt) == MULT_EXPR) +- { +- tree arg0 = gimple_assign_rhs1 (size_def_stmt); +- tree arg1 = gimple_assign_rhs2 (size_def_stmt); +- tree num1; +- +- if (is_result_of_mult (arg0, &num1, struct_size)) +- { +- *num = size_binop (MULT_EXPR, arg1, num1); +- return true; +- } +- if (is_result_of_mult (arg1, &num1, struct_size)) +- { +- *num = size_binop (MULT_EXPR, arg0, num1); +- return true; +- } +- +- *num = NULL_TREE; +- return false; +- } +- else if (gimple_assign_rhs_code (size_def_stmt) == SSA_NAME) ++ tree_code rhs_code = gimple_assign_rhs_code (size_def_stmt); ++ if (rhs_code == PLUS_EXPR) ++ return trace_calculate_plus (size_def_stmt, num, struct_size); ++ else if (rhs_code == MULT_EXPR) ++ return trace_calculate_mult 
(size_def_stmt, num, struct_size); ++ else if (rhs_code == SSA_NAME) + { + arg = gimple_assign_rhs1 (size_def_stmt); + size_def_stmt = SSA_NAME_DEF_STMT (arg); + } ++ else if (rhs_code == NEGATE_EXPR && current_mode == STRUCT_REORDER_FIELDS) ++ return trace_calculate_negate (size_def_stmt, num, struct_size); ++ else if (rhs_code == NOP_EXPR && current_mode == STRUCT_REORDER_FIELDS) ++ return trace_calculate_diff (size_def_stmt, num); + else + { + *num = NULL_TREE; +@@ -2357,18 +3248,22 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + bool + ipa_struct_reorg::handled_allocation_stmt (gimple *stmt) + { +- if (current_mode == COMPLETE_STRUCT_RELAYOUT ++ if ((current_mode == STRUCT_REORDER_FIELDS) ++ && (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))) ++ return true; ++ if ((current_mode == COMPLETE_STRUCT_RELAYOUT) + && gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) + return true; +- +- if (current_mode != COMPLETE_STRUCT_RELAYOUT) +- if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC) +- || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA) +- || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN)) +- return true; ++ if ((current_mode == NORMAL) ++ && (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA) ++ || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA_WITH_ALIGN))) ++ return true; + return false; + } + +@@ -2376,7 +3271,7 @@ ipa_struct_reorg::handled_allocation_stmt (gimple *stmt) + elements in the array allocated. 
*/ + + tree +-ipa_struct_reorg::allocate_size (srtype *type, gimple *stmt) ++ipa_struct_reorg::allocate_size (srtype *type, srdecl *decl, gimple *stmt) + { + if (!stmt + || gimple_code (stmt) != GIMPLE_CALL +@@ -2396,6 +3291,10 @@ ipa_struct_reorg::allocate_size (srtype *type, gimple *stmt) + + tree struct_size = TYPE_SIZE_UNIT (type->type); + ++ /* Specify the correct size to relax multi-layer pointer. */ ++ if (TREE_CODE (decl->decl) == SSA_NAME && isptrptr (decl->orig_type)) ++ struct_size = TYPE_SIZE_UNIT (decl->orig_type); ++ + tree size = gimple_call_arg (stmt, 0); + + if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) +@@ -2409,8 +3308,10 @@ ipa_struct_reorg::allocate_size (srtype *type, gimple *stmt) + the size of structure. */ + if (operand_equal_p (arg1, struct_size, 0)) + return size; +- /* ??? Check that first argument is a constant equal to +- the size of structure. */ ++ /* ??? Check that first argument is a constant ++ equal to the size of structure. */ ++ /* If the allocated number is equal to the value of struct_size, ++ the value of arg1 is changed to the allocated number. */ + if (operand_equal_p (size, struct_size, 0)) + return arg1; + if (dump_file && (dump_flags & TDF_DETAILS)) +@@ -2453,10 +3354,16 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other, + + if (!d) + { ++ /* MEM[(struct arc *)_1].head = _2; _2 = calloc (100, 104). */ + if (VOID_POINTER_P (TREE_TYPE (side)) + && TREE_CODE (side) == SSA_NAME) +- current_function->record_decl (type, side, -1); ++ { ++ /* The type is other, the declaration is side. */ ++ current_function->record_decl (type, side, -1, ++ isptrptr (TREE_TYPE (other)) ? TREE_TYPE (other) : NULL); ++ } + else ++ /* *_1 = &MEM[(void *)&x + 8B]. 
*/ + type->mark_escape (escape_cast_another_ptr, stmt); + } + else if (type != d->type) +@@ -2464,6 +3371,17 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other, + type->mark_escape (escape_cast_another_ptr, stmt); + d->type->mark_escape (escape_cast_another_ptr, stmt); + } ++ /* x_1 = y.x_nodes; void *x; ++ Directly mark the structure pointer type assigned ++ to the void* variable as escape. */ ++ else if (current_mode == STRUCT_REORDER_FIELDS ++ && TREE_CODE (side) == SSA_NAME ++ && VOID_POINTER_P (TREE_TYPE (side)) ++ && SSA_NAME_VAR (side) ++ && VOID_POINTER_P (TREE_TYPE (SSA_NAME_VAR (side)))) ++ mark_type_as_escape (TREE_TYPE (other), escape_cast_void, stmt); ++ ++ check_ptr_layers (side, other, stmt); + } + + /* Record accesses in an assignment statement STMT. */ +@@ -2486,8 +3404,12 @@ ipa_struct_reorg::maybe_record_assign (cgraph_node *node, gassign *stmt) + if (!handled_type (TREE_TYPE (lhs))) + return; + /* Check if rhs2 is a multiplication of the size of the type. */ ++ /* The size adjustment and judgment of multi-layer pointers ++ are added. */ + if (is_result_of_mult (rhs2, &num, +- TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (lhs))))) ++ isptrptr (TREE_TYPE (lhs)) ++ ? TYPE_SIZE_UNIT (TREE_TYPE (lhs)) ++ : TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (lhs))))) + { + record_stmt_expr (lhs, node, stmt); + record_stmt_expr (rhs1, node, stmt); +@@ -2525,9 +3447,8 @@ ipa_struct_reorg::maybe_record_assign (cgraph_node *node, gassign *stmt) + } + + static bool +-check_mem_ref_offset (tree expr) ++check_mem_ref_offset (tree expr, tree *num) + { +- tree num = NULL; + bool ret = false; + + if (TREE_CODE (expr) != MEM_REF) +@@ -2538,15 +3459,18 @@ check_mem_ref_offset (tree expr) + tree tmp = TREE_OPERAND (expr, 0); + if (TREE_CODE (tmp) == ADDR_EXPR) + tmp = TREE_OPERAND (tmp, 0); +- tree size = TYPE_SIZE_UNIT (inner_type (TREE_TYPE (tmp))); +- ret = is_result_of_mult (field_off, &num, size); ++ /* Specify the correct size for the multi-layer pointer. 
*/ ++ tree size = isptrptr (TREE_TYPE (tmp)) ++ ? TYPE_SIZE_UNIT (TREE_TYPE (tmp)) ++ : TYPE_SIZE_UNIT (inner_type (TREE_TYPE (tmp))); ++ ret = is_result_of_mult (field_off, num, size); + return ret; + } + + static tree + get_ref_base_and_offset (tree &e, HOST_WIDE_INT &offset, + bool &realpart, bool &imagpart, +- tree &accesstype) ++ tree &accesstype, tree *num) + { + offset = 0; + realpart = false; +@@ -2569,22 +3493,29 @@ get_ref_base_and_offset (tree &e, HOST_WIDE_INT &offset, + { + case COMPONENT_REF: + { ++ /* x.a = _1; If expr is the lvalue of stmt, ++ then field type is FIELD_DECL - POINTER_TYPE - RECORD_TYPE. */ + tree field = TREE_OPERAND (expr, 1); + tree field_off = byte_position (field); + if (TREE_CODE (field_off) != INTEGER_CST) + return NULL; + offset += tree_to_shwi (field_off); ++ /* x.a = _1; If expr is the lvalue of stmt, ++ then expr type is VAR_DECL - RECORD_TYPE (fetch x) */ + expr = TREE_OPERAND (expr, 0); + accesstype = NULL; + break; + } + case MEM_REF: + { ++ /* _2 = MEM[(struct s * *)_1]; ++ If expr is the right value of stmt, then field_off type is ++ INTEGER_CST - POINTER_TYPE - POINTER_TYPE - RECORD_TYPE. */ + tree field_off = TREE_OPERAND (expr, 1); + gcc_assert (TREE_CODE (field_off) == INTEGER_CST); + /* So we can mark the types as escaping if different. 
*/ + accesstype = TREE_TYPE (field_off); +- if (!check_mem_ref_offset (expr)) ++ if (!check_mem_ref_offset (expr, num)) + offset += tree_to_uhwi (field_off); + return TREE_OPERAND (expr, 0); + } +@@ -2626,10 +3557,11 @@ ipa_struct_reorg::wholeaccess (tree expr, tree base, + bool + ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + srtype *&type, srfield *&field, +- bool &realpart, bool &imagpart, +- bool &address, bool should_create, ++ bool &realpart, bool &imagpart, bool &address, ++ bool &escape_from_base, bool should_create, + bool can_escape) + { ++ tree num = NULL_TREE; + HOST_WIDE_INT offset; + tree accesstype; + address = false; +@@ -2641,8 +3573,9 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + mark_as_bit_field = true; + } + ++ /* Ref is classified into two types: COMPONENT_REF or MER_REF. */ + base = get_ref_base_and_offset (expr, offset, realpart, imagpart, +- accesstype); ++ accesstype, &num); + + /* Variable access, unkown type. */ + if (base == NULL) +@@ -2680,6 +3613,8 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + if (!t) + return false; + } ++ /* If no such decl is finded ++ or orig_type is not added to this decl, then add it. */ + else if (!d && accesstype) + { + if (!should_create) +@@ -2691,15 +3626,52 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + t = record_type (inner_type (accesstype)); + if (!t || t->has_escaped ()) + return false; +- /* If base is not void* mark the type as escaping. */ +- if (!VOID_POINTER_P (TREE_TYPE (base))) ++ /* If base is not void* mark the type as escaping. ++ release INTEGER_TYPE cast to struct pointer. ++ (If t has escpaed above, then directly returns ++ and doesn't mark escape follow.). */ ++ /* _1 = MEM[(struct arc_t * *)a_1]. ++ then base a_1: ssa_name - pointer_type - integer_type. 
*/ ++ if (current_mode == STRUCT_REORDER_FIELDS) + { +- gcc_assert (can_escape); +- t->mark_escape (escape_cast_another_ptr, NULL); +- return false; ++ bool is_int_ptr = POINTER_TYPE_P (TREE_TYPE (base)) ++ && (TREE_CODE (inner_type (TREE_TYPE (base))) ++ == INTEGER_TYPE); ++ if (!(VOID_POINTER_P (TREE_TYPE (base)) ++ || (current_function->is_safe_func && is_int_ptr))) ++ { ++ gcc_assert (can_escape); ++ t->mark_escape (escape_cast_another_ptr, NULL); ++ return false; ++ } ++ if (TREE_CODE (base) == SSA_NAME ++ && !(current_function->is_safe_func && is_int_ptr)) ++ { ++ /* Add a safe func mechanism. */ ++ if (!(current_function->is_safe_func ++ && is_from_void_ptr_parm (base))) ++ /* Add auxiliary information of the multi-layer pointer ++ type. */ ++ current_function->record_decl (t, base, -1, ++ isptrptr (accesstype) ? accesstype : NULL); ++ } ++ } ++ else ++ { ++ if (!(VOID_POINTER_P (TREE_TYPE (base)))) ++ { ++ gcc_assert (can_escape); ++ t->mark_escape (escape_cast_another_ptr, NULL); ++ return false; ++ } ++ if (TREE_CODE (base) == SSA_NAME) ++ { ++ /* Add auxiliary information of the multi-layer pointer ++ type. */ ++ current_function->record_decl (t, base, -1, ++ isptrptr (accesstype) ? 
accesstype : NULL); ++ } + } +- if (TREE_CODE (base) == SSA_NAME) +- current_function->record_decl (t, base, -1); + } + else if (!d) + return false; +@@ -2707,7 +3679,10 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + t = d->type; + + if (t->has_escaped ()) ++ { ++ escape_from_base = true; + return false; ++ } + + if (mark_as_bit_field) + { +@@ -2716,6 +3691,17 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + return false; + } + ++ /* Escape the operation of fetching field with pointer offset such as: ++ *(&(t->right)) = malloc (0); -> MEM[(struct node * *)_1 + 8B] = malloc (0); ++ */ ++ if (current_mode != NORMAL ++ && (TREE_CODE (expr) == MEM_REF) && (offset != 0)) ++ { ++ gcc_assert (can_escape); ++ t->mark_escape (escape_non_multiply_size, NULL); ++ return false; ++ } ++ + if (wholeaccess (expr, base, accesstype, t)) + { + field = NULL; +@@ -2733,7 +3719,6 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + print_generic_expr (dump_file, expr); + fprintf (dump_file, "\n"); + print_generic_expr (dump_file, base); +- fprintf (dump_file, "\n"); + } + gcc_assert (can_escape); + t->mark_escape (escape_unkown_field, NULL); +@@ -2747,9 +3732,8 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + print_generic_expr (dump_file, f->fieldtype); + fprintf (dump_file, "\naccess type = "); + print_generic_expr (dump_file, TREE_TYPE (expr)); +- fprintf (dump_file, "original expr = "); ++ fprintf (dump_file, "\noriginal expr = "); + print_generic_expr (dump_file, expr); +- fprintf (dump_file, "\n"); + } + gcc_assert (can_escape); + t->mark_escape (escape_unkown_field, NULL); +@@ -2772,8 +3756,9 @@ ipa_struct_reorg::mark_expr_escape (tree expr, escape_type escapes, + srtype *type; + srfield *field; + bool realpart, imagpart, address; ++ bool escape_from_base = false; + if (!get_type_field (expr, base, indirect, type, field, +- realpart, imagpart, address)) ++ realpart, 
imagpart, address, escape_from_base)) + return; + + type->mark_escape (escapes, stmt); +@@ -2846,10 +3831,23 @@ ipa_struct_reorg::maybe_record_call (cgraph_node *node, gcall *stmt) + gimple_call_arg (stmt, i)); + if (d) + d->type->mark_escape (escapes, stmt); ++ ++ if (escapes == escape_external_function ++ && !gimple_call_builtin_p (stmt, BUILT_IN_MEMSET)) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "escape_external_function: "); ++ print_gimple_stmt (dump_file, stmt, 0); ++ } ++ if (d) ++ ext_func_types.safe_push (d->type); ++ } + } + return; + } + ++ /* Get func param it's tree_list. */ + argtype = TYPE_ARG_TYPES (gimple_call_fntype (stmt)); + for (unsigned i = 0; i < gimple_call_num_args (stmt); i++) + { +@@ -2857,9 +3855,14 @@ ipa_struct_reorg::maybe_record_call (cgraph_node *node, gcall *stmt) + if (argtype) + { + tree argtypet = TREE_VALUE (argtype); +- if (!free_or_realloc ++ /* callee_func (_1, _2); ++ Check the callee func, instead of current func. 
*/ ++ if (!(free_or_realloc ++ || (current_mode == STRUCT_REORDER_FIELDS ++ && safe_functions.contains ( ++ node->get_edge (stmt)->callee))) + && VOID_POINTER_P (argtypet)) +- mark_type_as_escape (TREE_TYPE (arg), escape_cast_void); ++ mark_type_as_escape (TREE_TYPE (arg), escape_cast_void, stmt); + else + record_stmt_expr (arg, node, stmt); + } +@@ -2878,12 +3881,22 @@ ipa_struct_reorg::record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt) + srtype *type; + srfield *field; + bool realpart, imagpart, address; ++ bool escape_from_base = false; + if (!get_type_field (expr, base, indirect, type, field, +- realpart, imagpart, address)) ++ realpart, imagpart, address, escape_from_base)) + return; + +- if (!opt_for_fn (current_function_decl, flag_ipa_struct_reorg)) +- type->mark_escape (escape_non_optimize, stmt); ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ if (!opt_for_fn (current_function_decl, flag_ipa_reorder_fields)) ++ type->mark_escape (escape_non_optimize, stmt); ++ } ++ else ++ { ++ if (!opt_for_fn (current_function_decl, flag_ipa_struct_reorg)) ++ type->mark_escape (escape_non_optimize, stmt); ++ } ++ + + /* Record it. 
*/ + type->add_access (new sraccess (stmt, node, type, field)); +@@ -2901,10 +3914,10 @@ ipa_struct_reorg::find_function (cgraph_node *node) + } + + void +-ipa_struct_reorg::check_type_and_push (tree newdecl, srtype *type, +- vec &worklist, +- gimple *stmt) ++ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl, ++ vec &worklist, gimple *stmt) + { ++ srtype *type = decl->type; + if (integer_zerop (newdecl)) + return; + +@@ -2916,7 +3929,8 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srtype *type, + type->mark_escape (escape_cast_another_ptr, stmt); + return; + } +- if (d->type == type) ++ if (d->type == type ++ && cmp_ptr_layers (TREE_TYPE (newdecl), TREE_TYPE (decl->decl))) + return; + + srtype *type1 = d->type; +@@ -2967,7 +3981,9 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srtype *type, + /* Only add to the worklist if the decl is a SSA_NAME. */ + if (TREE_CODE (newdecl) == SSA_NAME) + worklist.safe_push (d); +- if (d->type == type) ++ tree a_decl = d->orig_type ? d->orig_type : TREE_TYPE (newdecl); ++ tree b_decl = decl->orig_type ? decl->orig_type : TREE_TYPE (decl->decl); ++ if (d->type == type && cmp_ptr_layers (a_decl, b_decl)) + return; + + srtype *type1 = d->type; +@@ -3000,6 +4016,96 @@ ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type) + } + } + ++/* Check the definition of gimple assign. */ ++ ++void ++ipa_struct_reorg::check_definition_assign (srdecl *decl, ++ vec &worklist) ++{ ++ tree ssa_name = decl->decl; ++ srtype *type = decl->type; ++ gimple *stmt = SSA_NAME_DEF_STMT (ssa_name); ++ gcc_assert (gimple_code (stmt) == GIMPLE_ASSIGN); ++ /* a) if the SSA_NAME is sourced from a pointer plus, record the pointer and ++ check to make sure the addition was a multiple of the size. ++ check the pointer type too. 
*/ ++ tree rhs = gimple_assign_rhs1 (stmt); ++ if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) ++ { ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree num = NULL_TREE; ++ /* Specify the correct size for the multi-layer pointer. */ ++ if (!is_result_of_mult (rhs2, &num, isptrptr (decl->orig_type) ++ ? TYPE_SIZE_UNIT (decl->orig_type) ++ : TYPE_SIZE_UNIT (type->type))) ++ type->mark_escape (escape_non_multiply_size, stmt); ++ ++ if (TREE_CODE (rhs) == SSA_NAME) ++ check_type_and_push (rhs, decl, worklist, stmt); ++ return; ++ } ++ ++ if (gimple_assign_rhs_code (stmt) == MAX_EXPR ++ || gimple_assign_rhs_code (stmt) == MIN_EXPR ++ || gimple_assign_rhs_code (stmt) == BIT_IOR_EXPR ++ || gimple_assign_rhs_code (stmt) == BIT_XOR_EXPR ++ || gimple_assign_rhs_code (stmt) == BIT_AND_EXPR) ++ { ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ if (TREE_CODE (rhs) == SSA_NAME) ++ check_type_and_push (rhs, decl, worklist, stmt); ++ if (TREE_CODE (rhs2) == SSA_NAME) ++ check_type_and_push (rhs2, decl, worklist, stmt); ++ return; ++ } ++ ++ /* Casts between pointers and integer are escaping. */ ++ if (gimple_assign_cast_p (stmt)) ++ { ++ type->mark_escape (escape_cast_int, stmt); ++ return; ++ } ++ ++ /* d) if the name is from a cast/assignment, make sure it is used as ++ that type or void* ++ i) If void* then push the ssa_name into worklist. */ ++ gcc_assert (gimple_assign_single_p (stmt)); ++ check_other_side (decl, rhs, stmt, worklist); ++ check_ptr_layers (decl->decl, rhs, stmt); ++} ++ ++/* Check the definition of gimple call. */ ++ ++void ++ipa_struct_reorg::check_definition_call (srdecl *decl, vec &worklist) ++{ ++ tree ssa_name = decl->decl; ++ srtype *type = decl->type; ++ gimple *stmt = SSA_NAME_DEF_STMT (ssa_name); ++ gcc_assert (gimple_code (stmt) == GIMPLE_CALL); ++ ++ /* For realloc, check the type of the argument. 
*/ ++ if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) ++ check_type_and_push (gimple_call_arg (stmt, 0), decl, worklist, stmt); ++ ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ if (!handled_allocation_stmt (stmt)) ++ type->mark_escape (escape_return, stmt); ++ if (!allocate_size (type, decl, stmt)) ++ type->mark_escape (escape_non_multiply_size, stmt); ++ } ++ else ++ { ++ if (!handled_allocation_stmt (stmt) ++ || !allocate_size (type, decl, stmt)) ++ type->mark_escape (escape_return, stmt); ++ } ++ ++ check_alloc_num (stmt, type); ++ return; ++} ++ + /* + 2) Check SSA_NAMEs for non type usages (source or use) (worlist of srdecl) + a) if the SSA_NAME is sourced from a pointer plus, record the pointer and +@@ -3029,9 +4135,12 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) + if (var + && TREE_CODE (var) == PARM_DECL + && VOID_POINTER_P (TREE_TYPE (ssa_name))) +- type->mark_escape (escape_cast_void, NULL); ++ type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name)); + return; + } ++ if (current_mode == STRUCT_REORDER_FIELDS && SSA_NAME_VAR (ssa_name) ++ && VOID_POINTER_P (TREE_TYPE (SSA_NAME_VAR (ssa_name)))) ++ type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name)); + gimple *stmt = SSA_NAME_DEF_STMT (ssa_name); + + /* +@@ -3039,17 +4148,7 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) + i) Add SSA_NAME (void*) to the worklist if allocated from realloc + */ + if (gimple_code (stmt) == GIMPLE_CALL) +- { +- /* For realloc, check the type of the argument. */ +- if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) +- check_type_and_push (gimple_call_arg (stmt, 0), type, worklist, stmt); +- +- if (!handled_allocation_stmt (stmt) +- || !allocate_size (type, stmt)) +- type->mark_escape (escape_return, stmt); +- check_alloc_num (stmt, type); +- return; +- } ++ check_definition_call (decl, worklist); + /* If the SSA_NAME is sourced from an inline-asm, + just mark the type as escaping. 
*/ + if (gimple_code (stmt) == GIMPLE_ASM) +@@ -3065,58 +4164,11 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) + { + for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++) + check_type_and_push (gimple_phi_arg_def (stmt, i), +- type, worklist, stmt); +- return; +- } +- +- gcc_assert (gimple_code (stmt) == GIMPLE_ASSIGN); +- /* +- a) if the SSA_NAME is sourced from a pointer plus, record the pointer and +- check to make sure the addition was a multiple of the size. +- check the pointer type too. +- */ +- +- tree rhs = gimple_assign_rhs1 (stmt); +- if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR) +- { +- tree rhs2 = gimple_assign_rhs2 (stmt); +- tree num; +- if (!is_result_of_mult (rhs2, &num, TYPE_SIZE_UNIT (type->type))) +- type->mark_escape (escape_non_multiply_size, stmt); +- +- if (TREE_CODE (rhs) == SSA_NAME) +- check_type_and_push (rhs, type, worklist, stmt); +- return; +- } +- +- if (gimple_assign_rhs_code (stmt) == MAX_EXPR +- || gimple_assign_rhs_code (stmt) == MIN_EXPR +- || gimple_assign_rhs_code (stmt) == BIT_IOR_EXPR +- || gimple_assign_rhs_code (stmt) == BIT_XOR_EXPR +- || gimple_assign_rhs_code (stmt) == BIT_AND_EXPR) +- { +- tree rhs2 = gimple_assign_rhs2 (stmt); +- if (TREE_CODE (rhs) == SSA_NAME) +- check_type_and_push (rhs, type, worklist, stmt); +- if (TREE_CODE (rhs2) == SSA_NAME) +- check_type_and_push (rhs2, type, worklist, stmt); +- return; +- } +- +- /* Casts between pointers and integer are escaping. 
*/ +- if (gimple_assign_cast_p (stmt)) +- { +- type->mark_escape (escape_cast_int, stmt); ++ decl, worklist, stmt); + return; + } +- +- /* +- d) if the name is from a cast/assignment, make sure it is used as that +- type or void* +- i) If void* then push the ssa_name into worklist +- */ +- gcc_assert (gimple_assign_single_p (stmt)); +- check_other_side (decl, rhs, stmt, worklist); ++ if (gimple_code (stmt) == GIMPLE_ASSIGN) ++ check_definition_assign (decl, worklist); + } + + /* Mark the types used by the inline-asm as escaping. +@@ -3149,45 +4201,121 @@ ipa_struct_reorg::check_other_side (srdecl *decl, tree other, gimple *stmt, + { + srtype *type = decl->type; + +- if (TREE_CODE (other) == SSA_NAME +- || DECL_P (other) ++ if (TREE_CODE (other) == SSA_NAME || DECL_P (other) + || TREE_CODE (other) == INTEGER_CST) + { +- check_type_and_push (other, type, worklist, stmt); ++ check_type_and_push (other, decl, worklist, stmt); ++ return; ++ } ++ ++ tree t = TREE_TYPE (other); ++ if (!handled_type (t)) ++ { ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ return; ++ } ++ ++ srtype *t1 = find_type (inner_type (t)); ++ if (t1 == type) ++ { ++ /* In Complete Struct Relayout, if lhs type is the same ++ as rhs type, we could return without any harm. */ ++ if (current_mode == COMPLETE_STRUCT_RELAYOUT) ++ return; ++ ++ tree base; ++ bool indirect; ++ srtype *type1; ++ srfield *field; ++ bool realpart, imagpart, address; ++ bool escape_from_base = false; ++ if (!get_type_field (other, base, indirect, type1, field, ++ realpart, imagpart, address, escape_from_base)) ++ { ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ /* Release INTEGER_TYPE cast to struct pointer. */ ++ bool cast_from_int_ptr = current_function->is_safe_func && base ++ && find_decl (base) == NULL && POINTER_TYPE_P (TREE_TYPE (base)) ++ && (TREE_CODE (inner_type (TREE_TYPE (base))) == INTEGER_TYPE); ++ ++ /* Add a safe func mechanism. 
*/ ++ bool from_void_ptr_parm = current_function->is_safe_func ++ && TREE_CODE (base) == SSA_NAME && is_from_void_ptr_parm (base); ++ ++ /* Release type is used by a type which escapes. */ ++ if (escape_from_base || cast_from_int_ptr || from_void_ptr_parm) ++ return; ++ } ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ } ++ + return; + } + +- tree t = TREE_TYPE (other); +- if (!handled_type (t)) ++ if (t1) ++ t1->mark_escape (escape_cast_another_ptr, stmt); ++ ++ type->mark_escape (escape_cast_another_ptr, stmt); ++} ++ ++ ++/* Get the expr base. */ ++ ++void ++get_base (tree &base, tree expr) ++{ ++ if (TREE_CODE (expr) == MEM_REF) ++ base = TREE_OPERAND (expr, 0); ++ else if (TREE_CODE (expr) == COMPONENT_REF) ++ { ++ base = TREE_OPERAND (expr, 0); ++ base = (TREE_CODE (base) == MEM_REF) ? TREE_OPERAND (base, 0) : base; ++ } ++ else if (TREE_CODE (expr) == ADDR_EXPR) ++ base = TREE_OPERAND (expr, 0); ++} ++ ++/* Check whether the number of pointer layers of exprs is equal, ++ marking unequals as escape. 
*/ ++ ++void ++ipa_struct_reorg::check_ptr_layers (tree a_expr, tree b_expr, gimple *stmt) ++{ ++ if (current_mode != STRUCT_REORDER_FIELDS || current_function->is_safe_func ++ || !(POINTER_TYPE_P (TREE_TYPE (a_expr))) ++ || !(POINTER_TYPE_P (TREE_TYPE (b_expr))) ++ || !handled_type (TREE_TYPE (a_expr)) ++ || !handled_type (TREE_TYPE (b_expr))) ++ return; ++ ++ tree a_base = a_expr; ++ tree b_base = b_expr; ++ get_base (a_base, a_expr); ++ get_base (b_base, b_expr); ++ ++ srdecl *a = find_decl (a_base); ++ srdecl *b = find_decl (b_base); ++ if (a && b == NULL && TREE_CODE (b_expr) != INTEGER_CST) + { +- type->mark_escape (escape_cast_another_ptr, stmt); ++ a->type->mark_escape (escape_cast_another_ptr, stmt); + return; + } +- +- srtype *t1 = find_type (inner_type (t)); +- if (t1 == type) ++ else if (b && a == NULL && TREE_CODE (a_expr) != INTEGER_CST) + { +- /* In Complete Struct Relayout, if lhs type is the same +- as rhs type, we could return without any harm. */ +- if (current_mode == COMPLETE_STRUCT_RELAYOUT) +- return; +- +- tree base; +- bool indirect; +- srtype *type1; +- srfield *field; +- bool realpart, imagpart, address; +- if (!get_type_field (other, base, indirect, type1, field, +- realpart, imagpart, address)) +- type->mark_escape (escape_cast_another_ptr, stmt); +- ++ b->type->mark_escape (escape_cast_another_ptr, stmt); + return; + } ++ else if (a == NULL && b == NULL) ++ return; + +- if (t1) +- t1->mark_escape (escape_cast_another_ptr, stmt); ++ if (cmp_ptr_layers (TREE_TYPE (a_expr), TREE_TYPE (b_expr))) ++ return; + +- type->mark_escape (escape_cast_another_ptr, stmt); ++ if (a) ++ a->type->mark_escape (escape_cast_another_ptr, stmt); ++ if (b) ++ b->type->mark_escape (escape_cast_another_ptr, stmt); + } + + void +@@ -3205,7 +4333,7 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + check to make sure they are used correctly. 
*/ + if (gimple_code (stmt) == GIMPLE_PHI) + { +- check_type_and_push (gimple_phi_result (stmt), type, worklist, stmt); ++ check_type_and_push (gimple_phi_result (stmt), decl, worklist, stmt); + return; + } + +@@ -3221,10 +4349,15 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + tree rhs2 = gimple_cond_rhs (stmt); + tree orhs = rhs1; + enum tree_code code = gimple_cond_code (stmt); +- if (code != EQ_EXPR && code != NE_EXPR +- && (current_mode != COMPLETE_STRUCT_RELAYOUT +- || (code != LT_EXPR && code != LE_EXPR +- && code != GT_EXPR && code != GE_EXPR))) ++ if ((current_mode == NORMAL && (code != EQ_EXPR && code != NE_EXPR)) ++ || (current_mode == COMPLETE_STRUCT_RELAYOUT ++ && (code != EQ_EXPR && code != NE_EXPR ++ && code != LT_EXPR && code != LE_EXPR ++ && code != GT_EXPR && code != GE_EXPR)) ++ || (current_mode == STRUCT_REORDER_FIELDS ++ && (code != EQ_EXPR && code != NE_EXPR ++ && code != LT_EXPR && code != LE_EXPR ++ && code != GT_EXPR && code != GE_EXPR))) + { + mark_expr_escape (rhs1, escape_non_eq, stmt); + mark_expr_escape (rhs2, escape_non_eq, stmt); +@@ -3235,7 +4368,7 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + return; + if (TREE_CODE (orhs) != SSA_NAME) + mark_expr_escape (rhs1, escape_non_eq, stmt); +- check_type_and_push (orhs, type, worklist, stmt); ++ check_type_and_push (orhs, decl, worklist, stmt); + return; + } + +@@ -3254,9 +4387,14 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + tree rhs2 = gimple_assign_rhs2 (stmt); + tree orhs = rhs1; + enum tree_code code = gimple_assign_rhs_code (stmt); +- if (code != EQ_EXPR && code != NE_EXPR +- && (current_mode != COMPLETE_STRUCT_RELAYOUT +- || (code != LT_EXPR && code != LE_EXPR ++ if ((current_mode == NORMAL && (code != EQ_EXPR && code != NE_EXPR)) ++ || (current_mode == COMPLETE_STRUCT_RELAYOUT ++ && (code != EQ_EXPR && code != NE_EXPR ++ && code != LT_EXPR && code != LE_EXPR ++ && code != GT_EXPR && code != GE_EXPR)) ++ || (current_mode == 
STRUCT_REORDER_FIELDS ++ && (code != EQ_EXPR && code != NE_EXPR ++ && code != LT_EXPR && code != LE_EXPR + && code != GT_EXPR && code != GE_EXPR))) + { + mark_expr_escape (rhs1, escape_non_eq, stmt); +@@ -3268,7 +4406,7 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + return; + if (TREE_CODE (orhs) != SSA_NAME) + mark_expr_escape (rhs1, escape_non_eq, stmt); +- check_type_and_push (orhs, type, worklist, stmt); ++ check_type_and_push (orhs, decl, worklist, stmt); + return; + } + +@@ -3282,6 +4420,7 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + check_other_side (decl, lhs, stmt, worklist); + return; + } ++ check_ptr_layers (lhs, rhs, stmt); + } + + if (is_gimple_assign (stmt) +@@ -3291,9 +4430,26 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + tree lhs = gimple_assign_lhs (stmt); + tree num; + check_other_side (decl, lhs, stmt, worklist); +- if (!is_result_of_mult (rhs2, &num, TYPE_SIZE_UNIT (type->type))) ++ check_ptr_layers (lhs, decl->decl, stmt); ++ /* Specify the correct size for the multi-layer pointer. */ ++ if (!is_result_of_mult (rhs2, &num, isptrptr (decl->orig_type) ++ ? TYPE_SIZE_UNIT (decl->orig_type) ++ : TYPE_SIZE_UNIT (type->type))) + type->mark_escape (escape_non_multiply_size, stmt); + } ++ ++ if (is_gimple_assign (stmt) ++ && gimple_assign_rhs_code (stmt) == POINTER_DIFF_EXPR) ++ { ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree other = rhs1 == decl->decl ? 
rhs2 : rhs1; ++ ++ check_other_side (decl, other, stmt, worklist); ++ check_ptr_layers (decl->decl, other, stmt); ++ return; ++ } ++ + } + + /* +@@ -3360,17 +4516,43 @@ ipa_struct_reorg::record_function (cgraph_node *node) + if (DECL_PRESERVE_P (node->decl)) + escapes = escape_marked_as_used; + else if (!node->local) +- escapes = escape_visible_function; ++ { ++ if (current_mode != STRUCT_REORDER_FIELDS) ++ escapes = escape_visible_function; ++ if (current_mode == STRUCT_REORDER_FIELDS && node->externally_visible) ++ escapes = escape_visible_function; ++ } + else if (!node->can_change_signature) + escapes = escape_cannot_change_signature; + else if (!tree_versionable_function_p (node->decl)) + escapes = escape_noclonable_function; +- else if (!opt_for_fn (node->decl, flag_ipa_struct_reorg)) +- escapes = escape_non_optimize; ++ ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ if (!opt_for_fn (node->decl, flag_ipa_reorder_fields)) ++ escapes = escape_non_optimize; ++ } ++ else if (current_mode == NORMAL || current_mode == COMPLETE_STRUCT_RELAYOUT) ++ { ++ if (!opt_for_fn (node->decl, flag_ipa_struct_reorg)) ++ escapes = escape_non_optimize; ++ } + + basic_block bb; + gimple_stmt_iterator si; + ++ /* Add a safe func mechanism. */ ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ current_function->is_safe_func = safe_functions.contains (node); ++ if (dump_file) ++ { ++ fprintf (dump_file, "\nfunction %s/%u: is_safe_func = %d\n", ++ node->name (), node->order, ++ current_function->is_safe_func); ++ } ++ } ++ + /* Record the static chain decl. */ + if (fn->static_chain_decl) + { +@@ -3503,6 +4685,42 @@ ipa_struct_reorg::record_function (cgraph_node *node) + return sfn; + } + ++ ++/* For a function that contains the void* parameter and passes the structure ++ pointer, check whether the function uses the input node safely. 
++ For these functions, the void* parameter and related ssa_name are not ++ recorded in record_function (), and the input structure type is not escaped. ++*/ ++ ++void ++ipa_struct_reorg::record_safe_func_with_void_ptr_parm () ++{ ++ cgraph_node *node = NULL; ++ FOR_EACH_FUNCTION (node) ++ { ++ if (!node->real_symbol_p ()) ++ continue; ++ if (node->definition) ++ { ++ if (!node->has_gimple_body_p () || node->inlined_to) ++ continue; ++ node->get_body (); ++ function *fn = DECL_STRUCT_FUNCTION (node->decl); ++ if (!fn) ++ continue; ++ push_cfun (fn); ++ if (is_safe_func_with_void_ptr_parm (node)) ++ { ++ safe_functions.add (node); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\nfunction %s/%u is safe function.\n", ++ node->name (), node->order); ++ } ++ pop_cfun (); ++ } ++ } ++} ++ + /* Record all accesses for all types including global variables. */ + + void +@@ -3534,6 +4752,10 @@ ipa_struct_reorg::record_accesses (void) + record_var (var->decl, escapes); + } + ++ /* Add a safe func mechanism. */ ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ record_safe_func_with_void_ptr_parm (); ++ + FOR_EACH_FUNCTION (cnode) + { + if (!cnode->real_symbol_p ()) +@@ -3552,11 +4774,14 @@ ipa_struct_reorg::record_accesses (void) + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "all types (before pruning):\n"); ++ fprintf (dump_file, "\n"); ++ fprintf (dump_file, "==============================================\n\n"); ++ fprintf (dump_file, "======== all types (before pruning): ========\n\n"); + dump_types (dump_file); +- fprintf (dump_file, "all functions (before pruning):\n"); ++ fprintf (dump_file, "======= all functions (before pruning): =======\n"); + dump_functions (dump_file); + } ++ /* If record_var () is called later, new types will not be recorded. 
*/ + done_recording = true; + } + +@@ -3580,6 +4805,7 @@ ipa_struct_reorg::walk_field_for_cycles (srtype *type) + { + if (!field->type) + ; ++ /* If there are two members of the same structure pointer type? */ + else if (field->type->visited + || walk_field_for_cycles (field->type)) + { +@@ -3658,22 +4884,113 @@ ipa_struct_reorg::propagate_escape (void) + } while (changed); + } + ++/* If the original type (with members) has escaped, corresponding to the ++ struct pointer type (empty member) in the structure fields ++ should also marked as escape. */ ++ ++void ++ipa_struct_reorg::propagate_escape_via_original (void) ++{ ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ for (unsigned j = 0; j < types.length (); j++) ++ { ++ const char *type1 = get_type_name (types[i]->type); ++ const char *type2 = get_type_name (types[j]->type); ++ if (type1 == NULL || type2 == NULL) ++ continue; ++ if (type1 == type2 && types[j]->has_escaped ()) ++ { ++ if (!types[i]->has_escaped ()) ++ types[i]->mark_escape (escape_via_orig_escape, NULL); ++ break; ++ } ++ } ++ } ++} ++ ++/* Marks the fileds as empty and does not have the original structure type ++ is escape. */ ++ ++void ++ipa_struct_reorg::propagate_escape_via_empty_with_no_original (void) ++{ ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ if (types[i]->fields.length () == 0) ++ { ++ for (unsigned j = 0; j < types.length (); j++) ++ { ++ if (i != j && types[j]->fields.length ()) ++ { ++ const char *type1 = get_type_name (types[i]->type); ++ const char *type2 = get_type_name (types[j]->type); ++ if (type1 != NULL && type2 != NULL && type1 == type2) ++ break; ++ } ++ if (j == types.length () - 1) ++ types[i]->mark_escape (escape_via_empty_no_orig, NULL); ++ } ++ } ++ } ++} ++ ++/* Escape propagation is performed on types that escape through external ++ functions. 
*/ ++ ++void ++ipa_struct_reorg::propagate_escape_via_ext_func_types (void) ++{ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ fprintf (dump_file, "\n propagate_escape_via_ext_func_types: \n\n"); ++ unsigned i = 0; ++ hash_set visited_types; ++ while (i < ext_func_types.length ()) ++ { ++ visited_types.add (ext_func_types[i]); ++ unsigned j = 0; ++ srfield * field; ++ FOR_EACH_VEC_ELT (ext_func_types[i]->fields, j, field) ++ { ++ if (field->type) ++ { ++ if (!field->type->has_escaped ()) ++ field->type->mark_escape (escape_dependent_type_escapes, NULL); ++ if (!visited_types.contains (field->type)) ++ ext_func_types.safe_push (field->type); ++ } ++ } ++ i++; ++ } ++} ++ + /* Prune the escaped types and their decls from what was recorded. */ + + void + ipa_struct_reorg::prune_escaped_types (void) + { +- if (current_mode != COMPLETE_STRUCT_RELAYOUT) ++ if (current_mode != COMPLETE_STRUCT_RELAYOUT ++ && current_mode != STRUCT_REORDER_FIELDS) + { ++ /* Detect recusive types and mark them as escaping. */ + detect_cycles (); ++ /* If contains or is contained by the escape type, ++ mark them as escaping. */ + propagate_escape (); + } ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ propagate_escape_via_original (); ++ propagate_escape_via_empty_with_no_original (); ++ propagate_escape_via_ext_func_types (); ++ } + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "all types (after prop but before pruning):\n"); ++ fprintf (dump_file, "==============================================\n\n"); ++ fprintf (dump_file, "all types (after prop but before pruning): \n\n"); + dump_types (dump_file); +- fprintf (dump_file, "all functions (after prop but before pruning):\n"); ++ fprintf (dump_file, "all functions (after prop but before pruning): \n"); + dump_functions (dump_file); + } + +@@ -3721,7 +5038,8 @@ ipa_struct_reorg::prune_escaped_types (void) + /* Prune functions which don't refer to any variables any more. 
*/ + if (function->args.is_empty () + && function->decls.is_empty () +- && function->globals.is_empty ()) ++ && function->globals.is_empty () ++ && current_mode != STRUCT_REORDER_FIELDS) + { + delete function; + functions.ordered_remove (i); +@@ -3746,24 +5064,31 @@ ipa_struct_reorg::prune_escaped_types (void) + + /* Prune types that escape, all references to those types + will have been removed in the above loops. */ +- for (unsigned i = 0; i < types.length ();) ++ /* The escape type is not deleted in STRUCT_REORDER_FIELDS, ++ Then the type that contains the escaped type fields ++ can find complete information. */ ++ if (current_mode != STRUCT_REORDER_FIELDS) + { +- srtype *type = types[i]; +- if (type->has_escaped ()) ++ for (unsigned i = 0; i < types.length ();) + { +- /* All references to this type should have been removed now. */ +- delete type; +- types.ordered_remove (i); ++ srtype *type = types[i]; ++ if (type->has_escaped ()) ++ { ++ /* All references to this type should have been removed now. 
*/ ++ delete type; ++ types.ordered_remove (i); ++ } ++ else ++ i++; + } +- else +- i++; + } + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "all types (after pruning):\n"); ++ fprintf (dump_file, "==============================================\n\n"); ++ fprintf (dump_file, "========= all types (after pruning): =========\n\n"); + dump_types (dump_file); +- fprintf (dump_file, "all functions (after pruning):\n"); ++ fprintf (dump_file, "======== all functions (after pruning): ========\n"); + dump_functions (dump_file); + } + } +@@ -3790,6 +5115,26 @@ ipa_struct_reorg::create_new_types (void) + for (unsigned i = 0; i < types.length (); i++) + newtypes += types[i]->create_new_type (); + ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ for (unsigned i = 0; i < types.length (); i++) ++ { ++ auto_vec *fields = fields_to_finish.get (types[i]->type); ++ if (fields) ++ { ++ for (unsigned j = 0; j < fields->length (); j++) ++ { ++ tree field = (*fields)[j]; ++ TREE_TYPE (field) ++ = reconstruct_complex_type (TREE_TYPE (field), ++ types[i]->newtype[0]); ++ } ++ } ++ } ++ for (unsigned i = 0; i < types.length (); i++) ++ layout_type (types[i]->newtype[0]); ++ } ++ + if (dump_file) + { + if (newtypes) +@@ -3894,7 +5239,8 @@ ipa_struct_reorg::create_new_args (cgraph_node *new_node) + char *name = NULL; + if (tname) + { +- name = concat (tname, ".reorg.0", NULL); ++ name = concat (tname, current_mode == STRUCT_REORDER_FIELDS ++ ? ".reorder.0" : ".reorg.0", NULL); + new_name = get_identifier (name); + free (name); + } +@@ -3980,9 +5326,10 @@ ipa_struct_reorg::create_new_functions (void) + fprintf (dump_file, "\n"); + } + statistics_counter_event (NULL, "Create new function", 1); +- new_node = node->create_version_clone_with_body (vNULL, NULL, +- NULL, NULL, NULL, +- "struct_reorg"); ++ new_node = node->create_version_clone_with_body ( ++ vNULL, NULL, NULL, NULL, NULL, ++ current_mode == STRUCT_REORDER_FIELDS ++ ? 
"struct_reorder" : "struct_reorg"); + new_node->can_change_signature = node->can_change_signature; + new_node->make_local (); + f->newnode = new_node; +@@ -4026,6 +5373,7 @@ ipa_struct_reorg::rewrite_expr (tree expr, + srfield *f; + bool realpart, imagpart; + bool address; ++ bool escape_from_base = false; + + tree newbase[max_split]; + memset (newexpr, 0, sizeof (tree[max_split])); +@@ -4043,8 +5391,8 @@ ipa_struct_reorg::rewrite_expr (tree expr, + return true; + } + +- if (!get_type_field (expr, base, indirect, t, f, +- realpart, imagpart, address)) ++ if (!get_type_field (expr, base, indirect, t, f, realpart, imagpart, ++ address, escape_from_base)) + return false; + + /* If the type is not changed, then just return false. */ +@@ -4107,7 +5455,38 @@ ipa_struct_reorg::rewrite_expr (tree expr, + if (address) + newbase1 = build_fold_addr_expr (newbase1); + if (indirect) +- newbase1 = build_simple_mem_ref (newbase1); ++ { ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ /* Supports the MEM_REF offset. ++ _1 = MEM[(struct arc *)ap_1 + 72B].flow; ++ Old rewrite: _1 = ap.reorder.0_8->flow; ++ New rewrite: _1 ++ = MEM[(struct arc.reorder.0 *)ap.reorder.0_8 + 64B].flow; ++ */ ++ HOST_WIDE_INT offset_tmp = 0; ++ HOST_WIDE_INT mem_offset = 0; ++ bool realpart_tmp = false; ++ bool imagpart_tmp = false; ++ tree accesstype_tmp = NULL_TREE; ++ tree num = NULL_TREE; ++ get_ref_base_and_offset (expr, offset_tmp, ++ realpart_tmp, imagpart_tmp, ++ accesstype_tmp, &num); ++ ++ tree ptype = TREE_TYPE (newbase1); ++ /* Specify the correct size for the multi-layer pointer. */ ++ tree size = isptrptr (ptype) ? TYPE_SIZE_UNIT (ptype) : ++ TYPE_SIZE_UNIT (inner_type (ptype)); ++ mem_offset = (num != NULL) ++ ? 
TREE_INT_CST_LOW (num) * tree_to_shwi (size) ++ : 0; ++ newbase1 = build2 (MEM_REF, TREE_TYPE (ptype), newbase1, ++ build_int_cst (ptype, mem_offset)); ++ } ++ else ++ newbase1 = build_simple_mem_ref (newbase1); ++ } + newexpr[i] = build3 (COMPONENT_REF, TREE_TYPE (f->newfield[i]), + newbase1, f->newfield[i], NULL_TREE); + if (imagpart) +@@ -4151,8 +5530,12 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + return remove; + } + +- if (gimple_assign_rhs_code (stmt) == EQ_EXPR +- || gimple_assign_rhs_code (stmt) == NE_EXPR) ++ if ((current_mode != STRUCT_REORDER_FIELDS ++ && (gimple_assign_rhs_code (stmt) == EQ_EXPR ++ || gimple_assign_rhs_code (stmt) == NE_EXPR)) ++ || (current_mode == STRUCT_REORDER_FIELDS ++ && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) ++ == tcc_comparison))) + { + tree rhs1 = gimple_assign_rhs1 (stmt); + tree rhs2 = gimple_assign_rhs2 (stmt); +@@ -4160,6 +5543,10 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + tree newrhs2[max_split]; + tree_code rhs_code = gimple_assign_rhs_code (stmt); + tree_code code = rhs_code == EQ_EXPR ? BIT_AND_EXPR : BIT_IOR_EXPR; ++ if (current_mode == STRUCT_REORDER_FIELDS ++ && rhs_code != EQ_EXPR && rhs_code != NE_EXPR) ++ code = rhs_code; ++ + if (!rewrite_lhs_rhs (rhs1, rhs2, newrhs1, newrhs2)) + return false; + tree newexpr = NULL_TREE; +@@ -4201,20 +5588,78 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + internal_error ( + "The rhs of pointer is not a multiplicate and it slips through"); + +- num = gimplify_build1 (gsi, NOP_EXPR, sizetype, num); ++ /* Add the judgment of num, support for POINTER_DIFF_EXPR. ++ _6 = _4 + _5; ++ _5 = (long unsigned int) _3; ++ _3 = _1 - old_2. 
*/ ++ if (current_mode != STRUCT_REORDER_FIELDS ++ || (current_mode == STRUCT_REORDER_FIELDS && (num != NULL))) ++ num = gimplify_build1 (gsi, NOP_EXPR, sizetype, num); + for (unsigned i = 0; i < max_split && newlhs[i]; i++) + { + gimple *new_stmt; + +- tree newsize = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (newlhs[i]))); +- newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num, newsize); +- new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR, +- newrhs[i], newsize); ++ if (num != NULL) ++ { ++ tree newsize = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (newlhs[i]))); ++ newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num, ++ newsize); ++ new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR, ++ newrhs[i], newsize); ++ } ++ else ++ new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR, ++ newrhs[i], rhs2); + gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); + remove = true; + } + return remove; + } ++ ++ /* Support POINTER_DIFF_EXPR rewriting. */ ++ if (current_mode == STRUCT_REORDER_FIELDS ++ && gimple_assign_rhs_code (stmt) == POINTER_DIFF_EXPR) ++ { ++ tree rhs1 = gimple_assign_rhs1 (stmt); ++ tree rhs2 = gimple_assign_rhs2 (stmt); ++ tree newrhs1[max_split]; ++ tree newrhs2[max_split]; ++ ++ bool r1 = rewrite_expr (rhs1, newrhs1); ++ bool r2 = rewrite_expr (rhs2, newrhs2); ++ ++ if (r1 != r2) ++ { ++ /* Handle NULL pointer specially. */ ++ if (r1 && !r2 && integer_zerop (rhs2)) ++ { ++ r2 = true; ++ for (unsigned i = 0; i < max_split && newrhs1[i]; i++) ++ newrhs2[i] = fold_convert (TREE_TYPE (newrhs1[i]), rhs2); ++ } ++ else if (r2 && !r1 && integer_zerop (rhs1)) ++ { ++ r1 = true; ++ for (unsigned i = 0; i < max_split && newrhs2[i]; i++) ++ newrhs1[i] = fold_convert (TREE_TYPE (newrhs2[i]), rhs1); ++ } ++ else ++ return false; ++ } ++ else if (!r1 && !r2) ++ return false; ++ ++ /* The two operands always have pointer/reference type. 
*/ ++ for (unsigned i = 0; i < max_split && newrhs1[i] && newrhs2[i]; i++) ++ { ++ gimple_assign_set_rhs1 (stmt, newrhs1[i]); ++ gimple_assign_set_rhs2 (stmt, newrhs2[i]); ++ update_stmt (stmt); ++ } ++ remove = false; ++ return remove; ++ } ++ + if (gimple_assign_rhs_class (stmt) == GIMPLE_SINGLE_RHS) + { + tree lhs = gimple_assign_lhs (stmt); +@@ -4222,9 +5667,8 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "rewriting statement:\n"); ++ fprintf (dump_file, "\nrewriting stamtenet:\n"); + print_gimple_stmt (dump_file, stmt, 0); +- fprintf (dump_file, "\n"); + } + tree newlhs[max_split]; + tree newrhs[max_split]; +@@ -4271,7 +5715,7 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + if (!decl || !decl->type) + return false; + srtype *type = decl->type; +- tree num = allocate_size (type, stmt); ++ tree num = allocate_size (type, decl, stmt); + gcc_assert (num); + memset (newrhs1, 0, sizeof (newrhs1)); + +@@ -4291,7 +5735,10 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + /* Go through each new lhs. */ + for (unsigned i = 0; i < max_split && decl->newdecl[i]; i++) + { +- tree newsize = TYPE_SIZE_UNIT (type->type); ++ /* Specify the correct size for the multi-layer pointer. */ ++ tree newsize = isptrptr (decl->orig_type) ++ ? TYPE_SIZE_UNIT (decl->orig_type) ++ : TYPE_SIZE_UNIT (type->newtype[i]); + gimple *g; + /* Every allocation except for calloc needs + the size multiplied out. */ +@@ -4352,6 +5799,23 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + gcc_assert (node); + srfunction *f = find_function (node); + ++ /* Add a safe func mechanism. 
*/ ++ if (current_mode == STRUCT_REORDER_FIELDS && f && f->is_safe_func) ++ { ++ tree expr = gimple_call_arg (stmt, 0); ++ tree newexpr[max_split]; ++ if (!rewrite_expr (expr, newexpr)) ++ return false; ++ ++ if (newexpr[1] == NULL) ++ { ++ gimple_call_set_arg (stmt, 0, newexpr[0]); ++ update_stmt (stmt); ++ return false; ++ } ++ return false; ++ } ++ + /* Did not find the function or had not cloned it return saying don't + change the function call. */ + if (!f || !f->newf) +@@ -4437,7 +5901,7 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + && TREE_CODE (gimple_vdef (new_stmt)) == SSA_NAME) + SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt; + +- gsi_replace (gsi, new_stmt, false); ++ gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT); + + /* We need to defer cleaning EH info on the new statement to + fixup-cfg. We may not have dominator information at this point +@@ -4450,7 +5914,7 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + add_stmt_to_eh_lp (new_stmt, lp_nr); + } + +- return false; ++ return true; + } + + /* Rewrite the conditional statement STMT. Return TRUE if the +@@ -4462,50 +5926,52 @@ ipa_struct_reorg::rewrite_cond (gcond *stmt, gimple_stmt_iterator *gsi) + tree_code rhs_code = gimple_cond_code (stmt); + + /* Handle only equals or not equals conditionals. 
*/ +- if (rhs_code != EQ_EXPR +- && rhs_code != NE_EXPR) ++ if ((current_mode != STRUCT_REORDER_FIELDS ++ && (rhs_code != EQ_EXPR && rhs_code != NE_EXPR)) ++ || (current_mode == STRUCT_REORDER_FIELDS ++ && TREE_CODE_CLASS (rhs_code) != tcc_comparison)) + return false; +- tree rhs1 = gimple_cond_lhs (stmt); +- tree rhs2 = gimple_cond_rhs (stmt); ++ tree lhs = gimple_cond_lhs (stmt); ++ tree rhs = gimple_cond_rhs (stmt); + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "COND: Rewriting\n"); ++ fprintf (dump_file, "\nCOND: Rewriting\n"); + print_gimple_stmt (dump_file, stmt, 0); ++ print_generic_expr (dump_file, lhs); + fprintf (dump_file, "\n"); +- print_generic_expr (dump_file, rhs1); +- fprintf (dump_file, "\n"); +- print_generic_expr (dump_file, rhs2); ++ print_generic_expr (dump_file, rhs); + fprintf (dump_file, "\n"); + } + +- tree newrhs1[max_split]; +- tree newrhs2[max_split]; +- tree_code code = rhs_code == EQ_EXPR ? BIT_AND_EXPR : BIT_IOR_EXPR; +- if (!rewrite_lhs_rhs (rhs1, rhs2, newrhs1, newrhs2)) ++ tree newlhs[max_split] = {}; ++ tree newrhs[max_split] = {}; ++ if (!rewrite_lhs_rhs (lhs, rhs, newlhs, newrhs)) + { + if (dump_file && (dump_flags & TDF_DETAILS)) +- fprintf (dump_file, "\nDid nothing to statement.\n"); ++ fprintf (dump_file, "Did nothing to statement.\n"); + return false; + } + +- tree newexpr = NULL_TREE; +- for (unsigned i = 0; i < max_split && newrhs1[i]; i++) ++ /* Old rewrite: if (x_1 != 0B) ++ -> _1 = x.reorder.0_1 != 0B; if (_1 != 1) ++ The logic is incorrect. 
++ New rewrite: if (x_1 != 0B) ++ -> if (x.reorder.0_1 != 0B); */ ++ for (unsigned i = 0; i < max_split && (newlhs[i] || newrhs[i]); i++) + { +- tree expr = gimplify_build2 (gsi, rhs_code, boolean_type_node, +- newrhs1[i], newrhs2[i]); +- if (!newexpr) +- newexpr = expr; +- else +- newexpr = gimplify_build2 (gsi, code, boolean_type_node, +- newexpr, expr); +- } +- +- if (newexpr) +- { +- gimple_cond_set_lhs (stmt, newexpr); +- gimple_cond_set_rhs (stmt, boolean_true_node); ++ if (newlhs[i]) ++ gimple_cond_set_lhs (stmt, newlhs[i]); ++ if (newrhs[i]) ++ gimple_cond_set_rhs (stmt, newrhs[i]); + update_stmt (stmt); ++ ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "replaced with:\n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ fprintf (dump_file, "\n"); ++ } + } + return false; + } +@@ -4516,6 +5982,9 @@ ipa_struct_reorg::rewrite_cond (gcond *stmt, gimple_stmt_iterator *gsi) + bool + ipa_struct_reorg::rewrite_debug (gimple *stmt, gimple_stmt_iterator *) + { ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ /* Delete debug gimple now. */ ++ return true; + bool remove = false; + if (gimple_debug_bind_p (stmt)) + { +@@ -4568,7 +6037,7 @@ ipa_struct_reorg::rewrite_phi (gphi *phi) + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "\nrewriting PHI:"); ++ fprintf (dump_file, "\nrewriting PHI:\n"); + print_gimple_stmt (dump_file, phi, 0); + } + +@@ -4579,7 +6048,15 @@ ipa_struct_reorg::rewrite_phi (gphi *phi) + { + tree newrhs[max_split]; + phi_arg_d rhs = *gimple_phi_arg (phi, i); +- rewrite_expr (rhs.def, newrhs); ++ ++ /* Handling the NULL phi Node. 
*/ ++ bool r = rewrite_expr (rhs.def, newrhs); ++ if (!r && integer_zerop (rhs.def)) ++ { ++ for (unsigned i = 0; i < max_split && newlhs[i]; i++) ++ newrhs[i] = fold_convert (TREE_TYPE (newlhs[i]), rhs.def); ++ } ++ + for (unsigned j = 0; j < max_split && newlhs[j]; j++) + { + SET_PHI_ARG_DEF (newphi[j], i, newrhs[j]); +@@ -4590,7 +6067,7 @@ ipa_struct_reorg::rewrite_phi (gphi *phi) + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "\ninto\n:"); ++ fprintf (dump_file, "into:\n"); + for (unsigned i = 0; i < max_split && newlhs[i]; i++) + { + print_gimple_stmt (dump_file, newphi[i], 0); +@@ -4663,12 +6140,59 @@ ipa_struct_reorg::rewrite_functions (void) + /* Create new types, if we did not create any new types, + then don't rewrite any accesses. */ + if (!create_new_types ()) +- return 0; ++ { ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ for (unsigned i = 0; i < functions.length (); i++) ++ { ++ srfunction *f = functions[i]; ++ cgraph_node *node = f->node; ++ push_cfun (DECL_STRUCT_FUNCTION (node->decl)); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\nNo rewrite:\n"); ++ dump_function_to_file (current_function_decl, dump_file, ++ dump_flags | TDF_VOPS); ++ } ++ pop_cfun (); ++ } ++ } ++ return 0; ++ } ++ ++ if (current_mode == STRUCT_REORDER_FIELDS && dump_file) ++ { ++ fprintf (dump_file, "=========== all created newtypes: ===========\n\n"); ++ dump_newtypes (dump_file); ++ } + + if (functions.length ()) + { + retval = TODO_remove_functions; + create_new_functions (); ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ prune_escaped_types (); ++ } ++ } ++ ++ if (current_mode == STRUCT_REORDER_FIELDS) ++ { ++ for (unsigned i = 0; i < functions.length (); i++) ++ { ++ srfunction *f = functions[i]; ++ cgraph_node *node = f->node; ++ push_cfun (DECL_STRUCT_FUNCTION (node->decl)); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "==== Before create decls: %dth_%s ====\n\n", 
++ i, f->node->name ()); ++ if (current_function_decl) ++ dump_function_to_file (current_function_decl, dump_file, ++ dump_flags | TDF_VOPS); ++ } ++ pop_cfun (); ++ } + } + + create_new_decls (); +@@ -4691,9 +6215,12 @@ ipa_struct_reorg::rewrite_functions (void) + + if (dump_file && (dump_flags & TDF_DETAILS)) + { +- fprintf (dump_file, "\nBefore rewrite:\n"); ++ fprintf (dump_file, "\nBefore rewrite: %dth_%s\n", ++ i, f->node->name ()); + dump_function_to_file (current_function_decl, dump_file, + dump_flags | TDF_VOPS); ++ fprintf (dump_file, "\n======== Start to rewrite: %dth_%s ========\n", ++ i, f->node->name ()); + } + FOR_EACH_BB_FN (bb, cfun) + { +@@ -4761,9 +6288,10 @@ ipa_struct_reorg::rewrite_functions (void) + + free_dominance_info (CDI_DOMINATORS); + +- if (dump_file && (dump_flags & TDF_DETAILS)) ++ if (dump_file) + { +- fprintf (dump_file, "\nAfter rewrite:\n"); ++ fprintf (dump_file, "\nAfter rewrite: %dth_%s\n", ++ i, f->node->name ()); + dump_function_to_file (current_function_decl, dump_file, + dump_flags | TDF_VOPS); + } +@@ -4809,16 +6337,21 @@ ipa_struct_reorg::execute (enum srmode mode) + { + unsigned int ret = 0; + +- if (mode == NORMAL) ++ if (dump_file) ++ fprintf (dump_file, "\n\n====== ipa_struct_reorg level %d ======\n\n", ++ mode); ++ ++ if (mode == NORMAL || mode == STRUCT_REORDER_FIELDS) + { +- current_mode = NORMAL; +- /* FIXME: If there is a top-level inline-asm, ++ current_mode = mode; ++ /* If there is a top-level inline-asm, + the pass immediately returns. */ + if (symtab->first_asm_symbol ()) + return 0; + record_accesses (); + prune_escaped_types (); +- analyze_types (); ++ if (current_mode == NORMAL) ++ analyze_types (); + + ret = rewrite_functions (); + } +@@ -4881,7 +6414,55 @@ pass_ipa_struct_reorg::gate (function *) + && flag_lto_partition == LTO_PARTITION_ONE + /* Only enable struct optimizations in C since other + languages' grammar forbid. 
*/ +- && lang_c_p ()); ++ && lang_c_p () ++ /* Only enable struct optimizations in lto or whole_program. */ ++ && (in_lto_p || flag_whole_program)); ++} ++ ++const pass_data pass_data_ipa_reorder_fields = ++{ ++ SIMPLE_IPA_PASS, // type ++ "reorder_fields", // name ++ OPTGROUP_NONE, // optinfo_flags ++ TV_IPA_REORDER_FIELDS, // tv_id ++ 0, // properties_required ++ 0, // properties_provided ++ 0, // properties_destroyed ++ 0, // todo_flags_start ++ 0, // todo_flags_finish ++}; ++ ++class pass_ipa_reorder_fields : public simple_ipa_opt_pass ++{ ++public: ++ pass_ipa_reorder_fields (gcc::context *ctxt) ++ : simple_ipa_opt_pass (pass_data_ipa_reorder_fields, ctxt) ++ {} ++ ++ /* opt_pass methods: */ ++ virtual bool gate (function *); ++ virtual unsigned int execute (function *) ++ { ++ unsigned int ret = 0; ++ ret = ipa_struct_reorg ().execute (STRUCT_REORDER_FIELDS); ++ return ret; ++ } ++ ++}; // class pass_ipa_reorder_fields ++ ++bool ++pass_ipa_reorder_fields::gate (function *) ++{ ++ return (optimize >= 3 ++ && flag_ipa_reorder_fields ++ /* Don't bother doing anything if the program has errors. */ ++ && !seen_error () ++ && flag_lto_partition == LTO_PARTITION_ONE ++ /* Only enable struct optimizations in C since other ++ languages' grammar forbid. */ ++ && lang_c_p () ++ /* Only enable struct optimizations in lto or whole_program. 
*/ ++ && (in_lto_p || flag_whole_program)); + } + + } // anon namespace +@@ -4891,4 +6472,10 @@ simple_ipa_opt_pass * + make_pass_ipa_struct_reorg (gcc::context *ctxt) + { + return new pass_ipa_struct_reorg (ctxt); +-} +\ No newline at end of file ++} ++ ++simple_ipa_opt_pass * ++make_pass_ipa_reorder_fields (gcc::context *ctxt) ++{ ++ return new pass_ipa_reorder_fields (ctxt); ++} +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +index ef7f4c780..6f85adeb4 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +@@ -68,12 +68,14 @@ struct srfunction + auto_vec args; + auto_vec globals; + auto_vec_del decls; +- srdecl *record_decl (srtype *, tree, int arg); ++ srdecl *record_decl (srtype *, tree, int arg, tree orig_type = NULL); + + srfunction *old; + cgraph_node *newnode; + srfunction *newf; + ++ bool is_safe_func; ++ + // Constructors + srfunction (cgraph_node *n); + +@@ -184,6 +186,11 @@ struct srfield + void create_new_fields (tree newtype[max_split], + tree newfields[max_split], + tree newlast[max_split]); ++ void reorder_fields (tree newfields[max_split], tree newlast[max_split], ++ tree &field); ++ void create_new_reorder_fields (tree newtype[max_split], ++ tree newfields[max_split], ++ tree newlast[max_split]); + }; + + struct sraccess +@@ -221,8 +228,11 @@ struct srdecl + + tree newdecl[max_split]; + ++ /* Auxiliary record complete original type information of the void* type. */ ++ tree orig_type; ++ + // Constructors +- srdecl (srtype *type, tree decl, int argumentnum = -1); ++ srdecl (srtype *type, tree decl, int argumentnum = -1, tree orgtype = NULL); + + // Methods + void dump (FILE *file); +diff --git a/gcc/passes.def b/gcc/passes.def +index 9692066e4..bdc835b87 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -178,6 +178,7 @@ along with GCC; see the file COPYING3. If not see + compiled unit. 
*/ + INSERT_PASSES_AFTER (all_late_ipa_passes) + NEXT_PASS (pass_ipa_pta); ++ NEXT_PASS (pass_ipa_reorder_fields); + /* FIXME: this should be a normal IP pass. */ + NEXT_PASS (pass_ipa_struct_reorg); + NEXT_PASS (pass_omp_simd_clone); +diff --git a/gcc/symbol-summary.h b/gcc/symbol-summary.h +index 3fe64047c..6fa529eee 100644 +--- a/gcc/symbol-summary.h ++++ b/gcc/symbol-summary.h +@@ -105,7 +105,7 @@ protected: + { + /* In structure optimizatons, we call new to ensure that + the allocated memory is initialized to 0. */ +- if (flag_ipa_struct_reorg) ++ if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) + return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T () + : new T (); + +@@ -122,7 +122,7 @@ protected: + ggc_delete (item); + else + { +- if (flag_ipa_struct_reorg) ++ if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) + delete item; + else + m_allocator.remove (item); +diff --git a/gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c b/gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c +new file mode 100644 +index 000000000..b95be2dab +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c +@@ -0,0 +1,75 @@ ++// escape_instance_field, "Type escapes via a field of instance". 
++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++ node_t node; ++} network_t; ++ ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++ network_t* net_add; ++ node_t node; ++}; ++ ++ ++const int MAX = 100; ++ ++/* let it escape_array, "Type is used in an array [not handled yet]". */ ++network_t* net[2]; ++ ++int ++main () ++{ ++ net[0] = (network_t*) calloc (1, sizeof(network_t)); ++ net[0]->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ ++ /* Contains an escape type and has structure instance field. */ ++ net[0]->arcs->node = net[0]->node; ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform." "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c b/gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c +new file mode 100644 +index 000000000..3d243313b +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c +@@ -0,0 +1,94 @@ ++// Verify in escape_dependent_type_escapes, ++// the multi-layer dereference is rewriting correctly,and the memory access ++// is correct. 
++ ++// release ++// escape_dependent_type_escapes, ++// "Type uses a type which escapes or is used by a type which escapes" ++// avoid escape_cast_another_ptr, "Type escapes a cast to a different pointer" ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++ network_t* net_add; ++}; ++ ++ ++const int MAX = 100; ++ ++/* let it escape_array, "Type is used in an array [not handled yet]". 
*/ ++network_t* net[2]; ++arc_p stop_arcs = NULL; ++ ++int ++main () ++{ ++ net[0] = (network_t*) calloc (1, sizeof(network_t)); ++ net[0]->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ stop_arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ ++ net[0]->arcs->id = 100; ++ ++ for (unsigned i = 0; i < 3; i++) ++ { ++ net[0]->arcs->id = net[0]->arcs->id + 2; ++ stop_arcs->cost = net[0]->arcs->id / 2; ++ stop_arcs->net_add = net[0]; ++ printf("stop_arcs->cost = %ld\n", stop_arcs->cost); ++ net[0]->arcs++; ++ stop_arcs++; ++ } ++ ++ if( net[1] != 0 && stop_arcs != 0) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c b/gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c +new file mode 100644 +index 000000000..faaf1e3a5 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c +@@ -0,0 +1,24 @@ ++/* check_ptr_layers bugfix.*/ ++/* { dg-do compile } */ ++struct { ++ char a; ++} **b = 0, *e = 0; ++long c; ++char d = 9; ++int f; ++ ++void g() ++{ ++ for (; f;) ++ if (c) ++ (*e).a++; ++ if (!d) ++ for (;;) ++ b &&c; ++} ++int ++main() ++{ ++ g(); ++} ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c +new file mode 100644 +index 000000000..886706ae9 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c +@@ -0,0 +1,82 @@ ++// bugfix: ++// Common members do not need to reconstruct. ++// Otherwise, eg:int* -> int** and void* -> void**. 
++/* { dg-do compile } */ ++ ++#include ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t* cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t** org_cost; ++}; ++ ++struct a ++{ ++ int t; ++ int t1; ++}; ++ ++__attribute__((noinline)) int ++f(int i, int j) ++{ ++ struct a *t = NULL; ++ struct a t1 = {i, j}; ++ t = &t1; ++ auto int g(void) __attribute__((noinline)); ++ int g(void) ++ { ++ return t->t + t->t1; ++ } ++ return g(); ++} ++ ++arc_t **ap = NULL; ++const int MAX = 100; ++ ++int ++main() ++{ ++ if (f(1, 2) != 3) ++ { ++ abort (); ++ } ++ ap = (arc_t**) malloc(MAX * sizeof(arc_t*)); ++ (*ap)[0].id = 300; ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c b/gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c +new file mode 100644 +index 000000000..f3785f392 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c +@@ -0,0 +1,56 @@ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++#define MallocOrDie(x) sre_malloc((x)) ++ ++struct gki_elem { ++ char *key; ++ int idx; ++ struct gki_elem *nxt; ++}; ++ ++typedef struct { ++ struct gki_elem **table; ++ ++ int primelevel; ++ int nhash; ++ int nkeys; ++} GKI; ++ ++void ++Die(char *format, ...) 
++{ ++ exit(1); ++} ++ ++void * ++sre_malloc(size_t size) ++{ ++ void *ptr; ++ ++ if ((ptr = malloc (size)) == NULL) ++ { ++ Die("malloc of %ld bytes failed", size); ++ } ++ return ptr; ++} ++ ++ ++__attribute__((noinline)) int ++GKIStoreKey(GKI *hash, char *key) ++{ ++ hash->table[0] = MallocOrDie(sizeof(struct gki_elem)); ++} ++ ++int ++main () ++{ ++ GKI *hash; ++ char *key; ++ GKIStoreKey(hash, key); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c b/gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c +new file mode 100644 +index 000000000..1415d759a +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c +@@ -0,0 +1,60 @@ ++// verify newarc[cmp-1].flow ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++arc_p ap = NULL; ++ ++int ++main () ++{ ++ ap = (arc_p) calloc(MAX, sizeof(arc_t)); ++ printf("%d\n", ap[0].id); ++ for (int i = 1; i < MAX; i++) ++ { ++ ap[i-1].id = 500; ++ } ++ printf("%d\n", ap[0].id); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c b/gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c +new file mode 100644 +index 
000000000..003da0b57 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c +@@ -0,0 +1,83 @@ ++// release type is used by a type which escapes. ++// avoid escape_cast_another_ptr, "Type escapes a cast to a different pointer" ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++network_t* net = NULL; ++arc_p stop_arcs = NULL; ++int cnt = 0; ++ ++int ++main () ++{ ++ net = (network_t*) calloc (1, 20); ++ net->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ stop_arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ if(!(net->arcs)) ++ { ++ return -1; ++ } ++ ++ for( int i = 0; i < MAX; i++, net->arcs = stop_arcs) ++ { ++ cnt++; ++ } ++ ++ net = (network_t*) calloc (1, 20); ++ if( !(net->arcs) ) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_external_func_types.c b/gcc/testsuite/gcc.dg/struct/rf_external_func_types.c +new file mode 100644 +index 000000000..84a34f241 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_external_func_types.c +@@ -0,0 +1,69 @@ ++/* { dg-do compile } */ ++/* { dg-additional-options "-shared" } */ ++ ++#include ++#include ++ ++typedef struct 
node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ int x; ++ arc_p arcs, sorted_arcs; ++ node_p nodes, stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++extern int bcf_sr_add_reader (network_t *); ++extern int bcf_hdr_dup (arc_p); ++ ++int ++test () ++{ ++ network_t *net = (network_t *) calloc (1, 20); ++ ++ if (!bcf_sr_add_reader(net)) ++ printf("error"); ++ arc_p arc = net->nodes->basic_arc; ++ if(!bcf_hdr_dup(arc)) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform." 
"reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c +new file mode 100644 +index 000000000..10dcf098c +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c +@@ -0,0 +1,72 @@ ++// release escape_cast_another_ptr, "Type escapes a cast to a different pointer" ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++typedef int cmp_t(const void *, const void *); ++ ++__attribute__((noinline)) void ++spec_qsort(void *a, cmp_t *cmp) ++{ ++ char *pb = NULL; ++ while (cmp(pb, a)) ++ { ++ pb += 1; ++ } ++} ++ ++static int arc_compare( arc_t **a1, int a2 ) ++{ ++ if( (*a1)->id < a2 ) ++ { ++ return -1; ++ } ++ return 1; ++} ++ ++int ++main() ++{ ++ spec_qsort(NULL, (int (*)(const void *, const void *))arc_compare); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c b/gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c +new file mode 100644 +index 000000000..8d1a9a114 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c +@@ -0,0 +1,58 @@ ++/* Supports the MEM_REF offset. 
++ _1 = MEM[(struct arc *)ap_4 + 72B].flow; ++ Old rewrite:_1 = ap.reorder.0_8->flow; ++ New rewrite:_1 = MEM[(struct arc.reorder.0 *)ap.reorder.0_8 + 64B].flow. */ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int ++main () ++{ ++ const int MAX = 100; ++ /* A similar scenario can be reproduced only by using local variables. */ ++ arc_p ap = NULL; ++ ap = (arc_p) calloc(MAX, sizeof(arc_t)); ++ printf("%d\n", ap[1].flow); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c b/gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c +new file mode 100644 +index 000000000..23765fc56 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c +@@ -0,0 +1,30 @@ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct T_HASH_ENTRY ++{ ++ unsigned int hash; ++ unsigned int klen; ++ char *key; ++} iHashEntry; ++ ++typedef struct T_HASH ++{ ++ unsigned int size; ++ unsigned int fill; ++ unsigned int keys; ++ ++ iHashEntry **array; ++} uHash; ++ ++uHash *retval; ++ ++int ++main() { ++ retval->array = (iHashEntry **)calloc(sizeof(iHashEntry *), retval->size); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of 
file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c b/gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c +new file mode 100644 +index 000000000..54e737ee8 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c +@@ -0,0 +1,109 @@ ++// For testing: ++/* ++Compile options: gcc -O3 -g ++-flto -flto-partition=one -fipa-reorder-fields -fipa-struct-reorg ++-v -save-temps -fdump-ipa-all-details test.c -o test ++ ++in COMPLETE_STRUCT_RELAYOUT pass: ++N type: struct node.reorder.0 new = "Type escapes a cast to a different pointer" ++copy$head_26 = test_arc.reorder.0_49->head; ++ ++type : struct arc.reorder.0(1599) { ++fields = { ++field (5382) {type = cost_t} ++field (5383) {type = struct node.reorder.0 *} // but node has escaped. ++field (5384) {type = struct node.reorder.0 *} ++field (5386) {type = struct arc.reorder.0 *} ++field (5387) {type = struct arc.reorder.0 *} ++field (5388) {type = flow_t} ++field (5389) {type = cost_t} ++field (5381) {type = int} ++field (5385) {type = short int} ++} ++ ++// The types of the two types are inconsistent after the rewriting. 
++newarc_2(D)->tail = tail_1(D); ++vs ++struct_reorder.0_61(D)->tail = tail_1(D); ++*/ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++__attribute__((noinline)) void ++replace_weaker_arc( arc_t *newarc, node_t *tail, node_t *head) ++{ ++ printf("test"); ++} ++ ++__attribute__((noinline)) int64_t ++switch_arcs(arc_t** deleted_arcs, arc_t* arcnew) ++{ ++ int64_t count = 0; ++ arc_t *test_arc, copy; ++ ++ if (!test_arc->ident) ++ { ++ copy = *test_arc; ++ count++; ++ *test_arc = arcnew[0]; ++ replace_weaker_arc(arcnew, NULL, NULL); ++ } ++ return count; ++} ++ ++int ++main () ++{ ++ switch_arcs(NULL, NULL); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c b/gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c +new file mode 100644 +index 000000000..2ae46fb31 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c +@@ -0,0 +1,87 @@ ++// escape_cast_void, "Type escapes a cast to/from void*" ++// stop_393 = net.stop_nodes; void *stop; ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc 
*arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs, sorted_arcs; ++ int x; ++ node_p nodes, stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++network_t* net = NULL; ++int cnt = 0; ++ ++__attribute__((noinline)) int ++primal_feasible (network_t *net) ++{ ++ void* stop; ++ node_t *node; ++ ++ node = net->nodes; ++ stop = (void *)net->stop_nodes; ++ for( node++; node < (node_t *)stop; node++ ) ++ { ++ printf( "PRIMAL NETWORK SIMPLEX: " ); ++ } ++ return 0; ++} ++ ++int ++main () ++{ ++ net = (network_t*) calloc (1, 20); ++ net->nodes = calloc (MAX, sizeof (node_t)); ++ net->stop_nodes = calloc (MAX, sizeof (node_t)); ++ cnt = primal_feasible( net ); ++ ++ net = (network_t*) calloc (1, 20); ++ if( !(net->arcs) ) ++ { ++ return -1; ++ } ++ return cnt; ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform." 
"reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c +new file mode 100644 +index 000000000..3a3c10b70 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c +@@ -0,0 +1,71 @@ ++// support POINTER_DIFF_EXPR & NOP_EXPR to avoid ++// escape_unhandled_rewrite, "Type escapes via a unhandled rewrite stmt" ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int ++main () ++{ ++ arc_t *old_arcs; ++ node_t *node; ++ node_t *stop; ++ size_t off; ++ network_t* net; ++ ++ for( ; node->number < stop->number; node++ ) ++ { ++ off = node->basic_arc - old_arcs; ++ node->basic_arc = (arc_t *)(net->arcs + off); ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c +new file mode 100644 +index 000000000..7b7d110df +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c +@@ -0,0 +1,55 @@ ++// support NEGATE_EXPR rewriting ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef 
struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int ++main () ++{ ++ int64_t susp = 0; ++ const int MAX = 100; ++ arc_p ap = (arc_p) calloc(MAX, sizeof(arc_t)); ++ ap -= susp; ++ printf("%d\n", ap[1].flow); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c +new file mode 100644 +index 000000000..317aafa5f +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c +@@ -0,0 +1,34 @@ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++struct node ++{ ++ struct node *left, *right; ++ double a, b, c, d, e, f; ++} ++*a; ++int b, c; ++void ++CreateNode (struct node **p1) ++{ ++ *p1 = calloc (10, sizeof (struct node)); ++} ++ ++int ++main () ++{ ++ a->left = 0; ++ struct node *t = a; ++ CreateNode (&t->right); ++ ++ struct node p = *a; ++ b = 1; ++ if (p.left) ++ b = 0; ++ if (b) ++ printf (" Tree.\n"); ++} ++ ++/* { dg-final { scan-ipa-dump "No structures to transform." 
"reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c +new file mode 100644 +index 000000000..01a33f669 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c +@@ -0,0 +1,55 @@ ++// release escape_ptr_ptr, "Type is used in a pointer to a pointer [not handled yet]"; ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++arc_t **ap = NULL; ++ ++int ++main () ++{ ++ ap = (arc_t**) malloc(MAX * sizeof(arc_t*)); ++ (*ap)[0].id = 300; ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c +new file mode 100644 +index 000000000..a38556533 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c +@@ -0,0 +1,58 @@ ++// release escape_ptr_ptr, "Type is used in a pointer to a pointer [not handled yet]" ++ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p 
arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++arc_p **ap; ++ ++ ++int ++main () ++{ ++ ap = (arc_p**) calloc(MAX, sizeof(arc_p*)); ++ (**ap)[0].id = 500; ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c b/gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c +new file mode 100644 +index 000000000..5c17ee528 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c +@@ -0,0 +1,57 @@ ++// release escape_rescusive_type, "Recusive type" ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++arc_p ap = NULL; ++ ++int ++main () ++{ ++ ap = (arc_p) calloc (MAX, sizeof (arc_t)); ++ ap[0].id = 100; ++ ap[0].head = (node_p) calloc (MAX, sizeof (node_t)); ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c +new file mode 100644 +index 000000000..710517ee9 +--- 
/dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c +@@ -0,0 +1,65 @@ ++// support more gimple assign rhs code ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++__attribute__((noinline)) int ++compare(arc_p p1, arc_p p2) ++{ ++ return p1 < p2; ++} ++ ++int n = 0; ++int m = 0; ++ ++int ++main () ++{ ++ scanf ("%d %d", &n, &m); ++ arc_p p = calloc (10, sizeof (struct arc)); ++ if (compare (&p[n], &p[m])) ++ { ++ printf ("ss!"); ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c +new file mode 100644 +index 000000000..6ed0a5d2d +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c +@@ -0,0 +1,72 @@ ++// rewrite_cond bugfix; ++/* ++if (iterator_600 != 0B) ++old rewrite: _1369 = iterator.reorder.0_1249 != 0B; if (_1369 != 1) ++new rewrite: if (iterator.reorder.0_1249 != 0B) ++*/ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct list_elem ++{ ++ arc_t* arc; ++ struct list_elem* next; ++}list_elem; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p 
sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int i = 0; ++ ++int ++main () ++{ ++ register list_elem *first_list_elem; ++ register list_elem* iterator; ++ iterator = first_list_elem->next; ++ while (iterator) ++ { ++ iterator = iterator->next; ++ i++; ++ } ++ ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c +new file mode 100644 +index 000000000..5a2dd964f +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c +@@ -0,0 +1,58 @@ ++// support if (_150 >= _154) ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int ++main() ++{ ++ arc_p **ap = (arc_p**) malloc(1 * sizeof(arc_p*)); ++ arc_p **arcs_pointer_sorted = (arc_p**) malloc(1 * sizeof(arc_p*)); ++ arcs_pointer_sorted[0] = (arc_p*) calloc (1, sizeof(arc_p)); ++ ++ if (arcs_pointer_sorted >= ap) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to 
transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c +new file mode 100644 +index 000000000..faa90b42d +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c +@@ -0,0 +1,81 @@ ++/* ++Exclude the rewriting error caused by ++first_list_elem = (list_elem *)NULL; ++rewriting PHI:first_list_elem_700 = PHI <0B(144), 0B(146)> ++into: ++first_list_elem.reorder.0_55 = PHI <(144), (146)> ++*/ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct list_elem ++{ ++ arc_t* arc; ++ struct list_elem* next; ++}list_elem; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout, firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail, head; ++ short ident; ++ arc_p nextout, nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++ ++list_elem* new_list_elem; ++list_elem* first_list_elem; ++ ++int ++main () ++{ ++ int i = 0; ++ list_elem *first_list_elem; ++ list_elem *new_list_elem; ++ arc_t *arcout; ++ for( ; i < MAX && arcout->ident == -1; i++); ++ ++ first_list_elem = (list_elem *)NULL; ++ for( ; i < MAX; i++) ++ { ++ new_list_elem = (list_elem*) calloc(1, sizeof(list_elem)); ++ new_list_elem->next = first_list_elem; ++ first_list_elem = new_list_elem; ++ } ++ if (first_list_elem != 0) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_shwi.c b/gcc/testsuite/gcc.dg/struct/rf_shwi.c +new file mode 
100644 +index 000000000..2bb326ff2 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_shwi.c +@@ -0,0 +1,23 @@ ++/* { dg-do compile } */ ++ ++struct foo {int dx; long dy; int dz; }; ++struct goo {long offset; struct foo* pfoo; }; ++ ++void* func (long); ++ ++__attribute__((used)) static void ++test(struct goo* g) ++{ ++ void* pvoid; ++ struct foo* f; ++ ++ for (f = g->pfoo; f->dx; f++) ++ { ++ if (f->dy) ++ break; ++ } ++ f--; ++ ++ pvoid = func(f->dz + g->offset); ++ return; ++} +diff --git a/gcc/testsuite/gcc.dg/struct/rf_visible_func.c b/gcc/testsuite/gcc.dg/struct/rf_visible_func.c +new file mode 100644 +index 000000000..8f2da99cc +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_visible_func.c +@@ -0,0 +1,92 @@ ++// release escape_visible_function, "Type escapes via expternally visible function call" ++// compile options: gcc -O3 -fno-inline -fwhole-program ++// -flto-partition=one -fipa-struct-reorg arc_compare.c -fdump-ipa-all -S -v ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++__attribute__((noinline)) static int ++arc_compare( arc_t **a1, arc_t **a2 ) ++{ ++ if( (*a1)->flow > (*a2)->flow ) ++ { ++ return 1; ++ } ++ if( (*a1)->flow < (*a2)->flow ) ++ { ++ return -1; ++ } ++ if( (*a1)->id < (*a2)->id ) ++ { ++ return -1; ++ } ++ ++ return 1; ++} ++ ++__attribute__((noinline)) void ++spec_qsort(void *array, int nitems, int size, ++ int (*cmp)(const void*,const void*)) ++{ ++ 
for (int i = 0; i < nitems - 1; i++) ++ { ++ if (cmp (array , array)) ++ { ++ printf ("CMP 1\n"); ++ } ++ else ++ { ++ printf ("CMP 2\n"); ++ } ++ } ++} ++ ++typedef int cmp_t(const void *, const void *); ++ ++int ++main () ++{ ++ void *p = calloc (100, sizeof (arc_t **)); ++ spec_qsort (p, 100, 0, (int (*)(const void *, const void *))arc_compare); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c b/gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c +new file mode 100644 +index 000000000..723142c59 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c +@@ -0,0 +1,54 @@ ++// Add a safe func mechanism. ++// avoid escape_unkown_field, "Type escapes via an unkown field accessed" ++// avoid escape_cast_void, "Type escapes a cast to/from void*" eg: GIMPLE_NOP ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++void ++__attribute__((noinline)) spec_qsort (void *a, size_t es) ++{ ++ char *pa; ++ char *pb; ++ int cmp_result; ++ ++ while ((*(arc_t **)a)->id < *((int *)a)) ++ { ++ if (cmp_result == 0) ++ { ++ spec_qsort (a, es); ++ pa = (char *)a - es; ++ a += es; ++ *(long *)pb = *(long *)pa; ++ } ++ else ++ { ++ a -= pa - pb; ++ } ++ } ++} ++ ++int ++main() ++{ ++ arc_p **arcs_pointer_sorted; ++ spec_qsort (arcs_pointer_sorted[0], sizeof (arc_p)); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +index 43913104e..5a476e8f9 100644 +--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp ++++ 
b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +@@ -27,8 +27,21 @@ set STRUCT_REORG_TORTURE_OPTIONS [list \ + + set-torture-options $STRUCT_REORG_TORTURE_OPTIONS {{}} + +-gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c]] \ ++# -fipa-struct-reorg ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/wo_*.c]] \ ++ "" "-fipa-struct-reorg -fdump-ipa-all -flto-partition=one -fwhole-program" ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/w_*.c]] \ + "" "-fipa-struct-reorg -fdump-ipa-all -flto-partition=one -fwhole-program" ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/struct_reorg*.cpp]] \ ++ "" "-fipa-struct-reorg -fdump-ipa-all -flto-partition=one -fwhole-program" ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/sr_*.c]] \ ++ "" "-fipa-struct-reorg -fdump-ipa-all -flto-partition=one -fwhole-program" ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/csr_*.c]] \ ++ "" "-fipa-struct-reorg -fdump-ipa-all -flto-partition=one -fwhole-program" ++ ++# -fipa-reorder-fields ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/rf_*.c]] \ ++ "" "-fipa-reorder-fields -fdump-ipa-all -flto-partition=one -fwhole-program" + + # All done. + torture-finish +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-1.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-1.c +index 6565fe8dd..23444fe8b 100644 +--- a/gcc/testsuite/gcc.dg/struct/struct_reorg-1.c ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-1.c +@@ -1,5 +1,5 @@ + // { dg-do compile } +-// { dg-options "-O3 -flto-partition=one -fipa-struct-reorg -fdump-ipa-all" } ++// { dg-options "-O3 -flto-partition=one -fipa-struct-reorg -fdump-ipa-all -fwhole-program" } + + struct a + { +@@ -21,4 +21,10 @@ int g(void) + return b->t; + } + ++int main() ++{ ++ f (); ++ return g (); ++} ++ + /* { dg-final { scan-ipa-dump "No structures to transform." 
"struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-3.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-3.c +index 5864ad46f..2d1f95c99 100644 +--- a/gcc/testsuite/gcc.dg/struct/struct_reorg-3.c ++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-3.c +@@ -1,5 +1,5 @@ + // { dg-do compile } +-// { dg-options "-O3 -flto-partition=one -fipa-struct-reorg -fdump-ipa-all" } ++// { dg-options "-O3 -flto-partition=one -fipa-struct-reorg -fdump-ipa-all -fwhole-program" } + + #include + typedef struct { +@@ -10,7 +10,7 @@ typedef struct { + compile_stack_elt_t *stack; + unsigned size; + } compile_stack_type; +-void f (const char *p, const char *pend, int c) ++__attribute__((noinline)) void f (const char *p, const char *pend, int c) + { + compile_stack_type compile_stack; + while (p != pend) +@@ -20,4 +20,9 @@ void f (const char *p, const char *pend, int c) + * sizeof (compile_stack_elt_t)); + } + ++int main() ++{ ++ f (NULL, NULL, 1); ++} ++ + /* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 98a5a490f..2b27c858a 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -80,6 +80,7 @@ DEFTIMEVAR (TV_IPA_CONSTANT_PROP , "ipa cp") + DEFTIMEVAR (TV_IPA_INLINING , "ipa inlining heuristics") + DEFTIMEVAR (TV_IPA_FNSPLIT , "ipa function splitting") + DEFTIMEVAR (TV_IPA_COMDATS , "ipa comdats") ++DEFTIMEVAR (TV_IPA_REORDER_FIELDS , "ipa struct reorder fields optimization") + DEFTIMEVAR (TV_IPA_STRUCT_REORG , "ipa struct reorg optimization") + DEFTIMEVAR (TV_IPA_OPT , "ipa various optimizations") + DEFTIMEVAR (TV_IPA_LTO_DECOMPRESS , "lto stream decompression") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index 56898e019..a9ec8ed21 100644 +--- a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -527,6 +527,7 @@ extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt); + extern ipa_opt_pass_d 
*make_pass_ipa_reference (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt); ++extern simple_ipa_opt_pass *make_pass_ipa_reorder_fields (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_struct_reorg (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt); +-- +2.33.0 + diff --git a/0022-DFE-Add-Dead-Field-Elimination-in-Struct-Reorg.patch b/0022-DFE-Add-Dead-Field-Elimination-in-Struct-Reorg.patch new file mode 100644 index 0000000000000000000000000000000000000000..2822078bdc2a0e6968bc751ef2449e49752c2c09 --- /dev/null +++ b/0022-DFE-Add-Dead-Field-Elimination-in-Struct-Reorg.patch @@ -0,0 +1,1753 @@ +From 9d03b0a7741915e3a0172d60b9c21bf5abbda89e Mon Sep 17 00:00:00 2001 +From: Mingchuan Wu +Date: Mon, 28 Aug 2023 18:11:02 +0800 +Subject: [PATCH 22/22] [DFE] Add Dead Field Elimination in Struct-Reorg. + +We can transform gimple to eliminate fields that are never read +and replace the dead fields in stmt by creating a new ssa. 
+--- + gcc/common.opt | 4 + + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 240 +++++++++++++++++- + gcc/ipa-struct-reorg/ipa-struct-reorg.h | 8 + + gcc/opts.cc | 17 ++ + gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c | 86 +++++++ + .../gcc.dg/struct/dfe_ele_minus_verify.c | 60 +++++ + .../gcc.dg/struct/dfe_extr_board_init.c | 77 ++++++ + gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c | 84 ++++++ + gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c | 56 ++++ + gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c | 162 ++++++++++++ + gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c | 126 +++++++++ + .../gcc.dg/struct/dfe_extr_mv_udc_core.c | 82 ++++++ + .../gcc.dg/struct/dfe_extr_tcp_usrreq.c | 58 +++++ + .../gcc.dg/struct/dfe_extr_ui_main.c | 61 +++++ + .../gcc.dg/struct/dfe_mem_ref_offset.c | 58 +++++ + .../struct/dfe_mul_layer_ptr_record_bug.c | 30 +++ + gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c | 71 ++++++ + .../gcc.dg/struct/dfe_ptr_negate_expr.c | 55 ++++ + gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c | 55 ++++ + gcc/testsuite/gcc.dg/struct/struct-reorg.exp | 4 + + .../struct/wo_prof_escape_replace_type.c | 49 ++++ + 21 files changed, 1436 insertions(+), 7 deletions(-) + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c + 
create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c + create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c + create mode 100644 gcc/testsuite/gcc.dg/struct/wo_prof_escape_replace_type.c + +diff --git a/gcc/common.opt b/gcc/common.opt +index 14633c821..8bb735551 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1988,6 +1988,10 @@ fipa-struct-reorg + Common Var(flag_ipa_struct_reorg) Init(0) Optimization + Perform structure layout optimizations. + ++fipa-struct-reorg= ++Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 3) ++-fipa-struct-reorg=[0,1,2,3] adding none, struct-reorg, reorder-fields, dfe optimizations. ++ + fipa-vrp + Common Var(flag_ipa_vrp) Optimization + Perform IPA Value Range Propagation. +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +index 3e5f9538b..eac5fac7e 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -87,6 +87,7 @@ along with GCC; see the file COPYING3. If not see + #include "tree-pretty-print.h" + #include "gimple-pretty-print.h" + #include "gimple-iterator.h" ++#include "gimple-walk.h" + #include "cfg.h" + #include "ssa.h" + #include "tree-dfa.h" +@@ -268,10 +269,43 @@ enum srmode + STRUCT_REORDER_FIELDS + }; + ++/* Enum the struct layout optimize level, ++ which should be the same as the option -fstruct-reorg=. */ ++ ++enum struct_layout_opt_level ++{ ++ NONE = 0, ++ STRUCT_REORG, ++ STRUCT_REORDER_FIELDS_SLO, ++ DEAD_FIELD_ELIMINATION ++}; ++ + static bool is_result_of_mult (tree arg, tree *num, tree struct_size); + static bool isptrptr (tree type); ++void get_base (tree &base, tree expr); + + srmode current_mode; ++hash_map replace_type_map; ++ ++/* Return true if one of these types is created by struct-reorg. 
*/ ++ ++static bool ++is_replace_type (tree type1, tree type2) ++{ ++ if (replace_type_map.is_empty ()) ++ return false; ++ if (type1 == NULL_TREE || type2 == NULL_TREE) ++ return false; ++ tree *type_value = replace_type_map.get (type1); ++ if (type_value) ++ if (types_compatible_p (*type_value, type2)) ++ return true; ++ type_value = replace_type_map.get (type2); ++ if (type_value) ++ if (types_compatible_p (*type_value, type1)) ++ return true; ++ return false; ++} + + } // anon namespace + +@@ -353,7 +387,8 @@ srfield::srfield (tree field, srtype *base) + fielddecl (field), + base (base), + type (NULL), +- clusternum (0) ++ clusternum (0), ++ field_access (EMPTY_FIELD) + { + for (int i = 0; i < max_split; i++) + newfield[i] = NULL_TREE; +@@ -392,6 +427,25 @@ srtype::srtype (tree type) + } + } + ++/* Check it if all fields in the RECORD_TYPE are referenced. */ ++ ++bool ++srtype::has_dead_field (void) ++{ ++ bool may_dfe = false; ++ srfield *this_field; ++ unsigned i; ++ FOR_EACH_VEC_ELT (fields, i, this_field) ++ { ++ if (!(this_field->field_access & READ_FIELD)) ++ { ++ may_dfe = true; ++ break; ++ } ++ } ++ return may_dfe; ++} ++ + /* Mark the type as escaping type E at statement STMT. */ + + void +@@ -595,7 +649,17 @@ srtype::analyze (void) + into 2 different structures. In future we intend to add profile + info and/or static heuristics to differentiate splitting process. */ + if (fields.length () == 2) +- fields[1]->clusternum = 1; ++ { ++ /* Currently, when the replacement structure type exists, ++ we only split the replacement structure. */ ++ for (hash_map::iterator it = replace_type_map.begin (); ++ it != replace_type_map.end (); ++it) ++ { ++ if (types_compatible_p ((*it).second, this->type)) ++ return; ++ } ++ fields[1]->clusternum = 1; ++ } + + /* Otherwise we do nothing. 
*/ + if (fields.length () >= 3) +@@ -838,6 +902,10 @@ srtype::create_new_type (void) + for (unsigned i = 0; i < fields.length (); i++) + { + srfield *f = fields[i]; ++ if (current_mode == STRUCT_REORDER_FIELDS ++ && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION ++ && !(f->field_access & READ_FIELD)) ++ continue; + f->create_new_fields (newtype, newfields, newlast); + } + +@@ -856,6 +924,16 @@ srtype::create_new_type (void) + + warn_padded = save_warn_padded; + ++ if (current_mode == STRUCT_REORDER_FIELDS ++ && replace_type_map.get (this->newtype[0]) == NULL) ++ replace_type_map.put (this->newtype[0], this->type); ++ if (dump_file) ++ { ++ if (current_mode == STRUCT_REORDER_FIELDS ++ && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION ++ && has_dead_field ()) ++ fprintf (dump_file, "Dead field elimination.\n"); ++ } + if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "Created %d types:\n", maxclusters); +@@ -1269,6 +1347,7 @@ public: + void maybe_mark_or_record_other_side (tree side, tree other, gimple *stmt); + + unsigned execute_struct_relayout (void); ++ bool remove_dead_field_stmt (tree lhs); + }; + + struct ipa_struct_relayout +@@ -3057,6 +3136,119 @@ ipa_struct_reorg::find_vars (gimple *stmt) + } + } + ++static HOST_WIDE_INT ++get_offset (tree op, HOST_WIDE_INT offset) ++{ ++ switch (TREE_CODE (op)) ++ { ++ case COMPONENT_REF: ++ { ++ return int_byte_position (TREE_OPERAND (op, 1)); ++ } ++ case MEM_REF: ++ { ++ return tree_to_uhwi (TREE_OPERAND (op, 1)); ++ } ++ default: ++ return offset; ++ } ++ return offset; ++} ++ ++/* Record field access. 
*/ ++static void ++record_field_access (tree type, HOST_WIDE_INT offset, ++ unsigned access, void *data) ++{ ++ srtype *this_srtype = ((ipa_struct_reorg *)data)->find_type (type); ++ if (this_srtype == NULL) ++ return; ++ srfield *this_srfield = this_srtype->find_field (offset); ++ if (this_srfield == NULL) ++ return; ++ ++ this_srfield->field_access |= access; ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "record field access %d:", access); ++ print_generic_expr (dump_file, type); ++ fprintf (dump_file, " field:"); ++ print_generic_expr (dump_file, this_srfield->fielddecl); ++ fprintf (dump_file, "\n"); ++ } ++ return; ++ ++} ++ ++/* Update field_access in srfield. */ ++ ++static void ++update_field_access (tree node, tree op, unsigned access, void *data) ++{ ++ HOST_WIDE_INT offset = 0; ++ offset = get_offset (op, offset); ++ tree node_type = inner_type (TREE_TYPE (node)); ++ record_field_access (node_type, offset, access, data); ++ tree base = node; ++ get_base (base, node); ++ tree base_type = inner_type (TREE_TYPE (base)); ++ if (!types_compatible_p (base_type, node_type)) ++ { ++ record_field_access (base_type, get_offset (node, offset), ++ access, data); ++ } ++ return; ++} ++ ++/* A callback for walk_stmt_load_store_ops to visit store. */ ++ ++static bool ++find_field_p_store (gimple *stmt ATTRIBUTE_UNUSED, ++ tree node, tree op, void *data) ++{ ++ update_field_access (node, op, WRITE_FIELD, data); ++ ++ return false; ++} ++ ++/* A callback for walk_stmt_load_store_ops to visit load. */ ++ ++static bool ++find_field_p_load (gimple *stmt ATTRIBUTE_UNUSED, ++ tree node, tree op, void *data) ++{ ++ update_field_access (node, op, READ_FIELD, data); ++ ++ return false; ++} ++ ++/* Determine whether the stmt should be deleted. 
*/ ++ ++bool ++ipa_struct_reorg::remove_dead_field_stmt (tree lhs) ++{ ++ tree base = NULL_TREE; ++ bool indirect = false; ++ srtype *t = NULL; ++ srfield *f = NULL; ++ bool realpart = false; ++ bool imagpart = false; ++ bool address = false; ++ bool escape_from_base = false; ++ if (!get_type_field (lhs, base, indirect, t, f, realpart, imagpart, ++ address, escape_from_base)) ++ return false; ++ if (t ==NULL) ++ return false; ++ if (t->newtype[0] == t->type) ++ return false; ++ if (f == NULL) ++ return false; ++ if (f->newfield[0] == NULL) ++ return true; ++ return false; ++} ++ + /* Maybe record access of statement for further analaysis. */ + + void +@@ -3078,6 +3270,13 @@ ipa_struct_reorg::maybe_record_stmt (cgraph_node *node, gimple *stmt) + default: + break; + } ++ if (current_mode == STRUCT_REORDER_FIELDS ++ && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION) ++ { ++ /* Look for loads and stores. */ ++ walk_stmt_load_store_ops (stmt, this, find_field_p_load, ++ find_field_p_store); ++ } + } + + /* Calculate the multiplier. */ +@@ -3368,8 +3567,11 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other, + } + else if (type != d->type) + { +- type->mark_escape (escape_cast_another_ptr, stmt); +- d->type->mark_escape (escape_cast_another_ptr, stmt); ++ if (!is_replace_type (d->type->type, type->type)) ++ { ++ type->mark_escape (escape_cast_another_ptr, stmt); ++ d->type->mark_escape (escape_cast_another_ptr, stmt); ++ } + } + /* x_1 = y.x_nodes; void *x; + Directly mark the structure pointer type assigned +@@ -3949,8 +4151,9 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl, + } + /* If we have a non void* or a decl (which is hard to track), + then mark the type as escaping. 
*/ +- if (!VOID_POINTER_P (TREE_TYPE (newdecl)) +- || DECL_P (newdecl)) ++ if (replace_type_map.get (type->type) == NULL ++ && (!VOID_POINTER_P (TREE_TYPE (newdecl)) ++ || DECL_P (newdecl))) + { + if (dump_file && (dump_flags & TDF_DETAILS)) + { +@@ -4216,7 +4419,9 @@ ipa_struct_reorg::check_other_side (srdecl *decl, tree other, gimple *stmt, + } + + srtype *t1 = find_type (inner_type (t)); +- if (t1 == type) ++ /* In the other side check, escape mark is added ++ when the replacement struct type exists. */ ++ if (t1 == type || is_replace_type (inner_type (t), type->type)) + { + /* In Complete Struct Relayout, if lhs type is the same + as rhs type, we could return without any harm. */ +@@ -5513,6 +5718,27 @@ bool + ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + { + bool remove = false; ++ ++ if (current_mode == STRUCT_REORDER_FIELDS ++ && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION ++ && remove_dead_field_stmt (gimple_assign_lhs (stmt))) ++ { ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "\n rewriting statement (remove): \n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ } ++ /* Replace the dead field in stmt by creating a dummy ssa. 
*/ ++ tree dummy_ssa = make_ssa_name (TREE_TYPE (gimple_assign_lhs (stmt))); ++ gimple_assign_set_lhs (stmt, dummy_ssa); ++ update_stmt (stmt); ++ if (dump_file && (dump_flags & TDF_DETAILS)) ++ { ++ fprintf (dump_file, "To: \n"); ++ print_gimple_stmt (dump_file, stmt, 0); ++ } ++ } ++ + if (gimple_clobber_p (stmt)) + { + tree lhs = gimple_assign_lhs (stmt); +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +index 6f85adeb4..719f7b308 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h +@@ -143,6 +143,7 @@ public: + + bool create_new_type (void); + void analyze (void); ++ bool has_dead_field (void); + void mark_escape (escape_type, gimple *stmt); + bool has_escaped (void) + { +@@ -164,6 +165,12 @@ public: + } + }; + ++/* Bitflags used for determining if a field ++ is never accessed, read or written. */ ++const unsigned EMPTY_FIELD = 0x0u; ++const unsigned READ_FIELD = 0x01u; ++const unsigned WRITE_FIELD = 0x02u; ++ + struct srfield + { + unsigned HOST_WIDE_INT offset; +@@ -175,6 +182,7 @@ struct srfield + unsigned clusternum; + + tree newfield[max_split]; ++ unsigned field_access; /* FIELD_DECL -> bitflag (use for dfe). */ + + // Constructors + srfield (tree field, srtype *base); +diff --git a/gcc/opts.cc b/gcc/opts.cc +index c3cc2c169..b868d189e 100644 +--- a/gcc/opts.cc ++++ b/gcc/opts.cc +@@ -2957,6 +2957,23 @@ common_handle_option (struct gcc_options *opts, + SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_correction, value); + break; + ++ case OPT_fipa_struct_reorg_: ++ /* No break here - do -fipa-struct-reorg processing. */ ++ /* FALLTHRU. */ ++ case OPT_fipa_struct_reorg: ++ opts->x_flag_ipa_struct_reorg = value; ++ if (value && !opts->x_struct_layout_optimize_level) ++ { ++ /* Using the -fipa-struct-reorg option is equivalent to using ++ -fipa-struct-reorg=1. 
*/ ++ opts->x_struct_layout_optimize_level = 1; ++ } ++ break; ++ ++ case OPT_fipa_reorder_fields: ++ SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_struct_reorg, value); ++ break; ++ + case OPT_fprofile_generate_: + opts->x_profile_data_prefix = xstrdup (arg); + value = true; +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c b/gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c +new file mode 100644 +index 000000000..0c9e384c4 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c +@@ -0,0 +1,86 @@ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++ network_t* net_add; ++}; ++ ++ ++const int MAX = 100; ++ ++/* let it escape_array, "Type is used in an array [not handled yet]". 
*/ ++network_t* net[2]; ++arc_p stop_arcs = NULL; ++ ++int ++main () ++{ ++ net[0] = (network_t*) calloc (1, sizeof(network_t)); ++ net[0]->arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ stop_arcs = (arc_p) calloc (MAX, sizeof (arc_t)); ++ ++ net[0]->arcs->id = 100; ++ ++ for (unsigned i = 0; i < 3; i++) ++ { ++ net[0]->arcs->id = net[0]->arcs->id + 2; ++ stop_arcs->cost = net[0]->arcs->id / 2; ++ stop_arcs->net_add = net[0]; ++ printf("stop_arcs->cost = %ld\n", stop_arcs->cost); ++ net[0]->arcs++; ++ stop_arcs++; ++ } ++ ++ if( net[1] != 0 && stop_arcs != 0) ++ { ++ return -1; ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c b/gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c +new file mode 100644 +index 000000000..717fcc386 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c +@@ -0,0 +1,60 @@ ++// verify newarc[cmp-1].flow ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++arc_p ap = NULL; ++ ++int ++main () ++{ ++ ap = (arc_p) calloc(MAX, sizeof(arc_t)); ++ printf("%d\n", ap[0].id); ++ for (int i = 1; i < MAX; i++) ++ { ++ ap[i-1].id = 500; ++ } ++ printf("%d\n", ap[0].id); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ +diff --git 
a/gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c +new file mode 100644 +index 000000000..7723c240b +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c +@@ -0,0 +1,77 @@ ++/* { dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++typedef struct TYPE_5__ TYPE_2__; ++typedef struct TYPE_4__ TYPE_1__; ++ ++struct TYPE_4__ ++{ ++ int Pin; ++ int Pull; ++ int Mode; ++ int Speed; ++}; ++ ++struct TYPE_5__ ++{ ++ int MEMRMP; ++}; ++typedef TYPE_1__ GPIO_InitTypeDef; ++ ++int BT_RST_PIN; ++int BT_RST_PORT; ++int CONN_POS10_PIN; ++int CONN_POS10_PORT; ++int GPIO_HIGH (int, int); ++int GPIO_MODE_INPUT; ++int GPIO_MODE_OUTPUT_PP; ++int GPIO_NOPULL; ++int GPIO_PULLUP; ++int GPIO_SPEED_FREQ_LOW; ++int HAL_GPIO_Init (int, TYPE_1__ *); ++scalar_t__ IS_GPIO_RESET (int, int); ++TYPE_2__ *SYSCFG; ++int __HAL_RCC_GPIOB_CLK_ENABLE (); ++int __HAL_RCC_GPIOC_CLK_ENABLE (); ++ ++__attribute__((used)) static void ++LBF_DFU_If_Needed (void) ++{ ++ GPIO_InitTypeDef GPIO_InitStruct; ++ __HAL_RCC_GPIOC_CLK_ENABLE (); ++ GPIO_InitStruct.Mode = GPIO_MODE_OUTPUT_PP; ++ GPIO_InitStruct.Pull = GPIO_NOPULL; ++ GPIO_InitStruct.Speed = GPIO_SPEED_FREQ_LOW; ++ GPIO_InitStruct.Pin = BT_RST_PIN; ++ HAL_GPIO_Init (BT_RST_PORT, &GPIO_InitStruct); ++ ++ GPIO_HIGH (BT_RST_PORT, BT_RST_PIN); ++ __HAL_RCC_GPIOB_CLK_ENABLE (); ++ GPIO_InitStruct.Mode = GPIO_MODE_INPUT; ++ GPIO_InitStruct.Pull = GPIO_PULLUP; ++ GPIO_InitStruct.Pin = CONN_POS10_PIN; ++ HAL_GPIO_Init (CONN_POS10_PORT, &GPIO_InitStruct); ++ ++ if (IS_GPIO_RESET (CONN_POS10_PORT, CONN_POS10_PIN)) ++ { ++ SYSCFG->MEMRMP = 0x00000001; ++ asm ( ++ "LDR R0, =0x000000\n\t" ++ "LDR SP, [R0, #0]\n\t" ++ ); ++ asm ( ++ "LDR R0, [R0, #0]\n\t" ++ "BX R0\n\t" ++ ); ++ } ++} ++ ++/* { dg-final { scan-ipa-dump-times 
"Dead field elimination" 0 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c +new file mode 100644 +index 000000000..a1feac966 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c +@@ -0,0 +1,84 @@ ++/* { dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++typedef struct TYPE_2__ TYPE_1__; ++ ++struct net_device ++{ ++ struct claw_privbk* ml_priv; ++}; ++struct clawctl ++{ ++ int linkid; ++}; ++struct claw_privbk ++{ ++ int system_validate_comp; ++ TYPE_1__* p_env; ++ int ctl_bk; ++}; ++typedef int __u8; ++struct TYPE_2__ ++{ ++ scalar_t__ packing; ++ int api_type; ++}; ++ ++int CLAW_DBF_TEXT (int, int, char*); ++int CONNECTION_REQUEST; ++int HOST_APPL_NAME; ++scalar_t__ PACKING_ASK; ++scalar_t__ PACK_SEND; ++int WS_APPL_NAME_IP_NAME; ++int WS_APPL_NAME_PACKED; ++int claw_send_control (struct net_device*, int, int, int, int, int, int); ++int setup; ++ ++__attribute__((noinline)) int ++claw_send_control (struct net_device* net, int a, int b, int c, int d, int e, ++ int f) ++{ ++ return net->ml_priv->system_validate_comp + a + b + c + d + f; ++} ++ ++__attribute__((used)) static int ++claw_snd_conn_req (struct net_device *dev, __u8 link) ++{ ++ int rc; ++ struct claw_privbk *privptr = dev->ml_priv; ++ struct clawctl *p_ctl; ++ CLAW_DBF_TEXT (2, setup, "snd_conn"); ++ rc = 1; ++ p_ctl = (struct clawctl *)&privptr->ctl_bk; ++ p_ctl->linkid = link; ++ if (privptr->system_validate_comp == 0x00) ++ { ++ return rc; ++ } ++ if (privptr->p_env->packing == PACKING_ASK) ++ { ++ rc = claw_send_control (dev, CONNECTION_REQUEST, 0, 0, 0, ++ WS_APPL_NAME_PACKED, WS_APPL_NAME_PACKED); ++ } ++ if (privptr->p_env->packing == PACK_SEND) ++ { ++ rc = claw_send_control (dev, CONNECTION_REQUEST, 0, 0, 0, ++ 
WS_APPL_NAME_IP_NAME, WS_APPL_NAME_IP_NAME); ++ } ++ if (privptr->p_env->packing == 0) ++ { ++ rc = claw_send_control (dev, CONNECTION_REQUEST, 0, 0, 0, ++ HOST_APPL_NAME, privptr->p_env->api_type); ++ } ++ return rc; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c +new file mode 100644 +index 000000000..fd1e936ca +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c +@@ -0,0 +1,56 @@ ++/* { dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++typedef struct TYPE_4__ TYPE_2__; ++typedef struct TYPE_3__ TYPE_1__; ++ ++typedef int uint8_t; ++typedef int uint16_t; ++ ++struct TYPE_4__ ++{ ++ size_t cpu_id; ++}; ++ ++struct TYPE_3__ ++{ ++ int cpuc_dtrace_flags; ++}; ++ ++TYPE_2__ *CPU; ++volatile int CPU_DTRACE_FAULT; ++TYPE_1__ *cpu_core; ++scalar_t__ dtrace_load8 (uintptr_t); ++ ++__attribute__((used)) static int ++dtrace_bcmp (const void *s1, const void *s2, size_t len) ++{ ++ volatile uint16_t *flags; ++ flags = (volatile uint16_t *)&cpu_core[CPU->cpu_id].cpuc_dtrace_flags; ++ if (s1 == s2) ++ return (0); ++ if (s1 == NULL || s2 == NULL) ++ return (1); ++ if (s1 != s2 && len != 0) ++ { ++ const uint8_t *ps1 = s1; ++ const uint8_t *ps2 = s2; ++ do ++ { ++ if (dtrace_load8 ((uintptr_t)ps1++) != *ps2++) ++ return (1); ++ } ++ while (--len != 0 && !(*flags & CPU_DTRACE_FAULT)); ++ } ++ return (0); ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c +new file mode 100644 +index 000000000..b13d785a9 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c +@@ -0,0 +1,162 @@ ++/* { 
dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++struct mrb_context ++{ ++ size_t stack; ++ size_t stbase; ++ size_t stend; ++ size_t eidx; ++ int *ci; ++ int *cibase; ++ int status; ++}; ++ ++struct RObject ++{ ++ int dummy; ++}; ++ ++struct RHash ++{ ++ int dummy; ++}; ++ ++struct RFiber ++{ ++ struct mrb_context *cxt; ++}; ++ ++struct RClass ++{ ++ int dummy; ++}; ++ ++struct RBasic ++{ ++ int tt; ++}; ++ ++struct RArray ++{ ++ int dummy; ++}; ++ ++typedef int mrb_state; ++typedef int mrb_gc; ++typedef int mrb_callinfo; ++size_t ARY_LEN (struct RArray *); ++size_t MRB_ENV_STACK_LEN (struct RBasic *); ++int MRB_FIBER_TERMINATED; ++ ++#define MRB_TT_ARRAY 140 ++#define MRB_TT_CLASS 139 ++#define MRB_TT_DATA 138 ++#define MRB_TT_ENV 137 ++#define MRB_TT_EXCEPTION 136 ++#define MRB_TT_FIBER 135 ++#define MRB_TT_HASH 134 ++#define MRB_TT_ICLASS 133 ++#define MRB_TT_MODULE 132 ++#define MRB_TT_OBJECT 131 ++#define MRB_TT_PROC 130 ++#define MRB_TT_RANGE 129 ++#define MRB_TT_SCLASS 128 ++ ++size_t ci_nregs (int *); ++int gc_mark_children (int *, int *, struct RBasic *); ++size_t mrb_gc_mark_hash_size (int *, struct RHash *); ++size_t mrb_gc_mark_iv_size (int *, struct RObject *); ++size_t mrb_gc_mark_mt_size (int *, struct RClass *); ++ ++__attribute__((used)) static size_t ++gc_gray_mark (mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) ++{ ++ size_t children = 0; ++ gc_mark_children (mrb, gc, obj); ++ switch (obj->tt) ++ { ++ case MRB_TT_ICLASS: ++ children++; ++ break; ++ ++ case MRB_TT_CLASS: ++ case MRB_TT_SCLASS: ++ case MRB_TT_MODULE: ++ { ++ struct RClass *c = (struct RClass *)obj; ++ children += mrb_gc_mark_iv_size (mrb, (struct RObject *)obj); ++ children += mrb_gc_mark_mt_size (mrb, c); ++ children ++; ++ } ++ break; ++ ++ case MRB_TT_OBJECT: ++ case MRB_TT_DATA: ++ case 
MRB_TT_EXCEPTION: ++ children += mrb_gc_mark_iv_size (mrb, (struct RObject *)obj); ++ break; ++ ++ case MRB_TT_ENV: ++ children += MRB_ENV_STACK_LEN (obj); ++ break; ++ ++ case MRB_TT_FIBER: ++ { ++ struct mrb_context *c = ((struct RFiber *)obj)->cxt; ++ size_t i; ++ mrb_callinfo *ci; ++ if (!c || c->status == MRB_FIBER_TERMINATED) ++ break; ++ ++ i = c->stack - c->stbase; ++ if (c->ci) ++ { ++ i += ci_nregs (c->ci); ++ } ++ if (c->stbase + i > c->stend) ++ i = c->stend - c->stbase; ++ ++ children += i; ++ children += c->eidx; ++ if (c->cibase) ++ { ++ for (i = 0, ci = c->cibase; ci <= c->ci; i++, ci++) ++ ; ++ } ++ children += i; ++ } ++ break; ++ ++ case MRB_TT_ARRAY: ++ { ++ struct RArray *a = (struct RArray *)obj; ++ children += ARY_LEN (a); ++ } ++ break; ++ ++ case MRB_TT_HASH: ++ children += mrb_gc_mark_iv_size (mrb, (struct RObject *)obj); ++ children += mrb_gc_mark_hash_size (mrb, (struct RHash *)obj); ++ break; ++ ++ case MRB_TT_PROC: ++ case MRB_TT_RANGE: ++ children += 2; ++ break; ++ default: ++ break; ++ } ++ ++ return children; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c +new file mode 100644 +index 000000000..bc28a658a +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c +@@ -0,0 +1,126 @@ ++/* { dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++typedef struct TYPE_6__ TYPE_3__; ++typedef struct TYPE_5__ TYPE_2__; ++typedef struct TYPE_4__ TYPE_1__; ++ ++struct io_accel2_cmd ++{ ++ int dummy; ++}; ++ ++struct hpsa_tmf_struct ++{ ++ int it_nexus; ++}; ++ ++struct hpsa_scsi_dev_t ++{ ++ int nphysical_disks; ++ int ioaccel_handle; ++ struct hpsa_scsi_dev_t **phys_disk; ++}; ++ ++struct ctlr_info ++{ ++ TYPE_3__ 
*pdev; ++ struct io_accel2_cmd *ioaccel2_cmd_pool; ++}; ++struct TYPE_4__ ++{ ++ int LunAddrBytes; ++}; ++ ++struct TYPE_5__ ++{ ++ TYPE_1__ LUN; ++}; ++ ++struct CommandList ++{ ++ size_t cmdindex; ++ int cmd_type; ++ struct hpsa_scsi_dev_t *phys_disk; ++ TYPE_2__ Header; ++}; ++ ++struct TYPE_6__ ++{ ++ int dev; ++}; ++ ++int BUG (); ++#define CMD_IOACCEL1 132 ++#define CMD_IOACCEL2 131 ++#define CMD_IOCTL_PEND 130 ++#define CMD_SCSI 129 ++#define IOACCEL2_TMF 128 ++int dev_err (int *, char *, int); ++scalar_t__ hpsa_is_cmd_idle (struct CommandList *); ++int le32_to_cpu (int); ++int test_memcmp (unsigned char *, int *, int); ++ ++__attribute__((used)) static bool ++hpsa_cmd_dev_match (struct ctlr_info *h, struct CommandList *c, ++ struct hpsa_scsi_dev_t *dev, unsigned char *scsi3addr) ++{ ++ int i; ++ bool match = false; ++ struct io_accel2_cmd * c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; ++ struct hpsa_tmf_struct *ac = (struct hpsa_tmf_struct *)c2; ++ ++ if (hpsa_is_cmd_idle (c)) ++ return false; ++ ++ switch (c->cmd_type) ++ { ++ case CMD_SCSI: ++ case CMD_IOCTL_PEND: ++ match = !test_memcmp (scsi3addr, &c->Header.LUN.LunAddrBytes, ++ sizeof (c->Header.LUN.LunAddrBytes)); ++ break; ++ ++ case CMD_IOACCEL1: ++ case CMD_IOACCEL2: ++ if (c->phys_disk == dev) ++ { ++ match = true; ++ } ++ else ++ { ++ for (i = 0; i < dev->nphysical_disks && !match; i++) ++ { ++ match = dev->phys_disk[i] == c->phys_disk; ++ } ++ } ++ break; ++ ++ case IOACCEL2_TMF: ++ for (i = 0; i < dev->nphysical_disks && !match; i++) ++ { ++ match = dev->phys_disk[i]->ioaccel_handle == ++ le32_to_cpu (ac->it_nexus); ++ } ++ break; ++ ++ case 0: ++ match = false; ++ break; ++ default: ++ dev_err (&h->pdev->dev, "unexpected cmd_type: %d\n", c->cmd_type); ++ BUG (); ++ } ++ ++ return match; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c 
+new file mode 100644 +index 000000000..0a585ac3d +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c +@@ -0,0 +1,82 @@ ++/* { dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++typedef struct TYPE_4__ TYPE_2__; ++typedef struct TYPE_3__ TYPE_1__; ++typedef int u32; ++ ++struct mv_udc ++{ ++ TYPE_2__ *op_regs; ++ TYPE_1__ *ep_dqh; ++ struct mv_ep *eps; ++}; ++ ++struct mv_ep ++{ ++ TYPE_1__ *dqh; ++ struct mv_udc *udc; ++}; ++ ++struct TYPE_4__ ++{ ++ int *epctrlx; ++}; ++ ++struct TYPE_3__ ++{ ++ int max_packet_length; ++ int next_dtd_ptr; ++}; ++ ++int EP0_MAX_PKT_SIZE; ++int EPCTRL_RX_ENABLE; ++int EPCTRL_RX_EP_TYPE_SHIFT; ++int EPCTRL_TX_ENABLE; ++int EPCTRL_TX_EP_TYPE_SHIFT; ++int EP_QUEUE_HEAD_IOS; ++int EP_QUEUE_HEAD_MAX_PKT_LEN_POS; ++int EP_QUEUE_HEAD_NEXT_TERMINATE; ++int USB_ENDPOINT_XFER_CONTROL; ++int readl (int *); ++int writel (int, int *); ++ ++__attribute__((used)) static void ++ep0_reset (struct mv_udc *udc) ++{ ++ struct mv_ep *ep; ++ u32 epctrlx; ++ int i = 0; ++ for (i = 0; i < 2; i++) ++ { ++ ep = &udc->eps[i]; ++ ep->udc = udc; ++ ep->dqh = &udc->ep_dqh[i]; ++ ep->dqh->max_packet_length = ++ (EP0_MAX_PKT_SIZE << EP_QUEUE_HEAD_MAX_PKT_LEN_POS) ++ | EP_QUEUE_HEAD_IOS; ++ ep->dqh->next_dtd_ptr = EP_QUEUE_HEAD_NEXT_TERMINATE; ++ epctrlx = readl (&udc->op_regs->epctrlx[0]); ++ if (i) ++ { ++ epctrlx |= EPCTRL_TX_ENABLE ++ | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_TX_EP_TYPE_SHIFT); ++ } ++ else ++ { ++ epctrlx |= EPCTRL_RX_ENABLE ++ | (USB_ENDPOINT_XFER_CONTROL << EPCTRL_RX_EP_TYPE_SHIFT); ++ } ++ writel (epctrlx, &udc->op_regs->epctrlx[0]); ++ } ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c 
+new file mode 100644 +index 000000000..bddd862fe +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c +@@ -0,0 +1,58 @@ ++/* { dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++struct tcpcb ++{ ++ int t_state; ++}; ++ ++struct socket ++{ ++ int dummy; ++}; ++ ++struct proc ++{ ++ int dummy; ++}; ++ ++struct inpcb ++{ ++ scalar_t__ inp_lport; ++}; ++ ++int COMMON_END (int); ++int COMMON_START (); ++int PRU_LISTEN; ++int TCPS_LISTEN; ++int in_pcbbind (struct inpcb *, int *, struct proc *); ++struct inpcb* sotoinpcb (struct socket *); ++ ++__attribute__((used)) static void ++tcp_usr_listen (struct socket *so, struct proc *p) ++{ ++ int error = 0; ++ struct inpcb *inp = sotoinpcb (so); ++ struct tcpcb *tp; ++ ++ COMMON_START (); ++ if (inp->inp_lport == 0) ++ { ++ error = in_pcbbind (inp, NULL, p); ++ } ++ if (error == 0) ++ { ++ tp->t_state = TCPS_LISTEN; ++ } ++ COMMON_END (PRU_LISTEN); ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c +new file mode 100644 +index 000000000..1a06f5eec +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c +@@ -0,0 +1,61 @@ ++/* { dg-do compile} */ ++ ++#define NULL ((void*)0) ++typedef unsigned long size_t; ++typedef long intptr_t; ++typedef unsigned long uintptr_t; ++typedef long scalar_t__; ++typedef int bool; ++#define false 0 ++#define true 1 ++ ++typedef struct TYPE_4__ TYPE_2__; ++typedef struct TYPE_3__ TYPE_1__; ++ ++struct TYPE_4__ ++{ ++ size_t modCount; ++ TYPE_1__ *modList; ++}; ++ ++struct TYPE_3__ ++{ ++ void *modDescr; ++ void *modName; ++}; ++ ++size_t MAX_MODS; ++void *String_Alloc (char *); ++int test_strlen (char *); ++int trap_FD_GetFileList (char *, char 
*, char *, int); ++TYPE_2__ uiInfo; ++ ++__attribute__((used)) static void ++UI_LoadMods () ++{ ++ int numdirs; ++ char dirlist[2048]; ++ char *dirptr; ++ char *descptr; ++ int i; ++ int dirlen; ++ ++ uiInfo.modCount = 0; ++ numdirs = trap_FD_GetFileList ("$modelist", "", dirlist, sizeof (dirlist)); ++ dirptr = dirlist; ++ for (i = 0; i < numdirs; i++) ++ { ++ dirlen = test_strlen (dirptr) + 1; ++ descptr = dirptr + dirlen; ++ uiInfo.modList[uiInfo.modCount].modName = String_Alloc (dirptr); ++ uiInfo.modList[uiInfo.modCount].modDescr = String_Alloc (descptr); ++ dirptr += dirlen + test_strlen (descptr) + 1; ++ uiInfo.modCount++; ++ if (uiInfo.modCount >= MAX_MODS) ++ { ++ break; ++ } ++ } ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c b/gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c +new file mode 100644 +index 000000000..94eb88d5c +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c +@@ -0,0 +1,58 @@ ++/* Supports the MEM_REF offset. ++ _1 = MEM[(struct arc *)ap_4 + 72B].flow; ++ Old rewrite:_1 = ap.reorder.0_8->flow; ++ New rewrite:_1 = MEM[(struct arc.reorder.0 *)ap.reorder.0_8 + 64B].flow. */ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int ++main () ++{ ++ const int MAX = 100; ++ /* A similar scenario can be reproduced only by using local variables. 
*/ ++ arc_p ap = NULL; ++ ap = (arc_p) calloc(MAX, sizeof(arc_t)); ++ printf("%d\n", ap[1].flow); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c b/gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c +new file mode 100644 +index 000000000..bbf9420d0 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c +@@ -0,0 +1,30 @@ ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct T_HASH_ENTRY ++{ ++ unsigned int hash; ++ unsigned int klen; ++ char *key; ++} iHashEntry; ++ ++typedef struct T_HASH ++{ ++ unsigned int size; ++ unsigned int fill; ++ unsigned int keys; ++ ++ iHashEntry **array; ++} uHash; ++ ++uHash *retval; ++ ++int ++main() { ++ retval->array = (iHashEntry **)calloc(sizeof(iHashEntry *), retval->size); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c +new file mode 100644 +index 000000000..f706db968 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c +@@ -0,0 +1,71 @@ ++// support POINTER_DIFF_EXPR & NOP_EXPR to avoid ++// escape_unhandled_rewrite, "Type escapes via a unhandled rewrite stmt" ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++typedef struct network ++{ ++ arc_p arcs; ++ arc_p sorted_arcs; ++ int x; ++ node_p nodes; ++ node_p stop_nodes; ++} network_t; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ 
int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int ++main () ++{ ++ arc_t *old_arcs; ++ node_t *node; ++ node_t *stop; ++ size_t off; ++ network_t* net; ++ ++ for( ; node->number < stop->number; node++ ) ++ { ++ off = node->basic_arc - old_arcs; ++ node->basic_arc = (arc_t *)(net->arcs + off); ++ } ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 3 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c +new file mode 100644 +index 000000000..963295cb4 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c +@@ -0,0 +1,55 @@ ++// support NEGATE_EXPR rewriting ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++int ++main () ++{ ++ int64_t susp = 0; ++ const int MAX = 100; ++ arc_p ap = (arc_p) calloc(MAX, sizeof(arc_t)); ++ ap -= susp; ++ printf("%d\n", ap[1].flow); ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c +new file mode 100644 +index 000000000..aa10506a1 +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c +@@ -0,0 +1,55 @@ ++// release escape_ptr_ptr, "Type is used in a pointer to a 
pointer [not handled yet]"; ++/* { dg-do compile } */ ++ ++#include ++#include ++ ++typedef struct node node_t; ++typedef struct node *node_p; ++ ++typedef struct arc arc_t; ++typedef struct arc *arc_p; ++ ++struct node ++{ ++ int64_t potential; ++ int orientation; ++ node_p child; ++ node_p pred; ++ node_p sibling; ++ node_p sibling_prev; ++ arc_p basic_arc; ++ arc_p firstout; ++ arc_p firstin; ++ arc_p arc_tmp; ++ int64_t flow; ++ int64_t depth; ++ int number; ++ int time; ++}; ++ ++struct arc ++{ ++ int id; ++ int64_t cost; ++ node_p tail; ++ node_p head; ++ short ident; ++ arc_p nextout; ++ arc_p nextin; ++ int64_t flow; ++ int64_t org_cost; ++}; ++ ++const int MAX = 100; ++arc_t **ap = NULL; ++ ++int ++main () ++{ ++ ap = (arc_t**) malloc(MAX * sizeof(arc_t*)); ++ (*ap)[0].id = 300; ++ return 0; ++} ++ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +index 5a476e8f9..6ccb753b5 100644 +--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp ++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +@@ -43,6 +43,10 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/csr_*.c]] \ + gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/rf_*.c]] \ + "" "-fipa-reorder-fields -fdump-ipa-all -flto-partition=one -fwhole-program" + ++# -fipa-struct-reorg=3 ++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfe*.c]] \ ++ "" "-fipa-reorder-fields -fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program" ++ + # All done. 
+ torture-finish + dg-finish +diff --git a/gcc/testsuite/gcc.dg/struct/wo_prof_escape_replace_type.c b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_replace_type.c +new file mode 100644 +index 000000000..fa8c66b9e +--- /dev/null ++++ b/gcc/testsuite/gcc.dg/struct/wo_prof_escape_replace_type.c +@@ -0,0 +1,49 @@ ++/* { dg-do compile } */ ++ ++#include ++ ++struct AngleDef ++{ ++ double K; ++ double th0; ++}; ++typedef struct AngleDef angldef; ++ ++struct bndangdihe ++{ ++ int nbond; ++ int nangl; ++ int ndihe; ++}; ++typedef struct bndangdihe bah; ++ ++struct ambprmtop ++{ ++ double *AnglK; ++ double *AnglEq; ++ bah nBAH; ++ angldef *AParam; ++ char source[512]; ++ char eprulesource[512]; ++}; ++typedef struct ambprmtop prmtop; ++ ++static void OrderBondParameters (prmtop *tp) ++{ ++ int i; ++ tp->AParam = (angldef *)malloc (tp->nBAH.nangl * sizeof (angldef)); ++ for (i = 0; i < tp->nBAH.nangl; i++) ++ { ++ tp->AParam[i].K = tp->AnglK[i]; ++ tp->AParam[i].th0 = tp->AnglEq[i]; ++ } ++} ++ ++void main () ++{ ++ prmtop *tp = (prmtop *)malloc (100 * sizeof (prmtop)); ++ OrderBondParameters (tp); ++} ++ ++/*---------------------------------------------------------------------------------------------*/ ++/* { dg-final { scan-ipa-dump "No structures to transform" "struct_reorg" } } */ +-- +2.33.0 + diff --git a/0022-LoongArch-Define-HAVE_AS_TLS-to-0-if-it-s-undefined-.patch b/0022-LoongArch-Define-HAVE_AS_TLS-to-0-if-it-s-undefined-.patch new file mode 100644 index 0000000000000000000000000000000000000000..403d90e1e4d8191a69f9ada985967de8acfd7b6f --- /dev/null +++ b/0022-LoongArch-Define-HAVE_AS_TLS-to-0-if-it-s-undefined-.patch @@ -0,0 +1,34 @@ +From 0527589fb1b7b97cff2c441c1219fb9c8a44dd23 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Mon, 30 Oct 2023 19:39:27 +0800 +Subject: [PATCH 022/188] LoongArch: Define HAVE_AS_TLS to 0 if it's undefined + [PR112299] + +Now loongarch.md uses HAVE_AS_TLS, we need this to fix the failure +building a cross compiler if the cross 
assembler is not installed yet. + +gcc/ChangeLog: + + PR target/112299 + * config/loongarch/loongarch-opts.h (HAVE_AS_TLS): Define to 0 + if not defined yet. +--- + gcc/config/loongarch/loongarch-opts.h | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +index f2b59abe6..c4975af00 100644 +--- a/gcc/config/loongarch/loongarch-opts.h ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -103,4 +103,8 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target, + #define HAVE_AS_MRELAX_OPTION 0 + #endif + ++#ifndef HAVE_AS_TLS ++#define HAVE_AS_TLS 0 ++#endif ++ + #endif /* LOONGARCH_OPTS_H */ +-- +2.43.0 + diff --git a/0023-LoongArch-Fix-instruction-name-typo-in-lsx_vreplgr2v.patch b/0023-LoongArch-Fix-instruction-name-typo-in-lsx_vreplgr2v.patch new file mode 100644 index 0000000000000000000000000000000000000000..72f8585f64fc261163c30e6a3a5871fbb0fd8b4d --- /dev/null +++ b/0023-LoongArch-Fix-instruction-name-typo-in-lsx_vreplgr2v.patch @@ -0,0 +1,30 @@ +From bc3ae60454a51b80538b6deba21975d43de23b6a Mon Sep 17 00:00:00 2001 +From: Chenghui Pan +Date: Fri, 3 Nov 2023 17:01:36 +0800 +Subject: [PATCH 023/188] LoongArch: Fix instruction name typo in + lsx_vreplgr2vr_ template + +gcc/ChangeLog: + + * config/loongarch/lsx.md: Fix instruction name typo in + lsx_vreplgr2vr_ template. 
+--- + gcc/config/loongarch/lsx.md | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md +index 4af32c8df..55c7d79a0 100644 +--- a/gcc/config/loongarch/lsx.md ++++ b/gcc/config/loongarch/lsx.md +@@ -1523,7 +1523,7 @@ + "ISA_HAS_LSX" + { + if (which_alternative == 1) +- return "ldi.\t%w0,0"; ++ return "vldi.\t%w0,0"; + + if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode)) + return "#"; +-- +2.43.0 + diff --git a/0023-PGO-kernel-Add-fkernel-pgo-option-to-support-PGO-ker.patch b/0023-PGO-kernel-Add-fkernel-pgo-option-to-support-PGO-ker.patch new file mode 100644 index 0000000000000000000000000000000000000000..7577a015423da9117ca25f4de38eb2bd08c1face --- /dev/null +++ b/0023-PGO-kernel-Add-fkernel-pgo-option-to-support-PGO-ker.patch @@ -0,0 +1,44 @@ +From 9dc6d315ba350c9113f486ec897217a82838fb73 Mon Sep 17 00:00:00 2001 +From: Xiong Zhou +Date: Mon, 7 Aug 2023 14:44:56 +0800 +Subject: [PATCH 1/2] [PGO kernel] Add fkernel-pgo option to support PGO kernel + compilation. + +--- + gcc/common.opt | 4 ++++ + gcc/tree-profile.cc | 4 +++- + 2 files changed, 7 insertions(+), 1 deletion(-) + +diff --git a/gcc/common.opt b/gcc/common.opt +index e365a48bc..bd3b7dcb1 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -2363,6 +2363,10 @@ fprofile-generate= + Common Joined RejectNegative + Enable common options for generating profile info for profile feedback directed optimizations, and set -fprofile-dir=. + ++fkernel-pgo ++Common Var(flag_kernel_pgo) Optimization Init(0) ++Disable TLS setting of instrumentation variables to support PGO kernel compilation in -fprofile-generate, as kernel does not support TLS. ++ + fprofile-info-section + Common RejectNegative + Register the profile information in the .gcov_info section instead of using a constructor/destructor. 
+diff --git a/gcc/tree-profile.cc b/gcc/tree-profile.cc +index 6d40401f8..e7646f1a1 100644 +--- a/gcc/tree-profile.cc ++++ b/gcc/tree-profile.cc +@@ -108,7 +108,9 @@ init_ic_make_global_vars (void) + DECL_ARTIFICIAL (ic_tuple_var) = 1; + DECL_INITIAL (ic_tuple_var) = NULL; + DECL_EXTERNAL (ic_tuple_var) = 1; +- if (targetm.have_tls) ++ /* Disable TLS setting when compiling kernel in -fprofile-generate, ++ as kernel does not support TLS. */ ++ if (targetm.have_tls && !flag_kernel_pgo) + set_decl_tls_model (ic_tuple_var, decl_default_tls_model (ic_tuple_var)); + } + +-- +2.33.0 + diff --git a/0024-LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch b/0024-LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch new file mode 100644 index 0000000000000000000000000000000000000000..0a43f5ea482fd264788107d6177020b4a75fc4ba --- /dev/null +++ b/0024-LoongArch-Use-simplify_gen_subreg-instead-of-gen_rtx.patch @@ -0,0 +1,116 @@ +From b8f47a362000bb51dec88e0a73f885c57a46f568 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Sun, 12 Nov 2023 00:55:13 +0800 +Subject: [PATCH 024/188] LoongArch: Use simplify_gen_subreg instead of + gen_rtx_SUBREG in loongarch_expand_vec_cond_mask_expr [PR112476] + +GCC internal says: + + 'subreg's of 'subreg's are not supported. Using + 'simplify_gen_subreg' is the recommended way to avoid this problem. + +Unfortunately loongarch_expand_vec_cond_mask_expr might create nested +subreg under certain circumstances, causing an ICE. + +Use simplify_gen_subreg as the internal document suggests. + +gcc/ChangeLog: + + PR target/112476 + * config/loongarch/loongarch.cc + (loongarch_expand_vec_cond_mask_expr): Call simplify_gen_subreg + instead of gen_rtx_SUBREG. + +gcc/testsuite/ChangeLog: + + PR target/112476 + * gcc.target/loongarch/pr112476-1.c: New test. + * gcc.target/loongarch/pr112476-2.c: New test. 
+--- + gcc/config/loongarch/loongarch.cc | 11 ++++++--- + .../gcc.target/loongarch/pr112476-1.c | 24 +++++++++++++++++++ + .../gcc.target/loongarch/pr112476-2.c | 5 ++++ + 3 files changed, 37 insertions(+), 3 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr112476-1.c + create mode 100644 gcc/testsuite/gcc.target/loongarch/pr112476-2.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index fa5c14be6..65ca1489f 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -11190,7 +11190,9 @@ loongarch_expand_vec_cond_mask_expr (machine_mode mode, machine_mode vimode, + if (mode != vimode) + { + xop1 = gen_reg_rtx (vimode); +- emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0)); ++ emit_move_insn (xop1, ++ simplify_gen_subreg (vimode, operands[1], ++ mode, 0)); + } + emit_move_insn (src1, xop1); + } +@@ -11207,7 +11209,9 @@ loongarch_expand_vec_cond_mask_expr (machine_mode mode, machine_mode vimode, + if (mode != vimode) + { + xop2 = gen_reg_rtx (vimode); +- emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0)); ++ emit_move_insn (xop2, ++ simplify_gen_subreg (vimode, operands[2], ++ mode, 0)); + } + emit_move_insn (src2, xop2); + } +@@ -11226,7 +11230,8 @@ loongarch_expand_vec_cond_mask_expr (machine_mode mode, machine_mode vimode, + gen_rtx_AND (vimode, mask, src1)); + /* The result is placed back to a register with the mask. 
*/ + emit_insn (gen_rtx_SET (mask, bsel)); +- emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0)); ++ emit_move_insn (operands[0], simplify_gen_subreg (mode, mask, ++ vimode, 0)); + } + } + +diff --git a/gcc/testsuite/gcc.target/loongarch/pr112476-1.c b/gcc/testsuite/gcc.target/loongarch/pr112476-1.c +new file mode 100644 +index 000000000..4cf133e7a +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr112476-1.c +@@ -0,0 +1,24 @@ ++/* PR target/112476: ICE with -mlsx */ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=loongarch64 -mfpu=64 -mabi=lp64d -mlsx" } */ ++ ++int foo, bar; ++float baz, res, a; ++ ++void ++apply_adjacent_ternary (float *dst, float *src0) ++{ ++ do ++ { ++ __builtin_memcpy (&res, &src0, sizeof (res)); ++ *dst = foo ? baz : res; ++ dst++; ++ } ++ while (dst != src0); ++} ++ ++void ++xx (void) ++{ ++ apply_adjacent_ternary (&a, &a); ++} +diff --git a/gcc/testsuite/gcc.target/loongarch/pr112476-2.c b/gcc/testsuite/gcc.target/loongarch/pr112476-2.c +new file mode 100644 +index 000000000..cc0dfbfc9 +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/pr112476-2.c +@@ -0,0 +1,5 @@ ++/* PR target/112476: ICE with -mlasx */ ++/* { dg-do compile } */ ++/* { dg-options "-O2 -march=loongarch64 -mfpu=64 -mabi=lp64d -mlasx" } */ ++ ++#include "pr112476-1.c" +-- +2.43.0 + diff --git a/0024-Struct-Reorg-Refactoring-and-merge-reorder-fields-in.patch b/0024-Struct-Reorg-Refactoring-and-merge-reorder-fields-in.patch new file mode 100644 index 0000000000000000000000000000000000000000..ddea235db2e99be7a4427d1ec5ba9de802016fd5 --- /dev/null +++ b/0024-Struct-Reorg-Refactoring-and-merge-reorder-fields-in.patch @@ -0,0 +1,1470 @@ +From 535d00d764c38e70c563ac59e702a20e3b744a95 Mon Sep 17 00:00:00 2001 +From: liyancheng <412998149@qq.com> +Date: Tue, 29 Aug 2023 19:18:21 +0800 +Subject: [PATCH] [Struct Reorg] Refactoring and merge reorder fields into + struct reorg optimization + +Merge reorder_fields pass into struct_reorg pass. 
Using flag -fipa-struct-reorg=[0,1,2,3] +to enable none, strcut reorg, reorder fields and dfe optimizations. +--- + gcc/gimple-ssa-warn-access.cc | 2 +- + gcc/ipa-free-lang-data.cc | 9 +- + gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 353 ++++++++---------- + gcc/passes.def | 1 - + gcc/symbol-summary.h | 4 +- + gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c | 2 +- + .../gcc.dg/struct/dfe_ele_minus_verify.c | 2 +- + .../gcc.dg/struct/dfe_extr_board_init.c | 2 +- + gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c | 2 +- + gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c | 2 +- + gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c | 2 +- + gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c | 2 +- + .../gcc.dg/struct/dfe_extr_mv_udc_core.c | 2 +- + .../gcc.dg/struct/dfe_extr_tcp_usrreq.c | 2 +- + .../gcc.dg/struct/dfe_extr_ui_main.c | 2 +- + .../gcc.dg/struct/dfe_mem_ref_offset.c | 2 +- + .../struct/dfe_mul_layer_ptr_record_bug.c | 2 +- + gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c | 2 +- + .../gcc.dg/struct/dfe_ptr_negate_expr.c | 2 +- + gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c | 2 +- + .../struct/rf_DTE_struct_instance_field.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c | 2 +- + .../gcc.dg/struct/rf_check_ptr_layers_bug.c | 2 +- + .../gcc.dg/struct/rf_create_fields_bug.c | 2 +- + .../gcc.dg/struct/rf_create_new_func_bug.c | 2 +- + .../gcc.dg/struct/rf_ele_minus_verify.c | 2 +- + .../gcc.dg/struct/rf_escape_by_base.c | 2 +- + .../gcc.dg/struct/rf_external_func_types.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c | 2 +- + .../gcc.dg/struct/rf_mem_ref_offset.c | 2 +- + .../struct/rf_mul_layer_ptr_record_bug.c | 2 +- + .../gcc.dg/struct/rf_pass_conflict.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c | 2 +- + .../gcc.dg/struct/rf_ptr_negate_expr.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c | 2 +- + 
.../gcc.dg/struct/rf_rescusive_type.c | 2 +- + .../struct/rf_rewrite_assign_more_cmp.c | 2 +- + .../gcc.dg/struct/rf_rewrite_cond_bug.c | 2 +- + .../gcc.dg/struct/rf_rewrite_cond_more_cmp.c | 2 +- + .../gcc.dg/struct/rf_rewrite_phi_bug.c | 2 +- + gcc/testsuite/gcc.dg/struct/rf_visible_func.c | 2 +- + .../gcc.dg/struct/rf_void_ptr_param_func.c | 2 +- + gcc/testsuite/gcc.dg/struct/struct-reorg.exp | 2 +- + gcc/timevar.def | 1 - + gcc/tree-pass.h | 1 - + 48 files changed, 201 insertions(+), 252 deletions(-) + +diff --git a/gcc/gimple-ssa-warn-access.cc b/gcc/gimple-ssa-warn-access.cc +index 7f5c92c96..a24645783 100644 +--- a/gcc/gimple-ssa-warn-access.cc ++++ b/gcc/gimple-ssa-warn-access.cc +@@ -2198,7 +2198,7 @@ pass_waccess::gate (function *) + In pass waccess, it will traverse all SSA and cause ICE + when handling these unused SSA. So temporarily disable + pass waccess when enable structure optimizations. */ +- if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) ++ if (flag_ipa_struct_reorg) + return false; + + return (warn_free_nonheap_object +diff --git a/gcc/ipa-free-lang-data.cc b/gcc/ipa-free-lang-data.cc +index a88381ddb..801e95cea 100644 +--- a/gcc/ipa-free-lang-data.cc ++++ b/gcc/ipa-free-lang-data.cc +@@ -49,6 +49,9 @@ + #include "except.h" + #include "ipa-utils.h" + ++/* Check whether in C language or LTO with only C language. */ ++extern bool lang_c_p (void); ++ + namespace { + + /* Data used when collecting DECLs and TYPEs for language data removal. */ +@@ -105,7 +108,8 @@ fld_simplified_type_name (tree type) + /* Simplify type will cause that struct A and struct A within + struct B are different type pointers, so skip it in structure + optimizations. 
*/ +- if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) ++ if (flag_ipa_struct_reorg && lang_c_p () ++ && flag_lto_partition == LTO_PARTITION_ONE) + return TYPE_NAME (type); + + if (!TYPE_NAME (type) || TREE_CODE (TYPE_NAME (type)) != TYPE_DECL) +@@ -349,7 +353,8 @@ fld_simplified_type (tree t, class free_lang_data_d *fld) + /* Simplify type will cause that struct A and struct A within + struct B are different type pointers, so skip it in structure + optimizations. */ +- if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) ++ if (flag_ipa_struct_reorg && lang_c_p () ++ && flag_lto_partition == LTO_PARTITION_ONE) + return t; + if (POINTER_TYPE_P (t)) + return fld_incomplete_type_of (t, fld); +diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +index eac5fac7e..dcc6df496 100644 +--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc ++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc +@@ -108,6 +108,37 @@ along with GCC; see the file COPYING3. If not see + #include "cfgloop.h" + #include "langhooks.h" + ++/* Check whether in C language or LTO with only C language. */ ++ ++bool ++lang_c_p (void) ++{ ++ const char *language_string = lang_hooks.name; ++ ++ if (!language_string) ++ return false; ++ ++ if (lang_GNU_C ()) ++ return true; ++ else if (strcmp (language_string, "GNU GIMPLE") == 0) // for LTO check ++ { ++ unsigned i = 0; ++ tree t = NULL_TREE; ++ ++ FOR_EACH_VEC_SAFE_ELT (all_translation_units, i, t) ++ { ++ language_string = TRANSLATION_UNIT_LANGUAGE (t); ++ if (language_string == NULL ++ || strncmp (language_string, "GNU C", 5) ++ || (language_string[5] != '\0' ++ && !(ISDIGIT (language_string[5])))) ++ return false; ++ } ++ return true; ++ } ++ return false; ++} ++ + namespace { + + using namespace struct_reorg; +@@ -198,37 +229,6 @@ gimplify_build1 (gimple_stmt_iterator *gsi, enum tree_code code, tree type, + GSI_SAME_STMT); + } + +-/* Check whether in C language or LTO with only C language. 
*/ +- +-static bool +-lang_c_p (void) +-{ +- const char *language_string = lang_hooks.name; +- +- if (!language_string) +- return false; +- +- if (lang_GNU_C ()) +- return true; +- else if (strcmp (language_string, "GNU GIMPLE") == 0) // For LTO check +- { +- unsigned i = 0; +- tree t = NULL_TREE; +- +- FOR_EACH_VEC_SAFE_ELT (all_translation_units, i, t) +- { +- language_string = TRANSLATION_UNIT_LANGUAGE (t); +- if (language_string == NULL +- || strncmp (language_string, "GNU C", 5) +- || (language_string[5] != '\0' +- && !(ISDIGIT (language_string[5])))) +- return false; +- } +- return true; +- } +- return false; +-} +- + /* Get the number of pointer layers. */ + + int +@@ -262,29 +262,23 @@ is_from_void_ptr_parm (tree ssa_name) + && VOID_POINTER_P (TREE_TYPE (ssa_name))); + } + +-enum srmode +-{ +- NORMAL = 0, +- COMPLETE_STRUCT_RELAYOUT, +- STRUCT_REORDER_FIELDS +-}; +- + /* Enum the struct layout optimize level, + which should be the same as the option -fstruct-reorg=. */ + + enum struct_layout_opt_level + { + NONE = 0, +- STRUCT_REORG, +- STRUCT_REORDER_FIELDS_SLO, +- DEAD_FIELD_ELIMINATION ++ STRUCT_SPLIT = 1 << 0, ++ COMPLETE_STRUCT_RELAYOUT = 1 << 1, ++ STRUCT_REORDER_FIELDS = 1 << 2, ++ DEAD_FIELD_ELIMINATION = 1 << 3 + }; + + static bool is_result_of_mult (tree arg, tree *num, tree struct_size); + static bool isptrptr (tree type); + void get_base (tree &base, tree expr); + +-srmode current_mode; ++static unsigned int current_layout_opt_level; + hash_map replace_type_map; + + /* Return true if one of these types is created by struct-reorg. 
*/ +@@ -626,7 +620,7 @@ void + srtype::simple_dump (FILE *f) + { + print_generic_expr (f, type); +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + fprintf (f, "(%d)", TYPE_UID (type)); + } + +@@ -673,7 +667,7 @@ srfield::create_new_fields (tree newtype[max_split], + tree newfields[max_split], + tree newlast[max_split]) + { +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + create_new_reorder_fields (newtype, newfields, newlast); + return; +@@ -861,7 +855,7 @@ srtype::create_new_type (void) + we are not splitting the struct into two clusters, + then just return false and don't change the type. */ + if (!createnewtype && maxclusters == 0 +- && current_mode != STRUCT_REORDER_FIELDS) ++ && current_layout_opt_level < STRUCT_REORDER_FIELDS) + { + newtype[0] = type; + return false; +@@ -889,8 +883,7 @@ srtype::create_new_type (void) + sprintf (id, "%d", i); + if (tname) + { +- name = concat (tname, current_mode == STRUCT_REORDER_FIELDS +- ? ".reorder." 
: ".reorg.", id, NULL); ++ name = concat (tname, ".reorg.", id, NULL); + TYPE_NAME (newtype[i]) = build_decl (UNKNOWN_LOCATION, + TYPE_DECL, + get_identifier (name), +@@ -902,8 +895,7 @@ srtype::create_new_type (void) + for (unsigned i = 0; i < fields.length (); i++) + { + srfield *f = fields[i]; +- if (current_mode == STRUCT_REORDER_FIELDS +- && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION ++ if (current_layout_opt_level & DEAD_FIELD_ELIMINATION + && !(f->field_access & READ_FIELD)) + continue; + f->create_new_fields (newtype, newfields, newlast); +@@ -924,13 +916,12 @@ srtype::create_new_type (void) + + warn_padded = save_warn_padded; + +- if (current_mode == STRUCT_REORDER_FIELDS ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && replace_type_map.get (this->newtype[0]) == NULL) + replace_type_map.put (this->newtype[0], this->type); + if (dump_file) + { +- if (current_mode == STRUCT_REORDER_FIELDS +- && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION ++ if (current_layout_opt_level & DEAD_FIELD_ELIMINATION + && has_dead_field ()) + fprintf (dump_file, "Dead field elimination.\n"); + } +@@ -1052,8 +1043,7 @@ srfunction::create_new_decls (void) + sprintf (id, "%d", j); + if (tname) + { +- name = concat (tname, current_mode == STRUCT_REORDER_FIELDS +- ? ".reorder." 
: ".reorg.", id, NULL); ++ name = concat (tname, ".reorg.", id, NULL); + new_name = get_identifier (name); + free (name); + } +@@ -1264,7 +1254,7 @@ public: + bool done_recording; + + // Methods +- unsigned execute (enum srmode mode); ++ unsigned execute (unsigned int opt); + void mark_type_as_escape (tree type, escape_type escapes, + gimple *stmt = NULL); + +@@ -2651,7 +2641,7 @@ escape_type_volatile_array_or_ptrptr (tree type) + return escape_volatile; + if (isarraytype (type)) + return escape_array; +- if (isptrptr (type) && (current_mode != STRUCT_REORDER_FIELDS)) ++ if (isptrptr (type) && (current_layout_opt_level < STRUCT_REORDER_FIELDS)) + return escape_ptr_ptr; + return does_not_escape; + } +@@ -2672,12 +2662,11 @@ ipa_struct_reorg::record_field_type (tree field, srtype *base_srtype) + field_srfield->type = field_srtype; + field_srtype->add_field_site (field_srfield); + } +- if (field_srtype == base_srtype && current_mode != COMPLETE_STRUCT_RELAYOUT +- && current_mode != STRUCT_REORDER_FIELDS) ++ if (field_srtype == base_srtype && current_layout_opt_level == STRUCT_SPLIT) + base_srtype->mark_escape (escape_rescusive_type, NULL); + /* Types of non-pointer field are difficult to track the correctness + of the rewrite when it used by the escaped type. */ +- if (current_mode == STRUCT_REORDER_FIELDS ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && TREE_CODE (field_type) == RECORD_TYPE) + field_srtype->mark_escape (escape_instance_field, NULL); + } +@@ -2704,7 +2693,7 @@ ipa_struct_reorg::record_struct_field_types (tree base_type, + base_srtype->mark_escape (e, NULL); + /* Types of non-pointer field are difficult to track the correctness + of the rewrite when it used by the escaped type. 
*/ +- if (current_mode == STRUCT_REORDER_FIELDS ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && TREE_CODE (field_type) == RECORD_TYPE) + base_srtype->mark_escape (escape_instance_field, NULL); + if (handled_type (field_type)) +@@ -2895,8 +2884,7 @@ ipa_struct_reorg::record_var (tree decl, escape_type escapes, int arg) + + /* Separate instance is hard to trace in complete struct + relayout optimization. */ +- if ((current_mode == COMPLETE_STRUCT_RELAYOUT +- || current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT + && TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE) + e = escape_separate_instance; + +@@ -3000,7 +2988,7 @@ ipa_struct_reorg::find_vars (gimple *stmt) + /* Add a safe func mechanism. */ + bool l_find = true; + bool r_find = true; +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + l_find = !(current_function->is_safe_func + && TREE_CODE (lhs) == SSA_NAME +@@ -3046,7 +3034,7 @@ ipa_struct_reorg::find_vars (gimple *stmt) + } + } + } +- else if ((current_mode == STRUCT_REORDER_FIELDS) ++ else if ((current_layout_opt_level >= STRUCT_REORDER_FIELDS) + && (gimple_assign_rhs_code (stmt) == LE_EXPR + || gimple_assign_rhs_code (stmt) == LT_EXPR + || gimple_assign_rhs_code (stmt) == GE_EXPR +@@ -3057,7 +3045,7 @@ ipa_struct_reorg::find_vars (gimple *stmt) + find_var (gimple_assign_rhs2 (stmt), stmt); + } + /* Find void ssa_name from stmt such as: _2 = _1 - old_arcs_1. 
*/ +- else if ((current_mode == STRUCT_REORDER_FIELDS) ++ else if ((current_layout_opt_level >= STRUCT_REORDER_FIELDS) + && gimple_assign_rhs_code (stmt) == POINTER_DIFF_EXPR + && types_compatible_p ( + TYPE_MAIN_VARIANT (TREE_TYPE (gimple_assign_rhs1 (stmt))), +@@ -3270,8 +3258,7 @@ ipa_struct_reorg::maybe_record_stmt (cgraph_node *node, gimple *stmt) + default: + break; + } +- if (current_mode == STRUCT_REORDER_FIELDS +- && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION) ++ if (current_layout_opt_level & DEAD_FIELD_ELIMINATION) + { + /* Look for loads and stores. */ + walk_stmt_load_store_ops (stmt, this, find_field_p_load, +@@ -3427,9 +3414,11 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + arg = gimple_assign_rhs1 (size_def_stmt); + size_def_stmt = SSA_NAME_DEF_STMT (arg); + } +- else if (rhs_code == NEGATE_EXPR && current_mode == STRUCT_REORDER_FIELDS) ++ else if (rhs_code == NEGATE_EXPR ++ && current_layout_opt_level >= STRUCT_REORDER_FIELDS) + return trace_calculate_negate (size_def_stmt, num, struct_size); +- else if (rhs_code == NOP_EXPR && current_mode == STRUCT_REORDER_FIELDS) ++ else if (rhs_code == NOP_EXPR ++ && current_layout_opt_level >= STRUCT_REORDER_FIELDS) + return trace_calculate_diff (size_def_stmt, num); + else + { +@@ -3447,15 +3436,15 @@ is_result_of_mult (tree arg, tree *num, tree struct_size) + bool + ipa_struct_reorg::handled_allocation_stmt (gimple *stmt) + { +- if ((current_mode == STRUCT_REORDER_FIELDS) ++ if ((current_layout_opt_level >= STRUCT_REORDER_FIELDS) + && (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) + || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) + || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))) + return true; +- if ((current_mode == COMPLETE_STRUCT_RELAYOUT) ++ if ((current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT) + && gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)) + return true; +- if ((current_mode == NORMAL) ++ if ((current_layout_opt_level == STRUCT_SPLIT) + && 
(gimple_call_builtin_p (stmt, BUILT_IN_REALLOC) + || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC) + || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC) +@@ -3576,7 +3565,7 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other, + /* x_1 = y.x_nodes; void *x; + Directly mark the structure pointer type assigned + to the void* variable as escape. */ +- else if (current_mode == STRUCT_REORDER_FIELDS ++ else if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && TREE_CODE (side) == SSA_NAME + && VOID_POINTER_P (TREE_TYPE (side)) + && SSA_NAME_VAR (side) +@@ -3834,7 +3823,7 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + and doesn't mark escape follow.). */ + /* _1 = MEM[(struct arc_t * *)a_1]. + then base a_1: ssa_name - pointer_type - integer_type. */ +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + bool is_int_ptr = POINTER_TYPE_P (TREE_TYPE (base)) + && (TREE_CODE (inner_type (TREE_TYPE (base))) +@@ -3896,7 +3885,7 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect, + /* Escape the operation of fetching field with pointer offset such as: + *(&(t->right)) = malloc (0); -> MEM[(struct node * *)_1 + 8B] = malloc (0); + */ +- if (current_mode != NORMAL ++ if (current_layout_opt_level > STRUCT_SPLIT + && (TREE_CODE (expr) == MEM_REF) && (offset != 0)) + { + gcc_assert (can_escape); +@@ -4060,7 +4049,7 @@ ipa_struct_reorg::maybe_record_call (cgraph_node *node, gcall *stmt) + /* callee_func (_1, _2); + Check the callee func, instead of current func. 
*/ + if (!(free_or_realloc +- || (current_mode == STRUCT_REORDER_FIELDS ++ || (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && safe_functions.contains ( + node->get_edge (stmt)->callee))) + && VOID_POINTER_P (argtypet)) +@@ -4088,12 +4077,7 @@ ipa_struct_reorg::record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt) + realpart, imagpart, address, escape_from_base)) + return; + +- if (current_mode == STRUCT_REORDER_FIELDS) +- { +- if (!opt_for_fn (current_function_decl, flag_ipa_reorder_fields)) +- type->mark_escape (escape_non_optimize, stmt); +- } +- else ++ if (current_layout_opt_level > NONE) + { + if (!opt_for_fn (current_function_decl, flag_ipa_struct_reorg)) + type->mark_escape (escape_non_optimize, stmt); +@@ -4197,7 +4181,7 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl, + void + ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type) + { +- if (current_mode == COMPLETE_STRUCT_RELAYOUT ++ if (current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT + && handled_allocation_stmt (stmt)) + { + tree arg0 = gimple_call_arg (stmt, 0); +@@ -4291,7 +4275,7 @@ ipa_struct_reorg::check_definition_call (srdecl *decl, vec &worklist) + if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)) + check_type_and_push (gimple_call_arg (stmt, 0), decl, worklist, stmt); + +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + if (!handled_allocation_stmt (stmt)) + type->mark_escape (escape_return, stmt); +@@ -4341,7 +4325,8 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist) + type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name)); + return; + } +- if (current_mode == STRUCT_REORDER_FIELDS && SSA_NAME_VAR (ssa_name) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS ++ && SSA_NAME_VAR (ssa_name) + && VOID_POINTER_P (TREE_TYPE (SSA_NAME_VAR (ssa_name)))) + type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name)); + gimple *stmt = SSA_NAME_DEF_STMT 
(ssa_name); +@@ -4425,7 +4410,7 @@ ipa_struct_reorg::check_other_side (srdecl *decl, tree other, gimple *stmt, + { + /* In Complete Struct Relayout, if lhs type is the same + as rhs type, we could return without any harm. */ +- if (current_mode == COMPLETE_STRUCT_RELAYOUT) ++ if (current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT) + return; + + tree base; +@@ -4437,7 +4422,7 @@ ipa_struct_reorg::check_other_side (srdecl *decl, tree other, gimple *stmt, + if (!get_type_field (other, base, indirect, type1, field, + realpart, imagpart, address, escape_from_base)) + { +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + /* Release INTEGER_TYPE cast to struct pointer. */ + bool cast_from_int_ptr = current_function->is_safe_func && base +@@ -4487,7 +4472,8 @@ get_base (tree &base, tree expr) + void + ipa_struct_reorg::check_ptr_layers (tree a_expr, tree b_expr, gimple *stmt) + { +- if (current_mode != STRUCT_REORDER_FIELDS || current_function->is_safe_func ++ if (current_layout_opt_level < STRUCT_REORDER_FIELDS ++ || current_function->is_safe_func + || !(POINTER_TYPE_P (TREE_TYPE (a_expr))) + || !(POINTER_TYPE_P (TREE_TYPE (b_expr))) + || !handled_type (TREE_TYPE (a_expr)) +@@ -4554,12 +4540,9 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + tree rhs2 = gimple_cond_rhs (stmt); + tree orhs = rhs1; + enum tree_code code = gimple_cond_code (stmt); +- if ((current_mode == NORMAL && (code != EQ_EXPR && code != NE_EXPR)) +- || (current_mode == COMPLETE_STRUCT_RELAYOUT +- && (code != EQ_EXPR && code != NE_EXPR +- && code != LT_EXPR && code != LE_EXPR +- && code != GT_EXPR && code != GE_EXPR)) +- || (current_mode == STRUCT_REORDER_FIELDS ++ if ((current_layout_opt_level == STRUCT_SPLIT ++ && (code != EQ_EXPR && code != NE_EXPR)) ++ || (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT + && (code != EQ_EXPR && code != NE_EXPR + && code != LT_EXPR && code != LE_EXPR + && code != GT_EXPR && code != 
GE_EXPR))) +@@ -4592,15 +4575,12 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt, + tree rhs2 = gimple_assign_rhs2 (stmt); + tree orhs = rhs1; + enum tree_code code = gimple_assign_rhs_code (stmt); +- if ((current_mode == NORMAL && (code != EQ_EXPR && code != NE_EXPR)) +- || (current_mode == COMPLETE_STRUCT_RELAYOUT +- && (code != EQ_EXPR && code != NE_EXPR +- && code != LT_EXPR && code != LE_EXPR +- && code != GT_EXPR && code != GE_EXPR)) +- || (current_mode == STRUCT_REORDER_FIELDS ++ if ((current_layout_opt_level == STRUCT_SPLIT ++ && (code != EQ_EXPR && code != NE_EXPR)) ++ || (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT + && (code != EQ_EXPR && code != NE_EXPR + && code != LT_EXPR && code != LE_EXPR +- && code != GT_EXPR && code != GE_EXPR))) ++ && code != GT_EXPR && code != GE_EXPR))) + { + mark_expr_escape (rhs1, escape_non_eq, stmt); + mark_expr_escape (rhs2, escape_non_eq, stmt); +@@ -4722,9 +4702,9 @@ ipa_struct_reorg::record_function (cgraph_node *node) + escapes = escape_marked_as_used; + else if (!node->local) + { +- if (current_mode != STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level < STRUCT_REORDER_FIELDS) + escapes = escape_visible_function; +- if (current_mode == STRUCT_REORDER_FIELDS && node->externally_visible) ++ else if (node->externally_visible) + escapes = escape_visible_function; + } + else if (!node->can_change_signature) +@@ -4732,12 +4712,7 @@ ipa_struct_reorg::record_function (cgraph_node *node) + else if (!tree_versionable_function_p (node->decl)) + escapes = escape_noclonable_function; + +- if (current_mode == STRUCT_REORDER_FIELDS) +- { +- if (!opt_for_fn (node->decl, flag_ipa_reorder_fields)) +- escapes = escape_non_optimize; +- } +- else if (current_mode == NORMAL || current_mode == COMPLETE_STRUCT_RELAYOUT) ++ if (current_layout_opt_level > NONE) + { + if (!opt_for_fn (node->decl, flag_ipa_struct_reorg)) + escapes = escape_non_optimize; +@@ -4747,10 +4722,10 @@ ipa_struct_reorg::record_function 
(cgraph_node *node) + gimple_stmt_iterator si; + + /* Add a safe func mechanism. */ +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + current_function->is_safe_func = safe_functions.contains (node); +- if (dump_file) ++ if (dump_file && (dump_flags & TDF_DETAILS)) + { + fprintf (dump_file, "\nfunction %s/%u: is_safe_func = %d\n", + node->name (), node->order, +@@ -4958,7 +4933,7 @@ ipa_struct_reorg::record_accesses (void) + } + + /* Add a safe func mechanism. */ +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + record_safe_func_with_void_ptr_parm (); + + FOR_EACH_FUNCTION (cnode) +@@ -5174,8 +5149,7 @@ ipa_struct_reorg::propagate_escape_via_ext_func_types (void) + void + ipa_struct_reorg::prune_escaped_types (void) + { +- if (current_mode != COMPLETE_STRUCT_RELAYOUT +- && current_mode != STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level == STRUCT_SPLIT) + { + /* Detect recusive types and mark them as escaping. */ + detect_cycles (); +@@ -5183,7 +5157,7 @@ ipa_struct_reorg::prune_escaped_types (void) + mark them as escaping. */ + propagate_escape (); + } +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + propagate_escape_via_original (); + propagate_escape_via_empty_with_no_original (); +@@ -5244,7 +5218,7 @@ ipa_struct_reorg::prune_escaped_types (void) + if (function->args.is_empty () + && function->decls.is_empty () + && function->globals.is_empty () +- && current_mode != STRUCT_REORDER_FIELDS) ++ && current_layout_opt_level < STRUCT_REORDER_FIELDS) + { + delete function; + functions.ordered_remove (i); +@@ -5272,7 +5246,7 @@ ipa_struct_reorg::prune_escaped_types (void) + /* The escape type is not deleted in STRUCT_REORDER_FIELDS, + Then the type that contains the escaped type fields + can find complete information. 
*/ +- if (current_mode != STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level < STRUCT_REORDER_FIELDS) + { + for (unsigned i = 0; i < types.length ();) + { +@@ -5320,7 +5294,7 @@ ipa_struct_reorg::create_new_types (void) + for (unsigned i = 0; i < types.length (); i++) + newtypes += types[i]->create_new_type (); + +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + for (unsigned i = 0; i < types.length (); i++) + { +@@ -5444,8 +5418,7 @@ ipa_struct_reorg::create_new_args (cgraph_node *new_node) + char *name = NULL; + if (tname) + { +- name = concat (tname, current_mode == STRUCT_REORDER_FIELDS +- ? ".reorder.0" : ".reorg.0", NULL); ++ name = concat (tname, ".reorg.0", NULL); + new_name = get_identifier (name); + free (name); + } +@@ -5532,9 +5505,7 @@ ipa_struct_reorg::create_new_functions (void) + } + statistics_counter_event (NULL, "Create new function", 1); + new_node = node->create_version_clone_with_body ( +- vNULL, NULL, NULL, NULL, NULL, +- current_mode == STRUCT_REORDER_FIELDS +- ? "struct_reorder" : "struct_reorg"); ++ vNULL, NULL, NULL, NULL, NULL, "struct_reorg"); + new_node->can_change_signature = node->can_change_signature; + new_node->make_local (); + f->newnode = new_node; +@@ -5661,7 +5632,7 @@ ipa_struct_reorg::rewrite_expr (tree expr, + newbase1 = build_fold_addr_expr (newbase1); + if (indirect) + { +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + /* Supports the MEM_REF offset. 
+ _1 = MEM[(struct arc *)ap_1 + 72B].flow; +@@ -5719,8 +5690,7 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + { + bool remove = false; + +- if (current_mode == STRUCT_REORDER_FIELDS +- && struct_layout_optimize_level >= DEAD_FIELD_ELIMINATION ++ if (current_layout_opt_level & DEAD_FIELD_ELIMINATION + && remove_dead_field_stmt (gimple_assign_lhs (stmt))) + { + if (dump_file && (dump_flags & TDF_DETAILS)) +@@ -5756,10 +5726,10 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + return remove; + } + +- if ((current_mode != STRUCT_REORDER_FIELDS ++ if ((current_layout_opt_level < STRUCT_REORDER_FIELDS + && (gimple_assign_rhs_code (stmt) == EQ_EXPR + || gimple_assign_rhs_code (stmt) == NE_EXPR)) +- || (current_mode == STRUCT_REORDER_FIELDS ++ || (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && (TREE_CODE_CLASS (gimple_assign_rhs_code (stmt)) + == tcc_comparison))) + { +@@ -5769,7 +5739,7 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + tree newrhs2[max_split]; + tree_code rhs_code = gimple_assign_rhs_code (stmt); + tree_code code = rhs_code == EQ_EXPR ? BIT_AND_EXPR : BIT_IOR_EXPR; +- if (current_mode == STRUCT_REORDER_FIELDS ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && rhs_code != EQ_EXPR && rhs_code != NE_EXPR) + code = rhs_code; + +@@ -5818,8 +5788,9 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + _6 = _4 + _5; + _5 = (long unsigned int) _3; + _3 = _1 - old_2. 
*/ +- if (current_mode != STRUCT_REORDER_FIELDS +- || (current_mode == STRUCT_REORDER_FIELDS && (num != NULL))) ++ if (current_layout_opt_level < STRUCT_REORDER_FIELDS ++ || (current_layout_opt_level >= STRUCT_REORDER_FIELDS ++ && (num != NULL))) + num = gimplify_build1 (gsi, NOP_EXPR, sizetype, num); + for (unsigned i = 0; i < max_split && newlhs[i]; i++) + { +@@ -5843,7 +5814,7 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi) + } + + /* Support POINTER_DIFF_EXPR rewriting. */ +- if (current_mode == STRUCT_REORDER_FIELDS ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && gimple_assign_rhs_code (stmt) == POINTER_DIFF_EXPR) + { + tree rhs1 = gimple_assign_rhs1 (stmt); +@@ -6026,7 +5997,8 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi) + srfunction *f = find_function (node); + + /* Add a safe func mechanism. */ +- if (current_mode == STRUCT_REORDER_FIELDS && f && f->is_safe_func) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS ++ && f && f->is_safe_func) + { + tree expr = gimple_call_arg (stmt, 0); + tree newexpr[max_split]; +@@ -6152,9 +6124,9 @@ ipa_struct_reorg::rewrite_cond (gcond *stmt, gimple_stmt_iterator *gsi) + tree_code rhs_code = gimple_cond_code (stmt); + + /* Handle only equals or not equals conditionals. */ +- if ((current_mode != STRUCT_REORDER_FIELDS ++ if ((current_layout_opt_level < STRUCT_REORDER_FIELDS + && (rhs_code != EQ_EXPR && rhs_code != NE_EXPR)) +- || (current_mode == STRUCT_REORDER_FIELDS ++ || (current_layout_opt_level >= STRUCT_REORDER_FIELDS + && TREE_CODE_CLASS (rhs_code) != tcc_comparison)) + return false; + tree lhs = gimple_cond_lhs (stmt); +@@ -6208,7 +6180,7 @@ ipa_struct_reorg::rewrite_cond (gcond *stmt, gimple_stmt_iterator *gsi) + bool + ipa_struct_reorg::rewrite_debug (gimple *stmt, gimple_stmt_iterator *) + { +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + /* Delete debug gimple now. 
*/ + return true; + bool remove = false; +@@ -6367,7 +6339,7 @@ ipa_struct_reorg::rewrite_functions (void) + then don't rewrite any accesses. */ + if (!create_new_types ()) + { +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + for (unsigned i = 0; i < functions.length (); i++) + { +@@ -6386,7 +6358,7 @@ ipa_struct_reorg::rewrite_functions (void) + return 0; + } + +- if (current_mode == STRUCT_REORDER_FIELDS && dump_file) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS && dump_file) + { + fprintf (dump_file, "=========== all created newtypes: ===========\n\n"); + dump_newtypes (dump_file); +@@ -6396,13 +6368,13 @@ ipa_struct_reorg::rewrite_functions (void) + { + retval = TODO_remove_functions; + create_new_functions (); +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + prune_escaped_types (); + } + } + +- if (current_mode == STRUCT_REORDER_FIELDS) ++ if (current_layout_opt_level >= STRUCT_REORDER_FIELDS) + { + for (unsigned i = 0; i < functions.length (); i++) + { +@@ -6559,33 +6531,33 @@ ipa_struct_reorg::execute_struct_relayout (void) + } + + unsigned int +-ipa_struct_reorg::execute (enum srmode mode) ++ipa_struct_reorg::execute (unsigned int opt) + { + unsigned int ret = 0; + + if (dump_file) + fprintf (dump_file, "\n\n====== ipa_struct_reorg level %d ======\n\n", +- mode); ++ opt); + +- if (mode == NORMAL || mode == STRUCT_REORDER_FIELDS) ++ if (opt != COMPLETE_STRUCT_RELAYOUT) + { +- current_mode = mode; ++ current_layout_opt_level = opt; + /* If there is a top-level inline-asm, + the pass immediately returns. 
*/ + if (symtab->first_asm_symbol ()) + return 0; + record_accesses (); + prune_escaped_types (); +- if (current_mode == NORMAL) ++ if (current_layout_opt_level == STRUCT_SPLIT) + analyze_types (); + + ret = rewrite_functions (); + } +- else if (mode == COMPLETE_STRUCT_RELAYOUT) ++ else + { + if (dump_file) + fprintf (dump_file, "\n\nTry Complete Struct Relayout:\n"); +- current_mode = COMPLETE_STRUCT_RELAYOUT; ++ current_layout_opt_level = COMPLETE_STRUCT_RELAYOUT; + if (symtab->first_asm_symbol ()) + return 0; + record_accesses (); +@@ -6622,10 +6594,37 @@ public: + virtual unsigned int execute (function *) + { + unsigned int ret = 0; +- ret = ipa_struct_reorg ().execute (NORMAL); +- if (!ret) +- ret = ipa_struct_reorg ().execute (COMPLETE_STRUCT_RELAYOUT); +- return ret; ++ unsigned int ret_reorg = 0; ++ unsigned int level = 0; ++ switch (struct_layout_optimize_level) ++ { ++ case 3: level |= DEAD_FIELD_ELIMINATION; ++ // FALLTHRU ++ case 2: level |= STRUCT_REORDER_FIELDS; ++ // FALLTHRU ++ case 1: ++ level |= COMPLETE_STRUCT_RELAYOUT; ++ level |= STRUCT_SPLIT; ++ break; ++ case 0: break; ++ default: gcc_unreachable (); ++ } ++ /* Preserved for backward compatibility, reorder fields needs run before ++ struct split and complete struct relayout. */ ++ if (flag_ipa_reorder_fields && level < STRUCT_REORDER_FIELDS) ++ ret = ipa_struct_reorg ().execute (STRUCT_REORDER_FIELDS); ++ ++ if (level >= STRUCT_REORDER_FIELDS) ++ ret = ipa_struct_reorg ().execute (level); ++ ++ if (level >= COMPLETE_STRUCT_RELAYOUT) ++ { ++ /* Preserved for backward compatibility. 
*/ ++ ret_reorg = ipa_struct_reorg ().execute (STRUCT_SPLIT); ++ if (!ret_reorg) ++ ret_reorg = ipa_struct_reorg ().execute (COMPLETE_STRUCT_RELAYOUT); ++ } ++ return ret | ret_reorg; + } + + }; // class pass_ipa_struct_reorg +@@ -6645,52 +6644,6 @@ pass_ipa_struct_reorg::gate (function *) + && (in_lto_p || flag_whole_program)); + } + +-const pass_data pass_data_ipa_reorder_fields = +-{ +- SIMPLE_IPA_PASS, // type +- "reorder_fields", // name +- OPTGROUP_NONE, // optinfo_flags +- TV_IPA_REORDER_FIELDS, // tv_id +- 0, // properties_required +- 0, // properties_provided +- 0, // properties_destroyed +- 0, // todo_flags_start +- 0, // todo_flags_finish +-}; +- +-class pass_ipa_reorder_fields : public simple_ipa_opt_pass +-{ +-public: +- pass_ipa_reorder_fields (gcc::context *ctxt) +- : simple_ipa_opt_pass (pass_data_ipa_reorder_fields, ctxt) +- {} +- +- /* opt_pass methods: */ +- virtual bool gate (function *); +- virtual unsigned int execute (function *) +- { +- unsigned int ret = 0; +- ret = ipa_struct_reorg ().execute (STRUCT_REORDER_FIELDS); +- return ret; +- } +- +-}; // class pass_ipa_reorder_fields +- +-bool +-pass_ipa_reorder_fields::gate (function *) +-{ +- return (optimize >= 3 +- && flag_ipa_reorder_fields +- /* Don't bother doing anything if the program has errors. */ +- && !seen_error () +- && flag_lto_partition == LTO_PARTITION_ONE +- /* Only enable struct optimizations in C since other +- languages' grammar forbid. */ +- && lang_c_p () +- /* Only enable struct optimizations in lto or whole_program. 
*/ +- && (in_lto_p || flag_whole_program)); +-} +- + } // anon namespace + + +@@ -6699,9 +6652,3 @@ make_pass_ipa_struct_reorg (gcc::context *ctxt) + { + return new pass_ipa_struct_reorg (ctxt); + } +- +-simple_ipa_opt_pass * +-make_pass_ipa_reorder_fields (gcc::context *ctxt) +-{ +- return new pass_ipa_reorder_fields (ctxt); +-} +diff --git a/gcc/passes.def b/gcc/passes.def +index bdc835b87..9692066e4 100644 +--- a/gcc/passes.def ++++ b/gcc/passes.def +@@ -178,7 +178,6 @@ along with GCC; see the file COPYING3. If not see + compiled unit. */ + INSERT_PASSES_AFTER (all_late_ipa_passes) + NEXT_PASS (pass_ipa_pta); +- NEXT_PASS (pass_ipa_reorder_fields); + /* FIXME: this should be a normal IP pass. */ + NEXT_PASS (pass_ipa_struct_reorg); + NEXT_PASS (pass_omp_simd_clone); +diff --git a/gcc/symbol-summary.h b/gcc/symbol-summary.h +index 6fa529eee..3fe64047c 100644 +--- a/gcc/symbol-summary.h ++++ b/gcc/symbol-summary.h +@@ -105,7 +105,7 @@ protected: + { + /* In structure optimizatons, we call new to ensure that + the allocated memory is initialized to 0. */ +- if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) ++ if (flag_ipa_struct_reorg) + return is_ggc () ? 
new (ggc_internal_alloc (sizeof (T))) T () + : new T (); + +@@ -122,7 +122,7 @@ protected: + ggc_delete (item); + else + { +- if (flag_ipa_struct_reorg || flag_ipa_reorder_fields) ++ if (flag_ipa_struct_reorg) + delete item; + else + m_allocator.remove (item); +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c b/gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c +index 0c9e384c4..afa181e07 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_DTE_verify.c +@@ -83,4 +83,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c b/gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c +index 717fcc386..c87db2aba 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ele_minus_verify.c +@@ -57,4 +57,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c +index 7723c240b..d217f7bd8 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_board_init.c +@@ -74,4 +74,4 @@ LBF_DFU_If_Needed (void) + } + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c +index a1feac966..f9e2cf471 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_claw.c +@@ -81,4 +81,4 @@ claw_snd_conn_req (struct 
net_device *dev, __u8 link) + return rc; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c +index fd1e936ca..c86c4bb3c 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_dtrace.c +@@ -53,4 +53,4 @@ dtrace_bcmp (const void *s1, const void *s2, size_t len) + return (0); + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c +index b13d785a9..8484d29d2 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_gc.c +@@ -159,4 +159,4 @@ gc_gray_mark (mrb_state *mrb, mrb_gc *gc, struct RBasic *obj) + return children; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c +index bc28a658a..300b2dac4 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_hpsa.c +@@ -123,4 +123,4 @@ hpsa_cmd_dev_match (struct ctlr_info *h, struct CommandList *c, + return match; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c +index 0a585ac3d..9397b98ea 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c 
++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_mv_udc_core.c +@@ -79,4 +79,4 @@ ep0_reset (struct mv_udc *udc) + } + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c +index bddd862fe..0ae75e13e 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_tcp_usrreq.c +@@ -55,4 +55,4 @@ tcp_usr_listen (struct socket *so, struct proc *p) + COMMON_END (PRU_LISTEN); + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c b/gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c +index 1a06f5eec..512fb37a7 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_extr_ui_main.c +@@ -58,4 +58,4 @@ UI_LoadMods () + } + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 1 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c b/gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c +index 94eb88d5c..0dea5517c 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_mem_ref_offset.c +@@ -55,4 +55,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c b/gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c +index bbf9420d0..00bd911c1 100644 +--- 
a/gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_mul_layer_ptr_record_bug.c +@@ -27,4 +27,4 @@ main() { + return 0; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c +index f706db968..0cfa6554e 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_diff.c +@@ -68,4 +68,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 3 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 3 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c +index 963295cb4..4a7069244 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_negate_expr.c +@@ -52,4 +52,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c +index aa10506a1..b91efe10f 100644 +--- a/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c ++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c +@@ -52,4 +52,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "reorder_fields" } } */ ++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 2 "struct_reorg" } } */ +diff --git a/gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c b/gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c +index b95be2dab..1b6a462e2 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c ++++ 
b/gcc/testsuite/gcc.dg/struct/rf_DTE_struct_instance_field.c +@@ -72,4 +72,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "No structures to transform." "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "No structures to transform." "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c b/gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c +index 3d243313b..346c71264 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_DTE_verify.c +@@ -91,4 +91,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c b/gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c +index faaf1e3a5..b876fef86 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_check_ptr_layers_bug.c +@@ -21,4 +21,4 @@ main() + { + g(); + } +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c +index 886706ae9..7d7641f01 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c +@@ -79,4 +79,4 @@ main() + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of 
file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c b/gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c +index f3785f392..63fb3f828 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_create_new_func_bug.c +@@ -53,4 +53,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c b/gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c +index 1415d759a..8c431e15f 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_ele_minus_verify.c +@@ -57,4 +57,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c b/gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c +index 003da0b57..efc95a4cd 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_escape_by_base.c +@@ -80,4 +80,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_external_func_types.c b/gcc/testsuite/gcc.dg/struct/rf_external_func_types.c +index 84a34f241..2a9bea783 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_external_func_types.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_external_func_types.c +@@ -66,4 +66,4 @@ test () + 
return 0; + } + +-/* { dg-final { scan-ipa-dump "No structures to transform." "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "No structures to transform." "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c +index 10dcf098c..75fc10575 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_int_cast_ptr.c +@@ -69,4 +69,4 @@ main() + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c b/gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c +index 8d1a9a114..9fb06877b 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_mem_ref_offset.c +@@ -55,4 +55,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c b/gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c +index 23765fc56..e8eb0eaa0 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_mul_layer_ptr_record_bug.c +@@ -27,4 +27,4 @@ main() { + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c 
b/gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c +index 54e737ee8..bd535afd0 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_pass_conflict.c +@@ -106,4 +106,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c b/gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c +index 2ae46fb31..11393a197 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr2void_lto.c +@@ -84,4 +84,4 @@ main () + return cnt; + } + +-/* { dg-final { scan-ipa-dump "No structures to transform." "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "No structures to transform." "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c +index 3a3c10b70..d601fae64 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_diff.c +@@ -68,4 +68,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c +index 7b7d110df..4d5f25aa1 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_negate_expr.c +@@ -52,4 +52,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { 
scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c +index 317aafa5f..b3891fde9 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_offset.c +@@ -31,4 +31,4 @@ main () + printf (" Tree.\n"); + } + +-/* { dg-final { scan-ipa-dump "No structures to transform." "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "No structures to transform." "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c +index 01a33f669..4df79e4f0 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c +@@ -52,4 +52,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c +index a38556533..49d2106d1 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr_ptr.c +@@ -55,4 +55,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c b/gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c +index 5c17ee528..f71c7894f 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_rescusive_type.c +@@ -54,4 +54,4 @@ main () + return 0; + } + 
+-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c +index 710517ee9..721cee2c6 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_assign_more_cmp.c +@@ -62,4 +62,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c +index 6ed0a5d2d..3871d3d99 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_bug.c +@@ -69,4 +69,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c +index 5a2dd964f..5ad206433 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c +@@ -55,4 +55,4 @@ main() + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git 
a/gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c +index faa90b42d..a002f9889 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_phi_bug.c +@@ -78,4 +78,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 3" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_visible_func.c b/gcc/testsuite/gcc.dg/struct/rf_visible_func.c +index 8f2da99cc..f77a062bd 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_visible_func.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_visible_func.c +@@ -89,4 +89,4 @@ main () + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c b/gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c +index 723142c59..cba6225a5 100644 +--- a/gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c ++++ b/gcc/testsuite/gcc.dg/struct/rf_void_ptr_param_func.c +@@ -51,4 +51,4 @@ main() + return 0; + } + +-/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "reorder_fields" } } */ +\ No newline at end of file ++/* { dg-final { scan-ipa-dump "Number of structures to transform is 1" "struct_reorg" } } */ +\ No newline at end of file +diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +index 6ccb753b5..278c4e4f5 100644 +--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp ++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp +@@ -45,7 +45,7 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/rf_*.c]] \ + + # 
-fipa-struct-reorg=3 + gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfe*.c]] \ +- "" "-fipa-reorder-fields -fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program" ++ "" "-fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program" + + # All done. + torture-finish +diff --git a/gcc/timevar.def b/gcc/timevar.def +index 2b27c858a..98a5a490f 100644 +--- a/gcc/timevar.def ++++ b/gcc/timevar.def +@@ -80,7 +80,6 @@ DEFTIMEVAR (TV_IPA_CONSTANT_PROP , "ipa cp") + DEFTIMEVAR (TV_IPA_INLINING , "ipa inlining heuristics") + DEFTIMEVAR (TV_IPA_FNSPLIT , "ipa function splitting") + DEFTIMEVAR (TV_IPA_COMDATS , "ipa comdats") +-DEFTIMEVAR (TV_IPA_REORDER_FIELDS , "ipa struct reorder fields optimization") + DEFTIMEVAR (TV_IPA_STRUCT_REORG , "ipa struct reorg optimization") + DEFTIMEVAR (TV_IPA_OPT , "ipa various optimizations") + DEFTIMEVAR (TV_IPA_LTO_DECOMPRESS , "lto stream decompression") +diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h +index a9ec8ed21..56898e019 100644 +--- a/gcc/tree-pass.h ++++ b/gcc/tree-pass.h +@@ -527,7 +527,6 @@ extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt); + extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt); +-extern simple_ipa_opt_pass *make_pass_ipa_reorder_fields (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_struct_reorg (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt); + extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt); +-- +2.33.0 + diff --git a/0025-AArch64-Rewrite-the-tsv110-option.patch b/0025-AArch64-Rewrite-the-tsv110-option.patch new file mode 100644 index 0000000000000000000000000000000000000000..d2e0dcd1dd7a3665e69ba114602e524248e7334c --- /dev/null +++ b/0025-AArch64-Rewrite-the-tsv110-option.patch @@ -0,0 +1,114 @@ +From 
2f0d0b1298fb9c3266bb102796b027a5570ad833 Mon Sep 17 00:00:00 2001 +From: dingguangya +Date: Mon, 4 Sep 2023 16:27:38 +0800 +Subject: [PATCH 1/2] [AArch64] Rewrite the tsv110 option + +Reset the more appropriate options for tsv110. +--- + gcc/common/config/aarch64/aarch64-common.cc | 76 +++++++++++++++++++++ + 1 file changed, 76 insertions(+) + +diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc +index dfda5b837..85ce8133b 100644 +--- a/gcc/common/config/aarch64/aarch64-common.cc ++++ b/gcc/common/config/aarch64/aarch64-common.cc +@@ -44,6 +44,8 @@ + #undef TARGET_OPTION_INIT_STRUCT + #define TARGET_OPTION_INIT_STRUCT aarch64_option_init_struct + ++#define INVALID_IMP ((unsigned) -1) ++ + /* Set default optimization options. */ + static const struct default_options aarch_option_optimization_table[] = + { +@@ -65,6 +67,77 @@ static const struct default_options aarch_option_optimization_table[] = + { OPT_LEVELS_NONE, 0, NULL, 0 } + }; + ++/* CPU vendor id. */ ++static unsigned vendor_id = INVALID_IMP; ++ ++/* The part number of the CPU. */ ++static unsigned part_id = INVALID_IMP; ++ ++/* Return the hex integer that is after ':' for the FIELD. ++ Return -1 if there was problem parsing the integer. */ ++static unsigned ++parse_cpuinfo (char *field) ++{ ++ if (field == NULL) ++ return INVALID_IMP; ++ const char *rest = strchr (field, ':'); ++ ++ if (rest == NULL) ++ return INVALID_IMP; ++ ++ char *after; ++ unsigned fint = strtol (rest + 1, &after, 16); ++ if (after == rest + 1) ++ return INVALID_IMP; ++ return fint; ++} ++ ++/* Read CPU vendor_id and part_id. */ ++ ++static void ++read_cpuinfo () ++{ ++ FILE *fp = fopen ("/proc/cpuinfo", "r"); ++ if (fp == NULL) ++ return; ++ ++ /* Read 1024-byte data from /proc/cpuinfo. 
*/ ++ char cpuinfo[1024]; ++ fread(cpuinfo, sizeof(char), sizeof(cpuinfo) - 1, fp); ++ ++ char *vendor = strstr(cpuinfo, "CPU implementer"); ++ vendor_id = parse_cpuinfo(vendor); ++ ++ char *part = strstr(cpuinfo, "CPU part"); ++ part_id = parse_cpuinfo(part); ++ ++ fclose(fp); ++} ++ ++/* Reset the tsv110 option. After checking the platform information, ++ this function can reset the more appropriate options. ++ TODO: Currently, this function is not applicable to the cross ++ compilation scenario. */ ++ ++static void ++reset_tsv110_option () ++{ ++ /* Read CPU Information. */ ++ if (vendor_id == INVALID_IMP) ++ read_cpuinfo (); ++ ++ if (vendor_id == 0x48 && part_id == 0xd01) ++ { ++ /* Outline-atomics is enabled by default and ++ aarch64_flag_outline_atomics defaults to 2. Therefore, the current ++ modification affects only the default scenario. When the option ++ moutline-atomics is added, the value of aarch64_flag_outline_atomics is 1, ++ that is, aarch64_flag_outline_atomics is not reset to 0. */ ++ if (aarch64_flag_outline_atomics == 2) ++ aarch64_flag_outline_atomics = 0; ++ } ++} ++ + /* Implement TARGET_HANDLE_OPTION. + This function handles the target specific options for CPU/target selection. + +@@ -83,6 +156,9 @@ aarch64_handle_option (struct gcc_options *opts, + const char *arg = decoded->arg; + int val = decoded->value; + ++ /* Reset the tsv110 options. 
*/ ++ reset_tsv110_option (); ++ + switch (code) + { + case OPT_march_: +-- +2.33.0 + diff --git a/0025-LoongArch-Optimize-single-used-address-with-mexplici.patch b/0025-LoongArch-Optimize-single-used-address-with-mexplici.patch new file mode 100644 index 0000000000000000000000000000000000000000..91b35d93782978efdc1c07237696d8cc339c86c1 --- /dev/null +++ b/0025-LoongArch-Optimize-single-used-address-with-mexplici.patch @@ -0,0 +1,116 @@ +From b23a89e835962ae7d89e5c6f87a69c021097d715 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Mon, 30 Oct 2023 20:24:58 +0800 +Subject: [PATCH 025/188] LoongArch: Optimize single-used address with + -mexplicit-relocs=auto for fld/fst + +fld and fst have same address mode as ld.w and st.w, so the same +optimization as r14-4851 should be applied for them too. + +gcc/ChangeLog: + + * config/loongarch/loongarch.md (LD_AT_LEAST_32_BIT): New mode + iterator. + (ST_ANY): New mode iterator. + (define_peephole2): Use LD_AT_LEAST_32_BIT instead of GPR and + ST_ANY instead of QHWD for applicable patterns. +--- + gcc/config/loongarch/loongarch.md | 38 +++++++++++++++++++------------ + 1 file changed, 24 insertions(+), 14 deletions(-) + +diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md +index 80487488d..ed86c95bd 100644 +--- a/gcc/config/loongarch/loongarch.md ++++ b/gcc/config/loongarch/loongarch.md +@@ -400,6 +400,14 @@ + (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT") + (TF "TARGET_64BIT && TARGET_DOUBLE_FLOAT")]) + ++;; A mode for anything with 32 bits or more, and able to be loaded with ++;; the same addressing mode as ld.w. ++(define_mode_iterator LD_AT_LEAST_32_BIT [GPR ANYF]) ++ ++;; A mode for anything able to be stored with the same addressing mode as ++;; st.w. ++(define_mode_iterator ST_ANY [QHWD ANYF]) ++ + ;; In GPR templates, a string like "mul." will expand to "mul.w" in the + ;; 32-bit version and "mul.d" in the 64-bit version. 
+ (define_mode_attr d [(SI "w") (DI "d")]) +@@ -3785,13 +3793,14 @@ + (define_peephole2 + [(set (match_operand:P 0 "register_operand") + (match_operand:P 1 "symbolic_pcrel_operand")) +- (set (match_operand:GPR 2 "register_operand") +- (mem:GPR (match_dup 0)))] ++ (set (match_operand:LD_AT_LEAST_32_BIT 2 "register_operand") ++ (mem:LD_AT_LEAST_32_BIT (match_dup 0)))] + "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ + && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ + && (peep2_reg_dead_p (2, operands[0]) \ + || REGNO (operands[0]) == REGNO (operands[2]))" +- [(set (match_dup 2) (mem:GPR (lo_sum:P (match_dup 0) (match_dup 1))))] ++ [(set (match_dup 2) ++ (mem:LD_AT_LEAST_32_BIT (lo_sum:P (match_dup 0) (match_dup 1))))] + { + emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); + }) +@@ -3799,14 +3808,15 @@ + (define_peephole2 + [(set (match_operand:P 0 "register_operand") + (match_operand:P 1 "symbolic_pcrel_operand")) +- (set (match_operand:GPR 2 "register_operand") +- (mem:GPR (plus (match_dup 0) +- (match_operand 3 "const_int_operand"))))] ++ (set (match_operand:LD_AT_LEAST_32_BIT 2 "register_operand") ++ (mem:LD_AT_LEAST_32_BIT (plus (match_dup 0) ++ (match_operand 3 "const_int_operand"))))] + "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ + && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ + && (peep2_reg_dead_p (2, operands[0]) \ + || REGNO (operands[0]) == REGNO (operands[2]))" +- [(set (match_dup 2) (mem:GPR (lo_sum:P (match_dup 0) (match_dup 1))))] ++ [(set (match_dup 2) ++ (mem:LD_AT_LEAST_32_BIT (lo_sum:P (match_dup 0) (match_dup 1))))] + { + operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3])); + emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); +@@ -3850,13 +3860,13 @@ + (define_peephole2 + [(set (match_operand:P 0 "register_operand") + (match_operand:P 1 "symbolic_pcrel_operand")) +- (set (mem:QHWD (match_dup 0)) +- (match_operand:QHWD 2 "register_operand"))] ++ (set (mem:ST_ANY (match_dup 0)) ++ 
(match_operand:ST_ANY 2 "register_operand"))] + "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ + && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ + && (peep2_reg_dead_p (2, operands[0])) \ + && REGNO (operands[0]) != REGNO (operands[2])" +- [(set (mem:QHWD (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))] ++ [(set (mem:ST_ANY (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))] + { + emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); + }) +@@ -3864,14 +3874,14 @@ + (define_peephole2 + [(set (match_operand:P 0 "register_operand") + (match_operand:P 1 "symbolic_pcrel_operand")) +- (set (mem:QHWD (plus (match_dup 0) +- (match_operand 3 "const_int_operand"))) +- (match_operand:QHWD 2 "register_operand"))] ++ (set (mem:ST_ANY (plus (match_dup 0) ++ (match_operand 3 "const_int_operand"))) ++ (match_operand:ST_ANY 2 "register_operand"))] + "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \ + && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \ + && (peep2_reg_dead_p (2, operands[0])) \ + && REGNO (operands[0]) != REGNO (operands[2])" +- [(set (mem:QHWD (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))] ++ [(set (mem:ST_ANY (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))] + { + operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3])); + emit_insn (gen_pcalau12i_gr (operands[0], operands[1])); +-- +2.43.0 + diff --git a/0026-GOMP-Enabling-moutline-atomics-improves-libgomp-perf.patch b/0026-GOMP-Enabling-moutline-atomics-improves-libgomp-perf.patch new file mode 100644 index 0000000000000000000000000000000000000000..b57a5a6f9e6ba4b3d4d83d7ac305deefd57f5900 --- /dev/null +++ b/0026-GOMP-Enabling-moutline-atomics-improves-libgomp-perf.patch @@ -0,0 +1,37 @@ +From 7efae59159577657f22511aa3b2cebe85ca60d9d Mon Sep 17 00:00:00 2001 +From: dingguangya +Date: Mon, 4 Sep 2023 16:30:58 +0800 +Subject: [PATCH 2/2] [GOMP] Enabling moutline-atomics improves libgomp + performance in multi-thread scenarios + +Libgomp is used in multi-thread 
scenarios, +Enabling moutline-atomics improves performance. +--- + libgomp/configure.tgt | 11 +++++++++++ + 1 file changed, 11 insertions(+) + +diff --git a/libgomp/configure.tgt b/libgomp/configure.tgt +index 2cd7272fc..f924e9f98 100644 +--- a/libgomp/configure.tgt ++++ b/libgomp/configure.tgt +@@ -32,6 +32,17 @@ if test $gcc_cv_have_tls = yes ; then + esac + fi + ++# Enabling moutline-atomics improves libgomp performance in multi-thread scenarios. ++case "${target_cpu}" in ++ aarch64*) ++ case "${target}" in ++ aarch64*-*-linux*) ++ XCFLAGS="${XCFLAGS} -moutline-atomics" ++ ;; ++ esac ++ ;; ++esac ++ + tmake_file= + # Since we require POSIX threads, assume a POSIX system by default. + config_path="posix" +-- +2.33.0 + diff --git a/0026-LoongArch-Disable-relaxation-if-the-assembler-don-t-.patch b/0026-LoongArch-Disable-relaxation-if-the-assembler-don-t-.patch new file mode 100644 index 0000000000000000000000000000000000000000..cbe62f4df416e46371665ba262b910ec305ac978 --- /dev/null +++ b/0026-LoongArch-Disable-relaxation-if-the-assembler-don-t-.patch @@ -0,0 +1,305 @@ +From f1cfdec1602a5a316a9b9022a95143a7385489c2 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Fri, 3 Nov 2023 21:19:59 +0800 +Subject: [PATCH 026/188] LoongArch: Disable relaxation if the assembler don't + support conditional branch relaxation [PR112330] + +As the commit message of r14-4674 has indicated, if the assembler does +not support conditional branch relaxation, a relocation overflow may +happen on conditional branches when relaxation is enabled because the +number of NOP instructions inserted by the assembler will be more than +the number estimated by GCC. + +To work around this issue, disable relaxation by default if the +assembler is detected incapable to perform conditional branch relaxation +at GCC build time. We also need to pass -mno-relax to the assembler to +really disable relaxation. 
But, if the assembler does not support +-mrelax option at all, we should not pass -mno-relax to the assembler or +it will immediately error out. Also handle this with the build time +assembler capability probing, and add a pair of options +-m[no-]pass-mrelax-to-as to allow using a different assembler from the +build-time one. + +With this change, if GCC is built with GAS 2.41, relaxation will be +disabled by default. So the default value of -mexplicit-relocs= is also +changed to 'always' if -mno-relax is specified or implied by the +build-time default, because using assembler macros for symbol addresses +produces no benefit when relaxation is disabled. + +gcc/ChangeLog: + + PR target/112330 + * config/loongarch/genopts/loongarch.opt.in: Add + -m[no]-pass-relax-to-as. Change the default of -m[no]-relax to + account conditional branch relaxation support status. + * config/loongarch/loongarch.opt: Regenerate. + * configure.ac (gcc_cv_as_loongarch_cond_branch_relax): Check if + the assembler supports conditional branch relaxation. + * configure: Regenerate. + * config.in: Regenerate. Note that there are some unrelated + changes introduced by r14-5424 (which does not contain a + config.in regeneration). + * config/loongarch/loongarch-opts.h + (HAVE_AS_COND_BRANCH_RELAXATION): Define to 0 if not defined. + * config/loongarch/loongarch-driver.h (ASM_MRELAX_DEFAULT): + Define. + (ASM_MRELAX_SPEC): Define. + (ASM_SPEC): Use ASM_MRELAX_SPEC instead of "%{mno-relax}". + * config/loongarch/loongarch.cc: Take the setting of + -m[no-]relax into account when determining the default of + -mexplicit-relocs=. + * doc/invoke.texi: Document -m[no-]relax and + -m[no-]pass-mrelax-to-as for LoongArch. Update the default + value of -mexplicit-relocs=. 
+--- + gcc/config.in | 35 ++++++++++++++++++- + gcc/config/loongarch/genopts/loongarch.opt.in | 6 +++- + gcc/config/loongarch/loongarch-driver.h | 16 ++++++++- + gcc/config/loongarch/loongarch-opts.h | 4 +++ + gcc/config/loongarch/loongarch.cc | 2 +- + gcc/config/loongarch/loongarch.opt | 6 +++- + gcc/configure | 35 +++++++++++++++++++ + gcc/configure.ac | 10 ++++++ + 8 files changed, 109 insertions(+), 5 deletions(-) + +diff --git a/gcc/config.in b/gcc/config.in +index 0c55e67e7..04968b53c 100644 +--- a/gcc/config.in ++++ b/gcc/config.in +@@ -374,6 +374,12 @@ + #endif + + ++/* Define if your assembler supports conditional branch relaxation. */ ++#ifndef USED_FOR_TARGET ++#undef HAVE_AS_COND_BRANCH_RELAXATION ++#endif ++ ++ + /* Define if your assembler supports the --debug-prefix-map option. */ + #ifndef USED_FOR_TARGET + #undef HAVE_AS_DEBUG_PREFIX_MAP +@@ -798,6 +804,20 @@ + #endif + + ++/* Define to 1 if you have the Mac OS X function ++ CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */ ++#ifndef USED_FOR_TARGET ++#undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES ++#endif ++ ++ ++/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in ++ the CoreFoundation framework. */ ++#ifndef USED_FOR_TARGET ++#undef HAVE_CFPREFERENCESCOPYAPPVALUE ++#endif ++ ++ + /* Define to 1 if you have the `clearerr_unlocked' function. */ + #ifndef USED_FOR_TARGET + #undef HAVE_CLEARERR_UNLOCKED +@@ -822,6 +842,13 @@ + #endif + + ++/* Define if the GNU dcgettext() function is already present or preinstalled. ++ */ ++#ifndef USED_FOR_TARGET ++#undef HAVE_DCGETTEXT ++#endif ++ ++ + /* Define to 1 if we found a declaration for 'abort', otherwise define to 0. + */ + #ifndef USED_FOR_TARGET +@@ -1554,6 +1581,12 @@ + #endif + + ++/* Define if the GNU gettext() function is already present or preinstalled. */ ++#ifndef USED_FOR_TARGET ++#undef HAVE_GETTEXT ++#endif ++ ++ + /* Define to 1 if you have the `gettimeofday' function. 
*/ + #ifndef USED_FOR_TARGET + #undef HAVE_GETTIMEOFDAY +@@ -1585,7 +1618,7 @@ + #endif + + +-/* Define if you have the iconv() function. */ ++/* Define if you have the iconv() function and it works. */ + #ifndef USED_FOR_TARGET + #undef HAVE_ICONV + #endif +diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in +index e7df1964a..bd3cfaf60 100644 +--- a/gcc/config/loongarch/genopts/loongarch.opt.in ++++ b/gcc/config/loongarch/genopts/loongarch.opt.in +@@ -229,10 +229,14 @@ Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0) + Avoid using the GOT to access external symbols. + + mrelax +-Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) ++Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION) + Take advantage of linker relaxations to reduce the number of instructions + required to materialize symbol addresses. + ++mpass-mrelax-to-as ++Target Var(loongarch_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION) ++Pass -mrelax or -mno-relax option to the assembler. ++ + -param=loongarch-vect-unroll-limit= + Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param + Used to limit unroll factor which indicates how much the autovectorizer may +diff --git a/gcc/config/loongarch/loongarch-driver.h b/gcc/config/loongarch/loongarch-driver.h +index 59fa3263d..c8dba2cc4 100644 +--- a/gcc/config/loongarch/loongarch-driver.h ++++ b/gcc/config/loongarch/loongarch-driver.h +@@ -51,9 +51,23 @@ along with GCC; see the file COPYING3. 
If not see + "%{G*} %{,ada:-gnatea %{mabi=*} -gnatez} " \ + "%(subtarget_cc1_spec)" + ++#if HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION ++#define ASM_MRELAX_DEFAULT "%{!mrelax:%{!mno-relax:-mrelax}}" ++#else ++#define ASM_MRELAX_DEFAULT "%{!mrelax:%{!mno-relax:-mno-relax}}" ++#endif ++ ++#if HAVE_AS_MRELAX_OPTION ++#define ASM_MRELAX_SPEC \ ++ "%{!mno-pass-mrelax-to-as:%{mrelax} %{mno-relax} " ASM_MRELAX_DEFAULT "}" ++#else ++#define ASM_MRELAX_SPEC \ ++ "%{mpass-mrelax-to-as:%{mrelax} %{mno-relax} " ASM_MRELAX_DEFAULT "}" ++#endif ++ + #undef ASM_SPEC + #define ASM_SPEC \ +- "%{mabi=*} %{mno-relax} %(subtarget_asm_spec)" ++ "%{mabi=*} " ASM_MRELAX_SPEC " %(subtarget_asm_spec)" + + + extern const char* +diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h +index c4975af00..dfbe9dd5c 100644 +--- a/gcc/config/loongarch/loongarch-opts.h ++++ b/gcc/config/loongarch/loongarch-opts.h +@@ -103,6 +103,10 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target, + #define HAVE_AS_MRELAX_OPTION 0 + #endif + ++#ifndef HAVE_AS_COND_BRANCH_RELAXATION ++#define HAVE_AS_COND_BRANCH_RELAXATION 0 ++#endif ++ + #ifndef HAVE_AS_TLS + #define HAVE_AS_TLS 0 + #endif +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 65ca1489f..6d580ee75 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -7428,7 +7428,7 @@ loongarch_option_override_internal (struct gcc_options *opts, + + if (la_opt_explicit_relocs == M_OPT_UNSET) + la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS +- ? (HAVE_AS_MRELAX_OPTION ++ ? (loongarch_mrelax + ? 
EXPLICIT_RELOCS_AUTO + : EXPLICIT_RELOCS_ALWAYS) + : EXPLICIT_RELOCS_NONE); +diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt +index 44376fd77..d936954b8 100644 +--- a/gcc/config/loongarch/loongarch.opt ++++ b/gcc/config/loongarch/loongarch.opt +@@ -236,10 +236,14 @@ Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0) + Avoid using the GOT to access external symbols. + + mrelax +-Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION) ++Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION) + Take advantage of linker relaxations to reduce the number of instructions + required to materialize symbol addresses. + ++mpass-mrelax-to-as ++Target Var(loongarch_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION) ++Pass -mrelax or -mno-relax option to the assembler. ++ + -param=loongarch-vect-unroll-limit= + Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param + Used to limit unroll factor which indicates how much the autovectorizer may +diff --git a/gcc/configure b/gcc/configure +index 430d44dc3..09bacfec3 100755 +--- a/gcc/configure ++++ b/gcc/configure +@@ -28901,6 +28901,41 @@ if test $gcc_cv_as_loongarch_relax = yes; then + + $as_echo "#define HAVE_AS_MRELAX_OPTION 1" >>confdefs.h + ++fi ++ ++ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for conditional branch relaxation support" >&5 ++$as_echo_n "checking assembler for conditional branch relaxation support... " >&6; } ++if ${gcc_cv_as_loongarch_cond_branch_relax+:} false; then : ++ $as_echo_n "(cached) " >&6 ++else ++ gcc_cv_as_loongarch_cond_branch_relax=no ++ if test x$gcc_cv_as != x; then ++ $as_echo 'a: ++ .rept 32769 ++ nop ++ .endr ++ beq $a0,$a1,a' > conftest.s ++ if { ac_try='$gcc_cv_as $gcc_cv_as_flags -o conftest.o conftest.s >&5' ++ { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5 ++ (eval $ac_try) 2>&5 ++ ac_status=$? ++ $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 ++ test $ac_status = 0; }; } ++ then ++ gcc_cv_as_loongarch_cond_branch_relax=yes ++ else ++ echo "configure: failed program was" >&5 ++ cat conftest.s >&5 ++ fi ++ rm -f conftest.o conftest.s ++ fi ++fi ++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_cond_branch_relax" >&5 ++$as_echo "$gcc_cv_as_loongarch_cond_branch_relax" >&6; } ++if test $gcc_cv_as_loongarch_cond_branch_relax = yes; then ++ ++$as_echo "#define HAVE_AS_COND_BRANCH_RELAXATION 1" >>confdefs.h ++ + fi + + ;; +diff --git a/gcc/configure.ac b/gcc/configure.ac +index 4b24db190..a0999152e 100644 +--- a/gcc/configure.ac ++++ b/gcc/configure.ac +@@ -5341,6 +5341,16 @@ x: + [-mrelax], [.text],, + [AC_DEFINE(HAVE_AS_MRELAX_OPTION, 1, + [Define if your assembler supports -mrelax option.])]) ++ gcc_GAS_CHECK_FEATURE([conditional branch relaxation support], ++ gcc_cv_as_loongarch_cond_branch_relax, ++ [], ++ [a: ++ .rept 32769 ++ nop ++ .endr ++ beq $a0,$a1,a],, ++ [AC_DEFINE(HAVE_AS_COND_BRANCH_RELAXATION, 1, ++ [Define if your assembler supports conditional branch relaxation.])]) + ;; + s390*-*-*) + gcc_GAS_CHECK_FEATURE([.gnu_attribute support], +-- +2.43.0 + diff --git a/0027-LoongArch-Remove-redundant-barrier-instructions-befo.patch b/0027-LoongArch-Remove-redundant-barrier-instructions-befo.patch new file mode 100644 index 0000000000000000000000000000000000000000..28df51c7cea1209a83ad0d4e41a6b2a932f8470b --- /dev/null +++ b/0027-LoongArch-Remove-redundant-barrier-instructions-befo.patch @@ -0,0 +1,391 @@ +From 4498010fba61c1446286c96cbda24d5ed53c53c7 Mon Sep 17 00:00:00 2001 +From: Xi Ruoyao +Date: Mon, 6 Nov 2023 16:06:08 +0800 +Subject: [PATCH 027/188] LoongArch: Remove redundant barrier instructions + before LL-SC loops + +This is isomorphic to the LLVM changes [1-2]. 
+ +On LoongArch, the LL and SC instructions has memory barrier semantics: + +- LL: + +- SC: + + +But the compare and swap operation is allowed to fail, and if it fails +the SC instruction is not executed, thus the guarantee of acquiring +semantics cannot be ensured. Therefore, an acquire barrier needs to be +generated when failure_memorder includes an acquire operation. + +On CPUs implementing LoongArch v1.10 or later, "dbar 0b10100" is an +acquire barrier; on CPUs implementing LoongArch v1.00, it is a full +barrier. So it's always enough for acquire semantics. OTOH if an +acquire semantic is not needed, we still needs the "dbar 0x700" as the +load-load barrier like all LL-SC loops. + +[1]:https://github.com/llvm/llvm-project/pull/67391 +[2]:https://github.com/llvm/llvm-project/pull/69339 + +gcc/ChangeLog: + + * config/loongarch/loongarch.cc + (loongarch_memmodel_needs_release_fence): Remove. + (loongarch_cas_failure_memorder_needs_acquire): New static + function. + (loongarch_print_operand): Redefine 'G' for the barrier on CAS + failure. + * config/loongarch/sync.md (atomic_cas_value_strong): + Remove the redundant barrier before the LL instruction, and + emit an acquire barrier on failure if needed by + failure_memorder. + (atomic_cas_value_cmp_and_7_): Likewise. + (atomic_cas_value_add_7_): Remove the unnecessary barrier + before the LL instruction. + (atomic_cas_value_sub_7_): Likewise. + (atomic_cas_value_and_7_): Likewise. + (atomic_cas_value_xor_7_): Likewise. + (atomic_cas_value_or_7_): Likewise. + (atomic_cas_value_nand_7_): Likewise. + (atomic_cas_value_exchange_7_): Likewise. + +gcc/testsuite/ChangeLog: + + * gcc.target/loongarch/cas-acquire.c: New test. 
+--- + gcc/config/loongarch/loongarch.cc | 30 ++++--- + gcc/config/loongarch/sync.md | 49 +++++------ + .../gcc.target/loongarch/cas-acquire.c | 82 +++++++++++++++++++ + 3 files changed, 119 insertions(+), 42 deletions(-) + create mode 100644 gcc/testsuite/gcc.target/loongarch/cas-acquire.c + +diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc +index 6d580ee75..8467f03cf 100644 +--- a/gcc/config/loongarch/loongarch.cc ++++ b/gcc/config/loongarch/loongarch.cc +@@ -5829,27 +5829,27 @@ loongarch_memmodel_needs_rel_acq_fence (enum memmodel model) + } + } + +-/* Return true if a FENCE should be emitted to before a memory access to +- implement the release portion of memory model MODEL. */ ++/* Return true if a FENCE should be emitted after a failed CAS to ++ implement the acquire semantic of failure_memorder. */ + + static bool +-loongarch_memmodel_needs_release_fence (enum memmodel model) ++loongarch_cas_failure_memorder_needs_acquire (enum memmodel model) + { +- switch (model) ++ switch (memmodel_base (model)) + { ++ case MEMMODEL_ACQUIRE: + case MEMMODEL_ACQ_REL: + case MEMMODEL_SEQ_CST: +- case MEMMODEL_SYNC_SEQ_CST: +- case MEMMODEL_RELEASE: +- case MEMMODEL_SYNC_RELEASE: + return true; + +- case MEMMODEL_ACQUIRE: +- case MEMMODEL_CONSUME: +- case MEMMODEL_SYNC_ACQUIRE: + case MEMMODEL_RELAXED: ++ case MEMMODEL_RELEASE: + return false; + ++ /* MEMMODEL_CONSUME is deliberately not handled because it's always ++ replaced by MEMMODEL_ACQUIRE as at now. If you see an ICE caused by ++ MEMMODEL_CONSUME, read the change (re)introducing it carefully and ++ decide what to do. See PR 59448 and get_memmodel in builtins.cc. */ + default: + gcc_unreachable (); + } +@@ -5962,7 +5962,8 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part, + 'd' Print CONST_INT OP in decimal. + 'E' Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal. + 'F' Print the FPU branch condition for comparison OP. 
+- 'G' Print a DBAR insn if the memory model requires a release. ++ 'G' Print a DBAR insn for CAS failure (with an acquire semantic if ++ needed, otherwise a simple load-load barrier). + 'H' Print address 52-61bit relocation associated with OP. + 'h' Print the high-part relocation associated with OP. + 'i' Print i if the operand is not a register. +@@ -6053,8 +6054,11 @@ loongarch_print_operand (FILE *file, rtx op, int letter) + break; + + case 'G': +- if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op))) +- fputs ("dbar\t0", file); ++ if (loongarch_cas_failure_memorder_needs_acquire ( ++ memmodel_from_int (INTVAL (op)))) ++ fputs ("dbar\t0b10100", file); ++ else ++ fputs ("dbar\t0x700", file); + break; + + case 'h': +diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md +index efa40f24c..dd1f98946 100644 +--- a/gcc/config/loongarch/sync.md ++++ b/gcc/config/loongarch/sync.md +@@ -162,19 +162,18 @@ + (clobber (match_scratch:GPR 6 "=&r"))] + "" + { +- return "%G5\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "bne\\t%0,%z2,2f\\n\\t" + "or%i3\\t%6,$zero,%3\\n\\t" + "sc.\\t%6,%1\\n\\t" +- "beq\\t$zero,%6,1b\\n\\t" ++ "beqz\\t%6,1b\\n\\t" + "b\\t3f\\n\\t" + "2:\\n\\t" +- "dbar\\t0x700\\n\\t" ++ "%G5\\n\\t" + "3:\\n\\t"; + } +- [(set (attr "length") (const_int 32))]) ++ [(set (attr "length") (const_int 28))]) + + (define_expand "atomic_compare_and_swap" + [(match_operand:SI 0 "register_operand" "") ;; bool output +@@ -267,8 +266,7 @@ + (clobber (match_scratch:GPR 7 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "and\\t%7,%0,%2\\n\\t" + "bne\\t%7,%z4,2f\\n\\t" +@@ -278,10 +276,10 @@ + "beq\\t$zero,%7,1b\\n\\t" + "b\\t3f\\n\\t" + "2:\\n\\t" +- "dbar\\t0x700\\n\\t" ++ "%G6\\n\\t" + "3:\\n\\t"; + } +- [(set (attr "length") (const_int 40))]) ++ [(set (attr "length") (const_int 36))]) + + (define_expand "atomic_compare_and_swap" + [(match_operand:SI 0 
"register_operand" "") ;; bool output +@@ -336,8 +334,7 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "and\\t%7,%0,%3\\n\\t" + "add.w\\t%8,%0,%z5\\n\\t" +@@ -347,7 +344,7 @@ + "beq\\t$zero,%7,1b"; + } + +- [(set (attr "length") (const_int 32))]) ++ [(set (attr "length") (const_int 28))]) + + (define_insn "atomic_cas_value_sub_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +@@ -363,8 +360,7 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "and\\t%7,%0,%3\\n\\t" + "sub.w\\t%8,%0,%z5\\n\\t" +@@ -373,7 +369,7 @@ + "sc.\\t%7,%1\\n\\t" + "beq\\t$zero,%7,1b"; + } +- [(set (attr "length") (const_int 32))]) ++ [(set (attr "length") (const_int 28))]) + + (define_insn "atomic_cas_value_and_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +@@ -389,8 +385,7 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "and\\t%7,%0,%3\\n\\t" + "and\\t%8,%0,%z5\\n\\t" +@@ -399,7 +394,7 @@ + "sc.\\t%7,%1\\n\\t" + "beq\\t$zero,%7,1b"; + } +- [(set (attr "length") (const_int 32))]) ++ [(set (attr "length") (const_int 28))]) + + (define_insn "atomic_cas_value_xor_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +@@ -415,8 +410,7 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "and\\t%7,%0,%3\\n\\t" + "xor\\t%8,%0,%z5\\n\\t" +@@ -426,7 +420,7 @@ + "beq\\t$zero,%7,1b"; + } + +- [(set (attr "length") (const_int 32))]) ++ [(set (attr "length") (const_int 28))]) + + (define_insn "atomic_cas_value_or_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +@@ -442,8 +436,7 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" 
+ "and\\t%7,%0,%3\\n\\t" + "or\\t%8,%0,%z5\\n\\t" +@@ -453,7 +446,7 @@ + "beq\\t$zero,%7,1b"; + } + +- [(set (attr "length") (const_int 32))]) ++ [(set (attr "length") (const_int 28))]) + + (define_insn "atomic_cas_value_nand_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") ;; res +@@ -469,8 +462,7 @@ + (clobber (match_scratch:GPR 8 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "and\\t%7,%0,%3\\n\\t" + "and\\t%8,%0,%z5\\n\\t" +@@ -479,7 +471,7 @@ + "sc.\\t%7,%1\\n\\t" + "beq\\t$zero,%7,1b"; + } +- [(set (attr "length") (const_int 32))]) ++ [(set (attr "length") (const_int 28))]) + + (define_insn "atomic_cas_value_exchange_7_" + [(set (match_operand:GPR 0 "register_operand" "=&r") +@@ -494,8 +486,7 @@ + (clobber (match_scratch:GPR 7 "=&r"))] + "" + { +- return "%G6\\n\\t" +- "1:\\n\\t" ++ return "1:\\n\\t" + "ll.\\t%0,%1\\n\\t" + "and\\t%7,%0,%z3\\n\\t" + "or%i5\\t%7,%7,%5\\n\\t" +diff --git a/gcc/testsuite/gcc.target/loongarch/cas-acquire.c b/gcc/testsuite/gcc.target/loongarch/cas-acquire.c +new file mode 100644 +index 000000000..ff7ba866f +--- /dev/null ++++ b/gcc/testsuite/gcc.target/loongarch/cas-acquire.c +@@ -0,0 +1,82 @@ ++/* { dg-do run } */ ++/* { dg-require-effective-target c99_runtime } */ ++/* { dg-require-effective-target pthread } */ ++/* { dg-options "-std=c99 -pthread" } */ ++ ++/* https://github.com/llvm/llvm-project/pull/67391#issuecomment-1752403934 ++ reported that this had failed with GCC and 3A6000. 
*/ ++ ++#include ++#include ++#include ++#include ++ ++static unsigned int tags[32]; ++static unsigned int vals[32]; ++ ++static void * ++writer_entry (void *data) ++{ ++ atomic_uint *pt = (atomic_uint *)tags; ++ atomic_uint *pv = (atomic_uint *)vals; ++ ++ for (unsigned int n = 1; n < 10000; n++) ++ { ++ atomic_store_explicit (&pv[n & 31], n, memory_order_release); ++ atomic_store_explicit (&pt[n & 31], n, memory_order_release); ++ } ++ ++ return NULL; ++} ++ ++static void * ++reader_entry (void *data) ++{ ++ atomic_uint *pt = (atomic_uint *)tags; ++ atomic_uint *pv = (atomic_uint *)vals; ++ int i; ++ ++ for (;;) ++ { ++ for (i = 0; i < 32; i++) ++ { ++ unsigned int tag = 0; ++ bool res; ++ ++ res = atomic_compare_exchange_weak_explicit ( ++ &pt[i], &tag, 0, memory_order_acquire, memory_order_acquire); ++ if (!res) ++ { ++ unsigned int val; ++ ++ val = atomic_load_explicit (&pv[i], memory_order_relaxed); ++ if (val < tag) ++ __builtin_trap (); ++ } ++ } ++ } ++ ++ return NULL; ++} ++ ++int ++main (int argc, char *argv[]) ++{ ++ pthread_t writer; ++ pthread_t reader; ++ int res; ++ ++ res = pthread_create (&writer, NULL, writer_entry, NULL); ++ if (res < 0) ++ __builtin_trap (); ++ ++ res = pthread_create (&reader, NULL, reader_entry, NULL); ++ if (res < 0) ++ __builtin_trap (); ++ ++ res = pthread_join (writer, NULL); ++ if (res < 0) ++ __builtin_trap (); ++ ++ return 0; ++} +-- +2.43.0 + diff --git a/0027-LoopElim-Redundant-loop-elimination-optimization.patch b/0027-LoopElim-Redundant-loop-elimination-optimization.patch new file mode 100644 index 0000000000000000000000000000000000000000..91b45cd903183239fc72b70d702ab4a778de998a --- /dev/null +++ b/0027-LoopElim-Redundant-loop-elimination-optimization.patch @@ -0,0 +1,503 @@ +From 14d9ee793571c6b6f16fa098cde137ebac7aa58f Mon Sep 17 00:00:00 2001 +From: eastb233 +Date: Mon, 4 Sep 2023 14:58:42 +0800 +Subject: [PATCH] [LoopElim] Redundant loop elimination optimization + +Introduce redundant loop elimination 
optimization controlled +by -floop-elim. And it's often used with -ffinite-loops. +--- + gcc/common.opt | 4 + + gcc/tree-ssa-phiopt.cc | 448 +++++++++++++++++++++++++++++++++++++++++ + 2 files changed, 452 insertions(+) + +diff --git a/gcc/common.opt b/gcc/common.opt +index cae7b380f..b01df919e 100644 +--- a/gcc/common.opt ++++ b/gcc/common.opt +@@ -1230,6 +1230,10 @@ fcompare-elim + Common Var(flag_compare_elim_after_reload) Optimization + Perform comparison elimination after register allocation has finished. + ++floop-elim ++Common Var(flag_loop_elim) Init(0) Optimization ++Perform redundant loop elimination. ++ + fconserve-stack + Common Var(flag_conserve_stack) Optimization + Do not perform optimizations increasing noticeably stack usage. +diff --git a/gcc/tree-ssa-phiopt.cc b/gcc/tree-ssa-phiopt.cc +index c56d0b9ff..cf300d141 100644 +--- a/gcc/tree-ssa-phiopt.cc ++++ b/gcc/tree-ssa-phiopt.cc +@@ -77,6 +77,7 @@ static hash_set * get_non_trapping (); + static void replace_phi_edge_with_variable (basic_block, edge, gphi *, tree); + static void hoist_adjacent_loads (basic_block, basic_block, + basic_block, basic_block); ++static bool do_phiopt_pattern (basic_block, basic_block, basic_block); + static bool gate_hoist_loads (void); + + /* This pass tries to transform conditional stores into unconditional +@@ -266,6 +267,10 @@ tree_ssa_phiopt_worker (bool do_store_elim, bool do_hoist_loads, bool early_p) + hoist_adjacent_loads (bb, bb1, bb2, bb3); + continue; + } ++ else if (flag_loop_elim && do_phiopt_pattern (bb, bb1, bb2)) ++ { ++ continue; ++ } + else + continue; + +@@ -3767,6 +3772,449 @@ hoist_adjacent_loads (basic_block bb0, basic_block bb1, + } + } + ++static bool check_uses (tree, hash_set *); ++ ++/* Check SSA_NAME is used in ++ if (SSA_NAME == 0) ++ ... ++ or ++ if (SSA_NAME != 0) ++ ... 
++*/ ++static bool ++check_uses_cond (const_tree ssa_name, gimple *stmt, ++ hash_set *hset ATTRIBUTE_UNUSED) ++{ ++ tree_code code = gimple_cond_code (stmt); ++ if (code != EQ_EXPR && code != NE_EXPR) ++ { ++ return false; ++ } ++ ++ tree lhs = gimple_cond_lhs (stmt); ++ tree rhs = gimple_cond_rhs (stmt); ++ if ((lhs == ssa_name && integer_zerop (rhs)) ++ || (rhs == ssa_name && integer_zerop (lhs))) ++ { ++ return true; ++ } ++ ++ return false; ++} ++ ++/* Check SSA_NAME is used in ++ _tmp = SSA_NAME == 0; ++ or ++ _tmp = SSA_NAME != 0; ++ or ++ _tmp = SSA_NAME | _tmp2; ++*/ ++static bool ++check_uses_assign (const_tree ssa_name, gimple *stmt, hash_set *hset) ++{ ++ tree_code code = gimple_assign_rhs_code (stmt); ++ tree lhs, rhs1, rhs2; ++ ++ switch (code) ++ { ++ case EQ_EXPR: ++ case NE_EXPR: ++ rhs1 = gimple_assign_rhs1 (stmt); ++ rhs2 = gimple_assign_rhs2 (stmt); ++ if ((rhs1 == ssa_name && integer_zerop (rhs2)) ++ || (rhs2 == ssa_name && integer_zerop (rhs1))) ++ { ++ return true; ++ } ++ break; ++ ++ case BIT_IOR_EXPR: ++ lhs = gimple_assign_lhs (stmt); ++ if (hset->contains (lhs)) ++ { ++ return false; ++ } ++ /* We should check the use of _tmp further. */ ++ return check_uses (lhs, hset); ++ ++ default: ++ break; ++ } ++ return false; ++} ++ ++/* Check SSA_NAME is used in ++ # result = PHI ++*/ ++static bool ++check_uses_phi (const_tree ssa_name, gimple *stmt, hash_set *hset) ++{ ++ for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++) ++ { ++ tree arg = gimple_phi_arg_def (stmt, i); ++ if (!integer_zerop (arg) && arg != ssa_name) ++ { ++ return false; ++ } ++ } ++ ++ tree result = gimple_phi_result (stmt); ++ ++ /* It is used to avoid infinite recursion, ++ ++ if (cond) ++ goto ++ else ++ goto ++ ++ ++ # _tmp2 = PHI <0 (bb 1), _tmp3 (bb 3)> ++ {BODY} ++ if (cond) ++ goto ++ else ++ goto ++ ++ ++ # _tmp3 = PHI <0 (bb 1), _tmp2 (bb 2)> ++ {BODY} ++ if (cond) ++ goto ++ else ++ goto ++ ++ ++ ... 
++ */ ++ if (hset->contains (result)) ++ { ++ return false; ++ } ++ ++ return check_uses (result, hset); ++} ++ ++/* Check the use of SSA_NAME, it should only be used in comparison ++ operation and PHI node. HSET is used to record the ssa_names ++ that have been already checked. */ ++static bool ++check_uses (tree ssa_name, hash_set *hset) ++{ ++ imm_use_iterator imm_iter; ++ use_operand_p use_p; ++ ++ if (TREE_CODE (ssa_name) != SSA_NAME) ++ { ++ return false; ++ } ++ ++ if (SSA_NAME_VAR (ssa_name) ++ && is_global_var (SSA_NAME_VAR (ssa_name))) ++ { ++ return false; ++ } ++ ++ hset->add (ssa_name); ++ ++ FOR_EACH_IMM_USE_FAST (use_p, imm_iter, ssa_name) ++ { ++ gimple *stmt = USE_STMT (use_p); ++ ++ /* Ignore debug gimple statements. */ ++ if (is_gimple_debug (stmt)) ++ { ++ continue; ++ } ++ ++ switch (gimple_code (stmt)) ++ { ++ case GIMPLE_COND: ++ if (!check_uses_cond (ssa_name, stmt, hset)) ++ { ++ return false; ++ } ++ break; ++ ++ case GIMPLE_ASSIGN: ++ if (!check_uses_assign (ssa_name, stmt, hset)) ++ { ++ return false; ++ } ++ break; ++ ++ case GIMPLE_PHI: ++ if (!check_uses_phi (ssa_name, stmt, hset)) ++ { ++ return false; ++ } ++ break; ++ ++ default: ++ return false; ++ } ++ } ++ return true; ++} ++ ++static bool ++check_def_gimple (gimple *def1, gimple *def2, const_tree result) ++{ ++ /* def1 and def2 should be POINTER_PLUS_EXPR. */ ++ if (!is_gimple_assign (def1) || !is_gimple_assign (def2) ++ || gimple_assign_rhs_code (def1) != POINTER_PLUS_EXPR ++ || gimple_assign_rhs_code (def2) != POINTER_PLUS_EXPR) ++ { ++ return false; ++ } ++ ++ tree rhs12 = gimple_assign_rhs2 (def1); ++ ++ tree rhs21 = gimple_assign_rhs1 (def2); ++ tree rhs22 = gimple_assign_rhs2 (def2); ++ ++ if (rhs21 != result) ++ { ++ return false; ++ } ++ ++ /* We should have a positive pointer-plus constant to ensure ++ that the pointer value is continuously increasing. 
*/ ++ if (TREE_CODE (rhs12) != INTEGER_CST || TREE_CODE (rhs22) != INTEGER_CST ++ || compare_tree_int (rhs12, 0) <= 0 || compare_tree_int (rhs22, 0) <= 0) ++ { ++ return false; ++ } ++ ++ return true; ++} ++ ++static bool ++check_loop_body (basic_block bb0, basic_block bb2, const_tree result) ++{ ++ gimple *g01 = first_stmt (bb0); ++ if (!g01 || !is_gimple_assign (g01) ++ || gimple_assign_rhs_code (g01) != MEM_REF ++ || TREE_OPERAND (gimple_assign_rhs1 (g01), 0) != result) ++ { ++ return false; ++ } ++ ++ gimple *g02 = g01->next; ++ /* GIMPLE_COND would be the last gimple in a basic block, ++ and have no other side effects on RESULT. */ ++ if (!g02 || gimple_code (g02) != GIMPLE_COND) ++ { ++ return false; ++ } ++ ++ if (first_stmt (bb2) != last_stmt (bb2)) ++ { ++ return false; ++ } ++ ++ return true; ++} ++ ++/* Pattern is like ++
++   arg1 = base (rhs11) + cst (rhs12); [def1]
++   goto 
++
++   
++   arg2 = result (rhs21) + cst (rhs22); [def2]
++
++   
++   # result = PHI 
++   _v = *result;  [g01]
++   if (_v == 0)   [g02]
++     goto 
++   else
++     goto 
++
++   
++   _1 = result - base;     [g1]
++   _2 = _1 /[ex] cst;      [g2]
++   _3 = (unsigned int) _2; [g3]
++   if (_3 == 0)
++   ...
++*/
++static bool
++check_bb_order (basic_block bb0, basic_block &bb1, basic_block &bb2,
++		gphi *phi_stmt, gimple *&output)
++{
++  /* Start check from PHI node in BB0.  */
++  if (gimple_phi_num_args (phi_stmt) != 2
++      || virtual_operand_p (gimple_phi_result (phi_stmt)))
++    {
++      return false;
++    }
++
++  tree result = gimple_phi_result (phi_stmt);
++  tree arg1 = gimple_phi_arg_def (phi_stmt, 0);
++  tree arg2 = gimple_phi_arg_def (phi_stmt, 1);
++
++  if (TREE_CODE (arg1) != SSA_NAME
++      || TREE_CODE (arg2) != SSA_NAME
++      || SSA_NAME_IS_DEFAULT_DEF (arg1)
++      || SSA_NAME_IS_DEFAULT_DEF (arg2))
++    {
++      return false;
++    }
++
++  gimple *def1 = SSA_NAME_DEF_STMT (arg1);
++  gimple *def2 = SSA_NAME_DEF_STMT (arg2);
++
++  /* Swap bb1 and bb2 if pattern is like
++     if (_v != 0)
++       goto 
++     else
++       goto 
++  */
++  if (gimple_bb (def2) == bb1 && EDGE_SUCC (bb1, 0)->dest == bb0)
++    {
++      std::swap (bb1, bb2);
++    }
++
++  /* prebb[def1] --> bb0 <-- bb2[def2] */
++  if (!gimple_bb (def1)
++      || EDGE_SUCC (gimple_bb (def1), 0)->dest != bb0
++      || gimple_bb (def2) != bb2 || EDGE_SUCC (bb2, 0)->dest != bb0)
++    {
++      return false;
++    }
++
++  /* Check whether define gimple meets the pattern requirements.  */
++  if (!check_def_gimple (def1, def2, result))
++    {
++      return false;
++    }
++
++  if (!check_loop_body (bb0, bb2, result))
++    {
++      return false;
++    }
++
++  output = def1;
++  return true;
++}
++
++/* Check pattern
++   
++   _1 = result - base;     [g1]
++   _2 = _1 /[ex] cst;      [g2]
++   _3 = (unsigned int) _2; [g3]
++   if (_3 == 0)
++   ...
++*/
++static bool
++check_gimple_order (basic_block bb1, const_tree base, const_tree cst,
++		    const_tree result, gimple *&output)
++{
++  gimple *g1 = first_stmt (bb1);
++  if (!g1 || !is_gimple_assign (g1)
++      || gimple_assign_rhs_code (g1) != POINTER_DIFF_EXPR
++      || gimple_assign_rhs1 (g1) != result
++      || gimple_assign_rhs2 (g1) != base)
++    {
++      return false;
++    }
++
++  gimple *g2 = g1->next;
++  if (!g2 || !is_gimple_assign (g2)
++      || gimple_assign_rhs_code (g2) != EXACT_DIV_EXPR
++      || gimple_assign_lhs (g1) != gimple_assign_rhs1 (g2)
++      || TREE_CODE (gimple_assign_rhs2 (g2)) != INTEGER_CST)
++    {
++      return false;
++    }
++
++  /* INTEGER_CST cst in gimple def1.  */
++  HOST_WIDE_INT num1 = TREE_INT_CST_LOW (cst);
++  /* INTEGER_CST cst in gimple g2.  */
++  HOST_WIDE_INT num2 = TREE_INT_CST_LOW (gimple_assign_rhs2 (g2));
++  /* _2 must be at least a positive number.  */
++  if (num2 == 0 || num1 / num2 <= 0)
++    {
++      return false;
++    }
++
++  gimple *g3 = g2->next;
++  if (!g3 || !is_gimple_assign (g3)
++      || gimple_assign_rhs_code (g3) != NOP_EXPR
++      || gimple_assign_lhs (g2) != gimple_assign_rhs1 (g3)
++      || TREE_CODE (gimple_assign_lhs (g3)) != SSA_NAME)
++    {
++      return false;
++    }
++
++  /* _3 should only be used in comparison operation or PHI node.  */
++  hash_set *hset = new hash_set;
++  if (!check_uses (gimple_assign_lhs (g3), hset))
++    {
++      delete hset;
++      return false;
++    }
++  delete hset;
++
++  output = g3;
++  return true;
++}
++
++static bool
++do_phiopt_pattern (basic_block bb0, basic_block bb1, basic_block bb2)
++{
++  gphi_iterator gsi;
++
++  for (gsi = gsi_start_phis (bb0); !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      gphi *phi_stmt = gsi.phi ();
++      gimple *def1 = NULL;
++      tree base, cst, result;
++
++      if (!check_bb_order (bb0, bb1, bb2, phi_stmt, def1))
++	{
++	  continue;
++	}
++
++      base = gimple_assign_rhs1 (def1);
++      cst = gimple_assign_rhs2 (def1);
++      result = gimple_phi_result (phi_stmt);
++
++      gimple *stmt = NULL;
++      if (!check_gimple_order (bb1, base, cst, result, stmt))
++	{
++	  continue;
++	}
++
++      gcc_assert (stmt);
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "PHIOPT pattern optimization (1) - Rewrite:\n");
++	  print_gimple_stmt (dump_file, stmt, 0);
++	  fprintf (dump_file, "to\n");
++	}
++
++      /* Rewrite statement
++	   _3 = (unsigned int) _2;
++	 to
++	   _3 = (unsigned int) 1;
++      */
++      tree type = TREE_TYPE (gimple_assign_rhs1 (stmt));
++      gimple_assign_set_rhs1 (stmt, build_int_cst (type, 1));
++      update_stmt (stmt);
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  print_gimple_stmt (dump_file, stmt, 0);
++	  fprintf (dump_file, "\n");
++	}
++
++      return true;
++    }
++  return false;
++}
++
+ /* Determine whether we should attempt to hoist adjacent loads out of
+    diamond patterns in pass_phiopt.  Always hoist loads if
+    -fhoist-adjacent-loads is specified and the target machine has
+-- 
+2.28.0.windows.1
+
diff --git a/0028-Array-widen-compare-Fix-the-return-value-match-after.patch b/0028-Array-widen-compare-Fix-the-return-value-match-after.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c5622b8358ffa66c5ee3326dd0b0646be65c2ed0
--- /dev/null
+++ b/0028-Array-widen-compare-Fix-the-return-value-match-after.patch
@@ -0,0 +1,40 @@
+From a89b84cd038e02f0cdafecea1cf337d5f8480683 Mon Sep 17 00:00:00 2001
+From: dingguangya 
+Date: Fri, 8 Sep 2023 11:55:04 +0800
+Subject: [PATCH] [Array-widen-compare] Fix the return value match after update
+ the get_loop_exit_edges interface
+
+The return value type of the get_loop_exit_edges interface
+changed from vec to auto_vec
+
+diff --git a/gcc/tree-ssa-loop-array-widen-compare.cc b/gcc/tree-ssa-loop-array-widen-compare.cc
+index ba6170fa0..d27f91f11 100644
+--- a/gcc/tree-ssa-loop-array-widen-compare.cc
++++ b/gcc/tree-ssa-loop-array-widen-compare.cc
+@@ -272,12 +272,12 @@ record_origin_loop_exit_info (class loop *loop)
+       || origin_loop.cond_stmt1 != NULL || origin_loop.cond_stmt2 != NULL)
+     return false;
+ 
+-  vec exit_edges = get_loop_exit_edges (loop);
++  auto_vec exit_edges = get_loop_exit_edges (loop);
+   if (exit_edges == vNULL)
+     return false;
+ 
+   if (exit_edges.length () != 2)
+-    goto fail;
++    return false;
+ 
+   FOR_EACH_VEC_ELT (exit_edges, i, e)
+     {
+@@ -304,8 +304,6 @@ record_origin_loop_exit_info (class loop *loop)
+       && origin_loop.cond_stmt1 != NULL && origin_loop.cond_stmt2 != NULL)
+     found = true;
+ 
+-fail:
+-  exit_edges.release ();
+   return found;
+ }
+ 
+-- 
+2.33.0
+
diff --git a/0028-LoongArch-Fix-scan-assembler-times-of-lasx-lsx-test-.patch b/0028-LoongArch-Fix-scan-assembler-times-of-lasx-lsx-test-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..42c232fedc08c0241ef084a2541ee4db023e36ab
--- /dev/null
+++ b/0028-LoongArch-Fix-scan-assembler-times-of-lasx-lsx-test-.patch
@@ -0,0 +1,161 @@
+From 9731abbe19b9fad184dfe728bd9b2cc02b40c543 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Thu, 16 Nov 2023 20:31:09 +0800
+Subject: [PATCH 028/188] LoongArch: Fix scan-assembler-times of lasx/lsx test
+ case.
+
+These tests fail when they are first added; this patch adjusts the scan-assembler-times
+to fix them.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-vcond-1.c: Adjust assembler times.
+	* gcc.target/loongarch/vector/lasx/lasx-vcond-2.c: Ditto.
+	* gcc.target/loongarch/vector/lsx/lsx-vcond-1.c: Ditto.
+	* gcc.target/loongarch/vector/lsx/lsx-vcond-2.c: Ditto.
+---
+ .../loongarch/vector/lasx/lasx-vcond-1.c      | 12 +++----
+ .../loongarch/vector/lasx/lasx-vcond-2.c      | 36 +++++++++----------
+ .../loongarch/vector/lsx/lsx-vcond-1.c        | 12 +++----
+ .../loongarch/vector/lsx/lsx-vcond-2.c        | 36 +++++++++----------
+ 4 files changed, 48 insertions(+), 48 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c
+index ee9cb1a1f..57064eac9 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-1.c
+@@ -52,13 +52,13 @@ TEST_VAR_ALL (DEF_VCOND_VAR)
+ 
+ /* { dg-final { scan-assembler-times {\txvslt\.b} 4 } } */
+ /* { dg-final { scan-assembler-times {\txvslt\.h} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvslt\.w} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvslt\.d} 4 } } */
++/* { dg-final { scan-assembler-times {\txvslt\.w} 8 } } */
++/* { dg-final { scan-assembler-times {\txvslt\.d} 8 } } */
+ /* { dg-final { scan-assembler-times {\txvsle\.b} 4 } } */
+ /* { dg-final { scan-assembler-times {\txvsle\.h} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvsle\.w} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvsle\.d} 4 } } */
++/* { dg-final { scan-assembler-times {\txvsle\.w} 8 } } */
++/* { dg-final { scan-assembler-times {\txvsle\.d} 8 } } */
+ /* { dg-final { scan-assembler-times {\txvseq\.b} 4 } } */
+ /* { dg-final { scan-assembler-times {\txvseq\.h} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvseq\.w} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvseq\.d} 4 } } */
++/* { dg-final { scan-assembler-times {\txvseq\.w} 8 } } */
++/* { dg-final { scan-assembler-times {\txvseq\.d} 8 } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c
+index 5f40ed44c..55d5a084c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c
+@@ -67,21 +67,21 @@ TEST_CMP (nule)
+ TEST_CMP (nuge)
+ TEST_CMP (nugt)
+ 
+-/* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.slt\.s} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.slt\.d} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.sle\.s} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.sle\.d} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cor\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cor\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cun\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cun\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cueq\.s} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cueq\.d} 4 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cule\.s} 8 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cule\.d} 8 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cult\.s} 8 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cult\.d} 8 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.slt\.s} 6 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.slt\.d} 6 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.sle\.s} 6 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.sle\.d} 6 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cor\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cor\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cun\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cun\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cueq\.s} 6 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cueq\.d} 6 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cule\.s} 12 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cule\.d} 12 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cult\.s} 12 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cult\.d} 12 } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c
+index 138adccfa..8c69f0d9b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-1.c
+@@ -52,13 +52,13 @@ TEST_VAR_ALL (DEF_VCOND_VAR)
+ 
+ /* { dg-final { scan-assembler-times {\tvslt\.b} 4 } } */
+ /* { dg-final { scan-assembler-times {\tvslt\.h} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvslt\.w} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvslt\.d} 4 } } */
++/* { dg-final { scan-assembler-times {\tvslt\.w} 8 } } */
++/* { dg-final { scan-assembler-times {\tvslt\.d} 8 } } */
+ /* { dg-final { scan-assembler-times {\tvsle\.b} 4 } } */
+ /* { dg-final { scan-assembler-times {\tvsle\.h} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvsle\.w} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvsle\.d} 4 } } */
++/* { dg-final { scan-assembler-times {\tvsle\.w} 8 } } */
++/* { dg-final { scan-assembler-times {\tvsle\.d} 8 } } */
+ /* { dg-final { scan-assembler-times {\tvseq\.b} 4 } } */
+ /* { dg-final { scan-assembler-times {\tvseq\.h} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvseq\.w} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvseq\.d} 4 } } */
++/* { dg-final { scan-assembler-times {\tvseq\.w} 8 } } */
++/* { dg-final { scan-assembler-times {\tvseq\.d} 8 } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c
+index e8fe31f8f..2214afd0a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c
+@@ -67,21 +67,21 @@ TEST_CMP (nule)
+ TEST_CMP (nuge)
+ TEST_CMP (nugt)
+ 
+-/* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.slt\.s} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.slt\.d} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.sle\.s} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.sle\.d} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cor\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cor\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cun\.s} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cun\.d} 2 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cueq\.s} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cueq\.d} 4 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cule\.s} 8 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cule\.d} 8 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cult\.s} 8 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cult\.d} 8 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.slt\.s} 6 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.slt\.d} 6 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.sle\.s} 6 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.sle\.d} 6 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cor\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cor\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cun\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cun\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cueq\.s} 6 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cueq\.d} 6 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cule\.s} 12 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cule\.d} 12 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cult\.s} 12 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cult\.d} 12 } } */
+-- 
+2.43.0
+
diff --git a/0029-LoongArch-Increase-cost-of-vector-aligned-store-load.patch b/0029-LoongArch-Increase-cost-of-vector-aligned-store-load.patch
new file mode 100644
index 0000000000000000000000000000000000000000..27d00d55e428c5012c7aa750a610bc7644728701
--- /dev/null
+++ b/0029-LoongArch-Increase-cost-of-vector-aligned-store-load.patch
@@ -0,0 +1,45 @@
+From 526e1effd86cfa0b1afae88890ce4f74f7150d88 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Thu, 16 Nov 2023 16:44:36 +0800
+Subject: [PATCH 029/188] LoongArch: Increase cost of vector aligned
+ store/load.
+
+Based on SPEC2017 performance evaluation results, it's better to make them equal
+to the cost of unaligned store/load so as to avoid odd alignment peeling.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(loongarch_builtin_vectorization_cost): Adjust.
+---
+ gcc/config/loongarch/loongarch.cc | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 8467f03cf..b6f0d61ef 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3889,11 +3889,9 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
+       case scalar_stmt:
+       case scalar_load:
+       case vector_stmt:
+-      case vector_load:
+       case vec_to_scalar:
+       case scalar_to_vec:
+       case scalar_store:
+-      case vector_store:
+ 	return 1;
+ 
+       case vec_promote_demote:
+@@ -3901,6 +3899,8 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
+ 	return LASX_SUPPORTED_MODE_P (mode)
+ 	  && !LSX_SUPPORTED_MODE_P (mode) ? 2 : 1;
+ 
++      case vector_load:
++      case vector_store:
+       case unaligned_load:
+       case unaligned_store:
+ 	return 2;
+-- 
+2.43.0
+
diff --git a/0029-Struct-Reorg-Add-Safe-Structure-Pointer-Compression.patch b/0029-Struct-Reorg-Add-Safe-Structure-Pointer-Compression.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7b097470db0ad84ae12342399db7c8334ff23298
--- /dev/null
+++ b/0029-Struct-Reorg-Add-Safe-Structure-Pointer-Compression.patch
@@ -0,0 +1,1191 @@
+From 7930d75c9fd3f36cc2dce934569f00c71248bb31 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Sat, 25 Nov 2023 10:28:48 +0800
+Subject: [PATCH] [Struct Reorg] Add Safe Structure Pointer Compression
+
+Safe structure pointer compression allows safely transferring pointers
+stored in a structure into the index of a structure array with a smaller
+type, to reduce the size of the structure.
+Add flag -fipa-struct-reorg=4 to enable safe structure pointer
+compression.
+Add param compressed-pointer-size=[8,16,32] to control the compressed
+pointer size.
+---
+ gcc/common.opt                           |   5 +-
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 908 ++++++++++++++++++++++-
+ gcc/ipa-struct-reorg/ipa-struct-reorg.h  |   4 +
+ gcc/params.opt                           |   4 +
+ 4 files changed, 882 insertions(+), 39 deletions(-)
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index b01df919e..f6e20c1e8 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1993,8 +1993,9 @@ Common Var(flag_ipa_struct_reorg) Init(0) Optimization
+ Perform structure layout optimizations.
+ 
+ fipa-struct-reorg=
+-Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 3)
+--fipa-struct-reorg=[0,1,2,3] adding none, struct-reorg, reorder-fields, dfe optimizations.
++Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 4)
++-fipa-struct-reorg=[0,1,2,3,4] adding none, struct-reorg, reorder-fields,
++dfe, safe-pointer-compression optimizations.
+ 
+ fipa-vrp
+ Common Var(flag_ipa_vrp) Optimization
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index dcc6df496..5d451c4c8 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -89,6 +89,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "gimple-iterator.h"
+ #include "gimple-walk.h"
+ #include "cfg.h"
++#include "cfghooks.h" /* For split_block.  */
+ #include "ssa.h"
+ #include "tree-dfa.h"
+ #include "fold-const.h"
+@@ -147,7 +148,27 @@ using namespace struct_relayout;
+ #define VOID_POINTER_P(type) \
+   (POINTER_TYPE_P (type) && VOID_TYPE_P (TREE_TYPE (type)))
+ 
+-/* Return true iff TYPE is stdarg va_list type.  */
++static void
++set_var_attributes (tree var)
++{
++  if (!var)
++    return;
++  gcc_assert (TREE_CODE (var) == VAR_DECL);
++
++  DECL_ARTIFICIAL (var) = 1;
++  DECL_EXTERNAL (var) = 0;
++  TREE_STATIC (var) = 1;
++  TREE_PUBLIC (var) = 0;
++  TREE_USED (var) = 1;
++  DECL_CONTEXT (var) = NULL;
++  TREE_THIS_VOLATILE (var) = 0;
++  TREE_ADDRESSABLE (var) = 0;
++  TREE_READONLY (var) = 0;
++  if (is_global_var (var))
++    set_decl_tls_model (var, TLS_MODEL_NONE);
++}
++
++/* Return true if TYPE is stdarg va_list type.  */
+ 
+ static inline bool
+ is_va_list_type (tree type)
+@@ -271,9 +292,15 @@ enum struct_layout_opt_level
+   STRUCT_SPLIT = 1 << 0,
+   COMPLETE_STRUCT_RELAYOUT = 1 << 1,
+   STRUCT_REORDER_FIELDS = 1 << 2,
+-  DEAD_FIELD_ELIMINATION = 1 << 3
++  DEAD_FIELD_ELIMINATION = 1 << 3,
++  POINTER_COMPRESSION_SAFE = 1 << 4
+ };
+ 
++/* Defines the target pointer size of compressed pointer, which should be 8,
++   16, 32.  */
++
++static int compressed_size = 32;
++
+ static bool is_result_of_mult (tree arg, tree *num, tree struct_size);
+ static bool isptrptr (tree type);
+ void get_base (tree &base, tree expr);
+@@ -394,7 +421,10 @@ srtype::srtype (tree type)
+   : type (type),
+     chain_type (false),
+     escapes (does_not_escape),
++    pc_gptr (NULL_TREE),
+     visited (false),
++    pc_candidate (false),
++    has_legal_alloc_num (false),
+     has_alloc_array (0)
+ {
+   for (int i = 0; i < max_split; i++)
+@@ -476,6 +506,31 @@ srtype::mark_escape (escape_type e, gimple *stmt)
+     }
+ }
+ 
++/* Create a global header for compressed struct.  */
++
++void
++srtype::create_global_ptr_for_pc ()
++{
++  if (!pc_candidate || pc_gptr != NULL_TREE)
++    return;
++
++  const char *type_name = get_type_name (type);
++  gcc_assert (type_name != NULL);
++
++  char *gptr_name = concat (type_name, "_pc", NULL);
++  tree new_name = get_identifier (gptr_name);
++  tree new_type = build_pointer_type (newtype[0]);
++  tree new_var = build_decl (UNKNOWN_LOCATION, VAR_DECL, new_name, new_type);
++  set_var_attributes (new_var);
++  pc_gptr = new_var;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nType: %s has create global header for pointer"
++	       " compression: %s\n", type_name, gptr_name);
++
++  free (gptr_name);
++}
++
+ /* Add FIELD to the list of fields that use this type.  */
+ 
+ void
+@@ -798,15 +853,31 @@ srfield::create_new_reorder_fields (tree newtype[max_split],
+       fields.safe_push (field);
+     }
+ 
+-  DECL_NAME (field) = DECL_NAME (fielddecl);
+   if (type == NULL)
+-    /* Common members do not need to reconstruct.
++    {
++      DECL_NAME (field) = DECL_NAME (fielddecl);
++      /* Common members do not need to reconstruct.
+        Otherwise, int* -> int** or void* -> void**.  */
+-    TREE_TYPE (field) = nt;
++      TREE_TYPE (field) = nt;
++      SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl));
++    }
++  else if (type->pc_candidate)
++    {
++      const char *old_name = IDENTIFIER_POINTER (DECL_NAME (fielddecl));
++      char *new_name = concat (old_name, "_pc", NULL);
++      DECL_NAME (field) = get_identifier (new_name);
++      free (new_name);
++      TREE_TYPE (field) = make_unsigned_type (compressed_size);
++      SET_DECL_ALIGN (field, compressed_size);
++    }
+   else
+-    TREE_TYPE (field) = reconstruct_complex_type (TREE_TYPE (fielddecl), nt);
++    {
++      TREE_TYPE (field) = reconstruct_complex_type (TREE_TYPE (fielddecl), nt);
++      DECL_NAME (field) = DECL_NAME (fielddecl);
++      SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl));
++    }
++
+   DECL_SOURCE_LOCATION (field) = DECL_SOURCE_LOCATION (fielddecl);
+-  SET_DECL_ALIGN (field, DECL_ALIGN (fielddecl));
+   DECL_USER_ALIGN (field) = DECL_USER_ALIGN (fielddecl);
+   TREE_ADDRESSABLE (field) = TREE_ADDRESSABLE (fielddecl);
+   DECL_NONADDRESSABLE_P (field) = !TREE_ADDRESSABLE (fielddecl);
+@@ -925,6 +996,10 @@ srtype::create_new_type (void)
+ 	  && has_dead_field ())
+ 	fprintf (dump_file, "Dead field elimination.\n");
+     }
++
++  if (pc_candidate && pc_gptr == NULL_TREE)
++    create_global_ptr_for_pc ();
++
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "Created %d types:\n", maxclusters);
+@@ -1338,6 +1413,30 @@ public:
+ 
+   unsigned execute_struct_relayout (void);
+   bool remove_dead_field_stmt (tree lhs);
++
++  // Pointer compression methods:
++  void check_and_prune_struct_for_pointer_compression (void);
++  void try_rewrite_with_pointer_compression (gassign *, gimple_stmt_iterator *,
++					     tree, tree, tree &, tree &);
++  bool safe_void_cmp_p (tree, srtype *);
++  bool pc_candidate_st_type_p (tree);
++  bool pc_candidate_tree_p (tree);
++  bool pc_type_conversion_candidate_p (tree);
++  bool pc_direct_rewrite_chance_p (tree, tree &);
++  bool compress_candidate_with_check (gimple_stmt_iterator *, tree, tree &);
++  bool compress_candidate (gassign *, gimple_stmt_iterator *, tree, tree &);
++  bool decompress_candidate_with_check (gimple_stmt_iterator *, tree, tree &);
++  bool decompress_candidate (gimple_stmt_iterator *, tree, tree, tree &,
++			     tree &);
++  srtype *get_compression_candidate_type (tree);
++  tree compress_ptr_to_offset (tree, srtype *, gimple_stmt_iterator *);
++  tree decompress_offset_to_ptr (tree, srtype *, gimple_stmt_iterator *);
++  basic_block create_bb_for_compress_candidate (basic_block, tree, srtype *,
++						tree &);
++  basic_block create_bb_for_decompress_candidate (basic_block, tree, srtype *,
++						  tree &);
++  basic_block create_bb_for_compress_nullptr (basic_block, tree &);
++  basic_block create_bb_for_decompress_nullptr (basic_block, tree, tree &);
+ };
+ 
+ struct ipa_struct_relayout
+@@ -1386,26 +1485,6 @@ namespace {
+ 
+ /* Methods for ipa_struct_relayout.  */
+ 
+-static void
+-set_var_attributes (tree var)
+-{
+-  if (!var)
+-    return;
+-  gcc_assert (TREE_CODE (var) == VAR_DECL);
+-
+-  DECL_ARTIFICIAL (var) = 1;
+-  DECL_EXTERNAL (var) = 0;
+-  TREE_STATIC (var) = 1;
+-  TREE_PUBLIC (var) = 0;
+-  TREE_USED (var) = 1;
+-  DECL_CONTEXT (var) = NULL;
+-  TREE_THIS_VOLATILE (var) = 0;
+-  TREE_ADDRESSABLE (var) = 0;
+-  TREE_READONLY (var) = 0;
+-  if (is_global_var (var))
+-    set_decl_tls_model (var, TLS_MODEL_NONE);
+-}
+-
+ tree
+ ipa_struct_relayout::create_new_vars (tree type, const char *name)
+ {
+@@ -2985,6 +3064,19 @@ ipa_struct_reorg::find_vars (gimple *stmt)
+ 	     records the right value _1 declaration.  */
+ 	  find_var (gimple_assign_rhs1 (stmt), stmt);
+ 
++	  /* Pointer types from non-zero pointer need to be escaped in pointer
++	     compression and complete relayout.
++	     e.g _1->t = (struct *) 0x400000.  */
++	  if (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT
++	      && TREE_CODE (lhs) == COMPONENT_REF
++	      && TREE_CODE (TREE_TYPE (lhs)) == POINTER_TYPE
++	      && TREE_CODE (rhs) == INTEGER_CST
++	      && !integer_zerop (rhs))
++	    {
++	      mark_type_as_escape (inner_type (TREE_TYPE (lhs)),
++				   escape_cast_int, stmt);
++	    }
++
+ 	  /* Add a safe func mechanism.  */
+ 	  bool l_find = true;
+ 	  bool r_find = true;
+@@ -3436,12 +3528,13 @@ is_result_of_mult (tree arg, tree *num, tree struct_size)
+ bool
+ ipa_struct_reorg::handled_allocation_stmt (gimple *stmt)
+ {
+-  if ((current_layout_opt_level >= STRUCT_REORDER_FIELDS)
++  if ((current_layout_opt_level & STRUCT_REORDER_FIELDS)
+       && (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)
+ 	  || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC)
+ 	  || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)))
+     return true;
+-  if ((current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT)
++  if ((current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT
++       || current_layout_opt_level & POINTER_COMPRESSION_SAFE)
+       && gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
+     return true;
+   if ((current_layout_opt_level == STRUCT_SPLIT)
+@@ -3563,14 +3656,19 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other,
+ 	}
+     }
+   /* x_1 = y.x_nodes; void *x;
+-     Directly mark the structure pointer type assigned
+-     to the void* variable as escape.  */
++     Mark the structure pointer type assigned
++     to the void* variable as escape.  Unless the void* is only used to compare
++     with variables of the same type.  */
+   else if (current_layout_opt_level >= STRUCT_REORDER_FIELDS
+ 	   && TREE_CODE (side) == SSA_NAME
+ 	   && VOID_POINTER_P (TREE_TYPE (side))
+ 	   && SSA_NAME_VAR (side)
+ 	   && VOID_POINTER_P (TREE_TYPE (SSA_NAME_VAR (side))))
+-    mark_type_as_escape (TREE_TYPE (other), escape_cast_void, stmt);
++      if (current_layout_opt_level < POINTER_COMPRESSION_SAFE
++	  || !safe_void_cmp_p (side, type))
++	{
++	  mark_type_as_escape (TREE_TYPE (other), escape_cast_void, stmt);
++	}
+ 
+   check_ptr_layers (side, other, stmt);
+ }
+@@ -4181,7 +4279,7 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl,
+ void
+ ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type)
+ {
+-  if (current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT
++  if (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT
+       && handled_allocation_stmt (stmt))
+     {
+       tree arg0 = gimple_call_arg (stmt, 0);
+@@ -4200,6 +4298,23 @@ ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type)
+ 	type->has_alloc_array = type->has_alloc_array < 0
+ 				  ? type->has_alloc_array
+ 				  : type->has_alloc_array + 1;
++
++      if (current_layout_opt_level & POINTER_COMPRESSION_SAFE
++	  && TREE_CODE (arg0) == INTEGER_CST)
++	{
++	  /* Only known size during compilation can be optimized
++	     at this level.  */
++	  unsigned HOST_WIDE_INT max_alloc_size = 0;
++	  switch (compressed_size)
++	    {
++	      case 8: max_alloc_size = 0xff; break; // max of uint8
++	      case 16: max_alloc_size = 0xffff; break; // max of uint16
++	      case 32: max_alloc_size = 0xffffffff; break; // max of uint32
++	      default: gcc_unreachable (); break;
++	    }
++	  if (tree_to_uhwi (arg0) < max_alloc_size)
++	    type->has_legal_alloc_num = true;
++	}
+     }
+ }
+ 
+@@ -4328,7 +4443,13 @@ ipa_struct_reorg::check_definition (srdecl *decl, vec &worklist)
+   if (current_layout_opt_level >= STRUCT_REORDER_FIELDS
+       && SSA_NAME_VAR (ssa_name)
+       && VOID_POINTER_P (TREE_TYPE (SSA_NAME_VAR (ssa_name))))
+-    type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name));
++      {
++	if (current_layout_opt_level < POINTER_COMPRESSION_SAFE
++	  || !safe_void_cmp_p (ssa_name, type))
++	  {
++	    type->mark_escape (escape_cast_void, SSA_NAME_DEF_STMT (ssa_name));
++	  }
++      }
+   gimple *stmt = SSA_NAME_DEF_STMT (ssa_name);
+ 
+   /*
+@@ -5294,6 +5415,8 @@ ipa_struct_reorg::create_new_types (void)
+   for (unsigned i = 0; i < types.length (); i++)
+     newtypes += types[i]->create_new_type ();
+ 
++  /* Some new types may not have been created at create_new_type (), so
++     recreate new type for all struct fields.  */
+   if (current_layout_opt_level >= STRUCT_REORDER_FIELDS)
+     {
+       for (unsigned i = 0; i < types.length (); i++)
+@@ -5304,9 +5427,18 @@ ipa_struct_reorg::create_new_types (void)
+ 	      for (unsigned j = 0; j < fields->length (); j++)
+ 		{
+ 		  tree field = (*fields)[j];
+-		  TREE_TYPE (field)
+-		  = reconstruct_complex_type (TREE_TYPE (field),
+-					      types[i]->newtype[0]);
++		  if (types[i]->pc_candidate)
++		    {
++		      TREE_TYPE (field)
++			= make_unsigned_type (compressed_size);
++		      SET_DECL_ALIGN (field, compressed_size);
++		    }
++		  else
++		    {
++		      TREE_TYPE (field)
++			= reconstruct_complex_type (TREE_TYPE (field),
++						    types[i]->newtype[0]);
++		    }
+ 		}
+ 	    }
+ 	}
+@@ -5685,6 +5817,554 @@ ipa_struct_reorg::rewrite_expr (tree expr,
+   return true;
+ }
+ 
++/* Emit a series of gimples to compress the pointer to the index relative to
++   the global header.  The basic blocks where gsi is located must have at least
++   one stmt.  */
++
++tree
++ipa_struct_reorg::compress_ptr_to_offset (tree xhs, srtype *type,
++					  gimple_stmt_iterator *gsi)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nCompress candidate pointer:\n");
++      print_generic_expr (dump_file, xhs);
++      fprintf (dump_file, "\nto offset:\n");
++    }
++
++  /* Emit gimple _X1 = ptr - gptr.  */
++  tree pointer_addr = fold_convert (long_unsigned_type_node, xhs);
++  tree gptr_addr = fold_convert (long_unsigned_type_node, type->pc_gptr);
++  tree step1 = gimplify_build2 (gsi, MINUS_EXPR, long_unsigned_type_node,
++				pointer_addr, gptr_addr);
++
++  /* Emit gimple _X2 = _X1 / sizeof (struct).  */
++  tree step2 = gimplify_build2 (gsi, TRUNC_DIV_EXPR, long_unsigned_type_node,
++				step1, TYPE_SIZE_UNIT (type->newtype[0]));
++
++  /* Emit gimple _X3 = _X2 + 1.  */
++  tree step3 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node,
++				step2, build_one_cst (long_unsigned_type_node));
++
++  /* Emit _X4 = (compressed_size) _X3.  */
++  tree step4 = gimplify_build1 (gsi, NOP_EXPR,
++				make_unsigned_type (compressed_size), step3);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      print_generic_expr (dump_file, step3);
++      fprintf (dump_file, "\n");
++    }
++  return step4;
++}
++
++/* Emit a series of gimples to decompress the index into the original
++   pointer.  The basic blocks where gsi is located must have at least
++   one stmt.  */
++
++tree
++ipa_struct_reorg::decompress_offset_to_ptr (tree xhs, srtype *type,
++					    gimple_stmt_iterator *gsi)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nDecompress candidate offset:\n");
++      print_generic_expr (dump_file, xhs);
++      fprintf (dump_file, "\nto pointer:\n");
++    }
++
++  /* Emit _X1 = xhs - 1.  */
++  tree offset = fold_convert (long_unsigned_type_node, xhs);
++  tree step1 = gimplify_build2 (gsi, MINUS_EXPR, long_unsigned_type_node,
++				offset,
++				build_one_cst (long_unsigned_type_node));
++
++  /* Emit _X2 = _X1 * sizeof (struct).  */
++  tree step2 = gimplify_build2 (gsi, MULT_EXPR, long_unsigned_type_node,
++				step1, TYPE_SIZE_UNIT (type->newtype[0]));
++
++  /* Emit _X3 = phead + _X2.  */
++  tree gptr_addr = fold_convert (long_unsigned_type_node, type->pc_gptr);
++  tree step3 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node,
++				gptr_addr, step2);
++
++  /* Emit _X4 = (struct *) _X3.  */
++  tree step4 = gimplify_build1 (gsi, NOP_EXPR, TREE_TYPE (type->pc_gptr),
++				step3);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      print_generic_expr (dump_file, step3);
++      fprintf (dump_file, "\n");
++    }
++  return step4;
++}
++
++/* Return the compression candidate srtype of SSA_NAME or COMPONENT_REF.  */
++
++srtype *
++ipa_struct_reorg::get_compression_candidate_type (tree xhs)
++{
++  if (xhs == NULL_TREE)
++    return NULL;
++
++  if (TREE_CODE (xhs) == SSA_NAME || TREE_CODE (xhs) == COMPONENT_REF)
++    {
++      srtype *access_type = find_type (inner_type (TREE_TYPE (xhs)));
++      if (access_type != NULL && access_type->pc_candidate)
++	return access_type;
++    }
++  return NULL;
++}
++
++/* True if the input type is the candidate type for pointer compression.  */
++
++bool
++ipa_struct_reorg::pc_candidate_st_type_p (tree type)
++{
++  if (type == NULL_TREE)
++    return false;
++
++  if (TREE_CODE (type) == POINTER_TYPE)
++    {
++      if (TREE_CODE (TREE_TYPE (type)) == RECORD_TYPE)
++	{
++	  srtype *access_type = find_type (TREE_TYPE (type));
++	  if (access_type != NULL && access_type->pc_candidate)
++	    return true;
++	}
++    }
++  return false;
++}
++
++/* True if the input xhs is a candidate for pointer compression.  */
++
++bool
++ipa_struct_reorg::pc_candidate_tree_p (tree xhs)
++{
++  if (xhs == NULL_TREE)
++    return false;
++
++  if (TREE_CODE (xhs) == COMPONENT_REF)
++    {
++      srtype *base_type = find_type (TREE_TYPE (TREE_OPERAND (xhs, 0)));
++      if (base_type == NULL || base_type->has_escaped ())
++	return false;
++
++      return pc_candidate_st_type_p (TREE_TYPE (xhs));
++    }
++  return false;
++}
++
++/* True if xhs is a component_ref that base has escaped but uses a compression
++   candidate type.  */
++
++bool
++ipa_struct_reorg::pc_type_conversion_candidate_p (tree xhs)
++{
++  if (xhs == NULL_TREE)
++    return false;
++
++  if (TREE_CODE (xhs) == COMPONENT_REF)
++    {
++      srtype *base_type = find_type (TREE_TYPE (TREE_OPERAND (xhs, 0)));
++      if (base_type != NULL && base_type->has_escaped ())
++	return pc_candidate_st_type_p (TREE_TYPE (xhs));
++
++    }
++  return false;
++}
++
++/* Creates a new basic block with zero for compressed null pointers.  */
++
++basic_block
++ipa_struct_reorg::create_bb_for_compress_nullptr (basic_block last_bb,
++						  tree &phi)
++{
++  basic_block new_bb = create_empty_bb (last_bb);
++  if (last_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (new_bb, last_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++
++  /* Emit phi = 0.  */
++  gimple_stmt_iterator gsi = gsi_last_bb (new_bb);
++  phi = make_ssa_name (make_unsigned_type (compressed_size));
++  tree rhs = build_int_cst (make_unsigned_type (compressed_size), 0);
++  gimple *new_stmt = gimple_build_assign (phi, rhs);
++  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nCreate bb %d for compress nullptr:\n",
++	       new_bb->index);
++      gimple_dump_bb (dump_file, new_bb, 0, dump_flags);
++    }
++  return new_bb;
++}
++
++/* Create a new basic block to compress the pointer to the index relative to
++   the allocated memory pool header.  */
++
++basic_block
++ipa_struct_reorg::create_bb_for_compress_candidate (basic_block last_bb,
++						    tree new_rhs, srtype *type,
++						    tree &phi)
++{
++  basic_block new_bb = create_empty_bb (last_bb);
++  if (last_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (new_bb, last_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++
++  gimple_stmt_iterator gsi = gsi_last_bb (new_bb);
++  /* compress_ptr_to_offset () needs at least one stmt in target bb.  */
++  gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT);
++  phi = compress_ptr_to_offset (new_rhs, type, &gsi);
++  /* Remove the NOP created above.  */
++  gsi_remove (&gsi, true);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nCreate bb %d for compress candidate:\n",
++	       new_bb->index);
++      gimple_dump_bb (dump_file, new_bb, 0, dump_flags);
++    }
++  return new_bb;
++}
++
++/* Compression can be simplified by these following cases:
++     1.  if rhs is NULL, uses zero to represent it.
++     2.  if new_rhs has been converted into INTEGER_TYPE in the previous stmt,
++	 just use it here.  For example:
++	    _1 = t->s
++	 -> tt->s = _1.  */
++
++bool
++ipa_struct_reorg::pc_direct_rewrite_chance_p (tree rhs, tree &new_rhs)
++{
++  if (integer_zerop (rhs))
++    {
++      new_rhs = build_int_cst (make_unsigned_type (compressed_size), 0);
++      return true;
++    }
++  else if (new_rhs && TREE_CODE (TREE_TYPE (new_rhs)) == INTEGER_TYPE)
++    {
++      return true;
++    }
++  return false;
++}
++
++/* Perform pointer compression with check.  The conversion will be as shown in
++   the following example:
++     Orig bb:
++     bb <1>:
++     _1->t = _2
++
++     will be transformed to:
++     bb <1>:
++     _3 = _2
++     if (_2 == NULL)
++       goto bb <2>
++     else
++       goto bb <3>
++
++     bb <2>:
++     _3 = 0
++     goto bb <4>
++
++     bb <3>:
++     ...
++     _4 = compress (_2)
++     goto bb <4>
++
++     bb <4>:
++     _5 = PHI (_3, _4)
++     _1->t = _5
++   The gsi will move to the beginning of split dst bb <4>, _1->t = _5 will be
++   emitted by rewrite_assign ().  */
++
++bool
++ipa_struct_reorg::compress_candidate_with_check (gimple_stmt_iterator *gsi,
++						 tree rhs, tree &new_rhs)
++{
++  tree cond_lhs = make_ssa_name (TREE_TYPE (new_rhs));
++  gimple *assign_stmt = gimple_build_assign (cond_lhs, new_rhs);
++  gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
++
++  /* Insert cond stmt.  */
++  tree rhs_pointer_type = build_pointer_type (TREE_TYPE (new_rhs));
++  gcond *cond = gimple_build_cond (EQ_EXPR, cond_lhs,
++				   build_int_cst (rhs_pointer_type, 0),
++				   NULL_TREE, NULL_TREE);
++  gimple_set_location (cond, UNKNOWN_LOCATION);
++  gsi_insert_before (gsi, cond, GSI_SAME_STMT);
++
++  edge e = split_block (cond->bb, cond);
++  basic_block split_src_bb = e->src;
++  basic_block split_dst_bb = e->dest;
++
++  /* Create bb for nullptr.  */
++  tree phi1 = NULL_TREE;
++  basic_block true_bb = create_bb_for_compress_nullptr (split_src_bb, phi1);
++
++  /* Create bb for comprssion.  */
++  srtype *type = get_compression_candidate_type (rhs);
++  gcc_assert (type != NULL);
++  tree phi2 = NULL_TREE;
++  basic_block false_bb = create_bb_for_compress_candidate (true_bb, new_rhs,
++							   type, phi2);
++
++  /* Rebuild and reset cfg.  */
++  remove_edge_raw (e);
++
++  edge etrue = make_edge (split_src_bb, true_bb, EDGE_TRUE_VALUE);
++  etrue->probability = profile_probability::unlikely ();
++  true_bb->count = etrue->count ();
++
++  edge efalse = make_edge (split_src_bb, false_bb, EDGE_FALSE_VALUE);
++  efalse->probability = profile_probability::likely ();
++  false_bb->count = efalse->count ();
++
++  edge e1 = make_single_succ_edge (true_bb, split_dst_bb, EDGE_FALLTHRU);
++  edge e2 = make_single_succ_edge (false_bb, split_dst_bb, EDGE_FALLTHRU);
++
++  tree phi = make_ssa_name (make_unsigned_type (compressed_size));
++  gphi *phi_node = create_phi_node (phi, split_dst_bb);
++  add_phi_arg (phi_node, phi1, e1, UNKNOWN_LOCATION);
++  add_phi_arg (phi_node, phi2, e2, UNKNOWN_LOCATION);
++
++  if (dom_info_available_p (CDI_DOMINATORS))
++    {
++      set_immediate_dominator (CDI_DOMINATORS, split_dst_bb, split_src_bb);
++      set_immediate_dominator (CDI_DOMINATORS, true_bb, split_src_bb);
++      set_immediate_dominator (CDI_DOMINATORS, false_bb, split_src_bb);
++    }
++  *gsi = gsi_start_bb (split_dst_bb);
++  new_rhs = phi;
++  return true;
++}
++
++/* If there is a direct rewrite chance or simplification opportunity, perform
++   the simplified compression rewrite.  Otherwise, create a cond expression and
++   two basic blocks to implement pointer compression.  */
++
++bool
++ipa_struct_reorg::compress_candidate (gassign *stmt, gimple_stmt_iterator *gsi,
++				      tree rhs, tree &new_rhs)
++{
++  if (pc_direct_rewrite_chance_p (rhs, new_rhs))
++    return true;
++
++  return compress_candidate_with_check (gsi, rhs, new_rhs);
++}
++
++/* Create a new basic block to decompress the index to null pointer.  */
++
++basic_block
++ipa_struct_reorg::create_bb_for_decompress_nullptr (basic_block last_bb,
++						    tree new_rhs,
++						    tree &phi_node)
++{
++  basic_block new_bb = create_empty_bb (last_bb);
++  if (last_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (new_bb, last_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  gimple_stmt_iterator gsi = gsi_last_bb (new_bb);
++  tree rhs_pointer_type = build_pointer_type (TREE_TYPE (new_rhs));
++  phi_node = make_ssa_name (rhs_pointer_type);
++  gimple *new_stmt = gimple_build_assign (phi_node,
++					  build_int_cst (rhs_pointer_type, 0));
++  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nCreate bb %d for decompress nullptr:\n",
++	       new_bb->index);
++      gimple_dump_bb (dump_file, new_bb, 0, dump_flags);
++    }
++  return new_bb;
++}
++
++/* Create a new basic block to decompress the index into original pointer.  */
++
++basic_block
++ipa_struct_reorg::create_bb_for_decompress_candidate (basic_block last_bb,
++						      tree lhs, srtype *type,
++						      tree &phi_node)
++{
++  basic_block new_bb = create_empty_bb (last_bb);
++  if (last_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (new_bb, last_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  gimple_stmt_iterator gsi = gsi_last_bb (new_bb);
++  /* decompress_ptr_to_offset () needs at least one stmt in target bb.  */
++  gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT);
++  phi_node = decompress_offset_to_ptr (lhs, type, &gsi);
++  /* Remove the NOP created above.  */
++  gsi_remove (&gsi, true);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nCreate bb %d for decompress candidate:\n",
++	       new_bb->index);
++      gimple_dump_bb (dump_file, new_bb, 0, dump_flags);
++    }
++  return new_bb;
++}
++
++/* Perform pointer decompression with check.  The conversion will be as shown
++   in the following example:
++     Orig bb:
++     bb <1>:
++     _1 = _2->t
++
++     will be transformed to:
++     bb <1>:
++     _3 = _2->t
++     if (_3 == 0)
++       goto bb <2>
++     else
++       goto bb <3>
++
++     bb <2>:
++     _4 = NULL
++     goto bb <4>
++
++     bb <3>:
++     ...
++     _5 = decompress (_3)
++     goto bb <4>
++
++     bb <4>:
++     _6 = PHI (_4, _5)
++     _1 = _6
++   The gsi will move to the beginning of split dst bb <4>, _1 = _6 will be
++   emitted by rewrite_assign ().  */
++
++bool
++ipa_struct_reorg::decompress_candidate_with_check (gimple_stmt_iterator *gsi,
++						   tree rhs, tree &new_rhs)
++{
++  /* Insert cond stmt.  */
++  tree cond_lhs = make_ssa_name (TREE_TYPE (new_rhs));
++  gassign *cond_assign = gimple_build_assign (cond_lhs, new_rhs);
++  gsi_insert_before (gsi, cond_assign, GSI_SAME_STMT);
++
++  tree pc_type = make_unsigned_type (compressed_size);
++  gcond *cond = gimple_build_cond (EQ_EXPR, cond_lhs,
++				   build_int_cst (pc_type, 0),
++				   NULL_TREE, NULL_TREE);
++  gimple_set_location (cond, UNKNOWN_LOCATION);
++  gsi_insert_before (gsi, cond, GSI_SAME_STMT);
++
++  /* Split bb.  */
++  edge e = split_block (cond->bb, cond);
++  basic_block split_src_bb = e->src;
++  basic_block split_dst_bb = e->dest;
++
++  /* Create bb for decompress nullptr.  */
++  tree phi1 = NULL_TREE;
++  basic_block true_bb = create_bb_for_decompress_nullptr (split_src_bb,
++							  new_rhs, phi1);
++
++  /* Create bb for decomprssion candidate.  */
++  tree phi2 = NULL_TREE;
++  srtype *type = get_compression_candidate_type (rhs);
++  gcc_assert (type != NULL);
++  basic_block false_bb = create_bb_for_decompress_candidate (true_bb, cond_lhs,
++							     type, phi2);
++
++  /* Refresh and reset cfg.  */
++  remove_edge_raw (e);
++
++  edge etrue = make_edge (split_src_bb, true_bb, EDGE_TRUE_VALUE);
++  etrue->probability = profile_probability::unlikely ();
++  true_bb->count = etrue->count ();
++
++  edge efalse = make_edge (split_src_bb, false_bb, EDGE_FALSE_VALUE);
++  efalse->probability = profile_probability::likely ();
++  false_bb->count = efalse->count ();
++
++  edge e1 = make_single_succ_edge (true_bb, split_dst_bb, EDGE_FALLTHRU);
++  edge e2 = make_single_succ_edge (false_bb, split_dst_bb, EDGE_FALLTHRU);
++
++  tree phi = make_ssa_name (build_pointer_type (TREE_TYPE (cond_lhs)));
++  gphi *phi_node = create_phi_node (phi, split_dst_bb);
++  add_phi_arg (phi_node, phi1, e1, UNKNOWN_LOCATION);
++  add_phi_arg (phi_node, phi2, e2, UNKNOWN_LOCATION);
++
++  if (dom_info_available_p (CDI_DOMINATORS))
++    {
++      set_immediate_dominator (CDI_DOMINATORS, split_dst_bb, split_src_bb);
++      set_immediate_dominator (CDI_DOMINATORS, true_bb, split_src_bb);
++      set_immediate_dominator (CDI_DOMINATORS, false_bb, split_src_bb);
++    }
++  *gsi = gsi_start_bb (split_dst_bb);
++  new_rhs = phi;
++  return true;
++}
++
++/* If there is a simplification opportunity, perform the simplified
++   decompression rewrite.  Otherwise, create a cond expression and two basic
++   blocks to implement pointer decompression.  */
++
++bool
++ipa_struct_reorg::decompress_candidate (gimple_stmt_iterator *gsi,
++					tree lhs, tree rhs, tree &new_lhs,
++					tree &new_rhs)
++{
++  // TODO: simplifiy check and rewrite will be pushed in next PR.
++  return decompress_candidate_with_check (gsi, rhs, new_rhs);
++}
++
++/* Try to perform pointer compression and decompression.  */
++
++void
++ipa_struct_reorg::try_rewrite_with_pointer_compression (gassign *stmt,
++							gimple_stmt_iterator
++							*gsi, tree lhs,
++							tree rhs, tree &new_lhs,
++							tree &new_rhs)
++{
++  bool l = pc_candidate_tree_p (lhs);
++  bool r = pc_candidate_tree_p (rhs);
++  if (!l && !r)
++    {
++      tree tmp_rhs = new_rhs == NULL_TREE ? rhs : new_rhs;
++      if (pc_type_conversion_candidate_p (lhs))
++	{
++	  /* Transfer MEM[(struct *)_1].files = _4;
++	     to MEM[(struct *)_1].files = (struct *)_4; */
++	  new_rhs = fold_convert (TREE_TYPE (lhs), tmp_rhs);
++	}
++      else if (pc_type_conversion_candidate_p (rhs))
++	{
++	  /* Transfer _4 = MEM[(struct *)_1].nodes;
++	     to _4  = (new_struct *) MEM[(struct *)_1].nodes; */
++	  new_rhs = fold_convert (TREE_TYPE (new_lhs), tmp_rhs);
++	}
++    }
++  else if (l && r)
++    gcc_unreachable ();
++  else if (l)
++    {
++      if (!compress_candidate (stmt, gsi, rhs, new_rhs))
++	gcc_unreachable ();
++    }
++  else if (r)
++    {
++      if (!decompress_candidate (gsi, lhs, rhs, new_lhs, new_rhs))
++	gcc_unreachable ();
++    }
++}
++
+ bool
+ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ {
+@@ -5880,6 +6560,9 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	fprintf (dump_file, "\nreplaced with:\n");
+       for (unsigned i = 0; i < max_split && (newlhs[i] || newrhs[i]); i++)
+ 	{
++	  if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE)
++	    try_rewrite_with_pointer_compression (stmt, gsi, lhs, rhs,
++						  newlhs[i], newrhs[i]);
+ 	  gimple *newstmt = gimple_build_assign (newlhs[i] ? newlhs[i] : lhs,
+ 						 newrhs[i] ? newrhs[i] : rhs);
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+@@ -5956,6 +6639,13 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+ 	    gcc_assert (false);
+ 	  gimple_call_set_lhs (g, decl->newdecl[i]);
+ 	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++	  if (type->pc_candidate)
++	    {
++	      /* Init global header for pointer compression.  */
++	      gassign *gptr
++		= gimple_build_assign (type->pc_gptr, decl->newdecl[i]);
++	      gsi_insert_before (gsi, gptr, GSI_SAME_STMT);
++	    }
+ 	}
+       return true;
+     }
+@@ -6411,6 +7101,12 @@ ipa_struct_reorg::rewrite_functions (void)
+       push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+       current_function = f;
+ 
++      if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE)
++	{
++	  calculate_dominance_info (CDI_DOMINATORS);
++	  loop_optimizer_init (0);
++	}
++
+       if (dump_file && (dump_flags & TDF_DETAILS))
+ 	{
+ 	  fprintf (dump_file, "\nBefore rewrite: %dth_%s\n",
+@@ -6486,6 +7182,9 @@ ipa_struct_reorg::rewrite_functions (void)
+ 
+       free_dominance_info (CDI_DOMINATORS);
+ 
++      if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE)
++	loop_optimizer_finalize ();
++
+       if (dump_file)
+ 	{
+ 	  fprintf (dump_file, "\nAfter rewrite: %dth_%s\n",
+@@ -6514,6 +7213,8 @@ ipa_struct_reorg::execute_struct_relayout (void)
+ 	continue;
+       if (types[i]->chain_type)
+ 	continue;
++      if (get_type_name (types[i]->type) == NULL)
++	continue;
+       retval |= ipa_struct_relayout (type, this).execute ();
+     }
+ 
+@@ -6530,6 +7231,131 @@ ipa_struct_reorg::execute_struct_relayout (void)
+   return retval;
+ }
+ 
++/* True if the var with void type is only used to compare with the same
++   target type.  */
++
++bool
++ipa_struct_reorg::safe_void_cmp_p (tree var, srtype *type)
++{
++  imm_use_iterator imm_iter;
++  use_operand_p use_p;
++  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
++    {
++      gimple *use_stmt = USE_STMT (use_p);
++      if (is_gimple_debug (use_stmt))
++	continue;
++
++      if (gimple_code (use_stmt) == GIMPLE_COND)
++	{
++	  tree lhs = gimple_cond_lhs (use_stmt);
++	  tree rhs = gimple_cond_rhs (use_stmt);
++	  tree xhs = lhs == var ? rhs : lhs;
++	  if (types_compatible_p (inner_type (TREE_TYPE (xhs)), type->type))
++	    continue;
++
++	}
++      return false;
++    }
++  return true;
++}
++
++/* Mark the structure that should perform pointer compression.  */
++
++void
++ipa_struct_reorg::check_and_prune_struct_for_pointer_compression (void)
++{
++  unsigned pc_transform_num = 0;
++
++  if (dump_file)
++    fprintf (dump_file, "\nMark the structure that should perform pointer"
++			" compression:\n");
++
++  for (unsigned i = 0; i < types.length (); i++)
++    {
++      srtype *type = types[i];
++      if (dump_file)
++	print_generic_expr (dump_file, type->type);
++
++      if (type->has_escaped ())
++	{
++	  if (dump_file)
++	    fprintf (dump_file, " has escaped by %s, skip compression.\n",
++		     type->escape_reason ());
++	  continue;
++	}
++      if (TYPE_FIELDS (type->type) == NULL)
++	{
++	  if (dump_file)
++	    fprintf (dump_file, " has zero field, skip compression.\n");
++	  continue;
++	}
++      if (type->chain_type)
++	{
++	  if (dump_file)
++	      fprintf (dump_file, " is chain_type, skip compression.\n");
++	  continue;
++	}
++      if (type->has_alloc_array != 1)
++	{
++	  if (dump_file)
++	    fprintf (dump_file, " has alloc number: %d, skip compression.\n",
++		     type->has_alloc_array);
++	  continue;
++	}
++      if (get_type_name (type->type) == NULL)
++	{
++	  if (dump_file)
++	    fprintf (dump_file, " has empty struct name,"
++				" skip compression.\n");
++	  continue;
++	}
++      if ((current_layout_opt_level & POINTER_COMPRESSION_SAFE)
++	  && !type->has_legal_alloc_num)
++	{
++	  if (dump_file)
++	    fprintf (dump_file, " has illegal struct array size,"
++				" skip compression.\n");
++	  continue;
++	}
++      pc_transform_num++;
++      type->pc_candidate = true;
++      if (dump_file)
++	fprintf (dump_file, " attemps to do pointer compression.\n");
++    }
++
++  if (dump_file)
++    {
++      if (pc_transform_num)
++	fprintf (dump_file, "\nNumber of structures to transform in "
++			    "pointer compression is %d\n", pc_transform_num);
++      else
++	fprintf (dump_file, "\nNo structures to transform in "
++			    "pointer compression.\n");
++    }
++}
++
++/* Init pointer size from parameter param_pointer_compression_size.  */
++
++static void
++init_pointer_size_for_pointer_compression (void)
++{
++  switch (param_pointer_compression_size)
++    {
++      case 8:
++	compressed_size = 8; // sizeof (uint8)
++	break;
++      case 16:
++	compressed_size = 16; // sizeof (uint16)
++	break;
++      case 32:
++	compressed_size = 32; // sizeof (uint32)
++	break;
++      default:
++	error ("Invalid pointer compression size, using the following param: "
++	       "\"--param compressed-pointer-size=[8,16,32]\"");
++    }
++}
++
+ unsigned int
+ ipa_struct_reorg::execute (unsigned int opt)
+ {
+@@ -6551,6 +7377,8 @@ ipa_struct_reorg::execute (unsigned int opt)
+       if (current_layout_opt_level == STRUCT_SPLIT)
+ 	analyze_types ();
+ 
++      if (opt >= POINTER_COMPRESSION_SAFE)
++	check_and_prune_struct_for_pointer_compression ();
+       ret = rewrite_functions ();
+     }
+   else
+@@ -6598,6 +7426,8 @@ public:
+     unsigned int level = 0;
+     switch (struct_layout_optimize_level)
+       {
++	case 4: level |= POINTER_COMPRESSION_SAFE;
++	// FALLTHRU
+ 	case 3: level |= DEAD_FIELD_ELIMINATION;
+ 	// FALLTHRU
+ 	case 2: level |= STRUCT_REORDER_FIELDS;
+@@ -6609,6 +7439,10 @@ public:
+ 	case 0: break;
+ 	default: gcc_unreachable ();
+       }
++
++    if (level & POINTER_COMPRESSION_SAFE)
++      init_pointer_size_for_pointer_compression ();
++
+     /* Preserved for backward compatibility, reorder fields needs run before
+        struct split and complete struct relayout.  */
+     if (flag_ipa_reorder_fields && level < STRUCT_REORDER_FIELDS)
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+index 719f7b308..6c4469597 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+@@ -121,7 +121,10 @@ private:
+ 
+ public:
+   tree newtype[max_split];
++  tree pc_gptr;
+   bool visited;
++  bool pc_candidate;
++  bool has_legal_alloc_num;
+   /* Negative number means it has illegal allocated arrays
+      that we do not optimize.  */
+   int has_alloc_array;
+@@ -145,6 +148,7 @@ public:
+   void analyze (void);
+   bool has_dead_field (void);
+   void mark_escape (escape_type, gimple *stmt);
++  void create_global_ptr_for_pc ();
+   bool has_escaped (void)
+   {
+     return escapes != does_not_escape;
+diff --git a/gcc/params.opt b/gcc/params.opt
+index 1ddf1343f..d2196dc68 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1205,4 +1205,8 @@ Enum(vrp_mode) String(vrp) Value(VRP_MODE_VRP)
+ EnumValue
+ Enum(vrp_mode) String(ranger) Value(VRP_MODE_RANGER)
+ 
++-param=compressed-pointer-size=
++Common Joined UInteger Var(param_pointer_compression_size) Init(32) IntegerRange(8, 32) Param Optimization
++Target size of compressed pointer, which should be 8, 16 or 32.
++
+ ; This comment is to ensure we retain the blank line above.
+-- 
+2.33.0
+
diff --git a/0030-LoongArch-Implement-C-LT-Z_DEFINED_VALUE_AT_ZERO.patch b/0030-LoongArch-Implement-C-LT-Z_DEFINED_VALUE_AT_ZERO.patch
new file mode 100644
index 0000000000000000000000000000000000000000..37cb90118955bc9fb71bc93721210cb9494608a3
--- /dev/null
+++ b/0030-LoongArch-Implement-C-LT-Z_DEFINED_VALUE_AT_ZERO.patch
@@ -0,0 +1,58 @@
+From bd74cb3e1238e842d15bcd4044c9e2f246cc18bc Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Fri, 17 Nov 2023 10:38:02 +0800
+Subject: [PATCH 030/188] LoongArch: Implement C[LT]Z_DEFINED_VALUE_AT_ZERO
+
+The LoongArch has defined ctz and clz on the backend, but if we want GCC
+do CTZ transformation optimization in forwprop2 pass, GCC need to know
+the value of c[lt]z at zero, which may be beneficial for some test cases
+(like spec2017 deepsjeng_r).
+
+After implementing the macro, we test dynamic instruction count on
+deepsjeng_r:
+- before 1688423249186
+- after  1660311215745 (1.66% reduction)
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (CLZ_DEFINED_VALUE_AT_ZERO):
+	Implement.
+	(CTZ_DEFINED_VALUE_AT_ZERO): Same.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.dg/pr90838.c: add clz/ctz test support on LoongArch.
+---
+ gcc/config/loongarch/loongarch.h | 5 +++++
+ gcc/testsuite/gcc.dg/pr90838.c   | 5 +++++
+ 2 files changed, 10 insertions(+)
+
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 6e8ac293a..19cf6fd33 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -1239,3 +1239,8 @@ struct GTY (()) machine_function
+ 
+ #define TARGET_EXPLICIT_RELOCS \
+   (la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS)
++
++#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
++  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
++#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
++  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+diff --git a/gcc/testsuite/gcc.dg/pr90838.c b/gcc/testsuite/gcc.dg/pr90838.c
+index 7502b8463..7aa912525 100644
+--- a/gcc/testsuite/gcc.dg/pr90838.c
++++ b/gcc/testsuite/gcc.dg/pr90838.c
+@@ -82,3 +82,8 @@ int ctz4 (unsigned long x)
+ /* { dg-final { scan-assembler-times "ctz\t" 3 { target { rv32 } } } } */
+ /* { dg-final { scan-assembler-times "andi\t" 1 { target { rv32 } } } } */
+ /* { dg-final { scan-assembler-times "mul\t" 1 { target { rv32 } } } } */
++
++/* { dg-final { scan-tree-dump-times {= \.CTZ} 4 "forwprop2" { target { loongarch64*-*-* } } } } */
++/* { dg-final { scan-assembler-times "ctz.d\t" 1 { target { loongarch64*-*-* } } } } */
++/* { dg-final { scan-assembler-times "ctz.w\t" 3 { target { loongarch64*-*-* } } } } */
++/* { dg-final { scan-assembler-times "andi\t" 4 { target { loongarch64*-*-* } } } } */
+-- 
+2.43.0
+
diff --git a/0030-Struct-Reorg-Add-unsafe-structure-pointer-compressio.patch b/0030-Struct-Reorg-Add-unsafe-structure-pointer-compressio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2bca621d6cd208a52ae2bf278d8dbbae8fd7ab35
--- /dev/null
+++ b/0030-Struct-Reorg-Add-unsafe-structure-pointer-compressio.patch
@@ -0,0 +1,1232 @@
+From 82d6166cd29fb1c3474f29b28cb7e5478d3a551a Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Mon, 25 Dec 2023 11:17:04 +0800
+Subject: [PATCH] [Struct Reorg] Add unsafe structure pointer compression
+
+Unsafe structure pointer compression allows for some dangerous
+conversions for better performance.
+Add flag -fipa-struct-reorg=5 to enable unsafe structure pointer
+compression.
+---
+ gcc/common.opt                                |   6 +-
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      | 365 ++++++++++++++----
+ gcc/symbol-summary.h                          |  22 +-
+ .../gcc.dg/struct/csr_skip_void_struct_name.c |  53 +++
+ gcc/testsuite/gcc.dg/struct/pc_cast_int.c     |  91 +++++
+ .../gcc.dg/struct/pc_compress_and_decomress.c |  90 +++++
+ gcc/testsuite/gcc.dg/struct/pc_ptr2void.c     |  87 +++++
+ .../gcc.dg/struct/pc_simple_rewrite_pc.c      | 112 ++++++
+ .../gcc.dg/struct/pc_skip_void_struct_name.c  |  53 +++
+ gcc/testsuite/gcc.dg/struct/struct-reorg.exp  |   8 +
+ 10 files changed, 804 insertions(+), 83 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/pc_cast_int.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/pc_ptr2void.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 56b547506..c7c6bc256 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1993,9 +1993,9 @@ Common Var(flag_ipa_struct_reorg) Init(0) Optimization
+ Perform structure layout optimizations.
+ 
+ fipa-struct-reorg=
+-Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 4)
+--fipa-struct-reorg=[0,1,2,3,4] adding none, struct-reorg, reorder-fields,
+-dfe, safe-pointer-compression optimizations.
++Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 5)
++-fipa-struct-reorg=[0,1,2,3,4,5] adding none, struct-reorg, reorder-fields,
++dfe, safe-pointer-compression, unsafe-pointer-compression optimizations.
+ 
+ fipa-vrp
+ Common Var(flag_ipa_vrp) Optimization
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 5d451c4c8..fa33f2d35 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -293,7 +293,8 @@ enum struct_layout_opt_level
+   COMPLETE_STRUCT_RELAYOUT = 1 << 1,
+   STRUCT_REORDER_FIELDS = 1 << 2,
+   DEAD_FIELD_ELIMINATION = 1 << 3,
+-  POINTER_COMPRESSION_SAFE = 1 << 4
++  POINTER_COMPRESSION_SAFE = 1 << 4,
++  POINTER_COMPRESSION_UNSAFE = 1 << 5
+ };
+ 
+ /* Defines the target pointer size of compressed pointer, which should be 8,
+@@ -1267,10 +1268,10 @@ csrtype::init_type_info (void)
+ 
+   /* Close enough to pad to improve performance.
+      33~63 should pad to 64 but 33~48 (first half) are too far away, and
+-     65~127 should pad to 128 but 65~96 (first half) are too far away.  */
++     70~127 should pad to 128 but 65~70 (first half) are too far away.  */
+   if (old_size > 48 && old_size < 64)
+     new_size = 64;
+-  if (old_size > 96 && old_size < 128)
++  if (old_size > 70 && old_size < 128)
+     new_size = 128;
+ 
+   /* For performance reasons, only allow structure size
+@@ -1423,8 +1424,12 @@ public:
+   bool pc_candidate_tree_p (tree);
+   bool pc_type_conversion_candidate_p (tree);
+   bool pc_direct_rewrite_chance_p (tree, tree &);
++  bool pc_simplify_chance_for_compress_p (gassign *, tree);
++  bool compress_candidate_without_check (gimple_stmt_iterator *, tree, tree &);
+   bool compress_candidate_with_check (gimple_stmt_iterator *, tree, tree &);
+   bool compress_candidate (gassign *, gimple_stmt_iterator *, tree, tree &);
++  bool decompress_candidate_without_check (gimple_stmt_iterator *,
++					   tree, tree, tree &, tree &);
+   bool decompress_candidate_with_check (gimple_stmt_iterator *, tree, tree &);
+   bool decompress_candidate (gimple_stmt_iterator *, tree, tree, tree &,
+ 			     tree &);
+@@ -1924,7 +1929,6 @@ bool
+ ipa_struct_relayout::maybe_rewrite_cst (tree cst, gimple_stmt_iterator *gsi,
+ 					HOST_WIDE_INT ×)
+ {
+-  bool ret = false;
+   gcc_assert (TREE_CODE (cst) == INTEGER_CST);
+ 
+   gimple *stmt = gsi_stmt (*gsi);
+@@ -1948,27 +1952,95 @@ ipa_struct_relayout::maybe_rewrite_cst (tree cst, gimple_stmt_iterator *gsi,
+     {
+       if (gsi_one_before_end_p (*gsi))
+ 	return false;
+-      gsi_next (gsi);
+-      gimple *stmt2 = gsi_stmt (*gsi);
+-
+-      if (gimple_code (stmt2) == GIMPLE_ASSIGN
+-	  && gimple_assign_rhs_code (stmt2) == POINTER_PLUS_EXPR)
++      // Check uses.
++      imm_use_iterator imm_iter_lhs;
++      use_operand_p use_p_lhs;
++      FOR_EACH_IMM_USE_FAST (use_p_lhs, imm_iter_lhs, gimple_assign_lhs (stmt))
+ 	{
+-	  tree lhs = gimple_assign_lhs (stmt2);
+-	  tree rhs1 = gimple_assign_rhs1 (stmt2);
+-	  if (types_compatible_p (inner_type (TREE_TYPE (rhs1)), ctype.type)
+-	      || types_compatible_p (inner_type (TREE_TYPE (lhs)), ctype.type))
++	  gimple *stmt2 = USE_STMT (use_p_lhs);
++	  if (gimple_code (stmt2) != GIMPLE_ASSIGN)
++	    continue;
++	  if (gimple_assign_rhs_code (stmt2) == POINTER_PLUS_EXPR)
+ 	    {
+-	      tree num = NULL;
+-	      if (is_result_of_mult (cst, &num, TYPE_SIZE_UNIT (ctype.type)))
++	      tree lhs = gimple_assign_lhs (stmt2);
++	      tree rhs1 = gimple_assign_rhs1 (stmt2);
++	      if (types_compatible_p (inner_type (TREE_TYPE (rhs1)), ctype.type)
++		  || types_compatible_p (inner_type (TREE_TYPE (lhs)),
++					 ctype.type))
+ 		{
+-		  times = TREE_INT_CST_LOW (num);
+-		  ret = true;
++		  tree num = NULL;
++		  if (is_result_of_mult (cst, &num,
++					 TYPE_SIZE_UNIT (ctype.type)))
++		    {
++		      times = TREE_INT_CST_LOW (num);
++		      return true;
++		    }
++		}
++	    }
++	  // For pointer compression, handle plus stmt.
++	  else if (gimple_assign_rhs_code (stmt2) == PLUS_EXPR)
++	    {
++	      // Check uses.
++	      imm_use_iterator imm_iter_cast;
++	      use_operand_p use_p_cast;
++	      FOR_EACH_IMM_USE_FAST (use_p_cast, imm_iter_cast,
++				     gimple_assign_lhs (stmt2))
++		{
++		  gimple *stmt_cast = USE_STMT (use_p_cast);
++		  if (gimple_code (stmt_cast) != GIMPLE_ASSIGN)
++		    continue;
++		  if (gimple_assign_cast_p (stmt_cast))
++		    {
++		      tree lhs_type = inner_type (TREE_TYPE (
++					gimple_assign_lhs (stmt_cast)));
++		      if (types_compatible_p (lhs_type, ctype.type))
++			{
++			  tree num = NULL;
++			  if (is_result_of_mult (cst, &num,
++						 TYPE_SIZE_UNIT (ctype.type)))
++			    {
++			      times = TREE_INT_CST_LOW (num);
++			      return true;
++			    }
++			}
++		    }
+ 		}
+ 	    }
+ 	}
+-      gsi_prev (gsi);
+-      return ret;
++    }
++  // For pointer compression, handle div stmt.
++  if (gimple_assign_rhs_code (stmt) == TRUNC_DIV_EXPR)
++    {
++      imm_use_iterator imm_iter;
++      use_operand_p use_p;
++      tree lhs = gimple_assign_lhs (stmt);
++      if (lhs == NULL_TREE)
++	return false;
++      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
++	{
++	  gimple *use_stmt = USE_STMT (use_p);
++	  if (is_gimple_debug (use_stmt))
++	    continue;
++	  if (gimple_code (use_stmt) != GIMPLE_ASSIGN)
++	    continue;
++	  if (gimple_assign_cast_p (use_stmt))
++	    {
++	      tree lhs_type = inner_type (TREE_TYPE (
++				gimple_assign_lhs (use_stmt)));
++	      if (TYPE_UNSIGNED (lhs_type)
++		  && TREE_CODE (lhs_type) == INTEGER_TYPE
++		  && TYPE_PRECISION (lhs_type) == compressed_size)
++		{
++		  tree num = NULL;
++		  if (is_result_of_mult (cst, &num,
++					 TYPE_SIZE_UNIT (ctype.type)))
++		    {
++		      times = TREE_INT_CST_LOW (num);
++		      return true;
++		    }
++		}
++	    }
++	}
+     }
+   return false;
+ }
+@@ -2967,7 +3039,9 @@ ipa_struct_reorg::record_var (tree decl, escape_type escapes, int arg)
+ 	  && TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE)
+ 	e = escape_separate_instance;
+ 
+-      if (e != does_not_escape)
++      if (e != does_not_escape
++	  && (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT
++	      || replace_type_map.get (type->type) == NULL))
+ 	type->mark_escape (e, NULL);
+     }
+ 
+@@ -3629,7 +3703,9 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other,
+       if (TREE_CODE (side) == SSA_NAME
+ 	  && VOID_POINTER_P (TREE_TYPE (side)))
+ 	return;
+-      d->type->mark_escape (escape_cast_another_ptr, stmt);
++      if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT
++	  || replace_type_map.get (d->type->type) == NULL)
++	d->type->mark_escape (escape_cast_another_ptr, stmt);
+       return;
+     }
+ 
+@@ -3645,7 +3721,9 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other,
+ 	}
+       else
+ 	/* *_1 = &MEM[(void *)&x + 8B].  */
+-	type->mark_escape (escape_cast_another_ptr, stmt);
++	if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT
++	    || replace_type_map.get (type->type) == NULL)
++	    type->mark_escape (escape_cast_another_ptr, stmt);
+     }
+   else if (type != d->type)
+     {
+@@ -4364,7 +4442,9 @@ ipa_struct_reorg::check_definition_assign (srdecl *decl,
+   /* Casts between pointers and integer are escaping.  */
+   if (gimple_assign_cast_p (stmt))
+     {
+-      type->mark_escape (escape_cast_int, stmt);
++      if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT
++	  || replace_type_map.get (type->type) == NULL)
++	type->mark_escape (escape_cast_int, stmt);
+       return;
+     }
+ 
+@@ -4684,7 +4764,9 @@ ipa_struct_reorg::check_use (srdecl *decl, gimple *stmt,
+   /* Casts between pointers and integer are escaping.  */
+   if (gimple_assign_cast_p (stmt))
+     {
+-      type->mark_escape (escape_cast_int, stmt);
++      if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT
++	  || replace_type_map.get (type->type) == NULL)
++	type->mark_escape (escape_cast_int, stmt);
+       return;
+     }
+ 
+@@ -5364,9 +5446,9 @@ ipa_struct_reorg::prune_escaped_types (void)
+ 
+   /* Prune types that escape, all references to those types
+      will have been removed in the above loops.  */
+-  /* The escape type is not deleted in STRUCT_REORDER_FIELDS,
+-     Then the type that contains the escaped type fields
+-     can find complete information.  */
++  /* The escape type is not deleted in current_layout_opt_level
++     after STRUCT_REORDER_FIELDS, then the type that contains
++     the escaped type fields can find complete information.  */
+   if (current_layout_opt_level < STRUCT_REORDER_FIELDS)
+     {
+       for (unsigned i = 0; i < types.length ();)
+@@ -5842,17 +5924,17 @@ ipa_struct_reorg::compress_ptr_to_offset (tree xhs, srtype *type,
+   tree step2 = gimplify_build2 (gsi, TRUNC_DIV_EXPR, long_unsigned_type_node,
+ 				step1, TYPE_SIZE_UNIT (type->newtype[0]));
+ 
+-  /* Emit gimple _X3 = _X2 + 1.  */
+-  tree step3 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node,
+-				step2, build_one_cst (long_unsigned_type_node));
++  /* Emit _X3 = (compressed_size) _X2.  */
++  tree pc_type = make_unsigned_type (compressed_size);
++  tree step3 = gimplify_build1 (gsi, NOP_EXPR, pc_type, step2);
+ 
+-  /* Emit _X4 = (compressed_size) _X3.  */
+-  tree step4 = gimplify_build1 (gsi, NOP_EXPR,
+-				make_unsigned_type (compressed_size), step3);
++  /* Emit gimple _X4 = _X3 + 1.  */
++  tree step4 = gimplify_build2 (gsi, PLUS_EXPR, pc_type, step3,
++				build_one_cst (pc_type));
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+-      print_generic_expr (dump_file, step3);
++      print_generic_expr (dump_file, step4);
+       fprintf (dump_file, "\n");
+     }
+   return step4;
+@@ -5894,7 +5976,7 @@ ipa_struct_reorg::decompress_offset_to_ptr (tree xhs, srtype *type,
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+-      print_generic_expr (dump_file, step3);
++      print_generic_expr (dump_file, step4);
+       fprintf (dump_file, "\n");
+     }
+   return step4;
+@@ -5967,7 +6049,10 @@ ipa_struct_reorg::pc_type_conversion_candidate_p (tree xhs)
+ 
+   if (TREE_CODE (xhs) == COMPONENT_REF)
+     {
+-      srtype *base_type = find_type (TREE_TYPE (TREE_OPERAND (xhs, 0)));
++      tree mem = TREE_OPERAND (xhs, 0);
++      if (TREE_CODE (mem) != MEM_REF)
++	return false;
++      srtype *base_type = find_type (TREE_TYPE (mem));
+       if (base_type != NULL && base_type->has_escaped ())
+ 	return pc_candidate_st_type_p (TREE_TYPE (xhs));
+ 
+@@ -6057,6 +6142,49 @@ ipa_struct_reorg::pc_direct_rewrite_chance_p (tree rhs, tree &new_rhs)
+   return false;
+ }
+ 
++/* The following cases can simplify the checking of null pointer:
++     1. rhs defined from POINTER_PLUS_EXPR.
++     2. rhs used as COMPONENT_REF in this basic block.  */
++
++bool
++ipa_struct_reorg::pc_simplify_chance_for_compress_p (gassign *stmt,
++						     tree rhs)
++{
++  imm_use_iterator imm_iter;
++  use_operand_p use_p;
++  gimple *def_stmt = SSA_NAME_DEF_STMT (rhs);
++
++  if (def_stmt && is_gimple_assign (def_stmt)
++      && gimple_assign_rhs_code (def_stmt) == POINTER_PLUS_EXPR)
++    return true;
++
++  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, rhs)
++    {
++      gimple *use_stmt = USE_STMT (use_p);
++      if (use_stmt->bb != stmt->bb || !is_gimple_assign (use_stmt))
++	continue;
++
++      tree use_rhs = gimple_assign_rhs1 (use_stmt);
++      if (TREE_CODE (use_rhs) == COMPONENT_REF
++	  && TREE_OPERAND (TREE_OPERAND (use_rhs, 0), 0) == rhs)
++	return true;
++    }
++  return false;
++}
++
++/* Perform compression directly without checking null pointer.  */
++
++bool
++ipa_struct_reorg::compress_candidate_without_check (gimple_stmt_iterator *gsi,
++						    tree rhs,
++						    tree &new_rhs)
++{
++  srtype *type = get_compression_candidate_type (rhs);
++  gcc_assert (type != NULL);
++  new_rhs = compress_ptr_to_offset (new_rhs, type, gsi);
++  return true;
++}
++
+ /* Perform pointer compression with check.  The conversion will be as shown in
+    the following example:
+      Orig bb:
+@@ -6157,6 +6285,9 @@ ipa_struct_reorg::compress_candidate (gassign *stmt, gimple_stmt_iterator *gsi,
+ {
+   if (pc_direct_rewrite_chance_p (rhs, new_rhs))
+     return true;
++  else if (current_layout_opt_level & POINTER_COMPRESSION_UNSAFE
++	   && pc_simplify_chance_for_compress_p (stmt, rhs))
++    return compress_candidate_without_check (gsi, rhs, new_rhs);
+ 
+   return compress_candidate_with_check (gsi, rhs, new_rhs);
+ }
+@@ -6219,6 +6350,80 @@ ipa_struct_reorg::create_bb_for_decompress_candidate (basic_block last_bb,
+   return new_bb;
+ }
+ 
++/* Try decompress candidate without check.  */
++
++bool
++ipa_struct_reorg::decompress_candidate_without_check (gimple_stmt_iterator *gsi,
++						      tree lhs, tree rhs,
++						      tree &new_lhs,
++						      tree &new_rhs)
++{
++  imm_use_iterator imm_iter;
++  use_operand_p use_p;
++  bool processed = false;
++
++  if (!gsi_one_before_end_p (*gsi))
++    {
++      gsi_next (gsi);
++      gimple *next_stmt = gsi_stmt (*gsi);
++      if (gimple_code (next_stmt) == GIMPLE_ASSIGN
++	  && gimple_assign_rhs_class (next_stmt) == GIMPLE_SINGLE_RHS)
++	{
++	  tree next_rhs = gimple_assign_rhs1 (next_stmt);
++	  /* If current lhs is used as rhs in the next stmt:
++	     -> _1 = t->s
++		tt->s = _1.  */
++	  if (lhs == next_rhs)
++	    {
++	      /* Check whether:
++	       1. the lhs is only used in the next stmt.
++	       2. the next lhs is candidate type.  */
++	      if (has_single_use (lhs)
++		  && pc_candidate_tree_p (gimple_assign_lhs (next_stmt)))
++		{
++		  processed = true;
++		  /* Copy directly without conversion after update type.  */
++		  TREE_TYPE (new_lhs)
++		    = make_unsigned_type (compressed_size);
++		}
++	    }
++	  /* -> _1 = t->s
++	        _2 = _1->s
++	     In this case, _1 might not be nullptr, so decompress it without
++	     check.  */
++	  else if (TREE_CODE (next_rhs) == COMPONENT_REF)
++	    {
++	      tree use_base = TREE_OPERAND (TREE_OPERAND (next_rhs, 0), 0);
++	      if (use_base == lhs)
++		{
++		  srtype *type = get_compression_candidate_type (rhs);
++		  gcc_assert (type != NULL);
++		  gsi_prev (gsi);
++		  tree new_ref = NULL_TREE;
++		  if (TREE_CODE (new_rhs) == MEM_REF)
++		    new_ref = new_rhs;
++		  else
++		    {
++		      tree base = TREE_OPERAND (TREE_OPERAND (new_rhs, 0), 0);
++		      tree new_mem_ref = build_simple_mem_ref (base);
++		      new_ref = build3 (COMPONENT_REF,
++					TREE_TYPE (new_rhs),
++					new_mem_ref,
++					TREE_OPERAND (new_rhs, 1),
++					NULL_TREE);
++		    }
++		  new_rhs = decompress_offset_to_ptr (new_ref, type, gsi);
++		  processed = true;
++		  gsi_next (gsi);
++		}
++	    }
++	}
++      gsi_prev (gsi);
++      return processed;
++    }
++  return false;
++}
++
+ /* Perform pointer decompression with check.  The conversion will be as shown
+    in the following example:
+      Orig bb:
+@@ -6320,7 +6525,10 @@ ipa_struct_reorg::decompress_candidate (gimple_stmt_iterator *gsi,
+ 					tree lhs, tree rhs, tree &new_lhs,
+ 					tree &new_rhs)
+ {
+-  // TODO: simplifiy check and rewrite will be pushed in next PR.
++  if (current_layout_opt_level & POINTER_COMPRESSION_UNSAFE
++      && decompress_candidate_without_check (gsi, lhs, rhs, new_lhs, new_rhs))
++    return true;
++
+   return decompress_candidate_with_check (gsi, rhs, new_rhs);
+ }
+ 
+@@ -6341,14 +6549,23 @@ ipa_struct_reorg::try_rewrite_with_pointer_compression (gassign *stmt,
+       if (pc_type_conversion_candidate_p (lhs))
+ 	{
+ 	  /* Transfer MEM[(struct *)_1].files = _4;
+-	     to MEM[(struct *)_1].files = (struct *)_4; */
+-	  new_rhs = fold_convert (TREE_TYPE (lhs), tmp_rhs);
++	     to _tmp = (struct *)_4;
++		MEM[(struct *)_1].files = _tmp; */
++	  tree tmp_reg = create_tmp_reg (TREE_TYPE (lhs));
++	  tree tmp_rhs_cvt = fold_convert (TREE_TYPE (lhs), tmp_rhs);
++	  gimple *copy_stmt = gimple_build_assign (tmp_reg, tmp_rhs_cvt);
++	  gsi_insert_before (gsi, copy_stmt, GSI_SAME_STMT);
++	  new_rhs = tmp_reg;
+ 	}
+       else if (pc_type_conversion_candidate_p (rhs))
+ 	{
+ 	  /* Transfer _4 = MEM[(struct *)_1].nodes;
+-	     to _4  = (new_struct *) MEM[(struct *)_1].nodes; */
+-	  new_rhs = fold_convert (TREE_TYPE (new_lhs), tmp_rhs);
++	     to _tmp = MEM[(struct *)_1].nodes;
++		_4  = (new_struct *) _tmp; */
++	  tree tmp_reg = create_tmp_reg (TREE_TYPE (new_lhs));
++	  gimple *copy_stmt = gimple_build_assign (tmp_reg, tmp_rhs);
++	  gsi_insert_before (gsi, copy_stmt, GSI_SAME_STMT);
++	  new_rhs = fold_convert (TREE_TYPE (new_lhs), tmp_reg);
+ 	}
+     }
+   else if (l && r)
+@@ -6544,7 +6761,7 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 
+       if (dump_file && (dump_flags & TDF_DETAILS))
+ 	{
+-	  fprintf (dump_file, "\nrewriting stamtenet:\n");
++	  fprintf (dump_file, "\nrewriting statement:\n");
+ 	  print_gimple_stmt (dump_file, stmt, 0);
+ 	}
+       tree newlhs[max_split];
+@@ -6809,7 +7026,8 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+    old statement is to be removed.  */
+ 
+ bool
+-ipa_struct_reorg::rewrite_cond (gcond *stmt, gimple_stmt_iterator *gsi)
++ipa_struct_reorg::rewrite_cond (gcond *stmt,
++				gimple_stmt_iterator *gsi ATTRIBUTE_UNUSED)
+ {
+   tree_code rhs_code = gimple_cond_code (stmt);
+ 
+@@ -7039,8 +7257,11 @@ ipa_struct_reorg::rewrite_functions (void)
+ 	      if (dump_file && (dump_flags & TDF_DETAILS))
+ 		{
+ 		  fprintf (dump_file, "\nNo rewrite:\n");
+-		  dump_function_to_file (current_function_decl, dump_file,
+-			dump_flags | TDF_VOPS);
++		  if (current_function_decl)
++		    dump_function_to_file (current_function_decl, dump_file,
++					   dump_flags | TDF_VOPS);
++		  else
++		    fprintf (dump_file, " no declaration\n");
+ 		}
+ 	      pop_cfun ();
+ 	    }
+@@ -7073,11 +7294,13 @@ ipa_struct_reorg::rewrite_functions (void)
+ 	  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+ 	    {
+-	      fprintf (dump_file, "==== Before create decls: %dth_%s ====\n\n",
++	      fprintf (dump_file, "==== Before create decls: %dth %s ====\n\n",
+ 		       i, f->node->name ());
+ 	      if (current_function_decl)
+ 		dump_function_to_file (current_function_decl, dump_file,
+ 				       dump_flags | TDF_VOPS);
++	      else
++		fprintf (dump_file, " no declaration\n");
+ 	    }
+ 	  pop_cfun ();
+ 	}
+@@ -7109,10 +7332,13 @@ ipa_struct_reorg::rewrite_functions (void)
+ 
+       if (dump_file && (dump_flags & TDF_DETAILS))
+ 	{
+-	  fprintf (dump_file, "\nBefore rewrite: %dth_%s\n",
++	  fprintf (dump_file, "\nBefore rewrite: %dth %s\n",
+ 		   i, f->node->name ());
+-	  dump_function_to_file (current_function_decl, dump_file,
+-				 dump_flags | TDF_VOPS);
++	  if (current_function_decl)
++	    dump_function_to_file (current_function_decl, dump_file,
++				   dump_flags | TDF_VOPS);
++	  else
++	    fprintf (dump_file, " no declaration\n");
+ 	  fprintf (dump_file, "\n======== Start to rewrite: %dth_%s ========\n",
+ 		   i, f->node->name ());
+ 	}
+@@ -7187,10 +7413,13 @@ ipa_struct_reorg::rewrite_functions (void)
+ 
+       if (dump_file)
+ 	{
+-	  fprintf (dump_file, "\nAfter rewrite: %dth_%s\n",
++	  fprintf (dump_file, "\nAfter rewrite: %dth %s\n",
+ 		   i, f->node->name ());
+-	  dump_function_to_file (current_function_decl, dump_file,
+-				 dump_flags | TDF_VOPS);
++	  if (current_function_decl)
++	    dump_function_to_file (current_function_decl, dump_file,
++				   dump_flags | TDF_VOPS);
++	  else
++	    fprintf (dump_file, " no declaration\n");
+ 	}
+ 
+       pop_cfun ();
+@@ -7309,18 +7538,24 @@ ipa_struct_reorg::check_and_prune_struct_for_pointer_compression (void)
+ 				" skip compression.\n");
+ 	  continue;
+ 	}
+-      if ((current_layout_opt_level & POINTER_COMPRESSION_SAFE)
+-	  && !type->has_legal_alloc_num)
++      if (!type->has_legal_alloc_num)
+ 	{
+-	  if (dump_file)
+-	    fprintf (dump_file, " has illegal struct array size,"
+-				" skip compression.\n");
+-	  continue;
++	  if (current_layout_opt_level & POINTER_COMPRESSION_UNSAFE)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, " has unknown alloc size, but in unsafe mode, so");
++	    }
++	  else
++	    {
++	      if (dump_file)
++		fprintf (dump_file, " has illegal struct array size, skip compression.\n");
++	      continue;
++	    }
+ 	}
+       pc_transform_num++;
+       type->pc_candidate = true;
+       if (dump_file)
+-	fprintf (dump_file, " attemps to do pointer compression.\n");
++	fprintf (dump_file, " attempts to do pointer compression.\n");
+     }
+ 
+   if (dump_file)
+@@ -7342,14 +7577,10 @@ init_pointer_size_for_pointer_compression (void)
+   switch (param_pointer_compression_size)
+     {
+       case 8:
+-	compressed_size = 8; // sizeof (uint8)
+-	break;
++      // FALLTHRU
+       case 16:
+-	compressed_size = 16; // sizeof (uint16)
+-	break;
+-      case 32:
+-	compressed_size = 32; // sizeof (uint32)
+-	break;
++      // FALLTHRU
++      case 32: compressed_size = param_pointer_compression_size; break;
+       default:
+ 	error ("Invalid pointer compression size, using the following param: "
+ 	       "\"--param compressed-pointer-size=[8,16,32]\"");
+@@ -7426,6 +7657,8 @@ public:
+     unsigned int level = 0;
+     switch (struct_layout_optimize_level)
+       {
++	case 5: level |= POINTER_COMPRESSION_UNSAFE;
++	// FALLTHRU
+ 	case 4: level |= POINTER_COMPRESSION_SAFE;
+ 	// FALLTHRU
+ 	case 3: level |= DEAD_FIELD_ELIMINATION;
+diff --git a/gcc/symbol-summary.h b/gcc/symbol-summary.h
+index 3fe64047c..4f896f4e4 100644
+--- a/gcc/symbol-summary.h
++++ b/gcc/symbol-summary.h
+@@ -103,16 +103,15 @@ protected:
+   /* Allocates new data that are stored within map.  */
+   T* allocate_new ()
+   {
+-    /* In structure optimizatons, we call new to ensure that
+-       the allocated memory is initialized to 0.  */
+-    if (flag_ipa_struct_reorg)
+-      return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T ()
+-		       : new T ();
+-
+     /* Call gcc_internal_because we do not want to call finalizer for
+        a type T.  We call dtor explicitly.  */
+-    return is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T ()
+-		     : m_allocator.allocate () ;
++    T* allocated = is_ggc () ? new (ggc_internal_alloc (sizeof (T))) T ()
++			     : m_allocator.allocate ();
++    /* In structure optimizatons, we call memset to ensure that
++       the allocated memory is initialized to 0.  */
++    if (flag_ipa_struct_reorg)
++      memset (allocated, 0, sizeof (T));
++    return allocated;
+   }
+ 
+   /* Release an item that is stored within map.  */
+@@ -121,12 +120,7 @@ protected:
+     if (is_ggc ())
+       ggc_delete (item);
+     else
+-      {
+-	if (flag_ipa_struct_reorg)
+-	  delete item;
+-	else
+-	  m_allocator.remove (item);
+-      }
++      m_allocator.remove (item);
+   }
+ 
+   /* Unregister all call-graph hooks.  */
+diff --git a/gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c b/gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c
+new file mode 100644
+index 000000000..c5e4968d9
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/csr_skip_void_struct_name.c
+@@ -0,0 +1,53 @@
++// Structures without names should not be optimized
++/* { dg-do compile } */
++#include 
++#include 
++
++typedef struct
++{
++  int a;
++  float b;
++  double s1;
++  double s2;
++  double s3;
++  double s4;
++  double s5;
++  double s6;
++  double s7;
++  double s8;
++} str_t1;
++
++#define N 1000
++
++int num;
++
++int
++main ()
++{
++  int i, r;
++
++  r = rand ();
++  num = r > N ? N : r;
++  str_t1 *p1 = calloc (num, sizeof (str_t1));
++
++  if (p1 == NULL)
++    return 0;
++
++  for (i = 0; i < num; i++)
++    p1[i].a = 1;
++
++  for (i = 0; i < num; i++)
++    p1[i].b = 2;
++
++  for (i = 0; i < num; i++)
++    if (p1[i].a != 1)
++      abort ();
++
++  for (i = 0; i < num; i++)
++    if (fabsf (p1[i].b - 2) > 0.0001)
++      abort ();
++
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump "No structures to transform in Complete Structure Relayout." "struct_reorg" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/pc_cast_int.c b/gcc/testsuite/gcc.dg/struct/pc_cast_int.c
+new file mode 100644
+index 000000000..6f67fc556
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/pc_cast_int.c
+@@ -0,0 +1,91 @@
++// Escape cast int for pointer compression
++/* { dg-do compile } */
++
++#include 
++#include 
++
++typedef struct node node_t;
++typedef struct node *node_p;
++
++typedef struct arc arc_t;
++typedef struct arc *arc_p;
++
++typedef struct network
++{
++  arc_p arcs;
++  arc_p sorted_arcs;
++  int x;
++  node_p nodes;
++  node_p stop_nodes;
++} network_t;
++
++struct node
++{
++  int64_t potential;
++  int orientation;
++  node_p child;
++  node_p pred;
++  node_p sibling;
++  node_p sibling_prev;
++  arc_p basic_arc;
++  arc_p firstout;
++  arc_p firstin;
++  arc_p arc_tmp;
++  int64_t flow;
++  int64_t depth;
++  int number;
++  int time;
++};
++
++struct arc
++{
++  int id;
++  int64_t cost;
++  node_p tail;
++  node_p head;
++  short ident;
++  arc_p nextout;
++  arc_p nextin;
++  int64_t flow;
++  int64_t org_cost;
++  network_t* net_add;
++};
++
++
++const int MAX = 100;
++network_t* net;
++node_p node;
++
++int
++main ()
++{
++  net = (network_t*) calloc (1, sizeof(network_t));
++  net->arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->nodes = (node_p) calloc (MAX, sizeof (node_t));
++  net->arcs->id = 100;
++
++  node = net->nodes;
++  node_p n1 = (node_p) 0x123456;
++
++  for (unsigned i = 0; i < MAX; i++)
++    {
++      node->pred = n1;
++      node = node + 1;
++    }
++
++  node = net->nodes;
++
++  for (unsigned i = 0; i < MAX; i++)
++    {
++      if (node->pred != n1)
++	{
++	  abort ();
++	}
++      node = node + 1;
++    }
++
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump "No structures to transform in pointer compression" "struct_reorg" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c b/gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c
+new file mode 100644
+index 000000000..d0b8d1afa
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/pc_compress_and_decomress.c
+@@ -0,0 +1,90 @@
++// Support basic pointer compression and decompression
++/* { dg-do compile } */
++
++#include 
++#include 
++
++typedef struct node node_t;
++typedef struct node *node_p;
++
++typedef struct arc arc_t;
++typedef struct arc *arc_p;
++
++typedef struct network
++{
++  arc_p arcs;
++  arc_p sorted_arcs;
++  int x;
++  node_p nodes;
++  node_p stop_nodes;
++} network_t;
++
++struct node
++{
++  int64_t potential;
++  int orientation;
++  node_p child;
++  node_p pred;
++  node_p sibling;
++  node_p sibling_prev;
++  arc_p basic_arc;
++  arc_p firstout;
++  arc_p firstin;
++  arc_p arc_tmp;
++  int64_t flow;
++  int64_t depth;
++  int number;
++  int time;
++};
++
++struct arc
++{
++  int id;
++  int64_t cost;
++  node_p tail;
++  node_p head;
++  short ident;
++  arc_p nextout;
++  arc_p nextin;
++  int64_t flow;
++  int64_t org_cost;
++  network_t* net_add;
++};
++
++
++const int MAX = 100;
++network_t* net;
++node_p node;
++
++int
++main ()
++{
++  net = (network_t*) calloc (1, sizeof(network_t));
++  net->arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->nodes = (node_p) calloc (MAX, sizeof (node_t));
++  net->arcs->id = 100;
++
++  node = net->nodes;
++
++  for (unsigned i = 0; i < MAX; i++)
++    {
++      node->pred = node;
++      node = node + 1;
++    }
++
++  node = net->nodes;
++
++  for (unsigned i = 0; i < MAX; i++)
++    {
++      if (node->pred != node)
++	{
++	  abort ();
++	}
++      node = node + 1;
++    }
++
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump "Number of structures to transform in pointer compression is 1" "struct_reorg" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/pc_ptr2void.c b/gcc/testsuite/gcc.dg/struct/pc_ptr2void.c
+new file mode 100644
+index 000000000..5022c1967
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/pc_ptr2void.c
+@@ -0,0 +1,87 @@
++// Partially support escape_cast_void for pointer compression.
++/* { dg-do compile } */
++
++#include 
++#include 
++
++typedef struct node node_t;
++typedef struct node *node_p;
++
++typedef struct arc arc_t;
++typedef struct arc *arc_p;
++
++typedef struct network
++{    
++  arc_p arcs, sorted_arcs;
++  int x;
++  node_p nodes, stop_nodes;
++} network_t;
++
++struct node
++{
++  int64_t potential;
++  int orientation;
++  node_p child;
++  node_p pred;
++  node_p sibling;
++  node_p sibling_prev;
++  arc_p basic_arc;
++  arc_p firstout;
++  arc_p firstin;
++  arc_p arc_tmp;
++  int64_t flow;
++  int64_t depth;
++  int number;
++  int time;
++};
++
++struct arc
++{
++  int id;
++  int64_t cost;
++  node_p tail;
++  node_p head;
++  short ident;
++  arc_p nextout;
++  arc_p nextin;
++  int64_t flow;
++  int64_t org_cost;
++};
++
++const int MAX = 100;
++network_t* net = NULL;
++int cnt = 0;
++
++__attribute__((noinline)) int
++primal_feasible (network_t *net)
++{
++  void* stop;
++  node_t *node;
++
++  node = net->nodes;
++  stop = (void *)net->stop_nodes;
++  for( node++; node < (node_t *)stop; node++ )
++    {
++      net->x = 1;
++      printf( "PRIMAL NETWORK SIMPLEX: ");
++    }
++  return 0;
++}
++
++int
++main ()
++{
++  net = (network_t*) calloc (1, 20);
++  net->nodes = calloc (MAX, sizeof (node_t));
++  net->stop_nodes = net->nodes + MAX - 1;
++  cnt = primal_feasible( net );
++
++  net = (network_t*) calloc (1, 20);
++  if( !(net->arcs) )
++    {
++      return -1;
++    }
++  return cnt;
++}
++
++/* { dg-final { scan-ipa-dump "Number of structures to transform in pointer compression is 1" "struct_reorg" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c b/gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c
+new file mode 100644
+index 000000000..98943c9b8
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/pc_simple_rewrite_pc.c
+@@ -0,0 +1,112 @@
++// Check simplify rewrite chance for pointer compression and decompression
++/* { dg-do compile } */
++
++#include 
++#include 
++
++typedef struct node node_t;
++typedef struct node *node_p;
++
++typedef struct arc arc_t;
++typedef struct arc *arc_p;
++
++typedef struct network
++{
++  arc_p arcs;
++  arc_p sorted_arcs;
++  int x;
++  node_p nodes;
++  node_p stop_nodes;
++} network_t;
++
++struct node
++{
++  int64_t potential;
++  int orientation;
++  node_p child;
++  node_p pred;
++  node_p sibling;
++  node_p sibling_prev;
++  arc_p basic_arc;
++  arc_p firstout;
++  arc_p firstin;
++  arc_p arc_tmp;
++  int64_t flow;
++  int64_t depth;
++  int number;
++  int time;
++};
++
++struct arc
++{
++  int id;
++  int64_t cost;
++  node_p tail;
++  node_p head;
++  short ident;
++  arc_p nextout;
++  arc_p nextin;
++  int64_t flow;
++  int64_t org_cost;
++  network_t* net_add;
++};
++
++
++const int MAX = 100;
++network_t* net;
++node_p node;
++arc_p arc;
++
++int
++main ()
++{
++  net = (network_t*) calloc (1, sizeof(network_t));
++  net->arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->nodes = (node_p) calloc (MAX, sizeof (node_t));
++  net->arcs->id = 100;
++
++  node = net->nodes;
++  arc = net->arcs;
++
++  for (unsigned i = 0; i < MAX; i++)
++    {
++      arc->head = node;
++      arc->head->child = node;
++      node->potential = i + 1;
++      arc->cost = arc->head->potential;
++      arc->tail = node->sibling;
++      if (i % 2)
++	node->pred = net->nodes + i;
++      else
++	node->pred = NULL;
++
++      if (node->pred && node->pred->child != NULL)
++	node->number = 0;
++      else
++	node->number = 1;
++
++      node = node + 1;
++      arc = arc + 1;
++    }
++
++  node = net->nodes;
++  arc = net->arcs;
++
++  for (unsigned i = 0; i < MAX; i++)
++    {
++      node_p t = i % 2 ? node : NULL;
++      int tt = i % 2 ? 0 : 1;
++      if (arc->head->pred != t || arc->cost == 0
++	  || arc->tail != node->sibling || node->number != tt)
++	{
++	  abort ();
++	}
++      arc = arc + 1;
++      node = node + 1;
++    }
++
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump "Number of structures to transform in pointer compression is 1" "struct_reorg" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c b/gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c
+new file mode 100644
+index 000000000..a0e191267
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/pc_skip_void_struct_name.c
+@@ -0,0 +1,53 @@
++// Structures without names should not be optimized
++/* { dg-do compile } */
++#include 
++#include 
++
++typedef struct
++{
++  int a;
++  float b;
++  double s1;
++  double s2;
++  double s3;
++  double s4;
++  double s5;
++  double s6;
++  double s7;
++  double s8;
++} str_t1;
++
++#define N 1000
++
++int num;
++
++int
++main ()
++{
++  int i, r;
++
++  r = rand ();
++  num = r > N ? N : r;
++  str_t1 *p1 = calloc (num, sizeof (str_t1));
++
++  if (p1 == NULL)
++    return 0;
++
++  for (i = 0; i < num; i++)
++    p1[i].a = 1;
++
++  for (i = 0; i < num; i++)
++    p1[i].b = 2;
++
++  for (i = 0; i < num; i++)
++    if (p1[i].a != 1)
++      abort ();
++
++  for (i = 0; i < num; i++)
++    if (fabsf (p1[i].b - 2) > 0.0001)
++      abort ();
++
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump "No structures to transform in pointer compression" "struct_reorg" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+index 278c4e4f5..c40474407 100644
+--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+@@ -47,6 +47,14 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/rf_*.c]] \
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfe*.c]] \
+ 	"" "-fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program"
+ 
++# -fipa-struct-reorg=4
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \
++	"" "-fipa-struct-reorg=4 -fdump-ipa-all -flto-partition=one -fwhole-program"
++
++# -fipa-struct-reorg=5
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \
++	"" "-fipa-struct-reorg=5 -fdump-ipa-all -flto-partition=one -fwhole-program"
++
+ # All done.
+ torture-finish
+ dg-finish
+-- 
+2.33.0
+
diff --git a/0031-AutoBOLT-Support-saving-feedback-count-info-to-ELF-s.patch b/0031-AutoBOLT-Support-saving-feedback-count-info-to-ELF-s.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5e16f31f15bc7f83a7ad4f9df489f95215a5c676
--- /dev/null
+++ b/0031-AutoBOLT-Support-saving-feedback-count-info-to-ELF-s.patch
@@ -0,0 +1,550 @@
+From 72531376df5ed93c2d945469368ba5514eca8407 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Tue, 5 Dec 2023 15:33:08 +0800
+Subject: [PATCH] [AutoBOLT] Support saving feedback count info to ELF segment
+ 1/3
+
+---
+ gcc/common.opt |   8 +
+ gcc/final.cc   | 405 ++++++++++++++++++++++++++++++++++++++++++++++++-
+ gcc/opts.cc    |  61 ++++++++
+ 3 files changed, 473 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index b01df919e..e69947fc2 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2546,6 +2546,14 @@ freorder-functions
+ Common Var(flag_reorder_functions) Optimization
+ Reorder functions to improve code placement.
+ 
++fauto-bolt
++Common Var(flag_auto_bolt)
++Generate profile from AutoFDO or PGO and do BOLT optimization after linkage.
++
++fauto-bolt=
++Common Joined RejectNegative
++Specify the feedback data directory required by BOLT-plugin.  The default is the current directory.
++
+ frerun-cse-after-loop
+ Common Var(flag_rerun_cse_after_loop) Optimization
+ Add a common subexpression elimination pass after loop optimizations.
+diff --git a/gcc/final.cc b/gcc/final.cc
+index a9868861b..d4c4fa08f 100644
+--- a/gcc/final.cc
++++ b/gcc/final.cc
+@@ -81,6 +81,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "rtl-iter.h"
+ #include "print-rtl.h"
+ #include "function-abi.h"
++#include "insn-codes.h"
+ #include "common/common-target.h"
+ 
+ #ifdef XCOFF_DEBUGGING_INFO
+@@ -4266,7 +4267,403 @@ leaf_renumber_regs_insn (rtx in_rtx)
+       }
+ }
+ #endif
+-
++
++#define ASM_FDO_SECTION_PREFIX ".text.fdo."
++
++#define ASM_FDO_CALLER_FLAG ".fdo.caller "
++#define ASM_FDO_CALLER_SIZE_FLAG ".fdo.caller.size "
++#define ASM_FDO_CALLER_BIND_FLAG ".fdo.caller.bind"
++
++#define ASM_FDO_CALLEE_FLAG ".fdo.callee"
++
++/* Return the relative offset address of the start instruction of BB,
++   or -1 if the BB contains no usable instruction.    */
++
++static int 
++get_bb_start_addr (basic_block bb)
++{
++  rtx_insn *insn;
++  FOR_BB_INSNS (bb, insn)
++    {
++      if (!INSN_P (insn))
++	{
++	  continue;
++	}
++      /* The jump target of call is not in this function, so
++	 it should be excluded.    */
++      if (CALL_P (insn))
++        {
++	  return -1;
++	}
++
++      int insn_code = recog_memoized (insn);
++
++      /* The instruction NOP in llvm-bolt belongs to the previous
++	 BB, so it needs to be skipped.   */
++      if (insn_code != CODE_FOR_nop)
++        {
++	  return INSN_ADDRESSES (INSN_UID (insn));
++	}
++    }
++  return -1;
++}
++
++/* Return the relative offset address of the end instruction of BB,
++   or -1 if it is empty or ends in a call instruction.    */
++
++static int
++get_bb_end_addr (basic_block bb)
++{
++  rtx_insn *insn;
++  int num_succs = EDGE_COUNT (bb->succs);
++  FOR_BB_INSNS_REVERSE (bb, insn)
++    {
++      if (!INSN_P (insn))
++        {
++	  continue;
++	}
++      /* The jump target of call is not in this function, so
++	 it should be excluded.     */
++      if (CALL_P (insn))
++        {
++	  return -1;
++	}
++      if ((num_succs == 1)
++	   || ((num_succs == 2) && any_condjump_p (insn)))
++	{
++	  return INSN_ADDRESSES (INSN_UID (insn));
++	}
++      else
++        {
++	  return -1;
++	}
++    }
++  return -1;
++}
++
++/* Return the end address of cfun.    */
++
++static int 
++get_function_end_addr ()
++{
++  rtx_insn *insn = get_last_insn ();
++  for (; insn != get_insns (); insn = PREV_INSN (insn))
++    {
++      if (!INSN_P (insn))
++        {
++	  continue;
++	}
++      return INSN_ADDRESSES (INSN_UID (insn));
++    }
++	  
++  return -1;
++} 
++
++/* Return the function profile status string.    */
++
++static const char * 
++get_function_profile_status () 
++{
++  const char *profile_status[] = {
++    "PROFILE_ABSENT",
++    "PROFILE_GUESSED",
++    "PROFILE_READ",
++    "PROFILE_LAST"     /* Last value, used by profile streaming.    */
++  };
++
++  return profile_status[profile_status_for_fn (cfun)];
++}
++
++/* Return the count from the feedback data, such as PGO or AFDO.    */
++
++inline static gcov_type 
++get_fdo_count (profile_count count)
++{
++  return count.quality () >= GUESSED 
++         ? count.to_gcov_type () : 0;
++}
++
++/* Return the profile quality string.    */
++
++static const char *
++get_fdo_count_quality (profile_count count)
++{
++  const char *profile_quality[] = {
++    "UNINITIALIZED_PROFILE",
++    "GUESSED_LOCAL",
++    "GUESSED_GLOBAL0",
++    "GUESSED_GLOBAL0_ADJUSTED",
++    "GUESSED",
++    "AFDO",
++    "ADJUSTED",
++    "PRECISE"
++  };
++
++  return profile_quality[count.quality ()];
++}
++
++static const char *
++alias_local_functions (const char *fnname)
++{
++  if (TREE_PUBLIC (cfun->decl))
++    {
++      return fnname;
++    }
++  return concat (fnname, "/", lbasename (dump_base_name), NULL);
++}
++
++/* Return function bind type string.    */
++
++static const char * 
++simple_get_function_bind ()
++{
++  const char *function_bind[] = {
++    "GLOBAL",
++    "WEAK",
++    "LOCAL",
++    "UNKNOWN"
++  };
++
++  if (TREE_PUBLIC (cfun->decl))
++    {
++      if (!(DECL_WEAK (cfun->decl)))
++        {
++	  return function_bind[0];
++	}
++      else
++        {
++	  return function_bind[1];
++	}
++    }
++  else  
++    {
++      return function_bind[2];
++    }
++		
++  return function_bind[3];
++}
++
++/* Dump the direct callee info for each call insn in BB (CALL_P).   */
++
++static void 
++dump_direct_callee_info_to_asm (basic_block bb, gcov_type call_count)
++{
++  rtx_insn *insn;
++  FOR_BB_INSNS (bb, insn)
++    {
++      if (insn && CALL_P (insn))
++        {
++	  tree callee = get_call_fndecl (insn);
++
++	  if (callee)
++	    {
++	      fprintf (asm_out_file, "\t.string \"%x\"\n",
++		       INSN_ADDRESSES (INSN_UID (insn)));
++
++	      fprintf (asm_out_file, "\t.string \"%s%s\"\n",
++		       ASM_FDO_CALLEE_FLAG,
++                       alias_local_functions (get_fnname_from_decl (callee)));
++
++              fprintf (asm_out_file,
++                       "\t.string \"" HOST_WIDE_INT_PRINT_DEC "\"\n",
++                       call_count);
++
++              if (dump_file)
++                {
++                  fprintf (dump_file, "call: %x --> %s \n",
++                           INSN_ADDRESSES (INSN_UID (insn)),
++                           alias_local_functions
++                           (get_fnname_from_decl (callee)));
++                }
++            }
++        }
++     } 
++}
++
++/* Dump the edge info into asm.    */
++static int
++dump_edge_jump_info_to_asm (basic_block bb, gcov_type bb_count)
++{
++  edge e;
++  edge_iterator ei;
++  gcov_type edge_total_count = 0;
++
++  FOR_EACH_EDGE (e, ei, bb->succs)
++    {
++      gcov_type edge_count = get_fdo_count (e->count ());
++      edge_total_count += edge_count;
++
++      int edge_start_addr = get_bb_end_addr (e->src);
++      int edge_end_addr = get_bb_start_addr(e->dest);
++
++      if (edge_start_addr == -1 || edge_end_addr == -1)
++        {
++          continue;
++        }
++      
++      /* This is a reserved assert for the original design.    If this
++         assert is found, use the address of the previous instruction
++         as edge_start_addr.   */
++      gcc_assert (edge_start_addr != edge_end_addr);
++
++      if (dump_file)
++        {
++          fprintf (dump_file, "edge: %x --> %x = (%ld)\n",
++                   edge_start_addr, edge_end_addr, edge_count);
++        }
++
++      if (edge_count > 0)
++        {
++          fprintf(asm_out_file, "\t.string \"%x\"\n", edge_start_addr);
++          fprintf(asm_out_file, "\t.string \"%x\"\n", edge_end_addr);
++          fprintf(asm_out_file, "\t.string \"" HOST_WIDE_INT_PRINT_DEC "\"\n",
++                  edge_count);
++        }
++    }
++
++    gcov_type call_count = MAX (edge_total_count, bb_count);
++    if (call_count > 0)
++      {
++        dump_direct_callee_info_to_asm (bb, call_count);
++      }
++}
++
++/* Dump the bb info into asm.    */
++
++static void 
++dump_bb_info_to_asm (basic_block bb, gcov_type bb_count)
++{
++  int bb_start_addr = get_bb_start_addr (bb);
++  if (bb_start_addr != -1)
++    {
++      fprintf (asm_out_file, "\t.string \"%x\"\n", bb_start_addr);
++      fprintf (asm_out_file, "\t.string \"" HOST_WIDE_INT_PRINT_DEC "\"\n",
++               bb_count);
++    }
++}
++
++/* Dump the function info into asm.    */
++
++static void 
++dump_function_info_to_asm (const char *fnname)
++{
++  fprintf (asm_out_file, "\t.string \"%s%s\"\n",
++           ASM_FDO_CALLER_FLAG, alias_local_functions (fnname));
++  fprintf (asm_out_file, "\t.string \"%s%d\"\n",
++           ASM_FDO_CALLER_SIZE_FLAG, get_function_end_addr ());
++  fprintf (asm_out_file, "\t.string \"%s%s\"\n",
++           ASM_FDO_CALLER_BIND_FLAG, simple_get_function_bind ());
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "\n FUNC_NAME: %s\n",
++               alias_local_functions (fnname));
++      fprintf (dump_file, " file: %s\n",
++               dump_base_name);
++      fprintf (dump_file, "profile_status: %s\n",
++               get_function_profile_status ());
++      fprintf (dump_file, " size: %x\n",
++               get_function_end_addr ());
++      fprintf (dump_file, " function_bind: %s\n",
++               simple_get_function_bind ());
++    }
++}
++
++/* Dump function profile into form AutoFDO or PGO to asm.    */
++
++static void
++dump_fdo_info_to_asm (const char *fnname)
++{
++  basic_block bb;
++
++  dump_function_info_to_asm (fnname);
++
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      gcov_type bb_count = get_fdo_count (bb->count);
++      if (bb_count == 0)
++        {
++          continue;
++        }
++      
++      if (dump_file)
++        {
++          fprintf (dump_file, "BB: %x --> %x = (%ld) [%s]\n",
++                   get_bb_start_addr (bb), get_bb_end_addr (bb),
++                   bb_count, get_fdo_count_quality (bb->count));
++        }
++
++      if (flag_profile_use) 
++        {
++          dump_edge_jump_info_to_asm (bb, bb_count);
++        }
++      else if (flag_auto_profile)
++        {
++          dump_bb_info_to_asm (bb, bb_count);
++        }
++    }
++}
++
++/* When the -fauto-bolt option is turned on, the .text.fdo section
++   will be generated in the *.s file if there is feedback information
++   from PGO or AutoFDO.  This section will be parsed by the BOLT-plugin.    */
++
++static void 
++dump_profile_to_elf_sections ()
++{
++  if (!flag_function_sections)
++    {
++      error ("-fauto-bolt should work with -ffunction-section");
++      return;
++    }
++  if (!flag_ipa_ra)
++    {
++      error ("-fauto-bolt should work with -fipa-ra");
++      return;
++    }
++  if (flag_align_jumps)
++    {
++      error ("-fauto-bolt is not supported with -falign-jumps");
++      return;
++    }
++  if (flag_align_labels)
++    {
++      error ("-fauto-bolt is not spported with -falign-loops");
++      return;
++    }
++  if (flag_align_loops)
++    {
++      error ("-fauto-bolt is not supported with -falign-loops");
++      return;
++    }
++  
++  /* Return if no feedback data.    */
++  if (!flag_profile_use && !flag_auto_profile)
++    {
++      error ("-fauto-bolt should use with -profile-use or -fauto-profile");
++      return;
++    }
++  
++  /* Avoid empty functions.    */
++  if (TREE_CODE (cfun->decl) != FUNCTION_DECL)
++    {
++      return;
++    }
++  int flags = SECTION_DEBUG | SECTION_EXCLUDE;
++  const char *fnname = get_fnname_from_decl (current_function_decl);
++  char *profile_fnname = NULL;
++
++  asprintf (&profile_fnname, "%s%s", ASM_FDO_SECTION_PREFIX, fnname);
++  switch_to_section (get_section (profile_fnname, flags, NULL));
++  dump_fdo_info_to_asm (fnname);
++
++  if (profile_fnname)
++    {
++      free (profile_fnname);
++      profile_fnname = NULL;
++    }
++}
++
+ /* Turn the RTL into assembly.  */
+ static unsigned int
+ rest_of_handle_final (void)
+@@ -4334,6 +4731,12 @@ rest_of_handle_final (void)
+     targetm.asm_out.destructor (XEXP (DECL_RTL (current_function_decl), 0),
+ 				decl_fini_priority_lookup
+ 				  (current_function_decl));
++    
++    if (flag_auto_bolt)
++      {
++        dump_profile_to_elf_sections ();
++      }
++
+   return 0;
+ }
+ 
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index b868d189e..6d57e7d69 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -1279,6 +1279,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
+   if (opts->x_flag_vtable_verify && opts->x_flag_lto)
+     sorry ("vtable verification is not supported with LTO");
+ 
++  /* Currently -fauto-bolt is not supported for LTO.    */
++  if (opts->x_flag_auto_bolt && opts->x_flag_lto)
++    sorry ("%<-fauto-bolt%> is not supported with LTO");
++
+   /* Control IPA optimizations based on different -flive-patching level.  */
+   if (opts->x_flag_live_patching)
+     control_options_for_live_patching (opts, opts_set,
+@@ -1291,6 +1295,58 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
+       = (opts->x_flag_unroll_loops
+          || opts->x_flag_peel_loops
+          || opts->x_optimize >= 3);
++  
++  if (opts->x_flag_auto_bolt)
++    {
++      /* Record the function section to facilitate the feedback
++	 data storage.  */
++      if (!opts->x_flag_function_sections)
++        {
++	  inform (loc,
++	          "%<-fauto-bolt%> should work with %<-ffunction-sections%>,"
++		  " enabling %<-ffunction-sections%>");
++	  opts->x_flag_function_sections = true;
++	}
++
++      /* Cancel the internal alignment of the function.  The binary
++	 optimizer bolt will cancel the internal alignment optimization
++	 of the function, so the alignment is meaningless at this time,
++	 and if not, it will bring trouble to the calculation of the
++	 offset address of the instruction.  */
++      if (opts->x_flag_align_jumps)
++        {
++	  inform (loc,
++		  "%<-fauto-bolt%> should not work with %<-falign-jumps%>,"
++		  " disabling %<-falign-jumps%>");
++	  opts->x_flag_align_jumps = false;
++	}
++
++      if (opts->x_flag_align_labels)
++        {
++	  inform (loc,
++		  "%<-fauto-bolt%> should not work with %<-falign-labels%>,"
++		  " disabling %<-falign-labels%>");
++	          opts->x_flag_align_labels = false;
++	}
++
++      if (opts->x_flag_align_loops)
++        {
++	  inform (loc,
++		  "%<-fauto-bolt%> should not work with %<-falign-loops%>,"
++		  " disabling %<-falign-loops%>");
++	  opts->x_flag_align_loops = false;
++	}
++
++      /* When parsing instructions in RTL phase, we need to know
++	 the call information of instructions to avoid being optimized.  */
++      if (!opts->x_flag_ipa_ra)
++        {
++	  inform (loc,
++		  "%<-fauto-bolt%> should work with %<-fipa-ra%>,"
++		  " enabling %<-fipa-ra%>");
++	  opts->x_flag_ipa_ra = true;
++	}
++    }
+ 
+   /* With -fcx-limited-range, we do cheap and quick complex arithmetic.  */
+   if (opts->x_flag_cx_limited_range)
+@@ -3226,6 +3282,11 @@ common_handle_option (struct gcc_options *opts,
+ 				&opts->x_flag_align_functions,
+ 				&opts->x_str_align_functions);
+       break;
++    
++    case OPT_fauto_bolt_:
++    case OPT_fauto_bolt:
++      /* Deferred.  */  
++      break;  
+ 
+     case OPT_ftabstop_:
+       /* It is documented that we silently ignore silly values.  */
+-- 
+2.33.0
+
diff --git a/0031-LoongArch-Handle-vectorized-copysign-x-1-expansion-e.patch b/0031-LoongArch-Handle-vectorized-copysign-x-1-expansion-e.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d5e2a3bf646bfc08a0af52671e0875b52f1e61d1
--- /dev/null
+++ b/0031-LoongArch-Handle-vectorized-copysign-x-1-expansion-e.patch
@@ -0,0 +1,197 @@
+From 61daf071708947ef8431ac36bc6c6b47339fdd2a Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 14 Nov 2023 00:17:19 +0800
+Subject: [PATCH 031/188] LoongArch: Handle vectorized copysign (x, -1)
+ expansion efficiently
+
+With LSX or LASX, copysign (x[i], -1) (or any negative constant) can be
+vectorized using [x]vbitseti.{w/d} instructions to directly set the
+signbits.
+
+Inspired by Tamar Christina's "AArch64: Handle copysign (x, -1) expansion
+efficiently" (r14-5289).
+
+gcc/ChangeLog:
+
+	* config/loongarch/lsx.md (copysign3): Allow operand[2] to
+	be an reg_or_vector_same_val_operand.  If it's a const vector
+	with same negative elements, expand the copysign with a bitset
+	instruction.  Otherwise, force it into an register.
+	* config/loongarch/lasx.md (copysign3): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/loongarch/vect-copysign-negconst.C: New test.
+	* g++.target/loongarch/vect-copysign-negconst-run.C: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 22 ++++++++-
+ gcc/config/loongarch/lsx.md                   | 22 ++++++++-
+ .../loongarch/vect-copysign-negconst-run.C    | 47 +++++++++++++++++++
+ .../loongarch/vect-copysign-negconst.C        | 27 +++++++++++
+ 4 files changed, 116 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/vect-copysign-negconst-run.C
+ create mode 100644 gcc/testsuite/g++.target/loongarch/vect-copysign-negconst.C
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index f0f2dd08d..2e11f0612 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -3136,11 +3136,31 @@
+ 	  (match_operand:FLASX 1 "register_operand")))
+    (set (match_dup 5)
+ 	(and:FLASX (match_dup 3)
+-		   (match_operand:FLASX 2 "register_operand")))
++		   (match_operand:FLASX 2 "reg_or_vector_same_val_operand")))
+    (set (match_operand:FLASX 0 "register_operand")
+ 	(ior:FLASX (match_dup 4) (match_dup 5)))]
+   "ISA_HAS_LASX"
+ {
++  /* copysign (x, -1) should instead be expanded as setting the sign
++     bit.  */
++  if (!REG_P (operands[2]))
++    {
++      rtx op2_elt = unwrap_const_vec_duplicate (operands[2]);
++      if (GET_CODE (op2_elt) == CONST_DOUBLE
++	  && real_isneg (CONST_DOUBLE_REAL_VALUE (op2_elt)))
++	{
++	  rtx n = GEN_INT (8 * GET_MODE_SIZE (mode) - 1);
++	  operands[0] = lowpart_subreg (mode, operands[0],
++					mode);
++	  operands[1] = lowpart_subreg (mode, operands[1],
++					mode);
++	  emit_insn (gen_lasx_xvbitseti_ (operands[0],
++						   operands[1], n));
++	  DONE;
++	}
++    }
++
++  operands[2] = force_reg (mode, operands[2]);
+   operands[3] = loongarch_build_signbit_mask (mode, 1, 0);
+ 
+   operands[4] = gen_reg_rtx (mode);
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 55c7d79a0..8ea41c85b 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -2873,11 +2873,31 @@
+ 	  (match_operand:FLSX 1 "register_operand")))
+    (set (match_dup 5)
+ 	(and:FLSX (match_dup 3)
+-		  (match_operand:FLSX 2 "register_operand")))
++		  (match_operand:FLSX 2 "reg_or_vector_same_val_operand")))
+    (set (match_operand:FLSX 0 "register_operand")
+ 	(ior:FLSX (match_dup 4) (match_dup 5)))]
+   "ISA_HAS_LSX"
+ {
++  /* copysign (x, -1) should instead be expanded as setting the sign
++     bit.  */
++  if (!REG_P (operands[2]))
++    {
++      rtx op2_elt = unwrap_const_vec_duplicate (operands[2]);
++      if (GET_CODE (op2_elt) == CONST_DOUBLE
++	  && real_isneg (CONST_DOUBLE_REAL_VALUE (op2_elt)))
++	{
++	  rtx n = GEN_INT (8 * GET_MODE_SIZE (mode) - 1);
++	  operands[0] = lowpart_subreg (mode, operands[0],
++					mode);
++	  operands[1] = lowpart_subreg (mode, operands[1],
++					mode);
++	  emit_insn (gen_lsx_vbitseti_ (operands[0], operands[1],
++						n));
++	  DONE;
++	}
++    }
++
++  operands[2] = force_reg (mode, operands[2]);
+   operands[3] = loongarch_build_signbit_mask (mode, 1, 0);
+ 
+   operands[4] = gen_reg_rtx (mode);
+diff --git a/gcc/testsuite/g++.target/loongarch/vect-copysign-negconst-run.C b/gcc/testsuite/g++.target/loongarch/vect-copysign-negconst-run.C
+new file mode 100644
+index 000000000..d2d5d15c9
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/vect-copysign-negconst-run.C
+@@ -0,0 +1,47 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -march=loongarch64 -mlasx -mno-strict-align" } */
++/* { dg-require-effective-target loongarch_asx_hw } */
++
++#include "vect-copysign-negconst.C"
++
++double d[] = {1.2, -3.4, -5.6, 7.8};
++float f[] = {1.2, -3.4, -5.6, 7.8, -9.0, -11.4, 51.4, 1919.810};
++
++double _abs(double x) { return __builtin_fabs (x); }
++float _abs(float x) { return __builtin_fabsf (x); }
++
++template 
++void
++check (T *arr, T *orig, int len)
++{
++  for (int i = 0; i < len; i++)
++    {
++      if (arr[i] > 0)
++	__builtin_trap ();
++      if (_abs (arr[i]) != _abs (orig[i]))
++	__builtin_trap ();
++    }
++}
++
++int
++main()
++{
++  double test_d[4];
++  float test_f[8];
++
++  __builtin_memcpy (test_d, d, sizeof (test_d));
++  force_negative<2> (test_d);
++  check (test_d, d, 2);
++
++  __builtin_memcpy (test_d, d, sizeof (test_d));
++  force_negative<4> (test_d);
++  check (test_d, d, 4);
++
++  __builtin_memcpy (test_f, f, sizeof (test_f));
++  force_negative<4> (test_f);
++  check (test_f, f, 4);
++
++  __builtin_memcpy (test_f, f, sizeof (test_f));
++  force_negative<8> (test_f);
++  check (test_f, f, 8);
++}
+diff --git a/gcc/testsuite/g++.target/loongarch/vect-copysign-negconst.C b/gcc/testsuite/g++.target/loongarch/vect-copysign-negconst.C
+new file mode 100644
+index 000000000..5e8820d2b
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/vect-copysign-negconst.C
+@@ -0,0 +1,27 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mlasx -mno-strict-align" } */
++/* { dg-final { scan-assembler "\txvbitseti.*63" } } */
++/* { dg-final { scan-assembler "\txvbitseti.*31" } } */
++/* { dg-final { scan-assembler "\tvbitseti.*63" } } */
++/* { dg-final { scan-assembler "\tvbitseti.*31" } } */
++
++template 
++__attribute__ ((noipa)) void
++force_negative (float *arr)
++{
++  for (int i = 0; i < N; i++)
++    arr[i] = __builtin_copysignf (arr[i], -2);
++}
++
++template 
++__attribute__ ((noipa)) void
++force_negative (double *arr)
++{
++  for (int i = 0; i < N; i++)
++    arr[i] = __builtin_copysign (arr[i], -3);
++}
++
++template void force_negative<4>(float *);
++template void force_negative<8>(float *);
++template void force_negative<2>(double *);
++template void force_negative<4>(double *);
+-- 
+2.43.0
+
diff --git a/0032-AutoBOLT-Add-bolt-linker-plugin-2-3.patch b/0032-AutoBOLT-Add-bolt-linker-plugin-2-3.patch
new file mode 100644
index 0000000000000000000000000000000000000000..118d1ca7b5ccdf239a603d50e1d3b1627a706ee4
--- /dev/null
+++ b/0032-AutoBOLT-Add-bolt-linker-plugin-2-3.patch
@@ -0,0 +1,34094 @@
+From 82f9f48406955a6150def998b69b4eace4bd51eb Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Thu, 7 Dec 2023 11:43:08 +0800
+Subject: [PATCH] [AutoBOLT] Add bolt linker plugin 2/3
+
+---
+ bolt-plugin/Makefile       |   675 ++
+ bolt-plugin/Makefile.am    |    43 +
+ bolt-plugin/Makefile.in    |   675 ++
+ bolt-plugin/aclocal.m4     | 10250 +++++++++++++++++
+ bolt-plugin/bolt-plugin.cc |  1153 ++
+ bolt-plugin/config.h.in    |   179 +
+ bolt-plugin/configure      | 20909 +++++++++++++++++++++++++++++++++++
+ bolt-plugin/configure.ac   |    60 +
+ gcc/common.opt             |    16 +
+ gcc/opts.cc                |    27 +-
+ 10 files changed, 33985 insertions(+), 2 deletions(-)
+ create mode 100644 bolt-plugin/Makefile
+ create mode 100644 bolt-plugin/Makefile.am
+ create mode 100644 bolt-plugin/Makefile.in
+ create mode 100644 bolt-plugin/aclocal.m4
+ create mode 100644 bolt-plugin/bolt-plugin.cc
+ create mode 100644 bolt-plugin/config.h.in
+ create mode 100755 bolt-plugin/configure
+ create mode 100644 bolt-plugin/configure.ac
+
+diff --git a/bolt-plugin/Makefile b/bolt-plugin/Makefile
+new file mode 100644
+index 000000000..82a4bc2c6
+--- /dev/null
++++ b/bolt-plugin/Makefile
+@@ -0,0 +1,675 @@
++# Makefile.in generated by automake 1.16.5 from Makefile.am.
++# Makefile.  Generated from Makefile.in by configure.
++
++# Copyright (C) 1994-2021 Free Software Foundation, Inc.
++
++# This Makefile.in is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
++# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
++# PARTICULAR PURPOSE.
++
++
++
++
++am__is_gnu_make = { \
++  if test -z '$(MAKELEVEL)'; then \
++    false; \
++  elif test -n '$(MAKE_HOST)'; then \
++    true; \
++  elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
++    true; \
++  else \
++    false; \
++  fi; \
++}
++am__make_running_with_option = \
++  case $${target_option-} in \
++      ?) ;; \
++      *) echo "am__make_running_with_option: internal error: invalid" \
++              "target option '$${target_option-}' specified" >&2; \
++         exit 1;; \
++  esac; \
++  has_opt=no; \
++  sane_makeflags=$$MAKEFLAGS; \
++  if $(am__is_gnu_make); then \
++    sane_makeflags=$$MFLAGS; \
++  else \
++    case $$MAKEFLAGS in \
++      *\\[\ \	]*) \
++        bs=\\; \
++        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
++          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
++    esac; \
++  fi; \
++  skip_next=no; \
++  strip_trailopt () \
++  { \
++    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
++  }; \
++  for flg in $$sane_makeflags; do \
++    test $$skip_next = yes && { skip_next=no; continue; }; \
++    case $$flg in \
++      *=*|--*) continue;; \
++        -*I) strip_trailopt 'I'; skip_next=yes;; \
++      -*I?*) strip_trailopt 'I';; \
++        -*O) strip_trailopt 'O'; skip_next=yes;; \
++      -*O?*) strip_trailopt 'O';; \
++        -*l) strip_trailopt 'l'; skip_next=yes;; \
++      -*l?*) strip_trailopt 'l';; \
++      -[dEDm]) skip_next=yes;; \
++      -[JT]) skip_next=yes;; \
++    esac; \
++    case $$flg in \
++      *$$target_option*) has_opt=yes; break;; \
++    esac; \
++  done; \
++  test $$has_opt = yes
++am__make_dryrun = (target_option=n; $(am__make_running_with_option))
++am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
++pkgdatadir = $(datadir)/bolt-plugin
++pkgincludedir = $(includedir)/bolt-plugin
++pkglibdir = $(libdir)/bolt-plugin
++pkglibexecdir = $(libexecdir)/bolt-plugin
++am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
++install_sh_DATA = $(install_sh) -c -m 644
++install_sh_PROGRAM = $(install_sh) -c
++install_sh_SCRIPT = $(install_sh) -c
++INSTALL_HEADER = $(INSTALL_DATA)
++transform = $(program_transform_name)
++NORMAL_INSTALL = :
++PRE_INSTALL = :
++POST_INSTALL = :
++NORMAL_UNINSTALL = :
++PRE_UNINSTALL = :
++POST_UNINSTALL = :
++build_triplet = aarch64-unknown-linux-gnu
++host_triplet = aarch64-unknown-linux-gnu
++target_triplet = aarch64-unknown-linux-gnu
++subdir = .
++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
++am__aclocal_m4_deps = $(top_srcdir)/configure.ac
++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
++	$(ACLOCAL_M4)
++DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
++	$(am__configure_deps)
++am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
++ configure.lineno config.status.lineno
++mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs
++CONFIG_HEADER = config.h
++CONFIG_CLEAN_FILES =
++CONFIG_CLEAN_VPATH_FILES =
++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
++am__vpath_adj = case $$p in \
++    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
++    *) f=$$p;; \
++  esac;
++am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
++am__install_max = 40
++am__nobase_strip_setup = \
++  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
++am__nobase_strip = \
++  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
++am__nobase_list = $(am__nobase_strip_setup); \
++  for p in $$list; do echo "$$p $$p"; done | \
++  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
++  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
++    if (++n[$$2] == $(am__install_max)) \
++      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
++    END { for (dir in files) print dir, files[dir] }'
++am__base_list = \
++  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
++  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
++am__uninstall_files_from_dir = { \
++  test -z "$$files" \
++    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
++    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
++         $(am__cd) "$$dir" && rm -f $$files; }; \
++  }
++am__installdirs = "$(DESTDIR)$(libexecsubdir)"
++LTLIBRARIES = $(libexecsub_LTLIBRARIES)
++am_libbolt_plugin_la_OBJECTS = bolt-plugin.lo
++libbolt_plugin_la_OBJECTS = $(am_libbolt_plugin_la_OBJECTS)
++AM_V_P = $(am__v_P_$(V))
++am__v_P_ = $(am__v_P_$(AM_DEFAULT_VERBOSITY))
++am__v_P_0 = false
++am__v_P_1 = :
++AM_V_GEN = $(am__v_GEN_$(V))
++am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
++am__v_GEN_0 = @echo "  GEN     " $@;
++am__v_GEN_1 = 
++AM_V_at = $(am__v_at_$(V))
++am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY))
++am__v_at_0 = @
++am__v_at_1 = 
++DEFAULT_INCLUDES = -I.
++depcomp =
++am__maybe_remake_depfiles =
++CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
++	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
++AM_V_lt = $(am__v_lt_$(V))
++am__v_lt_ = $(am__v_lt_$(AM_DEFAULT_VERBOSITY))
++am__v_lt_0 = --silent
++am__v_lt_1 = 
++LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
++	$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
++	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
++	$(AM_CXXFLAGS) $(CXXFLAGS)
++AM_V_CXX = $(am__v_CXX_$(V))
++am__v_CXX_ = $(am__v_CXX_$(AM_DEFAULT_VERBOSITY))
++am__v_CXX_0 = @echo "  CXX     " $@;
++am__v_CXX_1 = 
++CXXLD = $(CXX)
++CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
++	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
++	$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
++AM_V_CXXLD = $(am__v_CXXLD_$(V))
++am__v_CXXLD_ = $(am__v_CXXLD_$(AM_DEFAULT_VERBOSITY))
++am__v_CXXLD_0 = @echo "  CXXLD   " $@;
++am__v_CXXLD_1 = 
++SOURCES = $(libbolt_plugin_la_SOURCES)
++am__can_run_installinfo = \
++  case $$AM_UPDATE_INFO_DIR in \
++    n|no|NO) false;; \
++    *) (install-info --version) >/dev/null 2>&1;; \
++  esac
++am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \
++	config.h.in
++# Read a list of newline-separated strings from the standard input,
++# and print each of them once, without duplicates.  Input order is
++# *not* preserved.
++am__uniquify_input = $(AWK) '\
++  BEGIN { nonempty = 0; } \
++  { items[$$0] = 1; nonempty = 1; } \
++  END { if (nonempty) { for (i in items) print i; }; } \
++'
++# Make sure the list of sources is unique.  This is necessary because,
++# e.g., the same source file might be shared among _SOURCES variables
++# for different programs/libraries.
++am__define_uniq_tagged_files = \
++  list='$(am__tagged_files)'; \
++  unique=`for i in $$list; do \
++    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
++  done | $(am__uniquify_input)`
++AM_RECURSIVE_TARGETS = cscope
++ACLOCAL = ${SHELL} '/home/zzy/trans/gcc_1/missing' aclocal-1.16
++AMTAR = $${TAR-tar}
++AM_DEFAULT_VERBOSITY = 1
++AR = ar
++AUTOCONF = ${SHELL} '/home/zzy/trans/gcc_1/missing' autoconf
++AUTOHEADER = ${SHELL} '/home/zzy/trans/gcc_1/missing' autoheader
++AUTOMAKE = ${SHELL} '/home/zzy/trans/gcc_1/missing' automake-1.16
++AWK = gawk
++CC = gcc
++CCDEPMODE = depmode=gcc3
++CFLAGS = -g -O2
++CPPFLAGS = 
++CSCOPE = cscope
++CTAGS = ctags
++CXX = g++
++CXXCPP = g++ -E
++CXXDEPMODE = depmode=gcc3
++CXXFLAGS = -g -O2
++CYGPATH_W = echo
++DEFS = -DHAVE_CONFIG_H
++DEPDIR = .deps
++DLLTOOL = false
++DSYMUTIL = 
++DUMPBIN = 
++ECHO_C = 
++ECHO_N = -n
++ECHO_T = 
++EGREP = /usr/bin/grep -E
++ETAGS = etags
++EXEEXT = 
++FGREP = /usr/bin/grep -F
++FILECMD = file
++GREP = /usr/bin/grep
++INSTALL = /usr/bin/install -c
++INSTALL_DATA = ${INSTALL} -m 644
++INSTALL_PROGRAM = ${INSTALL}
++INSTALL_SCRIPT = ${INSTALL}
++INSTALL_STRIP_PROGRAM = $(install_sh) -c -s
++LD = /usr/bin/ld
++LDFLAGS = 
++LIBOBJS = 
++LIBS = 
++LIBTOOL = $(SHELL) $(top_builddir)/libtool
++LIPO = 
++LN_S = ln -s
++LTLIBOBJS = 
++LT_SYS_LIBRARY_PATH = 
++MAINT = #
++MAKEINFO = ${SHELL} '/home/zzy/trans/gcc_1/missing' makeinfo
++MANIFEST_TOOL = :
++MKDIR_P = /usr/bin/mkdir -p
++NM = /usr/bin/nm -B
++NMEDIT = 
++OBJDUMP = objdump
++OBJEXT = o
++OTOOL = 
++OTOOL64 = 
++PACKAGE = bolt-plugin
++PACKAGE_BUGREPORT = 
++PACKAGE_NAME = bolt plugin for ld
++PACKAGE_STRING = bolt plugin for ld 0.1
++PACKAGE_TARNAME = bolt-plugin
++PACKAGE_URL = 
++PACKAGE_VERSION = 0.1
++PATH_SEPARATOR = :
++RANLIB = ranlib
++SED = /usr/bin/sed
++SET_MAKE = 
++SHELL = /bin/sh
++STRIP = strip
++VERSION = 0.1
++abs_builddir = /home/zzy/trans/gcc_1/bolt-plugin
++abs_srcdir = /home/zzy/trans/gcc_1/bolt-plugin
++abs_top_builddir = /home/zzy/trans/gcc_1/bolt-plugin
++abs_top_srcdir = /home/zzy/trans/gcc_1/bolt-plugin
++ac_bolt_plugin_ldflags = -Wc,-static-libgcc
++ac_ct_AR = ar
++ac_ct_CC = gcc
++ac_ct_CXX = g++
++ac_ct_DUMPBIN = 
++accel_dir_suffix = 
++am__include = include
++am__leading_dot = .
++am__quote = 
++am__tar = $${TAR-tar} chof - "$$tardir"
++am__untar = $${TAR-tar} xf -
++bindir = ${exec_prefix}/bin
++build = aarch64-unknown-linux-gnu
++build_alias = 
++build_cpu = aarch64
++build_os = linux-gnu
++build_vendor = unknown
++builddir = .
++datadir = ${datarootdir}
++datarootdir = ${prefix}/share
++docdir = ${datarootdir}/doc/${PACKAGE_TARNAME}
++dvidir = ${docdir}
++exec_prefix = ${prefix}
++gcc_build_dir = ../..//gcc
++host = aarch64-unknown-linux-gnu
++host_alias = 
++host_cpu = aarch64
++host_os = linux-gnu
++host_vendor = unknown
++htmldir = ${docdir}
++includedir = ${prefix}/include
++infodir = ${datarootdir}/info
++install_sh = ${SHELL} /home/zzy/trans/gcc_1/install-sh
++libdir = ${exec_prefix}/lib
++libexecdir = ${exec_prefix}/libexec
++localedir = ${datarootdir}/locale
++localstatedir = ${prefix}/var
++mandir = ${datarootdir}/man
++mkdir_p = $(MKDIR_P)
++oldincludedir = /usr/include
++pdfdir = ${docdir}
++prefix = /usr/local
++program_transform_name = s,x,x,
++psdir = ${docdir}
++real_target_noncanonical = 
++runstatedir = ${localstatedir}/run
++sbindir = ${exec_prefix}/sbin
++sharedstatedir = ${prefix}/com
++srcdir = .
++sysconfdir = ${prefix}/etc
++target = aarch64-unknown-linux-gnu
++target_alias = 
++target_cpu = aarch64
++target_noncanonical := 
++target_os = linux-gnu
++target_vendor = unknown
++top_build_prefix = 
++top_builddir = .
++top_srcdir = .
++with_libiberty = ../libiberty
++ACLOCAL_AMFLAGS = -I .. -I ../config
++AUTOMAKE_OPTIONS = no-dependencies
++gcc_version := $(shell @get_gcc_base_ver@ $(top_srcdir)/../gcc/BASE-VER)
++libexecsubdir := $(libexecdir)/gcc/$(real_target_noncanonical)/$(gcc_version)$(accel_dir_suffix)
++AM_CPPFLAGS = -I$(top_srcdir)/../include $(DEFS) -std=c++11
++AM_CXXFLAGS = @ac_bolt_plugin_warn_cflags@ -std=c++11
++AM_LDFLAGS = -Wc,-static-libgcc
++AM_LIBTOOLFLAGS = --tag=disable-static
++libexecsub_LTLIBRARIES = libbolt_plugin.la
++in_gcc_libs = $(foreach lib, $(libexecsub_LTLIBRARIES), $(gcc_build_dir)/$(lib))
++libbolt_plugin_la_SOURCES = bolt-plugin.cc
++# Note that we intentionally override the bindir supplied by ACX_LT_HOST_FLAGS.
++libbolt_plugin_la_LDFLAGS = $(AM_LDFLAGS) $(lt_host_flags) -module \
++	-bindir $(libexecsubdir) $(if $(wildcard \
++	$(libiberty_noasan)),, $(if $(wildcard \
++	$(libiberty_pic)),,-Wc,$(libiberty)))
++# Can be simplified when libiberty becomes a normal convenience library.
++libiberty = $(with_libiberty)/libiberty.a
++libiberty_noasan = $(with_libiberty)/noasan/libiberty.a
++libiberty_pic = $(with_libiberty)/pic/libiberty.a
++Wc = -Wc,
++libbolt_plugin_la_LIBADD = \
++	$(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \
++	$(if $(wildcard $(libiberty_pic)),$(Wc)$(libiberty_pic),))
++
++libbolt_plugin_la_DEPENDENCIES = \
++	$(if $(wildcard $(libiberty_noasan)),$(libiberty_noasan), \
++	$(if $(wildcard $(libiberty_pic)),$(libiberty_pic),))
++
++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS))
++libbolt_plugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
++	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) ${AM_CPPFLAGS} $(CXXFLAGS) \
++	$(libbolt_plugin_la_LDFLAGS) $(LTLDFLAGS) -o $@
++
++all: config.h
++	$(MAKE) $(AM_MAKEFLAGS) all-am
++
++.SUFFIXES:
++.SUFFIXES: .cc .lo .o .obj
++am--refresh: Makefile
++	@:
++$(srcdir)/Makefile.in: # $(srcdir)/Makefile.am  $(am__configure_deps)
++	@for dep in $?; do \
++	  case '$(am__configure_deps)' in \
++	    *$$dep*) \
++	      echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
++	      $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
++		&& exit 0; \
++	      exit 1;; \
++	  esac; \
++	done; \
++	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
++	$(am__cd) $(top_srcdir) && \
++	  $(AUTOMAKE) --foreign Makefile
++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
++	@case '$?' in \
++	  *config.status*) \
++	    echo ' $(SHELL) ./config.status'; \
++	    $(SHELL) ./config.status;; \
++	  *) \
++	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \
++	    cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \
++	esac;
++
++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
++	$(SHELL) ./config.status --recheck
++
++$(top_srcdir)/configure: # $(am__configure_deps)
++	$(am__cd) $(srcdir) && $(AUTOCONF)
++$(ACLOCAL_M4): # $(am__aclocal_m4_deps)
++	$(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
++$(am__aclocal_m4_deps):
++
++config.h: stamp-h1
++	@test -f $@ || rm -f stamp-h1
++	@test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
++
++stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
++	@rm -f stamp-h1
++	cd $(top_builddir) && $(SHELL) ./config.status config.h
++$(srcdir)/config.h.in: # $(am__configure_deps) 
++	($(am__cd) $(top_srcdir) && $(AUTOHEADER))
++	rm -f stamp-h1
++	touch $@
++
++distclean-hdr:
++	-rm -f config.h stamp-h1
++
++install-libexecsubLTLIBRARIES: $(libexecsub_LTLIBRARIES)
++	@$(NORMAL_INSTALL)
++	@list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \
++	list2=; for p in $$list; do \
++	  if test -f $$p; then \
++	    list2="$$list2 $$p"; \
++	  else :; fi; \
++	done; \
++	test -z "$$list2" || { \
++	  echo " $(MKDIR_P) '$(DESTDIR)$(libexecsubdir)'"; \
++	  $(MKDIR_P) "$(DESTDIR)$(libexecsubdir)" || exit 1; \
++	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libexecsubdir)'"; \
++	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libexecsubdir)"; \
++	}
++
++uninstall-libexecsubLTLIBRARIES:
++	@$(NORMAL_UNINSTALL)
++	@list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \
++	for p in $$list; do \
++	  $(am__strip_dir) \
++	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libexecsubdir)/$$f'"; \
++	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libexecsubdir)/$$f"; \
++	done
++
++clean-libexecsubLTLIBRARIES:
++	-test -z "$(libexecsub_LTLIBRARIES)" || rm -f $(libexecsub_LTLIBRARIES)
++	@list='$(libexecsub_LTLIBRARIES)'; \
++	locs=`for p in $$list; do echo $$p; done | \
++	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
++	      sort -u`; \
++	test -z "$$locs" || { \
++	  echo rm -f $${locs}; \
++	  rm -f $${locs}; \
++	}
++
++libbolt_plugin.la: $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_DEPENDENCIES) $(EXTRA_libbolt_plugin_la_DEPENDENCIES) 
++	$(AM_V_GEN)$(libbolt_plugin_la_LINK) -rpath $(libexecsubdir) $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_LIBADD) $(LIBS)
++
++mostlyclean-compile:
++	-rm -f *.$(OBJEXT)
++
++distclean-compile:
++	-rm -f *.tab.c
++
++.cc.o:
++	$(AM_V_CXX)$(CXXCOMPILE) -c -o $@ $<
++
++.cc.obj:
++	$(AM_V_CXX)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
++
++.cc.lo:
++	$(AM_V_CXX)$(LTCXXCOMPILE) -c -o $@ $<
++
++mostlyclean-libtool:
++	-rm -f *.lo
++
++clean-libtool:
++	-rm -rf .libs _libs
++
++distclean-libtool:
++	-rm -f libtool config.lt
++
++ID: $(am__tagged_files)
++	$(am__define_uniq_tagged_files); mkid -fID $$unique
++tags: tags-am
++TAGS: tags
++
++tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
++	set x; \
++	here=`pwd`; \
++	$(am__define_uniq_tagged_files); \
++	shift; \
++	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
++	  test -n "$$unique" || unique=$$empty_fix; \
++	  if test $$# -gt 0; then \
++	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
++	      "$$@" $$unique; \
++	  else \
++	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
++	      $$unique; \
++	  fi; \
++	fi
++ctags: ctags-am
++
++CTAGS: ctags
++ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
++	$(am__define_uniq_tagged_files); \
++	test -z "$(CTAGS_ARGS)$$unique" \
++	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
++	     $$unique
++
++GTAGS:
++	here=`$(am__cd) $(top_builddir) && pwd` \
++	  && $(am__cd) $(top_srcdir) \
++	  && gtags -i $(GTAGS_ARGS) "$$here"
++cscope: cscope.files
++	test ! -s cscope.files \
++	  || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
++clean-cscope:
++	-rm -f cscope.files
++cscope.files: clean-cscope cscopelist
++cscopelist: cscopelist-am
++
++cscopelist-am: $(am__tagged_files)
++	list='$(am__tagged_files)'; \
++	case "$(srcdir)" in \
++	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
++	  *) sdir=$(subdir)/$(srcdir) ;; \
++	esac; \
++	for i in $$list; do \
++	  if test -f "$$i"; then \
++	    echo "$(subdir)/$$i"; \
++	  else \
++	    echo "$$sdir/$$i"; \
++	  fi; \
++	done >> $(top_builddir)/cscope.files
++
++distclean-tags:
++	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
++	-rm -f cscope.out cscope.in.out cscope.po.out cscope.files
++check-am: all-am
++check: check-am
++all-am: Makefile $(LTLIBRARIES) config.h
++installdirs:
++	for dir in "$(DESTDIR)$(libexecsubdir)"; do \
++	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
++	done
++install: install-am
++install-exec: install-exec-am
++install-data: install-data-am
++uninstall: uninstall-am
++
++install-am: all-am
++	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
++
++installcheck: installcheck-am
++install-strip:
++	if test -z '$(STRIP)'; then \
++	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
++	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
++	      install; \
++	else \
++	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
++	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
++	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
++	fi
++mostlyclean-generic:
++
++clean-generic:
++
++distclean-generic:
++	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
++	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
++
++maintainer-clean-generic:
++	@echo "This command is intended for maintainers to use"
++	@echo "it deletes files that may require special tools to rebuild."
++clean: clean-am
++
++clean-am: clean-generic clean-libexecsubLTLIBRARIES clean-libtool \
++	mostlyclean-am
++
++distclean: distclean-am
++	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
++	-rm -f Makefile
++distclean-am: clean-am distclean-compile distclean-generic \
++	distclean-hdr distclean-libtool distclean-tags
++
++dvi: dvi-am
++
++dvi-am:
++
++html: html-am
++
++html-am:
++
++info: info-am
++
++info-am:
++
++install-data-am:
++
++install-dvi: install-dvi-am
++
++install-dvi-am:
++
++install-exec-am: install-libexecsubLTLIBRARIES
++
++install-html: install-html-am
++
++install-html-am:
++
++install-info: install-info-am
++
++install-info-am:
++
++install-man:
++
++install-pdf: install-pdf-am
++
++install-pdf-am:
++
++install-ps: install-ps-am
++
++install-ps-am:
++
++installcheck-am:
++
++maintainer-clean: maintainer-clean-am
++	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
++	-rm -rf $(top_srcdir)/autom4te.cache
++	-rm -f Makefile
++maintainer-clean-am: distclean-am maintainer-clean-generic
++
++mostlyclean: mostlyclean-am
++
++mostlyclean-am: mostlyclean-compile mostlyclean-generic \
++	mostlyclean-libtool
++
++pdf: pdf-am
++
++pdf-am:
++
++ps: ps-am
++
++ps-am:
++
++uninstall-am: uninstall-libexecsubLTLIBRARIES
++
++.MAKE: all install-am install-strip
++
++.PHONY: CTAGS GTAGS TAGS all all-am am--refresh check check-am clean \
++	clean-cscope clean-generic clean-libexecsubLTLIBRARIES \
++	clean-libtool cscope cscopelist-am ctags ctags-am distclean \
++	distclean-compile distclean-generic distclean-hdr \
++	distclean-libtool distclean-tags dvi dvi-am html html-am info \
++	info-am install install-am install-data install-data-am \
++	install-dvi install-dvi-am install-exec install-exec-am \
++	install-html install-html-am install-info install-info-am \
++	install-libexecsubLTLIBRARIES install-man install-pdf \
++	install-pdf-am install-ps install-ps-am install-strip \
++	installcheck installcheck-am installdirs maintainer-clean \
++	maintainer-clean-generic mostlyclean mostlyclean-compile \
++	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
++	tags tags-am uninstall uninstall-am \
++	uninstall-libexecsubLTLIBRARIES
++
++.PRECIOUS: Makefile
++
++override CXXFLAGS := $(filter-out -fsanitize=address,$(CXXFLAGS))
++override LDFLAGS := $(filter-out -fsanitize=address,$(LDFLAGS))
++
++# Tell versions [3.59,3.63) of GNU make to not export all variables.
++# Otherwise a system limit (for SysV at least) may be exceeded.
++.NOEXPORT:
+diff --git a/bolt-plugin/Makefile.am b/bolt-plugin/Makefile.am
+new file mode 100644
+index 000000000..c21999237
+--- /dev/null
++++ b/bolt-plugin/Makefile.am
+@@ -0,0 +1,43 @@
++## Process this file with automake to produce Makefile.in.
++
++ACLOCAL_AMFLAGS = -I .. -I ../config
++AUTOMAKE_OPTIONS = no-dependencies
++
++gcc_version := $(shell @get_gcc_base_ver@ $(top_srcdir)/../gcc/BASE-VER)
++target_noncanonical := @target_noncanonical@
++libexecsubdir := $(libexecdir)/gcc/$(real_target_noncanonical)/$(gcc_version)$(accel_dir_suffix)
++
++AM_CPPFLAGS = -I$(top_srcdir)/../include $(DEFS) -std=c++11
++AM_CXXFLAGS = @ac_bolt_plugin_warn_cflags@ -std=c++11
++AM_LDFLAGS = @ac_bolt_plugin_ldflags@
++AM_LIBTOOLFLAGS = --tag=disable-static
++override CXXFLAGS := $(filter-out -fsanitize=address,$(CXXFLAGS))
++override LDFLAGS := $(filter-out -fsanitize=address,$(LDFLAGS))
++
++libexecsub_LTLIBRARIES = libbolt_plugin.la
++gcc_build_dir = @gcc_build_dir@
++in_gcc_libs = $(foreach lib, $(libexecsub_LTLIBRARIES), $(gcc_build_dir)/$(lib))
++
++libbolt_plugin_la_SOURCES = bolt-plugin.cc
++# Note that we intentionally override the bindir supplied by ACX_LT_HOST_FLAGS.
++libbolt_plugin_la_LDFLAGS = $(AM_LDFLAGS) \
++	$(lt_host_flags) -module -bindir $(libexecsubdir)
++# Can be simplified when libiberty becomes a normal convenience library.
++libiberty = $(with_libiberty)/libiberty.a
++libiberty_noasan = $(with_libiberty)/noasan/libiberty.a
++libiberty_pic = $(with_libiberty)/pic/libiberty.a
++Wc=-Wc,
++libbolt_plugin_la_LIBADD = \
++	$(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \
++	$(if $(wildcard $(libiberty_pic)),$(Wc)$(libiberty_pic),))
++libbolt_plugin_la_LDFLAGS += \
++	$(if $(wildcard $(libiberty_noasan)),, \
++	$(if $(wildcard $(libiberty_pic)),,-Wc,$(libiberty)))
++libbolt_plugin_la_DEPENDENCIES = \
++	$(if $(wildcard $(libiberty_noasan)),$(libiberty_noasan), \
++	$(if $(wildcard $(libiberty_pic)),$(libiberty_pic),))
++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS))
++libbolt_plugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
++	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) ${AM_CPPFLAGS} $(CXXFLAGS) \
++	$(libbolt_plugin_la_LDFLAGS) $(LTLDFLAGS) -o $@
++
+diff --git a/bolt-plugin/Makefile.in b/bolt-plugin/Makefile.in
+new file mode 100644
+index 000000000..11b59407e
+--- /dev/null
++++ b/bolt-plugin/Makefile.in
+@@ -0,0 +1,675 @@
++# Makefile.in generated by automake 1.16.5 from Makefile.am.
++# @configure_input@
++
++# Copyright (C) 1994-2021 Free Software Foundation, Inc.
++
++# This Makefile.in is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
++# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
++# PARTICULAR PURPOSE.
++
++@SET_MAKE@
++
++VPATH = @srcdir@
++am__is_gnu_make = { \
++  if test -z '$(MAKELEVEL)'; then \
++    false; \
++  elif test -n '$(MAKE_HOST)'; then \
++    true; \
++  elif test -n '$(MAKE_VERSION)' && test -n '$(CURDIR)'; then \
++    true; \
++  else \
++    false; \
++  fi; \
++}
++am__make_running_with_option = \
++  case $${target_option-} in \
++      ?) ;; \
++      *) echo "am__make_running_with_option: internal error: invalid" \
++              "target option '$${target_option-}' specified" >&2; \
++         exit 1;; \
++  esac; \
++  has_opt=no; \
++  sane_makeflags=$$MAKEFLAGS; \
++  if $(am__is_gnu_make); then \
++    sane_makeflags=$$MFLAGS; \
++  else \
++    case $$MAKEFLAGS in \
++      *\\[\ \	]*) \
++        bs=\\; \
++        sane_makeflags=`printf '%s\n' "$$MAKEFLAGS" \
++          | sed "s/$$bs$$bs[$$bs $$bs	]*//g"`;; \
++    esac; \
++  fi; \
++  skip_next=no; \
++  strip_trailopt () \
++  { \
++    flg=`printf '%s\n' "$$flg" | sed "s/$$1.*$$//"`; \
++  }; \
++  for flg in $$sane_makeflags; do \
++    test $$skip_next = yes && { skip_next=no; continue; }; \
++    case $$flg in \
++      *=*|--*) continue;; \
++        -*I) strip_trailopt 'I'; skip_next=yes;; \
++      -*I?*) strip_trailopt 'I';; \
++        -*O) strip_trailopt 'O'; skip_next=yes;; \
++      -*O?*) strip_trailopt 'O';; \
++        -*l) strip_trailopt 'l'; skip_next=yes;; \
++      -*l?*) strip_trailopt 'l';; \
++      -[dEDm]) skip_next=yes;; \
++      -[JT]) skip_next=yes;; \
++    esac; \
++    case $$flg in \
++      *$$target_option*) has_opt=yes; break;; \
++    esac; \
++  done; \
++  test $$has_opt = yes
++am__make_dryrun = (target_option=n; $(am__make_running_with_option))
++am__make_keepgoing = (target_option=k; $(am__make_running_with_option))
++pkgdatadir = $(datadir)/@PACKAGE@
++pkgincludedir = $(includedir)/@PACKAGE@
++pkglibdir = $(libdir)/@PACKAGE@
++pkglibexecdir = $(libexecdir)/@PACKAGE@
++am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
++install_sh_DATA = $(install_sh) -c -m 644
++install_sh_PROGRAM = $(install_sh) -c
++install_sh_SCRIPT = $(install_sh) -c
++INSTALL_HEADER = $(INSTALL_DATA)
++transform = $(program_transform_name)
++NORMAL_INSTALL = :
++PRE_INSTALL = :
++POST_INSTALL = :
++NORMAL_UNINSTALL = :
++PRE_UNINSTALL = :
++POST_UNINSTALL = :
++build_triplet = @build@
++host_triplet = @host@
++target_triplet = @target@
++subdir = .
++ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
++am__aclocal_m4_deps = $(top_srcdir)/configure.ac
++am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
++	$(ACLOCAL_M4)
++DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
++	$(am__configure_deps)
++am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
++ configure.lineno config.status.lineno
++mkinstalldirs = $(SHELL) $(top_srcdir)/../mkinstalldirs
++CONFIG_HEADER = config.h
++CONFIG_CLEAN_FILES =
++CONFIG_CLEAN_VPATH_FILES =
++am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`;
++am__vpath_adj = case $$p in \
++    $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \
++    *) f=$$p;; \
++  esac;
++am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`;
++am__install_max = 40
++am__nobase_strip_setup = \
++  srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'`
++am__nobase_strip = \
++  for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||"
++am__nobase_list = $(am__nobase_strip_setup); \
++  for p in $$list; do echo "$$p $$p"; done | \
++  sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \
++  $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \
++    if (++n[$$2] == $(am__install_max)) \
++      { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \
++    END { for (dir in files) print dir, files[dir] }'
++am__base_list = \
++  sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \
++  sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g'
++am__uninstall_files_from_dir = { \
++  test -z "$$files" \
++    || { test ! -d "$$dir" && test ! -f "$$dir" && test ! -r "$$dir"; } \
++    || { echo " ( cd '$$dir' && rm -f" $$files ")"; \
++         $(am__cd) "$$dir" && rm -f $$files; }; \
++  }
++am__installdirs = "$(DESTDIR)$(libexecsubdir)"
++LTLIBRARIES = $(libexecsub_LTLIBRARIES)
++am_libbolt_plugin_la_OBJECTS = bolt-plugin.lo
++libbolt_plugin_la_OBJECTS = $(am_libbolt_plugin_la_OBJECTS)
++AM_V_P = $(am__v_P_@AM_V@)
++am__v_P_ = $(am__v_P_@AM_DEFAULT_V@)
++am__v_P_0 = false
++am__v_P_1 = :
++AM_V_GEN = $(am__v_GEN_@AM_V@)
++am__v_GEN_ = $(am__v_GEN_@AM_DEFAULT_V@)
++am__v_GEN_0 = @echo "  GEN     " $@;
++am__v_GEN_1 = 
++AM_V_at = $(am__v_at_@AM_V@)
++am__v_at_ = $(am__v_at_@AM_DEFAULT_V@)
++am__v_at_0 = @
++am__v_at_1 = 
++DEFAULT_INCLUDES = -I.@am__isrc@
++depcomp =
++am__maybe_remake_depfiles =
++CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \
++	$(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS)
++AM_V_lt = $(am__v_lt_@AM_V@)
++am__v_lt_ = $(am__v_lt_@AM_DEFAULT_V@)
++am__v_lt_0 = --silent
++am__v_lt_1 = 
++LTCXXCOMPILE = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
++	$(LIBTOOLFLAGS) --mode=compile $(CXX) $(DEFS) \
++	$(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) \
++	$(AM_CXXFLAGS) $(CXXFLAGS)
++AM_V_CXX = $(am__v_CXX_@AM_V@)
++am__v_CXX_ = $(am__v_CXX_@AM_DEFAULT_V@)
++am__v_CXX_0 = @echo "  CXX     " $@;
++am__v_CXX_1 = 
++CXXLD = $(CXX)
++CXXLINK = $(LIBTOOL) $(AM_V_lt) --tag=CXX $(AM_LIBTOOLFLAGS) \
++	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) \
++	$(CXXFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
++AM_V_CXXLD = $(am__v_CXXLD_@AM_V@)
++am__v_CXXLD_ = $(am__v_CXXLD_@AM_DEFAULT_V@)
++am__v_CXXLD_0 = @echo "  CXXLD   " $@;
++am__v_CXXLD_1 = 
++SOURCES = $(libbolt_plugin_la_SOURCES)
++am__can_run_installinfo = \
++  case $$AM_UPDATE_INFO_DIR in \
++    n|no|NO) false;; \
++    *) (install-info --version) >/dev/null 2>&1;; \
++  esac
++am__tagged_files = $(HEADERS) $(SOURCES) $(TAGS_FILES) $(LISP) \
++	config.h.in
++# Read a list of newline-separated strings from the standard input,
++# and print each of them once, without duplicates.  Input order is
++# *not* preserved.
++am__uniquify_input = $(AWK) '\
++  BEGIN { nonempty = 0; } \
++  { items[$$0] = 1; nonempty = 1; } \
++  END { if (nonempty) { for (i in items) print i; }; } \
++'
++# Make sure the list of sources is unique.  This is necessary because,
++# e.g., the same source file might be shared among _SOURCES variables
++# for different programs/libraries.
++am__define_uniq_tagged_files = \
++  list='$(am__tagged_files)'; \
++  unique=`for i in $$list; do \
++    if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
++  done | $(am__uniquify_input)`
++AM_RECURSIVE_TARGETS = cscope
++ACLOCAL = @ACLOCAL@
++AMTAR = @AMTAR@
++AM_DEFAULT_VERBOSITY = @AM_DEFAULT_VERBOSITY@
++AR = @AR@
++AUTOCONF = @AUTOCONF@
++AUTOHEADER = @AUTOHEADER@
++AUTOMAKE = @AUTOMAKE@
++AWK = @AWK@
++CC = @CC@
++CCDEPMODE = @CCDEPMODE@
++CFLAGS = @CFLAGS@
++CPPFLAGS = @CPPFLAGS@
++CSCOPE = @CSCOPE@
++CTAGS = @CTAGS@
++CXX = @CXX@
++CXXCPP = @CXXCPP@
++CXXDEPMODE = @CXXDEPMODE@
++CXXFLAGS = @CXXFLAGS@
++CYGPATH_W = @CYGPATH_W@
++DEFS = @DEFS@
++DEPDIR = @DEPDIR@
++DLLTOOL = @DLLTOOL@
++DSYMUTIL = @DSYMUTIL@
++DUMPBIN = @DUMPBIN@
++ECHO_C = @ECHO_C@
++ECHO_N = @ECHO_N@
++ECHO_T = @ECHO_T@
++EGREP = @EGREP@
++ETAGS = @ETAGS@
++EXEEXT = @EXEEXT@
++FGREP = @FGREP@
++FILECMD = @FILECMD@
++GREP = @GREP@
++INSTALL = @INSTALL@
++INSTALL_DATA = @INSTALL_DATA@
++INSTALL_PROGRAM = @INSTALL_PROGRAM@
++INSTALL_SCRIPT = @INSTALL_SCRIPT@
++INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
++LD = @LD@
++LDFLAGS = @LDFLAGS@
++LIBOBJS = @LIBOBJS@
++LIBS = @LIBS@
++LIBTOOL = @LIBTOOL@
++LIPO = @LIPO@
++LN_S = @LN_S@
++LTLIBOBJS = @LTLIBOBJS@
++LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
++MAINT = @MAINT@
++MAKEINFO = @MAKEINFO@
++MANIFEST_TOOL = @MANIFEST_TOOL@
++MKDIR_P = @MKDIR_P@
++NM = @NM@
++NMEDIT = @NMEDIT@
++OBJDUMP = @OBJDUMP@
++OBJEXT = @OBJEXT@
++OTOOL = @OTOOL@
++OTOOL64 = @OTOOL64@
++PACKAGE = @PACKAGE@
++PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
++PACKAGE_NAME = @PACKAGE_NAME@
++PACKAGE_STRING = @PACKAGE_STRING@
++PACKAGE_TARNAME = @PACKAGE_TARNAME@
++PACKAGE_URL = @PACKAGE_URL@
++PACKAGE_VERSION = @PACKAGE_VERSION@
++PATH_SEPARATOR = @PATH_SEPARATOR@
++RANLIB = @RANLIB@
++SED = @SED@
++SET_MAKE = @SET_MAKE@
++SHELL = @SHELL@
++STRIP = @STRIP@
++VERSION = @VERSION@
++abs_builddir = @abs_builddir@
++abs_srcdir = @abs_srcdir@
++abs_top_builddir = @abs_top_builddir@
++abs_top_srcdir = @abs_top_srcdir@
++ac_bolt_plugin_ldflags = @ac_bolt_plugin_ldflags@
++ac_ct_AR = @ac_ct_AR@
++ac_ct_CC = @ac_ct_CC@
++ac_ct_CXX = @ac_ct_CXX@
++ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
++accel_dir_suffix = @accel_dir_suffix@
++am__include = @am__include@
++am__leading_dot = @am__leading_dot@
++am__quote = @am__quote@
++am__tar = @am__tar@
++am__untar = @am__untar@
++bindir = @bindir@
++build = @build@
++build_alias = @build_alias@
++build_cpu = @build_cpu@
++build_os = @build_os@
++build_vendor = @build_vendor@
++builddir = @builddir@
++datadir = @datadir@
++datarootdir = @datarootdir@
++docdir = @docdir@
++dvidir = @dvidir@
++exec_prefix = @exec_prefix@
++gcc_build_dir = @gcc_build_dir@
++host = @host@
++host_alias = @host_alias@
++host_cpu = @host_cpu@
++host_os = @host_os@
++host_vendor = @host_vendor@
++htmldir = @htmldir@
++includedir = @includedir@
++infodir = @infodir@
++install_sh = @install_sh@
++libdir = @libdir@
++libexecdir = @libexecdir@
++localedir = @localedir@
++localstatedir = @localstatedir@
++mandir = @mandir@
++mkdir_p = @mkdir_p@
++oldincludedir = @oldincludedir@
++pdfdir = @pdfdir@
++prefix = @prefix@
++program_transform_name = @program_transform_name@
++psdir = @psdir@
++real_target_noncanonical = @real_target_noncanonical@
++runstatedir = @runstatedir@
++sbindir = @sbindir@
++sharedstatedir = @sharedstatedir@
++srcdir = @srcdir@
++sysconfdir = @sysconfdir@
++target = @target@
++target_alias = @target_alias@
++target_cpu = @target_cpu@
++target_noncanonical := @target_noncanonical@
++target_os = @target_os@
++target_vendor = @target_vendor@
++top_build_prefix = @top_build_prefix@
++top_builddir = @top_builddir@
++top_srcdir = @top_srcdir@
++with_libiberty = @with_libiberty@
++ACLOCAL_AMFLAGS = -I .. -I ../config
++AUTOMAKE_OPTIONS = no-dependencies
++gcc_version := $(shell @get_gcc_base_ver@ $(top_srcdir)/../gcc/BASE-VER)
++libexecsubdir := $(libexecdir)/gcc/$(real_target_noncanonical)/$(gcc_version)$(accel_dir_suffix)
++AM_CPPFLAGS = -I$(top_srcdir)/../include $(DEFS) -std=c++11
++AM_CXXFLAGS = @ac_bolt_plugin_warn_cflags@ -std=c++11
++AM_LDFLAGS = @ac_bolt_plugin_ldflags@
++AM_LIBTOOLFLAGS = --tag=disable-static
++libexecsub_LTLIBRARIES = libbolt_plugin.la
++in_gcc_libs = $(foreach lib, $(libexecsub_LTLIBRARIES), $(gcc_build_dir)/$(lib))
++libbolt_plugin_la_SOURCES = bolt-plugin.cc
++# Note that we intentionally override the bindir supplied by ACX_LT_HOST_FLAGS.
++libbolt_plugin_la_LDFLAGS = $(AM_LDFLAGS) $(lt_host_flags) -module \
++	-bindir $(libexecsubdir) $(if $(wildcard \
++	$(libiberty_noasan)),, $(if $(wildcard \
++	$(libiberty_pic)),,-Wc,$(libiberty)))
++# Can be simplified when libiberty becomes a normal convenience library.
++libiberty = $(with_libiberty)/libiberty.a
++libiberty_noasan = $(with_libiberty)/noasan/libiberty.a
++libiberty_pic = $(with_libiberty)/pic/libiberty.a
++Wc = -Wc,
++libbolt_plugin_la_LIBADD = \
++	$(if $(wildcard $(libiberty_noasan)),$(Wc)$(libiberty_noasan), \
++	$(if $(wildcard $(libiberty_pic)),$(Wc)$(libiberty_pic),))
++
++libbolt_plugin_la_DEPENDENCIES = \
++	$(if $(wildcard $(libiberty_noasan)),$(libiberty_noasan), \
++	$(if $(wildcard $(libiberty_pic)),$(libiberty_pic),))
++
++LTLDFLAGS = $(shell $(SHELL) $(top_srcdir)/../libtool-ldflags $(LDFLAGS))
++libbolt_plugin_la_LINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) \
++	$(LIBTOOLFLAGS) --mode=link $(CXXLD) $(AM_CXXFLAGS) ${AM_CPPFLAGS} $(CXXFLAGS) \
++	$(libbolt_plugin_la_LDFLAGS) $(LTLDFLAGS) -o $@
++
++all: config.h
++	$(MAKE) $(AM_MAKEFLAGS) all-am
++
++.SUFFIXES:
++.SUFFIXES: .cc .lo .o .obj
++am--refresh: Makefile
++	@:
++$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.am  $(am__configure_deps)
++	@for dep in $?; do \
++	  case '$(am__configure_deps)' in \
++	    *$$dep*) \
++	      echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \
++	      $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \
++		&& exit 0; \
++	      exit 1;; \
++	  esac; \
++	done; \
++	echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \
++	$(am__cd) $(top_srcdir) && \
++	  $(AUTOMAKE) --foreign Makefile
++Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status
++	@case '$?' in \
++	  *config.status*) \
++	    echo ' $(SHELL) ./config.status'; \
++	    $(SHELL) ./config.status;; \
++	  *) \
++	    echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles)'; \
++	    cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__maybe_remake_depfiles);; \
++	esac;
++
++$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
++	$(SHELL) ./config.status --recheck
++
++$(top_srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(am__configure_deps)
++	$(am__cd) $(srcdir) && $(AUTOCONF)
++$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ $(am__aclocal_m4_deps)
++	$(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
++$(am__aclocal_m4_deps):
++
++config.h: stamp-h1
++	@test -f $@ || rm -f stamp-h1
++	@test -f $@ || $(MAKE) $(AM_MAKEFLAGS) stamp-h1
++
++stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
++	@rm -f stamp-h1
++	cd $(top_builddir) && $(SHELL) ./config.status config.h
++$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(am__configure_deps) 
++	($(am__cd) $(top_srcdir) && $(AUTOHEADER))
++	rm -f stamp-h1
++	touch $@
++
++distclean-hdr:
++	-rm -f config.h stamp-h1
++
++install-libexecsubLTLIBRARIES: $(libexecsub_LTLIBRARIES)
++	@$(NORMAL_INSTALL)
++	@list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \
++	list2=; for p in $$list; do \
++	  if test -f $$p; then \
++	    list2="$$list2 $$p"; \
++	  else :; fi; \
++	done; \
++	test -z "$$list2" || { \
++	  echo " $(MKDIR_P) '$(DESTDIR)$(libexecsubdir)'"; \
++	  $(MKDIR_P) "$(DESTDIR)$(libexecsubdir)" || exit 1; \
++	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libexecsubdir)'"; \
++	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libexecsubdir)"; \
++	}
++
++uninstall-libexecsubLTLIBRARIES:
++	@$(NORMAL_UNINSTALL)
++	@list='$(libexecsub_LTLIBRARIES)'; test -n "$(libexecsubdir)" || list=; \
++	for p in $$list; do \
++	  $(am__strip_dir) \
++	  echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libexecsubdir)/$$f'"; \
++	  $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libexecsubdir)/$$f"; \
++	done
++
++clean-libexecsubLTLIBRARIES:
++	-test -z "$(libexecsub_LTLIBRARIES)" || rm -f $(libexecsub_LTLIBRARIES)
++	@list='$(libexecsub_LTLIBRARIES)'; \
++	locs=`for p in $$list; do echo $$p; done | \
++	      sed 's|^[^/]*$$|.|; s|/[^/]*$$||; s|$$|/so_locations|' | \
++	      sort -u`; \
++	test -z "$$locs" || { \
++	  echo rm -f $${locs}; \
++	  rm -f $${locs}; \
++	}
++
++libbolt_plugin.la: $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_DEPENDENCIES) $(EXTRA_libbolt_plugin_la_DEPENDENCIES) 
++	$(AM_V_GEN)$(libbolt_plugin_la_LINK) -rpath $(libexecsubdir) $(libbolt_plugin_la_OBJECTS) $(libbolt_plugin_la_LIBADD) $(LIBS)
++
++mostlyclean-compile:
++	-rm -f *.$(OBJEXT)
++
++distclean-compile:
++	-rm -f *.tab.c
++
++.cc.o:
++	$(AM_V_CXX)$(CXXCOMPILE) -c -o $@ $<
++
++.cc.obj:
++	$(AM_V_CXX)$(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'`
++
++.cc.lo:
++	$(AM_V_CXX)$(LTCXXCOMPILE) -c -o $@ $<
++
++mostlyclean-libtool:
++	-rm -f *.lo
++
++clean-libtool:
++	-rm -rf .libs _libs
++
++distclean-libtool:
++	-rm -f libtool config.lt
++
++ID: $(am__tagged_files)
++	$(am__define_uniq_tagged_files); mkid -fID $$unique
++tags: tags-am
++TAGS: tags
++
++tags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
++	set x; \
++	here=`pwd`; \
++	$(am__define_uniq_tagged_files); \
++	shift; \
++	if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \
++	  test -n "$$unique" || unique=$$empty_fix; \
++	  if test $$# -gt 0; then \
++	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
++	      "$$@" $$unique; \
++	  else \
++	    $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
++	      $$unique; \
++	  fi; \
++	fi
++ctags: ctags-am
++
++CTAGS: ctags
++ctags-am: $(TAGS_DEPENDENCIES) $(am__tagged_files)
++	$(am__define_uniq_tagged_files); \
++	test -z "$(CTAGS_ARGS)$$unique" \
++	  || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
++	     $$unique
++
++GTAGS:
++	here=`$(am__cd) $(top_builddir) && pwd` \
++	  && $(am__cd) $(top_srcdir) \
++	  && gtags -i $(GTAGS_ARGS) "$$here"
++cscope: cscope.files
++	test ! -s cscope.files \
++	  || $(CSCOPE) -b -q $(AM_CSCOPEFLAGS) $(CSCOPEFLAGS) -i cscope.files $(CSCOPE_ARGS)
++clean-cscope:
++	-rm -f cscope.files
++cscope.files: clean-cscope cscopelist
++cscopelist: cscopelist-am
++
++cscopelist-am: $(am__tagged_files)
++	list='$(am__tagged_files)'; \
++	case "$(srcdir)" in \
++	  [\\/]* | ?:[\\/]*) sdir="$(srcdir)" ;; \
++	  *) sdir=$(subdir)/$(srcdir) ;; \
++	esac; \
++	for i in $$list; do \
++	  if test -f "$$i"; then \
++	    echo "$(subdir)/$$i"; \
++	  else \
++	    echo "$$sdir/$$i"; \
++	  fi; \
++	done >> $(top_builddir)/cscope.files
++
++distclean-tags:
++	-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
++	-rm -f cscope.out cscope.in.out cscope.po.out cscope.files
++check-am: all-am
++check: check-am
++all-am: Makefile $(LTLIBRARIES) config.h
++installdirs:
++	for dir in "$(DESTDIR)$(libexecsubdir)"; do \
++	  test -z "$$dir" || $(MKDIR_P) "$$dir"; \
++	done
++install: install-am
++install-exec: install-exec-am
++install-data: install-data-am
++uninstall: uninstall-am
++
++install-am: all-am
++	@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
++
++installcheck: installcheck-am
++install-strip:
++	if test -z '$(STRIP)'; then \
++	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
++	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
++	      install; \
++	else \
++	  $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
++	    install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
++	    "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \
++	fi
++mostlyclean-generic:
++
++clean-generic:
++
++distclean-generic:
++	-test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES)
++	-test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES)
++
++maintainer-clean-generic:
++	@echo "This command is intended for maintainers to use"
++	@echo "it deletes files that may require special tools to rebuild."
++clean: clean-am
++
++clean-am: clean-generic clean-libexecsubLTLIBRARIES clean-libtool \
++	mostlyclean-am
++
++distclean: distclean-am
++	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
++	-rm -f Makefile
++distclean-am: clean-am distclean-compile distclean-generic \
++	distclean-hdr distclean-libtool distclean-tags
++
++dvi: dvi-am
++
++dvi-am:
++
++html: html-am
++
++html-am:
++
++info: info-am
++
++info-am:
++
++install-data-am:
++
++install-dvi: install-dvi-am
++
++install-dvi-am:
++
++install-exec-am: install-libexecsubLTLIBRARIES
++
++install-html: install-html-am
++
++install-html-am:
++
++install-info: install-info-am
++
++install-info-am:
++
++install-man:
++
++install-pdf: install-pdf-am
++
++install-pdf-am:
++
++install-ps: install-ps-am
++
++install-ps-am:
++
++installcheck-am:
++
++maintainer-clean: maintainer-clean-am
++	-rm -f $(am__CONFIG_DISTCLEAN_FILES)
++	-rm -rf $(top_srcdir)/autom4te.cache
++	-rm -f Makefile
++maintainer-clean-am: distclean-am maintainer-clean-generic
++
++mostlyclean: mostlyclean-am
++
++mostlyclean-am: mostlyclean-compile mostlyclean-generic \
++	mostlyclean-libtool
++
++pdf: pdf-am
++
++pdf-am:
++
++ps: ps-am
++
++ps-am:
++
++uninstall-am: uninstall-libexecsubLTLIBRARIES
++
++.MAKE: all install-am install-strip
++
++.PHONY: CTAGS GTAGS TAGS all all-am am--refresh check check-am clean \
++	clean-cscope clean-generic clean-libexecsubLTLIBRARIES \
++	clean-libtool cscope cscopelist-am ctags ctags-am distclean \
++	distclean-compile distclean-generic distclean-hdr \
++	distclean-libtool distclean-tags dvi dvi-am html html-am info \
++	info-am install install-am install-data install-data-am \
++	install-dvi install-dvi-am install-exec install-exec-am \
++	install-html install-html-am install-info install-info-am \
++	install-libexecsubLTLIBRARIES install-man install-pdf \
++	install-pdf-am install-ps install-ps-am install-strip \
++	installcheck installcheck-am installdirs maintainer-clean \
++	maintainer-clean-generic mostlyclean mostlyclean-compile \
++	mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
++	tags tags-am uninstall uninstall-am \
++	uninstall-libexecsubLTLIBRARIES
++
++.PRECIOUS: Makefile
++
++override CXXFLAGS := $(filter-out -fsanitize=address,$(CXXFLAGS))
++override LDFLAGS := $(filter-out -fsanitize=address,$(LDFLAGS))
++
++# Tell versions [3.59,3.63) of GNU make to not export all variables.
++# Otherwise a system limit (for SysV at least) may be exceeded.
++.NOEXPORT:
+diff --git a/bolt-plugin/aclocal.m4 b/bolt-plugin/aclocal.m4
+new file mode 100644
+index 000000000..679f2baa4
+--- /dev/null
++++ b/bolt-plugin/aclocal.m4
+@@ -0,0 +1,10250 @@
++# generated automatically by aclocal 1.16.5 -*- Autoconf -*-
++
++# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
++# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
++# PARTICULAR PURPOSE.
++
++m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
++m4_ifndef([AC_AUTOCONF_VERSION],
++  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
++m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],,
++[m4_warning([this file was generated for autoconf 2.71.
++You have another version of autoconf.  It may work, but is not guaranteed to.
++If you have problems, you may need to regenerate the build system entirely.
++To do so, use the procedure documented by the package, typically 'autoreconf'.])])
++
++# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
++#
++#   Copyright (C) 1996-2001, 2003-2019, 2021-2022 Free Software
++#   Foundation, Inc.
++#   Written by Gordon Matzigkeit, 1996
++#
++# This file is free software; the Free Software Foundation gives
++# unlimited permission to copy and/or distribute it, with or without
++# modifications, as long as this notice is preserved.
++
++m4_define([_LT_COPYING], [dnl
++# Copyright (C) 2014 Free Software Foundation, Inc.
++# This is free software; see the source for copying conditions.  There is NO
++# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++
++# GNU Libtool is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of of the License, or
++# (at your option) any later version.
++#
++# As a special exception to the GNU General Public License, if you
++# distribute this file as part of a program or library that is built
++# using GNU Libtool, you may include this file under the  same
++# distribution terms that you use for the rest of that program.
++#
++# GNU Libtool is distributed in the hope that it will be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see .
++])
++
++# serial 59 LT_INIT
++
++
++# LT_PREREQ(VERSION)
++# ------------------
++# Complain and exit if this libtool version is less that VERSION.
++m4_defun([LT_PREREQ],
++[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1,
++       [m4_default([$3],
++		   [m4_fatal([Libtool version $1 or higher is required],
++		             63)])],
++       [$2])])
++
++
++# _LT_CHECK_BUILDDIR
++# ------------------
++# Complain if the absolute build directory name contains unusual characters
++m4_defun([_LT_CHECK_BUILDDIR],
++[case `pwd` in
++  *\ * | *\	*)
++    AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;;
++esac
++])
++
++
++# LT_INIT([OPTIONS])
++# ------------------
++AC_DEFUN([LT_INIT],
++[AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK
++AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
++AC_BEFORE([$0], [LT_LANG])dnl
++AC_BEFORE([$0], [LT_OUTPUT])dnl
++AC_BEFORE([$0], [LTDL_INIT])dnl
++m4_require([_LT_CHECK_BUILDDIR])dnl
++
++dnl Autoconf doesn't catch unexpanded LT_ macros by default:
++m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl
++m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl
++dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4
++dnl unless we require an AC_DEFUNed macro:
++AC_REQUIRE([LTOPTIONS_VERSION])dnl
++AC_REQUIRE([LTSUGAR_VERSION])dnl
++AC_REQUIRE([LTVERSION_VERSION])dnl
++AC_REQUIRE([LTOBSOLETE_VERSION])dnl
++m4_require([_LT_PROG_LTMAIN])dnl
++
++_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}])
++
++dnl Parse OPTIONS
++_LT_SET_OPTIONS([$0], [$1])
++
++# This can be used to rebuild libtool when needed
++LIBTOOL_DEPS=$ltmain
++
++# Always use our own libtool.
++LIBTOOL='$(SHELL) $(top_builddir)/libtool'
++AC_SUBST(LIBTOOL)dnl
++
++_LT_SETUP
++
++# Only expand once:
++m4_define([LT_INIT])
++])# LT_INIT
++
++# Old names:
++AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT])
++AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_PROG_LIBTOOL], [])
++dnl AC_DEFUN([AM_PROG_LIBTOOL], [])
++
++
++# _LT_PREPARE_CC_BASENAME
++# -----------------------
++m4_defun([_LT_PREPARE_CC_BASENAME], [
++# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
++func_cc_basename ()
++{
++    for cc_temp in @S|@*""; do
++      case $cc_temp in
++        compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;;
++        distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;;
++        \-*) ;;
++        *) break;;
++      esac
++    done
++    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
++}
++])# _LT_PREPARE_CC_BASENAME
++
++
++# _LT_CC_BASENAME(CC)
++# -------------------
++# It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME,
++# but that macro is also expanded into generated libtool script, which
++# arranges for $SED and $ECHO to be set by different means.
++m4_defun([_LT_CC_BASENAME],
++[m4_require([_LT_PREPARE_CC_BASENAME])dnl
++AC_REQUIRE([_LT_DECL_SED])dnl
++AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
++func_cc_basename $1
++cc_basename=$func_cc_basename_result
++])
++
++
++# _LT_FILEUTILS_DEFAULTS
++# ----------------------
++# It is okay to use these file commands and assume they have been set
++# sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'.
++m4_defun([_LT_FILEUTILS_DEFAULTS],
++[: ${CP="cp -f"}
++: ${MV="mv -f"}
++: ${RM="rm -f"}
++])# _LT_FILEUTILS_DEFAULTS
++
++
++# _LT_SETUP
++# ---------
++m4_defun([_LT_SETUP],
++[AC_REQUIRE([AC_CANONICAL_HOST])dnl
++AC_REQUIRE([AC_CANONICAL_BUILD])dnl
++AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl
++AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
++
++_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl
++dnl
++_LT_DECL([], [host_alias], [0], [The host system])dnl
++_LT_DECL([], [host], [0])dnl
++_LT_DECL([], [host_os], [0])dnl
++dnl
++_LT_DECL([], [build_alias], [0], [The build system])dnl
++_LT_DECL([], [build], [0])dnl
++_LT_DECL([], [build_os], [0])dnl
++dnl
++AC_REQUIRE([AC_PROG_CC])dnl
++AC_REQUIRE([LT_PATH_LD])dnl
++AC_REQUIRE([LT_PATH_NM])dnl
++dnl
++AC_REQUIRE([AC_PROG_LN_S])dnl
++test -z "$LN_S" && LN_S="ln -s"
++_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl
++dnl
++AC_REQUIRE([LT_CMD_MAX_LEN])dnl
++_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl
++_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl
++dnl
++m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++m4_require([_LT_CHECK_SHELL_FEATURES])dnl
++m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl
++m4_require([_LT_CMD_RELOAD])dnl
++m4_require([_LT_DECL_FILECMD])dnl
++m4_require([_LT_CHECK_MAGIC_METHOD])dnl
++m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl
++m4_require([_LT_CMD_OLD_ARCHIVE])dnl
++m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
++m4_require([_LT_WITH_SYSROOT])dnl
++m4_require([_LT_CMD_TRUNCATE])dnl
++
++_LT_CONFIG_LIBTOOL_INIT([
++# See if we are running on zsh, and set the options that allow our
++# commands through without removal of \ escapes INIT.
++if test -n "\${ZSH_VERSION+set}"; then
++   setopt NO_GLOB_SUBST
++fi
++])
++if test -n "${ZSH_VERSION+set}"; then
++   setopt NO_GLOB_SUBST
++fi
++
++_LT_CHECK_OBJDIR
++
++m4_require([_LT_TAG_COMPILER])dnl
++
++case $host_os in
++aix3*)
++  # AIX sometimes has problems with the GCC collect2 program.  For some
++  # reason, if we set the COLLECT_NAMES environment variable, the problems
++  # vanish in a puff of smoke.
++  if test set != "${COLLECT_NAMES+set}"; then
++    COLLECT_NAMES=
++    export COLLECT_NAMES
++  fi
++  ;;
++esac
++
++# Global variables:
++ofile=libtool
++can_build_shared=yes
++
++# All known linkers require a '.a' archive for static linking (except MSVC and
++# ICC, which need '.lib').
++libext=a
++
++with_gnu_ld=$lt_cv_prog_gnu_ld
++
++old_CC=$CC
++old_CFLAGS=$CFLAGS
++
++# Set sane defaults for various variables
++test -z "$CC" && CC=cc
++test -z "$LTCC" && LTCC=$CC
++test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
++test -z "$LD" && LD=ld
++test -z "$ac_objext" && ac_objext=o
++
++_LT_CC_BASENAME([$compiler])
++
++# Only perform the check for file, if the check method requires it
++test -z "$MAGIC_CMD" && MAGIC_CMD=file
++case $deplibs_check_method in
++file_magic*)
++  if test "$file_magic_cmd" = '$MAGIC_CMD'; then
++    _LT_PATH_MAGIC
++  fi
++  ;;
++esac
++
++# Use C for the default configuration in the libtool script
++LT_SUPPORTED_TAG([CC])
++_LT_LANG_C_CONFIG
++_LT_LANG_DEFAULT_CONFIG
++_LT_CONFIG_COMMANDS
++])# _LT_SETUP
++
++
++# _LT_PREPARE_SED_QUOTE_VARS
++# --------------------------
++# Define a few sed substitution that help us do robust quoting.
++m4_defun([_LT_PREPARE_SED_QUOTE_VARS],
++[# Backslashify metacharacters that are still active within
++# double-quoted strings.
++sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
++
++# Same as above, but do not quote variable references.
++double_quote_subst='s/\([["`\\]]\)/\\\1/g'
++
++# Sed substitution to delay expansion of an escaped shell variable in a
++# double_quote_subst'ed string.
++delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
++
++# Sed substitution to delay expansion of an escaped single quote.
++delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
++
++# Sed substitution to avoid accidental globbing in evaled expressions
++no_glob_subst='s/\*/\\\*/g'
++])
++
++# _LT_PROG_LTMAIN
++# ---------------
++# Note that this code is called both from 'configure', and 'config.status'
++# now that we use AC_CONFIG_COMMANDS to generate libtool.  Notably,
++# 'config.status' has no value for ac_aux_dir unless we are using Automake,
++# so we pass a copy along to make sure it has a sensible value anyway.
++m4_defun([_LT_PROG_LTMAIN],
++[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl
++_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir'])
++ltmain=$ac_aux_dir/ltmain.sh
++])# _LT_PROG_LTMAIN
++
++
++
++# So that we can recreate a full libtool script including additional
++# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS
++# in macros and then make a single call at the end using the 'libtool'
++# label.
++
++
++# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS])
++# ----------------------------------------
++# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later.
++m4_define([_LT_CONFIG_LIBTOOL_INIT],
++[m4_ifval([$1],
++          [m4_append([_LT_OUTPUT_LIBTOOL_INIT],
++                     [$1
++])])])
++
++# Initialize.
++m4_define([_LT_OUTPUT_LIBTOOL_INIT])
++
++
++# _LT_CONFIG_LIBTOOL([COMMANDS])
++# ------------------------------
++# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later.
++m4_define([_LT_CONFIG_LIBTOOL],
++[m4_ifval([$1],
++          [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS],
++                     [$1
++])])])
++
++# Initialize.
++m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS])
++
++
++# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS])
++# -----------------------------------------------------
++m4_defun([_LT_CONFIG_SAVE_COMMANDS],
++[_LT_CONFIG_LIBTOOL([$1])
++_LT_CONFIG_LIBTOOL_INIT([$2])
++])
++
++
++# _LT_FORMAT_COMMENT([COMMENT])
++# -----------------------------
++# Add leading comment marks to the start of each line, and a trailing
++# full-stop to the whole comment if one is not present already.
++m4_define([_LT_FORMAT_COMMENT],
++[m4_ifval([$1], [
++m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])],
++              [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.])
++)])
++
++
++
++
++
++# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?])
++# -------------------------------------------------------------------
++# CONFIGNAME is the name given to the value in the libtool script.
++# VARNAME is the (base) name used in the configure script.
++# VALUE may be 0, 1 or 2 for a computed quote escaped value based on
++# VARNAME.  Any other value will be used directly.
++m4_define([_LT_DECL],
++[lt_if_append_uniq([lt_decl_varnames], [$2], [, ],
++    [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name],
++	[m4_ifval([$1], [$1], [$2])])
++    lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3])
++    m4_ifval([$4],
++	[lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])])
++    lt_dict_add_subkey([lt_decl_dict], [$2],
++	[tagged?], [m4_ifval([$5], [yes], [no])])])
++])
++
++
++# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION])
++# --------------------------------------------------------
++m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])])
++
++
++# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...])
++# ------------------------------------------------
++m4_define([lt_decl_tag_varnames],
++[_lt_decl_filter([tagged?], [yes], $@)])
++
++
++# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..])
++# ---------------------------------------------------------
++m4_define([_lt_decl_filter],
++[m4_case([$#],
++  [0], [m4_fatal([$0: too few arguments: $#])],
++  [1], [m4_fatal([$0: too few arguments: $#: $1])],
++  [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)],
++  [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)],
++  [lt_dict_filter([lt_decl_dict], $@)])[]dnl
++])
++
++
++# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...])
++# --------------------------------------------------
++m4_define([lt_decl_quote_varnames],
++[_lt_decl_filter([value], [1], $@)])
++
++
++# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...])
++# ---------------------------------------------------
++m4_define([lt_decl_dquote_varnames],
++[_lt_decl_filter([value], [2], $@)])
++
++
++# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...])
++# ---------------------------------------------------
++m4_define([lt_decl_varnames_tagged],
++[m4_assert([$# <= 2])dnl
++_$0(m4_quote(m4_default([$1], [[, ]])),
++    m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]),
++    m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))])
++m4_define([_lt_decl_varnames_tagged],
++[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])])
++
++
++# lt_decl_all_varnames([SEPARATOR], [VARNAME1...])
++# ------------------------------------------------
++m4_define([lt_decl_all_varnames],
++[_$0(m4_quote(m4_default([$1], [[, ]])),
++     m4_if([$2], [],
++	   m4_quote(lt_decl_varnames),
++	m4_quote(m4_shift($@))))[]dnl
++])
++m4_define([_lt_decl_all_varnames],
++[lt_join($@, lt_decl_varnames_tagged([$1],
++			lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl
++])
++
++
++# _LT_CONFIG_STATUS_DECLARE([VARNAME])
++# ------------------------------------
++# Quote a variable value, and forward it to 'config.status' so that its
++# declaration there will have the same value as in 'configure'.  VARNAME
++# must have a single quote delimited value for this to work.
++m4_define([_LT_CONFIG_STATUS_DECLARE],
++[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`'])
++
++
++# _LT_CONFIG_STATUS_DECLARATIONS
++# ------------------------------
++# We delimit libtool config variables with single quotes, so when
++# we write them to config.status, we have to be sure to quote all
++# embedded single quotes properly.  In configure, this macro expands
++# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
++#
++#    ='`$ECHO "$" | $SED "$delay_single_quote_subst"`'
++m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
++[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
++    [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
++
++
++# _LT_LIBTOOL_TAGS
++# ----------------
++# Output comment and list of tags supported by the script
++m4_defun([_LT_LIBTOOL_TAGS],
++[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl
++available_tags='_LT_TAGS'dnl
++])
++
++
++# _LT_LIBTOOL_DECLARE(VARNAME, [TAG])
++# -----------------------------------
++# Extract the dictionary values for VARNAME (optionally with TAG) and
++# expand to a commented shell variable setting:
++#
++#    # Some comment about what VAR is for.
++#    visible_name=$lt_internal_name
++m4_define([_LT_LIBTOOL_DECLARE],
++[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1],
++					   [description])))[]dnl
++m4_pushdef([_libtool_name],
++    m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl
++m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])),
++    [0], [_libtool_name=[$]$1],
++    [1], [_libtool_name=$lt_[]$1],
++    [2], [_libtool_name=$lt_[]$1],
++    [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl
++m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl
++])
++
++
++# _LT_LIBTOOL_CONFIG_VARS
++# -----------------------
++# Produce commented declarations of non-tagged libtool config variables
++# suitable for insertion in the LIBTOOL CONFIG section of the 'libtool'
++# script.  Tagged libtool config variables (even for the LIBTOOL CONFIG
++# section) are produced by _LT_LIBTOOL_TAG_VARS.
++m4_defun([_LT_LIBTOOL_CONFIG_VARS],
++[m4_foreach([_lt_var],
++    m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)),
++    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])])
++
++
++# _LT_LIBTOOL_TAG_VARS(TAG)
++# -------------------------
++m4_define([_LT_LIBTOOL_TAG_VARS],
++[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames),
++    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])])
++
++
++# _LT_TAGVAR(VARNAME, [TAGNAME])
++# ------------------------------
++m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])])
++
++
++# _LT_CONFIG_COMMANDS
++# -------------------
++# Send accumulated output to $CONFIG_STATUS.  Thanks to the lists of
++# variables for single and double quote escaping we saved from calls
++# to _LT_DECL, we can put quote escaped variables declarations
++# into 'config.status', and then the shell code to quote escape them in
++# for loops in 'config.status'.  Finally, any additional code accumulated
++# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded.
++m4_defun([_LT_CONFIG_COMMANDS],
++[AC_PROVIDE_IFELSE([LT_OUTPUT],
++	dnl If the libtool generation code has been placed in $CONFIG_LT,
++	dnl instead of duplicating it all over again into config.status,
++	dnl then we will have config.status run $CONFIG_LT later, so it
++	dnl needs to know what name is stored there:
++        [AC_CONFIG_COMMANDS([libtool],
++            [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])],
++    dnl If the libtool generation code is destined for config.status,
++    dnl expand the accumulated commands and init code now:
++    [AC_CONFIG_COMMANDS([libtool],
++        [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])])
++])#_LT_CONFIG_COMMANDS
++
++
++# Initialize.
++m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT],
++[
++
++# The HP-UX ksh and POSIX shell print the target directory to stdout
++# if CDPATH is set.
++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
++
++sed_quote_subst='$sed_quote_subst'
++double_quote_subst='$double_quote_subst'
++delay_variable_subst='$delay_variable_subst'
++_LT_CONFIG_STATUS_DECLARATIONS
++LTCC='$LTCC'
++LTCFLAGS='$LTCFLAGS'
++compiler='$compiler_DEFAULT'
++
++# A function that is used when there is no print builtin or printf.
++func_fallback_echo ()
++{
++  eval 'cat <<_LTECHO_EOF
++\$[]1
++_LTECHO_EOF'
++}
++
++# Quote evaled strings.
++for var in lt_decl_all_varnames([[ \
++]], lt_decl_quote_varnames); do
++    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
++    *[[\\\\\\\`\\"\\\$]]*)
++      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
++      ;;
++    *)
++      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
++      ;;
++    esac
++done
++
++# Double-quote double-evaled strings.
++for var in lt_decl_all_varnames([[ \
++]], lt_decl_dquote_varnames); do
++    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
++    *[[\\\\\\\`\\"\\\$]]*)
++      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
++      ;;
++    *)
++      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
++      ;;
++    esac
++done
++
++_LT_OUTPUT_LIBTOOL_INIT
++])
++
++# _LT_GENERATED_FILE_INIT(FILE, [COMMENT])
++# ------------------------------------
++# Generate a child script FILE with all initialization necessary to
++# reuse the environment learned by the parent script, and make the
++# file executable.  If COMMENT is supplied, it is inserted after the
++# '#!' sequence but before initialization text begins.  After this
++# macro, additional text can be appended to FILE to form the body of
++# the child script.  The macro ends with non-zero status if the
++# file could not be fully written (such as if the disk is full).
++m4_ifdef([AS_INIT_GENERATED],
++[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])],
++[m4_defun([_LT_GENERATED_FILE_INIT],
++[m4_require([AS_PREPARE])]dnl
++[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl
++[lt_write_fail=0
++cat >$1 <<_ASEOF || lt_write_fail=1
++#! $SHELL
++# Generated by $as_me.
++$2
++SHELL=\${CONFIG_SHELL-$SHELL}
++export SHELL
++_ASEOF
++cat >>$1 <<\_ASEOF || lt_write_fail=1
++AS_SHELL_SANITIZE
++_AS_PREPARE
++exec AS_MESSAGE_FD>&1
++_ASEOF
++test 0 = "$lt_write_fail" && chmod +x $1[]dnl
++m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT
++
++# LT_OUTPUT
++# ---------
++# This macro allows early generation of the libtool script (before
++# AC_OUTPUT is called), incase it is used in configure for compilation
++# tests.
++AC_DEFUN([LT_OUTPUT],
++[: ${CONFIG_LT=./config.lt}
++AC_MSG_NOTICE([creating $CONFIG_LT])
++_LT_GENERATED_FILE_INIT(["$CONFIG_LT"],
++[# Run this file to recreate a libtool stub with the current configuration.])
++
++cat >>"$CONFIG_LT" <<\_LTEOF
++lt_cl_silent=false
++exec AS_MESSAGE_LOG_FD>>config.log
++{
++  echo
++  AS_BOX([Running $as_me.])
++} >&AS_MESSAGE_LOG_FD
++
++lt_cl_help="\
++'$as_me' creates a local libtool stub from the current configuration,
++for use in further configure time tests before the real libtool is
++generated.
++
++Usage: $[0] [[OPTIONS]]
++
++  -h, --help      print this help, then exit
++  -V, --version   print version number, then exit
++  -q, --quiet     do not print progress messages
++  -d, --debug     don't remove temporary files
++
++Report bugs to ."
++
++lt_cl_version="\
++m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
++m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
++configured by $[0], generated by m4_PACKAGE_STRING.
++
++Copyright (C) 2011 Free Software Foundation, Inc.
++This config.lt script is free software; the Free Software Foundation
++gives unlimited permision to copy, distribute and modify it."
++
++while test 0 != $[#]
++do
++  case $[1] in
++    --version | --v* | -V )
++      echo "$lt_cl_version"; exit 0 ;;
++    --help | --h* | -h )
++      echo "$lt_cl_help"; exit 0 ;;
++    --debug | --d* | -d )
++      debug=: ;;
++    --quiet | --q* | --silent | --s* | -q )
++      lt_cl_silent=: ;;
++
++    -*) AC_MSG_ERROR([unrecognized option: $[1]
++Try '$[0] --help' for more information.]) ;;
++
++    *) AC_MSG_ERROR([unrecognized argument: $[1]
++Try '$[0] --help' for more information.]) ;;
++  esac
++  shift
++done
++
++if $lt_cl_silent; then
++  exec AS_MESSAGE_FD>/dev/null
++fi
++_LTEOF
++
++cat >>"$CONFIG_LT" <<_LTEOF
++_LT_OUTPUT_LIBTOOL_COMMANDS_INIT
++_LTEOF
++
++cat >>"$CONFIG_LT" <<\_LTEOF
++AC_MSG_NOTICE([creating $ofile])
++_LT_OUTPUT_LIBTOOL_COMMANDS
++AS_EXIT(0)
++_LTEOF
++chmod +x "$CONFIG_LT"
++
++# configure is writing to config.log, but config.lt does its own redirection,
++# appending to config.log, which fails on DOS, as config.log is still kept
++# open by configure.  Here we exec the FD to /dev/null, effectively closing
++# config.log, so it can be properly (re)opened and appended to by config.lt.
++lt_cl_success=:
++test yes = "$silent" &&
++  lt_config_lt_args="$lt_config_lt_args --quiet"
++exec AS_MESSAGE_LOG_FD>/dev/null
++$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
++exec AS_MESSAGE_LOG_FD>>config.log
++$lt_cl_success || AS_EXIT(1)
++])# LT_OUTPUT
++
++
++# _LT_CONFIG(TAG)
++# ---------------
++# If TAG is the built-in tag, create an initial libtool script with a
++# default configuration from the untagged config vars.  Otherwise add code
++# to config.status for appending the configuration named by TAG from the
++# matching tagged config vars.
++m4_defun([_LT_CONFIG],
++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++_LT_CONFIG_SAVE_COMMANDS([
++  m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl
++  m4_if(_LT_TAG, [C], [
++    # See if we are running on zsh, and set the options that allow our
++    # commands through without removal of \ escapes.
++    if test -n "${ZSH_VERSION+set}"; then
++      setopt NO_GLOB_SUBST
++    fi
++
++    cfgfile=${ofile}T
++    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
++    $RM "$cfgfile"
++
++    cat <<_LT_EOF >> "$cfgfile"
++#! $SHELL
++# Generated automatically by $as_me ($PACKAGE) $VERSION
++# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
++# NOTE: Changes made to this file will be lost: look at ltmain.sh.
++
++# Provide generalized library-building support services.
++# Written by Gordon Matzigkeit, 1996
++
++_LT_COPYING
++_LT_LIBTOOL_TAGS
++
++# Configured defaults for sys_lib_dlsearch_path munging.
++: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}
++
++# ### BEGIN LIBTOOL CONFIG
++_LT_LIBTOOL_CONFIG_VARS
++_LT_LIBTOOL_TAG_VARS
++# ### END LIBTOOL CONFIG
++
++_LT_EOF
++
++    cat <<'_LT_EOF' >> "$cfgfile"
++
++# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE
++
++_LT_PREPARE_MUNGE_PATH_LIST
++_LT_PREPARE_CC_BASENAME
++
++# ### END FUNCTIONS SHARED WITH CONFIGURE
++
++_LT_EOF
++
++  case $host_os in
++  aix3*)
++    cat <<\_LT_EOF >> "$cfgfile"
++# AIX sometimes has problems with the GCC collect2 program.  For some
++# reason, if we set the COLLECT_NAMES environment variable, the problems
++# vanish in a puff of smoke.
++if test set != "${COLLECT_NAMES+set}"; then
++  COLLECT_NAMES=
++  export COLLECT_NAMES
++fi
++_LT_EOF
++    ;;
++  esac
++
++  _LT_PROG_LTMAIN
++
++  # We use sed instead of cat because bash on DJGPP gets confused if
++  # if finds mixed CR/LF and LF-only lines.  Since sed operates in
++  # text mode, it properly converts lines to CR/LF.  This bash problem
++  # is reportedly fixed, but why not run on old versions too?
++  $SED '$q' "$ltmain" >> "$cfgfile" \
++     || (rm -f "$cfgfile"; exit 1)
++
++   mv -f "$cfgfile" "$ofile" ||
++    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
++  chmod +x "$ofile"
++],
++[cat <<_LT_EOF >> "$ofile"
++
++dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded
++dnl in a comment (ie after a #).
++# ### BEGIN LIBTOOL TAG CONFIG: $1
++_LT_LIBTOOL_TAG_VARS(_LT_TAG)
++# ### END LIBTOOL TAG CONFIG: $1
++_LT_EOF
++])dnl /m4_if
++],
++[m4_if([$1], [], [
++    PACKAGE='$PACKAGE'
++    VERSION='$VERSION'
++    RM='$RM'
++    ofile='$ofile'], [])
++])dnl /_LT_CONFIG_SAVE_COMMANDS
++])# _LT_CONFIG
++
++
++# LT_SUPPORTED_TAG(TAG)
++# ---------------------
++# Trace this macro to discover what tags are supported by the libtool
++# --tag option, using:
++#    autoconf --trace 'LT_SUPPORTED_TAG:$1'
++AC_DEFUN([LT_SUPPORTED_TAG], [])
++
++
++# C support is built-in for now
++m4_define([_LT_LANG_C_enabled], [])
++m4_define([_LT_TAGS], [])
++
++
++# LT_LANG(LANG)
++# -------------
++# Enable libtool support for the given language if not already enabled.
++AC_DEFUN([LT_LANG],
++[AC_BEFORE([$0], [LT_OUTPUT])dnl
++m4_case([$1],
++  [C],			[_LT_LANG(C)],
++  [C++],		[_LT_LANG(CXX)],
++  [Go],			[_LT_LANG(GO)],
++  [Java],		[_LT_LANG(GCJ)],
++  [Fortran 77],		[_LT_LANG(F77)],
++  [Fortran],		[_LT_LANG(FC)],
++  [Windows Resource],	[_LT_LANG(RC)],
++  [m4_ifdef([_LT_LANG_]$1[_CONFIG],
++    [_LT_LANG($1)],
++    [m4_fatal([$0: unsupported language: "$1"])])])dnl
++])# LT_LANG
++
++
++# _LT_LANG(LANGNAME)
++# ------------------
++m4_defun([_LT_LANG],
++[m4_ifdef([_LT_LANG_]$1[_enabled], [],
++  [LT_SUPPORTED_TAG([$1])dnl
++  m4_append([_LT_TAGS], [$1 ])dnl
++  m4_define([_LT_LANG_]$1[_enabled], [])dnl
++  _LT_LANG_$1_CONFIG($1)])dnl
++])# _LT_LANG
++
++
++m4_ifndef([AC_PROG_GO], [
++# NOTE: This macro has been submitted for inclusion into   #
++#  GNU Autoconf as AC_PROG_GO.  When it is available in    #
++#  a released version of Autoconf we should remove this    #
++#  macro and use it instead.                               #
++m4_defun([AC_PROG_GO],
++[AC_LANG_PUSH(Go)dnl
++AC_ARG_VAR([GOC],     [Go compiler command])dnl
++AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl
++_AC_ARG_VAR_LDFLAGS()dnl
++AC_CHECK_TOOL(GOC, gccgo)
++if test -z "$GOC"; then
++  if test -n "$ac_tool_prefix"; then
++    AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo])
++  fi
++fi
++if test -z "$GOC"; then
++  AC_CHECK_PROG(GOC, gccgo, gccgo, false)
++fi
++])#m4_defun
++])#m4_ifndef
++
++
++# _LT_LANG_DEFAULT_CONFIG
++# -----------------------
++m4_defun([_LT_LANG_DEFAULT_CONFIG],
++[AC_PROVIDE_IFELSE([AC_PROG_CXX],
++  [LT_LANG(CXX)],
++  [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])])
++
++AC_PROVIDE_IFELSE([AC_PROG_F77],
++  [LT_LANG(F77)],
++  [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])])
++
++AC_PROVIDE_IFELSE([AC_PROG_FC],
++  [LT_LANG(FC)],
++  [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])])
++
++dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal
++dnl pulling things in needlessly.
++AC_PROVIDE_IFELSE([AC_PROG_GCJ],
++  [LT_LANG(GCJ)],
++  [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],
++    [LT_LANG(GCJ)],
++    [AC_PROVIDE_IFELSE([LT_PROG_GCJ],
++      [LT_LANG(GCJ)],
++      [m4_ifdef([AC_PROG_GCJ],
++	[m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])])
++       m4_ifdef([A][M_PROG_GCJ],
++	[m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])])
++       m4_ifdef([LT_PROG_GCJ],
++	[m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])])
++
++AC_PROVIDE_IFELSE([AC_PROG_GO],
++  [LT_LANG(GO)],
++  [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])])
++
++AC_PROVIDE_IFELSE([LT_PROG_RC],
++  [LT_LANG(RC)],
++  [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])])
++])# _LT_LANG_DEFAULT_CONFIG
++
++# Obsolete macros:
++AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)])
++AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)])
++AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)])
++AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)])
++AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_CXX], [])
++dnl AC_DEFUN([AC_LIBTOOL_F77], [])
++dnl AC_DEFUN([AC_LIBTOOL_FC], [])
++dnl AC_DEFUN([AC_LIBTOOL_GCJ], [])
++dnl AC_DEFUN([AC_LIBTOOL_RC], [])
++
++
++# _LT_TAG_COMPILER
++# ----------------
++m4_defun([_LT_TAG_COMPILER],
++[AC_REQUIRE([AC_PROG_CC])dnl
++
++_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl
++_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl
++_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl
++_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl
++
++# If no C compiler was specified, use CC.
++LTCC=${LTCC-"$CC"}
++
++# If no C compiler flags were specified, use CFLAGS.
++LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
++
++# Allow CC to be a program name with arguments.
++compiler=$CC
++])# _LT_TAG_COMPILER
++
++
++# _LT_COMPILER_BOILERPLATE
++# ------------------------
++# Check for compiler boilerplate output or warnings with
++# the simple compiler test code.
++m4_defun([_LT_COMPILER_BOILERPLATE],
++[m4_require([_LT_DECL_SED])dnl
++ac_outfile=conftest.$ac_objext
++echo "$lt_simple_compile_test_code" >conftest.$ac_ext
++eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
++_lt_compiler_boilerplate=`cat conftest.err`
++$RM conftest*
++])# _LT_COMPILER_BOILERPLATE
++
++
++# _LT_LINKER_BOILERPLATE
++# ----------------------
++# Check for linker boilerplate output or warnings with
++# the simple link test code.
++m4_defun([_LT_LINKER_BOILERPLATE],
++[m4_require([_LT_DECL_SED])dnl
++ac_outfile=conftest.$ac_objext
++echo "$lt_simple_link_test_code" >conftest.$ac_ext
++eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
++_lt_linker_boilerplate=`cat conftest.err`
++$RM -r conftest*
++])# _LT_LINKER_BOILERPLATE
++
++# _LT_REQUIRED_DARWIN_CHECKS
++# -------------------------
++m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
++  case $host_os in
++    rhapsody* | darwin*)
++    AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:])
++    AC_CHECK_TOOL([NMEDIT], [nmedit], [:])
++    AC_CHECK_TOOL([LIPO], [lipo], [:])
++    AC_CHECK_TOOL([OTOOL], [otool], [:])
++    AC_CHECK_TOOL([OTOOL64], [otool64], [:])
++    _LT_DECL([], [DSYMUTIL], [1],
++      [Tool to manipulate archived DWARF debug symbol files on Mac OS X])
++    _LT_DECL([], [NMEDIT], [1],
++      [Tool to change global to local symbols on Mac OS X])
++    _LT_DECL([], [LIPO], [1],
++      [Tool to manipulate fat objects and archives on Mac OS X])
++    _LT_DECL([], [OTOOL], [1],
++      [ldd/readelf like tool for Mach-O binaries on Mac OS X])
++    _LT_DECL([], [OTOOL64], [1],
++      [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4])
++
++    AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod],
++      [lt_cv_apple_cc_single_mod=no
++      if test -z "$LT_MULTI_MODULE"; then
++	# By default we will add the -single_module flag. You can override
++	# by either setting the environment variable LT_MULTI_MODULE
++	# non-empty at configure time, or by adding -multi_module to the
++	# link flags.
++	rm -rf libconftest.dylib*
++	echo "int foo(void){return 1;}" > conftest.c
++	echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
++-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD
++	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
++	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
++        _lt_result=$?
++	# If there is a non-empty error log, and "single_module"
++	# appears in it, assume the flag caused a linker warning
++        if test -s conftest.err && $GREP single_module conftest.err; then
++	  cat conftest.err >&AS_MESSAGE_LOG_FD
++	# Otherwise, if the output was created with a 0 exit code from
++	# the compiler, it worked.
++	elif test -f libconftest.dylib && test 0 = "$_lt_result"; then
++	  lt_cv_apple_cc_single_mod=yes
++	else
++	  cat conftest.err >&AS_MESSAGE_LOG_FD
++	fi
++	rm -rf libconftest.dylib*
++	rm -f conftest.*
++      fi])
++
++    AC_CACHE_CHECK([for -exported_symbols_list linker flag],
++      [lt_cv_ld_exported_symbols_list],
++      [lt_cv_ld_exported_symbols_list=no
++      save_LDFLAGS=$LDFLAGS
++      echo "_main" > conftest.sym
++      LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
++      AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
++	[lt_cv_ld_exported_symbols_list=yes],
++	[lt_cv_ld_exported_symbols_list=no])
++	LDFLAGS=$save_LDFLAGS
++    ])
++
++    AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load],
++      [lt_cv_ld_force_load=no
++      cat > conftest.c << _LT_EOF
++int forced_loaded() { return 2;}
++_LT_EOF
++      echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD
++      $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD
++      echo "$AR $AR_FLAGS libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD
++      $AR $AR_FLAGS libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD
++      echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD
++      $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD
++      cat > conftest.c << _LT_EOF
++int main() { return 0;}
++_LT_EOF
++      echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD
++      $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
++      _lt_result=$?
++      if test -s conftest.err && $GREP force_load conftest.err; then
++	cat conftest.err >&AS_MESSAGE_LOG_FD
++      elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then
++	lt_cv_ld_force_load=yes
++      else
++	cat conftest.err >&AS_MESSAGE_LOG_FD
++      fi
++        rm -f conftest.err libconftest.a conftest conftest.c
++        rm -rf conftest.dSYM
++    ])
++    case $host_os in
++    rhapsody* | darwin1.[[012]])
++      _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;;
++    darwin1.*)
++      _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
++    darwin*)
++      case $MACOSX_DEPLOYMENT_TARGET,$host in
++        10.[[012]],*|,*powerpc*-darwin[[5-8]]*)
++          _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
++        *)
++          _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;;
++      esac
++    ;;
++  esac
++    if test yes = "$lt_cv_apple_cc_single_mod"; then
++      _lt_dar_single_mod='$single_module'
++    fi
++    if test yes = "$lt_cv_ld_exported_symbols_list"; then
++      _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym'
++    else
++      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib'
++    fi
++    if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then
++      _lt_dsymutil='~$DSYMUTIL $lib || :'
++    else
++      _lt_dsymutil=
++    fi
++    ;;
++  esac
++])
++
++
++# _LT_DARWIN_LINKER_FEATURES([TAG])
++# ---------------------------------
++# Checks for linker and compiler features on darwin
++m4_defun([_LT_DARWIN_LINKER_FEATURES],
++[
++  m4_require([_LT_REQUIRED_DARWIN_CHECKS])
++  _LT_TAGVAR(archive_cmds_need_lc, $1)=no
++  _LT_TAGVAR(hardcode_direct, $1)=no
++  _LT_TAGVAR(hardcode_automatic, $1)=yes
++  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
++  if test yes = "$lt_cv_ld_force_load"; then
++    _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
++    m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes],
++                  [FC],  [_LT_TAGVAR(compiler_needs_object, $1)=yes])
++  else
++    _LT_TAGVAR(whole_archive_flag_spec, $1)=''
++  fi
++  _LT_TAGVAR(link_all_deplibs, $1)=yes
++  _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined
++  case $cc_basename in
++     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
++     *) _lt_dar_can_shared=$GCC ;;
++  esac
++  if test yes = "$_lt_dar_can_shared"; then
++    output_verbose_link_cmd=func_echo_all
++    _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
++    _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
++    _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
++    _LT_TAGVAR(module_expsym_cmds, $1)="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
++    m4_if([$1], [CXX],
++[   if test yes != "$lt_cv_apple_cc_single_mod"; then
++      _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil"
++      _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil"
++    fi
++],[])
++  else
++  _LT_TAGVAR(ld_shlibs, $1)=no
++  fi
++])
++
++# _LT_SYS_MODULE_PATH_AIX([TAGNAME])
++# ----------------------------------
++# Links a minimal program and checks the executable
++# for the system default hardcoded library path. In most cases,
++# this is /usr/lib:/lib, but when the MPI compilers are used
++# the location of the communication and MPI libs are included too.
++# If we don't find anything, use the default library path according
++# to the aix ld manual.
++# Store the results from the different compilers for each TAGNAME.
++# Allow to override them for all tags through lt_cv_aix_libpath.
++m4_defun([_LT_SYS_MODULE_PATH_AIX],
++[m4_require([_LT_DECL_SED])dnl
++if test set = "${lt_cv_aix_libpath+set}"; then
++  aix_libpath=$lt_cv_aix_libpath
++else
++  AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])],
++  [AC_LINK_IFELSE([AC_LANG_PROGRAM],[
++  lt_aix_libpath_sed='[
++      /Import File Strings/,/^$/ {
++	  /^0/ {
++	      s/^0  *\([^ ]*\) *$/\1/
++	      p
++	  }
++      }]'
++  _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  # Check for a 64-bit object if we didn't find anything.
++  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
++    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  fi],[])
++  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
++    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib
++  fi
++  ])
++  aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])
++fi
++])# _LT_SYS_MODULE_PATH_AIX
++
++
++# _LT_SHELL_INIT(ARG)
++# -------------------
++m4_define([_LT_SHELL_INIT],
++[m4_divert_text([M4SH-INIT], [$1
++])])# _LT_SHELL_INIT
++
++
++
++# _LT_PROG_ECHO_BACKSLASH
++# -----------------------
++# Find how we can fake an echo command that does not interpret backslash.
++# In particular, with Autoconf 2.60 or later we add some code to the start
++# of the generated configure script that will find a shell with a builtin
++# printf (that we can use as an echo command).
++m4_defun([_LT_PROG_ECHO_BACKSLASH],
++[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
++
++AC_MSG_CHECKING([how to print strings])
++# Test print first, because it will be a builtin if present.
++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
++   test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
++  ECHO='print -r --'
++elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
++  ECHO='printf %s\n'
++else
++  # Use this function as a fallback that always works.
++  func_fallback_echo ()
++  {
++    eval 'cat <<_LTECHO_EOF
++$[]1
++_LTECHO_EOF'
++  }
++  ECHO='func_fallback_echo'
++fi
++
++# func_echo_all arg...
++# Invoke $ECHO with all args, space-separated.
++func_echo_all ()
++{
++    $ECHO "$*"
++}
++
++case $ECHO in
++  printf*) AC_MSG_RESULT([printf]) ;;
++  print*) AC_MSG_RESULT([print -r]) ;;
++  *) AC_MSG_RESULT([cat]) ;;
++esac
++
++m4_ifdef([_AS_DETECT_SUGGESTED],
++[_AS_DETECT_SUGGESTED([
++  test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || (
++    ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
++    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
++    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
++    PATH=/empty FPATH=/empty; export PATH FPATH
++    test "X`printf %s $ECHO`" = "X$ECHO" \
++      || test "X`print -r -- $ECHO`" = "X$ECHO" )])])
++
++_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
++_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes])
++])# _LT_PROG_ECHO_BACKSLASH
++
++
++# _LT_WITH_SYSROOT
++# ----------------
++AC_DEFUN([_LT_WITH_SYSROOT],
++[m4_require([_LT_DECL_SED])dnl
++AC_MSG_CHECKING([for sysroot])
++AC_ARG_WITH([sysroot],
++[AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@],
++  [Search for dependent libraries within DIR (or the compiler's sysroot
++   if not specified).])],
++[], [with_sysroot=no])
++
++dnl lt_sysroot will always be passed unquoted.  We quote it here
++dnl in case the user passed a directory name.
++lt_sysroot=
++case $with_sysroot in #(
++ yes)
++   if test yes = "$GCC"; then
++     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
++   fi
++   ;; #(
++ /*)
++   lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"`
++   ;; #(
++ no|'')
++   ;; #(
++ *)
++   AC_MSG_RESULT([$with_sysroot])
++   AC_MSG_ERROR([The sysroot must be an absolute path.])
++   ;;
++esac
++
++ AC_MSG_RESULT([${lt_sysroot:-no}])
++_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl
++[dependent libraries, and where our libraries should be installed.])])
++
++# _LT_ENABLE_LOCK
++# ---------------
++m4_defun([_LT_ENABLE_LOCK],
++[AC_ARG_ENABLE([libtool-lock],
++  [AS_HELP_STRING([--disable-libtool-lock],
++    [avoid locking (might break parallel builds)])])
++test no = "$enable_libtool_lock" || enable_libtool_lock=yes
++
++# Some flags need to be propagated to the compiler or linker for good
++# libtool support.
++case $host in
++ia64-*-hpux*)
++  # Find out what ABI is being produced by ac_compile, and set mode
++  # options accordingly.
++  echo 'int i;' > conftest.$ac_ext
++  if AC_TRY_EVAL(ac_compile); then
++    case `$FILECMD conftest.$ac_objext` in
++      *ELF-32*)
++	HPUX_IA64_MODE=32
++	;;
++      *ELF-64*)
++	HPUX_IA64_MODE=64
++	;;
++    esac
++  fi
++  rm -rf conftest*
++  ;;
++*-*-irix6*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.
++  echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
++  if AC_TRY_EVAL(ac_compile); then
++    if test yes = "$lt_cv_prog_gnu_ld"; then
++      case `$FILECMD conftest.$ac_objext` in
++	*32-bit*)
++	  LD="${LD-ld} -melf32bsmip"
++	  ;;
++	*N32*)
++	  LD="${LD-ld} -melf32bmipn32"
++	  ;;
++	*64-bit*)
++	  LD="${LD-ld} -melf64bmip"
++	;;
++      esac
++    else
++      case `$FILECMD conftest.$ac_objext` in
++	*32-bit*)
++	  LD="${LD-ld} -32"
++	  ;;
++	*N32*)
++	  LD="${LD-ld} -n32"
++	  ;;
++	*64-bit*)
++	  LD="${LD-ld} -64"
++	  ;;
++      esac
++    fi
++  fi
++  rm -rf conftest*
++  ;;
++
++mips64*-*linux*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.
++  echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
++  if AC_TRY_EVAL(ac_compile); then
++    emul=elf
++    case `$FILECMD conftest.$ac_objext` in
++      *32-bit*)
++	emul="${emul}32"
++	;;
++      *64-bit*)
++	emul="${emul}64"
++	;;
++    esac
++    case `$FILECMD conftest.$ac_objext` in
++      *MSB*)
++	emul="${emul}btsmip"
++	;;
++      *LSB*)
++	emul="${emul}ltsmip"
++	;;
++    esac
++    case `$FILECMD conftest.$ac_objext` in
++      *N32*)
++	emul="${emul}n32"
++	;;
++    esac
++    LD="${LD-ld} -m $emul"
++  fi
++  rm -rf conftest*
++  ;;
++
++x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \
++s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.  Note that the listed cases only cover the
++  # situations where additional linker options are needed (such as when
++  # doing 32-bit compilation for a host where ld defaults to 64-bit, or
++  # vice versa); the common cases where no linker options are needed do
++  # not appear in the list.
++  echo 'int i;' > conftest.$ac_ext
++  if AC_TRY_EVAL(ac_compile); then
++    case `$FILECMD conftest.o` in
++      *32-bit*)
++	case $host in
++	  x86_64-*kfreebsd*-gnu)
++	    LD="${LD-ld} -m elf_i386_fbsd"
++	    ;;
++	  x86_64-*linux*)
++	    case `$FILECMD conftest.o` in
++	      *x86-64*)
++		LD="${LD-ld} -m elf32_x86_64"
++		;;
++	      *)
++		LD="${LD-ld} -m elf_i386"
++		;;
++	    esac
++	    ;;
++	  powerpc64le-*linux*)
++	    LD="${LD-ld} -m elf32lppclinux"
++	    ;;
++	  powerpc64-*linux*)
++	    LD="${LD-ld} -m elf32ppclinux"
++	    ;;
++	  s390x-*linux*)
++	    LD="${LD-ld} -m elf_s390"
++	    ;;
++	  sparc64-*linux*)
++	    LD="${LD-ld} -m elf32_sparc"
++	    ;;
++	esac
++	;;
++      *64-bit*)
++	case $host in
++	  x86_64-*kfreebsd*-gnu)
++	    LD="${LD-ld} -m elf_x86_64_fbsd"
++	    ;;
++	  x86_64-*linux*)
++	    LD="${LD-ld} -m elf_x86_64"
++	    ;;
++	  powerpcle-*linux*)
++	    LD="${LD-ld} -m elf64lppc"
++	    ;;
++	  powerpc-*linux*)
++	    LD="${LD-ld} -m elf64ppc"
++	    ;;
++	  s390*-*linux*|s390*-*tpf*)
++	    LD="${LD-ld} -m elf64_s390"
++	    ;;
++	  sparc*-*linux*)
++	    LD="${LD-ld} -m elf64_sparc"
++	    ;;
++	esac
++	;;
++    esac
++  fi
++  rm -rf conftest*
++  ;;
++
++*-*-sco3.2v5*)
++  # On SCO OpenServer 5, we need -belf to get full-featured binaries.
++  SAVE_CFLAGS=$CFLAGS
++  CFLAGS="$CFLAGS -belf"
++  AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
++    [AC_LANG_PUSH(C)
++     AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
++     AC_LANG_POP])
++  if test yes != "$lt_cv_cc_needs_belf"; then
++    # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
++    CFLAGS=$SAVE_CFLAGS
++  fi
++  ;;
++*-*solaris*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.
++  echo 'int i;' > conftest.$ac_ext
++  if AC_TRY_EVAL(ac_compile); then
++    case `$FILECMD conftest.o` in
++    *64-bit*)
++      case $lt_cv_prog_gnu_ld in
++      yes*)
++        case $host in
++        i?86-*-solaris*|x86_64-*-solaris*)
++          LD="${LD-ld} -m elf_x86_64"
++          ;;
++        sparc*-*-solaris*)
++          LD="${LD-ld} -m elf64_sparc"
++          ;;
++        esac
++        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
++        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
++          LD=${LD-ld}_sol2
++        fi
++        ;;
++      *)
++	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
++	  LD="${LD-ld} -64"
++	fi
++	;;
++      esac
++      ;;
++    esac
++  fi
++  rm -rf conftest*
++  ;;
++esac
++
++need_locks=$enable_libtool_lock
++])# _LT_ENABLE_LOCK
++
++
++# _LT_PROG_AR
++# -----------
++m4_defun([_LT_PROG_AR],
++[AC_CHECK_TOOLS(AR, [ar], false)
++: ${AR=ar}
++_LT_DECL([], [AR], [1], [The archiver])
++
++# Use ARFLAGS variable as AR's operation code to sync the variable naming with
++# Automake.  If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have
++# higher priority because thats what people were doing historically (setting
++# ARFLAGS for automake and AR_FLAGS for libtool).  FIXME: Make the AR_FLAGS
++# variable obsoleted/removed.
++
++test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr}
++lt_ar_flags=$AR_FLAGS
++_LT_DECL([], [lt_ar_flags], [0], [Flags to create an archive (by configure)])
++
++# Make AR_FLAGS overridable by 'make ARFLAGS='.  Don't try to run-time override
++# by AR_FLAGS because that was never working and AR_FLAGS is about to die.
++_LT_DECL([], [AR_FLAGS], [\@S|@{ARFLAGS-"\@S|@lt_ar_flags"}],
++         [Flags to create an archive])
++
++AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file],
++  [lt_cv_ar_at_file=no
++   AC_COMPILE_IFELSE([AC_LANG_PROGRAM],
++     [echo conftest.$ac_objext > conftest.lst
++      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD'
++      AC_TRY_EVAL([lt_ar_try])
++      if test 0 -eq "$ac_status"; then
++	# Ensure the archiver fails upon bogus file names.
++	rm -f conftest.$ac_objext libconftest.a
++	AC_TRY_EVAL([lt_ar_try])
++	if test 0 -ne "$ac_status"; then
++          lt_cv_ar_at_file=@
++        fi
++      fi
++      rm -f conftest.* libconftest.a
++     ])
++  ])
++
++if test no = "$lt_cv_ar_at_file"; then
++  archiver_list_spec=
++else
++  archiver_list_spec=$lt_cv_ar_at_file
++fi
++_LT_DECL([], [archiver_list_spec], [1],
++  [How to feed a file listing to the archiver])
++])# _LT_PROG_AR
++
++
++# _LT_CMD_OLD_ARCHIVE
++# -------------------
++m4_defun([_LT_CMD_OLD_ARCHIVE],
++[_LT_PROG_AR
++
++AC_CHECK_TOOL(STRIP, strip, :)
++test -z "$STRIP" && STRIP=:
++_LT_DECL([], [STRIP], [1], [A symbol stripping program])
++
++AC_CHECK_TOOL(RANLIB, ranlib, :)
++test -z "$RANLIB" && RANLIB=:
++_LT_DECL([], [RANLIB], [1],
++    [Commands used to install an old-style archive])
++
++# Determine commands to create old-style static archives.
++old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
++old_postinstall_cmds='chmod 644 $oldlib'
++old_postuninstall_cmds=
++
++if test -n "$RANLIB"; then
++  case $host_os in
++  bitrig* | openbsd*)
++    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
++    ;;
++  *)
++    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
++    ;;
++  esac
++  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
++fi
++
++case $host_os in
++  darwin*)
++    lock_old_archive_extraction=yes ;;
++  *)
++    lock_old_archive_extraction=no ;;
++esac
++_LT_DECL([], [old_postinstall_cmds], [2])
++_LT_DECL([], [old_postuninstall_cmds], [2])
++_LT_TAGDECL([], [old_archive_cmds], [2],
++    [Commands used to build an old-style archive])
++_LT_DECL([], [lock_old_archive_extraction], [0],
++    [Whether to use a lock for old archive extraction])
++])# _LT_CMD_OLD_ARCHIVE
++
++
++# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
++#		[OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE])
++# ----------------------------------------------------------------
++# Check whether the given compiler option works
++AC_DEFUN([_LT_COMPILER_OPTION],
++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++m4_require([_LT_DECL_SED])dnl
++AC_CACHE_CHECK([$1], [$2],
++  [$2=no
++   m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4])
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++   lt_compiler_flag="$3"  ## exclude from sc_useless_quotes_in_assignment
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   # The option is referenced via a variable to avoid confusing sed.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
++   (eval "$lt_compile" 2>conftest.err)
++   ac_status=$?
++   cat conftest.err >&AS_MESSAGE_LOG_FD
++   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
++   if (exit $ac_status) && test -s "$ac_outfile"; then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings other than the usual output.
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
++     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
++       $2=yes
++     fi
++   fi
++   $RM conftest*
++])
++
++if test yes = "[$]$2"; then
++    m4_if([$5], , :, [$5])
++else
++    m4_if([$6], , :, [$6])
++fi
++])# _LT_COMPILER_OPTION
++
++# Old name:
++AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], [])
++
++
++# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
++#                  [ACTION-SUCCESS], [ACTION-FAILURE])
++# ----------------------------------------------------
++# Check whether the given linker option works
++AC_DEFUN([_LT_LINKER_OPTION],
++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++m4_require([_LT_DECL_SED])dnl
++AC_CACHE_CHECK([$1], [$2],
++  [$2=no
++   save_LDFLAGS=$LDFLAGS
++   LDFLAGS="$LDFLAGS $3"
++   echo "$lt_simple_link_test_code" > conftest.$ac_ext
++   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
++     # The linker can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     if test -s conftest.err; then
++       # Append any errors to the config.log.
++       cat conftest.err 1>&AS_MESSAGE_LOG_FD
++       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
++       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++       if diff conftest.exp conftest.er2 >/dev/null; then
++         $2=yes
++       fi
++     else
++       $2=yes
++     fi
++   fi
++   $RM -r conftest*
++   LDFLAGS=$save_LDFLAGS
++])
++
++if test yes = "[$]$2"; then
++    m4_if([$4], , :, [$4])
++else
++    m4_if([$5], , :, [$5])
++fi
++])# _LT_LINKER_OPTION
++
++# Old name:
++AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])
++
++
++# LT_CMD_MAX_LEN
++#---------------
++AC_DEFUN([LT_CMD_MAX_LEN],
++[AC_REQUIRE([AC_CANONICAL_HOST])dnl
++# find the maximum length of command line arguments
++AC_MSG_CHECKING([the maximum length of command line arguments])
++AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
++  i=0
++  teststring=ABCD
++
++  case $build_os in
++  msdosdjgpp*)
++    # On DJGPP, this test can blow up pretty badly due to problems in libc
++    # (any single argument exceeding 2000 bytes causes a buffer overrun
++    # during glob expansion).  Even if it were fixed, the result of this
++    # check would be larger than it should be.
++    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
++    ;;
++
++  gnu*)
++    # Under GNU Hurd, this test is not required because there is
++    # no limit to the length of command line arguments.
++    # Libtool will interpret -1 as no limit whatsoever
++    lt_cv_sys_max_cmd_len=-1;
++    ;;
++
++  cygwin* | mingw* | cegcc*)
++    # On Win9x/ME, this test blows up -- it succeeds, but takes
++    # about 5 minutes as the teststring grows exponentially.
++    # Worse, since 9x/ME are not pre-emptively multitasking,
++    # you end up with a "frozen" computer, even though with patience
++    # the test eventually succeeds (with a max line length of 256k).
++    # Instead, let's just punt: use the minimum linelength reported by
++    # all of the supported platforms: 8192 (on NT/2K/XP).
++    lt_cv_sys_max_cmd_len=8192;
++    ;;
++
++  mint*)
++    # On MiNT this can take a long time and run out of memory.
++    lt_cv_sys_max_cmd_len=8192;
++    ;;
++
++  amigaos*)
++    # On AmigaOS with pdksh, this test takes hours, literally.
++    # So we just punt and use a minimum line length of 8192.
++    lt_cv_sys_max_cmd_len=8192;
++    ;;
++
++  bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*)
++    # This has been around since 386BSD, at least.  Likely further.
++    if test -x /sbin/sysctl; then
++      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
++    elif test -x /usr/sbin/sysctl; then
++      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
++    else
++      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
++    fi
++    # And add a safety zone
++    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
++    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
++    ;;
++
++  interix*)
++    # We know the value 262144 and hardcode it with a safety zone (like BSD)
++    lt_cv_sys_max_cmd_len=196608
++    ;;
++
++  os2*)
++    # The test takes a long time on OS/2.
++    lt_cv_sys_max_cmd_len=8192
++    ;;
++
++  osf*)
++    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
++    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
++    # nice to cause kernel panics so lets avoid the loop below.
++    # First set a reasonable default.
++    lt_cv_sys_max_cmd_len=16384
++    #
++    if test -x /sbin/sysconfig; then
++      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
++        *1*) lt_cv_sys_max_cmd_len=-1 ;;
++      esac
++    fi
++    ;;
++  sco3.2v5*)
++    lt_cv_sys_max_cmd_len=102400
++    ;;
++  sysv5* | sco5v6* | sysv4.2uw2*)
++    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
++    if test -n "$kargmax"; then
++      lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[[	 ]]//'`
++    else
++      lt_cv_sys_max_cmd_len=32768
++    fi
++    ;;
++  *)
++    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
++    if test -n "$lt_cv_sys_max_cmd_len" && \
++       test undefined != "$lt_cv_sys_max_cmd_len"; then
++      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
++      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
++    else
++      # Make teststring a little bigger before we do anything with it.
++      # a 1K string should be a reasonable start.
++      for i in 1 2 3 4 5 6 7 8; do
++        teststring=$teststring$teststring
++      done
++      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
++      # If test is not a shell built-in, we'll probably end up computing a
++      # maximum length that is only half of the actual maximum length, but
++      # we can't tell.
++      while { test X`env echo "$teststring$teststring" 2>/dev/null` \
++	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
++	      test 17 != "$i" # 1/2 MB should be enough
++      do
++        i=`expr $i + 1`
++        teststring=$teststring$teststring
++      done
++      # Only check the string length outside the loop.
++      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
++      teststring=
++      # Add a significant safety factor because C++ compilers can tack on
++      # massive amounts of additional arguments before passing them to the
++      # linker.  It appears as though 1/2 is a usable value.
++      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
++    fi
++    ;;
++  esac
++])
++if test -n "$lt_cv_sys_max_cmd_len"; then
++  AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
++else
++  AC_MSG_RESULT(none)
++fi
++max_cmd_len=$lt_cv_sys_max_cmd_len
++_LT_DECL([], [max_cmd_len], [0],
++    [What is the maximum length of a command?])
++])# LT_CMD_MAX_LEN
++
++# Old name:
++AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
++
++
++# _LT_HEADER_DLFCN
++# ----------------
++m4_defun([_LT_HEADER_DLFCN],
++[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
++])# _LT_HEADER_DLFCN
++
++
++# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
++#                      ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
++# ----------------------------------------------------------------
++m4_defun([_LT_TRY_DLOPEN_SELF],
++[m4_require([_LT_HEADER_DLFCN])dnl
++if test yes = "$cross_compiling"; then :
++  [$4]
++else
++  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
++  lt_status=$lt_dlunknown
++  cat > conftest.$ac_ext <<_LT_EOF
++[#line $LINENO "configure"
++#include "confdefs.h"
++
++#if HAVE_DLFCN_H
++#include 
++#endif
++
++#include 
++
++#ifdef RTLD_GLOBAL
++#  define LT_DLGLOBAL		RTLD_GLOBAL
++#else
++#  ifdef DL_GLOBAL
++#    define LT_DLGLOBAL		DL_GLOBAL
++#  else
++#    define LT_DLGLOBAL		0
++#  endif
++#endif
++
++/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
++   find out it does not work in some platform. */
++#ifndef LT_DLLAZY_OR_NOW
++#  ifdef RTLD_LAZY
++#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
++#  else
++#    ifdef DL_LAZY
++#      define LT_DLLAZY_OR_NOW		DL_LAZY
++#    else
++#      ifdef RTLD_NOW
++#        define LT_DLLAZY_OR_NOW	RTLD_NOW
++#      else
++#        ifdef DL_NOW
++#          define LT_DLLAZY_OR_NOW	DL_NOW
++#        else
++#          define LT_DLLAZY_OR_NOW	0
++#        endif
++#      endif
++#    endif
++#  endif
++#endif
++
++/* When -fvisibility=hidden is used, assume the code has been annotated
++   correspondingly for the symbols needed.  */
++#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
++int fnord () __attribute__((visibility("default")));
++#endif
++
++int fnord () { return 42; }
++int main ()
++{
++  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
++  int status = $lt_dlunknown;
++
++  if (self)
++    {
++      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
++      else
++        {
++	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
++          else puts (dlerror ());
++	}
++      /* dlclose (self); */
++    }
++  else
++    puts (dlerror ());
++
++  return status;
++}]
++_LT_EOF
++  if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then
++    (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
++    lt_status=$?
++    case x$lt_status in
++      x$lt_dlno_uscore) $1 ;;
++      x$lt_dlneed_uscore) $2 ;;
++      x$lt_dlunknown|x*) $3 ;;
++    esac
++  else :
++    # compilation failed
++    $3
++  fi
++fi
++rm -fr conftest*
++])# _LT_TRY_DLOPEN_SELF
++
++
++# LT_SYS_DLOPEN_SELF
++# ------------------
++AC_DEFUN([LT_SYS_DLOPEN_SELF],
++[m4_require([_LT_HEADER_DLFCN])dnl
++if test yes != "$enable_dlopen"; then
++  enable_dlopen=unknown
++  enable_dlopen_self=unknown
++  enable_dlopen_self_static=unknown
++else
++  lt_cv_dlopen=no
++  lt_cv_dlopen_libs=
++
++  case $host_os in
++  beos*)
++    lt_cv_dlopen=load_add_on
++    lt_cv_dlopen_libs=
++    lt_cv_dlopen_self=yes
++    ;;
++
++  mingw* | pw32* | cegcc*)
++    lt_cv_dlopen=LoadLibrary
++    lt_cv_dlopen_libs=
++    ;;
++
++  cygwin*)
++    lt_cv_dlopen=dlopen
++    lt_cv_dlopen_libs=
++    ;;
++
++  darwin*)
++    # if libdl is installed we need to link against it
++    AC_CHECK_LIB([dl], [dlopen],
++		[lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[
++    lt_cv_dlopen=dyld
++    lt_cv_dlopen_libs=
++    lt_cv_dlopen_self=yes
++    ])
++    ;;
++
++  tpf*)
++    # Don't try to run any link tests for TPF.  We know it's impossible
++    # because TPF is a cross-compiler, and we know how we open DSOs.
++    lt_cv_dlopen=dlopen
++    lt_cv_dlopen_libs=
++    lt_cv_dlopen_self=no
++    ;;
++
++  *)
++    AC_CHECK_FUNC([shl_load],
++	  [lt_cv_dlopen=shl_load],
++      [AC_CHECK_LIB([dld], [shl_load],
++	    [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld],
++	[AC_CHECK_FUNC([dlopen],
++	      [lt_cv_dlopen=dlopen],
++	  [AC_CHECK_LIB([dl], [dlopen],
++		[lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],
++	    [AC_CHECK_LIB([svld], [dlopen],
++		  [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld],
++	      [AC_CHECK_LIB([dld], [dld_link],
++		    [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld])
++	      ])
++	    ])
++	  ])
++	])
++      ])
++    ;;
++  esac
++
++  if test no = "$lt_cv_dlopen"; then
++    enable_dlopen=no
++  else
++    enable_dlopen=yes
++  fi
++
++  case $lt_cv_dlopen in
++  dlopen)
++    save_CPPFLAGS=$CPPFLAGS
++    test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
++
++    save_LDFLAGS=$LDFLAGS
++    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
++
++    save_LIBS=$LIBS
++    LIBS="$lt_cv_dlopen_libs $LIBS"
++
++    AC_CACHE_CHECK([whether a program can dlopen itself],
++	  lt_cv_dlopen_self, [dnl
++	  _LT_TRY_DLOPEN_SELF(
++	    lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
++	    lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
++    ])
++
++    if test yes = "$lt_cv_dlopen_self"; then
++      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
++      AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
++	  lt_cv_dlopen_self_static, [dnl
++	  _LT_TRY_DLOPEN_SELF(
++	    lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
++	    lt_cv_dlopen_self_static=no,  lt_cv_dlopen_self_static=cross)
++      ])
++    fi
++
++    CPPFLAGS=$save_CPPFLAGS
++    LDFLAGS=$save_LDFLAGS
++    LIBS=$save_LIBS
++    ;;
++  esac
++
++  case $lt_cv_dlopen_self in
++  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
++  *) enable_dlopen_self=unknown ;;
++  esac
++
++  case $lt_cv_dlopen_self_static in
++  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
++  *) enable_dlopen_self_static=unknown ;;
++  esac
++fi
++_LT_DECL([dlopen_support], [enable_dlopen], [0],
++	 [Whether dlopen is supported])
++_LT_DECL([dlopen_self], [enable_dlopen_self], [0],
++	 [Whether dlopen of programs is supported])
++_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0],
++	 [Whether dlopen of statically linked programs is supported])
++])# LT_SYS_DLOPEN_SELF
++
++# Old name:
++AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], [])
++
++
++# _LT_COMPILER_C_O([TAGNAME])
++# ---------------------------
++# Check to see if options -c and -o are simultaneously supported by compiler.
++# This macro does not hard code the compiler like AC_PROG_CC_C_O.
++m4_defun([_LT_COMPILER_C_O],
++[m4_require([_LT_DECL_SED])dnl
++m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++m4_require([_LT_TAG_COMPILER])dnl
++AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
++  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)],
++  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no
++   $RM -r conftest 2>/dev/null
++   mkdir conftest
++   cd conftest
++   mkdir out
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++   lt_compiler_flag="-o out/conftest2.$ac_objext"
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
++   (eval "$lt_compile" 2>out/conftest.err)
++   ac_status=$?
++   cat out/conftest.err >&AS_MESSAGE_LOG_FD
++   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
++   if (exit $ac_status) && test -s out/conftest2.$ac_objext
++   then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
++     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
++     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
++       _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
++     fi
++   fi
++   chmod u+w . 2>&AS_MESSAGE_LOG_FD
++   $RM conftest*
++   # SGI C++ compiler will create directory out/ii_files/ for
++   # template instantiation
++   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
++   $RM out/* && rmdir out
++   cd ..
++   $RM -r conftest
++   $RM conftest*
++])
++_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1],
++	[Does compiler simultaneously support -c and -o options?])
++])# _LT_COMPILER_C_O
++
++
++# _LT_COMPILER_FILE_LOCKS([TAGNAME])
++# ----------------------------------
++# Check to see if we can do hard links to lock some files if needed
++m4_defun([_LT_COMPILER_FILE_LOCKS],
++[m4_require([_LT_ENABLE_LOCK])dnl
++m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++_LT_COMPILER_C_O([$1])
++
++hard_links=nottested
++if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then
++  # do not overwrite the value of need_locks provided by the user
++  AC_MSG_CHECKING([if we can lock with hard links])
++  hard_links=yes
++  $RM conftest*
++  ln conftest.a conftest.b 2>/dev/null && hard_links=no
++  touch conftest.a
++  ln conftest.a conftest.b 2>&5 || hard_links=no
++  ln conftest.a conftest.b 2>/dev/null && hard_links=no
++  AC_MSG_RESULT([$hard_links])
++  if test no = "$hard_links"; then
++    AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe])
++    need_locks=warn
++  fi
++else
++  need_locks=no
++fi
++_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?])
++])# _LT_COMPILER_FILE_LOCKS
++
++
++# _LT_CHECK_OBJDIR
++# ----------------
++m4_defun([_LT_CHECK_OBJDIR],
++[AC_CACHE_CHECK([for objdir], [lt_cv_objdir],
++[rm -f .libs 2>/dev/null
++mkdir .libs 2>/dev/null
++if test -d .libs; then
++  lt_cv_objdir=.libs
++else
++  # MS-DOS does not allow filenames that begin with a dot.
++  lt_cv_objdir=_libs
++fi
++rmdir .libs 2>/dev/null])
++objdir=$lt_cv_objdir
++_LT_DECL([], [objdir], [0],
++         [The name of the directory that contains temporary libtool files])dnl
++m4_pattern_allow([LT_OBJDIR])dnl
++AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/",
++  [Define to the sub-directory where libtool stores uninstalled libraries.])
++])# _LT_CHECK_OBJDIR
++
++
++# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME])
++# --------------------------------------
++# Check hardcoding attributes.
++m4_defun([_LT_LINKER_HARDCODE_LIBPATH],
++[AC_MSG_CHECKING([how to hardcode library paths into programs])
++_LT_TAGVAR(hardcode_action, $1)=
++if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" ||
++   test -n "$_LT_TAGVAR(runpath_var, $1)" ||
++   test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then
++
++  # We can hardcode non-existent directories.
++  if test no != "$_LT_TAGVAR(hardcode_direct, $1)" &&
++     # If the only mechanism to avoid hardcoding is shlibpath_var, we
++     # have to relink, otherwise we might link with an installed library
++     # when we should be linking with a yet-to-be-installed one
++     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" &&
++     test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then
++    # Linking always hardcodes the temporary library directory.
++    _LT_TAGVAR(hardcode_action, $1)=relink
++  else
++    # We can link without hardcoding, and we can hardcode nonexisting dirs.
++    _LT_TAGVAR(hardcode_action, $1)=immediate
++  fi
++else
++  # We cannot hardcode anything, or else we can only hardcode existing
++  # directories.
++  _LT_TAGVAR(hardcode_action, $1)=unsupported
++fi
++AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])
++
++if test relink = "$_LT_TAGVAR(hardcode_action, $1)" ||
++   test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then
++  # Fast installation is not supported
++  enable_fast_install=no
++elif test yes = "$shlibpath_overrides_runpath" ||
++     test no = "$enable_shared"; then
++  # Fast installation is not necessary
++  enable_fast_install=needless
++fi
++_LT_TAGDECL([], [hardcode_action], [0],
++    [How to hardcode a shared library path into an executable])
++])# _LT_LINKER_HARDCODE_LIBPATH
++
++
++# _LT_CMD_STRIPLIB
++# ----------------
++m4_defun([_LT_CMD_STRIPLIB],
++[m4_require([_LT_DECL_EGREP])
++striplib=
++old_striplib=
++AC_MSG_CHECKING([whether stripping libraries is possible])
++if test -z "$STRIP"; then
++  AC_MSG_RESULT([no])
++else
++  if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
++    old_striplib="$STRIP --strip-debug"
++    striplib="$STRIP --strip-unneeded"
++    AC_MSG_RESULT([yes])
++  else
++    case $host_os in
++    darwin*)
++      # FIXME - insert some real tests, host_os isn't really good enough
++      striplib="$STRIP -x"
++      old_striplib="$STRIP -S"
++      AC_MSG_RESULT([yes])
++      ;;
++    freebsd*)
++      if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then
++        old_striplib="$STRIP --strip-debug"
++        striplib="$STRIP --strip-unneeded"
++        AC_MSG_RESULT([yes])
++      else
++        AC_MSG_RESULT([no])
++      fi
++      ;;
++    *)
++      AC_MSG_RESULT([no])
++      ;;
++    esac
++  fi
++fi
++_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
++_LT_DECL([], [striplib], [1])
++])# _LT_CMD_STRIPLIB
++
++
++# _LT_PREPARE_MUNGE_PATH_LIST
++# ---------------------------
++# Make sure func_munge_path_list() is defined correctly.
++m4_defun([_LT_PREPARE_MUNGE_PATH_LIST],
++[[# func_munge_path_list VARIABLE PATH
++# -----------------------------------
++# VARIABLE is name of variable containing _space_ separated list of
++# directories to be munged by the contents of PATH, which is string
++# having a format:
++# "DIR[:DIR]:"
++#       string "DIR[ DIR]" will be prepended to VARIABLE
++# ":DIR[:DIR]"
++#       string "DIR[ DIR]" will be appended to VARIABLE
++# "DIRP[:DIRP]::[DIRA:]DIRA"
++#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
++#       "DIRA[ DIRA]" will be appended to VARIABLE
++# "DIR[:DIR]"
++#       VARIABLE will be replaced by "DIR[ DIR]"
++func_munge_path_list ()
++{
++    case x@S|@2 in
++    x)
++        ;;
++    *:)
++        eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\"
++        ;;
++    x:*)
++        eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\"
++        ;;
++    *::*)
++        eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
++        eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\"
++        ;;
++    *)
++        eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\"
++        ;;
++    esac
++}
++]])# _LT_PREPARE_PATH_LIST
++
++
++# _LT_SYS_DYNAMIC_LINKER([TAG])
++# -----------------------------
++# PORTME Fill in your ld.so characteristics
++m4_defun([_LT_SYS_DYNAMIC_LINKER],
++[AC_REQUIRE([AC_CANONICAL_HOST])dnl
++m4_require([_LT_DECL_EGREP])dnl
++m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++m4_require([_LT_DECL_OBJDUMP])dnl
++m4_require([_LT_DECL_SED])dnl
++m4_require([_LT_CHECK_SHELL_FEATURES])dnl
++m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl
++AC_MSG_CHECKING([dynamic linker characteristics])
++m4_if([$1],
++	[], [
++if test yes = "$GCC"; then
++  case $host_os in
++    darwin*) lt_awk_arg='/^libraries:/,/LR/' ;;
++    *) lt_awk_arg='/^libraries:/' ;;
++  esac
++  case $host_os in
++    mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;;
++    *) lt_sed_strip_eq='s|=/|/|g' ;;
++  esac
++  lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
++  case $lt_search_path_spec in
++  *\;*)
++    # if the path contains ";" then we assume it to be the separator
++    # otherwise default to the standard path separator (i.e. ":") - it is
++    # assumed that no part of a normal pathname contains ";" but that should
++    # okay in the real world where ";" in dirpaths is itself problematic.
++    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
++    ;;
++  *)
++    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
++    ;;
++  esac
++  # Ok, now we have the path, separated by spaces, we can step through it
++  # and add multilib dir if necessary...
++  lt_tmp_lt_search_path_spec=
++  lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
++  # ...but if some path component already ends with the multilib dir we assume
++  # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer).
++  case "$lt_multi_os_dir; $lt_search_path_spec " in
++  "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*)
++    lt_multi_os_dir=
++    ;;
++  esac
++  for lt_sys_path in $lt_search_path_spec; do
++    if test -d "$lt_sys_path$lt_multi_os_dir"; then
++      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir"
++    elif test -n "$lt_multi_os_dir"; then
++      test -d "$lt_sys_path" && \
++	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
++    fi
++  done
++  lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
++BEGIN {RS = " "; FS = "/|\n";} {
++  lt_foo = "";
++  lt_count = 0;
++  for (lt_i = NF; lt_i > 0; lt_i--) {
++    if ($lt_i != "" && $lt_i != ".") {
++      if ($lt_i == "..") {
++        lt_count++;
++      } else {
++        if (lt_count == 0) {
++          lt_foo = "/" $lt_i lt_foo;
++        } else {
++          lt_count--;
++        }
++      }
++    }
++  }
++  if (lt_foo != "") { lt_freq[[lt_foo]]++; }
++  if (lt_freq[[lt_foo]] == 1) { print lt_foo; }
++}'`
++  # AWK program above erroneously prepends '/' to C:/dos/paths
++  # for these hosts.
++  case $host_os in
++    mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
++      $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;;
++  esac
++  sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
++else
++  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
++fi])
++library_names_spec=
++libname_spec='lib$name'
++soname_spec=
++shrext_cmds=.so
++postinstall_cmds=
++postuninstall_cmds=
++finish_cmds=
++finish_eval=
++shlibpath_var=
++shlibpath_overrides_runpath=unknown
++version_type=none
++dynamic_linker="$host_os ld.so"
++sys_lib_dlsearch_path_spec="/lib /usr/lib"
++need_lib_prefix=unknown
++hardcode_into_libs=no
++
++# when you set need_version to no, make sure it does not cause -set_version
++# flags to be left without arguments
++need_version=unknown
++
++AC_ARG_VAR([LT_SYS_LIBRARY_PATH],
++[User-defined run-time library search path.])
++
++case $host_os in
++aix3*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
++  shlibpath_var=LIBPATH
++
++  # AIX 3 has no versioning support, so we append a major version to the name.
++  soname_spec='$libname$release$shared_ext$major'
++  ;;
++
++aix[[4-9]]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  hardcode_into_libs=yes
++  if test ia64 = "$host_cpu"; then
++    # AIX 5 supports IA64
++    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
++    shlibpath_var=LD_LIBRARY_PATH
++  else
++    # With GCC up to 2.95.x, collect2 would create an import file
++    # for dependence libraries.  The import file would start with
++    # the line '#! .'.  This would cause the generated library to
++    # depend on '.', always an invalid library.  This was fixed in
++    # development snapshots of GCC prior to 3.0.
++    case $host_os in
++      aix4 | aix4.[[01]] | aix4.[[01]].*)
++      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
++	   echo ' yes '
++	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
++	:
++      else
++	can_build_shared=no
++      fi
++      ;;
++    esac
++    # Using Import Files as archive members, it is possible to support
++    # filename-based versioning of shared library archives on AIX. While
++    # this would work for both with and without runtime linking, it will
++    # prevent static linking of such archives. So we do filename-based
++    # shared library versioning with .so extension only, which is used
++    # when both runtime linking and shared linking is enabled.
++    # Unfortunately, runtime linking may impact performance, so we do
++    # not want this to be the default eventually. Also, we use the
++    # versioned .so libs for executables only if there is the -brtl
++    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
++    # To allow for filename-based versioning support, we need to create
++    # libNAME.so.V as an archive file, containing:
++    # *) an Import File, referring to the versioned filename of the
++    #    archive as well as the shared archive member, telling the
++    #    bitwidth (32 or 64) of that shared object, and providing the
++    #    list of exported symbols of that shared object, eventually
++    #    decorated with the 'weak' keyword
++    # *) the shared object with the F_LOADONLY flag set, to really avoid
++    #    it being seen by the linker.
++    # At run time we better use the real file rather than another symlink,
++    # but for link time we create the symlink libNAME.so -> libNAME.so.V
++
++    case $with_aix_soname,$aix_use_runtimelinking in
++    # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct
++    # soname into executable. Probably we can add versioning support to
++    # collect2, so additional links can be useful in future.
++    aix,yes) # traditional libtool
++      dynamic_linker='AIX unversionable lib.so'
++      # If using run time linking (on AIX 4.2 or later) use lib.so
++      # instead of lib.a to let people know that these are not
++      # typical AIX shared libraries.
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++      ;;
++    aix,no) # traditional AIX only
++      dynamic_linker='AIX lib.a[(]lib.so.V[)]'
++      # We preserve .a as extension for shared libraries through AIX4.2
++      # and later when we are not doing run time linking.
++      library_names_spec='$libname$release.a $libname.a'
++      soname_spec='$libname$release$shared_ext$major'
++      ;;
++    svr4,*) # full svr4 only
++      dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]"
++      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
++      # We do not specify a path in Import Files, so LIBPATH fires.
++      shlibpath_overrides_runpath=yes
++      ;;
++    *,yes) # both, prefer svr4
++      dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]"
++      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
++      # unpreferred sharedlib libNAME.a needs extra handling
++      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
++      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
++      # We do not specify a path in Import Files, so LIBPATH fires.
++      shlibpath_overrides_runpath=yes
++      ;;
++    *,no) # both, prefer aix
++      dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]"
++      library_names_spec='$libname$release.a $libname.a'
++      soname_spec='$libname$release$shared_ext$major'
++      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
++      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
++      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
++      ;;
++    esac
++    shlibpath_var=LIBPATH
++  fi
++  ;;
++
++amigaos*)
++  case $host_cpu in
++  powerpc)
++    # Since July 2007 AmigaOS4 officially supports .so libraries.
++    # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    ;;
++  m68k)
++    library_names_spec='$libname.ixlibrary $libname.a'
++    # Create ${libname}_ixlibrary.a entries in /sys/libs.
++    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
++    ;;
++  esac
++  ;;
++
++beos*)
++  library_names_spec='$libname$shared_ext'
++  dynamic_linker="$host_os ld.so"
++  shlibpath_var=LIBRARY_PATH
++  ;;
++
++bsdi[[45]]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
++  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
++  # the default ld.so.conf also contains /usr/contrib/lib and
++  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
++  # libtool to hard-code these into programs
++  ;;
++
++cygwin* | mingw* | pw32* | cegcc*)
++  version_type=windows
++  shrext_cmds=.dll
++  need_version=no
++  need_lib_prefix=no
++
++  case $GCC,$cc_basename in
++  yes,*)
++    # gcc
++    library_names_spec='$libname.dll.a'
++    # DLL is installed to $(libdir)/../bin by postinstall_cmds
++    postinstall_cmds='base_file=`basename \$file`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++      dldir=$destdir/`dirname \$dlpath`~
++      test -d \$dldir || mkdir -p \$dldir~
++      $install_prog $dir/$dlname \$dldir/$dlname~
++      chmod a+x \$dldir/$dlname~
++      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
++        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
++      fi'
++    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
++      dlpath=$dir/\$dldll~
++       $RM \$dlpath'
++    shlibpath_overrides_runpath=yes
++
++    case $host_os in
++    cygwin*)
++      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
++      soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
++m4_if([$1], [],[
++      sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"])
++      ;;
++    mingw* | cegcc*)
++      # MinGW DLLs use traditional 'lib' prefix
++      soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
++      ;;
++    pw32*)
++      # pw32 DLLs use 'pw' prefix rather than 'lib'
++      library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
++      ;;
++    esac
++    dynamic_linker='Win32 ld.exe'
++    ;;
++
++  *,cl* | *,icl*)
++    # Native MSVC or ICC
++    libname_spec='$name'
++    soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
++    library_names_spec='$libname.dll.lib'
++
++    case $build_os in
++    mingw*)
++      sys_lib_search_path_spec=
++      lt_save_ifs=$IFS
++      IFS=';'
++      for lt_path in $LIB
++      do
++        IFS=$lt_save_ifs
++        # Let DOS variable expansion print the short 8.3 style file name.
++        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
++        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
++      done
++      IFS=$lt_save_ifs
++      # Convert to MSYS style.
++      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'`
++      ;;
++    cygwin*)
++      # Convert to unix form, then to dos form, then back to unix form
++      # but this time dos style (no spaces!) so that the unix form looks
++      # like /cygdrive/c/PROGRA~1:/cygdr...
++      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
++      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
++      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
++      ;;
++    *)
++      sys_lib_search_path_spec=$LIB
++      if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
++        # It is most probably a Windows format PATH.
++        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
++      else
++        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
++      fi
++      # FIXME: find the short name or the path components, as spaces are
++      # common. (e.g. "Program Files" -> "PROGRA~1")
++      ;;
++    esac
++
++    # DLL is installed to $(libdir)/../bin by postinstall_cmds
++    postinstall_cmds='base_file=`basename \$file`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++      dldir=$destdir/`dirname \$dlpath`~
++      test -d \$dldir || mkdir -p \$dldir~
++      $install_prog $dir/$dlname \$dldir/$dlname'
++    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
++      dlpath=$dir/\$dldll~
++       $RM \$dlpath'
++    shlibpath_overrides_runpath=yes
++    dynamic_linker='Win32 link.exe'
++    ;;
++
++  *)
++    # Assume MSVC and ICC wrapper
++    library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib'
++    dynamic_linker='Win32 ld.exe'
++    ;;
++  esac
++  # FIXME: first we should search . and the directory the executable is in
++  shlibpath_var=PATH
++  ;;
++
++darwin* | rhapsody*)
++  dynamic_linker="$host_os dyld"
++  version_type=darwin
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
++  soname_spec='$libname$release$major$shared_ext'
++  shlibpath_overrides_runpath=yes
++  shlibpath_var=DYLD_LIBRARY_PATH
++  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
++m4_if([$1], [],[
++  sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"])
++  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
++  ;;
++
++dgux*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  ;;
++
++freebsd* | dragonfly* | midnightbsd*)
++  # DragonFly does not have aout.  When/if they implement a new
++  # versioning mechanism, adjust this.
++  if test -x /usr/bin/objformat; then
++    objformat=`/usr/bin/objformat`
++  else
++    case $host_os in
++    freebsd[[23]].*) objformat=aout ;;
++    *) objformat=elf ;;
++    esac
++  fi
++  version_type=freebsd-$objformat
++  case $version_type in
++    freebsd-elf*)
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++      soname_spec='$libname$release$shared_ext$major'
++      need_version=no
++      need_lib_prefix=no
++      ;;
++    freebsd-*)
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++      need_version=yes
++      ;;
++  esac
++  shlibpath_var=LD_LIBRARY_PATH
++  case $host_os in
++  freebsd2.*)
++    shlibpath_overrides_runpath=yes
++    ;;
++  freebsd3.[[01]]* | freebsdelf3.[[01]]*)
++    shlibpath_overrides_runpath=yes
++    hardcode_into_libs=yes
++    ;;
++  freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \
++  freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1)
++    shlibpath_overrides_runpath=no
++    hardcode_into_libs=yes
++    ;;
++  *) # from 4.6 on, and DragonFly
++    shlibpath_overrides_runpath=yes
++    hardcode_into_libs=yes
++    ;;
++  esac
++  ;;
++
++haiku*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  dynamic_linker="$host_os runtime_loader"
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
++  hardcode_into_libs=yes
++  ;;
++
++hpux9* | hpux10* | hpux11*)
++  # Give a soname corresponding to the major version so that dld.sl refuses to
++  # link against other versions.
++  version_type=sunos
++  need_lib_prefix=no
++  need_version=no
++  case $host_cpu in
++  ia64*)
++    shrext_cmds='.so'
++    hardcode_into_libs=yes
++    dynamic_linker="$host_os dld.so"
++    shlibpath_var=LD_LIBRARY_PATH
++    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    if test 32 = "$HPUX_IA64_MODE"; then
++      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
++      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
++    else
++      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
++      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
++    fi
++    ;;
++  hppa*64*)
++    shrext_cmds='.sl'
++    hardcode_into_libs=yes
++    dynamic_linker="$host_os dld.sl"
++    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
++    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
++    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++    ;;
++  *)
++    shrext_cmds='.sl'
++    dynamic_linker="$host_os dld.sl"
++    shlibpath_var=SHLIB_PATH
++    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    ;;
++  esac
++  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
++  postinstall_cmds='chmod 555 $lib'
++  # or fails outright, so override atomically:
++  install_override_mode=555
++  ;;
++
++interix[[3-9]]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  ;;
++
++irix5* | irix6* | nonstopux*)
++  case $host_os in
++    nonstopux*) version_type=nonstopux ;;
++    *)
++	if test yes = "$lt_cv_prog_gnu_ld"; then
++		version_type=linux # correct to gnu/linux during the next big refactor
++	else
++		version_type=irix
++	fi ;;
++  esac
++  need_lib_prefix=no
++  need_version=no
++  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
++  case $host_os in
++  irix5* | nonstopux*)
++    libsuff= shlibsuff=
++    ;;
++  *)
++    case $LD in # libtool.m4 will add one of these switches to LD
++    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
++      libsuff= shlibsuff= libmagic=32-bit;;
++    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
++      libsuff=32 shlibsuff=N32 libmagic=N32;;
++    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
++      libsuff=64 shlibsuff=64 libmagic=64-bit;;
++    *) libsuff= shlibsuff= libmagic=never-match;;
++    esac
++    ;;
++  esac
++  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
++  shlibpath_overrides_runpath=no
++  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
++  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
++  hardcode_into_libs=yes
++  ;;
++
++# No shared lib support for Linux oldld, aout, or coff.
++linux*oldld* | linux*aout* | linux*coff*)
++  dynamic_linker=no
++  ;;
++
++linux*android*)
++  version_type=none # Android doesn't support versioned libraries.
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext'
++  soname_spec='$libname$release$shared_ext'
++  finish_cmds=
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++
++  # This implies no fast_install, which is unacceptable.
++  # Some rework will be needed to allow for fast_install
++  # before this can be enabled.
++  hardcode_into_libs=yes
++
++  dynamic_linker='Android linker'
++  # Don't embed -rpath directories since the linker doesn't support them.
++  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++  ;;
++
++# This must be glibc/ELF.
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++
++  # Some binutils ld are patched to set DT_RUNPATH
++  AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath],
++    [lt_cv_shlibpath_overrides_runpath=no
++    save_LDFLAGS=$LDFLAGS
++    save_libdir=$libdir
++    eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
++	 LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
++    AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
++      [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
++	 [lt_cv_shlibpath_overrides_runpath=yes])])
++    LDFLAGS=$save_LDFLAGS
++    libdir=$save_libdir
++    ])
++  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
++
++  # This implies no fast_install, which is unacceptable.
++  # Some rework will be needed to allow for fast_install
++  # before this can be enabled.
++  hardcode_into_libs=yes
++
++  # Add ABI-specific directories to the system library path.
++  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"
++
++  # Ideally, we could use ldconfig to report *all* directores which are
++  # searched for libraries, however this is still not possible.  Aside from not
++  # being certain /sbin/ldconfig is available, command
++  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
++  # even though it is searched at run-time.  Try to do the best guess by
++  # appending ld.so.conf contents (and includes) to the search path.
++  if test -f /etc/ld.so.conf; then
++    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
++    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
++  fi
++
++  # We used to test for /lib/ld.so.1 and disable shared libraries on
++  # powerpc, because MkLinux only supported shared libraries with the
++  # GNU dynamic linker.  Since this was broken with cross compilers,
++  # most powerpc-linux boxes support dynamic linking these days and
++  # people can always --disable-shared, the test was removed, and we
++  # assume the GNU/Linux dynamic linker is in use.
++  dynamic_linker='GNU/Linux ld.so'
++  ;;
++
++netbsd*)
++  version_type=sunos
++  need_lib_prefix=no
++  need_version=no
++  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
++    dynamic_linker='NetBSD (a.out) ld.so'
++  else
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    dynamic_linker='NetBSD ld.elf_so'
++  fi
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  ;;
++
++newsos6)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  ;;
++
++*nto* | *qnx*)
++  version_type=qnx
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  dynamic_linker='ldqnx.so'
++  ;;
++
++openbsd* | bitrig*)
++  version_type=sunos
++  sys_lib_dlsearch_path_spec=/usr/lib
++  need_lib_prefix=no
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++    need_version=no
++  else
++    need_version=yes
++  fi
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  ;;
++
++os2*)
++  libname_spec='$name'
++  version_type=windows
++  shrext_cmds=.dll
++  need_version=no
++  need_lib_prefix=no
++  # OS/2 can only load a DLL with a base name of 8 characters or less.
++  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
++    v=$($ECHO $release$versuffix | tr -d .-);
++    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
++    $ECHO $n$v`$shared_ext'
++  library_names_spec='${libname}_dll.$libext'
++  dynamic_linker='OS/2 ld.exe'
++  shlibpath_var=BEGINLIBPATH
++  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
++  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  postinstall_cmds='base_file=`basename \$file`~
++    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
++    dldir=$destdir/`dirname \$dlpath`~
++    test -d \$dldir || mkdir -p \$dldir~
++    $install_prog $dir/$dlname \$dldir/$dlname~
++    chmod a+x \$dldir/$dlname~
++    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
++      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
++    fi'
++  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
++    dlpath=$dir/\$dldll~
++    $RM \$dlpath'
++  ;;
++
++osf3* | osf4* | osf5*)
++  version_type=osf
++  need_lib_prefix=no
++  need_version=no
++  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
++  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  ;;
++
++rdos*)
++  dynamic_linker=no
++  ;;
++
++solaris*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  # ldd complains unless libraries are executable
++  postinstall_cmds='chmod +x $lib'
++  ;;
++
++sunos4*)
++  version_type=sunos
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  if test yes = "$with_gnu_ld"; then
++    need_lib_prefix=no
++  fi
++  need_version=yes
++  ;;
++
++sysv4 | sysv4.3*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  case $host_vendor in
++    sni)
++      shlibpath_overrides_runpath=no
++      need_lib_prefix=no
++      runpath_var=LD_RUN_PATH
++      ;;
++    siemens)
++      need_lib_prefix=no
++      ;;
++    motorola)
++      need_lib_prefix=no
++      need_version=no
++      shlibpath_overrides_runpath=no
++      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
++      ;;
++  esac
++  ;;
++
++sysv4*MP*)
++  if test -d /usr/nec; then
++    version_type=linux # correct to gnu/linux during the next big refactor
++    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
++    soname_spec='$libname$shared_ext.$major'
++    shlibpath_var=LD_LIBRARY_PATH
++  fi
++  ;;
++
++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
++  version_type=sco
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  if test yes = "$with_gnu_ld"; then
++    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
++  else
++    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
++    case $host_os in
++      sco3.2v5*)
++        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
++	;;
++    esac
++  fi
++  sys_lib_dlsearch_path_spec='/usr/lib'
++  ;;
++
++tpf*)
++  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  ;;
++
++uts4*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  ;;
++
++*)
++  dynamic_linker=no
++  ;;
++esac
++AC_MSG_RESULT([$dynamic_linker])
++test no = "$dynamic_linker" && can_build_shared=no
++
++variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
++if test yes = "$GCC"; then
++  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
++fi
++
++if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
++  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
++fi
++
++if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
++  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
++fi
++
++# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
++configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
++
++# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
++func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
++
++# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
++configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
++
++_LT_DECL([], [variables_saved_for_relink], [1],
++    [Variables whose values should be saved in libtool wrapper scripts and
++    restored at link time])
++_LT_DECL([], [need_lib_prefix], [0],
++    [Do we need the "lib" prefix for modules?])
++_LT_DECL([], [need_version], [0], [Do we need a version for libraries?])
++_LT_DECL([], [version_type], [0], [Library versioning type])
++_LT_DECL([], [runpath_var], [0],  [Shared library runtime path variable])
++_LT_DECL([], [shlibpath_var], [0],[Shared library path variable])
++_LT_DECL([], [shlibpath_overrides_runpath], [0],
++    [Is shlibpath searched before the hard-coded library search path?])
++_LT_DECL([], [libname_spec], [1], [Format of library name prefix])
++_LT_DECL([], [library_names_spec], [1],
++    [[List of archive names.  First name is the real one, the rest are links.
++    The last name is the one that the linker finds with -lNAME]])
++_LT_DECL([], [soname_spec], [1],
++    [[The coded name of the library, if different from the real name]])
++_LT_DECL([], [install_override_mode], [1],
++    [Permission mode override for installation of shared libraries])
++_LT_DECL([], [postinstall_cmds], [2],
++    [Command to use after installation of a shared archive])
++_LT_DECL([], [postuninstall_cmds], [2],
++    [Command to use after uninstallation of a shared archive])
++_LT_DECL([], [finish_cmds], [2],
++    [Commands used to finish a libtool library installation in a directory])
++_LT_DECL([], [finish_eval], [1],
++    [[As "finish_cmds", except a single script fragment to be evaled but
++    not shown]])
++_LT_DECL([], [hardcode_into_libs], [0],
++    [Whether we should hardcode library paths into libraries])
++_LT_DECL([], [sys_lib_search_path_spec], [2],
++    [Compile-time system search path for libraries])
++_LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2],
++    [Detected run-time system search path for libraries])
++_LT_DECL([], [configure_time_lt_sys_library_path], [2],
++    [Explicit LT_SYS_LIBRARY_PATH set during ./configure time])
++])# _LT_SYS_DYNAMIC_LINKER
++
++
++# _LT_PATH_TOOL_PREFIX(TOOL)
++# --------------------------
++# find a file program that can recognize shared library
++AC_DEFUN([_LT_PATH_TOOL_PREFIX],
++[m4_require([_LT_DECL_EGREP])dnl
++AC_MSG_CHECKING([for $1])
++AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
++[case $MAGIC_CMD in
++[[\\/*] |  ?:[\\/]*])
++  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
++  ;;
++*)
++  lt_save_MAGIC_CMD=$MAGIC_CMD
++  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++dnl $ac_dummy forces splitting on constant user-supplied paths.
++dnl POSIX.2 word splitting is done only on the output of word expansions,
++dnl not every word.  This closes a longstanding sh security hole.
++  ac_dummy="m4_if([$2], , $PATH, [$2])"
++  for ac_dir in $ac_dummy; do
++    IFS=$lt_save_ifs
++    test -z "$ac_dir" && ac_dir=.
++    if test -f "$ac_dir/$1"; then
++      lt_cv_path_MAGIC_CMD=$ac_dir/"$1"
++      if test -n "$file_magic_test_file"; then
++	case $deplibs_check_method in
++	"file_magic "*)
++	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
++	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
++	    $EGREP "$file_magic_regex" > /dev/null; then
++	    :
++	  else
++	    cat <<_LT_EOF 1>&2
++
++*** Warning: the command libtool uses to detect shared libraries,
++*** $file_magic_cmd, produces output that libtool cannot recognize.
++*** The result is that libtool may fail to recognize shared libraries
++*** as such.  This will affect the creation of libtool libraries that
++*** depend on shared libraries, but programs linked with such libtool
++*** libraries will work regardless of this problem.  Nevertheless, you
++*** may want to report the problem to your system manager and/or to
++*** bug-libtool@gnu.org
++
++_LT_EOF
++	  fi ;;
++	esac
++      fi
++      break
++    fi
++  done
++  IFS=$lt_save_ifs
++  MAGIC_CMD=$lt_save_MAGIC_CMD
++  ;;
++esac])
++MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++if test -n "$MAGIC_CMD"; then
++  AC_MSG_RESULT($MAGIC_CMD)
++else
++  AC_MSG_RESULT(no)
++fi
++_LT_DECL([], [MAGIC_CMD], [0],
++	 [Used to examine libraries when file_magic_cmd begins with "file"])dnl
++])# _LT_PATH_TOOL_PREFIX
++
++# Old name:
++AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], [])
++
++
++# _LT_PATH_MAGIC
++# --------------
++# find a file program that can recognize a shared library
++m4_defun([_LT_PATH_MAGIC],
++[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
++if test -z "$lt_cv_path_MAGIC_CMD"; then
++  if test -n "$ac_tool_prefix"; then
++    _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH)
++  else
++    MAGIC_CMD=:
++  fi
++fi
++])# _LT_PATH_MAGIC
++
++
++# LT_PATH_LD
++# ----------
++# find the pathname to the GNU or non-GNU linker
++AC_DEFUN([LT_PATH_LD],
++[AC_REQUIRE([AC_PROG_CC])dnl
++AC_REQUIRE([AC_CANONICAL_HOST])dnl
++AC_REQUIRE([AC_CANONICAL_BUILD])dnl
++m4_require([_LT_DECL_SED])dnl
++m4_require([_LT_DECL_EGREP])dnl
++m4_require([_LT_PROG_ECHO_BACKSLASH])dnl
++
++AC_ARG_WITH([gnu-ld],
++    [AS_HELP_STRING([--with-gnu-ld],
++	[assume the C compiler uses GNU ld @<:@default=no@:>@])],
++    [test no = "$withval" || with_gnu_ld=yes],
++    [with_gnu_ld=no])dnl
++
++ac_prog=ld
++if test yes = "$GCC"; then
++  # Check if gcc -print-prog-name=ld gives a path.
++  AC_MSG_CHECKING([for ld used by $CC])
++  case $host in
++  *-*-mingw*)
++    # gcc leaves a trailing carriage return, which upsets mingw
++    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
++  *)
++    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
++  esac
++  case $ac_prog in
++    # Accept absolute paths.
++    [[\\/]]* | ?:[[\\/]]*)
++      re_direlt='/[[^/]][[^/]]*/\.\./'
++      # Canonicalize the pathname of ld
++      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
++      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
++	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
++      done
++      test -z "$LD" && LD=$ac_prog
++      ;;
++  "")
++    # If it fails, then pretend we aren't using GCC.
++    ac_prog=ld
++    ;;
++  *)
++    # If it is relative, then search for the first ld in PATH.
++    with_gnu_ld=unknown
++    ;;
++  esac
++elif test yes = "$with_gnu_ld"; then
++  AC_MSG_CHECKING([for GNU ld])
++else
++  AC_MSG_CHECKING([for non-GNU ld])
++fi
++AC_CACHE_VAL(lt_cv_path_LD,
++[if test -z "$LD"; then
++  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  for ac_dir in $PATH; do
++    IFS=$lt_save_ifs
++    test -z "$ac_dir" && ac_dir=.
++    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
++      lt_cv_path_LD=$ac_dir/$ac_prog
++      # Check to see if the program is GNU ld.  I'd rather use --version,
++      # but apparently some variants of GNU ld only accept -v.
++      # Break only if it was the GNU/non-GNU ld that we prefer.
++      case `"$lt_cv_path_LD" -v 2>&1 &1 conftest.i
++cat conftest.i conftest.i >conftest2.i
++: ${lt_DD:=$DD}
++AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd],
++[if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
++  cmp -s conftest.i conftest.out \
++  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
++fi])
++rm -f conftest.i conftest2.i conftest.out])
++])# _LT_PATH_DD
++
++
++# _LT_CMD_TRUNCATE
++# ----------------
++# find command to truncate a binary pipe
++m4_defun([_LT_CMD_TRUNCATE],
++[m4_require([_LT_PATH_DD])
++AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin],
++[printf 0123456789abcdef0123456789abcdef >conftest.i
++cat conftest.i conftest.i >conftest2.i
++lt_cv_truncate_bin=
++if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
++  cmp -s conftest.i conftest.out \
++  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
++fi
++rm -f conftest.i conftest2.i conftest.out
++test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"])
++_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1],
++  [Command to truncate a binary pipe])
++])# _LT_CMD_TRUNCATE
++
++
++# _LT_CHECK_MAGIC_METHOD
++# ----------------------
++# how to check for library dependencies
++#  -- PORTME fill in with the dynamic library characteristics
++m4_defun([_LT_CHECK_MAGIC_METHOD],
++[m4_require([_LT_DECL_EGREP])
++m4_require([_LT_DECL_OBJDUMP])
++AC_CACHE_CHECK([how to recognize dependent libraries],
++lt_cv_deplibs_check_method,
++[lt_cv_file_magic_cmd='$MAGIC_CMD'
++lt_cv_file_magic_test_file=
++lt_cv_deplibs_check_method='unknown'
++# Need to set the preceding variable on all platforms that support
++# interlibrary dependencies.
++# 'none' -- dependencies not supported.
++# 'unknown' -- same as none, but documents that we really don't know.
++# 'pass_all' -- all dependencies passed with no checks.
++# 'test_compile' -- check by making test program.
++# 'file_magic [[regex]]' -- check by looking for files in library path
++# that responds to the $file_magic_cmd with a given extended regex.
++# If you have 'file' or equivalent on your system and you're not sure
++# whether 'pass_all' will *always* work, you probably want this one.
++
++case $host_os in
++aix[[4-9]]*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++beos*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++bsdi[[45]]*)
++  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
++  lt_cv_file_magic_cmd='$FILECMD -L'
++  lt_cv_file_magic_test_file=/shlib/libc.so
++  ;;
++
++cygwin*)
++  # func_win32_libid is a shell function defined in ltmain.sh
++  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
++  lt_cv_file_magic_cmd='func_win32_libid'
++  ;;
++
++mingw* | pw32*)
++  # Base MSYS/MinGW do not provide the 'file' command needed by
++  # func_win32_libid shell function, so use a weaker test based on 'objdump',
++  # unless we find 'file', for example because we are cross-compiling.
++  if ( file / ) >/dev/null 2>&1; then
++    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
++    lt_cv_file_magic_cmd='func_win32_libid'
++  else
++    # Keep this pattern in sync with the one in func_win32_libid.
++    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
++    lt_cv_file_magic_cmd='$OBJDUMP -f'
++  fi
++  ;;
++
++cegcc*)
++  # use the weaker test based on 'objdump'. See mingw*.
++  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
++  lt_cv_file_magic_cmd='$OBJDUMP -f'
++  ;;
++
++darwin* | rhapsody*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++freebsd* | dragonfly* | midnightbsd*)
++  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
++    case $host_cpu in
++    i*86 )
++      # Not sure whether the presence of OpenBSD here was a mistake.
++      # Let's accept both of them until this is cleared up.
++      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library'
++      lt_cv_file_magic_cmd=$FILECMD
++      lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
++      ;;
++    esac
++  else
++    lt_cv_deplibs_check_method=pass_all
++  fi
++  ;;
++
++haiku*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++hpux10.20* | hpux11*)
++  lt_cv_file_magic_cmd=$FILECMD
++  case $host_cpu in
++  ia64*)
++    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64'
++    lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
++    ;;
++  hppa*64*)
++    [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]']
++    lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
++    ;;
++  *)
++    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library'
++    lt_cv_file_magic_test_file=/usr/lib/libc.sl
++    ;;
++  esac
++  ;;
++
++interix[[3-9]]*)
++  # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
++  lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$'
++  ;;
++
++irix5* | irix6* | nonstopux*)
++  case $LD in
++  *-32|*"-32 ") libmagic=32-bit;;
++  *-n32|*"-n32 ") libmagic=N32;;
++  *-64|*"-64 ") libmagic=64-bit;;
++  *) libmagic=never-match;;
++  esac
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++# This must be glibc/ELF.
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++netbsd*)
++  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
++    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
++  else
++    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$'
++  fi
++  ;;
++
++newos6*)
++  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
++  lt_cv_file_magic_cmd=$FILECMD
++  lt_cv_file_magic_test_file=/usr/lib/libnls.so
++  ;;
++
++*nto* | *qnx*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++openbsd* | bitrig*)
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$'
++  else
++    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
++  fi
++  ;;
++
++osf3* | osf4* | osf5*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++rdos*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++solaris*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++sysv4 | sysv4.3*)
++  case $host_vendor in
++  motorola)
++    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
++    lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
++    ;;
++  ncr)
++    lt_cv_deplibs_check_method=pass_all
++    ;;
++  sequent)
++    lt_cv_file_magic_cmd='/bin/file'
++    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
++    ;;
++  sni)
++    lt_cv_file_magic_cmd='/bin/file'
++    lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
++    lt_cv_file_magic_test_file=/lib/libc.so
++    ;;
++  siemens)
++    lt_cv_deplibs_check_method=pass_all
++    ;;
++  pc)
++    lt_cv_deplibs_check_method=pass_all
++    ;;
++  esac
++  ;;
++
++tpf*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++os2*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++esac
++])
++
++file_magic_glob=
++want_nocaseglob=no
++if test "$build" = "$host"; then
++  case $host_os in
++  mingw* | pw32*)
++    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
++      want_nocaseglob=yes
++    else
++      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"`
++    fi
++    ;;
++  esac
++fi
++
++file_magic_cmd=$lt_cv_file_magic_cmd
++deplibs_check_method=$lt_cv_deplibs_check_method
++test -z "$deplibs_check_method" && deplibs_check_method=unknown
++
++_LT_DECL([], [deplibs_check_method], [1],
++    [Method to check whether dependent libraries are shared objects])
++_LT_DECL([], [file_magic_cmd], [1],
++    [Command to use when deplibs_check_method = "file_magic"])
++_LT_DECL([], [file_magic_glob], [1],
++    [How to find potential files when deplibs_check_method = "file_magic"])
++_LT_DECL([], [want_nocaseglob], [1],
++    [Find potential files using nocaseglob when deplibs_check_method = "file_magic"])
++])# _LT_CHECK_MAGIC_METHOD
++
++
++# LT_PATH_NM
++# ----------
++# find the pathname to a BSD- or MS-compatible name lister
++AC_DEFUN([LT_PATH_NM],
++[AC_REQUIRE([AC_PROG_CC])dnl
++AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM,
++[if test -n "$NM"; then
++  # Let the user override the test.
++  lt_cv_path_NM=$NM
++else
++  lt_nm_to_check=${ac_tool_prefix}nm
++  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
++    lt_nm_to_check="$lt_nm_to_check nm"
++  fi
++  for lt_tmp_nm in $lt_nm_to_check; do
++    lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
++      IFS=$lt_save_ifs
++      test -z "$ac_dir" && ac_dir=.
++      tmp_nm=$ac_dir/$lt_tmp_nm
++      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then
++	# Check to see if the nm accepts a BSD-compat flag.
++	# Adding the 'sed 1q' prevents false positives on HP-UX, which says:
++	#   nm: unknown option "B" ignored
++	# Tru64's nm complains that /dev/null is an invalid object file
++	# MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty
++	case $build_os in
++	mingw*) lt_bad_file=conftest.nm/nofile ;;
++	*) lt_bad_file=/dev/null ;;
++	esac
++	case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in
++	*$lt_bad_file* | *'Invalid file or object type'*)
++	  lt_cv_path_NM="$tmp_nm -B"
++	  break 2
++	  ;;
++	*)
++	  case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in
++	  */dev/null*)
++	    lt_cv_path_NM="$tmp_nm -p"
++	    break 2
++	    ;;
++	  *)
++	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
++	    continue # so that we can try to find one that supports BSD flags
++	    ;;
++	  esac
++	  ;;
++	esac
++      fi
++    done
++    IFS=$lt_save_ifs
++  done
++  : ${lt_cv_path_NM=no}
++fi])
++if test no != "$lt_cv_path_NM"; then
++  NM=$lt_cv_path_NM
++else
++  # Didn't find any BSD compatible name lister, look for dumpbin.
++  if test -n "$DUMPBIN"; then :
++    # Let the user override the test.
++  else
++    AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :)
++    case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in
++    *COFF*)
++      DUMPBIN="$DUMPBIN -symbols -headers"
++      ;;
++    *)
++      DUMPBIN=:
++      ;;
++    esac
++  fi
++  AC_SUBST([DUMPBIN])
++  if test : != "$DUMPBIN"; then
++    NM=$DUMPBIN
++  fi
++fi
++test -z "$NM" && NM=nm
++AC_SUBST([NM])
++_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl
++
++AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface],
++  [lt_cv_nm_interface="BSD nm"
++  echo "int some_variable = 0;" > conftest.$ac_ext
++  (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
++  (eval "$ac_compile" 2>conftest.err)
++  cat conftest.err >&AS_MESSAGE_LOG_FD
++  (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
++  (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
++  cat conftest.err >&AS_MESSAGE_LOG_FD
++  (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD)
++  cat conftest.out >&AS_MESSAGE_LOG_FD
++  if $GREP 'External.*some_variable' conftest.out > /dev/null; then
++    lt_cv_nm_interface="MS dumpbin"
++  fi
++  rm -f conftest*])
++])# LT_PATH_NM
++
++# Old names:
++AU_ALIAS([AM_PROG_NM], [LT_PATH_NM])
++AU_ALIAS([AC_PROG_NM], [LT_PATH_NM])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AM_PROG_NM], [])
++dnl AC_DEFUN([AC_PROG_NM], [])
++
++# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
++# --------------------------------
++# how to determine the name of the shared library
++# associated with a specific link library.
++#  -- PORTME fill in with the dynamic library characteristics
++m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB],
++[m4_require([_LT_DECL_EGREP])
++m4_require([_LT_DECL_OBJDUMP])
++m4_require([_LT_DECL_DLLTOOL])
++AC_CACHE_CHECK([how to associate runtime and link libraries],
++lt_cv_sharedlib_from_linklib_cmd,
++[lt_cv_sharedlib_from_linklib_cmd='unknown'
++
++case $host_os in
++cygwin* | mingw* | pw32* | cegcc*)
++  # two different shell functions defined in ltmain.sh;
++  # decide which one to use based on capabilities of $DLLTOOL
++  case `$DLLTOOL --help 2>&1` in
++  *--identify-strict*)
++    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
++    ;;
++  *)
++    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
++    ;;
++  esac
++  ;;
++*)
++  # fallback: assume linklib IS sharedlib
++  lt_cv_sharedlib_from_linklib_cmd=$ECHO
++  ;;
++esac
++])
++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
++
++_LT_DECL([], [sharedlib_from_linklib_cmd], [1],
++    [Command to associate shared and link libraries])
++])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
++
++
++# _LT_PATH_MANIFEST_TOOL
++# ----------------------
++# locate the manifest tool
++m4_defun([_LT_PATH_MANIFEST_TOOL],
++[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :)
++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
++AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool],
++  [lt_cv_path_mainfest_tool=no
++  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD
++  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
++  cat conftest.err >&AS_MESSAGE_LOG_FD
++  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
++    lt_cv_path_mainfest_tool=yes
++  fi
++  rm -f conftest*])
++if test yes != "$lt_cv_path_mainfest_tool"; then
++  MANIFEST_TOOL=:
++fi
++_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl
++])# _LT_PATH_MANIFEST_TOOL
++
++
++# _LT_DLL_DEF_P([FILE])
++# ---------------------
++# True iff FILE is a Windows DLL '.def' file.
++# Keep in sync with func_dll_def_p in the libtool script
++AC_DEFUN([_LT_DLL_DEF_P],
++[dnl
++  test DEF = "`$SED -n dnl
++    -e '\''s/^[[	 ]]*//'\'' dnl Strip leading whitespace
++    -e '\''/^\(;.*\)*$/d'\'' dnl      Delete empty lines and comments
++    -e '\''s/^\(EXPORTS\|LIBRARY\)\([[	 ]].*\)*$/DEF/p'\'' dnl
++    -e q dnl                          Only consider the first "real" line
++    $1`" dnl
++])# _LT_DLL_DEF_P
++
++
++# LT_LIB_M
++# --------
++# check for math library
++AC_DEFUN([LT_LIB_M],
++[AC_REQUIRE([AC_CANONICAL_HOST])dnl
++LIBM=
++case $host in
++*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*)
++  # These system don't have libm, or don't need it
++  ;;
++*-ncr-sysv4.3*)
++  AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw)
++  AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm")
++  ;;
++*)
++  AC_CHECK_LIB(m, cos, LIBM=-lm)
++  ;;
++esac
++AC_SUBST([LIBM])
++])# LT_LIB_M
++
++# Old name:
++AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_CHECK_LIBM], [])
++
++
++# _LT_COMPILER_NO_RTTI([TAGNAME])
++# -------------------------------
++m4_defun([_LT_COMPILER_NO_RTTI],
++[m4_require([_LT_TAG_COMPILER])dnl
++
++_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
++
++if test yes = "$GCC"; then
++  case $cc_basename in
++  nvcc*)
++    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;;
++  *)
++    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;;
++  esac
++
++  _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
++    lt_cv_prog_compiler_rtti_exceptions,
++    [-fno-rtti -fno-exceptions], [],
++    [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"])
++fi
++_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
++	[Compiler flag to turn off builtin functions])
++])# _LT_COMPILER_NO_RTTI
++
++
++# _LT_CMD_GLOBAL_SYMBOLS
++# ----------------------
++m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
++[AC_REQUIRE([AC_CANONICAL_HOST])dnl
++AC_REQUIRE([AC_PROG_CC])dnl
++AC_REQUIRE([AC_PROG_AWK])dnl
++AC_REQUIRE([LT_PATH_NM])dnl
++AC_REQUIRE([LT_PATH_LD])dnl
++m4_require([_LT_DECL_SED])dnl
++m4_require([_LT_DECL_EGREP])dnl
++m4_require([_LT_TAG_COMPILER])dnl
++
++# Check for command to grab the raw symbol name followed by C symbol from nm.
++AC_MSG_CHECKING([command to parse $NM output from $compiler object])
++AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe],
++[
++# These are sane defaults that work on at least a few old systems.
++# [They come from Ultrix.  What could be older than Ultrix?!! ;)]
++
++# Character class describing NM global symbol codes.
++symcode='[[BCDEGRST]]'
++
++# Regexp to match symbols that can be accessed directly from C.
++sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
++
++# Define system-specific variables.
++case $host_os in
++aix*)
++  symcode='[[BCDT]]'
++  ;;
++cygwin* | mingw* | pw32* | cegcc*)
++  symcode='[[ABCDGISTW]]'
++  ;;
++hpux*)
++  if test ia64 = "$host_cpu"; then
++    symcode='[[ABCDEGRST]]'
++  fi
++  ;;
++irix* | nonstopux*)
++  symcode='[[BCDEGRST]]'
++  ;;
++osf*)
++  symcode='[[BCDEGQRST]]'
++  ;;
++solaris*)
++  symcode='[[BDRT]]'
++  ;;
++sco3.2v5*)
++  symcode='[[DT]]'
++  ;;
++sysv4.2uw2*)
++  symcode='[[DT]]'
++  ;;
++sysv5* | sco5v6* | unixware* | OpenUNIX*)
++  symcode='[[ABDT]]'
++  ;;
++sysv4)
++  symcode='[[DFNSTU]]'
++  ;;
++esac
++
++# If we're using GNU nm, then use its standard symbol codes.
++case `$NM -V 2>&1` in
++*GNU* | *'with BFD'*)
++  symcode='[[ABCDGIRSTW]]' ;;
++esac
++
++if test "$lt_cv_nm_interface" = "MS dumpbin"; then
++  # Gets list of data symbols to import.
++  lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'"
++  # Adjust the below global symbol transforms to fixup imported variables.
++  lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'"
++  lt_c_name_hook=" -e 's/^I .* \(.*\)$/  {\"\1\", (void *) 0},/p'"
++  lt_c_name_lib_hook="\
++  -e 's/^I .* \(lib.*\)$/  {\"\1\", (void *) 0},/p'\
++  -e 's/^I .* \(.*\)$/  {\"lib\1\", (void *) 0},/p'"
++else
++  # Disable hooks by default.
++  lt_cv_sys_global_symbol_to_import=
++  lt_cdecl_hook=
++  lt_c_name_hook=
++  lt_c_name_lib_hook=
++fi
++
++# Transform an extracted symbol line into a proper C declaration.
++# Some systems (esp. on ia64) link data and code symbols differently,
++# so use this general approach.
++lt_cv_sys_global_symbol_to_cdecl="$SED -n"\
++$lt_cdecl_hook\
++" -e 's/^T .* \(.*\)$/extern int \1();/p'"\
++" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'"
++
++# Transform an extracted symbol line into symbol name and symbol address
++lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\
++$lt_c_name_hook\
++" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
++" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/p'"
++
++# Transform an extracted symbol line into symbol name with lib prefix and
++# symbol address.
++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\
++$lt_c_name_lib_hook\
++" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
++" -e 's/^$symcode$symcode* .* \(lib.*\)$/  {\"\1\", (void *) \&\1},/p'"\
++" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"lib\1\", (void *) \&\1},/p'"
++
++# Handle CRLF in mingw tool chain
++opt_cr=
++case $build_os in
++mingw*)
++  opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
++  ;;
++esac
++
++# Try without a prefix underscore, then with it.
++for ac_symprfx in "" "_"; do
++
++  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
++  symxfrm="\\1 $ac_symprfx\\2 \\2"
++
++  # Write the raw and C identifiers.
++  if test "$lt_cv_nm_interface" = "MS dumpbin"; then
++    # Fake it for dumpbin and say T for any non-static function,
++    # D for any global variable and I for any imported variable.
++    # Also find C++ and __fastcall symbols from MSVC++ or ICC,
++    # which start with @ or ?.
++    lt_cv_sys_global_symbol_pipe="$AWK ['"\
++"     {last_section=section; section=\$ 3};"\
++"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
++"     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
++"     /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\
++"     /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\
++"     /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\
++"     \$ 0!~/External *\|/{next};"\
++"     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
++"     {if(hide[section]) next};"\
++"     {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\
++"     {split(\$ 0,a,/\||\r/); split(a[2],s)};"\
++"     s[1]~/^[@?]/{print f,s[1],s[1]; next};"\
++"     s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\
++"     ' prfx=^$ac_symprfx]"
++  else
++    lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[[	 ]]\($symcode$symcode*\)[[	 ]][[	 ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
++  fi
++  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'"
++
++  # Check to see that the pipe works correctly.
++  pipe_works=no
++
++  rm -f conftest*
++  cat > conftest.$ac_ext <<_LT_EOF
++#ifdef __cplusplus
++extern "C" {
++#endif
++char nm_test_var;
++void nm_test_func(void);
++void nm_test_func(void){}
++#ifdef __cplusplus
++}
++#endif
++int main(){nm_test_var='a';nm_test_func();return(0);}
++_LT_EOF
++
++  if AC_TRY_EVAL(ac_compile); then
++    # Now try to grab the symbols.
++    nlist=conftest.nm
++    if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then
++      # Try sorting and uniquifying the output.
++      if sort "$nlist" | uniq > "$nlist"T; then
++	mv -f "$nlist"T "$nlist"
++      else
++	rm -f "$nlist"T
++      fi
++
++      # Make sure that we snagged all the symbols we need.
++      if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
++	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
++	  cat <<_LT_EOF > conftest.$ac_ext
++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
++#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
++/* DATA imports from DLLs on WIN32 can't be const, because runtime
++   relocations are performed -- see ld's documentation on pseudo-relocs.  */
++# define LT@&t@_DLSYM_CONST
++#elif defined __osf__
++/* This system does not cope well with relocations in const data.  */
++# define LT@&t@_DLSYM_CONST
++#else
++# define LT@&t@_DLSYM_CONST const
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++_LT_EOF
++	  # Now generate the symbol file.
++	  eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
++
++	  cat <<_LT_EOF >> conftest.$ac_ext
++
++/* The mapping between symbol names and symbols.  */
++LT@&t@_DLSYM_CONST struct {
++  const char *name;
++  void       *address;
++}
++lt__PROGRAM__LTX_preloaded_symbols[[]] =
++{
++  { "@PROGRAM@", (void *) 0 },
++_LT_EOF
++	  $SED "s/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
++	  cat <<\_LT_EOF >> conftest.$ac_ext
++  {0, (void *) 0}
++};
++
++/* This works around a problem in FreeBSD linker */
++#ifdef FREEBSD_WORKAROUND
++static const void *lt_preloaded_setup() {
++  return lt__PROGRAM__LTX_preloaded_symbols;
++}
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++_LT_EOF
++	  # Now try linking the two files.
++	  mv conftest.$ac_objext conftstm.$ac_objext
++	  lt_globsym_save_LIBS=$LIBS
++	  lt_globsym_save_CFLAGS=$CFLAGS
++	  LIBS=conftstm.$ac_objext
++	  CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
++	  if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then
++	    pipe_works=yes
++	  fi
++	  LIBS=$lt_globsym_save_LIBS
++	  CFLAGS=$lt_globsym_save_CFLAGS
++	else
++	  echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
++	fi
++      else
++	echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD
++      fi
++    else
++      echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
++    fi
++  else
++    echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD
++    cat conftest.$ac_ext >&5
++  fi
++  rm -rf conftest* conftst*
++
++  # Do not use the global_symbol_pipe unless it works.
++  if test yes = "$pipe_works"; then
++    break
++  else
++    lt_cv_sys_global_symbol_pipe=
++  fi
++done
++])
++if test -z "$lt_cv_sys_global_symbol_pipe"; then
++  lt_cv_sys_global_symbol_to_cdecl=
++fi
++if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
++  AC_MSG_RESULT(failed)
++else
++  AC_MSG_RESULT(ok)
++fi
++
++# Response file support.
++if test "$lt_cv_nm_interface" = "MS dumpbin"; then
++  nm_file_list_spec='@'
++elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then
++  nm_file_list_spec='@'
++fi
++
++_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1],
++    [Take the output of nm and produce a listing of raw symbols and C names])
++_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1],
++    [Transform the output of nm in a proper C declaration])
++_LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1],
++    [Transform the output of nm into a list of symbols to manually relocate])
++_LT_DECL([global_symbol_to_c_name_address],
++    [lt_cv_sys_global_symbol_to_c_name_address], [1],
++    [Transform the output of nm in a C name address pair])
++_LT_DECL([global_symbol_to_c_name_address_lib_prefix],
++    [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1],
++    [Transform the output of nm in a C name address pair when lib prefix is needed])
++_LT_DECL([nm_interface], [lt_cv_nm_interface], [1],
++    [The name lister interface])
++_LT_DECL([], [nm_file_list_spec], [1],
++    [Specify filename containing input files for $NM])
++]) # _LT_CMD_GLOBAL_SYMBOLS
++
++
++# _LT_COMPILER_PIC([TAGNAME])
++# ---------------------------
++m4_defun([_LT_COMPILER_PIC],
++[m4_require([_LT_TAG_COMPILER])dnl
++_LT_TAGVAR(lt_prog_compiler_wl, $1)=
++_LT_TAGVAR(lt_prog_compiler_pic, $1)=
++_LT_TAGVAR(lt_prog_compiler_static, $1)=
++
++m4_if([$1], [CXX], [
++  # C++ specific cases for pic, static, wl, etc.
++  if test yes = "$GXX"; then
++    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++
++    case $host_os in
++    aix*)
++      # All AIX code is PIC.
++      if test ia64 = "$host_cpu"; then
++	# AIX 5 now supports IA64 processor
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      fi
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++        ;;
++      m68k)
++            # FIXME: we need at least 68020 code to build shared libraries, but
++            # adding the '-m68020' flag to GCC prevents building anything better,
++            # like '-m68040'.
++            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
++        ;;
++      esac
++      ;;
++
++    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
++      # PIC is the default for these OSes.
++      ;;
++    mingw* | cygwin* | os2* | pw32* | cegcc*)
++      # This hack is so that the source file can tell whether it is being
++      # built for inclusion in a dll (and should export symbols for example).
++      # Although the cygwin gcc ignores -fPIC, still need this for old-style
++      # (--disable-auto-import) libraries
++      m4_if([$1], [GCJ], [],
++	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
++      case $host_os in
++      os2*)
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
++	;;
++      esac
++      ;;
++    darwin* | rhapsody*)
++      # PIC is the default on this platform
++      # Common symbols not allowed in MH_DYLIB files
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
++      ;;
++    *djgpp*)
++      # DJGPP does not support shared libraries at all
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)=
++      ;;
++    haiku*)
++      # PIC is the default for Haiku.
++      # The "-static" flag exists, but is broken.
++      _LT_TAGVAR(lt_prog_compiler_static, $1)=
++      ;;
++    interix[[3-9]]*)
++      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
++      # Instead, we relocate shared libraries at runtime.
++      ;;
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
++      fi
++      ;;
++    hpux*)
++      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
++      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
++      # sets the default TLS model and affects inlining.
++      case $host_cpu in
++      hppa*64*)
++	;;
++      *)
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++	;;
++      esac
++      ;;
++    *qnx* | *nto*)
++      # QNX uses GNU C++, but need to define -shared option too, otherwise
++      # it will coredump.
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
++      ;;
++    *)
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++      ;;
++    esac
++  else
++    case $host_os in
++      aix[[4-9]]*)
++	# All AIX code is PIC.
++	if test ia64 = "$host_cpu"; then
++	  # AIX 5 now supports IA64 processor
++	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	else
++	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
++	fi
++	;;
++      chorus*)
++	case $cc_basename in
++	cxch68*)
++	  # Green Hills C++ Compiler
++	  # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
++	  ;;
++	esac
++	;;
++      mingw* | cygwin* | os2* | pw32* | cegcc*)
++	# This hack is so that the source file can tell whether it is being
++	# built for inclusion in a dll (and should export symbols for example).
++	m4_if([$1], [GCJ], [],
++	  [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
++	;;
++      dgux*)
++	case $cc_basename in
++	  ec++*)
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	    ;;
++	  ghcx*)
++	    # Green Hills C++ Compiler
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      freebsd* | dragonfly* | midnightbsd*)
++	# FreeBSD uses GNU C++
++	;;
++      hpux9* | hpux10* | hpux11*)
++	case $cc_basename in
++	  CC*)
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
++	    if test ia64 != "$host_cpu"; then
++	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
++	    fi
++	    ;;
++	  aCC*)
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
++	    case $host_cpu in
++	    hppa*64*|ia64*)
++	      # +Z the default
++	      ;;
++	    *)
++	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
++	      ;;
++	    esac
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      interix*)
++	# This is c89, which is MS Visual C++ (no shared libs)
++	# Anyone wants to do a port?
++	;;
++      irix5* | irix6* | nonstopux*)
++	case $cc_basename in
++	  CC*)
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
++	    # CC pic flag -KPIC is the default.
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++	case $cc_basename in
++	  KCC*)
++	    # KAI C++ Compiler
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++	    ;;
++	  ecpc* )
++	    # old Intel C++ for x86_64, which still supported -KPIC.
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++	    ;;
++	  icpc* )
++	    # Intel C++, used to be incompatible with GCC.
++	    # ICC 10 doesn't accept -KPIC any more.
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++	    ;;
++	  pgCC* | pgcpp*)
++	    # Portland Group C++ compiler
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	    ;;
++	  cxx*)
++	    # Compaq C++
++	    # Make sure the PIC flag is empty.  It appears that all Alpha
++	    # Linux and Compaq Tru64 Unix objects are PIC.
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
++	    ;;
++	  xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*)
++	    # IBM XL 8.0, 9.0 on PPC and BlueGene
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
++	    ;;
++	  *)
++	    case `$CC -V 2>&1 | $SED 5q` in
++	    *Sun\ C*)
++	      # Sun C++ 5.9
++	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
++	      ;;
++	    esac
++	    ;;
++	esac
++	;;
++      lynxos*)
++	;;
++      m88k*)
++	;;
++      mvs*)
++	case $cc_basename in
++	  cxx*)
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      netbsd*)
++	;;
++      *qnx* | *nto*)
++        # QNX uses GNU C++, but need to define -shared option too, otherwise
++        # it will coredump.
++        _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
++        ;;
++      osf3* | osf4* | osf5*)
++	case $cc_basename in
++	  KCC*)
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
++	    ;;
++	  RCC*)
++	    # Rational C++ 2.4.1
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
++	    ;;
++	  cxx*)
++	    # Digital/Compaq C++
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    # Make sure the PIC flag is empty.  It appears that all Alpha
++	    # Linux and Compaq Tru64 Unix objects are PIC.
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      psos*)
++	;;
++      solaris*)
++	case $cc_basename in
++	  CC* | sunCC*)
++	    # Sun C++ 4.2, 5.x and Centerline C++
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
++	    ;;
++	  gcx*)
++	    # Green Hills C++ Compiler
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      sunos4*)
++	case $cc_basename in
++	  CC*)
++	    # Sun C++ 4.x
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	    ;;
++	  lcc*)
++	    # Lucid
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
++	case $cc_basename in
++	  CC*)
++	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	    ;;
++	esac
++	;;
++      tandem*)
++	case $cc_basename in
++	  NCC*)
++	    # NonStop-UX NCC 3.20
++	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      vxworks*)
++	;;
++      *)
++	_LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
++	;;
++    esac
++  fi
++],
++[
++  if test yes = "$GCC"; then
++    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++
++    case $host_os in
++      aix*)
++      # All AIX code is PIC.
++      if test ia64 = "$host_cpu"; then
++	# AIX 5 now supports IA64 processor
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      fi
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++        ;;
++      m68k)
++            # FIXME: we need at least 68020 code to build shared libraries, but
++            # adding the '-m68020' flag to GCC prevents building anything better,
++            # like '-m68040'.
++            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
++        ;;
++      esac
++      ;;
++
++    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
++      # PIC is the default for these OSes.
++      ;;
++
++    mingw* | cygwin* | pw32* | os2* | cegcc*)
++      # This hack is so that the source file can tell whether it is being
++      # built for inclusion in a dll (and should export symbols for example).
++      # Although the cygwin gcc ignores -fPIC, still need this for old-style
++      # (--disable-auto-import) libraries
++      m4_if([$1], [GCJ], [],
++	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
++      case $host_os in
++      os2*)
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
++	;;
++      esac
++      ;;
++
++    darwin* | rhapsody*)
++      # PIC is the default on this platform
++      # Common symbols not allowed in MH_DYLIB files
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
++      ;;
++
++    haiku*)
++      # PIC is the default for Haiku.
++      # The "-static" flag exists, but is broken.
++      _LT_TAGVAR(lt_prog_compiler_static, $1)=
++      ;;
++
++    hpux*)
++      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
++      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
++      # sets the default TLS model and affects inlining.
++      case $host_cpu in
++      hppa*64*)
++	# +Z the default
++	;;
++      *)
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++	;;
++      esac
++      ;;
++
++    interix[[3-9]]*)
++      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
++      # Instead, we relocate shared libraries at runtime.
++      ;;
++
++    msdosdjgpp*)
++      # Just because we use GCC doesn't mean we suddenly get shared libraries
++      # on systems that don't support them.
++      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
++      enable_shared=no
++      ;;
++
++    *nto* | *qnx*)
++      # QNX uses GNU C++, but need to define -shared option too, otherwise
++      # it will coredump.
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
++      ;;
++
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
++      fi
++      ;;
++
++    *)
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++      ;;
++    esac
++
++    case $cc_basename in
++    nvcc*) # Cuda Compiler Driver 2.2
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker '
++      if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
++        _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)"
++      fi
++      ;;
++    esac
++  else
++    # PORTME Check for flag to pass linker flags through the system compiler.
++    case $host_os in
++    aix*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++      if test ia64 = "$host_cpu"; then
++	# AIX 5 now supports IA64 processor
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      else
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
++      fi
++      ;;
++
++    darwin* | rhapsody*)
++      # PIC is the default on this platform
++      # Common symbols not allowed in MH_DYLIB files
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
++      case $cc_basename in
++      nagfor*)
++        # NAG Fortran compiler
++        _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
++        _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
++        _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++        ;;
++      esac
++      ;;
++
++    mingw* | cygwin* | pw32* | os2* | cegcc*)
++      # This hack is so that the source file can tell whether it is being
++      # built for inclusion in a dll (and should export symbols for example).
++      m4_if([$1], [GCJ], [],
++	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
++      case $host_os in
++      os2*)
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
++	;;
++      esac
++      ;;
++
++    hpux9* | hpux10* | hpux11*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
++      # not for PA HP-UX.
++      case $host_cpu in
++      hppa*64*|ia64*)
++	# +Z the default
++	;;
++      *)
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
++	;;
++      esac
++      # Is there a better lt_prog_compiler_static that works with the bundled CC?
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
++      ;;
++
++    irix5* | irix6* | nonstopux*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++      # PIC (with -KPIC) is the default.
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
++      ;;
++
++    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++      case $cc_basename in
++      # old Intel for x86_64, which still supported -KPIC.
++      ecc*)
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++        ;;
++      # icc used to be incompatible with GCC.
++      # ICC 10 doesn't accept -KPIC any more.
++      icc* | ifort*)
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++        ;;
++      # Lahey Fortran 8.1.
++      lf95*)
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='--static'
++	;;
++      nagfor*)
++	# NAG Fortran compiler
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	;;
++      tcc*)
++	# Fabrice Bellard et al's Tiny C Compiler
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++	;;
++      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
++        # Portland Group compilers (*not* the Pentium gcc compiler,
++	# which looks to be a dead project)
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++        ;;
++      ccc*)
++        _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++        # All Alpha code is PIC.
++        _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
++        ;;
++      xl* | bgxl* | bgf* | mpixl*)
++	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
++	;;
++      *)
++	case `$CC -V 2>&1 | $SED 5q` in
++	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*)
++	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
++	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	  _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
++	  ;;
++	*Sun\ F* | *Sun*Fortran*)
++	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
++	  ;;
++	*Sun\ C*)
++	  # Sun C 5.9
++	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	  ;;
++        *Intel*\ [[CF]]*Compiler*)
++	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
++	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
++	  ;;
++	*Portland\ Group*)
++	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
++	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++	  ;;
++	esac
++	;;
++      esac
++      ;;
++
++    newsos6)
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      ;;
++
++    *nto* | *qnx*)
++      # QNX uses GNU C++, but need to define -shared option too, otherwise
++      # it will coredump.
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
++      ;;
++
++    osf3* | osf4* | osf5*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++      # All OSF/1 code is PIC.
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
++      ;;
++
++    rdos*)
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
++      ;;
++
++    solaris*)
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      case $cc_basename in
++      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
++      *)
++	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
++      esac
++      ;;
++
++    sunos4*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      ;;
++
++    sysv4 | sysv4.2uw2* | sysv4.3*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      ;;
++
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic'
++	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      fi
++      ;;
++
++    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      ;;
++
++    unicos*)
++      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
++      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
++      ;;
++
++    uts4*)
++      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
++      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
++      ;;
++
++    *)
++      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
++      ;;
++    esac
++  fi
++])
++case $host_os in
++  # For platforms that do not support PIC, -DPIC is meaningless:
++  *djgpp*)
++    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
++    ;;
++  *)
++    _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])"
++    ;;
++esac
++
++AC_CACHE_CHECK([for $compiler option to produce PIC],
++  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)],
++  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
++_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)
++
++#
++# Check to make sure the PIC flag actually works.
++#
++if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
++  _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works],
++    [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)],
++    [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [],
++    [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in
++     "" | " "*) ;;
++     *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;;
++     esac],
++    [_LT_TAGVAR(lt_prog_compiler_pic, $1)=
++     _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no])
++fi
++_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1],
++	[Additional compiler flags for building library objects])
++
++_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
++	[How to pass a linker flag through the compiler])
++#
++# Check to make sure the static flag actually works.
++#
++wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\"
++_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works],
++  _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1),
++  $lt_tmp_static_flag,
++  [],
++  [_LT_TAGVAR(lt_prog_compiler_static, $1)=])
++_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1],
++	[Compiler flag to prevent dynamic linking])
++])# _LT_COMPILER_PIC
++
++
++# _LT_LINKER_SHLIBS([TAGNAME])
++# ----------------------------
++# See if the linker supports building shared libraries.
++m4_defun([_LT_LINKER_SHLIBS],
++[AC_REQUIRE([LT_PATH_LD])dnl
++AC_REQUIRE([LT_PATH_NM])dnl
++m4_require([_LT_PATH_MANIFEST_TOOL])dnl
++m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++m4_require([_LT_DECL_EGREP])dnl
++m4_require([_LT_DECL_SED])dnl
++m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
++m4_require([_LT_TAG_COMPILER])dnl
++AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
++m4_if([$1], [CXX], [
++  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
++  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
++  case $host_os in
++  aix[[4-9]]*)
++    # If we're using GNU nm, then we don't want the "-C" option.
++    # -C means demangle to GNU nm, but means don't demangle to AIX nm.
++    # Without the "-l" option, or with the "-B" option, AIX nm treats
++    # weak defined symbols like other global defined symbols, whereas
++    # GNU nm marks them as "W".
++    # While the 'weak' keyword is ignored in the Export File, we need
++    # it in the Import File for the 'aix-soname' feature, so we have
++    # to replace the "-B" option with "-P" for AIX nm.
++    if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
++      _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
++    else
++      _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
++    fi
++    ;;
++  pw32*)
++    _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds
++    ;;
++  cygwin* | mingw* | cegcc*)
++    case $cc_basename in
++    cl* | icl*)
++      _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
++      ;;
++    *)
++      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
++      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
++      ;;
++    esac
++    ;;
++  *)
++    _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
++    ;;
++  esac
++], [
++  runpath_var=
++  _LT_TAGVAR(allow_undefined_flag, $1)=
++  _LT_TAGVAR(always_export_symbols, $1)=no
++  _LT_TAGVAR(archive_cmds, $1)=
++  _LT_TAGVAR(archive_expsym_cmds, $1)=
++  _LT_TAGVAR(compiler_needs_object, $1)=no
++  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
++  _LT_TAGVAR(export_dynamic_flag_spec, $1)=
++  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
++  _LT_TAGVAR(hardcode_automatic, $1)=no
++  _LT_TAGVAR(hardcode_direct, $1)=no
++  _LT_TAGVAR(hardcode_direct_absolute, $1)=no
++  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
++  _LT_TAGVAR(hardcode_libdir_separator, $1)=
++  _LT_TAGVAR(hardcode_minus_L, $1)=no
++  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
++  _LT_TAGVAR(inherit_rpath, $1)=no
++  _LT_TAGVAR(link_all_deplibs, $1)=unknown
++  _LT_TAGVAR(module_cmds, $1)=
++  _LT_TAGVAR(module_expsym_cmds, $1)=
++  _LT_TAGVAR(old_archive_from_new_cmds, $1)=
++  _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)=
++  _LT_TAGVAR(thread_safe_flag_spec, $1)=
++  _LT_TAGVAR(whole_archive_flag_spec, $1)=
++  # include_expsyms should be a list of space-separated symbols to be *always*
++  # included in the symbol list
++  _LT_TAGVAR(include_expsyms, $1)=
++  # exclude_expsyms can be an extended regexp of symbols to exclude
++  # it will be wrapped by ' (' and ')$', so one must not match beginning or
++  # end of line.  Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc',
++  # as well as any symbol that contains 'd'.
++  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
++  # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
++  # platforms (ab)use it in PIC code, but their linkers get confused if
++  # the symbol is explicitly referenced.  Since portable code cannot
++  # rely on this symbol name, it's probably fine to never include it in
++  # preloaded symbol tables.
++  # Exclude shared library initialization/finalization symbols.
++dnl Note also adjust exclude_expsyms for C++ above.
++  extract_expsyms_cmds=
++
++  case $host_os in
++  cygwin* | mingw* | pw32* | cegcc*)
++    # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time
++    # When not using gcc, we currently assume that we are using
++    # Microsoft Visual C++ or Intel C++ Compiler.
++    if test yes != "$GCC"; then
++      with_gnu_ld=no
++    fi
++    ;;
++  interix*)
++    # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC)
++    with_gnu_ld=yes
++    ;;
++  openbsd* | bitrig*)
++    with_gnu_ld=no
++    ;;
++  esac
++
++  _LT_TAGVAR(ld_shlibs, $1)=yes
++
++  # On some targets, GNU ld is compatible enough with the native linker
++  # that we're better off using the native interface for both.
++  lt_use_gnu_ld_interface=no
++  if test yes = "$with_gnu_ld"; then
++    case $host_os in
++      aix*)
++	# The AIX port of GNU ld has always aspired to compatibility
++	# with the native linker.  However, as the warning in the GNU ld
++	# block says, versions before 2.19.5* couldn't really create working
++	# shared libraries, regardless of the interface used.
++	case `$LD -v 2>&1` in
++	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
++	  *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;;
++	  *\ \(GNU\ Binutils\)\ [[3-9]]*) ;;
++	  *)
++	    lt_use_gnu_ld_interface=yes
++	    ;;
++	esac
++	;;
++      *)
++	lt_use_gnu_ld_interface=yes
++	;;
++    esac
++  fi
++
++  if test yes = "$lt_use_gnu_ld_interface"; then
++    # If archive_cmds runs LD, not CC, wlarc should be empty
++    wlarc='$wl'
++
++    # Set some defaults for GNU ld with shared library support. These
++    # are reset later if shared libraries are not supported. Putting them
++    # here allows them to be overridden if necessary.
++    runpath_var=LD_RUN_PATH
++    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
++    # ancient GNU ld didn't support --whole-archive et. al.
++    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
++      _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++    else
++      _LT_TAGVAR(whole_archive_flag_spec, $1)=
++    fi
++    supports_anon_versioning=no
++    case `$LD -v | $SED -e 's/([[^)]]\+)\s\+//' 2>&1` in
++      *GNU\ gold*) supports_anon_versioning=yes ;;
++      *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
++      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
++      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
++      *\ 2.11.*) ;; # other 2.11 versions
++      *) supports_anon_versioning=yes ;;
++    esac
++
++    # See if GNU ld supports shared libraries.
++    case $host_os in
++    aix[[3-9]]*)
++      # On AIX/PPC, the GNU linker is very broken
++      if test ia64 != "$host_cpu"; then
++	_LT_TAGVAR(ld_shlibs, $1)=no
++	cat <<_LT_EOF 1>&2
++
++*** Warning: the GNU linker, at least up to release 2.19, is reported
++*** to be unable to reliably create shared libraries on AIX.
++*** Therefore, libtool is disabling shared libraries support.  If you
++*** really care for shared libraries, you may want to install binutils
++*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
++*** You will then need to restart the configuration process.
++
++_LT_EOF
++      fi
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++            _LT_TAGVAR(archive_expsym_cmds, $1)=''
++        ;;
++      m68k)
++            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
++            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++            _LT_TAGVAR(hardcode_minus_L, $1)=yes
++        ;;
++      esac
++      ;;
++
++    beos*)
++      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++	# Joseph Beckenbach  says some releases of gcc
++	# support --undefined.  This deserves some investigation.  FIXME
++	_LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++      else
++	_LT_TAGVAR(ld_shlibs, $1)=no
++      fi
++      ;;
++
++    cygwin* | mingw* | pw32* | cegcc*)
++      # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
++      # as there is no search path for DLLs.
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols'
++      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++      _LT_TAGVAR(always_export_symbols, $1)=no
++      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
++      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
++
++      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
++        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++	# If the export-symbols file already is a .def file, use it as
++	# is; otherwise, prepend EXPORTS...
++	_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
++          cp $export_symbols $output_objdir/$soname.def;
++        else
++          echo EXPORTS > $output_objdir/$soname.def;
++          cat $export_symbols >> $output_objdir/$soname.def;
++        fi~
++        $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++      else
++	_LT_TAGVAR(ld_shlibs, $1)=no
++      fi
++      ;;
++
++    haiku*)
++      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++      _LT_TAGVAR(link_all_deplibs, $1)=yes
++      ;;
++
++    os2*)
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++      _LT_TAGVAR(hardcode_minus_L, $1)=yes
++      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++      shrext_cmds=.dll
++      _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	prefix_cmds="$SED"~
++	if test EXPORTS = "`$SED 1q $export_symbols`"; then
++	  prefix_cmds="$prefix_cmds -e 1d";
++	fi~
++	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
++	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
++      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++      _LT_TAGVAR(file_list_spec, $1)='@'
++      ;;
++
++    interix[[3-9]]*)
++      _LT_TAGVAR(hardcode_direct, $1)=no
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
++      # Instead, shared libraries are loaded at an image base (0x10000000 by
++      # default) and relocated if they conflict, which is a slow very memory
++      # consuming and fragmenting process.  To avoid this, we pick a random,
++      # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
++      # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
++      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++      _LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++      ;;
++
++    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
++      tmp_diet=no
++      if test linux-dietlibc = "$host_os"; then
++	case $cc_basename in
++	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
++	esac
++      fi
++      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
++	 && test no = "$tmp_diet"
++      then
++	tmp_addflag=' $pic_flag'
++	tmp_sharedflag='-shared'
++	case $cc_basename,$host_cpu in
++        pgcc*)				# Portland Group C compiler
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  tmp_addflag=' $pic_flag'
++	  ;;
++	pgf77* | pgf90* | pgf95* | pgfortran*)
++					# Portland Group f77 and f90 compilers
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  tmp_addflag=' $pic_flag -Mnomain' ;;
++	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
++	  tmp_addflag=' -i_dynamic' ;;
++	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
++	  tmp_addflag=' -i_dynamic -nofor_main' ;;
++	ifc* | ifort*)			# Intel Fortran compiler
++	  tmp_addflag=' -nofor_main' ;;
++	lf95*)				# Lahey Fortran 8.1
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)=
++	  tmp_sharedflag='--shared' ;;
++        nagfor*)                        # NAGFOR 5.3
++          tmp_sharedflag='-Wl,-shared' ;;
++	xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
++	  tmp_sharedflag='-qmkshrobj'
++	  tmp_addflag= ;;
++	nvcc*)	# Cuda Compiler Driver 2.2
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  _LT_TAGVAR(compiler_needs_object, $1)=yes
++	  ;;
++	esac
++	case `$CC -V 2>&1 | $SED 5q` in
++	*Sun\ C*)			# Sun C 5.9
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  _LT_TAGVAR(compiler_needs_object, $1)=yes
++	  tmp_sharedflag='-G' ;;
++	*Sun\ F*)			# Sun Fortran 8.3
++	  tmp_sharedflag='-G' ;;
++	esac
++	_LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++
++        if test yes = "$supports_anon_versioning"; then
++          _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
++            cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++            echo "local: *; };" >> $output_objdir/$libname.ver~
++            $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
++        fi
++
++	case $cc_basename in
++	tcc*)
++	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic'
++	  ;;
++	xlf* | bgf* | bgxlf* | mpixlf*)
++	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive'
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++	  _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
++	  if test yes = "$supports_anon_versioning"; then
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
++              cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++              echo "local: *; };" >> $output_objdir/$libname.ver~
++              $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
++	  fi
++	  ;;
++	esac
++      else
++        _LT_TAGVAR(ld_shlibs, $1)=no
++      fi
++      ;;
++
++    netbsd*)
++      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
++	wlarc=
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++      fi
++      ;;
++
++    solaris*)
++      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
++	_LT_TAGVAR(ld_shlibs, $1)=no
++	cat <<_LT_EOF 1>&2
++
++*** Warning: The releases 2.8.* of the GNU linker cannot reliably
++*** create shared libraries on Solaris systems.  Therefore, libtool
++*** is disabling shared libraries support.  We urge you to upgrade GNU
++*** binutils to release 2.9.1 or newer.  Another option is to modify
++*** your PATH or compiler configuration so that the native linker is
++*** used, and then restart.
++
++_LT_EOF
++      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++      else
++	_LT_TAGVAR(ld_shlibs, $1)=no
++      fi
++      ;;
++
++    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
++      case `$LD -v 2>&1` in
++        *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*)
++	_LT_TAGVAR(ld_shlibs, $1)=no
++	cat <<_LT_EOF 1>&2
++
++*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
++*** reliably create shared libraries on SCO systems.  Therefore, libtool
++*** is disabling shared libraries support.  We urge you to upgrade GNU
++*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
++*** your PATH or compiler configuration so that the native linker is
++*** used, and then restart.
++
++_LT_EOF
++	;;
++	*)
++	  # For security reasons, it is highly recommended that you always
++	  # use absolute paths for naming shared libraries, and exclude the
++	  # DT_RUNPATH tag from executables and libraries.  But doing so
++	  # requires that you compile everything twice, which is a pain.
++	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	  else
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	  fi
++	;;
++      esac
++      ;;
++
++    sunos4*)
++      _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
++      wlarc=
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    *)
++      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++      else
++	_LT_TAGVAR(ld_shlibs, $1)=no
++      fi
++      ;;
++    esac
++
++    if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then
++      runpath_var=
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)=
++      _LT_TAGVAR(whole_archive_flag_spec, $1)=
++    fi
++  else
++    # PORTME fill in a description of your system's linker (not GNU ld)
++    case $host_os in
++    aix3*)
++      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++      _LT_TAGVAR(always_export_symbols, $1)=yes
++      _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
++      # Note: this linker hardcodes the directories in LIBPATH if there
++      # are no directories specified by -L.
++      _LT_TAGVAR(hardcode_minus_L, $1)=yes
++      if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then
++	# Neither direct hardcoding nor static linking is supported with a
++	# broken collect2.
++	_LT_TAGVAR(hardcode_direct, $1)=unsupported
++      fi
++      ;;
++
++    aix[[4-9]]*)
++      if test ia64 = "$host_cpu"; then
++	# On IA64, the linker does run time linking by default, so we don't
++	# have to do anything special.
++	aix_use_runtimelinking=no
++	exp_sym_flag='-Bexport'
++	no_entry_flag=
++      else
++	# If we're using GNU nm, then we don't want the "-C" option.
++	# -C means demangle to GNU nm, but means don't demangle to AIX nm.
++	# Without the "-l" option, or with the "-B" option, AIX nm treats
++	# weak defined symbols like other global defined symbols, whereas
++	# GNU nm marks them as "W".
++	# While the 'weak' keyword is ignored in the Export File, we need
++	# it in the Import File for the 'aix-soname' feature, so we have
++	# to replace the "-B" option with "-P" for AIX nm.
++	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
++	  _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
++	else
++	  _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
++	fi
++	aix_use_runtimelinking=no
++
++	# Test if we are trying to use run time linking or normal
++	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
++	# have runtime linking enabled, and use it for executables.
++	# For shared libraries, we enable/disable runtime linking
++	# depending on the kind of the shared library created -
++	# when "with_aix_soname,aix_use_runtimelinking" is:
++	# "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
++	# "aix,yes"  lib.so          shared, rtl:yes, for executables
++	#            lib.a           static archive
++	# "both,no"  lib.so.V(shr.o) shared, rtl:yes
++	#            lib.a(lib.so.V) shared, rtl:no,  for executables
++	# "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
++	#            lib.a(lib.so.V) shared, rtl:no
++	# "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
++	#            lib.a           static archive
++	case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
++	  for ld_flag in $LDFLAGS; do
++	  if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then
++	    aix_use_runtimelinking=yes
++	    break
++	  fi
++	  done
++	  if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
++	    # With aix-soname=svr4, we create the lib.so.V shared archives only,
++	    # so we don't have lib.a shared libs to link our executables.
++	    # We have to force runtime linking in this case.
++	    aix_use_runtimelinking=yes
++	    LDFLAGS="$LDFLAGS -Wl,-brtl"
++	  fi
++	  ;;
++	esac
++
++	exp_sym_flag='-bexport'
++	no_entry_flag='-bnoentry'
++      fi
++
++      # When large executables or shared objects are built, AIX ld can
++      # have problems creating the table of contents.  If linking a library
++      # or program results in "error TOC overflow" add -mminimal-toc to
++      # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
++      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
++
++      _LT_TAGVAR(archive_cmds, $1)=''
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
++      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
++      _LT_TAGVAR(link_all_deplibs, $1)=yes
++      _LT_TAGVAR(file_list_spec, $1)='$wl-f,'
++      case $with_aix_soname,$aix_use_runtimelinking in
++      aix,*) ;; # traditional, no import file
++      svr4,* | *,yes) # use import file
++	# The Import File defines what to hardcode.
++	_LT_TAGVAR(hardcode_direct, $1)=no
++	_LT_TAGVAR(hardcode_direct_absolute, $1)=no
++	;;
++      esac
++
++      if test yes = "$GCC"; then
++	case $host_os in aix4.[[012]]|aix4.[[012]].*)
++	# We only want to do this on AIX 4.2 and lower, the check
++	# below for broken collect2 doesn't work under 4.3+
++	  collect2name=`$CC -print-prog-name=collect2`
++	  if test -f "$collect2name" &&
++	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
++	  then
++	  # We have reworked collect2
++	  :
++	  else
++	  # We have old collect2
++	  _LT_TAGVAR(hardcode_direct, $1)=unsupported
++	  # It fails to find uninstalled libraries when the uninstalled
++	  # path is not listed in the libpath.  Setting hardcode_minus_L
++	  # to unsupported forces relinking
++	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++	  _LT_TAGVAR(hardcode_libdir_separator, $1)=
++	  fi
++	  ;;
++	esac
++	shared_flag='-shared'
++	if test yes = "$aix_use_runtimelinking"; then
++	  shared_flag="$shared_flag "'$wl-G'
++	fi
++	# Need to ensure runtime linking is disabled for the traditional
++	# shared library, or the linker may eventually find shared libraries
++	# /with/ Import File - we do not want to mix them.
++	shared_flag_aix='-shared'
++	shared_flag_svr4='-shared $wl-G'
++      else
++	# not using gcc
++	if test ia64 = "$host_cpu"; then
++	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
++	# chokes on -Wl,-G. The following line is correct:
++	  shared_flag='-G'
++	else
++	  if test yes = "$aix_use_runtimelinking"; then
++	    shared_flag='$wl-G'
++	  else
++	    shared_flag='$wl-bM:SRE'
++	  fi
++	  shared_flag_aix='$wl-bM:SRE'
++	  shared_flag_svr4='$wl-G'
++	fi
++      fi
++
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall'
++      # It seems that -bexpall does not export symbols beginning with
++      # underscore (_), so it is better to generate a list of symbols to export.
++      _LT_TAGVAR(always_export_symbols, $1)=yes
++      if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
++	# Warning - without using the other runtime loading flags (-brtl),
++	# -berok will link without error, but may produce a broken library.
++	_LT_TAGVAR(allow_undefined_flag, $1)='-berok'
++        # Determine the default libpath from the value encoded in an
++        # empty executable.
++        _LT_SYS_MODULE_PATH_AIX([$1])
++        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
++        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
++      else
++	if test ia64 = "$host_cpu"; then
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib'
++	  _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
++	  _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
++	else
++	 # Determine the default libpath from the value encoded in an
++	 # empty executable.
++	 _LT_SYS_MODULE_PATH_AIX([$1])
++	 _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
++	  # Warning - without using the other run time loading flags,
++	  # -berok will link without error, but may produce a broken library.
++	  _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok'
++	  _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok'
++	  if test yes = "$with_gnu_ld"; then
++	    # We only use this code for GNU lds that support --whole-archive.
++	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
++	  else
++	    # Exported symbols can be pulled into shared objects from archives
++	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
++	  fi
++	  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
++	  _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
++	  # -brtl affects multiple linker settings, -berok does not and is overridden later
++	  compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`'
++	  if test svr4 != "$with_aix_soname"; then
++	    # This is similar to how AIX traditionally builds its shared libraries.
++	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
++	  fi
++	  if test aix != "$with_aix_soname"; then
++	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
++	  else
++	    # used by -dlpreopen to get the symbols
++	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
++	  fi
++	  _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d'
++	fi
++      fi
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++            _LT_TAGVAR(archive_expsym_cmds, $1)=''
++        ;;
++      m68k)
++            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
++            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++            _LT_TAGVAR(hardcode_minus_L, $1)=yes
++        ;;
++      esac
++      ;;
++
++    bsdi[[45]]*)
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic
++      ;;
++
++    cygwin* | mingw* | pw32* | cegcc*)
++      # When not using gcc, we currently assume that we are using
++      # Microsoft Visual C++ or Intel C++ Compiler.
++      # hardcode_libdir_flag_spec is actually meaningless, as there is
++      # no search path for DLLs.
++      case $cc_basename in
++      cl* | icl*)
++	# Native MSVC or ICC
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
++	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++	_LT_TAGVAR(always_export_symbols, $1)=yes
++	_LT_TAGVAR(file_list_spec, $1)='@'
++	# Tell ltmain to make .lib files, not .a files.
++	libext=lib
++	# Tell ltmain to make .dll files, not .so files.
++	shrext_cmds=.dll
++	# FIXME: Setting linknames here is a bad hack.
++	_LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
++	_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
++            cp "$export_symbols" "$output_objdir/$soname.def";
++            echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
++          else
++            $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
++          fi~
++          $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
++          linknames='
++	# The linker will not automatically build a static lib if we build a DLL.
++	# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
++	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++	_LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
++	_LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
++	# Don't use ranlib
++	_LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
++	_LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
++          lt_tool_outputfile="@TOOL_OUTPUT@"~
++          case $lt_outputfile in
++            *.exe|*.EXE) ;;
++            *)
++              lt_outputfile=$lt_outputfile.exe
++              lt_tool_outputfile=$lt_tool_outputfile.exe
++              ;;
++          esac~
++          if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
++            $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
++            $RM "$lt_outputfile.manifest";
++          fi'
++	;;
++      *)
++	# Assume MSVC and ICC wrapper
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
++	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++	# Tell ltmain to make .lib files, not .a files.
++	libext=lib
++	# Tell ltmain to make .dll files, not .so files.
++	shrext_cmds=.dll
++	# FIXME: Setting linknames here is a bad hack.
++	_LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
++	# The linker will automatically build a .lib file if we build a DLL.
++	_LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
++	# FIXME: Should let the user specify the lib program.
++	_LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
++	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++	;;
++      esac
++      ;;
++
++    darwin* | rhapsody*)
++      _LT_DARWIN_LINKER_FEATURES($1)
++      ;;
++
++    dgux*)
++      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
++    # support.  Future versions do this automatically, but an explicit c++rt0.o
++    # does not break anything, and helps significantly (at the cost of a little
++    # extra space).
++    freebsd2.2*)
++      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
++    freebsd2.*)
++      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_minus_L, $1)=yes
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
++    freebsd* | dragonfly* | midnightbsd*)
++      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    hpux9*)
++      if test yes = "$GCC"; then
++	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++      fi
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
++      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++
++      # hardcode_minus_L: Not really in the search PATH,
++      # but as the default location of the library.
++      _LT_TAGVAR(hardcode_minus_L, $1)=yes
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++      ;;
++
++    hpux10*)
++      if test yes,no = "$GCC,$with_gnu_ld"; then
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
++      fi
++      if test no = "$with_gnu_ld"; then
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
++	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
++	_LT_TAGVAR(hardcode_direct, $1)=yes
++	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
++	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++	# hardcode_minus_L: Not really in the search PATH,
++	# but as the default location of the library.
++	_LT_TAGVAR(hardcode_minus_L, $1)=yes
++      fi
++      ;;
++
++    hpux11*)
++      if test yes,no = "$GCC,$with_gnu_ld"; then
++	case $host_cpu in
++	hppa*64*)
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	ia64*)
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	*)
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	esac
++      else
++	case $host_cpu in
++	hppa*64*)
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	ia64*)
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	*)
++	m4_if($1, [], [
++	  # Older versions of the 11.00 compiler do not understand -b yet
++	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
++	  _LT_LINKER_OPTION([if $CC understands -b],
++	    _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b],
++	    [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'],
++	    [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])],
++	  [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'])
++	  ;;
++	esac
++      fi
++      if test no = "$with_gnu_ld"; then
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
++	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
++
++	case $host_cpu in
++	hppa*64*|ia64*)
++	  _LT_TAGVAR(hardcode_direct, $1)=no
++	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	  ;;
++	*)
++	  _LT_TAGVAR(hardcode_direct, $1)=yes
++	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
++	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++
++	  # hardcode_minus_L: Not really in the search PATH,
++	  # but as the default location of the library.
++	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
++	  ;;
++	esac
++      fi
++      ;;
++
++    irix5* | irix6* | nonstopux*)
++      if test yes = "$GCC"; then
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	# Try to use the -exported_symbol ld option, if it does not
++	# work, assume that -exports_file does not work either and
++	# implicitly export all symbols.
++	# This should be the same for all languages, so no per-tag cache variable.
++	AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol],
++	  [lt_cv_irix_exported_symbol],
++	  [save_LDFLAGS=$LDFLAGS
++	   LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null"
++	   AC_LINK_IFELSE(
++	     [AC_LANG_SOURCE(
++	        [AC_LANG_CASE([C], [[int foo (void) { return 0; }]],
++			      [C++], [[int foo (void) { return 0; }]],
++			      [Fortran 77], [[
++      subroutine foo
++      end]],
++			      [Fortran], [[
++      subroutine foo
++      end]])])],
++	      [lt_cv_irix_exported_symbol=yes],
++	      [lt_cv_irix_exported_symbol=no])
++           LDFLAGS=$save_LDFLAGS])
++	if test yes = "$lt_cv_irix_exported_symbol"; then
++          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib'
++	fi
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib'
++      fi
++      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++      _LT_TAGVAR(inherit_rpath, $1)=yes
++      _LT_TAGVAR(link_all_deplibs, $1)=yes
++      ;;
++
++    linux*)
++      case $cc_basename in
++      tcc*)
++	# Fabrice Bellard et al's Tiny C Compiler
++	_LT_TAGVAR(ld_shlibs, $1)=yes
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++	;;
++      esac
++      ;;
++
++    netbsd*)
++      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
++      fi
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    newsos6)
++      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    *nto* | *qnx*)
++      ;;
++
++    openbsd* | bitrig*)
++      if test -f /usr/libexec/ld.so; then
++	_LT_TAGVAR(hardcode_direct, $1)=yes
++	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
++	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols'
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++	else
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++	fi
++      else
++	_LT_TAGVAR(ld_shlibs, $1)=no
++      fi
++      ;;
++
++    os2*)
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++      _LT_TAGVAR(hardcode_minus_L, $1)=yes
++      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++      shrext_cmds=.dll
++      _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	prefix_cmds="$SED"~
++	if test EXPORTS = "`$SED 1q $export_symbols`"; then
++	  prefix_cmds="$prefix_cmds -e 1d";
++	fi~
++	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
++	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
++      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++      _LT_TAGVAR(file_list_spec, $1)='@'
++      ;;
++
++    osf3*)
++      if test yes = "$GCC"; then
++	_LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++      else
++	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++      fi
++      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++      ;;
++
++    osf4* | osf5*)	# as osf3* with the addition of -msym flag
++      if test yes = "$GCC"; then
++	_LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++      else
++	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
++          $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp'
++
++	# Both c and cxx compiler support -rpath directly
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
++      fi
++      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
++      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++      ;;
++
++    solaris*)
++      _LT_TAGVAR(no_undefined_flag, $1)=' -z defs'
++      if test yes = "$GCC"; then
++	wlarc='$wl'
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++          $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
++      else
++	case `$CC -V 2>&1` in
++	*"Compilers 5.0"*)
++	  wlarc=''
++	  _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++            $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
++	  ;;
++	*)
++	  wlarc='$wl'
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags'
++	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++            $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
++	  ;;
++	esac
++      fi
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      case $host_os in
++      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
++      *)
++	# The compiler driver will combine and reorder linker options,
++	# but understands '-z linker_flag'.  GCC discards it without '$wl',
++	# but is careful enough not to reorder.
++	# Supported since Solaris 2.6 (maybe 2.5.1?)
++	if test yes = "$GCC"; then
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
++	else
++	  _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
++	fi
++	;;
++      esac
++      _LT_TAGVAR(link_all_deplibs, $1)=yes
++      ;;
++
++    sunos4*)
++      if test sequent = "$host_vendor"; then
++	# Use $CC to link under sequent, because it throws in some extra .o
++	# files that make .init and .fini sections work.
++	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
++      fi
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++      _LT_TAGVAR(hardcode_direct, $1)=yes
++      _LT_TAGVAR(hardcode_minus_L, $1)=yes
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    sysv4)
++      case $host_vendor in
++	sni)
++	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	  _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true???
++	;;
++	siemens)
++	  ## LD is ld it makes a PLAMLIB
++	  ## CC just makes a GrossModule.
++	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags'
++	  _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs'
++	  _LT_TAGVAR(hardcode_direct, $1)=no
++        ;;
++	motorola)
++	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	  _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie
++	;;
++      esac
++      runpath_var='LD_RUN_PATH'
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    sysv4.3*)
++      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport'
++      ;;
++
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	_LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	runpath_var=LD_RUN_PATH
++	hardcode_runpath_var=yes
++	_LT_TAGVAR(ld_shlibs, $1)=yes
++      fi
++      ;;
++
++    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
++      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
++      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      runpath_var='LD_RUN_PATH'
++
++      if test yes = "$GCC"; then
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      fi
++      ;;
++
++    sysv5* | sco3.2v5* | sco5v6*)
++      # Note: We CANNOT use -z defs as we might desire, because we do not
++      # link with -lc, and that would cause any symbols used from libc to
++      # always be unresolved, which means just about no library would
++      # ever link correctly.  If we're not using GNU ld we use -z text
++      # though, which does catch some bad symbols but isn't as heavy-handed
++      # as -z defs.
++      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
++      _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs'
++      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir'
++      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
++      _LT_TAGVAR(link_all_deplibs, $1)=yes
++      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport'
++      runpath_var='LD_RUN_PATH'
++
++      if test yes = "$GCC"; then
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      fi
++      ;;
++
++    uts4*)
++      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      ;;
++
++    *)
++      _LT_TAGVAR(ld_shlibs, $1)=no
++      ;;
++    esac
++
++    if test sni = "$host_vendor"; then
++      case $host in
++      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
++	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym'
++	;;
++      esac
++    fi
++  fi
++])
++AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
++test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no
++
++_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld
++
++_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl
++_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl
++_LT_DECL([], [extract_expsyms_cmds], [2],
++    [The commands to extract the exported symbol list from a shared archive])
++
++#
++# Do we need to explicitly link libc?
++#
++case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in
++x|xyes)
++  # Assume -lc should be added
++  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
++
++  if test yes,yes = "$GCC,$enable_shared"; then
++    case $_LT_TAGVAR(archive_cmds, $1) in
++    *'~'*)
++      # FIXME: we may have to deal with multi-command sequences.
++      ;;
++    '$CC '*)
++      # Test whether the compiler implicitly links with -lc since on some
++      # systems, -lgcc has to come before -lc. If gcc already passes -lc
++      # to ld, don't add -lc before -lgcc.
++      AC_CACHE_CHECK([whether -lc should be explicitly linked in],
++	[lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1),
++	[$RM conftest*
++	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++	if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
++	  soname=conftest
++	  lib=conftest
++	  libobjs=conftest.$ac_objext
++	  deplibs=
++	  wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
++	  pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
++	  compiler_flags=-v
++	  linker_flags=-v
++	  verstring=
++	  output_objdir=.
++	  libname=conftest
++	  lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
++	  _LT_TAGVAR(allow_undefined_flag, $1)=
++	  if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
++	  then
++	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no
++	  else
++	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
++	  fi
++	  _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
++	else
++	  cat conftest.err 1>&5
++	fi
++	$RM conftest*
++	])
++      _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)
++      ;;
++    esac
++  fi
++  ;;
++esac
++
++_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0],
++    [Whether or not to add -lc for building shared libraries])
++_LT_TAGDECL([allow_libtool_libs_with_static_runtimes],
++    [enable_shared_with_static_runtimes], [0],
++    [Whether or not to disallow shared libs when runtime libs are static])
++_LT_TAGDECL([], [export_dynamic_flag_spec], [1],
++    [Compiler flag to allow reflexive dlopens])
++_LT_TAGDECL([], [whole_archive_flag_spec], [1],
++    [Compiler flag to generate shared objects directly from archives])
++_LT_TAGDECL([], [compiler_needs_object], [1],
++    [Whether the compiler copes with passing no objects directly])
++_LT_TAGDECL([], [old_archive_from_new_cmds], [2],
++    [Create an old-style archive from a shared archive])
++_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2],
++    [Create a temporary old-style archive to link instead of a shared archive])
++_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive])
++_LT_TAGDECL([], [archive_expsym_cmds], [2])
++_LT_TAGDECL([], [module_cmds], [2],
++    [Commands used to build a loadable module if different from building
++    a shared archive.])
++_LT_TAGDECL([], [module_expsym_cmds], [2])
++_LT_TAGDECL([], [with_gnu_ld], [1],
++    [Whether we are building with GNU ld or not])
++_LT_TAGDECL([], [allow_undefined_flag], [1],
++    [Flag that allows shared libraries with undefined symbols to be built])
++_LT_TAGDECL([], [no_undefined_flag], [1],
++    [Flag that enforces no undefined symbols])
++_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1],
++    [Flag to hardcode $libdir into a binary during linking.
++    This must work even if $libdir does not exist])
++_LT_TAGDECL([], [hardcode_libdir_separator], [1],
++    [Whether we need a single "-rpath" flag with a separated argument])
++_LT_TAGDECL([], [hardcode_direct], [0],
++    [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
++    DIR into the resulting binary])
++_LT_TAGDECL([], [hardcode_direct_absolute], [0],
++    [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
++    DIR into the resulting binary and the resulting library dependency is
++    "absolute", i.e impossible to change by setting $shlibpath_var if the
++    library is relocated])
++_LT_TAGDECL([], [hardcode_minus_L], [0],
++    [Set to "yes" if using the -LDIR flag during linking hardcodes DIR
++    into the resulting binary])
++_LT_TAGDECL([], [hardcode_shlibpath_var], [0],
++    [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
++    into the resulting binary])
++_LT_TAGDECL([], [hardcode_automatic], [0],
++    [Set to "yes" if building a shared library automatically hardcodes DIR
++    into the library and all subsequent libraries and executables linked
++    against it])
++_LT_TAGDECL([], [inherit_rpath], [0],
++    [Set to yes if linker adds runtime paths of dependent libraries
++    to runtime path list])
++_LT_TAGDECL([], [link_all_deplibs], [0],
++    [Whether libtool must link a program against all its dependency libraries])
++_LT_TAGDECL([], [always_export_symbols], [0],
++    [Set to "yes" if exported symbols are required])
++_LT_TAGDECL([], [export_symbols_cmds], [2],
++    [The commands to list exported symbols])
++_LT_TAGDECL([], [exclude_expsyms], [1],
++    [Symbols that should not be listed in the preloaded symbols])
++_LT_TAGDECL([], [include_expsyms], [1],
++    [Symbols that must always be exported])
++_LT_TAGDECL([], [prelink_cmds], [2],
++    [Commands necessary for linking programs (against libraries) with templates])
++_LT_TAGDECL([], [postlink_cmds], [2],
++    [Commands necessary for finishing linking programs])
++_LT_TAGDECL([], [file_list_spec], [1],
++    [Specify filename containing input files])
++dnl FIXME: Not yet implemented
++dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
++dnl    [Compiler flag to generate thread safe objects])
++])# _LT_LINKER_SHLIBS
++
++
++# _LT_LANG_C_CONFIG([TAG])
++# ------------------------
++# Ensure that the configuration variables for a C compiler are suitably
++# defined.  These variables are subsequently used by _LT_CONFIG to write
++# the compiler configuration to 'libtool'.
++m4_defun([_LT_LANG_C_CONFIG],
++[m4_require([_LT_DECL_EGREP])dnl
++lt_save_CC=$CC
++AC_LANG_PUSH(C)
++
++# Source file extension for C test sources.
++ac_ext=c
++
++# Object file extension for compiled C test sources.
++objext=o
++_LT_TAGVAR(objext, $1)=$objext
++
++# Code to be used in simple compile tests
++lt_simple_compile_test_code="int some_variable = 0;"
++
++# Code to be used in simple link tests
++lt_simple_link_test_code='int main(){return(0);}'
++
++_LT_TAG_COMPILER
++# Save the default compiler, since it gets overwritten when the other
++# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
++compiler_DEFAULT=$CC
++
++# save warnings/boilerplate of simple test code
++_LT_COMPILER_BOILERPLATE
++_LT_LINKER_BOILERPLATE
++
++if test -n "$compiler"; then
++  _LT_COMPILER_NO_RTTI($1)
++  _LT_COMPILER_PIC($1)
++  _LT_COMPILER_C_O($1)
++  _LT_COMPILER_FILE_LOCKS($1)
++  _LT_LINKER_SHLIBS($1)
++  _LT_SYS_DYNAMIC_LINKER($1)
++  _LT_LINKER_HARDCODE_LIBPATH($1)
++  LT_SYS_DLOPEN_SELF
++  _LT_CMD_STRIPLIB
++
++  # Report what library types will actually be built
++  AC_MSG_CHECKING([if libtool supports shared libraries])
++  AC_MSG_RESULT([$can_build_shared])
++
++  AC_MSG_CHECKING([whether to build shared libraries])
++  test no = "$can_build_shared" && enable_shared=no
++
++  # On AIX, shared libraries and static libraries use the same namespace, and
++  # are all built from PIC.
++  case $host_os in
++  aix3*)
++    test yes = "$enable_shared" && enable_static=no
++    if test -n "$RANLIB"; then
++      archive_cmds="$archive_cmds~\$RANLIB \$lib"
++      postinstall_cmds='$RANLIB $lib'
++    fi
++    ;;
++
++  aix[[4-9]]*)
++    if test ia64 != "$host_cpu"; then
++      case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
++      yes,aix,yes) ;;			# shared object as lib.so file only
++      yes,svr4,*) ;;			# shared object as lib.so archive member only
++      yes,*) enable_static=no ;;	# shared object in lib.a archive as well
++      esac
++    fi
++    ;;
++  esac
++  AC_MSG_RESULT([$enable_shared])
++
++  AC_MSG_CHECKING([whether to build static libraries])
++  # Make sure either enable_shared or enable_static is yes.
++  test yes = "$enable_shared" || enable_static=yes
++  AC_MSG_RESULT([$enable_static])
++
++  _LT_CONFIG($1)
++fi
++AC_LANG_POP
++CC=$lt_save_CC
++])# _LT_LANG_C_CONFIG
++
++
++# _LT_LANG_CXX_CONFIG([TAG])
++# --------------------------
++# Ensure that the configuration variables for a C++ compiler are suitably
++# defined.  These variables are subsequently used by _LT_CONFIG to write
++# the compiler configuration to 'libtool'.
++m4_defun([_LT_LANG_CXX_CONFIG],
++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++m4_require([_LT_DECL_EGREP])dnl
++m4_require([_LT_PATH_MANIFEST_TOOL])dnl
++if test -n "$CXX" && ( test no != "$CXX" &&
++    ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) ||
++    (test g++ != "$CXX"))); then
++  AC_PROG_CXXCPP
++else
++  _lt_caught_CXX_error=yes
++fi
++
++AC_LANG_PUSH(C++)
++_LT_TAGVAR(archive_cmds_need_lc, $1)=no
++_LT_TAGVAR(allow_undefined_flag, $1)=
++_LT_TAGVAR(always_export_symbols, $1)=no
++_LT_TAGVAR(archive_expsym_cmds, $1)=
++_LT_TAGVAR(compiler_needs_object, $1)=no
++_LT_TAGVAR(export_dynamic_flag_spec, $1)=
++_LT_TAGVAR(hardcode_direct, $1)=no
++_LT_TAGVAR(hardcode_direct_absolute, $1)=no
++_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
++_LT_TAGVAR(hardcode_libdir_separator, $1)=
++_LT_TAGVAR(hardcode_minus_L, $1)=no
++_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
++_LT_TAGVAR(hardcode_automatic, $1)=no
++_LT_TAGVAR(inherit_rpath, $1)=no
++_LT_TAGVAR(module_cmds, $1)=
++_LT_TAGVAR(module_expsym_cmds, $1)=
++_LT_TAGVAR(link_all_deplibs, $1)=unknown
++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
++_LT_TAGVAR(reload_flag, $1)=$reload_flag
++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
++_LT_TAGVAR(no_undefined_flag, $1)=
++_LT_TAGVAR(whole_archive_flag_spec, $1)=
++_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
++
++# Source file extension for C++ test sources.
++ac_ext=cpp
++
++# Object file extension for compiled C++ test sources.
++objext=o
++_LT_TAGVAR(objext, $1)=$objext
++
++# No sense in running all these tests if we already determined that
++# the CXX compiler isn't working.  Some variables (like enable_shared)
++# are currently assumed to apply to all compilers on this platform,
++# and will be corrupted by setting them based on a non-working compiler.
++if test yes != "$_lt_caught_CXX_error"; then
++  # Code to be used in simple compile tests
++  lt_simple_compile_test_code="int some_variable = 0;"
++
++  # Code to be used in simple link tests
++  lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }'
++
++  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
++  _LT_TAG_COMPILER
++
++  # save warnings/boilerplate of simple test code
++  _LT_COMPILER_BOILERPLATE
++  _LT_LINKER_BOILERPLATE
++
++  # Allow CC to be a program name with arguments.
++  lt_save_CC=$CC
++  lt_save_CFLAGS=$CFLAGS
++  lt_save_LD=$LD
++  lt_save_GCC=$GCC
++  GCC=$GXX
++  lt_save_with_gnu_ld=$with_gnu_ld
++  lt_save_path_LD=$lt_cv_path_LD
++  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
++    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
++  else
++    $as_unset lt_cv_prog_gnu_ld
++  fi
++  if test -n "${lt_cv_path_LDCXX+set}"; then
++    lt_cv_path_LD=$lt_cv_path_LDCXX
++  else
++    $as_unset lt_cv_path_LD
++  fi
++  test -z "${LDCXX+set}" || LD=$LDCXX
++  CC=${CXX-"c++"}
++  CFLAGS=$CXXFLAGS
++  compiler=$CC
++  _LT_TAGVAR(compiler, $1)=$CC
++  _LT_CC_BASENAME([$compiler])
++
++  if test -n "$compiler"; then
++    # We don't want -fno-exception when compiling C++ code, so set the
++    # no_builtin_flag separately
++    if test yes = "$GXX"; then
++      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
++    else
++      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
++    fi
++
++    if test yes = "$GXX"; then
++      # Set up default GNU C++ configuration
++
++      LT_PATH_LD
++
++      # Check if GNU C++ uses GNU ld as the underlying linker, since the
++      # archiving commands below assume that GNU ld is being used.
++      if test yes = "$with_gnu_ld"; then
++        _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++
++        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
++
++        # If archive_cmds runs LD, not CC, wlarc should be empty
++        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
++        #     investigate it a little bit more. (MM)
++        wlarc='$wl'
++
++        # ancient GNU ld didn't support --whole-archive et. al.
++        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
++	  $GREP 'no-whole-archive' > /dev/null; then
++          _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++        else
++          _LT_TAGVAR(whole_archive_flag_spec, $1)=
++        fi
++      else
++        with_gnu_ld=no
++        wlarc=
++
++        # A generic and very simple default shared library creation
++        # command for GNU C++ for the case where it uses the native
++        # linker, instead of GNU ld.  If possible, this setting should
++        # overridden to take advantage of the native linker features on
++        # the platform it is being used on.
++        _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
++      fi
++
++      # Commands to make compiler produce verbose output that lists
++      # what "hidden" libraries, object files and flags are used when
++      # linking a shared library.
++      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++
++    else
++      GXX=no
++      with_gnu_ld=no
++      wlarc=
++    fi
++
++    # PORTME: fill in a description of your system's C++ link characteristics
++    AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
++    _LT_TAGVAR(ld_shlibs, $1)=yes
++    case $host_os in
++      aix3*)
++        # FIXME: insert proper C++ library support
++        _LT_TAGVAR(ld_shlibs, $1)=no
++        ;;
++      aix[[4-9]]*)
++        if test ia64 = "$host_cpu"; then
++          # On IA64, the linker does run time linking by default, so we don't
++          # have to do anything special.
++          aix_use_runtimelinking=no
++          exp_sym_flag='-Bexport'
++          no_entry_flag=
++        else
++          aix_use_runtimelinking=no
++
++          # Test if we are trying to use run time linking or normal
++          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
++          # have runtime linking enabled, and use it for executables.
++          # For shared libraries, we enable/disable runtime linking
++          # depending on the kind of the shared library created -
++          # when "with_aix_soname,aix_use_runtimelinking" is:
++          # "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
++          # "aix,yes"  lib.so          shared, rtl:yes, for executables
++          #            lib.a           static archive
++          # "both,no"  lib.so.V(shr.o) shared, rtl:yes
++          #            lib.a(lib.so.V) shared, rtl:no,  for executables
++          # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
++          #            lib.a(lib.so.V) shared, rtl:no
++          # "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
++          #            lib.a           static archive
++          case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
++	    for ld_flag in $LDFLAGS; do
++	      case $ld_flag in
++	      *-brtl*)
++	        aix_use_runtimelinking=yes
++	        break
++	        ;;
++	      esac
++	    done
++	    if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
++	      # With aix-soname=svr4, we create the lib.so.V shared archives only,
++	      # so we don't have lib.a shared libs to link our executables.
++	      # We have to force runtime linking in this case.
++	      aix_use_runtimelinking=yes
++	      LDFLAGS="$LDFLAGS -Wl,-brtl"
++	    fi
++	    ;;
++          esac
++
++          exp_sym_flag='-bexport'
++          no_entry_flag='-bnoentry'
++        fi
++
++        # When large executables or shared objects are built, AIX ld can
++        # have problems creating the table of contents.  If linking a library
++        # or program results in "error TOC overflow" add -mminimal-toc to
++        # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
++        # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
++
++        _LT_TAGVAR(archive_cmds, $1)=''
++        _LT_TAGVAR(hardcode_direct, $1)=yes
++        _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
++        _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
++        _LT_TAGVAR(link_all_deplibs, $1)=yes
++        _LT_TAGVAR(file_list_spec, $1)='$wl-f,'
++        case $with_aix_soname,$aix_use_runtimelinking in
++        aix,*) ;;	# no import file
++        svr4,* | *,yes) # use import file
++          # The Import File defines what to hardcode.
++          _LT_TAGVAR(hardcode_direct, $1)=no
++          _LT_TAGVAR(hardcode_direct_absolute, $1)=no
++          ;;
++        esac
++
++        if test yes = "$GXX"; then
++          case $host_os in aix4.[[012]]|aix4.[[012]].*)
++          # We only want to do this on AIX 4.2 and lower, the check
++          # below for broken collect2 doesn't work under 4.3+
++	  collect2name=`$CC -print-prog-name=collect2`
++	  if test -f "$collect2name" &&
++	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
++	  then
++	    # We have reworked collect2
++	    :
++	  else
++	    # We have old collect2
++	    _LT_TAGVAR(hardcode_direct, $1)=unsupported
++	    # It fails to find uninstalled libraries when the uninstalled
++	    # path is not listed in the libpath.  Setting hardcode_minus_L
++	    # to unsupported forces relinking
++	    _LT_TAGVAR(hardcode_minus_L, $1)=yes
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++	    _LT_TAGVAR(hardcode_libdir_separator, $1)=
++	  fi
++          esac
++          shared_flag='-shared'
++	  if test yes = "$aix_use_runtimelinking"; then
++	    shared_flag=$shared_flag' $wl-G'
++	  fi
++	  # Need to ensure runtime linking is disabled for the traditional
++	  # shared library, or the linker may eventually find shared libraries
++	  # /with/ Import File - we do not want to mix them.
++	  shared_flag_aix='-shared'
++	  shared_flag_svr4='-shared $wl-G'
++        else
++          # not using gcc
++          if test ia64 = "$host_cpu"; then
++	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
++	  # chokes on -Wl,-G. The following line is correct:
++	  shared_flag='-G'
++          else
++	    if test yes = "$aix_use_runtimelinking"; then
++	      shared_flag='$wl-G'
++	    else
++	      shared_flag='$wl-bM:SRE'
++	    fi
++	    shared_flag_aix='$wl-bM:SRE'
++	    shared_flag_svr4='$wl-G'
++          fi
++        fi
++
++        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall'
++        # It seems that -bexpall does not export symbols beginning with
++        # underscore (_), so it is better to generate a list of symbols to
++	# export.
++        _LT_TAGVAR(always_export_symbols, $1)=yes
++	if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
++          # Warning - without using the other runtime loading flags (-brtl),
++          # -berok will link without error, but may produce a broken library.
++          # The "-G" linker flag allows undefined symbols.
++          _LT_TAGVAR(no_undefined_flag, $1)='-bernotok'
++          # Determine the default libpath from the value encoded in an empty
++          # executable.
++          _LT_SYS_MODULE_PATH_AIX([$1])
++          _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
++
++          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
++        else
++          if test ia64 = "$host_cpu"; then
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib'
++	    _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
++	    _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
++          else
++	    # Determine the default libpath from the value encoded in an
++	    # empty executable.
++	    _LT_SYS_MODULE_PATH_AIX([$1])
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
++	    # Warning - without using the other run time loading flags,
++	    # -berok will link without error, but may produce a broken library.
++	    _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok'
++	    _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok'
++	    if test yes = "$with_gnu_ld"; then
++	      # We only use this code for GNU lds that support --whole-archive.
++	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
++	    else
++	      # Exported symbols can be pulled into shared objects from archives
++	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
++	    fi
++	    _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
++	    # -brtl affects multiple linker settings, -berok does not and is overridden later
++	    compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`'
++	    if test svr4 != "$with_aix_soname"; then
++	      # This is similar to how AIX traditionally builds its shared
++	      # libraries. Need -bnortl late, we may have -brtl in LDFLAGS.
++	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
++	    fi
++	    if test aix != "$with_aix_soname"; then
++	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
++	    else
++	      # used by -dlpreopen to get the symbols
++	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
++	    fi
++	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d'
++          fi
++        fi
++        ;;
++
++      beos*)
++	if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++	  # Joseph Beckenbach  says some releases of gcc
++	  # support --undefined.  This deserves some investigation.  FIXME
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	else
++	  _LT_TAGVAR(ld_shlibs, $1)=no
++	fi
++	;;
++
++      chorus*)
++        case $cc_basename in
++          *)
++	  # FIXME: insert proper C++ library support
++	  _LT_TAGVAR(ld_shlibs, $1)=no
++	  ;;
++        esac
++        ;;
++
++      cygwin* | mingw* | pw32* | cegcc*)
++	case $GXX,$cc_basename in
++	,cl* | no,cl* | ,icl* | no,icl*)
++	  # Native MSVC or ICC
++	  # hardcode_libdir_flag_spec is actually meaningless, as there is
++	  # no search path for DLLs.
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
++	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++	  _LT_TAGVAR(always_export_symbols, $1)=yes
++	  _LT_TAGVAR(file_list_spec, $1)='@'
++	  # Tell ltmain to make .lib files, not .a files.
++	  libext=lib
++	  # Tell ltmain to make .dll files, not .so files.
++	  shrext_cmds=.dll
++	  # FIXME: Setting linknames here is a bad hack.
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
++	  _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
++              cp "$export_symbols" "$output_objdir/$soname.def";
++              echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
++            else
++              $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
++            fi~
++            $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
++            linknames='
++	  # The linker will not automatically build a static lib if we build a DLL.
++	  # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
++	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++	  # Don't use ranlib
++	  _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
++	  _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
++            lt_tool_outputfile="@TOOL_OUTPUT@"~
++            case $lt_outputfile in
++              *.exe|*.EXE) ;;
++              *)
++                lt_outputfile=$lt_outputfile.exe
++                lt_tool_outputfile=$lt_tool_outputfile.exe
++                ;;
++            esac~
++            func_to_tool_file "$lt_outputfile"~
++            if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
++              $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
++              $RM "$lt_outputfile.manifest";
++            fi'
++	  ;;
++	*)
++	  # g++
++	  # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
++	  # as there is no search path for DLLs.
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols'
++	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++	  _LT_TAGVAR(always_export_symbols, $1)=no
++	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++
++	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++	    # If the export-symbols file already is a .def file, use it as
++	    # is; otherwise, prepend EXPORTS...
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
++              cp $export_symbols $output_objdir/$soname.def;
++            else
++              echo EXPORTS > $output_objdir/$soname.def;
++              cat $export_symbols >> $output_objdir/$soname.def;
++            fi~
++            $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++	  else
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	  fi
++	  ;;
++	esac
++	;;
++      darwin* | rhapsody*)
++        _LT_DARWIN_LINKER_FEATURES($1)
++	;;
++
++      os2*)
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
++	_LT_TAGVAR(hardcode_minus_L, $1)=yes
++	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
++	shrext_cmds=.dll
++	_LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	  $ECHO EXPORTS >> $output_objdir/$libname.def~
++	  emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
++	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	  emximp -o $lib $output_objdir/$libname.def'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	  $ECHO EXPORTS >> $output_objdir/$libname.def~
++	  prefix_cmds="$SED"~
++	  if test EXPORTS = "`$SED 1q $export_symbols`"; then
++	    prefix_cmds="$prefix_cmds -e 1d";
++	  fi~
++	  prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
++	  cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
++	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	  emximp -o $lib $output_objdir/$libname.def'
++	_LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
++	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
++	_LT_TAGVAR(file_list_spec, $1)='@'
++	;;
++
++      dgux*)
++        case $cc_basename in
++          ec++*)
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++          ghcx*)
++	    # Green Hills C++ Compiler
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++          *)
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++        esac
++        ;;
++
++      freebsd2.*)
++        # C++ shared libraries reported to be fairly broken before
++	# switch to ELF
++        _LT_TAGVAR(ld_shlibs, $1)=no
++        ;;
++
++      freebsd-elf*)
++        _LT_TAGVAR(archive_cmds_need_lc, $1)=no
++        ;;
++
++      freebsd* | dragonfly* | midnightbsd*)
++        # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
++        # conventions
++        _LT_TAGVAR(ld_shlibs, $1)=yes
++        ;;
++
++      haiku*)
++        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++        _LT_TAGVAR(link_all_deplibs, $1)=yes
++        ;;
++
++      hpux9*)
++        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
++        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++        _LT_TAGVAR(hardcode_direct, $1)=yes
++        _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
++				             # but as the default
++				             # location of the library.
++
++        case $cc_basename in
++          CC*)
++            # FIXME: insert proper C++ library support
++            _LT_TAGVAR(ld_shlibs, $1)=no
++            ;;
++          aCC*)
++            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++            # Commands to make compiler produce verbose output that lists
++            # what "hidden" libraries, object files and flags are used when
++            # linking a shared library.
++            #
++            # There doesn't appear to be a way to prevent this compiler from
++            # explicitly linking system object files so we need to strip them
++            # from the output so that they don't get included in the library
++            # dependencies.
++            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++            ;;
++          *)
++            if test yes = "$GXX"; then
++              _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++            else
++              # FIXME: insert proper C++ library support
++              _LT_TAGVAR(ld_shlibs, $1)=no
++            fi
++            ;;
++        esac
++        ;;
++
++      hpux10*|hpux11*)
++        if test no = "$with_gnu_ld"; then
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
++	  _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++
++          case $host_cpu in
++            hppa*64*|ia64*)
++              ;;
++            *)
++	      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++              ;;
++          esac
++        fi
++        case $host_cpu in
++          hppa*64*|ia64*)
++            _LT_TAGVAR(hardcode_direct, $1)=no
++            _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++            ;;
++          *)
++            _LT_TAGVAR(hardcode_direct, $1)=yes
++            _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
++            _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
++					         # but as the default
++					         # location of the library.
++            ;;
++        esac
++
++        case $cc_basename in
++          CC*)
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++          aCC*)
++	    case $host_cpu in
++	      hppa*64*)
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        ;;
++	      ia64*)
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        ;;
++	      *)
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        ;;
++	    esac
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++	    ;;
++          *)
++	    if test yes = "$GXX"; then
++	      if test no = "$with_gnu_ld"; then
++	        case $host_cpu in
++	          hppa*64*)
++	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            ;;
++	          ia64*)
++	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            ;;
++	          *)
++	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            ;;
++	        esac
++	      fi
++	    else
++	      # FIXME: insert proper C++ library support
++	      _LT_TAGVAR(ld_shlibs, $1)=no
++	    fi
++	    ;;
++        esac
++        ;;
++
++      interix[[3-9]]*)
++	_LT_TAGVAR(hardcode_direct, $1)=no
++	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
++	# Instead, shared libraries are loaded at an image base (0x10000000 by
++	# default) and relocated if they conflict, which is a slow very memory
++	# consuming and fragmenting process.  To avoid this, we pick a random,
++	# 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
++	# time.  Moving up from 0x10000000 also allows more sbrk(2) space.
++	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++	_LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++	;;
++      irix5* | irix6*)
++        case $cc_basename in
++          CC*)
++	    # SGI C++
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++
++	    # Archives containing C++ object files must be created using
++	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
++	    # necessary to make sure instantiated templates are included
++	    # in the archive.
++	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs'
++	    ;;
++          *)
++	    if test yes = "$GXX"; then
++	      if test no = "$with_gnu_ld"; then
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	      else
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib'
++	      fi
++	    fi
++	    _LT_TAGVAR(link_all_deplibs, $1)=yes
++	    ;;
++        esac
++        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++        _LT_TAGVAR(inherit_rpath, $1)=yes
++        ;;
++
++      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++        case $cc_basename in
++          KCC*)
++	    # Kuck and Associates, Inc. (KAI) C++ Compiler
++
++	    # KCC will only create a shared library if the output file
++	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
++	    # to its proper name (with version) after linking.
++	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib'
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
++
++	    # Archives containing C++ object files must be created using
++	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
++	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
++	    ;;
++	  icpc* | ecpc* )
++	    # Intel C++
++	    with_gnu_ld=yes
++	    # version 8.0 and above of icpc choke on multiply defined symbols
++	    # if we add $predep_objects and $postdep_objects, however 7.1 and
++	    # earlier do not add the objects themselves.
++	    case `$CC -V 2>&1` in
++	      *"Version 7."*)
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++		;;
++	      *)  # Version 8.0 or newer
++	        tmp_idyn=
++	        case $host_cpu in
++		  ia64*) tmp_idyn=' -i_dynamic';;
++		esac
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++		;;
++	    esac
++	    _LT_TAGVAR(archive_cmds_need_lc, $1)=no
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
++	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
++	    ;;
++          pgCC* | pgcpp*)
++            # Portland Group C++ compiler
++	    case `$CC -V` in
++	    *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*)
++	      _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
++               rm -rf $tpldir~
++               $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
++               compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
++	      _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
++                rm -rf $tpldir~
++                $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
++                $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
++                $RANLIB $oldlib'
++	      _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
++                rm -rf $tpldir~
++                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
++                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++	      _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
++                rm -rf $tpldir~
++                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
++                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	      ;;
++	    *) # Version 6 and above use weak symbols
++	      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	      ;;
++	    esac
++
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir'
++	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
++	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++            ;;
++	  cxx*)
++	    # Compaq C++
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname  -o $lib $wl-retain-symbols-file $wl$export_symbols'
++
++	    runpath_var=LD_RUN_PATH
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
++	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
++	    ;;
++	  xl* | mpixl* | bgxl*)
++	    # IBM XL 8.0 on PPC, with GNU ld
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	    if test yes = "$supports_anon_versioning"; then
++	      _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
++                cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++                echo "local: *; };" >> $output_objdir/$libname.ver~
++                $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
++	    fi
++	    ;;
++	  *)
++	    case `$CC -V 2>&1 | $SED 5q` in
++	    *Sun\ C*)
++	      # Sun C++ 5.9
++	      _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
++	      _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols'
++	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
++	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	      _LT_TAGVAR(compiler_needs_object, $1)=yes
++
++	      # Not sure whether something based on
++	      # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
++	      # would be better.
++	      output_verbose_link_cmd='func_echo_all'
++
++	      # Archives containing C++ object files must be created using
++	      # "CC -xar", where "CC" is the Sun C++ compiler.  This is
++	      # necessary to make sure instantiated templates are included
++	      # in the archive.
++	      _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
++	      ;;
++	    esac
++	    ;;
++	esac
++	;;
++
++      lynxos*)
++        # FIXME: insert proper C++ library support
++	_LT_TAGVAR(ld_shlibs, $1)=no
++	;;
++
++      m88k*)
++        # FIXME: insert proper C++ library support
++        _LT_TAGVAR(ld_shlibs, $1)=no
++	;;
++
++      mvs*)
++        case $cc_basename in
++          cxx*)
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++	  *)
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++	esac
++	;;
++
++      netbsd*)
++        if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++	  _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
++	  wlarc=
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
++	  _LT_TAGVAR(hardcode_direct, $1)=yes
++	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	fi
++	# Workaround some broken pre-1.5 toolchains
++	output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
++	;;
++
++      *nto* | *qnx*)
++        _LT_TAGVAR(ld_shlibs, $1)=yes
++	;;
++
++      openbsd* | bitrig*)
++	if test -f /usr/libexec/ld.so; then
++	  _LT_TAGVAR(hardcode_direct, $1)=yes
++	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
++	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib'
++	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
++	    _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++	  fi
++	  output_verbose_link_cmd=func_echo_all
++	else
++	  _LT_TAGVAR(ld_shlibs, $1)=no
++	fi
++	;;
++
++      osf3* | osf4* | osf5*)
++        case $cc_basename in
++          KCC*)
++	    # Kuck and Associates, Inc. (KAI) C++ Compiler
++
++	    # KCC will only create a shared library if the output file
++	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
++	    # to its proper name (with version) after linking.
++	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
++
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
++	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++
++	    # Archives containing C++ object files must be created using
++	    # the KAI C++ compiler.
++	    case $host in
++	      osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;;
++	      *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;;
++	    esac
++	    ;;
++          RCC*)
++	    # Rational C++ 2.4.1
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++          cxx*)
++	    case $host in
++	      osf3*)
++	        _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++		;;
++	      *)
++	        _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	        _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
++                  echo "-hidden">> $lib.exp~
++                  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~
++                  $RM $lib.exp'
++	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
++		;;
++	    esac
++
++	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++	    ;;
++	  *)
++	    if test yes,no = "$GXX,$with_gnu_ld"; then
++	      _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
++	      case $host in
++	        osf3*)
++	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++		  ;;
++	        *)
++	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++		  ;;
++	      esac
++
++	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
++	      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
++
++	      # Commands to make compiler produce verbose output that lists
++	      # what "hidden" libraries, object files and flags are used when
++	      # linking a shared library.
++	      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++
++	    else
++	      # FIXME: insert proper C++ library support
++	      _LT_TAGVAR(ld_shlibs, $1)=no
++	    fi
++	    ;;
++        esac
++        ;;
++
++      psos*)
++        # FIXME: insert proper C++ library support
++        _LT_TAGVAR(ld_shlibs, $1)=no
++        ;;
++
++      sunos4*)
++        case $cc_basename in
++          CC*)
++	    # Sun C++ 4.x
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++          lcc*)
++	    # Lucid
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++          *)
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++        esac
++        ;;
++
++      solaris*)
++        case $cc_basename in
++          CC* | sunCC*)
++	    # Sun C++ 4.2, 5.x and Centerline C++
++            _LT_TAGVAR(archive_cmds_need_lc,$1)=yes
++	    _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++              $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++
++	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
++	    _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	    case $host_os in
++	      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
++	      *)
++		# The compiler driver will combine and reorder linker options,
++		# but understands '-z linker_flag'.
++	        # Supported since Solaris 2.6 (maybe 2.5.1?)
++		_LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
++	        ;;
++	    esac
++	    _LT_TAGVAR(link_all_deplibs, $1)=yes
++
++	    output_verbose_link_cmd='func_echo_all'
++
++	    # Archives containing C++ object files must be created using
++	    # "CC -xar", where "CC" is the Sun C++ compiler.  This is
++	    # necessary to make sure instantiated templates are included
++	    # in the archive.
++	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
++	    ;;
++          gcx*)
++	    # Green Hills C++ Compiler
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++
++	    # The C++ compiler must be used to create the archive.
++	    _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
++	    ;;
++          *)
++	    # GNU C++ compiler with Solaris linker
++	    if test yes,no = "$GXX,$with_gnu_ld"; then
++	      _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs'
++	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++                  $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++
++	        # Commands to make compiler produce verbose output that lists
++	        # what "hidden" libraries, object files and flags are used when
++	        # linking a shared library.
++	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++	      else
++	        # g++ 2.7 appears to require '-G' NOT '-shared' on this
++	        # platform.
++	        _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++                  $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++
++	        # Commands to make compiler produce verbose output that lists
++	        # what "hidden" libraries, object files and flags are used when
++	        # linking a shared library.
++	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++	      fi
++
++	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir'
++	      case $host_os in
++		solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
++		*)
++		  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
++		  ;;
++	      esac
++	    fi
++	    ;;
++        esac
++        ;;
++
++    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
++      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
++      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
++      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++      runpath_var='LD_RUN_PATH'
++
++      case $cc_basename in
++        CC*)
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	*)
++	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++      esac
++      ;;
++
++      sysv5* | sco3.2v5* | sco5v6*)
++	# Note: We CANNOT use -z defs as we might desire, because we do not
++	# link with -lc, and that would cause any symbols used from libc to
++	# always be unresolved, which means just about no library would
++	# ever link correctly.  If we're not using GNU ld we use -z text
++	# though, which does catch some bad symbols but isn't as heavy-handed
++	# as -z defs.
++	_LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
++	_LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs'
++	_LT_TAGVAR(archive_cmds_need_lc, $1)=no
++	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
++	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir'
++	_LT_TAGVAR(hardcode_libdir_separator, $1)=':'
++	_LT_TAGVAR(link_all_deplibs, $1)=yes
++	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport'
++	runpath_var='LD_RUN_PATH'
++
++	case $cc_basename in
++          CC*)
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~
++              '"$_LT_TAGVAR(old_archive_cmds, $1)"
++	    _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~
++              '"$_LT_TAGVAR(reload_cmds, $1)"
++	    ;;
++	  *)
++	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    ;;
++	esac
++      ;;
++
++      tandem*)
++        case $cc_basename in
++          NCC*)
++	    # NonStop-UX NCC 3.20
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++          *)
++	    # FIXME: insert proper C++ library support
++	    _LT_TAGVAR(ld_shlibs, $1)=no
++	    ;;
++        esac
++        ;;
++
++      vxworks*)
++        # FIXME: insert proper C++ library support
++        _LT_TAGVAR(ld_shlibs, $1)=no
++        ;;
++
++      *)
++        # FIXME: insert proper C++ library support
++        _LT_TAGVAR(ld_shlibs, $1)=no
++        ;;
++    esac
++
++    AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
++    test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no
++
++    _LT_TAGVAR(GCC, $1)=$GXX
++    _LT_TAGVAR(LD, $1)=$LD
++
++    ## CAVEAT EMPTOR:
++    ## There is no encapsulation within the following macros, do not change
++    ## the running order or otherwise move them around unless you know exactly
++    ## what you are doing...
++    _LT_SYS_HIDDEN_LIBDEPS($1)
++    _LT_COMPILER_PIC($1)
++    _LT_COMPILER_C_O($1)
++    _LT_COMPILER_FILE_LOCKS($1)
++    _LT_LINKER_SHLIBS($1)
++    _LT_SYS_DYNAMIC_LINKER($1)
++    _LT_LINKER_HARDCODE_LIBPATH($1)
++
++    _LT_CONFIG($1)
++  fi # test -n "$compiler"
++
++  CC=$lt_save_CC
++  CFLAGS=$lt_save_CFLAGS
++  LDCXX=$LD
++  LD=$lt_save_LD
++  GCC=$lt_save_GCC
++  with_gnu_ld=$lt_save_with_gnu_ld
++  lt_cv_path_LDCXX=$lt_cv_path_LD
++  lt_cv_path_LD=$lt_save_path_LD
++  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
++  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
++fi # test yes != "$_lt_caught_CXX_error"
++
++AC_LANG_POP
++])# _LT_LANG_CXX_CONFIG
++
++
++# _LT_FUNC_STRIPNAME_CNF
++# ----------------------
++# func_stripname_cnf prefix suffix name
++# strip PREFIX and SUFFIX off of NAME.
++# PREFIX and SUFFIX must not contain globbing or regex special
++# characters, hashes, percent signs, but SUFFIX may contain a leading
++# dot (in which case that matches only a dot).
++#
++# This function is identical to the (non-XSI) version of func_stripname,
++# except this one can be used by m4 code that may be executed by configure,
++# rather than the libtool script.
++m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl
++AC_REQUIRE([_LT_DECL_SED])
++AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])
++func_stripname_cnf ()
++{
++  case @S|@2 in
++  .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;;
++  *)  func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;;
++  esac
++} # func_stripname_cnf
++])# _LT_FUNC_STRIPNAME_CNF
++
++
++# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
++# ---------------------------------
++# Figure out "hidden" library dependencies from verbose
++# compiler output when linking a shared library.
++# Parse the compiler output and extract the necessary
++# objects, libraries and library flags.
++m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
++[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
++AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
++# Dependencies to place before and after the object being linked:
++_LT_TAGVAR(predep_objects, $1)=
++_LT_TAGVAR(postdep_objects, $1)=
++_LT_TAGVAR(predeps, $1)=
++_LT_TAGVAR(postdeps, $1)=
++_LT_TAGVAR(compiler_lib_search_path, $1)=
++
++dnl we can't use the lt_simple_compile_test_code here,
++dnl because it contains code intended for an executable,
++dnl not a library.  It's possible we should let each
++dnl tag define a new lt_????_link_test_code variable,
++dnl but it's only used here...
++m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
++int a;
++void foo (void) { a = 0; }
++_LT_EOF
++], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
++class Foo
++{
++public:
++  Foo (void) { a = 0; }
++private:
++  int a;
++};
++_LT_EOF
++], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
++      subroutine foo
++      implicit none
++      integer*4 a
++      a=0
++      return
++      end
++_LT_EOF
++], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
++      subroutine foo
++      implicit none
++      integer a
++      a=0
++      return
++      end
++_LT_EOF
++], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
++public class foo {
++  private int a;
++  public void bar (void) {
++    a = 0;
++  }
++};
++_LT_EOF
++], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
++package foo
++func foo() {
++}
++_LT_EOF
++])
++
++_lt_libdeps_save_CFLAGS=$CFLAGS
++case "$CC $CFLAGS " in #(
++*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
++*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
++*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
++esac
++
++dnl Parse the compiler output and extract the necessary
++dnl objects, libraries and library flags.
++if AC_TRY_EVAL(ac_compile); then
++  # Parse the compiler output and extract the necessary
++  # objects, libraries and library flags.
++
++  # Sentinel used to keep track of whether or not we are before
++  # the conftest object file.
++  pre_test_object_deps_done=no
++
++  for p in `eval "$output_verbose_link_cmd"`; do
++    case $prev$p in
++
++    -L* | -R* | -l*)
++       # Some compilers place space between "-{L,R}" and the path.
++       # Remove the space.
++       if test x-L = "$p" ||
++          test x-R = "$p"; then
++	 prev=$p
++	 continue
++       fi
++
++       # Expand the sysroot to ease extracting the directories later.
++       if test -z "$prev"; then
++         case $p in
++         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
++         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
++         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
++         esac
++       fi
++       case $p in
++       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
++       esac
++       if test no = "$pre_test_object_deps_done"; then
++	 case $prev in
++	 -L | -R)
++	   # Internal compiler library paths should come after those
++	   # provided the user.  The postdeps already come after the
++	   # user supplied libs so there is no need to process them.
++	   if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
++	     _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p
++	   else
++	     _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p"
++	   fi
++	   ;;
++	 # The "-l" case would never come before the object being
++	 # linked, so don't bother handling this case.
++	 esac
++       else
++	 if test -z "$_LT_TAGVAR(postdeps, $1)"; then
++	   _LT_TAGVAR(postdeps, $1)=$prev$p
++	 else
++	   _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p"
++	 fi
++       fi
++       prev=
++       ;;
++
++    *.lto.$objext) ;; # Ignore GCC LTO objects
++    *.$objext)
++       # This assumes that the test object file only shows up
++       # once in the compiler output.
++       if test "$p" = "conftest.$objext"; then
++	 pre_test_object_deps_done=yes
++	 continue
++       fi
++
++       if test no = "$pre_test_object_deps_done"; then
++	 if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
++	   _LT_TAGVAR(predep_objects, $1)=$p
++	 else
++	   _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
++	 fi
++       else
++	 if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
++	   _LT_TAGVAR(postdep_objects, $1)=$p
++	 else
++	   _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
++	 fi
++       fi
++       ;;
++
++    *) ;; # Ignore the rest.
++
++    esac
++  done
++
++  # Clean up.
++  rm -f a.out a.exe
++else
++  echo "libtool.m4: error: problem compiling $1 test program"
++fi
++
++$RM -f confest.$objext
++CFLAGS=$_lt_libdeps_save_CFLAGS
++
++# PORTME: override above test on systems where it is broken
++m4_if([$1], [CXX],
++[case $host_os in
++interix[[3-9]]*)
++  # Interix 3.5 installs completely hosed .la files for C++, so rather than
++  # hack all around it, let's just trust "g++" to DTRT.
++  _LT_TAGVAR(predep_objects,$1)=
++  _LT_TAGVAR(postdep_objects,$1)=
++  _LT_TAGVAR(postdeps,$1)=
++  ;;
++esac
++])
++
++case " $_LT_TAGVAR(postdeps, $1) " in
++*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
++esac
++ _LT_TAGVAR(compiler_lib_search_dirs, $1)=
++if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
++ _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! !g' -e 's!^ !!'`
++fi
++_LT_TAGDECL([], [compiler_lib_search_dirs], [1],
++    [The directories searched by this compiler when creating a shared library])
++_LT_TAGDECL([], [predep_objects], [1],
++    [Dependencies to place before and after the objects being linked to
++    create a shared library])
++_LT_TAGDECL([], [postdep_objects], [1])
++_LT_TAGDECL([], [predeps], [1])
++_LT_TAGDECL([], [postdeps], [1])
++_LT_TAGDECL([], [compiler_lib_search_path], [1],
++    [The library search path used internally by the compiler when linking
++    a shared library])
++])# _LT_SYS_HIDDEN_LIBDEPS
++
++
++# _LT_LANG_F77_CONFIG([TAG])
++# --------------------------
++# Ensure that the configuration variables for a Fortran 77 compiler are
++# suitably defined.  These variables are subsequently used by _LT_CONFIG
++# to write the compiler configuration to 'libtool'.
++m4_defun([_LT_LANG_F77_CONFIG],
++[AC_LANG_PUSH(Fortran 77)
++if test -z "$F77" || test no = "$F77"; then
++  _lt_disable_F77=yes
++fi
++
++_LT_TAGVAR(archive_cmds_need_lc, $1)=no
++_LT_TAGVAR(allow_undefined_flag, $1)=
++_LT_TAGVAR(always_export_symbols, $1)=no
++_LT_TAGVAR(archive_expsym_cmds, $1)=
++_LT_TAGVAR(export_dynamic_flag_spec, $1)=
++_LT_TAGVAR(hardcode_direct, $1)=no
++_LT_TAGVAR(hardcode_direct_absolute, $1)=no
++_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
++_LT_TAGVAR(hardcode_libdir_separator, $1)=
++_LT_TAGVAR(hardcode_minus_L, $1)=no
++_LT_TAGVAR(hardcode_automatic, $1)=no
++_LT_TAGVAR(inherit_rpath, $1)=no
++_LT_TAGVAR(module_cmds, $1)=
++_LT_TAGVAR(module_expsym_cmds, $1)=
++_LT_TAGVAR(link_all_deplibs, $1)=unknown
++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
++_LT_TAGVAR(reload_flag, $1)=$reload_flag
++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
++_LT_TAGVAR(no_undefined_flag, $1)=
++_LT_TAGVAR(whole_archive_flag_spec, $1)=
++_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
++
++# Source file extension for f77 test sources.
++ac_ext=f
++
++# Object file extension for compiled f77 test sources.
++objext=o
++_LT_TAGVAR(objext, $1)=$objext
++
++# No sense in running all these tests if we already determined that
++# the F77 compiler isn't working.  Some variables (like enable_shared)
++# are currently assumed to apply to all compilers on this platform,
++# and will be corrupted by setting them based on a non-working compiler.
++if test yes != "$_lt_disable_F77"; then
++  # Code to be used in simple compile tests
++  lt_simple_compile_test_code="\
++      subroutine t
++      return
++      end
++"
++
++  # Code to be used in simple link tests
++  lt_simple_link_test_code="\
++      program t
++      end
++"
++
++  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
++  _LT_TAG_COMPILER
++
++  # save warnings/boilerplate of simple test code
++  _LT_COMPILER_BOILERPLATE
++  _LT_LINKER_BOILERPLATE
++
++  # Allow CC to be a program name with arguments.
++  lt_save_CC=$CC
++  lt_save_GCC=$GCC
++  lt_save_CFLAGS=$CFLAGS
++  CC=${F77-"f77"}
++  CFLAGS=$FFLAGS
++  compiler=$CC
++  _LT_TAGVAR(compiler, $1)=$CC
++  _LT_CC_BASENAME([$compiler])
++  GCC=$G77
++  if test -n "$compiler"; then
++    AC_MSG_CHECKING([if libtool supports shared libraries])
++    AC_MSG_RESULT([$can_build_shared])
++
++    AC_MSG_CHECKING([whether to build shared libraries])
++    test no = "$can_build_shared" && enable_shared=no
++
++    # On AIX, shared libraries and static libraries use the same namespace, and
++    # are all built from PIC.
++    case $host_os in
++      aix3*)
++        test yes = "$enable_shared" && enable_static=no
++        if test -n "$RANLIB"; then
++          archive_cmds="$archive_cmds~\$RANLIB \$lib"
++          postinstall_cmds='$RANLIB $lib'
++        fi
++        ;;
++      aix[[4-9]]*)
++	if test ia64 != "$host_cpu"; then
++	  case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
++	  yes,aix,yes) ;;		# shared object as lib.so file only
++	  yes,svr4,*) ;;		# shared object as lib.so archive member only
++	  yes,*) enable_static=no ;;	# shared object in lib.a archive as well
++	  esac
++	fi
++        ;;
++    esac
++    AC_MSG_RESULT([$enable_shared])
++
++    AC_MSG_CHECKING([whether to build static libraries])
++    # Make sure either enable_shared or enable_static is yes.
++    test yes = "$enable_shared" || enable_static=yes
++    AC_MSG_RESULT([$enable_static])
++
++    _LT_TAGVAR(GCC, $1)=$G77
++    _LT_TAGVAR(LD, $1)=$LD
++
++    ## CAVEAT EMPTOR:
++    ## There is no encapsulation within the following macros, do not change
++    ## the running order or otherwise move them around unless you know exactly
++    ## what you are doing...
++    _LT_COMPILER_PIC($1)
++    _LT_COMPILER_C_O($1)
++    _LT_COMPILER_FILE_LOCKS($1)
++    _LT_LINKER_SHLIBS($1)
++    _LT_SYS_DYNAMIC_LINKER($1)
++    _LT_LINKER_HARDCODE_LIBPATH($1)
++
++    _LT_CONFIG($1)
++  fi # test -n "$compiler"
++
++  GCC=$lt_save_GCC
++  CC=$lt_save_CC
++  CFLAGS=$lt_save_CFLAGS
++fi # test yes != "$_lt_disable_F77"
++
++AC_LANG_POP
++])# _LT_LANG_F77_CONFIG
++
++
++# _LT_LANG_FC_CONFIG([TAG])
++# -------------------------
++# Ensure that the configuration variables for a Fortran compiler are
++# suitably defined.  These variables are subsequently used by _LT_CONFIG
++# to write the compiler configuration to 'libtool'.
++m4_defun([_LT_LANG_FC_CONFIG],
++[AC_LANG_PUSH(Fortran)
++
++if test -z "$FC" || test no = "$FC"; then
++  _lt_disable_FC=yes
++fi
++
++_LT_TAGVAR(archive_cmds_need_lc, $1)=no
++_LT_TAGVAR(allow_undefined_flag, $1)=
++_LT_TAGVAR(always_export_symbols, $1)=no
++_LT_TAGVAR(archive_expsym_cmds, $1)=
++_LT_TAGVAR(export_dynamic_flag_spec, $1)=
++_LT_TAGVAR(hardcode_direct, $1)=no
++_LT_TAGVAR(hardcode_direct_absolute, $1)=no
++_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
++_LT_TAGVAR(hardcode_libdir_separator, $1)=
++_LT_TAGVAR(hardcode_minus_L, $1)=no
++_LT_TAGVAR(hardcode_automatic, $1)=no
++_LT_TAGVAR(inherit_rpath, $1)=no
++_LT_TAGVAR(module_cmds, $1)=
++_LT_TAGVAR(module_expsym_cmds, $1)=
++_LT_TAGVAR(link_all_deplibs, $1)=unknown
++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
++_LT_TAGVAR(reload_flag, $1)=$reload_flag
++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
++_LT_TAGVAR(no_undefined_flag, $1)=
++_LT_TAGVAR(whole_archive_flag_spec, $1)=
++_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
++
++# Source file extension for fc test sources.
++ac_ext=${ac_fc_srcext-f}
++
++# Object file extension for compiled fc test sources.
++objext=o
++_LT_TAGVAR(objext, $1)=$objext
++
++# No sense in running all these tests if we already determined that
++# the FC compiler isn't working.  Some variables (like enable_shared)
++# are currently assumed to apply to all compilers on this platform,
++# and will be corrupted by setting them based on a non-working compiler.
++if test yes != "$_lt_disable_FC"; then
++  # Code to be used in simple compile tests
++  lt_simple_compile_test_code="\
++      subroutine t
++      return
++      end
++"
++
++  # Code to be used in simple link tests
++  lt_simple_link_test_code="\
++      program t
++      end
++"
++
++  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
++  _LT_TAG_COMPILER
++
++  # save warnings/boilerplate of simple test code
++  _LT_COMPILER_BOILERPLATE
++  _LT_LINKER_BOILERPLATE
++
++  # Allow CC to be a program name with arguments.
++  lt_save_CC=$CC
++  lt_save_GCC=$GCC
++  lt_save_CFLAGS=$CFLAGS
++  CC=${FC-"f95"}
++  CFLAGS=$FCFLAGS
++  compiler=$CC
++  GCC=$ac_cv_fc_compiler_gnu
++
++  _LT_TAGVAR(compiler, $1)=$CC
++  _LT_CC_BASENAME([$compiler])
++
++  if test -n "$compiler"; then
++    AC_MSG_CHECKING([if libtool supports shared libraries])
++    AC_MSG_RESULT([$can_build_shared])
++
++    AC_MSG_CHECKING([whether to build shared libraries])
++    test no = "$can_build_shared" && enable_shared=no
++
++    # On AIX, shared libraries and static libraries use the same namespace, and
++    # are all built from PIC.
++    case $host_os in
++      aix3*)
++        test yes = "$enable_shared" && enable_static=no
++        if test -n "$RANLIB"; then
++          archive_cmds="$archive_cmds~\$RANLIB \$lib"
++          postinstall_cmds='$RANLIB $lib'
++        fi
++        ;;
++      aix[[4-9]]*)
++	if test ia64 != "$host_cpu"; then
++	  case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
++	  yes,aix,yes) ;;		# shared object as lib.so file only
++	  yes,svr4,*) ;;		# shared object as lib.so archive member only
++	  yes,*) enable_static=no ;;	# shared object in lib.a archive as well
++	  esac
++	fi
++        ;;
++    esac
++    AC_MSG_RESULT([$enable_shared])
++
++    AC_MSG_CHECKING([whether to build static libraries])
++    # Make sure either enable_shared or enable_static is yes.
++    test yes = "$enable_shared" || enable_static=yes
++    AC_MSG_RESULT([$enable_static])
++
++    _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu
++    _LT_TAGVAR(LD, $1)=$LD
++
++    ## CAVEAT EMPTOR:
++    ## There is no encapsulation within the following macros, do not change
++    ## the running order or otherwise move them around unless you know exactly
++    ## what you are doing...
++    _LT_SYS_HIDDEN_LIBDEPS($1)
++    _LT_COMPILER_PIC($1)
++    _LT_COMPILER_C_O($1)
++    _LT_COMPILER_FILE_LOCKS($1)
++    _LT_LINKER_SHLIBS($1)
++    _LT_SYS_DYNAMIC_LINKER($1)
++    _LT_LINKER_HARDCODE_LIBPATH($1)
++
++    _LT_CONFIG($1)
++  fi # test -n "$compiler"
++
++  GCC=$lt_save_GCC
++  CC=$lt_save_CC
++  CFLAGS=$lt_save_CFLAGS
++fi # test yes != "$_lt_disable_FC"
++
++AC_LANG_POP
++])# _LT_LANG_FC_CONFIG
++
++
++# _LT_LANG_GCJ_CONFIG([TAG])
++# --------------------------
++# Ensure that the configuration variables for the GNU Java Compiler compiler
++# are suitably defined.  These variables are subsequently used by _LT_CONFIG
++# to write the compiler configuration to 'libtool'.
++m4_defun([_LT_LANG_GCJ_CONFIG],
++[AC_REQUIRE([LT_PROG_GCJ])dnl
++AC_LANG_SAVE
++
++# Source file extension for Java test sources.
++ac_ext=java
++
++# Object file extension for compiled Java test sources.
++objext=o
++_LT_TAGVAR(objext, $1)=$objext
++
++# Code to be used in simple compile tests
++lt_simple_compile_test_code="class foo {}"
++
++# Code to be used in simple link tests
++lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }'
++
++# ltmain only uses $CC for tagged configurations so make sure $CC is set.
++_LT_TAG_COMPILER
++
++# save warnings/boilerplate of simple test code
++_LT_COMPILER_BOILERPLATE
++_LT_LINKER_BOILERPLATE
++
++# Allow CC to be a program name with arguments.
++lt_save_CC=$CC
++lt_save_CFLAGS=$CFLAGS
++lt_save_GCC=$GCC
++GCC=yes
++CC=${GCJ-"gcj"}
++CFLAGS=$GCJFLAGS
++compiler=$CC
++_LT_TAGVAR(compiler, $1)=$CC
++_LT_TAGVAR(LD, $1)=$LD
++_LT_CC_BASENAME([$compiler])
++
++# GCJ did not exist at the time GCC didn't implicitly link libc in.
++_LT_TAGVAR(archive_cmds_need_lc, $1)=no
++
++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
++_LT_TAGVAR(reload_flag, $1)=$reload_flag
++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
++
++if test -n "$compiler"; then
++  _LT_COMPILER_NO_RTTI($1)
++  _LT_COMPILER_PIC($1)
++  _LT_COMPILER_C_O($1)
++  _LT_COMPILER_FILE_LOCKS($1)
++  _LT_LINKER_SHLIBS($1)
++  _LT_LINKER_HARDCODE_LIBPATH($1)
++
++  _LT_CONFIG($1)
++fi
++
++AC_LANG_RESTORE
++
++GCC=$lt_save_GCC
++CC=$lt_save_CC
++CFLAGS=$lt_save_CFLAGS
++])# _LT_LANG_GCJ_CONFIG
++
++
++# _LT_LANG_GO_CONFIG([TAG])
++# --------------------------
++# Ensure that the configuration variables for the GNU Go compiler
++# are suitably defined.  These variables are subsequently used by _LT_CONFIG
++# to write the compiler configuration to 'libtool'.
++m4_defun([_LT_LANG_GO_CONFIG],
++[AC_REQUIRE([LT_PROG_GO])dnl
++AC_LANG_SAVE
++
++# Source file extension for Go test sources.
++ac_ext=go
++
++# Object file extension for compiled Go test sources.
++objext=o
++_LT_TAGVAR(objext, $1)=$objext
++
++# Code to be used in simple compile tests
++lt_simple_compile_test_code="package main; func main() { }"
++
++# Code to be used in simple link tests
++lt_simple_link_test_code='package main; func main() { }'
++
++# ltmain only uses $CC for tagged configurations so make sure $CC is set.
++_LT_TAG_COMPILER
++
++# save warnings/boilerplate of simple test code
++_LT_COMPILER_BOILERPLATE
++_LT_LINKER_BOILERPLATE
++
++# Allow CC to be a program name with arguments.
++lt_save_CC=$CC
++lt_save_CFLAGS=$CFLAGS
++lt_save_GCC=$GCC
++GCC=yes
++CC=${GOC-"gccgo"}
++CFLAGS=$GOFLAGS
++compiler=$CC
++_LT_TAGVAR(compiler, $1)=$CC
++_LT_TAGVAR(LD, $1)=$LD
++_LT_CC_BASENAME([$compiler])
++
++# Go did not exist at the time GCC didn't implicitly link libc in.
++_LT_TAGVAR(archive_cmds_need_lc, $1)=no
++
++_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
++_LT_TAGVAR(reload_flag, $1)=$reload_flag
++_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
++
++if test -n "$compiler"; then
++  _LT_COMPILER_NO_RTTI($1)
++  _LT_COMPILER_PIC($1)
++  _LT_COMPILER_C_O($1)
++  _LT_COMPILER_FILE_LOCKS($1)
++  _LT_LINKER_SHLIBS($1)
++  _LT_LINKER_HARDCODE_LIBPATH($1)
++
++  _LT_CONFIG($1)
++fi
++
++AC_LANG_RESTORE
++
++GCC=$lt_save_GCC
++CC=$lt_save_CC
++CFLAGS=$lt_save_CFLAGS
++])# _LT_LANG_GO_CONFIG
++
++
++# _LT_LANG_RC_CONFIG([TAG])
++# -------------------------
++# Ensure that the configuration variables for the Windows resource compiler
++# are suitably defined.  These variables are subsequently used by _LT_CONFIG
++# to write the compiler configuration to 'libtool'.
++m4_defun([_LT_LANG_RC_CONFIG],
++[AC_REQUIRE([LT_PROG_RC])dnl
++AC_LANG_SAVE
++
++# Source file extension for RC test sources.
++ac_ext=rc
++
++# Object file extension for compiled RC test sources.
++objext=o
++_LT_TAGVAR(objext, $1)=$objext
++
++# Code to be used in simple compile tests
++lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }'
++
++# Code to be used in simple link tests
++lt_simple_link_test_code=$lt_simple_compile_test_code
++
++# ltmain only uses $CC for tagged configurations so make sure $CC is set.
++_LT_TAG_COMPILER
++
++# save warnings/boilerplate of simple test code
++_LT_COMPILER_BOILERPLATE
++_LT_LINKER_BOILERPLATE
++
++# Allow CC to be a program name with arguments.
++lt_save_CC=$CC
++lt_save_CFLAGS=$CFLAGS
++lt_save_GCC=$GCC
++GCC=
++CC=${RC-"windres"}
++CFLAGS=
++compiler=$CC
++_LT_TAGVAR(compiler, $1)=$CC
++_LT_CC_BASENAME([$compiler])
++_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
++
++if test -n "$compiler"; then
++  :
++  _LT_CONFIG($1)
++fi
++
++GCC=$lt_save_GCC
++AC_LANG_RESTORE
++CC=$lt_save_CC
++CFLAGS=$lt_save_CFLAGS
++])# _LT_LANG_RC_CONFIG
++
++
++# LT_PROG_GCJ
++# -----------
++AC_DEFUN([LT_PROG_GCJ],
++[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ],
++  [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ],
++    [AC_CHECK_TOOL(GCJ, gcj,)
++      test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2"
++      AC_SUBST(GCJFLAGS)])])[]dnl
++])
++
++# Old name:
++AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([LT_AC_PROG_GCJ], [])
++
++
++# LT_PROG_GO
++# ----------
++AC_DEFUN([LT_PROG_GO],
++[AC_CHECK_TOOL(GOC, gccgo,)
++])
++
++
++# LT_PROG_RC
++# ----------
++AC_DEFUN([LT_PROG_RC],
++[AC_CHECK_TOOL(RC, windres,)
++])
++
++# Old name:
++AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([LT_AC_PROG_RC], [])
++
++
++# _LT_DECL_EGREP
++# --------------
++# If we don't have a new enough Autoconf to choose the best grep
++# available, choose the one first in the user's PATH.
++m4_defun([_LT_DECL_EGREP],
++[AC_REQUIRE([AC_PROG_EGREP])dnl
++AC_REQUIRE([AC_PROG_FGREP])dnl
++test -z "$GREP" && GREP=grep
++_LT_DECL([], [GREP], [1], [A grep program that handles long lines])
++_LT_DECL([], [EGREP], [1], [An ERE matcher])
++_LT_DECL([], [FGREP], [1], [A literal string matcher])
++dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too
++AC_SUBST([GREP])
++])
++
++
++# _LT_DECL_OBJDUMP
++# --------------
++# If we don't have a new enough Autoconf to choose the best objdump
++# available, choose the one first in the user's PATH.
++m4_defun([_LT_DECL_OBJDUMP],
++[AC_CHECK_TOOL(OBJDUMP, objdump, false)
++test -z "$OBJDUMP" && OBJDUMP=objdump
++_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper])
++AC_SUBST([OBJDUMP])
++])
++
++# _LT_DECL_DLLTOOL
++# ----------------
++# Ensure DLLTOOL variable is set.
++m4_defun([_LT_DECL_DLLTOOL],
++[AC_CHECK_TOOL(DLLTOOL, dlltool, false)
++test -z "$DLLTOOL" && DLLTOOL=dlltool
++_LT_DECL([], [DLLTOOL], [1], [DLL creation program])
++AC_SUBST([DLLTOOL])
++])
++
++# _LT_DECL_FILECMD
++# ----------------
++# Check for a file(cmd) program that can be used to detect file type and magic
++m4_defun([_LT_DECL_FILECMD],
++[AC_CHECK_TOOL([FILECMD], [file], [:])
++_LT_DECL([], [FILECMD], [1], [A file(cmd) program that detects file types])
++])# _LD_DECL_FILECMD
++
++# _LT_DECL_SED
++# ------------
++# Check for a fully-functional sed program, that truncates
++# as few characters as possible.  Prefer GNU sed if found.
++m4_defun([_LT_DECL_SED],
++[AC_PROG_SED
++test -z "$SED" && SED=sed
++Xsed="$SED -e 1s/^X//"
++_LT_DECL([], [SED], [1], [A sed program that does not truncate output])
++_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"],
++    [Sed that helps us avoid accidentally triggering echo(1) options like -n])
++])# _LT_DECL_SED
++
++m4_ifndef([AC_PROG_SED], [
++# NOTE: This macro has been submitted for inclusion into   #
++#  GNU Autoconf as AC_PROG_SED.  When it is available in   #
++#  a released version of Autoconf we should remove this    #
++#  macro and use it instead.                               #
++
++m4_defun([AC_PROG_SED],
++[AC_MSG_CHECKING([for a sed that does not truncate output])
++AC_CACHE_VAL(lt_cv_path_SED,
++[# Loop through the user's path and test for sed and gsed.
++# Then use that list of sed's as ones to test for truncation.
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  test -z "$as_dir" && as_dir=.
++  for lt_ac_prog in sed gsed; do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then
++        lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext"
++      fi
++    done
++  done
++done
++IFS=$as_save_IFS
++lt_ac_max=0
++lt_ac_count=0
++# Add /usr/xpg4/bin/sed as it is typically found on Solaris
++# along with /bin/sed that truncates output.
++for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do
++  test ! -f "$lt_ac_sed" && continue
++  cat /dev/null > conftest.in
++  lt_ac_count=0
++  echo $ECHO_N "0123456789$ECHO_C" >conftest.in
++  # Check for GNU sed and select it if it is found.
++  if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then
++    lt_cv_path_SED=$lt_ac_sed
++    break
++  fi
++  while true; do
++    cat conftest.in conftest.in >conftest.tmp
++    mv conftest.tmp conftest.in
++    cp conftest.in conftest.nl
++    echo >>conftest.nl
++    $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break
++    cmp -s conftest.out conftest.nl || break
++    # 10000 chars as input seems more than enough
++    test 10 -lt "$lt_ac_count" && break
++    lt_ac_count=`expr $lt_ac_count + 1`
++    if test "$lt_ac_count" -gt "$lt_ac_max"; then
++      lt_ac_max=$lt_ac_count
++      lt_cv_path_SED=$lt_ac_sed
++    fi
++  done
++done
++])
++SED=$lt_cv_path_SED
++AC_SUBST([SED])
++AC_MSG_RESULT([$SED])
++])#AC_PROG_SED
++])#m4_ifndef
++
++# Old name:
++AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED])
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([LT_AC_PROG_SED], [])
++
++
++# _LT_CHECK_SHELL_FEATURES
++# ------------------------
++# Find out whether the shell is Bourne or XSI compatible,
++# or has some other useful features.
++m4_defun([_LT_CHECK_SHELL_FEATURES],
++[if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
++  lt_unset=unset
++else
++  lt_unset=false
++fi
++_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl
++
++# test EBCDIC or ASCII
++case `echo X|tr X '\101'` in
++ A) # ASCII based system
++    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
++  lt_SP2NL='tr \040 \012'
++  lt_NL2SP='tr \015\012 \040\040'
++  ;;
++ *) # EBCDIC based system
++  lt_SP2NL='tr \100 \n'
++  lt_NL2SP='tr \r\n \100\100'
++  ;;
++esac
++_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl
++_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl
++])# _LT_CHECK_SHELL_FEATURES
++
++
++# _LT_PATH_CONVERSION_FUNCTIONS
++# -----------------------------
++# Determine what file name conversion functions should be used by
++# func_to_host_file (and, implicitly, by func_to_host_path).  These are needed
++# for certain cross-compile configurations and native mingw.
++m4_defun([_LT_PATH_CONVERSION_FUNCTIONS],
++[AC_REQUIRE([AC_CANONICAL_HOST])dnl
++AC_REQUIRE([AC_CANONICAL_BUILD])dnl
++AC_MSG_CHECKING([how to convert $build file names to $host format])
++AC_CACHE_VAL(lt_cv_to_host_file_cmd,
++[case $host in
++  *-*-mingw* )
++    case $build in
++      *-*-mingw* ) # actually msys
++        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
++        ;;
++      *-*-cygwin* )
++        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
++        ;;
++      * ) # otherwise, assume *nix
++        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
++        ;;
++    esac
++    ;;
++  *-*-cygwin* )
++    case $build in
++      *-*-mingw* ) # actually msys
++        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
++        ;;
++      *-*-cygwin* )
++        lt_cv_to_host_file_cmd=func_convert_file_noop
++        ;;
++      * ) # otherwise, assume *nix
++        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
++        ;;
++    esac
++    ;;
++  * ) # unhandled hosts (and "normal" native builds)
++    lt_cv_to_host_file_cmd=func_convert_file_noop
++    ;;
++esac
++])
++to_host_file_cmd=$lt_cv_to_host_file_cmd
++AC_MSG_RESULT([$lt_cv_to_host_file_cmd])
++_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd],
++         [0], [convert $build file names to $host format])dnl
++
++AC_MSG_CHECKING([how to convert $build file names to toolchain format])
++AC_CACHE_VAL(lt_cv_to_tool_file_cmd,
++[#assume ordinary cross tools, or native build.
++lt_cv_to_tool_file_cmd=func_convert_file_noop
++case $host in
++  *-*-mingw* )
++    case $build in
++      *-*-mingw* ) # actually msys
++        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
++        ;;
++    esac
++    ;;
++esac
++])
++to_tool_file_cmd=$lt_cv_to_tool_file_cmd
++AC_MSG_RESULT([$lt_cv_to_tool_file_cmd])
++_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd],
++         [0], [convert $build files to toolchain format])dnl
++])# _LT_PATH_CONVERSION_FUNCTIONS
++
++# Helper functions for option handling.                    -*- Autoconf -*-
++#
++#   Copyright (C) 2004-2005, 2007-2009, 2011-2019, 2021-2022 Free
++#   Software Foundation, Inc.
++#   Written by Gary V. Vaughan, 2004
++#
++# This file is free software; the Free Software Foundation gives
++# unlimited permission to copy and/or distribute it, with or without
++# modifications, as long as this notice is preserved.
++
++# serial 8 ltoptions.m4
++
++# This is to help aclocal find these macros, as it can't see m4_define.
++AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])])
++
++
++# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME)
++# ------------------------------------------
++m4_define([_LT_MANGLE_OPTION],
++[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])])
++
++
++# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME)
++# ---------------------------------------
++# Set option OPTION-NAME for macro MACRO-NAME, and if there is a
++# matching handler defined, dispatch to it.  Other OPTION-NAMEs are
++# saved as a flag.
++m4_define([_LT_SET_OPTION],
++[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl
++m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]),
++        _LT_MANGLE_DEFUN([$1], [$2]),
++    [m4_warning([Unknown $1 option '$2'])])[]dnl
++])
++
++
++# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET])
++# ------------------------------------------------------------
++# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
++m4_define([_LT_IF_OPTION],
++[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])])
++
++
++# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET)
++# -------------------------------------------------------
++# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME
++# are set.
++m4_define([_LT_UNLESS_OPTIONS],
++[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
++	    [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option),
++		      [m4_define([$0_found])])])[]dnl
++m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3
++])[]dnl
++])
++
++
++# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST)
++# ----------------------------------------
++# OPTION-LIST is a space-separated list of Libtool options associated
++# with MACRO-NAME.  If any OPTION has a matching handler declared with
++# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about
++# the unknown option and exit.
++m4_defun([_LT_SET_OPTIONS],
++[# Set options
++m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
++    [_LT_SET_OPTION([$1], _LT_Option)])
++
++m4_if([$1],[LT_INIT],[
++  dnl
++  dnl Simply set some default values (i.e off) if boolean options were not
++  dnl specified:
++  _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no
++  ])
++  _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no
++  ])
++  dnl
++  dnl If no reference was made to various pairs of opposing options, then
++  dnl we run the default mode handler for the pair.  For example, if neither
++  dnl 'shared' nor 'disable-shared' was passed, we enable building of shared
++  dnl archives by default:
++  _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED])
++  _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC])
++  _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC])
++  _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install],
++		   [_LT_ENABLE_FAST_INSTALL])
++  _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4],
++		   [_LT_WITH_AIX_SONAME([aix])])
++  ])
++])# _LT_SET_OPTIONS
++
++
++
++# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME)
++# -----------------------------------------
++m4_define([_LT_MANGLE_DEFUN],
++[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])])
++
++
++# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE)
++# -----------------------------------------------
++m4_define([LT_OPTION_DEFINE],
++[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl
++])# LT_OPTION_DEFINE
++
++
++# dlopen
++# ------
++LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes
++])
++
++AU_DEFUN([AC_LIBTOOL_DLOPEN],
++[_LT_SET_OPTION([LT_INIT], [dlopen])
++AC_DIAGNOSE([obsolete],
++[$0: Remove this warning and the call to _LT_SET_OPTION when you
++put the 'dlopen' option into LT_INIT's first parameter.])
++])
++
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], [])
++
++
++# win32-dll
++# ---------
++# Declare package support for building win32 dll's.
++LT_OPTION_DEFINE([LT_INIT], [win32-dll],
++[enable_win32_dll=yes
++
++case $host in
++*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*)
++  AC_CHECK_TOOL(AS, as, false)
++  AC_CHECK_TOOL(DLLTOOL, dlltool, false)
++  AC_CHECK_TOOL(OBJDUMP, objdump, false)
++  ;;
++esac
++
++test -z "$AS" && AS=as
++_LT_DECL([], [AS],      [1], [Assembler program])dnl
++
++test -z "$DLLTOOL" && DLLTOOL=dlltool
++_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl
++
++test -z "$OBJDUMP" && OBJDUMP=objdump
++_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl
++])# win32-dll
++
++AU_DEFUN([AC_LIBTOOL_WIN32_DLL],
++[AC_REQUIRE([AC_CANONICAL_HOST])dnl
++_LT_SET_OPTION([LT_INIT], [win32-dll])
++AC_DIAGNOSE([obsolete],
++[$0: Remove this warning and the call to _LT_SET_OPTION when you
++put the 'win32-dll' option into LT_INIT's first parameter.])
++])
++
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [])
++
++
++# _LT_ENABLE_SHARED([DEFAULT])
++# ----------------------------
++# implement the --enable-shared flag, and supports the 'shared' and
++# 'disable-shared' LT_INIT options.
++# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
++m4_define([_LT_ENABLE_SHARED],
++[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl
++AC_ARG_ENABLE([shared],
++    [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@],
++	[build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])],
++    [p=${PACKAGE-default}
++    case $enableval in
++    yes) enable_shared=yes ;;
++    no) enable_shared=no ;;
++    *)
++      enable_shared=no
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for pkg in $enableval; do
++	IFS=$lt_save_ifs
++	if test "X$pkg" = "X$p"; then
++	  enable_shared=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac],
++    [enable_shared=]_LT_ENABLE_SHARED_DEFAULT)
++
++    _LT_DECL([build_libtool_libs], [enable_shared], [0],
++	[Whether or not to build shared libraries])
++])# _LT_ENABLE_SHARED
++
++LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])])
++LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])])
++
++# Old names:
++AC_DEFUN([AC_ENABLE_SHARED],
++[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared])
++])
++
++AC_DEFUN([AC_DISABLE_SHARED],
++[_LT_SET_OPTION([LT_INIT], [disable-shared])
++])
++
++AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
++AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
++
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AM_ENABLE_SHARED], [])
++dnl AC_DEFUN([AM_DISABLE_SHARED], [])
++
++
++
++# _LT_ENABLE_STATIC([DEFAULT])
++# ----------------------------
++# implement the --enable-static flag, and support the 'static' and
++# 'disable-static' LT_INIT options.
++# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
++m4_define([_LT_ENABLE_STATIC],
++[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl
++AC_ARG_ENABLE([static],
++    [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@],
++	[build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])],
++    [p=${PACKAGE-default}
++    case $enableval in
++    yes) enable_static=yes ;;
++    no) enable_static=no ;;
++    *)
++     enable_static=no
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for pkg in $enableval; do
++	IFS=$lt_save_ifs
++	if test "X$pkg" = "X$p"; then
++	  enable_static=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac],
++    [enable_static=]_LT_ENABLE_STATIC_DEFAULT)
++
++    _LT_DECL([build_old_libs], [enable_static], [0],
++	[Whether or not to build static libraries])
++])# _LT_ENABLE_STATIC
++
++LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])])
++LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])])
++
++# Old names:
++AC_DEFUN([AC_ENABLE_STATIC],
++[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static])
++])
++
++AC_DEFUN([AC_DISABLE_STATIC],
++[_LT_SET_OPTION([LT_INIT], [disable-static])
++])
++
++AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
++AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
++
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AM_ENABLE_STATIC], [])
++dnl AC_DEFUN([AM_DISABLE_STATIC], [])
++
++
++
++# _LT_ENABLE_FAST_INSTALL([DEFAULT])
++# ----------------------------------
++# implement the --enable-fast-install flag, and support the 'fast-install'
++# and 'disable-fast-install' LT_INIT options.
++# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
++m4_define([_LT_ENABLE_FAST_INSTALL],
++[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl
++AC_ARG_ENABLE([fast-install],
++    [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@],
++    [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])],
++    [p=${PACKAGE-default}
++    case $enableval in
++    yes) enable_fast_install=yes ;;
++    no) enable_fast_install=no ;;
++    *)
++      enable_fast_install=no
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for pkg in $enableval; do
++	IFS=$lt_save_ifs
++	if test "X$pkg" = "X$p"; then
++	  enable_fast_install=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac],
++    [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT)
++
++_LT_DECL([fast_install], [enable_fast_install], [0],
++	 [Whether or not to optimize for fast installation])dnl
++])# _LT_ENABLE_FAST_INSTALL
++
++LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])])
++LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])])
++
++# Old names:
++AU_DEFUN([AC_ENABLE_FAST_INSTALL],
++[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install])
++AC_DIAGNOSE([obsolete],
++[$0: Remove this warning and the call to _LT_SET_OPTION when you put
++the 'fast-install' option into LT_INIT's first parameter.])
++])
++
++AU_DEFUN([AC_DISABLE_FAST_INSTALL],
++[_LT_SET_OPTION([LT_INIT], [disable-fast-install])
++AC_DIAGNOSE([obsolete],
++[$0: Remove this warning and the call to _LT_SET_OPTION when you put
++the 'disable-fast-install' option into LT_INIT's first parameter.])
++])
++
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], [])
++dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], [])
++
++
++# _LT_WITH_AIX_SONAME([DEFAULT])
++# ----------------------------------
++# implement the --with-aix-soname flag, and support the `aix-soname=aix'
++# and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT
++# is either `aix', `both' or `svr4'.  If omitted, it defaults to `aix'.
++m4_define([_LT_WITH_AIX_SONAME],
++[m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl
++shared_archive_member_spec=
++case $host,$enable_shared in
++power*-*-aix[[5-9]]*,yes)
++  AC_MSG_CHECKING([which variant of shared library versioning to provide])
++  AC_ARG_WITH([aix-soname],
++    [AS_HELP_STRING([--with-aix-soname=aix|svr4|both],
++      [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])],
++    [case $withval in
++    aix|svr4|both)
++      ;;
++    *)
++      AC_MSG_ERROR([Unknown argument to --with-aix-soname])
++      ;;
++    esac
++    lt_cv_with_aix_soname=$with_aix_soname],
++    [AC_CACHE_VAL([lt_cv_with_aix_soname],
++      [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT)
++    with_aix_soname=$lt_cv_with_aix_soname])
++  AC_MSG_RESULT([$with_aix_soname])
++  if test aix != "$with_aix_soname"; then
++    # For the AIX way of multilib, we name the shared archive member
++    # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o',
++    # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File.
++    # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag,
++    # the AIX toolchain works better with OBJECT_MODE set (default 32).
++    if test 64 = "${OBJECT_MODE-32}"; then
++      shared_archive_member_spec=shr_64
++    else
++      shared_archive_member_spec=shr
++    fi
++  fi
++  ;;
++*)
++  with_aix_soname=aix
++  ;;
++esac
++
++_LT_DECL([], [shared_archive_member_spec], [0],
++    [Shared archive member basename, for filename based shared library versioning on AIX])dnl
++])# _LT_WITH_AIX_SONAME
++
++LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])])
++LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])])
++LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])])
++
++
++# _LT_WITH_PIC([MODE])
++# --------------------
++# implement the --with-pic flag, and support the 'pic-only' and 'no-pic'
++# LT_INIT options.
++# MODE is either 'yes' or 'no'.  If omitted, it defaults to 'both'.
++m4_define([_LT_WITH_PIC],
++[AC_ARG_WITH([pic],
++    [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@],
++	[try to use only PIC/non-PIC objects @<:@default=use both@:>@])],
++    [lt_p=${PACKAGE-default}
++    case $withval in
++    yes|no) pic_mode=$withval ;;
++    *)
++      pic_mode=default
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for lt_pkg in $withval; do
++	IFS=$lt_save_ifs
++	if test "X$lt_pkg" = "X$lt_p"; then
++	  pic_mode=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac],
++    [pic_mode=m4_default([$1], [default])])
++
++_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl
++])# _LT_WITH_PIC
++
++LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])])
++LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])])
++
++# Old name:
++AU_DEFUN([AC_LIBTOOL_PICMODE],
++[_LT_SET_OPTION([LT_INIT], [pic-only])
++AC_DIAGNOSE([obsolete],
++[$0: Remove this warning and the call to _LT_SET_OPTION when you
++put the 'pic-only' option into LT_INIT's first parameter.])
++])
++
++dnl aclocal-1.4 backwards compatibility:
++dnl AC_DEFUN([AC_LIBTOOL_PICMODE], [])
++
++
++m4_define([_LTDL_MODE], [])
++LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive],
++		 [m4_define([_LTDL_MODE], [nonrecursive])])
++LT_OPTION_DEFINE([LTDL_INIT], [recursive],
++		 [m4_define([_LTDL_MODE], [recursive])])
++LT_OPTION_DEFINE([LTDL_INIT], [subproject],
++		 [m4_define([_LTDL_MODE], [subproject])])
++
++m4_define([_LTDL_TYPE], [])
++LT_OPTION_DEFINE([LTDL_INIT], [installable],
++		 [m4_define([_LTDL_TYPE], [installable])])
++LT_OPTION_DEFINE([LTDL_INIT], [convenience],
++		 [m4_define([_LTDL_TYPE], [convenience])])
++
++# ltsugar.m4 -- libtool m4 base layer.                         -*-Autoconf-*-
++#
++# Copyright (C) 2004-2005, 2007-2008, 2011-2019, 2021-2022 Free Software
++# Foundation, Inc.
++# Written by Gary V. Vaughan, 2004
++#
++# This file is free software; the Free Software Foundation gives
++# unlimited permission to copy and/or distribute it, with or without
++# modifications, as long as this notice is preserved.
++
++# serial 6 ltsugar.m4
++
++# This is to help aclocal find these macros, as it can't see m4_define.
++AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])])
++
++
++# lt_join(SEP, ARG1, [ARG2...])
++# -----------------------------
++# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their
++# associated separator.
++# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier
++# versions in m4sugar had bugs.
++m4_define([lt_join],
++[m4_if([$#], [1], [],
++       [$#], [2], [[$2]],
++       [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])])
++m4_define([_lt_join],
++[m4_if([$#$2], [2], [],
++       [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])])
++
++
++# lt_car(LIST)
++# lt_cdr(LIST)
++# ------------
++# Manipulate m4 lists.
++# These macros are necessary as long as will still need to support
++# Autoconf-2.59, which quotes differently.
++m4_define([lt_car], [[$1]])
++m4_define([lt_cdr],
++[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])],
++       [$#], 1, [],
++       [m4_dquote(m4_shift($@))])])
++m4_define([lt_unquote], $1)
++
++
++# lt_append(MACRO-NAME, STRING, [SEPARATOR])
++# ------------------------------------------
++# Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'.
++# Note that neither SEPARATOR nor STRING are expanded; they are appended
++# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked).
++# No SEPARATOR is output if MACRO-NAME was previously undefined (different
++# than defined and empty).
++#
++# This macro is needed until we can rely on Autoconf 2.62, since earlier
++# versions of m4sugar mistakenly expanded SEPARATOR but not STRING.
++m4_define([lt_append],
++[m4_define([$1],
++	   m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])])
++
++
++
++# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...])
++# ----------------------------------------------------------
++# Produce a SEP delimited list of all paired combinations of elements of
++# PREFIX-LIST with SUFFIX1 through SUFFIXn.  Each element of the list
++# has the form PREFIXmINFIXSUFFIXn.
++# Needed until we can rely on m4_combine added in Autoconf 2.62.
++m4_define([lt_combine],
++[m4_if(m4_eval([$# > 3]), [1],
++       [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl
++[[m4_foreach([_Lt_prefix], [$2],
++	     [m4_foreach([_Lt_suffix],
++		]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[,
++	[_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])])
++
++
++# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ])
++# -----------------------------------------------------------------------
++# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited
++# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ.
++m4_define([lt_if_append_uniq],
++[m4_ifdef([$1],
++	  [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1],
++		 [lt_append([$1], [$2], [$3])$4],
++		 [$5])],
++	  [lt_append([$1], [$2], [$3])$4])])
++
++
++# lt_dict_add(DICT, KEY, VALUE)
++# -----------------------------
++m4_define([lt_dict_add],
++[m4_define([$1($2)], [$3])])
++
++
++# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE)
++# --------------------------------------------
++m4_define([lt_dict_add_subkey],
++[m4_define([$1($2:$3)], [$4])])
++
++
++# lt_dict_fetch(DICT, KEY, [SUBKEY])
++# ----------------------------------
++m4_define([lt_dict_fetch],
++[m4_ifval([$3],
++	m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]),
++    m4_ifdef([$1($2)], [m4_defn([$1($2)])]))])
++
++
++# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE])
++# -----------------------------------------------------------------
++m4_define([lt_if_dict_fetch],
++[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4],
++	[$5],
++    [$6])])
++
++
++# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...])
++# --------------------------------------------------------------
++m4_define([lt_dict_filter],
++[m4_if([$5], [], [],
++  [lt_join(m4_quote(m4_default([$4], [[, ]])),
++           lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]),
++		      [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl
++])
++
++# ltversion.m4 -- version numbers			-*- Autoconf -*-
++#
++#   Copyright (C) 2004, 2011-2019, 2021-2022 Free Software Foundation,
++#   Inc.
++#   Written by Scott James Remnant, 2004
++#
++# This file is free software; the Free Software Foundation gives
++# unlimited permission to copy and/or distribute it, with or without
++# modifications, as long as this notice is preserved.
++
++# @configure_input@
++
++# serial 4245 ltversion.m4
++# This file is part of GNU Libtool
++
++m4_define([LT_PACKAGE_VERSION], [2.4.7])
++m4_define([LT_PACKAGE_REVISION], [2.4.7])
++
++AC_DEFUN([LTVERSION_VERSION],
++[macro_version='2.4.7'
++macro_revision='2.4.7'
++_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
++_LT_DECL(, macro_revision, 0)
++])
++
++# lt~obsolete.m4 -- aclocal satisfying obsolete definitions.    -*-Autoconf-*-
++#
++#   Copyright (C) 2004-2005, 2007, 2009, 2011-2019, 2021-2022 Free
++#   Software Foundation, Inc.
++#   Written by Scott James Remnant, 2004.
++#
++# This file is free software; the Free Software Foundation gives
++# unlimited permission to copy and/or distribute it, with or without
++# modifications, as long as this notice is preserved.
++
++# serial 5 lt~obsolete.m4
++
++# These exist entirely to fool aclocal when bootstrapping libtool.
++#
++# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN),
++# which have later been changed to m4_define as they aren't part of the
++# exported API, or moved to Autoconf or Automake where they belong.
++#
++# The trouble is, aclocal is a bit thick.  It'll see the old AC_DEFUN
++# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us
++# using a macro with the same name in our local m4/libtool.m4 it'll
++# pull the old libtool.m4 in (it doesn't see our shiny new m4_define
++# and doesn't know about Autoconf macros at all.)
++#
++# So we provide this file, which has a silly filename so it's always
++# included after everything else.  This provides aclocal with the
++# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything
++# because those macros already exist, or will be overwritten later.
++# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6.
++#
++# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here.
++# Yes, that means every name once taken will need to remain here until
++# we give up compatibility with versions before 1.7, at which point
++# we need to keep only those names which we still refer to.
++
++# This is to help aclocal find these macros, as it can't see m4_define.
++AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])])
++
++m4_ifndef([AC_LIBTOOL_LINKER_OPTION],	[AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])])
++m4_ifndef([AC_PROG_EGREP],		[AC_DEFUN([AC_PROG_EGREP])])
++m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])])
++m4_ifndef([_LT_AC_SHELL_INIT],		[AC_DEFUN([_LT_AC_SHELL_INIT])])
++m4_ifndef([_LT_AC_SYS_LIBPATH_AIX],	[AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])])
++m4_ifndef([_LT_PROG_LTMAIN],		[AC_DEFUN([_LT_PROG_LTMAIN])])
++m4_ifndef([_LT_AC_TAGVAR],		[AC_DEFUN([_LT_AC_TAGVAR])])
++m4_ifndef([AC_LTDL_ENABLE_INSTALL],	[AC_DEFUN([AC_LTDL_ENABLE_INSTALL])])
++m4_ifndef([AC_LTDL_PREOPEN],		[AC_DEFUN([AC_LTDL_PREOPEN])])
++m4_ifndef([_LT_AC_SYS_COMPILER],	[AC_DEFUN([_LT_AC_SYS_COMPILER])])
++m4_ifndef([_LT_AC_LOCK],		[AC_DEFUN([_LT_AC_LOCK])])
++m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE],	[AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])])
++m4_ifndef([_LT_AC_TRY_DLOPEN_SELF],	[AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])])
++m4_ifndef([AC_LIBTOOL_PROG_CC_C_O],	[AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])])
++m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])])
++m4_ifndef([AC_LIBTOOL_OBJDIR],		[AC_DEFUN([AC_LIBTOOL_OBJDIR])])
++m4_ifndef([AC_LTDL_OBJDIR],		[AC_DEFUN([AC_LTDL_OBJDIR])])
++m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])])
++m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP],	[AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])])
++m4_ifndef([AC_PATH_MAGIC],		[AC_DEFUN([AC_PATH_MAGIC])])
++m4_ifndef([AC_PROG_LD_GNU],		[AC_DEFUN([AC_PROG_LD_GNU])])
++m4_ifndef([AC_PROG_LD_RELOAD_FLAG],	[AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])])
++m4_ifndef([AC_DEPLIBS_CHECK_METHOD],	[AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])])
++m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])])
++m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])])
++m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])])
++m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS],	[AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])])
++m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP],	[AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])])
++m4_ifndef([LT_AC_PROG_EGREP],		[AC_DEFUN([LT_AC_PROG_EGREP])])
++m4_ifndef([LT_AC_PROG_SED],		[AC_DEFUN([LT_AC_PROG_SED])])
++m4_ifndef([_LT_CC_BASENAME],		[AC_DEFUN([_LT_CC_BASENAME])])
++m4_ifndef([_LT_COMPILER_BOILERPLATE],	[AC_DEFUN([_LT_COMPILER_BOILERPLATE])])
++m4_ifndef([_LT_LINKER_BOILERPLATE],	[AC_DEFUN([_LT_LINKER_BOILERPLATE])])
++m4_ifndef([_AC_PROG_LIBTOOL],		[AC_DEFUN([_AC_PROG_LIBTOOL])])
++m4_ifndef([AC_LIBTOOL_SETUP],		[AC_DEFUN([AC_LIBTOOL_SETUP])])
++m4_ifndef([_LT_AC_CHECK_DLFCN],		[AC_DEFUN([_LT_AC_CHECK_DLFCN])])
++m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER],	[AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])])
++m4_ifndef([_LT_AC_TAGCONFIG],		[AC_DEFUN([_LT_AC_TAGCONFIG])])
++m4_ifndef([AC_DISABLE_FAST_INSTALL],	[AC_DEFUN([AC_DISABLE_FAST_INSTALL])])
++m4_ifndef([_LT_AC_LANG_CXX],		[AC_DEFUN([_LT_AC_LANG_CXX])])
++m4_ifndef([_LT_AC_LANG_F77],		[AC_DEFUN([_LT_AC_LANG_F77])])
++m4_ifndef([_LT_AC_LANG_GCJ],		[AC_DEFUN([_LT_AC_LANG_GCJ])])
++m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])])
++m4_ifndef([_LT_AC_LANG_C_CONFIG],	[AC_DEFUN([_LT_AC_LANG_C_CONFIG])])
++m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])])
++m4_ifndef([_LT_AC_LANG_CXX_CONFIG],	[AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])])
++m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])])
++m4_ifndef([_LT_AC_LANG_F77_CONFIG],	[AC_DEFUN([_LT_AC_LANG_F77_CONFIG])])
++m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])])
++m4_ifndef([_LT_AC_LANG_GCJ_CONFIG],	[AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])])
++m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])])
++m4_ifndef([_LT_AC_LANG_RC_CONFIG],	[AC_DEFUN([_LT_AC_LANG_RC_CONFIG])])
++m4_ifndef([AC_LIBTOOL_CONFIG],		[AC_DEFUN([AC_LIBTOOL_CONFIG])])
++m4_ifndef([_LT_AC_FILE_LTDLL_C],	[AC_DEFUN([_LT_AC_FILE_LTDLL_C])])
++m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS],	[AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])])
++m4_ifndef([_LT_AC_PROG_CXXCPP],		[AC_DEFUN([_LT_AC_PROG_CXXCPP])])
++m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS],	[AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])])
++m4_ifndef([_LT_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])])
++m4_ifndef([_LT_PROG_F77],		[AC_DEFUN([_LT_PROG_F77])])
++m4_ifndef([_LT_PROG_FC],		[AC_DEFUN([_LT_PROG_FC])])
++m4_ifndef([_LT_PROG_CXX],		[AC_DEFUN([_LT_PROG_CXX])])
++
++# Copyright (C) 2002-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_AUTOMAKE_VERSION(VERSION)
++# ----------------------------
++# Automake X.Y traces this macro to ensure aclocal.m4 has been
++# generated from the m4 files accompanying Automake X.Y.
++# (This private macro should not be called outside this file.)
++AC_DEFUN([AM_AUTOMAKE_VERSION],
++[am__api_version='1.16'
++dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
++dnl require some minimum version.  Point them to the right macro.
++m4_if([$1], [1.16.5], [],
++      [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
++])
++
++# _AM_AUTOCONF_VERSION(VERSION)
++# -----------------------------
++# aclocal traces this macro to find the Autoconf version.
++# This is a private macro too.  Using m4_define simplifies
++# the logic in aclocal, which can simply ignore this definition.
++m4_define([_AM_AUTOCONF_VERSION], [])
++
++# AM_SET_CURRENT_AUTOMAKE_VERSION
++# -------------------------------
++# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
++# This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
++AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
++[AM_AUTOMAKE_VERSION([1.16.5])dnl
++m4_ifndef([AC_AUTOCONF_VERSION],
++  [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
++_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
++
++# AM_AUX_DIR_EXPAND                                         -*- Autoconf -*-
++
++# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets
++# $ac_aux_dir to '$srcdir/foo'.  In other projects, it is set to
++# '$srcdir', '$srcdir/..', or '$srcdir/../..'.
++#
++# Of course, Automake must honor this variable whenever it calls a
++# tool from the auxiliary directory.  The problem is that $srcdir (and
++# therefore $ac_aux_dir as well) can be either absolute or relative,
++# depending on how configure is run.  This is pretty annoying, since
++# it makes $ac_aux_dir quite unusable in subdirectories: in the top
++# source directory, any form will work fine, but in subdirectories a
++# relative path needs to be adjusted first.
++#
++# $ac_aux_dir/missing
++#    fails when called from a subdirectory if $ac_aux_dir is relative
++# $top_srcdir/$ac_aux_dir/missing
++#    fails if $ac_aux_dir is absolute,
++#    fails when called from a subdirectory in a VPATH build with
++#          a relative $ac_aux_dir
++#
++# The reason of the latter failure is that $top_srcdir and $ac_aux_dir
++# are both prefixed by $srcdir.  In an in-source build this is usually
++# harmless because $srcdir is '.', but things will broke when you
++# start a VPATH build or use an absolute $srcdir.
++#
++# So we could use something similar to $top_srcdir/$ac_aux_dir/missing,
++# iff we strip the leading $srcdir from $ac_aux_dir.  That would be:
++#   am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"`
++# and then we would define $MISSING as
++#   MISSING="\${SHELL} $am_aux_dir/missing"
++# This will work as long as MISSING is not called from configure, because
++# unfortunately $(top_srcdir) has no meaning in configure.
++# However there are other variables, like CC, which are often used in
++# configure, and could therefore not use this "fixed" $ac_aux_dir.
++#
++# Another solution, used here, is to always expand $ac_aux_dir to an
++# absolute PATH.  The drawback is that using absolute paths prevent a
++# configured tree to be moved without reconfiguration.
++
++AC_DEFUN([AM_AUX_DIR_EXPAND],
++[AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
++# Expand $ac_aux_dir to an absolute path.
++am_aux_dir=`cd "$ac_aux_dir" && pwd`
++])
++
++# AM_CONDITIONAL                                            -*- Autoconf -*-
++
++# Copyright (C) 1997-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_CONDITIONAL(NAME, SHELL-CONDITION)
++# -------------------------------------
++# Define a conditional.
++AC_DEFUN([AM_CONDITIONAL],
++[AC_PREREQ([2.52])dnl
++ m4_if([$1], [TRUE],  [AC_FATAL([$0: invalid condition: $1])],
++       [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl
++AC_SUBST([$1_TRUE])dnl
++AC_SUBST([$1_FALSE])dnl
++_AM_SUBST_NOTMAKE([$1_TRUE])dnl
++_AM_SUBST_NOTMAKE([$1_FALSE])dnl
++m4_define([_AM_COND_VALUE_$1], [$2])dnl
++if $2; then
++  $1_TRUE=
++  $1_FALSE='#'
++else
++  $1_TRUE='#'
++  $1_FALSE=
++fi
++AC_CONFIG_COMMANDS_PRE(
++[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then
++  AC_MSG_ERROR([[conditional "$1" was never defined.
++Usually this means the macro was only invoked conditionally.]])
++fi])])
++
++# Copyright (C) 1999-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++
++# There are a few dirty hacks below to avoid letting 'AC_PROG_CC' be
++# written in clear, in which case automake, when reading aclocal.m4,
++# will think it sees a *use*, and therefore will trigger all it's
++# C support machinery.  Also note that it means that autoscan, seeing
++# CC etc. in the Makefile, will ask for an AC_PROG_CC use...
++
++
++# _AM_DEPENDENCIES(NAME)
++# ----------------------
++# See how the compiler implements dependency checking.
++# NAME is "CC", "CXX", "OBJC", "OBJCXX", "UPC", or "GJC".
++# We try a few techniques and use that to set a single cache variable.
++#
++# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was
++# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular
++# dependency, and given that the user is not expected to run this macro,
++# just rely on AC_PROG_CC.
++AC_DEFUN([_AM_DEPENDENCIES],
++[AC_REQUIRE([AM_SET_DEPDIR])dnl
++AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl
++AC_REQUIRE([AM_MAKE_INCLUDE])dnl
++AC_REQUIRE([AM_DEP_TRACK])dnl
++
++m4_if([$1], [CC],   [depcc="$CC"   am_compiler_list=],
++      [$1], [CXX],  [depcc="$CXX"  am_compiler_list=],
++      [$1], [OBJC], [depcc="$OBJC" am_compiler_list='gcc3 gcc'],
++      [$1], [OBJCXX], [depcc="$OBJCXX" am_compiler_list='gcc3 gcc'],
++      [$1], [UPC],  [depcc="$UPC"  am_compiler_list=],
++      [$1], [GCJ],  [depcc="$GCJ"  am_compiler_list='gcc3 gcc'],
++                    [depcc="$$1"   am_compiler_list=])
++
++AC_CACHE_CHECK([dependency style of $depcc],
++               [am_cv_$1_dependencies_compiler_type],
++[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
++  # We make a subdir and do the tests there.  Otherwise we can end up
++  # making bogus files that we don't know about and never remove.  For
++  # instance it was reported that on HP-UX the gcc test will end up
++  # making a dummy file named 'D' -- because '-MD' means "put the output
++  # in D".
++  rm -rf conftest.dir
++  mkdir conftest.dir
++  # Copy depcomp to subdir because otherwise we won't find it if we're
++  # using a relative directory.
++  cp "$am_depcomp" conftest.dir
++  cd conftest.dir
++  # We will build objects and dependencies in a subdirectory because
++  # it helps to detect inapplicable dependency modes.  For instance
++  # both Tru64's cc and ICC support -MD to output dependencies as a
++  # side effect of compilation, but ICC will put the dependencies in
++  # the current directory while Tru64 will put them in the object
++  # directory.
++  mkdir sub
++
++  am_cv_$1_dependencies_compiler_type=none
++  if test "$am_compiler_list" = ""; then
++     am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp`
++  fi
++  am__universal=false
++  m4_case([$1], [CC],
++    [case " $depcc " in #(
++     *\ -arch\ *\ -arch\ *) am__universal=true ;;
++     esac],
++    [CXX],
++    [case " $depcc " in #(
++     *\ -arch\ *\ -arch\ *) am__universal=true ;;
++     esac])
++
++  for depmode in $am_compiler_list; do
++    # Setup a source with many dependencies, because some compilers
++    # like to wrap large dependency lists on column 80 (with \), and
++    # we should not choose a depcomp mode which is confused by this.
++    #
++    # We need to recreate these files for each test, as the compiler may
++    # overwrite some of them when testing with obscure command lines.
++    # This happens at least with the AIX C compiler.
++    : > sub/conftest.c
++    for i in 1 2 3 4 5 6; do
++      echo '#include "conftst'$i'.h"' >> sub/conftest.c
++      # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
++      # Solaris 10 /bin/sh.
++      echo '/* dummy */' > sub/conftst$i.h
++    done
++    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
++
++    # We check with '-c' and '-o' for the sake of the "dashmstdout"
++    # mode.  It turns out that the SunPro C++ compiler does not properly
++    # handle '-M -o', and we need to detect this.  Also, some Intel
++    # versions had trouble with output in subdirs.
++    am__obj=sub/conftest.${OBJEXT-o}
++    am__minus_obj="-o $am__obj"
++    case $depmode in
++    gcc)
++      # This depmode causes a compiler race in universal mode.
++      test "$am__universal" = false || continue
++      ;;
++    nosideeffect)
++      # After this tag, mechanisms are not by side-effect, so they'll
++      # only be used when explicitly requested.
++      if test "x$enable_dependency_tracking" = xyes; then
++	continue
++      else
++	break
++      fi
++      ;;
++    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
++      # This compiler won't grok '-c -o', but also, the minuso test has
++      # not run yet.  These depmodes are late enough in the game, and
++      # so weak that their functioning should not be impacted.
++      am__obj=conftest.${OBJEXT-o}
++      am__minus_obj=
++      ;;
++    none) break ;;
++    esac
++    if depmode=$depmode \
++       source=sub/conftest.c object=$am__obj \
++       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
++       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
++         >/dev/null 2>conftest.err &&
++       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
++       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
++      # icc doesn't choke on unknown options, it will just issue warnings
++      # or remarks (even with -Werror).  So we grep stderr for any message
++      # that says an option was ignored or not supported.
++      # When given -MP, icc 7.0 and 7.1 complain thusly:
++      #   icc: Command line warning: ignoring option '-M'; no argument required
++      # The diagnosis changed in icc 8.0:
++      #   icc: Command line remark: option '-MP' not supported
++      if (grep 'ignoring option' conftest.err ||
++          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
++        am_cv_$1_dependencies_compiler_type=$depmode
++        break
++      fi
++    fi
++  done
++
++  cd ..
++  rm -rf conftest.dir
++else
++  am_cv_$1_dependencies_compiler_type=none
++fi
++])
++AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type])
++AM_CONDITIONAL([am__fastdep$1], [
++  test "x$enable_dependency_tracking" != xno \
++  && test "$am_cv_$1_dependencies_compiler_type" = gcc3])
++])
++
++
++# AM_SET_DEPDIR
++# -------------
++# Choose a directory name for dependency files.
++# This macro is AC_REQUIREd in _AM_DEPENDENCIES.
++AC_DEFUN([AM_SET_DEPDIR],
++[AC_REQUIRE([AM_SET_LEADING_DOT])dnl
++AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl
++])
++
++
++# AM_DEP_TRACK
++# ------------
++AC_DEFUN([AM_DEP_TRACK],
++[AC_ARG_ENABLE([dependency-tracking], [dnl
++AS_HELP_STRING(
++  [--enable-dependency-tracking],
++  [do not reject slow dependency extractors])
++AS_HELP_STRING(
++  [--disable-dependency-tracking],
++  [speeds up one-time build])])
++if test "x$enable_dependency_tracking" != xno; then
++  am_depcomp="$ac_aux_dir/depcomp"
++  AMDEPBACKSLASH='\'
++  am__nodep='_no'
++fi
++AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno])
++AC_SUBST([AMDEPBACKSLASH])dnl
++_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl
++AC_SUBST([am__nodep])dnl
++_AM_SUBST_NOTMAKE([am__nodep])dnl
++])
++
++# Generate code to set up dependency tracking.              -*- Autoconf -*-
++
++# Copyright (C) 1999-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# _AM_OUTPUT_DEPENDENCY_COMMANDS
++# ------------------------------
++AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS],
++[{
++  # Older Autoconf quotes --file arguments for eval, but not when files
++  # are listed without --file.  Let's play safe and only enable the eval
++  # if we detect the quoting.
++  # TODO: see whether this extra hack can be removed once we start
++  # requiring Autoconf 2.70 or later.
++  AS_CASE([$CONFIG_FILES],
++          [*\'*], [eval set x "$CONFIG_FILES"],
++          [*], [set x $CONFIG_FILES])
++  shift
++  # Used to flag and report bootstrapping failures.
++  am_rc=0
++  for am_mf
++  do
++    # Strip MF so we end up with the name of the file.
++    am_mf=`AS_ECHO(["$am_mf"]) | sed -e 's/:.*$//'`
++    # Check whether this is an Automake generated Makefile which includes
++    # dependency-tracking related rules and includes.
++    # Grep'ing the whole file directly is not great: AIX grep has a line
++    # limit of 2048, but all sed's we know have understand at least 4000.
++    sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
++      || continue
++    am_dirpart=`AS_DIRNAME(["$am_mf"])`
++    am_filepart=`AS_BASENAME(["$am_mf"])`
++    AM_RUN_LOG([cd "$am_dirpart" \
++      && sed -e '/# am--include-marker/d' "$am_filepart" \
++        | $MAKE -f - am--depfiles]) || am_rc=$?
++  done
++  if test $am_rc -ne 0; then
++    AC_MSG_FAILURE([Something went wrong bootstrapping makefile fragments
++    for automatic dependency tracking.  If GNU make was not used, consider
++    re-running the configure script with MAKE="gmake" (or whatever is
++    necessary).  You can also try re-running configure with the
++    '--disable-dependency-tracking' option to at least be able to build
++    the package (albeit without support for automatic dependency tracking).])
++  fi
++  AS_UNSET([am_dirpart])
++  AS_UNSET([am_filepart])
++  AS_UNSET([am_mf])
++  AS_UNSET([am_rc])
++  rm -f conftest-deps.mk
++}
++])# _AM_OUTPUT_DEPENDENCY_COMMANDS
++
++
++# AM_OUTPUT_DEPENDENCY_COMMANDS
++# -----------------------------
++# This macro should only be invoked once -- use via AC_REQUIRE.
++#
++# This code is only required when automatic dependency tracking is enabled.
++# This creates each '.Po' and '.Plo' makefile fragment that we'll need in
++# order to bootstrap the dependency handling code.
++AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
++[AC_CONFIG_COMMANDS([depfiles],
++     [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS],
++     [AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"])])
++
++# Do all the work for Automake.                             -*- Autoconf -*-
++
++# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# This macro actually does too much.  Some checks are only needed if
++# your package does certain things.  But this isn't really a big deal.
++
++dnl Redefine AC_PROG_CC to automatically invoke _AM_PROG_CC_C_O.
++m4_define([AC_PROG_CC],
++m4_defn([AC_PROG_CC])
++[_AM_PROG_CC_C_O
++])
++
++# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE])
++# AM_INIT_AUTOMAKE([OPTIONS])
++# -----------------------------------------------
++# The call with PACKAGE and VERSION arguments is the old style
++# call (pre autoconf-2.50), which is being phased out.  PACKAGE
++# and VERSION should now be passed to AC_INIT and removed from
++# the call to AM_INIT_AUTOMAKE.
++# We support both call styles for the transition.  After
++# the next Automake release, Autoconf can make the AC_INIT
++# arguments mandatory, and then we can depend on a new Autoconf
++# release and drop the old call support.
++AC_DEFUN([AM_INIT_AUTOMAKE],
++[AC_PREREQ([2.65])dnl
++m4_ifdef([_$0_ALREADY_INIT],
++  [m4_fatal([$0 expanded multiple times
++]m4_defn([_$0_ALREADY_INIT]))],
++  [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl
++dnl Autoconf wants to disallow AM_ names.  We explicitly allow
++dnl the ones we care about.
++m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
++AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl
++AC_REQUIRE([AC_PROG_INSTALL])dnl
++if test "`cd $srcdir && pwd`" != "`pwd`"; then
++  # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
++  # is not polluted with repeated "-I."
++  AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl
++  # test to see if srcdir already configured
++  if test -f $srcdir/config.status; then
++    AC_MSG_ERROR([source directory already configured; run "make distclean" there first])
++  fi
++fi
++
++# test whether we have cygpath
++if test -z "$CYGPATH_W"; then
++  if (cygpath --version) >/dev/null 2>/dev/null; then
++    CYGPATH_W='cygpath -w'
++  else
++    CYGPATH_W=echo
++  fi
++fi
++AC_SUBST([CYGPATH_W])
++
++# Define the identity of the package.
++dnl Distinguish between old-style and new-style calls.
++m4_ifval([$2],
++[AC_DIAGNOSE([obsolete],
++             [$0: two- and three-arguments forms are deprecated.])
++m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
++ AC_SUBST([PACKAGE], [$1])dnl
++ AC_SUBST([VERSION], [$2])],
++[_AM_SET_OPTIONS([$1])dnl
++dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
++m4_if(
++  m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]),
++  [ok:ok],,
++  [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
++ AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
++ AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl
++
++_AM_IF_OPTION([no-define],,
++[AC_DEFINE_UNQUOTED([PACKAGE], ["$PACKAGE"], [Name of package])
++ AC_DEFINE_UNQUOTED([VERSION], ["$VERSION"], [Version number of package])])dnl
++
++# Some tools Automake needs.
++AC_REQUIRE([AM_SANITY_CHECK])dnl
++AC_REQUIRE([AC_ARG_PROGRAM])dnl
++AM_MISSING_PROG([ACLOCAL], [aclocal-${am__api_version}])
++AM_MISSING_PROG([AUTOCONF], [autoconf])
++AM_MISSING_PROG([AUTOMAKE], [automake-${am__api_version}])
++AM_MISSING_PROG([AUTOHEADER], [autoheader])
++AM_MISSING_PROG([MAKEINFO], [makeinfo])
++AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
++AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl
++AC_REQUIRE([AC_PROG_MKDIR_P])dnl
++# For better backward compatibility.  To be removed once Automake 1.9.x
++# dies out for good.  For more background, see:
++# 
++# 
++AC_SUBST([mkdir_p], ['$(MKDIR_P)'])
++# We need awk for the "check" target (and possibly the TAP driver).  The
++# system "awk" is bad on some platforms.
++AC_REQUIRE([AC_PROG_AWK])dnl
++AC_REQUIRE([AC_PROG_MAKE_SET])dnl
++AC_REQUIRE([AM_SET_LEADING_DOT])dnl
++_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])],
++	      [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])],
++			     [_AM_PROG_TAR([v7])])])
++_AM_IF_OPTION([no-dependencies],,
++[AC_PROVIDE_IFELSE([AC_PROG_CC],
++		  [_AM_DEPENDENCIES([CC])],
++		  [m4_define([AC_PROG_CC],
++			     m4_defn([AC_PROG_CC])[_AM_DEPENDENCIES([CC])])])dnl
++AC_PROVIDE_IFELSE([AC_PROG_CXX],
++		  [_AM_DEPENDENCIES([CXX])],
++		  [m4_define([AC_PROG_CXX],
++			     m4_defn([AC_PROG_CXX])[_AM_DEPENDENCIES([CXX])])])dnl
++AC_PROVIDE_IFELSE([AC_PROG_OBJC],
++		  [_AM_DEPENDENCIES([OBJC])],
++		  [m4_define([AC_PROG_OBJC],
++			     m4_defn([AC_PROG_OBJC])[_AM_DEPENDENCIES([OBJC])])])dnl
++AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
++		  [_AM_DEPENDENCIES([OBJCXX])],
++		  [m4_define([AC_PROG_OBJCXX],
++			     m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
++])
++# Variables for tags utilities; see am/tags.am
++if test -z "$CTAGS"; then
++  CTAGS=ctags
++fi
++AC_SUBST([CTAGS])
++if test -z "$ETAGS"; then
++  ETAGS=etags
++fi
++AC_SUBST([ETAGS])
++if test -z "$CSCOPE"; then
++  CSCOPE=cscope
++fi
++AC_SUBST([CSCOPE])
++
++AC_REQUIRE([AM_SILENT_RULES])dnl
++dnl The testsuite driver may need to know about EXEEXT, so add the
++dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen.  This
++dnl macro is hooked onto _AC_COMPILER_EXEEXT early, see below.
++AC_CONFIG_COMMANDS_PRE(dnl
++[m4_provide_if([_AM_COMPILER_EXEEXT],
++  [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl
++
++# POSIX will say in a future version that running "rm -f" with no argument
++# is OK; and we want to be able to make that assumption in our Makefile
++# recipes.  So use an aggressive probe to check that the usage we want is
++# actually supported "in the wild" to an acceptable degree.
++# See automake bug#10828.
++# To make any issue more visible, cause the running configure to be aborted
++# by default if the 'rm' program in use doesn't match our expectations; the
++# user can still override this though.
++if rm -f && rm -fr && rm -rf; then : OK; else
++  cat >&2 <<'END'
++Oops!
++
++Your 'rm' program seems unable to run without file operands specified
++on the command line, even when the '-f' option is present.  This is contrary
++to the behaviour of most rm programs out there, and not conforming with
++the upcoming POSIX standard: <http://austingroupbugs.net/view.php?id=542>
++
++Please tell bug-automake@gnu.org about your system, including the value
++of your $PATH and any error possibly output before this message.  This
++can help us improve future automake versions.
++
++END
++  if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
++    echo 'Configuration will proceed anyway, since you have set the' >&2
++    echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
++    echo >&2
++  else
++    cat >&2 <<'END'
++Aborting the configuration process, to ensure you take notice of the issue.
++
++You can download and install GNU coreutils to get an 'rm' implementation
++that behaves properly: <https://www.gnu.org/software/coreutils/>.
++
++If you want to complete the configuration process using your problematic
++'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
++to "yes", and re-run configure.
++
++END
++    AC_MSG_ERROR([Your 'rm' program is bad, sorry.])
++  fi
++fi
++dnl The trailing newline in this macro's definition is deliberate, for
++dnl backward compatibility and to allow trailing 'dnl'-style comments
++dnl after the AM_INIT_AUTOMAKE invocation. See automake bug#16841.
++])
++
++dnl Hook into '_AC_COMPILER_EXEEXT' early to learn its expansion.  Do not
++dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further
++dnl mangled by Autoconf and run in a shell conditional statement.
++m4_define([_AC_COMPILER_EXEEXT],
++m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])])
++
++# When config.status generates a header, we must update the stamp-h file.
++# This file resides in the same directory as the config header
++# that is generated.  The stamp files are numbered to have different names.
++
++# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the
++# loop where config.status creates the headers, so we can generate
++# our stamp files there.
++AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK],
++[# Compute $1's index in $config_headers.
++_am_arg=$1
++_am_stamp_count=1
++for _am_header in $config_headers :; do
++  case $_am_header in
++    $_am_arg | $_am_arg:* )
++      break ;;
++    * )
++      _am_stamp_count=`expr $_am_stamp_count + 1` ;;
++  esac
++done
++echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
++
++# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_PROG_INSTALL_SH
++# ------------------
++# Define $install_sh.
++AC_DEFUN([AM_PROG_INSTALL_SH],
++[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
++if test x"${install_sh+set}" != xset; then
++  case $am_aux_dir in
++  *\ * | *\	*)
++    install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
++  *)
++    install_sh="\${SHELL} $am_aux_dir/install-sh"
++  esac
++fi
++AC_SUBST([install_sh])])
++
++# Copyright (C) 2003-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# Check whether the underlying file-system supports filenames
++# with a leading dot.  For instance MS-DOS doesn't.
++AC_DEFUN([AM_SET_LEADING_DOT],
++[rm -rf .tst 2>/dev/null
++mkdir .tst 2>/dev/null
++if test -d .tst; then
++  am__leading_dot=.
++else
++  am__leading_dot=_
++fi
++rmdir .tst 2>/dev/null
++AC_SUBST([am__leading_dot])])
++
++# Add --enable-maintainer-mode option to configure.         -*- Autoconf -*-
++# From Jim Meyering
++
++# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_MAINTAINER_MODE([DEFAULT-MODE])
++# ----------------------------------
++# Control maintainer-specific portions of Makefiles.
++# Default is to disable them, unless 'enable' is passed literally.
++# For symmetry, 'disable' may be passed as well.  Anyway, the user
++# can override the default with the --enable/--disable switch.
++AC_DEFUN([AM_MAINTAINER_MODE],
++[m4_case(m4_default([$1], [disable]),
++       [enable], [m4_define([am_maintainer_other], [disable])],
++       [disable], [m4_define([am_maintainer_other], [enable])],
++       [m4_define([am_maintainer_other], [enable])
++        m4_warn([syntax], [unexpected argument to AM@&t@_MAINTAINER_MODE: $1])])
++AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
++  dnl maintainer-mode's default is 'disable' unless 'enable' is passed
++  AC_ARG_ENABLE([maintainer-mode],
++    [AS_HELP_STRING([--]am_maintainer_other[-maintainer-mode],
++      am_maintainer_other[ make rules and dependencies not useful
++      (and sometimes confusing) to the casual installer])],
++    [USE_MAINTAINER_MODE=$enableval],
++    [USE_MAINTAINER_MODE=]m4_if(am_maintainer_other, [enable], [no], [yes]))
++  AC_MSG_RESULT([$USE_MAINTAINER_MODE])
++  AM_CONDITIONAL([MAINTAINER_MODE], [test $USE_MAINTAINER_MODE = yes])
++  MAINT=$MAINTAINER_MODE_TRUE
++  AC_SUBST([MAINT])dnl
++]
++)
++
++# Check to see how 'make' treats includes.	            -*- Autoconf -*-
++
++# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_MAKE_INCLUDE()
++# -----------------
++# Check whether make has an 'include' directive that can support all
++# the idioms we need for our automatic dependency tracking code.
++AC_DEFUN([AM_MAKE_INCLUDE],
++[AC_MSG_CHECKING([whether ${MAKE-make} supports the include directive])
++cat > confinc.mk << 'END'
++am__doit:
++	@echo this is the am__doit target >confinc.out
++.PHONY: am__doit
++END
++am__include="#"
++am__quote=
++# BSD make does it like this.
++echo '.include "confinc.mk" # ignored' > confmf.BSD
++# Other make implementations (GNU, Solaris 10, AIX) do it like this.
++echo 'include confinc.mk # ignored' > confmf.GNU
++_am_result=no
++for s in GNU BSD; do
++  AM_RUN_LOG([${MAKE-make} -f confmf.$s && cat confinc.out])
++  AS_CASE([$?:`cat confinc.out 2>/dev/null`],
++      ['0:this is the am__doit target'],
++      [AS_CASE([$s],
++          [BSD], [am__include='.include' am__quote='"'],
++          [am__include='include' am__quote=''])])
++  if test "$am__include" != "#"; then
++    _am_result="yes ($s style)"
++    break
++  fi
++done
++rm -f confinc.* confmf.*
++AC_MSG_RESULT([${_am_result}])
++AC_SUBST([am__include])])
++AC_SUBST([am__quote])])
++
++# Fake the existence of programs that GNU maintainers use.  -*- Autoconf -*-
++
++# Copyright (C) 1997-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_MISSING_PROG(NAME, PROGRAM)
++# ------------------------------
++AC_DEFUN([AM_MISSING_PROG],
++[AC_REQUIRE([AM_MISSING_HAS_RUN])
++$1=${$1-"${am_missing_run}$2"}
++AC_SUBST($1)])
++
++# AM_MISSING_HAS_RUN
++# ------------------
++# Define MISSING if not defined so far and test if it is modern enough.
++# If it is, set am_missing_run to use it, otherwise, to nothing.
++AC_DEFUN([AM_MISSING_HAS_RUN],
++[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
++AC_REQUIRE_AUX_FILE([missing])dnl
++if test x"${MISSING+set}" != xset; then
++  MISSING="\${SHELL} '$am_aux_dir/missing'"
++fi
++# Use eval to expand $SHELL
++if eval "$MISSING --is-lightweight"; then
++  am_missing_run="$MISSING "
++else
++  am_missing_run=
++  AC_MSG_WARN(['missing' script is too old or missing])
++fi
++])
++
++# Helper functions for option handling.                     -*- Autoconf -*-
++
++# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# _AM_MANGLE_OPTION(NAME)
++# -----------------------
++AC_DEFUN([_AM_MANGLE_OPTION],
++[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])])
++
++# _AM_SET_OPTION(NAME)
++# --------------------
++# Set option NAME.  Presently that only means defining a flag for this option.
++AC_DEFUN([_AM_SET_OPTION],
++[m4_define(_AM_MANGLE_OPTION([$1]), [1])])
++
++# _AM_SET_OPTIONS(OPTIONS)
++# ------------------------
++# OPTIONS is a space-separated list of Automake options.
++AC_DEFUN([_AM_SET_OPTIONS],
++[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])])
++
++# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET])
++# -------------------------------------------
++# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
++AC_DEFUN([_AM_IF_OPTION],
++[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
++
++# Copyright (C) 1999-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# _AM_PROG_CC_C_O
++# ---------------
++# Like AC_PROG_CC_C_O, but changed for automake.  We rewrite AC_PROG_CC
++# to automatically call this.
++AC_DEFUN([_AM_PROG_CC_C_O],
++[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
++AC_REQUIRE_AUX_FILE([compile])dnl
++AC_LANG_PUSH([C])dnl
++AC_CACHE_CHECK(
++  [whether $CC understands -c and -o together],
++  [am_cv_prog_cc_c_o],
++  [AC_LANG_CONFTEST([AC_LANG_PROGRAM([])])
++  # Make sure it works both with $CC and with simple cc.
++  # Following AC_PROG_CC_C_O, we do the test twice because some
++  # compilers refuse to overwrite an existing .o file with -o,
++  # though they will create one.
++  am_cv_prog_cc_c_o=yes
++  for am_i in 1 2; do
++    if AM_RUN_LOG([$CC -c conftest.$ac_ext -o conftest2.$ac_objext]) \
++         && test -f conftest2.$ac_objext; then
++      : OK
++    else
++      am_cv_prog_cc_c_o=no
++      break
++    fi
++  done
++  rm -f core conftest*
++  unset am_i])
++if test "$am_cv_prog_cc_c_o" != yes; then
++   # Losing compiler, so override with the script.
++   # FIXME: It is wrong to rewrite CC.
++   # But if we don't then we get into trouble of one sort or another.
++   # A longer-term fix would be to have automake use am__CC in this case,
++   # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
++   CC="$am_aux_dir/compile $CC"
++fi
++AC_LANG_POP([C])])
++
++# For backward compatibility.
++AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
++
++# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_RUN_LOG(COMMAND)
++# -------------------
++# Run COMMAND, save the exit status in ac_status, and log it.
++# (This has been adapted from Autoconf's _AC_RUN_LOG macro.)
++AC_DEFUN([AM_RUN_LOG],
++[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD
++   ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD
++   ac_status=$?
++   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
++   (exit $ac_status); }])
++
++# Check to make sure that the build environment is sane.    -*- Autoconf -*-
++
++# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_SANITY_CHECK
++# ---------------
++AC_DEFUN([AM_SANITY_CHECK],
++[AC_MSG_CHECKING([whether build environment is sane])
++# Reject unsafe characters in $srcdir or the absolute working directory
++# name.  Accept space and tab only in the latter.
++am_lf='
++'
++case `pwd` in
++  *[[\\\"\#\$\&\'\`$am_lf]]*)
++    AC_MSG_ERROR([unsafe absolute working directory name]);;
++esac
++case $srcdir in
++  *[[\\\"\#\$\&\'\`$am_lf\ \	]]*)
++    AC_MSG_ERROR([unsafe srcdir value: '$srcdir']);;
++esac
++
++# Do 'set' in a subshell so we don't clobber the current shell's
++# arguments.  Must try -L first in case configure is actually a
++# symlink; some systems play weird games with the mod time of symlinks
++# (eg FreeBSD returns the mod time of the symlink's containing
++# directory).
++if (
++   am_has_slept=no
++   for am_try in 1 2; do
++     echo "timestamp, slept: $am_has_slept" > conftest.file
++     set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
++     if test "$[*]" = "X"; then
++	# -L didn't work.
++	set X `ls -t "$srcdir/configure" conftest.file`
++     fi
++     if test "$[*]" != "X $srcdir/configure conftest.file" \
++	&& test "$[*]" != "X conftest.file $srcdir/configure"; then
++
++	# If neither matched, then we have a broken ls.  This can happen
++	# if, for instance, CONFIG_SHELL is bash and it inherits a
++	# broken ls alias from the environment.  This has actually
++	# happened.  Such a system could not be considered "sane".
++	AC_MSG_ERROR([ls -t appears to fail.  Make sure there is not a broken
++  alias in your environment])
++     fi
++     if test "$[2]" = conftest.file || test $am_try -eq 2; then
++       break
++     fi
++     # Just in case.
++     sleep 1
++     am_has_slept=yes
++   done
++   test "$[2]" = conftest.file
++   )
++then
++   # Ok.
++   :
++else
++   AC_MSG_ERROR([newly created file is older than distributed files!
++Check your system clock])
++fi
++AC_MSG_RESULT([yes])
++# If we didn't sleep, we still need to ensure time stamps of config.status and
++# generated files are strictly newer.
++am_sleep_pid=
++if grep 'slept: no' conftest.file >/dev/null 2>&1; then
++  ( sleep 1 ) &
++  am_sleep_pid=$!
++fi
++AC_CONFIG_COMMANDS_PRE(
++  [AC_MSG_CHECKING([that generated files are newer than configure])
++   if test -n "$am_sleep_pid"; then
++     # Hide warnings about reused PIDs.
++     wait $am_sleep_pid 2>/dev/null
++   fi
++   AC_MSG_RESULT([done])])
++rm -f conftest.file
++])
++
++# Copyright (C) 2009-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_SILENT_RULES([DEFAULT])
++# --------------------------
++# Enable less verbose build rules; with the default set to DEFAULT
++# ("yes" being less verbose, "no" or empty being verbose).
++AC_DEFUN([AM_SILENT_RULES],
++[AC_ARG_ENABLE([silent-rules], [dnl
++AS_HELP_STRING(
++  [--enable-silent-rules],
++  [less verbose build output (undo: "make V=1")])
++AS_HELP_STRING(
++  [--disable-silent-rules],
++  [verbose build output (undo: "make V=0")])dnl
++])
++case $enable_silent_rules in @%:@ (((
++  yes) AM_DEFAULT_VERBOSITY=0;;
++   no) AM_DEFAULT_VERBOSITY=1;;
++    *) AM_DEFAULT_VERBOSITY=m4_if([$1], [yes], [0], [1]);;
++esac
++dnl
++dnl A few 'make' implementations (e.g., NonStop OS and NextStep)
++dnl do not support nested variable expansions.
++dnl See automake bug#9928 and bug#10237.
++am_make=${MAKE-make}
++AC_CACHE_CHECK([whether $am_make supports nested variables],
++   [am_cv_make_support_nested_variables],
++   [if AS_ECHO([['TRUE=$(BAR$(V))
++BAR0=false
++BAR1=true
++V=1
++am__doit:
++	@$(TRUE)
++.PHONY: am__doit']]) | $am_make -f - >/dev/null 2>&1; then
++  am_cv_make_support_nested_variables=yes
++else
++  am_cv_make_support_nested_variables=no
++fi])
++if test $am_cv_make_support_nested_variables = yes; then
++  dnl Using '$V' instead of '$(V)' breaks IRIX make.
++  AM_V='$(V)'
++  AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
++else
++  AM_V=$AM_DEFAULT_VERBOSITY
++  AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
++fi
++AC_SUBST([AM_V])dnl
++AM_SUBST_NOTMAKE([AM_V])dnl
++AC_SUBST([AM_DEFAULT_V])dnl
++AM_SUBST_NOTMAKE([AM_DEFAULT_V])dnl
++AC_SUBST([AM_DEFAULT_VERBOSITY])dnl
++AM_BACKSLASH='\'
++AC_SUBST([AM_BACKSLASH])dnl
++_AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
++])
++
++# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# AM_PROG_INSTALL_STRIP
++# ---------------------
++# One issue with vendor 'install' (even GNU) is that you can't
++# specify the program used to strip binaries.  This is especially
++# annoying in cross-compiling environments, where the build's strip
++# is unlikely to handle the host's binaries.
++# Fortunately install-sh will honor a STRIPPROG variable, so we
++# always use install-sh in "make install-strip", and initialize
++# STRIPPROG with the value of the STRIP variable (set by the user).
++AC_DEFUN([AM_PROG_INSTALL_STRIP],
++[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl
++# Installed binaries are usually stripped using 'strip' when the user
++# run "make install-strip".  However 'strip' might not be the right
++# tool to use in cross-compilation environments, therefore Automake
++# will honor the 'STRIP' environment variable to overrule this program.
++dnl Don't test for $cross_compiling = yes, because it might be 'maybe'.
++if test "$cross_compiling" != no; then
++  AC_CHECK_TOOL([STRIP], [strip], :)
++fi
++INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
++AC_SUBST([INSTALL_STRIP_PROGRAM])])
++
++# Copyright (C) 2006-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# _AM_SUBST_NOTMAKE(VARIABLE)
++# ---------------------------
++# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in.
++# This macro is traced by Automake.
++AC_DEFUN([_AM_SUBST_NOTMAKE])
++
++# AM_SUBST_NOTMAKE(VARIABLE)
++# --------------------------
++# Public sister of _AM_SUBST_NOTMAKE.
++AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
++
++# Check how to create a tarball.                            -*- Autoconf -*-
++
++# Copyright (C) 2004-2021 Free Software Foundation, Inc.
++#
++# This file is free software; the Free Software Foundation
++# gives unlimited permission to copy and/or distribute it,
++# with or without modifications, as long as this notice is preserved.
++
++# _AM_PROG_TAR(FORMAT)
++# --------------------
++# Check how to create a tarball in format FORMAT.
++# FORMAT should be one of 'v7', 'ustar', or 'pax'.
++#
++# Substitute a variable $(am__tar) that is a command
++# writing to stdout a FORMAT-tarball containing the directory
++# $tardir.
++#     tardir=directory && $(am__tar) > result.tar
++#
++# Substitute a variable $(am__untar) that extract such
++# a tarball read from stdin.
++#     $(am__untar) < result.tar
++#
++AC_DEFUN([_AM_PROG_TAR],
++[# Always define AMTAR for backward compatibility.  Yes, it's still used
++# in the wild :-(  We should find a proper way to deprecate it ...
++AC_SUBST([AMTAR], ['$${TAR-tar}'])
++
++# We'll loop over all known methods to create a tar archive until one works.
++_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
++
++m4_if([$1], [v7],
++  [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'],
++
++  [m4_case([$1],
++    [ustar],
++     [# The POSIX 1988 'ustar' format is defined with fixed-size fields.
++      # There is notably a 21 bits limit for the UID and the GID.  In fact,
++      # the 'pax' utility can hang on bigger UID/GID (see automake bug#8343
++      # and bug#13588).
++      am_max_uid=2097151 # 2^21 - 1
++      am_max_gid=$am_max_uid
++      # The $UID and $GID variables are not portable, so we need to resort
++      # to the POSIX-mandated id(1) utility.  Errors in the 'id' calls
++      # below are definitely unexpected, so allow the users to see them
++      # (that is, avoid stderr redirection).
++      am_uid=`id -u || echo unknown`
++      am_gid=`id -g || echo unknown`
++      AC_MSG_CHECKING([whether UID '$am_uid' is supported by ustar format])
++      if test $am_uid -le $am_max_uid; then
++         AC_MSG_RESULT([yes])
++      else
++         AC_MSG_RESULT([no])
++         _am_tools=none
++      fi
++      AC_MSG_CHECKING([whether GID '$am_gid' is supported by ustar format])
++      if test $am_gid -le $am_max_gid; then
++         AC_MSG_RESULT([yes])
++      else
++        AC_MSG_RESULT([no])
++        _am_tools=none
++      fi],
++
++  [pax],
++    [],
++
++  [m4_fatal([Unknown tar format])])
++
++  AC_MSG_CHECKING([how to create a $1 tar archive])
++
++  # Go ahead even if we have the value already cached.  We do so because we
++  # need to set the values for the 'am__tar' and 'am__untar' variables.
++  _am_tools=${am_cv_prog_tar_$1-$_am_tools}
++
++  for _am_tool in $_am_tools; do
++    case $_am_tool in
++    gnutar)
++      for _am_tar in tar gnutar gtar; do
++        AM_RUN_LOG([$_am_tar --version]) && break
++      done
++      am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
++      am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
++      am__untar="$_am_tar -xf -"
++      ;;
++    plaintar)
++      # Must skip GNU tar: if it does not support --format= it doesn't create
++      # ustar tarball either.
++      (tar --version) >/dev/null 2>&1 && continue
++      am__tar='tar chf - "$$tardir"'
++      am__tar_='tar chf - "$tardir"'
++      am__untar='tar xf -'
++      ;;
++    pax)
++      am__tar='pax -L -x $1 -w "$$tardir"'
++      am__tar_='pax -L -x $1 -w "$tardir"'
++      am__untar='pax -r'
++      ;;
++    cpio)
++      am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
++      am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
++      am__untar='cpio -i -H $1 -d'
++      ;;
++    none)
++      am__tar=false
++      am__tar_=false
++      am__untar=false
++      ;;
++    esac
++
++    # If the value was cached, stop now.  We just wanted to have am__tar
++    # and am__untar set.
++    test -n "${am_cv_prog_tar_$1}" && break
++
++    # tar/untar a dummy directory, and stop if the command works.
++    rm -rf conftest.dir
++    mkdir conftest.dir
++    echo GrepMe > conftest.dir/file
++    AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
++    rm -rf conftest.dir
++    if test -s conftest.tar; then
++      AM_RUN_LOG([$am__untar <conftest.tar])
++      AM_RUN_LOG([cat conftest.dir/file])
++      grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
++    fi
++  done
++  rm -rf conftest.dir
++
++  AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
++  AC_MSG_RESULT([$am_cv_prog_tar_$1])])
++
++AC_SUBST([am__tar])
++AC_SUBST([am__untar])
++]) # _AM_PROG_TAR
++
+diff --git a/bolt-plugin/bolt-plugin.cc b/bolt-plugin/bolt-plugin.cc
+new file mode 100644
+index 000000000..f65011fd1
+--- /dev/null
++++ b/bolt-plugin/bolt-plugin.cc
+@@ -0,0 +1,1153 @@
++/* bolt plugin for gold and/or GNU ld.
++   Copyright (C) 2022-2023 Free Software Foundation, Inc.
++   Contributed by Majin and Liyancheng.
++
++This program is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++This program is distributed in the hope that it will be useful, but
++WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with this program; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++/* The plugin has only one external function: onload.  Gold passes it an
++   array of function that the plugin uses to communicate back to gold.
++
++   With the functions provided by gold, the plugin can be notified when
++   gold first analyzes a file and passes a symbol table back to gold.  The
++   plugin is also notified when all symbols have been read and it is time
++   to generate machine code for the necessary symbols.
++
++   More information at http://gcc.gnu.org/wiki/whopr/driver.  */
++
++/* Firstly, this plugin read profile info from .text.fdo.func_name section from
++   each claim file and parse it into BOLT profile.
++
++   The section read from the claim file will follow the following example.
++   .section .text.fdo.sort_array	// Section name
++   .string ".fdo.caller sort_array"	// Function name
++   .string ".fdo.caller.size 492"	// Function size
++   .string ".fdo.caller.bind GLOBAL"	// Bind type
++   .string "58"				// branch source address
++   .string "0"				// branch destination address
++   .string "336"			// count
++
++   The above is the case where the profile data comes from PGO.
++   If the data comes from AutoFDO, branch source address will be
++   BB address and branch destination address will be disabled. e.g.
++   .string "58"				// BB address
++   .string "336"			// count
++
++   The BOLT profile file format follows the syntax below which defined in
++   llvm-bolt.
++
++   Branch info mode when profile collect from PGO:
++     
++     
++    
++
++   Examples:
++
++   1 main 58 1 main 78 0 100
++
++   BB info mode when profile collect from AutoFDO:
++      
++
++   Examples:
++
++   1 main 58 100
++
++   Secondly, it also receive BOLT profile generated by perf2bolt.
++
++   Finally, this plugin calls llvm-bolt to do optimizations after linkage.
++
++*/
++
++#ifdef HAVE_CONFIG_H
++#include "config.h"
++#endif
++#if HAVE_STDINT_H
++#include <stdint.h>
++#endif
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#ifdef HAVE_SYS_WAIT_H
++#include <sys/wait.h>
++#endif
++#include 
++#include 
++#include "simple-object.h"
++#include "plugin-api.h"
++
++namespace LIBIBERTY
++{
++#include <libiberty.h>
++}
++using LIBIBERTY::xmalloc;
++using LIBIBERTY::lbasename;
++using LIBIBERTY::xstrdup;
++using LIBIBERTY::concat;
++using LIBIBERTY::lrealpath;
++
++#include <vector>
++#include <string>
++#include <map>
++#include <set>
++
++using std::vector;
++using std::string;
++using std::map;
++using std::set;
++
++static ld_plugin_register_claim_file register_claim_file = NULL;
++static ld_plugin_register_all_symbols_read register_all_symbols_read = NULL;
++static ld_plugin_register_cleanup register_cleanup = NULL;
++static ld_plugin_message message = NULL;
++
++static enum ld_plugin_output_file_type linker_output;
++
++extern "C"
++{
++  enum ld_plugin_status onload (struct ld_plugin_tv *tv);
++}
++
++/* C99 bool type cannot coerce parm 'gate' range, so use int here.  */
++
++static void
++check_gate (int gate, enum ld_plugin_level level, const char *text)
++{
++  if (gate)
++    {
++      return;
++    }
++
++  if (message)
++    {
++      message (level, text);
++    }
++  else
++    {
++      /* Print msg to stderr if there is no nicer way to inform the user.  */
++      fprintf (stderr, "%s\n", text);
++      if (level == LDPL_FATAL)
++	{
++	  abort ();
++	}
++    }
++}
++
++/* This wrapper allows macro CHECK to be called with a non-integer argument
++   GATE.  For pointer cases, GATE should be no-Null.  */
++
++#define CHECK(GATE, LEVEL, TEXT) check_gate (((GATE) != 0), (LEVEL), (TEXT))
++
++#define __MSG_INFO__
++#define __MSG_WARN__
++#define __MSG_ERROR__
++
++#ifdef __MSG_INFO__
++#define MSG_INFO(...)						\
++  if (message)							\
++    {								\
++      message (LDPL_INFO, "BOLT-PLUGIN-INFO: " __VA_ARGS__);	\
++    }								\
++  else								\
++    {								\
++      fprintf (stderr, "BOLT-PLUGIN-INFO: " __VA_ARGS__);	\
++    }
++#else
++#define MSG_INFO(...)
++#endif
++
++#ifdef __MSG_WARN__
++#define MSG_WARN(...)							\
++  if (message)								\
++    {									\
++      message (LDPL_WARNING, "BOLT-PLUGIN-WARNING: " __VA_ARGS__);	\
++    }									\
++  else									\
++    {									\
++      fprintf (stderr, "BOLT-PLUGIN-WARNING: " __VA_ARGS__);		\
++    }
++#else
++#define MSG_WARN(...)
++#endif
++
++#ifdef __MSG_ERROR__
++#define MSG_ERROR(...)						\
++  if (message)							\
++    {								\
++      message (LDPL_FATAL, "BOLT-PLUGIN-ERROR: " __VA_ARGS__);	\
++    }								\
++  else								\
++    {								\
++      fprintf (stderr, "BOLT-PLUGIN-ERROR: " __VA_ARGS__);	\
++      abort ();							\
++    }
++#else
++#define MSG_ERROR(...)
++#endif
++
++#if HAVE_DOS_BASED_FILE_SYSTEM
++const char *separator = "\\";
++#else
++const char *separator = "/";
++#endif
++
++/* Encapsulates object file data during symbol scan.  */
++struct plugin_objfile
++{
++  simple_object_read *objfile;
++  const struct ld_plugin_input_file *file;
++};
++
++struct jump_info
++{
++  string des_func_name;
++  string src_addr_offset;
++  string dst_addr_offset;
++  string count;
++};
++
++struct func_info
++{
++  string function_name;
++  string bind_type; /* "GLOBAL","WEAK","LOCAL","UNKNOWN".  */
++  string size;
++  vector<jump_info> edges;
++};
++
++/* Define feedback data type.  */
++enum feedback_type
++{
++  NULL_TYPE,  /* No feedback data.  */
++  PGO_TYPE,   /* Feedback data from PGO.  */
++  AFDO_TYPE,  /* Feedback data from AutoFDO.  */
++  BOLT_TYPE,  /* Feedback data from BOLT.  */
++};
++
++#define DEFAULT_BOLT_OUT_DIR (get_current_dir_name ())
++#define DEFAULT_BOLT_OUT_NAME "default.fdata"
++#define DEFAULT_BOLT_OUT_NAME_SUFFIX ".fdata"
++
++/* The FDO section's special prefix names.  */
++#define ASM_FDO_SECTION_PREFIX ".text.fdo."
++#define ASM_FDO_CALLER_FLAG ".fdo.caller "
++#define ASM_FDO_CALLER_BIND_FLAG ".fdo.caller.bind "
++#define ASM_FDO_CALLER_SIZE_FLAG ".fdo.caller.size "
++#define ASM_FDO_CALLEE_FLAG ".fdo.callee "
++
++static int linker_output_set;
++
++/* BOLT profile name generated by -fauto-bolt or
++   read from -fbolt-use.  */
++static string bolt_profile_name;
++
++/* Path to save configuration file generated by -fauto-bolt.  */
++static string bolt_dir_path;
++
++/* BOLT profile file FD generated by -fauto-bolt.  */
++static FILE *bolt_file_fd = NULL;
++
++/* Temporary binary or dynamic file with reloc info.  */
++static string tmp_out_file_name = "a.out";
++
++/* Binary or dynamic file after BOLT.  */
++static string bolt_opt_target;
++
++/* Format of bolt_optimize_options should be "reorder-functions=hfsort+ ...",
++   command 'llvm-bolt' has been added here.  */
++static string bolt_optimize_options ("llvm-bolt ");
++
++static enum feedback_type fdo_type = feedback_type::NULL_TYPE;
++
++static vector<string> gcc_options;
++
++/* Map of <function_name, vector<func_info>>.  */
++static map<string, vector<func_info>> weak_functions;
++
++/* Return 1 if STR starts with PREFIX, 0 otherwise.  */
++
++inline static int
++is_prefix_of (const char *prefix, const char *str)
++{
++  return strncmp (prefix, str, strlen (prefix)) == 0;
++}
++
++static bool
++file_exist (const char *file_name)
++{
++  if (file_name == nullptr)
++    {
++      MSG_ERROR ("file_exist get empty input file name.");
++      return false;
++    }
++  struct stat buffer;
++  if (stat (file_name, &buffer) == 0)
++    {
++      return true;
++    }
++
++  MSG_WARN ("file_exist check failed: %s does not exist!", file_name);
++  return false;
++}
++
++/* Run CMD via popen after validating it against a whitelist of safe characters.  */
++
++static void
++popen_run (const string& cmd)
++{
++  for (const char &ch : cmd)
++    {
++      if ((ch >= '0' && ch <= '9')
++	  || (ch >= 'A' && ch <= 'Z')
++	  || (ch >= 'a' && ch <= 'z')
++	  || (ch == ' ' || ch == '_')
++	  || (ch == '-' || ch == '/')
++	  || (ch == '.' || ch == '+')
++	  || (ch == '=' || ch == '#'))
++	{
++	  continue;
++	}
++      else
++	{
++	  MSG_WARN ("Unsafe command: %s", cmd.c_str ());
++	  MSG_ERROR ("The command can only contain the following characters "
++		     "0-9, A-Z, a-z, '_', '-', '/', ' ', '.', '+', '=', '#' ");
++	}
++    }
++  MSG_INFO ("Execute command: %s", cmd.c_str ());
++  FILE *fd = popen (cmd.c_str (), "r");
++  if (fd == nullptr)
++    {
++      MSG_WARN ("Execute command faild!");
++    }
++  else
++    {
++      char result_buf[1024];
++      while (fgets (result_buf, sizeof (result_buf), fd) != NULL)
++	{
++	  if (result_buf[strlen (result_buf) - 1] == '\n')
++	    {
++	      result_buf[strlen (result_buf) - 1] = '\0';
++	    }
++	  MSG_INFO ("%s", result_buf);
++	}
++      pclose (fd);
++    }
++}
++
++/* Generate bolt optimize command.  */
++
++static string
++generate_bolt_cmd ()
++{
++  string new_binary = tmp_out_file_name + ".bolt";
++  string cmd;
++
++  /* bolt_optimize_options != "llvm-bolt "
++     means that the user uses custom input options.  */
++  if (bolt_optimize_options != "llvm-bolt ")
++    {
++      cmd = bolt_optimize_options + " " + tmp_out_file_name
++	+ " -o " + new_binary
++	+ " -data=" + bolt_profile_name;
++    }
++  else
++    {
++      if (fdo_type == feedback_type::AFDO_TYPE)
++	{
++	  cmd = string ("llvm-bolt  -reorder-functions=hfsort+ ")
++	    + tmp_out_file_name + " -o " + new_binary
++	    + " -data=" + bolt_profile_name;
++	}
++      else if (fdo_type == feedback_type::PGO_TYPE
++	  || fdo_type == feedback_type::BOLT_TYPE)
++	{
++	  cmd = string ("llvm-bolt -reorder-blocks=cache+ ")
++	    + string (" -reorder-functions=hfsort+ ")
++	    + string (" -split-functions=3 -split-all-cold ")
++	    + string (" -dyno-stats -icf=1 -use-gnu-stack ")
++	    + tmp_out_file_name + " -o " + new_binary
++	    + " -data=" + bolt_profile_name;
++	}
++      else
++	{
++	  MSG_ERROR ("Invalid profile type!");
++	  return string ();
++	}
++      MSG_INFO ("Using the default llvm-bolt optimization option,"
++		" manually specify this option by -fbolt-option.  ");
++    }
++  return cmd;
++}
++
++/* Execute BOLT optimization, backup original binary with .orig .  */
++
++static void
++do_bolt_opt ()
++{
++  string cmd = generate_bolt_cmd ();
++  if (cmd.empty ())
++    {
++      return;
++    }
++  popen_run (cmd);
++  string new_binary = tmp_out_file_name + ".bolt";
++  if (file_exist (new_binary.c_str ()))
++    {
++      cmd = "mv -f " + tmp_out_file_name + " " + tmp_out_file_name + ".orig";
++      popen_run (cmd);
++
++      cmd = "cp -f " + new_binary + " " + tmp_out_file_name;
++      popen_run (cmd);
++    }
++  else
++    {
++      MSG_ERROR ("BOLT optimization fail!"
++		 " Try installing llvm-bolt or"
++		 " enabling relocation info with flag -Wl,-q");
++    }
++}
++
++/* If -fbolt-target is set and this binary is the target, return true.  */
++
++inline static bool
++is_bolt_opt_target ()
++{
++  if (!bolt_opt_target.empty ()
++      && strcmp (lbasename (tmp_out_file_name.c_str ()),
++      lbasename (bolt_opt_target.c_str ())) != 0)
++    {
++      MSG_INFO ("BOLT optmization target is %s, processing %s, skip.",
++		bolt_opt_target.c_str (), tmp_out_file_name.c_str ());
++      return false;
++    }
++  return true;
++}
++
++/* Remove temporary files after linkage, and do BOLT optimization.  */
++
++static enum ld_plugin_status
++cleanup_handler ()
++{
++  if (bolt_file_fd)
++    {
++      fclose (bolt_file_fd);
++    }
++
++  if (file_exist (tmp_out_file_name.c_str ())
++      && file_exist (bolt_profile_name.c_str ())
++      && is_bolt_opt_target ())
++    {
++      do_bolt_opt ();
++    }
++
++  return LDPS_OK;
++}
++
++/* Open BOLT profile file generated by -fauto-bolt.  */
++
++static void
++open_bolt_profile_file (const char *file_name)
++{
++  if (file_name == NULL)
++    {
++      MSG_ERROR ("Empty BOLT profile name, exit!");
++    }
++
++  if (bolt_file_fd == NULL)
++    {
++      MSG_INFO ("Generate profile file for BOLT: %s", file_name);
++      bolt_file_fd = fopen (file_name, "wt");
++      if (!bolt_file_fd)
++	{
++	  MSG_ERROR ("Failed to open the file: %s."
++		     " Please check whether the target path exists.",
++		     file_name);
++	}
++      return;
++    }
++  else
++    {
++      MSG_WARN ("BOLT profile file: %s is open, skip.", file_name);
++    }
++}
++
++/* In a BOLT profile, a function that shares its name with others is
++   represented as func_name/file_name/1; `/` has already been added in
++   gcc/final.c, so append /1 if this is such a same-name function.  */
++
++static string
++add_suffix (string str)
++{
++  if (str.empty () || (strstr (str.c_str (), "/") == NULL))
++    {
++      return str;
++    }
++
++  return str + "/1";
++}
++
++/* Dump function info to BOLT profile, bolt_file_fd does not need
++   to be closed here.  */
++
++static void
++dump_func_to_bolt_profile_file (const struct func_info &func)
++{
++  if (func.edges.empty ())
++    {
++      return;
++    }
++
++  if (!bolt_file_fd)
++    {
++      open_bolt_profile_file (bolt_profile_name.c_str ());
++
++      /* Check whether the feedback data is from AutoFDO.  */
++      if (fdo_type == feedback_type::AFDO_TYPE)
++       {
++	 fprintf (bolt_file_fd, "no_lbr cycles:u:\n");
++       }
++    }
++
++  for (const auto &edge: func.edges)
++    {
++      if (fdo_type == feedback_type::PGO_TYPE)
++	{
++	  fprintf (bolt_file_fd, "1 %s %s 1 %s %s 0 %s\n",
++		   add_suffix (func.function_name).c_str (),
++		   edge.src_addr_offset.c_str (),
++		   add_suffix (edge.des_func_name).c_str (),
++		   edge.dst_addr_offset.c_str (), edge.count.c_str ());
++	}
++      else if (fdo_type == feedback_type::AFDO_TYPE)
++	{
++	  fprintf (bolt_file_fd, "1 %s %s %s\n",
++		   add_suffix (func.function_name).c_str (),
++		   edge.src_addr_offset.c_str (),
++		   edge.count.c_str ());
++	}
++    }
++
++  fflush (bolt_file_fd);
++}
++
++/* Called by the linker when all symbols have been read.  */
++
++static enum ld_plugin_status
++all_symbols_read_handler ()
++{
++  for (const auto &functions: weak_functions)
++    {
++      /* More than one weak function.  */
++      if (functions.second.size () > 1)
++	{
++	  MSG_WARN ("The weak function: %s is confusing, take the first one.",
++		    functions.first.c_str ());
++	}
++
++      dump_func_to_bolt_profile_file (functions.second[0]);
++    }
++  return LDPS_OK;
++}
++
++/* Advance P just past the current NUL-terminated string and return it.  */
++
++static char *
++get_next_content (char *p, char *end)
++{
++  while (*p && p < end)
++    {
++      p++;
++    }
++  p++;
++
++  return p;
++}
++
++/* Process function head info.  */
++
++static char *
++process_function_head (char *data , char *end, struct func_info *func)
++{
++  CHECK (is_prefix_of (ASM_FDO_CALLER_FLAG, data), LDPL_FATAL,
++	 "The function name is missing.");
++  func->function_name = xstrdup (data + strlen (ASM_FDO_CALLER_FLAG));
++  data = get_next_content (data, end);
++
++  CHECK (is_prefix_of (ASM_FDO_CALLER_SIZE_FLAG, data), LDPL_FATAL,
++	 "The function size is missing.");
++  func->size = xstrdup (data + strlen (ASM_FDO_CALLER_SIZE_FLAG));
++  data = get_next_content (data, end);
++
++  CHECK (is_prefix_of (ASM_FDO_CALLER_BIND_FLAG, data), LDPL_FATAL,
++	 "The function bind type is missing.");
++  func->bind_type = xstrdup (data + strlen (ASM_FDO_CALLER_BIND_FLAG));
++  data = get_next_content (data, end);
++  return data;
++}
++
++/* Read profile info from the symbol table located between data and end.  */
++
++static void
++process_section (char *data, char *end)
++{
++  struct func_info func;
++
++  data = process_function_head (data, end, &func);
++
++  while (*data && data < end)
++    {
++      struct jump_info jump;
++
++      CHECK (data, LDPL_FATAL, "data is NULL");
++      jump.src_addr_offset = xstrdup (data);
++
++      data = get_next_content (data, end);
++      CHECK (data, LDPL_FATAL, "data is NULL");
++      if (is_prefix_of (ASM_FDO_CALLEE_FLAG, data))
++	{
++	  jump.des_func_name = xstrdup (data + strlen (ASM_FDO_CALLEE_FLAG));
++	  jump.dst_addr_offset = "0";
++	  data = get_next_content (data, end);
++	  CHECK (data, LDPL_FATAL, "data is NULL");
++	}
++      else if (fdo_type == feedback_type::PGO_TYPE)
++	{
++	  jump.des_func_name = func.function_name;
++	  jump.dst_addr_offset = xstrdup (data);
++	  data = get_next_content (data, end);
++	  CHECK (data, LDPL_FATAL, "data is NULL");
++	}
++      else
++	{
++	  jump.des_func_name = func.function_name;
++	}
++
++      jump.count = xstrdup (data);
++      data = get_next_content (data, end);
++
++      func.edges.push_back (jump);
++    }
++
++  if (is_prefix_of ("WEAK", func.bind_type.c_str ()))
++    {
++      weak_functions[func.function_name].push_back (func);
++    }
++  else
++    {
++      dump_func_to_bolt_profile_file (func);
++    }
++}
++
++/* Process error when calling function process_symtab.  */
++
++static int
++process_symtab_error (struct plugin_objfile *obj, char *secdatastart)
++{
++  MSG_ERROR ("%s: corrupt object file.", obj->file->name);
++
++  /* Force claim_file_handler to abandon this file.  */
++  if (secdatastart != NULL)
++    {
++      free (secdatastart);
++    }
++  return 0;
++}
++
++/* Process one section of an object file.  Return 1 to continue processing
++   other sections, as defined by the simple_object_find_sections API.  */
++
++static int
++process_symtab (void *data, const char *name, off_t offset, off_t length)
++{
++  if (data == NULL)
++    {
++      MSG_WARN ("Empty symtab! skip it.");
++      return 0;
++    }
++  if (name == NULL)
++    {
++      MSG_WARN ("Empty symtab name! skip it.");
++      return 0;
++    }
++  struct plugin_objfile *obj = (struct plugin_objfile *)data;
++  char *secdatastart;
++  char *secdata;
++
++  if (!is_prefix_of (ASM_FDO_SECTION_PREFIX, name))
++    {
++      return 1;
++    }
++
++  secdata = secdatastart = (char *)xmalloc (length * sizeof (char));
++  offset += obj->file->offset;
++  if (offset != lseek (obj->file->fd, offset, SEEK_SET))
++    {
++      return process_symtab_error (obj, secdatastart);
++    }
++
++  do
++    {
++      ssize_t got = read (obj->file->fd, secdata, length);
++
++      if (got == 0)
++	{
++	  break;
++	}
++      else if (got > 0)
++	{
++	  secdata += got;
++	  length -= got;
++	}
++      else if (errno != EINTR)
++	{
++	  return process_symtab_error (obj, secdatastart);
++	}
++     }
++  while (length > 0);
++
++  if (length > 0)
++    {
++      return process_symtab_error (obj, secdatastart);
++    }
++
++  process_section (secdatastart, secdata);
++  free (secdatastart);
++  return 1;
++}
++
++/* Callback used by gold to check if the plugin will claim FILE.  Writes
++   the result in CLAIMED.  */
++
++static enum ld_plugin_status
++claim_file_handler (const struct ld_plugin_input_file *file, int *claimed)
++{
++  struct plugin_objfile obj;
++  int err;
++  const char *errmsg = NULL;
++  /* If FILE is null, the BOLT plugin does nothing and returns OK.  */
++  if (file == NULL)
++    {
++      return LDPS_OK;
++    }
++  /* The BOLT plugin does not claim any input file, so set *claimed to 0.  */
++  *claimed = 0;
++
++  obj.file = file;
++  obj.objfile = simple_object_start_read (file->fd, file->offset, NULL,
++					  &errmsg, &err);
++
++  /* No file, but also no error code means unrecognized format,
++     skip it.  */
++  if (!obj.objfile && !err)
++    {
++      return LDPS_OK;
++    }
++
++  if (obj.objfile)
++    {
++      simple_object_find_sections (obj.objfile, process_symtab, &obj, &err);
++      simple_object_release_read (obj.objfile);
++    }
++
++  return LDPS_OK;
++}
++
++/* Mangle the filename path in BASE and return the mangled path as a
++   newly built string.  */
++
++static string
++mangle_path (const string &base)
++{
++  if (base.empty ())
++    {
++      return base;
++    }
++
++  /* Convert '/' to '#', convert '..' to '^',
++     convert ':' to '~' on DOS based file system.  */
++
++  string new_path;
++  int base_len = base.size ();
++  int l = 0;
++  int r = 0;
++  while (l < base_len)
++    {
++      while (r < base_len && base[r] != '/')
++	{
++	  r++;
++	}
++
++      int len = r - l;
++      if (len == 2 && base[r - 2] == '.' && base[r - 1] == '.')
++	{
++	  new_path += '^';
++	}
++      else
++	{
++	  new_path += base.substr (l, r - l);
++	}
++      if (r < base_len)
++	{
++	  new_path += '#';
++	}
++
++      r++;
++      l = r;
++    }
++  return new_path;
++}
++
++/* Generate BOLT profile name from file_name.  */
++
++static string
++generate_bolt_profile_name (string file_name)
++{
++  if (!IS_ABSOLUTE_PATH (file_name.c_str ()))
++    {
++      if (!bolt_dir_path.empty ())
++	{
++	  file_name = concat (get_current_dir_name (),
++			      separator, file_name.c_str (), NULL);
++	  file_name = mangle_path (file_name);
++	}
++      else
++	{
++	  bolt_dir_path = DEFAULT_BOLT_OUT_DIR;
++	}
++    }
++  file_name = concat (bolt_dir_path.c_str (), separator, file_name.c_str (),
++		      NULL);
++  return file_name;
++}
++
++/* Match option_prefix from gcc_options, return the index of gcc_options.  */
++
++static int
++match_gcc_option (const char *option_prefix)
++{
++  if (option_prefix == NULL)
++    {
++      return -1;
++    }
++
++  for (size_t i = 0; i < gcc_options.size (); i++)
++    {
++      if (is_prefix_of (option_prefix, gcc_options[i].c_str ()))
++	{
++	  return i;
++	}
++    }
++
++  return -1;
++}
++
++/* Get options from the environment variable COLLECT_GCC_OPTIONS.  */
++
++static void
++get_options_from_collect_gcc_options (const char *collect_gcc,
++				      const char *collect_gcc_options)
++{
++  /* When using GCC, collect_gcc will not be empty.  */
++  if (collect_gcc == NULL || collect_gcc_options == NULL)
++    {
++      return;
++    }
++
++  size_t len = strlen (collect_gcc_options);
++  size_t r = 0;
++  while (r < len && collect_gcc_options[r] != '\0')
++    {
++      if (collect_gcc_options[r] == '\'')
++	{
++	  string option;
++	  ++r;
++	  do
++	    {
++	      if (collect_gcc_options[r] == '\0')
++		{
++		  MSG_ERROR ("Malformed COLLECT_GCC_OPTIONS");
++		}
++	      else if (is_prefix_of ("'\\''", &collect_gcc_options[r]))
++		{
++		  option.push_back ('\'');
++		  r += 4;
++		}
++	      else if (collect_gcc_options[r] == '\'')
++		{
++		  break;
++		}
++	      else
++		{
++		  option.push_back (collect_gcc_options[r]);
++		  ++r;
++		}
++	    }
++	  while (1);
++
++	  if (!option.empty ())
++	    {
++	      gcc_options.push_back (option);
++	    }
++	}
++      ++r;
++    }
++}
++
++/* Substitute commas with spaces in RAW_STRING; used for parsing
++   -fbolt-option.  */
++
++static string
++parser_bolt_optimize_option (string raw_string)
++{
++  for (auto &ch : raw_string)
++    {
++      if (ch == ',')
++	{
++	  ch = ' ';
++	}
++    }
++
++  return raw_string;
++}
++
++/* Process option -fauto-bolt.  */
++
++static void
++process_auto_bolt_option (const string &flag_auto_bolt)
++{
++  const int auto_bolt_index = match_gcc_option (flag_auto_bolt.c_str ());
++
++  if (auto_bolt_index != -1)
++    {
++      if (gcc_options[auto_bolt_index] == "-fauto-bolt")
++	{
++	  MSG_INFO ("Use default output directory %s, ", DEFAULT_BOLT_OUT_DIR);
++	  MSG_INFO ("Specify it using -fauto-bolt= if needed.");
++	}
++      else
++	{
++	  string flag_auto_bolt_equal = "-fauto-bolt=";
++	  bolt_dir_path = lrealpath (gcc_options[auto_bolt_index].substr (
++			  flag_auto_bolt_equal.size ()).c_str ());
++	  MSG_INFO ("Get bolt profile path: %s", bolt_dir_path.c_str ());
++	}
++      bolt_profile_name = generate_bolt_profile_name(bolt_profile_name);
++    }
++}
++
++/* Process option -fbolt-use=.  */
++
++static void
++process_bolt_use_option (const string &flag_bolt_use)
++{
++  const int bolt_use_index = match_gcc_option (flag_bolt_use.c_str ());
++
++  if (bolt_use_index != -1)
++    {
++      /* bolt_profile_name may be initialized in
++	 function process_output_option.  */
++      bolt_profile_name = gcc_options[bolt_use_index].substr (
++			  flag_bolt_use.size ()).c_str ();
++      if (bolt_profile_name.empty ())
++	{
++	  bolt_profile_name = DEFAULT_BOLT_OUT_NAME;
++	}
++      MSG_INFO ("Get bolt profile: %s", bolt_profile_name.c_str ());
++    }
++}
++
++/* Process option -fbolt-target=.  */
++
++static void
++process_bolt_target_option (const string &flag_bolt_target)
++{
++  const int bolt_target_index = match_gcc_option (flag_bolt_target.c_str ());
++  if (bolt_target_index != -1)
++    {
++      bolt_opt_target = gcc_options[bolt_target_index].substr (
++	flag_bolt_target.size ()).c_str ();
++      MSG_INFO ("Get bolt target: %s", bolt_opt_target.c_str ());
++    }
++}
++
++/* Process option -fbolt-option=.  */
++
++static void
++process_bolt_option (const string &flag_bolt_optimize_options)
++{
++  const int bolt_optimize_options_index
++    = match_gcc_option (flag_bolt_optimize_options.c_str ());
++
++  if (bolt_optimize_options_index != -1)
++    {
++      bolt_optimize_options.append (parser_bolt_optimize_option (
++	gcc_options[bolt_optimize_options_index].substr (
++	flag_bolt_optimize_options.size ()).c_str ()));
++
++      MSG_INFO ("Get bolt optimize options is %s",
++	bolt_optimize_options.c_str ());
++    }
++}
++
++/* If -o is specified, set binary name and bolt profile name.  This
++   function must be called before the process_bolt_use_option function.  */
++
++static void
++process_output_option (const string &flag_o)
++{
++  const int o_index = match_gcc_option (flag_o.c_str ());
++  if (o_index != -1)
++    {
++      tmp_out_file_name = gcc_options[o_index + 1];
++      /* bolt_profile_name may be overridden in
++	 function process_auto_bolt_option and
++	 process_bolt_use_option.  */
++      bolt_profile_name = gcc_options[o_index + 1];
++      bolt_profile_name.append (DEFAULT_BOLT_OUT_NAME_SUFFIX);
++    }
++  else
++    {
++      bolt_profile_name = DEFAULT_BOLT_OUT_NAME;
++      MSG_INFO ("Use default file name %s, specify it using -o if needed.",
++		DEFAULT_BOLT_OUT_NAME);
++    }
++}
++
++/* Parse the plugin options.  */
++
++static void
++process_gcc_option ()
++{
++  string flag_profile_use = "-fprofile-use";
++  string flag_auto_profile = "-fauto-profile";
++  string flag_auto_bolt = "-fauto-bolt";
++  string flag_bolt_use = "-fbolt-use=";
++  string flag_bolt_target = "-fbolt-target=";
++  string flag_bolt_optimize_options = "-fbolt-option=";
++  string flag_o = "-o";
++
++  char *collect_gcc = getenv ("COLLECT_GCC");
++  char *collect_gcc_option = getenv ("COLLECT_GCC_OPTIONS");
++
++  get_options_from_collect_gcc_options (collect_gcc, collect_gcc_option);
++
++  /* Function process_output_option should be processed before
++     process_auto_bolt_option to obtain correct bolt_profile_name.  */
++  process_output_option (flag_o);
++  process_auto_bolt_option (flag_auto_bolt);
++  process_bolt_use_option (flag_bolt_use);
++  process_bolt_target_option (flag_bolt_target);
++  process_bolt_option (flag_bolt_optimize_options);
++  
++  if (match_gcc_option (flag_profile_use.c_str ()) != -1)
++    {
++      fdo_type = feedback_type::PGO_TYPE;
++    }
++  else if (match_gcc_option (flag_auto_profile.c_str ()) != -1)
++    {
++      fdo_type = feedback_type::AFDO_TYPE;
++    }
++
++  if (match_gcc_option (flag_bolt_use.c_str ()) != -1)
++    {
++      fdo_type = feedback_type::BOLT_TYPE;
++    }
++
++  if (fdo_type == feedback_type::NULL_TYPE)
++    {
++      MSG_ERROR ("No feedback data, maybe use -fprofile-use "
++		 "-fbolt-use or -fauto-profile.");
++    }
++}
++
++/* Register callback function including all_symbols_read_handler,
++   cleanup_handler and claim_file_handler.  */
++
++static void
++register_callback_function ()
++{
++  enum ld_plugin_status status;
++
++  if (linker_output_set && linker_output != LDPO_EXEC)
++    {
++      MSG_INFO ("This linker[%d] is not for exec, just skip.", linker_output);
++      return;
++    }
++
++  CHECK (register_claim_file, LDPL_FATAL, "register_claim_file not found");
++  status = register_claim_file (claim_file_handler);
++  CHECK (status == LDPS_OK, LDPL_FATAL,
++	 "could not register the claim_file callback");
++
++  if (register_cleanup)
++    {
++      status = register_cleanup (cleanup_handler);
++      CHECK (status == LDPS_OK, LDPL_FATAL,
++	     "could not register the cleanup callback");
++    }
++
++  if (register_all_symbols_read)
++    {
++      status = register_all_symbols_read (all_symbols_read_handler);
++      CHECK (status == LDPS_OK, LDPL_FATAL,
++	     "could not register the all_symbols_read callback");
++    }
++}
++
++/* Called by gold after loading the plugin.  TV is the transfer vector.  */
++
++enum ld_plugin_status
++onload (struct ld_plugin_tv *tv)
++{
++  struct ld_plugin_tv *p;
++
++  p = tv;
++  while (p->tv_tag)
++    {
++      switch (p->tv_tag)
++	{
++	  case LDPT_MESSAGE:
++	    message = p->tv_u.tv_message;
++	    break;
++	  case LDPT_REGISTER_CLAIM_FILE_HOOK:
++	    register_claim_file = p->tv_u.tv_register_claim_file;
++	    break;
++	  case LDPT_REGISTER_ALL_SYMBOLS_READ_HOOK:
++	    register_all_symbols_read = p->tv_u.tv_register_all_symbols_read;
++	    break;
++	  case LDPT_REGISTER_CLEANUP_HOOK:
++	    register_cleanup = p->tv_u.tv_register_cleanup;
++	    break;
++	  case LDPT_LINKER_OUTPUT:
++	    linker_output = (enum ld_plugin_output_file_type)p->tv_u.tv_val;
++	    linker_output_set = 1;
++	    break;
++	  default:
++	    break;
++	}
++      p++;
++    }
++
++  register_callback_function ();
++  process_gcc_option ();
++
++  return LDPS_OK;
++}
++
+diff --git a/bolt-plugin/config.h.in b/bolt-plugin/config.h.in
+new file mode 100644
+index 000000000..9e9d316ec
+--- /dev/null
++++ b/bolt-plugin/config.h.in
+@@ -0,0 +1,179 @@
++/* config.h.in.  Generated from configure.ac by autoheader.  */
++
++/* Define to 1 if you have the <dlfcn.h> header file. */
++#undef HAVE_DLFCN_H
++
++/* Define to 1 if you have the <inttypes.h> header file. */
++#undef HAVE_INTTYPES_H
++
++/* Define to 1 if you have the <minix/config.h> header file. */
++#undef HAVE_MINIX_CONFIG_H
++
++/* Define to 1 if you have the <stdint.h> header file. */
++#undef HAVE_STDINT_H
++
++/* Define to 1 if you have the <stdio.h> header file. */
++#undef HAVE_STDIO_H
++
++/* Define to 1 if you have the <stdlib.h> header file. */
++#undef HAVE_STDLIB_H
++
++/* Define to 1 if you have the <strings.h> header file. */
++#undef HAVE_STRINGS_H
++
++/* Define to 1 if you have the <string.h> header file. */
++#undef HAVE_STRING_H
++
++/* Define to 1 if you have the <sys/stat.h> header file. */
++#undef HAVE_SYS_STAT_H
++
++/* Define to 1 if you have the <sys/types.h> header file. */
++#undef HAVE_SYS_TYPES_H
++
++/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */
++#undef HAVE_SYS_WAIT_H
++
++/* Define to 1 if you have the <unistd.h> header file. */
++#undef HAVE_UNISTD_H
++
++/* Define to 1 if you have the <wchar.h> header file. */
++#undef HAVE_WCHAR_H
++
++/* Define to the sub-directory where libtool stores uninstalled libraries. */
++#undef LT_OBJDIR
++
++/* Name of package */
++#undef PACKAGE
++
++/* Define to the address where bug reports for this package should be sent. */
++#undef PACKAGE_BUGREPORT
++
++/* Define to the full name of this package. */
++#undef PACKAGE_NAME
++
++/* Define to the full name and version of this package. */
++#undef PACKAGE_STRING
++
++/* Define to the one symbol short name of this package. */
++#undef PACKAGE_TARNAME
++
++/* Define to the home page for this package. */
++#undef PACKAGE_URL
++
++/* Define to the version of this package. */
++#undef PACKAGE_VERSION
++
++/* Define to 1 if all of the C90 standard headers exist (not just the ones
++   required in a freestanding environment). This macro is provided for
++   backward compatibility; new code need not use it. */
++#undef STDC_HEADERS
++
++/* Enable extensions on AIX 3, Interix.  */
++#ifndef _ALL_SOURCE
++# undef _ALL_SOURCE
++#endif
++/* Enable general extensions on macOS.  */
++#ifndef _DARWIN_C_SOURCE
++# undef _DARWIN_C_SOURCE
++#endif
++/* Enable general extensions on Solaris.  */
++#ifndef __EXTENSIONS__
++# undef __EXTENSIONS__
++#endif
++/* Enable GNU extensions on systems that have them.  */
++#ifndef _GNU_SOURCE
++# undef _GNU_SOURCE
++#endif
++/* Enable X/Open compliant socket functions that do not require linking
++   with -lxnet on HP-UX 11.11.  */
++#ifndef _HPUX_ALT_XOPEN_SOCKET_API
++# undef _HPUX_ALT_XOPEN_SOCKET_API
++#endif
++/* Identify the host operating system as Minix.
++   This macro does not affect the system headers' behavior.
++   A future release of Autoconf may stop defining this macro.  */
++#ifndef _MINIX
++# undef _MINIX
++#endif
++/* Enable general extensions on NetBSD.
++   Enable NetBSD compatibility extensions on Minix.  */
++#ifndef _NETBSD_SOURCE
++# undef _NETBSD_SOURCE
++#endif
++/* Enable OpenBSD compatibility extensions on NetBSD.
++   Oddly enough, this does nothing on OpenBSD.  */
++#ifndef _OPENBSD_SOURCE
++# undef _OPENBSD_SOURCE
++#endif
++/* Define to 1 if needed for POSIX-compatible behavior.  */
++#ifndef _POSIX_SOURCE
++# undef _POSIX_SOURCE
++#endif
++/* Define to 2 if needed for POSIX-compatible behavior.  */
++#ifndef _POSIX_1_SOURCE
++# undef _POSIX_1_SOURCE
++#endif
++/* Enable POSIX-compatible threading on Solaris.  */
++#ifndef _POSIX_PTHREAD_SEMANTICS
++# undef _POSIX_PTHREAD_SEMANTICS
++#endif
++/* Enable extensions specified by ISO/IEC TS 18661-5:2014.  */
++#ifndef __STDC_WANT_IEC_60559_ATTRIBS_EXT__
++# undef __STDC_WANT_IEC_60559_ATTRIBS_EXT__
++#endif
++/* Enable extensions specified by ISO/IEC TS 18661-1:2014.  */
++#ifndef __STDC_WANT_IEC_60559_BFP_EXT__
++# undef __STDC_WANT_IEC_60559_BFP_EXT__
++#endif
++/* Enable extensions specified by ISO/IEC TS 18661-2:2015.  */
++#ifndef __STDC_WANT_IEC_60559_DFP_EXT__
++# undef __STDC_WANT_IEC_60559_DFP_EXT__
++#endif
++/* Enable extensions specified by ISO/IEC TS 18661-4:2015.  */
++#ifndef __STDC_WANT_IEC_60559_FUNCS_EXT__
++# undef __STDC_WANT_IEC_60559_FUNCS_EXT__
++#endif
++/* Enable extensions specified by ISO/IEC TS 18661-3:2015.  */
++#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__
++# undef __STDC_WANT_IEC_60559_TYPES_EXT__
++#endif
++/* Enable extensions specified by ISO/IEC TR 24731-2:2010.  */
++#ifndef __STDC_WANT_LIB_EXT2__
++# undef __STDC_WANT_LIB_EXT2__
++#endif
++/* Enable extensions specified by ISO/IEC 24747:2009.  */
++#ifndef __STDC_WANT_MATH_SPEC_FUNCS__
++# undef __STDC_WANT_MATH_SPEC_FUNCS__
++#endif
++/* Enable extensions on HP NonStop.  */
++#ifndef _TANDEM_SOURCE
++# undef _TANDEM_SOURCE
++#endif
++/* Enable X/Open extensions.  Define to 500 only if necessary
++   to make mbstate_t available.  */
++#ifndef _XOPEN_SOURCE
++# undef _XOPEN_SOURCE
++#endif
++
++
++/* Version number of package */
++#undef VERSION
++
++/* Number of bits in a file offset, on hosts where this is settable. */
++#undef _FILE_OFFSET_BITS
++
++/* Define for large files, on AIX-style hosts. */
++#undef _LARGE_FILES
++
++/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/types.h>,
++   <inttypes.h>, or <stdint.h> is not used. If the typedef were allowed, the
++   #define below would cause a syntax error. */
++#undef _UINT64_T
++
++/* Define to the type of a signed integer type of width exactly 64 bits if
++   such a type exists and the standard includes do not define it. */
++#undef int64_t
++
++/* Define to the type of an unsigned integer type of width exactly 64 bits if
++   such a type exists and the standard includes do not define it. */
++#undef uint64_t
+diff --git a/bolt-plugin/configure b/bolt-plugin/configure
+new file mode 100755
+index 000000000..63bde9a41
+--- /dev/null
++++ b/bolt-plugin/configure
+@@ -0,0 +1,20909 @@
++#! /bin/sh
++# Guess values for system-dependent variables and create Makefiles.
++# Generated by GNU Autoconf 2.71 for bolt plugin for ld 0.1.
++#
++#
++# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
++# Inc.
++#
++#
++# This configure script is free software; the Free Software Foundation
++# gives unlimited permission to copy, distribute and modify it.
++## -------------------- ##
++## M4sh Initialization. ##
++## -------------------- ##
++
++# Be more Bourne compatible
++DUALCASE=1; export DUALCASE # for MKS sh
++as_nop=:
++if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
++then :
++  emulate sh
++  NULLCMD=:
++  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
++  # is contrary to our usage.  Disable this feature.
++  alias -g '${1+"$@"}'='"$@"'
++  setopt NO_GLOB_SUBST
++else $as_nop
++  case `(set -o) 2>/dev/null` in #(
++  *posix*) :
++    set -o posix ;; #(
++  *) :
++     ;;
++esac
++fi
++
++
++
++# Reset variables that may have inherited troublesome values from
++# the environment.
++
++# IFS needs to be set, to space, tab, and newline, in precisely that order.
++# (If _AS_PATH_WALK were called with IFS unset, it would have the
++# side effect of setting IFS to empty, thus disabling word splitting.)
++# Quoting is to prevent editors from complaining about space-tab.
++as_nl='
++'
++export as_nl
++IFS=" ""	$as_nl"
++
++PS1='$ '
++PS2='> '
++PS4='+ '
++
++# Ensure predictable behavior from utilities with locale-dependent output.
++LC_ALL=C
++export LC_ALL
++LANGUAGE=C
++export LANGUAGE
++
++# We cannot yet rely on "unset" to work, but we need these variables
++# to be unset--not just set to an empty or harmless value--now, to
++# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh).  This construct
++# also avoids known problems related to "unset" and subshell syntax
++# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
++for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
++do eval test \${$as_var+y} \
++  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
++done
++
++# Ensure that fds 0, 1, and 2 are open.
++if (exec 3>&0) 2>/dev/null; then :; else exec 0</dev/null; fi
++if (exec 3>&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
++if (exec 3>&2)            ; then :; else exec 2>/dev/null; fi
++
++# The user is always right.
++if ${PATH_SEPARATOR+false} :; then
++  PATH_SEPARATOR=:
++  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
++    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
++      PATH_SEPARATOR=';'
++  }
++fi
++
++
++# Find who we are.  Look in the path if we contain no directory separator.
++as_myself=
++case $0 in #((
++  *[\\/]* ) as_myself=$0 ;;
++  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    test -r "$as_dir$0" && as_myself=$as_dir$0 && break
++  done
++IFS=$as_save_IFS
++
++     ;;
++esac
++# We did not find ourselves, most probably we were run as `sh COMMAND'
++# in which case we are not to be found in the path.
++if test "x$as_myself" = x; then
++  as_myself=$0
++fi
++if test ! -f "$as_myself"; then
++  printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
++  exit 1
++fi
++
++
++# Use a proper internal environment variable to ensure we don't fall
++  # into an infinite loop, continuously re-executing ourselves.
++  if test x"${_as_can_reexec}" != xno && test "x$CONFIG_SHELL" != x; then
++    _as_can_reexec=no; export _as_can_reexec;
++    # We cannot yet assume a decent shell, so we have to provide a
++# neutralization value for shells without unset; and this also
++# works around shells that cannot unset nonexistent variables.
++# Preserve -v and -x to the replacement shell.
++BASH_ENV=/dev/null
++ENV=/dev/null
++(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
++case $- in # ((((
++  *v*x* | *x*v* ) as_opts=-vx ;;
++  *v* ) as_opts=-v ;;
++  *x* ) as_opts=-x ;;
++  * ) as_opts= ;;
++esac
++exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
++# Admittedly, this is quite paranoid, since all the known shells bail
++# out after a failed `exec'.
++printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
++exit 255
++  fi
++  # We don't want this to propagate to other subprocesses.
++          { _as_can_reexec=; unset _as_can_reexec;}
++if test "x$CONFIG_SHELL" = x; then
++  as_bourne_compatible="as_nop=:
++if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
++then :
++  emulate sh
++  NULLCMD=:
++  # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
++  # is contrary to our usage.  Disable this feature.
++  alias -g '\${1+\"\$@\"}'='\"\$@\"'
++  setopt NO_GLOB_SUBST
++else \$as_nop
++  case \`(set -o) 2>/dev/null\` in #(
++  *posix*) :
++    set -o posix ;; #(
++  *) :
++     ;;
++esac
++fi
++"
++  as_required="as_fn_return () { (exit \$1); }
++as_fn_success () { as_fn_return 0; }
++as_fn_failure () { as_fn_return 1; }
++as_fn_ret_success () { return 0; }
++as_fn_ret_failure () { return 1; }
++
++exitcode=0
++as_fn_success || { exitcode=1; echo as_fn_success failed.; }
++as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
++as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
++as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
++if ( set x; as_fn_ret_success y && test x = \"\$1\" )
++then :
++
++else \$as_nop
++  exitcode=1; echo positional parameters were not saved.
++fi
++test x\$exitcode = x0 || exit 1
++blah=\$(echo \$(echo blah))
++test x\"\$blah\" = xblah || exit 1
++test -x / || exit 1"
++  as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
++  as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
++  eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
++  test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
++
++  test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || (
++    ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
++    ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
++    ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
++    PATH=/empty FPATH=/empty; export PATH FPATH
++    test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\
++      || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1
++test \$(( 1 + 1 )) = 2 || exit 1"
++  if (eval "$as_required") 2>/dev/null
++then :
++  as_have_required=yes
++else $as_nop
++  as_have_required=no
++fi
++  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null
++then :
++
++else $as_nop
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++as_found=false
++for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++  as_found=:
++  case $as_dir in #(
++	 /*)
++	   for as_base in sh bash ksh sh5; do
++	     # Try only shells that exist, to save several forks.
++	     as_shell=$as_dir$as_base
++	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
++		    as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null
++then :
++  CONFIG_SHELL=$as_shell as_have_required=yes
++		   if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null
++then :
++  break 2
++fi
++fi
++	   done;;
++       esac
++  as_found=false
++done
++IFS=$as_save_IFS
++if $as_found
++then :
++
++else $as_nop
++  if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
++	      as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null
++then :
++  CONFIG_SHELL=$SHELL as_have_required=yes
++fi
++fi
++
++
++      if test "x$CONFIG_SHELL" != x
++then :
++  export CONFIG_SHELL
++             # We cannot yet assume a decent shell, so we have to provide a
++# neutralization value for shells without unset; and this also
++# works around shells that cannot unset nonexistent variables.
++# Preserve -v and -x to the replacement shell.
++BASH_ENV=/dev/null
++ENV=/dev/null
++(unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV
++case $- in # ((((
++  *v*x* | *x*v* ) as_opts=-vx ;;
++  *v* ) as_opts=-v ;;
++  *x* ) as_opts=-x ;;
++  * ) as_opts= ;;
++esac
++exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
++# Admittedly, this is quite paranoid, since all the known shells bail
++# out after a failed `exec'.
++printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
++exit 255
++fi
++
++    if test x$as_have_required = xno
++then :
++  printf "%s\n" "$0: This script requires a shell more modern than all"
++  printf "%s\n" "$0: the shells that I found on your system."
++  if test ${ZSH_VERSION+y} ; then
++    printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should"
++    printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later."
++  else
++    printf "%s\n" "$0: Please tell bug-autoconf@gnu.org about your system,
++$0: including any error possibly output before this
++$0: message. Then install a modern shell, or manually run
++$0: the script under such a shell if you do have one."
++  fi
++  exit 1
++fi
++fi
++fi
++SHELL=${CONFIG_SHELL-/bin/sh}
++export SHELL
++# Unset more variables known to interfere with behavior of common tools.
++CLICOLOR_FORCE= GREP_OPTIONS=
++unset CLICOLOR_FORCE GREP_OPTIONS
++
++## --------------------- ##
++## M4sh Shell Functions. ##
++## --------------------- ##
++# as_fn_unset VAR
++# ---------------
++# Portably unset VAR.
++as_fn_unset ()
++{
++  { eval $1=; unset $1;}
++}
++as_unset=as_fn_unset
++
++
++# as_fn_set_status STATUS
++# -----------------------
++# Set $? to STATUS, without forking.
++as_fn_set_status ()
++{
++  return $1
++} # as_fn_set_status
++
++# as_fn_exit STATUS
++# -----------------
++# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
++as_fn_exit ()
++{
++  set +e
++  as_fn_set_status $1
++  exit $1
++} # as_fn_exit
++# as_fn_nop
++# ---------
++# Do nothing but, unlike ":", preserve the value of $?.
++as_fn_nop ()
++{
++  return $?
++}
++as_nop=as_fn_nop
++
++# as_fn_mkdir_p
++# -------------
++# Create "$as_dir" as a directory, including parents if necessary.
++as_fn_mkdir_p ()
++{
++
++  case $as_dir in #(
++  -*) as_dir=./$as_dir;;
++  esac
++  test -d "$as_dir" || eval $as_mkdir_p || {
++    as_dirs=
++    while :; do
++      case $as_dir in #(
++      *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
++      *) as_qdir=$as_dir;;
++      esac
++      as_dirs="'$as_qdir' $as_dirs"
++      as_dir=`$as_dirname -- "$as_dir" ||
++$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
++	 X"$as_dir" : 'X\(//\)[^/]' \| \
++	 X"$as_dir" : 'X\(//\)$' \| \
++	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X"$as_dir" |
++    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)[^/].*/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++      test -d "$as_dir" && break
++    done
++    test -z "$as_dirs" || eval "mkdir $as_dirs"
++  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
++
++
++} # as_fn_mkdir_p
++
++# as_fn_executable_p FILE
++# -----------------------
++# Test if FILE is an executable regular file.
++as_fn_executable_p ()
++{
++  test -f "$1" && test -x "$1"
++} # as_fn_executable_p
++# as_fn_append VAR VALUE
++# ----------------------
++# Append the text in VALUE to the end of the definition contained in VAR. Take
++# advantage of any shell optimizations that allow amortized linear growth over
++# repeated appends, instead of the typical quadratic growth present in naive
++# implementations.
++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
++then :
++  eval 'as_fn_append ()
++  {
++    eval $1+=\$2
++  }'
++else $as_nop
++  as_fn_append ()
++  {
++    eval $1=\$$1\$2
++  }
++fi # as_fn_append
++
++# as_fn_arith ARG...
++# ------------------
++# Perform arithmetic evaluation on the ARGs, and store the result in the
++# global $as_val. Take advantage of shells that can avoid forks. The arguments
++# must be portable across $(()) and expr.
++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
++then :
++  eval 'as_fn_arith ()
++  {
++    as_val=$(( $* ))
++  }'
++else $as_nop
++  as_fn_arith ()
++  {
++    as_val=`expr "$@" || test $? -eq 1`
++  }
++fi # as_fn_arith
++
++# as_fn_nop
++# ---------
++# Do nothing but, unlike ":", preserve the value of $?.
++as_fn_nop ()
++{
++  return $?
++}
++as_nop=as_fn_nop
++
++# as_fn_error STATUS ERROR [LINENO LOG_FD]
++# ----------------------------------------
++# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
++# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
++# script with STATUS, using 1 if that was 0.
++as_fn_error ()
++{
++  as_status=$1; test $as_status -eq 0 && as_status=1
++  if test "$4"; then
++    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
++  fi
++  printf "%s\n" "$as_me: error: $2" >&2
++  as_fn_exit $as_status
++} # as_fn_error
++
++if expr a : '\(a\)' >/dev/null 2>&1 &&
++   test "X`expr 00001 : '.*\(...\)'`" = X001; then
++  as_expr=expr
++else
++  as_expr=false
++fi
++
++if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
++  as_basename=basename
++else
++  as_basename=false
++fi
++
++if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
++  as_dirname=dirname
++else
++  as_dirname=false
++fi
++
++as_me=`$as_basename -- "$0" ||
++$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
++	 X"$0" : 'X\(//\)$' \| \
++	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X/"$0" |
++    sed '/^.*\/\([^/][^/]*\)\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\/\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\/\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++
++# Avoid depending upon Character Ranges.
++as_cr_letters='abcdefghijklmnopqrstuvwxyz'
++as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
++as_cr_Letters=$as_cr_letters$as_cr_LETTERS
++as_cr_digits='0123456789'
++as_cr_alnum=$as_cr_Letters$as_cr_digits
++
++
++  as_lineno_1=$LINENO as_lineno_1a=$LINENO
++  as_lineno_2=$LINENO as_lineno_2a=$LINENO
++  eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" &&
++  test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || {
++  # Blame Lee E. McMahon (1931-1989) for sed's syntax.  :-)
++  sed -n '
++    p
++    /[$]LINENO/=
++  ' <$as_myself |
++    sed '
++      s/[$]LINENO.*/&-/
++      t lineno
++      b
++      :lineno
++      N
++      :loop
++      s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/
++      t loop
++      s/-\n.*//
++    ' >$as_me.lineno &&
++  chmod +x "$as_me.lineno" ||
++    { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
++
++  # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
++  # already done that, so ensure we don't try to do so again and fall
++  # in an infinite loop.  This has already happened in practice.
++  _as_can_reexec=no; export _as_can_reexec
++  # Don't try to exec as it changes $[0], causing all sort of problems
++  # (the dirname of $[0] is not the place where we might find the
++  # original and so on.  Autoconf is especially sensitive to this).
++  . "./$as_me.lineno"
++  # Exit status is that of the last command.
++  exit
++}
++
++
++# Determine whether it's possible to make 'echo' print without a newline.
++# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
++# for compatibility with existing Makefiles.
++ECHO_C= ECHO_N= ECHO_T=
++case `echo -n x` in #(((((
++-n*)
++  case `echo 'xy\c'` in
++  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
++  xy)  ECHO_C='\c';;
++  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
++       ECHO_T='	';;
++  esac;;
++*)
++  ECHO_N='-n';;
++esac
++
++# For backward compatibility with old third-party macros, we provide
++# the shell variables $as_echo and $as_echo_n.  New code should use
++# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
++as_echo='printf %s\n'
++as_echo_n='printf %s'
++
++
++rm -f conf$$ conf$$.exe conf$$.file
++if test -d conf$$.dir; then
++  rm -f conf$$.dir/conf$$.file
++else
++  rm -f conf$$.dir
++  mkdir conf$$.dir 2>/dev/null
++fi
++if (echo >conf$$.file) 2>/dev/null; then
++  if ln -s conf$$.file conf$$ 2>/dev/null; then
++    as_ln_s='ln -s'
++    # ... but there are two gotchas:
++    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
++    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
++    # In both cases, we have to default to `cp -pR'.
++    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
++      as_ln_s='cp -pR'
++  elif ln conf$$.file conf$$ 2>/dev/null; then
++    as_ln_s=ln
++  else
++    as_ln_s='cp -pR'
++  fi
++else
++  as_ln_s='cp -pR'
++fi
++rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
++rmdir conf$$.dir 2>/dev/null
++
++if mkdir -p . 2>/dev/null; then
++  as_mkdir_p='mkdir -p "$as_dir"'
++else
++  test -d ./-p && rmdir ./-p
++  as_mkdir_p=false
++fi
++
++as_test_x='test -x'
++as_executable_p=as_fn_executable_p
++
++# Sed expression to map a string onto a valid CPP name.
++as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
++
++# Sed expression to map a string onto a valid variable name.
++as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
++
++SHELL=${CONFIG_SHELL-/bin/sh}
++
++
++test -n "$DJDIR" || exec 7<&0 &1
++
++# Name of the host.
++# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status,
++# so uname gets run too.
++ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q`
++
++#
++# Initializations.
++#
++ac_default_prefix=/usr/local
++ac_clean_files=
++ac_config_libobj_dir=.
++LIBOBJS=
++cross_compiling=no
++subdirs=
++MFLAGS=
++MAKEFLAGS=
++
++# Identity of this package.
++PACKAGE_NAME='bolt plugin for ld'
++PACKAGE_TARNAME='bolt-plugin'
++PACKAGE_VERSION='0.1'
++PACKAGE_STRING='bolt plugin for ld 0.1'
++PACKAGE_BUGREPORT=''
++PACKAGE_URL=''
++
++# Factoring default headers for most tests.
++ac_includes_default="\
++#include 
++#ifdef HAVE_STDIO_H
++# include 
++#endif
++#ifdef HAVE_STDLIB_H
++# include 
++#endif
++#ifdef HAVE_STRING_H
++# include 
++#endif
++#ifdef HAVE_INTTYPES_H
++# include 
++#endif
++#ifdef HAVE_STDINT_H
++# include 
++#endif
++#ifdef HAVE_STRINGS_H
++# include 
++#endif
++#ifdef HAVE_SYS_TYPES_H
++# include 
++#endif
++#ifdef HAVE_SYS_STAT_H
++# include 
++#endif
++#ifdef HAVE_UNISTD_H
++# include 
++#endif"
++
++ac_header_c_list=
++ac_subst_vars='am__EXEEXT_FALSE
++am__EXEEXT_TRUE
++LTLIBOBJS
++LIBOBJS
++target_noncanonical
++CXXCPP
++LT_SYS_LIBRARY_PATH
++OTOOL64
++OTOOL
++LIPO
++NMEDIT
++DSYMUTIL
++MANIFEST_TOOL
++RANLIB
++ac_ct_AR
++AR
++DLLTOOL
++OBJDUMP
++FILECMD
++LN_S
++NM
++ac_ct_DUMPBIN
++DUMPBIN
++LD
++FGREP
++EGREP
++GREP
++SED
++LIBTOOL
++real_target_noncanonical
++accel_dir_suffix
++gcc_build_dir
++ac_bolt_plugin_ldflags
++am__fastdepCXX_FALSE
++am__fastdepCXX_TRUE
++CXXDEPMODE
++ac_ct_CXX
++CXXFLAGS
++CXX
++am__fastdepCC_FALSE
++am__fastdepCC_TRUE
++CCDEPMODE
++am__nodep
++AMDEPBACKSLASH
++AMDEP_FALSE
++AMDEP_TRUE
++am__include
++DEPDIR
++OBJEXT
++EXEEXT
++ac_ct_CC
++CPPFLAGS
++LDFLAGS
++CFLAGS
++CC
++with_libiberty
++MAINT
++MAINTAINER_MODE_FALSE
++MAINTAINER_MODE_TRUE
++AM_BACKSLASH
++AM_DEFAULT_VERBOSITY
++AM_DEFAULT_V
++AM_V
++CSCOPE
++ETAGS
++CTAGS
++am__untar
++am__tar
++AMTAR
++am__leading_dot
++SET_MAKE
++AWK
++mkdir_p
++MKDIR_P
++INSTALL_STRIP_PROGRAM
++STRIP
++install_sh
++MAKEINFO
++AUTOHEADER
++AUTOMAKE
++AUTOCONF
++ACLOCAL
++VERSION
++PACKAGE
++CYGPATH_W
++am__isrc
++INSTALL_DATA
++INSTALL_SCRIPT
++INSTALL_PROGRAM
++target_os
++target_vendor
++target_cpu
++target
++host_os
++host_vendor
++host_cpu
++host
++build_os
++build_vendor
++build_cpu
++build
++target_alias
++host_alias
++build_alias
++LIBS
++ECHO_T
++ECHO_N
++ECHO_C
++DEFS
++mandir
++localedir
++libdir
++psdir
++pdfdir
++dvidir
++htmldir
++infodir
++docdir
++oldincludedir
++includedir
++runstatedir
++localstatedir
++sharedstatedir
++sysconfdir
++datadir
++datarootdir
++libexecdir
++sbindir
++bindir
++program_transform_name
++prefix
++exec_prefix
++PACKAGE_URL
++PACKAGE_BUGREPORT
++PACKAGE_STRING
++PACKAGE_VERSION
++PACKAGE_TARNAME
++PACKAGE_NAME
++PATH_SEPARATOR
++SHELL
++am__quote'
++ac_subst_files=''
++ac_user_opts='
++enable_option_checking
++enable_silent_rules
++enable_maintainer_mode
++with_libiberty
++enable_dependency_tracking
++enable_largefile
++enable_shared
++enable_static
++with_pic
++enable_fast_install
++with_aix_soname
++with_gnu_ld
++with_sysroot
++enable_libtool_lock
++'
++      ac_precious_vars='build_alias
++host_alias
++target_alias
++CC
++CFLAGS
++LDFLAGS
++LIBS
++CPPFLAGS
++CXX
++CXXFLAGS
++CCC
++LT_SYS_LIBRARY_PATH
++CXXCPP'
++
++
++# Initialize some variables set by options.
++ac_init_help=
++ac_init_version=false
++ac_unrecognized_opts=
++ac_unrecognized_sep=
++# The variables have the same names as the options, with
++# dashes changed to underlines.
++cache_file=/dev/null
++exec_prefix=NONE
++no_create=
++no_recursion=
++prefix=NONE
++program_prefix=NONE
++program_suffix=NONE
++program_transform_name=s,x,x,
++silent=
++site=
++srcdir=
++verbose=
++x_includes=NONE
++x_libraries=NONE
++
++# Installation directory options.
++# These are left unexpanded so users can "make install exec_prefix=/foo"
++# and all the variables that are supposed to be based on exec_prefix
++# by default will actually change.
++# Use braces instead of parens because sh, perl, etc. also accept them.
++# (The list follows the same order as the GNU Coding Standards.)
++bindir='${exec_prefix}/bin'
++sbindir='${exec_prefix}/sbin'
++libexecdir='${exec_prefix}/libexec'
++datarootdir='${prefix}/share'
++datadir='${datarootdir}'
++sysconfdir='${prefix}/etc'
++sharedstatedir='${prefix}/com'
++localstatedir='${prefix}/var'
++runstatedir='${localstatedir}/run'
++includedir='${prefix}/include'
++oldincludedir='/usr/include'
++docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
++infodir='${datarootdir}/info'
++htmldir='${docdir}'
++dvidir='${docdir}'
++pdfdir='${docdir}'
++psdir='${docdir}'
++libdir='${exec_prefix}/lib'
++localedir='${datarootdir}/locale'
++mandir='${datarootdir}/man'
++
++ac_prev=
++ac_dashdash=
++for ac_option
++do
++  # If the previous option needs an argument, assign it.
++  if test -n "$ac_prev"; then
++    eval $ac_prev=\$ac_option
++    ac_prev=
++    continue
++  fi
++
++  case $ac_option in
++  *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;;
++  *=)   ac_optarg= ;;
++  *)    ac_optarg=yes ;;
++  esac
++
++  case $ac_dashdash$ac_option in
++  --)
++    ac_dashdash=yes ;;
++
++  -bindir | --bindir | --bindi | --bind | --bin | --bi)
++    ac_prev=bindir ;;
++  -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*)
++    bindir=$ac_optarg ;;
++
++  -build | --build | --buil | --bui | --bu)
++    ac_prev=build_alias ;;
++  -build=* | --build=* | --buil=* | --bui=* | --bu=*)
++    build_alias=$ac_optarg ;;
++
++  -cache-file | --cache-file | --cache-fil | --cache-fi \
++  | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c)
++    ac_prev=cache_file ;;
++  -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \
++  | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*)
++    cache_file=$ac_optarg ;;
++
++  --config-cache | -C)
++    cache_file=config.cache ;;
++
++  -datadir | --datadir | --datadi | --datad)
++    ac_prev=datadir ;;
++  -datadir=* | --datadir=* | --datadi=* | --datad=*)
++    datadir=$ac_optarg ;;
++
++  -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \
++  | --dataroo | --dataro | --datar)
++    ac_prev=datarootdir ;;
++  -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \
++  | --dataroot=* | --dataroo=* | --dataro=* | --datar=*)
++    datarootdir=$ac_optarg ;;
++
++  -disable-* | --disable-*)
++    ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
++    # Reject names that are not valid shell variable names.
++    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
++      as_fn_error $? "invalid feature name: \`$ac_useropt'"
++    ac_useropt_orig=$ac_useropt
++    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    case $ac_user_opts in
++      *"
++"enable_$ac_useropt"
++"*) ;;
++      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig"
++	 ac_unrecognized_sep=', ';;
++    esac
++    eval enable_$ac_useropt=no ;;
++
++  -docdir | --docdir | --docdi | --doc | --do)
++    ac_prev=docdir ;;
++  -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*)
++    docdir=$ac_optarg ;;
++
++  -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv)
++    ac_prev=dvidir ;;
++  -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*)
++    dvidir=$ac_optarg ;;
++
++  -enable-* | --enable-*)
++    ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
++    # Reject names that are not valid shell variable names.
++    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
++      as_fn_error $? "invalid feature name: \`$ac_useropt'"
++    ac_useropt_orig=$ac_useropt
++    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    case $ac_user_opts in
++      *"
++"enable_$ac_useropt"
++"*) ;;
++      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig"
++	 ac_unrecognized_sep=', ';;
++    esac
++    eval enable_$ac_useropt=\$ac_optarg ;;
++
++  -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \
++  | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \
++  | --exec | --exe | --ex)
++    ac_prev=exec_prefix ;;
++  -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \
++  | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \
++  | --exec=* | --exe=* | --ex=*)
++    exec_prefix=$ac_optarg ;;
++
++  -gas | --gas | --ga | --g)
++    # Obsolete; use --with-gas.
++    with_gas=yes ;;
++
++  -help | --help | --hel | --he | -h)
++    ac_init_help=long ;;
++  -help=r* | --help=r* | --hel=r* | --he=r* | -hr*)
++    ac_init_help=recursive ;;
++  -help=s* | --help=s* | --hel=s* | --he=s* | -hs*)
++    ac_init_help=short ;;
++
++  -host | --host | --hos | --ho)
++    ac_prev=host_alias ;;
++  -host=* | --host=* | --hos=* | --ho=*)
++    host_alias=$ac_optarg ;;
++
++  -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht)
++    ac_prev=htmldir ;;
++  -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \
++  | --ht=*)
++    htmldir=$ac_optarg ;;
++
++  -includedir | --includedir | --includedi | --included | --include \
++  | --includ | --inclu | --incl | --inc)
++    ac_prev=includedir ;;
++  -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \
++  | --includ=* | --inclu=* | --incl=* | --inc=*)
++    includedir=$ac_optarg ;;
++
++  -infodir | --infodir | --infodi | --infod | --info | --inf)
++    ac_prev=infodir ;;
++  -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*)
++    infodir=$ac_optarg ;;
++
++  -libdir | --libdir | --libdi | --libd)
++    ac_prev=libdir ;;
++  -libdir=* | --libdir=* | --libdi=* | --libd=*)
++    libdir=$ac_optarg ;;
++
++  -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \
++  | --libexe | --libex | --libe)
++    ac_prev=libexecdir ;;
++  -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \
++  | --libexe=* | --libex=* | --libe=*)
++    libexecdir=$ac_optarg ;;
++
++  -localedir | --localedir | --localedi | --localed | --locale)
++    ac_prev=localedir ;;
++  -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*)
++    localedir=$ac_optarg ;;
++
++  -localstatedir | --localstatedir | --localstatedi | --localstated \
++  | --localstate | --localstat | --localsta | --localst | --locals)
++    ac_prev=localstatedir ;;
++  -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \
++  | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*)
++    localstatedir=$ac_optarg ;;
++
++  -mandir | --mandir | --mandi | --mand | --man | --ma | --m)
++    ac_prev=mandir ;;
++  -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*)
++    mandir=$ac_optarg ;;
++
++  -nfp | --nfp | --nf)
++    # Obsolete; use --without-fp.
++    with_fp=no ;;
++
++  -no-create | --no-create | --no-creat | --no-crea | --no-cre \
++  | --no-cr | --no-c | -n)
++    no_create=yes ;;
++
++  -no-recursion | --no-recursion | --no-recursio | --no-recursi \
++  | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r)
++    no_recursion=yes ;;
++
++  -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \
++  | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \
++  | --oldin | --oldi | --old | --ol | --o)
++    ac_prev=oldincludedir ;;
++  -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \
++  | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \
++  | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*)
++    oldincludedir=$ac_optarg ;;
++
++  -prefix | --prefix | --prefi | --pref | --pre | --pr | --p)
++    ac_prev=prefix ;;
++  -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*)
++    prefix=$ac_optarg ;;
++
++  -program-prefix | --program-prefix | --program-prefi | --program-pref \
++  | --program-pre | --program-pr | --program-p)
++    ac_prev=program_prefix ;;
++  -program-prefix=* | --program-prefix=* | --program-prefi=* \
++  | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*)
++    program_prefix=$ac_optarg ;;
++
++  -program-suffix | --program-suffix | --program-suffi | --program-suff \
++  | --program-suf | --program-su | --program-s)
++    ac_prev=program_suffix ;;
++  -program-suffix=* | --program-suffix=* | --program-suffi=* \
++  | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*)
++    program_suffix=$ac_optarg ;;
++
++  -program-transform-name | --program-transform-name \
++  | --program-transform-nam | --program-transform-na \
++  | --program-transform-n | --program-transform- \
++  | --program-transform | --program-transfor \
++  | --program-transfo | --program-transf \
++  | --program-trans | --program-tran \
++  | --progr-tra | --program-tr | --program-t)
++    ac_prev=program_transform_name ;;
++  -program-transform-name=* | --program-transform-name=* \
++  | --program-transform-nam=* | --program-transform-na=* \
++  | --program-transform-n=* | --program-transform-=* \
++  | --program-transform=* | --program-transfor=* \
++  | --program-transfo=* | --program-transf=* \
++  | --program-trans=* | --program-tran=* \
++  | --progr-tra=* | --program-tr=* | --program-t=*)
++    program_transform_name=$ac_optarg ;;
++
++  -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd)
++    ac_prev=pdfdir ;;
++  -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*)
++    pdfdir=$ac_optarg ;;
++
++  -psdir | --psdir | --psdi | --psd | --ps)
++    ac_prev=psdir ;;
++  -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*)
++    psdir=$ac_optarg ;;
++
++  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
++  | -silent | --silent | --silen | --sile | --sil)
++    silent=yes ;;
++
++  -runstatedir | --runstatedir | --runstatedi | --runstated \
++  | --runstate | --runstat | --runsta | --runst | --runs \
++  | --run | --ru | --r)
++    ac_prev=runstatedir ;;
++  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
++  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
++  | --run=* | --ru=* | --r=*)
++    runstatedir=$ac_optarg ;;
++
++  -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
++    ac_prev=sbindir ;;
++  -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
++  | --sbi=* | --sb=*)
++    sbindir=$ac_optarg ;;
++
++  -sharedstatedir | --sharedstatedir | --sharedstatedi \
++  | --sharedstated | --sharedstate | --sharedstat | --sharedsta \
++  | --sharedst | --shareds | --shared | --share | --shar \
++  | --sha | --sh)
++    ac_prev=sharedstatedir ;;
++  -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \
++  | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \
++  | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \
++  | --sha=* | --sh=*)
++    sharedstatedir=$ac_optarg ;;
++
++  -site | --site | --sit)
++    ac_prev=site ;;
++  -site=* | --site=* | --sit=*)
++    site=$ac_optarg ;;
++
++  -srcdir | --srcdir | --srcdi | --srcd | --src | --sr)
++    ac_prev=srcdir ;;
++  -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*)
++    srcdir=$ac_optarg ;;
++
++  -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \
++  | --syscon | --sysco | --sysc | --sys | --sy)
++    ac_prev=sysconfdir ;;
++  -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \
++  | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*)
++    sysconfdir=$ac_optarg ;;
++
++  -target | --target | --targe | --targ | --tar | --ta | --t)
++    ac_prev=target_alias ;;
++  -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*)
++    target_alias=$ac_optarg ;;
++
++  -v | -verbose | --verbose | --verbos | --verbo | --verb)
++    verbose=yes ;;
++
++  -version | --version | --versio | --versi | --vers | -V)
++    ac_init_version=: ;;
++
++  -with-* | --with-*)
++    ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
++    # Reject names that are not valid shell variable names.
++    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
++      as_fn_error $? "invalid package name: \`$ac_useropt'"
++    ac_useropt_orig=$ac_useropt
++    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    case $ac_user_opts in
++      *"
++"with_$ac_useropt"
++"*) ;;
++      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig"
++	 ac_unrecognized_sep=', ';;
++    esac
++    eval with_$ac_useropt=\$ac_optarg ;;
++
++  -without-* | --without-*)
++    ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
++    # Reject names that are not valid shell variable names.
++    expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
++      as_fn_error $? "invalid package name: \`$ac_useropt'"
++    ac_useropt_orig=$ac_useropt
++    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    case $ac_user_opts in
++      *"
++"with_$ac_useropt"
++"*) ;;
++      *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig"
++	 ac_unrecognized_sep=', ';;
++    esac
++    eval with_$ac_useropt=no ;;
++
++  --x)
++    # Obsolete; use --with-x.
++    with_x=yes ;;
++
++  -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \
++  | --x-incl | --x-inc | --x-in | --x-i)
++    ac_prev=x_includes ;;
++  -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \
++  | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*)
++    x_includes=$ac_optarg ;;
++
++  -x-libraries | --x-libraries | --x-librarie | --x-librari \
++  | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l)
++    ac_prev=x_libraries ;;
++  -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \
++  | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*)
++    x_libraries=$ac_optarg ;;
++
++  -*) as_fn_error $? "unrecognized option: \`$ac_option'
++Try \`$0 --help' for more information"
++    ;;
++
++  *=*)
++    ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='`
++    # Reject names that are not valid shell variable names.
++    case $ac_envvar in #(
++      '' | [0-9]* | *[!_$as_cr_alnum]* )
++      as_fn_error $? "invalid variable name: \`$ac_envvar'" ;;
++    esac
++    eval $ac_envvar=\$ac_optarg
++    export $ac_envvar ;;
++
++  *)
++    # FIXME: should be removed in autoconf 3.0.
++    printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2
++    expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
++      printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2
++    : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
++    ;;
++
++  esac
++done
++
++if test -n "$ac_prev"; then
++  ac_option=--`echo $ac_prev | sed 's/_/-/g'`
++  as_fn_error $? "missing argument to $ac_option"
++fi
++
++if test -n "$ac_unrecognized_opts"; then
++  case $enable_option_checking in
++    no) ;;
++    fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
++    *)     printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
++  esac
++fi
++
++# Check all directory arguments for consistency.
++for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
++		datadir sysconfdir sharedstatedir localstatedir includedir \
++		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
++		libdir localedir mandir runstatedir
++do
++  eval ac_val=\$$ac_var
++  # Remove trailing slashes.
++  case $ac_val in
++    */ )
++      ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'`
++      eval $ac_var=\$ac_val;;
++  esac
++  # Be sure to have absolute directory names.
++  case $ac_val in
++    [\\/$]* | ?:[\\/]* )  continue;;
++    NONE | '' ) case $ac_var in *prefix ) continue;; esac;;
++  esac
++  as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val"
++done
++
++# There might be people who depend on the old broken behavior: `$host'
++# used to hold the argument of --host etc.
++# FIXME: To remove some day.
++build=$build_alias
++host=$host_alias
++target=$target_alias
++
++# FIXME: To remove some day.
++if test "x$host_alias" != x; then
++  if test "x$build_alias" = x; then
++    cross_compiling=maybe
++  elif test "x$build_alias" != "x$host_alias"; then
++    cross_compiling=yes
++  fi
++fi
++
++ac_tool_prefix=
++test -n "$host_alias" && ac_tool_prefix=$host_alias-
++
++test "$silent" = yes && exec 6>/dev/null
++
++
++ac_pwd=`pwd` && test -n "$ac_pwd" &&
++ac_ls_di=`ls -di .` &&
++ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` ||
++  as_fn_error $? "working directory cannot be determined"
++test "X$ac_ls_di" = "X$ac_pwd_ls_di" ||
++  as_fn_error $? "pwd does not report name of working directory"
++
++
++# Find the source files, if location was not specified.
++if test -z "$srcdir"; then
++  ac_srcdir_defaulted=yes
++  # Try the directory containing this script, then the parent directory.
++  ac_confdir=`$as_dirname -- "$as_myself" ||
++$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
++	 X"$as_myself" : 'X\(//\)[^/]' \| \
++	 X"$as_myself" : 'X\(//\)$' \| \
++	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X"$as_myself" |
++    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)[^/].*/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++  srcdir=$ac_confdir
++  if test ! -r "$srcdir/$ac_unique_file"; then
++    srcdir=..
++  fi
++else
++  ac_srcdir_defaulted=no
++fi
++if test ! -r "$srcdir/$ac_unique_file"; then
++  test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .."
++  as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir"
++fi
++ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work"
++ac_abs_confdir=`(
++	cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg"
++	pwd)`
++# When building in place, set srcdir=.
++if test "$ac_abs_confdir" = "$ac_pwd"; then
++  srcdir=.
++fi
++# Remove unnecessary trailing slashes from srcdir.
++# Double slashes in file names in object file debugging info
++# mess up M-x gdb in Emacs.
++case $srcdir in
++*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;;
++esac
++for ac_var in $ac_precious_vars; do
++  eval ac_env_${ac_var}_set=\${${ac_var}+set}
++  eval ac_env_${ac_var}_value=\$${ac_var}
++  eval ac_cv_env_${ac_var}_set=\${${ac_var}+set}
++  eval ac_cv_env_${ac_var}_value=\$${ac_var}
++done
++
++#
++# Report the --help message.
++#
++if test "$ac_init_help" = "long"; then
++  # Omit some internal or obsolete options to make the list less imposing.
++  # This message is too long to be a string in the A/UX 3.1 sh.
++  cat <<_ACEOF
++\`configure' configures bolt plugin for ld 0.1 to adapt to many kinds of systems.
++
++Usage: $0 [OPTION]... [VAR=VALUE]...
++
++To assign environment variables (e.g., CC, CFLAGS...), specify them as
++VAR=VALUE.  See below for descriptions of some of the useful variables.
++
++Defaults for the options are specified in brackets.
++
++Configuration:
++  -h, --help              display this help and exit
++      --help=short        display options specific to this package
++      --help=recursive    display the short help of all the included packages
++  -V, --version           display version information and exit
++  -q, --quiet, --silent   do not print \`checking ...' messages
++      --cache-file=FILE   cache test results in FILE [disabled]
++  -C, --config-cache      alias for \`--cache-file=config.cache'
++  -n, --no-create         do not create output files
++      --srcdir=DIR        find the sources in DIR [configure dir or \`..']
++
++Installation directories:
++  --prefix=PREFIX         install architecture-independent files in PREFIX
++                          [$ac_default_prefix]
++  --exec-prefix=EPREFIX   install architecture-dependent files in EPREFIX
++                          [PREFIX]
++
++By default, \`make install' will install all the files in
++\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc.  You can specify
++an installation prefix other than \`$ac_default_prefix' using \`--prefix',
++for instance \`--prefix=\$HOME'.
++
++For better control, use the options below.
++
++Fine tuning of the installation directories:
++  --bindir=DIR            user executables [EPREFIX/bin]
++  --sbindir=DIR           system admin executables [EPREFIX/sbin]
++  --libexecdir=DIR        program executables [EPREFIX/libexec]
++  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
++  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
++  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
++  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
++  --libdir=DIR            object code libraries [EPREFIX/lib]
++  --includedir=DIR        C header files [PREFIX/include]
++  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
++  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
++  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
++  --infodir=DIR           info documentation [DATAROOTDIR/info]
++  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
++  --mandir=DIR            man documentation [DATAROOTDIR/man]
++  --docdir=DIR            documentation root [DATAROOTDIR/doc/bolt-plugin]
++  --htmldir=DIR           html documentation [DOCDIR]
++  --dvidir=DIR            dvi documentation [DOCDIR]
++  --pdfdir=DIR            pdf documentation [DOCDIR]
++  --psdir=DIR             ps documentation [DOCDIR]
++_ACEOF
++
++  cat <<\_ACEOF
++
++Program names:
++  --program-prefix=PREFIX            prepend PREFIX to installed program names
++  --program-suffix=SUFFIX            append SUFFIX to installed program names
++  --program-transform-name=PROGRAM   run sed PROGRAM on installed program names
++
++System types:
++  --build=BUILD     configure for building on BUILD [guessed]
++  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
++  --target=TARGET   configure for building compilers for TARGET [HOST]
++_ACEOF
++fi
++
++if test -n "$ac_init_help"; then
++  case $ac_init_help in
++     short | recursive ) echo "Configuration of bolt plugin for ld 0.1:";;
++   esac
++  cat <<\_ACEOF
++
++Optional Features:
++  --disable-option-checking  ignore unrecognized --enable/--with options
++  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
++  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
++  --enable-silent-rules   less verbose build output (undo: "make V=1")
++  --disable-silent-rules  verbose build output (undo: "make V=0")
++  --enable-maintainer-mode
++                          enable make rules and dependencies not useful (and
++                          sometimes confusing) to the casual installer
++  --enable-dependency-tracking
++                          do not reject slow dependency extractors
++  --disable-dependency-tracking
++                          speeds up one-time build
++  --disable-largefile     omit support for large files
++  --enable-shared[=PKGS]  build shared libraries [default=yes]
++  --enable-static[=PKGS]  build static libraries [default=yes]
++  --enable-fast-install[=PKGS]
++                          optimize for fast installation [default=yes]
++  --disable-libtool-lock  avoid locking (might break parallel builds)
++
++Optional Packages:
++  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
++  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
++  --with-libiberty=PATH   specify the directory where to find libiberty
++                          [../libiberty]
++  --with-pic[=PKGS]       try to use only PIC/non-PIC objects [default=use
++                          both]
++  --with-aix-soname=aix|svr4|both
++                          shared library versioning (aka "SONAME") variant to
++                          provide on AIX, [default=aix].
++  --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
++  --with-sysroot[=DIR]    Search for dependent libraries within DIR (or the
++                          compiler's sysroot if not specified).
++
++Some influential environment variables:
++  CC          C compiler command
++  CFLAGS      C compiler flags
++  LDFLAGS     linker flags, e.g. -L if you have libraries in a
++              nonstandard directory 
++  LIBS        libraries to pass to the linker, e.g. -l
++  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I if
++              you have headers in a nonstandard directory 
++  CXX         C++ compiler command
++  CXXFLAGS    C++ compiler flags
++  LT_SYS_LIBRARY_PATH
++              User-defined run-time library search path.
++  CXXCPP      C++ preprocessor
++
++Use these variables to override the choices made by `configure' or to help
++it to find libraries and programs with nonstandard names/locations.
++
++Report bugs to the package provider.
++_ACEOF
++ac_status=$?
++fi
++
++if test "$ac_init_help" = "recursive"; then
++  # If there are subdirs, report their specific --help.
++  for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue
++    test -d "$ac_dir" ||
++      { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } ||
++      continue
++    ac_builddir=.
++
++case "$ac_dir" in
++.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
++*)
++  ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
++  # A ".." for each directory in $ac_dir_suffix.
++  ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
++  case $ac_top_builddir_sub in
++  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
++  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
++  esac ;;
++esac
++ac_abs_top_builddir=$ac_pwd
++ac_abs_builddir=$ac_pwd$ac_dir_suffix
++# for backward compatibility:
++ac_top_builddir=$ac_top_build_prefix
++
++case $srcdir in
++  .)  # We are building in place.
++    ac_srcdir=.
++    ac_top_srcdir=$ac_top_builddir_sub
++    ac_abs_top_srcdir=$ac_pwd ;;
++  [\\/]* | ?:[\\/]* )  # Absolute name.
++    ac_srcdir=$srcdir$ac_dir_suffix;
++    ac_top_srcdir=$srcdir
++    ac_abs_top_srcdir=$srcdir ;;
++  *) # Relative name.
++    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
++    ac_top_srcdir=$ac_top_build_prefix$srcdir
++    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
++esac
++ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
++
++    cd "$ac_dir" || { ac_status=$?; continue; }
++    # Check for configure.gnu first; this name is used for a wrapper for
++    # Metaconfig's "Configure" on case-insensitive file systems.
++    if test -f "$ac_srcdir/configure.gnu"; then
++      echo &&
++      $SHELL "$ac_srcdir/configure.gnu" --help=recursive
++    elif test -f "$ac_srcdir/configure"; then
++      echo &&
++      $SHELL "$ac_srcdir/configure" --help=recursive
++    else
++      printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2
++    fi || ac_status=$?
++    cd "$ac_pwd" || { ac_status=$?; break; }
++  done
++fi
++
++test -n "$ac_init_help" && exit $ac_status
++if $ac_init_version; then
++  cat <<\_ACEOF
++bolt plugin for ld configure 0.1
++generated by GNU Autoconf 2.71
++
++Copyright (C) 2021 Free Software Foundation, Inc.
++This configure script is free software; the Free Software Foundation
++gives unlimited permission to copy, distribute and modify it.
++_ACEOF
++  exit
++fi
++
++## ------------------------ ##
++## Autoconf initialization. ##
++## ------------------------ ##
++
++# ac_fn_c_try_compile LINENO
++# --------------------------
++# Try to compile conftest.$ac_ext, and return whether this succeeded.
++ac_fn_c_try_compile ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  rm -f conftest.$ac_objext conftest.beam
++  if { { ac_try="$ac_compile"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_compile") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    grep -v '^ *+' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++    mv -f conftest.er1 conftest.err
++  fi
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && {
++	 test -z "$ac_c_werror_flag" ||
++	 test ! -s conftest.err
++       } && test -s conftest.$ac_objext
++then :
++  ac_retval=0
++else $as_nop
++  printf "%s\n" "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++	ac_retval=1
++fi
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++  as_fn_set_status $ac_retval
++
++} # ac_fn_c_try_compile
++
++# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
++# -------------------------------------------------------
++# Tests whether HEADER exists and can be compiled using the include files in
++# INCLUDES, setting the cache variable VAR accordingly.
++ac_fn_c_check_header_compile ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
++printf %s "checking for $2... " >&6; }
++if eval test \${$3+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$4
++#include <$2>
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  eval "$3=yes"
++else $as_nop
++  eval "$3=no"
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++eval ac_res=\$$3
++	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++printf "%s\n" "$ac_res" >&6; }
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++
++} # ac_fn_c_check_header_compile
++
++# ac_fn_cxx_try_compile LINENO
++# ----------------------------
++# Try to compile conftest.$ac_ext, and return whether this succeeded.
++ac_fn_cxx_try_compile ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  rm -f conftest.$ac_objext conftest.beam
++  if { { ac_try="$ac_compile"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_compile") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    grep -v '^ *+' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++    mv -f conftest.er1 conftest.err
++  fi
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && {
++	 test -z "$ac_cxx_werror_flag" ||
++	 test ! -s conftest.err
++       } && test -s conftest.$ac_objext
++then :
++  ac_retval=0
++else $as_nop
++  printf "%s\n" "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++	ac_retval=1
++fi
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++  as_fn_set_status $ac_retval
++
++} # ac_fn_cxx_try_compile
++
++# ac_fn_c_try_link LINENO
++# -----------------------
++# Try to link conftest.$ac_ext, and return whether this succeeded.
++ac_fn_c_try_link ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext
++  if { { ac_try="$ac_link"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_link") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    grep -v '^ *+' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++    mv -f conftest.er1 conftest.err
++  fi
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && {
++	 test -z "$ac_c_werror_flag" ||
++	 test ! -s conftest.err
++       } && test -s conftest$ac_exeext && {
++	 test "$cross_compiling" = yes ||
++	 test -x conftest$ac_exeext
++       }
++then :
++  ac_retval=0
++else $as_nop
++  printf "%s\n" "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++	ac_retval=1
++fi
++  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
++  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
++  # interfere with the next link command; also delete a directory that is
++  # left behind by Apple's compiler.  We do this before executing the actions.
++  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++  as_fn_set_status $ac_retval
++
++} # ac_fn_c_try_link
++
++# ac_fn_c_check_func LINENO FUNC VAR
++# ----------------------------------
++# Tests whether FUNC exists, setting the cache variable VAR accordingly
++ac_fn_c_check_func ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
++printf %s "checking for $2... " >&6; }
++if eval test \${$3+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++/* Define $2 to an innocuous variant, in case  declares $2.
++   For example, HP-UX 11i  declares gettimeofday.  */
++#define $2 innocuous_$2
++
++/* System header to define __stub macros and hopefully few prototypes,
++   which can conflict with char $2 (); below.  */
++
++#include 
++#undef $2
++
++/* Override any GCC internal prototype to avoid an error.
++   Use char because int might match the return type of a GCC
++   builtin and then its argument prototype would still apply.  */
++#ifdef __cplusplus
++extern "C"
++#endif
++char $2 ();
++/* The GNU C library defines this for functions which it implements
++    to always fail with ENOSYS.  Some functions are actually named
++    something starting with __ and the normal name is an alias.  */
++#if defined __stub_$2 || defined __stub___$2
++choke me
++#endif
++
++int
++main (void)
++{
++return $2 ();
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  eval "$3=yes"
++else $as_nop
++  eval "$3=no"
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++fi
++eval ac_res=\$$3
++	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++printf "%s\n" "$ac_res" >&6; }
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++
++} # ac_fn_c_check_func
++
++# ac_fn_cxx_try_cpp LINENO
++# ------------------------
++# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
++ac_fn_cxx_try_cpp ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  if { { ac_try="$ac_cpp conftest.$ac_ext"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    grep -v '^ *+' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++    mv -f conftest.er1 conftest.err
++  fi
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } > conftest.i && {
++	 test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
++	 test ! -s conftest.err
++       }
++then :
++  ac_retval=0
++else $as_nop
++  printf "%s\n" "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++    ac_retval=1
++fi
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++  as_fn_set_status $ac_retval
++
++} # ac_fn_cxx_try_cpp
++
++# ac_fn_cxx_try_link LINENO
++# -------------------------
++# Try to link conftest.$ac_ext, and return whether this succeeded.
++ac_fn_cxx_try_link ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext
++  if { { ac_try="$ac_link"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_link") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    grep -v '^ *+' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++    mv -f conftest.er1 conftest.err
++  fi
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && {
++	 test -z "$ac_cxx_werror_flag" ||
++	 test ! -s conftest.err
++       } && test -s conftest$ac_exeext && {
++	 test "$cross_compiling" = yes ||
++	 test -x conftest$ac_exeext
++       }
++then :
++  ac_retval=0
++else $as_nop
++  printf "%s\n" "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++	ac_retval=1
++fi
++  # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information
++  # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would
++  # interfere with the next link command; also delete a directory that is
++  # left behind by Apple's compiler.  We do this before executing the actions.
++  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++  as_fn_set_status $ac_retval
++
++} # ac_fn_cxx_try_link
++
++# ac_fn_c_find_intX_t LINENO BITS VAR
++# -----------------------------------
++# Finds a signed integer type with width BITS, setting cache variable VAR
++# accordingly.
++ac_fn_c_find_intX_t ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5
++printf %s "checking for int$2_t... " >&6; }
++if eval test \${$3+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  eval "$3=no"
++     # Order is important - never check a type that is potentially smaller
++     # than half of the expected target width.
++     for ac_type in int$2_t 'int' 'long int' \
++	 'long long int' 'short int' 'signed char'; do
++       cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_includes_default
++	     enum { N = $2 / 2 - 1 };
++int
++main (void)
++{
++static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))];
++test_array [0] = 0;
++return test_array [0];
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_includes_default
++	        enum { N = $2 / 2 - 1 };
++int
++main (void)
++{
++static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1)
++		 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))];
++test_array [0] = 0;
++return test_array [0];
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++
++else $as_nop
++  case $ac_type in #(
++  int$2_t) :
++    eval "$3=yes" ;; #(
++  *) :
++    eval "$3=\$ac_type" ;;
++esac
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++       if eval test \"x\$"$3"\" = x"no"
++then :
++
++else $as_nop
++  break
++fi
++     done
++fi
++eval ac_res=\$$3
++	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++printf "%s\n" "$ac_res" >&6; }
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++
++} # ac_fn_c_find_intX_t
++
++# ac_fn_c_find_uintX_t LINENO BITS VAR
++# ------------------------------------
++# Finds an unsigned integer type with width BITS, setting cache variable VAR
++# accordingly.
++ac_fn_c_find_uintX_t ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5
++printf %s "checking for uint$2_t... " >&6; }
++if eval test \${$3+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  eval "$3=no"
++     # Order is important - never check a type that is potentially smaller
++     # than half of the expected target width.
++     for ac_type in uint$2_t 'unsigned int' 'unsigned long int' \
++	 'unsigned long long int' 'unsigned short int' 'unsigned char'; do
++       cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_includes_default
++int
++main (void)
++{
++static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)];
++test_array [0] = 0;
++return test_array [0];
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  case $ac_type in #(
++  uint$2_t) :
++    eval "$3=yes" ;; #(
++  *) :
++    eval "$3=\$ac_type" ;;
++esac
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++       if eval test \"x\$"$3"\" = x"no"
++then :
++
++else $as_nop
++  break
++fi
++     done
++fi
++eval ac_res=\$$3
++	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++printf "%s\n" "$ac_res" >&6; }
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++
++} # ac_fn_c_find_uintX_t
++ac_configure_args_raw=
++for ac_arg
++do
++  case $ac_arg in
++  *\'*)
++    ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
++  esac
++  as_fn_append ac_configure_args_raw " '$ac_arg'"
++done
++
++case $ac_configure_args_raw in
++  *$as_nl*)
++    ac_safe_unquote= ;;
++  *)
++    ac_unsafe_z='|&;<>()$`\\"*?[ ''	' # This string ends in space, tab.
++    ac_unsafe_a="$ac_unsafe_z#~"
++    ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g"
++    ac_configure_args_raw=`      printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;;
++esac
++
++cat >config.log <<_ACEOF
++This file contains any messages produced by compilers while
++running configure, to aid debugging if configure makes a mistake.
++
++It was created by bolt plugin for ld $as_me 0.1, which was
++generated by GNU Autoconf 2.71.  Invocation command line was
++
++  $ $0$ac_configure_args_raw
++
++_ACEOF
++exec 5>>config.log
++{
++cat <<_ASUNAME
++## --------- ##
++## Platform. ##
++## --------- ##
++
++hostname = `(hostname || uname -n) 2>/dev/null | sed 1q`
++uname -m = `(uname -m) 2>/dev/null || echo unknown`
++uname -r = `(uname -r) 2>/dev/null || echo unknown`
++uname -s = `(uname -s) 2>/dev/null || echo unknown`
++uname -v = `(uname -v) 2>/dev/null || echo unknown`
++
++/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown`
++/bin/uname -X     = `(/bin/uname -X) 2>/dev/null     || echo unknown`
++
++/bin/arch              = `(/bin/arch) 2>/dev/null              || echo unknown`
++/usr/bin/arch -k       = `(/usr/bin/arch -k) 2>/dev/null       || echo unknown`
++/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown`
++/usr/bin/hostinfo      = `(/usr/bin/hostinfo) 2>/dev/null      || echo unknown`
++/bin/machine           = `(/bin/machine) 2>/dev/null           || echo unknown`
++/usr/bin/oslevel       = `(/usr/bin/oslevel) 2>/dev/null       || echo unknown`
++/bin/universe          = `(/bin/universe) 2>/dev/null          || echo unknown`
++
++_ASUNAME
++
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    printf "%s\n" "PATH: $as_dir"
++  done
++IFS=$as_save_IFS
++
++} >&5
++
++cat >&5 <<_ACEOF
++
++
++## ----------- ##
++## Core tests. ##
++## ----------- ##
++
++_ACEOF
++
++
++# Keep a trace of the command line.
++# Strip out --no-create and --no-recursion so they do not pile up.
++# Strip out --silent because we don't want to record it for future runs.
++# Also quote any args containing shell meta-characters.
++# Make two passes to allow for proper duplicate-argument suppression.
++ac_configure_args=
++ac_configure_args0=
++ac_configure_args1=
++ac_must_keep_next=false
++for ac_pass in 1 2
++do
++  for ac_arg
++  do
++    case $ac_arg in
++    -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;;
++    -q | -quiet | --quiet | --quie | --qui | --qu | --q \
++    | -silent | --silent | --silen | --sile | --sil)
++      continue ;;
++    *\'*)
++      ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
++    esac
++    case $ac_pass in
++    1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
++    2)
++      as_fn_append ac_configure_args1 " '$ac_arg'"
++      if test $ac_must_keep_next = true; then
++	ac_must_keep_next=false # Got value, back to normal.
++      else
++	case $ac_arg in
++	  *=* | --config-cache | -C | -disable-* | --disable-* \
++	  | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \
++	  | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \
++	  | -with-* | --with-* | -without-* | --without-* | --x)
++	    case "$ac_configure_args0 " in
++	      "$ac_configure_args1"*" '$ac_arg' "* ) continue ;;
++	    esac
++	    ;;
++	  -* ) ac_must_keep_next=true ;;
++	esac
++      fi
++      as_fn_append ac_configure_args " '$ac_arg'"
++      ;;
++    esac
++  done
++done
++{ ac_configure_args0=; unset ac_configure_args0;}
++{ ac_configure_args1=; unset ac_configure_args1;}
++
++# When interrupted or exit'd, cleanup temporary files, and complete
++# config.log.  We remove comments because anyway the quotes in there
++# would cause problems or look ugly.
++# WARNING: Use '\'' to represent an apostrophe within the trap.
++# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
++trap 'exit_status=$?
++  # Sanitize IFS.
++  IFS=" ""	$as_nl"
++  # Save into config.log some information that might help in debugging.
++  {
++    echo
++
++    printf "%s\n" "## ---------------- ##
++## Cache variables. ##
++## ---------------- ##"
++    echo
++    # The following way of writing the cache mishandles newlines in values,
++(
++  for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do
++    eval ac_val=\$$ac_var
++    case $ac_val in #(
++    *${as_nl}*)
++      case $ac_var in #(
++      *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
++printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
++      esac
++      case $ac_var in #(
++      _ | IFS | as_nl) ;; #(
++      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
++      *) { eval $ac_var=; unset $ac_var;} ;;
++      esac ;;
++    esac
++  done
++  (set) 2>&1 |
++    case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #(
++    *${as_nl}ac_space=\ *)
++      sed -n \
++	"s/'\''/'\''\\\\'\'''\''/g;
++	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p"
++      ;; #(
++    *)
++      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
++      ;;
++    esac |
++    sort
++)
++    echo
++
++    printf "%s\n" "## ----------------- ##
++## Output variables. ##
++## ----------------- ##"
++    echo
++    for ac_var in $ac_subst_vars
++    do
++      eval ac_val=\$$ac_var
++      case $ac_val in
++      *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
++      esac
++      printf "%s\n" "$ac_var='\''$ac_val'\''"
++    done | sort
++    echo
++
++    if test -n "$ac_subst_files"; then
++      printf "%s\n" "## ------------------- ##
++## File substitutions. ##
++## ------------------- ##"
++      echo
++      for ac_var in $ac_subst_files
++      do
++	eval ac_val=\$$ac_var
++	case $ac_val in
++	*\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
++	esac
++	printf "%s\n" "$ac_var='\''$ac_val'\''"
++      done | sort
++      echo
++    fi
++
++    if test -s confdefs.h; then
++      printf "%s\n" "## ----------- ##
++## confdefs.h. ##
++## ----------- ##"
++      echo
++      cat confdefs.h
++      echo
++    fi
++    test "$ac_signal" != 0 &&
++      printf "%s\n" "$as_me: caught signal $ac_signal"
++    printf "%s\n" "$as_me: exit $exit_status"
++  } >&5
++  rm -f core *.core core.conftest.* &&
++    rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
++    exit $exit_status
++' 0
++for ac_signal in 1 2 13 15; do
++  trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal
++done
++ac_signal=0
++
++# confdefs.h avoids OS command line length limits that DEFS can exceed.
++rm -f -r conftest* confdefs.h
++
++printf "%s\n" "/* confdefs.h */" > confdefs.h
++
++# Predefined preprocessor variables.
++
++printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h
++
++printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h
++
++printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h
++
++printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h
++
++printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h
++
++printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h
++
++
++# Let the site file select an alternate cache file if it wants to.
++# Prefer an explicitly selected file to automatically selected ones.
++if test -n "$CONFIG_SITE"; then
++  ac_site_files="$CONFIG_SITE"
++elif test "x$prefix" != xNONE; then
++  ac_site_files="$prefix/share/config.site $prefix/etc/config.site"
++else
++  ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
++fi
++
++for ac_site_file in $ac_site_files
++do
++  case $ac_site_file in #(
++  */*) :
++     ;; #(
++  *) :
++    ac_site_file=./$ac_site_file ;;
++esac
++  if test -f "$ac_site_file" && test -r "$ac_site_file"; then
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
++printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;}
++    sed 's/^/| /' "$ac_site_file" >&5
++    . "$ac_site_file" \
++      || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "failed to load site script $ac_site_file
++See \`config.log' for more details" "$LINENO" 5; }
++  fi
++done
++
++if test -r "$cache_file"; then
++  # Some versions of bash will fail to source /dev/null (special files
++  # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
++  if test /dev/null != "$cache_file" && test -f "$cache_file"; then
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
++printf "%s\n" "$as_me: loading cache $cache_file" >&6;}
++    case $cache_file in
++      [\\/]* | ?:[\\/]* ) . "$cache_file";;
++      *)                      . "./$cache_file";;
++    esac
++  fi
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
++printf "%s\n" "$as_me: creating cache $cache_file" >&6;}
++  >$cache_file
++fi
++
++as_fn_append ac_header_c_list " stdio.h stdio_h HAVE_STDIO_H"
++# Test code for whether the C compiler supports C89 (global declarations)
++ac_c_conftest_c89_globals='
++/* Does the compiler advertise C89 conformance?
++   Do not test the value of __STDC__, because some compilers set it to 0
++   while being otherwise adequately conformant. */
++#if !defined __STDC__
++# error "Compiler does not advertise C89 conformance"
++#endif
++
++#include 
++#include 
++struct stat;
++/* Most of the following tests are stolen from RCS 5.7 src/conf.sh.  */
++struct buf { int x; };
++struct buf * (*rcsopen) (struct buf *, struct stat *, int);
++static char *e (p, i)
++     char **p;
++     int i;
++{
++  return p[i];
++}
++static char *f (char * (*g) (char **, int), char **p, ...)
++{
++  char *s;
++  va_list v;
++  va_start (v,p);
++  s = g (p, va_arg (v,int));
++  va_end (v);
++  return s;
++}
++
++/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
++   function prototypes and stuff, but not \xHH hex character constants.
++   These do not provoke an error unfortunately, instead are silently treated
++   as an "x".  The following induces an error, until -std is added to get
++   proper ANSI mode.  Curiously \x00 != x always comes out true, for an
++   array size at least.  It is necessary to write \x00 == 0 to get something
++   that is true only with -std.  */
++int osf4_cc_array ['\''\x00'\'' == 0 ? 1 : -1];
++
++/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
++   inside strings and character constants.  */
++#define FOO(x) '\''x'\''
++int xlc6_cc_array[FOO(a) == '\''x'\'' ? 1 : -1];
++
++int test (int i, double x);
++struct s1 {int (*f) (int a);};
++struct s2 {int (*f) (double a);};
++int pairnames (int, char **, int *(*)(struct buf *, struct stat *, int),
++               int, int);'
++
++# Test code for whether the C compiler supports C89 (body of main).
++ac_c_conftest_c89_main='
++ok |= (argc == 0 || f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]);
++'
++
++# Test code for whether the C compiler supports C99 (global declarations)
++ac_c_conftest_c99_globals='
++// Does the compiler advertise C99 conformance?
++#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 199901L
++# error "Compiler does not advertise C99 conformance"
++#endif
++
++#include 
++extern int puts (const char *);
++extern int printf (const char *, ...);
++extern int dprintf (int, const char *, ...);
++extern void *malloc (size_t);
++
++// Check varargs macros.  These examples are taken from C99 6.10.3.5.
++// dprintf is used instead of fprintf to avoid needing to declare
++// FILE and stderr.
++#define debug(...) dprintf (2, __VA_ARGS__)
++#define showlist(...) puts (#__VA_ARGS__)
++#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__))
++static void
++test_varargs_macros (void)
++{
++  int x = 1234;
++  int y = 5678;
++  debug ("Flag");
++  debug ("X = %d\n", x);
++  showlist (The first, second, and third items.);
++  report (x>y, "x is %d but y is %d", x, y);
++}
++
++// Check long long types.
++#define BIG64 18446744073709551615ull
++#define BIG32 4294967295ul
++#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0)
++#if !BIG_OK
++  #error "your preprocessor is broken"
++#endif
++#if BIG_OK
++#else
++  #error "your preprocessor is broken"
++#endif
++static long long int bignum = -9223372036854775807LL;
++static unsigned long long int ubignum = BIG64;
++
++struct incomplete_array
++{
++  int datasize;
++  double data[];
++};
++
++struct named_init {
++  int number;
++  const wchar_t *name;
++  double average;
++};
++
++typedef const char *ccp;
++
++static inline int
++test_restrict (ccp restrict text)
++{
++  // See if C++-style comments work.
++  // Iterate through items via the restricted pointer.
++  // Also check for declarations in for loops.
++  for (unsigned int i = 0; *(text+i) != '\''\0'\''; ++i)
++    continue;
++  return 0;
++}
++
++// Check varargs and va_copy.
++static bool
++test_varargs (const char *format, ...)
++{
++  va_list args;
++  va_start (args, format);
++  va_list args_copy;
++  va_copy (args_copy, args);
++
++  const char *str = "";
++  int number = 0;
++  float fnumber = 0;
++
++  while (*format)
++    {
++      switch (*format++)
++	{
++	case '\''s'\'': // string
++	  str = va_arg (args_copy, const char *);
++	  break;
++	case '\''d'\'': // int
++	  number = va_arg (args_copy, int);
++	  break;
++	case '\''f'\'': // float
++	  fnumber = va_arg (args_copy, double);
++	  break;
++	default:
++	  break;
++	}
++    }
++  va_end (args_copy);
++  va_end (args);
++
++  return *str && number && fnumber;
++}
++'
++
++# Test code for whether the C compiler supports C99 (body of main).
++ac_c_conftest_c99_main='
++  // Check bool.
++  _Bool success = false;
++  success |= (argc != 0);
++
++  // Check restrict.
++  if (test_restrict ("String literal") == 0)
++    success = true;
++  char *restrict newvar = "Another string";
++
++  // Check varargs.
++  success &= test_varargs ("s, d'\'' f .", "string", 65, 34.234);
++  test_varargs_macros ();
++
++  // Check flexible array members.
++  struct incomplete_array *ia =
++    malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10));
++  ia->datasize = 10;
++  for (int i = 0; i < ia->datasize; ++i)
++    ia->data[i] = i * 1.234;
++
++  // Check named initializers.
++  struct named_init ni = {
++    .number = 34,
++    .name = L"Test wide string",
++    .average = 543.34343,
++  };
++
++  ni.number = 58;
++
++  int dynamic_array[ni.number];
++  dynamic_array[0] = argv[0][0];
++  dynamic_array[ni.number - 1] = 543;
++
++  // work around unused variable warnings
++  ok |= (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == '\''x'\''
++	 || dynamic_array[ni.number - 1] != 543);
++'
++
++# Test code for whether the C compiler supports C11 (global declarations)
++ac_c_conftest_c11_globals='
++// Does the compiler advertise C11 conformance?
++#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L
++# error "Compiler does not advertise C11 conformance"
++#endif
++
++// Check _Alignas.
++char _Alignas (double) aligned_as_double;
++char _Alignas (0) no_special_alignment;
++extern char aligned_as_int;
++char _Alignas (0) _Alignas (int) aligned_as_int;
++
++// Check _Alignof.
++enum
++{
++  int_alignment = _Alignof (int),
++  int_array_alignment = _Alignof (int[100]),
++  char_alignment = _Alignof (char)
++};
++_Static_assert (0 < -_Alignof (int), "_Alignof is signed");
++
++// Check _Noreturn.
++int _Noreturn does_not_return (void) { for (;;) continue; }
++
++// Check _Static_assert.
++struct test_static_assert
++{
++  int x;
++  _Static_assert (sizeof (int) <= sizeof (long int),
++                  "_Static_assert does not work in struct");
++  long int y;
++};
++
++// Check UTF-8 literals.
++#define u8 syntax error!
++char const utf8_literal[] = u8"happens to be ASCII" "another string";
++
++// Check duplicate typedefs.
++typedef long *long_ptr;
++typedef long int *long_ptr;
++typedef long_ptr long_ptr;
++
++// Anonymous structures and unions -- taken from C11 6.7.2.1 Example 1.
++struct anonymous
++{
++  union {
++    struct { int i; int j; };
++    struct { int k; long int l; } w;
++  };
++  int m;
++} v1;
++'
++
++# Test code for whether the C compiler supports C11 (body of main).
++ac_c_conftest_c11_main='
++  _Static_assert ((offsetof (struct anonymous, i)
++		   == offsetof (struct anonymous, w.k)),
++		  "Anonymous union alignment botch");
++  v1.i = 2;
++  v1.w.k = 5;
++  ok |= v1.i != 5;
++'
++
++# Test code for whether the C compiler supports C11 (complete).
++ac_c_conftest_c11_program="${ac_c_conftest_c89_globals}
++${ac_c_conftest_c99_globals}
++${ac_c_conftest_c11_globals}
++
++int
++main (int argc, char **argv)
++{
++  int ok = 0;
++  ${ac_c_conftest_c89_main}
++  ${ac_c_conftest_c99_main}
++  ${ac_c_conftest_c11_main}
++  return ok;
++}
++"
++
++# Test code for whether the C compiler supports C99 (complete).
++ac_c_conftest_c99_program="${ac_c_conftest_c89_globals}
++${ac_c_conftest_c99_globals}
++
++int
++main (int argc, char **argv)
++{
++  int ok = 0;
++  ${ac_c_conftest_c89_main}
++  ${ac_c_conftest_c99_main}
++  return ok;
++}
++"
++
++# Test code for whether the C compiler supports C89 (complete).
++ac_c_conftest_c89_program="${ac_c_conftest_c89_globals}
++
++int
++main (int argc, char **argv)
++{
++  int ok = 0;
++  ${ac_c_conftest_c89_main}
++  return ok;
++}
++"
++
++as_fn_append ac_header_c_list " stdlib.h stdlib_h HAVE_STDLIB_H"
++as_fn_append ac_header_c_list " string.h string_h HAVE_STRING_H"
++as_fn_append ac_header_c_list " inttypes.h inttypes_h HAVE_INTTYPES_H"
++as_fn_append ac_header_c_list " stdint.h stdint_h HAVE_STDINT_H"
++as_fn_append ac_header_c_list " strings.h strings_h HAVE_STRINGS_H"
++as_fn_append ac_header_c_list " sys/stat.h sys_stat_h HAVE_SYS_STAT_H"
++as_fn_append ac_header_c_list " sys/types.h sys_types_h HAVE_SYS_TYPES_H"
++as_fn_append ac_header_c_list " unistd.h unistd_h HAVE_UNISTD_H"
++as_fn_append ac_header_c_list " wchar.h wchar_h HAVE_WCHAR_H"
++as_fn_append ac_header_c_list " minix/config.h minix_config_h HAVE_MINIX_CONFIG_H"
++# Test code for whether the C++ compiler supports C++98 (global declarations)
++ac_cxx_conftest_cxx98_globals='
++// Does the compiler advertise C++98 conformance?
++#if !defined __cplusplus || __cplusplus < 199711L
++# error "Compiler does not advertise C++98 conformance"
++#endif
++
++// These inclusions are to reject old compilers that
++// lack the unsuffixed header files.
++#include 
++#include 
++
++//  and  are *not* freestanding headers in C++98.
++extern void assert (int);
++namespace std {
++  extern int strcmp (const char *, const char *);
++}
++
++// Namespaces, exceptions, and templates were all added after "C++ 2.0".
++using std::exception;
++using std::strcmp;
++
++namespace {
++
++void test_exception_syntax()
++{
++  try {
++    throw "test";
++  } catch (const char *s) {
++    // Extra parentheses suppress a warning when building autoconf itself,
++    // due to lint rules shared with more typical C programs.
++    assert (!(strcmp) (s, "test"));
++  }
++}
++
++template  struct test_template
++{
++  T const val;
++  explicit test_template(T t) : val(t) {}
++  template  T add(U u) { return static_cast(u) + val; }
++};
++
++} // anonymous namespace
++'
++
++# Test code for whether the C++ compiler supports C++98 (body of main)
++ac_cxx_conftest_cxx98_main='
++  assert (argc);
++  assert (! argv[0]);
++{
++  test_exception_syntax ();
++  test_template tt (2.0);
++  assert (tt.add (4) == 6.0);
++  assert (true && !false);
++}
++'
++
++# Test code for whether the C++ compiler supports C++11 (global declarations)
++ac_cxx_conftest_cxx11_globals='
++// Does the compiler advertise C++ 2011 conformance?
++#if !defined __cplusplus || __cplusplus < 201103L
++# error "Compiler does not advertise C++11 conformance"
++#endif
++
++namespace cxx11test
++{
++  constexpr int get_val() { return 20; }
++
++  struct testinit
++  {
++    int i;
++    double d;
++  };
++
++  class delegate
++  {
++  public:
++    delegate(int n) : n(n) {}
++    delegate(): delegate(2354) {}
++
++    virtual int getval() { return this->n; };
++  protected:
++    int n;
++  };
++
++  class overridden : public delegate
++  {
++  public:
++    overridden(int n): delegate(n) {}
++    virtual int getval() override final { return this->n * 2; }
++  };
++
++  class nocopy
++  {
++  public:
++    nocopy(int i): i(i) {}
++    nocopy() = default;
++    nocopy(const nocopy&) = delete;
++    nocopy & operator=(const nocopy&) = delete;
++  private:
++    int i;
++  };
++
++  // for testing lambda expressions
++  template  Ret eval(Fn f, Ret v)
++  {
++    return f(v);
++  }
++
++  // for testing variadic templates and trailing return types
++  template  auto sum(V first) -> V
++  {
++    return first;
++  }
++  template  auto sum(V first, Args... rest) -> V
++  {
++    return first + sum(rest...);
++  }
++}
++'
++
++# Test code for whether the C++ compiler supports C++11 (body of main)
++ac_cxx_conftest_cxx11_main='
++{
++  // Test auto and decltype
++  auto a1 = 6538;
++  auto a2 = 48573953.4;
++  auto a3 = "String literal";
++
++  int total = 0;
++  for (auto i = a3; *i; ++i) { total += *i; }
++
++  decltype(a2) a4 = 34895.034;
++}
++{
++  // Test constexpr
++  short sa[cxx11test::get_val()] = { 0 };
++}
++{
++  // Test initializer lists
++  cxx11test::testinit il = { 4323, 435234.23544 };
++}
++{
++  // Test range-based for
++  int array[] = {9, 7, 13, 15, 4, 18, 12, 10, 5, 3,
++                 14, 19, 17, 8, 6, 20, 16, 2, 11, 1};
++  for (auto &x : array) { x += 23; }
++}
++{
++  // Test lambda expressions
++  using cxx11test::eval;
++  assert (eval ([](int x) { return x*2; }, 21) == 42);
++  double d = 2.0;
++  assert (eval ([&](double x) { return d += x; }, 3.0) == 5.0);
++  assert (d == 5.0);
++  assert (eval ([=](double x) mutable { return d += x; }, 4.0) == 9.0);
++  assert (d == 5.0);
++}
++{
++  // Test use of variadic templates
++  using cxx11test::sum;
++  auto a = sum(1);
++  auto b = sum(1, 2);
++  auto c = sum(1.0, 2.0, 3.0);
++}
++{
++  // Test constructor delegation
++  cxx11test::delegate d1;
++  cxx11test::delegate d2();
++  cxx11test::delegate d3(45);
++}
++{
++  // Test override and final
++  cxx11test::overridden o1(55464);
++}
++{
++  // Test nullptr
++  char *c = nullptr;
++}
++{
++  // Test template brackets
++  test_template<::test_template> v(test_template(12));
++}
++{
++  // Unicode literals
++  char const *utf8 = u8"UTF-8 string \u2500";
++  char16_t const *utf16 = u"UTF-8 string \u2500";
++  char32_t const *utf32 = U"UTF-32 string \u2500";
++}
++'
++
++# Test code for whether the C compiler supports C++11 (complete).
++ac_cxx_conftest_cxx11_program="${ac_cxx_conftest_cxx98_globals}
++${ac_cxx_conftest_cxx11_globals}
++
++int
++main (int argc, char **argv)
++{
++  int ok = 0;
++  ${ac_cxx_conftest_cxx98_main}
++  ${ac_cxx_conftest_cxx11_main}
++  return ok;
++}
++"
++
++# Test code for whether the C compiler supports C++98 (complete).
++ac_cxx_conftest_cxx98_program="${ac_cxx_conftest_cxx98_globals}
++int
++main (int argc, char **argv)
++{
++  int ok = 0;
++  ${ac_cxx_conftest_cxx98_main}
++  return ok;
++}
++"
++
++
++# Auxiliary files required by this configure script.
++ac_aux_files="ltmain.sh compile missing install-sh config.guess config.sub"
++
++# Locations in which to look for auxiliary files.
++ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.."
++
++# Search for a directory containing all of the required auxiliary files,
++# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates.
++# If we don't find one directory that contains all the files we need,
++# we report the set of missing files from the *first* directory in
++# $ac_aux_dir_candidates and give up.
++ac_missing_aux_files=""
++ac_first_candidate=:
++printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++as_found=false
++for as_dir in $ac_aux_dir_candidates
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++  as_found=:
++
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}:  trying $as_dir" >&5
++  ac_aux_dir_found=yes
++  ac_install_sh=
++  for ac_aux in $ac_aux_files
++  do
++    # As a special case, if "install-sh" is required, that requirement
++    # can be satisfied by any of "install-sh", "install.sh", or "shtool",
++    # and $ac_install_sh is set appropriately for whichever one is found.
++    if test x"$ac_aux" = x"install-sh"
++    then
++      if test -f "${as_dir}install-sh"; then
++        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}install-sh found" >&5
++        ac_install_sh="${as_dir}install-sh -c"
++      elif test -f "${as_dir}install.sh"; then
++        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}install.sh found" >&5
++        ac_install_sh="${as_dir}install.sh -c"
++      elif test -f "${as_dir}shtool"; then
++        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}shtool found" >&5
++        ac_install_sh="${as_dir}shtool install -c"
++      else
++        ac_aux_dir_found=no
++        if $ac_first_candidate; then
++          ac_missing_aux_files="${ac_missing_aux_files} install-sh"
++        else
++          break
++        fi
++      fi
++    else
++      if test -f "${as_dir}${ac_aux}"; then
++        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}${ac_aux} found" >&5
++      else
++        ac_aux_dir_found=no
++        if $ac_first_candidate; then
++          ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}"
++        else
++          break
++        fi
++      fi
++    fi
++  done
++  if test "$ac_aux_dir_found" = yes; then
++    ac_aux_dir="$as_dir"
++    break
++  fi
++  ac_first_candidate=false
++
++  as_found=false
++done
++IFS=$as_save_IFS
++if $as_found
++then :
++
++else $as_nop
++  as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5
++fi
++
++
++# These three variables are undocumented and unsupported,
++# and are intended to be withdrawn in a future Autoconf release.
++# They can cause serious problems if a builder's source tree is in a directory
++# whose full name contains unusual characters.
++if test -f "${ac_aux_dir}config.guess"; then
++  ac_config_guess="$SHELL ${ac_aux_dir}config.guess"
++fi
++if test -f "${ac_aux_dir}config.sub"; then
++  ac_config_sub="$SHELL ${ac_aux_dir}config.sub"
++fi
++if test -f "$ac_aux_dir/configure"; then
++  ac_configure="$SHELL ${ac_aux_dir}configure"
++fi
++
++# Check that the precious variables saved in the cache have kept the same
++# value.
++ac_cache_corrupted=false
++for ac_var in $ac_precious_vars; do
++  eval ac_old_set=\$ac_cv_env_${ac_var}_set
++  eval ac_new_set=\$ac_env_${ac_var}_set
++  eval ac_old_val=\$ac_cv_env_${ac_var}_value
++  eval ac_new_val=\$ac_env_${ac_var}_value
++  case $ac_old_set,$ac_new_set in
++    set,)
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
++printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
++      ac_cache_corrupted=: ;;
++    ,set)
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
++printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
++      ac_cache_corrupted=: ;;
++    ,);;
++    *)
++      if test "x$ac_old_val" != "x$ac_new_val"; then
++	# differences in whitespace do not lead to failure.
++	ac_old_val_w=`echo x $ac_old_val`
++	ac_new_val_w=`echo x $ac_new_val`
++	if test "$ac_old_val_w" != "$ac_new_val_w"; then
++	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
++printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
++	  ac_cache_corrupted=:
++	else
++	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
++printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
++	  eval $ac_var=\$ac_old_val
++	fi
++	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
++printf "%s\n" "$as_me:   former value:  \`$ac_old_val'" >&2;}
++	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
++printf "%s\n" "$as_me:   current value: \`$ac_new_val'" >&2;}
++      fi;;
++  esac
++  # Pass precious variables to config.status.
++  if test "$ac_new_set" = set; then
++    case $ac_new_val in
++    *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
++    *) ac_arg=$ac_var=$ac_new_val ;;
++    esac
++    case " $ac_configure_args " in
++      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
++      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
++    esac
++  fi
++done
++if $ac_cache_corrupted; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
++printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;}
++  as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file'
++	    and start over" "$LINENO" 5
++fi
++## -------------------- ##
++## Main body of script. ##
++## -------------------- ##
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++
++
++
++  # Make sure we can run config.sub.
++$SHELL "${ac_aux_dir}config.sub" sun4 >/dev/null 2>&1 ||
++  as_fn_error $? "cannot run $SHELL ${ac_aux_dir}config.sub" "$LINENO" 5
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
++printf %s "checking build system type... " >&6; }
++if test ${ac_cv_build+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_build_alias=$build_alias
++test "x$ac_build_alias" = x &&
++  ac_build_alias=`$SHELL "${ac_aux_dir}config.guess"`
++test "x$ac_build_alias" = x &&
++  as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
++ac_cv_build=`$SHELL "${ac_aux_dir}config.sub" $ac_build_alias` ||
++  as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $ac_build_alias failed" "$LINENO" 5
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
++printf "%s\n" "$ac_cv_build" >&6; }
++case $ac_cv_build in
++*-*-*) ;;
++*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
++esac
++build=$ac_cv_build
++ac_save_IFS=$IFS; IFS='-'
++set x $ac_cv_build
++shift
++build_cpu=$1
++build_vendor=$2
++shift; shift
++# Remember, the first character of IFS is used to create $*,
++# except with old shells:
++build_os=$*
++IFS=$ac_save_IFS
++case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
++printf %s "checking host system type... " >&6; }
++if test ${ac_cv_host+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test "x$host_alias" = x; then
++  ac_cv_host=$ac_cv_build
++else
++  ac_cv_host=`$SHELL "${ac_aux_dir}config.sub" $host_alias` ||
++    as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $host_alias failed" "$LINENO" 5
++fi
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
++printf "%s\n" "$ac_cv_host" >&6; }
++case $ac_cv_host in
++*-*-*) ;;
++*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
++esac
++host=$ac_cv_host
++ac_save_IFS=$IFS; IFS='-'
++set x $ac_cv_host
++shift
++host_cpu=$1
++host_vendor=$2
++shift; shift
++# Remember, the first character of IFS is used to create $*,
++# except with old shells:
++host_os=$*
++IFS=$ac_save_IFS
++case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking target system type" >&5
++printf %s "checking target system type... " >&6; }
++if test ${ac_cv_target+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test "x$target_alias" = x; then
++  ac_cv_target=$ac_cv_host
++else
++  ac_cv_target=`$SHELL "${ac_aux_dir}config.sub" $target_alias` ||
++    as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $target_alias failed" "$LINENO" 5
++fi
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5
++printf "%s\n" "$ac_cv_target" >&6; }
++case $ac_cv_target in
++*-*-*) ;;
++*) as_fn_error $? "invalid value of canonical target" "$LINENO" 5;;
++esac
++target=$ac_cv_target
++ac_save_IFS=$IFS; IFS='-'
++set x $ac_cv_target
++shift
++target_cpu=$1
++target_vendor=$2
++shift; shift
++# Remember, the first character of IFS is used to create $*,
++# except with old shells:
++target_os=$*
++IFS=$ac_save_IFS
++case $target_os in *\ *) target_os=`echo "$target_os" | sed 's/ /-/g'`;; esac
++
++
++# The aliases save the names the user supplied, while $host etc.
++# will get canonicalized.
++test -n "$target_alias" &&
++  test "$program_prefix$program_suffix$program_transform_name" = \
++    NONENONEs,x,x, &&
++  program_prefix=${target_alias}-
++GCC_TOPLEV_SUBDIRS
++am__api_version='1.16'
++
++
++  # Find a good install program.  We prefer a C program (faster),
++# so one script is as good as another.  But avoid the broken or
++# incompatible versions:
++# SysV /etc/install, /usr/sbin/install
++# SunOS /usr/etc/install
++# IRIX /sbin/install
++# AIX /bin/install
++# AmigaOS /C/install, which installs bootblocks on floppy discs
++# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag
++# AFS /usr/afsws/bin/install, which mishandles nonexistent args
++# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff"
++# OS/2's system install, which has a completely different semantic
++# ./install, which can be erroneously created by make from ./install.sh.
++# Reject install programs that cannot install multiple files.
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
++printf %s "checking for a BSD-compatible install... " >&6; }
++if test -z "$INSTALL"; then
++if test ${ac_cv_path_install+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    # Account for fact that we put trailing slashes in our PATH walk.
++case $as_dir in #((
++  ./ | /[cC]/* | \
++  /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
++  ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
++  /usr/ucb/* ) ;;
++  *)
++    # OSF1 and SCO ODT 3.0 have their own names for install.
++    # Don't use installbsd from OSF since it installs stuff as root
++    # by default.
++    for ac_prog in ginstall scoinst install; do
++      for ac_exec_ext in '' $ac_executable_extensions; do
++	if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then
++	  if test $ac_prog = install &&
++	    grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
++	    # AIX install.  It has an incompatible calling convention.
++	    :
++	  elif test $ac_prog = install &&
++	    grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
++	    # program-specific install script used by HP pwplus--don't use.
++	    :
++	  else
++	    rm -rf conftest.one conftest.two conftest.dir
++	    echo one > conftest.one
++	    echo two > conftest.two
++	    mkdir conftest.dir
++	    if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" &&
++	      test -s conftest.one && test -s conftest.two &&
++	      test -s conftest.dir/conftest.one &&
++	      test -s conftest.dir/conftest.two
++	    then
++	      ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c"
++	      break 3
++	    fi
++	  fi
++	fi
++      done
++    done
++    ;;
++esac
++
++  done
++IFS=$as_save_IFS
++
++rm -rf conftest.one conftest.two conftest.dir
++
++fi
++  if test ${ac_cv_path_install+y}; then
++    INSTALL=$ac_cv_path_install
++  else
++    # As a last resort, use the slow shell script.  Don't cache a
++    # value for INSTALL within a source directory, because that will
++    # break other packages using the cache if that directory is
++    # removed, or if the value is a relative name.
++    INSTALL=$ac_install_sh
++  fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
++printf "%s\n" "$INSTALL" >&6; }
++
++# Use test -z because SunOS4 sh mishandles braces in ${var-val}.
++# It thinks the first close brace ends the variable substitution.
++test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}'
++
++test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
++
++test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
++printf %s "checking whether build environment is sane... " >&6; }
++# Reject unsafe characters in $srcdir or the absolute working directory
++# name.  Accept space and tab only in the latter.
++am_lf='
++'
++case `pwd` in
++  *[\\\"\#\$\&\'\`$am_lf]*)
++    as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;;
++esac
++case $srcdir in
++  *[\\\"\#\$\&\'\`$am_lf\ \	]*)
++    as_fn_error $? "unsafe srcdir value: '$srcdir'" "$LINENO" 5;;
++esac
++
++# Do 'set' in a subshell so we don't clobber the current shell's
++# arguments.  Must try -L first in case configure is actually a
++# symlink; some systems play weird games with the mod time of symlinks
++# (eg FreeBSD returns the mod time of the symlink's containing
++# directory).
++if (
++   am_has_slept=no
++   for am_try in 1 2; do
++     echo "timestamp, slept: $am_has_slept" > conftest.file
++     set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null`
++     if test "$*" = "X"; then
++	# -L didn't work.
++	set X `ls -t "$srcdir/configure" conftest.file`
++     fi
++     if test "$*" != "X $srcdir/configure conftest.file" \
++	&& test "$*" != "X conftest.file $srcdir/configure"; then
++
++	# If neither matched, then we have a broken ls.  This can happen
++	# if, for instance, CONFIG_SHELL is bash and it inherits a
++	# broken ls alias from the environment.  This has actually
++	# happened.  Such a system could not be considered "sane".
++	as_fn_error $? "ls -t appears to fail.  Make sure there is not a broken
++  alias in your environment" "$LINENO" 5
++     fi
++     if test "$2" = conftest.file || test $am_try -eq 2; then
++       break
++     fi
++     # Just in case.
++     sleep 1
++     am_has_slept=yes
++   done
++   test "$2" = conftest.file
++   )
++then
++   # Ok.
++   :
++else
++   as_fn_error $? "newly created file is older than distributed files!
++Check your system clock" "$LINENO" 5
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++printf "%s\n" "yes" >&6; }
++# If we didn't sleep, we still need to ensure time stamps of config.status and
++# generated files are strictly newer.
++am_sleep_pid=
++if grep 'slept: no' conftest.file >/dev/null 2>&1; then
++  ( sleep 1 ) &
++  am_sleep_pid=$!
++fi
++
++rm -f conftest.file
++
++test "$program_prefix" != NONE &&
++  program_transform_name="s&^&$program_prefix&;$program_transform_name"
++# Use a double $ so make ignores it.
++test "$program_suffix" != NONE &&
++  program_transform_name="s&\$&$program_suffix&;$program_transform_name"
++# Double any \ or $.
++# By default was `s,x,x', remove it if useless.
++ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
++program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"`
++
++
++# Expand $ac_aux_dir to an absolute path.
++am_aux_dir=`cd "$ac_aux_dir" && pwd`
++
++
++  if test x"${MISSING+set}" != xset; then
++  MISSING="\${SHELL} '$am_aux_dir/missing'"
++fi
++# Use eval to expand $SHELL
++if eval "$MISSING --is-lightweight"; then
++  am_missing_run="$MISSING "
++else
++  am_missing_run=
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
++printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
++fi
++
++if test x"${install_sh+set}" != xset; then
++  case $am_aux_dir in
++  *\ * | *\	*)
++    install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;;
++  *)
++    install_sh="\${SHELL} $am_aux_dir/install-sh"
++  esac
++fi
++
++# Installed binaries are usually stripped using 'strip' when the user
++# run "make install-strip".  However 'strip' might not be the right
++# tool to use in cross-compilation environments, therefore Automake
++# will honor the 'STRIP' environment variable to overrule this program.
++if test "$cross_compiling" != no; then
++  if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
++set dummy ${ac_tool_prefix}strip; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_STRIP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$STRIP"; then
++  ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_STRIP="${ac_tool_prefix}strip"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++STRIP=$ac_cv_prog_STRIP
++if test -n "$STRIP"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
++printf "%s\n" "$STRIP" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_STRIP"; then
++  ac_ct_STRIP=$STRIP
++  # Extract the first word of "strip", so it can be a program name with args.
++set dummy strip; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_STRIP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_STRIP"; then
++  ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_STRIP="strip"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
++if test -n "$ac_ct_STRIP"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
++printf "%s\n" "$ac_ct_STRIP" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_STRIP" = x; then
++    STRIP=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    STRIP=$ac_ct_STRIP
++  fi
++else
++  STRIP="$ac_cv_prog_STRIP"
++fi
++
++fi
++INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
++
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5
++printf %s "checking for a race-free mkdir -p... " >&6; }
++if test -z "$MKDIR_P"; then
++  if test ${ac_cv_path_mkdir+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_prog in mkdir gmkdir; do
++	 for ac_exec_ext in '' $ac_executable_extensions; do
++	   as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue
++	   case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #(
++	     'mkdir ('*'coreutils) '* | \
++	     'BusyBox '* | \
++	     'mkdir (fileutils) '4.1*)
++	       ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext
++	       break 3;;
++	   esac
++	 done
++       done
++  done
++IFS=$as_save_IFS
++
++fi
++
++  test -d ./--version && rmdir ./--version
++  if test ${ac_cv_path_mkdir+y}; then
++    MKDIR_P="$ac_cv_path_mkdir -p"
++  else
++    # As a last resort, use the slow shell script.  Don't cache a
++    # value for MKDIR_P within a source directory, because that will
++    # break other packages using the cache if that directory is
++    # removed, or if the value is a relative name.
++    MKDIR_P="$ac_install_sh -d"
++  fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
++printf "%s\n" "$MKDIR_P" >&6; }
++
++for ac_prog in gawk mawk nawk awk
++do
++  # Extract the first word of "$ac_prog", so it can be a program name with args.
++set dummy $ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_AWK+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$AWK"; then
++  ac_cv_prog_AWK="$AWK" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_AWK="$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++AWK=$ac_cv_prog_AWK
++if test -n "$AWK"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
++printf "%s\n" "$AWK" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  test -n "$AWK" && break
++done
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
++printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
++set x ${MAKE-make}
++ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
++if eval test \${ac_cv_prog_make_${ac_make}_set+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat >conftest.make <<\_ACEOF
++SHELL = /bin/sh
++all:
++	@echo '@@@%%%=$(MAKE)=@@@%%%'
++_ACEOF
++# GNU make sometimes prints "make[1]: Entering ...", which would confuse us.
++case `${MAKE-make} -f conftest.make 2>/dev/null` in
++  *@@@%%%=?*=@@@%%%*)
++    eval ac_cv_prog_make_${ac_make}_set=yes;;
++  *)
++    eval ac_cv_prog_make_${ac_make}_set=no;;
++esac
++rm -f conftest.make
++fi
++if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++printf "%s\n" "yes" >&6; }
++  SET_MAKE=
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++  SET_MAKE="MAKE=${MAKE-make}"
++fi
++
++rm -rf .tst 2>/dev/null
++mkdir .tst 2>/dev/null
++if test -d .tst; then
++  am__leading_dot=.
++else
++  am__leading_dot=_
++fi
++rmdir .tst 2>/dev/null
++
++# Check whether --enable-silent-rules was given.
++if test ${enable_silent_rules+y}
++then :
++  enableval=$enable_silent_rules;
++fi
++
++case $enable_silent_rules in # (((
++  yes) AM_DEFAULT_VERBOSITY=0;;
++   no) AM_DEFAULT_VERBOSITY=1;;
++    *) AM_DEFAULT_VERBOSITY=1;;
++esac
++am_make=${MAKE-make}
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
++printf %s "checking whether $am_make supports nested variables... " >&6; }
++if test ${am_cv_make_support_nested_variables+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if printf "%s\n" 'TRUE=$(BAR$(V))
++BAR0=false
++BAR1=true
++V=1
++am__doit:
++	@$(TRUE)
++.PHONY: am__doit' | $am_make -f - >/dev/null 2>&1; then
++  am_cv_make_support_nested_variables=yes
++else
++  am_cv_make_support_nested_variables=no
++fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
++printf "%s\n" "$am_cv_make_support_nested_variables" >&6; }
++if test $am_cv_make_support_nested_variables = yes; then
++    AM_V='$(V)'
++  AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
++else
++  AM_V=$AM_DEFAULT_VERBOSITY
++  AM_DEFAULT_V=$AM_DEFAULT_VERBOSITY
++fi
++AM_BACKSLASH='\'
++
++if test "`cd $srcdir && pwd`" != "`pwd`"; then
++  # Use -I$(srcdir) only when $(srcdir) != ., so that make's output
++  # is not polluted with repeated "-I."
++  am__isrc=' -I$(srcdir)'
++  # test to see if srcdir already configured
++  if test -f $srcdir/config.status; then
++    as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5
++  fi
++fi
++
++# test whether we have cygpath
++if test -z "$CYGPATH_W"; then
++  if (cygpath --version) >/dev/null 2>/dev/null; then
++    CYGPATH_W='cygpath -w'
++  else
++    CYGPATH_W=echo
++  fi
++fi
++
++
++# Define the identity of the package.
++ PACKAGE='bolt-plugin'
++ VERSION='0.1'
++
++
++printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
++
++
++printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h
++
++# Some tools Automake needs.
++
++ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"}
++
++
++AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"}
++
++
++AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"}
++
++
++AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"}
++
++
++MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"}
++
++# For better backward compatibility.  To be removed once Automake 1.9.x
++# dies out for good.  For more background, see:
++# 
++# 
++mkdir_p='$(MKDIR_P)'
++
++# We need awk for the "check" target (and possibly the TAP driver).  The
++# system "awk" is bad on some platforms.
++# Always define AMTAR for backward compatibility.  Yes, it's still used
++# in the wild :-(  We should find a proper way to deprecate it ...
++AMTAR='$${TAR-tar}'
++
++
++# We'll loop over all known methods to create a tar archive until one works.
++_am_tools='gnutar  pax cpio none'
++
++am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
++
++
++
++
++
++# Variables for tags utilities; see am/tags.am
++if test -z "$CTAGS"; then
++  CTAGS=ctags
++fi
++
++if test -z "$ETAGS"; then
++  ETAGS=etags
++fi
++
++if test -z "$CSCOPE"; then
++  CSCOPE=cscope
++fi
++
++
++
++# POSIX will say in a future version that running "rm -f" with no argument
++# is OK; and we want to be able to make that assumption in our Makefile
++# recipes.  So use an aggressive probe to check that the usage we want is
++# actually supported "in the wild" to an acceptable degree.
++# See automake bug#10828.
++# To make any issue more visible, cause the running configure to be aborted
++# by default if the 'rm' program in use doesn't match our expectations; the
++# user can still override this though.
++if rm -f && rm -fr && rm -rf; then : OK; else
++  cat >&2 <<'END'
++Oops!
++
++Your 'rm' program seems unable to run without file operands specified
++on the command line, even when the '-f' option is present.  This is contrary
++to the behaviour of most rm programs out there, and not conforming with
++the upcoming POSIX standard: 
++
++Please tell bug-automake@gnu.org about your system, including the value
++of your $PATH and any error possibly output before this message.  This
++can help us improve future automake versions.
++
++END
++  if test x"$ACCEPT_INFERIOR_RM_PROGRAM" = x"yes"; then
++    echo 'Configuration will proceed anyway, since you have set the' >&2
++    echo 'ACCEPT_INFERIOR_RM_PROGRAM variable to "yes"' >&2
++    echo >&2
++  else
++    cat >&2 <<'END'
++Aborting the configuration process, to ensure you take notice of the issue.
++
++You can download and install GNU coreutils to get an 'rm' implementation
++that behaves properly: .
++
++If you want to complete the configuration process using your problematic
++'rm' anyway, export the environment variable ACCEPT_INFERIOR_RM_PROGRAM
++to "yes", and re-run configure.
++
++END
++    as_fn_error $? "Your 'rm' program is bad, sorry." "$LINENO" 5
++  fi
++fi
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5
++printf %s "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
++    # Check whether --enable-maintainer-mode was given.
++if test ${enable_maintainer_mode+y}
++then :
++  enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval
++else $as_nop
++  USE_MAINTAINER_MODE=no
++fi
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5
++printf "%s\n" "$USE_MAINTAINER_MODE" >&6; }
++   if test $USE_MAINTAINER_MODE = yes; then
++  MAINTAINER_MODE_TRUE=
++  MAINTAINER_MODE_FALSE='#'
++else
++  MAINTAINER_MODE_TRUE='#'
++  MAINTAINER_MODE_FALSE=
++fi
++
++  MAINT=$MAINTAINER_MODE_TRUE
++
++
++
++# Check whether --with-libiberty was given.
++if test ${with_libiberty+y}
++then :
++  withval=$with_libiberty;
++else $as_nop
++  with_libiberty=../libiberty
++fi
++
++
++
++
++
++
++
++
++
++
++
++DEPDIR="${am__leading_dot}deps"
++
++ac_config_commands="$ac_config_commands depfiles"
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5
++printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; }
++cat > confinc.mk << 'END'
++am__doit:
++	@echo this is the am__doit target >confinc.out
++.PHONY: am__doit
++END
++am__include="#"
++am__quote=
++# BSD make does it like this.
++echo '.include "confinc.mk" # ignored' > confmf.BSD
++# Other make implementations (GNU, Solaris 10, AIX) do it like this.
++echo 'include confinc.mk # ignored' > confmf.GNU
++_am_result=no
++for s in GNU BSD; do
++  { echo "$as_me:$LINENO: ${MAKE-make} -f confmf.$s && cat confinc.out" >&5
++   (${MAKE-make} -f confmf.$s && cat confinc.out) >&5 2>&5
++   ac_status=$?
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   (exit $ac_status); }
++  case $?:`cat confinc.out 2>/dev/null` in #(
++  '0:this is the am__doit target') :
++    case $s in #(
++  BSD) :
++    am__include='.include' am__quote='"' ;; #(
++  *) :
++    am__include='include' am__quote='' ;;
++esac ;; #(
++  *) :
++     ;;
++esac
++  if test "$am__include" != "#"; then
++    _am_result="yes ($s style)"
++    break
++  fi
++done
++rm -f confinc.* confmf.*
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5
++printf "%s\n" "${_am_result}" >&6; }
++
++# Check whether --enable-dependency-tracking was given.
++if test ${enable_dependency_tracking+y}
++then :
++  enableval=$enable_dependency_tracking;
++fi
++
++if test "x$enable_dependency_tracking" != xno; then
++  am_depcomp="$ac_aux_dir/depcomp"
++  AMDEPBACKSLASH='\'
++  am__nodep='_no'
++fi
++ if test "x$enable_dependency_tracking" != xno; then
++  AMDEP_TRUE=
++  AMDEP_FALSE='#'
++else
++  AMDEP_TRUE='#'
++  AMDEP_FALSE=
++fi
++
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
++set dummy ${ac_tool_prefix}gcc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="${ac_tool_prefix}gcc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_CC"; then
++  ac_ct_CC=$CC
++  # Extract the first word of "gcc", so it can be a program name with args.
++set dummy gcc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_CC"; then
++  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CC="gcc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_CC=$ac_cv_prog_ac_ct_CC
++if test -n "$ac_ct_CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++printf "%s\n" "$ac_ct_CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_CC" = x; then
++    CC=""
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    CC=$ac_ct_CC
++  fi
++else
++  CC="$ac_cv_prog_CC"
++fi
++
++if test -z "$CC"; then
++          if test -n "$ac_tool_prefix"; then
++    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
++set dummy ${ac_tool_prefix}cc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="${ac_tool_prefix}cc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  fi
++fi
++if test -z "$CC"; then
++  # Extract the first word of "cc", so it can be a program name with args.
++set dummy cc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++  ac_prog_rejected=no
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
++       ac_prog_rejected=yes
++       continue
++     fi
++    ac_cv_prog_CC="cc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++if test $ac_prog_rejected = yes; then
++  # We found a bogon in the path, so make sure we never use it.
++  set dummy $ac_cv_prog_CC
++  shift
++  if test $# != 0; then
++    # We chose a different compiler from the bogus one.
++    # However, it has the same basename, so the bogon will be chosen
++    # first if we set CC to just the basename; use the full file name.
++    shift
++    ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@"
++  fi
++fi
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$CC"; then
++  if test -n "$ac_tool_prefix"; then
++  for ac_prog in cl.exe
++  do
++    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
++set dummy $ac_tool_prefix$ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++    test -n "$CC" && break
++  done
++fi
++if test -z "$CC"; then
++  ac_ct_CC=$CC
++  for ac_prog in cl.exe
++do
++  # Extract the first word of "$ac_prog", so it can be a program name with args.
++set dummy $ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_CC"; then
++  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CC="$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_CC=$ac_cv_prog_ac_ct_CC
++if test -n "$ac_ct_CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++printf "%s\n" "$ac_ct_CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  test -n "$ac_ct_CC" && break
++done
++
++  if test "x$ac_ct_CC" = x; then
++    CC=""
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    CC=$ac_ct_CC
++  fi
++fi
++
++fi
++if test -z "$CC"; then
++  if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args.
++set dummy ${ac_tool_prefix}clang; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="${ac_tool_prefix}clang"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_CC"; then
++  ac_ct_CC=$CC
++  # Extract the first word of "clang", so it can be a program name with args.
++set dummy clang; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_CC"; then
++  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CC="clang"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_CC=$ac_cv_prog_ac_ct_CC
++if test -n "$ac_ct_CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++printf "%s\n" "$ac_ct_CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_CC" = x; then
++    CC=""
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    CC=$ac_ct_CC
++  fi
++else
++  CC="$ac_cv_prog_CC"
++fi
++
++fi
++
++
++test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "no acceptable C compiler found in \$PATH
++See \`config.log' for more details" "$LINENO" 5; }
++
++# Provide some information about the compiler.
++printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
++set X $ac_compile
++ac_compiler=$2
++for ac_option in --version -v -V -qversion -version; do
++  { { ac_try="$ac_compiler $ac_option >&5"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    sed '10a\
++... rest of stderr output deleted ...
++         10q' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++  fi
++  rm -f conftest.er1 conftest.err
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++done
++
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++ac_clean_files_save=$ac_clean_files
++ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
++# Try to create an executable without -o first, disregard a.out.
++# It will help us diagnose broken compilers, and finding out an intuition
++# of exeext.
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
++printf %s "checking whether the C compiler works... " >&6; }
++ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
++
++# The possible output files:
++ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
++
++ac_rmfiles=
++for ac_file in $ac_files
++do
++  case $ac_file in
++    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
++    * ) ac_rmfiles="$ac_rmfiles $ac_file";;
++  esac
++done
++rm -f $ac_rmfiles
++
++if { { ac_try="$ac_link_default"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_link_default") 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++then :
++  # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
++# So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
++# in a Makefile.  We should not override ac_cv_exeext if it was cached,
++# so that the user can short-circuit this test for compilers unknown to
++# Autoconf.
++for ac_file in $ac_files ''
++do
++  test -f "$ac_file" || continue
++  case $ac_file in
++    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj )
++	;;
++    [ab].out )
++	# We found the default executable, but exeext='' is most
++	# certainly right.
++	break;;
++    *.* )
++	if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no;
++	then :; else
++	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
++	fi
++	# We set ac_cv_exeext here because the later test for it is not
++	# safe: cross compilers may not add the suffix if given an `-o'
++	# argument, so we may need to know it at that point already.
++	# Even if this section looks crufty: it has the advantage of
++	# actually working.
++	break;;
++    * )
++	break;;
++  esac
++done
++test "$ac_cv_exeext" = no && ac_cv_exeext=
++
++else $as_nop
++  ac_file=''
++fi
++if test -z "$ac_file"
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++printf "%s\n" "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error 77 "C compiler cannot create executables
++See \`config.log' for more details" "$LINENO" 5; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++printf "%s\n" "yes" >&6; }
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
++printf %s "checking for C compiler default output file name... " >&6; }
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
++printf "%s\n" "$ac_file" >&6; }
++ac_exeext=$ac_cv_exeext
++
++rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
++ac_clean_files=$ac_clean_files_save
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
++printf %s "checking for suffix of executables... " >&6; }
++if { { ac_try="$ac_link"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_link") 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++then :
++  # If both `conftest.exe' and `conftest' are `present' (well, observable)
++# catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
++# work properly (i.e., refer to `conftest.exe'), while it won't with
++# `rm'.
++for ac_file in conftest.exe conftest conftest.*; do
++  test -f "$ac_file" || continue
++  case $ac_file in
++    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;;
++    *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
++	  break;;
++    * ) break;;
++  esac
++done
++else $as_nop
++  { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "cannot compute suffix of executables: cannot compile and link
++See \`config.log' for more details" "$LINENO" 5; }
++fi
++rm -f conftest conftest$ac_cv_exeext
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
++printf "%s\n" "$ac_cv_exeext" >&6; }
++
++rm -f conftest.$ac_ext
++EXEEXT=$ac_cv_exeext
++ac_exeext=$EXEEXT
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
++int
++main (void)
++{
++FILE *f = fopen ("conftest.out", "w");
++ return ferror (f) || fclose (f) != 0;
++
++  ;
++  return 0;
++}
++_ACEOF
++ac_clean_files="$ac_clean_files conftest.out"
++# Check that the compiler produces executables we can run.  If not, either
++# the compiler is broken, or we cross compile.
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
++printf %s "checking whether we are cross compiling... " >&6; }
++if test "$cross_compiling" != yes; then
++  { { ac_try="$ac_link"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_link") 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++  if { ac_try='./conftest$ac_cv_exeext'
++  { { case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_try") 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; }; then
++    cross_compiling=no
++  else
++    if test "$cross_compiling" = maybe; then
++	cross_compiling=yes
++    else
++	{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error 77 "cannot run C compiled programs.
++If you meant to cross compile, use \`--host'.
++See \`config.log' for more details" "$LINENO" 5; }
++    fi
++  fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
++printf "%s\n" "$cross_compiling" >&6; }
++
++rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
++ac_clean_files=$ac_clean_files_save
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
++printf %s "checking for suffix of object files... " >&6; }
++if test ${ac_cv_objext+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++rm -f conftest.o conftest.obj
++if { { ac_try="$ac_compile"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_compile") 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++then :
++  for ac_file in conftest.o conftest.obj conftest.*; do
++  test -f "$ac_file" || continue;
++  case $ac_file in
++    *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;;
++    *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'`
++       break;;
++  esac
++done
++else $as_nop
++  printf "%s\n" "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "cannot compute suffix of object files: cannot compile
++See \`config.log' for more details" "$LINENO" 5; }
++fi
++rm -f conftest.$ac_cv_objext conftest.$ac_ext
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
++printf "%s\n" "$ac_cv_objext" >&6; }
++OBJEXT=$ac_cv_objext
++ac_objext=$OBJEXT
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5
++printf %s "checking whether the compiler supports GNU C... " >&6; }
++if test ${ac_cv_c_compiler_gnu+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++#ifndef __GNUC__
++       choke me
++#endif
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_compiler_gnu=yes
++else $as_nop
++  ac_compiler_gnu=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++ac_cv_c_compiler_gnu=$ac_compiler_gnu
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
++printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; }
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++if test $ac_compiler_gnu = yes; then
++  GCC=yes
++else
++  GCC=
++fi
++ac_test_CFLAGS=${CFLAGS+y}
++ac_save_CFLAGS=$CFLAGS
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
++printf %s "checking whether $CC accepts -g... " >&6; }
++if test ${ac_cv_prog_cc_g+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_save_c_werror_flag=$ac_c_werror_flag
++   ac_c_werror_flag=yes
++   ac_cv_prog_cc_g=no
++   CFLAGS="-g"
++   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_g=yes
++else $as_nop
++  CFLAGS=""
++      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++
++else $as_nop
++  ac_c_werror_flag=$ac_save_c_werror_flag
++	 CFLAGS="-g"
++	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_g=yes
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++   ac_c_werror_flag=$ac_save_c_werror_flag
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
++printf "%s\n" "$ac_cv_prog_cc_g" >&6; }
++if test $ac_test_CFLAGS; then
++  CFLAGS=$ac_save_CFLAGS
++elif test $ac_cv_prog_cc_g = yes; then
++  if test "$GCC" = yes; then
++    CFLAGS="-g -O2"
++  else
++    CFLAGS="-g"
++  fi
++else
++  if test "$GCC" = yes; then
++    CFLAGS="-O2"
++  else
++    CFLAGS=
++  fi
++fi
++ac_prog_cc_stdc=no
++if test x$ac_prog_cc_stdc = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5
++printf %s "checking for $CC option to enable C11 features... " >&6; }
++if test ${ac_cv_prog_cc_c11+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cc_c11=no
++ac_save_CC=$CC
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_c_conftest_c11_program
++_ACEOF
++for ac_arg in '' -std=gnu11
++do
++  CC="$ac_save_CC $ac_arg"
++  if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_c11=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cc_c11" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CC=$ac_save_CC
++fi
++
++if test "x$ac_cv_prog_cc_c11" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cc_c11" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5
++printf "%s\n" "$ac_cv_prog_cc_c11" >&6; }
++     CC="$CC $ac_cv_prog_cc_c11"
++fi
++  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11
++  ac_prog_cc_stdc=c11
++fi
++fi
++if test x$ac_prog_cc_stdc = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5
++printf %s "checking for $CC option to enable C99 features... " >&6; }
++if test ${ac_cv_prog_cc_c99+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cc_c99=no
++ac_save_CC=$CC
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_c_conftest_c99_program
++_ACEOF
++for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99=
++do
++  CC="$ac_save_CC $ac_arg"
++  if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_c99=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cc_c99" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CC=$ac_save_CC
++fi
++
++if test "x$ac_cv_prog_cc_c99" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cc_c99" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5
++printf "%s\n" "$ac_cv_prog_cc_c99" >&6; }
++     CC="$CC $ac_cv_prog_cc_c99"
++fi
++  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99
++  ac_prog_cc_stdc=c99
++fi
++fi
++if test x$ac_prog_cc_stdc = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5
++printf %s "checking for $CC option to enable C89 features... " >&6; }
++if test ${ac_cv_prog_cc_c89+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cc_c89=no
++ac_save_CC=$CC
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_c_conftest_c89_program
++_ACEOF
++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
++do
++  CC="$ac_save_CC $ac_arg"
++  if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_c89=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cc_c89" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CC=$ac_save_CC
++fi
++
++if test "x$ac_cv_prog_cc_c89" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cc_c89" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
++printf "%s\n" "$ac_cv_prog_cc_c89" >&6; }
++     CC="$CC $ac_cv_prog_cc_c89"
++fi
++  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89
++  ac_prog_cc_stdc=c89
++fi
++fi
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++  ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
++printf %s "checking whether $CC understands -c and -o together... " >&6; }
++if test ${am_cv_prog_cc_c_o+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++  # Make sure it works both with $CC and with simple cc.
++  # Following AC_PROG_CC_C_O, we do the test twice because some
++  # compilers refuse to overwrite an existing .o file with -o,
++  # though they will create one.
++  am_cv_prog_cc_c_o=yes
++  for am_i in 1 2; do
++    if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
++   ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
++   ac_status=$?
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   (exit $ac_status); } \
++         && test -f conftest2.$ac_objext; then
++      : OK
++    else
++      am_cv_prog_cc_c_o=no
++      break
++    fi
++  done
++  rm -f core conftest*
++  unset am_i
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
++printf "%s\n" "$am_cv_prog_cc_c_o" >&6; }
++if test "$am_cv_prog_cc_c_o" != yes; then
++   # Losing compiler, so override with the script.
++   # FIXME: It is wrong to rewrite CC.
++   # But if we don't then we get into trouble of one sort or another.
++   # A longer-term fix would be to have automake use am__CC in this case,
++   # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
++   CC="$am_aux_dir/compile $CC"
++fi
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++depcc="$CC"   am_compiler_list=
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
++printf %s "checking dependency style of $depcc... " >&6; }
++if test ${am_cv_CC_dependencies_compiler_type+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
++  # We make a subdir and do the tests there.  Otherwise we can end up
++  # making bogus files that we don't know about and never remove.  For
++  # instance it was reported that on HP-UX the gcc test will end up
++  # making a dummy file named 'D' -- because '-MD' means "put the output
++  # in D".
++  rm -rf conftest.dir
++  mkdir conftest.dir
++  # Copy depcomp to subdir because otherwise we won't find it if we're
++  # using a relative directory.
++  cp "$am_depcomp" conftest.dir
++  cd conftest.dir
++  # We will build objects and dependencies in a subdirectory because
++  # it helps to detect inapplicable dependency modes.  For instance
++  # both Tru64's cc and ICC support -MD to output dependencies as a
++  # side effect of compilation, but ICC will put the dependencies in
++  # the current directory while Tru64 will put them in the object
++  # directory.
++  mkdir sub
++
++  am_cv_CC_dependencies_compiler_type=none
++  if test "$am_compiler_list" = ""; then
++     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
++  fi
++  am__universal=false
++  case " $depcc " in #(
++     *\ -arch\ *\ -arch\ *) am__universal=true ;;
++     esac
++
++  for depmode in $am_compiler_list; do
++    # Setup a source with many dependencies, because some compilers
++    # like to wrap large dependency lists on column 80 (with \), and
++    # we should not choose a depcomp mode which is confused by this.
++    #
++    # We need to recreate these files for each test, as the compiler may
++    # overwrite some of them when testing with obscure command lines.
++    # This happens at least with the AIX C compiler.
++    : > sub/conftest.c
++    for i in 1 2 3 4 5 6; do
++      echo '#include "conftst'$i'.h"' >> sub/conftest.c
++      # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
++      # Solaris 10 /bin/sh.
++      echo '/* dummy */' > sub/conftst$i.h
++    done
++    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
++
++    # We check with '-c' and '-o' for the sake of the "dashmstdout"
++    # mode.  It turns out that the SunPro C++ compiler does not properly
++    # handle '-M -o', and we need to detect this.  Also, some Intel
++    # versions had trouble with output in subdirs.
++    am__obj=sub/conftest.${OBJEXT-o}
++    am__minus_obj="-o $am__obj"
++    case $depmode in
++    gcc)
++      # This depmode causes a compiler race in universal mode.
++      test "$am__universal" = false || continue
++      ;;
++    nosideeffect)
++      # After this tag, mechanisms are not by side-effect, so they'll
++      # only be used when explicitly requested.
++      if test "x$enable_dependency_tracking" = xyes; then
++	continue
++      else
++	break
++      fi
++      ;;
++    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
++      # This compiler won't grok '-c -o', but also, the minuso test has
++      # not run yet.  These depmodes are late enough in the game, and
++      # so weak that their functioning should not be impacted.
++      am__obj=conftest.${OBJEXT-o}
++      am__minus_obj=
++      ;;
++    none) break ;;
++    esac
++    if depmode=$depmode \
++       source=sub/conftest.c object=$am__obj \
++       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
++       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
++         >/dev/null 2>conftest.err &&
++       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
++       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
++      # icc doesn't choke on unknown options, it will just issue warnings
++      # or remarks (even with -Werror).  So we grep stderr for any message
++      # that says an option was ignored or not supported.
++      # When given -MP, icc 7.0 and 7.1 complain thusly:
++      #   icc: Command line warning: ignoring option '-M'; no argument required
++      # The diagnosis changed in icc 8.0:
++      #   icc: Command line remark: option '-MP' not supported
++      if (grep 'ignoring option' conftest.err ||
++          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
++        am_cv_CC_dependencies_compiler_type=$depmode
++        break
++      fi
++    fi
++  done
++
++  cd ..
++  rm -rf conftest.dir
++else
++  am_cv_CC_dependencies_compiler_type=none
++fi
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
++printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; }
++CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
++
++ if
++  test "x$enable_dependency_tracking" != xno \
++  && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
++  am__fastdepCC_TRUE=
++  am__fastdepCC_FALSE='#'
++else
++  am__fastdepCC_TRUE='#'
++  am__fastdepCC_FALSE=
++fi
++
++
++
++ac_header= ac_cache=
++for ac_item in $ac_header_c_list
++do
++  if test $ac_cache; then
++    ac_fn_c_check_header_compile "$LINENO" $ac_header ac_cv_header_$ac_cache "$ac_includes_default"
++    if eval test \"x\$ac_cv_header_$ac_cache\" = xyes; then
++      printf "%s\n" "#define $ac_item 1" >> confdefs.h
++    fi
++    ac_header= ac_cache=
++  elif test $ac_header; then
++    ac_cache=$ac_item
++  else
++    ac_header=$ac_item
++  fi
++done
++
++
++
++
++
++
++
++
++if test $ac_cv_header_stdlib_h = yes && test $ac_cv_header_string_h = yes
++then :
++
++printf "%s\n" "#define STDC_HEADERS 1" >>confdefs.h
++
++fi
++
++
++
++
++
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5
++printf %s "checking whether it is safe to define __EXTENSIONS__... " >&6; }
++if test ${ac_cv_safe_to_define___extensions__+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++#         define __EXTENSIONS__ 1
++          $ac_includes_default
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_safe_to_define___extensions__=yes
++else $as_nop
++  ac_cv_safe_to_define___extensions__=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5
++printf "%s\n" "$ac_cv_safe_to_define___extensions__" >&6; }
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether _XOPEN_SOURCE should be defined" >&5
++printf %s "checking whether _XOPEN_SOURCE should be defined... " >&6; }
++if test ${ac_cv_should_define__xopen_source+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_should_define__xopen_source=no
++    if test $ac_cv_header_wchar_h = yes
++then :
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++          #include 
++          mbstate_t x;
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++            #define _XOPEN_SOURCE 500
++            #include 
++            mbstate_t x;
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_should_define__xopen_source=yes
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_should_define__xopen_source" >&5
++printf "%s\n" "$ac_cv_should_define__xopen_source" >&6; }
++
++  printf "%s\n" "#define _ALL_SOURCE 1" >>confdefs.h
++
++  printf "%s\n" "#define _DARWIN_C_SOURCE 1" >>confdefs.h
++
++  printf "%s\n" "#define _GNU_SOURCE 1" >>confdefs.h
++
++  printf "%s\n" "#define _HPUX_ALT_XOPEN_SOCKET_API 1" >>confdefs.h
++
++  printf "%s\n" "#define _NETBSD_SOURCE 1" >>confdefs.h
++
++  printf "%s\n" "#define _OPENBSD_SOURCE 1" >>confdefs.h
++
++  printf "%s\n" "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h
++
++  printf "%s\n" "#define __STDC_WANT_IEC_60559_ATTRIBS_EXT__ 1" >>confdefs.h
++
++  printf "%s\n" "#define __STDC_WANT_IEC_60559_BFP_EXT__ 1" >>confdefs.h
++
++  printf "%s\n" "#define __STDC_WANT_IEC_60559_DFP_EXT__ 1" >>confdefs.h
++
++  printf "%s\n" "#define __STDC_WANT_IEC_60559_FUNCS_EXT__ 1" >>confdefs.h
++
++  printf "%s\n" "#define __STDC_WANT_IEC_60559_TYPES_EXT__ 1" >>confdefs.h
++
++  printf "%s\n" "#define __STDC_WANT_LIB_EXT2__ 1" >>confdefs.h
++
++  printf "%s\n" "#define __STDC_WANT_MATH_SPEC_FUNCS__ 1" >>confdefs.h
++
++  printf "%s\n" "#define _TANDEM_SOURCE 1" >>confdefs.h
++
++  if test $ac_cv_header_minix_config_h = yes
++then :
++  MINIX=yes
++    printf "%s\n" "#define _MINIX 1" >>confdefs.h
++
++    printf "%s\n" "#define _POSIX_SOURCE 1" >>confdefs.h
++
++    printf "%s\n" "#define _POSIX_1_SOURCE 2" >>confdefs.h
++
++else $as_nop
++  MINIX=
++fi
++  if test $ac_cv_safe_to_define___extensions__ = yes
++then :
++  printf "%s\n" "#define __EXTENSIONS__ 1" >>confdefs.h
++
++fi
++  if test $ac_cv_should_define__xopen_source = yes
++then :
++  printf "%s\n" "#define _XOPEN_SOURCE 500" >>confdefs.h
++
++fi
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
++set dummy ${ac_tool_prefix}gcc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="${ac_tool_prefix}gcc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_CC"; then
++  ac_ct_CC=$CC
++  # Extract the first word of "gcc", so it can be a program name with args.
++set dummy gcc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_CC"; then
++  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CC="gcc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_CC=$ac_cv_prog_ac_ct_CC
++if test -n "$ac_ct_CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++printf "%s\n" "$ac_ct_CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_CC" = x; then
++    CC=""
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    CC=$ac_ct_CC
++  fi
++else
++  CC="$ac_cv_prog_CC"
++fi
++
++if test -z "$CC"; then
++          if test -n "$ac_tool_prefix"; then
++    # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
++set dummy ${ac_tool_prefix}cc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="${ac_tool_prefix}cc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  fi
++fi
++if test -z "$CC"; then
++  # Extract the first word of "cc", so it can be a program name with args.
++set dummy cc; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++  ac_prog_rejected=no
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
++       ac_prog_rejected=yes
++       continue
++     fi
++    ac_cv_prog_CC="cc"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++if test $ac_prog_rejected = yes; then
++  # We found a bogon in the path, so make sure we never use it.
++  set dummy $ac_cv_prog_CC
++  shift
++  if test $# != 0; then
++    # We chose a different compiler from the bogus one.
++    # However, it has the same basename, so the bogon will be chosen
++    # first if we set CC to just the basename; use the full file name.
++    shift
++    ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@"
++  fi
++fi
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$CC"; then
++  if test -n "$ac_tool_prefix"; then
++  for ac_prog in cl.exe
++  do
++    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
++set dummy $ac_tool_prefix$ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++    test -n "$CC" && break
++  done
++fi
++if test -z "$CC"; then
++  ac_ct_CC=$CC
++  for ac_prog in cl.exe
++do
++  # Extract the first word of "$ac_prog", so it can be a program name with args.
++set dummy $ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_CC"; then
++  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CC="$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_CC=$ac_cv_prog_ac_ct_CC
++if test -n "$ac_ct_CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++printf "%s\n" "$ac_ct_CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  test -n "$ac_ct_CC" && break
++done
++
++  if test "x$ac_ct_CC" = x; then
++    CC=""
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    CC=$ac_ct_CC
++  fi
++fi
++
++fi
++if test -z "$CC"; then
++  if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args.
++set dummy ${ac_tool_prefix}clang; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CC"; then
++  ac_cv_prog_CC="$CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CC="${ac_tool_prefix}clang"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CC=$ac_cv_prog_CC
++if test -n "$CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++printf "%s\n" "$CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_CC"; then
++  ac_ct_CC=$CC
++  # Extract the first word of "clang", so it can be a program name with args.
++set dummy clang; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_CC"; then
++  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CC="clang"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_CC=$ac_cv_prog_ac_ct_CC
++if test -n "$ac_ct_CC"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++printf "%s\n" "$ac_ct_CC" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_CC" = x; then
++    CC=""
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    CC=$ac_ct_CC
++  fi
++else
++  CC="$ac_cv_prog_CC"
++fi
++
++fi
++
++
++test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "no acceptable C compiler found in \$PATH
++See \`config.log' for more details" "$LINENO" 5; }
++
++# Provide some information about the compiler.
++printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
++set X $ac_compile
++ac_compiler=$2
++for ac_option in --version -v -V -qversion -version; do
++  { { ac_try="$ac_compiler $ac_option >&5"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    sed '10a\
++... rest of stderr output deleted ...
++         10q' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++  fi
++  rm -f conftest.er1 conftest.err
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++done
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5
++printf %s "checking whether the compiler supports GNU C... " >&6; }
++if test ${ac_cv_c_compiler_gnu+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++#ifndef __GNUC__
++       choke me
++#endif
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_compiler_gnu=yes
++else $as_nop
++  ac_compiler_gnu=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++ac_cv_c_compiler_gnu=$ac_compiler_gnu
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
++printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; }
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++if test $ac_compiler_gnu = yes; then
++  GCC=yes
++else
++  GCC=
++fi
++ac_test_CFLAGS=${CFLAGS+y}
++ac_save_CFLAGS=$CFLAGS
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
++printf %s "checking whether $CC accepts -g... " >&6; }
++if test ${ac_cv_prog_cc_g+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_save_c_werror_flag=$ac_c_werror_flag
++   ac_c_werror_flag=yes
++   ac_cv_prog_cc_g=no
++   CFLAGS="-g"
++   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_g=yes
++else $as_nop
++  CFLAGS=""
++      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++
++else $as_nop
++  ac_c_werror_flag=$ac_save_c_werror_flag
++	 CFLAGS="-g"
++	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_g=yes
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++   ac_c_werror_flag=$ac_save_c_werror_flag
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
++printf "%s\n" "$ac_cv_prog_cc_g" >&6; }
++if test $ac_test_CFLAGS; then
++  CFLAGS=$ac_save_CFLAGS
++elif test $ac_cv_prog_cc_g = yes; then
++  if test "$GCC" = yes; then
++    CFLAGS="-g -O2"
++  else
++    CFLAGS="-g"
++  fi
++else
++  if test "$GCC" = yes; then
++    CFLAGS="-O2"
++  else
++    CFLAGS=
++  fi
++fi
++ac_prog_cc_stdc=no
++if test x$ac_prog_cc_stdc = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5
++printf %s "checking for $CC option to enable C11 features... " >&6; }
++if test ${ac_cv_prog_cc_c11+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cc_c11=no
++ac_save_CC=$CC
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_c_conftest_c11_program
++_ACEOF
++for ac_arg in '' -std=gnu11
++do
++  CC="$ac_save_CC $ac_arg"
++  if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_c11=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cc_c11" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CC=$ac_save_CC
++fi
++
++if test "x$ac_cv_prog_cc_c11" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cc_c11" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5
++printf "%s\n" "$ac_cv_prog_cc_c11" >&6; }
++     CC="$CC $ac_cv_prog_cc_c11"
++fi
++  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11
++  ac_prog_cc_stdc=c11
++fi
++fi
++if test x$ac_prog_cc_stdc = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5
++printf %s "checking for $CC option to enable C99 features... " >&6; }
++if test ${ac_cv_prog_cc_c99+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cc_c99=no
++ac_save_CC=$CC
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_c_conftest_c99_program
++_ACEOF
++for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99=
++do
++  CC="$ac_save_CC $ac_arg"
++  if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_c99=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cc_c99" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CC=$ac_save_CC
++fi
++
++if test "x$ac_cv_prog_cc_c99" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cc_c99" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5
++printf "%s\n" "$ac_cv_prog_cc_c99" >&6; }
++     CC="$CC $ac_cv_prog_cc_c99"
++fi
++  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99
++  ac_prog_cc_stdc=c99
++fi
++fi
++if test x$ac_prog_cc_stdc = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5
++printf %s "checking for $CC option to enable C89 features... " >&6; }
++if test ${ac_cv_prog_cc_c89+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cc_c89=no
++ac_save_CC=$CC
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_c_conftest_c89_program
++_ACEOF
++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
++do
++  CC="$ac_save_CC $ac_arg"
++  if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_prog_cc_c89=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cc_c89" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CC=$ac_save_CC
++fi
++
++if test "x$ac_cv_prog_cc_c89" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cc_c89" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
++printf "%s\n" "$ac_cv_prog_cc_c89" >&6; }
++     CC="$CC $ac_cv_prog_cc_c89"
++fi
++  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89
++  ac_prog_cc_stdc=c89
++fi
++fi
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++  ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
++printf %s "checking whether $CC understands -c and -o together... " >&6; }
++if test ${am_cv_prog_cc_c_o+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++  # Make sure it works both with $CC and with simple cc.
++  # Following AC_PROG_CC_C_O, we do the test twice because some
++  # compilers refuse to overwrite an existing .o file with -o,
++  # though they will create one.
++  am_cv_prog_cc_c_o=yes
++  for am_i in 1 2; do
++    if { echo "$as_me:$LINENO: $CC -c conftest.$ac_ext -o conftest2.$ac_objext" >&5
++   ($CC -c conftest.$ac_ext -o conftest2.$ac_objext) >&5 2>&5
++   ac_status=$?
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   (exit $ac_status); } \
++         && test -f conftest2.$ac_objext; then
++      : OK
++    else
++      am_cv_prog_cc_c_o=no
++      break
++    fi
++  done
++  rm -f core conftest*
++  unset am_i
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
++printf "%s\n" "$am_cv_prog_cc_c_o" >&6; }
++if test "$am_cv_prog_cc_c_o" != yes; then
++   # Losing compiler, so override with the script.
++   # FIXME: It is wrong to rewrite CC.
++   # But if we don't then we get into trouble of one sort or another.
++   # A longer-term fix would be to have automake use am__CC in this case,
++   # and then we could set am__CC="\$(top_srcdir)/compile \$(CC)"
++   CC="$am_aux_dir/compile $CC"
++fi
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++depcc="$CC"   am_compiler_list=
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
++printf %s "checking dependency style of $depcc... " >&6; }
++if test ${am_cv_CC_dependencies_compiler_type+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
++  # We make a subdir and do the tests there.  Otherwise we can end up
++  # making bogus files that we don't know about and never remove.  For
++  # instance it was reported that on HP-UX the gcc test will end up
++  # making a dummy file named 'D' -- because '-MD' means "put the output
++  # in D".
++  rm -rf conftest.dir
++  mkdir conftest.dir
++  # Copy depcomp to subdir because otherwise we won't find it if we're
++  # using a relative directory.
++  cp "$am_depcomp" conftest.dir
++  cd conftest.dir
++  # We will build objects and dependencies in a subdirectory because
++  # it helps to detect inapplicable dependency modes.  For instance
++  # both Tru64's cc and ICC support -MD to output dependencies as a
++  # side effect of compilation, but ICC will put the dependencies in
++  # the current directory while Tru64 will put them in the object
++  # directory.
++  mkdir sub
++
++  am_cv_CC_dependencies_compiler_type=none
++  if test "$am_compiler_list" = ""; then
++     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
++  fi
++  am__universal=false
++  case " $depcc " in #(
++     *\ -arch\ *\ -arch\ *) am__universal=true ;;
++     esac
++
++  for depmode in $am_compiler_list; do
++    # Setup a source with many dependencies, because some compilers
++    # like to wrap large dependency lists on column 80 (with \), and
++    # we should not choose a depcomp mode which is confused by this.
++    #
++    # We need to recreate these files for each test, as the compiler may
++    # overwrite some of them when testing with obscure command lines.
++    # This happens at least with the AIX C compiler.
++    : > sub/conftest.c
++    for i in 1 2 3 4 5 6; do
++      echo '#include "conftst'$i'.h"' >> sub/conftest.c
++      # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
++      # Solaris 10 /bin/sh.
++      echo '/* dummy */' > sub/conftst$i.h
++    done
++    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
++
++    # We check with '-c' and '-o' for the sake of the "dashmstdout"
++    # mode.  It turns out that the SunPro C++ compiler does not properly
++    # handle '-M -o', and we need to detect this.  Also, some Intel
++    # versions had trouble with output in subdirs.
++    am__obj=sub/conftest.${OBJEXT-o}
++    am__minus_obj="-o $am__obj"
++    case $depmode in
++    gcc)
++      # This depmode causes a compiler race in universal mode.
++      test "$am__universal" = false || continue
++      ;;
++    nosideeffect)
++      # After this tag, mechanisms are not by side-effect, so they'll
++      # only be used when explicitly requested.
++      if test "x$enable_dependency_tracking" = xyes; then
++	continue
++      else
++	break
++      fi
++      ;;
++    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
++      # This compiler won't grok '-c -o', but also, the minuso test has
++      # not run yet.  These depmodes are late enough in the game, and
++      # so weak that their functioning should not be impacted.
++      am__obj=conftest.${OBJEXT-o}
++      am__minus_obj=
++      ;;
++    none) break ;;
++    esac
++    if depmode=$depmode \
++       source=sub/conftest.c object=$am__obj \
++       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
++       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
++         >/dev/null 2>conftest.err &&
++       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
++       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
++      # icc doesn't choke on unknown options, it will just issue warnings
++      # or remarks (even with -Werror).  So we grep stderr for any message
++      # that says an option was ignored or not supported.
++      # When given -MP, icc 7.0 and 7.1 complain thusly:
++      #   icc: Command line warning: ignoring option '-M'; no argument required
++      # The diagnosis changed in icc 8.0:
++      #   icc: Command line remark: option '-MP' not supported
++      if (grep 'ignoring option' conftest.err ||
++          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
++        am_cv_CC_dependencies_compiler_type=$depmode
++        break
++      fi
++    fi
++  done
++
++  cd ..
++  rm -rf conftest.dir
++else
++  am_cv_CC_dependencies_compiler_type=none
++fi
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
++printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; }
++CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
++
++ if
++  test "x$enable_dependency_tracking" != xno \
++  && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
++  am__fastdepCC_TRUE=
++  am__fastdepCC_FALSE='#'
++else
++  am__fastdepCC_TRUE='#'
++  am__fastdepCC_FALSE=
++fi
++
++
++
++
++
++
++
++
++ac_ext=cpp
++ac_cpp='$CXXCPP $CPPFLAGS'
++ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
++if test -z "$CXX"; then
++  if test -n "$CCC"; then
++    CXX=$CCC
++  else
++    if test -n "$ac_tool_prefix"; then
++  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++
++  do
++    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
++set dummy $ac_tool_prefix$ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$CXX"; then
++  ac_cv_prog_CXX="$CXX" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++CXX=$ac_cv_prog_CXX
++if test -n "$CXX"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
++printf "%s\n" "$CXX" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++    test -n "$CXX" && break
++  done
++fi
++if test -z "$CXX"; then
++  ac_ct_CXX=$CXX
++  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++
++do
++  # Extract the first word of "$ac_prog", so it can be a program name with args.
++set dummy $ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_CXX"; then
++  ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CXX="$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
++if test -n "$ac_ct_CXX"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
++printf "%s\n" "$ac_ct_CXX" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  test -n "$ac_ct_CXX" && break
++done
++
++  if test "x$ac_ct_CXX" = x; then
++    CXX="g++"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    CXX=$ac_ct_CXX
++  fi
++fi
++
++  fi
++fi
++# Provide some information about the compiler.
++printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
++set X $ac_compile
++ac_compiler=$2
++for ac_option in --version -v -V -qversion; do
++  { { ac_try="$ac_compiler $ac_option >&5"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++printf "%s\n" "$ac_try_echo"; } >&5
++  (eval "$ac_compiler $ac_option >&5") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    sed '10a\
++... rest of stderr output deleted ...
++         10q' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++  fi
++  rm -f conftest.er1 conftest.err
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++done
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C++" >&5
++printf %s "checking whether the compiler supports GNU C++... " >&6; }
++if test ${ac_cv_cxx_compiler_gnu+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++#ifndef __GNUC__
++       choke me
++#endif
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_cxx_try_compile "$LINENO"
++then :
++  ac_compiler_gnu=yes
++else $as_nop
++  ac_compiler_gnu=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
++printf "%s\n" "$ac_cv_cxx_compiler_gnu" >&6; }
++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
++
++if test $ac_compiler_gnu = yes; then
++  GXX=yes
++else
++  GXX=
++fi
++ac_test_CXXFLAGS=${CXXFLAGS+y}
++ac_save_CXXFLAGS=$CXXFLAGS
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
++printf %s "checking whether $CXX accepts -g... " >&6; }
++if test ${ac_cv_prog_cxx_g+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_save_cxx_werror_flag=$ac_cxx_werror_flag
++   ac_cxx_werror_flag=yes
++   ac_cv_prog_cxx_g=no
++   CXXFLAGS="-g"
++   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_cxx_try_compile "$LINENO"
++then :
++  ac_cv_prog_cxx_g=yes
++else $as_nop
++  CXXFLAGS=""
++      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_cxx_try_compile "$LINENO"
++then :
++
++else $as_nop
++  ac_cxx_werror_flag=$ac_save_cxx_werror_flag
++	 CXXFLAGS="-g"
++	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_cxx_try_compile "$LINENO"
++then :
++  ac_cv_prog_cxx_g=yes
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++   ac_cxx_werror_flag=$ac_save_cxx_werror_flag
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
++printf "%s\n" "$ac_cv_prog_cxx_g" >&6; }
++if test $ac_test_CXXFLAGS; then
++  CXXFLAGS=$ac_save_CXXFLAGS
++elif test $ac_cv_prog_cxx_g = yes; then
++  if test "$GXX" = yes; then
++    CXXFLAGS="-g -O2"
++  else
++    CXXFLAGS="-g"
++  fi
++else
++  if test "$GXX" = yes; then
++    CXXFLAGS="-O2"
++  else
++    CXXFLAGS=
++  fi
++fi
++ac_prog_cxx_stdcxx=no
++if test x$ac_prog_cxx_stdcxx = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++11 features" >&5
++printf %s "checking for $CXX option to enable C++11 features... " >&6; }
++if test ${ac_cv_prog_cxx_cxx11+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cxx_cxx11=no
++ac_save_CXX=$CXX
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_cxx_conftest_cxx11_program
++_ACEOF
++for ac_arg in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x -qlanglvl=extended0x -AA
++do
++  CXX="$ac_save_CXX $ac_arg"
++  if ac_fn_cxx_try_compile "$LINENO"
++then :
++  ac_cv_prog_cxx_cxx11=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cxx_cxx11" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CXX=$ac_save_CXX
++fi
++
++if test "x$ac_cv_prog_cxx_cxx11" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cxx_cxx11" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx11" >&5
++printf "%s\n" "$ac_cv_prog_cxx_cxx11" >&6; }
++     CXX="$CXX $ac_cv_prog_cxx_cxx11"
++fi
++  ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx11
++  ac_prog_cxx_stdcxx=cxx11
++fi
++fi
++if test x$ac_prog_cxx_stdcxx = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++98 features" >&5
++printf %s "checking for $CXX option to enable C++98 features... " >&6; }
++if test ${ac_cv_prog_cxx_cxx98+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_prog_cxx_cxx98=no
++ac_save_CXX=$CXX
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$ac_cxx_conftest_cxx98_program
++_ACEOF
++for ac_arg in '' -std=gnu++98 -std=c++98 -qlanglvl=extended -AA
++do
++  CXX="$ac_save_CXX $ac_arg"
++  if ac_fn_cxx_try_compile "$LINENO"
++then :
++  ac_cv_prog_cxx_cxx98=$ac_arg
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++  test "x$ac_cv_prog_cxx_cxx98" != "xno" && break
++done
++rm -f conftest.$ac_ext
++CXX=$ac_save_CXX
++fi
++
++if test "x$ac_cv_prog_cxx_cxx98" = xno
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++printf "%s\n" "unsupported" >&6; }
++else $as_nop
++  if test "x$ac_cv_prog_cxx_cxx98" = x
++then :
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++printf "%s\n" "none needed" >&6; }
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx98" >&5
++printf "%s\n" "$ac_cv_prog_cxx_cxx98" >&6; }
++     CXX="$CXX $ac_cv_prog_cxx_cxx98"
++fi
++  ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx98
++  ac_prog_cxx_stdcxx=cxx98
++fi
++fi
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++depcc="$CXX"  am_compiler_list=
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
++printf %s "checking dependency style of $depcc... " >&6; }
++if test ${am_cv_CXX_dependencies_compiler_type+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
++  # We make a subdir and do the tests there.  Otherwise we can end up
++  # making bogus files that we don't know about and never remove.  For
++  # instance it was reported that on HP-UX the gcc test will end up
++  # making a dummy file named 'D' -- because '-MD' means "put the output
++  # in D".
++  rm -rf conftest.dir
++  mkdir conftest.dir
++  # Copy depcomp to subdir because otherwise we won't find it if we're
++  # using a relative directory.
++  cp "$am_depcomp" conftest.dir
++  cd conftest.dir
++  # We will build objects and dependencies in a subdirectory because
++  # it helps to detect inapplicable dependency modes.  For instance
++  # both Tru64's cc and ICC support -MD to output dependencies as a
++  # side effect of compilation, but ICC will put the dependencies in
++  # the current directory while Tru64 will put them in the object
++  # directory.
++  mkdir sub
++
++  am_cv_CXX_dependencies_compiler_type=none
++  if test "$am_compiler_list" = ""; then
++     am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp`
++  fi
++  am__universal=false
++  case " $depcc " in #(
++     *\ -arch\ *\ -arch\ *) am__universal=true ;;
++     esac
++
++  for depmode in $am_compiler_list; do
++    # Setup a source with many dependencies, because some compilers
++    # like to wrap large dependency lists on column 80 (with \), and
++    # we should not choose a depcomp mode which is confused by this.
++    #
++    # We need to recreate these files for each test, as the compiler may
++    # overwrite some of them when testing with obscure command lines.
++    # This happens at least with the AIX C compiler.
++    : > sub/conftest.c
++    for i in 1 2 3 4 5 6; do
++      echo '#include "conftst'$i'.h"' >> sub/conftest.c
++      # Using ": > sub/conftst$i.h" creates only sub/conftst1.h with
++      # Solaris 10 /bin/sh.
++      echo '/* dummy */' > sub/conftst$i.h
++    done
++    echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf
++
++    # We check with '-c' and '-o' for the sake of the "dashmstdout"
++    # mode.  It turns out that the SunPro C++ compiler does not properly
++    # handle '-M -o', and we need to detect this.  Also, some Intel
++    # versions had trouble with output in subdirs.
++    am__obj=sub/conftest.${OBJEXT-o}
++    am__minus_obj="-o $am__obj"
++    case $depmode in
++    gcc)
++      # This depmode causes a compiler race in universal mode.
++      test "$am__universal" = false || continue
++      ;;
++    nosideeffect)
++      # After this tag, mechanisms are not by side-effect, so they'll
++      # only be used when explicitly requested.
++      if test "x$enable_dependency_tracking" = xyes; then
++	continue
++      else
++	break
++      fi
++      ;;
++    msvc7 | msvc7msys | msvisualcpp | msvcmsys)
++      # This compiler won't grok '-c -o', but also, the minuso test has
++      # not run yet.  These depmodes are late enough in the game, and
++      # so weak that their functioning should not be impacted.
++      am__obj=conftest.${OBJEXT-o}
++      am__minus_obj=
++      ;;
++    none) break ;;
++    esac
++    if depmode=$depmode \
++       source=sub/conftest.c object=$am__obj \
++       depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \
++       $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \
++         >/dev/null 2>conftest.err &&
++       grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 &&
++       grep $am__obj sub/conftest.Po > /dev/null 2>&1 &&
++       ${MAKE-make} -s -f confmf > /dev/null 2>&1; then
++      # icc doesn't choke on unknown options, it will just issue warnings
++      # or remarks (even with -Werror).  So we grep stderr for any message
++      # that says an option was ignored or not supported.
++      # When given -MP, icc 7.0 and 7.1 complain thusly:
++      #   icc: Command line warning: ignoring option '-M'; no argument required
++      # The diagnosis changed in icc 8.0:
++      #   icc: Command line remark: option '-MP' not supported
++      if (grep 'ignoring option' conftest.err ||
++          grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else
++        am_cv_CXX_dependencies_compiler_type=$depmode
++        break
++      fi
++    fi
++  done
++
++  cd ..
++  rm -rf conftest.dir
++else
++  am_cv_CXX_dependencies_compiler_type=none
++fi
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
++printf "%s\n" "$am_cv_CXX_dependencies_compiler_type" >&6; }
++CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
++
++ if
++  test "x$enable_dependency_tracking" != xno \
++  && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then
++  am__fastdepCXX_TRUE=
++  am__fastdepCXX_FALSE='#'
++else
++  am__fastdepCXX_TRUE='#'
++  am__fastdepCXX_FALSE=
++fi
++
++
++# Check whether --enable-largefile was given.
++if test ${enable_largefile+y}
++then :
++  enableval=$enable_largefile;
++fi
++
++if test "$enable_largefile" != no; then
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5
++printf %s "checking for special C compiler options needed for large files... " >&6; }
++if test ${ac_cv_sys_largefile_CC+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_cv_sys_largefile_CC=no
++     if test "$GCC" != yes; then
++       ac_save_CC=$CC
++       while :; do
++	 # IRIX 6.2 and later do not support large files by default,
++	 # so use the C compiler's -n32 option if that helps.
++	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
++ /* Check that off_t can represent 2**63 - 1 correctly.
++    We can't simply define LARGE_OFF_T to be 9223372036854775807,
++    since some C++ compilers masquerading as C compilers
++    incorrectly reject 9223372036854775807.  */
++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++  int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
++		       && LARGE_OFF_T % 2147483647 == 1)
++		      ? 1 : -1];
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++	 if ac_fn_c_try_compile "$LINENO"
++then :
++  break
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++	 CC="$CC -n32"
++	 if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_sys_largefile_CC=' -n32'; break
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam
++	 break
++       done
++       CC=$ac_save_CC
++       rm -f conftest.$ac_ext
++    fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5
++printf "%s\n" "$ac_cv_sys_largefile_CC" >&6; }
++  if test "$ac_cv_sys_largefile_CC" != no; then
++    CC=$CC$ac_cv_sys_largefile_CC
++  fi
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5
++printf %s "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; }
++if test ${ac_cv_sys_file_offset_bits+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  while :; do
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
++ /* Check that off_t can represent 2**63 - 1 correctly.
++    We can't simply define LARGE_OFF_T to be 9223372036854775807,
++    since some C++ compilers masquerading as C compilers
++    incorrectly reject 9223372036854775807.  */
++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++  int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
++		       && LARGE_OFF_T % 2147483647 == 1)
++		      ? 1 : -1];
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_sys_file_offset_bits=no; break
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#define _FILE_OFFSET_BITS 64
++#include 
++ /* Check that off_t can represent 2**63 - 1 correctly.
++    We can't simply define LARGE_OFF_T to be 9223372036854775807,
++    since some C++ compilers masquerading as C compilers
++    incorrectly reject 9223372036854775807.  */
++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++  int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
++		       && LARGE_OFF_T % 2147483647 == 1)
++		      ? 1 : -1];
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_sys_file_offset_bits=64; break
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++  ac_cv_sys_file_offset_bits=unknown
++  break
++done
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5
++printf "%s\n" "$ac_cv_sys_file_offset_bits" >&6; }
++case $ac_cv_sys_file_offset_bits in #(
++  no | unknown) ;;
++  *)
++printf "%s\n" "#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits" >>confdefs.h
++;;
++esac
++rm -rf conftest*
++  if test $ac_cv_sys_file_offset_bits = unknown; then
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5
++printf %s "checking for _LARGE_FILES value needed for large files... " >&6; }
++if test ${ac_cv_sys_large_files+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  while :; do
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
++ /* Check that off_t can represent 2**63 - 1 correctly.
++    We can't simply define LARGE_OFF_T to be 9223372036854775807,
++    since some C++ compilers masquerading as C compilers
++    incorrectly reject 9223372036854775807.  */
++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++  int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
++		       && LARGE_OFF_T % 2147483647 == 1)
++		      ? 1 : -1];
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_sys_large_files=no; break
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#define _LARGE_FILES 1
++#include 
++ /* Check that off_t can represent 2**63 - 1 correctly.
++    We can't simply define LARGE_OFF_T to be 9223372036854775807,
++    since some C++ compilers masquerading as C compilers
++    incorrectly reject 9223372036854775807.  */
++#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++  int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
++		       && LARGE_OFF_T % 2147483647 == 1)
++		      ? 1 : -1];
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_sys_large_files=1; break
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++  ac_cv_sys_large_files=unknown
++  break
++done
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5
++printf "%s\n" "$ac_cv_sys_large_files" >&6; }
++case $ac_cv_sys_large_files in #(
++  no | unknown) ;;
++  *)
++printf "%s\n" "#define _LARGE_FILES $ac_cv_sys_large_files" >>confdefs.h
++;;
++esac
++rm -rf conftest*
++  fi
++fi
++
++
++# Check whether -static-libgcc is supported.
++saved_LDFLAGS="$LDFLAGS"
++LDFLAGS="$LDFLAGS -static-libgcc"
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -static-libgcc" >&5
++printf %s "checking for -static-libgcc... " >&6; }
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++  int main() {}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  have_static_libgcc=yes
++else $as_nop
++  have_static_libgcc=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $have_static_libgcc" >&5
++printf "%s\n" "$have_static_libgcc" >&6; };
++LDFLAGS="$saved_LDFLAGS"
++# Need -Wc to get it through libtool.
++if test "x$have_static_libgcc" = xyes; then
++   ac_bolt_plugin_ldflags="-Wc,-static-libgcc"
++fi
++
++
++if test x"$host_subdir" = x.; then
++   gcc_build_dir=../gcc
++else
++   gcc_build_dir=../../$host_subdir/gcc
++fi
++
++
++# Used for constructing correct paths for offload compilers.
++accel_dir_suffix=
++real_target_noncanonical=${target_noncanonical}
++if test x"$enable_as_accelerator_for" != x; then
++  accel_dir_suffix=/accel/${target_noncanonical}
++  real_target_noncanonical=${enable_as_accelerator_for}
++fi
++
++
++
++# Determine what GCC version number to use in filesystem paths.
++GCC_BASE_VER
++
++case `pwd` in
++  *\ * | *\	*)
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
++printf "%s\n" "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
++esac
++
++
++
++macro_version='2.4.7'
++macro_revision='2.4.7'
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++ltmain=$ac_aux_dir/ltmain.sh
++
++# Backslashify metacharacters that are still active within
++# double-quoted strings.
++sed_quote_subst='s/\(["`$\\]\)/\\\1/g'
++
++# Same as above, but do not quote variable references.
++double_quote_subst='s/\(["`\\]\)/\\\1/g'
++
++# Sed substitution to delay expansion of an escaped shell variable in a
++# double_quote_subst'ed string.
++delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
++
++# Sed substitution to delay expansion of an escaped single quote.
++delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
++
++# Sed substitution to avoid accidental globbing in evaled expressions
++no_glob_subst='s/\*/\\\*/g'
++
++ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
++ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5
++printf %s "checking how to print strings... " >&6; }
++# Test print first, because it will be a builtin if present.
++if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
++   test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
++  ECHO='print -r --'
++elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
++  ECHO='printf %s\n'
++else
++  # Use this function as a fallback that always works.
++  func_fallback_echo ()
++  {
++    eval 'cat <<_LTECHO_EOF
++$1
++_LTECHO_EOF'
++  }
++  ECHO='func_fallback_echo'
++fi
++
++# func_echo_all arg...
++# Invoke $ECHO with all args, space-separated.
++func_echo_all ()
++{
++    $ECHO ""
++}
++
++case $ECHO in
++  printf*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: printf" >&5
++printf "%s\n" "printf" >&6; } ;;
++  print*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: print -r" >&5
++printf "%s\n" "print -r" >&6; } ;;
++  *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: cat" >&5
++printf "%s\n" "cat" >&6; } ;;
++esac
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
++printf %s "checking for a sed that does not truncate output... " >&6; }
++if test ${ac_cv_path_SED+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++            ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
++     for ac_i in 1 2 3 4 5 6 7; do
++       ac_script="$ac_script$as_nl$ac_script"
++     done
++     echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed
++     { ac_script=; unset ac_script;}
++     if test -z "$SED"; then
++  ac_path_SED_found=false
++  # Loop through the user's path and test for each of PROGNAME-LIST
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_prog in sed gsed
++   do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      ac_path_SED="$as_dir$ac_prog$ac_exec_ext"
++      as_fn_executable_p "$ac_path_SED" || continue
++# Check for GNU ac_path_SED and select it if it is found.
++  # Check for GNU $ac_path_SED
++case `"$ac_path_SED" --version 2>&1` in
++*GNU*)
++  ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;;
++*)
++  ac_count=0
++  printf %s 0123456789 >"conftest.in"
++  while :
++  do
++    cat "conftest.in" "conftest.in" >"conftest.tmp"
++    mv "conftest.tmp" "conftest.in"
++    cp "conftest.in" "conftest.nl"
++    printf "%s\n" '' >> "conftest.nl"
++    "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break
++    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
++    as_fn_arith $ac_count + 1 && ac_count=$as_val
++    if test $ac_count -gt ${ac_path_SED_max-0}; then
++      # Best one so far, save it but keep looking for a better one
++      ac_cv_path_SED="$ac_path_SED"
++      ac_path_SED_max=$ac_count
++    fi
++    # 10*(2^10) chars as input seems more than enough
++    test $ac_count -gt 10 && break
++  done
++  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
++esac
++
++      $ac_path_SED_found && break 3
++    done
++  done
++  done
++IFS=$as_save_IFS
++  if test -z "$ac_cv_path_SED"; then
++    as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5
++  fi
++else
++  ac_cv_path_SED=$SED
++fi
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5
++printf "%s\n" "$ac_cv_path_SED" >&6; }
++ SED="$ac_cv_path_SED"
++  rm -f conftest.sed
++
++test -z "$SED" && SED=sed
++Xsed="$SED -e 1s/^X//"
++
++
++
++
++
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
++printf %s "checking for grep that handles long lines and -e... " >&6; }
++if test ${ac_cv_path_GREP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -z "$GREP"; then
++  ac_path_GREP_found=false
++  # Loop through the user's path and test for each of PROGNAME-LIST
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_prog in grep ggrep
++   do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      ac_path_GREP="$as_dir$ac_prog$ac_exec_ext"
++      as_fn_executable_p "$ac_path_GREP" || continue
++# Check for GNU ac_path_GREP and select it if it is found.
++  # Check for GNU $ac_path_GREP
++case `"$ac_path_GREP" --version 2>&1` in
++*GNU*)
++  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
++*)
++  ac_count=0
++  printf %s 0123456789 >"conftest.in"
++  while :
++  do
++    cat "conftest.in" "conftest.in" >"conftest.tmp"
++    mv "conftest.tmp" "conftest.in"
++    cp "conftest.in" "conftest.nl"
++    printf "%s\n" 'GREP' >> "conftest.nl"
++    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
++    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
++    as_fn_arith $ac_count + 1 && ac_count=$as_val
++    if test $ac_count -gt ${ac_path_GREP_max-0}; then
++      # Best one so far, save it but keep looking for a better one
++      ac_cv_path_GREP="$ac_path_GREP"
++      ac_path_GREP_max=$ac_count
++    fi
++    # 10*(2^10) chars as input seems more than enough
++    test $ac_count -gt 10 && break
++  done
++  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
++esac
++
++      $ac_path_GREP_found && break 3
++    done
++  done
++  done
++IFS=$as_save_IFS
++  if test -z "$ac_cv_path_GREP"; then
++    as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
++  fi
++else
++  ac_cv_path_GREP=$GREP
++fi
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
++printf "%s\n" "$ac_cv_path_GREP" >&6; }
++ GREP="$ac_cv_path_GREP"
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
++printf %s "checking for egrep... " >&6; }
++if test ${ac_cv_path_EGREP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
++   then ac_cv_path_EGREP="$GREP -E"
++   else
++     if test -z "$EGREP"; then
++  ac_path_EGREP_found=false
++  # Loop through the user's path and test for each of PROGNAME-LIST
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_prog in egrep
++   do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext"
++      as_fn_executable_p "$ac_path_EGREP" || continue
++# Check for GNU ac_path_EGREP and select it if it is found.
++  # Check for GNU $ac_path_EGREP
++case `"$ac_path_EGREP" --version 2>&1` in
++*GNU*)
++  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
++*)
++  ac_count=0
++  printf %s 0123456789 >"conftest.in"
++  while :
++  do
++    cat "conftest.in" "conftest.in" >"conftest.tmp"
++    mv "conftest.tmp" "conftest.in"
++    cp "conftest.in" "conftest.nl"
++    printf "%s\n" 'EGREP' >> "conftest.nl"
++    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
++    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
++    as_fn_arith $ac_count + 1 && ac_count=$as_val
++    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
++      # Best one so far, save it but keep looking for a better one
++      ac_cv_path_EGREP="$ac_path_EGREP"
++      ac_path_EGREP_max=$ac_count
++    fi
++    # 10*(2^10) chars as input seems more than enough
++    test $ac_count -gt 10 && break
++  done
++  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
++esac
++
++      $ac_path_EGREP_found && break 3
++    done
++  done
++  done
++IFS=$as_save_IFS
++  if test -z "$ac_cv_path_EGREP"; then
++    as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
++  fi
++else
++  ac_cv_path_EGREP=$EGREP
++fi
++
++   fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
++printf "%s\n" "$ac_cv_path_EGREP" >&6; }
++ EGREP="$ac_cv_path_EGREP"
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5
++printf %s "checking for fgrep... " >&6; }
++if test ${ac_cv_path_FGREP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1
++   then ac_cv_path_FGREP="$GREP -F"
++   else
++     if test -z "$FGREP"; then
++  ac_path_FGREP_found=false
++  # Loop through the user's path and test for each of PROGNAME-LIST
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_prog in fgrep
++   do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      ac_path_FGREP="$as_dir$ac_prog$ac_exec_ext"
++      as_fn_executable_p "$ac_path_FGREP" || continue
++# Check for GNU ac_path_FGREP and select it if it is found.
++  # Check for GNU $ac_path_FGREP
++case `"$ac_path_FGREP" --version 2>&1` in
++*GNU*)
++  ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;;
++*)
++  ac_count=0
++  printf %s 0123456789 >"conftest.in"
++  while :
++  do
++    cat "conftest.in" "conftest.in" >"conftest.tmp"
++    mv "conftest.tmp" "conftest.in"
++    cp "conftest.in" "conftest.nl"
++    printf "%s\n" 'FGREP' >> "conftest.nl"
++    "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break
++    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
++    as_fn_arith $ac_count + 1 && ac_count=$as_val
++    if test $ac_count -gt ${ac_path_FGREP_max-0}; then
++      # Best one so far, save it but keep looking for a better one
++      ac_cv_path_FGREP="$ac_path_FGREP"
++      ac_path_FGREP_max=$ac_count
++    fi
++    # 10*(2^10) chars as input seems more than enough
++    test $ac_count -gt 10 && break
++  done
++  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
++esac
++
++      $ac_path_FGREP_found && break 3
++    done
++  done
++  done
++IFS=$as_save_IFS
++  if test -z "$ac_cv_path_FGREP"; then
++    as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
++  fi
++else
++  ac_cv_path_FGREP=$FGREP
++fi
++
++   fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5
++printf "%s\n" "$ac_cv_path_FGREP" >&6; }
++ FGREP="$ac_cv_path_FGREP"
++
++
++test -z "$GREP" && GREP=grep
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++# Check whether --with-gnu-ld was given.
++if test ${with_gnu_ld+y}
++then :
++  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
++else $as_nop
++  with_gnu_ld=no
++fi
++
++ac_prog=ld
++if test yes = "$GCC"; then
++  # Check if gcc -print-prog-name=ld gives a path.
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
++printf %s "checking for ld used by $CC... " >&6; }
++  case $host in
++  *-*-mingw*)
++    # gcc leaves a trailing carriage return, which upsets mingw
++    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
++  *)
++    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
++  esac
++  case $ac_prog in
++    # Accept absolute paths.
++    [\\/]* | ?:[\\/]*)
++      re_direlt='/[^/][^/]*/\.\./'
++      # Canonicalize the pathname of ld
++      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
++      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
++	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
++      done
++      test -z "$LD" && LD=$ac_prog
++      ;;
++  "")
++    # If it fails, then pretend we aren't using GCC.
++    ac_prog=ld
++    ;;
++  *)
++    # If it is relative, then search for the first ld in PATH.
++    with_gnu_ld=unknown
++    ;;
++  esac
++elif test yes = "$with_gnu_ld"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
++printf %s "checking for GNU ld... " >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
++printf %s "checking for non-GNU ld... " >&6; }
++fi
++if test ${lt_cv_path_LD+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -z "$LD"; then
++  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  for ac_dir in $PATH; do
++    IFS=$lt_save_ifs
++    test -z "$ac_dir" && ac_dir=.
++    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
++      lt_cv_path_LD=$ac_dir/$ac_prog
++      # Check to see if the program is GNU ld.  I'd rather use --version,
++      # but apparently some variants of GNU ld only accept -v.
++      # Break only if it was the GNU/non-GNU ld that we prefer.
++      case `"$lt_cv_path_LD" -v 2>&1 &5
++printf "%s\n" "$LD" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
++printf %s "checking if the linker ($LD) is GNU ld... " >&6; }
++if test ${lt_cv_prog_gnu_ld+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  # I'd rather use --version here, but apparently some GNU lds only accept -v.
++case `$LD -v 2>&1 &5
++printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; }
++with_gnu_ld=$lt_cv_prog_gnu_ld
++
++
++
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
++printf %s "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
++if test ${lt_cv_path_NM+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$NM"; then
++  # Let the user override the test.
++  lt_cv_path_NM=$NM
++else
++  lt_nm_to_check=${ac_tool_prefix}nm
++  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
++    lt_nm_to_check="$lt_nm_to_check nm"
++  fi
++  for lt_tmp_nm in $lt_nm_to_check; do
++    lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
++      IFS=$lt_save_ifs
++      test -z "$ac_dir" && ac_dir=.
++      tmp_nm=$ac_dir/$lt_tmp_nm
++      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then
++	# Check to see if the nm accepts a BSD-compat flag.
++	# Adding the 'sed 1q' prevents false positives on HP-UX, which says:
++	#   nm: unknown option "B" ignored
++	# Tru64's nm complains that /dev/null is an invalid object file
++	# MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty
++	case $build_os in
++	mingw*) lt_bad_file=conftest.nm/nofile ;;
++	*) lt_bad_file=/dev/null ;;
++	esac
++	case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in
++	*$lt_bad_file* | *'Invalid file or object type'*)
++	  lt_cv_path_NM="$tmp_nm -B"
++	  break 2
++	  ;;
++	*)
++	  case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in
++	  */dev/null*)
++	    lt_cv_path_NM="$tmp_nm -p"
++	    break 2
++	    ;;
++	  *)
++	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
++	    continue # so that we can try to find one that supports BSD flags
++	    ;;
++	  esac
++	  ;;
++	esac
++      fi
++    done
++    IFS=$lt_save_ifs
++  done
++  : ${lt_cv_path_NM=no}
++fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5
++printf "%s\n" "$lt_cv_path_NM" >&6; }
++if test no != "$lt_cv_path_NM"; then
++  NM=$lt_cv_path_NM
++else
++  # Didn't find any BSD compatible name lister, look for dumpbin.
++  if test -n "$DUMPBIN"; then :
++    # Let the user override the test.
++  else
++    if test -n "$ac_tool_prefix"; then
++  for ac_prog in dumpbin "link -dump"
++  do
++    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
++set dummy $ac_tool_prefix$ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_DUMPBIN+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$DUMPBIN"; then
++  ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++DUMPBIN=$ac_cv_prog_DUMPBIN
++if test -n "$DUMPBIN"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5
++printf "%s\n" "$DUMPBIN" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++    test -n "$DUMPBIN" && break
++  done
++fi
++if test -z "$DUMPBIN"; then
++  ac_ct_DUMPBIN=$DUMPBIN
++  for ac_prog in dumpbin "link -dump"
++do
++  # Extract the first word of "$ac_prog", so it can be a program name with args.
++set dummy $ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_DUMPBIN+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_DUMPBIN"; then
++  ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_DUMPBIN="$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN
++if test -n "$ac_ct_DUMPBIN"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5
++printf "%s\n" "$ac_ct_DUMPBIN" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  test -n "$ac_ct_DUMPBIN" && break
++done
++
++  if test "x$ac_ct_DUMPBIN" = x; then
++    DUMPBIN=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    DUMPBIN=$ac_ct_DUMPBIN
++  fi
++fi
++
++    case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in
++    *COFF*)
++      DUMPBIN="$DUMPBIN -symbols -headers"
++      ;;
++    *)
++      DUMPBIN=:
++      ;;
++    esac
++  fi
++
++  if test : != "$DUMPBIN"; then
++    NM=$DUMPBIN
++  fi
++fi
++test -z "$NM" && NM=nm
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5
++printf %s "checking the name lister ($NM) interface... " >&6; }
++if test ${lt_cv_nm_interface+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_nm_interface="BSD nm"
++  echo "int some_variable = 0;" > conftest.$ac_ext
++  (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5)
++  (eval "$ac_compile" 2>conftest.err)
++  cat conftest.err >&5
++  (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5)
++  (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
++  cat conftest.err >&5
++  (eval echo "\"\$as_me:$LINENO: output\"" >&5)
++  cat conftest.out >&5
++  if $GREP 'External.*some_variable' conftest.out > /dev/null; then
++    lt_cv_nm_interface="MS dumpbin"
++  fi
++  rm -f conftest*
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5
++printf "%s\n" "$lt_cv_nm_interface" >&6; }
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5
++printf %s "checking whether ln -s works... " >&6; }
++LN_S=$as_ln_s
++if test "$LN_S" = "ln -s"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++printf "%s\n" "yes" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5
++printf "%s\n" "no, using $LN_S" >&6; }
++fi
++
++# find the maximum length of command line arguments
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5
++printf %s "checking the maximum length of command line arguments... " >&6; }
++if test ${lt_cv_sys_max_cmd_len+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++    i=0
++  teststring=ABCD
++
++  case $build_os in
++  msdosdjgpp*)
++    # On DJGPP, this test can blow up pretty badly due to problems in libc
++    # (any single argument exceeding 2000 bytes causes a buffer overrun
++    # during glob expansion).  Even if it were fixed, the result of this
++    # check would be larger than it should be.
++    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
++    ;;
++
++  gnu*)
++    # Under GNU Hurd, this test is not required because there is
++    # no limit to the length of command line arguments.
++    # Libtool will interpret -1 as no limit whatsoever
++    lt_cv_sys_max_cmd_len=-1;
++    ;;
++
++  cygwin* | mingw* | cegcc*)
++    # On Win9x/ME, this test blows up -- it succeeds, but takes
++    # about 5 minutes as the teststring grows exponentially.
++    # Worse, since 9x/ME are not pre-emptively multitasking,
++    # you end up with a "frozen" computer, even though with patience
++    # the test eventually succeeds (with a max line length of 256k).
++    # Instead, let's just punt: use the minimum linelength reported by
++    # all of the supported platforms: 8192 (on NT/2K/XP).
++    lt_cv_sys_max_cmd_len=8192;
++    ;;
++
++  mint*)
++    # On MiNT this can take a long time and run out of memory.
++    lt_cv_sys_max_cmd_len=8192;
++    ;;
++
++  amigaos*)
++    # On AmigaOS with pdksh, this test takes hours, literally.
++    # So we just punt and use a minimum line length of 8192.
++    lt_cv_sys_max_cmd_len=8192;
++    ;;
++
++  bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*)
++    # This has been around since 386BSD, at least.  Likely further.
++    if test -x /sbin/sysctl; then
++      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
++    elif test -x /usr/sbin/sysctl; then
++      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
++    else
++      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
++    fi
++    # And add a safety zone
++    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
++    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
++    ;;
++
++  interix*)
++    # We know the value 262144 and hardcode it with a safety zone (like BSD)
++    lt_cv_sys_max_cmd_len=196608
++    ;;
++
++  os2*)
++    # The test takes a long time on OS/2.
++    lt_cv_sys_max_cmd_len=8192
++    ;;
++
++  osf*)
++    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
++    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
++    # nice to cause kernel panics so lets avoid the loop below.
++    # First set a reasonable default.
++    lt_cv_sys_max_cmd_len=16384
++    #
++    if test -x /sbin/sysconfig; then
++      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
++        *1*) lt_cv_sys_max_cmd_len=-1 ;;
++      esac
++    fi
++    ;;
++  sco3.2v5*)
++    lt_cv_sys_max_cmd_len=102400
++    ;;
++  sysv5* | sco5v6* | sysv4.2uw2*)
++    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
++    if test -n "$kargmax"; then
++      lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[	 ]//'`
++    else
++      lt_cv_sys_max_cmd_len=32768
++    fi
++    ;;
++  *)
++    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
++    if test -n "$lt_cv_sys_max_cmd_len" && \
++       test undefined != "$lt_cv_sys_max_cmd_len"; then
++      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
++      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
++    else
++      # Make teststring a little bigger before we do anything with it.
++      # a 1K string should be a reasonable start.
++      for i in 1 2 3 4 5 6 7 8; do
++        teststring=$teststring$teststring
++      done
++      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
++      # If test is not a shell built-in, we'll probably end up computing a
++      # maximum length that is only half of the actual maximum length, but
++      # we can't tell.
++      while { test X`env echo "$teststring$teststring" 2>/dev/null` \
++	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
++	      test 17 != "$i" # 1/2 MB should be enough
++      do
++        i=`expr $i + 1`
++        teststring=$teststring$teststring
++      done
++      # Only check the string length outside the loop.
++      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
++      teststring=
++      # Add a significant safety factor because C++ compilers can tack on
++      # massive amounts of additional arguments before passing them to the
++      # linker.  It appears as though 1/2 is a usable value.
++      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
++    fi
++    ;;
++  esac
++
++fi
++
++if test -n "$lt_cv_sys_max_cmd_len"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5
++printf "%s\n" "$lt_cv_sys_max_cmd_len" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none" >&5
++printf "%s\n" "none" >&6; }
++fi
++max_cmd_len=$lt_cv_sys_max_cmd_len
++
++
++
++
++
++
++: ${CP="cp -f"}
++: ${MV="mv -f"}
++: ${RM="rm -f"}
++
++if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
++  lt_unset=unset
++else
++  lt_unset=false
++fi
++
++
++
++
++
++# test EBCDIC or ASCII
++case `echo X|tr X '\101'` in
++ A) # ASCII based system
++    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
++  lt_SP2NL='tr \040 \012'
++  lt_NL2SP='tr \015\012 \040\040'
++  ;;
++ *) # EBCDIC based system
++  lt_SP2NL='tr \100 \n'
++  lt_NL2SP='tr \r\n \100\100'
++  ;;
++esac
++
++
++
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5
++printf %s "checking how to convert $build file names to $host format... " >&6; }
++if test ${lt_cv_to_host_file_cmd+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  case $host in
++  *-*-mingw* )
++    case $build in
++      *-*-mingw* ) # actually msys
++        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
++        ;;
++      *-*-cygwin* )
++        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
++        ;;
++      * ) # otherwise, assume *nix
++        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
++        ;;
++    esac
++    ;;
++  *-*-cygwin* )
++    case $build in
++      *-*-mingw* ) # actually msys
++        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
++        ;;
++      *-*-cygwin* )
++        lt_cv_to_host_file_cmd=func_convert_file_noop
++        ;;
++      * ) # otherwise, assume *nix
++        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
++        ;;
++    esac
++    ;;
++  * ) # unhandled hosts (and "normal" native builds)
++    lt_cv_to_host_file_cmd=func_convert_file_noop
++    ;;
++esac
++
++fi
++
++to_host_file_cmd=$lt_cv_to_host_file_cmd
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5
++printf "%s\n" "$lt_cv_to_host_file_cmd" >&6; }
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5
++printf %s "checking how to convert $build file names to toolchain format... " >&6; }
++if test ${lt_cv_to_tool_file_cmd+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  #assume ordinary cross tools, or native build.
++lt_cv_to_tool_file_cmd=func_convert_file_noop
++case $host in
++  *-*-mingw* )
++    case $build in
++      *-*-mingw* ) # actually msys
++        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
++        ;;
++    esac
++    ;;
++esac
++
++fi
++
++to_tool_file_cmd=$lt_cv_to_tool_file_cmd
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5
++printf "%s\n" "$lt_cv_to_tool_file_cmd" >&6; }
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5
++printf %s "checking for $LD option to reload object files... " >&6; }
++if test ${lt_cv_ld_reload_flag+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_ld_reload_flag='-r'
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5
++printf "%s\n" "$lt_cv_ld_reload_flag" >&6; }
++reload_flag=$lt_cv_ld_reload_flag
++case $reload_flag in
++"" | " "*) ;;
++*) reload_flag=" $reload_flag" ;;
++esac
++reload_cmds='$LD$reload_flag -o $output$reload_objs'
++case $host_os in
++  cygwin* | mingw* | pw32* | cegcc*)
++    if test yes != "$GCC"; then
++      reload_cmds=false
++    fi
++    ;;
++  darwin*)
++    if test yes = "$GCC"; then
++      reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs'
++    else
++      reload_cmds='$LD$reload_flag -o $output$reload_objs'
++    fi
++    ;;
++esac
++
++
++
++
++
++
++
++
++
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}file", so it can be a program name with args.
++set dummy ${ac_tool_prefix}file; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_FILECMD+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$FILECMD"; then
++  ac_cv_prog_FILECMD="$FILECMD" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_FILECMD="${ac_tool_prefix}file"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++FILECMD=$ac_cv_prog_FILECMD
++if test -n "$FILECMD"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $FILECMD" >&5
++printf "%s\n" "$FILECMD" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_FILECMD"; then
++  ac_ct_FILECMD=$FILECMD
++  # Extract the first word of "file", so it can be a program name with args.
++set dummy file; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_FILECMD+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_FILECMD"; then
++  ac_cv_prog_ac_ct_FILECMD="$ac_ct_FILECMD" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_FILECMD="file"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_FILECMD=$ac_cv_prog_ac_ct_FILECMD
++if test -n "$ac_ct_FILECMD"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FILECMD" >&5
++printf "%s\n" "$ac_ct_FILECMD" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_FILECMD" = x; then
++    FILECMD=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    FILECMD=$ac_ct_FILECMD
++  fi
++else
++  FILECMD="$ac_cv_prog_FILECMD"
++fi
++
++
++
++
++
++
++
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args.
++set dummy ${ac_tool_prefix}objdump; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_OBJDUMP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$OBJDUMP"; then
++  ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++OBJDUMP=$ac_cv_prog_OBJDUMP
++if test -n "$OBJDUMP"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5
++printf "%s\n" "$OBJDUMP" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_OBJDUMP"; then
++  ac_ct_OBJDUMP=$OBJDUMP
++  # Extract the first word of "objdump", so it can be a program name with args.
++set dummy objdump; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_OBJDUMP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_OBJDUMP"; then
++  ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_OBJDUMP="objdump"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP
++if test -n "$ac_ct_OBJDUMP"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5
++printf "%s\n" "$ac_ct_OBJDUMP" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_OBJDUMP" = x; then
++    OBJDUMP="false"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    OBJDUMP=$ac_ct_OBJDUMP
++  fi
++else
++  OBJDUMP="$ac_cv_prog_OBJDUMP"
++fi
++
++test -z "$OBJDUMP" && OBJDUMP=objdump
++
++
++
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5
++printf %s "checking how to recognize dependent libraries... " >&6; }
++if test ${lt_cv_deplibs_check_method+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_file_magic_cmd='$MAGIC_CMD'
++lt_cv_file_magic_test_file=
++lt_cv_deplibs_check_method='unknown'
++# Need to set the preceding variable on all platforms that support
++# interlibrary dependencies.
++# 'none' -- dependencies not supported.
++# 'unknown' -- same as none, but documents that we really don't know.
++# 'pass_all' -- all dependencies passed with no checks.
++# 'test_compile' -- check by making test program.
++# 'file_magic [[regex]]' -- check by looking for files in library path
++# that responds to the $file_magic_cmd with a given extended regex.
++# If you have 'file' or equivalent on your system and you're not sure
++# whether 'pass_all' will *always* work, you probably want this one.
++
++case $host_os in
++aix[4-9]*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++beos*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++bsdi[45]*)
++  lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
++  lt_cv_file_magic_cmd='$FILECMD -L'
++  lt_cv_file_magic_test_file=/shlib/libc.so
++  ;;
++
++cygwin*)
++  # func_win32_libid is a shell function defined in ltmain.sh
++  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
++  lt_cv_file_magic_cmd='func_win32_libid'
++  ;;
++
++mingw* | pw32*)
++  # Base MSYS/MinGW do not provide the 'file' command needed by
++  # func_win32_libid shell function, so use a weaker test based on 'objdump',
++  # unless we find 'file', for example because we are cross-compiling.
++  if ( file / ) >/dev/null 2>&1; then
++    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
++    lt_cv_file_magic_cmd='func_win32_libid'
++  else
++    # Keep this pattern in sync with the one in func_win32_libid.
++    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
++    lt_cv_file_magic_cmd='$OBJDUMP -f'
++  fi
++  ;;
++
++cegcc*)
++  # use the weaker test based on 'objdump'. See mingw*.
++  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
++  lt_cv_file_magic_cmd='$OBJDUMP -f'
++  ;;
++
++darwin* | rhapsody*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++freebsd* | dragonfly* | midnightbsd*)
++  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
++    case $host_cpu in
++    i*86 )
++      # Not sure whether the presence of OpenBSD here was a mistake.
++      # Let's accept both of them until this is cleared up.
++      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library'
++      lt_cv_file_magic_cmd=$FILECMD
++      lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
++      ;;
++    esac
++  else
++    lt_cv_deplibs_check_method=pass_all
++  fi
++  ;;
++
++haiku*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++hpux10.20* | hpux11*)
++  lt_cv_file_magic_cmd=$FILECMD
++  case $host_cpu in
++  ia64*)
++    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64'
++    lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
++    ;;
++  hppa*64*)
++    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'
++    lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
++    ;;
++  *)
++    lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library'
++    lt_cv_file_magic_test_file=/usr/lib/libc.sl
++    ;;
++  esac
++  ;;
++
++interix[3-9]*)
++  # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
++  lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$'
++  ;;
++
++irix5* | irix6* | nonstopux*)
++  case $LD in
++  *-32|*"-32 ") libmagic=32-bit;;
++  *-n32|*"-n32 ") libmagic=N32;;
++  *-64|*"-64 ") libmagic=64-bit;;
++  *) libmagic=never-match;;
++  esac
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++# This must be glibc/ELF.
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++netbsd*)
++  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
++    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
++  else
++    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$'
++  fi
++  ;;
++
++newos6*)
++  lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)'
++  lt_cv_file_magic_cmd=$FILECMD
++  lt_cv_file_magic_test_file=/usr/lib/libnls.so
++  ;;
++
++*nto* | *qnx*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++openbsd* | bitrig*)
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$'
++  else
++    lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
++  fi
++  ;;
++
++osf3* | osf4* | osf5*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++rdos*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++solaris*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
++sysv4 | sysv4.3*)
++  case $host_vendor in
++  motorola)
++    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]'
++    lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
++    ;;
++  ncr)
++    lt_cv_deplibs_check_method=pass_all
++    ;;
++  sequent)
++    lt_cv_file_magic_cmd='/bin/file'
++    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
++    ;;
++  sni)
++    lt_cv_file_magic_cmd='/bin/file'
++    lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
++    lt_cv_file_magic_test_file=/lib/libc.so
++    ;;
++  siemens)
++    lt_cv_deplibs_check_method=pass_all
++    ;;
++  pc)
++    lt_cv_deplibs_check_method=pass_all
++    ;;
++  esac
++  ;;
++
++tpf*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++os2*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++esac
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5
++printf "%s\n" "$lt_cv_deplibs_check_method" >&6; }
++
++file_magic_glob=
++want_nocaseglob=no
++if test "$build" = "$host"; then
++  case $host_os in
++  mingw* | pw32*)
++    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
++      want_nocaseglob=yes
++    else
++      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"`
++    fi
++    ;;
++  esac
++fi
++
++file_magic_cmd=$lt_cv_file_magic_cmd
++deplibs_check_method=$lt_cv_deplibs_check_method
++test -z "$deplibs_check_method" && deplibs_check_method=unknown
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args.
++set dummy ${ac_tool_prefix}dlltool; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_DLLTOOL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$DLLTOOL"; then
++  ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++DLLTOOL=$ac_cv_prog_DLLTOOL
++if test -n "$DLLTOOL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5
++printf "%s\n" "$DLLTOOL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_DLLTOOL"; then
++  ac_ct_DLLTOOL=$DLLTOOL
++  # Extract the first word of "dlltool", so it can be a program name with args.
++set dummy dlltool; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_DLLTOOL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_DLLTOOL"; then
++  ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_DLLTOOL="dlltool"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL
++if test -n "$ac_ct_DLLTOOL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5
++printf "%s\n" "$ac_ct_DLLTOOL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_DLLTOOL" = x; then
++    DLLTOOL="false"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    DLLTOOL=$ac_ct_DLLTOOL
++  fi
++else
++  DLLTOOL="$ac_cv_prog_DLLTOOL"
++fi
++
++test -z "$DLLTOOL" && DLLTOOL=dlltool
++
++
++
++
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5
++printf %s "checking how to associate runtime and link libraries... " >&6; }
++if test ${lt_cv_sharedlib_from_linklib_cmd+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_sharedlib_from_linklib_cmd='unknown'
++
++case $host_os in
++cygwin* | mingw* | pw32* | cegcc*)
++  # two different shell functions defined in ltmain.sh;
++  # decide which one to use based on capabilities of $DLLTOOL
++  case `$DLLTOOL --help 2>&1` in
++  *--identify-strict*)
++    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
++    ;;
++  *)
++    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
++    ;;
++  esac
++  ;;
++*)
++  # fallback: assume linklib IS sharedlib
++  lt_cv_sharedlib_from_linklib_cmd=$ECHO
++  ;;
++esac
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5
++printf "%s\n" "$lt_cv_sharedlib_from_linklib_cmd" >&6; }
++sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
++test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
++
++
++
++
++
++
++
++if test -n "$ac_tool_prefix"; then
++  for ac_prog in ar
++  do
++    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
++set dummy $ac_tool_prefix$ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_AR+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$AR"; then
++  ac_cv_prog_AR="$AR" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_AR="$ac_tool_prefix$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++AR=$ac_cv_prog_AR
++if test -n "$AR"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
++printf "%s\n" "$AR" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++    test -n "$AR" && break
++  done
++fi
++if test -z "$AR"; then
++  ac_ct_AR=$AR
++  for ac_prog in ar
++do
++  # Extract the first word of "$ac_prog", so it can be a program name with args.
++set dummy $ac_prog; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_AR+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_AR"; then
++  ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_AR="$ac_prog"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_AR=$ac_cv_prog_ac_ct_AR
++if test -n "$ac_ct_AR"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
++printf "%s\n" "$ac_ct_AR" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  test -n "$ac_ct_AR" && break
++done
++
++  if test "x$ac_ct_AR" = x; then
++    AR="false"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    AR=$ac_ct_AR
++  fi
++fi
++
++: ${AR=ar}
++
++
++
++
++
++
++# Use ARFLAGS variable as AR's operation code to sync the variable naming with
++# Automake.  If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have
++# higher priority because thats what people were doing historically (setting
++# ARFLAGS for automake and AR_FLAGS for libtool).  FIXME: Make the AR_FLAGS
++# variable obsoleted/removed.
++
++test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr}
++lt_ar_flags=$AR_FLAGS
++
++
++
++
++
++
++# Make AR_FLAGS overridable by 'make ARFLAGS='.  Don't try to run-time override
++# by AR_FLAGS because that was never working and AR_FLAGS is about to die.
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5
++printf %s "checking for archiver @FILE support... " >&6; }
++if test ${lt_cv_ar_at_file+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_ar_at_file=no
++   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  echo conftest.$ac_objext > conftest.lst
++      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5'
++      { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
++  (eval $lt_ar_try) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++      if test 0 -eq "$ac_status"; then
++	# Ensure the archiver fails upon bogus file names.
++	rm -f conftest.$ac_objext libconftest.a
++	{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
++  (eval $lt_ar_try) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++	if test 0 -ne "$ac_status"; then
++          lt_cv_ar_at_file=@
++        fi
++      fi
++      rm -f conftest.* libconftest.a
++
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5
++printf "%s\n" "$lt_cv_ar_at_file" >&6; }
++
++if test no = "$lt_cv_ar_at_file"; then
++  archiver_list_spec=
++else
++  archiver_list_spec=$lt_cv_ar_at_file
++fi
++
++
++
++
++
++
++
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
++set dummy ${ac_tool_prefix}strip; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_STRIP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$STRIP"; then
++  ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_STRIP="${ac_tool_prefix}strip"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++STRIP=$ac_cv_prog_STRIP
++if test -n "$STRIP"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
++printf "%s\n" "$STRIP" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_STRIP"; then
++  ac_ct_STRIP=$STRIP
++  # Extract the first word of "strip", so it can be a program name with args.
++set dummy strip; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_STRIP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_STRIP"; then
++  ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_STRIP="strip"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
++if test -n "$ac_ct_STRIP"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
++printf "%s\n" "$ac_ct_STRIP" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_STRIP" = x; then
++    STRIP=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    STRIP=$ac_ct_STRIP
++  fi
++else
++  STRIP="$ac_cv_prog_STRIP"
++fi
++
++test -z "$STRIP" && STRIP=:
++
++
++
++
++
++
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
++set dummy ${ac_tool_prefix}ranlib; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_RANLIB+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$RANLIB"; then
++  ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++RANLIB=$ac_cv_prog_RANLIB
++if test -n "$RANLIB"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
++printf "%s\n" "$RANLIB" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_RANLIB"; then
++  ac_ct_RANLIB=$RANLIB
++  # Extract the first word of "ranlib", so it can be a program name with args.
++set dummy ranlib; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_RANLIB+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_RANLIB"; then
++  ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_RANLIB="ranlib"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
++if test -n "$ac_ct_RANLIB"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
++printf "%s\n" "$ac_ct_RANLIB" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_RANLIB" = x; then
++    RANLIB=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    RANLIB=$ac_ct_RANLIB
++  fi
++else
++  RANLIB="$ac_cv_prog_RANLIB"
++fi
++
++test -z "$RANLIB" && RANLIB=:
++
++
++
++
++
++
++# Determine commands to create old-style static archives.
++old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
++old_postinstall_cmds='chmod 644 $oldlib'
++old_postuninstall_cmds=
++
++if test -n "$RANLIB"; then
++  case $host_os in
++  bitrig* | openbsd*)
++    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
++    ;;
++  *)
++    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
++    ;;
++  esac
++  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
++fi
++
++case $host_os in
++  darwin*)
++    lock_old_archive_extraction=yes ;;
++  *)
++    lock_old_archive_extraction=no ;;
++esac
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++# If no C compiler was specified, use CC.
++LTCC=${LTCC-"$CC"}
++
++# If no C compiler flags were specified, use CFLAGS.
++LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
++
++# Allow CC to be a program name with arguments.
++compiler=$CC
++
++
++# Check for command to grab the raw symbol name followed by C symbol from nm.
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5
++printf %s "checking command to parse $NM output from $compiler object... " >&6; }
++if test ${lt_cv_sys_global_symbol_pipe+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++
++# These are sane defaults that work on at least a few old systems.
++# [They come from Ultrix.  What could be older than Ultrix?!! ;)]
++
++# Character class describing NM global symbol codes.
++symcode='[BCDEGRST]'
++
++# Regexp to match symbols that can be accessed directly from C.
++sympat='\([_A-Za-z][_A-Za-z0-9]*\)'
++
++# Define system-specific variables.
++case $host_os in
++aix*)
++  symcode='[BCDT]'
++  ;;
++cygwin* | mingw* | pw32* | cegcc*)
++  symcode='[ABCDGISTW]'
++  ;;
++hpux*)
++  if test ia64 = "$host_cpu"; then
++    symcode='[ABCDEGRST]'
++  fi
++  ;;
++irix* | nonstopux*)
++  symcode='[BCDEGRST]'
++  ;;
++osf*)
++  symcode='[BCDEGQRST]'
++  ;;
++solaris*)
++  symcode='[BDRT]'
++  ;;
++sco3.2v5*)
++  symcode='[DT]'
++  ;;
++sysv4.2uw2*)
++  symcode='[DT]'
++  ;;
++sysv5* | sco5v6* | unixware* | OpenUNIX*)
++  symcode='[ABDT]'
++  ;;
++sysv4)
++  symcode='[DFNSTU]'
++  ;;
++esac
++
++# If we're using GNU nm, then use its standard symbol codes.
++case `$NM -V 2>&1` in
++*GNU* | *'with BFD'*)
++  symcode='[ABCDGIRSTW]' ;;
++esac
++
++if test "$lt_cv_nm_interface" = "MS dumpbin"; then
++  # Gets list of data symbols to import.
++  lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'"
++  # Adjust the below global symbol transforms to fixup imported variables.
++  lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'"
++  lt_c_name_hook=" -e 's/^I .* \(.*\)$/  {\"\1\", (void *) 0},/p'"
++  lt_c_name_lib_hook="\
++  -e 's/^I .* \(lib.*\)$/  {\"\1\", (void *) 0},/p'\
++  -e 's/^I .* \(.*\)$/  {\"lib\1\", (void *) 0},/p'"
++else
++  # Disable hooks by default.
++  lt_cv_sys_global_symbol_to_import=
++  lt_cdecl_hook=
++  lt_c_name_hook=
++  lt_c_name_lib_hook=
++fi
++
++# Transform an extracted symbol line into a proper C declaration.
++# Some systems (esp. on ia64) link data and code symbols differently,
++# so use this general approach.
++lt_cv_sys_global_symbol_to_cdecl="$SED -n"\
++$lt_cdecl_hook\
++" -e 's/^T .* \(.*\)$/extern int \1();/p'"\
++" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'"
++
++# Transform an extracted symbol line into symbol name and symbol address
++lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\
++$lt_c_name_hook\
++" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
++" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/p'"
++
++# Transform an extracted symbol line into symbol name with lib prefix and
++# symbol address.
++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\
++$lt_c_name_lib_hook\
++" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
++" -e 's/^$symcode$symcode* .* \(lib.*\)$/  {\"\1\", (void *) \&\1},/p'"\
++" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"lib\1\", (void *) \&\1},/p'"
++
++# Handle CRLF in mingw tool chain
++opt_cr=
++case $build_os in
++mingw*)
++  opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
++  ;;
++esac
++
++# Try without a prefix underscore, then with it.
++for ac_symprfx in "" "_"; do
++
++  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
++  symxfrm="\\1 $ac_symprfx\\2 \\2"
++
++  # Write the raw and C identifiers.
++  if test "$lt_cv_nm_interface" = "MS dumpbin"; then
++    # Fake it for dumpbin and say T for any non-static function,
++    # D for any global variable and I for any imported variable.
++    # Also find C++ and __fastcall symbols from MSVC++ or ICC,
++    # which start with @ or ?.
++    lt_cv_sys_global_symbol_pipe="$AWK '"\
++"     {last_section=section; section=\$ 3};"\
++"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
++"     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
++"     /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\
++"     /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\
++"     /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\
++"     \$ 0!~/External *\|/{next};"\
++"     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
++"     {if(hide[section]) next};"\
++"     {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\
++"     {split(\$ 0,a,/\||\r/); split(a[2],s)};"\
++"     s[1]~/^[@?]/{print f,s[1],s[1]; next};"\
++"     s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\
++"     ' prfx=^$ac_symprfx"
++  else
++    lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[	 ]\($symcode$symcode*\)[	 ][	 ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
++  fi
++  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'"
++
++  # Check to see that the pipe works correctly.
++  pipe_works=no
++
++  rm -f conftest*
++  cat > conftest.$ac_ext <<_LT_EOF
++#ifdef __cplusplus
++extern "C" {
++#endif
++char nm_test_var;
++void nm_test_func(void);
++void nm_test_func(void){}
++#ifdef __cplusplus
++}
++#endif
++int main(){nm_test_var='a';nm_test_func();return(0);}
++_LT_EOF
++
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then
++    # Now try to grab the symbols.
++    nlist=conftest.nm
++    if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5
++  (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && test -s "$nlist"; then
++      # Try sorting and uniquifying the output.
++      if sort "$nlist" | uniq > "$nlist"T; then
++	mv -f "$nlist"T "$nlist"
++      else
++	rm -f "$nlist"T
++      fi
++
++      # Make sure that we snagged all the symbols we need.
++      if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
++	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
++	  cat <<_LT_EOF > conftest.$ac_ext
++/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
++#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
++/* DATA imports from DLLs on WIN32 can't be const, because runtime
++   relocations are performed -- see ld's documentation on pseudo-relocs.  */
++# define LT_DLSYM_CONST
++#elif defined __osf__
++/* This system does not cope well with relocations in const data.  */
++# define LT_DLSYM_CONST
++#else
++# define LT_DLSYM_CONST const
++#endif
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++_LT_EOF
++	  # Now generate the symbol file.
++	  eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
++
++	  cat <<_LT_EOF >> conftest.$ac_ext
++
++/* The mapping between symbol names and symbols.  */
++LT_DLSYM_CONST struct {
++  const char *name;
++  void       *address;
++}
++lt__PROGRAM__LTX_preloaded_symbols[] =
++{
++  { "@PROGRAM@", (void *) 0 },
++_LT_EOF
++	  $SED "s/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
++	  cat <<\_LT_EOF >> conftest.$ac_ext
++  {0, (void *) 0}
++};
++
++/* This works around a problem in FreeBSD linker */
++#ifdef FREEBSD_WORKAROUND
++static const void *lt_preloaded_setup() {
++  return lt__PROGRAM__LTX_preloaded_symbols;
++}
++#endif
++
++#ifdef __cplusplus
++}
++#endif
++_LT_EOF
++	  # Now try linking the two files.
++	  mv conftest.$ac_objext conftstm.$ac_objext
++	  lt_globsym_save_LIBS=$LIBS
++	  lt_globsym_save_CFLAGS=$CFLAGS
++	  LIBS=conftstm.$ac_objext
++	  CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag"
++	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
++  (eval $ac_link) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && test -s conftest$ac_exeext; then
++	    pipe_works=yes
++	  fi
++	  LIBS=$lt_globsym_save_LIBS
++	  CFLAGS=$lt_globsym_save_CFLAGS
++	else
++	  echo "cannot find nm_test_func in $nlist" >&5
++	fi
++      else
++	echo "cannot find nm_test_var in $nlist" >&5
++      fi
++    else
++      echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5
++    fi
++  else
++    echo "$progname: failed program was:" >&5
++    cat conftest.$ac_ext >&5
++  fi
++  rm -rf conftest* conftst*
++
++  # Do not use the global_symbol_pipe unless it works.
++  if test yes = "$pipe_works"; then
++    break
++  else
++    lt_cv_sys_global_symbol_pipe=
++  fi
++done
++
++fi
++
++if test -z "$lt_cv_sys_global_symbol_pipe"; then
++  lt_cv_sys_global_symbol_to_cdecl=
++fi
++if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: failed" >&5
++printf "%s\n" "failed" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ok" >&5
++printf "%s\n" "ok" >&6; }
++fi
++
++# Response file support.
++if test "$lt_cv_nm_interface" = "MS dumpbin"; then
++  nm_file_list_spec='@'
++elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then
++  nm_file_list_spec='@'
++fi
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5
++printf %s "checking for sysroot... " >&6; }
++
++# Check whether --with-sysroot was given.
++if test ${with_sysroot+y}
++then :
++  withval=$with_sysroot;
++else $as_nop
++  with_sysroot=no
++fi
++
++
++lt_sysroot=
++case $with_sysroot in #(
++ yes)
++   if test yes = "$GCC"; then
++     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
++   fi
++   ;; #(
++ /*)
++   lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"`
++   ;; #(
++ no|'')
++   ;; #(
++ *)
++   { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5
++printf "%s\n" "$with_sysroot" >&6; }
++   as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5
++   ;;
++esac
++
++ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5
++printf "%s\n" "${lt_sysroot:-no}" >&6; }
++
++
++
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5
++printf %s "checking for a working dd... " >&6; }
++if test ${ac_cv_path_lt_DD+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  printf 0123456789abcdef0123456789abcdef >conftest.i
++cat conftest.i conftest.i >conftest2.i
++: ${lt_DD:=$DD}
++if test -z "$lt_DD"; then
++  ac_path_lt_DD_found=false
++  # Loop through the user's path and test for each of PROGNAME-LIST
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_prog in dd
++   do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      ac_path_lt_DD="$as_dir$ac_prog$ac_exec_ext"
++      as_fn_executable_p "$ac_path_lt_DD" || continue
++if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
++  cmp -s conftest.i conftest.out \
++  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
++fi
++      $ac_path_lt_DD_found && break 3
++    done
++  done
++  done
++IFS=$as_save_IFS
++  if test -z "$ac_cv_path_lt_DD"; then
++    :
++  fi
++else
++  ac_cv_path_lt_DD=$lt_DD
++fi
++
++rm -f conftest.i conftest2.i conftest.out
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5
++printf "%s\n" "$ac_cv_path_lt_DD" >&6; }
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5
++printf %s "checking how to truncate binary pipes... " >&6; }
++if test ${lt_cv_truncate_bin+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  printf 0123456789abcdef0123456789abcdef >conftest.i
++cat conftest.i conftest.i >conftest2.i
++lt_cv_truncate_bin=
++if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
++  cmp -s conftest.i conftest.out \
++  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
++fi
++rm -f conftest.i conftest2.i conftest.out
++test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5
++printf "%s\n" "$lt_cv_truncate_bin" >&6; }
++
++
++
++
++
++
++
++# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
++func_cc_basename ()
++{
++    for cc_temp in $*""; do
++      case $cc_temp in
++        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
++        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
++        \-*) ;;
++        *) break;;
++      esac
++    done
++    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
++}
++
++# Check whether --enable-libtool-lock was given.
++if test ${enable_libtool_lock+y}
++then :
++  enableval=$enable_libtool_lock;
++fi
++
++test no = "$enable_libtool_lock" || enable_libtool_lock=yes
++
++# Some flags need to be propagated to the compiler or linker for good
++# libtool support.
++case $host in
++ia64-*-hpux*)
++  # Find out what ABI is being produced by ac_compile, and set mode
++  # options accordingly.
++  echo 'int i;' > conftest.$ac_ext
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then
++    case `$FILECMD conftest.$ac_objext` in
++      *ELF-32*)
++	HPUX_IA64_MODE=32
++	;;
++      *ELF-64*)
++	HPUX_IA64_MODE=64
++	;;
++    esac
++  fi
++  rm -rf conftest*
++  ;;
++*-*-irix6*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.
++  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then
++    if test yes = "$lt_cv_prog_gnu_ld"; then
++      case `$FILECMD conftest.$ac_objext` in
++	*32-bit*)
++	  LD="${LD-ld} -melf32bsmip"
++	  ;;
++	*N32*)
++	  LD="${LD-ld} -melf32bmipn32"
++	  ;;
++	*64-bit*)
++	  LD="${LD-ld} -melf64bmip"
++	;;
++      esac
++    else
++      case `$FILECMD conftest.$ac_objext` in
++	*32-bit*)
++	  LD="${LD-ld} -32"
++	  ;;
++	*N32*)
++	  LD="${LD-ld} -n32"
++	  ;;
++	*64-bit*)
++	  LD="${LD-ld} -64"
++	  ;;
++      esac
++    fi
++  fi
++  rm -rf conftest*
++  ;;
++
++mips64*-*linux*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.
++  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then
++    emul=elf
++    case `$FILECMD conftest.$ac_objext` in
++      *32-bit*)
++	emul="${emul}32"
++	;;
++      *64-bit*)
++	emul="${emul}64"
++	;;
++    esac
++    case `$FILECMD conftest.$ac_objext` in
++      *MSB*)
++	emul="${emul}btsmip"
++	;;
++      *LSB*)
++	emul="${emul}ltsmip"
++	;;
++    esac
++    case `$FILECMD conftest.$ac_objext` in
++      *N32*)
++	emul="${emul}n32"
++	;;
++    esac
++    LD="${LD-ld} -m $emul"
++  fi
++  rm -rf conftest*
++  ;;
++
++x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \
++s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.  Note that the listed cases only cover the
++  # situations where additional linker options are needed (such as when
++  # doing 32-bit compilation for a host where ld defaults to 64-bit, or
++  # vice versa); the common cases where no linker options are needed do
++  # not appear in the list.
++  echo 'int i;' > conftest.$ac_ext
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then
++    case `$FILECMD conftest.o` in
++      *32-bit*)
++	case $host in
++	  x86_64-*kfreebsd*-gnu)
++	    LD="${LD-ld} -m elf_i386_fbsd"
++	    ;;
++	  x86_64-*linux*)
++	    case `$FILECMD conftest.o` in
++	      *x86-64*)
++		LD="${LD-ld} -m elf32_x86_64"
++		;;
++	      *)
++		LD="${LD-ld} -m elf_i386"
++		;;
++	    esac
++	    ;;
++	  powerpc64le-*linux*)
++	    LD="${LD-ld} -m elf32lppclinux"
++	    ;;
++	  powerpc64-*linux*)
++	    LD="${LD-ld} -m elf32ppclinux"
++	    ;;
++	  s390x-*linux*)
++	    LD="${LD-ld} -m elf_s390"
++	    ;;
++	  sparc64-*linux*)
++	    LD="${LD-ld} -m elf32_sparc"
++	    ;;
++	esac
++	;;
++      *64-bit*)
++	case $host in
++	  x86_64-*kfreebsd*-gnu)
++	    LD="${LD-ld} -m elf_x86_64_fbsd"
++	    ;;
++	  x86_64-*linux*)
++	    LD="${LD-ld} -m elf_x86_64"
++	    ;;
++	  powerpcle-*linux*)
++	    LD="${LD-ld} -m elf64lppc"
++	    ;;
++	  powerpc-*linux*)
++	    LD="${LD-ld} -m elf64ppc"
++	    ;;
++	  s390*-*linux*|s390*-*tpf*)
++	    LD="${LD-ld} -m elf64_s390"
++	    ;;
++	  sparc*-*linux*)
++	    LD="${LD-ld} -m elf64_sparc"
++	    ;;
++	esac
++	;;
++    esac
++  fi
++  rm -rf conftest*
++  ;;
++
++*-*-sco3.2v5*)
++  # On SCO OpenServer 5, we need -belf to get full-featured binaries.
++  SAVE_CFLAGS=$CFLAGS
++  CFLAGS="$CFLAGS -belf"
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5
++printf %s "checking whether the C compiler needs -belf... " >&6; }
++if test ${lt_cv_cc_needs_belf+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++     cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  lt_cv_cc_needs_belf=yes
++else $as_nop
++  lt_cv_cc_needs_belf=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++     ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5
++printf "%s\n" "$lt_cv_cc_needs_belf" >&6; }
++  if test yes != "$lt_cv_cc_needs_belf"; then
++    # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
++    CFLAGS=$SAVE_CFLAGS
++  fi
++  ;;
++*-*solaris*)
++  # Find out what ABI is being produced by ac_compile, and set linker
++  # options accordingly.
++  echo 'int i;' > conftest.$ac_ext
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then
++    case `$FILECMD conftest.o` in
++    *64-bit*)
++      case $lt_cv_prog_gnu_ld in
++      yes*)
++        case $host in
++        i?86-*-solaris*|x86_64-*-solaris*)
++          LD="${LD-ld} -m elf_x86_64"
++          ;;
++        sparc*-*-solaris*)
++          LD="${LD-ld} -m elf64_sparc"
++          ;;
++        esac
++        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
++        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
++          LD=${LD-ld}_sol2
++        fi
++        ;;
++      *)
++	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
++	  LD="${LD-ld} -64"
++	fi
++	;;
++      esac
++      ;;
++    esac
++  fi
++  rm -rf conftest*
++  ;;
++esac
++
++need_locks=$enable_libtool_lock
++
++if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args.
++set dummy ${ac_tool_prefix}mt; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_MANIFEST_TOOL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$MANIFEST_TOOL"; then
++  ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL
++if test -n "$MANIFEST_TOOL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5
++printf "%s\n" "$MANIFEST_TOOL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_MANIFEST_TOOL"; then
++  ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL
++  # Extract the first word of "mt", so it can be a program name with args.
++set dummy mt; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_MANIFEST_TOOL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_MANIFEST_TOOL"; then
++  ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_MANIFEST_TOOL="mt"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL
++if test -n "$ac_ct_MANIFEST_TOOL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5
++printf "%s\n" "$ac_ct_MANIFEST_TOOL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_MANIFEST_TOOL" = x; then
++    MANIFEST_TOOL=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL
++  fi
++else
++  MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL"
++fi
++
++test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5
++printf %s "checking if $MANIFEST_TOOL is a manifest tool... " >&6; }
++if test ${lt_cv_path_mainfest_tool+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_path_mainfest_tool=no
++  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5
++  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
++  cat conftest.err >&5
++  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
++    lt_cv_path_mainfest_tool=yes
++  fi
++  rm -f conftest*
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5
++printf "%s\n" "$lt_cv_path_mainfest_tool" >&6; }
++if test yes != "$lt_cv_path_mainfest_tool"; then
++  MANIFEST_TOOL=:
++fi
++
++
++
++
++
++
++  case $host_os in
++    rhapsody* | darwin*)
++    if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args.
++set dummy ${ac_tool_prefix}dsymutil; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_DSYMUTIL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$DSYMUTIL"; then
++  ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++DSYMUTIL=$ac_cv_prog_DSYMUTIL
++if test -n "$DSYMUTIL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5
++printf "%s\n" "$DSYMUTIL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_DSYMUTIL"; then
++  ac_ct_DSYMUTIL=$DSYMUTIL
++  # Extract the first word of "dsymutil", so it can be a program name with args.
++set dummy dsymutil; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_DSYMUTIL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_DSYMUTIL"; then
++  ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_DSYMUTIL="dsymutil"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL
++if test -n "$ac_ct_DSYMUTIL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5
++printf "%s\n" "$ac_ct_DSYMUTIL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_DSYMUTIL" = x; then
++    DSYMUTIL=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    DSYMUTIL=$ac_ct_DSYMUTIL
++  fi
++else
++  DSYMUTIL="$ac_cv_prog_DSYMUTIL"
++fi
++
++    if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args.
++set dummy ${ac_tool_prefix}nmedit; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_NMEDIT+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$NMEDIT"; then
++  ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++NMEDIT=$ac_cv_prog_NMEDIT
++if test -n "$NMEDIT"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5
++printf "%s\n" "$NMEDIT" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_NMEDIT"; then
++  ac_ct_NMEDIT=$NMEDIT
++  # Extract the first word of "nmedit", so it can be a program name with args.
++set dummy nmedit; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_NMEDIT+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_NMEDIT"; then
++  ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_NMEDIT="nmedit"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT
++if test -n "$ac_ct_NMEDIT"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5
++printf "%s\n" "$ac_ct_NMEDIT" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_NMEDIT" = x; then
++    NMEDIT=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    NMEDIT=$ac_ct_NMEDIT
++  fi
++else
++  NMEDIT="$ac_cv_prog_NMEDIT"
++fi
++
++    if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args.
++set dummy ${ac_tool_prefix}lipo; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_LIPO+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$LIPO"; then
++  ac_cv_prog_LIPO="$LIPO" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_LIPO="${ac_tool_prefix}lipo"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++LIPO=$ac_cv_prog_LIPO
++if test -n "$LIPO"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5
++printf "%s\n" "$LIPO" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_LIPO"; then
++  ac_ct_LIPO=$LIPO
++  # Extract the first word of "lipo", so it can be a program name with args.
++set dummy lipo; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_LIPO+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_LIPO"; then
++  ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_LIPO="lipo"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO
++if test -n "$ac_ct_LIPO"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5
++printf "%s\n" "$ac_ct_LIPO" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_LIPO" = x; then
++    LIPO=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    LIPO=$ac_ct_LIPO
++  fi
++else
++  LIPO="$ac_cv_prog_LIPO"
++fi
++
++    if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args.
++set dummy ${ac_tool_prefix}otool; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_OTOOL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$OTOOL"; then
++  ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_OTOOL="${ac_tool_prefix}otool"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++OTOOL=$ac_cv_prog_OTOOL
++if test -n "$OTOOL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5
++printf "%s\n" "$OTOOL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_OTOOL"; then
++  ac_ct_OTOOL=$OTOOL
++  # Extract the first word of "otool", so it can be a program name with args.
++set dummy otool; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_OTOOL+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_OTOOL"; then
++  ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_OTOOL="otool"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL
++if test -n "$ac_ct_OTOOL"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5
++printf "%s\n" "$ac_ct_OTOOL" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_OTOOL" = x; then
++    OTOOL=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    OTOOL=$ac_ct_OTOOL
++  fi
++else
++  OTOOL="$ac_cv_prog_OTOOL"
++fi
++
++    if test -n "$ac_tool_prefix"; then
++  # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args.
++set dummy ${ac_tool_prefix}otool64; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_OTOOL64+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$OTOOL64"; then
++  ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++OTOOL64=$ac_cv_prog_OTOOL64
++if test -n "$OTOOL64"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5
++printf "%s\n" "$OTOOL64" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++fi
++if test -z "$ac_cv_prog_OTOOL64"; then
++  ac_ct_OTOOL64=$OTOOL64
++  # Extract the first word of "otool64", so it can be a program name with args.
++set dummy otool64; ac_word=$2
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++printf %s "checking for $ac_word... " >&6; }
++if test ${ac_cv_prog_ac_ct_OTOOL64+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -n "$ac_ct_OTOOL64"; then
++  ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test.
++else
++as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    for ac_exec_ext in '' $ac_executable_extensions; do
++  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_OTOOL64="otool64"
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    break 2
++  fi
++done
++  done
++IFS=$as_save_IFS
++
++fi
++fi
++ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64
++if test -n "$ac_ct_OTOOL64"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5
++printf "%s\n" "$ac_ct_OTOOL64" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++  if test "x$ac_ct_OTOOL64" = x; then
++    OTOOL64=":"
++  else
++    case $cross_compiling:$ac_tool_warned in
++yes:)
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++ac_tool_warned=yes ;;
++esac
++    OTOOL64=$ac_ct_OTOOL64
++  fi
++else
++  OTOOL64="$ac_cv_prog_OTOOL64"
++fi
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5
++printf %s "checking for -single_module linker flag... " >&6; }
++if test ${lt_cv_apple_cc_single_mod+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_apple_cc_single_mod=no
++      if test -z "$LT_MULTI_MODULE"; then
++	# By default we will add the -single_module flag. You can override
++	# by either setting the environment variable LT_MULTI_MODULE
++	# non-empty at configure time, or by adding -multi_module to the
++	# link flags.
++	rm -rf libconftest.dylib*
++	echo "int foo(void){return 1;}" > conftest.c
++	echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
++-dynamiclib -Wl,-single_module conftest.c" >&5
++	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
++	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
++        _lt_result=$?
++	# If there is a non-empty error log, and "single_module"
++	# appears in it, assume the flag caused a linker warning
++        if test -s conftest.err && $GREP single_module conftest.err; then
++	  cat conftest.err >&5
++	# Otherwise, if the output was created with a 0 exit code from
++	# the compiler, it worked.
++	elif test -f libconftest.dylib && test 0 = "$_lt_result"; then
++	  lt_cv_apple_cc_single_mod=yes
++	else
++	  cat conftest.err >&5
++	fi
++	rm -rf libconftest.dylib*
++	rm -f conftest.*
++      fi
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5
++printf "%s\n" "$lt_cv_apple_cc_single_mod" >&6; }
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5
++printf %s "checking for -exported_symbols_list linker flag... " >&6; }
++if test ${lt_cv_ld_exported_symbols_list+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_ld_exported_symbols_list=no
++      save_LDFLAGS=$LDFLAGS
++      echo "_main" > conftest.sym
++      LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
++      cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  lt_cv_ld_exported_symbols_list=yes
++else $as_nop
++  lt_cv_ld_exported_symbols_list=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++	LDFLAGS=$save_LDFLAGS
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5
++printf "%s\n" "$lt_cv_ld_exported_symbols_list" >&6; }
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5
++printf %s "checking for -force_load linker flag... " >&6; }
++if test ${lt_cv_ld_force_load+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_ld_force_load=no
++      cat > conftest.c << _LT_EOF
++int forced_loaded() { return 2;}
++_LT_EOF
++      echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5
++      $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5
++      echo "$AR $AR_FLAGS libconftest.a conftest.o" >&5
++      $AR $AR_FLAGS libconftest.a conftest.o 2>&5
++      echo "$RANLIB libconftest.a" >&5
++      $RANLIB libconftest.a 2>&5
++      cat > conftest.c << _LT_EOF
++int main() { return 0;}
++_LT_EOF
++      echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5
++      $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
++      _lt_result=$?
++      if test -s conftest.err && $GREP force_load conftest.err; then
++	cat conftest.err >&5
++      elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then
++	lt_cv_ld_force_load=yes
++      else
++	cat conftest.err >&5
++      fi
++        rm -f conftest.err libconftest.a conftest conftest.c
++        rm -rf conftest.dSYM
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5
++printf "%s\n" "$lt_cv_ld_force_load" >&6; }
++    case $host_os in
++    rhapsody* | darwin1.[012])
++      _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;;
++    darwin1.*)
++      _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
++    darwin*)
++      case $MACOSX_DEPLOYMENT_TARGET,$host in
++        10.[012],*|,*powerpc*-darwin[5-8]*)
++          _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
++        *)
++          _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;;
++      esac
++    ;;
++  esac
++    if test yes = "$lt_cv_apple_cc_single_mod"; then
++      _lt_dar_single_mod='$single_module'
++    fi
++    if test yes = "$lt_cv_ld_exported_symbols_list"; then
++      _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym'
++    else
++      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib'
++    fi
++    if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then
++      _lt_dsymutil='~$DSYMUTIL $lib || :'
++    else
++      _lt_dsymutil=
++    fi
++    ;;
++  esac
++
++# func_munge_path_list VARIABLE PATH
++# -----------------------------------
++# VARIABLE is name of variable containing _space_ separated list of
++# directories to be munged by the contents of PATH, which is string
++# having a format:
++# "DIR[:DIR]:"
++#       string "DIR[ DIR]" will be prepended to VARIABLE
++# ":DIR[:DIR]"
++#       string "DIR[ DIR]" will be appended to VARIABLE
++# "DIRP[:DIRP]::[DIRA:]DIRA"
++#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
++#       "DIRA[ DIRA]" will be appended to VARIABLE
++# "DIR[:DIR]"
++#       VARIABLE will be replaced by "DIR[ DIR]"
++func_munge_path_list ()
++{
++    case x$2 in
++    x)
++        ;;
++    *:)
++        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\"
++        ;;
++    x:*)
++        eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\"
++        ;;
++    *::*)
++        eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
++        eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
++        ;;
++    *)
++        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\"
++        ;;
++    esac
++}
++
++ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default
++"
++if test "x$ac_cv_header_dlfcn_h" = xyes
++then :
++  printf "%s\n" "#define HAVE_DLFCN_H 1" >>confdefs.h
++
++fi
++
++
++
++
++func_stripname_cnf ()
++{
++  case $2 in
++  .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;;
++  *)  func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;;
++  esac
++} # func_stripname_cnf
++
++
++
++
++
++# Set options
++
++
++
++        enable_dlopen=no
++
++
++  enable_win32_dll=no
++
++
++            # Check whether --enable-shared was given.
++if test ${enable_shared+y}
++then :
++  enableval=$enable_shared; p=${PACKAGE-default}
++    case $enableval in
++    yes) enable_shared=yes ;;
++    no) enable_shared=no ;;
++    *)
++      enable_shared=no
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for pkg in $enableval; do
++	IFS=$lt_save_ifs
++	if test "X$pkg" = "X$p"; then
++	  enable_shared=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac
++else $as_nop
++  enable_shared=yes
++fi
++
++
++
++
++
++
++
++
++
++  # Check whether --enable-static was given.
++if test ${enable_static+y}
++then :
++  enableval=$enable_static; p=${PACKAGE-default}
++    case $enableval in
++    yes) enable_static=yes ;;
++    no) enable_static=no ;;
++    *)
++     enable_static=no
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for pkg in $enableval; do
++	IFS=$lt_save_ifs
++	if test "X$pkg" = "X$p"; then
++	  enable_static=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac
++else $as_nop
++  enable_static=yes
++fi
++
++
++
++
++
++
++
++
++
++
++# Check whether --with-pic was given.
++if test ${with_pic+y}
++then :
++  withval=$with_pic; lt_p=${PACKAGE-default}
++    case $withval in
++    yes|no) pic_mode=$withval ;;
++    *)
++      pic_mode=default
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for lt_pkg in $withval; do
++	IFS=$lt_save_ifs
++	if test "X$lt_pkg" = "X$lt_p"; then
++	  pic_mode=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac
++else $as_nop
++  pic_mode=default
++fi
++
++
++
++
++
++
++
++
++  # Check whether --enable-fast-install was given.
++if test ${enable_fast_install+y}
++then :
++  enableval=$enable_fast_install; p=${PACKAGE-default}
++    case $enableval in
++    yes) enable_fast_install=yes ;;
++    no) enable_fast_install=no ;;
++    *)
++      enable_fast_install=no
++      # Look at the argument we got.  We use all the common list separators.
++      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      for pkg in $enableval; do
++	IFS=$lt_save_ifs
++	if test "X$pkg" = "X$p"; then
++	  enable_fast_install=yes
++	fi
++      done
++      IFS=$lt_save_ifs
++      ;;
++    esac
++else $as_nop
++  enable_fast_install=yes
++fi
++
++
++
++
++
++
++
++
++  shared_archive_member_spec=
++case $host,$enable_shared in
++power*-*-aix[5-9]*,yes)
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5
++printf %s "checking which variant of shared library versioning to provide... " >&6; }
++
++# Check whether --with-aix-soname was given.
++if test ${with_aix_soname+y}
++then :
++  withval=$with_aix_soname; case $withval in
++    aix|svr4|both)
++      ;;
++    *)
++      as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5
++      ;;
++    esac
++    lt_cv_with_aix_soname=$with_aix_soname
++else $as_nop
++  if test ${lt_cv_with_aix_soname+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_with_aix_soname=aix
++fi
++
++    with_aix_soname=$lt_cv_with_aix_soname
++fi
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5
++printf "%s\n" "$with_aix_soname" >&6; }
++  if test aix != "$with_aix_soname"; then
++    # For the AIX way of multilib, we name the shared archive member
++    # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o',
++    # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File.
++    # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag,
++    # the AIX toolchain works better with OBJECT_MODE set (default 32).
++    if test 64 = "${OBJECT_MODE-32}"; then
++      shared_archive_member_spec=shr_64
++    else
++      shared_archive_member_spec=shr
++    fi
++  fi
++  ;;
++*)
++  with_aix_soname=aix
++  ;;
++esac
++
++
++
++
++
++
++
++
++
++
++# This can be used to rebuild libtool when needed
++LIBTOOL_DEPS=$ltmain
++
++# Always use our own libtool.
++LIBTOOL='$(SHELL) $(top_builddir)/libtool'
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++test -z "$LN_S" && LN_S="ln -s"
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++if test -n "${ZSH_VERSION+set}"; then
++   setopt NO_GLOB_SUBST
++fi
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5
++printf %s "checking for objdir... " >&6; }
++if test ${lt_cv_objdir+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  rm -f .libs 2>/dev/null
++mkdir .libs 2>/dev/null
++if test -d .libs; then
++  lt_cv_objdir=.libs
++else
++  # MS-DOS does not allow filenames that begin with a dot.
++  lt_cv_objdir=_libs
++fi
++rmdir .libs 2>/dev/null
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5
++printf "%s\n" "$lt_cv_objdir" >&6; }
++objdir=$lt_cv_objdir
++
++
++
++
++
++printf "%s\n" "#define LT_OBJDIR \"$lt_cv_objdir/\"" >>confdefs.h
++
++
++
++
++case $host_os in
++aix3*)
++  # AIX sometimes has problems with the GCC collect2 program.  For some
++  # reason, if we set the COLLECT_NAMES environment variable, the problems
++  # vanish in a puff of smoke.
++  if test set != "${COLLECT_NAMES+set}"; then
++    COLLECT_NAMES=
++    export COLLECT_NAMES
++  fi
++  ;;
++esac
++
++# Global variables:
++ofile=libtool
++can_build_shared=yes
++
++# All known linkers require a '.a' archive for static linking (except MSVC and
++# ICC, which need '.lib').
++libext=a
++
++with_gnu_ld=$lt_cv_prog_gnu_ld
++
++old_CC=$CC
++old_CFLAGS=$CFLAGS
++
++# Set sane defaults for various variables
++test -z "$CC" && CC=cc
++test -z "$LTCC" && LTCC=$CC
++test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
++test -z "$LD" && LD=ld
++test -z "$ac_objext" && ac_objext=o
++
++func_cc_basename $compiler
++cc_basename=$func_cc_basename_result
++
++
++# Only perform the check for file, if the check method requires it
++test -z "$MAGIC_CMD" && MAGIC_CMD=file
++case $deplibs_check_method in
++file_magic*)
++  if test "$file_magic_cmd" = '$MAGIC_CMD'; then
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5
++printf %s "checking for ${ac_tool_prefix}file... " >&6; }
++if test ${lt_cv_path_MAGIC_CMD+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  case $MAGIC_CMD in
++[\\/*] |  ?:[\\/]*)
++  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
++  ;;
++*)
++  lt_save_MAGIC_CMD=$MAGIC_CMD
++  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
++  for ac_dir in $ac_dummy; do
++    IFS=$lt_save_ifs
++    test -z "$ac_dir" && ac_dir=.
++    if test -f "$ac_dir/${ac_tool_prefix}file"; then
++      lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file"
++      if test -n "$file_magic_test_file"; then
++	case $deplibs_check_method in
++	"file_magic "*)
++	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
++	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
++	    $EGREP "$file_magic_regex" > /dev/null; then
++	    :
++	  else
++	    cat <<_LT_EOF 1>&2
++
++*** Warning: the command libtool uses to detect shared libraries,
++*** $file_magic_cmd, produces output that libtool cannot recognize.
++*** The result is that libtool may fail to recognize shared libraries
++*** as such.  This will affect the creation of libtool libraries that
++*** depend on shared libraries, but programs linked with such libtool
++*** libraries will work regardless of this problem.  Nevertheless, you
++*** may want to report the problem to your system manager and/or to
++*** bug-libtool@gnu.org
++
++_LT_EOF
++	  fi ;;
++	esac
++      fi
++      break
++    fi
++  done
++  IFS=$lt_save_ifs
++  MAGIC_CMD=$lt_save_MAGIC_CMD
++  ;;
++esac
++fi
++
++MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++if test -n "$MAGIC_CMD"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
++printf "%s\n" "$MAGIC_CMD" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++
++
++
++if test -z "$lt_cv_path_MAGIC_CMD"; then
++  if test -n "$ac_tool_prefix"; then
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for file" >&5
++printf %s "checking for file... " >&6; }
++if test ${lt_cv_path_MAGIC_CMD+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  case $MAGIC_CMD in
++[\\/*] |  ?:[\\/]*)
++  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
++  ;;
++*)
++  lt_save_MAGIC_CMD=$MAGIC_CMD
++  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
++  for ac_dir in $ac_dummy; do
++    IFS=$lt_save_ifs
++    test -z "$ac_dir" && ac_dir=.
++    if test -f "$ac_dir/file"; then
++      lt_cv_path_MAGIC_CMD=$ac_dir/"file"
++      if test -n "$file_magic_test_file"; then
++	case $deplibs_check_method in
++	"file_magic "*)
++	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
++	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
++	    $EGREP "$file_magic_regex" > /dev/null; then
++	    :
++	  else
++	    cat <<_LT_EOF 1>&2
++
++*** Warning: the command libtool uses to detect shared libraries,
++*** $file_magic_cmd, produces output that libtool cannot recognize.
++*** The result is that libtool may fail to recognize shared libraries
++*** as such.  This will affect the creation of libtool libraries that
++*** depend on shared libraries, but programs linked with such libtool
++*** libraries will work regardless of this problem.  Nevertheless, you
++*** may want to report the problem to your system manager and/or to
++*** bug-libtool@gnu.org
++
++_LT_EOF
++	  fi ;;
++	esac
++      fi
++      break
++    fi
++  done
++  IFS=$lt_save_ifs
++  MAGIC_CMD=$lt_save_MAGIC_CMD
++  ;;
++esac
++fi
++
++MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++if test -n "$MAGIC_CMD"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
++printf "%s\n" "$MAGIC_CMD" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++
++
++  else
++    MAGIC_CMD=:
++  fi
++fi
++
++  fi
++  ;;
++esac
++
++# Use C for the default configuration in the libtool script
++
++lt_save_CC=$CC
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++# Source file extension for C test sources.
++ac_ext=c
++
++# Object file extension for compiled C test sources.
++objext=o
++objext=$objext
++
++# Code to be used in simple compile tests
++lt_simple_compile_test_code="int some_variable = 0;"
++
++# Code to be used in simple link tests
++lt_simple_link_test_code='int main(){return(0);}'
++
++
++
++
++
++
++
++# If no C compiler was specified, use CC.
++LTCC=${LTCC-"$CC"}
++
++# If no C compiler flags were specified, use CFLAGS.
++LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
++
++# Allow CC to be a program name with arguments.
++compiler=$CC
++
++# Save the default compiler, since it gets overwritten when the other
++# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
++compiler_DEFAULT=$CC
++
++# save warnings/boilerplate of simple test code
++ac_outfile=conftest.$ac_objext
++echo "$lt_simple_compile_test_code" >conftest.$ac_ext
++eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
++_lt_compiler_boilerplate=`cat conftest.err`
++$RM conftest*
++
++ac_outfile=conftest.$ac_objext
++echo "$lt_simple_link_test_code" >conftest.$ac_ext
++eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
++_lt_linker_boilerplate=`cat conftest.err`
++$RM -r conftest*
++
++
++if test -n "$compiler"; then
++
++lt_prog_compiler_no_builtin_flag=
++
++if test yes = "$GCC"; then
++  case $cc_basename in
++  nvcc*)
++    lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;;
++  *)
++    lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;;
++  esac
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
++printf %s "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
++if test ${lt_cv_prog_compiler_rtti_exceptions+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_rtti_exceptions=no
++   ac_outfile=conftest.$ac_objext
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++   lt_compiler_flag="-fno-rtti -fno-exceptions"  ## exclude from sc_useless_quotes_in_assignment
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   # The option is referenced via a variable to avoid confusing sed.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
++   (eval "$lt_compile" 2>conftest.err)
++   ac_status=$?
++   cat conftest.err >&5
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   if (exit $ac_status) && test -s "$ac_outfile"; then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings other than the usual output.
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
++     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
++       lt_cv_prog_compiler_rtti_exceptions=yes
++     fi
++   fi
++   $RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5
++printf "%s\n" "$lt_cv_prog_compiler_rtti_exceptions" >&6; }
++
++if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then
++    lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions"
++else
++    :
++fi
++
++fi
++
++
++
++
++
++
++  lt_prog_compiler_wl=
++lt_prog_compiler_pic=
++lt_prog_compiler_static=
++
++
++  if test yes = "$GCC"; then
++    lt_prog_compiler_wl='-Wl,'
++    lt_prog_compiler_static='-static'
++
++    case $host_os in
++      aix*)
++      # All AIX code is PIC.
++      if test ia64 = "$host_cpu"; then
++	# AIX 5 now supports IA64 processor
++	lt_prog_compiler_static='-Bstatic'
++      fi
++      lt_prog_compiler_pic='-fPIC'
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            lt_prog_compiler_pic='-fPIC'
++        ;;
++      m68k)
++            # FIXME: we need at least 68020 code to build shared libraries, but
++            # adding the '-m68020' flag to GCC prevents building anything better,
++            # like '-m68040'.
++            lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4'
++        ;;
++      esac
++      ;;
++
++    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
++      # PIC is the default for these OSes.
++      ;;
++
++    mingw* | cygwin* | pw32* | os2* | cegcc*)
++      # This hack is so that the source file can tell whether it is being
++      # built for inclusion in a dll (and should export symbols for example).
++      # Although the cygwin gcc ignores -fPIC, still need this for old-style
++      # (--disable-auto-import) libraries
++      lt_prog_compiler_pic='-DDLL_EXPORT'
++      case $host_os in
++      os2*)
++	lt_prog_compiler_static='$wl-static'
++	;;
++      esac
++      ;;
++
++    darwin* | rhapsody*)
++      # PIC is the default on this platform
++      # Common symbols not allowed in MH_DYLIB files
++      lt_prog_compiler_pic='-fno-common'
++      ;;
++
++    haiku*)
++      # PIC is the default for Haiku.
++      # The "-static" flag exists, but is broken.
++      lt_prog_compiler_static=
++      ;;
++
++    hpux*)
++      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
++      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
++      # sets the default TLS model and affects inlining.
++      case $host_cpu in
++      hppa*64*)
++	# +Z the default
++	;;
++      *)
++	lt_prog_compiler_pic='-fPIC'
++	;;
++      esac
++      ;;
++
++    interix[3-9]*)
++      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
++      # Instead, we relocate shared libraries at runtime.
++      ;;
++
++    msdosdjgpp*)
++      # Just because we use GCC doesn't mean we suddenly get shared libraries
++      # on systems that don't support them.
++      lt_prog_compiler_can_build_shared=no
++      enable_shared=no
++      ;;
++
++    *nto* | *qnx*)
++      # QNX uses GNU C++, but need to define -shared option too, otherwise
++      # it will coredump.
++      lt_prog_compiler_pic='-fPIC -shared'
++      ;;
++
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	lt_prog_compiler_pic=-Kconform_pic
++      fi
++      ;;
++
++    *)
++      lt_prog_compiler_pic='-fPIC'
++      ;;
++    esac
++
++    case $cc_basename in
++    nvcc*) # Cuda Compiler Driver 2.2
++      lt_prog_compiler_wl='-Xlinker '
++      if test -n "$lt_prog_compiler_pic"; then
++        lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic"
++      fi
++      ;;
++    esac
++  else
++    # PORTME Check for flag to pass linker flags through the system compiler.
++    case $host_os in
++    aix*)
++      lt_prog_compiler_wl='-Wl,'
++      if test ia64 = "$host_cpu"; then
++	# AIX 5 now supports IA64 processor
++	lt_prog_compiler_static='-Bstatic'
++      else
++	lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp'
++      fi
++      ;;
++
++    darwin* | rhapsody*)
++      # PIC is the default on this platform
++      # Common symbols not allowed in MH_DYLIB files
++      lt_prog_compiler_pic='-fno-common'
++      case $cc_basename in
++      nagfor*)
++        # NAG Fortran compiler
++        lt_prog_compiler_wl='-Wl,-Wl,,'
++        lt_prog_compiler_pic='-PIC'
++        lt_prog_compiler_static='-Bstatic'
++        ;;
++      esac
++      ;;
++
++    mingw* | cygwin* | pw32* | os2* | cegcc*)
++      # This hack is so that the source file can tell whether it is being
++      # built for inclusion in a dll (and should export symbols for example).
++      lt_prog_compiler_pic='-DDLL_EXPORT'
++      case $host_os in
++      os2*)
++	lt_prog_compiler_static='$wl-static'
++	;;
++      esac
++      ;;
++
++    hpux9* | hpux10* | hpux11*)
++      lt_prog_compiler_wl='-Wl,'
++      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
++      # not for PA HP-UX.
++      case $host_cpu in
++      hppa*64*|ia64*)
++	# +Z the default
++	;;
++      *)
++	lt_prog_compiler_pic='+Z'
++	;;
++      esac
++      # Is there a better lt_prog_compiler_static that works with the bundled CC?
++      lt_prog_compiler_static='$wl-a ${wl}archive'
++      ;;
++
++    irix5* | irix6* | nonstopux*)
++      lt_prog_compiler_wl='-Wl,'
++      # PIC (with -KPIC) is the default.
++      lt_prog_compiler_static='-non_shared'
++      ;;
++
++    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++      case $cc_basename in
++      # old Intel for x86_64, which still supported -KPIC.
++      ecc*)
++	lt_prog_compiler_wl='-Wl,'
++	lt_prog_compiler_pic='-KPIC'
++	lt_prog_compiler_static='-static'
++        ;;
++      # icc used to be incompatible with GCC.
++      # ICC 10 doesn't accept -KPIC any more.
++      icc* | ifort*)
++	lt_prog_compiler_wl='-Wl,'
++	lt_prog_compiler_pic='-fPIC'
++	lt_prog_compiler_static='-static'
++        ;;
++      # Lahey Fortran 8.1.
++      lf95*)
++	lt_prog_compiler_wl='-Wl,'
++	lt_prog_compiler_pic='--shared'
++	lt_prog_compiler_static='--static'
++	;;
++      nagfor*)
++	# NAG Fortran compiler
++	lt_prog_compiler_wl='-Wl,-Wl,,'
++	lt_prog_compiler_pic='-PIC'
++	lt_prog_compiler_static='-Bstatic'
++	;;
++      tcc*)
++	# Fabrice Bellard et al's Tiny C Compiler
++	lt_prog_compiler_wl='-Wl,'
++	lt_prog_compiler_pic='-fPIC'
++	lt_prog_compiler_static='-static'
++	;;
++      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
++        # Portland Group compilers (*not* the Pentium gcc compiler,
++	# which looks to be a dead project)
++	lt_prog_compiler_wl='-Wl,'
++	lt_prog_compiler_pic='-fpic'
++	lt_prog_compiler_static='-Bstatic'
++        ;;
++      ccc*)
++        lt_prog_compiler_wl='-Wl,'
++        # All Alpha code is PIC.
++        lt_prog_compiler_static='-non_shared'
++        ;;
++      xl* | bgxl* | bgf* | mpixl*)
++	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
++	lt_prog_compiler_wl='-Wl,'
++	lt_prog_compiler_pic='-qpic'
++	lt_prog_compiler_static='-qstaticlink'
++	;;
++      *)
++	case `$CC -V 2>&1 | $SED 5q` in
++	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*)
++	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
++	  lt_prog_compiler_pic='-KPIC'
++	  lt_prog_compiler_static='-Bstatic'
++	  lt_prog_compiler_wl=''
++	  ;;
++	*Sun\ F* | *Sun*Fortran*)
++	  lt_prog_compiler_pic='-KPIC'
++	  lt_prog_compiler_static='-Bstatic'
++	  lt_prog_compiler_wl='-Qoption ld '
++	  ;;
++	*Sun\ C*)
++	  # Sun C 5.9
++	  lt_prog_compiler_pic='-KPIC'
++	  lt_prog_compiler_static='-Bstatic'
++	  lt_prog_compiler_wl='-Wl,'
++	  ;;
++        *Intel*\ [CF]*Compiler*)
++	  lt_prog_compiler_wl='-Wl,'
++	  lt_prog_compiler_pic='-fPIC'
++	  lt_prog_compiler_static='-static'
++	  ;;
++	*Portland\ Group*)
++	  lt_prog_compiler_wl='-Wl,'
++	  lt_prog_compiler_pic='-fpic'
++	  lt_prog_compiler_static='-Bstatic'
++	  ;;
++	esac
++	;;
++      esac
++      ;;
++
++    newsos6)
++      lt_prog_compiler_pic='-KPIC'
++      lt_prog_compiler_static='-Bstatic'
++      ;;
++
++    *nto* | *qnx*)
++      # QNX uses GNU C++, but need to define -shared option too, otherwise
++      # it will coredump.
++      lt_prog_compiler_pic='-fPIC -shared'
++      ;;
++
++    osf3* | osf4* | osf5*)
++      lt_prog_compiler_wl='-Wl,'
++      # All OSF/1 code is PIC.
++      lt_prog_compiler_static='-non_shared'
++      ;;
++
++    rdos*)
++      lt_prog_compiler_static='-non_shared'
++      ;;
++
++    solaris*)
++      lt_prog_compiler_pic='-KPIC'
++      lt_prog_compiler_static='-Bstatic'
++      case $cc_basename in
++      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
++	lt_prog_compiler_wl='-Qoption ld ';;
++      *)
++	lt_prog_compiler_wl='-Wl,';;
++      esac
++      ;;
++
++    sunos4*)
++      lt_prog_compiler_wl='-Qoption ld '
++      lt_prog_compiler_pic='-PIC'
++      lt_prog_compiler_static='-Bstatic'
++      ;;
++
++    sysv4 | sysv4.2uw2* | sysv4.3*)
++      lt_prog_compiler_wl='-Wl,'
++      lt_prog_compiler_pic='-KPIC'
++      lt_prog_compiler_static='-Bstatic'
++      ;;
++
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	lt_prog_compiler_pic='-Kconform_pic'
++	lt_prog_compiler_static='-Bstatic'
++      fi
++      ;;
++
++    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
++      lt_prog_compiler_wl='-Wl,'
++      lt_prog_compiler_pic='-KPIC'
++      lt_prog_compiler_static='-Bstatic'
++      ;;
++
++    unicos*)
++      lt_prog_compiler_wl='-Wl,'
++      lt_prog_compiler_can_build_shared=no
++      ;;
++
++    uts4*)
++      lt_prog_compiler_pic='-pic'
++      lt_prog_compiler_static='-Bstatic'
++      ;;
++
++    *)
++      lt_prog_compiler_can_build_shared=no
++      ;;
++    esac
++  fi
++
++case $host_os in
++  # For platforms that do not support PIC, -DPIC is meaningless:
++  *djgpp*)
++    lt_prog_compiler_pic=
++    ;;
++  *)
++    lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC"
++    ;;
++esac
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
++printf %s "checking for $compiler option to produce PIC... " >&6; }
++if test ${lt_cv_prog_compiler_pic+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_pic=$lt_prog_compiler_pic
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5
++printf "%s\n" "$lt_cv_prog_compiler_pic" >&6; }
++lt_prog_compiler_pic=$lt_cv_prog_compiler_pic
++
++#
++# Check to make sure the PIC flag actually works.
++#
++if test -n "$lt_prog_compiler_pic"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
++printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
++if test ${lt_cv_prog_compiler_pic_works+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_pic_works=no
++   ac_outfile=conftest.$ac_objext
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++   lt_compiler_flag="$lt_prog_compiler_pic -DPIC"  ## exclude from sc_useless_quotes_in_assignment
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   # The option is referenced via a variable to avoid confusing sed.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
++   (eval "$lt_compile" 2>conftest.err)
++   ac_status=$?
++   cat conftest.err >&5
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   if (exit $ac_status) && test -s "$ac_outfile"; then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings other than the usual output.
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
++     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
++       lt_cv_prog_compiler_pic_works=yes
++     fi
++   fi
++   $RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5
++printf "%s\n" "$lt_cv_prog_compiler_pic_works" >&6; }
++
++if test yes = "$lt_cv_prog_compiler_pic_works"; then
++    case $lt_prog_compiler_pic in
++     "" | " "*) ;;
++     *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;;
++     esac
++else
++    lt_prog_compiler_pic=
++     lt_prog_compiler_can_build_shared=no
++fi
++
++fi
++
++
++
++
++
++
++
++
++
++
++
++#
++# Check to make sure the static flag actually works.
++#
++wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\"
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
++printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
++if test ${lt_cv_prog_compiler_static_works+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_static_works=no
++   save_LDFLAGS=$LDFLAGS
++   LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
++   echo "$lt_simple_link_test_code" > conftest.$ac_ext
++   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
++     # The linker can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     if test -s conftest.err; then
++       # Append any errors to the config.log.
++       cat conftest.err 1>&5
++       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
++       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++       if diff conftest.exp conftest.er2 >/dev/null; then
++         lt_cv_prog_compiler_static_works=yes
++       fi
++     else
++       lt_cv_prog_compiler_static_works=yes
++     fi
++   fi
++   $RM -r conftest*
++   LDFLAGS=$save_LDFLAGS
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5
++printf "%s\n" "$lt_cv_prog_compiler_static_works" >&6; }
++
++if test yes = "$lt_cv_prog_compiler_static_works"; then
++    :
++else
++    lt_prog_compiler_static=
++fi
++
++
++
++
++
++
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if test ${lt_cv_prog_compiler_c_o+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_c_o=no
++   $RM -r conftest 2>/dev/null
++   mkdir conftest
++   cd conftest
++   mkdir out
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++   lt_compiler_flag="-o out/conftest2.$ac_objext"
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
++   (eval "$lt_compile" 2>out/conftest.err)
++   ac_status=$?
++   cat out/conftest.err >&5
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   if (exit $ac_status) && test -s out/conftest2.$ac_objext
++   then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
++     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
++     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
++       lt_cv_prog_compiler_c_o=yes
++     fi
++   fi
++   chmod u+w . 2>&5
++   $RM conftest*
++   # SGI C++ compiler will create directory out/ii_files/ for
++   # template instantiation
++   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
++   $RM out/* && rmdir out
++   cd ..
++   $RM -r conftest
++   $RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
++printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; }
++
++
++
++
++
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if test ${lt_cv_prog_compiler_c_o+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_c_o=no
++   $RM -r conftest 2>/dev/null
++   mkdir conftest
++   cd conftest
++   mkdir out
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++   lt_compiler_flag="-o out/conftest2.$ac_objext"
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
++   (eval "$lt_compile" 2>out/conftest.err)
++   ac_status=$?
++   cat out/conftest.err >&5
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   if (exit $ac_status) && test -s out/conftest2.$ac_objext
++   then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
++     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
++     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
++       lt_cv_prog_compiler_c_o=yes
++     fi
++   fi
++   chmod u+w . 2>&5
++   $RM conftest*
++   # SGI C++ compiler will create directory out/ii_files/ for
++   # template instantiation
++   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
++   $RM out/* && rmdir out
++   cd ..
++   $RM -r conftest
++   $RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
++printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; }
++
++
++
++
++hard_links=nottested
++if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then
++  # do not overwrite the value of need_locks provided by the user
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
++printf %s "checking if we can lock with hard links... " >&6; }
++  hard_links=yes
++  $RM conftest*
++  ln conftest.a conftest.b 2>/dev/null && hard_links=no
++  touch conftest.a
++  ln conftest.a conftest.b 2>&5 || hard_links=no
++  ln conftest.a conftest.b 2>/dev/null && hard_links=no
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
++printf "%s\n" "$hard_links" >&6; }
++  if test no = "$hard_links"; then
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5
++printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;}
++    need_locks=warn
++  fi
++else
++  need_locks=no
++fi
++
++
++
++
++
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
++printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
++
++  runpath_var=
++  allow_undefined_flag=
++  always_export_symbols=no
++  archive_cmds=
++  archive_expsym_cmds=
++  compiler_needs_object=no
++  enable_shared_with_static_runtimes=no
++  export_dynamic_flag_spec=
++  export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
++  hardcode_automatic=no
++  hardcode_direct=no
++  hardcode_direct_absolute=no
++  hardcode_libdir_flag_spec=
++  hardcode_libdir_separator=
++  hardcode_minus_L=no
++  hardcode_shlibpath_var=unsupported
++  inherit_rpath=no
++  link_all_deplibs=unknown
++  module_cmds=
++  module_expsym_cmds=
++  old_archive_from_new_cmds=
++  old_archive_from_expsyms_cmds=
++  thread_safe_flag_spec=
++  whole_archive_flag_spec=
++  # include_expsyms should be a list of space-separated symbols to be *always*
++  # included in the symbol list
++  include_expsyms=
++  # exclude_expsyms can be an extended regexp of symbols to exclude
++  # it will be wrapped by ' (' and ')$', so one must not match beginning or
++  # end of line.  Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc',
++  # as well as any symbol that contains 'd'.
++  exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
++  # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
++  # platforms (ab)use it in PIC code, but their linkers get confused if
++  # the symbol is explicitly referenced.  Since portable code cannot
++  # rely on this symbol name, it's probably fine to never include it in
++  # preloaded symbol tables.
++  # Exclude shared library initialization/finalization symbols.
++  extract_expsyms_cmds=
++
++  case $host_os in
++  cygwin* | mingw* | pw32* | cegcc*)
++    # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time
++    # When not using gcc, we currently assume that we are using
++    # Microsoft Visual C++ or Intel C++ Compiler.
++    if test yes != "$GCC"; then
++      with_gnu_ld=no
++    fi
++    ;;
++  interix*)
++    # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC)
++    with_gnu_ld=yes
++    ;;
++  openbsd* | bitrig*)
++    with_gnu_ld=no
++    ;;
++  esac
++
++  ld_shlibs=yes
++
++  # On some targets, GNU ld is compatible enough with the native linker
++  # that we're better off using the native interface for both.
++  lt_use_gnu_ld_interface=no
++  if test yes = "$with_gnu_ld"; then
++    case $host_os in
++      aix*)
++	# The AIX port of GNU ld has always aspired to compatibility
++	# with the native linker.  However, as the warning in the GNU ld
++	# block says, versions before 2.19.5* couldn't really create working
++	# shared libraries, regardless of the interface used.
++	case `$LD -v 2>&1` in
++	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
++	  *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;;
++	  *\ \(GNU\ Binutils\)\ [3-9]*) ;;
++	  *)
++	    lt_use_gnu_ld_interface=yes
++	    ;;
++	esac
++	;;
++      *)
++	lt_use_gnu_ld_interface=yes
++	;;
++    esac
++  fi
++
++  if test yes = "$lt_use_gnu_ld_interface"; then
++    # If archive_cmds runs LD, not CC, wlarc should be empty
++    wlarc='$wl'
++
++    # Set some defaults for GNU ld with shared library support. These
++    # are reset later if shared libraries are not supported. Putting them
++    # here allows them to be overridden if necessary.
++    runpath_var=LD_RUN_PATH
++    hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++    export_dynamic_flag_spec='$wl--export-dynamic'
++    # ancient GNU ld didn't support --whole-archive et. al.
++    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
++      whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++    else
++      whole_archive_flag_spec=
++    fi
++    supports_anon_versioning=no
++    case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in
++      *GNU\ gold*) supports_anon_versioning=yes ;;
++      *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
++      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
++      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
++      *\ 2.11.*) ;; # other 2.11 versions
++      *) supports_anon_versioning=yes ;;
++    esac
++
++    # See if GNU ld supports shared libraries.
++    case $host_os in
++    aix[3-9]*)
++      # On AIX/PPC, the GNU linker is very broken
++      if test ia64 != "$host_cpu"; then
++	ld_shlibs=no
++	cat <<_LT_EOF 1>&2
++
++*** Warning: the GNU linker, at least up to release 2.19, is reported
++*** to be unable to reliably create shared libraries on AIX.
++*** Therefore, libtool is disabling shared libraries support.  If you
++*** really care for shared libraries, you may want to install binutils
++*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
++*** You will then need to restart the configuration process.
++
++_LT_EOF
++      fi
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++            archive_expsym_cmds=''
++        ;;
++      m68k)
++            archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
++            hardcode_libdir_flag_spec='-L$libdir'
++            hardcode_minus_L=yes
++        ;;
++      esac
++      ;;
++
++    beos*)
++      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	allow_undefined_flag=unsupported
++	# Joseph Beckenbach  says some releases of gcc
++	# support --undefined.  This deserves some investigation.  FIXME
++	archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++      else
++	ld_shlibs=no
++      fi
++      ;;
++
++    cygwin* | mingw* | pw32* | cegcc*)
++      # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless,
++      # as there is no search path for DLLs.
++      hardcode_libdir_flag_spec='-L$libdir'
++      export_dynamic_flag_spec='$wl--export-all-symbols'
++      allow_undefined_flag=unsupported
++      always_export_symbols=no
++      enable_shared_with_static_runtimes=yes
++      export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
++      exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
++
++      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
++        archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++	# If the export-symbols file already is a .def file, use it as
++	# is; otherwise, prepend EXPORTS...
++	archive_expsym_cmds='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
++          cp $export_symbols $output_objdir/$soname.def;
++        else
++          echo EXPORTS > $output_objdir/$soname.def;
++          cat $export_symbols >> $output_objdir/$soname.def;
++        fi~
++        $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++      else
++	ld_shlibs=no
++      fi
++      ;;
++
++    haiku*)
++      archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++      link_all_deplibs=yes
++      ;;
++
++    os2*)
++      hardcode_libdir_flag_spec='-L$libdir'
++      hardcode_minus_L=yes
++      allow_undefined_flag=unsupported
++      shrext_cmds=.dll
++      archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	prefix_cmds="$SED"~
++	if test EXPORTS = "`$SED 1q $export_symbols`"; then
++	  prefix_cmds="$prefix_cmds -e 1d";
++	fi~
++	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
++	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
++      enable_shared_with_static_runtimes=yes
++      file_list_spec='@'
++      ;;
++
++    interix[3-9]*)
++      hardcode_direct=no
++      hardcode_shlibpath_var=no
++      hardcode_libdir_flag_spec='$wl-rpath,$libdir'
++      export_dynamic_flag_spec='$wl-E'
++      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
++      # Instead, shared libraries are loaded at an image base (0x10000000 by
++      # default) and relocated if they conflict, which is a slow very memory
++      # consuming and fragmenting process.  To avoid this, we pick a random,
++      # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
++      # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
++      archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++      archive_expsym_cmds='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++      ;;
++
++    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
++      tmp_diet=no
++      if test linux-dietlibc = "$host_os"; then
++	case $cc_basename in
++	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
++	esac
++      fi
++      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
++	 && test no = "$tmp_diet"
++      then
++	tmp_addflag=' $pic_flag'
++	tmp_sharedflag='-shared'
++	case $cc_basename,$host_cpu in
++        pgcc*)				# Portland Group C compiler
++	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  tmp_addflag=' $pic_flag'
++	  ;;
++	pgf77* | pgf90* | pgf95* | pgfortran*)
++					# Portland Group f77 and f90 compilers
++	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  tmp_addflag=' $pic_flag -Mnomain' ;;
++	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
++	  tmp_addflag=' -i_dynamic' ;;
++	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
++	  tmp_addflag=' -i_dynamic -nofor_main' ;;
++	ifc* | ifort*)			# Intel Fortran compiler
++	  tmp_addflag=' -nofor_main' ;;
++	lf95*)				# Lahey Fortran 8.1
++	  whole_archive_flag_spec=
++	  tmp_sharedflag='--shared' ;;
++        nagfor*)                        # NAGFOR 5.3
++          tmp_sharedflag='-Wl,-shared' ;;
++	xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
++	  tmp_sharedflag='-qmkshrobj'
++	  tmp_addflag= ;;
++	nvcc*)	# Cuda Compiler Driver 2.2
++	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  compiler_needs_object=yes
++	  ;;
++	esac
++	case `$CC -V 2>&1 | $SED 5q` in
++	*Sun\ C*)			# Sun C 5.9
++	  whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  compiler_needs_object=yes
++	  tmp_sharedflag='-G' ;;
++	*Sun\ F*)			# Sun Fortran 8.3
++	  tmp_sharedflag='-G' ;;
++	esac
++	archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++
++        if test yes = "$supports_anon_versioning"; then
++          archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
++            cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++            echo "local: *; };" >> $output_objdir/$libname.ver~
++            $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
++        fi
++
++	case $cc_basename in
++	tcc*)
++	  export_dynamic_flag_spec='-rdynamic'
++	  ;;
++	xlf* | bgf* | bgxlf* | mpixlf*)
++	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
++	  whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive'
++	  hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++	  archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
++	  if test yes = "$supports_anon_versioning"; then
++	    archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
++              cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++              echo "local: *; };" >> $output_objdir/$libname.ver~
++              $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
++	  fi
++	  ;;
++	esac
++      else
++        ld_shlibs=no
++      fi
++      ;;
++
++    netbsd*)
++      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++	archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
++	wlarc=
++      else
++	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++      fi
++      ;;
++
++    solaris*)
++      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
++	ld_shlibs=no
++	cat <<_LT_EOF 1>&2
++
++*** Warning: The releases 2.8.* of the GNU linker cannot reliably
++*** create shared libraries on Solaris systems.  Therefore, libtool
++*** is disabling shared libraries support.  We urge you to upgrade GNU
++*** binutils to release 2.9.1 or newer.  Another option is to modify
++*** your PATH or compiler configuration so that the native linker is
++*** used, and then restart.
++
++_LT_EOF
++      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++      else
++	ld_shlibs=no
++      fi
++      ;;
++
++    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
++      case `$LD -v 2>&1` in
++        *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*)
++	ld_shlibs=no
++	cat <<_LT_EOF 1>&2
++
++*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
++*** reliably create shared libraries on SCO systems.  Therefore, libtool
++*** is disabling shared libraries support.  We urge you to upgrade GNU
++*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
++*** your PATH or compiler configuration so that the native linker is
++*** used, and then restart.
++
++_LT_EOF
++	;;
++	*)
++	  # For security reasons, it is highly recommended that you always
++	  # use absolute paths for naming shared libraries, and exclude the
++	  # DT_RUNPATH tag from executables and libraries.  But doing so
++	  # requires that you compile everything twice, which is a pain.
++	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	    hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++	    archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	    archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	  else
++	    ld_shlibs=no
++	  fi
++	;;
++      esac
++      ;;
++
++    sunos4*)
++      archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
++      wlarc=
++      hardcode_direct=yes
++      hardcode_shlibpath_var=no
++      ;;
++
++    *)
++      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++      else
++	ld_shlibs=no
++      fi
++      ;;
++    esac
++
++    if test no = "$ld_shlibs"; then
++      runpath_var=
++      hardcode_libdir_flag_spec=
++      export_dynamic_flag_spec=
++      whole_archive_flag_spec=
++    fi
++  else
++    # PORTME fill in a description of your system's linker (not GNU ld)
++    case $host_os in
++    aix3*)
++      allow_undefined_flag=unsupported
++      always_export_symbols=yes
++      archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
++      # Note: this linker hardcodes the directories in LIBPATH if there
++      # are no directories specified by -L.
++      hardcode_minus_L=yes
++      if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then
++	# Neither direct hardcoding nor static linking is supported with a
++	# broken collect2.
++	hardcode_direct=unsupported
++      fi
++      ;;
++
++    aix[4-9]*)
++      if test ia64 = "$host_cpu"; then
++	# On IA64, the linker does run time linking by default, so we don't
++	# have to do anything special.
++	aix_use_runtimelinking=no
++	exp_sym_flag='-Bexport'
++	no_entry_flag=
++      else
++	# If we're using GNU nm, then we don't want the "-C" option.
++	# -C means demangle to GNU nm, but means don't demangle to AIX nm.
++	# Without the "-l" option, or with the "-B" option, AIX nm treats
++	# weak defined symbols like other global defined symbols, whereas
++	# GNU nm marks them as "W".
++	# While the 'weak' keyword is ignored in the Export File, we need
++	# it in the Import File for the 'aix-soname' feature, so we have
++	# to replace the "-B" option with "-P" for AIX nm.
++	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
++	  export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
++	else
++	  export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
++	fi
++	aix_use_runtimelinking=no
++
++	# Test if we are trying to use run time linking or normal
++	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
++	# have runtime linking enabled, and use it for executables.
++	# For shared libraries, we enable/disable runtime linking
++	# depending on the kind of the shared library created -
++	# when "with_aix_soname,aix_use_runtimelinking" is:
++	# "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
++	# "aix,yes"  lib.so          shared, rtl:yes, for executables
++	#            lib.a           static archive
++	# "both,no"  lib.so.V(shr.o) shared, rtl:yes
++	#            lib.a(lib.so.V) shared, rtl:no,  for executables
++	# "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
++	#            lib.a(lib.so.V) shared, rtl:no
++	# "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
++	#            lib.a           static archive
++	case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
++	  for ld_flag in $LDFLAGS; do
++	  if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then
++	    aix_use_runtimelinking=yes
++	    break
++	  fi
++	  done
++	  if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
++	    # With aix-soname=svr4, we create the lib.so.V shared archives only,
++	    # so we don't have lib.a shared libs to link our executables.
++	    # We have to force runtime linking in this case.
++	    aix_use_runtimelinking=yes
++	    LDFLAGS="$LDFLAGS -Wl,-brtl"
++	  fi
++	  ;;
++	esac
++
++	exp_sym_flag='-bexport'
++	no_entry_flag='-bnoentry'
++      fi
++
++      # When large executables or shared objects are built, AIX ld can
++      # have problems creating the table of contents.  If linking a library
++      # or program results in "error TOC overflow" add -mminimal-toc to
++      # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
++      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
++
++      archive_cmds=''
++      hardcode_direct=yes
++      hardcode_direct_absolute=yes
++      hardcode_libdir_separator=':'
++      link_all_deplibs=yes
++      file_list_spec='$wl-f,'
++      case $with_aix_soname,$aix_use_runtimelinking in
++      aix,*) ;; # traditional, no import file
++      svr4,* | *,yes) # use import file
++	# The Import File defines what to hardcode.
++	hardcode_direct=no
++	hardcode_direct_absolute=no
++	;;
++      esac
++
++      if test yes = "$GCC"; then
++	case $host_os in aix4.[012]|aix4.[012].*)
++	# We only want to do this on AIX 4.2 and lower, the check
++	# below for broken collect2 doesn't work under 4.3+
++	  collect2name=`$CC -print-prog-name=collect2`
++	  if test -f "$collect2name" &&
++	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
++	  then
++	  # We have reworked collect2
++	  :
++	  else
++	  # We have old collect2
++	  hardcode_direct=unsupported
++	  # It fails to find uninstalled libraries when the uninstalled
++	  # path is not listed in the libpath.  Setting hardcode_minus_L
++	  # to unsupported forces relinking
++	  hardcode_minus_L=yes
++	  hardcode_libdir_flag_spec='-L$libdir'
++	  hardcode_libdir_separator=
++	  fi
++	  ;;
++	esac
++	shared_flag='-shared'
++	if test yes = "$aix_use_runtimelinking"; then
++	  shared_flag="$shared_flag "'$wl-G'
++	fi
++	# Need to ensure runtime linking is disabled for the traditional
++	# shared library, or the linker may eventually find shared libraries
++	# /with/ Import File - we do not want to mix them.
++	shared_flag_aix='-shared'
++	shared_flag_svr4='-shared $wl-G'
++      else
++	# not using gcc
++	if test ia64 = "$host_cpu"; then
++	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
++	# chokes on -Wl,-G. The following line is correct:
++	  shared_flag='-G'
++	else
++	  if test yes = "$aix_use_runtimelinking"; then
++	    shared_flag='$wl-G'
++	  else
++	    shared_flag='$wl-bM:SRE'
++	  fi
++	  shared_flag_aix='$wl-bM:SRE'
++	  shared_flag_svr4='$wl-G'
++	fi
++      fi
++
++      export_dynamic_flag_spec='$wl-bexpall'
++      # It seems that -bexpall does not export symbols beginning with
++      # underscore (_), so it is better to generate a list of symbols to export.
++      always_export_symbols=yes
++      if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
++	# Warning - without using the other runtime loading flags (-brtl),
++	# -berok will link without error, but may produce a broken library.
++	allow_undefined_flag='-berok'
++        # Determine the default libpath from the value encoded in an
++        # empty executable.
++        if test set = "${lt_cv_aix_libpath+set}"; then
++  aix_libpath=$lt_cv_aix_libpath
++else
++  if test ${lt_cv_aix_libpath_+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++
++  lt_aix_libpath_sed='
++      /Import File Strings/,/^$/ {
++	  /^0/ {
++	      s/^0  *\([^ ]*\) *$/\1/
++	      p
++	  }
++      }'
++  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  # Check for a 64-bit object if we didn't find anything.
++  if test -z "$lt_cv_aix_libpath_"; then
++    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  fi
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++  if test -z "$lt_cv_aix_libpath_"; then
++    lt_cv_aix_libpath_=/usr/lib:/lib
++  fi
++
++fi
++
++  aix_libpath=$lt_cv_aix_libpath_
++fi
++
++        hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath"
++        archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
++      else
++	if test ia64 = "$host_cpu"; then
++	  hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib'
++	  allow_undefined_flag="-z nodefs"
++	  archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
++	else
++	 # Determine the default libpath from the value encoded in an
++	 # empty executable.
++	 if test set = "${lt_cv_aix_libpath+set}"; then
++  aix_libpath=$lt_cv_aix_libpath
++else
++  if test ${lt_cv_aix_libpath_+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++
++  lt_aix_libpath_sed='
++      /Import File Strings/,/^$/ {
++	  /^0/ {
++	      s/^0  *\([^ ]*\) *$/\1/
++	      p
++	  }
++      }'
++  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  # Check for a 64-bit object if we didn't find anything.
++  if test -z "$lt_cv_aix_libpath_"; then
++    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  fi
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++  if test -z "$lt_cv_aix_libpath_"; then
++    lt_cv_aix_libpath_=/usr/lib:/lib
++  fi
++
++fi
++
++  aix_libpath=$lt_cv_aix_libpath_
++fi
++
++	 hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath"
++	  # Warning - without using the other run time loading flags,
++	  # -berok will link without error, but may produce a broken library.
++	  no_undefined_flag=' $wl-bernotok'
++	  allow_undefined_flag=' $wl-berok'
++	  if test yes = "$with_gnu_ld"; then
++	    # We only use this code for GNU lds that support --whole-archive.
++	    whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive'
++	  else
++	    # Exported symbols can be pulled into shared objects from archives
++	    whole_archive_flag_spec='$convenience'
++	  fi
++	  archive_cmds_need_lc=yes
++	  archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
++	  # -brtl affects multiple linker settings, -berok does not and is overridden later
++	  compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`'
++	  if test svr4 != "$with_aix_soname"; then
++	    # This is similar to how AIX traditionally builds its shared libraries.
++	    archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
++	  fi
++	  if test aix != "$with_aix_soname"; then
++	    archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
++	  else
++	    # used by -dlpreopen to get the symbols
++	    archive_expsym_cmds="$archive_expsym_cmds"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
++	  fi
++	  archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d'
++	fi
++      fi
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++            archive_expsym_cmds=''
++        ;;
++      m68k)
++            archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
++            hardcode_libdir_flag_spec='-L$libdir'
++            hardcode_minus_L=yes
++        ;;
++      esac
++      ;;
++
++    bsdi[45]*)
++      export_dynamic_flag_spec=-rdynamic
++      ;;
++
++    cygwin* | mingw* | pw32* | cegcc*)
++      # When not using gcc, we currently assume that we are using
++      # Microsoft Visual C++ or Intel C++ Compiler.
++      # hardcode_libdir_flag_spec is actually meaningless, as there is
++      # no search path for DLLs.
++      case $cc_basename in
++      cl* | icl*)
++	# Native MSVC or ICC
++	hardcode_libdir_flag_spec=' '
++	allow_undefined_flag=unsupported
++	always_export_symbols=yes
++	file_list_spec='@'
++	# Tell ltmain to make .lib files, not .a files.
++	libext=lib
++	# Tell ltmain to make .dll files, not .so files.
++	shrext_cmds=.dll
++	# FIXME: Setting linknames here is a bad hack.
++	archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
++	archive_expsym_cmds='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
++            cp "$export_symbols" "$output_objdir/$soname.def";
++            echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
++          else
++            $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
++          fi~
++          $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
++          linknames='
++	# The linker will not automatically build a static lib if we build a DLL.
++	# _LT_TAGVAR(old_archive_from_new_cmds, )='true'
++	enable_shared_with_static_runtimes=yes
++	exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
++	export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
++	# Don't use ranlib
++	old_postinstall_cmds='chmod 644 $oldlib'
++	postlink_cmds='lt_outputfile="@OUTPUT@"~
++          lt_tool_outputfile="@TOOL_OUTPUT@"~
++          case $lt_outputfile in
++            *.exe|*.EXE) ;;
++            *)
++              lt_outputfile=$lt_outputfile.exe
++              lt_tool_outputfile=$lt_tool_outputfile.exe
++              ;;
++          esac~
++          if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
++            $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
++            $RM "$lt_outputfile.manifest";
++          fi'
++	;;
++      *)
++	# Assume MSVC and ICC wrapper
++	hardcode_libdir_flag_spec=' '
++	allow_undefined_flag=unsupported
++	# Tell ltmain to make .lib files, not .a files.
++	libext=lib
++	# Tell ltmain to make .dll files, not .so files.
++	shrext_cmds=.dll
++	# FIXME: Setting linknames here is a bad hack.
++	archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
++	# The linker will automatically build a .lib file if we build a DLL.
++	old_archive_from_new_cmds='true'
++	# FIXME: Should let the user specify the lib program.
++	old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
++	enable_shared_with_static_runtimes=yes
++	;;
++      esac
++      ;;
++
++    darwin* | rhapsody*)
++
++
++  archive_cmds_need_lc=no
++  hardcode_direct=no
++  hardcode_automatic=yes
++  hardcode_shlibpath_var=unsupported
++  if test yes = "$lt_cv_ld_force_load"; then
++    whole_archive_flag_spec='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
++
++  else
++    whole_archive_flag_spec=''
++  fi
++  link_all_deplibs=yes
++  allow_undefined_flag=$_lt_dar_allow_undefined
++  case $cc_basename in
++     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
++     *) _lt_dar_can_shared=$GCC ;;
++  esac
++  if test yes = "$_lt_dar_can_shared"; then
++    output_verbose_link_cmd=func_echo_all
++    archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
++    module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
++    archive_expsym_cmds="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
++    module_expsym_cmds="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
++
++  else
++  ld_shlibs=no
++  fi
++
++      ;;
++
++    dgux*)
++      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      hardcode_libdir_flag_spec='-L$libdir'
++      hardcode_shlibpath_var=no
++      ;;
++
++    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
++    # support.  Future versions do this automatically, but an explicit c++rt0.o
++    # does not break anything, and helps significantly (at the cost of a little
++    # extra space).
++    freebsd2.2*)
++      archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
++      hardcode_libdir_flag_spec='-R$libdir'
++      hardcode_direct=yes
++      hardcode_shlibpath_var=no
++      ;;
++
++    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
++    freebsd2.*)
++      archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
++      hardcode_direct=yes
++      hardcode_minus_L=yes
++      hardcode_shlibpath_var=no
++      ;;
++
++    # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
++    freebsd* | dragonfly* | midnightbsd*)
++      archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++      hardcode_libdir_flag_spec='-R$libdir'
++      hardcode_direct=yes
++      hardcode_shlibpath_var=no
++      ;;
++
++    hpux9*)
++      if test yes = "$GCC"; then
++	archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++      else
++	archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++      fi
++      hardcode_libdir_flag_spec='$wl+b $wl$libdir'
++      hardcode_libdir_separator=:
++      hardcode_direct=yes
++
++      # hardcode_minus_L: Not really in the search PATH,
++      # but as the default location of the library.
++      hardcode_minus_L=yes
++      export_dynamic_flag_spec='$wl-E'
++      ;;
++
++    hpux10*)
++      if test yes,no = "$GCC,$with_gnu_ld"; then
++	archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
++      fi
++      if test no = "$with_gnu_ld"; then
++	hardcode_libdir_flag_spec='$wl+b $wl$libdir'
++	hardcode_libdir_separator=:
++	hardcode_direct=yes
++	hardcode_direct_absolute=yes
++	export_dynamic_flag_spec='$wl-E'
++	# hardcode_minus_L: Not really in the search PATH,
++	# but as the default location of the library.
++	hardcode_minus_L=yes
++      fi
++      ;;
++
++    hpux11*)
++      if test yes,no = "$GCC,$with_gnu_ld"; then
++	case $host_cpu in
++	hppa*64*)
++	  archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	ia64*)
++	  archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	*)
++	  archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	esac
++      else
++	case $host_cpu in
++	hppa*64*)
++	  archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	ia64*)
++	  archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	*)
++
++	  # Older versions of the 11.00 compiler do not understand -b yet
++	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
++	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5
++printf %s "checking if $CC understands -b... " >&6; }
++if test ${lt_cv_prog_compiler__b+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler__b=no
++   save_LDFLAGS=$LDFLAGS
++   LDFLAGS="$LDFLAGS -b"
++   echo "$lt_simple_link_test_code" > conftest.$ac_ext
++   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
++     # The linker can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     if test -s conftest.err; then
++       # Append any errors to the config.log.
++       cat conftest.err 1>&5
++       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
++       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++       if diff conftest.exp conftest.er2 >/dev/null; then
++         lt_cv_prog_compiler__b=yes
++       fi
++     else
++       lt_cv_prog_compiler__b=yes
++     fi
++   fi
++   $RM -r conftest*
++   LDFLAGS=$save_LDFLAGS
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5
++printf "%s\n" "$lt_cv_prog_compiler__b" >&6; }
++
++if test yes = "$lt_cv_prog_compiler__b"; then
++    archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++else
++    archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
++fi
++
++	  ;;
++	esac
++      fi
++      if test no = "$with_gnu_ld"; then
++	hardcode_libdir_flag_spec='$wl+b $wl$libdir'
++	hardcode_libdir_separator=:
++
++	case $host_cpu in
++	hppa*64*|ia64*)
++	  hardcode_direct=no
++	  hardcode_shlibpath_var=no
++	  ;;
++	*)
++	  hardcode_direct=yes
++	  hardcode_direct_absolute=yes
++	  export_dynamic_flag_spec='$wl-E'
++
++	  # hardcode_minus_L: Not really in the search PATH,
++	  # but as the default location of the library.
++	  hardcode_minus_L=yes
++	  ;;
++	esac
++      fi
++      ;;
++
++    irix5* | irix6* | nonstopux*)
++      if test yes = "$GCC"; then
++	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	# Try to use the -exported_symbol ld option, if it does not
++	# work, assume that -exports_file does not work either and
++	# implicitly export all symbols.
++	# This should be the same for all languages, so no per-tag cache variable.
++	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5
++printf %s "checking whether the $host_os linker accepts -exported_symbol... " >&6; }
++if test ${lt_cv_irix_exported_symbol+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  save_LDFLAGS=$LDFLAGS
++	   LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null"
++	   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++int foo (void) { return 0; }
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  lt_cv_irix_exported_symbol=yes
++else $as_nop
++  lt_cv_irix_exported_symbol=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++           LDFLAGS=$save_LDFLAGS
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5
++printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
++	if test yes = "$lt_cv_irix_exported_symbol"; then
++          archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib'
++	fi
++      else
++	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib'
++      fi
++      archive_cmds_need_lc='no'
++      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      hardcode_libdir_separator=:
++      inherit_rpath=yes
++      link_all_deplibs=yes
++      ;;
++
++    linux*)
++      case $cc_basename in
++      tcc*)
++	# Fabrice Bellard et al's Tiny C Compiler
++	ld_shlibs=yes
++	archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++	;;
++      esac
++      ;;
++
++    netbsd*)
++      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++	archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
++      else
++	archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
++      fi
++      hardcode_libdir_flag_spec='-R$libdir'
++      hardcode_direct=yes
++      hardcode_shlibpath_var=no
++      ;;
++
++    newsos6)
++      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      hardcode_direct=yes
++      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      hardcode_libdir_separator=:
++      hardcode_shlibpath_var=no
++      ;;
++
++    *nto* | *qnx*)
++      ;;
++
++    openbsd* | bitrig*)
++      if test -f /usr/libexec/ld.so; then
++	hardcode_direct=yes
++	hardcode_shlibpath_var=no
++	hardcode_direct_absolute=yes
++	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++	  archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols'
++	  hardcode_libdir_flag_spec='$wl-rpath,$libdir'
++	  export_dynamic_flag_spec='$wl-E'
++	else
++	  archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++	  hardcode_libdir_flag_spec='$wl-rpath,$libdir'
++	fi
++      else
++	ld_shlibs=no
++      fi
++      ;;
++
++    os2*)
++      hardcode_libdir_flag_spec='-L$libdir'
++      hardcode_minus_L=yes
++      allow_undefined_flag=unsupported
++      shrext_cmds=.dll
++      archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	$ECHO EXPORTS >> $output_objdir/$libname.def~
++	prefix_cmds="$SED"~
++	if test EXPORTS = "`$SED 1q $export_symbols`"; then
++	  prefix_cmds="$prefix_cmds -e 1d";
++	fi~
++	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
++	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
++	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	emximp -o $lib $output_objdir/$libname.def'
++      old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
++      enable_shared_with_static_runtimes=yes
++      file_list_spec='@'
++      ;;
++
++    osf3*)
++      if test yes = "$GCC"; then
++	allow_undefined_flag=' $wl-expect_unresolved $wl\*'
++	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++      else
++	allow_undefined_flag=' -expect_unresolved \*'
++	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++      fi
++      archive_cmds_need_lc='no'
++      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      hardcode_libdir_separator=:
++      ;;
++
++    osf4* | osf5*)	# as osf3* with the addition of -msym flag
++      if test yes = "$GCC"; then
++	allow_undefined_flag=' $wl-expect_unresolved $wl\*'
++	archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      else
++	allow_undefined_flag=' -expect_unresolved \*'
++	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
++          $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp'
++
++	# Both c and cxx compiler support -rpath directly
++	hardcode_libdir_flag_spec='-rpath $libdir'
++      fi
++      archive_cmds_need_lc='no'
++      hardcode_libdir_separator=:
++      ;;
++
++    solaris*)
++      no_undefined_flag=' -z defs'
++      if test yes = "$GCC"; then
++	wlarc='$wl'
++	archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++          $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
++      else
++	case `$CC -V 2>&1` in
++	*"Compilers 5.0"*)
++	  wlarc=''
++	  archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++            $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
++	  ;;
++	*)
++	  wlarc='$wl'
++	  archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++            $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
++	  ;;
++	esac
++      fi
++      hardcode_libdir_flag_spec='-R$libdir'
++      hardcode_shlibpath_var=no
++      case $host_os in
++      solaris2.[0-5] | solaris2.[0-5].*) ;;
++      *)
++	# The compiler driver will combine and reorder linker options,
++	# but understands '-z linker_flag'.  GCC discards it without '$wl',
++	# but is careful enough not to reorder.
++	# Supported since Solaris 2.6 (maybe 2.5.1?)
++	if test yes = "$GCC"; then
++	  whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
++	else
++	  whole_archive_flag_spec='-z allextract$convenience -z defaultextract'
++	fi
++	;;
++      esac
++      link_all_deplibs=yes
++      ;;
++
++    sunos4*)
++      if test sequent = "$host_vendor"; then
++	# Use $CC to link under sequent, because it throws in some extra .o
++	# files that make .init and .fini sections work.
++	archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
++      fi
++      hardcode_libdir_flag_spec='-L$libdir'
++      hardcode_direct=yes
++      hardcode_minus_L=yes
++      hardcode_shlibpath_var=no
++      ;;
++
++    sysv4)
++      case $host_vendor in
++	sni)
++	  archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	  hardcode_direct=yes # is this really true???
++	;;
++	siemens)
++	  ## LD is ld it makes a PLAMLIB
++	  ## CC just makes a GrossModule.
++	  archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags'
++	  reload_cmds='$CC -r -o $output$reload_objs'
++	  hardcode_direct=no
++        ;;
++	motorola)
++	  archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	  hardcode_direct=no #Motorola manual says yes, but my tests say they lie
++	;;
++      esac
++      runpath_var='LD_RUN_PATH'
++      hardcode_shlibpath_var=no
++      ;;
++
++    sysv4.3*)
++      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      hardcode_shlibpath_var=no
++      export_dynamic_flag_spec='-Bexport'
++      ;;
++
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	hardcode_shlibpath_var=no
++	runpath_var=LD_RUN_PATH
++	hardcode_runpath_var=yes
++	ld_shlibs=yes
++      fi
++      ;;
++
++    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
++      no_undefined_flag='$wl-z,text'
++      archive_cmds_need_lc=no
++      hardcode_shlibpath_var=no
++      runpath_var='LD_RUN_PATH'
++
++      if test yes = "$GCC"; then
++	archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      fi
++      ;;
++
++    sysv5* | sco3.2v5* | sco5v6*)
++      # Note: We CANNOT use -z defs as we might desire, because we do not
++      # link with -lc, and that would cause any symbols used from libc to
++      # always be unresolved, which means just about no library would
++      # ever link correctly.  If we're not using GNU ld we use -z text
++      # though, which does catch some bad symbols but isn't as heavy-handed
++      # as -z defs.
++      no_undefined_flag='$wl-z,text'
++      allow_undefined_flag='$wl-z,nodefs'
++      archive_cmds_need_lc=no
++      hardcode_shlibpath_var=no
++      hardcode_libdir_flag_spec='$wl-R,$libdir'
++      hardcode_libdir_separator=':'
++      link_all_deplibs=yes
++      export_dynamic_flag_spec='$wl-Bexport'
++      runpath_var='LD_RUN_PATH'
++
++      if test yes = "$GCC"; then
++	archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      else
++	archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      fi
++      ;;
++
++    uts4*)
++      archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
++      hardcode_libdir_flag_spec='-L$libdir'
++      hardcode_shlibpath_var=no
++      ;;
++
++    *)
++      ld_shlibs=no
++      ;;
++    esac
++
++    if test sni = "$host_vendor"; then
++      case $host in
++      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
++	export_dynamic_flag_spec='$wl-Blargedynsym'
++	;;
++      esac
++    fi
++  fi
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5
++printf "%s\n" "$ld_shlibs" >&6; }
++test no = "$ld_shlibs" && can_build_shared=no
++
++with_gnu_ld=$with_gnu_ld
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++#
++# Do we need to explicitly link libc?
++#
++case "x$archive_cmds_need_lc" in
++x|xyes)
++  # Assume -lc should be added
++  archive_cmds_need_lc=yes
++
++  if test yes,yes = "$GCC,$enable_shared"; then
++    case $archive_cmds in
++    *'~'*)
++      # FIXME: we may have to deal with multi-command sequences.
++      ;;
++    '$CC '*)
++      # Test whether the compiler implicitly links with -lc since on some
++      # systems, -lgcc has to come before -lc. If gcc already passes -lc
++      # to ld, don't add -lc before -lgcc.
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
++printf %s "checking whether -lc should be explicitly linked in... " >&6; }
++if test ${lt_cv_archive_cmds_need_lc+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  $RM conftest*
++	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } 2>conftest.err; then
++	  soname=conftest
++	  lib=conftest
++	  libobjs=conftest.$ac_objext
++	  deplibs=
++	  wl=$lt_prog_compiler_wl
++	  pic_flag=$lt_prog_compiler_pic
++	  compiler_flags=-v
++	  linker_flags=-v
++	  verstring=
++	  output_objdir=.
++	  libname=conftest
++	  lt_save_allow_undefined_flag=$allow_undefined_flag
++	  allow_undefined_flag=
++	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
++  (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++	  then
++	    lt_cv_archive_cmds_need_lc=no
++	  else
++	    lt_cv_archive_cmds_need_lc=yes
++	  fi
++	  allow_undefined_flag=$lt_save_allow_undefined_flag
++	else
++	  cat conftest.err 1>&5
++	fi
++	$RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5
++printf "%s\n" "$lt_cv_archive_cmds_need_lc" >&6; }
++      archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc
++      ;;
++    esac
++  fi
++  ;;
++esac
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
++printf %s "checking dynamic linker characteristics... " >&6; }
++
++if test yes = "$GCC"; then
++  case $host_os in
++    darwin*) lt_awk_arg='/^libraries:/,/LR/' ;;
++    *) lt_awk_arg='/^libraries:/' ;;
++  esac
++  case $host_os in
++    mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;;
++    *) lt_sed_strip_eq='s|=/|/|g' ;;
++  esac
++  lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
++  case $lt_search_path_spec in
++  *\;*)
++    # if the path contains ";" then we assume it to be the separator
++    # otherwise default to the standard path separator (i.e. ":") - it is
++    # assumed that no part of a normal pathname contains ";" but that should
++    # okay in the real world where ";" in dirpaths is itself problematic.
++    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
++    ;;
++  *)
++    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
++    ;;
++  esac
++  # Ok, now we have the path, separated by spaces, we can step through it
++  # and add multilib dir if necessary...
++  lt_tmp_lt_search_path_spec=
++  lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
++  # ...but if some path component already ends with the multilib dir we assume
++  # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer).
++  case "$lt_multi_os_dir; $lt_search_path_spec " in
++  "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*)
++    lt_multi_os_dir=
++    ;;
++  esac
++  for lt_sys_path in $lt_search_path_spec; do
++    if test -d "$lt_sys_path$lt_multi_os_dir"; then
++      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir"
++    elif test -n "$lt_multi_os_dir"; then
++      test -d "$lt_sys_path" && \
++	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
++    fi
++  done
++  lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
++BEGIN {RS = " "; FS = "/|\n";} {
++  lt_foo = "";
++  lt_count = 0;
++  for (lt_i = NF; lt_i > 0; lt_i--) {
++    if ($lt_i != "" && $lt_i != ".") {
++      if ($lt_i == "..") {
++        lt_count++;
++      } else {
++        if (lt_count == 0) {
++          lt_foo = "/" $lt_i lt_foo;
++        } else {
++          lt_count--;
++        }
++      }
++    }
++  }
++  if (lt_foo != "") { lt_freq[lt_foo]++; }
++  if (lt_freq[lt_foo] == 1) { print lt_foo; }
++}'`
++  # AWK program above erroneously prepends '/' to C:/dos/paths
++  # for these hosts.
++  case $host_os in
++    mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
++      $SED 's|/\([A-Za-z]:\)|\1|g'` ;;
++  esac
++  sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
++else
++  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
++fi
++library_names_spec=
++libname_spec='lib$name'
++soname_spec=
++shrext_cmds=.so
++postinstall_cmds=
++postuninstall_cmds=
++finish_cmds=
++finish_eval=
++shlibpath_var=
++shlibpath_overrides_runpath=unknown
++version_type=none
++dynamic_linker="$host_os ld.so"
++sys_lib_dlsearch_path_spec="/lib /usr/lib"
++need_lib_prefix=unknown
++hardcode_into_libs=no
++
++# when you set need_version to no, make sure it does not cause -set_version
++# flags to be left without arguments
++need_version=unknown
++
++
++
++case $host_os in
++aix3*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
++  shlibpath_var=LIBPATH
++
++  # AIX 3 has no versioning support, so we append a major version to the name.
++  soname_spec='$libname$release$shared_ext$major'
++  ;;
++
++aix[4-9]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  hardcode_into_libs=yes
++  if test ia64 = "$host_cpu"; then
++    # AIX 5 supports IA64
++    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
++    shlibpath_var=LD_LIBRARY_PATH
++  else
++    # With GCC up to 2.95.x, collect2 would create an import file
++    # for dependence libraries.  The import file would start with
++    # the line '#! .'.  This would cause the generated library to
++    # depend on '.', always an invalid library.  This was fixed in
++    # development snapshots of GCC prior to 3.0.
++    case $host_os in
++      aix4 | aix4.[01] | aix4.[01].*)
++      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
++	   echo ' yes '
++	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
++	:
++      else
++	can_build_shared=no
++      fi
++      ;;
++    esac
++    # Using Import Files as archive members, it is possible to support
++    # filename-based versioning of shared library archives on AIX. While
++    # this would work for both with and without runtime linking, it will
++    # prevent static linking of such archives. So we do filename-based
++    # shared library versioning with .so extension only, which is used
++    # when both runtime linking and shared linking is enabled.
++    # Unfortunately, runtime linking may impact performance, so we do
++    # not want this to be the default eventually. Also, we use the
++    # versioned .so libs for executables only if there is the -brtl
++    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
++    # To allow for filename-based versioning support, we need to create
++    # libNAME.so.V as an archive file, containing:
++    # *) an Import File, referring to the versioned filename of the
++    #    archive as well as the shared archive member, telling the
++    #    bitwidth (32 or 64) of that shared object, and providing the
++    #    list of exported symbols of that shared object, eventually
++    #    decorated with the 'weak' keyword
++    # *) the shared object with the F_LOADONLY flag set, to really avoid
++    #    it being seen by the linker.
++    # At run time we better use the real file rather than another symlink,
++    # but for link time we create the symlink libNAME.so -> libNAME.so.V
++
++    case $with_aix_soname,$aix_use_runtimelinking in
++    # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct
++    # soname into executable. Probably we can add versioning support to
++    # collect2, so additional links can be useful in future.
++    aix,yes) # traditional libtool
++      dynamic_linker='AIX unversionable lib.so'
++      # If using run time linking (on AIX 4.2 or later) use lib.so
++      # instead of lib.a to let people know that these are not
++      # typical AIX shared libraries.
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++      ;;
++    aix,no) # traditional AIX only
++      dynamic_linker='AIX lib.a(lib.so.V)'
++      # We preserve .a as extension for shared libraries through AIX4.2
++      # and later when we are not doing run time linking.
++      library_names_spec='$libname$release.a $libname.a'
++      soname_spec='$libname$release$shared_ext$major'
++      ;;
++    svr4,*) # full svr4 only
++      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)"
++      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
++      # We do not specify a path in Import Files, so LIBPATH fires.
++      shlibpath_overrides_runpath=yes
++      ;;
++    *,yes) # both, prefer svr4
++      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)"
++      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
++      # unpreferred sharedlib libNAME.a needs extra handling
++      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
++      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
++      # We do not specify a path in Import Files, so LIBPATH fires.
++      shlibpath_overrides_runpath=yes
++      ;;
++    *,no) # both, prefer aix
++      dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)"
++      library_names_spec='$libname$release.a $libname.a'
++      soname_spec='$libname$release$shared_ext$major'
++      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
++      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
++      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
++      ;;
++    esac
++    shlibpath_var=LIBPATH
++  fi
++  ;;
++
++amigaos*)
++  case $host_cpu in
++  powerpc)
++    # Since July 2007 AmigaOS4 officially supports .so libraries.
++    # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    ;;
++  m68k)
++    library_names_spec='$libname.ixlibrary $libname.a'
++    # Create ${libname}_ixlibrary.a entries in /sys/libs.
++    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
++    ;;
++  esac
++  ;;
++
++beos*)
++  library_names_spec='$libname$shared_ext'
++  dynamic_linker="$host_os ld.so"
++  shlibpath_var=LIBRARY_PATH
++  ;;
++
++bsdi[45]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
++  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
++  # the default ld.so.conf also contains /usr/contrib/lib and
++  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
++  # libtool to hard-code these into programs
++  ;;
++
++cygwin* | mingw* | pw32* | cegcc*)
++  version_type=windows
++  shrext_cmds=.dll
++  need_version=no
++  need_lib_prefix=no
++
++  case $GCC,$cc_basename in
++  yes,*)
++    # gcc
++    library_names_spec='$libname.dll.a'
++    # DLL is installed to $(libdir)/../bin by postinstall_cmds
++    postinstall_cmds='base_file=`basename \$file`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++      dldir=$destdir/`dirname \$dlpath`~
++      test -d \$dldir || mkdir -p \$dldir~
++      $install_prog $dir/$dlname \$dldir/$dlname~
++      chmod a+x \$dldir/$dlname~
++      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
++        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
++      fi'
++    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
++      dlpath=$dir/\$dldll~
++       $RM \$dlpath'
++    shlibpath_overrides_runpath=yes
++
++    case $host_os in
++    cygwin*)
++      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
++      soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++
++      sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"
++      ;;
++    mingw* | cegcc*)
++      # MinGW DLLs use traditional 'lib' prefix
++      soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      ;;
++    pw32*)
++      # pw32 DLLs use 'pw' prefix rather than 'lib'
++      library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      ;;
++    esac
++    dynamic_linker='Win32 ld.exe'
++    ;;
++
++  *,cl* | *,icl*)
++    # Native MSVC or ICC
++    libname_spec='$name'
++    soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++    library_names_spec='$libname.dll.lib'
++
++    case $build_os in
++    mingw*)
++      sys_lib_search_path_spec=
++      lt_save_ifs=$IFS
++      IFS=';'
++      for lt_path in $LIB
++      do
++        IFS=$lt_save_ifs
++        # Let DOS variable expansion print the short 8.3 style file name.
++        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
++        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
++      done
++      IFS=$lt_save_ifs
++      # Convert to MSYS style.
++      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
++      ;;
++    cygwin*)
++      # Convert to unix form, then to dos form, then back to unix form
++      # but this time dos style (no spaces!) so that the unix form looks
++      # like /cygdrive/c/PROGRA~1:/cygdr...
++      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
++      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
++      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
++      ;;
++    *)
++      sys_lib_search_path_spec=$LIB
++      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
++        # It is most probably a Windows format PATH.
++        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
++      else
++        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
++      fi
++      # FIXME: find the short name or the path components, as spaces are
++      # common. (e.g. "Program Files" -> "PROGRA~1")
++      ;;
++    esac
++
++    # DLL is installed to $(libdir)/../bin by postinstall_cmds
++    postinstall_cmds='base_file=`basename \$file`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++      dldir=$destdir/`dirname \$dlpath`~
++      test -d \$dldir || mkdir -p \$dldir~
++      $install_prog $dir/$dlname \$dldir/$dlname'
++    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
++      dlpath=$dir/\$dldll~
++       $RM \$dlpath'
++    shlibpath_overrides_runpath=yes
++    dynamic_linker='Win32 link.exe'
++    ;;
++
++  *)
++    # Assume MSVC and ICC wrapper
++    library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib'
++    dynamic_linker='Win32 ld.exe'
++    ;;
++  esac
++  # FIXME: first we should search . and the directory the executable is in
++  shlibpath_var=PATH
++  ;;
++
++darwin* | rhapsody*)
++  dynamic_linker="$host_os dyld"
++  version_type=darwin
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
++  soname_spec='$libname$release$major$shared_ext'
++  shlibpath_overrides_runpath=yes
++  shlibpath_var=DYLD_LIBRARY_PATH
++  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
++
++  sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"
++  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
++  ;;
++
++dgux*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  ;;
++
++freebsd* | dragonfly* | midnightbsd*)
++  # DragonFly does not have aout.  When/if they implement a new
++  # versioning mechanism, adjust this.
++  if test -x /usr/bin/objformat; then
++    objformat=`/usr/bin/objformat`
++  else
++    case $host_os in
++    freebsd[23].*) objformat=aout ;;
++    *) objformat=elf ;;
++    esac
++  fi
++  version_type=freebsd-$objformat
++  case $version_type in
++    freebsd-elf*)
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++      soname_spec='$libname$release$shared_ext$major'
++      need_version=no
++      need_lib_prefix=no
++      ;;
++    freebsd-*)
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++      need_version=yes
++      ;;
++  esac
++  shlibpath_var=LD_LIBRARY_PATH
++  case $host_os in
++  freebsd2.*)
++    shlibpath_overrides_runpath=yes
++    ;;
++  freebsd3.[01]* | freebsdelf3.[01]*)
++    shlibpath_overrides_runpath=yes
++    hardcode_into_libs=yes
++    ;;
++  freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
++  freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
++    shlibpath_overrides_runpath=no
++    hardcode_into_libs=yes
++    ;;
++  *) # from 4.6 on, and DragonFly
++    shlibpath_overrides_runpath=yes
++    hardcode_into_libs=yes
++    ;;
++  esac
++  ;;
++
++haiku*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  dynamic_linker="$host_os runtime_loader"
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
++  hardcode_into_libs=yes
++  ;;
++
++hpux9* | hpux10* | hpux11*)
++  # Give a soname corresponding to the major version so that dld.sl refuses to
++  # link against other versions.
++  version_type=sunos
++  need_lib_prefix=no
++  need_version=no
++  case $host_cpu in
++  ia64*)
++    shrext_cmds='.so'
++    hardcode_into_libs=yes
++    dynamic_linker="$host_os dld.so"
++    shlibpath_var=LD_LIBRARY_PATH
++    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    if test 32 = "$HPUX_IA64_MODE"; then
++      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
++      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
++    else
++      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
++      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
++    fi
++    ;;
++  hppa*64*)
++    shrext_cmds='.sl'
++    hardcode_into_libs=yes
++    dynamic_linker="$host_os dld.sl"
++    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
++    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
++    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++    ;;
++  *)
++    shrext_cmds='.sl'
++    dynamic_linker="$host_os dld.sl"
++    shlibpath_var=SHLIB_PATH
++    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    ;;
++  esac
++  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
++  postinstall_cmds='chmod 555 $lib'
++  # or fails outright, so override atomically:
++  install_override_mode=555
++  ;;
++
++interix[3-9]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  ;;
++
++irix5* | irix6* | nonstopux*)
++  case $host_os in
++    nonstopux*) version_type=nonstopux ;;
++    *)
++	if test yes = "$lt_cv_prog_gnu_ld"; then
++		version_type=linux # correct to gnu/linux during the next big refactor
++	else
++		version_type=irix
++	fi ;;
++  esac
++  need_lib_prefix=no
++  need_version=no
++  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
++  case $host_os in
++  irix5* | nonstopux*)
++    libsuff= shlibsuff=
++    ;;
++  *)
++    case $LD in # libtool.m4 will add one of these switches to LD
++    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
++      libsuff= shlibsuff= libmagic=32-bit;;
++    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
++      libsuff=32 shlibsuff=N32 libmagic=N32;;
++    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
++      libsuff=64 shlibsuff=64 libmagic=64-bit;;
++    *) libsuff= shlibsuff= libmagic=never-match;;
++    esac
++    ;;
++  esac
++  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
++  shlibpath_overrides_runpath=no
++  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
++  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
++  hardcode_into_libs=yes
++  ;;
++
++# No shared lib support for Linux oldld, aout, or coff.
++linux*oldld* | linux*aout* | linux*coff*)
++  dynamic_linker=no
++  ;;
++
++linux*android*)
++  version_type=none # Android doesn't support versioned libraries.
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext'
++  soname_spec='$libname$release$shared_ext'
++  finish_cmds=
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++
++  # This implies no fast_install, which is unacceptable.
++  # Some rework will be needed to allow for fast_install
++  # before this can be enabled.
++  hardcode_into_libs=yes
++
++  dynamic_linker='Android linker'
++  # Don't embed -rpath directories since the linker doesn't support them.
++  hardcode_libdir_flag_spec='-L$libdir'
++  ;;
++
++# This must be glibc/ELF.
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++
++  # Some binutils ld are patched to set DT_RUNPATH
++  if test ${lt_cv_shlibpath_overrides_runpath+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_shlibpath_overrides_runpath=no
++    save_LDFLAGS=$LDFLAGS
++    save_libdir=$libdir
++    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \
++	 LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\""
++    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null
++then :
++  lt_cv_shlibpath_overrides_runpath=yes
++fi
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++    LDFLAGS=$save_LDFLAGS
++    libdir=$save_libdir
++
++fi
++
++  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
++
++  # This implies no fast_install, which is unacceptable.
++  # Some rework will be needed to allow for fast_install
++  # before this can be enabled.
++  hardcode_into_libs=yes
++
++  # Add ABI-specific directories to the system library path.
++  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"
++
++  # Ideally, we could use ldconfig to report *all* directores which are
++  # searched for libraries, however this is still not possible.  Aside from not
++  # being certain /sbin/ldconfig is available, command
++  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
++  # even though it is searched at run-time.  Try to do the best guess by
++  # appending ld.so.conf contents (and includes) to the search path.
++  if test -f /etc/ld.so.conf; then
++    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
++    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
++  fi
++
++  # We used to test for /lib/ld.so.1 and disable shared libraries on
++  # powerpc, because MkLinux only supported shared libraries with the
++  # GNU dynamic linker.  Since this was broken with cross compilers,
++  # most powerpc-linux boxes support dynamic linking these days and
++  # people can always --disable-shared, the test was removed, and we
++  # assume the GNU/Linux dynamic linker is in use.
++  dynamic_linker='GNU/Linux ld.so'
++  ;;
++
++netbsd*)
++  version_type=sunos
++  need_lib_prefix=no
++  need_version=no
++  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
++    dynamic_linker='NetBSD (a.out) ld.so'
++  else
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    dynamic_linker='NetBSD ld.elf_so'
++  fi
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  ;;
++
++newsos6)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  ;;
++
++*nto* | *qnx*)
++  version_type=qnx
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  dynamic_linker='ldqnx.so'
++  ;;
++
++openbsd* | bitrig*)
++  version_type=sunos
++  sys_lib_dlsearch_path_spec=/usr/lib
++  need_lib_prefix=no
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++    need_version=no
++  else
++    need_version=yes
++  fi
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  ;;
++
++os2*)
++  libname_spec='$name'
++  version_type=windows
++  shrext_cmds=.dll
++  need_version=no
++  need_lib_prefix=no
++  # OS/2 can only load a DLL with a base name of 8 characters or less.
++  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
++    v=$($ECHO $release$versuffix | tr -d .-);
++    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
++    $ECHO $n$v`$shared_ext'
++  library_names_spec='${libname}_dll.$libext'
++  dynamic_linker='OS/2 ld.exe'
++  shlibpath_var=BEGINLIBPATH
++  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
++  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  postinstall_cmds='base_file=`basename \$file`~
++    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
++    dldir=$destdir/`dirname \$dlpath`~
++    test -d \$dldir || mkdir -p \$dldir~
++    $install_prog $dir/$dlname \$dldir/$dlname~
++    chmod a+x \$dldir/$dlname~
++    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
++      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
++    fi'
++  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
++    dlpath=$dir/\$dldll~
++    $RM \$dlpath'
++  ;;
++
++osf3* | osf4* | osf5*)
++  version_type=osf
++  need_lib_prefix=no
++  need_version=no
++  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
++  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  ;;
++
++rdos*)
++  dynamic_linker=no
++  ;;
++
++solaris*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  # ldd complains unless libraries are executable
++  postinstall_cmds='chmod +x $lib'
++  ;;
++
++sunos4*)
++  version_type=sunos
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  if test yes = "$with_gnu_ld"; then
++    need_lib_prefix=no
++  fi
++  need_version=yes
++  ;;
++
++sysv4 | sysv4.3*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  case $host_vendor in
++    sni)
++      shlibpath_overrides_runpath=no
++      need_lib_prefix=no
++      runpath_var=LD_RUN_PATH
++      ;;
++    siemens)
++      need_lib_prefix=no
++      ;;
++    motorola)
++      need_lib_prefix=no
++      need_version=no
++      shlibpath_overrides_runpath=no
++      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
++      ;;
++  esac
++  ;;
++
++sysv4*MP*)
++  if test -d /usr/nec; then
++    version_type=linux # correct to gnu/linux during the next big refactor
++    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
++    soname_spec='$libname$shared_ext.$major'
++    shlibpath_var=LD_LIBRARY_PATH
++  fi
++  ;;
++
++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
++  version_type=sco
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  if test yes = "$with_gnu_ld"; then
++    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
++  else
++    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
++    case $host_os in
++      sco3.2v5*)
++        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
++	;;
++    esac
++  fi
++  sys_lib_dlsearch_path_spec='/usr/lib'
++  ;;
++
++tpf*)
++  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  ;;
++
++uts4*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  ;;
++
++*)
++  dynamic_linker=no
++  ;;
++esac
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
++printf "%s\n" "$dynamic_linker" >&6; }
++test no = "$dynamic_linker" && can_build_shared=no
++
++variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
++if test yes = "$GCC"; then
++  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
++fi
++
++if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
++  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
++fi
++
++if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
++  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
++fi
++
++# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
++configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
++
++# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
++func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
++
++# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
++configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
++printf %s "checking how to hardcode library paths into programs... " >&6; }
++hardcode_action=
++if test -n "$hardcode_libdir_flag_spec" ||
++   test -n "$runpath_var" ||
++   test yes = "$hardcode_automatic"; then
++
++  # We can hardcode non-existent directories.
++  if test no != "$hardcode_direct" &&
++     # If the only mechanism to avoid hardcoding is shlibpath_var, we
++     # have to relink, otherwise we might link with an installed library
++     # when we should be linking with a yet-to-be-installed one
++     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" &&
++     test no != "$hardcode_minus_L"; then
++    # Linking always hardcodes the temporary library directory.
++    hardcode_action=relink
++  else
++    # We can link without hardcoding, and we can hardcode nonexisting dirs.
++    hardcode_action=immediate
++  fi
++else
++  # We cannot hardcode anything, or else we can only hardcode existing
++  # directories.
++  hardcode_action=unsupported
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5
++printf "%s\n" "$hardcode_action" >&6; }
++
++if test relink = "$hardcode_action" ||
++   test yes = "$inherit_rpath"; then
++  # Fast installation is not supported
++  enable_fast_install=no
++elif test yes = "$shlibpath_overrides_runpath" ||
++     test no = "$enable_shared"; then
++  # Fast installation is not necessary
++  enable_fast_install=needless
++fi
++
++
++
++
++
++
++  if test yes != "$enable_dlopen"; then
++  enable_dlopen=unknown
++  enable_dlopen_self=unknown
++  enable_dlopen_self_static=unknown
++else
++  lt_cv_dlopen=no
++  lt_cv_dlopen_libs=
++
++  case $host_os in
++  beos*)
++    lt_cv_dlopen=load_add_on
++    lt_cv_dlopen_libs=
++    lt_cv_dlopen_self=yes
++    ;;
++
++  mingw* | pw32* | cegcc*)
++    lt_cv_dlopen=LoadLibrary
++    lt_cv_dlopen_libs=
++    ;;
++
++  cygwin*)
++    lt_cv_dlopen=dlopen
++    lt_cv_dlopen_libs=
++    ;;
++
++  darwin*)
++    # if libdl is installed we need to link against it
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
++printf %s "checking for dlopen in -ldl... " >&6; }
++if test ${ac_cv_lib_dl_dlopen+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_check_lib_save_LIBS=$LIBS
++LIBS="-ldl  $LIBS"
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++/* Override any GCC internal prototype to avoid an error.
++   Use char because int might match the return type of a GCC
++   builtin and then its argument prototype would still apply.  */
++char dlopen ();
++int
++main (void)
++{
++return dlopen ();
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  ac_cv_lib_dl_dlopen=yes
++else $as_nop
++  ac_cv_lib_dl_dlopen=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++LIBS=$ac_check_lib_save_LIBS
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
++printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; }
++if test "x$ac_cv_lib_dl_dlopen" = xyes
++then :
++  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl
++else $as_nop
++
++    lt_cv_dlopen=dyld
++    lt_cv_dlopen_libs=
++    lt_cv_dlopen_self=yes
++
++fi
++
++    ;;
++
++  tpf*)
++    # Don't try to run any link tests for TPF.  We know it's impossible
++    # because TPF is a cross-compiler, and we know how we open DSOs.
++    lt_cv_dlopen=dlopen
++    lt_cv_dlopen_libs=
++    lt_cv_dlopen_self=no
++    ;;
++
++  *)
++    ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load"
++if test "x$ac_cv_func_shl_load" = xyes
++then :
++  lt_cv_dlopen=shl_load
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
++printf %s "checking for shl_load in -ldld... " >&6; }
++if test ${ac_cv_lib_dld_shl_load+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_check_lib_save_LIBS=$LIBS
++LIBS="-ldld  $LIBS"
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++/* Override any GCC internal prototype to avoid an error.
++   Use char because int might match the return type of a GCC
++   builtin and then its argument prototype would still apply.  */
++char shl_load ();
++int
++main (void)
++{
++return shl_load ();
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  ac_cv_lib_dld_shl_load=yes
++else $as_nop
++  ac_cv_lib_dld_shl_load=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++LIBS=$ac_check_lib_save_LIBS
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
++printf "%s\n" "$ac_cv_lib_dld_shl_load" >&6; }
++if test "x$ac_cv_lib_dld_shl_load" = xyes
++then :
++  lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld
++else $as_nop
++  ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
++if test "x$ac_cv_func_dlopen" = xyes
++then :
++  lt_cv_dlopen=dlopen
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
++printf %s "checking for dlopen in -ldl... " >&6; }
++if test ${ac_cv_lib_dl_dlopen+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_check_lib_save_LIBS=$LIBS
++LIBS="-ldl  $LIBS"
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++/* Override any GCC internal prototype to avoid an error.
++   Use char because int might match the return type of a GCC
++   builtin and then its argument prototype would still apply.  */
++char dlopen ();
++int
++main (void)
++{
++return dlopen ();
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  ac_cv_lib_dl_dlopen=yes
++else $as_nop
++  ac_cv_lib_dl_dlopen=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++LIBS=$ac_check_lib_save_LIBS
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
++printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; }
++if test "x$ac_cv_lib_dl_dlopen" = xyes
++then :
++  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5
++printf %s "checking for dlopen in -lsvld... " >&6; }
++if test ${ac_cv_lib_svld_dlopen+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_check_lib_save_LIBS=$LIBS
++LIBS="-lsvld  $LIBS"
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++/* Override any GCC internal prototype to avoid an error.
++   Use char because int might match the return type of a GCC
++   builtin and then its argument prototype would still apply.  */
++char dlopen ();
++int
++main (void)
++{
++return dlopen ();
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  ac_cv_lib_svld_dlopen=yes
++else $as_nop
++  ac_cv_lib_svld_dlopen=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++LIBS=$ac_check_lib_save_LIBS
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5
++printf "%s\n" "$ac_cv_lib_svld_dlopen" >&6; }
++if test "x$ac_cv_lib_svld_dlopen" = xyes
++then :
++  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld
++else $as_nop
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5
++printf %s "checking for dld_link in -ldld... " >&6; }
++if test ${ac_cv_lib_dld_dld_link+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  ac_check_lib_save_LIBS=$LIBS
++LIBS="-ldld  $LIBS"
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++/* Override any GCC internal prototype to avoid an error.
++   Use char because int might match the return type of a GCC
++   builtin and then its argument prototype would still apply.  */
++char dld_link ();
++int
++main (void)
++{
++return dld_link ();
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_link "$LINENO"
++then :
++  ac_cv_lib_dld_dld_link=yes
++else $as_nop
++  ac_cv_lib_dld_dld_link=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++LIBS=$ac_check_lib_save_LIBS
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
++printf "%s\n" "$ac_cv_lib_dld_dld_link" >&6; }
++if test "x$ac_cv_lib_dld_dld_link" = xyes
++then :
++  lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld
++fi
++
++
++fi
++
++
++fi
++
++
++fi
++
++
++fi
++
++
++fi
++
++    ;;
++  esac
++
++  if test no = "$lt_cv_dlopen"; then
++    enable_dlopen=no
++  else
++    enable_dlopen=yes
++  fi
++
++  case $lt_cv_dlopen in
++  dlopen)
++    save_CPPFLAGS=$CPPFLAGS
++    test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
++
++    save_LDFLAGS=$LDFLAGS
++    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
++
++    save_LIBS=$LIBS
++    LIBS="$lt_cv_dlopen_libs $LIBS"
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
++printf %s "checking whether a program can dlopen itself... " >&6; }
++if test ${lt_cv_dlopen_self+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  	  if test yes = "$cross_compiling"; then :
++  lt_cv_dlopen_self=cross
++else
++  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
++  lt_status=$lt_dlunknown
++  cat > conftest.$ac_ext <<_LT_EOF
++#line $LINENO "configure"
++#include "confdefs.h"
++
++#if HAVE_DLFCN_H
++#include <dlfcn.h>
++#endif
++
++#include <stdio.h>
++
++#ifdef RTLD_GLOBAL
++#  define LT_DLGLOBAL		RTLD_GLOBAL
++#else
++#  ifdef DL_GLOBAL
++#    define LT_DLGLOBAL		DL_GLOBAL
++#  else
++#    define LT_DLGLOBAL		0
++#  endif
++#endif
++
++/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
++   find out it does not work in some platform. */
++#ifndef LT_DLLAZY_OR_NOW
++#  ifdef RTLD_LAZY
++#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
++#  else
++#    ifdef DL_LAZY
++#      define LT_DLLAZY_OR_NOW		DL_LAZY
++#    else
++#      ifdef RTLD_NOW
++#        define LT_DLLAZY_OR_NOW	RTLD_NOW
++#      else
++#        ifdef DL_NOW
++#          define LT_DLLAZY_OR_NOW	DL_NOW
++#        else
++#          define LT_DLLAZY_OR_NOW	0
++#        endif
++#      endif
++#    endif
++#  endif
++#endif
++
++/* When -fvisibility=hidden is used, assume the code has been annotated
++   correspondingly for the symbols needed.  */
++#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
++int fnord () __attribute__((visibility("default")));
++#endif
++
++int fnord () { return 42; }
++int main ()
++{
++  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
++  int status = $lt_dlunknown;
++
++  if (self)
++    {
++      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
++      else
++        {
++	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
++          else puts (dlerror ());
++	}
++      /* dlclose (self); */
++    }
++  else
++    puts (dlerror ());
++
++  return status;
++}
++_LT_EOF
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
++  (eval $ac_link) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
++    (./conftest; exit; ) >&5 2>/dev/null
++    lt_status=$?
++    case x$lt_status in
++      x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
++      x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
++      x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;;
++    esac
++  else :
++    # compilation failed
++    lt_cv_dlopen_self=no
++  fi
++fi
++rm -fr conftest*
++
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
++printf "%s\n" "$lt_cv_dlopen_self" >&6; }
++
++    if test yes = "$lt_cv_dlopen_self"; then
++      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
++printf %s "checking whether a statically linked program can dlopen itself... " >&6; }
++if test ${lt_cv_dlopen_self_static+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  	  if test yes = "$cross_compiling"; then :
++  lt_cv_dlopen_self_static=cross
++else
++  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
++  lt_status=$lt_dlunknown
++  cat > conftest.$ac_ext <<_LT_EOF
++#line $LINENO "configure"
++#include "confdefs.h"
++
++#if HAVE_DLFCN_H
++#include <dlfcn.h>
++#endif
++
++#include <stdio.h>
++
++#ifdef RTLD_GLOBAL
++#  define LT_DLGLOBAL		RTLD_GLOBAL
++#else
++#  ifdef DL_GLOBAL
++#    define LT_DLGLOBAL		DL_GLOBAL
++#  else
++#    define LT_DLGLOBAL		0
++#  endif
++#endif
++
++/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
++   find out it does not work in some platform. */
++#ifndef LT_DLLAZY_OR_NOW
++#  ifdef RTLD_LAZY
++#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
++#  else
++#    ifdef DL_LAZY
++#      define LT_DLLAZY_OR_NOW		DL_LAZY
++#    else
++#      ifdef RTLD_NOW
++#        define LT_DLLAZY_OR_NOW	RTLD_NOW
++#      else
++#        ifdef DL_NOW
++#          define LT_DLLAZY_OR_NOW	DL_NOW
++#        else
++#          define LT_DLLAZY_OR_NOW	0
++#        endif
++#      endif
++#    endif
++#  endif
++#endif
++
++/* When -fvisibility=hidden is used, assume the code has been annotated
++   correspondingly for the symbols needed.  */
++#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
++int fnord () __attribute__((visibility("default")));
++#endif
++
++int fnord () { return 42; }
++int main ()
++{
++  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
++  int status = $lt_dlunknown;
++
++  if (self)
++    {
++      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
++      else
++        {
++	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
++          else puts (dlerror ());
++	}
++      /* dlclose (self); */
++    }
++  else
++    puts (dlerror ());
++
++  return status;
++}
++_LT_EOF
++  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
++  (eval $ac_link) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
++    (./conftest; exit; ) >&5 2>/dev/null
++    lt_status=$?
++    case x$lt_status in
++      x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;;
++      x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;;
++      x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;;
++    esac
++  else :
++    # compilation failed
++    lt_cv_dlopen_self_static=no
++  fi
++fi
++rm -fr conftest*
++
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5
++printf "%s\n" "$lt_cv_dlopen_self_static" >&6; }
++    fi
++
++    CPPFLAGS=$save_CPPFLAGS
++    LDFLAGS=$save_LDFLAGS
++    LIBS=$save_LIBS
++    ;;
++  esac
++
++  case $lt_cv_dlopen_self in
++  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
++  *) enable_dlopen_self=unknown ;;
++  esac
++
++  case $lt_cv_dlopen_self_static in
++  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
++  *) enable_dlopen_self_static=unknown ;;
++  esac
++fi
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++striplib=
++old_striplib=
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5
++printf %s "checking whether stripping libraries is possible... " >&6; }
++if test -z "$STRIP"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++else
++  if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
++    old_striplib="$STRIP --strip-debug"
++    striplib="$STRIP --strip-unneeded"
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++printf "%s\n" "yes" >&6; }
++  else
++    case $host_os in
++    darwin*)
++      # FIXME - insert some real tests, host_os isn't really good enough
++      striplib="$STRIP -x"
++      old_striplib="$STRIP -S"
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++printf "%s\n" "yes" >&6; }
++      ;;
++    freebsd*)
++      if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then
++        old_striplib="$STRIP --strip-debug"
++        striplib="$STRIP --strip-unneeded"
++        { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++printf "%s\n" "yes" >&6; }
++      else
++        { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++      fi
++      ;;
++    *)
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++      ;;
++    esac
++  fi
++fi
++
++
++
++
++
++
++
++
++
++
++
++
++  # Report what library types will actually be built
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5
++printf %s "checking if libtool supports shared libraries... " >&6; }
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5
++printf "%s\n" "$can_build_shared" >&6; }
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5
++printf %s "checking whether to build shared libraries... " >&6; }
++  test no = "$can_build_shared" && enable_shared=no
++
++  # On AIX, shared libraries and static libraries use the same namespace, and
++  # are all built from PIC.
++  case $host_os in
++  aix3*)
++    test yes = "$enable_shared" && enable_static=no
++    if test -n "$RANLIB"; then
++      archive_cmds="$archive_cmds~\$RANLIB \$lib"
++      postinstall_cmds='$RANLIB $lib'
++    fi
++    ;;
++
++  aix[4-9]*)
++    if test ia64 != "$host_cpu"; then
++      case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
++      yes,aix,yes) ;;			# shared object as lib.so file only
++      yes,svr4,*) ;;			# shared object as lib.so archive member only
++      yes,*) enable_static=no ;;	# shared object in lib.a archive as well
++      esac
++    fi
++    ;;
++  esac
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5
++printf "%s\n" "$enable_shared" >&6; }
++
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5
++printf %s "checking whether to build static libraries... " >&6; }
++  # Make sure either enable_shared or enable_static is yes.
++  test yes = "$enable_shared" || enable_static=yes
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5
++printf "%s\n" "$enable_static" >&6; }
++
++
++
++
++fi
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++CC=$lt_save_CC
++
++      if test -n "$CXX" && ( test no != "$CXX" &&
++    ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) ||
++    (test g++ != "$CXX"))); then
++  ac_ext=cpp
++ac_cpp='$CXXCPP $CPPFLAGS'
++ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
++printf %s "checking how to run the C++ preprocessor... " >&6; }
++if test -z "$CXXCPP"; then
++  if test ${ac_cv_prog_CXXCPP+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++      # Double quotes because $CXX needs to be expanded
++    for CXXCPP in "$CXX -E" cpp /lib/cpp
++    do
++      ac_preproc_ok=false
++for ac_cxx_preproc_warn_flag in '' yes
++do
++  # Use a header file that comes with gcc, so configuring glibc
++  # with a fresh cross-compiler works.
++  # On the NeXT, cc -E runs the code through the compiler's parser,
++  # not just through cpp. "Syntax error" is here to catch this case.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include <limits.h>
++		     Syntax error
++_ACEOF
++if ac_fn_cxx_try_cpp "$LINENO"
++then :
++
++else $as_nop
++  # Broken: fails on valid input.
++continue
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++  # OK, works on sane cases.  Now check whether nonexistent headers
++  # can be detected and how.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include <ac_nonexistent.h>
++_ACEOF
++if ac_fn_cxx_try_cpp "$LINENO"
++then :
++  # Broken: success on invalid input.
++continue
++else $as_nop
++  # Passes both tests.
++ac_preproc_ok=:
++break
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++done
++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
++rm -f conftest.i conftest.err conftest.$ac_ext
++if $ac_preproc_ok
++then :
++  break
++fi
++
++    done
++    ac_cv_prog_CXXCPP=$CXXCPP
++
++fi
++  CXXCPP=$ac_cv_prog_CXXCPP
++else
++  ac_cv_prog_CXXCPP=$CXXCPP
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
++printf "%s\n" "$CXXCPP" >&6; }
++ac_preproc_ok=false
++for ac_cxx_preproc_warn_flag in '' yes
++do
++  # Use a header file that comes with gcc, so configuring glibc
++  # with a fresh cross-compiler works.
++  # On the NeXT, cc -E runs the code through the compiler's parser,
++  # not just through cpp. "Syntax error" is here to catch this case.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include <limits.h>
++		     Syntax error
++_ACEOF
++if ac_fn_cxx_try_cpp "$LINENO"
++then :
++
++else $as_nop
++  # Broken: fails on valid input.
++continue
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++  # OK, works on sane cases.  Now check whether nonexistent headers
++  # can be detected and how.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include <ac_nonexistent.h>
++_ACEOF
++if ac_fn_cxx_try_cpp "$LINENO"
++then :
++  # Broken: success on invalid input.
++continue
++else $as_nop
++  # Passes both tests.
++ac_preproc_ok=:
++break
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++done
++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
++rm -f conftest.i conftest.err conftest.$ac_ext
++if $ac_preproc_ok
++then :
++
++else $as_nop
++  { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
++See \`config.log' for more details" "$LINENO" 5; }
++fi
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++else
++  _lt_caught_CXX_error=yes
++fi
++
++ac_ext=cpp
++ac_cpp='$CXXCPP $CPPFLAGS'
++ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
++
++archive_cmds_need_lc_CXX=no
++allow_undefined_flag_CXX=
++always_export_symbols_CXX=no
++archive_expsym_cmds_CXX=
++compiler_needs_object_CXX=no
++export_dynamic_flag_spec_CXX=
++hardcode_direct_CXX=no
++hardcode_direct_absolute_CXX=no
++hardcode_libdir_flag_spec_CXX=
++hardcode_libdir_separator_CXX=
++hardcode_minus_L_CXX=no
++hardcode_shlibpath_var_CXX=unsupported
++hardcode_automatic_CXX=no
++inherit_rpath_CXX=no
++module_cmds_CXX=
++module_expsym_cmds_CXX=
++link_all_deplibs_CXX=unknown
++old_archive_cmds_CXX=$old_archive_cmds
++reload_flag_CXX=$reload_flag
++reload_cmds_CXX=$reload_cmds
++no_undefined_flag_CXX=
++whole_archive_flag_spec_CXX=
++enable_shared_with_static_runtimes_CXX=no
++
++# Source file extension for C++ test sources.
++ac_ext=cpp
++
++# Object file extension for compiled C++ test sources.
++objext=o
++objext_CXX=$objext
++
++# No sense in running all these tests if we already determined that
++# the CXX compiler isn't working.  Some variables (like enable_shared)
++# are currently assumed to apply to all compilers on this platform,
++# and will be corrupted by setting them based on a non-working compiler.
++if test yes != "$_lt_caught_CXX_error"; then
++  # Code to be used in simple compile tests
++  lt_simple_compile_test_code="int some_variable = 0;"
++
++  # Code to be used in simple link tests
++  lt_simple_link_test_code='int main(int, char *[]) { return(0); }'
++
++  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
++
++
++
++
++
++
++# If no C compiler was specified, use CC.
++LTCC=${LTCC-"$CC"}
++
++# If no C compiler flags were specified, use CFLAGS.
++LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
++
++# Allow CC to be a program name with arguments.
++compiler=$CC
++
++
++  # save warnings/boilerplate of simple test code
++  ac_outfile=conftest.$ac_objext
++echo "$lt_simple_compile_test_code" >conftest.$ac_ext
++eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
++_lt_compiler_boilerplate=`cat conftest.err`
++$RM conftest*
++
++  ac_outfile=conftest.$ac_objext
++echo "$lt_simple_link_test_code" >conftest.$ac_ext
++eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
++_lt_linker_boilerplate=`cat conftest.err`
++$RM -r conftest*
++
++
++  # Allow CC to be a program name with arguments.
++  lt_save_CC=$CC
++  lt_save_CFLAGS=$CFLAGS
++  lt_save_LD=$LD
++  lt_save_GCC=$GCC
++  GCC=$GXX
++  lt_save_with_gnu_ld=$with_gnu_ld
++  lt_save_path_LD=$lt_cv_path_LD
++  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
++    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
++  else
++    $as_unset lt_cv_prog_gnu_ld
++  fi
++  if test -n "${lt_cv_path_LDCXX+set}"; then
++    lt_cv_path_LD=$lt_cv_path_LDCXX
++  else
++    $as_unset lt_cv_path_LD
++  fi
++  test -z "${LDCXX+set}" || LD=$LDCXX
++  CC=${CXX-"c++"}
++  CFLAGS=$CXXFLAGS
++  compiler=$CC
++  compiler_CXX=$CC
++  func_cc_basename $compiler
++cc_basename=$func_cc_basename_result
++
++
++  if test -n "$compiler"; then
++    # We don't want -fno-exception when compiling C++ code, so set the
++    # no_builtin_flag separately
++    if test yes = "$GXX"; then
++      lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
++    else
++      lt_prog_compiler_no_builtin_flag_CXX=
++    fi
++
++    if test yes = "$GXX"; then
++      # Set up default GNU C++ configuration
++
++
++
++# Check whether --with-gnu-ld was given.
++if test ${with_gnu_ld+y}
++then :
++  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
++else $as_nop
++  with_gnu_ld=no
++fi
++
++ac_prog=ld
++if test yes = "$GCC"; then
++  # Check if gcc -print-prog-name=ld gives a path.
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
++printf %s "checking for ld used by $CC... " >&6; }
++  case $host in
++  *-*-mingw*)
++    # gcc leaves a trailing carriage return, which upsets mingw
++    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
++  *)
++    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
++  esac
++  case $ac_prog in
++    # Accept absolute paths.
++    [\\/]* | ?:[\\/]*)
++      re_direlt='/[^/][^/]*/\.\./'
++      # Canonicalize the pathname of ld
++      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
++      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
++	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
++      done
++      test -z "$LD" && LD=$ac_prog
++      ;;
++  "")
++    # If it fails, then pretend we aren't using GCC.
++    ac_prog=ld
++    ;;
++  *)
++    # If it is relative, then search for the first ld in PATH.
++    with_gnu_ld=unknown
++    ;;
++  esac
++elif test yes = "$with_gnu_ld"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
++printf %s "checking for GNU ld... " >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
++printf %s "checking for non-GNU ld... " >&6; }
++fi
++if test ${lt_cv_path_LD+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  if test -z "$LD"; then
++  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  for ac_dir in $PATH; do
++    IFS=$lt_save_ifs
++    test -z "$ac_dir" && ac_dir=.
++    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
++      lt_cv_path_LD=$ac_dir/$ac_prog
++      # Check to see if the program is GNU ld.  I'd rather use --version,
++      # but apparently some variants of GNU ld only accept -v.
++      # Break only if it was the GNU/non-GNU ld that we prefer.
++      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
++      *GNU* | *'with BFD'*)
++	test no != "$with_gnu_ld" && break
++	;;
++      *)
++	test yes != "$with_gnu_ld" && break
++	;;
++      esac
++    fi
++  done
++  IFS=$lt_save_ifs
++else
++  lt_cv_path_LD=$LD # Let the user override the test with a path.
++fi
++fi
++
++LD=$lt_cv_path_LD
++if test -n "$LD"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
++printf "%s\n" "$LD" >&6; }
++else
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
++printf "%s\n" "no" >&6; }
++fi
++test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
++printf %s "checking if the linker ($LD) is GNU ld... " >&6; }
++if test ${lt_cv_prog_gnu_ld+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  # I'd rather use --version here, but apparently some GNU lds only accept -v.
++case `$LD -v 2>&1 </dev/null` in
++*GNU* | *'with BFD'*)
++  lt_cv_prog_gnu_ld=yes
++  ;;
++*)
++  lt_cv_prog_gnu_ld=no
++  ;;
++esac
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
++printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; }
++with_gnu_ld=$lt_cv_prog_gnu_ld
++
++
++
++
++
++
++
++      # Check if GNU C++ uses GNU ld as the underlying linker, since the
++      # archiving commands below assume that GNU ld is being used.
++      if test yes = "$with_gnu_ld"; then
++        archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++        archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++
++        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++        export_dynamic_flag_spec_CXX='$wl--export-dynamic'
++
++        # If archive_cmds runs LD, not CC, wlarc should be empty
++        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
++        #     investigate it a little bit more. (MM)
++        wlarc='$wl'
++
++        # ancient GNU ld didn't support --whole-archive et. al.
++        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
++	  $GREP 'no-whole-archive' > /dev/null; then
++          whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++        else
++          whole_archive_flag_spec_CXX=
++        fi
++      else
++        with_gnu_ld=no
++        wlarc=
++
++        # A generic and very simple default shared library creation
++        # command for GNU C++ for the case where it uses the native
++        # linker, instead of GNU ld.  If possible, this setting should
++        # overridden to take advantage of the native linker features on
++        # the platform it is being used on.
++        archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
++      fi
++
++      # Commands to make compiler produce verbose output that lists
++      # what "hidden" libraries, object files and flags are used when
++      # linking a shared library.
++      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++
++    else
++      GXX=no
++      with_gnu_ld=no
++      wlarc=
++    fi
++
++    # PORTME: fill in a description of your system's C++ link characteristics
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
++printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
++    ld_shlibs_CXX=yes
++    case $host_os in
++      aix3*)
++        # FIXME: insert proper C++ library support
++        ld_shlibs_CXX=no
++        ;;
++      aix[4-9]*)
++        if test ia64 = "$host_cpu"; then
++          # On IA64, the linker does run time linking by default, so we don't
++          # have to do anything special.
++          aix_use_runtimelinking=no
++          exp_sym_flag='-Bexport'
++          no_entry_flag=
++        else
++          aix_use_runtimelinking=no
++
++          # Test if we are trying to use run time linking or normal
++          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
++          # have runtime linking enabled, and use it for executables.
++          # For shared libraries, we enable/disable runtime linking
++          # depending on the kind of the shared library created -
++          # when "with_aix_soname,aix_use_runtimelinking" is:
++          # "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
++          # "aix,yes"  lib.so          shared, rtl:yes, for executables
++          #            lib.a           static archive
++          # "both,no"  lib.so.V(shr.o) shared, rtl:yes
++          #            lib.a(lib.so.V) shared, rtl:no,  for executables
++          # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
++          #            lib.a(lib.so.V) shared, rtl:no
++          # "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
++          #            lib.a           static archive
++          case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
++	    for ld_flag in $LDFLAGS; do
++	      case $ld_flag in
++	      *-brtl*)
++	        aix_use_runtimelinking=yes
++	        break
++	        ;;
++	      esac
++	    done
++	    if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
++	      # With aix-soname=svr4, we create the lib.so.V shared archives only,
++	      # so we don't have lib.a shared libs to link our executables.
++	      # We have to force runtime linking in this case.
++	      aix_use_runtimelinking=yes
++	      LDFLAGS="$LDFLAGS -Wl,-brtl"
++	    fi
++	    ;;
++          esac
++
++          exp_sym_flag='-bexport'
++          no_entry_flag='-bnoentry'
++        fi
++
++        # When large executables or shared objects are built, AIX ld can
++        # have problems creating the table of contents.  If linking a library
++        # or program results in "error TOC overflow" add -mminimal-toc to
++        # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
++        # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
++
++        archive_cmds_CXX=''
++        hardcode_direct_CXX=yes
++        hardcode_direct_absolute_CXX=yes
++        hardcode_libdir_separator_CXX=':'
++        link_all_deplibs_CXX=yes
++        file_list_spec_CXX='$wl-f,'
++        case $with_aix_soname,$aix_use_runtimelinking in
++        aix,*) ;;	# no import file
++        svr4,* | *,yes) # use import file
++          # The Import File defines what to hardcode.
++          hardcode_direct_CXX=no
++          hardcode_direct_absolute_CXX=no
++          ;;
++        esac
++
++        if test yes = "$GXX"; then
++          case $host_os in aix4.[012]|aix4.[012].*)
++          # We only want to do this on AIX 4.2 and lower, the check
++          # below for broken collect2 doesn't work under 4.3+
++	  collect2name=`$CC -print-prog-name=collect2`
++	  if test -f "$collect2name" &&
++	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
++	  then
++	    # We have reworked collect2
++	    :
++	  else
++	    # We have old collect2
++	    hardcode_direct_CXX=unsupported
++	    # It fails to find uninstalled libraries when the uninstalled
++	    # path is not listed in the libpath.  Setting hardcode_minus_L
++	    # to unsupported forces relinking
++	    hardcode_minus_L_CXX=yes
++	    hardcode_libdir_flag_spec_CXX='-L$libdir'
++	    hardcode_libdir_separator_CXX=
++	  fi
++          esac
++          shared_flag='-shared'
++	  if test yes = "$aix_use_runtimelinking"; then
++	    shared_flag=$shared_flag' $wl-G'
++	  fi
++	  # Need to ensure runtime linking is disabled for the traditional
++	  # shared library, or the linker may eventually find shared libraries
++	  # /with/ Import File - we do not want to mix them.
++	  shared_flag_aix='-shared'
++	  shared_flag_svr4='-shared $wl-G'
++        else
++          # not using gcc
++          if test ia64 = "$host_cpu"; then
++	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
++	  # chokes on -Wl,-G. The following line is correct:
++	  shared_flag='-G'
++          else
++	    if test yes = "$aix_use_runtimelinking"; then
++	      shared_flag='$wl-G'
++	    else
++	      shared_flag='$wl-bM:SRE'
++	    fi
++	    shared_flag_aix='$wl-bM:SRE'
++	    shared_flag_svr4='$wl-G'
++          fi
++        fi
++
++        export_dynamic_flag_spec_CXX='$wl-bexpall'
++        # It seems that -bexpall does not export symbols beginning with
++        # underscore (_), so it is better to generate a list of symbols to
++	# export.
++        always_export_symbols_CXX=yes
++	if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
++          # Warning - without using the other runtime loading flags (-brtl),
++          # -berok will link without error, but may produce a broken library.
++          # The "-G" linker flag allows undefined symbols.
++          no_undefined_flag_CXX='-bernotok'
++          # Determine the default libpath from the value encoded in an empty
++          # executable.
++          if test set = "${lt_cv_aix_libpath+set}"; then
++  aix_libpath=$lt_cv_aix_libpath
++else
++  if test ${lt_cv_aix_libpath__CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_cxx_try_link "$LINENO"
++then :
++
++  lt_aix_libpath_sed='
++      /Import File Strings/,/^$/ {
++	  /^0/ {
++	      s/^0  *\([^ ]*\) *$/\1/
++	      p
++	  }
++      }'
++  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  # Check for a 64-bit object if we didn't find anything.
++  if test -z "$lt_cv_aix_libpath__CXX"; then
++    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  fi
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++  if test -z "$lt_cv_aix_libpath__CXX"; then
++    lt_cv_aix_libpath__CXX=/usr/lib:/lib
++  fi
++
++fi
++
++  aix_libpath=$lt_cv_aix_libpath__CXX
++fi
++
++          hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath"
++
++          archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
++        else
++          if test ia64 = "$host_cpu"; then
++	    hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib'
++	    allow_undefined_flag_CXX="-z nodefs"
++	    archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
++          else
++	    # Determine the default libpath from the value encoded in an
++	    # empty executable.
++	    if test set = "${lt_cv_aix_libpath+set}"; then
++  aix_libpath=$lt_cv_aix_libpath
++else
++  if test ${lt_cv_aix_libpath__CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_cxx_try_link "$LINENO"
++then :
++
++  lt_aix_libpath_sed='
++      /Import File Strings/,/^$/ {
++	  /^0/ {
++	      s/^0  *\([^ ]*\) *$/\1/
++	      p
++	  }
++      }'
++  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  # Check for a 64-bit object if we didn't find anything.
++  if test -z "$lt_cv_aix_libpath__CXX"; then
++    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++  fi
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++  if test -z "$lt_cv_aix_libpath__CXX"; then
++    lt_cv_aix_libpath__CXX=/usr/lib:/lib
++  fi
++
++fi
++
++  aix_libpath=$lt_cv_aix_libpath__CXX
++fi
++
++	    hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath"
++	    # Warning - without using the other run time loading flags,
++	    # -berok will link without error, but may produce a broken library.
++	    no_undefined_flag_CXX=' $wl-bernotok'
++	    allow_undefined_flag_CXX=' $wl-berok'
++	    if test yes = "$with_gnu_ld"; then
++	      # We only use this code for GNU lds that support --whole-archive.
++	      whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive'
++	    else
++	      # Exported symbols can be pulled into shared objects from archives
++	      whole_archive_flag_spec_CXX='$convenience'
++	    fi
++	    archive_cmds_need_lc_CXX=yes
++	    archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
++	    # -brtl affects multiple linker settings, -berok does not and is overridden later
++	    compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`'
++	    if test svr4 != "$with_aix_soname"; then
++	      # This is similar to how AIX traditionally builds its shared
++	      # libraries. Need -bnortl late, we may have -brtl in LDFLAGS.
++	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
++	    fi
++	    if test aix != "$with_aix_soname"; then
++	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
++	    else
++	      # used by -dlpreopen to get the symbols
++	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
++	    fi
++	    archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d'
++          fi
++        fi
++        ;;
++
++      beos*)
++	if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
++	  allow_undefined_flag_CXX=unsupported
++	  # Joseph Beckenbach  says some releases of gcc
++	  # support --undefined.  This deserves some investigation.  FIXME
++	  archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	else
++	  ld_shlibs_CXX=no
++	fi
++	;;
++
++      chorus*)
++        case $cc_basename in
++          *)
++	  # FIXME: insert proper C++ library support
++	  ld_shlibs_CXX=no
++	  ;;
++        esac
++        ;;
++
++      cygwin* | mingw* | pw32* | cegcc*)
++	case $GXX,$cc_basename in
++	,cl* | no,cl* | ,icl* | no,icl*)
++	  # Native MSVC or ICC
++	  # hardcode_libdir_flag_spec is actually meaningless, as there is
++	  # no search path for DLLs.
++	  hardcode_libdir_flag_spec_CXX=' '
++	  allow_undefined_flag_CXX=unsupported
++	  always_export_symbols_CXX=yes
++	  file_list_spec_CXX='@'
++	  # Tell ltmain to make .lib files, not .a files.
++	  libext=lib
++	  # Tell ltmain to make .dll files, not .so files.
++	  shrext_cmds=.dll
++	  # FIXME: Setting linknames here is a bad hack.
++	  archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
++	  archive_expsym_cmds_CXX='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
++              cp "$export_symbols" "$output_objdir/$soname.def";
++              echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
++            else
++              $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
++            fi~
++            $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
++            linknames='
++	  # The linker will not automatically build a static lib if we build a DLL.
++	  # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true'
++	  enable_shared_with_static_runtimes_CXX=yes
++	  # Don't use ranlib
++	  old_postinstall_cmds_CXX='chmod 644 $oldlib'
++	  postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~
++            lt_tool_outputfile="@TOOL_OUTPUT@"~
++            case $lt_outputfile in
++              *.exe|*.EXE) ;;
++              *)
++                lt_outputfile=$lt_outputfile.exe
++                lt_tool_outputfile=$lt_tool_outputfile.exe
++                ;;
++            esac~
++            func_to_tool_file "$lt_outputfile"~
++            if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
++              $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
++              $RM "$lt_outputfile.manifest";
++            fi'
++	  ;;
++	*)
++	  # g++
++	  # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless,
++	  # as there is no search path for DLLs.
++	  hardcode_libdir_flag_spec_CXX='-L$libdir'
++	  export_dynamic_flag_spec_CXX='$wl--export-all-symbols'
++	  allow_undefined_flag_CXX=unsupported
++	  always_export_symbols_CXX=no
++	  enable_shared_with_static_runtimes_CXX=yes
++
++	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
++	    archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++	    # If the export-symbols file already is a .def file, use it as
++	    # is; otherwise, prepend EXPORTS...
++	    archive_expsym_cmds_CXX='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
++              cp $export_symbols $output_objdir/$soname.def;
++            else
++              echo EXPORTS > $output_objdir/$soname.def;
++              cat $export_symbols >> $output_objdir/$soname.def;
++            fi~
++            $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++	  else
++	    ld_shlibs_CXX=no
++	  fi
++	  ;;
++	esac
++	;;
++      darwin* | rhapsody*)
++
++
++  archive_cmds_need_lc_CXX=no
++  hardcode_direct_CXX=no
++  hardcode_automatic_CXX=yes
++  hardcode_shlibpath_var_CXX=unsupported
++  if test yes = "$lt_cv_ld_force_load"; then
++    whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
++
++  else
++    whole_archive_flag_spec_CXX=''
++  fi
++  link_all_deplibs_CXX=yes
++  allow_undefined_flag_CXX=$_lt_dar_allow_undefined
++  case $cc_basename in
++     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
++     *) _lt_dar_can_shared=$GCC ;;
++  esac
++  if test yes = "$_lt_dar_can_shared"; then
++    output_verbose_link_cmd=func_echo_all
++    archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
++    module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
++    archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
++    module_expsym_cmds_CXX="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
++       if test yes != "$lt_cv_apple_cc_single_mod"; then
++      archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil"
++      archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil"
++    fi
++
++  else
++  ld_shlibs_CXX=no
++  fi
++
++	;;
++
++      os2*)
++	hardcode_libdir_flag_spec_CXX='-L$libdir'
++	hardcode_minus_L_CXX=yes
++	allow_undefined_flag_CXX=unsupported
++	shrext_cmds=.dll
++	archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	  $ECHO EXPORTS >> $output_objdir/$libname.def~
++	  emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
++	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	  emximp -o $lib $output_objdir/$libname.def'
++	archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
++	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
++	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
++	  $ECHO EXPORTS >> $output_objdir/$libname.def~
++	  prefix_cmds="$SED"~
++	  if test EXPORTS = "`$SED 1q $export_symbols`"; then
++	    prefix_cmds="$prefix_cmds -e 1d";
++	  fi~
++	  prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
++	  cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
++	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
++	  emximp -o $lib $output_objdir/$libname.def'
++	old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
++	enable_shared_with_static_runtimes_CXX=yes
++	file_list_spec_CXX='@'
++	;;
++
++      dgux*)
++        case $cc_basename in
++          ec++*)
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++          ghcx*)
++	    # Green Hills C++ Compiler
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++          *)
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++        esac
++        ;;
++
++      freebsd2.*)
++        # C++ shared libraries reported to be fairly broken before
++	# switch to ELF
++        ld_shlibs_CXX=no
++        ;;
++
++      freebsd-elf*)
++        archive_cmds_need_lc_CXX=no
++        ;;
++
++      freebsd* | dragonfly* | midnightbsd*)
++        # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
++        # conventions
++        ld_shlibs_CXX=yes
++        ;;
++
++      haiku*)
++        archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++        link_all_deplibs_CXX=yes
++        ;;
++
++      hpux9*)
++        hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir'
++        hardcode_libdir_separator_CXX=:
++        export_dynamic_flag_spec_CXX='$wl-E'
++        hardcode_direct_CXX=yes
++        hardcode_minus_L_CXX=yes # Not in the search PATH,
++				             # but as the default
++				             # location of the library.
++
++        case $cc_basename in
++          CC*)
++            # FIXME: insert proper C++ library support
++            ld_shlibs_CXX=no
++            ;;
++          aCC*)
++            archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++            # Commands to make compiler produce verbose output that lists
++            # what "hidden" libraries, object files and flags are used when
++            # linking a shared library.
++            #
++            # There doesn't appear to be a way to prevent this compiler from
++            # explicitly linking system object files so we need to strip them
++            # from the output so that they don't get included in the library
++            # dependencies.
++            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++            ;;
++          *)
++            if test yes = "$GXX"; then
++              archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++            else
++              # FIXME: insert proper C++ library support
++              ld_shlibs_CXX=no
++            fi
++            ;;
++        esac
++        ;;
++
++      hpux10*|hpux11*)
++        if test no = "$with_gnu_ld"; then
++	  hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir'
++	  hardcode_libdir_separator_CXX=:
++
++          case $host_cpu in
++            hppa*64*|ia64*)
++              ;;
++            *)
++	      export_dynamic_flag_spec_CXX='$wl-E'
++              ;;
++          esac
++        fi
++        case $host_cpu in
++          hppa*64*|ia64*)
++            hardcode_direct_CXX=no
++            hardcode_shlibpath_var_CXX=no
++            ;;
++          *)
++            hardcode_direct_CXX=yes
++            hardcode_direct_absolute_CXX=yes
++            hardcode_minus_L_CXX=yes # Not in the search PATH,
++					         # but as the default
++					         # location of the library.
++            ;;
++        esac
++
++        case $cc_basename in
++          CC*)
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++          aCC*)
++	    case $host_cpu in
++	      hppa*64*)
++	        archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        ;;
++	      ia64*)
++	        archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        ;;
++	      *)
++	        archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        ;;
++	    esac
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++	    ;;
++          *)
++	    if test yes = "$GXX"; then
++	      if test no = "$with_gnu_ld"; then
++	        case $host_cpu in
++	          hppa*64*)
++	            archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            ;;
++	          ia64*)
++	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            ;;
++	          *)
++	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            ;;
++	        esac
++	      fi
++	    else
++	      # FIXME: insert proper C++ library support
++	      ld_shlibs_CXX=no
++	    fi
++	    ;;
++        esac
++        ;;
++
++      interix[3-9]*)
++	hardcode_direct_CXX=no
++	hardcode_shlibpath_var_CXX=no
++	hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
++	export_dynamic_flag_spec_CXX='$wl-E'
++	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
++	# Instead, shared libraries are loaded at an image base (0x10000000 by
++	# default) and relocated if they conflict, which is a slow very memory
++	# consuming and fragmenting process.  To avoid this, we pick a random,
++	# 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
++	# time.  Moving up from 0x10000000 also allows more sbrk(2) space.
++	archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++	archive_expsym_cmds_CXX='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++	;;
++      irix5* | irix6*)
++        case $cc_basename in
++          CC*)
++	    # SGI C++
++	    archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++
++	    # Archives containing C++ object files must be created using
++	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
++	    # necessary to make sure instantiated templates are included
++	    # in the archive.
++	    old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs'
++	    ;;
++          *)
++	    if test yes = "$GXX"; then
++	      if test no = "$with_gnu_ld"; then
++	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	      else
++	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib'
++	      fi
++	    fi
++	    link_all_deplibs_CXX=yes
++	    ;;
++        esac
++        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++        hardcode_libdir_separator_CXX=:
++        inherit_rpath_CXX=yes
++        ;;
++
++      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++        case $cc_basename in
++          KCC*)
++	    # Kuck and Associates, Inc. (KAI) C++ Compiler
++
++	    # KCC will only create a shared library if the output file
++	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
++	    # to its proper name (with version) after linking.
++	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
++	    archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib'
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++
++	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
++	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
++
++	    # Archives containing C++ object files must be created using
++	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
++	    old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs'
++	    ;;
++	  icpc* | ecpc* )
++	    # Intel C++
++	    with_gnu_ld=yes
++	    # version 8.0 and above of icpc choke on multiply defined symbols
++	    # if we add $predep_objects and $postdep_objects, however 7.1 and
++	    # earlier do not add the objects themselves.
++	    case `$CC -V 2>&1` in
++	      *"Version 7."*)
++	        archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++		archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++		;;
++	      *)  # Version 8.0 or newer
++	        tmp_idyn=
++	        case $host_cpu in
++		  ia64*) tmp_idyn=' -i_dynamic';;
++		esac
++	        archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++		archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++		;;
++	    esac
++	    archive_cmds_need_lc_CXX=no
++	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
++	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
++	    whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive'
++	    ;;
++          pgCC* | pgcpp*)
++            # Portland Group C++ compiler
++	    case `$CC -V` in
++	    *pgCC\ [1-5].* | *pgcpp\ [1-5].*)
++	      prelink_cmds_CXX='tpldir=Template.dir~
++               rm -rf $tpldir~
++               $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
++               compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
++	      old_archive_cmds_CXX='tpldir=Template.dir~
++                rm -rf $tpldir~
++                $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
++                $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
++                $RANLIB $oldlib'
++	      archive_cmds_CXX='tpldir=Template.dir~
++                rm -rf $tpldir~
++                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
++                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++	      archive_expsym_cmds_CXX='tpldir=Template.dir~
++                rm -rf $tpldir~
++                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
++                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	      ;;
++	    *) # Version 6 and above use weak symbols
++	      archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++	      archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	      ;;
++	    esac
++
++	    hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir'
++	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
++	    whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++            ;;
++	  cxx*)
++	    # Compaq C++
++	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++	    archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname  -o $lib $wl-retain-symbols-file $wl$export_symbols'
++
++	    runpath_var=LD_RUN_PATH
++	    hardcode_libdir_flag_spec_CXX='-rpath $libdir'
++	    hardcode_libdir_separator_CXX=:
++
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
++	    ;;
++	  xl* | mpixl* | bgxl*)
++	    # IBM XL 8.0 on PPC, with GNU ld
++	    hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
++	    archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	    if test yes = "$supports_anon_versioning"; then
++	      archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~
++                cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++                echo "local: *; };" >> $output_objdir/$libname.ver~
++                $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
++	    fi
++	    ;;
++	  *)
++	    case `$CC -V 2>&1 | $SED 5q` in
++	    *Sun\ C*)
++	      # Sun C++ 5.9
++	      no_undefined_flag_CXX=' -zdefs'
++	      archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	      archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols'
++	      hardcode_libdir_flag_spec_CXX='-R$libdir'
++	      whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	      compiler_needs_object_CXX=yes
++
++	      # Not sure whether something based on
++	      # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
++	      # would be better.
++	      output_verbose_link_cmd='func_echo_all'
++
++	      # Archives containing C++ object files must be created using
++	      # "CC -xar", where "CC" is the Sun C++ compiler.  This is
++	      # necessary to make sure instantiated templates are included
++	      # in the archive.
++	      old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
++	      ;;
++	    esac
++	    ;;
++	esac
++	;;
++
++      lynxos*)
++        # FIXME: insert proper C++ library support
++	ld_shlibs_CXX=no
++	;;
++
++      m88k*)
++        # FIXME: insert proper C++ library support
++        ld_shlibs_CXX=no
++	;;
++
++      mvs*)
++        case $cc_basename in
++          cxx*)
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++	  *)
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++	esac
++	;;
++
++      netbsd*)
++        if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++	  archive_cmds_CXX='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
++	  wlarc=
++	  hardcode_libdir_flag_spec_CXX='-R$libdir'
++	  hardcode_direct_CXX=yes
++	  hardcode_shlibpath_var_CXX=no
++	fi
++	# Workaround some broken pre-1.5 toolchains
++	output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
++	;;
++
++      *nto* | *qnx*)
++        ld_shlibs_CXX=yes
++	;;
++
++      openbsd* | bitrig*)
++	if test -f /usr/libexec/ld.so; then
++	  hardcode_direct_CXX=yes
++	  hardcode_shlibpath_var_CXX=no
++	  hardcode_direct_absolute_CXX=yes
++	  archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
++	  hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
++	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then
++	    archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib'
++	    export_dynamic_flag_spec_CXX='$wl-E'
++	    whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++	  fi
++	  output_verbose_link_cmd=func_echo_all
++	else
++	  ld_shlibs_CXX=no
++	fi
++	;;
++
++      osf3* | osf4* | osf5*)
++        case $cc_basename in
++          KCC*)
++	    # Kuck and Associates, Inc. (KAI) C++ Compiler
++
++	    # KCC will only create a shared library if the output file
++	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
++	    # to its proper name (with version) after linking.
++	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
++
++	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
++	    hardcode_libdir_separator_CXX=:
++
++	    # Archives containing C++ object files must be created using
++	    # the KAI C++ compiler.
++	    case $host in
++	      osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;;
++	      *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;;
++	    esac
++	    ;;
++          RCC*)
++	    # Rational C++ 2.4.1
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++          cxx*)
++	    case $host in
++	      osf3*)
++	        allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*'
++	        archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++		;;
++	      *)
++	        allow_undefined_flag_CXX=' -expect_unresolved \*'
++	        archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	        archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
++                  echo "-hidden">> $lib.exp~
++                  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~
++                  $RM $lib.exp'
++	        hardcode_libdir_flag_spec_CXX='-rpath $libdir'
++		;;
++	    esac
++
++	    hardcode_libdir_separator_CXX=:
++
++	    # Commands to make compiler produce verbose output that lists
++	    # what "hidden" libraries, object files and flags are used when
++	    # linking a shared library.
++	    #
++	    # There doesn't appear to be a way to prevent this compiler from
++	    # explicitly linking system object files so we need to strip them
++	    # from the output so that they don't get included in the library
++	    # dependencies.
++	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++	    ;;
++	  *)
++	    if test yes,no = "$GXX,$with_gnu_ld"; then
++	      allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*'
++	      case $host in
++	        osf3*)
++	          archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++		  ;;
++	        *)
++	          archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++		  ;;
++	      esac
++
++	      hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++	      hardcode_libdir_separator_CXX=:
++
++	      # Commands to make compiler produce verbose output that lists
++	      # what "hidden" libraries, object files and flags are used when
++	      # linking a shared library.
++	      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++
++	    else
++	      # FIXME: insert proper C++ library support
++	      ld_shlibs_CXX=no
++	    fi
++	    ;;
++        esac
++        ;;
++
++      psos*)
++        # FIXME: insert proper C++ library support
++        ld_shlibs_CXX=no
++        ;;
++
++      sunos4*)
++        case $cc_basename in
++          CC*)
++	    # Sun C++ 4.x
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++          lcc*)
++	    # Lucid
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++          *)
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++        esac
++        ;;
++
++      solaris*)
++        case $cc_basename in
++          CC* | sunCC*)
++	    # Sun C++ 4.2, 5.x and Centerline C++
++            archive_cmds_need_lc_CXX=yes
++	    no_undefined_flag_CXX=' -zdefs'
++	    archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	    archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++              $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++
++	    hardcode_libdir_flag_spec_CXX='-R$libdir'
++	    hardcode_shlibpath_var_CXX=no
++	    case $host_os in
++	      solaris2.[0-5] | solaris2.[0-5].*) ;;
++	      *)
++		# The compiler driver will combine and reorder linker options,
++		# but understands '-z linker_flag'.
++	        # Supported since Solaris 2.6 (maybe 2.5.1?)
++		whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract'
++	        ;;
++	    esac
++	    link_all_deplibs_CXX=yes
++
++	    output_verbose_link_cmd='func_echo_all'
++
++	    # Archives containing C++ object files must be created using
++	    # "CC -xar", where "CC" is the Sun C++ compiler.  This is
++	    # necessary to make sure instantiated templates are included
++	    # in the archive.
++	    old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs'
++	    ;;
++          gcx*)
++	    # Green Hills C++ Compiler
++	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++
++	    # The C++ compiler must be used to create the archive.
++	    old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
++	    ;;
++          *)
++	    # GNU C++ compiler with Solaris linker
++	    if test yes,no = "$GXX,$with_gnu_ld"; then
++	      no_undefined_flag_CXX=' $wl-z ${wl}defs'
++	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
++	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++                  $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++
++	        # Commands to make compiler produce verbose output that lists
++	        # what "hidden" libraries, object files and flags are used when
++	        # linking a shared library.
++	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++	      else
++	        # g++ 2.7 appears to require '-G' NOT '-shared' on this
++	        # platform.
++	        archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
++                  $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++
++	        # Commands to make compiler produce verbose output that lists
++	        # what "hidden" libraries, object files and flags are used when
++	        # linking a shared library.
++	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
++	      fi
++
++	      hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir'
++	      case $host_os in
++		solaris2.[0-5] | solaris2.[0-5].*) ;;
++		*)
++		  whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
++		  ;;
++	      esac
++	    fi
++	    ;;
++        esac
++        ;;
++
++    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
++      no_undefined_flag_CXX='$wl-z,text'
++      archive_cmds_need_lc_CXX=no
++      hardcode_shlibpath_var_CXX=no
++      runpath_var='LD_RUN_PATH'
++
++      case $cc_basename in
++        CC*)
++	  archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++	*)
++	  archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  ;;
++      esac
++      ;;
++
++      sysv5* | sco3.2v5* | sco5v6*)
++	# Note: We CANNOT use -z defs as we might desire, because we do not
++	# link with -lc, and that would cause any symbols used from libc to
++	# always be unresolved, which means just about no library would
++	# ever link correctly.  If we're not using GNU ld we use -z text
++	# though, which does catch some bad symbols but isn't as heavy-handed
++	# as -z defs.
++	no_undefined_flag_CXX='$wl-z,text'
++	allow_undefined_flag_CXX='$wl-z,nodefs'
++	archive_cmds_need_lc_CXX=no
++	hardcode_shlibpath_var_CXX=no
++	hardcode_libdir_flag_spec_CXX='$wl-R,$libdir'
++	hardcode_libdir_separator_CXX=':'
++	link_all_deplibs_CXX=yes
++	export_dynamic_flag_spec_CXX='$wl-Bexport'
++	runpath_var='LD_RUN_PATH'
++
++	case $cc_basename in
++          CC*)
++	    archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~
++              '"$old_archive_cmds_CXX"
++	    reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~
++              '"$reload_cmds_CXX"
++	    ;;
++	  *)
++	    archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    ;;
++	esac
++      ;;
++
++      tandem*)
++        case $cc_basename in
++          NCC*)
++	    # NonStop-UX NCC 3.20
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++          *)
++	    # FIXME: insert proper C++ library support
++	    ld_shlibs_CXX=no
++	    ;;
++        esac
++        ;;
++
++      vxworks*)
++        # FIXME: insert proper C++ library support
++        ld_shlibs_CXX=no
++        ;;
++
++      *)
++        # FIXME: insert proper C++ library support
++        ld_shlibs_CXX=no
++        ;;
++    esac
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
++printf "%s\n" "$ld_shlibs_CXX" >&6; }
++    test no = "$ld_shlibs_CXX" && can_build_shared=no
++
++    GCC_CXX=$GXX
++    LD_CXX=$LD
++
++    ## CAVEAT EMPTOR:
++    ## There is no encapsulation within the following macros, do not change
++    ## the running order or otherwise move them around unless you know exactly
++    ## what you are doing...
++    # Dependencies to place before and after the object being linked:
++predep_objects_CXX=
++postdep_objects_CXX=
++predeps_CXX=
++postdeps_CXX=
++compiler_lib_search_path_CXX=
++
++cat > conftest.$ac_ext <<_LT_EOF
++class Foo
++{
++public:
++  Foo (void) { a = 0; }
++private:
++  int a;
++};
++_LT_EOF
++
++
++_lt_libdeps_save_CFLAGS=$CFLAGS
++case "$CC $CFLAGS " in #(
++*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
++*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
++*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
++esac
++
++if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then
++  # Parse the compiler output and extract the necessary
++  # objects, libraries and library flags.
++
++  # Sentinel used to keep track of whether or not we are before
++  # the conftest object file.
++  pre_test_object_deps_done=no
++
++  for p in `eval "$output_verbose_link_cmd"`; do
++    case $prev$p in
++
++    -L* | -R* | -l*)
++       # Some compilers place space between "-{L,R}" and the path.
++       # Remove the space.
++       if test x-L = "$p" ||
++          test x-R = "$p"; then
++	 prev=$p
++	 continue
++       fi
++
++       # Expand the sysroot to ease extracting the directories later.
++       if test -z "$prev"; then
++         case $p in
++         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
++         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
++         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
++         esac
++       fi
++       case $p in
++       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
++       esac
++       if test no = "$pre_test_object_deps_done"; then
++	 case $prev in
++	 -L | -R)
++	   # Internal compiler library paths should come after those
++	   # provided the user.  The postdeps already come after the
++	   # user supplied libs so there is no need to process them.
++	   if test -z "$compiler_lib_search_path_CXX"; then
++	     compiler_lib_search_path_CXX=$prev$p
++	   else
++	     compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p"
++	   fi
++	   ;;
++	 # The "-l" case would never come before the object being
++	 # linked, so don't bother handling this case.
++	 esac
++       else
++	 if test -z "$postdeps_CXX"; then
++	   postdeps_CXX=$prev$p
++	 else
++	   postdeps_CXX="${postdeps_CXX} $prev$p"
++	 fi
++       fi
++       prev=
++       ;;
++
++    *.lto.$objext) ;; # Ignore GCC LTO objects
++    *.$objext)
++       # This assumes that the test object file only shows up
++       # once in the compiler output.
++       if test "$p" = "conftest.$objext"; then
++	 pre_test_object_deps_done=yes
++	 continue
++       fi
++
++       if test no = "$pre_test_object_deps_done"; then
++	 if test -z "$predep_objects_CXX"; then
++	   predep_objects_CXX=$p
++	 else
++	   predep_objects_CXX="$predep_objects_CXX $p"
++	 fi
++       else
++	 if test -z "$postdep_objects_CXX"; then
++	   postdep_objects_CXX=$p
++	 else
++	   postdep_objects_CXX="$postdep_objects_CXX $p"
++	 fi
++       fi
++       ;;
++
++    *) ;; # Ignore the rest.
++
++    esac
++  done
++
++  # Clean up.
++  rm -f a.out a.exe
++else
++  echo "libtool.m4: error: problem compiling CXX test program"
++fi
++
++$RM -f confest.$objext
++CFLAGS=$_lt_libdeps_save_CFLAGS
++
++# PORTME: override above test on systems where it is broken
++case $host_os in
++interix[3-9]*)
++  # Interix 3.5 installs completely hosed .la files for C++, so rather than
++  # hack all around it, let's just trust "g++" to DTRT.
++  predep_objects_CXX=
++  postdep_objects_CXX=
++  postdeps_CXX=
++  ;;
++esac
++
++
++case " $postdeps_CXX " in
++*" -lc "*) archive_cmds_need_lc_CXX=no ;;
++esac
++ compiler_lib_search_dirs_CXX=
++if test -n "${compiler_lib_search_path_CXX}"; then
++ compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! !g' -e 's!^ !!'`
++fi
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++    lt_prog_compiler_wl_CXX=
++lt_prog_compiler_pic_CXX=
++lt_prog_compiler_static_CXX=
++
++
++  # C++ specific cases for pic, static, wl, etc.
++  if test yes = "$GXX"; then
++    lt_prog_compiler_wl_CXX='-Wl,'
++    lt_prog_compiler_static_CXX='-static'
++
++    case $host_os in
++    aix*)
++      # All AIX code is PIC.
++      if test ia64 = "$host_cpu"; then
++	# AIX 5 now supports IA64 processor
++	lt_prog_compiler_static_CXX='-Bstatic'
++      fi
++      lt_prog_compiler_pic_CXX='-fPIC'
++      ;;
++
++    amigaos*)
++      case $host_cpu in
++      powerpc)
++            # see comment about AmigaOS4 .so support
++            lt_prog_compiler_pic_CXX='-fPIC'
++        ;;
++      m68k)
++            # FIXME: we need at least 68020 code to build shared libraries, but
++            # adding the '-m68020' flag to GCC prevents building anything better,
++            # like '-m68040'.
++            lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4'
++        ;;
++      esac
++      ;;
++
++    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
++      # PIC is the default for these OSes.
++      ;;
++    mingw* | cygwin* | os2* | pw32* | cegcc*)
++      # This hack is so that the source file can tell whether it is being
++      # built for inclusion in a dll (and should export symbols for example).
++      # Although the cygwin gcc ignores -fPIC, still need this for old-style
++      # (--disable-auto-import) libraries
++      lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
++      case $host_os in
++      os2*)
++	lt_prog_compiler_static_CXX='$wl-static'
++	;;
++      esac
++      ;;
++    darwin* | rhapsody*)
++      # PIC is the default on this platform
++      # Common symbols not allowed in MH_DYLIB files
++      lt_prog_compiler_pic_CXX='-fno-common'
++      ;;
++    *djgpp*)
++      # DJGPP does not support shared libraries at all
++      lt_prog_compiler_pic_CXX=
++      ;;
++    haiku*)
++      # PIC is the default for Haiku.
++      # The "-static" flag exists, but is broken.
++      lt_prog_compiler_static_CXX=
++      ;;
++    interix[3-9]*)
++      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
++      # Instead, we relocate shared libraries at runtime.
++      ;;
++    sysv4*MP*)
++      if test -d /usr/nec; then
++	lt_prog_compiler_pic_CXX=-Kconform_pic
++      fi
++      ;;
++    hpux*)
++      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
++      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
++      # sets the default TLS model and affects inlining.
++      case $host_cpu in
++      hppa*64*)
++	;;
++      *)
++	lt_prog_compiler_pic_CXX='-fPIC'
++	;;
++      esac
++      ;;
++    *qnx* | *nto*)
++      # QNX uses GNU C++, but need to define -shared option too, otherwise
++      # it will coredump.
++      lt_prog_compiler_pic_CXX='-fPIC -shared'
++      ;;
++    *)
++      lt_prog_compiler_pic_CXX='-fPIC'
++      ;;
++    esac
++  else
++    case $host_os in
++      aix[4-9]*)
++	# All AIX code is PIC.
++	if test ia64 = "$host_cpu"; then
++	  # AIX 5 now supports IA64 processor
++	  lt_prog_compiler_static_CXX='-Bstatic'
++	else
++	  lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp'
++	fi
++	;;
++      chorus*)
++	case $cc_basename in
++	cxch68*)
++	  # Green Hills C++ Compiler
++	  # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
++	  ;;
++	esac
++	;;
++      mingw* | cygwin* | os2* | pw32* | cegcc*)
++	# This hack is so that the source file can tell whether it is being
++	# built for inclusion in a dll (and should export symbols for example).
++	lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
++	;;
++      dgux*)
++	case $cc_basename in
++	  ec++*)
++	    lt_prog_compiler_pic_CXX='-KPIC'
++	    ;;
++	  ghcx*)
++	    # Green Hills C++ Compiler
++	    lt_prog_compiler_pic_CXX='-pic'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      freebsd* | dragonfly* | midnightbsd*)
++	# FreeBSD uses GNU C++
++	;;
++      hpux9* | hpux10* | hpux11*)
++	case $cc_basename in
++	  CC*)
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_static_CXX='$wl-a ${wl}archive'
++	    if test ia64 != "$host_cpu"; then
++	      lt_prog_compiler_pic_CXX='+Z'
++	    fi
++	    ;;
++	  aCC*)
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_static_CXX='$wl-a ${wl}archive'
++	    case $host_cpu in
++	    hppa*64*|ia64*)
++	      # +Z the default
++	      ;;
++	    *)
++	      lt_prog_compiler_pic_CXX='+Z'
++	      ;;
++	    esac
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      interix*)
++	# This is c89, which is MS Visual C++ (no shared libs)
++	# Anyone wants to do a port?
++	;;
++      irix5* | irix6* | nonstopux*)
++	case $cc_basename in
++	  CC*)
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_static_CXX='-non_shared'
++	    # CC pic flag -KPIC is the default.
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++	case $cc_basename in
++	  KCC*)
++	    # KAI C++ Compiler
++	    lt_prog_compiler_wl_CXX='--backend -Wl,'
++	    lt_prog_compiler_pic_CXX='-fPIC'
++	    ;;
++	  ecpc* )
++	    # old Intel C++ for x86_64, which still supported -KPIC.
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_pic_CXX='-KPIC'
++	    lt_prog_compiler_static_CXX='-static'
++	    ;;
++	  icpc* )
++	    # Intel C++, used to be incompatible with GCC.
++	    # ICC 10 doesn't accept -KPIC any more.
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_pic_CXX='-fPIC'
++	    lt_prog_compiler_static_CXX='-static'
++	    ;;
++	  pgCC* | pgcpp*)
++	    # Portland Group C++ compiler
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_pic_CXX='-fpic'
++	    lt_prog_compiler_static_CXX='-Bstatic'
++	    ;;
++	  cxx*)
++	    # Compaq C++
++	    # Make sure the PIC flag is empty.  It appears that all Alpha
++	    # Linux and Compaq Tru64 Unix objects are PIC.
++	    lt_prog_compiler_pic_CXX=
++	    lt_prog_compiler_static_CXX='-non_shared'
++	    ;;
++	  xlc* | xlC* | bgxl[cC]* | mpixl[cC]*)
++	    # IBM XL 8.0, 9.0 on PPC and BlueGene
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_pic_CXX='-qpic'
++	    lt_prog_compiler_static_CXX='-qstaticlink'
++	    ;;
++	  *)
++	    case `$CC -V 2>&1 | $SED 5q` in
++	    *Sun\ C*)
++	      # Sun C++ 5.9
++	      lt_prog_compiler_pic_CXX='-KPIC'
++	      lt_prog_compiler_static_CXX='-Bstatic'
++	      lt_prog_compiler_wl_CXX='-Qoption ld '
++	      ;;
++	    esac
++	    ;;
++	esac
++	;;
++      lynxos*)
++	;;
++      m88k*)
++	;;
++      mvs*)
++	case $cc_basename in
++	  cxx*)
++	    lt_prog_compiler_pic_CXX='-W c,exportall'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      netbsd*)
++	;;
++      *qnx* | *nto*)
++        # QNX uses GNU C++, but need to define -shared option too, otherwise
++        # it will coredump.
++        lt_prog_compiler_pic_CXX='-fPIC -shared'
++        ;;
++      osf3* | osf4* | osf5*)
++	case $cc_basename in
++	  KCC*)
++	    lt_prog_compiler_wl_CXX='--backend -Wl,'
++	    ;;
++	  RCC*)
++	    # Rational C++ 2.4.1
++	    lt_prog_compiler_pic_CXX='-pic'
++	    ;;
++	  cxx*)
++	    # Digital/Compaq C++
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    # Make sure the PIC flag is empty.  It appears that all Alpha
++	    # Linux and Compaq Tru64 Unix objects are PIC.
++	    lt_prog_compiler_pic_CXX=
++	    lt_prog_compiler_static_CXX='-non_shared'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      psos*)
++	;;
++      solaris*)
++	case $cc_basename in
++	  CC* | sunCC*)
++	    # Sun C++ 4.2, 5.x and Centerline C++
++	    lt_prog_compiler_pic_CXX='-KPIC'
++	    lt_prog_compiler_static_CXX='-Bstatic'
++	    lt_prog_compiler_wl_CXX='-Qoption ld '
++	    ;;
++	  gcx*)
++	    # Green Hills C++ Compiler
++	    lt_prog_compiler_pic_CXX='-PIC'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      sunos4*)
++	case $cc_basename in
++	  CC*)
++	    # Sun C++ 4.x
++	    lt_prog_compiler_pic_CXX='-pic'
++	    lt_prog_compiler_static_CXX='-Bstatic'
++	    ;;
++	  lcc*)
++	    # Lucid
++	    lt_prog_compiler_pic_CXX='-pic'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
++	case $cc_basename in
++	  CC*)
++	    lt_prog_compiler_wl_CXX='-Wl,'
++	    lt_prog_compiler_pic_CXX='-KPIC'
++	    lt_prog_compiler_static_CXX='-Bstatic'
++	    ;;
++	esac
++	;;
++      tandem*)
++	case $cc_basename in
++	  NCC*)
++	    # NonStop-UX NCC 3.20
++	    lt_prog_compiler_pic_CXX='-KPIC'
++	    ;;
++	  *)
++	    ;;
++	esac
++	;;
++      vxworks*)
++	;;
++      *)
++	lt_prog_compiler_can_build_shared_CXX=no
++	;;
++    esac
++  fi
++
++case $host_os in
++  # For platforms that do not support PIC, -DPIC is meaningless:
++  *djgpp*)
++    lt_prog_compiler_pic_CXX=
++    ;;
++  *)
++    lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC"
++    ;;
++esac
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
++printf %s "checking for $compiler option to produce PIC... " >&6; }
++if test ${lt_cv_prog_compiler_pic_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5
++printf "%s\n" "$lt_cv_prog_compiler_pic_CXX" >&6; }
++lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX
++
++#
++# Check to make sure the PIC flag actually works.
++#
++if test -n "$lt_prog_compiler_pic_CXX"; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5
++printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; }
++if test ${lt_cv_prog_compiler_pic_works_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_pic_works_CXX=no
++   ac_outfile=conftest.$ac_objext
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++   lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC"  ## exclude from sc_useless_quotes_in_assignment
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   # The option is referenced via a variable to avoid confusing sed.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
++   (eval "$lt_compile" 2>conftest.err)
++   ac_status=$?
++   cat conftest.err >&5
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   if (exit $ac_status) && test -s "$ac_outfile"; then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings other than the usual output.
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
++     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
++       lt_cv_prog_compiler_pic_works_CXX=yes
++     fi
++   fi
++   $RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5
++printf "%s\n" "$lt_cv_prog_compiler_pic_works_CXX" >&6; }
++
++if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then
++    case $lt_prog_compiler_pic_CXX in
++     "" | " "*) ;;
++     *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;;
++     esac
++else
++    lt_prog_compiler_pic_CXX=
++     lt_prog_compiler_can_build_shared_CXX=no
++fi
++
++fi
++
++
++
++
++
++#
++# Check to make sure the static flag actually works.
++#
++wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\"
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
++printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
++if test ${lt_cv_prog_compiler_static_works_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_static_works_CXX=no
++   save_LDFLAGS=$LDFLAGS
++   LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
++   echo "$lt_simple_link_test_code" > conftest.$ac_ext
++   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
++     # The linker can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     if test -s conftest.err; then
++       # Append any errors to the config.log.
++       cat conftest.err 1>&5
++       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
++       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
++       if diff conftest.exp conftest.er2 >/dev/null; then
++         lt_cv_prog_compiler_static_works_CXX=yes
++       fi
++     else
++       lt_cv_prog_compiler_static_works_CXX=yes
++     fi
++   fi
++   $RM -r conftest*
++   LDFLAGS=$save_LDFLAGS
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5
++printf "%s\n" "$lt_cv_prog_compiler_static_works_CXX" >&6; }
++
++if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then
++    :
++else
++    lt_prog_compiler_static_CXX=
++fi
++
++
++
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if test ${lt_cv_prog_compiler_c_o_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_c_o_CXX=no
++   $RM -r conftest 2>/dev/null
++   mkdir conftest
++   cd conftest
++   mkdir out
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++   lt_compiler_flag="-o out/conftest2.$ac_objext"
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
++   (eval "$lt_compile" 2>out/conftest.err)
++   ac_status=$?
++   cat out/conftest.err >&5
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   if (exit $ac_status) && test -s out/conftest2.$ac_objext
++   then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
++     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
++     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
++       lt_cv_prog_compiler_c_o_CXX=yes
++     fi
++   fi
++   chmod u+w . 2>&5
++   $RM conftest*
++   # SGI C++ compiler will create directory out/ii_files/ for
++   # template instantiation
++   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
++   $RM out/* && rmdir out
++   cd ..
++   $RM -r conftest
++   $RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
++printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; }
++
++
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if test ${lt_cv_prog_compiler_c_o_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_prog_compiler_c_o_CXX=no
++   $RM -r conftest 2>/dev/null
++   mkdir conftest
++   cd conftest
++   mkdir out
++   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++   lt_compiler_flag="-o out/conftest2.$ac_objext"
++   # Insert the option either (1) after the last *FLAGS variable, or
++   # (2) before a word containing "conftest.", or (3) at the end.
++   # Note that $ac_compile itself does not contain backslashes and begins
++   # with a dollar sign (not a hyphen), so the echo should work correctly.
++   lt_compile=`echo "$ac_compile" | $SED \
++   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
++   -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \
++   -e 's:$: $lt_compiler_flag:'`
++   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5)
++   (eval "$lt_compile" 2>out/conftest.err)
++   ac_status=$?
++   cat out/conftest.err >&5
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   if (exit $ac_status) && test -s out/conftest2.$ac_objext
++   then
++     # The compiler can only warn and ignore the option if not recognized
++     # So say no if there are warnings
++     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
++     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
++     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
++       lt_cv_prog_compiler_c_o_CXX=yes
++     fi
++   fi
++   chmod u+w . 2>&5
++   $RM conftest*
++   # SGI C++ compiler will create directory out/ii_files/ for
++   # template instantiation
++   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
++   $RM out/* && rmdir out
++   cd ..
++   $RM -r conftest
++   $RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
++printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; }
++
++
++
++
++hard_links=nottested
++if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then
++  # do not overwrite the value of need_locks provided by the user
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
++printf %s "checking if we can lock with hard links... " >&6; }
++  hard_links=yes
++  $RM conftest*
++  ln conftest.a conftest.b 2>/dev/null && hard_links=no
++  touch conftest.a
++  ln conftest.a conftest.b 2>&5 || hard_links=no
++  ln conftest.a conftest.b 2>/dev/null && hard_links=no
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
++printf "%s\n" "$hard_links" >&6; }
++  if test no = "$hard_links"; then
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5
++printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;}
++    need_locks=warn
++  fi
++else
++  need_locks=no
++fi
++
++
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
++printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
++
++  export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
++  exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
++  case $host_os in
++  aix[4-9]*)
++    # If we're using GNU nm, then we don't want the "-C" option.
++    # -C means demangle to GNU nm, but means don't demangle to AIX nm.
++    # Without the "-l" option, or with the "-B" option, AIX nm treats
++    # weak defined symbols like other global defined symbols, whereas
++    # GNU nm marks them as "W".
++    # While the 'weak' keyword is ignored in the Export File, we need
++    # it in the Import File for the 'aix-soname' feature, so we have
++    # to replace the "-B" option with "-P" for AIX nm.
++    if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
++      export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
++    else
++      export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
++    fi
++    ;;
++  pw32*)
++    export_symbols_cmds_CXX=$ltdll_cmds
++    ;;
++  cygwin* | mingw* | cegcc*)
++    case $cc_basename in
++    cl* | icl*)
++      exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
++      ;;
++    *)
++      export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
++      exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
++      ;;
++    esac
++    ;;
++  *)
++    export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
++    ;;
++  esac
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
++printf "%s\n" "$ld_shlibs_CXX" >&6; }
++test no = "$ld_shlibs_CXX" && can_build_shared=no
++
++with_gnu_ld_CXX=$with_gnu_ld
++
++
++
++
++
++
++#
++# Do we need to explicitly link libc?
++#
++case "x$archive_cmds_need_lc_CXX" in
++x|xyes)
++  # Assume -lc should be added
++  archive_cmds_need_lc_CXX=yes
++
++  if test yes,yes = "$GCC,$enable_shared"; then
++    case $archive_cmds_CXX in
++    *'~'*)
++      # FIXME: we may have to deal with multi-command sequences.
++      ;;
++    '$CC '*)
++      # Test whether the compiler implicitly links with -lc since on some
++      # systems, -lgcc has to come before -lc. If gcc already passes -lc
++      # to ld, don't add -lc before -lgcc.
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
++printf %s "checking whether -lc should be explicitly linked in... " >&6; }
++if test ${lt_cv_archive_cmds_need_lc_CXX+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  $RM conftest*
++	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
++
++	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
++  (eval $ac_compile) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } 2>conftest.err; then
++	  soname=conftest
++	  lib=conftest
++	  libobjs=conftest.$ac_objext
++	  deplibs=
++	  wl=$lt_prog_compiler_wl_CXX
++	  pic_flag=$lt_prog_compiler_pic_CXX
++	  compiler_flags=-v
++	  linker_flags=-v
++	  verstring=
++	  output_objdir=.
++	  libname=conftest
++	  lt_save_allow_undefined_flag=$allow_undefined_flag_CXX
++	  allow_undefined_flag_CXX=
++	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
++  (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
++  ac_status=$?
++  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }
++	  then
++	    lt_cv_archive_cmds_need_lc_CXX=no
++	  else
++	    lt_cv_archive_cmds_need_lc_CXX=yes
++	  fi
++	  allow_undefined_flag_CXX=$lt_save_allow_undefined_flag
++	else
++	  cat conftest.err 1>&5
++	fi
++	$RM conftest*
++
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5
++printf "%s\n" "$lt_cv_archive_cmds_need_lc_CXX" >&6; }
++      archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX
++      ;;
++    esac
++  fi
++  ;;
++esac
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
++printf %s "checking dynamic linker characteristics... " >&6; }
++
++library_names_spec=
++libname_spec='lib$name'
++soname_spec=
++shrext_cmds=.so
++postinstall_cmds=
++postuninstall_cmds=
++finish_cmds=
++finish_eval=
++shlibpath_var=
++shlibpath_overrides_runpath=unknown
++version_type=none
++dynamic_linker="$host_os ld.so"
++sys_lib_dlsearch_path_spec="/lib /usr/lib"
++need_lib_prefix=unknown
++hardcode_into_libs=no
++
++# when you set need_version to no, make sure it does not cause -set_version
++# flags to be left without arguments
++need_version=unknown
++
++
++
++case $host_os in
++aix3*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
++  shlibpath_var=LIBPATH
++
++  # AIX 3 has no versioning support, so we append a major version to the name.
++  soname_spec='$libname$release$shared_ext$major'
++  ;;
++
++aix[4-9]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  hardcode_into_libs=yes
++  if test ia64 = "$host_cpu"; then
++    # AIX 5 supports IA64
++    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
++    shlibpath_var=LD_LIBRARY_PATH
++  else
++    # With GCC up to 2.95.x, collect2 would create an import file
++    # for dependence libraries.  The import file would start with
++    # the line '#! .'.  This would cause the generated library to
++    # depend on '.', always an invalid library.  This was fixed in
++    # development snapshots of GCC prior to 3.0.
++    case $host_os in
++      aix4 | aix4.[01] | aix4.[01].*)
++      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
++	   echo ' yes '
++	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
++	:
++      else
++	can_build_shared=no
++      fi
++      ;;
++    esac
++    # Using Import Files as archive members, it is possible to support
++    # filename-based versioning of shared library archives on AIX. While
++    # this would work for both with and without runtime linking, it will
++    # prevent static linking of such archives. So we do filename-based
++    # shared library versioning with .so extension only, which is used
++    # when both runtime linking and shared linking is enabled.
++    # Unfortunately, runtime linking may impact performance, so we do
++    # not want this to be the default eventually. Also, we use the
++    # versioned .so libs for executables only if there is the -brtl
++    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
++    # To allow for filename-based versioning support, we need to create
++    # libNAME.so.V as an archive file, containing:
++    # *) an Import File, referring to the versioned filename of the
++    #    archive as well as the shared archive member, telling the
++    #    bitwidth (32 or 64) of that shared object, and providing the
++    #    list of exported symbols of that shared object, eventually
++    #    decorated with the 'weak' keyword
++    # *) the shared object with the F_LOADONLY flag set, to really avoid
++    #    it being seen by the linker.
++    # At run time we better use the real file rather than another symlink,
++    # but for link time we create the symlink libNAME.so -> libNAME.so.V
++
++    case $with_aix_soname,$aix_use_runtimelinking in
++    # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct
++    # soname into executable. Probably we can add versioning support to
++    # collect2, so additional links can be useful in future.
++    aix,yes) # traditional libtool
++      dynamic_linker='AIX unversionable lib.so'
++      # If using run time linking (on AIX 4.2 or later) use lib.so
++      # instead of lib.a to let people know that these are not
++      # typical AIX shared libraries.
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++      ;;
++    aix,no) # traditional AIX only
++      dynamic_linker='AIX lib.a(lib.so.V)'
++      # We preserve .a as extension for shared libraries through AIX4.2
++      # and later when we are not doing run time linking.
++      library_names_spec='$libname$release.a $libname.a'
++      soname_spec='$libname$release$shared_ext$major'
++      ;;
++    svr4,*) # full svr4 only
++      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)"
++      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
++      # We do not specify a path in Import Files, so LIBPATH fires.
++      shlibpath_overrides_runpath=yes
++      ;;
++    *,yes) # both, prefer svr4
++      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)"
++      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
++      # unpreferred sharedlib libNAME.a needs extra handling
++      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
++      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
++      # We do not specify a path in Import Files, so LIBPATH fires.
++      shlibpath_overrides_runpath=yes
++      ;;
++    *,no) # both, prefer aix
++      dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)"
++      library_names_spec='$libname$release.a $libname.a'
++      soname_spec='$libname$release$shared_ext$major'
++      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
++      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
++      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
++      ;;
++    esac
++    shlibpath_var=LIBPATH
++  fi
++  ;;
++
++amigaos*)
++  case $host_cpu in
++  powerpc)
++    # Since July 2007 AmigaOS4 officially supports .so libraries.
++    # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    ;;
++  m68k)
++    library_names_spec='$libname.ixlibrary $libname.a'
++    # Create ${libname}_ixlibrary.a entries in /sys/libs.
++    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
++    ;;
++  esac
++  ;;
++
++beos*)
++  library_names_spec='$libname$shared_ext'
++  dynamic_linker="$host_os ld.so"
++  shlibpath_var=LIBRARY_PATH
++  ;;
++
++bsdi[45]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
++  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
++  # the default ld.so.conf also contains /usr/contrib/lib and
++  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
++  # libtool to hard-code these into programs
++  ;;
++
++cygwin* | mingw* | pw32* | cegcc*)
++  version_type=windows
++  shrext_cmds=.dll
++  need_version=no
++  need_lib_prefix=no
++
++  case $GCC,$cc_basename in
++  yes,*)
++    # gcc
++    library_names_spec='$libname.dll.a'
++    # DLL is installed to $(libdir)/../bin by postinstall_cmds
++    postinstall_cmds='base_file=`basename \$file`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++      dldir=$destdir/`dirname \$dlpath`~
++      test -d \$dldir || mkdir -p \$dldir~
++      $install_prog $dir/$dlname \$dldir/$dlname~
++      chmod a+x \$dldir/$dlname~
++      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
++        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
++      fi'
++    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
++      dlpath=$dir/\$dldll~
++       $RM \$dlpath'
++    shlibpath_overrides_runpath=yes
++
++    case $host_os in
++    cygwin*)
++      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
++      soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++
++      ;;
++    mingw* | cegcc*)
++      # MinGW DLLs use traditional 'lib' prefix
++      soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      ;;
++    pw32*)
++      # pw32 DLLs use 'pw' prefix rather than 'lib'
++      library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      ;;
++    esac
++    dynamic_linker='Win32 ld.exe'
++    ;;
++
++  *,cl* | *,icl*)
++    # Native MSVC or ICC
++    libname_spec='$name'
++    soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++    library_names_spec='$libname.dll.lib'
++
++    case $build_os in
++    mingw*)
++      sys_lib_search_path_spec=
++      lt_save_ifs=$IFS
++      IFS=';'
++      for lt_path in $LIB
++      do
++        IFS=$lt_save_ifs
++        # Let DOS variable expansion print the short 8.3 style file name.
++        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
++        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
++      done
++      IFS=$lt_save_ifs
++      # Convert to MSYS style.
++      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
++      ;;
++    cygwin*)
++      # Convert to unix form, then to dos form, then back to unix form
++      # but this time dos style (no spaces!) so that the unix form looks
++      # like /cygdrive/c/PROGRA~1:/cygdr...
++      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
++      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
++      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
++      ;;
++    *)
++      sys_lib_search_path_spec=$LIB
++      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
++        # It is most probably a Windows format PATH.
++        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
++      else
++        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
++      fi
++      # FIXME: find the short name or the path components, as spaces are
++      # common. (e.g. "Program Files" -> "PROGRA~1")
++      ;;
++    esac
++
++    # DLL is installed to $(libdir)/../bin by postinstall_cmds
++    postinstall_cmds='base_file=`basename \$file`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++      dldir=$destdir/`dirname \$dlpath`~
++      test -d \$dldir || mkdir -p \$dldir~
++      $install_prog $dir/$dlname \$dldir/$dlname'
++    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
++      dlpath=$dir/\$dldll~
++       $RM \$dlpath'
++    shlibpath_overrides_runpath=yes
++    dynamic_linker='Win32 link.exe'
++    ;;
++
++  *)
++    # Assume MSVC and ICC wrapper
++    library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib'
++    dynamic_linker='Win32 ld.exe'
++    ;;
++  esac
++  # FIXME: first we should search . and the directory the executable is in
++  shlibpath_var=PATH
++  ;;
++
++darwin* | rhapsody*)
++  dynamic_linker="$host_os dyld"
++  version_type=darwin
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
++  soname_spec='$libname$release$major$shared_ext'
++  shlibpath_overrides_runpath=yes
++  shlibpath_var=DYLD_LIBRARY_PATH
++  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
++
++  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
++  ;;
++
++dgux*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  ;;
++
++freebsd* | dragonfly* | midnightbsd*)
++  # DragonFly does not have aout.  When/if they implement a new
++  # versioning mechanism, adjust this.
++  if test -x /usr/bin/objformat; then
++    objformat=`/usr/bin/objformat`
++  else
++    case $host_os in
++    freebsd[23].*) objformat=aout ;;
++    *) objformat=elf ;;
++    esac
++  fi
++  version_type=freebsd-$objformat
++  case $version_type in
++    freebsd-elf*)
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++      soname_spec='$libname$release$shared_ext$major'
++      need_version=no
++      need_lib_prefix=no
++      ;;
++    freebsd-*)
++      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++      need_version=yes
++      ;;
++  esac
++  shlibpath_var=LD_LIBRARY_PATH
++  case $host_os in
++  freebsd2.*)
++    shlibpath_overrides_runpath=yes
++    ;;
++  freebsd3.[01]* | freebsdelf3.[01]*)
++    shlibpath_overrides_runpath=yes
++    hardcode_into_libs=yes
++    ;;
++  freebsd3.[2-9]* | freebsdelf3.[2-9]* | \
++  freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1)
++    shlibpath_overrides_runpath=no
++    hardcode_into_libs=yes
++    ;;
++  *) # from 4.6 on, and DragonFly
++    shlibpath_overrides_runpath=yes
++    hardcode_into_libs=yes
++    ;;
++  esac
++  ;;
++
++haiku*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  dynamic_linker="$host_os runtime_loader"
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
++  hardcode_into_libs=yes
++  ;;
++
++hpux9* | hpux10* | hpux11*)
++  # Give a soname corresponding to the major version so that dld.sl refuses to
++  # link against other versions.
++  version_type=sunos
++  need_lib_prefix=no
++  need_version=no
++  case $host_cpu in
++  ia64*)
++    shrext_cmds='.so'
++    hardcode_into_libs=yes
++    dynamic_linker="$host_os dld.so"
++    shlibpath_var=LD_LIBRARY_PATH
++    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    if test 32 = "$HPUX_IA64_MODE"; then
++      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
++      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
++    else
++      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
++      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
++    fi
++    ;;
++  hppa*64*)
++    shrext_cmds='.sl'
++    hardcode_into_libs=yes
++    dynamic_linker="$host_os dld.sl"
++    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
++    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
++    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++    ;;
++  *)
++    shrext_cmds='.sl'
++    dynamic_linker="$host_os dld.sl"
++    shlibpath_var=SHLIB_PATH
++    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    ;;
++  esac
++  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
++  postinstall_cmds='chmod 555 $lib'
++  # or fails outright, so override atomically:
++  install_override_mode=555
++  ;;
++
++interix[3-9]*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  ;;
++
++irix5* | irix6* | nonstopux*)
++  case $host_os in
++    nonstopux*) version_type=nonstopux ;;
++    *)
++	if test yes = "$lt_cv_prog_gnu_ld"; then
++		version_type=linux # correct to gnu/linux during the next big refactor
++	else
++		version_type=irix
++	fi ;;
++  esac
++  need_lib_prefix=no
++  need_version=no
++  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
++  case $host_os in
++  irix5* | nonstopux*)
++    libsuff= shlibsuff=
++    ;;
++  *)
++    case $LD in # libtool.m4 will add one of these switches to LD
++    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
++      libsuff= shlibsuff= libmagic=32-bit;;
++    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
++      libsuff=32 shlibsuff=N32 libmagic=N32;;
++    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
++      libsuff=64 shlibsuff=64 libmagic=64-bit;;
++    *) libsuff= shlibsuff= libmagic=never-match;;
++    esac
++    ;;
++  esac
++  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
++  shlibpath_overrides_runpath=no
++  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
++  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
++  hardcode_into_libs=yes
++  ;;
++
++# No shared lib support for Linux oldld, aout, or coff.
++linux*oldld* | linux*aout* | linux*coff*)
++  dynamic_linker=no
++  ;;
++
++linux*android*)
++  version_type=none # Android doesn't support versioned libraries.
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext'
++  soname_spec='$libname$release$shared_ext'
++  finish_cmds=
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++
++  # This implies no fast_install, which is unacceptable.
++  # Some rework will be needed to allow for fast_install
++  # before this can be enabled.
++  hardcode_into_libs=yes
++
++  dynamic_linker='Android linker'
++  # Don't embed -rpath directories since the linker doesn't support them.
++  hardcode_libdir_flag_spec_CXX='-L$libdir'
++  ;;
++
++# This must be glibc/ELF.
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++
++  # Some binutils ld are patched to set DT_RUNPATH
++  if test ${lt_cv_shlibpath_overrides_runpath+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  lt_cv_shlibpath_overrides_runpath=no
++    save_LDFLAGS=$LDFLAGS
++    save_libdir=$libdir
++    eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \
++	 LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\""
++    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main (void)
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_cxx_try_link "$LINENO"
++then :
++  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null
++then :
++  lt_cv_shlibpath_overrides_runpath=yes
++fi
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam \
++    conftest$ac_exeext conftest.$ac_ext
++    LDFLAGS=$save_LDFLAGS
++    libdir=$save_libdir
++
++fi
++
++  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
++
++  # This implies no fast_install, which is unacceptable.
++  # Some rework will be needed to allow for fast_install
++  # before this can be enabled.
++  hardcode_into_libs=yes
++
++  # Add ABI-specific directories to the system library path.
++  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"
++
++  # Ideally, we could use ldconfig to report *all* directores which are
++  # searched for libraries, however this is still not possible.  Aside from not
++  # being certain /sbin/ldconfig is available, command
++  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
++  # even though it is searched at run-time.  Try to do the best guess by
++  # appending ld.so.conf contents (and includes) to the search path.
++  if test -f /etc/ld.so.conf; then
++    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
++    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
++  fi
++
++  # We used to test for /lib/ld.so.1 and disable shared libraries on
++  # powerpc, because MkLinux only supported shared libraries with the
++  # GNU dynamic linker.  Since this was broken with cross compilers,
++  # most powerpc-linux boxes support dynamic linking these days and
++  # people can always --disable-shared, the test was removed, and we
++  # assume the GNU/Linux dynamic linker is in use.
++  dynamic_linker='GNU/Linux ld.so'
++  ;;
++
++netbsd*)
++  version_type=sunos
++  need_lib_prefix=no
++  need_version=no
++  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
++    dynamic_linker='NetBSD (a.out) ld.so'
++  else
++    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    soname_spec='$libname$release$shared_ext$major'
++    dynamic_linker='NetBSD ld.elf_so'
++  fi
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  ;;
++
++newsos6)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  ;;
++
++*nto* | *qnx*)
++  version_type=qnx
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  dynamic_linker='ldqnx.so'
++  ;;
++
++openbsd* | bitrig*)
++  version_type=sunos
++  sys_lib_dlsearch_path_spec=/usr/lib
++  need_lib_prefix=no
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++    need_version=no
++  else
++    need_version=yes
++  fi
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  ;;
++
++os2*)
++  libname_spec='$name'
++  version_type=windows
++  shrext_cmds=.dll
++  need_version=no
++  need_lib_prefix=no
++  # OS/2 can only load a DLL with a base name of 8 characters or less.
++  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
++    v=$($ECHO $release$versuffix | tr -d .-);
++    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
++    $ECHO $n$v`$shared_ext'
++  library_names_spec='${libname}_dll.$libext'
++  dynamic_linker='OS/2 ld.exe'
++  shlibpath_var=BEGINLIBPATH
++  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
++  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  postinstall_cmds='base_file=`basename \$file`~
++    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
++    dldir=$destdir/`dirname \$dlpath`~
++    test -d \$dldir || mkdir -p \$dldir~
++    $install_prog $dir/$dlname \$dldir/$dlname~
++    chmod a+x \$dldir/$dlname~
++    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
++      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
++    fi'
++  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
++    dlpath=$dir/\$dldll~
++    $RM \$dlpath'
++  ;;
++
++osf3* | osf4* | osf5*)
++  version_type=osf
++  need_lib_prefix=no
++  need_version=no
++  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
++  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  ;;
++
++rdos*)
++  dynamic_linker=no
++  ;;
++
++solaris*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  # ldd complains unless libraries are executable
++  postinstall_cmds='chmod +x $lib'
++  ;;
++
++sunos4*)
++  version_type=sunos
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  if test yes = "$with_gnu_ld"; then
++    need_lib_prefix=no
++  fi
++  need_version=yes
++  ;;
++
++sysv4 | sysv4.3*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  case $host_vendor in
++    sni)
++      shlibpath_overrides_runpath=no
++      need_lib_prefix=no
++      runpath_var=LD_RUN_PATH
++      ;;
++    siemens)
++      need_lib_prefix=no
++      ;;
++    motorola)
++      need_lib_prefix=no
++      need_version=no
++      shlibpath_overrides_runpath=no
++      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
++      ;;
++  esac
++  ;;
++
++sysv4*MP*)
++  if test -d /usr/nec; then
++    version_type=linux # correct to gnu/linux during the next big refactor
++    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
++    soname_spec='$libname$shared_ext.$major'
++    shlibpath_var=LD_LIBRARY_PATH
++  fi
++  ;;
++
++sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
++  version_type=sco
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=yes
++  hardcode_into_libs=yes
++  if test yes = "$with_gnu_ld"; then
++    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
++  else
++    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
++    case $host_os in
++      sco3.2v5*)
++        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
++	;;
++    esac
++  fi
++  sys_lib_dlsearch_path_spec='/usr/lib'
++  ;;
++
++tpf*)
++  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
++  version_type=linux # correct to gnu/linux during the next big refactor
++  need_lib_prefix=no
++  need_version=no
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  shlibpath_var=LD_LIBRARY_PATH
++  shlibpath_overrides_runpath=no
++  hardcode_into_libs=yes
++  ;;
++
++uts4*)
++  version_type=linux # correct to gnu/linux during the next big refactor
++  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='$libname$release$shared_ext$major'
++  shlibpath_var=LD_LIBRARY_PATH
++  ;;
++
++*)
++  dynamic_linker=no
++  ;;
++esac
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
++printf "%s\n" "$dynamic_linker" >&6; }
++test no = "$dynamic_linker" && can_build_shared=no
++
++variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
++if test yes = "$GCC"; then
++  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
++fi
++
++if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
++  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
++fi
++
++if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
++  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
++fi
++
++# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
++configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
++
++# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
++func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
++
++# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
++configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
++printf %s "checking how to hardcode library paths into programs... " >&6; }
++hardcode_action_CXX=
++if test -n "$hardcode_libdir_flag_spec_CXX" ||
++   test -n "$runpath_var_CXX" ||
++   test yes = "$hardcode_automatic_CXX"; then
++
++  # We can hardcode non-existent directories.
++  if test no != "$hardcode_direct_CXX" &&
++     # If the only mechanism to avoid hardcoding is shlibpath_var, we
++     # have to relink, otherwise we might link with an installed library
++     # when we should be linking with a yet-to-be-installed one
++     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" &&
++     test no != "$hardcode_minus_L_CXX"; then
++    # Linking always hardcodes the temporary library directory.
++    hardcode_action_CXX=relink
++  else
++    # We can link without hardcoding, and we can hardcode nonexisting dirs.
++    hardcode_action_CXX=immediate
++  fi
++else
++  # We cannot hardcode anything, or else we can only hardcode existing
++  # directories.
++  hardcode_action_CXX=unsupported
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5
++printf "%s\n" "$hardcode_action_CXX" >&6; }
++
++if test relink = "$hardcode_action_CXX" ||
++   test yes = "$inherit_rpath_CXX"; then
++  # Fast installation is not supported
++  enable_fast_install=no
++elif test yes = "$shlibpath_overrides_runpath" ||
++     test no = "$enable_shared"; then
++  # Fast installation is not necessary
++  enable_fast_install=needless
++fi
++
++
++
++
++
++
++
++  fi # test -n "$compiler"
++
++  CC=$lt_save_CC
++  CFLAGS=$lt_save_CFLAGS
++  LDCXX=$LD
++  LD=$lt_save_LD
++  GCC=$lt_save_GCC
++  with_gnu_ld=$lt_save_with_gnu_ld
++  lt_cv_path_LDCXX=$lt_cv_path_LD
++  lt_cv_path_LD=$lt_save_path_LD
++  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
++  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
++fi # test yes != "$_lt_caught_CXX_error"
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++
++        ac_config_commands="$ac_config_commands libtool"
++
++
++
++
++# Only expand once:
++
++
++ACX_LT_HOST_FLAGS
++
++ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t"
++case $ac_cv_c_int64_t in #(
++  no|yes) ;; #(
++  *)
++
++printf "%s\n" "#define int64_t $ac_cv_c_int64_t" >>confdefs.h
++;;
++esac
++
++ac_fn_c_find_uintX_t "$LINENO" "64" "ac_cv_c_uint64_t"
++case $ac_cv_c_uint64_t in #(
++  no|yes) ;; #(
++  *)
++
++printf "%s\n" "#define _UINT64_T 1" >>confdefs.h
++
++
++printf "%s\n" "#define uint64_t $ac_cv_c_uint64_t" >>confdefs.h
++;;
++  esac
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5
++printf %s "checking for sys/wait.h that is POSIX.1 compatible... " >&6; }
++if test ${ac_cv_header_sys_wait_h+y}
++then :
++  printf %s "(cached) " >&6
++else $as_nop
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
++#include 
++#ifndef WEXITSTATUS
++# define WEXITSTATUS(stat_val) ((unsigned int) (stat_val) >> 8)
++#endif
++#ifndef WIFEXITED
++# define WIFEXITED(stat_val) (((stat_val) & 255) == 0)
++#endif
++
++int
++main (void)
++{
++  int s;
++  wait (&s);
++  s = WIFEXITED (s) ? WEXITSTATUS (s) : 1;
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"
++then :
++  ac_cv_header_sys_wait_h=yes
++else $as_nop
++  ac_cv_header_sys_wait_h=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++fi
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5
++printf "%s\n" "$ac_cv_header_sys_wait_h" >&6; }
++if test $ac_cv_header_sys_wait_h = yes; then
++
++printf "%s\n" "#define HAVE_SYS_WAIT_H 1" >>confdefs.h
++
++fi
++
++ac_config_files="$ac_config_files Makefile"
++
++ac_config_headers="$ac_config_headers config.h"
++
++cat >confcache <<\_ACEOF
++# This file is a shell script that caches the results of configure
++# tests run on this system so they can be shared between configure
++# scripts and configure runs, see configure's option --config-cache.
++# It is not useful on other systems.  If it contains results you don't
++# want to keep, you may remove or edit it.
++#
++# config.status only pays attention to the cache file if you give it
++# the --recheck option to rerun configure.
++#
++# `ac_cv_env_foo' variables (set or unset) will be overridden when
++# loading this file, other *unset* `ac_cv_foo' will be assigned the
++# following values.
++
++_ACEOF
++
++# The following way of writing the cache mishandles newlines in values,
++# but we know of no workaround that is simple, portable, and efficient.
++# So, we kill variables containing newlines.
++# Ultrix sh set writes to stderr and can't be redirected directly,
++# and sets the high bit in the cache file unless we assign to the vars.
++(
++  for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do
++    eval ac_val=\$$ac_var
++    case $ac_val in #(
++    *${as_nl}*)
++      case $ac_var in #(
++      *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
++printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
++      esac
++      case $ac_var in #(
++      _ | IFS | as_nl) ;; #(
++      BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #(
++      *) { eval $ac_var=; unset $ac_var;} ;;
++      esac ;;
++    esac
++  done
++
++  (set) 2>&1 |
++    case $as_nl`(ac_space=' '; set) 2>&1` in #(
++    *${as_nl}ac_space=\ *)
++      # `set' does not quote correctly, so add quotes: double-quote
++      # substitution turns \\\\ into \\, and sed turns \\ into \.
++      sed -n \
++	"s/'/'\\\\''/g;
++	  s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p"
++      ;; #(
++    *)
++      # `set' quotes correctly as required by POSIX, so do not add quotes.
++      sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p"
++      ;;
++    esac |
++    sort
++) |
++  sed '
++     /^ac_cv_env_/b end
++     t clear
++     :clear
++     s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/
++     t end
++     s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
++     :end' >>confcache
++if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
++  if test -w "$cache_file"; then
++    if test "x$cache_file" != "x/dev/null"; then
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
++printf "%s\n" "$as_me: updating cache $cache_file" >&6;}
++      if test ! -f "$cache_file" || test -h "$cache_file"; then
++	cat confcache >"$cache_file"
++      else
++        case $cache_file in #(
++        */* | ?:*)
++	  mv -f confcache "$cache_file"$$ &&
++	  mv -f "$cache_file"$$ "$cache_file" ;; #(
++        *)
++	  mv -f confcache "$cache_file" ;;
++	esac
++      fi
++    fi
++  else
++    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
++printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;}
++  fi
++fi
++rm -f confcache
++
++test "x$prefix" = xNONE && prefix=$ac_default_prefix
++# Let make expand exec_prefix.
++test "x$exec_prefix" = xNONE && exec_prefix='${prefix}'
++
++DEFS=-DHAVE_CONFIG_H
++
++ac_libobjs=
++ac_ltlibobjs=
++U=
++for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
++  # 1. Remove the extension, and $U if already installed.
++  ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
++  ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"`
++  # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
++  #    will be set to the directory where LIBOBJS objects are built.
++  as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
++  as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo'
++done
++LIBOBJS=$ac_libobjs
++
++LTLIBOBJS=$ac_ltlibobjs
++
++
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
++printf %s "checking that generated files are newer than configure... " >&6; }
++   if test -n "$am_sleep_pid"; then
++     # Hide warnings about reused PIDs.
++     wait $am_sleep_pid 2>/dev/null
++   fi
++   { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5
++printf "%s\n" "done" >&6; }
++ if test -n "$EXEEXT"; then
++  am__EXEEXT_TRUE=
++  am__EXEEXT_FALSE='#'
++else
++  am__EXEEXT_TRUE='#'
++  am__EXEEXT_FALSE=
++fi
++
++if test -z "${MAINTAINER_MODE_TRUE}" && test -z "${MAINTAINER_MODE_FALSE}"; then
++  as_fn_error $? "conditional \"MAINTAINER_MODE\" was never defined.
++Usually this means the macro was only invoked conditionally." "$LINENO" 5
++fi
++if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then
++  as_fn_error $? "conditional \"AMDEP\" was never defined.
++Usually this means the macro was only invoked conditionally." "$LINENO" 5
++fi
++if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
++  as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
++Usually this means the macro was only invoked conditionally." "$LINENO" 5
++fi
++if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then
++  as_fn_error $? "conditional \"am__fastdepCC\" was never defined.
++Usually this means the macro was only invoked conditionally." "$LINENO" 5
++fi
++if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then
++  as_fn_error $? "conditional \"am__fastdepCXX\" was never defined.
++Usually this means the macro was only invoked conditionally." "$LINENO" 5
++fi
++
++: "${CONFIG_STATUS=./config.status}"
++ac_write_fail=0
++ac_clean_files_save=$ac_clean_files
++ac_clean_files="$ac_clean_files $CONFIG_STATUS"
++{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
++printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;}
++as_write_fail=0
++cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
++#! $SHELL
++# Generated by $as_me.
++# Run this file to recreate the current configuration.
++# Compiler output produced by configure, useful for debugging
++# configure, is in config.log if it exists.
++
++debug=false
++ac_cs_recheck=false
++ac_cs_silent=false
++
++SHELL=\${CONFIG_SHELL-$SHELL}
++export SHELL
++_ASEOF
++cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
++## -------------------- ##
++## M4sh Initialization. ##
++## -------------------- ##
++
++# Be more Bourne compatible
++DUALCASE=1; export DUALCASE # for MKS sh
++as_nop=:
++if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
++then :
++  emulate sh
++  NULLCMD=:
++  # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
++  # is contrary to our usage.  Disable this feature.
++  alias -g '${1+"$@"}'='"$@"'
++  setopt NO_GLOB_SUBST
++else $as_nop
++  case `(set -o) 2>/dev/null` in #(
++  *posix*) :
++    set -o posix ;; #(
++  *) :
++     ;;
++esac
++fi
++
++
++
++# Reset variables that may have inherited troublesome values from
++# the environment.
++
++# IFS needs to be set, to space, tab, and newline, in precisely that order.
++# (If _AS_PATH_WALK were called with IFS unset, it would have the
++# side effect of setting IFS to empty, thus disabling word splitting.)
++# Quoting is to prevent editors from complaining about space-tab.
++as_nl='
++'
++export as_nl
++IFS=" ""	$as_nl"
++
++PS1='$ '
++PS2='> '
++PS4='+ '
++
++# Ensure predictable behavior from utilities with locale-dependent output.
++LC_ALL=C
++export LC_ALL
++LANGUAGE=C
++export LANGUAGE
++
++# We cannot yet rely on "unset" to work, but we need these variables
++# to be unset--not just set to an empty or harmless value--now, to
++# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh).  This construct
++# also avoids known problems related to "unset" and subshell syntax
++# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
++for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
++do eval test \${$as_var+y} \
++  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
++done
++
++# Ensure that fds 0, 1, and 2 are open.
++if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
++if (exec 3>&2)            ; then :; else exec 2>/dev/null; fi
++
++# The user is always right.
++if ${PATH_SEPARATOR+false} :; then
++  PATH_SEPARATOR=:
++  (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
++    (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
++      PATH_SEPARATOR=';'
++  }
++fi
++
++
++# Find who we are.  Look in the path if we contain no directory separator.
++as_myself=
++case $0 in #((
++  *[\\/]* ) as_myself=$0 ;;
++  *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH
++do
++  IFS=$as_save_IFS
++  case $as_dir in #(((
++    '') as_dir=./ ;;
++    */) ;;
++    *) as_dir=$as_dir/ ;;
++  esac
++    test -r "$as_dir$0" && as_myself=$as_dir$0 && break
++  done
++IFS=$as_save_IFS
++
++     ;;
++esac
++# We did not find ourselves, most probably we were run as `sh COMMAND'
++# in which case we are not to be found in the path.
++if test "x$as_myself" = x; then
++  as_myself=$0
++fi
++if test ! -f "$as_myself"; then
++  printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
++  exit 1
++fi
++
++
++
++# as_fn_error STATUS ERROR [LINENO LOG_FD]
++# ----------------------------------------
++# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are
++# provided, also output the error to LOG_FD, referencing LINENO. Then exit the
++# script with STATUS, using 1 if that was 0.
++as_fn_error ()
++{
++  as_status=$1; test $as_status -eq 0 && as_status=1
++  if test "$4"; then
++    as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++    printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
++  fi
++  printf "%s\n" "$as_me: error: $2" >&2
++  as_fn_exit $as_status
++} # as_fn_error
++
++
++
++# as_fn_set_status STATUS
++# -----------------------
++# Set $? to STATUS, without forking.
++as_fn_set_status ()
++{
++  return $1
++} # as_fn_set_status
++
++# as_fn_exit STATUS
++# -----------------
++# Exit the shell with STATUS, even in a "trap 0" or "set -e" context.
++as_fn_exit ()
++{
++  set +e
++  as_fn_set_status $1
++  exit $1
++} # as_fn_exit
++
++# as_fn_unset VAR
++# ---------------
++# Portably unset VAR.
++as_fn_unset ()
++{
++  { eval $1=; unset $1;}
++}
++as_unset=as_fn_unset
++
++# as_fn_append VAR VALUE
++# ----------------------
++# Append the text in VALUE to the end of the definition contained in VAR. Take
++# advantage of any shell optimizations that allow amortized linear growth over
++# repeated appends, instead of the typical quadratic growth present in naive
++# implementations.
++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
++then :
++  eval 'as_fn_append ()
++  {
++    eval $1+=\$2
++  }'
++else $as_nop
++  as_fn_append ()
++  {
++    eval $1=\$$1\$2
++  }
++fi # as_fn_append
++
++# as_fn_arith ARG...
++# ------------------
++# Perform arithmetic evaluation on the ARGs, and store the result in the
++# global $as_val. Take advantage of shells that can avoid forks. The arguments
++# must be portable across $(()) and expr.
++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
++then :
++  eval 'as_fn_arith ()
++  {
++    as_val=$(( $* ))
++  }'
++else $as_nop
++  as_fn_arith ()
++  {
++    as_val=`expr "$@" || test $? -eq 1`
++  }
++fi # as_fn_arith
++
++
++if expr a : '\(a\)' >/dev/null 2>&1 &&
++   test "X`expr 00001 : '.*\(...\)'`" = X001; then
++  as_expr=expr
++else
++  as_expr=false
++fi
++
++if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then
++  as_basename=basename
++else
++  as_basename=false
++fi
++
++if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then
++  as_dirname=dirname
++else
++  as_dirname=false
++fi
++
++as_me=`$as_basename -- "$0" ||
++$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
++	 X"$0" : 'X\(//\)$' \| \
++	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X/"$0" |
++    sed '/^.*\/\([^/][^/]*\)\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\/\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\/\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++
++# Avoid depending upon Character Ranges.
++as_cr_letters='abcdefghijklmnopqrstuvwxyz'
++as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ'
++as_cr_Letters=$as_cr_letters$as_cr_LETTERS
++as_cr_digits='0123456789'
++as_cr_alnum=$as_cr_Letters$as_cr_digits
++
++
++# Determine whether it's possible to make 'echo' print without a newline.
++# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
++# for compatibility with existing Makefiles.
++ECHO_C= ECHO_N= ECHO_T=
++case `echo -n x` in #(((((
++-n*)
++  case `echo 'xy\c'` in
++  *c*) ECHO_T='	';;	# ECHO_T is single tab character.
++  xy)  ECHO_C='\c';;
++  *)   echo `echo ksh88 bug on AIX 6.1` > /dev/null
++       ECHO_T='	';;
++  esac;;
++*)
++  ECHO_N='-n';;
++esac
++
++# For backward compatibility with old third-party macros, we provide
++# the shell variables $as_echo and $as_echo_n.  New code should use
++# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
++as_echo='printf %s\n'
++as_echo_n='printf %s'
++
++rm -f conf$$ conf$$.exe conf$$.file
++if test -d conf$$.dir; then
++  rm -f conf$$.dir/conf$$.file
++else
++  rm -f conf$$.dir
++  mkdir conf$$.dir 2>/dev/null
++fi
++if (echo >conf$$.file) 2>/dev/null; then
++  if ln -s conf$$.file conf$$ 2>/dev/null; then
++    as_ln_s='ln -s'
++    # ... but there are two gotchas:
++    # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail.
++    # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable.
++    # In both cases, we have to default to `cp -pR'.
++    ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe ||
++      as_ln_s='cp -pR'
++  elif ln conf$$.file conf$$ 2>/dev/null; then
++    as_ln_s=ln
++  else
++    as_ln_s='cp -pR'
++  fi
++else
++  as_ln_s='cp -pR'
++fi
++rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file
++rmdir conf$$.dir 2>/dev/null
++
++
++# as_fn_mkdir_p
++# -------------
++# Create "$as_dir" as a directory, including parents if necessary.
++as_fn_mkdir_p ()
++{
++
++  case $as_dir in #(
++  -*) as_dir=./$as_dir;;
++  esac
++  test -d "$as_dir" || eval $as_mkdir_p || {
++    as_dirs=
++    while :; do
++      case $as_dir in #(
++      *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
++      *) as_qdir=$as_dir;;
++      esac
++      as_dirs="'$as_qdir' $as_dirs"
++      as_dir=`$as_dirname -- "$as_dir" ||
++$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
++	 X"$as_dir" : 'X\(//\)[^/]' \| \
++	 X"$as_dir" : 'X\(//\)$' \| \
++	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X"$as_dir" |
++    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)[^/].*/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++      test -d "$as_dir" && break
++    done
++    test -z "$as_dirs" || eval "mkdir $as_dirs"
++  } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir"
++
++
++} # as_fn_mkdir_p
++if mkdir -p . 2>/dev/null; then
++  as_mkdir_p='mkdir -p "$as_dir"'
++else
++  test -d ./-p && rmdir ./-p
++  as_mkdir_p=false
++fi
++
++
++# as_fn_executable_p FILE
++# -----------------------
++# Test if FILE is an executable regular file.
++as_fn_executable_p ()
++{
++  test -f "$1" && test -x "$1"
++} # as_fn_executable_p
++as_test_x='test -x'
++as_executable_p=as_fn_executable_p
++
++# Sed expression to map a string onto a valid CPP name.
++as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'"
++
++# Sed expression to map a string onto a valid variable name.
++as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'"
++
++
++exec 6>&1
++## ----------------------------------- ##
++## Main body of $CONFIG_STATUS script. ##
++## ----------------------------------- ##
++_ASEOF
++test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1
++
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++# Save the log message, to keep $0 and so on meaningful, and to
++# report actual input values of CONFIG_FILES etc. instead of their
++# values after options handling.
++ac_log="
++This file was extended by bolt plugin for ld $as_me 0.1, which was
++generated by GNU Autoconf 2.71.  Invocation command line was
++
++  CONFIG_FILES    = $CONFIG_FILES
++  CONFIG_HEADERS  = $CONFIG_HEADERS
++  CONFIG_LINKS    = $CONFIG_LINKS
++  CONFIG_COMMANDS = $CONFIG_COMMANDS
++  $ $0 $@
++
++on `(hostname || uname -n) 2>/dev/null | sed 1q`
++"
++
++_ACEOF
++
++case $ac_config_files in *"
++"*) set x $ac_config_files; shift; ac_config_files=$*;;
++esac
++
++case $ac_config_headers in *"
++"*) set x $ac_config_headers; shift; ac_config_headers=$*;;
++esac
++
++
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++# Files that config.status was made for.
++config_files="$ac_config_files"
++config_headers="$ac_config_headers"
++config_commands="$ac_config_commands"
++
++_ACEOF
++
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++ac_cs_usage="\
++\`$as_me' instantiates files and other configuration actions
++from templates according to the current configuration.  Unless the files
++and actions are specified as TAGs, all are instantiated by default.
++
++Usage: $0 [OPTION]... [TAG]...
++
++  -h, --help       print this help, then exit
++  -V, --version    print version number and configuration settings, then exit
++      --config     print configuration, then exit
++  -q, --quiet, --silent
++                   do not print progress messages
++  -d, --debug      don't remove temporary files
++      --recheck    update $as_me by reconfiguring in the same conditions
++      --file=FILE[:TEMPLATE]
++                   instantiate the configuration file FILE
++      --header=FILE[:TEMPLATE]
++                   instantiate the configuration header FILE
++
++Configuration files:
++$config_files
++
++Configuration headers:
++$config_headers
++
++Configuration commands:
++$config_commands
++
++Report bugs to the package provider."
++
++_ACEOF
++ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"`
++ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"`
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++ac_cs_config='$ac_cs_config_escaped'
++ac_cs_version="\\
++bolt plugin for ld config.status 0.1
++configured by $0, generated by GNU Autoconf 2.71,
++  with options \\"\$ac_cs_config\\"
++
++Copyright (C) 2021 Free Software Foundation, Inc.
++This config.status script is free software; the Free Software Foundation
++gives unlimited permission to copy, distribute and modify it."
++
++ac_pwd='$ac_pwd'
++srcdir='$srcdir'
++INSTALL='$INSTALL'
++MKDIR_P='$MKDIR_P'
++AWK='$AWK'
++test -n "\$AWK" || AWK=awk
++_ACEOF
++
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++# The default lists apply if the user does not specify any file.
++ac_need_defaults=:
++while test $# != 0
++do
++  case $1 in
++  --*=?*)
++    ac_option=`expr "X$1" : 'X\([^=]*\)='`
++    ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'`
++    ac_shift=:
++    ;;
++  --*=)
++    ac_option=`expr "X$1" : 'X\([^=]*\)='`
++    ac_optarg=
++    ac_shift=:
++    ;;
++  *)
++    ac_option=$1
++    ac_optarg=$2
++    ac_shift=shift
++    ;;
++  esac
++
++  case $ac_option in
++  # Handling of the options.
++  -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
++    ac_cs_recheck=: ;;
++  --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
++    printf "%s\n" "$ac_cs_version"; exit ;;
++  --config | --confi | --conf | --con | --co | --c )
++    printf "%s\n" "$ac_cs_config"; exit ;;
++  --debug | --debu | --deb | --de | --d | -d )
++    debug=: ;;
++  --file | --fil | --fi | --f )
++    $ac_shift
++    case $ac_optarg in
++    *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
++    '') as_fn_error $? "missing file argument" ;;
++    esac
++    as_fn_append CONFIG_FILES " '$ac_optarg'"
++    ac_need_defaults=false;;
++  --header | --heade | --head | --hea )
++    $ac_shift
++    case $ac_optarg in
++    *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
++    esac
++    as_fn_append CONFIG_HEADERS " '$ac_optarg'"
++    ac_need_defaults=false;;
++  --he | --h)
++    # Conflict between --help and --header
++    as_fn_error $? "ambiguous option: \`$1'
++Try \`$0 --help' for more information.";;
++  --help | --hel | -h )
++    printf "%s\n" "$ac_cs_usage"; exit ;;
++  -q | -quiet | --quiet | --quie | --qui | --qu | --q \
++  | -silent | --silent | --silen | --sile | --sil | --si | --s)
++    ac_cs_silent=: ;;
++
++  # This is an error.
++  -*) as_fn_error $? "unrecognized option: \`$1'
++Try \`$0 --help' for more information." ;;
++
++  *) as_fn_append ac_config_targets " $1"
++     ac_need_defaults=false ;;
++
++  esac
++  shift
++done
++
++ac_configure_extra_args=
++
++if $ac_cs_silent; then
++  exec 6>/dev/null
++  ac_configure_extra_args="$ac_configure_extra_args --silent"
++fi
++
++_ACEOF
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++if \$ac_cs_recheck; then
++  set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
++  shift
++  \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6
++  CONFIG_SHELL='$SHELL'
++  export CONFIG_SHELL
++  exec "\$@"
++fi
++
++_ACEOF
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++exec 5>>config.log
++{
++  echo
++  sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
++## Running $as_me. ##
++_ASBOX
++  printf "%s\n" "$ac_log"
++} >&5
++
++_ACEOF
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++#
++# INIT-COMMANDS
++#
++AMDEP_TRUE="$AMDEP_TRUE" MAKE="${MAKE-make}"
++
++
++# The HP-UX ksh and POSIX shell print the target directory to stdout
++# if CDPATH is set.
++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
++
++sed_quote_subst='$sed_quote_subst'
++double_quote_subst='$double_quote_subst'
++delay_variable_subst='$delay_variable_subst'
++macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`'
++macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`'
++enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`'
++enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`'
++pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`'
++enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`'
++shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`'
++SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`'
++ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`'
++PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`'
++host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`'
++host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`'
++host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`'
++build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`'
++build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`'
++build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`'
++SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`'
++Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`'
++GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`'
++EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`'
++FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`'
++LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`'
++NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`'
++LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`'
++max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`'
++ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`'
++exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`'
++lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`'
++lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`'
++lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`'
++lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`'
++lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`'
++reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`'
++reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`'
++FILECMD='`$ECHO "$FILECMD" | $SED "$delay_single_quote_subst"`'
++OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`'
++deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`'
++file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`'
++file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`'
++want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`'
++DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`'
++sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`'
++AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`'
++lt_ar_flags='`$ECHO "$lt_ar_flags" | $SED "$delay_single_quote_subst"`'
++AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`'
++archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`'
++STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`'
++RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`'
++old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`'
++old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
++old_archive_cmds='`$ECHO "$old_archive_cmds" | $SED "$delay_single_quote_subst"`'
++lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`'
++CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`'
++CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`'
++compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`'
++GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`'
++lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`'
++lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`'
++lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`'
++lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`'
++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`'
++lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`'
++nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`'
++lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`'
++lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`'
++objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`'
++MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`'
++lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`'
++need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`'
++MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`'
++DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`'
++NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`'
++LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`'
++OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`'
++OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`'
++libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`'
++shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`'
++extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
++archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`'
++enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`'
++export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`'
++whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`'
++compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`'
++old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`'
++old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`'
++archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`'
++archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`'
++module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`'
++module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`'
++with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`'
++allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`'
++no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`'
++hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`'
++hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`'
++hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`'
++hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`'
++hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`'
++hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`'
++hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`'
++inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`'
++link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`'
++always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`'
++export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`'
++exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`'
++include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`'
++prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`'
++postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`'
++file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`'
++variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`'
++need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`'
++need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`'
++version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`'
++runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`'
++shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`'
++shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`'
++libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`'
++library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`'
++soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`'
++install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`'
++postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`'
++postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`'
++finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`'
++finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`'
++hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`'
++sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`'
++configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`'
++configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`'
++hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`'
++enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`'
++enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`'
++enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`'
++old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`'
++striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`'
++compiler_lib_search_dirs='`$ECHO "$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`'
++predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`'
++postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`'
++predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`'
++postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`'
++compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`'
++LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`'
++reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`'
++reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`'
++GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`'
++lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`'
++archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`'
++enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`'
++export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
++whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
++compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`'
++old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`'
++allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
++no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`'
++inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`'
++link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`'
++always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`'
++export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
++include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
++prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
++file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`'
++compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`'
++predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`'
++postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`'
++predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`'
++postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`'
++compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`'
++
++LTCC='$LTCC'
++LTCFLAGS='$LTCFLAGS'
++compiler='$compiler_DEFAULT'
++
++# A function that is used when there is no print builtin or printf.
++func_fallback_echo ()
++{
++  eval 'cat <<_LTECHO_EOF
++\$1
++_LTECHO_EOF'
++}
++
++# Quote evaled strings.
++for var in SHELL \
++ECHO \
++PATH_SEPARATOR \
++SED \
++GREP \
++EGREP \
++FGREP \
++LD \
++NM \
++LN_S \
++lt_SP2NL \
++lt_NL2SP \
++reload_flag \
++FILECMD \
++OBJDUMP \
++deplibs_check_method \
++file_magic_cmd \
++file_magic_glob \
++want_nocaseglob \
++DLLTOOL \
++sharedlib_from_linklib_cmd \
++AR \
++archiver_list_spec \
++STRIP \
++RANLIB \
++CC \
++CFLAGS \
++compiler \
++lt_cv_sys_global_symbol_pipe \
++lt_cv_sys_global_symbol_to_cdecl \
++lt_cv_sys_global_symbol_to_import \
++lt_cv_sys_global_symbol_to_c_name_address \
++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \
++lt_cv_nm_interface \
++nm_file_list_spec \
++lt_cv_truncate_bin \
++lt_prog_compiler_no_builtin_flag \
++lt_prog_compiler_pic \
++lt_prog_compiler_wl \
++lt_prog_compiler_static \
++lt_cv_prog_compiler_c_o \
++need_locks \
++MANIFEST_TOOL \
++DSYMUTIL \
++NMEDIT \
++LIPO \
++OTOOL \
++OTOOL64 \
++shrext_cmds \
++export_dynamic_flag_spec \
++whole_archive_flag_spec \
++compiler_needs_object \
++with_gnu_ld \
++allow_undefined_flag \
++no_undefined_flag \
++hardcode_libdir_flag_spec \
++hardcode_libdir_separator \
++exclude_expsyms \
++include_expsyms \
++file_list_spec \
++variables_saved_for_relink \
++libname_spec \
++library_names_spec \
++soname_spec \
++install_override_mode \
++finish_eval \
++old_striplib \
++striplib \
++compiler_lib_search_dirs \
++predep_objects \
++postdep_objects \
++predeps \
++postdeps \
++compiler_lib_search_path \
++LD_CXX \
++reload_flag_CXX \
++compiler_CXX \
++lt_prog_compiler_no_builtin_flag_CXX \
++lt_prog_compiler_pic_CXX \
++lt_prog_compiler_wl_CXX \
++lt_prog_compiler_static_CXX \
++lt_cv_prog_compiler_c_o_CXX \
++export_dynamic_flag_spec_CXX \
++whole_archive_flag_spec_CXX \
++compiler_needs_object_CXX \
++with_gnu_ld_CXX \
++allow_undefined_flag_CXX \
++no_undefined_flag_CXX \
++hardcode_libdir_flag_spec_CXX \
++hardcode_libdir_separator_CXX \
++exclude_expsyms_CXX \
++include_expsyms_CXX \
++file_list_spec_CXX \
++compiler_lib_search_dirs_CXX \
++predep_objects_CXX \
++postdep_objects_CXX \
++predeps_CXX \
++postdeps_CXX \
++compiler_lib_search_path_CXX; do
++    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
++    *[\\\\\\\`\\"\\\$]*)
++      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
++      ;;
++    *)
++      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
++      ;;
++    esac
++done
++
++# Double-quote double-evaled strings.
++for var in reload_cmds \
++old_postinstall_cmds \
++old_postuninstall_cmds \
++old_archive_cmds \
++extract_expsyms_cmds \
++old_archive_from_new_cmds \
++old_archive_from_expsyms_cmds \
++archive_cmds \
++archive_expsym_cmds \
++module_cmds \
++module_expsym_cmds \
++export_symbols_cmds \
++prelink_cmds \
++postlink_cmds \
++postinstall_cmds \
++postuninstall_cmds \
++finish_cmds \
++sys_lib_search_path_spec \
++configure_time_dlsearch_path \
++configure_time_lt_sys_library_path \
++reload_cmds_CXX \
++old_archive_cmds_CXX \
++old_archive_from_new_cmds_CXX \
++old_archive_from_expsyms_cmds_CXX \
++archive_cmds_CXX \
++archive_expsym_cmds_CXX \
++module_cmds_CXX \
++module_expsym_cmds_CXX \
++export_symbols_cmds_CXX \
++prelink_cmds_CXX \
++postlink_cmds_CXX; do
++    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
++    *[\\\\\\\`\\"\\\$]*)
++      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
++      ;;
++    *)
++      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
++      ;;
++    esac
++done
++
++ac_aux_dir='$ac_aux_dir'
++
++# See if we are running on zsh, and set the options that allow our
++# commands through without removal of \ escapes INIT.
++if test -n "\${ZSH_VERSION+set}"; then
++   setopt NO_GLOB_SUBST
++fi
++
++
++    PACKAGE='$PACKAGE'
++    VERSION='$VERSION'
++    RM='$RM'
++    ofile='$ofile'
++
++
++
++
++
++
++_ACEOF
++
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++
++# Handling of arguments.
++for ac_config_target in $ac_config_targets
++do
++  case $ac_config_target in
++    "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;;
++    "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;;
++    "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
++    "config.h") CONFIG_HEADERS="$CONFIG_HEADERS config.h" ;;
++
++  *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;;
++  esac
++done
++
++
++# If the user did not use the arguments to specify the items to instantiate,
++# then the envvar interface is used.  Set only those that are not.
++# We use the long form for the default assignment because of an extremely
++# bizarre bug on SunOS 4.1.3.
++if $ac_need_defaults; then
++  test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files
++  test ${CONFIG_HEADERS+y} || CONFIG_HEADERS=$config_headers
++  test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands
++fi
++
++# Have a temporary directory for convenience.  Make it in the build tree
++# simply because there is no reason against having it here, and in addition,
++# creating and moving files from /tmp can sometimes cause problems.
++# Hook for its removal unless debugging.
++# Note that there is a small window in which the directory will not be cleaned:
++# after its creation but before its name has been assigned to `$tmp'.
++$debug ||
++{
++  tmp= ac_tmp=
++  trap 'exit_status=$?
++  : "${ac_tmp:=$tmp}"
++  { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status
++' 0
++  trap 'as_fn_exit 1' 1 2 13 15
++}
++# Create a (secure) tmp directory for tmp files.
++
++{
++  tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` &&
++  test -d "$tmp"
++}  ||
++{
++  tmp=./conf$$-$RANDOM
++  (umask 077 && mkdir "$tmp")
++} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5
++ac_tmp=$tmp
++
++# Set up the scripts for CONFIG_FILES section.
++# No need to generate them if there are no CONFIG_FILES.
++# This happens for instance with `./config.status config.h'.
++if test -n "$CONFIG_FILES"; then
++
++
++ac_cr=`echo X | tr X '\015'`
++# On cygwin, bash can eat \r inside `` if the user requested igncr.
++# But we know of no other shell where ac_cr would be empty at this
++# point, so we can use a bashism as a fallback.
++if test "x$ac_cr" = x; then
++  eval ac_cr=\$\'\\r\'
++fi
++ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null`
++if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then
++  ac_cs_awk_cr='\\r'
++else
++  ac_cs_awk_cr=$ac_cr
++fi
++
++echo 'BEGIN {' >"$ac_tmp/subs1.awk" &&
++_ACEOF
++
++
++{
++  echo "cat >conf$$subs.awk <<_ACEOF" &&
++  echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' &&
++  echo "_ACEOF"
++} >conf$$subs.sh ||
++  as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
++ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'`
++ac_delim='%!_!# '
++for ac_last_try in false false false false false :; do
++  . ./conf$$subs.sh ||
++    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
++
++  ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X`
++  if test $ac_delim_n = $ac_delim_num; then
++    break
++  elif $ac_last_try; then
++    as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5
++  else
++    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
++  fi
++done
++rm -f conf$$subs.sh
++
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK &&
++_ACEOF
++sed -n '
++h
++s/^/S["/; s/!.*/"]=/
++p
++g
++s/^[^!]*!//
++:repl
++t repl
++s/'"$ac_delim"'$//
++t delim
++:nl
++h
++s/\(.\{148\}\)..*/\1/
++t more1
++s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/
++p
++n
++b repl
++:more1
++s/["\\]/\\&/g; s/^/"/; s/$/"\\/
++p
++g
++s/.\{148\}//
++t nl
++:delim
++h
++s/\(.\{148\}\)..*/\1/
++t more2
++s/["\\]/\\&/g; s/^/"/; s/$/"/
++p
++b
++:more2
++s/["\\]/\\&/g; s/^/"/; s/$/"\\/
++p
++g
++s/.\{148\}//
++t delim
++' >$CONFIG_STATUS || ac_write_fail=1
++rm -f conf$$subs.awk
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++_ACAWK
++cat >>"\$ac_tmp/subs1.awk" <<_ACAWK &&
++  for (key in S) S_is_set[key] = 1
++  FS = ""
++
++}
++{
++  line = $ 0
++  nfields = split(line, field, "@")
++  substed = 0
++  len = length(field[1])
++  for (i = 2; i < nfields; i++) {
++    key = field[i]
++    keylen = length(key)
++    if (S_is_set[key]) {
++      value = S[key]
++      line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3)
++      len += length(value) + length(field[++i])
++      substed = 1
++    } else
++      len += 1 + keylen
++  }
++
++  print line
++}
++
++_ACAWK
++_ACEOF
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then
++  sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g"
++else
++  cat
++fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \
++  || as_fn_error $? "could not setup config files machinery" "$LINENO" 5
++_ACEOF
++
++# VPATH may cause trouble with some makes, so we remove sole $(srcdir),
++# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and
++# trailing colons and then remove the whole line if VPATH becomes empty
++# (actually we leave an empty line to preserve line numbers).
++if test "x$srcdir" = x.; then
++  ac_vpsub='/^[	 ]*VPATH[	 ]*=[	 ]*/{
++h
++s///
++s/^/:/
++s/[	 ]*$/:/
++s/:\$(srcdir):/:/g
++s/:\${srcdir}:/:/g
++s/:@srcdir@:/:/g
++s/^:*//
++s/:*$//
++x
++s/\(=[	 ]*\).*/\1/
++G
++s/\n//
++s/^[^=]*=[	 ]*$//
++}'
++fi
++
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++fi # test -n "$CONFIG_FILES"
++
++# Set up the scripts for CONFIG_HEADERS section.
++# No need to generate them if there are no CONFIG_HEADERS.
++# This happens for instance with `./config.status Makefile'.
++if test -n "$CONFIG_HEADERS"; then
++cat >"$ac_tmp/defines.awk" <<\_ACAWK ||
++BEGIN {
++_ACEOF
++
++# Transform confdefs.h into an awk script `defines.awk', embedded as
++# here-document in config.status, that substitutes the proper values into
++# config.h.in to produce config.h.
++
++# Create a delimiter string that does not exist in confdefs.h, to ease
++# handling of long lines.
++ac_delim='%!_!# '
++for ac_last_try in false false :; do
++  ac_tt=`sed -n "/$ac_delim/p" confdefs.h`
++  if test -z "$ac_tt"; then
++    break
++  elif $ac_last_try; then
++    as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5
++  else
++    ac_delim="$ac_delim!$ac_delim _$ac_delim!! "
++  fi
++done
++
++# For the awk script, D is an array of macro values keyed by name,
++# likewise P contains macro parameters if any.  Preserve backslash
++# newline sequences.
++
++ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]*
++sed -n '
++s/.\{148\}/&'"$ac_delim"'/g
++t rset
++:rset
++s/^[	 ]*#[	 ]*define[	 ][	 ]*/ /
++t def
++d
++:def
++s/\\$//
++t bsnl
++s/["\\]/\\&/g
++s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
++D["\1"]=" \3"/p
++s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2"/p
++d
++:bsnl
++s/["\\]/\\&/g
++s/^ \('"$ac_word_re"'\)\(([^()]*)\)[	 ]*\(.*\)/P["\1"]="\2"\
++D["\1"]=" \3\\\\\\n"\\/p
++t cont
++s/^ \('"$ac_word_re"'\)[	 ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p
++t cont
++d
++:cont
++n
++s/.\{148\}/&'"$ac_delim"'/g
++t clear
++:clear
++s/\\$//
++t bsnlc
++s/["\\]/\\&/g; s/^/"/; s/$/"/p
++d
++:bsnlc
++s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p
++b cont
++' >$CONFIG_STATUS || ac_write_fail=1
++
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++  for (key in D) D_is_set[key] = 1
++  FS = ""
++}
++/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ {
++  line = \$ 0
++  split(line, arg, " ")
++  if (arg[1] == "#") {
++    defundef = arg[2]
++    mac1 = arg[3]
++  } else {
++    defundef = substr(arg[1], 2)
++    mac1 = arg[2]
++  }
++  split(mac1, mac2, "(") #)
++  macro = mac2[1]
++  prefix = substr(line, 1, index(line, defundef) - 1)
++  if (D_is_set[macro]) {
++    # Preserve the white space surrounding the "#".
++    print prefix "define", macro P[macro] D[macro]
++    next
++  } else {
++    # Replace #undef with comments.  This is necessary, for example,
++    # in the case of _POSIX_SOURCE, which is predefined and required
++    # on some systems where configure will not decide to define it.
++    if (defundef == "undef") {
++      print "/*", prefix defundef, macro, "*/"
++      next
++    }
++  }
++}
++{ print }
++_ACAWK
++_ACEOF
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++  as_fn_error $? "could not setup config headers machinery" "$LINENO" 5
++fi # test -n "$CONFIG_HEADERS"
++
++
++eval set X "  :F $CONFIG_FILES  :H $CONFIG_HEADERS    :C $CONFIG_COMMANDS"
++shift
++for ac_tag
++do
++  case $ac_tag in
++  :[FHLC]) ac_mode=$ac_tag; continue;;
++  esac
++  case $ac_mode$ac_tag in
++  :[FHL]*:*);;
++  :L* | :C*:*) as_fn_error $? "invalid tag \`$ac_tag'" "$LINENO" 5;;
++  :[FH]-) ac_tag=-:-;;
++  :[FH]*) ac_tag=$ac_tag:$ac_tag.in;;
++  esac
++  ac_save_IFS=$IFS
++  IFS=:
++  set x $ac_tag
++  IFS=$ac_save_IFS
++  shift
++  ac_file=$1
++  shift
++
++  case $ac_mode in
++  :L) ac_source=$1;;
++  :[FH])
++    ac_file_inputs=
++    for ac_f
++    do
++      case $ac_f in
++      -) ac_f="$ac_tmp/stdin";;
++      *) # Look for the file first in the build tree, then in the source tree
++	 # (if the path is not absolute).  The absolute path cannot be DOS-style,
++	 # because $ac_f cannot contain `:'.
++	 test -f "$ac_f" ||
++	   case $ac_f in
++	   [\\/$]*) false;;
++	   *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";;
++	   esac ||
++	   as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
++      esac
++      case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
++      as_fn_append ac_file_inputs " '$ac_f'"
++    done
++
++    # Let's still pretend it is `configure' which instantiates (i.e., don't
++    # use $as_me), people would be surprised to read:
++    #    /* config.h.  Generated by config.status.  */
++    configure_input='Generated from '`
++	  printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
++	`' by configure.'
++    if test x"$ac_file" != x-; then
++      configure_input="$ac_file.  $configure_input"
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
++printf "%s\n" "$as_me: creating $ac_file" >&6;}
++    fi
++    # Neutralize special characters interpreted by sed in replacement strings.
++    case $configure_input in #(
++    *\&* | *\|* | *\\* )
++       ac_sed_conf_input=`printf "%s\n" "$configure_input" |
++       sed 's/[\\\\&|]/\\\\&/g'`;; #(
++    *) ac_sed_conf_input=$configure_input;;
++    esac
++
++    case $ac_tag in
++    *:-:* | *:-) cat >"$ac_tmp/stdin" \
++      || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;;
++    esac
++    ;;
++  esac
++
++  ac_dir=`$as_dirname -- "$ac_file" ||
++$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
++	 X"$ac_file" : 'X\(//\)[^/]' \| \
++	 X"$ac_file" : 'X\(//\)$' \| \
++	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X"$ac_file" |
++    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)[^/].*/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++  as_dir="$ac_dir"; as_fn_mkdir_p
++  ac_builddir=.
++
++case "$ac_dir" in
++.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
++*)
++  ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
++  # A ".." for each directory in $ac_dir_suffix.
++  ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
++  case $ac_top_builddir_sub in
++  "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
++  *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
++  esac ;;
++esac
++ac_abs_top_builddir=$ac_pwd
++ac_abs_builddir=$ac_pwd$ac_dir_suffix
++# for backward compatibility:
++ac_top_builddir=$ac_top_build_prefix
++
++case $srcdir in
++  .)  # We are building in place.
++    ac_srcdir=.
++    ac_top_srcdir=$ac_top_builddir_sub
++    ac_abs_top_srcdir=$ac_pwd ;;
++  [\\/]* | ?:[\\/]* )  # Absolute name.
++    ac_srcdir=$srcdir$ac_dir_suffix;
++    ac_top_srcdir=$srcdir
++    ac_abs_top_srcdir=$srcdir ;;
++  *) # Relative name.
++    ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix
++    ac_top_srcdir=$ac_top_build_prefix$srcdir
++    ac_abs_top_srcdir=$ac_pwd/$srcdir ;;
++esac
++ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
++
++
++  case $ac_mode in
++  :F)
++  #
++  # CONFIG_FILE
++  #
++
++  case $INSTALL in
++  [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;;
++  *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;;
++  esac
++  ac_MKDIR_P=$MKDIR_P
++  case $MKDIR_P in
++  [\\/$]* | ?:[\\/]* ) ;;
++  */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;;
++  esac
++_ACEOF
++
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++# If the template does not know about datarootdir, expand it.
++# FIXME: This hack should be removed a few years after 2.60.
++ac_datarootdir_hack=; ac_datarootdir_seen=
++ac_sed_dataroot='
++/datarootdir/ {
++  p
++  q
++}
++/@datadir@/p
++/@docdir@/p
++/@infodir@/p
++/@localedir@/p
++/@mandir@/p'
++case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
++*datarootdir*) ac_datarootdir_seen=yes;;
++*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
++printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
++_ACEOF
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++  ac_datarootdir_hack='
++  s&@datadir@&$datadir&g
++  s&@docdir@&$docdir&g
++  s&@infodir@&$infodir&g
++  s&@localedir@&$localedir&g
++  s&@mandir@&$mandir&g
++  s&\\\${datarootdir}&$datarootdir&g' ;;
++esac
++_ACEOF
++
++# Neutralize VPATH when `$srcdir' = `.'.
++# Shell code in configure.ac might set extrasub.
++# FIXME: do we really want to maintain this feature?
++cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
++ac_sed_extra="$ac_vpsub
++$extrasub
++_ACEOF
++cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
++:t
++/@[a-zA-Z_][a-zA-Z_0-9]*@/!b
++s|@configure_input@|$ac_sed_conf_input|;t t
++s&@top_builddir@&$ac_top_builddir_sub&;t t
++s&@top_build_prefix@&$ac_top_build_prefix&;t t
++s&@srcdir@&$ac_srcdir&;t t
++s&@abs_srcdir@&$ac_abs_srcdir&;t t
++s&@top_srcdir@&$ac_top_srcdir&;t t
++s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t
++s&@builddir@&$ac_builddir&;t t
++s&@abs_builddir@&$ac_abs_builddir&;t t
++s&@abs_top_builddir@&$ac_abs_top_builddir&;t t
++s&@INSTALL@&$ac_INSTALL&;t t
++s&@MKDIR_P@&$ac_MKDIR_P&;t t
++$ac_datarootdir_hack
++"
++eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \
++  >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5
++
++test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
++  { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
++  { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' \
++      "$ac_tmp/out"`; test -z "$ac_out"; } &&
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
++which seems to be undefined.  Please make sure it is defined" >&5
++printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
++which seems to be undefined.  Please make sure it is defined" >&2;}
++
++  rm -f "$ac_tmp/stdin"
++  case $ac_file in
++  -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";;
++  *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";;
++  esac \
++  || as_fn_error $? "could not create $ac_file" "$LINENO" 5
++ ;;
++  :H)
++  #
++  # CONFIG_HEADER
++  #
++  if test x"$ac_file" != x-; then
++    {
++      printf "%s\n" "/* $configure_input  */" >&1 \
++      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
++    } >"$ac_tmp/config.h" \
++      || as_fn_error $? "could not create $ac_file" "$LINENO" 5
++    if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
++      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
++printf "%s\n" "$as_me: $ac_file is unchanged" >&6;}
++    else
++      rm -f "$ac_file"
++      mv "$ac_tmp/config.h" "$ac_file" \
++	|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
++    fi
++  else
++    printf "%s\n" "/* $configure_input  */" >&1 \
++      && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
++      || as_fn_error $? "could not create -" "$LINENO" 5
++  fi
++# Compute "$ac_file"'s index in $config_headers.
++_am_arg="$ac_file"
++_am_stamp_count=1
++for _am_header in $config_headers :; do
++  case $_am_header in
++    $_am_arg | $_am_arg:* )
++      break ;;
++    * )
++      _am_stamp_count=`expr $_am_stamp_count + 1` ;;
++  esac
++done
++echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" ||
++$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
++	 X"$_am_arg" : 'X\(//\)[^/]' \| \
++	 X"$_am_arg" : 'X\(//\)$' \| \
++	 X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X"$_am_arg" |
++    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)[^/].*/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`/stamp-h$_am_stamp_count
++ ;;
++
++  :C)  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
++printf "%s\n" "$as_me: executing $ac_file commands" >&6;}
++ ;;
++  esac
++
++
++  case $ac_file$ac_mode in
++    "depfiles":C) test x"$AMDEP_TRUE" != x"" || {
++  # Older Autoconf quotes --file arguments for eval, but not when files
++  # are listed without --file.  Let's play safe and only enable the eval
++  # if we detect the quoting.
++  # TODO: see whether this extra hack can be removed once we start
++  # requiring Autoconf 2.70 or later.
++  case $CONFIG_FILES in #(
++  *\'*) :
++    eval set x "$CONFIG_FILES" ;; #(
++  *) :
++    set x $CONFIG_FILES ;; #(
++  *) :
++     ;;
++esac
++  shift
++  # Used to flag and report bootstrapping failures.
++  am_rc=0
++  for am_mf
++  do
++    # Strip MF so we end up with the name of the file.
++    am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'`
++    # Check whether this is an Automake generated Makefile which includes
++    # dependency-tracking related rules and includes.
++    # Grep'ing the whole file directly is not great: AIX grep has a line
++    # limit of 2048, but all sed's we know have understand at least 4000.
++    sed -n 's,^am--depfiles:.*,X,p' "$am_mf" | grep X >/dev/null 2>&1 \
++      || continue
++    am_dirpart=`$as_dirname -- "$am_mf" ||
++$as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
++	 X"$am_mf" : 'X\(//\)[^/]' \| \
++	 X"$am_mf" : 'X\(//\)$' \| \
++	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X"$am_mf" |
++    sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)[^/].*/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++    am_filepart=`$as_basename -- "$am_mf" ||
++$as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \
++	 X"$am_mf" : 'X\(//\)$' \| \
++	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
++printf "%s\n" X/"$am_mf" |
++    sed '/^.*\/\([^/][^/]*\)\/*$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\/\(\/\/\)$/{
++	    s//\1/
++	    q
++	  }
++	  /^X\/\(\/\).*/{
++	    s//\1/
++	    q
++	  }
++	  s/.*/./; q'`
++    { echo "$as_me:$LINENO: cd "$am_dirpart" \
++      && sed -e '/# am--include-marker/d' "$am_filepart" \
++        | $MAKE -f - am--depfiles" >&5
++   (cd "$am_dirpart" \
++      && sed -e '/# am--include-marker/d' "$am_filepart" \
++        | $MAKE -f - am--depfiles) >&5 2>&5
++   ac_status=$?
++   echo "$as_me:$LINENO: \$? = $ac_status" >&5
++   (exit $ac_status); } || am_rc=$?
++  done
++  if test $am_rc -ne 0; then
++    { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "Something went wrong bootstrapping makefile fragments
++    for automatic dependency tracking.  If GNU make was not used, consider
++    re-running the configure script with MAKE=\"gmake\" (or whatever is
++    necessary).  You can also try re-running configure with the
++    '--disable-dependency-tracking' option to at least be able to build
++    the package (albeit without support for automatic dependency tracking).
++See \`config.log' for more details" "$LINENO" 5; }
++  fi
++  { am_dirpart=; unset am_dirpart;}
++  { am_filepart=; unset am_filepart;}
++  { am_mf=; unset am_mf;}
++  { am_rc=; unset am_rc;}
++  rm -f conftest-deps.mk
++}
++ ;;
++    "libtool":C)
++
++    # See if we are running on zsh, and set the options that allow our
++    # commands through without removal of \ escapes.
++    if test -n "${ZSH_VERSION+set}"; then
++      setopt NO_GLOB_SUBST
++    fi
++
++    cfgfile=${ofile}T
++    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
++    $RM "$cfgfile"
++
++    cat <<_LT_EOF >> "$cfgfile"
++#! $SHELL
++# Generated automatically by $as_me ($PACKAGE) $VERSION
++# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
++# NOTE: Changes made to this file will be lost: look at ltmain.sh.
++
++# Provide generalized library-building support services.
++# Written by Gordon Matzigkeit, 1996
++
++# Copyright (C) 2014 Free Software Foundation, Inc.
++# This is free software; see the source for copying conditions.  There is NO
++# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
++
++# GNU Libtool is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 2 of of the License, or
++# (at your option) any later version.
++#
++# As a special exception to the GNU General Public License, if you
++# distribute this file as part of a program or library that is built
++# using GNU Libtool, you may include this file under the  same
++# distribution terms that you use for the rest of that program.
++#
++# GNU Libtool is distributed in the hope that it will be useful, but
++# WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with this program.  If not, see .
++
++
++# The names of the tagged configurations supported by this script.
++available_tags='CXX '
++
++# Configured defaults for sys_lib_dlsearch_path munging.
++: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}
++
++# ### BEGIN LIBTOOL CONFIG
++
++# Which release of libtool.m4 was used?
++macro_version=$macro_version
++macro_revision=$macro_revision
++
++# Whether or not to build shared libraries.
++build_libtool_libs=$enable_shared
++
++# Whether or not to build static libraries.
++build_old_libs=$enable_static
++
++# What type of objects to build.
++pic_mode=$pic_mode
++
++# Whether or not to optimize for fast installation.
++fast_install=$enable_fast_install
++
++# Shared archive member basename,for filename based shared library versioning on AIX.
++shared_archive_member_spec=$shared_archive_member_spec
++
++# Shell to use when invoking shell scripts.
++SHELL=$lt_SHELL
++
++# An echo program that protects backslashes.
++ECHO=$lt_ECHO
++
++# The PATH separator for the build system.
++PATH_SEPARATOR=$lt_PATH_SEPARATOR
++
++# The host system.
++host_alias=$host_alias
++host=$host
++host_os=$host_os
++
++# The build system.
++build_alias=$build_alias
++build=$build
++build_os=$build_os
++
++# A sed program that does not truncate output.
++SED=$lt_SED
++
++# Sed that helps us avoid accidentally triggering echo(1) options like -n.
++Xsed="\$SED -e 1s/^X//"
++
++# A grep program that handles long lines.
++GREP=$lt_GREP
++
++# An ERE matcher.
++EGREP=$lt_EGREP
++
++# A literal string matcher.
++FGREP=$lt_FGREP
++
++# A BSD- or MS-compatible name lister.
++NM=$lt_NM
++
++# Whether we need soft or hard links.
++LN_S=$lt_LN_S
++
++# What is the maximum length of a command?
++max_cmd_len=$max_cmd_len
++
++# Object file suffix (normally "o").
++objext=$ac_objext
++
++# Executable file suffix (normally "").
++exeext=$exeext
++
++# whether the shell understands "unset".
++lt_unset=$lt_unset
++
++# turn spaces into newlines.
++SP2NL=$lt_lt_SP2NL
++
++# turn newlines into spaces.
++NL2SP=$lt_lt_NL2SP
++
++# convert \$build file names to \$host format.
++to_host_file_cmd=$lt_cv_to_host_file_cmd
++
++# convert \$build files to toolchain format.
++to_tool_file_cmd=$lt_cv_to_tool_file_cmd
++
++# A file(cmd) program that detects file types.
++FILECMD=$lt_FILECMD
++
++# An object symbol dumper.
++OBJDUMP=$lt_OBJDUMP
++
++# Method to check whether dependent libraries are shared objects.
++deplibs_check_method=$lt_deplibs_check_method
++
++# Command to use when deplibs_check_method = "file_magic".
++file_magic_cmd=$lt_file_magic_cmd
++
++# How to find potential files when deplibs_check_method = "file_magic".
++file_magic_glob=$lt_file_magic_glob
++
++# Find potential files using nocaseglob when deplibs_check_method = "file_magic".
++want_nocaseglob=$lt_want_nocaseglob
++
++# DLL creation program.
++DLLTOOL=$lt_DLLTOOL
++
++# Command to associate shared and link libraries.
++sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd
++
++# The archiver.
++AR=$lt_AR
++
++# Flags to create an archive (by configure).
++lt_ar_flags=$lt_ar_flags
++
++# Flags to create an archive.
++AR_FLAGS=\${ARFLAGS-"\$lt_ar_flags"}
++
++# How to feed a file listing to the archiver.
++archiver_list_spec=$lt_archiver_list_spec
++
++# A symbol stripping program.
++STRIP=$lt_STRIP
++
++# Commands used to install an old-style archive.
++RANLIB=$lt_RANLIB
++old_postinstall_cmds=$lt_old_postinstall_cmds
++old_postuninstall_cmds=$lt_old_postuninstall_cmds
++
++# Whether to use a lock for old archive extraction.
++lock_old_archive_extraction=$lock_old_archive_extraction
++
++# A C compiler.
++LTCC=$lt_CC
++
++# LTCC compiler flags.
++LTCFLAGS=$lt_CFLAGS
++
++# Take the output of nm and produce a listing of raw symbols and C names.
++global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe
++
++# Transform the output of nm in a proper C declaration.
++global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl
++
++# Transform the output of nm into a list of symbols to manually relocate.
++global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import
++
++# Transform the output of nm in a C name address pair.
++global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address
++
++# Transform the output of nm in a C name address pair when lib prefix is needed.
++global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix
++
++# The name lister interface.
++nm_interface=$lt_lt_cv_nm_interface
++
++# Specify filename containing input files for \$NM.
++nm_file_list_spec=$lt_nm_file_list_spec
++
++# The root where to search for dependent libraries,and where our libraries should be installed.
++lt_sysroot=$lt_sysroot
++
++# Command to truncate a binary pipe.
++lt_truncate_bin=$lt_lt_cv_truncate_bin
++
++# The name of the directory that contains temporary libtool files.
++objdir=$objdir
++
++# Used to examine libraries when file_magic_cmd begins with "file".
++MAGIC_CMD=$MAGIC_CMD
++
++# Must we lock files when doing compilation?
++need_locks=$lt_need_locks
++
++# Manifest tool.
++MANIFEST_TOOL=$lt_MANIFEST_TOOL
++
++# Tool to manipulate archived DWARF debug symbol files on Mac OS X.
++DSYMUTIL=$lt_DSYMUTIL
++
++# Tool to change global to local symbols on Mac OS X.
++NMEDIT=$lt_NMEDIT
++
++# Tool to manipulate fat objects and archives on Mac OS X.
++LIPO=$lt_LIPO
++
++# ldd/readelf like tool for Mach-O binaries on Mac OS X.
++OTOOL=$lt_OTOOL
++
++# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4.
++OTOOL64=$lt_OTOOL64
++
++# Old archive suffix (normally "a").
++libext=$libext
++
++# Shared library suffix (normally ".so").
++shrext_cmds=$lt_shrext_cmds
++
++# The commands to extract the exported symbol list from a shared archive.
++extract_expsyms_cmds=$lt_extract_expsyms_cmds
++
++# Variables whose values should be saved in libtool wrapper scripts and
++# restored at link time.
++variables_saved_for_relink=$lt_variables_saved_for_relink
++
++# Do we need the "lib" prefix for modules?
++need_lib_prefix=$need_lib_prefix
++
++# Do we need a version for libraries?
++need_version=$need_version
++
++# Library versioning type.
++version_type=$version_type
++
++# Shared library runtime path variable.
++runpath_var=$runpath_var
++
++# Shared library path variable.
++shlibpath_var=$shlibpath_var
++
++# Is shlibpath searched before the hard-coded library search path?
++shlibpath_overrides_runpath=$shlibpath_overrides_runpath
++
++# Format of library name prefix.
++libname_spec=$lt_libname_spec
++
++# List of archive names.  First name is the real one, the rest are links.
++# The last name is the one that the linker finds with -lNAME
++library_names_spec=$lt_library_names_spec
++
++# The coded name of the library, if different from the real name.
++soname_spec=$lt_soname_spec
++
++# Permission mode override for installation of shared libraries.
++install_override_mode=$lt_install_override_mode
++
++# Command to use after installation of a shared archive.
++postinstall_cmds=$lt_postinstall_cmds
++
++# Command to use after uninstallation of a shared archive.
++postuninstall_cmds=$lt_postuninstall_cmds
++
++# Commands used to finish a libtool library installation in a directory.
++finish_cmds=$lt_finish_cmds
++
++# As "finish_cmds", except a single script fragment to be evaled but
++# not shown.
++finish_eval=$lt_finish_eval
++
++# Whether we should hardcode library paths into libraries.
++hardcode_into_libs=$hardcode_into_libs
++
++# Compile-time system search path for libraries.
++sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
++
++# Detected run-time system search path for libraries.
++sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path
++
++# Explicit LT_SYS_LIBRARY_PATH set during ./configure time.
++configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path
++
++# Whether dlopen is supported.
++dlopen_support=$enable_dlopen
++
++# Whether dlopen of programs is supported.
++dlopen_self=$enable_dlopen_self
++
++# Whether dlopen of statically linked programs is supported.
++dlopen_self_static=$enable_dlopen_self_static
++
++# Commands to strip libraries.
++old_striplib=$lt_old_striplib
++striplib=$lt_striplib
++
++
++# The linker used to build libraries.
++LD=$lt_LD
++
++# How to create reloadable object files.
++reload_flag=$lt_reload_flag
++reload_cmds=$lt_reload_cmds
++
++# Commands used to build an old-style archive.
++old_archive_cmds=$lt_old_archive_cmds
++
++# A language specific compiler.
++CC=$lt_compiler
++
++# Is the compiler the GNU compiler?
++with_gcc=$GCC
++
++# Compiler flag to turn off builtin functions.
++no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag
++
++# Additional compiler flags for building library objects.
++pic_flag=$lt_lt_prog_compiler_pic
++
++# How to pass a linker flag through the compiler.
++wl=$lt_lt_prog_compiler_wl
++
++# Compiler flag to prevent dynamic linking.
++link_static_flag=$lt_lt_prog_compiler_static
++
++# Does compiler simultaneously support -c and -o options?
++compiler_c_o=$lt_lt_cv_prog_compiler_c_o
++
++# Whether or not to add -lc for building shared libraries.
++build_libtool_need_lc=$archive_cmds_need_lc
++
++# Whether or not to disallow shared libs when runtime libs are static.
++allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes
++
++# Compiler flag to allow reflexive dlopens.
++export_dynamic_flag_spec=$lt_export_dynamic_flag_spec
++
++# Compiler flag to generate shared objects directly from archives.
++whole_archive_flag_spec=$lt_whole_archive_flag_spec
++
++# Whether the compiler copes with passing no objects directly.
++compiler_needs_object=$lt_compiler_needs_object
++
++# Create an old-style archive from a shared archive.
++old_archive_from_new_cmds=$lt_old_archive_from_new_cmds
++
++# Create a temporary old-style archive to link instead of a shared archive.
++old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds
++
++# Commands used to build a shared archive.
++archive_cmds=$lt_archive_cmds
++archive_expsym_cmds=$lt_archive_expsym_cmds
++
++# Commands used to build a loadable module if different from building
++# a shared archive.
++module_cmds=$lt_module_cmds
++module_expsym_cmds=$lt_module_expsym_cmds
++
++# Whether we are building with GNU ld or not.
++with_gnu_ld=$lt_with_gnu_ld
++
++# Flag that allows shared libraries with undefined symbols to be built.
++allow_undefined_flag=$lt_allow_undefined_flag
++
++# Flag that enforces no undefined symbols.
++no_undefined_flag=$lt_no_undefined_flag
++
++# Flag to hardcode \$libdir into a binary during linking.
++# This must work even if \$libdir does not exist
++hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
++
++# Whether we need a single "-rpath" flag with a separated argument.
++hardcode_libdir_separator=$lt_hardcode_libdir_separator
++
++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# DIR into the resulting binary.
++hardcode_direct=$hardcode_direct
++
++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# DIR into the resulting binary and the resulting library dependency is
++# "absolute",i.e impossible to change by setting \$shlibpath_var if the
++# library is relocated.
++hardcode_direct_absolute=$hardcode_direct_absolute
++
++# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
++# into the resulting binary.
++hardcode_minus_L=$hardcode_minus_L
++
++# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
++# into the resulting binary.
++hardcode_shlibpath_var=$hardcode_shlibpath_var
++
++# Set to "yes" if building a shared library automatically hardcodes DIR
++# into the library and all subsequent libraries and executables linked
++# against it.
++hardcode_automatic=$hardcode_automatic
++
++# Set to yes if linker adds runtime paths of dependent libraries
++# to runtime path list.
++inherit_rpath=$inherit_rpath
++
++# Whether libtool must link a program against all its dependency libraries.
++link_all_deplibs=$link_all_deplibs
++
++# Set to "yes" if exported symbols are required.
++always_export_symbols=$always_export_symbols
++
++# The commands to list exported symbols.
++export_symbols_cmds=$lt_export_symbols_cmds
++
++# Symbols that should not be listed in the preloaded symbols.
++exclude_expsyms=$lt_exclude_expsyms
++
++# Symbols that must always be exported.
++include_expsyms=$lt_include_expsyms
++
++# Commands necessary for linking programs (against libraries) with templates.
++prelink_cmds=$lt_prelink_cmds
++
++# Commands necessary for finishing linking programs.
++postlink_cmds=$lt_postlink_cmds
++
++# Specify filename containing input files.
++file_list_spec=$lt_file_list_spec
++
++# How to hardcode a shared library path into an executable.
++hardcode_action=$hardcode_action
++
++# The directories searched by this compiler when creating a shared library.
++compiler_lib_search_dirs=$lt_compiler_lib_search_dirs
++
++# Dependencies to place before and after the objects being linked to
++# create a shared library.
++predep_objects=$lt_predep_objects
++postdep_objects=$lt_postdep_objects
++predeps=$lt_predeps
++postdeps=$lt_postdeps
++
++# The library search path used internally by the compiler when linking
++# a shared library.
++compiler_lib_search_path=$lt_compiler_lib_search_path
++
++# ### END LIBTOOL CONFIG
++
++_LT_EOF
++
++    cat <<'_LT_EOF' >> "$cfgfile"
++
++# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE
++
++# func_munge_path_list VARIABLE PATH
++# -----------------------------------
++# VARIABLE is name of variable containing _space_ separated list of
++# directories to be munged by the contents of PATH, which is string
++# having a format:
++# "DIR[:DIR]:"
++#       string "DIR[ DIR]" will be prepended to VARIABLE
++# ":DIR[:DIR]"
++#       string "DIR[ DIR]" will be appended to VARIABLE
++# "DIRP[:DIRP]::[DIRA:]DIRA"
++#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
++#       "DIRA[ DIRA]" will be appended to VARIABLE
++# "DIR[:DIR]"
++#       VARIABLE will be replaced by "DIR[ DIR]"
++func_munge_path_list ()
++{
++    case x$2 in
++    x)
++        ;;
++    *:)
++        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\"
++        ;;
++    x:*)
++        eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\"
++        ;;
++    *::*)
++        eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
++        eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
++        ;;
++    *)
++        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\"
++        ;;
++    esac
++}
++
++
++# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
++func_cc_basename ()
++{
++    for cc_temp in $*""; do
++      case $cc_temp in
++        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
++        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
++        \-*) ;;
++        *) break;;
++      esac
++    done
++    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
++}
++
++
++# ### END FUNCTIONS SHARED WITH CONFIGURE
++
++_LT_EOF
++
++  case $host_os in
++  aix3*)
++    cat <<\_LT_EOF >> "$cfgfile"
++# AIX sometimes has problems with the GCC collect2 program.  For some
++# reason, if we set the COLLECT_NAMES environment variable, the problems
++# vanish in a puff of smoke.
++if test set != "${COLLECT_NAMES+set}"; then
++  COLLECT_NAMES=
++  export COLLECT_NAMES
++fi
++_LT_EOF
++    ;;
++  esac
++
++
++
++ltmain=$ac_aux_dir/ltmain.sh
++
++
++  # We use sed instead of cat because bash on DJGPP gets confused if
++  # if finds mixed CR/LF and LF-only lines.  Since sed operates in
++  # text mode, it properly converts lines to CR/LF.  This bash problem
++  # is reportedly fixed, but why not run on old versions too?
++  $SED '$q' "$ltmain" >> "$cfgfile" \
++     || (rm -f "$cfgfile"; exit 1)
++
++   mv -f "$cfgfile" "$ofile" ||
++    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
++  chmod +x "$ofile"
++
++
++    cat <<_LT_EOF >> "$ofile"
++
++# ### BEGIN LIBTOOL TAG CONFIG: CXX
++
++# The linker used to build libraries.
++LD=$lt_LD_CXX
++
++# How to create reloadable object files.
++reload_flag=$lt_reload_flag_CXX
++reload_cmds=$lt_reload_cmds_CXX
++
++# Commands used to build an old-style archive.
++old_archive_cmds=$lt_old_archive_cmds_CXX
++
++# A language specific compiler.
++CC=$lt_compiler_CXX
++
++# Is the compiler the GNU compiler?
++with_gcc=$GCC_CXX
++
++# Compiler flag to turn off builtin functions.
++no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX
++
++# Additional compiler flags for building library objects.
++pic_flag=$lt_lt_prog_compiler_pic_CXX
++
++# How to pass a linker flag through the compiler.
++wl=$lt_lt_prog_compiler_wl_CXX
++
++# Compiler flag to prevent dynamic linking.
++link_static_flag=$lt_lt_prog_compiler_static_CXX
++
++# Does compiler simultaneously support -c and -o options?
++compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX
++
++# Whether or not to add -lc for building shared libraries.
++build_libtool_need_lc=$archive_cmds_need_lc_CXX
++
++# Whether or not to disallow shared libs when runtime libs are static.
++allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX
++
++# Compiler flag to allow reflexive dlopens.
++export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX
++
++# Compiler flag to generate shared objects directly from archives.
++whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX
++
++# Whether the compiler copes with passing no objects directly.
++compiler_needs_object=$lt_compiler_needs_object_CXX
++
++# Create an old-style archive from a shared archive.
++old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX
++
++# Create a temporary old-style archive to link instead of a shared archive.
++old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX
++
++# Commands used to build a shared archive.
++archive_cmds=$lt_archive_cmds_CXX
++archive_expsym_cmds=$lt_archive_expsym_cmds_CXX
++
++# Commands used to build a loadable module if different from building
++# a shared archive.
++module_cmds=$lt_module_cmds_CXX
++module_expsym_cmds=$lt_module_expsym_cmds_CXX
++
++# Whether we are building with GNU ld or not.
++with_gnu_ld=$lt_with_gnu_ld_CXX
++
++# Flag that allows shared libraries with undefined symbols to be built.
++allow_undefined_flag=$lt_allow_undefined_flag_CXX
++
++# Flag that enforces no undefined symbols.
++no_undefined_flag=$lt_no_undefined_flag_CXX
++
++# Flag to hardcode \$libdir into a binary during linking.
++# This must work even if \$libdir does not exist
++hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX
++
++# Whether we need a single "-rpath" flag with a separated argument.
++hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX
++
++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# DIR into the resulting binary.
++hardcode_direct=$hardcode_direct_CXX
++
++# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# DIR into the resulting binary and the resulting library dependency is
++# "absolute",i.e impossible to change by setting \$shlibpath_var if the
++# library is relocated.
++hardcode_direct_absolute=$hardcode_direct_absolute_CXX
++
++# Set to "yes" if using the -LDIR flag during linking hardcodes DIR
++# into the resulting binary.
++hardcode_minus_L=$hardcode_minus_L_CXX
++
++# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
++# into the resulting binary.
++hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX
++
++# Set to "yes" if building a shared library automatically hardcodes DIR
++# into the library and all subsequent libraries and executables linked
++# against it.
++hardcode_automatic=$hardcode_automatic_CXX
++
++# Set to yes if linker adds runtime paths of dependent libraries
++# to runtime path list.
++inherit_rpath=$inherit_rpath_CXX
++
++# Whether libtool must link a program against all its dependency libraries.
++link_all_deplibs=$link_all_deplibs_CXX
++
++# Set to "yes" if exported symbols are required.
++always_export_symbols=$always_export_symbols_CXX
++
++# The commands to list exported symbols.
++export_symbols_cmds=$lt_export_symbols_cmds_CXX
++
++# Symbols that should not be listed in the preloaded symbols.
++exclude_expsyms=$lt_exclude_expsyms_CXX
++
++# Symbols that must always be exported.
++include_expsyms=$lt_include_expsyms_CXX
++
++# Commands necessary for linking programs (against libraries) with templates.
++prelink_cmds=$lt_prelink_cmds_CXX
++
++# Commands necessary for finishing linking programs.
++postlink_cmds=$lt_postlink_cmds_CXX
++
++# Specify filename containing input files.
++file_list_spec=$lt_file_list_spec_CXX
++
++# How to hardcode a shared library path into an executable.
++hardcode_action=$hardcode_action_CXX
++
++# The directories searched by this compiler when creating a shared library.
++compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX
++
++# Dependencies to place before and after the objects being linked to
++# create a shared library.
++predep_objects=$lt_predep_objects_CXX
++postdep_objects=$lt_postdep_objects_CXX
++predeps=$lt_predeps_CXX
++postdeps=$lt_postdeps_CXX
++
++# The library search path used internally by the compiler when linking
++# a shared library.
++compiler_lib_search_path=$lt_compiler_lib_search_path_CXX
++
++# ### END LIBTOOL TAG CONFIG: CXX
++_LT_EOF
++
++ ;;
++
++  esac
++done # for ac_tag
++
++
++as_fn_exit 0
++_ACEOF
++ac_clean_files=$ac_clean_files_save
++
++test $ac_write_fail = 0 ||
++  as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5
++
++
++# configure is writing to config.log, and then calls config.status.
++# config.status does its own redirection, appending to config.log.
++# Unfortunately, on DOS this fails, as config.log is still kept open
++# by configure, so config.status won't be able to write to it; its
++# output is simply discarded.  So we exec the FD to /dev/null,
++# effectively closing config.log, so it can be properly (re)opened and
++# appended to by config.status.  When coming back to configure, we
++# need to make the FD available again.
++if test "$no_create" != yes; then
++  ac_cs_success=:
++  ac_config_status_args=
++  test "$silent" = yes &&
++    ac_config_status_args="$ac_config_status_args --quiet"
++  exec 5>/dev/null
++  $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false
++  exec 5>>config.log
++  # Use ||, not &&, to avoid exiting from the if with $? = 1, which
++  # would make configure fail if this is the last instruction.
++  $ac_cs_success || as_fn_exit 1
++fi
++if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
++  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
++printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
++fi
++
++
++
+diff --git a/bolt-plugin/configure.ac b/bolt-plugin/configure.ac
+new file mode 100644
+index 000000000..111d0ac48
+--- /dev/null
++++ b/bolt-plugin/configure.ac
+@@ -0,0 +1,60 @@
++AC_INIT([bolt plugin for ld],[0.1],[],[bolt-plugin])
++AC_CANONICAL_TARGET
++GCC_TOPLEV_SUBDIRS
++AM_INIT_AUTOMAKE([foreign no-dist])
++AM_MAINTAINER_MODE
++AC_ARG_WITH(libiberty,
++  [AS_HELP_STRING([--with-libiberty=PATH],
++    [specify the directory where to find libiberty [../libiberty]])],
++  [], with_libiberty=../libiberty)
++AC_SUBST(with_libiberty)
++AC_USE_SYSTEM_EXTENSIONS
++AC_PROG_CC
++AC_PROG_CXX
++AC_SYS_LARGEFILE
++ACX_PROG_CC_WARNING_OPTS([-Wall], [ac_bolt_plugin_warn_cflags])
++
++# Check whether -static-libgcc is supported.
++saved_LDFLAGS="$LDFLAGS"
++LDFLAGS="$LDFLAGS -static-libgcc"
++AC_MSG_CHECKING([for -static-libgcc])
++AC_LINK_IFELSE([AC_LANG_SOURCE([
++  int main() {}])], [have_static_libgcc=yes], [have_static_libgcc=no])
++AC_MSG_RESULT($have_static_libgcc); 
++LDFLAGS="$saved_LDFLAGS"
++# Need -Wc to get it through libtool.
++if test "x$have_static_libgcc" = xyes; then
++   ac_bolt_plugin_ldflags="-Wc,-static-libgcc"
++fi
++AC_SUBST(ac_bolt_plugin_ldflags)
++
++if test x"$host_subdir" = x.; then
++   gcc_build_dir=../gcc
++else
++   gcc_build_dir=../../$host_subdir/gcc
++fi
++AC_SUBST(gcc_build_dir)
++
++# Used for constructing correct paths for offload compilers.
++accel_dir_suffix=
++real_target_noncanonical=${target_noncanonical}
++if test x"$enable_as_accelerator_for" != x; then
++  accel_dir_suffix=/accel/${target_noncanonical}
++  real_target_noncanonical=${enable_as_accelerator_for}
++fi
++AC_SUBST(accel_dir_suffix)
++AC_SUBST(real_target_noncanonical)
++
++# Determine what GCC version number to use in filesystem paths.
++GCC_BASE_VER
++
++LT_INIT
++ACX_LT_HOST_FLAGS
++AC_SUBST(target_noncanonical)
++AC_TYPE_INT64_T
++AC_TYPE_UINT64_T
++AC_HEADER_SYS_WAIT
++AC_CONFIG_FILES(Makefile)
++AC_CONFIG_HEADERS(config.h)
++AC_OUTPUT
++
+diff --git a/gcc/common.opt b/gcc/common.opt
+index e69947fc2..44638fe83 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2554,6 +2554,22 @@ fauto-bolt=
+ Common Joined RejectNegative
+ Specify the feedback data directory required by BOLT-plugin.  The default is the current directory.
+ 
++fbolt-use
++Common Var(flag_bolt_use)
++Do BOLT optimization after linkage with BOLT profile read from this option.  The default is data.fdata.
++
++fbolt-use=
++Common Joined RejectNegative Var
++Do BOLT optimization after linkage with BOLT profile read from this option.
++
++fbolt-target=
++Common Joined RejectNegative Var
++Specify the BOLT optimization target binary.
++
++fbolt-option=
++Common Joined RejectNegative Var
++Specify BOLT optimization options separated by commas.
++
+ frerun-cse-after-loop
+ Common Var(flag_rerun_cse_after_loop) Optimization
+ Add a common subexpression elimination pass after loop optimizations.
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 6d57e7d69..2bba88140 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -1283,6 +1283,10 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
+   if (opts->x_flag_auto_bolt && opts->x_flag_lto)
+     sorry ("%<-fauto-bolt%> is not supported with LTO");
+ 
++  /* Currently -fbolt-use is not supported for LTO.  */
++  if (opts->x_flag_bolt_use && opts->x_flag_lto)
++    sorry ("-fbolt-use is not supported with LTO");
++
+   /* Control IPA optimizations based on different -flive-patching level.  */
+   if (opts->x_flag_live_patching)
+     control_options_for_live_patching (opts, opts_set,
+@@ -3284,9 +3288,28 @@ common_handle_option (struct gcc_options *opts,
+       break;
+     
+     case OPT_fauto_bolt_:
++      opts->x_flag_auto_bolt = true;
++      /* FALLTHRU */
+     case OPT_fauto_bolt:
+-      /* Deferred.  */  
+-      break;  
++      if (opts->x_flag_bolt_use)
++        error_at (loc,
++		  "-fauto-bolt conflicts with -fbolt-use.");
++      break;
++
++    case OPT_fbolt_use_:
++    case OPT_fbolt_use:
++      if (opts->x_flag_auto_bolt)
++        error_at (loc,
++		  "-fauto-bolt conflicts with -fbolt-use.");
++    break;
++
++    case OPT_fbolt_target_:
++      /* Deferred.  */
++      break;
++
++    case OPT_fbolt_option_:
++      /* Deferred.  */
++      break;
+ 
+     case OPT_ftabstop_:
+       /* It is documented that we silently ignore silly values.  */
+-- 
+2.33.0
+
diff --git a/0032-LoongArch-Add-code-generation-support-for-call36-fun.patch b/0032-LoongArch-Add-code-generation-support-for-call36-fun.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d4f59d8f1ae5e1436cd5d607558bf6f4f6d179b4
--- /dev/null
+++ b/0032-LoongArch-Add-code-generation-support-for-call36-fun.patch
@@ -0,0 +1,561 @@
+From 5ab014701ddd9968855026f0e2ae1af2b165bcd7 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 16 Nov 2023 15:06:11 +0800
+Subject: [PATCH 032/188] LoongArch: Add code generation support for call36
+ function calls.
+
+When compiling with '-mcmodel=medium', the function call is made through
+'pcaddu18i+jirl' if binutils supports call36, otherwise the
+native implementation 'pcalau12i+jirl' is used.
+
+gcc/ChangeLog:
+
+	* config.in: Regenerate.
+	* config/loongarch/loongarch-opts.h (HAVE_AS_SUPPORT_CALL36): Define macro.
+	* config/loongarch/loongarch.cc (loongarch_legitimize_call_address):
+	If binutils supports call36, the function call is not split over expand.
+	* config/loongarch/loongarch.md: Add call36 generation code.
+	* config/loongarch/predicates.md: Likewise.
+	* configure: Regenerate.
+	* configure.ac: Check whether binutils supports call36.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/func-call-medium-5.c: If the assembler supports call36,
+	the test is skipped.
+	* gcc.target/loongarch/func-call-medium-6.c: Likewise.
+	* gcc.target/loongarch/func-call-medium-7.c: Likewise.
+	* gcc.target/loongarch/func-call-medium-8.c: Likewise.
+	* lib/target-supports.exp: Added a function to see if the assembler supports
+	the call36 relocation.
+	* gcc.target/loongarch/func-call-medium-call36-1.c: New test.
+	* gcc.target/loongarch/func-call-medium-call36.c: New test.
+
+Co-authored-by: Xi Ruoyao 
+---
+ gcc/config.in                                 |   6 +
+ gcc/config/loongarch/loongarch-opts.h         |   4 +
+ gcc/config/loongarch/loongarch.cc             |  12 +-
+ gcc/config/loongarch/loongarch.md             | 171 +++++++++++++++---
+ gcc/config/loongarch/predicates.md            |   7 +-
+ gcc/configure                                 |  32 ++++
+ gcc/configure.ac                              |   6 +
+ .../gcc.target/loongarch/func-call-medium-5.c |   1 +
+ .../gcc.target/loongarch/func-call-medium-6.c |   1 +
+ .../gcc.target/loongarch/func-call-medium-7.c |   1 +
+ .../gcc.target/loongarch/func-call-medium-8.c |   1 +
+ .../loongarch/func-call-medium-call36-1.c     |  21 +++
+ .../loongarch/func-call-medium-call36.c       |  32 ++++
+ gcc/testsuite/lib/target-supports.exp         |   9 +
+ 14 files changed, 268 insertions(+), 36 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-call36-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-call36.c
+
+diff --git a/gcc/config.in b/gcc/config.in
+index 04968b53c..033cfb98b 100644
+--- a/gcc/config.in
++++ b/gcc/config.in
+@@ -759,6 +759,12 @@
+ #endif
+ 
+ 
++/* Define if your assembler supports call36 relocation. */
++#ifndef USED_FOR_TARGET
++#undef HAVE_AS_SUPPORT_CALL36
++#endif
++
++
+ /* Define if your assembler and linker support thread-local storage. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_AS_TLS
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index dfbe9dd5c..22ce1a122 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -99,6 +99,10 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ #define HAVE_AS_EXPLICIT_RELOCS 0
+ #endif
+ 
++#ifndef HAVE_AS_SUPPORT_CALL36
++#define HAVE_AS_SUPPORT_CALL36 0
++#endif
++
+ #ifndef HAVE_AS_MRELAX_OPTION
+ #define HAVE_AS_MRELAX_OPTION 0
+ #endif
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index b6f0d61ef..43f0e82ba 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3002,12 +3002,16 @@ loongarch_legitimize_call_address (rtx addr)
+ 
+   enum loongarch_symbol_type symbol_type = loongarch_classify_symbol (addr);
+ 
+-  /* Split function call insn 'bl sym' or 'bl %plt(sym)' to :
+-     pcalau12i $rd, %pc_hi20(sym)
+-     jr $rd, %pc_lo12(sym).  */
++  /* When compiling with '-mcmodel=medium' and the assembler does not
++     support call36, the following instruction sequence is used for
++     the function call:
++	pcalau12i $rd, %pc_hi20(sym)
++	jr $rd, %pc_lo12(sym)
++  */
+ 
+   if (TARGET_CMODEL_MEDIUM
+-      && TARGET_EXPLICIT_RELOCS
++      && !HAVE_AS_SUPPORT_CALL36
++      && (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
+       && (SYMBOL_REF_P (addr) || LABEL_REF_P (addr))
+       && (symbol_type == SYMBOL_PCREL
+ 	  || (symbol_type == SYMBOL_GOT_DISP && flag_plt)))
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index ed86c95bd..52e40a208 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -3274,7 +3274,13 @@
+ 					    XEXP (target, 1),
+ 					    operands[1]));
+   else
+-    emit_call_insn (gen_sibcall_internal (target, operands[1]));
++    {
++      rtx call = emit_call_insn (gen_sibcall_internal (target, operands[1]));
++
++      if (TARGET_CMODEL_MEDIUM && !REG_P (target))
++	clobber_reg (&CALL_INSN_FUNCTION_USAGE (call),
++		     gen_rtx_REG (Pmode, T0_REGNUM));
++    }
+   DONE;
+ })
+ 
+@@ -3282,10 +3288,25 @@
+   [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,b"))
+ 	 (match_operand 1 "" ""))]
+   "SIBLING_CALL_P (insn)"
+-  "@
+-   jr\t%0
+-   b\t%0
+-   b\t%%plt(%0)"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "jr\t%0";
++    case 1:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r12,%%call36(%0)\n\tjirl\t$r0,$r12,0";
++      else
++	return "b\t%0";
++    case 2:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r12,%%call36(%0)\n\tjirl\t$r0,$r12,0";
++      else
++	return "b\t%%plt(%0)";
++    default:
++      gcc_unreachable ();
++    }
++}
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "@sibcall_internal_1"
+@@ -3318,9 +3339,17 @@
+ 							   operands[2],
+ 							   arg2));
+       else
+-	emit_call_insn (gen_sibcall_value_multiple_internal (arg1, target,
+-							   operands[2],
+-							   arg2));
++	{
++	  rtx call
++	    = emit_call_insn (gen_sibcall_value_multiple_internal (arg1,
++								   target,
++								   operands[2],
++								   arg2));
++
++	  if (TARGET_CMODEL_MEDIUM && !REG_P (target))
++	    clobber_reg (&CALL_INSN_FUNCTION_USAGE (call),
++			gen_rtx_REG (Pmode, T0_REGNUM));
++	}
+     }
+    else
+     {
+@@ -3334,8 +3363,15 @@
+ 						  XEXP (target, 1),
+ 						  operands[2]));
+       else
+-	emit_call_insn (gen_sibcall_value_internal (operands[0], target,
+-						  operands[2]));
++	{
++	  rtx call = emit_call_insn (gen_sibcall_value_internal (operands[0],
++								 target,
++								 operands[2]));
++
++	  if (TARGET_CMODEL_MEDIUM && !REG_P (target))
++	    clobber_reg (&CALL_INSN_FUNCTION_USAGE (call),
++			gen_rtx_REG (Pmode, T0_REGNUM));
++	}
+     }
+   DONE;
+ })
+@@ -3345,10 +3381,25 @@
+ 	(call (mem:SI (match_operand 1 "call_insn_operand" "j,c,b"))
+ 	      (match_operand 2 "" "")))]
+   "SIBLING_CALL_P (insn)"
+-  "@
+-   jr\t%1
+-   b\t%1
+-   b\t%%plt(%1)"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "jr\t%1";
++    case 1:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r12,%%call36(%1)\n\tjirl\t$r0,$r12,0";
++      else
++	return "b\t%1";
++    case 2:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r12,%%call36(%1)\n\tjirl\t$r0,$r12,0";
++      else
++	return "b\t%%plt(%1)";
++    default:
++      gcc_unreachable ();
++    }
++}
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "@sibcall_value_internal_1"
+@@ -3368,10 +3419,25 @@
+ 	(call (mem:SI (match_dup 1))
+ 	      (match_dup 2)))]
+   "SIBLING_CALL_P (insn)"
+-  "@
+-   jr\t%1
+-   b\t%1
+-   b\t%%plt(%1)"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "jr\t%1";
++    case 1:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r12,%%call36(%1)\n\tjirl\t$r0,$r12,0";
++      else
++	return "b\t%1";
++    case 2:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r12,%%call36(%1)\n\tjirl\t$r0,$r12,0";
++      else
++	return "b\t%%plt(%1)";
++    default:
++      gcc_unreachable ();
++    }
++}
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "@sibcall_value_multiple_internal_1"
+@@ -3411,10 +3477,25 @@
+ 	 (match_operand 1 "" ""))
+    (clobber (reg:SI RETURN_ADDR_REGNUM))]
+   ""
+-  "@
+-   jirl\t$r1,%0,0
+-   bl\t%0
+-   bl\t%%plt(%0)"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "jirl\t$r1,%0,0";
++    case 1:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r1,%%call36(%0)\n\tjirl\t$r1,$r1,0";
++      else
++	return "bl\t%0";
++    case 2:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r1,%%call36(%0)\n\tjirl\t$r1,$r1,0";
++      else
++	return "bl\t%%plt(%0)";
++    default:
++      gcc_unreachable ();
++    }
++}
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "@call_internal_1"
+@@ -3473,10 +3554,25 @@
+ 	      (match_operand 2 "" "")))
+    (clobber (reg:SI RETURN_ADDR_REGNUM))]
+   ""
+-  "@
+-   jirl\t$r1,%1,0
+-   bl\t%1
+-   bl\t%%plt(%1)"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "jirl\t$r1,%1,0";
++    case 1:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r1,%%call36(%1)\n\tjirl\t$r1,$r1,0";
++      else
++	return "bl\t%1";
++    case 2:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r1,%%call36(%1)\n\tjirl\t$r1,$r1,0";
++      else
++	return "bl\t%%plt(%1)";
++    default:
++      gcc_unreachable ();
++    }
++}
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "@call_value_internal_1"
+@@ -3498,10 +3594,25 @@
+ 	      (match_dup 2)))
+    (clobber (reg:SI RETURN_ADDR_REGNUM))]
+   ""
+-  "@
+-   jirl\t$r1,%1,0
+-   bl\t%1
+-   bl\t%%plt(%1)"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "jirl\t$r1,%1,0";
++    case 1:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r1,%%call36(%1)\n\tjirl\t$r1,$r1,0";
++      else
++	return "bl\t%1";
++    case 2:
++      if (TARGET_CMODEL_MEDIUM)
++	return "pcaddu18i\t$r1,%%call36(%1)\n\tjirl\t$r1,$r1,0";
++      else
++	return "bl\t%%plt(%1)";
++    default:
++      gcc_unreachable ();
++    }
++}
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "@call_value_multiple_internal_1"
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 1d669f560..2aae87db4 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -443,7 +443,9 @@
+     {
+     case SYMBOL_PCREL:
+       if (TARGET_CMODEL_EXTREME
+-	  || (TARGET_CMODEL_MEDIUM && !TARGET_EXPLICIT_RELOCS))
++	  || (TARGET_CMODEL_MEDIUM
++	      && HAVE_AS_SUPPORT_CALL36
++	      && (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)))
+ 	return false;
+       else
+ 	return 1;
+@@ -452,7 +454,8 @@
+       if (TARGET_CMODEL_EXTREME
+ 	  || !flag_plt
+ 	  || (flag_plt && TARGET_CMODEL_MEDIUM
+-	      && !TARGET_EXPLICIT_RELOCS))
++	      && HAVE_AS_SUPPORT_CALL36
++	      && (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)))
+ 	return false;
+       else
+ 	return 1;
+diff --git a/gcc/configure b/gcc/configure
+index 09bacfec3..5842e7a18 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -28836,6 +28836,38 @@ if test $gcc_cv_as_loongarch_explicit_relocs = yes; then
+ 
+ $as_echo "#define HAVE_AS_EXPLICIT_RELOCS 1" >>confdefs.h
+ 
++fi
++
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for call36 relocation support" >&5
++$as_echo_n "checking assembler for call36 relocation support... " >&6; }
++if ${gcc_cv_as_loongarch_call36+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  gcc_cv_as_loongarch_call36=no
++  if test x$gcc_cv_as != x; then
++    $as_echo 'pcaddu18i $r1, %call36(a)
++       jirl $r1, $r1, 0' > conftest.s
++    if { ac_try='$gcc_cv_as $gcc_cv_as_flags  -o conftest.o conftest.s >&5'
++  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
++  (eval $ac_try) 2>&5
++  ac_status=$?
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; }
++    then
++	gcc_cv_as_loongarch_call36=yes
++    else
++      echo "configure: failed program was" >&5
++      cat conftest.s >&5
++    fi
++    rm -f conftest.o conftest.s
++  fi
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_call36" >&5
++$as_echo "$gcc_cv_as_loongarch_call36" >&6; }
++if test $gcc_cv_as_loongarch_call36 = yes; then
++
++$as_echo "#define HAVE_AS_SUPPORT_CALL36 1" >>confdefs.h
++
+ fi
+ 
+     { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for eh_frame pcrel encoding support" >&5
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index a0999152e..9c3fd3ad6 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -5329,6 +5329,12 @@ x:
+       [a:pcalau12i $t0,%pc_hi20(a)],,
+       [AC_DEFINE(HAVE_AS_EXPLICIT_RELOCS, 1,
+ 	  [Define if your assembler supports explicit relocation.])])
++    gcc_GAS_CHECK_FEATURE([call36 relocation support],
++      gcc_cv_as_loongarch_call36,,
++      [pcaddu18i $r1, %call36(a)
++       jirl $r1, $r1, 0],,
++      [AC_DEFINE(HAVE_AS_SUPPORT_CALL36, 1,
++	  [Define if your assembler supports call36 relocation.])])
+     gcc_GAS_CHECK_FEATURE([eh_frame pcrel encoding support],
+       gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support,,
+       [.cfi_startproc
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c
+index 8a47b5afc..cae880bd8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c
+@@ -1,4 +1,5 @@
+ /* { dg-do compile } */
++/* { dg-skip-if "dg-require-effective-target loongarch_call36_support" { *-*-* } } */
+ /* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i.*%pc_hi20\\(g\\)\n\tjirl.*pc_lo12\\(g\\)" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c
+index 1e75e60e0..33819542d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c
+@@ -1,4 +1,5 @@
+ /* { dg-do compile } */
++/* { dg-skip-if "dg-require-effective-target loongarch_call36_support" { *-*-* } } */
+ /* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i.*%pc_hi20\\(g\\)\n\tjirl.*pc_lo12\\(g\\)" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c
+index 9e89085ca..969b59d04 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c
+@@ -1,4 +1,5 @@
+ /* { dg-do compile } */
++/* { dg-skip-if "dg-require-effective-target loongarch_call36_support" { *-*-* } } */
+ /* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%got_pc_hi20\\(f\\)\n\tld\.d\t.*%got_pc_lo12\\(f\\)\n\tjirl" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c
+index fde9c6e0e..786ff395f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c
+@@ -1,4 +1,5 @@
+ /* { dg-do compile } */
++/* { dg-skip-if "dg-require-effective-target loongarch_call36_support" { *-*-* } } */
+ /* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-call36-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-call36-1.c
+new file mode 100644
+index 000000000..872ff32f8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-call36-1.c
+@@ -0,0 +1,21 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target loongarch_call36_support } */
++/* { dg-options "-mcmodel=medium -mexplicit-relocs -fdump-rtl-final -O2" } */
++/* { dg-final { scan-assembler "test:.*pcaddu18i\t\\\$r1,%call36\\(func\\)" } } */
++/* { dg-final { scan-assembler "test_value:.*pcaddu18i\t\\\$r1,%call36\\(func_value\\)" } } */
++
++extern void func (void);
++int
++test (void)
++{
++  func ();
++}
++
++
++extern int func_value (void);
++float
++test_value (void)
++{
++  func_value ();
++}
++
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-call36.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-call36.c
+new file mode 100644
+index 000000000..98ccd260d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-call36.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-require-effective-target loongarch_call36_support } */
++/* { dg-options "-mcmodel=medium -mexplicit-relocs -fdump-rtl-final -O2" } */
++/* { dg-final { scan-rtl-dump-times "\\(clobber \\(reg:DI 12 \\\$r12\\)\\)" 3 "final" } } */
++/* { dg-final { scan-assembler "test:.*pcaddu18i\t\\\$r12,%call36\\(func\\)" } } */
++/* { dg-final { scan-assembler "test_value:.*pcaddu18i\t\\\$r12,%call36\\(func_value\\)" } } */
++/* { dg-final { scan-assembler "test_multi:.*pcaddu18i\t\\\$r12,%call36\\(func_multi\\)" } } */
++
++extern void func (void);
++void
++test (void)
++{
++  func();
++}
++
++
++extern int func_value (void);
++int
++test_value (void)
++{
++  func_value ();
++}
++
++struct t {float a; float b;};
++
++extern struct t func_multi (void);
++struct t
++test_multi (void)
++{
++  func_multi ();
++}
++
+diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
+index bbe145c1c..b8bff1a31 100644
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -10573,6 +10573,15 @@ proc check_effective_target_loongarch_asx_hw { } {
+     } "-mlasx"]
+ }
+ 
++# Check whether LoongArch binutils supports call36 relocation.
++proc check_effective_target_loongarch_call36_support { } {
++  return [check_no_compiler_messages loongarch_call36_support object {
++/* Assembly code */
++   pcaddu18i $r1,%call36(a)
++   jirl $r1,$r1,0
++  } ""]
++}
++
+ # Return 1 if the target does *not* require strict alignment.
+ 
+ proc check_effective_target_non_strict_align {} {
+-- 
+2.43.0
+
diff --git a/0033-AutoBOLT-Enable-BOLT-linker-plugin-on-aarch64-3-3.patch b/0033-AutoBOLT-Enable-BOLT-linker-plugin-on-aarch64-3-3.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e850a4dfbd2ffdccf0ae0b1469b5287d6ae6719f
--- /dev/null
+++ b/0033-AutoBOLT-Enable-BOLT-linker-plugin-on-aarch64-3-3.patch
@@ -0,0 +1,345 @@
+From 94242286383a80e6ab83d824a4d7ea23ea311f75 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Mon, 22 Jan 2024 15:38:24 +0800
+Subject: [PATCH] [AutoBOLT] Enable BOLT linker plugin on aarch64 3/3
+
+---
+ Makefile.def     | 10 ++++++++++
+ configure        | 27 ++++++++++++++++++++++++++-
+ configure.ac     | 22 +++++++++++++++++++++-
+ gcc/config.host  |  1 +
+ gcc/config.in    | 13 +++++++++++++
+ gcc/configure    | 10 ++++++++--
+ gcc/configure.ac |  4 ++++
+ gcc/gcc.cc       | 23 +++++++++++++++++++++++
+ 8 files changed, 106 insertions(+), 4 deletions(-)
+
+diff --git a/Makefile.def b/Makefile.def
+index 72d585496..0ba868890 100644
+--- a/Makefile.def
++++ b/Makefile.def
+@@ -145,6 +145,9 @@ host_modules= { module= gnattools; };
+ host_modules= { module= lto-plugin; bootstrap=true;
+ 		extra_configure_flags='--enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@';
+ 		extra_make_flags='@extra_linker_plugin_flags@'; };
++host_modules= { module= bolt-plugin; bootstrap=true;
++		extra_configure_flags='--enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@';
++		extra_make_flags='@extra_linker_plugin_flags@'; };
+ host_modules= { module= libcc1; extra_configure_flags=--enable-shared; };
+ host_modules= { module= gotools; };
+ host_modules= { module= libctf; bootstrap=true; };
+@@ -349,6 +352,7 @@ dependencies = { module=configure-gcc; on=all-mpfr; };
+ dependencies = { module=configure-gcc; on=all-mpc; };
+ dependencies = { module=configure-gcc; on=all-isl; };
+ dependencies = { module=configure-gcc; on=all-lto-plugin; };
++dependencies = { module=configure-gcc; on=all-bolt-plugin; };
+ dependencies = { module=configure-gcc; on=all-binutils; };
+ dependencies = { module=configure-gcc; on=all-gas; };
+ dependencies = { module=configure-gcc; on=all-ld; };
+@@ -374,6 +378,7 @@ dependencies = { module=all-gcc; on=all-libdecnumber; hard=true; };
+ dependencies = { module=all-gcc; on=all-libiberty; };
+ dependencies = { module=all-gcc; on=all-fixincludes; };
+ dependencies = { module=all-gcc; on=all-lto-plugin; };
++dependencies = { module=all-gcc; on=all-bolt-plugin; };
+ dependencies = { module=all-gcc; on=all-libiconv; };
+ dependencies = { module=info-gcc; on=all-build-libiberty; };
+ dependencies = { module=dvi-gcc; on=all-build-libiberty; };
+@@ -381,8 +386,10 @@ dependencies = { module=pdf-gcc; on=all-build-libiberty; };
+ dependencies = { module=html-gcc; on=all-build-libiberty; };
+ dependencies = { module=install-gcc ; on=install-fixincludes; };
+ dependencies = { module=install-gcc ; on=install-lto-plugin; };
++dependencies = { module=install-gcc ; on=install-bolt-plugin; };
+ dependencies = { module=install-strip-gcc ; on=install-strip-fixincludes; };
+ dependencies = { module=install-strip-gcc ; on=install-strip-lto-plugin; };
++dependencies = { module=install-strip-gcc ; on=install-strip-bolt-plugin; };
+ 
+ dependencies = { module=configure-libcpp; on=configure-libiberty; hard=true; };
+ dependencies = { module=configure-libcpp; on=configure-intl; };
+@@ -401,6 +408,9 @@ dependencies = { module=all-gnattools; on=all-target-libstdc++-v3; };
+ dependencies = { module=all-lto-plugin; on=all-libiberty; };
+ dependencies = { module=all-lto-plugin; on=all-libiberty-linker-plugin; };
+ 
++dependencies = { module=all-bolt-plugin; on=all-libiberty; };
++dependencies = { module=all-bolt-plugin; on=all-libiberty-linker-plugin; };
++
+ dependencies = { module=configure-libcc1; on=configure-gcc; };
+ dependencies = { module=all-libcc1; on=all-gcc; };
+ 
+diff --git a/configure b/configure
+index 5dcaab14a..aff62c464 100755
+--- a/configure
++++ b/configure
+@@ -826,6 +826,7 @@ with_isl
+ with_isl_include
+ with_isl_lib
+ enable_isl_version_check
++enable_bolt
+ enable_lto
+ enable_linker_plugin_configure_flags
+ enable_linker_plugin_flags
+@@ -1550,6 +1551,7 @@ Optional Features:
+                           enable the PGO build
+   --disable-isl-version-check
+                           disable check for isl version
++  --enable-bolt           enable bolt optimization support
+   --enable-lto            enable link time optimization support
+   --enable-linker-plugin-configure-flags=FLAGS
+                           additional flags for configuring linker plugins
+@@ -8564,6 +8566,15 @@ fi
+ 
+ 
+ 
++# Check for BOLT support.
++# Check whether --enable-bolt was given.
++if test "${enable_bolt+set}" = set; then :
++  enableval=$enable_bolt; enable_bolt=$enableval
++else
++  enable_bolt=no; default_enable_bolt=no
++fi
++
++
+ # Check for LTO support.
+ # Check whether --enable-lto was given.
+ if test "${enable_lto+set}" = set; then :
+@@ -8593,6 +8604,16 @@ if test $target_elf = yes; then :
+   # ELF platforms build the lto-plugin always.
+   build_lto_plugin=yes
+ 
++  # ELF platforms can build the bolt-plugin.
++  # NOT BUILD BOLT BY DEFAULT.
++  case $target in
++    aarch64*-*-linux*)
++    if test $enable_bolt = yes; then :
++      build_bolt_plugin=yes
++    fi
++    ;;
++  esac
++
+ else
+   if test x"$default_enable_lto" = x"yes" ; then
+     case $target in
+@@ -8780,6 +8801,10 @@ if test -d ${srcdir}/gcc; then
+     fi
+   fi
+ 
++  if test "${build_bolt_plugin}" = "yes" ; then
++      configdirs="$configdirs bolt-plugin"
++  fi
++
+   # If we're building an offloading compiler, add the LTO front end.
+   if test x"$enable_as_accelerator_for" != x ; then
+     case ,${enable_languages}, in
+@@ -9202,7 +9227,7 @@ fi
+ extra_host_libiberty_configure_flags=
+ extra_host_zlib_configure_flags=
+ case " $configdirs " in
+-  *" lto-plugin "* | *" libcc1 "*)
++  *" lto-plugin "* | *" libcc1 "* | *" bolt-plugin "*)
+     # When these are to be built as shared libraries, the same applies to
+     # libiberty.
+     extra_host_libiberty_configure_flags=--enable-shared
+diff --git a/configure.ac b/configure.ac
+index 85977482a..f310d75ca 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -1863,6 +1863,12 @@ fi
+ AC_SUBST(isllibs)
+ AC_SUBST(islinc)
+ 
++# Check for BOLT support.
++AC_ARG_ENABLE(bolt,
++[AS_HELP_STRING([--enable-bolt], [enable bolt optimization support])],
++enable_bolt=$enableval,
++enable_bolt=no; default_enable_bolt=no)
++
+ # Check for LTO support.
+ AC_ARG_ENABLE(lto,
+ [AS_HELP_STRING([--enable-lto], [enable link time optimization support])],
+@@ -1871,6 +1877,16 @@ enable_lto=yes; default_enable_lto=yes)
+ 
+ ACX_ELF_TARGET_IFELSE([# ELF platforms build the lto-plugin always.
+   build_lto_plugin=yes
++
++  # ELF platforms can build the bolt-plugin.
++  # NOT BUILD BOLT BY DEFAULT.
++  case $target in
++    aarch64*-*-linux*)
++    if test $enable_bolt = yes; then :
++      build_bolt_plugin=yes
++    fi
++    ;;
++  esac
+ ],[if test x"$default_enable_lto" = x"yes" ; then
+     case $target in
+       *-apple-darwin[[912]]* | *-cygwin* | *-mingw* | *djgpp*) ;;
+@@ -2049,6 +2065,10 @@ if test -d ${srcdir}/gcc; then
+     fi
+   fi
+ 
++  if test "${build_bolt_plugin}" = "yes" ; then
++      configdirs="$configdirs bolt-plugin"
++  fi
++
+   # If we're building an offloading compiler, add the LTO front end.
+   if test x"$enable_as_accelerator_for" != x ; then
+     case ,${enable_languages}, in
+@@ -2457,7 +2477,7 @@ fi
+ extra_host_libiberty_configure_flags=
+ extra_host_zlib_configure_flags=
+ case " $configdirs " in
+-  *" lto-plugin "* | *" libcc1 "*)
++  *" lto-plugin "* | *" libcc1 "* | *" bolt-plugin "*)    
+     # When these are to be built as shared libraries, the same applies to
+     # libiberty.
+     extra_host_libiberty_configure_flags=--enable-shared
+diff --git a/gcc/config.host b/gcc/config.host
+index 4ca300f11..bf7dcb4cc 100644
+--- a/gcc/config.host
++++ b/gcc/config.host
+@@ -75,6 +75,7 @@ out_host_hook_obj=host-default.o
+ host_can_use_collect2=yes
+ use_long_long_for_widest_fast_int=no
+ host_lto_plugin_soname=liblto_plugin.so
++host_bolt_plugin_soname=libbolt_plugin.so
+ 
+ # Unsupported hosts list.  Generally, only include hosts known to fail here,
+ # since we allow hosts not listed to be supported generically.
+diff --git a/gcc/config.in b/gcc/config.in
+index 64c27c9cf..6bb25b25b 100644
+--- a/gcc/config.in
++++ b/gcc/config.in
+@@ -24,6 +24,13 @@
+ #endif
+ 
+ 
++/* Define to the name of the BOLT plugin DSO that must be passed to the
++   linker's -plugin=LIB option. */
++#ifndef USED_FOR_TARGET
++#undef BOLTPLUGINSONAME
++#endif
++
++
+ /* Define to the root for URLs about GCC changes. */
+ #ifndef USED_FOR_TARGET
+ #undef CHANGES_ROOT_URL
+@@ -2208,6 +2215,12 @@
+ #endif
+ 
+ 
++/* Define which stat syscall is able to handle 64bit indodes. */
++#ifndef USED_FOR_TARGET
++#undef HOST_STAT_FOR_64BIT_INODES
++#endif
++
++
+ /* Define as const if the declaration of iconv() needs const. */
+ #ifndef USED_FOR_TARGET
+ #undef ICONV_CONST
+diff --git a/gcc/configure b/gcc/configure
+index 98bbf0f85..30f386789 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -13578,6 +13578,12 @@ case $use_collect2 in
+ esac
+ 
+ 
++cat >>confdefs.h <<_ACEOF
++#define BOLTPLUGINSONAME "${host_bolt_plugin_soname}"
++_ACEOF
++
++
++
+ cat >>confdefs.h <<_ACEOF
+ #define LTOPLUGINSONAME "${host_lto_plugin_soname}"
+ _ACEOF
+@@ -19668,7 +19674,7 @@ else
+   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+   lt_status=$lt_dlunknown
+   cat > conftest.$ac_ext <<_LT_EOF
+-#line 19671 "configure"
++#line 19677 "configure"
+ #include "confdefs.h"
+ 
+ #if HAVE_DLFCN_H
+@@ -19774,7 +19780,7 @@ else
+   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+   lt_status=$lt_dlunknown
+   cat > conftest.$ac_ext <<_LT_EOF
+-#line 19777 "configure"
++#line 19783 "configure"
+ #include "confdefs.h"
+ 
+ #if HAVE_DLFCN_H
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index c74f4b555..dd6cd60f8 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -2531,6 +2531,10 @@ case $use_collect2 in
+     ;;
+ esac
+ 
++AC_DEFINE_UNQUOTED(BOLTPLUGINSONAME,"${host_bolt_plugin_soname}",
++[Define to the name of the BOLT plugin DSO that must be
++  passed to the linker's -plugin=LIB option.])
++
+ AC_DEFINE_UNQUOTED(LTOPLUGINSONAME,"${host_lto_plugin_soname}",
+ [Define to the name of the LTO plugin DSO that must be
+   passed to the linker's -plugin=LIB option.])
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index fbcc9d033..b0d03430e 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -1156,6 +1156,8 @@ proper position among the other output files.  */
+ %{!fsyntax-only:%{!c:%{!M:%{!MM:%{!E:%{!S:\
+     %(linker) " \
+     LINK_PLUGIN_SPEC \
++   "%{fauto-bolt|fauto-bolt=*|fbolt-use|fbolt-use=*: \
++    -plugin %(linker_auto_bolt_plugin_file) }"\
+    "%{flto|flto=*:%
+Date: Fri, 17 Nov 2023 15:42:53 +0800
+Subject: [PATCH 033/188] LoongArch: Implement atomic operations using
+ LoongArch1.1 instructions.
+
+1. short and char type calls for atomic_add_fetch and __atomic_fetch_add are
+   implemented using amadd{_db}.{b/h}.
+2. Use amcas{_db}.{b/h/w/d} to implement __atomic_compare_exchange_n and __atomic_compare_exchange.
+3. The short and char types of the functions __atomic_exchange and __atomic_exchange_n are
+   implemented using amswap{_db}.{b/h}.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-def.h: Add comments.
+	* config/loongarch/loongarch-opts.h (ISA_BASE_IS_LA64V110): Define macro.
+	* config/loongarch/loongarch.cc (loongarch_memmodel_needs_rel_acq_fence):
+	Remove redundant code implementations.
+	* config/loongarch/sync.md (d): Added QI, HI support.
+	(atomic_add): New template.
+	(atomic_exchange_short): Likewise.
+	(atomic_cas_value_strong_amcas): Likewise.
+	(atomic_fetch_add_short): Likewise.
+---
+ gcc/config/loongarch/loongarch-def.h  |   2 +
+ gcc/config/loongarch/loongarch-opts.h |   2 +-
+ gcc/config/loongarch/loongarch.cc     |   6 +-
+ gcc/config/loongarch/sync.md          | 186 ++++++++++++++++++++------
+ 4 files changed, 147 insertions(+), 49 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index 4757de14b..078d8607d 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -54,7 +54,9 @@ extern "C" {
+ 
+ /* enum isa_base */
+ extern const char* loongarch_isa_base_strings[];
++/* LoongArch V1.00.  */
+ #define ISA_BASE_LA64V100     0
++/* LoongArch V1.10.  */
+ #define ISA_BASE_LA64V110     1
+ #define N_ISA_BASE_TYPES      2
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 22ce1a122..9b3d023ac 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -86,10 +86,10 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ 				   || la_target.isa.simd == ISA_EXT_SIMD_LASX)
+ #define ISA_HAS_LASX		  (la_target.isa.simd == ISA_EXT_SIMD_LASX)
+ 
+-
+ /* TARGET_ macros for use in *.md template conditionals */
+ #define TARGET_uARCH_LA464	  (la_target.cpu_tune == CPU_LA464)
+ #define TARGET_uARCH_LA664	  (la_target.cpu_tune == CPU_LA664)
++#define ISA_BASE_IS_LA64V110	  (la_target.isa.base == ISA_BASE_LA64V110)
+ 
+ /* Note: optimize_size may vary across functions,
+    while -m[no]-memcpy imposes a global constraint.  */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 43f0e82ba..7bb46a45d 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5813,16 +5813,12 @@ loongarch_print_operand_punct_valid_p (unsigned char code)
+ static bool
+ loongarch_memmodel_needs_rel_acq_fence (enum memmodel model)
+ {
+-  switch (model)
++  switch (memmodel_base (model))
+     {
+       case MEMMODEL_ACQ_REL:
+       case MEMMODEL_SEQ_CST:
+-      case MEMMODEL_SYNC_SEQ_CST:
+       case MEMMODEL_RELEASE:
+-      case MEMMODEL_SYNC_RELEASE:
+       case MEMMODEL_ACQUIRE:
+-      case MEMMODEL_CONSUME:
+-      case MEMMODEL_SYNC_ACQUIRE:
+ 	return true;
+ 
+       case MEMMODEL_RELAXED:
+diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
+index dd1f98946..1eabaec04 100644
+--- a/gcc/config/loongarch/sync.md
++++ b/gcc/config/loongarch/sync.md
+@@ -38,7 +38,7 @@
+   [(plus "add") (ior "or") (xor "xor") (and "and")])
+ 
+ ;; This attribute gives the format suffix for atomic memory operations.
+-(define_mode_attr amo [(SI "w") (DI "d")])
++(define_mode_attr amo [(QI "b") (HI "h") (SI "w") (DI "d")])
+ 
+ ;;  expands to the name of the atomic operand that implements a
+ ;; particular code.
+@@ -123,7 +123,18 @@
+ 	 UNSPEC_SYNC_OLD_OP))]
+   ""
+   "am%A2.\t$zero,%z1,%0"
+-  [(set (attr "length") (const_int 8))])
++  [(set (attr "length") (const_int 4))])
++
++(define_insn "atomic_add"
++  [(set (match_operand:SHORT 0 "memory_operand" "+ZB")
++	(unspec_volatile:SHORT
++	  [(plus:SHORT (match_dup 0)
++		       (match_operand:SHORT 1 "reg_or_0_operand" "rJ"))
++	   (match_operand:SI 2 "const_int_operand")] ;; model
++	 UNSPEC_SYNC_OLD_OP))]
++  "ISA_BASE_IS_LA64V110"
++  "amadd%A2.\t$zero,%z1,%0"
++  [(set (attr "length") (const_int 4))])
+ 
+ (define_insn "atomic_fetch_"
+   [(set (match_operand:GPR 0 "register_operand" "=&r")
+@@ -131,12 +142,12 @@
+    (set (match_dup 1)
+ 	(unspec_volatile:GPR
+ 	  [(any_atomic:GPR (match_dup 1)
+-		     (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
++			   (match_operand:GPR 2 "reg_or_0_operand" "rJ"))
+ 	   (match_operand:SI 3 "const_int_operand")] ;; model
+ 	 UNSPEC_SYNC_OLD_OP))]
+   ""
+   "am%A3.\t%0,%z2,%1"
+-  [(set (attr "length") (const_int 8))])
++  [(set (attr "length") (const_int 4))])
+ 
+ (define_insn "atomic_exchange"
+   [(set (match_operand:GPR 0 "register_operand" "=&r")
+@@ -148,7 +159,19 @@
+ 	(match_operand:GPR 2 "register_operand" "r"))]
+   ""
+   "amswap%A3.\t%0,%z2,%1"
+-  [(set (attr "length") (const_int 8))])
++  [(set (attr "length") (const_int 4))])
++
++(define_insn "atomic_exchange_short"
++  [(set (match_operand:SHORT 0 "register_operand" "=&r")
++	(unspec_volatile:SHORT
++	  [(match_operand:SHORT 1 "memory_operand" "+ZB")
++	   (match_operand:SI 3 "const_int_operand")] ;; model
++	  UNSPEC_SYNC_EXCHANGE))
++   (set (match_dup 1)
++	(match_operand:SHORT 2 "register_operand" "r"))]
++  "ISA_BASE_IS_LA64V110"
++  "amswap%A3.\t%0,%z2,%1"
++  [(set (attr "length") (const_int 4))])
+ 
+ (define_insn "atomic_cas_value_strong"
+   [(set (match_operand:GPR 0 "register_operand" "=&r")
+@@ -156,25 +179,36 @@
+    (set (match_dup 1)
+ 	(unspec_volatile:GPR [(match_operand:GPR 2 "reg_or_0_operand" "rJ")
+ 			      (match_operand:GPR 3 "reg_or_0_operand" "rJ")
+-			      (match_operand:SI 4 "const_int_operand")  ;; mod_s
+-			      (match_operand:SI 5 "const_int_operand")] ;; mod_f
++			      (match_operand:SI 4 "const_int_operand")]  ;; mod_s
+ 	 UNSPEC_COMPARE_AND_SWAP))
+-   (clobber (match_scratch:GPR 6 "=&r"))]
++   (clobber (match_scratch:GPR 5 "=&r"))]
+   ""
+ {
+   return "1:\\n\\t"
+ 	 "ll.\\t%0,%1\\n\\t"
+ 	 "bne\\t%0,%z2,2f\\n\\t"
+-	 "or%i3\\t%6,$zero,%3\\n\\t"
+-	 "sc.\\t%6,%1\\n\\t"
+-	 "beqz\\t%6,1b\\n\\t"
++	 "or%i3\\t%5,$zero,%3\\n\\t"
++	 "sc.\\t%5,%1\\n\\t"
++	 "beqz\\t%5,1b\\n\\t"
+ 	 "b\\t3f\\n\\t"
+ 	 "2:\\n\\t"
+-	 "%G5\\n\\t"
++	 "%G4\\n\\t"
+ 	 "3:\\n\\t";
+ }
+   [(set (attr "length") (const_int 28))])
+ 
++(define_insn "atomic_cas_value_strong_amcas"
++  [(set (match_operand:QHWD 0 "register_operand" "=&r")
++	(match_operand:QHWD 1 "memory_operand" "+ZB"))
++   (set (match_dup 1)
++	(unspec_volatile:QHWD [(match_operand:QHWD 2 "reg_or_0_operand" "rJ")
++			       (match_operand:QHWD 3 "reg_or_0_operand" "rJ")
++			       (match_operand:SI 4 "const_int_operand")]  ;; mod_s
++	 UNSPEC_COMPARE_AND_SWAP))]
++  "ISA_BASE_IS_LA64V110"
++  "ori\t%0,%z2,0\n\tamcas%A4.\t%0,%z3,%1"
++  [(set (attr "length") (const_int 8))])
++
+ (define_expand "atomic_compare_and_swap"
+   [(match_operand:SI 0 "register_operand" "")   ;; bool output
+    (match_operand:GPR 1 "register_operand" "")  ;; val output
+@@ -186,9 +220,29 @@
+    (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
+   ""
+ {
+-  emit_insn (gen_atomic_cas_value_strong (operands[1], operands[2],
+-						operands[3], operands[4],
+-						operands[6], operands[7]));
++  rtx mod_s, mod_f;
++
++  mod_s = operands[6];
++  mod_f = operands[7];
++
++  /* Normally the succ memory model must be stronger than fail, but in the
++     unlikely event of fail being ACQUIRE and succ being RELEASE we need to
++     promote succ to ACQ_REL so that we don't lose the acquire semantics.  */
++
++  if (is_mm_acquire (memmodel_base (INTVAL (mod_f)))
++      && is_mm_release (memmodel_base (INTVAL (mod_s))))
++    mod_s = GEN_INT (MEMMODEL_ACQ_REL);
++
++  operands[6] = mod_s;
++
++  if (ISA_BASE_IS_LA64V110)
++    emit_insn (gen_atomic_cas_value_strong_amcas (operands[1], operands[2],
++							 operands[3], operands[4],
++							 operands[6]));
++  else
++    emit_insn (gen_atomic_cas_value_strong (operands[1], operands[2],
++						  operands[3], operands[4],
++						  operands[6]));
+ 
+   rtx compare = operands[1];
+   if (operands[3] != const0_rtx)
+@@ -292,31 +346,53 @@
+    (match_operand:SI 7 "const_int_operand" "")] ;; mod_f
+   ""
+ {
+-  union loongarch_gen_fn_ptrs generator;
+-  generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si;
+-  loongarch_expand_atomic_qihi (generator, operands[1], operands[2],
+-				operands[3], operands[4], operands[7]);
++  rtx mod_s, mod_f;
+ 
+-  rtx compare = operands[1];
+-  if (operands[3] != const0_rtx)
+-    {
+-      machine_mode mode = GET_MODE (operands[3]);
+-      rtx op1 = convert_modes (SImode, mode, operands[1], true);
+-      rtx op3 = convert_modes (SImode, mode, operands[3], true);
+-      rtx difference = gen_rtx_MINUS (SImode, op1, op3);
+-      compare = gen_reg_rtx (SImode);
+-      emit_insn (gen_rtx_SET (compare, difference));
+-    }
++  mod_s = operands[6];
++  mod_f = operands[7];
+ 
+-  if (word_mode != mode)
++  /* Normally the succ memory model must be stronger than fail, but in the
++     unlikely event of fail being ACQUIRE and succ being RELEASE we need to
++     promote succ to ACQ_REL so that we don't lose the acquire semantics.  */
++
++  if (is_mm_acquire (memmodel_base (INTVAL (mod_f)))
++      && is_mm_release (memmodel_base (INTVAL (mod_s))))
++    mod_s = GEN_INT (MEMMODEL_ACQ_REL);
++
++  operands[6] = mod_s;
++
++  if (ISA_BASE_IS_LA64V110)
++    emit_insn (gen_atomic_cas_value_strong_amcas (operands[1], operands[2],
++						       operands[3], operands[4],
++						       operands[6]));
++  else
+     {
+-      rtx reg = gen_reg_rtx (word_mode);
+-      emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)));
+-      compare = reg;
++      union loongarch_gen_fn_ptrs generator;
++      generator.fn_7 = gen_atomic_cas_value_cmp_and_7_si;
++      loongarch_expand_atomic_qihi (generator, operands[1], operands[2],
++				    operands[3], operands[4], operands[6]);
+     }
+ 
+-  emit_insn (gen_rtx_SET (operands[0],
+-			  gen_rtx_EQ (SImode, compare, const0_rtx)));
++      rtx compare = operands[1];
++      if (operands[3] != const0_rtx)
++	{
++	  machine_mode mode = GET_MODE (operands[3]);
++	  rtx op1 = convert_modes (SImode, mode, operands[1], true);
++	  rtx op3 = convert_modes (SImode, mode, operands[3], true);
++	  rtx difference = gen_rtx_MINUS (SImode, op1, op3);
++	  compare = gen_reg_rtx (SImode);
++	  emit_insn (gen_rtx_SET (compare, difference));
++	}
++
++      if (word_mode != mode)
++	{
++	  rtx reg = gen_reg_rtx (word_mode);
++	  emit_insn (gen_rtx_SET (reg, gen_rtx_SIGN_EXTEND (word_mode, compare)));
++	  compare = reg;
++	}
++
++      emit_insn (gen_rtx_SET (operands[0],
++			      gen_rtx_EQ (SImode, compare, const0_rtx)));
+   DONE;
+ })
+ 
+@@ -505,13 +581,31 @@
+ 	(match_operand:SHORT 2 "register_operand"))]
+   ""
+ {
+-  union loongarch_gen_fn_ptrs generator;
+-  generator.fn_7 = gen_atomic_cas_value_exchange_7_si;
+-  loongarch_expand_atomic_qihi (generator, operands[0], operands[1],
+-				const0_rtx, operands[2], operands[3]);
++  if (ISA_BASE_IS_LA64V110)
++    emit_insn (gen_atomic_exchange_short (operands[0], operands[1], operands[2], operands[3]));
++  else
++    {
++      union loongarch_gen_fn_ptrs generator;
++      generator.fn_7 = gen_atomic_cas_value_exchange_7_si;
++      loongarch_expand_atomic_qihi (generator, operands[0], operands[1],
++				    const0_rtx, operands[2], operands[3]);
++    }
+   DONE;
+ })
+ 
++(define_insn "atomic_fetch_add_short"
++  [(set (match_operand:SHORT 0 "register_operand" "=&r")
++	(match_operand:SHORT 1 "memory_operand" "+ZB"))
++   (set (match_dup 1)
++	(unspec_volatile:SHORT
++	  [(plus:SHORT (match_dup 1)
++		     (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))
++	   (match_operand:SI 3 "const_int_operand")] ;; model
++	 UNSPEC_SYNC_OLD_OP))]
++  "ISA_BASE_IS_LA64V110"
++  "amadd%A3.\t%0,%z2,%1"
++  [(set (attr "length") (const_int 4))])
++
+ (define_expand "atomic_fetch_add"
+   [(set (match_operand:SHORT 0 "register_operand" "=&r")
+ 	(match_operand:SHORT 1 "memory_operand" "+ZB"))
+@@ -523,10 +617,16 @@
+ 	 UNSPEC_SYNC_OLD_OP))]
+   ""
+ {
+-  union loongarch_gen_fn_ptrs generator;
+-  generator.fn_7 = gen_atomic_cas_value_add_7_si;
+-  loongarch_expand_atomic_qihi (generator, operands[0], operands[1],
+-				operands[1], operands[2], operands[3]);
++  if (ISA_BASE_IS_LA64V110)
++    emit_insn (gen_atomic_fetch_add_short (operands[0], operands[1],
++					     operands[2], operands[3]));
++  else
++    {
++      union loongarch_gen_fn_ptrs generator;
++      generator.fn_7 = gen_atomic_cas_value_add_7_si;
++      loongarch_expand_atomic_qihi (generator, operands[0], operands[1],
++				    operands[1], operands[2], operands[3]);
++    }
+   DONE;
+ })
+ 
+-- 
+2.43.0
+
diff --git a/0034-Autofdo-Enable-discrimibator-and-MCF-algorithm-on-Au.patch b/0034-Autofdo-Enable-discrimibator-and-MCF-algorithm-on-Au.patch
new file mode 100644
index 0000000000000000000000000000000000000000..01a7d12af20ec2b3eaee3975eacd2b95367f8bdb
--- /dev/null
+++ b/0034-Autofdo-Enable-discrimibator-and-MCF-algorithm-on-Au.patch
@@ -0,0 +1,312 @@
+From b020447c840c6e22440a9b9063298a06333fd2f1 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao 
+Date: Sat, 23 Mar 2024 22:56:09 +0800
+Subject: [PATCH] [Autofdo]Enable discrimibator and MCF algorithm on Autofdo
+
+---
+ gcc/auto-profile.cc | 171 +++++++++++++++++++++++++++++++++++++++++++-
+ gcc/cfghooks.cc     |   7 ++
+ gcc/opts.cc         |   5 +-
+ gcc/tree-inline.cc  |  14 ++++
+ 4 files changed, 193 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/auto-profile.cc b/gcc/auto-profile.cc
+index 2b34b80b8..f45f0ec66 100644
+--- a/gcc/auto-profile.cc
++++ b/gcc/auto-profile.cc
+@@ -466,6 +466,17 @@ string_table::get_index (const char *name) const
+   if (name == NULL)
+     return -1;
+   string_index_map::const_iterator iter = map_.find (name);
++  /* Function name may be duplicate.  Try to distinguish by the
++     #file_name#function_name defined by the autofdo tool chain.  */
++  if (iter == map_.end ())
++    {
++      char* file_name = get_original_name (lbasename (dump_base_name));
++      char* file_func_name
++	= concat ("#", file_name, "#", name, NULL);
++      iter = map_.find (file_func_name);
++      free (file_name);
++      free (file_func_name);
++    }
+   if (iter == map_.end ())
+     return -1;
+ 
+@@ -654,7 +665,7 @@ function_instance::read_function_instance (function_instance_stack *stack,
+ 
+   for (unsigned i = 0; i < num_pos_counts; i++)
+     {
+-      unsigned offset = gcov_read_unsigned () & 0xffff0000;
++      unsigned offset = gcov_read_unsigned ();
+       unsigned num_targets = gcov_read_unsigned ();
+       gcov_type count = gcov_read_counter ();
+       s->pos_counts[offset].count = count;
+@@ -733,6 +744,10 @@ autofdo_source_profile::get_count_info (gimple *stmt, count_info *info) const
+   function_instance *s = get_function_instance_by_inline_stack (stack);
+   if (s == NULL)
+     return false;
++  if (s->get_count_info (stack[0].second + stmt->bb->discriminator, info))
++    {
++      return true;
++    }
+   return s->get_count_info (stack[0].second, info);
+ }
+ 
+@@ -1395,6 +1410,66 @@ afdo_propagate (bb_set *annotated_bb)
+     }
+ }
+ 
++/* Handle the scene where the branch probability is inverted
++   by afdo_propagate ().  E.g.
++   BB_NUM (sample count)
++      BB1 (1000)
++       /    \
++    BB2 (10) BB3 (0)
++      \       /
++	BB4
++   In afdo_propagate ().count of BB3 is calculated by
++   COUNT (BB3) = 990 (990 = COUNT (BB1) - COUNT (BB2) = 1000 - 10)
++   In fact, BB3 may be colder than BB2 by sample count.
++   This function allocate source BB count to wach succ BB by sample
++   rate, E.g.
++   BB2_COUNT = BB1_COUNT * (BB2_COUNT / (BB2_COUNT + BB3_COUNT))  */
++
++static void
++afdo_preprocess_bb_count ()
++{
++  basic_block bb;
++  FOR_ALL_BB_FN (bb, cfun)
++    {
++      if (bb->count.ipa_p () && EDGE_COUNT (bb->succs) > 1
++	  && bb->count > profile_count::zero ().afdo ())
++	{
++	  basic_block bb1 = EDGE_SUCC (bb, 0)->dest;
++	  basic_block bb2 = EDGE_SUCC (bb, 1)->dest;
++	  if (single_succ_edge (bb1) && single_succ_edge (bb2)
++	      && EDGE_SUCC (bb1, 0)->dest == EDGE_SUCC (bb2, 0)->dest)
++	    {
++	      gcov_type max_count = 0;
++	      gcov_type total_count = 0;
++	      edge e;
++	      edge_iterator ei;
++	      FOR_EACH_EDGE (e, ei, bb->succs)
++		{
++		  if (!e->dest->count.ipa_p ())
++		    {
++		      continue;
++		    }
++		  max_count = MAX (max_count, e->dest->count.to_gcov_type ());
++		  total_count += e->dest->count.to_gcov_type ();
++		}
++	      /* Only if bb_count > max_count * 2 will the branch
++		 probability be inverted.  */
++	      if (max_count > 0 && bb->count.to_gcov_type () > max_count * 2)
++		{
++		  FOR_EACH_EDGE (e, ei, bb->succs)
++		    {
++		      gcov_type target_count = bb->count.to_gcov_type ()
++			* e->dest->count.to_gcov_type ()/ total_count;
++		      e->dest->count
++			= profile_count::from_gcov_type
++			  (target_count).afdo ();
++		    }
++		}
++	    }
++	}
++    }
++}
++
+ /* Propagate counts on control flow graph and calculate branch
+    probabilities.  */
+ 
+@@ -1420,6 +1495,7 @@ afdo_calculate_branch_prob (bb_set *annotated_bb)
+     }
+ 
+   afdo_find_equiv_class (annotated_bb);
++  afdo_preprocess_bb_count ();
+   afdo_propagate (annotated_bb);
+ 
+   FOR_EACH_BB_FN (bb, cfun)
+@@ -1523,6 +1599,83 @@ afdo_vpt_for_early_inline (stmt_set *promoted_stmts)
+   return false;
+ }
+ 
++/* Preparation before executing MCF algorithm.  */
++
++static void
++afdo_init_mcf ()
++{
++  basic_block bb;
++  edge e;
++  edge_iterator ei;
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "\n init calling mcf_smooth_cfg (). \n");
++    }
++
++  /* Step1: when using MCF, BB ids must be continuous,
++     so we need compact_blocks ().  */
++  compact_blocks ();
++
++  /* Step2: allocate memory for MCF input data.  */
++  bb_gcov_counts.safe_grow_cleared (cfun->cfg->x_last_basic_block);
++  edge_gcov_counts = new hash_map;
++
++  /* Step3: init MCF input data from cfg.  */
++  FOR_ALL_BB_FN (bb, cfun)
++    {
++      /* Init BB count for MCF.  */
++      bb_gcov_count (bb) = bb->count.to_gcov_type ();
++
++      gcov_type total_count = 0;
++      FOR_EACH_EDGE (e, ei, bb->succs)
++	{
++	  total_count += e->dest->count.to_gcov_type ();
++	}
++
++      /* If there is no sample in any successor block, the source
++	 BB samples are allocated to each edge by static branch prob.  */
++
++      FOR_EACH_EDGE (e, ei, bb->succs)
++	{
++	  if (total_count == 0)
++	    {
++	      edge_gcov_count (e) = e->src->count.to_gcov_type ()
++		* e->probability.to_reg_br_prob_base () / REG_BR_PROB_BASE;
++	    }
++	  else
++	    {
++	      edge_gcov_count (e) = e->src->count.to_gcov_type ()
++		* e->dest->count.to_gcov_type () / total_count;
++	    }
++	}
++    }
++}
++
++
++/* Free the resources used by MCF and reset BB count from MCF result.
++   branch probability has been updated in mcf_smooth_cfg ().  */
++
++static void
++afdo_process_after_mcf ()
++{
++  basic_block bb;
++  /* Reset BB count from MCF result.  */
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      if (bb_gcov_count (bb))
++	{
++	  bb->count
++	    = profile_count::from_gcov_type (bb_gcov_count (bb)).afdo ();
++	}
++    }
++
++    /* Clean up MCF resource.  */
++    bb_gcov_counts.release ();
++    delete edge_gcov_counts;
++    edge_gcov_counts = NULL;
++}
++
+ /* Annotate auto profile to the control flow graph. Do not annotate value
+    profile for stmts in PROMOTED_STMTS.  */
+ 
+@@ -1574,8 +1727,20 @@ afdo_annotate_cfg (const stmt_set &promoted_stmts)
+   afdo_source_profile->mark_annotated (cfun->function_end_locus);
+   if (max_count > profile_count::zero ())
+     {
+-      /* Calculate, propagate count and probability information on CFG.  */
+-      afdo_calculate_branch_prob (&annotated_bb);
++      /* 1 means -fprofile-correction is enabled manually, and the MCF
++	 algorithm will be used to calculate count and probability.
++	 Otherwise, use the default calculation algorithm.  */
++      if (flag_profile_correction == 1)
++	{
++	  afdo_init_mcf ();
++	  mcf_smooth_cfg ();
++	  afdo_process_after_mcf ();
++	}
++      else
++	{
++	  /* Calculate, propagate count and probability information on CFG.  */
++	  afdo_calculate_branch_prob (&annotated_bb);
++	}
+     }
+   update_max_bb_count ();
+   profile_status_for_fn (cfun) = PROFILE_READ;
+diff --git a/gcc/cfghooks.cc b/gcc/cfghooks.cc
+index c0b7bdcd9..323663010 100644
+--- a/gcc/cfghooks.cc
++++ b/gcc/cfghooks.cc
+@@ -542,6 +542,9 @@ split_block_1 (basic_block bb, void *i)
+     return NULL;
+ 
+   new_bb->count = bb->count;
++  /* Copy discriminator from original bb to distinguish among
++     several basic blocks that share a common locus, allowing for
++     more accurate autofdo.  */
+   new_bb->discriminator = bb->discriminator;
+ 
+   if (dom_info_available_p (CDI_DOMINATORS))
+@@ -1113,6 +1116,10 @@ duplicate_block (basic_block bb, edge e, basic_block after, copy_bb_data *id)
+     move_block_after (new_bb, after);
+ 
+   new_bb->flags = (bb->flags & ~BB_DUPLICATED);
++  /* Copy discriminator from original bb to distinguish among
++     several basic blocks that share a common locus, allowing for
++     more accurate autofdo.  */
++  new_bb->discriminator = bb->discriminator;
+   FOR_EACH_EDGE (s, ei, bb->succs)
+     {
+       /* Since we are creating edges from a new block to successors
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 2bba88140..4b4925331 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -3014,7 +3014,10 @@ common_handle_option (struct gcc_options *opts,
+       /* FALLTHRU */
+     case OPT_fauto_profile:
+       enable_fdo_optimizations (opts, opts_set, value);
+-      SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_correction, value);
++	  /* 2 is special and means flag_profile_correction is turned on by
++	     -fauto-profile.  */
++      SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_correction,
++			   (value ? 2 : 0));
+       break;
+ 
+     case OPT_fipa_struct_reorg_:
+diff --git a/gcc/tree-inline.cc b/gcc/tree-inline.cc
+index f892cee3f..f50dbbc52 100644
+--- a/gcc/tree-inline.cc
++++ b/gcc/tree-inline.cc
+@@ -2038,6 +2038,10 @@ copy_bb (copy_body_data *id, basic_block bb,
+      basic_block_info automatically.  */
+   copy_basic_block = create_basic_block (NULL, (basic_block) prev->aux);
+   copy_basic_block->count = bb->count.apply_scale (num, den);
++  /* Copy discriminator from original bb to distinguish among
++     several basic blocks that share a common locus, allowing for
++     more accurate autofdo.  */
++  copy_basic_block->discriminator = bb->discriminator;
+ 
+   copy_gsi = gsi_start_bb (copy_basic_block);
+ 
+@@ -3058,6 +3062,16 @@ copy_cfg_body (copy_body_data * id,
+ 	  den += e->count ();
+       ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = den;
+     }
++  /* When autofdo uses PMU as the sampling unit, the number of
++     ENTRY_BLOCK_PTR_FOR_FN cannot be obtained directly and will
++     be zero.  Using it for adjust_for_ipa_scaling would cause the
++     inlined BB count to be overestimated.  So set den equal
++     to num, which is the source inline BB count, to avoid
++     overestimation.  */
++  if (den == profile_count::zero ().afdo ())
++    {
++      den = num;
++    }
+ 
+   profile_count::adjust_for_ipa_scaling (&num, &den);
+ 
+-- 
+2.33.0
+
diff --git a/0034-LoongArch-atomic_load-and-atomic_store-are-implement.patch b/0034-LoongArch-atomic_load-and-atomic_store-are-implement.patch
new file mode 100644
index 0000000000000000000000000000000000000000..24c0500ed891f75132c233c0f59ae95dd4bddfeb
--- /dev/null
+++ b/0034-LoongArch-atomic_load-and-atomic_store-are-implement.patch
@@ -0,0 +1,140 @@
+From 61a70e6b6b44bf420eae559d998e109b70e5a9b6 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 17 Nov 2023 16:04:45 +0800
+Subject: [PATCH 034/188] LoongArch: atomic_load and atomic_store are
+ implemented using dbar grading.
+
+Because the la464 memory model design allows loads from the same address to be
+executed out of order, in the following test example the load on line 23 may be
+executed before the load on line 21, resulting in an error.
+So when memmodel is MEMMODEL_RELAXED, the load instruction will be followed by
+"dbar 0x700" when implementing _atomic_load.
+
+  1 void *
+  2 gomp_ptrlock_get_slow (gomp_ptrlock_t *ptrlock)
+  3 {
+  4   int *intptr;
+  5   uintptr_t oldval = 1;
+  6
+  7   __atomic_compare_exchange_n (ptrlock, &oldval, 2, false,
+  8                                MEMMODEL_RELAXED, MEMMODEL_RELAXED);
+  9
+ 10   /* futex works on ints, not pointers.
+ 11      But a valid work share pointer will be at least
+ 12      8 byte aligned, so it is safe to assume the low
+ 13      32-bits of the pointer won't contain values 1 or 2.  */
+ 14   __asm volatile ("" : "=r" (intptr) : "0" (ptrlock));
+ 15 #if __BYTE_ORDER == __BIG_ENDIAN
+ 16   if (sizeof (*ptrlock) > sizeof (int))
+ 17     intptr += (sizeof (*ptrlock) / sizeof (int)) - 1;
+ 18 #endif
+ 19   do
+ 20     do_wait (intptr, 2);
+ 21   while (__atomic_load_n (intptr, MEMMODEL_RELAXED) == 2);
+ 22   __asm volatile ("" : : : "memory");
+ 23   return (void *) __atomic_load_n (ptrlock, MEMMODEL_ACQUIRE);
+ 24 }
+
+gcc/ChangeLog:
+
+	* config/loongarch/sync.md (atomic_load): New template.
+---
+ gcc/config/loongarch/sync.md | 70 +++++++++++++++++++++++++++++++++---
+ 1 file changed, 65 insertions(+), 5 deletions(-)
+
+diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
+index 1eabaec04..f4673c856 100644
+--- a/gcc/config/loongarch/sync.md
++++ b/gcc/config/loongarch/sync.md
+@@ -30,6 +30,7 @@
+   UNSPEC_SYNC_OLD_OP
+   UNSPEC_SYNC_EXCHANGE
+   UNSPEC_ATOMIC_STORE
++  UNSPEC_ATOMIC_LOAD
+   UNSPEC_MEMORY_BARRIER
+ ])
+ 
+@@ -103,16 +104,75 @@
+ 
+ ;; Atomic memory operations.
+ 
++(define_insn "atomic_load"
++  [(set (match_operand:QHWD 0 "register_operand" "=r")
++    (unspec_volatile:QHWD
++      [(match_operand:QHWD 1 "memory_operand" "+m")
++       (match_operand:SI 2 "const_int_operand")]                        ;; model
++      UNSPEC_ATOMIC_LOAD))]
++  ""
++{
++  enum memmodel model = memmodel_base (INTVAL (operands[2]));
++
++  switch (model)
++    {
++    case MEMMODEL_SEQ_CST:
++      return "dbar\t0x11\\n\\t"
++	     "ld.\t%0,%1\\n\\t"
++	     "dbar\t0x14\\n\\t";
++    case MEMMODEL_ACQUIRE:
++      return "ld.\t%0,%1\\n\\t"
++	     "dbar\t0x14\\n\\t";
++    case MEMMODEL_RELAXED:
++      return "ld.\t%0,%1\\n\\t"
++	     "dbar\t0x700\\n\\t";
++
++    default:
++      /* The valid memory order variants are __ATOMIC_RELAXED, __ATOMIC_SEQ_CST,
++	 __ATOMIC_CONSUME and __ATOMIC_ACQUIRE.
++	 The expand_builtin_atomic_store function converts all invalid memmodels
++	 to MEMMODEL_SEQ_CST.
++
++	 __atomic builtins doc: "Consume is implemented using the
++	 stronger acquire memory order because of a deficiency in C++11's
++	 semantics."  See PR 59448 and get_memmodel in builtins.cc.  */
++      gcc_unreachable ();
++    }
++}
++  [(set (attr "length") (const_int 12))])
++
+ ;; Implement atomic stores with amoswap.  Fall back to fences for atomic loads.
+ (define_insn "atomic_store"
+-  [(set (match_operand:GPR 0 "memory_operand" "+ZB")
+-    (unspec_volatile:GPR
+-      [(match_operand:GPR 1 "reg_or_0_operand" "rJ")
++  [(set (match_operand:QHWD 0 "memory_operand" "+m")
++    (unspec_volatile:QHWD
++      [(match_operand:QHWD 1 "reg_or_0_operand" "rJ")
+        (match_operand:SI 2 "const_int_operand")]      ;; model
+       UNSPEC_ATOMIC_STORE))]
+   ""
+-  "amswap%A2.\t$zero,%z1,%0"
+-  [(set (attr "length") (const_int 8))])
++{
++  enum memmodel model = memmodel_base (INTVAL (operands[2]));
++
++  switch (model)
++    {
++    case MEMMODEL_SEQ_CST:
++      return "dbar\t0x12\\n\\t"
++	     "st.\t%z1,%0\\n\\t"
++	     "dbar\t0x18\\n\\t";
++    case MEMMODEL_RELEASE:
++      return "dbar\t0x12\\n\\t"
++	     "st.\t%z1,%0\\n\\t";
++    case MEMMODEL_RELAXED:
++      return "st.\t%z1,%0";
++
++    default:
++      /* The valid memory order variants are __ATOMIC_RELAXED, __ATOMIC_SEQ_CST,
++	 and __ATOMIC_RELEASE.
++	 The expand_builtin_atomic_store function converts all invalid memmodels
++	 to MEMMODEL_SEQ_CST.  */
++      gcc_unreachable ();
++    }
++}
++  [(set (attr "length") (const_int 12))])
+ 
+ (define_insn "atomic_"
+   [(set (match_operand:GPR 0 "memory_operand" "+ZB")
+-- 
+2.43.0
+
diff --git a/0035-Add-insn-defs-and-correct-costs-for-cmlt-generation.patch b/0035-Add-insn-defs-and-correct-costs-for-cmlt-generation.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c40886ab41742bb3eb4d4ff9065405ba6fff5065
--- /dev/null
+++ b/0035-Add-insn-defs-and-correct-costs-for-cmlt-generation.patch
@@ -0,0 +1,194 @@
+From aa39a66f6029fe16a656d7c6339908b953fb1e04 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia WX1215920 
+Date: Thu, 22 Feb 2024 11:27:43 +0300
+Subject: [PATCH 01/18] Add insn defs and correct costs for cmlt generation
+
+---
+ gcc/config/aarch64/aarch64-simd.md  | 48 +++++++++++++++++++++++++++++
+ gcc/config/aarch64/aarch64.cc       | 15 +++++++++
+ gcc/config/aarch64/aarch64.opt      |  4 +++
+ gcc/config/aarch64/iterators.md     |  3 +-
+ gcc/config/aarch64/predicates.md    | 25 +++++++++++++++
+ gcc/testsuite/gcc.dg/combine-cmlt.c | 20 ++++++++++++
+ 6 files changed, 114 insertions(+), 1 deletion(-)
+ create mode 100755 gcc/testsuite/gcc.dg/combine-cmlt.c
+
+diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
+index ee7f0b89c..82f73805f 100644
+--- a/gcc/config/aarch64/aarch64-simd.md
++++ b/gcc/config/aarch64/aarch64-simd.md
+@@ -6454,6 +6454,54 @@
+   [(set_attr "type" "neon_compare, neon_compare_zero")]
+ )
+ 
++;; Use cmlt to replace vector arithmetic operations like this (SImode example):
++;; B = (((A >> 15) & 0x00010001) << 16) - ((A >> 15) & 0x00010001)
++;; TODO: maybe extend to scalar operations or other cm** instructions.
++
++(define_insn "*aarch64_cmlt_as_arith"
++  [(set (match_operand: 0 "register_operand" "=w")
++	(minus:
++	  (ashift:
++	    (and:
++	      (lshiftrt:
++		(match_operand:VDQHSD 1 "register_operand" "w")
++		(match_operand:VDQHSD 2 "half_size_minus_one_operand"))
++	      (match_operand:VDQHSD 3 "cmlt_arith_mask_operand"))
++	    (match_operand:VDQHSD 4 "half_size_operand"))
++	  (and:
++	    (lshiftrt:
++	      (match_dup 1)
++	      (match_dup 2))
++	    (match_dup 3))))]
++  "TARGET_SIMD && flag_cmlt_arith"
++  "cmlt\t%0., %1., #0"
++  [(set_attr "type" "neon_compare_zero")]
++)
++
++;; The helper definition that allows combiner to use the previous pattern.
++
++(define_insn_and_split "*arch64_cmlt_tmp"
++  [(set (match_operand: 0 "register_operand" "=w")
++	(and:
++	  (lshiftrt:
++	    (match_operand:VDQHSD 1 "register_operand" "w")
++	    (match_operand:VDQHSD 2 "half_size_minus_one_operand"))
++	  (match_operand:VDQHSD 3 "cmlt_arith_mask_operand")))]
++  "TARGET_SIMD && flag_cmlt_arith"
++  "#"
++  "&& reload_completed"
++  [(set (match_operand: 0 "register_operand")
++	(lshiftrt:
++	  (match_operand:VDQHSD 1 "register_operand")
++	  (match_operand:VDQHSD 2 "half_size_minus_one_operand")))
++   (set (match_dup 0)
++	(and:
++	  (match_dup 0)
++	  (match_operand:VDQHSD 3 "cmlt_arith_mask_operand")))]
++  ""
++  [(set_attr "type" "neon_compare_zero")]
++)
++
+ (define_insn_and_split "aarch64_cmdi"
+   [(set (match_operand:DI 0 "register_operand" "=w,w,r")
+ 	(neg:DI
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index a3da4ca30..04072ca25 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -14064,6 +14064,21 @@ cost_minus:
+ 	    return true;
+ 	  }
+ 
++	/* Detect aarch64_cmlt_as_arith instruction. Now only this pattern
++	   matches the condition. The costs of cmlt and sub instructions
++	   are comparable, so we are not increasing the cost here.  */
++	if (flag_cmlt_arith && GET_CODE (op0) == ASHIFT
++	    && GET_CODE (op1) == AND)
++	  {
++	    rtx op0_subop0 = XEXP (op0, 0);
++	    if (rtx_equal_p (op0_subop0, op1))
++	      {
++		rtx lshrt_op = XEXP (op0_subop0, 0);
++		if (GET_CODE (lshrt_op) == LSHIFTRT)
++		  return true;
++	      }
++	  }
++
+ 	/* Look for SUB (extended register).  */
+ 	if (is_a  (mode)
+ 	    && aarch64_rtx_arith_op_extract_p (op1))
+diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
+index a64b927e9..101664c7c 100644
+--- a/gcc/config/aarch64/aarch64.opt
++++ b/gcc/config/aarch64/aarch64.opt
+@@ -262,6 +262,10 @@ Use an immediate to offset from the stack protector guard register, sp_el0.
+ This option is for use with fstack-protector-strong and not for use in
+ user-land code.
+ 
++mcmlt-arith
++Target Var(flag_cmlt_arith) Optimization Init(0)
++Use SIMD cmlt instruction to perform some arithmetic/logic calculations.
++
+ TargetVariable
+ long aarch64_stack_protector_guard_offset = 0
+ 
+diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
+index 26a840d7f..967e6b0b1 100644
+--- a/gcc/config/aarch64/iterators.md
++++ b/gcc/config/aarch64/iterators.md
+@@ -1485,7 +1485,8 @@
+ 			  (V2DI "2s")])
+ 
+ ;; Register suffix narrowed modes for VQN.
+-(define_mode_attr V2ntype [(V8HI "16b") (V4SI "8h")
++(define_mode_attr V2ntype [(V4HI "8b") (V2SI "4h")
++			   (V8HI "16b") (V4SI "8h")
+ 			   (V2DI "4s")])
+ 
+ ;; Widened modes of vector modes.
+diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
+index c308015ac..07c14aacb 100644
+--- a/gcc/config/aarch64/predicates.md
++++ b/gcc/config/aarch64/predicates.md
+@@ -49,6 +49,31 @@
+   return CONST_INT_P (op) && IN_RANGE (INTVAL (op), 1, 3);
+ })
+ 
++(define_predicate "half_size_minus_one_operand"
++  (match_code "const_vector")
++{
++  op = unwrap_const_vec_duplicate (op);
++  unsigned int size = GET_MODE_UNIT_BITSIZE (mode) / 2;
++  return CONST_INT_P (op) && (UINTVAL (op) == size - 1);
++})
++
++(define_predicate "half_size_operand"
++  (match_code "const_vector")
++{
++  op = unwrap_const_vec_duplicate (op);
++  unsigned int size = GET_MODE_UNIT_BITSIZE (mode) / 2;
++  return CONST_INT_P (op) && (UINTVAL (op) == size);
++})
++
++(define_predicate "cmlt_arith_mask_operand"
++  (match_code "const_vector")
++{
++  op = unwrap_const_vec_duplicate (op);
++  unsigned int size = GET_MODE_UNIT_BITSIZE (mode) / 2;
++  unsigned long long mask = ((unsigned long long) 1 << size) | 1;
++  return CONST_INT_P (op) && (UINTVAL (op) == mask);
++})
++
+ (define_predicate "subreg_lowpart_operator"
+   (ior (match_code "truncate")
+        (and (match_code "subreg")
+diff --git a/gcc/testsuite/gcc.dg/combine-cmlt.c b/gcc/testsuite/gcc.dg/combine-cmlt.c
+new file mode 100755
+index 000000000..b4c9a37ff
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/combine-cmlt.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile { target aarch64-*-* } } */
++/* { dg-options "-O3 -mcmlt-arith" } */
++
++/* The test checks usage of cmlt insns for arithmetic/logic calculations
++ * in foo ().  It's inspired by sources of x264 codec.  */
++
++typedef unsigned short int uint16_t;
++typedef unsigned int uint32_t;
++
++void foo( uint32_t *a, uint32_t *b)
++{
++  for (unsigned i = 0; i < 4; i++)
++    {
++      uint32_t s = ((a[i]>>((8 * sizeof(uint16_t))-1))
++		    &(((uint32_t)1<<(8 * sizeof(uint16_t)))+1))*((uint16_t)-1);
++      b[i] = (a[i]+s)^s;
++    }
++}
++
++/* { dg-final { scan-assembler-times {cmlt\t} 1 } }  */
+-- 
+2.33.0
+
diff --git a/0035-LoongArch-genopts-Add-infrastructure-to-generate-cod.patch b/0035-LoongArch-genopts-Add-infrastructure-to-generate-cod.patch
new file mode 100644
index 0000000000000000000000000000000000000000..91119ccd7b5c229ee957e595a35b0406cdeee67c
--- /dev/null
+++ b/0035-LoongArch-genopts-Add-infrastructure-to-generate-cod.patch
@@ -0,0 +1,615 @@
+From 535fb5a2d4347801439fbb51fa07cd0317183cee Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 25 Oct 2024 02:08:03 +0000
+Subject: [PATCH 035/188] LoongArch: genopts: Add infrastructure to generate
+ code for  new features in ISA evolution
+
+LoongArch v1.10 introduced the concept of ISA evolution.  During ISA
+evolution, many independent features can be added and enumerated via
+CPUCFG.
+
+Add a data file into genopts storing the CPUCFG word, bit, the name
+of the command line option controlling if this feature should be used
+for compilation, and the text description.  Make genstr.sh process these
+info and add the command line options into loongarch.opt and
+loongarch-str.h, and generate a new file loongarch-cpucfg-map.h for
+mapping CPUCFG output to the corresponding option.  When handling
+-march=native, use the information in loongarch-cpucfg-map.h to generate
+the corresponding option mask.  Enable the features implied by -march
+setting unless the user has explicitly disabled the feature.
+
+The added options (-mdiv32 and -mld-seq-sa) are not really handled yet.
+They'll be used in the following patches.
+
+gcc/ChangeLog:
+
+        * config/loongarch/genopts/isa-evolution.in: New data file.
+        * config/loongarch/genopts/genstr.sh: Translate info in
+        isa-evolution.in when generating loongarch-str.h, loongarch.opt,
+        and loongarch-cpucfg-map.h.
+        * config/loongarch/genopts/loongarch.opt.in (isa_evolution):
+        New variable.
+        * config/loongarch/t-loongarch: (loongarch-cpucfg-map.h): New
+        rule.
+        (loongarch-str.h): Depend on isa-evolution.in.
+        (loongarch.opt): Depend on isa-evolution.in.
+        (loongarch-cpu.o): Depend on loongarch-cpucfg-map.h.
+        * config/loongarch/loongarch-str.h: Regenerate.
+        * config/loongarch/loongarch-def.h (loongarch_isa):  Add field
+        for evolution features.  Add helper function to enable features
+        in this field.
+        Probe native CPU capability and save the corresponding options
+        into preset.
+        * config/loongarch/loongarch-cpu.cc (fill_native_cpu_config):
+        Probe native CPU capability and save the corresponding options
+        into preset.
+        (cache_cpucfg): Simplify with C++11-style for loop.
+        (cpucfg_useful_idx, N_CPUCFG_WORDS): Move to ...
+        * config/loongarch/loongarch.cc
+        (loongarch_option_override_internal): Enable the ISA evolution
+        feature options implied by -march and not explicitly disabled.
+        (loongarch_asm_code_end): New function, print ISA information as
+        comments in the assembly if -fverbose-asm.  It makes it easier to
+        debug things like -march=native.
+        (TARGET_ASM_CODE_END): Define.
+        * config/loongarch/loongarch.opt: Regenerate.
+        * config/loongarch/loongarch-cpucfg-map.h: Generate.
+        (cpucfg_useful_idx, N_CPUCFG_WORDS) ... here.
+---
+ gcc/config/loongarch/genopts/genstr.sh        | 92 ++++++++++++++++++-
+ gcc/config/loongarch/genopts/isa-evolution.in |  2 +
+ gcc/config/loongarch/genopts/loongarch.opt.in |  7 ++
+ gcc/config/loongarch/loongarch-cpu.cc         | 46 +++++-----
+ gcc/config/loongarch/loongarch-cpucfg-map.h   | 48 ++++++++++
+ gcc/config/loongarch/loongarch-def.h          |  7 ++
+ gcc/config/loongarch/loongarch-str.h          |  6 +-
+ gcc/config/loongarch/loongarch.cc             | 31 +++++++
+ gcc/config/loongarch/loongarch.opt            | 20 +++-
+ gcc/config/loongarch/t-loongarch              | 21 ++++-
+ 10 files changed, 244 insertions(+), 36 deletions(-)
+ create mode 100644 gcc/config/loongarch/genopts/isa-evolution.in
+ create mode 100644 gcc/config/loongarch/loongarch-cpucfg-map.h
+
+diff --git a/gcc/config/loongarch/genopts/genstr.sh b/gcc/config/loongarch/genopts/genstr.sh
+index 972ef125f..bcc616e98 100755
+--- a/gcc/config/loongarch/genopts/genstr.sh
++++ b/gcc/config/loongarch/genopts/genstr.sh
+@@ -25,8 +25,8 @@ cd "$(dirname "$0")"
+ # Generate a header containing definitions from the string table.
+ gen_defines() {
+     cat <.  */
++
++#ifndef LOONGARCH_CPUCFG_MAP_H
++#define LOONGARCH_CPUCFG_MAP_H
++
++#include "options.h"
++
++static constexpr struct {
++  int cpucfg_word;
++  unsigned int cpucfg_bit;
++  HOST_WIDE_INT isa_evolution_bit;
++} cpucfg_map[] = {
++EOF
++
++    # Generate the strings from isa-evolution.in.
++    awk '{
++      gsub(/-/, "_", $3)
++      print("  { "$1", 1u << "$2", OPTION_MASK_ISA_"toupper($3)" },")
++    }' isa-evolution.in
++
++    echo "};"
++    echo
++    echo "static constexpr int cpucfg_useful_idx[] = {"
++
++    awk 'BEGIN { print("  0,\n  1,\n  2,\n  16,\n  17,\n  18,\n  19,") }
++    {if ($1+0 > max+0) max=$1; print("  "$1",")}' \
++   isa-evolution.in | sort -n | uniq
++
++    echo "};"
++    echo ""
++
++    awk 'BEGIN { max=19 }
++    { if ($1+0 > max+0) max=$1 }
++    END { print "static constexpr int N_CPUCFG_WORDS = "1+max";" }' \
++   isa-evolution.in
++
++    echo "#endif /* LOONGARCH_CPUCFG_MAP_H */"
+ }
+ 
+ main() {
+     case "$1" in
++    cpucfg-map) gen_cpucfg_map;;
+ 	header) gen_defines;;
+ 	opt) gen_options;;
+-	*) echo "Unknown Command: \"$1\". Available: header, opt"; exit 1;;
++    *) echo "Unknown Command: \"$1\". Available: cpucfg-map, header, opt"; exit 1;;
+     esac
+ }
+ 
+diff --git a/gcc/config/loongarch/genopts/isa-evolution.in b/gcc/config/loongarch/genopts/isa-evolution.in
+new file mode 100644
+index 000000000..e58f0d6a1
+--- /dev/null
++++ b/gcc/config/loongarch/genopts/isa-evolution.in
+@@ -0,0 +1,2 @@
++2	26	div32		Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
++3	23	ld-seq-sa	Do not need load-load barriers (dbar 0x700).
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index bd3cfaf60..a49de07c9 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -247,3 +247,10 @@ Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) Integ
+ Indicate how many non memory access vector instructions can be issued per
+ cycle, it's used in unroll factor determination for autovectorizer.  The
+ default value is 4.
++
++; Features added during ISA evolution.  This concept is different from ISA
++; extension, read Section 1.5 of LoongArch v1.10 Volume 1 for the
++; explanation.  These features may be implemented and enumerated with
++; CPUCFG independently, so we use bit flags to specify them.
++Variable
++HOST_WIDE_INT isa_evolution = 0
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index cbe52d7ed..e1cd85d02 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -29,12 +29,11 @@ along with GCC; see the file COPYING3.  If not see
+ #include "loongarch-def.h"
+ #include "loongarch-opts.h"
+ #include "loongarch-cpu.h"
++#include "loongarch-cpucfg-map.h"
+ #include "loongarch-str.h"
+ 
+ /* Native CPU detection with "cpucfg" */
+-#define N_CPUCFG_WORDS 0x15
+ static uint32_t cpucfg_cache[N_CPUCFG_WORDS] = { 0 };
+-static const int cpucfg_useful_idx[] = {0, 1, 2, 16, 17, 18, 19};
+ 
+ static uint32_t
+ read_cpucfg_word (int wordno)
+@@ -56,11 +55,8 @@ read_cpucfg_word (int wordno)
+ void
+ cache_cpucfg (void)
+ {
+-  for (unsigned int i = 0; i < sizeof (cpucfg_useful_idx) / sizeof (int); i++)
+-    {
+-      cpucfg_cache[cpucfg_useful_idx[i]]
+-	= read_cpucfg_word (cpucfg_useful_idx[i]);
+-    }
++  for (int idx: cpucfg_useful_idx)
++    cpucfg_cache[idx] = read_cpucfg_word (idx);
+ }
+ 
+ uint32_t
+@@ -125,11 +121,12 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+       int tmp;
+       tgt->cpu_arch = native_cpu_type;
+ 
++      auto &preset = loongarch_cpu_default_isa[tgt->cpu_arch];
++
+       /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].base
+ 	 With: base architecture (ARCH)
+ 	 At:   cpucfg_words[1][1:0] */
+ 
+-      #define PRESET_ARCH (loongarch_cpu_default_isa[tgt->cpu_arch].base)
+       switch (cpucfg_cache[1] & 0x3)
+ 	{
+ 	  case 0x02:
+@@ -144,19 +141,18 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	}
+ 
+       /* Check consistency with PRID presets.  */
+-      if (native_cpu_type != CPU_NATIVE && tmp != PRESET_ARCH)
++      if (native_cpu_type != CPU_NATIVE && tmp != preset.base)
+ 	warning (0, "base architecture %qs differs from PRID preset %qs",
+ 		 loongarch_isa_base_strings[tmp],
+-		 loongarch_isa_base_strings[PRESET_ARCH]);
++		 loongarch_isa_base_strings[preset.base]);
+ 
+       /* Use the native value anyways.  */
+-      PRESET_ARCH = tmp;
++      preset.base = tmp;
+ 
+       /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].fpu
+ 	 With: FPU type (FP, FP_SP, FP_DP)
+ 	 At:   cpucfg_words[2][2:0] */
+ 
+-      #define PRESET_FPU (loongarch_cpu_default_isa[tgt->cpu_arch].fpu)
+       switch (cpucfg_cache[2] & 0x7)
+ 	{
+ 	  case 0x07:
+@@ -179,20 +175,19 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	}
+ 
+       /* Check consistency with PRID presets.  */
+-      if (native_cpu_type != CPU_NATIVE && tmp != PRESET_FPU)
++      if (native_cpu_type != CPU_NATIVE && tmp != preset.fpu)
+ 	warning (0, "floating-point unit %qs differs from PRID preset %qs",
+ 		 loongarch_isa_ext_strings[tmp],
+-		 loongarch_isa_ext_strings[PRESET_FPU]);
++		 loongarch_isa_ext_strings[preset.fpu]);
+ 
+       /* Use the native value anyways.  */
+-      PRESET_FPU = tmp;
++      preset.fpu = tmp;
+ 
+ 
+       /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].simd
+ 	 With: SIMD extension type (LSX, LASX)
+ 	 At:   cpucfg_words[2][7:6] */
+ 
+-      #define PRESET_SIMD (loongarch_cpu_default_isa[tgt->cpu_arch].simd)
+       switch (cpucfg_cache[2] & 0xc0)
+ 	{
+ 	  case 0xc0:
+@@ -219,14 +214,19 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+       /* Check consistency with PRID presets.  */
+ 
+       /*
+-      if (native_cpu_type != CPU_NATIVE && tmp != PRESET_SIMD)
++      if (native_cpu_type != CPU_NATIVE && tmp != preset.simd)
+ 	warning (0, "SIMD extension %qs differs from PRID preset %qs",
+ 		 loongarch_isa_ext_strings[tmp],
+-		 loongarch_isa_ext_strings[PRESET_SIMD]);
++		 loongarch_isa_ext_strings[preset.simd]);
+       */
+ 
+       /* Use the native value anyways.  */
+-      PRESET_SIMD = tmp;
++      preset.simd = tmp;
++
++      /* Features added during ISA evolution.  */
++      for (const auto &entry: cpucfg_map)
++	if (cpucfg_cache[entry.cpucfg_word] & entry.cpucfg_bit)
++	  preset.evolution |= entry.isa_evolution_bit;
+     }
+ 
+   if (tune_native_p)
+@@ -237,7 +237,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	 With: cache size info
+ 	 At:   cpucfg_words[16:20][31:0] */
+ 
+-      #define PRESET_CACHE (loongarch_cpu_cache[tgt->cpu_tune])
++      auto &preset_cache = loongarch_cpu_cache[tgt->cpu_tune];
+       struct loongarch_cache native_cache;
+       int l1d_present = 0, l1u_present = 0;
+       int l2d_present = 0;
+@@ -268,8 +268,8 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	>> 10;					  /* in kibibytes */
+ 
+       /* Use the native value anyways.  */
+-      PRESET_CACHE.l1d_line_size = native_cache.l1d_line_size;
+-      PRESET_CACHE.l1d_size = native_cache.l1d_size;
+-      PRESET_CACHE.l2d_size = native_cache.l2d_size;
++      preset_cache.l1d_line_size = native_cache.l1d_line_size;
++      preset_cache.l1d_size = native_cache.l1d_size;
++      preset_cache.l2d_size = native_cache.l2d_size;
+     }
+ }
+diff --git a/gcc/config/loongarch/loongarch-cpucfg-map.h b/gcc/config/loongarch/loongarch-cpucfg-map.h
+new file mode 100644
+index 000000000..0c078c397
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-cpucfg-map.h
+@@ -0,0 +1,48 @@
++/* Generated automatically by "genstr" from "isa-evolution.in".
++   Please do not edit this file directly.
++
++   Copyright (C) 2023 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#ifndef LOONGARCH_CPUCFG_MAP_H
++#define LOONGARCH_CPUCFG_MAP_H
++
++#include "options.h"
++
++static constexpr struct {
++  int cpucfg_word;
++  unsigned int cpucfg_bit;
++  HOST_WIDE_INT isa_evolution_bit;
++} cpucfg_map[] = {
++  { 2, 1u << 26, OPTION_MASK_ISA_DIV32 },
++  { 3, 1u << 23, OPTION_MASK_ISA_LD_SEQ_SA },
++};
++
++static constexpr int cpucfg_useful_idx[] = {
++  0,
++  1,
++  2,
++  3,
++  16,
++  17,
++  18,
++  19,
++};
++
++static constexpr int N_CPUCFG_WORDS = 20;
++#endif /* LOONGARCH_CPUCFG_MAP_H */
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index 078d8607d..cb99caebe 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -46,6 +46,7 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef LOONGARCH_DEF_H
+ #define LOONGARCH_DEF_H
+ 
++#include 
+ #include "loongarch-tune.h"
+ 
+ #ifdef __cplusplus
+@@ -121,6 +122,12 @@ struct loongarch_isa
+   int base;	    /* ISA_BASE_ */
+   int fpu;	    /* ISA_EXT_FPU_ */
+   int simd;	    /* ISA_EXT_SIMD_ */
++
++  /* ISA evolution features implied by -march=, for -march=native probed
++     via CPUCFG.  The features implied by base may be not included here.
++
++     Using int64_t instead of HOST_WIDE_INT for C compatibility.  */
++  int64_t evolution;
+ };
+ 
+ struct loongarch_abi
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index 037e9e583..cd9dbb41b 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -1,5 +1,5 @@
+-/* Generated automatically by "genstr" from "loongarch-strings".
+-   Please do not edit this file directly.
++/* Generated automatically by "genstr" from "loongarch-strings" and
++   "isa-evolution.in".  Please do not edit this file directly.
+ 
+    Copyright (C) 2021-2022 Free Software Foundation, Inc.
+    Contributed by Loongson Ltd.
+@@ -69,4 +69,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_EXPLICIT_RELOCS_NONE "none"
+ #define STR_EXPLICIT_RELOCS_ALWAYS "always"
+ 
++#define OPTSTR_DIV32   "div32"
++#define OPTSTR_LD_SEQ_SA   "ld-seq-sa"
+ #endif /* LOONGARCH_STR_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 7bb46a45d..8bd46da62 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -7451,6 +7451,10 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   if (loongarch_branch_cost == 0)
+     loongarch_branch_cost = loongarch_cost->branch_cost;
+ 
++  /* If the user hasn't disabled a feature added during ISA evolution,
++     use the processor's default.  */
++  isa_evolution |= (la_target.isa.evolution &
++		    ~global_options_set.x_isa_evolution);
+ 
+   /* Enable sw prefetching at -O3 and higher.  */
+   if (opts->x_flag_prefetch_loop_arrays < 0
+@@ -11427,6 +11431,30 @@ loongarch_builtin_support_vector_misalignment (machine_mode mode,
+ 						      is_packed);
+ }
+ 
++/* If -fverbose-asm, dump some info for debugging.  */
++static void
++loongarch_asm_code_end (void)
++{
++#define DUMP_FEATURE(PRED) \
++  fprintf (asm_out_file, "%s %s: %s\n", ASM_COMMENT_START, #PRED, \
++	   (PRED) ? "enabled" : "disabled")
++
++  if (flag_verbose_asm)
++    {
++      fprintf (asm_out_file, "\n%s CPU: %s\n", ASM_COMMENT_START,
++	       loongarch_cpu_strings [la_target.cpu_arch]);
++      fprintf (asm_out_file, "%s Tune: %s\n", ASM_COMMENT_START,
++	       loongarch_cpu_strings [la_target.cpu_tune]);
++      fprintf (asm_out_file, "%s Base ISA: %s\n", ASM_COMMENT_START,
++	       loongarch_isa_base_strings [la_target.isa.base]);
++      DUMP_FEATURE (TARGET_DIV32);
++      DUMP_FEATURE (TARGET_LD_SEQ_SA);
++    }
++
++  fputs ("\n\n", asm_out_file);
++#undef DUMP_FEATURE
++}
++
+ /* Initialize the GCC target structure.  */
+ #undef TARGET_ASM_ALIGNED_HI_OP
+ #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+@@ -11446,6 +11474,9 @@ loongarch_builtin_support_vector_misalignment (machine_mode mode,
+ #undef TARGET_ASM_FUNCTION_RODATA_SECTION
+ #define TARGET_ASM_FUNCTION_RODATA_SECTION loongarch_function_rodata_section
+ 
++#undef TARGET_ASM_CODE_END
++#define TARGET_ASM_CODE_END loongarch_asm_code_end
++
+ #undef TARGET_SCHED_INIT
+ #define TARGET_SCHED_INIT loongarch_sched_init
+ #undef TARGET_SCHED_REORDER
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index d936954b8..5251f705d 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -1,9 +1,10 @@
+ ; Generated by "genstr" from the template "loongarch.opt.in"
+-; and definitions from "loongarch-strings".
++; and definitions from "loongarch-strings" and "isa-evolution.in".
+ ;
+ ; Please do not edit this file directly.
+ ; It will be automatically updated during a gcc build
+-; if you change "loongarch.opt.in" or "loongarch-strings".
++; if you change "loongarch.opt.in", "loongarch-strings", or
++; "isa-evolution.in".
+ ;
+ ; Copyright (C) 2021-2022 Free Software Foundation, Inc.
+ ;
+@@ -254,3 +255,18 @@ Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) Integ
+ Indicate how many non memory access vector instructions can be issued per
+ cycle, it's used in unroll factor determination for autovectorizer.  The
+ default value is 4.
++
++; Features added during ISA evolution.  This concept is different from ISA
++; extension, read Section 1.5 of LoongArch v1.10 Volume 1 for the
++; explanation.  These features may be implemented and enumerated with
++; CPUCFG independently, so we use bit flags to specify them.
++Variable
++HOST_WIDE_INT isa_evolution = 0
++
++mdiv32
++Target Mask(ISA_DIV32) Var(isa_evolution)
++Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
++
++mld-seq-sa
++Target Mask(ISA_LD_SEQ_SA) Var(isa_evolution)
++Do not need load-load barriers (dbar 0x700).
+diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch
+index 12734c37b..57b1176bc 100644
+--- a/gcc/config/loongarch/t-loongarch
++++ b/gcc/config/loongarch/t-loongarch
+@@ -18,8 +18,9 @@
+ 
+ 
+ GTM_H += loongarch-multilib.h
+-OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h \
+-		   $(srcdir)/config/loongarch/loongarch-tune.h
++OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h	\
++		   $(srcdir)/config/loongarch/loongarch-tune.h	\
++		   $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
+ 
+ # Canonical target triplet from config.gcc
+ LA_MULTIARCH_TRIPLET = $(patsubst LA_MULTIARCH_TRIPLET=%,%,$\
+@@ -31,7 +32,8 @@ LA_STR_H = $(srcdir)/config/loongarch/loongarch-str.h
+ # String definition header
+ $(LA_STR_H): s-loongarch-str ; @true
+ s-loongarch-str: $(srcdir)/config/loongarch/genopts/genstr.sh \
+-	$(srcdir)/config/loongarch/genopts/loongarch-strings
++	$(srcdir)/config/loongarch/genopts/loongarch-strings  \
++	$(srcdir)/config/loongarch/genopts/isa-evolution.in
+ 	$(SHELL) $(srcdir)/config/loongarch/genopts/genstr.sh header \
+     $(srcdir)/config/loongarch/genopts/loongarch-strings > \
+     tmp-loongarch-str.h
+@@ -58,7 +60,8 @@ loongarch-driver.o : $(srcdir)/config/loongarch/loongarch-driver.cc $(LA_STR_H)
+ loongarch-opts.o: $(srcdir)/config/loongarch/loongarch-opts.cc $(LA_STR_H)
+ 	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+ 
+-loongarch-cpu.o: $(srcdir)/config/loongarch/loongarch-cpu.cc $(LA_STR_H)
++loongarch-cpu.o: $(srcdir)/config/loongarch/loongarch-cpu.cc $(LA_STR_H) \
++		 $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
+ 	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+ 
+ loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.c $(LA_STR_H)
+@@ -67,6 +70,7 @@ loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.c $(LA_STR_H)
+ $(srcdir)/config/loongarch/loongarch.opt: s-loongarch-opt ; @true
+ s-loongarch-opt: $(srcdir)/config/loongarch/genopts/genstr.sh \
+ 	$(srcdir)/config/loongarch/genopts/loongarch.opt.in \
++	$(srcdir)/config/loongarch/genopts/isa-evolution.in \
+ 	$(srcdir)/config/loongarch/genopts/loongarch-strings $(LA_STR_H)
+ 	$(SHELL) $(srcdir)/config/loongarch/genopts/genstr.sh opt \
+     $(srcdir)/config/loongarch/genopts/loongarch.opt.in \
+@@ -74,3 +78,12 @@ s-loongarch-opt: $(srcdir)/config/loongarch/genopts/genstr.sh \
+ 	$(SHELL) $(srcdir)/../move-if-change tmp-loongarch.opt \
+     $(srcdir)/config/loongarch/loongarch.opt
+ 	$(STAMP) s-loongarch-opt
++
++$(srcdir)/config/loongarch/loongarch-cpucfg-map.h: s-loongarch-cpucfg-map
++	@true
++s-loongarch-cpucfg-map: $(srcdir)/config/loongarch/genopts/genstr.sh \
++	$(srcdir)/config/loongarch/genopts/isa-evolution.in
++	$(SHELL) $< cpucfg-map > tmp-cpucfg.h
++	$(SHELL) $(srcdir)/../move-if-change tmp-cpucfg.h \
++	    $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
++	$(STAMP) $@
+-- 
+2.43.0
+
diff --git a/0036-LoongArch-Add-evolution-features-of-base-ISA-revisio.patch b/0036-LoongArch-Add-evolution-features-of-base-ISA-revisio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ba0c123988c8217939d11465bffe4c01ed425713
--- /dev/null
+++ b/0036-LoongArch-Add-evolution-features-of-base-ISA-revisio.patch
@@ -0,0 +1,148 @@
+From 24648180418affbaf044a58ae0b5f79a0cf71155 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 18 Nov 2023 03:19:07 +0800
+Subject: [PATCH 036/188] LoongArch: Add evolution features of base ISA
+ revisions
+
+	* config/loongarch/loongarch-def.h:
+	(loongarch_isa_base_features): Declare.  Define it in ...
+	* config/loongarch/loongarch-cpu.cc
+	(loongarch_isa_base_features): ... here.
+	(fill_native_cpu_config): If we know the base ISA of the CPU
+	model from PRID, use it instead of la64 (v1.0).  Check if all
+	expected features of this base ISA are available, emit a warning
+	if not.
+	* config/loongarch/loongarch-opts.cc (config_target_isa): Enable
+	the features implied by the base ISA if not -march=native.
+---
+ gcc/config/loongarch/loongarch-cpu.cc  | 62 ++++++++++++++++++--------
+ gcc/config/loongarch/loongarch-def.h   |  5 +++
+ gcc/config/loongarch/loongarch-opts.cc |  3 ++
+ 3 files changed, 52 insertions(+), 18 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index e1cd85d02..76d66fa55 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -32,6 +32,19 @@ along with GCC; see the file COPYING3.  If not see
+ #include "loongarch-cpucfg-map.h"
+ #include "loongarch-str.h"
+ 
++/* loongarch_isa_base_features defined here instead of loongarch-def.c
++   because we need to use options.h.  Pay attention to the order of elements
++   in the initializer because ISO C++ does not allow C99 designated
++   initializers!  */
++
++#define ISA_BASE_LA64V110_FEATURES \
++  (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA)
++
++int64_t loongarch_isa_base_features[N_ISA_BASE_TYPES] = {
++  /* [ISA_BASE_LA64V100] = */ 0,
++  /* [ISA_BASE_LA64V110] = */ ISA_BASE_LA64V110_FEATURES,
++};
++
+ /* Native CPU detection with "cpucfg" */
+ static uint32_t cpucfg_cache[N_CPUCFG_WORDS] = { 0 };
+ 
+@@ -127,24 +140,22 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	 With: base architecture (ARCH)
+ 	 At:   cpucfg_words[1][1:0] */
+ 
+-      switch (cpucfg_cache[1] & 0x3)
+-	{
+-	  case 0x02:
+-	    tmp = ISA_BASE_LA64V100;
+-	    break;
+-
+-	  default:
+-	    fatal_error (UNKNOWN_LOCATION,
+-			 "unknown native base architecture %<0x%x%>, "
+-			 "%qs failed", (unsigned int) (cpucfg_cache[1] & 0x3),
+-			 "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE);
+-	}
+-
+-      /* Check consistency with PRID presets.  */
+-      if (native_cpu_type != CPU_NATIVE && tmp != preset.base)
+-	warning (0, "base architecture %qs differs from PRID preset %qs",
+-		 loongarch_isa_base_strings[tmp],
+-		 loongarch_isa_base_strings[preset.base]);
++      if (native_cpu_type != CPU_NATIVE)
++	tmp = loongarch_cpu_default_isa[native_cpu_type].base;
++      else
++	switch (cpucfg_cache[1] & 0x3)
++	  {
++	    case 0x02:
++	      tmp = ISA_BASE_LA64V100;
++	      break;
++
++	    default:
++	      fatal_error (UNKNOWN_LOCATION,
++			   "unknown native base architecture %<0x%x%>, "
++			   "%qs failed",
++			   (unsigned int) (cpucfg_cache[1] & 0x3),
++			   "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE);
++	  }
+ 
+       /* Use the native value anyways.  */
+       preset.base = tmp;
+@@ -227,6 +238,21 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+       for (const auto &entry: cpucfg_map)
+ 	if (cpucfg_cache[entry.cpucfg_word] & entry.cpucfg_bit)
+ 	  preset.evolution |= entry.isa_evolution_bit;
++
++      if (native_cpu_type != CPU_NATIVE)
++	{
++	  /* Check if the local CPU really supports the features of the base
++	     ISA of probed native_cpu_type.  If any feature is not detected,
++	     either GCC or the hardware is buggy.  */
++	  auto base_isa_feature = loongarch_isa_base_features[preset.base];
++	  if ((preset.evolution & base_isa_feature) != base_isa_feature)
++	    warning (0,
++		     "detected base architecture %qs, but some of its "
++		     "features are not detected; the detected base "
++		     "architecture may be unreliable, only detected "
++		     "features will be enabled",
++		     loongarch_isa_base_strings[preset.base]);
++	}
+     }
+ 
+   if (tune_native_p)
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index cb99caebe..ca0a324dd 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -55,12 +55,17 @@ extern "C" {
+ 
+ /* enum isa_base */
+ extern const char* loongarch_isa_base_strings[];
++
+ /* LoongArch V1.00.  */
+ #define ISA_BASE_LA64V100     0
+ /* LoongArch V1.10.  */
+ #define ISA_BASE_LA64V110     1
+ #define N_ISA_BASE_TYPES      2
+ 
++/* Unlike other arrays, this is defined in loongarch-cpu.cc.  The problem is
++   we cannot use the C++ header options.h in loongarch-def.c.  */
++extern int64_t loongarch_isa_base_features[];
++
+ /* enum isa_ext_* */
+ extern const char* loongarch_isa_ext_strings[];
+ #define ISA_EXT_NONE	      0
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index f10a9d3ff..390720479 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -284,6 +284,9 @@ config_target_isa:
+   /* Get default ISA from "-march" or its default value.  */
+   t.isa = loongarch_cpu_default_isa[t.cpu_arch];
+ 
++  if (t.cpu_arch != CPU_NATIVE)
++    t.isa.evolution |= loongarch_isa_base_features[t.isa.base];
++
+   /* Apply incremental changes.  */
+   /* "-march=native" overrides the default FPU type.  */
+ 
+-- 
+2.43.0
+
diff --git a/0036-rtl-ifcvt-introduce-rtl-ifcvt-enchancements.patch b/0036-rtl-ifcvt-introduce-rtl-ifcvt-enchancements.patch
new file mode 100644
index 0000000000000000000000000000000000000000..813eba9323f9b19aff134995289462e26eb04dfa
--- /dev/null
+++ b/0036-rtl-ifcvt-introduce-rtl-ifcvt-enchancements.patch
@@ -0,0 +1,560 @@
+From 4cae948c1c00ad7a59f0f234f809fbd9a0208eb4 Mon Sep 17 00:00:00 2001
+From: vchernon 
+Date: Wed, 28 Feb 2024 23:05:12 +0800
+Subject: [PATCH 02/18] [rtl-ifcvt] introduce rtl ifcvt enhancements     new
+ option:       -fifcvt-allow-complicated-cmps:         allows ifcvt to deal
+ with complicated cmps like
+
+        cmp reg1 (reg2 + reg3)
+
+        can increase compilation time
+    new param:
+      -param=ifcvt-allow-register-renaming=[0,1,2]
+        1 : allows ifcvt to rename registers in then and else bb
+        2 : allows to rename registers in condition and else/then bb
+        can increase compilation time and register pressure
+---
+ gcc/common.opt                                |   4 +
+ gcc/ifcvt.cc                                  | 291 +++++++++++++++---
+ gcc/params.opt                                |   4 +
+ .../gcc.c-torture/execute/ifcvt-renaming-1.c  |  35 +++
+ gcc/testsuite/gcc.dg/ifcvt-6.c                |  27 ++
+ 5 files changed, 311 insertions(+), 50 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c
+ create mode 100644 gcc/testsuite/gcc.dg/ifcvt-6.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index c7c6bc256..aa00fb7b0 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -3691,4 +3691,8 @@ fipa-ra
+ Common Var(flag_ipa_ra) Optimization
+ Use caller save register across calls if possible.
+ 
++fifcvt-allow-complicated-cmps
++Common Var(flag_ifcvt_allow_complicated_cmps) Optimization
++Allow RTL if-conversion pass to deal with complicated cmps (can increase compilation time).
++
+ ; This comment is to ensure we retain the blank line above.
+diff --git a/gcc/ifcvt.cc b/gcc/ifcvt.cc
+index 2c1eba312..584db7b55 100644
+--- a/gcc/ifcvt.cc
++++ b/gcc/ifcvt.cc
+@@ -886,7 +886,9 @@ noce_emit_store_flag (struct noce_if_info *if_info, rtx x, int reversep,
+     }
+ 
+   /* Don't even try if the comparison operands or the mode of X are weird.  */
+-  if (cond_complex || !SCALAR_INT_MODE_P (GET_MODE (x)))
++  if (!flag_ifcvt_allow_complicated_cmps
++      && (cond_complex
++	  || !SCALAR_INT_MODE_P (GET_MODE (x))))
+     return NULL_RTX;
+ 
+   return emit_store_flag (x, code, XEXP (cond, 0),
+@@ -1965,7 +1967,8 @@ insn_valid_noce_process_p (rtx_insn *insn, rtx cc)
+   /* Currently support only simple single sets in test_bb.  */
+   if (!sset
+       || !noce_operand_ok (SET_DEST (sset))
+-      || contains_ccmode_rtx_p (SET_DEST (sset))
++      || (!flag_ifcvt_allow_complicated_cmps
++	  && contains_ccmode_rtx_p (SET_DEST (sset)))
+       || !noce_operand_ok (SET_SRC (sset)))
+     return false;
+ 
+@@ -1979,13 +1982,17 @@ insn_valid_noce_process_p (rtx_insn *insn, rtx cc)
+    in this function.  */
+ 
+ static bool
+-bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename)
++bbs_ok_for_cmove_arith (basic_block bb_a,
++			basic_block bb_b,
++			rtx to_rename,
++			bitmap conflict_regs)
+ {
+   rtx_insn *a_insn;
+   bitmap bba_sets = BITMAP_ALLOC (®_obstack);
+-
++  bitmap intersections = BITMAP_ALLOC (®_obstack);
+   df_ref def;
+   df_ref use;
++  rtx_insn *last_a = last_active_insn (bb_a, FALSE);
+ 
+   FOR_BB_INSNS (bb_a, a_insn)
+     {
+@@ -1995,18 +2002,15 @@ bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename)
+       rtx sset_a = single_set (a_insn);
+ 
+       if (!sset_a)
+-	{
+-	  BITMAP_FREE (bba_sets);
+-	  return false;
+-	}
++	goto end_cmove_arith_check_and_fail;
+       /* Record all registers that BB_A sets.  */
+       FOR_EACH_INSN_DEF (def, a_insn)
+-	if (!(to_rename && DF_REF_REG (def) == to_rename))
++	if (!(to_rename && DF_REF_REG (def) == to_rename && a_insn == last_a))
+ 	  bitmap_set_bit (bba_sets, DF_REF_REGNO (def));
+     }
+ 
++  bitmap_and (intersections, df_get_live_in (bb_b), bba_sets);
+   rtx_insn *b_insn;
+-
+   FOR_BB_INSNS (bb_b, b_insn)
+     {
+       if (!active_insn_p (b_insn))
+@@ -2015,10 +2019,7 @@ bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename)
+       rtx sset_b = single_set (b_insn);
+ 
+       if (!sset_b)
+-	{
+-	  BITMAP_FREE (bba_sets);
+-	  return false;
+-	}
++	goto end_cmove_arith_check_and_fail;
+ 
+       /* Make sure this is a REG and not some instance
+ 	 of ZERO_EXTRACT or SUBREG or other dangerous stuff.
+@@ -2030,25 +2031,34 @@ bbs_ok_for_cmove_arith (basic_block bb_a, basic_block bb_b, rtx to_rename)
+       if (MEM_P (SET_DEST (sset_b)))
+ 	gcc_assert (rtx_equal_p (SET_DEST (sset_b), to_rename));
+       else if (!REG_P (SET_DEST (sset_b)))
+-	{
+-	  BITMAP_FREE (bba_sets);
+-	  return false;
+-	}
++	goto end_cmove_arith_check_and_fail;
+ 
+-      /* If the insn uses a reg set in BB_A return false.  */
++      /* If the insn uses a reg set in BB_A return false
++	 or try to collect register list for renaming.  */
+       FOR_EACH_INSN_USE (use, b_insn)
+ 	{
+-	  if (bitmap_bit_p (bba_sets, DF_REF_REGNO (use)))
++	  if (bitmap_bit_p (intersections, DF_REF_REGNO (use)))
+ 	    {
+-	      BITMAP_FREE (bba_sets);
+-	      return false;
++	      if (param_ifcvt_allow_register_renaming < 1)
++		  goto end_cmove_arith_check_and_fail;
++
++	      /* Those regs should be renamed.  We can't rename CC reg, but
++		 possibly we can provide combined comparison in the future.  */
++	      if (GET_MODE_CLASS (GET_MODE (DF_REF_REG (use))) == MODE_CC)
++		goto end_cmove_arith_check_and_fail;
++	      bitmap_set_bit (conflict_regs, DF_REF_REGNO (use));
+ 	    }
+ 	}
+-
+     }
+ 
+   BITMAP_FREE (bba_sets);
++  BITMAP_FREE (intersections);
+   return true;
++
++end_cmove_arith_check_and_fail:
++  BITMAP_FREE (bba_sets);
++  BITMAP_FREE (intersections);
++  return false;
+ }
+ 
+ /* Emit copies of all the active instructions in BB except the last.
+@@ -2103,6 +2113,142 @@ noce_emit_bb (rtx last_insn, basic_block bb, bool simple)
+   return true;
+ }
+ 
++/* This function tries to rename regs that intersect with considered bb
++   inside condition expression.  Condition expression will be moved down
++   if the optimization will be applied, so it is essential to be sure that
++   all intersected registers will be renamed otherwise transformation
++   can't be applied.  Function returns true if renaming was successful
++   and optimization can proceed further.  */
++
++static bool
++noce_rename_regs_in_cond (struct noce_if_info *if_info, bitmap cond_rename_regs)
++{
++  bool success = true;
++  if (bitmap_empty_p (cond_rename_regs))
++    return true;
++  if (param_ifcvt_allow_register_renaming < 2)
++    return false;
++  df_ref use;
++  rtx_insn *cmp_insn = if_info->cond_earliest;
++  /*  Jump instruction as a condition currently unsupported.  */
++  if (JUMP_P (cmp_insn))
++    return false;
++  rtx_insn *before_cmp = PREV_INSN (cmp_insn);
++  start_sequence ();
++  rtx_insn *copy_of_cmp = as_a  (copy_rtx (cmp_insn));
++  basic_block cmp_block = BLOCK_FOR_INSN (cmp_insn);
++  FOR_EACH_INSN_USE (use, cmp_insn)
++    {
++      if (bitmap_bit_p (cond_rename_regs, DF_REF_REGNO (use)))
++	{
++	  rtx use_reg = DF_REF_REG (use);
++	  rtx tmp = gen_reg_rtx (GET_MODE (use_reg));
++	  if (!validate_replace_rtx (use_reg, tmp, copy_of_cmp))
++	    {
++	      end_sequence ();
++	      return false;
++	    }
++	  noce_emit_move_insn (tmp, use_reg);
++	}
++    }
++
++  emit_insn (PATTERN (copy_of_cmp));
++  rtx_insn *seq = get_insns ();
++  unshare_all_rtl_in_chain (seq);
++  end_sequence ();
++
++  emit_insn_after_setloc (seq, before_cmp, INSN_LOCATION (cmp_insn));
++  delete_insn_and_edges (cmp_insn);
++  rtx_insn *insn;
++  FOR_BB_INSNS (cmp_block, insn)
++    df_insn_rescan (insn);
++
++  if_info->cond = noce_get_condition (if_info->jump,
++				      ©_of_cmp,
++				      if_info->then_else_reversed);
++  if_info->cond_earliest = copy_of_cmp;
++  if_info->rev_cond = NULL_RTX;
++
++  return success;
++}
++
++/* This function tries to rename regs that intersect with considered bb.
++   return true if the renaming was successful and optimization can
++   proceed further, false otherwise.  */
++static bool
++noce_rename_regs_in_bb (basic_block test_bb, bitmap rename_regs)
++{
++  if (bitmap_empty_p (rename_regs))
++    return true;
++  rtx_insn *insn;
++  rtx_insn *last_insn = last_active_insn (test_bb, FALSE);
++  bool res = true;
++  start_sequence ();
++  FOR_BB_INSNS (test_bb, insn)
++    {
++      if (!active_insn_p (insn))
++	continue;
++      /* Only ssets are supported for now.  */
++      rtx sset = single_set (insn);
++      gcc_assert (sset);
++      rtx x = SET_DEST (sset);
++      if (!REG_P (x) || !bitmap_bit_p (rename_regs, REGNO (x)))
++	continue;
++      /* Do not need to rename dest in the last instruction
++	 it will be renamed anyway.  */
++      if (insn == last_insn)
++	continue;
++      machine_mode mode = GET_MODE (x);
++      rtx tmp = gen_reg_rtx (mode);
++      if (!validate_replace_rtx_part (x, tmp, &SET_DEST (sset), insn))
++	{
++	  gcc_assert (insn != last_insn);
++	  /* We can generate additional move for such case,
++	     but it will increase register pressure.
++	     For now just stop transformation.  */
++	  rtx result_rtx = SET_DEST (single_set (last_insn));
++	  if (REG_P (result_rtx) && (x != result_rtx))
++	    {
++	      res = false;
++	      break;
++	    }
++	  if (!validate_replace_rtx (x, tmp, insn))
++	    gcc_unreachable ();
++	  noce_emit_move_insn (tmp,x);
++	}
++      set_used_flags (insn);
++      rtx_insn *rename_candidate;
++      for (rename_candidate = NEXT_INSN (insn);
++	   rename_candidate && rename_candidate!= NEXT_INSN (BB_END (test_bb));
++	   rename_candidate = NEXT_INSN (rename_candidate))
++	{
++	  if (!reg_overlap_mentioned_p (x, rename_candidate))
++	    continue;
++
++	  int replace_res = TRUE;
++	  if (rename_candidate == last_insn)
++	    {
++	      validate_replace_src_group (x, tmp, rename_candidate);
++	      replace_res = apply_change_group ();
++	    }
++	  else
++	    replace_res = validate_replace_rtx (x, tmp, rename_candidate);
++	  gcc_assert (replace_res);
++	  set_used_flags (rename_candidate);
++	}
++      set_used_flags (x);
++      set_used_flags (tmp);
++    }
++    rtx_insn *seq = get_insns ();
++    unshare_all_rtl_in_chain (seq);
++    end_sequence ();
++    emit_insn_before_setloc (seq, first_active_insn (test_bb),
++			     INSN_LOCATION (first_active_insn (test_bb)));
++    FOR_BB_INSNS (test_bb, insn)
++      df_insn_rescan (insn);
++  return res;
++}
++
+ /* Try more complex cases involving conditional_move.  */
+ 
+ static int
+@@ -2185,11 +2331,30 @@ noce_try_cmove_arith (struct noce_if_info *if_info)
+ 	  std::swap (then_bb, else_bb);
+ 	}
+     }
+-
++  bitmap else_bb_rename_regs = BITMAP_ALLOC (®_obstack);
++  bitmap then_bb_rename_regs = BITMAP_ALLOC (®_obstack);
+   if (then_bb && else_bb
+-      && (!bbs_ok_for_cmove_arith (then_bb, else_bb,  if_info->orig_x)
+-	  || !bbs_ok_for_cmove_arith (else_bb, then_bb,  if_info->orig_x)))
+-    return FALSE;
++      && (!bbs_ok_for_cmove_arith (then_bb, else_bb,
++				   if_info->orig_x,
++				   then_bb_rename_regs)
++	  || !bbs_ok_for_cmove_arith (else_bb, then_bb,
++				      if_info->orig_x,
++				      else_bb_rename_regs)))
++    {
++      BITMAP_FREE (then_bb_rename_regs);
++      BITMAP_FREE (else_bb_rename_regs);
++      return FALSE;
++    }
++  bool prepass_renaming = noce_rename_regs_in_bb (then_bb,
++						  then_bb_rename_regs)
++			  && noce_rename_regs_in_bb (else_bb,
++						     else_bb_rename_regs);
++
++  BITMAP_FREE (then_bb_rename_regs);
++  BITMAP_FREE (else_bb_rename_regs);
++
++  if (!prepass_renaming)
++   return FALSE;
+ 
+   start_sequence ();
+ 
+@@ -3072,7 +3237,8 @@ noce_operand_ok (const_rtx op)
+ 
+ static bool
+ bb_valid_for_noce_process_p (basic_block test_bb, rtx cond,
+-			      unsigned int *cost, bool *simple_p)
++			     unsigned int *cost, bool *simple_p,
++			     bitmap cond_rename_regs)
+ {
+   if (!test_bb)
+     return false;
+@@ -3112,8 +3278,9 @@ bb_valid_for_noce_process_p (basic_block test_bb, rtx cond,
+   rtx_insn *prev_last_insn = PREV_INSN (last_insn);
+   gcc_assert (prev_last_insn);
+ 
+-  /* For now, disallow setting x multiple times in test_bb.  */
+-  if (REG_P (x) && reg_set_between_p (x, first_insn, prev_last_insn))
++  if (REG_P (x)
++      && reg_set_between_p (x, first_insn, prev_last_insn)
++      && param_ifcvt_allow_register_renaming < 1)
+     return false;
+ 
+   bitmap test_bb_temps = BITMAP_ALLOC (®_obstack);
+@@ -3125,25 +3292,35 @@ bb_valid_for_noce_process_p (basic_block test_bb, rtx cond,
+   rtx_insn *insn;
+   FOR_BB_INSNS (test_bb, insn)
+     {
+-      if (insn != last_insn)
+-	{
+-	  if (!active_insn_p (insn))
+-	    continue;
++      if (insn == last_insn)
++	continue;
++      if (!active_insn_p (insn))
++	continue;
+ 
+-	  if (!insn_valid_noce_process_p (insn, cc))
+-	    goto free_bitmap_and_fail;
++      if (!insn_valid_noce_process_p (insn, cc))
++	goto free_bitmap_and_fail;
+ 
+-	  rtx sset = single_set (insn);
+-	  gcc_assert (sset);
++      rtx sset = single_set (insn);
++      gcc_assert (sset);
+ 
+-	  if (contains_mem_rtx_p (SET_SRC (sset))
+-	      || !REG_P (SET_DEST (sset))
+-	      || reg_overlap_mentioned_p (SET_DEST (sset), cond))
+-	    goto free_bitmap_and_fail;
++      if (contains_mem_rtx_p (SET_SRC (sset))
++	  || !REG_P (SET_DEST (sset)))
++	goto free_bitmap_and_fail;
+ 
+-	  potential_cost += pattern_cost (sset, speed_p);
+-	  bitmap_set_bit (test_bb_temps, REGNO (SET_DEST (sset)));
++      if (reg_overlap_mentioned_p (SET_DEST (sset), cond))
++	{
++	  if (param_ifcvt_allow_register_renaming < 1)
++	    goto free_bitmap_and_fail;
++	  rtx sset_dest = SET_DEST (sset);
++	  if (REG_P (sset_dest)
++	      && (GET_MODE_CLASS (GET_MODE (sset_dest)) != MODE_CC))
++	    bitmap_set_bit (cond_rename_regs, REGNO (sset_dest));
++	  else
++	    goto free_bitmap_and_fail;
+ 	}
++	potential_cost += pattern_cost (sset, speed_p);
++	if (SET_DEST (sset) != SET_DEST (last_set))
++	  bitmap_set_bit (test_bb_temps, REGNO (SET_DEST (sset)));
+     }
+ 
+   /* If any of the intermediate results in test_bb are live after test_bb
+@@ -3777,15 +3954,29 @@ noce_process_if_block (struct noce_if_info *if_info)
+ 
+   bool speed_p = optimize_bb_for_speed_p (test_bb);
+   unsigned int then_cost = 0, else_cost = 0;
++  bitmap cond_rename_regs = BITMAP_ALLOC (®_obstack);
+   if (!bb_valid_for_noce_process_p (then_bb, cond, &then_cost,
+-				    &if_info->then_simple))
+-    return false;
++				    &if_info->then_simple, cond_rename_regs))
++    {
++      BITMAP_FREE (cond_rename_regs);
++      return false;
++    }
+ 
+   if (else_bb
+       && !bb_valid_for_noce_process_p (else_bb, cond, &else_cost,
+-				       &if_info->else_simple))
+-    return false;
++				       &if_info->else_simple, cond_rename_regs))
++    {
++      BITMAP_FREE (cond_rename_regs);
++      return false;
++    }
+ 
++  if (!noce_rename_regs_in_cond (if_info, cond_rename_regs))
++    {
++      BITMAP_FREE (cond_rename_regs);
++      return false;
++    }
++  BITMAP_FREE (cond_rename_regs);
++  cond = if_info->cond;
+   if (speed_p)
+     if_info->original_cost += average_cost (then_cost, else_cost,
+ 					    find_edge (test_bb, then_bb));
+@@ -5823,12 +6014,13 @@ if_convert (bool after_combine)
+ {
+   basic_block bb;
+   int pass;
+-
+   if (optimize == 1)
+     {
+       df_live_add_problem ();
+       df_live_set_all_dirty ();
+     }
++  free_dominance_info (CDI_DOMINATORS);
++  cleanup_cfg (CLEANUP_EXPENSIVE);
+ 
+   /* Record whether we are after combine pass.  */
+   ifcvt_after_combine = after_combine;
+@@ -5933,7 +6125,6 @@ rest_of_handle_if_conversion (void)
+ 	  dump_reg_info (dump_file);
+ 	  dump_flow_info (dump_file, dump_flags);
+ 	}
+-      cleanup_cfg (CLEANUP_EXPENSIVE);
+       if_convert (false);
+       if (num_updated_if_blocks)
+ 	/* Get rid of any dead CC-related instructions.  */
+diff --git a/gcc/params.opt b/gcc/params.opt
+index d2196dc68..ba87f820b 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -669,6 +669,10 @@ Maximum permissible cost for the sequence that would be generated by the RTL if-
+ Common Joined UInteger Var(param_max_rtl_if_conversion_unpredictable_cost) Init(40) IntegerRange(0, 200) Param Optimization
+ Maximum permissible cost for the sequence that would be generated by the RTL if-conversion pass for a branch that is considered unpredictable.
+ 
++-param=ifcvt-allow-register-renaming=
++Common Joined UInteger Var(param_ifcvt_allow_register_renaming) IntegerRange(0, 2) Param Optimization
++Allow RTL if-conversion pass to aggressively rename registers in basic blocks.  Sometimes additional moves will be created.
++
+ -param=max-sched-extend-regions-iters=
+ Common Joined UInteger Var(param_max_sched_extend_regions_iters) Param Optimization
+ The maximum number of iterations through CFG to extend regions.
+diff --git a/gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c b/gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c
+new file mode 100644
+index 000000000..65c4d4140
+--- /dev/null
++++ b/gcc/testsuite/gcc.c-torture/execute/ifcvt-renaming-1.c
+@@ -0,0 +1,35 @@
++
++extern void abort(void);
++
++__attribute__ ((noinline))
++int foo (int x, int y, int z, int a, int b)
++{
++  if (a < 2) {
++      if (a == 0) {
++	  if (x - y < 0)
++	    x = x - y + z;
++	  else
++	    x = x - y;
++	}
++      else {
++	  if (x + y >= z)
++	    x = x + y - z;
++	  else
++	    x = x + y;
++	}
++    }
++  return x;
++}
++
++int main(void) {
++  if (foo (5,10,7,0,1) != 2) // x - y + z = -5 + 7 = 2
++    abort ();
++  if (foo (50,10,7,0,1) != 40) // x - y = 40
++    abort ();
++  if (foo (5,10,7,1,1) != 8) // x + y - z = 5 + 10 - 7 = 8
++    abort ();
++  if (foo (5,10,70,1,1) != 15) // x + y = 15
++    abort ();
++  return 0;
++}
++
+diff --git a/gcc/testsuite/gcc.dg/ifcvt-6.c b/gcc/testsuite/gcc.dg/ifcvt-6.c
+new file mode 100644
+index 000000000..be9a67b3f
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/ifcvt-6.c
+@@ -0,0 +1,27 @@
++/* { dg-do compile { target { aarch64*-*-* } } } */
++/* { dg-options "-fdump-rtl-ce1 -O2 --param max-rtl-if-conversion-unpredictable-cost=100 --param max-rtl-if-conversion-predictable-cost=100 --param=ifcvt-allow-register-renaming=2 -fifcvt-allow-complicated-cmps" } */
++
++typedef unsigned int uint16_t;
++
++uint16_t
++foo (uint16_t x, uint16_t y, uint16_t z, uint16_t a,
++     uint16_t b, uint16_t c, uint16_t d) {
++  int i = 1;
++  int j = 1;
++  if (a > b) {
++      j = x;
++      if (b > c)
++	i = y;
++      else
++	i = z;
++    }
++  else {
++      j = y;
++      if (c > d)
++	i = z;
++    }
++  return i * j;
++}
++
++/* { dg-final { scan-rtl-dump "7 true changes made" "ce1" } } */
++
+-- 
+2.33.0
+
diff --git a/0037-LoongArch-Take-the-advantage-of-mdiv32-if-it-s-enabl.patch b/0037-LoongArch-Take-the-advantage-of-mdiv32-if-it-s-enabl.patch
new file mode 100644
index 0000000000000000000000000000000000000000..28f3226b3e4e6252ccfab3f1bcceceb3eff647d7
--- /dev/null
+++ b/0037-LoongArch-Take-the-advantage-of-mdiv32-if-it-s-enabl.patch
@@ -0,0 +1,156 @@
+From 6b483504c4fbb2a05a17d67e8f51b72149f1bbf9 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 16 Nov 2023 09:21:47 +0800
+Subject: [PATCH 037/188] LoongArch: Take the advantage of -mdiv32 if it's
+ enabled
+
+With -mdiv32, we can assume div.w[u] and mod.w[u] works on low 32 bits
+of a 64-bit GPR even if it's not sign-extended.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (DIV): New mode iterator.
+	(3): Don't expand if TARGET_DIV32.
+	(di3_fake): Disable if TARGET_DIV32.
+	(*3): Allow SImode if TARGET_DIV32.
+	(si3_extended): New insn if TARGET_DIV32.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/div-div32.c: New test.
+	* gcc.target/loongarch/div-no-div32.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 31 ++++++++++++++++---
+ .../gcc.target/loongarch/div-div32.c          | 31 +++++++++++++++++++
+ .../gcc.target/loongarch/div-no-div32.c       | 11 +++++++
+ 3 files changed, 68 insertions(+), 5 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/div-div32.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/div-no-div32.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 52e40a208..c4e7af107 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -408,6 +408,10 @@
+ ;; st.w.
+ (define_mode_iterator ST_ANY [QHWD ANYF])
+ 
++;; A mode for anything legal as a input of a div or mod instruction.
++(define_mode_iterator DIV [(DI "TARGET_64BIT")
++			   (SI "!TARGET_64BIT || TARGET_DIV32")])
++
+ ;; In GPR templates, a string like "mul." will expand to "mul.w" in the
+ ;; 32-bit version and "mul.d" in the 64-bit version.
+ (define_mode_attr d [(SI "w") (DI "d")])
+@@ -914,7 +918,7 @@
+ 		     (match_operand:GPR 2 "register_operand")))]
+   ""
+ {
+- if (GET_MODE (operands[0]) == SImode && TARGET_64BIT)
++ if (GET_MODE (operands[0]) == SImode && TARGET_64BIT && !TARGET_DIV32)
+   {
+     rtx reg1 = gen_reg_rtx (DImode);
+     rtx reg2 = gen_reg_rtx (DImode);
+@@ -934,9 +938,9 @@
+ })
+ 
+ (define_insn "*3"
+-  [(set (match_operand:X 0 "register_operand" "=r,&r,&r")
+-	(any_div:X (match_operand:X 1 "register_operand" "r,r,0")
+-		   (match_operand:X 2 "register_operand" "r,r,r")))]
++  [(set (match_operand:DIV 0 "register_operand" "=r,&r,&r")
++	(any_div:DIV (match_operand:DIV 1 "register_operand" "r,r,0")
++		     (match_operand:DIV 2 "register_operand" "r,r,r")))]
+   ""
+ {
+   return loongarch_output_division (".\t%0,%1,%2", operands);
+@@ -949,6 +953,23 @@
+ 	(const_string "yes")
+ 	(const_string "no")))])
+ 
++(define_insn "si3_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r,&r,&r")
++	(sign_extend
++	  (any_div:SI (match_operand:SI 1 "register_operand" "r,r,0")
++		      (match_operand:SI 2 "register_operand" "r,r,r"))))]
++  "TARGET_64BIT && TARGET_DIV32"
++{
++  return loongarch_output_division (".w\t%0,%1,%2", operands);
++}
++  [(set_attr "type" "idiv")
++   (set_attr "mode" "SI")
++   (set (attr "enabled")
++      (if_then_else
++	(match_test "!!which_alternative == loongarch_check_zero_div_p()")
++	(const_string "yes")
++	(const_string "no")))])
++
+ (define_insn "di3_fake"
+   [(set (match_operand:DI 0 "register_operand" "=r,&r,&r")
+ 	(sign_extend:DI
+@@ -957,7 +978,7 @@
+ 	     (any_div:DI (match_operand:DI 1 "register_operand" "r,r,0")
+ 			 (match_operand:DI 2 "register_operand" "r,r,r")) 0)]
+ 	  UNSPEC_FAKE_ANY_DIV)))]
+-  "TARGET_64BIT"
++  "TARGET_64BIT && !TARGET_DIV32"
+ {
+   return loongarch_output_division (".w\t%0,%1,%2", operands);
+ }
+diff --git a/gcc/testsuite/gcc.target/loongarch/div-div32.c b/gcc/testsuite/gcc.target/loongarch/div-div32.c
+new file mode 100644
+index 000000000..8b1f686ec
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/div-div32.c
+@@ -0,0 +1,31 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d -mdiv32" } */
++/* { dg-final { scan-assembler "div\.w" } } */
++/* { dg-final { scan-assembler "div\.wu" } } */
++/* { dg-final { scan-assembler "mod\.w" } } */
++/* { dg-final { scan-assembler "mod\.wu" } } */
++/* { dg-final { scan-assembler-not "slli\.w.*,0" } } */
++
++int
++divw (long a, long b)
++{
++  return (int)a / (int)b;
++}
++
++unsigned int
++divwu (long a, long b)
++{
++  return (unsigned int)a / (unsigned int)b;
++}
++
++int
++modw (long a, long b)
++{
++  return (int)a % (int)b;
++}
++
++unsigned int
++modwu (long a, long b)
++{
++  return (unsigned int)a % (unsigned int)b;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/div-no-div32.c b/gcc/testsuite/gcc.target/loongarch/div-no-div32.c
+new file mode 100644
+index 000000000..f0f697ba5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/div-no-div32.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "div\.w" } } */
++/* { dg-final { scan-assembler "div\.wu" } } */
++/* { dg-final { scan-assembler "mod\.w" } } */
++/* { dg-final { scan-assembler "mod\.wu" } } */
++
++/* -mno-div32 should be implied by -march=loongarch64.  */
++/* { dg-final { scan-assembler-times "slli\.w\[^\n\]*0" 8 } } */
++
++#include "div-div32.c"
+-- 
+2.43.0
+
diff --git a/0037-Perform-early-if-conversion-of-simple-arithmetic.patch b/0037-Perform-early-if-conversion-of-simple-arithmetic.patch
new file mode 100644
index 0000000000000000000000000000000000000000..14de678e3eb6cf0242eb59aaeecc2dd340c34c39
--- /dev/null
+++ b/0037-Perform-early-if-conversion-of-simple-arithmetic.patch
@@ -0,0 +1,109 @@
+From 310eade1450995b55d9f8120561022fbf164b2ec Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 00812787 
+Date: Thu, 12 Jan 2023 14:52:49 +0300
+Subject: [PATCH 03/18] Perform early if-conversion of simple arithmetic
+
+---
+ gcc/common.opt                      |  4 ++++
+ gcc/match.pd                        | 25 +++++++++++++++++++
+ gcc/testsuite/gcc.dg/ifcvt-gimple.c | 37 +++++++++++++++++++++++++++++
+ 3 files changed, 66 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/ifcvt-gimple.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index aa00fb7b0..dac477c04 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1821,6 +1821,10 @@ fif-conversion2
+ Common Var(flag_if_conversion2) Optimization
+ Perform conversion of conditional jumps to conditional execution.
+ 
++fif-conversion-gimple
++Common Var(flag_if_conversion_gimple) Optimization
++Perform conversion of conditional jumps to branchless equivalents during gimple transformations.
++
+ fstack-reuse=
+ Common Joined RejectNegative Enum(stack_reuse_level) Var(flag_stack_reuse) Init(SR_ALL) Optimization
+ -fstack-reuse=[all|named_vars|none]	Set stack reuse level for local variables.
+diff --git a/gcc/match.pd b/gcc/match.pd
+index 6f24d5079..3cbaf2a5b 100644
+--- a/gcc/match.pd
++++ b/gcc/match.pd
+@@ -4278,6 +4278,31 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+   )
+  )
+ )
++
++(if (flag_if_conversion_gimple)
++ (for simple_op (plus minus bit_and bit_ior bit_xor)
++  (simplify
++   (cond @0 (simple_op @1 INTEGER_CST@2) @1)
++   (switch
++    /* a = cond ? a + 1 : a -> a = a + ((int) cond) */
++    (if (integer_onep (@2))
++     (simple_op @1 (convert (convert:boolean_type_node @0))))
++    /* a = cond ? a + powerof2cst : a ->
++       a = a + ((int) cond) << log2 (powerof2cst) */
++    (if (INTEGRAL_TYPE_P (type) && integer_pow2p (@2))
++     (with
++      {
++	tree shift = build_int_cst (integer_type_node, tree_log2 (@2));
++      }
++      (simple_op @1 (lshift (convert (convert:boolean_type_node @0))
++			    { shift; })
++      )
++     )
++    )
++   )
++  )
++ )
++)
+ #endif
+ 
+ #if GIMPLE
+diff --git a/gcc/testsuite/gcc.dg/ifcvt-gimple.c b/gcc/testsuite/gcc.dg/ifcvt-gimple.c
+new file mode 100644
+index 000000000..0f7c87e5c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/ifcvt-gimple.c
+@@ -0,0 +1,37 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fif-conversion-gimple -fdump-tree-optimized" } */
++
++int test_int (int optimizable_int) {
++    if (optimizable_int > 5)
++	++optimizable_int;
++    return optimizable_int;
++}
++
++int test_int_pow2 (int optimizable_int_pow2) {
++    if (optimizable_int_pow2 <= 4)
++	optimizable_int_pow2 += 1024;
++    return optimizable_int_pow2;
++}
++
++int test_int_non_pow2 (int not_optimizable_int_non_pow2) {
++    if (not_optimizable_int_non_pow2 == 1)
++	not_optimizable_int_non_pow2 += 513;
++    return not_optimizable_int_non_pow2;
++}
++
++float test_float (float not_optimizable_float) {
++    if (not_optimizable_float > 5)
++	not_optimizable_float += 1;
++    return not_optimizable_float;
++}
++
++/* Expecting if-else block in test_float and test_int_non_pow2 only. */
++/* { dg-final { scan-tree-dump-not "if \\(optimizable" "optimized" } } */
++/* { dg-final { scan-tree-dump "if \\(not_optimizable_int_non_pow2" "optimized" } } */
++/* { dg-final { scan-tree-dump "if \\(not_optimizable_float" "optimized" } } */
++/* { dg-final { scan-tree-dump-times "if " 2 "optimized" } } */
++/* { dg-final { scan-tree-dump-times "else" 2 "optimized" } } */
++
++/* Expecting shifted result only for optimizable_int_pow2. */
++/* { dg-final { scan-tree-dump-times " << " 1 "optimized" } } */
++/* { dg-final { scan-tree-dump " << 10;" "optimized" } } */
+-- 
+2.33.0
+
diff --git a/0038-Add-option-to-allow-matching-uaddsub-overflow-for-wi.patch b/0038-Add-option-to-allow-matching-uaddsub-overflow-for-wi.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9b2be003020a7f8af73007a10dbdccc38d7935a9
--- /dev/null
+++ b/0038-Add-option-to-allow-matching-uaddsub-overflow-for-wi.patch
@@ -0,0 +1,252 @@
+From 6684509e81e4341675c73a7dc853180229a8abcb Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 00812787 
+Date: Tue, 24 Jan 2023 16:43:40 +0300
+Subject: [PATCH 04/18] Add option to allow matching uaddsub overflow for widen
+ ops too.
+
+---
+ gcc/common.opt                 |   5 ++
+ gcc/testsuite/gcc.dg/uaddsub.c | 143 +++++++++++++++++++++++++++++++++
+ gcc/tree-ssa-math-opts.cc      |  43 ++++++++--
+ 3 files changed, 184 insertions(+), 7 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/uaddsub.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index dac477c04..39c90604e 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -3106,6 +3106,11 @@ freciprocal-math
+ Common Var(flag_reciprocal_math) SetByCombined Optimization
+ Same as -fassociative-math for expressions which include division.
+ 
++fuaddsub-overflow-match-all
++Common Var(flag_uaddsub_overflow_match_all)
++Match unsigned add/sub overflow even if the target does not support
++the corresponding instruction.
++
+ ; Nonzero means that unsafe floating-point math optimizations are allowed
+ ; for the sake of speed.  IEEE compliance is not guaranteed, and operations
+ ; are allowed to assume that their arguments and results are "normal"
+diff --git a/gcc/testsuite/gcc.dg/uaddsub.c b/gcc/testsuite/gcc.dg/uaddsub.c
+new file mode 100644
+index 000000000..96c26d308
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/uaddsub.c
+@@ -0,0 +1,143 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fuaddsub-overflow-match-all -fdump-tree-optimized" } */
++#include 
++
++typedef unsigned __int128 uint128_t;
++typedef struct uint256_t
++{
++  uint128_t lo;
++  uint128_t hi;
++} uint256_t;
++
++uint16_t add16 (uint8_t a, uint8_t b)
++{
++  uint8_t tmp = a + b;
++  uint8_t overflow = 0;
++  if (tmp < a)
++    overflow = 1;
++
++  uint16_t res = overflow;
++  res <<= 8;
++  res += tmp;
++  return res;
++}
++
++uint32_t add32 (uint16_t a, uint16_t b)
++{
++  uint16_t tmp = a + b;
++  uint16_t overflow = 0;
++  if (tmp < a)
++    overflow = 1;
++
++  uint32_t res = overflow;
++  res <<= 16;
++  res += tmp;
++  return res;
++}
++
++uint64_t add64 (uint32_t a, uint32_t b)
++{
++  uint32_t tmp = a + b;
++  uint32_t overflow = 0;
++  if (tmp < a)
++    overflow = 1;
++
++  uint64_t res = overflow;
++  res <<= 32;
++  res += tmp;
++  return res;
++}
++
++uint128_t add128 (uint64_t a, uint64_t b)
++{
++  uint64_t tmp = a + b;
++  uint64_t overflow = 0;
++  if (tmp < a)
++    overflow = 1;
++
++  uint128_t res = overflow;
++  res <<= 64;
++  res += tmp;
++  return res;
++}
++
++uint256_t add256 (uint128_t a, uint128_t b)
++{
++  uint128_t tmp = a + b;
++  uint128_t overflow = 0;
++  if (tmp < a)
++    overflow = 1;
++
++  uint256_t res;
++  res.hi = overflow;
++  res.lo = tmp;
++  return res;
++}
++
++uint16_t sub16 (uint8_t a, uint8_t b)
++{
++  uint8_t tmp = a - b;
++  uint8_t overflow = 0;
++  if (tmp > a)
++    overflow = -1;
++
++  uint16_t res = overflow;
++  res <<= 8;
++  res += tmp;
++  return res;
++}
++
++uint32_t sub32 (uint16_t a, uint16_t b)
++{
++  uint16_t tmp = a - b;
++  uint16_t overflow = 0;
++  if (tmp > a)
++    overflow = -1;
++
++  uint32_t res = overflow;
++  res <<= 16;
++  res += tmp;
++  return res;
++}
++
++uint64_t sub64 (uint32_t a, uint32_t b)
++{
++  uint32_t tmp = a - b;
++  uint32_t overflow = 0;
++  if (tmp > a)
++    overflow = -1;
++
++  uint64_t res = overflow;
++  res <<= 32;
++  res += tmp;
++  return res;
++}
++
++uint128_t sub128 (uint64_t a, uint64_t b)
++{
++  uint64_t tmp = a - b;
++  uint64_t overflow = 0;
++  if (tmp > a)
++    overflow = -1;
++
++  uint128_t res = overflow;
++  res <<= 64;
++  res += tmp;
++  return res;
++}
++
++uint256_t sub256 (uint128_t a, uint128_t b)
++{
++  uint128_t tmp = a - b;
++  uint128_t overflow = 0;
++  if (tmp > a)
++    overflow = -1;
++
++  uint256_t res;
++  res.hi = overflow;
++  res.lo = tmp;
++  return res;
++}
++
++/* { dg-final { scan-tree-dump-times "= .ADD_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 5 "optimized" } } */
++/* { dg-final { scan-tree-dump-times "= .SUB_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 5 "optimized" } } */
+diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc
+index 232e903b0..55d6ee8ae 100644
+--- a/gcc/tree-ssa-math-opts.cc
++++ b/gcc/tree-ssa-math-opts.cc
+@@ -3468,6 +3468,27 @@ convert_mult_to_fma (gimple *mul_stmt, tree op1, tree op2,
+     }
+ }
+ 
++/* Check if the corresponding operation has wider equivalent on the target.  */
++
++static bool
++wider_optab_check_p (optab op, machine_mode mode, int unsignedp)
++{
++  machine_mode wider_mode;
++  FOR_EACH_WIDER_MODE (wider_mode, mode)
++    {
++      machine_mode next_mode;
++      if (optab_handler (op, wider_mode) != CODE_FOR_nothing
++	  || (op == smul_optab
++	      && GET_MODE_WIDER_MODE (wider_mode).exists (&next_mode)
++	      && (find_widening_optab_handler ((unsignedp
++						? umul_widen_optab
++						: smul_widen_optab),
++						next_mode, mode))))
++	return true;
++    }
++
++  return false;
++}
+ 
+ /* Helper function of match_arith_overflow.  For MUL_OVERFLOW, if we have
+    a check for non-zero like:
+@@ -3903,15 +3924,22 @@ match_arith_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
+ 		       || code == MINUS_EXPR
+ 		       || code == MULT_EXPR
+ 		       || code == BIT_NOT_EXPR);
++  int unsignedp = TYPE_UNSIGNED (type);
+   if (!INTEGRAL_TYPE_P (type)
+-      || !TYPE_UNSIGNED (type)
+-      || has_zero_uses (lhs)
+-      || (code != PLUS_EXPR
+-	  && code != MULT_EXPR
+-	  && optab_handler (code == MINUS_EXPR ? usubv4_optab : uaddv4_optab,
+-			    TYPE_MODE (type)) == CODE_FOR_nothing))
++      || !unsignedp
++      || has_zero_uses (lhs))
+     return false;
+ 
++  if (code == PLUS_EXPR || code == MINUS_EXPR)
++    {
++      machine_mode mode = TYPE_MODE (type);
++      optab op = code == PLUS_EXPR ? uaddv4_optab : usubv4_optab;
++      if (optab_handler (op, mode) == CODE_FOR_nothing
++	  && (!flag_uaddsub_overflow_match_all
++	      || !wider_optab_check_p (op, mode, unsignedp)))
++	return false;
++    }
++
+   tree rhs1 = gimple_assign_rhs1 (stmt);
+   tree rhs2 = gimple_assign_rhs2 (stmt);
+   FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
+@@ -3986,7 +4014,8 @@ match_arith_overflow (gimple_stmt_iterator *gsi, gimple *stmt,
+       || (code != MULT_EXPR && (code == BIT_NOT_EXPR ? use_seen : !use_seen))
+       || (code == PLUS_EXPR
+ 	  && optab_handler (uaddv4_optab,
+-			    TYPE_MODE (type)) == CODE_FOR_nothing)
++			    TYPE_MODE (type)) == CODE_FOR_nothing
++	  && !flag_uaddsub_overflow_match_all)
+       || (code == MULT_EXPR
+ 	  && optab_handler (cast_stmt ? mulv4_optab : umulv4_optab,
+ 			    TYPE_MODE (type)) == CODE_FOR_nothing))
+-- 
+2.33.0
+
diff --git a/0038-LoongArch-Don-t-emit-dbar-0x700-if-mld-seq-sa.patch b/0038-LoongArch-Don-t-emit-dbar-0x700-if-mld-seq-sa.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9696f857d1f6186d43e59e6c109ffcf66b6b6aa7
--- /dev/null
+++ b/0038-LoongArch-Don-t-emit-dbar-0x700-if-mld-seq-sa.patch
@@ -0,0 +1,61 @@
+From 42368d6ab1200c157ff473c37889b56b596040e2 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 16 Nov 2023 09:30:14 +0800
+Subject: [PATCH 038/188] LoongArch: Don't emit dbar 0x700 if -mld-seq-sa
+
+This option (CPUCFG word 0x3 bit 23) means "the hardware guarantee that
+two loads on the same address won't be reordered with each other".  Thus
+we can omit the "load-load" barrier dbar 0x700.
+
+This is only a micro-optimization because dbar 0x700 is already treated
+as nop if the hardware supports LD_SEQ_SA.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_print_operand): Don't
+	print dbar 0x700 if TARGET_LD_SEQ_SA.
+	* config/loongarch/sync.md (atomic_load): Likewise.
+---
+ gcc/config/loongarch/loongarch.cc | 2 +-
+ gcc/config/loongarch/sync.md      | 9 +++++----
+ 2 files changed, 6 insertions(+), 5 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 8bd46da62..c86b787c4 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6057,7 +6057,7 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+       if (loongarch_cas_failure_memorder_needs_acquire (
+ 	    memmodel_from_int (INTVAL (op))))
+ 	fputs ("dbar\t0b10100", file);
+-      else
++      else if (!TARGET_LD_SEQ_SA)
+ 	fputs ("dbar\t0x700", file);
+       break;
+ 
+diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
+index f4673c856..65443c899 100644
+--- a/gcc/config/loongarch/sync.md
++++ b/gcc/config/loongarch/sync.md
+@@ -119,13 +119,14 @@
+     case MEMMODEL_SEQ_CST:
+       return "dbar\t0x11\\n\\t"
+ 	     "ld.\t%0,%1\\n\\t"
+-	     "dbar\t0x14\\n\\t";
++	     "dbar\t0x14";
+     case MEMMODEL_ACQUIRE:
+       return "ld.\t%0,%1\\n\\t"
+-	     "dbar\t0x14\\n\\t";
++	     "dbar\t0x14";
+     case MEMMODEL_RELAXED:
+-      return "ld.\t%0,%1\\n\\t"
+-	     "dbar\t0x700\\n\\t";
++      return TARGET_LD_SEQ_SA ? "ld.\t%0,%1\\n\\t"
++			      : "ld.\t%0,%1\\n\\t"
++				"dbar\t0x700";
+ 
+     default:
+       /* The valid memory order variants are __ATOMIC_RELAXED, __ATOMIC_SEQ_CST,
+-- 
+2.43.0
+
diff --git a/0039-LoongArch-Add-fine-grained-control-for-LAM_BH-and-LA.patch b/0039-LoongArch-Add-fine-grained-control-for-LAM_BH-and-LA.patch
new file mode 100644
index 0000000000000000000000000000000000000000..03b289416053c67e0c8a6ed4d7c5feb203d2dba5
--- /dev/null
+++ b/0039-LoongArch-Add-fine-grained-control-for-LAM_BH-and-LA.patch
@@ -0,0 +1,208 @@
+From 416bdd180a6c0dab4736a6da26de245cb0487c0e Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 25 Oct 2024 02:13:53 +0000
+Subject: [PATCH 039/188] LoongArch: Add fine-grained control for LAM_BH and
+ LAMCAS
+
+gcc/ChangeLog:
+
+        * config/loongarch/genopts/isa-evolution.in: (lam-bh, lamcas):
+        Add.
+        * config/loongarch/loongarch-str.h: Regenerate.
+        * config/loongarch/loongarch.opt: Regenerate.
+        * config/loongarch/loongarch-cpucfg-map.h: Regenerate.
+        * config/loongarch/loongarch-cpu.cc
+        (ISA_BASE_LA64V110_FEATURES): Include OPTION_MASK_ISA_LAM_BH
+        and OPTION_MASK_ISA_LAMCAS.
+        * config/loongarch/sync.md (atomic_add): Use
+        TARGET_LAM_BH instead of ISA_BASE_IS_LA64V110.  Remove empty
+        lines from assembly output.
+        (atomic_exchange_short): Likewise.
+        (atomic_exchange): Likewise.
+        (atomic_fetch_add_short): Likewise.
+        (atomic_fetch_add): Likewise.
+        (atomic_cas_value_strong_amcas): Use TARGET_LAMCAS instead
+        of ISA_BASE_IS_LA64V110.
+        (atomic_compare_and_swap): Likewise.
+        (atomic_compare_and_swap): Likewise.
+        (atomic_compare_and_swap): Likewise.
+        * config/loongarch/loongarch.cc (loongarch_asm_code_end): Dump
+        status if -mlam-bh and -mlamcas if -fverbose-asm.
+---
+ gcc/config/loongarch/genopts/isa-evolution.in |  2 ++
+ gcc/config/loongarch/loongarch-cpu.cc         |  3 ++-
+ gcc/config/loongarch/loongarch-cpucfg-map.h   |  2 ++
+ gcc/config/loongarch/loongarch-str.h          |  2 ++
+ gcc/config/loongarch/loongarch.cc             |  2 ++
+ gcc/config/loongarch/loongarch.opt            |  8 ++++++++
+ gcc/config/loongarch/sync.md                  | 18 +++++++++---------
+ 7 files changed, 27 insertions(+), 10 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/isa-evolution.in b/gcc/config/loongarch/genopts/isa-evolution.in
+index e58f0d6a1..a6bc3f87f 100644
+--- a/gcc/config/loongarch/genopts/isa-evolution.in
++++ b/gcc/config/loongarch/genopts/isa-evolution.in
+@@ -1,2 +1,4 @@
+ 2	26	div32		Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
++2	27	lam-bh		Support am{swap/add}[_db].{b/h} instructions.
++2	28	lamcas		Support amcas[_db].{b/h/w/d} instructions.
+ 3	23	ld-seq-sa	Do not need load-load barriers (dbar 0x700).
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index 76d66fa55..bbce82c9c 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -38,7 +38,8 @@ along with GCC; see the file COPYING3.  If not see
+    initializers!  */
+ 
+ #define ISA_BASE_LA64V110_FEATURES \
+-  (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA)
++  (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA \
++   | OPTION_MASK_ISA_LAM_BH | OPTION_MASK_ISA_LAMCAS)
+ 
+ int64_t loongarch_isa_base_features[N_ISA_BASE_TYPES] = {
+   /* [ISA_BASE_LA64V100] = */ 0,
+diff --git a/gcc/config/loongarch/loongarch-cpucfg-map.h b/gcc/config/loongarch/loongarch-cpucfg-map.h
+index 0c078c397..02ff16712 100644
+--- a/gcc/config/loongarch/loongarch-cpucfg-map.h
++++ b/gcc/config/loongarch/loongarch-cpucfg-map.h
+@@ -30,6 +30,8 @@ static constexpr struct {
+   HOST_WIDE_INT isa_evolution_bit;
+ } cpucfg_map[] = {
+   { 2, 1u << 26, OPTION_MASK_ISA_DIV32 },
++  { 2, 1u << 27, OPTION_MASK_ISA_LAM_BH },
++  { 2, 1u << 28, OPTION_MASK_ISA_LAMCAS },
+   { 3, 1u << 23, OPTION_MASK_ISA_LD_SEQ_SA },
+ };
+ 
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index cd9dbb41b..0fee9abe5 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -70,5 +70,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_EXPLICIT_RELOCS_ALWAYS "always"
+ 
+ #define OPTSTR_DIV32   "div32"
++#define OPTSTR_LAM_BH  "lam-bh"
++#define OPTSTR_LAMCAS  "lamcas"
+ #define OPTSTR_LD_SEQ_SA   "ld-seq-sa"
+ #endif /* LOONGARCH_STR_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index c86b787c4..33d23a731 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -11448,6 +11448,8 @@ loongarch_asm_code_end (void)
+       fprintf (asm_out_file, "%s Base ISA: %s\n", ASM_COMMENT_START,
+ 	       loongarch_isa_base_strings [la_target.isa.base]);
+       DUMP_FEATURE (TARGET_DIV32);
++      DUMP_FEATURE (TARGET_LAM_BH);
++      DUMP_FEATURE (TARGET_LAMCAS);
+       DUMP_FEATURE (TARGET_LD_SEQ_SA);
+     }
+ 
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 5251f705d..ea0d5bb4e 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -267,6 +267,14 @@ mdiv32
+ Target Mask(ISA_DIV32) Var(isa_evolution)
+ Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
+ 
++mlam-bh
++Target Mask(ISA_LAM_BH) Var(isa_evolution)
++Support am{swap/add}[_db].{b/h} instructions.
++
++mlamcas
++Target Mask(ISA_LAMCAS) Var(isa_evolution)
++Support amcas[_db].{b/h/w/d} instructions.
++
+ mld-seq-sa
+ Target Mask(ISA_LD_SEQ_SA) Var(isa_evolution)
+ Do not need load-load barriers (dbar 0x700).
+diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
+index 65443c899..a678e7131 100644
+--- a/gcc/config/loongarch/sync.md
++++ b/gcc/config/loongarch/sync.md
+@@ -124,7 +124,7 @@
+       return "ld.\t%0,%1\\n\\t"
+ 	     "dbar\t0x14";
+     case MEMMODEL_RELAXED:
+-      return TARGET_LD_SEQ_SA ? "ld.\t%0,%1\\n\\t"
++      return TARGET_LD_SEQ_SA ? "ld.\t%0,%1"
+ 			      : "ld.\t%0,%1\\n\\t"
+ 				"dbar\t0x700";
+ 
+@@ -193,7 +193,7 @@
+ 		       (match_operand:SHORT 1 "reg_or_0_operand" "rJ"))
+ 	   (match_operand:SI 2 "const_int_operand")] ;; model
+ 	 UNSPEC_SYNC_OLD_OP))]
+-  "ISA_BASE_IS_LA64V110"
++  "TARGET_LAM_BH"
+   "amadd%A2.\t$zero,%z1,%0"
+   [(set (attr "length") (const_int 4))])
+ 
+@@ -230,7 +230,7 @@
+ 	  UNSPEC_SYNC_EXCHANGE))
+    (set (match_dup 1)
+ 	(match_operand:SHORT 2 "register_operand" "r"))]
+-  "ISA_BASE_IS_LA64V110"
++  "TARGET_LAM_BH"
+   "amswap%A3.\t%0,%z2,%1"
+   [(set (attr "length") (const_int 4))])
+ 
+@@ -266,7 +266,7 @@
+ 			       (match_operand:QHWD 3 "reg_or_0_operand" "rJ")
+ 			       (match_operand:SI 4 "const_int_operand")]  ;; mod_s
+ 	 UNSPEC_COMPARE_AND_SWAP))]
+-  "ISA_BASE_IS_LA64V110"
++  "TARGET_LAMCAS"
+   "ori\t%0,%z2,0\n\tamcas%A4.\t%0,%z3,%1"
+   [(set (attr "length") (const_int 8))])
+ 
+@@ -296,7 +296,7 @@
+ 
+   operands[6] = mod_s;
+ 
+-  if (ISA_BASE_IS_LA64V110)
++  if (TARGET_LAMCAS)
+     emit_insn (gen_atomic_cas_value_strong_amcas (operands[1], operands[2],
+ 							 operands[3], operands[4],
+ 							 operands[6]));
+@@ -422,7 +422,7 @@
+ 
+   operands[6] = mod_s;
+ 
+-  if (ISA_BASE_IS_LA64V110)
++  if (TARGET_LAMCAS)
+     emit_insn (gen_atomic_cas_value_strong_amcas (operands[1], operands[2],
+ 						       operands[3], operands[4],
+ 						       operands[6]));
+@@ -642,7 +642,7 @@
+ 	(match_operand:SHORT 2 "register_operand"))]
+   ""
+ {
+-  if (ISA_BASE_IS_LA64V110)
++  if (TARGET_LAM_BH)
+     emit_insn (gen_atomic_exchange_short (operands[0], operands[1], operands[2], operands[3]));
+   else
+     {
+@@ -663,7 +663,7 @@
+ 		     (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))
+ 	   (match_operand:SI 3 "const_int_operand")] ;; model
+ 	 UNSPEC_SYNC_OLD_OP))]
+-  "ISA_BASE_IS_LA64V110"
++  "TARGET_LAM_BH"
+   "amadd%A3.\t%0,%z2,%1"
+   [(set (attr "length") (const_int 4))])
+ 
+@@ -678,7 +678,7 @@
+ 	 UNSPEC_SYNC_OLD_OP))]
+   ""
+ {
+-  if (ISA_BASE_IS_LA64V110)
++  if (TARGET_LAM_BH)
+     emit_insn (gen_atomic_fetch_add_short (operands[0], operands[1],
+ 					     operands[2], operands[3]));
+   else
+-- 
+2.43.0
+
diff --git a/0039-Match-double-sized-mul-pattern.patch b/0039-Match-double-sized-mul-pattern.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9d4e56f24732f19ce3d77e9c6bea04549b45c099
--- /dev/null
+++ b/0039-Match-double-sized-mul-pattern.patch
@@ -0,0 +1,488 @@
+From e7b22f97f960b62e555dfd6f2e3ae43973fcbb3e Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 00812787 
+Date: Wed, 25 Jan 2023 15:04:07 +0300
+Subject: [PATCH 05/18] Match double sized mul pattern
+
+---
+ gcc/match.pd                              | 136 +++++++++++++++++++++
+ gcc/testsuite/gcc.dg/double_sized_mul-1.c | 141 ++++++++++++++++++++++
+ gcc/testsuite/gcc.dg/double_sized_mul-2.c |  62 ++++++++++
+ gcc/tree-ssa-math-opts.cc                 |  80 ++++++++++++
+ 4 files changed, 419 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/double_sized_mul-1.c
+ create mode 100644 gcc/testsuite/gcc.dg/double_sized_mul-2.c
+
+diff --git a/gcc/match.pd b/gcc/match.pd
+index 3cbaf2a5b..61866cb90 100644
+--- a/gcc/match.pd
++++ b/gcc/match.pd
+@@ -7895,3 +7895,139 @@ and,
+ 	       == TYPE_UNSIGNED (TREE_TYPE (@3))))
+        && single_use (@4)
+        && single_use (@5))))
++
++/* Match multiplication with double sized result.
++
++   Consider the following calculations:
++   arg0 * arg1 = (2^(bit_size/2) * arg0_hi + arg0_lo)
++	       * (2^(bit_size/2) * arg1_hi + arg1_lo)
++   arg0 * arg1 = 2^bit_size * arg0_hi * arg1_hi
++	       + 2^(bit_size/2) * (arg0_hi * arg1_lo + arg0_lo * arg1_hi)
++	       + arg0_lo * arg1_lo
++
++   The products of high and low parts fits in bit_size values, thus they are
++   placed in high and low parts of result respectively.
++
++   The sum of the mixed products may overflow, so we need a detection for that.
++   Also it has a bit_size/2 offset, thus it intersects with both high and low
++   parts of result.  Overflow detection constant is bit_size/2 due to this.
++
++   With this info:
++   arg0 * arg1 = 2^bit_size * arg0_hi * arg1_hi
++	       + 2^(bit_size/2) * middle
++	       + 2^bit_size * possible_middle_overflow
++	       + arg0_lo * arg1_lo
++   arg0 * arg1 = 2^bit_size * (arg0_hi * arg1_hi + possible_middle_overflow)
++	       + 2^(bit_size/2) * (2^(bit_size/2) * middle_hi + middle_lo)
++	       + arg0_lo * arg1_lo
++   arg0 * arg1 = 2^bit_size * (arg0_hi * arg1_hi + middle_hi
++	       +	       possible_middle_overflow)
++	       + 2^(bit_size/2) * middle_lo
++	       + arg0_lo * arg1_lo
++
++   The last sum can produce overflow for the high result part.  With this:
++   arg0 * arg1 = 2^bit_size * (arg0_hi * arg1_hi + possible_middle_overflow
++	       +	       possible_res_lo_overflow + middle_hi)
++	       + res_lo
++	       = res_hi + res_lo
++
++   This formula is quite big to fit into one match pattern with all of the
++   combinations of terms inside it.  There are many helpers for better code
++   readability.
++
++   The simplification basis is res_hi: assuming that res_lo only is not
++   real practical case for such calculations.
++
++   Overflow handling is done via matching complex calculations:
++   the realpart and imagpart are quite handy here.  */
++/* Match low and high parts of the argument.  */
++(match (double_size_mul_arg_lo @0 @1)
++ (bit_and @0 INTEGER_CST@1)
++  (if (wi::to_wide (@1)
++       == wi::mask (TYPE_PRECISION (type) / 2, false, TYPE_PRECISION (type)))))
++(match (double_size_mul_arg_hi @0 @1)
++ (rshift @0 INTEGER_CST@1)
++  (if (wi::to_wide (@1) == TYPE_PRECISION (type) / 2)))
++
++/* Match various argument parts products.  */
++(match (double_size_mul_lolo @0 @1)
++ (mult@4 (double_size_mul_arg_lo @0 @2) (double_size_mul_arg_lo @1 @3))
++  (if (single_use (@4))))
++(match (double_size_mul_hihi @0 @1)
++ (mult@4 (double_size_mul_arg_hi @0 @2) (double_size_mul_arg_hi @1 @3))
++  (if (single_use (@4))))
++(match (double_size_mul_lohi @0 @1)
++ (mult:c@4 (double_size_mul_arg_lo @0 @2) (double_size_mul_arg_hi @1 @3))
++  (if (single_use (@4))))
++
++/* Match complex middle sum.  */
++(match (double_size_mul_middle_complex @0 @1)
++ (IFN_ADD_OVERFLOW@2 (double_size_mul_lohi @0 @1) (double_size_mul_lohi @1 @0))
++  (if (num_imm_uses (@2) == 2)))
++
++/* Match real middle results.  */
++(match (double_size_mul_middle @0 @1)
++ (realpart@2 (double_size_mul_middle_complex @0 @1))
++  (if (num_imm_uses (@2) == 2)))
++(match (double_size_mul_middleres_lo @0 @1)
++ (lshift@3 (double_size_mul_middle @0 @1) INTEGER_CST@2)
++  (if (wi::to_wide (@2) == TYPE_PRECISION (type) / 2
++       && single_use (@3))))
++(match (double_size_mul_middleres_hi @0 @1)
++ (rshift@3 (double_size_mul_middle @0 @1) INTEGER_CST@2)
++  (if (wi::to_wide (@2) == TYPE_PRECISION (type) / 2
++       && single_use (@3))))
++
++/* Match low result part.  */
++/* Number of uses may be < 2 in case when we are interested in
++   high part only.  */
++(match (double_size_mul_res_lo_complex @0 @1)
++ (IFN_ADD_OVERFLOW:c@2
++  (double_size_mul_lolo:c @0 @1) (double_size_mul_middleres_lo @0 @1))
++  (if (num_imm_uses (@2) <= 2)))
++(match (double_size_mul_res_lo @0 @1)
++ (realpart (double_size_mul_res_lo_complex @0 @1)))
++
++/* Match overflow terms.  */
++(match (double_size_mul_overflow_check_lo @0 @1 @5)
++ (convert@4 (ne@3
++  (imagpart@2 (double_size_mul_res_lo_complex@5 @0 @1)) integer_zerop))
++  (if (single_use (@2) && single_use (@3) && single_use (@4))))
++(match (double_size_mul_overflow_check_hi @0 @1)
++ (lshift@6 (convert@5 (ne@4
++  (imagpart@3 (double_size_mul_middle_complex @0 @1)) integer_zerop))
++	   INTEGER_CST@2)
++  (if (wi::to_wide (@2) == TYPE_PRECISION (type) / 2
++       && single_use (@3) && single_use (@4) && single_use (@5)
++       && single_use (@6))))
++
++/* Match all possible permutations for high result part calculations.  */
++(for op1 (double_size_mul_hihi
++	  double_size_mul_overflow_check_hi
++	  double_size_mul_middleres_hi)
++     op2 (double_size_mul_overflow_check_hi
++	  double_size_mul_middleres_hi
++	  double_size_mul_hihi)
++     op3 (double_size_mul_middleres_hi
++	  double_size_mul_hihi
++	  double_size_mul_overflow_check_hi)
++ (match (double_size_mul_candidate @0 @1 @2 @3)
++  (plus:c@2
++   (plus:c@4 (double_size_mul_overflow_check_lo @0 @1 @3) (op1:c @0 @1))
++   (plus:c@5 (op2:c @0 @1) (op3:c @0 @1)))
++    (if (single_use (@4) && single_use (@5))))
++ (match (double_size_mul_candidate @0 @1 @2 @3)
++  (plus:c@2 (double_size_mul_overflow_check_lo @0 @1 @3)
++   (plus:c@4 (op1:c @0 @1)
++    (plus:c@5 (op2:c @0 @1) (op3:c @0 @1))))
++     (if (single_use (@4) && single_use (@5))))
++ (match (double_size_mul_candidate @0 @1 @2 @3)
++  (plus:c@2 (op1:c @0 @1)
++   (plus:c@4 (double_size_mul_overflow_check_lo @0 @1 @3)
++    (plus:c@5 (op2:c @0 @1) (op3:c @0 @1))))
++     (if (single_use (@4) && single_use (@5))))
++ (match (double_size_mul_candidate @0 @1 @2 @3)
++  (plus:c@2 (op1:c @0 @1)
++   (plus:c@4 (op2:c @0 @1)
++    (plus:c@5 (double_size_mul_overflow_check_lo @0 @1 @3) (op3:c @0 @1))))
++     (if (single_use (@4) && single_use (@5)))))
+diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-1.c b/gcc/testsuite/gcc.dg/double_sized_mul-1.c
+new file mode 100644
+index 000000000..4d475cc8a
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/double_sized_mul-1.c
+@@ -0,0 +1,141 @@
++/* { dg-do compile } */
++/* fif-conversion-gimple and fuaddsub-overflow-match-all are required for
++   proper overflow detection in some cases.  */
++/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++#include 
++
++typedef unsigned __int128 uint128_t;
++
++uint16_t mul16 (uint8_t a, uint8_t b)
++{
++  uint8_t a_lo = a & 0xF;
++  uint8_t b_lo = b & 0xF;
++  uint8_t a_hi = a >> 4;
++  uint8_t b_hi = b >> 4;
++  uint8_t lolo = a_lo * b_lo;
++  uint8_t lohi = a_lo * b_hi;
++  uint8_t hilo = a_hi * b_lo;
++  uint8_t hihi = a_hi * b_hi;
++  uint8_t middle = hilo + lohi;
++  uint8_t middle_hi = middle >> 4;
++  uint8_t middle_lo = middle << 4;
++  uint8_t res_lo = lolo + middle_lo;
++  uint8_t res_hi = hihi + middle_hi;
++  res_hi += (res_lo < middle_lo ? 1 : 0);
++  res_hi += (middle < hilo ? 0x10 : 0);
++  uint16_t res = ((uint16_t) res_hi) << 8;
++  res += res_lo;
++  return res;
++}
++
++uint32_t mul32 (uint16_t a, uint16_t b)
++{
++  uint16_t a_lo = a & 0xFF;
++  uint16_t b_lo = b & 0xFF;
++  uint16_t a_hi = a >> 8;
++  uint16_t b_hi = b >> 8;
++  uint16_t lolo = a_lo * b_lo;
++  uint16_t lohi = a_lo * b_hi;
++  uint16_t hilo = a_hi * b_lo;
++  uint16_t hihi = a_hi * b_hi;
++  uint16_t middle = hilo + lohi;
++  uint16_t middle_hi = middle >> 8;
++  uint16_t middle_lo = middle << 8;
++  uint16_t res_lo = lolo + middle_lo;
++  uint16_t res_hi = hihi + middle_hi;
++  res_hi += (res_lo < middle_lo ? 1 : 0);
++  res_hi += (middle < hilo ? 0x100 : 0);
++  uint32_t res = ((uint32_t) res_hi) << 16;
++  res += res_lo;
++  return res;
++}
++
++uint64_t mul64 (uint32_t a, uint32_t b)
++{
++  uint32_t a_lo = a & 0xFFFF;
++  uint32_t b_lo = b & 0xFFFF;
++  uint32_t a_hi = a >> 16;
++  uint32_t b_hi = b >> 16;
++  uint32_t lolo = a_lo * b_lo;
++  uint32_t lohi = a_lo * b_hi;
++  uint32_t hilo = a_hi * b_lo;
++  uint32_t hihi = a_hi * b_hi;
++  uint32_t middle = hilo + lohi;
++  uint32_t middle_hi = middle >> 16;
++  uint32_t middle_lo = middle << 16;
++  uint32_t res_lo = lolo + middle_lo;
++  uint32_t res_hi = hihi + middle_hi;
++  res_hi += (res_lo < middle_lo ? 1 : 0);
++  res_hi += (middle < hilo ? 0x10000 : 0);
++  uint64_t res = ((uint64_t) res_hi) << 32;
++  res += res_lo;
++  return res;
++}
++
++uint128_t mul128 (uint64_t a, uint64_t b)
++{
++  uint64_t a_lo = a & 0xFFFFFFFF;
++  uint64_t b_lo = b & 0xFFFFFFFF;
++  uint64_t a_hi = a >> 32;
++  uint64_t b_hi = b >> 32;
++  uint64_t lolo = a_lo * b_lo;
++  uint64_t lohi = a_lo * b_hi;
++  uint64_t hilo = a_hi * b_lo;
++  uint64_t hihi = a_hi * b_hi;
++  uint64_t middle = hilo + lohi;
++  uint64_t middle_hi = middle >> 32;
++  uint64_t middle_lo = middle << 32;
++  uint64_t res_lo = lolo + middle_lo;
++  uint64_t res_hi = hihi + middle_hi;
++  res_hi += (res_lo < middle_lo ? 1 : 0);
++  res_hi += (middle < hilo ? 0x100000000 : 0);
++  uint128_t res = ((uint128_t) res_hi) << 64;
++  res += res_lo;
++  return res;
++}
++
++uint64_t mul64_perm (uint32_t a, uint32_t b)
++{
++  uint32_t a_lo = a & 0xFFFF;
++  uint32_t b_lo = b & 0xFFFF;
++  uint32_t a_hi = a >> 16;
++  uint32_t b_hi = b >> 16;
++  uint32_t lolo = a_lo * b_lo;
++  uint32_t lohi = a_lo * b_hi;
++  uint32_t hilo = a_hi * b_lo;
++  uint32_t hihi = a_hi * b_hi;
++  uint32_t middle = hilo + lohi;
++  uint32_t middle_hi = middle >> 16;
++  uint32_t middle_lo = middle << 16;
++  uint32_t res_lo = lolo + middle_lo;
++  uint32_t res_hi = hihi + middle_hi;
++  res_hi = res_lo < middle_lo ? res_hi + 1 : res_hi;
++  res_hi = middle < hilo ? res_hi + 0x10000 : res_hi;
++  uint64_t res = ((uint64_t) res_hi) << 32;
++  res += res_lo;
++  return res;
++}
++
++uint128_t mul128_perm (uint64_t a, uint64_t b)
++{
++  uint64_t a_lo = a & 0xFFFFFFFF;
++  uint64_t b_lo = b & 0xFFFFFFFF;
++  uint64_t a_hi = a >> 32;
++  uint64_t b_hi = b >> 32;
++  uint64_t lolo = a_lo * b_lo;
++  uint64_t lohi = a_lo * b_hi;
++  uint64_t hilo = a_hi * b_lo;
++  uint64_t hihi = a_hi * b_hi;
++  uint64_t middle = hilo + lohi;
++  uint64_t middle_hi = middle >> 32;
++  uint64_t middle_lo = middle << 32;
++  uint64_t res_lo = lolo + middle_lo;
++  uint64_t res_hi = hihi + middle_hi;
++  res_hi = res_lo < middle_lo ? res_hi + 1 : res_hi;
++  res_hi = middle < hilo ? res_hi + 0x100000000 : res_hi;
++  uint128_t res = ((uint128_t) res_hi) << 64;
++  res += res_lo;
++  return res;
++}
++
++/* { dg-final { scan-tree-dump-times "double sized mul optimized: 1" 6 "widening_mul" } } */
+diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-2.c b/gcc/testsuite/gcc.dg/double_sized_mul-2.c
+new file mode 100644
+index 000000000..cc6e5af25
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/double_sized_mul-2.c
+@@ -0,0 +1,62 @@
++/* { dg-do compile } */
++/* fif-conversion-gimple is required for proper overflow detection
++   in some cases.  */
++/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++#include <stdint.h>
++
++typedef unsigned __int128 uint128_t;
++typedef struct uint256_t
++{
++    uint128_t lo;
++    uint128_t hi;
++} uint256_t;
++
++uint64_t mul64_double_use (uint32_t a, uint32_t b)
++{
++  uint32_t a_lo = a & 0xFFFF;
++  uint32_t b_lo = b & 0xFFFF;
++  uint32_t a_hi = a >> 16;
++  uint32_t b_hi = b >> 16;
++  uint32_t lolo = a_lo * b_lo;
++  uint32_t lohi = a_lo * b_hi;
++  uint32_t hilo = a_hi * b_lo;
++  uint32_t hihi = a_hi * b_hi;
++  uint32_t middle = hilo + lohi;
++  uint32_t middle_hi = middle >> 16;
++  uint32_t middle_lo = middle << 16;
++  uint32_t res_lo = lolo + middle_lo;
++  uint32_t res_hi = hihi + middle_hi;
++  res_hi += (res_lo < middle_lo ? 1 : 0);
++  res_hi += (middle < hilo ? 0x10000 : 0);
++  uint64_t res = ((uint64_t) res_hi) << 32;
++  res += res_lo;
++  return res + lolo;
++}
++
++uint256_t mul256 (uint128_t a, uint128_t b)
++{
++  uint128_t a_lo = a & 0xFFFFFFFFFFFFFFFF;
++  uint128_t b_lo = b & 0xFFFFFFFFFFFFFFFF;
++  uint128_t a_hi = a >> 64;
++  uint128_t b_hi = b >> 64;
++  uint128_t lolo = a_lo * b_lo;
++  uint128_t lohi = a_lo * b_hi;
++  uint128_t hilo = a_hi * b_lo;
++  uint128_t hihi = a_hi * b_hi;
++  uint128_t middle = hilo + lohi;
++  uint128_t middle_hi = middle >> 64;
++  uint128_t middle_lo = middle << 64;
++  uint128_t res_lo = lolo + middle_lo;
++  uint128_t res_hi = hihi + middle_hi;
++  res_hi += (res_lo < middle_lo ? 1 : 0);
++  /* Constant is too big warning WA */
++  uint128_t overflow_tmp = (middle < hilo ? 1 : 0);
++  overflow_tmp <<= 64;
++  res_hi += overflow_tmp;
++  uint256_t res;
++  res.lo = res_lo;
++  res.hi = res_hi;
++  return res;
++}
++
++/* { dg-final { scan-tree-dump-not "double sized mul optimized" "widening_mul" } } */
+diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc
+index 55d6ee8ae..2c06b8a60 100644
+--- a/gcc/tree-ssa-math-opts.cc
++++ b/gcc/tree-ssa-math-opts.cc
+@@ -210,6 +210,9 @@ static struct
+ 
+   /* Number of highpart multiplication ops inserted.  */
+   int highpart_mults_inserted;
++
++  /* Number of optimized double sized multiplications.  */
++  int double_sized_mul_optimized;
+ } widen_mul_stats;
+ 
+ /* The instance of "struct occurrence" representing the highest
+@@ -4893,6 +4896,78 @@ optimize_spaceship (gimple *stmt)
+ }
+ 
+ 
++/* Pattern matcher for double sized multiplication defined in match.pd.  */
++extern bool gimple_double_size_mul_candidate (tree, tree*, tree (*)(tree));
++
++static bool
++convert_double_size_mul (gimple_stmt_iterator *gsi, gimple *stmt)
++{
++  gimple *use_stmt, *complex_res_lo;
++  gimple_stmt_iterator insert_before;
++  imm_use_iterator use_iter;
++  tree match[4]; // arg0, arg1, res_hi, complex_res_lo
++  tree arg0, arg1, widen_mult, new_type, tmp;
++  tree lhs = gimple_assign_lhs (stmt);
++  location_t loc = UNKNOWN_LOCATION;
++  machine_mode mode;
++
++  if (!gimple_double_size_mul_candidate (lhs, match, NULL))
++    return false;
++
++  new_type = build_nonstandard_integer_type (
++	  TYPE_PRECISION (TREE_TYPE (match[0])) * 2, 1);
++  mode = TYPE_MODE (new_type);
++
++  /* Early return if the target multiplication doesn't exist on target.  */
++  if (optab_handler (smul_optab, mode) == CODE_FOR_nothing
++      && !wider_optab_check_p (smul_optab, mode, 1))
++    return false;
++
++  /* Determine the point where the wide multiplication
++     should be inserted.  Complex low res is OK since it is required
++     by both high and low part getters, thus it dominates both of them.  */
++  complex_res_lo = SSA_NAME_DEF_STMT (match[3]);
++  insert_before = gsi_for_stmt (complex_res_lo);
++  gsi_next (&insert_before);
++
++  /* Create the widen multiplication.  */
++  arg0 = build_and_insert_cast (&insert_before, loc, new_type, match[0]);
++  arg1 = build_and_insert_cast (&insert_before, loc, new_type, match[1]);
++  widen_mult = build_and_insert_binop (&insert_before, loc, "widen_mult",
++				       MULT_EXPR, arg0, arg1);
++
++  /* Find the mult low part getter.  */
++  FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, match[3])
++    if (gimple_assign_rhs_code (use_stmt) == REALPART_EXPR)
++      break;
++
++  /* Create high and low (if needed) parts extractors.  */
++  /* Low part.  */
++  if (use_stmt)
++    {
++      loc = gimple_location (use_stmt);
++      tmp = build_and_insert_cast (&insert_before, loc,
++	  	      		   TREE_TYPE (gimple_get_lhs (use_stmt)),
++	  			   widen_mult);
++      gassign *new_stmt = gimple_build_assign (gimple_get_lhs (use_stmt),
++	    				       NOP_EXPR, tmp);
++      gsi_replace (&insert_before, new_stmt, true);
++    }
++
++  /* High part.  */
++  loc = gimple_location (stmt);
++  tmp = build_and_insert_binop (gsi, loc, "widen_mult_hi",
++				RSHIFT_EXPR, widen_mult,
++				build_int_cst (new_type,
++					       TYPE_PRECISION (new_type) / 2));
++  tmp = build_and_insert_cast (gsi, loc, TREE_TYPE (lhs), tmp);
++  gassign *new_stmt = gimple_build_assign (lhs, NOP_EXPR, tmp);
++  gsi_replace (gsi, new_stmt, true);
++
++  widen_mul_stats.double_sized_mul_optimized++;
++  return true;
++}
++
+ /* Find integer multiplications where the operands are extended from
+    smaller types, and replace the MULT_EXPR with a WIDEN_MULT_EXPR
+    or MULT_HIGHPART_EXPR where appropriate.  */
+@@ -4987,6 +5062,9 @@ math_opts_dom_walker::after_dom_children (basic_block bb)
+ 	      break;
+ 
+ 	    case PLUS_EXPR:
++	      if (convert_double_size_mul (&gsi, stmt))
++		break;
++	      __attribute__ ((fallthrough));
+ 	    case MINUS_EXPR:
+ 	      if (!convert_plusminus_to_widen (&gsi, stmt, code))
+ 		match_arith_overflow (&gsi, stmt, code, m_cfg_changed_p);
+@@ -5091,6 +5169,8 @@ pass_optimize_widening_mul::execute (function *fun)
+ 			    widen_mul_stats.divmod_calls_inserted);
+   statistics_counter_event (fun, "highpart multiplications inserted",
+ 			    widen_mul_stats.highpart_mults_inserted);
++  statistics_counter_event (fun, "double sized mul optimized",
++			    widen_mul_stats.double_sized_mul_optimized);
+ 
+   return cfg_changed ? TODO_cleanup_cfg : 0;
+ }
+-- 
+2.33.0
+
diff --git a/0040-LoongArch-Fix-mexplict-relocs-none-mcmodel-medium-pr.patch b/0040-LoongArch-Fix-mexplict-relocs-none-mcmodel-medium-pr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..809b646f20596596b70e911eab1edca0f5fd0bed
--- /dev/null
+++ b/0040-LoongArch-Fix-mexplict-relocs-none-mcmodel-medium-pr.patch
@@ -0,0 +1,50 @@
+From 8ca46859ad70fb9473f6dbb1d3069e68ed43ef36 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao <xry111@xry111.site>
+Date: Sun, 19 Nov 2023 01:41:12 +0800
+Subject: [PATCH 040/188] LoongArch: Fix "-mexplict-relocs=none
+ -mcmodel=medium" producing %call36 when the assembler does not support it
+
+Even if !HAVE_AS_SUPPORT_CALL36, const_call_insn_operand should still
+return false when -mexplict-relocs=none -mcmodel=medium to make
+loongarch_legitimize_call_address emit la.local or la.global.
+
+gcc/ChangeLog:
+
+	* config/loongarch/predicates.md (const_call_insn_operand):
+	Remove buggy "HAVE_AS_SUPPORT_CALL36" conditions.  Change "1" to
+	"true" to make the coding style consistent.
+---
+ gcc/config/loongarch/predicates.md | 6 ++----
+ 1 file changed, 2 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 2aae87db4..30a0dee9f 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -444,21 +444,19 @@
+     case SYMBOL_PCREL:
+       if (TARGET_CMODEL_EXTREME
+ 	  || (TARGET_CMODEL_MEDIUM
+-	      && HAVE_AS_SUPPORT_CALL36
+ 	      && (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)))
+ 	return false;
+       else
+-	return 1;
++	return true;
+ 
+     case SYMBOL_GOT_DISP:
+       if (TARGET_CMODEL_EXTREME
+ 	  || !flag_plt
+ 	  || (flag_plt && TARGET_CMODEL_MEDIUM
+-	      && HAVE_AS_SUPPORT_CALL36
+ 	      && (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)))
+ 	return false;
+       else
+-	return 1;
++	return true;
+ 
+     default:
+       return false;
+-- 
+2.43.0
+
diff --git a/0040-Port-icp-patch-to-GCC-12.patch b/0040-Port-icp-patch-to-GCC-12.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d0b34126ce497f8f912ef10b2815735d5b7650a7
--- /dev/null
+++ b/0040-Port-icp-patch-to-GCC-12.patch
@@ -0,0 +1,2387 @@
+From b73462757734c62f64e7a4379340679ec6f19669 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Tue, 27 Feb 2024 07:28:12 +0800
+Subject: [PATCH 06/18] Port icp patch to GCC 12
+
+---
+ gcc/common.opt              |    8 +
+ gcc/dbgcnt.def              |    1 +
+ gcc/ipa-devirt.cc           | 1855 +++++++++++++++++++++++++++++++++++
+ gcc/passes.def              |    1 +
+ gcc/testsuite/gcc.dg/icp1.c |   40 +
+ gcc/testsuite/gcc.dg/icp2.c |   38 +
+ gcc/testsuite/gcc.dg/icp3.c |   52 +
+ gcc/testsuite/gcc.dg/icp4.c |   55 ++
+ gcc/testsuite/gcc.dg/icp5.c |   66 ++
+ gcc/testsuite/gcc.dg/icp6.c |   66 ++
+ gcc/testsuite/gcc.dg/icp7.c |   48 +
+ gcc/timevar.def             |    1 +
+ gcc/tree-pass.h             |    1 +
+ 13 files changed, 2232 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/icp1.c
+ create mode 100644 gcc/testsuite/gcc.dg/icp2.c
+ create mode 100644 gcc/testsuite/gcc.dg/icp3.c
+ create mode 100644 gcc/testsuite/gcc.dg/icp4.c
+ create mode 100644 gcc/testsuite/gcc.dg/icp5.c
+ create mode 100644 gcc/testsuite/gcc.dg/icp6.c
+ create mode 100644 gcc/testsuite/gcc.dg/icp7.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 39c90604e..16aadccf6 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1316,6 +1316,14 @@ fdevirtualize
+ Common Var(flag_devirtualize) Optimization
+ Try to convert virtual calls to direct ones.
+ 
++ficp
++Common Var(flag_icp) Optimization Init(0)
++Try to promote indirect calls to direct ones.
++
++ficp-speculatively
++Common Var(flag_icp_speculatively) Optimization
++Promote indirect calls speculatively.
++
+ fdiagnostics-show-location=
+ Common Joined RejectNegative Enum(diagnostic_prefixing_rule)
+ -fdiagnostics-show-location=[once|every-line]	How often to emit source location at the beginning of line-wrapped diagnostics.
+diff --git a/gcc/dbgcnt.def b/gcc/dbgcnt.def
+index 3aa18cd0c..a00bbc31b 100644
+--- a/gcc/dbgcnt.def
++++ b/gcc/dbgcnt.def
+@@ -170,6 +170,7 @@ DEBUG_COUNTER (graphite_scop)
+ DEBUG_COUNTER (hoist)
+ DEBUG_COUNTER (hoist_insn)
+ DEBUG_COUNTER (ia64_sched2)
++DEBUG_COUNTER (icp)
+ DEBUG_COUNTER (if_after_combine)
+ DEBUG_COUNTER (if_after_reload)
+ DEBUG_COUNTER (if_conversion)
+diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc
+index 74fe65608..383839189 100644
+--- a/gcc/ipa-devirt.cc
++++ b/gcc/ipa-devirt.cc
+@@ -103,9 +103,14 @@ along with GCC; see the file COPYING3.  If not see
+   indirect polymorphic edge all possible polymorphic call targets of the call.
+ 
+   pass_ipa_devirt performs simple speculative devirtualization.
++  pass_ipa_icp performs simple indirect call promotion.
+ */
+ 
+ #include "config.h"
++#define INCLUDE_ALGORITHM
++#define INCLUDE_SET
++#define INCLUDE_MAP
++#define INCLUDE_LIST
+ #include "system.h"
+ #include "coretypes.h"
+ #include "backend.h"
+@@ -127,6 +132,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "ipa-fnsummary.h"
+ #include "demangle.h"
+ #include "dbgcnt.h"
++#include "gimple-iterator.h"
+ #include "gimple-pretty-print.h"
+ #include "intl.h"
+ #include "stringpool.h"
+@@ -4401,5 +4407,1854 @@ make_pass_ipa_odr (gcc::context *ctxt)
+   return new pass_ipa_odr (ctxt);
+ }
+ 
++/* Function signature map used to look up function decl which corresponds to
++   the given function type.  */
++typedef std::set<unsigned> type_set;
++typedef std::set<tree> decl_set;
++typedef std::map<unsigned, type_set *> type_alias_map;
++typedef std::map<unsigned, decl_set *> type_decl_map;
++typedef std::map<unsigned, tree> uid_to_type_map;
++typedef std::map<tree, tree> type_map;
++
++static bool has_address_taken_functions_with_varargs = false;
++static type_set *unsafe_types = NULL;
++static type_alias_map *fta_map = NULL;
++static type_alias_map *ta_map = NULL;
++static type_map *ctype_map = NULL;
++static type_alias_map *cbase_to_ptype = NULL;
++static type_decl_map *fs_map = NULL;
++static uid_to_type_map *type_uid_map = NULL;
++
++static void
++print_type_set(unsigned ftype_uid, type_alias_map *map)
++{
++  if (!map->count (ftype_uid))
++    return;
++  type_set* s = (*map)[ftype_uid];
++  for (type_set::const_iterator it = s->begin (); it != s->end (); it++)
++    fprintf (dump_file, it == s->begin () ? "%d" : ", %d", *it);
++}
++
++static void
++dump_type_with_uid (const char *msg, tree type, dump_flags_t flags = TDF_NONE)
++{
++  fprintf (dump_file, msg);
++  print_generic_expr (dump_file, type, flags);
++  fprintf (dump_file, " (%d)\n", TYPE_UID (type));
++}
++
++/* Walk aggregate type and collect types of scalar elements.  */
++
++static void
++collect_scalar_types (tree tp, std::list<tree> &types)
++{
++  /* TODO: take into account different field offsets.
++     Also support array casts.  */
++  if (tp && dump_file && (dump_flags & TDF_DETAILS))
++    dump_type_with_uid ("Walk var's type: ", tp, TDF_UID);
++  if (RECORD_OR_UNION_TYPE_P (tp))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Record's fields {\n");
++      for (tree field = TYPE_FIELDS (tp); field;
++	   field = DECL_CHAIN (field))
++	{
++	  if (TREE_CODE (field) != FIELD_DECL)
++	    continue;
++	  collect_scalar_types (TREE_TYPE (field), types);
++	}
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "}\n");
++      return;
++    }
++  if (TREE_CODE (tp) == ARRAY_TYPE)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Array's innermost type:\n");
++      /* Take the innermost component type.  */
++      tree elt;
++      for (elt = TREE_TYPE (tp); TREE_CODE (elt) == ARRAY_TYPE;
++	   elt = TREE_TYPE (elt))
++	if (dump_file && (dump_flags & TDF_DETAILS))
++	  print_generic_expr (dump_file, elt);
++      collect_scalar_types (elt, types);
++      return;
++    }
++  types.push_back (tp);
++}
++
++static void maybe_register_aliases (tree type1, tree type2);
++
++/* Walk type lists and maybe register type aliases.  */
++
++static void
++compare_type_lists (std::list<tree> tlist1, std::list<tree> tlist2)
++{
++  for (std::list<tree>::iterator ti1 = tlist1.begin (), ti2 = tlist2.begin ();
++       ti1 != tlist1.end (); ++ti1, ++ti2)
++    {
++      /* TODO: correct the analysis results if lists have different length.  */
++      if (ti2 == tlist2.end ())
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "Type lists with different length!\n");
++	  break;
++	}
++      maybe_register_aliases (*ti1, *ti2);
++    }
++}
++
++/* For two given types collect scalar element types and
++   compare the result lists to find type aliases.  */
++
++static void
++collect_scalar_types_and_find_aliases (tree t1, tree t2)
++{
++  std::list<tree> tlist1;
++  std::list<tree> tlist2;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "First type list: ");
++  collect_scalar_types (t1, tlist1);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Second type list: ");
++  collect_scalar_types (t2, tlist2);
++  compare_type_lists (tlist1, tlist2);
++}
++
++/* Dump type with the corresponding set from the map.  */
++
++static void
++dump_type_uid_with_set (const char *msg, tree type, type_alias_map *map,
++			bool dump_type = true, bool with_newline = true)
++{
++  fprintf (dump_file, msg, TYPE_UID (type));
++  if (dump_type)
++    print_generic_expr (dump_file, type);
++  fprintf (dump_file, " (");
++  print_type_set (TYPE_UID (type), map);
++  fprintf (dump_file, ")");
++  fprintf (dump_file, with_newline ? "\n" : " ");
++}
++
++static void
++dump_two_types_uids_with_set (const char *msg, unsigned t1_uid,
++			      unsigned t2_uid, type_alias_map *map)
++{
++  fprintf (dump_file, msg, t1_uid, t2_uid);
++  fprintf (dump_file, " (");
++  print_type_set (t1_uid, map);
++  fprintf (dump_file, ")\n");
++}
++
++/* Register type aliases in the map.  Return true if new alias
++   is registered.  */
++
++static bool
++register_ailas_type (tree type, tree alias_type, type_alias_map *map,
++		     bool only_merge = false)
++{
++  /* TODO: maybe support the case with one missed type.  */
++  if (!type || !alias_type)
++    return false;
++  unsigned type_uid = TYPE_UID (type);
++  unsigned alias_type_uid = TYPE_UID (alias_type);
++  if (type_uid_map->count (type_uid) == 0)
++    (*type_uid_map)[type_uid] = type;
++  if (type_uid_map->count (alias_type_uid) == 0)
++    (*type_uid_map)[alias_type_uid] = alias_type;
++
++  if (map->count (type_uid) == 0 && map->count (alias_type_uid) == 0)
++    {
++      (*map)[type_uid] = new type_set ();
++      (*map)[alias_type_uid] = (*map)[type_uid];
++    }
++  else if (map->count (type_uid) == 0)
++    (*map)[type_uid] = (*map)[alias_type_uid];
++  else if (map->count (alias_type_uid) == 0)
++    (*map)[alias_type_uid] = (*map)[type_uid];
++  else if (map->count (type_uid) && map->count (alias_type_uid))
++    {
++      if ((*map)[type_uid] == (*map)[alias_type_uid])
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    dump_two_types_uids_with_set ("Types (%d) and (%d) are already in",
++					  type_uid, alias_type_uid, map);
++	  return false;
++	}
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  dump_type_uid_with_set ("T1 (%d) in set", type, map, false, true);
++	  dump_type_uid_with_set ("T2 (%d) in set", alias_type, map,
++				  false, true);
++	}
++      (*map)[type_uid]->insert ((*map)[alias_type_uid]->begin (),
++				(*map)[alias_type_uid]->end ());
++      type_set *type_set = (*map)[alias_type_uid];
++      for (type_set::const_iterator it1 = type_set->begin ();
++	   it1 != type_set->end (); ++it1)
++	(*map)[*it1] = (*map)[type_uid];
++      delete type_set;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "MERGE: ");
++    }
++   if (!only_merge)
++     {
++       (*map)[type_uid]->insert (alias_type_uid);
++       (*map)[type_uid]->insert (type_uid);
++     }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_two_types_uids_with_set ("Insert types (%d) and (%d) into set",
++				  type_uid, alias_type_uid, map);
++  return true;
++}
++
++static void
++dump_two_types_with_uids (const char *msg, tree t1, tree t2)
++{
++  fprintf (dump_file, msg);
++  print_generic_expr (dump_file, t1, TDF_UID);
++  fprintf (dump_file, " (%d), ", TYPE_UID (t1));
++  print_generic_expr (dump_file, t2, TDF_UID);
++  fprintf (dump_file, " (%d)\n", TYPE_UID (t2));
++}
++
++static void
++analyze_pointees (tree type1, tree type2)
++{
++  gcc_assert (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2));
++  tree base1 = TREE_TYPE (type1);
++  tree base2 = TREE_TYPE (type2);
++  /* TODO: maybe analyze void pointers.  */
++  if (VOID_TYPE_P(base1) || VOID_TYPE_P(base2))
++    return;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_two_types_with_uids ("Walk pointee types: ", base1, base2);
++  collect_scalar_types_and_find_aliases (base1, base2);
++}
++
++static void
++map_canonical_base_to_pointer (tree type, tree to_insert)
++{
++  type = TYPE_MAIN_VARIANT (type);
++  tree base_type = TREE_TYPE (type);
++  tree cbase_type = TYPE_CANONICAL (base_type);
++  if (!cbase_type)
++    return;
++  unsigned cbase_type_uid = TYPE_UID (cbase_type);
++  if (type_uid_map->count (cbase_type_uid) == 0)
++    (*type_uid_map)[cbase_type_uid] = cbase_type;
++
++  if (cbase_to_ptype->count (cbase_type_uid) == 0)
++    {
++      (*cbase_to_ptype)[cbase_type_uid] = new type_set ();
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "New map cb-to-p=(%d): ", cbase_type_uid);
++    }
++  else if (!(*cbase_to_ptype)[cbase_type_uid]->count (TYPE_UID (to_insert)))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Found map cb-to-p=(%d): ", cbase_type_uid);
++    }
++  else
++    return;
++  /* Add all variants of 'to_insert' type.  */
++  for (tree t = to_insert; t; t = TYPE_NEXT_VARIANT (t))
++    {
++      unsigned t_uid = TYPE_UID (t);
++      if (!(*cbase_to_ptype)[cbase_type_uid]->count (t_uid))
++	{
++	  (*cbase_to_ptype)[cbase_type_uid]->insert (t_uid);
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	     fprintf (dump_file, "(%d) ", t_uid);
++	}
++      if (type_uid_map->count (t_uid) == 0)
++	(*type_uid_map)[t_uid] = t;
++    }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\n");
++}
++
++/* Analyse two types and maybe register them as aliases. Also collect
++   unsafe function types and map canonical base types to corresponding
++   pointer types.  */
++
++static void
++maybe_register_aliases (tree type1, tree type2)
++{
++  if (type1 && POINTER_TYPE_P (type1) && !FUNCTION_POINTER_TYPE_P (type1))
++    map_canonical_base_to_pointer (type1, type1);
++  if (type2 && POINTER_TYPE_P (type2) && !FUNCTION_POINTER_TYPE_P (type2))
++    map_canonical_base_to_pointer (type2, type2);
++
++  if (type1 == type2 || !type1 || !type2)
++    return;
++
++  if (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_two_types_with_uids ("Pointer types: ", type1, type2);
++      if (register_ailas_type (type1, type2, ta_map))
++	analyze_pointees (type1, type2);
++    }
++  /* If function and non-function type pointers alias,
++     the function type is unsafe.  */
++  if (FUNCTION_POINTER_TYPE_P (type1) && !FUNCTION_POINTER_TYPE_P (type2))
++    unsafe_types->insert (TYPE_UID (type1));
++  if (FUNCTION_POINTER_TYPE_P (type2) && !FUNCTION_POINTER_TYPE_P (type1))
++    unsafe_types->insert (TYPE_UID (type2));
++
++  /* Try to figure out with pointers to incomplete types.  */
++  if (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2))
++    {
++      type1 = TYPE_MAIN_VARIANT (type1);
++      type2 = TYPE_MAIN_VARIANT (type2);
++      tree base1 = TREE_TYPE (type1);
++      tree base2 = TREE_TYPE (type2);
++      if (RECORD_OR_UNION_TYPE_P (base1) && RECORD_OR_UNION_TYPE_P (base2))
++	{
++	  tree cb1 = TYPE_CANONICAL (base1);
++	  tree cb2 = TYPE_CANONICAL (base2);
++	  if (cb1 && !cb2)
++	    map_canonical_base_to_pointer (type1, type2);
++	  if (cb2 && !cb1)
++	    map_canonical_base_to_pointer (type2, type1);
++	}
++    }
++}
++
++/* Maybe register non-void/equal type aliases.  */
++
++static void
++maybe_register_non_void_aliases (tree t1, tree t2)
++{
++  gcc_assert (t1 && t2);
++  if (type_uid_map->count (TYPE_UID (t1)) == 0)
++    (*type_uid_map)[TYPE_UID (t1)] = t1;
++  if (type_uid_map->count (TYPE_UID (t2)) == 0)
++    (*type_uid_map)[TYPE_UID (t2)] = t2;
++
++  /* Skip equal and void types.  */
++  if (t1 == t2 || VOID_TYPE_P (t1) || VOID_TYPE_P (t2))
++    return;
++  maybe_register_aliases (t1, t2);
++}
++
++/* Detect function type in call stmt.  */
++
++static tree
++get_call_fntype (gcall *stmt)
++{
++  tree fntype = NULL;
++  if (gimple_call_fndecl (stmt) && TREE_TYPE (gimple_call_fndecl (stmt)))
++    fntype = TREE_TYPE (gimple_call_fndecl (stmt));
++  else
++    {
++      tree call_fn = gimple_call_fn (stmt);
++      tree ptype = TREE_TYPE (call_fn);
++      gcc_assert (ptype && TREE_TYPE (ptype));
++      fntype = TREE_TYPE (ptype);
++    }
++  gcc_assert (fntype && fntype != void_type_node
++	      && (TREE_CODE (fntype) == FUNCTION_TYPE
++		  || TREE_CODE (fntype) == METHOD_TYPE));
++  return fntype;
++}
++
++static void
++dump_global_var (tree decl)
++{
++  fprintf (dump_file, "Analyze global var: ");
++  print_generic_decl (dump_file, decl, TDF_NONE);
++  fprintf (dump_file, "\n");
++}
++
++static void
++collect_block_elt_types (tree tp, std::list<tree> &types, tree block)
++{
++  tree vt = TREE_TYPE (tp);
++  gcc_assert (vt);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      const char *msg = TREE_CODE (block) == BLOCK ? "VAR's block: " :
++						     "VAR's ctor: ";
++      fprintf (dump_file, msg);
++      print_generic_expr (dump_file, tp);
++      dump_type_with_uid (" with type ", vt);
++    }
++  collect_scalar_types (vt, types);
++}
++
++/* Compare types of initialization block's or constructor's elements and
++   fields of the initializer type to find type aliases.  */
++
++static void
++compare_block_and_init_type (tree block, tree t1)
++{
++  std::list<tree> tlist1;
++  std::list<tree> tlist2;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Init's type list: ");
++  collect_scalar_types (t1, tlist1);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Block's type list: ");
++  if (TREE_CODE (block) == CONSTRUCTOR)
++    {
++      unsigned HOST_WIDE_INT idx;
++      tree value;
++      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (block), idx, value)
++	{
++	  gcc_assert (value);
++	  collect_block_elt_types (value, tlist2, block);
++	}
++    }
++  else if (TREE_CODE (block) == BLOCK)
++    for (tree var = BLOCK_VARS (block); var; var = DECL_CHAIN (var))
++      {
++	if (TREE_CODE (var) != VAR_DECL)
++	  continue;
++	collect_block_elt_types (var, tlist2, block);
++      }
++  else
++    gcc_unreachable ();
++  compare_type_lists (tlist1, tlist2);
++}
++
++/* Analyze global var to find type aliases comparing types of var and
++   initializer elements.  */
++
++static void
++analyze_global_var (varpool_node *var)
++{
++  var->get_constructor();
++  tree decl = var->decl;
++  if (TREE_CODE (decl) == SSA_NAME || !DECL_INITIAL (decl)
++      || integer_zerop (DECL_INITIAL (decl)))
++    return;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_global_var (decl);
++  tree var_type = TREE_TYPE (decl);
++  tree init_type = TREE_TYPE (DECL_INITIAL (decl));
++  gcc_assert (var_type && init_type);
++  if (RECORD_OR_UNION_TYPE_P (init_type)
++      && !initializer_zerop (DECL_INITIAL (decl)))
++    compare_block_and_init_type (DECL_INITIAL (decl), init_type);
++  else if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Is not a record with nonzero init\n");
++
++  if (var_type == init_type)
++    return;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_two_types_with_uids ("Mismatch of var and init types: ",
++			      var_type, init_type);
++  collect_scalar_types_and_find_aliases (var_type, init_type);
++}
++
++static void
++dump_function_node_info (struct cgraph_node *n)
++{
++  fprintf (dump_file, "\nAnalyse function node: ");
++  print_generic_expr (dump_file, n->decl);
++  fprintf (dump_file, "\n");
++  tree fndecl_type = TREE_TYPE (n->decl);
++  dump_type_with_uid ("Function decl type: ", fndecl_type, TDF_UID);
++  if (TREE_TYPE (fndecl_type))
++    dump_type_with_uid ("Return type: ", TREE_TYPE (fndecl_type));
++  tree argt = TYPE_ARG_TYPES (fndecl_type);
++  for (unsigned i = 1; argt && argt != void_type_node
++       && !VOID_TYPE_P (TREE_VALUE (argt)); ++i, argt = TREE_CHAIN (argt))
++    {
++      tree atype = TREE_VALUE (argt);
++      fprintf (dump_file, "%d-arg type: ", i);
++      dump_type_with_uid ("", atype);
++    }
++  fprintf (dump_file, "\n");
++}
++
++static void
++dump_call_stmt_info (gcall *stmt, tree fntype)
++{
++  fprintf (dump_file, "\nAnalyse call stmt: ");
++  if (stmt)
++    print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS);
++  else
++    fprintf (dump_file, "(no stmt)\n");
++  dump_type_with_uid ("fntype=", fntype, TDF_UID);
++  if (gimple_call_fntype (stmt))
++    dump_type_with_uid ("fntype1=", gimple_call_fntype (stmt), TDF_UID);
++  if (gimple_call_fndecl (stmt) && TREE_TYPE (gimple_call_fndecl (stmt)))
++    dump_type_with_uid ("fntype2=", TREE_TYPE (gimple_call_fndecl (stmt)),
++			TDF_UID);
++}
++
++/* Dump actual and formal arg types.  */
++
++static void
++dump_arg_types_with_uids (int i, tree t1, tree t2)
++{
++  if (i >= 0)
++    fprintf (dump_file, "Call's %d-arg types: ", i);
++  else
++    fprintf (dump_file, "Call's return types: ");
++  fprintf (dump_file, "(%d) and (%d) ", TYPE_UID (t1), TYPE_UID (t2));
++  print_generic_expr (dump_file, t1, TDF_UID);
++  fprintf (dump_file, " ");
++  print_generic_expr (dump_file, t2, TDF_UID);
++  fprintf (dump_file, "\n");
++}
++
++/* Analyze call graph edge with connected call stmt to find type aliases in
++   arguments and return value casts.  */
++
++static void
++analyze_cgraph_edge (cgraph_edge *e)
++{
++  gcall *stmt = e->call_stmt;
++  gcc_assert (stmt != NULL);
++  tree fntype = get_call_fntype (stmt);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_call_stmt_info (stmt, fntype);
++  if (gimple_has_lhs (stmt))
++    {
++      tree t1 = TREE_TYPE (gimple_call_lhs (stmt));
++      tree t2 = TREE_TYPE (fntype);
++      const int is_return_arg = -1;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_arg_types_with_uids (is_return_arg, t1, t2);
++      maybe_register_non_void_aliases (t1, t2);
++    }
++
++  tree argt = TYPE_ARG_TYPES (fntype);
++  if (!argt)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Finish call stmt analysis\n");
++      return;
++    }
++  gcc_assert (argt);
++  unsigned num_args = gimple_call_num_args (stmt);
++  for (unsigned i = 0; i < num_args && argt; ++i, argt = TREE_CHAIN (argt))
++    {
++      tree arg = gimple_call_arg (stmt, i);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_arg_types_with_uids (i, TREE_VALUE (argt), TREE_TYPE (arg));
++      if (TREE_VALUE (argt) == TREE_TYPE (arg)
++	  || !POINTER_TYPE_P (TREE_VALUE (argt))
++	  || !POINTER_TYPE_P (TREE_TYPE (arg)))
++	continue;
++      maybe_register_non_void_aliases (TREE_VALUE (argt), TREE_TYPE (arg));
++      tree t1 = TREE_TYPE (TREE_VALUE (argt));
++      tree t2 = TREE_TYPE (TREE_TYPE (arg));
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Call's %d-arg base types: (%d) and (%d)\n",
++		 i, (t1 ? TYPE_UID (t1) : 0), (t2 ? TYPE_UID (t2) : 0));
++      maybe_register_non_void_aliases (t1, t2);
++    }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "End list of args\n");
++  tree fndecl_type = NULL;
++  if (e->callee && e->callee->decl)
++    fndecl_type = TREE_TYPE (e->callee->decl);
++  if (fndecl_type && fndecl_type != fntype)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Function decl and edge types mismatch:\n");
++      register_ailas_type (fntype, fndecl_type, fta_map);
++    }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "End call stmt analysis\n");
++}
++
++/* Dump STMT (a cast/copy assignment) together with the types of its
++   left- and right-hand sides (LHS_TYPE, RHS_TYPE) to the dump file.  */
++
++static void
++dump_assign_info (gimple *stmt, tree rhs, tree lhs_type, tree rhs_type)
++{
++  fprintf (dump_file, "\nAnalyse assign cast/copy stmt, rhs=%s: ",
++	   get_tree_code_name (TREE_CODE (rhs)));
++  print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS);
++  fprintf (dump_file, "Types: ");
++  print_generic_expr (dump_file, lhs_type);
++  fprintf (dump_file, ", ");
++  print_generic_expr (dump_file, rhs_type);
++  fprintf (dump_file, "\n");
++}
++
++/* Analyze a cast/copy assignment STMT to find type aliases.  Derives the
++   effective RHS type (looking through MEM_REF/ADDR_EXPR), marks a function
++   type unsafe when a non-zero constant is stored to a function pointer, and
++   registers the LHS/RHS type pair as aliases.  */
++
++static void
++analyze_assign_stmt (gimple *stmt)
++{
++  gcc_assert (is_gimple_assign (stmt));
++  tree rhs_type = NULL_TREE;
++  tree lhs_type = TREE_TYPE (gimple_assign_lhs (stmt));
++  tree rhs = gimple_assign_rhs1 (stmt);
++  /* For *p the value's type is the pointee type of p.  */
++  if (TREE_CODE (rhs) == MEM_REF)
++    {
++      rhs = TREE_OPERAND (rhs, 0);
++      tree ptr_type = TREE_TYPE (rhs);
++      gcc_assert (POINTER_TYPE_P (ptr_type));
++      rhs_type = TREE_TYPE (ptr_type);
++    }
++  /* For &x the value's type is a pointer to x's type.  */
++  else if (TREE_CODE (rhs) == ADDR_EXPR)
++    {
++      rhs = TREE_OPERAND (rhs, 0);
++      if (VAR_OR_FUNCTION_DECL_P (rhs) || TREE_CODE (rhs) == STRING_CST
++	  || TREE_CODE (rhs) == ARRAY_REF || TREE_CODE (rhs) == PARM_DECL)
++	rhs_type = build_pointer_type (TREE_TYPE (rhs));
++      else if (TREE_CODE (rhs) == COMPONENT_REF)
++	{
++	  /* Operand 1 of COMPONENT_REF is the FIELD_DECL.  */
++	  rhs = TREE_OPERAND (rhs, 1);
++	  rhs_type = build_pointer_type (TREE_TYPE (rhs));
++	}
++      else if (TREE_CODE (rhs) == MEM_REF)
++	{
++	  rhs = TREE_OPERAND (rhs, 0);
++	  rhs_type = TREE_TYPE (rhs);
++	  gcc_assert (POINTER_TYPE_P (rhs_type));
++	}
++      else
++	gcc_unreachable();
++    }
++  else
++    rhs_type = TREE_TYPE (rhs);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_assign_info (stmt, rhs, lhs_type, rhs_type);
++  /* Storing a non-zero constant into a function pointer means we cannot
++     reason about the pointed-to function type; mark it unsafe.  */
++  if (CONSTANT_CLASS_P (rhs) && !zerop (rhs)
++      && FUNCTION_POINTER_TYPE_P (TREE_TYPE (rhs)))
++    {
++      tree ftype = TREE_TYPE (rhs_type);
++      unsafe_types->insert (TYPE_UID (ftype));
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Function type (%d) is unsafe due to assign "
++		 "non-zero cst to function pointer\n", TYPE_UID (ftype));
++    }
++  maybe_register_non_void_aliases (lhs_type, rhs_type);
++}
++
++/* Walk all statements of FN and analyze cast/copy assignments.  */
++
++static void
++analyze_assigns (function* fn)
++{
++  push_cfun (fn);
++  basic_block bb;
++  gimple_stmt_iterator si;
++  FOR_EACH_BB_FN (bb, fn)
++    for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
++      {
++	gimple *stmt = gsi_stmt (si);
++	/* Only cast and plain copy assignments are interesting here.  */
++	if (!gimple_assign_cast_p (stmt) && !gimple_assign_copy_p (stmt))
++	  continue;
++	analyze_assign_stmt (stmt);
++      }
++  pop_cfun ();
++}
++
++/* Walk all global variables and all functions (their call edges and
++   cast/copy assignments) to collect sets of type aliases.  */
++
++static void
++collect_type_alias_sets ()
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\n\nCollect type alias sets walking global vars.\n");
++
++  varpool_node *var;
++  FOR_EACH_VARIABLE (var)
++    if (var->real_symbol_p ())
++      analyze_global_var (var);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nCollect type alias sets walking functions.\n");
++
++  struct cgraph_node *n;
++  FOR_EACH_FUNCTION (n)
++    {
++      if (!n->has_gimple_body_p ())
++	continue;
++      /* Materialize the gimple body before inspecting it.  */
++      n->get_body ();
++      function *fn = DECL_STRUCT_FUNCTION (n->decl);
++      if (!fn)
++	continue;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_function_node_info (n);
++      /* Analyze direct/indirect function calls.  */
++      for (cgraph_edge *e = n->callees; e; e = e->next_callee)
++	analyze_cgraph_edge (e);
++      for (cgraph_edge *e = n->indirect_calls; e; e = e->next_callee)
++	analyze_cgraph_edge (e);
++      /* Analyze assign (with casts) statements.  */
++      analyze_assigns (fn);
++    }
++}
++
++/* For each entry in the cbase-to-pointer-types map, pick a canonical type
++   (the member which is its own main variant) and record it in ctype_map
++   for every member of the set that does not yet have one.  */
++
++static void
++process_cbase_to_ptype_map ()
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nProcess types in cbase-to-ptypes map:\n");
++
++  for (type_alias_map::iterator it1 = cbase_to_ptype->begin ();
++       it1 != cbase_to_ptype->end (); ++it1)
++    {
++      type_set *set = it1->second;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_type_uid_with_set ("cb=(%d): ", (*type_uid_map)[it1->first],
++				cbase_to_ptype);
++      tree ctype = NULL;
++      /* Prefer the member that is its own main variant as canonical.  */
++      for (type_set::const_iterator it2 = set->begin ();
++	   it2 != set->end (); it2++)
++	{
++	  tree t2 = (*type_uid_map)[*it2];
++	  if (t2 == TYPE_MAIN_VARIANT (t2))
++	    {
++	      ctype = t2;
++	      break;
++	    }
++	}
++      if (!ctype)
++	continue;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_type_with_uid ("Select canonical type: ", ctype);
++      for (type_set::const_iterator it2 = set->begin ();
++	   it2 != set->end (); it2++)
++	{
++	  tree t = (*type_uid_map)[*it2];
++	  if (!ctype_map->count (t))
++	    {
++	      (*ctype_map)[t] = ctype;
++	      if (dump_file && (dump_flags & TDF_DETAILS))
++		fprintf (dump_file, "Set canonical type for (%d)->c(%d)\n",
++			 *it2, TYPE_UID (ctype));
++	    }
++	  else if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "Canonical type is already set (%d)->c(%d)\n",
++		     *it2, TYPE_UID ((*ctype_map)[t]));
++	}
++    }
++}
++
++/* Find one member of SET that already has a canonical type (either
++   TYPE_CANONICAL or a ctype_map entry) and propagate it to all members of
++   SET lacking a ctype_map entry.  If none is found, NULL is recorded.  */
++
++static void
++set_canonical_type_for_type_set (type_set *set)
++{
++  tree one_canonical = NULL;
++  for (type_set::const_iterator it = set->begin (); it != set->end (); it++)
++    {
++      tree t = (*type_uid_map)[*it];
++      gcc_assert (t);
++      if ((TYPE_CANONICAL (t) || ctype_map->count (t)))
++	{
++	  one_canonical = TYPE_CANONICAL (t) ? TYPE_CANONICAL (t)
++					     : (*ctype_map)[t];
++	  gcc_assert (COMPLETE_TYPE_P (t));
++	  break;
++	}
++    }
++  for (type_set::const_iterator it = set->begin (); it != set->end (); it++)
++    {
++      tree t = (*type_uid_map)[*it];
++      if (!ctype_map->count (t))
++	{
++	  (*ctype_map)[t] = one_canonical;
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      if (one_canonical)
++		fprintf (dump_file, "Set canonical type for (%d)->c(%d)\n",
++			 TYPE_UID (t), TYPE_UID (one_canonical));
++	      else
++		fprintf (dump_file, "Set NULL canonical for (%d)\n", *it);
++	    }
++	}
++      else if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  tree ct = (*ctype_map)[t];
++	  fprintf (dump_file, "Canonical type is already set (%d)->c(%d)\n",
++		   TYPE_UID (t), ct ? TYPE_UID (ct) : -1);
++	}
++    }
++}
++
++/* Dump a note when SET contains no complete type at all.  */
++
++static void
++dump_is_type_set_incomplete (type_set * set)
++{
++  bool has_complete_types = false;
++  for (type_set::const_iterator it = set->begin (); it != set->end (); it++)
++    if (COMPLETE_TYPE_P ((*type_uid_map)[*it]))
++      {
++	has_complete_types = true;
++	break;
++      }
++  if (!has_complete_types)
++    fprintf (dump_file, "Set of incomplete types\n");
++}
++
++/* Process all alias sets in ta_map once.  Sets that mix function-pointer
++   and non-function-pointer types make the involved function types unsafe;
++   pure function-pointer sets propagate aliasing to the base function types.
++   Finally a canonical type is chosen for each set.  */
++
++static void
++process_alias_type_sets ()
++{
++  if (dump_file)
++    fprintf (dump_file, "\nProcess alias sets of types:\n");
++  /* Keep processed types to process each type set (in ta_map) only once.  */
++  type_set processed_types;
++  for (type_alias_map::iterator it1 = ta_map->begin ();
++       it1 != ta_map->end (); ++it1)
++    {
++      tree type = (*type_uid_map)[it1->first];
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_type_uid_with_set ("(%d) ", type, ta_map);
++      if (processed_types.count (TYPE_UID (type)) != 0
++	  || unsafe_types->count (TYPE_UID (type)) != 0)
++	continue;
++      type_set *set = it1->second;
++      for (type_set::const_iterator it2 = set->begin ();
++	   it2 != set->end (); it2++)
++	processed_types.insert (*it2);
++      /* Check if this type set contains function pointers and
++	 non-function pointers.  */
++      bool has_no_fp = false, has_fp = false;
++      for (type_set::const_iterator it2 = set->begin ();
++	   it2 != set->end (); it2++)
++	{
++	  tree t2 = (*type_uid_map)[*it2];
++	  if (FUNCTION_POINTER_TYPE_P (t2))
++	    has_fp = true;
++	  else
++	    has_no_fp = true;
++	  if (has_fp && has_no_fp)
++	    break;
++	}
++      if (has_fp)
++	{
++	  for (type_set::const_iterator it2 = set->begin ();
++	       it2 != set->end (); it2++)
++	    {
++	      tree t2 = (*type_uid_map)[*it2];
++	      /* If it's a type set with mixed function and not-function types,
++		 mark all function pointer types in the set as unsafe.  */
++	      if (has_no_fp && FUNCTION_POINTER_TYPE_P (t2))
++		{
++		  tree ftype = TREE_TYPE (t2);
++		  unsafe_types->insert (TYPE_UID (ftype));
++		  if (dump_file && (dump_flags & TDF_DETAILS))
++		    fprintf (dump_file, "Insert function type (%d) to unsafe "
++			     "due to escape its pointer type (%d) to mixed "
++			     "alias set (printed before)\n",
++			     TYPE_UID (ftype), TYPE_UID (t2));
++		}
++	      /* If it's a type set with only function pointer types,
++		 mark all base function types in the set as aliases.  */
++	      if (!has_no_fp)
++		{
++		  gcc_assert (FUNCTION_POINTER_TYPE_P (type)
++			      && FUNCTION_POINTER_TYPE_P (t2));
++		  if (dump_file && (dump_flags & TDF_DETAILS))
++		    fprintf (dump_file, "Insert function type aliases by "
++			     "function pointer aliases:\n");
++		  register_ailas_type (TREE_TYPE (type), TREE_TYPE (t2),
++				       fta_map);
++		}
++	    }
++	}
++      set_canonical_type_for_type_set (set);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_is_type_set_incomplete (set);
++    }
++}
++
++/* Dump the set of unsafe types and, for each ta_map type with a ctype_map
++   entry, its selected alias-canonical type.  */
++
++static void
++dump_unsafe_and_canonical_types ()
++{
++  fprintf (dump_file, "\nList of unsafe types:\n");
++  for (type_set::iterator it = unsafe_types->begin ();
++       it != unsafe_types->end (); ++it)
++    {
++      print_generic_expr (dump_file, (*type_uid_map)[*it]);
++      fprintf (dump_file, " (%d)\n", *it);
++    }
++  fprintf (dump_file, "\nList of alias canonical types:\n");
++  for (type_alias_map::iterator it = ta_map->begin ();
++       it != ta_map->end (); ++it)
++    {
++      tree type = (*type_uid_map)[it->first];
++      if (ctype_map->count (type) == 0)
++	continue;
++      print_generic_expr (dump_file, type);
++      fprintf (dump_file, " -> ");
++      tree ctype = (*ctype_map)[type];
++      if (ctype != NULL)
++	{
++	  print_generic_expr (dump_file, ctype);
++	  fprintf (dump_file, " (%d)->(%d)\n",
++		   TYPE_UID (type), TYPE_UID (ctype));
++	}
++      else
++	 fprintf (dump_file, " null\n");
++    }
++}
++
++/* Make sure the function type of E's call statement has an entry in
++   fta_map, registering it as an alias of itself if necessary.  */
++
++static void
++init_function_type_alias_for_edge (cgraph_edge *e)
++{
++  gcall *stmt = e->call_stmt;
++  gcc_assert (stmt != NULL);
++  tree fntype = get_call_fntype (stmt);
++  if (fta_map->count (TYPE_UID (fntype)) == 0)
++    register_ailas_type (fntype, fntype, fta_map);
++}
++
++/* This pass over all function types makes each function type (of function
++   decls and of direct/indirect call sites) have at least one alias
++   (itself) in fta_map.  */
++
++static void
++init_function_type_aliases ()
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nInit aliases for all function types.\n");
++
++  struct cgraph_node *n;
++  FOR_EACH_FUNCTION (n)
++    {
++      tree fntype = TREE_TYPE (n->decl);
++      if (fta_map->count (TYPE_UID (fntype)) == 0)
++	register_ailas_type (fntype, fntype, fta_map);
++
++      if (!n->has_gimple_body_p ())
++	continue;
++      /* Materialize the gimple body to be able to walk the call edges.  */
++      n->get_body ();
++      function *fn = DECL_STRUCT_FUNCTION (n->decl);
++      if (!fn)
++	continue;
++
++      /* Init for function types of direct/indirect callees.  */
++      for (cgraph_edge *e = n->callees; e; e = e->next_callee)
++	init_function_type_alias_for_edge (e);
++      for (cgraph_edge *e = n->indirect_calls; e; e = e->next_callee)
++	init_function_type_alias_for_edge (e);
++    }
++}
++
++/* In lto-common.c there is the global canonical type table and the
++   corresponding machinery which detects the same types from different
++   modules and joins them, assigning the one canonical type.  However
++   lto does not set the goal to do a complete and precise matching, so
++   sometimes a few types have no TYPE_CANONICAL set.  Since ICP relies on
++   precise type matching, we create a similar table and register all
++   the required types in it.  */
++
++/* Cache of canonical-type hash values, keyed by type.  NOTE: the template
++   arguments were lost in patch extraction; reconstructed from usage
++   (keys are cast to const_tree, values are hashval_t).  */
++static std::map<const_tree, hashval_t> *canonical_type_hash_cache = NULL;
++/* Maps a canonical-type hash value to its representative type.  */
++static std::map<hashval_t, tree> *icp_canonical_types = NULL;
++
++static hashval_t hash_canonical_type (tree type);
++
++/* Register canonical type in icp_canonical_types and ctype_map evaluating
++   its hash (using hash_canonical_type) if it's needed.  Returns the hash
++   of T.  The first type seen with a given hash becomes the canonical
++   representative of all later types with the same hash.  */
++
++static hashval_t
++icp_register_canonical_type (tree t)
++{
++  hashval_t hash;
++  if (canonical_type_hash_cache->count ((const_tree) t) == 0)
++    {
++      tree t1 = TYPE_MAIN_VARIANT (t);
++      /* Prefer a complete TYPE_CANONICAL over an incomplete main variant
++	 when computing the hash.  */
++      if (!COMPLETE_TYPE_P (t1) && TYPE_CANONICAL (t1)
++	  && COMPLETE_TYPE_P (TYPE_CANONICAL (t1)))
++	{
++	  t1 = TYPE_CANONICAL (t1);
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "Use complete canonical (%d) for (%d)\n",
++		     TYPE_UID (t1), TYPE_UID (t));
++	}
++      hash = hash_canonical_type (t1);
++      /* Cache the just computed hash value.  */
++      (*canonical_type_hash_cache)[(const_tree) t] = hash;
++    }
++  else
++    hash = (*canonical_type_hash_cache)[(const_tree) t];
++
++  tree new_type = t;
++  if (icp_canonical_types->count (hash))
++    {
++      new_type = (*icp_canonical_types)[hash];
++      gcc_checking_assert (new_type != t);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Found canonical (%d) for (%d), h=%u\n",
++		 TYPE_UID (new_type), TYPE_UID (t), (unsigned int) hash);
++    }
++  else
++    {
++      (*icp_canonical_types)[hash] = t;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Register canonical %d, h=%u\n", TYPE_UID (t),
++		 (unsigned int) hash);
++    }
++  if (ctype_map->count (t) == 0)
++    (*ctype_map)[t] = new_type;
++  return hash;
++}
++
++/* Merge HSTATE with the hash of TYPE.  If the type is not registered yet,
++   register it in the maps of the canonical types.  */
++
++static void
++iterative_hash_canonical_type (tree type, inchash::hash &hstate)
++{
++  hashval_t v;
++  /* All type variants have same TYPE_CANONICAL.  */
++  type = TYPE_MAIN_VARIANT (type);
++  if (canonical_type_hash_cache->count ((const_tree) type))
++    v = (*canonical_type_hash_cache)[(const_tree) type];
++  else
++    v = icp_register_canonical_type (type);
++  hstate.merge_hash (v);
++}
++
++/* Compute and return hash for the given TYPE.  It does not take into
++   account base types of pointer types.  Mirrors the structural hashing
++   done for canonical type merging: tree code, mode, numeric properties,
++   array bounds, element types, function signatures and record fields.  */
++
++static hashval_t
++hash_canonical_type (tree type)
++{
++  inchash::hash hstate;
++  enum tree_code code;
++  /* Combine a few common features of types so that types are grouped into
++     smaller sets; when searching for existing matching types to merge,
++     only existing types having the same features as the new type will be
++     checked.  */
++  code = tree_code_for_canonical_type_merging (TREE_CODE (type));
++  hstate.add_int (code);
++  if (!RECORD_OR_UNION_TYPE_P (type))
++    hstate.add_int (TYPE_MODE (type));
++  /* Incorporate common features of numerical types.  */
++  if (INTEGRAL_TYPE_P (type)
++      || SCALAR_FLOAT_TYPE_P (type)
++      || FIXED_POINT_TYPE_P (type)
++      || TREE_CODE (type) == OFFSET_TYPE
++      || POINTER_TYPE_P (type))
++    {
++      hstate.add_int (TYPE_PRECISION (type));
++      if (!type_with_interoperable_signedness (type))
++	hstate.add_int (TYPE_UNSIGNED (type));
++    }
++  if (VECTOR_TYPE_P (type))
++    {
++      hstate.add_poly_int (TYPE_VECTOR_SUBPARTS (type));
++      hstate.add_int (TYPE_UNSIGNED (type));
++    }
++  if (TREE_CODE (type) == COMPLEX_TYPE)
++    hstate.add_int (TYPE_UNSIGNED (type));
++  if (POINTER_TYPE_P (type))
++    hstate.add_int (TYPE_ADDR_SPACE (TREE_TYPE (type)));
++  /* For array types hash the domain bounds and the string flag.  */
++  if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type))
++    {
++      hstate.add_int (TYPE_STRING_FLAG (type));
++      /* OMP lowering can introduce error_mark_node in place of
++	 random local decls in types.  */
++      if (TYPE_MIN_VALUE (TYPE_DOMAIN (type)) != error_mark_node)
++	inchash::add_expr (TYPE_MIN_VALUE (TYPE_DOMAIN (type)), hstate);
++      if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) != error_mark_node)
++	inchash::add_expr (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), hstate);
++    }
++  /* Recurse for aggregates with a single element type.  */
++  if (TREE_CODE (type) == ARRAY_TYPE
++      || TREE_CODE (type) == COMPLEX_TYPE
++      || TREE_CODE (type) == VECTOR_TYPE)
++    iterative_hash_canonical_type (TREE_TYPE (type), hstate);
++  /* Incorporate function return and argument types.  */
++  if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
++    {
++      unsigned nargs = 0;
++      iterative_hash_canonical_type (TREE_TYPE (type), hstate);
++      for (tree p = TYPE_ARG_TYPES (type); p; p = TREE_CHAIN (p))
++	{
++	  iterative_hash_canonical_type (TREE_VALUE (p), hstate);
++	  nargs++;
++	}
++      hstate.add_int (nargs);
++    }
++  if (RECORD_OR_UNION_TYPE_P (type))
++    {
++      unsigned nfields = 0;
++      for (tree f = TYPE_FIELDS (type); f; f = TREE_CHAIN (f))
++	if (TREE_CODE (f) == FIELD_DECL)
++	  {
++	    iterative_hash_canonical_type (TREE_TYPE (f), hstate);
++	    nfields++;
++	  }
++      hstate.add_int (nfields);
++    }
++  return hstate.end ();
++}
++
++/* Find the canonical type of TYPE, looking first in ctype_map and then in
++   icp_canonical_types via the cached hash.  Returns NULL if unknown.  */
++
++static tree
++find_canonical_type (tree type)
++{
++  if (ctype_map->count (type))
++    return (*ctype_map)[type];
++  if (canonical_type_hash_cache->count ((const_tree) type) == 0)
++    return NULL;
++  hashval_t h = (*canonical_type_hash_cache)[(const_tree) type];
++  if (icp_canonical_types->count (h))
++    return (*icp_canonical_types)[h];
++  return NULL;
++}
++
++/* Update HSTATE for the given TYPE taking into account pointees of pointer
++   types.  Returns true if TYPE turns out to be an (incomplete) function
++   type whose canonical type is not known yet.  It's used only for function
++   type hash calculation.  */
++
++static bool
++initial_hash_canonical_type (tree type, inchash::hash &hstate)
++{
++  /* All type variants have same TYPE_CANONICAL.  */
++  type = TYPE_MAIN_VARIANT (type);
++  /* void is hashed as if it were a pointer so that T* and void* unify.  */
++  if (VOID_TYPE_P (type))
++    {
++      hstate.add_int (POINTER_TYPE);
++      return false;
++    }
++  hstate.add_int (TREE_CODE (type));
++  hstate.add_int (TYPE_MODE (type));
++  /* For pointers, recurse into the pointee type.  */
++  if (POINTER_TYPE_P (type))
++    {
++      tree base_type = TREE_TYPE (type);
++      hstate.add_int (TYPE_ADDR_SPACE (base_type));
++      return initial_hash_canonical_type (base_type, hstate);
++    }
++  tree ctype = find_canonical_type (type);
++  if (!ctype)
++    {
++      /* A function type without a canonical type means the hash cannot be
++	 finalized yet; signal incompleteness to the caller.  */
++      if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "Due to ftype (%d)\n", TYPE_UID (type));
++	  return true;
++	}
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	dump_type_with_uid ("Has NO canonical type: ", type, TDF_UID);
++      /* Registration fills ctype_map for TYPE (see
++	 icp_register_canonical_type), so CTYPE is set below.  */
++      icp_register_canonical_type (type);
++      if (ctype_map->count(type))
++	ctype = (*ctype_map)[type];
++      if (ctype && dump_file && (dump_flags & TDF_DETAILS))
++	dump_type_with_uid ("Found canonical type: ", ctype, TDF_UID);
++    }
++  else if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_type_with_uid ("Canonical type: ", ctype, TDF_UID);
++  hstate.add_int (TYPE_UID (ctype));
++  return false;
++}
++
++/* Return the hash value for the given function TYPE, combining the hashes
++   of its return and argument types.  If any component's canonical type is
++   still unknown, record TYPE in INCOMPLETE_HASH_FTYPE (and remove it once
++   it becomes complete).  */
++
++static hashval_t
++get_hash_for_ftype (tree type, type_set *incomplete_hash_ftype)
++{
++  bool incomplete = false;
++  inchash::hash hstate;
++  /* Function type is expected.  */
++  gcc_assert (TREE_CODE (type) == FUNCTION_TYPE
++	      || TREE_CODE (type) == METHOD_TYPE)
++  /* Hash return type.  */
++  tree rt = TREE_TYPE (type);
++  tree ct = rt ? find_canonical_type (rt) : void_type_node;
++  incomplete |= initial_hash_canonical_type (ct ? ct : rt, hstate);
++  /* Hash arg types.  */
++  tree argt = TYPE_ARG_TYPES (type);
++  if (!argt)
++    incomplete |= initial_hash_canonical_type (void_type_node, hstate);
++  else
++    for (unsigned i = 1; argt; ++i, argt = TREE_CHAIN (argt))
++      {
++	tree ct = find_canonical_type (TREE_VALUE (argt));
++	ct = ct ? ct : TREE_VALUE (argt);
++	incomplete |= initial_hash_canonical_type (ct, hstate);
++      }
++  if (incomplete && incomplete_hash_ftype->count (TYPE_UID (type)) == 0)
++    incomplete_hash_ftype->insert (TYPE_UID (type));
++  else if (!incomplete && incomplete_hash_ftype->count (TYPE_UID (type)) != 0)
++    incomplete_hash_ftype->erase (TYPE_UID (type));
++  return hstate.end();
++}
++
++/* Find type aliases evaluating type hashes and connecting types with
++   the same hash values.  Iterates to a fixed point because hashing a
++   function type may depend on other function types becoming complete.
++   NOTE: the std::map template arguments were lost in patch extraction;
++   reconstructed from usage and the global declarations.  */
++
++static void
++find_type_aliases_by_compatibility ()
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nFind type aliases checking their compatibility.\n");
++
++  /* Maps a signature hash to the representative function type.  */
++  std::map<hashval_t, tree> hash_to_ftype;
++  type_set *incomplete_hash_ftype = new type_set;
++  canonical_type_hash_cache = new std::map<const_tree, hashval_t>;
++  icp_canonical_types = new std::map<hashval_t, tree>;
++
++  bool changed;
++  int i = 0;
++  do
++    {
++      changed = false;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Iteration %d\n", i);
++      for (type_alias_map::iterator it = fta_map->begin ();
++	   it != fta_map->end (); ++it)
++	{
++	  tree type = (*type_uid_map)[it->first];
++	  if (TYPE_CANONICAL (type))
++	    continue;
++	  hashval_t hash = get_hash_for_ftype (type, incomplete_hash_ftype);
++	  /* Incomplete hashes may stabilize on a later iteration once the
++	     referenced function types get their canonicals.  */
++	  if (incomplete_hash_ftype->count (TYPE_UID (type)) != 0)
++	    {
++	      if (dump_file && (dump_flags & TDF_DETAILS))
++		fprintf (dump_file, "Incomplete (%d), h=%u\n", TYPE_UID (type),
++			 (unsigned int) hash);
++	      continue;
++	    }
++	  if (hash_to_ftype.count (hash) == 0)
++	    hash_to_ftype[hash] = type;
++	  TYPE_CANONICAL (type) = hash_to_ftype[hash];
++	  changed = true;
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "(%d)->(%d), h=%u\n", TYPE_UID (type),
++		     TYPE_UID (TYPE_CANONICAL (type)), (unsigned int) hash);
++	}
++      i++;
++    }
++  while (changed);
++
++  delete incomplete_hash_ftype;
++  delete icp_canonical_types;
++  delete canonical_type_hash_cache;
++}
++
++/* Dump each function type in fta_map together with its alias set.  */
++
++static void
++dump_function_type_aliases_list ()
++{
++  fprintf (dump_file, "\nList of function type aliases:\n");
++  for (type_alias_map::iterator it = fta_map->begin ();
++       it != fta_map->end (); ++it)
++    dump_type_uid_with_set ("(%d) ", (*type_uid_map)[it->first], fta_map);
++}
++
++/* Collect type aliases and find missed canonical types: walk the IR to
++   build alias sets, choose canonical types for them, then resolve
++   remaining function-type canonicals by structural compatibility.  */
++
++static void
++collect_function_type_aliases ()
++{
++  collect_type_alias_sets ();
++  process_cbase_to_ptype_map ();
++  process_alias_type_sets ();
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_unsafe_and_canonical_types ();
++
++  /* TODO: maybe remove this pass.  */
++  init_function_type_aliases ();
++  for (type_alias_map::iterator it = fta_map->begin ();
++       it != fta_map->end (); ++it)
++    set_canonical_type_for_type_set (it->second);
++  find_type_aliases_by_compatibility ();
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    dump_function_type_aliases_list ();
++}
++
++/* Dump N's decl, its function type FTYPE with the type's alias set, and
++   notable properties (VARARGS, method, address-taken, unsafe).  */
++
++static void
++dump_function_signature_info (struct cgraph_node *n, tree ftype, bool varargs)
++{
++  fprintf (dump_file, "Function decl: ");
++  print_generic_expr (dump_file, n->decl);
++  dump_type_uid_with_set (" with type (%d) ", ftype, fta_map, true, false);
++  if (varargs)
++    fprintf (dump_file, "has varargs, ");
++  if (TREE_CODE (ftype) == METHOD_TYPE)
++    fprintf (dump_file, "is method, ");
++  if (!n->address_taken)
++    fprintf (dump_file, "is not address taken, ");
++  if (unsafe_types->count (TYPE_UID (ftype)))
++    fprintf (dump_file, "is unsafe, ");
++  fprintf (dump_file, "\n");
++}
++
++/* Return true if DECL takes variadic arguments, i.e. its argument list is
++   non-empty and is not terminated by void_type_node.  This is a corrected
++   variant of count_num_arguments ().  */
++
++static bool
++has_varargs (tree decl)
++{
++  tree args = TYPE_ARG_TYPES (TREE_TYPE (decl));
++  bool saw_arg = false;
++  while (args != NULL_TREE)
++    {
++      /* A void sentinel means the prototype is fixed-arity.  */
++      if (TREE_VALUE (args) == void_type_node)
++	return false;
++      saw_arg = true;
++      args = TREE_CHAIN (args);
++    }
++  /* List exhausted without the void terminator: varargs iff it was
++     non-empty (an empty list is an unprototyped declaration).  */
++  return saw_arg;
++}
++
++/* Join fs_map's decl sets for function type aliases so that all aliased
++   function types share one merged decl set.  NOTE(review): assumes every
++   fs_map key has an fta_map entry (init_function_type_aliases registers an
++   alias set for every function type) — verify.  */
++
++static void
++merge_fs_map_for_ftype_aliases ()
++{
++  if (dump_file)
++    fprintf (dump_file, "\n\nMerge decl sets for function type aliases:\n");
++  type_set processed_types;
++  for (type_decl_map::iterator it1 = fs_map->begin ();
++       it1 != fs_map->end (); ++it1)
++    {
++      if (processed_types.count (it1->first) != 0)
++	continue;
++      decl_set *d_set = it1->second;
++      tree type = (*type_uid_map)[it1->first];
++      type_set *set = (*fta_map)[it1->first];
++      for (type_set::const_iterator it2 = set->begin ();
++	   it2 != set->end (); it2++)
++	{
++	  tree t2 = (*type_uid_map)[*it2];
++	  processed_types.insert (*it2);
++	  if (type == t2)
++	    continue;
++	  gcc_assert ((TREE_CODE (type) == FUNCTION_TYPE
++		       || TREE_CODE (type) == METHOD_TYPE)
++		      && (TREE_CODE (t2) == FUNCTION_TYPE
++			  || TREE_CODE (t2) == METHOD_TYPE));
++	  if (fs_map->count (*it2) == 0 || (*fs_map)[*it2] == NULL)
++	    (*fs_map)[*it2] = d_set;
++	  else
++	    {
++	      /* The alias already had its own decl set: merge it into
++		 D_SET and free the now-unused set.  */
++	      decl_set *t2_decl_set = (*fs_map)[*it2];
++	      (*fs_map)[*it2] = d_set;
++	      gcc_assert (t2_decl_set && t2_decl_set->size() > 0);
++	      d_set->insert (t2_decl_set->begin (), t2_decl_set->end ());
++	      delete t2_decl_set;
++	    }
++	}
++    }
++}
++
++/* Dump function types with the set of function decls corresponding to
++   each; shared (merged) sets are printed only once.  NOTE: the std::set
++   template argument was lost in patch extraction; reconstructed from
++   usage (elements are decl_set pointers).  */
++
++static void
++dump_function_signature_sets ()
++{
++  fprintf (dump_file, "\n\nUnique sets of function signatures:\n");
++  std::set<decl_set *> processed_sets;
++  for (type_decl_map::iterator it1 = fs_map->begin ();
++       it1 != fs_map->end (); ++it1)
++    {
++      decl_set *set = it1->second;
++      if (processed_sets.count (set) != 0)
++	continue;
++      processed_sets.insert (set);
++      fprintf (dump_file, "{ ");
++      print_type_set (it1->first, fta_map);
++      fprintf (dump_file, " : ");
++      for (decl_set::const_iterator it2 = set->begin ();
++	   it2 != set->end (); it2++)
++	{
++	  /* Use a literal format string (the separator is data).  */
++	  fprintf (dump_file, "%s", it2 == set->begin () ? "" : ", ");
++	  print_generic_expr (dump_file, *it2);
++	  fprintf (dump_file, "(%d)", DECL_UID (*it2));
++	}
++      fprintf (dump_file, "}\n");
++    }
++}
++
++/* Fill fs_map: map each address-taken function's canonical function type
++   to the set of function decls having it.  Also record whether any
++   address-taken function has varargs (which disables the optimization).  */
++
++static void
++collect_function_signatures ()
++{
++  if (dump_file)
++    fprintf (dump_file, "\n\nCollect function signatures:\n");
++  struct cgraph_node *n;
++  FOR_EACH_FUNCTION (n)
++    {
++      gcc_assert (n->decl && TREE_TYPE (n->decl));
++      tree ftype = TREE_TYPE (n->decl);
++      bool varargs = has_varargs (n->decl);
++      if (varargs && n->address_taken)
++	has_address_taken_functions_with_varargs = true;
++      if (dump_file)
++	dump_function_signature_info (n, ftype, varargs);
++      /* Only address-taken functions can be targets of indirect calls.  */
++      if (!n->address_taken)
++	continue;
++      /* TODO: make a separate pass at the end to remove canonicals.  */
++      tree ctype = TYPE_CANONICAL (ftype);
++      unsigned alias_type_fs = ctype ? TYPE_UID (ctype) : 0;
++      if (dump_file)
++	/* std::map::count returns an unsigned size type; use %lu with an
++	   explicit cast for portability.  */
++	fprintf (dump_file, "canonical type: %d %lu\n", alias_type_fs,
++		 (unsigned long) fs_map->count (alias_type_fs));
++      if (alias_type_fs)
++	{
++	  if (fs_map->count (TYPE_UID (ctype)) == 0)
++	    (*fs_map)[TYPE_UID (ctype)] = new decl_set ();
++	  if (dump_file)
++	    fprintf (dump_file, "insert decl (%d) to set of map [%d]\n",
++		     DECL_UID (n->decl), TYPE_UID (ctype));
++	  (*fs_map)[TYPE_UID (ctype)]->insert (n->decl);
++	}
++    }
++  merge_fs_map_for_ftype_aliases ();
++  if (dump_file)
++    dump_function_signature_sets ();
++}
++
++#define MAX_TARG_STAT 4
++/* Counters gathered while processing indirect calls; reported by
++   dump_stats (the field meanings below follow its output strings).  */
++struct icp_stats
++{
++  int npolymorphic;	/* Polymorphic calls.  */
++  int nspeculated;	/* Calls already speculated.  */
++  int nsubst;		/* Calls substituted by direct calls.  */
++  int ncold;		/* Cold calls.  */
++  int nmultiple;	/* Calls with multiple targets.  */
++  int noverwritable;	/* Overwritable targets.  */
++  int nnotdefined;	/* Targets not defined.  */
++  int nexternal;	/* External targets.  */
++  int nartificial;	/* Artificial targets.  */
++  int nremove;		/* Functions that may be removed.  */
++  int nicp;		/* Candidates for indirect call promotion.  */
++  int nspec;		/* Calls promoted speculatively.  */
++  int nf;		/* Functions seen.  */
++  int ncalls;		/* Total calls.  */
++  int nindir;		/* Indirect calls.  */
++  int nind_only;	/* Functions called only indirectly.  */
++  int ntargs[MAX_TARG_STAT + 1];	/* Histogram of found target counts;
++					   last bucket aggregates the rest.  */
++};
++
++/* Dump N's name, decl and function type; with TDF_STATS also update the
++   function/call counters in STATS.  */
++
++static void
++dump_processing_function (struct cgraph_node *n, struct icp_stats &stats)
++{
++  /* Fix typo in the dump message ("Procesing").  */
++  fprintf (dump_file, "\n\nProcessing function %s\n", n->dump_name ());
++  print_generic_expr (dump_file, n->decl);
++  fprintf (dump_file, "\n");
++  dump_type_with_uid ("Func's type: ", TREE_TYPE (n->decl));
++  if (dump_file && (dump_flags & TDF_STATS))
++    {
++      struct cgraph_edge *e;
++      stats.nf++;
++      for (e = n->indirect_calls; e; e = e->next_callee)
++	stats.nindir++;
++      for (e = n->callees; e; e = e->next_callee)
++	stats.ncalls++;
++      /* ncalls counts direct and indirect calls together.  */
++      stats.ncalls += stats.nindir;
++      if (n->callers == NULL)
++	{
++	  fprintf (dump_file, "Function has NO callers\n");
++	  stats.nind_only++;
++	}
++    }
++}
++
++/* Dump the callee expression CALL_FN of an indirect call site and its
++   function pointer type CALL_FN_TY.  */
++
++static void
++dump_indirect_call_site (tree call_fn, tree call_fn_ty)
++{
++  fprintf (dump_file, "Indirect call site: ");
++  print_generic_expr (dump_file, call_fn);
++  dump_type_with_uid ("\nFunction pointer type: ", call_fn_ty);
++}
++
++/* Remove TYPE_UID and all its fta_map aliases from UNREACHABLE.  */
++
++static void
++erase_from_unreachable (unsigned type_uid, type_set &unreachable)
++{
++  unreachable.erase (type_uid);
++  if (!fta_map->count (type_uid))
++    return;
++  type_set *set = (*fta_map)[type_uid];
++  for (type_set::const_iterator it = set->begin (); it != set->end (); it++)
++    unreachable.erase (*it);
++}
++
++/* Dump the function decls DECLS found for canonical type CTYPE_UID by the
++   signature analysis, noting when the type is unsafe.  */
++
++static void
++dump_found_fdecls (decl_set *decls, unsigned ctype_uid)
++{
++  fprintf (dump_file, "Signature analysis FOUND decls (%d):", ctype_uid);
++  for (decl_set::const_iterator it = decls->begin (); it != decls->end (); it++)
++    {
++      print_generic_expr (dump_file, *it);
++      fprintf (dump_file, "(%d), ", DECL_UID (*it));
++    }
++  if (unsafe_types->count (ctype_uid))
++    fprintf (dump_file, "type is UNSAFE");
++  fprintf (dump_file, "\n");
++}
++
++/* Bump the histogram bucket in STATS for a call site with SIZE found
++   targets; counts above MAX_TARG_STAT share the last bucket.  */
++
++static void
++count_found_targets (struct icp_stats &stats, unsigned size)
++{
++  gcc_assert (size > 0);
++  stats.ntargs[size > MAX_TARG_STAT ? MAX_TARG_STAT : size - 1]++;
++}
++
++/* Promote the indirect call on edge E in function N to a (possibly
++   speculative) direct call to LIKELY_TARGET, updating STATS.  NOTE: the
++   dyn_cast template argument was lost in patch extraction; restored as
++   <cgraph_node *> from the surrounding code.  */
++
++static void
++promote_call (struct cgraph_edge *e, struct cgraph_node *n,
++	      struct cgraph_node *likely_target, struct icp_stats *stats)
++{
++  if (dump_enabled_p ())
++    {
++      dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, e->call_stmt,
++		       "promoting indirect call in %s to %s\n",
++		       n->dump_name (), likely_target->dump_name ());
++    }
++  /* Redirect to a non-interposable alias when the target could be
++     replaced at link time.  */
++  if (!likely_target->can_be_discarded_p ())
++    {
++      symtab_node *sn = likely_target->noninterposable_alias ();
++      cgraph_node *alias = dyn_cast <cgraph_node *> (sn);
++      if (alias)
++	likely_target = alias;
++    }
++  gimple *new_call;
++  if (flag_icp_speculatively)
++    {
++      /* Keep the indirect call and add a speculative direct call with
++	 a 50% profile scale.  */
++      e->make_speculative (likely_target, e->count.apply_scale (5, 10));
++      new_call = e->call_stmt;
++      stats->nspec++;
++    }
++  else
++    {
++      cgraph_edge *e2 = cgraph_edge::make_direct (e, likely_target);
++      new_call = cgraph_edge::redirect_call_stmt_to_callee (e2);
++      stats->nsubst++;
++    }
++  if (dump_file)
++    {
++      fprintf (dump_file, "The call is substituted by: ");
++      print_gimple_stmt (dump_file, new_call, 0);
++      fprintf (dump_file, "\n");
++    }
++}
++
++/* Find functions which are called only indirectly and, if they are not in
++   fs_map, could be removed.  For now it is used only to print stats;
++   returns the count of such functions.  */
++
++static int
++find_functions_can_be_removed (type_set &unreachable)
++{
++  int nremove = 0;
++  if (dump_file)
++    fprintf (dump_file, "\nRemove unused functions:\n");
++  struct cgraph_node *n;
++  FOR_EACH_FUNCTION (n)
++    {
++      gcc_assert (n->decl && TREE_TYPE (n->decl));
++      /* Skip functions with direct callers.  */
++      if (n->callers != NULL)
++	continue;
++      tree ftype = TREE_TYPE (n->decl);
++      tree ctype = TYPE_CANONICAL (ftype);
++      /* The n->callers check was redundant here (already handled by the
++	 continue above) and has been dropped.  */
++      if (!ctype || !unreachable.count (TYPE_UID (ctype))
++	  || unsafe_types->count (TYPE_UID (ftype))
++	  || TREE_CODE (ftype) == METHOD_TYPE
++	  || !n->definition || n->alias || n->thunk || n->clones)
++	continue;
++      if (dump_file)
++	fprintf (dump_file, "%s is not used\n", n->dump_name ());
++      nremove++;
++    }
++  return nremove;
++}
++
++static void
++dump_stats (struct icp_stats &st)
++{
++  fprintf (dump_file, "\nSTATS: %i candidates for indirect call promotion,"
++	   " %i substituted, %i speculatively promoted, %i cold\n"
++	   "%i have multiple targets, %i already speculated, %i external,"
++	   " %i not defined, %i artificial, %i polymorphic calls,"
++	   " %i overwritable\n", st.nicp, st.nsubst, st.nspec, st.ncold,
++	   st.nmultiple, st.nspeculated, st.nexternal, st.nnotdefined,
++	   st.nartificial, st.npolymorphic, st.noverwritable);
++  if (!(dump_flags & TDF_STATS))
++    return;
++  fprintf (dump_file, "EXTRA STATS: %i functions, %i indirect calls,"
++	   " %i total calls, %i called only indirectly, %i may be removed\n"
++	   "Indirect call sites with found targets ", st.nf, st.nindir,
++	   st.ncalls, st.nind_only, st.nremove);
++  for (unsigned i = 0; i < MAX_TARG_STAT; i++)
++    fprintf (dump_file, "%u:%i, ", i + 1, st.ntargs[i]);
++  fprintf (dump_file, "more:%i\n", st.ntargs[MAX_TARG_STAT]);
++}
++
++/* Optimize indirect calls.  When an indirect call has only one target,
++   promote it into a direct call.  */
++
++static bool
++optimize_indirect_calls ()
++{
++  /* TODO: maybe move to the top of ipa_icp.  */
++  if (has_address_taken_functions_with_varargs)
++    {
++      if (dump_file)
++	fprintf (dump_file, "\n\nAddress taken function with varargs is found."
++		 " Skip the optimization.\n");
++      return false;
++    }
++  struct icp_stats stats = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
++			    0, 0, 0, 0, 0, {0, 0, 0, 0, 0}};
++  /* At first assume all function types are unreachable.  */
++  type_set unreachable_ftypes;
++  if (dump_file && (dump_flags & TDF_STATS))
++    for (type_decl_map::iterator it = fs_map->begin ();
++	 it != fs_map->end (); ++it)
++      unreachable_ftypes.insert (it->first);
++
++  struct cgraph_node *n;
++  FOR_EACH_DEFINED_FUNCTION (n)
++    {
++      if (dump_file)
++	dump_processing_function (n, stats);
++      struct cgraph_edge *e;
++      bool update = false;
++      if (!opt_for_fn (n->decl, flag_icp) || !n->has_gimple_body_p ()
++	  || n->inlined_to || !n->indirect_calls)
++	{
++	  if (dump_file)
++	    fprintf (dump_file, "Skip the function\n");
++	  continue;
++	}
++      /* If the function has indirect calls which are not polymorphic,
++	 process its body, otherwise continue.  */
++      bool non_polymorphic_calls = false;
++      for (e = n->indirect_calls; e; e = e->next_callee)
++	if (!e->indirect_info->polymorphic)
++	  {
++	    non_polymorphic_calls = true;
++	    break;
++	  }
++      if (!non_polymorphic_calls)
++	{
++	  if (dump_file)
++	    fprintf (dump_file, "All indirect calls are polymorphic,"
++		     "skip...\n");
++	  continue;
++	}
++      /* Get the function body to operate with call statements.  */
++      n->get_body ();
++      /* Walk indirect call sites and apply the optimization.  */
++      cgraph_edge *next;
++      for (e = n->indirect_calls; e; e = next)
++	{
++	  next = e->next_callee;
++	  if (e->indirect_info->polymorphic)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Target is polymorphic, skip...\n\n");
++	      stats.npolymorphic++;
++	      continue;
++	    }
++	  stats.nicp++;
++	  struct cgraph_node *likely_target = NULL;
++	  gcall *stmt = e->call_stmt;
++	  gcc_assert (stmt != NULL);
++	  tree call_fn = gimple_call_fn (stmt);
++	  tree call_fn_ty = TREE_TYPE (call_fn);
++	  if (dump_file)
++	    dump_indirect_call_site (call_fn, call_fn_ty);
++	  tree decl = NULL_TREE;
++	  if (POINTER_TYPE_P (call_fn_ty))
++	    {
++	      if (dump_file)
++		dump_type_with_uid ("Pointee type: ", TREE_TYPE (call_fn_ty));
++	      if (dump_file && (dump_flags & TDF_STATS))
++		erase_from_unreachable (TYPE_UID (TREE_TYPE (call_fn_ty)),
++					unreachable_ftypes);
++	      /* Try to use the signature analysis results.  */
++	      tree ctype = TYPE_CANONICAL (TREE_TYPE (call_fn_ty));
++	      unsigned ctype_uid = ctype ? TYPE_UID (ctype) : 0;
++	      if (ctype_uid && fs_map->count (ctype_uid))
++		{
++		  if (dump_flags && (dump_flags & TDF_STATS))
++		    erase_from_unreachable (ctype_uid, unreachable_ftypes);
++		  decl_set *decls = (*fs_map)[ctype_uid];
++		  if (dump_file)
++		    dump_found_fdecls (decls, ctype_uid);
++		  /* TODO: optimize for multiple targets.  */
++		  if (!unsafe_types->count (ctype_uid) && decls->size () == 1)
++		    {
++		      decl = *(decls->begin ());
++		      likely_target = cgraph_node::get (decl);
++		    }
++		  if (!unsafe_types->count (ctype_uid)
++		      && (dump_flags & TDF_STATS))
++		    count_found_targets (stats, decls->size ());
++		}
++	    }
++	  if (!decl || !likely_target)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Callee is unknown\n\n");
++	      continue;
++	    }
++	  if (TREE_CODE (TREE_TYPE (decl)) == METHOD_TYPE)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Callee is method\n\n");
++	      continue;
++	    }
++	  if (e->speculative)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Call is already speculated\n\n");
++	      stats.nspeculated++;
++	      continue;
++	    }
++	  if (!likely_target->definition)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Target is not a definition\n\n");
++	      stats.nnotdefined++;
++	      continue;
++	    }
++	  /* Do not introduce new references to external symbols.  While we
++	     can handle these just well, it is common for programs to
++	     incorrectly with headers defining methods they are linked
++	     with.  */
++	  if (DECL_EXTERNAL (likely_target->decl))
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Target is external\n\n");
++	      stats.nexternal++;
++	      continue;
++	    }
++	  /* Don't use an implicitly-declared destructor (c++/58678).  */
++	  struct cgraph_node *non_thunk_target
++	    = likely_target->function_symbol ();
++	  if (DECL_ARTIFICIAL (non_thunk_target->decl))
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Target is artificial\n\n");
++	      stats.nartificial++;
++	      continue;
++	    }
++	  if (likely_target->get_availability () <= AVAIL_INTERPOSABLE
++	      && likely_target->can_be_discarded_p ())
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Target is overwritable\n\n");
++	      stats.noverwritable++;
++	      continue;
++	    }
++	  else if (dbg_cnt (icp))
++	    {
++	      promote_call (e, n, likely_target, &stats);
++	      update = true;
++	    }
++	}
++      if (update)
++	ipa_update_overall_fn_summary (n);
++    }
++
++  if (dump_file && (dump_flags & TDF_STATS))
++    stats.nremove = find_functions_can_be_removed (unreachable_ftypes);
++
++  if (dump_file)
++    dump_stats (stats);
++  return stats.nsubst || stats.nspec;
++}
++
++/* Delete the given MAP with allocated sets.  One set may be associated with
++   more than one type/decl.  */
++
++template <typename MAP>
++static void
++remove_type_alias_map (MAP *map)
++{
++  std::set<typename MAP::mapped_type> processed_sets;
++  for (typename MAP::iterator it = map->begin (); it != map->end (); it++)
++    {
++      typename MAP::mapped_type set = it->second;
++      if (processed_sets.count (set) != 0)
++	continue;
++      processed_sets.insert (set);
++      delete set;
++    }
++  delete map;
++}
++
++/* The ipa indirect call promotion pass. Run required analysis and optimize
++   indirect calls.
++   When indirect call has only one target, promote it into a direct call.  */
++
++static unsigned int
++ipa_icp (void)
++{
++  ta_map = new type_alias_map;
++  fta_map = new type_alias_map;
++  cbase_to_ptype = new type_alias_map;
++  fs_map = new type_decl_map;
++  ctype_map = new type_map;
++  unsafe_types = new type_set;
++  type_uid_map = new uid_to_type_map;
++
++  /* Find type aliases, fill the function signature map and
++     optimize indirect calls.  */
++  collect_function_type_aliases ();
++  collect_function_signatures ();
++  bool optimized = optimize_indirect_calls ();
++
++  remove_type_alias_map (ta_map);
++  remove_type_alias_map (fta_map);
++  remove_type_alias_map (cbase_to_ptype);
++  remove_type_alias_map (fs_map);
++  delete ctype_map;
++  delete unsafe_types;
++  delete type_uid_map;
++
++  return optimized ? TODO_remove_functions : 0;
++}
++
++namespace {
++
++const pass_data pass_data_ipa_icp =
++{
++  IPA_PASS, /* type */
++  "icp", /* name */
++  OPTGROUP_NONE, /* optinfo_flags */
++  TV_IPA_ICP, /* tv_id */
++  0, /* properties_required */
++  0, /* properties_provided */
++  0, /* properties_destroyed */
++  0, /* todo_flags_start */
++  0, /* todo_flags_finish */
++};
++
++class pass_ipa_icp : public ipa_opt_pass_d
++{
++public:
++  pass_ipa_icp (gcc::context *ctxt)
++    : ipa_opt_pass_d (pass_data_ipa_icp, ctxt,
++		      NULL, /* generate_summary */
++		      NULL, /* write_summary */
++		      NULL, /* read_summary */
++		      NULL, /* write_optimization_summary */
++		      NULL, /* read_optimization_summary */
++		      NULL, /* stmt_fixup */
++		      0, /* function_transform_todo_flags_start */
++		      NULL, /* function_transform */
++		      NULL) /* variable_transform */
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *)
++    {
++      return (optimize && flag_icp && !seen_error ()
++	      && (in_lto_p || flag_whole_program));
++    }
++
++  virtual unsigned int execute (function *) { return ipa_icp (); }
++
++}; // class pass_ipa_icp
++
++} // anon namespace
++
++ipa_opt_pass_d *
++make_pass_ipa_icp (gcc::context *ctxt)
++{
++  return new pass_ipa_icp (ctxt);
++}
+ 
+ #include "gt-ipa-devirt.h"
+diff --git a/gcc/passes.def b/gcc/passes.def
+index 9692066e4..d6db9be6e 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -156,6 +156,7 @@ along with GCC; see the file COPYING3.  If not see
+   NEXT_PASS (pass_ipa_profile);
+   NEXT_PASS (pass_ipa_icf);
+   NEXT_PASS (pass_ipa_devirt);
++  NEXT_PASS (pass_ipa_icp);
+   NEXT_PASS (pass_ipa_cp);
+   NEXT_PASS (pass_ipa_sra);
+   NEXT_PASS (pass_ipa_cdtor_merge);
+diff --git a/gcc/testsuite/gcc.dg/icp1.c b/gcc/testsuite/gcc.dg/icp1.c
+new file mode 100644
+index 000000000..c2117f738
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/icp1.c
+@@ -0,0 +1,40 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp1.c.077i.icp" } */
++
++int dummy = 0;
++
++typedef int (*ftype1)(int a);
++typedef float (*ftype2)(int a);
++
++ftype1 func1;
++
++struct {
++ int a;
++ int* b;
++ ftype1 myf1;
++ ftype2 myf2;
++} my_str;
++
++int foo(int a) {
++  my_str.myf1 = func1;
++  if (a % 2 == 0)
++    dummy += dummy % (dummy - a);
++  return a + 1;
++}
++
++float bar(int a) {
++  my_str.myf2 = &bar;
++  func1 = &foo;
++  return foo(a);
++}
++
++int main() {
++  bar(1);
++  my_str.myf2(3);
++  return (my_str.myf1(2) + func1(4)) != 8;
++}
++
++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(4\\);" "icp" } } */
++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(2\\);" "icp" } } */
++/* { dg-final { scan-ipa-dump "The call is substituted by: bar \\(3\\);" "icp" } } */
++/* { dg-final { scan-ipa-dump "STATS: 3 candidates for indirect call promotion, 3 substituted, 0 speculatively promoted, 0 cold" "icp" } } */
+diff --git a/gcc/testsuite/gcc.dg/icp2.c b/gcc/testsuite/gcc.dg/icp2.c
+new file mode 100644
+index 000000000..03d31d407
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/icp2.c
+@@ -0,0 +1,38 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp2.c.077i.icp" } */
++
++int dummy = 0;
++
++typedef int (*ftype1)(int a);
++typedef float (*ftype2)(int a);
++
++ftype1 func1;
++
++struct {
++ int a;
++ int* b;
++ ftype1 myf1;
++ ftype2 myf2;
++} my_str;
++
++int foo(int a) {
++  my_str.myf1 = func1;
++  if (a % 2 == 0)
++    dummy += dummy % (dummy - a);
++  return a + 1;
++}
++
++float bar(int a) {
++  my_str.myf2 = dummy ? (ftype2) &foo : &bar;
++  func1 = (ftype1) &bar;
++  return foo(a);
++}
++
++int main() {
++  bar(1);
++  my_str.myf2(3);
++  return (my_str.myf1(2) + func1(4)) != 8;
++}
++
++/* { dg-final { scan-ipa-dump-not "The call is substituted by.*" "icp" } } */
++/* { dg-final { scan-ipa-dump "STATS: 3 candidates for indirect call promotion, 0 substituted, 0 speculatively promoted, 0 cold" "icp" } } */
+diff --git a/gcc/testsuite/gcc.dg/icp3.c b/gcc/testsuite/gcc.dg/icp3.c
+new file mode 100644
+index 000000000..2a7d1e6f5
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/icp3.c
+@@ -0,0 +1,52 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp3.c.077i.icp" } */
++
++#include <stdio.h>
++
++int dummy = 0;
++
++typedef int (*ftype1)(int a);
++typedef float (*ftype2)(int a);
++typedef ftype1 (*ftype3) (ftype2);
++
++ftype1 func1;
++
++struct {
++ int a;
++ int* b;
++ ftype1 myf1;
++ ftype2 myf2;
++ ftype3 myf3;
++} my_str;
++
++ftype1 boo(ftype2 a) {
++  printf ("Call boo\n");
++  return (ftype1) a;
++}
++
++int foo(int a) {
++  printf ("Call foo\n");
++  my_str.myf1 = func1;
++  if (a % 2 == 0)
++    dummy += dummy % (dummy - a);
++  return a + 1;
++}
++
++float bar(int a) {
++  printf("Call bar\n");
++  my_str.myf2 = (ftype2) my_str.myf3((ftype2) foo);
++  func1 = &foo;
++  return foo(a);
++}
++
++int main() {
++  my_str.myf3 = &boo;
++  bar(1);
++  my_str.myf2(3);
++  return (my_str.myf1(2) + func1(4)) != 8;
++}
++
++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(4\\);" "icp" } } */
++/* { dg-final { scan-ipa-dump "The call is substituted by:.*= foo \\(2\\);" "icp" } } */
++/* { dg-final { scan-ipa-dump "The call is substituted by: foo \\(3\\);" "icp" } } */
++/* { dg-final { scan-ipa-dump "STATS: 4 candidates for indirect call promotion, 3 substituted, 0 speculatively promoted, 0 cold" "icp" } } */
+diff --git a/gcc/testsuite/gcc.dg/icp4.c b/gcc/testsuite/gcc.dg/icp4.c
+new file mode 100644
+index 000000000..e3e1d5116
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/icp4.c
+@@ -0,0 +1,55 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp4.c.077i.icp" } */
++
++#include <stdio.h>
++
++int dummy = 0;
++
++typedef int (*ftype1)(int a);
++typedef float (*ftype2)(int a);
++typedef ftype1 (*ftype3) (ftype2);
++
++ftype1 func1;
++ftype1 boo(ftype2 a);
++int foo(int a);
++float bar(int a);
++
++typedef struct {
++ int a;
++ int* b;
++ ftype1 myf1;
++ ftype2 myf2;
++ ftype3 myf3;
++} T;
++
++T my_str = {0, (int*) &dummy, (ftype1) &boo, (ftype2) &foo, (ftype3) &bar};
++
++ftype1 boo(ftype2 a) {
++  printf ("Call boo\n");
++  return (ftype1) a;
++}
++
++int foo(int a) {
++  printf ("Call foo\n");
++  my_str.myf1 = func1;
++  if (a % 2 == 0)
++    dummy += dummy % (dummy - a);
++  return a + 1;
++}
++
++float bar(int a) {
++  printf("Call bar\n");
++  my_str.myf2 = (ftype2) my_str.myf3((ftype2) foo);
++  func1 = &foo;
++  return foo(a);
++}
++
++int main() {
++  my_str.myf3 = &boo;
++  bar(1);
++  my_str.myf2(3);
++  return (my_str.myf1(2) + func1(4)) != 8;
++}
++
++/* { dg-final { scan-ipa-dump-not "The call is substituted by.*" "icp" } } */
++/* { dg-final { scan-ipa-dump "STATS: 4 candidates for indirect call promotion, 0 substituted, 0 speculatively promoted, 0 cold" "icp" } } */
+diff --git a/gcc/testsuite/gcc.dg/icp5.c b/gcc/testsuite/gcc.dg/icp5.c
+new file mode 100644
+index 000000000..c7709243c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/icp5.c
+@@ -0,0 +1,66 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp5.c.077i.icp" } */
++
++#include <stdio.h>
++
++int dummy = 0;
++
++typedef int (*ftype1)(int a);
++typedef float (*ftype2)(int a);
++typedef ftype1 (*ftype3) (ftype2);
++
++ftype1 func1;
++ftype1 boo(ftype2 a);
++int foo(int a);
++float bar(int a);
++
++typedef struct {
++ int a;
++ int* b;
++ ftype1 myf1;
++ ftype2 myf2;
++ ftype3 myf3;
++} T;
++
++T my_str;
++
++typedef struct {
++ int a;
++ int* b;
++ ftype3 myf1;
++ ftype2 myf2;
++ ftype1 myf3;
++} T1;
++
++T1 my1 = {0, &dummy, boo, &bar, &foo};
++
++ftype1 boo(ftype2 a) {
++  printf("Call boo\n");
++  return (ftype1) a;
++}
++
++int foo(int a) {
++  printf("Call foo\n");
++  my_str.myf1 = func1;
++  if (a % 2 == 0)
++    dummy += dummy % (dummy - a);
++  return a + 1;
++}
++
++float bar(int a) {
++  printf("Call bar\n");
++  my_str.myf2 = (ftype2) my_str.myf3((ftype2) foo);
++  func1 = &foo;
++  return foo(a);
++}
++
++int main() {
++  my_str = *(T*)&my1;
++  my_str.myf3 = &boo;
++  bar(1);
++  my_str.myf2(3);
++  return (my_str.myf1(2) + func1(4)) != 8;
++}
++
++/* { dg-final { scan-ipa-dump-not "The call is substituted by.*" "icp" } } */
++/* { dg-final { scan-ipa-dump "STATS: 4 candidates for indirect call promotion, 0 substituted, 0 speculatively promoted, 0 cold" "icp" } } */
+diff --git a/gcc/testsuite/gcc.dg/icp6.c b/gcc/testsuite/gcc.dg/icp6.c
+new file mode 100644
+index 000000000..5a9f15045
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/icp6.c
+@@ -0,0 +1,66 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp6.c.077i.icp -Wno-int-conversion -Wno-incompatible-pointer-types" } */
++int dummy = 0;
++
++typedef int (*ftype1)(int a);
++typedef float (*ftype2)(int a);
++typedef int (*ftype3)();
++typedef int (*ftype4)(int a, int b);
++
++ftype1 func1;
++ftype4 func2;
++
++struct {
++ int a;
++ int* b;
++ ftype1 myf1;
++ ftype2 myf2;
++ ftype3 myf3;
++} my_str;
++
++int foo3(float a) {
++  return dummy;
++}
++
++int foo4(int a, int b) {
++  return a*b;
++}
++
++int foo(int a) {
++  my_str.myf1 = func1;
++  if (a % 2 == 0)
++    dummy += dummy % (dummy - a);
++  return a + 1;
++}
++
++int foo2(float a) {
++ func1 = (ftype1) &foo;
++ func2 = &foo4;
++ return dummy + foo3 (a);
++}
++
++float bar2(int a) {
++  my_str.myf2 = (ftype2)(0x864213);
++  func2 = 0x65378;
++  return foo(a);
++}
++
++float bar(int a) {
++  my_str.myf3 = &foo2;
++  my_str.myf2 = &bar;
++  func1 = (ftype1) &dummy;
++  func2 = (ftype4) &bar2;
++  return foo(a);
++}
++
++int main() {
++  bar(1);
++  bar2(1);
++  bar(0);
++  my_str.myf2(3);
++  ((ftype1) my_str.myf3)(0.0);
++  int sum = func1(4);
++  return (sum + my_str.myf1(2) + func2(5, 6)) != 38;
++}
++/* { dg-final { scan-ipa-dump "The call is substituted by.*foo2 \\(0\\);" "icp" } } */
++/* { dg-final { scan-ipa-dump "STATS: 5 candidates for indirect call promotion, 1 substituted, 0 speculatively promoted, 0 cold" "icp" } } */
+diff --git a/gcc/testsuite/gcc.dg/icp7.c b/gcc/testsuite/gcc.dg/icp7.c
+new file mode 100644
+index 000000000..fa52197f4
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/icp7.c
+@@ -0,0 +1,48 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -flto -ficp -fdump-ipa-icp=./icp7.c.077i.icp" } */
++
++#include <stdarg.h>
++
++int dummy = 0;
++
++typedef int (*ftype1)(int a);
++typedef float (*ftype2)(int a);
++
++ftype1 func1;
++
++struct {
++ int a;
++ int* b;
++ ftype1 myf1;
++ ftype2 myf2;
++} my_str;
++
++int boo(int a, ...) {
++  va_list ap;
++  va_start(ap, a);
++  if (a == 0)
++    dummy += va_arg(ap, int);
++  va_end(ap);
++  return dummy;
++}
++
++int foo(int a) {
++  my_str.myf1 = func1;
++  if (a % 2 == 0)
++    dummy += dummy % (dummy - a);
++  return a + 1;
++}
++
++float bar(int a) {
++  my_str.myf2 = &bar;
++  func1 = (ftype1) &boo;
++  return foo(a);
++}
++
++int main() {
++  bar(1);
++  my_str.myf2(3);
++  return (my_str.myf1(2) + func1(4));
++}
++
++/* { dg-final { scan-ipa-dump "Address taken function with varargs is found. Skip the optimization." "icp" } } */
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 98a5a490f..ca4156066 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -71,6 +71,7 @@ DEFTIMEVAR (TV_CGRAPHOPT             , "callgraph optimization")
+ DEFTIMEVAR (TV_CGRAPH_FUNC_EXPANSION , "callgraph functions expansion")
+ DEFTIMEVAR (TV_CGRAPH_IPA_PASSES     , "callgraph ipa passes")
+ DEFTIMEVAR (TV_IPA_ODR		     , "ipa ODR types")
++DEFTIMEVAR (TV_IPA_ICP               , "ipa indirect call promotion")
+ DEFTIMEVAR (TV_IPA_FNSUMMARY         , "ipa function summary")
+ DEFTIMEVAR (TV_IPA_UNREACHABLE       , "ipa dead code removal")
+ DEFTIMEVAR (TV_IPA_INHERITANCE       , "ipa inheritance graph")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 56898e019..5f09e4f8b 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -524,6 +524,7 @@ extern ipa_opt_pass_d *make_pass_ipa_cp (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_sra (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_icf (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_devirt (gcc::context *ctxt);
++extern ipa_opt_pass_d *make_pass_ipa_icp (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt);
+-- 
+2.33.0
+
diff --git a/0041-LoongArch-Modify-MUSL_DYNAMIC_LINKER.patch b/0041-LoongArch-Modify-MUSL_DYNAMIC_LINKER.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b34be7229d47537fdda61f8c6ec2099271b6e9fe
--- /dev/null
+++ b/0041-LoongArch-Modify-MUSL_DYNAMIC_LINKER.patch
@@ -0,0 +1,43 @@
+From 4c24f920e52c0dddf4bbbc391d2e5d2524754b4a Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Sat, 18 Nov 2023 11:04:42 +0800
+Subject: [PATCH 041/188] LoongArch: Modify MUSL_DYNAMIC_LINKER.
+
+Use no suffix at all in the musl dynamic linker name for hard
+float ABI. Use -sf and -sp suffixes in musl dynamic linker name
+for soft float and single precision ABIs. The following table
+outlines the musl interpreter names for the LoongArch64 ABI names.
+
+musl interpreter            | LoongArch64 ABI
+--------------------------- | -----------------
+ld-musl-loongarch64.so.1    | loongarch64-lp64d
+ld-musl-loongarch64-sp.so.1 | loongarch64-lp64f
+ld-musl-loongarch64-sf.so.1 | loongarch64-lp64s
+
+gcc/ChangeLog:
+
+	* config/loongarch/gnu-user.h (MUSL_ABI_SPEC): Modify suffix.
+---
+ gcc/config/loongarch/gnu-user.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h
+index 60ef75601..9fc49dc8f 100644
+--- a/gcc/config/loongarch/gnu-user.h
++++ b/gcc/config/loongarch/gnu-user.h
+@@ -34,9 +34,9 @@ along with GCC; see the file COPYING3.  If not see
+   "/lib" ABI_GRLEN_SPEC "/ld-linux-loongarch-" ABI_SPEC ".so.1"
+ 
+ #define MUSL_ABI_SPEC \
+-  "%{mabi=lp64d:-lp64d}" \
+-  "%{mabi=lp64f:-lp64f}" \
+-  "%{mabi=lp64s:-lp64s}"
++  "%{mabi=lp64d:}" \
++  "%{mabi=lp64f:-sp}" \
++  "%{mabi=lp64s:-sf}"
+ 
+ #undef MUSL_DYNAMIC_LINKER
+ #define MUSL_DYNAMIC_LINKER \
+-- 
+2.43.0
+
diff --git a/0041-Port-fixes-in-icp-to-GCC-12.patch b/0041-Port-fixes-in-icp-to-GCC-12.patch
new file mode 100644
index 0000000000000000000000000000000000000000..723f8b074caf1b33cdbca7e49ece489fcb4a7ba7
--- /dev/null
+++ b/0041-Port-fixes-in-icp-to-GCC-12.patch
@@ -0,0 +1,100 @@
+From aaa117a9ff58fb208e8c8859e075ca425f995f63 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Tue, 27 Feb 2024 07:43:57 +0800
+Subject: [PATCH 07/18] Port fixes in icp to GCC 12
+
+---
+ gcc/ipa-devirt.cc | 37 ++++++++++++++++++++++++++++++-------
+ 1 file changed, 30 insertions(+), 7 deletions(-)
+
+diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc
+index 383839189..318535d06 100644
+--- a/gcc/ipa-devirt.cc
++++ b/gcc/ipa-devirt.cc
+@@ -4431,6 +4431,11 @@ print_type_set(unsigned ftype_uid, type_alias_map *map)
+   if (!map->count (ftype_uid))
+     return;
+   type_set* s = (*map)[ftype_uid];
++  if (!s)
++    {
++      fprintf (dump_file, "%d (no set)", ftype_uid);
++      return;
++    }
+   for (type_set::const_iterator it = s->begin (); it != s->end (); it++)
+     fprintf (dump_file, it == s->begin () ? "%d" : ", %d", *it);
+ }
+@@ -4696,12 +4701,19 @@ maybe_register_aliases (tree type1, tree type2)
+       if (register_ailas_type (type1, type2, ta_map))
+ 	analyze_pointees (type1, type2);
+     }
++  unsigned type1_uid = TYPE_UID (type1);
++  unsigned type2_uid = TYPE_UID (type2);
++  if (type_uid_map->count (type1_uid) == 0)
++    (*type_uid_map)[type1_uid] = type1;
++  if (type_uid_map->count (type2_uid) == 0)
++    (*type_uid_map)[type2_uid] = type2;
++
+   /* If function and non-function type pointers alias,
+      the function type is unsafe.  */
+   if (FUNCTION_POINTER_TYPE_P (type1) && !FUNCTION_POINTER_TYPE_P (type2))
+-    unsafe_types->insert (TYPE_UID (type1));
++    unsafe_types->insert (type1_uid);
+   if (FUNCTION_POINTER_TYPE_P (type2) && !FUNCTION_POINTER_TYPE_P (type1))
+-    unsafe_types->insert (TYPE_UID (type2));
++    unsafe_types->insert (type2_uid);
+ 
+   /* Try to figure out with pointers to incomplete types.  */
+   if (POINTER_TYPE_P (type1) && POINTER_TYPE_P (type2))
+@@ -4825,10 +4837,12 @@ compare_block_and_init_type (tree block, tree t1)
+ static void
+ analyze_global_var (varpool_node *var)
+ {
+-  var->get_constructor();
+   tree decl = var->decl;
+-  if (TREE_CODE (decl) == SSA_NAME || !DECL_INITIAL (decl)
+-      || integer_zerop (DECL_INITIAL (decl)))
++  if (decl || !DECL_INITIAL (decl))
++    return;
++  var->get_constructor ();
++  if (TREE_CODE (decl) == SSA_NAME || integer_zerop (DECL_INITIAL (decl))
++      || TREE_CODE (DECL_INITIAL (decl)) == ERROR_MARK)
+     return;
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+@@ -4998,7 +5012,9 @@ analyze_assign_stmt (gimple *stmt)
+     {
+       rhs = TREE_OPERAND (rhs, 0);
+       if (VAR_OR_FUNCTION_DECL_P (rhs) || TREE_CODE (rhs) == STRING_CST
+-	  || TREE_CODE (rhs) == ARRAY_REF || TREE_CODE (rhs) == PARM_DECL)
++	  || TREE_CODE (rhs) == ARRAY_REF || TREE_CODE (rhs) == PARM_DECL
++	  || TREE_CODE (rhs) == LABEL_DECL || TREE_CODE (rhs) == CONST_DECL
++	  || TREE_CODE (rhs) == RESULT_DECL)
+ 	rhs_type = build_pointer_type (TREE_TYPE (rhs));
+       else if (TREE_CODE (rhs) == COMPONENT_REF)
+ 	{
+@@ -5012,7 +5028,12 @@ analyze_assign_stmt (gimple *stmt)
+ 	  gcc_assert (POINTER_TYPE_P (rhs_type));
+ 	}
+       else
+-	gcc_unreachable();
++	{
++	  fprintf (dump_file, "\nUnsupported rhs type %s in assign stmt: ",
++		   get_tree_code_name (TREE_CODE (rhs)));
++	  print_gimple_stmt (dump_file, stmt, 0);
++	  gcc_unreachable ();
++	}
+     }
+   else
+     rhs_type = TREE_TYPE (rhs);
+@@ -5710,6 +5731,8 @@ merge_fs_map_for_ftype_aliases ()
+       decl_set *d_set = it1->second;
+       tree type = (*type_uid_map)[it1->first];
+       type_set *set = (*fta_map)[it1->first];
++      if (!set)
++	continue;
+       for (type_set::const_iterator it2 = set->begin ();
+ 	   it2 != set->end (); it2++)
+ 	{
+-- 
+2.33.0
+
diff --git a/0042-Add-split-complex-instructions-pass.patch b/0042-Add-split-complex-instructions-pass.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b73affdc48bd22b9c62c6c491c28a45b27c33a9f
--- /dev/null
+++ b/0042-Add-split-complex-instructions-pass.patch
@@ -0,0 +1,1245 @@
+From 9a8e5716543972dec36bae1f9d380d27bfbcdae1 Mon Sep 17 00:00:00 2001
+From: Agrachev Andrey WX1228450 
+Date: Mon, 21 Aug 2023 12:35:19 +0300
+Subject: [PATCH 09/18] Add split-complex-instructions pass
+
+ - Add option -fsplit-ldp-stp
+ - Add functionality to detect LDP instructions that depend on a store and split them.
+ - Add -param=param-ldp-dependency-search-range= to configure ldp dependency search range
+ - Add RTL tests
+
+Co-authored-by: Chernonog Vyacheslav 00812786 
+Co-authored-by: Zinin Ivan WX1305386 
+Co-authored-by: Gadzhiev Emin WX1195297 
+---
+ gcc/common.opt                                |   5 +
+ gcc/config/aarch64/aarch64.cc                 |  42 ++
+ gcc/doc/tm.texi                               |   8 +
+ gcc/doc/tm.texi.in                            |   4 +
+ gcc/params.opt                                |   3 +
+ gcc/passes.def                                |   1 +
+ gcc/sched-rgn.cc                              | 704 +++++++++++++++++-
+ gcc/target.def                                |  10 +
+ .../gcc.dg/rtl/aarch64/test-ldp-dont-split.c  |  74 ++
+ .../rtl/aarch64/test-ldp-split-rearrange.c    |  40 +
+ .../gcc.dg/rtl/aarch64/test-ldp-split.c       | 174 +++++
+ gcc/timevar.def                               |   1 +
+ gcc/tree-pass.h                               |   1 +
+ 13 files changed, 1066 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c
+ create mode 100644 gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c
+ create mode 100644 gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index a42bee250..c0e3f5687 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1797,6 +1797,11 @@ floop-nest-optimize
+ Common Var(flag_loop_nest_optimize) Optimization
+ Enable the loop nest optimizer.
+ 
++fsplit-ldp-stp
++Common Var(flag_split_ldp_stp) Optimization
++Split load/store pair instructions into separate load/store operations
++for better performance.
++
+ fstrict-volatile-bitfields
+ Common Var(flag_strict_volatile_bitfields) Init(-1) Optimization
+ Force bitfield accesses to match their type width.
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 04072ca25..48e2eded0 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -27507,6 +27507,48 @@ aarch64_run_selftests (void)
+ 
+ #endif /* #if CHECKING_P */
+ 
++/* TODO: refuse to use ranges intead of full list of an instruction codes.  */
++
++bool
++is_aarch64_ldp_insn (int icode)
++{
++  if ((icode >= CODE_FOR_load_pair_sw_sisi
++	  && icode <= CODE_FOR_load_pair_dw_tftf)
++      || (icode >= CODE_FOR_loadwb_pairsi_si
++	     && icode <= CODE_FOR_loadwb_pairtf_di)
++      || (icode >= CODE_FOR_load_pairv8qiv8qi
++	     && icode <= CODE_FOR_load_pairdfdf)
++      || (icode >= CODE_FOR_load_pairv16qiv16qi
++	     && icode <= CODE_FOR_load_pairv8bfv2df)
++      || (icode >= CODE_FOR_load_pair_lanesv8qi
++	     && icode <= CODE_FOR_load_pair_lanesdf))
++    return true;
++  return false;
++}
++
++bool
++is_aarch64_stp_insn (int icode)
++{
++  if ((icode >= CODE_FOR_store_pair_sw_sisi
++	  && icode <= CODE_FOR_store_pair_dw_tftf)
++      || (icode >= CODE_FOR_storewb_pairsi_si
++	     && icode <= CODE_FOR_storewb_pairtf_di)
++      || (icode >= CODE_FOR_vec_store_pairv8qiv8qi
++	     && icode <= CODE_FOR_vec_store_pairdfdf)
++      || (icode >= CODE_FOR_vec_store_pairv16qiv16qi
++	     && icode <= CODE_FOR_vec_store_pairv8bfv2df)
++      || (icode >= CODE_FOR_store_pair_lanesv8qi
++	     && icode <= CODE_FOR_store_pair_lanesdf))
++    return true;
++  return false;
++}
++
++#undef TARGET_IS_LDP_INSN
++#define TARGET_IS_LDP_INSN is_aarch64_ldp_insn
++
++#undef TARGET_IS_STP_INSN
++#define TARGET_IS_STP_INSN is_aarch64_stp_insn
++
+ #undef TARGET_STACK_PROTECT_GUARD
+ #define TARGET_STACK_PROTECT_GUARD aarch64_stack_protect_guard
+ 
+diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
+index c5006afc0..0c6415a9c 100644
+--- a/gcc/doc/tm.texi
++++ b/gcc/doc/tm.texi
+@@ -12113,6 +12113,14 @@ object files that are not referenced from @code{main} and uses export
+ lists.
+ @end defmac
+ 
++@deftypefn {Target Hook} bool TARGET_IS_LDP_INSN (int @var{icode})
++Return true if icode is corresponding to any of the LDP instruction types.
++@end deftypefn
++
++@deftypefn {Target Hook} bool TARGET_IS_STP_INSN (int @var{icode})
++Return true if icode is corresponding to any of the STP instruction types.
++@end deftypefn
++
+ @deftypefn {Target Hook} bool TARGET_CANNOT_MODIFY_JUMPS_P (void)
+ This target hook returns @code{true} past the point in which new jump
+ instructions could be created.  On machines that require a register for
+diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
+index f869ddd5e..6ff60e562 100644
+--- a/gcc/doc/tm.texi.in
++++ b/gcc/doc/tm.texi.in
+@@ -7977,6 +7977,10 @@ object files that are not referenced from @code{main} and uses export
+ lists.
+ @end defmac
+ 
++@hook TARGET_IS_LDP_INSN
++
++@hook TARGET_IS_STP_INSN
++
+ @hook TARGET_CANNOT_MODIFY_JUMPS_P
+ 
+ @hook TARGET_HAVE_CONDITIONAL_EXECUTION
+diff --git a/gcc/params.opt b/gcc/params.opt
+index 7fcc2398d..6176d4790 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1217,4 +1217,7 @@ Enum(vrp_mode) String(ranger) Value(VRP_MODE_RANGER)
+ Common Joined UInteger Var(param_pointer_compression_size) Init(32) IntegerRange(8, 32) Param Optimization
+ Target size of compressed pointer, which should be 8, 16 or 32.
+ 
++-param=param-ldp-dependency-search-range=
++Common Joined UInteger Var(param_ldp_dependency_search_range) Init(16) IntegerRange(1, 32) Param Optimization
++Range for depended ldp search in split-ldp-stp path.
+ ; This comment is to ensure we retain the blank line above.
+diff --git a/gcc/passes.def b/gcc/passes.def
+index 941bbadf0..a30e05688 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -514,6 +514,7 @@ along with GCC; see the file COPYING3.  If not see
+ 	  NEXT_PASS (pass_reorder_blocks);
+ 	  NEXT_PASS (pass_leaf_regs);
+ 	  NEXT_PASS (pass_split_before_sched2);
++	  NEXT_PASS (pass_split_complex_instructions);
+ 	  NEXT_PASS (pass_sched2);
+ 	  NEXT_PASS (pass_stack_regs);
+ 	  PUSH_INSERT_PASSES_WITHIN (pass_stack_regs)
+diff --git a/gcc/sched-rgn.cc b/gcc/sched-rgn.cc
+index a0dfdb788..b4df8bdc5 100644
+--- a/gcc/sched-rgn.cc
++++ b/gcc/sched-rgn.cc
+@@ -44,6 +44,8 @@ along with GCC; see the file COPYING3.  If not see
+    are actually scheduled.  */
+ 
+ #include "config.h"
++#define INCLUDE_SET
++#define INCLUDE_VECTOR
+ #include "system.h"
+ #include "coretypes.h"
+ #include "backend.h"
+@@ -65,6 +67,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "dbgcnt.h"
+ #include "pretty-print.h"
+ #include "print-rtl.h"
++#include "cfgrtl.h"
+ 
+ /* Disable warnings about quoting issues in the pp_xxx calls below
+    that (intentionally) don't follow GCC diagnostic conventions.  */
+@@ -3951,6 +3954,705 @@ make_pass_sched_fusion (gcc::context *ctxt)
+   return new pass_sched_fusion (ctxt);
+ }
+ 
++namespace {
++
++/* Def-use analisys special functions implementation.  */
++
++static struct df_link *
++get_defs (rtx_insn *insn, rtx reg)
++{
++  df_ref use;
++  struct df_link *ref_chain, *ref_link;
++
++  FOR_EACH_INSN_USE (use, insn)
++    {
++      if (GET_CODE (DF_REF_REG (use)) == SUBREG)
++	return NULL;
++      if (REGNO (DF_REF_REG (use)) == REGNO (reg))
++	break;
++    }
++
++  gcc_assert (use != NULL);
++
++  ref_chain = DF_REF_CHAIN (use);
++
++  for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
++    {
++      /* Problem getting some definition for this instruction.  */
++      if (ref_link->ref == NULL)
++	return NULL;
++      if (DF_REF_INSN_INFO (ref_link->ref) == NULL)
++	return NULL;
++      /* As global regs are assumed to be defined at each function call
++	  dataflow can report a call_insn as being a definition of REG.
++	  But we can't do anything with that in this pass so proceed only
++	  if the instruction really sets REG in a way that can be deduced
++	  from the RTL structure.  */
++      if (global_regs[REGNO (reg)]
++	  && !set_of (reg, DF_REF_INSN (ref_link->ref)))
++	return NULL;
++    }
++
++  return ref_chain;
++}
++
++static struct df_link *
++get_uses (rtx_insn *insn, rtx reg)
++{
++  df_ref def;
++  struct df_link *ref_chain, *ref_link;
++
++  FOR_EACH_INSN_DEF (def, insn)
++    if (REGNO (DF_REF_REG (def)) == REGNO (reg))
++      break;
++
++  gcc_assert (def != NULL && "Broken def-use analisys chain.");
++
++  ref_chain = DF_REF_CHAIN (def);
++
++  for (ref_link = ref_chain; ref_link; ref_link = ref_link->next)
++    {
++      /* Problem getting some use for this instruction.  */
++      if (ref_link->ref == NULL)
++	return NULL;
++    }
++
++  return ref_chain;
++}
++
++const pass_data pass_data_split_complex_instructions = {
++  RTL_PASS,			     /* Type.  */
++  "split_complex_instructions",	     /* Name.  */
++  OPTGROUP_NONE,		     /* Optinfo_flags.  */
++  TV_SPLIT_CMP_INS,		     /* Tv_id.  */
++  0,				     /* Properties_required.  */
++  0,				     /* Properties_provided.  */
++  0,				     /* Properties_destroyed.  */
++  0,				     /* Todo_flags_start.  */
++  (TODO_df_verify | TODO_df_finish), /* Todo_flags_finish.  */
++};
++
++class pass_split_complex_instructions : public rtl_opt_pass
++{
++private:
++  enum complex_instructions_t
++  {
++    UNDEFINED,
++    LDP,
++    LDP_TI,
++    STP,
++    STR
++  };
++
++  void split_complex_insn (rtx_insn *insn);
++  void split_ldp_ti (rtx_insn *insn);
++  void split_ldp_with_offset (rtx_insn *ldp_insn);
++  void split_simple_ldp (rtx_insn *ldp_insn);
++  void split_ldp_stp (rtx_insn *insn);
++  complex_instructions_t get_insn_type (rtx_insn *insn);
++
++  basic_block bb;
++  rtx_insn *insn;
++  std::set dependent_stores_candidates;
++  std::set ldp_to_split_list;
++
++  complex_instructions_t complex_insn_type = UNDEFINED;
++  bool is_store_insn (rtx_insn *insn);
++  bool is_ldp_dependent_on_store (rtx_insn *ldp_insn, basic_block bb);
++  bool bfs_for_reg_dependent_store (rtx_insn *ldp_insn, basic_block search_bb,
++				    rtx_insn *search_insn,
++				    int search_range
++				    = param_ldp_dependency_search_range);
++  bool is_store_reg_dependent (rtx_insn *ldp_insn, rtx_insn *str_insn);
++  void init_df ();
++  void find_dependent_stores_candidates (rtx_insn *ldp_insn);
++  int get_insn_offset (rtx_insn *insn, complex_instructions_t insn_type,
++		       int *arith_operation_ptr = NULL);
++
++public:
++  pass_split_complex_instructions (gcc::context *ctxt)
++      : rtl_opt_pass (pass_data_split_complex_instructions, ctxt)
++  {
++  }
++  /* opt_pass methods: */
++  virtual bool gate (function *);
++
++  virtual unsigned int
++  execute (function *)
++  {
++    enum rtx_code ldp_memref_code;
++    init_df ();
++    ldp_to_split_list.clear ();
++    FOR_EACH_BB_FN (bb, cfun)
++      {
++	FOR_BB_INSNS (bb, insn)
++	  {
++	    complex_instructions_t insn_type = get_insn_type (insn);
++	    /* TODO: Add splitting of STP instructions.  */
++	    if (insn_type != LDP && insn_type != LDP_TI)
++	      continue;
++	    /* TODO: Currently support only ldp_ti and ldp with REG or
++	       PLUS/MINUS offset expression.  */
++	    if (insn_type == LDP_TI)
++	      {
++		ldp_memref_code = GET_CODE (XEXP (XEXP (PATTERN (insn), 1),
++						  0));
++		if (ldp_memref_code != REG && ldp_memref_code != PLUS
++		    && ldp_memref_code != MINUS)
++		  continue;
++	      }
++	    if (is_ldp_dependent_on_store (insn, bb))
++	      {
++		ldp_to_split_list.insert (insn);
++	      }
++	  }
++      }
++
++    for (std::set::iterator i = ldp_to_split_list.begin ();
++	 i != ldp_to_split_list.end (); ++i)
++      split_complex_insn (*i);
++
++    return 0;
++  }
++}; // class pass_split_complex_instructions
++
++bool
++pass_split_complex_instructions::is_ldp_dependent_on_store (rtx_insn *ldp_insn,
++							    basic_block bb)
++{
++  find_dependent_stores_candidates (ldp_insn);
++  return bfs_for_reg_dependent_store (ldp_insn, bb, ldp_insn);
++}
++
++bool
++pass_split_complex_instructions::bfs_for_reg_dependent_store (
++    rtx_insn *ldp_insn, basic_block search_bb, rtx_insn *search_insn,
++    int search_range)
++{
++  rtx_insn *current_search_insn = search_insn;
++
++  for (int i = search_range; i > 0; --i)
++    {
++      if (!current_search_insn)
++	return false;
++      bool checking_result
++	  = is_store_reg_dependent (ldp_insn, current_search_insn);
++      if (checking_result)
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "LDP to split:\n");
++	      print_rtl_single (dump_file, ldp_insn);
++	      fprintf (dump_file, "Found STR:\n");
++	      print_rtl_single (dump_file, current_search_insn);
++	    }
++	  return true;
++	}
++      if (current_search_insn == BB_HEAD (search_bb))
++	{
++	  /* Search in all parent BBs for the reg_dependent store.  */
++	  edge_iterator ei;
++	  edge e;
++
++	  FOR_EACH_EDGE (e, ei, search_bb->preds)
++	    if (e->src->index != 0
++		&& bfs_for_reg_dependent_store (ldp_insn, e->src,
++						BB_END (e->src), i - 1))
++	      return true;
++	  return false;
++	}
++      else
++	{
++	  if (!active_insn_p (current_search_insn))
++	    i++;
++	  current_search_insn = PREV_INSN (current_search_insn);
++	}
++    }
++  return false;
++}
++
++void
++pass_split_complex_instructions::init_df ()
++{
++  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
++  df_chain_add_problem (DF_UD_CHAIN + DF_DU_CHAIN);
++  df_mir_add_problem ();
++  df_live_add_problem ();
++  df_live_set_all_dirty ();
++  df_analyze ();
++  df_set_flags (DF_DEFER_INSN_RESCAN);
++}
++
++void
++pass_split_complex_instructions::find_dependent_stores_candidates (
++    rtx_insn *ldp_insn)
++{
++  dependent_stores_candidates.clear ();
++  df_ref use;
++
++  FOR_EACH_INSN_USE (use, ldp_insn)
++    {
++      df_link *defs = get_defs (ldp_insn, DF_REF_REG (use));
++      if (!defs)
++	return;
++
++      for (df_link *def = defs; def; def = def->next)
++	{
++	  df_link *uses
++	      = get_uses (DF_REF_INSN (def->ref), DF_REF_REG (def->ref));
++	  if (!uses)
++	    continue;
++
++	  for (df_link *use = uses; use; use = use->next)
++	    {
++	      if (DF_REF_CLASS (use->ref) == DF_REF_REGULAR
++		  && is_store_insn (DF_REF_INSN (use->ref)))
++		dependent_stores_candidates.insert (DF_REF_INSN (use->ref));
++	    }
++	}
++    }
++}
++
++bool
++pass_split_complex_instructions::is_store_reg_dependent (rtx_insn *ldp_insn,
++							 rtx_insn *str_insn)
++{
++  if (!is_store_insn (str_insn)
++      || dependent_stores_candidates.find (str_insn)
++	     == dependent_stores_candidates.end ())
++    return false;
++
++  int ldp_offset_sign = UNDEFINED;
++  int ldp_offset
++      = get_insn_offset (ldp_insn, get_insn_type (ldp_insn), &ldp_offset_sign);
++  if (ldp_offset_sign == MINUS)
++    ldp_offset = -ldp_offset;
++
++  int str_offset_sign = UNDEFINED;
++  int str_offset = get_insn_offset (str_insn, STR, &str_offset_sign);
++  if (str_offset_sign == MINUS)
++    str_offset = -str_offset;
++
++  if (str_offset == ldp_offset || str_offset == ldp_offset + 8)
++    return true;
++
++  return false;
++}
++
++bool
++pass_split_complex_instructions::is_store_insn (rtx_insn *insn)
++{
++  if (!insn)
++    return false;
++  rtx sset_b = single_set (insn);
++  /* TODO: The condition below allow to take only store instructions in which
++     the memory location's operand is either a register (base) or an plus/minus
++     operation (base + #imm). So it might make sense to add support for other
++     cases (e.g. multiply and shift).  */
++  if (sset_b && MEM_P (SET_DEST (sset_b))
++      && GET_MODE (XEXP (sset_b, 0)) != BLKmode
++      && (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == REG
++	  || (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == PLUS
++	      || GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == MINUS)
++	  && (GET_CODE (XEXP (XEXP (XEXP (sset_b, 0), 0), 1)) == CONST_INT)))
++    return true;
++
++  return false;
++}
++
++int
++pass_split_complex_instructions::get_insn_offset (
++    rtx_insn *insn, complex_instructions_t insn_type, int *arith_operation_ptr)
++{
++  rtx insn_pat = PATTERN (insn);
++  int returned_offset = 0;
++
++  rtx offset_expr = NULL;
++  rtx offset_value_expr = NULL;
++
++  switch (insn_type)
++    {
++    case LDP:
++      {
++	int number_of_sub_insns = XVECLEN (insn_pat, 0);
++
++	/* Calculate it's own ofsset of first load insn.  */
++	rtx_insn *first_load_insn = NULL;
++	if (number_of_sub_insns == 2)
++	  {
++	    first_load_insn
++		= make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0)));
++	    arith_operation_ptr = NULL;
++
++	    offset_expr = XEXP (XEXP (PATTERN (first_load_insn), 1), 0);
++	    if (GET_CODE (offset_expr) == PLUS
++		|| GET_CODE (offset_expr) == MINUS)
++	      offset_value_expr
++		  = XEXP (XEXP (XEXP (PATTERN (first_load_insn), 1), 0), 1);
++	    else
++	      offset_expr = NULL;
++	  }
++	else if (number_of_sub_insns == 3)
++	  {
++	    rtx_insn *offset_sub_insn
++		= make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0)));
++
++	    offset_expr = XEXP (PATTERN (offset_sub_insn), 1);
++	    offset_value_expr = XEXP (XEXP (PATTERN (offset_sub_insn), 1), 1);
++	  }
++	else
++	  {
++	    gcc_assert (false
++			&& "Wrong number of elements in the ldp_insn vector");
++	  }
++	break;
++      }
++    case LDP_TI:
++      {
++	offset_expr = XEXP (XEXP (insn_pat, 1), 0);
++	if (GET_CODE (offset_expr) != PLUS && GET_CODE (offset_expr) != MINUS)
++	  return 0;
++	offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 1), 0), 1);
++	break;
++      }
++    case STR:
++      {
++	offset_expr = XEXP (XEXP (insn_pat, 0), 0);
++	/* If memory location is specified by single base register then the
++	   offset is zero.  */
++	if (GET_CODE (offset_expr) == REG)
++	  return 0;
++	offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 0), 0), 1);
++	break;
++      }
++    default:
++      {
++	if (dumps_are_enabled && dump_file)
++	  {
++	    fprintf (dump_file, "Instruction that was tried to split:\n");
++	    print_rtl_single (dump_file, insn);
++	  }
++	gcc_assert (false && "Unsupported instruction type");
++	break;
++      }
++    }
++
++  if (offset_expr != NULL && offset_value_expr
++      && GET_CODE (offset_value_expr) == CONST_INT)
++    returned_offset = XINT (offset_value_expr, 0);
++
++  if (arith_operation_ptr != NULL)
++    {
++      *arith_operation_ptr = GET_CODE (offset_expr);
++      gcc_assert ((*arith_operation_ptr == MINUS
++		   || *arith_operation_ptr == PLUS)
++		  && "Unexpected arithmetic operation in the offset expr");
++    }
++
++  return returned_offset;
++}
++
++void
++pass_split_complex_instructions::split_simple_ldp (rtx_insn *ldp_insn)
++{
++  rtx pat = PATTERN (ldp_insn);
++
++  rtx_insn *mem_insn_1 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 0)));
++  rtx_insn *mem_insn_2 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 1)));
++
++  int dest_regno = REGNO (SET_DEST (PATTERN (mem_insn_1)));
++  int src_regno;
++
++  rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (mem_insn_1)), 0);
++
++  if (GET_CODE (srs_reg_insn) == REG)
++    src_regno = REGNO (srs_reg_insn);
++  else
++    src_regno = REGNO (XEXP (srs_reg_insn, 0));
++
++  rtx_insn *emited_insn_1, *emited_insn_2;
++
++  /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first.  */
++  if (src_regno == dest_regno)
++    std::swap (mem_insn_1, mem_insn_2);
++
++  emited_insn_1 = emit_insn (PATTERN (mem_insn_1));
++  emited_insn_2 = emit_insn (PATTERN (mem_insn_2));
++
++  int sub_insn_1_code = recog (PATTERN (mem_insn_1), mem_insn_1, 0);
++  int sub_insn_2_code = recog (PATTERN (mem_insn_2), mem_insn_2, 0);
++
++  INSN_CODE (emited_insn_1) = sub_insn_1_code;
++  INSN_CODE (emited_insn_2) = sub_insn_2_code;
++}
++
++void
++pass_split_complex_instructions::split_ldp_with_offset (rtx_insn *ldp_insn)
++{
++  rtx pat = PATTERN (ldp_insn);
++  bool post_index = true;
++
++  rtx_insn offset_insn;
++  rtx_insn mem_insn_1;
++  rtx_insn mem_insn_2;
++
++  int offset_insn_code;
++  int mem_insn_1_code = -1;
++  int mem_insn_2_code = -1;
++
++  int offset = 0;
++  int arith_operation = UNDEFINED;
++
++  for (int i = 0; i < 3; i++)
++    {
++      rtx sub_insn = XVECEXP (pat, 0, i);
++      rtx_insn *copy_of_sub_insn = make_insn_raw (copy_rtx (sub_insn));
++      int sub_insn_code
++	  = recog (PATTERN (copy_of_sub_insn), copy_of_sub_insn, 0);
++
++      /* If sub_insn is offset related.  */
++      if (GET_RTX_CLASS (sub_insn_code) == RTX_UNARY)
++	{
++	  offset_insn = *copy_of_sub_insn;
++	  offset_insn_code = sub_insn_code;
++	  gcc_assert (i == 0
++		      && "Offset related insn must be the first "
++			 "element of a parallel insn vector");
++
++	  offset = get_insn_offset (ldp_insn, LDP, &arith_operation);
++	}
++      else
++	{
++	  if (GET_CODE (XEXP (PATTERN (copy_of_sub_insn), 0)) != REG)
++	    {
++	      rtx &offset_expr
++		  = XEXP (XEXP (XEXP (PATTERN (copy_of_sub_insn), 0), 0), 1);
++	      if (GET_CODE (offset_expr) == CONST_INT)
++		{
++		  int local_offset = XINT (offset_expr, 0);
++		  offset = (arith_operation == PLUS ? offset : -offset);
++
++		  offset_expr = GEN_INT (local_offset + offset);
++
++		  gcc_assert (
++		      (arith_operation == MINUS || arith_operation == PLUS)
++		      && "Unexpected arithmetic operation in offset related "
++			 "sub_insn");
++
++		  if (i == 1)
++		    post_index = false;
++		}
++	      else
++		{
++		  post_index = true;
++		}
++	    }
++	}
++      if (i == 1)
++	{
++	  mem_insn_1 = *copy_of_sub_insn;
++	  mem_insn_1_code = sub_insn_code;
++	}
++      if (i == 2)
++	{
++	  mem_insn_2 = *copy_of_sub_insn;
++	  mem_insn_2_code = sub_insn_code;
++	}
++    }
++  gcc_assert (mem_insn_1_code != -1 && mem_insn_2_code != -1
++	      && "Uninitialized memory insns");
++
++  int dest_regno = REGNO (SET_DEST (PATTERN (&mem_insn_1)));
++  int src_regno;
++
++  rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (&mem_insn_1)), 0);
++
++  if (GET_CODE (srs_reg_insn) == REG)
++    src_regno = REGNO (srs_reg_insn);
++  else
++    src_regno = REGNO (XEXP (srs_reg_insn, 0));
++
++  /* Don't split such weird LDP.  */
++  if (src_regno == dest_regno)
++    return;
++
++  rtx_insn *emited_offset_insn;
++  if (!post_index)
++    {
++      emited_offset_insn = emit_insn (PATTERN (&offset_insn));
++      INSN_CODE (emited_offset_insn) = offset_insn_code;
++    }
++
++  rtx_insn *emited_insn_1 = emit_insn (PATTERN (&mem_insn_1));
++  rtx_insn *emited_insn_2 = emit_insn (PATTERN (&mem_insn_2));
++
++
++  INSN_CODE (emited_insn_1) = mem_insn_1_code;
++  INSN_CODE (emited_insn_2) = mem_insn_2_code;
++
++  if (post_index)
++    {
++      emited_offset_insn = emit_insn (PATTERN (&offset_insn));
++      INSN_CODE (emited_offset_insn) = offset_insn_code;
++    }
++}
++
++void
++pass_split_complex_instructions::split_ldp_stp (rtx_insn *insn)
++{
++  rtx_insn *prev_insn = PREV_INSN (insn);
++  int number_of_sub_insns = XVECLEN (PATTERN (insn), 0);
++
++  start_sequence ();
++
++  if (number_of_sub_insns == 2)
++    split_simple_ldp (insn);
++  else if (number_of_sub_insns == 3)
++    split_ldp_with_offset (insn);
++  else
++    gcc_assert (false && "Broken complex insn vector");
++
++  rtx_insn *seq = get_insns ();
++  unshare_all_rtl_in_chain (seq);
++  end_sequence ();
++
++  emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn));
++  delete_insn_and_edges (insn);
++}
++
++void
++pass_split_complex_instructions::split_ldp_ti (rtx_insn *insn)
++{
++  rtx_insn *prev_insn = PREV_INSN (insn);
++  rtx_insn *load_insn_1 = make_insn_raw (copy_rtx (PATTERN (insn)));
++  rtx_insn *load_insn_2 = make_insn_raw (copy_rtx (PATTERN (insn)));
++
++  rtx reg_insn_1 = XEXP (PATTERN (load_insn_1), 0);
++  rtx mem_insn_1 = XEXP (PATTERN (load_insn_1), 1);
++  rtx mem_insn_2 = XEXP (PATTERN (load_insn_2), 1);
++
++  PUT_MODE (mem_insn_1, DImode);
++  PUT_MODE (mem_insn_2, DImode);
++
++  int reg_no_1 = REGNO (reg_insn_1);
++
++  XEXP (PATTERN (load_insn_1), 0) = gen_rtx_REG (DImode, reg_no_1);
++  XEXP (PATTERN (load_insn_2), 0) = gen_rtx_REG (DImode, reg_no_1 + 1);
++
++  rtx load_insn_2_plus_expr = XEXP (XEXP (PATTERN (load_insn_2), 1), 0);
++  if (GET_CODE (load_insn_2_plus_expr) == REG)
++    {
++	XEXP (XEXP (PATTERN (load_insn_2), 1), 0)
++	  = gen_rtx_PLUS (DImode,
++			  gen_rtx_REG (DImode, REGNO (load_insn_2_plus_expr)),
++			  GEN_INT (GET_MODE_SIZE (DImode)));
++    }
++  else
++    {
++      rtx load_insn_2_offset_expr
++      = XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1);
++
++      if (load_insn_2_offset_expr == NULL)
++	return;
++
++      if (GET_CODE (load_insn_2_offset_expr) == CONST_INT)
++	{
++	  int load_insn_2_offset = XINT (load_insn_2_offset_expr, 0);
++	  XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1)
++	    = GEN_INT (load_insn_2_offset + GET_MODE_SIZE (DImode));
++	}
++    }
++
++  start_sequence ();
++
++  int src_regno;
++  rtx srs_reg_insn = XEXP (XEXP (PATTERN (load_insn_1), 1), 0);
++
++  if (GET_CODE (srs_reg_insn) == REG)
++    src_regno = REGNO (srs_reg_insn);
++  else
++    src_regno = REGNO (XEXP (srs_reg_insn, 0));
++
++  /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first.  */
++  if (src_regno == reg_no_1)
++    std::swap (load_insn_1, load_insn_2);
++
++  rtx_insn *emited_load_insn_1 = emit_insn (PATTERN (load_insn_1));
++  rtx_insn *emited_load_insn_2 = emit_insn (PATTERN (load_insn_2));
++
++  INSN_CODE (emited_load_insn_1)
++      = recog (PATTERN (emited_load_insn_1), emited_load_insn_1, 0);
++  INSN_CODE (emited_load_insn_2)
++      = recog (PATTERN (emited_load_insn_2), emited_load_insn_2, 0);
++
++  rtx_insn *seq = get_insns ();
++  unshare_all_rtl_in_chain (seq);
++  end_sequence ();
++
++  emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn));
++  delete_insn_and_edges (insn);
++}
++
++void
++pass_split_complex_instructions::split_complex_insn (rtx_insn *insn)
++{
++  complex_instructions_t insn_type = get_insn_type (insn);
++  /* TODO: Add splitting of STP instructions.  */
++  if (insn_type == LDP || insn_type == STP)
++    split_ldp_stp (insn);
++  else if (insn_type == LDP_TI)
++    split_ldp_ti (insn);
++  else
++    gcc_assert (false && "Unsupported type of insn to split");
++}
++
++pass_split_complex_instructions::complex_instructions_t
++pass_split_complex_instructions::get_insn_type (rtx_insn *insn)
++{
++  if (!INSN_P (insn))
++    return UNDEFINED;
++
++  rtx pat = PATTERN (insn);
++  int icode = recog (PATTERN (insn), insn, NULL);
++
++  if (GET_CODE (pat) == PARALLEL)
++    {
++      if (targetm.is_ldp_insn (icode))
++	{
++	  return LDP;
++	}
++      if (targetm.is_stp_insn (icode))
++	{
++	  return STP;
++	}
++      else
++	{
++	  return UNDEFINED;
++	}
++    }
++  rtx set_insn = single_set (insn);
++  if (set_insn && GET_CODE (XEXP (set_insn, 1)) == MEM
++      && GET_MODE (XEXP (set_insn, 1)) == E_TImode)
++    return LDP_TI;
++
++  return UNDEFINED;
++}
++
++bool
++pass_split_complex_instructions::gate (function *)
++{
++  return targetm.is_ldp_insn && targetm.is_stp_insn && optimize > 0
++	 && flag_split_ldp_stp > 0;
++}
++
++} // anon namespace
++
++rtl_opt_pass *
++make_pass_split_complex_instructions (gcc::context *ctxt)
++{
++  return new pass_split_complex_instructions (ctxt);
++}
++
+ #if __GNUC__ >= 10
+ #  pragma GCC diagnostic pop
+-#endif
++#endif
+\ No newline at end of file
+diff --git a/gcc/target.def b/gcc/target.def
+index d85adf36a..a3a50b474 100644
+--- a/gcc/target.def
++++ b/gcc/target.def
+@@ -2677,6 +2677,16 @@ modes and they have different conditional execution capability, such as ARM.",
+  bool, (void),
+  default_have_conditional_execution)
+ 
++DEFHOOK
++(is_ldp_insn,
++  "Return true if icode is corresponding to any of the LDP instruction types.",
++  bool, (int icode), NULL)
++
++DEFHOOK
++(is_stp_insn,
++  "Return true if icode is corresponding to any of the STP instruction types.",
++  bool, (int icode), NULL)
++
+ DEFHOOK
+ (gen_ccmp_first,
+  "This function prepares to emit a comparison insn for the first compare in a\n\
+diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c
+new file mode 100644
+index 000000000..3918d43f6
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c
+@@ -0,0 +1,74 @@
++/* { dg-do compile { target aarch64-*-* } } */
++/* { dg-additional-options "-fsplit-ldp-stp" } */
++/*
++ *    Tests are:
++ *          Patterns where LDP insns should NOT be split
++ *                       */
++
++int __RTL (startwith ("split_complex_instructions"))
++simple_ldp_after_store ()
++{
++(function "simple_ldp_after_store"
++  (insn-chain
++    (block 2
++      (edge-from entry (flags "FALLTHRU"))
++      (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 228 (set (reg/i:DI sp) 
++                   (reg/i:DI x0)))
++      (cinsn 101 (set (mem/c:DI
++                        (plus:DI (reg/f:DI sp)
++                          (const_int 32))[1 S4 A32])(reg:DI x0)))
++      (cinsn 10 (parallel [
++        (set (reg:DI x29)
++          (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32]))
++        (set (reg:DI x30)
++          (mem:DI (plus:DI (reg/f:DI sp)
++            (const_int 16)) [1 S4 A32]))]))
++      (cinsn 11 (use (reg/i:DI sp)))
++      (cinsn 12 (use (reg/i:DI cc)))
++      (cinsn 13 (use (reg/i:DI x29)))
++      (cinsn 14 (use (reg/i:DI x30)))
++      (cinsn 15 (use (reg/i:DI x0)))
++      (edge-to exit (flags "FALLTHRU"))
++    ) ;; block 2
++  ) ;; insn-chain
++) ;; function "simple_ldp_after_store"
++}
++
++int __RTL (startwith ("split_complex_instructions"))
++ldp_after_store_in_different_bb ()
++{
++(function "ldp_after_store_in_different_bb"
++  (insn-chain
++    (block 2
++      (edge-from entry (flags "FALLTHRU"))
++      (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 228 (set (reg/i:DI sp) 
++                   (reg/i:DI x0)))
++      (cinsn 101 (set (mem/c:DI
++                        (plus:DI (reg/f:DI sp)
++                          (const_int 32))[1 S4 A32])(reg:DI x0)))
++      (edge-to 3 (flags "FALLTHRU"))
++    ) ;; block 2
++    (block 3
++      (edge-from 2 (flags "FALLTHRU"))
++      (cnote 4 [bb 3] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 10 (parallel [
++        (set (reg:DI x29)
++          (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32]))
++        (set (reg:DI x30)
++          (mem:DI (plus:DI (reg/f:DI sp)
++            (const_int 16)) [1 S4 A32]))]))
++      (cinsn 11 (use (reg/i:DI sp)))
++      (cinsn 12 (use (reg/i:DI cc)))
++      (cinsn 13 (use (reg/i:DI x29)))
++      (cinsn 14 (use (reg/i:DI x30)))
++      (cinsn 15 (use (reg/i:DI x0)))
++      (edge-to exit (flags "FALLTHRU"))
++    ) ;; block 3
++  ) ;; insn-chain
++) ;; function "ldp_after_store_in_different_bb"
++}
++
++/* Verify that the output code contains exactly 2 ldp.  */
++/* { dg-final { scan-assembler-times {ldp\t} 2 } }  */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c
+new file mode 100644
+index 000000000..653c30f83
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c
+@@ -0,0 +1,40 @@
++/* { dg-do compile { target aarch64-*-* } } */
++/* { dg-additional-options "-fsplit-ldp-stp" } */
++/*
++ *    Test is:
++ *        Pattern where LDP insns should be split with rearrangement in order
++ *        to deal with data dependecy betwen subinstruction.  
++ *                                                                          */
++
++int __RTL (startwith ("split_complex_instructions"))
++simple_ldp_after_store ()
++{
++(function "ldp_equal_registers"
++  (insn-chain
++    (block 2
++      (edge-from entry (flags "FALLTHRU"))
++      (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 228 (set (reg/i:DI x1) 
++                   (reg/i:DI x0)))
++      (cinsn 101 (set (mem/c:DI
++                        (plus:DI (reg/f:DI x1)
++                          (const_int 8))[1 S4 A32])(reg:DI x0)))
++      (cinsn 10 (parallel [
++        (set (reg:DI x1)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int 8)) [1 S4 A32]))
++        (set (reg:DI x2)
++          (mem:DI (plus:DI (reg/f:DI x1)
++            (const_int 16)) [1 S4 A32]))]))
++      (cinsn 11 (use (reg/i:DI sp)))
++      (cinsn 12 (use (reg/i:DI cc)))
++      (cinsn 13 (use (reg/i:DI x0)))
++      (cinsn 14 (use (reg/i:DI x1)))
++      (cinsn 15 (use (reg/i:DI x2)))
++      (edge-to exit (flags "FALLTHRU"))
++    ) ;; block 2
++  ) ;; insn-chain
++) ;; function "ldp_equal_registers"
++}
++
++/* Verify that the output code rearranges LDRs.  */
++/* { dg-final { scan-assembler-times ".*ldr.*x2.*x1,.*16.*ldr.*x1.*x1.*8" 1 } }  */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c
+new file mode 100644
+index 000000000..dc9f26efb
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c
+@@ -0,0 +1,174 @@
++/* { dg-do compile { target aarch64-*-* } } */
++/* { dg-additional-options "-O1 -fsplit-ldp-stp" } */
++/*
++ *    Tests are:
++ *          Patterns where LDP insns should be split
++ *                       */
++
++int __RTL (startwith ("split_complex_instructions"))
++simple_ldp_after_store ()
++{
++(function "simple_ldp_after_store"
++  (insn-chain
++    (block 2
++      (edge-from entry (flags "FALLTHRU"))
++      (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 228 (set (reg/i:DI sp)
++                   (reg/i:DI x0)))
++      (cinsn 238 (set (reg/i:DI x1)
++                   (reg/i:DI x0)))
++
++      (cinsn 101 (set (mem/c:DI
++                        (plus:DI (reg/f:DI sp)
++                          (const_int 8))[1 S4 A32])(reg:DI x0)))
++      (cinsn 10 (parallel [
++        (set (reg:DI x29)
++          (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32]))
++        (set (reg:DI x30)
++          (mem:DI (plus:DI (reg/f:DI sp)
++            (const_int 16)) [1 S4 A32]))]))
++
++      (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x1)
++                                          (const_int -16)) [1 S4 A32])
++                      (reg:DI x0)))
++      (cinsn 11 (parallel [
++        (set (reg:DI x3)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int -16)) [1 S4 A32]))
++        (set (reg:DI x4)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int -8)) [1 S4 A32]))
++      ]))
++
++      (cinsn 103 (set (mem/c:DI (reg/f:DI x1) [1 S4 A32])
++                      (reg:DI x0)))
++      (cinsn 12 (parallel [
++        (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32]))
++        (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1)
++                                          (const_int 8)) [1 S4 A32]))
++      ]))
++
++      (cinsn 13 (use (reg/i:DI sp)))
++      (cinsn 14 (use (reg/i:DI cc)))
++      (cinsn 15 (use (reg/i:DI x29)))
++      (cinsn 16 (use (reg/i:DI x30)))
++      (cinsn 17 (use (reg/i:DI x0)))
++      (cinsn 18 (use (reg/i:DI x3)))
++      (cinsn 19 (use (reg/i:DI x4)))
++      (cinsn 20 (use (reg/i:DI x5)))
++      (cinsn 21 (use (reg/i:DI x6)))
++      (edge-to exit (flags "FALLTHRU"))
++    ) ;; block 2
++  ) ;; insn-chain
++) ;; function "simple_ldp_after_store"
++}
++
++int __RTL (startwith ("split_complex_instructions"))
++ldp_ti_after_store ()
++{
++  (function "ldp_ti_after_store"
++    (insn-chain
++      (block 2
++      (edge-from entry (flags "FALLTHRU"))
++      (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 228 (set (reg/i:DI sp)
++                   (reg/i:DI x0)))
++      (cinsn 238 (set (reg/i:DI x2)
++                   (reg/i:DI x0)))
++
++      (cinsn 101 (set (mem/c:DI
++                        (plus:DI (reg/f:DI sp)
++                          (const_int 136))[1 S4 A32])(reg:DI x0)))
++      (insn 81 (set (reg:TI x0 [1 S4 A32])
++              (mem/c:TI (plus:DI (reg/f:DI sp)
++                      (const_int 136 )) [1 S4 A32]))
++           (expr_list:REG_EQUIV (mem/c:TI (plus:DI (reg/f:DI sfp)
++                      (const_int -24 )) [1 S4 A32])
++              (nil)))
++
++      (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x2)
++                                          (const_int -16)) [1 S4 A32])
++                      (reg:DI x0)))
++      (insn 82 (set (reg:TI x3 [1 S4 A32])
++                    (mem/c:TI (plus:DI (reg/f:DI x2)
++                                        (const_int -16)) [1 S4 A32])))
++
++      (cinsn 103 (set (mem/c:DI (reg/f:DI x2) [1 S4 A32])
++                      (reg:DI x0)))
++      (insn 83 (set (reg:TI x5 [1 S4 A32])
++                    (mem/c:TI (reg/f:DI x2) [1 S4 A32])))
++
++      (cinsn 11 (use (reg/i:DI sp)))
++      (cinsn 12 (use (reg/i:DI cc)))
++      (cinsn 13 (use (reg/i:DI x29)))
++      (cinsn 14 (use (reg/i:DI x30)))
++      (cinsn 15 (use (reg/i:DI x0)))
++      (cinsn 16 (use (reg/i:DI x3)))
++      (cinsn 17 (use (reg/i:DI x5)))
++      (cinsn 18 (use (reg/i:DI x1)))
++      (cinsn 19 (use (reg/i:DI x4)))
++      (cinsn 20 (use (reg/i:DI x6)))
++      (edge-to exit (flags "FALLTHRU"))
++    ) ;; block 2
++  ) ;; insn-chain
++) ;; function "ldp_ti_after_store"
++}
++
++int __RTL (startwith ("split_complex_instructions"))
++ldp_after_store_in_different_bb ()
++{
++(function "ldp_after_store_in_different_bb"
++  (insn-chain
++    (block 2
++      (edge-from entry (flags "FALLTHRU"))
++      (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 228 (set (reg/i:DI sp)
++                   (reg/i:DI x0)))
++      (cinsn 238 (set (reg/i:DI x1)
++                   (reg/i:DI x0)))
++
++      (cinsn 101 (set (mem/c:DI
++                        (plus:DI (reg/f:DI sp)
++                          (const_int 8))[1 S4 A32])(reg:DI x0)))
++      (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x1)
++                                          (const_int -16)) [1 S4 A32])
++                      (reg:DI x0)))
++      (cinsn 103 (set (mem/c:DI (reg/f:DI x1) [1 S4 A32])
++                      (reg:DI x0)))
++      (edge-to 3 (flags "FALLTHRU"))
++    ) ;; block 2
++    (block 3
++      (edge-from 2 (flags "FALLTHRU"))
++      (cnote 4 [bb 3] NOTE_INSN_BASIC_BLOCK)
++      (cinsn 10 (parallel [
++        (set (reg:DI x29)
++          (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32]))
++        (set (reg:DI x30)
++          (mem:DI (plus:DI (reg/f:DI sp)
++            (const_int 16)) [1 S4 A32]))]))
++      (cinsn 11 (parallel [
++        (set (reg:DI x3)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int -16)) [1 S4 A32]))
++        (set (reg:DI x4)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int -8)) [1 S4 A32]))
++      ]))
++      (cinsn 12 (parallel [
++        (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32]))
++        (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1)
++                                          (const_int 8)) [1 S4 A32]))
++      ]))
++      (cinsn 13 (use (reg/i:DI sp)))
++      (cinsn 14 (use (reg/i:DI cc)))
++      (cinsn 15 (use (reg/i:DI x29)))
++      (cinsn 16 (use (reg/i:DI x30)))
++      (cinsn 17 (use (reg/i:DI x0)))
++      (cinsn 18 (use (reg/i:DI x3)))
++      (cinsn 19 (use (reg/i:DI x4)))
++      (cinsn 20 (use (reg/i:DI x5)))
++      (cinsn 21 (use (reg/i:DI x6)))
++      (edge-to exit (flags "FALLTHRU"))
++    ) ;; block 3
++  ) ;; insn-chain
++) ;; function "ldp_after_store_in_different_bb"
++}
++
++/* Verify that the output code doesn't contain ldp.  */
++/* { dg-final { scan-assembler-not {ldp\t} } }  */
+\ No newline at end of file
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 1e7d4e74b..2ccecffb5 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -280,6 +280,7 @@ DEFTIMEVAR (TV_RELOAD_CSE_REGS       , "reload CSE regs")
+ DEFTIMEVAR (TV_GCSE_AFTER_RELOAD     , "load CSE after reload")
+ DEFTIMEVAR (TV_REE		     , "ree")
+ DEFTIMEVAR (TV_THREAD_PROLOGUE_AND_EPILOGUE, "thread pro- & epilogue")
++DEFTIMEVAR (TV_SPLIT_CMP_INS         , "split complex instructions")
+ DEFTIMEVAR (TV_IFCVT2		     , "if-conversion 2")
+ DEFTIMEVAR (TV_SPLIT_PATHS	     , "split paths")
+ DEFTIMEVAR (TV_COMBINE_STACK_ADJUST  , "combine stack adjustments")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 86f38e2f2..6daac7fc1 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -612,6 +612,7 @@ extern rtl_opt_pass *make_pass_split_after_reload (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_thread_prologue_and_epilogue (gcc::context
+ 							     *ctxt);
+ extern rtl_opt_pass *make_pass_zero_call_used_regs (gcc::context *ctxt);
++extern rtl_opt_pass *make_pass_split_complex_instructions (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_stack_adjustments (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_sched_fusion (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_peephole2 (gcc::context *ctxt);
+-- 
+2.33.0
+
diff --git a/0042-LoongArch-Fix-libgcc-build-failure-when-libc-is-not-.patch b/0042-LoongArch-Fix-libgcc-build-failure-when-libc-is-not-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2f6a8ad22ac01ccfe5f775c455fae24c1a8c02dc
--- /dev/null
+++ b/0042-LoongArch-Fix-libgcc-build-failure-when-libc-is-not-.patch
@@ -0,0 +1,85 @@
+From 0f65e5ebe60d9ad5141115661ed71c321156cd95 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 21 Nov 2023 09:09:25 +0800
+Subject: [PATCH 042/188] LoongArch: Fix libgcc build failure when libc is not
+ available
+
+To use int64_t we included  in loongarch-def.h.
+Unfortunately, loongarch-def.h is also used by libgcc etc., causing a
+build failure when building a "stage1" cross compiler at which the
+target libc is not built yet.
+
+As int64_t is used for a C-compatible replacement of HOST_WIDE_INT, it's
+not directly or indirectly referred by the target libraries.  So
+guard everything requiring stdint.h with #if then they'll not block
+target libraries.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-def.h (stdint.h): Guard with #if to
+	exclude it for target libraries.
+	(loongarch_isa_base_features): Likewise.
+	(loongarch_isa): Likewise.
+	(loongarch_abi): Likewise.
+	(loongarch_target): Likewise.
+	(loongarch_cpu_default_isa): Likewise.
+---
+ gcc/config/loongarch/loongarch-def.h | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index ca0a324dd..ef848f606 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -46,7 +46,10 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef LOONGARCH_DEF_H
+ #define LOONGARCH_DEF_H
+ 
++#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ #include 
++#endif
++
+ #include "loongarch-tune.h"
+ 
+ #ifdef __cplusplus
+@@ -62,9 +65,11 @@ extern const char* loongarch_isa_base_strings[];
+ #define ISA_BASE_LA64V110     1
+ #define N_ISA_BASE_TYPES      2
+ 
++#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ /* Unlike other arrays, this is defined in loongarch-cpu.cc.  The problem is
+    we cannot use the C++ header options.h in loongarch-def.c.  */
+ extern int64_t loongarch_isa_base_features[];
++#endif
+ 
+ /* enum isa_ext_* */
+ extern const char* loongarch_isa_ext_strings[];
+@@ -121,6 +126,7 @@ extern const char* loongarch_cmodel_strings[];
+ #define M_OPT_ABSENT(opt_enum)  ((opt_enum) == M_OPT_UNSET)
+ 
+ 
++#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ /* Internal representation of the target.  */
+ struct loongarch_isa
+ {
+@@ -150,6 +156,9 @@ struct loongarch_target
+   int cmodel;	    /* CMODEL_ */
+ };
+ 
++extern struct loongarch_isa loongarch_cpu_default_isa[];
++#endif
++
+ /* CPU properties.  */
+ /* index */
+ #define CPU_NATIVE	  0
+@@ -162,7 +171,6 @@ struct loongarch_target
+ 
+ /* parallel tables.  */
+ extern const char* loongarch_cpu_strings[];
+-extern struct loongarch_isa loongarch_cpu_default_isa[];
+ extern int loongarch_cpu_issue_rate[];
+ extern int loongarch_cpu_multipass_dfa_lookahead[];
+ 
+-- 
+2.43.0
+
diff --git a/0043-Extending-and-refactoring-of-pass_split_complex_inst.patch b/0043-Extending-and-refactoring-of-pass_split_complex_inst.patch
new file mode 100644
index 0000000000000000000000000000000000000000..509a534f042554b56e658fda16b1ee63a04649c8
--- /dev/null
+++ b/0043-Extending-and-refactoring-of-pass_split_complex_inst.patch
@@ -0,0 +1,1426 @@
+From a49db831320ac70ca8f46b94ee60d7c6951f65c3 Mon Sep 17 00:00:00 2001
+From: Gadzhiev Emin WX1195297 
+Date: Wed, 20 Dec 2023 21:36:07 +0300
+Subject: [PATCH 10/18] Extending and refactoring of
+ pass_split_complex_instructions
+
+- Add flag parameter in is_ldp_insn and is_stp_insn to know
+  if instruction has writeback operation
+- Add support of PRE_*, POST_* operands as a memory address
+  expression
+- Split only LDPs that intersect with a dependent store
+  instruction
+- Make the selection of dependent store instructions stricter
+  so it will be enough to check by BFS that dependent store
+  instruction appears in search range.
+- Add helper methods to retrieve fields of rtx
+- Remove redundant iterations in find_dependent_stores_candidates
+- Refactor generation of instructions
+- Add more test cases
+---
+ gcc/config/aarch64/aarch64.cc                 |  62 +-
+ gcc/doc/tm.texi                               |  12 +-
+ gcc/sched-rgn.cc                              | 771 +++++++++---------
+ gcc/target.def                                |  14 +-
+ .../gcc.dg/rtl/aarch64/test-ldp-dont-split.c  |  35 +-
+ .../rtl/aarch64/test-ldp-split-rearrange.c    |   2 +-
+ .../gcc.dg/rtl/aarch64/test-ldp-split.c       | 181 +++-
+ 7 files changed, 603 insertions(+), 474 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 48e2eded0..fa566dd80 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -27507,39 +27507,59 @@ aarch64_run_selftests (void)
+ 
+ #endif /* #if CHECKING_P */
+ 
+-/* TODO: refuse to use ranges intead of full list of an instruction codes.  */
++/* TODO: refuse to use ranges instead of full list of an instruction codes.  */
+ 
+ bool
+-is_aarch64_ldp_insn (int icode)
++is_aarch64_ldp_insn (int icode, bool *has_wb)
+ {
+   if ((icode >= CODE_FOR_load_pair_sw_sisi
+-	  && icode <= CODE_FOR_load_pair_dw_tftf)
++	  && icode <= CODE_FOR_load_pair_sw_sfsf)
++      || (icode >= CODE_FOR_load_pair_dw_didi
++	  && icode <= CODE_FOR_load_pair_dw_dfdf)
++      || (icode == CODE_FOR_load_pair_dw_tftf)
+       || (icode >= CODE_FOR_loadwb_pairsi_si
+-	     && icode <= CODE_FOR_loadwb_pairtf_di)
+-      || (icode >= CODE_FOR_load_pairv8qiv8qi
+-	     && icode <= CODE_FOR_load_pairdfdf)
+-      || (icode >= CODE_FOR_load_pairv16qiv16qi
+-	     && icode <= CODE_FOR_load_pairv8bfv2df)
+-      || (icode >= CODE_FOR_load_pair_lanesv8qi
+-	     && icode <= CODE_FOR_load_pair_lanesdf))
+-    return true;
++	  && icode <= CODE_FOR_loadwb_pairdi_di)
++      || (icode >= CODE_FOR_loadwb_pairsf_si
++	  && icode <= CODE_FOR_loadwb_pairdf_di)
++      || (icode >= CODE_FOR_loadwb_pairti_si
++	  && icode <= CODE_FOR_loadwb_pairtf_di))
++    {
++      if (has_wb)
++	*has_wb = ((icode >= CODE_FOR_loadwb_pairsi_si
++		     && icode <= CODE_FOR_loadwb_pairdi_di)
++		   || (icode >= CODE_FOR_loadwb_pairsf_si
++		     && icode <= CODE_FOR_loadwb_pairdf_di)
++		   || (icode >= CODE_FOR_loadwb_pairti_si
++		      && icode <= CODE_FOR_loadwb_pairtf_di));
++      return true;
++    }
+   return false;
+ }
+ 
+ bool
+-is_aarch64_stp_insn (int icode)
++is_aarch64_stp_insn (int icode, bool *has_wb)
+ {
+   if ((icode >= CODE_FOR_store_pair_sw_sisi
+-	  && icode <= CODE_FOR_store_pair_dw_tftf)
++	  && icode <= CODE_FOR_store_pair_sw_sfsf)
++      || (icode >= CODE_FOR_store_pair_dw_didi
++	  && icode <= CODE_FOR_store_pair_dw_dfdf)
++      || (icode == CODE_FOR_store_pair_dw_tftf)
+       || (icode >= CODE_FOR_storewb_pairsi_si
+-	     && icode <= CODE_FOR_storewb_pairtf_di)
+-      || (icode >= CODE_FOR_vec_store_pairv8qiv8qi
+-	     && icode <= CODE_FOR_vec_store_pairdfdf)
+-      || (icode >= CODE_FOR_vec_store_pairv16qiv16qi
+-	     && icode <= CODE_FOR_vec_store_pairv8bfv2df)
+-      || (icode >= CODE_FOR_store_pair_lanesv8qi
+-	     && icode <= CODE_FOR_store_pair_lanesdf))
+-    return true;
++	  && icode <= CODE_FOR_storewb_pairdi_di)
++      || (icode >= CODE_FOR_storewb_pairsf_si
++	  && icode <= CODE_FOR_storewb_pairdf_di)
++      || (icode >= CODE_FOR_storewb_pairti_si
++	  && icode <= CODE_FOR_storewb_pairtf_di))
++    {
++      if (has_wb)
++	*has_wb = ((icode >= CODE_FOR_storewb_pairsi_si
++		     && icode <= CODE_FOR_storewb_pairdi_di)
++		   || (icode >= CODE_FOR_storewb_pairsf_si
++		     && icode <= CODE_FOR_storewb_pairdf_di)
++		   || (icode >= CODE_FOR_storewb_pairti_si
++		     && icode <= CODE_FOR_storewb_pairtf_di));
++      return true;
++    }
+   return false;
+ }
+ 
+diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
+index 0c6415a9c..3b6e90bf2 100644
+--- a/gcc/doc/tm.texi
++++ b/gcc/doc/tm.texi
+@@ -12113,12 +12113,16 @@ object files that are not referenced from @code{main} and uses export
+ lists.
+ @end defmac
+ 
+-@deftypefn {Target Hook} bool TARGET_IS_LDP_INSN (int @var{icode})
+-Return true if icode is corresponding to any of the LDP instruction types.
++@deftypefn {Target Hook} bool TARGET_IS_LDP_INSN (int @var{icode}, bool *@var{has_wb})
++Return true if @var{icode} is corresponding to any of the LDP instruction
++types.  If @var{has_wb} is not NULL then its value is set to true if LDP
++contains post-index or pre-index operation.
+ @end deftypefn
+ 
+-@deftypefn {Target Hook} bool TARGET_IS_STP_INSN (int @var{icode})
+-Return true if icode is corresponding to any of the STP instruction types.
++@deftypefn {Target Hook} bool TARGET_IS_STP_INSN (int @var{icode}, bool *@var{has_wb})
++Return true if @var{icode} is corresponding to any of the STP instruction
++types.  If @var{has_wb} is not NULL then its value is set to true if STP
++contains post-index or pre-index operation.
+ @end deftypefn
+ 
+ @deftypefn {Target Hook} bool TARGET_CANNOT_MODIFY_JUMPS_P (void)
+diff --git a/gcc/sched-rgn.cc b/gcc/sched-rgn.cc
+index b4df8bdc5..5f61de1c8 100644
+--- a/gcc/sched-rgn.cc
++++ b/gcc/sched-rgn.cc
+@@ -3956,7 +3956,7 @@ make_pass_sched_fusion (gcc::context *ctxt)
+ 
+ namespace {
+ 
+-/* Def-use analisys special functions implementation.  */
++/* Def-use analysis special functions implementation.  */
+ 
+ static struct df_link *
+ get_defs (rtx_insn *insn, rtx reg)
+@@ -4032,42 +4032,66 @@ const pass_data pass_data_split_complex_instructions = {
+   (TODO_df_verify | TODO_df_finish), /* Todo_flags_finish.  */
+ };
+ 
++/* Pass split_complex_instructions finds LOAD PAIR instructions (LDP) that can
++   be split into two LDR instructions.  It splits only those LDP for which one
++   half of the requested memory is contained in the preceding STORE (STR/STP)
++   instruction whose base register has the same definition.  This allows
++   to use hardware store-to-load forwarding mechanism and to get one half of
++   requested memory from the store queue of CPU.
++
++   TODO: Add split of STP.
++   TODO: Add split of vector STP and LDP.  */
+ class pass_split_complex_instructions : public rtl_opt_pass
+ {
+ private:
+-  enum complex_instructions_t
++  enum mem_access_insn_t
+   {
+     UNDEFINED,
+     LDP,
++    /* LDP with post-index (see loadwb_pair in config/aarch64.md).  */
++    LDP_WB,
++    /* LDP that contains one destination register in RTL IR
++       (see movti_aarch64 in config/aarch64.md).  */
+     LDP_TI,
+     STP,
++    /* STP with pre-index (see storewb_pair in config/aarch64.md).  */
++    STP_WB,
++    /* STP that contains one source register in RTL IR
++       (see movti_aarch64 in config/aarch64.md).  */
++    STP_TI,
+     STR
+   };
+ 
+-  void split_complex_insn (rtx_insn *insn);
+-  void split_ldp_ti (rtx_insn *insn);
+-  void split_ldp_with_offset (rtx_insn *ldp_insn);
+-  void split_simple_ldp (rtx_insn *ldp_insn);
+-  void split_ldp_stp (rtx_insn *insn);
+-  complex_instructions_t get_insn_type (rtx_insn *insn);
+-
+-  basic_block bb;
+-  rtx_insn *insn;
+   std::set dependent_stores_candidates;
+   std::set ldp_to_split_list;
+ 
+-  complex_instructions_t complex_insn_type = UNDEFINED;
+-  bool is_store_insn (rtx_insn *insn);
+-  bool is_ldp_dependent_on_store (rtx_insn *ldp_insn, basic_block bb);
++  void split_ldp_ti (rtx_insn *insn);
++  void split_ldp (rtx_insn *ldp_insn);
++  /* Emit a NEW_INSNS chain, recognize instruction code of each new instruction
++     and replace OLD_INSN with the emitted sequence.  */
++  void replace_insn (rtx_insn *old_insn, rtx_insn *new_insns);
++
++  mem_access_insn_t get_insn_type (rtx_insn *insn);
++  bool is_typeof_ldp (mem_access_insn_t insn_type);
++  bool is_typeof_stp (mem_access_insn_t insn_type);
++
+   bool bfs_for_reg_dependent_store (rtx_insn *ldp_insn, basic_block search_bb,
+ 				    rtx_insn *search_insn,
+ 				    int search_range
+ 				    = param_ldp_dependency_search_range);
+   bool is_store_reg_dependent (rtx_insn *ldp_insn, rtx_insn *str_insn);
+   void init_df ();
+-  void find_dependent_stores_candidates (rtx_insn *ldp_insn);
+-  int get_insn_offset (rtx_insn *insn, complex_instructions_t insn_type,
+-		       int *arith_operation_ptr = NULL);
++  void find_dependent_stores_candidates (rtx_insn *ldp_insn,
++					 mem_access_insn_t insn_type);
++
++  rtx get_memref (rtx_insn *insn, mem_access_insn_t insn_type);
++  rtx get_base_reg (rtx memref);
++  /* Set OFFSET to the offset value.  Returns TRUE if MEMREF's address
++     expression is supported, FALSE otherwise.  */
++  bool get_offset (rtx memref, int &offset);
++  /* Return size of memory referenced by MEMREF.  Returns -1 if INSN_TYPE
++     wasn't recognized.  */
++  int get_unit_size (rtx memref, mem_access_insn_t insn_type);
+ 
+ public:
+   pass_split_complex_instructions (gcc::context *ctxt)
+@@ -4080,28 +4104,22 @@ public:
+   virtual unsigned int
+   execute (function *)
+   {
+-    enum rtx_code ldp_memref_code;
++    basic_block bb;
++    rtx_insn *insn;
++
+     init_df ();
+     ldp_to_split_list.clear ();
+     FOR_EACH_BB_FN (bb, cfun)
+       {
+ 	FOR_BB_INSNS (bb, insn)
+ 	  {
+-	    complex_instructions_t insn_type = get_insn_type (insn);
+-	    /* TODO: Add splitting of STP instructions.  */
+-	    if (insn_type != LDP && insn_type != LDP_TI)
++	    mem_access_insn_t insn_type = get_insn_type (insn);
++	    if (!is_typeof_ldp (insn_type))
+ 	      continue;
+-	    /* TODO: Currently support only ldp_ti and ldp with REG or
+-	       PLUS/MINUS offset expression.  */
+-	    if (insn_type == LDP_TI)
+-	      {
+-		ldp_memref_code = GET_CODE (XEXP (XEXP (PATTERN (insn), 1),
+-						  0));
+-		if (ldp_memref_code != REG && ldp_memref_code != PLUS
+-		    && ldp_memref_code != MINUS)
+-		  continue;
+-	      }
+-	    if (is_ldp_dependent_on_store (insn, bb))
++
++	    find_dependent_stores_candidates (insn, insn_type);
++	    if (!dependent_stores_candidates.empty ()
++	       && bfs_for_reg_dependent_store (insn, bb, insn))
+ 	      {
+ 		ldp_to_split_list.insert (insn);
+ 	      }
+@@ -4110,18 +4128,107 @@ public:
+ 
+     for (std::set::iterator i = ldp_to_split_list.begin ();
+ 	 i != ldp_to_split_list.end (); ++i)
+-      split_complex_insn (*i);
++      split_ldp (*i);
+ 
+     return 0;
+   }
+ }; // class pass_split_complex_instructions
+ 
+ bool
+-pass_split_complex_instructions::is_ldp_dependent_on_store (rtx_insn *ldp_insn,
+-							    basic_block bb)
++pass_split_complex_instructions::is_typeof_ldp (
++    mem_access_insn_t insn_type)
+ {
+-  find_dependent_stores_candidates (ldp_insn);
+-  return bfs_for_reg_dependent_store (ldp_insn, bb, ldp_insn);
++  return (insn_type == LDP || insn_type == LDP_WB || insn_type == LDP_TI);
++}
++
++bool
++pass_split_complex_instructions::is_typeof_stp (
++    mem_access_insn_t insn_type)
++{
++  return (insn_type == STP || insn_type == STP_WB || insn_type == STP_TI);
++}
++
++rtx
++pass_split_complex_instructions::get_memref (
++    rtx_insn *insn, mem_access_insn_t insn_type)
++{
++  rtx insn_pat = PATTERN (insn);
++  rtx memref = NULL;
++
++  switch (insn_type)
++    {
++      case LDP:
++	memref = SET_SRC (XVECEXP (insn_pat, 0, 0));
++	break;
++      case LDP_WB:
++	memref = SET_SRC (XVECEXP (insn_pat, 0, 1));
++	break;
++      case LDP_TI:
++	memref = SET_SRC (insn_pat);
++	break;
++      case STP:
++	memref = SET_DEST (XVECEXP (insn_pat, 0, 0));
++	break;
++      case STP_WB:
++	memref = SET_DEST (XVECEXP (insn_pat, 0, 1));
++	break;
++      case STP_TI:
++      case STR:
++	memref = SET_DEST (insn_pat);
++	break;
++      default:
++	break;
++    }
++
++  if (memref && !MEM_P (memref))
++    return NULL;
++  return memref;
++}
++
++rtx
++pass_split_complex_instructions::get_base_reg (rtx memref)
++{
++  if (!memref || !MEM_P (memref))
++    return NULL;
++  rtx addr_exp = XEXP (memref, 0);
++
++  switch (GET_CODE (addr_exp))
++    {
++      case REG:
++	return addr_exp;
++      case PLUS:
++      case PRE_DEC:
++      case PRE_INC:
++      case POST_DEC:
++      case POST_INC:
++	if (REG_P (XEXP (addr_exp, 0)))
++	  return XEXP (addr_exp, 0);
++      default:
++	return NULL;
++    }
++}
++
++int
++pass_split_complex_instructions::get_unit_size (
++    rtx memref, mem_access_insn_t insn_type)
++{
++  if (!memref)
++    return -1;
++
++  switch (insn_type)
++    {
++      case LDP:
++      case STP:
++      case LDP_WB:
++      case STP_WB:
++      case STR:
++	return GET_MODE_SIZE (GET_MODE (memref)).to_constant ();
++      case LDP_TI:
++      case STP_TI:
++	return GET_MODE_SIZE (E_DImode).to_constant ();
++      default:
++	return -1;
++    }
+ }
+ 
+ bool
+@@ -4135,9 +4242,9 @@ pass_split_complex_instructions::bfs_for_reg_dependent_store (
+     {
+       if (!current_search_insn)
+ 	return false;
+-      bool checking_result
+-	  = is_store_reg_dependent (ldp_insn, current_search_insn);
+-      if (checking_result)
++
++      if (dependent_stores_candidates.find (current_search_insn)
++	  != dependent_stores_candidates.end ())
+ 	{
+ 	  if (dump_file)
+ 	    {
+@@ -4185,30 +4292,29 @@ pass_split_complex_instructions::init_df ()
+ 
+ void
+ pass_split_complex_instructions::find_dependent_stores_candidates (
+-    rtx_insn *ldp_insn)
++    rtx_insn *ldp_insn, mem_access_insn_t insn_type)
+ {
+   dependent_stores_candidates.clear ();
+-  df_ref use;
+ 
+-  FOR_EACH_INSN_USE (use, ldp_insn)
+-    {
+-      df_link *defs = get_defs (ldp_insn, DF_REF_REG (use));
+-      if (!defs)
+-	return;
++  rtx base_reg = get_base_reg (get_memref (ldp_insn, insn_type));
++  if (!base_reg)
++    return;
+ 
+-      for (df_link *def = defs; def; def = def->next)
+-	{
+-	  df_link *uses
+-	      = get_uses (DF_REF_INSN (def->ref), DF_REF_REG (def->ref));
+-	  if (!uses)
+-	    continue;
++  df_link *defs = get_defs (ldp_insn, base_reg);
++  if (!defs)
++    return;
+ 
+-	  for (df_link *use = uses; use; use = use->next)
+-	    {
+-	      if (DF_REF_CLASS (use->ref) == DF_REF_REGULAR
+-		  && is_store_insn (DF_REF_INSN (use->ref)))
+-		dependent_stores_candidates.insert (DF_REF_INSN (use->ref));
+-	    }
++  for (df_link *def = defs; def; def = def->next)
++    {
++      df_link *uses = get_uses (DF_REF_INSN (def->ref), DF_REF_REG (def->ref));
++      if (!uses)
++	continue;
++      for (df_link *use = uses; use; use = use->next)
++	{
++	  if (DF_REF_CLASS (use->ref) == DF_REF_REGULAR
++	      && DF_REF_INSN (use->ref) != ldp_insn
++	      && is_store_reg_dependent (ldp_insn, DF_REF_INSN (use->ref)))
++	    dependent_stores_candidates.insert (DF_REF_INSN (use->ref));
+ 	}
+     }
+ }
+@@ -4217,423 +4323,274 @@ bool
+ pass_split_complex_instructions::is_store_reg_dependent (rtx_insn *ldp_insn,
+ 							 rtx_insn *str_insn)
+ {
+-  if (!is_store_insn (str_insn)
+-      || dependent_stores_candidates.find (str_insn)
+-	     == dependent_stores_candidates.end ())
++  if (!str_insn)
+     return false;
+ 
+-  int ldp_offset_sign = UNDEFINED;
+-  int ldp_offset
+-      = get_insn_offset (ldp_insn, get_insn_type (ldp_insn), &ldp_offset_sign);
+-  if (ldp_offset_sign == MINUS)
+-    ldp_offset = -ldp_offset;
++  mem_access_insn_t st_type = get_insn_type (str_insn);
++  if (!is_typeof_stp (st_type) && st_type != STR)
++    return false;
+ 
+-  int str_offset_sign = UNDEFINED;
+-  int str_offset = get_insn_offset (str_insn, STR, &str_offset_sign);
+-  if (str_offset_sign == MINUS)
+-    str_offset = -str_offset;
++  mem_access_insn_t ld_type = get_insn_type (ldp_insn);
++  rtx ld_memref = get_memref (ldp_insn, ld_type);
++  rtx st_memref = get_memref (str_insn, st_type);
++  rtx ld_base_reg = get_base_reg (ld_memref);
++  rtx st_base_reg =  get_base_reg (st_memref);
+ 
+-  if (str_offset == ldp_offset || str_offset == ldp_offset + 8)
+-    return true;
++  if (!ld_base_reg || !st_base_reg
++      || REGNO (ld_base_reg) != REGNO (st_base_reg))
++    return false;
+ 
+-  return false;
+-}
++  int ld_offset = 0;
++  int st_offset = 0;
++  if (get_offset (ld_memref, ld_offset)
++      && get_offset (st_memref, st_offset))
++    {
++      int ld_unit_size = get_unit_size (ld_memref, ld_type);
++      int st_size = get_unit_size (st_memref, st_type);
++      if (st_type != STR)
++	st_size *= 2;
+ 
+-bool
+-pass_split_complex_instructions::is_store_insn (rtx_insn *insn)
+-{
+-  if (!insn)
+-    return false;
+-  rtx sset_b = single_set (insn);
+-  /* TODO: The condition below allow to take only store instructions in which
+-     the memory location's operand is either a register (base) or an plus/minus
+-     operation (base + #imm). So it might make sense to add support for other
+-     cases (e.g. multiply and shift).  */
+-  if (sset_b && MEM_P (SET_DEST (sset_b))
+-      && GET_MODE (XEXP (sset_b, 0)) != BLKmode
+-      && (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == REG
+-	  || (GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == PLUS
+-	      || GET_CODE (XEXP (XEXP (sset_b, 0), 0)) == MINUS)
+-	  && (GET_CODE (XEXP (XEXP (XEXP (sset_b, 0), 0), 1)) == CONST_INT)))
+-    return true;
++      if (ld_unit_size < 0 || st_size < 0)
++	return false;
++
++      bool st_has_low_ld_part = (ld_offset >= st_offset
++	&& (ld_offset + ld_unit_size <= st_offset + st_size));
++      bool st_has_high_ld_part = ((ld_offset + ld_unit_size >= st_offset)
++	&& (ld_offset + 2 * ld_unit_size <= st_offset + st_size));
++      bool st_has_not_full_ld = (ld_offset < st_offset
++	|| (ld_offset + 2 * ld_unit_size > st_offset + st_size));
++
++      if ((st_has_low_ld_part || st_has_high_ld_part) && st_has_not_full_ld)
++	return true;
++    }
+ 
+   return false;
+ }
+ 
+-int
+-pass_split_complex_instructions::get_insn_offset (
+-    rtx_insn *insn, complex_instructions_t insn_type, int *arith_operation_ptr)
++bool
++pass_split_complex_instructions::get_offset (rtx memref, int &offset)
+ {
+-  rtx insn_pat = PATTERN (insn);
+-  int returned_offset = 0;
++  rtx addr_exp = XEXP (memref, 0);
+ 
+-  rtx offset_expr = NULL;
+-  rtx offset_value_expr = NULL;
+-
+-  switch (insn_type)
++  switch (GET_CODE (addr_exp))
+     {
+-    case LDP:
+-      {
+-	int number_of_sub_insns = XVECLEN (insn_pat, 0);
+-
+-	/* Calculate it's own ofsset of first load insn.  */
+-	rtx_insn *first_load_insn = NULL;
+-	if (number_of_sub_insns == 2)
++      case REG:
++      case POST_DEC:
++      case POST_INC:
++	offset = 0;
++	return true;
++      case PRE_DEC:
++	offset = -(GET_MODE_SIZE (GET_MODE (memref)).to_constant ());
++	return true;
++      case PRE_INC:
++	offset = GET_MODE_SIZE (GET_MODE (memref)).to_constant ();
++	return true;
++      case PLUS:
++	if (CONST_INT_P (XEXP (addr_exp, 1)))
+ 	  {
+-	    first_load_insn
+-		= make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0)));
+-	    arith_operation_ptr = NULL;
+-
+-	    offset_expr = XEXP (XEXP (PATTERN (first_load_insn), 1), 0);
+-	    if (GET_CODE (offset_expr) == PLUS
+-		|| GET_CODE (offset_expr) == MINUS)
+-	      offset_value_expr
+-		  = XEXP (XEXP (XEXP (PATTERN (first_load_insn), 1), 0), 1);
+-	    else
+-	      offset_expr = NULL;
++	    offset = INTVAL (XEXP (addr_exp, 1));
++	    return true;
+ 	  }
+-	else if (number_of_sub_insns == 3)
+-	  {
+-	    rtx_insn *offset_sub_insn
+-		= make_insn_raw (copy_rtx (XVECEXP (insn_pat, 0, 0)));
+-
+-	    offset_expr = XEXP (PATTERN (offset_sub_insn), 1);
+-	    offset_value_expr = XEXP (XEXP (PATTERN (offset_sub_insn), 1), 1);
+-	  }
+-	else
+-	  {
+-	    gcc_assert (false
+-			&& "Wrong number of elements in the ldp_insn vector");
+-	  }
+-	break;
+-      }
+-    case LDP_TI:
+-      {
+-	offset_expr = XEXP (XEXP (insn_pat, 1), 0);
+-	if (GET_CODE (offset_expr) != PLUS && GET_CODE (offset_expr) != MINUS)
+-	  return 0;
+-	offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 1), 0), 1);
+-	break;
+-      }
+-    case STR:
+-      {
+-	offset_expr = XEXP (XEXP (insn_pat, 0), 0);
+-	/* If memory location is specified by single base register then the
+-	   offset is zero.  */
+-	if (GET_CODE (offset_expr) == REG)
+-	  return 0;
+-	offset_value_expr = XEXP (XEXP (XEXP (insn_pat, 0), 0), 1);
+-	break;
+-      }
+-    default:
+-      {
+-	if (dumps_are_enabled && dump_file)
+-	  {
+-	    fprintf (dump_file, "Instruction that was tried to split:\n");
+-	    print_rtl_single (dump_file, insn);
+-	  }
+-	gcc_assert (false && "Unsupported instruction type");
+-	break;
+-      }
+-    }
+-
+-  if (offset_expr != NULL && offset_value_expr
+-      && GET_CODE (offset_value_expr) == CONST_INT)
+-    returned_offset = XINT (offset_value_expr, 0);
+-
+-  if (arith_operation_ptr != NULL)
+-    {
+-      *arith_operation_ptr = GET_CODE (offset_expr);
+-      gcc_assert ((*arith_operation_ptr == MINUS
+-		   || *arith_operation_ptr == PLUS)
+-		  && "Unexpected arithmetic operation in the offset expr");
++      default:
++	return false;
+     }
+-
+-  return returned_offset;
+ }
+ 
+ void
+-pass_split_complex_instructions::split_simple_ldp (rtx_insn *ldp_insn)
++pass_split_complex_instructions::replace_insn (rtx_insn *old_insn,
++					       rtx_insn *new_insns)
+ {
+-  rtx pat = PATTERN (ldp_insn);
+-
+-  rtx_insn *mem_insn_1 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 0)));
+-  rtx_insn *mem_insn_2 = make_insn_raw (copy_rtx (XVECEXP (pat, 0, 1)));
+-
+-  int dest_regno = REGNO (SET_DEST (PATTERN (mem_insn_1)));
+-  int src_regno;
+-
+-  rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (mem_insn_1)), 0);
+-
+-  if (GET_CODE (srs_reg_insn) == REG)
+-    src_regno = REGNO (srs_reg_insn);
+-  else
+-    src_regno = REGNO (XEXP (srs_reg_insn, 0));
+-
+-  rtx_insn *emited_insn_1, *emited_insn_2;
++  rtx_insn *prev_insn = PREV_INSN (old_insn);
++  start_sequence ();
+ 
+-  /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first.  */
+-  if (src_regno == dest_regno)
+-    std::swap (mem_insn_1, mem_insn_2);
++  emit_insn (new_insns);
++  if (dump_file)
++    {
++      fprintf (dump_file, "Split LDP:\n");
++      print_rtl_single (dump_file, old_insn);
++      fprintf (dump_file, "Split into:\n");
++    }
+ 
+-  emited_insn_1 = emit_insn (PATTERN (mem_insn_1));
+-  emited_insn_2 = emit_insn (PATTERN (mem_insn_2));
++  for (rtx_insn *insn = new_insns; insn; insn = NEXT_INSN (insn))
++    {
++	INSN_CODE (insn) = recog (PATTERN (insn), insn, NULL);
++	if (dump_file)
++	  {
++	    print_rtl_single (dump_file, insn);
++	  }
++    }
+ 
+-  int sub_insn_1_code = recog (PATTERN (mem_insn_1), mem_insn_1, 0);
+-  int sub_insn_2_code = recog (PATTERN (mem_insn_2), mem_insn_2, 0);
++  rtx_insn *seq = get_insns ();
++  unshare_all_rtl_in_chain (seq);
++  end_sequence ();
+ 
+-  INSN_CODE (emited_insn_1) = sub_insn_1_code;
+-  INSN_CODE (emited_insn_2) = sub_insn_2_code;
++  emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (old_insn));
++  delete_insn_and_edges (old_insn);
+ }
+ 
+ void
+-pass_split_complex_instructions::split_ldp_with_offset (rtx_insn *ldp_insn)
++pass_split_complex_instructions::split_ldp (rtx_insn *ldp_insn)
+ {
+   rtx pat = PATTERN (ldp_insn);
+-  bool post_index = true;
+-
+-  rtx_insn offset_insn;
+-  rtx_insn mem_insn_1;
+-  rtx_insn mem_insn_2;
++  mem_access_insn_t insn_type = get_insn_type (ldp_insn);
++  gcc_assert (is_typeof_ldp (insn_type));
+ 
+-  int offset_insn_code;
+-  int mem_insn_1_code = -1;
+-  int mem_insn_2_code = -1;
++  rtx load_rtx_1 = NULL;
++  rtx load_rtx_2 = NULL;
++  rtx post_index_rtx = NULL;
+ 
+-  int offset = 0;
+-  int arith_operation = UNDEFINED;
+-
+-  for (int i = 0; i < 3; i++)
++  switch (insn_type)
+     {
+-      rtx sub_insn = XVECEXP (pat, 0, i);
+-      rtx_insn *copy_of_sub_insn = make_insn_raw (copy_rtx (sub_insn));
+-      int sub_insn_code
+-	  = recog (PATTERN (copy_of_sub_insn), copy_of_sub_insn, 0);
+-
+-      /* If sub_insn is offset related.  */
+-      if (GET_RTX_CLASS (sub_insn_code) == RTX_UNARY)
+-	{
+-	  offset_insn = *copy_of_sub_insn;
+-	  offset_insn_code = sub_insn_code;
+-	  gcc_assert (i == 0
+-		      && "Offset related insn must be the first "
+-			 "element of a parallel insn vector");
+-
+-	  offset = get_insn_offset (ldp_insn, LDP, &arith_operation);
+-	}
+-      else
+-	{
+-	  if (GET_CODE (XEXP (PATTERN (copy_of_sub_insn), 0)) != REG)
+-	    {
+-	      rtx &offset_expr
+-		  = XEXP (XEXP (XEXP (PATTERN (copy_of_sub_insn), 0), 0), 1);
+-	      if (GET_CODE (offset_expr) == CONST_INT)
+-		{
+-		  int local_offset = XINT (offset_expr, 0);
+-		  offset = (arith_operation == PLUS ? offset : -offset);
+-
+-		  offset_expr = GEN_INT (local_offset + offset);
+-
+-		  gcc_assert (
+-		      (arith_operation == MINUS || arith_operation == PLUS)
+-		      && "Unexpected arithmetic operation in offset related "
+-			 "sub_insn");
+-
+-		  if (i == 1)
+-		    post_index = false;
+-		}
+-	      else
+-		{
+-		  post_index = true;
+-		}
+-	    }
+-	}
+-      if (i == 1)
+-	{
+-	  mem_insn_1 = *copy_of_sub_insn;
+-	  mem_insn_1_code = sub_insn_code;
+-	}
+-      if (i == 2)
+-	{
+-	  mem_insn_2 = *copy_of_sub_insn;
+-	  mem_insn_2_code = sub_insn_code;
+-	}
++      case LDP:
++	load_rtx_1 = copy_rtx (XVECEXP (pat, 0, 0));
++	load_rtx_2 = copy_rtx (XVECEXP (pat, 0, 1));
++	break;
++      case LDP_WB:
++	post_index_rtx = copy_rtx (XVECEXP (pat, 0, 0));
++	load_rtx_1 = copy_rtx (XVECEXP (pat, 0, 1));
++	load_rtx_2 = copy_rtx (XVECEXP (pat, 0, 2));
++	break;
++      case LDP_TI:
++	split_ldp_ti (ldp_insn);
++	return;
++      default:
++	return;
+     }
+-  gcc_assert (mem_insn_1_code != -1 && mem_insn_2_code != -1
+-	      && "Uninitialized memory insns");
+ 
+-  int dest_regno = REGNO (SET_DEST (PATTERN (&mem_insn_1)));
+-  int src_regno;
+-
+-  rtx srs_reg_insn = XEXP (SET_SRC (PATTERN (&mem_insn_1)), 0);
+-
+-  if (GET_CODE (srs_reg_insn) == REG)
+-    src_regno = REGNO (srs_reg_insn);
+-  else
+-    src_regno = REGNO (XEXP (srs_reg_insn, 0));
++  int dest_regno = REGNO (SET_DEST (load_rtx_1));
++  int base_regno = REGNO (get_base_reg (get_memref (ldp_insn, insn_type)));
+ 
+-  /* Don't split such weird LDP.  */
+-  if (src_regno == dest_regno)
+-    return;
+-
+-  rtx_insn *emited_offset_insn;
+-  if (!post_index)
++  /* In cases like ldp r1,r2,[r1[, #imm]] emit ldr r2,[r1[, #imm]] first.
++     For LDP with post-index don't split such instruction.  */
++  if (base_regno == dest_regno)
+     {
+-      emited_offset_insn = emit_insn (PATTERN (&offset_insn));
+-      INSN_CODE (emited_offset_insn) = offset_insn_code;
++      if (insn_type == LDP)
++	std::swap (load_rtx_1, load_rtx_2);
++      else
++	return;
+     }
+ 
+-  rtx_insn *emited_insn_1 = emit_insn (PATTERN (&mem_insn_1));
+-  rtx_insn *emited_insn_2 = emit_insn (PATTERN (&mem_insn_2));
+-
+-
+-  INSN_CODE (emited_insn_1) = mem_insn_1_code;
+-  INSN_CODE (emited_insn_2) = mem_insn_2_code;
+-
+-  if (post_index)
++  /* Construct the instruction chain for subsequent emitting.  */
++  rtx_insn *insn_seq = make_insn_raw (load_rtx_1);
++  rtx_insn *load_insn_2 = make_insn_raw (load_rtx_2);
++  SET_NEXT_INSN (insn_seq) = load_insn_2;
++  SET_NEXT_INSN (load_insn_2) = NULL;
++  if (post_index_rtx)
+     {
+-      emited_offset_insn = emit_insn (PATTERN (&offset_insn));
+-      INSN_CODE (emited_offset_insn) = offset_insn_code;
++      rtx_insn *post_index_insn = make_insn_raw (post_index_rtx);
++      SET_NEXT_INSN (load_insn_2) = post_index_insn;
++      SET_NEXT_INSN (post_index_insn) = NULL;
+     }
+-}
+-
+-void
+-pass_split_complex_instructions::split_ldp_stp (rtx_insn *insn)
+-{
+-  rtx_insn *prev_insn = PREV_INSN (insn);
+-  int number_of_sub_insns = XVECLEN (PATTERN (insn), 0);
+-
+-  start_sequence ();
+ 
+-  if (number_of_sub_insns == 2)
+-    split_simple_ldp (insn);
+-  else if (number_of_sub_insns == 3)
+-    split_ldp_with_offset (insn);
+-  else
+-    gcc_assert (false && "Broken complex insn vector");
+-
+-  rtx_insn *seq = get_insns ();
+-  unshare_all_rtl_in_chain (seq);
+-  end_sequence ();
+-
+-  emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn));
+-  delete_insn_and_edges (insn);
++  replace_insn (ldp_insn, insn_seq);
+ }
+ 
+ void
+ pass_split_complex_instructions::split_ldp_ti (rtx_insn *insn)
+ {
+-  rtx_insn *prev_insn = PREV_INSN (insn);
+-  rtx_insn *load_insn_1 = make_insn_raw (copy_rtx (PATTERN (insn)));
+-  rtx_insn *load_insn_2 = make_insn_raw (copy_rtx (PATTERN (insn)));
+-
+-  rtx reg_insn_1 = XEXP (PATTERN (load_insn_1), 0);
+-  rtx mem_insn_1 = XEXP (PATTERN (load_insn_1), 1);
+-  rtx mem_insn_2 = XEXP (PATTERN (load_insn_2), 1);
+-
+-  PUT_MODE (mem_insn_1, DImode);
+-  PUT_MODE (mem_insn_2, DImode);
+-
+-  int reg_no_1 = REGNO (reg_insn_1);
++  rtx pat = PATTERN (insn);
++  rtx memref = get_memref (insn, LDP_TI);
++  int unit_size = get_unit_size (memref, LDP_TI);
++  rtx base_reg = get_base_reg (memref);
++  rtx dest_reg = SET_DEST (pat);
++
++  rtx reg_index_rtx = NULL;
++  rtx load_rtx_1 = NULL;
++  rtx load_rtx_2 = NULL;
++  bool post_index = false;
++  int offset = 0;
+ 
+-  XEXP (PATTERN (load_insn_1), 0) = gen_rtx_REG (DImode, reg_no_1);
+-  XEXP (PATTERN (load_insn_2), 0) = gen_rtx_REG (DImode, reg_no_1 + 1);
++  rtx load_1_memref = gen_rtx_MEM (DImode, base_reg);
+ 
+-  rtx load_insn_2_plus_expr = XEXP (XEXP (PATTERN (load_insn_2), 1), 0);
+-  if (GET_CODE (load_insn_2_plus_expr) == REG)
++  rtx addr_expr = XEXP (memref, 0);
++  if (GET_CODE (addr_expr) == PLUS)
+     {
+-	XEXP (XEXP (PATTERN (load_insn_2), 1), 0)
+-	  = gen_rtx_PLUS (DImode,
+-			  gen_rtx_REG (DImode, REGNO (load_insn_2_plus_expr)),
+-			  GEN_INT (GET_MODE_SIZE (DImode)));
++      offset = INTVAL (XEXP (addr_expr, 1));
++      XEXP (load_1_memref, 0) = gen_rtx_PLUS (DImode, base_reg,
++					      GEN_INT (offset));
+     }
+-  else
+-    {
+-      rtx load_insn_2_offset_expr
+-      = XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1);
+ 
+-      if (load_insn_2_offset_expr == NULL)
+-	return;
+-
+-      if (GET_CODE (load_insn_2_offset_expr) == CONST_INT)
+-	{
+-	  int load_insn_2_offset = XINT (load_insn_2_offset_expr, 0);
+-	  XEXP (XEXP (XEXP (PATTERN (load_insn_2), 1), 0), 1)
+-	    = GEN_INT (load_insn_2_offset + GET_MODE_SIZE (DImode));
+-	}
+-    }
+-
+-  start_sequence ();
++  rtx load_2_memref = gen_rtx_MEM (DImode,
++    gen_rtx_PLUS (DImode, base_reg, GEN_INT (offset + unit_size)));
+ 
+-  int src_regno;
+-  rtx srs_reg_insn = XEXP (XEXP (PATTERN (load_insn_1), 1), 0);
++  load_rtx_1 = gen_rtx_SET (gen_rtx_REG (DImode, REGNO (dest_reg)),
++			    load_1_memref);
++  load_rtx_2 = gen_rtx_SET (gen_rtx_REG (DImode, REGNO (dest_reg) + 1),
++			    load_2_memref);
+ 
+-  if (GET_CODE (srs_reg_insn) == REG)
+-    src_regno = REGNO (srs_reg_insn);
+-  else
+-    src_regno = REGNO (XEXP (srs_reg_insn, 0));
++  if (GET_CODE (addr_expr) == PRE_INC || GET_CODE (addr_expr) == PRE_DEC
++      || GET_CODE (addr_expr) == POST_INC || GET_CODE (addr_expr) == POST_DEC)
++    {
++      /* The amount of increment or decrement is equal to size of
++	 machine-mode of the containing MEMREF (see rtl.def).  */
++      int index_offset = GET_MODE_SIZE (GET_MODE (memref)).to_constant ();
+ 
+-  /* in cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first.  */
+-  if (src_regno == reg_no_1)
+-    std::swap (load_insn_1, load_insn_2);
++      if (GET_CODE (addr_expr) == PRE_DEC || GET_CODE (addr_expr) == POST_DEC)
++	index_offset = -index_offset;
+ 
+-  rtx_insn *emited_load_insn_1 = emit_insn (PATTERN (load_insn_1));
+-  rtx_insn *emited_load_insn_2 = emit_insn (PATTERN (load_insn_2));
++      if (GET_CODE (addr_expr) == POST_INC || GET_CODE (addr_expr) == POST_DEC)
++	post_index = true;
+ 
+-  INSN_CODE (emited_load_insn_1)
+-      = recog (PATTERN (emited_load_insn_1), emited_load_insn_1, 0);
+-  INSN_CODE (emited_load_insn_2)
+-      = recog (PATTERN (emited_load_insn_2), emited_load_insn_2, 0);
++      reg_index_rtx = gen_rtx_SET (base_reg,
++				   gen_rtx_PLUS (DImode, base_reg,
++						 GEN_INT (index_offset)));
++    }
+ 
+-  rtx_insn *seq = get_insns ();
+-  unshare_all_rtl_in_chain (seq);
+-  end_sequence ();
++  /* In cases like ldp r1,r2,[r1] we emit ldr r2,[r1] first.  */
++  if (REGNO (base_reg) == REGNO (dest_reg))
++    std::swap (load_rtx_1, load_rtx_2);
+ 
+-  emit_insn_after_setloc (seq, prev_insn, INSN_LOCATION (insn));
+-  delete_insn_and_edges (insn);
+-}
++  /* Construct the instruction chain for subsequent emitting.  */
++  rtx_insn *insn_seq = make_insn_raw (load_rtx_1);
++  rtx_insn *load_insn_2 = make_insn_raw (load_rtx_2);
++  SET_NEXT_INSN (insn_seq) = load_insn_2;
++  SET_NEXT_INSN (load_insn_2) = NULL;
++  if (post_index && reg_index_rtx)
++    {
++      rtx_insn *post_index_insn = make_insn_raw (reg_index_rtx);
++      SET_NEXT_INSN (load_insn_2) = post_index_insn;
++      SET_NEXT_INSN (post_index_insn) = NULL;
++    }
++  else if (!post_index && reg_index_rtx)
++    {
++      rtx_insn *pre_index = make_insn_raw (reg_index_rtx);
++      SET_NEXT_INSN (pre_index) = insn_seq;
++      insn_seq = pre_index;
++    }
+ 
+-void
+-pass_split_complex_instructions::split_complex_insn (rtx_insn *insn)
+-{
+-  complex_instructions_t insn_type = get_insn_type (insn);
+-  /* TODO: Add splitting of STP instructions.  */
+-  if (insn_type == LDP || insn_type == STP)
+-    split_ldp_stp (insn);
+-  else if (insn_type == LDP_TI)
+-    split_ldp_ti (insn);
+-  else
+-    gcc_assert (false && "Unsupported type of insn to split");
++  replace_insn (insn, insn_seq);
+ }
+ 
+-pass_split_complex_instructions::complex_instructions_t
++pass_split_complex_instructions::mem_access_insn_t
+ pass_split_complex_instructions::get_insn_type (rtx_insn *insn)
+ {
+   if (!INSN_P (insn))
+     return UNDEFINED;
+ 
+-  rtx pat = PATTERN (insn);
+-  int icode = recog (PATTERN (insn), insn, NULL);
++  int icode = INSN_CODE (insn);
++  if (icode == -1)
++    icode = recog (PATTERN (insn), insn, 0);
++  bool has_wb = false;
++
++  if (targetm.is_ldp_insn (icode, &has_wb))
++    return (has_wb ? LDP_WB : LDP);
+ 
+-  if (GET_CODE (pat) == PARALLEL)
++  if (targetm.is_stp_insn (icode, &has_wb))
++    return (has_wb ? STP_WB : STP);
++
++  rtx set_insn = single_set (insn);
++  if (set_insn && (GET_MODE (SET_SRC (set_insn)) == E_TImode
++      || GET_MODE (SET_DEST (set_insn)) == E_TImode))
+     {
+-      if (targetm.is_ldp_insn (icode))
+-	{
+-	  return LDP;
+-	}
+-      if (targetm.is_stp_insn (icode))
+-	{
+-	  return STP;
+-	}
+-      else
+-	{
+-	  return UNDEFINED;
+-	}
++      if (MEM_P (SET_SRC (set_insn)) && REG_P (SET_DEST (set_insn)))
++	return LDP_TI;
++      if (MEM_P (SET_DEST (set_insn)) && REG_P (SET_SRC (set_insn)))
++	return STP_TI;
+     }
+-  rtx set_insn = single_set (insn);
+-  if (set_insn && GET_CODE (XEXP (set_insn, 1)) == MEM
+-      && GET_MODE (XEXP (set_insn, 1)) == E_TImode)
+-    return LDP_TI;
++
++  if (set_insn && MEM_P (SET_DEST (set_insn)) && REG_P (SET_SRC (set_insn))
++      && GET_MODE (SET_DEST (set_insn)) != BLKmode)
++    return STR;
+ 
+   return UNDEFINED;
+ }
+diff --git a/gcc/target.def b/gcc/target.def
+index a3a50b474..8797a21d5 100644
+--- a/gcc/target.def
++++ b/gcc/target.def
+@@ -2679,13 +2679,19 @@ modes and they have different conditional execution capability, such as ARM.",
+ 
+ DEFHOOK
+ (is_ldp_insn,
+-  "Return true if icode is corresponding to any of the LDP instruction types.",
+-  bool, (int icode), NULL)
++ "Return true if @var{icode} is corresponding to any of the LDP instruction\n\
++types.  If @var{has_wb} is not NULL then its value is set to true if LDP\n\
++contains post-index or pre-index operation.",
++  bool, (int icode, bool *has_wb),
++  NULL)
+ 
+ DEFHOOK
+ (is_stp_insn,
+-  "Return true if icode is corresponding to any of the STP instruction types.",
+-  bool, (int icode), NULL)
++ "Return true if @var{icode} is corresponding to any of the STP instruction\n\
++types.  If @var{has_wb} is not NULL then its value is set to true if STP\n\
++contains post-index or pre-index operation.",
++  bool, (int icode, bool *has_wb),
++  NULL)
+ 
+ DEFHOOK
+ (gen_ccmp_first,
+diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c
+index 3918d43f6..2d42231dc 100644
+--- a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c
++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-dont-split.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target aarch64-*-* } } */
+-/* { dg-additional-options "-fsplit-ldp-stp" } */
++/* { dg-additional-options "-O1 -fsplit-ldp-stp" } */
+ /*
+  *    Tests are:
+  *          Patterns where LDP insns should NOT be split
+@@ -15,6 +15,9 @@ simple_ldp_after_store ()
+       (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
+       (cinsn 228 (set (reg/i:DI sp) 
+                    (reg/i:DI x0)))
++      (cinsn 238 (set (reg/i:DI x1)
++                   (reg/i:DI x0)))
++
+       (cinsn 101 (set (mem/c:DI
+                         (plus:DI (reg/f:DI sp)
+                           (const_int 32))[1 S4 A32])(reg:DI x0)))
+@@ -24,11 +27,27 @@ simple_ldp_after_store ()
+         (set (reg:DI x30)
+           (mem:DI (plus:DI (reg/f:DI sp)
+             (const_int 16)) [1 S4 A32]))]))
+-      (cinsn 11 (use (reg/i:DI sp)))
+-      (cinsn 12 (use (reg/i:DI cc)))
+-      (cinsn 13 (use (reg/i:DI x29)))
+-      (cinsn 14 (use (reg/i:DI x30)))
+-      (cinsn 15 (use (reg/i:DI x0)))
++      (cinsn 11 (use (reg/i:DI x29)))
++      (cinsn 12 (use (reg/i:DI x30)))
++
++      /* stp x0, x2, [x1].  */
++      (cinsn 102 (parallel [
++        (set (mem:DI (reg/f:DI x1) [1 S4 A32])
++             (reg:DI x0))
++        (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 8)) [1 S4 A32])
++             (reg:DI x2))]))
++      /* ldp x5, x6, [x1].  */
++      (cinsn 13 (parallel [
++        (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32]))
++        (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1)
++                                          (const_int 8)) [1 S4 A32]))
++      ]))
++      (cinsn 14 (use (reg/i:DI x5)))
++      (cinsn 15 (use (reg/i:DI x6)))
++
++      (cinsn 100 (use (reg/i:DI sp)))
++      (cinsn 200 (use (reg/i:DI cc)))
++      (cinsn 300 (use (reg/i:DI x0)))
+       (edge-to exit (flags "FALLTHRU"))
+     ) ;; block 2
+   ) ;; insn-chain
+@@ -70,5 +89,5 @@ ldp_after_store_in_different_bb ()
+ ) ;; function "ldp_after_store_in_different_bb"
+ }
+ 
+-/* Verify that the output code contains exactly 2 ldp.  */
+-/* { dg-final { scan-assembler-times {ldp\t} 2 } }  */
+\ No newline at end of file
++/* Verify that the output code contains exactly 3 ldp.  */
++/* { dg-final { scan-assembler-times {ldp\t} 3 } }  */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c
+index 653c30f83..59ff82df9 100644
+--- a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c
++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split-rearrange.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target aarch64-*-* } } */
+-/* { dg-additional-options "-fsplit-ldp-stp" } */
++/* { dg-additional-options "-O1 -fsplit-ldp-stp" } */
+ /*
+  *    Test is:
+  *        Pattern where LDP insns should be split with rearrangement in order
+diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c
+index dc9f26efb..e25762160 100644
+--- a/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c
++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/test-ldp-split.c
+@@ -13,48 +13,131 @@ simple_ldp_after_store ()
+     (block 2
+       (edge-from entry (flags "FALLTHRU"))
+       (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      /* mov sp, x0.  */
+       (cinsn 228 (set (reg/i:DI sp)
+-                   (reg/i:DI x0)))
++                      (reg/i:DI x0)))
++      /* mov x1, x0.  */
+       (cinsn 238 (set (reg/i:DI x1)
+-                   (reg/i:DI x0)))
++                      (reg/i:DI x0)))
+ 
++      /* str x0, [sp, 8].  */
+       (cinsn 101 (set (mem/c:DI
+                         (plus:DI (reg/f:DI sp)
+                           (const_int 8))[1 S4 A32])(reg:DI x0)))
++      /* ldp x29, x30, [sp, 8].  */
+       (cinsn 10 (parallel [
+         (set (reg:DI x29)
+           (mem:DI (plus:DI (reg/f:DI sp) (const_int 8)) [1 S4 A32]))
+         (set (reg:DI x30)
+           (mem:DI (plus:DI (reg/f:DI sp)
+             (const_int 16)) [1 S4 A32]))]))
++      (cinsn 11 (use (reg/i:DI x29)))
++      (cinsn 12 (use (reg/i:DI x30)))
+ 
++      /* str x0, [x1, -16].  */
+       (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x1)
+                                           (const_int -16)) [1 S4 A32])
+                       (reg:DI x0)))
+-      (cinsn 11 (parallel [
++      /* ldp x3, x4, [x1, -16].  */
++      (cinsn 13 (parallel [
+         (set (reg:DI x3)
+           (mem:DI (plus:DI (reg/f:DI x1) (const_int -16)) [1 S4 A32]))
+         (set (reg:DI x4)
+           (mem:DI (plus:DI (reg/f:DI x1) (const_int -8)) [1 S4 A32]))
+       ]))
++      (cinsn 14 (use (reg/i:DI x3)))
++      (cinsn 15 (use (reg/i:DI x4)))
+ 
++      /* str x0, [x1].  */
+       (cinsn 103 (set (mem/c:DI (reg/f:DI x1) [1 S4 A32])
+                       (reg:DI x0)))
+-      (cinsn 12 (parallel [
++      /* ldp x5, x6, [x1].  */
++      (cinsn 16 (parallel [
+         (set (reg:DI x5) (mem:DI (reg/f:DI x1) [1 S4 A32]))
+         (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1)
+                                           (const_int 8)) [1 S4 A32]))
+       ]))
++      (cinsn 17 (use (reg/i:DI x5)))
++      (cinsn 18 (use (reg/i:DI x6)))
+ 
+-      (cinsn 13 (use (reg/i:DI sp)))
+-      (cinsn 14 (use (reg/i:DI cc)))
+-      (cinsn 15 (use (reg/i:DI x29)))
+-      (cinsn 16 (use (reg/i:DI x30)))
+-      (cinsn 17 (use (reg/i:DI x0)))
+-      (cinsn 18 (use (reg/i:DI x3)))
+-      (cinsn 19 (use (reg/i:DI x4)))
+-      (cinsn 20 (use (reg/i:DI x5)))
+-      (cinsn 21 (use (reg/i:DI x6)))
++      /* ldp x29, x30, [sp], 96.  */
++      (cinsn 19 (parallel [
++        (set (reg/f:DI sp)
++          (plus:DI (reg/f:DI sp) (const_int 96)))
++        (set (reg:DI x29)
++          (mem:DI (reg/f:DI sp) [1 S4 A32]))
++        (set (reg:DI x30)
++          (mem:DI (plus:DI (reg/f:DI sp)
++            (const_int 8)) [1 S4 A32]))]))
++      (cinsn 20 (use (reg/i:DI x29)))
++      (cinsn 21 (use (reg/i:DI x30)))
++
++      /* stp x0, x2, [x1, 128].  */
++      (cinsn 104 (parallel [
++        (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 128)) [1 S4 A32])
++             (reg:DI x0))
++        (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 136)) [1 S4 A32])
++             (reg:DI x2))]))
++      /* ldp x29, x30, [x1, 120].  */
++      (cinsn 22 (parallel [
++        (set (reg:DI x29)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int 120)) [1 S4 A32]))
++        (set (reg:DI x30)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int 128)) [1 S4 A32]))]))
++      (cinsn 23 (use (reg/i:DI x29)))
++      (cinsn 24 (use (reg/i:DI x30)))
++
++      /* stp x0, x2, [x1, 128].  */
++      (cinsn 105 (parallel [
++        (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 128)) [1 S4 A32])
++             (reg:DI x0))
++        (set (mem:DI (plus:DI (reg/f:DI x1) (const_int 136)) [1 S4 A32])
++             (reg:DI x2))]))
++      /* ldp x3, x4, [x1, 136].  */
++      (cinsn 25 (parallel [
++        (set (reg:DI x3)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int 136)) [1 S4 A32]))
++        (set (reg:DI x4)
++          (mem:DI (plus:DI (reg/f:DI x1) (const_int 144)) [1 S4 A32]))
++      ]))
++      (cinsn 26 (use (reg/i:DI x3)))
++      (cinsn 27 (use (reg/i:DI x4)))
++
++      /* stp w0, w2, [x1, 32].  */
++      (cinsn 106 (parallel [
++        (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 32)) [1 S4 A32])
++             (reg:SI x0))
++        (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 36)) [1 S4 A32])
++             (reg:SI x2))]))
++      /* ldp x5, x6, [x1, 32].  */
++      (cinsn 28 (parallel [
++        (set (reg:DI x5) (mem:DI (plus:DI (reg/f:DI x1)
++                                          (const_int 32)) [1 S4 A32]))
++        (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1)
++                                          (const_int 40)) [1 S4 A32]))
++      ]))
++      (cinsn 29 (use (reg/i:DI x5)))
++      (cinsn 30 (use (reg/i:DI x6)))
++
++      /* stp w0, w2, [x1, 40].  */
++      (cinsn 107 (parallel [
++        (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 40)) [1 S4 A32])
++             (reg:SI x0))
++        (set (mem:SI (plus:DI (reg/f:DI x1) (const_int 44)) [1 S4 A32])
++             (reg:SI x2))]))
++      /* ldp x5, x6, [x1, 32].  */
++      (cinsn 31 (parallel [
++        (set (reg:DI x5) (mem:DI (plus:DI (reg/f:DI x1)
++                                          (const_int 32)) [1 S4 A32]))
++        (set (reg:DI x6) (mem:DI (plus:DI (reg/f:DI x1)
++                                          (const_int 40)) [1 S4 A32]))
++      ]))
++      (cinsn 32 (use (reg/i:DI x5)))
++      (cinsn 33 (use (reg/i:DI x6)))
++
++      (cinsn 100 (use (reg/i:DI sp)))
++      (cinsn 200 (use (reg/i:DI cc)))
++      (cinsn 400 (use (reg/i:DI x0)))
+       (edge-to exit (flags "FALLTHRU"))
+     ) ;; block 2
+   ) ;; insn-chain
+@@ -69,43 +152,83 @@ ldp_ti_after_store ()
+       (block 2
+       (edge-from entry (flags "FALLTHRU"))
+       (cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++      /* mov sp, x0.  */
+       (cinsn 228 (set (reg/i:DI sp)
+-                   (reg/i:DI x0)))
++                      (reg/i:DI x0)))
++      /* mov x2, x0.  */
+       (cinsn 238 (set (reg/i:DI x2)
+-                   (reg/i:DI x0)))
+-
++                      (reg/i:DI x0)))
++      /* str x0, [sp, 136].  */
+       (cinsn 101 (set (mem/c:DI
+                         (plus:DI (reg/f:DI sp)
+                           (const_int 136))[1 S4 A32])(reg:DI x0)))
+-      (insn 81 (set (reg:TI x0 [1 S4 A32])
++      /* ldp x0, x1, [sp, 136].  */
++      (cinsn 81 (set (reg:TI x0 [1 S4 A32])
+               (mem/c:TI (plus:DI (reg/f:DI sp)
+-                      (const_int 136 )) [1 S4 A32]))
+-           (expr_list:REG_EQUIV (mem/c:TI (plus:DI (reg/f:DI sfp)
+-                      (const_int -24 )) [1 S4 A32])
+-              (nil)))
+-
++                      (const_int 136)) [1 S4 A32])))
++      /* str x0, [x2, -16].  */
+       (cinsn 102 (set (mem/c:DI (plus:DI (reg/f:DI x2)
+-                                          (const_int -16)) [1 S4 A32])
++                                         (const_int -16)) [1 S4 A32])
+                       (reg:DI x0)))
+-      (insn 82 (set (reg:TI x3 [1 S4 A32])
++      /* ldp x3, x4, [x2, -16].  */
++      (cinsn 82 (set (reg:TI x3 [1 S4 A32])
+                     (mem/c:TI (plus:DI (reg/f:DI x2)
+-                                        (const_int -16)) [1 S4 A32])))
+-
++                                       (const_int -16)) [1 S4 A32])))
++      /* str x0, [x2].  */
+       (cinsn 103 (set (mem/c:DI (reg/f:DI x2) [1 S4 A32])
+                       (reg:DI x0)))
+-      (insn 83 (set (reg:TI x5 [1 S4 A32])
++      /* ldp x5, x6, [x2].  */
++      (cinsn 83 (set (reg:TI x5 [1 S4 A32])
+                     (mem/c:TI (reg/f:DI x2) [1 S4 A32])))
+ 
++      /* stp x0, x1, [sp, -8].  */
++      (cinsn 104 (set (mem:TI (plus:DI (reg/v/f:DI sp)
++                                       (const_int -8)) [1 S4 A32])
++                      (reg:TI x0)))
++      /* ldp x5, x6, [sp], -16.  */
++      (cinsn 84 (set (reg/v:TI x5 [1 S4 A32])
++                    (mem:TI (post_dec:DI (reg/v/f:DI sp)) [1 S4 A32])))
++      (cinsn 85 (use (reg/i:DI x5)))
++      (cinsn 86 (use (reg/i:DI x6)))
++
++      /* stp x0, x1, [sp, 8].  */
++      (cinsn 105 (set (mem:TI (plus:DI (reg/v/f:DI sp)
++                                       (const_int 8)) [1 S4 A32])
++                      (reg:TI x0)))
++      /* ldp x5, x6, [sp], -16.  */
++      (cinsn 87 (set (reg/v:TI x5 [1 S4 A32])
++                    (mem:TI (post_dec:DI (reg/v/f:DI sp)) [1 S4 A32])))
++      (cinsn 88 (use (reg/i:DI x5)))
++      (cinsn 89 (use (reg/i:DI x6)))
++
++      /* Intersects with insn 102.  */
++      /* ldp x2, x3, [x2, -16]!.  */
++      (cinsn 90 (set (reg/v:TI x2 [1 S4 A32])
++                    (mem:TI (pre_dec:DI (reg/v/f:DI x2)) [1 S4 A32])))
++      (cinsn 91 (use (reg/i:DI x2)))
++      (cinsn 92 (use (reg/i:DI x3)))
++
++      /* mov x2, x0.  */
++      (cinsn 248 (set (reg/i:DI x2)
++                      (reg/i:DI x0)))
++      /* str x0, [x2, 16].  */
++      (cinsn 106 (set (mem:DI (plus:DI (reg/v/f:DI x2)
++                                       (const_int 16)) [1 S4 A32])
++                      (reg:DI x0)))
++      /* ldp x3, x4, [x2, 16]!.  */
++      (cinsn 93 (set (reg/v:TI x3 [1 S4 A32])
++                    (mem:TI (pre_inc:DI (reg/v/f:DI x2)) [1 S4 A32])))
++      (cinsn 94 (use (reg/i:DI x3)))
++      (cinsn 95 (use (reg/i:DI x4)))
++
+       (cinsn 11 (use (reg/i:DI sp)))
+       (cinsn 12 (use (reg/i:DI cc)))
+       (cinsn 13 (use (reg/i:DI x29)))
+       (cinsn 14 (use (reg/i:DI x30)))
+       (cinsn 15 (use (reg/i:DI x0)))
+       (cinsn 16 (use (reg/i:DI x3)))
+-      (cinsn 17 (use (reg/i:DI x5)))
+       (cinsn 18 (use (reg/i:DI x1)))
+       (cinsn 19 (use (reg/i:DI x4)))
+-      (cinsn 20 (use (reg/i:DI x6)))
+       (edge-to exit (flags "FALLTHRU"))
+     ) ;; block 2
+   ) ;; insn-chain
+-- 
+2.33.0
+
diff --git a/0043-LoongArch-Optimize-LSX-vector-shuffle-on-floating-po.patch b/0043-LoongArch-Optimize-LSX-vector-shuffle-on-floating-po.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0a24290b1e6ac9d322bb2e0bfb88dc4d10e55210
--- /dev/null
+++ b/0043-LoongArch-Optimize-LSX-vector-shuffle-on-floating-po.patch
@@ -0,0 +1,148 @@
+From cdea7c114fa48012705d65134276619b5679fa35 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 19 Nov 2023 06:12:22 +0800
+Subject: [PATCH 043/188] LoongArch: Optimize LSX vector shuffle on
+ floating-point vector
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+The vec_perm expander was wrongly defined.  GCC internal says:
+
+Operand 3 is the “selector”.  It is an integral mode vector of the same
+width and number of elements as mode M.
+
+But we made operand 3 in the same mode as the shuffled vectors, so it
+would be a FP mode vector if the shuffled vectors are FP mode.
+
+With this mistake, the generic code manages to work around and it ends
+up creating some very nasty code for a simple __builtin_shuffle (a, b,
+c) where a and b are V4SF, c is V4SI:
+
+    la.local    $r12,.LANCHOR0
+    la.local    $r13,.LANCHOR1
+    vld $vr1,$r12,48
+    vslli.w $vr1,$vr1,2
+    vld $vr2,$r12,16
+    vld $vr0,$r13,0
+    vld $vr3,$r13,16
+    vshuf.b $vr0,$vr1,$vr1,$vr0
+    vld $vr1,$r12,32
+    vadd.b  $vr0,$vr0,$vr3
+    vandi.b $vr0,$vr0,31
+    vshuf.b $vr0,$vr1,$vr2,$vr0
+    vst $vr0,$r12,0
+    jr  $r1
+
+This is obviously stupid.  Fix the expander definition and adjust
+loongarch_expand_vec_perm to handle it correctly.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lsx.md (vec_perm): Make the
+	selector VIMODE.
+	* config/loongarch/loongarch.cc (loongarch_expand_vec_perm):
+	Use the mode of the selector (instead of the shuffled vector)
+	for truncating it.  Operate on subregs in the selector mode if
+	the shuffled vector has a different mode (i. e. it's a
+	floating-point vector).
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vect-shuf-fp.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc              | 18 ++++++++++--------
+ gcc/config/loongarch/lsx.md                    |  2 +-
+ .../gcc.target/loongarch/vect-shuf-fp.c        | 16 ++++++++++++++++
+ 3 files changed, 27 insertions(+), 9 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-shuf-fp.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 33d23a731..d95ac68e8 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -8603,8 +8603,9 @@ void
+ loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
+ {
+   machine_mode vmode = GET_MODE (target);
++  machine_mode vimode = GET_MODE (sel);
+   auto nelt = GET_MODE_NUNITS (vmode);
+-  auto round_reg = gen_reg_rtx (vmode);
++  auto round_reg = gen_reg_rtx (vimode);
+   rtx round_data[MAX_VECT_LEN];
+ 
+   for (int i = 0; i < nelt; i += 1)
+@@ -8612,9 +8613,16 @@ loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
+       round_data[i] = GEN_INT (0x1f);
+     }
+ 
+-  rtx round_data_rtx = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, round_data));
++  rtx round_data_rtx = gen_rtx_CONST_VECTOR (vimode, gen_rtvec_v (nelt, round_data));
+   emit_move_insn (round_reg, round_data_rtx);
+ 
++  if (vmode != vimode)
++    {
++      target = lowpart_subreg (vimode, target, vmode);
++      op0 = lowpart_subreg (vimode, op0, vmode);
++      op1 = lowpart_subreg (vimode, op1, vmode);
++    }
++
+   switch (vmode)
+     {
+     case E_V16QImode:
+@@ -8622,17 +8630,11 @@ loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
+       emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel));
+       break;
+     case E_V2DFmode:
+-      emit_insn (gen_andv2di3 (sel, sel, round_reg));
+-      emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0));
+-      break;
+     case E_V2DImode:
+       emit_insn (gen_andv2di3 (sel, sel, round_reg));
+       emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0));
+       break;
+     case E_V4SFmode:
+-      emit_insn (gen_andv4si3 (sel, sel, round_reg));
+-      emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0));
+-      break;
+     case E_V4SImode:
+       emit_insn (gen_andv4si3 (sel, sel, round_reg));
+       emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0));
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 8ea41c85b..5e8d8d74b 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -837,7 +837,7 @@
+  [(match_operand:LSX 0 "register_operand")
+   (match_operand:LSX 1 "register_operand")
+   (match_operand:LSX 2 "register_operand")
+-  (match_operand:LSX 3 "register_operand")]
++  (match_operand: 3 "register_operand")]
+   "ISA_HAS_LSX"
+ {
+   loongarch_expand_vec_perm (operands[0], operands[1],
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-shuf-fp.c b/gcc/testsuite/gcc.target/loongarch/vect-shuf-fp.c
+new file mode 100644
+index 000000000..7acc2113a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-shuf-fp.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-mlasx -O3" } */
++/* { dg-final { scan-assembler "vshuf\.w" } } */
++
++#define V __attribute__ ((vector_size (16)))
++
++int a V;
++float b V;
++float c V;
++float d V;
++
++void
++test (void)
++{
++  d = __builtin_shuffle (b, c, a);
++}
+-- 
+2.43.0
+
diff --git a/0044-LoongArch-Optimize-the-loading-of-immediate-numbers-.patch b/0044-LoongArch-Optimize-the-loading-of-immediate-numbers-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9721a38d1250b21b9e6db9f32fe58a57af1cc5b8
--- /dev/null
+++ b/0044-LoongArch-Optimize-the-loading-of-immediate-numbers-.patch
@@ -0,0 +1,112 @@
+From aaf58efe8414a4eaceb6721d9c242df710d1762c Mon Sep 17 00:00:00 2001
+From: Guo Jie 
+Date: Thu, 23 Nov 2023 11:04:17 +0800
+Subject: [PATCH 044/188] LoongArch: Optimize the loading of immediate numbers
+ with the same high and low 32-bit values
+
+For the following immediate load operation in gcc/testsuite/gcc.target/loongarch/imm-load1.c:
+
+	long long r = 0x0101010101010101;
+
+Before this patch:
+
+	lu12i.w	    $r15,16842752>>12
+	ori	    $r15,$r15,257
+	lu32i.d	    $r15,0x1010100000000>>32
+	lu52i.d	    $r15,$r15,0x100000000000000>>52
+
+After this patch:
+
+	lu12i.w     $r15,16842752>>12
+	ori         $r15,$r15,257
+	bstrins.d   $r15,$r15,63,32
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(enum loongarch_load_imm_method): Add new method.
+	(loongarch_build_integer): Add relevant implementations for
+	new method.
+	(loongarch_move_integer): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/imm-load1.c: Change old check.
+---
+ gcc/config/loongarch/loongarch.cc             | 22 ++++++++++++++++++-
+ .../gcc.target/loongarch/imm-load1.c          |  3 ++-
+ 2 files changed, 23 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index d95ac68e8..048d3802b 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -142,12 +142,16 @@ struct loongarch_address_info
+ 
+    METHOD_LU52I:
+      Load 52-63 bit of the immediate number.
++
++   METHOD_MIRROR:
++     Copy 0-31 bit of the immediate number to 32-63bit.
+ */
+ enum loongarch_load_imm_method
+ {
+   METHOD_NORMAL,
+   METHOD_LU32I,
+-  METHOD_LU52I
++  METHOD_LU52I,
++  METHOD_MIRROR
+ };
+ 
+ struct loongarch_integer_op
+@@ -1553,11 +1557,23 @@ loongarch_build_integer (struct loongarch_integer_op *codes,
+ 
+       int sign31 = (value & (HOST_WIDE_INT_1U << 31)) >> 31;
+       int sign51 = (value & (HOST_WIDE_INT_1U << 51)) >> 51;
++
++      uint32_t hival = (uint32_t) (value >> 32);
++      uint32_t loval = (uint32_t) value;
++
+       /* Determine whether the upper 32 bits are sign-extended from the lower
+ 	 32 bits. If it is, the instructions to load the high order can be
+ 	 ommitted.  */
+       if (lu32i[sign31] && lu52i[sign31])
+ 	return cost;
++      /* If the lower 32 bits are the same as the upper 32 bits, just copy
++	 the lower 32 bits to the upper 32 bits.  */
++      else if (loval == hival)
++	{
++	  codes[cost].method = METHOD_MIRROR;
++	  codes[cost].curr_value = value;
++	  return cost + 1;
++	}
+       /* Determine whether bits 32-51 are sign-extended from the lower 32
+ 	 bits. If so, directly load 52-63 bits.  */
+       else if (lu32i[sign31])
+@@ -3230,6 +3246,10 @@ loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
+ 			   gen_rtx_AND (DImode, x, GEN_INT (0xfffffffffffff)),
+ 			   GEN_INT (codes[i].value));
+ 	  break;
++	case METHOD_MIRROR:
++	  gcc_assert (mode == DImode);
++	  emit_insn (gen_insvdi (x, GEN_INT (32), GEN_INT (32), x));
++	  break;
+ 	default:
+ 	  gcc_unreachable ();
+ 	}
+diff --git a/gcc/testsuite/gcc.target/loongarch/imm-load1.c b/gcc/testsuite/gcc.target/loongarch/imm-load1.c
+index 2ff029712..f64cc2956 100644
+--- a/gcc/testsuite/gcc.target/loongarch/imm-load1.c
++++ b/gcc/testsuite/gcc.target/loongarch/imm-load1.c
+@@ -1,6 +1,7 @@
+ /* { dg-do compile } */
+ /* { dg-options "-mabi=lp64d -O2" } */
+-/* { dg-final { scan-assembler "test:.*lu52i\.d.*\n\taddi\.w.*\n\.L2:" } } */
++/* { dg-final { scan-assembler-not "test:.*lu52i\.d.*\n\taddi\.w.*\n\.L2:" } } */
++/* { dg-final { scan-assembler "test:.*lu12i\.w.*\n\tbstrins\.d.*\n\.L2:" } } */
+ 
+ 
+ extern long long b[10];
+-- 
+2.43.0
+
diff --git a/0044-Port-maxmin-patch-to-GCC-12.patch b/0044-Port-maxmin-patch-to-GCC-12.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2423c12ca70fdc1daa4d4d6cc9000e336238c541
--- /dev/null
+++ b/0044-Port-maxmin-patch-to-GCC-12.patch
@@ -0,0 +1,378 @@
+From a3013c074cd2ab5f71eb98a587a627f38c68656c Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Thu, 22 Feb 2024 17:07:24 +0800
+Subject: [PATCH 12/18] Port maxmin patch to GCC 12
+
+---
+ gcc/config/aarch64/aarch64-simd.md    | 256 ++++++++++++++++++++++++++
+ gcc/config/aarch64/predicates.md      |  19 ++
+ gcc/testsuite/gcc.dg/combine-maxmin.c |  46 +++++
+ 3 files changed, 321 insertions(+)
+ create mode 100755 gcc/testsuite/gcc.dg/combine-maxmin.c
+
+diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
+index 82f73805f..de92802f5 100644
+--- a/gcc/config/aarch64/aarch64-simd.md
++++ b/gcc/config/aarch64/aarch64-simd.md
+@@ -1138,6 +1138,82 @@
+   [(set_attr "type" "neon_compare,neon_shift_imm")]
+ )
+ 
++;; Simplify the extension with following truncation for shift+neg operation.
++
++(define_insn_and_split "*aarch64_sshr_neg_v8hi"
++  [(set (match_operand:V8HI 0 "register_operand" "=w")
++	(vec_concat:V8HI
++	  (truncate:V4HI
++	    (ashiftrt:V4SI
++	      (neg:V4SI
++		(sign_extend:V4SI
++		  (vec_select:V4HI
++		    (match_operand:V8HI 1 "register_operand")
++		    (match_operand:V8HI 3 "vect_par_cnst_lo_half"))))
++	      (match_operand:V4SI 2 "maxmin_arith_shift_operand")))
++	  (truncate:V4HI
++	    (ashiftrt:V4SI
++	      (neg:V4SI
++		(sign_extend:V4SI
++		  (vec_select:V4HI
++		    (match_dup 1)
++		    (match_operand:V8HI 4 "vect_par_cnst_hi_half"))))
++	      (match_dup 2)))))]
++  "TARGET_SIMD"
++  "#"
++  "&& true"
++  [(set (match_operand:V8HI 0 "register_operand" "=w")
++	(ashiftrt:V8HI
++	  (neg:V8HI
++	    (match_operand:V8HI 1 "register_operand" "w"))
++	  (match_operand:V8HI 2 "aarch64_simd_imm_minus_one")))]
++  {
++    /* Reduce the shift amount to smaller mode.  */
++    int val = INTVAL (CONST_VECTOR_ENCODED_ELT (operands[2], 0))
++	      - (GET_MODE_UNIT_BITSIZE (GET_MODE (operands[2])) / 2);
++    operands[2] = aarch64_simd_gen_const_vector_dup (V8HImode, val);
++  }
++  [(set_attr "type" "multiple")]
++)
++
++;; The helper definition that allows combiner to use the previous pattern.
++
++(define_insn_and_split "*aarch64_sshr_neg_tmpv8hi"
++  [(set (match_operand:V8HI 0 "register_operand" "=w")
++	(vec_concat:V8HI
++	  (truncate:V4HI
++	    (ashiftrt:V4SI
++	      (neg:V4SI
++		(match_operand:V4SI 1 "register_operand" "w"))
++	      (match_operand:V4SI 2 "maxmin_arith_shift_operand")))
++	  (truncate:V4HI
++	    (ashiftrt:V4SI
++	      (neg:V4SI
++		(match_operand:V4SI 3 "register_operand" "w"))
++	      (match_dup 2)))))]
++  "TARGET_SIMD"
++  "#"
++  "&& true"
++  [(set (match_operand:V4SI 1 "register_operand" "=w")
++	(ashiftrt:V4SI
++	  (neg:V4SI
++	    (match_dup 1))
++	  (match_operand:V4SI 2 "maxmin_arith_shift_operand")))
++   (set (match_operand:V4SI 3 "register_operand" "=w")
++	(ashiftrt:V4SI
++	  (neg:V4SI
++	    (match_dup 3))
++	  (match_dup 2)))
++   (set (match_operand:V8HI 0 "register_operand" "=w")
++	(vec_concat:V8HI
++	  (truncate:V4HI
++	    (match_dup 1))
++	  (truncate:V4HI
++	    (match_dup 3))))]
++  ""
++  [(set_attr "type" "multiple")]
++)
++
+ (define_insn "*aarch64_simd_sra"
+  [(set (match_operand:VDQ_I 0 "register_operand" "=w")
+ 	(plus:VDQ_I
+@@ -1714,6 +1790,26 @@
+  }
+ )
+ 
++(define_insn "vec_pack_trunc_shifted_"
++ [(set (match_operand: 0 "register_operand" "=&w")
++       (vec_concat:
++	 (truncate:
++	   (ashiftrt:VQN (match_operand:VQN 1 "register_operand" "w")
++	      (match_operand:VQN 2 "half_size_operand" "w")))
++	 (truncate:
++	   (ashiftrt:VQN (match_operand:VQN 3 "register_operand" "w")
++	      (match_operand:VQN 4 "half_size_operand" "w")))))]
++ "TARGET_SIMD"
++ {
++   if (BYTES_BIG_ENDIAN)
++     return "uzp2\\t%0., %3., %1.";
++   else
++     return "uzp2\\t%0., %1., %3.";
++ }
++  [(set_attr "type" "neon_permute")
++   (set_attr "length" "4")]
++)
++
+ (define_insn "aarch64_shrn_insn_le"
+   [(set (match_operand: 0 "register_operand" "=w")
+ 	(vec_concat:
+@@ -6652,6 +6748,166 @@
+   [(set_attr "type" "neon_tst")]
+ )
+ 
++;; Simplify the extension with following truncation for cmtst-like operation.
++
++(define_insn_and_split "*aarch64_cmtst_arith_v8hi"
++  [(set (match_operand:V8HI 0 "register_operand" "=w")
++	(vec_concat:V8HI
++	  (plus:V4HI
++	    (truncate:V4HI
++	      (eq:V4SI
++		(sign_extend:V4SI
++		  (vec_select:V4HI
++		    (and:V8HI
++		      (match_operand:V8HI 1 "register_operand")
++		      (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin"))
++		    (match_operand:V8HI 3 "vect_par_cnst_lo_half")))
++		(match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero")))
++	    (match_operand:V4HI 5 "aarch64_simd_imm_minus_one"))
++	  (plus:V4HI
++	    (truncate:V4HI
++	      (eq:V4SI
++		(sign_extend:V4SI
++		  (vec_select:V4HI
++		    (and:V8HI
++		      (match_dup 1)
++		      (match_dup 2))
++		    (match_operand:V8HI 6 "vect_par_cnst_hi_half")))
++		(match_dup 4)))
++	    (match_dup 5))))]
++  "TARGET_SIMD && !reload_completed"
++  "#"
++  "&& true"
++  [(set (match_operand:V8HI 6 "register_operand" "=w")
++	(match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin"))
++   (set (match_operand:V8HI 0 "register_operand" "=w")
++	(plus:V8HI
++	  (eq:V8HI
++	    (and:V8HI
++	      (match_operand:V8HI 1 "register_operand" "w")
++	      (match_dup 6))
++	    (match_operand:V8HI 4 "aarch64_simd_imm_zero"))
++	  (match_operand:V8HI 5 "aarch64_simd_imm_minus_one")))]
++  {
++    if (can_create_pseudo_p ())
++      {
++	int val = INTVAL (CONST_VECTOR_ENCODED_ELT (operands[4], 0));
++	operands[4] = aarch64_simd_gen_const_vector_dup (V8HImode, val);
++	int val2 = INTVAL (CONST_VECTOR_ENCODED_ELT (operands[5], 0));
++	operands[5] = aarch64_simd_gen_const_vector_dup (V8HImode, val2);
++
++	operands[6] = gen_reg_rtx (V8HImode);
++      }
++    else
++      FAIL;
++  }
++  [(set_attr "type" "neon_tst_q")]
++)
++
++;; Three helper definitions that allow combiner to use the previous pattern.
++
++(define_insn_and_split "*aarch64_cmtst_arith_tmp_lo_v8hi"
++  [(set (match_operand:V4SI 0 "register_operand" "=w")
++	(neg:V4SI
++	  (eq:V4SI
++	    (sign_extend:V4SI
++	      (vec_select:V4HI
++		(and:V8HI
++		  (match_operand:V8HI 1 "register_operand")
++		  (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin"))
++		(match_operand:V8HI 3 "vect_par_cnst_lo_half")))
++	    (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))))]
++  "TARGET_SIMD && !reload_completed"
++  "#"
++  "&& true"
++  [(set (match_operand:V8HI 5 "register_operand" "=w")
++	(and:V8HI
++	  (match_operand:V8HI 1 "register_operand")
++	  (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin")))
++   (set (match_operand:V4SI 0 "register_operand" "=w")
++	(sign_extend:V4SI
++	  (vec_select:V4HI
++	    (match_dup 5)
++	    (match_operand:V8HI 3 "vect_par_cnst_lo_half"))))
++   (set (match_dup 0)
++	(neg:V4SI
++	  (eq:V4SI
++	    (match_dup 0)
++	    (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))))]
++  {
++    if (can_create_pseudo_p ())
++      operands[5] = gen_reg_rtx (V8HImode);
++    else
++      FAIL;
++  }
++  [(set_attr "type" "multiple")]
++)
++
++(define_insn_and_split "*aarch64_cmtst_arith_tmp_hi_v8hi"
++  [(set (match_operand:V4SI 0 "register_operand" "=w")
++	  (neg:V4SI
++	    (eq:V4SI
++	      (sign_extend:V4SI
++		(vec_select:V4HI
++		  (and:V8HI
++		    (match_operand:V8HI 1 "register_operand")
++		    (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin"))
++		  (match_operand:V8HI 3 "vect_par_cnst_hi_half")))
++	      (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))))]
++  "TARGET_SIMD && !reload_completed"
++  "#"
++  "&& true"
++  [(set (match_operand:V8HI 5 "register_operand" "=w")
++	(and:V8HI
++	  (match_operand:V8HI 1 "register_operand")
++	  (match_operand:V8HI 2 "aarch64_bic_imm_for_maxmin")))
++   (set (match_operand:V4SI 0 "register_operand" "=w")
++	(sign_extend:V4SI
++	  (vec_select:V4HI
++	    (match_dup 5)
++	    (match_operand:V8HI 3 "vect_par_cnst_hi_half"))))
++   (set (match_dup 0)
++	  (neg:V4SI
++	    (eq:V4SI
++	      (match_dup 0)
++	      (match_operand:V4SI 4 "aarch64_simd_or_scalar_imm_zero"))))]
++  {
++    if (can_create_pseudo_p ())
++      operands[5] = gen_reg_rtx (V8HImode);
++    else
++      FAIL;
++  }
++  [(set_attr "type" "multiple")]
++)
++
++(define_insn_and_split "*aarch64_cmtst_arith_tmpv8hi"
++  [(set (match_operand:V8HI 0 "register_operand" "=w")
++	(vec_concat:V8HI
++	  (truncate:V4HI
++	    (not:V4SI
++	      (match_operand:V4SI 1 "register_operand" "w")))
++	  (truncate:V4HI
++	    (not:V4SI
++	      (match_operand:V4SI 2 "register_operand" "w")))))]
++  "TARGET_SIMD"
++  "#"
++  "&& true"
++  [(set (match_operand:V4SI 1 "register_operand" "=w")
++	(not:V4SI
++	  (match_dup 1)))
++   (set (match_operand:V4SI 2 "register_operand" "=w")
++	(not:V4SI
++	  (match_dup 2)))
++   (set (match_operand:V8HI 0 "register_operand" "=w")
++	(vec_concat:V8HI
++	  (truncate:V4HI
++	    (match_dup 1))
++	  (truncate:V4HI
++	    (match_dup 2))))]
++  ""
++  [(set_attr "type" "multiple")]
++)
++
+ (define_insn_and_split "aarch64_cmtstdi"
+   [(set (match_operand:DI 0 "register_operand" "=w,r")
+ 	(neg:DI
+diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
+index 07c14aacb..1b8496c07 100644
+--- a/gcc/config/aarch64/predicates.md
++++ b/gcc/config/aarch64/predicates.md
+@@ -118,6 +118,25 @@
+ 	     (match_test "aarch64_simd_valid_immediate (op, NULL,
+ 							AARCH64_CHECK_ORR)"))))
+ 
++(define_predicate "aarch64_bic_imm_for_maxmin"
++   (match_code "const_vector")
++{
++  if (!aarch64_simd_valid_immediate (op, NULL, AARCH64_CHECK_BIC))
++    return false;
++  op = unwrap_const_vec_duplicate (op);
++  unsigned int size = GET_MODE_UNIT_BITSIZE (mode);
++  return CONST_INT_P (op)
++	 && ((~UINTVAL (op)) < (((long unsigned int) 1 << size) - 1));
++})
++
++(define_predicate "maxmin_arith_shift_operand"
++   (match_code "const_vector")
++{
++  op = unwrap_const_vec_duplicate (op);
++  unsigned int size = GET_MODE_UNIT_BITSIZE (mode) - 1;
++  return CONST_INT_P (op) && (UINTVAL (op) == size);
++})
++
+ (define_predicate "aarch64_reg_or_bic_imm"
+    (ior (match_operand 0 "register_operand")
+ 	(and (match_code "const_vector")
+diff --git a/gcc/testsuite/gcc.dg/combine-maxmin.c b/gcc/testsuite/gcc.dg/combine-maxmin.c
+new file mode 100755
+index 000000000..06bce7029
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/combine-maxmin.c
+@@ -0,0 +1,46 @@
++/* { dg-do compile { target aarch64-*-* } } */
++/* { dg-options "-O3 -fdump-rtl-combine-all" } */
++
++/* The test checks usage of smax/smin insns for clip evaluation and
++ * uzp1/uzp2 insns for vector element narrowing.  It's inspired by
++ * sources of x264 codec.  */
++
++typedef unsigned char uint8_t;
++typedef long int intptr_t;
++typedef signed short int int16_t;
++
++static __attribute__((always_inline)) inline uint8_t clip (int x )
++{
++    return ( (x & ~((1 << 8)-1)) ? (-x)>>31 & ((1 << 8)-1) : x );
++}
++
++void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
++	 intptr_t stride, int width, int height, int16_t *buf)
++{
++    const int pad = (8 > 9) ? (-10 * ((1 << 8)-1)) : 0;
++    for( int y = 0; y < height; y++ ) {
++        for( int x = -2; x < width+3; x++ ) {
++            int v = ((src)[x-2*stride] + (src)[x+3*stride] - 5*((src)[x-stride]
++		     + (src)[x+2*stride]) + 20*((src)[x] + (src)[x+stride]));
++            dstv[x] = clip ( (v + 16) >> 5 );
++            buf[x+2] = v + pad;
++        }
++        for( int x = 0; x < width; x++ )
++            dstc[x] = clip ((((buf+2)[x-2*1] + (buf+2)[x+3*1] - 5*((buf+2)[x-1]
++			      + (buf+2)[x+2*1]) + 20*((buf+2)[x] + (buf+2)[x+1]))
++			     - 32*pad + 512) >> 10);
++        for( int x = 0; x < width; x++ )
++            dsth[x] = clip ((((src)[x-2*1] + (src)[x+3*1] - 5*((src)[x-1]
++			      + (src)[x+2*1]) + 20*((src)[x] + (src)[x+1]))
++			     + 16) >> 5);
++        dsth += stride;
++        dstv += stride;
++        dstc += stride;
++        src += stride;
++    }
++}
++
++/* { dg-final { scan-assembler-times {smax\t} 4 } }  */
++/* { dg-final { scan-assembler-times {smin\t} 4 } }  */
++/* { dg-final { scan-assembler-times {cmtst\t} 2 } }  */
++/* { dg-final { scan-assembler-times {uzp1\t} 6 } }  */
+-- 
+2.33.0
+
diff --git a/0045-LoongArch-Fix-runtime-error-in-a-gcc-build-with-with.patch b/0045-LoongArch-Fix-runtime-error-in-a-gcc-build-with-with.patch
new file mode 100644
index 0000000000000000000000000000000000000000..17ff895010f7b62f878ae96adf08162875e5ff4e
--- /dev/null
+++ b/0045-LoongArch-Fix-runtime-error-in-a-gcc-build-with-with.patch
@@ -0,0 +1,30 @@
+From fa28ce4ac91691595e14838be49c9dd42b153b7f Mon Sep 17 00:00:00 2001
+From: Guo Jie 
+Date: Thu, 23 Nov 2023 11:05:56 +0800
+Subject: [PATCH 045/188] LoongArch: Fix runtime error in a gcc build with
+ --with-build-config=bootstrap-ubsan
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_split_plus_constant):
+	avoid left shift of negative value -0x8000.
+---
+ gcc/config/loongarch/loongarch.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 048d3802b..ecceca22d 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4265,7 +4265,7 @@ loongarch_split_plus_constant (rtx *op, machine_mode mode)
+   else if (loongarch_addu16i_imm12_operand_p (v, mode))
+     a = (v & ~HWIT_UC_0xFFF) + ((v & 0x800) << 1);
+   else if (mode == DImode && DUAL_ADDU16I_OPERAND (v))
+-    a = (v > 0 ? 0x7fff : -0x8000) << 16;
++    a = (v > 0 ? 0x7fff0000 : ~0x7fffffff);
+   else
+     gcc_unreachable ();
+ 
+-- 
+2.43.0
+
diff --git a/0045-Port-moving-minmask-pattern-to-gimple-to-GCC-12.patch b/0045-Port-moving-minmask-pattern-to-gimple-to-GCC-12.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a5a786f6fb6cc05c72ed413ea09775d4279f20a7
--- /dev/null
+++ b/0045-Port-moving-minmask-pattern-to-gimple-to-GCC-12.patch
@@ -0,0 +1,239 @@
+From 11da40d18e35219961226d40f11b0702b8649044 Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 00812787 
+Date: Thu, 22 Feb 2024 17:13:27 +0800
+Subject: [PATCH 13/18] Port moving minmask pattern to gimple to GCC 12
+
+---
+ gcc/common.opt                          |   4 +
+ gcc/match.pd                            | 104 ++++++++++++++++++++++++
+ gcc/testsuite/gcc.dg/combine-maxmin-1.c |  15 ++++
+ gcc/testsuite/gcc.dg/combine-maxmin-2.c |  14 ++++
+ gcc/testsuite/gcc.dg/combine-maxmin.c   |  19 +++--
+ 5 files changed, 151 insertions(+), 5 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/combine-maxmin-1.c
+ create mode 100644 gcc/testsuite/gcc.dg/combine-maxmin-2.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 6c6fabb31..3a5004271 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1846,6 +1846,10 @@ fif-conversion-gimple
+ Common Var(flag_if_conversion_gimple) Optimization
+ Perform conversion of conditional jumps to branchless equivalents during gimple transformations.
+ 
++fconvert-minmax
++Common Var(flag_convert_minmax) Optimization
++Convert saturating clipping to min max.
++
+ fstack-reuse=
+ Common Joined RejectNegative Enum(stack_reuse_level) Var(flag_stack_reuse) Init(SR_ALL) Optimization
+ -fstack-reuse=[all|named_vars|none]	Set stack reuse level for local variables.
+diff --git a/gcc/match.pd b/gcc/match.pd
+index 61866cb90..3a19e93b3 100644
+--- a/gcc/match.pd
++++ b/gcc/match.pd
+@@ -8031,3 +8031,107 @@ and,
+    (plus:c@4 (op2:c @0 @1)
+     (plus:c@5 (double_size_mul_overflow_check_lo @0 @1 @3) (op3:c @0 @1))))
+      (if (single_use (@4) && single_use (@5)))))
++
++/* MinMax pattern matching helpers.  More info on the transformation below.  */
++
++/* Match (a & 0b11..100..0) pattern.  */
++(match (minmax_cmp_arg @0 @1)
++ (bit_and @0 INTEGER_CST@1)
++ (if (wi::popcount (~wi::to_widest (@1) + 1) == 1)))
++
++/* Match (inversed_sign_bit >> sign_bit_pos) pattern.
++   This statement is blocking for the transformation of unsigned integers.
++   Do type check here to avoid unnecessary duplications.  */
++(match (minmax_sat_arg @0)
++ (rshift (negate @0) INTEGER_CST@1)
++ (if (!TYPE_UNSIGNED (TREE_TYPE (@0))
++      && wi::eq_p (wi::to_widest (@1), TYPE_PRECISION (TREE_TYPE (@0)) - 1))))
++
++/* Transform ((x & ~mask) ? (-x)>>31 & mask : x) to (min (max (x, 0), mask)).
++   The matched pattern can be described as saturated clipping.
++
++   The pattern supports truncation via both casts and bit_and.
++   Also there are patterns for possible inverted conditions.  */
++(if (flag_convert_minmax)
++/* Truncation via casts.  Unfortunately convert? cannot be applied here
++   because convert and cond take different number of arguments.  */
++ (simplify
++  (convert
++   (cond
++    (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++    (convert? (minmax_sat_arg @0))
++    (convert? @0)))
++  (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type)))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; })))))
++ (simplify
++  (cond
++   (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++   (convert? (minmax_sat_arg @0))
++   (convert? @0))
++  (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type)))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; })))))
++
++ (simplify
++  (convert
++   (cond
++    (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++    (convert? @0)
++    (convert? (minmax_sat_arg @0))))
++  (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type)))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; })))))
++ (simplify
++  (cond
++   (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++   (convert? @0)
++   (convert? (minmax_sat_arg @0)))
++  (if (wi::geu_p (~wi::to_widest (@1) + 1, TYPE_PRECISION (type)))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; })))))
++
++ /* Truncation via bit_and with mask.  Same concerns on convert? here.  */
++ (simplify
++  (convert
++   (cond
++    (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++    (convert? (bit_and (minmax_sat_arg @0) INTEGER_CST@2))
++    (convert? @0)))
++  (if (wi::to_widest (@2) == ~wi::to_widest (@1))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; })))))
++ (simplify
++  (cond
++   (ne (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++   (convert? (bit_and (minmax_sat_arg @0) INTEGER_CST@2))
++   (convert? @0))
++  (if (wi::to_widest (@2) == ~wi::to_widest (@1))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; })))))
++
++ (simplify
++  (convert
++   (cond
++    (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++    (convert? @0)
++    (convert? (bit_and (minmax_sat_arg @0) INTEGER_CST@2))))
++  (if (wi::to_widest (@2) == ~wi::to_widest (@1))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; })))))
++ (simplify
++  (cond
++   (eq (minmax_cmp_arg @0 INTEGER_CST@1) integer_zerop)
++   (convert? @0)
++   (convert? (bit_and (minmax_sat_arg @0) INTEGER_CST@2)))
++  (if (wi::to_widest (@2) == ~wi::to_widest (@1))
++   (with { tree mask = build_int_cst (integer_type_node, ~tree_to_shwi (@1)); }
++    (convert (min (max @0 { integer_zero_node; })
++		  { mask; }))))))
+diff --git a/gcc/testsuite/gcc.dg/combine-maxmin-1.c b/gcc/testsuite/gcc.dg/combine-maxmin-1.c
+new file mode 100644
+index 000000000..859ff7df8
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/combine-maxmin-1.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile { target aarch64-*-* } } */
++/* { dg-options "-O3 -fconvert-minmax" } */
++
++#include 
++
++__attribute__((noinline))
++void test (int32_t *restrict a, int32_t *restrict x)
++{
++  for (int i = 0; i < 4; i++)
++    a[i] = ((((-x[i]) >> 31) ^ x[i])
++            & (-((int32_t)((x[i] & (~((1 << 8)-1))) == 0)))) ^ ((-x[i]) >> 31);
++}
++
++/* { dg-final { scan-assembler-not {smax\t} } }  */
++/* { dg-final { scan-assembler-not {smin\t} } }  */
+diff --git a/gcc/testsuite/gcc.dg/combine-maxmin-2.c b/gcc/testsuite/gcc.dg/combine-maxmin-2.c
+new file mode 100644
+index 000000000..63d4d85b3
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/combine-maxmin-2.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile { target aarch64-*-* } } */
++/* { dg-options "-O3 -fconvert-minmax" } */
++
++#include 
++
++__attribute__((noinline))
++void test (int8_t *restrict a, int32_t *restrict x)
++{
++  for (int i = 0; i < 8; i++)
++    a[i] = ((x[i] & ~((1 << 9)-1)) ? (-x[i])>>31 & ((1 << 9)-1) : x[i]);
++}
++
++/* { dg-final { scan-assembler-times {smax\t} 4 } }  */
++/* { dg-final { scan-assembler-times {smin\t} 4 } }  */
+diff --git a/gcc/testsuite/gcc.dg/combine-maxmin.c b/gcc/testsuite/gcc.dg/combine-maxmin.c
+index 06bce7029..a984fa560 100755
+--- a/gcc/testsuite/gcc.dg/combine-maxmin.c
++++ b/gcc/testsuite/gcc.dg/combine-maxmin.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target aarch64-*-* } } */
+-/* { dg-options "-O3 -fdump-rtl-combine-all" } */
++/* { dg-options "-O3 -fconvert-minmax" } */
+ 
+ /* The test checks usage of smax/smin insns for clip evaluation and
+  * uzp1/uzp2 insns for vector element narrowing.  It's inspired by
+@@ -19,20 +19,26 @@ void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
+ {
+     const int pad = (8 > 9) ? (-10 * ((1 << 8)-1)) : 0;
+     for( int y = 0; y < height; y++ ) {
++        /* This loop is not being vectorized now.  */
+         for( int x = -2; x < width+3; x++ ) {
+             int v = ((src)[x-2*stride] + (src)[x+3*stride] - 5*((src)[x-stride]
+ 		     + (src)[x+2*stride]) + 20*((src)[x] + (src)[x+stride]));
+             dstv[x] = clip ( (v + 16) >> 5 );
+             buf[x+2] = v + pad;
+         }
++
++        /* Produces two versions of the code: 3xUZP1/2xMAX/2xMIN + 1xUZP1/1xMAX/1xMIN.  */
+         for( int x = 0; x < width; x++ )
+             dstc[x] = clip ((((buf+2)[x-2*1] + (buf+2)[x+3*1] - 5*((buf+2)[x-1]
+ 			      + (buf+2)[x+2*1]) + 20*((buf+2)[x] + (buf+2)[x+1]))
+ 			     - 32*pad + 512) >> 10);
++
+        /* Produces two versions of the code: 1xUZP1/2xMAX/2xMIN + 0xUZP1/1xMAX/1xMIN.  */
+         for( int x = 0; x < width; x++ )
+             dsth[x] = clip ((((src)[x-2*1] + (src)[x+3*1] - 5*((src)[x-1]
+ 			      + (src)[x+2*1]) + 20*((src)[x] + (src)[x+1]))
+ 			     + 16) >> 5);
++
+         dsth += stride;
+         dstv += stride;
+         dstc += stride;
+@@ -40,7 +46,10 @@ void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
+     }
+ }
+ 
+-/* { dg-final { scan-assembler-times {smax\t} 4 } }  */
+-/* { dg-final { scan-assembler-times {smin\t} 4 } }  */
+-/* { dg-final { scan-assembler-times {cmtst\t} 2 } }  */
+-/* { dg-final { scan-assembler-times {uzp1\t} 6 } }  */
++/* Max is performed on 0 from signed values, match smax exactly.  */
++/* { dg-final { scan-assembler-times {smax\t} 6 } }  */
++/* Min is performed on signed val>0 and a mask, min sign doesn't matter.  */
++/* { dg-final { scan-assembler-times {[us]min\t} 6 } }  */
++/* All of the vectorized patterns are expected to be matched.  */
++/* { dg-final { scan-assembler-not {cmtst\t} } }  */
++/* { dg-final { scan-assembler-times {uzp1\t} 5 } }  */
+-- 
+2.33.0
+
diff --git a/0046-Add-new-pattern-to-pass-the-maxmin-tests.patch b/0046-Add-new-pattern-to-pass-the-maxmin-tests.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9ceba88090b58a20e5d3c4d2d6c70327cfbd9f47
--- /dev/null
+++ b/0046-Add-new-pattern-to-pass-the-maxmin-tests.patch
@@ -0,0 +1,65 @@
+From dbcb2630c426c8dd2117b5ce625da8422dd8cd65 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Thu, 22 Feb 2024 17:20:17 +0800
+Subject: [PATCH 14/18] Add new pattern to pass the maxmin tests
+
+---
+ gcc/match.pd                          | 24 ++++++++++++++++++++++++
+ gcc/testsuite/gcc.dg/combine-maxmin.c |  2 +-
+ 2 files changed, 25 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/match.pd b/gcc/match.pd
+index 3a19e93b3..aee58e47b 100644
+--- a/gcc/match.pd
++++ b/gcc/match.pd
+@@ -8038,6 +8038,10 @@ and,
+ (match (minmax_cmp_arg @0 @1)
+  (bit_and @0 INTEGER_CST@1)
+  (if (wi::popcount (~wi::to_widest (@1) + 1) == 1)))
++/* Match ((unsigned) a > 0b0..01..1) pattern.  */
++(match (minmax_cmp_arg1 @0 @1)
++ (gt @0 INTEGER_CST@1)
++ (if (wi::popcount (wi::to_widest (@1) + 1) == 1)))
+ 
+ /* Match (inversed_sign_bit >> sign_bit_pos) pattern.
+    This statement is blocking for the transformation of unsigned integers.
+@@ -8095,6 +8099,26 @@ and,
+     (convert (min (max @0 { integer_zero_node; })
+ 		  { mask; })))))
+ 
++ (simplify
++  (convert
++   (cond
++    (minmax_cmp_arg1 (convert? @0) INTEGER_CST@1)
++    (convert? (minmax_sat_arg @0))
++    (convert? @0)))
++  (if (wi::geu_p (wi::to_widest (@1) + 1, TYPE_PRECISION (type)))
++   (with { tree mask = build_int_cst (integer_type_node, tree_to_shwi (@1)); }
++    (convert (min (max (convert:integer_type_node @0) { integer_zero_node; })
++		  { mask; })))))
++ (simplify
++  (cond
++   (minmax_cmp_arg1 (convert? @0) INTEGER_CST@1)
++   (convert? (minmax_sat_arg @0))
++   (convert? @0))
++  (if (wi::geu_p (wi::to_widest (@1) + 1, TYPE_PRECISION (type)))
++   (with { tree mask = build_int_cst (integer_type_node, tree_to_shwi (@1)); }
++    (convert (min (max (convert:integer_type_node @0) { integer_zero_node; })
++		  { mask; })))))
++
+  /* Truncation via bit_and with mask.  Same concerns on convert? here.  */
+  (simplify
+   (convert
+diff --git a/gcc/testsuite/gcc.dg/combine-maxmin.c b/gcc/testsuite/gcc.dg/combine-maxmin.c
+index a984fa560..5c0c9cc49 100755
+--- a/gcc/testsuite/gcc.dg/combine-maxmin.c
++++ b/gcc/testsuite/gcc.dg/combine-maxmin.c
+@@ -52,4 +52,4 @@ void hf (uint8_t *dsth, uint8_t *dstv, uint8_t *dstc, uint8_t *src,
+ /* { dg-final { scan-assembler-times {[us]min\t} 6 } }  */
+ /* All of the vectorized patterns are expected to be matched.  */
+ /* { dg-final { scan-assembler-not {cmtst\t} } }  */
+-/* { dg-final { scan-assembler-times {uzp1\t} 5 } }  */
++/* { dg-final { scan-assembler-times {uzp1\t} 2 } }  */
+-- 
+2.33.0
+
diff --git a/0046-LoongArch-Fix-usage-of-LSX-and-LASX-frint-ftint-inst.patch b/0046-LoongArch-Fix-usage-of-LSX-and-LASX-frint-ftint-inst.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f42552d65337cc078e616b0a1a06317dfc91b2ef
--- /dev/null
+++ b/0046-LoongArch-Fix-usage-of-LSX-and-LASX-frint-ftint-inst.patch
@@ -0,0 +1,1295 @@
+From d37308b7a62246e16ee61c40441548feb76761f1 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 18 Nov 2023 04:48:20 +0800
+Subject: [PATCH 046/188] LoongArch: Fix usage of LSX and LASX frint/ftint
+ instructions [PR112578]
+
+The usage LSX and LASX frint/ftint instructions had some problems:
+
+1. These instructions raises FE_INEXACT, which is not allowed with
+   -fno-fp-int-builtin-inexact for most C2x section F.10.6 functions
+   (the only exceptions are rint, lrint, and llrint).
+2. The "frint" instruction without explicit rounding mode is used for
+   roundM2, this is incorrect because roundM2 is defined "rounding
+   operand 1 to the *nearest* integer, rounding away from zero in the
+   event of a tie".  We actually don't have such an instruction.  Our
+   frintrne instruction is roundevenM2 (unfortunately, this is not
+   documented).
+3. These define_insn's are written in a way not so easy to hack.
+
+So I removed these instructions and created a "simd.md" file, then added
+them and the corresponding expanders there.  The advantage of the
+simd.md file is we don't need to duplicate the RTL template twice (in
+lsx.md and lasx.md).
+
+gcc/ChangeLog:
+
+	PR target/112578
+	* config/loongarch/lsx.md (UNSPEC_LSX_VFTINT_S,
+	UNSPEC_LSX_VFTINTRNE, UNSPEC_LSX_VFTINTRP,
+	UNSPEC_LSX_VFTINTRM, UNSPEC_LSX_VFRINTRNE_S,
+	UNSPEC_LSX_VFRINTRNE_D, UNSPEC_LSX_VFRINTRZ_S,
+	UNSPEC_LSX_VFRINTRZ_D, UNSPEC_LSX_VFRINTRP_S,
+	UNSPEC_LSX_VFRINTRP_D, UNSPEC_LSX_VFRINTRM_S,
+	UNSPEC_LSX_VFRINTRM_D): Remove.
+	(ILSX, FLSX): Move into ...
+	(VIMODE): Move into ...
+	(FRINT_S, FRINT_D): Remove.
+	(frint_pattern_s, frint_pattern_d, frint_suffix): Remove.
+	(lsx_vfrint_, lsx_vftint_s__,
+	lsx_vftintrne_w_s, lsx_vftintrne_l_d, lsx_vftintrp_w_s,
+	lsx_vftintrp_l_d, lsx_vftintrm_w_s, lsx_vftintrm_l_d,
+	lsx_vfrintrne_s, lsx_vfrintrne_d, lsx_vfrintrz_s,
+	lsx_vfrintrz_d, lsx_vfrintrp_s, lsx_vfrintrp_d,
+	lsx_vfrintrm_s, lsx_vfrintrm_d,
+	v4sf2,
+	v2df2, round2,
+	fix_trunc2): Remove.
+	* config/loongarch/lasx.md: Likewise.
+	* config/loongarch/simd.md: New file.
+	(ILSX, ILASX, FLSX, FLASX, VIMODE): ... here.
+	(IVEC, FVEC): New mode iterators.
+	(VIMODE): ... here.  Extend it to work for all LSX/LASX vector
+	modes.
+	(x, wu, simd_isa, WVEC, vimode, simdfmt, simdifmt_for_f,
+	elebits): New mode attributes.
+	(UNSPEC_SIMD_FRINTRP, UNSPEC_SIMD_FRINTRZ, UNSPEC_SIMD_FRINT,
+	UNSPEC_SIMD_FRINTRM, UNSPEC_SIMD_FRINTRNE): New unspecs.
+	(SIMD_FRINT): New int iterator.
+	(simd_frint_rounding, simd_frint_pattern): New int attributes.
+	(_vfrint_): New
+	define_insn template for frint instructions.
+	(_vftint__):
+	Likewise, but for ftint instructions.
+	(2): New define_expand with
+	flag_fp_int_builtin_inexact checked.
+	(l2): Likewise.
+	(ftrunc2): New define_expand.  It does not require
+	flag_fp_int_builtin_inexact.
+	(fix_trunc2): New define_insn_and_split.  It does
+	not require flag_fp_int_builtin_inexact.
+	(include): Add lsx.md and lasx.md.
+	* config/loongarch/loongarch.md (include): Include simd.md,
+	instead of including lsx.md and lasx.md directly.
+	* config/loongarch/loongarch-builtins.cc
+	(CODE_FOR_lsx_vftint_w_s, CODE_FOR_lsx_vftint_l_d,
+	CODE_FOR_lasx_xvftint_w_s, CODE_FOR_lasx_xvftint_l_d):
+	Remove.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/112578
+	* gcc.target/loongarch/vect-frint.c: New test.
+	* gcc.target/loongarch/vect-frint-no-inexact.c: New test.
+	* gcc.target/loongarch/vect-ftint.c: New test.
+	* gcc.target/loongarch/vect-ftint-no-inexact.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 239 -----------------
+ gcc/config/loongarch/loongarch-builtins.cc    |   4 -
+ gcc/config/loongarch/loongarch.md             |   7 +-
+ gcc/config/loongarch/lsx.md                   | 243 ------------------
+ gcc/config/loongarch/simd.md                  | 213 +++++++++++++++
+ .../loongarch/vect-frint-no-inexact.c         |  48 ++++
+ .../gcc.target/loongarch/vect-frint.c         |  85 ++++++
+ .../loongarch/vect-ftint-no-inexact.c         |  44 ++++
+ .../gcc.target/loongarch/vect-ftint.c         |  83 ++++++
+ 9 files changed, 475 insertions(+), 491 deletions(-)
+ create mode 100644 gcc/config/loongarch/simd.md
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-frint.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-ftint.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 2e11f0612..d4a56c307 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -53,7 +53,6 @@
+   UNSPEC_LASX_XVFCMP_SULT
+   UNSPEC_LASX_XVFCMP_SUN
+   UNSPEC_LASX_XVFCMP_SUNE
+-  UNSPEC_LASX_XVFTINT_S
+   UNSPEC_LASX_XVFTINT_U
+   UNSPEC_LASX_XVCLO
+   UNSPEC_LASX_XVSAT_S
+@@ -92,12 +91,6 @@
+   UNSPEC_LASX_XVEXTRINS
+   UNSPEC_LASX_XVMSKLTZ
+   UNSPEC_LASX_XVSIGNCOV
+-  UNSPEC_LASX_XVFTINTRNE_W_S
+-  UNSPEC_LASX_XVFTINTRNE_L_D
+-  UNSPEC_LASX_XVFTINTRP_W_S
+-  UNSPEC_LASX_XVFTINTRP_L_D
+-  UNSPEC_LASX_XVFTINTRM_W_S
+-  UNSPEC_LASX_XVFTINTRM_L_D
+   UNSPEC_LASX_XVFTINT_W_D
+   UNSPEC_LASX_XVFFINT_S_L
+   UNSPEC_LASX_XVFTINTRZ_W_D
+@@ -116,14 +109,6 @@
+   UNSPEC_LASX_XVFTINTRML_L_S
+   UNSPEC_LASX_XVFTINTRNEL_L_S
+   UNSPEC_LASX_XVFTINTRNEH_L_S
+-  UNSPEC_LASX_XVFRINTRNE_S
+-  UNSPEC_LASX_XVFRINTRNE_D
+-  UNSPEC_LASX_XVFRINTRZ_S
+-  UNSPEC_LASX_XVFRINTRZ_D
+-  UNSPEC_LASX_XVFRINTRP_S
+-  UNSPEC_LASX_XVFRINTRP_D
+-  UNSPEC_LASX_XVFRINTRM_S
+-  UNSPEC_LASX_XVFRINTRM_D
+   UNSPEC_LASX_XVREPLVE0_Q
+   UNSPEC_LASX_XVPERM_W
+   UNSPEC_LASX_XVPERMI_Q
+@@ -206,9 +191,6 @@
+ ;; Only used for copy256_{u,s}.w.
+ (define_mode_iterator LASX_W    [V8SI V8SF])
+ 
+-;; Only integer modes in LASX.
+-(define_mode_iterator ILASX [V4DI V8SI V16HI V32QI])
+-
+ ;; As ILASX but excludes V32QI.
+ (define_mode_iterator ILASX_DWH [V4DI V8SI V16HI])
+ 
+@@ -224,9 +206,6 @@
+ ;; Only integer modes smaller than a word.
+ (define_mode_iterator ILASX_HB  [V16HI V32QI])
+ 
+-;; Only floating-point modes in LASX.
+-(define_mode_iterator FLASX  [V4DF V8SF])
+-
+ ;; Only used for immediate set shuffle elements instruction.
+ (define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF])
+ 
+@@ -500,37 +479,6 @@
+    (V16HI "w")
+    (V32QI "w")])
+ 
+-(define_int_iterator FRINT256_S [UNSPEC_LASX_XVFRINTRP_S
+-			       UNSPEC_LASX_XVFRINTRZ_S
+-			       UNSPEC_LASX_XVFRINT
+-			       UNSPEC_LASX_XVFRINTRM_S])
+-
+-(define_int_iterator FRINT256_D [UNSPEC_LASX_XVFRINTRP_D
+-			       UNSPEC_LASX_XVFRINTRZ_D
+-			       UNSPEC_LASX_XVFRINT
+-			       UNSPEC_LASX_XVFRINTRM_D])
+-
+-(define_int_attr frint256_pattern_s
+-  [(UNSPEC_LASX_XVFRINTRP_S  "ceil")
+-   (UNSPEC_LASX_XVFRINTRZ_S  "btrunc")
+-   (UNSPEC_LASX_XVFRINT	     "rint")
+-   (UNSPEC_LASX_XVFRINTRM_S  "floor")])
+-
+-(define_int_attr frint256_pattern_d
+-  [(UNSPEC_LASX_XVFRINTRP_D  "ceil")
+-   (UNSPEC_LASX_XVFRINTRZ_D  "btrunc")
+-   (UNSPEC_LASX_XVFRINT	     "rint")
+-   (UNSPEC_LASX_XVFRINTRM_D  "floor")])
+-
+-(define_int_attr frint256_suffix
+-  [(UNSPEC_LASX_XVFRINTRP_S  "rp")
+-   (UNSPEC_LASX_XVFRINTRP_D  "rp")
+-   (UNSPEC_LASX_XVFRINTRZ_S  "rz")
+-   (UNSPEC_LASX_XVFRINTRZ_D  "rz")
+-   (UNSPEC_LASX_XVFRINT	     "")
+-   (UNSPEC_LASX_XVFRINTRM_S  "rm")
+-   (UNSPEC_LASX_XVFRINTRM_D  "rm")])
+-
+ (define_expand "vec_init"
+   [(match_operand:LASX 0 "register_operand")
+    (match_operand:LASX 1 "")]
+@@ -1688,15 +1636,6 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lasx_xvfrint_"
+-  [(set (match_operand:FLASX 0 "register_operand" "=f")
+-	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+-		      UNSPEC_LASX_XVFRINT))]
+-  "ISA_HAS_LASX"
+-  "xvfrint.\t%u0,%u1"
+-  [(set_attr "type" "simd_fcvt")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lasx_xvfrsqrt_"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+ 	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+@@ -1706,16 +1645,6 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lasx_xvftint_s__"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLASX 1 "register_operand" "f")]
+-			    UNSPEC_LASX_XVFTINT_S))]
+-  "ISA_HAS_LASX"
+-  "xvftint..\t%u0,%u1"
+-  [(set_attr "type" "simd_fcvt")
+-   (set_attr "cnv_mode" "")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lasx_xvftint_u__"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(unspec: [(match_operand:FLASX 1 "register_operand" "f")]
+@@ -1726,18 +1655,6 @@
+    (set_attr "cnv_mode" "")
+    (set_attr "mode" "")])
+ 
+-
+-
+-(define_insn "fix_trunc2"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(fix: (match_operand:FLASX 1 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-  "xvftintrz..\t%u0,%u1"
+-  [(set_attr "type" "simd_fcvt")
+-   (set_attr "cnv_mode" "")
+-   (set_attr "mode" "")])
+-
+-
+ (define_insn "fixuns_trunc2"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(unsigned_fix: (match_operand:FLASX 1 "register_operand" "f")))]
+@@ -3245,60 +3162,6 @@
+   [(set_attr "type" "simd_fmadd")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lasx_xvftintrne_w_s"
+-  [(set (match_operand:V8SI 0 "register_operand" "=f")
+-	(unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFTINTRNE_W_S))]
+-  "ISA_HAS_LASX"
+-  "xvftintrne.w.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "lasx_xvftintrne_l_d"
+-  [(set (match_operand:V4DI 0 "register_operand" "=f")
+-	(unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFTINTRNE_L_D))]
+-  "ISA_HAS_LASX"
+-  "xvftintrne.l.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+-(define_insn "lasx_xvftintrp_w_s"
+-  [(set (match_operand:V8SI 0 "register_operand" "=f")
+-	(unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFTINTRP_W_S))]
+-  "ISA_HAS_LASX"
+-  "xvftintrp.w.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "lasx_xvftintrp_l_d"
+-  [(set (match_operand:V4DI 0 "register_operand" "=f")
+-	(unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFTINTRP_L_D))]
+-  "ISA_HAS_LASX"
+-  "xvftintrp.l.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+-(define_insn "lasx_xvftintrm_w_s"
+-  [(set (match_operand:V8SI 0 "register_operand" "=f")
+-	(unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFTINTRM_W_S))]
+-  "ISA_HAS_LASX"
+-  "xvftintrm.w.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "lasx_xvftintrm_l_d"
+-  [(set (match_operand:V4DI 0 "register_operand" "=f")
+-	(unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFTINTRM_L_D))]
+-  "ISA_HAS_LASX"
+-  "xvftintrm.l.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+ (define_insn "lasx_xvftint_w_d"
+   [(set (match_operand:V8SI 0 "register_operand" "=f")
+ 	(unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f")
+@@ -3467,108 +3330,6 @@
+   [(set_attr "type" "simd_shift")
+    (set_attr "mode" "V8SF")])
+ 
+-(define_insn "lasx_xvfrintrne_s"
+-  [(set (match_operand:V8SF 0 "register_operand" "=f")
+-	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRNE_S))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrne.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "lasx_xvfrintrne_d"
+-  [(set (match_operand:V4DF 0 "register_operand" "=f")
+-	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRNE_D))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrne.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+-(define_insn "lasx_xvfrintrz_s"
+-  [(set (match_operand:V8SF 0 "register_operand" "=f")
+-	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRZ_S))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrz.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "lasx_xvfrintrz_d"
+-  [(set (match_operand:V4DF 0 "register_operand" "=f")
+-	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRZ_D))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrz.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+-(define_insn "lasx_xvfrintrp_s"
+-  [(set (match_operand:V8SF 0 "register_operand" "=f")
+-	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRP_S))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrp.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "lasx_xvfrintrp_d"
+-  [(set (match_operand:V4DF 0 "register_operand" "=f")
+-	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRP_D))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrp.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+-(define_insn "lasx_xvfrintrm_s"
+-  [(set (match_operand:V8SF 0 "register_operand" "=f")
+-	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRM_S))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrm.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "lasx_xvfrintrm_d"
+-  [(set (match_operand:V4DF 0 "register_operand" "=f")
+-	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
+-		     UNSPEC_LASX_XVFRINTRM_D))]
+-  "ISA_HAS_LASX"
+-  "xvfrintrm.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+-;; Vector versions of the floating-point frint patterns.
+-;; Expands to btrunc, ceil, floor, rint.
+-(define_insn "v8sf2"
+- [(set (match_operand:V8SF 0 "register_operand" "=f")
+-	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
+-			 FRINT256_S))]
+-  "ISA_HAS_LASX"
+-  "xvfrint.s\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V8SF")])
+-
+-(define_insn "v4df2"
+- [(set (match_operand:V4DF 0 "register_operand" "=f")
+-	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
+-			 FRINT256_D))]
+-  "ISA_HAS_LASX"
+-  "xvfrint.d\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4DF")])
+-
+-;; Expands to round.
+-(define_insn "round2"
+- [(set (match_operand:FLASX 0 "register_operand" "=f")
+-	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+-			 UNSPEC_LASX_XVFRINT))]
+-  "ISA_HAS_LASX"
+-  "xvfrint.\t%u0,%u1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "")])
+-
+ ;; Offset load and broadcast
+ (define_expand "lasx_xvldrepl_"
+   [(match_operand:LASX 0 "register_operand")
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index 2d9743d86..fb458feac 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -419,8 +419,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lsx_vabsd_hu CODE_FOR_lsx_vabsd_u_hu
+ #define CODE_FOR_lsx_vabsd_wu CODE_FOR_lsx_vabsd_u_wu
+ #define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du
+-#define CODE_FOR_lsx_vftint_w_s CODE_FOR_lsx_vftint_s_w_s
+-#define CODE_FOR_lsx_vftint_l_d CODE_FOR_lsx_vftint_s_l_d
+ #define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s
+ #define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d
+ #define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3
+@@ -725,8 +723,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lasx_xvssrlrn_bu_h CODE_FOR_lasx_xvssrlrn_u_bu_h
+ #define CODE_FOR_lasx_xvssrlrn_hu_w CODE_FOR_lasx_xvssrlrn_u_hu_w
+ #define CODE_FOR_lasx_xvssrlrn_wu_d CODE_FOR_lasx_xvssrlrn_u_wu_d
+-#define CODE_FOR_lasx_xvftint_w_s CODE_FOR_lasx_xvftint_s_w_s
+-#define CODE_FOR_lasx_xvftint_l_d CODE_FOR_lasx_xvftint_s_l_d
+ #define CODE_FOR_lasx_xvftint_wu_s CODE_FOR_lasx_xvftint_u_wu_s
+ #define CODE_FOR_lasx_xvftint_lu_d CODE_FOR_lasx_xvftint_u_lu_d
+ #define CODE_FOR_lasx_xvsllwil_h_b CODE_FOR_lasx_xvsllwil_s_h_b
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index c4e7af107..d1c766cbf 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -4026,11 +4026,8 @@
+ (include "generic.md")
+ (include "la464.md")
+ 
+-; The LoongArch SX Instructions.
+-(include "lsx.md")
+-
+-; The LoongArch ASX Instructions.
+-(include "lasx.md")
++; The LoongArch SIMD Instructions.
++(include "simd.md")
+ 
+ (define_c_enum "unspec" [
+   UNSPEC_ADDRESS_FIRST
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 5e8d8d74b..c1c3719e3 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -55,7 +55,6 @@
+   UNSPEC_LSX_VFCMP_SULT
+   UNSPEC_LSX_VFCMP_SUN
+   UNSPEC_LSX_VFCMP_SUNE
+-  UNSPEC_LSX_VFTINT_S
+   UNSPEC_LSX_VFTINT_U
+   UNSPEC_LSX_VSAT_S
+   UNSPEC_LSX_VSAT_U
+@@ -89,9 +88,6 @@
+   UNSPEC_LSX_VEXTRINS
+   UNSPEC_LSX_VMSKLTZ
+   UNSPEC_LSX_VSIGNCOV
+-  UNSPEC_LSX_VFTINTRNE
+-  UNSPEC_LSX_VFTINTRP
+-  UNSPEC_LSX_VFTINTRM
+   UNSPEC_LSX_VFTINT_W_D
+   UNSPEC_LSX_VFFINT_S_L
+   UNSPEC_LSX_VFTINTRZ_W_D
+@@ -110,14 +106,6 @@
+   UNSPEC_LSX_VFTINTRNEL_L_S
+   UNSPEC_LSX_VFTINTRNEH_L_S
+   UNSPEC_LSX_VFTINTH_L_H
+-  UNSPEC_LSX_VFRINTRNE_S
+-  UNSPEC_LSX_VFRINTRNE_D
+-  UNSPEC_LSX_VFRINTRZ_S
+-  UNSPEC_LSX_VFRINTRZ_D
+-  UNSPEC_LSX_VFRINTRP_S
+-  UNSPEC_LSX_VFRINTRP_D
+-  UNSPEC_LSX_VFRINTRM_S
+-  UNSPEC_LSX_VFRINTRM_D
+   UNSPEC_LSX_VSSRARN_S
+   UNSPEC_LSX_VSSRARN_U
+   UNSPEC_LSX_VSSRLN_U
+@@ -221,9 +209,6 @@
+ ;; Only used for copy_{u,s}.w and vilvh.
+ (define_mode_iterator LSX_W    [V4SI V4SF])
+ 
+-;; Only integer modes.
+-(define_mode_iterator ILSX     [V2DI V4SI V8HI V16QI])
+-
+ ;; As ILSX but excludes V16QI.
+ (define_mode_iterator ILSX_DWH [V2DI V4SI V8HI])
+ 
+@@ -242,21 +227,9 @@
+ ;;;; Only integer modes for fixed-point madd_q/maddr_q.
+ ;;(define_mode_iterator ILSX_WH  [V4SI V8HI])
+ 
+-;; Only floating-point modes.
+-(define_mode_iterator FLSX     [V2DF V4SF])
+-
+ ;; Only used for immediate set shuffle elements instruction.
+ (define_mode_iterator LSX_WHB_W [V4SI V8HI V16QI V4SF])
+ 
+-;; The attribute gives the integer vector mode with same size.
+-(define_mode_attr VIMODE
+-  [(V2DF "V2DI")
+-   (V4SF "V4SI")
+-   (V2DI "V2DI")
+-   (V4SI "V4SI")
+-   (V8HI "V8HI")
+-   (V16QI "V16QI")])
+-
+ ;; The attribute gives half modes for vector modes.
+ (define_mode_attr VHMODE
+   [(V8HI "V16QI")
+@@ -400,38 +373,6 @@
+    (V4SI  "uimm5")
+    (V2DI  "uimm6")])
+ 
+-
+-(define_int_iterator FRINT_S [UNSPEC_LSX_VFRINTRP_S
+-			    UNSPEC_LSX_VFRINTRZ_S
+-			    UNSPEC_LSX_VFRINT
+-			    UNSPEC_LSX_VFRINTRM_S])
+-
+-(define_int_iterator FRINT_D [UNSPEC_LSX_VFRINTRP_D
+-			    UNSPEC_LSX_VFRINTRZ_D
+-			    UNSPEC_LSX_VFRINT
+-			    UNSPEC_LSX_VFRINTRM_D])
+-
+-(define_int_attr frint_pattern_s
+-  [(UNSPEC_LSX_VFRINTRP_S  "ceil")
+-   (UNSPEC_LSX_VFRINTRZ_S  "btrunc")
+-   (UNSPEC_LSX_VFRINT	   "rint")
+-   (UNSPEC_LSX_VFRINTRM_S  "floor")])
+-
+-(define_int_attr frint_pattern_d
+-  [(UNSPEC_LSX_VFRINTRP_D  "ceil")
+-   (UNSPEC_LSX_VFRINTRZ_D  "btrunc")
+-   (UNSPEC_LSX_VFRINT	   "rint")
+-   (UNSPEC_LSX_VFRINTRM_D  "floor")])
+-
+-(define_int_attr frint_suffix
+-  [(UNSPEC_LSX_VFRINTRP_S  "rp")
+-   (UNSPEC_LSX_VFRINTRP_D  "rp")
+-   (UNSPEC_LSX_VFRINTRZ_S  "rz")
+-   (UNSPEC_LSX_VFRINTRZ_D  "rz")
+-   (UNSPEC_LSX_VFRINT	   "")
+-   (UNSPEC_LSX_VFRINTRM_S  "rm")
+-   (UNSPEC_LSX_VFRINTRM_D  "rm")])
+-
+ (define_expand "vec_init"
+   [(match_operand:LSX 0 "register_operand")
+    (match_operand:LSX 1 "")]
+@@ -1616,15 +1557,6 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lsx_vfrint_"
+-  [(set (match_operand:FLSX 0 "register_operand" "=f")
+-	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINT))]
+-  "ISA_HAS_LSX"
+-  "vfrint.\t%w0,%w1"
+-  [(set_attr "type" "simd_fcvt")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lsx_vfrsqrt_"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+ 	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+@@ -1634,16 +1566,6 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lsx_vftint_s__"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLSX 1 "register_operand" "f")]
+-			 UNSPEC_LSX_VFTINT_S))]
+-  "ISA_HAS_LSX"
+-  "vftint..\t%w0,%w1"
+-  [(set_attr "type" "simd_fcvt")
+-   (set_attr "cnv_mode" "")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lsx_vftint_u__"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(unspec: [(match_operand:FLSX 1 "register_operand" "f")]
+@@ -1654,15 +1576,6 @@
+    (set_attr "cnv_mode" "")
+    (set_attr "mode" "")])
+ 
+-(define_insn "fix_trunc2"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(fix: (match_operand:FLSX 1 "register_operand" "f")))]
+-  "ISA_HAS_LSX"
+-  "vftintrz..\t%w0,%w1"
+-  [(set_attr "type" "simd_fcvt")
+-   (set_attr "cnv_mode" "")
+-   (set_attr "mode" "")])
+-
+ (define_insn "fixuns_trunc2"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(unsigned_fix: (match_operand:FLSX 1 "register_operand" "f")))]
+@@ -2965,60 +2878,6 @@
+   [(set_attr "type" "simd_fmadd")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lsx_vftintrne_w_s"
+-  [(set (match_operand:V4SI 0 "register_operand" "=f")
+-	(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFTINTRNE))]
+-  "ISA_HAS_LSX"
+-  "vftintrne.w.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "lsx_vftintrne_l_d"
+-  [(set (match_operand:V2DI 0 "register_operand" "=f")
+-	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFTINTRNE))]
+-  "ISA_HAS_LSX"
+-  "vftintrne.l.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+-(define_insn "lsx_vftintrp_w_s"
+-  [(set (match_operand:V4SI 0 "register_operand" "=f")
+-	(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFTINTRP))]
+-  "ISA_HAS_LSX"
+-  "vftintrp.w.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "lsx_vftintrp_l_d"
+-  [(set (match_operand:V2DI 0 "register_operand" "=f")
+-	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFTINTRP))]
+-  "ISA_HAS_LSX"
+-  "vftintrp.l.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+-(define_insn "lsx_vftintrm_w_s"
+-  [(set (match_operand:V4SI 0 "register_operand" "=f")
+-	(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFTINTRM))]
+-  "ISA_HAS_LSX"
+-  "vftintrm.w.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "lsx_vftintrm_l_d"
+-  [(set (match_operand:V2DI 0 "register_operand" "=f")
+-	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFTINTRM))]
+-  "ISA_HAS_LSX"
+-  "vftintrm.l.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+ (define_insn "lsx_vftint_w_d"
+   [(set (match_operand:V4SI 0 "register_operand" "=f")
+ 	(unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
+@@ -3187,108 +3046,6 @@
+   [(set_attr "type" "simd_shift")
+    (set_attr "mode" "V4SF")])
+ 
+-(define_insn "lsx_vfrintrne_s"
+-  [(set (match_operand:V4SF 0 "register_operand" "=f")
+-	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRNE_S))]
+-  "ISA_HAS_LSX"
+-  "vfrintrne.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "lsx_vfrintrne_d"
+-  [(set (match_operand:V2DF 0 "register_operand" "=f")
+-	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRNE_D))]
+-  "ISA_HAS_LSX"
+-  "vfrintrne.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+-(define_insn "lsx_vfrintrz_s"
+-  [(set (match_operand:V4SF 0 "register_operand" "=f")
+-	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRZ_S))]
+-  "ISA_HAS_LSX"
+-  "vfrintrz.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "lsx_vfrintrz_d"
+-  [(set (match_operand:V2DF 0 "register_operand" "=f")
+-	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRZ_D))]
+-  "ISA_HAS_LSX"
+-  "vfrintrz.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+-(define_insn "lsx_vfrintrp_s"
+-  [(set (match_operand:V4SF 0 "register_operand" "=f")
+-	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRP_S))]
+-  "ISA_HAS_LSX"
+-  "vfrintrp.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "lsx_vfrintrp_d"
+-  [(set (match_operand:V2DF 0 "register_operand" "=f")
+-	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRP_D))]
+-  "ISA_HAS_LSX"
+-  "vfrintrp.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+-(define_insn "lsx_vfrintrm_s"
+-  [(set (match_operand:V4SF 0 "register_operand" "=f")
+-	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRM_S))]
+-  "ISA_HAS_LSX"
+-  "vfrintrm.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "lsx_vfrintrm_d"
+-  [(set (match_operand:V2DF 0 "register_operand" "=f")
+-	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRINTRM_D))]
+-  "ISA_HAS_LSX"
+-  "vfrintrm.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+-;; Vector versions of the floating-point frint patterns.
+-;; Expands to btrunc, ceil, floor, rint.
+-(define_insn "v4sf2"
+- [(set (match_operand:V4SF 0 "register_operand" "=f")
+-	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
+-			 FRINT_S))]
+-  "ISA_HAS_LSX"
+-  "vfrint.s\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V4SF")])
+-
+-(define_insn "v2df2"
+- [(set (match_operand:V2DF 0 "register_operand" "=f")
+-	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
+-			 FRINT_D))]
+-  "ISA_HAS_LSX"
+-  "vfrint.d\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "V2DF")])
+-
+-;; Expands to round.
+-(define_insn "round2"
+- [(set (match_operand:FLSX 0 "register_operand" "=f")
+-	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+-			 UNSPEC_LSX_VFRINT))]
+-  "ISA_HAS_LSX"
+-  "vfrint.\t%w0,%w1"
+-  [(set_attr "type" "simd_shift")
+-   (set_attr "mode" "")])
+-
+ ;; Offset load and broadcast
+ (define_expand "lsx_vldrepl_"
+   [(match_operand:LSX 0 "register_operand")
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+new file mode 100644
+index 000000000..27d1ffecd
+--- /dev/null
++++ b/gcc/config/loongarch/simd.md
+@@ -0,0 +1,213 @@
++;; Machine Description for LoongArch SIMD instructions for GNU compiler.
++;; Copyright (C) 2023 Free Software Foundation, Inc.
++
++;; This file is part of GCC.
++
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; .
++
++;; Integer modes supported by LSX.
++(define_mode_iterator ILSX    [V2DI V4SI V8HI V16QI])
++
++;; Integer modes supported by LASX.
++(define_mode_iterator ILASX   [V4DI V8SI V16HI V32QI])
++
++;; FP modes supported by LSX
++(define_mode_iterator FLSX    [V2DF V4SF])
++
++;; FP modes supported by LASX
++(define_mode_iterator FLASX   [V4DF V8SF])
++
++;; All integer modes available
++(define_mode_iterator IVEC    [(ILSX "ISA_HAS_LSX") (ILASX "ISA_HAS_LASX")])
++
++;; All FP modes available
++(define_mode_iterator FVEC    [(FLSX "ISA_HAS_LSX") (FLASX "ISA_HAS_LASX")])
++
++;; Mnemonic prefix, "x" for LASX modes.
++(define_mode_attr x [(V2DI "") (V4SI "") (V8HI "") (V16QI "")
++		     (V2DF "") (V4SF "")
++		     (V4DI "x") (V8SI "x") (V16HI "x") (V32QI "x")
++		     (V4DF "x") (V8SF "x")])
++
++;; Modifier for vector register, "w" for LSX modes, "u" for LASX modes.
++(define_mode_attr wu [(V2DI "w") (V4SI "w") (V8HI "w") (V16QI "w")
++		      (V2DF "w") (V4SF "w")
++		      (V4DI "u") (V8SI "u") (V16HI "u") (V32QI "u")
++		      (V4DF "u") (V8SF "u")])
++
++;; define_insn name prefix, "lsx" or "lasx"
++(define_mode_attr simd_isa
++  [(V2DI "lsx") (V4SI "lsx") (V8HI "lsx") (V16QI "lsx")
++   (V2DF "lsx") (V4SF "lsx")
++   (V4DI "lasx") (V8SI "lasx") (V16HI "lasx") (V32QI "lasx")
++   (V4DF "lasx") (V8SF "lasx")])
++
++;; Widen integer modes for intermediate values in RTX pattern.
++(define_mode_attr WVEC [(V2DI "V2TI") (V4DI "V4TI")
++			(V4SI "V4DI") (V8SI "V8DI")
++			(V8HI "V8SI") (V16HI "V16SI")
++			(V16QI "V16HI") (V32QI "V32HI")])
++
++;; Integer vector modes with the same length and unit size as a mode.
++(define_mode_attr VIMODE [(V2DI "V2DI") (V4SI "V4SI")
++			  (V8HI "V8HI") (V16QI "V16QI")
++			  (V2DF "V2DI") (V4SF "V4SI")
++			  (V4DI "V4DI") (V8SI "V8SI")
++			  (V16HI "V16HI") (V32QI "V32QI")
++			  (V4DF "V4DI") (V8SF "V8SI")])
++
++;; Lower-case version.
++(define_mode_attr vimode [(V2DF "v2di") (V4SF "v4si")
++			  (V4DF "v4di") (V8SF "v8si")])
++
++;; Suffix for LSX or LASX instructions.
++(define_mode_attr simdfmt [(V2DF "d") (V4DF "d")
++			   (V4SF "s") (V8SF "s")
++			   (V2DI "d") (V4DI "d")
++			   (V4SI "w") (V8SI "w")
++			   (V8HI "h") (V16HI "h")
++			   (V16QI "b") (V32QI "b")])
++
++;; Suffix for integer mode in LSX or LASX instructions with FP input but
++;; integer output.
++(define_mode_attr simdifmt_for_f [(V2DF "l") (V4DF "l")
++				  (V4SF "w") (V8SF "w")])
++
++;; Size of vector elements in bits.
++(define_mode_attr elmbits [(V2DI "64") (V4DI "64")
++			   (V4SI "32") (V8SI "32")
++			   (V8HI "16") (V16HI "16")
++			   (V16QI "8") (V32QI "8")])
++
++;; =======================================================================
++;; For many LASX instructions, the only difference of it from the LSX
++;; counterpart is the length of vector operands.  Describe these LSX/LASX
++;; instruction here so we can avoid duplicating logics.
++;; =======================================================================
++
++;;
++;; FP vector rounding instructions
++;;
++
++(define_c_enum "unspec"
++  [UNSPEC_SIMD_FRINTRP
++   UNSPEC_SIMD_FRINTRZ
++   UNSPEC_SIMD_FRINT
++   UNSPEC_SIMD_FRINTRM
++   UNSPEC_SIMD_FRINTRNE])
++
++(define_int_iterator SIMD_FRINT
++  [UNSPEC_SIMD_FRINTRP
++   UNSPEC_SIMD_FRINTRZ
++   UNSPEC_SIMD_FRINT
++   UNSPEC_SIMD_FRINTRM
++   UNSPEC_SIMD_FRINTRNE])
++
++(define_int_attr simd_frint_rounding
++  [(UNSPEC_SIMD_FRINTRP		"rp")
++   (UNSPEC_SIMD_FRINTRZ		"rz")
++   (UNSPEC_SIMD_FRINT		"")
++   (UNSPEC_SIMD_FRINTRM		"rm")
++   (UNSPEC_SIMD_FRINTRNE	"rne")])
++
++;; All these, but rint, are controlled by -ffp-int-builtin-inexact.
++;; Note: nearbyint is NOT allowed to raise FE_INEXACT even if
++;; -ffp-int-builtin-inexact, but rint is ALLOWED to raise it even if
++;; -fno-fp-int-builtin-inexact.
++(define_int_attr simd_frint_pattern
++  [(UNSPEC_SIMD_FRINTRP		"ceil")
++   (UNSPEC_SIMD_FRINTRZ		"btrunc")
++   (UNSPEC_SIMD_FRINT		"rint")
++   (UNSPEC_SIMD_FRINTRNE	"roundeven")
++   (UNSPEC_SIMD_FRINTRM		"floor")])
++
++;; vfrint.{/rp/rz/rm}
++(define_insn "_vfrint_"
++  [(set (match_operand:FVEC 0 "register_operand" "=f")
++	(unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
++		     SIMD_FRINT))]
++  ""
++  "vfrint.\t%0,%1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "")])
++
++;; Expand the standard-named patterns to vfrint instructions if
++;; raising inexact exception is allowed.
++
++(define_expand "2"
++  [(set (match_operand:FVEC 0 "register_operand" "=f")
++	(unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
++		     SIMD_FRINT))]
++   " == UNSPEC_SIMD_FRINT ||
++    flag_fp_int_builtin_inexact ||
++    !flag_trapping_math")
++
++;; ftrunc is like btrunc, but it's allowed to raise inexact exception
++;; even if -fno-fp-int-builtin-inexact.
++(define_expand "ftrunc2"
++  [(set (match_operand:FVEC 0 "register_operand" "=f")
++	(unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
++		     UNSPEC_SIMD_FRINTRZ))]
++  "")
++
++;; vftint.{/rp/rz/rm}
++(define_insn
++  "_vftint__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(fix:
++	  (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
++		       SIMD_FRINT)))]
++  ""
++  "vftint..\t%0,%1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "")])
++
++;; Expand the standard-named patterns to vftint instructions if
++;; raising inexact exception.
++
++(define_expand "l2"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(fix:
++	  (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
++		       SIMD_FRINT)))]
++   " == UNSPEC_SIMD_FRINT ||
++    flag_fp_int_builtin_inexact ||
++    !flag_trapping_math")
++
++;; fix_trunc is allowed to raise inexact exception even if
++;; -fno-fp-int-builtin-inexact.  Because the middle end trys to match
++;; (FIX x) and it does not know (FIX (UNSPEC_SIMD_FRINTRZ x)), we need
++;; to use define_insn_and_split instead of define_expand (expanders are
++;; not considered during matching).
++(define_insn_and_split "fix_trunc2"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(fix: (match_operand:FVEC 1 "register_operand" "f")))]
++  ""
++  "#"
++  ""
++  [(const_int 0)]
++  {
++    emit_insn (gen__vftintrz__ (
++      operands[0], operands[1]));
++    DONE;
++  }
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "")])
++
++; The LoongArch SX Instructions.
++(include "lsx.md")
++
++; The LoongArch ASX Instructions.
++(include "lasx.md")
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c
+new file mode 100644
+index 000000000..7bbaf1fba
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-no-inexact.c
+@@ -0,0 +1,48 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -fno-fp-int-builtin-inexact -mlasx" } */
++
++#include "vect-frint.c"
++
++/* ceil */
++/* { dg-final { scan-assembler "bl\t%plt\\(ceil\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(ceilf\\)" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrp\.s" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrp\.d" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrp\.s" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrp\.d" } } */
++
++/* floor */
++/* { dg-final { scan-assembler "bl\t%plt\\(floor\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(floorf\\)" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrm\.s" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrm\.d" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrm\.s" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrm\.d" } } */
++
++/* nearbyint + rint: Only rint is allowed */
++/* { dg-final { scan-assembler "bl\t%plt\\(nearbyint\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(nearbyintf\\)" } } */
++/* { dg-final { scan-assembler-times "\tvfrint\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\tvfrint\.d" 1 } } */
++/* { dg-final { scan-assembler-times "\txvfrint\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\txvfrint\.d" 1 } } */
++
++/* round: we don't have a corresponding instruction */
++/* { dg-final { scan-assembler "bl\t%plt\\(round\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
++
++/* roundeven */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundeven\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundevenf\\)" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrne\.s" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrne\.d" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrne\.s" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrne\.d" } } */
++
++/* trunc */
++/* { dg-final { scan-assembler "bl\t%plt\\(trunc\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(truncf\\)" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrz\.s" } } */
++/* { dg-final { scan-assembler-not "\tvfrintrz\.d" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrz\.s" } } */
++/* { dg-final { scan-assembler-not "\txvfrintrz\.d" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint.c b/gcc/testsuite/gcc.target/loongarch/vect-frint.c
+new file mode 100644
+index 000000000..6bf211e7e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-frint.c
+@@ -0,0 +1,85 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -ffp-int-builtin-inexact -mlasx" } */
++
++float out_x[8];
++double out_y[4];
++
++float x[8];
++double y[4];
++
++#define TEST(op, N, func) \
++void \
++test_##op##_##N##_##func () \
++{ \
++  for (int i = 0; i < N; i++) \
++    out_##op[i] = __builtin_##func (op[i]); \
++}
++
++TEST(x, 4, ceilf);
++TEST(x, 4, floorf);
++TEST(x, 4, nearbyintf);
++TEST(x, 4, rintf);
++TEST(x, 4, roundf);
++TEST(x, 4, roundevenf);
++TEST(x, 4, truncf);
++
++TEST(x, 8, ceilf);
++TEST(x, 8, floorf);
++TEST(x, 8, nearbyintf);
++TEST(x, 8, rintf);
++TEST(x, 8, roundf);
++TEST(x, 8, roundevenf);
++TEST(x, 8, truncf);
++
++TEST(y, 2, ceil);
++TEST(y, 2, floor);
++TEST(y, 2, nearbyint);
++TEST(y, 2, rint);
++TEST(y, 2, round);
++TEST(y, 2, roundeven);
++TEST(y, 2, trunc);
++
++TEST(y, 4, ceil);
++TEST(y, 4, floor);
++TEST(y, 4, nearbyint);
++TEST(y, 4, rint);
++TEST(y, 4, round);
++TEST(y, 4, roundeven);
++TEST(y, 4, trunc);
++
++/* ceil */
++/* { dg-final { scan-assembler "\tvfrintrp\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrp\.d" } } */
++/* { dg-final { scan-assembler "\txvfrintrp\.s" } } */
++/* { dg-final { scan-assembler "\txvfrintrp\.d" } } */
++
++/* floor */
++/* { dg-final { scan-assembler "\tvfrintrm\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrm\.d" } } */
++/* { dg-final { scan-assembler "\txvfrintrm\.s" } } */
++/* { dg-final { scan-assembler "\txvfrintrm\.d" } } */
++
++/* rint and nearbyint
++   nearbyint has been disallowed to raise FE_INEXACT for decades.  */
++/* { dg-final { scan-assembler-times "\tvfrint\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\tvfrint\.d" 1 } } */
++/* { dg-final { scan-assembler-times "\txvfrint\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\txvfrint\.d" 1 } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(nearbyint\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(nearbyintf\\)" } } */
++
++/* round: we don't have a corresponding instruction */
++/* { dg-final { scan-assembler "bl\t%plt\\(round\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
++
++/* roundeven */
++/* { dg-final { scan-assembler "\tvfrintrne\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrne\.d" } } */
++/* { dg-final { scan-assembler "\txvfrintrne\.s" } } */
++/* { dg-final { scan-assembler "\txvfrintrne\.d" } } */
++
++/* trunc */
++/* { dg-final { scan-assembler "\tvfrintrz\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrz\.d" } } */
++/* { dg-final { scan-assembler "\txvfrintrz\.s" } } */
++/* { dg-final { scan-assembler "\txvfrintrz\.d" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
+new file mode 100644
+index 000000000..83d268099
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
+@@ -0,0 +1,44 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -fno-fp-int-builtin-inexact -mlasx" } */
++
++#include "vect-ftint.c"
++
++/* ceil */
++/* { dg-final { scan-assembler "bl\t%plt\\(ceil\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(ceilf\\)" } } */
++/* { dg-final { scan-assembler-not "\tvftintrp\.w\.s" } } */
++/* { dg-final { scan-assembler-not "\tvftintrp\.l\.d" } } */
++/* { dg-final { scan-assembler-not "\txvftintrp\.w\.s" } } */
++/* { dg-final { scan-assembler-not "\txvftintrp\.l\.d" } } */
++
++/* floor */
++/* { dg-final { scan-assembler "bl\t%plt\\(floor\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(floorf\\)" } } */
++/* { dg-final { scan-assembler-not "\tvftintrm\.w\.s" } } */
++/* { dg-final { scan-assembler-not "\tvftintrm\.l\.d" } } */
++/* { dg-final { scan-assembler-not "\txvftintrm\.w\.s" } } */
++/* { dg-final { scan-assembler-not "\txvftintrm\.l\.d" } } */
++
++/* nearbyint + rint */
++/* { dg-final { scan-assembler "bl\t%plt\\(floor\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(floorf\\)" } } */
++/* { dg-final { scan-assembler-times "\tvftint\.w\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\tvftint\.l\.d" 1 } } */
++/* { dg-final { scan-assembler-times "\txvftint\.w\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\txvftint\.l\.d" 1 } } */
++
++/* round: we don't have a corresponding instruction */
++/* { dg-final { scan-assembler "bl\t%plt\\(lround\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
++
++/* roundeven */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundeven\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundevenf\\)" } } */
++/* { dg-final { scan-assembler-not "\tvftintrne\.w\.s" } } */
++/* { dg-final { scan-assembler-not "\tvftintrne\.l\.d" } } */
++/* { dg-final { scan-assembler-not "\txvftintrne\.w\.s" } } */
++/* { dg-final { scan-assembler-not "\txvftintrne\.l\.d" } } */
++
++/* trunc: XFAIL due to PR 107723 */
++/* { dg-final { scan-assembler "bl\t%plt\\(trunc\\)" { xfail *-*-* } } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(truncf\\)" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-ftint.c b/gcc/testsuite/gcc.target/loongarch/vect-ftint.c
+new file mode 100644
+index 000000000..c4962ed17
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-ftint.c
+@@ -0,0 +1,83 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno -ffp-int-builtin-inexact -mlasx" } */
++
++int out_x[8];
++long out_y[4];
++
++float x[8];
++double y[4];
++
++#define TEST(op, N, func) \
++void \
++test_##op##_##N##_##func () \
++{ \
++  for (int i = 0; i < N; i++) \
++    out_##op[i] = __builtin_##func (op[i]); \
++}
++
++TEST(x, 4, ceilf);
++TEST(x, 4, floorf);
++TEST(x, 4, nearbyintf);
++TEST(x, 4, rintf);
++TEST(x, 4, roundf);
++TEST(x, 4, roundevenf);
++TEST(x, 4, truncf);
++
++TEST(x, 8, ceilf);
++TEST(x, 8, floorf);
++TEST(x, 8, nearbyintf);
++TEST(x, 8, rintf);
++TEST(x, 8, roundf);
++TEST(x, 8, roundevenf);
++TEST(x, 8, truncf);
++
++TEST(y, 2, ceil);
++TEST(y, 2, floor);
++TEST(y, 2, nearbyint);
++TEST(y, 2, rint);
++TEST(y, 2, round);
++TEST(y, 2, roundeven);
++TEST(y, 2, trunc);
++
++TEST(y, 4, ceil);
++TEST(y, 4, floor);
++TEST(y, 4, nearbyint);
++TEST(y, 4, rint);
++TEST(y, 4, round);
++TEST(y, 4, roundeven);
++TEST(y, 4, trunc);
++
++/* ceil */
++/* { dg-final { scan-assembler "\tvftintrp\.w\.s" } } */
++/* { dg-final { scan-assembler "\tvftintrp\.l\.d" } } */
++/* { dg-final { scan-assembler "\txvftintrp\.w\.s" } } */
++/* { dg-final { scan-assembler "\txvftintrp\.l\.d" } } */
++
++/* floor */
++/* { dg-final { scan-assembler "\tvftintrm\.w\.s" } } */
++/* { dg-final { scan-assembler "\tvftintrm\.l\.d" } } */
++/* { dg-final { scan-assembler "\txvftintrm\.w\.s" } } */
++/* { dg-final { scan-assembler "\txvftintrm\.l\.d" } } */
++
++/* rint and nearbyint
++   nearbyint has been disallowed to raise FE_INEXACT for decades.  */
++/* { dg-final { scan-assembler-times "\tvftint\.w\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\tvftint\.l\.d" 1 } } */
++/* { dg-final { scan-assembler-times "\txvftint\.w\.s" 1 } } */
++/* { dg-final { scan-assembler-times "\txvftint\.l\.d" 1 } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(nearbyint\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(nearbyintf\\)" } } */
++
++/* round: we don't have a corresponding instruction */
++/* { dg-final { scan-assembler "bl\t%plt\\(lround\\)" } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(roundf\\)" } } */
++
++/* roundeven */
++/* { dg-final { scan-assembler "\tvftintrne\.w\.s" } } */
++/* { dg-final { scan-assembler "\tvftintrne\.l\.d" } } */
++/* { dg-final { scan-assembler "\txvftintrne\.w\.s" } } */
++/* { dg-final { scan-assembler "\txvftintrne\.l\.d" } } */
++
++/* trunc */
++/* { dg-final { scan-assembler-not "bl\t%plt\\(trunc\\)" } } */
++/* { dg-final { scan-assembler-not "bl\t%plt\\(truncf\\)" } } */
+-- 
+2.43.0
+
diff --git a/0047-AES-Implement-AES-pattern-matching.patch b/0047-AES-Implement-AES-pattern-matching.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cd983bf2c5ff86e3037b9f0963e6bdc3c9b77fc0
--- /dev/null
+++ b/0047-AES-Implement-AES-pattern-matching.patch
@@ -0,0 +1,3968 @@
+From 53d321d2fe08f69a29527be157d4bcaaefea04ab Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 00812787 
+Date: Wed, 6 Dec 2023 10:46:28 +0300
+Subject: [PATCH 15/18] [AES] Implement AES pattern matching
+
+---
+ gcc/Makefile.in                               |    1 +
+ gcc/common.opt                                |    4 +
+ gcc/config/aarch64/aarch64.cc                 |   24 +
+ gcc/crypto-accel.cc                           | 2415 +++++++++++++++++
+ gcc/doc/tm.texi                               |   29 +
+ gcc/doc/tm.texi.in                            |   12 +
+ gcc/passes.def                                |    1 +
+ gcc/rtl-matcher.h                             |  367 +++
+ gcc/target.def                                |   41 +
+ .../gcc.target/aarch64/aes-decrypt.c          |  478 ++++
+ .../gcc.target/aarch64/aes-encrypt.c          |  443 +++
+ gcc/timevar.def                               |    1 +
+ gcc/tree-pass.h                               |    1 +
+ 13 files changed, 3817 insertions(+)
+ create mode 100644 gcc/crypto-accel.cc
+ create mode 100644 gcc/rtl-matcher.h
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/aes-decrypt.c
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/aes-encrypt.c
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index 45705c1f3..876000bda 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1332,6 +1332,7 @@ OBJS = \
+ 	cgraphunit.o \
+ 	cgraphclones.o \
+ 	combine.o \
++	crypto-accel.o \
+ 	combine-stack-adj.o \
+ 	compare-elim.o \
+ 	context.o \
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 3a5004271..1eb62ada5 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1129,6 +1129,10 @@ Common Var(flag_array_widen_compare) Optimization
+ Extends types for pointers to arrays to improve array comparsion performance.
+ In some extreme situations this may result in unsafe behavior.
+ 
++fcrypto-accel-aes
++Common Var(flag_crypto_accel_aes) Init(0) Optimization
++Perform crypto acceleration AES pattern matching.
++
+ fauto-inc-dec
+ Common Var(flag_auto_inc_dec) Init(1) Optimization
+ Generate auto-inc/dec instructions.
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index fa566dd80..9171d9d56 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -27569,6 +27569,30 @@ is_aarch64_stp_insn (int icode, bool *has_wb)
+ #undef TARGET_IS_STP_INSN
+ #define TARGET_IS_STP_INSN is_aarch64_stp_insn
+ 
++machine_mode
++aarch64_get_v16qi_mode ()
++{
++  return V16QImode;
++}
++
++#undef TARGET_GET_V16QI_MODE
++#define TARGET_GET_V16QI_MODE aarch64_get_v16qi_mode
++
++#undef TARGET_GEN_REV32V16QI
++#define TARGET_GEN_REV32V16QI gen_aarch64_rev32v16qi
++
++#undef TARGET_GEN_AESEV16QI
++#define TARGET_GEN_AESEV16QI gen_aarch64_crypto_aesev16qi
++
++#undef TARGET_GEN_AESDV16QI
++#define TARGET_GEN_AESDV16QI gen_aarch64_crypto_aesdv16qi
++
++#undef TARGET_GEN_AESMCV16QI
++#define TARGET_GEN_AESMCV16QI gen_aarch64_crypto_aesmcv16qi
++
++#undef TARGET_GEN_AESIMCV16QI
++#define TARGET_GEN_AESIMCV16QI gen_aarch64_crypto_aesimcv16qi
++
+ #undef TARGET_STACK_PROTECT_GUARD
+ #define TARGET_STACK_PROTECT_GUARD aarch64_stack_protect_guard
+ 
+diff --git a/gcc/crypto-accel.cc b/gcc/crypto-accel.cc
+new file mode 100644
+index 000000000..f4e810a6b
+--- /dev/null
++++ b/gcc/crypto-accel.cc
+@@ -0,0 +1,2415 @@
++/* Crypto-pattern optimizer.
++   Copyright (C) 2003-2023 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include "config.h"
++#define INCLUDE_VECTOR
++#define INCLUDE_MAP
++#define INCLUDE_SET
++#define INCLUDE_ALGORITHM
++#include "system.h"
++#include "coretypes.h"
++#include "backend.h"
++#include "target.h"
++#include "rtl.h"
++#include "tree.h"
++#include "df.h"
++#include "memmodel.h"
++#include "optabs.h"
++#include "regs.h"
++#include "emit-rtl.h"
++#include "recog.h"
++#include "cfgrtl.h"
++#include "cfgcleanup.h"
++#include "expr.h"
++#include "tree-pass.h"
++#include "rtl-matcher.h"
++
++/* Basic AES table descryption.  */
++struct aes_table
++{
++  /* Number of elements per table.  */
++  static const unsigned int table_nelts = 256;
++  /* Number of tables.  */
++  static const unsigned int basic_tables_num = 4;
++  /* Number of rounds.  */
++  static const unsigned int rounds_num = 4;
++  /* Common ID for wrong table.  */
++  static const unsigned int BAD_TABLE = -1;
++
++  typedef const unsigned int table_type[table_nelts];
++  typedef table_type *table_map[basic_tables_num];
++
++  template
++  static bool is_basic_table (tree ctor, const T ethalon[table_nelts])
++    {
++      if (TREE_CODE (ctor) != CONSTRUCTOR
++	  ||CONSTRUCTOR_NELTS (ctor) != table_nelts)
++	return false;
++
++      unsigned ix;
++      tree val;
++      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (ctor), ix, val)
++	if (TREE_INT_CST_LOW (val) != ethalon[ix])
++	  return false;
++      return true;
++    }
++
++  static unsigned check_table (tree ctor,
++			       table_map tables)
++    {
++      for (unsigned i = 0; i < 4; ++i)
++	if (is_basic_table (ctor, *tables[i]))
++	  return i;
++      return BAD_TABLE;
++    }
++};
++
++/* AES encryption info.  */
++struct aes_encrypt_table : aes_table
++{
++  typedef enum
++  {
++    TE0,
++    TE1,
++    TE2,
++    TE3,
++    BAD_TABLE = aes_table::BAD_TABLE
++  } table_entry;
++
++  static table_type Te0;
++  static table_type Te1;
++  static table_type Te2;
++  static table_type Te3;
++
++  static table_map tables;
++  static table_entry rounds[rounds_num];
++  static table_entry final_rounds[rounds_num];
++
++  static table_entry get_table_id (tree ctor)
++    {
++      return static_cast (check_table (ctor, tables));
++    }
++};
++
++/* AES decryption info.  */
++struct aes_decrypt_table : aes_table
++{
++  typedef enum
++  {
++    TD0,
++    TD1,
++    TD2,
++    TD3,
++    TD4,
++    BAD_TABLE = aes_table::BAD_TABLE
++  } table_entry;
++
++  static table_type Td0;
++  static table_type Td1;
++  static table_type Td2;
++  static table_type Td3;
++
++  static table_map tables;
++  static table_entry rounds[rounds_num];
++  static table_entry final_rounds[rounds_num];
++
++  static const unsigned char Td4[table_nelts];
++
++  /* TD4 requires special handler due to type shrinking optimizations.  */
++  static bool is_td4 (tree ctor)
++    {
++      if (is_basic_table (ctor, Td4))
++	return true;
++
++      if (TREE_CODE (ctor) != STRING_CST
++	  || TREE_STRING_LENGTH (ctor) != table_nelts)
++	return false;
++
++      const unsigned char *p
++	= (const unsigned char *) TREE_STRING_POINTER (ctor);
++      for (int i = 0; i < TREE_STRING_LENGTH (ctor); ++i)
++	if (p[i] != Td4[i])
++	  return false;
++
++      return true;
++    }
++
++  static table_entry get_table_id (tree ctor)
++    {
++      unsigned int res = check_table (ctor, tables);
++      if (res == aes_table::BAD_TABLE
++	  && is_td4 (ctor))
++	return TD4;
++      return static_cast (res);
++    }
++};
++
++/* Basic tables info.  */
++aes_encrypt_table::table_map aes_encrypt_table::tables
++  = { &Te0, &Te1, &Te2, &Te3 };
++aes_decrypt_table::table_map aes_decrypt_table::tables
++  = { &Td0, &Td1, &Td2, &Td3 };
++
++/* Round tables permutations info.  */
++aes_encrypt_table::table_entry aes_encrypt_table::rounds[]
++  = {TE0, TE1, TE2, TE3};
++aes_decrypt_table::table_entry aes_decrypt_table::rounds[]
++  = {TD0, TD1, TD2, TD3};
++aes_encrypt_table::table_entry aes_encrypt_table::final_rounds[]
++  = {TE2, TE3, TE0, TE1};
++aes_decrypt_table::table_entry aes_decrypt_table::final_rounds[]
++  = {TD4, TD4, TD4, TD4};
++
++aes_encrypt_table::table_type aes_encrypt_table::Te0 = {
++    0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
++    0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
++    0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
++    0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
++    0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
++    0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
++    0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
++    0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
++    0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
++    0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
++    0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
++    0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
++    0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
++    0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
++    0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
++    0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
++    0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
++    0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
++    0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
++    0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
++    0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
++    0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
++    0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
++    0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
++    0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
++    0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
++    0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
++    0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
++    0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
++    0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
++    0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
++    0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
++    0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
++    0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
++    0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
++    0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
++    0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
++    0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
++    0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
++    0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
++    0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
++    0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
++    0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
++    0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
++    0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
++    0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
++    0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
++    0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
++    0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
++    0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
++    0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
++    0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
++    0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
++    0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
++    0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
++    0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
++    0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
++    0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
++    0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
++    0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
++    0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
++    0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
++    0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
++    0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
++};
++
++aes_encrypt_table::table_type aes_encrypt_table::Te1 = {
++    0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
++    0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
++    0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
++    0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
++    0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
++    0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
++    0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
++    0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
++    0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
++    0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
++    0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
++    0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
++    0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
++    0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
++    0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
++    0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
++    0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
++    0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
++    0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
++    0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
++    0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
++    0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
++    0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
++    0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
++    0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
++    0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
++    0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
++    0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
++    0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
++    0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
++    0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
++    0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
++    0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
++    0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
++    0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
++    0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
++    0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
++    0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
++    0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
++    0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
++    0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
++    0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
++    0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
++    0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
++    0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
++    0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
++    0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
++    0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
++    0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
++    0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
++    0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
++    0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
++    0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
++    0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
++    0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
++    0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
++    0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
++    0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
++    0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
++    0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
++    0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
++    0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
++    0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
++    0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
++};
++
++aes_encrypt_table::table_type aes_encrypt_table::Te2 = {
++    0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
++    0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
++    0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
++    0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
++    0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
++    0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
++    0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
++    0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
++    0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
++    0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
++    0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
++    0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
++    0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
++    0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
++    0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
++    0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
++    0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
++    0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
++    0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
++    0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
++    0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
++    0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
++    0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
++    0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
++    0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
++    0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
++    0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
++    0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
++    0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
++    0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
++    0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
++    0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
++    0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
++    0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
++    0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
++    0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
++    0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
++    0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
++    0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
++    0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
++    0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
++    0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
++    0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
++    0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
++    0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
++    0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
++    0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
++    0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
++    0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
++    0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
++    0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
++    0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
++    0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
++    0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
++    0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
++    0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
++    0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
++    0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
++    0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
++    0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
++    0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
++    0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
++    0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
++    0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
++};
++
++aes_encrypt_table::table_type aes_encrypt_table::Te3 = {
++    0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
++    0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
++    0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
++    0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
++    0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
++    0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
++    0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
++    0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
++    0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
++    0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
++    0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
++    0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
++    0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
++    0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
++    0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
++    0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
++    0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
++    0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
++    0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
++    0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
++    0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
++    0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
++    0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
++    0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
++    0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
++    0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
++    0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
++    0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
++    0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
++    0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
++    0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
++    0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
++    0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
++    0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
++    0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
++    0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
++    0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
++    0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
++    0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
++    0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
++    0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
++    0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
++    0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
++    0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
++    0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
++    0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
++    0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
++    0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
++    0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
++    0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
++    0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
++    0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
++    0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
++    0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
++    0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
++    0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
++    0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
++    0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
++    0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
++    0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
++    0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
++    0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
++    0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
++    0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
++};
++
++aes_decrypt_table::table_type aes_decrypt_table::Td0 = {
++    0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
++    0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
++    0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
++    0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
++    0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
++    0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
++    0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
++    0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
++    0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
++    0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
++    0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
++    0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
++    0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
++    0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
++    0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
++    0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
++    0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
++    0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
++    0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
++    0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
++    0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
++    0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
++    0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
++    0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
++    0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
++    0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
++    0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
++    0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
++    0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
++    0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
++    0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
++    0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
++    0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
++    0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
++    0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
++    0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
++    0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
++    0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
++    0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
++    0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
++    0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
++    0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
++    0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
++    0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
++    0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
++    0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
++    0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
++    0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
++    0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
++    0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
++    0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
++    0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
++    0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
++    0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
++    0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
++    0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
++    0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
++    0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
++    0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
++    0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
++    0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
++    0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
++    0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
++    0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
++};
++
++aes_decrypt_table::table_type aes_decrypt_table::Td1 = {
++    0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
++    0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
++    0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
++    0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
++    0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
++    0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
++    0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
++    0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
++    0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
++    0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
++    0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
++    0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
++    0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
++    0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
++    0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
++    0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
++    0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
++    0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
++    0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
++    0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
++    0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
++    0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
++    0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
++    0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
++    0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
++    0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
++    0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
++    0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
++    0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
++    0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
++    0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
++    0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
++    0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
++    0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
++    0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
++    0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
++    0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
++    0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
++    0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
++    0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
++    0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
++    0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
++    0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
++    0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
++    0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
++    0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
++    0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
++    0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
++    0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
++    0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
++    0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
++    0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
++    0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
++    0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
++    0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
++    0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
++    0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
++    0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
++    0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
++    0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
++    0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
++    0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
++    0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
++    0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
++};
++
++aes_decrypt_table::table_type aes_decrypt_table::Td2 = {
++    0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
++    0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
++    0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
++    0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
++    0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
++    0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
++    0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
++    0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
++    0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
++    0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
++    0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
++    0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
++    0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
++    0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
++    0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
++    0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
++    0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
++    0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
++    0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
++    0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
++    0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
++    0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
++    0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
++    0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
++    0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
++    0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
++    0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
++    0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
++    0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
++    0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
++    0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
++    0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
++    0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
++    0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
++    0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
++    0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
++    0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
++    0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
++    0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
++    0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
++    0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
++    0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
++    0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
++    0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
++    0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
++    0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
++    0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
++    0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
++    0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
++    0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
++    0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
++    0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
++    0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
++    0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
++    0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
++    0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
++    0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
++    0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
++    0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
++    0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
++    0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
++    0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
++    0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
++    0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
++};
++
++aes_decrypt_table::table_type aes_decrypt_table::Td3 = {
++    0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
++    0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
++    0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
++    0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
++    0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
++    0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
++    0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
++    0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
++    0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
++    0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
++    0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
++    0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
++    0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
++    0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
++    0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
++    0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
++    0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
++    0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
++    0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
++    0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
++    0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
++    0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
++    0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
++    0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
++    0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
++    0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
++    0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
++    0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
++    0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
++    0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
++    0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
++    0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
++    0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
++    0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
++    0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
++    0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
++    0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
++    0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
++    0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
++    0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
++    0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
++    0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
++    0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
++    0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
++    0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
++    0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
++    0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
++    0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
++    0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
++    0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
++    0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
++    0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
++    0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
++    0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
++    0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
++    0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
++    0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
++    0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
++    0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
++    0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
++    0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
++    0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
++    0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
++    0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
++};
++
++const unsigned char aes_decrypt_table::Td4[table_nelts] = {
++    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
++    0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
++    0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
++    0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
++    0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
++    0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
++    0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
++    0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
++    0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
++    0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
++    0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
++    0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
++    0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
++    0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
++    0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
++    0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
++    0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
++    0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
++    0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
++    0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
++    0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
++    0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
++    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
++    0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
++    0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
++    0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
++    0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
++    0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
++    0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
++    0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
++    0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
++    0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
++};
++
++/* In-round shifts info.  */
++static const unsigned HOST_WIDE_INT shift_csts[4] = {24, 16, 8, 0};
++
++/* Check if the pattern is plus-const.  Helper for memref analysis.  */
++static bool
++plus_const_int_p (rtx op)
++{
++  return GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1));
++}
++
++/* Obtain info about memory access.  */
++static bool
++decompose_mem (rtx mem, rtx &base, unsigned HOST_WIDE_INT &offset)
++{
++  address_info info;
++  decompose_mem_address (&info, mem);
++  if (!info.base)
++    return false;
++
++  base = *info.base;
++
++  rtx op = XEXP (mem, 0);
++  if (plus_const_int_p (op))
++    offset = UINTVAL (XEXP (op, 1));
++  /* TODO: WRONG IN GENERAL CASE: we cannot guarantee that the offsets were not
++     changed.  */
++  else if ((GET_CODE (op) == PRE_MODIFY && plus_const_int_p (XEXP (op, 1)))
++	   || REG_P (op))
++    offset = 0;
++  else
++    return false;
++
++  return true;
++}
++
++/* Check if the regs in stmt are same as the provided ones.  */
++static bool
++cmp_regs_in_stmt (rtx stmt, rtx lhs, rtx rhs)
++{
++  return (XEXP (stmt, 0) == lhs) && (XEXP (stmt, 1) == rhs);
++}
++
++/* AES key info.  Inhereted from mem_term_info to be used inside
++   matchers without any unnecessary casts.  */
++struct aes_key : mem_term_info
++{
++  aes_key ()
++    {}
++  aes_key (void *)
++    : mem_term_info (NULL, NULL_RTX)
++    {}
++  aes_key (const mem_term_info &m)
++    : mem_term_info (m)
++    {}
++
++  /* Check if the key has the same base pointer origin as another one.
++     This check is required due to some possible CSE optimizations applied on
++     pointers before this pass.  */
++  bool has_same_origin (const aes_key &other, rtx_insn *use_point) const
++    {
++      /* Simple case: the pointer is same.  */
++      if (src == other.src)
++	return true;
++
++      if (!use_point)
++	return false;
++
++      basic_block curr_bb = BLOCK_FOR_INSN (use_point);
++      if (!single_pred_p (curr_bb)
++	  || modified_between_p (src, BB_HEAD (curr_bb), use_point)
++	  || modified_between_p (other.src, BB_HEAD (curr_bb), use_point))
++	return false;
++
++      edge e = single_pred_edge (curr_bb);
++      rtx_insn *jump = BB_END (e->src);
++      if (!any_condjump_p (jump))
++	return false;
++
++      basic_block from_bb = BLOCK_FOR_INSN (jump);
++      if (EDGE_COUNT (from_bb->succs) != 2)
++	return false;
++
++      /* Need proof that the sources are equal: try to get it from
++	 terminating condition.  */
++      rtx cond = XEXP (SET_SRC (pc_set (jump)), 0);
++      rtx_code code = GET_CODE (cond);
++      if (!((code == EQ && EDGE_SUCC (from_bb, 0) == e)
++	    || (code == NE && EDGE_SUCC (from_bb, 1) == e)))
++	return false;
++
++      rtx arg1 = XEXP (cond, 0);
++      if (XEXP (cond, 1) != CONST0_RTX (GET_MODE (arg1))
++	  || COMPARISON_P (arg1))
++	return false;
++
++      rtx_insn *cmp_insn = get_single_def_insn (jump, arg1);
++      rtx cmp;
++      if (!cmp_insn || !(cmp = get_single_set_op (cmp_insn)))
++	return false;
++
++      if (!(cmp_regs_in_stmt (cmp, src, other.src)
++	    || cmp_regs_in_stmt (cmp, other.src, src)))
++	return false;
++
++      return true;
++    }
++};
++
++/* AES basic state input info.  Inhereted from mem_term_info
++   to use it in matchers without any unnecessary casts.  */
++struct state_input_info : mem_term_info
++{
++  state_input_info ()
++    {}
++  state_input_info (const aes_key &k)
++    : mem_term_info (k), is_key (true)
++    {}
++  state_input_info (const mem_term_info &m)
++    : mem_term_info (m), is_key (false)
++    {}
++
++  bool is_key;
++
++  bool verify (const state_input_info *prev) const
++    {
++      if (!prev)
++	return true;
++
++      return BLOCK_FOR_INSN (loc) == BLOCK_FOR_INSN (prev->loc);
++    }
++};
++
++/* Memory matcher to filter only suitable memory instructions.  */
++struct mem_matcher : matcher_term
++{
++  static bool match (rtx_insn *insn, holder_type &m)
++    {
++      rtx src = get_single_set_op (insn);
++      return src && match (src, insn, m);
++    }
++
++  static bool match (rtx src, rtx_insn *insn, holder_type &m)
++    {
++      if (!MEM_P (src))
++	return false;
++
++      mem_term_info info (NULL, NULL_RTX);
++      if (!decompose_mem (src, info.src, info.offset))
++	return false;
++
++      info.loc = insn;
++      m[0] = info;
++      return true;
++    }
++};
++
++/* AES entry input info.  Enhanced from state input due to ideological
++   similarities.  */
++struct input_info : state_input_info
++{
++  input_info ()
++    {}
++  input_info (const mem_term_info &m, unsigned HOST_WIDE_INT shift_cst)
++    : state_input_info (m), shift_cst (shift_cst)
++    {}
++  input_info (const aes_key &k)
++    : state_input_info (k)
++    {}
++
++  unsigned HOST_WIDE_INT shift_cst;
++
++  /* Input info is sorted by references offsets.  */
++  bool operator < (const input_info &rhs) const
++    {
++      return offset < rhs.offset;
++    }
++
++  std::pair input () const
++    {
++      return std::make_pair (src, offset);
++    }
++
++  bool verify (const input_info *prev, unsigned i) const
++    {
++      if (!state_input_info::verify (prev))
++	return false;
++
++      /* Previous state should reference the previous element
++	 of the same buffer.  */
++      if (prev && (src != prev->src || offset != prev->offset + 1))
++	return false;
++
++      /* State should use the corresponding shift constant.  */
++      return shift_csts[i] == shift_cst;
++    }
++
++  static bool finalize (rtx_insn *insn, input_info *m)
++    {
++      typedef unop_matcher zext_matcher;
++
++      zext_matcher::holder_type zext;
++      if (zext_matcher::match (insn, zext))
++	{
++	  *m = input_info (zext[0], 0);
++	  return true;
++	}
++
++      typedef binop_matcher >
++	shifted_variant;
++      shifted_variant::holder_type lsh;
++      if (!shifted_variant::match (insn, lsh))
++	return false;
++
++      gcc_assert (CONST_INT_P (lsh[1].src));
++      *m = input_info (lsh[0], UINTVAL (lsh[1].src));
++      return true;
++    }
++};
++
++/* Check if the corresponding constants combinations may be used for
++   AES table access.  */
++static bool
++verify_table_access (unsigned HOST_WIDE_INT shift_cst,
++		     unsigned HOST_WIDE_INT and_cst = 0xFF,
++		     bool and_present = true)
++{
++  if (and_cst != 0xFF)
++    return false;
++
++  switch (shift_cst)
++    {
++    case 0:
++    case 8:
++    case 16:
++      return and_present;
++    case 24:
++      return true;
++    default:
++      return false;
++    }
++}
++
++/* AES table reference description.  */
++template
++struct aes_table_ref
++{
++  rtx_insn *insn;
++  rtx_insn *output_insn;
++  unsigned HOST_WIDE_INT lsr_cst;
++  rtx reg;
++  rtx output;
++  typename TABLE_T::table_entry itable;
++  bool is_final;
++
++  bool verify (unsigned i) const
++    {
++      typename TABLE_T::table_entry (ðalon)[TABLE_T::rounds_num]
++	= is_final ? TABLE_T::final_rounds : TABLE_T::rounds;
++      return lsr_cst == shift_csts[i] && itable == ethalon[i];
++    }
++};
++
++/* Check the minimal requirements of the pattern to be a table reference
++   and wrap the table id getter function.  */
++template
++static typename T::table_entry
++check_table (rtx mem)
++{
++  tree expr = MEM_EXPR (mem);
++  if (!expr || TREE_CODE (expr) != ARRAY_REF)
++    return T::BAD_TABLE;
++
++  tree decl = TREE_OPERAND (expr, 0);
++  if (!decl || !DECL_P (decl) || !TREE_READONLY (decl))
++    return T::BAD_TABLE;
++
++  tree ctor = DECL_INITIAL (decl);
++  if (!ctor)
++    return T::BAD_TABLE;
++
++  return T::get_table_id (ctor);
++}
++
++/* Simplified memory info.  Used for simplier table ref analysis.  */
++struct simplified_mem_info
++{
++  rtx base_reg;
++  rtx index;
++};
++
++/* Try to obtain table reference info.  */
++static bool
++decompose_tref_mem_address (simplified_mem_info &info, rtx mem)
++{
++  address_info addr_info;
++  decompose_mem_address (&addr_info, mem);
++  if (!addr_info.base || !addr_info.index)
++    return false;
++
++  info.base_reg = *addr_info.base;
++  info.index = *addr_info.index;
++
++  if (!REG_P (info.base_reg))
++    return false;
++
++  if (addr_info.mode == SImode)
++    {
++      if (GET_CODE (info.index) != MULT)
++	return false;
++
++      rtx cst = XEXP (info.index, 1);
++      if (!CONST_INT_P (cst) || UINTVAL (cst) != 4)
++	return false;
++
++      info.index = XEXP (info.index, 0);
++      return true;
++    }
++
++  return (addr_info.mode == QImode);
++}
++
++/* Find the possible final output instruction.  */
++template
++static rtx_insn *
++get_possible_final_output (rtx_insn *insn, rtx reg,
++			   unsigned HOST_WIDE_INT shift_cst,
++			   typename TABLE_T::table_entry itable);
++
++/* Specialize the function for AES encryption.  The output is AND instruction
++   with propper constant.  */
++template<>
++rtx_insn *
++get_possible_final_output (rtx_insn *insn, rtx reg,
++					      unsigned HOST_WIDE_INT shift_cst,
++					      aes_encrypt_table::table_entry)
++{
++  rtx_insn *out = get_single_use_insn (insn, reg);
++  if (!out)
++    return NULL;
++
++  rtx cst_val = get_op_const_cst (out);
++  if (!cst_val)
++    return NULL;
++
++  unsigned HOST_WIDE_INT ethalon;
++  switch (shift_cst)
++    {
++    case 24:
++      ethalon = 0xffffffffff000000;
++      break;
++    case 16:
++      ethalon = 0xff0000;
++      break;
++    case 8:
++      ethalon = 0xff00;
++      break;
++    case 0:
++      ethalon = 0xff;
++      break;
++    default:
++      gcc_unreachable ();
++    }
++
++  return UINTVAL (cst_val) == ethalon ? out : NULL;
++}
++
++/* Specialize the function for AES decryption.  The output is ASHIFT instruction
++   with propper constant or direct reference to TD4 table.
++
++   TODO: TD4 check might be done here for all the cases.  However, now it is not
++   done here to make decryption and encryption matching
++   more general in common.  */
++template<>
++rtx_insn *
++get_possible_final_output (rtx_insn *insn, rtx reg,
++					      unsigned HOST_WIDE_INT shift_cst,
++					      aes_decrypt_table::table_entry it)
++{
++  rtx_insn *out = get_single_use_insn (insn, reg);
++  if (!out)
++    return NULL;
++
++  rtx cst_val = get_op_const_cst (out);
++  if (!cst_val)
++    // no shift case
++    return it == aes_decrypt_table::TD4 ? insn : NULL;
++
++  return UINTVAL (cst_val) == shift_cst ? out : NULL;
++}
++
++typedef arg_op_matcher reg_matcher;
++
++/* Helper that matches suitable AES table references.  */
++template
++class tref_matcher
++{
++  /* (reg >> cst) matcher.  Helper.  */
++  typedef binop_matcher > table_access;
++  /* zext (reg >> cst) matcher.  Used for TABLE[(val >> 24)] variant.  */
++  typedef unop_matcher direct;
++  /* zext ((reg >> cst1) & cst2) matcher.  Used for
++     TABLE[(val >> (16|8)) & 0xff] variant.  */
++  typedef unop_matcher > > shifted;
++  /* zext (reg & cst) matcher.  Used for TABLE[val & 0xff] variant.  */
++  typedef unop_matcher > > noshift;
++
++  std::map table_alias;
++
++  bool finalize (aes_table_ref &tref,
++		 minimal_term_info &input_info,
++		 minimal_term_info *shift_info = NULL,
++		 minimal_term_info *mask_info = NULL)
++    {
++      gcc_assert (REG_P (input_info.src));
++      gcc_assert (!shift_info || CONST_INT_P (shift_info->src));
++      gcc_assert (!mask_info || CONST_INT_P (mask_info->src));
++
++      unsigned HOST_WIDE_INT shift
++	= shift_info ? UINTVAL (shift_info->src) : 0;
++      unsigned HOST_WIDE_INT mask
++	= mask_info ? UINTVAL (mask_info->src) : 0xFF;
++      if (!verify_table_access (shift, mask, mask_info))
++	return false;
++
++      tref.insn = input_info.loc;
++      tref.reg = input_info.src;
++      tref.lsr_cst = shift;
++      return true;
++    }
++
++  bool match (rtx_insn *insn, rtx index, aes_table_ref &tref)
++    {
++      direct::holder_type direct_res;
++      if (direct::match (index, insn, direct_res))
++	return finalize (tref, direct_res[0], &direct_res[1]);
++
++      shifted::holder_type shifted_res;
++      if (shifted::match (index, insn, shifted_res))
++	return finalize (tref, shifted_res[0],
++			 &shifted_res[1], &shifted_res[2]);
++
++      noshift::holder_type noshift_res;
++      return noshift::match (index, insn, noshift_res)
++	&& finalize (tref, noshift_res[0], NULL, &noshift_res[1]);
++    }
++
++public:
++  bool match (rtx_insn *insn, aes_table_ref &tref)
++    {
++      rtx mem = get_single_set_op (insn);
++      if (!mem && (mem = get_single_set_op (insn)))
++	mem = XEXP (mem, 0);
++
++      rtx dst = get_single_set_dst (insn);
++      if (!mem || !MEM_P (mem) || !dst || GET_MODE (dst) != SImode)
++	return false;
++
++      simplified_mem_info info;
++      if (!decompose_tref_mem_address (info, mem)
++	  || !match (insn, info.index, tref))
++	return false;
++
++      typename TABLE_T::table_entry itable;
++      if (!table_alias.count (info.base_reg))
++	{
++	  itable = check_table (mem);
++	  if (itable == TABLE_T::BAD_TABLE)
++	    return false;
++	  table_alias[info.base_reg] = itable;
++	}
++      else
++	itable = table_alias.at (info.base_reg);
++
++      if (rtx_insn *out = get_possible_final_output (insn, dst,
++							      tref.lsr_cst,
++							      itable))
++	{
++	  tref.is_final = true;
++	  tref.output_insn = out;
++	  tref.output = NULL_RTX;
++	}
++      else
++	{
++	  tref.is_final = false;
++	  tref.output_insn = insn;
++	  tref.output = dst;
++	}
++
++      tref.itable = itable;
++      return true;
++    }
++};
++
++/* AES stage description.  Required for some specializations
++   for curtain rounds.  */
++typedef enum { INPUT, MIDDLE, FINAL } aes_stage;
++
++/* AES entity description.  It can be both round or state inside round.
++   It provides interface for unified analysis between blocks of 4 parts:
++   round -> 4 states -> 4 * 4 arguments.  */
++template
++struct aes_entity
++{
++  aes_key key;
++  std::set entries;
++  rtx_insn *loc;
++
++  aes_entity ()
++    : key (NULL), loc (NULL)
++    {}
++
++  /* Push new entry to the entity.  */
++  bool push_entry (const ENTRY_T &v)
++    {
++      if (entries.size () == 4)
++	return false;
++
++      entries.insert (v);
++      return true;
++    }
++
++  /* The entities are sorted by key offset.  */
++  bool operator < (const aes_entity &rhs) const
++    {
++      return key.offset < rhs.key.offset;
++    }
++
++  /* Verify that all of the entries are correct within their positions inside
++     the entity.  */
++  bool finalize ()
++    {
++      if (entries.size () != 4)
++	return false;
++
++      unsigned i = 0;
++      const ENTRY_T *prev = NULL;
++      for (typename std::set::iterator it = entries.begin ();
++	   it != entries.end (); prev = &*it++, ++i)
++	if (!it->verify (prev, i))
++	  return false;
++
++      loc = entries.begin ()->loc;
++      return true;
++    }
++};
++
++/* Check the correctness of input regs permutations.  */
++template
++static bool
++check_input_regs (const std::vector &curr,
++		  const std::vector &prev);
++
++/* Specialize the function for AES encryption.  */
++template<>
++bool
++check_input_regs (const std::vector &curr,
++				 const std::vector &prev)
++{
++  gcc_assert (curr.size () == 4 && prev.size () == 4);
++  unsigned idx[4] = { 1, 2, 3, 0 };
++  for (int i = 0; i < 4; ++i)
++    if (curr[i] != prev[idx[i]])
++      return false;
++  return true;
++}
++
++/* Specialize the function for AES decryption.  */
++template<>
++bool
++check_input_regs (const std::vector &curr,
++				 const std::vector &prev)
++{
++  gcc_assert (curr.size () == 4 && prev.size () == 4);
++  unsigned idx[4] = { 3, 0, 1, 2 };
++  for (int i = 0; i < 4; ++i)
++    if (curr[i] != prev[idx[i]])
++      return false;
++  return true;
++}
++
++/* Basic descryption of state input.  */
++template
++struct state_input
++{
++  typedef std::vector type;
++
++  static void finalize (type &in, rtx v)
++    {
++      in.push_back (v);
++    }
++
++  template
++  static bool verify (const type &lhs, const type &rhs)
++    {
++      return check_input_regs (lhs, rhs);
++    }
++};
++
++/* Input round state uses special input.  */
++template<>
++struct state_input
++{
++  typedef std::pair type;
++
++  static void finalize (type &in, const type &v)
++    {
++      in = v;
++      // Order is inverted
++      in.second -= 3;
++    }
++
++  template
++  static bool verify (const type &lhs, const type &rhs)
++    {
++      return lhs.first == rhs.first
++	&& lhs.second == rhs.second + 4;
++    }
++};
++
++/* Basic descryption of state output.  */
++template
++struct state_output
++{
++  typedef rtx type;
++
++  static bool verify (const type &, const type &)
++    {
++      return true;
++    }
++};
++
++/* Final round state generates special output.  */
++template<>
++struct state_output
++{
++  typedef std::pair type;
++
++  static bool verify (const type &lhs, const type &rhs)
++    {
++      return lhs.first == rhs.first
++	&& lhs.second == rhs.second + 4;
++    }
++};
++
++/* Basic descryption of round input.  */
++template
++struct round_input
++{
++  typedef std::vector type;
++};
++
++/* Input round uses special input just as its state.  */
++template<>
++struct round_input
++{
++  typedef std::pair type;
++};
++
++/* Basic descryption of round output.  */
++template
++struct round_output
++{
++  typedef std::vector type;
++
++  template
++  static void finalize (type &out, const T &v)
++    {
++      gcc_assert (v.size () == 4);
++      for (typename T::const_iterator it = v.begin (); it != v.end (); ++it)
++	out.push_back (it->output);
++    }
++
++  template
++  static void reorder (type &)
++    {}
++};
++
++/* Reorder output for AES decryption: the order is changed compared to
++   AES encryption.  */
++template<>
++template<>
++void round_output::reorder (type &out)
++{
++  gcc_assert (out.size () == 4);
++  std::swap (out[1], out[3]);
++}
++
++template<>
++template<>
++void round_output::reorder (type &out)
++{
++  round_output::reorder (out);
++}
++
++/* Final round generates special output.  */
++template<>
++struct round_output : state_output
++{
++  template
++  static void finalize (type &out, const T &v)
++    {
++      gcc_assert (v.size () == 4);
++      out = v.begin ()->output;
++    }
++
++  template
++  static void reorder (type &)
++    {}
++};
++
++/* AES state descryption.  */
++template
++struct aes_state : aes_entity
++{
++  typedef aes_entity base_entity;
++
++  typename state_input::type input;
++  typename state_output::type output;
++
++  aes_state ()
++    : base_entity ()
++    {}
++
++  void set_output (const typename state_output::type &o)
++    {
++      output = o;
++    }
++
++  bool push_entry (const ENTRY_T &v)
++    {
++      if (!v.is_key)
++	return base_entity::push_entry (v);
++
++      if (this->key.src)
++	return false;
++
++      this->key = v;
++      return true;
++    }
++
++  /* Verify if the state is correct within its position in round.  */
++  bool verify (const aes_state *prev, unsigned) const
++    {
++      if (!prev)
++	return true;
++
++      if (!this->key.has_same_origin (prev->key, this->loc)
++	  || this->key.offset != prev->key.offset + 4
++	  || BLOCK_FOR_INSN (this->loc) != BLOCK_FOR_INSN (prev->loc))
++	return false;
++
++      return state_input::template verify (input, prev->input)
++	&& state_output::verify (output, prev->output);
++    }
++
++  /* Check if the entries of the state are correct and finalize stored info.  */
++  bool finalize ()
++    {
++      if (!base_entity::finalize ())
++	return false;
++
++      for (typename std::set::iterator it = this->entries.begin ();
++	   it != this->entries.end (); ++it)
++	state_input::finalize (input, it->input ());
++
++      return true;
++    }
++};
++
++/* AES round descryption.  */
++template
++struct aes_round : aes_entity, STAGE, K>
++{
++  typedef aes_entity, STAGE, K> base_entity;
++
++  typename round_input::type input;
++  typename round_output::type output;
++
++  /* Check if the states are correct and finalize stored info.  */
++  bool finalize ()
++    {
++      if (!base_entity::finalize ())
++	return false;
++
++      input = this->entries.begin ()->input;
++      this->key = this->entries.begin ()->key;
++
++      round_output::finalize (output, this->entries);
++      round_output::template reorder (output);
++
++      return true;
++    }
++};
++
++template
++class aes_optimizer;
++
++/* AES round input info.  Used to find and store info about
++   table references.
++
++   Must be inited and finalized before and after usage.  */
++template
++struct round_input_info : state_input_info
++{
++  typedef typename aes_optimizer::table_ref_map tref_map;
++
++  round_input_info ()
++    {}
++  round_input_info (rtx_insn *insn, const aes_table_ref *tref)
++    : state_input_info (mem_term_info (insn, NULL_RTX)), tref (tref)
++    {}
++  round_input_info (const aes_key &k)
++    : state_input_info (k)
++    {}
++
++  rtx input () const
++    {
++      return tref->reg;
++    }
++
++  rtx output () const
++    {
++      return tref->output;
++    }
++
++  /* Table references are sorted by shift constants.
++     TODO: probably sort by key offset?  */
++  bool operator < (const round_input_info &rhs) const
++    {
++      return tref->lsr_cst > rhs.tref->lsr_cst;
++    }
++
++  bool verify (const round_input_info *prev, unsigned i) const
++    {
++      return state_input_info::verify (prev) && tref->verify (i);
++    }
++
++  static bool finalize (rtx_insn *insn, round_input_info *m)
++    {
++      if (checked_p->count (insn))
++	return false;
++
++      typename tref_map::const_iterator it = table_refs_p->find (insn);
++      if (it == table_refs_p->end ())
++	return false;
++
++      m[0] = round_input_info (insn, &it->second);
++      return true;
++    }
++
++  const aes_table_ref *tref;
++
++  static const tref_map *table_refs_p;
++  static const std::set *checked_p;
++
++  /* Store lookup table references.  */
++  static void init (const tref_map &t, const std::set &c)
++    {
++      gcc_assert (!table_refs_p && !checked_p);
++      table_refs_p = &t;
++      checked_p = &c;
++    }
++
++  /* Remove lookup table references.  */
++  static void fin ()
++    {
++      gcc_assert (table_refs_p && checked_p);
++      table_refs_p = NULL;
++      checked_p = NULL;
++    }
++};
++
++template
++const typename aes_optimizer::table_ref_map *
++round_input_info::table_refs_p = NULL;
++
++template
++const std::set *
++round_input_info::checked_p = NULL;
++
++/* AES encryption/decryption optimizer.  */
++template
++class aes_optimizer
++{
++public:
++  typedef std::map > table_ref_map;
++
++  /* AES states typedefs.  */
++  typedef aes_state aes_input_state;
++  typedef aes_state, MIDDLE, T> aes_body_state;
++  typedef aes_state, FINAL, T> aes_final_state;
++
++  /* AES rounds typedefs.  */
++  typedef aes_round aes_input_round;
++  typedef aes_round, MIDDLE, T> aes_body_round;
++  typedef aes_round, FINAL, T> aes_final_round;
++
++  bool run ();
++
++private:
++  bool collect_aes_lookup_tables ();
++  bool form_rounds ();
++  bool find_aes_init_round ();
++  bool collect_state (rtx_insn * insn, aes_body_state &state,
++		      std::set &checked);
++  bool find_aes_rounds ();
++  bool collect_final_round (rtx_insn *insn, aes_final_state &state,
++			    std::set &checked);
++  bool find_aes_final_round ();
++  bool check_aes_pattern ();
++  void erase_unused_rounds (std::set *> &used);
++
++  bool gen_aes_code ();
++  bool gen_init_round ();
++  bool gen_round (const aes_body_round &round);
++  bool gen_final_round ();
++
++  rtx gen_or_get_vreg (const std::vector &vec);
++  rtx get_vreg (const std::vector &vec);
++  rtx gen_vreg (const std::vector &vec);
++
++  table_ref_map table_refs;
++  table_ref_map final_table_refs;
++
++  aes_input_round input_round;
++  std::map, aes_body_round> rounds;
++  aes_final_round final_round;
++
++  std::map, rtx> vec_regs;
++  std::vector to_delete;
++};
++
++/* Find all the AES table references in function.  */
++template
++bool
++aes_optimizer::collect_aes_lookup_tables ()
++{
++  basic_block bb;
++  rtx_insn *insn;
++
++  tref_matcher m;
++  FOR_EACH_BB_FN (bb, cfun)
++    FOR_BB_INSNS (bb, insn)
++      {
++	aes_table_ref tref;
++	if (!m.match (insn, tref))
++	  continue;
++
++	if (!tref.is_final)
++	  table_refs[insn] = tref;
++	else
++	  final_table_refs[tref.output_insn] = tref;
++      }
++
++  return !table_refs.empty () && !final_table_refs.empty ();
++}
++
++/* Helper function to match all the permutations of five arg
++   calculations.  */
++template
++struct five_args_calc_matcher
++{
++  /* Helper for matching (op1 * op2).  */
++  typedef binop_matcher two_args_block;
++  /* Helper for matching (op1 * (op2 * op3)).  */
++  typedef binop_matcher three_args_block;
++  /* Helper for matching ((op1 * op2) * (op3 * op4)).  */
++  typedef binop_matcher opt_four_args_block;
++  /* Helper for matching (op1 * (op2 * (op3 * op4))).  */
++  typedef binop_matcher linear_four_args_block;
++
++  /* Match the (op1 * ((op2 * op3) * (op4 * op5))) variant.  */
++  typedef binop_matcher opt_op_term;
++  /* Match the ((op1 * op2) * (op3 * (op4 * op5))) variant.  */
++  typedef binop_matcher three_op_two;
++  /* Match the (op1 * (op2 * (op3 * (op4 * op5)))) variant.  */
++  typedef binop_matcher fully_linear;
++
++  static const int holder_size = fully_linear::holder_size;
++  static const int op_num = fully_linear::op_num;
++  typedef typename fully_linear::term_type term_type;
++  typedef typename fully_linear::holder_type holder_type;
++
++  static rtx_insn* match (rtx_insn *insn, holder_type &m, unsigned depth = 1)
++    {
++      for (rtx dst = get_single_set_dst (insn); depth && insn && dst;
++	   insn = get_single_use_insn (insn, dst),
++	   dst = insn ? get_single_set_dst (insn) : NULL_RTX,
++	   --depth)
++	if (opt_op_term::match (insn, m) || three_op_two::match (insn, m)
++	    || fully_linear::match (insn, m))
++	  return insn;
++      return NULL;
++    }
++};
++
++/* Match the AES key.  */
++struct key_matcher : matcher_term
++{
++  static bool match (rtx_insn *insn, holder_type &m)
++    {
++      mem_matcher::holder_type info;
++      if (!mem_matcher::match (insn, info))
++	return false;
++
++      m[0] = info[0];
++      return true;
++    }
++};
++
++/* Matcher term for state input.  */
++template
++struct state_input_term : matcher_term
++{
++  typedef typename matcher_term::holder_type holder_type;
++
++  static bool match (rtx, rtx_insn *, holder_type &)
++    {
++      return false;
++    }
++
++  static bool match (rtx_insn *insn, holder_type &m)
++    {
++      key_matcher::holder_type k;
++      if (key_matcher::match (insn, k))
++	{
++	  m[0] = k[0];
++	  return true;
++	}
++
++      return matcher_term::term_type::finalize (insn, m);
++    }
++};
++
++/* Fill state from args.  */
++template 
++static bool
++finalize_input (const T (&args)[5], STATE &state)
++{
++  for (unsigned i = 0; i < 5; ++i)
++    if (!state.push_entry (args[i]))
++      return false;
++
++  return state.finalize ();
++}
++
++/* Construct input state.  */
++template
++static bool
++form_input (rtx_insn *insn, T &state)
++{
++  typedef five_args_calc_matcher >
++    matcher;
++
++  matcher::holder_type m;
++  if (!matcher::match (insn, m) || !finalize_input (m, state))
++    return false;
++
++  /* TODO: probably should not be set here.  */
++  state.set_output (SET_DEST (single_set (insn)));
++  return true;
++}
++
++/* Get definitions chain for the reg being used in the insn.  */
++static df_link *
++get_defs (rtx_insn *insn, rtx reg)
++{
++  df_link *ref_chain = get_def_chain (insn, reg);
++  gcc_assert (ref_chain);
++
++  for (df_link *ref_link = ref_chain; ref_link; ref_link = ref_link->next)
++    if (!check_def_chain_ref (ref_link->ref, reg))
++      return NULL;
++
++  return ref_chain;
++}
++
++/* Find AES init round.  To do this, find the table references that depends on
++   two definitions.  One of them is our input.  */
++template
++bool
++aes_optimizer::find_aes_init_round ()
++{
++  std::set checked;
++
++  for (typename table_ref_map::iterator it = table_refs.begin (),
++       end = table_refs.end (); it != end; ++it)
++    for (df_link *def = get_defs (it->second.insn, it->second.reg);
++	 def; def = def->next)
++      {
++	rtx_insn *def_insn = DF_REF_INSN (def->ref);
++	if (checked.count (def_insn))
++	  continue;
++
++	aes_input_state input_state;
++	if (form_input (def_insn, input_state)
++	    && !input_round.push_entry (input_state))
++	  return false;
++
++	checked.insert (def_insn);
++      }
++
++  return input_round.finalize ();
++}
++
++/* Collect AES inner state.  */
++template
++bool
++aes_optimizer::collect_state (rtx_insn *insn, aes_body_state &state,
++				 std::set &checked)
++{
++  typedef round_input_info term_info;
++  typedef five_args_calc_matcher > matcher;
++
++  typename matcher::holder_type m;
++  term_info::init (table_refs, checked);
++  rtx_insn *match_entry = matcher::match (insn, m, 3);
++  term_info::fin ();
++
++  if (!match_entry || !finalize_input (m, state))
++    return false;
++
++  /* TODO: probably should not be set here.  */
++  state.set_output (SET_DEST (single_set (match_entry)));
++  for (unsigned i = 0; i < 5; ++i)
++    if (!m[i].is_key)
++      checked.insert (m[i].tref->output_insn);
++
++  return true;
++}
++
++/* Simple sorter to link rounds by their registers.  */
++struct reg_comp
++{
++  bool operator () (rtx lhs, rtx rhs) const
++    {
++      return REGNO (lhs) < REGNO (rhs);
++    }
++};
++
++/* Find AES inner rounds.  */
++template
++bool
++aes_optimizer::find_aes_rounds ()
++{
++  typedef std::set input_key;
++
++  std::set checked;
++  std::map candidate_rounds;
++  for (typename table_ref_map::iterator it = table_refs.begin (),
++       end = table_refs.end (); it != end; ++it)
++    {
++      rtx_insn *insn = it->first;
++      if (checked.count (insn))
++	continue;
++
++      rtx_insn *use = get_single_use_insn (insn, SET_DEST (single_set (insn)));
++      if (!use)
++	continue;
++
++      aes_body_state state;
++      if (!collect_state (use, state, checked))
++	continue;
++
++      /* Sort the input so we can found the corresponding state.  */
++      input_key input (state.input.begin (), state.input.end ());
++      candidate_rounds[input].push_entry (state);
++    }
++
++  for (typename std::map::iterator
++       it = candidate_rounds.begin ();
++       it != candidate_rounds.end (); ++it)
++    if (it->second.finalize ())
++      rounds[it->second.input] = it->second;
++
++  return !rounds.empty ();
++}
++
++template
++struct final_state_matcher;
++
++/* AES encrypt matcher requires additional check on key calculations
++   due to possible optimizations.  */
++template<>
++struct final_state_matcher
++{
++  typedef round_input_info term_info;
++  typedef five_args_calc_matcher, IOR, true>
++    matcher;
++  typedef typename matcher::term_type
++    holder_type[matcher::holder_size - matcher::op_num];
++
++  static rtx_insn *match (rtx_insn *insn, holder_type &m, unsigned depth)
++    {
++      matcher::holder_type inner_m;
++      rtx_insn *res = matcher::match (insn, inner_m, depth);
++      if (!res)
++	return NULL;
++
++      /* Run pre-order traversal of the operands to check the correctness
++	 of key usage.  */
++      gcc_assert (inner_m[0].is_op);
++      unsigned pos = 0;
++      if (!check_key_calculations (inner_m, pos))
++	return NULL;
++      gcc_assert (pos == (matcher::holder_size - 1));
++
++      unsigned idx = 0;
++      for (unsigned i = 0; i < matcher::holder_size; ++i)
++	if (!inner_m[i].is_op)
++	  m[idx++] = inner_m[i];
++
++      gcc_assert (idx == 5);
++      return res;
++    }
++
++  static bool check_key_calculations (const matcher::holder_type &m,
++				      unsigned &idx,
++				      bool failure_on_key = false)
++    {
++      gcc_assert (idx < matcher::holder_size);
++      if (!m[idx].is_op)
++	return !(failure_on_key && m[idx].is_key);
++
++      failure_on_key |= (GET_CODE (m[idx].src) == IOR);
++      return check_key_calculations (m, ++idx, failure_on_key)
++	&& check_key_calculations (m, ++idx, failure_on_key);
++    }
++};
++
++
++/* The final state is simple wrapper since no additional checks are required
++   here.  */
++template<>
++struct final_state_matcher
++{
++  typedef round_input_info term_info;
++  typedef five_args_calc_matcher > matcher;
++  typedef typename matcher::holder_type holder_type;
++
++  static rtx_insn *match (rtx_insn *insn, holder_type &m, unsigned depth)
++    {
++      return matcher::match (insn, m, depth);
++    }
++};
++
++/* Match the AES final state.  */
++template
++bool
++aes_optimizer::collect_final_round (rtx_insn *insn, aes_final_state &state,
++				       std::set &checked)
++{
++  typedef final_state_matcher matcher_wrapper;
++
++  typename matcher_wrapper::holder_type m;
++  matcher_wrapper::term_info::init (final_table_refs, checked);
++  rtx_insn *match_entry = matcher_wrapper::match (insn, m, 3);
++  matcher_wrapper::term_info::fin ();
++
++  rtx dst;
++  if (!match_entry || !(dst = get_single_set_dst (match_entry))
++      || !finalize_input (m, state))
++    return false;
++
++  rtx src;
++  if (!(match_entry = get_single_use_insn (match_entry, dst))
++      || !(check_simple_op (match_entry, src, dst))
++      || !dst)
++    return false;
++
++  std::pair output;
++  if (!(match_entry = get_single_use_insn (match_entry, dst))
++      || !(dst = get_single_set_dst (match_entry))
++      || !decompose_mem (dst, output.first, output.second))
++    return false;
++
++  to_delete.push_back (match_entry);
++  state.set_output (output);
++  for (unsigned i = 0; i < 5; ++i)
++    if (!m[i].is_key)
++      checked.insert (m[i].tref->output_insn);
++
++  return true;
++}
++
++/* Find the final round.  */
++template
++bool
++aes_optimizer::find_aes_final_round ()
++{
++  std::set checked;
++  for (typename table_ref_map::iterator it = final_table_refs.begin (),
++       end = final_table_refs.end (); it != end; ++it)
++    {
++      rtx_insn *insn = it->first;
++
++      if (checked.count (insn))
++	continue;
++
++      rtx_insn *use = get_single_use_insn (insn, SET_DEST (single_set (insn)));
++      if (!use)
++	continue;
++
++      aes_final_state state;
++      if (collect_final_round (use, state, checked))
++	final_round.push_entry (state);
++    }
++
++  return final_round.finalize ();
++}
++
++template
++bool
++aes_optimizer::form_rounds ()
++{
++  return find_aes_final_round ()
++    && find_aes_init_round ()
++    && find_aes_rounds ();
++}
++
++template
++void
++aes_optimizer::erase_unused_rounds (std::set *> &used)
++{
++  if (used.size () == rounds.size ())
++    return;
++
++  for (typename std::map, aes_body_round>::iterator
++       it = rounds.begin (), next = it,
++       end = rounds.end (); it != end; it = next)
++    {
++      ++next;
++      if (!used.count (&it->first))
++	rounds.erase (it);
++    }
++}
++
++/* Find round starts and link them together.  */
++template
++bool
++aes_optimizer::check_aes_pattern ()
++{
++  std::set *> checked;
++
++  typename std::map, aes_body_round>::iterator fit
++    = rounds.find (input_round.output);
++
++  bool to_final = false;
++  while (fit != rounds.end () && !checked.count (&fit->first))
++    {
++      checked.insert (&fit->first);
++
++      if (fit->second.output == final_round.input)
++	to_final = true;
++
++      fit = rounds.find (fit->second.output);
++    }
++
++  if (!to_final)
++    return false;
++
++  erase_unused_rounds (checked);
++
++  return true;
++}
++
++static bool
++gen_insns (const rtx patterns[4], rtx_insn *loc)
++{
++  start_sequence ();
++  for (unsigned i = 0; i < 4; ++i)
++    {
++      rtx_insn *insn = emit_insn (patterns[i]);
++      if (recog_memoized (insn) < 0)
++	{
++	  end_sequence ();
++	  return false;
++	}
++    }
++
++  rtx_insn *seq = get_insns ();
++  end_sequence ();
++  emit_insn_after (seq, loc);
++
++  return true;
++}
++
++static rtx
++gen_offset_access (rtx base, unsigned HOST_WIDE_INT offset)
++{
++  if (!offset)
++    return base;
++
++  machine_mode mode = GET_MODE (base);
++  return gen_rtx_PLUS (mode, base, gen_rtx_CONST_INT (mode, offset));
++}
++
++template
++rtx
++aes_optimizer::get_vreg (const std::vector &vec)
++{
++  std::map, rtx>::iterator fit = vec_regs.find (vec);
++  if (fit != vec_regs.end ())
++    return fit->second;
++
++  return 0;
++}
++
++template
++rtx
++aes_optimizer::gen_vreg (const std::vector &vec)
++{
++  machine_mode vmode = targetm.get_v16qi_mode ();
++  rtx vreg = gen_reg_rtx (vmode);
++  vec_regs.insert (std::make_pair (vec, vreg));
++
++  return vreg;
++}
++
++template
++rtx
++aes_optimizer::gen_or_get_vreg (const std::vector &vec)
++{
++  rtx vreg = get_vreg (vec);
++  if (!vreg)
++    vreg = gen_vreg (vec);
++
++  return vreg;
++}
++
++template
++static rtx
++gen_aes_single_round (rtx vout, rtx vreg, rtx vkey);
++template
++static rtx
++gen_aes_mix_columns (rtx vreg, rtx vin);
++
++template<>
++rtx
++gen_aes_single_round (rtx vout, rtx vreg, rtx vkey)
++{
++  return targetm.gen_aesev16qi (vout, vreg, vkey);
++}
++
++template<>
++rtx
++gen_aes_mix_columns (rtx vreg, rtx vin)
++{
++  return targetm.gen_aesmcv16qi (vreg, vin);
++}
++
++template<>
++rtx
++gen_aes_single_round (rtx vout, rtx vreg, rtx vkey)
++{
++  return targetm.gen_aesdv16qi (vout, vreg, vkey);
++}
++
++template<>
++rtx
++gen_aes_mix_columns (rtx vreg, rtx vin)
++{
++  return targetm.gen_aesimcv16qi (vreg, vin);
++}
++
++template
++bool
++aes_optimizer::gen_init_round ()
++{
++  rtx_insn *loc = input_round.loc;
++
++  machine_mode vmode = targetm.get_v16qi_mode ();
++
++  rtx vreg = gen_reg_rtx (vmode);
++  rtx vkey = gen_reg_rtx (vmode);
++  rtx vout = gen_vreg (input_round.output);
++
++  rtx buf = input_round.input.first;
++  rtx key = gen_offset_access (input_round.key.src, input_round.key.offset);
++
++  rtx vload_pat = gen_rtx_SET (vreg,
++			       gen_rtx_MEM (vmode, buf));
++  rtx vkey_load_pat = gen_rtx_SET (vkey,
++				   gen_rtx_MEM (vmode, key));
++  rtx vrev_pat = targetm.gen_rev32v16qi (vkey, vkey);
++  rtx vaes_pat = gen_aes_single_round (vout, vreg, vkey);
++
++  const rtx patterns[4] = {vload_pat, vkey_load_pat, vrev_pat, vaes_pat};
++
++  return gen_insns (patterns, loc);
++}
++
++template
++bool
++aes_optimizer::gen_round (const aes_body_round &round)
++{
++  rtx_insn *loc = round.loc;
++
++  machine_mode vmode = targetm.get_v16qi_mode ();
++
++  rtx vreg = gen_reg_rtx (vmode);
++  rtx vkey = gen_reg_rtx (vmode);
++  rtx vin  = gen_or_get_vreg (round.input);
++  rtx vout = gen_or_get_vreg (round.output);
++
++  rtx key = gen_offset_access (round.key.src, round.key.offset);
++
++  rtx vkey_load_pat = gen_rtx_SET (vkey,
++				   gen_rtx_MEM (vmode, key));
++  rtx vrev_pat = targetm.gen_rev32v16qi (vkey, vkey);
++  rtx vmix_pat = gen_aes_mix_columns (vreg, vin);
++  rtx vaes_pat = gen_aes_single_round (vout, vreg, vkey);
++
++  const rtx patterns[4] = {vkey_load_pat, vrev_pat, vmix_pat, vaes_pat};
++
++  return gen_insns (patterns, loc);
++}
++
++template <bool aes_enc>
++bool
++aes_optimizer<aes_enc>::gen_final_round ()
++{
++  rtx_insn *loc = final_round.loc;
++
++  machine_mode vmode = targetm.get_v16qi_mode ();
++
++  rtx vreg = gen_reg_rtx (vmode);
++  rtx vkey = gen_reg_rtx (vmode);
++  rtx vin = get_vreg (final_round.input);
++
++  gcc_assert (vin);
++
++  rtx buf = final_round.output.first;
++  rtx key = gen_offset_access (final_round.key.src, final_round.key.offset);
++
++  rtx vkey_load_pat = gen_rtx_SET (vkey,
++				   gen_rtx_MEM (vmode, key));
++  rtx vrev_pat = targetm.gen_rev32v16qi (vkey, vkey);
++  rtx vxor_pat = gen_rtx_SET (vreg, gen_rtx_XOR (vmode, vin, vkey));
++  rtx vstore_pat = gen_rtx_SET (gen_rtx_MEM (vmode, buf), vreg);
++
++  const rtx patterns[4] = {vkey_load_pat, vrev_pat, vxor_pat, vstore_pat};
++
++  return gen_insns (patterns, loc);
++}
++
++template <bool aes_enc>
++bool
++aes_optimizer<aes_enc>::gen_aes_code ()
++{
++  if (!gen_init_round ())
++    return false;
++
++  for (typename std::map, aes_body_round>::iterator
++       it = rounds.begin (), end = rounds.end (); it != end; ++it)
++    {
++      if (!gen_round (it->second))
++	return false;
++    }
++
++  if (!gen_final_round ())
++    return false;
++
++  for (std::vector<rtx_insn *>::iterator it = to_delete.begin (),
++       end = to_delete.end (); it != end; ++it)
++    SET_INSN_DELETED (*it);
++
++  return true;
++}
++
++template <bool aes_enc>
++bool
++aes_optimizer<aes_enc>::run ()
++{
++  return collect_aes_lookup_tables ()
++    && form_rounds ()
++    && check_aes_pattern ()
++    && gen_aes_code ();
++}
++
++static unsigned int
++crypto_acceleration ()
++{
++  aes_optimizer<true> enc;
++  aes_optimizer<false> dec;
++  enc.run ();
++  dec.run ();
++
++  return 0;
++}
++
++static void
++init_df ()
++{
++  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
++  df_chain_add_problem (DF_UD_CHAIN + DF_DU_CHAIN);
++  df_mir_add_problem ();
++  df_live_add_problem ();
++  df_live_set_all_dirty ();
++  df_analyze ();
++  df_set_flags (DF_DEFER_INSN_RESCAN);
++}
++
++namespace {
++
++const pass_data pass_data_crypto_accel =
++{
++  RTL_PASS,	   // type
++  "crypto_accel",  // name
++  OPTGROUP_NONE,   // optinfo_flags
++  TV_CRYPTO_ACCEL, // tv_id
++  PROP_cfglayout,  // properties_required
++  0,		   // properties_provided
++  0,		   // properties_destroyed
++  0,		   // todo_flags_start
++  TODO_df_finish,  // todo_flags_finish
++};
++
++class pass_crypto_accel : public rtl_opt_pass
++{
++public:
++  pass_crypto_accel (gcc::context *ctxt)
++    : rtl_opt_pass (pass_data_crypto_accel, ctxt)
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *)
++    {
++      if (flag_crypto_accel_aes <= 0)
++	return false;
++      return targetm.get_v16qi_mode
++	&& targetm.gen_rev32v16qi
++	&& targetm.gen_aesev16qi
++	&& targetm.gen_aesmcv16qi;
++    }
++
++  virtual unsigned int execute (function *)
++    {
++      init_df ();
++      return crypto_acceleration ();
++    }
++}; // class pass_crypto_accel
++
++} // anon namespace
++
++rtl_opt_pass *
++make_pass_crypto_accel (gcc::context *ctxt)
++{
++  return new pass_crypto_accel (ctxt);
++}
+diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
+index 3b6e90bf2..2aba523bb 100644
+--- a/gcc/doc/tm.texi
++++ b/gcc/doc/tm.texi
+@@ -12125,6 +12125,35 @@ types.  If @var{has_wb} is not NULL then its value is set to true if STP
+ contains post-index or pre-index operation.
+ @end deftypefn
+ 
++@deftypefn {Target Hook} machine_mode TARGET_GET_V16QI_MODE ()
++This function get the 16 byte elements vector mode if target supports this.
++@end deftypefn
++
++@deftypefn {Target Hook} rtx TARGET_GEN_REV32V16QI (rtx @var{dest}, rtx @var{src})
++This function generate the byte reverse instruction
++ of 16 byte elements vector if target supports this.
++@end deftypefn
++
++@deftypefn {Target Hook} rtx TARGET_GEN_AESEV16QI (rtx @var{dest}, rtx @var{src1}, rtx @var{src2})
++This function generate the AES encryption instruction
++ of 16 byte elements vector if target supports this.
++@end deftypefn
++
++@deftypefn {Target Hook} rtx TARGET_GEN_AESDV16QI (rtx @var{dest}, rtx @var{src1}, rtx @var{src2})
++This function generate the AES decryption instruction
++ of 16 byte elements vector if target supports this.
++@end deftypefn
++
++@deftypefn {Target Hook} rtx TARGET_GEN_AESMCV16QI (rtx @var{dest}, rtx @var{src})
++This function generate the AES mix columns instruction
++ of 16 byte elements vector if target supports this.
++@end deftypefn
++
++@deftypefn {Target Hook} rtx TARGET_GEN_AESIMCV16QI (rtx @var{dest}, rtx @var{src})
++This function generate the AES inversed mix columns instruction
++ of 16 byte elements vector if target supports this.
++@end deftypefn
++
+ @deftypefn {Target Hook} bool TARGET_CANNOT_MODIFY_JUMPS_P (void)
+ This target hook returns @code{true} past the point in which new jump
+ instructions could be created.  On machines that require a register for
+diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
+index 6ff60e562..817d586ff 100644
+--- a/gcc/doc/tm.texi.in
++++ b/gcc/doc/tm.texi.in
+@@ -7981,6 +7981,18 @@ lists.
+ 
+ @hook TARGET_IS_STP_INSN
+ 
++@hook TARGET_GET_V16QI_MODE
++
++@hook TARGET_GEN_REV32V16QI
++
++@hook TARGET_GEN_AESEV16QI
++
++@hook TARGET_GEN_AESDV16QI
++
++@hook TARGET_GEN_AESMCV16QI
++
++@hook TARGET_GEN_AESIMCV16QI
++
+ @hook TARGET_CANNOT_MODIFY_JUMPS_P
+ 
+ @hook TARGET_HAVE_CONDITIONAL_EXECUTION
+diff --git a/gcc/passes.def b/gcc/passes.def
+index a30e05688..b7d4f7b4e 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -475,6 +475,7 @@ along with GCC; see the file COPYING3.  If not see
+       NEXT_PASS (pass_rtl_fwprop_addr);
+       NEXT_PASS (pass_inc_dec);
+       NEXT_PASS (pass_initialize_regs);
++      NEXT_PASS (pass_crypto_accel);
+       NEXT_PASS (pass_ud_rtl_dce);
+       NEXT_PASS (pass_combine);
+       NEXT_PASS (pass_if_after_combine);
+diff --git a/gcc/rtl-matcher.h b/gcc/rtl-matcher.h
+new file mode 100644
+index 000000000..6aed8d98d
+--- /dev/null
++++ b/gcc/rtl-matcher.h
+@@ -0,0 +1,367 @@
++/* Helpers for RTL pattern matchers.
++   Copyright (C) 2003-2023 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#ifndef GCC_RTL_MATCHER_H
++#define GCC_RTL_MATCHER_H
++
++#include "config.h"
++#include "system.h"
++#include "rtl.h"
++#include "df.h"
++
++/* Get definitions chain for the reg being used in insn.  */
++static df_link *
++get_def_chain (rtx_insn *insn, rtx reg)
++{
++  df_ref use;
++  FOR_EACH_INSN_USE (use, insn)
++    {
++      rtx use_reg = DF_REF_REG (use);
++      if (GET_CODE (use_reg) == SUBREG)
++	{
++	  if (REGNO (SUBREG_REG (use_reg)) == REGNO (reg))
++	    return NULL;
++	}
++      else
++	{
++	  gcc_assert (REG_P (use_reg));
++	  if (REGNO (use_reg) == REGNO (reg))
++	    return DF_REF_CHAIN (use);
++	}
++    }
++
++  return NULL;
++}
++
++/* Check if the reg is not global and actually modified in the ref.  */
++static bool
++check_def_chain_ref (df_ref ref, rtx reg)
++{
++  if (!ref || !DF_REF_INSN_INFO (ref))
++    return false;
++
++  return !global_regs[REGNO (reg)]
++    || set_of (reg, DF_REF_INSN (ref));
++}
++
++/* Get the single def instruction of the reg being used in the insn.  */
++static rtx_insn *
++get_single_def_insn (rtx_insn *insn, rtx reg)
++{
++  if (!REG_P (reg))
++    return NULL;
++
++  df_link *ref_chain = get_def_chain (insn, reg);
++  gcc_assert (ref_chain);
++
++  if (!ref_chain || ref_chain->next
++      || !check_def_chain_ref (ref_chain->ref, reg))
++    return NULL;
++
++  return DF_REF_INSN (ref_chain->ref);
++}
++
++/* Get the single user instruction of the reg being set in the insn.  */
++static rtx_insn *
++get_single_use_insn (rtx_insn *insn, rtx reg)
++{
++  df_ref def;
++  struct df_link *ref_chain;
++
++  if (!REG_P (reg))
++    return NULL;
++
++  FOR_EACH_INSN_DEF (def, insn)
++    if (REGNO (DF_REF_REG (def)) == REGNO (reg))
++      break;
++
++  gcc_assert (def && "Broken def-use analysis chain.");
++
++  ref_chain = DF_REF_CHAIN (def);
++
++  if (!ref_chain || ref_chain->next || !ref_chain->ref)
++    return NULL;
++
++  return DF_REF_INSN (ref_chain->ref);
++}
++
++/* Get the rtx pattern of suitable opcode from single set instruction.  */
++template <rtx_code OP1, rtx_code OP2>
++static rtx
++get_single_set_op (rtx_insn *insn)
++{
++  rtx pat = single_set (insn);
++  if (!pat)
++    return NULL_RTX;
++
++  rtx src = SET_SRC (pat);
++  if (GET_CODE (src) != OP1 && GET_CODE (src) != OP2)
++    return NULL_RTX;
++
++  return src;
++}
++
++/* Get the rtx pattern of suitable opcode from single set instruction.  */
++template <rtx_code OP>
++static rtx
++get_single_set_op (rtx_insn *insn)
++{
++  return get_single_set_op<OP, OP> (insn);
++}
++
++/* Get the rtx constant from single set instruction of suitable opcode.  */
++template <rtx_code OP>
++static rtx
++get_op_const_cst (rtx_insn *insn)
++{
++  rtx src = get_single_set_op<OP> (insn);
++  if (!src)
++    return NULL_RTX;
++
++  rtx cst = XEXP (src, 1);
++  return CONST_INT_P (cst) ? cst : NULL_RTX;
++}
++
++/* Get the rtx destination from single set instruction of suitable opcode.  */
++template <rtx_code OP>
++static rtx
++get_single_set_dst (rtx_insn *insn)
++{
++  rtx pat = single_set (insn);
++  if (!pat)
++    return NULL_RTX;
++
++  rtx dst = SET_DEST (pat);
++  if (GET_CODE (dst) != OP)
++    return NULL_RTX;
++
++  return dst;
++}
++
++/* Get the rtx destination from single set instruction.  */
++static rtx
++get_single_set_dst (rtx_insn *insn)
++{
++  rtx pat = single_set (insn);
++  if (!pat)
++    return NULL_RTX;
++
++  return SET_DEST (pat);
++}
++
++/* Check if the instruction is single set of suitable opcode.
++   Also gather its source and destination patterns.  */
++template <rtx_code OP>
++static bool
++check_simple_op (rtx_insn *insn, rtx &src, rtx &dst)
++{
++  rtx pat = single_set (insn);
++  if (!pat)
++    return false;
++
++  src = SET_SRC (pat);
++  dst = SET_DEST (pat);
++
++  if (GET_CODE (src) != OP)
++    return false;
++
++  return true;
++}
++
++/* Minimal term info of the RTL matcher.  All of the custom matchers should
++   inherit from it.
++
++   It stores information about matched pattern, instruction
++   of its location and predicate if the matched term represents operator
++   inside the matched tree.  */
++struct minimal_term_info
++{
++  minimal_term_info ()
++    {}
++  minimal_term_info (rtx_insn *loc, rtx src, bool is_op = false)
++    : loc (loc), src (src), is_op (is_op)
++    {}
++
++  rtx_insn *loc;
++  rtx src;
++  bool is_op;
++};
++
++/* Term info for memory matcher.  */
++struct mem_term_info : minimal_term_info
++{
++  mem_term_info ()
++    {}
++  mem_term_info (rtx_insn *loc, rtx src, unsigned HOST_WIDE_INT offset = 0)
++    : minimal_term_info (loc, src), offset (offset)
++    {}
++
++  unsigned HOST_WIDE_INT offset;
++};
++
++/* A wrapper being used to turn a term into a matcher-like entity.  */
++template <typename T>
++struct matcher_term
++{
++  /* Required storage size information of the matcher.  */
++  static const int holder_size = 1;
++  static const int op_num = 0;
++  typedef T term_type;
++  typedef term_type holder_type[holder_size];
++};
++
++/* Simple matcher of patterns of suitable opcode.  */
++template <rtx_code ARGOP, typename T = minimal_term_info>
++struct arg_op_matcher : matcher_term<T>
++{
++  typedef typename matcher_term<T>::holder_type holder_type;
++
++  static bool match (rtx_insn *, holder_type &)
++    {
++      return false;
++    }
++
++  static bool match (rtx src, rtx_insn *insn, holder_type &m)
++    {
++      if (GET_CODE (src) != ARGOP)
++	return false;
++
++      static_cast<minimal_term_info &> (m[0]) = minimal_term_info (insn, src);
++      return true;
++    }
++};
++
++/* Simple matcher of integer constants.  */
++template <typename T = minimal_term_info>
++struct int_cst_matcher : arg_op_matcher<CONST_INT, T>
++{};
++
++/* Unary operator matcher.  */
++template <rtx_code OP1, rtx_code OP2, typename ARG, bool store_op = false>
++struct unop_matcher
++{
++  /* Required storage size information of the matcher.  */
++  static const int holder_size = ARG::holder_size + store_op;
++  static const int op_num = ARG::op_num + store_op;
++  typedef typename ARG::term_type term_type;
++  typedef term_type holder_type[holder_size];
++
++  static bool match (rtx_insn *insn, holder_type &m)
++    {
++      rtx src = get_single_set_op<OP1, OP2> (insn);
++      return src && match (src, insn, m);
++    }
++
++  static bool match (rtx src, rtx_insn *insn, holder_type &m)
++    {
++      if (REG_P (src))
++	{
++	  insn = get_single_def_insn (insn, src);
++	  if (insn && (src = single_set (insn)))
++	    src = SET_SRC (src);
++	}
++
++      if (!src || !insn || (GET_CODE (src) != OP1 && GET_CODE (src) != OP2))
++	return false;
++
++      /* Store current operation if needed.  */
++      if (store_op)
++	static_cast<minimal_term_info &> (m[0]) = minimal_term_info (insn, src,
++								     true);
++
++      rtx op = XEXP (src, 0);
++      rtx_insn *def = get_single_def_insn (insn, op);
++      typename ARG::holder_type &m_arg
++	= (typename ARG::holder_type &) *(m + store_op);
++      return (def && ARG::match (def, m_arg)) || ARG::match (op, insn, m_arg);
++    }
++};
++
++/* Binary operator matcher.  */
++template <rtx_code OP1, rtx_code OP2, typename LHS, typename RHS, bool COMMUTATIVE = false, bool store_op = false>
++struct binop_matcher
++{
++  /* Required storage size information of the matcher.  */
++  static const int holder_size = LHS::holder_size + RHS::holder_size + store_op;
++  static const int op_num = LHS::op_num + RHS::op_num + store_op;
++  typedef typename LHS::term_type term_type;
++  typedef term_type holder_type[holder_size];
++
++  static bool match (rtx_insn *insn, holder_type &m)
++    {
++      rtx src = get_single_set_op<OP1, OP2> (insn);
++      return src && match (src, insn, m);
++    }
++
++  static bool match (rtx src, rtx_insn *insn, holder_type &m)
++    {
++      if (GET_CODE (src) != OP1 && GET_CODE (src) != OP2)
++	return false;
++
++      /* Store current operation if needed.  */
++      if (store_op)
++	static_cast<minimal_term_info &> (m[0]) = minimal_term_info (insn, src,
++								     true);
++
++      rtx lhs_op = XEXP (src, 0);
++      rtx rhs_op = XEXP (src, 1);
++      rtx_insn *lhs_def = get_single_def_insn (insn, lhs_op);
++      rtx_insn *rhs_def = get_single_def_insn (insn, rhs_op);
++
++      return match (lhs_def, rhs_def, lhs_op, rhs_op, insn, m)
++	|| (COMMUTATIVE && match (rhs_def, lhs_def, rhs_op, lhs_op, insn, m));
++    }
++
++private:
++  static bool match (rtx_insn *lhs_def, rtx_insn *rhs_def,
++		     rtx lhs_op, rtx rhs_op, rtx_insn *insn,
++		     holder_type &m)
++    {
++      /* Force template instantiation error on non-matching types.  */
++      gcc_assert ((typename LHS::term_type *) NULL
++		  == (typename RHS::term_type *) NULL);
++
++      /* Obtain locations in the storage.  */
++      typename LHS::holder_type &m_lhs
++	= (typename LHS::holder_type &) *(m + store_op);
++      typename RHS::holder_type &m_rhs
++	= (typename RHS::holder_type &) *(m + store_op
++					  + LHS::holder_size);
++
++      /* Try match both instructions.  */
++      if (lhs_def && rhs_def && LHS::match (lhs_def, m_lhs)
++	  && RHS::match (rhs_def, m_rhs))
++	return true;
++      /* Try match instruction and pattern.  */
++      else if (lhs_def && LHS::match (lhs_def, m_lhs)
++	       && RHS::match (rhs_op, insn, m_rhs))
++	return true;
++      /* Try match pattern and instruction.  */
++      else if (rhs_def && LHS::match (lhs_op, insn, m_lhs)
++	       && RHS::match (rhs_def, m_rhs))
++	return true;
++      /* Try match both patterns.  */
++      else
++	return LHS::match (lhs_op, insn, m_lhs)
++	  && RHS::match (rhs_op, insn, m_rhs);
++    }
++};
++
++#endif // GCC_RTL_MATCHER_H
+diff --git a/gcc/target.def b/gcc/target.def
+index 8797a21d5..c9bb2b4c2 100644
+--- a/gcc/target.def
++++ b/gcc/target.def
+@@ -2693,6 +2693,47 @@ contains post-index or pre-index operation.",
+   bool, (int icode, bool *has_wb),
+   NULL)
+ 
++DEFHOOK
++(get_v16qi_mode,
++ "This function get the 16 byte elements vector mode if target supports this.",
++ machine_mode, (),
++ NULL)
++
++DEFHOOK
++(gen_rev32v16qi,
++ "This function generate the byte reverse instruction\n\
++ of 16 byte elements vector if target supports this.",
++ rtx, (rtx dest, rtx src),
++ NULL)
++
++DEFHOOK
++(gen_aesev16qi,
++ "This function generate the AES encryption instruction\n\
++ of 16 byte elements vector if target supports this.",
++ rtx, (rtx dest, rtx src1, rtx src2),
++ NULL)
++
++DEFHOOK
++(gen_aesdv16qi,
++ "This function generate the AES decryption instruction\n\
++ of 16 byte elements vector if target supports this.",
++ rtx, (rtx dest, rtx src1, rtx src2),
++ NULL)
++
++DEFHOOK
++(gen_aesmcv16qi,
++ "This function generate the AES mix columns instruction\n\
++ of 16 byte elements vector if target supports this.",
++ rtx, (rtx dest, rtx src),
++ NULL)
++
++DEFHOOK
++(gen_aesimcv16qi,
++ "This function generate the AES inversed mix columns instruction\n\
++ of 16 byte elements vector if target supports this.",
++ rtx, (rtx dest, rtx src),
++ NULL)
++
+ DEFHOOK
+ (gen_ccmp_first,
+  "This function prepares to emit a comparison insn for the first compare in a\n\
+diff --git a/gcc/testsuite/gcc.target/aarch64/aes-decrypt.c b/gcc/testsuite/gcc.target/aarch64/aes-decrypt.c
+new file mode 100644
+index 000000000..966ec5532
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/aes-decrypt.c
+@@ -0,0 +1,478 @@
++/* { dg-do run } */
++/* { dg-options "-O3 -fno-inline --save-temps -fcrypto-accel-aes -march=armv8.2-a+lse+crypto" } */
++
++#include <stdio.h>
++#include <stdint.h>
++#include <stdlib.h>
++#include <string.h>
++#include <assert.h>
++
++typedef uint8_t u8;
++typedef uint32_t u32;
++
++static const u32 Td0[256] = {
++    0x51f4a750U, 0x7e416553U, 0x1a17a4c3U, 0x3a275e96U,
++    0x3bab6bcbU, 0x1f9d45f1U, 0xacfa58abU, 0x4be30393U,
++    0x2030fa55U, 0xad766df6U, 0x88cc7691U, 0xf5024c25U,
++    0x4fe5d7fcU, 0xc52acbd7U, 0x26354480U, 0xb562a38fU,
++    0xdeb15a49U, 0x25ba1b67U, 0x45ea0e98U, 0x5dfec0e1U,
++    0xc32f7502U, 0x814cf012U, 0x8d4697a3U, 0x6bd3f9c6U,
++    0x038f5fe7U, 0x15929c95U, 0xbf6d7aebU, 0x955259daU,
++    0xd4be832dU, 0x587421d3U, 0x49e06929U, 0x8ec9c844U,
++    0x75c2896aU, 0xf48e7978U, 0x99583e6bU, 0x27b971ddU,
++    0xbee14fb6U, 0xf088ad17U, 0xc920ac66U, 0x7dce3ab4U,
++    0x63df4a18U, 0xe51a3182U, 0x97513360U, 0x62537f45U,
++    0xb16477e0U, 0xbb6bae84U, 0xfe81a01cU, 0xf9082b94U,
++    0x70486858U, 0x8f45fd19U, 0x94de6c87U, 0x527bf8b7U,
++    0xab73d323U, 0x724b02e2U, 0xe31f8f57U, 0x6655ab2aU,
++    0xb2eb2807U, 0x2fb5c203U, 0x86c57b9aU, 0xd33708a5U,
++    0x302887f2U, 0x23bfa5b2U, 0x02036abaU, 0xed16825cU,
++    0x8acf1c2bU, 0xa779b492U, 0xf307f2f0U, 0x4e69e2a1U,
++    0x65daf4cdU, 0x0605bed5U, 0xd134621fU, 0xc4a6fe8aU,
++    0x342e539dU, 0xa2f355a0U, 0x058ae132U, 0xa4f6eb75U,
++    0x0b83ec39U, 0x4060efaaU, 0x5e719f06U, 0xbd6e1051U,
++    0x3e218af9U, 0x96dd063dU, 0xdd3e05aeU, 0x4de6bd46U,
++    0x91548db5U, 0x71c45d05U, 0x0406d46fU, 0x605015ffU,
++    0x1998fb24U, 0xd6bde997U, 0x894043ccU, 0x67d99e77U,
++    0xb0e842bdU, 0x07898b88U, 0xe7195b38U, 0x79c8eedbU,
++    0xa17c0a47U, 0x7c420fe9U, 0xf8841ec9U, 0x00000000U,
++    0x09808683U, 0x322bed48U, 0x1e1170acU, 0x6c5a724eU,
++    0xfd0efffbU, 0x0f853856U, 0x3daed51eU, 0x362d3927U,
++    0x0a0fd964U, 0x685ca621U, 0x9b5b54d1U, 0x24362e3aU,
++    0x0c0a67b1U, 0x9357e70fU, 0xb4ee96d2U, 0x1b9b919eU,
++    0x80c0c54fU, 0x61dc20a2U, 0x5a774b69U, 0x1c121a16U,
++    0xe293ba0aU, 0xc0a02ae5U, 0x3c22e043U, 0x121b171dU,
++    0x0e090d0bU, 0xf28bc7adU, 0x2db6a8b9U, 0x141ea9c8U,
++    0x57f11985U, 0xaf75074cU, 0xee99ddbbU, 0xa37f60fdU,
++    0xf701269fU, 0x5c72f5bcU, 0x44663bc5U, 0x5bfb7e34U,
++    0x8b432976U, 0xcb23c6dcU, 0xb6edfc68U, 0xb8e4f163U,
++    0xd731dccaU, 0x42638510U, 0x13972240U, 0x84c61120U,
++    0x854a247dU, 0xd2bb3df8U, 0xaef93211U, 0xc729a16dU,
++    0x1d9e2f4bU, 0xdcb230f3U, 0x0d8652ecU, 0x77c1e3d0U,
++    0x2bb3166cU, 0xa970b999U, 0x119448faU, 0x47e96422U,
++    0xa8fc8cc4U, 0xa0f03f1aU, 0x567d2cd8U, 0x223390efU,
++    0x87494ec7U, 0xd938d1c1U, 0x8ccaa2feU, 0x98d40b36U,
++    0xa6f581cfU, 0xa57ade28U, 0xdab78e26U, 0x3fadbfa4U,
++    0x2c3a9de4U, 0x5078920dU, 0x6a5fcc9bU, 0x547e4662U,
++    0xf68d13c2U, 0x90d8b8e8U, 0x2e39f75eU, 0x82c3aff5U,
++    0x9f5d80beU, 0x69d0937cU, 0x6fd52da9U, 0xcf2512b3U,
++    0xc8ac993bU, 0x10187da7U, 0xe89c636eU, 0xdb3bbb7bU,
++    0xcd267809U, 0x6e5918f4U, 0xec9ab701U, 0x834f9aa8U,
++    0xe6956e65U, 0xaaffe67eU, 0x21bccf08U, 0xef15e8e6U,
++    0xbae79bd9U, 0x4a6f36ceU, 0xea9f09d4U, 0x29b07cd6U,
++    0x31a4b2afU, 0x2a3f2331U, 0xc6a59430U, 0x35a266c0U,
++    0x744ebc37U, 0xfc82caa6U, 0xe090d0b0U, 0x33a7d815U,
++    0xf104984aU, 0x41ecdaf7U, 0x7fcd500eU, 0x1791f62fU,
++    0x764dd68dU, 0x43efb04dU, 0xccaa4d54U, 0xe49604dfU,
++    0x9ed1b5e3U, 0x4c6a881bU, 0xc12c1fb8U, 0x4665517fU,
++    0x9d5eea04U, 0x018c355dU, 0xfa877473U, 0xfb0b412eU,
++    0xb3671d5aU, 0x92dbd252U, 0xe9105633U, 0x6dd64713U,
++    0x9ad7618cU, 0x37a10c7aU, 0x59f8148eU, 0xeb133c89U,
++    0xcea927eeU, 0xb761c935U, 0xe11ce5edU, 0x7a47b13cU,
++    0x9cd2df59U, 0x55f2733fU, 0x1814ce79U, 0x73c737bfU,
++    0x53f7cdeaU, 0x5ffdaa5bU, 0xdf3d6f14U, 0x7844db86U,
++    0xcaaff381U, 0xb968c43eU, 0x3824342cU, 0xc2a3405fU,
++    0x161dc372U, 0xbce2250cU, 0x283c498bU, 0xff0d9541U,
++    0x39a80171U, 0x080cb3deU, 0xd8b4e49cU, 0x6456c190U,
++    0x7bcb8461U, 0xd532b670U, 0x486c5c74U, 0xd0b85742U,
++};
++
++static const u32 Td1[256] = {
++    0x5051f4a7U, 0x537e4165U, 0xc31a17a4U, 0x963a275eU,
++    0xcb3bab6bU, 0xf11f9d45U, 0xabacfa58U, 0x934be303U,
++    0x552030faU, 0xf6ad766dU, 0x9188cc76U, 0x25f5024cU,
++    0xfc4fe5d7U, 0xd7c52acbU, 0x80263544U, 0x8fb562a3U,
++    0x49deb15aU, 0x6725ba1bU, 0x9845ea0eU, 0xe15dfec0U,
++    0x02c32f75U, 0x12814cf0U, 0xa38d4697U, 0xc66bd3f9U,
++    0xe7038f5fU, 0x9515929cU, 0xebbf6d7aU, 0xda955259U,
++    0x2dd4be83U, 0xd3587421U, 0x2949e069U, 0x448ec9c8U,
++    0x6a75c289U, 0x78f48e79U, 0x6b99583eU, 0xdd27b971U,
++    0xb6bee14fU, 0x17f088adU, 0x66c920acU, 0xb47dce3aU,
++    0x1863df4aU, 0x82e51a31U, 0x60975133U, 0x4562537fU,
++    0xe0b16477U, 0x84bb6baeU, 0x1cfe81a0U, 0x94f9082bU,
++    0x58704868U, 0x198f45fdU, 0x8794de6cU, 0xb7527bf8U,
++    0x23ab73d3U, 0xe2724b02U, 0x57e31f8fU, 0x2a6655abU,
++    0x07b2eb28U, 0x032fb5c2U, 0x9a86c57bU, 0xa5d33708U,
++    0xf2302887U, 0xb223bfa5U, 0xba02036aU, 0x5ced1682U,
++    0x2b8acf1cU, 0x92a779b4U, 0xf0f307f2U, 0xa14e69e2U,
++    0xcd65daf4U, 0xd50605beU, 0x1fd13462U, 0x8ac4a6feU,
++    0x9d342e53U, 0xa0a2f355U, 0x32058ae1U, 0x75a4f6ebU,
++    0x390b83ecU, 0xaa4060efU, 0x065e719fU, 0x51bd6e10U,
++    0xf93e218aU, 0x3d96dd06U, 0xaedd3e05U, 0x464de6bdU,
++    0xb591548dU, 0x0571c45dU, 0x6f0406d4U, 0xff605015U,
++    0x241998fbU, 0x97d6bde9U, 0xcc894043U, 0x7767d99eU,
++    0xbdb0e842U, 0x8807898bU, 0x38e7195bU, 0xdb79c8eeU,
++    0x47a17c0aU, 0xe97c420fU, 0xc9f8841eU, 0x00000000U,
++    0x83098086U, 0x48322bedU, 0xac1e1170U, 0x4e6c5a72U,
++    0xfbfd0effU, 0x560f8538U, 0x1e3daed5U, 0x27362d39U,
++    0x640a0fd9U, 0x21685ca6U, 0xd19b5b54U, 0x3a24362eU,
++    0xb10c0a67U, 0x0f9357e7U, 0xd2b4ee96U, 0x9e1b9b91U,
++    0x4f80c0c5U, 0xa261dc20U, 0x695a774bU, 0x161c121aU,
++    0x0ae293baU, 0xe5c0a02aU, 0x433c22e0U, 0x1d121b17U,
++    0x0b0e090dU, 0xadf28bc7U, 0xb92db6a8U, 0xc8141ea9U,
++    0x8557f119U, 0x4caf7507U, 0xbbee99ddU, 0xfda37f60U,
++    0x9ff70126U, 0xbc5c72f5U, 0xc544663bU, 0x345bfb7eU,
++    0x768b4329U, 0xdccb23c6U, 0x68b6edfcU, 0x63b8e4f1U,
++    0xcad731dcU, 0x10426385U, 0x40139722U, 0x2084c611U,
++    0x7d854a24U, 0xf8d2bb3dU, 0x11aef932U, 0x6dc729a1U,
++    0x4b1d9e2fU, 0xf3dcb230U, 0xec0d8652U, 0xd077c1e3U,
++    0x6c2bb316U, 0x99a970b9U, 0xfa119448U, 0x2247e964U,
++    0xc4a8fc8cU, 0x1aa0f03fU, 0xd8567d2cU, 0xef223390U,
++    0xc787494eU, 0xc1d938d1U, 0xfe8ccaa2U, 0x3698d40bU,
++    0xcfa6f581U, 0x28a57adeU, 0x26dab78eU, 0xa43fadbfU,
++    0xe42c3a9dU, 0x0d507892U, 0x9b6a5fccU, 0x62547e46U,
++    0xc2f68d13U, 0xe890d8b8U, 0x5e2e39f7U, 0xf582c3afU,
++    0xbe9f5d80U, 0x7c69d093U, 0xa96fd52dU, 0xb3cf2512U,
++    0x3bc8ac99U, 0xa710187dU, 0x6ee89c63U, 0x7bdb3bbbU,
++    0x09cd2678U, 0xf46e5918U, 0x01ec9ab7U, 0xa8834f9aU,
++    0x65e6956eU, 0x7eaaffe6U, 0x0821bccfU, 0xe6ef15e8U,
++    0xd9bae79bU, 0xce4a6f36U, 0xd4ea9f09U, 0xd629b07cU,
++    0xaf31a4b2U, 0x312a3f23U, 0x30c6a594U, 0xc035a266U,
++    0x37744ebcU, 0xa6fc82caU, 0xb0e090d0U, 0x1533a7d8U,
++    0x4af10498U, 0xf741ecdaU, 0x0e7fcd50U, 0x2f1791f6U,
++    0x8d764dd6U, 0x4d43efb0U, 0x54ccaa4dU, 0xdfe49604U,
++    0xe39ed1b5U, 0x1b4c6a88U, 0xb8c12c1fU, 0x7f466551U,
++    0x049d5eeaU, 0x5d018c35U, 0x73fa8774U, 0x2efb0b41U,
++    0x5ab3671dU, 0x5292dbd2U, 0x33e91056U, 0x136dd647U,
++    0x8c9ad761U, 0x7a37a10cU, 0x8e59f814U, 0x89eb133cU,
++    0xeecea927U, 0x35b761c9U, 0xede11ce5U, 0x3c7a47b1U,
++    0x599cd2dfU, 0x3f55f273U, 0x791814ceU, 0xbf73c737U,
++    0xea53f7cdU, 0x5b5ffdaaU, 0x14df3d6fU, 0x867844dbU,
++    0x81caaff3U, 0x3eb968c4U, 0x2c382434U, 0x5fc2a340U,
++    0x72161dc3U, 0x0cbce225U, 0x8b283c49U, 0x41ff0d95U,
++    0x7139a801U, 0xde080cb3U, 0x9cd8b4e4U, 0x906456c1U,
++    0x617bcb84U, 0x70d532b6U, 0x74486c5cU, 0x42d0b857U,
++};
++
++static const u32 Td2[256] = {
++    0xa75051f4U, 0x65537e41U, 0xa4c31a17U, 0x5e963a27U,
++    0x6bcb3babU, 0x45f11f9dU, 0x58abacfaU, 0x03934be3U,
++    0xfa552030U, 0x6df6ad76U, 0x769188ccU, 0x4c25f502U,
++    0xd7fc4fe5U, 0xcbd7c52aU, 0x44802635U, 0xa38fb562U,
++    0x5a49deb1U, 0x1b6725baU, 0x0e9845eaU, 0xc0e15dfeU,
++    0x7502c32fU, 0xf012814cU, 0x97a38d46U, 0xf9c66bd3U,
++    0x5fe7038fU, 0x9c951592U, 0x7aebbf6dU, 0x59da9552U,
++    0x832dd4beU, 0x21d35874U, 0x692949e0U, 0xc8448ec9U,
++    0x896a75c2U, 0x7978f48eU, 0x3e6b9958U, 0x71dd27b9U,
++    0x4fb6bee1U, 0xad17f088U, 0xac66c920U, 0x3ab47dceU,
++    0x4a1863dfU, 0x3182e51aU, 0x33609751U, 0x7f456253U,
++    0x77e0b164U, 0xae84bb6bU, 0xa01cfe81U, 0x2b94f908U,
++    0x68587048U, 0xfd198f45U, 0x6c8794deU, 0xf8b7527bU,
++    0xd323ab73U, 0x02e2724bU, 0x8f57e31fU, 0xab2a6655U,
++    0x2807b2ebU, 0xc2032fb5U, 0x7b9a86c5U, 0x08a5d337U,
++    0x87f23028U, 0xa5b223bfU, 0x6aba0203U, 0x825ced16U,
++    0x1c2b8acfU, 0xb492a779U, 0xf2f0f307U, 0xe2a14e69U,
++    0xf4cd65daU, 0xbed50605U, 0x621fd134U, 0xfe8ac4a6U,
++    0x539d342eU, 0x55a0a2f3U, 0xe132058aU, 0xeb75a4f6U,
++    0xec390b83U, 0xefaa4060U, 0x9f065e71U, 0x1051bd6eU,
++    0x8af93e21U, 0x063d96ddU, 0x05aedd3eU, 0xbd464de6U,
++    0x8db59154U, 0x5d0571c4U, 0xd46f0406U, 0x15ff6050U,
++    0xfb241998U, 0xe997d6bdU, 0x43cc8940U, 0x9e7767d9U,
++    0x42bdb0e8U, 0x8b880789U, 0x5b38e719U, 0xeedb79c8U,
++    0x0a47a17cU, 0x0fe97c42U, 0x1ec9f884U, 0x00000000U,
++    0x86830980U, 0xed48322bU, 0x70ac1e11U, 0x724e6c5aU,
++    0xfffbfd0eU, 0x38560f85U, 0xd51e3daeU, 0x3927362dU,
++    0xd9640a0fU, 0xa621685cU, 0x54d19b5bU, 0x2e3a2436U,
++    0x67b10c0aU, 0xe70f9357U, 0x96d2b4eeU, 0x919e1b9bU,
++    0xc54f80c0U, 0x20a261dcU, 0x4b695a77U, 0x1a161c12U,
++    0xba0ae293U, 0x2ae5c0a0U, 0xe0433c22U, 0x171d121bU,
++    0x0d0b0e09U, 0xc7adf28bU, 0xa8b92db6U, 0xa9c8141eU,
++    0x198557f1U, 0x074caf75U, 0xddbbee99U, 0x60fda37fU,
++    0x269ff701U, 0xf5bc5c72U, 0x3bc54466U, 0x7e345bfbU,
++    0x29768b43U, 0xc6dccb23U, 0xfc68b6edU, 0xf163b8e4U,
++    0xdccad731U, 0x85104263U, 0x22401397U, 0x112084c6U,
++    0x247d854aU, 0x3df8d2bbU, 0x3211aef9U, 0xa16dc729U,
++    0x2f4b1d9eU, 0x30f3dcb2U, 0x52ec0d86U, 0xe3d077c1U,
++    0x166c2bb3U, 0xb999a970U, 0x48fa1194U, 0x642247e9U,
++    0x8cc4a8fcU, 0x3f1aa0f0U, 0x2cd8567dU, 0x90ef2233U,
++    0x4ec78749U, 0xd1c1d938U, 0xa2fe8ccaU, 0x0b3698d4U,
++    0x81cfa6f5U, 0xde28a57aU, 0x8e26dab7U, 0xbfa43fadU,
++    0x9de42c3aU, 0x920d5078U, 0xcc9b6a5fU, 0x4662547eU,
++    0x13c2f68dU, 0xb8e890d8U, 0xf75e2e39U, 0xaff582c3U,
++    0x80be9f5dU, 0x937c69d0U, 0x2da96fd5U, 0x12b3cf25U,
++    0x993bc8acU, 0x7da71018U, 0x636ee89cU, 0xbb7bdb3bU,
++    0x7809cd26U, 0x18f46e59U, 0xb701ec9aU, 0x9aa8834fU,
++    0x6e65e695U, 0xe67eaaffU, 0xcf0821bcU, 0xe8e6ef15U,
++    0x9bd9bae7U, 0x36ce4a6fU, 0x09d4ea9fU, 0x7cd629b0U,
++    0xb2af31a4U, 0x23312a3fU, 0x9430c6a5U, 0x66c035a2U,
++    0xbc37744eU, 0xcaa6fc82U, 0xd0b0e090U, 0xd81533a7U,
++    0x984af104U, 0xdaf741ecU, 0x500e7fcdU, 0xf62f1791U,
++    0xd68d764dU, 0xb04d43efU, 0x4d54ccaaU, 0x04dfe496U,
++    0xb5e39ed1U, 0x881b4c6aU, 0x1fb8c12cU, 0x517f4665U,
++    0xea049d5eU, 0x355d018cU, 0x7473fa87U, 0x412efb0bU,
++    0x1d5ab367U, 0xd25292dbU, 0x5633e910U, 0x47136dd6U,
++    0x618c9ad7U, 0x0c7a37a1U, 0x148e59f8U, 0x3c89eb13U,
++    0x27eecea9U, 0xc935b761U, 0xe5ede11cU, 0xb13c7a47U,
++    0xdf599cd2U, 0x733f55f2U, 0xce791814U, 0x37bf73c7U,
++    0xcdea53f7U, 0xaa5b5ffdU, 0x6f14df3dU, 0xdb867844U,
++    0xf381caafU, 0xc43eb968U, 0x342c3824U, 0x405fc2a3U,
++    0xc372161dU, 0x250cbce2U, 0x498b283cU, 0x9541ff0dU,
++    0x017139a8U, 0xb3de080cU, 0xe49cd8b4U, 0xc1906456U,
++    0x84617bcbU, 0xb670d532U, 0x5c74486cU, 0x5742d0b8U,
++};
++
++static const u32 Td3[256] = {
++    0xf4a75051U, 0x4165537eU, 0x17a4c31aU, 0x275e963aU,
++    0xab6bcb3bU, 0x9d45f11fU, 0xfa58abacU, 0xe303934bU,
++    0x30fa5520U, 0x766df6adU, 0xcc769188U, 0x024c25f5U,
++    0xe5d7fc4fU, 0x2acbd7c5U, 0x35448026U, 0x62a38fb5U,
++    0xb15a49deU, 0xba1b6725U, 0xea0e9845U, 0xfec0e15dU,
++    0x2f7502c3U, 0x4cf01281U, 0x4697a38dU, 0xd3f9c66bU,
++    0x8f5fe703U, 0x929c9515U, 0x6d7aebbfU, 0x5259da95U,
++    0xbe832dd4U, 0x7421d358U, 0xe0692949U, 0xc9c8448eU,
++    0xc2896a75U, 0x8e7978f4U, 0x583e6b99U, 0xb971dd27U,
++    0xe14fb6beU, 0x88ad17f0U, 0x20ac66c9U, 0xce3ab47dU,
++    0xdf4a1863U, 0x1a3182e5U, 0x51336097U, 0x537f4562U,
++    0x6477e0b1U, 0x6bae84bbU, 0x81a01cfeU, 0x082b94f9U,
++    0x48685870U, 0x45fd198fU, 0xde6c8794U, 0x7bf8b752U,
++    0x73d323abU, 0x4b02e272U, 0x1f8f57e3U, 0x55ab2a66U,
++    0xeb2807b2U, 0xb5c2032fU, 0xc57b9a86U, 0x3708a5d3U,
++    0x2887f230U, 0xbfa5b223U, 0x036aba02U, 0x16825cedU,
++    0xcf1c2b8aU, 0x79b492a7U, 0x07f2f0f3U, 0x69e2a14eU,
++    0xdaf4cd65U, 0x05bed506U, 0x34621fd1U, 0xa6fe8ac4U,
++    0x2e539d34U, 0xf355a0a2U, 0x8ae13205U, 0xf6eb75a4U,
++    0x83ec390bU, 0x60efaa40U, 0x719f065eU, 0x6e1051bdU,
++    0x218af93eU, 0xdd063d96U, 0x3e05aeddU, 0xe6bd464dU,
++    0x548db591U, 0xc45d0571U, 0x06d46f04U, 0x5015ff60U,
++    0x98fb2419U, 0xbde997d6U, 0x4043cc89U, 0xd99e7767U,
++    0xe842bdb0U, 0x898b8807U, 0x195b38e7U, 0xc8eedb79U,
++    0x7c0a47a1U, 0x420fe97cU, 0x841ec9f8U, 0x00000000U,
++    0x80868309U, 0x2bed4832U, 0x1170ac1eU, 0x5a724e6cU,
++    0x0efffbfdU, 0x8538560fU, 0xaed51e3dU, 0x2d392736U,
++    0x0fd9640aU, 0x5ca62168U, 0x5b54d19bU, 0x362e3a24U,
++    0x0a67b10cU, 0x57e70f93U, 0xee96d2b4U, 0x9b919e1bU,
++    0xc0c54f80U, 0xdc20a261U, 0x774b695aU, 0x121a161cU,
++    0x93ba0ae2U, 0xa02ae5c0U, 0x22e0433cU, 0x1b171d12U,
++    0x090d0b0eU, 0x8bc7adf2U, 0xb6a8b92dU, 0x1ea9c814U,
++    0xf1198557U, 0x75074cafU, 0x99ddbbeeU, 0x7f60fda3U,
++    0x01269ff7U, 0x72f5bc5cU, 0x663bc544U, 0xfb7e345bU,
++    0x4329768bU, 0x23c6dccbU, 0xedfc68b6U, 0xe4f163b8U,
++    0x31dccad7U, 0x63851042U, 0x97224013U, 0xc6112084U,
++    0x4a247d85U, 0xbb3df8d2U, 0xf93211aeU, 0x29a16dc7U,
++    0x9e2f4b1dU, 0xb230f3dcU, 0x8652ec0dU, 0xc1e3d077U,
++    0xb3166c2bU, 0x70b999a9U, 0x9448fa11U, 0xe9642247U,
++    0xfc8cc4a8U, 0xf03f1aa0U, 0x7d2cd856U, 0x3390ef22U,
++    0x494ec787U, 0x38d1c1d9U, 0xcaa2fe8cU, 0xd40b3698U,
++    0xf581cfa6U, 0x7ade28a5U, 0xb78e26daU, 0xadbfa43fU,
++    0x3a9de42cU, 0x78920d50U, 0x5fcc9b6aU, 0x7e466254U,
++    0x8d13c2f6U, 0xd8b8e890U, 0x39f75e2eU, 0xc3aff582U,
++    0x5d80be9fU, 0xd0937c69U, 0xd52da96fU, 0x2512b3cfU,
++    0xac993bc8U, 0x187da710U, 0x9c636ee8U, 0x3bbb7bdbU,
++    0x267809cdU, 0x5918f46eU, 0x9ab701ecU, 0x4f9aa883U,
++    0x956e65e6U, 0xffe67eaaU, 0xbccf0821U, 0x15e8e6efU,
++    0xe79bd9baU, 0x6f36ce4aU, 0x9f09d4eaU, 0xb07cd629U,
++    0xa4b2af31U, 0x3f23312aU, 0xa59430c6U, 0xa266c035U,
++    0x4ebc3774U, 0x82caa6fcU, 0x90d0b0e0U, 0xa7d81533U,
++    0x04984af1U, 0xecdaf741U, 0xcd500e7fU, 0x91f62f17U,
++    0x4dd68d76U, 0xefb04d43U, 0xaa4d54ccU, 0x9604dfe4U,
++    0xd1b5e39eU, 0x6a881b4cU, 0x2c1fb8c1U, 0x65517f46U,
++    0x5eea049dU, 0x8c355d01U, 0x877473faU, 0x0b412efbU,
++    0x671d5ab3U, 0xdbd25292U, 0x105633e9U, 0xd647136dU,
++    0xd7618c9aU, 0xa10c7a37U, 0xf8148e59U, 0x133c89ebU,
++    0xa927eeceU, 0x61c935b7U, 0x1ce5ede1U, 0x47b13c7aU,
++    0xd2df599cU, 0xf2733f55U, 0x14ce7918U, 0xc737bf73U,
++    0xf7cdea53U, 0xfdaa5b5fU, 0x3d6f14dfU, 0x44db8678U,
++    0xaff381caU, 0x68c43eb9U, 0x24342c38U, 0xa3405fc2U,
++    0x1dc37216U, 0xe2250cbcU, 0x3c498b28U, 0x0d9541ffU,
++    0xa8017139U, 0x0cb3de08U, 0xb4e49cd8U, 0x56c19064U,
++    0xcb84617bU, 0x32b670d5U, 0x6c5c7448U, 0xb85742d0U,
++};
++
++static const u8 Td4[256] = {
++    0x52U, 0x09U, 0x6aU, 0xd5U, 0x30U, 0x36U, 0xa5U, 0x38U,
++    0xbfU, 0x40U, 0xa3U, 0x9eU, 0x81U, 0xf3U, 0xd7U, 0xfbU,
++    0x7cU, 0xe3U, 0x39U, 0x82U, 0x9bU, 0x2fU, 0xffU, 0x87U,
++    0x34U, 0x8eU, 0x43U, 0x44U, 0xc4U, 0xdeU, 0xe9U, 0xcbU,
++    0x54U, 0x7bU, 0x94U, 0x32U, 0xa6U, 0xc2U, 0x23U, 0x3dU,
++    0xeeU, 0x4cU, 0x95U, 0x0bU, 0x42U, 0xfaU, 0xc3U, 0x4eU,
++    0x08U, 0x2eU, 0xa1U, 0x66U, 0x28U, 0xd9U, 0x24U, 0xb2U,
++    0x76U, 0x5bU, 0xa2U, 0x49U, 0x6dU, 0x8bU, 0xd1U, 0x25U,
++    0x72U, 0xf8U, 0xf6U, 0x64U, 0x86U, 0x68U, 0x98U, 0x16U,
++    0xd4U, 0xa4U, 0x5cU, 0xccU, 0x5dU, 0x65U, 0xb6U, 0x92U,
++    0x6cU, 0x70U, 0x48U, 0x50U, 0xfdU, 0xedU, 0xb9U, 0xdaU,
++    0x5eU, 0x15U, 0x46U, 0x57U, 0xa7U, 0x8dU, 0x9dU, 0x84U,
++    0x90U, 0xd8U, 0xabU, 0x00U, 0x8cU, 0xbcU, 0xd3U, 0x0aU,
++    0xf7U, 0xe4U, 0x58U, 0x05U, 0xb8U, 0xb3U, 0x45U, 0x06U,
++    0xd0U, 0x2cU, 0x1eU, 0x8fU, 0xcaU, 0x3fU, 0x0fU, 0x02U,
++    0xc1U, 0xafU, 0xbdU, 0x03U, 0x01U, 0x13U, 0x8aU, 0x6bU,
++    0x3aU, 0x91U, 0x11U, 0x41U, 0x4fU, 0x67U, 0xdcU, 0xeaU,
++    0x97U, 0xf2U, 0xcfU, 0xceU, 0xf0U, 0xb4U, 0xe6U, 0x73U,
++    0x96U, 0xacU, 0x74U, 0x22U, 0xe7U, 0xadU, 0x35U, 0x85U,
++    0xe2U, 0xf9U, 0x37U, 0xe8U, 0x1cU, 0x75U, 0xdfU, 0x6eU,
++    0x47U, 0xf1U, 0x1aU, 0x71U, 0x1dU, 0x29U, 0xc5U, 0x89U,
++    0x6fU, 0xb7U, 0x62U, 0x0eU, 0xaaU, 0x18U, 0xbeU, 0x1bU,
++    0xfcU, 0x56U, 0x3eU, 0x4bU, 0xc6U, 0xd2U, 0x79U, 0x20U,
++    0x9aU, 0xdbU, 0xc0U, 0xfeU, 0x78U, 0xcdU, 0x5aU, 0xf4U,
++    0x1fU, 0xddU, 0xa8U, 0x33U, 0x88U, 0x07U, 0xc7U, 0x31U,
++    0xb1U, 0x12U, 0x10U, 0x59U, 0x27U, 0x80U, 0xecU, 0x5fU,
++    0x60U, 0x51U, 0x7fU, 0xa9U, 0x19U, 0xb5U, 0x4aU, 0x0dU,
++    0x2dU, 0xe5U, 0x7aU, 0x9fU, 0x93U, 0xc9U, 0x9cU, 0xefU,
++    0xa0U, 0xe0U, 0x3bU, 0x4dU, 0xaeU, 0x2aU, 0xf5U, 0xb0U,
++    0xc8U, 0xebU, 0xbbU, 0x3cU, 0x83U, 0x53U, 0x99U, 0x61U,
++    0x17U, 0x2bU, 0x04U, 0x7eU, 0xbaU, 0x77U, 0xd6U, 0x26U,
++    0xe1U, 0x69U, 0x14U, 0x63U, 0x55U, 0x21U, 0x0cU, 0x7dU,
++};
++
++#define GETU32(pt)	   \
++  (			   \
++    ((u32)(pt)[0] << 24)   \
++    ^ ((u32)(pt)[1] << 16) \
++    ^ ((u32)(pt)[2] <<  8) \
++    ^ ((u32)(pt)[3])       \
++  )
++
++#define PUTU32(ct, st)		\
++  {				\
++    (ct)[0] = (u8)((st) >> 24); \
++    (ct)[1] = (u8)((st) >> 16); \
++    (ct)[2] = (u8)((st) >>  8); \
++    (ct)[3] = (u8)(st);		\
++  }
++
++void
++aes_decrypt (const unsigned char *in, unsigned char *out,
++	     const u32 *rk, int nr)
++{
++  u32 s0, s1, s2, s3, t0, t1, t2, t3;
++
++  int r = nr >> 1;
++
++  s0 = GETU32 (in     ) ^ rk[0];
++  s1 = GETU32 (in +  4) ^ rk[1];
++  s2 = GETU32 (in +  8) ^ rk[2];
++  s3 = GETU32 (in + 12) ^ rk[3];
++
++  for (;;) {
++      t0 =
++	Td0[(s0 >> 24)       ] ^
++	Td1[(s3 >> 16) & 0xff] ^
++	Td2[(s2 >>  8) & 0xff] ^
++	Td3[(s1      ) & 0xff] ^
++	rk[4];
++      t1 =
++	Td0[(s1 >> 24)       ] ^
++	Td1[(s0 >> 16) & 0xff] ^
++	Td2[(s3 >>  8) & 0xff] ^
++	Td3[(s2      ) & 0xff] ^
++	rk[5];
++      t2 =
++	Td0[(s2 >> 24)       ] ^
++	Td1[(s1 >> 16) & 0xff] ^
++	Td2[(s0 >>  8) & 0xff] ^
++	Td3[(s3      ) & 0xff] ^
++	rk[6];
++      t3 =
++	Td0[(s3 >> 24)       ] ^
++	Td1[(s2 >> 16) & 0xff] ^
++	Td2[(s1 >>  8) & 0xff] ^
++	Td3[(s0      ) & 0xff] ^
++	rk[7];
++
++      rk += 8;
++      if (--r == 0) {
++	  break;
++      }
++
++      s0 =
++	Td0[(t0 >> 24)       ] ^
++	Td1[(t3 >> 16) & 0xff] ^
++	Td2[(t2 >>  8) & 0xff] ^
++	Td3[(t1      ) & 0xff] ^
++	rk[0];
++      s1 =
++	Td0[(t1 >> 24)       ] ^
++	Td1[(t0 >> 16) & 0xff] ^
++	Td2[(t3 >>  8) & 0xff] ^
++	Td3[(t2      ) & 0xff] ^
++	rk[1];
++      s2 =
++	Td0[(t2 >> 24)       ] ^
++	Td1[(t1 >> 16) & 0xff] ^
++	Td2[(t0 >>  8) & 0xff] ^
++	Td3[(t3      ) & 0xff] ^
++	rk[2];
++      s3 =
++	Td0[(t3 >> 24)       ] ^
++	Td1[(t2 >> 16) & 0xff] ^
++	Td2[(t1 >>  8) & 0xff] ^
++	Td3[(t0      ) & 0xff] ^
++	rk[3];
++    }
++
++    s0 =
++	((u32)Td4[(t0 >> 24)       ] << 24) ^
++	((u32)Td4[(t3 >> 16) & 0xff] << 16) ^
++	((u32)Td4[(t2 >>  8) & 0xff] <<  8) ^
++	((u32)Td4[(t1      ) & 0xff])       ^
++	rk[0];
++    PUTU32 (out     , s0);
++
++    s1 =
++	((u32)Td4[(t1 >> 24)       ] << 24) ^
++	((u32)Td4[(t0 >> 16) & 0xff] << 16) ^
++	((u32)Td4[(t3 >>  8) & 0xff] <<  8) ^
++	((u32)Td4[(t2      ) & 0xff])       ^
++	rk[1];
++    PUTU32 (out +  4, s1);
++
++    s2 =
++	((u32)Td4[(t2 >> 24)       ] << 24) ^
++	((u32)Td4[(t1 >> 16) & 0xff] << 16) ^
++	((u32)Td4[(t0 >>  8) & 0xff] <<  8) ^
++	((u32)Td4[(t3      ) & 0xff])       ^
++	rk[2];
++    PUTU32 (out +  8, s2);
++
++    s3 =
++	((u32)Td4[(t3 >> 24)       ] << 24) ^
++	((u32)Td4[(t2 >> 16) & 0xff] << 16) ^
++	((u32)Td4[(t1 >>  8) & 0xff] <<  8) ^
++	((u32)Td4[(t0      ) & 0xff])       ^
++	rk[3];
++    PUTU32 (out + 12, s3);
++}
++
++int main ()
++{
++  const u8 input[16] = { 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb,
++			 0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 };
++
++  const u8 expected[16] = { 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d,
++			    0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 };
++
++  const u8 key[] = { 0xa8, 0xf9, 0x14, 0xd0, 0x89, 0x25, 0xee, 0xc9,
++		     0xc8, 0x0c, 0x3f, 0xe1, 0xa6, 0x0c, 0x63, 0xb6,
++      		     0x63, 0x5a, 0x7b, 0x0c, 0xfe, 0xea, 0x19, 0x13,
++      		     0x90, 0x88, 0x39, 0xb0, 0xb4, 0xfb, 0x4c, 0x66,
++      		     0x5a, 0x92, 0x7d, 0xdf, 0x9d, 0xb0, 0x62, 0x1f,
++      		     0x6e, 0x62, 0x20, 0xa3, 0x24, 0x73, 0x75, 0xd6,
++      		     0x47, 0x76, 0xc0, 0x12, 0xc7, 0x22, 0x1f, 0xc0,
++      		     0xf3, 0xd2, 0x42, 0xbc, 0x4a, 0x11, 0x55, 0x75,
++      		     0x76, 0xd8, 0xfc, 0x6e, 0x80, 0x54, 0xdf, 0xd2,
++      		     0x34, 0xf0, 0x5d, 0x7c, 0xb9, 0xc3, 0x17, 0xc9,
++      		     0xfc, 0x0a, 0xa3, 0x6e, 0xf6, 0x8c, 0x23, 0xbc,
++      		     0xb4, 0xa4, 0x82, 0xae, 0x8d, 0x33, 0x4a, 0xb5,
++      		     0x13, 0x44, 0x88, 0x90, 0x0a, 0x86, 0x80, 0xd2,
++      		     0x42, 0x28, 0xa1, 0x12, 0x39, 0x97, 0xc8, 0x1b,
++      		     0xf7, 0x13, 0x1f, 0x7c, 0x19, 0xc2, 0x08, 0x42,
++      		     0x48, 0xae, 0x21, 0xc0, 0x7b, 0xbf, 0x69, 0x09,
++      		     0xeb, 0x05, 0x75, 0xcc, 0xee, 0xd1, 0x17, 0x3e,
++      		     0x51, 0x6c, 0x29, 0x82, 0x33, 0x11, 0x48, 0xc9,
++      		     0xa7, 0x08, 0x37, 0x2b, 0x05, 0xd4, 0x62, 0xf2,
++      		     0xbf, 0xbd, 0x3e, 0xbc, 0x62, 0x7d, 0x61, 0x4b,
++      		     0x16, 0x15, 0x7e, 0x2b, 0xa6, 0xd2, 0xae, 0x28,
++      		     0x88, 0x15, 0xf7, 0xab, 0x3c, 0x4f, 0xcf, 0x09 };
++
++  u8 output[16] = { 0 };
++
++  aes_decrypt (input, output, (u32*) key, 10);
++
++  if (memcmp (output, expected, 16) != 0)
++    abort ();
++
++  return 0;
++}
++
++/* { dg-final { scan-assembler "rev32" } } */
++/* { dg-final { scan-assembler "aesimc" } } */
++/* { dg-final { scan-assembler "aesd" } } */
+diff --git a/gcc/testsuite/gcc.target/aarch64/aes-encrypt.c b/gcc/testsuite/gcc.target/aarch64/aes-encrypt.c
+new file mode 100644
+index 000000000..e3f3c446f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/aes-encrypt.c
+@@ -0,0 +1,443 @@
++/* { dg-do run } */
++/* { dg-options "-O3 -fno-inline --save-temps -fcrypto-accel-aes -march=armv8.2-a+lse+crypto" } */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++
++typedef uint8_t u8;
++typedef uint32_t u32;
++
++static const u32 Te0[256] = {
++    0xc66363a5U, 0xf87c7c84U, 0xee777799U, 0xf67b7b8dU,
++    0xfff2f20dU, 0xd66b6bbdU, 0xde6f6fb1U, 0x91c5c554U,
++    0x60303050U, 0x02010103U, 0xce6767a9U, 0x562b2b7dU,
++    0xe7fefe19U, 0xb5d7d762U, 0x4dababe6U, 0xec76769aU,
++    0x8fcaca45U, 0x1f82829dU, 0x89c9c940U, 0xfa7d7d87U,
++    0xeffafa15U, 0xb25959ebU, 0x8e4747c9U, 0xfbf0f00bU,
++    0x41adadecU, 0xb3d4d467U, 0x5fa2a2fdU, 0x45afafeaU,
++    0x239c9cbfU, 0x53a4a4f7U, 0xe4727296U, 0x9bc0c05bU,
++    0x75b7b7c2U, 0xe1fdfd1cU, 0x3d9393aeU, 0x4c26266aU,
++    0x6c36365aU, 0x7e3f3f41U, 0xf5f7f702U, 0x83cccc4fU,
++    0x6834345cU, 0x51a5a5f4U, 0xd1e5e534U, 0xf9f1f108U,
++    0xe2717193U, 0xabd8d873U, 0x62313153U, 0x2a15153fU,
++    0x0804040cU, 0x95c7c752U, 0x46232365U, 0x9dc3c35eU,
++    0x30181828U, 0x379696a1U, 0x0a05050fU, 0x2f9a9ab5U,
++    0x0e070709U, 0x24121236U, 0x1b80809bU, 0xdfe2e23dU,
++    0xcdebeb26U, 0x4e272769U, 0x7fb2b2cdU, 0xea75759fU,
++    0x1209091bU, 0x1d83839eU, 0x582c2c74U, 0x341a1a2eU,
++    0x361b1b2dU, 0xdc6e6eb2U, 0xb45a5aeeU, 0x5ba0a0fbU,
++    0xa45252f6U, 0x763b3b4dU, 0xb7d6d661U, 0x7db3b3ceU,
++    0x5229297bU, 0xdde3e33eU, 0x5e2f2f71U, 0x13848497U,
++    0xa65353f5U, 0xb9d1d168U, 0x00000000U, 0xc1eded2cU,
++    0x40202060U, 0xe3fcfc1fU, 0x79b1b1c8U, 0xb65b5bedU,
++    0xd46a6abeU, 0x8dcbcb46U, 0x67bebed9U, 0x7239394bU,
++    0x944a4adeU, 0x984c4cd4U, 0xb05858e8U, 0x85cfcf4aU,
++    0xbbd0d06bU, 0xc5efef2aU, 0x4faaaae5U, 0xedfbfb16U,
++    0x864343c5U, 0x9a4d4dd7U, 0x66333355U, 0x11858594U,
++    0x8a4545cfU, 0xe9f9f910U, 0x04020206U, 0xfe7f7f81U,
++    0xa05050f0U, 0x783c3c44U, 0x259f9fbaU, 0x4ba8a8e3U,
++    0xa25151f3U, 0x5da3a3feU, 0x804040c0U, 0x058f8f8aU,
++    0x3f9292adU, 0x219d9dbcU, 0x70383848U, 0xf1f5f504U,
++    0x63bcbcdfU, 0x77b6b6c1U, 0xafdada75U, 0x42212163U,
++    0x20101030U, 0xe5ffff1aU, 0xfdf3f30eU, 0xbfd2d26dU,
++    0x81cdcd4cU, 0x180c0c14U, 0x26131335U, 0xc3ecec2fU,
++    0xbe5f5fe1U, 0x359797a2U, 0x884444ccU, 0x2e171739U,
++    0x93c4c457U, 0x55a7a7f2U, 0xfc7e7e82U, 0x7a3d3d47U,
++    0xc86464acU, 0xba5d5de7U, 0x3219192bU, 0xe6737395U,
++    0xc06060a0U, 0x19818198U, 0x9e4f4fd1U, 0xa3dcdc7fU,
++    0x44222266U, 0x542a2a7eU, 0x3b9090abU, 0x0b888883U,
++    0x8c4646caU, 0xc7eeee29U, 0x6bb8b8d3U, 0x2814143cU,
++    0xa7dede79U, 0xbc5e5ee2U, 0x160b0b1dU, 0xaddbdb76U,
++    0xdbe0e03bU, 0x64323256U, 0x743a3a4eU, 0x140a0a1eU,
++    0x924949dbU, 0x0c06060aU, 0x4824246cU, 0xb85c5ce4U,
++    0x9fc2c25dU, 0xbdd3d36eU, 0x43acacefU, 0xc46262a6U,
++    0x399191a8U, 0x319595a4U, 0xd3e4e437U, 0xf279798bU,
++    0xd5e7e732U, 0x8bc8c843U, 0x6e373759U, 0xda6d6db7U,
++    0x018d8d8cU, 0xb1d5d564U, 0x9c4e4ed2U, 0x49a9a9e0U,
++    0xd86c6cb4U, 0xac5656faU, 0xf3f4f407U, 0xcfeaea25U,
++    0xca6565afU, 0xf47a7a8eU, 0x47aeaee9U, 0x10080818U,
++    0x6fbabad5U, 0xf0787888U, 0x4a25256fU, 0x5c2e2e72U,
++    0x381c1c24U, 0x57a6a6f1U, 0x73b4b4c7U, 0x97c6c651U,
++    0xcbe8e823U, 0xa1dddd7cU, 0xe874749cU, 0x3e1f1f21U,
++    0x964b4bddU, 0x61bdbddcU, 0x0d8b8b86U, 0x0f8a8a85U,
++    0xe0707090U, 0x7c3e3e42U, 0x71b5b5c4U, 0xcc6666aaU,
++    0x904848d8U, 0x06030305U, 0xf7f6f601U, 0x1c0e0e12U,
++    0xc26161a3U, 0x6a35355fU, 0xae5757f9U, 0x69b9b9d0U,
++    0x17868691U, 0x99c1c158U, 0x3a1d1d27U, 0x279e9eb9U,
++    0xd9e1e138U, 0xebf8f813U, 0x2b9898b3U, 0x22111133U,
++    0xd26969bbU, 0xa9d9d970U, 0x078e8e89U, 0x339494a7U,
++    0x2d9b9bb6U, 0x3c1e1e22U, 0x15878792U, 0xc9e9e920U,
++    0x87cece49U, 0xaa5555ffU, 0x50282878U, 0xa5dfdf7aU,
++    0x038c8c8fU, 0x59a1a1f8U, 0x09898980U, 0x1a0d0d17U,
++    0x65bfbfdaU, 0xd7e6e631U, 0x844242c6U, 0xd06868b8U,
++    0x824141c3U, 0x299999b0U, 0x5a2d2d77U, 0x1e0f0f11U,
++    0x7bb0b0cbU, 0xa85454fcU, 0x6dbbbbd6U, 0x2c16163aU,
++};
++
++static const u32 Te1[256] = {
++    0xa5c66363U, 0x84f87c7cU, 0x99ee7777U, 0x8df67b7bU,
++    0x0dfff2f2U, 0xbdd66b6bU, 0xb1de6f6fU, 0x5491c5c5U,
++    0x50603030U, 0x03020101U, 0xa9ce6767U, 0x7d562b2bU,
++    0x19e7fefeU, 0x62b5d7d7U, 0xe64dababU, 0x9aec7676U,
++    0x458fcacaU, 0x9d1f8282U, 0x4089c9c9U, 0x87fa7d7dU,
++    0x15effafaU, 0xebb25959U, 0xc98e4747U, 0x0bfbf0f0U,
++    0xec41adadU, 0x67b3d4d4U, 0xfd5fa2a2U, 0xea45afafU,
++    0xbf239c9cU, 0xf753a4a4U, 0x96e47272U, 0x5b9bc0c0U,
++    0xc275b7b7U, 0x1ce1fdfdU, 0xae3d9393U, 0x6a4c2626U,
++    0x5a6c3636U, 0x417e3f3fU, 0x02f5f7f7U, 0x4f83ccccU,
++    0x5c683434U, 0xf451a5a5U, 0x34d1e5e5U, 0x08f9f1f1U,
++    0x93e27171U, 0x73abd8d8U, 0x53623131U, 0x3f2a1515U,
++    0x0c080404U, 0x5295c7c7U, 0x65462323U, 0x5e9dc3c3U,
++    0x28301818U, 0xa1379696U, 0x0f0a0505U, 0xb52f9a9aU,
++    0x090e0707U, 0x36241212U, 0x9b1b8080U, 0x3ddfe2e2U,
++    0x26cdebebU, 0x694e2727U, 0xcd7fb2b2U, 0x9fea7575U,
++    0x1b120909U, 0x9e1d8383U, 0x74582c2cU, 0x2e341a1aU,
++    0x2d361b1bU, 0xb2dc6e6eU, 0xeeb45a5aU, 0xfb5ba0a0U,
++    0xf6a45252U, 0x4d763b3bU, 0x61b7d6d6U, 0xce7db3b3U,
++    0x7b522929U, 0x3edde3e3U, 0x715e2f2fU, 0x97138484U,
++    0xf5a65353U, 0x68b9d1d1U, 0x00000000U, 0x2cc1ededU,
++    0x60402020U, 0x1fe3fcfcU, 0xc879b1b1U, 0xedb65b5bU,
++    0xbed46a6aU, 0x468dcbcbU, 0xd967bebeU, 0x4b723939U,
++    0xde944a4aU, 0xd4984c4cU, 0xe8b05858U, 0x4a85cfcfU,
++    0x6bbbd0d0U, 0x2ac5efefU, 0xe54faaaaU, 0x16edfbfbU,
++    0xc5864343U, 0xd79a4d4dU, 0x55663333U, 0x94118585U,
++    0xcf8a4545U, 0x10e9f9f9U, 0x06040202U, 0x81fe7f7fU,
++    0xf0a05050U, 0x44783c3cU, 0xba259f9fU, 0xe34ba8a8U,
++    0xf3a25151U, 0xfe5da3a3U, 0xc0804040U, 0x8a058f8fU,
++    0xad3f9292U, 0xbc219d9dU, 0x48703838U, 0x04f1f5f5U,
++    0xdf63bcbcU, 0xc177b6b6U, 0x75afdadaU, 0x63422121U,
++    0x30201010U, 0x1ae5ffffU, 0x0efdf3f3U, 0x6dbfd2d2U,
++    0x4c81cdcdU, 0x14180c0cU, 0x35261313U, 0x2fc3ececU,
++    0xe1be5f5fU, 0xa2359797U, 0xcc884444U, 0x392e1717U,
++    0x5793c4c4U, 0xf255a7a7U, 0x82fc7e7eU, 0x477a3d3dU,
++    0xacc86464U, 0xe7ba5d5dU, 0x2b321919U, 0x95e67373U,
++    0xa0c06060U, 0x98198181U, 0xd19e4f4fU, 0x7fa3dcdcU,
++    0x66442222U, 0x7e542a2aU, 0xab3b9090U, 0x830b8888U,
++    0xca8c4646U, 0x29c7eeeeU, 0xd36bb8b8U, 0x3c281414U,
++    0x79a7dedeU, 0xe2bc5e5eU, 0x1d160b0bU, 0x76addbdbU,
++    0x3bdbe0e0U, 0x56643232U, 0x4e743a3aU, 0x1e140a0aU,
++    0xdb924949U, 0x0a0c0606U, 0x6c482424U, 0xe4b85c5cU,
++    0x5d9fc2c2U, 0x6ebdd3d3U, 0xef43acacU, 0xa6c46262U,
++    0xa8399191U, 0xa4319595U, 0x37d3e4e4U, 0x8bf27979U,
++    0x32d5e7e7U, 0x438bc8c8U, 0x596e3737U, 0xb7da6d6dU,
++    0x8c018d8dU, 0x64b1d5d5U, 0xd29c4e4eU, 0xe049a9a9U,
++    0xb4d86c6cU, 0xfaac5656U, 0x07f3f4f4U, 0x25cfeaeaU,
++    0xafca6565U, 0x8ef47a7aU, 0xe947aeaeU, 0x18100808U,
++    0xd56fbabaU, 0x88f07878U, 0x6f4a2525U, 0x725c2e2eU,
++    0x24381c1cU, 0xf157a6a6U, 0xc773b4b4U, 0x5197c6c6U,
++    0x23cbe8e8U, 0x7ca1ddddU, 0x9ce87474U, 0x213e1f1fU,
++    0xdd964b4bU, 0xdc61bdbdU, 0x860d8b8bU, 0x850f8a8aU,
++    0x90e07070U, 0x427c3e3eU, 0xc471b5b5U, 0xaacc6666U,
++    0xd8904848U, 0x05060303U, 0x01f7f6f6U, 0x121c0e0eU,
++    0xa3c26161U, 0x5f6a3535U, 0xf9ae5757U, 0xd069b9b9U,
++    0x91178686U, 0x5899c1c1U, 0x273a1d1dU, 0xb9279e9eU,
++    0x38d9e1e1U, 0x13ebf8f8U, 0xb32b9898U, 0x33221111U,
++    0xbbd26969U, 0x70a9d9d9U, 0x89078e8eU, 0xa7339494U,
++    0xb62d9b9bU, 0x223c1e1eU, 0x92158787U, 0x20c9e9e9U,
++    0x4987ceceU, 0xffaa5555U, 0x78502828U, 0x7aa5dfdfU,
++    0x8f038c8cU, 0xf859a1a1U, 0x80098989U, 0x171a0d0dU,
++    0xda65bfbfU, 0x31d7e6e6U, 0xc6844242U, 0xb8d06868U,
++    0xc3824141U, 0xb0299999U, 0x775a2d2dU, 0x111e0f0fU,
++    0xcb7bb0b0U, 0xfca85454U, 0xd66dbbbbU, 0x3a2c1616U,
++};
++
++static const u32 Te2[256] = {
++    0x63a5c663U, 0x7c84f87cU, 0x7799ee77U, 0x7b8df67bU,
++    0xf20dfff2U, 0x6bbdd66bU, 0x6fb1de6fU, 0xc55491c5U,
++    0x30506030U, 0x01030201U, 0x67a9ce67U, 0x2b7d562bU,
++    0xfe19e7feU, 0xd762b5d7U, 0xabe64dabU, 0x769aec76U,
++    0xca458fcaU, 0x829d1f82U, 0xc94089c9U, 0x7d87fa7dU,
++    0xfa15effaU, 0x59ebb259U, 0x47c98e47U, 0xf00bfbf0U,
++    0xadec41adU, 0xd467b3d4U, 0xa2fd5fa2U, 0xafea45afU,
++    0x9cbf239cU, 0xa4f753a4U, 0x7296e472U, 0xc05b9bc0U,
++    0xb7c275b7U, 0xfd1ce1fdU, 0x93ae3d93U, 0x266a4c26U,
++    0x365a6c36U, 0x3f417e3fU, 0xf702f5f7U, 0xcc4f83ccU,
++    0x345c6834U, 0xa5f451a5U, 0xe534d1e5U, 0xf108f9f1U,
++    0x7193e271U, 0xd873abd8U, 0x31536231U, 0x153f2a15U,
++    0x040c0804U, 0xc75295c7U, 0x23654623U, 0xc35e9dc3U,
++    0x18283018U, 0x96a13796U, 0x050f0a05U, 0x9ab52f9aU,
++    0x07090e07U, 0x12362412U, 0x809b1b80U, 0xe23ddfe2U,
++    0xeb26cdebU, 0x27694e27U, 0xb2cd7fb2U, 0x759fea75U,
++    0x091b1209U, 0x839e1d83U, 0x2c74582cU, 0x1a2e341aU,
++    0x1b2d361bU, 0x6eb2dc6eU, 0x5aeeb45aU, 0xa0fb5ba0U,
++    0x52f6a452U, 0x3b4d763bU, 0xd661b7d6U, 0xb3ce7db3U,
++    0x297b5229U, 0xe33edde3U, 0x2f715e2fU, 0x84971384U,
++    0x53f5a653U, 0xd168b9d1U, 0x00000000U, 0xed2cc1edU,
++    0x20604020U, 0xfc1fe3fcU, 0xb1c879b1U, 0x5bedb65bU,
++    0x6abed46aU, 0xcb468dcbU, 0xbed967beU, 0x394b7239U,
++    0x4ade944aU, 0x4cd4984cU, 0x58e8b058U, 0xcf4a85cfU,
++    0xd06bbbd0U, 0xef2ac5efU, 0xaae54faaU, 0xfb16edfbU,
++    0x43c58643U, 0x4dd79a4dU, 0x33556633U, 0x85941185U,
++    0x45cf8a45U, 0xf910e9f9U, 0x02060402U, 0x7f81fe7fU,
++    0x50f0a050U, 0x3c44783cU, 0x9fba259fU, 0xa8e34ba8U,
++    0x51f3a251U, 0xa3fe5da3U, 0x40c08040U, 0x8f8a058fU,
++    0x92ad3f92U, 0x9dbc219dU, 0x38487038U, 0xf504f1f5U,
++    0xbcdf63bcU, 0xb6c177b6U, 0xda75afdaU, 0x21634221U,
++    0x10302010U, 0xff1ae5ffU, 0xf30efdf3U, 0xd26dbfd2U,
++    0xcd4c81cdU, 0x0c14180cU, 0x13352613U, 0xec2fc3ecU,
++    0x5fe1be5fU, 0x97a23597U, 0x44cc8844U, 0x17392e17U,
++    0xc45793c4U, 0xa7f255a7U, 0x7e82fc7eU, 0x3d477a3dU,
++    0x64acc864U, 0x5de7ba5dU, 0x192b3219U, 0x7395e673U,
++    0x60a0c060U, 0x81981981U, 0x4fd19e4fU, 0xdc7fa3dcU,
++    0x22664422U, 0x2a7e542aU, 0x90ab3b90U, 0x88830b88U,
++    0x46ca8c46U, 0xee29c7eeU, 0xb8d36bb8U, 0x143c2814U,
++    0xde79a7deU, 0x5ee2bc5eU, 0x0b1d160bU, 0xdb76addbU,
++    0xe03bdbe0U, 0x32566432U, 0x3a4e743aU, 0x0a1e140aU,
++    0x49db9249U, 0x060a0c06U, 0x246c4824U, 0x5ce4b85cU,
++    0xc25d9fc2U, 0xd36ebdd3U, 0xacef43acU, 0x62a6c462U,
++    0x91a83991U, 0x95a43195U, 0xe437d3e4U, 0x798bf279U,
++    0xe732d5e7U, 0xc8438bc8U, 0x37596e37U, 0x6db7da6dU,
++    0x8d8c018dU, 0xd564b1d5U, 0x4ed29c4eU, 0xa9e049a9U,
++    0x6cb4d86cU, 0x56faac56U, 0xf407f3f4U, 0xea25cfeaU,
++    0x65afca65U, 0x7a8ef47aU, 0xaee947aeU, 0x08181008U,
++    0xbad56fbaU, 0x7888f078U, 0x256f4a25U, 0x2e725c2eU,
++    0x1c24381cU, 0xa6f157a6U, 0xb4c773b4U, 0xc65197c6U,
++    0xe823cbe8U, 0xdd7ca1ddU, 0x749ce874U, 0x1f213e1fU,
++    0x4bdd964bU, 0xbddc61bdU, 0x8b860d8bU, 0x8a850f8aU,
++    0x7090e070U, 0x3e427c3eU, 0xb5c471b5U, 0x66aacc66U,
++    0x48d89048U, 0x03050603U, 0xf601f7f6U, 0x0e121c0eU,
++    0x61a3c261U, 0x355f6a35U, 0x57f9ae57U, 0xb9d069b9U,
++    0x86911786U, 0xc15899c1U, 0x1d273a1dU, 0x9eb9279eU,
++    0xe138d9e1U, 0xf813ebf8U, 0x98b32b98U, 0x11332211U,
++    0x69bbd269U, 0xd970a9d9U, 0x8e89078eU, 0x94a73394U,
++    0x9bb62d9bU, 0x1e223c1eU, 0x87921587U, 0xe920c9e9U,
++    0xce4987ceU, 0x55ffaa55U, 0x28785028U, 0xdf7aa5dfU,
++    0x8c8f038cU, 0xa1f859a1U, 0x89800989U, 0x0d171a0dU,
++    0xbfda65bfU, 0xe631d7e6U, 0x42c68442U, 0x68b8d068U,
++    0x41c38241U, 0x99b02999U, 0x2d775a2dU, 0x0f111e0fU,
++    0xb0cb7bb0U, 0x54fca854U, 0xbbd66dbbU, 0x163a2c16U,
++};
++
++static const u32 Te3[256] = {
++    0x6363a5c6U, 0x7c7c84f8U, 0x777799eeU, 0x7b7b8df6U,
++    0xf2f20dffU, 0x6b6bbdd6U, 0x6f6fb1deU, 0xc5c55491U,
++    0x30305060U, 0x01010302U, 0x6767a9ceU, 0x2b2b7d56U,
++    0xfefe19e7U, 0xd7d762b5U, 0xababe64dU, 0x76769aecU,
++    0xcaca458fU, 0x82829d1fU, 0xc9c94089U, 0x7d7d87faU,
++    0xfafa15efU, 0x5959ebb2U, 0x4747c98eU, 0xf0f00bfbU,
++    0xadadec41U, 0xd4d467b3U, 0xa2a2fd5fU, 0xafafea45U,
++    0x9c9cbf23U, 0xa4a4f753U, 0x727296e4U, 0xc0c05b9bU,
++    0xb7b7c275U, 0xfdfd1ce1U, 0x9393ae3dU, 0x26266a4cU,
++    0x36365a6cU, 0x3f3f417eU, 0xf7f702f5U, 0xcccc4f83U,
++    0x34345c68U, 0xa5a5f451U, 0xe5e534d1U, 0xf1f108f9U,
++    0x717193e2U, 0xd8d873abU, 0x31315362U, 0x15153f2aU,
++    0x04040c08U, 0xc7c75295U, 0x23236546U, 0xc3c35e9dU,
++    0x18182830U, 0x9696a137U, 0x05050f0aU, 0x9a9ab52fU,
++    0x0707090eU, 0x12123624U, 0x80809b1bU, 0xe2e23ddfU,
++    0xebeb26cdU, 0x2727694eU, 0xb2b2cd7fU, 0x75759feaU,
++    0x09091b12U, 0x83839e1dU, 0x2c2c7458U, 0x1a1a2e34U,
++    0x1b1b2d36U, 0x6e6eb2dcU, 0x5a5aeeb4U, 0xa0a0fb5bU,
++    0x5252f6a4U, 0x3b3b4d76U, 0xd6d661b7U, 0xb3b3ce7dU,
++    0x29297b52U, 0xe3e33eddU, 0x2f2f715eU, 0x84849713U,
++    0x5353f5a6U, 0xd1d168b9U, 0x00000000U, 0xeded2cc1U,
++    0x20206040U, 0xfcfc1fe3U, 0xb1b1c879U, 0x5b5bedb6U,
++    0x6a6abed4U, 0xcbcb468dU, 0xbebed967U, 0x39394b72U,
++    0x4a4ade94U, 0x4c4cd498U, 0x5858e8b0U, 0xcfcf4a85U,
++    0xd0d06bbbU, 0xefef2ac5U, 0xaaaae54fU, 0xfbfb16edU,
++    0x4343c586U, 0x4d4dd79aU, 0x33335566U, 0x85859411U,
++    0x4545cf8aU, 0xf9f910e9U, 0x02020604U, 0x7f7f81feU,
++    0x5050f0a0U, 0x3c3c4478U, 0x9f9fba25U, 0xa8a8e34bU,
++    0x5151f3a2U, 0xa3a3fe5dU, 0x4040c080U, 0x8f8f8a05U,
++    0x9292ad3fU, 0x9d9dbc21U, 0x38384870U, 0xf5f504f1U,
++    0xbcbcdf63U, 0xb6b6c177U, 0xdada75afU, 0x21216342U,
++    0x10103020U, 0xffff1ae5U, 0xf3f30efdU, 0xd2d26dbfU,
++    0xcdcd4c81U, 0x0c0c1418U, 0x13133526U, 0xecec2fc3U,
++    0x5f5fe1beU, 0x9797a235U, 0x4444cc88U, 0x1717392eU,
++    0xc4c45793U, 0xa7a7f255U, 0x7e7e82fcU, 0x3d3d477aU,
++    0x6464acc8U, 0x5d5de7baU, 0x19192b32U, 0x737395e6U,
++    0x6060a0c0U, 0x81819819U, 0x4f4fd19eU, 0xdcdc7fa3U,
++    0x22226644U, 0x2a2a7e54U, 0x9090ab3bU, 0x8888830bU,
++    0x4646ca8cU, 0xeeee29c7U, 0xb8b8d36bU, 0x14143c28U,
++    0xdede79a7U, 0x5e5ee2bcU, 0x0b0b1d16U, 0xdbdb76adU,
++    0xe0e03bdbU, 0x32325664U, 0x3a3a4e74U, 0x0a0a1e14U,
++    0x4949db92U, 0x06060a0cU, 0x24246c48U, 0x5c5ce4b8U,
++    0xc2c25d9fU, 0xd3d36ebdU, 0xacacef43U, 0x6262a6c4U,
++    0x9191a839U, 0x9595a431U, 0xe4e437d3U, 0x79798bf2U,
++    0xe7e732d5U, 0xc8c8438bU, 0x3737596eU, 0x6d6db7daU,
++    0x8d8d8c01U, 0xd5d564b1U, 0x4e4ed29cU, 0xa9a9e049U,
++    0x6c6cb4d8U, 0x5656faacU, 0xf4f407f3U, 0xeaea25cfU,
++    0x6565afcaU, 0x7a7a8ef4U, 0xaeaee947U, 0x08081810U,
++    0xbabad56fU, 0x787888f0U, 0x25256f4aU, 0x2e2e725cU,
++    0x1c1c2438U, 0xa6a6f157U, 0xb4b4c773U, 0xc6c65197U,
++    0xe8e823cbU, 0xdddd7ca1U, 0x74749ce8U, 0x1f1f213eU,
++    0x4b4bdd96U, 0xbdbddc61U, 0x8b8b860dU, 0x8a8a850fU,
++    0x707090e0U, 0x3e3e427cU, 0xb5b5c471U, 0x6666aaccU,
++    0x4848d890U, 0x03030506U, 0xf6f601f7U, 0x0e0e121cU,
++    0x6161a3c2U, 0x35355f6aU, 0x5757f9aeU, 0xb9b9d069U,
++    0x86869117U, 0xc1c15899U, 0x1d1d273aU, 0x9e9eb927U,
++    0xe1e138d9U, 0xf8f813ebU, 0x9898b32bU, 0x11113322U,
++    0x6969bbd2U, 0xd9d970a9U, 0x8e8e8907U, 0x9494a733U,
++    0x9b9bb62dU, 0x1e1e223cU, 0x87879215U, 0xe9e920c9U,
++    0xcece4987U, 0x5555ffaaU, 0x28287850U, 0xdfdf7aa5U,
++    0x8c8c8f03U, 0xa1a1f859U, 0x89898009U, 0x0d0d171aU,
++    0xbfbfda65U, 0xe6e631d7U, 0x4242c684U, 0x6868b8d0U,
++    0x4141c382U, 0x9999b029U, 0x2d2d775aU, 0x0f0f111eU,
++    0xb0b0cb7bU, 0x5454fca8U, 0xbbbbd66dU, 0x16163a2cU,
++};
++
++#define GETU32(pt)	   \
++  (			   \
++    ((u32)(pt)[0] << 24)   \
++    ^ ((u32)(pt)[1] << 16) \
++    ^ ((u32)(pt)[2] <<  8) \
++    ^ ((u32)(pt)[3])       \
++  )
++
++#define PUTU32(ct, st)		\
++  {				\
++    (ct)[0] = (u8)((st) >> 24); \
++    (ct)[1] = (u8)((st) >> 16); \
++    (ct)[2] = (u8)((st) >>  8); \
++    (ct)[3] = (u8)(st);		\
++  }
++
++void
++aes_encrypt (const unsigned char *in, unsigned char *out,
++	     const u32 *rk, int nr)
++{
++  u32 s0, s1, s2, s3, t0, t1, t2, t3;
++
++  int r = nr >> 1;
++
++  s0 = GETU32 (in     ) ^ rk[0];
++  s1 = GETU32 (in +  4) ^ rk[1];
++  s2 = GETU32 (in +  8) ^ rk[2];
++  s3 = GETU32 (in + 12) ^ rk[3];
++
++  for (;;) {
++    t0 =
++	Te0[(s0 >> 24)       ] ^
++	Te1[(s1 >> 16) & 0xff] ^
++	Te2[(s2 >>  8) & 0xff] ^
++	Te3[(s3      ) & 0xff] ^
++	rk[4];
++    t1 =
++	Te0[(s1 >> 24)       ] ^
++	Te1[(s2 >> 16) & 0xff] ^
++	Te2[(s3 >>  8) & 0xff] ^
++	Te3[(s0      ) & 0xff] ^
++	rk[5];
++    t2 =
++	Te0[(s2 >> 24)       ] ^
++	Te1[(s3 >> 16) & 0xff] ^
++	Te2[(s0 >>  8) & 0xff] ^
++	Te3[(s1      ) & 0xff] ^
++	rk[6];
++    t3 =
++	Te0[(s3 >> 24)       ] ^
++	Te1[(s0 >> 16) & 0xff] ^
++	Te2[(s1 >>  8) & 0xff] ^
++	Te3[(s2      ) & 0xff] ^
++	rk[7];
++
++    rk += 8;
++    if (--r == 0)
++	break;
++
++    s0 =
++	Te0[(t0 >> 24)       ] ^
++	Te1[(t1 >> 16) & 0xff] ^
++	Te2[(t2 >>  8) & 0xff] ^
++	Te3[(t3      ) & 0xff] ^
++	rk[0];
++    s1 =
++	Te0[(t1 >> 24)       ] ^
++	Te1[(t2 >> 16) & 0xff] ^
++	Te2[(t3 >>  8) & 0xff] ^
++	Te3[(t0      ) & 0xff] ^
++	rk[1];
++    s2 =
++	Te0[(t2 >> 24)       ] ^
++	Te1[(t3 >> 16) & 0xff] ^
++	Te2[(t0 >>  8) & 0xff] ^
++	Te3[(t1      ) & 0xff] ^
++	rk[2];
++    s3 =
++	Te0[(t3 >> 24)       ] ^
++	Te1[(t0 >> 16) & 0xff] ^
++	Te2[(t1 >>  8) & 0xff] ^
++	Te3[(t2      ) & 0xff] ^
++	rk[3];
++  }
++
++  s0 =
++      (Te2[(t0 >> 24)       ] & 0xff000000) ^
++      (Te3[(t1 >> 16) & 0xff] & 0x00ff0000) ^
++      (Te0[(t2 >>  8) & 0xff] & 0x0000ff00) ^
++      (Te1[(t3      ) & 0xff] & 0x000000ff) ^
++      rk[0];
++  PUTU32 (out     , s0);
++
++  s1 =
++      (Te2[(t1 >> 24)       ] & 0xff000000) ^
++      (Te3[(t2 >> 16) & 0xff] & 0x00ff0000) ^
++      (Te0[(t3 >>  8) & 0xff] & 0x0000ff00) ^
++      (Te1[(t0      ) & 0xff] & 0x000000ff) ^
++      rk[1];
++  PUTU32 (out +  4, s1);
++
++  s2 =
++      (Te2[(t2 >> 24)       ] & 0xff000000) ^
++      (Te3[(t3 >> 16) & 0xff] & 0x00ff0000) ^
++      (Te0[(t0 >>  8) & 0xff] & 0x0000ff00) ^
++      (Te1[(t1      ) & 0xff] & 0x000000ff) ^
++      rk[2];
++  PUTU32 (out +  8, s2);
++
++  s3 =
++      (Te2[(t3 >> 24)       ] & 0xff000000) ^
++      (Te3[(t0 >> 16) & 0xff] & 0x00ff0000) ^
++      (Te0[(t1 >>  8) & 0xff] & 0x0000ff00) ^
++      (Te1[(t2      ) & 0xff] & 0x000000ff) ^
++      rk[3];
++  PUTU32 (out + 12, s3);
++}
++
++
++int main ()
++{
++  const u8 input[16] = { 0x32, 0x43, 0xf6, 0xa8, 0x88, 0x5a, 0x30, 0x8d,
++			 0x31, 0x31, 0x98, 0xa2, 0xe0, 0x37, 0x07, 0x34 };
++
++  const u8 expected[16] = { 0x39, 0x25, 0x84, 0x1d, 0x02, 0xdc, 0x09, 0xfb,
++			    0xdc, 0x11, 0x85, 0x97, 0x19, 0x6a, 0x0b, 0x32 };
++
++  const u8 key[] = { 0x16, 0x15, 0x7e, 0x2b, 0xa6, 0xd2, 0xae, 0x28,
++		     0x88, 0x15, 0xf7, 0xab, 0x3c, 0x4f, 0xcf, 0x09,
++		     0x17, 0xfe, 0xfa, 0xa0, 0xb1, 0x2c, 0x54, 0x88,
++		     0x39, 0x39, 0xa3, 0x23, 0x05, 0x76, 0x6c, 0x2a,
++		     0xf2, 0x95, 0xc2, 0xf2, 0x43, 0xb9, 0x96, 0x7a,
++		     0x7a, 0x80, 0x35, 0x59, 0x7f, 0xf6, 0x59, 0x73,
++		     0x7d, 0x47, 0x80, 0x3d, 0x3e, 0xfe, 0x16, 0x47,
++		     0x44, 0x7e, 0x23, 0x1e, 0x3b, 0x88, 0x7a, 0x6d,
++		     0x41, 0xa5, 0x44, 0xef, 0x7f, 0x5b, 0x52, 0xa8,
++		     0x3b, 0x25, 0x71, 0xb6, 0x00, 0xad, 0x0b, 0xdb,
++		     0xf8, 0xc6, 0xd1, 0xd4, 0x87, 0x9d, 0x83, 0x7c,
++		     0xbc, 0xb8, 0xf2, 0xca, 0xbc, 0x15, 0xf9, 0x11,
++		     0x7a, 0xa3, 0x88, 0x6d, 0xfd, 0x3e, 0x0b, 0x11,
++		     0x41, 0x86, 0xf9, 0xdb, 0xfd, 0x93, 0x00, 0xca,
++		     0x0e, 0xf7, 0x54, 0x4e, 0xf3, 0xc9, 0x5f, 0x5f,
++		     0xb2, 0x4f, 0xa6, 0x84, 0x4f, 0xdc, 0xa6, 0x4e,
++		     0x21, 0x73, 0xd2, 0xea, 0xd2, 0xba, 0x8d, 0xb5,
++		     0x60, 0xf5, 0x2b, 0x31, 0x2f, 0x29, 0x8d, 0x7f,
++		     0xf3, 0x66, 0x77, 0xac, 0x21, 0xdc, 0xfa, 0x19,
++		     0x41, 0x29, 0xd1, 0x28, 0x6e, 0x00, 0x5c, 0x57,
++		     0xa8, 0xf9, 0x14, 0xd0, 0x89, 0x25, 0xee, 0xc9,
++		     0xc8, 0x0c, 0x3f, 0xe1, 0xa6, 0x0c, 0x63, 0xb6 };
++
++  u8 output[16] = { 0 };
++
++  aes_encrypt (input, output, (u32*) key, 10);
++
++  if (memcmp (output, expected, 16) != 0)
++    abort ();
++
++  return 0;
++}
++
++/* { dg-final { scan-assembler "rev32" } } */
++/* { dg-final { scan-assembler "aesmc" } } */
++/* { dg-final { scan-assembler "aese" } } */
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 2ccecffb5..18a9f62cc 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -261,6 +261,7 @@ DEFTIMEVAR (TV_AUTO_INC_DEC          , "auto inc dec")
+ DEFTIMEVAR (TV_CSE2                  , "CSE 2")
+ DEFTIMEVAR (TV_BRANCH_PROB           , "branch prediction")
+ DEFTIMEVAR (TV_COMBINE               , "combiner")
++DEFTIMEVAR (TV_CRYPTO_ACCEL	     , "crypto accel")
+ DEFTIMEVAR (TV_IFCVT		     , "if-conversion")
+ DEFTIMEVAR (TV_MODE_SWITCH           , "mode switching")
+ DEFTIMEVAR (TV_SMS		     , "sms modulo scheduling")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 6daac7fc1..1733931c3 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -583,6 +583,7 @@ extern rtl_opt_pass *make_pass_cse2 (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_df_initialize_opt (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_df_initialize_no_opt (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_reginfo_init (gcc::context *ctxt);
++extern rtl_opt_pass *make_pass_crypto_accel (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_inc_dec (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_stack_ptr_mod (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_initialize_regs (gcc::context *ctxt);
+-- 
+2.33.0
+
diff --git a/0047-LoongArch-Use-standard-pattern-name-and-RTX-code-for.patch b/0047-LoongArch-Use-standard-pattern-name-and-RTX-code-for.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4768ab91f3c310b0df407bd75adde9582e87567b
--- /dev/null
+++ b/0047-LoongArch-Use-standard-pattern-name-and-RTX-code-for.patch
@@ -0,0 +1,268 @@
+From 4c13256ea34b4169ceb3f9c7826843b754c6a6e0 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 19 Nov 2023 16:28:59 +0800
+Subject: [PATCH 047/188] LoongArch: Use standard pattern name and RTX code for
+ LSX/LASX muh instructions
+
+Removes unnecessary UNSPECs and make the muh instructions useful with
+GNU vectors or auto vectorization.
+
+gcc/ChangeLog:
+
+	* config/loongarch/simd.md (muh): New code attribute mapping
+	any_extend to smul_highpart or umul_highpart.
+	(mul3_highpart): New define_insn.
+	* config/loongarch/lsx.md (UNSPEC_LSX_VMUH_S): Remove.
+	(UNSPEC_LSX_VMUH_U): Remove.
+	(lsx_vmuh_s_): Remove.
+	(lsx_vmuh_u_): Remove.
+	* config/loongarch/lasx.md (UNSPEC_LASX_XVMUH_S): Remove.
+	(UNSPEC_LASX_XVMUH_U): Remove.
+	(lasx_xvmuh_s_): Remove.
+	(lasx_xvmuh_u_): Remove.
+	* config/loongarch/loongarch-builtins.cc (CODE_FOR_lsx_vmuh_b):
+	Redefine to standard pattern name.
+	(CODE_FOR_lsx_vmuh_h): Likewise.
+	(CODE_FOR_lsx_vmuh_w): Likewise.
+	(CODE_FOR_lsx_vmuh_d): Likewise.
+	(CODE_FOR_lsx_vmuh_bu): Likewise.
+	(CODE_FOR_lsx_vmuh_hu): Likewise.
+	(CODE_FOR_lsx_vmuh_wu): Likewise.
+	(CODE_FOR_lsx_vmuh_du): Likewise.
+	(CODE_FOR_lasx_xvmuh_b): Likewise.
+	(CODE_FOR_lasx_xvmuh_h): Likewise.
+	(CODE_FOR_lasx_xvmuh_w): Likewise.
+	(CODE_FOR_lasx_xvmuh_d): Likewise.
+	(CODE_FOR_lasx_xvmuh_bu): Likewise.
+	(CODE_FOR_lasx_xvmuh_hu): Likewise.
+	(CODE_FOR_lasx_xvmuh_wu): Likewise.
+	(CODE_FOR_lasx_xvmuh_du): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vect-muh.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 22 ------------
+ gcc/config/loongarch/loongarch-builtins.cc    | 32 ++++++++---------
+ gcc/config/loongarch/lsx.md                   | 22 ------------
+ gcc/config/loongarch/simd.md                  | 16 +++++++++
+ gcc/testsuite/gcc.target/loongarch/vect-muh.c | 36 +++++++++++++++++++
+ 5 files changed, 68 insertions(+), 60 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-muh.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index d4a56c307..023a023b4 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -68,8 +68,6 @@
+   UNSPEC_LASX_BRANCH
+   UNSPEC_LASX_BRANCH_V
+ 
+-  UNSPEC_LASX_XVMUH_S
+-  UNSPEC_LASX_XVMUH_U
+   UNSPEC_LASX_MXVEXTW_U
+   UNSPEC_LASX_XVSLLWIL_S
+   UNSPEC_LASX_XVSLLWIL_U
+@@ -2823,26 +2821,6 @@
+   [(set_attr "type" "simd_logic")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lasx_xvmuh_s_"
+-  [(set (match_operand:ILASX 0 "register_operand" "=f")
+-	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
+-		       (match_operand:ILASX 2 "register_operand" "f")]
+-		      UNSPEC_LASX_XVMUH_S))]
+-  "ISA_HAS_LASX"
+-  "xvmuh.\t%u0,%u1,%u2"
+-  [(set_attr "type" "simd_int_arith")
+-   (set_attr "mode" "")])
+-
+-(define_insn "lasx_xvmuh_u_"
+-  [(set (match_operand:ILASX 0 "register_operand" "=f")
+-	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
+-		       (match_operand:ILASX 2 "register_operand" "f")]
+-		      UNSPEC_LASX_XVMUH_U))]
+-  "ISA_HAS_LASX"
+-  "xvmuh.\t%u0,%u1,%u2"
+-  [(set_attr "type" "simd_int_arith")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lasx_xvsllwil_s__"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f")
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index fb458feac..41ea357cf 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -319,6 +319,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lsx_vmod_hu CODE_FOR_umodv8hi3
+ #define CODE_FOR_lsx_vmod_wu CODE_FOR_umodv4si3
+ #define CODE_FOR_lsx_vmod_du CODE_FOR_umodv2di3
++#define CODE_FOR_lsx_vmuh_b CODE_FOR_smulv16qi3_highpart
++#define CODE_FOR_lsx_vmuh_h CODE_FOR_smulv8hi3_highpart
++#define CODE_FOR_lsx_vmuh_w CODE_FOR_smulv4si3_highpart
++#define CODE_FOR_lsx_vmuh_d CODE_FOR_smulv2di3_highpart
++#define CODE_FOR_lsx_vmuh_bu CODE_FOR_umulv16qi3_highpart
++#define CODE_FOR_lsx_vmuh_hu CODE_FOR_umulv8hi3_highpart
++#define CODE_FOR_lsx_vmuh_wu CODE_FOR_umulv4si3_highpart
++#define CODE_FOR_lsx_vmuh_du CODE_FOR_umulv2di3_highpart
+ #define CODE_FOR_lsx_vmul_b CODE_FOR_mulv16qi3
+ #define CODE_FOR_lsx_vmul_h CODE_FOR_mulv8hi3
+ #define CODE_FOR_lsx_vmul_w CODE_FOR_mulv4si3
+@@ -439,14 +447,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lsx_vfnmsub_s CODE_FOR_vfnmsubv4sf4_nmsub4
+ #define CODE_FOR_lsx_vfnmsub_d CODE_FOR_vfnmsubv2df4_nmsub4
+ 
+-#define CODE_FOR_lsx_vmuh_b CODE_FOR_lsx_vmuh_s_b
+-#define CODE_FOR_lsx_vmuh_h CODE_FOR_lsx_vmuh_s_h
+-#define CODE_FOR_lsx_vmuh_w CODE_FOR_lsx_vmuh_s_w
+-#define CODE_FOR_lsx_vmuh_d CODE_FOR_lsx_vmuh_s_d
+-#define CODE_FOR_lsx_vmuh_bu CODE_FOR_lsx_vmuh_u_bu
+-#define CODE_FOR_lsx_vmuh_hu CODE_FOR_lsx_vmuh_u_hu
+-#define CODE_FOR_lsx_vmuh_wu CODE_FOR_lsx_vmuh_u_wu
+-#define CODE_FOR_lsx_vmuh_du CODE_FOR_lsx_vmuh_u_du
+ #define CODE_FOR_lsx_vsllwil_h_b CODE_FOR_lsx_vsllwil_s_h_b
+ #define CODE_FOR_lsx_vsllwil_w_h CODE_FOR_lsx_vsllwil_s_w_h
+ #define CODE_FOR_lsx_vsllwil_d_w CODE_FOR_lsx_vsllwil_s_d_w
+@@ -588,6 +588,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lasx_xvmul_h CODE_FOR_mulv16hi3
+ #define CODE_FOR_lasx_xvmul_w CODE_FOR_mulv8si3
+ #define CODE_FOR_lasx_xvmul_d CODE_FOR_mulv4di3
++#define CODE_FOR_lasx_xvmuh_b CODE_FOR_smulv32qi3_highpart
++#define CODE_FOR_lasx_xvmuh_h CODE_FOR_smulv16hi3_highpart
++#define CODE_FOR_lasx_xvmuh_w CODE_FOR_smulv8si3_highpart
++#define CODE_FOR_lasx_xvmuh_d CODE_FOR_smulv4di3_highpart
++#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_umulv32qi3_highpart
++#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_umulv16hi3_highpart
++#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_umulv8si3_highpart
++#define CODE_FOR_lasx_xvmuh_du CODE_FOR_umulv4di3_highpart
+ #define CODE_FOR_lasx_xvclz_b CODE_FOR_clzv32qi2
+ #define CODE_FOR_lasx_xvclz_h CODE_FOR_clzv16hi2
+ #define CODE_FOR_lasx_xvclz_w CODE_FOR_clzv8si2
+@@ -697,14 +705,6 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lasx_xvavgr_hu CODE_FOR_lasx_xvavgr_u_hu
+ #define CODE_FOR_lasx_xvavgr_wu CODE_FOR_lasx_xvavgr_u_wu
+ #define CODE_FOR_lasx_xvavgr_du CODE_FOR_lasx_xvavgr_u_du
+-#define CODE_FOR_lasx_xvmuh_b CODE_FOR_lasx_xvmuh_s_b
+-#define CODE_FOR_lasx_xvmuh_h CODE_FOR_lasx_xvmuh_s_h
+-#define CODE_FOR_lasx_xvmuh_w CODE_FOR_lasx_xvmuh_s_w
+-#define CODE_FOR_lasx_xvmuh_d CODE_FOR_lasx_xvmuh_s_d
+-#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_lasx_xvmuh_u_bu
+-#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_lasx_xvmuh_u_hu
+-#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_lasx_xvmuh_u_wu
+-#define CODE_FOR_lasx_xvmuh_du CODE_FOR_lasx_xvmuh_u_du
+ #define CODE_FOR_lasx_xvssran_b_h CODE_FOR_lasx_xvssran_s_b_h
+ #define CODE_FOR_lasx_xvssran_h_w CODE_FOR_lasx_xvssran_s_h_w
+ #define CODE_FOR_lasx_xvssran_w_d CODE_FOR_lasx_xvssran_s_w_d
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index c1c3719e3..537afaf96 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -64,8 +64,6 @@
+   UNSPEC_LSX_VSRLR
+   UNSPEC_LSX_VSRLRI
+   UNSPEC_LSX_VSHUF
+-  UNSPEC_LSX_VMUH_S
+-  UNSPEC_LSX_VMUH_U
+   UNSPEC_LSX_VEXTW_S
+   UNSPEC_LSX_VEXTW_U
+   UNSPEC_LSX_VSLLWIL_S
+@@ -2506,26 +2504,6 @@
+   [(set_attr "type" "simd_logic")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lsx_vmuh_s_"
+-  [(set (match_operand:ILSX 0 "register_operand" "=f")
+-	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
+-		      (match_operand:ILSX 2 "register_operand" "f")]
+-		     UNSPEC_LSX_VMUH_S))]
+-  "ISA_HAS_LSX"
+-  "vmuh.\t%w0,%w1,%w2"
+-  [(set_attr "type" "simd_int_arith")
+-   (set_attr "mode" "")])
+-
+-(define_insn "lsx_vmuh_u_"
+-  [(set (match_operand:ILSX 0 "register_operand" "=f")
+-	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
+-		      (match_operand:ILSX 2 "register_operand" "f")]
+-		     UNSPEC_LSX_VMUH_U))]
+-  "ISA_HAS_LSX"
+-  "vmuh.\t%w0,%w1,%w2"
+-  [(set_attr "type" "simd_int_arith")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lsx_vextw_s_d"
+   [(set (match_operand:V2DI 0 "register_operand" "=f")
+ 	(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")]
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index 27d1ffecd..a0e8db3c0 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -206,6 +206,22 @@
+   [(set_attr "type" "simd_fcvt")
+    (set_attr "mode" "")])
+ 
++;; vmuh.{b/h/w/d}
++
++(define_code_attr muh
++  [(sign_extend "smul_highpart")
++   (zero_extend "umul_highpart")])
++
++(define_insn "mul3_highpart"
++  [(set (match_operand:IVEC 0 "register_operand" "=f")
++	(:IVEC (match_operand:IVEC 1 "register_operand" "f")
++		    (match_operand:IVEC 2 "register_operand" "f")))
++   (any_extend (const_int 0))]
++  ""
++  "vmuh.\t%0,%1,%2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
+ ; The LoongArch SX Instructions.
+ (include "lsx.md")
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-muh.c b/gcc/testsuite/gcc.target/loongarch/vect-muh.c
+new file mode 100644
+index 000000000..a788840b2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-muh.c
+@@ -0,0 +1,36 @@
++/* { dg-do compile } */
++/* { dg-options "-mlasx -O3" } */
++/* { dg-final { scan-assembler "\tvmuh\.w\t" } } */
++/* { dg-final { scan-assembler "\tvmuh\.wu\t" } } */
++/* { dg-final { scan-assembler "\txvmuh\.w\t" } } */
++/* { dg-final { scan-assembler "\txvmuh\.wu\t" } } */
++
++int a[8], b[8], c[8];
++
++void
++test1 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = ((long)a[i] * (long)b[i]) >> 32;
++}
++
++void
++test2 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = ((long)(unsigned)a[i] * (long)(unsigned)b[i]) >> 32;
++}
++
++void
++test3 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = ((long)a[i] * (long)b[i]) >> 32;
++}
++
++void
++test4 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = ((long)(unsigned)a[i] * (long)(unsigned)b[i]) >> 32;
++}
+-- 
+2.43.0
+
diff --git a/0048-LoongArch-Use-standard-pattern-name-and-RTX-code-for.patch b/0048-LoongArch-Use-standard-pattern-name-and-RTX-code-for.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fbb44a4304e3dd2039f120fb605be4a0cbc2679d
--- /dev/null
+++ b/0048-LoongArch-Use-standard-pattern-name-and-RTX-code-for.patch
@@ -0,0 +1,285 @@
+From 9dde2178e64893e4c46b1c375a658f8ab6d34fdd Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 19 Nov 2023 17:28:06 +0800
+Subject: [PATCH 048/188] LoongArch: Use standard pattern name and RTX code for
+ LSX/LASX rotate shift
+
+Remove unnecessary UNSPECs and make the [x]vrotr[i] instructions useful
+with GNU vectors and auto vectorization.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lsx.md (bitimm): Move to ...
+	(UNSPEC_LSX_VROTR): Remove.
+	(lsx_vrotr_): Remove.
+	(lsx_vrotri_): Remove.
+	* config/loongarch/lasx.md (UNSPEC_LASX_XVROTR): Remove.
+	(lsx_vrotr_): Remove.
+	(lsx_vrotri_): Remove.
+	* config/loongarch/simd.md (bitimm): ... here.  Expand it to
+	cover LASX modes.
+	(vrotr3): New define_insn.
+	(vrotri3): New define_insn.
+	* config/loongarch/loongarch-builtins.cc:
+	(CODE_FOR_lsx_vrotr_b): Use standard pattern name.
+	(CODE_FOR_lsx_vrotr_h): Likewise.
+	(CODE_FOR_lsx_vrotr_w): Likewise.
+	(CODE_FOR_lsx_vrotr_d): Likewise.
+	(CODE_FOR_lasx_xvrotr_b): Likewise.
+	(CODE_FOR_lasx_xvrotr_h): Likewise.
+	(CODE_FOR_lasx_xvrotr_w): Likewise.
+	(CODE_FOR_lasx_xvrotr_d): Likewise.
+	(CODE_FOR_lsx_vrotri_b): Define to standard pattern name.
+	(CODE_FOR_lsx_vrotri_h): Likewise.
+	(CODE_FOR_lsx_vrotri_w): Likewise.
+	(CODE_FOR_lsx_vrotri_d): Likewise.
+	(CODE_FOR_lasx_xvrotri_b): Likewise.
+	(CODE_FOR_lasx_xvrotri_h): Likewise.
+	(CODE_FOR_lasx_xvrotri_w): Likewise.
+	(CODE_FOR_lasx_xvrotri_d): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vect-rotr.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 22 ------------
+ gcc/config/loongarch/loongarch-builtins.cc    | 16 +++++++++
+ gcc/config/loongarch/lsx.md                   | 28 ---------------
+ gcc/config/loongarch/simd.md                  | 29 +++++++++++++++
+ .../gcc.target/loongarch/vect-rotr.c          | 36 +++++++++++++++++++
+ 5 files changed, 81 insertions(+), 50 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-rotr.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 023a023b4..116b30c07 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -138,7 +138,6 @@
+   UNSPEC_LASX_XVHSUBW_Q_D
+   UNSPEC_LASX_XVHADDW_QU_DU
+   UNSPEC_LASX_XVHSUBW_QU_DU
+-  UNSPEC_LASX_XVROTR
+   UNSPEC_LASX_XVADD_Q
+   UNSPEC_LASX_XVSUB_Q
+   UNSPEC_LASX_XVREPLVE
+@@ -4232,18 +4231,6 @@
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "V4DI")])
+ 
+-;;XVROTR.B   XVROTR.H   XVROTR.W   XVROTR.D
+-;;TODO-478
+-(define_insn "lasx_xvrotr_"
+-  [(set (match_operand:ILASX 0 "register_operand" "=f")
+-	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
+-		       (match_operand:ILASX 2 "register_operand" "f")]
+-		      UNSPEC_LASX_XVROTR))]
+-  "ISA_HAS_LASX"
+-  "xvrotr.\t%u0,%u1,%u2"
+-  [(set_attr "type" "simd_int_arith")
+-   (set_attr "mode" "")])
+-
+ ;;XVADD.Q
+ ;;TODO2
+ (define_insn "lasx_xvadd_q"
+@@ -4426,15 +4413,6 @@
+   [(set_attr "type" "simd_fcvt")
+    (set_attr "mode" "V4DI")])
+ 
+-(define_insn "lasx_xvrotri_"
+-  [(set (match_operand:ILASX 0 "register_operand" "=f")
+-	(rotatert:ILASX (match_operand:ILASX 1 "register_operand" "f")
+-		       (match_operand 2 "const__operand" "")))]
+-  "ISA_HAS_LASX"
+-  "xvrotri.\t%u0,%u1,%2"
+-  [(set_attr "type" "simd_shf")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lasx_xvextl_q_d"
+   [(set (match_operand:V4DI 0 "register_operand" "=f")
+ 	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index 41ea357cf..f4523c8bf 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -369,6 +369,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lsx_vsrli_h CODE_FOR_vlshrv8hi3
+ #define CODE_FOR_lsx_vsrli_w CODE_FOR_vlshrv4si3
+ #define CODE_FOR_lsx_vsrli_d CODE_FOR_vlshrv2di3
++#define CODE_FOR_lsx_vrotr_b CODE_FOR_vrotrv16qi3
++#define CODE_FOR_lsx_vrotr_h CODE_FOR_vrotrv8hi3
++#define CODE_FOR_lsx_vrotr_w CODE_FOR_vrotrv4si3
++#define CODE_FOR_lsx_vrotr_d CODE_FOR_vrotrv2di3
++#define CODE_FOR_lsx_vrotri_b CODE_FOR_rotrv16qi3
++#define CODE_FOR_lsx_vrotri_h CODE_FOR_rotrv8hi3
++#define CODE_FOR_lsx_vrotri_w CODE_FOR_rotrv4si3
++#define CODE_FOR_lsx_vrotri_d CODE_FOR_rotrv2di3
+ #define CODE_FOR_lsx_vsub_b CODE_FOR_subv16qi3
+ #define CODE_FOR_lsx_vsub_h CODE_FOR_subv8hi3
+ #define CODE_FOR_lsx_vsub_w CODE_FOR_subv4si3
+@@ -634,6 +642,14 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+ #define CODE_FOR_lasx_xvsrli_h CODE_FOR_vlshrv16hi3
+ #define CODE_FOR_lasx_xvsrli_w CODE_FOR_vlshrv8si3
+ #define CODE_FOR_lasx_xvsrli_d CODE_FOR_vlshrv4di3
++#define CODE_FOR_lasx_xvrotr_b CODE_FOR_vrotrv32qi3
++#define CODE_FOR_lasx_xvrotr_h CODE_FOR_vrotrv16hi3
++#define CODE_FOR_lasx_xvrotr_w CODE_FOR_vrotrv8si3
++#define CODE_FOR_lasx_xvrotr_d CODE_FOR_vrotrv4di3
++#define CODE_FOR_lasx_xvrotri_b CODE_FOR_rotrv32qi3
++#define CODE_FOR_lasx_xvrotri_h CODE_FOR_rotrv16hi3
++#define CODE_FOR_lasx_xvrotri_w CODE_FOR_rotrv8si3
++#define CODE_FOR_lasx_xvrotri_d CODE_FOR_rotrv4di3
+ #define CODE_FOR_lasx_xvsub_b CODE_FOR_subv32qi3
+ #define CODE_FOR_lasx_xvsub_h CODE_FOR_subv16hi3
+ #define CODE_FOR_lasx_xvsub_w CODE_FOR_subv8si3
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 537afaf96..232399934 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -141,7 +141,6 @@
+   UNSPEC_LSX_VMADDWOD
+   UNSPEC_LSX_VMADDWOD2
+   UNSPEC_LSX_VMADDWOD3
+-  UNSPEC_LSX_VROTR
+   UNSPEC_LSX_VADD_Q
+   UNSPEC_LSX_VSUB_Q
+   UNSPEC_LSX_VEXTH_Q_D
+@@ -363,14 +362,6 @@
+    (V8HI "exp_8")
+    (V16QI "exp_16")])
+ 
+-;; This attribute is used to form an immediate operand constraint using
+-;; "const__operand".
+-(define_mode_attr bitimm
+-  [(V16QI "uimm3")
+-   (V8HI  "uimm4")
+-   (V4SI  "uimm5")
+-   (V2DI  "uimm6")])
+-
+ (define_expand "vec_init"
+   [(match_operand:LSX 0 "register_operand")
+    (match_operand:LSX 1 "")]
+@@ -4152,16 +4143,6 @@
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "V2DI")])
+ 
+-(define_insn "lsx_vrotr_"
+-  [(set (match_operand:ILSX 0 "register_operand" "=f")
+-	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
+-		      (match_operand:ILSX 2 "register_operand" "f")]
+-		     UNSPEC_LSX_VROTR))]
+-  "ISA_HAS_LSX"
+-  "vrotr.\t%w0,%w1,%w2"
+-  [(set_attr "type" "simd_int_arith")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lsx_vadd_q"
+   [(set (match_operand:V2DI 0 "register_operand" "=f")
+ 	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
+@@ -4255,15 +4236,6 @@
+   [(set_attr "type" "simd_fcvt")
+    (set_attr "mode" "V2DI")])
+ 
+-(define_insn "lsx_vrotri_"
+-  [(set (match_operand:ILSX 0 "register_operand" "=f")
+-	(rotatert:ILSX (match_operand:ILSX 1 "register_operand" "f")
+-		      (match_operand 2 "const__operand" "")))]
+-  "ISA_HAS_LSX"
+-  "vrotri.\t%w0,%w1,%2"
+-  [(set_attr "type" "simd_shf")
+-   (set_attr "mode" "")])
+-
+ (define_insn "lsx_vextl_q_d"
+   [(set (match_operand:V2DI 0 "register_operand" "=f")
+ 	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index a0e8db3c0..4ecf7a55e 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -91,6 +91,13 @@
+ 			   (V8HI "16") (V16HI "16")
+ 			   (V16QI "8") (V32QI "8")])
+ 
++;; This attribute is used to form an immediate operand constraint using
++;; "const__operand".
++(define_mode_attr bitimm [(V16QI "uimm3") (V32QI "uimm3")
++			  (V8HI  "uimm4") (V16HI "uimm4")
++			  (V4SI  "uimm5") (V8SI "uimm5")
++			  (V2DI  "uimm6") (V4DI "uimm6")])
++
+ ;; =======================================================================
+ ;; For many LASX instructions, the only difference of it from the LSX
+ ;; counterpart is the length of vector operands.  Describe these LSX/LASX
+@@ -222,6 +229,28 @@
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "")])
+ 
++;; vrotr.{b/h/w/d}
++
++(define_insn "vrotr3"
++  [(set (match_operand:IVEC 0 "register_operand" "=f")
++	(rotatert:IVEC (match_operand:IVEC 1 "register_operand" "f")
++		       (match_operand:IVEC 2 "register_operand" "f")))]
++  ""
++  "vrotr.\t%0,%1,%2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++;; vrotri.{b/h/w/d}
++
++(define_insn "rotr3"
++  [(set (match_operand:IVEC 0 "register_operand" "=f")
++	(rotatert:IVEC (match_operand:IVEC 1 "register_operand" "f")
++		       (match_operand:SI 2 "const__operand")))]
++  ""
++  "vrotri.\t%0,%1,%2";
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
+ ; The LoongArch SX Instructions.
+ (include "lsx.md")
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-rotr.c b/gcc/testsuite/gcc.target/loongarch/vect-rotr.c
+new file mode 100644
+index 000000000..733c36334
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-rotr.c
+@@ -0,0 +1,36 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx" } */
++/* { dg-final { scan-assembler "\tvrotr\.w\t" } } */
++/* { dg-final { scan-assembler "\txvrotr\.w\t" } } */
++/* { dg-final { scan-assembler "\tvrotri\.w\t\[^\n\]*7\n" } } */
++/* { dg-final { scan-assembler "\txvrotri\.w\t\[^\n\]*7\n" } } */
++
++unsigned int a[8], b[8];
++
++void
++test1 (void)
++{
++  for (int i = 0; i < 4; i++)
++    a[i] = a[i] >> b[i] | a[i] << (32 - b[i]);
++}
++
++void
++test2 (void)
++{
++  for (int i = 0; i < 8; i++)
++    a[i] = a[i] >> b[i] | a[i] << (32 - b[i]);
++}
++
++void
++test3 (void)
++{
++  for (int i = 0; i < 4; i++)
++    a[i] = a[i] >> 7 | a[i] << 25;
++}
++
++void
++test4 (void)
++{
++  for (int i = 0; i < 8; i++)
++    a[i] = a[i] >> 7 | a[i] << 25;
++}
+-- 
+2.43.0
+
diff --git a/0048-crypto-accel-add-optimization-level-requirement-to-t.patch b/0048-crypto-accel-add-optimization-level-requirement-to-t.patch
new file mode 100644
index 0000000000000000000000000000000000000000..49dfc1d3b8009ddce6d5752959f9825a0ee025fa
--- /dev/null
+++ b/0048-crypto-accel-add-optimization-level-requirement-to-t.patch
@@ -0,0 +1,27 @@
+From 915d549b03c10ab403538888149facd417a02ebc Mon Sep 17 00:00:00 2001
+From: vchernon 
+Date: Wed, 27 Dec 2023 23:31:26 +0800
+Subject: [PATCH 16/18] [crypto-accel] add optimization level requirement to
+ the gate
+
+fix issue (src-openEuler/gcc: I8RRDW)
+---
+ gcc/crypto-accel.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/crypto-accel.cc b/gcc/crypto-accel.cc
+index f4e810a6b..e7766a585 100644
+--- a/gcc/crypto-accel.cc
++++ b/gcc/crypto-accel.cc
+@@ -2391,7 +2391,7 @@ public:
+   /* opt_pass methods: */
+   virtual bool gate (function *)
+     {
+-      if (flag_crypto_accel_aes <= 0)
++      if (flag_crypto_accel_aes <= 0 || optimize < 1)
+ 	return false;
+       return targetm.get_v16qi_mode
+ 	&& targetm.gen_rev32v16qi
+-- 
+2.33.0
+
diff --git a/0049-Add-more-flexible-check-for-pointer-aliasing-during-.patch b/0049-Add-more-flexible-check-for-pointer-aliasing-during-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..df88789c75c5829bff8dee4c5a4e7d817af2227d
--- /dev/null
+++ b/0049-Add-more-flexible-check-for-pointer-aliasing-during-.patch
@@ -0,0 +1,239 @@
+From b5865aef36ebaac87ae30d51f08bfe081795ed67 Mon Sep 17 00:00:00 2001
+From: Chernonog Viacheslav 
+Date: Tue, 12 Mar 2024 23:30:56 +0800
+Subject: [PATCH 17/18] Add more flexible check for pointer aliasing during
+ vectorization It takes minimum between number of iteration and segment length
+ it helps to speed up loops with small number of iterations when only tail can
+ be vectorized
+
+---
+ gcc/params.opt                                |  5 ++
+ .../sve/var_stride_flexible_segment_len_1.c   | 23 +++++++
+ gcc/tree-data-ref.cc                          | 67 +++++++++++++------
+ gcc/tree-data-ref.h                           | 11 ++-
+ gcc/tree-vect-data-refs.cc                    | 14 +++-
+ 5 files changed, 95 insertions(+), 25 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c
+
+diff --git a/gcc/params.opt b/gcc/params.opt
+index 6176d4790..7e5c119cf 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1180,6 +1180,11 @@ Maximum number of loop peels to enhance alignment of data references in a loop.
+ Common Joined UInteger Var(param_vect_max_version_for_alias_checks) Init(10) Param Optimization
+ Bound on number of runtime checks inserted by the vectorizer's loop versioning for alias check.
+ 
++-param=vect-alias-flexible-segment-len=
++Common Joined UInteger Var(param_flexible_seg_len) Init(0) IntegerRange(0, 1) Param Optimization
++Use a minimum length of different segments.  Currenlty the minimum between
++iteration number and vectorization length is chosen by this param.
++
+ -param=vect-max-version-for-alignment-checks=
+ Common Joined UInteger Var(param_vect_max_version_for_alignment_checks) Init(6) Param Optimization
+ Bound on number of runtime checks inserted by the vectorizer's loop versioning for alignment check.
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c b/gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c
+new file mode 100644
+index 000000000..894f075f3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/aarch64/sve/var_stride_flexible_segment_len_1.c
+@@ -0,0 +1,23 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ftree-vectorize --param=vect-alias-flexible-segment-len=1" } */
++
++#define TYPE int
++#define SIZE 257
++
++void __attribute__ ((weak))
++f (TYPE *x, TYPE *y, unsigned short n, long m __attribute__((unused)))
++{
++  for (int i = 0; i < SIZE; ++i)
++    x[i * n] += y[i * n];
++}
++
++/* { dg-final { scan-assembler {\tld1w\tz[0-9]+} } } */
++/* { dg-final { scan-assembler {\tst1w\tz[0-9]+} } } */
++/* { dg-final { scan-assembler {\tldr\tw[0-9]+} } } */
++/* { dg-final { scan-assembler {\tstr\tw[0-9]+} } } */
++/* Should use a WAR check that multiplies by (VF-2)*4 rather than
++   an overlap check that multiplies by (257-1)*4.  */
++/* { dg-final { scan-assembler {\tcntb\t(x[0-9]+)\n.*\tsub\tx[0-9]+, \1, #8\n.*\tmul\tx[0-9]+,[^\n]*\1} } } */
++/* One range check and a check for n being zero.  */
++/* { dg-final { scan-assembler-times {\t(?:cmp|tst)\t} 2 } } */
++/* { dg-final { scan-assembler-times {\tccmp\t} 1 } } */
+diff --git a/gcc/tree-data-ref.cc b/gcc/tree-data-ref.cc
+index 397792c35..e6ae9e847 100644
+--- a/gcc/tree-data-ref.cc
++++ b/gcc/tree-data-ref.cc
+@@ -2329,31 +2329,15 @@ create_intersect_range_checks_index (class loop *loop, tree *cond_expr,
+    same arguments.  Try to optimize cases in which the second access
+    is a write and in which some overlap is valid.  */
+ 
+-static bool
+-create_waw_or_war_checks (tree *cond_expr,
++static void
++create_waw_or_war_checks2 (tree *cond_expr, tree seg_len_a,
+ 			  const dr_with_seg_len_pair_t &alias_pair)
+ {
+   const dr_with_seg_len& dr_a = alias_pair.first;
+   const dr_with_seg_len& dr_b = alias_pair.second;
+ 
+-  /* Check for cases in which:
+-
+-     (a) DR_B is always a write;
+-     (b) the accesses are well-ordered in both the original and new code
+-	 (see the comment above the DR_ALIAS_* flags for details); and
+-     (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR.  */
+-  if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW))
+-    return false;
+-
+-  /* Check for equal (but possibly variable) steps.  */
+   tree step = DR_STEP (dr_a.dr);
+-  if (!operand_equal_p (step, DR_STEP (dr_b.dr)))
+-    return false;
+-
+-  /* Make sure that we can operate on sizetype without loss of precision.  */
+   tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr));
+-  if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype))
+-    return false;
+ 
+   /* All addresses involved are known to have a common alignment ALIGN.
+      We can therefore subtract ALIGN from an exclusive endpoint to get
+@@ -2370,9 +2354,6 @@ create_waw_or_war_checks (tree *cond_expr,
+ 			       fold_convert (ssizetype, indicator),
+ 			       ssize_int (0));
+ 
+-  /* Get lengths in sizetype.  */
+-  tree seg_len_a
+-    = fold_convert (sizetype, rewrite_to_non_trapping_overflow (dr_a.seg_len));
+   step = fold_convert (sizetype, rewrite_to_non_trapping_overflow (step));
+ 
+   /* Each access has the following pattern:
+@@ -2479,6 +2460,50 @@ create_waw_or_war_checks (tree *cond_expr,
+   *cond_expr = fold_build2 (GT_EXPR, boolean_type_node, subject, limit);
+   if (dump_enabled_p ())
+     dump_printf (MSG_NOTE, "using an address-based WAR/WAW test\n");
++}
++
++/* This is a wrapper function for create_waw_or_war_checks2.  */
++static bool
++create_waw_or_war_checks (tree *cond_expr,
++			  const dr_with_seg_len_pair_t &alias_pair)
++{
++  const dr_with_seg_len& dr_a = alias_pair.first;
++  const dr_with_seg_len& dr_b = alias_pair.second;
++
++  /* Check for cases in which:
++
++     (a) DR_B is always a write;
++     (b) the accesses are well-ordered in both the original and new code
++     (see the comment above the DR_ALIAS_* flags for details); and
++     (c) the DR_STEPs describe all access pairs covered by ALIAS_PAIR.  */
++  if (alias_pair.flags & ~(DR_ALIAS_WAR | DR_ALIAS_WAW))
++    return false;
++
++  /* Check for equal (but possibly variable) steps.  */
++  tree step = DR_STEP (dr_a.dr);
++  if (!operand_equal_p (step, DR_STEP (dr_b.dr)))
++    return false;
++
++  /* Make sure that we can operate on sizetype without loss of precision.  */
++  tree addr_type = TREE_TYPE (DR_BASE_ADDRESS (dr_a.dr));
++  if (TYPE_PRECISION (addr_type) != TYPE_PRECISION (sizetype))
++    return false;
++
++  /* Get lengths in sizetype.  */
++  tree seg_len_a
++    = fold_convert (sizetype,
++		    rewrite_to_non_trapping_overflow (dr_a.seg_len));
++  create_waw_or_war_checks2 (cond_expr, seg_len_a, alias_pair);
++  if (param_flexible_seg_len && dr_a.seg_len != dr_a.seg_len2)
++    {
++      tree seg_len2_a
++	= fold_convert (sizetype,
++			rewrite_to_non_trapping_overflow (dr_a.seg_len2));
++      tree cond_expr2;
++      create_waw_or_war_checks2 (&cond_expr2, seg_len2_a, alias_pair);
++      *cond_expr =  fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
++				 *cond_expr, cond_expr2);
++   }
+   return true;
+ }
+ 
+diff --git a/gcc/tree-data-ref.h b/gcc/tree-data-ref.h
+index f643a95b2..9bc5f16ee 100644
+--- a/gcc/tree-data-ref.h
++++ b/gcc/tree-data-ref.h
+@@ -213,12 +213,19 @@ class dr_with_seg_len
+ public:
+   dr_with_seg_len (data_reference_p d, tree len, unsigned HOST_WIDE_INT size,
+ 		   unsigned int a)
+-    : dr (d), seg_len (len), access_size (size), align (a) {}
+-
++    : dr (d), seg_len (len), seg_len2 (len), access_size (size), align (a)
++    {}
++  dr_with_seg_len (data_reference_p d, tree len, tree len2,
++		   unsigned HOST_WIDE_INT size, unsigned int a)
++    : dr (d), seg_len (len), seg_len2 (len2), access_size (size), align (a)
++    {}
+   data_reference_p dr;
+   /* The offset of the last access that needs to be checked minus
+      the offset of the first.  */
+   tree seg_len;
++  /* The second version of segment length.  Currently this is used to
++     soften checks for a small number of iterations.  */
++  tree seg_len2;
+   /* A value that, when added to abs (SEG_LEN), gives the total number of
+      bytes in the segment.  */
+   poly_uint64 access_size;
+diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc
+index 4e615b80b..04e68f621 100644
+--- a/gcc/tree-vect-data-refs.cc
++++ b/gcc/tree-vect-data-refs.cc
+@@ -3646,6 +3646,7 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
+     {
+       poly_uint64 lower_bound;
+       tree segment_length_a, segment_length_b;
++      tree segment_length2_a, segment_length2_b;
+       unsigned HOST_WIDE_INT access_size_a, access_size_b;
+       unsigned int align_a, align_b;
+ 
+@@ -3751,6 +3752,8 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
+ 	{
+ 	  segment_length_a = size_zero_node;
+ 	  segment_length_b = size_zero_node;
++	  segment_length2_a = size_zero_node;
++	  segment_length2_b = size_zero_node;
+ 	}
+       else
+ 	{
+@@ -3759,8 +3762,15 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
+ 	    length_factor = scalar_loop_iters;
+ 	  else
+ 	    length_factor = size_int (vect_factor);
++	  /* In any case we should rememeber scalar_loop_iters
++	     this helps to create flexible aliasing check
++	     for small number of iterations.  */
+ 	  segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor);
+ 	  segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor);
++	  segment_length2_a
++	    = vect_vfa_segment_size (dr_info_a, scalar_loop_iters);
++	  segment_length2_b
++	    = vect_vfa_segment_size (dr_info_b, scalar_loop_iters);
+ 	}
+       access_size_a = vect_vfa_access_size (loop_vinfo, dr_info_a);
+       access_size_b = vect_vfa_access_size (loop_vinfo, dr_info_b);
+@@ -3805,9 +3815,9 @@ vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
+ 	}
+ 
+       dr_with_seg_len dr_a (dr_info_a->dr, segment_length_a,
+-			    access_size_a, align_a);
++			    segment_length2_a, access_size_a, align_a);
+       dr_with_seg_len dr_b (dr_info_b->dr, segment_length_b,
+-			    access_size_b, align_b);
++			    segment_length2_b, access_size_b, align_b);
+       /* Canonicalize the order to be the one that's needed for accurate
+ 	 RAW, WAR and WAW flags, in cases where the data references are
+ 	 well-ordered.  The order doesn't really matter otherwise,
+-- 
+2.33.0
+
diff --git a/0049-LoongArch-Remove-lrint_allow_inexact.patch b/0049-LoongArch-Remove-lrint_allow_inexact.patch
new file mode 100644
index 0000000000000000000000000000000000000000..870f1d53265543f464edf9c2f805054ab2a5e70f
--- /dev/null
+++ b/0049-LoongArch-Remove-lrint_allow_inexact.patch
@@ -0,0 +1,42 @@
+From c898e4a85c04a72f08db9ba2a454130f15f6f280 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Mon, 20 Nov 2023 01:34:26 +0800
+Subject: [PATCH 049/188] LoongArch: Remove lrint_allow_inexact
+
+No functional change, just a cleanup.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (lrint_allow_inexact): Remove.
+	(2): Check if 
+	== UNSPEC_FTINT instead of .
+---
+ gcc/config/loongarch/loongarch.md | 5 +----
+ 1 file changed, 1 insertion(+), 4 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index d1c766cbf..11577f407 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -585,9 +585,6 @@
+ (define_int_attr lrint_submenmonic [(UNSPEC_FTINT "")
+ 				    (UNSPEC_FTINTRM "rm")
+ 				    (UNSPEC_FTINTRP "rp")])
+-(define_int_attr lrint_allow_inexact [(UNSPEC_FTINT "1")
+-				      (UNSPEC_FTINTRM "0")
+-				      (UNSPEC_FTINTRP "0")])
+ 
+ ;; Iterator and attributes for bytepick.d
+ (define_int_iterator bytepick_w_ashift_amount [8 16 24])
+@@ -2384,7 +2381,7 @@
+ 	(unspec:ANYFI [(match_operand:ANYF 1 "register_operand" "f")]
+ 		      LRINT))]
+   "TARGET_HARD_FLOAT &&
+-   (
++   ( == UNSPEC_FTINT
+     || flag_fp_int_builtin_inexact
+     || !flag_trapping_math)"
+   "ftint.. %0,%1"
+-- 
+2.43.0
+
diff --git a/0050-LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch b/0050-LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4847ba9fd230b872c5a7fac60c3e8ab4e064828f
--- /dev/null
+++ b/0050-LoongArch-Use-LSX-for-scalar-FP-rounding-with-explic.patch
@@ -0,0 +1,150 @@
+From 05fafb78b301ce9a545e0dad896b19339f716eaf Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Mon, 20 Nov 2023 03:51:56 +0800
+Subject: [PATCH 050/188] LoongArch: Use LSX for scalar FP rounding with
+ explicit rounding mode
+
+In LoongArch FP base ISA there is only the frint.{s/d} instruction which
+reads the global rounding mode.  Utilize LSX for explicit rounding mode
+even if the operand is scalar.  It seems wasting the CPU power, but
+still much faster than calling the library function.
+
+gcc/ChangeLog:
+
+	* config/loongarch/simd.md (LSX_SCALAR_FRINT): New int iterator.
+	(VLSX_FOR_FMODE): New mode attribute.
+	(2): New expander,
+	expanding to vreplvei.{w/d} + frint{rp/rz/rm/rne}.{s.d}.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vect-frint-scalar.c: New test.
+	* gcc.target/loongarch/vect-frint-scalar-no-inexact.c: New test.
+---
+ gcc/config/loongarch/simd.md                  | 28 ++++++++++++
+ .../loongarch/vect-frint-scalar-no-inexact.c  | 23 ++++++++++
+ .../gcc.target/loongarch/vect-frint-scalar.c  | 43 +++++++++++++++++++
+ 3 files changed, 94 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c
+
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index 4ecf7a55e..843b1a41f 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -169,6 +169,34 @@
+ 		     UNSPEC_SIMD_FRINTRZ))]
+   "")
+ 
++;; Use LSX for scalar ceil/floor/trunc/roundeven when -mlsx and -ffp-int-
++;; builtin-inexact.  The base FP instruction set lacks these operations.
++;; Yes we are wasting 50% or even 75% of the CPU horsepower, but it's still
++;; much faster than calling a libc function: on LA464 and LA664 there is a
++;; 3x ~ 5x speed up.
++;;
++;; Note that a vreplvei instruction is needed or we'll also operate on the
++;; junk in high bits of the vector register and produce random FP exceptions.
++
++(define_int_iterator LSX_SCALAR_FRINT
++  [UNSPEC_SIMD_FRINTRP
++   UNSPEC_SIMD_FRINTRZ
++   UNSPEC_SIMD_FRINTRM
++   UNSPEC_SIMD_FRINTRNE])
++
++(define_mode_attr VLSX_FOR_FMODE [(DF "V2DF") (SF "V4SF")])
++
++(define_expand "2"
++  [(set (match_dup 2)
++     (vec_duplicate:
++       (match_operand:ANYF 1 "register_operand")))
++   (set (match_dup 2)
++	(unspec: [(match_dup 2)] LSX_SCALAR_FRINT))
++   (set (match_operand:ANYF 0 "register_operand")
++	(vec_select:ANYF (match_dup 2) (parallel [(const_int 0)])))]
++  "ISA_HAS_LSX && (flag_fp_int_builtin_inexact || !flag_trapping_math)"
++  "operands[2] = gen_reg_rtx (mode);")
++
+ ;; vftint.{/rp/rz/rm}
+ (define_insn
+   "_vftint__"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c
+new file mode 100644
+index 000000000..002e3b92d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar-no-inexact.c
+@@ -0,0 +1,23 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -fno-fp-int-builtin-inexact" } */
++
++#include "vect-frint-scalar.c"
++
++/* cannot use LSX for these with -fno-fp-int-builtin-inexact,
++   call library function.  */
++/* { dg-final { scan-assembler "\tb\t%plt\\(ceil\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(ceilf\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(floor\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(floorf\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(trunc\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(truncf\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(roundeven\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(roundevenf\\)" } } */
++
++/* nearbyint has not been allowed to raise FE_INEXACT for decades */
++/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyint\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyintf\\)" } } */
++
++/* rint should just use basic FP operation */
++/* { dg-final { scan-assembler "\tfrint\.s" } } */
++/* { dg-final { scan-assembler "\tfrint\.d" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c
+new file mode 100644
+index 000000000..c7cb40be7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-frint-scalar.c
+@@ -0,0 +1,43 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx" } */
++
++#define test(func, suffix) \
++__typeof__ (1.##suffix) \
++_##func##suffix (__typeof__ (1.##suffix) x) \
++{ \
++  return __builtin_##func##suffix (x); \
++}
++
++test (ceil, f)
++test (ceil, )
++test (floor, f)
++test (floor, )
++test (trunc, f)
++test (trunc, )
++test (roundeven, f)
++test (roundeven, )
++test (nearbyint, f)
++test (nearbyint, )
++test (rint, f)
++test (rint, )
++
++/* { dg-final { scan-assembler "\tvfrintrp\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrm\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrz\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrne\.s" } } */
++/* { dg-final { scan-assembler "\tvfrintrp\.d" } } */
++/* { dg-final { scan-assembler "\tvfrintrm\.d" } } */
++/* { dg-final { scan-assembler "\tvfrintrz\.d" } } */
++/* { dg-final { scan-assembler "\tvfrintrne\.d" } } */
++
++/* must do vreplvei first */
++/* { dg-final { scan-assembler-times "\tvreplvei\.w\t\\\$vr0,\\\$vr0,0" 4 } } */
++/* { dg-final { scan-assembler-times "\tvreplvei\.d\t\\\$vr0,\\\$vr0,0" 4 } } */
++
++/* nearbyint has not been allowed to raise FE_INEXACT for decades */
++/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyint\\)" } } */
++/* { dg-final { scan-assembler "\tb\t%plt\\(nearbyintf\\)" } } */
++
++/* rint should just use basic FP operation */
++/* { dg-final { scan-assembler "\tfrint\.s" } } */
++/* { dg-final { scan-assembler "\tfrint\.d" } } */
+-- 
+2.43.0
+
diff --git a/0050-Port-IPA-prefetch-to-GCC-12.patch b/0050-Port-IPA-prefetch-to-GCC-12.patch
new file mode 100644
index 0000000000000000000000000000000000000000..225a0c4a5ac98254cd279d08500212671feb90be
--- /dev/null
+++ b/0050-Port-IPA-prefetch-to-GCC-12.patch
@@ -0,0 +1,2071 @@
+From 7ee50ce44c652e21ca8ad33dc4e175f02b51b072 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Fri, 8 Mar 2024 06:50:39 +0800
+Subject: [PATCH 18/18] Port IPA prefetch to GCC 12
+
+---
+ gcc/Makefile.in     |    1 +
+ gcc/cgraph.cc       |    1 +
+ gcc/cgraph.h        |    2 +
+ gcc/common.opt      |    8 +
+ gcc/ipa-devirt.cc   |   54 +-
+ gcc/ipa-prefetch.cc | 1819 +++++++++++++++++++++++++++++++++++++++++++
+ gcc/ipa-sra.cc      |    8 +
+ gcc/params.opt      |    8 +
+ gcc/passes.def      |    1 +
+ gcc/timevar.def     |    1 +
+ gcc/tree-pass.h     |    1 +
+ 11 files changed, 1902 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/ipa-prefetch.cc
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index 876000bda..10544e4a9 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1468,6 +1468,7 @@ OBJS = \
+ 	ipa-modref.o \
+ 	ipa-modref-tree.o \
+ 	ipa-predicate.o \
++	ipa-prefetch.o \
+ 	ipa-profile.o \
+ 	ipa-prop.o \
+ 	ipa-param-manipulation.o \
+diff --git a/gcc/cgraph.cc b/gcc/cgraph.cc
+index 3734c85db..7d738b891 100644
+--- a/gcc/cgraph.cc
++++ b/gcc/cgraph.cc
+@@ -998,6 +998,7 @@ cgraph_node::create_indirect_edge (gcall *call_stmt, int ecf_flags,
+   edge->indirect_info = cgraph_allocate_init_indirect_info ();
+   edge->indirect_info->ecf_flags = ecf_flags;
+   edge->indirect_info->vptr_changed = true;
++  edge->indirect_info->targets = NULL;
+ 
+   /* Record polymorphic call info.  */
+   if (!cloning_p
+diff --git a/gcc/cgraph.h b/gcc/cgraph.h
+index d96690326..b84ff2f98 100644
+--- a/gcc/cgraph.h
++++ b/gcc/cgraph.h
+@@ -1659,6 +1659,8 @@ public:
+   int param_index;
+   /* ECF flags determined from the caller.  */
+   int ecf_flags;
++  /* Vector of potential call targets determined by analysis.  */
++  vec *targets;
+ 
+   /* Number of speculative call targets, it's less than GCOV_TOPN_VALUES.  */
+   unsigned num_speculative_call_targets : 16;
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 1eb62ada5..e65a06af9 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1328,6 +1328,10 @@ fdevirtualize
+ Common Var(flag_devirtualize) Optimization
+ Try to convert virtual calls to direct ones.
+ 
++fipa-ic
++Common Var(flag_ipa_ic) Optimization Init(0)
++Perform interprocedural analysis of indirect calls.
++
+ ficp
+ Common Var(flag_icp) Optimization Init(0)
+ Try to promote indirect calls to direct ones.
+@@ -2367,6 +2371,10 @@ fprefetch-loop-arrays
+ Common Var(flag_prefetch_loop_arrays) Init(-1) Optimization
+ Generate prefetch instructions, if available, for arrays in loops.
+ 
++fipa-prefetch
++Common Var(flag_ipa_prefetch) Init(0) Optimization
++Generate prefetch instructions, if available, using IPA info.
++
+ fprofile
+ Common Var(profile_flag)
+ Enable basic program profiling code.
+diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc
+index 318535d06..dd3562d56 100644
+--- a/gcc/ipa-devirt.cc
++++ b/gcc/ipa-devirt.cc
+@@ -5758,6 +5758,54 @@ merge_fs_map_for_ftype_aliases ()
+     }
+ }
+ 
++/* Save results of indirect call analysis for the next passes.  */
++
++static void
++save_analysis_results ()
++{
++  if (dump_file)
++    fprintf (dump_file, "\n\nSave results of indirect call analysis.\n");
++
++  struct cgraph_node *n;
++  FOR_EACH_FUNCTION (n)
++    {
++      cgraph_edge *e, *next;
++      for (e = n->indirect_calls; e; e = next)
++	{
++	  next = e->next_callee;
++	  if (e->indirect_info->polymorphic)
++	    continue;
++	  gcall *stmt = e->call_stmt;
++	  gcc_assert (stmt != NULL);
++	  tree call_fn = gimple_call_fn (stmt);
++	  tree call_fn_ty = TREE_TYPE (call_fn);
++	  if (!POINTER_TYPE_P (call_fn_ty))
++	    continue;
++
++	  tree ctype = TYPE_CANONICAL (TREE_TYPE (call_fn_ty));
++	  unsigned ctype_uid = ctype ? TYPE_UID (ctype) : 0;
++	  if (!ctype_uid || unsafe_types->count (ctype_uid)
++	      || !fs_map->count (ctype_uid))
++	    continue;
++	  /* TODO: cleanup noninterposable aliases.  */
++	  decl_set *decls = (*fs_map)[ctype_uid];
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "For call ");
++	      print_gimple_stmt (dump_file, stmt, 0);
++	    }
++	  vec_alloc (e->indirect_info->targets, decls->size ());
++	  for (decl_set::const_iterator it = decls->begin ();
++	       it != decls->end (); it++)
++	    {
++	      struct cgraph_node *target = cgraph_node::get (*it);
++	      /* TODO: maybe discard some targets.  */
++	      e->indirect_info->targets->quick_push (target);
++	    }
++	}
++    }
++}
++
+ /* Dump function types with set of functions corresponding to it.  */
+ 
+ static void
+@@ -5822,6 +5870,8 @@ collect_function_signatures ()
+ 	}
+     }
+   merge_fs_map_for_ftype_aliases ();
++  if (flag_ipa_ic)
++    save_analysis_results ();
+   if (dump_file)
+     dump_function_signature_sets ();
+ }
+@@ -6217,7 +6267,7 @@ ipa_icp (void)
+      optimize indirect calls.  */
+   collect_function_type_aliases ();
+   collect_function_signatures ();
+-  bool optimized = optimize_indirect_calls ();
++  bool optimized = flag_icp ? optimize_indirect_calls () : false;
+ 
+   remove_type_alias_map (ta_map);
+   remove_type_alias_map (fta_map);
+@@ -6264,7 +6314,7 @@ public:
+   /* opt_pass methods: */
+   virtual bool gate (function *)
+     {
+-      return (optimize && flag_icp && !seen_error ()
++      return (optimize && (flag_icp || flag_ipa_ic) && !seen_error ()
+ 	      && (in_lto_p || flag_whole_program));
+     }
+ 
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+new file mode 100644
+index 000000000..aeea51105
+--- /dev/null
++++ b/gcc/ipa-prefetch.cc
+@@ -0,0 +1,1819 @@
++/* IPA prefetch optimizations.
++   Copyright (C) 2023 Free Software Foundation, Inc.
++   Contributed by Ilia Diachkov.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++/* IPA prefetch is an interprocedural pass that detects cases of indirect
++   memory access potentially in loops and inserts prefetch instructions
++   to optimize cache usage during these indirect memory accesses.  */
++
++#include "config.h"
++#define INCLUDE_SET
++#define INCLUDE_MAP
++#include "system.h"
++#include "coretypes.h"
++#include "target.h"
++#include "tm.h"
++#include "tree.h"
++#include "tree-pass.h"
++#include "cgraph.h"
++#include "diagnostic-core.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "vec.h"
++#include "tree-pretty-print.h"
++#include "gimple-pretty-print.h"
++#include "gimple-iterator.h"
++#include "gimple-walk.h"
++#include "cfg.h"
++#include "cfghooks.h"
++#include "ssa.h"
++#include "tree-dfa.h"
++#include "fold-const.h"
++#include "tree-inline.h"
++#include "stor-layout.h"
++#include "tree-into-ssa.h"
++#include "tree-cfg.h"
++#include "alloc-pool.h"
++#include "symbol-summary.h"
++#include "ipa-prop.h"
++#include "tree-eh.h"
++#include "bitmap.h"
++#include "cfgloop.h"
++#include "langhooks.h"
++#include "ipa-param-manipulation.h"
++#include "ipa-fnsummary.h"
++#include "tree-ssa-loop.h"
++#include "tree-ssa-loop-ivopts.h"
++#include "gimple-fold.h"
++#include "gimplify.h"
++
++namespace {
++
++/* Call graph analysis.  */
++
++typedef std::set edge_set;
++typedef std::set node_set;
++typedef std::map node_to_iedge_map;
++typedef std::map node_to_node_map;
++typedef std::map edge_in_loop;
++typedef std::map node_in_loop;
++
++static edge_in_loop *el_map = NULL;
++static node_in_loop *nl_map = NULL;
++static node_to_iedge_map *icn_map = NULL;
++/* Contains nodes which reachable from a given node.  */
++static node_to_node_map *nn_map = NULL;
++
++static bool
++can_be_optimized (cgraph_node *n)
++{
++  /* TODO: maybe check also inlined_to.  */
++  return opt_for_fn (n->decl, flag_ipa_prefetch) && n->has_gimple_body_p ();
++}
++
++static void
++analyze_cgraph_edge (cgraph_edge *e)
++{
++  gcall *stmt = e->call_stmt;
++  gcc_checking_assert (e && stmt);
++  basic_block bb = gimple_bb (stmt);
++  gcc_checking_assert (bb);
++  /* TODO: add the same check for indirect calls.  */
++  if (e->callee && !can_be_optimized (e->callee))
++    return;
++
++  if (dump_file)
++    {
++      if (e->callee)
++	fprintf (dump_file, "\t%*s%s %s%*s  ", 1, "",
++		 e->callee->dump_name (), !e->inline_failed ? "inlined" :
++		 cgraph_inline_failed_string (e->inline_failed), 1, "");
++      else
++	fprintf (dump_file, "\t%*s%s %s%*s  ", 1, "", "(indirect)",
++		 "n/a", 1, "");
++      fprintf (dump_file, "freq:%4.2f", e->sreal_frequency ().to_double ());
++
++      if (e->callee && cross_module_call_p (e))
++	fprintf (dump_file, " cross module");
++
++      class ipa_call_summary *es = ipa_call_summaries->get (e);
++      if (es)
++	fprintf (dump_file, " loop depth:%2i size:%2i time: %2i",
++		 es->loop_depth, es->call_stmt_size, es->call_stmt_time);
++
++      fprintf (dump_file, "\n");
++    }
++  if (e->indirect_info && dump_file)
++    {
++      fprintf (dump_file, "II: %p\n", (void *) e->indirect_info->targets);
++      unsigned i = 0;
++      cgraph_node *n;
++      if (e->indirect_info->targets)
++	for (i = 0; e->indirect_info->targets->iterate (i, &n); ++i)
++	  fprintf (dump_file, "\t%s\n", n->dump_name ());
++    }
++
++  if (bb_loop_depth (bb) == 0)
++    return;
++
++  if (dump_file)
++    {
++      if (e->callee)
++	fprintf (dump_file, "\tCall in loop (%d): ", bb_loop_depth (bb));
++      else
++	fprintf (dump_file, "\tICall in loop (%d): ", bb_loop_depth (bb));
++      print_gimple_stmt (dump_file, stmt, 0);
++    }
++  (*el_map)[e] = e->sreal_frequency ().to_double ();
++}
++
++/* Walk optimizable cgraph nodes and collect info for edges.  */
++
++static void
++analyse_cgraph ()
++{
++  cgraph_node *n;
++  cgraph_edge *e;
++  FOR_EACH_DEFINED_FUNCTION (n)
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "\n\nProcesing function %s\n", n->dump_name ());
++	  print_generic_expr (dump_file, n->decl);
++	  fprintf (dump_file, "\n");
++	}
++      if (!can_be_optimized (n))
++	{
++	  if (dump_file)
++	    fprintf (dump_file, "Skip the function\n");
++	  continue;
++	}
++
++      /* TODO: maybe remove loop info here.  */
++      push_cfun (DECL_STRUCT_FUNCTION (n->decl));
++      calculate_dominance_info (CDI_DOMINATORS);
++      loop_optimizer_init (LOOPS_NORMAL);
++
++      for (e = n->callees; e; e = e->next_callee)
++	analyze_cgraph_edge (e);
++      for (e = n->indirect_calls; e; e = e->next_callee)
++	analyze_cgraph_edge (e);
++
++      free_dominance_info (CDI_DOMINATORS);
++      loop_optimizer_finalize ();
++
++      pop_cfun ();
++    }
++}
++
++/* Save indirect call info to node:icall_target map.  */
++
++static void
++prepare_indirect_call_info ()
++{
++  cgraph_node *n, *n2;
++  cgraph_edge *e;
++  FOR_EACH_DEFINED_FUNCTION (n)
++    for (e = n->indirect_calls; e; e = e->next_callee)
++      {
++	if (!e->indirect_info->targets)
++	  continue;
++	for (unsigned i = 0; e->indirect_info->targets->iterate (i, &n2); ++i)
++	  {
++	    if (icn_map->count (n2) == 0)
++	      (*icn_map)[n2] = new edge_set;
++	    (*icn_map)[n2]->insert (e);
++	  }
++      }
++}
++
++static void
++collect_nn_info (struct cgraph_edge *e, struct cgraph_node *n)
++{
++  struct cgraph_node *n2 = e->caller;
++  if (nn_map->count (n2) == 0)
++    (*nn_map)[n2] = new node_set;
++  (*nn_map)[n2]->insert (n);
++  if (nn_map->count (n) != 0)
++    {
++      node_set *set = (*nn_map)[n];
++      for (node_set::const_iterator it = set->begin ();
++	   it != set->end (); it++)
++	(*nn_map)[n2]->insert (*it);
++    }
++}
++
++static bool
++check_loop_info_for_cgraph_edge (struct cgraph_edge *e, struct cgraph_node *n,
++				 bool &all_in_loop, double &rate)
++{
++  collect_nn_info (e, n);
++  if (el_map->count (e) == 0)
++    {
++      if (dump_file)
++	fprintf (dump_file, "not all: %s->%s\n",
++		 e->caller->dump_name (), n->dump_name ());
++      all_in_loop = false;
++      return false;
++    }
++  rate += (*el_map)[e];
++  return true;
++}
++
++static bool
++update_loop_info_for_cgraph_node (struct cgraph_node *n)
++{
++  bool changed = false, all_in_loop = true;
++  double rate = 0.0;
++  struct cgraph_edge *e;
++
++  /* Iterate all direct callers.  */
++  if (n->callers)
++    for (e = n->callers; e; e = e->next_caller)
++      if (!check_loop_info_for_cgraph_edge (e, n, all_in_loop, rate))
++	break;
++
++  /* Iterate all possible indirect callers.  */
++  edge_set *set = (*icn_map)[n];
++  if (set)
++    for (edge_set::const_iterator it = set->begin (); it != set->end (); it++)
++      if (!check_loop_info_for_cgraph_edge (*it, n, all_in_loop, rate))
++	break;
++
++  /* The node had 0 loop count but the rate is > 0,
++     so something has changed.  */
++  if (dump_file)
++    fprintf (dump_file, "%s: all=%d, nl->c=%lu, r=%4.2f\n", n->dump_name (),
++	     all_in_loop, nl_map->count (n), rate);
++
++  if (all_in_loop && nl_map->count (n) == 0 && rate > 0.0)
++    {
++      if (dump_file)
++	fprintf (dump_file, "%s: new rate %4.2f\n", n->dump_name (), rate);
++      changed = true;
++    }
++  if (all_in_loop)
++    {
++      (*nl_map)[n] = nl_map->count (n) ? (*nl_map)[n] + rate : rate;
++      for (e = n->callees; e; e = e->next_callee)
++	(*el_map)[e] = el_map->count (e) ? (*el_map)[e] + rate : rate;
++      for (e = n->indirect_calls; e; e = e->next_callee)
++	{
++	  (*el_map)[e] = el_map->count (e) ? (*el_map)[e] + rate : rate;
++	  if (dump_file)
++	    fprintf (dump_file, "%s: reset indirect e=%p to %4.2f\n",
++		     n->dump_name (), (void *) e, (*el_map)[e]);
++	}
++    }
++  return changed;
++}
++
++/* Propagate in_loop info over the call graph.  */
++
++static void
++propagate_loop_info_in_cgraph ()
++{
++  struct cgraph_node *n;
++  bool changed;
++  unsigned iteration = 0;
++  do
++    {
++      changed = false;
++      if (dump_file)
++	fprintf (dump_file, "\nIteration %u\n", iteration++);
++      FOR_EACH_DEFINED_FUNCTION (n)
++	{
++	  if (!n->callers && !(*icn_map)[n])
++	    continue;
++	  if (update_loop_info_for_cgraph_node (n))
++	    changed = true;
++	}
++  } while (changed);
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "\nList of nodes in loops:\n");
++      FOR_EACH_DEFINED_FUNCTION (n)
++	if (nl_map->count (n) != 0)
++	  fprintf (dump_file, "%s: %4.2f\n", n->dump_name (), (*nl_map)[n]);
++      fprintf (dump_file, "\nList of callable nodes:\n");
++      FOR_EACH_DEFINED_FUNCTION (n)
++	if (nn_map->count (n) != 0)
++	  {
++	    node_set *set = (*nn_map)[n];
++	    fprintf (dump_file, "%s: ", n->dump_name ());
++	    for (node_set::const_iterator it = set->begin ();
++		 it != set->end (); it++)
++	      fprintf (dump_file, "%s ", (*it)->dump_name ());
++	    fprintf (dump_file, "\n");
++	  }
++    }
++}
++
++/* Analysis of memory references.  */
++
++typedef enum
++{
++  MR_NONE,
++  MR_SIMPLE,
++  MR_POLYNOMIAL,
++  MR_INDIRECT,
++  MR_UNSUPPORTED
++} mr_type;
++const char *mr_type_str[] =
++    {"none", "simple", "poly", "indirect", "unsuppoted"};
++
++struct memref_type;
++typedef std::set memref_set;
++
++static unsigned max_mr_id = 0;
++typedef struct memref_type
++{
++  unsigned mr_id = 0;
++  mr_type type = MR_NONE;
++  tree mem = NULL_TREE;
++  tree base = NULL_TREE;
++  tree offset = NULL_TREE;
++  vec stmts = vNULL;
++  memref_set used_mrs;
++  bool is_store = false;
++  bool is_incr = false;
++  tree step = NULL_TREE;
++} memref_t;
++
++typedef std::map tree_memref_map;
++typedef std::map > function_mrs_map;
++typedef std::map funct_mrs_map;
++typedef std::map memref_map;
++typedef std::map memref_tree_map;
++
++typedef std::set stmt_set;
++typedef std::map tree_map;
++
++tree_memref_map *tm_map;
++funct_mrs_map *fmrs_map;
++funct_mrs_map *optimize_mrs_map;
++memref_map *mr_candidate_map;
++tree_map *decl_map;
++
++static void analyse_mem_ref (gimple *stmt, tree mem, memref_t* mr);
++
++static memref_t*
++get_memref (gimple *stmt, tree mem, bool is_store)
++{
++  if (tm_map->count (mem))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Found mr %d for %p.\n",
++		 (*tm_map)[mem]->mr_id, (void *) mem);
++      return (*tm_map)[mem];
++    }
++
++  memref_t *mr = new memref_t;
++  mr->mr_id = ++max_mr_id;
++  mr->is_store = is_store;
++  mr->mem = mem;
++  (*tm_map)[mem] = mr;
++  if (dump_file)
++    fprintf (dump_file, "Create mr %d for %p.\n",
++	     mr->mr_id, (void *) mem);
++  analyse_mem_ref (stmt, mem, mr);
++  return mr;
++}
++
++static void
++print_mrs_ids (memref_set &mrs, const char *start)
++{
++  if (start)
++    fprintf (dump_file, "%s", start);
++  for (memref_set::const_iterator it = mrs.begin (); it != mrs.end (); it++)
++    fprintf (dump_file, "%d ", (*it)->mr_id);
++  fprintf (dump_file, "\n");
++}
++
++static void
++print_memref (memref_t *mr)
++{
++  fprintf (dump_file, "MR (%d) type: %s (%s) mem: ", mr->mr_id,
++	   mr_type_str[mr->type], mr->is_store ? "st" : "ld");
++  print_generic_expr (dump_file, mr->mem);
++  fprintf (dump_file, "\nbase: ");
++  if (mr->base)
++    print_generic_expr (dump_file, mr->base);
++  else
++    fprintf (dump_file, "null");
++  fprintf (dump_file, "\noffset: ");
++  if (mr->offset)
++    print_generic_expr (dump_file, mr->offset);
++  else
++    fprintf (dump_file, "null");
++  fprintf (dump_file, "\nstmts:\n");
++  for (unsigned int i = 0; i < mr->stmts.length (); i++)
++    print_gimple_stmt (dump_file, mr->stmts[i], 0);
++  print_mrs_ids (mr->used_mrs, "\tused memrefs: ");
++  if (mr->is_incr)
++    {
++      fprintf (dump_file, "\tis incremental with step: ");
++      print_generic_expr (dump_file, mr->step);
++    }
++  fprintf (dump_file, "\n");
++}
++
++/* If there is a simple load or store to a memory reference in STMT, returns
++   the location of the memory reference, and sets IS_STORE according to whether
++   it is a store or load.  Otherwise, returns NULL.
++   TODO: from gcc/tree-ssa-loop-im.c, maybe make it global.  */
++
++static tree *
++simple_mem_ref_in_stmt (gimple *stmt, bool *is_store)
++{
++  tree *lhs, *rhs;
++
++  /* Recognize SSA_NAME = MEM and MEM = (SSA_NAME | invariant) patterns.  */
++  if (!gimple_assign_single_p (stmt))
++    return NULL;
++
++  lhs = gimple_assign_lhs_ptr (stmt);
++  rhs = gimple_assign_rhs1_ptr (stmt);
++
++  if (TREE_CODE (*lhs) == SSA_NAME && gimple_vuse (stmt))
++    {
++      *is_store = false;
++      return rhs;
++    }
++  else if (gimple_vdef (stmt)
++	   && (TREE_CODE (*rhs) == SSA_NAME || is_gimple_min_invariant (*rhs)))
++    {
++      *is_store = true;
++      return lhs;
++    }
++  else
++    return NULL;
++}
++
++static void
++analyse_incremental (gimple *stmt, memref_t* mr)
++{
++  if (!gimple_assign_single_p (stmt))
++    return;
++  tree rhs1, rhs2;
++  /* TODO: maybe support other types of stmts.  */
++  while (stmt && is_gimple_assign (stmt))
++    {
++      enum tree_code def_code = gimple_assign_rhs_code (stmt);
++      gimple_rhs_class rhs_class = gimple_assign_rhs_class (stmt);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Incr: in assign (%s)\n",
++		   get_tree_code_name (def_code));
++	  print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS);
++	}
++      gcc_assert (def_code != ERROR_MARK);
++      switch (rhs_class)
++	{
++	case GIMPLE_TERNARY_RHS:
++	  if (dump_file)
++	    fprintf (dump_file, "Incr: unsupported trinary rhs\n");
++	  stmt = NULL;
++	  break;
++	case GIMPLE_UNARY_RHS:
++	case GIMPLE_SINGLE_RHS:
++	  rhs1 = gimple_assign_rhs1 (stmt);
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Incr: (%s)",
++		       get_tree_code_name (TREE_CODE (rhs1)));
++	      print_generic_expr (dump_file, rhs1);
++	      fprintf (dump_file, "\n");
++	    }
++	  if (def_code == SSA_NAME)
++	    stmt = SSA_NAME_DEF_STMT (rhs1);
++	  else if (def_code == MEM_REF || def_code == COMPONENT_REF
++		   || def_code == ARRAY_REF)
++	    {
++	      /* If we have dereference in address evaluation,
++		 it's indirect memory access.  */
++	      if (dump_file)
++		{
++		  if (operand_equal_p (mr->mem, rhs1))
++		    fprintf (dump_file, "Incr: the same MEM\n");
++		  else
++		    fprintf (dump_file, "Incr: diff MEM\n");
++		  print_generic_expr (dump_file, rhs1);
++		  fprintf (dump_file, " ");
++		  print_generic_expr (dump_file, mr->mem);
++		  fprintf (dump_file, "\n");
++		}
++	      if (operand_equal_p (mr->mem, rhs1) && mr->step)
++		mr->is_incr = true;
++	      stmt = NULL;
++	    }
++	  else
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Incr: unsupported unary/single\n");
++	      stmt = NULL;
++	    }
++	  break;
++	case GIMPLE_BINARY_RHS:
++	  rhs1 = gimple_assign_rhs1 (stmt);
++	  rhs2 = gimple_assign_rhs2 (stmt);
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "(%s) (%s)",
++		       get_tree_code_name (TREE_CODE (rhs1)),
++		       get_tree_code_name (TREE_CODE (rhs2)));
++	      print_generic_expr (dump_file, rhs1);
++	      fprintf (dump_file, " ");
++	      print_generic_expr (dump_file, rhs2);
++	      fprintf (dump_file, "\n");
++	    }
++	  /* TODO: extend for other types of incrementation.  */
++	  if (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == INTEGER_CST)
++	    {
++	      stmt = SSA_NAME_DEF_STMT (rhs1);
++	      mr->step = rhs2;
++	      if (dump_file)
++		{
++		  fprintf (dump_file, "Incr: const increment stmt: ");
++		  print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS);
++		}
++	    }
++	  else
++	    stmt = NULL;
++	  break;
++	default:
++	  gcc_unreachable ();
++      }
++    }
++  if ((mr->step && !mr->is_incr) || (!mr->step && mr->is_incr))
++    {
++      mr->step = NULL_TREE;
++      mr->is_incr = false;
++    }
++}
++
++static mr_type
++get_memref_type (memref_t *base, memref_t *used, enum tree_code code)
++{
++  /* TODO: improve memref type detection.  */
++  enum tree_code base_code = TREE_CODE (base->mem);
++  if (dump_file)
++    fprintf (dump_file, "get_memref_type: base=%d,%d used=%d,%d code=%s "
++	     "base_code=%s\n", base->mr_id, base->type,
++	     used ? used->mr_id : -1, used ? used->type : -1,
++	     get_tree_code_name (code), get_tree_code_name (base_code));
++  if (used)
++    {
++      if (base->type > used->type)
++	return base->type;
++      if (used->type == MR_SIMPLE)
++	return MR_POLYNOMIAL;
++      if (used->type == MR_POLYNOMIAL)
++	return base_code == ARRAY_REF ? MR_POLYNOMIAL : MR_INDIRECT;
++      if (used->type == MR_INDIRECT)
++	return MR_INDIRECT;
++      return MR_UNSUPPORTED;
++    }
++  if (code == MEM_REF || code == ARRAY_REF || code == COMPONENT_REF)
++    return base->type;
++  if (code == POINTER_PLUS_EXPR || code == PLUS_EXPR
++      || code == MINUS_EXPR || code == MULT_EXPR)
++    return base->type <= MR_POLYNOMIAL ? MR_POLYNOMIAL : base->type;
++  return base->type >= MR_INDIRECT ? base->type : MR_INDIRECT;
++}
++
++/* Recursively walk defs of src expression and record used stmts and other mrs.
++   Return a base address candidate if it's found.  */
++
++static tree
++analyse_addr_eval (tree src, memref_t* mr)
++{
++  if (TREE_CODE (src) != SSA_NAME)
++    return NULL_TREE;
++  gimple *stmt = SSA_NAME_DEF_STMT (src);
++  if (dump_file)
++    {
++      fprintf (dump_file, "Src_stmt: ");
++      print_gimple_stmt (dump_file, stmt, 0);
++    }
++  if (!is_gimple_assign (stmt))
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Is not assign, stop analysis: ");
++	  print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS);
++	}
++      mr->type = MR_UNSUPPORTED;
++      mr->stmts.safe_push (stmt);
++      return NULL_TREE;
++    }
++  enum tree_code def_code = gimple_assign_rhs_code (stmt);
++  if (def_code != MEM_REF && def_code != COMPONENT_REF
++      && def_code != ARRAY_REF)
++    mr->stmts.safe_push (stmt);
++  gimple_rhs_class rhs_class = gimple_assign_rhs_class (stmt);
++  tree rhs1, rhs2, base;
++  if (dump_file)
++    fprintf (dump_file, "In assign (%s): ", get_tree_code_name (def_code));
++
++  switch (rhs_class)
++    {
++    case GIMPLE_TERNARY_RHS:
++      if (dump_file)
++	fprintf (dump_file, "Unsupported trinary rhs\n");
++      mr->type = MR_UNSUPPORTED;
++      return NULL_TREE;
++    case GIMPLE_UNARY_RHS:
++    case GIMPLE_SINGLE_RHS:
++      rhs1 = gimple_assign_rhs1 (stmt);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "(%s)",
++		   get_tree_code_name (TREE_CODE (rhs1)));
++	  print_generic_expr (dump_file, rhs1);
++	  fprintf (dump_file, "\n");
++	}
++      if (def_code == NOP_EXPR)
++	return analyse_addr_eval (rhs1, mr);
++      else if (def_code == MEM_REF || def_code == COMPONENT_REF
++	       || def_code == ARRAY_REF)
++	{
++	  memref_t *mr2 = get_memref (stmt, rhs1, false);
++	  mr->type = get_memref_type (mr, mr2, def_code);
++	  for (memref_set::const_iterator it = mr2->used_mrs.begin ();
++	       it != mr2->used_mrs.end (); it++)
++	    mr->used_mrs.insert (*it);
++	  mr->used_mrs.insert (mr2);
++	  return mr2->base;
++	}
++      else
++	{
++	  if (dump_file)
++	    fprintf (dump_file, "Unsupported unary/single\n");
++	  mr->type = MR_UNSUPPORTED;
++	}
++      return NULL_TREE;
++    case GIMPLE_BINARY_RHS:
++      rhs1 = gimple_assign_rhs1 (stmt);
++      rhs2 = gimple_assign_rhs2 (stmt);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "(%s) (%s)",
++		   get_tree_code_name (TREE_CODE (rhs1)),
++		   get_tree_code_name (TREE_CODE (rhs2)));
++	  print_generic_expr (dump_file, rhs1);
++	  fprintf (dump_file, " ");
++	  print_generic_expr (dump_file, rhs2);
++	  fprintf (dump_file, "\n");
++	}
++      base = analyse_addr_eval (rhs1, mr);
++      analyse_addr_eval (rhs2, mr);
++      mr->type = get_memref_type (mr, NULL, def_code);
++      return base;
++    default:
++      gcc_unreachable ();
++    }
++  return NULL_TREE;
++}
++
++static tree
++get_mem_ref_address_ssa_name (tree mem, tree base)
++{
++  gcc_assert (TREE_CODE (mem) == MEM_REF);
++  if (base == NULL_TREE)
++    base = get_base_address (mem);
++  tree base_addr = NULL_TREE;
++  if (TREE_CODE (base) == MEM_REF)
++    base_addr = TREE_OPERAND (base, 0);
++  if (base_addr != NULL_TREE && TREE_CODE (base_addr) == SSA_NAME)
++    return base_addr;
++  return NULL_TREE;
++}
++
++static void
++analyse_mem_ref (gimple *stmt, tree mem, memref_t* mr)
++{
++  tree base = get_base_address (mem);
++  if (dump_file)
++    fprintf (dump_file, "Codes: base = %s, mem = %s\n",
++	     base ? get_tree_code_name (TREE_CODE (base)) : "null",
++	     mem ? get_tree_code_name (TREE_CODE (mem)) : "null");
++
++  mr->stmts.safe_push (stmt);
++  mr->base = base;
++  switch (TREE_CODE (mem))
++    {
++    case COMPONENT_REF:
++      if (mr->is_store)
++	analyse_incremental (stmt, mr);
++      mr->type = MR_SIMPLE;
++      mr->offset = TREE_OPERAND (mem, 1);
++      return;
++    case ARRAY_REF:
++      analyse_addr_eval (TREE_OPERAND (mem, 1), mr);
++      return;
++    case MEM_REF:
++      {
++	tree base_addr = get_mem_ref_address_ssa_name (mem, base);
++	if (dump_file)
++	  {
++	    fprintf (dump_file, "Base addr (%s): ",
++		     base_addr ? get_tree_code_name (TREE_CODE (base_addr))
++			       : "null");
++	    if (base_addr)
++	      print_generic_expr (dump_file, base_addr);
++	    fprintf (dump_file, "\n");
++	  }
++	if (base_addr)
++	  {
++	    mr->base = analyse_addr_eval (base_addr, mr);
++	    return;
++	  }
++	break;
++      }
++    default:
++      break;
++    }
++  mr->type = MR_UNSUPPORTED;
++  mr->base = NULL_TREE;
++}
++
++static void
++analyse_stmt (gimple *stmt)
++{
++  bool is_store;
++  tree *mem = simple_mem_ref_in_stmt (stmt, &is_store);
++  if (!mem)
++    return;
++  if (dump_file)
++    {
++      fprintf (dump_file, "\n%s: mr is found in stmt (%s): ",
++	       function_name (cfun), is_store ? "store" : "load");
++      print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS);
++    }
++  memref_t *mr = get_memref (stmt, *mem, is_store);
++  (*fmrs_map)[cfun]->insert (mr);
++  if (dump_file)
++    print_memref (mr);
++}
++
++/* Scan stmts for indirect stores/loads with bases passed as function args.  */
++
++static void
++collect_memrefs_for_cgraph_node (struct cgraph_node *n)
++{
++  if (dump_file)
++    fprintf (dump_file, "\nCollect indirect ptr info in %s\n", n->dump_name ());
++  n->get_body ();
++  function *fn = DECL_STRUCT_FUNCTION (n->decl);
++  gcc_assert (fn && n->has_gimple_body_p ());
++
++  push_cfun (fn);
++  basic_block bb;
++  gimple_stmt_iterator si;
++  (*fmrs_map)[fn] = new memref_set;
++  FOR_EACH_BB_FN (bb, fn)
++    for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
++      {
++	gimple *stmt = gsi_stmt (si);
++	analyse_stmt (stmt);
++      }
++  pop_cfun ();
++}
++
++/* Walk cgraph nodes and collect memory references info.  */
++
++static void
++collect_memory_references ()
++{
++  struct cgraph_node *n;
++  /* TODO: collect info only for loops and functions in loops.  */
++  FOR_EACH_DEFINED_FUNCTION (n)
++    if (nl_map->count (n) != 0 && n->has_gimple_body_p ())
++      collect_memrefs_for_cgraph_node (n);
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "\n\nDump mem references:\n");
++      FOR_EACH_DEFINED_FUNCTION (n)
++	if (nl_map->count (n) != 0 && n->has_gimple_body_p ())
++	  {
++	    function *fn = DECL_STRUCT_FUNCTION (n->decl);
++	    fprintf (dump_file, "\nIn function %s (%s):\n", function_name (fn),
++		     nl_map->count (n) != 0 ? "in loop" : "");
++	    for (memref_set::const_iterator it = (*fmrs_map)[fn]->begin ();
++		 it != (*fmrs_map)[fn]->end (); it++)
++	      print_memref (*it);
++	  }
++    }
++}
++
++/* Analysis of loops.  */
++
++memref_set *current_incr_mrs;
++memref_set *current_indirect_mrs;
++
++static void
++collect_memref (memref_t *mr, class loop *loop, bool check_loop)
++{
++  gimple *stmt = mr->stmts[0];
++  gcc_assert (stmt);
++  if (check_loop && !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
++    return;
++
++  /* TODO: Improve base invariant analysis for memrefs which are not local
++     (located in called functions).  */
++  bool is_base_inv = false;
++  if (mr->base)
++    is_base_inv = expr_invariant_in_loop_p (loop, mr->base);
++
++  if (dump_file && (mr->type == MR_INDIRECT || mr->is_incr))
++    {
++      fprintf (dump_file, "%s MR (%d): ", mr->is_incr ? "INCR" : "INDIRECT",
++	       mr->mr_id);
++      print_generic_expr (dump_file, mr->mem);
++      fprintf (dump_file, "\twith base: ");
++      if (mr->base)
++	print_generic_expr (dump_file, mr->base);
++      else
++	fprintf (dump_file, "null");
++      fprintf (dump_file, " (is_inv=%d)\n", is_base_inv);
++    }
++
++  if (!is_base_inv)
++    return;
++  if (mr->type == MR_INDIRECT)
++    current_indirect_mrs->insert (mr);
++  if (mr->is_incr)
++    current_incr_mrs->insert (mr);
++}
++
++static void
++analyse_callable_function (struct cgraph_node *n, class loop *loop)
++{
++  if (dump_file)
++    fprintf (dump_file, "Callable (%s):\n", n->dump_name ());
++
++  function *fn = DECL_STRUCT_FUNCTION (n->decl);
++  if (fmrs_map->count (fn))
++    for (memref_set::const_iterator it = (*fmrs_map)[fn]->begin ();
++	 it != (*fmrs_map)[fn]->end (); it++)
++      collect_memref (*it, loop, false);
++}
++
++static void
++insert_node_with_callable_nodes (node_set &s, struct cgraph_node *n)
++{
++  s.insert (n);
++  if (nn_map->count (n) == 0)
++    return;
++  node_set *set = (*nn_map)[n];
++  for (node_set::const_iterator it = set->begin (); it != set->end (); it++)
++    s.insert ((*it));
++}
++
++static bool
++compatible_memrefs_p (memref_t *mr1, memref_t *mr2, bool &compatible_offset)
++{
++  if (!mr1->base || !mr2->base || !mr2->offset)
++    return false;
++  tree base_type1 = TYPE_MAIN_VARIANT (TREE_TYPE (mr1->base));
++  tree base_type2 = TYPE_MAIN_VARIANT (TREE_TYPE (mr2->base));
++  if (base_type1 != base_type2)
++    return false;
++  if (mr1->offset && mr1->offset == mr2->offset)
++    compatible_offset = true;
++  else
++    compatible_offset = false;
++  return true;
++}
++
++static void
++compare_memrefs (memref_t* mr, memref_t* mr2)
++{
++  /* TODO: improve analysis of memrefs from different functions: take into
++     account data flow and context.  */
++  bool compatible_offset = false;
++  if (!compatible_memrefs_p (mr, mr2, compatible_offset))
++    return;
++  if (!compatible_offset)
++    {
++      for (memref_set::const_iterator it = mr->used_mrs.begin ();
++	   it != mr->used_mrs.end (); it++)
++	if ((*it)->offset && (*it)->offset == mr2->offset)
++	  {
++	    compatible_offset = true;
++	    if (dump_file)
++	      fprintf (dump_file, "Used MR (%d) and INC MR have "
++		       "the same offset\n", (*it)->mr_id);
++	    break;
++	  }
++    }
++  if (!compatible_offset)
++    return;
++  if (dump_file)
++    {
++      fprintf (dump_file, "MR (%d) is optimization candidate with offset: ",
++	       mr->mr_id);
++      print_generic_expr (dump_file, mr2->offset);
++      fprintf (dump_file, "\n");
++    }
++
++  if (!mr_candidate_map->count (mr))
++    {
++      (*mr_candidate_map)[mr] = mr2;
++      return;
++    }
++  /* TODO: support analysis with incrementation of different fields.  */
++  if ((*mr_candidate_map)[mr]->offset != mr2->offset)
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "It conflicts with previously found MR (%d) "
++		   "with offset ", (*mr_candidate_map)[mr]->mr_id);
++	  if ((*mr_candidate_map)[mr] != NULL)
++	    print_generic_expr (dump_file, (*mr_candidate_map)[mr]->offset);
++	  fprintf (dump_file, ", disable the optimization\n");
++	}
++      (*mr_candidate_map)[mr] = NULL;
++    }
++}
++
++/* In the given loop and all functions called from the loop, collect
++   indirect/incremental memrefs with invariant base address and inductive
++   offset.  */
++
++static void
++collect_memrefs_for_loop (class loop *loop, struct cgraph_node *n,
++			  function *fn)
++{
++  current_incr_mrs = new memref_set;
++  current_indirect_mrs = new memref_set;
++
++  if (dump_file)
++    fprintf (dump_file, "Loop %d\n", loop->num);
++  if (fmrs_map->count (fn))
++    for (memref_set::const_iterator it = (*fmrs_map)[fn]->begin ();
++	 it != (*fmrs_map)[fn]->end (); it++)
++      collect_memref (*it, loop, true);
++
++  /* Collect vector of functions called in the loop.  */
++  node_set set;
++  struct cgraph_edge *e;
++  struct cgraph_node *n2;
++  for (e = n->callees; e; e = e->next_callee)
++    {
++      gcall *stmt = e->call_stmt;
++      if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
++	continue;
++      insert_node_with_callable_nodes (set, e->callee);
++    }
++  for (e = n->indirect_calls; e; e = e->next_callee)
++    {
++      gcall *stmt = e->call_stmt;
++      if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt))
++	  || !e->indirect_info->targets)
++	continue;
++      for (unsigned i = 0; e->indirect_info->targets->iterate (i, &n2); ++i)
++	insert_node_with_callable_nodes (set, n2);
++    }
++  if (set.empty ())
++    return;
++  if (dump_file)
++    fprintf (dump_file, "Go inside all callables of %s\n", n->dump_name ());
++
++  for (node_set::const_iterator it = set.begin (); it != set.end (); it++)
++    analyse_callable_function (*it, loop);
++
++  if (!current_incr_mrs->empty () && !current_indirect_mrs->empty ())
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Loop has both incr and indirect memrefs\n"
++		   "Incr: ");
++	  for (memref_set::const_iterator it = current_incr_mrs->begin ();
++	       it != current_incr_mrs->end (); it++)
++	    fprintf (dump_file, "%d ", (*it)->mr_id);
++	  fprintf (dump_file, "\nIndirect: ");
++	  for (memref_set::const_iterator it = current_indirect_mrs->begin ();
++	       it != current_indirect_mrs->end (); it++)
++	    fprintf (dump_file, "%d ", (*it)->mr_id);
++	  fprintf (dump_file, "\n");
++	}
++      /* Check if indirect memref has a base address similar to one of
++	 incremental memref.  */
++      for (memref_set::const_iterator it = current_indirect_mrs->begin ();
++	   it != current_indirect_mrs->end (); it++)
++	for (memref_set::const_iterator it2 = current_incr_mrs->begin ();
++	   it2 != current_incr_mrs->end (); it2++)
++	  compare_memrefs (*it, *it2);
++    }
++
++  delete current_incr_mrs;
++  delete current_indirect_mrs;
++}
++
++static void
++analyse_loops_in_cgraph_node (struct cgraph_node *n)
++{
++  if (dump_file)
++    fprintf (dump_file, "\nAnalyse loops in %s\n", n->dump_name ());
++
++  n->get_body ();
++  function *fn = DECL_STRUCT_FUNCTION (n->decl);
++  gcc_assert (fn && n->has_gimple_body_p ());
++
++  push_cfun (fn);
++  calculate_dominance_info (CDI_DOMINATORS);
++  loop_optimizer_init (LOOPS_NORMAL);
++
++  for (auto loop : loops_list (cfun, 0))
++    {
++      class loop *outer = loop_outer (loop);
++      /* Walk only outermost loops.  */
++      if (outer->num != 0)
++	continue;
++      collect_memrefs_for_loop (loop, n, fn);
++    }
++
++  free_dominance_info (CDI_DOMINATORS);
++  loop_optimizer_finalize ();
++  pop_cfun ();
++}
++
++static void
++analyse_loops ()
++{
++  if (dump_file)
++    fprintf (dump_file, "\n\nLoops: procesing functions\n");
++  cgraph_node *n;
++  FOR_EACH_DEFINED_FUNCTION (n)
++    {
++      if (!can_be_optimized (n))
++	{
++	  if (dump_file)
++	    fprintf (dump_file, "Skip the function\n");
++	  continue;
++	}
++      analyse_loops_in_cgraph_node (n);
++    }
++
++  if (dump_file)
++    fprintf (dump_file, "\n\nList of optimization candidates:\n");
++
++  FOR_EACH_DEFINED_FUNCTION (n)
++    {
++      function *fn = DECL_STRUCT_FUNCTION (n->decl);
++      if (!can_be_optimized (n) || !fmrs_map->count (fn))
++	continue;
++      for (memref_map::iterator it = mr_candidate_map->begin ();
++	   it != mr_candidate_map->end (); ++it)
++	{
++	  memref_t *mr = it->first, *mr2 = it->second;
++	  if (mr2 == NULL || !(*fmrs_map)[fn]->count (mr))
++	    continue;
++	  if (!optimize_mrs_map->count (fn))
++	    (*optimize_mrs_map)[fn] = new memref_set;
++	  (*optimize_mrs_map)[fn]->insert (mr);
++	}
++      if (dump_file && optimize_mrs_map->count (fn))
++	{
++	  fprintf (dump_file, "Function %s\n", n->dump_name ());
++	  for (memref_set::const_iterator it
++		   = (*optimize_mrs_map)[fn]->begin ();
++	       it != (*optimize_mrs_map)[fn]->end (); it++)
++	    {
++	      memref_t *mr = *it, *mr2 = (*mr_candidate_map)[mr];
++	      fprintf (dump_file, "MRs %d,%d with incremental offset ",
++		       mr->mr_id, mr2->mr_id);
++	      print_generic_expr (dump_file, mr2->offset);
++	      fprintf (dump_file, "\n");
++	    }
++	}
++    }
++}
++
++/* Reduce the set filtering out memrefs with the same memory references,
++   return the result vector of memrefs.  */
++
++static void
++reduce_memref_set (memref_set *set, vec &vec)
++{
++  for (memref_set::const_iterator it = set->begin ();
++      it != set->end (); it++)
++    {
++      memref_t *mr1 = *it;
++      if (!vec.length ())
++	vec.safe_push (mr1);
++      else
++	{
++	  bool inserted = false;
++	  for (unsigned int i = 0; i < vec.length (); i++)
++	    {
++	      /* mr2 is less than current mr1.  */
++	      memref_t *mr2 = vec[i];
++	      if (operand_equal_p (mr1->mem, mr2->mem))
++		{
++		  if (dump_file)
++		    fprintf (dump_file, "The same mems in MRs %d and %d\n",
++			     mr1->mr_id, mr2->mr_id);
++		  /* TODO: maybe build new memref which include stmts of both
++		     mr1 and mr2.  */
++		  if ((mr1->is_store && !mr2->is_store)
++		       || mr1->stmts.length () > mr2->stmts.length ())
++		    {
++		      inserted = true;
++		      vec[i] = mr1;
++		    }
++		}
++	    }
++	  if (!inserted)
++	    vec.safe_push (mr1);
++	}
++    }
++  if (dump_file)
++    {
++      fprintf (dump_file, "MRs (%d) after filtering: ", vec.length ());
++      for (unsigned int i = 0; i < vec.length (); i++)
++	fprintf (dump_file, "%d ", vec[i]->mr_id);
++      fprintf (dump_file, "\n");
++    }
++}
++
++static void
++find_nearest_common_dominator (memref_t *mr, basic_block &dom)
++{
++  for (unsigned int i = 0; i < mr->stmts.length (); i++)
++    {
++      basic_block bb = gimple_bb (mr->stmts[i]);
++      gcc_assert (bb);
++      if (dom == bb)
++	continue;
++      if (dom)
++	dom = nearest_common_dominator (CDI_DOMINATORS, dom, bb);
++      else
++	dom = bb;
++    }
++}
++
++/* Return true if DECL is a parameter or a SSA_NAME for a parameter.
++   TODO: from gcc/tree-inline.c, maybe make it global.  */
++
++static bool
++is_parm (tree decl)
++{
++  if (TREE_CODE (decl) == SSA_NAME)
++    {
++      decl = SSA_NAME_VAR (decl);
++      if (!decl)
++	return false;
++    }
++
++  return (TREE_CODE (decl) == PARM_DECL);
++}
++
++/* TODO: the following functions are inspired by remap in gcc/tree-inline.c,
++   maybe we can share some functionality.  */
++
++static tree
++remap_name (tree name, gimple *stmt, bool is_lhs)
++{
++  tree new_tree = NULL_TREE;
++  if (decl_map->count (name))
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Find map: ");
++	  print_generic_expr (dump_file, name);
++	  fprintf (dump_file, " ");
++	  print_generic_expr (dump_file, (*decl_map)[name]);
++	  fprintf (dump_file, "\n");
++	}
++      return unshare_expr ((*decl_map)[name]);
++    }
++  if (!is_lhs)
++    return name;
++  if (TREE_CODE (name) == SSA_NAME)
++    {
++      /* Remap anonymous SSA names or SSA names of anonymous decls.  */
++      tree var = SSA_NAME_VAR (name);
++      if (!var
++	  || (!SSA_NAME_IS_DEFAULT_DEF (name)
++	      && VAR_P (var) && !VAR_DECL_IS_VIRTUAL_OPERAND (var)
++	      && DECL_ARTIFICIAL (var) && DECL_IGNORED_P (var)
++	      && !DECL_NAME (var)))
++	{
++	  new_tree = make_ssa_name (TREE_TYPE (name), stmt);
++	  if (!var && SSA_NAME_IDENTIFIER (name))
++	    SET_SSA_NAME_VAR_OR_IDENTIFIER (new_tree,
++					    SSA_NAME_IDENTIFIER (name));
++	  SSA_NAME_OCCURS_IN_ABNORMAL_PHI (new_tree)
++	      = SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name);
++	  /* So can range-info.  */
++	  if (!POINTER_TYPE_P (TREE_TYPE (name))
++	      && SSA_NAME_RANGE_INFO (name))
++	    duplicate_ssa_name_range_info (new_tree,
++					   SSA_NAME_RANGE_TYPE (name),
++					   SSA_NAME_RANGE_INFO (name));
++	  /* TODO: maybe correct the insertion.  */
++	  (*decl_map)[name] = new_tree;
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "New map (no var): ");
++	      print_generic_expr (dump_file, name);
++	      fprintf (dump_file, " ");
++	      print_generic_expr (dump_file, new_tree);
++	      fprintf (dump_file, "\n");
++	    }
++	  return new_tree;
++	}
++      /* TODO: maybe remap_name or do the same as before for SSA_NAME_VAR.  */
++      new_tree = make_ssa_name (TREE_TYPE (name), stmt);
++      (*decl_map)[name] = new_tree;
++      if (dump_file)
++	{
++	  fprintf (dump_file, "New map: ");
++	  print_generic_expr (dump_file, name);
++	  fprintf (dump_file, " ");
++	  print_generic_expr (dump_file, new_tree);
++	  fprintf (dump_file, "\n");
++	}
++    }
++  else if (VAR_P (name) || TREE_CODE (name) == PARM_DECL)
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "VAR/PARM: ");
++	  print_generic_expr (dump_file, name);
++	  fprintf (dump_file, "\n");
++	}
++      return name;
++    }
++  else
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Unsupported: ");
++	  print_generic_expr (dump_file, name);
++	  fprintf (dump_file, "\n");
++	}
++      //gcc_unreachable ();
++      return name;
++    }
++  return new_tree;
++}
++
++/* Passed to walk_tree.  Copies the node pointed to, if appropriate.  */
++
++static tree
++ipa_copy_tree_r (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
++{
++  enum tree_code code = TREE_CODE (*tp);
++  enum tree_code_class cl = TREE_CODE_CLASS (code);
++
++  /* We make copies of most nodes.  */
++  if (IS_EXPR_CODE_CLASS (cl)
++      || code == TREE_LIST
++      || code == TREE_VEC
++      || code == TYPE_DECL
++      || code == OMP_CLAUSE)
++    {
++      /* Because the chain gets clobbered when we make a copy, we save it
++	 here.  */
++      tree chain = NULL_TREE, new_tree;
++
++      if (CODE_CONTAINS_STRUCT (code, TS_COMMON))
++	chain = TREE_CHAIN (*tp);
++
++      /* Copy the node.  */
++      new_tree = copy_node (*tp);
++
++      *tp = new_tree;
++
++      /* Now, restore the chain, if appropriate.  That will cause
++	 walk_tree to walk into the chain as well.  */
++      if (code == PARM_DECL
++	  || code == TREE_LIST
++	  || code == OMP_CLAUSE)
++	TREE_CHAIN (*tp) = chain;
++
++      /* For now, we don't update BLOCKs when we make copies.  So, we
++	 have to nullify all BIND_EXPRs.  */
++      if (TREE_CODE (*tp) == BIND_EXPR)
++	BIND_EXPR_BLOCK (*tp) = NULL_TREE;
++    }
++  else if (code == CONSTRUCTOR || code == STATEMENT_LIST)
++    gcc_unreachable ();
++  else if (TREE_CODE_CLASS (code) == tcc_type
++	   || TREE_CODE_CLASS (code) == tcc_declaration
++	   || TREE_CODE_CLASS (code) == tcc_constant)
++    *walk_subtrees = 0;
++  return NULL_TREE;
++}
++
++/* Remap the GIMPLE operand pointed to by *TP.  DATA is really a
++   'struct walk_stmt_info *'.  DATA->INFO is a 'gimple *'.
++   WALK_SUBTREES is used to indicate walk_gimple_op whether to keep
++   recursing into the children nodes of *TP.  */
++
++static tree
++remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
++{
++  struct walk_stmt_info *wi_p = (struct walk_stmt_info *) data;
++  gimple *stmt = (gimple *) wi_p->info;
++
++  /* For recursive invocations this is no longer the LHS itself.  */
++  bool is_lhs = wi_p->is_lhs;
++  wi_p->is_lhs = false;
++
++  if (TREE_CODE (*tp) == SSA_NAME)
++    {
++      *tp = remap_name (*tp, stmt, is_lhs);
++      *walk_subtrees = 0;
++      if (is_lhs)
++	SSA_NAME_DEF_STMT (*tp) = wi_p->stmt;
++      return NULL;
++    }
++  else if (auto_var_in_fn_p (*tp, cfun->decl))
++    {
++      /* Local variables and labels need to be replaced by equivalent
++	 variables.  We don't want to copy static variables; there's
++	 only one of those, no matter how many times we inline the
++	 containing function.  Similarly for globals from an outer
++	 function.  */
++      tree new_decl;
++
++      /* Remap the declaration.  */
++      new_decl = remap_name (*tp, stmt, is_lhs);
++      gcc_assert (new_decl);
++      /* Replace this variable with the copy.  */
++      STRIP_TYPE_NOPS (new_decl);
++      /* ???  The C++ frontend uses void * pointer zero to initialize
++	 any other type.  This confuses the middle-end type verification.
++	 As cloned bodies do not go through gimplification again the fixup
++	 there doesn't trigger.  */
++      if (TREE_CODE (new_decl) == INTEGER_CST
++	  && !useless_type_conversion_p (TREE_TYPE (*tp), TREE_TYPE (new_decl)))
++	new_decl = fold_convert (TREE_TYPE (*tp), new_decl);
++      *tp = new_decl;
++      *walk_subtrees = 0;
++    }
++  else if (TREE_CODE (*tp) == STATEMENT_LIST || TREE_CODE (*tp) == SAVE_EXPR)
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Unexpected tree: ");
++	  print_generic_expr (dump_file, *tp);
++	  fprintf (dump_file, "\n");
++	}
++      gcc_unreachable ();
++    }
++  else
++    {
++      /* Otherwise, just copy the node.  Note that copy_tree_r already
++	 knows not to copy VAR_DECLs, etc., so this is safe.  */
++
++      if (TREE_CODE (*tp) == MEM_REF)
++	{
++	  /* We need to re-canonicalize MEM_REFs from inline substitutions
++	     that can happen when a pointer argument is an ADDR_EXPR.
++	     Recurse here manually to allow that.  */
++	  tree ptr = TREE_OPERAND (*tp, 0);
++	  tree type = TREE_TYPE (*tp);
++	  tree old = *tp;
++	  walk_tree (&ptr, remap_gimple_op_r, data, NULL);
++	  *tp = fold_build2 (MEM_REF, type, ptr, TREE_OPERAND (*tp, 1));
++	  TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
++	  TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
++	  TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
++	  /* TODO: maybe support this case.  */
++	  gcc_assert (MR_DEPENDENCE_CLIQUE (old) == 0);
++	  /* We cannot propagate the TREE_THIS_NOTRAP flag if we have
++	     remapped a parameter as the property might be valid only
++	     for the parameter itself.  */
++	  if (TREE_THIS_NOTRAP (old) && (!is_parm (TREE_OPERAND (old, 0))))
++	    TREE_THIS_NOTRAP (*tp) = 1;
++	  REF_REVERSE_STORAGE_ORDER (*tp) = REF_REVERSE_STORAGE_ORDER (old);
++	  *walk_subtrees = 0;
++	  return NULL;
++	}
++
++      /* Here is the "usual case".  Copy this tree node, and then
++	 tweak some special cases.  */
++      ipa_copy_tree_r (tp, walk_subtrees, NULL);
++      gcc_assert (!(TREE_CODE (*tp) == TARGET_EXPR && TREE_OPERAND (*tp, 3)));
++      if (TREE_CODE (*tp) == ADDR_EXPR)
++	{
++	  /* TODO: If this used to be invariant, but is not any longer,
++	     then regimplification is probably needed.  */
++	  walk_tree (&TREE_OPERAND (*tp, 0), remap_gimple_op_r, data, NULL);
++	  recompute_tree_invariant_for_addr_expr (*tp);
++	  *walk_subtrees = 0;
++	}
++    }
++  /* TODO: maybe we need to update TREE_BLOCK (*tp).  */
++
++  /* Keep iterating.  */
++  return NULL_TREE;
++}
++
++static void
++create_cgraph_edge (cgraph_node *n, gimple *stmt)
++{
++  gcall *call_stmt = dyn_cast  (stmt);
++  basic_block bb = gimple_bb (stmt);
++  tree decl = gimple_call_fndecl (call_stmt);
++  if (!decl)
++    return;
++  struct cgraph_edge *e = n->create_edge (cgraph_node::get_create (decl),
++					  call_stmt, bb->count);
++  /* TODO: maybe we need to store ipa_call_summary result.  */
++  ipa_call_summaries->get_create (e);
++}
++
++/* Insert prefetch intrinsics in this function, return nonzero on success.  */
++
++static int
++optimize_function (cgraph_node *n, function *fn)
++{
++  /* In a given function, optimize only indirect memrefs with
++     the same incremental memref.
++     TODO: implement the optimization for other cases.  */
++  bool different_incrementals = false;
++  memref_t *first_mr = NULL;
++  memref_set used_mrs;
++  for (memref_set::const_iterator it = (*optimize_mrs_map)[fn]->begin ();
++       it != (*optimize_mrs_map)[fn]->end (); it++)
++    {
++      memref_t *mr = *it;
++      if (!first_mr)
++	first_mr = mr;
++      else if ((*mr_candidate_map)[first_mr] != (*mr_candidate_map)[mr])
++	{
++	  different_incrementals = true;
++	  break;
++	}
++      for (memref_set::const_iterator it2 = mr->used_mrs.begin ();
++	   it2 != mr->used_mrs.end (); it2++)
++	used_mrs.insert (*it2);
++    }
++  if (different_incrementals)
++    {
++      if (dump_file)
++	fprintf (dump_file, "It contains memrefs with different "
++		 "incrementals.  Skip the case.\n");
++      return 0;
++    }
++  memref_t *inc_mr = (*mr_candidate_map)[first_mr];
++  if (!inc_mr->stmts[0] || !gimple_assign_single_p (inc_mr->stmts[0]))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Incremental MR with unexpected stmt.  "
++		 "Skip the case.\n");
++      return 0;
++    }
++  if (dump_file && !used_mrs.empty ())
++    print_mrs_ids (used_mrs, "Common list of used mrs:\n");
++
++  /* Find a memref in used mrs which corresponds to the found incremental
++     memref.  */
++  memref_t *comp_mr = NULL;
++  for (memref_set::const_iterator it = used_mrs.begin ();
++       it != used_mrs.end (); it++)
++    {
++    bool c_offset;
++    if ((*it)->type != MR_SIMPLE || inc_mr->type != MR_SIMPLE
++	|| !compatible_memrefs_p (*it, inc_mr, c_offset))
++      continue;
++    if (c_offset)
++      {
++	if (dump_file)
++	  fprintf (dump_file, "Found compatible used MR (%d) and "
++		   "incr MR (%d)\n", (*it)->mr_id, inc_mr->mr_id);
++	comp_mr = (*it);
++      }
++    }
++  if (!comp_mr || !comp_mr->stmts[0]
++      || !gimple_assign_single_p (comp_mr->stmts[0]))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Compatible MR in this function is not found "
++		 " or it has unexpected stmt.  Skip the case.\n");
++      return 0;
++    }
++
++  /* Filter out memrefs with the same memory references.
++     TODO: maybe do the same with used mrs.  */
++  vec vmrs = vNULL;
++  reduce_memref_set ((*optimize_mrs_map)[fn], vmrs);
++
++  /* Find insertion place.  Create new BB.  */
++  /* TODO: maybe it is useful to process also used_mrs.  */
++  basic_block dom_bb = NULL;
++  for (unsigned int i = 0; i < vmrs.length (); i++)
++    find_nearest_common_dominator (vmrs[i], dom_bb);
++
++  if (!dom_bb)
++    {
++      if (dump_file)
++	fprintf (dump_file, "Dominator bb for MRs is not found.  "
++		 "Skip the case.\n");
++      return 0;
++    }
++  else if (dump_file)
++    fprintf (dump_file, "Dominator bb %d for MRs\n", dom_bb->index);
++
++  split_block (dom_bb, (gimple *) NULL);
++  gimple_stmt_iterator gsi = gsi_last_bb (dom_bb);
++
++  /* Create new inc var.  Insert new_var = old_var + step * factor.  */
++  decl_map = new tree_map;
++  gcc_assert (comp_mr->stmts[0] && gimple_assign_single_p (comp_mr->stmts[0]));
++  tree inc_var = gimple_assign_lhs (comp_mr->stmts[0]);
++  gimple_seq stmts = NULL;
++  tree var_type = TREE_TYPE (inc_var);
++  enum tree_code inc_code;
++  if (TREE_CODE (var_type) == POINTER_TYPE)
++    inc_code = POINTER_PLUS_EXPR;
++  else
++    inc_code = PLUS_EXPR;
++  tree step = inc_mr->step;
++  unsigned dist_val = tree_to_uhwi (step) * param_ipa_prefetch_distance_factor;
++  tree dist = build_int_cst (TREE_TYPE (step), dist_val);
++  tree new_inc_var = gimple_build (&stmts, inc_code, var_type, inc_var, dist);
++  (*decl_map)[inc_var] = new_inc_var;
++
++  /* Create other new vars.  Insert new stmts.  */
++  struct walk_stmt_info wi;
++  stmt_set processed_stmts;
++  memref_tree_map mr_new_trees;
++  for (memref_set::const_iterator it = used_mrs.begin ();
++       it != used_mrs.end (); it++)
++    {
++      memref_t *mr = *it;
++      gimple *last_stmt = NULL;
++      if (mr == comp_mr)
++	continue;
++      for (int i = mr->stmts.length () - 1; i >= 0 ; i--)
++	{
++	  if (processed_stmts.count (mr->stmts[i]))
++	    continue;
++	  processed_stmts.insert (mr->stmts[i]);
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Copy stmt %d from used MR (%d):\n",
++		       i, mr->mr_id);
++	      print_gimple_stmt (dump_file, mr->stmts[i], 0);
++	    }
++	  /* Create a new copy of STMT and duplicate STMT's virtual
++	     operands.  */
++	  gimple *copy = gimple_copy (mr->stmts[i]);
++	  gcc_checking_assert (!is_gimple_debug (copy));
++
++	  /* Remap all the operands in COPY.  */
++	  memset (&wi, 0, sizeof (wi));
++	  last_stmt = copy;
++	  wi.info = copy;
++	  walk_gimple_op (copy, remap_gimple_op_r, &wi);
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Stmt %d after remap:\n",i);
++	      print_gimple_stmt (dump_file, copy, 0);
++	    }
++	  gimple_seq_add_stmt (&stmts, copy);
++	}
++      gcc_assert (last_stmt);
++      mr_new_trees[mr] = gimple_assign_lhs (last_stmt);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "MR (%d) new mem: ", mr->mr_id);
++	  print_generic_expr (dump_file, gimple_assign_lhs (last_stmt));
++	  fprintf (dump_file, "\n");
++	}
++    }
++  /* On new load check page fault.  */
++  /* Insert prefetch instructions.  */
++  if (dump_file)
++    fprintf (dump_file, "Evaluate addresses and insert prefetch insn.\n");
++
++  vec pcalls = vNULL;
++  tree local;
++  switch (param_ipa_prefetch_locality)
++    {
++    case 0:
++      local = integer_zero_node;
++      break;
++    case 1:
++      local = integer_one_node;
++      break;
++    case 2:
++      local = build_int_cst (integer_type_node, 2);
++      break;
++    default:
++    case 3:
++      local = integer_three_node;
++      break;
++    }
++  for (unsigned int j = 0; j < vmrs.length (); j++)
++    {
++      memref_t *mr = vmrs[j];
++      /* Don't need to copy the last stmt, since we insert prefetch insn
++	 instead of it.  */
++      for (int i = mr->stmts.length () - 1; i >= 1 ; i--)
++	{
++	  if (processed_stmts.count (mr->stmts[i]))
++	    continue;
++	  processed_stmts.insert (mr->stmts[i]);
++
++	  gimple *copy = gimple_copy (mr->stmts[i]);
++	  gcc_checking_assert (!is_gimple_debug (copy));
++
++	  /* Remap all the operands in COPY.  */
++	  memset (&wi, 0, sizeof (wi));
++	  wi.info = copy;
++	  walk_gimple_op (copy, remap_gimple_op_r, &wi);
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Stmt %d after remap:\n",i);
++	      print_gimple_stmt (dump_file, copy, 0);
++	    }
++	  gimple_seq_add_stmt (&stmts, copy);
++	}
++      gimple *last_stmt = mr->stmts[0];
++      gcc_assert (last_stmt);
++      mr_new_trees[mr] = gimple_assign_lhs (last_stmt);
++      tree write_p = mr->is_store ? integer_one_node : integer_zero_node;
++      tree addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE);
++      if (decl_map->count (addr))
++	addr = (*decl_map)[addr];
++      last_stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
++				     3, addr, write_p, local);
++      pcalls.safe_push (last_stmt);
++      gimple_seq_add_stmt (&stmts, last_stmt);
++    }
++
++  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++  delete decl_map;
++
++  /* Modify cgraph inserting calls to prefetch intrinsics.  */
++  for (unsigned i = 0; i < pcalls.length (); i++)
++    create_cgraph_edge (n, pcalls[i]);
++  ipa_update_overall_fn_summary (n);
++
++  return 1;
++}
++
++static int
++insert_prefetch ()
++{
++  int res = 0;
++  cgraph_node *n;
++  FOR_EACH_DEFINED_FUNCTION (n)
++    {
++      function *fn = DECL_STRUCT_FUNCTION (n->decl);
++      if (!optimize_mrs_map->count (fn))
++	continue;
++      if (dump_file)
++	fprintf (dump_file, "Optimize function %s\n", n->dump_name ());
++      push_cfun (DECL_STRUCT_FUNCTION (n->decl));
++      calculate_dominance_info (CDI_DOMINATORS);
++      res |= optimize_function (n, fn);
++      free_dominance_info (CDI_DOMINATORS);
++      pop_cfun ();
++    }
++  return res;
++}
++
++static unsigned int
++ipa_prefetch (void)
++{
++  if (!targetm.have_prefetch ())
++    {
++      if (dump_file)
++	fprintf (dump_file, "Prefetch is not supported by the target.\n");
++      return 0;
++    }
++
++  unsigned int ret = 0;
++  el_map = new edge_in_loop;
++  nl_map = new node_in_loop;
++  icn_map = new node_to_iedge_map;
++  nn_map = new node_to_node_map;
++  tm_map = new tree_memref_map;
++  fmrs_map = new funct_mrs_map;
++  mr_candidate_map = new memref_map;
++  optimize_mrs_map = new funct_mrs_map;
++
++  max_mr_id = 0;
++  /* TODO: check if we really need this init.  */
++  if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
++    {
++      tree type = build_function_type_list (void_type_node,
++					    const_ptr_type_node, NULL_TREE);
++      tree decl = add_builtin_function ("__builtin_prefetch", type,
++					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
++					NULL, NULL_TREE);
++      DECL_IS_NOVOPS (decl) = true;
++      set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
++    }
++
++  analyse_cgraph ();
++  prepare_indirect_call_info ();
++  propagate_loop_info_in_cgraph ();
++  collect_memory_references ();
++  analyse_loops ();
++
++  /* TODO: implement some specific heuristics.  */
++  if (!optimize_mrs_map->empty ())
++    ret = insert_prefetch ();
++
++  delete el_map;
++  delete nl_map;
++  for (node_to_iedge_map::iterator it = icn_map->begin ();
++       it != icn_map->end (); ++it)
++    delete it->second;
++  delete icn_map;
++  for (node_to_node_map::iterator it = nn_map->begin ();
++       it != nn_map->end (); ++it)
++    delete it->second;
++  delete nn_map;
++  for (tree_memref_map::iterator it = tm_map->begin ();
++       it != tm_map->end (); ++it)
++    delete it->second;
++  delete tm_map;
++  for (funct_mrs_map::iterator it = fmrs_map->begin ();
++       it != fmrs_map->end (); ++it)
++    delete it->second;
++  delete fmrs_map;
++  delete mr_candidate_map;
++  delete optimize_mrs_map;
++
++  /* TODO: maybe add other todos.  */
++  return ret | TODO_verify_all;
++}
++
++const pass_data pass_data_ipa_prefetch =
++{
++  SIMPLE_IPA_PASS, // type
++  "ipa_prefetch", // name
++  OPTGROUP_NONE, // optinfo_flags
++  TV_IPA_PREFETCH, // tv_id
++  0, // properties_required
++  0, // properties_provided
++  0, // properties_destroyed
++  0, // todo_flags_start
++  0, // todo_flags_finish
++};
++
++class pass_ipa_prefetch : public simple_ipa_opt_pass
++{
++public:
++  pass_ipa_prefetch (gcc::context *ctxt)
++    : simple_ipa_opt_pass (pass_data_ipa_prefetch, ctxt)
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *);
++  virtual unsigned int execute (function *)
++  {
++    return ipa_prefetch ();
++  }
++}; // class pass_ipa_prefetch
++
++bool
++pass_ipa_prefetch::gate (function *)
++{
++  return (optimize >= 3
++	  && flag_ipa_prefetch
++	  /* Don't bother doing anything if the program has errors.  */
++	  && !seen_error ()
++	  && flag_lto_partition == LTO_PARTITION_ONE
++	  /* Only enable struct optimizations in lto or whole_program.  */
++	  && (in_lto_p || flag_whole_program));
++}
++
++} // anon namespace
++
++simple_ipa_opt_pass *
++make_pass_ipa_prefetch (gcc::context *ctxt)
++{
++  return new pass_ipa_prefetch (ctxt);
++}
+diff --git a/gcc/ipa-sra.cc b/gcc/ipa-sra.cc
+index 261a72085..5355cf2f4 100644
+--- a/gcc/ipa-sra.cc
++++ b/gcc/ipa-sra.cc
+@@ -3033,6 +3033,14 @@ process_edge_to_unknown_caller (cgraph_edge *cs)
+   gcc_checking_assert (from_ifs);
+   isra_call_summary *csum = call_sums->get (cs);
+ 
++  /* TODO: implement better support for call edges inserted after summary
++     collection but before sra wpa invocation.  */
++  if (!csum)
++    {
++      csum = call_sums->get_create (cs);
++      csum->m_return_ignored = true;
++    }
++
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     fprintf (dump_file, "Processing an edge to an unknown caller from %s:\n",
+ 	     cs->caller->dump_name ());
+diff --git a/gcc/params.opt b/gcc/params.opt
+index 7e5c119cf..5c07e3986 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -309,6 +309,14 @@ Maximum pieces that IPA-SRA tracks per formal parameter, as a consequence, also
+ Common Joined UInteger Var(param_ipa_sra_ptr_growth_factor) Init(2) Param Optimization
+ Maximum allowed growth of number and total size of new parameters that ipa-sra replaces a pointer to an aggregate with.
+ 
++-param=ipa-prefetch-distance-factor=
++Common Joined UInteger Var(param_ipa_prefetch_distance_factor) Init(4) Param Optimization
++The factor represents the number of inductive variable incrementations to evaluate an indirect memory address for IPA prefetch.
++
++-param=ipa-prefetch-locality=
++Common Joined UInteger Var(param_ipa_prefetch_locality) Init(3) Param Optimization
++The flag represents temporal locality values in the following way: 0:pstl1strm, 1:pstl3keep, 2:pstl2keep, 3:pstl1keep.
++
+ -param=ira-loop-reserved-regs=
+ Common Joined UInteger Var(param_ira_loop_reserved_regs) Init(2) Param Optimization
+ The number of registers in each class kept unused by loop invariant motion.
+diff --git a/gcc/passes.def b/gcc/passes.def
+index b7d4f7b4e..4c1436766 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -158,6 +158,7 @@ along with GCC; see the file COPYING3.  If not see
+   NEXT_PASS (pass_ipa_icf);
+   NEXT_PASS (pass_ipa_devirt);
+   NEXT_PASS (pass_ipa_icp);
++  NEXT_PASS (pass_ipa_prefetch);
+   NEXT_PASS (pass_ipa_cp);
+   NEXT_PASS (pass_ipa_sra);
+   NEXT_PASS (pass_ipa_cdtor_merge);
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 18a9f62cc..810ae20fd 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -81,6 +81,7 @@ DEFTIMEVAR (TV_IPA_CONSTANT_PROP     , "ipa cp")
+ DEFTIMEVAR (TV_IPA_INLINING          , "ipa inlining heuristics")
+ DEFTIMEVAR (TV_IPA_FNSPLIT           , "ipa function splitting")
+ DEFTIMEVAR (TV_IPA_COMDATS	     , "ipa comdats")
++DEFTIMEVAR (TV_IPA_PREFETCH	     , "ipa prefetch")
+ DEFTIMEVAR (TV_IPA_STRUCT_REORG      , "ipa struct reorg optimization")
+ DEFTIMEVAR (TV_IPA_OPT		     , "ipa various optimizations")
+ DEFTIMEVAR (TV_IPA_LTO_DECOMPRESS    , "lto stream decompression")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 1733931c3..63f1192ae 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -529,6 +529,7 @@ extern ipa_opt_pass_d *make_pass_ipa_icp (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt);
++extern simple_ipa_opt_pass *make_pass_ipa_prefetch (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_struct_reorg (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt);
+-- 
+2.33.0
+
diff --git a/0051-LoongArch-Remove-duplicate-definition-of-CLZ_DEFINED.patch b/0051-LoongArch-Remove-duplicate-definition-of-CLZ_DEFINED.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d3bcb9575543b4bf414772c35b3bb683e917440e
--- /dev/null
+++ b/0051-LoongArch-Remove-duplicate-definition-of-CLZ_DEFINED.patch
@@ -0,0 +1,49 @@
+From 21bb4f07db53df717d02e9115dcdb7b5475ede2a Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Tue, 28 Nov 2023 15:56:35 +0800
+Subject: [PATCH 051/188] LoongArch: Remove duplicate definition of
+ CLZ_DEFINED_VALUE_AT_ZERO.
+
+In the r14-5547 commit, C[LT]Z_DEFINED_VALUE_AT_ZERO were defined at
+the same time, but in fact, CLZ_DEFINED_VALUE_AT_ZERO has already been
+defined, so remove the duplicate definition.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (CTZ_DEFINED_VALUE_AT_ZERO): Add
+	description.
+	(CLZ_DEFINED_VALUE_AT_ZERO): Remove duplicate definition.
+---
+ gcc/config/loongarch/loongarch.h | 9 +++------
+ 1 file changed, 3 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 19cf6fd33..8b28be0e4 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -288,10 +288,12 @@ along with GCC; see the file COPYING3.  If not see
+ /* Define if loading short immediate values into registers sign extends.  */
+ #define SHORT_IMMEDIATES_SIGN_EXTEND 1
+ 
+-/* The clz.{w/d} instructions have the natural values at 0.  */
++/* The clz.{w/d}, ctz.{w/d} instructions have the natural values at 0.  */
+ 
+ #define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+   ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
++#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
++  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+ 
+ /* Standard register usage.  */
+ 
+@@ -1239,8 +1241,3 @@ struct GTY (()) machine_function
+ 
+ #define TARGET_EXPLICIT_RELOCS \
+   (la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS)
+-
+-#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+-  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+-#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
+-  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
+-- 
+2.43.0
+
diff --git a/0051-Port-fixes-for-IPA-prefetch-to-GCC-12.patch b/0051-Port-fixes-for-IPA-prefetch-to-GCC-12.patch
new file mode 100644
index 0000000000000000000000000000000000000000..dae19fa2574f55f6d82b4d3088d46691eff58bfb
--- /dev/null
+++ b/0051-Port-fixes-for-IPA-prefetch-to-GCC-12.patch
@@ -0,0 +1,2216 @@
+From 4c262af8e178ac7c81b32be5b159b4d09a5841c9 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Fri, 8 Mar 2024 07:07:50 +0800
+Subject: [PATCH 1/2] Port fixes for IPA prefetch to GCC 12
+
+---
+ gcc/ipa-devirt.cc                          |    9 +-
+ gcc/ipa-prefetch.cc                        |  174 +-
+ gcc/ipa-sra.cc                             |    7 +
+ gcc/params.opt                             |    4 +-
+ gcc/testsuite/gcc.dg/completion-1.c        |    1 +
+ gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c | 1843 ++++++++++++++++++++
+ 6 files changed, 1974 insertions(+), 64 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c
+
+diff --git a/gcc/ipa-devirt.cc b/gcc/ipa-devirt.cc
+index dd3562d56..dd000b401 100644
+--- a/gcc/ipa-devirt.cc
++++ b/gcc/ipa-devirt.cc
+@@ -5029,9 +5029,12 @@ analyze_assign_stmt (gimple *stmt)
+ 	}
+       else
+ 	{
+-	  fprintf (dump_file, "\nUnsupported rhs type %s in assign stmt: ",
+-		   get_tree_code_name (TREE_CODE (rhs)));
+-	  print_gimple_stmt (dump_file, stmt, 0);
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "\nUnsupported rhs type %s in assign stmt: ",
++		       get_tree_code_name (TREE_CODE (rhs)));
++	      print_gimple_stmt (dump_file, stmt, 0);
++	    }
+ 	  gcc_unreachable ();
+ 	}
+     }
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index aeea51105..9537e4835 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -167,6 +167,7 @@ analyse_cgraph ()
+ 	}
+ 
+       /* TODO: maybe remove loop info here.  */
++      n->get_body ();
+       push_cfun (DECL_STRUCT_FUNCTION (n->decl));
+       calculate_dominance_info (CDI_DOMINATORS);
+       loop_optimizer_init (LOOPS_NORMAL);
+@@ -942,6 +943,9 @@ compare_memrefs (memref_t* mr, memref_t* mr2)
+       (*mr_candidate_map)[mr] = mr2;
+       return;
+     }
++  /* Probably we shouldn't leave nulls in the map.  */
++  if ((*mr_candidate_map)[mr] == NULL)
++    return;
+   /* TODO: support analysis with incrementation of different fields.  */
+   if ((*mr_candidate_map)[mr]->offset != mr2->offset)
+     {
+@@ -1090,6 +1094,15 @@ analyse_loops ()
+ 	  memref_t *mr = it->first, *mr2 = it->second;
+ 	  if (mr2 == NULL || !(*fmrs_map)[fn]->count (mr))
+ 	    continue;
++	  /* For now optimize only MRs that mem is MEM_REF.
++	     TODO: support other MR types.  */
++	  if (TREE_CODE (mr->mem) != MEM_REF)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "Skip MR %d: unsupported tree code = %s\n",
++			 mr->mr_id, get_tree_code_name (TREE_CODE (mr->mem)));
++	      continue;
++	    }
+ 	  if (!optimize_mrs_map->count (fn))
+ 	    (*optimize_mrs_map)[fn] = new memref_set;
+ 	  (*optimize_mrs_map)[fn]->insert (mr);
+@@ -1102,7 +1115,7 @@ analyse_loops ()
+ 	       it != (*optimize_mrs_map)[fn]->end (); it++)
+ 	    {
+ 	      memref_t *mr = *it, *mr2 = (*mr_candidate_map)[mr];
+-	      fprintf (dump_file, "MRs %d,%d with incremental offset ",
++	      fprintf (dump_file, "MRs %d, %d with incremental offset ",
+ 		       mr->mr_id, mr2->mr_id);
+ 	      print_generic_expr (dump_file, mr2->offset);
+ 	      fprintf (dump_file, "\n");
+@@ -1435,6 +1448,52 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
+   return NULL_TREE;
+ }
+ 
++/* Copy stmt and remap its operands.  */
++
++static gimple *
++gimple_copy_and_remap (gimple *stmt)
++{
++  gimple *copy = gimple_copy (stmt);
++  gcc_checking_assert (!is_gimple_debug (copy));
++
++  /* Remap all the operands in COPY.  */
++  struct walk_stmt_info wi;
++  memset (&wi, 0, sizeof (wi));
++  wi.info = copy;
++  walk_gimple_op (copy, remap_gimple_op_r, &wi);
++  if (dump_file)
++    {
++      fprintf (dump_file, "Stmt copy after remap:\n");
++      print_gimple_stmt (dump_file, copy, 0);
++    }
++  return copy;
++}
++
++/* Copy and remap stmts listed in MR in reverse order to last_idx, skipping
++   processed ones.  Insert new stmts to the sequence.  */
++
++static gimple *
++gimple_copy_and_remap_memref_stmts (memref_t *mr, gimple_seq &stmts,
++				    int last_idx, stmt_set &processed)
++{
++  gimple *last_stmt = NULL;
++  for (int i = mr->stmts.length () - 1; i >= last_idx ; i--)
++    {
++      if (processed.count (mr->stmts[i]))
++	continue;
++      processed.insert (mr->stmts[i]);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Copy stmt %d from used MR (%d):\n",
++		   i, mr->mr_id);
++	  print_gimple_stmt (dump_file, mr->stmts[i], 0);
++	}
++      last_stmt = gimple_copy_and_remap (mr->stmts[i]);
++      gimple_seq_add_stmt (&stmts, last_stmt);
++  }
++  return last_stmt;
++}
++
+ static void
+ create_cgraph_edge (cgraph_node *n, gimple *stmt)
+ {
+@@ -1490,6 +1549,13 @@ optimize_function (cgraph_node *n, function *fn)
+ 		 "Skip the case.\n");
+       return 0;
+     }
++  if (!tree_fits_shwi_p (inc_mr->step))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Cannot represent incremental MR's step as "
++		 "integer.  Skip the case.\n");
++      return 0;
++    }
+   if (dump_file && !used_mrs.empty ())
+     print_mrs_ids (used_mrs, "Common list of used mrs:\n");
+ 
+@@ -1539,16 +1605,44 @@ optimize_function (cgraph_node *n, function *fn)
+       return 0;
+     }
+   else if (dump_file)
+-    fprintf (dump_file, "Dominator bb %d for MRs\n", dom_bb->index);
++    {
++      fprintf (dump_file, "Dominator bb %d for MRs:\n", dom_bb->index);
++      gimple_dump_bb (dump_file, dom_bb, 0, dump_flags);
++      fprintf (dump_file, "\n");
++    }
+ 
+-  split_block (dom_bb, (gimple *) NULL);
++  /* Try to find comp_mr's stmt in the dominator bb.  */
++  gimple *last_used = NULL;
++  for (gimple_stmt_iterator si = gsi_last_bb (dom_bb); !gsi_end_p (si);
++       gsi_prev (&si))
++    if (comp_mr->stmts[0] == gsi_stmt (si))
++      {
++	last_used = gsi_stmt (si);
++	if (dump_file)
++	  {
++	    fprintf (dump_file, "Last used stmt in dominator bb:\n");
++	    print_gimple_stmt (dump_file, last_used, 0);
++	  }
++	break;
++      }
++
++  split_block (dom_bb, last_used);
+   gimple_stmt_iterator gsi = gsi_last_bb (dom_bb);
+ 
+   /* Create new inc var.  Insert new_var = old_var + step * factor.  */
+   decl_map = new tree_map;
+   gcc_assert (comp_mr->stmts[0] && gimple_assign_single_p (comp_mr->stmts[0]));
+   tree inc_var = gimple_assign_lhs (comp_mr->stmts[0]);
++  /* If old_var definition dominates the current use, just use it, otherwise
++     evaluate it just before new inc var evaluation.  */
+   gimple_seq stmts = NULL;
++  stmt_set processed_stmts;
++  if (!dominated_by_p (CDI_DOMINATORS, dom_bb, gimple_bb (comp_mr->stmts[0])))
++    {
++      gimple *tmp = gimple_copy_and_remap_memref_stmts (comp_mr, stmts, 0,
++							processed_stmts);
++      inc_var = gimple_assign_lhs (tmp);
++    }
+   tree var_type = TREE_TYPE (inc_var);
+   enum tree_code inc_code;
+   if (TREE_CODE (var_type) == POINTER_TYPE)
+@@ -1556,52 +1650,28 @@ optimize_function (cgraph_node *n, function *fn)
+   else
+     inc_code = PLUS_EXPR;
+   tree step = inc_mr->step;
+-  unsigned dist_val = tree_to_uhwi (step) * param_ipa_prefetch_distance_factor;
++  HOST_WIDE_INT dist_val = tree_to_shwi (step)
++			   * param_ipa_prefetch_distance_factor;
+   tree dist = build_int_cst (TREE_TYPE (step), dist_val);
+   tree new_inc_var = gimple_build (&stmts, inc_code, var_type, inc_var, dist);
+   (*decl_map)[inc_var] = new_inc_var;
++  if (dump_file)
++    {
++      fprintf (dump_file, "New distance value: %ld, new inc var: ", dist_val);
++      print_generic_expr (dump_file, new_inc_var);
++      fprintf (dump_file, "\n");
++    }
+ 
+   /* Create other new vars.  Insert new stmts.  */
+-  struct walk_stmt_info wi;
+-  stmt_set processed_stmts;
+-  memref_tree_map mr_new_trees;
+   for (memref_set::const_iterator it = used_mrs.begin ();
+        it != used_mrs.end (); it++)
+     {
+       memref_t *mr = *it;
+-      gimple *last_stmt = NULL;
+       if (mr == comp_mr)
+ 	continue;
+-      for (int i = mr->stmts.length () - 1; i >= 0 ; i--)
+-	{
+-	  if (processed_stmts.count (mr->stmts[i]))
+-	    continue;
+-	  processed_stmts.insert (mr->stmts[i]);
+-	  if (dump_file)
+-	    {
+-	      fprintf (dump_file, "Copy stmt %d from used MR (%d):\n",
+-		       i, mr->mr_id);
+-	      print_gimple_stmt (dump_file, mr->stmts[i], 0);
+-	    }
+-	  /* Create a new copy of STMT and duplicate STMT's virtual
+-	     operands.  */
+-	  gimple *copy = gimple_copy (mr->stmts[i]);
+-	  gcc_checking_assert (!is_gimple_debug (copy));
+-
+-	  /* Remap all the operands in COPY.  */
+-	  memset (&wi, 0, sizeof (wi));
+-	  last_stmt = copy;
+-	  wi.info = copy;
+-	  walk_gimple_op (copy, remap_gimple_op_r, &wi);
+-	  if (dump_file)
+-	    {
+-	      fprintf (dump_file, "Stmt %d after remap:\n",i);
+-	      print_gimple_stmt (dump_file, copy, 0);
+-	    }
+-	  gimple_seq_add_stmt (&stmts, copy);
+-	}
++      gimple *last_stmt = gimple_copy_and_remap_memref_stmts (mr, stmts, 0,
++							      processed_stmts);
+       gcc_assert (last_stmt);
+-      mr_new_trees[mr] = gimple_assign_lhs (last_stmt);
+       if (dump_file)
+ 	{
+ 	  fprintf (dump_file, "MR (%d) new mem: ", mr->mr_id);
+@@ -1637,29 +1707,9 @@ optimize_function (cgraph_node *n, function *fn)
+       memref_t *mr = vmrs[j];
+       /* Don't need to copy the last stmt, since we insert prefetch insn
+ 	 instead of it.  */
+-      for (int i = mr->stmts.length () - 1; i >= 1 ; i--)
+-	{
+-	  if (processed_stmts.count (mr->stmts[i]))
+-	    continue;
+-	  processed_stmts.insert (mr->stmts[i]);
+-
+-	  gimple *copy = gimple_copy (mr->stmts[i]);
+-	  gcc_checking_assert (!is_gimple_debug (copy));
+-
+-	  /* Remap all the operands in COPY.  */
+-	  memset (&wi, 0, sizeof (wi));
+-	  wi.info = copy;
+-	  walk_gimple_op (copy, remap_gimple_op_r, &wi);
+-	  if (dump_file)
+-	    {
+-	      fprintf (dump_file, "Stmt %d after remap:\n",i);
+-	      print_gimple_stmt (dump_file, copy, 0);
+-	    }
+-	  gimple_seq_add_stmt (&stmts, copy);
+-	}
++      gimple_copy_and_remap_memref_stmts (mr, stmts, 1, processed_stmts);
+       gimple *last_stmt = mr->stmts[0];
+       gcc_assert (last_stmt);
+-      mr_new_trees[mr] = gimple_assign_lhs (last_stmt);
+       tree write_p = mr->is_store ? integer_one_node : integer_zero_node;
+       tree addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE);
+       if (decl_map->count (addr))
+@@ -1668,6 +1718,11 @@ optimize_function (cgraph_node *n, function *fn)
+ 				     3, addr, write_p, local);
+       pcalls.safe_push (last_stmt);
+       gimple_seq_add_stmt (&stmts, last_stmt);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Insert %d prefetch stmt:\n", j);
++	  print_gimple_stmt (dump_file, last_stmt, 0);
++	}
+     }
+ 
+   gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
+@@ -1677,6 +1732,7 @@ optimize_function (cgraph_node *n, function *fn)
+   for (unsigned i = 0; i < pcalls.length (); i++)
+     create_cgraph_edge (n, pcalls[i]);
+   ipa_update_overall_fn_summary (n);
++  renumber_gimple_stmt_uids (DECL_STRUCT_FUNCTION (n->decl));
+ 
+   return 1;
+ }
+@@ -1806,7 +1862,7 @@ pass_ipa_prefetch::gate (function *)
+ 	  /* Don't bother doing anything if the program has errors.  */
+ 	  && !seen_error ()
+ 	  && flag_lto_partition == LTO_PARTITION_ONE
+-	  /* Only enable struct optimizations in lto or whole_program.  */
++	  /* Only enable prefetch optimizations in lto or whole_program.  */
+ 	  && (in_lto_p || flag_whole_program));
+ }
+ 
+diff --git a/gcc/ipa-sra.cc b/gcc/ipa-sra.cc
+index 5355cf2f4..471b3927c 100644
+--- a/gcc/ipa-sra.cc
++++ b/gcc/ipa-sra.cc
+@@ -3393,6 +3393,13 @@ param_splitting_across_edge (cgraph_edge *cs)
+   gcc_checking_assert (from_ifs && from_ifs->m_parameters);
+ 
+   isra_call_summary *csum = call_sums->get (cs);
++  /* TODO: implement better support for call edges inserted after summary
++     collection but before sra wpa invocation.  */
++  if (!csum)
++    {
++      csum = call_sums->get_create (cs);
++      csum->m_return_ignored = true;
++    }
+   gcc_checking_assert (csum);
+   unsigned args_count = csum->m_arg_flow.length ();
+   isra_func_summary *to_ifs = func_sums->get (callee);
+diff --git a/gcc/params.opt b/gcc/params.opt
+index 5c07e3986..50385dfd7 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -314,8 +314,8 @@ Common Joined UInteger Var(param_ipa_prefetch_distance_factor) Init(4) Param Opt
+ The factor represents the number of inductive variable incrementations to evaluate an indirect memory address for IPA prefetch.
+ 
+ -param=ipa-prefetch-locality=
+-Common Joined UInteger Var(param_ipa_prefetch_locality) Init(3) Param Optimization
+-The flag represents temporal locality values in the following way: 0:pstl1strm, 1:pstl3keep, 2:pstl2keep, 3:pstl1keep.
++Common Joined UInteger Var(param_ipa_prefetch_locality) Init(3) IntegerRange(0, 3) Param Optimization
++The flag represents temporal locality value between 0 and 3, the higher value means the higher temporal locality in the data.
+ 
+ -param=ira-loop-reserved-regs=
+ Common Joined UInteger Var(param_ira_loop_reserved_regs) Init(2) Param Optimization
+diff --git a/gcc/testsuite/gcc.dg/completion-1.c b/gcc/testsuite/gcc.dg/completion-1.c
+index 64da64f1c..df2319c76 100644
+--- a/gcc/testsuite/gcc.dg/completion-1.c
++++ b/gcc/testsuite/gcc.dg/completion-1.c
+@@ -2,6 +2,7 @@
+ /* { dg-options "--completion=-fipa-ic" } */
+ 
+ /* { dg-begin-multiline-output "" }
++-fipa-ic
+ -fipa-icf
+ -fipa-icf-functions
+ -fipa-icf-variables
+diff --git a/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c b/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c
+new file mode 100644
+index 000000000..bd4fb2bdc
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-xz.c
+@@ -0,0 +1,1843 @@
++/* { dg-do link } */
++/* { dg-options "-O3 -fipa-ic -fipa-prefetch -flto -flto-partition=one -fdump-ipa-ipa_prefetch -fdump-ipa-icp" } */
++/* { dg-require-effective-target lto } */
++
++/* Based on opensource xz code.  */
++
++#include 
++#include 
++
++typedef long int ptrdiff_t;
++typedef long unsigned int size_t;
++typedef unsigned int wchar_t;
++
++typedef unsigned char __u_char;
++typedef unsigned short int __u_short;
++typedef unsigned int __u_int;
++typedef unsigned long int __u_long;
++
++typedef signed char __int8_t;
++typedef unsigned char __uint8_t;
++typedef signed short int __int16_t;
++typedef unsigned short int __uint16_t;
++typedef signed int __int32_t;
++typedef unsigned int __uint32_t;
++
++typedef signed long int __int64_t;
++typedef unsigned long int __uint64_t;
++
++typedef __int8_t __int_least8_t;
++typedef __uint8_t __uint_least8_t;
++typedef __int16_t __int_least16_t;
++typedef __uint16_t __uint_least16_t;
++typedef __int32_t __int_least32_t;
++typedef __uint32_t __uint_least32_t;
++typedef __int64_t __int_least64_t;
++typedef __uint64_t __uint_least64_t;
++
++typedef __int8_t int8_t;
++typedef __int16_t int16_t;
++typedef __int32_t int32_t;
++typedef __int64_t int64_t;
++
++typedef __uint8_t uint8_t;
++typedef __uint16_t uint16_t;
++typedef __uint32_t uint32_t;
++typedef __uint64_t uint64_t;
++
++typedef long int intptr_t;
++typedef unsigned long int uintptr_t;
++
++static inline uint16_t
++read16ne(const uint8_t *buf)
++{
++ uint16_t num;
++ memcpy(&num, buf, sizeof(num));
++ return num;
++}
++
++static inline uint32_t
++read32ne(const uint8_t *buf)
++{
++ uint32_t num;
++ memcpy(&num, buf, sizeof(num));
++ return num;
++}
++
++static inline uint16_t
++aligned_read16ne(const uint8_t *buf)
++{
++ uint16_t num;
++ memcpy(&num, __builtin_assume_aligned(buf, sizeof(num)), sizeof(num));
++ return num;
++}
++
++
++static inline uint32_t
++aligned_read32ne(const uint8_t *buf)
++{
++ uint32_t num;
++ memcpy(&num, __builtin_assume_aligned(buf, sizeof(num)), sizeof(num));
++ return num;
++}
++
++static inline uint64_t
++aligned_read64ne(const uint8_t *buf)
++{
++ uint64_t num;
++ memcpy(&num, __builtin_assume_aligned(buf, sizeof(num)), sizeof(num));
++ return num;
++}
++
++typedef unsigned char lzma_bool;
++
++typedef enum {
++ LZMA_RESERVED_ENUM = 0
++} lzma_reserved_enum;
++
++typedef enum {
++ LZMA_OK = 0,
++ LZMA_STREAM_END = 1,
++ LZMA_NO_CHECK = 2,
++ LZMA_UNSUPPORTED_CHECK = 3,
++ LZMA_GET_CHECK = 4,
++ LZMA_MEM_ERROR = 5,
++ LZMA_MEMLIMIT_ERROR = 6,
++ LZMA_FORMAT_ERROR = 7,
++ LZMA_OPTIONS_ERROR = 8,
++ LZMA_DATA_ERROR = 9,
++ LZMA_BUF_ERROR = 10,
++ LZMA_PROG_ERROR = 11,
++} lzma_ret;
++
++typedef enum {
++ LZMA_RUN = 0,
++ LZMA_SYNC_FLUSH = 1,
++ LZMA_FULL_FLUSH = 2,
++ LZMA_FULL_BARRIER = 4,
++ LZMA_FINISH = 3
++} lzma_action;
++
++typedef struct {
++ void *( *alloc)(void *opaque, size_t nmemb, size_t size);
++
++ void ( *free)(void *opaque, void *ptr);
++
++ void *opaque;
++} lzma_allocator;
++
++typedef uint64_t lzma_vli;
++
++typedef enum {
++ LZMA_CHECK_NONE = 0,
++ LZMA_CHECK_CRC32 = 1,
++ LZMA_CHECK_CRC64 = 4,
++ LZMA_CHECK_SHA256 = 10
++} lzma_check;
++
++typedef struct {
++ lzma_vli id;
++ void *options;
++} lzma_filter;
++
++typedef enum {
++ LZMA_MF_HC3 = 0x03,
++ LZMA_MF_HC4 = 0x04,
++ LZMA_MF_BT2 = 0x12,
++ LZMA_MF_BT3 = 0x13,
++ LZMA_MF_BT4 = 0x14
++} lzma_match_finder;
++
++typedef struct lzma_next_coder_s lzma_next_coder;
++
++typedef struct lzma_filter_info_s lzma_filter_info;
++
++typedef lzma_ret (*lzma_init_function)(
++  lzma_next_coder *next, const lzma_allocator *allocator,
++  const lzma_filter_info *filters);
++
++typedef lzma_ret (*lzma_code_function)(
++  void *coder, const lzma_allocator *allocator,
++  const uint8_t *restrict in, size_t *restrict in_pos,
++  size_t in_size, uint8_t *restrict out,
++  size_t *restrict out_pos, size_t out_size,
++  lzma_action action);
++
++typedef void (*lzma_end_function)(
++  void *coder, const lzma_allocator *allocator);
++
++struct lzma_filter_info_s {
++ lzma_vli id;
++ lzma_init_function init;
++ void *options;
++};
++
++struct lzma_next_coder_s {
++ void *coder;
++ lzma_vli id;
++ uintptr_t init;
++
++ lzma_code_function code;
++ lzma_end_function end;
++ void (*get_progress)(void *coder,
++   uint64_t *progress_in, uint64_t *progress_out);
++
++ lzma_check (*get_check)(const void *coder);
++ lzma_ret (*memconfig)(void *coder, uint64_t *memusage,
++   uint64_t *old_memlimit, uint64_t new_memlimit);
++ lzma_ret (*update)(void *coder, const lzma_allocator *allocator,
++   const lzma_filter *filters, const lzma_filter *reversed_filters);
++};
++
++typedef struct {
++ uint32_t len;
++ uint32_t dist;
++} lzma_match;
++
++typedef struct lzma_mf_s lzma_mf;
++struct lzma_mf_s {
++ uint8_t *buffer;
++ uint32_t size;
++ uint32_t keep_size_before;
++ uint32_t keep_size_after;
++ uint32_t offset;
++ uint32_t read_pos;
++ uint32_t read_ahead;
++ uint32_t read_limit;
++ uint32_t write_pos;
++ uint32_t pending;
++ uint32_t (*find)(lzma_mf *mf, lzma_match *matches);
++ void (*skip)(lzma_mf *mf, uint32_t num);
++ uint32_t *hash;
++ uint32_t *son;
++ uint32_t cyclic_pos;
++ uint32_t cyclic_size;
++ uint32_t hash_mask;
++ uint32_t depth;
++ uint32_t nice_len;
++ uint32_t match_len_max;
++ lzma_action action;
++ uint32_t hash_count;
++ uint32_t sons_count;
++};
++
++typedef struct {
++ size_t before_size;
++ size_t dict_size;
++ size_t after_size;
++ size_t match_len_max;
++ size_t nice_len;
++ lzma_match_finder match_finder;
++ uint32_t depth;
++ const uint8_t *preset_dict;
++ uint32_t preset_dict_size;
++} lzma_lz_options;
++
++typedef struct {
++ void *coder;
++ lzma_ret (*code)(void *coder,
++   lzma_mf *restrict mf, uint8_t *restrict out,
++   size_t *restrict out_pos, size_t out_size);
++ void (*end)(void *coder, const lzma_allocator *allocator);
++ lzma_ret (*options_update)(void *coder, const lzma_filter *filter);
++} lzma_lz_encoder;
++
++static inline const uint8_t *
++mf_ptr(const lzma_mf *mf)
++{
++ return mf->buffer + mf->read_pos;
++}
++
++static inline uint32_t
++mf_avail(const lzma_mf *mf)
++{
++ return mf->write_pos - mf->read_pos;
++}
++
++typedef struct {
++ uint32_t state[8];
++ uint64_t size;
++} lzma_sha256_state;
++
++typedef struct {
++ union {
++  uint8_t u8[64];
++  uint32_t u32[16];
++  uint64_t u64[8];
++ } buffer;
++ union {
++  uint32_t crc32;
++  uint64_t crc64;
++  lzma_sha256_state sha256;
++ } state;
++} lzma_check_state;
++
++// The table is constantly initialized in the original code.
++// Skip it in the test.
++const uint32_t lzma_crc32_table[8][256];
++
++static inline uint32_t __attribute__((__always_inline__))
++lzma_memcmplen(const uint8_t *buf1, const uint8_t *buf2,
++  uint32_t len, uint32_t limit)
++{
++ while (len < limit) {
++  uint32_t x = read32ne(buf1 + len) - read32ne(buf2 + len);
++  if (x != 0) {
++   if ((x & 0xFFFF) == 0) {
++    len += 2;
++    x >>= 16;
++   }
++
++   if ((x & 0xFF) == 0)
++    ++len;
++
++   return ((len) < (limit) ? (len) : (limit));
++  }
++
++  len += 4;
++ }
++
++ return limit;
++}
++
++extern uint32_t
++lzma_mf_find(lzma_mf *mf, uint32_t *count_ptr, lzma_match *matches)
++{
++ const uint32_t count = mf->find(mf, matches);
++ uint32_t len_best = 0;
++
++ if (count > 0) {
++  len_best = matches[count - 1].len;
++  if (len_best == mf->nice_len) {
++   uint32_t limit = mf_avail(mf) + 1;
++   if (limit > mf->match_len_max)
++    limit = mf->match_len_max;
++   const uint8_t *p1 = mf_ptr(mf) - 1;
++   const uint8_t *p2 = p1 - matches[count - 1].dist - 1;
++   len_best = lzma_memcmplen(p1, p2, len_best, limit);
++  }
++ }
++
++ *count_ptr = count;
++ ++mf->read_ahead;
++
++ return len_best;
++}
++
++static void
++normalize(lzma_mf *mf)
++{
++ const uint32_t subvalue = ((4294967295U) - mf->cyclic_size);
++
++ for (uint32_t i = 0; i < mf->hash_count; ++i) {
++  if (mf->hash[i] <= subvalue)
++   mf->hash[i] = 0;
++  else
++   mf->hash[i] -= subvalue;
++ }
++
++ for (uint32_t i = 0; i < mf->sons_count; ++i) {
++  if (mf->son[i] <= subvalue)
++   mf->son[i] = 0;
++  else
++   mf->son[i] -= subvalue;
++ }
++
++ mf->offset -= subvalue;
++ return;
++}
++
++static void
++move_pos(lzma_mf *mf)
++{
++ if (++mf->cyclic_pos == mf->cyclic_size)
++  mf->cyclic_pos = 0;
++ ++mf->read_pos;
++ if (__builtin_expect(mf->read_pos + mf->offset == (4294967295U), 0 ))
++  normalize(mf);
++}
++
++static void
++move_pending(lzma_mf *mf)
++{
++ ++mf->read_pos;
++ ++mf->pending;
++}
++
++static lzma_match *
++hc_find_func(
++  const uint32_t len_limit,
++  const uint32_t pos,
++  const uint8_t *const cur,
++  uint32_t cur_match,
++  uint32_t depth,
++  uint32_t *const son,
++  const uint32_t cyclic_pos,
++  const uint32_t cyclic_size,
++  lzma_match *matches,
++  uint32_t len_best)
++{
++ son[cyclic_pos] = cur_match;
++
++ while (1) {
++  const uint32_t delta = pos - cur_match;
++  if (depth-- == 0 || delta >= cyclic_size)
++   return matches;
++
++  const uint8_t *const pb = cur - delta;
++  cur_match = son[cyclic_pos - delta
++    + (delta > cyclic_pos ? cyclic_size : 0)];
++
++  if (pb[len_best] == cur[len_best] && pb[0] == cur[0]) {
++   uint32_t len = lzma_memcmplen(pb, cur, 1, len_limit);
++
++   if (len_best < len) {
++    len_best = len;
++    matches->len = len;
++    matches->dist = delta - 1;
++    ++matches;
++
++    if (len == len_limit)
++     return matches;
++   }
++  }
++ }
++}
++
++extern uint32_t
++lzma_mf_hc3_find(lzma_mf *mf, lzma_match *matches)
++{
++ uint32_t len_limit = mf_avail(mf);
++ if (mf->nice_len <= len_limit) {
++  len_limit = mf->nice_len;
++ } else if (len_limit < (3)) {
++  move_pending(mf);
++  return 0;
++ }
++ const uint8_t *cur = mf_ptr(mf);
++ const uint32_t pos = mf->read_pos + mf->offset;
++ uint32_t matches_count = 0;
++
++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++ const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask;
++
++ const uint32_t delta2 = pos - mf->hash[hash_2_value];
++ const uint32_t cur_match = mf->hash[((1U << 10)) + hash_value];
++
++ mf->hash[hash_2_value] = pos;
++ mf->hash[((1U << 10)) + hash_value] = pos;
++
++ uint32_t len_best = 2;
++
++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
++  len_best = lzma_memcmplen(cur - delta2, cur, len_best, len_limit);
++
++  matches[0].len = len_best;
++  matches[0].dist = delta2 - 1;
++  matches_count = 1;
++
++  if (len_best == len_limit) {
++   mf->son[mf->cyclic_pos] = cur_match;
++   move_pos(mf);
++   return 1;
++  }
++ }
++
++ matches_count = hc_find_func(len_limit, pos, cur, cur_match, mf->depth,
++			      mf->son, mf->cyclic_pos, mf->cyclic_size,
++			      matches + matches_count, len_best) - matches;
++ move_pos(mf);
++ return matches_count;
++}
++
++extern void
++lzma_mf_hc3_skip(lzma_mf *mf, uint32_t amount)
++{
++ do {
++  if (mf_avail(mf) < 3) {
++   move_pending(mf);
++   continue;
++  }
++
++  const uint8_t *cur = mf_ptr(mf);
++  const uint32_t pos = mf->read_pos + mf->offset;
++
++  const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++  const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++  const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask;
++
++  const uint32_t cur_match
++    = mf->hash[((1U << 10)) + hash_value];
++
++  mf->hash[hash_2_value] = pos;
++  mf->hash[((1U << 10)) + hash_value] = pos;
++
++  do { mf->son[mf->cyclic_pos] = cur_match; move_pos(mf); } while (0);
++
++ } while (--amount != 0);
++}
++
++extern uint32_t
++lzma_mf_hc4_find(lzma_mf *mf, lzma_match *matches)
++{
++ uint32_t len_limit = mf_avail(mf);
++ if (mf->nice_len <= len_limit) {
++  len_limit = mf->nice_len;
++ } else if (len_limit < (4)) {
++  move_pending(mf);
++  return 0;
++ }
++ const uint8_t *cur = mf_ptr(mf);
++ const uint32_t pos = mf->read_pos + mf->offset;
++ uint32_t matches_count = 0;
++
++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++ const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++ const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8))
++				& ((1U << 16) - 1);
++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)
++				      ^ (lzma_crc32_table[0][cur[3]] << 5))
++			      & mf->hash_mask;
++ uint32_t delta2 = pos - mf->hash[hash_2_value];
++ const uint32_t delta3
++   = pos - mf->hash[((1U << 10)) + hash_3_value];
++ const uint32_t cur_match = mf->hash[((1U << 10) + (1U << 16)) + hash_value];
++
++ mf->hash[hash_2_value ] = pos;
++ mf->hash[((1U << 10)) + hash_3_value] = pos;
++ mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos;
++
++ uint32_t len_best = 1;
++
++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
++  len_best = 2;
++  matches[0].len = 2;
++  matches[0].dist = delta2 - 1;
++  matches_count = 1;
++ }
++
++ if (delta2 != delta3 && delta3 < mf->cyclic_size
++   && *(cur - delta3) == *cur) {
++  len_best = 3;
++  matches[matches_count++].dist = delta3 - 1;
++  delta2 = delta3;
++ }
++
++ if (matches_count != 0) {
++  len_best = lzma_memcmplen(cur - delta2, cur,
++    len_best, len_limit);
++
++  matches[matches_count - 1].len = len_best;
++
++  if (len_best == len_limit) {
++   mf->son[mf->cyclic_pos] = cur_match; move_pos(mf);
++   return matches_count;
++  }
++ }
++
++ if (len_best < 3)
++  len_best = 3;
++
++ matches_count = hc_find_func(len_limit, pos, cur, cur_match, mf->depth,
++			      mf->son, mf->cyclic_pos, mf->cyclic_size,
++			      matches + matches_count, len_best) - matches;
++ move_pos(mf);
++ return matches_count;
++}
++
++extern void
++lzma_mf_hc4_skip(lzma_mf *mf, uint32_t amount)
++{
++ do {
++  if (mf_avail(mf) < 4) {
++   move_pending(mf);
++   continue;
++  }
++
++  const uint8_t *cur = mf_ptr(mf);
++  const uint32_t pos = mf->read_pos + mf->offset;
++
++  const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++  const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++  const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & ((1U << 16) - 1);
++  const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)
++				       ^ (lzma_crc32_table[0][cur[3]] << 5))
++			       & mf->hash_mask;
++
++  const uint32_t cur_match
++    = mf->hash[((1U << 10) + (1U << 16)) + hash_value];
++
++  mf->hash[hash_2_value] = pos;
++  mf->hash[((1U << 10)) + hash_3_value] = pos;
++  mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos;
++
++  mf->son[mf->cyclic_pos] = cur_match;
++  move_pos(mf);
++ } while (--amount != 0);
++}
++
++static lzma_match *
++bt_find_func(
++  const uint32_t len_limit,
++  const uint32_t pos,
++  const uint8_t *const cur,
++  uint32_t cur_match,
++  uint32_t depth,
++  uint32_t *const son,
++  const uint32_t cyclic_pos,
++  const uint32_t cyclic_size,
++  lzma_match *matches,
++  uint32_t len_best)
++{
++ uint32_t *ptr0 = son + (cyclic_pos << 1) + 1;
++ uint32_t *ptr1 = son + (cyclic_pos << 1);
++
++ uint32_t len0 = 0;
++ uint32_t len1 = 0;
++
++ while (1) {
++  const uint32_t delta = pos - cur_match;
++  if (depth-- == 0 || delta >= cyclic_size) {
++   *ptr0 = 0;
++   *ptr1 = 0;
++   return matches;
++  }
++
++  uint32_t *const pair = son + ((cyclic_pos - delta
++    + (delta > cyclic_pos ? cyclic_size : 0))
++    << 1);
++
++  const uint8_t *const pb = cur - delta;
++  uint32_t len = ((len0) < (len1) ? (len0) : (len1));
++
++  if (pb[len] == cur[len]) {
++   len = lzma_memcmplen(pb, cur, len + 1, len_limit);
++
++   if (len_best < len) {
++    len_best = len;
++    matches->len = len;
++    matches->dist = delta - 1;
++    ++matches;
++
++    if (len == len_limit) {
++     *ptr1 = pair[0];
++     *ptr0 = pair[1];
++     return matches;
++    }
++   }
++  }
++
++  if (pb[len] < cur[len]) {
++   *ptr1 = cur_match;
++   ptr1 = pair + 1;
++   cur_match = *ptr1;
++   len1 = len;
++  } else {
++   *ptr0 = cur_match;
++   ptr0 = pair;
++   cur_match = *ptr0;
++   len0 = len;
++  }
++ }
++}
++
++
++static void
++bt_skip_func(
++  const uint32_t len_limit,
++  const uint32_t pos,
++  const uint8_t *const cur,
++  uint32_t cur_match,
++  uint32_t depth,
++  uint32_t *const son,
++  const uint32_t cyclic_pos,
++  const uint32_t cyclic_size)
++{
++ uint32_t *ptr0 = son + (cyclic_pos << 1) + 1;
++ uint32_t *ptr1 = son + (cyclic_pos << 1);
++
++ uint32_t len0 = 0;
++ uint32_t len1 = 0;
++
++ while (1) {
++  const uint32_t delta = pos - cur_match;
++  if (depth-- == 0 || delta >= cyclic_size) {
++   *ptr0 = 0;
++   *ptr1 = 0;
++   return;
++  }
++
++  uint32_t *pair = son + ((cyclic_pos - delta
++    + (delta > cyclic_pos ? cyclic_size : 0))
++    << 1);
++  const uint8_t *pb = cur - delta;
++  uint32_t len = ((len0) < (len1) ? (len0) : (len1));
++
++  if (pb[len] == cur[len]) {
++   len = lzma_memcmplen(pb, cur, len + 1, len_limit);
++
++   if (len == len_limit) {
++    *ptr1 = pair[0];
++    *ptr0 = pair[1];
++    return;
++   }
++  }
++
++  if (pb[len] < cur[len]) {
++   *ptr1 = cur_match;
++   ptr1 = pair + 1;
++   cur_match = *ptr1;
++   len1 = len;
++  } else {
++   *ptr0 = cur_match;
++   ptr0 = pair;
++   cur_match = *ptr0;
++   len0 = len;
++  }
++ }
++}
++
++extern uint32_t
++lzma_mf_bt2_find(lzma_mf *mf, lzma_match *matches)
++{
++ uint32_t len_limit = mf_avail(mf);
++ if (mf->nice_len <= len_limit) {
++  len_limit = mf->nice_len;
++ } else if (len_limit < (2) || (mf->action == LZMA_SYNC_FLUSH)) {
++  move_pending(mf);
++  return 0;
++ }
++ const uint8_t *cur = mf_ptr(mf);
++ const uint32_t pos = mf->read_pos + mf->offset;
++ uint32_t matches_count = 0;
++ const uint32_t hash_value = read16ne(cur);
++ const uint32_t cur_match = mf->hash[hash_value];
++ mf->hash[hash_value] = pos;
++
++ matches_count = bt_find_func(len_limit, pos, cur, cur_match, mf->depth,
++                              mf->son, mf->cyclic_pos, mf->cyclic_size,
++                              matches + matches_count, 1) - matches;
++ move_pos(mf);
++ return matches_count;
++}
++
++extern void
++lzma_mf_bt2_skip(lzma_mf *mf, uint32_t amount)
++{
++ do {
++  uint32_t len_limit = mf_avail(mf);
++  if (mf->nice_len <= len_limit) {
++   len_limit = mf->nice_len;
++  } else if (len_limit < (2) || (mf->action == LZMA_SYNC_FLUSH)) { 
++   move_pending(mf);
++   continue;
++  }
++  const uint8_t *cur = mf_ptr(mf);
++  const uint32_t pos = mf->read_pos + mf->offset;
++
++  const uint32_t hash_value = read16ne(cur);
++  const uint32_t cur_match = mf->hash[hash_value];
++  mf->hash[hash_value] = pos;
++
++  bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son,
++	       mf->cyclic_pos, mf->cyclic_size);
++  move_pos(mf);
++ } while (--amount != 0);
++}
++
++extern uint32_t
++lzma_mf_bt3_find(lzma_mf *mf, lzma_match *matches)
++{
++ uint32_t len_limit = mf_avail(mf);
++ if (mf->nice_len <= len_limit) {
++  len_limit = mf->nice_len;
++ } else if (len_limit < (3) || (1 && mf->action == LZMA_SYNC_FLUSH)) { 
++  move_pending(mf);
++  return 0;
++ }
++ const uint8_t *cur = mf_ptr(mf);
++ const uint32_t pos = mf->read_pos + mf->offset;
++ uint32_t matches_count = 0;
++
++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++ const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask;
++
++ const uint32_t delta2 = pos - mf->hash[hash_2_value];
++ const uint32_t cur_match = mf->hash[((1U << 10)) + hash_value];
++
++ mf->hash[hash_2_value] = pos;
++ mf->hash[((1U << 10)) + hash_value] = pos;
++
++ uint32_t len_best = 2;
++
++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
++  len_best = lzma_memcmplen(
++    cur, cur - delta2, len_best, len_limit);
++
++  matches[0].len = len_best;
++  matches[0].dist = delta2 - 1;
++  matches_count = 1;
++
++  if (len_best == len_limit) {
++   bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son,
++		mf->cyclic_pos, mf->cyclic_size);
++   move_pos(mf);
++   return 1;
++  }
++ }
++
++ matches_count = bt_find_func(len_limit, pos, cur, cur_match, mf->depth,
++			      mf->son, mf->cyclic_pos, mf->cyclic_size,
++			      matches + matches_count, len_best) - matches;
++ move_pos(mf);
++ return matches_count;
++}
++
++
++extern void
++lzma_mf_bt3_skip(lzma_mf *mf, uint32_t amount)
++{
++ do {
++  uint32_t len_limit = mf_avail(mf);
++  if (mf->nice_len <= len_limit) {
++    len_limit = mf->nice_len; }
++  else if (len_limit < (3) || (1 && mf->action == LZMA_SYNC_FLUSH)) { 
++    move_pending(mf);
++    continue;
++  }
++  const uint8_t *cur = mf_ptr(mf);
++  const uint32_t pos = mf->read_pos + mf->offset;
++
++  const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++  const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++  const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & mf->hash_mask;
++
++  const uint32_t cur_match = mf->hash[((1U << 10)) + hash_value];
++
++  mf->hash[hash_2_value] = pos;
++  mf->hash[((1U << 10)) + hash_value] = pos;
++
++  bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son,
++	       mf->cyclic_pos, mf->cyclic_size); 
++  move_pos(mf);
++ } while (--amount != 0);
++}
++
++extern uint32_t
++lzma_mf_bt4_find(lzma_mf *mf, lzma_match *matches)
++{
++ uint32_t len_limit = mf->write_pos - mf->read_pos;
++ if (mf->nice_len <= len_limit) {
++  len_limit = mf->nice_len;
++ } else if (len_limit < (4) || (mf->action == LZMA_SYNC_FLUSH)) {
++  ++mf->read_pos;
++  ++mf->pending;
++  return 0;
++ }
++
++ const uint8_t *cur = mf->buffer + mf->read_pos;
++ const uint32_t pos = mf->read_pos + mf->offset;
++ uint32_t matches_count = 0;
++
++ const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++ const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++ const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8)) & ((1U << 16) - 1);
++ const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)
++				      ^ (lzma_crc32_table[0][cur[3]] << 5))
++			      & mf->hash_mask;
++
++ uint32_t delta2 = pos - mf->hash[hash_2_value];
++ const uint32_t delta3 = pos - mf->hash[((1U << 10)) + hash_3_value];
++ const uint32_t cur_match = mf->hash[((1U << 10) + (1U << 16)) + hash_value];
++
++ mf->hash[hash_2_value] = pos;
++ mf->hash[((1U << 10)) + hash_3_value] = pos;
++ mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos;
++
++ uint32_t len_best = 1;
++
++ if (delta2 < mf->cyclic_size && *(cur - delta2) == *cur) {
++  len_best = 2;
++  matches[0].len = 2;
++  matches[0].dist = delta2 - 1;
++  matches_count = 1;
++ }
++
++ if (delta2 != delta3 && delta3 < mf->cyclic_size && *(cur - delta3) == *cur) {
++  len_best = 3;
++  matches[matches_count++].dist = delta3 - 1;
++  delta2 = delta3;
++ }
++
++ if (matches_count != 0) {
++  len_best = lzma_memcmplen(cur, cur - delta2, len_best, len_limit);
++
++  matches[matches_count - 1].len = len_best;
++
++  if (len_best == len_limit) {
++    bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son,
++		 mf->cyclic_pos, mf->cyclic_size);
++    move_pos(mf);
++    return matches_count;
++  }
++ }
++
++ if (len_best < 3)
++  len_best = 3;
++
++ matches_count = bt_find_func(len_limit, pos, cur, cur_match, mf->depth, mf->son,
++                              mf->cyclic_pos, mf->cyclic_size,
++                              matches + matches_count, len_best) - matches;
++ move_pos(mf);
++ return matches_count;
++}
++
++extern void
++lzma_mf_bt4_skip(lzma_mf *mf, uint32_t amount)
++{
++ do {
++  uint32_t len_limit = mf_avail(mf);
++  if (mf->nice_len <= len_limit) {
++   len_limit = mf->nice_len;
++  } else if (len_limit < (4) || (mf->action == LZMA_SYNC_FLUSH)) {
++   move_pending(mf);
++   continue;
++  }
++
++  const uint8_t *cur = mf->buffer + mf->read_pos;
++  const uint32_t pos = mf->read_pos + mf->offset;
++
++  const uint32_t temp = lzma_crc32_table[0][cur[0]] ^ cur[1];
++  const uint32_t hash_2_value = temp & ((1U << 10) - 1);
++  const uint32_t hash_3_value = (temp ^ ((uint32_t)(cur[2]) << 8))
++				& ((1U << 16) - 1);
++  const uint32_t hash_value = (temp ^ ((uint32_t)(cur[2]) << 8)
++				       ^ (lzma_crc32_table[0][cur[3]] << 5))
++			       & mf->hash_mask;
++
++  const uint32_t cur_match = mf->hash[((1U << 10) + (1U << 16)) + hash_value];
++
++  mf->hash[hash_2_value] = pos;
++  mf->hash[((1U << 10)) + hash_3_value] = pos;
++  mf->hash[((1U << 10) + (1U << 16)) + hash_value] = pos;
++
++  bt_skip_func(len_limit, pos, cur, cur_match, mf->depth, mf->son,
++	       mf->cyclic_pos, mf->cyclic_size);
++  move_pos(mf);
++ } while (--amount != 0);
++}
++
++static inline void
++mf_skip(lzma_mf *mf, uint32_t amount)
++{
++ if (amount != 0) {
++  mf->skip(mf, amount);
++  mf->read_ahead += amount;
++ }
++}
++
++typedef struct lzma_lzma1_encoder_s lzma_lzma1_encoder;
++typedef uint16_t probability;
++
++typedef struct {
++ probability choice;
++ probability choice2;
++ probability low[(1 << 4)][(1 << 3)];
++ probability mid[(1 << 4)][(1 << 3)];
++ probability high[(1 << 8)];
++ uint32_t prices[(1 << 4)][((1 << 3) + (1 << 3) + (1 << 8))];
++ uint32_t table_size;
++ uint32_t counters[(1 << 4)];
++} lzma_length_encoder;
++
++typedef struct {
++ uint64_t low;
++ uint64_t cache_size;
++ uint32_t range;
++ uint8_t cache;
++ size_t count;
++ size_t pos;
++
++ enum {
++  RC_BIT_0,
++  RC_BIT_1,
++  RC_DIRECT_0,
++  RC_DIRECT_1,
++  RC_FLUSH,
++ } symbols[58];
++
++ probability *probs[58];
++} lzma_range_encoder;
++
++
++typedef enum {
++ STATE_LIT_LIT,
++ STATE_MATCH_LIT_LIT,
++ STATE_REP_LIT_LIT,
++ STATE_SHORTREP_LIT_LIT,
++ STATE_MATCH_LIT,
++ STATE_REP_LIT,
++ STATE_SHORTREP_LIT,
++ STATE_LIT_MATCH,
++ STATE_LIT_LONGREP,
++ STATE_LIT_SHORTREP,
++ STATE_NONLIT_MATCH,
++ STATE_NONLIT_REP,
++} lzma_lzma_state;
++
++typedef struct {
++ lzma_lzma_state state;
++ _Bool prev_1_is_literal;
++ _Bool prev_2;
++
++ uint32_t pos_prev_2;
++ uint32_t back_prev_2;
++
++ uint32_t price;
++ uint32_t pos_prev;
++ uint32_t back_prev;
++
++ uint32_t backs[4];
++} lzma_optimal;
++
++struct lzma_lzma1_encoder_s {
++ lzma_range_encoder rc;
++ lzma_lzma_state state;
++ uint32_t reps[4];
++ lzma_match matches[(2 + ((1 << 3) + (1 << 3) + (1 << 8)) - 1) + 1];
++ uint32_t matches_count;
++ uint32_t longest_match_length;
++ _Bool fast_mode;
++ _Bool is_initialized;
++ _Bool is_flushed;
++ uint32_t pos_mask;
++ uint32_t literal_context_bits;
++ uint32_t literal_pos_mask;
++
++ probability literal[(1 << 4)][0x300];
++ probability is_match[12][(1 << 4)];
++ probability is_rep[12];
++ probability is_rep0[12];
++ probability is_rep1[12];
++ probability is_rep2[12];
++ probability is_rep0_long[12][(1 << 4)];
++ probability dist_slot[4][(1 << 6)];
++ probability dist_special[(1 << (14 / 2)) - 14];
++ probability dist_align[(1 << 4)];
++
++ lzma_length_encoder match_len_encoder;
++ lzma_length_encoder rep_len_encoder;
++
++ uint32_t dist_slot_prices[4][(1 << 6)];
++ uint32_t dist_prices[4][(1 << (14 / 2))];
++ uint32_t dist_table_size;
++ uint32_t match_price_count;
++
++ uint32_t align_prices[(1 << 4)];
++ uint32_t align_price_count;
++ uint32_t opts_end_index;
++ uint32_t opts_current_index;
++ lzma_optimal opts[(1 << 12)];
++};
++
++extern void
++lzma_lzma_optimum_fast(lzma_lzma1_encoder *restrict coder,
++  lzma_mf *restrict mf,
++  uint32_t *restrict back_res, uint32_t *restrict len_res)
++{
++ const uint32_t nice_len = mf->nice_len;
++
++ uint32_t len_main;
++ uint32_t matches_count;
++ if (mf->read_ahead == 0) {
++  len_main = lzma_mf_find(mf, &matches_count, coder->matches);
++ } else {
++  len_main = coder->longest_match_length;
++  matches_count = coder->matches_count;
++ }
++
++ const uint8_t *buf = mf_ptr(mf) - 1;
++ const uint32_t buf_avail
++   = ((mf_avail(mf) + 1) < ((2 + ((1 << 3) + (1 << 3) + (1 << 8)) - 1))
++      ? (mf_avail(mf) + 1) : ((2 + ((1 << 3) + (1 << 3) + (1 << 8)) - 1)));
++
++ if (buf_avail < 2) {
++  *back_res = (4294967295U);
++  *len_res = 1;
++  return;
++ }
++
++ uint32_t rep_len = 0;
++ uint32_t rep_index = 0;
++
++ for (uint32_t i = 0; i < 4; ++i) {
++  const uint8_t *const buf_back = buf - coder->reps[i] - 1;
++  if ((read16ne(buf) != read16ne(buf_back)))
++   continue;
++  const uint32_t len = lzma_memcmplen(buf, buf_back, 2, buf_avail);
++  if (len >= nice_len) {
++   *back_res = i;
++   *len_res = len;
++   mf_skip(mf, len - 1);
++   return;
++  }
++  if (len > rep_len) {
++   rep_index = i;
++   rep_len = len;
++  }
++ }
++ if (len_main >= nice_len) {
++  *back_res = coder->matches[matches_count - 1].dist + 4;
++  *len_res = len_main;
++  mf_skip(mf, len_main - 1);
++  return;
++ }
++
++ uint32_t back_main = 0;
++ if (len_main >= 2) {
++  back_main = coder->matches[matches_count - 1].dist;
++  while (matches_count > 1 && len_main ==
++    coder->matches[matches_count - 2].len + 1) {
++   if (!(((back_main) >> 7) > (coder->matches[ matches_count - 2].dist)))
++    break;
++   --matches_count;
++   len_main = coder->matches[matches_count - 1].len;
++   back_main = coder->matches[matches_count - 1].dist;
++  }
++  if (len_main == 2 && back_main >= 0x80)
++   len_main = 1;
++ }
++
++ if (rep_len >= 2) {
++  if (rep_len + 1 >= len_main
++    || (rep_len + 2 >= len_main
++     && back_main > (1U << 9))
++    || (rep_len + 3 >= len_main
++     && back_main > (1U << 15))) {
++   *back_res = rep_index;
++   *len_res = rep_len;
++   mf_skip(mf, rep_len - 1);
++   return;
++  }
++ }
++
++ if (len_main < 2 || buf_avail <= 2) {
++  *back_res = (4294967295U);
++  *len_res = 1;
++  return;
++ }
++
++ coder->longest_match_length = lzma_mf_find(mf,
++   &coder->matches_count, coder->matches);
++
++ if (coder->longest_match_length >= 2) {
++  const uint32_t new_dist = coder->matches[
++    coder->matches_count - 1].dist;
++
++  if ((coder->longest_match_length >= len_main
++     && new_dist < back_main)
++    || (coder->longest_match_length == len_main + 1
++     && !(((new_dist) >> 7) > (back_main)))
++    || (coder->longest_match_length > len_main + 1)
++    || (coder->longest_match_length + 1 >= len_main
++     && len_main >= 3
++     && (((back_main) >> 7) > (new_dist)))) {
++   *back_res = (4294967295U);
++   *len_res = 1;
++   return;
++  }
++ }
++ ++buf;
++ const uint32_t limit = ((2) > (len_main - 1) ? (2) : (len_main - 1));
++ for (uint32_t i = 0; i < 4; ++i) {
++  if (memcmp(buf, buf - coder->reps[i] - 1, limit) == 0) {
++   *back_res = (4294967295U);
++   *len_res = 1;
++   return;
++  }
++ }
++
++ *back_res = back_main + 4;
++ *len_res = len_main;
++ mf_skip(mf, len_main - 2);
++ return;
++}
++
++static inline void
++rc_bit(lzma_range_encoder *rc, probability *prob, uint32_t bit)
++{
++ rc->symbols[rc->count] = bit;
++ rc->probs[rc->count] = prob;
++ ++rc->count;
++}
++
++static inline void
++rc_bittree(lzma_range_encoder *rc, probability *probs,
++  uint32_t bit_count, uint32_t symbol)
++{
++ uint32_t model_index = 1;
++
++ do {
++  const uint32_t bit = (symbol >> --bit_count) & 1;
++  rc_bit(rc, &probs[model_index], bit);
++  model_index = (model_index << 1) + bit;
++ } while (bit_count != 0);
++}
++
++static _Bool
++encode_init(lzma_lzma1_encoder *coder, lzma_mf *mf)
++{
++ if (mf->read_pos == mf->read_limit) {
++  if (mf->action == LZMA_RUN)
++   return 0;
++ } else {
++  mf_skip(mf, 1);
++  mf->read_ahead = 0;
++  rc_bit(&coder->rc, &coder->is_match[0][0], 0);
++  rc_bittree(&coder->rc, coder->literal[0], 8, mf->buffer[0]);
++ }
++
++ coder->is_initialized = 1;
++
++ return 1;
++}
++
++static inline uint32_t
++mf_position(const lzma_mf *mf)
++{
++ return mf->read_pos - mf->read_ahead;
++}
++
++static inline _Bool
++rc_shift_low(lzma_range_encoder *rc,
++  uint8_t *out, size_t *out_pos, size_t out_size)
++{
++ if ((uint32_t)(rc->low) < (uint32_t)(0xFF000000)
++   || (uint32_t)(rc->low >> 32) != 0) {
++  do {
++   if (*out_pos == out_size)
++    return 1;
++
++   out[*out_pos] = rc->cache + (uint8_t)(rc->low >> 32);
++   ++*out_pos;
++   rc->cache = 0xFF;
++  } while (--rc->cache_size != 0);
++  rc->cache = (rc->low >> 24) & 0xFF;
++ }
++
++ ++rc->cache_size;
++ rc->low = (rc->low & 0x00FFFFFF) << 8;
++ return 0;
++}
++
++static inline void
++rc_reset(lzma_range_encoder *rc)
++{
++ rc->low = 0;
++ rc->cache_size = 1;
++ rc->range = (4294967295U);
++ rc->cache = 0;
++ rc->count = 0;
++ rc->pos = 0;
++}
++
++static inline _Bool
++rc_encode(lzma_range_encoder *rc,
++  uint8_t *out, size_t *out_pos, size_t out_size)
++{
++ while (rc->pos < rc->count) {
++  if (rc->range < (1U << 24)) {
++   if (rc_shift_low(rc, out, out_pos, out_size))
++    return 1;
++   rc->range <<= 8;
++  }
++
++  switch (rc->symbols[rc->pos]) {
++  case RC_BIT_0: {
++   probability prob = *rc->probs[rc->pos];
++   rc->range = (rc->range >> 11)
++     * prob;
++   prob += ((1U << 11) - prob) >> 5;
++   *rc->probs[rc->pos] = prob;
++   break;
++  }
++
++  case RC_BIT_1: {
++   probability prob = *rc->probs[rc->pos];
++   const uint32_t bound = prob * (rc->range
++     >> 11);
++   rc->low += bound;
++   rc->range -= bound;
++   prob -= prob >> 5;
++   *rc->probs[rc->pos] = prob;
++   break;
++  }
++
++  case RC_DIRECT_0:
++   rc->range >>= 1;
++   break;
++
++  case RC_DIRECT_1:
++   rc->range >>= 1;
++   rc->low += rc->range;
++   break;
++
++  case RC_FLUSH:
++   rc->range = (4294967295U);
++   do {
++    if (rc_shift_low(rc, out, out_pos, out_size))
++     return 1;
++   } while (++rc->pos < rc->count);
++
++   rc_reset(rc);
++   return 0;
++
++  default:
++   break;
++  }
++  ++rc->pos;
++ }
++
++ rc->count = 0;
++ rc->pos = 0;
++ return 0;
++}
++
++static inline uint64_t
++rc_pending(const lzma_range_encoder *rc)
++{
++ return rc->cache_size + 5 - 1;
++}
++
++static inline void
++literal_matched(lzma_range_encoder *rc, probability *subcoder,
++  uint32_t match_byte, uint32_t symbol)
++{
++ uint32_t offset = 0x100;
++ symbol += 1U << 8;
++
++ do {
++  match_byte <<= 1;
++  const uint32_t match_bit = match_byte & offset;
++  const uint32_t subcoder_index
++    = offset + match_bit + (symbol >> 8);
++  const uint32_t bit = (symbol >> 7) & 1;
++  rc_bit(rc, &subcoder[subcoder_index], bit);
++
++  symbol <<= 1;
++  offset &= ~(match_byte ^ symbol);
++
++ } while (symbol < (1U << 16));
++}
++
++static inline void
++literal(lzma_lzma1_encoder *coder, lzma_mf *mf, uint32_t position)
++{
++ const uint8_t cur_byte = mf->buffer[mf->read_pos - mf->read_ahead];
++ probability *subcoder  = ((coder->literal)[
++   (((position) & (coder->literal_pos_mask))
++    << (coder->literal_context_bits))
++   + ((uint32_t)(mf->buffer[mf->read_pos - mf->read_ahead - 1])
++   >> (8U - (coder->literal_context_bits)))]);
++
++ if (((coder->state) < 7)) {
++  rc_bittree(&coder->rc, subcoder, 8, cur_byte);
++ } else {
++  const uint8_t match_byte
++    = mf->buffer[mf->read_pos - coder->reps[0] - 1 - mf->read_ahead];
++  literal_matched(&coder->rc, subcoder, match_byte, cur_byte);
++ }
++ coder->state
++   = ((coder->state) <= STATE_SHORTREP_LIT_LIT
++      ? STATE_LIT_LIT : ((coder->state) <= STATE_LIT_SHORTREP
++			 ? (coder->state) - 3 : (coder->state) - 6));
++}
++
++const uint8_t lzma_rc_prices[] = {
++         128, 103,  91,  84,  78,  73,  69,  66,
++          63,  61,  58,  56,  54,  52,  51,  49,
++          48,  46,  45,  44,  43,  42,  41,  40,
++          39,  38,  37,  36,  35,  34,  34,  33,
++          32,  31,  31,  30,  29,  29,  28,  28,
++          27,  26,  26,  25,  25,  24,  24,  23,
++          23,  22,  22,  22,  21,  21,  20,  20,
++          19,  19,  19,  18,  18,  17,  17,  17,
++          16,  16,  16,  15,  15,  15,  14,  14,
++          14,  13,  13,  13,  12,  12,  12,  11,
++          11,  11,  11,  10,  10,  10,  10,   9,
++           9,   9,   9,   8,   8,   8,   8,   7,
++           7,   7,   7,   6,   6,   6,   6,   5,
++           5,   5,   5,   5,   4,   4,   4,   4,
++           3,   3,   3,   3,   3,   2,   2,   2,
++           2,   2,   2,   1,   1,   1,   1,   1
++};
++
++static inline uint32_t
++rc_bit_price(const probability prob, const uint32_t bit)
++{
++ return lzma_rc_prices[(prob ^ ((0U - bit)
++   & ((1U << 11) - 1))) >> 4];
++}
++
++static inline uint32_t
++rc_bit_0_price(const probability prob)
++{
++ return lzma_rc_prices[prob >> 4];
++}
++
++static inline uint32_t
++rc_bit_1_price(const probability prob)
++{
++ return lzma_rc_prices[(prob ^ ((1U << 11) - 1))
++   >> 4];
++}
++
++static inline uint32_t
++rc_bittree_price(const probability *const probs,
++  const uint32_t bit_levels, uint32_t symbol)
++{
++ uint32_t price = 0;
++ symbol += 1U << bit_levels;
++
++ do {
++  const uint32_t bit = symbol & 1;
++  symbol >>= 1;
++  price += rc_bit_price(probs[symbol], bit);
++ } while (symbol != 1);
++
++ return price;
++}
++
++static void
++length_update_prices(lzma_length_encoder *lc, const uint32_t pos_state)
++{
++ const uint32_t table_size = lc->table_size;
++ lc->counters[pos_state] = table_size;
++
++ const uint32_t a0 = rc_bit_0_price(lc->choice);
++ const uint32_t a1 = rc_bit_1_price(lc->choice);
++ const uint32_t b0 = a1 + rc_bit_0_price(lc->choice2);
++ const uint32_t b1 = a1 + rc_bit_1_price(lc->choice2);
++ uint32_t *const prices = lc->prices[pos_state];
++
++ uint32_t i;
++ for (i = 0; i < table_size && i < (1 << 3); ++i)
++  prices[i] = a0 + rc_bittree_price(lc->low[pos_state],
++    3, i);
++
++ for (; i < table_size && i < (1 << 3) + (1 << 3); ++i)
++  prices[i] = b0 + rc_bittree_price(lc->mid[pos_state],
++    3, i - (1 << 3));
++
++ for (; i < table_size; ++i)
++  prices[i] = b1 + rc_bittree_price(lc->high, 8,
++    i - (1 << 3) - (1 << 3));
++
++ return;
++}
++
++static inline void
++length(lzma_range_encoder *rc, lzma_length_encoder *lc,
++  const uint32_t pos_state, uint32_t len, const _Bool fast_mode)
++{
++ len -= 2;
++
++ if (len < (1 << 3)) {
++  rc_bit(rc, &lc->choice, 0);
++  rc_bittree(rc, lc->low[pos_state], 3, len);
++ } else {
++  rc_bit(rc, &lc->choice, 1);
++  len -= (1 << 3);
++
++  if (len < (1 << 3)) {
++   rc_bit(rc, &lc->choice2, 0);
++   rc_bittree(rc, lc->mid[pos_state], 3, len);
++  } else {
++   rc_bit(rc, &lc->choice2, 1);
++   len -= (1 << 3);
++   rc_bittree(rc, lc->high, 8, len);
++  }
++ }
++
++ if (!fast_mode)
++  if (--lc->counters[pos_state] == 0)
++   length_update_prices(lc, pos_state);
++}
++
++static inline void
++rep_match(lzma_lzma1_encoder *coder, const uint32_t pos_state,
++  const uint32_t rep, const uint32_t len)
++{
++ if (rep == 0) {
++  rc_bit(&coder->rc, &coder->is_rep0[coder->state], 0);
++  rc_bit(&coder->rc,
++    &coder->is_rep0_long[coder->state][pos_state],
++    len != 1);
++ } else {
++  const uint32_t distance = coder->reps[rep];
++  rc_bit(&coder->rc, &coder->is_rep0[coder->state], 1);
++
++  if (rep == 1) {
++   rc_bit(&coder->rc, &coder->is_rep1[coder->state], 0);
++  } else {
++   rc_bit(&coder->rc, &coder->is_rep1[coder->state], 1);
++   rc_bit(&coder->rc, &coder->is_rep2[coder->state],
++     rep - 2);
++
++   if (rep == 3)
++    coder->reps[3] = coder->reps[2];
++
++   coder->reps[2] = coder->reps[1];
++  }
++
++  coder->reps[1] = coder->reps[0];
++  coder->reps[0] = distance;
++ }
++
++ if (len == 1) {
++  coder->state = ((coder->state) < 7 ? STATE_LIT_SHORTREP : STATE_NONLIT_REP);
++ } else {
++  length(&coder->rc, &coder->rep_len_encoder, pos_state, len,
++    coder->fast_mode);
++  coder->state = ((coder->state) < 7 ? STATE_LIT_LONGREP : STATE_NONLIT_REP);
++ }
++}
++
++// In the original code this array is statically initialized with constant
++// data; it is quite big, so the test leaves it zero-initialized.
++const uint8_t lzma_fastpos[1 << 13];
++
++static inline uint32_t
++get_dist_slot(uint32_t dist)
++{
++ if (dist < (1U << (13 + ((0) + (0) * (13 - 1)))))
++  return lzma_fastpos[dist];
++
++ if (dist < (1U << (13 + ((0) + (1) * (13 - 1)))))
++  return (uint32_t)(lzma_fastpos[(dist) >> ((0) + (1) * (13 - 1))]) + 2 * ((0) + (1) * (13 - 1));
++
++ return (uint32_t)(lzma_fastpos[(dist) >> ((0) + (2) * (13 - 1))]) + 2 * ((0) + (2) * (13 - 1));
++}
++
++static inline void
++rc_bittree_reverse(lzma_range_encoder *rc, probability *probs,
++  uint32_t bit_count, uint32_t symbol)
++{
++ uint32_t model_index = 1;
++ do {
++  const uint32_t bit = symbol & 1;
++  symbol >>= 1;
++  rc_bit(rc, &probs[model_index], bit);
++  model_index = (model_index << 1) + bit;
++ } while (--bit_count != 0);
++}
++
++static inline void
++rc_direct(lzma_range_encoder *rc, uint32_t value, uint32_t bit_count)
++{
++ do {
++  rc->symbols[rc->count++]
++    = RC_DIRECT_0 + ((value >> --bit_count) & 1);
++ } while (bit_count != 0);
++}
++
++static inline void
++match(lzma_lzma1_encoder *coder, const uint32_t pos_state,
++      const uint32_t distance, const uint32_t len)
++{
++ coder->state = ((coder->state) < 7 ? STATE_LIT_MATCH : STATE_NONLIT_MATCH);
++
++ length(&coder->rc, &coder->match_len_encoder, pos_state, len,
++	coder->fast_mode);
++
++ const uint32_t dist_slot = get_dist_slot(distance);
++ const uint32_t dist_state = ((len) < 4 + 2 ? (len) - 2 : 4 - 1);
++ rc_bittree(&coder->rc, coder->dist_slot[dist_state], 6, dist_slot);
++
++ if (dist_slot >= 4) {
++  const uint32_t footer_bits = (dist_slot >> 1) - 1;
++  const uint32_t base = (2 | (dist_slot & 1)) << footer_bits;
++  const uint32_t dist_reduced = distance - base;
++
++  if (dist_slot < 14) {
++   rc_bittree_reverse(&coder->rc, coder->dist_special + base - dist_slot - 1,
++		     footer_bits, dist_reduced);
++  } else {
++   rc_direct(&coder->rc, dist_reduced >> 4,
++     footer_bits - 4);
++   rc_bittree_reverse(
++     &coder->rc, coder->dist_align,
++     4, dist_reduced & ((1 << 4) - 1));
++   ++coder->align_price_count;
++  }
++ }
++
++ coder->reps[3] = coder->reps[2];
++ coder->reps[2] = coder->reps[1];
++ coder->reps[1] = coder->reps[0];
++ coder->reps[0] = distance;
++ ++coder->match_price_count;
++}
++
++static void
++encode_symbol(lzma_lzma1_encoder *coder, lzma_mf *mf,
++  uint32_t back, uint32_t len, uint32_t position)
++{
++ const uint32_t pos_state = position & coder->pos_mask;
++
++ if (back == (4294967295U)) {
++  rc_bit(&coder->rc,
++    &coder->is_match[coder->state][pos_state], 0);
++  literal(coder, mf, position);
++ } else {
++  rc_bit(&coder->rc,
++   &coder->is_match[coder->state][pos_state], 1);
++
++  if (back < 4) {
++   rc_bit(&coder->rc, &coder->is_rep[coder->state], 1);
++   rep_match(coder, pos_state, back, len);
++  } else {
++   rc_bit(&coder->rc, &coder->is_rep[coder->state], 0);
++   match(coder, pos_state, back - 4, len);
++  }
++ }
++ mf->read_ahead -= len;
++}
++
++static void
++encode_eopm(lzma_lzma1_encoder *coder, uint32_t position)
++{
++ const uint32_t pos_state = position & coder->pos_mask;
++ rc_bit(&coder->rc, &coder->is_match[coder->state][pos_state], 1);
++ rc_bit(&coder->rc, &coder->is_rep[coder->state], 0);
++ match(coder, pos_state, (4294967295U), 2);
++}
++
++static inline void
++rc_flush(lzma_range_encoder *rc)
++{
++ for (size_t i = 0; i < 5; ++i)
++  rc->symbols[rc->count++] = RC_FLUSH;
++}
++
++extern void exit (int __status)
++ __attribute__ ((__nothrow__ , __leaf__ , __noreturn__));
++
++extern lzma_ret
++lzma_lzma_encode(lzma_lzma1_encoder *restrict coder, lzma_mf *restrict mf,
++  uint8_t *restrict out, size_t *restrict out_pos,
++  size_t out_size, uint32_t limit)
++{
++
++ if (!coder->is_initialized && !encode_init(coder, mf))
++  return LZMA_OK;
++
++ uint32_t position = mf_position(mf);
++
++ while (1) {
++  if (rc_encode(&coder->rc, out, out_pos, out_size)) {
++   return LZMA_OK;
++  }
++
++  if (limit != (4294967295U)
++      && (mf->read_pos - mf->read_ahead >= limit
++	 || *out_pos + rc_pending(&coder->rc)
++	    >= (1U << 16) - ((1 << 12) + 1)))
++   break;
++
++  if (mf->read_pos >= mf->read_limit) {
++   if (mf->action == LZMA_RUN)
++    return LZMA_OK;
++
++
++   if (mf->read_ahead == 0)
++    break;
++  }
++  uint32_t len;
++  uint32_t back;
++
++  if (coder->fast_mode)
++   lzma_lzma_optimum_fast(coder, mf, &back, &len);
++  else
++   // The original code contains the  call to
++   // lzma_lzma_optimum_normal(coder, mf, &back, &len, position);
++   exit (-1);
++
++  encode_symbol(coder, mf, back, len, position);
++
++  position += len;
++ }
++
++ if (!coder->is_flushed) {
++  coder->is_flushed = 1;
++  if (limit == (4294967295U))
++   encode_eopm(coder, position);
++
++  rc_flush(&coder->rc);
++
++  if (rc_encode(&coder->rc, out, out_pos, out_size)) {
++   return LZMA_OK;
++  }
++ }
++
++ coder->is_flushed = 0;
++ return LZMA_STREAM_END;
++}
++
++extern void
++lzma_free(void *ptr, const lzma_allocator *allocator)
++{
++ if (allocator != ((void *)0) && allocator->free != ((void *)0))
++  allocator->free(allocator->opaque, ptr);
++ else
++  free(ptr);
++ return;
++}
++
++static _Bool
++lz_encoder_prepare(lzma_mf *mf, const lzma_allocator *allocator,
++  const lzma_lz_options *lz_options)
++{
++ if (lz_options->dict_size < 4096U
++   || lz_options->dict_size
++    > (1U << 30) + (1U << 29)
++   || lz_options->nice_len > lz_options->match_len_max)
++  return 1;
++
++ mf->keep_size_before = lz_options->before_size + lz_options->dict_size;
++ mf->keep_size_after = lz_options->after_size
++   + lz_options->match_len_max;
++ uint32_t reserve = lz_options->dict_size / 2;
++ if (reserve > (1U << 30))
++  reserve /= 2;
++
++ reserve += (lz_options->before_size + lz_options->match_len_max
++   + lz_options->after_size) / 2 + (1U << 19);
++
++ const uint32_t old_size = mf->size;
++ mf->size = mf->keep_size_before + reserve + mf->keep_size_after;
++
++ if ((mf->buffer != ((void *)0)) && old_size != mf->size) {
++  lzma_free(mf->buffer, allocator);
++  mf->buffer = ((void *)0);
++ }
++
++ mf->match_len_max = lz_options->match_len_max;
++ mf->nice_len = lz_options->nice_len;
++ mf->cyclic_size = lz_options->dict_size + 1;
++
++ switch (lz_options->match_finder) {
++ case LZMA_MF_HC3:
++  mf->find = &lzma_mf_hc3_find;
++  mf->skip = &lzma_mf_hc3_skip;
++  break;
++
++ case LZMA_MF_HC4:
++  mf->find = &lzma_mf_hc4_find;
++  mf->skip = &lzma_mf_hc4_skip;
++  break;
++
++ case LZMA_MF_BT2:
++  mf->find = &lzma_mf_bt2_find;
++  mf->skip = &lzma_mf_bt2_skip;
++  break;
++
++ case LZMA_MF_BT3:
++  mf->find = &lzma_mf_bt3_find;
++  mf->skip = &lzma_mf_bt3_skip;
++  break;
++
++ case LZMA_MF_BT4:
++  mf->find = &lzma_mf_bt4_find;
++  mf->skip = &lzma_mf_bt4_skip;
++  break;
++
++ default:
++  return 1;
++ }
++
++ const uint32_t hash_bytes = lz_options->match_finder & 0x0F;
++ if (hash_bytes > mf->nice_len)
++  return 1;
++
++ const _Bool is_bt = (lz_options->match_finder & 0x10) != 0;
++ uint32_t hs;
++
++ if (hash_bytes == 2) {
++  hs = 0xFFFF;
++ } else {
++  hs = lz_options->dict_size - 1;
++  hs |= hs >> 1;
++  hs |= hs >> 2;
++  hs |= hs >> 4;
++  hs |= hs >> 8;
++  hs >>= 1;
++  hs |= 0xFFFF;
++
++  if (hs > (1U << 24)) {
++   if (hash_bytes == 3)
++    hs = (1U << 24) - 1;
++   else
++    hs >>= 1;
++  }
++ }
++
++ mf->hash_mask = hs;
++
++ ++hs;
++ if (hash_bytes > 2)
++  hs += (1U << 10);
++ if (hash_bytes > 3)
++  hs += (1U << 16);
++
++ const uint32_t old_hash_count = mf->hash_count;
++ const uint32_t old_sons_count = mf->sons_count;
++ mf->hash_count = hs;
++ mf->sons_count = mf->cyclic_size;
++ if (is_bt)
++  mf->sons_count *= 2;
++
++ if (old_hash_count != mf->hash_count
++   || old_sons_count != mf->sons_count) {
++  lzma_free(mf->hash, allocator);
++  mf->hash = ((void *)0);
++
++  lzma_free(mf->son, allocator);
++  mf->son = ((void *)0);
++ }
++
++ mf->depth = lz_options->depth;
++ if (mf->depth == 0) {
++  if (is_bt)
++   mf->depth = 16 + mf->nice_len / 2;
++  else
++   mf->depth = 4 + mf->nice_len / 4;
++ }
++
++ return 0;
++}
++
++int
++main ()
++{
++  lzma_mf mf;
++  lzma_allocator allocator;
++  lzma_lz_options lz_options;
++
++  void *coder;
++  uint8_t *restrict out;
++  size_t *restrict out_pos;
++  size_t out_size;
++
++  lz_encoder_prepare(&mf, &allocator, &lz_options);
++  return (int) lzma_lzma_encode(coder, &mf, out, out_pos, out_size, (4294967295U));
++}
++
++
++/* { dg-final { scan-wpa-ipa-dump "Save results of indirect call analysis." "icp"} } */
++/* { dg-final { scan-wpa-ipa-dump-times "For call" 2 "icp"} } */
++/* { dg-final { scan-wpa-ipa-dump-times "Insert 0 prefetch stmt:" 5 "ipa_prefetch"} } */
++/* { dg-final { scan-wpa-ipa-dump-times "Insert 1 prefetch stmt:" 4 "ipa_prefetch"} } */
++/* { dg-final { scan-wpa-ipa-dump-times "Insert 2 prefetch stmt:" 2 "ipa_prefetch"} } */
+-- 
+2.33.0
+
diff --git a/0052-Fix-fails-in-IPA-prefetch-src-openEuler-gcc-I96ID7.patch b/0052-Fix-fails-in-IPA-prefetch-src-openEuler-gcc-I96ID7.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e50c9b2b682f7498c6b807b5db12cf7fc7c115d5
--- /dev/null
+++ b/0052-Fix-fails-in-IPA-prefetch-src-openEuler-gcc-I96ID7.patch
@@ -0,0 +1,94 @@
+From 0263daa1312d0cdcdf9c770bcf5d982a2d4fc16b Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Fri, 29 Mar 2024 17:15:41 +0800
+Subject: [PATCH 2/2] Fix fails in IPA prefetch (src-openEuler/gcc: I96ID7)
+
+---
+ gcc/ipa-prefetch.cc | 28 ++++++++++++++++++++++++++--
+ 1 file changed, 26 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index 9537e4835..1ceb5137f 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -366,6 +366,7 @@ typedef std::map memref_map;
+ typedef std::map memref_tree_map;
+ 
+ typedef std::set stmt_set;
++typedef std::set tree_set;
+ typedef std::map tree_map;
+ 
+ tree_memref_map *tm_map;
+@@ -1124,8 +1125,21 @@ analyse_loops ()
+     }
+ }
+ 
++/* Compare memrefs by IDs; helper for qsort.  */
++
++static int
++memref_id_cmp (const void *p1, const void *p2)
++{
++  const memref_t *mr1 = *(const memref_t **) p1;
++  const memref_t *mr2 = *(const memref_t **) p2;
++
++  if ((unsigned) mr1->mr_id > (unsigned) mr2->mr_id)
++    return 1;
++  return -1;
++}
++
+ /* Reduce the set filtering out memrefs with the same memory references,
+-   return the result vector of memrefs.  */
++   sort and return the result vector of memrefs.  */
+ 
+ static void
+ reduce_memref_set (memref_set *set, vec &vec)
+@@ -1162,6 +1176,7 @@ reduce_memref_set (memref_set *set, vec &vec)
+ 	    vec.safe_push (mr1);
+ 	}
+     }
++  vec.qsort (memref_id_cmp);
+   if (dump_file)
+     {
+       fprintf (dump_file, "MRs (%d) after filtering: ", vec.length ());
+@@ -1663,10 +1678,15 @@ optimize_function (cgraph_node *n, function *fn)
+     }
+ 
+   /* Create other new vars.  Insert new stmts.  */
++  vec used_mr_vec = vNULL;
+   for (memref_set::const_iterator it = used_mrs.begin ();
+        it != used_mrs.end (); it++)
++    used_mr_vec.safe_push (*it);
++  used_mr_vec.qsort (memref_id_cmp);
++
++  for (unsigned int j = 0; j < used_mr_vec.length (); j++)
+     {
+-      memref_t *mr = *it;
++      memref_t *mr = used_mr_vec[j];
+       if (mr == comp_mr)
+ 	continue;
+       gimple *last_stmt = gimple_copy_and_remap_memref_stmts (mr, stmts, 0,
+@@ -1702,6 +1722,7 @@ optimize_function (cgraph_node *n, function *fn)
+       local = integer_three_node;
+       break;
+     }
++  tree_set prefetched_addrs;
+   for (unsigned int j = 0; j < vmrs.length (); j++)
+     {
+       memref_t *mr = vmrs[j];
+@@ -1714,10 +1735,13 @@ optimize_function (cgraph_node *n, function *fn)
+       tree addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE);
+       if (decl_map->count (addr))
+ 	addr = (*decl_map)[addr];
++      if (prefetched_addrs.count (addr))
++	continue;
+       last_stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
+ 				     3, addr, write_p, local);
+       pcalls.safe_push (last_stmt);
+       gimple_seq_add_stmt (&stmts, last_stmt);
++      prefetched_addrs.insert (addr);
+       if (dump_file)
+ 	{
+ 	  fprintf (dump_file, "Insert %d prefetch stmt:\n", j);
+-- 
+2.33.0
+
diff --git a/0052-LoongArch-Added-vectorized-hardware-inspection-for-t.patch b/0052-LoongArch-Added-vectorized-hardware-inspection-for-t.patch
new file mode 100644
index 0000000000000000000000000000000000000000..768de3c411ec468261e20dfc9e8e90302553226a
--- /dev/null
+++ b/0052-LoongArch-Added-vectorized-hardware-inspection-for-t.patch
@@ -0,0 +1,4375 @@
+From 8d5c983efc35804f98823e203eada6263dd1604e Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Tue, 28 Nov 2023 16:23:53 +0800
+Subject: [PATCH 052/188] LoongArch: Added vectorized hardware inspection for
+ testsuite.
+
+When GCC regression tests are executed on a cpu that does not support
+vectorization, the loongarch/vector directory will have some FAIL entries for
+all test cases related to vectorization runs. In order to solve this kind
+of problem, a vectorized hardware detection function was added to the code,
+which can only be compiled but not run.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c:Remove
+	the default Settings to run the behavior.
+	* gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvadd.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvadda.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvand.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvandi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvandn.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitset.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvclo.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvclz.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextrins.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffinth.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftintl.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvilvh.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvilvl.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvld.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvldi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmadd.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmsub.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmul.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvneg.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvnor.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvnori.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvor.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvori.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvorn.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpackev.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpackod.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickev.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickod.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickve.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvprem.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpremi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplve.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvrotr.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvrotri.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvseq.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvseqi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsll.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslli.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsra.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrai.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsran.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrani.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrar.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrari.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrl.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrli.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrln.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssran.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrani.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrln.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvst.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsub.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubi.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvxor.c:Dito.
+	* gcc.target/loongarch/vector/lasx/lasx-xvxori.c:Dito.
+	* gcc.target/loongarch/vector/loongarch-vector.exp:Added hardware
+	detection to set the behavior of program execution based on the
+	characteristics of the hardware.
+	* gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c:Remove the default
+	Settings to run the behavior.
+	* gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vadd.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vadda.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vand.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vandi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vandn.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vavg-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vavg-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitclr.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitclri.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitrev.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitsel.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitseli.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitset.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitseti.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbsll.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vbsrl.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vclo.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vclz.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vexth-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vexth-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vextl-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vextl-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vextrins.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vffint-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vffint-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vffint-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfrstp.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-4.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vilvh.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vilvl.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vld.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vldi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmadd.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmax-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmax-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmin-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmin-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmini-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmini-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmod-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmod-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmskgez.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmskltz.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmsknz.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmsub.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmul.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vneg.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vnor.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vnori.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vor.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vori.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vorn.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vpackev.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vpackod.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vpcnt.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vpickev.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vpickod.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vpremi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vreplve.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vreplvei.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vrotr.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vrotri.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsat-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsat-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vseq.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vseqi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vshuf.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsigncov.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsle-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsle-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vslei-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vslei-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsll.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vslli.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vslt-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vslt-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vslti-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vslti-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsra.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrai.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsran.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrani.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrar.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrari.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrarn.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrarni.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrl.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrli.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrln.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlni.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlr.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlri.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssran.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrani.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrarn.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrarni.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrln.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrlni.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssub-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vssub-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vst.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsub.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubi.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vxor.c:Dito.
+	* gcc.target/loongarch/vector/lsx/lsx-vxori.c:Dito.
+---
+ .../loongarch/vector/lasx/lasx-xvabsd-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvabsd-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvadd.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvadda.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvaddi.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvaddwev-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvaddwev-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvaddwev-3.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvaddwod-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvaddwod-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvaddwod-3.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvand.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvandi.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvandn.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvavg-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvavg-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvavgr-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvavgr-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitclr.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitclri.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitrev.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitrevi.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitsel.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitseli.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitset.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvbitseti.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvbsll_v.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvbsrl_v.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvclo.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvclz.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvdiv-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvdiv-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvext2xv-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvext2xv-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvexth-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvexth-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvextl-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvextl-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvextrins.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfadd_d.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvfadd_s.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvfclass_d.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvfclass_s.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_caf_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_cle_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_clt_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_cne_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_cor_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_cun_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_saf_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_seq_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_sle_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_slt_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_sne_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_sor_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcmp_sun_s.c |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcvt.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvfcvth.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvffint-1.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvffint-2.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvffinth.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvflogb_d.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvflogb_s.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfmadd_d.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfmadd_s.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfmax_d.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvfmax_s.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvfmaxa_d.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfmaxa_s.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfnmadd_d.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvfnmadd_s.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvfrint_d.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfrint_s.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfrstp.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvfrstpi.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvfsqrt_d.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvfsqrt_s.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvftint-1.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvftint-2.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvftint-3.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvftintl.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvhaddw-1.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvhaddw-2.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvhsubw-1.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvhsubw-2.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvilvh.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvilvl.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvinsgr2vr.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvinsve0.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvld.c         |  1 -
+ .../loongarch/vector/lasx/lasx-xvldi.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvmadd.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaddwev-1.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaddwev-2.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaddwev-3.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaddwod-1.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaddwod-2.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaddwod-3.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvmax-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmax-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaxi-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvmaxi-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvmin-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmin-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmini-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvmini-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvmod-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmod-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmskgez.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvmskltz.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvmsknz.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmsub.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvmuh-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmuh-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvmul.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvmulwev-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvmulwev-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvmulwev-3.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvmulwod-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvmulwod-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvmulwod-3.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvneg.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvnor.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvnori.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvor.c         |  1 -
+ .../loongarch/vector/lasx/lasx-xvori.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvorn.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvpackev.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvpackod.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvpcnt.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvpickev.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvpickod.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvpickve.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvpickve2gr.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvprem.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvpremi.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvreplgr2vr.c  |  1 -
+ .../loongarch/vector/lasx/lasx-xvreplve.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvreplve0.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvreplvei.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvrotr.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvrotri.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsadd-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvsadd-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvsat-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsat-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvseq.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvseqi.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvshuf4i_b.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvshuf_b.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvsigncov.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvsle-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsle-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvslei-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvslei-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvsll.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvslli.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsllwil-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvsllwil-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvslt-1.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvslt-2.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvslti-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvslti-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvsra.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrai.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsran.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrani.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrar.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrari.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrarn.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrarni.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrl.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrli.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrln.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrlni.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrlr.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrlri.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrlrn.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvsrlrni.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvssran.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvssrani.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvssrarn.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvssrarni.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvssrln.c      |  1 -
+ .../loongarch/vector/lasx/lasx-xvssrlni.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvssrlrn.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvssrlrni.c    |  1 -
+ .../loongarch/vector/lasx/lasx-xvssub-1.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvssub-2.c     |  1 -
+ .../loongarch/vector/lasx/lasx-xvst.c         |  1 -
+ .../loongarch/vector/lasx/lasx-xvsub.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvsubi.c       |  1 -
+ .../loongarch/vector/lasx/lasx-xvsubwev-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvsubwev-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvsubwod-1.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvsubwod-2.c   |  1 -
+ .../loongarch/vector/lasx/lasx-xvxor.c        |  1 -
+ .../loongarch/vector/lasx/lasx-xvxori.c       |  1 -
+ .../loongarch/vector/loongarch-vector.exp     | 23 +++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vabsd-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vabsd-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vadd.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vadda.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vaddi.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vaddwev-1.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vaddwev-2.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vaddwev-3.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vaddwod-1.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vaddwod-2.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vaddwod-3.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vand.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vandi.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vandn.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vavg-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vavg-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vavgr-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vavgr-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vbitclr.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vbitclri.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vbitrev.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vbitrevi.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vbitsel.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vbitseli.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vbitset.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vbitseti.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vbsll.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vbsrl.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vclo.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vclz.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vdiv-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vdiv-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vexth-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vexth-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vextl-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vextl-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vextrins.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfadd_d.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vfadd_s.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vfclass_d.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfclass_s.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_caf.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_ceq.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_cle.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_clt.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_cne.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_cor.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_cun.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_saf.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_seq.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_sle.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_slt.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_sne.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_sor.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcmp_sun.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfcvt-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vfcvt-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vffint-1.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vffint-2.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vffint-3.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vflogb_d.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vflogb_s.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfmadd_d.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfmadd_s.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfmax_d.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vfmax_s.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vfmaxa_d.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfmaxa_s.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfnmadd_d.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfnmadd_s.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vfrint_d.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfrint_s.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfrstp.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vfrstpi.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vfsqrt_d.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vfsqrt_s.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vftint-1.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vftint-2.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vftint-3.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vftint-4.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vhaddw-1.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vhaddw-2.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vhsubw-1.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vhsubw-2.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vilvh.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vilvl.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vinsgr2vr.c      |  1 -
+ .../gcc.target/loongarch/vector/lsx/lsx-vld.c |  1 -
+ .../loongarch/vector/lsx/lsx-vldi.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vmadd.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vmaddwev-1.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vmaddwev-2.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vmaddwev-3.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vmaddwod-1.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vmaddwod-2.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vmaddwod-3.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vmax-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmax-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmaxi-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vmaxi-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vmin-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmin-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmini-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vmini-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vmod-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmod-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmskgez.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vmskltz.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vmsknz.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmsub.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vmuh-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmuh-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vmul.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vmulwev-1.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vmulwev-2.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vmulwev-3.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vmulwod-1.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vmulwod-2.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vmulwod-3.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vneg.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vnor.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vnori.c          |  1 -
+ .../gcc.target/loongarch/vector/lsx/lsx-vor.c |  1 -
+ .../loongarch/vector/lsx/lsx-vori.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vorn.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vpackev.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vpackod.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vpcnt.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vpickev.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vpickod.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vpickve2gr.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vpremi.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vreplgr2vr.c     |  1 -
+ .../loongarch/vector/lsx/lsx-vreplve.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vreplvei.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vrotr.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vrotri.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsadd-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vsadd-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vsat-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsat-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vseq.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vseqi.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vshuf.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vshuf4i.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vsigncov.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vsle-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsle-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vslei-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vslei-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vsll.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vslli.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsllwil-1.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vsllwil-2.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vslt-1.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vslt-2.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vslti-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vslti-2.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vsra.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vsrai.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsran.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsrani.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsrar.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsrari.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsrarn.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsrarni.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vsrl.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vsrli.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsrln.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsrlni.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsrlr.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsrlri.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsrlrn.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vsrlrni.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vssran.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vssrani.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vssrarn.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vssrarni.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vssrln.c         |  1 -
+ .../loongarch/vector/lsx/lsx-vssrlni.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vssrlrn.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vssrlrni.c       |  1 -
+ .../loongarch/vector/lsx/lsx-vssub-1.c        |  1 -
+ .../loongarch/vector/lsx/lsx-vssub-2.c        |  1 -
+ .../gcc.target/loongarch/vector/lsx/lsx-vst.c |  1 -
+ .../loongarch/vector/lsx/lsx-vsub.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vsubi.c          |  1 -
+ .../loongarch/vector/lsx/lsx-vsubwev-1.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vsubwev-2.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vsubwod-1.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vsubwod-2.c      |  1 -
+ .../loongarch/vector/lsx/lsx-vxor.c           |  1 -
+ .../loongarch/vector/lsx/lsx-vxori.c          |  1 -
+ 393 files changed, 23 insertions(+), 392 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
+index 41fae32df..5e15a12cb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+index bd7a9069d..fa0f9f6b5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+index 293295723..82da73440 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
+index d6b57d1cd..2c2701dc2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+index 054bf6e55..064b26fb6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+index 70f3bf783..160073927 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
+index 22528a14f..c45840ea2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
+index 38a0a53d7..567bc1faf 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
+index a4dc565e9..775b90547 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
+index a2fbe9ed0..34721ad56 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
+index 8c98fc4be..30d52b01c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
+index e485786dd..96ad473a3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
+index 26cddc53a..59d6a14ab 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
+index bc3590c21..b2809d369 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
+index 5ce31ebbd..18d186280 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
+index d04e42753..4a79277b4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+index 37b78aa1b..7e6a244e7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
+index 3944a6ac0..f020cbeea 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
+index def7b588e..70c928886 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
+index 713eb19d5..7eee98f40 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
+index 2b0e7f8d1..a4f104e8e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
+index 2b8327d91..967a01f6d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
+index c9847a615..414080540 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
+index 1edb4fca2..b2532f5eb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
+index c195cd91c..ff9d030f0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
+index 47f37e4b3..9081443bc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+index 3c1a8b8e6..7110423fc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+index 340f7691b..236b5b28e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+index dbc52f92b..927fa16fe 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
+index 89191c467..3e39c212a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
+index 0d7c67703..e3cfe283e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
+index fd8b6d38c..71543290a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
+index 94f31019c..2e9e4b03d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
+index d93201bc4..f6a098d96 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
+index 9fb4e3ff0..c64e6cadf 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
+index fe6ff15d8..33ede4dab 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
+index c0d3e8e75..7f59c765d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
+index 8c7ab4ed3..d9eee597c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
+index 8e61f1c6d..e4dc8bf10 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
+index 657a19e58..7cd7ad8a3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
+index 4002c4074..62ca8c9c3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
+index 5d5b4c43c..5a2733075 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ /* { dg-timeout 500 } */
+ #include "../simd_correctness_check.h"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
+index 888e85b6e..cae82f6cb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ /* { dg-timeout 500 } */
+ #include "../simd_correctness_check.h"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
+index fa3372358..1fe7c8bc5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
+index 6d6649f6f..d4c4aa150 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
+index a64dd7598..1ca2fbd91 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
+index 733cc00ee..0dffd68e7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+index 190741070..77ba5fca4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
+index 8dd58f228..954c7575c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
+index 3230c101d..98eb38573 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
+index 23cbc4bf0..1427165fd 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
+index 6641d2c58..e61e0e655 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
+index d25fc25da..24f4f2054 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
+index 8210f749b..f468d93c6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
+index 9d015a5c8..29c128e79 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
+index a61681073..29c080c50 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
+index 41f274920..eee56168b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
+index 116399a7c..8b6225d06 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
+index 001ce1c69..7933ec580 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
+index dd04fd788..e0240cb5c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
+index 3e2b15507..c6f4aeaa6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
+index e310ff5ee..4d8e71bd2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
+index bba1a06f3..57a4cd2b9 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
+index b641c733f..798c75280 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
+index c85c94bf6..f5c49f982 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
+index bde41dd5c..d25bbe6dd 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
+index 207ba167f..eefa1e5ac 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
+index 9b7703231..a9271e60d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
+index 96bbb942d..63605b85c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
+index c73a8a74a..4b59e3403 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
+index d161c850c..0f6c5e4cc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
+index c5e9576ea..3f4540425 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
+index 4babf1638..e65ded196 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ /* { dg-timeout 500 } */
+ #include "../simd_correctness_check.h"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+index 9f2fa6747..fbfe300ea 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ /* { dg-timeout 500 } */
+ #include "../simd_correctness_check.h"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
+index 557f9f8b5..72b3fe08d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
+index cdb7b11aa..cbb23e0a8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
+index 18d5c51de..21f617231 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
+index 27df4a27d..0a28716bc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
+index c75468d42..24b21ef8a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
+index ad72f7596..5a72994d5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
+index 19db4e192..c02e00bdd 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lasxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
+index b0fdf7e0b..f20ec5b83 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
+index 1cf0ec698..03a885648 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
+index 14ec081a4..9ee92aa85 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+index fa4d5fd6f..e5101a857 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
+index 87c3e25b1..685b76e7e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
+index 5a047a508..cbadbd3d6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
+index 4393045c3..c78eb7fce 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
+index ce28c4857..9e3cd7087 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
+index 644d2ce4b..b356dd1bf 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
+index c1eda6c6c..f39a94ab0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
+index 84b3c6599..51e4661d5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
+index f9634b128..6a04e7268 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
+index 6238685bc..5e5b35de5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
+index 5fa080375..bfa095dc8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
+index 40549448e..6a4704583 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
+index 683876933..d456cbfff 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
+index f9f88b654..7f1c40c00 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
+index 5210e4cf9..abe92a605 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
+index 96c6671f2..4b8932ab0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
+index 38f2c0afe..561d964b1 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
+index e804a0a45..cc52343ec 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
+index b6b34063c..2373c96ef 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
+index 7dbf335c1..9df0af7ed 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
+index 9eaa0e9e7..0eb03acbe 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
+index 01aabada8..6579978b7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
+index 8eb7d9355..7402ff6f0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
+index 6f34f6ffc..fd052cd81 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
+index d0a9e9d2f..cb39dbbad 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
+index 15e66ae38..952725afc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
+index 53b21f98b..22aa6ab0a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
+index 81865fd32..6b48f8ab8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
+index 8c8d4996b..4e13f34dd 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
+index 58ad8bfcd..2e42c1d64 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
+index 85d24fe44..2d420c280 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
+index be3c8e718..f14aa47ca 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
+index 01ff71649..e09174d08 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
+index 32088f4ae..2a4c09c52 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
+index 19157f682..7afa6ad94 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
+index 80fdcda63..ad69c1e47 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
+index 1a4b221fe..27a7fdd67 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
+index 9fcd3ce0c..c55d20d45 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
+index 3cd1626d4..fe17ef13f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
+index 3a491ecab..2b8e6228b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
+index 995a34c18..8a8062a99 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
+index 27eef710d..11643896c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
+index ee91af95f..0341bde95 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
+index fa6cdff31..de7a208c3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
+index 33b96d657..e83957070 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
+index cdd20e881..ee335779f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
+index d2e742e81..7d6be3664 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
+index 66faa74d0..831247beb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
+index a9778809f..65188ad41 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
+index a2edbb80a..d23406674 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
+index 8bd3a8273..2e18db108 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
+index 9346f9bfb..e9fc1d7d3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+index 9346f9bfb..e9fc1d7d3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
+index 81456bc1b..1685747c0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
+index 7aa76c2ba..beeee765f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
+index a2bc2da52..5643b913f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
+index 9346f9bfb..e9fc1d7d3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
+index 21446e55e..49439865c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
+index c1b8e1752..24d508f81 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
+index 2a4f29b50..cecac6173 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
+index a3afc9811..6cd4e0503 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
+index b4ac50271..29a4f5ae2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
+index e5ee89deb..571145b84 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
+index 2a42386ce..41b9470c1 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
+index 5478d19c1..6c9b96460 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
+index c8a00ca89..600168127 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+index 03c479a08..b8ab38711 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
+index 2a6eee0fd..5137f5de6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
+index ed752df00..13f8c8c4f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
+index bc98b41af..ef1784f67 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
+index 06717802c..21f68132b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
+index 093d5640e..0adadaa39 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
+index 7179e715c..4a2927624 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
+index 003e29b67..50e9a9f53 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
+index ef3a47da5..22a7a31a9 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
+index 76651af63..4b68aeb18 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
+index ca1f5e94f..f44f083b7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
+index 6864f5eb8..60278e22b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
+index 7dd2778a5..87d069d1f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
+index d93e4314e..9eefa782b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
+index 2bf9ae9c3..b4bda4dab 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
+index a51be899b..871d0241b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
+index e08934b12..eba7c1164 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
+index 44c20a954..96382483e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
+index fb47385c0..542b6fd3a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
+index 63ba92ead..cfd61ba40 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
+index c145f7ff3..c847e2812 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
+index b5c0fca74..c0ce0dd88 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
+index 1d591c35c..8ac09a026 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
+index e8696701f..dd0a09c4e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
+index d54991051..42a695875 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
+index 0fb6483cf..a7acf351d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
+index 22e62a3e7..c4e1e14e0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
+index 71f770aff..4a2e14712 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
+index cbc1de371..b17c7c4b3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
+index 8fc7a0029..bfca007d7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
+index fdb0c25f1..4648f751a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
+index dd3c2c6f6..25482aebc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
+index 7848ddd41..c284254ab 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
+index b1c16baf4..c39002ed5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+index 356eb2182..09313d03c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
+index 116bebbb6..6d53719a6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
+index 977061097..c812a1b0c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
+index b55e388b1..2683355fe 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
+index ada72a16a..dc187aa2c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
+index f42523850..b13ea88a7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
+index 3c5e775ff..68a2cac21 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
+index c1de1e8d3..e940491a6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
+index a3c0de6d3..8a1272685 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
+index caa72ca61..ba535d1e0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
+index 57d883c04..0a1d0277c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
+index 1687729d3..660c20da8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
+index 8d6ed92a1..9710d128c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
+index 18b36c873..506e983da 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
+index 8fd6298f7..da7203af7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
+index 2c37aa91d..d53bee52a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
++++ b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
+@@ -33,9 +33,32 @@ if ![info exists DEFAULT_CFLAGS] then {
+ #Initialize `dg'.
+ dg-init
+ 
++# If the target hardware supports LSX, the default action is "run", otherwise
++# just "compile".
++global dg-do-what-default
++if {[check_effective_target_loongarch_sx_hw]} then {
++  set dg-do-what-default run
++} else {
++  set dg-do-what-default compile
++}
++
+ #Main loop.
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lsx/*.\[cS\]]] \
+ 	" -mlsx" $DEFAULT_CFLAGS
++
++dg-finish
++
++dg-init
++# If the target hardware supports LASX, the default action is "run", otherwise
++# just "compile".
++
++global dg-do-what-default
++if {[check_effective_target_loongarch_asx_hw]} then {
++  set dg-do-what-default run
++} else {
++  set dg-do-what-default compile
++}
++
+ dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lasx/*.\[cS\]]] \
+ 	" -mlasx" $DEFAULT_CFLAGS
+ # All done.
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
+index e336581f3..8790470a4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
+index c1af80e14..77e027bdb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
+index 7cfb989e4..e2c4f3ad3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
+index 4bb699eab..c7ce0a75b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
+index 77afabe92..23f28bc34 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
+index b7b16a325..54503e22b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
+index a407cadfb..0b1e90959 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
+index 4d5c60998..eefd0be2a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
+index 0ebe8c8a9..1016afe21 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
+index 379517f39..befbf7049 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
+index 30dc83518..9365d242d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
+index 1597749b5..374b8b035 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
+index 906da69ca..ad4b5d307 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
+index 3ae2d7694..e645b9475 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
+index 2177ca3f6..0d7463eda 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
+index 1b0d879e4..bc16057ff 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
+index 4b7262537..e494870bc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
+index 22908b1ea..ff9907dd8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
+index 411dcaa40..d663653a0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
+index 5d7d66e06..9017d1541 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
+index ba4f4b6dc..5d6d1ef4b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
+index 9739182cd..1f730a688 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
+index 52ac9939f..2239b3740 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
+index f2d6fb042..d5818879f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
+index e05af675e..a1737c51b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
+index 540a724a7..577fbeb4a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
+index 34246c551..d60d8434f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
+index 986b7d566..a8d0e0fe2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
+index 2c1099a04..c386ed74c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
+index 12df2c670..aa3e54a8d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
+index cb4be0475..36ee4b83b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
+index f2bc7df27..7cf31e21c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
+index f6390800d..32db7a9c7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
+index 6ab217e97..78afaa8bb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
+index 99854dbd8..998596169 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
+index 73bb530c9..31a3b5e42 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
+index 8d4158b57..e9187db90 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
+index 7ffbd385e..b4d65d678 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
+index 388430278..83b013b95 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
+index 9706d7adc..d570dcd24 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
+index 7166f954b..a3a5f44d8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
+index b448c2076..d38b6ab9d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
+index 98941b47d..74ff46f89 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
+index 409bce0ec..a40019e39 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
+index 39c9cf7a7..934169c6e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
+index c3da43bb4..c351daac0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
+index 5228dbede..8ca078c9e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
+index a2beff53f..b57cf604c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
+index bfa4914be..6d35a4a30 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
+index bc573936d..07101104f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
+index 87cb8da7c..dd418110c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
+index 3845e8ec3..5b2e8d6a4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
+index 964eff79f..98a798c5f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
+index ea47baf40..413a81cb7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
+index 68cb5a52f..78c8f19a5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
+index d4a86e262..4d71b07ec 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
+index e8f4f12b9..476782ce4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
+index 85db95762..4a54fe133 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
+index f8839cfcd..bb4ac9dfc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
+index 9150e27ca..e12e95367 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
+index cc36bf136..de5c46167 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
+index 624589620..3556daa72 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
+index c5de1ac7a..fa6ee6fd2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
+index 6b85e87bd..22a8f6b91 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
+index 442473fb4..bd942da1c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
+index 876588827..a5e513c73 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
+index c2766d5c6..ab8265bc2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
+index 5fcdedd3f..8a09f61fe 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
+index 96b14aad6..0d0475a44 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
+index bf8414b49..58470aef1 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
+index c60ff2b46..0b1074016 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ /* { dg-timeout 500 } */
+ #include "../simd_correctness_check.h"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+index 12cb02303..61f28325a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ /* { dg-timeout 500 } */
+ #include "../simd_correctness_check.h"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
+index ac0ade8b1..30d6ed51c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
+index a2b110f21..e74dfb0d5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
+index 8a35dfe24..5bae5a67f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
+index ffd80540b..4a76ee69f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
+index 8d0d56632..5bf753662 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+index 5dba807f6..ffbdb0069 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+index 7f6d2f4d1..d13f7d0d9 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
+index 9c5bb9131..2d6b92375 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
+index af75f8e4e..ab3abf2a3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
+index 37c769a2d..078d229da 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
+index 0b51cb8cf..1999543f4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
+index 26b51ee14..3d9b1a817 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
+index aa802b295..aefcdb960 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
+index 88c66f220..4226f8683 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
+index 2b9dcc0b5..c45d72667 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
+index 7cd9abb7c..815ca0cdb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
+index 089500ea9..6ba93f73c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include <lsxintrin.h>
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
+index 3fade5157..33369303f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
+index d3fd83da7..2f55309ce 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
+index 839285685..0a48f655a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
+index bab2c6cf3..091343e82 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
+index 5875aa597..42d873b4c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
+index 4be7fce82..9f6aa3d12 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
+index 8a4c39502..6b06e204e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
+index b0e22f955..c96462994 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
+index 51a9a92e8..96db676e7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
+index 7cff1d848..64c61f0a1 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
+index b79af2228..27c50bdbb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
+index b2a7a35bd..d076ae8f2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
+index c90cae75e..c6e183fd4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
+index 772d040c3..e1e10cb60 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
+index 6eaae2134..c0e9a1a96 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
+index 5470d40dd..cade92d25 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
+index 8deb04427..4ecfff10c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
+index 64a950f81..717305270 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
+index 8f743ec2e..cfccbb7e6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
+index d547af0d3..1cd2e7cdc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
+index 47cf33cfd..b4f171d20 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
+index ab650a024..8f630371e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
+index 60b6e3503..78b745a38 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
+index 8ba666275..5f3c049a1 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
+index 8357f4e80..9a949ef18 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
+index e4afc8247..a16b518af 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
+index 346f0316a..5fbb48e81 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
+index 6eea49a61..570bd1d13 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
+index f3e4e0390..522f07950 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
+index 9f5702e2c..62d1e3420 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
+index 9441ba50e..e077ce7d0 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
+index a7a3acce9..80b2da43d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
+index a07a02ab2..fb43da265 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
+index 537a1bb3b..7686bcb5f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
+index 8a6e035c9..d40b093e6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
+index bb59bc312..6eb69cbf5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
+index 030e87fd8..17a43bbc5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
+index 783eedae1..85ae43e63 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
+index 66982d89f..0b0200ed6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
+index 58591f1bb..5fd4af833 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
+index 74269e319..e41c2f8f2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
+index acca2bee9..5ec0a4d2a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
+index ef0ad676e..36c9bf336 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
+index a5f02b1b1..2f16a3483 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
+index 463adb48e..6634b3a9f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
+index a81be76f1..157132c28 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
+index c42440cea..286fe935a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
+index 4ae4dbf8b..81b16542f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
+index 1bc27c983..3eda1f166 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
+index 67d189991..d08f84481 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
+index cd8eefb47..0cf4c664b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
+index 31e3919bf..d709dbdb7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
+index 4362941ab..a031aaeb3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
+index c16a291de..f33c4a8b7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+index 646935c92..f3b800f88 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
+index cd441b841..ee4a7e5b7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
+index 0fb1bc18f..933cb3b0b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
+index a26eb0a3d..febb6345a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
+index 15c6cedc2..80b2db335 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
+index 0e72a33dd..e78a8b07c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
+index 685a1bb36..361d41a04 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
+index 7b8ad7d5a..169627dd3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
+index 7a77e80c0..6a3978317 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
+index 796e88cad..985e32a24 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
+index 5f46293dc..b20f92ef8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
+index 15c96ccfe..8ce161e92 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
+index e8d69f0e9..6f8ddd219 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
+index 5bf3ce6e8..442abf65b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
+index 768df528f..8dbba943a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
+index fd7c22a82..1285aa86a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
+index 2ca4f0b7a..efccd1822 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
+index 4e7c7ab7e..ad6dd0908 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
+index 92988035d..6cfec397d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
+index 6a842d9ce..b4ff77206 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
+index 2a353d65a..8ddc6157d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
+index 60d474203..a0ecbc7dd 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
+index 3aa23bdc8..6abc66b89 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
+index f9c789855..9f59d5ea4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
+index 7b5e9a7bf..29e51a34d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
+index 5a8f4f70a..28e8a3ff2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
+index ca462c834..94b58e65c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
+index 211339bb8..ae9d88518 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
+index 2c3a53416..d18448ea7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
+index c630b4261..639361d7b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
+index 468a17c15..11f19c249 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
+index e45ca36f0..5ab683fd5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
+index 7ffcecde7..526fb15dc 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
+index a23ad7cd2..b3c0c37c3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
+index 76fac97be..7785e9f59 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
+index ed600c72d..a07d5c541 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
+index 613668143..2189b8167 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
+index ec688bb12..e1a633096 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
+index 02f7ca08b..7035d256e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
+index fc4cbb4e5..d7a5d7f30 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
+index 0d5987567..028664bd8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
+index 8afdffa50..ad0eef8d3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
+index f5c82bc74..01907dcee 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
+index 37e0ccf4d..35cd761ee 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
+index f0d391a09..358775ed3 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
+index 3b18bc13c..986ead074 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
+index 39ebff154..9c8688432 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
+index 62837f1ac..c762b88f8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
+index 72fa97174..0b9ba4709 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
+index cc823d4ba..08ceab6ee 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
+@@ -1,4 +1,3 @@
+-/* { dg-do run } */
+ /* { dg-options "-mlsx -w -fno-strict-aliasing" } */
+ #include "../simd_correctness_check.h"
+ #include 
+-- 
+2.43.0
+
diff --git a/0053-LoongArch-Accelerate-optimization-of-scalar-signed-u.patch b/0053-LoongArch-Accelerate-optimization-of-scalar-signed-u.patch
new file mode 100644
index 0000000000000000000000000000000000000000..57c2817c736258241fe8d43fdde856b2f340aa2e
--- /dev/null
+++ b/0053-LoongArch-Accelerate-optimization-of-scalar-signed-u.patch
@@ -0,0 +1,148 @@
+From 87230032bc7fbcec1e3927b2b4a6aeba78040cc6 Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Tue, 28 Nov 2023 15:38:37 +0800
+Subject: [PATCH 053/188] LoongArch: Accelerate optimization of scalar
+ signed/unsigned popcount.
+
+In LoongArch, the vector popcount has corresponding instructions, while
+the scalar does not. Currently, the scalar popcount is calculated
+through a loop, and the value of a non-power of two needs to be iterated
+several times, so the vector popcount instruction is considered for
+optimization.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (v2di): Used to simplify the
+	following templates.
+	(popcount2): New.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/popcnt.c: New test.
+	* gcc.target/loongarch/popcount.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 27 +++++++++++-
+ gcc/testsuite/gcc.target/loongarch/popcnt.c   | 41 +++++++++++++++++++
+ gcc/testsuite/gcc.target/loongarch/popcount.c | 17 ++++++++
+ 3 files changed, 83 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/popcnt.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/popcount.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 11577f407..cfd7a8ec6 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1512,7 +1512,30 @@
+    (set_attr "cnv_mode"	"D2S")
+    (set_attr "mode" "SF")])
+ 
+-
++;; In vector registers, popcount can be implemented directly through
++;; the vector instruction [X]VPCNT.  For GP registers, we can implement
++;; it through the following method.  Compared with loop implementation
++;; of popcount, the following method has better performance.
++
++;; This attribute used for get connection of scalar mode and corresponding
++;; vector mode.
++(define_mode_attr cntmap [(SI "v4si") (DI "v2di")])
++
++(define_expand "popcount2"
++  [(set (match_operand:GPR 0 "register_operand")
++	(popcount:GPR (match_operand:GPR 1 "register_operand")))]
++  "ISA_HAS_LSX"
++{
++  rtx in = operands[1];
++  rtx out = operands[0];
++  rtx vreg = mode == SImode ? gen_reg_rtx (V4SImode) :
++				    gen_reg_rtx (V2DImode);
++  emit_insn (gen_lsx_vinsgr2vr_ (vreg, in, vreg, GEN_INT (1)));
++  emit_insn (gen_popcount2 (vreg, vreg));
++  emit_insn (gen_lsx_vpickve2gr_ (out, vreg, GEN_INT (0)));
++  DONE;
++})
++
+ ;;
+ ;;  ....................
+ ;;
+@@ -3879,7 +3902,7 @@
+ 		   (any_extend:SI (match_dup 3)))])]
+   "")
+ 
+-
++
+ 
+ (define_mode_iterator QHSD [QI HI SI DI])
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/popcnt.c b/gcc/testsuite/gcc.target/loongarch/popcnt.c
+new file mode 100644
+index 000000000..a10fca420
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/popcnt.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx" } */
++/* { dg-final { scan-assembler-not {popcount} } } */
++/* { dg-final { scan-assembler-times "vpcnt.d" 2 { target { loongarch64*-*-* } } } } */
++/* { dg-final { scan-assembler-times "vpcnt.w" 4 { target { loongarch64*-*-* } } } } */
++
++int
++foo (int x)
++{
++  return __builtin_popcount (x);
++}
++
++long
++foo1 (long x)
++{
++  return __builtin_popcountl (x);
++}
++
++long long
++foo2 (long long x)
++{
++  return __builtin_popcountll (x);
++}
++
++int
++foo3 (int *p)
++{
++  return __builtin_popcount (*p);
++}
++
++unsigned
++foo4 (int x)
++{
++  return __builtin_popcount (x);
++}
++
++unsigned long
++foo5 (int x)
++{
++  return __builtin_popcount (x);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/popcount.c b/gcc/testsuite/gcc.target/loongarch/popcount.c
+new file mode 100644
+index 000000000..390ff0676
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/popcount.c
+@@ -0,0 +1,17 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -fdump-tree-optimized" } */
++/* { dg-final { scan-tree-dump-times "__builtin_popcount|\\.POPCOUNT" 1 "optimized" } } */
++
++int
++PopCount (long b)
++{
++  int c = 0;
++
++  while (b)
++    {
++      b &= b - 1;
++      c++;
++    }
++
++  return c;
++}
+-- 
+2.43.0
+
diff --git a/0053-struct-reorg-Add-Semi-Relayout.patch b/0053-struct-reorg-Add-Semi-Relayout.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f68716441813a9db7e5b4b9b73c486b8c21d8db6
--- /dev/null
+++ b/0053-struct-reorg-Add-Semi-Relayout.patch
@@ -0,0 +1,1366 @@
+From c2a0dcc565e0f6274f26644bd389337db8f2940c Mon Sep 17 00:00:00 2001
+From: tiancheng-bao 
+Date: Sat, 30 Mar 2024 11:04:23 +0800
+Subject: [PATCH] [struct-reorg] Add Semi Relayout
+
+---
+ gcc/common.opt                                |   6 +-
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      | 975 +++++++++++++++++-
+ gcc/ipa-struct-reorg/ipa-struct-reorg.h       |   8 +
+ gcc/params.opt                                |   5 +
+ .../gcc.dg/struct/semi_relayout_rewrite.c     |  86 ++
+ gcc/testsuite/gcc.dg/struct/struct-reorg.exp  |   4 +
+ 6 files changed, 1040 insertions(+), 44 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 38f1e457d..9484df5ad 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2010,9 +2010,9 @@ Common Var(flag_ipa_struct_reorg) Init(0) Optimization
+ Perform structure layout optimizations.
+ 
+ fipa-struct-reorg=
+-Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 5)
+--fipa-struct-reorg=[0,1,2,3,4,5] adding none, struct-reorg, reorder-fields,
+-dfe, safe-pointer-compression, unsafe-pointer-compression optimizations.
++Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0) IntegerRange(0, 6)
++-fipa-struct-reorg=[0,1,2,3,4,5,6] adding none, struct-reorg, reorder-fields,
++dfe, safe-pointer-compression, unsafe-pointer-compression, semi-relayout optimizations.
+ 
+ fipa-vrp
+ Common Var(flag_ipa_vrp) Optimization
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 3922873f3..6a202b4bd 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -294,7 +294,8 @@ enum struct_layout_opt_level
+   STRUCT_REORDER_FIELDS = 1 << 2,
+   DEAD_FIELD_ELIMINATION = 1 << 3,
+   POINTER_COMPRESSION_SAFE = 1 << 4,
+-  POINTER_COMPRESSION_UNSAFE = 1 << 5
++  POINTER_COMPRESSION_UNSAFE = 1 << 5,
++  SEMI_RELAYOUT = 1 << 6
+ };
+ 
+ /* Defines the target pointer size of compressed pointer, which should be 8,
+@@ -308,6 +309,7 @@ void get_base (tree &base, tree expr);
+ 
+ static unsigned int current_layout_opt_level;
+ hash_map replace_type_map;
++hash_map semi_relayout_map;
+ 
+ /* Return true if one of these types is created by struct-reorg.  */
+ 
+@@ -426,7 +428,9 @@ srtype::srtype (tree type)
+     visited (false),
+     pc_candidate (false),
+     has_legal_alloc_num (false),
+-    has_alloc_array (0)
++    has_alloc_array (0),
++    semi_relayout (false),
++    bucket_parts (0)
+ {
+   for (int i = 0; i < max_split; i++)
+     newtype[i] = NULL_TREE;
+@@ -891,6 +895,66 @@ srfield::create_new_reorder_fields (tree newtype[max_split],
+   newfield[0] = field;
+ }
+ 
++/* Given a struct s whose fields has already reordered by size, we try to
++   combine fields less than 8 bytes together to 8 bytes.  Example:
++   struct s {
++     uint64_t a,
++     uint32_t b,
++     uint32_t c,
++     uint32_t d,
++     uint16_t e,
++     uint8_t f
++   }
++
++   We allocate memory for arrays of struct S, before semi-relayout, their
++   layout in memory is shown as below:
++   [a,b,c,d,e,f,padding;a,b,c,d,e,f,padding;...]
++
++   During semi-relayout, we put a number of structs into a same region called
++   bucket.  The number is determined by param realyout-bucket-capacity-level.
++   Using 1024 here as example.  After semi-relayout, the layout in a bucket is
++   shown as below:
++   part1 [a;a;a...]
++   part2 [b,c;b,c;b,c;...]
++   part3 [d,e,f,pad;d,e,f,pad;d,e,f,pad;...]
++
++   In the last bucket, if the amount of rest structs is less than the capacity
++   of a bucket, the rest of allcated memory will be wasted as padding.  */
++
++unsigned
++srtype::calculate_bucket_size ()
++{
++  unsigned parts = 0;
++  unsigned bit_sum = 0;
++  unsigned relayout_offset = 0;
++  /* Currently, limit each 8 bytes with less than 2 fields.  */
++  unsigned curr_part_num = 0;
++  unsigned field_num = 0;
++  for (tree f = TYPE_FIELDS (newtype[0]); f; f = DECL_CHAIN (f))
++    {
++      unsigned size = TYPE_PRECISION (TREE_TYPE (f));
++      bit_sum += size;
++      field_num++;
++      if (++curr_part_num > 2 || bit_sum > 64)
++	{
++	  bit_sum = size;
++	  parts++;
++	  relayout_offset = relayout_part_size * parts;
++	  curr_part_num = 1;
++	}
++      else
++	{
++	  relayout_offset = relayout_part_size * parts + (bit_sum - size) / 8;
++	}
++      new_field_offsets.put (f, relayout_offset);
++    }
++  /* Donnot relayout a struct with only one field after DFE.  */
++  if (field_num == 1)
++    return 0;
++  bucket_parts = ++parts;
++  return parts * relayout_part_size;
++}
++
+ /* Create the new TYPE corresponding to THIS type.  */
+ 
+ bool
+@@ -1001,6 +1065,15 @@ srtype::create_new_type (void)
+   if (pc_candidate && pc_gptr == NULL_TREE)
+     create_global_ptr_for_pc ();
+ 
++  if (semi_relayout)
++    {
++      bucket_size = calculate_bucket_size ();
++      if (bucket_size == 0)
++	return false;
++      if (semi_relayout_map.get (this->newtype[0]) == NULL)
++	semi_relayout_map.put (this->newtype[0], this->type);
++    }
++
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "Created %d types:\n", maxclusters);
+@@ -1393,7 +1466,7 @@ public:
+ 		       bool should_create = false, bool can_escape = false);
+   bool wholeaccess (tree expr, tree base, tree accesstype, srtype *t);
+ 
+-  void check_alloc_num (gimple *stmt, srtype *type);
++  void check_alloc_num (gimple *stmt, srtype *type, bool ptrptr);
+   void check_definition_assign (srdecl *decl, vec &worklist);
+   void check_definition_call (srdecl *decl, vec &worklist);
+   void check_definition (srdecl *decl, vec &);
+@@ -1442,6 +1515,33 @@ public:
+ 						  tree &);
+   basic_block create_bb_for_compress_nullptr (basic_block, tree &);
+   basic_block create_bb_for_decompress_nullptr (basic_block, tree, tree &);
++
++   // Semi-relayout methods:
++  bool is_semi_relayout_candidate (tree);
++  srtype *get_semi_relayout_candidate_type (tree);
++  void check_and_prune_struct_for_semi_relayout (void);
++  tree rewrite_pointer_diff (gimple_stmt_iterator *, tree, tree, srtype *);
++  tree rewrite_pointer_plus_integer (gimple *, gimple_stmt_iterator *, tree,
++				     tree, srtype *);
++  tree build_div_expr (gimple_stmt_iterator *, tree, tree);
++  tree get_true_pointer_base (gimple_stmt_iterator *, tree, srtype *);
++  tree get_real_allocated_ptr (tree, gimple_stmt_iterator *);
++  tree set_ptr_for_use (tree, gimple_stmt_iterator *);
++  void record_allocated_size (tree, gimple_stmt_iterator *, tree);
++  tree read_allocated_size (tree, gimple_stmt_iterator *);
++  gimple *create_aligned_alloc (gimple_stmt_iterator *, srtype *, tree,
++				tree &);
++  void create_memset_zero (tree, gimple_stmt_iterator *, tree);
++  void create_memcpy (tree, tree, tree, gimple_stmt_iterator *);
++  void create_free (tree, gimple_stmt_iterator *);
++  void copy_to_lhs (tree, tree, gimple_stmt_iterator *);
++  srtype *get_relayout_candidate_type (tree);
++  long unsigned int get_true_field_offset (srfield *, srtype *);
++  tree rewrite_address (tree, srfield *, srtype *, gimple_stmt_iterator *);
++  bool check_sr_copy (gimple *);
++  void relayout_field_copy (gimple_stmt_iterator *, gimple *, tree, tree,
++			    tree&, tree &);
++  bool do_semi_relayout (gimple_stmt_iterator *, gimple *, tree &, tree &);
+ };
+ 
+ struct ipa_struct_relayout
+@@ -4355,7 +4455,7 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl,
+ }
+ 
+ void
+-ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type)
++ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type, bool ptrptr)
+ {
+   if (current_layout_opt_level >= COMPLETE_STRUCT_RELAYOUT
+       && handled_allocation_stmt (stmt))
+@@ -4363,13 +4463,28 @@ ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type)
+       tree arg0 = gimple_call_arg (stmt, 0);
+       basic_block bb = gimple_bb (stmt);
+       cgraph_node *node = current_function->node;
++      if (!ptrptr && current_layout_opt_level >= SEMI_RELAYOUT
++	  && gimple_call_builtin_p (stmt, BUILT_IN_MALLOC))
++	{
++	  /* Malloc is commonly used for allocations of
++	  a single struct and semi-relayout will waste
++	  a mess of memory, so we skip it.  */
++	  type->has_alloc_array = -4;
++	  return;
++	}
+       if (integer_onep (arg0))
+ 	/* Actually NOT an array, but may ruin other array.  */
+ 	type->has_alloc_array = -1;
+       else if (bb->loop_father != NULL
+ 	       && loop_outer (bb->loop_father) != NULL)
+-	/* The allocation is in a loop.  */
+-	type->has_alloc_array = -2;
++	{
++	  /* For semi-relayout, do not escape realloc.  */
++	  if (current_layout_opt_level & SEMI_RELAYOUT
++	      && gimple_call_builtin_p (stmt, BUILT_IN_REALLOC))
++	    return;
++	  /* The allocation is in a loop.  */
++	  type->has_alloc_array = -2;
++	}
+       else if (node->callers != NULL)
+ 	type->has_alloc_array = -3;
+       else
+@@ -4448,6 +4563,13 @@ ipa_struct_reorg::check_definition_assign (srdecl *decl,
+       return;
+     }
+ 
++  if (semi_relayout_map.get (type->type) != NULL)
++    {
++      if (current_layout_opt_level != COMPLETE_STRUCT_RELAYOUT)
++	type->mark_escape (escape_unhandled_rewrite, stmt);
++      return;
++    }
++
+   /* d) if the name is from a cast/assignment, make sure it is used as
+ 	that type or void*
+ 	i) If void* then push the ssa_name into worklist.  */
+@@ -4484,7 +4606,8 @@ ipa_struct_reorg::check_definition_call (srdecl *decl, vec &worklist)
+ 	type->mark_escape (escape_return, stmt);
+     }
+ 
+-  check_alloc_num (stmt, type);
++  bool ptrptr = isptrptr (decl->orig_type);
++  check_alloc_num (stmt, type, ptrptr);
+   return;
+ }
+ 
+@@ -6038,6 +6161,55 @@ ipa_struct_reorg::pc_candidate_tree_p (tree xhs)
+   return false;
+ }
+ 
++srtype *
++ipa_struct_reorg::get_semi_relayout_candidate_type (tree xhs)
++{
++  if (xhs == NULL)
++    return NULL;
++  if (TREE_CODE (xhs) == SSA_NAME || TREE_CODE (xhs) == COMPONENT_REF)
++    {
++      srtype *access_type = find_type (inner_type (TREE_TYPE (xhs)));
++      if (access_type != NULL && access_type->semi_relayout)
++	return access_type;
++    }
++  return NULL;
++}
++
++bool
++ipa_struct_reorg::is_semi_relayout_candidate (tree xhs)
++{
++  if (xhs == NULL)
++    return false;
++
++  if (TREE_CODE (xhs) == SSA_NAME)
++    xhs = TREE_TYPE (xhs);
++
++  if (TREE_CODE (xhs) == POINTER_TYPE)
++    {
++      srtype *var_type = find_type (TREE_TYPE (xhs));
++      if (!var_type || var_type->has_escaped ())
++	return false;
++      if (var_type->semi_relayout)
++	return true;
++    }
++
++  if (TREE_CODE (xhs) == COMPONENT_REF)
++    {
++      tree mem = TREE_OPERAND (xhs, 0);
++      if (TREE_CODE (mem) == MEM_REF)
++	{
++	  tree type = TREE_TYPE (mem);
++	  srtype *old_type = get_relayout_candidate_type (type);
++	  if (!old_type)
++	    return false;
++	  if (types_compatible_p (type, old_type->type)
++	      && old_type->semi_relayout)
++	    return true;
++  	}
++    }
++  return false;
++}
++
+ /* True if xhs is a component_ref that base has escaped but uses a compression
+    candidate type.  */
+ 
+@@ -6388,7 +6560,7 @@ ipa_struct_reorg::decompress_candidate_without_check (gimple_stmt_iterator *gsi,
+ 		}
+ 	    }
+ 	  /* -> _1 = t->s
+-	        _2 = _1->s
++		_2 = _1->s
+ 	     In this case, _1 might not be nullptr, so decompress it without
+ 	     check.  */
+ 	  else if (TREE_CODE (next_rhs) == COMPONENT_REF)
+@@ -6582,6 +6754,426 @@ ipa_struct_reorg::try_rewrite_with_pointer_compression (gassign *stmt,
+     }
+ }
+ 
++tree
++ipa_struct_reorg::rewrite_pointer_diff (gimple_stmt_iterator *gsi, tree ptr1,
++					tree ptr2, srtype *type)
++{
++  tree shifts = build_int_cst (long_integer_type_node, semi_relayout_align);
++  tree pointer_type = build_pointer_type (unsigned_char_type_node);
++  // tree pointer_type = build_pointer_type (long_integer_type_node);
++  tree intptr_type = signed_type_for (pointer_type);
++
++  /* addr_high_1 = (intptr_t)ptr1 >> shifts  */
++  tree ptr1_cvt = fold_convert (intptr_type, ptr1);
++  tree addr_high_1 = gimplify_build2 (gsi, RSHIFT_EXPR, intptr_type,
++				      ptr1_cvt, shifts);
++  /* addr_high_2 = (intptr_t)ptr2 >> shifts  */
++  tree ptr2_cvt = fold_convert (intptr_type, ptr2);
++  tree addr_high_2 = gimplify_build2 (gsi, RSHIFT_EXPR, intptr_type,
++				      ptr2_cvt, shifts);
++  /* off1 = (intptr_t)ptr1 - (addr_high_1 << shifts)  */
++  tree bucket_start_1 = gimplify_build2 (gsi, LSHIFT_EXPR, intptr_type,
++					 addr_high_1, shifts);
++  tree off1 = gimplify_build2 (gsi, MINUS_EXPR, intptr_type,
++			       ptr1_cvt, bucket_start_1);
++  /* off2 = (intptr_t)ptr2 - (addr_high_2 << shifts)  */
++  tree bucket_start_2 = gimplify_build2 (gsi, LSHIFT_EXPR, intptr_type,
++					 addr_high_2, shifts);
++  tree off2 = gimplify_build2 (gsi, MINUS_EXPR, intptr_type,
++			       ptr2_cvt, bucket_start_2);
++  /* group_diff = (addr_high_1 - addr_high_2) / bucket_parts  */
++  tree bucket_sub = gimplify_build2 (gsi, MINUS_EXPR, intptr_type,
++				     addr_high_1, addr_high_2);
++  tree bucket_parts = build_int_cst (intptr_type,
++				     type->bucket_parts);
++  tree group_diff = gimplify_build2 (gsi, TRUNC_DIV_EXPR,
++				     intptr_type,
++				     bucket_sub, bucket_parts);
++  /* off_addr_diff = off1 - off2  */
++  tree off_addr_diff = gimplify_build2 (gsi, MINUS_EXPR, intptr_type,
++					off1, off2);
++  /* res = group_diff * bucket_capacity + off_diff / 8  */
++  tree capacity = build_int_cst (long_integer_type_node,
++				 relayout_part_size / 8);
++  tree unit_size = build_int_cst (long_integer_type_node, 8);
++  tree bucket_index_diff = gimplify_build2 (gsi, MULT_EXPR,
++					    intptr_type,
++					    group_diff, capacity);
++  tree off_index = gimplify_build2 (gsi, TRUNC_DIV_EXPR,
++				    long_integer_type_node,
++				    off_addr_diff, unit_size);
++  tree res = gimplify_build2 (gsi, PLUS_EXPR, intptr_type,
++			      bucket_index_diff, off_index);
++  return res;
++}
++
++basic_block
++create_bb_for_group_diff_eq_0 (basic_block last_bb, tree phi, tree new_granule)
++{
++  basic_block new_bb = create_empty_bb (last_bb);
++  if (last_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (new_bb, last_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  /* Emit res = new_granule;  */
++  gimple_stmt_iterator gsi = gsi_last_bb (new_bb);
++  gimple *new_stmt = gimple_build_assign (phi, new_granule);
++  gsi_insert_after (&gsi, new_stmt, GSI_NEW_STMT);
++  return new_bb;
++}
++
++basic_block
++create_bb_for_group_diff_ne_0 (basic_block new_bb, tree &phi, tree ptr,
++			       tree group_diff, tree off_times_8, srtype *type)
++{
++  tree intptr_type = signed_type_for (long_unsigned_type_node);
++  tree shifts = build_int_cst (intptr_type, semi_relayout_align);
++  gimple_stmt_iterator gsi = gsi_last_bb (new_bb);
++  gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT);
++  tree ptr_cvt = fold_convert (intptr_type, ptr);
++  /* curr_group_start = (ptr >> shifts) << shifts;  */
++  tree ptr_r_1 = gimplify_build2 (&gsi, RSHIFT_EXPR, intptr_type,
++				  ptr_cvt, shifts);
++  tree curr_group_start = gimplify_build2 (&gsi, LSHIFT_EXPR, intptr_type,
++					   ptr_r_1, shifts);
++  /* curr_off_from_group = ptr - curr_group_start;  */
++  tree curr_off_from_group = gimplify_build2 (&gsi, MINUS_EXPR,
++					      intptr_type,
++					      ptr_cvt, curr_group_start);
++  /* res = curr_group_start + ((group_diff * parts) << shifts)
++	   + ((curr_off_from_group + off_times_8) % shifts);  */
++  tree step1 = gimplify_build2 (&gsi, MULT_EXPR, long_integer_type_node,
++				group_diff, build_int_cst (
++				long_integer_type_node, type->bucket_parts));
++  tree step1_cvt = fold_convert (intptr_type, step1);
++  tree step2 = gimplify_build2 (&gsi, LSHIFT_EXPR, intptr_type,
++				step1_cvt, shifts);
++  tree off_times_8_cvt = fold_convert (intptr_type, off_times_8);
++  tree step3 = gimplify_build2 (&gsi, PLUS_EXPR, intptr_type,
++				curr_off_from_group, off_times_8_cvt);
++  tree step4 = gimplify_build2 (&gsi, TRUNC_MOD_EXPR, intptr_type,
++				step3, build_int_cst (intptr_type,
++				relayout_part_size));
++  tree step5 = gimplify_build2 (&gsi, PLUS_EXPR, intptr_type,
++				step2, step4);
++  tree res_phi1 = gimplify_build2 (&gsi, PLUS_EXPR, long_integer_type_node,
++				   curr_group_start, step5);
++  /* if (group_diff < 0)  */
++  gcond *cond = gimple_build_cond (LT_EXPR, group_diff,
++				   build_int_cst (long_integer_type_node, 0),
++				   NULL_TREE, NULL_TREE);
++  gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
++  /* remove nop  */
++  gsi_remove (&gsi, true);
++  /* res += shifts  */
++  basic_block true_bb = create_empty_bb (new_bb);
++  if (new_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (true_bb, new_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  gimple_stmt_iterator true_gsi = gsi_last_bb (true_bb);
++  tree res_phi2 = make_ssa_name (long_integer_type_node);
++  gimple *new_stmt
++		= gimple_build_assign (res_phi2, PLUS_EXPR, res_phi1,
++				       build_int_cst (long_integer_type_node,
++				       relayout_part_size));
++  gsi_insert_after (&true_gsi, new_stmt, GSI_NEW_STMT);
++  /* create phi bb  */
++  basic_block res_bb = create_empty_bb (true_bb);
++  if (new_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (res_bb, new_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  /* rebuild cfg  */
++  edge etrue = make_edge (new_bb, true_bb, EDGE_TRUE_VALUE);
++  etrue->probability = profile_probability::unlikely ();
++  true_bb->count = etrue->count ();
++
++  edge efalse = make_edge (new_bb, res_bb, EDGE_FALSE_VALUE);
++  efalse->probability = profile_probability::likely ();
++  res_bb->count = efalse->count ();
++
++  edge efall = make_single_succ_edge (true_bb, res_bb, EDGE_FALLTHRU);
++
++  phi = make_ssa_name (long_integer_type_node);
++  gphi *phi_node = create_phi_node (phi, res_bb);
++  add_phi_arg (phi_node, res_phi2, efall, UNKNOWN_LOCATION);
++  add_phi_arg (phi_node, res_phi1, efalse, UNKNOWN_LOCATION);
++
++  if (dom_info_available_p (CDI_DOMINATORS))
++    {
++      set_immediate_dominator (CDI_DOMINATORS, true_bb, new_bb);
++      set_immediate_dominator (CDI_DOMINATORS, res_bb, new_bb);
++    }
++  return res_bb;
++}
++
++tree
++ipa_struct_reorg::rewrite_pointer_plus_integer (gimple *stmt,
++						gimple_stmt_iterator *gsi,
++						tree ptr, tree offset,
++						srtype *type)
++{
++  gcc_assert (type->semi_relayout);
++  tree off = fold_convert (long_integer_type_node, offset);
++  tree num_8 = build_int_cst (long_integer_type_node, 8);
++  tree shifts = build_int_cst (long_integer_type_node, semi_relayout_align);
++  // tree shifts = build_int_cst (integer_type_node, semi_relayout_align);
++  /* off_times_8 = off * 8;  */
++  tree off_times_8 = gimplify_build2 (gsi, MULT_EXPR, long_integer_type_node,
++				      off, num_8);
++  /* new_granule = ptr + off * 8;  */
++  tree ptr_int = fold_convert (long_integer_type_node, ptr);
++  tree new_granule = gimplify_build2 (gsi, PLUS_EXPR, long_integer_type_node,
++				      ptr_int, off_times_8);
++  /* group_diff = (new_granule >> shifts) - (ptr >> shifts);  */
++  tree group_diff_rhs_1 = gimplify_build2 (gsi, RSHIFT_EXPR,
++					   long_integer_type_node,
++					   new_granule, shifts);
++  tree group_diff_rhs_2 = gimplify_build2 (gsi, RSHIFT_EXPR,
++					   long_integer_type_node,
++					   ptr_int, shifts);
++  tree group_diff = gimplify_build2 (gsi, MINUS_EXPR, long_integer_type_node,
++				     group_diff_rhs_1, group_diff_rhs_2);
++  /* if (group_diff == 0)  */
++  gcond *cond = gimple_build_cond (EQ_EXPR, group_diff,
++				   build_int_cst (long_integer_type_node, 0),
++				   NULL_TREE, NULL_TREE);
++  gimple_set_location (cond, UNKNOWN_LOCATION);
++  gsi_insert_before (gsi, cond, GSI_SAME_STMT);
++
++  edge e = split_block (cond->bb, cond);
++  basic_block split_src_bb = e->src;
++  basic_block split_dst_bb = e->dest;
++  remove_edge_raw (e);
++  /* if (group_diff == 0)
++       res = new_granule;  */
++  tree res_phi_1 = make_ssa_name (long_integer_type_node);
++  basic_block true_bb = create_bb_for_group_diff_eq_0 (split_src_bb, res_phi_1,
++						       new_granule);
++  /* else  */
++  tree res_phi_2 = NULL_TREE;
++  basic_block false_bb = create_empty_bb (split_src_bb);
++  if (split_src_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (false_bb, split_src_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++
++  edge etrue = make_edge (split_src_bb, true_bb, EDGE_TRUE_VALUE);
++  etrue->probability = profile_probability::very_likely ();
++  true_bb->count = etrue->count ();
++
++  edge efalse = make_edge (split_src_bb, false_bb, EDGE_FALSE_VALUE);
++  efalse->probability = profile_probability::unlikely ();
++  false_bb->count = efalse->count ();
++  basic_block res_bb = create_bb_for_group_diff_ne_0 (false_bb, res_phi_2,
++						      ptr_int, group_diff,
++						      off_times_8, type);
++  /* rebuild cfg  */
++  edge e_true_fall = make_single_succ_edge (true_bb, split_dst_bb,
++					    EDGE_FALLTHRU);
++  edge e_false_fall = make_single_succ_edge (res_bb, split_dst_bb,
++					     EDGE_FALLTHRU);
++  tree res_int = make_ssa_name (long_integer_type_node);
++  gphi *phi_node = create_phi_node (res_int, split_dst_bb);
++  add_phi_arg (phi_node, res_phi_1, e_true_fall, UNKNOWN_LOCATION);
++  add_phi_arg (phi_node, res_phi_2, e_false_fall, UNKNOWN_LOCATION);
++  if (dom_info_available_p (CDI_DOMINATORS))
++    {
++      set_immediate_dominator (CDI_DOMINATORS, split_dst_bb, split_src_bb);
++      set_immediate_dominator (CDI_DOMINATORS, true_bb, split_src_bb);
++      set_immediate_dominator (CDI_DOMINATORS, false_bb, split_src_bb);
++    }
++  *gsi = gsi_start_bb (split_dst_bb);
++  tree pointer_type = build_pointer_type (unsigned_char_type_node);
++  tree res = gimplify_build1 (gsi, NOP_EXPR, pointer_type, res_int);
++  return res;
++}
++
++tree
++ipa_struct_reorg::build_div_expr (gimple_stmt_iterator *gsi,
++				  tree expr, tree orig_size)
++{
++  tree div_expr = build2 (TRUNC_DIV_EXPR, long_unsigned_type_node,
++			  expr, orig_size);
++  tree num = make_ssa_name (long_unsigned_type_node);
++  gimple *g = gimple_build_assign (num, div_expr);
++  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++  return num;
++}
++
++srtype *
++ipa_struct_reorg::get_relayout_candidate_type (tree type)
++{
++  if (type == NULL)
++    return NULL;
++  if (TREE_CODE (type) != RECORD_TYPE)
++    return NULL;
++  return find_type (inner_type (type));
++}
++
++long unsigned int
++ipa_struct_reorg::get_true_field_offset (srfield *field, srtype *type)
++{
++  unsigned HOST_WIDE_INT new_offset;
++  new_offset = *(type->new_field_offsets.get (field->newfield[0]));
++  return new_offset;
++}
++
++tree
++ipa_struct_reorg::get_true_pointer_base (gimple_stmt_iterator *gsi,
++					 tree mem_ref, srtype *type)
++{
++  tree ptr = TREE_OPERAND (mem_ref, 0);
++  tree off_bytes = TREE_OPERAND (mem_ref, 1);
++  unsigned num = tree_to_shwi (off_bytes);
++  if (num == 0)
++    return ptr;
++  tree orig_size = TYPE_SIZE_UNIT (TREE_TYPE (mem_ref));
++  tree off = build_int_cst (long_integer_type_node,
++			    num / tree_to_uhwi (orig_size));
++  gimple *stmt = gsi_stmt (*gsi);
++  tree new_pointer_base = rewrite_pointer_plus_integer (stmt, gsi, ptr,
++							off, type);
++  return new_pointer_base;
++}
++
++tree
++ipa_struct_reorg::rewrite_address (tree pointer_base, srfield *field,
++				   srtype *type, gimple_stmt_iterator *gsi)
++{
++  unsigned HOST_WIDE_INT field_offset = get_true_field_offset (field, type);
++
++  tree pointer_ssa = fold_convert (long_unsigned_type_node, pointer_base);
++  tree step1 = gimplify_build1 (gsi, NOP_EXPR, long_unsigned_type_node,
++				pointer_ssa);
++  tree new_offset_ssa = build_int_cst (long_unsigned_type_node, field_offset);
++  tree step2 = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node, step1,
++				new_offset_ssa);
++  tree field_ssa = fold_convert (
++		   build_pointer_type (TREE_TYPE (field->newfield[0])), step2);
++  tree step3 = gimplify_build1 (gsi, NOP_EXPR,
++				TREE_TYPE (field_ssa), field_ssa);
++
++  tree new_mem_ref = fold_build2 (MEM_REF, TREE_TYPE (field->newfield[0]),
++				  step3, build_int_cst (
++				  TREE_TYPE (field_ssa), 0));
++  return new_mem_ref;
++}
++
++bool
++ipa_struct_reorg::check_sr_copy (gimple *stmt)
++{
++  tree lhs = gimple_assign_lhs (stmt);
++  tree rhs = gimple_assign_rhs1 (stmt);
++
++  if (TREE_CODE (lhs) != MEM_REF || TREE_CODE (rhs) != MEM_REF)
++    return false;
++  srtype *t1 = get_relayout_candidate_type (TREE_TYPE (lhs));
++  srtype *t2 = get_relayout_candidate_type (TREE_TYPE (rhs));
++  if (!t1 || !t2 || !t1->semi_relayout || !t2->semi_relayout || t1 != t2)
++    return false;
++  tree pointer1 = TREE_OPERAND (lhs, 0);
++  tree pointer2 = TREE_OPERAND (rhs, 0);
++  if (TREE_CODE (TREE_TYPE (pointer1)) != POINTER_TYPE
++      || TREE_CODE (TREE_TYPE (pointer2)) != POINTER_TYPE)
++    return false;
++
++  tree type1 = TREE_TYPE (TREE_TYPE (pointer1));
++  tree type2 = TREE_TYPE (TREE_TYPE (pointer2));
++
++  srtype *t3 = get_relayout_candidate_type (type1);
++  srtype *t4 = get_relayout_candidate_type (type2);
++
++  if (t3 != t4 || t3 != t1)
++    return false;
++
++  return true;
++}
++
++void
++ipa_struct_reorg::relayout_field_copy (gimple_stmt_iterator *gsi,
++				       gimple *stmt ATTRIBUTE_UNUSED,
++				       tree lhs, tree rhs ATTRIBUTE_UNUSED,
++				       tree &newlhs, tree &newrhs)
++{
++  srtype *type = get_relayout_candidate_type (TREE_TYPE (lhs));
++  tree lhs_base_pointer = get_true_pointer_base (gsi, newlhs, type);
++  tree rhs_base_pointer = get_true_pointer_base (gsi, newrhs, type);
++  tree new_l_mem_ref = NULL_TREE;
++  tree new_r_mem_ref = NULL_TREE;
++  srfield *field = NULL;
++  unsigned i = 0;
++  FOR_EACH_VEC_ELT (type->fields, i, field)
++    {
++      if (!field->newfield[0])
++	continue;
++      new_l_mem_ref = rewrite_address (lhs_base_pointer, field, type, gsi);
++      new_r_mem_ref = rewrite_address (rhs_base_pointer, field, type, gsi);
++      if (!is_gimple_reg (new_l_mem_ref))
++	{
++	  tree tmp_reg = create_tmp_reg (TREE_TYPE (new_l_mem_ref));
++	  gimple *copy_stmt = gimple_build_assign (tmp_reg, new_r_mem_ref);
++	  gsi_insert_before (gsi, copy_stmt, GSI_SAME_STMT);
++	  new_r_mem_ref = tmp_reg;
++	}
++      gimple *new_stmt = gimple_build_assign (new_l_mem_ref, new_r_mem_ref);
++      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
++    }
++  gcc_assert (new_l_mem_ref != NULL_TREE && new_r_mem_ref != NULL_TREE);
++  newlhs = new_l_mem_ref;
++  newrhs = new_r_mem_ref;
++}
++
++bool
++ipa_struct_reorg::do_semi_relayout (gimple_stmt_iterator *gsi, gimple *stmt,
++				    tree &newlhs, tree &newrhs)
++{
++  tree lhs = gimple_assign_lhs (stmt);
++  tree rhs = gimple_assign_rhs1 (stmt);
++
++  bool l = TREE_CODE (lhs) == COMPONENT_REF ? is_semi_relayout_candidate (lhs)
++					    : false;
++  bool r = TREE_CODE (rhs) == COMPONENT_REF ? is_semi_relayout_candidate (rhs)
++					    : false;
++
++  gcc_assert (!(l && r));
++
++  if (!l && !r)
++    {
++      if (check_sr_copy (stmt))
++	{
++	  relayout_field_copy (gsi, stmt, lhs, rhs, newlhs, newrhs);
++	  return true;
++	}
++    }
++  else if (l)
++    {
++      srtype *type = get_relayout_candidate_type (
++				TREE_TYPE (TREE_OPERAND (lhs, 0)));
++      srfield *new_field = type->find_field (
++				int_byte_position (TREE_OPERAND (lhs, 1)));
++      tree pointer_base = get_true_pointer_base (
++				gsi, TREE_OPERAND (newlhs, 0), type);
++      newlhs = rewrite_address (pointer_base, new_field, type, gsi);
++    }
++  else if (r)
++    {
++      srtype *type = get_relayout_candidate_type (
++				TREE_TYPE (TREE_OPERAND (rhs, 0)));
++      srfield *new_field = type->find_field (
++				int_byte_position (TREE_OPERAND (rhs, 1)));
++      tree pointer_base = get_true_pointer_base (
++				gsi, TREE_OPERAND (newrhs, 0), type);
++      newrhs = rewrite_address (pointer_base, new_field, type, gsi);
++    }
++  return false;
++}
++
+ bool
+ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ {
+@@ -6677,7 +7269,8 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+       tree size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (lhs)));
+       tree num;
+       /* Check if rhs2 is a multiplication of the size of the type.  */
+-      if (!is_result_of_mult (rhs2, &num, size))
++      if (!is_result_of_mult (rhs2, &num, size)
++	  && !(current_layout_opt_level & SEMI_RELAYOUT))
+ 	internal_error (
+ 	  "The rhs of pointer is not a multiplicate and it slips through");
+ 
+@@ -6698,12 +7291,39 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	      tree newsize = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (newlhs[i])));
+ 	      newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num,
+ 					 newsize);
++	      if (current_layout_opt_level >= SEMI_RELAYOUT)
++		{
++		  if (is_semi_relayout_candidate (lhs))
++		    {
++		      srtype *type = get_semi_relayout_candidate_type (lhs);
++		      newrhs[i] = rewrite_pointer_plus_integer (stmt, gsi,
++								newrhs[i],
++								num, type);
++		      newsize = build_int_cst (long_unsigned_type_node, 0);
++		    }
++		}
+ 	      new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR,
+ 					      newrhs[i], newsize);
+ 	    }
+ 	  else
+-	    new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR,
+-					    newrhs[i], rhs2);
++	    {
++	      /* rhs2 is not a const integer  */
++	      if (current_layout_opt_level >= SEMI_RELAYOUT)
++		{
++		  if (is_semi_relayout_candidate (lhs))
++		    {
++		      num = build_div_expr (gsi, rhs2,
++					    build_int_cst (
++					    long_unsigned_type_node, 1));
++		      srtype *type = get_semi_relayout_candidate_type (lhs);
++		      newrhs[i] = rewrite_pointer_plus_integer (stmt,
++								gsi, newrhs[i], num, type);
++		      rhs2 = build_int_cst (long_unsigned_type_node, 0);
++		    }
++		}
++	      new_stmt = gimple_build_assign (newlhs[i], POINTER_PLUS_EXPR,
++					      newrhs[i], rhs2);
++	    }
+ 	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
+ 	  remove = true;
+ 	}
+@@ -6744,13 +7364,34 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	return false;
+ 
+       /* The two operands always have pointer/reference type.  */
+-      for (unsigned i = 0; i < max_split && newrhs1[i] && newrhs2[i]; i++)
++      if (current_layout_opt_level >= SEMI_RELAYOUT
++	  && (is_semi_relayout_candidate (rhs1) || is_semi_relayout_candidate (rhs2)))
+ 	{
+-	  gimple_assign_set_rhs1 (stmt, newrhs1[i]);
+-	  gimple_assign_set_rhs2 (stmt, newrhs2[i]);
+-	  update_stmt (stmt);
++	  for (unsigned i = 0; i < max_split && newrhs1[i] && newrhs2[i]; i++)
++	    {
++	      srtype *type = get_semi_relayout_candidate_type (rhs1);
++	      if (!type)
++		{
++		  type = get_semi_relayout_candidate_type (rhs2);
++		}
++	      gcc_assert (type != NULL);
++	      tree res = rewrite_pointer_diff (gsi, newrhs1[i],
++					       newrhs2[i], type);
++	      gimple *g = gimple_build_assign (gimple_assign_lhs (stmt),
++					       res);
++	      gsi_insert_before (gsi, g, GSI_SAME_STMT);
++	    }
++	  remove = true;
++	}
++      else
++	{
++	  for (unsigned i = 0; i < max_split && newrhs1[i] && newrhs2[i]; i++)
++	    {
++	      gimple_assign_set_rhs1 (stmt, newrhs1[i]);
++	      gimple_assign_set_rhs2 (stmt, newrhs2[i]);
++	      update_stmt (stmt);
++	    }
+ 	}
+-      remove = false;
+       return remove;
+     }
+ 
+@@ -6777,18 +7418,24 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	fprintf (dump_file, "\nreplaced with:\n");
+       for (unsigned i = 0; i < max_split && (newlhs[i] || newrhs[i]); i++)
+ 	{
++	  bool fields_copied = false;
++	  if (current_layout_opt_level & SEMI_RELAYOUT)
++	    fields_copied = do_semi_relayout (gsi, stmt, newlhs[i], newrhs[i]);
+ 	  if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE)
+ 	    try_rewrite_with_pointer_compression (stmt, gsi, lhs, rhs,
+ 						  newlhs[i], newrhs[i]);
+-	  gimple *newstmt = gimple_build_assign (newlhs[i] ? newlhs[i] : lhs,
+-						 newrhs[i] ? newrhs[i] : rhs);
++	  remove = true;
++	  if (fields_copied)
++	    continue;
++	  tree lhs_expr = newlhs[i] ? newlhs[i] : lhs;
++	  tree rhs_expr = newrhs[i] ? newrhs[i] : rhs;
++	  gimple *newstmt = gimple_build_assign (lhs_expr, rhs_expr);
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+ 	    {
+ 	      print_gimple_stmt (dump_file, newstmt, 0);
+ 	      fprintf (dump_file, "\n");
+ 	    }
+ 	  gsi_insert_before (gsi, newstmt, GSI_SAME_STMT);
+-	  remove = true;
+ 	}
+       return remove;
+     }
+@@ -6796,6 +7443,110 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+   return remove;
+ }
+ 
++tree
++ipa_struct_reorg::get_real_allocated_ptr (tree ptr, gimple_stmt_iterator *gsi)
++{
++  tree ptr_to_int = fold_convert (long_unsigned_type_node, ptr);
++  tree align = build_int_cst (long_unsigned_type_node, relayout_part_size);
++  tree real_addr = gimplify_build2 (gsi, MINUS_EXPR, long_unsigned_type_node,
++				    ptr_to_int, align);
++  tree res = gimplify_build1 (gsi, NOP_EXPR,
++			      build_pointer_type (long_unsigned_type_node),
++			      real_addr);
++  return res;
++}
++
++tree
++ipa_struct_reorg::set_ptr_for_use (tree ptr, gimple_stmt_iterator *gsi)
++{
++  tree ptr_to_int = fold_convert (long_unsigned_type_node, ptr);
++  tree align = build_int_cst (long_unsigned_type_node, relayout_part_size);
++  tree ptr_int = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node,
++				  ptr_to_int, align);
++  tree res = gimplify_build1 (gsi, NOP_EXPR,
++			      build_pointer_type (long_unsigned_type_node),
++			      ptr_int);
++  return res;
++}
++
++void
++ipa_struct_reorg::record_allocated_size (tree ptr, gimple_stmt_iterator *gsi,
++					 tree size)
++{
++  tree lhs = fold_build2 (MEM_REF, long_unsigned_type_node, ptr,
++			  build_int_cst (build_pointer_type (
++			  long_unsigned_type_node), 0));
++  gimple *stmt = gimple_build_assign (lhs, size);
++  gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
++}
++
++tree
++ipa_struct_reorg::read_allocated_size (tree ptr, gimple_stmt_iterator *gsi)
++{
++  tree to_type = build_pointer_type (long_unsigned_type_node);
++  tree off = build_int_cst (to_type, 0);
++  tree size = gimplify_build2 (gsi, MEM_REF, long_unsigned_type_node,
++			       ptr, off);
++  return size;
++}
++
++gimple *
++ipa_struct_reorg::create_aligned_alloc (gimple_stmt_iterator *gsi,
++					srtype *type, tree num, tree &size)
++{
++  tree fn = builtin_decl_implicit (BUILT_IN_ALIGNED_ALLOC);
++
++  tree align = build_int_cst (long_unsigned_type_node, relayout_part_size);
++  unsigned bucket_size = type->bucket_size;
++
++  tree nbuckets = gimplify_build2 (gsi, CEIL_DIV_EXPR, long_unsigned_type_node,
++				   num, build_int_cst (long_unsigned_type_node,
++				   relayout_part_size / 8));
++  tree use_size = gimplify_build2 (gsi, MULT_EXPR, long_unsigned_type_node,
++				   nbuckets, build_int_cst (
++				   long_unsigned_type_node, bucket_size));
++  size = gimplify_build2 (gsi, PLUS_EXPR, long_unsigned_type_node,
++			  use_size, align);
++  gimple *g = gimple_build_call (fn, 2, align, size);
++  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++  return g;
++}
++
++void
++ipa_struct_reorg::create_memset_zero (tree ptr, gimple_stmt_iterator *gsi,
++				      tree size)
++{
++  tree fn = builtin_decl_implicit (BUILT_IN_MEMSET);
++  tree val = build_int_cst (long_unsigned_type_node, 0);
++  gimple *g = gimple_build_call (fn, 3, ptr, val, size);
++  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++}
++
++void
++ipa_struct_reorg::create_memcpy (tree src, tree dst, tree size,
++				 gimple_stmt_iterator *gsi)
++{
++  tree fn = builtin_decl_implicit (BUILT_IN_MEMCPY);
++  gimple *g = gimple_build_call (fn, 3, dst, src, size);
++  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++}
++
++void
++ipa_struct_reorg::create_free (tree ptr, gimple_stmt_iterator *gsi)
++{
++  tree fn = builtin_decl_implicit (BUILT_IN_FREE);
++  gimple *g = gimple_build_call (fn, 1, ptr);
++  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++}
++
++void
++ipa_struct_reorg::copy_to_lhs (tree lhs, tree new_lhs,
++			       gimple_stmt_iterator *gsi)
++{
++  gimple *g = gimple_build_assign (lhs, new_lhs);
++  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++}
++
+ /* Rewrite function call statement STMT.  Return TRUE if the statement
+    is to be removed.  */
+ 
+@@ -6837,25 +7588,74 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+ 			 ? TYPE_SIZE_UNIT (decl->orig_type)
+ 			 : TYPE_SIZE_UNIT (type->newtype[i]);
+ 	  gimple *g;
+-	  /* Every allocation except for calloc needs
+-	     the size multiplied out.  */
+-	  if (!gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
+-	    newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype, num, newsize);
+-
+-	  if (gimple_call_builtin_p (stmt, BUILT_IN_MALLOC)
+-	      || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA))
+-	    g = gimple_build_call (gimple_call_fndecl (stmt),
+-				   1, newsize);
+-	  else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
+-	    g = gimple_build_call (gimple_call_fndecl (stmt),
+-				   2, num, newsize);
+-	  else if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC))
+-	    g = gimple_build_call (gimple_call_fndecl (stmt),
+-				   2, newrhs1[i], newsize);
+-	  else
+-	    gcc_assert (false);
+-	  gimple_call_set_lhs (g, decl->newdecl[i]);
+-	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
++	  bool rewrite = false;
++	  if (current_layout_opt_level >= SEMI_RELAYOUT
++	      && type->semi_relayout)
++	    {
++	      if (gimple_call_builtin_p (stmt, BUILT_IN_MALLOC))
++		;
++	      else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
++		{
++		  tree rhs2 = gimple_call_arg (stmt, 1);
++		  if (tree_to_uhwi (rhs2) == tree_to_uhwi (TYPE_SIZE_UNIT (type->type)))
++		    {
++		      rewrite = true;
++		      tree size = NULL_TREE;
++		      g = create_aligned_alloc (gsi, type, num, size);
++		      tree real_ptr = make_ssa_name (build_pointer_type (unsigned_char_type_node));
++		      gimple_set_lhs (g, real_ptr);
++		      create_memset_zero (real_ptr, gsi, size);
++		      record_allocated_size (real_ptr, gsi, size);
++		      tree lhs_use = set_ptr_for_use (real_ptr, gsi);
++		      copy_to_lhs (decl->newdecl[i], lhs_use, gsi);
++		    }
++		}
++	      else if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC))
++		{
++		  rewrite = true;
++		  tree size = NULL_TREE;
++		  g = create_aligned_alloc (gsi, type, num, size);
++		  tree real_ptr = make_ssa_name (build_pointer_type (unsigned_char_type_node));
++		  gimple_set_lhs (g, real_ptr);
++		  create_memset_zero (real_ptr, gsi, size);
++		  tree src = get_real_allocated_ptr (newrhs1[i], gsi);
++		  tree old_size = read_allocated_size (src, gsi);
++		  create_memcpy (src, real_ptr, old_size, gsi);
++		  record_allocated_size (real_ptr, gsi, size);
++		  tree lhs_use = set_ptr_for_use (real_ptr, gsi);
++		  create_free (src, gsi);
++		  copy_to_lhs (decl->newdecl[i], lhs_use, gsi);
++		}
++	      else
++		{
++		  gcc_assert (false);
++		  internal_error ("unsupported type for semi-relayout.");
++		}
++	    }
++	  if (!rewrite
++	      && (current_layout_opt_level >= STRUCT_REORDER_FIELDS
++		  || current_layout_opt_level == STRUCT_SPLIT))
++	    {
++	      /* Every allocation except for calloc needs the size multiplied out.  */
++	      if (!gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
++		newsize = gimplify_build2 (gsi, MULT_EXPR, sizetype,
++					   num, newsize);
++	      if (gimple_call_builtin_p (stmt, BUILT_IN_MALLOC)
++		  || gimple_call_builtin_p (stmt, BUILT_IN_ALLOCA))
++		g = gimple_build_call (gimple_call_fndecl (stmt), 1, newsize);
++	      else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
++		g = gimple_build_call (gimple_call_fndecl (stmt), 2,
++				       num, newsize);
++	      else if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC))
++		g = gimple_build_call (gimple_call_fndecl (stmt), 2,
++				       newrhs1[i], newsize);
++	      else
++		gcc_assert (false);
++	      gimple_call_set_lhs (g, decl->newdecl[i]);
++	      gsi_insert_before (gsi, g, GSI_SAME_STMT);
++	    }
++
++
+ 	  if (type->pc_candidate)
+ 	    {
+ 	      /* Init global header for pointer compression.  */
+@@ -6875,11 +7675,14 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+       if (!rewrite_expr (expr, newexpr))
+ 	return false;
+ 
++      srtype *t = find_type (TREE_TYPE (TREE_TYPE (expr)));
+       if (newexpr[1] == NULL)
+ 	{
+-	  gimple_call_set_arg (stmt, 0, newexpr[0]);
+-	  update_stmt (stmt);
+-	  return false;
++	  if (t && t->semi_relayout)
++	    newexpr[0] = get_real_allocated_ptr (newexpr[0], gsi);
++	  gimple_call_set_arg (stmt, 0, newexpr[0]);
++	  update_stmt (stmt);
++	  return false;
+ 	}
+ 
+       for (unsigned i = 0; i < max_split && newexpr[i]; i++)
+@@ -7571,6 +8374,86 @@ ipa_struct_reorg::check_and_prune_struct_for_pointer_compression (void)
+     }
+ }
+ 
++void
++ipa_struct_reorg::check_and_prune_struct_for_semi_relayout (void)
++{
++  unsigned relayout_transform = 0;
++  for (unsigned i = 0; i < types.length (); i++)
++    {
++      srtype *type = types[i];
++      if (dump_file)
++	{
++	  print_generic_expr (dump_file, type->type);
++	}
++      if (type->has_escaped ())
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, " has escaped by %s, "
++				  "skip relayout.\n", type->escape_reason ());
++	    }
++	  continue;
++	}
++      if (TYPE_FIELDS (type->type) == NULL)
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, " has zero field, skip relayout.\n");
++	    }
++	  continue;
++	}
++      if (type->chain_type)
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, " is chain_type, skip relayout.\n");
++	    }
++	    continue;
++	}
++      if (type->has_alloc_array == 0 || type->has_alloc_array == 1
++	  || type->has_alloc_array == -1 || type->has_alloc_array == -3
++	  || type->has_alloc_array == -4)
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, " has alloc number: %d,"
++				  " skip relayout.\n", type->has_alloc_array);
++	    }
++	  continue;
++	}
++      if (get_type_name (type->type) == NULL)
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, " has empty struct name,"
++				  " skip relayout.\n");
++	    }
++	  continue;
++	}
++      relayout_transform++;
++      type->semi_relayout = true;
++      if (dump_file)
++	{
++	  fprintf (dump_file, " attempts to do semi-relayout.\n");
++	}
++    }
++
++  if (dump_file)
++    {
++      if (relayout_transform)
++	{
++	  fprintf (dump_file, "\nNumber of structures to transform in "
++			      "semi-relayout is %d\n", relayout_transform);
++	}
++      else
++	{
++	  fprintf (dump_file, "\nNo structures to transform in "
++			      "semi-relayout.\n");
++	}
++    }
++}
++
++
+ /* Init pointer size from parameter param_pointer_compression_size.  */
+ 
+ static void
+@@ -7612,6 +8495,8 @@ ipa_struct_reorg::execute (unsigned int opt)
+ 
+       if (opt >= POINTER_COMPRESSION_SAFE)
+ 	check_and_prune_struct_for_pointer_compression ();
++      if (opt >= SEMI_RELAYOUT)
++	check_and_prune_struct_for_semi_relayout ();
+       ret = rewrite_functions ();
+     }
+   else
+@@ -7659,6 +8544,8 @@ public:
+     unsigned int level = 0;
+     switch (struct_layout_optimize_level)
+       {
++	case 6: level |= SEMI_RELAYOUT;
++	// FALLTHRU
+ 	case 5: level |= POINTER_COMPRESSION_UNSAFE;
+ 	// FALLTHRU
+ 	case 4: level |= POINTER_COMPRESSION_SAFE;
+@@ -7678,6 +8565,12 @@ public:
+     if (level & POINTER_COMPRESSION_SAFE)
+       init_pointer_size_for_pointer_compression ();
+ 
++    if (level & SEMI_RELAYOUT)
++      {
++	semi_relayout_align = semi_relayout_level;
++	relayout_part_size = 1 << semi_relayout_level;
++      }
++
+     /* Preserved for backward compatibility, reorder fields needs run before
+        struct split and complete struct relayout.  */
+     if (flag_ipa_reorder_fields && level < STRUCT_REORDER_FIELDS)
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+index 6c4469597..e3e6d7afb 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+@@ -25,6 +25,9 @@ namespace struct_reorg {
+ 
+ const int max_split = 2;
+ 
++unsigned semi_relayout_align = semi_relayout_level;
++unsigned relayout_part_size = 1 << semi_relayout_level;
++
+ template 
+ struct auto_vec_del : auto_vec
+ {
+@@ -128,6 +131,10 @@ public:
+   /* Negative number means it has illegal allocated arrays
+      that we do not optimize.  */
+   int has_alloc_array;
++  bool semi_relayout;
++  hash_map new_field_offsets;
++  unsigned bucket_parts;
++  unsigned bucket_size;
+ 
+   // Constructors
+   srtype (tree type);
+@@ -149,6 +156,7 @@ public:
+   bool has_dead_field (void);
+   void mark_escape (escape_type, gimple *stmt);
+   void create_global_ptr_for_pc ();
++  unsigned calculate_bucket_size ();
+   bool has_escaped (void)
+   {
+     return escapes != does_not_escape;
+diff --git a/gcc/params.opt b/gcc/params.opt
+index bb5d82471..82a3d92c5 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1221,4 +1221,9 @@ Target size of compressed pointer, which should be 8, 16 or 32.
+ -param=param-ldp-dependency-search-range=
+ Common Joined UInteger Var(param_ldp_dependency_search_range) Init(16) IntegerRange(1, 32) Param Optimization
+ Range for depended ldp search in split-ldp-stp path.
++
++-param=semi-relayout-level=
++Common Joined UInteger Var(semi_relayout_level) Init(13) IntegerRange(11, 15) Param Optimization
++Set capacity of each bucket to semi-relayout to (1 << semi-relayout-level) / 8 .
++
+ ; This comment is to ensure we retain the blank line above.
+diff --git a/gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c b/gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c
+new file mode 100644
+index 000000000..aca8400ca
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/semi_relayout_rewrite.c
+@@ -0,0 +1,86 @@
++// Check simplify rewrite chance for semi-relayout
++/* { dg-do compile } */
++
++#include 
++#include 
++
++typedef struct node node_t;
++typedef struct node *node_p;
++
++typedef struct arc arc_t;
++typedef struct arc *arc_p;
++
++typedef struct network
++{
++  arc_p arcs;
++  arc_p sorted_arcs;
++  int x;
++  node_p nodes;
++  node_p stop_nodes;
++} network_t;
++
++struct node
++{
++  int64_t potential;
++  int orientation;
++  node_p child;
++  node_p pred;
++  node_p sibling;
++  node_p sibling_prev;
++  arc_p basic_arc;
++  arc_p firstout;
++  arc_p firstin;
++  arc_p arc_tmp;
++  int64_t flow;
++  int64_t depth;
++  int number;
++  int time;
++};
++
++struct arc
++{
++  int id;
++  int64_t cost;
++  node_p tail;
++  node_p head;
++  short ident;
++  arc_p nextout;
++  arc_p nextin;
++  int64_t flow;
++  int64_t org_cost;
++  network_t* net_add;
++};
++
++
++const int MAX = 100;
++network_t* net;
++node_p node;
++arc_p arc;
++
++int
++main ()
++{
++  net = (network_t*) calloc (1, sizeof(network_t));
++  net->arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->sorted_arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++  net->nodes = (node_p) calloc (MAX, sizeof (node_t));
++  net->arcs->id = 100;
++
++  node = net->nodes;
++  arc = net->arcs;
++
++  for (unsigned i = 0; i < MAX; i++)
++    {
++      arc->head = node;
++      arc->head->child = node;
++      node->potential = i + 1;
++      arc->cost = arc->head->potential;
++      arc->tail = node->sibling;
++      node = node + 1;
++      arc = arc + 1;
++    }
++
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump "Number of structures to transform in semi-relayout is 1" "struct_reorg" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+index c40474407..c5a955b00 100644
+--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+@@ -55,6 +55,10 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \
+ 	"" "-fipa-struct-reorg=5 -fdump-ipa-all -flto-partition=one -fwhole-program"
+ 
++# -fipa-struct-reorg=6
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/semi_relayout*.c]] \
++	"" "-fipa-struct-reorg=6 -fdump-ipa-all -flto-partition=one -fwhole-program"
++
+ # All done.
+ torture-finish
+ dg-finish
+-- 
+2.33.0
+
diff --git a/0054-LoongArch-Optimize-vector-constant-extract-even-odd-.patch b/0054-LoongArch-Optimize-vector-constant-extract-even-odd-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5dc5e2718aa00b7a3e79fcf6ac99727fea7df457
--- /dev/null
+++ b/0054-LoongArch-Optimize-vector-constant-extract-even-odd-.patch
@@ -0,0 +1,163 @@
+From 19282fbb0dab42c3553326a1ed01ad9a599622dd Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Tue, 28 Nov 2023 15:39:00 +0800
+Subject: [PATCH 054/188] LoongArch: Optimize vector constant
+ extract-{even/odd} permutation.
+
+For vector constant extract-{even/odd} permutation replace the default
+[x]vshuf instruction combination with [x]vilv{l/h} instruction, which
+can reduce instructions and improves performance.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_is_odd_extraction):
+	Supplementary function prototype.
+	(loongarch_is_even_extraction): Adjust.
+	(loongarch_try_expand_lsx_vshuf_const): Adjust.
+	(loongarch_is_extraction_permutation): Adjust.
+	(loongarch_expand_vec_perm_const_2): Adjust.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/lasx-extract-even_odd-opt.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 33 +++++++++++-
+ .../loongarch/lasx-extract-even_odd-opt.c     | 54 +++++++++++++++++++
+ 2 files changed, 85 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index ecceca22d..3ef7e3605 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -8668,6 +8668,12 @@ loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
+     }
+ }
+ 
++static bool
++loongarch_is_odd_extraction (struct expand_vec_perm_d *);
++
++static bool
++loongarch_is_even_extraction (struct expand_vec_perm_d *);
++
+ static bool
+ loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
+ {
+@@ -8690,6 +8696,24 @@ loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
+       if (d->testing_p)
+ 	return true;
+ 
++      /* If match extract-even and extract-odd permutations pattern, use
++       * vselect much better than vshuf.  */
++      if (loongarch_is_odd_extraction (d)
++	  || loongarch_is_even_extraction (d))
++	{
++	  if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
++						d->perm, d->nelt))
++	    return true;
++
++	  unsigned char perm2[MAX_VECT_LEN];
++	  for (i = 0; i < d->nelt; ++i)
++	    perm2[i] = (d->perm[i] + d->nelt) & (2 * d->nelt - 1);
++
++	  if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0,
++						perm2, d->nelt))
++	    return true;
++	}
++
+       for (i = 0; i < d->nelt; i += 1)
+ 	{
+ 	  rperm[i] = GEN_INT (d->perm[i]);
+@@ -8874,7 +8898,7 @@ loongarch_is_even_extraction (struct expand_vec_perm_d *d)
+ 	  result = false;
+ 	  break;
+ 	}
+-      buf += 1;
++      buf += 2;
+     }
+ 
+   return result;
+@@ -8896,7 +8920,7 @@ loongarch_is_extraction_permutation (struct expand_vec_perm_d *d)
+ 	  result = false;
+ 	  break;
+ 	}
+-      buf += 2;
++      buf += 1;
+     }
+ 
+   return result;
+@@ -9373,6 +9397,11 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	 Selector after: { 1, 3, 1, 3 }.
+ 	 Even extraction selector sample: E_V4DImode, { 0, 2, 4, 6 }
+ 	 Selector after: { 0, 2, 0, 2 }.  */
++
++      /* Better implement of extract-even and extract-odd permutations.  */
++      if (loongarch_expand_vec_perm_even_odd (d))
++	return true;
++
+       for (i = 0; i < d->nelt / 2; i += 1)
+ 	{
+ 	  idx = d->perm[i];
+diff --git a/gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c b/gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c
+new file mode 100644
+index 000000000..515f0c862
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/lasx-extract-even_odd-opt.c
+@@ -0,0 +1,54 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -mlasx" } */
++/* { dg-final { scan-assembler "xvilvl.d" } } */
++/* { dg-final { scan-assembler "xvilvh.d" } } */
++
++#define CMUL(a, b, c)                                                         \
++  {                                                                           \
++    (c).ai = (a).ai * (b).ai - (a).bi * (b).bi;                               \
++    (c).bi = (a).ai * (b).bi + (a).bi * (b).ai;                               \
++    (c).ci = (a).ci * (b).ci - (a).di * (b).di;                               \
++    (c).di = (a).ci * (b).di + (a).di * (b).ci;                               \
++  }
++#define CSUM(a, b)                                                            \
++  {                                                                           \
++    (a).ai += (b).ai;                                                         \
++    (a).bi += (b).bi;                                                         \
++    (a).ci += (b).ci;                                                         \
++    (a).di += (b).di;                                                         \
++  }
++
++typedef struct
++{
++  double ai;
++  double bi;
++  double ci;
++  double di;
++} complex;
++
++typedef struct
++{
++  complex e[6][6];
++} matrix;
++
++typedef struct
++{
++  complex c[6];
++} vector;
++
++void
++mult_adj_mat_vec (matrix *a, vector *b, vector *c)
++{
++  register int i, j;
++  register complex x, y;
++  for (i = 0; i < 6; i++)
++    {
++      x.ai = x.bi = x.ci = x.di = 0.0;
++      for (j = 0; j < 6; j++)
++        {
++          CMUL (a->e[j][i], b->c[j], y);
++          CSUM (x, y);
++        }
++      c->c[i] = x;
++    }
++}
+-- 
+2.43.0
+
diff --git a/0054-Struct-Reorg-Bugfix-for-structure-pointer-compressio.patch b/0054-Struct-Reorg-Bugfix-for-structure-pointer-compressio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..54e93fc7c0da64fc764668d94e87caa39f012068
--- /dev/null
+++ b/0054-Struct-Reorg-Bugfix-for-structure-pointer-compressio.patch
@@ -0,0 +1,28 @@
+From 9dc3df938b9ed2c27498c8548087fee1ce930366 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=83=91=E6=99=A8=E5=8D=89?= 
+Date: Tue, 2 Apr 2024 11:08:30 +0800
+Subject: [PATCH] [Struct Reorg] Bugfix for structure pointer compression
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index fa33f2d35..3922873f3 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -7541,9 +7541,11 @@ ipa_struct_reorg::check_and_prune_struct_for_pointer_compression (void)
+       if (!type->has_legal_alloc_num)
+ 	{
+ 	  if (current_layout_opt_level & POINTER_COMPRESSION_UNSAFE)
++	    {
+ 	    if (dump_file)
+ 	      fprintf (dump_file, " has unknown alloc size, but"
+ 				  " in unsafe mode, so");
++	    }
+ 	  else
+ 	    {
+ 	      if (dump_file)
+-- 
+2.33.0
+
diff --git a/0055-LoongArch-Add-intrinsic-function-descriptions-for-LS.patch b/0055-LoongArch-Add-intrinsic-function-descriptions-for-LS.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5d290116d88e5a4e4f601a9a913e284b1c6c9b1b
--- /dev/null
+++ b/0055-LoongArch-Add-intrinsic-function-descriptions-for-LS.patch
@@ -0,0 +1,1697 @@
+From 548322a75cdeb96960fb9d324a2abf8735c4d254 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Tue, 7 Nov 2023 11:53:39 +0800
+Subject: [PATCH 055/188] LoongArch: Add intrinsic function descriptions for
+ LSX and LASX instructions to doc.
+
+gcc/ChangeLog:
+
+	* doc/extend.texi: Add information about the intrinsic function of the vector
+	instruction.
+---
+ gcc/doc/extend.texi | 1662 +++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 1662 insertions(+)
+
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index 497c6de5f..7edd3974d 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -14679,6 +14679,8 @@ instructions, but allow the compiler to schedule those calls.
+ * BPF Built-in Functions::
+ * FR-V Built-in Functions::
+ * LoongArch Base Built-in Functions::
++* LoongArch SX Vector Intrinsics::
++* LoongArch ASX Vector Intrinsics::
+ * MIPS DSP Built-in Functions::
+ * MIPS Paired-Single Support::
+ * MIPS Loongson Built-in Functions::
+@@ -16262,6 +16264,1666 @@ Returns the value that is currently set in the @samp{tp} register.
+     void * __builtin_thread_pointer (void)
+ @end smallexample
+ 
++@node LoongArch SX Vector Intrinsics
++@subsection LoongArch SX Vector Intrinsics
++
++GCC provides intrinsics to access the LSX (Loongson SIMD Extension) instructions.
++The interface is made available by including @code{} and using
++@option{-mlsx}.
++
++The following vectors typedefs are included in @code{lsxintrin.h}:
++
++@itemize
++@item @code{__m128i}, a 128-bit vector of fixed point;
++@item @code{__m128}, a 128-bit vector of single precision floating point;
++@item @code{__m128d}, a 128-bit vector of double precision floating point.
++@end itemize
++
++Instructions and corresponding built-ins may have additional restrictions and/or
++input/output values manipulated:
++@itemize
++@item @code{imm0_1}, an integer literal in range 0 to 1;
++@item @code{imm0_3}, an integer literal in range 0 to 3;
++@item @code{imm0_7}, an integer literal in range 0 to 7;
++@item @code{imm0_15}, an integer literal in range 0 to 15;
++@item @code{imm0_31}, an integer literal in range 0 to 31;
++@item @code{imm0_63}, an integer literal in range 0 to 63;
++@item @code{imm0_127}, an integer literal in range 0 to 127;
++@item @code{imm0_255}, an integer literal in range 0 to 255;
++@item @code{imm_n16_15}, an integer literal in range -16 to 15;
++@item @code{imm_n128_127}, an integer literal in range -128 to 127;
++@item @code{imm_n256_255}, an integer literal in range -256 to 255;
++@item @code{imm_n512_511}, an integer literal in range -512 to 511;
++@item @code{imm_n1024_1023}, an integer literal in range -1024 to 1023;
++@item @code{imm_n2048_2047}, an integer literal in range -2048 to 2047.
++@end itemize
++
++For convenience, GCC defines functions @code{__lsx_vrepli_@{b/h/w/d@}} and
++@code{__lsx_b[n]z_@{v/b/h/w/d@}}, which are implemented as follows:
++
++@smallexample
++a. @code{__lsx_vrepli_@{b/h/w/d@}}: Implemented the case where the highest
++   bit of @code{vldi} instruction @code{i13} is 1.
++
++   i13[12] == 1'b0
++   case i13[11:10] of :
++     2'b00: __lsx_vrepli_b (imm_n512_511)
++     2'b01: __lsx_vrepli_h (imm_n512_511)
++     2'b10: __lsx_vrepli_w (imm_n512_511)
++     2'b11: __lsx_vrepli_d (imm_n512_511)
++
++b. @code{__lsx_b[n]z_@{v/b/h/w/d@}}: Since the @code{vseteqz} class directive
++   cannot be used on its own, this function is defined.
++
++   _lsx_bz_v  => vseteqz.v + bcnez
++   _lsx_bnz_v => vsetnez.v + bcnez
++   _lsx_bz_b  => vsetanyeqz.b + bcnez
++   _lsx_bz_h  => vsetanyeqz.h + bcnez
++   _lsx_bz_w  => vsetanyeqz.w + bcnez
++   _lsx_bz_d  => vsetanyeqz.d + bcnez
++   _lsx_bnz_b => vsetallnez.b + bcnez
++   _lsx_bnz_h => vsetallnez.h + bcnez
++   _lsx_bnz_w => vsetallnez.w + bcnez
++   _lsx_bnz_d => vsetallnez.d + bcnez
++@end smallexample
++
++@smallexample
++eg:
++  #include 
++
++  extern __m128i @var{a};
++
++  void
++  test (void)
++  @{
++    if (__lsx_bz_v (@var{a}))
++      printf ("1\n");
++    else
++      printf ("2\n");
++  @}
++@end smallexample
++
++@emph{Note:} For directives where the intent operand is also the source operand
++(modifying only part of the bitfield of the intent register), the first parameter
++in the builtin call function is used as the intent operand.
++
++@smallexample
++eg:
++  #include 
++
++  extern __m128i @var{dst};
++  extern int @var{src};
++
++  void
++  test (void)
++  @{
++    @var{dst} = __lsx_vinsgr2vr_b (@var{dst}, @var{src}, 3);
++  @}
++@end smallexample
++
++The intrinsics provided are listed below:
++@smallexample
++int __lsx_bnz_b (__m128i);
++int __lsx_bnz_d (__m128i);
++int __lsx_bnz_h (__m128i);
++int __lsx_bnz_v (__m128i);
++int __lsx_bnz_w (__m128i);
++int __lsx_bz_b (__m128i);
++int __lsx_bz_d (__m128i);
++int __lsx_bz_h (__m128i);
++int __lsx_bz_v (__m128i);
++int __lsx_bz_w (__m128i);
++__m128i __lsx_vabsd_b (__m128i, __m128i);
++__m128i __lsx_vabsd_bu (__m128i, __m128i);
++__m128i __lsx_vabsd_di (__m128i, __m128i);
++__m128i __lsx_vabsd_du (__m128i, __m128i);
++__m128i __lsx_vabsd_h (__m128i, __m128i);
++__m128i __lsx_vabsd_hu (__m128i, __m128i);
++__m128i __lsx_vabsd_w (__m128i, __m128i);
++__m128i __lsx_vabsd_wu (__m128i, __m128i);
++__m128i __lsx_vadda_b (__m128i, __m128i);
++__m128i __lsx_vadda_d (__m128i, __m128i);
++__m128i __lsx_vadda_h (__m128i, __m128i);
++__m128i __lsx_vadda_w (__m128i, __m128i);
++__m128i __lsx_vadd_b (__m128i, __m128i);
++__m128i __lsx_vadd_d (__m128i, __m128i);
++__m128i __lsx_vadd_h (__m128i, __m128i);
++__m128i __lsx_vaddi_bu (__m128i, imm0_31);
++__m128i __lsx_vaddi_du (__m128i, imm0_31);
++__m128i __lsx_vaddi_hu (__m128i, imm0_31);
++__m128i __lsx_vaddi_wu (__m128i, imm0_31);
++__m128i __lsx_vadd_q (__m128i, __m128i);
++__m128i __lsx_vadd_w (__m128i, __m128i);
++__m128i __lsx_vaddwev_d_w (__m128i, __m128i);
++__m128i __lsx_vaddwev_d_wu (__m128i, __m128i);
++__m128i __lsx_vaddwev_d_wu_w (__m128i, __m128i);
++__m128i __lsx_vaddwev_h_b (__m128i, __m128i);
++__m128i __lsx_vaddwev_h_bu (__m128i, __m128i);
++__m128i __lsx_vaddwev_h_bu_b (__m128i, __m128i);
++__m128i __lsx_vaddwev_q_d (__m128i, __m128i);
++__m128i __lsx_vaddwev_q_du (__m128i, __m128i);
++__m128i __lsx_vaddwev_q_du_d (__m128i, __m128i);
++__m128i __lsx_vaddwev_w_h (__m128i, __m128i);
++__m128i __lsx_vaddwev_w_hu (__m128i, __m128i);
++__m128i __lsx_vaddwev_w_hu_h (__m128i, __m128i);
++__m128i __lsx_vaddwod_d_w (__m128i, __m128i);
++__m128i __lsx_vaddwod_d_wu (__m128i, __m128i);
++__m128i __lsx_vaddwod_d_wu_w (__m128i, __m128i);
++__m128i __lsx_vaddwod_h_b (__m128i, __m128i);
++__m128i __lsx_vaddwod_h_bu (__m128i, __m128i);
++__m128i __lsx_vaddwod_h_bu_b (__m128i, __m128i);
++__m128i __lsx_vaddwod_q_d (__m128i, __m128i);
++__m128i __lsx_vaddwod_q_du (__m128i, __m128i);
++__m128i __lsx_vaddwod_q_du_d (__m128i, __m128i);
++__m128i __lsx_vaddwod_w_h (__m128i, __m128i);
++__m128i __lsx_vaddwod_w_hu (__m128i, __m128i);
++__m128i __lsx_vaddwod_w_hu_h (__m128i, __m128i);
++__m128i __lsx_vandi_b (__m128i, imm0_255);
++__m128i __lsx_vandn_v (__m128i, __m128i);
++__m128i __lsx_vand_v (__m128i, __m128i);
++__m128i __lsx_vavg_b (__m128i, __m128i);
++__m128i __lsx_vavg_bu (__m128i, __m128i);
++__m128i __lsx_vavg_d (__m128i, __m128i);
++__m128i __lsx_vavg_du (__m128i, __m128i);
++__m128i __lsx_vavg_h (__m128i, __m128i);
++__m128i __lsx_vavg_hu (__m128i, __m128i);
++__m128i __lsx_vavgr_b (__m128i, __m128i);
++__m128i __lsx_vavgr_bu (__m128i, __m128i);
++__m128i __lsx_vavgr_d (__m128i, __m128i);
++__m128i __lsx_vavgr_du (__m128i, __m128i);
++__m128i __lsx_vavgr_h (__m128i, __m128i);
++__m128i __lsx_vavgr_hu (__m128i, __m128i);
++__m128i __lsx_vavgr_w (__m128i, __m128i);
++__m128i __lsx_vavgr_wu (__m128i, __m128i);
++__m128i __lsx_vavg_w (__m128i, __m128i);
++__m128i __lsx_vavg_wu (__m128i, __m128i);
++__m128i __lsx_vbitclr_b (__m128i, __m128i);
++__m128i __lsx_vbitclr_d (__m128i, __m128i);
++__m128i __lsx_vbitclr_h (__m128i, __m128i);
++__m128i __lsx_vbitclri_b (__m128i, imm0_7);
++__m128i __lsx_vbitclri_d (__m128i, imm0_63);
++__m128i __lsx_vbitclri_h (__m128i, imm0_15);
++__m128i __lsx_vbitclri_w (__m128i, imm0_31);
++__m128i __lsx_vbitclr_w (__m128i, __m128i);
++__m128i __lsx_vbitrev_b (__m128i, __m128i);
++__m128i __lsx_vbitrev_d (__m128i, __m128i);
++__m128i __lsx_vbitrev_h (__m128i, __m128i);
++__m128i __lsx_vbitrevi_b (__m128i, imm0_7);
++__m128i __lsx_vbitrevi_d (__m128i, imm0_63);
++__m128i __lsx_vbitrevi_h (__m128i, imm0_15);
++__m128i __lsx_vbitrevi_w (__m128i, imm0_31);
++__m128i __lsx_vbitrev_w (__m128i, __m128i);
++__m128i __lsx_vbitseli_b (__m128i, __m128i, imm0_255);
++__m128i __lsx_vbitsel_v (__m128i, __m128i, __m128i);
++__m128i __lsx_vbitset_b (__m128i, __m128i);
++__m128i __lsx_vbitset_d (__m128i, __m128i);
++__m128i __lsx_vbitset_h (__m128i, __m128i);
++__m128i __lsx_vbitseti_b (__m128i, imm0_7);
++__m128i __lsx_vbitseti_d (__m128i, imm0_63);
++__m128i __lsx_vbitseti_h (__m128i, imm0_15);
++__m128i __lsx_vbitseti_w (__m128i, imm0_31);
++__m128i __lsx_vbitset_w (__m128i, __m128i);
++__m128i __lsx_vbsll_v (__m128i, imm0_31);
++__m128i __lsx_vbsrl_v (__m128i, imm0_31);
++__m128i __lsx_vclo_b (__m128i);
++__m128i __lsx_vclo_d (__m128i);
++__m128i __lsx_vclo_h (__m128i);
++__m128i __lsx_vclo_w (__m128i);
++__m128i __lsx_vclz_b (__m128i);
++__m128i __lsx_vclz_d (__m128i);
++__m128i __lsx_vclz_h (__m128i);
++__m128i __lsx_vclz_w (__m128i);
++__m128i __lsx_vdiv_b (__m128i, __m128i);
++__m128i __lsx_vdiv_bu (__m128i, __m128i);
++__m128i __lsx_vdiv_d (__m128i, __m128i);
++__m128i __lsx_vdiv_du (__m128i, __m128i);
++__m128i __lsx_vdiv_h (__m128i, __m128i);
++__m128i __lsx_vdiv_hu (__m128i, __m128i);
++__m128i __lsx_vdiv_w (__m128i, __m128i);
++__m128i __lsx_vdiv_wu (__m128i, __m128i);
++__m128i __lsx_vexth_du_wu (__m128i);
++__m128i __lsx_vexth_d_w (__m128i);
++__m128i __lsx_vexth_h_b (__m128i);
++__m128i __lsx_vexth_hu_bu (__m128i);
++__m128i __lsx_vexth_q_d (__m128i);
++__m128i __lsx_vexth_qu_du (__m128i);
++__m128i __lsx_vexth_w_h (__m128i);
++__m128i __lsx_vexth_wu_hu (__m128i);
++__m128i __lsx_vextl_q_d (__m128i);
++__m128i __lsx_vextl_qu_du (__m128i);
++__m128i __lsx_vextrins_b (__m128i, __m128i, imm0_255);
++__m128i __lsx_vextrins_d (__m128i, __m128i, imm0_255);
++__m128i __lsx_vextrins_h (__m128i, __m128i, imm0_255);
++__m128i __lsx_vextrins_w (__m128i, __m128i, imm0_255);
++__m128d __lsx_vfadd_d (__m128d, __m128d);
++__m128 __lsx_vfadd_s (__m128, __m128);
++__m128i __lsx_vfclass_d (__m128d);
++__m128i __lsx_vfclass_s (__m128);
++__m128i __lsx_vfcmp_caf_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_caf_s (__m128, __m128);
++__m128i __lsx_vfcmp_ceq_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_ceq_s (__m128, __m128);
++__m128i __lsx_vfcmp_cle_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cle_s (__m128, __m128);
++__m128i __lsx_vfcmp_clt_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_clt_s (__m128, __m128);
++__m128i __lsx_vfcmp_cne_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cne_s (__m128, __m128);
++__m128i __lsx_vfcmp_cor_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cor_s (__m128, __m128);
++__m128i __lsx_vfcmp_cueq_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cueq_s (__m128, __m128);
++__m128i __lsx_vfcmp_cule_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cule_s (__m128, __m128);
++__m128i __lsx_vfcmp_cult_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cult_s (__m128, __m128);
++__m128i __lsx_vfcmp_cun_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cune_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_cune_s (__m128, __m128);
++__m128i __lsx_vfcmp_cun_s (__m128, __m128);
++__m128i __lsx_vfcmp_saf_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_saf_s (__m128, __m128);
++__m128i __lsx_vfcmp_seq_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_seq_s (__m128, __m128);
++__m128i __lsx_vfcmp_sle_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sle_s (__m128, __m128);
++__m128i __lsx_vfcmp_slt_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_slt_s (__m128, __m128);
++__m128i __lsx_vfcmp_sne_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sne_s (__m128, __m128);
++__m128i __lsx_vfcmp_sor_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sor_s (__m128, __m128);
++__m128i __lsx_vfcmp_sueq_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sueq_s (__m128, __m128);
++__m128i __lsx_vfcmp_sule_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sule_s (__m128, __m128);
++__m128i __lsx_vfcmp_sult_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sult_s (__m128, __m128);
++__m128i __lsx_vfcmp_sun_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sune_d (__m128d, __m128d);
++__m128i __lsx_vfcmp_sune_s (__m128, __m128);
++__m128i __lsx_vfcmp_sun_s (__m128, __m128);
++__m128d __lsx_vfcvth_d_s (__m128);
++__m128i __lsx_vfcvt_h_s (__m128, __m128);
++__m128 __lsx_vfcvth_s_h (__m128i);
++__m128d __lsx_vfcvtl_d_s (__m128);
++__m128 __lsx_vfcvtl_s_h (__m128i);
++__m128 __lsx_vfcvt_s_d (__m128d, __m128d);
++__m128d __lsx_vfdiv_d (__m128d, __m128d);
++__m128 __lsx_vfdiv_s (__m128, __m128);
++__m128d __lsx_vffint_d_l (__m128i);
++__m128d __lsx_vffint_d_lu (__m128i);
++__m128d __lsx_vffinth_d_w (__m128i);
++__m128d __lsx_vffintl_d_w (__m128i);
++__m128 __lsx_vffint_s_l (__m128i, __m128i);
++__m128 __lsx_vffint_s_w (__m128i);
++__m128 __lsx_vffint_s_wu (__m128i);
++__m128d __lsx_vflogb_d (__m128d);
++__m128 __lsx_vflogb_s (__m128);
++__m128d __lsx_vfmadd_d (__m128d, __m128d, __m128d);
++__m128 __lsx_vfmadd_s (__m128, __m128, __m128);
++__m128d __lsx_vfmaxa_d (__m128d, __m128d);
++__m128 __lsx_vfmaxa_s (__m128, __m128);
++__m128d __lsx_vfmax_d (__m128d, __m128d);
++__m128 __lsx_vfmax_s (__m128, __m128);
++__m128d __lsx_vfmina_d (__m128d, __m128d);
++__m128 __lsx_vfmina_s (__m128, __m128);
++__m128d __lsx_vfmin_d (__m128d, __m128d);
++__m128 __lsx_vfmin_s (__m128, __m128);
++__m128d __lsx_vfmsub_d (__m128d, __m128d, __m128d);
++__m128 __lsx_vfmsub_s (__m128, __m128, __m128);
++__m128d __lsx_vfmul_d (__m128d, __m128d);
++__m128 __lsx_vfmul_s (__m128, __m128);
++__m128d __lsx_vfnmadd_d (__m128d, __m128d, __m128d);
++__m128 __lsx_vfnmadd_s (__m128, __m128, __m128);
++__m128d __lsx_vfnmsub_d (__m128d, __m128d, __m128d);
++__m128 __lsx_vfnmsub_s (__m128, __m128, __m128);
++__m128d __lsx_vfrecip_d (__m128d);
++__m128 __lsx_vfrecip_s (__m128);
++__m128d __lsx_vfrint_d (__m128d);
++__m128i __lsx_vfrintrm_d (__m128d);
++__m128i __lsx_vfrintrm_s (__m128);
++__m128i __lsx_vfrintrne_d (__m128d);
++__m128i __lsx_vfrintrne_s (__m128);
++__m128i __lsx_vfrintrp_d (__m128d);
++__m128i __lsx_vfrintrp_s (__m128);
++__m128i __lsx_vfrintrz_d (__m128d);
++__m128i __lsx_vfrintrz_s (__m128);
++__m128 __lsx_vfrint_s (__m128);
++__m128d __lsx_vfrsqrt_d (__m128d);
++__m128 __lsx_vfrsqrt_s (__m128);
++__m128i __lsx_vfrstp_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vfrstp_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vfrstpi_b (__m128i, __m128i, imm0_31);
++__m128i __lsx_vfrstpi_h (__m128i, __m128i, imm0_31);
++__m128d __lsx_vfsqrt_d (__m128d);
++__m128 __lsx_vfsqrt_s (__m128);
++__m128d __lsx_vfsub_d (__m128d, __m128d);
++__m128 __lsx_vfsub_s (__m128, __m128);
++__m128i __lsx_vftinth_l_s (__m128);
++__m128i __lsx_vftint_l_d (__m128d);
++__m128i __lsx_vftintl_l_s (__m128);
++__m128i __lsx_vftint_lu_d (__m128d);
++__m128i __lsx_vftintrmh_l_s (__m128);
++__m128i __lsx_vftintrm_l_d (__m128d);
++__m128i __lsx_vftintrml_l_s (__m128);
++__m128i __lsx_vftintrm_w_d (__m128d, __m128d);
++__m128i __lsx_vftintrm_w_s (__m128);
++__m128i __lsx_vftintrneh_l_s (__m128);
++__m128i __lsx_vftintrne_l_d (__m128d);
++__m128i __lsx_vftintrnel_l_s (__m128);
++__m128i __lsx_vftintrne_w_d (__m128d, __m128d);
++__m128i __lsx_vftintrne_w_s (__m128);
++__m128i __lsx_vftintrph_l_s (__m128);
++__m128i __lsx_vftintrp_l_d (__m128d);
++__m128i __lsx_vftintrpl_l_s (__m128);
++__m128i __lsx_vftintrp_w_d (__m128d, __m128d);
++__m128i __lsx_vftintrp_w_s (__m128);
++__m128i __lsx_vftintrzh_l_s (__m128);
++__m128i __lsx_vftintrz_l_d (__m128d);
++__m128i __lsx_vftintrzl_l_s (__m128);
++__m128i __lsx_vftintrz_lu_d (__m128d);
++__m128i __lsx_vftintrz_w_d (__m128d, __m128d);
++__m128i __lsx_vftintrz_w_s (__m128);
++__m128i __lsx_vftintrz_wu_s (__m128);
++__m128i __lsx_vftint_w_d (__m128d, __m128d);
++__m128i __lsx_vftint_w_s (__m128);
++__m128i __lsx_vftint_wu_s (__m128);
++__m128i __lsx_vhaddw_du_wu (__m128i, __m128i);
++__m128i __lsx_vhaddw_d_w (__m128i, __m128i);
++__m128i __lsx_vhaddw_h_b (__m128i, __m128i);
++__m128i __lsx_vhaddw_hu_bu (__m128i, __m128i);
++__m128i __lsx_vhaddw_q_d (__m128i, __m128i);
++__m128i __lsx_vhaddw_qu_du (__m128i, __m128i);
++__m128i __lsx_vhaddw_w_h (__m128i, __m128i);
++__m128i __lsx_vhaddw_wu_hu (__m128i, __m128i);
++__m128i __lsx_vhsubw_du_wu (__m128i, __m128i);
++__m128i __lsx_vhsubw_d_w (__m128i, __m128i);
++__m128i __lsx_vhsubw_h_b (__m128i, __m128i);
++__m128i __lsx_vhsubw_hu_bu (__m128i, __m128i);
++__m128i __lsx_vhsubw_q_d (__m128i, __m128i);
++__m128i __lsx_vhsubw_qu_du (__m128i, __m128i);
++__m128i __lsx_vhsubw_w_h (__m128i, __m128i);
++__m128i __lsx_vhsubw_wu_hu (__m128i, __m128i);
++__m128i __lsx_vilvh_b (__m128i, __m128i);
++__m128i __lsx_vilvh_d (__m128i, __m128i);
++__m128i __lsx_vilvh_h (__m128i, __m128i);
++__m128i __lsx_vilvh_w (__m128i, __m128i);
++__m128i __lsx_vilvl_b (__m128i, __m128i);
++__m128i __lsx_vilvl_d (__m128i, __m128i);
++__m128i __lsx_vilvl_h (__m128i, __m128i);
++__m128i __lsx_vilvl_w (__m128i, __m128i);
++__m128i __lsx_vinsgr2vr_b (__m128i, int, imm0_15);
++__m128i __lsx_vinsgr2vr_d (__m128i, long int, imm0_1);
++__m128i __lsx_vinsgr2vr_h (__m128i, int, imm0_7);
++__m128i __lsx_vinsgr2vr_w (__m128i, int, imm0_3);
++__m128i __lsx_vld (void *, imm_n2048_2047)
++__m128i __lsx_vldi (imm_n1024_1023)
++__m128i __lsx_vldrepl_b (void *, imm_n2048_2047)
++__m128i __lsx_vldrepl_d (void *, imm_n256_255)
++__m128i __lsx_vldrepl_h (void *, imm_n1024_1023)
++__m128i __lsx_vldrepl_w (void *, imm_n512_511)
++__m128i __lsx_vldx (void *, long int);
++__m128i __lsx_vmadd_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vmadd_d (__m128i, __m128i, __m128i);
++__m128i __lsx_vmadd_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vmadd_w (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_d_w (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_d_wu (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_d_wu_w (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_h_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_h_bu (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_h_bu_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_q_d (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_q_du (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_q_du_d (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_w_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_w_hu (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwev_w_hu_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_d_w (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_d_wu (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_d_wu_w (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_h_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_h_bu (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_h_bu_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_q_d (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_q_du (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_q_du_d (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_w_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_w_hu (__m128i, __m128i, __m128i);
++__m128i __lsx_vmaddwod_w_hu_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vmax_b (__m128i, __m128i);
++__m128i __lsx_vmax_bu (__m128i, __m128i);
++__m128i __lsx_vmax_d (__m128i, __m128i);
++__m128i __lsx_vmax_du (__m128i, __m128i);
++__m128i __lsx_vmax_h (__m128i, __m128i);
++__m128i __lsx_vmax_hu (__m128i, __m128i);
++__m128i __lsx_vmaxi_b (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_bu (__m128i, imm0_31);
++__m128i __lsx_vmaxi_d (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_du (__m128i, imm0_31);
++__m128i __lsx_vmaxi_h (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_hu (__m128i, imm0_31);
++__m128i __lsx_vmaxi_w (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_wu (__m128i, imm0_31);
++__m128i __lsx_vmax_w (__m128i, __m128i);
++__m128i __lsx_vmax_wu (__m128i, __m128i);
++__m128i __lsx_vmin_b (__m128i, __m128i);
++__m128i __lsx_vmin_bu (__m128i, __m128i);
++__m128i __lsx_vmin_d (__m128i, __m128i);
++__m128i __lsx_vmin_du (__m128i, __m128i);
++__m128i __lsx_vmin_h (__m128i, __m128i);
++__m128i __lsx_vmin_hu (__m128i, __m128i);
++__m128i __lsx_vmini_b (__m128i, imm_n16_15);
++__m128i __lsx_vmini_bu (__m128i, imm0_31);
++__m128i __lsx_vmini_d (__m128i, imm_n16_15);
++__m128i __lsx_vmini_du (__m128i, imm0_31);
++__m128i __lsx_vmini_h (__m128i, imm_n16_15);
++__m128i __lsx_vmini_hu (__m128i, imm0_31);
++__m128i __lsx_vmini_w (__m128i, imm_n16_15);
++__m128i __lsx_vmini_wu (__m128i, imm0_31);
++__m128i __lsx_vmin_w (__m128i, __m128i);
++__m128i __lsx_vmin_wu (__m128i, __m128i);
++__m128i __lsx_vmod_b (__m128i, __m128i);
++__m128i __lsx_vmod_bu (__m128i, __m128i);
++__m128i __lsx_vmod_d (__m128i, __m128i);
++__m128i __lsx_vmod_du (__m128i, __m128i);
++__m128i __lsx_vmod_h (__m128i, __m128i);
++__m128i __lsx_vmod_hu (__m128i, __m128i);
++__m128i __lsx_vmod_w (__m128i, __m128i);
++__m128i __lsx_vmod_wu (__m128i, __m128i);
++__m128i __lsx_vmskgez_b (__m128i);
++__m128i __lsx_vmskltz_b (__m128i);
++__m128i __lsx_vmskltz_d (__m128i);
++__m128i __lsx_vmskltz_h (__m128i);
++__m128i __lsx_vmskltz_w (__m128i);
++__m128i __lsx_vmsknz_b (__m128i);
++__m128i __lsx_vmsub_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vmsub_d (__m128i, __m128i, __m128i);
++__m128i __lsx_vmsub_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vmsub_w (__m128i, __m128i, __m128i);
++__m128i __lsx_vmuh_b (__m128i, __m128i);
++__m128i __lsx_vmuh_bu (__m128i, __m128i);
++__m128i __lsx_vmuh_d (__m128i, __m128i);
++__m128i __lsx_vmuh_du (__m128i, __m128i);
++__m128i __lsx_vmuh_h (__m128i, __m128i);
++__m128i __lsx_vmuh_hu (__m128i, __m128i);
++__m128i __lsx_vmuh_w (__m128i, __m128i);
++__m128i __lsx_vmuh_wu (__m128i, __m128i);
++__m128i __lsx_vmul_b (__m128i, __m128i);
++__m128i __lsx_vmul_d (__m128i, __m128i);
++__m128i __lsx_vmul_h (__m128i, __m128i);
++__m128i __lsx_vmul_w (__m128i, __m128i);
++__m128i __lsx_vmulwev_d_w (__m128i, __m128i);
++__m128i __lsx_vmulwev_d_wu (__m128i, __m128i);
++__m128i __lsx_vmulwev_d_wu_w (__m128i, __m128i);
++__m128i __lsx_vmulwev_h_b (__m128i, __m128i);
++__m128i __lsx_vmulwev_h_bu (__m128i, __m128i);
++__m128i __lsx_vmulwev_h_bu_b (__m128i, __m128i);
++__m128i __lsx_vmulwev_q_d (__m128i, __m128i);
++__m128i __lsx_vmulwev_q_du (__m128i, __m128i);
++__m128i __lsx_vmulwev_q_du_d (__m128i, __m128i);
++__m128i __lsx_vmulwev_w_h (__m128i, __m128i);
++__m128i __lsx_vmulwev_w_hu (__m128i, __m128i);
++__m128i __lsx_vmulwev_w_hu_h (__m128i, __m128i);
++__m128i __lsx_vmulwod_d_w (__m128i, __m128i);
++__m128i __lsx_vmulwod_d_wu (__m128i, __m128i);
++__m128i __lsx_vmulwod_d_wu_w (__m128i, __m128i);
++__m128i __lsx_vmulwod_h_b (__m128i, __m128i);
++__m128i __lsx_vmulwod_h_bu (__m128i, __m128i);
++__m128i __lsx_vmulwod_h_bu_b (__m128i, __m128i);
++__m128i __lsx_vmulwod_q_d (__m128i, __m128i);
++__m128i __lsx_vmulwod_q_du (__m128i, __m128i);
++__m128i __lsx_vmulwod_q_du_d (__m128i, __m128i);
++__m128i __lsx_vmulwod_w_h (__m128i, __m128i);
++__m128i __lsx_vmulwod_w_hu (__m128i, __m128i);
++__m128i __lsx_vmulwod_w_hu_h (__m128i, __m128i);
++__m128i __lsx_vneg_b (__m128i);
++__m128i __lsx_vneg_d (__m128i);
++__m128i __lsx_vneg_h (__m128i);
++__m128i __lsx_vneg_w (__m128i);
++__m128i __lsx_vnori_b (__m128i, imm0_255);
++__m128i __lsx_vnor_v (__m128i, __m128i);
++__m128i __lsx_vori_b (__m128i, imm0_255);
++__m128i __lsx_vorn_v (__m128i, __m128i);
++__m128i __lsx_vor_v (__m128i, __m128i);
++__m128i __lsx_vpackev_b (__m128i, __m128i);
++__m128i __lsx_vpackev_d (__m128i, __m128i);
++__m128i __lsx_vpackev_h (__m128i, __m128i);
++__m128i __lsx_vpackev_w (__m128i, __m128i);
++__m128i __lsx_vpackod_b (__m128i, __m128i);
++__m128i __lsx_vpackod_d (__m128i, __m128i);
++__m128i __lsx_vpackod_h (__m128i, __m128i);
++__m128i __lsx_vpackod_w (__m128i, __m128i);
++__m128i __lsx_vpcnt_b (__m128i);
++__m128i __lsx_vpcnt_d (__m128i);
++__m128i __lsx_vpcnt_h (__m128i);
++__m128i __lsx_vpcnt_w (__m128i);
++__m128i __lsx_vpermi_w (__m128i, __m128i, imm0_255);
++__m128i __lsx_vpickev_b (__m128i, __m128i);
++__m128i __lsx_vpickev_d (__m128i, __m128i);
++__m128i __lsx_vpickev_h (__m128i, __m128i);
++__m128i __lsx_vpickev_w (__m128i, __m128i);
++__m128i __lsx_vpickod_b (__m128i, __m128i);
++__m128i __lsx_vpickod_d (__m128i, __m128i);
++__m128i __lsx_vpickod_h (__m128i, __m128i);
++__m128i __lsx_vpickod_w (__m128i, __m128i);
++int __lsx_vpickve2gr_b (__m128i, imm0_15);
++unsigned int __lsx_vpickve2gr_bu (__m128i, imm0_15);
++long int __lsx_vpickve2gr_d (__m128i, imm0_1);
++unsigned long int __lsx_vpickve2gr_du (__m128i, imm0_1);
++int __lsx_vpickve2gr_h (__m128i, imm0_7);
++unsigned int __lsx_vpickve2gr_hu (__m128i, imm0_7);
++int __lsx_vpickve2gr_w (__m128i, imm0_3);
++unsigned int __lsx_vpickve2gr_wu (__m128i, imm0_3);
++__m128i __lsx_vreplgr2vr_b (int);
++__m128i __lsx_vreplgr2vr_d (long int);
++__m128i __lsx_vreplgr2vr_h (int);
++__m128i __lsx_vreplgr2vr_w (int);
++__m128i __lsx_vrepli_b (imm_n512_511);
++__m128i __lsx_vrepli_d (imm_n512_511);
++__m128i __lsx_vrepli_h (imm_n512_511);
++__m128i __lsx_vrepli_w (imm_n512_511);
++__m128i __lsx_vreplve_b (__m128i, int);
++__m128i __lsx_vreplve_d (__m128i, int);
++__m128i __lsx_vreplve_h (__m128i, int);
++__m128i __lsx_vreplvei_b (__m128i, imm0_15);
++__m128i __lsx_vreplvei_d (__m128i, imm0_1);
++__m128i __lsx_vreplvei_h (__m128i, imm0_7);
++__m128i __lsx_vreplvei_w (__m128i, imm0_3);
++__m128i __lsx_vreplve_w (__m128i, int);
++__m128i __lsx_vrotr_b (__m128i, __m128i);
++__m128i __lsx_vrotr_d (__m128i, __m128i);
++__m128i __lsx_vrotr_h (__m128i, __m128i);
++__m128i __lsx_vrotri_b (__m128i, imm0_7);
++__m128i __lsx_vrotri_d (__m128i, imm0_63);
++__m128i __lsx_vrotri_h (__m128i, imm0_15);
++__m128i __lsx_vrotri_w (__m128i, imm0_31);
++__m128i __lsx_vrotr_w (__m128i, __m128i);
++__m128i __lsx_vsadd_b (__m128i, __m128i);
++__m128i __lsx_vsadd_bu (__m128i, __m128i);
++__m128i __lsx_vsadd_d (__m128i, __m128i);
++__m128i __lsx_vsadd_du (__m128i, __m128i);
++__m128i __lsx_vsadd_h (__m128i, __m128i);
++__m128i __lsx_vsadd_hu (__m128i, __m128i);
++__m128i __lsx_vsadd_w (__m128i, __m128i);
++__m128i __lsx_vsadd_wu (__m128i, __m128i);
++__m128i __lsx_vsat_b (__m128i, imm0_7);
++__m128i __lsx_vsat_bu (__m128i, imm0_7);
++__m128i __lsx_vsat_d (__m128i, imm0_63);
++__m128i __lsx_vsat_du (__m128i, imm0_63);
++__m128i __lsx_vsat_h (__m128i, imm0_15);
++__m128i __lsx_vsat_hu (__m128i, imm0_15);
++__m128i __lsx_vsat_w (__m128i, imm0_31);
++__m128i __lsx_vsat_wu (__m128i, imm0_31);
++__m128i __lsx_vseq_b (__m128i, __m128i);
++__m128i __lsx_vseq_d (__m128i, __m128i);
++__m128i __lsx_vseq_h (__m128i, __m128i);
++__m128i __lsx_vseqi_b (__m128i, imm_n16_15);
++__m128i __lsx_vseqi_d (__m128i, imm_n16_15);
++__m128i __lsx_vseqi_h (__m128i, imm_n16_15);
++__m128i __lsx_vseqi_w (__m128i, imm_n16_15);
++__m128i __lsx_vseq_w (__m128i, __m128i);
++__m128i __lsx_vshuf4i_b (__m128i, imm0_255);
++__m128i __lsx_vshuf4i_d (__m128i, __m128i, imm0_255);
++__m128i __lsx_vshuf4i_h (__m128i, imm0_255);
++__m128i __lsx_vshuf4i_w (__m128i, imm0_255);
++__m128i __lsx_vshuf_b (__m128i, __m128i, __m128i);
++__m128i __lsx_vshuf_d (__m128i, __m128i, __m128i);
++__m128i __lsx_vshuf_h (__m128i, __m128i, __m128i);
++__m128i __lsx_vshuf_w (__m128i, __m128i, __m128i);
++__m128i __lsx_vsigncov_b (__m128i, __m128i);
++__m128i __lsx_vsigncov_d (__m128i, __m128i);
++__m128i __lsx_vsigncov_h (__m128i, __m128i);
++__m128i __lsx_vsigncov_w (__m128i, __m128i);
++__m128i __lsx_vsigncov_b (__m128i, __m128i);
++__m128i __lsx_vsigncov_d (__m128i, __m128i);
++__m128i __lsx_vsigncov_h (__m128i, __m128i);
++__m128i __lsx_vsigncov_w (__m128i, __m128i);
++__m128i __lsx_vsle_b (__m128i, __m128i);
++__m128i __lsx_vsle_bu (__m128i, __m128i);
++__m128i __lsx_vsle_d (__m128i, __m128i);
++__m128i __lsx_vsle_du (__m128i, __m128i);
++__m128i __lsx_vsle_h (__m128i, __m128i);
++__m128i __lsx_vsle_hu (__m128i, __m128i);
++__m128i __lsx_vslei_b (__m128i, imm_n16_15);
++__m128i __lsx_vslei_bu (__m128i, imm0_31);
++__m128i __lsx_vslei_d (__m128i, imm_n16_15);
++__m128i __lsx_vslei_du (__m128i, imm0_31);
++__m128i __lsx_vslei_h (__m128i, imm_n16_15);
++__m128i __lsx_vslei_hu (__m128i, imm0_31);
++__m128i __lsx_vslei_w (__m128i, imm_n16_15);
++__m128i __lsx_vslei_wu (__m128i, imm0_31);
++__m128i __lsx_vsle_w (__m128i, __m128i);
++__m128i __lsx_vsle_wu (__m128i, __m128i);
++__m128i __lsx_vsll_b (__m128i, __m128i);
++__m128i __lsx_vsll_d (__m128i, __m128i);
++__m128i __lsx_vsll_h (__m128i, __m128i);
++__m128i __lsx_vslli_b (__m128i, imm0_7);
++__m128i __lsx_vslli_d (__m128i, imm0_63);
++__m128i __lsx_vslli_h (__m128i, imm0_15);
++__m128i __lsx_vslli_w (__m128i, imm0_31);
++__m128i __lsx_vsll_w (__m128i, __m128i);
++__m128i __lsx_vsllwil_du_wu (__m128i, imm0_31);
++__m128i __lsx_vsllwil_d_w (__m128i, imm0_31);
++__m128i __lsx_vsllwil_h_b (__m128i, imm0_7);
++__m128i __lsx_vsllwil_hu_bu (__m128i, imm0_7);
++__m128i __lsx_vsllwil_w_h (__m128i, imm0_15);
++__m128i __lsx_vsllwil_wu_hu (__m128i, imm0_15);
++__m128i __lsx_vslt_b (__m128i, __m128i);
++__m128i __lsx_vslt_bu (__m128i, __m128i);
++__m128i __lsx_vslt_d (__m128i, __m128i);
++__m128i __lsx_vslt_du (__m128i, __m128i);
++__m128i __lsx_vslt_h (__m128i, __m128i);
++__m128i __lsx_vslt_hu (__m128i, __m128i);
++__m128i __lsx_vslti_b (__m128i, imm_n16_15);
++__m128i __lsx_vslti_bu (__m128i, imm0_31);
++__m128i __lsx_vslti_d (__m128i, imm_n16_15);
++__m128i __lsx_vslti_du (__m128i, imm0_31);
++__m128i __lsx_vslti_h (__m128i, imm_n16_15);
++__m128i __lsx_vslti_hu (__m128i, imm0_31);
++__m128i __lsx_vslti_w (__m128i, imm_n16_15);
++__m128i __lsx_vslti_wu (__m128i, imm0_31);
++__m128i __lsx_vslt_w (__m128i, __m128i);
++__m128i __lsx_vslt_wu (__m128i, __m128i);
++__m128i __lsx_vsra_b (__m128i, __m128i);
++__m128i __lsx_vsra_d (__m128i, __m128i);
++__m128i __lsx_vsra_h (__m128i, __m128i);
++__m128i __lsx_vsrai_b (__m128i, imm0_7);
++__m128i __lsx_vsrai_d (__m128i, imm0_63);
++__m128i __lsx_vsrai_h (__m128i, imm0_15);
++__m128i __lsx_vsrai_w (__m128i, imm0_31);
++__m128i __lsx_vsran_b_h (__m128i, __m128i);
++__m128i __lsx_vsran_h_w (__m128i, __m128i);
++__m128i __lsx_vsrani_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vsrani_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vsrani_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vsrani_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vsran_w_d (__m128i, __m128i);
++__m128i __lsx_vsrar_b (__m128i, __m128i);
++__m128i __lsx_vsrar_d (__m128i, __m128i);
++__m128i __lsx_vsrar_h (__m128i, __m128i);
++__m128i __lsx_vsrari_b (__m128i, imm0_7);
++__m128i __lsx_vsrari_d (__m128i, imm0_63);
++__m128i __lsx_vsrari_h (__m128i, imm0_15);
++__m128i __lsx_vsrari_w (__m128i, imm0_31);
++__m128i __lsx_vsrarn_b_h (__m128i, __m128i);
++__m128i __lsx_vsrarn_h_w (__m128i, __m128i);
++__m128i __lsx_vsrarni_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vsrarni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vsrarni_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vsrarni_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vsrarn_w_d (__m128i, __m128i);
++__m128i __lsx_vsrar_w (__m128i, __m128i);
++__m128i __lsx_vsra_w (__m128i, __m128i);
++__m128i __lsx_vsrl_b (__m128i, __m128i);
++__m128i __lsx_vsrl_d (__m128i, __m128i);
++__m128i __lsx_vsrl_h (__m128i, __m128i);
++__m128i __lsx_vsrli_b (__m128i, imm0_7);
++__m128i __lsx_vsrli_d (__m128i, imm0_63);
++__m128i __lsx_vsrli_h (__m128i, imm0_15);
++__m128i __lsx_vsrli_w (__m128i, imm0_31);
++__m128i __lsx_vsrln_b_h (__m128i, __m128i);
++__m128i __lsx_vsrln_h_w (__m128i, __m128i);
++__m128i __lsx_vsrlni_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vsrlni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vsrlni_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vsrlni_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vsrln_w_d (__m128i, __m128i);
++__m128i __lsx_vsrlr_b (__m128i, __m128i);
++__m128i __lsx_vsrlr_d (__m128i, __m128i);
++__m128i __lsx_vsrlr_h (__m128i, __m128i);
++__m128i __lsx_vsrlri_b (__m128i, imm0_7);
++__m128i __lsx_vsrlri_d (__m128i, imm0_63);
++__m128i __lsx_vsrlri_h (__m128i, imm0_15);
++__m128i __lsx_vsrlri_w (__m128i, imm0_31);
++__m128i __lsx_vsrlrn_b_h (__m128i, __m128i);
++__m128i __lsx_vsrlrn_h_w (__m128i, __m128i);
++__m128i __lsx_vsrlrni_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vsrlrni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vsrlrni_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vsrlrni_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vsrlrn_w_d (__m128i, __m128i);
++__m128i __lsx_vsrlr_w (__m128i, __m128i);
++__m128i __lsx_vsrl_w (__m128i, __m128i);
++__m128i __lsx_vssran_b_h (__m128i, __m128i);
++__m128i __lsx_vssran_bu_h (__m128i, __m128i);
++__m128i __lsx_vssran_hu_w (__m128i, __m128i);
++__m128i __lsx_vssran_h_w (__m128i, __m128i);
++__m128i __lsx_vssrani_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrani_bu_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrani_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrani_du_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrani_hu_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrani_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrani_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssrani_wu_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssran_w_d (__m128i, __m128i);
++__m128i __lsx_vssran_wu_d (__m128i, __m128i);
++__m128i __lsx_vssrarn_b_h (__m128i, __m128i);
++__m128i __lsx_vssrarn_bu_h (__m128i, __m128i);
++__m128i __lsx_vssrarn_hu_w (__m128i, __m128i);
++__m128i __lsx_vssrarn_h_w (__m128i, __m128i);
++__m128i __lsx_vssrarni_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrarni_bu_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrarni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrarni_du_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrarni_hu_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrarni_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrarni_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssrarni_wu_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssrarn_w_d (__m128i, __m128i);
++__m128i __lsx_vssrarn_wu_d (__m128i, __m128i);
++__m128i __lsx_vssrln_b_h (__m128i, __m128i);
++__m128i __lsx_vssrln_bu_h (__m128i, __m128i);
++__m128i __lsx_vssrln_hu_w (__m128i, __m128i);
++__m128i __lsx_vssrln_h_w (__m128i, __m128i);
++__m128i __lsx_vssrlni_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrlni_bu_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrlni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrlni_du_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrlni_hu_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrlni_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrlni_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssrlni_wu_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssrln_w_d (__m128i, __m128i);
++__m128i __lsx_vssrln_wu_d (__m128i, __m128i);
++__m128i __lsx_vssrlrn_b_h (__m128i, __m128i);
++__m128i __lsx_vssrlrn_bu_h (__m128i, __m128i);
++__m128i __lsx_vssrlrn_hu_w (__m128i, __m128i);
++__m128i __lsx_vssrlrn_h_w (__m128i, __m128i);
++__m128i __lsx_vssrlrni_b_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrlrni_bu_h (__m128i, __m128i, imm0_15);
++__m128i __lsx_vssrlrni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrlrni_du_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrlrni_hu_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrlrni_h_w (__m128i, __m128i, imm0_31);
++__m128i __lsx_vssrlrni_w_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssrlrni_wu_d (__m128i, __m128i, imm0_63);
++__m128i __lsx_vssrlrn_w_d (__m128i, __m128i);
++__m128i __lsx_vssrlrn_wu_d (__m128i, __m128i);
++__m128i __lsx_vssub_b (__m128i, __m128i);
++__m128i __lsx_vssub_bu (__m128i, __m128i);
++__m128i __lsx_vssub_d (__m128i, __m128i);
++__m128i __lsx_vssub_du (__m128i, __m128i);
++__m128i __lsx_vssub_h (__m128i, __m128i);
++__m128i __lsx_vssub_hu (__m128i, __m128i);
++__m128i __lsx_vssub_w (__m128i, __m128i);
++__m128i __lsx_vssub_wu (__m128i, __m128i);
++void __lsx_vst (__m128i, void *, imm_n2048_2047);
++void __lsx_vstelm_b (__m128i, void *, imm_n128_127, idx);
++void __lsx_vstelm_d (__m128i, void *, imm_n128_127, idx);
++void __lsx_vstelm_h (__m128i, void *, imm_n128_127, idx);
++void __lsx_vstelm_w (__m128i, void *, imm_n128_127, idx);
++void __lsx_vstx (__m128i, void *, long int);
++__m128i __lsx_vsub_b (__m128i, __m128i);
++__m128i __lsx_vsub_d (__m128i, __m128i);
++__m128i __lsx_vsub_h (__m128i, __m128i);
++__m128i __lsx_vsubi_bu (__m128i, imm0_31);
++__m128i __lsx_vsubi_du (__m128i, imm0_31);
++__m128i __lsx_vsubi_hu (__m128i, imm0_31);
++__m128i __lsx_vsubi_wu (__m128i, imm0_31);
++__m128i __lsx_vsub_q (__m128i, __m128i);
++__m128i __lsx_vsub_w (__m128i, __m128i);
++__m128i __lsx_vsubwev_d_w (__m128i, __m128i);
++__m128i __lsx_vsubwev_d_wu (__m128i, __m128i);
++__m128i __lsx_vsubwev_h_b (__m128i, __m128i);
++__m128i __lsx_vsubwev_h_bu (__m128i, __m128i);
++__m128i __lsx_vsubwev_q_d (__m128i, __m128i);
++__m128i __lsx_vsubwev_q_du (__m128i, __m128i);
++__m128i __lsx_vsubwev_w_h (__m128i, __m128i);
++__m128i __lsx_vsubwev_w_hu (__m128i, __m128i);
++__m128i __lsx_vsubwod_d_w (__m128i, __m128i);
++__m128i __lsx_vsubwod_d_wu (__m128i, __m128i);
++__m128i __lsx_vsubwod_h_b (__m128i, __m128i);
++__m128i __lsx_vsubwod_h_bu (__m128i, __m128i);
++__m128i __lsx_vsubwod_q_d (__m128i, __m128i);
++__m128i __lsx_vsubwod_q_du (__m128i, __m128i);
++__m128i __lsx_vsubwod_w_h (__m128i, __m128i);
++__m128i __lsx_vsubwod_w_hu (__m128i, __m128i);
++__m128i __lsx_vxori_b (__m128i, imm0_255);
++__m128i __lsx_vxor_v (__m128i, __m128i);
++@end smallexample
++
++@node LoongArch ASX Vector Intrinsics
++@subsection LoongArch ASX Vector Intrinsics
++
++GCC provides intrinsics to access the LASX (Loongson Advanced SIMD Extension)
++instructions. The interface is made available by including @code{<lasxintrin.h>}
++and using @option{-mlasx}.
++
++The following vector typedefs are included in @code{lasxintrin.h}:
++
++@itemize
++@item @code{__m256i}, a 256-bit vector of fixed point;
++@item @code{__m256}, a 256-bit vector of single precision floating point;
++@item @code{__m256d}, a 256-bit vector of double precision floating point.
++@end itemize
++
++Instructions and corresponding built-ins may have additional restrictions and/or
++input/output values manipulated:
++
++@itemize
++@item @code{imm0_1}, an integer literal in range 0 to 1.
++@item @code{imm0_3}, an integer literal in range 0 to 3.
++@item @code{imm0_7}, an integer literal in range 0 to 7.
++@item @code{imm0_15}, an integer literal in range 0 to 15.
++@item @code{imm0_31}, an integer literal in range 0 to 31.
++@item @code{imm0_63}, an integer literal in range 0 to 63.
++@item @code{imm0_127}, an integer literal in range 0 to 127.
++@item @code{imm0_255}, an integer literal in range 0 to 255.
++@item @code{imm_n16_15}, an integer literal in range -16 to 15.
++@item @code{imm_n128_127}, an integer literal in range -128 to 127.
++@item @code{imm_n256_255}, an integer literal in range -256 to 255.
++@item @code{imm_n512_511}, an integer literal in range -512 to 511.
++@item @code{imm_n1024_1023}, an integer literal in range -1024 to 1023.
++@item @code{imm_n2048_2047}, an integer literal in range -2048 to 2047.
++@end itemize
++
++For convenience, GCC defines functions @code{__lasx_xvrepli_@{b/h/w/d@}} and
++@code{__lasx_xb[n]z_@{v/b/h/w/d@}}, which are implemented as follows:
++
++@smallexample
++a. @code{__lasx_xvrepli_@{b/h/w/d@}}: Implemented the case where the highest
++   bit of @code{xvldi} instruction @code{i13} is 0.
++
++   i13[12] == 1'b0
++   case i13[11:10] of :
++     2'b00: __lasx_xvrepli_b (imm_n512_511)
++     2'b01: __lasx_xvrepli_h (imm_n512_511)
++     2'b10: __lasx_xvrepli_w (imm_n512_511)
++     2'b11: __lasx_xvrepli_d (imm_n512_511)
++
++b. @code{__lasx_xb[n]z_@{v/b/h/w/d@}}: Since the @code{xvseteqz} class directive
++   cannot be used on its own, this function is defined.
++
++   __lasx_xbz_v  => xvseteqz.v + bcnez
++   __lasx_xbnz_v => xvsetnez.v + bcnez
++   __lasx_xbz_b  => xvsetanyeqz.b + bcnez
++   __lasx_xbz_h  => xvsetanyeqz.h + bcnez
++   __lasx_xbz_w  => xvsetanyeqz.w + bcnez
++   __lasx_xbz_d  => xvsetanyeqz.d + bcnez
++   __lasx_xbnz_b => xvsetallnez.b + bcnez
++   __lasx_xbnz_h => xvsetallnez.h + bcnez
++   __lasx_xbnz_w => xvsetallnez.w + bcnez
++   __lasx_xbnz_d => xvsetallnez.d + bcnez
++@end smallexample
++
++@smallexample
++eg:
++  #include <lasxintrin.h>
++
++  extern __m256i @var{a};
++
++  void
++  test (void)
++  @{
++    if (__lasx_xbz_v (@var{a}))
++      printf ("1\n");
++    else
++      printf ("2\n");
++  @}
++@end smallexample
++
++@emph{Note:} For instructions where the destination operand is also a source
++operand (i.e. only part of the destination register's bitfield is modified), the
++first argument of the built-in function call is used as the destination operand.
++
++@smallexample
++eg:
++  #include <lasxintrin.h>
++  extern __m256i @var{dst};
++  int @var{src};
++
++  void
++  test (void)
++  @{
++    @var{dst} = __lasx_xvinsgr2vr_w (@var{dst}, @var{src}, 3);
++  @}
++@end smallexample
++
++
++The intrinsics provided are listed below:
++
++@smallexample
++__m256i __lasx_vext2xv_d_b (__m256i);
++__m256i __lasx_vext2xv_d_h (__m256i);
++__m256i __lasx_vext2xv_du_bu (__m256i);
++__m256i __lasx_vext2xv_du_hu (__m256i);
++__m256i __lasx_vext2xv_du_wu (__m256i);
++__m256i __lasx_vext2xv_d_w (__m256i);
++__m256i __lasx_vext2xv_h_b (__m256i);
++__m256i __lasx_vext2xv_hu_bu (__m256i);
++__m256i __lasx_vext2xv_w_b (__m256i);
++__m256i __lasx_vext2xv_w_h (__m256i);
++__m256i __lasx_vext2xv_wu_bu (__m256i);
++__m256i __lasx_vext2xv_wu_hu (__m256i);
++int __lasx_xbnz_b (__m256i);
++int __lasx_xbnz_d (__m256i);
++int __lasx_xbnz_h (__m256i);
++int __lasx_xbnz_v (__m256i);
++int __lasx_xbnz_w (__m256i);
++int __lasx_xbz_b (__m256i);
++int __lasx_xbz_d (__m256i);
++int __lasx_xbz_h (__m256i);
++int __lasx_xbz_v (__m256i);
++int __lasx_xbz_w (__m256i);
++__m256i __lasx_xvabsd_b (__m256i, __m256i);
++__m256i __lasx_xvabsd_bu (__m256i, __m256i);
++__m256i __lasx_xvabsd_d (__m256i, __m256i);
++__m256i __lasx_xvabsd_du (__m256i, __m256i);
++__m256i __lasx_xvabsd_h (__m256i, __m256i);
++__m256i __lasx_xvabsd_hu (__m256i, __m256i);
++__m256i __lasx_xvabsd_w (__m256i, __m256i);
++__m256i __lasx_xvabsd_wu (__m256i, __m256i);
++__m256i __lasx_xvadda_b (__m256i, __m256i);
++__m256i __lasx_xvadda_d (__m256i, __m256i);
++__m256i __lasx_xvadda_h (__m256i, __m256i);
++__m256i __lasx_xvadda_w (__m256i, __m256i);
++__m256i __lasx_xvadd_b (__m256i, __m256i);
++__m256i __lasx_xvadd_d (__m256i, __m256i);
++__m256i __lasx_xvadd_h (__m256i, __m256i);
++__m256i __lasx_xvaddi_bu (__m256i, imm0_31);
++__m256i __lasx_xvaddi_du (__m256i, imm0_31);
++__m256i __lasx_xvaddi_hu (__m256i, imm0_31);
++__m256i __lasx_xvaddi_wu (__m256i, imm0_31);
++__m256i __lasx_xvadd_q (__m256i, __m256i);
++__m256i __lasx_xvadd_w (__m256i, __m256i);
++__m256i __lasx_xvaddwev_d_w (__m256i, __m256i);
++__m256i __lasx_xvaddwev_d_wu (__m256i, __m256i);
++__m256i __lasx_xvaddwev_d_wu_w (__m256i, __m256i);
++__m256i __lasx_xvaddwev_h_b (__m256i, __m256i);
++__m256i __lasx_xvaddwev_h_bu (__m256i, __m256i);
++__m256i __lasx_xvaddwev_h_bu_b (__m256i, __m256i);
++__m256i __lasx_xvaddwev_q_d (__m256i, __m256i);
++__m256i __lasx_xvaddwev_q_du (__m256i, __m256i);
++__m256i __lasx_xvaddwev_q_du_d (__m256i, __m256i);
++__m256i __lasx_xvaddwev_w_h (__m256i, __m256i);
++__m256i __lasx_xvaddwev_w_hu (__m256i, __m256i);
++__m256i __lasx_xvaddwev_w_hu_h (__m256i, __m256i);
++__m256i __lasx_xvaddwod_d_w (__m256i, __m256i);
++__m256i __lasx_xvaddwod_d_wu (__m256i, __m256i);
++__m256i __lasx_xvaddwod_d_wu_w (__m256i, __m256i);
++__m256i __lasx_xvaddwod_h_b (__m256i, __m256i);
++__m256i __lasx_xvaddwod_h_bu (__m256i, __m256i);
++__m256i __lasx_xvaddwod_h_bu_b (__m256i, __m256i);
++__m256i __lasx_xvaddwod_q_d (__m256i, __m256i);
++__m256i __lasx_xvaddwod_q_du (__m256i, __m256i);
++__m256i __lasx_xvaddwod_q_du_d (__m256i, __m256i);
++__m256i __lasx_xvaddwod_w_h (__m256i, __m256i);
++__m256i __lasx_xvaddwod_w_hu (__m256i, __m256i);
++__m256i __lasx_xvaddwod_w_hu_h (__m256i, __m256i);
++__m256i __lasx_xvandi_b (__m256i, imm0_255);
++__m256i __lasx_xvandn_v (__m256i, __m256i);
++__m256i __lasx_xvand_v (__m256i, __m256i);
++__m256i __lasx_xvavg_b (__m256i, __m256i);
++__m256i __lasx_xvavg_bu (__m256i, __m256i);
++__m256i __lasx_xvavg_d (__m256i, __m256i);
++__m256i __lasx_xvavg_du (__m256i, __m256i);
++__m256i __lasx_xvavg_h (__m256i, __m256i);
++__m256i __lasx_xvavg_hu (__m256i, __m256i);
++__m256i __lasx_xvavgr_b (__m256i, __m256i);
++__m256i __lasx_xvavgr_bu (__m256i, __m256i);
++__m256i __lasx_xvavgr_d (__m256i, __m256i);
++__m256i __lasx_xvavgr_du (__m256i, __m256i);
++__m256i __lasx_xvavgr_h (__m256i, __m256i);
++__m256i __lasx_xvavgr_hu (__m256i, __m256i);
++__m256i __lasx_xvavgr_w (__m256i, __m256i);
++__m256i __lasx_xvavgr_wu (__m256i, __m256i);
++__m256i __lasx_xvavg_w (__m256i, __m256i);
++__m256i __lasx_xvavg_wu (__m256i, __m256i);
++__m256i __lasx_xvbitclr_b (__m256i, __m256i);
++__m256i __lasx_xvbitclr_d (__m256i, __m256i);
++__m256i __lasx_xvbitclr_h (__m256i, __m256i);
++__m256i __lasx_xvbitclri_b (__m256i, imm0_7);
++__m256i __lasx_xvbitclri_d (__m256i, imm0_63);
++__m256i __lasx_xvbitclri_h (__m256i, imm0_15);
++__m256i __lasx_xvbitclri_w (__m256i, imm0_31);
++__m256i __lasx_xvbitclr_w (__m256i, __m256i);
++__m256i __lasx_xvbitrev_b (__m256i, __m256i);
++__m256i __lasx_xvbitrev_d (__m256i, __m256i);
++__m256i __lasx_xvbitrev_h (__m256i, __m256i);
++__m256i __lasx_xvbitrevi_b (__m256i, imm0_7);
++__m256i __lasx_xvbitrevi_d (__m256i, imm0_63);
++__m256i __lasx_xvbitrevi_h (__m256i, imm0_15);
++__m256i __lasx_xvbitrevi_w (__m256i, imm0_31);
++__m256i __lasx_xvbitrev_w (__m256i, __m256i);
++__m256i __lasx_xvbitseli_b (__m256i, __m256i, imm0_255);
++__m256i __lasx_xvbitsel_v (__m256i, __m256i, __m256i);
++__m256i __lasx_xvbitset_b (__m256i, __m256i);
++__m256i __lasx_xvbitset_d (__m256i, __m256i);
++__m256i __lasx_xvbitset_h (__m256i, __m256i);
++__m256i __lasx_xvbitseti_b (__m256i, imm0_7);
++__m256i __lasx_xvbitseti_d (__m256i, imm0_63);
++__m256i __lasx_xvbitseti_h (__m256i, imm0_15);
++__m256i __lasx_xvbitseti_w (__m256i, imm0_31);
++__m256i __lasx_xvbitset_w (__m256i, __m256i);
++__m256i __lasx_xvbsll_v (__m256i, imm0_31);
++__m256i __lasx_xvbsrl_v (__m256i, imm0_31);
++__m256i __lasx_xvclo_b (__m256i);
++__m256i __lasx_xvclo_d (__m256i);
++__m256i __lasx_xvclo_h (__m256i);
++__m256i __lasx_xvclo_w (__m256i);
++__m256i __lasx_xvclz_b (__m256i);
++__m256i __lasx_xvclz_d (__m256i);
++__m256i __lasx_xvclz_h (__m256i);
++__m256i __lasx_xvclz_w (__m256i);
++__m256i __lasx_xvdiv_b (__m256i, __m256i);
++__m256i __lasx_xvdiv_bu (__m256i, __m256i);
++__m256i __lasx_xvdiv_d (__m256i, __m256i);
++__m256i __lasx_xvdiv_du (__m256i, __m256i);
++__m256i __lasx_xvdiv_h (__m256i, __m256i);
++__m256i __lasx_xvdiv_hu (__m256i, __m256i);
++__m256i __lasx_xvdiv_w (__m256i, __m256i);
++__m256i __lasx_xvdiv_wu (__m256i, __m256i);
++__m256i __lasx_xvexth_du_wu (__m256i);
++__m256i __lasx_xvexth_d_w (__m256i);
++__m256i __lasx_xvexth_h_b (__m256i);
++__m256i __lasx_xvexth_hu_bu (__m256i);
++__m256i __lasx_xvexth_q_d (__m256i);
++__m256i __lasx_xvexth_qu_du (__m256i);
++__m256i __lasx_xvexth_w_h (__m256i);
++__m256i __lasx_xvexth_wu_hu (__m256i);
++__m256i __lasx_xvextl_q_d (__m256i);
++__m256i __lasx_xvextl_qu_du (__m256i);
++__m256i __lasx_xvextrins_b (__m256i, __m256i, imm0_255);
++__m256i __lasx_xvextrins_d (__m256i, __m256i, imm0_255);
++__m256i __lasx_xvextrins_h (__m256i, __m256i, imm0_255);
++__m256i __lasx_xvextrins_w (__m256i, __m256i, imm0_255);
++__m256d __lasx_xvfadd_d (__m256d, __m256d);
++__m256 __lasx_xvfadd_s (__m256, __m256);
++__m256i __lasx_xvfclass_d (__m256d);
++__m256i __lasx_xvfclass_s (__m256);
++__m256i __lasx_xvfcmp_caf_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_caf_s (__m256, __m256);
++__m256i __lasx_xvfcmp_ceq_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_ceq_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cle_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cle_s (__m256, __m256);
++__m256i __lasx_xvfcmp_clt_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_clt_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cne_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cne_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cor_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cor_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cueq_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cueq_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cule_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cule_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cult_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cult_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cun_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cune_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_cune_s (__m256, __m256);
++__m256i __lasx_xvfcmp_cun_s (__m256, __m256);
++__m256i __lasx_xvfcmp_saf_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_saf_s (__m256, __m256);
++__m256i __lasx_xvfcmp_seq_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_seq_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sle_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sle_s (__m256, __m256);
++__m256i __lasx_xvfcmp_slt_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_slt_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sne_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sne_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sor_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sor_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sueq_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sueq_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sule_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sule_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sult_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sult_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sun_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sune_d (__m256d, __m256d);
++__m256i __lasx_xvfcmp_sune_s (__m256, __m256);
++__m256i __lasx_xvfcmp_sun_s (__m256, __m256);
++__m256d __lasx_xvfcvth_d_s (__m256);
++__m256i __lasx_xvfcvt_h_s (__m256, __m256);
++__m256 __lasx_xvfcvth_s_h (__m256i);
++__m256d __lasx_xvfcvtl_d_s (__m256);
++__m256 __lasx_xvfcvtl_s_h (__m256i);
++__m256 __lasx_xvfcvt_s_d (__m256d, __m256d);
++__m256d __lasx_xvfdiv_d (__m256d, __m256d);
++__m256 __lasx_xvfdiv_s (__m256, __m256);
++__m256d __lasx_xvffint_d_l (__m256i);
++__m256d __lasx_xvffint_d_lu (__m256i);
++__m256d __lasx_xvffinth_d_w (__m256i);
++__m256d __lasx_xvffintl_d_w (__m256i);
++__m256 __lasx_xvffint_s_l (__m256i, __m256i);
++__m256 __lasx_xvffint_s_w (__m256i);
++__m256 __lasx_xvffint_s_wu (__m256i);
++__m256d __lasx_xvflogb_d (__m256d);
++__m256 __lasx_xvflogb_s (__m256);
++__m256d __lasx_xvfmadd_d (__m256d, __m256d, __m256d);
++__m256 __lasx_xvfmadd_s (__m256, __m256, __m256);
++__m256d __lasx_xvfmaxa_d (__m256d, __m256d);
++__m256 __lasx_xvfmaxa_s (__m256, __m256);
++__m256d __lasx_xvfmax_d (__m256d, __m256d);
++__m256 __lasx_xvfmax_s (__m256, __m256);
++__m256d __lasx_xvfmina_d (__m256d, __m256d);
++__m256 __lasx_xvfmina_s (__m256, __m256);
++__m256d __lasx_xvfmin_d (__m256d, __m256d);
++__m256 __lasx_xvfmin_s (__m256, __m256);
++__m256d __lasx_xvfmsub_d (__m256d, __m256d, __m256d);
++__m256 __lasx_xvfmsub_s (__m256, __m256, __m256);
++__m256d __lasx_xvfmul_d (__m256d, __m256d);
++__m256 __lasx_xvfmul_s (__m256, __m256);
++__m256d __lasx_xvfnmadd_d (__m256d, __m256d, __m256d);
++__m256 __lasx_xvfnmadd_s (__m256, __m256, __m256);
++__m256d __lasx_xvfnmsub_d (__m256d, __m256d, __m256d);
++__m256 __lasx_xvfnmsub_s (__m256, __m256, __m256);
++__m256d __lasx_xvfrecip_d (__m256d);
++__m256 __lasx_xvfrecip_s (__m256);
++__m256d __lasx_xvfrint_d (__m256d);
++__m256i __lasx_xvfrintrm_d (__m256d);
++__m256i __lasx_xvfrintrm_s (__m256);
++__m256i __lasx_xvfrintrne_d (__m256d);
++__m256i __lasx_xvfrintrne_s (__m256);
++__m256i __lasx_xvfrintrp_d (__m256d);
++__m256i __lasx_xvfrintrp_s (__m256);
++__m256i __lasx_xvfrintrz_d (__m256d);
++__m256i __lasx_xvfrintrz_s (__m256);
++__m256 __lasx_xvfrint_s (__m256);
++__m256d __lasx_xvfrsqrt_d (__m256d);
++__m256 __lasx_xvfrsqrt_s (__m256);
++__m256i __lasx_xvfrstp_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvfrstp_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvfrstpi_b (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvfrstpi_h (__m256i, __m256i, imm0_31);
++__m256d __lasx_xvfsqrt_d (__m256d);
++__m256 __lasx_xvfsqrt_s (__m256);
++__m256d __lasx_xvfsub_d (__m256d, __m256d);
++__m256 __lasx_xvfsub_s (__m256, __m256);
++__m256i __lasx_xvftinth_l_s (__m256);
++__m256i __lasx_xvftint_l_d (__m256d);
++__m256i __lasx_xvftintl_l_s (__m256);
++__m256i __lasx_xvftint_lu_d (__m256d);
++__m256i __lasx_xvftintrmh_l_s (__m256);
++__m256i __lasx_xvftintrm_l_d (__m256d);
++__m256i __lasx_xvftintrml_l_s (__m256);
++__m256i __lasx_xvftintrm_w_d (__m256d, __m256d);
++__m256i __lasx_xvftintrm_w_s (__m256);
++__m256i __lasx_xvftintrneh_l_s (__m256);
++__m256i __lasx_xvftintrne_l_d (__m256d);
++__m256i __lasx_xvftintrnel_l_s (__m256);
++__m256i __lasx_xvftintrne_w_d (__m256d, __m256d);
++__m256i __lasx_xvftintrne_w_s (__m256);
++__m256i __lasx_xvftintrph_l_s (__m256);
++__m256i __lasx_xvftintrp_l_d (__m256d);
++__m256i __lasx_xvftintrpl_l_s (__m256);
++__m256i __lasx_xvftintrp_w_d (__m256d, __m256d);
++__m256i __lasx_xvftintrp_w_s (__m256);
++__m256i __lasx_xvftintrzh_l_s (__m256);
++__m256i __lasx_xvftintrz_l_d (__m256d);
++__m256i __lasx_xvftintrzl_l_s (__m256);
++__m256i __lasx_xvftintrz_lu_d (__m256d);
++__m256i __lasx_xvftintrz_w_d (__m256d, __m256d);
++__m256i __lasx_xvftintrz_w_s (__m256);
++__m256i __lasx_xvftintrz_wu_s (__m256);
++__m256i __lasx_xvftint_w_d (__m256d, __m256d);
++__m256i __lasx_xvftint_w_s (__m256);
++__m256i __lasx_xvftint_wu_s (__m256);
++__m256i __lasx_xvhaddw_du_wu (__m256i, __m256i);
++__m256i __lasx_xvhaddw_d_w (__m256i, __m256i);
++__m256i __lasx_xvhaddw_h_b (__m256i, __m256i);
++__m256i __lasx_xvhaddw_hu_bu (__m256i, __m256i);
++__m256i __lasx_xvhaddw_q_d (__m256i, __m256i);
++__m256i __lasx_xvhaddw_qu_du (__m256i, __m256i);
++__m256i __lasx_xvhaddw_w_h (__m256i, __m256i);
++__m256i __lasx_xvhaddw_wu_hu (__m256i, __m256i);
++__m256i __lasx_xvhsubw_du_wu (__m256i, __m256i);
++__m256i __lasx_xvhsubw_d_w (__m256i, __m256i);
++__m256i __lasx_xvhsubw_h_b (__m256i, __m256i);
++__m256i __lasx_xvhsubw_hu_bu (__m256i, __m256i);
++__m256i __lasx_xvhsubw_q_d (__m256i, __m256i);
++__m256i __lasx_xvhsubw_qu_du (__m256i, __m256i);
++__m256i __lasx_xvhsubw_w_h (__m256i, __m256i);
++__m256i __lasx_xvhsubw_wu_hu (__m256i, __m256i);
++__m256i __lasx_xvilvh_b (__m256i, __m256i);
++__m256i __lasx_xvilvh_d (__m256i, __m256i);
++__m256i __lasx_xvilvh_h (__m256i, __m256i);
++__m256i __lasx_xvilvh_w (__m256i, __m256i);
++__m256i __lasx_xvilvl_b (__m256i, __m256i);
++__m256i __lasx_xvilvl_d (__m256i, __m256i);
++__m256i __lasx_xvilvl_h (__m256i, __m256i);
++__m256i __lasx_xvilvl_w (__m256i, __m256i);
++__m256i __lasx_xvinsgr2vr_d (__m256i, long int, imm0_3);
++__m256i __lasx_xvinsgr2vr_w (__m256i, int, imm0_7);
++__m256i __lasx_xvinsve0_d (__m256i, __m256i, imm0_3);
++__m256i __lasx_xvinsve0_w (__m256i, __m256i, imm0_7);
++__m256i __lasx_xvld (void *, imm_n2048_2047);
++__m256i __lasx_xvldi (imm_n1024_1023);
++__m256i __lasx_xvldrepl_b (void *, imm_n2048_2047);
++__m256i __lasx_xvldrepl_d (void *, imm_n256_255);
++__m256i __lasx_xvldrepl_h (void *, imm_n1024_1023);
++__m256i __lasx_xvldrepl_w (void *, imm_n512_511);
++__m256i __lasx_xvldx (void *, long int);
++__m256i __lasx_xvmadd_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmadd_d (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmadd_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmadd_w (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_d_w (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_d_wu (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_d_wu_w (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_h_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_h_bu (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_h_bu_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_q_d (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_q_du (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_q_du_d (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_w_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_w_hu (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwev_w_hu_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_d_w (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_d_wu (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_d_wu_w (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_h_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_h_bu (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_h_bu_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_q_d (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_q_du (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_q_du_d (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_w_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_w_hu (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmaddwod_w_hu_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmax_b (__m256i, __m256i);
++__m256i __lasx_xvmax_bu (__m256i, __m256i);
++__m256i __lasx_xvmax_d (__m256i, __m256i);
++__m256i __lasx_xvmax_du (__m256i, __m256i);
++__m256i __lasx_xvmax_h (__m256i, __m256i);
++__m256i __lasx_xvmax_hu (__m256i, __m256i);
++__m256i __lasx_xvmaxi_b (__m256i, imm_n16_15);
++__m256i __lasx_xvmaxi_bu (__m256i, imm0_31);
++__m256i __lasx_xvmaxi_d (__m256i, imm_n16_15);
++__m256i __lasx_xvmaxi_du (__m256i, imm0_31);
++__m256i __lasx_xvmaxi_h (__m256i, imm_n16_15);
++__m256i __lasx_xvmaxi_hu (__m256i, imm0_31);
++__m256i __lasx_xvmaxi_w (__m256i, imm_n16_15);
++__m256i __lasx_xvmaxi_wu (__m256i, imm0_31);
++__m256i __lasx_xvmax_w (__m256i, __m256i);
++__m256i __lasx_xvmax_wu (__m256i, __m256i);
++__m256i __lasx_xvmin_b (__m256i, __m256i);
++__m256i __lasx_xvmin_bu (__m256i, __m256i);
++__m256i __lasx_xvmin_d (__m256i, __m256i);
++__m256i __lasx_xvmin_du (__m256i, __m256i);
++__m256i __lasx_xvmin_h (__m256i, __m256i);
++__m256i __lasx_xvmin_hu (__m256i, __m256i);
++__m256i __lasx_xvmini_b (__m256i, imm_n16_15);
++__m256i __lasx_xvmini_bu (__m256i, imm0_31);
++__m256i __lasx_xvmini_d (__m256i, imm_n16_15);
++__m256i __lasx_xvmini_du (__m256i, imm0_31);
++__m256i __lasx_xvmini_h (__m256i, imm_n16_15);
++__m256i __lasx_xvmini_hu (__m256i, imm0_31);
++__m256i __lasx_xvmini_w (__m256i, imm_n16_15);
++__m256i __lasx_xvmini_wu (__m256i, imm0_31);
++__m256i __lasx_xvmin_w (__m256i, __m256i);
++__m256i __lasx_xvmin_wu (__m256i, __m256i);
++__m256i __lasx_xvmod_b (__m256i, __m256i);
++__m256i __lasx_xvmod_bu (__m256i, __m256i);
++__m256i __lasx_xvmod_d (__m256i, __m256i);
++__m256i __lasx_xvmod_du (__m256i, __m256i);
++__m256i __lasx_xvmod_h (__m256i, __m256i);
++__m256i __lasx_xvmod_hu (__m256i, __m256i);
++__m256i __lasx_xvmod_w (__m256i, __m256i);
++__m256i __lasx_xvmod_wu (__m256i, __m256i);
++__m256i __lasx_xvmskgez_b (__m256i);
++__m256i __lasx_xvmskltz_b (__m256i);
++__m256i __lasx_xvmskltz_d (__m256i);
++__m256i __lasx_xvmskltz_h (__m256i);
++__m256i __lasx_xvmskltz_w (__m256i);
++__m256i __lasx_xvmsknz_b (__m256i);
++__m256i __lasx_xvmsub_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmsub_d (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmsub_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmsub_w (__m256i, __m256i, __m256i);
++__m256i __lasx_xvmuh_b (__m256i, __m256i);
++__m256i __lasx_xvmuh_bu (__m256i, __m256i);
++__m256i __lasx_xvmuh_d (__m256i, __m256i);
++__m256i __lasx_xvmuh_du (__m256i, __m256i);
++__m256i __lasx_xvmuh_h (__m256i, __m256i);
++__m256i __lasx_xvmuh_hu (__m256i, __m256i);
++__m256i __lasx_xvmuh_w (__m256i, __m256i);
++__m256i __lasx_xvmuh_wu (__m256i, __m256i);
++__m256i __lasx_xvmul_b (__m256i, __m256i);
++__m256i __lasx_xvmul_d (__m256i, __m256i);
++__m256i __lasx_xvmul_h (__m256i, __m256i);
++__m256i __lasx_xvmul_w (__m256i, __m256i);
++__m256i __lasx_xvmulwev_d_w (__m256i, __m256i);
++__m256i __lasx_xvmulwev_d_wu (__m256i, __m256i);
++__m256i __lasx_xvmulwev_d_wu_w (__m256i, __m256i);
++__m256i __lasx_xvmulwev_h_b (__m256i, __m256i);
++__m256i __lasx_xvmulwev_h_bu (__m256i, __m256i);
++__m256i __lasx_xvmulwev_h_bu_b (__m256i, __m256i);
++__m256i __lasx_xvmulwev_q_d (__m256i, __m256i);
++__m256i __lasx_xvmulwev_q_du (__m256i, __m256i);
++__m256i __lasx_xvmulwev_q_du_d (__m256i, __m256i);
++__m256i __lasx_xvmulwev_w_h (__m256i, __m256i);
++__m256i __lasx_xvmulwev_w_hu (__m256i, __m256i);
++__m256i __lasx_xvmulwev_w_hu_h (__m256i, __m256i);
++__m256i __lasx_xvmulwod_d_w (__m256i, __m256i);
++__m256i __lasx_xvmulwod_d_wu (__m256i, __m256i);
++__m256i __lasx_xvmulwod_d_wu_w (__m256i, __m256i);
++__m256i __lasx_xvmulwod_h_b (__m256i, __m256i);
++__m256i __lasx_xvmulwod_h_bu (__m256i, __m256i);
++__m256i __lasx_xvmulwod_h_bu_b (__m256i, __m256i);
++__m256i __lasx_xvmulwod_q_d (__m256i, __m256i);
++__m256i __lasx_xvmulwod_q_du (__m256i, __m256i);
++__m256i __lasx_xvmulwod_q_du_d (__m256i, __m256i);
++__m256i __lasx_xvmulwod_w_h (__m256i, __m256i);
++__m256i __lasx_xvmulwod_w_hu (__m256i, __m256i);
++__m256i __lasx_xvmulwod_w_hu_h (__m256i, __m256i);
++__m256i __lasx_xvneg_b (__m256i);
++__m256i __lasx_xvneg_d (__m256i);
++__m256i __lasx_xvneg_h (__m256i);
++__m256i __lasx_xvneg_w (__m256i);
++__m256i __lasx_xvnori_b (__m256i, imm0_255);
++__m256i __lasx_xvnor_v (__m256i, __m256i);
++__m256i __lasx_xvori_b (__m256i, imm0_255);
++__m256i __lasx_xvorn_v (__m256i, __m256i);
++__m256i __lasx_xvor_v (__m256i, __m256i);
++__m256i __lasx_xvpackev_b (__m256i, __m256i);
++__m256i __lasx_xvpackev_d (__m256i, __m256i);
++__m256i __lasx_xvpackev_h (__m256i, __m256i);
++__m256i __lasx_xvpackev_w (__m256i, __m256i);
++__m256i __lasx_xvpackod_b (__m256i, __m256i);
++__m256i __lasx_xvpackod_d (__m256i, __m256i);
++__m256i __lasx_xvpackod_h (__m256i, __m256i);
++__m256i __lasx_xvpackod_w (__m256i, __m256i);
++__m256i __lasx_xvpcnt_b (__m256i);
++__m256i __lasx_xvpcnt_d (__m256i);
++__m256i __lasx_xvpcnt_h (__m256i);
++__m256i __lasx_xvpcnt_w (__m256i);
++__m256i __lasx_xvpermi_d (__m256i, imm0_255);
++__m256i __lasx_xvpermi_q (__m256i, __m256i, imm0_255);
++__m256i __lasx_xvpermi_w (__m256i, __m256i, imm0_255);
++__m256i __lasx_xvperm_w (__m256i, __m256i);
++__m256i __lasx_xvpickev_b (__m256i, __m256i);
++__m256i __lasx_xvpickev_d (__m256i, __m256i);
++__m256i __lasx_xvpickev_h (__m256i, __m256i);
++__m256i __lasx_xvpickev_w (__m256i, __m256i);
++__m256i __lasx_xvpickod_b (__m256i, __m256i);
++__m256i __lasx_xvpickod_d (__m256i, __m256i);
++__m256i __lasx_xvpickod_h (__m256i, __m256i);
++__m256i __lasx_xvpickod_w (__m256i, __m256i);
++long int __lasx_xvpickve2gr_d (__m256i, imm0_3);
++unsigned long int __lasx_xvpickve2gr_du (__m256i, imm0_3);
++int __lasx_xvpickve2gr_w (__m256i, imm0_7);
++unsigned int __lasx_xvpickve2gr_wu (__m256i, imm0_7);
++__m256i __lasx_xvpickve_d (__m256i, imm0_3);
++__m256d __lasx_xvpickve_d_f (__m256d, imm0_3);
++__m256i __lasx_xvpickve_w (__m256i, imm0_7);
++__m256 __lasx_xvpickve_w_f (__m256, imm0_7);
++__m256i __lasx_xvrepl128vei_b (__m256i, imm0_15);
++__m256i __lasx_xvrepl128vei_d (__m256i, imm0_1);
++__m256i __lasx_xvrepl128vei_h (__m256i, imm0_7);
++__m256i __lasx_xvrepl128vei_w (__m256i, imm0_3);
++__m256i __lasx_xvreplgr2vr_b (int);
++__m256i __lasx_xvreplgr2vr_d (long int);
++__m256i __lasx_xvreplgr2vr_h (int);
++__m256i __lasx_xvreplgr2vr_w (int);
++__m256i __lasx_xvrepli_b (imm_n512_511);
++__m256i __lasx_xvrepli_d (imm_n512_511);
++__m256i __lasx_xvrepli_h (imm_n512_511);
++__m256i __lasx_xvrepli_w (imm_n512_511);
++__m256i __lasx_xvreplve0_b (__m256i);
++__m256i __lasx_xvreplve0_d (__m256i);
++__m256i __lasx_xvreplve0_h (__m256i);
++__m256i __lasx_xvreplve0_q (__m256i);
++__m256i __lasx_xvreplve0_w (__m256i);
++__m256i __lasx_xvreplve_b (__m256i, int);
++__m256i __lasx_xvreplve_d (__m256i, int);
++__m256i __lasx_xvreplve_h (__m256i, int);
++__m256i __lasx_xvreplve_w (__m256i, int);
++__m256i __lasx_xvrotr_b (__m256i, __m256i);
++__m256i __lasx_xvrotr_d (__m256i, __m256i);
++__m256i __lasx_xvrotr_h (__m256i, __m256i);
++__m256i __lasx_xvrotri_b (__m256i, imm0_7);
++__m256i __lasx_xvrotri_d (__m256i, imm0_63);
++__m256i __lasx_xvrotri_h (__m256i, imm0_15);
++__m256i __lasx_xvrotri_w (__m256i, imm0_31);
++__m256i __lasx_xvrotr_w (__m256i, __m256i);
++__m256i __lasx_xvsadd_b (__m256i, __m256i);
++__m256i __lasx_xvsadd_bu (__m256i, __m256i);
++__m256i __lasx_xvsadd_d (__m256i, __m256i);
++__m256i __lasx_xvsadd_du (__m256i, __m256i);
++__m256i __lasx_xvsadd_h (__m256i, __m256i);
++__m256i __lasx_xvsadd_hu (__m256i, __m256i);
++__m256i __lasx_xvsadd_w (__m256i, __m256i);
++__m256i __lasx_xvsadd_wu (__m256i, __m256i);
++__m256i __lasx_xvsat_b (__m256i, imm0_7);
++__m256i __lasx_xvsat_bu (__m256i, imm0_7);
++__m256i __lasx_xvsat_d (__m256i, imm0_63);
++__m256i __lasx_xvsat_du (__m256i, imm0_63);
++__m256i __lasx_xvsat_h (__m256i, imm0_15);
++__m256i __lasx_xvsat_hu (__m256i, imm0_15);
++__m256i __lasx_xvsat_w (__m256i, imm0_31);
++__m256i __lasx_xvsat_wu (__m256i, imm0_31);
++__m256i __lasx_xvseq_b (__m256i, __m256i);
++__m256i __lasx_xvseq_d (__m256i, __m256i);
++__m256i __lasx_xvseq_h (__m256i, __m256i);
++__m256i __lasx_xvseqi_b (__m256i, imm_n16_15);
++__m256i __lasx_xvseqi_d (__m256i, imm_n16_15);
++__m256i __lasx_xvseqi_h (__m256i, imm_n16_15);
++__m256i __lasx_xvseqi_w (__m256i, imm_n16_15);
++__m256i __lasx_xvseq_w (__m256i, __m256i);
++__m256i __lasx_xvshuf4i_b (__m256i, imm0_255);
++__m256i __lasx_xvshuf4i_d (__m256i, __m256i, imm0_255);
++__m256i __lasx_xvshuf4i_h (__m256i, imm0_255);
++__m256i __lasx_xvshuf4i_w (__m256i, imm0_255);
++__m256i __lasx_xvshuf_b (__m256i, __m256i, __m256i);
++__m256i __lasx_xvshuf_d (__m256i, __m256i, __m256i);
++__m256i __lasx_xvshuf_h (__m256i, __m256i, __m256i);
++__m256i __lasx_xvshuf_w (__m256i, __m256i, __m256i);
++__m256i __lasx_xvsigncov_b (__m256i, __m256i);
++__m256i __lasx_xvsigncov_d (__m256i, __m256i);
++__m256i __lasx_xvsigncov_h (__m256i, __m256i);
++__m256i __lasx_xvsigncov_w (__m256i, __m256i);
++__m256i __lasx_xvsle_b (__m256i, __m256i);
++__m256i __lasx_xvsle_bu (__m256i, __m256i);
++__m256i __lasx_xvsle_d (__m256i, __m256i);
++__m256i __lasx_xvsle_du (__m256i, __m256i);
++__m256i __lasx_xvsle_h (__m256i, __m256i);
++__m256i __lasx_xvsle_hu (__m256i, __m256i);
++__m256i __lasx_xvslei_b (__m256i, imm_n16_15);
++__m256i __lasx_xvslei_bu (__m256i, imm0_31);
++__m256i __lasx_xvslei_d (__m256i, imm_n16_15);
++__m256i __lasx_xvslei_du (__m256i, imm0_31);
++__m256i __lasx_xvslei_h (__m256i, imm_n16_15);
++__m256i __lasx_xvslei_hu (__m256i, imm0_31);
++__m256i __lasx_xvslei_w (__m256i, imm_n16_15);
++__m256i __lasx_xvslei_wu (__m256i, imm0_31);
++__m256i __lasx_xvsle_w (__m256i, __m256i);
++__m256i __lasx_xvsle_wu (__m256i, __m256i);
++__m256i __lasx_xvsll_b (__m256i, __m256i);
++__m256i __lasx_xvsll_d (__m256i, __m256i);
++__m256i __lasx_xvsll_h (__m256i, __m256i);
++__m256i __lasx_xvslli_b (__m256i, imm0_7);
++__m256i __lasx_xvslli_d (__m256i, imm0_63);
++__m256i __lasx_xvslli_h (__m256i, imm0_15);
++__m256i __lasx_xvslli_w (__m256i, imm0_31);
++__m256i __lasx_xvsll_w (__m256i, __m256i);
++__m256i __lasx_xvsllwil_du_wu (__m256i, imm0_31);
++__m256i __lasx_xvsllwil_d_w (__m256i, imm0_31);
++__m256i __lasx_xvsllwil_h_b (__m256i, imm0_7);
++__m256i __lasx_xvsllwil_hu_bu (__m256i, imm0_7);
++__m256i __lasx_xvsllwil_w_h (__m256i, imm0_15);
++__m256i __lasx_xvsllwil_wu_hu (__m256i, imm0_15);
++__m256i __lasx_xvslt_b (__m256i, __m256i);
++__m256i __lasx_xvslt_bu (__m256i, __m256i);
++__m256i __lasx_xvslt_d (__m256i, __m256i);
++__m256i __lasx_xvslt_du (__m256i, __m256i);
++__m256i __lasx_xvslt_h (__m256i, __m256i);
++__m256i __lasx_xvslt_hu (__m256i, __m256i);
++__m256i __lasx_xvslti_b (__m256i, imm_n16_15);
++__m256i __lasx_xvslti_bu (__m256i, imm0_31);
++__m256i __lasx_xvslti_d (__m256i, imm_n16_15);
++__m256i __lasx_xvslti_du (__m256i, imm0_31);
++__m256i __lasx_xvslti_h (__m256i, imm_n16_15);
++__m256i __lasx_xvslti_hu (__m256i, imm0_31);
++__m256i __lasx_xvslti_w (__m256i, imm_n16_15);
++__m256i __lasx_xvslti_wu (__m256i, imm0_31);
++__m256i __lasx_xvslt_w (__m256i, __m256i);
++__m256i __lasx_xvslt_wu (__m256i, __m256i);
++__m256i __lasx_xvsra_b (__m256i, __m256i);
++__m256i __lasx_xvsra_d (__m256i, __m256i);
++__m256i __lasx_xvsra_h (__m256i, __m256i);
++__m256i __lasx_xvsrai_b (__m256i, imm0_7);
++__m256i __lasx_xvsrai_d (__m256i, imm0_63);
++__m256i __lasx_xvsrai_h (__m256i, imm0_15);
++__m256i __lasx_xvsrai_w (__m256i, imm0_31);
++__m256i __lasx_xvsran_b_h (__m256i, __m256i);
++__m256i __lasx_xvsran_h_w (__m256i, __m256i);
++__m256i __lasx_xvsrani_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvsrani_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvsrani_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvsrani_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvsran_w_d (__m256i, __m256i);
++__m256i __lasx_xvsrar_b (__m256i, __m256i);
++__m256i __lasx_xvsrar_d (__m256i, __m256i);
++__m256i __lasx_xvsrar_h (__m256i, __m256i);
++__m256i __lasx_xvsrari_b (__m256i, imm0_7);
++__m256i __lasx_xvsrari_d (__m256i, imm0_63);
++__m256i __lasx_xvsrari_h (__m256i, imm0_15);
++__m256i __lasx_xvsrari_w (__m256i, imm0_31);
++__m256i __lasx_xvsrarn_b_h (__m256i, __m256i);
++__m256i __lasx_xvsrarn_h_w (__m256i, __m256i);
++__m256i __lasx_xvsrarni_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvsrarni_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvsrarni_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvsrarni_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvsrarn_w_d (__m256i, __m256i);
++__m256i __lasx_xvsrar_w (__m256i, __m256i);
++__m256i __lasx_xvsra_w (__m256i, __m256i);
++__m256i __lasx_xvsrl_b (__m256i, __m256i);
++__m256i __lasx_xvsrl_d (__m256i, __m256i);
++__m256i __lasx_xvsrl_h (__m256i, __m256i);
++__m256i __lasx_xvsrli_b (__m256i, imm0_7);
++__m256i __lasx_xvsrli_d (__m256i, imm0_63);
++__m256i __lasx_xvsrli_h (__m256i, imm0_15);
++__m256i __lasx_xvsrli_w (__m256i, imm0_31);
++__m256i __lasx_xvsrln_b_h (__m256i, __m256i);
++__m256i __lasx_xvsrln_h_w (__m256i, __m256i);
++__m256i __lasx_xvsrlni_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvsrlni_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvsrlni_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvsrlni_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvsrln_w_d (__m256i, __m256i);
++__m256i __lasx_xvsrlr_b (__m256i, __m256i);
++__m256i __lasx_xvsrlr_d (__m256i, __m256i);
++__m256i __lasx_xvsrlr_h (__m256i, __m256i);
++__m256i __lasx_xvsrlri_b (__m256i, imm0_7);
++__m256i __lasx_xvsrlri_d (__m256i, imm0_63);
++__m256i __lasx_xvsrlri_h (__m256i, imm0_15);
++__m256i __lasx_xvsrlri_w (__m256i, imm0_31);
++__m256i __lasx_xvsrlrn_b_h (__m256i, __m256i);
++__m256i __lasx_xvsrlrn_h_w (__m256i, __m256i);
++__m256i __lasx_xvsrlrni_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvsrlrni_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvsrlrni_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvsrlrni_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvsrlrn_w_d (__m256i, __m256i);
++__m256i __lasx_xvsrlr_w (__m256i, __m256i);
++__m256i __lasx_xvsrl_w (__m256i, __m256i);
++__m256i __lasx_xvssran_b_h (__m256i, __m256i);
++__m256i __lasx_xvssran_bu_h (__m256i, __m256i);
++__m256i __lasx_xvssran_hu_w (__m256i, __m256i);
++__m256i __lasx_xvssran_h_w (__m256i, __m256i);
++__m256i __lasx_xvssrani_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrani_bu_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrani_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrani_du_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrani_hu_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrani_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrani_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssrani_wu_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssran_w_d (__m256i, __m256i);
++__m256i __lasx_xvssran_wu_d (__m256i, __m256i);
++__m256i __lasx_xvssrarn_b_h (__m256i, __m256i);
++__m256i __lasx_xvssrarn_bu_h (__m256i, __m256i);
++__m256i __lasx_xvssrarn_hu_w (__m256i, __m256i);
++__m256i __lasx_xvssrarn_h_w (__m256i, __m256i);
++__m256i __lasx_xvssrarni_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrarni_bu_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrarni_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrarni_du_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrarni_hu_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrarni_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrarni_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssrarni_wu_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssrarn_w_d (__m256i, __m256i);
++__m256i __lasx_xvssrarn_wu_d (__m256i, __m256i);
++__m256i __lasx_xvssrln_b_h (__m256i, __m256i);
++__m256i __lasx_xvssrln_bu_h (__m256i, __m256i);
++__m256i __lasx_xvssrln_hu_w (__m256i, __m256i);
++__m256i __lasx_xvssrln_h_w (__m256i, __m256i);
++__m256i __lasx_xvssrlni_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrlni_bu_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrlni_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrlni_du_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrlni_hu_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrlni_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrlni_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssrlni_wu_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssrln_w_d (__m256i, __m256i);
++__m256i __lasx_xvssrln_wu_d (__m256i, __m256i);
++__m256i __lasx_xvssrlrn_b_h (__m256i, __m256i);
++__m256i __lasx_xvssrlrn_bu_h (__m256i, __m256i);
++__m256i __lasx_xvssrlrn_hu_w (__m256i, __m256i);
++__m256i __lasx_xvssrlrn_h_w (__m256i, __m256i);
++__m256i __lasx_xvssrlrni_b_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrlrni_bu_h (__m256i, __m256i, imm0_15);
++__m256i __lasx_xvssrlrni_d_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrlrni_du_q (__m256i, __m256i, imm0_127);
++__m256i __lasx_xvssrlrni_hu_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrlrni_h_w (__m256i, __m256i, imm0_31);
++__m256i __lasx_xvssrlrni_w_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssrlrni_wu_d (__m256i, __m256i, imm0_63);
++__m256i __lasx_xvssrlrn_w_d (__m256i, __m256i);
++__m256i __lasx_xvssrlrn_wu_d (__m256i, __m256i);
++__m256i __lasx_xvssub_b (__m256i, __m256i);
++__m256i __lasx_xvssub_bu (__m256i, __m256i);
++__m256i __lasx_xvssub_d (__m256i, __m256i);
++__m256i __lasx_xvssub_du (__m256i, __m256i);
++__m256i __lasx_xvssub_h (__m256i, __m256i);
++__m256i __lasx_xvssub_hu (__m256i, __m256i);
++__m256i __lasx_xvssub_w (__m256i, __m256i);
++__m256i __lasx_xvssub_wu (__m256i, __m256i);
++void __lasx_xvst (__m256i, void *, imm_n2048_2047);
++void __lasx_xvstelm_b (__m256i, void *, imm_n128_127, idx);
++void __lasx_xvstelm_d (__m256i, void *, imm_n128_127, idx);
++void __lasx_xvstelm_h (__m256i, void *, imm_n128_127, idx);
++void __lasx_xvstelm_w (__m256i, void *, imm_n128_127, idx);
++void __lasx_xvstx (__m256i, void *, long int);
++__m256i __lasx_xvsub_b (__m256i, __m256i);
++__m256i __lasx_xvsub_d (__m256i, __m256i);
++__m256i __lasx_xvsub_h (__m256i, __m256i);
++__m256i __lasx_xvsubi_bu (__m256i, imm0_31);
++__m256i __lasx_xvsubi_du (__m256i, imm0_31);
++__m256i __lasx_xvsubi_hu (__m256i, imm0_31);
++__m256i __lasx_xvsubi_wu (__m256i, imm0_31);
++__m256i __lasx_xvsub_q (__m256i, __m256i);
++__m256i __lasx_xvsub_w (__m256i, __m256i);
++__m256i __lasx_xvsubwev_d_w (__m256i, __m256i);
++__m256i __lasx_xvsubwev_d_wu (__m256i, __m256i);
++__m256i __lasx_xvsubwev_h_b (__m256i, __m256i);
++__m256i __lasx_xvsubwev_h_bu (__m256i, __m256i);
++__m256i __lasx_xvsubwev_q_d (__m256i, __m256i);
++__m256i __lasx_xvsubwev_q_du (__m256i, __m256i);
++__m256i __lasx_xvsubwev_w_h (__m256i, __m256i);
++__m256i __lasx_xvsubwev_w_hu (__m256i, __m256i);
++__m256i __lasx_xvsubwod_d_w (__m256i, __m256i);
++__m256i __lasx_xvsubwod_d_wu (__m256i, __m256i);
++__m256i __lasx_xvsubwod_h_b (__m256i, __m256i);
++__m256i __lasx_xvsubwod_h_bu (__m256i, __m256i);
++__m256i __lasx_xvsubwod_q_d (__m256i, __m256i);
++__m256i __lasx_xvsubwod_q_du (__m256i, __m256i);
++__m256i __lasx_xvsubwod_w_h (__m256i, __m256i);
++__m256i __lasx_xvsubwod_w_hu (__m256i, __m256i);
++__m256i __lasx_xvxori_b (__m256i, imm0_255);
++__m256i __lasx_xvxor_v (__m256i, __m256i);
++@end smallexample
++
+ @node MIPS DSP Built-in Functions
+ @subsection MIPS DSP Built-in Functions
+ 
+-- 
+2.43.0
+
diff --git a/0055-Struct-Reorg-Port-bugfixes-to-GCC-12.3.1.patch b/0055-Struct-Reorg-Port-bugfixes-to-GCC-12.3.1.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8f6b3b7eece1b4aa9e39041b3217fe6ac068d77f
--- /dev/null
+++ b/0055-Struct-Reorg-Port-bugfixes-to-GCC-12.3.1.patch
@@ -0,0 +1,420 @@
+From 55c547748af36ffc3f2d5ed154a91fb3fcb8431c Mon Sep 17 00:00:00 2001
+From: Mingchuan Wu 
+Date: Thu, 11 Apr 2024 15:49:59 +0800
+Subject: [PATCH] [Struct Reorg] Port bugfixes to GCC 12.3.1
+
+Migrated from commits in GCC10.3.1:
+https://gitee.com/openeuler/gcc/commit/41af6d361a6d85ef4fce8a8438113d765596afdd
+https://gitee.com/openeuler/gcc/commit/25d74b98caeaae881e374924886ee664aa1af5bc
+https://gitee.com/openeuler/gcc/commit/b5a3bfe92f96cd0d2224d80ac4eaa80dab1bd6bf
+https://gitee.com/openeuler/gcc/commit/708ffe6f132ee39441b66b6ab6b98847d35916b7
+https://gitee.com/openeuler/gcc/commit/e875e4e7f3716aa268ffbbf55ee199ec82b6aeba
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      | 97 ++++++++++---------
+ gcc/testsuite/gcc.dg/struct/dfe_escape.c      | 50 ++++++++++
+ gcc/testsuite/gcc.dg/struct/dfe_func_ptr.c    | 69 +++++++++++++
+ gcc/testsuite/gcc.dg/struct/struct-reorg.exp  |  2 +
+ gcc/testsuite/gcc.dg/struct/struct_reorg-10.c | 29 ++++++
+ gcc/testsuite/gcc.dg/struct/struct_reorg-11.c | 16 +++
+ gcc/testsuite/gcc.dg/struct/struct_reorg-12.c | 26 +++++
+ 7 files changed, 243 insertions(+), 46 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_escape.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/dfe_func_ptr.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-10.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-11.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/struct_reorg-12.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 6a202b4bd..f03d1d875 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -466,10 +466,19 @@ srtype::has_dead_field (void)
+   unsigned i;
+   FOR_EACH_VEC_ELT (fields, i, this_field)
+     {
+-      if (!(this_field->field_access & READ_FIELD))
+-	{
+-	  may_dfe = true;
+-	  break;
++      /* Function pointer members are not processed, because DFE
++         does not currently support accurate analysis of function
++         pointers, and we have not identified specific use cases. */
++      if (!(this_field->field_access & READ_FIELD)
++	 && !FUNCTION_POINTER_TYPE_P (this_field->fieldtype))
++	{
++	  /* Fields with escape risks should not be processed. */
++	  if (this_field->type == NULL
++	      || (this_field->type->escapes == does_not_escape))
++	    {
++	      may_dfe = true;
++	      break;
++	    }
+ 	}
+     }
+   return may_dfe;
+@@ -1032,8 +1041,13 @@ srtype::create_new_type (void)
+     {
+       srfield *f = fields[i];
+       if (current_layout_opt_level & DEAD_FIELD_ELIMINATION
+-	  && !(f->field_access & READ_FIELD))
+-	continue;
++	  && !(f->field_access & READ_FIELD)
++	  && !FUNCTION_POINTER_TYPE_P (f->fieldtype))
++	{
++	  /* Fields with escape risks should not be processed. */
++	  if (f->type == NULL || (f->type->escapes == does_not_escape))
++	    continue;
++	}
+       f->create_new_fields (newtype, newfields, newlast);
+     }
+ 
+@@ -3815,9 +3829,17 @@ ipa_struct_reorg::maybe_mark_or_record_other_side (tree side, tree other,
+       if (VOID_POINTER_P (TREE_TYPE (side))
+ 	  && TREE_CODE (side) == SSA_NAME)
+ 	{
+-	  /* The type is other, the declaration is side.  */
+-	  current_function->record_decl (type, side, -1,
+-		isptrptr (TREE_TYPE (other)) ? TREE_TYPE (other) : NULL);
++	  tree inner = SSA_NAME_VAR (side);
++	  if (inner)
++	    {
++	      srdecl *in = find_decl (inner);
++	      if (in && !in->type->has_escaped ())
++		{
++		  /* The type is other, the declaration is side.  */
++		  current_function->record_decl (type, side, -1,
++			isptrptr (TREE_TYPE (other)) ? TREE_TYPE (other) : NULL);
++		}
++	     }
+ 	}
+       else
+ 	/* *_1 = &MEM[(void *)&x + 8B].  */
+@@ -3910,6 +3932,12 @@ ipa_struct_reorg::maybe_record_assign (cgraph_node *node, gassign *stmt)
+ 	maybe_mark_or_record_other_side (rhs, lhs, stmt);
+       if (TREE_CODE (lhs) == SSA_NAME)
+ 	maybe_mark_or_record_other_side (lhs, rhs, stmt);
++
++      /* Handle missing ARRAY_REF cases.  */
++      if (TREE_CODE (lhs) == ARRAY_REF)
++	mark_type_as_escape (TREE_TYPE (lhs), escape_array, stmt);
++      if (TREE_CODE (rhs) == ARRAY_REF)
++	mark_type_as_escape (TREE_TYPE (rhs), escape_array, stmt);
+     }
+ }
+ 
+@@ -5272,8 +5300,11 @@ ipa_struct_reorg::record_accesses (void)
+ 	record_function (cnode);
+       else
+ 	{
+-	  tree return_type = TREE_TYPE (TREE_TYPE (cnode->decl));
+-	  mark_type_as_escape (return_type, escape_return, NULL);
++	  if (cnode->externally_visible)
++	    {
++	      tree return_type = TREE_TYPE (TREE_TYPE (cnode->decl));
++	      mark_type_as_escape (return_type, escape_return, NULL);
++	    }
+ 	}
+ 
+     }
+@@ -5889,6 +5920,7 @@ ipa_struct_reorg::rewrite_expr (tree expr,
+   bool escape_from_base = false;
+ 
+   tree newbase[max_split];
++  memset (newbase, 0, sizeof (tree[max_split]));
+   memset (newexpr, 0, sizeof (tree[max_split]));
+ 
+   if (TREE_CODE (expr) == CONSTRUCTOR)
+@@ -6912,7 +6944,7 @@ create_bb_for_group_diff_ne_0 (basic_block new_bb, tree &phi, tree ptr,
+ }
+ 
+ tree
+-ipa_struct_reorg::rewrite_pointer_plus_integer (gimple *stmt,
++ipa_struct_reorg::rewrite_pointer_plus_integer (gimple *stmt ATTRIBUTE_UNUSED,
+ 						gimple_stmt_iterator *gsi,
+ 						tree ptr, tree offset,
+ 						srtype *type)
+@@ -7889,41 +7921,14 @@ ipa_struct_reorg::rewrite_cond (gcond *stmt,
+    should be removed.  */
+ 
+ bool
+-ipa_struct_reorg::rewrite_debug (gimple *stmt, gimple_stmt_iterator *)
++ipa_struct_reorg::rewrite_debug (gimple *, gimple_stmt_iterator *)
+ {
+-  if (current_layout_opt_level >= STRUCT_REORDER_FIELDS)
+-    /* Delete debug gimple now.  */
+-    return true;
+-  bool remove = false;
+-  if (gimple_debug_bind_p (stmt))
+-    {
+-      tree var = gimple_debug_bind_get_var (stmt);
+-      tree newvar[max_split];
+-      if (rewrite_expr (var, newvar, true))
+-	remove = true;
+-      if (gimple_debug_bind_has_value_p (stmt))
+-	{
+-	  var = gimple_debug_bind_get_value (stmt);
+-	  if (TREE_CODE (var) == POINTER_PLUS_EXPR)
+-	    var = TREE_OPERAND (var, 0);
+-	  if (rewrite_expr (var, newvar, true))
+-	    remove = true;
+-	}
+-    }
+-  else if (gimple_debug_source_bind_p (stmt))
+-    {
+-      tree var = gimple_debug_source_bind_get_var (stmt);
+-      tree newvar[max_split];
+-      if (rewrite_expr (var, newvar, true))
+-	remove = true;
+-      var = gimple_debug_source_bind_get_value (stmt);
+-      if (TREE_CODE (var) == POINTER_PLUS_EXPR)
+-	var = TREE_OPERAND (var, 0);
+-      if (rewrite_expr (var, newvar, true))
+-	remove = true;
+-    }
+-
+-  return remove;
++  /* In debug statements, there might be some statements that have
++     been optimized out in gimple but left in debug gimple.  Sometimes
++     these statements need to be analyzed to escape, but in rewrite
++     stage it shouldn't happen.  It needs to care a lot to handle these
++     cases but seems useless.  So now we just delete debug gimple.  */
++  return true;
+ }
+ 
+ /* Rewrite PHI nodes, return true if the PHI was replaced.  */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfe_escape.c b/gcc/testsuite/gcc.dg/struct/dfe_escape.c
+new file mode 100644
+index 000000000..09efe8027
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfe_escape.c
+@@ -0,0 +1,50 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++typedef struct arc arc_t;
++typedef struct arc *arc_p;
++
++typedef struct network
++{    
++  int x;
++} network_t;
++
++struct arc
++{
++  int flow;
++  network_t* net_add;
++};
++
++const int MAX = 100;
++
++/* let it escape_array, "Type is used in an array [not handled yet]".  */
++network_t* net[2];
++arc_p stop_arcs = NULL;
++
++int
++main ()
++{
++  net[0] = (network_t*) calloc (1, sizeof(network_t));
++  stop_arcs = (arc_p) calloc (MAX, sizeof (arc_t));
++
++  net[0]->x = 100;
++
++  for (unsigned i = 0; i < 3; i++)
++    {        
++      net[0]->x = net[0]->x + 2;
++      stop_arcs->flow = net[0]->x / 2;
++      stop_arcs->flow = stop_arcs->flow + 20;
++      stop_arcs->net_add = net[0];
++      stop_arcs++;
++    }
++
++  if( net[1] != 0 && stop_arcs != 0)
++    {
++      return -1;
++    }
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfe_func_ptr.c b/gcc/testsuite/gcc.dg/struct/dfe_func_ptr.c
+new file mode 100644
+index 000000000..74ea93bbc
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfe_func_ptr.c
+@@ -0,0 +1,69 @@
++/* { dg-do compile } */
++/* { dg-do run } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++#ifdef STACK_SIZE
++#if STACK_SIZE > 16000
++#define N 1000
++#else
++#define N (STACK_SIZE/16)
++#endif
++#else
++#define N 1000
++#endif
++
++int num;
++
++int (*foo)(int d);
++int f (int t);
++
++typedef struct str_t str_t1;
++struct str_t
++{
++   int a;
++   float b;
++   int (*foo)(int d);
++};
++
++int main ()
++{
++   int i, r;
++   r = rand ();
++   num = r > N ? N : r;
++   str_t1 * p1 = calloc (num, sizeof (str_t1));
++   if (p1 == NULL)
++      return 0;
++   for (i = 0; i < num; i++)
++     {
++       p1[i].foo = malloc (1 * sizeof (f));
++       p1[i].foo = f;
++       p1[i].foo (i);
++     }
++
++   for (i = 0; i < num; i++)
++      p1[i].a = 1;
++
++   for (i = 0; i < num; i++)
++      p1[i].b = 2;
++
++   for (i = 0; i < num; i++)
++      if (p1[i].a != 1)
++	 abort ();
++
++   for (i = 0; i < num; i++)
++      if (abs (p1[i].b - 2) > 0.0001)
++	 abort ();
++
++   return 0;
++}
++
++int f (int t)
++{
++   if ( t < 0)
++      abort ();
++   return 0;
++}
++
++/* { dg-final { scan-ipa-dump-times "Dead field elimination" 0 "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+index c5a955b00..687f6609f 100644
+--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+@@ -46,6 +46,8 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/rf_*.c]] \
+ # -fipa-struct-reorg=3
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfe*.c]] \
+ 	"" "-fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program"
++gcc-dg-runtest $srcdir/$subdir/struct_reorg-7.c \
++	"" "-fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program"
+ 
+ # -fipa-struct-reorg=4
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \
+diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-10.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-10.c
+new file mode 100644
+index 000000000..ec422f76f
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-10.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++/* { dg-options "-w -g -O3 -flto-partition=one -fipa-struct-reorg -fwhole-program -S" } */
++
++struct a {
++  int b;
++  char c;
++};
++struct {
++  double d;
++  _Bool e;
++} * f;
++struct g {
++  struct a h;
++} i;
++long j;
++void k();
++void l() { k(i); }
++void k(struct a m) {
++  f->e = 0;
++  for (;;)
++    l();
++}
++int main() {
++  for (; j; f = 0) {
++    struct g *n = 0;
++    char o = n->h.c;
++  }
++  l();
++}
+diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-11.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-11.c
+new file mode 100644
+index 000000000..3e42aa84a
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-11.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-w -g -O3 -flto-partition=one -fipa-struct-reorg -fwhole-program -S" } */
++
++struct a {
++  int b;
++  double c;
++};
++struct d {
++  struct a e;
++};
++int f;
++int main() {
++  _Bool g;
++  struct d **h = 0;
++  g = *h += f;
++}
+diff --git a/gcc/testsuite/gcc.dg/struct/struct_reorg-12.c b/gcc/testsuite/gcc.dg/struct/struct_reorg-12.c
+new file mode 100644
+index 000000000..d434f9fe0
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/struct_reorg-12.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++/* { dg-options "-w -g -O3 -flto-partition=one -fipa-struct-reorg -fwhole-program -S" } */
++
++struct foo {
++  long element1;
++  long element2;
++};
++
++struct goo {
++  struct foo element_foo;
++};
++
++struct goo g1;
++
++void func () {
++  struct foo (*local)[] = 0;
++  long idx;
++  (g1).element_foo = (*local)[idx];
++}
++
++struct foo g2;
++int main () {
++  func ();
++  g2 = g1.element_foo;
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/0056-Fix-bug-that-verifying-gimple-failed-when-reorg-leve.patch b/0056-Fix-bug-that-verifying-gimple-failed-when-reorg-leve.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4bcaca8428cd1a1b8d8cfcece48b9a87cf3890cb
--- /dev/null
+++ b/0056-Fix-bug-that-verifying-gimple-failed-when-reorg-leve.patch
@@ -0,0 +1,27 @@
+From fa6f80044dcebd28506e871e6e5d25e2dfd7e105 Mon Sep 17 00:00:00 2001
+From: tiancheng-bao 
+Date: Fri, 12 Apr 2024 15:09:28 +0800
+Subject: [PATCH 01/32] Fix bug that verifying gimple failed when reorg-level >
+ 5
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index f03d1d875..e08577c0c 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -7461,6 +7461,9 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	    continue;
+ 	  tree lhs_expr = newlhs[i] ? newlhs[i] : lhs;
+ 	  tree rhs_expr = newrhs[i] ? newrhs[i] : rhs;
++	  if (!useless_type_conversion_p (TREE_TYPE (lhs_expr),
++					  TREE_TYPE (rhs_expr)))
++	    rhs_expr = gimplify_build1 (gsi, NOP_EXPR, TREE_TYPE (lhs_expr), rhs_expr);  
+ 	  gimple *newstmt = gimple_build_assign (lhs_expr, rhs_expr);
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+ 	    {
+-- 
+2.28.0.windows.1
+
diff --git a/0056-LoongArch-Switch-loongarch-def-from-C-to-C-to-make-i.patch b/0056-LoongArch-Switch-loongarch-def-from-C-to-C-to-make-i.patch
new file mode 100644
index 0000000000000000000000000000000000000000..59bfd8e1611145a63f1c5e78f767e55d30368f47
--- /dev/null
+++ b/0056-LoongArch-Switch-loongarch-def-from-C-to-C-to-make-i.patch
@@ -0,0 +1,925 @@
+From 6c85d03940f87770a7e8b7195ffe45f99afef411 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao <xry111@xry111.site>
+Date: Fri, 1 Dec 2023 10:09:33 +0800
+Subject: [PATCH 056/188] LoongArch: Switch loongarch-def from C to C++ to make
+ it possible.
+
+We'll use HOST_WIDE_INT in LoongArch static properties in following patches.
+
+To keep the same readability as C99 designated initializers, create a
+std::array like data structure with position setter function, and add
+field setter functions for structs used in loongarch-def.cc.
+
+Remove unneeded guards #if
+!defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+in loongarch-def.h and loongarch-opts.h.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-def.h: Remove extern "C".
+	(loongarch_isa_base_strings): Declare as loongarch_def_array
+	instead of plain array.
+	(loongarch_isa_ext_strings): Likewise.
+	(loongarch_abi_base_strings): Likewise.
+	(loongarch_abi_ext_strings): Likewise.
+	(loongarch_cmodel_strings): Likewise.
+	(loongarch_cpu_strings): Likewise.
+	(loongarch_cpu_default_isa): Likewise.
+	(loongarch_cpu_issue_rate): Likewise.
+	(loongarch_cpu_multipass_dfa_lookahead): Likewise.
+	(loongarch_cpu_cache): Likewise.
+	(loongarch_cpu_align): Likewise.
+	(loongarch_cpu_rtx_cost_data): Likewise.
+	(loongarch_isa): Add a constructor and field setter functions.
+	* config/loongarch/loongarch-opts.h (loongarch-defs.h): Do not
+	include for target libraries.
+	* config/loongarch/loongarch-opts.cc: Comment code that doesn't
+	run and causes compilation errors.
+	* config/loongarch/loongarch-tune.h (LOONGARCH_TUNE_H): Likewise.
+	(struct loongarch_rtx_cost_data): Likewise.
+	(struct loongarch_cache): Likewise.
+	(struct loongarch_align): Likewise.
+	* config/loongarch/t-loongarch: Compile loongarch-def.cc with the
+	C++ compiler.
+	* config/loongarch/loongarch-def-array.h: New file for a
+	std:array like data structure with position setter function.
+	* config/loongarch/loongarch-def.c: Rename to ...
+	* config/loongarch/loongarch-def.cc: ... here.
+	(loongarch_cpu_strings): Define as loongarch_def_array instead
+	of plain array.
+	(loongarch_cpu_default_isa): Likewise.
+	(loongarch_cpu_cache): Likewise.
+	(loongarch_cpu_align): Likewise.
+	(loongarch_cpu_rtx_cost_data): Likewise.
+	(loongarch_cpu_issue_rate): Likewise.
+	(loongarch_cpu_multipass_dfa_lookahead): Likewise.
+	(loongarch_isa_base_strings): Likewise.
+	(loongarch_isa_ext_strings): Likewise.
+	(loongarch_abi_base_strings): Likewise.
+	(loongarch_abi_ext_strings): Likewise.
+	(loongarch_cmodel_strings): Likewise.
+	(abi_minimal_isa): Likewise.
+	(loongarch_rtx_cost_optimize_size): Use field setter functions
+	instead of designated initializers.
+	(loongarch_rtx_cost_data): Implement default constructor.
+---
+ gcc/config/loongarch/loongarch-def-array.h |  40 ++++
+ gcc/config/loongarch/loongarch-def.c       | 227 ---------------------
+ gcc/config/loongarch/loongarch-def.cc      | 187 +++++++++++++++++
+ gcc/config/loongarch/loongarch-def.h       |  55 ++---
+ gcc/config/loongarch/loongarch-opts.cc     |   7 +
+ gcc/config/loongarch/loongarch-opts.h      |   5 +-
+ gcc/config/loongarch/loongarch-tune.h      | 123 ++++++++++-
+ gcc/config/loongarch/t-loongarch           |   4 +-
+ 8 files changed, 390 insertions(+), 258 deletions(-)
+ create mode 100644 gcc/config/loongarch/loongarch-def-array.h
+ delete mode 100644 gcc/config/loongarch/loongarch-def.c
+ create mode 100644 gcc/config/loongarch/loongarch-def.cc
+
+diff --git a/gcc/config/loongarch/loongarch-def-array.h b/gcc/config/loongarch/loongarch-def-array.h
+new file mode 100644
+index 000000000..bdb3e9c6a
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-def-array.h
+@@ -0,0 +1,40 @@
++/* A std::array like data structure for LoongArch static properties.
++   Copyright (C) 2023 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#ifndef _LOONGARCH_DEF_ARRAY_H
++#define _LOONGARCH_DEF_ARRAY_H 1
++
++template <class T, int N>
++class loongarch_def_array {
++private:
++  T arr[N];
++public:
++  loongarch_def_array () : arr{} {}
++
++  T &operator[] (int n) { return arr[n]; }
++  const T &operator[] (int n) const { return arr[n]; }
++
++  loongarch_def_array set (int idx, T &&value)
++  {
++    (*this)[idx] = value;
++    return *this;
++  }
++};
++
++#endif
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+deleted file mode 100644
+index fe4474e77..000000000
+--- a/gcc/config/loongarch/loongarch-def.c
++++ /dev/null
+@@ -1,227 +0,0 @@
+-/* LoongArch static properties.
+-   Copyright (C) 2021-2022 Free Software Foundation, Inc.
+-   Contributed by Loongson Ltd.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify
+-it under the terms of the GNU General Public License as published by
+-the Free Software Foundation; either version 3, or (at your option)
+-any later version.
+-
+-GCC is distributed in the hope that it will be useful,
+-but WITHOUT ANY WARRANTY; without even the implied warranty of
+-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-GNU General Public License for more details.
+-
+-You should have received a copy of the GNU General Public License
+-along with GCC; see the file COPYING3.  If not see
+-<http://www.gnu.org/licenses/>.  */
+-
+-#include "loongarch-def.h"
+-#include "loongarch-str.h"
+-
+-/* CPU property tables.  */
+-const char*
+-loongarch_cpu_strings[N_TUNE_TYPES] = {
+-  [CPU_NATIVE]		  = STR_CPU_NATIVE,
+-  [CPU_ABI_DEFAULT]	  = STR_CPU_ABI_DEFAULT,
+-  [CPU_LOONGARCH64]	  = STR_CPU_LOONGARCH64,
+-  [CPU_LA464]		  = STR_CPU_LA464,
+-  [CPU_LA664]		  = STR_CPU_LA664,
+-};
+-
+-struct loongarch_isa
+-loongarch_cpu_default_isa[N_ARCH_TYPES] = {
+-  [CPU_LOONGARCH64] = {
+-      .base = ISA_BASE_LA64V100,
+-      .fpu = ISA_EXT_FPU64,
+-      .simd = 0,
+-  },
+-  [CPU_LA464] = {
+-      .base = ISA_BASE_LA64V100,
+-      .fpu = ISA_EXT_FPU64,
+-      .simd = ISA_EXT_SIMD_LASX,
+-  },
+-  [CPU_LA664] = {
+-      .base = ISA_BASE_LA64V110,
+-      .fpu = ISA_EXT_FPU64,
+-      .simd = ISA_EXT_SIMD_LASX,
+-  },
+-};
+-
+-struct loongarch_cache
+-loongarch_cpu_cache[N_TUNE_TYPES] = {
+-  [CPU_LOONGARCH64] = {
+-      .l1d_line_size = 64,
+-      .l1d_size = 64,
+-      .l2d_size = 256,
+-      .simultaneous_prefetches = 4,
+-  },
+-  [CPU_LA464] = {
+-      .l1d_line_size = 64,
+-      .l1d_size = 64,
+-      .l2d_size = 256,
+-      .simultaneous_prefetches = 4,
+-  },
+-  [CPU_LA664] = {
+-      .l1d_line_size = 64,
+-      .l1d_size = 64,
+-      .l2d_size = 256,
+-      .simultaneous_prefetches = 4,
+-  },
+-};
+-
+-struct loongarch_align
+-loongarch_cpu_align[N_TUNE_TYPES] = {
+-  [CPU_LOONGARCH64] = {
+-    .function = "32",
+-    .label = "16",
+-  },
+-  [CPU_LA464] = {
+-    .function = "32",
+-    .label = "16",
+-  },
+-  [CPU_LA664] = {
+-    .function = "32",
+-    .label = "16",
+-  },
+-};
+-
+-
+-/* Default RTX cost initializer.  */
+-#define COSTS_N_INSNS(N) ((N) * 4)
+-#define DEFAULT_COSTS				\
+-    .fp_add		= COSTS_N_INSNS (1),	\
+-    .fp_mult_sf		= COSTS_N_INSNS (2),	\
+-    .fp_mult_df		= COSTS_N_INSNS (4),	\
+-    .fp_div_sf		= COSTS_N_INSNS (6),	\
+-    .fp_div_df		= COSTS_N_INSNS (8),	\
+-    .int_mult_si	= COSTS_N_INSNS (1),	\
+-    .int_mult_di	= COSTS_N_INSNS (1),	\
+-    .int_div_si		= COSTS_N_INSNS (4),	\
+-    .int_div_di		= COSTS_N_INSNS (6),	\
+-    .branch_cost	= 6,			\
+-    .memory_latency	= 4
+-
+-/* The following properties cannot be looked up directly using "cpucfg".
+- So it is necessary to provide a default value for "unknown native"
+- tune targets (i.e. -mtune=native while PRID does not correspond to
+- any known "-mtune" type).  */
+-
+-struct loongarch_rtx_cost_data
+-loongarch_cpu_rtx_cost_data[N_TUNE_TYPES] = {
+-  [CPU_NATIVE] = {
+-      DEFAULT_COSTS
+-  },
+-  [CPU_LOONGARCH64] = {
+-      DEFAULT_COSTS
+-  },
+-  [CPU_LA464] = {
+-      DEFAULT_COSTS
+-  },
+-  [CPU_LA664] = {
+-      DEFAULT_COSTS
+-  },
+-};
+-
+-/* RTX costs to use when optimizing for size.  */
+-const struct loongarch_rtx_cost_data
+-loongarch_rtx_cost_optimize_size = {
+-    .fp_add	      = 4,
+-    .fp_mult_sf	      = 4,
+-    .fp_mult_df	      = 4,
+-    .fp_div_sf	      = 4,
+-    .fp_div_df	      = 4,
+-    .int_mult_si      = 4,
+-    .int_mult_di      = 4,
+-    .int_div_si	      = 4,
+-    .int_div_di	      = 4,
+-    .branch_cost      = 6,
+-    .memory_latency   = 4,
+-};
+-
+-int
+-loongarch_cpu_issue_rate[N_TUNE_TYPES] = {
+-  [CPU_NATIVE]	      = 4,
+-  [CPU_LOONGARCH64]   = 4,
+-  [CPU_LA464]	      = 4,
+-  [CPU_LA664]	      = 6,
+-};
+-
+-int
+-loongarch_cpu_multipass_dfa_lookahead[N_TUNE_TYPES] = {
+-  [CPU_NATIVE]	      = 4,
+-  [CPU_LOONGARCH64]   = 4,
+-  [CPU_LA464]	      = 4,
+-  [CPU_LA664]	      = 6,
+-};
+-
+-/* Wiring string definitions from loongarch-str.h to global arrays
+-   with standard index values from loongarch-opts.h, so we can
+-   print config-related messages and do ABI self-spec filtering
+-   from the driver in a self-consistent manner.  */
+-
+-const char*
+-loongarch_isa_base_strings[N_ISA_BASE_TYPES] = {
+-  [ISA_BASE_LA64V100] = STR_ISA_BASE_LA64V100,
+-  [ISA_BASE_LA64V110] = STR_ISA_BASE_LA64V110,
+-};
+-
+-const char*
+-loongarch_isa_ext_strings[N_ISA_EXT_TYPES] = {
+-  [ISA_EXT_NONE] = STR_NONE,
+-  [ISA_EXT_FPU32] = STR_ISA_EXT_FPU32,
+-  [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64,
+-  [ISA_EXT_SIMD_LSX] = STR_ISA_EXT_LSX,
+-  [ISA_EXT_SIMD_LASX] = STR_ISA_EXT_LASX,
+-};
+-
+-const char*
+-loongarch_abi_base_strings[N_ABI_BASE_TYPES] = {
+-  [ABI_BASE_LP64D] = STR_ABI_BASE_LP64D,
+-  [ABI_BASE_LP64F] = STR_ABI_BASE_LP64F,
+-  [ABI_BASE_LP64S] = STR_ABI_BASE_LP64S,
+-};
+-
+-const char*
+-loongarch_abi_ext_strings[N_ABI_EXT_TYPES] = {
+-  [ABI_EXT_BASE] = STR_ABI_EXT_BASE,
+-};
+-
+-const char*
+-loongarch_cmodel_strings[] = {
+-  [CMODEL_NORMAL]	  = STR_CMODEL_NORMAL,
+-  [CMODEL_TINY]		  = STR_CMODEL_TINY,
+-  [CMODEL_TINY_STATIC]	  = STR_CMODEL_TS,
+-  [CMODEL_MEDIUM]	  = STR_CMODEL_MEDIUM,
+-  [CMODEL_LARGE]	  = STR_CMODEL_LARGE,
+-  [CMODEL_EXTREME]	  = STR_CMODEL_EXTREME,
+-};
+-
+-
+-/* ABI-related definitions.  */
+-const struct loongarch_isa
+-abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = {
+-  [ABI_BASE_LP64D] = {
+-      [ABI_EXT_BASE] = {
+-	  .base = ISA_BASE_LA64V100,
+-	  .fpu = ISA_EXT_FPU64,
+-	  .simd = 0
+-      },
+-  },
+-  [ABI_BASE_LP64F] = {
+-      [ABI_EXT_BASE] = {
+-	  .base = ISA_BASE_LA64V100,
+-	  .fpu = ISA_EXT_FPU32,
+-	  .simd = 0
+-      },
+-  },
+-  [ABI_BASE_LP64S] = {
+-      [ABI_EXT_BASE] = {
+-	  .base = ISA_BASE_LA64V100,
+-	  .fpu = ISA_EXT_NONE,
+-	  .simd = 0
+-      },
+-  },
+-};
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+new file mode 100644
+index 000000000..6990c86c2
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -0,0 +1,187 @@
++/* LoongArch static properties.
++   Copyright (C) 2021-2023 Free Software Foundation, Inc.
++   Contributed by Loongson Ltd.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "loongarch-def.h"
++#include "loongarch-str.h"
++
++template <class T, int N>
++using array = loongarch_def_array<T, N>;
++
++template <class T>
++using array_tune = array<T, N_TUNE_TYPES>;
++
++template <class T>
++using array_arch = array<T, N_ARCH_TYPES>;
++
++/* CPU property tables.  */
++array_tune<const char *> loongarch_cpu_strings = array_tune<const char *> ()
++  .set (CPU_NATIVE, STR_CPU_NATIVE)
++  .set (CPU_ABI_DEFAULT, STR_CPU_ABI_DEFAULT)
++  .set (CPU_LOONGARCH64, STR_CPU_LOONGARCH64)
++  .set (CPU_LA464, STR_CPU_LA464)
++  .set (CPU_LA664, STR_CPU_LA664);
++
++array_arch<loongarch_isa> loongarch_cpu_default_isa =
++  array_arch<loongarch_isa> ()
++    .set (CPU_LOONGARCH64,
++	  loongarch_isa ()
++	    .base_ (ISA_BASE_LA64V100)
++	    .fpu_ (ISA_EXT_FPU64))
++    .set (CPU_LA464,
++	  loongarch_isa ()
++	    .base_ (ISA_BASE_LA64V100)
++	    .fpu_ (ISA_EXT_FPU64)
++	    .simd_ (ISA_EXT_SIMD_LASX))
++    .set (CPU_LA664,
++	  loongarch_isa ()
++	    .base_ (ISA_BASE_LA64V110)
++	    .fpu_ (ISA_EXT_FPU64)
++	    .simd_ (ISA_EXT_SIMD_LASX));
++
++static inline loongarch_cache la464_cache ()
++{
++  return loongarch_cache ()
++    .l1d_line_size_ (64)
++    .l1d_size_ (64)
++    .l2d_size_ (256)
++    .simultaneous_prefetches_ (4);
++}
++
++array_tune<loongarch_cache> loongarch_cpu_cache =
++  array_tune<loongarch_cache> ()
++    .set (CPU_LOONGARCH64, la464_cache ())
++    .set (CPU_LA464, la464_cache ())
++    .set (CPU_LA664, la464_cache ());
++
++static inline loongarch_align la464_align ()
++{
++  return loongarch_align ().function_ ("32").label_ ("16");
++}
++
++array_tune<loongarch_align> loongarch_cpu_align =
++  array_tune<loongarch_align> ()
++    .set (CPU_LOONGARCH64, la464_align ())
++    .set (CPU_LA464, la464_align ())
++    .set (CPU_LA664, la464_align ());
++
++#define COSTS_N_INSNS(N) ((N) * 4)
++
++/* Default RTX cost initializer.  */
++loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
++  : fp_add (COSTS_N_INSNS (1)),
++    fp_mult_sf (COSTS_N_INSNS (2)),
++    fp_mult_df (COSTS_N_INSNS (4)),
++    fp_div_sf (COSTS_N_INSNS (6)),
++    fp_div_df (COSTS_N_INSNS (8)),
++    int_mult_si (COSTS_N_INSNS (1)),
++    int_mult_di (COSTS_N_INSNS (1)),
++    int_div_si (COSTS_N_INSNS (4)),
++    int_div_di (COSTS_N_INSNS (6)),
++    branch_cost (6),
++    memory_latency (4) {}
++
++/* The following properties cannot be looked up directly using "cpucfg".
++ So it is necessary to provide a default value for "unknown native"
++ tune targets (i.e. -mtune=native while PRID does not correspond to
++ any known "-mtune" type).  Currently all numbers are default.  */
++array_tune<loongarch_rtx_cost_data> loongarch_cpu_rtx_cost_data =
++  array_tune<loongarch_rtx_cost_data> ();
++
++/* RTX costs to use when optimizing for size.  */
++const loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size =
++  loongarch_rtx_cost_data ()
++    .fp_add_ (4)
++    .fp_mult_sf_ (4)
++    .fp_mult_df_ (4)
++    .fp_div_sf_ (4)
++    .fp_div_df_ (4)
++    .int_mult_si_ (4)
++    .int_mult_di_ (4)
++    .int_div_si_ (4)
++    .int_div_di_ (4);
++
++array_tune<int> loongarch_cpu_issue_rate = array_tune<int> ()
++  .set (CPU_NATIVE, 4)
++  .set (CPU_LOONGARCH64, 4)
++  .set (CPU_LA464, 4)
++  .set (CPU_LA664, 6);
++
++array_tune<int> loongarch_cpu_multipass_dfa_lookahead = array_tune<int> ()
++  .set (CPU_NATIVE, 4)
++  .set (CPU_LOONGARCH64, 4)
++  .set (CPU_LA464, 4)
++  .set (CPU_LA664, 6);
++
++/* Wiring string definitions from loongarch-str.h to global arrays
++   with standard index values from loongarch-opts.h, so we can
++   print config-related messages and do ABI self-spec filtering
++   from the driver in a self-consistent manner.  */
++
++array<const char *, N_ISA_BASE_TYPES> loongarch_isa_base_strings =
++  array<const char *, N_ISA_BASE_TYPES> ()
++    .set (ISA_BASE_LA64V100, STR_ISA_BASE_LA64V100)
++    .set (ISA_BASE_LA64V110, STR_ISA_BASE_LA64V110);
++
++array<const char *, N_ISA_EXT_TYPES> loongarch_isa_ext_strings =
++  array<const char *, N_ISA_EXT_TYPES> ()
++    .set (ISA_EXT_NONE, STR_NONE)
++    .set (ISA_EXT_FPU32, STR_ISA_EXT_FPU32)
++    .set (ISA_EXT_FPU64, STR_ISA_EXT_FPU64)
++    .set (ISA_EXT_SIMD_LSX, STR_ISA_EXT_LSX)
++    .set (ISA_EXT_SIMD_LASX, STR_ISA_EXT_LASX);
++
++array<const char *, N_ABI_BASE_TYPES> loongarch_abi_base_strings =
++  array<const char *, N_ABI_BASE_TYPES> ()
++    .set (ABI_BASE_LP64D, STR_ABI_BASE_LP64D)
++    .set (ABI_BASE_LP64F, STR_ABI_BASE_LP64F)
++    .set (ABI_BASE_LP64S, STR_ABI_BASE_LP64S);
++
++array<const char *, N_ABI_EXT_TYPES> loongarch_abi_ext_strings =
++  array<const char *, N_ABI_EXT_TYPES> ()
++    .set (ABI_EXT_BASE, STR_ABI_EXT_BASE);
++
++array<const char *, N_CMODEL_TYPES> loongarch_cmodel_strings =
++  array<const char *, N_CMODEL_TYPES> ()
++    .set (CMODEL_NORMAL,		STR_CMODEL_NORMAL)
++    .set (CMODEL_TINY,		STR_CMODEL_TINY)
++    .set (CMODEL_TINY_STATIC,	STR_CMODEL_TS)
++    .set (CMODEL_MEDIUM,		STR_CMODEL_MEDIUM)
++    .set (CMODEL_LARGE,		STR_CMODEL_LARGE)
++    .set (CMODEL_EXTREME,		STR_CMODEL_EXTREME);
++
++array<array<loongarch_isa, N_ABI_EXT_TYPES>, N_ABI_BASE_TYPES>
++  abi_minimal_isa = array<array<loongarch_isa, N_ABI_EXT_TYPES>,
++			  N_ABI_BASE_TYPES> ()
++    .set (ABI_BASE_LP64D,
++	  array<loongarch_isa, N_ABI_EXT_TYPES> ()
++	    .set (ABI_EXT_BASE,
++		  loongarch_isa ()
++		    .base_ (ISA_BASE_LA64V100)
++		    .fpu_ (ISA_EXT_FPU64)))
++    .set (ABI_BASE_LP64F,
++	  array<loongarch_isa, N_ABI_EXT_TYPES> ()
++	    .set (ABI_EXT_BASE,
++		  loongarch_isa ()
++		    .base_ (ISA_BASE_LA64V100)
++		    .fpu_ (ISA_EXT_FPU32)))
++    .set (ABI_BASE_LP64S,
++	  array<loongarch_isa, N_ABI_EXT_TYPES> ()
++	    .set (ABI_EXT_BASE,
++		  loongarch_isa ().base_ (ISA_BASE_LA64V100)));
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index ef848f606..5ac70dfdd 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -50,20 +50,18 @@ along with GCC; see the file COPYING3.  If not see
+ #include <stdint.h>
+ #endif
+ 
++#include "loongarch-def-array.h"
+ #include "loongarch-tune.h"
+ 
+-#ifdef __cplusplus
+-extern "C" {
+-#endif
+-
+ /* enum isa_base */
+-extern const char* loongarch_isa_base_strings[];
+ 
+ /* LoongArch V1.00.  */
+ #define ISA_BASE_LA64V100     0
+ /* LoongArch V1.10.  */
+ #define ISA_BASE_LA64V110     1
+ #define N_ISA_BASE_TYPES      2
++extern loongarch_def_array<const char *, N_ISA_BASE_TYPES>
++  loongarch_isa_base_strings;
+ 
+ #if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ /* Unlike other arrays, this is defined in loongarch-cpu.cc.  The problem is
+@@ -72,7 +70,6 @@ extern int64_t loongarch_isa_base_features[];
+ #endif
+ 
+ /* enum isa_ext_* */
+-extern const char* loongarch_isa_ext_strings[];
+ #define ISA_EXT_NONE	      0
+ #define ISA_EXT_FPU32	      1
+ #define ISA_EXT_FPU64	      2
+@@ -80,13 +77,16 @@ extern const char* loongarch_isa_ext_strings[];
+ #define ISA_EXT_SIMD_LSX      3
+ #define ISA_EXT_SIMD_LASX     4
+ #define N_ISA_EXT_TYPES	      5
++extern loongarch_def_array<const char *, N_ISA_EXT_TYPES>
++  loongarch_isa_ext_strings;
+ 
+ /* enum abi_base */
+-extern const char* loongarch_abi_base_strings[];
+ #define ABI_BASE_LP64D	      0
+ #define ABI_BASE_LP64F	      1
+ #define ABI_BASE_LP64S	      2
+ #define N_ABI_BASE_TYPES      3
++extern loongarch_def_array<const char *, N_ABI_BASE_TYPES>
++  loongarch_abi_base_strings;
+ 
+ #define TO_LP64_ABI_BASE(C) (C)
+ 
+@@ -99,12 +99,12 @@ extern const char* loongarch_abi_base_strings[];
+ 
+ 
+ /* enum abi_ext */
+-extern const char* loongarch_abi_ext_strings[];
+ #define ABI_EXT_BASE	      0
+ #define N_ABI_EXT_TYPES	      1
++extern loongarch_def_array<const char *, N_ABI_EXT_TYPES>
++  loongarch_abi_ext_strings;
+ 
+ /* enum cmodel */
+-extern const char* loongarch_cmodel_strings[];
+ #define CMODEL_NORMAL	      0
+ #define CMODEL_TINY	      1
+ #define CMODEL_TINY_STATIC    2
+@@ -112,6 +112,8 @@ extern const char* loongarch_cmodel_strings[];
+ #define CMODEL_LARGE	      4
+ #define CMODEL_EXTREME	      5
+ #define N_CMODEL_TYPES	      6
++extern loongarch_def_array<const char *, N_CMODEL_TYPES>
++  loongarch_cmodel_strings;
+ 
+ /* enum explicit_relocs */
+ #define EXPLICIT_RELOCS_AUTO	0
+@@ -126,7 +128,6 @@ extern const char* loongarch_cmodel_strings[];
+ #define M_OPT_ABSENT(opt_enum)  ((opt_enum) == M_OPT_UNSET)
+ 
+ 
+-#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ /* Internal representation of the target.  */
+ struct loongarch_isa
+ {
+@@ -139,6 +140,13 @@ struct loongarch_isa
+ 
+      Using int64_t instead of HOST_WIDE_INT for C compatibility.  */
+   int64_t evolution;
++
++  loongarch_isa () : base (0), fpu (0), simd (0), evolution (0) {}
++  loongarch_isa base_ (int _base) { base = _base; return *this; }
++  loongarch_isa fpu_ (int _fpu) { fpu = _fpu; return *this; }
++  loongarch_isa simd_ (int _simd) { simd = _simd; return *this; }
++  loongarch_isa evolution_ (int64_t _evolution)
++    { evolution = _evolution; return *this; }
+ };
+ 
+ struct loongarch_abi
+@@ -156,9 +164,6 @@ struct loongarch_target
+   int cmodel;	    /* CMODEL_ */
+ };
+ 
+-extern struct loongarch_isa loongarch_cpu_default_isa[];
+-#endif
+-
+ /* CPU properties.  */
+ /* index */
+ #define CPU_NATIVE	  0
+@@ -170,15 +175,19 @@ extern struct loongarch_isa loongarch_cpu_default_isa[];
+ #define N_TUNE_TYPES	  5
+ 
+ /* parallel tables.  */
+-extern const char* loongarch_cpu_strings[];
+-extern int loongarch_cpu_issue_rate[];
+-extern int loongarch_cpu_multipass_dfa_lookahead[];
++extern loongarch_def_array<const char *, N_TUNE_TYPES>
++  loongarch_cpu_strings;
++extern loongarch_def_array<loongarch_isa, N_ARCH_TYPES>
++  loongarch_cpu_default_isa;
++extern loongarch_def_array<int, N_TUNE_TYPES>
++  loongarch_cpu_issue_rate;
++extern loongarch_def_array<int, N_TUNE_TYPES>
++  loongarch_cpu_multipass_dfa_lookahead;
++extern loongarch_def_array<loongarch_cache, N_TUNE_TYPES>
++  loongarch_cpu_cache;
++extern loongarch_def_array<loongarch_align, N_TUNE_TYPES>
++  loongarch_cpu_align;
++extern loongarch_def_array<loongarch_rtx_cost_data, N_TUNE_TYPES>
++  loongarch_cpu_rtx_cost_data;
+ 
+-extern struct loongarch_cache loongarch_cpu_cache[];
+-extern struct loongarch_align loongarch_cpu_align[];
+-extern struct loongarch_rtx_cost_data loongarch_cpu_rtx_cost_data[];
+-
+-#ifdef __cplusplus
+-}
+-#endif
+ #endif /* LOONGARCH_DEF_H */
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 390720479..45fc521e4 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -163,6 +163,7 @@ loongarch_config_target (struct loongarch_target *target,
+ 			 int follow_multilib_list_p)
+ {
+   struct loongarch_target t;
++
+   if (!target)
+     return;
+ 
+@@ -657,12 +658,18 @@ abi_str (struct loongarch_abi abi)
+ 		     strlen (loongarch_abi_base_strings[abi.base]));
+   else
+     {
++      /* This situation has not yet occurred, so in order to avoid the
++	 -Warray-bounds warning during C++ syntax checking, this part
++	 of the code is commented first.  */
++      /*
+       APPEND_STRING (loongarch_abi_base_strings[abi.base])
+       APPEND1 ('/')
+       APPEND_STRING (loongarch_abi_ext_strings[abi.ext])
+       APPEND1 ('\0')
+ 
+       return XOBFINISH (&msg_obstack, const char *);
++      */
++      gcc_unreachable ();
+     }
+ }
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 9b3d023ac..0dabf1551 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -21,7 +21,10 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef LOONGARCH_OPTS_H
+ #define LOONGARCH_OPTS_H
+ 
++/* This is a C++ header and it shouldn't be used by target libraries.  */
++#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ #include "loongarch-def.h"
++#endif
+ 
+ /* Target configuration */
+ extern struct loongarch_target la_target;
+@@ -33,7 +36,6 @@ struct loongarch_flags {
+     int sx[2];
+ };
+ 
+-#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ 
+ /* Initialize loongarch_target from separate option variables.  */
+ void
+@@ -54,7 +56,6 @@ void
+ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ 				 struct gcc_options *opts,
+ 				 struct gcc_options *opts_set);
+-#endif
+ 
+ 
+ /* Macros for common conditional expressions used in loongarch.{c,h,md} */
+diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h
+index d961963f0..616b94e87 100644
+--- a/gcc/config/loongarch/loongarch-tune.h
++++ b/gcc/config/loongarch/loongarch-tune.h
+@@ -21,6 +21,8 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef LOONGARCH_TUNE_H
+ #define LOONGARCH_TUNE_H
+ 
++#include "loongarch-def-array.h"
++
+ /* RTX costs of various operations on the different architectures.  */
+ struct loongarch_rtx_cost_data
+ {
+@@ -35,6 +37,76 @@ struct loongarch_rtx_cost_data
+   unsigned short int_div_di;
+   unsigned short branch_cost;
+   unsigned short memory_latency;
++
++  /* Default RTX cost initializer, implemented in loongarch-def.cc.  */
++  loongarch_rtx_cost_data ();
++
++  loongarch_rtx_cost_data fp_add_ (unsigned short _fp_add)
++  {
++    fp_add = _fp_add;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data fp_mult_sf_ (unsigned short _fp_mult_sf)
++  {
++    fp_mult_sf = _fp_mult_sf;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data fp_mult_df_ (unsigned short _fp_mult_df)
++  {
++    fp_mult_df = _fp_mult_df;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data fp_div_sf_ (unsigned short _fp_div_sf)
++  {
++    fp_div_sf = _fp_div_sf;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data fp_div_df_ (unsigned short _fp_div_df)
++  {
++    fp_div_df = _fp_div_df;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data int_mult_si_ (unsigned short _int_mult_si)
++  {
++    int_mult_si = _int_mult_si;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data int_mult_di_ (unsigned short _int_mult_di)
++  {
++    int_mult_di = _int_mult_di;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data int_div_si_ (unsigned short _int_div_si)
++  {
++    int_div_si = _int_div_si;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data int_div_di_ (unsigned short _int_div_di)
++  {
++    int_div_di = _int_div_di;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data branch_cost_ (unsigned short _branch_cost)
++  {
++    branch_cost = _branch_cost;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data memory_latency_ (unsigned short _memory_latency)
++  {
++    memory_latency = _memory_latency;
++    return *this;
++  }
++
+ };
+ 
+ /* Costs to use when optimizing for size.  */
+@@ -42,10 +114,39 @@ extern const struct loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size;
+ 
+ /* Cache size record of known processor models.  */
+ struct loongarch_cache {
+-    int l1d_line_size;  /* bytes */
+-    int l1d_size;       /* KiB */
+-    int l2d_size;       /* kiB */
+-    int simultaneous_prefetches; /* number of parallel prefetch */
++  int l1d_line_size;  /* bytes */
++  int l1d_size;       /* KiB */
++  int l2d_size;       /* kiB */
++  int simultaneous_prefetches; /* number of parallel prefetch */
++
++  loongarch_cache () : l1d_line_size (0),
++		       l1d_size (0),
++		       l2d_size (0),
++		       simultaneous_prefetches (0) {}
++
++  loongarch_cache l1d_line_size_ (int _l1d_line_size)
++  {
++    l1d_line_size = _l1d_line_size;
++    return *this;
++  }
++
++  loongarch_cache l1d_size_ (int _l1d_size)
++  {
++    l1d_size = _l1d_size;
++    return *this;
++  }
++
++  loongarch_cache l2d_size_ (int _l2d_size)
++  {
++    l2d_size = _l2d_size;
++    return *this;
++  }
++
++  loongarch_cache simultaneous_prefetches_ (int _simultaneous_prefetches)
++  {
++    simultaneous_prefetches = _simultaneous_prefetches;
++    return *this;
++  }
+ };
+ 
+ /* Alignment for functions and labels for best performance.  For new uarchs
+@@ -54,6 +155,20 @@ struct loongarch_cache {
+ struct loongarch_align {
+   const char *function;	/* default value for -falign-functions */
+   const char *label;	/* default value for -falign-labels */
++
++  loongarch_align () : function (nullptr), label (nullptr) {}
++
++  loongarch_align function_ (const char *_function)
++  {
++    function = _function;
++    return *this;
++  }
++
++  loongarch_align label_ (const char *_label)
++  {
++    label = _label;
++    return *this;
++  }
+ };
+ 
+ #endif /* LOONGARCH_TUNE_H */
+diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch
+index 57b1176bc..a1a40431f 100644
+--- a/gcc/config/loongarch/t-loongarch
++++ b/gcc/config/loongarch/t-loongarch
+@@ -64,8 +64,8 @@ loongarch-cpu.o: $(srcdir)/config/loongarch/loongarch-cpu.cc $(LA_STR_H) \
+ 		 $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
+ 	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+ 
+-loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.c $(LA_STR_H)
+-	$(CC) -c $(ALL_CFLAGS) $(INCLUDES) $<
++loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.cc $(LA_STR_H)
++	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+ 
+ $(srcdir)/config/loongarch/loongarch.opt: s-loongarch-opt ; @true
+ s-loongarch-opt: $(srcdir)/config/loongarch/genopts/genstr.sh \
+-- 
+2.43.0
+
diff --git a/0057-AutoFdo-Fix-memory-leaks-in-autofdo.patch b/0057-AutoFdo-Fix-memory-leaks-in-autofdo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4d80606a075a6f331c7eae33a8daa0eb5a6d4db5
--- /dev/null
+++ b/0057-AutoFdo-Fix-memory-leaks-in-autofdo.patch
@@ -0,0 +1,90 @@
+From 13e82fccba781b29e55a6e1934986514019b728d Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao 
+Date: Sun, 24 Mar 2024 20:42:27 +0800
+Subject: [PATCH 02/32] [AutoFdo] Fix memory leaks in autofdo
+
+---
+ gcc/final.cc | 22 ++++++++++++++--------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/gcc/final.cc b/gcc/final.cc
+index d4c4fa08f..af4e529bb 100644
+--- a/gcc/final.cc
++++ b/gcc/final.cc
+@@ -4402,12 +4402,15 @@ get_fdo_count_quality (profile_count count)
+   return profile_quality[count.quality ()];
+ }
+ 
+-static const char *
++/* If the function is not public, return the function_name/file_name for
++   disambiguation of local symbols since there could be identical function
++   names coming from identical file names.  The caller needs to free memory.  */
++static char *
+ alias_local_functions (const char *fnname)
+ {
+   if (TREE_PUBLIC (cfun->decl))
+     {
+-      return fnname;
++      return concat (fnname, NULL);
+     }
+   return concat (fnname, "/", lbasename (dump_base_name), NULL);
+ }
+@@ -4457,12 +4460,13 @@ dump_direct_callee_info_to_asm (basic_block bb, gcov_type call_count)
+ 
+ 	  if (callee)
+ 	    {
++	      char *func_name =
++		      alias_local_functions (get_fnname_from_decl (callee));
+ 	      fprintf (asm_out_file, "\t.string \"%x\"\n",
+ 		       INSN_ADDRESSES (INSN_UID (insn)));
+ 
+ 	      fprintf (asm_out_file, "\t.string \"%s%s\"\n",
+-		       ASM_FDO_CALLEE_FLAG,
+-                       alias_local_functions (get_fnname_from_decl (callee)));
++		       ASM_FDO_CALLEE_FLAG, func_name);
+ 
+               fprintf (asm_out_file,
+                        "\t.string \"" HOST_WIDE_INT_PRINT_DEC "\"\n",
+@@ -4472,9 +4476,9 @@ dump_direct_callee_info_to_asm (basic_block bb, gcov_type call_count)
+                 {
+                   fprintf (dump_file, "call: %x --> %s \n",
+                            INSN_ADDRESSES (INSN_UID (insn)),
+-                           alias_local_functions
+-                           (get_fnname_from_decl (callee)));
++			   func_name);
+                 }
++	      free (func_name);
+             }
+         }
+      } 
+@@ -4547,8 +4551,9 @@ dump_bb_info_to_asm (basic_block bb, gcov_type bb_count)
+ static void 
+ dump_function_info_to_asm (const char *fnname)
+ {
++  char *func_name = alias_local_functions (fnname);
+   fprintf (asm_out_file, "\t.string \"%s%s\"\n",
+-           ASM_FDO_CALLER_FLAG, alias_local_functions (fnname));
++	   ASM_FDO_CALLER_FLAG, func_name);
+   fprintf (asm_out_file, "\t.string \"%s%d\"\n",
+            ASM_FDO_CALLER_SIZE_FLAG, get_function_end_addr ());
+   fprintf (asm_out_file, "\t.string \"%s%s\"\n",
+@@ -4557,7 +4562,7 @@ dump_function_info_to_asm (const char *fnname)
+   if (dump_file)
+     {
+       fprintf (dump_file, "\n FUNC_NAME: %s\n",
+-               alias_local_functions (fnname));
++	       func_name);
+       fprintf (dump_file, " file: %s\n",
+                dump_base_name);
+       fprintf (dump_file, "profile_status: %s\n",
+@@ -4567,6 +4572,7 @@ dump_function_info_to_asm (const char *fnname)
+       fprintf (dump_file, " function_bind: %s\n",
+                simple_get_function_bind ());
+     }
++  free (func_name);
+ }
+ 
+ /* Dump function profile into form AutoFDO or PGO to asm.    */
+-- 
+2.28.0.windows.1
+
diff --git a/0057-LoongArch-Remove-the-definition-of-ISA_BASE_LA64V110.patch b/0057-LoongArch-Remove-the-definition-of-ISA_BASE_LA64V110.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0ad408672d5e937ce0ca44f95aca57a9f9d35025
--- /dev/null
+++ b/0057-LoongArch-Remove-the-definition-of-ISA_BASE_LA64V110.patch
@@ -0,0 +1,261 @@
+From 1ec35f153636077760b65dc3e0385d0a4d383486 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 1 Dec 2023 11:51:51 +0800
+Subject: [PATCH 057/188] LoongArch: Remove the definition of ISA_BASE_LA64V110
+ from the code.
+
+The instructions defined in LoongArch Reference Manual v1.1 are not the instruction
+set v1.1 version. The CPU defined later may only support some instructions in
+LoongArch Reference Manual v1.1. Therefore, the macro ISA_BASE_LA64V110 and
+related definitions are removed here.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch-strings: Delete STR_ISA_BASE_LA64V110.
+	* config/loongarch/genopts/loongarch.opt.in: Likewise.
+	* config/loongarch/loongarch-cpu.cc (ISA_BASE_LA64V110_FEATURES): Delete macro.
+	(fill_native_cpu_config): Define a new variable hw_isa_evolution record the
+	extended instruction set support read from cpucfg.
+	* config/loongarch/loongarch-def.cc: Set evolution at initialization.
+	* config/loongarch/loongarch-def.h (ISA_BASE_LA64V100): Delete.
+	(ISA_BASE_LA64V110): Likewise.
+	(N_ISA_BASE_TYPES): Likewise.
+	(defined): Likewise.
+	* config/loongarch/loongarch-opts.cc: Likewise.
+	* config/loongarch/loongarch-opts.h (TARGET_64BIT): Likewise.
+	(ISA_BASE_IS_LA64V110): Likewise.
+	* config/loongarch/loongarch-str.h (STR_ISA_BASE_LA64V110): Likewise.
+	* config/loongarch/loongarch.opt: Regenerate.
+---
+ .../loongarch/genopts/loongarch-strings       |  1 -
+ gcc/config/loongarch/genopts/loongarch.opt.in |  3 ---
+ gcc/config/loongarch/loongarch-cpu.cc         | 23 +++++--------------
+ gcc/config/loongarch/loongarch-def.cc         | 14 +++++++----
+ gcc/config/loongarch/loongarch-def.h          | 12 ++--------
+ gcc/config/loongarch/loongarch-opts.cc        |  3 ---
+ gcc/config/loongarch/loongarch-opts.h         |  4 +---
+ gcc/config/loongarch/loongarch-str.h          |  1 -
+ gcc/config/loongarch/loongarch.opt            |  3 ---
+ 9 files changed, 19 insertions(+), 45 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index 6c8a42af2..411ad5696 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -30,7 +30,6 @@ STR_CPU_LA664	      la664
+ 
+ # Base architecture
+ STR_ISA_BASE_LA64V100 la64
+-STR_ISA_BASE_LA64V110 la64v1.1
+ 
+ # -mfpu
+ OPTSTR_ISA_EXT_FPU    fpu
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index a49de07c9..cd5e75e4f 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -32,9 +32,6 @@ Basic ISAs of LoongArch:
+ EnumValue
+ Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100)
+ 
+-EnumValue
+-Enum(isa_base) String(@@STR_ISA_BASE_LA64V110@@) Value(ISA_BASE_LA64V110)
+-
+ ;; ISA extensions / adjustments
+ Enum
+ Name(isa_ext_fpu) Type(int)
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index bbce82c9c..7e0625835 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -23,7 +23,6 @@ along with GCC; see the file COPYING3.  If not see
+ #include "config.h"
+ #include "system.h"
+ #include "coretypes.h"
+-#include "tm.h"
+ #include "diagnostic-core.h"
+ 
+ #include "loongarch-def.h"
+@@ -32,19 +31,6 @@ along with GCC; see the file COPYING3.  If not see
+ #include "loongarch-cpucfg-map.h"
+ #include "loongarch-str.h"
+ 
+-/* loongarch_isa_base_features defined here instead of loongarch-def.c
+-   because we need to use options.h.  Pay attention on the order of elements
+-   in the initializer becaue ISO C++ does not allow C99 designated
+-   initializers!  */
+-
+-#define ISA_BASE_LA64V110_FEATURES \
+-  (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA \
+-   | OPTION_MASK_ISA_LAM_BH | OPTION_MASK_ISA_LAMCAS)
+-
+-int64_t loongarch_isa_base_features[N_ISA_BASE_TYPES] = {
+-  /* [ISA_BASE_LA64V100] = */ 0,
+-  /* [ISA_BASE_LA64V110] = */ ISA_BASE_LA64V110_FEATURES,
+-};
+ 
+ /* Native CPU detection with "cpucfg" */
+ static uint32_t cpucfg_cache[N_CPUCFG_WORDS] = { 0 };
+@@ -235,18 +221,20 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+       /* Use the native value anyways.  */
+       preset.simd = tmp;
+ 
++
++      int64_t hw_isa_evolution = 0;
++
+       /* Features added during ISA evolution.  */
+       for (const auto &entry: cpucfg_map)
+ 	if (cpucfg_cache[entry.cpucfg_word] & entry.cpucfg_bit)
+-	  preset.evolution |= entry.isa_evolution_bit;
++	  hw_isa_evolution |= entry.isa_evolution_bit;
+ 
+       if (native_cpu_type != CPU_NATIVE)
+ 	{
+ 	  /* Check if the local CPU really supports the features of the base
+ 	     ISA of probed native_cpu_type.  If any feature is not detected,
+ 	     either GCC or the hardware is buggy.  */
+-	  auto base_isa_feature = loongarch_isa_base_features[preset.base];
+-	  if ((preset.evolution & base_isa_feature) != base_isa_feature)
++	  if ((preset.evolution & hw_isa_evolution) != hw_isa_evolution)
+ 	    warning (0,
+ 		     "detected base architecture %qs, but some of its "
+ 		     "features are not detected; the detected base "
+@@ -254,6 +242,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 		     "features will be enabled",
+ 		     loongarch_isa_base_strings[preset.base]);
+ 	}
++      preset.evolution = hw_isa_evolution;
+     }
+ 
+   if (tune_native_p)
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index 6990c86c2..bc6997e45 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -18,6 +18,11 @@ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3.  If not see
+ .  */
+ 
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++
+ #include "loongarch-def.h"
+ #include "loongarch-str.h"
+ 
+@@ -51,9 +56,11 @@ array_arch loongarch_cpu_default_isa =
+ 	    .simd_ (ISA_EXT_SIMD_LASX))
+     .set (CPU_LA664,
+ 	  loongarch_isa ()
+-	    .base_ (ISA_BASE_LA64V110)
++	    .base_ (ISA_BASE_LA64V100)
+ 	    .fpu_ (ISA_EXT_FPU64)
+-	    .simd_ (ISA_EXT_SIMD_LASX));
++	    .simd_ (ISA_EXT_SIMD_LASX)
++	    .evolution_ (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA
++		    | OPTION_MASK_ISA_LAM_BH | OPTION_MASK_ISA_LAMCAS));
+ 
+ static inline loongarch_cache la464_cache ()
+ {
+@@ -136,8 +143,7 @@ array_tune loongarch_cpu_multipass_dfa_lookahead = array_tune ()
+ 
+ array loongarch_isa_base_strings =
+   array ()
+-    .set (ISA_BASE_LA64V100, STR_ISA_BASE_LA64V100)
+-    .set (ISA_BASE_LA64V110, STR_ISA_BASE_LA64V110);
++    .set (ISA_BASE_LA64V100, STR_ISA_BASE_LA64V100);
+ 
+ array loongarch_isa_ext_strings =
+   array ()
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index 5ac70dfdd..f8f36f0e2 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -56,19 +56,11 @@ along with GCC; see the file COPYING3.  If not see
+ /* enum isa_base */
+ 
+ /* LoongArch V1.00.  */
+-#define ISA_BASE_LA64V100     0
+-/* LoongArch V1.10.  */
+-#define ISA_BASE_LA64V110     1
+-#define N_ISA_BASE_TYPES      2
++#define ISA_BASE_LA64V100	0
++#define N_ISA_BASE_TYPES	1
+ extern loongarch_def_array
+   loongarch_isa_base_strings;
+ 
+-#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+-/* Unlike other arrays, this is defined in loongarch-cpu.cc.  The problem is
+-   we cannot use the C++ header options.h in loongarch-def.c.  */
+-extern int64_t loongarch_isa_base_features[];
+-#endif
+-
+ /* enum isa_ext_* */
+ #define ISA_EXT_NONE	      0
+ #define ISA_EXT_FPU32	      1
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 45fc521e4..d31becc67 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -285,9 +285,6 @@ config_target_isa:
+   /* Get default ISA from "-march" or its default value.  */
+   t.isa = loongarch_cpu_default_isa[t.cpu_arch];
+ 
+-  if (t.cpu_arch != CPU_NATIVE)
+-    t.isa.evolution |= loongarch_isa_base_features[t.isa.base];
+-
+   /* Apply incremental changes.  */
+   /* "-march=native" overrides the default FPU type.  */
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 0dabf1551..7010ddfec 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -77,8 +77,7 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ #define TARGET_DOUBLE_FLOAT	  (la_target.isa.fpu == ISA_EXT_FPU64)
+ #define TARGET_DOUBLE_FLOAT_ABI	  (la_target.abi.base == ABI_BASE_LP64D)
+ 
+-#define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64V100 \
+-				   || la_target.isa.base == ISA_BASE_LA64V110)
++#define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64V100)
+ #define TARGET_ABI_LP64		  (la_target.abi.base == ABI_BASE_LP64D	\
+ 				   || la_target.abi.base == ABI_BASE_LP64F \
+ 				   || la_target.abi.base == ABI_BASE_LP64S)
+@@ -90,7 +89,6 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ /* TARGET_ macros for use in *.md template conditionals */
+ #define TARGET_uARCH_LA464	  (la_target.cpu_tune == CPU_LA464)
+ #define TARGET_uARCH_LA664	  (la_target.cpu_tune == CPU_LA664)
+-#define ISA_BASE_IS_LA64V110	  (la_target.isa.base == ISA_BASE_LA64V110)
+ 
+ /* Note: optimize_size may vary across functions,
+    while -m[no]-memcpy imposes a global constraint.  */
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index 0fee9abe5..7144bbe28 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -33,7 +33,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_CPU_LA664 "la664"
+ 
+ #define STR_ISA_BASE_LA64V100 "la64"
+-#define STR_ISA_BASE_LA64V110 "la64v1.1"
+ 
+ #define OPTSTR_ISA_EXT_FPU "fpu"
+ #define STR_NONE "none"
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index ea0d5bb4e..7fe36feb9 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -40,9 +40,6 @@ Basic ISAs of LoongArch:
+ EnumValue
+ Enum(isa_base) String(la64) Value(ISA_BASE_LA64V100)
+ 
+-EnumValue
+-Enum(isa_base) String(la64v1.1) Value(ISA_BASE_LA64V110)
+-
+ ;; ISA extensions / adjustments
+ Enum
+ Name(isa_ext_fpu) Type(int)
+-- 
+2.43.0
+
diff --git a/0058-LoongArch-Add-support-for-xorsign.patch b/0058-LoongArch-Add-support-for-xorsign.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b4fd958b3d7eafcb8661eb14a0ac40240ca2ce83
--- /dev/null
+++ b/0058-LoongArch-Add-support-for-xorsign.patch
@@ -0,0 +1,412 @@
+From dac02bbb72cae374ddc905fffcc6c94c901f9b26 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Fri, 17 Nov 2023 17:00:21 +0800
+Subject: [PATCH 058/188] LoongArch: Add support for xorsign.
+
+This patch adds support for xorsign pattern to scalar fp and vector. With the
+new expands, uniformly using vector bitwise logical operations to handle xorsign.
+
+On LoongArch64, floating-point registers and vector registers share the same register,
+so this patch also allows conversion between LSX vector mode and scalar fp mode to
+avoid unnecessary instruction generation.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (xorsign3): New expander.
+	* config/loongarch/loongarch.cc (loongarch_can_change_mode_class): Allow
+	conversion between LSX vector mode and scalar fp mode.
+	* config/loongarch/loongarch.md (@xorsign3): New expander.
+	* config/loongarch/lsx.md (@xorsign3): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xorsign-run.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xorsign.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-xorsign-run.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-xorsign.c: New test.
+	* gcc.target/loongarch/xorsign-run.c: New test.
+	* gcc.target/loongarch/xorsign.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 22 +++++--
+ gcc/config/loongarch/loongarch.cc             |  5 ++
+ gcc/config/loongarch/loongarch.md             | 17 ++++++
+ gcc/config/loongarch/lsx.md                   | 23 +++++--
+ .../loongarch/vector/lasx/lasx-xorsign-run.c  | 60 +++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xorsign.c      | 19 ++++++
+ .../loongarch/vector/lsx/lsx-xorsign-run.c    | 60 +++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-xorsign.c        | 19 ++++++
+ .../gcc.target/loongarch/xorsign-run.c        | 25 ++++++++
+ gcc/testsuite/gcc.target/loongarch/xorsign.c  | 18 ++++++
+ 10 files changed, 260 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign-run.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign-run.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/xorsign-run.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/xorsign.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 116b30c07..de7c88f14 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -1065,10 +1065,10 @@
+    (set_attr "mode" "")])
+ 
+ (define_insn "xor3"
+-  [(set (match_operand:ILASX 0 "register_operand" "=f,f,f")
+-	(xor:ILASX
+-	  (match_operand:ILASX 1 "register_operand" "f,f,f")
+-	  (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
++  [(set (match_operand:LASX 0 "register_operand" "=f,f,f")
++	(xor:LASX
++	  (match_operand:LASX 1 "register_operand" "f,f,f")
++	  (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
+   "ISA_HAS_LASX"
+   "@
+    xvxor.v\t%u0,%u1,%u2
+@@ -3061,6 +3061,20 @@
+   operands[5] = gen_reg_rtx (mode);
+ })
+ 
++(define_expand "xorsign3"
++  [(set (match_dup 4)
++    (and:FLASX (match_dup 3)
++        (match_operand:FLASX 2 "register_operand")))
++   (set (match_operand:FLASX 0 "register_operand")
++    (xor:FLASX (match_dup 4)
++         (match_operand:FLASX 1 "register_operand")))]
++  "ISA_HAS_LASX"
++{
++  operands[3] = loongarch_build_signbit_mask (mode, 1, 0);
++
++  operands[4] = gen_reg_rtx (mode);
++})
++
+ 
+ (define_insn "absv4df2"
+   [(set (match_operand:V4DF 0 "register_operand" "=f")
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 3ef7e3605..3c8ae9a42 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6703,6 +6703,11 @@ loongarch_can_change_mode_class (machine_mode from, machine_mode to,
+   if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to))
+     return true;
+ 
++  /* Allow conversion between LSX vector mode and scalar fp mode. */
++  if ((LSX_SUPPORTED_MODE_P (from) && SCALAR_FLOAT_MODE_P (to))
++      || ((SCALAR_FLOAT_MODE_P (from) && LSX_SUPPORTED_MODE_P (to))))
++    return true;
++
+   return !reg_classes_intersect_p (FP_REGS, rclass);
+ }
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index cfd7a8ec6..afc3c591f 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1164,6 +1164,23 @@
+   "fcopysign.\t%0,%1,%2"
+   [(set_attr "type" "fcopysign")
+    (set_attr "mode" "")])
++
++(define_expand "@xorsign3"
++  [(match_operand:ANYF 0 "register_operand")
++   (match_operand:ANYF 1 "register_operand")
++   (match_operand:ANYF 2 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  machine_mode lsx_mode
++    = mode == SFmode ? V4SFmode : V2DFmode;
++  rtx tmp = gen_reg_rtx (lsx_mode);
++  rtx op1 = lowpart_subreg (lsx_mode, operands[1], mode);
++  rtx op2 = lowpart_subreg (lsx_mode, operands[2], mode);
++  emit_insn (gen_xorsign3 (lsx_mode, tmp, op1, op2));
++  emit_move_insn (operands[0],
++          lowpart_subreg (mode, tmp, lsx_mode));
++  DONE;
++})
+ 
+ ;;
+ ;;  ....................
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 232399934..ce6ec6d69 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -957,10 +957,10 @@
+    (set_attr "mode" "")])
+ 
+ (define_insn "xor3"
+-  [(set (match_operand:ILSX 0 "register_operand" "=f,f,f")
+-	(xor:ILSX
+-	  (match_operand:ILSX 1 "register_operand" "f,f,f")
+-	  (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
++  [(set (match_operand:LSX 0 "register_operand" "=f,f,f")
++	(xor:LSX
++	  (match_operand:LSX 1 "register_operand" "f,f,f")
++	  (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
+   "ISA_HAS_LSX"
+   "@
+    vxor.v\t%w0,%w1,%w2
+@@ -2786,6 +2786,21 @@
+   operands[5] = gen_reg_rtx (mode);
+ })
+ 
++(define_expand "@xorsign3"
++  [(set (match_dup 4)
++    (and:FLSX (match_dup 3)
++        (match_operand:FLSX 2 "register_operand")))
++   (set (match_operand:FLSX 0 "register_operand")
++    (xor:FLSX (match_dup 4)
++         (match_operand:FLSX 1 "register_operand")))]
++  "ISA_HAS_LSX"
++{
++  operands[3] = loongarch_build_signbit_mask (mode, 1, 0);
++
++  operands[4] = gen_reg_rtx (mode);
++})
++
++
+ (define_insn "absv2df2"
+   [(set (match_operand:V2DF 0 "register_operand" "=f")
+ 	(abs:V2DF (match_operand:V2DF 1 "register_operand" "f")))]
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign-run.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign-run.c
+new file mode 100644
+index 000000000..2295503d4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign-run.c
+@@ -0,0 +1,60 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -ftree-vectorize -mlasx" } */
++/* { dg-require-effective-target loongarch_asx_hw } */
++
++#include "lasx-xorsign.c"
++
++extern void abort ();
++
++#define N 16
++float a[N] = {-0.1f, -3.2f, -6.3f, -9.4f,
++              -12.5f, -15.6f, -18.7f, -21.8f,
++              24.9f, 27.1f, 30.2f, 33.3f,
++              36.4f, 39.5f, 42.6f, 45.7f};
++float b[N] = {-1.2f, 3.4f, -5.6f, 7.8f,
++              -9.0f, 1.0f, -2.0f, 3.0f,
++              -4.0f, -5.0f, 6.0f, 7.0f,
++              -8.0f, -9.0f, 10.0f, 11.0f};
++float r[N];
++
++double ad[N] = {-0.1d,  -3.2d,  -6.3d,  -9.4d,
++                -12.5d, -15.6d, -18.7d, -21.8d,
++                 24.9d,  27.1d,  30.2d,  33.3d,
++                 36.4d,  39.5d,  42.6d, 45.7d};
++double bd[N] = {-1.2d,  3.4d, -5.6d,  7.8d,
++                -9.0d,  1.0d, -2.0d,  3.0d,
++                -4.0d, -5.0d,  6.0d,  7.0d,
++                -8.0d, -9.0d, 10.0d, 11.0d};
++double rd[N];
++
++void
++__attribute__ ((optimize ("-O0")))
++check_xorsignf (void)
++{
++  for (int i = 0; i < N; i++)
++    if (r[i] != a[i] * __builtin_copysignf (1.0f, b[i]))
++      abort ();
++}
++
++void
++__attribute__ ((optimize ("-O0")))
++check_xorsign (void)
++{
++  for (int i = 0; i < N; i++)
++    if (rd[i] != ad[i] * __builtin_copysign (1.0d, bd[i]))
++      abort ();
++}
++
++int
++main (void)
++{
++  my_xorsignf (r, a, b, N); 
++  /* check results:  */
++  check_xorsignf ();
++
++  my_xorsign (rd, ad, bd, N);
++  /* check results:  */
++  check_xorsign ();
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign.c
+new file mode 100644
+index 000000000..190a9239b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xorsign.c
+@@ -0,0 +1,19 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ftree-vectorize -mlasx" } */
++/* { dg-final { scan-assembler "xvand\\.v" } } */
++/* { dg-final { scan-assembler "xvxor\\.v" } } */
++/* { dg-final { scan-assembler-not "xvfmul" } } */
++
++double
++my_xorsign (double *restrict a, double *restrict b, double *restrict c, int n)
++{
++  for (int i = 0; i < n; i++)
++    a[i] = b[i] * __builtin_copysign (1.0d, c[i]);
++}
++
++float
++my_xorsignf (float *restrict a, float *restrict b, float *restrict c, int n)
++{
++  for (int i = 0; i < n; i++)
++    a[i] = b[i] * __builtin_copysignf (1.0f, c[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign-run.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign-run.c
+new file mode 100644
+index 000000000..22c5c03cc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign-run.c
+@@ -0,0 +1,60 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -ftree-vectorize -mlsx" } */
++/* { dg-require-effective-target loongarch_sx_hw } */
++
++#include "lsx-xorsign.c"
++
++extern void abort ();
++
++#define N 16
++float a[N] = {-0.1f, -3.2f, -6.3f, -9.4f,
++              -12.5f, -15.6f, -18.7f, -21.8f,
++              24.9f, 27.1f, 30.2f, 33.3f,
++              36.4f, 39.5f, 42.6f, 45.7f};
++float b[N] = {-1.2f, 3.4f, -5.6f, 7.8f,
++              -9.0f, 1.0f, -2.0f, 3.0f,
++              -4.0f, -5.0f, 6.0f, 7.0f,
++              -8.0f, -9.0f, 10.0f, 11.0f};
++float r[N];
++
++double ad[N] = {-0.1d,  -3.2d,  -6.3d,  -9.4d,
++                -12.5d, -15.6d, -18.7d, -21.8d,
++                 24.9d,  27.1d,  30.2d,  33.3d,
++                 36.4d,  39.5d,  42.6d, 45.7d};
++double bd[N] = {-1.2d,  3.4d, -5.6d,  7.8d,
++                -9.0d,  1.0d, -2.0d,  3.0d,
++                -4.0d, -5.0d,  6.0d,  7.0d,
++                -8.0d, -9.0d, 10.0d, 11.0d};
++double rd[N];
++
++void
++__attribute__ ((optimize ("-O0")))
++check_xorsignf (void)
++{
++  for (int i = 0; i < N; i++)
++    if (r[i] != a[i] * __builtin_copysignf (1.0f, b[i]))
++      abort ();
++}
++
++void
++__attribute__ ((optimize ("-O0")))
++check_xorsign (void)
++{
++  for (int i = 0; i < N; i++)
++    if (rd[i] != ad[i] * __builtin_copysign (1.0d, bd[i]))
++      abort ();
++}
++
++int
++main (void)
++{
++  my_xorsignf (r, a, b, N);
++  /* check results:  */
++  check_xorsignf ();
++
++  my_xorsign (rd, ad, bd, N);
++  /* check results:  */
++  check_xorsign ();
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign.c
+new file mode 100644
+index 000000000..c2694c11e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-xorsign.c
+@@ -0,0 +1,19 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ftree-vectorize -mlsx" } */
++/* { dg-final { scan-assembler "vand\\.v" } } */
++/* { dg-final { scan-assembler "vxor\\.v" } } */
++/* { dg-final { scan-assembler-not "vfmul" } } */
++
++double
++my_xorsign (double *restrict a, double *restrict b, double *restrict c, int n)
++{
++  for (int i = 0; i < n; i++)
++    a[i] = b[i] * __builtin_copysign (1.0d, c[i]);
++}
++
++float
++my_xorsignf (float *restrict a, float *restrict b, float *restrict c, int n)
++{
++  for (int i = 0; i < n; i++)
++    a[i] = b[i] * __builtin_copysignf (1.0f, c[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/xorsign-run.c b/gcc/testsuite/gcc.target/loongarch/xorsign-run.c
+new file mode 100644
+index 000000000..b4f28adf8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/xorsign-run.c
+@@ -0,0 +1,25 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -mlsx" } */
++/* { dg-require-effective-target loongarch_sx_hw } */
++
++extern void abort(void);
++
++static double x = 2.0;
++static float  y = 2.0;
++
++int main()
++{
++  if ((2.5 * __builtin_copysign(1.0d, x)) != 2.5)
++     abort();
++
++  if ((2.5 * __builtin_copysign(1.0f, y)) != 2.5)
++     abort();
++
++  if ((2.5 * __builtin_copysignf(1.0d, -x)) != -2.5)
++     abort();
++
++  if ((2.5 * __builtin_copysignf(1.0f, -y)) != -2.5)
++     abort();
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/xorsign.c b/gcc/testsuite/gcc.target/loongarch/xorsign.c
+new file mode 100644
+index 000000000..ca80603d4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/xorsign.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx" } */
++/* { dg-final { scan-assembler "vand\\.v" } } */
++/* { dg-final { scan-assembler "vxor\\.v" } } */
++/* { dg-final { scan-assembler-not "fcopysign" } } */
++/* { dg-final { scan-assembler-not "fmul" } } */
++
++double
++my_xorsign (double a, double b)
++{
++  return a * __builtin_copysign (1.0d, b);
++}
++
++float
++my_xorsignf (float a, float b)
++{
++  return a * __builtin_copysignf (1.0f, b);
++}
+-- 
+2.43.0
+
diff --git a/0059-LoongArch-Add-support-for-LoongArch-V1.1-approximate.patch b/0059-LoongArch-Add-support-for-LoongArch-V1.1-approximate.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8edd4f973591484e60107f65a2c4ea9265fad738
--- /dev/null
+++ b/0059-LoongArch-Add-support-for-LoongArch-V1.1-approximate.patch
@@ -0,0 +1,730 @@
+From 88117f2703d06e44983e54a985ec0ad6f2397a46 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Wed, 6 Dec 2023 15:04:49 +0800
+Subject: [PATCH 059/188] LoongArch: Add support for LoongArch V1.1 approximate
+ instructions.
+
+This patch adds define_insn/builtins/intrinsics for these instructions, and add option
+-mfrecipe to control instruction generation.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/isa-evolution.in (frecipe): Add.
+	* config/loongarch/larchintrin.h (__frecipe_s): New intrinsic.
+	(__frecipe_d): Ditto.
+	(__frsqrte_s): Ditto.
+	(__frsqrte_d): Ditto.
+	* config/loongarch/lasx.md (lasx_xvfrecipe_): New insn pattern.
+	(lasx_xvfrsqrte_): Ditto.
+	* config/loongarch/lasxintrin.h (__lasx_xvfrecipe_s): New intrinsic.
+	(__lasx_xvfrecipe_d): Ditto.
+	(__lasx_xvfrsqrte_s): Ditto.
+	(__lasx_xvfrsqrte_d): Ditto.
+	* config/loongarch/loongarch-builtins.cc (AVAIL_ALL): Add predicates.
+	(LSX_EXT_BUILTIN): New macro.
+	(LASX_EXT_BUILTIN): Ditto.
+	* config/loongarch/loongarch-cpucfg-map.h: Regenerate.
+	* config/loongarch/loongarch-c.cc: Add builtin macro "__loongarch_frecipe".
+	* config/loongarch/loongarch-def.cc: Regenerate.
+	* config/loongarch/loongarch-str.h (OPTSTR_FRECIPE): Regenerate.
+	* config/loongarch/loongarch.cc (loongarch_asm_code_end): Dump status for TARGET_FRECIPE.
+	* config/loongarch/loongarch.md (loongarch_frecipe_): New insn pattern.
+	(loongarch_frsqrte_): Ditto.
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/lsx.md (lsx_vfrecipe_): New insn pattern.
+	(lsx_vfrsqrte_): Ditto.
+	* config/loongarch/lsxintrin.h (__lsx_vfrecipe_s): New intrinsic.
+	(__lsx_vfrecipe_d): Ditto.
+	(__lsx_vfrsqrte_s): Ditto.
+	(__lsx_vfrsqrte_d): Ditto.
+	* doc/extend.texi: Add documentation for LoongArch new builtins and intrinsics.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/larch-frecipe-builtin.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-frecipe-builtin.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-frecipe-builtin.c: New test.
+---
+ gcc/config/loongarch/genopts/isa-evolution.in |  1 +
+ gcc/config/loongarch/larchintrin.h            | 38 +++++++++++++++++
+ gcc/config/loongarch/lasx.md                  | 24 +++++++++++
+ gcc/config/loongarch/lasxintrin.h             | 34 +++++++++++++++
+ gcc/config/loongarch/loongarch-builtins.cc    | 42 +++++++++++++++++++
+ gcc/config/loongarch/loongarch-c.cc           |  3 ++
+ gcc/config/loongarch/loongarch-cpucfg-map.h   |  1 +
+ gcc/config/loongarch/loongarch-def.cc         |  3 +-
+ gcc/config/loongarch/loongarch-str.h          |  1 +
+ gcc/config/loongarch/loongarch.cc             |  1 +
+ gcc/config/loongarch/loongarch.md             | 35 +++++++++++++++-
+ gcc/config/loongarch/loongarch.opt            |  4 ++
+ gcc/config/loongarch/lsx.md                   | 24 +++++++++++
+ gcc/config/loongarch/lsxintrin.h              | 34 +++++++++++++++
+ gcc/doc/extend.texi                           | 35 ++++++++++++++++
+ .../loongarch/larch-frecipe-builtin.c         | 28 +++++++++++++
+ .../vector/lasx/lasx-frecipe-builtin.c        | 30 +++++++++++++
+ .../vector/lsx/lsx-frecipe-builtin.c          | 30 +++++++++++++
+ 18 files changed, 365 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/larch-frecipe-builtin.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-frecipe-builtin.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-frecipe-builtin.c
+
+diff --git a/gcc/config/loongarch/genopts/isa-evolution.in b/gcc/config/loongarch/genopts/isa-evolution.in
+index a6bc3f87f..11a198b64 100644
+--- a/gcc/config/loongarch/genopts/isa-evolution.in
++++ b/gcc/config/loongarch/genopts/isa-evolution.in
+@@ -1,3 +1,4 @@
++2	25	frecipe		Support frecipe.{s/d} and frsqrte.{s/d} instructions.
+ 2	26	div32		Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
+ 2	27	lam-bh		Support am{swap/add}[_db].{b/h} instructions.
+ 2	28	lamcas		Support amcas[_db].{b/h/w/d} instructions.
+diff --git a/gcc/config/loongarch/larchintrin.h b/gcc/config/loongarch/larchintrin.h
+index 2833f1487..22035e767 100644
+--- a/gcc/config/loongarch/larchintrin.h
++++ b/gcc/config/loongarch/larchintrin.h
+@@ -333,6 +333,44 @@ __iocsrwr_d (unsigned long int _1, unsigned int _2)
+ }
+ #endif
+ 
++#ifdef __loongarch_frecipe
++/* Assembly instruction format: fd, fj.  */
++/* Data types in instruction templates:  SF, SF.  */
++extern __inline void
++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
++__frecipe_s (float _1)
++{
++  __builtin_loongarch_frecipe_s ((float) _1);
++}
++
++/* Assembly instruction format: fd, fj.  */
++/* Data types in instruction templates:  DF, DF.  */
++extern __inline void
++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
++__frecipe_d (double _1)
++{
++  __builtin_loongarch_frecipe_d ((double) _1);
++}
++
++/* Assembly instruction format: fd, fj.  */
++/* Data types in instruction templates:  SF, SF.  */
++extern __inline void
++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
++__frsqrte_s (float _1)
++{
++  __builtin_loongarch_frsqrte_s ((float) _1);
++}
++
++/* Assembly instruction format: fd, fj.  */
++/* Data types in instruction templates:  DF, DF.  */
++extern __inline void
++__attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
++__frsqrte_d (double _1)
++{
++  __builtin_loongarch_frsqrte_d ((double) _1);
++}
++#endif
++
+ /* Assembly instruction format:	ui15.  */
+ /* Data types in instruction templates:  USI.  */
+ #define __dbar(/*ui15*/ _1) __builtin_loongarch_dbar ((_1))
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index de7c88f14..b1416f6c3 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -40,8 +40,10 @@
+   UNSPEC_LASX_XVFCVTL
+   UNSPEC_LASX_XVFLOGB
+   UNSPEC_LASX_XVFRECIP
++  UNSPEC_LASX_XVFRECIPE
+   UNSPEC_LASX_XVFRINT
+   UNSPEC_LASX_XVFRSQRT
++  UNSPEC_LASX_XVFRSQRTE
+   UNSPEC_LASX_XVFCMP_SAF
+   UNSPEC_LASX_XVFCMP_SEQ
+   UNSPEC_LASX_XVFCMP_SLE
+@@ -1633,6 +1635,17 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
++;; Approximate Reciprocal Instructions.
++
++(define_insn "lasx_xvfrecipe_"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++    (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++		  UNSPEC_LASX_XVFRECIPE))]
++  "ISA_HAS_LASX && TARGET_FRECIPE"
++  "xvfrecipe.\t%u0,%u1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
+ (define_insn "lasx_xvfrsqrt_"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+ 	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+@@ -1642,6 +1655,17 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
++;; Approximate Reciprocal Square Root Instructions.
++
++(define_insn "lasx_xvfrsqrte_"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++    (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++		  UNSPEC_LASX_XVFRSQRTE))]
++  "ISA_HAS_LASX && TARGET_FRECIPE"
++  "xvfrsqrte.\t%u0,%u1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
+ (define_insn "lasx_xvftint_u__"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(unspec: [(match_operand:FLASX 1 "register_operand" "f")]
+diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h
+index 7bce2c757..5e65e76e7 100644
+--- a/gcc/config/loongarch/lasxintrin.h
++++ b/gcc/config/loongarch/lasxintrin.h
+@@ -2399,6 +2399,40 @@ __m256d __lasx_xvfrecip_d (__m256d _1)
+   return (__m256d)__builtin_lasx_xvfrecip_d ((v4f64)_1);
+ }
+ 
++#if defined(__loongarch_frecipe)
++/* Assembly instruction format: xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrecipe_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrecipe_s ((v8f32)_1);
++}
++
++/* Assembly instruction format: xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrecipe_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrecipe_d ((v4f64)_1);
++}
++
++/* Assembly instruction format: xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrsqrte_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrsqrte_s ((v8f32)_1);
++}
++
++/* Assembly instruction format: xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrsqrte_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrsqrte_d ((v4f64)_1);
++}
++#endif
++
+ /* Assembly instruction format:	xd, xj.  */
+ /* Data types in instruction templates:  V8SF, V8SF.  */
+ extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index f4523c8bf..bc156bd36 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -120,6 +120,9 @@ struct loongarch_builtin_description
+ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
+ AVAIL_ALL (lsx, ISA_HAS_LSX)
+ AVAIL_ALL (lasx, ISA_HAS_LASX)
++AVAIL_ALL (frecipe, TARGET_FRECIPE && TARGET_HARD_FLOAT_ABI)
++AVAIL_ALL (lsx_frecipe, ISA_HAS_LSX && TARGET_FRECIPE)
++AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && TARGET_FRECIPE)
+ 
+ /* Construct a loongarch_builtin_description from the given arguments.
+ 
+@@ -164,6 +167,15 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+     "__builtin_lsx_" #INSN,  LARCH_BUILTIN_DIRECT,			\
+     FUNCTION_TYPE, loongarch_builtin_avail_lsx }
+ 
++ /* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_
++    for instruction CODE_FOR_lsx_.  FUNCTION_TYPE is a builtin_description
++    field. AVAIL is the name of the availability predicate, without the leading
++    loongarch_builtin_avail_.  */
++#define LSX_EXT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)                     \
++  { CODE_FOR_lsx_ ## INSN,                                              \
++    "__builtin_lsx_" #INSN,  LARCH_BUILTIN_DIRECT,                      \
++    FUNCTION_TYPE, loongarch_builtin_avail_##AVAIL }
++
+ 
+ /* Define an LSX LARCH_BUILTIN_LSX_TEST_BRANCH function __builtin_lsx_
+    for instruction CODE_FOR_lsx_.  FUNCTION_TYPE is a builtin_description
+@@ -189,6 +201,15 @@ AVAIL_ALL (lasx, ISA_HAS_LASX)
+     "__builtin_lasx_" #INSN,  LARCH_BUILTIN_LASX,			\
+     FUNCTION_TYPE, loongarch_builtin_avail_lasx }
+ 
++/* Define an LASX LARCH_BUILTIN_DIRECT function __builtin_lasx_
++   for instruction CODE_FOR_lasx_.  FUNCTION_TYPE is a builtin_description
++   field. AVAIL is the name of the availability predicate, without the leading
++   loongarch_builtin_avail_.  */
++#define LASX_EXT_BUILTIN(INSN, FUNCTION_TYPE, AVAIL)                    \
++  { CODE_FOR_lasx_ ## INSN,                                             \
++    "__builtin_lasx_" #INSN,  LARCH_BUILTIN_LASX,                       \
++    FUNCTION_TYPE, loongarch_builtin_avail_##AVAIL }
++
+ /* Define an LASX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lasx_
+    for instruction CODE_FOR_lasx_.  FUNCTION_TYPE is a builtin_description
+    field.  */
+@@ -804,6 +825,27 @@ static const struct loongarch_builtin_description loongarch_builtins[] = {
+   DIRECT_NO_TARGET_BUILTIN (syscall, LARCH_VOID_FTYPE_USI, default),
+   DIRECT_NO_TARGET_BUILTIN (break, LARCH_VOID_FTYPE_USI, default),
+ 
++  /* Built-in functions for frecipe.{s/d} and frsqrte.{s/d}.  */
++
++  DIRECT_BUILTIN (frecipe_s, LARCH_SF_FTYPE_SF, frecipe),
++  DIRECT_BUILTIN (frecipe_d, LARCH_DF_FTYPE_DF, frecipe),
++  DIRECT_BUILTIN (frsqrte_s, LARCH_SF_FTYPE_SF, frecipe),
++  DIRECT_BUILTIN (frsqrte_d, LARCH_DF_FTYPE_DF, frecipe),
++
++  /* Built-in functions for new LSX instructions.  */
++
++  LSX_EXT_BUILTIN (vfrecipe_s, LARCH_V4SF_FTYPE_V4SF, lsx_frecipe),
++  LSX_EXT_BUILTIN (vfrecipe_d, LARCH_V2DF_FTYPE_V2DF, lsx_frecipe),
++  LSX_EXT_BUILTIN (vfrsqrte_s, LARCH_V4SF_FTYPE_V4SF, lsx_frecipe),
++  LSX_EXT_BUILTIN (vfrsqrte_d, LARCH_V2DF_FTYPE_V2DF, lsx_frecipe),
++
++  /* Built-in functions for new LASX instructions.  */
++
++  LASX_EXT_BUILTIN (xvfrecipe_s, LARCH_V8SF_FTYPE_V8SF, lasx_frecipe),
++  LASX_EXT_BUILTIN (xvfrecipe_d, LARCH_V4DF_FTYPE_V4DF, lasx_frecipe),
++  LASX_EXT_BUILTIN (xvfrsqrte_s, LARCH_V8SF_FTYPE_V8SF, lasx_frecipe),
++  LASX_EXT_BUILTIN (xvfrsqrte_d, LARCH_V4DF_FTYPE_V4DF, lasx_frecipe),
++
+   /* Built-in functions for LSX.  */
+   LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
+   LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index 76c8ea8db..a89477a74 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -102,6 +102,9 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+   else
+     builtin_define ("__loongarch_frlen=0");
+ 
++  if (TARGET_HARD_FLOAT && TARGET_FRECIPE)
++    builtin_define ("__loongarch_frecipe");
++
+   if (ISA_HAS_LSX)
+     {
+       builtin_define ("__loongarch_simd");
+diff --git a/gcc/config/loongarch/loongarch-cpucfg-map.h b/gcc/config/loongarch/loongarch-cpucfg-map.h
+index 02ff16712..148333c24 100644
+--- a/gcc/config/loongarch/loongarch-cpucfg-map.h
++++ b/gcc/config/loongarch/loongarch-cpucfg-map.h
+@@ -29,6 +29,7 @@ static constexpr struct {
+   unsigned int cpucfg_bit;
+   HOST_WIDE_INT isa_evolution_bit;
+ } cpucfg_map[] = {
++  { 2, 1u << 25, OPTION_MASK_ISA_FRECIPE },
+   { 2, 1u << 26, OPTION_MASK_ISA_DIV32 },
+   { 2, 1u << 27, OPTION_MASK_ISA_LAM_BH },
+   { 2, 1u << 28, OPTION_MASK_ISA_LAMCAS },
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index bc6997e45..c41804a18 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -60,7 +60,8 @@ array_arch loongarch_cpu_default_isa =
+ 	    .fpu_ (ISA_EXT_FPU64)
+ 	    .simd_ (ISA_EXT_SIMD_LASX)
+ 	    .evolution_ (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA
+-		    | OPTION_MASK_ISA_LAM_BH | OPTION_MASK_ISA_LAMCAS));
++			 | OPTION_MASK_ISA_LAM_BH | OPTION_MASK_ISA_LAMCAS
++			 | OPTION_MASK_ISA_FRECIPE));
+ 
+ static inline loongarch_cache la464_cache ()
+ {
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index 7144bbe28..a8821acb0 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -68,6 +68,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_EXPLICIT_RELOCS_NONE "none"
+ #define STR_EXPLICIT_RELOCS_ALWAYS "always"
+ 
++#define OPTSTR_FRECIPE "frecipe"
+ #define OPTSTR_DIV32   "div32"
+ #define OPTSTR_LAM_BH  "lam-bh"
+ #define OPTSTR_LAMCAS  "lamcas"
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 3c8ae9a42..ce1c0a8bd 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -11503,6 +11503,7 @@ loongarch_asm_code_end (void)
+ 	       loongarch_cpu_strings [la_target.cpu_tune]);
+       fprintf (asm_out_file, "%s Base ISA: %s\n", ASM_COMMENT_START,
+ 	       loongarch_isa_base_strings [la_target.isa.base]);
++      DUMP_FEATURE (TARGET_FRECIPE);
+       DUMP_FEATURE (TARGET_DIV32);
+       DUMP_FEATURE (TARGET_LAM_BH);
+       DUMP_FEATURE (TARGET_LAMCAS);
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index afc3c591f..9080cec1c 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -59,6 +59,12 @@
+   ;; Stack tie
+   UNSPEC_TIE
+ 
++  ;; RSQRT
++  UNSPEC_RSQRTE
++
++  ;; RECIP
++  UNSPEC_RECIPE
++
+   ;; CRC
+   UNSPEC_CRC
+   UNSPEC_CRCC
+@@ -220,6 +226,7 @@
+ ;; fmadd	floating point multiply-add
+ ;; fdiv		floating point divide
+ ;; frdiv	floating point reciprocal divide
++;; frecipe      floating point approximate reciprocal
+ ;; fabs		floating point absolute value
+ ;; flogb	floating point exponent extract
+ ;; fneg		floating point negation
+@@ -229,6 +236,7 @@
+ ;; fscaleb	floating point scale
+ ;; fsqrt	floating point square root
+ ;; frsqrt       floating point reciprocal square root
++;; frsqrte      floating point approximate reciprocal square root
+ ;; multi	multiword sequence (or user asm statements)
+ ;; atomic	atomic memory update instruction
+ ;; syncloop	memory atomic operation implemented as a sync loop
+@@ -238,8 +246,8 @@
+   "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
+    prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical,
+    shift,slt,signext,clz,trap,imul,idiv,move,
+-   fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,flogb,fneg,fcmp,fcopysign,fcvt,
+-   fscaleb,fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost,
++   fmove,fadd,fmul,fmadd,fdiv,frdiv,frecipe,fabs,flogb,fneg,fcmp,fcopysign,fcvt,
++   fscaleb,fsqrt,frsqrt,frsqrte,accext,accmod,multi,atomic,syncloop,nop,ghost,
+    simd_div,simd_fclass,simd_flog2,simd_fadd,simd_fcvt,simd_fmul,simd_fmadd,
+    simd_fdiv,simd_bitins,simd_bitmov,simd_insert,simd_sld,simd_mul,simd_fcmp,
+    simd_fexp2,simd_int_arith,simd_bit,simd_shift,simd_splat,simd_fill,
+@@ -908,6 +916,18 @@
+   [(set_attr "type" "frdiv")
+    (set_attr "mode" "")])
+ 
++;; Approximate Reciprocal Instructions.
++
++(define_insn "loongarch_frecipe_"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
++	     UNSPEC_RECIPE))]
++  "TARGET_FRECIPE"
++  "frecipe.\t%0,%1"
++  [(set_attr "type" "frecipe")
++   (set_attr "mode" "")
++   (set_attr "insn_count" "1")])
++
+ ;; Integer division and modulus.
+ (define_expand "3"
+   [(set (match_operand:GPR 0 "register_operand")
+@@ -1133,6 +1153,17 @@
+   [(set_attr "type" "frsqrt")
+    (set_attr "mode" "")
+    (set_attr "insn_count" "1")])
++
++;; Approximate Reciprocal Square Root Instructions.
++
++(define_insn "loongarch_frsqrte_"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++    (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
++		 UNSPEC_RSQRTE))]
++  "TARGET_FRECIPE"
++  "frsqrte.\t%0,%1"
++  [(set_attr "type" "frsqrte")
++   (set_attr "mode" "")])
+ 
+ ;;
+ ;;  ....................
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 7fe36feb9..e7bc8bed4 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -260,6 +260,10 @@ default value is 4.
+ Variable
+ HOST_WIDE_INT isa_evolution = 0
+ 
++mfrecipe
++Target Mask(ISA_FRECIPE) Var(isa_evolution)
++Support frecipe.{s/d} and frsqrte.{s/d} instructions.
++
+ mdiv32
+ Target Mask(ISA_DIV32) Var(isa_evolution)
+ Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index ce6ec6d69..37bdc6910 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -42,8 +42,10 @@
+   UNSPEC_LSX_VFCVTL
+   UNSPEC_LSX_VFLOGB
+   UNSPEC_LSX_VFRECIP
++  UNSPEC_LSX_VFRECIPE
+   UNSPEC_LSX_VFRINT
+   UNSPEC_LSX_VFRSQRT
++  UNSPEC_LSX_VFRSQRTE
+   UNSPEC_LSX_VFCMP_SAF
+   UNSPEC_LSX_VFCMP_SEQ
+   UNSPEC_LSX_VFCMP_SLE
+@@ -1546,6 +1548,17 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
++;; Approximate Reciprocal Instructions.
++
++(define_insn "lsx_vfrecipe_"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++    (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++		 UNSPEC_LSX_VFRECIPE))]
++  "ISA_HAS_LSX && TARGET_FRECIPE"
++  "vfrecipe.\t%w0,%w1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
+ (define_insn "lsx_vfrsqrt_"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+ 	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+@@ -1555,6 +1568,17 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
++;; Approximate Reciprocal Square Root Instructions.
++
++(define_insn "lsx_vfrsqrte_"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++    (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++		 UNSPEC_LSX_VFRSQRTE))]
++  "ISA_HAS_LSX && TARGET_FRECIPE"
++  "vfrsqrte.\t%w0,%w1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
+ (define_insn "lsx_vftint_u__"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(unspec: [(match_operand:FLSX 1 "register_operand" "f")]
+diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h
+index 29553c093..57a6fc40a 100644
+--- a/gcc/config/loongarch/lsxintrin.h
++++ b/gcc/config/loongarch/lsxintrin.h
+@@ -2480,6 +2480,40 @@ __m128d __lsx_vfrecip_d (__m128d _1)
+   return (__m128d)__builtin_lsx_vfrecip_d ((v2f64)_1);
+ }
+ 
++#if defined(__loongarch_frecipe)
++/* Assembly instruction format: vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrecipe_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrecipe_s ((v4f32)_1);
++}
++
++/* Assembly instruction format: vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrecipe_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrecipe_d ((v2f64)_1);
++}
++
++/* Assembly instruction format: vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrsqrte_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrsqrte_s ((v4f32)_1);
++}
++
++/* Assembly instruction format: vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrsqrte_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrsqrte_d ((v2f64)_1);
++}
++#endif
++
+ /* Assembly instruction format:	vd, vj.  */
+ /* Data types in instruction templates:  V4SF, V4SF.  */
+ extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index 7edd3974d..bb042ae78 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -16187,6 +16187,14 @@ The intrinsics provided are listed below:
+     void __builtin_loongarch_break (imm0_32767)
+ @end smallexample
+ 
++These intrinsic functions are available by using @option{-mfrecipe}.
++@smallexample
++    float __builtin_loongarch_frecipe_s (float);
++    double  __builtin_loongarch_frecipe_d (double);
++    float __builtin_loongarch_frsqrte_s (float);
++    double  __builtin_loongarch_frsqrte_d (double);
++@end smallexample
++
+ @emph{Note:}Since the control register is divided into 32-bit and 64-bit,
+ but the access instruction is not distinguished. So GCC renames the control
+ instructions when implementing intrinsics.
+@@ -16259,6 +16267,15 @@ function you need to include @code{larchintrin.h}.
+     void __break (imm0_32767)
+ @end smallexample
+ 
++These intrinsic functions are available by including @code{larchintrin.h} and
++using @option{-mfrecipe}.
++@smallexample
++    float __frecipe_s (float);
++    double __frecipe_d (double);
++    float __frsqrte_s (float);
++    double __frsqrte_d (double);
++@end smallexample
++
+ Returns the value that is currently set in the @samp{tp} register.
+ @smallexample
+     void * __builtin_thread_pointer (void)
+@@ -17085,6 +17102,15 @@ __m128i __lsx_vxori_b (__m128i, imm0_255);
+ __m128i __lsx_vxor_v (__m128i, __m128i);
+ @end smallexample
+ 
++These intrinsic functions are available by including @code{lsxintrin.h} and
++using @option{-mfrecipe} and @option{-mlsx}.
++@smallexample
++__m128d __lsx_vfrecipe_d (__m128d);
++__m128 __lsx_vfrecipe_s (__m128);
++__m128d __lsx_vfrsqrte_d (__m128d);
++__m128 __lsx_vfrsqrte_s (__m128);
++@end smallexample
++
+ @node LoongArch ASX Vector Intrinsics
+ @subsection LoongArch ASX Vector Intrinsics
+ 
+@@ -17924,6 +17950,15 @@ __m256i __lasx_xvxori_b (__m256i, imm0_255);
+ __m256i __lasx_xvxor_v (__m256i, __m256i);
+ @end smallexample
+ 
++These intrinsic functions are available by including @code{lasxintrin.h} and
++using @option{-mfrecipe} and @option{-mlasx}.
++@smallexample
++__m256d __lasx_xvfrecipe_d (__m256d);
++__m256 __lasx_xvfrecipe_s (__m256);
++__m256d __lasx_xvfrsqrte_d (__m256d);
++__m256 __lasx_xvfrsqrte_s (__m256);
++@end smallexample
++
+ @node MIPS DSP Built-in Functions
+ @subsection MIPS DSP Built-in Functions
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/larch-frecipe-builtin.c b/gcc/testsuite/gcc.target/loongarch/larch-frecipe-builtin.c
+new file mode 100644
+index 000000000..b9329f346
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/larch-frecipe-builtin.c
+@@ -0,0 +1,28 @@
++/* Test builtins for frecipe.{s/d} and frsqrte.{s/d} instructions */
++/* { dg-do compile } */
++/* { dg-options "-mfrecipe" } */
++/* { dg-final { scan-assembler-times "test_frecipe_s:.*frecipe\\.s.*test_frecipe_s" 1 } } */
++/* { dg-final { scan-assembler-times "test_frecipe_d:.*frecipe\\.d.*test_frecipe_d" 1 } } */
++/* { dg-final { scan-assembler-times "test_frsqrte_s:.*frsqrte\\.s.*test_frsqrte_s" 1 } } */
++/* { dg-final { scan-assembler-times "test_frsqrte_d:.*frsqrte\\.d.*test_frsqrte_d" 1 } } */
++
++float
++test_frecipe_s (float _1)
++{
++  return __builtin_loongarch_frecipe_s (_1);
++}
++double
++test_frecipe_d (double _1)
++{
++  return __builtin_loongarch_frecipe_d (_1);
++}
++float
++test_frsqrte_s (float _1)
++{
++  return __builtin_loongarch_frsqrte_s (_1);
++}
++double
++test_frsqrte_d (double _1)
++{
++  return __builtin_loongarch_frsqrte_d (_1);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-frecipe-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-frecipe-builtin.c
+new file mode 100644
+index 000000000..522535b45
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-frecipe-builtin.c
+@@ -0,0 +1,30 @@
++/* Test builtins for xvfrecipe.{s/d} and xvfrsqrte.{s/d} instructions */
++/* { dg-do compile } */
++/* { dg-options "-mlasx -mfrecipe" } */
++/* { dg-final { scan-assembler-times "lasx_xvfrecipe_s:.*xvfrecipe\\.s.*lasx_xvfrecipe_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrecipe_d:.*xvfrecipe\\.d.*lasx_xvfrecipe_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrsqrte_s:.*xvfrsqrte\\.s.*lasx_xvfrsqrte_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrsqrte_d:.*xvfrsqrte\\.d.*lasx_xvfrsqrte_d" 1 } } */
++
++#include 
++
++v8f32
++__lasx_xvfrecipe_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrecipe_s (_1);
++}
++v4f64
++__lasx_xvfrecipe_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrecipe_d (_1);
++}
++v8f32
++__lasx_xvfrsqrte_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrsqrte_s (_1);
++}
++v4f64
++__lasx_xvfrsqrte_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrsqrte_d (_1);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-frecipe-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-frecipe-builtin.c
+new file mode 100644
+index 000000000..4ad0cb0ff
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-frecipe-builtin.c
+@@ -0,0 +1,30 @@
++/* Test builtins for vfrecipe.{s/d} and vfrsqrte.{s/d} instructions */
++/* { dg-do compile } */
++/* { dg-options "-mlsx -mfrecipe" } */
++/* { dg-final { scan-assembler-times "lsx_vfrecipe_s:.*vfrecipe\\.s.*lsx_vfrecipe_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrecipe_d:.*vfrecipe\\.d.*lsx_vfrecipe_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrsqrte_s:.*vfrsqrte\\.s.*lsx_vfrsqrte_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrsqrte_d:.*vfrsqrte\\.d.*lsx_vfrsqrte_d" 1 } } */
++
++#include 
++
++v4f32
++__lsx_vfrecipe_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrecipe_s (_1);
++}
++v2f64
++__lsx_vfrecipe_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrecipe_d (_1);
++}
++v4f32
++__lsx_vfrsqrte_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrsqrte_s (_1);
++}
++v2f64
++__lsx_vfrsqrte_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrsqrte_d (_1);
++}
+-- 
+2.43.0
+
diff --git a/0060-LoongArch-Use-standard-pattern-name-for-xvfrsqrt-vfr.patch b/0060-LoongArch-Use-standard-pattern-name-for-xvfrsqrt-vfr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e62d27e98120e84bc3b26282d609b5362d13e563
--- /dev/null
+++ b/0060-LoongArch-Use-standard-pattern-name-for-xvfrsqrt-vfr.patch
@@ -0,0 +1,257 @@
+From e8210e26ac638eb443f8991fee6d412b297cb279 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu <xujiahao@loongson.cn>
+Date: Wed, 6 Dec 2023 15:04:50 +0800
+Subject: [PATCH 060/188] LoongArch: Use standard pattern name for
+ xvfrsqrt/vfrsqrt instructions.
+
+Rename lasx_xvfrsqrt*/lsx_vfrsqrt* to rsqrt<mode>2 to align with standard
+pattern name. Define function use_rsqrt_p to decide when to use rsqrt optab.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (lasx_xvfrsqrt_<flasxfmt>): Renamed to ..
+	(rsqrt<mode>2): .. this.
+	* config/loongarch/loongarch-builtins.cc
+	(CODE_FOR_lsx_vfrsqrt_d): Redefine to standard pattern name.
+	(CODE_FOR_lsx_vfrsqrt_s): Ditto.
+	(CODE_FOR_lasx_xvfrsqrt_d): Ditto.
+	(CODE_FOR_lasx_xvfrsqrt_s): Ditto.
+	* config/loongarch/loongarch.cc (use_rsqrt_p): New function.
+	(loongarch_optab_supported_p): Ditto.
+	(TARGET_OPTAB_SUPPORTED_P): New hook.
+	* config/loongarch/loongarch.md (*rsqrt<mode>a): Remove.
+	(*rsqrt<mode>2): New insn pattern.
+	(*rsqrt<mode>b): Remove.
+	* config/loongarch/lsx.md (lsx_vfrsqrt_<flsxfmt>): Renamed to ..
+	(rsqrt<mode>2): .. this.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-rsqrt.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-rsqrt.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  |  6 ++---
+ gcc/config/loongarch/loongarch-builtins.cc    |  4 +++
+ gcc/config/loongarch/loongarch.cc             | 27 +++++++++++++++++++
+ gcc/config/loongarch/loongarch.md             | 24 +++++------------
+ gcc/config/loongarch/lsx.md                   |  6 ++---
+ .../loongarch/vector/lasx/lasx-rsqrt.c        | 26 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-rsqrt.c          | 26 ++++++++++++++++++
+ 7 files changed, 96 insertions(+), 23 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-rsqrt.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-rsqrt.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index b1416f6c3..3a4a1fe51 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -1646,10 +1646,10 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "<MODE>")])
+ 
+-(define_insn "lasx_xvfrsqrt_<flasxfmt>"
++(define_insn "rsqrt<mode>2"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+-	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+-		      UNSPEC_LASX_XVFRSQRT))]
++    (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++		  UNSPEC_LASX_XVFRSQRT))]
+   "ISA_HAS_LASX"
+   "xvfrsqrt.<flasxfmt>\t%u0,%u1"
+   [(set_attr "type" "simd_fdiv")
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index bc156bd36..4aae27a5e 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -500,6 +500,8 @@ AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && TARGET_FRECIPE)
+ #define CODE_FOR_lsx_vssrlrn_bu_h CODE_FOR_lsx_vssrlrn_u_bu_h
+ #define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w
+ #define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d
++#define CODE_FOR_lsx_vfrsqrt_d CODE_FOR_rsqrtv2df2
++#define CODE_FOR_lsx_vfrsqrt_s CODE_FOR_rsqrtv4sf2
+ 
+ /* LoongArch ASX define CODE_FOR_lasx_mxxx */
+ #define CODE_FOR_lasx_xvsadd_b CODE_FOR_ssaddv32qi3
+@@ -776,6 +778,8 @@ AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && TARGET_FRECIPE)
+ #define CODE_FOR_lasx_xvsat_hu CODE_FOR_lasx_xvsat_u_hu
+ #define CODE_FOR_lasx_xvsat_wu CODE_FOR_lasx_xvsat_u_wu
+ #define CODE_FOR_lasx_xvsat_du CODE_FOR_lasx_xvsat_u_du
++#define CODE_FOR_lasx_xvfrsqrt_d CODE_FOR_rsqrtv4df2
++#define CODE_FOR_lasx_xvfrsqrt_s CODE_FOR_rsqrtv8sf2
+ 
+ static const struct loongarch_builtin_description loongarch_builtins[] = {
+ #define LARCH_MOVFCSR2GR 0
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index ce1c0a8bd..95aa9453b 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -11487,6 +11487,30 @@ loongarch_builtin_support_vector_misalignment (machine_mode mode,
+ 						      is_packed);
+ }
+ 
++static bool
++use_rsqrt_p (void)
++{
++  return (flag_finite_math_only
++	  && !flag_trapping_math
++	  && flag_unsafe_math_optimizations);
++}
++
++/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */
++
++static bool
++loongarch_optab_supported_p (int op, machine_mode, machine_mode,
++			     optimization_type opt_type)
++{
++  switch (op)
++    {
++    case rsqrt_optab:
++      return opt_type == OPTIMIZE_FOR_SPEED && use_rsqrt_p ();
++
++    default:
++      return true;
++    }
++}
++
+ /* If -fverbose-asm, dump some info for debugging.  */
+ static void
+ loongarch_asm_code_end (void)
+@@ -11625,6 +11649,9 @@ loongarch_asm_code_end (void)
+ #undef TARGET_FUNCTION_ARG_BOUNDARY
+ #define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary
+ 
++#undef TARGET_OPTAB_SUPPORTED_P
++#define TARGET_OPTAB_SUPPORTED_P loongarch_optab_supported_p
++
+ #undef TARGET_VECTOR_MODE_SUPPORTED_P
+ #define TARGET_VECTOR_MODE_SUPPORTED_P loongarch_vector_mode_supported_p
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 9080cec1c..4dfe583e2 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -60,6 +60,7 @@
+   UNSPEC_TIE
+ 
+   ;; RSQRT
++  UNSPEC_RSQRT
+   UNSPEC_RSQRTE
+ 
+   ;; RECIP
+@@ -1134,25 +1135,14 @@
+    (set_attr "mode" "")
+    (set_attr "insn_count" "1")])
+ 
+-(define_insn "*rsqrta"
++(define_insn "*rsqrt2"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+-	(div:ANYF (match_operand:ANYF 1 "const_1_operand" "")
+-		  (sqrt:ANYF (match_operand:ANYF 2 "register_operand" "f"))))]
+-  "flag_unsafe_math_optimizations"
+-  "frsqrt.\t%0,%2"
+-  [(set_attr "type" "frsqrt")
+-   (set_attr "mode" "")
+-   (set_attr "insn_count" "1")])
+-
+-(define_insn "*rsqrtb"
+-  [(set (match_operand:ANYF 0 "register_operand" "=f")
+-	(sqrt:ANYF (div:ANYF (match_operand:ANYF 1 "const_1_operand" "")
+-			     (match_operand:ANYF 2 "register_operand" "f"))))]
+-  "flag_unsafe_math_optimizations"
+-  "frsqrt.\t%0,%2"
++    (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
++	     UNSPEC_RSQRT))]
++  "TARGET_HARD_FLOAT"
++  "frsqrt.\t%0,%1"
+   [(set_attr "type" "frsqrt")
+-   (set_attr "mode" "")
+-   (set_attr "insn_count" "1")])
++   (set_attr "mode" "")])
+ 
+ ;; Approximate Reciprocal Square Root Instructions.
+ 
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 37bdc6910..cb4a448e7 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -1559,10 +1559,10 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "<MODE>")])
+ 
+-(define_insn "lsx_vfrsqrt_<flsxfmt>"
++(define_insn "rsqrt<mode>2"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+-	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRSQRT))]
++    (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++		 UNSPEC_LSX_VFRSQRT))]
+   "ISA_HAS_LSX"
+   "vfrsqrt.<flsxfmt>\t%w0,%w1"
+   [(set_attr "type" "simd_fdiv")
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-rsqrt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-rsqrt.c
+new file mode 100644
+index 000000000..24316944d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-rsqrt.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -ffast-math" } */
++/* { dg-final { scan-assembler "xvfrsqrt.s" } } */
++/* { dg-final { scan-assembler "xvfrsqrt.d" } } */
++
++extern float sqrtf (float);
++
++float a[8], b[8];
++
++void
++foo1(void)
++{
++  for (int i = 0; i < 8; i++)
++    a[i] = 1 / sqrtf (b[i]);
++}
++
++extern double sqrt (double);
++
++double da[4], db[4];
++
++void
++foo2(void)
++{
++  for (int i = 0; i < 4; i++)
++    da[i] = 1 / sqrt (db[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-rsqrt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-rsqrt.c
+new file mode 100644
+index 000000000..519cc4764
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-rsqrt.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -ffast-math" } */
++/* { dg-final { scan-assembler "vfrsqrt.s" } } */
++/* { dg-final { scan-assembler "vfrsqrt.d" } } */
++
++extern float sqrtf (float);
++
++float a[4], b[4];
++
++void
++foo1(void)
++{
++  for (int i = 0; i < 4; i++)
++    a[i] = 1 / sqrtf (b[i]);
++}
++
++extern double sqrt (double);
++
++double da[2], db[2];
++
++void
++foo2(void)
++{
++  for (int i = 0; i < 2; i++)
++    da[i] = 1 / sqrt (db[i]);
++}
+-- 
+2.43.0
+
diff --git a/0061-LoongArch-Redefine-pattern-for-xvfrecip-vfrecip-inst.patch b/0061-LoongArch-Redefine-pattern-for-xvfrecip-vfrecip-inst.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d61606369888d6d0067d6d83e04bfb198abcfa86
--- /dev/null
+++ b/0061-LoongArch-Redefine-pattern-for-xvfrecip-vfrecip-inst.patch
@@ -0,0 +1,135 @@
+From 74924710ee8d662d883bf898d69aef1946d91ea5 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu <xujiahao@loongson.cn>
+Date: Wed, 6 Dec 2023 15:04:51 +0800
+Subject: [PATCH 061/188] LoongArch: Redefine pattern for xvfrecip/vfrecip
+ instructions.
+
+Redefine pattern for [x]vfrecip instructions use rtx code instead of unspec, and enable
+[x]vfrecip instructions to be generated during auto-vectorization.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (lasx_xvfrecip_<flasxfmt>): Renamed to ..
+	(recip<mode>3): .. this.
+	* config/loongarch/loongarch-builtins.cc (CODE_FOR_lsx_vfrecip_d): Redefine
+	to new pattern name.
+	(CODE_FOR_lsx_vfrecip_s): Ditto.
+	(CODE_FOR_lasx_xvfrecip_d): Ditto.
+	(CODE_FOR_lasx_xvfrecip_s): Ditto.
+	(loongarch_expand_builtin_direct): For the vector recip instructions, construct a
+	temporary parameter const1_vector.
+	* config/loongarch/lsx.md (lsx_vfrecip_<flsxfmt>): Renamed to ..
+	(recip<mode>3): .. this.
+	* config/loongarch/predicates.md (const_vector_1_operand): New predicate.
+---
+ gcc/config/loongarch/lasx.md               |  8 ++++----
+ gcc/config/loongarch/loongarch-builtins.cc | 20 ++++++++++++++++++++
+ gcc/config/loongarch/lsx.md                |  8 ++++----
+ gcc/config/loongarch/predicates.md         |  4 ++++
+ 4 files changed, 32 insertions(+), 8 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 3a4a1fe51..ad49a3ffb 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -1626,12 +1626,12 @@
+   [(set_attr "type" "simd_fminmax")
+    (set_attr "mode" "<MODE>")])
+ 
+-(define_insn "lasx_xvfrecip_<flasxfmt>"
++(define_insn "recip<mode>3"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+-	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+-		      UNSPEC_LASX_XVFRECIP))]
++       (div:FLASX (match_operand:FLASX 1 "const_vector_1_operand" "")
++		  (match_operand:FLASX 2 "register_operand" "f")))]
+   "ISA_HAS_LASX"
+-  "xvfrecip.<flasxfmt>\t%u0,%u1"
++  "xvfrecip.<flasxfmt>\t%u0,%u2"
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "<MODE>")])
+ 
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index 4aae27a5e..85849ed29 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -502,6 +502,8 @@ AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && TARGET_FRECIPE)
+ #define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d
+ #define CODE_FOR_lsx_vfrsqrt_d CODE_FOR_rsqrtv2df2
+ #define CODE_FOR_lsx_vfrsqrt_s CODE_FOR_rsqrtv4sf2
++#define CODE_FOR_lsx_vfrecip_d CODE_FOR_recipv2df3
++#define CODE_FOR_lsx_vfrecip_s CODE_FOR_recipv4sf3
+ 
+ /* LoongArch ASX define CODE_FOR_lasx_mxxx */
+ #define CODE_FOR_lasx_xvsadd_b CODE_FOR_ssaddv32qi3
+@@ -780,6 +782,8 @@ AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && TARGET_FRECIPE)
+ #define CODE_FOR_lasx_xvsat_du CODE_FOR_lasx_xvsat_u_du
+ #define CODE_FOR_lasx_xvfrsqrt_d CODE_FOR_rsqrtv4df2
+ #define CODE_FOR_lasx_xvfrsqrt_s CODE_FOR_rsqrtv8sf2
++#define CODE_FOR_lasx_xvfrecip_d CODE_FOR_recipv4df3
++#define CODE_FOR_lasx_xvfrecip_s CODE_FOR_recipv8sf3
+ 
+ static const struct loongarch_builtin_description loongarch_builtins[] = {
+ #define LARCH_MOVFCSR2GR 0
+@@ -3019,6 +3023,22 @@ loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
+   if (has_target_p)
+     create_output_operand (&ops[opno++], target, TYPE_MODE (TREE_TYPE (exp)));
+ 
++  /* For the vector reciprocal instructions, we need to construct a temporary
++     parameter const1_vector.  */
++  switch (icode)
++    {
++    case CODE_FOR_recipv8sf3:
++    case CODE_FOR_recipv4df3:
++    case CODE_FOR_recipv4sf3:
++    case CODE_FOR_recipv2df3:
++      loongarch_prepare_builtin_arg (&ops[2], exp, 0);
++      create_input_operand (&ops[1], CONST1_RTX (ops[0].mode), ops[0].mode);
++      return loongarch_expand_builtin_insn (icode, 3, ops, has_target_p);
++
++    default:
++      break;
++    }
++
+   /* Map the arguments to the other operands.  */
+   gcc_assert (opno + call_expr_nargs (exp)
+ 	      == insn_data[icode].n_generator_args);
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index cb4a448e7..f2774f021 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -1539,12 +1539,12 @@
+   [(set_attr "type" "simd_fminmax")
+    (set_attr "mode" "<MODE>")])
+ 
+-(define_insn "lsx_vfrecip_<flsxfmt>"
++(define_insn "recip<mode>3"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+-	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+-		     UNSPEC_LSX_VFRECIP))]
++       (div:FLSX (match_operand:FLSX 1 "const_vector_1_operand" "")
++		 (match_operand:FLSX 2 "register_operand" "f")))]
+   "ISA_HAS_LSX"
+-  "vfrecip.<flsxfmt>\t%w0,%w1"
++  "vfrecip.<flsxfmt>\t%w0,%w2"
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "<MODE>")])
+ 
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 30a0dee9f..572550dbc 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -227,6 +227,10 @@
+   (and (match_code "const_int,const_wide_int,const_double,const_vector")
+        (match_test "op == CONST1_RTX (GET_MODE (op))")))
+ 
++(define_predicate "const_vector_1_operand"
++  (and (match_code "const_vector")
++       (match_test "op == CONST1_RTX (GET_MODE (op))")))
++
+ (define_predicate "reg_or_1_operand"
+   (ior (match_operand 0 "const_1_operand")
+        (match_operand 0 "register_operand")))
+-- 
+2.43.0
+
diff --git a/0062-LoongArch-New-options-mrecip-and-mrecip-with-ffast-m.patch b/0062-LoongArch-New-options-mrecip-and-mrecip-with-ffast-m.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ad072c79b23b354feb02944d9ff85f709475f96f
--- /dev/null
+++ b/0062-LoongArch-New-options-mrecip-and-mrecip-with-ffast-m.patch
@@ -0,0 +1,1096 @@
+From faac4efbee23e60691fc086a78284225ecf824a8 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu <xujiahao@loongson.cn>
+Date: Wed, 6 Dec 2023 15:04:52 +0800
+Subject: [PATCH 062/188] LoongArch: New options -mrecip and -mrecip= with
+ ffast-math.
+
+When both the -mrecip and -mfrecipe options are enabled, use approximate reciprocal
+instructions and approximate reciprocal square root instructions with additional
+Newton-Raphson steps to implement single precision floating-point division, square
+root and reciprocal square root operations, for a better performance.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch.opt.in (recip_mask): New variable.
+	(-mrecip, -mrecip): New options.
+	* config/loongarch/lasx.md (div<mode>3): New expander.
+	(*div<mode>3): Rename.
+	(sqrt<mode>2): New expander.
+	(*sqrt<mode>2): Rename.
+	(rsqrt<mode>2): New expander.
+	* config/loongarch/loongarch-protos.h (loongarch_emit_swrsqrtsf): New prototype.
+	(loongarch_emit_swdivsf): Ditto.
+	* config/loongarch/loongarch.cc (loongarch_option_override_internal): Set
+	recip_mask for -mrecip and -mrecip= options.
+	(loongarch_emit_swrsqrtsf): New function.
+	(loongarch_emit_swdivsf): Ditto.
+	* config/loongarch/loongarch.h (RECIP_MASK_NONE, RECIP_MASK_DIV, RECIP_MASK_SQRT
+	RECIP_MASK_RSQRT, RECIP_MASK_VEC_DIV, RECIP_MASK_VEC_SQRT, RECIP_MASK_VEC_RSQRT
+	RECIP_MASK_ALL): New bitmasks.
+	(TARGET_RECIP_DIV, TARGET_RECIP_SQRT, TARGET_RECIP_RSQRT, TARGET_RECIP_VEC_DIV
+	TARGET_RECIP_VEC_SQRT, TARGET_RECIP_VEC_RSQRT): New tests.
+	* config/loongarch/loongarch.md (sqrt<mode>2): New expander.
+	(*sqrt<mode>2): Rename.
+	(rsqrt<mode>2): New expander.
+	* config/loongarch/loongarch.opt (recip_mask): New variable.
+	(-mrecip, -mrecip): New options.
+	* config/loongarch/lsx.md (div<mode>3): New expander.
+	(*div<mode>3): Rename.
+	(sqrt<mode>2): New expander.
+	(*sqrt<mode>2): Rename.
+	(rsqrt<mode>2): New expander.
+	* config/loongarch/predicates.md (reg_or_vecotr_1_operand): New predicate.
+	* doc/invoke.texi (LoongArch Options): Document new options.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/divf.c: New test.
+	* gcc.target/loongarch/recip-divf.c: New test.
+	* gcc.target/loongarch/recip-sqrtf.c: New test.
+	* gcc.target/loongarch/sqrtf.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-divf.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-recip-divf.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-recip-sqrtf.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-recip.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-sqrtf.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-divf.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-recip-divf.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-recip-sqrtf.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-recip.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-sqrtf.c: New test.
+---
+ gcc/config/loongarch/genopts/loongarch.opt.in |  11 +
+ gcc/config/loongarch/lasx.md                  |  53 ++++-
+ gcc/config/loongarch/loongarch-protos.h       |   2 +
+ gcc/config/loongarch/loongarch.cc             | 188 ++++++++++++++++++
+ gcc/config/loongarch/loongarch.h              |  18 ++
+ gcc/config/loongarch/loongarch.md             |  49 ++++-
+ gcc/config/loongarch/loongarch.opt            |  11 +
+ gcc/config/loongarch/lsx.md                   |  53 ++++-
+ gcc/config/loongarch/predicates.md            |   4 +
+ gcc/doc/invoke.texi                           |  55 ++++-
+ gcc/testsuite/gcc.target/loongarch/divf.c     |  10 +
+ .../gcc.target/loongarch/recip-divf.c         |   9 +
+ .../gcc.target/loongarch/recip-sqrtf.c        |  23 +++
+ gcc/testsuite/gcc.target/loongarch/sqrtf.c    |  24 +++
+ .../loongarch/vector/lasx/lasx-divf.c         |  13 ++
+ .../loongarch/vector/lasx/lasx-recip-divf.c   |  12 ++
+ .../loongarch/vector/lasx/lasx-recip-sqrtf.c  |  28 +++
+ .../loongarch/vector/lasx/lasx-recip.c        |  24 +++
+ .../loongarch/vector/lasx/lasx-sqrtf.c        |  29 +++
+ .../loongarch/vector/lsx/lsx-divf.c           |  13 ++
+ .../loongarch/vector/lsx/lsx-recip-divf.c     |  12 ++
+ .../loongarch/vector/lsx/lsx-recip-sqrtf.c    |  28 +++
+ .../loongarch/vector/lsx/lsx-recip.c          |  24 +++
+ .../loongarch/vector/lsx/lsx-sqrtf.c          |  29 +++
+ 24 files changed, 711 insertions(+), 11 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/divf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/recip-divf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/recip-sqrtf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/sqrtf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-divf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-divf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-sqrtf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sqrtf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-divf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-divf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-sqrtf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sqrtf.c
+
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index cd5e75e4f..102202b03 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -23,6 +23,9 @@ config/loongarch/loongarch-opts.h
+ HeaderInclude
+ config/loongarch/loongarch-str.h
+ 
++TargetVariable
++unsigned int recip_mask = 0
++
+ ; ISA related options
+ ;; Base ISA
+ Enum
+@@ -194,6 +197,14 @@ mexplicit-relocs
+ Target Var(la_opt_explicit_relocs_backward) Init(M_OPT_UNSET)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
++mrecip
++Target RejectNegative Var(loongarch_recip)
++Generate approximate reciprocal divide and square root for better throughput.
++
++mrecip=
++Target RejectNegative Joined Var(loongarch_recip_name)
++Control generation of reciprocal estimates.
++
+ ; The code model option names for -mcmodel.
+ Enum
+ Name(cmodel) Type(int)
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index ad49a3ffb..eeac8cd98 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -1194,7 +1194,25 @@
+   [(set_attr "type" "simd_fmul")
+    (set_attr "mode" "")])
+ 
+-(define_insn "div3"
++(define_expand "div3"
++  [(set (match_operand:FLASX 0 "register_operand")
++    (div:FLASX (match_operand:FLASX 1 "reg_or_vecotr_1_operand")
++	       (match_operand:FLASX 2 "register_operand")))]
++  "ISA_HAS_LASX"
++{
++  if (mode == V8SFmode
++    && TARGET_RECIP_VEC_DIV
++    && optimize_insn_for_speed_p ()
++    && flag_finite_math_only && !flag_trapping_math
++    && flag_unsafe_math_optimizations)
++  {
++    loongarch_emit_swdivsf (operands[0], operands[1],
++	operands[2], V8SFmode);
++    DONE;
++  }
++})
++
++(define_insn "*div3"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+ 	(div:FLASX (match_operand:FLASX 1 "register_operand" "f")
+ 		   (match_operand:FLASX 2 "register_operand" "f")))]
+@@ -1223,7 +1241,23 @@
+   [(set_attr "type" "simd_fmadd")
+    (set_attr "mode" "")])
+ 
+-(define_insn "sqrt2"
++(define_expand "sqrt2"
++  [(set (match_operand:FLASX 0 "register_operand")
++    (sqrt:FLASX (match_operand:FLASX 1 "register_operand")))]
++  "ISA_HAS_LASX"
++{
++  if (mode == V8SFmode
++      && TARGET_RECIP_VEC_SQRT
++      && flag_unsafe_math_optimizations
++      && optimize_insn_for_speed_p ()
++      && flag_finite_math_only && !flag_trapping_math)
++    {
++      loongarch_emit_swrsqrtsf (operands[0], operands[1], V8SFmode, 0);
++      DONE;
++    }
++})
++
++(define_insn "*sqrt2"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+ 	(sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))]
+   "ISA_HAS_LASX"
+@@ -1646,7 +1680,20 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
+-(define_insn "rsqrt2"
++(define_expand "rsqrt2"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++    (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++	     UNSPEC_LASX_XVFRSQRT))]
++  "ISA_HAS_LASX"
++ {
++   if (mode == V8SFmode && TARGET_RECIP_VEC_RSQRT)
++     {
++       loongarch_emit_swrsqrtsf (operands[0], operands[1], V8SFmode, 1);
++       DONE;
++     }
++})
++
++(define_insn "*rsqrt2"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+     (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+ 		  UNSPEC_LASX_XVFRSQRT))]
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 51d38177b..117669e9f 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -220,5 +220,7 @@ extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int);
+ extern tree loongarch_build_builtin_va_list (void);
+ 
+ extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool);
++extern void loongarch_emit_swrsqrtsf (rtx, rtx, machine_mode, bool);
++extern void loongarch_emit_swdivsf (rtx, rtx, rtx, machine_mode);
+ extern bool loongarch_explicit_relocs_p (enum loongarch_symbol_type);
+ #endif /* ! GCC_LOONGARCH_PROTOS_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 95aa9453b..18326ce47 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -7548,6 +7548,71 @@ loongarch_option_override_internal (struct gcc_options *opts,
+ 
+   /* Function to allocate machine-dependent function status.  */
+   init_machine_status = &loongarch_init_machine_status;
++
++  /* -mrecip options.  */
++  static struct
++    {
++      const char *string;	    /* option name.  */
++      unsigned int mask;	    /* mask bits to set.  */
++    }
++  const recip_options[] = {
++	{ "all",       RECIP_MASK_ALL },
++	{ "none",      RECIP_MASK_NONE },
++	{ "div",       RECIP_MASK_DIV },
++	{ "sqrt",      RECIP_MASK_SQRT },
++	{ "rsqrt",     RECIP_MASK_RSQRT },
++	{ "vec-div",   RECIP_MASK_VEC_DIV },
++	{ "vec-sqrt",  RECIP_MASK_VEC_SQRT },
++	{ "vec-rsqrt", RECIP_MASK_VEC_RSQRT },
++  };
++
++  if (loongarch_recip_name)
++    {
++      char *p = ASTRDUP (loongarch_recip_name);
++      char *q;
++      unsigned int mask, i;
++      bool invert;
++
++      while ((q = strtok (p, ",")) != NULL)
++	{
++	  p = NULL;
++	  if (*q == '!')
++	    {
++	      invert = true;
++	      q++;
++	    }
++	  else
++	    invert = false;
++
++	  if (!strcmp (q, "default"))
++	    mask = RECIP_MASK_ALL;
++	  else
++	    {
++	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
++		if (!strcmp (q, recip_options[i].string))
++		  {
++		    mask = recip_options[i].mask;
++		    break;
++		  }
++
++	      if (i == ARRAY_SIZE (recip_options))
++		{
++		  error ("unknown option for %<-mrecip=%s%>", q);
++		  invert = false;
++		  mask = RECIP_MASK_NONE;
++		}
++	    }
++
++	  if (invert)
++	    recip_mask &= ~mask;
++	  else
++	    recip_mask |= mask;
++	}
++    }
++  if (loongarch_recip)
++    recip_mask |= RECIP_MASK_ALL;
++  if (!TARGET_FRECIPE)
++    recip_mask = RECIP_MASK_NONE;
+ }
+ 
+ 
+@@ -11470,6 +11535,126 @@ loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert)
+   return force_reg (vec_mode, v);
+ }
+ 
++/* Use rsqrte instruction and Newton-Rhapson to compute the approximation of
++   a single precision floating point [reciprocal] square root.  */
++
++void loongarch_emit_swrsqrtsf (rtx res, rtx a, machine_mode mode, bool recip)
++{
++  rtx x0, e0, e1, e2, mhalf, monehalf;
++  REAL_VALUE_TYPE r;
++  int unspec;
++
++  x0 = gen_reg_rtx (mode);
++  e0 = gen_reg_rtx (mode);
++  e1 = gen_reg_rtx (mode);
++  e2 = gen_reg_rtx (mode);
++
++  real_arithmetic (&r, ABS_EXPR, &dconsthalf, NULL);
++  mhalf = const_double_from_real_value (r, SFmode);
++
++  real_arithmetic (&r, PLUS_EXPR, &dconsthalf, &dconst1);
++  monehalf = const_double_from_real_value (r, SFmode);
++  unspec = UNSPEC_RSQRTE;
++
++  if (VECTOR_MODE_P (mode))
++    {
++      mhalf = loongarch_build_const_vector (mode, true, mhalf);
++      monehalf = loongarch_build_const_vector (mode, true, monehalf);
++      unspec = GET_MODE_SIZE (mode) == 32 ? UNSPEC_LASX_XVFRSQRTE
++					  : UNSPEC_LSX_VFRSQRTE;
++    }
++
++  /* rsqrt(a) =  rsqrte(a) * (1.5 - 0.5 * a * rsqrte(a) * rsqrte(a))
++     sqrt(a)  =  a * rsqrte(a) * (1.5 - 0.5 * a * rsqrte(a) * rsqrte(a))  */
++
++  a = force_reg (mode, a);
++
++  /* x0 = rsqrt(a) estimate.  */
++  emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, a),
++					      unspec)));
++
++  /* If (a == 0.0) Filter out infinity to prevent NaN for sqrt(0.0).  */
++  if (!recip)
++    {
++      rtx zero = force_reg (mode, CONST0_RTX (mode));
++
++      if (VECTOR_MODE_P (mode))
++	{
++	  machine_mode imode = related_int_vector_mode (mode).require ();
++	  rtx mask = gen_reg_rtx (imode);
++	  emit_insn (gen_rtx_SET (mask, gen_rtx_NE (imode, a, zero)));
++	  emit_insn (gen_rtx_SET (x0, gen_rtx_AND (mode, x0,
++						   gen_lowpart (mode, mask))));
++	}
++      else
++	{
++	  rtx target = emit_conditional_move (x0, { GT, a, zero, mode },
++					      x0, zero, mode, 0);
++	  if (target != x0)
++	    emit_move_insn (x0, target);
++	}
++    }
++
++  /* e0 = x0 * a  */
++  emit_insn (gen_rtx_SET (e0, gen_rtx_MULT (mode, x0, a)));
++  /* e1 = e0 * x0  */
++  emit_insn (gen_rtx_SET (e1, gen_rtx_MULT (mode, e0, x0)));
++
++  /* e2 = 1.5 - e1 * 0.5  */
++  mhalf = force_reg (mode, mhalf);
++  monehalf = force_reg (mode, monehalf);
++  emit_insn (gen_rtx_SET (e2, gen_rtx_FMA (mode,
++					   gen_rtx_NEG (mode, e1),
++							mhalf, monehalf)));
++
++  if (recip)
++    /* res = e2 * x0  */
++    emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, x0, e2)));
++  else
++    /* res = e2 * e0  */
++    emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e2, e0)));
++}
++
++/* Use recipe instruction and Newton-Raphson to compute the approximation of
++   a single precision floating point divide.  */
++
++void loongarch_emit_swdivsf (rtx res, rtx a, rtx b, machine_mode mode)
++{
++  rtx x0, e0, mtwo;
++  REAL_VALUE_TYPE r;
++  x0 = gen_reg_rtx (mode);
++  e0 = gen_reg_rtx (mode);
++  int unspec = UNSPEC_RECIPE;
++
++  real_arithmetic (&r, ABS_EXPR, &dconst2, NULL);
++  mtwo = const_double_from_real_value (r, SFmode);
++
++  if (VECTOR_MODE_P (mode))
++    {
++      mtwo = loongarch_build_const_vector (mode, true, mtwo);
++      unspec = GET_MODE_SIZE (mode) == 32 ? UNSPEC_LASX_XVFRECIPE
++					  : UNSPEC_LSX_VFRECIPE;
++    }
++
++  mtwo = force_reg (mode, mtwo);
++
++  /* a / b = a * recipe(b) * (2.0 - b * recipe(b))  */
++
++  /* x0 = 1./b estimate.  */
++  emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
++					      unspec)));
++  /* 2.0 - b * x0  */
++  emit_insn (gen_rtx_SET (e0, gen_rtx_FMA (mode,
++					   gen_rtx_NEG (mode, b), x0, mtwo)));
++
++  /* x0 = a * x0  */
++  if (a != CONST1_RTX (mode))
++    emit_insn (gen_rtx_SET (x0, gen_rtx_MULT (mode, a, x0)));
++
++  /* res = e0 * x0  */
++  emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e0, x0)));
++}
++
+ static bool
+ loongarch_builtin_support_vector_misalignment (machine_mode mode,
+ 					       const_tree type,
+@@ -11665,6 +11850,9 @@ loongarch_asm_code_end (void)
+ #define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES \
+   loongarch_autovectorize_vector_modes
+ 
++#undef TARGET_OPTAB_SUPPORTED_P
++#define TARGET_OPTAB_SUPPORTED_P loongarch_optab_supported_p
++
+ #undef TARGET_INIT_BUILTINS
+ #define TARGET_INIT_BUILTINS loongarch_init_builtins
+ #undef TARGET_BUILTIN_DECL
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 8b28be0e4..fbc0f53e4 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -702,6 +702,24 @@ enum reg_class
+    && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT		\
+        || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT))
+ 
++#define RECIP_MASK_NONE         0x00
++#define RECIP_MASK_DIV          0x01
++#define RECIP_MASK_SQRT         0x02
++#define RECIP_MASK_RSQRT        0x04
++#define RECIP_MASK_VEC_DIV      0x08
++#define RECIP_MASK_VEC_SQRT     0x10
++#define RECIP_MASK_VEC_RSQRT    0x20
++#define RECIP_MASK_ALL (RECIP_MASK_DIV | RECIP_MASK_SQRT \
++			| RECIP_MASK_RSQRT | RECIP_MASK_VEC_SQRT \
++			| RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_RSQRT)
++
++#define TARGET_RECIP_DIV        ((recip_mask & RECIP_MASK_DIV) != 0 || TARGET_uARCH_LA664)
++#define TARGET_RECIP_SQRT       ((recip_mask & RECIP_MASK_SQRT) != 0 || TARGET_uARCH_LA664)
++#define TARGET_RECIP_RSQRT      ((recip_mask & RECIP_MASK_RSQRT) != 0 || TARGET_uARCH_LA664)
++#define TARGET_RECIP_VEC_DIV    ((recip_mask & RECIP_MASK_VEC_DIV) != 0 || TARGET_uARCH_LA664)
++#define TARGET_RECIP_VEC_SQRT   ((recip_mask & RECIP_MASK_VEC_SQRT) != 0 || TARGET_uARCH_LA664)
++#define TARGET_RECIP_VEC_RSQRT  ((recip_mask & RECIP_MASK_VEC_RSQRT) != 0 || TARGET_uARCH_LA664)
++
+ /* 1 if N is a possible register number for function argument passing.
+    We have no FP argument registers when soft-float.  */
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 4dfe583e2..c6edd1dda 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -893,9 +893,21 @@
+ ;; Float division and modulus.
+ (define_expand "div3"
+   [(set (match_operand:ANYF 0 "register_operand")
+-	(div:ANYF (match_operand:ANYF 1 "reg_or_1_operand")
+-		  (match_operand:ANYF 2 "register_operand")))]
+-  "")
++    (div:ANYF (match_operand:ANYF 1 "reg_or_1_operand")
++	      (match_operand:ANYF 2 "register_operand")))]
++  ""
++{
++  if (mode == SFmode
++    && TARGET_RECIP_DIV
++    && optimize_insn_for_speed_p ()
++    && flag_finite_math_only && !flag_trapping_math
++    && flag_unsafe_math_optimizations)
++  {
++    loongarch_emit_swdivsf (operands[0], operands[1],
++	operands[2], SFmode);
++    DONE;
++  }
++})
+ 
+ (define_insn "*div3"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+@@ -1126,7 +1138,23 @@
+ ;;
+ ;;  ....................
+ 
+-(define_insn "sqrt2"
++(define_expand "sqrt2"
++  [(set (match_operand:ANYF 0 "register_operand")
++    (sqrt:ANYF (match_operand:ANYF 1 "register_operand")))]
++  ""
++ {
++  if (mode == SFmode
++      && TARGET_RECIP_SQRT
++      && flag_unsafe_math_optimizations
++      && !optimize_insn_for_size_p ()
++      && flag_finite_math_only && !flag_trapping_math)
++    {
++      loongarch_emit_swrsqrtsf (operands[0], operands[1], SFmode, 0);
++      DONE;
++    }
++ })
++
++(define_insn "*sqrt2"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+ 	(sqrt:ANYF (match_operand:ANYF 1 "register_operand" "f")))]
+   ""
+@@ -1135,6 +1163,19 @@
+    (set_attr "mode" "")
+    (set_attr "insn_count" "1")])
+ 
++(define_expand "rsqrt2"
++  [(set (match_operand:ANYF 0 "register_operand")
++    (unspec:ANYF [(match_operand:ANYF 1 "register_operand")]
++	   UNSPEC_RSQRT))]
++  "TARGET_HARD_FLOAT"
++{
++   if (mode == SFmode && TARGET_RECIP_RSQRT)
++     {
++       loongarch_emit_swrsqrtsf (operands[0], operands[1], SFmode, 1);
++       DONE;
++     }
++})
++
+ (define_insn "*rsqrt2"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+     (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index e7bc8bed4..56f6a9564 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -31,6 +31,9 @@ config/loongarch/loongarch-opts.h
+ HeaderInclude
+ config/loongarch/loongarch-str.h
+ 
++TargetVariable
++unsigned int recip_mask = 0
++
+ ; ISA related options
+ ;; Base ISA
+ Enum
+@@ -202,6 +205,14 @@ mexplicit-relocs
+ Target Var(la_opt_explicit_relocs_backward) Init(M_OPT_UNSET)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
++mrecip
++Target RejectNegative Var(loongarch_recip)
++Generate approximate reciprocal divide and square root for better throughput.
++
++mrecip=
++Target RejectNegative Joined Var(loongarch_recip_name)
++Control generation of reciprocal estimates.
++
+ ; The code model option names for -mcmodel.
+ Enum
+ Name(cmodel) Type(int)
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index f2774f021..dbdb42301 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -1083,7 +1083,25 @@
+   [(set_attr "type" "simd_fmul")
+    (set_attr "mode" "")])
+ 
+-(define_insn "div3"
++(define_expand "div3"
++  [(set (match_operand:FLSX 0 "register_operand")
++    (div:FLSX (match_operand:FLSX 1 "reg_or_vecotr_1_operand")
++	      (match_operand:FLSX 2 "register_operand")))]
++  "ISA_HAS_LSX"
++{
++  if (mode == V4SFmode
++    && TARGET_RECIP_VEC_DIV
++    && optimize_insn_for_speed_p ()
++    && flag_finite_math_only && !flag_trapping_math
++    && flag_unsafe_math_optimizations)
++  {
++    loongarch_emit_swdivsf (operands[0], operands[1],
++	operands[2], V4SFmode);
++    DONE;
++  }
++})
++
++(define_insn "*div3"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+ 	(div:FLSX (match_operand:FLSX 1 "register_operand" "f")
+ 		  (match_operand:FLSX 2 "register_operand" "f")))]
+@@ -1112,7 +1130,23 @@
+   [(set_attr "type" "simd_fmadd")
+    (set_attr "mode" "")])
+ 
+-(define_insn "sqrt2"
++(define_expand "sqrt2"
++  [(set (match_operand:FLSX 0 "register_operand")
++    (sqrt:FLSX (match_operand:FLSX 1 "register_operand")))]
++  "ISA_HAS_LSX"
++{
++  if (mode == V4SFmode
++      && TARGET_RECIP_VEC_SQRT
++      && flag_unsafe_math_optimizations
++      && optimize_insn_for_speed_p ()
++      && flag_finite_math_only && !flag_trapping_math)
++    {
++      loongarch_emit_swrsqrtsf (operands[0], operands[1], V4SFmode, 0);
++      DONE;
++    }
++})
++
++(define_insn "*sqrt2"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+ 	(sqrt:FLSX (match_operand:FLSX 1 "register_operand" "f")))]
+   "ISA_HAS_LSX"
+@@ -1559,7 +1593,20 @@
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+ 
+-(define_insn "rsqrt2"
++(define_expand "rsqrt2"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++    (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++	     UNSPEC_LSX_VFRSQRT))]
++ "ISA_HAS_LSX"
++{
++ if (mode == V4SFmode && TARGET_RECIP_VEC_RSQRT)
++   {
++     loongarch_emit_swrsqrtsf (operands[0], operands[1], V4SFmode, 1);
++     DONE;
++   }
++})
++
++(define_insn "*rsqrt2"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+     (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+ 		 UNSPEC_LSX_VFRSQRT))]
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 572550dbc..88e54c915 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -235,6 +235,10 @@
+   (ior (match_operand 0 "const_1_operand")
+        (match_operand 0 "register_operand")))
+ 
++(define_predicate "reg_or_vecotr_1_operand"
++  (ior (match_operand 0 "const_vector_1_operand")
++       (match_operand 0 "register_operand")))
++
+ ;; These are used in vec_merge, hence accept bitmask as const_int.
+ (define_predicate "const_exp_2_operand"
+   (and (match_code "const_int")
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 168f3d0db..76a8f20d1 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1008,7 +1008,8 @@ Objective-C and Objective-C++ Dialects}.
+ -mmax-inline-memcpy-size=@var{n} @gol
+ -mexplicit-relocs -mno-explicit-relocs @gol
+ -mdirect-extern-access -mno-direct-extern-access @gol
+--mcmodel=@var{code-model}}
++-mcmodel=@var{code-model} -mrelax -mpass-mrelax-to-as @gol
++-mrecip  -mrecip=@var{opt}}
+ 
+ @emph{M32R/D Options}
+ @gccoptlist{-m32r2  -m32rx  -m32r @gol
+@@ -24633,6 +24634,58 @@ kernels, executables linked with @option{-static} or @option{-static-pie}.
+ @option{-mdirect-extern-access} is not compatible with @option{-fPIC} or
+ @option{-fpic}.
+ 
++@opindex mrecip
++@item -mrecip
++This option enables use of the reciprocal estimate and reciprocal square
++root estimate instructions with additional Newton-Raphson steps to increase
++precision instead of doing a divide or square root and divide for
++floating-point arguments.
++These instructions are generated only when @option{-funsafe-math-optimizations}
++is enabled together with @option{-ffinite-math-only} and
++@option{-fno-trapping-math}.
++This option is off by default. Before you can use this option, you must make sure the
++target CPU supports frecipe and frsqrte instructions.
++Note that while the throughput of the sequence is higher than the throughput of
++the non-reciprocal instruction, the precision of the sequence can be decreased
++by up to 2 ulp (i.e. the inverse of 1.0 equals 0.99999994).
++
++@opindex mrecip=opt
++@item -mrecip=@var{opt}
++This option controls which reciprocal estimate instructions
++may be used.  @var{opt} is a comma-separated list of options, which may
++be preceded by a @samp{!} to invert the option:
++@table @samp
++@item all
++Enable all estimate instructions.
++
++@item default
++Enable the default instructions, equivalent to @option{-mrecip}.
++
++@item none
++Disable all estimate instructions, equivalent to @option{-mno-recip}.
++
++@item div
++Enable the approximation for scalar division.
++
++@item vec-div
++Enable the approximation for vectorized division.
++
++@item sqrt
++Enable the approximation for scalar square root.
++
++@item vec-sqrt
++Enable the approximation for vectorized square root.
++
++@item rsqrt
++Enable the approximation for scalar reciprocal square root.
++
++@item vec-rsqrt
++Enable the approximation for vectorized reciprocal square root.
++@end table
++
++So, for example, @option{-mrecip=all,!sqrt} enables
++all of the reciprocal approximations, except for scalar square root.
++
+ @item loongarch-vect-unroll-limit
+ The vectorizer will use available tuning information to determine whether it
+ would be beneficial to unroll the main vectorized loop and by how much.  This
+diff --git a/gcc/testsuite/gcc.target/loongarch/divf.c b/gcc/testsuite/gcc.target/loongarch/divf.c
+new file mode 100644
+index 000000000..6c831817c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/divf.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mfrecipe -fno-unsafe-math-optimizations" } */
++/* { dg-final { scan-assembler "fdiv.s" } } */
++/* { dg-final { scan-assembler-not "frecipe.s" } } */
++
++float
++foo(float a, float b)
++{
++  return a / b;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/recip-divf.c b/gcc/testsuite/gcc.target/loongarch/recip-divf.c
+new file mode 100644
+index 000000000..db5e3e488
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/recip-divf.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mfrecipe" } */
++/* { dg-final { scan-assembler "frecipe.s" } } */
++
++float
++foo(float a, float b)
++{
++  return a / b;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/recip-sqrtf.c b/gcc/testsuite/gcc.target/loongarch/recip-sqrtf.c
+new file mode 100644
+index 000000000..7f45db6cd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/recip-sqrtf.c
+@@ -0,0 +1,23 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mfrecipe" } */
++/* { dg-final { scan-assembler-times "frsqrte.s" 3 } } */
++
++extern float sqrtf (float);
++
++float
++foo1 (float a, float b)
++{
++  return a/sqrtf(b);
++}
++
++float
++foo2 (float a, float b)
++{
++  return sqrtf(a/b);
++}
++
++float
++foo3 (float a)
++{
++  return sqrtf(a);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/sqrtf.c b/gcc/testsuite/gcc.target/loongarch/sqrtf.c
+new file mode 100644
+index 000000000..c2720faac
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/sqrtf.c
+@@ -0,0 +1,24 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mfrecipe -fno-unsafe-math-optimizations" } */
++/* { dg-final { scan-assembler-times "fsqrt.s" 3 } } */
++/* { dg-final { scan-assembler-not "frsqrte.s" } } */
++
++extern float sqrtf (float);
++
++float
++foo1 (float a, float b)
++{
++  return a/sqrtf(b);
++}
++
++float
++foo2 (float a, float b)
++{
++  return sqrtf(a/b);
++}
++
++float
++foo3 (float a)
++{
++  return sqrtf(a);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-divf.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-divf.c
+new file mode 100644
+index 000000000..748a82200
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-divf.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mrecip -mlasx -mfrecipe -fno-unsafe-math-optimizations" } */
++/* { dg-final { scan-assembler "xvfdiv.s" } } */
++/* { dg-final { scan-assembler-not "xvfrecipe.s" } } */
++
++float a[8],b[8],c[8];
++
++void 
++foo ()
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = a[i] / b[i];
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-divf.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-divf.c
+new file mode 100644
+index 000000000..6532756f0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-divf.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mlasx -mfrecipe" } */
++/* { dg-final { scan-assembler "xvfrecipe.s" } } */
++
++float a[8],b[8],c[8];
++
++void
++foo ()
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = a[i] / b[i];
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-sqrtf.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-sqrtf.c
+new file mode 100644
+index 000000000..a623dff8f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip-sqrtf.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mlasx -mfrecipe" } */
++/* { dg-final { scan-assembler-times "xvfrsqrte.s" 3 } } */
++
++float a[8], b[8], c[8];
++
++extern float sqrtf (float);
++
++void
++foo1 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = a[i] / sqrtf (b[i]);
++}
++
++void
++foo2 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = sqrtf (a[i] / b[i]);
++}
++
++void
++foo3 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = sqrtf (a[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip.c
+new file mode 100644
+index 000000000..083c86840
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-recip.c
+@@ -0,0 +1,24 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler "xvfrecip.s" } } */
++/* { dg-final { scan-assembler "xvfrecip.d" } } */
++/* { dg-final { scan-assembler-not "xvfdiv.s" } } */
++/* { dg-final { scan-assembler-not "xvfdiv.d" } } */
++
++float a[8], b[8];
++
++void 
++foo1(void)
++{
++  for (int i = 0; i < 8; i++)
++    a[i] = 1 / (b[i]);
++}
++
++double da[4], db[4];
++
++void
++foo2(void)
++{
++  for (int i = 0; i < 4; i++)
++    da[i] = 1 / (db[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sqrtf.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sqrtf.c
+new file mode 100644
+index 000000000..a005a3886
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-sqrtf.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -fno-unsafe-math-optimizations  -mrecip -mlasx -mfrecipe" } */
++/* { dg-final { scan-assembler-times "xvfsqrt.s" 3 } } */
++/* { dg-final { scan-assembler-not "xvfrsqrte.s" } } */
++
++float a[8], b[8], c[8];
++
++extern float sqrtf (float);
++
++void
++foo1 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = a[i] / sqrtf (b[i]);
++}
++
++void
++foo2 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = sqrtf (a[i] / b[i]);
++}
++
++void
++foo3 (void)
++{
++  for (int i = 0; i < 8; i++)
++    c[i] = sqrtf (a[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-divf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-divf.c
+new file mode 100644
+index 000000000..1219b1ef8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-divf.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mlsx -mfrecipe -fno-unsafe-math-optimizations" } */
++/* { dg-final { scan-assembler "vfdiv.s" } } */
++/* { dg-final { scan-assembler-not "vfrecipe.s" } } */
++
++float a[4],b[4],c[4];
++
++void
++foo ()
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = a[i] / b[i];
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-divf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-divf.c
+new file mode 100644
+index 000000000..edbe8d909
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-divf.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mlsx -mfrecipe" } */
++/* { dg-final { scan-assembler "vfrecipe.s" } } */
++
++float a[4],b[4],c[4];
++
++void
++foo ()
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = a[i] / b[i];
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-sqrtf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-sqrtf.c
+new file mode 100644
+index 000000000..d356f915e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip-sqrtf.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mlsx -mfrecipe" } */
++/* { dg-final { scan-assembler-times "vfrsqrte.s" 3 } } */
++
++float a[4], b[4], c[4];
++
++extern float sqrtf (float);
++
++void
++foo1 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = a[i] / sqrtf (b[i]);
++}
++
++void
++foo2 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = sqrtf (a[i] / b[i]);
++}
++
++void
++foo3 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = sqrtf (a[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip.c
+new file mode 100644
+index 000000000..c4d6af4db
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-recip.c
+@@ -0,0 +1,24 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler "vfrecip.s" } } */
++/* { dg-final { scan-assembler "vfrecip.d" } } */
++/* { dg-final { scan-assembler-not "vfdiv.s" } } */
++/* { dg-final { scan-assembler-not "vfdiv.d" } } */
++
++float a[4], b[4];
++
++void
++foo1(void)
++{
++  for (int i = 0; i < 4; i++)
++    a[i] = 1 / (b[i]);
++}
++
++double da[2], db[2];
++
++void
++foo2(void)
++{
++  for (int i = 0; i < 2; i++)
++    da[i] = 1 / (db[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sqrtf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sqrtf.c
+new file mode 100644
+index 000000000..3ff6570a6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-sqrtf.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mrecip -mlsx -mfrecipe -fno-unsafe-math-optimizations" } */
++/* { dg-final { scan-assembler-times "vfsqrt.s" 3 } } */
++/* { dg-final { scan-assembler-not "vfrsqrte.s" } } */
++
++float a[4], b[4], c[4];
++
++extern float sqrtf (float);
++
++void
++foo1 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = a[i] / sqrtf (b[i]);
++}
++
++void
++foo2 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = sqrtf (a[i] / b[i]);
++}
++
++void
++foo3 (void)
++{
++  for (int i = 0; i < 4; i++)
++    c[i] = sqrtf (a[i]);
++}
+-- 
+2.43.0
+
diff --git a/0063-LoongArch-Vectorized-loop-unrolling-is-disable-for-d.patch b/0063-LoongArch-Vectorized-loop-unrolling-is-disable-for-d.patch
new file mode 100644
index 0000000000000000000000000000000000000000..752d5a44559f6aefee280bc2393268078ff45477
--- /dev/null
+++ b/0063-LoongArch-Vectorized-loop-unrolling-is-disable-for-d.patch
@@ -0,0 +1,83 @@
+From bb211ae35474a9fa1a8189f0a4c525ce3d8c280e Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Wed, 6 Dec 2023 15:04:53 +0800
+Subject: [PATCH 063/188] LoongArch: Vectorized loop unrolling is disable for
+ divf/sqrtf/rsqrtf when -mrecip is enabled.
+
+Using -mrecip generates a sequence of instructions to replace divf, sqrtf and rsqrtf. The number
+of generated instructions is close to or exceeds the maximum issue instructions per cycle of the
+LoongArch, so vectorized loop unrolling is not performed on them.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_vector_costs::determine_suggested_unroll_factor):
+	If m_has_recip is true, return an unroll factor of 1.
+	(loongarch_vector_costs::add_stmt_cost): Detect the use of approximate instruction sequence.
+---
+ gcc/config/loongarch/loongarch.cc | 36 +++++++++++++++++++++++++++++--
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 18326ce47..d64777179 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3970,7 +3970,9 @@ protected:
+   /* Reduction factor for suggesting unroll factor.  */
+   unsigned m_reduc_factor = 0;
+   /* True if the loop contains an average operation. */
+-  bool m_has_avg =false;
++  bool m_has_avg = false;
++  /* True if the loop uses approximation instruction sequence.  */
++  bool m_has_recip = false;
+ };
+ 
+ /* Implement TARGET_VECTORIZE_CREATE_COSTS.  */
+@@ -4017,7 +4019,7 @@ loongarch_vector_costs::determine_suggested_unroll_factor (loop_vec_info loop_vi
+ {
+   class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
+ 
+-  if (m_has_avg)
++  if (m_has_avg || m_has_recip)
+     return 1;
+ 
+   /* Don't unroll if it's specified explicitly not to be unrolled.  */
+@@ -4077,6 +4079,36 @@ loongarch_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
+ 	}
+     }
+ 
++  combined_fn cfn;
++  if (kind == vector_stmt
++      && stmt_info
++      && stmt_info->stmt)
++    {
++      /* Detect the use of approximate instruction sequence.  */
++      if ((TARGET_RECIP_VEC_SQRT || TARGET_RECIP_VEC_RSQRT)
++	  && (cfn = gimple_call_combined_fn (stmt_info->stmt)) != CFN_LAST)
++	switch (cfn)
++	  {
++	  case CFN_BUILT_IN_SQRTF:
++	    m_has_recip = true;
++	  default:
++	    break;
++	  }
++      else if (TARGET_RECIP_VEC_DIV
++	       && gimple_code (stmt_info->stmt) == GIMPLE_ASSIGN)
++	{
++	  machine_mode mode = TYPE_MODE (vectype);
++	  switch (gimple_assign_rhs_code (stmt_info->stmt))
++	    {
++	    case RDIV_EXPR:
++	      if (GET_MODE_INNER (mode) == SFmode)
++		m_has_recip = true;
++	    default:
++	      break;
++	    }
++	}
++    }
++
+   return retval;
+ }
+ 
+-- 
+2.43.0
+
diff --git a/0064-LoongArch-Fix-lsx-vshuf.c-and-lasx-xvshuf_b.c-tests-.patch b/0064-LoongArch-Fix-lsx-vshuf.c-and-lasx-xvshuf_b.c-tests-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cc4b80adac07a813af6d1eaad8e158cc021038ed
--- /dev/null
+++ b/0064-LoongArch-Fix-lsx-vshuf.c-and-lasx-xvshuf_b.c-tests-.patch
@@ -0,0 +1,130 @@
+From 6ca9670e02a7d3f939b1a75f7b5a9094cd1db909 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Fri, 25 Oct 2024 02:45:35 +0000
+Subject: [PATCH 064/188] LoongArch: Fix lsx-vshuf.c and lasx-xvshuf_b.c tests
+ fail on  LA664 [PR112611]
+
+For [x]vshuf instructions, if the index value in the selector exceeds 63, it triggers
+undefined behavior on LA464, but not on LA664. To ensure compatibility of these two
+tests on both LA464 and LA664, we have modified both tests to ensure that the index
+value in the selector does not exceed 63.
+
+gcc/testsuite/ChangeLog:
+
+        PR target/112611
+        * gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c: Make sure the index is less than 64.
+        * gcc.target/loongarch/vector/lsx/lsx-vshuf.c: Ditto.
+---
+ .../loongarch/vector/lasx/lasx-xvshuf_b.c          | 14 +++++++-------
+ .../gcc.target/loongarch/vector/lsx/lsx-vshuf.c    | 12 ++++++------
+ 2 files changed, 13 insertions(+), 13 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+index b8ab38711..910d29339 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+@@ -99,9 +99,9 @@ main ()
+   *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
+   *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
+   *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x3f11010000000000;
+   *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x3f11010000000000;
+   *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+@@ -200,7 +200,7 @@ main ()
+   *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
+   *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+@@ -351,7 +351,7 @@ main ()
+   *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
+   *((unsigned long *)&__m256i_op2[0]) = 0x00000000012e2110;
+   *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[1]) = 0x00000000012e2110;
+   *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+@@ -426,10 +426,10 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0x8000000080000000;
+   *((unsigned long *)&__m256i_op2[1]) = 0xdfffffffdfffffff;
+   *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xdfffffff80000000;
+   *((unsigned long *)&__m256i_result[2]) = 0x7fc00000dfffffff;
+-  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fc0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+index f3b800f88..93a3078fa 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+@@ -33,7 +33,7 @@ main ()
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[0]) = 0x3f2f1f0f00000000;
+   *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00000000;
+   __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+@@ -153,7 +153,7 @@ main ()
+   *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461;
+   *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff;
+   *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00007fff00000000;
+   *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
+   __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+@@ -198,7 +198,7 @@ main ()
+   *((unsigned long *)&__m128i_op2[1]) = 0x00000000000000c0;
+   *((unsigned long *)&__m128i_op2[0]) = 0x00000001ffffff29;
+   *((unsigned long *)&__m128i_result[1]) = 0xffffff29ffffff29;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff2900000001;
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+@@ -219,7 +219,7 @@ main ()
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000020000020;
+   *((unsigned long *)&__m128i_op2[0]) = 0x0000000020000020;
+-  *((unsigned long *)&__m128i_result[1]) = 0x2000002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_result[0]) = 0x2000002020000020;
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+@@ -241,7 +241,7 @@ main ()
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010;
+   *((unsigned long *)&__m128i_op2[1]) = 0x8000000100000000;
+   *((unsigned long *)&__m128i_op2[0]) = 0x8000000000000103;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000010300000103;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000103;
+   *((unsigned long *)&__m128i_result[0]) = 0x0000010380000001;
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+@@ -252,7 +252,7 @@ main ()
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
+   *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+-- 
+2.43.0
+
diff --git a/0065-LoongArch-Fix-ICE-and-use-simplify_gen_subreg-instea.patch b/0065-LoongArch-Fix-ICE-and-use-simplify_gen_subreg-instea.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6e497896ce892205bf844b4dc4bdf8de8e960474
--- /dev/null
+++ b/0065-LoongArch-Fix-ICE-and-use-simplify_gen_subreg-instea.patch
@@ -0,0 +1,318 @@
+From 87396b4550eeb097cdbe73fb19c84059ba6bb85e Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Wed, 29 Nov 2023 11:18:00 +0800
+Subject: [PATCH 065/188] LoongArch: Fix ICE and use simplify_gen_subreg
+ instead of gen_rtx_SUBREG directly.
+
+loongarch_expand_vec_cond_mask_expr generates 'subreg's of 'subreg's, which are not supported
+in gcc, it causes an ICE:
+
+ice.c:55:1: error: unrecognizable insn:
+   55 | }
+      | ^
+(insn 63 62 64 8 (set (reg:V4DI 278)
+        (subreg:V4DI (subreg:V4DF (reg:V4DI 273 [ vect__53.26 ]) 0) 0)) -1
+     (nil))
+during RTL pass: vregs
+ice.c:55:1: internal compiler error: in extract_insn, at recog.cc:2804
+
+Last time, Ruoyao has fixed a similar ICE:
+https://gcc.gnu.org/pipermail/gcc-patches/2023-November/636156.html
+
+This patch fixes ICE and use simplify_gen_subreg instead of gen_rtx_SUBREG as much as possible
+to avoid the same ice happening again.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_try_expand_lsx_vshuf_const): Use
+	simplify_gen_subreg instead of gen_rtx_SUBREG.
+	(loongarch_expand_vec_perm_const_2): Ditto.
+	(loongarch_expand_vec_cond_expr): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/pr112476-3.c: New test.
+	* gcc.target/loongarch/pr112476-4.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 79 +++++++++++--------
+ .../gcc.target/loongarch/pr112476-3.c         | 58 ++++++++++++++
+ .../gcc.target/loongarch/pr112476-4.c         |  4 +
+ 3 files changed, 108 insertions(+), 33 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr112476-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr112476-4.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index d64777179..4a3a7a246 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -8824,13 +8824,13 @@ loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
+       if (d->vmode == E_V2DFmode)
+ 	{
+ 	  sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm));
+-	  tmp = gen_rtx_SUBREG (E_V2DImode, d->target, 0);
++	  tmp = simplify_gen_subreg (E_V2DImode, d->target, d->vmode, 0);
+ 	  emit_move_insn (tmp, sel);
+ 	}
+       else if (d->vmode == E_V4SFmode)
+ 	{
+ 	  sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm));
+-	  tmp = gen_rtx_SUBREG (E_V4SImode, d->target, 0);
++	  tmp = simplify_gen_subreg (E_V4SImode, d->target, d->vmode, 0);
+ 	  emit_move_insn (tmp, sel);
+ 	}
+       else
+@@ -9614,8 +9614,8 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	  /* Adjust op1 for selecting correct value in high 128bit of target
+ 	     register.
+ 	     op1: E_V4DImode, { 4, 5, 6, 7 } -> { 2, 3, 4, 5 }.  */
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
+ 	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+ 					      conv_op0, GEN_INT (0x21)));
+ 
+@@ -9644,8 +9644,8 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	  emit_move_insn (op0_alt, d->op0);
+ 
+ 	  /* Generate subreg for fitting into insn gen function.  */
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
+ 
+ 	  /* Adjust op value in temp register.
+ 	     op0 = {0,1,2,3}, op1 = {4,5,0,1}  */
+@@ -9691,9 +9691,10 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	  emit_move_insn (op1_alt, d->op1);
+ 	  emit_move_insn (op0_alt, d->op0);
+ 
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
+-	  rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
++	  rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target,
++						 d->vmode, 0);
+ 
+ 	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+ 					      conv_op0, GEN_INT (0x02)));
+@@ -9725,9 +9726,10 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	 Selector sample: E_V4DImode, { 0, 1, 4 ,5 }  */
+       if (!d->testing_p)
+ 	{
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
+-	  rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
++	  rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target,
++						 d->vmode, 0);
+ 
+ 	  /* We can achieve the expectation by using sinple xvpermi.q insn.  */
+ 	  emit_move_insn (conv_target, conv_op1);
+@@ -9752,8 +9754,8 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	  emit_move_insn (op1_alt, d->op1);
+ 	  emit_move_insn (op0_alt, d->op0);
+ 
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
+ 	  /* Adjust op value in temp regiter.
+ 	     op0 = { 0, 1, 2, 3 }, op1 = { 6, 7, 2, 3 }  */
+ 	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+@@ -9797,9 +9799,10 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	  emit_move_insn (op1_alt, d->op1);
+ 	  emit_move_insn (op0_alt, d->op0);
+ 
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
+-	  rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
++	  rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target,
++						 d->vmode, 0);
+ 
+ 	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+ 					      conv_op0, GEN_INT (0x13)));
+@@ -9831,10 +9834,11 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	 Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 }  */
+       if (!d->testing_p)
+ 	{
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
+ 	  rtx temp_reg = gen_reg_rtx (d->vmode);
+-	  rtx conv_temp = gen_rtx_SUBREG (E_V4DImode, temp_reg, 0);
++	  rtx conv_temp = simplify_gen_subreg (E_V4DImode, temp_reg,
++					       d->vmode, 0);
+ 
+ 	  emit_move_insn (temp_reg, d->op0);
+ 
+@@ -9943,9 +9947,11 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	  emit_move_insn (op0_alt, d->op0);
+ 	  emit_move_insn (op1_alt, d->op1);
+ 
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
+-	  rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
+-	  rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
++	  rtx conv_op0a = simplify_gen_subreg (E_V4DImode, op0_alt,
++					       d->vmode, 0);
++	  rtx conv_op1a = simplify_gen_subreg (E_V4DImode, op1_alt,
++					       d->vmode, 0);
+ 
+ 	  /* Duplicate op0's low 128bit in op0, then duplicate high 128bit
+ 	     in op1.  After this, xvshuf.* insn's selector argument can
+@@ -9978,10 +9984,12 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	  emit_move_insn (op0_alt, d->op0);
+ 	  emit_move_insn (op1_alt, d->op1);
+ 
+-	  rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
+-	  rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
+-	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
+-	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0);
++	  rtx conv_op0a = simplify_gen_subreg (E_V4DImode, op0_alt,
++					       d->vmode, 0);
++	  rtx conv_op1a = simplify_gen_subreg (E_V4DImode, op1_alt,
++					       d->vmode, 0);
++	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
++	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0);
+ 
+ 	  /* Reorganize op0's hi/lo 128bit and op1's hi/lo 128bit, to make sure
+ 	     that selector's low 128bit can access all op0's elements, and
+@@ -10101,12 +10109,12 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+     {
+     case E_V4DFmode:
+       sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt, rperm));
+-      tmp = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++      tmp = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0);
+       emit_move_insn (tmp, sel);
+       break;
+     case E_V8SFmode:
+       sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt, rperm));
+-      tmp = gen_rtx_SUBREG (E_V8SImode, d->target, 0);
++      tmp = simplify_gen_subreg (E_V8SImode, d->target, d->vmode, 0);
+       emit_move_insn (tmp, sel);
+       break;
+     default:
+@@ -10192,7 +10200,7 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+      64bit in target vector register.  */
+   else if (extract_ev_od)
+     {
+-      rtx converted = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++      rtx converted = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0);
+       emit_insn (gen_lasx_xvpermi_d_v4di (converted, converted,
+ 					  GEN_INT (0xD8)));
+     }
+@@ -11279,7 +11287,9 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode,
+ 	  if (mode != vimode)
+ 	    {
+ 	      xop1 = gen_reg_rtx (vimode);
+-	      emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0));
++	      emit_move_insn (xop1,
++			      simplify_gen_subreg (vimode, operands[1],
++						   mode, 0));
+ 	    }
+ 	  emit_move_insn (src1, xop1);
+ 	}
+@@ -11296,7 +11306,9 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode,
+ 	  if (mode != vimode)
+ 	    {
+ 	      xop2 = gen_reg_rtx (vimode);
+-	      emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0));
++	      emit_move_insn (xop2,
++			      simplify_gen_subreg (vimode, operands[2],
++						   mode, 0));
+ 	    }
+ 	  emit_move_insn (src2, xop2);
+ 	}
+@@ -11315,7 +11327,8 @@ loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode,
+ 			  gen_rtx_AND (vimode, mask, src1));
+       /* The result is placed back to a register with the mask.  */
+       emit_insn (gen_rtx_SET (mask, bsel));
+-      emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0));
++      emit_move_insn (operands[0],
++		      simplify_gen_subreg (mode, mask, vimode, 0));
+     }
+ }
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr112476-3.c b/gcc/testsuite/gcc.target/loongarch/pr112476-3.c
+new file mode 100644
+index 000000000..d696d4182
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr112476-3.c
+@@ -0,0 +1,58 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -mlsx" } */
++
++#include 
++
++typedef int8_t orc_int8;
++typedef int16_t orc_int16;
++typedef int32_t orc_int32;
++typedef int64_t orc_int64;
++
++typedef union
++{
++  orc_int32 i;
++  float f;
++  orc_int16 x2[2];
++  orc_int8 x4[4];
++} orc_union32;
++typedef union
++{
++  orc_int64 i;
++  double f;
++  orc_int32 x2[2];
++  float x2f[2];
++  orc_int16 x4[4];
++} orc_union64;
++
++void
++audio_orc_s32_to_double (double * restrict d1,
++    const signed int * restrict s1, int n)
++{
++  int i;
++  orc_union64 *restrict ptr0;
++  const orc_union32 *restrict ptr4;
++  orc_union32 var33;
++  orc_union64 var34;
++  orc_union64 var35;
++  orc_union64 var36;
++
++  ptr0 = (orc_union64 *) d1;
++  ptr4 = (orc_union32 *) s1;
++
++  var34.i = 0x41e0000000000000UL;
++
++  for (i = 0; i < n; i++) {
++    var33 = ptr4[i];
++    var36.f = var33.i;
++    {
++      orc_union64 _src1;
++      orc_union64 _src2;
++      orc_union64 _dest1;
++      _src1.i = ((var36.i) & ((((var36.i)&0x7ff0000000000000UL) == 0) ? 0xfff0000000000000UL : 0xffffffffffffffffUL));
++      _src2.i = ((var34.i) & ((((var34.i)&0x7ff0000000000000UL) == 0) ? 0xfff0000000000000UL : 0xffffffffffffffffUL));
++      _dest1.f = _src1.f / _src2.f;
++      var35.i = ((_dest1.i) & ((((_dest1.i)&0x7ff0000000000000UL) == 0) ? 0xfff0000000000000UL : 0xffffffffffffffffUL));
++    }
++    ptr0[i] = var35;
++  }
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr112476-4.c b/gcc/testsuite/gcc.target/loongarch/pr112476-4.c
+new file mode 100644
+index 000000000..955d98552
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr112476-4.c
+@@ -0,0 +1,4 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -mlasx" } */
++ 
++#include "pr112476-3.c"
+-- 
+2.43.0
+
diff --git a/0066-LoongArch-Fix-eh_return-epilogue-for-normal-returns.patch b/0066-LoongArch-Fix-eh_return-epilogue-for-normal-returns.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f418277912d85d2a5604d2c3e30ccf963ad65218
--- /dev/null
+++ b/0066-LoongArch-Fix-eh_return-epilogue-for-normal-returns.patch
@@ -0,0 +1,236 @@
+From 34088d0a8685defa97754b7ab5d90b9bc536cfaa Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Fri, 8 Dec 2023 18:01:18 +0800
+Subject: [PATCH 066/188] LoongArch: Fix eh_return epilogue for normal returns.
+
+On LoongArch, the regitsters $r4 - $r7 (EH_RETURN_DATA_REGNO) will be saved
+and restored in the function prologue and epilogue if the given function calls
+__builtin_eh_return.  This causes the return value to be overwritten on normal
+return paths and breaks a rare case of libgcc's _Unwind_RaiseException.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc: Do not restore the saved eh_return
+	data registers ($r4-$r7) for a normal return of a function that calls
+	__builtin_eh_return elsewhere.
+	* config/loongarch/loongarch-protos.h: Same.
+	* config/loongarch/loongarch.md: Same.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/eh_return-normal-return.c: New test.
+---
+ gcc/config/loongarch/loongarch-protos.h       |  2 +-
+ gcc/config/loongarch/loongarch.cc             | 34 ++++++++++++-----
+ gcc/config/loongarch/loongarch.md             | 23 ++++++++++-
+ .../loongarch/eh_return-normal-return.c       | 38 +++++++++++++++++++
+ 4 files changed, 84 insertions(+), 13 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/eh_return-normal-return.c
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 117669e9f..e5fcf3111 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -60,7 +60,7 @@ enum loongarch_symbol_type {
+ extern rtx loongarch_emit_move (rtx, rtx);
+ extern HOST_WIDE_INT loongarch_initial_elimination_offset (int, int);
+ extern void loongarch_expand_prologue (void);
+-extern void loongarch_expand_epilogue (bool);
++extern void loongarch_expand_epilogue (int);
+ extern bool loongarch_can_use_return_insn (void);
+ 
+ extern bool loongarch_symbolic_constant_p (rtx, enum loongarch_symbol_type *);
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 4a3a7a246..7caf04d8d 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1012,7 +1012,8 @@ loongarch_save_restore_reg (machine_mode mode, int regno, HOST_WIDE_INT offset,
+ 
+ static void
+ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset,
+-			      loongarch_save_restore_fn fn)
++			      loongarch_save_restore_fn fn,
++			      bool skip_eh_data_regs_p)
+ {
+   HOST_WIDE_INT offset;
+ 
+@@ -1021,7 +1022,14 @@ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset,
+   for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
+     if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
+       {
+-	if (!cfun->machine->reg_is_wrapped_separately[regno])
++	/* Special care needs to be taken for $r4-$r7 (EH_RETURN_DATA_REGNO)
++	   when returning normally from a function that calls
++	   __builtin_eh_return.  In this case, these registers are saved but
++	   should not be restored, or the return value may be clobbered.  */
++
++	if (!(cfun->machine->reg_is_wrapped_separately[regno]
++	      || (skip_eh_data_regs_p
++	      && GP_ARG_FIRST <= regno && regno < GP_ARG_FIRST + 4)))
+ 	  loongarch_save_restore_reg (word_mode, regno, offset, fn);
+ 
+ 	offset -= UNITS_PER_WORD;
+@@ -1294,7 +1302,7 @@ loongarch_expand_prologue (void)
+ 			    GEN_INT (-step1));
+       RTX_FRAME_RELATED_P (emit_insn (insn)) = 1;
+       size -= step1;
+-      loongarch_for_each_saved_reg (size, loongarch_save_reg);
++      loongarch_for_each_saved_reg (size, loongarch_save_reg, false);
+     }
+ 
+   /* Set up the frame pointer, if we're using one.  */
+@@ -1379,11 +1387,13 @@ loongarch_can_use_return_insn (void)
+   return reload_completed && cfun->machine->frame.total_size == 0;
+ }
+ 
+-/* Expand an "epilogue" or "sibcall_epilogue" pattern; SIBCALL_P
+-   says which.  */
++/* Expand function epilogue using the following insn patterns:
++   "epilogue"	      (style == NORMAL_RETURN)
++   "sibcall_epilogue" (style == SIBCALL_RETURN)
++   "eh_return"	      (style == EXCEPTION_RETURN) */
+ 
+ void
+-loongarch_expand_epilogue (bool sibcall_p)
++loongarch_expand_epilogue (int style)
+ {
+   /* Split the frame into two.  STEP1 is the amount of stack we should
+      deallocate before restoring the registers.  STEP2 is the amount we
+@@ -1400,7 +1410,8 @@ loongarch_expand_epilogue (bool sibcall_p)
+   bool need_barrier_p
+     = (get_frame_size () + cfun->machine->frame.arg_pointer_offset) != 0;
+ 
+-  if (!sibcall_p && loongarch_can_use_return_insn ())
++  /* Handle simple returns.  */
++  if (style == NORMAL_RETURN && loongarch_can_use_return_insn ())
+     {
+       emit_jump_insn (gen_return ());
+       return;
+@@ -1476,7 +1487,9 @@ loongarch_expand_epilogue (bool sibcall_p)
+ 
+   /* Restore the registers.  */
+   loongarch_for_each_saved_reg (frame->total_size - step2,
+-				loongarch_restore_reg);
++				loongarch_restore_reg,
++				crtl->calls_eh_return
++				&& style != EXCEPTION_RETURN);
+ 
+   if (need_barrier_p)
+     loongarch_emit_stack_tie ();
+@@ -1497,11 +1510,12 @@ loongarch_expand_epilogue (bool sibcall_p)
+     }
+ 
+   /* Add in the __builtin_eh_return stack adjustment.  */
+-  if (crtl->calls_eh_return)
++  if (crtl->calls_eh_return && style == EXCEPTION_RETURN)
+     emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
+ 			      EH_RETURN_STACKADJ_RTX));
+ 
+-  if (!sibcall_p)
++  /* Emit return unless doing sibcall.  */
++  if (style != SIBCALL_RETURN)
+     emit_jump_insn (gen_simple_return_internal (ra));
+ }
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index c6edd1dda..222f1ae83 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -125,6 +125,11 @@
+    (T1_REGNUM			13)
+    (S0_REGNUM			23)
+ 
++   ;; Return path styles
++   (NORMAL_RETURN		0)
++   (SIBCALL_RETURN		1)
++   (EXCEPTION_RETURN		2)
++
+    ;; PIC long branch sequences are never longer than 100 bytes.
+    (MAX_PIC_BRANCH_LENGTH	100)
+ ])
+@@ -3276,7 +3281,7 @@
+   [(const_int 2)]
+   ""
+ {
+-  loongarch_expand_epilogue (false);
++  loongarch_expand_epilogue (NORMAL_RETURN);
+   DONE;
+ })
+ 
+@@ -3284,7 +3289,7 @@
+   [(const_int 2)]
+   ""
+ {
+-  loongarch_expand_epilogue (true);
++  loongarch_expand_epilogue (SIBCALL_RETURN);
+   DONE;
+ })
+ 
+@@ -3341,6 +3346,20 @@
+     emit_insn (gen_eh_set_ra_di (operands[0]));
+   else
+     emit_insn (gen_eh_set_ra_si (operands[0]));
++
++  emit_jump_insn (gen_eh_return_internal ());
++  emit_barrier ();
++  DONE;
++})
++
++(define_insn_and_split "eh_return_internal"
++  [(eh_return)]
++  ""
++  "#"
++  "epilogue_completed"
++  [(const_int 0)]
++{
++  loongarch_expand_epilogue (EXCEPTION_RETURN);
+   DONE;
+ })
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/eh_return-normal-return.c b/gcc/testsuite/gcc.target/loongarch/eh_return-normal-return.c
+new file mode 100644
+index 000000000..f8f3965f8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/eh_return-normal-return.c
+@@ -0,0 +1,38 @@
++/* { dg-do run } */
++/* { dg-options "-O2" } */
++
++#include 
++
++int foo ()  __attribute__((noinline));
++int main ();
++
++int
++foo () {
++
++  int t;
++
++  /* prevent optimization using asm */
++  asm ("" : "=r" (t) : "0" (-1));
++  asm ("" : "=r" (t) : "0" (t ? 1 : 0));
++
++  if (t == 0)
++    /* never reached */
++    __builtin_eh_return (0, __builtin_return_address (0));
++
++  else if (t == 1)
++    /* return here */
++    return 202312;
++
++  else
++    /* never reached: prevent vrp optimization in main */
++    return 0;
++}
++
++int
++main ()
++{
++  if (foo() == 202312)
++    return 0; 
++  else
++    abort ();
++}
+-- 
+2.43.0
+
diff --git a/0067-LoongArch-Allow-mcmodel-extreme-and-model-attribute-.patch b/0067-LoongArch-Allow-mcmodel-extreme-and-model-attribute-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..84b07abd42d3e592f9ef6efc26b4076205cda70b
--- /dev/null
+++ b/0067-LoongArch-Allow-mcmodel-extreme-and-model-attribute-.patch
@@ -0,0 +1,180 @@
+From fdb51014f00094737459d5c9008630454ec7f342 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 7 Dec 2023 15:45:30 +0800
+Subject: [PATCH 067/188] LoongArch: Allow -mcmodel=extreme and model attribute
+ with -mexplicit-relocs=auto
+
+There seems no real reason to require -mexplicit-relocs=always for
+-mcmodel=extreme or model attribute.  As the linker does not know how to
+relax a 3-operand la.local or la.global pseudo instruction, just emit
+explicit relocs for SYMBOL_PCREL64, and under TARGET_CMODEL_EXTREME also
+SYMBOL_GOT_DISP.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_explicit_relocs_p):
+	Return true for SYMBOL_PCREL64.  Return true for SYMBOL_GOT_DISP
+	if TARGET_CMODEL_EXTREME.
+	(loongarch_split_symbol): Check for la_opt_explicit_relocs !=
+	EXPLICIT_RELOCS_NONE instead of TARGET_EXPLICIT_RELOCS.
+	(loongarch_print_operand_reloc): Likewise.
+	(loongarch_option_override_internal): Likewise.
+	(loongarch_handle_model_attribute): Likewise.
+	* doc/invoke.texi (-mcmodel=extreme): Update the compatibility
+	between it and -mexplicit-relocs=.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/attr-model-3.c: New test.
+	* gcc.target/loongarch/attr-model-4.c: New test.
+	* gcc.target/loongarch/func-call-extreme-3.c: New test.
+	* gcc.target/loongarch/func-call-extreme-4.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 25 ++++++++++++-------
+ gcc/doc/invoke.texi                           |  4 +--
+ .../gcc.target/loongarch/attr-model-3.c       |  6 +++++
+ .../gcc.target/loongarch/attr-model-4.c       |  6 +++++
+ .../loongarch/func-call-extreme-3.c           |  7 ++++++
+ .../loongarch/func-call-extreme-4.c           |  7 ++++++
+ 6 files changed, 44 insertions(+), 11 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-4.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 7caf04d8d..4362149ef 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1969,9 +1969,16 @@ loongarch_explicit_relocs_p (enum loongarch_symbol_type type)
+       case SYMBOL_TLS_LE:
+       case SYMBOL_TLSGD:
+       case SYMBOL_TLSLDM:
+-	/* The linker don't know how to relax TLS accesses.  */
++      case SYMBOL_PCREL64:
++	/* The linker don't know how to relax TLS accesses or 64-bit
++	   pc-relative accesses.  */
+ 	return true;
+       case SYMBOL_GOT_DISP:
++	/* The linker don't know how to relax GOT accesses in extreme
++	   code model.  */
++	if (TARGET_CMODEL_EXTREME)
++	  return true;
++
+ 	/* If we are performing LTO for a final link, and we have the
+ 	   linker plugin so we know the resolution of the symbols, then
+ 	   all GOT references are binding to external symbols or
+@@ -3134,7 +3141,7 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+ 
+   if (loongarch_symbol_extreme_p (symbol_type) && can_create_pseudo_p ())
+     {
+-      gcc_assert (TARGET_EXPLICIT_RELOCS);
++      gcc_assert (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE);
+ 
+       temp1 = gen_reg_rtx (Pmode);
+       emit_move_insn (temp1, gen_rtx_LO_SUM (Pmode, gen_rtx_REG (Pmode, 0),
+@@ -5933,7 +5940,7 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+     loongarch_classify_symbolic_expression (op);
+ 
+   if (loongarch_symbol_extreme_p (symbol_type))
+-    gcc_assert (TARGET_EXPLICIT_RELOCS);
++    gcc_assert (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE);
+ 
+   switch (symbol_type)
+     {
+@@ -7540,9 +7547,9 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   switch (la_target.cmodel)
+     {
+       case CMODEL_EXTREME:
+-	if (!TARGET_EXPLICIT_RELOCS)
+-	  error ("code model %qs needs %s",
+-		 "extreme", "-mexplicit-relocs=always");
++	if (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)
++	  error ("code model %qs is not compatible with %s",
++		 "extreme", "-mexplicit-relocs=none");
+ 
+ 	if (opts->x_flag_plt)
+ 	  {
+@@ -7908,11 +7915,11 @@ loongarch_handle_model_attribute (tree *node, tree name, tree arg, int,
+ 	  *no_add_attrs = true;
+ 	  return NULL_TREE;
+ 	}
+-      if (!TARGET_EXPLICIT_RELOCS)
++      if (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)
+ 	{
+ 	  error_at (DECL_SOURCE_LOCATION (decl),
+-		    "%qE attribute requires %s", name,
+-		    "-mexplicit-relocs=always");
++		    "%qE attribute is not compatible with %s", name,
++		    "-mexplicit-relocs=none");
+ 	  *no_add_attrs = true;
+ 	  return NULL_TREE;
+ 	}
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 76a8f20d1..5c6515cb1 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -24602,8 +24602,8 @@ The text segment and data segment must be within 2GB addressing space.
+ 
+ @item extreme
+ This mode does not limit the size of the code segment and data segment.
+-The @option{-mcmodel=extreme} option is incompatible with @option{-fplt} and
+-@option{-mno-explicit-relocs}.
++The @option{-mcmodel=extreme} option is incompatible with @option{-fplt}
++and/or @option{-mexplicit-relocs=none}.
+ @end table
+ The default code model is @code{normal}.
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-3.c b/gcc/testsuite/gcc.target/loongarch/attr-model-3.c
+new file mode 100644
+index 000000000..5622d5086
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-3.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-mexplicit-relocs=auto -mcmodel=normal -O2" } */
++/* { dg-final { scan-assembler-times "%pc64_hi12" 2 } } */
++
++#define ATTR_MODEL_TEST
++#include "attr-model-test.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-4.c b/gcc/testsuite/gcc.target/loongarch/attr-model-4.c
+new file mode 100644
+index 000000000..482724bb9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-4.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-mexplicit-relocs=auto -mcmodel=extreme -O2" } */
++/* { dg-final { scan-assembler-times "%pc64_hi12" 3 } } */
++
++#define ATTR_MODEL_TEST
++#include "attr-model-test.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c
+new file mode 100644
+index 000000000..a4da44b4a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs=auto -mcmodel=extreme" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
++
++#include "func-call-extreme-1.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c
+new file mode 100644
+index 000000000..16b00f4c5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs=auto -mcmodel=extreme" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
++
++#include "func-call-extreme-1.c"
+-- 
+2.43.0
+
diff --git a/0068-LoongArch-Fix-warnings-building-libgcc.patch b/0068-LoongArch-Fix-warnings-building-libgcc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..47a113dad6576c41f8d521c8ad144c35d837596e
--- /dev/null
+++ b/0068-LoongArch-Fix-warnings-building-libgcc.patch
@@ -0,0 +1,79 @@
+From 5a910f294605d0163f8f4ac255a14425b154b5dd Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 9 Dec 2023 22:08:37 +0800
+Subject: [PATCH 068/188] LoongArch: Fix warnings building libgcc
+
+We are excluding loongarch-opts.h from target libraries, but now struct
+loongarch_target and gcc_options are not declared in the target
+libraries, causing:
+
+In file included from ../.././gcc/options.h:8,
+                 from ../.././gcc/tm.h:49,
+                 from ../../../gcc/libgcc/fixed-bit.c:48:
+../../../gcc/libgcc/../gcc/config/loongarch/loongarch-opts.h:57:41:
+warning: 'struct gcc_options' declared inside parameter list will not
+be visible outside of this definition or declaration
+   57 |                                  struct gcc_options *opts,
+      |                                         ^~~~~~~~~~~
+
+So exclude the declarations referring to the C++ structs as well.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-opts.h (la_target): Move into #if
+	for loongarch-def.h.
+	(loongarch_init_target): Likewise.
+	(loongarch_config_target): Likewise.
+	(loongarch_update_gcc_opt_status): Likewise.
+---
+ gcc/config/loongarch/loongarch-opts.h | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 7010ddfec..639ed50bd 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -21,22 +21,15 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef LOONGARCH_OPTS_H
+ #define LOONGARCH_OPTS_H
+ 
+-/* This is a C++ header and it shouldn't be used by target libraries.  */
++/* The loongarch-def.h file is a C++ header and it shouldn't be used by
++   target libraries.  Exclude it and everything using the C++ structs
++   (struct loongarch_target and gcc_options) from target libraries.  */
+ #if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ #include "loongarch-def.h"
+-#endif
+ 
+ /* Target configuration */
+ extern struct loongarch_target la_target;
+ 
+-/* Flag status */
+-struct loongarch_flags {
+-    int flt; const char* flt_str;
+-#define SX_FLAG_TYPE(x) ((x) < 0 ? -(x) : (x))
+-    int sx[2];
+-};
+-
+-
+ /* Initialize loongarch_target from separate option variables.  */
+ void
+ loongarch_init_target (struct loongarch_target *target,
+@@ -56,7 +49,14 @@ void
+ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ 				 struct gcc_options *opts,
+ 				 struct gcc_options *opts_set);
++#endif
+ 
++/* Flag status */
++struct loongarch_flags {
++    int flt; const char* flt_str;
++#define SX_FLAG_TYPE(x) ((x) < 0 ? -(x) : (x))
++    int sx[2];
++};
+ 
+ /* Macros for common conditional expressions used in loongarch.{c,h,md} */
+ #define TARGET_CMODEL_NORMAL	    (la_target.cmodel == CMODEL_NORMAL)
+-- 
+2.43.0
+
diff --git a/0069-LoongArch-testsuite-Remove-XFAIL-in-vect-ftint-no-in.patch b/0069-LoongArch-testsuite-Remove-XFAIL-in-vect-ftint-no-in.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a7ef474a22f515e4bb1ea7814b03e1b937244679
--- /dev/null
+++ b/0069-LoongArch-testsuite-Remove-XFAIL-in-vect-ftint-no-in.patch
@@ -0,0 +1,30 @@
+From 639e7518c8a4468cd50d774c5a3dbda5f2dbb4a7 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 13 Dec 2023 02:39:35 +0800
+Subject: [PATCH 069/188] LoongArch: testsuite: Remove XFAIL in
+ vect-ftint-no-inexact.c
+
+After r14-6455 this no longer fails.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vect-ftint-no-inexact.c (xfail): Remove.
+---
+ gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
+index 83d268099..61918beef 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
++++ b/gcc/testsuite/gcc.target/loongarch/vect-ftint-no-inexact.c
+@@ -39,6 +39,5 @@
+ /* { dg-final { scan-assembler-not "\txvftintrne\.w\.s" } } */
+ /* { dg-final { scan-assembler-not "\txvftintrne\.l\.d" } } */
+ 
+-/* trunc: XFAIL due to PR 107723 */
+-/* { dg-final { scan-assembler "bl\t%plt\\(trunc\\)" { xfail *-*-* } } } */
++/* { dg-final { scan-assembler "bl\t%plt\\(trunc\\)" } } */
+ /* { dg-final { scan-assembler "bl\t%plt\\(truncf\\)" } } */
+-- 
+2.43.0
+
diff --git a/0070-LoongArch-Include-rtl.h-for-COSTS_N_INSNS-instead-of.patch b/0070-LoongArch-Include-rtl.h-for-COSTS_N_INSNS-instead-of.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e0ff04254e6e7789117a0cffeee09831c675a205
--- /dev/null
+++ b/0070-LoongArch-Include-rtl.h-for-COSTS_N_INSNS-instead-of.patch
@@ -0,0 +1,44 @@
+From 6a5e3932a39f1ffa6f87479748ee711e4fa47d30 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 9 Dec 2023 15:27:28 +0800
+Subject: [PATCH 070/188] LoongArch: Include rtl.h for COSTS_N_INSNS instead of
+ hard coding our own
+
+With loongarch-def.cc switched from C to C++, we can include rtl.h for
+COSTS_N_INSNS, instead of hard coding our own.
+
+This is a non-functional change for now, but it will make the code more
+future-proof in case COSTS_N_INSNS in rtl.h would be changed.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-def.cc (rtl.h): Include.
+	(COSTS_N_INSNS): Remove the macro definition.
+---
+ gcc/config/loongarch/loongarch-def.cc | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index c41804a18..6217b1926 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "system.h"
+ #include "coretypes.h"
+ #include "tm.h"
++#include "rtl.h"
+ 
+ #include "loongarch-def.h"
+ #include "loongarch-str.h"
+@@ -89,8 +90,6 @@ array_tune loongarch_cpu_align =
+     .set (CPU_LA464, la464_align ())
+     .set (CPU_LA664, la464_align ());
+ 
+-#define COSTS_N_INSNS(N) ((N) * 4)
+-
+ /* Default RTX cost initializer.  */
+ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
+   : fp_add (COSTS_N_INSNS (1)),
+-- 
+2.43.0
+
diff --git a/0071-LoongArch-Fix-instruction-costs-PR112936.patch b/0071-LoongArch-Fix-instruction-costs-PR112936.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8a3eb7889a6a0f3ce12ea4a166e35f16d56d58f7
--- /dev/null
+++ b/0071-LoongArch-Fix-instruction-costs-PR112936.patch
@@ -0,0 +1,165 @@
+From c5abe64e64aba601e67f3367a27caf616062b8f4 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 9 Dec 2023 17:41:32 +0800
+Subject: [PATCH 071/188] LoongArch: Fix instruction costs [PR112936]
+
+Replace the instruction costs in loongarch_rtx_cost_data constructor
+based on micro-benchmark results on LA464 and LA664.
+
+This allows optimizations like "x * 17" to alsl, and "x * 68" to alsl
+and slli.
+
+gcc/ChangeLog:
+
+	PR target/112936
+	* config/loongarch/loongarch-def.cc
+	(loongarch_rtx_cost_data::loongarch_rtx_cost_data): Update
+	instruction costs per micro-benchmark results.
+	(loongarch_rtx_cost_optimize_size): Set all instruction costs
+	to (COSTS_N_INSNS (1) + 1).
+	* config/loongarch/loongarch.cc (loongarch_rtx_costs): Remove
+	special case for multiplication when optimizing for size.
+	Adjust division cost when TARGET_64BIT && !TARGET_DIV32.
+	Account the extra cost when TARGET_CHECK_ZERO_DIV and
+	optimizing for speed.
+
+gcc/testsuite/ChangeLog
+
+	PR target/112936
+	* gcc.target/loongarch/mul-const-reduction.c: New test.
+---
+ gcc/config/loongarch/loongarch-def.cc         | 39 ++++++++++---------
+ gcc/config/loongarch/loongarch.cc             | 22 +++++------
+ .../loongarch/mul-const-reduction.c           | 11 ++++++
+ 3 files changed, 43 insertions(+), 29 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/mul-const-reduction.c
+
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index 6217b1926..4a8885e83 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -92,15 +92,15 @@ array_tune loongarch_cpu_align =
+ 
+ /* Default RTX cost initializer.  */
+ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
+-  : fp_add (COSTS_N_INSNS (1)),
+-    fp_mult_sf (COSTS_N_INSNS (2)),
+-    fp_mult_df (COSTS_N_INSNS (4)),
+-    fp_div_sf (COSTS_N_INSNS (6)),
++  : fp_add (COSTS_N_INSNS (5)),
++    fp_mult_sf (COSTS_N_INSNS (5)),
++    fp_mult_df (COSTS_N_INSNS (5)),
++    fp_div_sf (COSTS_N_INSNS (8)),
+     fp_div_df (COSTS_N_INSNS (8)),
+-    int_mult_si (COSTS_N_INSNS (1)),
+-    int_mult_di (COSTS_N_INSNS (1)),
+-    int_div_si (COSTS_N_INSNS (4)),
+-    int_div_di (COSTS_N_INSNS (6)),
++    int_mult_si (COSTS_N_INSNS (4)),
++    int_mult_di (COSTS_N_INSNS (4)),
++    int_div_si (COSTS_N_INSNS (5)),
++    int_div_di (COSTS_N_INSNS (5)),
+     branch_cost (6),
+     memory_latency (4) {}
+ 
+@@ -111,18 +111,21 @@ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
+ array_tune loongarch_cpu_rtx_cost_data =
+   array_tune ();
+ 
+-/* RTX costs to use when optimizing for size.  */
++/* RTX costs to use when optimizing for size.
++   We use a value slightly larger than COSTS_N_INSNS (1) for all of them
++   because they are slower than simple instructions.  */
++#define COST_COMPLEX_INSN (COSTS_N_INSNS (1) + 1)
+ const loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size =
+   loongarch_rtx_cost_data ()
+-    .fp_add_ (4)
+-    .fp_mult_sf_ (4)
+-    .fp_mult_df_ (4)
+-    .fp_div_sf_ (4)
+-    .fp_div_df_ (4)
+-    .int_mult_si_ (4)
+-    .int_mult_di_ (4)
+-    .int_div_si_ (4)
+-    .int_div_di_ (4);
++    .fp_add_ (COST_COMPLEX_INSN)
++    .fp_mult_sf_ (COST_COMPLEX_INSN)
++    .fp_mult_df_ (COST_COMPLEX_INSN)
++    .fp_div_sf_ (COST_COMPLEX_INSN)
++    .fp_div_df_ (COST_COMPLEX_INSN)
++    .int_mult_si_ (COST_COMPLEX_INSN)
++    .int_mult_di_ (COST_COMPLEX_INSN)
++    .int_div_si_ (COST_COMPLEX_INSN)
++    .int_div_di_ (COST_COMPLEX_INSN);
+ 
+ array_tune loongarch_cpu_issue_rate = array_tune ()
+   .set (CPU_NATIVE, 4)
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 4362149ef..afbb55390 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3797,8 +3797,6 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code,
+ 	*total = (speed
+ 		  ? loongarch_cost->int_mult_si * 3 + 6
+ 		  : COSTS_N_INSNS (7));
+-      else if (!speed)
+-	*total = COSTS_N_INSNS (1) + 1;
+       else if (mode == DImode)
+ 	*total = loongarch_cost->int_mult_di;
+       else
+@@ -3833,14 +3831,18 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code,
+ 
+     case UDIV:
+     case UMOD:
+-      if (!speed)
+-	{
+-	  *total = COSTS_N_INSNS (loongarch_idiv_insns (mode));
+-	}
+-      else if (mode == DImode)
++      if (mode == DImode)
+ 	*total = loongarch_cost->int_div_di;
+       else
+-	*total = loongarch_cost->int_div_si;
++	{
++	  *total = loongarch_cost->int_div_si;
++	  if (TARGET_64BIT && !TARGET_DIV32)
++	    *total += COSTS_N_INSNS (2);
++	}
++
++      if (TARGET_CHECK_ZERO_DIV)
++	*total += COSTS_N_INSNS (2);
++
+       return false;
+ 
+     case SIGN_EXTEND:
+@@ -3872,9 +3874,7 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code,
+ 		  && (GET_CODE (XEXP (XEXP (XEXP (x, 0), 0), 1))
+ 		      == ZERO_EXTEND))))
+ 	{
+-	  if (!speed)
+-	    *total = COSTS_N_INSNS (1) + 1;
+-	  else if (mode == DImode)
++	  if (mode == DImode)
+ 	    *total = loongarch_cost->int_mult_di;
+ 	  else
+ 	    *total = loongarch_cost->int_mult_si;
+diff --git a/gcc/testsuite/gcc.target/loongarch/mul-const-reduction.c b/gcc/testsuite/gcc.target/loongarch/mul-const-reduction.c
+new file mode 100644
+index 000000000..02d9a4876
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/mul-const-reduction.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mtune=la464" } */
++/* { dg-final { scan-assembler "alsl\.w" } } */
++/* { dg-final { scan-assembler "slli\.w" } } */
++/* { dg-final { scan-assembler-not "mul\.w" } } */
++
++int
++test (int a)
++{
++  return a * 68;
++}
+-- 
+2.43.0
+
diff --git a/0072-LoongArch-Add-alslsi3_extend.patch b/0072-LoongArch-Add-alslsi3_extend.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a0c7d0385434664880e54e26cbdb1ec1d27dc8a4
--- /dev/null
+++ b/0072-LoongArch-Add-alslsi3_extend.patch
@@ -0,0 +1,53 @@
+From 89dfb9ad8687f9b31be5925b2d106b6ec13cc628 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 9 Dec 2023 18:02:35 +0800
+Subject: [PATCH 072/188] LoongArch: Add alslsi3_extend
+
+Following the instruction cost fix, we are generating
+
+    alsl.w $a0, $a0, $a0, 4
+
+instead of
+
+    li.w  $t0, 17
+    mul.w $a0, $t0
+
+for "x * 4", because alsl.w is 4 times faster than mul.w.  But we didn't
+have a sign-extending pattern for alsl.w, causing an extra slli.w
+instruction generated to sign-extend $a0.  Add the pattern to remove the
+redundant extension.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (alslsi3_extend): New
+	define_insn.
+---
+ gcc/config/loongarch/loongarch.md | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 222f1ae83..23368008e 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2874,6 +2874,18 @@
+   [(set_attr "type" "arith")
+    (set_attr "mode" "")])
+ 
++(define_insn "alslsi3_extend"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(sign_extend:DI
++	  (plus:SI
++	    (ashift:SI (match_operand:SI 1 "register_operand" "r")
++		       (match_operand 2 "const_immalsl_operand" ""))
++	    (match_operand:SI 3 "register_operand" "r"))))]
++  ""
++  "alsl.w\t%0,%1,%3,%2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "SI")])
++
+ 
+ 
+ ;; Reverse the order of bytes of operand 1 and store the result in operand 0.
+-- 
+2.43.0
+
diff --git a/0073-LoongArch-Add-support-for-D-frontend.patch b/0073-LoongArch-Add-support-for-D-frontend.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b953765cba08dd8932b7c24bf335050ec523c5c3
--- /dev/null
+++ b/0073-LoongArch-Add-support-for-D-frontend.patch
@@ -0,0 +1,224 @@
+From 6ef045728a11218f023fee4527cd6d2fdb2c2910 Mon Sep 17 00:00:00 2001
+From: liushuyu 
+Date: Mon, 18 Dec 2023 09:52:07 +0800
+Subject: [PATCH 073/188] LoongArch: Add support for D frontend.
+
+gcc/ChangeLog:
+
+	* config.gcc: Add loongarch-d.o to d_target_objs for LoongArch
+	architecture.
+	* config/loongarch/t-loongarch: Add object target for loongarch-d.cc.
+	* config/loongarch/loongarch-d.cc
+	(loongarch_d_target_versions): add interface function to define builtin
+	D versions for LoongArch architecture.
+	(loongarch_d_handle_target_float_abi): add interface function to define
+	builtin D traits for LoongArch architecture.
+	(loongarch_d_register_target_info): add interface function to register
+	loongarch_d_handle_target_float_abi function.
+	* config/loongarch/loongarch-d.h
+	(loongarch_d_target_versions): add function prototype.
+	(loongarch_d_register_target_info): Likewise.
+
+libphobos/ChangeLog:
+
+	* configure.tgt: Enable libphobos for LoongArch architecture.
+	* libdruntime/gcc/sections/elf.d: Add TLS_DTV_OFFSET constant for
+	LoongArch64.
+	* libdruntime/gcc/unwind/generic.d: Add __aligned__ constant for
+	LoongArch64.
+---
+ gcc/config.gcc                             |  1 +
+ gcc/config/loongarch/loongarch-d.cc        | 77 ++++++++++++++++++++++
+ gcc/config/loongarch/loongarch-d.h         | 26 ++++++++
+ gcc/config/loongarch/t-loongarch           |  4 ++
+ libphobos/configure.tgt                    |  3 +
+ libphobos/libdruntime/gcc/sections/elf.d   |  2 +
+ libphobos/libdruntime/gcc/unwind/generic.d |  1 +
+ 7 files changed, 114 insertions(+)
+ create mode 100644 gcc/config/loongarch/loongarch-d.cc
+ create mode 100644 gcc/config/loongarch/loongarch-d.h
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 11ab620d0..039187fa2 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -456,6 +456,7 @@ mips*-*-*)
+ 	;;
+ loongarch*-*-*)
+ 	cpu_type=loongarch
++	d_target_objs="loongarch-d.o"
+ 	extra_headers="larchintrin.h lsxintrin.h lasxintrin.h"
+ 	extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
+ 	extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
+diff --git a/gcc/config/loongarch/loongarch-d.cc b/gcc/config/loongarch/loongarch-d.cc
+new file mode 100644
+index 000000000..9ac483c39
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-d.cc
+@@ -0,0 +1,77 @@
++/* Subroutines for the D front end on the LoongArch architecture.
++   Copyright (C) 2023 Free Software Foundation, Inc.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#define IN_TARGET_CODE 1
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm_d.h"
++#include "d/d-target.h"
++#include "d/d-target-def.h"
++
++/* Implement TARGET_D_CPU_VERSIONS for LoongArch targets.  */
++
++void
++loongarch_d_target_versions (void)
++{
++  if (TARGET_64BIT)
++    d_add_builtin_version ("LoongArch64");
++  else
++    d_add_builtin_version ("LoongArch32");
++
++  if (TARGET_HARD_FLOAT_ABI)
++    {
++      d_add_builtin_version ("LoongArch_HardFloat");
++      d_add_builtin_version ("D_HardFloat");
++    }
++  else if (TARGET_SOFT_FLOAT_ABI)
++    {
++      d_add_builtin_version ("LoongArch_SoftFloat");
++      d_add_builtin_version ("D_SoftFloat");
++    }
++}
++
++/* Handle a call to `__traits(getTargetInfo, "floatAbi")'.  */
++
++static tree
++loongarch_d_handle_target_float_abi (void)
++{
++  const char *abi;
++
++  if (TARGET_HARD_FLOAT_ABI)
++    abi = "hard";
++  else if (TARGET_SOFT_FLOAT_ABI)
++    abi = "soft";
++  else
++    abi = "";
++
++  return build_string_literal (strlen (abi) + 1, abi);
++}
++
++/* Implement TARGET_D_REGISTER_CPU_TARGET_INFO.  */
++
++void
++loongarch_d_register_target_info (void)
++{
++  const struct d_target_info_spec handlers[] = {
++    {"floatAbi", loongarch_d_handle_target_float_abi},
++    {NULL, NULL},
++  };
++
++  d_add_target_info_handlers (handlers);
++}
+diff --git a/gcc/config/loongarch/loongarch-d.h b/gcc/config/loongarch/loongarch-d.h
+new file mode 100644
+index 000000000..a2fb8d51d
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-d.h
+@@ -0,0 +1,26 @@
++/* Definitions for the D front end on the LoongArch architecture.
++   Copyright (C) 2023 Free Software Foundation, Inc.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++/* Defined in loongarch-d.cc  */
++extern void
++loongarch_d_target_versions (void);
++extern void
++loongarch_d_register_target_info (void);
++
++/* Target hooks for D language.  */
++#define TARGET_D_CPU_VERSIONS loongarch_d_target_versions
++#define TARGET_D_REGISTER_CPU_TARGET_INFO loongarch_d_register_target_info
+diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch
+index a1a40431f..994f4d19c 100644
+--- a/gcc/config/loongarch/t-loongarch
++++ b/gcc/config/loongarch/t-loongarch
+@@ -67,6 +67,10 @@ loongarch-cpu.o: $(srcdir)/config/loongarch/loongarch-cpu.cc $(LA_STR_H) \
+ loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.cc $(LA_STR_H)
+ 	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+ 
++loongarch-d.o: $(srcdir)/config/loongarch/loongarch-d.cc
++	$(COMPILE) $<
++	$(POSTCOMPILE)
++
+ $(srcdir)/config/loongarch/loongarch.opt: s-loongarch-opt ; @true
+ s-loongarch-opt: $(srcdir)/config/loongarch/genopts/genstr.sh \
+ 	$(srcdir)/config/loongarch/genopts/loongarch.opt.in \
+diff --git a/libphobos/configure.tgt b/libphobos/configure.tgt
+index 0063dd232..dcb1551cd 100644
+--- a/libphobos/configure.tgt
++++ b/libphobos/configure.tgt
+@@ -36,6 +36,9 @@ case "${target}" in
+   hppa-*-linux*)
+ 	LIBPHOBOS_SUPPORTED=yes
+ 	;;
++  loongarch*-*-linux*)
++	LIBPHOBOS_SUPPORTED=yes
++	;;
+   mips*-*-linux*)
+ 	LIBPHOBOS_SUPPORTED=yes
+ 	;;
+diff --git a/libphobos/libdruntime/gcc/sections/elf.d b/libphobos/libdruntime/gcc/sections/elf.d
+index 5819811f3..bc993ea49 100644
+--- a/libphobos/libdruntime/gcc/sections/elf.d
++++ b/libphobos/libdruntime/gcc/sections/elf.d
+@@ -1061,6 +1061,8 @@ else version (MIPS64)
+     enum TLS_DTV_OFFSET = 0x8000;
+ else version (IBMZ_Any)
+     enum TLS_DTV_OFFSET = 0x0;
++else version (LoongArch64)
++    enum TLS_DTV_OFFSET = 0x0;
+ else
+     static assert( false, "Platform not supported." );
+ 
+diff --git a/libphobos/libdruntime/gcc/unwind/generic.d b/libphobos/libdruntime/gcc/unwind/generic.d
+index 929b75dc7..8e5db80e1 100644
+--- a/libphobos/libdruntime/gcc/unwind/generic.d
++++ b/libphobos/libdruntime/gcc/unwind/generic.d
+@@ -141,6 +141,7 @@ else version (SPARC64)  private enum __aligned__ = 16;
+ else version (SystemZ)  private enum __aligned__ = 8;
+ else version (X86)      private enum __aligned__ = 16;
+ else version (X86_64)   private enum __aligned__ = 16;
++else version (LoongArch64) private enum __aligned__ = 16;
+ else static assert( false, "Platform not supported.");
+ 
+ align(__aligned__) struct _Unwind_Exception
+-- 
+2.43.0
+
diff --git a/0074-libruntime-Add-fiber-context-switch-code-for-LoongAr.patch b/0074-libruntime-Add-fiber-context-switch-code-for-LoongAr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7313fd56cba55fafa9d9849e85a145e0f8fd04c5
--- /dev/null
+++ b/0074-libruntime-Add-fiber-context-switch-code-for-LoongAr.patch
@@ -0,0 +1,156 @@
+From 29eade7dc3032c6054f2ec2e2caa4ce43da6212d Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Fri, 8 Dec 2023 18:09:41 +0800
+Subject: [PATCH 074/188] libruntime: Add fiber context switch code for
+ LoongArch.
+
+libphobos/ChangeLog:
+
+	* libdruntime/config/loongarch/switchcontext.S: New file.
+---
+ .../config/loongarch/switchcontext.S          | 133 ++++++++++++++++++
+ 1 file changed, 133 insertions(+)
+ create mode 100644 libphobos/libdruntime/config/loongarch/switchcontext.S
+
+diff --git a/libphobos/libdruntime/config/loongarch/switchcontext.S b/libphobos/libdruntime/config/loongarch/switchcontext.S
+new file mode 100644
+index 000000000..edfb9b67e
+--- /dev/null
++++ b/libphobos/libdruntime/config/loongarch/switchcontext.S
+@@ -0,0 +1,133 @@
++/* LoongArch support code for fibers and multithreading.
++   Copyright (C) 2023 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++Under Section 7 of GPL version 3, you are granted additional
++permissions described in the GCC Runtime Library Exception, version
++3.1, as published by the Free Software Foundation.
++
++You should have received a copy of the GNU General Public License and
++a copy of the GCC Runtime Library Exception along with this program;
++see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++.  */
++
++#include "../common/threadasm.S"
++
++/**
++ * Performs a context switch.
++ *
++ * $a0 - void** - ptr to old stack pointer
++ * $a1 - void*  - new stack pointer
++ *
++ */
++
++#if defined(__loongarch_lp64)
++#  define GPR_L ld.d
++#  define GPR_S st.d
++#  define SZ_GPR 8
++#  define ADDSP(si)   addi.d  $sp, $sp, si
++#elif defined(__loongarch64_ilp32)
++#  define GPR_L ld.w
++#  define GPR_S st.w
++#  define SZ_GPR 4
++#  define ADDSP(si)   addi.w  $sp, $sp, si
++#else
++#  error Unsupported GPR size (must be 64-bit or 32-bit).
++#endif
++
++#if defined(__loongarch_double_float)
++#  define FPR_L fld.d
++#  define FPR_S fst.d
++#  define SZ_FPR 8
++#elif defined(__loongarch_single_float)
++#  define FPR_L fld.s
++#  define FPR_S fst.s
++#  define SZ_FPR 4
++#else
++#  define SZ_FPR 0
++#endif
++
++    .text
++    .align 2
++    .global fiber_switchContext
++    .type   fiber_switchContext, @function
++fiber_switchContext:
++    .cfi_startproc
++    ADDSP(-11 * SZ_GPR)
++
++    // fp regs and return address are stored below the stack
++    // because we don't want the GC to scan them.
++
++    // return address (r1)
++    GPR_S  $r1, $sp, -SZ_GPR
++
++#if SZ_FPR != 0
++    // callee-saved scratch FPRs (f24-f31)
++    FPR_S  $f24, $sp, -SZ_GPR-1*SZ_FPR
++    FPR_S  $f25, $sp, -SZ_GPR-2*SZ_FPR
++    FPR_S  $f26, $sp, -SZ_GPR-3*SZ_FPR
++    FPR_S  $f27, $sp, -SZ_GPR-4*SZ_FPR
++    FPR_S  $f28, $sp, -SZ_GPR-5*SZ_FPR
++    FPR_S  $f29, $sp, -SZ_GPR-6*SZ_FPR
++    FPR_S  $f30, $sp, -SZ_GPR-7*SZ_FPR
++    FPR_S  $f31, $sp, -SZ_GPR-8*SZ_FPR
++#endif
++
++    // callee-saved GPRs (r21, fp (r22), r23-r31)
++    GPR_S $r21, $sp, 0*SZ_GPR
++    GPR_S  $fp, $sp, 1*SZ_GPR
++    GPR_S  $s0, $sp, 2*SZ_GPR
++    GPR_S  $s1, $sp, 3*SZ_GPR
++    GPR_S  $s2, $sp, 4*SZ_GPR
++    GPR_S  $s3, $sp, 5*SZ_GPR
++    GPR_S  $s4, $sp, 6*SZ_GPR
++    GPR_S  $s5, $sp, 7*SZ_GPR
++    GPR_S  $s6, $sp, 8*SZ_GPR
++    GPR_S  $s7, $sp, 9*SZ_GPR
++    GPR_S  $s8, $sp, 10*SZ_GPR
++
++    // swap stack pointer
++    GPR_S $sp, $a0, 0
++    move $sp, $a1
++
++    GPR_L  $r1, $sp, -SZ_GPR
++
++#if SZ_FPR != 0
++    FPR_L  $f24, $sp, -SZ_GPR-1*SZ_FPR
++    FPR_L  $f25, $sp, -SZ_GPR-2*SZ_FPR
++    FPR_L  $f26, $sp, -SZ_GPR-3*SZ_FPR
++    FPR_L  $f27, $sp, -SZ_GPR-4*SZ_FPR
++    FPR_L  $f28, $sp, -SZ_GPR-5*SZ_FPR
++    FPR_L  $f29, $sp, -SZ_GPR-6*SZ_FPR
++    FPR_L  $f30, $sp, -SZ_GPR-7*SZ_FPR
++    FPR_L  $f31, $sp, -SZ_GPR-8*SZ_FPR
++#endif
++
++    GPR_L $r21, $sp, 0*SZ_GPR
++    GPR_L  $fp, $sp, 1*SZ_GPR
++    GPR_L  $s0, $sp, 2*SZ_GPR
++    GPR_L  $s1, $sp, 3*SZ_GPR
++    GPR_L  $s2, $sp, 4*SZ_GPR
++    GPR_L  $s3, $sp, 5*SZ_GPR
++    GPR_L  $s4, $sp, 6*SZ_GPR
++    GPR_L  $s5, $sp, 7*SZ_GPR
++    GPR_L  $s6, $sp, 8*SZ_GPR
++    GPR_L  $s7, $sp, 9*SZ_GPR
++    GPR_L  $s8, $sp, 10*SZ_GPR
++
++    ADDSP(11 * SZ_GPR)
++
++    jr     $r1 // return
++    .cfi_endproc
++    .size fiber_switchContext,.-fiber_switchContext
+-- 
+2.43.0
+
diff --git a/0075-LoongArch-Fix-FP-vector-comparsons-PR113034.patch b/0075-LoongArch-Fix-FP-vector-comparsons-PR113034.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b9b652cd412e56a46278be9f89fc56cf50481d7f
--- /dev/null
+++ b/0075-LoongArch-Fix-FP-vector-comparsons-PR113034.patch
@@ -0,0 +1,866 @@
+From dd33794e64d462bf39e72f39343a384c191307f4 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 17 Dec 2023 01:09:20 +0800
+Subject: [PATCH 075/188] LoongArch: Fix FP vector comparsons [PR113034]
+
+We had the following mappings between vfcmp submnemonics and RTX
+codes:
+
+    (define_code_attr fcc
+      [(unordered "cun")
+       (ordered   "cor")
+       (eq       "ceq")
+       (ne       "cne")
+       (uneq      "cueq")
+       (unle      "cule")
+       (unlt      "cult")
+       (le       "cle")
+       (lt       "clt")])
+
+This is inconsistent with scalar code:
+
+    (define_code_attr fcond [(unordered "cun")
+                             (uneq "cueq")
+                             (unlt "cult")
+                             (unle "cule")
+                             (eq "ceq")
+                             (lt "slt")
+                             (le "sle")
+                             (ordered "cor")
+                             (ltgt "sne")
+                             (ne "cune")
+                             (ge "sge")
+                             (gt "sgt")
+                             (unge "cuge")
+                             (ungt "cugt")])
+
+For every RTX code for which the LSX/LASX code is different from the
+scalar code, the scalar code is correct and the LSX/LASX code is wrong.
+Most seriously, the RTX code NE should be mapped to "cneq", not "cne".
+Rewrite vfcmp define_insns in simd.md using the same mapping as
+scalar fcmp.
+
+Note that GAS does not support [x]vfcmp.{c/s}[u]{ge/gt} (pseudo)
+instruction (although fcmp.{c/s}[u]{ge/gt} is supported), so we need to
+switch the order of inputs and use [x]vfcmp.{c/s}[u]{le/lt} instead.
+
+The vfcmp.{sult/sule/clt/cle}.{s/d} instructions do not have a single
+RTX code, but they can be modeled as an inversed RTX code following a
+"not" operation.  Doing so allows the compiler to optimized vectorized
+__builtin_isless etc. to a single instruction.  This optimization should
+be added for scalar code too and I'll do it later.
+
+Tests are added for mapping between C code, IEC 60559 operations, and
+vfcmp instructions.
+
+[1]:https://gcc.gnu.org/pipermail/gcc-patches/2023-December/640713.html
+
+gcc/ChangeLog:
+
+	PR target/113034
+	* config/loongarch/lasx.md (UNSPEC_LASX_XVFCMP_*): Remove.
+	(lasx_xvfcmp_caf_): Remove.
+	(lasx_xvfcmp_cune_): Remove.
+	(FSC256_UNS): Remove.
+	(fsc256): Remove.
+	(lasx_xvfcmp__): Remove.
+	(lasx_xvfcmp__): Remove.
+	* config/loongarch/lsx.md (UNSPEC_LSX_XVFCMP_*): Remove.
+	(lsx_vfcmp_caf_): Remove.
+	(lsx_vfcmp_cune_): Remove.
+	(vfcond): Remove.
+	(fcc): Remove.
+	(FSC_UNS): Remove.
+	(fsc): Remove.
+	(lsx_vfcmp__): Remove.
+	(lsx_vfcmp__): Remove.
+	* config/loongarch/simd.md
+	(fcond_simd): New define_code_iterator.
+	(_vfcmp__):
+	New define_insn.
+	(fcond_simd_rev): New define_code_iterator.
+	(fcond_rev_asm): New define_code_attr.
+	(_vfcmp__):
+	New define_insn.
+	(fcond_inv): New define_code_iterator.
+	(fcond_inv_rev): New define_code_iterator.
+	(fcond_inv_rev_asm): New define_code_attr.
+	(_vfcmp__): New define_insn.
+	(_vfcmp__):
+	New define_insn.
+	(UNSPEC_SIMD_FCMP_CAF, UNSPEC_SIMD_FCMP_SAF,
+	UNSPEC_SIMD_FCMP_SEQ, UNSPEC_SIMD_FCMP_SUN,
+	UNSPEC_SIMD_FCMP_SUEQ, UNSPEC_SIMD_FCMP_CNE,
+	UNSPEC_SIMD_FCMP_SOR, UNSPEC_SIMD_FCMP_SUNE): New unspecs.
+	(SIMD_FCMP): New define_int_iterator.
+	(fcond_unspec): New define_int_attr.
+	(_vfcmp__): New define_insn.
+	* config/loongarch/loongarch.cc (loongarch_expand_lsx_cmp):
+	Remove unneeded special cases.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/113034
+	* gcc.target/loongarch/vfcmp-f.c: New test.
+	* gcc.target/loongarch/vfcmp-d.c: New test.
+	* gcc.target/loongarch/xvfcmp-f.c: New test.
+	* gcc.target/loongarch/xvfcmp-d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-vcond-2.c: Scan for cune
+	instead of cne.
+	* gcc.target/loongarch/vector/lsx/lsx-vcond-2.c: Likewise.
+---
+ gcc/config/loongarch/lasx.md                  |  76 --------
+ gcc/config/loongarch/loongarch.cc             |  60 +-----
+ gcc/config/loongarch/lsx.md                   |  83 --------
+ gcc/config/loongarch/simd.md                  | 118 ++++++++++++
+ .../loongarch/vector/lasx/lasx-vcond-2.c      |   4 +-
+ .../loongarch/vector/lsx/lsx-vcond-2.c        |   4 +-
+ gcc/testsuite/gcc.target/loongarch/vfcmp-d.c  |  28 +++
+ gcc/testsuite/gcc.target/loongarch/vfcmp-f.c  | 178 ++++++++++++++++++
+ gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c |  29 +++
+ gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c |  27 +++
+ 10 files changed, 385 insertions(+), 222 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vfcmp-d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vfcmp-f.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index eeac8cd98..921ce0eeb 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -32,9 +32,7 @@
+   UNSPEC_LASX_XVBITREVI
+   UNSPEC_LASX_XVBITSET
+   UNSPEC_LASX_XVBITSETI
+-  UNSPEC_LASX_XVFCMP_CAF
+   UNSPEC_LASX_XVFCLASS
+-  UNSPEC_LASX_XVFCMP_CUNE
+   UNSPEC_LASX_XVFCVT
+   UNSPEC_LASX_XVFCVTH
+   UNSPEC_LASX_XVFCVTL
+@@ -44,17 +42,6 @@
+   UNSPEC_LASX_XVFRINT
+   UNSPEC_LASX_XVFRSQRT
+   UNSPEC_LASX_XVFRSQRTE
+-  UNSPEC_LASX_XVFCMP_SAF
+-  UNSPEC_LASX_XVFCMP_SEQ
+-  UNSPEC_LASX_XVFCMP_SLE
+-  UNSPEC_LASX_XVFCMP_SLT
+-  UNSPEC_LASX_XVFCMP_SNE
+-  UNSPEC_LASX_XVFCMP_SOR
+-  UNSPEC_LASX_XVFCMP_SUEQ
+-  UNSPEC_LASX_XVFCMP_SULE
+-  UNSPEC_LASX_XVFCMP_SULT
+-  UNSPEC_LASX_XVFCMP_SUN
+-  UNSPEC_LASX_XVFCMP_SUNE
+   UNSPEC_LASX_XVFTINT_U
+   UNSPEC_LASX_XVCLO
+   UNSPEC_LASX_XVSAT_S
+@@ -1481,69 +1468,6 @@
+   [(set_attr "type" "simd_fclass")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lasx_xvfcmp_caf_"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLASX 1 "register_operand" "f")
+-			     (match_operand:FLASX 2 "register_operand" "f")]
+-			    UNSPEC_LASX_XVFCMP_CAF))]
+-  "ISA_HAS_LASX"
+-  "xvfcmp.caf.\t%u0,%u1,%u2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+-(define_insn "lasx_xvfcmp_cune_"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLASX 1 "register_operand" "f")
+-			     (match_operand:FLASX 2 "register_operand" "f")]
+-			    UNSPEC_LASX_XVFCMP_CUNE))]
+-  "ISA_HAS_LASX"
+-  "xvfcmp.cune.\t%u0,%u1,%u2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+-
+-
+-(define_int_iterator FSC256_UNS [UNSPEC_LASX_XVFCMP_SAF UNSPEC_LASX_XVFCMP_SUN
+-				 UNSPEC_LASX_XVFCMP_SOR UNSPEC_LASX_XVFCMP_SEQ
+-				 UNSPEC_LASX_XVFCMP_SNE UNSPEC_LASX_XVFCMP_SUEQ
+-				 UNSPEC_LASX_XVFCMP_SUNE UNSPEC_LASX_XVFCMP_SULE
+-				 UNSPEC_LASX_XVFCMP_SULT UNSPEC_LASX_XVFCMP_SLE
+-				 UNSPEC_LASX_XVFCMP_SLT])
+-
+-(define_int_attr fsc256
+-  [(UNSPEC_LASX_XVFCMP_SAF  "saf")
+-   (UNSPEC_LASX_XVFCMP_SUN  "sun")
+-   (UNSPEC_LASX_XVFCMP_SOR  "sor")
+-   (UNSPEC_LASX_XVFCMP_SEQ  "seq")
+-   (UNSPEC_LASX_XVFCMP_SNE  "sne")
+-   (UNSPEC_LASX_XVFCMP_SUEQ "sueq")
+-   (UNSPEC_LASX_XVFCMP_SUNE "sune")
+-   (UNSPEC_LASX_XVFCMP_SULE "sule")
+-   (UNSPEC_LASX_XVFCMP_SULT "sult")
+-   (UNSPEC_LASX_XVFCMP_SLE  "sle")
+-   (UNSPEC_LASX_XVFCMP_SLT  "slt")])
+-
+-(define_insn "lasx_xvfcmp__"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(vfcond: (match_operand:FLASX 1 "register_operand" "f")
+-			    (match_operand:FLASX 2 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-  "xvfcmp..\t%u0,%u1,%u2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+-
+-(define_insn "lasx_xvfcmp__"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLASX 1 "register_operand" "f")
+-			     (match_operand:FLASX 2 "register_operand" "f")]
+-			    FSC256_UNS))]
+-  "ISA_HAS_LASX"
+-  "xvfcmp..\t%u0,%u1,%u2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+-
+ (define_mode_attr fint256
+   [(V8SF "v8si")
+    (V4DF "v4di")])
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index afbb55390..a22601d88 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -11156,7 +11156,6 @@ static void
+ loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1)
+ {
+   machine_mode cmp_mode = GET_MODE (op0);
+-  int unspec = -1;
+   bool negate = false;
+ 
+   switch (cmp_mode)
+@@ -11198,66 +11197,9 @@ loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1)
+ 
+     case E_V4SFmode:
+     case E_V2DFmode:
+-      switch (cond)
+-	{
+-	case UNORDERED:
+-	case ORDERED:
+-	case EQ:
+-	case NE:
+-	case UNEQ:
+-	case UNLE:
+-	case UNLT:
+-	  break;
+-	case LTGT: cond = NE; break;
+-	case UNGE: cond = UNLE; std::swap (op0, op1); break;
+-	case UNGT: cond = UNLT; std::swap (op0, op1); break;
+-	case LE: unspec = UNSPEC_LSX_VFCMP_SLE; break;
+-	case LT: unspec = UNSPEC_LSX_VFCMP_SLT; break;
+-	case GE: unspec = UNSPEC_LSX_VFCMP_SLE; std::swap (op0, op1); break;
+-	case GT: unspec = UNSPEC_LSX_VFCMP_SLT; std::swap (op0, op1); break;
+-	default:
+-		 gcc_unreachable ();
+-	}
+-      if (unspec < 0)
+-	loongarch_emit_binary (cond, dest, op0, op1);
+-      else
+-	{
+-	  rtx x = gen_rtx_UNSPEC (GET_MODE (dest),
+-				  gen_rtvec (2, op0, op1), unspec);
+-	  emit_insn (gen_rtx_SET (dest, x));
+-	}
+-      break;
+-
+     case E_V8SFmode:
+     case E_V4DFmode:
+-      switch (cond)
+-	{
+-	case UNORDERED:
+-	case ORDERED:
+-	case EQ:
+-	case NE:
+-	case UNEQ:
+-	case UNLE:
+-	case UNLT:
+-	  break;
+-	case LTGT: cond = NE; break;
+-	case UNGE: cond = UNLE; std::swap (op0, op1); break;
+-	case UNGT: cond = UNLT; std::swap (op0, op1); break;
+-	case LE: unspec = UNSPEC_LASX_XVFCMP_SLE; break;
+-	case LT: unspec = UNSPEC_LASX_XVFCMP_SLT; break;
+-	case GE: unspec = UNSPEC_LASX_XVFCMP_SLE; std::swap (op0, op1); break;
+-	case GT: unspec = UNSPEC_LASX_XVFCMP_SLT; std::swap (op0, op1); break;
+-	default:
+-		 gcc_unreachable ();
+-	}
+-      if (unspec < 0)
+-	loongarch_emit_binary (cond, dest, op0, op1);
+-      else
+-	{
+-	  rtx x = gen_rtx_UNSPEC (GET_MODE (dest),
+-				  gen_rtvec (2, op0, op1), unspec);
+-	  emit_insn (gen_rtx_SET (dest, x));
+-	}
++      loongarch_emit_binary (cond, dest, op0, op1);
+       break;
+ 
+     default:
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index dbdb42301..57e0ee3d4 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -34,9 +34,7 @@
+   UNSPEC_LSX_VBITSETI
+   UNSPEC_LSX_BRANCH_V
+   UNSPEC_LSX_BRANCH
+-  UNSPEC_LSX_VFCMP_CAF
+   UNSPEC_LSX_VFCLASS
+-  UNSPEC_LSX_VFCMP_CUNE
+   UNSPEC_LSX_VFCVT
+   UNSPEC_LSX_VFCVTH
+   UNSPEC_LSX_VFCVTL
+@@ -46,17 +44,6 @@
+   UNSPEC_LSX_VFRINT
+   UNSPEC_LSX_VFRSQRT
+   UNSPEC_LSX_VFRSQRTE
+-  UNSPEC_LSX_VFCMP_SAF
+-  UNSPEC_LSX_VFCMP_SEQ
+-  UNSPEC_LSX_VFCMP_SLE
+-  UNSPEC_LSX_VFCMP_SLT
+-  UNSPEC_LSX_VFCMP_SNE
+-  UNSPEC_LSX_VFCMP_SOR
+-  UNSPEC_LSX_VFCMP_SUEQ
+-  UNSPEC_LSX_VFCMP_SULE
+-  UNSPEC_LSX_VFCMP_SULT
+-  UNSPEC_LSX_VFCMP_SUN
+-  UNSPEC_LSX_VFCMP_SUNE
+   UNSPEC_LSX_VFTINT_U
+   UNSPEC_LSX_VSAT_S
+   UNSPEC_LSX_VSAT_U
+@@ -1377,76 +1364,6 @@
+   [(set_attr "type" "simd_fclass")
+    (set_attr "mode" "")])
+ 
+-(define_insn "lsx_vfcmp_caf_"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLSX 1 "register_operand" "f")
+-			  (match_operand:FLSX 2 "register_operand" "f")]
+-			 UNSPEC_LSX_VFCMP_CAF))]
+-  "ISA_HAS_LSX"
+-  "vfcmp.caf.\t%w0,%w1,%w2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+-(define_insn "lsx_vfcmp_cune_"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLSX 1 "register_operand" "f")
+-			  (match_operand:FLSX 2 "register_operand" "f")]
+-			 UNSPEC_LSX_VFCMP_CUNE))]
+-  "ISA_HAS_LSX"
+-  "vfcmp.cune.\t%w0,%w1,%w2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+-(define_code_iterator vfcond [unordered ordered eq ne le lt uneq unle unlt])
+-
+-(define_code_attr fcc
+-  [(unordered "cun")
+-   (ordered   "cor")
+-   (eq	      "ceq")
+-   (ne	      "cne")
+-   (uneq      "cueq")
+-   (unle      "cule")
+-   (unlt      "cult")
+-   (le	      "cle")
+-   (lt	      "clt")])
+-
+-(define_int_iterator FSC_UNS [UNSPEC_LSX_VFCMP_SAF UNSPEC_LSX_VFCMP_SUN UNSPEC_LSX_VFCMP_SOR
+-			      UNSPEC_LSX_VFCMP_SEQ UNSPEC_LSX_VFCMP_SNE UNSPEC_LSX_VFCMP_SUEQ
+-			      UNSPEC_LSX_VFCMP_SUNE UNSPEC_LSX_VFCMP_SULE UNSPEC_LSX_VFCMP_SULT
+-			      UNSPEC_LSX_VFCMP_SLE UNSPEC_LSX_VFCMP_SLT])
+-
+-(define_int_attr fsc
+-  [(UNSPEC_LSX_VFCMP_SAF  "saf")
+-   (UNSPEC_LSX_VFCMP_SUN  "sun")
+-   (UNSPEC_LSX_VFCMP_SOR  "sor")
+-   (UNSPEC_LSX_VFCMP_SEQ  "seq")
+-   (UNSPEC_LSX_VFCMP_SNE  "sne")
+-   (UNSPEC_LSX_VFCMP_SUEQ "sueq")
+-   (UNSPEC_LSX_VFCMP_SUNE "sune")
+-   (UNSPEC_LSX_VFCMP_SULE "sule")
+-   (UNSPEC_LSX_VFCMP_SULT "sult")
+-   (UNSPEC_LSX_VFCMP_SLE  "sle")
+-   (UNSPEC_LSX_VFCMP_SLT  "slt")])
+-
+-(define_insn "lsx_vfcmp__"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(vfcond: (match_operand:FLSX 1 "register_operand" "f")
+-			 (match_operand:FLSX 2 "register_operand" "f")))]
+-  "ISA_HAS_LSX"
+-  "vfcmp..\t%w0,%w1,%w2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+-(define_insn "lsx_vfcmp__"
+-  [(set (match_operand: 0 "register_operand" "=f")
+-	(unspec: [(match_operand:FLSX 1 "register_operand" "f")
+-			  (match_operand:FLSX 2 "register_operand" "f")]
+-			 FSC_UNS))]
+-  "ISA_HAS_LSX"
+-  "vfcmp..\t%w0,%w1,%w2"
+-  [(set_attr "type" "simd_fcmp")
+-   (set_attr "mode" "")])
+-
+ (define_mode_attr fint
+   [(V4SF "v4si")
+    (V2DF "v2di")])
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index 843b1a41f..13202f79b 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -279,6 +279,124 @@
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "")])
+ 
++;; vfcmp.*.{s/d} with defined RTX code
++;; There are no fcmp.{sugt/suge/cgt/cge}.{s/d} menmonics in GAS, so we have
++;; to reverse the operands ourselves :(.
++(define_code_iterator fcond_simd [unordered uneq unlt unle eq lt le
++				  ordered ltgt ne])
++(define_insn "_vfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(fcond_simd:
++	  (match_operand:FVEC 1 "register_operand" "f")
++	  (match_operand:FVEC 2 "register_operand" "f")))]
++  ""
++  "vfcmp..\t%0,%1,%2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++;; There are no fcmp.{sge/sgt/cuge/cugt}.{s/d} menmonics in GAS, so we have
++;; to reverse the operands ourselves.
++(define_code_iterator fcond_simd_rev [ge gt unge ungt])
++
++(define_code_attr fcond_rev_asm
++  [(ge		"sle")
++   (gt		"slt")
++   (unge	"cule")
++   (ungt	"cult")])
++
++(define_insn "_vfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(fcond_simd_rev:
++	  (match_operand:FVEC 1 "register_operand" "f")
++	  (match_operand:FVEC 2 "register_operand" "f")))]
++  ""
++  "vfcmp..\t%0,%2,%1";
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++;; vfcmp.*.{s/d} without defined RTX code, but with defined RTX code for
++;; its inverse.  Again, there are no fcmp.{sugt/suge/cgt/cge}.{s/d}
++;; menmonics in GAS, so we have to reverse the operands ourselves.
++(define_code_iterator fcond_inv [ge gt unge ungt])
++(define_code_iterator fcond_inv_rev [le lt unle unlt])
++(define_code_attr fcond_inv
++  [(ge		"sult")
++   (gt		"sule")
++   (unge	"clt")
++   (ungt	"cle")
++   (le		"sugt")
++   (lt		"suge")
++   (unle	"cgt")
++   (unlt	"cge")])
++(define_code_attr fcond_inv_rev_asm
++  [(le		"sult")
++   (lt		"sule")
++   (unle	"clt")
++   (unlt	"cle")])
++
++(define_insn "_vfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(not:
++	  (fcond_inv:
++	    (match_operand:FVEC 1 "register_operand" "f")
++	    (match_operand:FVEC 2 "register_operand" "f"))))]
++  ""
++  "vfcmp..\t%0,%1,%2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++(define_insn "_vfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(not:
++	  (fcond_inv_rev:
++	    (match_operand:FVEC 1 "register_operand" "f")
++	    (match_operand:FVEC 2 "register_operand" "f"))))]
++  ""
++  "vfcmp..\t%0,%2,%1"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++;; vfcmp.*.{s/d} instructions only as instrinsics
++(define_c_enum "unspec"
++  [UNSPEC_SIMD_FCMP_CAF
++   UNSPEC_SIMD_FCMP_SAF
++   UNSPEC_SIMD_FCMP_SEQ
++   UNSPEC_SIMD_FCMP_SUN
++   UNSPEC_SIMD_FCMP_SUEQ
++   UNSPEC_SIMD_FCMP_CNE
++   UNSPEC_SIMD_FCMP_SOR
++   UNSPEC_SIMD_FCMP_SUNE])
++
++(define_int_iterator SIMD_FCMP
++  [UNSPEC_SIMD_FCMP_CAF
++   UNSPEC_SIMD_FCMP_SAF
++   UNSPEC_SIMD_FCMP_SEQ
++   UNSPEC_SIMD_FCMP_SUN
++   UNSPEC_SIMD_FCMP_SUEQ
++   UNSPEC_SIMD_FCMP_CNE
++   UNSPEC_SIMD_FCMP_SOR
++   UNSPEC_SIMD_FCMP_SUNE])
++
++(define_int_attr fcond_unspec
++  [(UNSPEC_SIMD_FCMP_CAF	"caf")
++   (UNSPEC_SIMD_FCMP_SAF	"saf")
++   (UNSPEC_SIMD_FCMP_SEQ	"seq")
++   (UNSPEC_SIMD_FCMP_SUN	"sun")
++   (UNSPEC_SIMD_FCMP_SUEQ	"sueq")
++   (UNSPEC_SIMD_FCMP_CNE	"cne")
++   (UNSPEC_SIMD_FCMP_SOR	"sor")
++   (UNSPEC_SIMD_FCMP_SUNE	"sune")])
++
++(define_insn "_vfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FVEC 1 "register_operand" "f")
++			  (match_operand:FVEC 2 "register_operand" "f")]
++			 SIMD_FCMP))]
++  ""
++  "vfcmp..\t%0,%1,%2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
+ ; The LoongArch SX Instructions.
+ (include "lsx.md")
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c
+index 55d5a084c..f2f523622 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vcond-2.c
+@@ -69,8 +69,8 @@ TEST_CMP (nugt)
+ 
+ /* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.s} 3 } } */
+ /* { dg-final { scan-assembler-times {\txvfcmp\.ceq\.d} 3 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.s} 3 } } */
+-/* { dg-final { scan-assembler-times {\txvfcmp\.cne\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cune\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\txvfcmp\.cune\.d} 3 } } */
+ /* { dg-final { scan-assembler-times {\txvfcmp\.slt\.s} 6 } } */
+ /* { dg-final { scan-assembler-times {\txvfcmp\.slt\.d} 6 } } */
+ /* { dg-final { scan-assembler-times {\txvfcmp\.sle\.s} 6 } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c
+index 2214afd0a..486bedba4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vcond-2.c
+@@ -69,8 +69,8 @@ TEST_CMP (nugt)
+ 
+ /* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.s} 3 } } */
+ /* { dg-final { scan-assembler-times {\tvfcmp\.ceq\.d} 3 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.s} 3 } } */
+-/* { dg-final { scan-assembler-times {\tvfcmp\.cne\.d} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cune\.s} 3 } } */
++/* { dg-final { scan-assembler-times {\tvfcmp\.cune\.d} 3 } } */
+ /* { dg-final { scan-assembler-times {\tvfcmp\.slt\.s} 6 } } */
+ /* { dg-final { scan-assembler-times {\tvfcmp\.slt\.d} 6 } } */
+ /* { dg-final { scan-assembler-times {\tvfcmp\.sle\.s} 6 } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vfcmp-d.c b/gcc/testsuite/gcc.target/loongarch/vfcmp-d.c
+new file mode 100644
+index 000000000..8b870ef38
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vfcmp-d.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -ffixed-f0 -ffixed-f1 -ffixed-f2 -fno-vect-cost-model" } */
++
++#define F double
++#define I long long
++
++#include "vfcmp-f.c"
++
++/* { dg-final { scan-assembler "compare_quiet_equal:.*\tvfcmp\\.ceq\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\tvfcmp\\.cune\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater:.*\tvfcmp\\.slt\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\tvfcmp\\.sle\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less:.*\tvfcmp\\.slt\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\tvfcmp\\.sle\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\tvfcmp\\.sule\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\tvfcmp\\.sult\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_less:.*\tvfcmp\\.sule\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\tvfcmp\\.sult\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less:.*\tvfcmp\\.clt\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\tvfcmp\\.cle\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater:.*\tvfcmp\\.clt\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\tvfcmp\\.cle\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_less:.*\tvfcmp\\.cule\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\tvfcmp\\.cult\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\tvfcmp\\.cule\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\tvfcmp\\.cult\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_unordered:.*\tvfcmp\\.cun\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_ordered:.*\tvfcmp\\.cor\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_ordered\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vfcmp-f.c b/gcc/testsuite/gcc.target/loongarch/vfcmp-f.c
+new file mode 100644
+index 000000000..b9110b90c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vfcmp-f.c
+@@ -0,0 +1,178 @@
++/* Test mapping IEC 60559 operations to SIMD instructions.
++   For details read C23 Annex F.3 and LoongArch Vol. 1 section 3.2.2.1.  */
++
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -ffixed-f0 -ffixed-f1 -ffixed-f2 -fno-vect-cost-model" } */
++
++#ifndef F
++#define F float
++#endif
++
++#ifndef I
++#define I int
++#endif
++
++#ifndef VL
++#define VL 16
++#endif
++
++typedef F VF __attribute__ ((vector_size (VL)));
++typedef I VI __attribute__ ((vector_size (VL)));
++
++register VF a asm ("f0");
++register VF b asm ("f1");
++register VI c asm ("f2");
++
++void
++compare_quiet_equal (void)
++{
++  c = (a == b);
++}
++
++void
++compare_quiet_not_equal (void)
++{
++  c = (a != b);
++}
++
++void
++compare_signaling_greater (void)
++{
++  c = (a > b);
++}
++
++void
++compare_signaling_greater_equal (void)
++{
++  c = (a >= b);
++}
++
++void
++compare_signaling_less (void)
++{
++  c = (a < b);
++}
++
++void
++compare_signaling_less_equal (void)
++{
++  c = (a <= b);
++}
++
++void
++compare_signaling_not_greater (void)
++{
++  c = ~(a > b);
++}
++
++void
++compare_signaling_less_unordered (void)
++{
++  c = ~(a >= b);
++}
++
++void
++compare_signaling_not_less (void)
++{
++  c = ~(a < b);
++}
++
++void
++compare_signaling_greater_unordered (void)
++{
++  c = ~(a <= b);
++}
++
++void
++compare_quiet_less (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isless (a[i], b[i]) ? -1 : 0;
++}
++
++void
++compare_quiet_less_equal (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_islessequal (a[i], b[i]) ? -1 : 0;
++}
++
++void
++compare_quiet_greater (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isgreater (a[i], b[i]) ? -1 : 0;
++}
++
++void
++compare_quiet_greater_equal (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isgreaterequal (a[i], b[i]) ? -1 : 0;
++}
++
++void
++compare_quiet_not_less (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isless (a[i], b[i]) ? 0 : -1;
++}
++
++void
++compare_quiet_greater_unordered (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_islessequal (a[i], b[i]) ? 0 : -1;
++}
++
++void
++compare_quiet_not_greater (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isgreater (a[i], b[i]) ? 0 : -1;
++}
++
++void
++compare_quiet_less_unordered (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isgreaterequal (a[i], b[i]) ? 0 : -1;
++}
++
++void
++compare_quiet_unordered (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isunordered (a[i], b[i]) ? -1 : 0;
++}
++
++void
++compare_quiet_ordered (void)
++{
++  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
++    c[i] = __builtin_isunordered (a[i], b[i]) ? 0 : -1;
++}
++
++/* The "-" matches the .size directive after the function
++   body, so we can ensure the instruction is in the correct function.  */
++
++/* { dg-final { scan-assembler "compare_quiet_equal:.*\tvfcmp\\.ceq\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\tvfcmp\\.cune\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater:.*\tvfcmp\\.slt\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\tvfcmp\\.sle\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less:.*\tvfcmp\\.slt\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\tvfcmp\\.sle\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\tvfcmp\\.sule\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\tvfcmp\\.sult\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_less:.*\tvfcmp\\.sule\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\tvfcmp\\.sult\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less:.*\tvfcmp\\.clt\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\tvfcmp\\.cle\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater:.*\tvfcmp\\.clt\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\tvfcmp\\.cle\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_less:.*\tvfcmp\\.cule\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\tvfcmp\\.cult\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\tvfcmp\\.cule\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\tvfcmp\\.cult\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_unordered:.*\tvfcmp\\.cun\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_ordered:.*\tvfcmp\\.cor\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_ordered\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c b/gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c
+new file mode 100644
+index 000000000..d8017caaa
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -ffixed-f0 -ffixed-f1 -ffixed-f2 -fno-vect-cost-model" } */
++
++#define F double
++#define I long long
++#define VL 32
++
++#include "vfcmp-f.c"
++
++/* { dg-final { scan-assembler "compare_quiet_equal:.*\txvfcmp\\.ceq\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\txvfcmp\\.cune\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater:.*\txvfcmp\\.slt\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\txvfcmp\\.sle\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less:.*\txvfcmp\\.slt\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\txvfcmp\\.sle\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\txvfcmp\\.sule\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\txvfcmp\\.sult\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_less:.*\txvfcmp\\.sule\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\txvfcmp\\.sult\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less:.*\txvfcmp\\.clt\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\txvfcmp\\.cle\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater:.*\txvfcmp\\.clt\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\txvfcmp\\.cle\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_less:.*\txvfcmp\\.cule\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\txvfcmp\\.cult\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\txvfcmp\\.cule\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\txvfcmp\\.cult\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_unordered:.*\txvfcmp\\.cun\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_ordered:.*\txvfcmp\\.cor\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_ordered\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c b/gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c
+new file mode 100644
+index 000000000..b54556475
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c
+@@ -0,0 +1,27 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -ffixed-f0 -ffixed-f1 -ffixed-f2" } */
++
++#define VL 32
++
++#include "vfcmp-f.c"
++
++/* { dg-final { scan-assembler "compare_quiet_equal:.*\txvfcmp\\.ceq\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\txvfcmp\\.cune\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater:.*\txvfcmp\\.slt\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\txvfcmp\\.sle\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less:.*\txvfcmp\\.slt\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\txvfcmp\\.sle\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\txvfcmp\\.sule\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\txvfcmp\\.sult\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_not_less:.*\txvfcmp\\.sule\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\txvfcmp\\.sult\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less:.*\txvfcmp\\.clt\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\txvfcmp\\.cle\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater:.*\txvfcmp\\.clt\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\txvfcmp\\.cle\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_equal\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_less:.*\txvfcmp\\.cule\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_not_less\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\txvfcmp\\.cult\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\txvfcmp\\.cule\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_greater\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\txvfcmp\\.cult\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_unordered:.*\txvfcmp\\.cun\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_unordered\n" } } */
++/* { dg-final { scan-assembler "compare_quiet_ordered:.*\txvfcmp\\.cor\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_ordered\n" } } */
+-- 
+2.43.0
+
diff --git a/0076-LoongArch-Use-force_reg-instead-of-gen_reg_rtx-emit_.patch b/0076-LoongArch-Use-force_reg-instead-of-gen_reg_rtx-emit_.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7e9e742dbb69796b34d94350f0e6fe444878a854
--- /dev/null
+++ b/0076-LoongArch-Use-force_reg-instead-of-gen_reg_rtx-emit_.patch
@@ -0,0 +1,190 @@
+From be149d7f6527df6b16f3f9f8aec1e488466a71f1 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 19 Dec 2023 04:48:03 +0800
+Subject: [PATCH 076/188] LoongArch: Use force_reg instead of gen_reg_rtx +
+ emit_move_insn in vec_init expander [PR113033]
+
+Jakub says:
+
+    Then that seems like a bug in the loongarch vec_init pattern(s).
+    Those really don't have a predicate in any of the backends on the
+    input operand, so they need to force_reg it if it is something it
+    can't handle. I've looked e.g. at i386 vec_init and that is exactly
+    what it does, see the various tests + force_reg calls in
+    ix86_expand_vector_init*.
+
+So replace gen_reg_rtx + emit_move_insn with force_reg to fix PR 113033.
+
+gcc/ChangeLog:
+
+	PR target/113033
+	* config/loongarch/loongarch.cc
+	(loongarch_expand_vector_init_same): Replace gen_reg_rtx +
+	emit_move_insn with force_reg.
+	(loongarch_expand_vector_init): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/113033
+	* gcc.target/loongarch/pr113033.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 38 ++++++-------------
+ gcc/testsuite/gcc.target/loongarch/pr113033.c | 23 +++++++++++
+ 2 files changed, 35 insertions(+), 26 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr113033.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index a22601d88..000d2d623 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -10745,7 +10745,7 @@ loongarch_expand_vector_init_same (rtx target, rtx vals, unsigned nvar)
+ 	  gcc_unreachable ();
+ 	}
+     }
+-  temp = gen_reg_rtx (imode);
++
+   if (imode == GET_MODE (same))
+     temp2 = same;
+   else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD)
+@@ -10770,7 +10770,8 @@ loongarch_expand_vector_init_same (rtx target, rtx vals, unsigned nvar)
+       else
+ 	temp2 = lowpart_subreg (imode, same, GET_MODE (same));
+     }
+-  emit_move_insn (temp, temp2);
++
++  temp = force_reg (imode, temp2);
+ 
+   switch (vmode)
+     {
+@@ -10992,35 +10993,29 @@ loongarch_expand_vector_init (rtx target, rtx vals)
+ 			 to reduce the number of instructions.  */
+ 		      if (i == 1)
+ 			{
+-			  op0 = gen_reg_rtx (imode);
+-			  emit_move_insn (op0, val_hi[0]);
+-			  op1 = gen_reg_rtx (imode);
+-			  emit_move_insn (op1, val_hi[1]);
++			  op0 = force_reg (imode, val_hi[0]);
++			  op1 = force_reg (imode, val_hi[1]);
+ 			  emit_insn (
+ 			    loongarch_vec_repl2_256 (target_hi, op0, op1));
+ 			}
+ 		      else if (i > 1)
+ 			{
+-			  op0 = gen_reg_rtx (imode);
+-			  emit_move_insn (op0, val_hi[i]);
++			  op0 = force_reg (imode, val_hi[i]);
+ 			  emit_insn (
+ 			    loongarch_vec_set256 (target_hi, op0, GEN_INT (i)));
+ 			}
+ 		    }
+ 		  else
+ 		    {
++		      op0 = force_reg (imode, val_hi[i]);
+ 		      /* Assign the lowest element of val_hi to all elements
+ 			 of target_hi.  */
+ 		      if (i == 0)
+ 			{
+-			  op0 = gen_reg_rtx (imode);
+-			  emit_move_insn (op0, val_hi[0]);
+ 			  emit_insn (loongarch_vec_repl1_256 (target_hi, op0));
+ 			}
+ 		      else if (!rtx_equal_p (val_hi[i], val_hi[0]))
+ 			{
+-			  op0 = gen_reg_rtx (imode);
+-			  emit_move_insn (op0, val_hi[i]);
+ 			  emit_insn (
+ 			    loongarch_vec_set256 (target_hi, op0, GEN_INT (i)));
+ 			}
+@@ -11028,18 +11023,15 @@ loongarch_expand_vector_init (rtx target, rtx vals)
+ 		}
+ 	      if (!lo_same && !half_same)
+ 		{
++		  op0 = force_reg (imode, val_lo[i]);
+ 		  /* Assign the lowest element of val_lo to all elements
+ 		     of target_lo.  */
+ 		  if (i == 0)
+ 		    {
+-		      op0 = gen_reg_rtx (imode);
+-		      emit_move_insn (op0, val_lo[0]);
+ 		      emit_insn (loongarch_vec_repl1_128 (target_lo, op0));
+ 		    }
+ 		  else if (!rtx_equal_p (val_lo[i], val_lo[0]))
+ 		    {
+-		      op0 = gen_reg_rtx (imode);
+-		      emit_move_insn (op0, val_lo[i]);
+ 		      emit_insn (
+ 			loongarch_vec_set128 (target_lo, op0, GEN_INT (i)));
+ 		    }
+@@ -11071,16 +11063,13 @@ loongarch_expand_vector_init (rtx target, rtx vals)
+ 		     reduce the number of instructions.  */
+ 		  if (i == 1)
+ 		    {
+-		      op0 = gen_reg_rtx (imode);
+-		      emit_move_insn (op0, val[0]);
+-		      op1 = gen_reg_rtx (imode);
+-		      emit_move_insn (op1, val[1]);
++		      op0 = force_reg (imode, val[0]);
++		      op1 = force_reg (imode, val[1]);
+ 		      emit_insn (loongarch_vec_repl2_128 (target, op0, op1));
+ 		    }
+ 		  else if (i > 1)
+ 		    {
+-		      op0 = gen_reg_rtx (imode);
+-		      emit_move_insn (op0, val[i]);
++		      op0 = force_reg (imode, val[i]);
+ 		      emit_insn (
+ 			loongarch_vec_set128 (target, op0, GEN_INT (i)));
+ 		    }
+@@ -11093,18 +11082,15 @@ loongarch_expand_vector_init (rtx target, rtx vals)
+ 			loongarch_vec_mirror (target, target, const0_rtx));
+ 		      return;
+ 		    }
++		  op0 = force_reg (imode, val[i]);
+ 		  /* Assign the lowest element of val to all elements of
+ 		     target.  */
+ 		  if (i == 0)
+ 		    {
+-		      op0 = gen_reg_rtx (imode);
+-		      emit_move_insn (op0, val[0]);
+ 		      emit_insn (loongarch_vec_repl1_128 (target, op0));
+ 		    }
+ 		  else if (!rtx_equal_p (val[i], val[0]))
+ 		    {
+-		      op0 = gen_reg_rtx (imode);
+-		      emit_move_insn (op0, val[i]);
+ 		      emit_insn (
+ 			loongarch_vec_set128 (target, op0, GEN_INT (i)));
+ 		    }
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr113033.c b/gcc/testsuite/gcc.target/loongarch/pr113033.c
+new file mode 100644
+index 000000000..4ccd037d8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr113033.c
+@@ -0,0 +1,23 @@
++/* PR target/113033: ICE with vector left rotate */
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx" } */
++
++typedef unsigned __attribute__ ((vector_size (16))) v4si;
++typedef unsigned __attribute__ ((vector_size (32))) v8si;
++typedef unsigned long long __attribute__ ((vector_size (16))) v2di;
++typedef unsigned long long __attribute__ ((vector_size (32))) v4di;
++
++#define TEST(tp) \
++extern tp data_##tp; \
++tp \
++test_##tp (int x) \
++{ \
++  const int bit = sizeof (data_##tp[0]) * __CHAR_BIT__; \
++  data_##tp = data_##tp << (x & (bit - 1)) \
++	      | data_##tp >> (bit - x & (bit - 1)); \
++}
++
++TEST (v4si)
++TEST (v8si)
++TEST (v2di)
++TEST (v4di)
+-- 
+2.43.0
+
diff --git a/0077-LoongArch-Clean-up-vec_init-expander.patch b/0077-LoongArch-Clean-up-vec_init-expander.patch
new file mode 100644
index 0000000000000000000000000000000000000000..191e9f5b65f3db31bf7c48c198068ae56878e35c
--- /dev/null
+++ b/0077-LoongArch-Clean-up-vec_init-expander.patch
@@ -0,0 +1,83 @@
+From 38438021c770f077b78092299f22712fdd734814 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 19 Dec 2023 05:02:42 +0800
+Subject: [PATCH 077/188] LoongArch: Clean up vec_init expander
+
+Non functional change, clean up the code.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(loongarch_expand_vector_init_same): Remove "temp2" and reuse
+	"temp" instead.
+	(loongarch_expand_vector_init): Use gcc_unreachable () instead
+	of gcc_assert (0), and fix the comment for it.
+---
+ gcc/config/loongarch/loongarch.cc | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 000d2d623..3aeafeafd 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -10723,7 +10723,7 @@ loongarch_expand_vector_init_same (rtx target, rtx vals, unsigned nvar)
+   machine_mode vmode = GET_MODE (target);
+   machine_mode imode = GET_MODE_INNER (vmode);
+   rtx same = XVECEXP (vals, 0, 0);
+-  rtx temp, temp2;
++  rtx temp;
+ 
+   if (CONST_INT_P (same) && nvar == 0
+       && loongarch_signed_immediate_p (INTVAL (same), 10, 0))
+@@ -10747,17 +10747,17 @@ loongarch_expand_vector_init_same (rtx target, rtx vals, unsigned nvar)
+     }
+ 
+   if (imode == GET_MODE (same))
+-    temp2 = same;
++    temp = same;
+   else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD)
+     {
+       if (GET_CODE (same) == MEM)
+ 	{
+ 	  rtx reg_tmp = gen_reg_rtx (GET_MODE (same));
+ 	  loongarch_emit_move (reg_tmp, same);
+-	  temp2 = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0);
++	  temp = simplify_gen_subreg (imode, reg_tmp, GET_MODE (reg_tmp), 0);
+ 	}
+       else
+-	temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0);
++	temp = simplify_gen_subreg (imode, same, GET_MODE (same), 0);
+     }
+   else
+     {
+@@ -10765,13 +10765,13 @@ loongarch_expand_vector_init_same (rtx target, rtx vals, unsigned nvar)
+ 	{
+ 	  rtx reg_tmp = gen_reg_rtx (GET_MODE (same));
+ 	  loongarch_emit_move (reg_tmp, same);
+-	  temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp));
++	  temp = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp));
+ 	}
+       else
+-	temp2 = lowpart_subreg (imode, same, GET_MODE (same));
++	temp = lowpart_subreg (imode, same, GET_MODE (same));
+     }
+ 
+-  temp = force_reg (imode, temp2);
++  temp = force_reg (imode, temp);
+ 
+   switch (vmode)
+     {
+@@ -11117,8 +11117,8 @@ loongarch_expand_vector_init (rtx target, rtx vals)
+       return;
+     }
+ 
+-  /* Loongson is the only cpu with vectors with more elements.  */
+-  gcc_assert (0);
++  /* No LoongArch CPU supports vectors with more elements as at now.  */
++  gcc_unreachable ();
+ }
+ 
+ /* Implement HARD_REGNO_CALLER_SAVE_MODE.  */
+-- 
+2.43.0
+
diff --git a/0078-LoongArch-Fix-incorrect-code-generation-for-sad-patt.patch b/0078-LoongArch-Fix-incorrect-code-generation-for-sad-patt.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5c2ca9a584b2c41b70ffd661c57604df3bf3262e
--- /dev/null
+++ b/0078-LoongArch-Fix-incorrect-code-generation-for-sad-patt.patch
@@ -0,0 +1,78 @@
+From e5c0e4b416b8628585e27b524ba524261cacf713 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Thu, 14 Dec 2023 20:49:04 +0800
+Subject: [PATCH 078/188] LoongArch: Fix incorrect code generation for sad
+ pattern
+
+When I attempt to enable vect_usad_char effective target for LoongArch, slp-reduc-sad.c
+and vect-reduc-sad*.c tests fail. These tests fail because the sad pattern generates bad
+code. This patch to fixed them, for sad patterns, use zero expansion instead of sign
+expansion for reduction.
+
+Currently, we are fixing failed vectorized tests, and in the future, we will
+enable more tests of "vect" for LoongArch.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md: Use zero expansion instruction.
+	* config/loongarch/lsx.md: Ditto.
+---
+ gcc/config/loongarch/lasx.md | 8 ++++----
+ gcc/config/loongarch/lsx.md  | 8 ++++----
+ 2 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 921ce0eeb..9ca3f9278 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -5021,8 +5021,8 @@
+   rtx t2 = gen_reg_rtx (V16HImode);
+   rtx t3 = gen_reg_rtx (V8SImode);
+   emit_insn (gen_lasx_xvabsd_u_bu (t1, operands[1], operands[2]));
+-  emit_insn (gen_lasx_xvhaddw_h_b (t2, t1, t1));
+-  emit_insn (gen_lasx_xvhaddw_w_h (t3, t2, t2));
++  emit_insn (gen_lasx_xvhaddw_hu_bu (t2, t1, t1));
++  emit_insn (gen_lasx_xvhaddw_wu_hu (t3, t2, t2));
+   emit_insn (gen_addv8si3 (operands[0], t3, operands[3]));
+   DONE;
+ })
+@@ -5038,8 +5038,8 @@
+   rtx t2 = gen_reg_rtx (V16HImode);
+   rtx t3 = gen_reg_rtx (V8SImode);
+   emit_insn (gen_lasx_xvabsd_s_b (t1, operands[1], operands[2]));
+-  emit_insn (gen_lasx_xvhaddw_h_b (t2, t1, t1));
+-  emit_insn (gen_lasx_xvhaddw_w_h (t3, t2, t2));
++  emit_insn (gen_lasx_xvhaddw_hu_bu (t2, t1, t1));
++  emit_insn (gen_lasx_xvhaddw_wu_hu (t3, t2, t2));
+   emit_insn (gen_addv8si3 (operands[0], t3, operands[3]));
+   DONE;
+ })
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 57e0ee3d4..7f5fff40a 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -3385,8 +3385,8 @@
+   rtx t2 = gen_reg_rtx (V8HImode);
+   rtx t3 = gen_reg_rtx (V4SImode);
+   emit_insn (gen_lsx_vabsd_u_bu (t1, operands[1], operands[2]));
+-  emit_insn (gen_lsx_vhaddw_h_b (t2, t1, t1));
+-  emit_insn (gen_lsx_vhaddw_w_h (t3, t2, t2));
++  emit_insn (gen_lsx_vhaddw_hu_bu (t2, t1, t1));
++  emit_insn (gen_lsx_vhaddw_wu_hu (t3, t2, t2));
+   emit_insn (gen_addv4si3 (operands[0], t3, operands[3]));
+   DONE;
+ })
+@@ -3402,8 +3402,8 @@
+   rtx t2 = gen_reg_rtx (V8HImode);
+   rtx t3 = gen_reg_rtx (V4SImode);
+   emit_insn (gen_lsx_vabsd_s_b (t1, operands[1], operands[2]));
+-  emit_insn (gen_lsx_vhaddw_h_b (t2, t1, t1));
+-  emit_insn (gen_lsx_vhaddw_w_h (t3, t2, t2));
++  emit_insn (gen_lsx_vhaddw_hu_bu (t2, t1, t1));
++  emit_insn (gen_lsx_vhaddw_wu_hu (t3, t2, t2));
+   emit_insn (gen_addv4si3 (operands[0], t3, operands[3]));
+   DONE;
+ })
+-- 
+2.43.0
+
diff --git a/0079-LoongArch-Modify-the-check-type-of-the-vector-builti.patch b/0079-LoongArch-Modify-the-check-type-of-the-vector-builti.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1096a82ed60cb4d8e74b71c2579872a81d328b48
--- /dev/null
+++ b/0079-LoongArch-Modify-the-check-type-of-the-vector-builti.patch
@@ -0,0 +1,68 @@
+From bedb0338fadc373eeafc418a7bf6395d37eec78c Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Wed, 13 Dec 2023 09:31:07 +0800
+Subject: [PATCH 079/188] LoongArch: Modify the check type of the vector
+ builtin function.
+
+On LoongArch architecture, using the latest gcc14 in regression test,
+it is found that the vector test cases in vector directory appear FAIL
+entries with unmatched pointer types. In order to solve this kind of
+problem, the type of the variable in the check result is modified with
+the parameter type defined in the vector builtin function.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/simd_correctness_check.h:The variable
+	types in the check results are modified in conjunction with the
+	parameter types defined in the vector builtin function.
+---
+ .../loongarch/vector/simd_correctness_check.h       | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
+index eb7fbd59c..551340bd5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
++++ b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
+@@ -8,11 +8,12 @@
+       int fail = 0;                                                           \
+       for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i)             \
+         {                                                                     \
+-          long *temp_ref = &ref[i], *temp_res = &res[i];                      \
++          long long *temp_ref = (long long *)&ref[i],                         \
++		*temp_res = (long long *)&res[i];			      \
+           if (abs (*temp_ref - *temp_res) > 0)                                \
+             {                                                                 \
+               printf (" error: %s at line %ld , expected " #ref               \
+-                      "[%ld]:0x%lx, got: 0x%lx\n",                            \
++                      "[%ld]:0x%016lx, got: 0x%016lx\n",                      \
+                       __FILE__, line, i, *temp_ref, *temp_res);               \
+               fail = 1;                                                       \
+             }                                                                 \
+@@ -28,11 +29,11 @@
+       int fail = 0;                                                           \
+       for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i)             \
+         {                                                                     \
+-          int *temp_ref = &ref[i], *temp_res = &res[i];                       \
++          int *temp_ref = (int *)&ref[i], *temp_res = (int *)&res[i];         \
+           if (abs (*temp_ref - *temp_res) > 0)                                \
+             {                                                                 \
+               printf (" error: %s at line %ld , expected " #ref               \
+-                      "[%ld]:0x%x, got: 0x%x\n",                              \
++                      "[%ld]:0x%08x, got: 0x%08x\n",                          \
+                       __FILE__, line, i, *temp_ref, *temp_res);               \
+               fail = 1;                                                       \
+             }                                                                 \
+@@ -47,8 +48,8 @@
+     {                                                                         \
+       if (ref != res)                                                         \
+         {                                                                     \
+-          printf (" error: %s at line %ld , expected %d, got %d\n", __FILE__, \
+-                  line, ref, res);                                            \
++          printf (" error: %s at line %ld , expected 0x:%016x",               \
++		  "got 0x:%016x\n", __FILE__, line, ref, res);                \
+         }                                                                     \
+     }                                                                         \
+   while (0)
+-- 
+2.43.0
+
diff --git a/0080-LoongArch-extend.texi-Fix-typos-in-LSX-intrinsics.patch b/0080-LoongArch-extend.texi-Fix-typos-in-LSX-intrinsics.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b813c0775419c58fd4cc306b3722caad76abc043
--- /dev/null
+++ b/0080-LoongArch-extend.texi-Fix-typos-in-LSX-intrinsics.patch
@@ -0,0 +1,250 @@
+From 2e0092b20b845e0e301b1dab177b338e35981f10 Mon Sep 17 00:00:00 2001
+From: Jiajie Chen 
+Date: Wed, 13 Dec 2023 23:26:01 +0800
+Subject: [PATCH 080/188] LoongArch: extend.texi: Fix typos in LSX intrinsics
+
+Several typos have been found and fixed: missing semicolons, using
+variable name instead of type, duplicate functions and wrong types.
+
+gcc/ChangeLog:
+
+	* doc/extend.texi(__lsx_vabsd_di): remove extra `i' in name.
+	(__lsx_vfrintrm_d, __lsx_vfrintrm_s, __lsx_vfrintrne_d,
+	__lsx_vfrintrne_s, __lsx_vfrintrp_d, __lsx_vfrintrp_s, __lsx_vfrintrz_d,
+	__lsx_vfrintrz_s): fix return types.
+	(__lsx_vld, __lsx_vldi, __lsx_vldrepl_b, __lsx_vldrepl_d,
+	__lsx_vldrepl_h, __lsx_vldrepl_w, __lsx_vmaxi_b, __lsx_vmaxi_d,
+	__lsx_vmaxi_h, __lsx_vmaxi_w, __lsx_vmini_b, __lsx_vmini_d,
+	__lsx_vmini_h, __lsx_vmini_w, __lsx_vsrani_d_q, __lsx_vsrarni_d_q,
+	__lsx_vsrlni_d_q, __lsx_vsrlrni_d_q, __lsx_vssrani_d_q,
+	__lsx_vssrarni_d_q, __lsx_vssrarni_du_q, __lsx_vssrlni_d_q,
+	__lsx_vssrlrni_du_q, __lsx_vst, __lsx_vstx, __lsx_vssrani_du_q,
+	__lsx_vssrlni_du_q, __lsx_vssrlrni_d_q): add missing semicolon.
+	(__lsx_vpickve2gr_bu, __lsx_vpickve2gr_hu): fix typo in return
+	type.
+	(__lsx_vstelm_b, __lsx_vstelm_d, __lsx_vstelm_h,
+	__lsx_vstelm_w): use imm type for the last argument.
+	(__lsx_vsigncov_b, __lsx_vsigncov_h, __lsx_vsigncov_w,
+	__lsx_vsigncov_d): remove duplicate definitions.
+---
+ gcc/doc/extend.texi | 90 ++++++++++++++++++++++-----------------------
+ 1 file changed, 43 insertions(+), 47 deletions(-)
+
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index bb042ae78..ac8da4e80 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -16392,7 +16392,7 @@ int __lsx_bz_v (__m128i);
+ int __lsx_bz_w (__m128i);
+ __m128i __lsx_vabsd_b (__m128i, __m128i);
+ __m128i __lsx_vabsd_bu (__m128i, __m128i);
+-__m128i __lsx_vabsd_di (__m128i, __m128i);
++__m128i __lsx_vabsd_d (__m128i, __m128i);
+ __m128i __lsx_vabsd_du (__m128i, __m128i);
+ __m128i __lsx_vabsd_h (__m128i, __m128i);
+ __m128i __lsx_vabsd_hu (__m128i, __m128i);
+@@ -16598,14 +16598,14 @@ __m128 __lsx_vfnmsub_s (__m128, __m128, __m128);
+ __m128d __lsx_vfrecip_d (__m128d);
+ __m128 __lsx_vfrecip_s (__m128);
+ __m128d __lsx_vfrint_d (__m128d);
+-__m128i __lsx_vfrintrm_d (__m128d);
+-__m128i __lsx_vfrintrm_s (__m128);
+-__m128i __lsx_vfrintrne_d (__m128d);
+-__m128i __lsx_vfrintrne_s (__m128);
+-__m128i __lsx_vfrintrp_d (__m128d);
+-__m128i __lsx_vfrintrp_s (__m128);
+-__m128i __lsx_vfrintrz_d (__m128d);
+-__m128i __lsx_vfrintrz_s (__m128);
++__m128d __lsx_vfrintrm_d (__m128d);
++__m128 __lsx_vfrintrm_s (__m128);
++__m128d __lsx_vfrintrne_d (__m128d);
++__m128 __lsx_vfrintrne_s (__m128);
++__m128d __lsx_vfrintrp_d (__m128d);
++__m128 __lsx_vfrintrp_s (__m128);
++__m128d __lsx_vfrintrz_d (__m128d);
++__m128 __lsx_vfrintrz_s (__m128);
+ __m128 __lsx_vfrint_s (__m128);
+ __m128d __lsx_vfrsqrt_d (__m128d);
+ __m128 __lsx_vfrsqrt_s (__m128);
+@@ -16674,12 +16674,12 @@ __m128i __lsx_vinsgr2vr_b (__m128i, int, imm0_15);
+ __m128i __lsx_vinsgr2vr_d (__m128i, long int, imm0_1);
+ __m128i __lsx_vinsgr2vr_h (__m128i, int, imm0_7);
+ __m128i __lsx_vinsgr2vr_w (__m128i, int, imm0_3);
+-__m128i __lsx_vld (void *, imm_n2048_2047)
+-__m128i __lsx_vldi (imm_n1024_1023)
+-__m128i __lsx_vldrepl_b (void *, imm_n2048_2047)
+-__m128i __lsx_vldrepl_d (void *, imm_n256_255)
+-__m128i __lsx_vldrepl_h (void *, imm_n1024_1023)
+-__m128i __lsx_vldrepl_w (void *, imm_n512_511)
++__m128i __lsx_vld (void *, imm_n2048_2047);
++__m128i __lsx_vldi (imm_n1024_1023);
++__m128i __lsx_vldrepl_b (void *, imm_n2048_2047);
++__m128i __lsx_vldrepl_d (void *, imm_n256_255);
++__m128i __lsx_vldrepl_h (void *, imm_n1024_1023);
++__m128i __lsx_vldrepl_w (void *, imm_n512_511);
+ __m128i __lsx_vldx (void *, long int);
+ __m128i __lsx_vmadd_b (__m128i, __m128i, __m128i);
+ __m128i __lsx_vmadd_d (__m128i, __m128i, __m128i);
+@@ -16715,13 +16715,13 @@ __m128i __lsx_vmax_d (__m128i, __m128i);
+ __m128i __lsx_vmax_du (__m128i, __m128i);
+ __m128i __lsx_vmax_h (__m128i, __m128i);
+ __m128i __lsx_vmax_hu (__m128i, __m128i);
+-__m128i __lsx_vmaxi_b (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_b (__m128i, imm_n16_15);
+ __m128i __lsx_vmaxi_bu (__m128i, imm0_31);
+-__m128i __lsx_vmaxi_d (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_d (__m128i, imm_n16_15);
+ __m128i __lsx_vmaxi_du (__m128i, imm0_31);
+-__m128i __lsx_vmaxi_h (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_h (__m128i, imm_n16_15);
+ __m128i __lsx_vmaxi_hu (__m128i, imm0_31);
+-__m128i __lsx_vmaxi_w (__m128i, imm_n16_15)
++__m128i __lsx_vmaxi_w (__m128i, imm_n16_15);
+ __m128i __lsx_vmaxi_wu (__m128i, imm0_31);
+ __m128i __lsx_vmax_w (__m128i, __m128i);
+ __m128i __lsx_vmax_wu (__m128i, __m128i);
+@@ -16731,13 +16731,13 @@ __m128i __lsx_vmin_d (__m128i, __m128i);
+ __m128i __lsx_vmin_du (__m128i, __m128i);
+ __m128i __lsx_vmin_h (__m128i, __m128i);
+ __m128i __lsx_vmin_hu (__m128i, __m128i);
+-__m128i __lsx_vmini_b (__m128i, imm_n16_15)
++__m128i __lsx_vmini_b (__m128i, imm_n16_15);
+ __m128i __lsx_vmini_bu (__m128i, imm0_31);
+-__m128i __lsx_vmini_d (__m128i, imm_n16_15)
++__m128i __lsx_vmini_d (__m128i, imm_n16_15);
+ __m128i __lsx_vmini_du (__m128i, imm0_31);
+-__m128i __lsx_vmini_h (__m128i, imm_n16_15)
++__m128i __lsx_vmini_h (__m128i, imm_n16_15);
+ __m128i __lsx_vmini_hu (__m128i, imm0_31);
+-__m128i __lsx_vmini_w (__m128i, imm_n16_15)
++__m128i __lsx_vmini_w (__m128i, imm_n16_15);
+ __m128i __lsx_vmini_wu (__m128i, imm0_31);
+ __m128i __lsx_vmin_w (__m128i, __m128i);
+ __m128i __lsx_vmin_wu (__m128i, __m128i);
+@@ -16826,11 +16826,11 @@ __m128i __lsx_vpickod_d (__m128i, __m128i);
+ __m128i __lsx_vpickod_h (__m128i, __m128i);
+ __m128i __lsx_vpickod_w (__m128i, __m128i);
+ int __lsx_vpickve2gr_b (__m128i, imm0_15);
+-unsinged int __lsx_vpickve2gr_bu (__m128i, imm0_15);
++unsigned int __lsx_vpickve2gr_bu (__m128i, imm0_15);
+ long int __lsx_vpickve2gr_d (__m128i, imm0_1);
+ unsigned long int __lsx_vpickve2gr_du (__m128i, imm0_1);
+ int __lsx_vpickve2gr_h (__m128i, imm0_7);
+-unsinged int __lsx_vpickve2gr_hu (__m128i, imm0_7);
++unsigned int __lsx_vpickve2gr_hu (__m128i, imm0_7);
+ int __lsx_vpickve2gr_w (__m128i, imm0_3);
+ unsigned int __lsx_vpickve2gr_wu (__m128i, imm0_3);
+ __m128i __lsx_vreplgr2vr_b (int);
+@@ -16893,10 +16893,6 @@ __m128i __lsx_vsigncov_b (__m128i, __m128i);
+ __m128i __lsx_vsigncov_d (__m128i, __m128i);
+ __m128i __lsx_vsigncov_h (__m128i, __m128i);
+ __m128i __lsx_vsigncov_w (__m128i, __m128i);
+-__m128i __lsx_vsigncov_b (__m128i, __m128i);
+-__m128i __lsx_vsigncov_d (__m128i, __m128i);
+-__m128i __lsx_vsigncov_h (__m128i, __m128i);
+-__m128i __lsx_vsigncov_w (__m128i, __m128i);
+ __m128i __lsx_vsle_b (__m128i, __m128i);
+ __m128i __lsx_vsle_bu (__m128i, __m128i);
+ __m128i __lsx_vsle_d (__m128i, __m128i);
+@@ -16953,7 +16949,7 @@ __m128i __lsx_vsrai_w (__m128i, imm0_31);
+ __m128i __lsx_vsran_b_h (__m128i, __m128i);
+ __m128i __lsx_vsran_h_w (__m128i, __m128i);
+ __m128i __lsx_vsrani_b_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vsrani_d_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vsrani_d_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vsrani_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vsrani_w_d (__m128i, __m128i, imm0_63);
+ __m128i __lsx_vsran_w_d (__m128i, __m128i);
+@@ -16967,7 +16963,7 @@ __m128i __lsx_vsrari_w (__m128i, imm0_31);
+ __m128i __lsx_vsrarn_b_h (__m128i, __m128i);
+ __m128i __lsx_vsrarn_h_w (__m128i, __m128i);
+ __m128i __lsx_vsrarni_b_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vsrarni_d_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vsrarni_d_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vsrarni_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vsrarni_w_d (__m128i, __m128i, imm0_63);
+ __m128i __lsx_vsrarn_w_d (__m128i, __m128i);
+@@ -16983,7 +16979,7 @@ __m128i __lsx_vsrli_w (__m128i, imm0_31);
+ __m128i __lsx_vsrln_b_h (__m128i, __m128i);
+ __m128i __lsx_vsrln_h_w (__m128i, __m128i);
+ __m128i __lsx_vsrlni_b_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vsrlni_d_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vsrlni_d_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vsrlni_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vsrlni_w_d (__m128i, __m128i, imm0_63);
+ __m128i __lsx_vsrln_w_d (__m128i, __m128i);
+@@ -16997,7 +16993,7 @@ __m128i __lsx_vsrlri_w (__m128i, imm0_31);
+ __m128i __lsx_vsrlrn_b_h (__m128i, __m128i);
+ __m128i __lsx_vsrlrn_h_w (__m128i, __m128i);
+ __m128i __lsx_vsrlrni_b_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vsrlrni_d_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vsrlrni_d_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vsrlrni_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vsrlrni_w_d (__m128i, __m128i, imm0_63);
+ __m128i __lsx_vsrlrn_w_d (__m128i, __m128i);
+@@ -17009,8 +17005,8 @@ __m128i __lsx_vssran_hu_w (__m128i, __m128i);
+ __m128i __lsx_vssran_h_w (__m128i, __m128i);
+ __m128i __lsx_vssrani_b_h (__m128i, __m128i, imm0_15);
+ __m128i __lsx_vssrani_bu_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vssrani_d_q (__m128i, __m128i, imm0_127)
+-__m128i __lsx_vssrani_du_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vssrani_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrani_du_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vssrani_hu_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrani_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrani_w_d (__m128i, __m128i, imm0_63);
+@@ -17023,8 +17019,8 @@ __m128i __lsx_vssrarn_hu_w (__m128i, __m128i);
+ __m128i __lsx_vssrarn_h_w (__m128i, __m128i);
+ __m128i __lsx_vssrarni_b_h (__m128i, __m128i, imm0_15);
+ __m128i __lsx_vssrarni_bu_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vssrarni_d_q (__m128i, __m128i, imm0_127)
+-__m128i __lsx_vssrarni_du_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vssrarni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrarni_du_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vssrarni_hu_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrarni_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrarni_w_d (__m128i, __m128i, imm0_63);
+@@ -17037,8 +17033,8 @@ __m128i __lsx_vssrln_hu_w (__m128i, __m128i);
+ __m128i __lsx_vssrln_h_w (__m128i, __m128i);
+ __m128i __lsx_vssrlni_b_h (__m128i, __m128i, imm0_15);
+ __m128i __lsx_vssrlni_bu_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vssrlni_d_q (__m128i, __m128i, imm0_127)
+-__m128i __lsx_vssrlni_du_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vssrlni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrlni_du_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vssrlni_hu_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrlni_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrlni_w_d (__m128i, __m128i, imm0_63);
+@@ -17051,8 +17047,8 @@ __m128i __lsx_vssrlrn_hu_w (__m128i, __m128i);
+ __m128i __lsx_vssrlrn_h_w (__m128i, __m128i);
+ __m128i __lsx_vssrlrni_b_h (__m128i, __m128i, imm0_15);
+ __m128i __lsx_vssrlrni_bu_h (__m128i, __m128i, imm0_15);
+-__m128i __lsx_vssrlrni_d_q (__m128i, __m128i, imm0_127)
+-__m128i __lsx_vssrlrni_du_q (__m128i, __m128i, imm0_127)
++__m128i __lsx_vssrlrni_d_q (__m128i, __m128i, imm0_127);
++__m128i __lsx_vssrlrni_du_q (__m128i, __m128i, imm0_127);
+ __m128i __lsx_vssrlrni_hu_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrlrni_h_w (__m128i, __m128i, imm0_31);
+ __m128i __lsx_vssrlrni_w_d (__m128i, __m128i, imm0_63);
+@@ -17067,12 +17063,12 @@ __m128i __lsx_vssub_h (__m128i, __m128i);
+ __m128i __lsx_vssub_hu (__m128i, __m128i);
+ __m128i __lsx_vssub_w (__m128i, __m128i);
+ __m128i __lsx_vssub_wu (__m128i, __m128i);
+-void __lsx_vst (__m128i, void *, imm_n2048_2047)
+-void __lsx_vstelm_b (__m128i, void *, imm_n128_127, idx);
+-void __lsx_vstelm_d (__m128i, void *, imm_n128_127, idx);
+-void __lsx_vstelm_h (__m128i, void *, imm_n128_127, idx);
+-void __lsx_vstelm_w (__m128i, void *, imm_n128_127, idx);
+-void __lsx_vstx (__m128i, void *, long int)
++void __lsx_vst (__m128i, void *, imm_n2048_2047);
++void __lsx_vstelm_b (__m128i, void *, imm_n128_127, imm0_15);
++void __lsx_vstelm_d (__m128i, void *, imm_n128_127, imm0_1);
++void __lsx_vstelm_h (__m128i, void *, imm_n128_127, imm0_7);
++void __lsx_vstelm_w (__m128i, void *, imm_n128_127, imm0_3);
++void __lsx_vstx (__m128i, void *, long int);
+ __m128i __lsx_vsub_b (__m128i, __m128i);
+ __m128i __lsx_vsub_d (__m128i, __m128i);
+ __m128i __lsx_vsub_h (__m128i, __m128i);
+-- 
+2.43.0
+
diff --git a/0081-LoongArch-Fix-builtin-function-prototypes-for-LASX-i.patch b/0081-LoongArch-Fix-builtin-function-prototypes-for-LASX-i.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a4aa098a4abec17dd0007bc80e0f6777ccaf1408
--- /dev/null
+++ b/0081-LoongArch-Fix-builtin-function-prototypes-for-LASX-i.patch
@@ -0,0 +1,60 @@
+From d9965ed8d9f4244ac1948c6fb92c7c0f7d80b3a4 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Tue, 19 Dec 2023 16:43:17 +0800
+Subject: [PATCH 081/188] LoongArch: Fix builtin function prototypes for LASX
+ in doc.
+
+gcc/ChangeLog:
+
+	* doc/extend.texi:According to the documents submitted earlier,
+	Two problems with function return types and using the actual types
+	of parameters instead of variable names were found and fixed.
+---
+ gcc/doc/extend.texi | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index ac8da4e80..c793c9c5d 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -17438,14 +17438,14 @@ __m256 __lasx_xvfnmsub_s (__m256, __m256, __m256);
+ __m256d __lasx_xvfrecip_d (__m256d);
+ __m256 __lasx_xvfrecip_s (__m256);
+ __m256d __lasx_xvfrint_d (__m256d);
+-__m256i __lasx_xvfrintrm_d (__m256d);
+-__m256i __lasx_xvfrintrm_s (__m256);
+-__m256i __lasx_xvfrintrne_d (__m256d);
+-__m256i __lasx_xvfrintrne_s (__m256);
+-__m256i __lasx_xvfrintrp_d (__m256d);
+-__m256i __lasx_xvfrintrp_s (__m256);
+-__m256i __lasx_xvfrintrz_d (__m256d);
+-__m256i __lasx_xvfrintrz_s (__m256);
++__m256d __lasx_xvfrintrm_d (__m256d);
++__m256 __lasx_xvfrintrm_s (__m256);
++__m256d __lasx_xvfrintrne_d (__m256d);
++__m256 __lasx_xvfrintrne_s (__m256);
++__m256d __lasx_xvfrintrp_d (__m256d);
++__m256 __lasx_xvfrintrp_s (__m256);
++__m256d __lasx_xvfrintrz_d (__m256d);
++__m256 __lasx_xvfrintrz_s (__m256);
+ __m256 __lasx_xvfrint_s (__m256);
+ __m256d __lasx_xvfrsqrt_d (__m256d);
+ __m256 __lasx_xvfrsqrt_s (__m256);
+@@ -17912,10 +17912,10 @@ __m256i __lasx_xvssub_hu (__m256i, __m256i);
+ __m256i __lasx_xvssub_w (__m256i, __m256i);
+ __m256i __lasx_xvssub_wu (__m256i, __m256i);
+ void __lasx_xvst (__m256i, void *, imm_n2048_2047);
+-void __lasx_xvstelm_b (__m256i, void *, imm_n128_127, idx);
+-void __lasx_xvstelm_d (__m256i, void *, imm_n128_127, idx);
+-void __lasx_xvstelm_h (__m256i, void *, imm_n128_127, idx);
+-void __lasx_xvstelm_w (__m256i, void *, imm_n128_127, idx);
++void __lasx_xvstelm_b (__m256i, void *, imm_n128_127, imm0_31);
++void __lasx_xvstelm_d (__m256i, void *, imm_n128_127, imm0_3);
++void __lasx_xvstelm_h (__m256i, void *, imm_n128_127, imm0_15);
++void __lasx_xvstelm_w (__m256i, void *, imm_n128_127, imm0_7);
+ void __lasx_xvstx (__m256i, void *, long int);
+ __m256i __lasx_xvsub_b (__m256i, __m256i);
+ __m256i __lasx_xvsub_d (__m256i, __m256i);
+-- 
+2.43.0
+
diff --git a/0082-LoongArch-Add-asm-modifiers-to-the-LSX-and-LASX-dire.patch b/0082-LoongArch-Add-asm-modifiers-to-the-LSX-and-LASX-dire.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ca4d8f45c979fc5bfafe22709e1b28d7e9029976
--- /dev/null
+++ b/0082-LoongArch-Add-asm-modifiers-to-the-LSX-and-LASX-dire.patch
@@ -0,0 +1,92 @@
+From 48f0d47eb6dc2c799c845a25cfabd586bd176378 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Tue, 5 Dec 2023 14:44:35 +0800
+Subject: [PATCH 082/188] LoongArch: Add asm modifiers to the LSX and LASX
+ directives in the doc.
+
+gcc/ChangeLog:
+
+	* doc/extend.texi:Add modifiers to the vector of asm in the doc.
+	* doc/md.texi:Refine the description of the modifier 'f' in the doc.
+---
+ gcc/doc/extend.texi | 46 +++++++++++++++++++++++++++++++++++++++++++++
+ gcc/doc/md.texi     |  2 +-
+ 2 files changed, 47 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index c793c9c5d..bcb9329c2 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -11424,10 +11424,56 @@ The list below describes the supported modifiers and their effects for LoongArch
+ @item @code{d} @tab Same as @code{c}.
+ @item @code{i} @tab Print the character ''@code{i}'' if the operand is not a register.
+ @item @code{m} @tab Same as @code{c}, but the printed value is @code{operand - 1}.
++@item @code{u} @tab Print a LASX register.
++@item @code{w} @tab Print a LSX register.
+ @item @code{X} @tab Print a constant integer operand in hexadecimal.
+ @item @code{z} @tab Print the operand in its unmodified form, followed by a comma.
+ @end multitable
+ 
++References to input and output operands in the assembler template of extended
++asm statements can use modifiers to affect the way the operands are formatted
++in the code output to the assembler.  For example, the following code uses the
++'w' modifier for LoongArch:
++
++@example
++test-asm.c:
++
++#include 
++
++__m128i foo (void)
++@{
++__m128i  a,b,c;
++__asm__ ("vadd.d %w0,%w1,%w2\n\t"
++   :"=f" (c)
++   :"f" (a),"f" (b));
++
++return c;
++@}
++
++@end example
++
++@noindent
++The compile command for the test case is as follows:
++
++@example
++gcc test-asm.c -mlsx -S -o test-asm.s
++@end example
++
++@noindent
++The assembly statement produces the following assembly code:
++
++@example
++vadd.d $vr0,$vr0,$vr1
++@end example
++
++This is a 128-bit vector addition instruction, @code{c} (referred to in the
++template string as %0) is the output, and @code{a} (%1) and @code{b} (%2) are
++the inputs.  @code{__m128i} is a vector data type defined in the  file
++@code{lsxintrin.h} (@xref{LoongArch SX Vector Intrinsics}).  The symbol '=f'
++represents a constraint using a floating-point register as an output type, and
++the 'f' in the input operand represents a constraint using a floating-point
++register operand, which can refer to the definition of a constraint
++(@xref{Constraints}) in gcc.
+ 
+ @lowersections
+ @include md.texi
+diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
+index b58da0787..a2e839073 100644
+--- a/gcc/doc/md.texi
++++ b/gcc/doc/md.texi
+@@ -2750,7 +2750,7 @@ $r1h
+ @item LoongArch---@file{config/loongarch/constraints.md}
+ @table @code
+ @item f
+-A floating-point register (if available).
++A floating-point or vector register (if available).
+ @item k
+ A memory operand whose address is formed by a base register and
+ (optionally scaled) index register.
+-- 
+2.43.0
+
diff --git a/0083-LoongArch-Implement-FCCmode-reload-and-cstore-ANYF-m.patch b/0083-LoongArch-Implement-FCCmode-reload-and-cstore-ANYF-m.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c9ceaad842e9a7c34b5304f9e27b2b2b03b91af8
--- /dev/null
+++ b/0083-LoongArch-Implement-FCCmode-reload-and-cstore-ANYF-m.patch
@@ -0,0 +1,392 @@
+From b199de440fc877efdd1dde90b5c1c5111e060c1b Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 15 Dec 2023 01:49:40 +0800
+Subject: [PATCH 083/188] LoongArch: Implement FCCmode reload and
+ cstore4
+
+We used a branch to load floating-point comparison results into GPR.
+This is very slow when the branch is not predictable.
+
+Implement movfcc so we can reload FCCmode into GPRs, FPRs, and MEM.
+Then implement cstore4.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-tune.h
+	(loongarch_rtx_cost_data::movcf2gr): New field.
+	(loongarch_rtx_cost_data::movcf2gr_): New method.
+	(loongarch_rtx_cost_data::use_movcf2gr): New method.
+	* config/loongarch/loongarch-def.cc
+	(loongarch_rtx_cost_data::loongarch_rtx_cost_data): Set movcf2gr
+	to COSTS_N_INSNS (7) and movgr2cf to COSTS_N_INSNS (15), based
+	on timing on LA464.
+	(loongarch_cpu_rtx_cost_data): Set movcf2gr and movgr2cf to
+	COSTS_N_INSNS (1) for LA664.
+	(loongarch_rtx_cost_optimize_size): Set movcf2gr and movgr2cf to
+	COSTS_N_INSNS (1) + 1.
+	* config/loongarch/predicates.md (loongarch_fcmp_operator): New
+	predicate.
+	* config/loongarch/loongarch.md (movfcc): Change to
+	define_expand.
+	(movfcc_internal): New define_insn.
+	(fcc_to_): New define_insn.
+	(cstore4): New define_expand.
+	* config/loongarch/loongarch.cc
+	(loongarch_hard_regno_mode_ok_uncached): Allow FCCmode in GPRs
+	and GPRs.
+	(loongarch_secondary_reload): Reload FCCmode via FPR and/or GPR.
+	(loongarch_emit_float_compare): Call gen_reg_rtx instead of
+	loongarch_allocate_fcc.
+	(loongarch_allocate_fcc): Remove.
+	(loongarch_move_to_gpr_cost): Handle FCC_REGS -> GR_REGS.
+	(loongarch_move_from_gpr_cost): Handle GR_REGS -> FCC_REGS.
+	(loongarch_register_move_cost): Handle FCC_REGS -> FCC_REGS,
+	FCC_REGS -> FP_REGS, and FP_REGS -> FCC_REGS.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/movcf2gr.c: New test.
+	* gcc.target/loongarch/movcf2gr-via-fr.c: New test.
+---
+ gcc/config/loongarch/loongarch-def.cc         | 13 +++-
+ gcc/config/loongarch/loongarch-tune.h         | 15 +++-
+ gcc/config/loongarch/loongarch.cc             | 70 ++++++++++++-------
+ gcc/config/loongarch/loongarch.md             | 69 ++++++++++++++++--
+ gcc/config/loongarch/predicates.md            |  4 ++
+ .../gcc.target/loongarch/movcf2gr-via-fr.c    | 10 +++
+ gcc/testsuite/gcc.target/loongarch/movcf2gr.c |  9 +++
+ 7 files changed, 157 insertions(+), 33 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/movcf2gr-via-fr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/movcf2gr.c
+
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index 4a8885e83..843be78e4 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -101,15 +101,21 @@ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
+     int_mult_di (COSTS_N_INSNS (4)),
+     int_div_si (COSTS_N_INSNS (5)),
+     int_div_di (COSTS_N_INSNS (5)),
++    movcf2gr (COSTS_N_INSNS (7)),
++    movgr2cf (COSTS_N_INSNS (15)),
+     branch_cost (6),
+     memory_latency (4) {}
+ 
+ /* The following properties cannot be looked up directly using "cpucfg".
+  So it is necessary to provide a default value for "unknown native"
+  tune targets (i.e. -mtune=native while PRID does not correspond to
+- any known "-mtune" type).  Currently all numbers are default.  */
++ any known "-mtune" type).  */
+ array_tune loongarch_cpu_rtx_cost_data =
+-  array_tune ();
++  array_tune ()
++    .set (CPU_LA664,
++	  loongarch_rtx_cost_data ()
++	    .movcf2gr_ (COSTS_N_INSNS (1))
++	    .movgr2cf_ (COSTS_N_INSNS (1)));
+ 
+ /* RTX costs to use when optimizing for size.
+    We use a value slightly larger than COSTS_N_INSNS (1) for all of them
+@@ -125,7 +131,8 @@ const loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size =
+     .int_mult_si_ (COST_COMPLEX_INSN)
+     .int_mult_di_ (COST_COMPLEX_INSN)
+     .int_div_si_ (COST_COMPLEX_INSN)
+-    .int_div_di_ (COST_COMPLEX_INSN);
++    .int_div_di_ (COST_COMPLEX_INSN)
++    .movcf2gr_ (COST_COMPLEX_INSN);
+ 
+ array_tune loongarch_cpu_issue_rate = array_tune ()
+   .set (CPU_NATIVE, 4)
+diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h
+index 616b94e87..26f163f0a 100644
+--- a/gcc/config/loongarch/loongarch-tune.h
++++ b/gcc/config/loongarch/loongarch-tune.h
+@@ -35,6 +35,8 @@ struct loongarch_rtx_cost_data
+   unsigned short int_mult_di;
+   unsigned short int_div_si;
+   unsigned short int_div_di;
++  unsigned short movcf2gr;
++  unsigned short movgr2cf;
+   unsigned short branch_cost;
+   unsigned short memory_latency;
+ 
+@@ -95,6 +97,18 @@ struct loongarch_rtx_cost_data
+     return *this;
+   }
+ 
++  loongarch_rtx_cost_data movcf2gr_ (unsigned short _movcf2gr)
++  {
++    movcf2gr = _movcf2gr;
++    return *this;
++  }
++
++  loongarch_rtx_cost_data movgr2cf_ (unsigned short _movgr2cf)
++  {
++    movgr2cf = _movgr2cf;
++    return *this;
++  }
++
+   loongarch_rtx_cost_data branch_cost_ (unsigned short _branch_cost)
+   {
+     branch_cost = _branch_cost;
+@@ -106,7 +120,6 @@ struct loongarch_rtx_cost_data
+     memory_latency = _memory_latency;
+     return *this;
+   }
+-
+ };
+ 
+ /* Costs to use when optimizing for size.  */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 3aeafeafd..56f631b1a 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5119,29 +5119,6 @@ loongarch_zero_if_equal (rtx cmp0, rtx cmp1)
+ 		       OPTAB_DIRECT);
+ }
+ 
+-/* Allocate a floating-point condition-code register of mode MODE.  */
+-
+-static rtx
+-loongarch_allocate_fcc (machine_mode mode)
+-{
+-  unsigned int regno, count;
+-
+-  gcc_assert (TARGET_HARD_FLOAT);
+-
+-  if (mode == FCCmode)
+-    count = 1;
+-  else
+-    gcc_unreachable ();
+-
+-  cfun->machine->next_fcc += -cfun->machine->next_fcc & (count - 1);
+-  if (cfun->machine->next_fcc > FCC_REG_LAST - FCC_REG_FIRST)
+-    cfun->machine->next_fcc = 0;
+-
+-  regno = FCC_REG_FIRST + cfun->machine->next_fcc;
+-  cfun->machine->next_fcc += count;
+-  return gen_rtx_REG (mode, regno);
+-}
+-
+ /* Sign- or zero-extend OP0 and OP1 for integer comparisons.  */
+ 
+ static void
+@@ -5256,7 +5233,7 @@ loongarch_emit_float_compare (enum rtx_code *code, rtx *op0, rtx *op1)
+      operands for FCMP.cond.fmt, instead a reversed condition code is
+      required and a test for false.  */
+   *code = NE;
+-  *op0 = loongarch_allocate_fcc (FCCmode);
++  *op0 = gen_reg_rtx (FCCmode);
+ 
+   *op1 = const0_rtx;
+   loongarch_emit_binary (cmp_code, *op0, cmp_op0, cmp_op1);
+@@ -6626,7 +6603,7 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
+   enum mode_class mclass;
+ 
+   if (mode == FCCmode)
+-    return FCC_REG_P (regno);
++    return FCC_REG_P (regno) || GP_REG_P (regno) || FP_REG_P (regno);
+ 
+   size = GET_MODE_SIZE (mode);
+   mclass = GET_MODE_CLASS (mode);
+@@ -6841,6 +6818,9 @@ loongarch_move_to_gpr_cost (reg_class_t from)
+       /* MOVFR2GR, etc.  */
+       return 4;
+ 
++    case FCC_REGS:
++      return loongarch_cost->movcf2gr;
++
+     default:
+       return 0;
+     }
+@@ -6863,6 +6843,9 @@ loongarch_move_from_gpr_cost (reg_class_t to)
+       /* MOVGR2FR, etc.  */
+       return 4;
+ 
++    case FCC_REGS:
++      return loongarch_cost->movgr2cf;
++
+     default:
+       return 0;
+     }
+@@ -6897,6 +6880,10 @@ loongarch_register_move_cost (machine_mode mode, reg_class_t from,
+   if (to == dregs)
+     return loongarch_move_to_gpr_cost (from);
+ 
++  /* fcc -> fcc, fcc -> fpr, or fpr -> fcc. */
++  if (from == FCC_REGS || to == FCC_REGS)
++    return COSTS_N_INSNS (from == to ? 2 : 1);
++
+   /* Handles cases that require a GPR temporary.  */
+   cost1 = loongarch_move_to_gpr_cost (from);
+   if (cost1 != 0)
+@@ -6933,6 +6920,39 @@ loongarch_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+ 
+   regno = true_regnum (x);
+ 
++  if (mode == FCCmode)
++    {
++      if (reg_class_subset_p (rclass, FCC_REGS) && !FP_REG_P (regno))
++	{
++	  if (FCC_REG_P (regno))
++	    return FP_REGS;
++
++	  auto fn = in_p ? loongarch_move_from_gpr_cost
++			 : loongarch_move_to_gpr_cost;
++
++	  if (fn (FCC_REGS) > fn (FP_REGS) + COSTS_N_INSNS (1))
++	    return FP_REGS;
++
++	  return GP_REG_P (regno) ? NO_REGS : GR_REGS;
++	}
++
++      if (reg_class_subset_p (rclass, GR_REGS) && FCC_REG_P (regno))
++	{
++	  auto fn = in_p ? loongarch_move_to_gpr_cost
++			 : loongarch_move_from_gpr_cost;
++
++	  if (fn (FCC_REGS) > fn (FP_REGS) + COSTS_N_INSNS (1))
++	    return FP_REGS;
++
++	  return NO_REGS;
++	}
++
++      if (reg_class_subset_p (rclass, FP_REGS) && MEM_P (x))
++	return GR_REGS;
++
++      return NO_REGS;
++    }
++
+   if (reg_class_subset_p (rclass, FP_REGS))
+     {
+       if (regno < 0
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 23368008e..6cf71d9e4 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2283,11 +2283,72 @@
+ 
+ ;; Clear one FCC register
+ 
+-(define_insn "movfcc"
+-  [(set (match_operand:FCC 0 "register_operand" "=z")
+-	(const_int 0))]
++(define_expand "movfcc"
++  [(set (match_operand:FCC 0 "")
++	(match_operand:FCC 1 ""))]
++  "TARGET_HARD_FLOAT"
++{
++  if (memory_operand (operands[0], FCCmode)
++      && memory_operand (operands[1], FCCmode))
++    operands[1] = force_reg (FCCmode, operands[1]);
++})
++
++(define_insn "movfcc_internal"
++  [(set (match_operand:FCC 0 "nonimmediate_operand"
++			     "=z,z,*f,*f,*r,*r,*m,*f,*r,z,*r")
++	(match_operand:FCC 1 "reg_or_0_operand"
++			     "J,*f,z,*f,J*r,*m,J*r,J*r,*f,*r,z"))]
++  "TARGET_HARD_FLOAT"
++  "@
++   fcmp.caf.s\t%0,$f0,$f0
++   movfr2cf\t%0,%1
++   movcf2fr\t%0,%1
++   fmov.s\t%0,%1
++   or\t%0,%z1,$r0
++   ld.b\t%0,%1
++   st.b\t%z1,%0
++   movgr2fr.w\t%0,%1
++   movfr2gr.s\t%0,%1
++   movgr2cf\t%0,%1
++   movcf2gr\t%0,%1"
++  [(set_attr "type" "move")
++   (set_attr "mode" "FCC")])
++
++(define_insn "fcc_to_"
++  [(set (match_operand:X 0 "register_operand" "=r")
++	(if_then_else:X (ne (match_operand:FCC 1 "register_operand" "0")
++			    (const_int 0))
++			(const_int 1)
++			(const_int 0)))]
++  "TARGET_HARD_FLOAT"
+   ""
+-  "fcmp.caf.s\t%0,$f0,$f0")
++  [(set_attr "length" "0")
++   (set_attr "type" "ghost")])
++
++(define_expand "cstore4"
++  [(set (match_operand:SI 0 "register_operand")
++	(match_operator:SI 1 "loongarch_fcmp_operator"
++	  [(match_operand:ANYF 2 "register_operand")
++	   (match_operand:ANYF 3 "register_operand")]))]
++  ""
++  {
++    rtx fcc = gen_reg_rtx (FCCmode);
++    rtx cmp = gen_rtx_fmt_ee (GET_CODE (operands[1]), FCCmode,
++			      operands[2], operands[3]);
++
++    emit_insn (gen_rtx_SET (fcc, cmp));
++    if (TARGET_64BIT)
++      {
++	rtx gpr = gen_reg_rtx (DImode);
++	emit_insn (gen_fcc_to_di (gpr, fcc));
++	emit_insn (gen_rtx_SET (operands[0],
++				lowpart_subreg (SImode, gpr, DImode)));
++      }
++    else
++      emit_insn (gen_fcc_to_si (operands[0], fcc));
++
++    DONE;
++  })
+ 
+ ;; Conditional move instructions.
+ 
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 88e54c915..58f9a7826 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -590,6 +590,10 @@
+ (define_predicate "loongarch_cstore_operator"
+   (match_code "ne,eq,gt,gtu,ge,geu,lt,ltu,le,leu"))
+ 
++(define_predicate "loongarch_fcmp_operator"
++  (match_code
++    "unordered,uneq,unlt,unle,eq,lt,le,ordered,ltgt,ne,ge,gt,unge,ungt"))
++
+ (define_predicate "small_data_pattern"
+   (and (match_code "set,parallel,unspec,unspec_volatile,prefetch")
+        (match_test "loongarch_small_data_pattern_p (op)")))
+diff --git a/gcc/testsuite/gcc.target/loongarch/movcf2gr-via-fr.c b/gcc/testsuite/gcc.target/loongarch/movcf2gr-via-fr.c
+new file mode 100644
+index 000000000..23334a3a3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/movcf2gr-via-fr.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mtune=la464 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "movcf2fr\t\\\$f\[0-9\]+,\\\$fcc" } } */
++/* { dg-final { scan-assembler "movfr2gr\\.s\t\\\$r4" } } */
++
++int
++t (float a, float b)
++{
++  return a > b;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/movcf2gr.c b/gcc/testsuite/gcc.target/loongarch/movcf2gr.c
+new file mode 100644
+index 000000000..d27c393b5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/movcf2gr.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mtune=la664 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "movcf2gr\t\\\$r4,\\\$fcc" } } */
++
++int
++t (float a, float b)
++{
++  return a > b;
++}
+-- 
+2.43.0
+
diff --git a/0084-LoongArch-Add-sign_extend-pattern-for-32-bit-rotate-.patch b/0084-LoongArch-Add-sign_extend-pattern-for-32-bit-rotate-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1a98af95096dcc3187d5189777cd3b6685943fd9
--- /dev/null
+++ b/0084-LoongArch-Add-sign_extend-pattern-for-32-bit-rotate-.patch
@@ -0,0 +1,69 @@
+From 8da6a317bc3ad64da8590649b83a841391f20438 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 17 Dec 2023 04:26:23 +0800
+Subject: [PATCH 084/188] LoongArch: Add sign_extend pattern for 32-bit rotate
+ shift
+
+Remove a redundant sign extension.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (rotrsi3_extend): New
+	define_insn.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/rotrw.c: New test.
+---
+ gcc/config/loongarch/loongarch.md          | 10 ++++++++++
+ gcc/testsuite/gcc.target/loongarch/rotrw.c | 17 +++++++++++++++++
+ 2 files changed, 27 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotrw.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 6cf71d9e4..44e8d336a 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2893,6 +2893,16 @@
+   [(set_attr "type" "shift,shift")
+    (set_attr "mode" "")])
+ 
++(define_insn "rotrsi3_extend"
++  [(set (match_operand:DI 0 "register_operand" "=r,r")
++	(sign_extend:DI
++	  (rotatert:SI (match_operand:SI 1 "register_operand" "r,r")
++		       (match_operand:SI 2 "arith_operand" "r,I"))))]
++  "TARGET_64BIT"
++  "rotr%i2.w\t%0,%1,%2"
++  [(set_attr "type" "shift,shift")
++   (set_attr "mode" "SI")])
++
+ ;; The following templates were added to generate "bstrpick.d + alsl.d"
+ ;; instruction pairs.
+ ;; It is required that the values of const_immalsl_operand and
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotrw.c b/gcc/testsuite/gcc.target/loongarch/rotrw.c
+new file mode 100644
+index 000000000..6ed45e8b8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotrw.c
+@@ -0,0 +1,17 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++/* { dg-final { scan-assembler "rotr\\.w\t\\\$r4,\\\$r4,\\\$r5" } } */
++/* { dg-final { scan-assembler "rotri\\.w\t\\\$r4,\\\$r4,5" } } */
++/* { dg-final { scan-assembler-not "slli\\.w" } } */
++
++unsigned
++rotr (unsigned a, unsigned b)
++{
++  return a >> b | a << 32 - b;
++}
++
++unsigned
++rotri (unsigned a)
++{
++  return a >> 5 | a << 27;
++}
+-- 
+2.43.0
+
diff --git a/0085-LoongArch-Fixed-bug-in-bstrins_-mode-_for_ior_mask-t.patch b/0085-LoongArch-Fixed-bug-in-bstrins_-mode-_for_ior_mask-t.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f8ee4e670adcdaee0298f96a733accb643f487a5
--- /dev/null
+++ b/0085-LoongArch-Fixed-bug-in-bstrins_-mode-_for_ior_mask-t.patch
@@ -0,0 +1,37 @@
+From e56d6d9526e1565fffeb320e15796385eb1732b8 Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Mon, 25 Dec 2023 11:20:23 +0800
+Subject: [PATCH 085/188] LoongArch: Fixed bug in *bstrins__for_ior_mask
+ template.
+
+We found that using the latest compiled gcc will cause a miscompare error
+when running spec2006 400.perlbench test with -flto turned on.  After testing,
+it was found that only the LoongArch architecture will report errors.
+The first error commit was located through the git bisect command as
+r14-3773-g5b857e87201335.  Through debugging, it was found that the problem
+was that the split condition of the *bstrins__for_ior_mask template was
+empty, which should actually be consistent with the insn condition.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md: Adjust.
+---
+ gcc/config/loongarch/loongarch.md | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 44e8d336a..3d5b75825 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1489,7 +1489,7 @@
+   "loongarch_pre_reload_split () && \
+    loongarch_use_bstrins_for_ior_with_mask (mode, operands)"
+   "#"
+-  ""
++  "&& true"
+   [(set (match_dup 0) (match_dup 1))
+    (set (zero_extract:GPR (match_dup 0) (match_dup 2) (match_dup 4))
+ 	(match_dup 3))]
+-- 
+2.43.0
+
diff --git a/0086-LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch b/0086-LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ee3e1b9457694f2e0b6735b1214dcbb90b325de4
--- /dev/null
+++ b/0086-LoongArch-Fix-insn-output-of-vec_concat-templates-fo.patch
@@ -0,0 +1,132 @@
+From b1947829a5949a37db09bc23681e44c8479bd404 Mon Sep 17 00:00:00 2001
+From: Chenghui Pan 
+Date: Fri, 22 Dec 2023 16:22:03 +0800
+Subject: [PATCH 086/188] LoongArch: Fix insn output of vec_concat templates
+ for LASX.
+
+When investigaing failure of gcc.dg/vect/slp-reduc-sad.c, following
+instruction block are being generated by vec_concatv32qi (which is
+generated by vec_initv32qiv16qi) at entrance of foo() function:
+
+  vldx    $vr3,$r5,$r6
+  vld     $vr2,$r5,0
+  xvpermi.q       $xr2,$xr3,0x20
+
+causes the reversion of vec_initv32qiv16qi operation's high and
+low 128-bit part.
+
+According to other target's similar impl and LSX impl for following
+RTL representation, current definition in lasx.md of "vec_concat"
+are wrong:
+
+  (set (op0) (vec_concat (op1) (op2)))
+
+For correct behavior, the last argument of xvpermi.q should be 0x02
+instead of 0x20. This patch fixes this issue and cleanup the vec_concat
+template impl.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (vec_concatv4di): Delete.
+	(vec_concatv8si): Delete.
+	(vec_concatv16hi): Delete.
+	(vec_concatv32qi): Delete.
+	(vec_concatv4df): Delete.
+	(vec_concatv8sf): Delete.
+	(vec_concat): New template with insn output fixed.
+---
+ gcc/config/loongarch/lasx.md | 74 ++++--------------------------------
+ 1 file changed, 7 insertions(+), 67 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 9ca3f9278..46150f2fb 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -577,77 +577,17 @@
+   [(set_attr "type" "simd_insert")
+    (set_attr "mode" "")])
+ 
+-(define_insn "vec_concatv4di"
+-  [(set (match_operand:V4DI 0 "register_operand" "=f")
+-	(vec_concat:V4DI
+-	  (match_operand:V2DI 1 "register_operand" "0")
+-	  (match_operand:V2DI 2 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-{
+-  return "xvpermi.q\t%u0,%u2,0x20";
+-}
+-  [(set_attr "type" "simd_splat")
+-   (set_attr "mode" "V4DI")])
+-
+-(define_insn "vec_concatv8si"
+-  [(set (match_operand:V8SI 0 "register_operand" "=f")
+-	(vec_concat:V8SI
+-	  (match_operand:V4SI 1 "register_operand" "0")
+-	  (match_operand:V4SI 2 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-{
+-  return "xvpermi.q\t%u0,%u2,0x20";
+-}
+-  [(set_attr "type" "simd_splat")
+-   (set_attr "mode" "V4DI")])
+-
+-(define_insn "vec_concatv16hi"
+-  [(set (match_operand:V16HI 0 "register_operand" "=f")
+-	(vec_concat:V16HI
+-	  (match_operand:V8HI 1 "register_operand" "0")
+-	  (match_operand:V8HI 2 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-{
+-  return "xvpermi.q\t%u0,%u2,0x20";
+-}
+-  [(set_attr "type" "simd_splat")
+-   (set_attr "mode" "V4DI")])
+-
+-(define_insn "vec_concatv32qi"
+-  [(set (match_operand:V32QI 0 "register_operand" "=f")
+-	(vec_concat:V32QI
+-	  (match_operand:V16QI 1 "register_operand" "0")
+-	  (match_operand:V16QI 2 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-{
+-  return "xvpermi.q\t%u0,%u2,0x20";
+-}
+-  [(set_attr "type" "simd_splat")
+-   (set_attr "mode" "V4DI")])
+-
+-(define_insn "vec_concatv4df"
+-  [(set (match_operand:V4DF 0 "register_operand" "=f")
+-	(vec_concat:V4DF
+-	  (match_operand:V2DF 1 "register_operand" "0")
+-	  (match_operand:V2DF 2 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-{
+-  return "xvpermi.q\t%u0,%u2,0x20";
+-}
+-  [(set_attr "type" "simd_splat")
+-   (set_attr "mode" "V4DF")])
+-
+-(define_insn "vec_concatv8sf"
+-  [(set (match_operand:V8SF 0 "register_operand" "=f")
+-	(vec_concat:V8SF
+-	  (match_operand:V4SF 1 "register_operand" "0")
+-	  (match_operand:V4SF 2 "register_operand" "f")))]
++(define_insn "vec_concat"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(vec_concat:LASX
++	  (match_operand: 1 "register_operand" "0")
++	  (match_operand: 2 "register_operand" "f")))]
+   "ISA_HAS_LASX"
+ {
+-  return "xvpermi.q\t%u0,%u2,0x20";
++  return "xvpermi.q\t%u0,%u2,0x02";
+ }
+   [(set_attr "type" "simd_splat")
+-   (set_attr "mode" "V4DI")])
++   (set_attr "mode" "")])
+ 
+ ;; xshuf.w
+ (define_insn "lasx_xvperm_"
+-- 
+2.43.0
+
diff --git a/0086-Modfify-cost-calculation-for-dealing-with-equivalenc.patch b/0086-Modfify-cost-calculation-for-dealing-with-equivalenc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c87f3ecd5a65cc09b4d732b2ab4f55b18a1d7f60
--- /dev/null
+++ b/0086-Modfify-cost-calculation-for-dealing-with-equivalenc.patch
@@ -0,0 +1,321 @@
+From c546aad5d38165e2962456525a0f6a427e03583b Mon Sep 17 00:00:00 2001
+From: "Vladimir N. Makarov" 
+Date: Thu, 26 Oct 2023 09:50:40 -0400
+Subject: [PATCH 31/32] Modfify cost calculation for dealing with equivalences
+
+RISCV target developers reported that pseudos with equivalence used in
+a loop can be spilled.  Simple changes of heuristics of cost
+calculation of pseudos with equivalence or even ignoring equivalences
+resulted in numerous testsuite failures on different targets or worse
+spec2017 performance.  This patch implements more sophisticated cost
+calculations of pseudos with equivalences.  The patch does not change
+RA behaviour for targets still using the old reload pass instead of
+LRA.  The patch solves the reported problem and improves x86-64
+specint2017 a bit (specfp2017 performance stays the same).  The patch
+takes into account how the equivalence will be used: will it be
+integrated into the user insns or require an input reload insn.  It
+requires additional pass over insns.  To compensate RA slow down, the
+patch removes a pass over insns in the reload pass used by IRA before.
+This also decouples IRA from reload more and will help to remove the
+reload pass in the future if it ever happens.
+
+gcc/ChangeLog:
+
+	* dwarf2out.cc (reg_loc_descriptor): Use lra_eliminate_regs when
+	LRA is used.
+	* ira-costs.cc: Include regset.h.
+	(equiv_can_be_consumed_p, get_equiv_regno, calculate_equiv_gains):
+	New functions.
+	(find_costs_and_classes): Call calculate_equiv_gains and redefine
+	mem_cost of pseudos with equivs when LRA is used.
+	* var-tracking.cc: Include ira.h and lra.h.
+	(vt_initialize): Use lra_eliminate_regs when LRA is used.
+---
+ gcc/dwarf2out.cc    |   4 +-
+ gcc/ira-costs.cc    | 169 ++++++++++++++++++++++++++++++++++++++++++--
+ gcc/var-tracking.cc |  14 +++-
+ 3 files changed, 179 insertions(+), 8 deletions(-)
+
+diff --git a/gcc/dwarf2out.cc b/gcc/dwarf2out.cc
+index 0a5c081d8..f0f6f4fd4 100644
+--- a/gcc/dwarf2out.cc
++++ b/gcc/dwarf2out.cc
+@@ -14263,7 +14263,9 @@ reg_loc_descriptor (rtx rtl, enum var_init_status initialized)
+      argument pointer and soft frame pointer rtx's.
+      Use DW_OP_fbreg offset DW_OP_stack_value in this case.  */
+   if ((rtl == arg_pointer_rtx || rtl == frame_pointer_rtx)
+-      && eliminate_regs (rtl, VOIDmode, NULL_RTX) != rtl)
++      && (ira_use_lra_p
++	  ? lra_eliminate_regs (rtl, VOIDmode, NULL_RTX)
++	  : eliminate_regs (rtl, VOIDmode, NULL_RTX)) != rtl)
+     {
+       dw_loc_descr_ref result = NULL;
+ 
+diff --git a/gcc/ira-costs.cc b/gcc/ira-costs.cc
+index 642fda529..c79311783 100644
+--- a/gcc/ira-costs.cc
++++ b/gcc/ira-costs.cc
+@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tm_p.h"
+ #include "insn-config.h"
+ #include "regs.h"
++#include "regset.h"
+ #include "ira.h"
+ #include "ira-int.h"
+ #include "addresses.h"
+@@ -1750,6 +1751,145 @@ process_bb_node_for_costs (ira_loop_tree_node_t loop_tree_node)
+     process_bb_for_costs (bb);
+ }
+ 
++/* Check that reg REGNO can be changed by TO in INSN.  Return true in case the
++   result insn would be valid one.  */
++static bool
++equiv_can_be_consumed_p (int regno, rtx to, rtx_insn *insn)
++{
++  validate_replace_src_group (regno_reg_rtx[regno], to, insn);
++  bool res = verify_changes (0);
++  cancel_changes (0);
++  return res;
++}
++
++/* Return true if X contains a pseudo with equivalence.  In this case also
++   return the pseudo through parameter REG.  If the pseudo is a part of subreg,
++   return the subreg through parameter SUBREG.  */
++
++static bool
++get_equiv_regno (rtx x, int ®no, rtx &subreg)
++{
++  subreg = NULL_RTX;
++  if (GET_CODE (x) == SUBREG)
++    {
++      subreg = x;
++      x = SUBREG_REG (x);
++    }
++  if (REG_P (x)
++      && (ira_reg_equiv[REGNO (x)].memory != NULL
++	  || ira_reg_equiv[REGNO (x)].constant != NULL))
++    {
++      regno = REGNO (x);
++      return true;
++    }
++  RTX_CODE code = GET_CODE (x);
++  const char *fmt = GET_RTX_FORMAT (code);
++
++  for (int i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
++    if (fmt[i] == 'e')
++      {
++	if (get_equiv_regno (XEXP (x, i), regno, subreg))
++	  return true;
++      }
++    else if (fmt[i] == 'E')
++      {
++	for (int j = 0; j < XVECLEN (x, i); j++)
++	  if (get_equiv_regno (XVECEXP (x, i, j), regno, subreg))
++	    return true;
++      }
++  return false;
++}
++
++/* A pass through the current function insns.  Calculate costs of using
++   equivalences for pseudos and store them in regno_equiv_gains.  */
++
++static void
++calculate_equiv_gains (void)
++{
++  basic_block bb;
++  int regno, freq, cost;
++  rtx subreg;
++  rtx_insn *insn;
++  machine_mode mode;
++  enum reg_class rclass;
++  bitmap_head equiv_pseudos;
++
++  ira_assert (allocno_p);
++  bitmap_initialize (&equiv_pseudos, ®_obstack);
++  for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
++    if (ira_reg_equiv[regno].init_insns != NULL
++	&& (ira_reg_equiv[regno].memory != NULL
++	    || (ira_reg_equiv[regno].constant != NULL
++		/* Ignore complicated constants which probably will be placed
++		   in memory:  */
++		&& GET_CODE (ira_reg_equiv[regno].constant) != CONST_DOUBLE
++		&& GET_CODE (ira_reg_equiv[regno].constant) != CONST_VECTOR
++		&& GET_CODE (ira_reg_equiv[regno].constant) != LABEL_REF)))
++      {
++	rtx_insn_list *x;
++	for (x = ira_reg_equiv[regno].init_insns; x != NULL; x = x->next ())
++	  {
++	    insn = x->insn ();
++	    rtx set = single_set (insn);
++
++	    if (set == NULL_RTX || SET_DEST (set) != regno_reg_rtx[regno])
++	      break;
++	    bb = BLOCK_FOR_INSN (insn);
++	    ira_curr_regno_allocno_map
++	      = ira_bb_nodes[bb->index].parent->regno_allocno_map;
++	    mode = PSEUDO_REGNO_MODE (regno);
++	    rclass = pref[COST_INDEX (regno)];
++	    ira_init_register_move_cost_if_necessary (mode);
++	    if (ira_reg_equiv[regno].memory != NULL)
++	      cost = ira_memory_move_cost[mode][rclass][1];
++	    else
++	      cost = ira_register_move_cost[mode][rclass][rclass];
++	    freq = REG_FREQ_FROM_BB (bb);
++	    regno_equiv_gains[regno] += cost * freq;
++	  }
++	if (x != NULL)
++	  /* We found complicated equiv or reverse equiv mem=reg.  Ignore
++	     them.  */
++	  regno_equiv_gains[regno] = 0;
++	else
++	  bitmap_set_bit (&equiv_pseudos, regno);
++      }
++
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      freq = REG_FREQ_FROM_BB (bb);
++      ira_curr_regno_allocno_map
++	= ira_bb_nodes[bb->index].parent->regno_allocno_map;
++      FOR_BB_INSNS (bb, insn)
++	{
++	  if (!INSN_P (insn) || !get_equiv_regno (PATTERN (insn), regno, subreg)
++	      || !bitmap_bit_p (&equiv_pseudos, regno))
++	    continue;
++	  rtx subst = ira_reg_equiv[regno].memory;
++
++	  if (subst == NULL)
++	    subst = ira_reg_equiv[regno].constant;
++	  ira_assert (subst != NULL);
++	  mode = PSEUDO_REGNO_MODE (regno);
++	  ira_init_register_move_cost_if_necessary (mode);
++	  bool consumed_p = equiv_can_be_consumed_p (regno, subst, insn);
++
++	  rclass = pref[COST_INDEX (regno)];
++	  if (MEM_P (subst)
++	      /* If it is a change of constant into double for example, the
++		 result constant probably will be placed in memory.  */
++	      || (subreg != NULL_RTX && !INTEGRAL_MODE_P (GET_MODE (subreg))))
++	    cost = ira_memory_move_cost[mode][rclass][1] + (consumed_p ? 0 : 1);
++	  else if (consumed_p)
++	    continue;
++	  else
++	    cost = ira_register_move_cost[mode][rclass][rclass];
++	  regno_equiv_gains[regno] -= cost * freq;
++	}
++    }
++  bitmap_clear (&equiv_pseudos);
++}
++
+ /* Find costs of register classes and memory for allocnos or pseudos
+    and their best costs.  Set up preferred, alternative and allocno
+    classes for pseudos.  */
+@@ -1848,6 +1988,12 @@ find_costs_and_classes (FILE *dump_file)
+       if (pass == 0)
+ 	pref = pref_buffer;
+ 
++      if (ira_use_lra_p && allocno_p && pass == 1)
++	/* It is a pass through all insns.  So do it once and only for RA (not
++	   for insn scheduler) when we already found preferable pseudo register
++	   classes on the previous pass.  */
++	calculate_equiv_gains ();
++
+       /* Now for each allocno look at how desirable each class is and
+ 	 find which class is preferred.  */
+       for (i = max_reg_num () - 1; i >= FIRST_PSEUDO_REGISTER; i--)
+@@ -1940,6 +2086,17 @@ find_costs_and_classes (FILE *dump_file)
+ 	    }
+ 	  if (i >= first_moveable_pseudo && i < last_moveable_pseudo)
+ 	    i_mem_cost = 0;
++	  else if (ira_use_lra_p)
++	    {
++	      if (equiv_savings > 0)
++		{
++		  i_mem_cost = 0;
++		  if (ira_dump_file != NULL && internal_flag_ira_verbose > 5)
++		    fprintf (ira_dump_file,
++			     "   Use MEM for r%d as the equiv savings is %d\n",
++			     i, equiv_savings);
++		}
++	    }
+ 	  else if (equiv_savings < 0)
+ 	    i_mem_cost = -equiv_savings;
+ 	  else if (equiv_savings > 0)
+@@ -2378,7 +2535,10 @@ ira_costs (void)
+   total_allocno_costs = (struct costs *) ira_allocate (max_struct_costs_size
+ 						       * ira_allocnos_num);
+   initiate_regno_cost_classes ();
+-  calculate_elim_costs_all_insns ();
++  if (!ira_use_lra_p)
++    /* Process equivs in reload to update costs through hook
++       ira_adjust_equiv_reg_cost.  */
++    calculate_elim_costs_all_insns ();
+   find_costs_and_classes (ira_dump_file);
+   setup_allocno_class_and_costs ();
+   finish_regno_cost_classes ();
+@@ -2503,13 +2663,14 @@ ira_tune_allocno_costs (void)
+     }
+ }
+ 
+-/* Add COST to the estimated gain for eliminating REGNO with its
+-   equivalence.  If COST is zero, record that no such elimination is
+-   possible.  */
++/* A hook from the reload pass.  Add COST to the estimated gain for eliminating
++   REGNO with its equivalence.  If COST is zero, record that no such
++   elimination is possible.  */
+ 
+ void
+ ira_adjust_equiv_reg_cost (unsigned regno, int cost)
+ {
++  ira_assert (!ira_use_lra_p);
+   if (cost == 0)
+     regno_equiv_gains[regno] = 0;
+   else
+diff --git a/gcc/var-tracking.cc b/gcc/var-tracking.cc
+index 7c3ad0a55..b10c8c1eb 100644
+--- a/gcc/var-tracking.cc
++++ b/gcc/var-tracking.cc
+@@ -107,6 +107,8 @@
+ #include "cfgrtl.h"
+ #include "cfganal.h"
+ #include "reload.h"
++#include "ira.h"
++#include "lra.h"
+ #include "calls.h"
+ #include "tree-dfa.h"
+ #include "tree-ssa.h"
+@@ -10133,7 +10135,9 @@ vt_initialize (void)
+ #else
+       reg = arg_pointer_rtx;
+ #endif
+-      elim = eliminate_regs (reg, VOIDmode, NULL_RTX);
++      elim = (ira_use_lra_p
++	      ? lra_eliminate_regs (reg, VOIDmode, NULL_RTX)
++	      : eliminate_regs (reg, VOIDmode, NULL_RTX));
+       if (elim != reg)
+ 	{
+ 	  if (GET_CODE (elim) == PLUS)
+@@ -10153,7 +10157,9 @@ vt_initialize (void)
+       reg = arg_pointer_rtx;
+       fp_cfa_offset = ARG_POINTER_CFA_OFFSET (current_function_decl);
+ #endif
+-      elim = eliminate_regs (reg, VOIDmode, NULL_RTX);
++      elim = (ira_use_lra_p
++	      ? lra_eliminate_regs (reg, VOIDmode, NULL_RTX)
++	      : eliminate_regs (reg, VOIDmode, NULL_RTX));
+       if (elim != reg)
+ 	{
+ 	  if (GET_CODE (elim) == PLUS)
+@@ -10185,7 +10191,9 @@ vt_initialize (void)
+ #else
+       reg = arg_pointer_rtx;
+ #endif
+-      elim = eliminate_regs (reg, VOIDmode, NULL_RTX);
++      elim = (ira_use_lra_p
++	      ? lra_eliminate_regs (reg, VOIDmode, NULL_RTX)
++	      : eliminate_regs (reg, VOIDmode, NULL_RTX));
+       if (elim != reg)
+ 	{
+ 	  if (GET_CODE (elim) == PLUS)
+-- 
+2.28.0.windows.1
+
diff --git a/0087-Add-cost-calculation-for-reg-equivalence-invariants.patch b/0087-Add-cost-calculation-for-reg-equivalence-invariants.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9315409ad82775270d2ece67dfa0616169e07b01
--- /dev/null
+++ b/0087-Add-cost-calculation-for-reg-equivalence-invariants.patch
@@ -0,0 +1,49 @@
+From 4965473a4211a9feb46a0d168180ab450cb18bcc Mon Sep 17 00:00:00 2001
+From: "Vladimir N. Makarov" 
+Date: Fri, 27 Oct 2023 08:28:24 -0400
+Subject: [PATCH 32/32] Add cost calculation for reg equivalence invariants
+
+My recent patch improving cost calculation for pseudos with equivalence
+resulted in failure of gcc.target/arm/eliminate.c on aarch64.  This patch
+fixes this failure.
+
+gcc/ChangeLog:
+
+	* ira-costs.cc: (get_equiv_regno, calculate_equiv_gains):
+	Process reg equivalence invariants.
+---
+ gcc/ira-costs.cc | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/gcc/ira-costs.cc b/gcc/ira-costs.cc
+index c79311783..d33104a30 100644
+--- a/gcc/ira-costs.cc
++++ b/gcc/ira-costs.cc
+@@ -1777,6 +1777,7 @@ get_equiv_regno (rtx x, int &regno, rtx &subreg)
+     }
+   if (REG_P (x)
+       && (ira_reg_equiv[REGNO (x)].memory != NULL
++	  || ira_reg_equiv[REGNO (x)].invariant != NULL
+ 	  || ira_reg_equiv[REGNO (x)].constant != NULL))
+     {
+       regno = REGNO (x);
+@@ -1819,6 +1820,7 @@ calculate_equiv_gains (void)
+   for (regno = max_reg_num () - 1; regno >= FIRST_PSEUDO_REGISTER; regno--)
+     if (ira_reg_equiv[regno].init_insns != NULL
+ 	&& (ira_reg_equiv[regno].memory != NULL
++	    || ira_reg_equiv[regno].invariant != NULL
+ 	    || (ira_reg_equiv[regno].constant != NULL
+ 		/* Ignore complicated constants which probably will be placed
+ 		   in memory:  */
+@@ -1869,6 +1871,8 @@ calculate_equiv_gains (void)
+ 
+ 	  if (subst == NULL)
+ 	    subst = ira_reg_equiv[regno].constant;
++	  if (subst == NULL)
++	    subst = ira_reg_equiv[regno].invariant;
+ 	  ira_assert (subst != NULL);
+ 	  mode = PSEUDO_REGNO_MODE (regno);
+ 	  ira_init_register_move_cost_if_necessary (mode);
+-- 
+2.28.0.windows.1
+
diff --git a/0087-LoongArch-Fix-ICE-when-passing-two-same-vector-argum.patch b/0087-LoongArch-Fix-ICE-when-passing-two-same-vector-argum.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f1373496838686a875bcc59d2cd3101e5543d419
--- /dev/null
+++ b/0087-LoongArch-Fix-ICE-when-passing-two-same-vector-argum.patch
@@ -0,0 +1,232 @@
+From 1096571509762846e2222f575bc981385b4e9fb7 Mon Sep 17 00:00:00 2001
+From: Chenghui Pan <panchenghui@loongson.cn>
+Date: Fri, 22 Dec 2023 16:18:44 +0800
+Subject: [PATCH 087/188] LoongArch: Fix ICE when passing two same vector
+ argument consecutively
+
+Following code will cause ICE on LoongArch target:
+
+  #include <lsxintrin.h>
+
+  extern void bar (__m128i, __m128i);
+
+  __m128i a;
+
+  void
+  foo ()
+  {
+    bar (a, a);
+  }
+
+It is caused by missing constraint definition in mov<mode>_lsx. This
+patch fixes the template and remove the unnecessary processing from
+loongarch_split_move () function.
+
+This patch also cleanup the redundant definition from
+loongarch_split_move () and loongarch_split_move_p ().
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md: Use loongarch_split_move and
+	loongarch_split_move_p directly.
+	* config/loongarch/loongarch-protos.h
+	(loongarch_split_move): Remove unnecessary argument.
+	(loongarch_split_move_insn_p): Delete.
+	(loongarch_split_move_insn): Delete.
+	* config/loongarch/loongarch.cc
+	(loongarch_split_move_insn_p): Delete.
+	(loongarch_load_store_insns): Use loongarch_split_move_p
+	directly.
+	(loongarch_split_move): remove the unnecessary processing.
+	(loongarch_split_move_insn): Delete.
+	* config/loongarch/lsx.md: Use loongarch_split_move and
+	loongarch_split_move_p directly.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-mov-1.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  |  4 +-
+ gcc/config/loongarch/loongarch-protos.h       |  4 +-
+ gcc/config/loongarch/loongarch.cc             | 49 +------------------
+ gcc/config/loongarch/lsx.md                   | 10 ++--
+ .../loongarch/vector/lsx/lsx-mov-1.c          | 14 ++++++
+ 5 files changed, 24 insertions(+), 57 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-mov-1.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 46150f2fb..dbbf5a136 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -839,10 +839,10 @@
+   [(set (match_operand:LASX 0 "nonimmediate_operand")
+ 	(match_operand:LASX 1 "move_operand"))]
+   "reload_completed && ISA_HAS_LASX
+-   && loongarch_split_move_insn_p (operands[0], operands[1])"
++   && loongarch_split_move_p (operands[0], operands[1])"
+   [(const_int 0)]
+ {
+-  loongarch_split_move_insn (operands[0], operands[1], curr_insn);
++  loongarch_split_move (operands[0], operands[1]);
+   DONE;
+ })
+ 
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index e5fcf3111..2067e50c3 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -82,11 +82,9 @@ extern rtx loongarch_legitimize_call_address (rtx);
+ 
+ extern rtx loongarch_subword (rtx, bool);
+ extern bool loongarch_split_move_p (rtx, rtx);
+-extern void loongarch_split_move (rtx, rtx, rtx);
++extern void loongarch_split_move (rtx, rtx);
+ extern bool loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT, machine_mode);
+ extern void loongarch_split_plus_constant (rtx *, machine_mode);
+-extern bool loongarch_split_move_insn_p (rtx, rtx);
+-extern void loongarch_split_move_insn (rtx, rtx, rtx);
+ extern void loongarch_split_128bit_move (rtx, rtx);
+ extern bool loongarch_split_128bit_move_p (rtx, rtx);
+ extern void loongarch_split_256bit_move (rtx, rtx);
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 56f631b1a..5c278386a 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2558,7 +2558,6 @@ loongarch_split_const_insns (rtx x)
+   return low + high;
+ }
+ 
+-bool loongarch_split_move_insn_p (rtx dest, rtx src);
+ /* Return one word of 128-bit value OP, taking into account the fixed
+    endianness of certain registers.  BYTE selects from the byte address.  */
+ 
+@@ -2598,7 +2597,7 @@ loongarch_load_store_insns (rtx mem, rtx_insn *insn)
+     {
+       set = single_set (insn);
+       if (set
+-	  && !loongarch_split_move_insn_p (SET_DEST (set), SET_SRC (set)))
++	  && !loongarch_split_move_p (SET_DEST (set), SET_SRC (set)))
+ 	might_split_p = false;
+     }
+ 
+@@ -4216,7 +4215,7 @@ loongarch_split_move_p (rtx dest, rtx src)
+    SPLIT_TYPE describes the split condition.  */
+ 
+ void
+-loongarch_split_move (rtx dest, rtx src, rtx insn_)
++loongarch_split_move (rtx dest, rtx src)
+ {
+   rtx low_dest;
+ 
+@@ -4254,33 +4253,6 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_)
+ 			       loongarch_subword (src, true));
+ 	}
+     }
+-
+-  /* This is a hack.  See if the next insn uses DEST and if so, see if we
+-     can forward SRC for DEST.  This is most useful if the next insn is a
+-     simple store.  */
+-  rtx_insn *insn = (rtx_insn *) insn_;
+-  struct loongarch_address_info addr = {};
+-  if (insn)
+-    {
+-      rtx_insn *next = next_nonnote_nondebug_insn_bb (insn);
+-      if (next)
+-	{
+-	  rtx set = single_set (next);
+-	  if (set && SET_SRC (set) == dest)
+-	    {
+-	      if (MEM_P (src))
+-		{
+-		  rtx tmp = XEXP (src, 0);
+-		  loongarch_classify_address (&addr, tmp, GET_MODE (tmp),
+-					      true);
+-		  if (addr.reg && !reg_overlap_mentioned_p (dest, addr.reg))
+-		    validate_change (next, &SET_SRC (set), src, false);
+-		}
+-	      else
+-		validate_change (next, &SET_SRC (set), src, false);
+-	    }
+-	}
+-    }
+ }
+ 
+ /* Check if adding an integer constant value for a specific mode can be
+@@ -4327,23 +4299,6 @@ loongarch_split_plus_constant (rtx *op, machine_mode mode)
+   op[2] = gen_int_mode (v, mode);
+ }
+ 
+-/* Return true if a move from SRC to DEST in INSN should be split.  */
+-
+-bool
+-loongarch_split_move_insn_p (rtx dest, rtx src)
+-{
+-  return loongarch_split_move_p (dest, src);
+-}
+-
+-/* Split a move from SRC to DEST in INSN, given that
+-   loongarch_split_move_insn_p holds.  */
+-
+-void
+-loongarch_split_move_insn (rtx dest, rtx src, rtx insn)
+-{
+-  loongarch_split_move (dest, src, insn);
+-}
+-
+ /* Implement TARGET_CONSTANT_ALIGNMENT.  */
+ 
+ static HOST_WIDE_INT
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 7f5fff40a..3e3248ef4 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -794,21 +794,21 @@
+ })
+ 
+ (define_insn "mov_lsx"
+-  [(set (match_operand:LSX 0 "nonimmediate_operand" "=f,f,R,*r,*f")
+-	(match_operand:LSX 1 "move_operand" "fYGYI,R,f,*f,*r"))]
++  [(set (match_operand:LSX 0 "nonimmediate_operand" "=f,f,R,*r,*f,*r")
++	(match_operand:LSX 1 "move_operand" "fYGYI,R,f,*f,*r,*r"))]
+   "ISA_HAS_LSX"
+ { return loongarch_output_move (operands[0], operands[1]); }
+-  [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert")
++  [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert,simd_copy")
+    (set_attr "mode" "")])
+ 
+ (define_split
+   [(set (match_operand:LSX 0 "nonimmediate_operand")
+ 	(match_operand:LSX 1 "move_operand"))]
+   "reload_completed && ISA_HAS_LSX
+-   && loongarch_split_move_insn_p (operands[0], operands[1])"
++   && loongarch_split_move_p (operands[0], operands[1])"
+   [(const_int 0)]
+ {
+-  loongarch_split_move_insn (operands[0], operands[1], curr_insn);
++  loongarch_split_move (operands[0], operands[1]);
+   DONE;
+ })
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-mov-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-mov-1.c
+new file mode 100644
+index 000000000..7f9d792eb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-mov-1.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-mlsx -O2" } */
++
++#include <lsxintrin.h>
++
++extern void bar (__m128i, __m128i);
++
++__m128i a;
++
++void
++foo ()
++{
++  bar (a, a);
++}
+-- 
+2.43.0
+
diff --git a/0088-BUGFIX-Fix-the-configure-file-of-BOLT.patch b/0088-BUGFIX-Fix-the-configure-file-of-BOLT.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1d63246e0db9c267f4a3673683cd723dc2f2d82f
--- /dev/null
+++ b/0088-BUGFIX-Fix-the-configure-file-of-BOLT.patch
@@ -0,0 +1,30102 @@
+From e245129ab722da21df3a2853474a9d4acf47fe67 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Fri, 26 Apr 2024 21:34:19 +0800
+Subject: [PATCH] [BUGFIX] Fix the configure file of BOLT
+
+---
+ Makefile.in             |  1319 +++++
+ bolt-plugin/Makefile.in |    34 +-
+ bolt-plugin/aclocal.m4  |  9169 +-----------------------------
+ bolt-plugin/config.h.in |   106 +-
+ bolt-plugin/configure   | 11313 +++++++++++++++-----------------------
+ 5 files changed, 5796 insertions(+), 16145 deletions(-)
+
+diff --git a/Makefile.in b/Makefile.in
+index 593495e16..7785b3d9a 100644
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -1111,6 +1111,7 @@ configure-host:  \
+     maybe-configure-c++tools \
+     maybe-configure-gnattools \
+     maybe-configure-lto-plugin \
++    maybe-configure-bolt-plugin \
+     maybe-configure-libcc1 \
+     maybe-configure-gotools \
+     maybe-configure-libctf
+@@ -1288,6 +1289,9 @@ all-host: maybe-all-gnattools
+ @if lto-plugin-no-bootstrap
+ all-host: maybe-all-lto-plugin
+ @endif lto-plugin-no-bootstrap
++@if bolt-plugin-no-bootstrap
++all-host: maybe-all-bolt-plugin
++@endif bolt-plugin-no-bootstrap
+ all-host: maybe-all-libcc1
+ all-host: maybe-all-gotools
+ @if libctf-no-bootstrap
+@@ -1403,6 +1407,7 @@ info-host: maybe-info-utils
+ info-host: maybe-info-c++tools
+ info-host: maybe-info-gnattools
+ info-host: maybe-info-lto-plugin
++info-host: maybe-info-bolt-plugin
+ info-host: maybe-info-libcc1
+ info-host: maybe-info-gotools
+ info-host: maybe-info-libctf
+@@ -1493,6 +1498,7 @@ dvi-host: maybe-dvi-utils
+ dvi-host: maybe-dvi-c++tools
+ dvi-host: maybe-dvi-gnattools
+ dvi-host: maybe-dvi-lto-plugin
++dvi-host: maybe-dvi-bolt-plugin
+ dvi-host: maybe-dvi-libcc1
+ dvi-host: maybe-dvi-gotools
+ dvi-host: maybe-dvi-libctf
+@@ -1583,6 +1589,7 @@ pdf-host: maybe-pdf-utils
+ pdf-host: maybe-pdf-c++tools
+ pdf-host: maybe-pdf-gnattools
+ pdf-host: maybe-pdf-lto-plugin
++pdf-host: maybe-pdf-bolt-plugin
+ pdf-host: maybe-pdf-libcc1
+ pdf-host: maybe-pdf-gotools
+ pdf-host: maybe-pdf-libctf
+@@ -1673,6 +1680,7 @@ html-host: maybe-html-utils
+ html-host: maybe-html-c++tools
+ html-host: maybe-html-gnattools
+ html-host: maybe-html-lto-plugin
++html-host: maybe-html-bolt-plugin
+ html-host: maybe-html-libcc1
+ html-host: maybe-html-gotools
+ html-host: maybe-html-libctf
+@@ -1763,6 +1771,7 @@ TAGS-host: maybe-TAGS-utils
+ TAGS-host: maybe-TAGS-c++tools
+ TAGS-host: maybe-TAGS-gnattools
+ TAGS-host: maybe-TAGS-lto-plugin
++TAGS-host: maybe-TAGS-bolt-plugin
+ TAGS-host: maybe-TAGS-libcc1
+ TAGS-host: maybe-TAGS-gotools
+ TAGS-host: maybe-TAGS-libctf
+@@ -1853,6 +1862,7 @@ install-info-host: maybe-install-info-utils
+ install-info-host: maybe-install-info-c++tools
+ install-info-host: maybe-install-info-gnattools
+ install-info-host: maybe-install-info-lto-plugin
++install-info-host: maybe-install-info-bolt-plugin
+ install-info-host: maybe-install-info-libcc1
+ install-info-host: maybe-install-info-gotools
+ install-info-host: maybe-install-info-libctf
+@@ -1943,6 +1953,7 @@ install-dvi-host: maybe-install-dvi-utils
+ install-dvi-host: maybe-install-dvi-c++tools
+ install-dvi-host: maybe-install-dvi-gnattools
+ install-dvi-host: maybe-install-dvi-lto-plugin
++install-dvi-host: maybe-install-dvi-bolt-plugin
+ install-dvi-host: maybe-install-dvi-libcc1
+ install-dvi-host: maybe-install-dvi-gotools
+ install-dvi-host: maybe-install-dvi-libctf
+@@ -2033,6 +2044,7 @@ install-pdf-host: maybe-install-pdf-utils
+ install-pdf-host: maybe-install-pdf-c++tools
+ install-pdf-host: maybe-install-pdf-gnattools
+ install-pdf-host: maybe-install-pdf-lto-plugin
++install-pdf-host: maybe-install-pdf-bolt-plugin
+ install-pdf-host: maybe-install-pdf-libcc1
+ install-pdf-host: maybe-install-pdf-gotools
+ install-pdf-host: maybe-install-pdf-libctf
+@@ -2123,6 +2135,7 @@ install-html-host: maybe-install-html-utils
+ install-html-host: maybe-install-html-c++tools
+ install-html-host: maybe-install-html-gnattools
+ install-html-host: maybe-install-html-lto-plugin
++install-html-host: maybe-install-html-bolt-plugin
+ install-html-host: maybe-install-html-libcc1
+ install-html-host: maybe-install-html-gotools
+ install-html-host: maybe-install-html-libctf
+@@ -2213,6 +2226,7 @@ installcheck-host: maybe-installcheck-utils
+ installcheck-host: maybe-installcheck-c++tools
+ installcheck-host: maybe-installcheck-gnattools
+ installcheck-host: maybe-installcheck-lto-plugin
++installcheck-host: maybe-installcheck-bolt-plugin
+ installcheck-host: maybe-installcheck-libcc1
+ installcheck-host: maybe-installcheck-gotools
+ installcheck-host: maybe-installcheck-libctf
+@@ -2303,6 +2317,7 @@ mostlyclean-host: maybe-mostlyclean-utils
+ mostlyclean-host: maybe-mostlyclean-c++tools
+ mostlyclean-host: maybe-mostlyclean-gnattools
+ mostlyclean-host: maybe-mostlyclean-lto-plugin
++mostlyclean-host: maybe-mostlyclean-bolt-plugin
+ mostlyclean-host: maybe-mostlyclean-libcc1
+ mostlyclean-host: maybe-mostlyclean-gotools
+ mostlyclean-host: maybe-mostlyclean-libctf
+@@ -2393,6 +2408,7 @@ clean-host: maybe-clean-utils
+ clean-host: maybe-clean-c++tools
+ clean-host: maybe-clean-gnattools
+ clean-host: maybe-clean-lto-plugin
++clean-host: maybe-clean-bolt-plugin
+ clean-host: maybe-clean-libcc1
+ clean-host: maybe-clean-gotools
+ clean-host: maybe-clean-libctf
+@@ -2483,6 +2499,7 @@ distclean-host: maybe-distclean-utils
+ distclean-host: maybe-distclean-c++tools
+ distclean-host: maybe-distclean-gnattools
+ distclean-host: maybe-distclean-lto-plugin
++distclean-host: maybe-distclean-bolt-plugin
+ distclean-host: maybe-distclean-libcc1
+ distclean-host: maybe-distclean-gotools
+ distclean-host: maybe-distclean-libctf
+@@ -2573,6 +2590,7 @@ maintainer-clean-host: maybe-maintainer-clean-utils
+ maintainer-clean-host: maybe-maintainer-clean-c++tools
+ maintainer-clean-host: maybe-maintainer-clean-gnattools
+ maintainer-clean-host: maybe-maintainer-clean-lto-plugin
++maintainer-clean-host: maybe-maintainer-clean-bolt-plugin
+ maintainer-clean-host: maybe-maintainer-clean-libcc1
+ maintainer-clean-host: maybe-maintainer-clean-gotools
+ maintainer-clean-host: maybe-maintainer-clean-libctf
+@@ -2721,6 +2739,7 @@ check-host:  \
+     maybe-check-c++tools \
+     maybe-check-gnattools \
+     maybe-check-lto-plugin \
++    maybe-check-bolt-plugin \
+     maybe-check-libcc1 \
+     maybe-check-gotools \
+     maybe-check-libctf
+@@ -2858,6 +2877,7 @@ install-host-nogcc:  \
+     maybe-install-c++tools \
+     maybe-install-gnattools \
+     maybe-install-lto-plugin \
++    maybe-install-bolt-plugin \
+     maybe-install-libcc1 \
+     maybe-install-gotools \
+     maybe-install-libctf
+@@ -2913,6 +2933,7 @@ install-host:  \
+     maybe-install-c++tools \
+     maybe-install-gnattools \
+     maybe-install-lto-plugin \
++    maybe-install-bolt-plugin \
+     maybe-install-libcc1 \
+     maybe-install-gotools \
+     maybe-install-libctf
+@@ -3023,6 +3044,7 @@ install-strip-host:  \
+     maybe-install-strip-c++tools \
+     maybe-install-strip-gnattools \
+     maybe-install-strip-lto-plugin \
++    maybe-install-strip-bolt-plugin \
+     maybe-install-strip-libcc1 \
+     maybe-install-strip-gotools \
+     maybe-install-strip-libctf
+@@ -41493,6 +41515,1155 @@ maintainer-clean-lto-plugin:
+ 
+ 
+ 
++.PHONY: configure-bolt-plugin maybe-configure-bolt-plugin
++maybe-configure-bolt-plugin:
++@if gcc-bootstrap
++configure-bolt-plugin: stage_current
++@endif gcc-bootstrap
++@if bolt-plugin
++maybe-configure-bolt-plugin: configure-bolt-plugin
++configure-bolt-plugin: 
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	$(HOST_EXPORTS)  \
++	echo Configuring in $(HOST_SUBDIR)/bolt-plugin; \
++	cd "$(HOST_SUBDIR)/bolt-plugin" || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) \
++	  $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@ \
++	  || exit 1
++@endif bolt-plugin
++
++
++
++.PHONY: configure-stage1-bolt-plugin maybe-configure-stage1-bolt-plugin
++maybe-configure-stage1-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stage1-bolt-plugin: configure-stage1-bolt-plugin
++configure-stage1-bolt-plugin:
++	@[ $(current_stage) = stage1 ] || $(MAKE) stage1-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE1_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	CFLAGS="$(STAGE1_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGE1_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(LIBCFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage 1 in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	   \
++	  $(STAGE1_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stage2-bolt-plugin maybe-configure-stage2-bolt-plugin
++maybe-configure-stage2-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stage2-bolt-plugin: configure-stage2-bolt-plugin
++configure-stage2-bolt-plugin:
++	@[ $(current_stage) = stage2 ] || $(MAKE) stage2-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE2_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGE2_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGE2_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGE2_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage 2 in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGE2_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stage3-bolt-plugin maybe-configure-stage3-bolt-plugin
++maybe-configure-stage3-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stage3-bolt-plugin: configure-stage3-bolt-plugin
++configure-stage3-bolt-plugin:
++	@[ $(current_stage) = stage3 ] || $(MAKE) stage3-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE3_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGE3_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGE3_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGE3_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage 3 in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGE3_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stage4-bolt-plugin maybe-configure-stage4-bolt-plugin
++maybe-configure-stage4-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stage4-bolt-plugin: configure-stage4-bolt-plugin
++configure-stage4-bolt-plugin:
++	@[ $(current_stage) = stage4 ] || $(MAKE) stage4-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE4_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGE4_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGE4_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGE4_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage 4 in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGE4_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stageprofile-bolt-plugin maybe-configure-stageprofile-bolt-plugin
++maybe-configure-stageprofile-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stageprofile-bolt-plugin: configure-stageprofile-bolt-plugin
++configure-stageprofile-bolt-plugin:
++	@[ $(current_stage) = stageprofile ] || $(MAKE) stageprofile-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEprofile_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGEprofile_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGEprofile_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGEprofile_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage profile in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGEprofile_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stagetrain-bolt-plugin maybe-configure-stagetrain-bolt-plugin
++maybe-configure-stagetrain-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stagetrain-bolt-plugin: configure-stagetrain-bolt-plugin
++configure-stagetrain-bolt-plugin:
++	@[ $(current_stage) = stagetrain ] || $(MAKE) stagetrain-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEtrain_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGEtrain_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGEtrain_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGEtrain_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage train in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGEtrain_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stagefeedback-bolt-plugin maybe-configure-stagefeedback-bolt-plugin
++maybe-configure-stagefeedback-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stagefeedback-bolt-plugin: configure-stagefeedback-bolt-plugin
++configure-stagefeedback-bolt-plugin:
++	@[ $(current_stage) = stagefeedback ] || $(MAKE) stagefeedback-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEfeedback_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGEfeedback_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGEfeedback_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGEfeedback_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage feedback in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGEfeedback_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stageautoprofile-bolt-plugin maybe-configure-stageautoprofile-bolt-plugin
++maybe-configure-stageautoprofile-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stageautoprofile-bolt-plugin: configure-stageautoprofile-bolt-plugin
++configure-stageautoprofile-bolt-plugin:
++	@[ $(current_stage) = stageautoprofile ] || $(MAKE) stageautoprofile-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEautoprofile_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGEautoprofile_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGEautoprofile_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGEautoprofile_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage autoprofile in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGEautoprofile_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++.PHONY: configure-stageautofeedback-bolt-plugin maybe-configure-stageautofeedback-bolt-plugin
++maybe-configure-stageautofeedback-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-configure-stageautofeedback-bolt-plugin: configure-stageautofeedback-bolt-plugin
++configure-stageautofeedback-bolt-plugin:
++	@[ $(current_stage) = stageautofeedback ] || $(MAKE) stageautofeedback-start
++	@$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEautofeedback_TFLAGS)"; \
++	test ! -f $(HOST_SUBDIR)/bolt-plugin/Makefile || exit 0; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS) \
++	CFLAGS="$(STAGEautofeedback_CFLAGS)"; export CFLAGS; \
++	CXXFLAGS="$(STAGEautofeedback_CXXFLAGS)"; export CXXFLAGS; \
++	LIBCFLAGS="$(STAGEautofeedback_CFLAGS)"; export LIBCFLAGS;  \
++	echo Configuring stage autofeedback in $(HOST_SUBDIR)/bolt-plugin; \
++	$(SHELL) $(srcdir)/mkinstalldirs $(HOST_SUBDIR)/bolt-plugin; \
++	cd $(HOST_SUBDIR)/bolt-plugin || exit 1; \
++	case $(srcdir) in \
++	  /* | [A-Za-z]:[\\/]*) topdir=$(srcdir) ;; \
++	  *) topdir=`echo $(HOST_SUBDIR)/bolt-plugin/ | \
++		sed -e 's,\./,,g' -e 's,[^/]*/,../,g' `$(srcdir) ;; \
++	esac; \
++	module_srcdir=bolt-plugin; \
++	$(SHELL) $$s/$$module_srcdir/configure \
++	  --srcdir=$${topdir}/$$module_srcdir \
++	  $(HOST_CONFIGARGS) --build=${build_alias} --host=${host_alias} \
++	  --target=${target_alias} \
++	  --with-build-libsubdir=$(HOST_SUBDIR) \
++	  $(STAGEautofeedback_CONFIGURE_FLAGS) \
++	  --enable-shared @extra_linker_plugin_flags@ @extra_linker_plugin_configure_flags@
++@endif bolt-plugin-bootstrap
++
++
++
++
++
++.PHONY: all-bolt-plugin maybe-all-bolt-plugin
++maybe-all-bolt-plugin:
++@if gcc-bootstrap
++all-bolt-plugin: stage_current
++@endif gcc-bootstrap
++@if bolt-plugin
++TARGET-bolt-plugin=all
++maybe-all-bolt-plugin: all-bolt-plugin
++all-bolt-plugin: configure-bolt-plugin
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS)  \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) $(EXTRA_HOST_FLAGS) $(STAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		$(TARGET-bolt-plugin))
++@endif bolt-plugin
++
++
++
++.PHONY: all-stage1-bolt-plugin maybe-all-stage1-bolt-plugin
++.PHONY: clean-stage1-bolt-plugin maybe-clean-stage1-bolt-plugin
++maybe-all-stage1-bolt-plugin:
++maybe-clean-stage1-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stage1-bolt-plugin: all-stage1-bolt-plugin
++all-stage1: all-stage1-bolt-plugin
++TARGET-stage1-bolt-plugin = $(TARGET-bolt-plugin)
++all-stage1-bolt-plugin: configure-stage1-bolt-plugin
++	@[ $(current_stage) = stage1 ] || $(MAKE) stage1-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE1_TFLAGS)"; \
++	$(HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGE1_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGE1_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGE1_CXXFLAGS)" \
++		LIBCFLAGS="$(LIBCFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS)  \
++		$(STAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGE1_TFLAGS)"  \
++		$(TARGET-stage1-bolt-plugin)
++
++maybe-clean-stage1-bolt-plugin: clean-stage1-bolt-plugin
++clean-stage1: clean-stage1-bolt-plugin
++clean-stage1-bolt-plugin:
++	@if [ $(current_stage) = stage1 ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stage1-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stage1-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS)  \
++	$(STAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stage2-bolt-plugin maybe-all-stage2-bolt-plugin
++.PHONY: clean-stage2-bolt-plugin maybe-clean-stage2-bolt-plugin
++maybe-all-stage2-bolt-plugin:
++maybe-clean-stage2-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stage2-bolt-plugin: all-stage2-bolt-plugin
++all-stage2: all-stage2-bolt-plugin
++TARGET-stage2-bolt-plugin = $(TARGET-bolt-plugin)
++all-stage2-bolt-plugin: configure-stage2-bolt-plugin
++	@[ $(current_stage) = stage2 ] || $(MAKE) stage2-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE2_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGE2_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGE2_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGE2_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGE2_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGE2_TFLAGS)"  \
++		$(TARGET-stage2-bolt-plugin)
++
++maybe-clean-stage2-bolt-plugin: clean-stage2-bolt-plugin
++clean-stage2: clean-stage2-bolt-plugin
++clean-stage2-bolt-plugin:
++	@if [ $(current_stage) = stage2 ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stage2-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stage2-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stage3-bolt-plugin maybe-all-stage3-bolt-plugin
++.PHONY: clean-stage3-bolt-plugin maybe-clean-stage3-bolt-plugin
++maybe-all-stage3-bolt-plugin:
++maybe-clean-stage3-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stage3-bolt-plugin: all-stage3-bolt-plugin
++all-stage3: all-stage3-bolt-plugin
++TARGET-stage3-bolt-plugin = $(TARGET-bolt-plugin)
++all-stage3-bolt-plugin: configure-stage3-bolt-plugin
++	@[ $(current_stage) = stage3 ] || $(MAKE) stage3-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE3_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGE3_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGE3_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGE3_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGE3_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGE3_TFLAGS)"  \
++		$(TARGET-stage3-bolt-plugin)
++
++maybe-clean-stage3-bolt-plugin: clean-stage3-bolt-plugin
++clean-stage3: clean-stage3-bolt-plugin
++clean-stage3-bolt-plugin:
++	@if [ $(current_stage) = stage3 ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stage3-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stage3-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stage4-bolt-plugin maybe-all-stage4-bolt-plugin
++.PHONY: clean-stage4-bolt-plugin maybe-clean-stage4-bolt-plugin
++maybe-all-stage4-bolt-plugin:
++maybe-clean-stage4-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stage4-bolt-plugin: all-stage4-bolt-plugin
++all-stage4: all-stage4-bolt-plugin
++TARGET-stage4-bolt-plugin = $(TARGET-bolt-plugin)
++all-stage4-bolt-plugin: configure-stage4-bolt-plugin
++	@[ $(current_stage) = stage4 ] || $(MAKE) stage4-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGE4_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGE4_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGE4_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGE4_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGE4_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGE4_TFLAGS)"  \
++		$(TARGET-stage4-bolt-plugin)
++
++maybe-clean-stage4-bolt-plugin: clean-stage4-bolt-plugin
++clean-stage4: clean-stage4-bolt-plugin
++clean-stage4-bolt-plugin:
++	@if [ $(current_stage) = stage4 ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stage4-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stage4-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stageprofile-bolt-plugin maybe-all-stageprofile-bolt-plugin
++.PHONY: clean-stageprofile-bolt-plugin maybe-clean-stageprofile-bolt-plugin
++maybe-all-stageprofile-bolt-plugin:
++maybe-clean-stageprofile-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stageprofile-bolt-plugin: all-stageprofile-bolt-plugin
++all-stageprofile: all-stageprofile-bolt-plugin
++TARGET-stageprofile-bolt-plugin = $(TARGET-bolt-plugin)
++all-stageprofile-bolt-plugin: configure-stageprofile-bolt-plugin
++	@[ $(current_stage) = stageprofile ] || $(MAKE) stageprofile-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEprofile_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGEprofile_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGEprofile_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGEprofile_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGEprofile_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGEprofile_TFLAGS)"  \
++		$(TARGET-stageprofile-bolt-plugin)
++
++maybe-clean-stageprofile-bolt-plugin: clean-stageprofile-bolt-plugin
++clean-stageprofile: clean-stageprofile-bolt-plugin
++clean-stageprofile-bolt-plugin:
++	@if [ $(current_stage) = stageprofile ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stageprofile-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stageprofile-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stagetrain-bolt-plugin maybe-all-stagetrain-bolt-plugin
++.PHONY: clean-stagetrain-bolt-plugin maybe-clean-stagetrain-bolt-plugin
++maybe-all-stagetrain-bolt-plugin:
++maybe-clean-stagetrain-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stagetrain-bolt-plugin: all-stagetrain-bolt-plugin
++all-stagetrain: all-stagetrain-bolt-plugin
++TARGET-stagetrain-bolt-plugin = $(TARGET-bolt-plugin)
++all-stagetrain-bolt-plugin: configure-stagetrain-bolt-plugin
++	@[ $(current_stage) = stagetrain ] || $(MAKE) stagetrain-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEtrain_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGEtrain_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGEtrain_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGEtrain_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGEtrain_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGEtrain_TFLAGS)"  \
++		$(TARGET-stagetrain-bolt-plugin)
++
++maybe-clean-stagetrain-bolt-plugin: clean-stagetrain-bolt-plugin
++clean-stagetrain: clean-stagetrain-bolt-plugin
++clean-stagetrain-bolt-plugin:
++	@if [ $(current_stage) = stagetrain ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stagetrain-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stagetrain-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stagefeedback-bolt-plugin maybe-all-stagefeedback-bolt-plugin
++.PHONY: clean-stagefeedback-bolt-plugin maybe-clean-stagefeedback-bolt-plugin
++maybe-all-stagefeedback-bolt-plugin:
++maybe-clean-stagefeedback-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stagefeedback-bolt-plugin: all-stagefeedback-bolt-plugin
++all-stagefeedback: all-stagefeedback-bolt-plugin
++TARGET-stagefeedback-bolt-plugin = $(TARGET-bolt-plugin)
++all-stagefeedback-bolt-plugin: configure-stagefeedback-bolt-plugin
++	@[ $(current_stage) = stagefeedback ] || $(MAKE) stagefeedback-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEfeedback_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGEfeedback_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGEfeedback_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGEfeedback_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGEfeedback_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGEfeedback_TFLAGS)"  \
++		$(TARGET-stagefeedback-bolt-plugin)
++
++maybe-clean-stagefeedback-bolt-plugin: clean-stagefeedback-bolt-plugin
++clean-stagefeedback: clean-stagefeedback-bolt-plugin
++clean-stagefeedback-bolt-plugin:
++	@if [ $(current_stage) = stagefeedback ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stagefeedback-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stagefeedback-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stageautoprofile-bolt-plugin maybe-all-stageautoprofile-bolt-plugin
++.PHONY: clean-stageautoprofile-bolt-plugin maybe-clean-stageautoprofile-bolt-plugin
++maybe-all-stageautoprofile-bolt-plugin:
++maybe-clean-stageautoprofile-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stageautoprofile-bolt-plugin: all-stageautoprofile-bolt-plugin
++all-stageautoprofile: all-stageautoprofile-bolt-plugin
++TARGET-stageautoprofile-bolt-plugin = $(TARGET-bolt-plugin)
++all-stageautoprofile-bolt-plugin: configure-stageautoprofile-bolt-plugin
++	@[ $(current_stage) = stageautoprofile ] || $(MAKE) stageautoprofile-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEautoprofile_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$$s/gcc/config/i386/$(AUTO_PROFILE) \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGEautoprofile_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGEautoprofile_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGEautoprofile_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGEautoprofile_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGEautoprofile_TFLAGS)"  \
++		$(TARGET-stageautoprofile-bolt-plugin)
++
++maybe-clean-stageautoprofile-bolt-plugin: clean-stageautoprofile-bolt-plugin
++clean-stageautoprofile: clean-stageautoprofile-bolt-plugin
++clean-stageautoprofile-bolt-plugin:
++	@if [ $(current_stage) = stageautoprofile ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stageautoprofile-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stageautoprofile-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++.PHONY: all-stageautofeedback-bolt-plugin maybe-all-stageautofeedback-bolt-plugin
++.PHONY: clean-stageautofeedback-bolt-plugin maybe-clean-stageautofeedback-bolt-plugin
++maybe-all-stageautofeedback-bolt-plugin:
++maybe-clean-stageautofeedback-bolt-plugin:
++@if bolt-plugin-bootstrap
++maybe-all-stageautofeedback-bolt-plugin: all-stageautofeedback-bolt-plugin
++all-stageautofeedback: all-stageautofeedback-bolt-plugin
++TARGET-stageautofeedback-bolt-plugin = $(TARGET-bolt-plugin)
++all-stageautofeedback-bolt-plugin: configure-stageautofeedback-bolt-plugin
++	@[ $(current_stage) = stageautofeedback ] || $(MAKE) stageautofeedback-start
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	TFLAGS="$(STAGEautofeedback_TFLAGS)"; \
++	$(HOST_EXPORTS) \
++	$(POSTSTAGE1_HOST_EXPORTS)  \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	 \
++	$(MAKE) $(BASE_FLAGS_TO_PASS) \
++		CFLAGS="$(STAGEautofeedback_CFLAGS)" \
++		GENERATOR_CFLAGS="$(STAGEautofeedback_GENERATOR_CFLAGS)" \
++		CXXFLAGS="$(STAGEautofeedback_CXXFLAGS)" \
++		LIBCFLAGS="$(STAGEautofeedback_CFLAGS)" \
++		CFLAGS_FOR_TARGET="$(CFLAGS_FOR_TARGET)" \
++		CXXFLAGS_FOR_TARGET="$(CXXFLAGS_FOR_TARGET)" \
++		LIBCFLAGS_FOR_TARGET="$(LIBCFLAGS_FOR_TARGET)" \
++		$(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ \
++		TFLAGS="$(STAGEautofeedback_TFLAGS)" PERF_DATA=perf.data \
++		$(TARGET-stageautofeedback-bolt-plugin)
++
++maybe-clean-stageautofeedback-bolt-plugin: clean-stageautofeedback-bolt-plugin
++clean-stageautofeedback: clean-stageautofeedback-bolt-plugin
++clean-stageautofeedback-bolt-plugin:
++	@if [ $(current_stage) = stageautofeedback ]; then \
++	  [ -f $(HOST_SUBDIR)/bolt-plugin/Makefile ] || exit 0; \
++	else \
++	  [ -f $(HOST_SUBDIR)/stageautofeedback-bolt-plugin/Makefile ] || exit 0; \
++	  $(MAKE) stageautofeedback-start; \
++	fi; \
++	cd $(HOST_SUBDIR)/bolt-plugin && \
++	$(MAKE) $(EXTRA_HOST_FLAGS) $(POSTSTAGE1_FLAGS_TO_PASS) @extra_linker_plugin_flags@ clean
++@endif bolt-plugin-bootstrap
++
++
++
++
++
++.PHONY: check-bolt-plugin maybe-check-bolt-plugin
++maybe-check-bolt-plugin:
++@if bolt-plugin
++maybe-check-bolt-plugin: check-bolt-plugin
++
++check-bolt-plugin:
++	@: $(MAKE); $(unstage)
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) $(EXTRA_HOST_EXPORTS) \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(FLAGS_TO_PASS) @extra_linker_plugin_flags@ $(EXTRA_BOOTSTRAP_FLAGS) check)
++
++@endif bolt-plugin
++
++.PHONY: install-bolt-plugin maybe-install-bolt-plugin
++maybe-install-bolt-plugin:
++@if bolt-plugin
++maybe-install-bolt-plugin: install-bolt-plugin
++
++install-bolt-plugin: installdirs
++	@: $(MAKE); $(unstage)
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(FLAGS_TO_PASS) @extra_linker_plugin_flags@ install)
++
++@endif bolt-plugin
++
++.PHONY: install-strip-bolt-plugin maybe-install-strip-bolt-plugin
++maybe-install-strip-bolt-plugin:
++@if bolt-plugin
++maybe-install-strip-bolt-plugin: install-strip-bolt-plugin
++
++install-strip-bolt-plugin: installdirs
++	@: $(MAKE); $(unstage)
++	@r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(FLAGS_TO_PASS) @extra_linker_plugin_flags@ install-strip)
++
++@endif bolt-plugin
++
++# Other targets (info, dvi, pdf, etc.)
++
++.PHONY: maybe-info-bolt-plugin info-bolt-plugin
++maybe-info-bolt-plugin:
++@if bolt-plugin
++maybe-info-bolt-plugin: info-bolt-plugin
++
++info-bolt-plugin: \
++    configure-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing info in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          info) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-dvi-bolt-plugin dvi-bolt-plugin
++maybe-dvi-bolt-plugin:
++@if bolt-plugin
++maybe-dvi-bolt-plugin: dvi-bolt-plugin
++
++dvi-bolt-plugin: \
++    configure-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing dvi in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          dvi) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-pdf-bolt-plugin pdf-bolt-plugin
++maybe-pdf-bolt-plugin:
++@if bolt-plugin
++maybe-pdf-bolt-plugin: pdf-bolt-plugin
++
++pdf-bolt-plugin: \
++    configure-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing pdf in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          pdf) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-html-bolt-plugin html-bolt-plugin
++maybe-html-bolt-plugin:
++@if bolt-plugin
++maybe-html-bolt-plugin: html-bolt-plugin
++
++html-bolt-plugin: \
++    configure-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing html in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          html) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-TAGS-bolt-plugin TAGS-bolt-plugin
++maybe-TAGS-bolt-plugin:
++@if bolt-plugin
++maybe-TAGS-bolt-plugin: TAGS-bolt-plugin
++
++TAGS-bolt-plugin: \
++    configure-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing TAGS in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          TAGS) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-install-info-bolt-plugin install-info-bolt-plugin
++maybe-install-info-bolt-plugin:
++@if bolt-plugin
++maybe-install-info-bolt-plugin: install-info-bolt-plugin
++
++install-info-bolt-plugin: \
++    configure-bolt-plugin \
++    info-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing install-info in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          install-info) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-install-dvi-bolt-plugin install-dvi-bolt-plugin
++maybe-install-dvi-bolt-plugin:
++@if bolt-plugin
++maybe-install-dvi-bolt-plugin: install-dvi-bolt-plugin
++
++install-dvi-bolt-plugin: \
++    configure-bolt-plugin \
++    dvi-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing install-dvi in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          install-dvi) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-install-pdf-bolt-plugin install-pdf-bolt-plugin
++maybe-install-pdf-bolt-plugin:
++@if bolt-plugin
++maybe-install-pdf-bolt-plugin: install-pdf-bolt-plugin
++
++install-pdf-bolt-plugin: \
++    configure-bolt-plugin \
++    pdf-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing install-pdf in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          install-pdf) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-install-html-bolt-plugin install-html-bolt-plugin
++maybe-install-html-bolt-plugin:
++@if bolt-plugin
++maybe-install-html-bolt-plugin: install-html-bolt-plugin
++
++install-html-bolt-plugin: \
++    configure-bolt-plugin \
++    html-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing install-html in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          install-html) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-installcheck-bolt-plugin installcheck-bolt-plugin
++maybe-installcheck-bolt-plugin:
++@if bolt-plugin
++maybe-installcheck-bolt-plugin: installcheck-bolt-plugin
++
++installcheck-bolt-plugin: \
++    configure-bolt-plugin 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing installcheck in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          installcheck) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-mostlyclean-bolt-plugin mostlyclean-bolt-plugin
++maybe-mostlyclean-bolt-plugin:
++@if bolt-plugin
++maybe-mostlyclean-bolt-plugin: mostlyclean-bolt-plugin
++
++mostlyclean-bolt-plugin: 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing mostlyclean in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          mostlyclean) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-clean-bolt-plugin clean-bolt-plugin
++maybe-clean-bolt-plugin:
++@if bolt-plugin
++maybe-clean-bolt-plugin: clean-bolt-plugin
++
++clean-bolt-plugin: 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing clean in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          clean) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-distclean-bolt-plugin distclean-bolt-plugin
++maybe-distclean-bolt-plugin:
++@if bolt-plugin
++maybe-distclean-bolt-plugin: distclean-bolt-plugin
++
++distclean-bolt-plugin: 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing distclean in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          distclean) \
++	  || exit 1
++
++@endif bolt-plugin
++
++.PHONY: maybe-maintainer-clean-bolt-plugin maintainer-clean-bolt-plugin
++maybe-maintainer-clean-bolt-plugin:
++@if bolt-plugin
++maybe-maintainer-clean-bolt-plugin: maintainer-clean-bolt-plugin
++
++maintainer-clean-bolt-plugin: 
++	@[ -f ./bolt-plugin/Makefile ] || exit 0; \
++	r=`${PWD_COMMAND}`; export r; \
++	s=`cd $(srcdir); ${PWD_COMMAND}`; export s; \
++	$(HOST_EXPORTS) \
++	for flag in $(EXTRA_HOST_FLAGS) @extra_linker_plugin_flags@; do \
++	  eval `echo "$$flag" | sed -e "s|^\([^=]*\)=\(.*\)|\1='\2'; export \1|"`; \
++	done; \
++	echo "Doing maintainer-clean in bolt-plugin"; \
++	(cd $(HOST_SUBDIR)/bolt-plugin && \
++	  $(MAKE) $(BASE_FLAGS_TO_PASS) "AR=$${AR}" "AS=$${AS}" \
++	          "CC=$${CC}" "CXX=$${CXX}" "LD=$${LD}" "NM=$${NM}" \
++	          "RANLIB=$${RANLIB}" \
++	          "DLLTOOL=$${DLLTOOL}" "WINDRES=$${WINDRES}" "WINDMC=$${WINDMC}" \
++	          maintainer-clean) \
++	  || exit 1
++
++@endif bolt-plugin
++
++
++
+ .PHONY: configure-libcc1 maybe-configure-libcc1
+ maybe-configure-libcc1:
+ @if gcc-bootstrap
+@@ -61940,6 +63111,11 @@ stage1-start::
+ 	  mkdir stage1-lto-plugin; \
+ 	mv stage1-lto-plugin lto-plugin
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stage1-bolt-plugin ] || \
++	  mkdir stage1-bolt-plugin; \
++	mv stage1-bolt-plugin bolt-plugin
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stage1-libctf ] || \
+ 	  mkdir stage1-libctf; \
+@@ -62065,6 +63241,11 @@ stage1-end::
+ 	  cd $(HOST_SUBDIR); mv lto-plugin stage1-lto-plugin; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stage1-bolt-plugin; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stage1-libctf; \
+@@ -62256,6 +63437,12 @@ stage2-start::
+ 	mv stage2-lto-plugin lto-plugin; \
+ 	mv stage1-lto-plugin prev-lto-plugin || test -f stage1-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stage2-bolt-plugin ] || \
++	  mkdir stage2-bolt-plugin; \
++	mv stage2-bolt-plugin bolt-plugin; \
++	mv stage1-bolt-plugin prev-bolt-plugin || test -f stage1-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stage2-libctf ] || \
+ 	  mkdir stage2-libctf; \
+@@ -62406,6 +63593,12 @@ stage2-end::
+ 	  mv prev-lto-plugin stage1-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stage2-bolt-plugin; \
++	  mv prev-bolt-plugin stage1-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stage2-libctf; \
+@@ -62622,6 +63815,12 @@ stage3-start::
+ 	mv stage3-lto-plugin lto-plugin; \
+ 	mv stage2-lto-plugin prev-lto-plugin || test -f stage2-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stage3-bolt-plugin ] || \
++	  mkdir stage3-bolt-plugin; \
++	mv stage3-bolt-plugin bolt-plugin; \
++	mv stage2-bolt-plugin prev-bolt-plugin || test -f stage2-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stage3-libctf ] || \
+ 	  mkdir stage3-libctf; \
+@@ -62772,6 +63971,12 @@ stage3-end::
+ 	  mv prev-lto-plugin stage2-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stage3-bolt-plugin; \
++	  mv prev-bolt-plugin stage2-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stage3-libctf; \
+@@ -63044,6 +64249,12 @@ stage4-start::
+ 	mv stage4-lto-plugin lto-plugin; \
+ 	mv stage3-lto-plugin prev-lto-plugin || test -f stage3-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stage4-bolt-plugin ] || \
++	  mkdir stage4-bolt-plugin; \
++	mv stage4-bolt-plugin bolt-plugin; \
++	mv stage3-bolt-plugin prev-bolt-plugin || test -f stage3-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stage4-libctf ] || \
+ 	  mkdir stage4-libctf; \
+@@ -63194,6 +64405,12 @@ stage4-end::
+ 	  mv prev-lto-plugin stage3-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stage4-bolt-plugin; \
++	  mv prev-bolt-plugin stage3-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stage4-libctf; \
+@@ -63454,6 +64671,12 @@ stageprofile-start::
+ 	mv stageprofile-lto-plugin lto-plugin; \
+ 	mv stage1-lto-plugin prev-lto-plugin || test -f stage1-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stageprofile-bolt-plugin ] || \
++	  mkdir stageprofile-bolt-plugin; \
++	mv stageprofile-bolt-plugin bolt-plugin; \
++	mv stage1-bolt-plugin prev-bolt-plugin || test -f stage1-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stageprofile-libctf ] || \
+ 	  mkdir stageprofile-libctf; \
+@@ -63604,6 +64827,12 @@ stageprofile-end::
+ 	  mv prev-lto-plugin stage1-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stageprofile-bolt-plugin; \
++	  mv prev-bolt-plugin stage1-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stageprofile-libctf; \
+@@ -63797,6 +65026,12 @@ stagetrain-start::
+ 	mv stagetrain-lto-plugin lto-plugin; \
+ 	mv stageprofile-lto-plugin prev-lto-plugin || test -f stageprofile-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stagetrain-bolt-plugin ] || \
++	  mkdir stagetrain-bolt-plugin; \
++	mv stagetrain-bolt-plugin bolt-plugin; \
++	mv stageprofile-bolt-plugin prev-bolt-plugin || test -f stageprofile-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stagetrain-libctf ] || \
+ 	  mkdir stagetrain-libctf; \
+@@ -63947,6 +65182,12 @@ stagetrain-end::
+ 	  mv prev-lto-plugin stageprofile-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stagetrain-bolt-plugin; \
++	  mv prev-bolt-plugin stageprofile-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stagetrain-libctf; \
+@@ -64140,6 +65381,12 @@ stagefeedback-start::
+ 	mv stagefeedback-lto-plugin lto-plugin; \
+ 	mv stagetrain-lto-plugin prev-lto-plugin || test -f stagetrain-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stagefeedback-bolt-plugin ] || \
++	  mkdir stagefeedback-bolt-plugin; \
++	mv stagefeedback-bolt-plugin bolt-plugin; \
++	mv stagetrain-bolt-plugin prev-bolt-plugin || test -f stagetrain-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stagefeedback-libctf ] || \
+ 	  mkdir stagefeedback-libctf; \
+@@ -64290,6 +65537,12 @@ stagefeedback-end::
+ 	  mv prev-lto-plugin stagetrain-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stagefeedback-bolt-plugin; \
++	  mv prev-bolt-plugin stagetrain-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stagefeedback-libctf; \
+@@ -64506,6 +65759,12 @@ stageautoprofile-start::
+ 	mv stageautoprofile-lto-plugin lto-plugin; \
+ 	mv stage1-lto-plugin prev-lto-plugin || test -f stage1-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stageautoprofile-bolt-plugin ] || \
++	  mkdir stageautoprofile-bolt-plugin; \
++	mv stageautoprofile-bolt-plugin bolt-plugin; \
++	mv stage1-bolt-plugin prev-bolt-plugin || test -f stage1-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stageautoprofile-libctf ] || \
+ 	  mkdir stageautoprofile-libctf; \
+@@ -64656,6 +65915,12 @@ stageautoprofile-end::
+ 	  mv prev-lto-plugin stage1-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stageautoprofile-bolt-plugin; \
++	  mv prev-bolt-plugin stage1-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stageautoprofile-libctf; \
+@@ -64849,6 +66114,12 @@ stageautofeedback-start::
+ 	mv stageautofeedback-lto-plugin lto-plugin; \
+ 	mv stageautoprofile-lto-plugin prev-lto-plugin || test -f stageautoprofile-lean 
+ @endif lto-plugin
++@if bolt-plugin
++	@cd $(HOST_SUBDIR); [ -d stageautofeedback-bolt-plugin ] || \
++	  mkdir stageautofeedback-bolt-plugin; \
++	mv stageautofeedback-bolt-plugin bolt-plugin; \
++	mv stageautoprofile-bolt-plugin prev-bolt-plugin || test -f stageautoprofile-lean 
++@endif bolt-plugin
+ @if libctf
+ 	@cd $(HOST_SUBDIR); [ -d stageautofeedback-libctf ] || \
+ 	  mkdir stageautofeedback-libctf; \
+@@ -64999,6 +66270,12 @@ stageautofeedback-end::
+ 	  mv prev-lto-plugin stageautoprofile-lto-plugin; : ; \
+ 	fi
+ @endif lto-plugin
++@if bolt-plugin
++	@if test -d $(HOST_SUBDIR)/bolt-plugin; then \
++	  cd $(HOST_SUBDIR); mv bolt-plugin stageautofeedback-bolt-plugin; \
++	  mv prev-bolt-plugin stageautoprofile-bolt-plugin; : ; \
++	fi
++@endif bolt-plugin
+ @if libctf
+ 	@if test -d $(HOST_SUBDIR)/libctf; then \
+ 	  cd $(HOST_SUBDIR); mv libctf stageautofeedback-libctf; \
+@@ -65321,6 +66598,16 @@ configure-stagetrain-gcc: maybe-all-stagetrain-lto-plugin
+ configure-stagefeedback-gcc: maybe-all-stagefeedback-lto-plugin
+ configure-stageautoprofile-gcc: maybe-all-stageautoprofile-lto-plugin
+ configure-stageautofeedback-gcc: maybe-all-stageautofeedback-lto-plugin
++configure-gcc: maybe-all-bolt-plugin
++configure-stage1-gcc: maybe-all-stage1-bolt-plugin
++configure-stage2-gcc: maybe-all-stage2-bolt-plugin
++configure-stage3-gcc: maybe-all-stage3-bolt-plugin
++configure-stage4-gcc: maybe-all-stage4-bolt-plugin
++configure-stageprofile-gcc: maybe-all-stageprofile-bolt-plugin
++configure-stagetrain-gcc: maybe-all-stagetrain-bolt-plugin
++configure-stagefeedback-gcc: maybe-all-stagefeedback-bolt-plugin
++configure-stageautoprofile-gcc: maybe-all-stageautoprofile-bolt-plugin
++configure-stageautofeedback-gcc: maybe-all-stageautofeedback-bolt-plugin
+ configure-gcc: maybe-all-binutils
+ configure-stage1-gcc: maybe-all-stage1-binutils
+ configure-stage2-gcc: maybe-all-stage2-binutils
+@@ -65571,6 +66858,16 @@ all-stagetrain-gcc: maybe-all-stagetrain-lto-plugin
+ all-stagefeedback-gcc: maybe-all-stagefeedback-lto-plugin
+ all-stageautoprofile-gcc: maybe-all-stageautoprofile-lto-plugin
+ all-stageautofeedback-gcc: maybe-all-stageautofeedback-lto-plugin
++all-gcc: maybe-all-bolt-plugin
++all-stage1-gcc: maybe-all-stage1-bolt-plugin
++all-stage2-gcc: maybe-all-stage2-bolt-plugin
++all-stage3-gcc: maybe-all-stage3-bolt-plugin
++all-stage4-gcc: maybe-all-stage4-bolt-plugin
++all-stageprofile-gcc: maybe-all-stageprofile-bolt-plugin
++all-stagetrain-gcc: maybe-all-stagetrain-bolt-plugin
++all-stagefeedback-gcc: maybe-all-stagefeedback-bolt-plugin
++all-stageautoprofile-gcc: maybe-all-stageautoprofile-bolt-plugin
++all-stageautofeedback-gcc: maybe-all-stageautofeedback-bolt-plugin
+ all-gcc: maybe-all-libiconv
+ all-stage1-gcc: maybe-all-stage1-libiconv
+ all-stage2-gcc: maybe-all-stage2-libiconv
+@@ -65623,8 +66920,10 @@ html-stageautoprofile-gcc: maybe-all-build-libiberty
+ html-stageautofeedback-gcc: maybe-all-build-libiberty
+ install-gcc: maybe-install-fixincludes
+ install-gcc: maybe-install-lto-plugin
++install-gcc: maybe-install-bolt-plugin
+ install-strip-gcc: maybe-install-strip-fixincludes
+ install-strip-gcc: maybe-install-strip-lto-plugin
++install-strip-gcc: maybe-install-strip-bolt-plugin
+ configure-libcpp: configure-libiberty
+ configure-stage1-libcpp: configure-stage1-libiberty
+ configure-stage2-libcpp: configure-stage2-libiberty
+@@ -65716,6 +67015,26 @@ all-stagetrain-lto-plugin: maybe-all-stagetrain-libiberty-linker-plugin
+ all-stagefeedback-lto-plugin: maybe-all-stagefeedback-libiberty-linker-plugin
+ all-stageautoprofile-lto-plugin: maybe-all-stageautoprofile-libiberty-linker-plugin
+ all-stageautofeedback-lto-plugin: maybe-all-stageautofeedback-libiberty-linker-plugin
++all-bolt-plugin: maybe-all-libiberty
++all-stage1-bolt-plugin: maybe-all-stage1-libiberty
++all-stage2-bolt-plugin: maybe-all-stage2-libiberty
++all-stage3-bolt-plugin: maybe-all-stage3-libiberty
++all-stage4-bolt-plugin: maybe-all-stage4-libiberty
++all-stageprofile-bolt-plugin: maybe-all-stageprofile-libiberty
++all-stagetrain-bolt-plugin: maybe-all-stagetrain-libiberty
++all-stagefeedback-bolt-plugin: maybe-all-stagefeedback-libiberty
++all-stageautoprofile-bolt-plugin: maybe-all-stageautoprofile-libiberty
++all-stageautofeedback-bolt-plugin: maybe-all-stageautofeedback-libiberty
++all-bolt-plugin: maybe-all-libiberty-linker-plugin
++all-stage1-bolt-plugin: maybe-all-stage1-libiberty-linker-plugin
++all-stage2-bolt-plugin: maybe-all-stage2-libiberty-linker-plugin
++all-stage3-bolt-plugin: maybe-all-stage3-libiberty-linker-plugin
++all-stage4-bolt-plugin: maybe-all-stage4-libiberty-linker-plugin
++all-stageprofile-bolt-plugin: maybe-all-stageprofile-libiberty-linker-plugin
++all-stagetrain-bolt-plugin: maybe-all-stagetrain-libiberty-linker-plugin
++all-stagefeedback-bolt-plugin: maybe-all-stagefeedback-libiberty-linker-plugin
++all-stageautoprofile-bolt-plugin: maybe-all-stageautoprofile-libiberty-linker-plugin
++all-stageautofeedback-bolt-plugin: maybe-all-stageautofeedback-libiberty-linker-plugin
+ all-gotools: maybe-all-target-libgo
+ configure-intl: maybe-all-libiconv
+ configure-stage1-intl: maybe-all-stage1-libiconv
+diff --git a/bolt-plugin/Makefile.in b/bolt-plugin/Makefile.in
+index 11b59407e..0a58abc45 100644
+--- a/bolt-plugin/Makefile.in
++++ b/bolt-plugin/Makefile.in
+@@ -1,7 +1,7 @@
+-# Makefile.in generated by automake 1.16.5 from Makefile.am.
++# Makefile.in generated by automake 1.16.2 from Makefile.am.
+ # @configure_input@
+ 
+-# Copyright (C) 1994-2021 Free Software Foundation, Inc.
++# Copyright (C) 1994-2020 Free Software Foundation, Inc.
+ 
+ # This Makefile.in is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -91,7 +91,15 @@ host_triplet = @host@
+ target_triplet = @target@
+ subdir = .
+ ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+-am__aclocal_m4_deps = $(top_srcdir)/configure.ac
++am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
++	$(top_srcdir)/../config/depstand.m4 \
++	$(top_srcdir)/../config/lead-dot.m4 \
++	$(top_srcdir)/../config/lthostflags.m4 \
++	$(top_srcdir)/../config/override.m4 \
++	$(top_srcdir)/../config/warnings.m4 \
++	$(top_srcdir)/../libtool.m4 $(top_srcdir)/../ltoptions.m4 \
++	$(top_srcdir)/../ltsugar.m4 $(top_srcdir)/../ltversion.m4 \
++	$(top_srcdir)/../lt~obsolete.m4 $(top_srcdir)/configure.ac
+ am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \
+ 	$(ACLOCAL_M4)
+ DIST_COMMON = $(srcdir)/Makefile.am $(top_srcdir)/configure \
+@@ -194,6 +202,9 @@ am__define_uniq_tagged_files = \
+   unique=`for i in $$list; do \
+     if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
+   done | $(am__uniquify_input)`
++ETAGS = etags
++CTAGS = ctags
++CSCOPE = cscope
+ AM_RECURSIVE_TARGETS = cscope
+ ACLOCAL = @ACLOCAL@
+ AMTAR = @AMTAR@
+@@ -206,9 +217,8 @@ AWK = @AWK@
+ CC = @CC@
+ CCDEPMODE = @CCDEPMODE@
+ CFLAGS = @CFLAGS@
++CPP = @CPP@
+ CPPFLAGS = @CPPFLAGS@
+-CSCOPE = @CSCOPE@
+-CTAGS = @CTAGS@
+ CXX = @CXX@
+ CXXCPP = @CXXCPP@
+ CXXDEPMODE = @CXXDEPMODE@
+@@ -216,17 +226,14 @@ CXXFLAGS = @CXXFLAGS@
+ CYGPATH_W = @CYGPATH_W@
+ DEFS = @DEFS@
+ DEPDIR = @DEPDIR@
+-DLLTOOL = @DLLTOOL@
+ DSYMUTIL = @DSYMUTIL@
+ DUMPBIN = @DUMPBIN@
+ ECHO_C = @ECHO_C@
+ ECHO_N = @ECHO_N@
+ ECHO_T = @ECHO_T@
+ EGREP = @EGREP@
+-ETAGS = @ETAGS@
+ EXEEXT = @EXEEXT@
+ FGREP = @FGREP@
+-FILECMD = @FILECMD@
+ GREP = @GREP@
+ INSTALL = @INSTALL@
+ INSTALL_DATA = @INSTALL_DATA@
+@@ -241,10 +248,8 @@ LIBTOOL = @LIBTOOL@
+ LIPO = @LIPO@
+ LN_S = @LN_S@
+ LTLIBOBJS = @LTLIBOBJS@
+-LT_SYS_LIBRARY_PATH = @LT_SYS_LIBRARY_PATH@
+ MAINT = @MAINT@
+ MAKEINFO = @MAKEINFO@
+-MANIFEST_TOOL = @MANIFEST_TOOL@
+ MKDIR_P = @MKDIR_P@
+ NM = @NM@
+ NMEDIT = @NMEDIT@
+@@ -271,7 +276,7 @@ abs_srcdir = @abs_srcdir@
+ abs_top_builddir = @abs_top_builddir@
+ abs_top_srcdir = @abs_top_srcdir@
+ ac_bolt_plugin_ldflags = @ac_bolt_plugin_ldflags@
+-ac_ct_AR = @ac_ct_AR@
++ac_bolt_plugin_warn_cflags = @ac_bolt_plugin_warn_cflags@
+ ac_ct_CC = @ac_ct_CC@
+ ac_ct_CXX = @ac_ct_CXX@
+ ac_ct_DUMPBIN = @ac_ct_DUMPBIN@
+@@ -285,7 +290,9 @@ bindir = @bindir@
+ build = @build@
+ build_alias = @build_alias@
+ build_cpu = @build_cpu@
++build_libsubdir = @build_libsubdir@
+ build_os = @build_os@
++build_subdir = @build_subdir@
+ build_vendor = @build_vendor@
+ builddir = @builddir@
+ datadir = @datadir@
+@@ -294,10 +301,12 @@ docdir = @docdir@
+ dvidir = @dvidir@
+ exec_prefix = @exec_prefix@
+ gcc_build_dir = @gcc_build_dir@
++get_gcc_base_ver = @get_gcc_base_ver@
+ host = @host@
+ host_alias = @host_alias@
+ host_cpu = @host_cpu@
+ host_os = @host_os@
++host_subdir = @host_subdir@
+ host_vendor = @host_vendor@
+ htmldir = @htmldir@
+ includedir = @includedir@
+@@ -307,6 +316,7 @@ libdir = @libdir@
+ libexecdir = @libexecdir@
+ localedir = @localedir@
+ localstatedir = @localstatedir@
++lt_host_flags = @lt_host_flags@
+ mandir = @mandir@
+ mkdir_p = @mkdir_p@
+ oldincludedir = @oldincludedir@
+@@ -315,7 +325,6 @@ prefix = @prefix@
+ program_transform_name = @program_transform_name@
+ psdir = @psdir@
+ real_target_noncanonical = @real_target_noncanonical@
+-runstatedir = @runstatedir@
+ sbindir = @sbindir@
+ sharedstatedir = @sharedstatedir@
+ srcdir = @srcdir@
+@@ -325,6 +334,7 @@ target_alias = @target_alias@
+ target_cpu = @target_cpu@
+ target_noncanonical := @target_noncanonical@
+ target_os = @target_os@
++target_subdir = @target_subdir@
+ target_vendor = @target_vendor@
+ top_build_prefix = @top_build_prefix@
+ top_builddir = @top_builddir@
+diff --git a/bolt-plugin/aclocal.m4 b/bolt-plugin/aclocal.m4
+index 679f2baa4..73bf7852c 100644
+--- a/bolt-plugin/aclocal.m4
++++ b/bolt-plugin/aclocal.m4
+@@ -1,6 +1,6 @@
+-# generated automatically by aclocal 1.16.5 -*- Autoconf -*-
++# generated automatically by aclocal 1.16.2 -*- Autoconf -*-
+ 
+-# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++# Copyright (C) 1996-2020 Free Software Foundation, Inc.
+ 
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -14,9077 +14,13 @@
+ m4_ifndef([AC_CONFIG_MACRO_DIRS], [m4_defun([_AM_CONFIG_MACRO_DIRS], [])m4_defun([AC_CONFIG_MACRO_DIRS], [_AM_CONFIG_MACRO_DIRS($@)])])
+ m4_ifndef([AC_AUTOCONF_VERSION],
+   [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+-m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.71],,
+-[m4_warning([this file was generated for autoconf 2.71.
++m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.69],,
++[m4_warning([this file was generated for autoconf 2.69.
+ You have another version of autoconf.  It may work, but is not guaranteed to.
+ If you have problems, you may need to regenerate the build system entirely.
+ To do so, use the procedure documented by the package, typically 'autoreconf'.])])
+ 
+-# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*-
+-#
+-#   Copyright (C) 1996-2001, 2003-2019, 2021-2022 Free Software
+-#   Foundation, Inc.
+-#   Written by Gordon Matzigkeit, 1996
+-#
+-# This file is free software; the Free Software Foundation gives
+-# unlimited permission to copy and/or distribute it, with or without
+-# modifications, as long as this notice is preserved.
+-
+-m4_define([_LT_COPYING], [dnl
+-# Copyright (C) 2014 Free Software Foundation, Inc.
+-# This is free software; see the source for copying conditions.  There is NO
+-# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+-
+-# GNU Libtool is free software; you can redistribute it and/or modify
+-# it under the terms of the GNU General Public License as published by
+-# the Free Software Foundation; either version 2 of of the License, or
+-# (at your option) any later version.
+-#
+-# As a special exception to the GNU General Public License, if you
+-# distribute this file as part of a program or library that is built
+-# using GNU Libtool, you may include this file under the  same
+-# distribution terms that you use for the rest of that program.
+-#
+-# GNU Libtool is distributed in the hope that it will be useful, but
+-# WITHOUT ANY WARRANTY; without even the implied warranty of
+-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-# GNU General Public License for more details.
+-#
+-# You should have received a copy of the GNU General Public License
+-# along with this program.  If not, see .
+-])
+-
+-# serial 59 LT_INIT
+-
+-
+-# LT_PREREQ(VERSION)
+-# ------------------
+-# Complain and exit if this libtool version is less that VERSION.
+-m4_defun([LT_PREREQ],
+-[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1,
+-       [m4_default([$3],
+-		   [m4_fatal([Libtool version $1 or higher is required],
+-		             63)])],
+-       [$2])])
+-
+-
+-# _LT_CHECK_BUILDDIR
+-# ------------------
+-# Complain if the absolute build directory name contains unusual characters
+-m4_defun([_LT_CHECK_BUILDDIR],
+-[case `pwd` in
+-  *\ * | *\	*)
+-    AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;;
+-esac
+-])
+-
+-
+-# LT_INIT([OPTIONS])
+-# ------------------
+-AC_DEFUN([LT_INIT],
+-[AC_PREREQ([2.62])dnl We use AC_PATH_PROGS_FEATURE_CHECK
+-AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl
+-AC_BEFORE([$0], [LT_LANG])dnl
+-AC_BEFORE([$0], [LT_OUTPUT])dnl
+-AC_BEFORE([$0], [LTDL_INIT])dnl
+-m4_require([_LT_CHECK_BUILDDIR])dnl
+-
+-dnl Autoconf doesn't catch unexpanded LT_ macros by default:
+-m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl
+-m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl
+-dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4
+-dnl unless we require an AC_DEFUNed macro:
+-AC_REQUIRE([LTOPTIONS_VERSION])dnl
+-AC_REQUIRE([LTSUGAR_VERSION])dnl
+-AC_REQUIRE([LTVERSION_VERSION])dnl
+-AC_REQUIRE([LTOBSOLETE_VERSION])dnl
+-m4_require([_LT_PROG_LTMAIN])dnl
+-
+-_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}])
+-
+-dnl Parse OPTIONS
+-_LT_SET_OPTIONS([$0], [$1])
+-
+-# This can be used to rebuild libtool when needed
+-LIBTOOL_DEPS=$ltmain
+-
+-# Always use our own libtool.
+-LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+-AC_SUBST(LIBTOOL)dnl
+-
+-_LT_SETUP
+-
+-# Only expand once:
+-m4_define([LT_INIT])
+-])# LT_INIT
+-
+-# Old names:
+-AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT])
+-AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_PROG_LIBTOOL], [])
+-dnl AC_DEFUN([AM_PROG_LIBTOOL], [])
+-
+-
+-# _LT_PREPARE_CC_BASENAME
+-# -----------------------
+-m4_defun([_LT_PREPARE_CC_BASENAME], [
+-# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+-func_cc_basename ()
+-{
+-    for cc_temp in @S|@*""; do
+-      case $cc_temp in
+-        compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;;
+-        distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;;
+-        \-*) ;;
+-        *) break;;
+-      esac
+-    done
+-    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+-}
+-])# _LT_PREPARE_CC_BASENAME
+-
+-
+-# _LT_CC_BASENAME(CC)
+-# -------------------
+-# It would be clearer to call AC_REQUIREs from _LT_PREPARE_CC_BASENAME,
+-# but that macro is also expanded into generated libtool script, which
+-# arranges for $SED and $ECHO to be set by different means.
+-m4_defun([_LT_CC_BASENAME],
+-[m4_require([_LT_PREPARE_CC_BASENAME])dnl
+-AC_REQUIRE([_LT_DECL_SED])dnl
+-AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
+-func_cc_basename $1
+-cc_basename=$func_cc_basename_result
+-])
+-
+-
+-# _LT_FILEUTILS_DEFAULTS
+-# ----------------------
+-# It is okay to use these file commands and assume they have been set
+-# sensibly after 'm4_require([_LT_FILEUTILS_DEFAULTS])'.
+-m4_defun([_LT_FILEUTILS_DEFAULTS],
+-[: ${CP="cp -f"}
+-: ${MV="mv -f"}
+-: ${RM="rm -f"}
+-])# _LT_FILEUTILS_DEFAULTS
+-
+-
+-# _LT_SETUP
+-# ---------
+-m4_defun([_LT_SETUP],
+-[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+-AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl
+-AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl
+-
+-_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl
+-dnl
+-_LT_DECL([], [host_alias], [0], [The host system])dnl
+-_LT_DECL([], [host], [0])dnl
+-_LT_DECL([], [host_os], [0])dnl
+-dnl
+-_LT_DECL([], [build_alias], [0], [The build system])dnl
+-_LT_DECL([], [build], [0])dnl
+-_LT_DECL([], [build_os], [0])dnl
+-dnl
+-AC_REQUIRE([AC_PROG_CC])dnl
+-AC_REQUIRE([LT_PATH_LD])dnl
+-AC_REQUIRE([LT_PATH_NM])dnl
+-dnl
+-AC_REQUIRE([AC_PROG_LN_S])dnl
+-test -z "$LN_S" && LN_S="ln -s"
+-_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl
+-dnl
+-AC_REQUIRE([LT_CMD_MAX_LEN])dnl
+-_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl
+-_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl
+-dnl
+-m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+-m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl
+-m4_require([_LT_CMD_RELOAD])dnl
+-m4_require([_LT_DECL_FILECMD])dnl
+-m4_require([_LT_CHECK_MAGIC_METHOD])dnl
+-m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl
+-m4_require([_LT_CMD_OLD_ARCHIVE])dnl
+-m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+-m4_require([_LT_WITH_SYSROOT])dnl
+-m4_require([_LT_CMD_TRUNCATE])dnl
+-
+-_LT_CONFIG_LIBTOOL_INIT([
+-# See if we are running on zsh, and set the options that allow our
+-# commands through without removal of \ escapes INIT.
+-if test -n "\${ZSH_VERSION+set}"; then
+-   setopt NO_GLOB_SUBST
+-fi
+-])
+-if test -n "${ZSH_VERSION+set}"; then
+-   setopt NO_GLOB_SUBST
+-fi
+-
+-_LT_CHECK_OBJDIR
+-
+-m4_require([_LT_TAG_COMPILER])dnl
+-
+-case $host_os in
+-aix3*)
+-  # AIX sometimes has problems with the GCC collect2 program.  For some
+-  # reason, if we set the COLLECT_NAMES environment variable, the problems
+-  # vanish in a puff of smoke.
+-  if test set != "${COLLECT_NAMES+set}"; then
+-    COLLECT_NAMES=
+-    export COLLECT_NAMES
+-  fi
+-  ;;
+-esac
+-
+-# Global variables:
+-ofile=libtool
+-can_build_shared=yes
+-
+-# All known linkers require a '.a' archive for static linking (except MSVC and
+-# ICC, which need '.lib').
+-libext=a
+-
+-with_gnu_ld=$lt_cv_prog_gnu_ld
+-
+-old_CC=$CC
+-old_CFLAGS=$CFLAGS
+-
+-# Set sane defaults for various variables
+-test -z "$CC" && CC=cc
+-test -z "$LTCC" && LTCC=$CC
+-test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+-test -z "$LD" && LD=ld
+-test -z "$ac_objext" && ac_objext=o
+-
+-_LT_CC_BASENAME([$compiler])
+-
+-# Only perform the check for file, if the check method requires it
+-test -z "$MAGIC_CMD" && MAGIC_CMD=file
+-case $deplibs_check_method in
+-file_magic*)
+-  if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+-    _LT_PATH_MAGIC
+-  fi
+-  ;;
+-esac
+-
+-# Use C for the default configuration in the libtool script
+-LT_SUPPORTED_TAG([CC])
+-_LT_LANG_C_CONFIG
+-_LT_LANG_DEFAULT_CONFIG
+-_LT_CONFIG_COMMANDS
+-])# _LT_SETUP
+-
+-
+-# _LT_PREPARE_SED_QUOTE_VARS
+-# --------------------------
+-# Define a few sed substitution that help us do robust quoting.
+-m4_defun([_LT_PREPARE_SED_QUOTE_VARS],
+-[# Backslashify metacharacters that are still active within
+-# double-quoted strings.
+-sed_quote_subst='s/\([["`$\\]]\)/\\\1/g'
+-
+-# Same as above, but do not quote variable references.
+-double_quote_subst='s/\([["`\\]]\)/\\\1/g'
+-
+-# Sed substitution to delay expansion of an escaped shell variable in a
+-# double_quote_subst'ed string.
+-delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g'
+-
+-# Sed substitution to delay expansion of an escaped single quote.
+-delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g'
+-
+-# Sed substitution to avoid accidental globbing in evaled expressions
+-no_glob_subst='s/\*/\\\*/g'
+-])
+-
+-# _LT_PROG_LTMAIN
+-# ---------------
+-# Note that this code is called both from 'configure', and 'config.status'
+-# now that we use AC_CONFIG_COMMANDS to generate libtool.  Notably,
+-# 'config.status' has no value for ac_aux_dir unless we are using Automake,
+-# so we pass a copy along to make sure it has a sensible value anyway.
+-m4_defun([_LT_PROG_LTMAIN],
+-[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl
+-_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir'])
+-ltmain=$ac_aux_dir/ltmain.sh
+-])# _LT_PROG_LTMAIN
+-
+-
+-
+-# So that we can recreate a full libtool script including additional
+-# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS
+-# in macros and then make a single call at the end using the 'libtool'
+-# label.
+-
+-
+-# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS])
+-# ----------------------------------------
+-# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+-m4_define([_LT_CONFIG_LIBTOOL_INIT],
+-[m4_ifval([$1],
+-          [m4_append([_LT_OUTPUT_LIBTOOL_INIT],
+-                     [$1
+-])])])
+-
+-# Initialize.
+-m4_define([_LT_OUTPUT_LIBTOOL_INIT])
+-
+-
+-# _LT_CONFIG_LIBTOOL([COMMANDS])
+-# ------------------------------
+-# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later.
+-m4_define([_LT_CONFIG_LIBTOOL],
+-[m4_ifval([$1],
+-          [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS],
+-                     [$1
+-])])])
+-
+-# Initialize.
+-m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS])
+-
+-
+-# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS])
+-# -----------------------------------------------------
+-m4_defun([_LT_CONFIG_SAVE_COMMANDS],
+-[_LT_CONFIG_LIBTOOL([$1])
+-_LT_CONFIG_LIBTOOL_INIT([$2])
+-])
+-
+-
+-# _LT_FORMAT_COMMENT([COMMENT])
+-# -----------------------------
+-# Add leading comment marks to the start of each line, and a trailing
+-# full-stop to the whole comment if one is not present already.
+-m4_define([_LT_FORMAT_COMMENT],
+-[m4_ifval([$1], [
+-m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])],
+-              [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.])
+-)])
+-
+-
+-
+-
+-
+-# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?])
+-# -------------------------------------------------------------------
+-# CONFIGNAME is the name given to the value in the libtool script.
+-# VARNAME is the (base) name used in the configure script.
+-# VALUE may be 0, 1 or 2 for a computed quote escaped value based on
+-# VARNAME.  Any other value will be used directly.
+-m4_define([_LT_DECL],
+-[lt_if_append_uniq([lt_decl_varnames], [$2], [, ],
+-    [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name],
+-	[m4_ifval([$1], [$1], [$2])])
+-    lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3])
+-    m4_ifval([$4],
+-	[lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])])
+-    lt_dict_add_subkey([lt_decl_dict], [$2],
+-	[tagged?], [m4_ifval([$5], [yes], [no])])])
+-])
+-
+-
+-# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION])
+-# --------------------------------------------------------
+-m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])])
+-
+-
+-# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...])
+-# ------------------------------------------------
+-m4_define([lt_decl_tag_varnames],
+-[_lt_decl_filter([tagged?], [yes], $@)])
+-
+-
+-# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..])
+-# ---------------------------------------------------------
+-m4_define([_lt_decl_filter],
+-[m4_case([$#],
+-  [0], [m4_fatal([$0: too few arguments: $#])],
+-  [1], [m4_fatal([$0: too few arguments: $#: $1])],
+-  [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)],
+-  [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)],
+-  [lt_dict_filter([lt_decl_dict], $@)])[]dnl
+-])
+-
+-
+-# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...])
+-# --------------------------------------------------
+-m4_define([lt_decl_quote_varnames],
+-[_lt_decl_filter([value], [1], $@)])
+-
+-
+-# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...])
+-# ---------------------------------------------------
+-m4_define([lt_decl_dquote_varnames],
+-[_lt_decl_filter([value], [2], $@)])
+-
+-
+-# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...])
+-# ---------------------------------------------------
+-m4_define([lt_decl_varnames_tagged],
+-[m4_assert([$# <= 2])dnl
+-_$0(m4_quote(m4_default([$1], [[, ]])),
+-    m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]),
+-    m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))])
+-m4_define([_lt_decl_varnames_tagged],
+-[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])])
+-
+-
+-# lt_decl_all_varnames([SEPARATOR], [VARNAME1...])
+-# ------------------------------------------------
+-m4_define([lt_decl_all_varnames],
+-[_$0(m4_quote(m4_default([$1], [[, ]])),
+-     m4_if([$2], [],
+-	   m4_quote(lt_decl_varnames),
+-	m4_quote(m4_shift($@))))[]dnl
+-])
+-m4_define([_lt_decl_all_varnames],
+-[lt_join($@, lt_decl_varnames_tagged([$1],
+-			lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl
+-])
+-
+-
+-# _LT_CONFIG_STATUS_DECLARE([VARNAME])
+-# ------------------------------------
+-# Quote a variable value, and forward it to 'config.status' so that its
+-# declaration there will have the same value as in 'configure'.  VARNAME
+-# must have a single quote delimited value for this to work.
+-m4_define([_LT_CONFIG_STATUS_DECLARE],
+-[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`'])
+-
+-
+-# _LT_CONFIG_STATUS_DECLARATIONS
+-# ------------------------------
+-# We delimit libtool config variables with single quotes, so when
+-# we write them to config.status, we have to be sure to quote all
+-# embedded single quotes properly.  In configure, this macro expands
+-# each variable declared with _LT_DECL (and _LT_TAGDECL) into:
+-#
+-#    ='`$ECHO "$" | $SED "$delay_single_quote_subst"`'
+-m4_defun([_LT_CONFIG_STATUS_DECLARATIONS],
+-[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames),
+-    [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])])
+-
+-
+-# _LT_LIBTOOL_TAGS
+-# ----------------
+-# Output comment and list of tags supported by the script
+-m4_defun([_LT_LIBTOOL_TAGS],
+-[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl
+-available_tags='_LT_TAGS'dnl
+-])
+-
+-
+-# _LT_LIBTOOL_DECLARE(VARNAME, [TAG])
+-# -----------------------------------
+-# Extract the dictionary values for VARNAME (optionally with TAG) and
+-# expand to a commented shell variable setting:
+-#
+-#    # Some comment about what VAR is for.
+-#    visible_name=$lt_internal_name
+-m4_define([_LT_LIBTOOL_DECLARE],
+-[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1],
+-					   [description])))[]dnl
+-m4_pushdef([_libtool_name],
+-    m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl
+-m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])),
+-    [0], [_libtool_name=[$]$1],
+-    [1], [_libtool_name=$lt_[]$1],
+-    [2], [_libtool_name=$lt_[]$1],
+-    [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl
+-m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl
+-])
+-
+-
+-# _LT_LIBTOOL_CONFIG_VARS
+-# -----------------------
+-# Produce commented declarations of non-tagged libtool config variables
+-# suitable for insertion in the LIBTOOL CONFIG section of the 'libtool'
+-# script.  Tagged libtool config variables (even for the LIBTOOL CONFIG
+-# section) are produced by _LT_LIBTOOL_TAG_VARS.
+-m4_defun([_LT_LIBTOOL_CONFIG_VARS],
+-[m4_foreach([_lt_var],
+-    m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)),
+-    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])])
+-
+-
+-# _LT_LIBTOOL_TAG_VARS(TAG)
+-# -------------------------
+-m4_define([_LT_LIBTOOL_TAG_VARS],
+-[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames),
+-    [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])])
+-
+-
+-# _LT_TAGVAR(VARNAME, [TAGNAME])
+-# ------------------------------
+-m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])])
+-
+-
+-# _LT_CONFIG_COMMANDS
+-# -------------------
+-# Send accumulated output to $CONFIG_STATUS.  Thanks to the lists of
+-# variables for single and double quote escaping we saved from calls
+-# to _LT_DECL, we can put quote escaped variables declarations
+-# into 'config.status', and then the shell code to quote escape them in
+-# for loops in 'config.status'.  Finally, any additional code accumulated
+-# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded.
+-m4_defun([_LT_CONFIG_COMMANDS],
+-[AC_PROVIDE_IFELSE([LT_OUTPUT],
+-	dnl If the libtool generation code has been placed in $CONFIG_LT,
+-	dnl instead of duplicating it all over again into config.status,
+-	dnl then we will have config.status run $CONFIG_LT later, so it
+-	dnl needs to know what name is stored there:
+-        [AC_CONFIG_COMMANDS([libtool],
+-            [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])],
+-    dnl If the libtool generation code is destined for config.status,
+-    dnl expand the accumulated commands and init code now:
+-    [AC_CONFIG_COMMANDS([libtool],
+-        [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])])
+-])#_LT_CONFIG_COMMANDS
+-
+-
+-# Initialize.
+-m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT],
+-[
+-
+-# The HP-UX ksh and POSIX shell print the target directory to stdout
+-# if CDPATH is set.
+-(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+-
+-sed_quote_subst='$sed_quote_subst'
+-double_quote_subst='$double_quote_subst'
+-delay_variable_subst='$delay_variable_subst'
+-_LT_CONFIG_STATUS_DECLARATIONS
+-LTCC='$LTCC'
+-LTCFLAGS='$LTCFLAGS'
+-compiler='$compiler_DEFAULT'
+-
+-# A function that is used when there is no print builtin or printf.
+-func_fallback_echo ()
+-{
+-  eval 'cat <<_LTECHO_EOF
+-\$[]1
+-_LTECHO_EOF'
+-}
+-
+-# Quote evaled strings.
+-for var in lt_decl_all_varnames([[ \
+-]], lt_decl_quote_varnames); do
+-    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+-    *[[\\\\\\\`\\"\\\$]]*)
+-      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
+-      ;;
+-    *)
+-      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+-      ;;
+-    esac
+-done
+-
+-# Double-quote double-evaled strings.
+-for var in lt_decl_all_varnames([[ \
+-]], lt_decl_dquote_varnames); do
+-    case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+-    *[[\\\\\\\`\\"\\\$]]*)
+-      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
+-      ;;
+-    *)
+-      eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+-      ;;
+-    esac
+-done
+-
+-_LT_OUTPUT_LIBTOOL_INIT
+-])
+-
+-# _LT_GENERATED_FILE_INIT(FILE, [COMMENT])
+-# ------------------------------------
+-# Generate a child script FILE with all initialization necessary to
+-# reuse the environment learned by the parent script, and make the
+-# file executable.  If COMMENT is supplied, it is inserted after the
+-# '#!' sequence but before initialization text begins.  After this
+-# macro, additional text can be appended to FILE to form the body of
+-# the child script.  The macro ends with non-zero status if the
+-# file could not be fully written (such as if the disk is full).
+-m4_ifdef([AS_INIT_GENERATED],
+-[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])],
+-[m4_defun([_LT_GENERATED_FILE_INIT],
+-[m4_require([AS_PREPARE])]dnl
+-[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl
+-[lt_write_fail=0
+-cat >$1 <<_ASEOF || lt_write_fail=1
+-#! $SHELL
+-# Generated by $as_me.
+-$2
+-SHELL=\${CONFIG_SHELL-$SHELL}
+-export SHELL
+-_ASEOF
+-cat >>$1 <<\_ASEOF || lt_write_fail=1
+-AS_SHELL_SANITIZE
+-_AS_PREPARE
+-exec AS_MESSAGE_FD>&1
+-_ASEOF
+-test 0 = "$lt_write_fail" && chmod +x $1[]dnl
+-m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT
+-
+-# LT_OUTPUT
+-# ---------
+-# This macro allows early generation of the libtool script (before
+-# AC_OUTPUT is called), incase it is used in configure for compilation
+-# tests.
+-AC_DEFUN([LT_OUTPUT],
+-[: ${CONFIG_LT=./config.lt}
+-AC_MSG_NOTICE([creating $CONFIG_LT])
+-_LT_GENERATED_FILE_INIT(["$CONFIG_LT"],
+-[# Run this file to recreate a libtool stub with the current configuration.])
+-
+-cat >>"$CONFIG_LT" <<\_LTEOF
+-lt_cl_silent=false
+-exec AS_MESSAGE_LOG_FD>>config.log
+-{
+-  echo
+-  AS_BOX([Running $as_me.])
+-} >&AS_MESSAGE_LOG_FD
+-
+-lt_cl_help="\
+-'$as_me' creates a local libtool stub from the current configuration,
+-for use in further configure time tests before the real libtool is
+-generated.
+-
+-Usage: $[0] [[OPTIONS]]
+-
+-  -h, --help      print this help, then exit
+-  -V, --version   print version number, then exit
+-  -q, --quiet     do not print progress messages
+-  -d, --debug     don't remove temporary files
+-
+-Report bugs to ."
+-
+-lt_cl_version="\
+-m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl
+-m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION])
+-configured by $[0], generated by m4_PACKAGE_STRING.
+-
+-Copyright (C) 2011 Free Software Foundation, Inc.
+-This config.lt script is free software; the Free Software Foundation
+-gives unlimited permision to copy, distribute and modify it."
+-
+-while test 0 != $[#]
+-do
+-  case $[1] in
+-    --version | --v* | -V )
+-      echo "$lt_cl_version"; exit 0 ;;
+-    --help | --h* | -h )
+-      echo "$lt_cl_help"; exit 0 ;;
+-    --debug | --d* | -d )
+-      debug=: ;;
+-    --quiet | --q* | --silent | --s* | -q )
+-      lt_cl_silent=: ;;
+-
+-    -*) AC_MSG_ERROR([unrecognized option: $[1]
+-Try '$[0] --help' for more information.]) ;;
+-
+-    *) AC_MSG_ERROR([unrecognized argument: $[1]
+-Try '$[0] --help' for more information.]) ;;
+-  esac
+-  shift
+-done
+-
+-if $lt_cl_silent; then
+-  exec AS_MESSAGE_FD>/dev/null
+-fi
+-_LTEOF
+-
+-cat >>"$CONFIG_LT" <<_LTEOF
+-_LT_OUTPUT_LIBTOOL_COMMANDS_INIT
+-_LTEOF
+-
+-cat >>"$CONFIG_LT" <<\_LTEOF
+-AC_MSG_NOTICE([creating $ofile])
+-_LT_OUTPUT_LIBTOOL_COMMANDS
+-AS_EXIT(0)
+-_LTEOF
+-chmod +x "$CONFIG_LT"
+-
+-# configure is writing to config.log, but config.lt does its own redirection,
+-# appending to config.log, which fails on DOS, as config.log is still kept
+-# open by configure.  Here we exec the FD to /dev/null, effectively closing
+-# config.log, so it can be properly (re)opened and appended to by config.lt.
+-lt_cl_success=:
+-test yes = "$silent" &&
+-  lt_config_lt_args="$lt_config_lt_args --quiet"
+-exec AS_MESSAGE_LOG_FD>/dev/null
+-$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false
+-exec AS_MESSAGE_LOG_FD>>config.log
+-$lt_cl_success || AS_EXIT(1)
+-])# LT_OUTPUT
+-
+-
+-# _LT_CONFIG(TAG)
+-# ---------------
+-# If TAG is the built-in tag, create an initial libtool script with a
+-# default configuration from the untagged config vars.  Otherwise add code
+-# to config.status for appending the configuration named by TAG from the
+-# matching tagged config vars.
+-m4_defun([_LT_CONFIG],
+-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-_LT_CONFIG_SAVE_COMMANDS([
+-  m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl
+-  m4_if(_LT_TAG, [C], [
+-    # See if we are running on zsh, and set the options that allow our
+-    # commands through without removal of \ escapes.
+-    if test -n "${ZSH_VERSION+set}"; then
+-      setopt NO_GLOB_SUBST
+-    fi
+-
+-    cfgfile=${ofile}T
+-    trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+-    $RM "$cfgfile"
+-
+-    cat <<_LT_EOF >> "$cfgfile"
+-#! $SHELL
+-# Generated automatically by $as_me ($PACKAGE) $VERSION
+-# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+-# NOTE: Changes made to this file will be lost: look at ltmain.sh.
+-
+-# Provide generalized library-building support services.
+-# Written by Gordon Matzigkeit, 1996
+-
+-_LT_COPYING
+-_LT_LIBTOOL_TAGS
+-
+-# Configured defaults for sys_lib_dlsearch_path munging.
+-: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}
+-
+-# ### BEGIN LIBTOOL CONFIG
+-_LT_LIBTOOL_CONFIG_VARS
+-_LT_LIBTOOL_TAG_VARS
+-# ### END LIBTOOL CONFIG
+-
+-_LT_EOF
+-
+-    cat <<'_LT_EOF' >> "$cfgfile"
+-
+-# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE
+-
+-_LT_PREPARE_MUNGE_PATH_LIST
+-_LT_PREPARE_CC_BASENAME
+-
+-# ### END FUNCTIONS SHARED WITH CONFIGURE
+-
+-_LT_EOF
+-
+-  case $host_os in
+-  aix3*)
+-    cat <<\_LT_EOF >> "$cfgfile"
+-# AIX sometimes has problems with the GCC collect2 program.  For some
+-# reason, if we set the COLLECT_NAMES environment variable, the problems
+-# vanish in a puff of smoke.
+-if test set != "${COLLECT_NAMES+set}"; then
+-  COLLECT_NAMES=
+-  export COLLECT_NAMES
+-fi
+-_LT_EOF
+-    ;;
+-  esac
+-
+-  _LT_PROG_LTMAIN
+-
+-  # We use sed instead of cat because bash on DJGPP gets confused if
+-  # if finds mixed CR/LF and LF-only lines.  Since sed operates in
+-  # text mode, it properly converts lines to CR/LF.  This bash problem
+-  # is reportedly fixed, but why not run on old versions too?
+-  $SED '$q' "$ltmain" >> "$cfgfile" \
+-     || (rm -f "$cfgfile"; exit 1)
+-
+-   mv -f "$cfgfile" "$ofile" ||
+-    (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+-  chmod +x "$ofile"
+-],
+-[cat <<_LT_EOF >> "$ofile"
+-
+-dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded
+-dnl in a comment (ie after a #).
+-# ### BEGIN LIBTOOL TAG CONFIG: $1
+-_LT_LIBTOOL_TAG_VARS(_LT_TAG)
+-# ### END LIBTOOL TAG CONFIG: $1
+-_LT_EOF
+-])dnl /m4_if
+-],
+-[m4_if([$1], [], [
+-    PACKAGE='$PACKAGE'
+-    VERSION='$VERSION'
+-    RM='$RM'
+-    ofile='$ofile'], [])
+-])dnl /_LT_CONFIG_SAVE_COMMANDS
+-])# _LT_CONFIG
+-
+-
+-# LT_SUPPORTED_TAG(TAG)
+-# ---------------------
+-# Trace this macro to discover what tags are supported by the libtool
+-# --tag option, using:
+-#    autoconf --trace 'LT_SUPPORTED_TAG:$1'
+-AC_DEFUN([LT_SUPPORTED_TAG], [])
+-
+-
+-# C support is built-in for now
+-m4_define([_LT_LANG_C_enabled], [])
+-m4_define([_LT_TAGS], [])
+-
+-
+-# LT_LANG(LANG)
+-# -------------
+-# Enable libtool support for the given language if not already enabled.
+-AC_DEFUN([LT_LANG],
+-[AC_BEFORE([$0], [LT_OUTPUT])dnl
+-m4_case([$1],
+-  [C],			[_LT_LANG(C)],
+-  [C++],		[_LT_LANG(CXX)],
+-  [Go],			[_LT_LANG(GO)],
+-  [Java],		[_LT_LANG(GCJ)],
+-  [Fortran 77],		[_LT_LANG(F77)],
+-  [Fortran],		[_LT_LANG(FC)],
+-  [Windows Resource],	[_LT_LANG(RC)],
+-  [m4_ifdef([_LT_LANG_]$1[_CONFIG],
+-    [_LT_LANG($1)],
+-    [m4_fatal([$0: unsupported language: "$1"])])])dnl
+-])# LT_LANG
+-
+-
+-# _LT_LANG(LANGNAME)
+-# ------------------
+-m4_defun([_LT_LANG],
+-[m4_ifdef([_LT_LANG_]$1[_enabled], [],
+-  [LT_SUPPORTED_TAG([$1])dnl
+-  m4_append([_LT_TAGS], [$1 ])dnl
+-  m4_define([_LT_LANG_]$1[_enabled], [])dnl
+-  _LT_LANG_$1_CONFIG($1)])dnl
+-])# _LT_LANG
+-
+-
+-m4_ifndef([AC_PROG_GO], [
+-# NOTE: This macro has been submitted for inclusion into   #
+-#  GNU Autoconf as AC_PROG_GO.  When it is available in    #
+-#  a released version of Autoconf we should remove this    #
+-#  macro and use it instead.                               #
+-m4_defun([AC_PROG_GO],
+-[AC_LANG_PUSH(Go)dnl
+-AC_ARG_VAR([GOC],     [Go compiler command])dnl
+-AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl
+-_AC_ARG_VAR_LDFLAGS()dnl
+-AC_CHECK_TOOL(GOC, gccgo)
+-if test -z "$GOC"; then
+-  if test -n "$ac_tool_prefix"; then
+-    AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo])
+-  fi
+-fi
+-if test -z "$GOC"; then
+-  AC_CHECK_PROG(GOC, gccgo, gccgo, false)
+-fi
+-])#m4_defun
+-])#m4_ifndef
+-
+-
+-# _LT_LANG_DEFAULT_CONFIG
+-# -----------------------
+-m4_defun([_LT_LANG_DEFAULT_CONFIG],
+-[AC_PROVIDE_IFELSE([AC_PROG_CXX],
+-  [LT_LANG(CXX)],
+-  [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])])
+-
+-AC_PROVIDE_IFELSE([AC_PROG_F77],
+-  [LT_LANG(F77)],
+-  [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])])
+-
+-AC_PROVIDE_IFELSE([AC_PROG_FC],
+-  [LT_LANG(FC)],
+-  [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])])
+-
+-dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal
+-dnl pulling things in needlessly.
+-AC_PROVIDE_IFELSE([AC_PROG_GCJ],
+-  [LT_LANG(GCJ)],
+-  [AC_PROVIDE_IFELSE([A][M_PROG_GCJ],
+-    [LT_LANG(GCJ)],
+-    [AC_PROVIDE_IFELSE([LT_PROG_GCJ],
+-      [LT_LANG(GCJ)],
+-      [m4_ifdef([AC_PROG_GCJ],
+-	[m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])])
+-       m4_ifdef([A][M_PROG_GCJ],
+-	[m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])])
+-       m4_ifdef([LT_PROG_GCJ],
+-	[m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])])
+-
+-AC_PROVIDE_IFELSE([AC_PROG_GO],
+-  [LT_LANG(GO)],
+-  [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])])
+-
+-AC_PROVIDE_IFELSE([LT_PROG_RC],
+-  [LT_LANG(RC)],
+-  [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])])
+-])# _LT_LANG_DEFAULT_CONFIG
+-
+-# Obsolete macros:
+-AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)])
+-AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)])
+-AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)])
+-AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)])
+-AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_CXX], [])
+-dnl AC_DEFUN([AC_LIBTOOL_F77], [])
+-dnl AC_DEFUN([AC_LIBTOOL_FC], [])
+-dnl AC_DEFUN([AC_LIBTOOL_GCJ], [])
+-dnl AC_DEFUN([AC_LIBTOOL_RC], [])
+-
+-
+-# _LT_TAG_COMPILER
+-# ----------------
+-m4_defun([_LT_TAG_COMPILER],
+-[AC_REQUIRE([AC_PROG_CC])dnl
+-
+-_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl
+-_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl
+-_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl
+-_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl
+-
+-# If no C compiler was specified, use CC.
+-LTCC=${LTCC-"$CC"}
+-
+-# If no C compiler flags were specified, use CFLAGS.
+-LTCFLAGS=${LTCFLAGS-"$CFLAGS"}
+-
+-# Allow CC to be a program name with arguments.
+-compiler=$CC
+-])# _LT_TAG_COMPILER
+-
+-
+-# _LT_COMPILER_BOILERPLATE
+-# ------------------------
+-# Check for compiler boilerplate output or warnings with
+-# the simple compiler test code.
+-m4_defun([_LT_COMPILER_BOILERPLATE],
+-[m4_require([_LT_DECL_SED])dnl
+-ac_outfile=conftest.$ac_objext
+-echo "$lt_simple_compile_test_code" >conftest.$ac_ext
+-eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+-_lt_compiler_boilerplate=`cat conftest.err`
+-$RM conftest*
+-])# _LT_COMPILER_BOILERPLATE
+-
+-
+-# _LT_LINKER_BOILERPLATE
+-# ----------------------
+-# Check for linker boilerplate output or warnings with
+-# the simple link test code.
+-m4_defun([_LT_LINKER_BOILERPLATE],
+-[m4_require([_LT_DECL_SED])dnl
+-ac_outfile=conftest.$ac_objext
+-echo "$lt_simple_link_test_code" >conftest.$ac_ext
+-eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err
+-_lt_linker_boilerplate=`cat conftest.err`
+-$RM -r conftest*
+-])# _LT_LINKER_BOILERPLATE
+-
+-# _LT_REQUIRED_DARWIN_CHECKS
+-# -------------------------
+-m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[
+-  case $host_os in
+-    rhapsody* | darwin*)
+-    AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:])
+-    AC_CHECK_TOOL([NMEDIT], [nmedit], [:])
+-    AC_CHECK_TOOL([LIPO], [lipo], [:])
+-    AC_CHECK_TOOL([OTOOL], [otool], [:])
+-    AC_CHECK_TOOL([OTOOL64], [otool64], [:])
+-    _LT_DECL([], [DSYMUTIL], [1],
+-      [Tool to manipulate archived DWARF debug symbol files on Mac OS X])
+-    _LT_DECL([], [NMEDIT], [1],
+-      [Tool to change global to local symbols on Mac OS X])
+-    _LT_DECL([], [LIPO], [1],
+-      [Tool to manipulate fat objects and archives on Mac OS X])
+-    _LT_DECL([], [OTOOL], [1],
+-      [ldd/readelf like tool for Mach-O binaries on Mac OS X])
+-    _LT_DECL([], [OTOOL64], [1],
+-      [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4])
+-
+-    AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod],
+-      [lt_cv_apple_cc_single_mod=no
+-      if test -z "$LT_MULTI_MODULE"; then
+-	# By default we will add the -single_module flag. You can override
+-	# by either setting the environment variable LT_MULTI_MODULE
+-	# non-empty at configure time, or by adding -multi_module to the
+-	# link flags.
+-	rm -rf libconftest.dylib*
+-	echo "int foo(void){return 1;}" > conftest.c
+-	echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+--dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD
+-	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+-	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+-        _lt_result=$?
+-	# If there is a non-empty error log, and "single_module"
+-	# appears in it, assume the flag caused a linker warning
+-        if test -s conftest.err && $GREP single_module conftest.err; then
+-	  cat conftest.err >&AS_MESSAGE_LOG_FD
+-	# Otherwise, if the output was created with a 0 exit code from
+-	# the compiler, it worked.
+-	elif test -f libconftest.dylib && test 0 = "$_lt_result"; then
+-	  lt_cv_apple_cc_single_mod=yes
+-	else
+-	  cat conftest.err >&AS_MESSAGE_LOG_FD
+-	fi
+-	rm -rf libconftest.dylib*
+-	rm -f conftest.*
+-      fi])
+-
+-    AC_CACHE_CHECK([for -exported_symbols_list linker flag],
+-      [lt_cv_ld_exported_symbols_list],
+-      [lt_cv_ld_exported_symbols_list=no
+-      save_LDFLAGS=$LDFLAGS
+-      echo "_main" > conftest.sym
+-      LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym"
+-      AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+-	[lt_cv_ld_exported_symbols_list=yes],
+-	[lt_cv_ld_exported_symbols_list=no])
+-	LDFLAGS=$save_LDFLAGS
+-    ])
+-
+-    AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load],
+-      [lt_cv_ld_force_load=no
+-      cat > conftest.c << _LT_EOF
+-int forced_loaded() { return 2;}
+-_LT_EOF
+-      echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD
+-      $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD
+-      echo "$AR $AR_FLAGS libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD
+-      $AR $AR_FLAGS libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD
+-      echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD
+-      $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD
+-      cat > conftest.c << _LT_EOF
+-int main() { return 0;}
+-_LT_EOF
+-      echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD
+-      $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+-      _lt_result=$?
+-      if test -s conftest.err && $GREP force_load conftest.err; then
+-	cat conftest.err >&AS_MESSAGE_LOG_FD
+-      elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then
+-	lt_cv_ld_force_load=yes
+-      else
+-	cat conftest.err >&AS_MESSAGE_LOG_FD
+-      fi
+-        rm -f conftest.err libconftest.a conftest conftest.c
+-        rm -rf conftest.dSYM
+-    ])
+-    case $host_os in
+-    rhapsody* | darwin1.[[012]])
+-      _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;;
+-    darwin1.*)
+-      _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+-    darwin*)
+-      case $MACOSX_DEPLOYMENT_TARGET,$host in
+-        10.[[012]],*|,*powerpc*-darwin[[5-8]]*)
+-          _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+-        *)
+-          _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;;
+-      esac
+-    ;;
+-  esac
+-    if test yes = "$lt_cv_apple_cc_single_mod"; then
+-      _lt_dar_single_mod='$single_module'
+-    fi
+-    if test yes = "$lt_cv_ld_exported_symbols_list"; then
+-      _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym'
+-    else
+-      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib'
+-    fi
+-    if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then
+-      _lt_dsymutil='~$DSYMUTIL $lib || :'
+-    else
+-      _lt_dsymutil=
+-    fi
+-    ;;
+-  esac
+-])
+-
+-
+-# _LT_DARWIN_LINKER_FEATURES([TAG])
+-# ---------------------------------
+-# Checks for linker and compiler features on darwin
+-m4_defun([_LT_DARWIN_LINKER_FEATURES],
+-[
+-  m4_require([_LT_REQUIRED_DARWIN_CHECKS])
+-  _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-  _LT_TAGVAR(hardcode_direct, $1)=no
+-  _LT_TAGVAR(hardcode_automatic, $1)=yes
+-  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+-  if test yes = "$lt_cv_ld_force_load"; then
+-    _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+-    m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes],
+-                  [FC],  [_LT_TAGVAR(compiler_needs_object, $1)=yes])
+-  else
+-    _LT_TAGVAR(whole_archive_flag_spec, $1)=''
+-  fi
+-  _LT_TAGVAR(link_all_deplibs, $1)=yes
+-  _LT_TAGVAR(allow_undefined_flag, $1)=$_lt_dar_allow_undefined
+-  case $cc_basename in
+-     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
+-     *) _lt_dar_can_shared=$GCC ;;
+-  esac
+-  if test yes = "$_lt_dar_can_shared"; then
+-    output_verbose_link_cmd=func_echo_all
+-    _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
+-    _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
+-    _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
+-    _LT_TAGVAR(module_expsym_cmds, $1)="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
+-    m4_if([$1], [CXX],
+-[   if test yes != "$lt_cv_apple_cc_single_mod"; then
+-      _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil"
+-      _LT_TAGVAR(archive_expsym_cmds, $1)="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil"
+-    fi
+-],[])
+-  else
+-  _LT_TAGVAR(ld_shlibs, $1)=no
+-  fi
+-])
+-
+-# _LT_SYS_MODULE_PATH_AIX([TAGNAME])
+-# ----------------------------------
+-# Links a minimal program and checks the executable
+-# for the system default hardcoded library path. In most cases,
+-# this is /usr/lib:/lib, but when the MPI compilers are used
+-# the location of the communication and MPI libs are included too.
+-# If we don't find anything, use the default library path according
+-# to the aix ld manual.
+-# Store the results from the different compilers for each TAGNAME.
+-# Allow to override them for all tags through lt_cv_aix_libpath.
+-m4_defun([_LT_SYS_MODULE_PATH_AIX],
+-[m4_require([_LT_DECL_SED])dnl
+-if test set = "${lt_cv_aix_libpath+set}"; then
+-  aix_libpath=$lt_cv_aix_libpath
+-else
+-  AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])],
+-  [AC_LINK_IFELSE([AC_LANG_PROGRAM],[
+-  lt_aix_libpath_sed='[
+-      /Import File Strings/,/^$/ {
+-	  /^0/ {
+-	      s/^0  *\([^ ]*\) *$/\1/
+-	      p
+-	  }
+-      }]'
+-  _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  # Check for a 64-bit object if we didn't find anything.
+-  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+-    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  fi],[])
+-  if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then
+-    _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=/usr/lib:/lib
+-  fi
+-  ])
+-  aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])
+-fi
+-])# _LT_SYS_MODULE_PATH_AIX
+-
+-
+-# _LT_SHELL_INIT(ARG)
+-# -------------------
+-m4_define([_LT_SHELL_INIT],
+-[m4_divert_text([M4SH-INIT], [$1
+-])])# _LT_SHELL_INIT
+-
+-
+-
+-# _LT_PROG_ECHO_BACKSLASH
+-# -----------------------
+-# Find how we can fake an echo command that does not interpret backslash.
+-# In particular, with Autoconf 2.60 or later we add some code to the start
+-# of the generated configure script that will find a shell with a builtin
+-# printf (that we can use as an echo command).
+-m4_defun([_LT_PROG_ECHO_BACKSLASH],
+-[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+-ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+-ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+-
+-AC_MSG_CHECKING([how to print strings])
+-# Test print first, because it will be a builtin if present.
+-if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
+-   test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+-  ECHO='print -r --'
+-elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+-  ECHO='printf %s\n'
+-else
+-  # Use this function as a fallback that always works.
+-  func_fallback_echo ()
+-  {
+-    eval 'cat <<_LTECHO_EOF
+-$[]1
+-_LTECHO_EOF'
+-  }
+-  ECHO='func_fallback_echo'
+-fi
+-
+-# func_echo_all arg...
+-# Invoke $ECHO with all args, space-separated.
+-func_echo_all ()
+-{
+-    $ECHO "$*"
+-}
+-
+-case $ECHO in
+-  printf*) AC_MSG_RESULT([printf]) ;;
+-  print*) AC_MSG_RESULT([print -r]) ;;
+-  *) AC_MSG_RESULT([cat]) ;;
+-esac
+-
+-m4_ifdef([_AS_DETECT_SUGGESTED],
+-[_AS_DETECT_SUGGESTED([
+-  test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || (
+-    ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+-    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+-    ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+-    PATH=/empty FPATH=/empty; export PATH FPATH
+-    test "X`printf %s $ECHO`" = "X$ECHO" \
+-      || test "X`print -r -- $ECHO`" = "X$ECHO" )])])
+-
+-_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts])
+-_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes])
+-])# _LT_PROG_ECHO_BACKSLASH
+-
+-
+-# _LT_WITH_SYSROOT
+-# ----------------
+-AC_DEFUN([_LT_WITH_SYSROOT],
+-[m4_require([_LT_DECL_SED])dnl
+-AC_MSG_CHECKING([for sysroot])
+-AC_ARG_WITH([sysroot],
+-[AS_HELP_STRING([--with-sysroot@<:@=DIR@:>@],
+-  [Search for dependent libraries within DIR (or the compiler's sysroot
+-   if not specified).])],
+-[], [with_sysroot=no])
+-
+-dnl lt_sysroot will always be passed unquoted.  We quote it here
+-dnl in case the user passed a directory name.
+-lt_sysroot=
+-case $with_sysroot in #(
+- yes)
+-   if test yes = "$GCC"; then
+-     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+-   fi
+-   ;; #(
+- /*)
+-   lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"`
+-   ;; #(
+- no|'')
+-   ;; #(
+- *)
+-   AC_MSG_RESULT([$with_sysroot])
+-   AC_MSG_ERROR([The sysroot must be an absolute path.])
+-   ;;
+-esac
+-
+- AC_MSG_RESULT([${lt_sysroot:-no}])
+-_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl
+-[dependent libraries, and where our libraries should be installed.])])
+-
+-# _LT_ENABLE_LOCK
+-# ---------------
+-m4_defun([_LT_ENABLE_LOCK],
+-[AC_ARG_ENABLE([libtool-lock],
+-  [AS_HELP_STRING([--disable-libtool-lock],
+-    [avoid locking (might break parallel builds)])])
+-test no = "$enable_libtool_lock" || enable_libtool_lock=yes
+-
+-# Some flags need to be propagated to the compiler or linker for good
+-# libtool support.
+-case $host in
+-ia64-*-hpux*)
+-  # Find out what ABI is being produced by ac_compile, and set mode
+-  # options accordingly.
+-  echo 'int i;' > conftest.$ac_ext
+-  if AC_TRY_EVAL(ac_compile); then
+-    case `$FILECMD conftest.$ac_objext` in
+-      *ELF-32*)
+-	HPUX_IA64_MODE=32
+-	;;
+-      *ELF-64*)
+-	HPUX_IA64_MODE=64
+-	;;
+-    esac
+-  fi
+-  rm -rf conftest*
+-  ;;
+-*-*-irix6*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.
+-  echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
+-  if AC_TRY_EVAL(ac_compile); then
+-    if test yes = "$lt_cv_prog_gnu_ld"; then
+-      case `$FILECMD conftest.$ac_objext` in
+-	*32-bit*)
+-	  LD="${LD-ld} -melf32bsmip"
+-	  ;;
+-	*N32*)
+-	  LD="${LD-ld} -melf32bmipn32"
+-	  ;;
+-	*64-bit*)
+-	  LD="${LD-ld} -melf64bmip"
+-	;;
+-      esac
+-    else
+-      case `$FILECMD conftest.$ac_objext` in
+-	*32-bit*)
+-	  LD="${LD-ld} -32"
+-	  ;;
+-	*N32*)
+-	  LD="${LD-ld} -n32"
+-	  ;;
+-	*64-bit*)
+-	  LD="${LD-ld} -64"
+-	  ;;
+-      esac
+-    fi
+-  fi
+-  rm -rf conftest*
+-  ;;
+-
+-mips64*-*linux*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.
+-  echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext
+-  if AC_TRY_EVAL(ac_compile); then
+-    emul=elf
+-    case `$FILECMD conftest.$ac_objext` in
+-      *32-bit*)
+-	emul="${emul}32"
+-	;;
+-      *64-bit*)
+-	emul="${emul}64"
+-	;;
+-    esac
+-    case `$FILECMD conftest.$ac_objext` in
+-      *MSB*)
+-	emul="${emul}btsmip"
+-	;;
+-      *LSB*)
+-	emul="${emul}ltsmip"
+-	;;
+-    esac
+-    case `$FILECMD conftest.$ac_objext` in
+-      *N32*)
+-	emul="${emul}n32"
+-	;;
+-    esac
+-    LD="${LD-ld} -m $emul"
+-  fi
+-  rm -rf conftest*
+-  ;;
+-
+-x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \
+-s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.  Note that the listed cases only cover the
+-  # situations where additional linker options are needed (such as when
+-  # doing 32-bit compilation for a host where ld defaults to 64-bit, or
+-  # vice versa); the common cases where no linker options are needed do
+-  # not appear in the list.
+-  echo 'int i;' > conftest.$ac_ext
+-  if AC_TRY_EVAL(ac_compile); then
+-    case `$FILECMD conftest.o` in
+-      *32-bit*)
+-	case $host in
+-	  x86_64-*kfreebsd*-gnu)
+-	    LD="${LD-ld} -m elf_i386_fbsd"
+-	    ;;
+-	  x86_64-*linux*)
+-	    case `$FILECMD conftest.o` in
+-	      *x86-64*)
+-		LD="${LD-ld} -m elf32_x86_64"
+-		;;
+-	      *)
+-		LD="${LD-ld} -m elf_i386"
+-		;;
+-	    esac
+-	    ;;
+-	  powerpc64le-*linux*)
+-	    LD="${LD-ld} -m elf32lppclinux"
+-	    ;;
+-	  powerpc64-*linux*)
+-	    LD="${LD-ld} -m elf32ppclinux"
+-	    ;;
+-	  s390x-*linux*)
+-	    LD="${LD-ld} -m elf_s390"
+-	    ;;
+-	  sparc64-*linux*)
+-	    LD="${LD-ld} -m elf32_sparc"
+-	    ;;
+-	esac
+-	;;
+-      *64-bit*)
+-	case $host in
+-	  x86_64-*kfreebsd*-gnu)
+-	    LD="${LD-ld} -m elf_x86_64_fbsd"
+-	    ;;
+-	  x86_64-*linux*)
+-	    LD="${LD-ld} -m elf_x86_64"
+-	    ;;
+-	  powerpcle-*linux*)
+-	    LD="${LD-ld} -m elf64lppc"
+-	    ;;
+-	  powerpc-*linux*)
+-	    LD="${LD-ld} -m elf64ppc"
+-	    ;;
+-	  s390*-*linux*|s390*-*tpf*)
+-	    LD="${LD-ld} -m elf64_s390"
+-	    ;;
+-	  sparc*-*linux*)
+-	    LD="${LD-ld} -m elf64_sparc"
+-	    ;;
+-	esac
+-	;;
+-    esac
+-  fi
+-  rm -rf conftest*
+-  ;;
+-
+-*-*-sco3.2v5*)
+-  # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+-  SAVE_CFLAGS=$CFLAGS
+-  CFLAGS="$CFLAGS -belf"
+-  AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf,
+-    [AC_LANG_PUSH(C)
+-     AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no])
+-     AC_LANG_POP])
+-  if test yes != "$lt_cv_cc_needs_belf"; then
+-    # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+-    CFLAGS=$SAVE_CFLAGS
+-  fi
+-  ;;
+-*-*solaris*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.
+-  echo 'int i;' > conftest.$ac_ext
+-  if AC_TRY_EVAL(ac_compile); then
+-    case `$FILECMD conftest.o` in
+-    *64-bit*)
+-      case $lt_cv_prog_gnu_ld in
+-      yes*)
+-        case $host in
+-        i?86-*-solaris*|x86_64-*-solaris*)
+-          LD="${LD-ld} -m elf_x86_64"
+-          ;;
+-        sparc*-*-solaris*)
+-          LD="${LD-ld} -m elf64_sparc"
+-          ;;
+-        esac
+-        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
+-        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+-          LD=${LD-ld}_sol2
+-        fi
+-        ;;
+-      *)
+-	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+-	  LD="${LD-ld} -64"
+-	fi
+-	;;
+-      esac
+-      ;;
+-    esac
+-  fi
+-  rm -rf conftest*
+-  ;;
+-esac
+-
+-need_locks=$enable_libtool_lock
+-])# _LT_ENABLE_LOCK
+-
+-
+-# _LT_PROG_AR
+-# -----------
+-m4_defun([_LT_PROG_AR],
+-[AC_CHECK_TOOLS(AR, [ar], false)
+-: ${AR=ar}
+-_LT_DECL([], [AR], [1], [The archiver])
+-
+-# Use ARFLAGS variable as AR's operation code to sync the variable naming with
+-# Automake.  If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have
+-# higher priority because thats what people were doing historically (setting
+-# ARFLAGS for automake and AR_FLAGS for libtool).  FIXME: Make the AR_FLAGS
+-# variable obsoleted/removed.
+-
+-test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr}
+-lt_ar_flags=$AR_FLAGS
+-_LT_DECL([], [lt_ar_flags], [0], [Flags to create an archive (by configure)])
+-
+-# Make AR_FLAGS overridable by 'make ARFLAGS='.  Don't try to run-time override
+-# by AR_FLAGS because that was never working and AR_FLAGS is about to die.
+-_LT_DECL([], [AR_FLAGS], [\@S|@{ARFLAGS-"\@S|@lt_ar_flags"}],
+-         [Flags to create an archive])
+-
+-AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file],
+-  [lt_cv_ar_at_file=no
+-   AC_COMPILE_IFELSE([AC_LANG_PROGRAM],
+-     [echo conftest.$ac_objext > conftest.lst
+-      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD'
+-      AC_TRY_EVAL([lt_ar_try])
+-      if test 0 -eq "$ac_status"; then
+-	# Ensure the archiver fails upon bogus file names.
+-	rm -f conftest.$ac_objext libconftest.a
+-	AC_TRY_EVAL([lt_ar_try])
+-	if test 0 -ne "$ac_status"; then
+-          lt_cv_ar_at_file=@
+-        fi
+-      fi
+-      rm -f conftest.* libconftest.a
+-     ])
+-  ])
+-
+-if test no = "$lt_cv_ar_at_file"; then
+-  archiver_list_spec=
+-else
+-  archiver_list_spec=$lt_cv_ar_at_file
+-fi
+-_LT_DECL([], [archiver_list_spec], [1],
+-  [How to feed a file listing to the archiver])
+-])# _LT_PROG_AR
+-
+-
+-# _LT_CMD_OLD_ARCHIVE
+-# -------------------
+-m4_defun([_LT_CMD_OLD_ARCHIVE],
+-[_LT_PROG_AR
+-
+-AC_CHECK_TOOL(STRIP, strip, :)
+-test -z "$STRIP" && STRIP=:
+-_LT_DECL([], [STRIP], [1], [A symbol stripping program])
+-
+-AC_CHECK_TOOL(RANLIB, ranlib, :)
+-test -z "$RANLIB" && RANLIB=:
+-_LT_DECL([], [RANLIB], [1],
+-    [Commands used to install an old-style archive])
+-
+-# Determine commands to create old-style static archives.
+-old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs'
+-old_postinstall_cmds='chmod 644 $oldlib'
+-old_postuninstall_cmds=
+-
+-if test -n "$RANLIB"; then
+-  case $host_os in
+-  bitrig* | openbsd*)
+-    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
+-    ;;
+-  *)
+-    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
+-    ;;
+-  esac
+-  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
+-fi
+-
+-case $host_os in
+-  darwin*)
+-    lock_old_archive_extraction=yes ;;
+-  *)
+-    lock_old_archive_extraction=no ;;
+-esac
+-_LT_DECL([], [old_postinstall_cmds], [2])
+-_LT_DECL([], [old_postuninstall_cmds], [2])
+-_LT_TAGDECL([], [old_archive_cmds], [2],
+-    [Commands used to build an old-style archive])
+-_LT_DECL([], [lock_old_archive_extraction], [0],
+-    [Whether to use a lock for old archive extraction])
+-])# _LT_CMD_OLD_ARCHIVE
+-
+-
+-# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+-#		[OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE])
+-# ----------------------------------------------------------------
+-# Check whether the given compiler option works
+-AC_DEFUN([_LT_COMPILER_OPTION],
+-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-m4_require([_LT_DECL_SED])dnl
+-AC_CACHE_CHECK([$1], [$2],
+-  [$2=no
+-   m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4])
+-   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+-   lt_compiler_flag="$3"  ## exclude from sc_useless_quotes_in_assignment
+-   # Insert the option either (1) after the last *FLAGS variable, or
+-   # (2) before a word containing "conftest.", or (3) at the end.
+-   # Note that $ac_compile itself does not contain backslashes and begins
+-   # with a dollar sign (not a hyphen), so the echo should work correctly.
+-   # The option is referenced via a variable to avoid confusing sed.
+-   lt_compile=`echo "$ac_compile" | $SED \
+-   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+-   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+-   -e 's:$: $lt_compiler_flag:'`
+-   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+-   (eval "$lt_compile" 2>conftest.err)
+-   ac_status=$?
+-   cat conftest.err >&AS_MESSAGE_LOG_FD
+-   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+-   if (exit $ac_status) && test -s "$ac_outfile"; then
+-     # The compiler can only warn and ignore the option if not recognized
+-     # So say no if there are warnings other than the usual output.
+-     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp
+-     $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+-     if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then
+-       $2=yes
+-     fi
+-   fi
+-   $RM conftest*
+-])
+-
+-if test yes = "[$]$2"; then
+-    m4_if([$5], , :, [$5])
+-else
+-    m4_if([$6], , :, [$6])
+-fi
+-])# _LT_COMPILER_OPTION
+-
+-# Old name:
+-AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], [])
+-
+-
+-# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS,
+-#                  [ACTION-SUCCESS], [ACTION-FAILURE])
+-# ----------------------------------------------------
+-# Check whether the given linker option works
+-AC_DEFUN([_LT_LINKER_OPTION],
+-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-m4_require([_LT_DECL_SED])dnl
+-AC_CACHE_CHECK([$1], [$2],
+-  [$2=no
+-   save_LDFLAGS=$LDFLAGS
+-   LDFLAGS="$LDFLAGS $3"
+-   echo "$lt_simple_link_test_code" > conftest.$ac_ext
+-   if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+-     # The linker can only warn and ignore the option if not recognized
+-     # So say no if there are warnings
+-     if test -s conftest.err; then
+-       # Append any errors to the config.log.
+-       cat conftest.err 1>&AS_MESSAGE_LOG_FD
+-       $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+-       $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+-       if diff conftest.exp conftest.er2 >/dev/null; then
+-         $2=yes
+-       fi
+-     else
+-       $2=yes
+-     fi
+-   fi
+-   $RM -r conftest*
+-   LDFLAGS=$save_LDFLAGS
+-])
+-
+-if test yes = "[$]$2"; then
+-    m4_if([$4], , :, [$4])
+-else
+-    m4_if([$5], , :, [$5])
+-fi
+-])# _LT_LINKER_OPTION
+-
+-# Old name:
+-AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])
+-
+-
+-# LT_CMD_MAX_LEN
+-#---------------
+-AC_DEFUN([LT_CMD_MAX_LEN],
+-[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-# find the maximum length of command line arguments
+-AC_MSG_CHECKING([the maximum length of command line arguments])
+-AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
+-  i=0
+-  teststring=ABCD
+-
+-  case $build_os in
+-  msdosdjgpp*)
+-    # On DJGPP, this test can blow up pretty badly due to problems in libc
+-    # (any single argument exceeding 2000 bytes causes a buffer overrun
+-    # during glob expansion).  Even if it were fixed, the result of this
+-    # check would be larger than it should be.
+-    lt_cv_sys_max_cmd_len=12288;    # 12K is about right
+-    ;;
+-
+-  gnu*)
+-    # Under GNU Hurd, this test is not required because there is
+-    # no limit to the length of command line arguments.
+-    # Libtool will interpret -1 as no limit whatsoever
+-    lt_cv_sys_max_cmd_len=-1;
+-    ;;
+-
+-  cygwin* | mingw* | cegcc*)
+-    # On Win9x/ME, this test blows up -- it succeeds, but takes
+-    # about 5 minutes as the teststring grows exponentially.
+-    # Worse, since 9x/ME are not pre-emptively multitasking,
+-    # you end up with a "frozen" computer, even though with patience
+-    # the test eventually succeeds (with a max line length of 256k).
+-    # Instead, let's just punt: use the minimum linelength reported by
+-    # all of the supported platforms: 8192 (on NT/2K/XP).
+-    lt_cv_sys_max_cmd_len=8192;
+-    ;;
+-
+-  mint*)
+-    # On MiNT this can take a long time and run out of memory.
+-    lt_cv_sys_max_cmd_len=8192;
+-    ;;
+-
+-  amigaos*)
+-    # On AmigaOS with pdksh, this test takes hours, literally.
+-    # So we just punt and use a minimum line length of 8192.
+-    lt_cv_sys_max_cmd_len=8192;
+-    ;;
+-
+-  bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*)
+-    # This has been around since 386BSD, at least.  Likely further.
+-    if test -x /sbin/sysctl; then
+-      lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+-    elif test -x /usr/sbin/sysctl; then
+-      lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+-    else
+-      lt_cv_sys_max_cmd_len=65536	# usable default for all BSDs
+-    fi
+-    # And add a safety zone
+-    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+-    lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+-    ;;
+-
+-  interix*)
+-    # We know the value 262144 and hardcode it with a safety zone (like BSD)
+-    lt_cv_sys_max_cmd_len=196608
+-    ;;
+-
+-  os2*)
+-    # The test takes a long time on OS/2.
+-    lt_cv_sys_max_cmd_len=8192
+-    ;;
+-
+-  osf*)
+-    # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+-    # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+-    # nice to cause kernel panics so lets avoid the loop below.
+-    # First set a reasonable default.
+-    lt_cv_sys_max_cmd_len=16384
+-    #
+-    if test -x /sbin/sysconfig; then
+-      case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+-        *1*) lt_cv_sys_max_cmd_len=-1 ;;
+-      esac
+-    fi
+-    ;;
+-  sco3.2v5*)
+-    lt_cv_sys_max_cmd_len=102400
+-    ;;
+-  sysv5* | sco5v6* | sysv4.2uw2*)
+-    kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+-    if test -n "$kargmax"; then
+-      lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[[	 ]]//'`
+-    else
+-      lt_cv_sys_max_cmd_len=32768
+-    fi
+-    ;;
+-  *)
+-    lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+-    if test -n "$lt_cv_sys_max_cmd_len" && \
+-       test undefined != "$lt_cv_sys_max_cmd_len"; then
+-      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+-      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+-    else
+-      # Make teststring a little bigger before we do anything with it.
+-      # a 1K string should be a reasonable start.
+-      for i in 1 2 3 4 5 6 7 8; do
+-        teststring=$teststring$teststring
+-      done
+-      SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+-      # If test is not a shell built-in, we'll probably end up computing a
+-      # maximum length that is only half of the actual maximum length, but
+-      # we can't tell.
+-      while { test X`env echo "$teststring$teststring" 2>/dev/null` \
+-	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+-	      test 17 != "$i" # 1/2 MB should be enough
+-      do
+-        i=`expr $i + 1`
+-        teststring=$teststring$teststring
+-      done
+-      # Only check the string length outside the loop.
+-      lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+-      teststring=
+-      # Add a significant safety factor because C++ compilers can tack on
+-      # massive amounts of additional arguments before passing them to the
+-      # linker.  It appears as though 1/2 is a usable value.
+-      lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+-    fi
+-    ;;
+-  esac
+-])
+-if test -n "$lt_cv_sys_max_cmd_len"; then
+-  AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+-else
+-  AC_MSG_RESULT(none)
+-fi
+-max_cmd_len=$lt_cv_sys_max_cmd_len
+-_LT_DECL([], [max_cmd_len], [0],
+-    [What is the maximum length of a command?])
+-])# LT_CMD_MAX_LEN
+-
+-# Old name:
+-AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
+-
+-
+-# _LT_HEADER_DLFCN
+-# ----------------
+-m4_defun([_LT_HEADER_DLFCN],
+-[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+-])# _LT_HEADER_DLFCN
+-
+-
+-# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+-#                      ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+-# ----------------------------------------------------------------
+-m4_defun([_LT_TRY_DLOPEN_SELF],
+-[m4_require([_LT_HEADER_DLFCN])dnl
+-if test yes = "$cross_compiling"; then :
+-  [$4]
+-else
+-  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+-  lt_status=$lt_dlunknown
+-  cat > conftest.$ac_ext <<_LT_EOF
+-[#line $LINENO "configure"
+-#include "confdefs.h"
+-
+-#if HAVE_DLFCN_H
+-#include 
+-#endif
+-
+-#include 
+-
+-#ifdef RTLD_GLOBAL
+-#  define LT_DLGLOBAL		RTLD_GLOBAL
+-#else
+-#  ifdef DL_GLOBAL
+-#    define LT_DLGLOBAL		DL_GLOBAL
+-#  else
+-#    define LT_DLGLOBAL		0
+-#  endif
+-#endif
+-
+-/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+-   find out it does not work in some platform. */
+-#ifndef LT_DLLAZY_OR_NOW
+-#  ifdef RTLD_LAZY
+-#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+-#  else
+-#    ifdef DL_LAZY
+-#      define LT_DLLAZY_OR_NOW		DL_LAZY
+-#    else
+-#      ifdef RTLD_NOW
+-#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+-#      else
+-#        ifdef DL_NOW
+-#          define LT_DLLAZY_OR_NOW	DL_NOW
+-#        else
+-#          define LT_DLLAZY_OR_NOW	0
+-#        endif
+-#      endif
+-#    endif
+-#  endif
+-#endif
+-
+-/* When -fvisibility=hidden is used, assume the code has been annotated
+-   correspondingly for the symbols needed.  */
+-#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+-int fnord () __attribute__((visibility("default")));
+-#endif
+-
+-int fnord () { return 42; }
+-int main ()
+-{
+-  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+-  int status = $lt_dlunknown;
+-
+-  if (self)
+-    {
+-      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+-      else
+-        {
+-	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+-          else puts (dlerror ());
+-	}
+-      /* dlclose (self); */
+-    }
+-  else
+-    puts (dlerror ());
+-
+-  return status;
+-}]
+-_LT_EOF
+-  if AC_TRY_EVAL(ac_link) && test -s "conftest$ac_exeext" 2>/dev/null; then
+-    (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null
+-    lt_status=$?
+-    case x$lt_status in
+-      x$lt_dlno_uscore) $1 ;;
+-      x$lt_dlneed_uscore) $2 ;;
+-      x$lt_dlunknown|x*) $3 ;;
+-    esac
+-  else :
+-    # compilation failed
+-    $3
+-  fi
+-fi
+-rm -fr conftest*
+-])# _LT_TRY_DLOPEN_SELF
+-
+-
+-# LT_SYS_DLOPEN_SELF
+-# ------------------
+-AC_DEFUN([LT_SYS_DLOPEN_SELF],
+-[m4_require([_LT_HEADER_DLFCN])dnl
+-if test yes != "$enable_dlopen"; then
+-  enable_dlopen=unknown
+-  enable_dlopen_self=unknown
+-  enable_dlopen_self_static=unknown
+-else
+-  lt_cv_dlopen=no
+-  lt_cv_dlopen_libs=
+-
+-  case $host_os in
+-  beos*)
+-    lt_cv_dlopen=load_add_on
+-    lt_cv_dlopen_libs=
+-    lt_cv_dlopen_self=yes
+-    ;;
+-
+-  mingw* | pw32* | cegcc*)
+-    lt_cv_dlopen=LoadLibrary
+-    lt_cv_dlopen_libs=
+-    ;;
+-
+-  cygwin*)
+-    lt_cv_dlopen=dlopen
+-    lt_cv_dlopen_libs=
+-    ;;
+-
+-  darwin*)
+-    # if libdl is installed we need to link against it
+-    AC_CHECK_LIB([dl], [dlopen],
+-		[lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],[
+-    lt_cv_dlopen=dyld
+-    lt_cv_dlopen_libs=
+-    lt_cv_dlopen_self=yes
+-    ])
+-    ;;
+-
+-  tpf*)
+-    # Don't try to run any link tests for TPF.  We know it's impossible
+-    # because TPF is a cross-compiler, and we know how we open DSOs.
+-    lt_cv_dlopen=dlopen
+-    lt_cv_dlopen_libs=
+-    lt_cv_dlopen_self=no
+-    ;;
+-
+-  *)
+-    AC_CHECK_FUNC([shl_load],
+-	  [lt_cv_dlopen=shl_load],
+-      [AC_CHECK_LIB([dld], [shl_load],
+-	    [lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld],
+-	[AC_CHECK_FUNC([dlopen],
+-	      [lt_cv_dlopen=dlopen],
+-	  [AC_CHECK_LIB([dl], [dlopen],
+-		[lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl],
+-	    [AC_CHECK_LIB([svld], [dlopen],
+-		  [lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld],
+-	      [AC_CHECK_LIB([dld], [dld_link],
+-		    [lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld])
+-	      ])
+-	    ])
+-	  ])
+-	])
+-      ])
+-    ;;
+-  esac
+-
+-  if test no = "$lt_cv_dlopen"; then
+-    enable_dlopen=no
+-  else
+-    enable_dlopen=yes
+-  fi
+-
+-  case $lt_cv_dlopen in
+-  dlopen)
+-    save_CPPFLAGS=$CPPFLAGS
+-    test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+-
+-    save_LDFLAGS=$LDFLAGS
+-    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+-
+-    save_LIBS=$LIBS
+-    LIBS="$lt_cv_dlopen_libs $LIBS"
+-
+-    AC_CACHE_CHECK([whether a program can dlopen itself],
+-	  lt_cv_dlopen_self, [dnl
+-	  _LT_TRY_DLOPEN_SELF(
+-	    lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes,
+-	    lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross)
+-    ])
+-
+-    if test yes = "$lt_cv_dlopen_self"; then
+-      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+-      AC_CACHE_CHECK([whether a statically linked program can dlopen itself],
+-	  lt_cv_dlopen_self_static, [dnl
+-	  _LT_TRY_DLOPEN_SELF(
+-	    lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes,
+-	    lt_cv_dlopen_self_static=no,  lt_cv_dlopen_self_static=cross)
+-      ])
+-    fi
+-
+-    CPPFLAGS=$save_CPPFLAGS
+-    LDFLAGS=$save_LDFLAGS
+-    LIBS=$save_LIBS
+-    ;;
+-  esac
+-
+-  case $lt_cv_dlopen_self in
+-  yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;;
+-  *) enable_dlopen_self=unknown ;;
+-  esac
+-
+-  case $lt_cv_dlopen_self_static in
+-  yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;;
+-  *) enable_dlopen_self_static=unknown ;;
+-  esac
+-fi
+-_LT_DECL([dlopen_support], [enable_dlopen], [0],
+-	 [Whether dlopen is supported])
+-_LT_DECL([dlopen_self], [enable_dlopen_self], [0],
+-	 [Whether dlopen of programs is supported])
+-_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0],
+-	 [Whether dlopen of statically linked programs is supported])
+-])# LT_SYS_DLOPEN_SELF
+-
+-# Old name:
+-AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], [])
+-
+-
+-# _LT_COMPILER_C_O([TAGNAME])
+-# ---------------------------
+-# Check to see if options -c and -o are simultaneously supported by compiler.
+-# This macro does not hard code the compiler like AC_PROG_CC_C_O.
+-m4_defun([_LT_COMPILER_C_O],
+-[m4_require([_LT_DECL_SED])dnl
+-m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-m4_require([_LT_TAG_COMPILER])dnl
+-AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext],
+-  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)],
+-  [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no
+-   $RM -r conftest 2>/dev/null
+-   mkdir conftest
+-   cd conftest
+-   mkdir out
+-   echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+-
+-   lt_compiler_flag="-o out/conftest2.$ac_objext"
+-   # Insert the option either (1) after the last *FLAGS variable, or
+-   # (2) before a word containing "conftest.", or (3) at the end.
+-   # Note that $ac_compile itself does not contain backslashes and begins
+-   # with a dollar sign (not a hyphen), so the echo should work correctly.
+-   lt_compile=`echo "$ac_compile" | $SED \
+-   -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \
+-   -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \
+-   -e 's:$: $lt_compiler_flag:'`
+-   (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD)
+-   (eval "$lt_compile" 2>out/conftest.err)
+-   ac_status=$?
+-   cat out/conftest.err >&AS_MESSAGE_LOG_FD
+-   echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD
+-   if (exit $ac_status) && test -s out/conftest2.$ac_objext
+-   then
+-     # The compiler can only warn and ignore the option if not recognized
+-     # So say no if there are warnings
+-     $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp
+-     $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2
+-     if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then
+-       _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+-     fi
+-   fi
+-   chmod u+w . 2>&AS_MESSAGE_LOG_FD
+-   $RM conftest*
+-   # SGI C++ compiler will create directory out/ii_files/ for
+-   # template instantiation
+-   test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files
+-   $RM out/* && rmdir out
+-   cd ..
+-   $RM -r conftest
+-   $RM conftest*
+-])
+-_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1],
+-	[Does compiler simultaneously support -c and -o options?])
+-])# _LT_COMPILER_C_O
+-
+-
+-# _LT_COMPILER_FILE_LOCKS([TAGNAME])
+-# ----------------------------------
+-# Check to see if we can do hard links to lock some files if needed
+-m4_defun([_LT_COMPILER_FILE_LOCKS],
+-[m4_require([_LT_ENABLE_LOCK])dnl
+-m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-_LT_COMPILER_C_O([$1])
+-
+-hard_links=nottested
+-if test no = "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" && test no != "$need_locks"; then
+-  # do not overwrite the value of need_locks provided by the user
+-  AC_MSG_CHECKING([if we can lock with hard links])
+-  hard_links=yes
+-  $RM conftest*
+-  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+-  touch conftest.a
+-  ln conftest.a conftest.b 2>&5 || hard_links=no
+-  ln conftest.a conftest.b 2>/dev/null && hard_links=no
+-  AC_MSG_RESULT([$hard_links])
+-  if test no = "$hard_links"; then
+-    AC_MSG_WARN(['$CC' does not support '-c -o', so 'make -j' may be unsafe])
+-    need_locks=warn
+-  fi
+-else
+-  need_locks=no
+-fi
+-_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?])
+-])# _LT_COMPILER_FILE_LOCKS
+-
+-
+-# _LT_CHECK_OBJDIR
+-# ----------------
+-m4_defun([_LT_CHECK_OBJDIR],
+-[AC_CACHE_CHECK([for objdir], [lt_cv_objdir],
+-[rm -f .libs 2>/dev/null
+-mkdir .libs 2>/dev/null
+-if test -d .libs; then
+-  lt_cv_objdir=.libs
+-else
+-  # MS-DOS does not allow filenames that begin with a dot.
+-  lt_cv_objdir=_libs
+-fi
+-rmdir .libs 2>/dev/null])
+-objdir=$lt_cv_objdir
+-_LT_DECL([], [objdir], [0],
+-         [The name of the directory that contains temporary libtool files])dnl
+-m4_pattern_allow([LT_OBJDIR])dnl
+-AC_DEFINE_UNQUOTED([LT_OBJDIR], "$lt_cv_objdir/",
+-  [Define to the sub-directory where libtool stores uninstalled libraries.])
+-])# _LT_CHECK_OBJDIR
+-
+-
+-# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME])
+-# --------------------------------------
+-# Check hardcoding attributes.
+-m4_defun([_LT_LINKER_HARDCODE_LIBPATH],
+-[AC_MSG_CHECKING([how to hardcode library paths into programs])
+-_LT_TAGVAR(hardcode_action, $1)=
+-if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" ||
+-   test -n "$_LT_TAGVAR(runpath_var, $1)" ||
+-   test yes = "$_LT_TAGVAR(hardcode_automatic, $1)"; then
+-
+-  # We can hardcode non-existent directories.
+-  if test no != "$_LT_TAGVAR(hardcode_direct, $1)" &&
+-     # If the only mechanism to avoid hardcoding is shlibpath_var, we
+-     # have to relink, otherwise we might link with an installed library
+-     # when we should be linking with a yet-to-be-installed one
+-     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" &&
+-     test no != "$_LT_TAGVAR(hardcode_minus_L, $1)"; then
+-    # Linking always hardcodes the temporary library directory.
+-    _LT_TAGVAR(hardcode_action, $1)=relink
+-  else
+-    # We can link without hardcoding, and we can hardcode nonexisting dirs.
+-    _LT_TAGVAR(hardcode_action, $1)=immediate
+-  fi
+-else
+-  # We cannot hardcode anything, or else we can only hardcode existing
+-  # directories.
+-  _LT_TAGVAR(hardcode_action, $1)=unsupported
+-fi
+-AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])
+-
+-if test relink = "$_LT_TAGVAR(hardcode_action, $1)" ||
+-   test yes = "$_LT_TAGVAR(inherit_rpath, $1)"; then
+-  # Fast installation is not supported
+-  enable_fast_install=no
+-elif test yes = "$shlibpath_overrides_runpath" ||
+-     test no = "$enable_shared"; then
+-  # Fast installation is not necessary
+-  enable_fast_install=needless
+-fi
+-_LT_TAGDECL([], [hardcode_action], [0],
+-    [How to hardcode a shared library path into an executable])
+-])# _LT_LINKER_HARDCODE_LIBPATH
+-
+-
+-# _LT_CMD_STRIPLIB
+-# ----------------
+-m4_defun([_LT_CMD_STRIPLIB],
+-[m4_require([_LT_DECL_EGREP])
+-striplib=
+-old_striplib=
+-AC_MSG_CHECKING([whether stripping libraries is possible])
+-if test -z "$STRIP"; then
+-  AC_MSG_RESULT([no])
+-else
+-  if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+-    old_striplib="$STRIP --strip-debug"
+-    striplib="$STRIP --strip-unneeded"
+-    AC_MSG_RESULT([yes])
+-  else
+-    case $host_os in
+-    darwin*)
+-      # FIXME - insert some real tests, host_os isn't really good enough
+-      striplib="$STRIP -x"
+-      old_striplib="$STRIP -S"
+-      AC_MSG_RESULT([yes])
+-      ;;
+-    freebsd*)
+-      if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then
+-        old_striplib="$STRIP --strip-debug"
+-        striplib="$STRIP --strip-unneeded"
+-        AC_MSG_RESULT([yes])
+-      else
+-        AC_MSG_RESULT([no])
+-      fi
+-      ;;
+-    *)
+-      AC_MSG_RESULT([no])
+-      ;;
+-    esac
+-  fi
+-fi
+-_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
+-_LT_DECL([], [striplib], [1])
+-])# _LT_CMD_STRIPLIB
+-
+-
+-# _LT_PREPARE_MUNGE_PATH_LIST
+-# ---------------------------
+-# Make sure func_munge_path_list() is defined correctly.
+-m4_defun([_LT_PREPARE_MUNGE_PATH_LIST],
+-[[# func_munge_path_list VARIABLE PATH
+-# -----------------------------------
+-# VARIABLE is name of variable containing _space_ separated list of
+-# directories to be munged by the contents of PATH, which is string
+-# having a format:
+-# "DIR[:DIR]:"
+-#       string "DIR[ DIR]" will be prepended to VARIABLE
+-# ":DIR[:DIR]"
+-#       string "DIR[ DIR]" will be appended to VARIABLE
+-# "DIRP[:DIRP]::[DIRA:]DIRA"
+-#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
+-#       "DIRA[ DIRA]" will be appended to VARIABLE
+-# "DIR[:DIR]"
+-#       VARIABLE will be replaced by "DIR[ DIR]"
+-func_munge_path_list ()
+-{
+-    case x@S|@2 in
+-    x)
+-        ;;
+-    *:)
+-        eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'` \@S|@@S|@1\"
+-        ;;
+-    x:*)
+-        eval @S|@1=\"\@S|@@S|@1 `$ECHO @S|@2 | $SED 's/:/ /g'`\"
+-        ;;
+-    *::*)
+-        eval @S|@1=\"\@S|@@S|@1\ `$ECHO @S|@2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
+-        eval @S|@1=\"`$ECHO @S|@2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \@S|@@S|@1\"
+-        ;;
+-    *)
+-        eval @S|@1=\"`$ECHO @S|@2 | $SED 's/:/ /g'`\"
+-        ;;
+-    esac
+-}
+-]])# _LT_PREPARE_PATH_LIST
+-
+-
+-# _LT_SYS_DYNAMIC_LINKER([TAG])
+-# -----------------------------
+-# PORTME Fill in your ld.so characteristics
+-m4_defun([_LT_SYS_DYNAMIC_LINKER],
+-[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-m4_require([_LT_DECL_EGREP])dnl
+-m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-m4_require([_LT_DECL_OBJDUMP])dnl
+-m4_require([_LT_DECL_SED])dnl
+-m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+-m4_require([_LT_PREPARE_MUNGE_PATH_LIST])dnl
+-AC_MSG_CHECKING([dynamic linker characteristics])
+-m4_if([$1],
+-	[], [
+-if test yes = "$GCC"; then
+-  case $host_os in
+-    darwin*) lt_awk_arg='/^libraries:/,/LR/' ;;
+-    *) lt_awk_arg='/^libraries:/' ;;
+-  esac
+-  case $host_os in
+-    mingw* | cegcc*) lt_sed_strip_eq='s|=\([[A-Za-z]]:\)|\1|g' ;;
+-    *) lt_sed_strip_eq='s|=/|/|g' ;;
+-  esac
+-  lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+-  case $lt_search_path_spec in
+-  *\;*)
+-    # if the path contains ";" then we assume it to be the separator
+-    # otherwise default to the standard path separator (i.e. ":") - it is
+-    # assumed that no part of a normal pathname contains ";" but that should
+-    # okay in the real world where ";" in dirpaths is itself problematic.
+-    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+-    ;;
+-  *)
+-    lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+-    ;;
+-  esac
+-  # Ok, now we have the path, separated by spaces, we can step through it
+-  # and add multilib dir if necessary...
+-  lt_tmp_lt_search_path_spec=
+-  lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+-  # ...but if some path component already ends with the multilib dir we assume
+-  # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer).
+-  case "$lt_multi_os_dir; $lt_search_path_spec " in
+-  "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*)
+-    lt_multi_os_dir=
+-    ;;
+-  esac
+-  for lt_sys_path in $lt_search_path_spec; do
+-    if test -d "$lt_sys_path$lt_multi_os_dir"; then
+-      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir"
+-    elif test -n "$lt_multi_os_dir"; then
+-      test -d "$lt_sys_path" && \
+-	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+-    fi
+-  done
+-  lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+-BEGIN {RS = " "; FS = "/|\n";} {
+-  lt_foo = "";
+-  lt_count = 0;
+-  for (lt_i = NF; lt_i > 0; lt_i--) {
+-    if ($lt_i != "" && $lt_i != ".") {
+-      if ($lt_i == "..") {
+-        lt_count++;
+-      } else {
+-        if (lt_count == 0) {
+-          lt_foo = "/" $lt_i lt_foo;
+-        } else {
+-          lt_count--;
+-        }
+-      }
+-    }
+-  }
+-  if (lt_foo != "") { lt_freq[[lt_foo]]++; }
+-  if (lt_freq[[lt_foo]] == 1) { print lt_foo; }
+-}'`
+-  # AWK program above erroneously prepends '/' to C:/dos/paths
+-  # for these hosts.
+-  case $host_os in
+-    mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+-      $SED 's|/\([[A-Za-z]]:\)|\1|g'` ;;
+-  esac
+-  sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+-else
+-  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+-fi])
+-library_names_spec=
+-libname_spec='lib$name'
+-soname_spec=
+-shrext_cmds=.so
+-postinstall_cmds=
+-postuninstall_cmds=
+-finish_cmds=
+-finish_eval=
+-shlibpath_var=
+-shlibpath_overrides_runpath=unknown
+-version_type=none
+-dynamic_linker="$host_os ld.so"
+-sys_lib_dlsearch_path_spec="/lib /usr/lib"
+-need_lib_prefix=unknown
+-hardcode_into_libs=no
+-
+-# when you set need_version to no, make sure it does not cause -set_version
+-# flags to be left without arguments
+-need_version=unknown
+-
+-AC_ARG_VAR([LT_SYS_LIBRARY_PATH],
+-[User-defined run-time library search path.])
+-
+-case $host_os in
+-aix3*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
+-  shlibpath_var=LIBPATH
+-
+-  # AIX 3 has no versioning support, so we append a major version to the name.
+-  soname_spec='$libname$release$shared_ext$major'
+-  ;;
+-
+-aix[[4-9]]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_lib_prefix=no
+-  need_version=no
+-  hardcode_into_libs=yes
+-  if test ia64 = "$host_cpu"; then
+-    # AIX 5 supports IA64
+-    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
+-    shlibpath_var=LD_LIBRARY_PATH
+-  else
+-    # With GCC up to 2.95.x, collect2 would create an import file
+-    # for dependence libraries.  The import file would start with
+-    # the line '#! .'.  This would cause the generated library to
+-    # depend on '.', always an invalid library.  This was fixed in
+-    # development snapshots of GCC prior to 3.0.
+-    case $host_os in
+-      aix4 | aix4.[[01]] | aix4.[[01]].*)
+-      if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+-	   echo ' yes '
+-	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
+-	:
+-      else
+-	can_build_shared=no
+-      fi
+-      ;;
+-    esac
+-    # Using Import Files as archive members, it is possible to support
+-    # filename-based versioning of shared library archives on AIX. While
+-    # this would work for both with and without runtime linking, it will
+-    # prevent static linking of such archives. So we do filename-based
+-    # shared library versioning with .so extension only, which is used
+-    # when both runtime linking and shared linking is enabled.
+-    # Unfortunately, runtime linking may impact performance, so we do
+-    # not want this to be the default eventually. Also, we use the
+-    # versioned .so libs for executables only if there is the -brtl
+-    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
+-    # To allow for filename-based versioning support, we need to create
+-    # libNAME.so.V as an archive file, containing:
+-    # *) an Import File, referring to the versioned filename of the
+-    #    archive as well as the shared archive member, telling the
+-    #    bitwidth (32 or 64) of that shared object, and providing the
+-    #    list of exported symbols of that shared object, eventually
+-    #    decorated with the 'weak' keyword
+-    # *) the shared object with the F_LOADONLY flag set, to really avoid
+-    #    it being seen by the linker.
+-    # At run time we better use the real file rather than another symlink,
+-    # but for link time we create the symlink libNAME.so -> libNAME.so.V
+-
+-    case $with_aix_soname,$aix_use_runtimelinking in
+-    # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct
+-    # soname into executable. Probably we can add versioning support to
+-    # collect2, so additional links can be useful in future.
+-    aix,yes) # traditional libtool
+-      dynamic_linker='AIX unversionable lib.so'
+-      # If using run time linking (on AIX 4.2 or later) use lib.so
+-      # instead of lib.a to let people know that these are not
+-      # typical AIX shared libraries.
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-      ;;
+-    aix,no) # traditional AIX only
+-      dynamic_linker='AIX lib.a[(]lib.so.V[)]'
+-      # We preserve .a as extension for shared libraries through AIX4.2
+-      # and later when we are not doing run time linking.
+-      library_names_spec='$libname$release.a $libname.a'
+-      soname_spec='$libname$release$shared_ext$major'
+-      ;;
+-    svr4,*) # full svr4 only
+-      dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)]"
+-      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+-      # We do not specify a path in Import Files, so LIBPATH fires.
+-      shlibpath_overrides_runpath=yes
+-      ;;
+-    *,yes) # both, prefer svr4
+-      dynamic_linker="AIX lib.so.V[(]$shared_archive_member_spec.o[)], lib.a[(]lib.so.V[)]"
+-      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+-      # unpreferred sharedlib libNAME.a needs extra handling
+-      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
+-      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
+-      # We do not specify a path in Import Files, so LIBPATH fires.
+-      shlibpath_overrides_runpath=yes
+-      ;;
+-    *,no) # both, prefer aix
+-      dynamic_linker="AIX lib.a[(]lib.so.V[)], lib.so.V[(]$shared_archive_member_spec.o[)]"
+-      library_names_spec='$libname$release.a $libname.a'
+-      soname_spec='$libname$release$shared_ext$major'
+-      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
+-      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
+-      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
+-      ;;
+-    esac
+-    shlibpath_var=LIBPATH
+-  fi
+-  ;;
+-
+-amigaos*)
+-  case $host_cpu in
+-  powerpc)
+-    # Since July 2007 AmigaOS4 officially supports .so libraries.
+-    # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    ;;
+-  m68k)
+-    library_names_spec='$libname.ixlibrary $libname.a'
+-    # Create ${libname}_ixlibrary.a entries in /sys/libs.
+-    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+-    ;;
+-  esac
+-  ;;
+-
+-beos*)
+-  library_names_spec='$libname$shared_ext'
+-  dynamic_linker="$host_os ld.so"
+-  shlibpath_var=LIBRARY_PATH
+-  ;;
+-
+-bsdi[[45]]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+-  sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib"
+-  # the default ld.so.conf also contains /usr/contrib/lib and
+-  # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow
+-  # libtool to hard-code these into programs
+-  ;;
+-
+-cygwin* | mingw* | pw32* | cegcc*)
+-  version_type=windows
+-  shrext_cmds=.dll
+-  need_version=no
+-  need_lib_prefix=no
+-
+-  case $GCC,$cc_basename in
+-  yes,*)
+-    # gcc
+-    library_names_spec='$libname.dll.a'
+-    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+-    postinstall_cmds='base_file=`basename \$file`~
+-      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+-      dldir=$destdir/`dirname \$dlpath`~
+-      test -d \$dldir || mkdir -p \$dldir~
+-      $install_prog $dir/$dlname \$dldir/$dlname~
+-      chmod a+x \$dldir/$dlname~
+-      if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+-        eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+-      fi'
+-    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+-      dlpath=$dir/\$dldll~
+-       $RM \$dlpath'
+-    shlibpath_overrides_runpath=yes
+-
+-    case $host_os in
+-    cygwin*)
+-      # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+-      soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+-m4_if([$1], [],[
+-      sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"])
+-      ;;
+-    mingw* | cegcc*)
+-      # MinGW DLLs use traditional 'lib' prefix
+-      soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+-      ;;
+-    pw32*)
+-      # pw32 DLLs use 'pw' prefix rather than 'lib'
+-      library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+-      ;;
+-    esac
+-    dynamic_linker='Win32 ld.exe'
+-    ;;
+-
+-  *,cl* | *,icl*)
+-    # Native MSVC or ICC
+-    libname_spec='$name'
+-    soname_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext'
+-    library_names_spec='$libname.dll.lib'
+-
+-    case $build_os in
+-    mingw*)
+-      sys_lib_search_path_spec=
+-      lt_save_ifs=$IFS
+-      IFS=';'
+-      for lt_path in $LIB
+-      do
+-        IFS=$lt_save_ifs
+-        # Let DOS variable expansion print the short 8.3 style file name.
+-        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+-        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+-      done
+-      IFS=$lt_save_ifs
+-      # Convert to MSYS style.
+-      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'`
+-      ;;
+-    cygwin*)
+-      # Convert to unix form, then to dos form, then back to unix form
+-      # but this time dos style (no spaces!) so that the unix form looks
+-      # like /cygdrive/c/PROGRA~1:/cygdr...
+-      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+-      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+-      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+-      ;;
+-    *)
+-      sys_lib_search_path_spec=$LIB
+-      if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then
+-        # It is most probably a Windows format PATH.
+-        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+-      else
+-        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+-      fi
+-      # FIXME: find the short name or the path components, as spaces are
+-      # common. (e.g. "Program Files" -> "PROGRA~1")
+-      ;;
+-    esac
+-
+-    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+-    postinstall_cmds='base_file=`basename \$file`~
+-      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+-      dldir=$destdir/`dirname \$dlpath`~
+-      test -d \$dldir || mkdir -p \$dldir~
+-      $install_prog $dir/$dlname \$dldir/$dlname'
+-    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+-      dlpath=$dir/\$dldll~
+-       $RM \$dlpath'
+-    shlibpath_overrides_runpath=yes
+-    dynamic_linker='Win32 link.exe'
+-    ;;
+-
+-  *)
+-    # Assume MSVC and ICC wrapper
+-    library_names_spec='$libname`echo $release | $SED -e 's/[[.]]/-/g'`$versuffix$shared_ext $libname.lib'
+-    dynamic_linker='Win32 ld.exe'
+-    ;;
+-  esac
+-  # FIXME: first we should search . and the directory the executable is in
+-  shlibpath_var=PATH
+-  ;;
+-
+-darwin* | rhapsody*)
+-  dynamic_linker="$host_os dyld"
+-  version_type=darwin
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
+-  soname_spec='$libname$release$major$shared_ext'
+-  shlibpath_overrides_runpath=yes
+-  shlibpath_var=DYLD_LIBRARY_PATH
+-  shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+-m4_if([$1], [],[
+-  sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"])
+-  sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib'
+-  ;;
+-
+-dgux*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  ;;
+-
+-freebsd* | dragonfly* | midnightbsd*)
+-  # DragonFly does not have aout.  When/if they implement a new
+-  # versioning mechanism, adjust this.
+-  if test -x /usr/bin/objformat; then
+-    objformat=`/usr/bin/objformat`
+-  else
+-    case $host_os in
+-    freebsd[[23]].*) objformat=aout ;;
+-    *) objformat=elf ;;
+-    esac
+-  fi
+-  version_type=freebsd-$objformat
+-  case $version_type in
+-    freebsd-elf*)
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-      soname_spec='$libname$release$shared_ext$major'
+-      need_version=no
+-      need_lib_prefix=no
+-      ;;
+-    freebsd-*)
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+-      need_version=yes
+-      ;;
+-  esac
+-  shlibpath_var=LD_LIBRARY_PATH
+-  case $host_os in
+-  freebsd2.*)
+-    shlibpath_overrides_runpath=yes
+-    ;;
+-  freebsd3.[[01]]* | freebsdelf3.[[01]]*)
+-    shlibpath_overrides_runpath=yes
+-    hardcode_into_libs=yes
+-    ;;
+-  freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \
+-  freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1)
+-    shlibpath_overrides_runpath=no
+-    hardcode_into_libs=yes
+-    ;;
+-  *) # from 4.6 on, and DragonFly
+-    shlibpath_overrides_runpath=yes
+-    hardcode_into_libs=yes
+-    ;;
+-  esac
+-  ;;
+-
+-haiku*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_lib_prefix=no
+-  need_version=no
+-  dynamic_linker="$host_os runtime_loader"
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  shlibpath_var=LIBRARY_PATH
+-  shlibpath_overrides_runpath=no
+-  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
+-  hardcode_into_libs=yes
+-  ;;
+-
+-hpux9* | hpux10* | hpux11*)
+-  # Give a soname corresponding to the major version so that dld.sl refuses to
+-  # link against other versions.
+-  version_type=sunos
+-  need_lib_prefix=no
+-  need_version=no
+-  case $host_cpu in
+-  ia64*)
+-    shrext_cmds='.so'
+-    hardcode_into_libs=yes
+-    dynamic_linker="$host_os dld.so"
+-    shlibpath_var=LD_LIBRARY_PATH
+-    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
+-    if test 32 = "$HPUX_IA64_MODE"; then
+-      sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+-      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
+-    else
+-      sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+-      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
+-    fi
+-    ;;
+-  hppa*64*)
+-    shrext_cmds='.sl'
+-    hardcode_into_libs=yes
+-    dynamic_linker="$host_os dld.sl"
+-    shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+-    shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
+-    sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+-    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+-    ;;
+-  *)
+-    shrext_cmds='.sl'
+-    dynamic_linker="$host_os dld.sl"
+-    shlibpath_var=SHLIB_PATH
+-    shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
+-    ;;
+-  esac
+-  # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+-  postinstall_cmds='chmod 555 $lib'
+-  # or fails outright, so override atomically:
+-  install_override_mode=555
+-  ;;
+-
+-interix[[3-9]]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=no
+-  hardcode_into_libs=yes
+-  ;;
+-
+-irix5* | irix6* | nonstopux*)
+-  case $host_os in
+-    nonstopux*) version_type=nonstopux ;;
+-    *)
+-	if test yes = "$lt_cv_prog_gnu_ld"; then
+-		version_type=linux # correct to gnu/linux during the next big refactor
+-	else
+-		version_type=irix
+-	fi ;;
+-  esac
+-  need_lib_prefix=no
+-  need_version=no
+-  soname_spec='$libname$release$shared_ext$major'
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
+-  case $host_os in
+-  irix5* | nonstopux*)
+-    libsuff= shlibsuff=
+-    ;;
+-  *)
+-    case $LD in # libtool.m4 will add one of these switches to LD
+-    *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ")
+-      libsuff= shlibsuff= libmagic=32-bit;;
+-    *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ")
+-      libsuff=32 shlibsuff=N32 libmagic=N32;;
+-    *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ")
+-      libsuff=64 shlibsuff=64 libmagic=64-bit;;
+-    *) libsuff= shlibsuff= libmagic=never-match;;
+-    esac
+-    ;;
+-  esac
+-  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+-  shlibpath_overrides_runpath=no
+-  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
+-  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
+-  hardcode_into_libs=yes
+-  ;;
+-
+-# No shared lib support for Linux oldld, aout, or coff.
+-linux*oldld* | linux*aout* | linux*coff*)
+-  dynamic_linker=no
+-  ;;
+-
+-linux*android*)
+-  version_type=none # Android doesn't support versioned libraries.
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext'
+-  soname_spec='$libname$release$shared_ext'
+-  finish_cmds=
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-
+-  # This implies no fast_install, which is unacceptable.
+-  # Some rework will be needed to allow for fast_install
+-  # before this can be enabled.
+-  hardcode_into_libs=yes
+-
+-  dynamic_linker='Android linker'
+-  # Don't embed -rpath directories since the linker doesn't support them.
+-  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-  ;;
+-
+-# This must be glibc/ELF.
+-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=no
+-
+-  # Some binutils ld are patched to set DT_RUNPATH
+-  AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath],
+-    [lt_cv_shlibpath_overrides_runpath=no
+-    save_LDFLAGS=$LDFLAGS
+-    save_libdir=$libdir
+-    eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \
+-	 LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\""
+-    AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])],
+-      [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null],
+-	 [lt_cv_shlibpath_overrides_runpath=yes])])
+-    LDFLAGS=$save_LDFLAGS
+-    libdir=$save_libdir
+-    ])
+-  shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath
+-
+-  # This implies no fast_install, which is unacceptable.
+-  # Some rework will be needed to allow for fast_install
+-  # before this can be enabled.
+-  hardcode_into_libs=yes
+-
+-  # Add ABI-specific directories to the system library path.
+-  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"
+-
+-  # Ideally, we could use ldconfig to report *all* directores which are
+-  # searched for libraries, however this is still not possible.  Aside from not
+-  # being certain /sbin/ldconfig is available, command
+-  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
+-  # even though it is searched at run-time.  Try to do the best guess by
+-  # appending ld.so.conf contents (and includes) to the search path.
+-  if test -f /etc/ld.so.conf; then
+-    lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+-    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
+-  fi
+-
+-  # We used to test for /lib/ld.so.1 and disable shared libraries on
+-  # powerpc, because MkLinux only supported shared libraries with the
+-  # GNU dynamic linker.  Since this was broken with cross compilers,
+-  # most powerpc-linux boxes support dynamic linking these days and
+-  # people can always --disable-shared, the test was removed, and we
+-  # assume the GNU/Linux dynamic linker is in use.
+-  dynamic_linker='GNU/Linux ld.so'
+-  ;;
+-
+-netbsd*)
+-  version_type=sunos
+-  need_lib_prefix=no
+-  need_version=no
+-  if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+-    finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+-    dynamic_linker='NetBSD (a.out) ld.so'
+-  else
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
+-    dynamic_linker='NetBSD ld.elf_so'
+-  fi
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-  hardcode_into_libs=yes
+-  ;;
+-
+-newsos6)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-  ;;
+-
+-*nto* | *qnx*)
+-  version_type=qnx
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=no
+-  hardcode_into_libs=yes
+-  dynamic_linker='ldqnx.so'
+-  ;;
+-
+-openbsd* | bitrig*)
+-  version_type=sunos
+-  sys_lib_dlsearch_path_spec=/usr/lib
+-  need_lib_prefix=no
+-  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+-    need_version=no
+-  else
+-    need_version=yes
+-  fi
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+-  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-  ;;
+-
+-os2*)
+-  libname_spec='$name'
+-  version_type=windows
+-  shrext_cmds=.dll
+-  need_version=no
+-  need_lib_prefix=no
+-  # OS/2 can only load a DLL with a base name of 8 characters or less.
+-  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
+-    v=$($ECHO $release$versuffix | tr -d .-);
+-    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
+-    $ECHO $n$v`$shared_ext'
+-  library_names_spec='${libname}_dll.$libext'
+-  dynamic_linker='OS/2 ld.exe'
+-  shlibpath_var=BEGINLIBPATH
+-  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+-  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+-  postinstall_cmds='base_file=`basename \$file`~
+-    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
+-    dldir=$destdir/`dirname \$dlpath`~
+-    test -d \$dldir || mkdir -p \$dldir~
+-    $install_prog $dir/$dlname \$dldir/$dlname~
+-    chmod a+x \$dldir/$dlname~
+-    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+-      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+-    fi'
+-  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
+-    dlpath=$dir/\$dldll~
+-    $RM \$dlpath'
+-  ;;
+-
+-osf3* | osf4* | osf5*)
+-  version_type=osf
+-  need_lib_prefix=no
+-  need_version=no
+-  soname_spec='$libname$release$shared_ext$major'
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+-  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+-  ;;
+-
+-rdos*)
+-  dynamic_linker=no
+-  ;;
+-
+-solaris*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-  hardcode_into_libs=yes
+-  # ldd complains unless libraries are executable
+-  postinstall_cmds='chmod +x $lib'
+-  ;;
+-
+-sunos4*)
+-  version_type=sunos
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+-  finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-  if test yes = "$with_gnu_ld"; then
+-    need_lib_prefix=no
+-  fi
+-  need_version=yes
+-  ;;
+-
+-sysv4 | sysv4.3*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  case $host_vendor in
+-    sni)
+-      shlibpath_overrides_runpath=no
+-      need_lib_prefix=no
+-      runpath_var=LD_RUN_PATH
+-      ;;
+-    siemens)
+-      need_lib_prefix=no
+-      ;;
+-    motorola)
+-      need_lib_prefix=no
+-      need_version=no
+-      shlibpath_overrides_runpath=no
+-      sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib'
+-      ;;
+-  esac
+-  ;;
+-
+-sysv4*MP*)
+-  if test -d /usr/nec; then
+-    version_type=linux # correct to gnu/linux during the next big refactor
+-    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
+-    soname_spec='$libname$shared_ext.$major'
+-    shlibpath_var=LD_LIBRARY_PATH
+-  fi
+-  ;;
+-
+-sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+-  version_type=sco
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-  hardcode_into_libs=yes
+-  if test yes = "$with_gnu_ld"; then
+-    sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+-  else
+-    sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+-    case $host_os in
+-      sco3.2v5*)
+-        sys_lib_search_path_spec="$sys_lib_search_path_spec /lib"
+-	;;
+-    esac
+-  fi
+-  sys_lib_dlsearch_path_spec='/usr/lib'
+-  ;;
+-
+-tpf*)
+-  # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=no
+-  hardcode_into_libs=yes
+-  ;;
+-
+-uts4*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  ;;
+-
+-*)
+-  dynamic_linker=no
+-  ;;
+-esac
+-AC_MSG_RESULT([$dynamic_linker])
+-test no = "$dynamic_linker" && can_build_shared=no
+-
+-variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+-if test yes = "$GCC"; then
+-  variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+-fi
+-
+-if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
+-  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
+-fi
+-
+-if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
+-  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
+-fi
+-
+-# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
+-configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
+-
+-# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
+-func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
+-
+-# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
+-configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+-
+-_LT_DECL([], [variables_saved_for_relink], [1],
+-    [Variables whose values should be saved in libtool wrapper scripts and
+-    restored at link time])
+-_LT_DECL([], [need_lib_prefix], [0],
+-    [Do we need the "lib" prefix for modules?])
+-_LT_DECL([], [need_version], [0], [Do we need a version for libraries?])
+-_LT_DECL([], [version_type], [0], [Library versioning type])
+-_LT_DECL([], [runpath_var], [0],  [Shared library runtime path variable])
+-_LT_DECL([], [shlibpath_var], [0],[Shared library path variable])
+-_LT_DECL([], [shlibpath_overrides_runpath], [0],
+-    [Is shlibpath searched before the hard-coded library search path?])
+-_LT_DECL([], [libname_spec], [1], [Format of library name prefix])
+-_LT_DECL([], [library_names_spec], [1],
+-    [[List of archive names.  First name is the real one, the rest are links.
+-    The last name is the one that the linker finds with -lNAME]])
+-_LT_DECL([], [soname_spec], [1],
+-    [[The coded name of the library, if different from the real name]])
+-_LT_DECL([], [install_override_mode], [1],
+-    [Permission mode override for installation of shared libraries])
+-_LT_DECL([], [postinstall_cmds], [2],
+-    [Command to use after installation of a shared archive])
+-_LT_DECL([], [postuninstall_cmds], [2],
+-    [Command to use after uninstallation of a shared archive])
+-_LT_DECL([], [finish_cmds], [2],
+-    [Commands used to finish a libtool library installation in a directory])
+-_LT_DECL([], [finish_eval], [1],
+-    [[As "finish_cmds", except a single script fragment to be evaled but
+-    not shown]])
+-_LT_DECL([], [hardcode_into_libs], [0],
+-    [Whether we should hardcode library paths into libraries])
+-_LT_DECL([], [sys_lib_search_path_spec], [2],
+-    [Compile-time system search path for libraries])
+-_LT_DECL([sys_lib_dlsearch_path_spec], [configure_time_dlsearch_path], [2],
+-    [Detected run-time system search path for libraries])
+-_LT_DECL([], [configure_time_lt_sys_library_path], [2],
+-    [Explicit LT_SYS_LIBRARY_PATH set during ./configure time])
+-])# _LT_SYS_DYNAMIC_LINKER
+-
+-
+-# _LT_PATH_TOOL_PREFIX(TOOL)
+-# --------------------------
+-# find a file program that can recognize shared library
+-AC_DEFUN([_LT_PATH_TOOL_PREFIX],
+-[m4_require([_LT_DECL_EGREP])dnl
+-AC_MSG_CHECKING([for $1])
+-AC_CACHE_VAL(lt_cv_path_MAGIC_CMD,
+-[case $MAGIC_CMD in
+-[[\\/*] |  ?:[\\/]*])
+-  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
+-  ;;
+-*)
+-  lt_save_MAGIC_CMD=$MAGIC_CMD
+-  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+-dnl $ac_dummy forces splitting on constant user-supplied paths.
+-dnl POSIX.2 word splitting is done only on the output of word expansions,
+-dnl not every word.  This closes a longstanding sh security hole.
+-  ac_dummy="m4_if([$2], , $PATH, [$2])"
+-  for ac_dir in $ac_dummy; do
+-    IFS=$lt_save_ifs
+-    test -z "$ac_dir" && ac_dir=.
+-    if test -f "$ac_dir/$1"; then
+-      lt_cv_path_MAGIC_CMD=$ac_dir/"$1"
+-      if test -n "$file_magic_test_file"; then
+-	case $deplibs_check_method in
+-	"file_magic "*)
+-	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+-	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+-	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+-	    $EGREP "$file_magic_regex" > /dev/null; then
+-	    :
+-	  else
+-	    cat <<_LT_EOF 1>&2
+-
+-*** Warning: the command libtool uses to detect shared libraries,
+-*** $file_magic_cmd, produces output that libtool cannot recognize.
+-*** The result is that libtool may fail to recognize shared libraries
+-*** as such.  This will affect the creation of libtool libraries that
+-*** depend on shared libraries, but programs linked with such libtool
+-*** libraries will work regardless of this problem.  Nevertheless, you
+-*** may want to report the problem to your system manager and/or to
+-*** bug-libtool@gnu.org
+-
+-_LT_EOF
+-	  fi ;;
+-	esac
+-      fi
+-      break
+-    fi
+-  done
+-  IFS=$lt_save_ifs
+-  MAGIC_CMD=$lt_save_MAGIC_CMD
+-  ;;
+-esac])
+-MAGIC_CMD=$lt_cv_path_MAGIC_CMD
+-if test -n "$MAGIC_CMD"; then
+-  AC_MSG_RESULT($MAGIC_CMD)
+-else
+-  AC_MSG_RESULT(no)
+-fi
+-_LT_DECL([], [MAGIC_CMD], [0],
+-	 [Used to examine libraries when file_magic_cmd begins with "file"])dnl
+-])# _LT_PATH_TOOL_PREFIX
+-
+-# Old name:
+-AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], [])
+-
+-
+-# _LT_PATH_MAGIC
+-# --------------
+-# find a file program that can recognize a shared library
+-m4_defun([_LT_PATH_MAGIC],
+-[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
+-if test -z "$lt_cv_path_MAGIC_CMD"; then
+-  if test -n "$ac_tool_prefix"; then
+-    _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH)
+-  else
+-    MAGIC_CMD=:
+-  fi
+-fi
+-])# _LT_PATH_MAGIC
+-
+-
+-# LT_PATH_LD
+-# ----------
+-# find the pathname to the GNU or non-GNU linker
+-AC_DEFUN([LT_PATH_LD],
+-[AC_REQUIRE([AC_PROG_CC])dnl
+-AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+-m4_require([_LT_DECL_SED])dnl
+-m4_require([_LT_DECL_EGREP])dnl
+-m4_require([_LT_PROG_ECHO_BACKSLASH])dnl
+-
+-AC_ARG_WITH([gnu-ld],
+-    [AS_HELP_STRING([--with-gnu-ld],
+-	[assume the C compiler uses GNU ld @<:@default=no@:>@])],
+-    [test no = "$withval" || with_gnu_ld=yes],
+-    [with_gnu_ld=no])dnl
+-
+-ac_prog=ld
+-if test yes = "$GCC"; then
+-  # Check if gcc -print-prog-name=ld gives a path.
+-  AC_MSG_CHECKING([for ld used by $CC])
+-  case $host in
+-  *-*-mingw*)
+-    # gcc leaves a trailing carriage return, which upsets mingw
+-    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+-  *)
+-    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+-  esac
+-  case $ac_prog in
+-    # Accept absolute paths.
+-    [[\\/]]* | ?:[[\\/]]*)
+-      re_direlt='/[[^/]][[^/]]*/\.\./'
+-      # Canonicalize the pathname of ld
+-      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+-      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+-	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+-      done
+-      test -z "$LD" && LD=$ac_prog
+-      ;;
+-  "")
+-    # If it fails, then pretend we aren't using GCC.
+-    ac_prog=ld
+-    ;;
+-  *)
+-    # If it is relative, then search for the first ld in PATH.
+-    with_gnu_ld=unknown
+-    ;;
+-  esac
+-elif test yes = "$with_gnu_ld"; then
+-  AC_MSG_CHECKING([for GNU ld])
+-else
+-  AC_MSG_CHECKING([for non-GNU ld])
+-fi
+-AC_CACHE_VAL(lt_cv_path_LD,
+-[if test -z "$LD"; then
+-  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+-  for ac_dir in $PATH; do
+-    IFS=$lt_save_ifs
+-    test -z "$ac_dir" && ac_dir=.
+-    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+-      lt_cv_path_LD=$ac_dir/$ac_prog
+-      # Check to see if the program is GNU ld.  I'd rather use --version,
+-      # but apparently some variants of GNU ld only accept -v.
+-      # Break only if it was the GNU/non-GNU ld that we prefer.
+-      case `"$lt_cv_path_LD" -v 2>&1 &1 conftest.i
+-cat conftest.i conftest.i >conftest2.i
+-: ${lt_DD:=$DD}
+-AC_PATH_PROGS_FEATURE_CHECK([lt_DD], [dd],
+-[if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
+-  cmp -s conftest.i conftest.out \
+-  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
+-fi])
+-rm -f conftest.i conftest2.i conftest.out])
+-])# _LT_PATH_DD
+-
+-
+-# _LT_CMD_TRUNCATE
+-# ----------------
+-# find command to truncate a binary pipe
+-m4_defun([_LT_CMD_TRUNCATE],
+-[m4_require([_LT_PATH_DD])
+-AC_CACHE_CHECK([how to truncate binary pipes], [lt_cv_truncate_bin],
+-[printf 0123456789abcdef0123456789abcdef >conftest.i
+-cat conftest.i conftest.i >conftest2.i
+-lt_cv_truncate_bin=
+-if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
+-  cmp -s conftest.i conftest.out \
+-  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
+-fi
+-rm -f conftest.i conftest2.i conftest.out
+-test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"])
+-_LT_DECL([lt_truncate_bin], [lt_cv_truncate_bin], [1],
+-  [Command to truncate a binary pipe])
+-])# _LT_CMD_TRUNCATE
+-
+-
+-# _LT_CHECK_MAGIC_METHOD
+-# ----------------------
+-# how to check for library dependencies
+-#  -- PORTME fill in with the dynamic library characteristics
+-m4_defun([_LT_CHECK_MAGIC_METHOD],
+-[m4_require([_LT_DECL_EGREP])
+-m4_require([_LT_DECL_OBJDUMP])
+-AC_CACHE_CHECK([how to recognize dependent libraries],
+-lt_cv_deplibs_check_method,
+-[lt_cv_file_magic_cmd='$MAGIC_CMD'
+-lt_cv_file_magic_test_file=
+-lt_cv_deplibs_check_method='unknown'
+-# Need to set the preceding variable on all platforms that support
+-# interlibrary dependencies.
+-# 'none' -- dependencies not supported.
+-# 'unknown' -- same as none, but documents that we really don't know.
+-# 'pass_all' -- all dependencies passed with no checks.
+-# 'test_compile' -- check by making test program.
+-# 'file_magic [[regex]]' -- check by looking for files in library path
+-# that responds to the $file_magic_cmd with a given extended regex.
+-# If you have 'file' or equivalent on your system and you're not sure
+-# whether 'pass_all' will *always* work, you probably want this one.
+-
+-case $host_os in
+-aix[[4-9]]*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-beos*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-bsdi[[45]]*)
+-  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+-  lt_cv_file_magic_cmd='$FILECMD -L'
+-  lt_cv_file_magic_test_file=/shlib/libc.so
+-  ;;
+-
+-cygwin*)
+-  # func_win32_libid is a shell function defined in ltmain.sh
+-  lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+-  lt_cv_file_magic_cmd='func_win32_libid'
+-  ;;
+-
+-mingw* | pw32*)
+-  # Base MSYS/MinGW do not provide the 'file' command needed by
+-  # func_win32_libid shell function, so use a weaker test based on 'objdump',
+-  # unless we find 'file', for example because we are cross-compiling.
+-  if ( file / ) >/dev/null 2>&1; then
+-    lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+-    lt_cv_file_magic_cmd='func_win32_libid'
+-  else
+-    # Keep this pattern in sync with the one in func_win32_libid.
+-    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+-    lt_cv_file_magic_cmd='$OBJDUMP -f'
+-  fi
+-  ;;
+-
+-cegcc*)
+-  # use the weaker test based on 'objdump'. See mingw*.
+-  lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?'
+-  lt_cv_file_magic_cmd='$OBJDUMP -f'
+-  ;;
+-
+-darwin* | rhapsody*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-freebsd* | dragonfly* | midnightbsd*)
+-  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+-    case $host_cpu in
+-    i*86 )
+-      # Not sure whether the presence of OpenBSD here was a mistake.
+-      # Let's accept both of them until this is cleared up.
+-      lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library'
+-      lt_cv_file_magic_cmd=$FILECMD
+-      lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+-      ;;
+-    esac
+-  else
+-    lt_cv_deplibs_check_method=pass_all
+-  fi
+-  ;;
+-
+-haiku*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-hpux10.20* | hpux11*)
+-  lt_cv_file_magic_cmd=$FILECMD
+-  case $host_cpu in
+-  ia64*)
+-    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64'
+-    lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so
+-    ;;
+-  hppa*64*)
+-    [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]']
+-    lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl
+-    ;;
+-  *)
+-    lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library'
+-    lt_cv_file_magic_test_file=/usr/lib/libc.sl
+-    ;;
+-  esac
+-  ;;
+-
+-interix[[3-9]]*)
+-  # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here
+-  lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$'
+-  ;;
+-
+-irix5* | irix6* | nonstopux*)
+-  case $LD in
+-  *-32|*"-32 ") libmagic=32-bit;;
+-  *-n32|*"-n32 ") libmagic=N32;;
+-  *-64|*"-64 ") libmagic=64-bit;;
+-  *) libmagic=never-match;;
+-  esac
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-# This must be glibc/ELF.
+-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-netbsd*)
+-  if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+-    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+-  else
+-    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$'
+-  fi
+-  ;;
+-
+-newos6*)
+-  lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)'
+-  lt_cv_file_magic_cmd=$FILECMD
+-  lt_cv_file_magic_test_file=/usr/lib/libnls.so
+-  ;;
+-
+-*nto* | *qnx*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-openbsd* | bitrig*)
+-  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+-    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$'
+-  else
+-    lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$'
+-  fi
+-  ;;
+-
+-osf3* | osf4* | osf5*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-rdos*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-solaris*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-
+-sysv4 | sysv4.3*)
+-  case $host_vendor in
+-  motorola)
+-    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]'
+-    lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*`
+-    ;;
+-  ncr)
+-    lt_cv_deplibs_check_method=pass_all
+-    ;;
+-  sequent)
+-    lt_cv_file_magic_cmd='/bin/file'
+-    lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )'
+-    ;;
+-  sni)
+-    lt_cv_file_magic_cmd='/bin/file'
+-    lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib"
+-    lt_cv_file_magic_test_file=/lib/libc.so
+-    ;;
+-  siemens)
+-    lt_cv_deplibs_check_method=pass_all
+-    ;;
+-  pc)
+-    lt_cv_deplibs_check_method=pass_all
+-    ;;
+-  esac
+-  ;;
+-
+-tpf*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-os2*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-esac
+-])
+-
+-file_magic_glob=
+-want_nocaseglob=no
+-if test "$build" = "$host"; then
+-  case $host_os in
+-  mingw* | pw32*)
+-    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+-      want_nocaseglob=yes
+-    else
+-      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"`
+-    fi
+-    ;;
+-  esac
+-fi
+-
+-file_magic_cmd=$lt_cv_file_magic_cmd
+-deplibs_check_method=$lt_cv_deplibs_check_method
+-test -z "$deplibs_check_method" && deplibs_check_method=unknown
+-
+-_LT_DECL([], [deplibs_check_method], [1],
+-    [Method to check whether dependent libraries are shared objects])
+-_LT_DECL([], [file_magic_cmd], [1],
+-    [Command to use when deplibs_check_method = "file_magic"])
+-_LT_DECL([], [file_magic_glob], [1],
+-    [How to find potential files when deplibs_check_method = "file_magic"])
+-_LT_DECL([], [want_nocaseglob], [1],
+-    [Find potential files using nocaseglob when deplibs_check_method = "file_magic"])
+-])# _LT_CHECK_MAGIC_METHOD
+-
+-
+-# LT_PATH_NM
+-# ----------
+-# find the pathname to a BSD- or MS-compatible name lister
+-AC_DEFUN([LT_PATH_NM],
+-[AC_REQUIRE([AC_PROG_CC])dnl
+-AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM,
+-[if test -n "$NM"; then
+-  # Let the user override the test.
+-  lt_cv_path_NM=$NM
+-else
+-  lt_nm_to_check=${ac_tool_prefix}nm
+-  if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+-    lt_nm_to_check="$lt_nm_to_check nm"
+-  fi
+-  for lt_tmp_nm in $lt_nm_to_check; do
+-    lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
+-    for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+-      IFS=$lt_save_ifs
+-      test -z "$ac_dir" && ac_dir=.
+-      tmp_nm=$ac_dir/$lt_tmp_nm
+-      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then
+-	# Check to see if the nm accepts a BSD-compat flag.
+-	# Adding the 'sed 1q' prevents false positives on HP-UX, which says:
+-	#   nm: unknown option "B" ignored
+-	# Tru64's nm complains that /dev/null is an invalid object file
+-	# MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty
+-	case $build_os in
+-	mingw*) lt_bad_file=conftest.nm/nofile ;;
+-	*) lt_bad_file=/dev/null ;;
+-	esac
+-	case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in
+-	*$lt_bad_file* | *'Invalid file or object type'*)
+-	  lt_cv_path_NM="$tmp_nm -B"
+-	  break 2
+-	  ;;
+-	*)
+-	  case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in
+-	  */dev/null*)
+-	    lt_cv_path_NM="$tmp_nm -p"
+-	    break 2
+-	    ;;
+-	  *)
+-	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+-	    continue # so that we can try to find one that supports BSD flags
+-	    ;;
+-	  esac
+-	  ;;
+-	esac
+-      fi
+-    done
+-    IFS=$lt_save_ifs
+-  done
+-  : ${lt_cv_path_NM=no}
+-fi])
+-if test no != "$lt_cv_path_NM"; then
+-  NM=$lt_cv_path_NM
+-else
+-  # Didn't find any BSD compatible name lister, look for dumpbin.
+-  if test -n "$DUMPBIN"; then :
+-    # Let the user override the test.
+-  else
+-    AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :)
+-    case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in
+-    *COFF*)
+-      DUMPBIN="$DUMPBIN -symbols -headers"
+-      ;;
+-    *)
+-      DUMPBIN=:
+-      ;;
+-    esac
+-  fi
+-  AC_SUBST([DUMPBIN])
+-  if test : != "$DUMPBIN"; then
+-    NM=$DUMPBIN
+-  fi
+-fi
+-test -z "$NM" && NM=nm
+-AC_SUBST([NM])
+-_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl
+-
+-AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface],
+-  [lt_cv_nm_interface="BSD nm"
+-  echo "int some_variable = 0;" > conftest.$ac_ext
+-  (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD)
+-  (eval "$ac_compile" 2>conftest.err)
+-  cat conftest.err >&AS_MESSAGE_LOG_FD
+-  (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD)
+-  (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out)
+-  cat conftest.err >&AS_MESSAGE_LOG_FD
+-  (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD)
+-  cat conftest.out >&AS_MESSAGE_LOG_FD
+-  if $GREP 'External.*some_variable' conftest.out > /dev/null; then
+-    lt_cv_nm_interface="MS dumpbin"
+-  fi
+-  rm -f conftest*])
+-])# LT_PATH_NM
+-
+-# Old names:
+-AU_ALIAS([AM_PROG_NM], [LT_PATH_NM])
+-AU_ALIAS([AC_PROG_NM], [LT_PATH_NM])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AM_PROG_NM], [])
+-dnl AC_DEFUN([AC_PROG_NM], [])
+-
+-# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+-# --------------------------------
+-# how to determine the name of the shared library
+-# associated with a specific link library.
+-#  -- PORTME fill in with the dynamic library characteristics
+-m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB],
+-[m4_require([_LT_DECL_EGREP])
+-m4_require([_LT_DECL_OBJDUMP])
+-m4_require([_LT_DECL_DLLTOOL])
+-AC_CACHE_CHECK([how to associate runtime and link libraries],
+-lt_cv_sharedlib_from_linklib_cmd,
+-[lt_cv_sharedlib_from_linklib_cmd='unknown'
+-
+-case $host_os in
+-cygwin* | mingw* | pw32* | cegcc*)
+-  # two different shell functions defined in ltmain.sh;
+-  # decide which one to use based on capabilities of $DLLTOOL
+-  case `$DLLTOOL --help 2>&1` in
+-  *--identify-strict*)
+-    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+-    ;;
+-  *)
+-    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+-    ;;
+-  esac
+-  ;;
+-*)
+-  # fallback: assume linklib IS sharedlib
+-  lt_cv_sharedlib_from_linklib_cmd=$ECHO
+-  ;;
+-esac
+-])
+-sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+-test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+-
+-_LT_DECL([], [sharedlib_from_linklib_cmd], [1],
+-    [Command to associate shared and link libraries])
+-])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+-
+-
+-# _LT_PATH_MANIFEST_TOOL
+-# ----------------------
+-# locate the manifest tool
+-m4_defun([_LT_PATH_MANIFEST_TOOL],
+-[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :)
+-test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+-AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool],
+-  [lt_cv_path_mainfest_tool=no
+-  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD
+-  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+-  cat conftest.err >&AS_MESSAGE_LOG_FD
+-  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+-    lt_cv_path_mainfest_tool=yes
+-  fi
+-  rm -f conftest*])
+-if test yes != "$lt_cv_path_mainfest_tool"; then
+-  MANIFEST_TOOL=:
+-fi
+-_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl
+-])# _LT_PATH_MANIFEST_TOOL
+-
+-
+-# _LT_DLL_DEF_P([FILE])
+-# ---------------------
+-# True iff FILE is a Windows DLL '.def' file.
+-# Keep in sync with func_dll_def_p in the libtool script
+-AC_DEFUN([_LT_DLL_DEF_P],
+-[dnl
+-  test DEF = "`$SED -n dnl
+-    -e '\''s/^[[	 ]]*//'\'' dnl Strip leading whitespace
+-    -e '\''/^\(;.*\)*$/d'\'' dnl      Delete empty lines and comments
+-    -e '\''s/^\(EXPORTS\|LIBRARY\)\([[	 ]].*\)*$/DEF/p'\'' dnl
+-    -e q dnl                          Only consider the first "real" line
+-    $1`" dnl
+-])# _LT_DLL_DEF_P
+-
+-
+-# LT_LIB_M
+-# --------
+-# check for math library
+-AC_DEFUN([LT_LIB_M],
+-[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-LIBM=
+-case $host in
+-*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*)
+-  # These system don't have libm, or don't need it
+-  ;;
+-*-ncr-sysv4.3*)
+-  AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM=-lmw)
+-  AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm")
+-  ;;
+-*)
+-  AC_CHECK_LIB(m, cos, LIBM=-lm)
+-  ;;
+-esac
+-AC_SUBST([LIBM])
+-])# LT_LIB_M
+-
+-# Old name:
+-AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_CHECK_LIBM], [])
+-
+-
+-# _LT_COMPILER_NO_RTTI([TAGNAME])
+-# -------------------------------
+-m4_defun([_LT_COMPILER_NO_RTTI],
+-[m4_require([_LT_TAG_COMPILER])dnl
+-
+-_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+-
+-if test yes = "$GCC"; then
+-  case $cc_basename in
+-  nvcc*)
+-    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;;
+-  *)
+-    _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;;
+-  esac
+-
+-  _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
+-    lt_cv_prog_compiler_rtti_exceptions,
+-    [-fno-rtti -fno-exceptions], [],
+-    [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"])
+-fi
+-_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
+-	[Compiler flag to turn off builtin functions])
+-])# _LT_COMPILER_NO_RTTI
+-
+-
+-# _LT_CMD_GLOBAL_SYMBOLS
+-# ----------------------
+-m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
+-[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-AC_REQUIRE([AC_PROG_CC])dnl
+-AC_REQUIRE([AC_PROG_AWK])dnl
+-AC_REQUIRE([LT_PATH_NM])dnl
+-AC_REQUIRE([LT_PATH_LD])dnl
+-m4_require([_LT_DECL_SED])dnl
+-m4_require([_LT_DECL_EGREP])dnl
+-m4_require([_LT_TAG_COMPILER])dnl
+-
+-# Check for command to grab the raw symbol name followed by C symbol from nm.
+-AC_MSG_CHECKING([command to parse $NM output from $compiler object])
+-AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe],
+-[
+-# These are sane defaults that work on at least a few old systems.
+-# [They come from Ultrix.  What could be older than Ultrix?!! ;)]
+-
+-# Character class describing NM global symbol codes.
+-symcode='[[BCDEGRST]]'
+-
+-# Regexp to match symbols that can be accessed directly from C.
+-sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)'
+-
+-# Define system-specific variables.
+-case $host_os in
+-aix*)
+-  symcode='[[BCDT]]'
+-  ;;
+-cygwin* | mingw* | pw32* | cegcc*)
+-  symcode='[[ABCDGISTW]]'
+-  ;;
+-hpux*)
+-  if test ia64 = "$host_cpu"; then
+-    symcode='[[ABCDEGRST]]'
+-  fi
+-  ;;
+-irix* | nonstopux*)
+-  symcode='[[BCDEGRST]]'
+-  ;;
+-osf*)
+-  symcode='[[BCDEGQRST]]'
+-  ;;
+-solaris*)
+-  symcode='[[BDRT]]'
+-  ;;
+-sco3.2v5*)
+-  symcode='[[DT]]'
+-  ;;
+-sysv4.2uw2*)
+-  symcode='[[DT]]'
+-  ;;
+-sysv5* | sco5v6* | unixware* | OpenUNIX*)
+-  symcode='[[ABDT]]'
+-  ;;
+-sysv4)
+-  symcode='[[DFNSTU]]'
+-  ;;
+-esac
+-
+-# If we're using GNU nm, then use its standard symbol codes.
+-case `$NM -V 2>&1` in
+-*GNU* | *'with BFD'*)
+-  symcode='[[ABCDGIRSTW]]' ;;
+-esac
+-
+-if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+-  # Gets list of data symbols to import.
+-  lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'"
+-  # Adjust the below global symbol transforms to fixup imported variables.
+-  lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'"
+-  lt_c_name_hook=" -e 's/^I .* \(.*\)$/  {\"\1\", (void *) 0},/p'"
+-  lt_c_name_lib_hook="\
+-  -e 's/^I .* \(lib.*\)$/  {\"\1\", (void *) 0},/p'\
+-  -e 's/^I .* \(.*\)$/  {\"lib\1\", (void *) 0},/p'"
+-else
+-  # Disable hooks by default.
+-  lt_cv_sys_global_symbol_to_import=
+-  lt_cdecl_hook=
+-  lt_c_name_hook=
+-  lt_c_name_lib_hook=
+-fi
+-
+-# Transform an extracted symbol line into a proper C declaration.
+-# Some systems (esp. on ia64) link data and code symbols differently,
+-# so use this general approach.
+-lt_cv_sys_global_symbol_to_cdecl="$SED -n"\
+-$lt_cdecl_hook\
+-" -e 's/^T .* \(.*\)$/extern int \1();/p'"\
+-" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'"
+-
+-# Transform an extracted symbol line into symbol name and symbol address
+-lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\
+-$lt_c_name_hook\
+-" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+-" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/p'"
+-
+-# Transform an extracted symbol line into symbol name with lib prefix and
+-# symbol address.
+-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\
+-$lt_c_name_lib_hook\
+-" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+-" -e 's/^$symcode$symcode* .* \(lib.*\)$/  {\"\1\", (void *) \&\1},/p'"\
+-" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"lib\1\", (void *) \&\1},/p'"
+-
+-# Handle CRLF in mingw tool chain
+-opt_cr=
+-case $build_os in
+-mingw*)
+-  opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp
+-  ;;
+-esac
+-
+-# Try without a prefix underscore, then with it.
+-for ac_symprfx in "" "_"; do
+-
+-  # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol.
+-  symxfrm="\\1 $ac_symprfx\\2 \\2"
+-
+-  # Write the raw and C identifiers.
+-  if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+-    # Fake it for dumpbin and say T for any non-static function,
+-    # D for any global variable and I for any imported variable.
+-    # Also find C++ and __fastcall symbols from MSVC++ or ICC,
+-    # which start with @ or ?.
+-    lt_cv_sys_global_symbol_pipe="$AWK ['"\
+-"     {last_section=section; section=\$ 3};"\
+-"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+-"     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+-"     /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\
+-"     /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\
+-"     /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\
+-"     \$ 0!~/External *\|/{next};"\
+-"     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+-"     {if(hide[section]) next};"\
+-"     {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\
+-"     {split(\$ 0,a,/\||\r/); split(a[2],s)};"\
+-"     s[1]~/^[@?]/{print f,s[1],s[1]; next};"\
+-"     s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\
+-"     ' prfx=^$ac_symprfx]"
+-  else
+-    lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[[	 ]]\($symcode$symcode*\)[[	 ]][[	 ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+-  fi
+-  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'"
+-
+-  # Check to see that the pipe works correctly.
+-  pipe_works=no
+-
+-  rm -f conftest*
+-  cat > conftest.$ac_ext <<_LT_EOF
+-#ifdef __cplusplus
+-extern "C" {
+-#endif
+-char nm_test_var;
+-void nm_test_func(void);
+-void nm_test_func(void){}
+-#ifdef __cplusplus
+-}
+-#endif
+-int main(){nm_test_var='a';nm_test_func();return(0);}
+-_LT_EOF
+-
+-  if AC_TRY_EVAL(ac_compile); then
+-    # Now try to grab the symbols.
+-    nlist=conftest.nm
+-    if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then
+-      # Try sorting and uniquifying the output.
+-      if sort "$nlist" | uniq > "$nlist"T; then
+-	mv -f "$nlist"T "$nlist"
+-      else
+-	rm -f "$nlist"T
+-      fi
+-
+-      # Make sure that we snagged all the symbols we need.
+-      if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+-	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+-	  cat <<_LT_EOF > conftest.$ac_ext
+-/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+-#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
+-/* DATA imports from DLLs on WIN32 can't be const, because runtime
+-   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+-# define LT@&t@_DLSYM_CONST
+-#elif defined __osf__
+-/* This system does not cope well with relocations in const data.  */
+-# define LT@&t@_DLSYM_CONST
+-#else
+-# define LT@&t@_DLSYM_CONST const
+-#endif
+-
+-#ifdef __cplusplus
+-extern "C" {
+-#endif
+-
+-_LT_EOF
+-	  # Now generate the symbol file.
+-	  eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+-
+-	  cat <<_LT_EOF >> conftest.$ac_ext
+-
+-/* The mapping between symbol names and symbols.  */
+-LT@&t@_DLSYM_CONST struct {
+-  const char *name;
+-  void       *address;
+-}
+-lt__PROGRAM__LTX_preloaded_symbols[[]] =
+-{
+-  { "@PROGRAM@", (void *) 0 },
+-_LT_EOF
+-	  $SED "s/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+-	  cat <<\_LT_EOF >> conftest.$ac_ext
+-  {0, (void *) 0}
+-};
+-
+-/* This works around a problem in FreeBSD linker */
+-#ifdef FREEBSD_WORKAROUND
+-static const void *lt_preloaded_setup() {
+-  return lt__PROGRAM__LTX_preloaded_symbols;
+-}
+-#endif
+-
+-#ifdef __cplusplus
+-}
+-#endif
+-_LT_EOF
+-	  # Now try linking the two files.
+-	  mv conftest.$ac_objext conftstm.$ac_objext
+-	  lt_globsym_save_LIBS=$LIBS
+-	  lt_globsym_save_CFLAGS=$CFLAGS
+-	  LIBS=conftstm.$ac_objext
+-	  CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
+-	  if AC_TRY_EVAL(ac_link) && test -s conftest$ac_exeext; then
+-	    pipe_works=yes
+-	  fi
+-	  LIBS=$lt_globsym_save_LIBS
+-	  CFLAGS=$lt_globsym_save_CFLAGS
+-	else
+-	  echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
+-	fi
+-      else
+-	echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD
+-      fi
+-    else
+-      echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
+-    fi
+-  else
+-    echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD
+-    cat conftest.$ac_ext >&5
+-  fi
+-  rm -rf conftest* conftst*
+-
+-  # Do not use the global_symbol_pipe unless it works.
+-  if test yes = "$pipe_works"; then
+-    break
+-  else
+-    lt_cv_sys_global_symbol_pipe=
+-  fi
+-done
+-])
+-if test -z "$lt_cv_sys_global_symbol_pipe"; then
+-  lt_cv_sys_global_symbol_to_cdecl=
+-fi
+-if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+-  AC_MSG_RESULT(failed)
+-else
+-  AC_MSG_RESULT(ok)
+-fi
+-
+-# Response file support.
+-if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+-  nm_file_list_spec='@'
+-elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then
+-  nm_file_list_spec='@'
+-fi
+-
+-_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1],
+-    [Take the output of nm and produce a listing of raw symbols and C names])
+-_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1],
+-    [Transform the output of nm in a proper C declaration])
+-_LT_DECL([global_symbol_to_import], [lt_cv_sys_global_symbol_to_import], [1],
+-    [Transform the output of nm into a list of symbols to manually relocate])
+-_LT_DECL([global_symbol_to_c_name_address],
+-    [lt_cv_sys_global_symbol_to_c_name_address], [1],
+-    [Transform the output of nm in a C name address pair])
+-_LT_DECL([global_symbol_to_c_name_address_lib_prefix],
+-    [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1],
+-    [Transform the output of nm in a C name address pair when lib prefix is needed])
+-_LT_DECL([nm_interface], [lt_cv_nm_interface], [1],
+-    [The name lister interface])
+-_LT_DECL([], [nm_file_list_spec], [1],
+-    [Specify filename containing input files for $NM])
+-]) # _LT_CMD_GLOBAL_SYMBOLS
+-
+-
+-# _LT_COMPILER_PIC([TAGNAME])
+-# ---------------------------
+-m4_defun([_LT_COMPILER_PIC],
+-[m4_require([_LT_TAG_COMPILER])dnl
+-_LT_TAGVAR(lt_prog_compiler_wl, $1)=
+-_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+-_LT_TAGVAR(lt_prog_compiler_static, $1)=
+-
+-m4_if([$1], [CXX], [
+-  # C++ specific cases for pic, static, wl, etc.
+-  if test yes = "$GXX"; then
+-    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-
+-    case $host_os in
+-    aix*)
+-      # All AIX code is PIC.
+-      if test ia64 = "$host_cpu"; then
+-	# AIX 5 now supports IA64 processor
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      fi
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-      ;;
+-
+-    amigaos*)
+-      case $host_cpu in
+-      powerpc)
+-            # see comment about AmigaOS4 .so support
+-            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-        ;;
+-      m68k)
+-            # FIXME: we need at least 68020 code to build shared libraries, but
+-            # adding the '-m68020' flag to GCC prevents building anything better,
+-            # like '-m68040'.
+-            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+-        ;;
+-      esac
+-      ;;
+-
+-    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+-      # PIC is the default for these OSes.
+-      ;;
+-    mingw* | cygwin* | os2* | pw32* | cegcc*)
+-      # This hack is so that the source file can tell whether it is being
+-      # built for inclusion in a dll (and should export symbols for example).
+-      # Although the cygwin gcc ignores -fPIC, still need this for old-style
+-      # (--disable-auto-import) libraries
+-      m4_if([$1], [GCJ], [],
+-	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+-      case $host_os in
+-      os2*)
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
+-	;;
+-      esac
+-      ;;
+-    darwin* | rhapsody*)
+-      # PIC is the default on this platform
+-      # Common symbols not allowed in MH_DYLIB files
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+-      ;;
+-    *djgpp*)
+-      # DJGPP does not support shared libraries at all
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+-      ;;
+-    haiku*)
+-      # PIC is the default for Haiku.
+-      # The "-static" flag exists, but is broken.
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)=
+-      ;;
+-    interix[[3-9]]*)
+-      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+-      # Instead, we relocate shared libraries at runtime.
+-      ;;
+-    sysv4*MP*)
+-      if test -d /usr/nec; then
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+-      fi
+-      ;;
+-    hpux*)
+-      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+-      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+-      # sets the default TLS model and affects inlining.
+-      case $host_cpu in
+-      hppa*64*)
+-	;;
+-      *)
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-	;;
+-      esac
+-      ;;
+-    *qnx* | *nto*)
+-      # QNX uses GNU C++, but need to define -shared option too, otherwise
+-      # it will coredump.
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+-      ;;
+-    *)
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-      ;;
+-    esac
+-  else
+-    case $host_os in
+-      aix[[4-9]]*)
+-	# All AIX code is PIC.
+-	if test ia64 = "$host_cpu"; then
+-	  # AIX 5 now supports IA64 processor
+-	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	else
+-	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+-	fi
+-	;;
+-      chorus*)
+-	case $cc_basename in
+-	cxch68*)
+-	  # Green Hills C++ Compiler
+-	  # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a"
+-	  ;;
+-	esac
+-	;;
+-      mingw* | cygwin* | os2* | pw32* | cegcc*)
+-	# This hack is so that the source file can tell whether it is being
+-	# built for inclusion in a dll (and should export symbols for example).
+-	m4_if([$1], [GCJ], [],
+-	  [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+-	;;
+-      dgux*)
+-	case $cc_basename in
+-	  ec++*)
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	    ;;
+-	  ghcx*)
+-	    # Green Hills C++ Compiler
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      freebsd* | dragonfly* | midnightbsd*)
+-	# FreeBSD uses GNU C++
+-	;;
+-      hpux9* | hpux10* | hpux11*)
+-	case $cc_basename in
+-	  CC*)
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
+-	    if test ia64 != "$host_cpu"; then
+-	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+-	    fi
+-	    ;;
+-	  aCC*)
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
+-	    case $host_cpu in
+-	    hppa*64*|ia64*)
+-	      # +Z the default
+-	      ;;
+-	    *)
+-	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+-	      ;;
+-	    esac
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      interix*)
+-	# This is c89, which is MS Visual C++ (no shared libs)
+-	# Anyone wants to do a port?
+-	;;
+-      irix5* | irix6* | nonstopux*)
+-	case $cc_basename in
+-	  CC*)
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+-	    # CC pic flag -KPIC is the default.
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+-	case $cc_basename in
+-	  KCC*)
+-	    # KAI C++ Compiler
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-	    ;;
+-	  ecpc* )
+-	    # old Intel C++ for x86_64, which still supported -KPIC.
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-	    ;;
+-	  icpc* )
+-	    # Intel C++, used to be incompatible with GCC.
+-	    # ICC 10 doesn't accept -KPIC any more.
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-	    ;;
+-	  pgCC* | pgcpp*)
+-	    # Portland Group C++ compiler
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	    ;;
+-	  cxx*)
+-	    # Compaq C++
+-	    # Make sure the PIC flag is empty.  It appears that all Alpha
+-	    # Linux and Compaq Tru64 Unix objects are PIC.
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+-	    ;;
+-	  xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*)
+-	    # IBM XL 8.0, 9.0 on PPC and BlueGene
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+-	    ;;
+-	  *)
+-	    case `$CC -V 2>&1 | $SED 5q` in
+-	    *Sun\ C*)
+-	      # Sun C++ 5.9
+-	      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+-	      ;;
+-	    esac
+-	    ;;
+-	esac
+-	;;
+-      lynxos*)
+-	;;
+-      m88k*)
+-	;;
+-      mvs*)
+-	case $cc_basename in
+-	  cxx*)
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall'
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      netbsd*)
+-	;;
+-      *qnx* | *nto*)
+-        # QNX uses GNU C++, but need to define -shared option too, otherwise
+-        # it will coredump.
+-        _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+-        ;;
+-      osf3* | osf4* | osf5*)
+-	case $cc_basename in
+-	  KCC*)
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,'
+-	    ;;
+-	  RCC*)
+-	    # Rational C++ 2.4.1
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+-	    ;;
+-	  cxx*)
+-	    # Digital/Compaq C++
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    # Make sure the PIC flag is empty.  It appears that all Alpha
+-	    # Linux and Compaq Tru64 Unix objects are PIC.
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      psos*)
+-	;;
+-      solaris*)
+-	case $cc_basename in
+-	  CC* | sunCC*)
+-	    # Sun C++ 4.2, 5.x and Centerline C++
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+-	    ;;
+-	  gcx*)
+-	    # Green Hills C++ Compiler
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      sunos4*)
+-	case $cc_basename in
+-	  CC*)
+-	    # Sun C++ 4.x
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	    ;;
+-	  lcc*)
+-	    # Lucid
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+-	case $cc_basename in
+-	  CC*)
+-	    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	    _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	    ;;
+-	esac
+-	;;
+-      tandem*)
+-	case $cc_basename in
+-	  NCC*)
+-	    # NonStop-UX NCC 3.20
+-	    _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	    ;;
+-	  *)
+-	    ;;
+-	esac
+-	;;
+-      vxworks*)
+-	;;
+-      *)
+-	_LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+-	;;
+-    esac
+-  fi
+-],
+-[
+-  if test yes = "$GCC"; then
+-    _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-    _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-
+-    case $host_os in
+-      aix*)
+-      # All AIX code is PIC.
+-      if test ia64 = "$host_cpu"; then
+-	# AIX 5 now supports IA64 processor
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      fi
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-      ;;
+-
+-    amigaos*)
+-      case $host_cpu in
+-      powerpc)
+-            # see comment about AmigaOS4 .so support
+-            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-        ;;
+-      m68k)
+-            # FIXME: we need at least 68020 code to build shared libraries, but
+-            # adding the '-m68020' flag to GCC prevents building anything better,
+-            # like '-m68040'.
+-            _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4'
+-        ;;
+-      esac
+-      ;;
+-
+-    beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*)
+-      # PIC is the default for these OSes.
+-      ;;
+-
+-    mingw* | cygwin* | pw32* | os2* | cegcc*)
+-      # This hack is so that the source file can tell whether it is being
+-      # built for inclusion in a dll (and should export symbols for example).
+-      # Although the cygwin gcc ignores -fPIC, still need this for old-style
+-      # (--disable-auto-import) libraries
+-      m4_if([$1], [GCJ], [],
+-	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+-      case $host_os in
+-      os2*)
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
+-	;;
+-      esac
+-      ;;
+-
+-    darwin* | rhapsody*)
+-      # PIC is the default on this platform
+-      # Common symbols not allowed in MH_DYLIB files
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+-      ;;
+-
+-    haiku*)
+-      # PIC is the default for Haiku.
+-      # The "-static" flag exists, but is broken.
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)=
+-      ;;
+-
+-    hpux*)
+-      # PIC is the default for 64-bit PA HP-UX, but not for 32-bit
+-      # PA HP-UX.  On IA64 HP-UX, PIC is the default but the pic flag
+-      # sets the default TLS model and affects inlining.
+-      case $host_cpu in
+-      hppa*64*)
+-	# +Z the default
+-	;;
+-      *)
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-	;;
+-      esac
+-      ;;
+-
+-    interix[[3-9]]*)
+-      # Interix 3.x gcc -fpic/-fPIC options generate broken code.
+-      # Instead, we relocate shared libraries at runtime.
+-      ;;
+-
+-    msdosdjgpp*)
+-      # Just because we use GCC doesn't mean we suddenly get shared libraries
+-      # on systems that don't support them.
+-      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+-      enable_shared=no
+-      ;;
+-
+-    *nto* | *qnx*)
+-      # QNX uses GNU C++, but need to define -shared option too, otherwise
+-      # it will coredump.
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+-      ;;
+-
+-    sysv4*MP*)
+-      if test -d /usr/nec; then
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic
+-      fi
+-      ;;
+-
+-    *)
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-      ;;
+-    esac
+-
+-    case $cc_basename in
+-    nvcc*) # Cuda Compiler Driver 2.2
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker '
+-      if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+-        _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)"
+-      fi
+-      ;;
+-    esac
+-  else
+-    # PORTME Check for flag to pass linker flags through the system compiler.
+-    case $host_os in
+-    aix*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-      if test ia64 = "$host_cpu"; then
+-	# AIX 5 now supports IA64 processor
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      else
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp'
+-      fi
+-      ;;
+-
+-    darwin* | rhapsody*)
+-      # PIC is the default on this platform
+-      # Common symbols not allowed in MH_DYLIB files
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common'
+-      case $cc_basename in
+-      nagfor*)
+-        # NAG Fortran compiler
+-        _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
+-        _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+-        _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-        ;;
+-      esac
+-      ;;
+-
+-    mingw* | cygwin* | pw32* | os2* | cegcc*)
+-      # This hack is so that the source file can tell whether it is being
+-      # built for inclusion in a dll (and should export symbols for example).
+-      m4_if([$1], [GCJ], [],
+-	[_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT'])
+-      case $host_os in
+-      os2*)
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-static'
+-	;;
+-      esac
+-      ;;
+-
+-    hpux9* | hpux10* | hpux11*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-      # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but
+-      # not for PA HP-UX.
+-      case $host_cpu in
+-      hppa*64*|ia64*)
+-	# +Z the default
+-	;;
+-      *)
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z'
+-	;;
+-      esac
+-      # Is there a better lt_prog_compiler_static that works with the bundled CC?
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='$wl-a ${wl}archive'
+-      ;;
+-
+-    irix5* | irix6* | nonstopux*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-      # PIC (with -KPIC) is the default.
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+-      ;;
+-
+-    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+-      case $cc_basename in
+-      # old Intel for x86_64, which still supported -KPIC.
+-      ecc*)
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-        ;;
+-      # icc used to be incompatible with GCC.
+-      # ICC 10 doesn't accept -KPIC any more.
+-      icc* | ifort*)
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-        ;;
+-      # Lahey Fortran 8.1.
+-      lf95*)
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='--static'
+-	;;
+-      nagfor*)
+-	# NAG Fortran compiler
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,'
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	;;
+-      tcc*)
+-	# Fabrice Bellard et al's Tiny C Compiler
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-	;;
+-      pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+-        # Portland Group compilers (*not* the Pentium gcc compiler,
+-	# which looks to be a dead project)
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-        ;;
+-      ccc*)
+-        _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-        # All Alpha code is PIC.
+-        _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+-        ;;
+-      xl* | bgxl* | bgf* | mpixl*)
+-	# IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink'
+-	;;
+-      *)
+-	case `$CC -V 2>&1 | $SED 5q` in
+-	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*)
+-	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
+-	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	  _LT_TAGVAR(lt_prog_compiler_wl, $1)=''
+-	  ;;
+-	*Sun\ F* | *Sun*Fortran*)
+-	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+-	  ;;
+-	*Sun\ C*)
+-	  # Sun C 5.9
+-	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	  ;;
+-        *Intel*\ [[CF]]*Compiler*)
+-	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC'
+-	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-static'
+-	  ;;
+-	*Portland\ Group*)
+-	  _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-	  _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic'
+-	  _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-	  ;;
+-	esac
+-	;;
+-      esac
+-      ;;
+-
+-    newsos6)
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      ;;
+-
+-    *nto* | *qnx*)
+-      # QNX uses GNU C++, but need to define -shared option too, otherwise
+-      # it will coredump.
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared'
+-      ;;
+-
+-    osf3* | osf4* | osf5*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-      # All OSF/1 code is PIC.
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+-      ;;
+-
+-    rdos*)
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared'
+-      ;;
+-
+-    solaris*)
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      case $cc_basename in
+-      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';;
+-      *)
+-	_LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';;
+-      esac
+-      ;;
+-
+-    sunos4*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld '
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC'
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      ;;
+-
+-    sysv4 | sysv4.2uw2* | sysv4.3*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      ;;
+-
+-    sysv4*MP*)
+-      if test -d /usr/nec; then
+-	_LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic'
+-	_LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      fi
+-      ;;
+-
+-    sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC'
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      ;;
+-
+-    unicos*)
+-      _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,'
+-      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+-      ;;
+-
+-    uts4*)
+-      _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic'
+-      _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic'
+-      ;;
+-
+-    *)
+-      _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no
+-      ;;
+-    esac
+-  fi
+-])
+-case $host_os in
+-  # For platforms that do not support PIC, -DPIC is meaningless:
+-  *djgpp*)
+-    _LT_TAGVAR(lt_prog_compiler_pic, $1)=
+-    ;;
+-  *)
+-    _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])"
+-    ;;
+-esac
+-
+-AC_CACHE_CHECK([for $compiler option to produce PIC],
+-  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)],
+-  [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)])
+-_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)
+-
+-#
+-# Check to make sure the PIC flag actually works.
+-#
+-if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then
+-  _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works],
+-    [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)],
+-    [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [],
+-    [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in
+-     "" | " "*) ;;
+-     *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;;
+-     esac],
+-    [_LT_TAGVAR(lt_prog_compiler_pic, $1)=
+-     _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no])
+-fi
+-_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1],
+-	[Additional compiler flags for building library objects])
+-
+-_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1],
+-	[How to pass a linker flag through the compiler])
+-#
+-# Check to make sure the static flag actually works.
+-#
+-wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\"
+-_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works],
+-  _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1),
+-  $lt_tmp_static_flag,
+-  [],
+-  [_LT_TAGVAR(lt_prog_compiler_static, $1)=])
+-_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1],
+-	[Compiler flag to prevent dynamic linking])
+-])# _LT_COMPILER_PIC
+-
+-
+-# _LT_LINKER_SHLIBS([TAGNAME])
+-# ----------------------------
+-# See if the linker supports building shared libraries.
+-m4_defun([_LT_LINKER_SHLIBS],
+-[AC_REQUIRE([LT_PATH_LD])dnl
+-AC_REQUIRE([LT_PATH_NM])dnl
+-m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+-m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-m4_require([_LT_DECL_EGREP])dnl
+-m4_require([_LT_DECL_SED])dnl
+-m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl
+-m4_require([_LT_TAG_COMPILER])dnl
+-AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+-m4_if([$1], [CXX], [
+-  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+-  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+-  case $host_os in
+-  aix[[4-9]]*)
+-    # If we're using GNU nm, then we don't want the "-C" option.
+-    # -C means demangle to GNU nm, but means don't demangle to AIX nm.
+-    # Without the "-l" option, or with the "-B" option, AIX nm treats
+-    # weak defined symbols like other global defined symbols, whereas
+-    # GNU nm marks them as "W".
+-    # While the 'weak' keyword is ignored in the Export File, we need
+-    # it in the Import File for the 'aix-soname' feature, so we have
+-    # to replace the "-B" option with "-P" for AIX nm.
+-    if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+-      _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
+-    else
+-      _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
+-    fi
+-    ;;
+-  pw32*)
+-    _LT_TAGVAR(export_symbols_cmds, $1)=$ltdll_cmds
+-    ;;
+-  cygwin* | mingw* | cegcc*)
+-    case $cc_basename in
+-    cl* | icl*)
+-      _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+-      ;;
+-    *)
+-      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+-      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+-      ;;
+-    esac
+-    ;;
+-  *)
+-    _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+-    ;;
+-  esac
+-], [
+-  runpath_var=
+-  _LT_TAGVAR(allow_undefined_flag, $1)=
+-  _LT_TAGVAR(always_export_symbols, $1)=no
+-  _LT_TAGVAR(archive_cmds, $1)=
+-  _LT_TAGVAR(archive_expsym_cmds, $1)=
+-  _LT_TAGVAR(compiler_needs_object, $1)=no
+-  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+-  _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+-  _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+-  _LT_TAGVAR(hardcode_automatic, $1)=no
+-  _LT_TAGVAR(hardcode_direct, $1)=no
+-  _LT_TAGVAR(hardcode_direct_absolute, $1)=no
+-  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+-  _LT_TAGVAR(hardcode_libdir_separator, $1)=
+-  _LT_TAGVAR(hardcode_minus_L, $1)=no
+-  _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+-  _LT_TAGVAR(inherit_rpath, $1)=no
+-  _LT_TAGVAR(link_all_deplibs, $1)=unknown
+-  _LT_TAGVAR(module_cmds, $1)=
+-  _LT_TAGVAR(module_expsym_cmds, $1)=
+-  _LT_TAGVAR(old_archive_from_new_cmds, $1)=
+-  _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)=
+-  _LT_TAGVAR(thread_safe_flag_spec, $1)=
+-  _LT_TAGVAR(whole_archive_flag_spec, $1)=
+-  # include_expsyms should be a list of space-separated symbols to be *always*
+-  # included in the symbol list
+-  _LT_TAGVAR(include_expsyms, $1)=
+-  # exclude_expsyms can be an extended regexp of symbols to exclude
+-  # it will be wrapped by ' (' and ')$', so one must not match beginning or
+-  # end of line.  Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc',
+-  # as well as any symbol that contains 'd'.
+-  _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*']
+-  # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
+-  # platforms (ab)use it in PIC code, but their linkers get confused if
+-  # the symbol is explicitly referenced.  Since portable code cannot
+-  # rely on this symbol name, it's probably fine to never include it in
+-  # preloaded symbol tables.
+-  # Exclude shared library initialization/finalization symbols.
+-dnl Note also adjust exclude_expsyms for C++ above.
+-  extract_expsyms_cmds=
+-
+-  case $host_os in
+-  cygwin* | mingw* | pw32* | cegcc*)
+-    # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time
+-    # When not using gcc, we currently assume that we are using
+-    # Microsoft Visual C++ or Intel C++ Compiler.
+-    if test yes != "$GCC"; then
+-      with_gnu_ld=no
+-    fi
+-    ;;
+-  interix*)
+-    # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC)
+-    with_gnu_ld=yes
+-    ;;
+-  openbsd* | bitrig*)
+-    with_gnu_ld=no
+-    ;;
+-  esac
+-
+-  _LT_TAGVAR(ld_shlibs, $1)=yes
+-
+-  # On some targets, GNU ld is compatible enough with the native linker
+-  # that we're better off using the native interface for both.
+-  lt_use_gnu_ld_interface=no
+-  if test yes = "$with_gnu_ld"; then
+-    case $host_os in
+-      aix*)
+-	# The AIX port of GNU ld has always aspired to compatibility
+-	# with the native linker.  However, as the warning in the GNU ld
+-	# block says, versions before 2.19.5* couldn't really create working
+-	# shared libraries, regardless of the interface used.
+-	case `$LD -v 2>&1` in
+-	  *\ \(GNU\ Binutils\)\ 2.19.5*) ;;
+-	  *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;;
+-	  *\ \(GNU\ Binutils\)\ [[3-9]]*) ;;
+-	  *)
+-	    lt_use_gnu_ld_interface=yes
+-	    ;;
+-	esac
+-	;;
+-      *)
+-	lt_use_gnu_ld_interface=yes
+-	;;
+-    esac
+-  fi
+-
+-  if test yes = "$lt_use_gnu_ld_interface"; then
+-    # If archive_cmds runs LD, not CC, wlarc should be empty
+-    wlarc='$wl'
+-
+-    # Set some defaults for GNU ld with shared library support. These
+-    # are reset later if shared libraries are not supported. Putting them
+-    # here allows them to be overridden if necessary.
+-    runpath_var=LD_RUN_PATH
+-    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+-    # ancient GNU ld didn't support --whole-archive et. al.
+-    if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+-      _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+-    else
+-      _LT_TAGVAR(whole_archive_flag_spec, $1)=
+-    fi
+-    supports_anon_versioning=no
+-    case `$LD -v | $SED -e 's/([[^)]]\+)\s\+//' 2>&1` in
+-      *GNU\ gold*) supports_anon_versioning=yes ;;
+-      *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11
+-      *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+-      *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ...
+-      *\ 2.11.*) ;; # other 2.11 versions
+-      *) supports_anon_versioning=yes ;;
+-    esac
+-
+-    # See if GNU ld supports shared libraries.
+-    case $host_os in
+-    aix[[3-9]]*)
+-      # On AIX/PPC, the GNU linker is very broken
+-      if test ia64 != "$host_cpu"; then
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-	cat <<_LT_EOF 1>&2
+-
+-*** Warning: the GNU linker, at least up to release 2.19, is reported
+-*** to be unable to reliably create shared libraries on AIX.
+-*** Therefore, libtool is disabling shared libraries support.  If you
+-*** really care for shared libraries, you may want to install binutils
+-*** 2.20 or above, or modify your PATH so that a non-GNU linker is found.
+-*** You will then need to restart the configuration process.
+-
+-_LT_EOF
+-      fi
+-      ;;
+-
+-    amigaos*)
+-      case $host_cpu in
+-      powerpc)
+-            # see comment about AmigaOS4 .so support
+-            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-            _LT_TAGVAR(archive_expsym_cmds, $1)=''
+-        ;;
+-      m68k)
+-            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+-            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-            _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-        ;;
+-      esac
+-      ;;
+-
+-    beos*)
+-      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-	# Joseph Beckenbach  says some releases of gcc
+-	# support --undefined.  This deserves some investigation.  FIXME
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-      else
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-      fi
+-      ;;
+-
+-    cygwin* | mingw* | pw32* | cegcc*)
+-      # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+-      # as there is no search path for DLLs.
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols'
+-      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-      _LT_TAGVAR(always_export_symbols, $1)=no
+-      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-      _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols'
+-      _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname']
+-
+-      if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+-        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+-	# If the export-symbols file already is a .def file, use it as
+-	# is; otherwise, prepend EXPORTS...
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+-          cp $export_symbols $output_objdir/$soname.def;
+-        else
+-          echo EXPORTS > $output_objdir/$soname.def;
+-          cat $export_symbols >> $output_objdir/$soname.def;
+-        fi~
+-        $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+-      else
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-      fi
+-      ;;
+-
+-    haiku*)
+-      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-      _LT_TAGVAR(link_all_deplibs, $1)=yes
+-      ;;
+-
+-    os2*)
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-      shrext_cmds=.dll
+-      _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	prefix_cmds="$SED"~
+-	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+-	  prefix_cmds="$prefix_cmds -e 1d";
+-	fi~
+-	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+-	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+-      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-      _LT_TAGVAR(file_list_spec, $1)='@'
+-      ;;
+-
+-    interix[[3-9]]*)
+-      _LT_TAGVAR(hardcode_direct, $1)=no
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-      # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+-      # Instead, shared libraries are loaded at an image base (0x10000000 by
+-      # default) and relocated if they conflict, which is a slow very memory
+-      # consuming and fragmenting process.  To avoid this, we pick a random,
+-      # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+-      # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+-      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+-      _LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+-      ;;
+-
+-    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
+-      tmp_diet=no
+-      if test linux-dietlibc = "$host_os"; then
+-	case $cc_basename in
+-	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
+-	esac
+-      fi
+-      if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+-	 && test no = "$tmp_diet"
+-      then
+-	tmp_addflag=' $pic_flag'
+-	tmp_sharedflag='-shared'
+-	case $cc_basename,$host_cpu in
+-        pgcc*)				# Portland Group C compiler
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+-	  tmp_addflag=' $pic_flag'
+-	  ;;
+-	pgf77* | pgf90* | pgf95* | pgfortran*)
+-					# Portland Group f77 and f90 compilers
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+-	  tmp_addflag=' $pic_flag -Mnomain' ;;
+-	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
+-	  tmp_addflag=' -i_dynamic' ;;
+-	efc*,ia64* | ifort*,ia64*)	# Intel Fortran compiler on ia64
+-	  tmp_addflag=' -i_dynamic -nofor_main' ;;
+-	ifc* | ifort*)			# Intel Fortran compiler
+-	  tmp_addflag=' -nofor_main' ;;
+-	lf95*)				# Lahey Fortran 8.1
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)=
+-	  tmp_sharedflag='--shared' ;;
+-        nagfor*)                        # NAGFOR 5.3
+-          tmp_sharedflag='-Wl,-shared' ;;
+-	xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+-	  tmp_sharedflag='-qmkshrobj'
+-	  tmp_addflag= ;;
+-	nvcc*)	# Cuda Compiler Driver 2.2
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+-	  _LT_TAGVAR(compiler_needs_object, $1)=yes
+-	  ;;
+-	esac
+-	case `$CC -V 2>&1 | $SED 5q` in
+-	*Sun\ C*)			# Sun C 5.9
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+-	  _LT_TAGVAR(compiler_needs_object, $1)=yes
+-	  tmp_sharedflag='-G' ;;
+-	*Sun\ F*)			# Sun Fortran 8.3
+-	  tmp_sharedflag='-G' ;;
+-	esac
+-	_LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-
+-        if test yes = "$supports_anon_versioning"; then
+-          _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+-            cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+-            echo "local: *; };" >> $output_objdir/$libname.ver~
+-            $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
+-        fi
+-
+-	case $cc_basename in
+-	tcc*)
+-	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='-rdynamic'
+-	  ;;
+-	xlf* | bgf* | bgxlf* | mpixlf*)
+-	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive'
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-	  _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+-	  if test yes = "$supports_anon_versioning"; then
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+-              cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+-              echo "local: *; };" >> $output_objdir/$libname.ver~
+-              $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+-	  fi
+-	  ;;
+-	esac
+-      else
+-        _LT_TAGVAR(ld_shlibs, $1)=no
+-      fi
+-      ;;
+-
+-    netbsd*)
+-      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+-	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+-	wlarc=
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-      fi
+-      ;;
+-
+-    solaris*)
+-      if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-	cat <<_LT_EOF 1>&2
+-
+-*** Warning: The releases 2.8.* of the GNU linker cannot reliably
+-*** create shared libraries on Solaris systems.  Therefore, libtool
+-*** is disabling shared libraries support.  We urge you to upgrade GNU
+-*** binutils to release 2.9.1 or newer.  Another option is to modify
+-*** your PATH or compiler configuration so that the native linker is
+-*** used, and then restart.
+-
+-_LT_EOF
+-      elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-      else
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-      fi
+-      ;;
+-
+-    sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*)
+-      case `$LD -v 2>&1` in
+-        *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*)
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-	cat <<_LT_EOF 1>&2
+-
+-*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
+-*** reliably create shared libraries on SCO systems.  Therefore, libtool
+-*** is disabling shared libraries support.  We urge you to upgrade GNU
+-*** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
+-*** your PATH or compiler configuration so that the native linker is
+-*** used, and then restart.
+-
+-_LT_EOF
+-	;;
+-	*)
+-	  # For security reasons, it is highly recommended that you always
+-	  # use absolute paths for naming shared libraries, and exclude the
+-	  # DT_RUNPATH tag from executables and libraries.  But doing so
+-	  # requires that you compile everything twice, which is a pain.
+-	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-	  else
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	  fi
+-	;;
+-      esac
+-      ;;
+-
+-    sunos4*)
+-      _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+-      wlarc=
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    *)
+-      if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-      else
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-      fi
+-      ;;
+-    esac
+-
+-    if test no = "$_LT_TAGVAR(ld_shlibs, $1)"; then
+-      runpath_var=
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)=
+-      _LT_TAGVAR(whole_archive_flag_spec, $1)=
+-    fi
+-  else
+-    # PORTME fill in a description of your system's linker (not GNU ld)
+-    case $host_os in
+-    aix3*)
+-      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-      _LT_TAGVAR(always_export_symbols, $1)=yes
+-      _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname'
+-      # Note: this linker hardcodes the directories in LIBPATH if there
+-      # are no directories specified by -L.
+-      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-      if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then
+-	# Neither direct hardcoding nor static linking is supported with a
+-	# broken collect2.
+-	_LT_TAGVAR(hardcode_direct, $1)=unsupported
+-      fi
+-      ;;
+-
+-    aix[[4-9]]*)
+-      if test ia64 = "$host_cpu"; then
+-	# On IA64, the linker does run time linking by default, so we don't
+-	# have to do anything special.
+-	aix_use_runtimelinking=no
+-	exp_sym_flag='-Bexport'
+-	no_entry_flag=
+-      else
+-	# If we're using GNU nm, then we don't want the "-C" option.
+-	# -C means demangle to GNU nm, but means don't demangle to AIX nm.
+-	# Without the "-l" option, or with the "-B" option, AIX nm treats
+-	# weak defined symbols like other global defined symbols, whereas
+-	# GNU nm marks them as "W".
+-	# While the 'weak' keyword is ignored in the Export File, we need
+-	# it in the Import File for the 'aix-soname' feature, so we have
+-	# to replace the "-B" option with "-P" for AIX nm.
+-	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+-	  _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
+-	else
+-	  _LT_TAGVAR(export_symbols_cmds, $1)='`func_echo_all $NM | $SED -e '\''s/B\([[^B]]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && ([substr](\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
+-	fi
+-	aix_use_runtimelinking=no
+-
+-	# Test if we are trying to use run time linking or normal
+-	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
+-	# have runtime linking enabled, and use it for executables.
+-	# For shared libraries, we enable/disable runtime linking
+-	# depending on the kind of the shared library created -
+-	# when "with_aix_soname,aix_use_runtimelinking" is:
+-	# "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+-	# "aix,yes"  lib.so          shared, rtl:yes, for executables
+-	#            lib.a           static archive
+-	# "both,no"  lib.so.V(shr.o) shared, rtl:yes
+-	#            lib.a(lib.so.V) shared, rtl:no,  for executables
+-	# "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+-	#            lib.a(lib.so.V) shared, rtl:no
+-	# "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+-	#            lib.a           static archive
+-	case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+-	  for ld_flag in $LDFLAGS; do
+-	  if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then
+-	    aix_use_runtimelinking=yes
+-	    break
+-	  fi
+-	  done
+-	  if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+-	    # With aix-soname=svr4, we create the lib.so.V shared archives only,
+-	    # so we don't have lib.a shared libs to link our executables.
+-	    # We have to force runtime linking in this case.
+-	    aix_use_runtimelinking=yes
+-	    LDFLAGS="$LDFLAGS -Wl,-brtl"
+-	  fi
+-	  ;;
+-	esac
+-
+-	exp_sym_flag='-bexport'
+-	no_entry_flag='-bnoentry'
+-      fi
+-
+-      # When large executables or shared objects are built, AIX ld can
+-      # have problems creating the table of contents.  If linking a library
+-      # or program results in "error TOC overflow" add -mminimal-toc to
+-      # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+-      # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+-
+-      _LT_TAGVAR(archive_cmds, $1)=''
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+-      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+-      _LT_TAGVAR(link_all_deplibs, $1)=yes
+-      _LT_TAGVAR(file_list_spec, $1)='$wl-f,'
+-      case $with_aix_soname,$aix_use_runtimelinking in
+-      aix,*) ;; # traditional, no import file
+-      svr4,* | *,yes) # use import file
+-	# The Import File defines what to hardcode.
+-	_LT_TAGVAR(hardcode_direct, $1)=no
+-	_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+-	;;
+-      esac
+-
+-      if test yes = "$GCC"; then
+-	case $host_os in aix4.[[012]]|aix4.[[012]].*)
+-	# We only want to do this on AIX 4.2 and lower, the check
+-	# below for broken collect2 doesn't work under 4.3+
+-	  collect2name=`$CC -print-prog-name=collect2`
+-	  if test -f "$collect2name" &&
+-	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+-	  then
+-	  # We have reworked collect2
+-	  :
+-	  else
+-	  # We have old collect2
+-	  _LT_TAGVAR(hardcode_direct, $1)=unsupported
+-	  # It fails to find uninstalled libraries when the uninstalled
+-	  # path is not listed in the libpath.  Setting hardcode_minus_L
+-	  # to unsupported forces relinking
+-	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-	  _LT_TAGVAR(hardcode_libdir_separator, $1)=
+-	  fi
+-	  ;;
+-	esac
+-	shared_flag='-shared'
+-	if test yes = "$aix_use_runtimelinking"; then
+-	  shared_flag="$shared_flag "'$wl-G'
+-	fi
+-	# Need to ensure runtime linking is disabled for the traditional
+-	# shared library, or the linker may eventually find shared libraries
+-	# /with/ Import File - we do not want to mix them.
+-	shared_flag_aix='-shared'
+-	shared_flag_svr4='-shared $wl-G'
+-      else
+-	# not using gcc
+-	if test ia64 = "$host_cpu"; then
+-	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+-	# chokes on -Wl,-G. The following line is correct:
+-	  shared_flag='-G'
+-	else
+-	  if test yes = "$aix_use_runtimelinking"; then
+-	    shared_flag='$wl-G'
+-	  else
+-	    shared_flag='$wl-bM:SRE'
+-	  fi
+-	  shared_flag_aix='$wl-bM:SRE'
+-	  shared_flag_svr4='$wl-G'
+-	fi
+-      fi
+-
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall'
+-      # It seems that -bexpall does not export symbols beginning with
+-      # underscore (_), so it is better to generate a list of symbols to export.
+-      _LT_TAGVAR(always_export_symbols, $1)=yes
+-      if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
+-	# Warning - without using the other runtime loading flags (-brtl),
+-	# -berok will link without error, but may produce a broken library.
+-	_LT_TAGVAR(allow_undefined_flag, $1)='-berok'
+-        # Determine the default libpath from the value encoded in an
+-        # empty executable.
+-        _LT_SYS_MODULE_PATH_AIX([$1])
+-        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+-        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
+-      else
+-	if test ia64 = "$host_cpu"; then
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib'
+-	  _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
+-	else
+-	 # Determine the default libpath from the value encoded in an
+-	 # empty executable.
+-	 _LT_SYS_MODULE_PATH_AIX([$1])
+-	 _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+-	  # Warning - without using the other run time loading flags,
+-	  # -berok will link without error, but may produce a broken library.
+-	  _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok'
+-	  _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok'
+-	  if test yes = "$with_gnu_ld"; then
+-	    # We only use this code for GNU lds that support --whole-archive.
+-	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
+-	  else
+-	    # Exported symbols can be pulled into shared objects from archives
+-	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+-	  fi
+-	  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+-	  # -brtl affects multiple linker settings, -berok does not and is overridden later
+-	  compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`'
+-	  if test svr4 != "$with_aix_soname"; then
+-	    # This is similar to how AIX traditionally builds its shared libraries.
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+-	  fi
+-	  if test aix != "$with_aix_soname"; then
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+-	  else
+-	    # used by -dlpreopen to get the symbols
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+-	  fi
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d'
+-	fi
+-      fi
+-      ;;
+-
+-    amigaos*)
+-      case $host_cpu in
+-      powerpc)
+-            # see comment about AmigaOS4 .so support
+-            _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-            _LT_TAGVAR(archive_expsym_cmds, $1)=''
+-        ;;
+-      m68k)
+-            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)'
+-            _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-            _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-        ;;
+-      esac
+-      ;;
+-
+-    bsdi[[45]]*)
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic
+-      ;;
+-
+-    cygwin* | mingw* | pw32* | cegcc*)
+-      # When not using gcc, we currently assume that we are using
+-      # Microsoft Visual C++ or Intel C++ Compiler.
+-      # hardcode_libdir_flag_spec is actually meaningless, as there is
+-      # no search path for DLLs.
+-      case $cc_basename in
+-      cl* | icl*)
+-	# Native MSVC or ICC
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+-	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-	_LT_TAGVAR(always_export_symbols, $1)=yes
+-	_LT_TAGVAR(file_list_spec, $1)='@'
+-	# Tell ltmain to make .lib files, not .a files.
+-	libext=lib
+-	# Tell ltmain to make .dll files, not .so files.
+-	shrext_cmds=.dll
+-	# FIXME: Setting linknames here is a bad hack.
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+-            cp "$export_symbols" "$output_objdir/$soname.def";
+-            echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+-          else
+-            $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+-          fi~
+-          $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+-          linknames='
+-	# The linker will not automatically build a static lib if we build a DLL.
+-	# _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+-	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-	_LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+-	_LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols'
+-	# Don't use ranlib
+-	_LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+-	_LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+-          lt_tool_outputfile="@TOOL_OUTPUT@"~
+-          case $lt_outputfile in
+-            *.exe|*.EXE) ;;
+-            *)
+-              lt_outputfile=$lt_outputfile.exe
+-              lt_tool_outputfile=$lt_tool_outputfile.exe
+-              ;;
+-          esac~
+-          if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+-            $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+-            $RM "$lt_outputfile.manifest";
+-          fi'
+-	;;
+-      *)
+-	# Assume MSVC and ICC wrapper
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+-	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-	# Tell ltmain to make .lib files, not .a files.
+-	libext=lib
+-	# Tell ltmain to make .dll files, not .so files.
+-	shrext_cmds=.dll
+-	# FIXME: Setting linknames here is a bad hack.
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+-	# The linker will automatically build a .lib file if we build a DLL.
+-	_LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+-	# FIXME: Should let the user specify the lib program.
+-	_LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs'
+-	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-	;;
+-      esac
+-      ;;
+-
+-    darwin* | rhapsody*)
+-      _LT_DARWIN_LINKER_FEATURES($1)
+-      ;;
+-
+-    dgux*)
+-      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor
+-    # support.  Future versions do this automatically, but an explicit c++rt0.o
+-    # does not break anything, and helps significantly (at the cost of a little
+-    # extra space).
+-    freebsd2.2*)
+-      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o'
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    # Unfortunately, older versions of FreeBSD 2 do not have this feature.
+-    freebsd2.*)
+-      _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+-    freebsd* | dragonfly* | midnightbsd*)
+-      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    hpux9*)
+-      if test yes = "$GCC"; then
+-	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+-      fi
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+-      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-
+-      # hardcode_minus_L: Not really in the search PATH,
+-      # but as the default location of the library.
+-      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-      ;;
+-
+-    hpux10*)
+-      if test yes,no = "$GCC,$with_gnu_ld"; then
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+-      fi
+-      if test no = "$with_gnu_ld"; then
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+-	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-	_LT_TAGVAR(hardcode_direct, $1)=yes
+-	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+-	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-	# hardcode_minus_L: Not really in the search PATH,
+-	# but as the default location of the library.
+-	_LT_TAGVAR(hardcode_minus_L, $1)=yes
+-      fi
+-      ;;
+-
+-    hpux11*)
+-      if test yes,no = "$GCC,$with_gnu_ld"; then
+-	case $host_cpu in
+-	hppa*64*)
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  ;;
+-	ia64*)
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+-	  ;;
+-	*)
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+-	  ;;
+-	esac
+-      else
+-	case $host_cpu in
+-	hppa*64*)
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  ;;
+-	ia64*)
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+-	  ;;
+-	*)
+-	m4_if($1, [], [
+-	  # Older versions of the 11.00 compiler do not understand -b yet
+-	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+-	  _LT_LINKER_OPTION([if $CC understands -b],
+-	    _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b],
+-	    [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'],
+-	    [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])],
+-	  [_LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'])
+-	  ;;
+-	esac
+-      fi
+-      if test no = "$with_gnu_ld"; then
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+-	_LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-
+-	case $host_cpu in
+-	hppa*64*|ia64*)
+-	  _LT_TAGVAR(hardcode_direct, $1)=no
+-	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	  ;;
+-	*)
+-	  _LT_TAGVAR(hardcode_direct, $1)=yes
+-	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+-	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-
+-	  # hardcode_minus_L: Not really in the search PATH,
+-	  # but as the default location of the library.
+-	  _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-	  ;;
+-	esac
+-      fi
+-      ;;
+-
+-    irix5* | irix6* | nonstopux*)
+-      if test yes = "$GCC"; then
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+-	# Try to use the -exported_symbol ld option, if it does not
+-	# work, assume that -exports_file does not work either and
+-	# implicitly export all symbols.
+-	# This should be the same for all languages, so no per-tag cache variable.
+-	AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol],
+-	  [lt_cv_irix_exported_symbol],
+-	  [save_LDFLAGS=$LDFLAGS
+-	   LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null"
+-	   AC_LINK_IFELSE(
+-	     [AC_LANG_SOURCE(
+-	        [AC_LANG_CASE([C], [[int foo (void) { return 0; }]],
+-			      [C++], [[int foo (void) { return 0; }]],
+-			      [Fortran 77], [[
+-      subroutine foo
+-      end]],
+-			      [Fortran], [[
+-      subroutine foo
+-      end]])])],
+-	      [lt_cv_irix_exported_symbol=yes],
+-	      [lt_cv_irix_exported_symbol=no])
+-           LDFLAGS=$save_LDFLAGS])
+-	if test yes = "$lt_cv_irix_exported_symbol"; then
+-          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib'
+-	fi
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib'
+-      fi
+-      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-      _LT_TAGVAR(inherit_rpath, $1)=yes
+-      _LT_TAGVAR(link_all_deplibs, $1)=yes
+-      ;;
+-
+-    linux*)
+-      case $cc_basename in
+-      tcc*)
+-	# Fabrice Bellard et al's Tiny C Compiler
+-	_LT_TAGVAR(ld_shlibs, $1)=yes
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+-	;;
+-      esac
+-      ;;
+-
+-    netbsd*)
+-      if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+-	_LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags'      # ELF
+-      fi
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    newsos6)
+-      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    *nto* | *qnx*)
+-      ;;
+-
+-    openbsd* | bitrig*)
+-      if test -f /usr/libexec/ld.so; then
+-	_LT_TAGVAR(hardcode_direct, $1)=yes
+-	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	_LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+-	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols'
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-	else
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-	fi
+-      else
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-      fi
+-      ;;
+-
+-    os2*)
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-      _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-      shrext_cmds=.dll
+-      _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      _LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	prefix_cmds="$SED"~
+-	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+-	  prefix_cmds="$prefix_cmds -e 1d";
+-	fi~
+-	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+-	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      _LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+-      _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-      _LT_TAGVAR(file_list_spec, $1)='@'
+-      ;;
+-
+-    osf3*)
+-      if test yes = "$GCC"; then
+-	_LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+-      else
+-	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-      fi
+-      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-      ;;
+-
+-    osf4* | osf5*)	# as osf3* with the addition of -msym flag
+-      if test yes = "$GCC"; then
+-	_LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-      else
+-	_LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+-          $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp'
+-
+-	# Both c and cxx compiler support -rpath directly
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+-      fi
+-      _LT_TAGVAR(archive_cmds_need_lc, $1)='no'
+-      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-      ;;
+-
+-    solaris*)
+-      _LT_TAGVAR(no_undefined_flag, $1)=' -z defs'
+-      if test yes = "$GCC"; then
+-	wlarc='$wl'
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-          $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+-      else
+-	case `$CC -V 2>&1` in
+-	*"Compilers 5.0"*)
+-	  wlarc=''
+-	  _LT_TAGVAR(archive_cmds, $1)='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-            $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+-	  ;;
+-	*)
+-	  wlarc='$wl'
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-            $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+-	  ;;
+-	esac
+-      fi
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      case $host_os in
+-      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+-      *)
+-	# The compiler driver will combine and reorder linker options,
+-	# but understands '-z linker_flag'.  GCC discards it without '$wl',
+-	# but is careful enough not to reorder.
+-	# Supported since Solaris 2.6 (maybe 2.5.1?)
+-	if test yes = "$GCC"; then
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
+-	else
+-	  _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+-	fi
+-	;;
+-      esac
+-      _LT_TAGVAR(link_all_deplibs, $1)=yes
+-      ;;
+-
+-    sunos4*)
+-      if test sequent = "$host_vendor"; then
+-	# Use $CC to link under sequent, because it throws in some extra .o
+-	# files that make .init and .fini sections work.
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+-      fi
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-      _LT_TAGVAR(hardcode_direct, $1)=yes
+-      _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    sysv4)
+-      case $host_vendor in
+-	sni)
+-	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-	  _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true???
+-	;;
+-	siemens)
+-	  ## LD is ld it makes a PLAMLIB
+-	  ## CC just makes a GrossModule.
+-	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags'
+-	  _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs'
+-	  _LT_TAGVAR(hardcode_direct, $1)=no
+-        ;;
+-	motorola)
+-	  _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-	  _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie
+-	;;
+-      esac
+-      runpath_var='LD_RUN_PATH'
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    sysv4.3*)
+-      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport'
+-      ;;
+-
+-    sysv4*MP*)
+-      if test -d /usr/nec; then
+-	_LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	runpath_var=LD_RUN_PATH
+-	hardcode_runpath_var=yes
+-	_LT_TAGVAR(ld_shlibs, $1)=yes
+-      fi
+-      ;;
+-
+-    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+-      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+-      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      runpath_var='LD_RUN_PATH'
+-
+-      if test yes = "$GCC"; then
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-      fi
+-      ;;
+-
+-    sysv5* | sco3.2v5* | sco5v6*)
+-      # Note: We CANNOT use -z defs as we might desire, because we do not
+-      # link with -lc, and that would cause any symbols used from libc to
+-      # always be unresolved, which means just about no library would
+-      # ever link correctly.  If we're not using GNU ld we use -z text
+-      # though, which does catch some bad symbols but isn't as heavy-handed
+-      # as -z defs.
+-      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+-      _LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs'
+-      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir'
+-      _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+-      _LT_TAGVAR(link_all_deplibs, $1)=yes
+-      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport'
+-      runpath_var='LD_RUN_PATH'
+-
+-      if test yes = "$GCC"; then
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-      else
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-      fi
+-      ;;
+-
+-    uts4*)
+-      _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+-      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      ;;
+-
+-    *)
+-      _LT_TAGVAR(ld_shlibs, $1)=no
+-      ;;
+-    esac
+-
+-    if test sni = "$host_vendor"; then
+-      case $host in
+-      sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+-	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Blargedynsym'
+-	;;
+-      esac
+-    fi
+-  fi
+-])
+-AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+-test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no
+-
+-_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld
+-
+-_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl
+-_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl
+-_LT_DECL([], [extract_expsyms_cmds], [2],
+-    [The commands to extract the exported symbol list from a shared archive])
+-
+-#
+-# Do we need to explicitly link libc?
+-#
+-case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in
+-x|xyes)
+-  # Assume -lc should be added
+-  _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+-
+-  if test yes,yes = "$GCC,$enable_shared"; then
+-    case $_LT_TAGVAR(archive_cmds, $1) in
+-    *'~'*)
+-      # FIXME: we may have to deal with multi-command sequences.
+-      ;;
+-    '$CC '*)
+-      # Test whether the compiler implicitly links with -lc since on some
+-      # systems, -lgcc has to come before -lc. If gcc already passes -lc
+-      # to ld, don't add -lc before -lgcc.
+-      AC_CACHE_CHECK([whether -lc should be explicitly linked in],
+-	[lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1),
+-	[$RM conftest*
+-	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+-
+-	if AC_TRY_EVAL(ac_compile) 2>conftest.err; then
+-	  soname=conftest
+-	  lib=conftest
+-	  libobjs=conftest.$ac_objext
+-	  deplibs=
+-	  wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1)
+-	  pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1)
+-	  compiler_flags=-v
+-	  linker_flags=-v
+-	  verstring=
+-	  output_objdir=.
+-	  libname=conftest
+-	  lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1)
+-	  _LT_TAGVAR(allow_undefined_flag, $1)=
+-	  if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1)
+-	  then
+-	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-	  else
+-	    lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+-	  fi
+-	  _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag
+-	else
+-	  cat conftest.err 1>&5
+-	fi
+-	$RM conftest*
+-	])
+-      _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)
+-      ;;
+-    esac
+-  fi
+-  ;;
+-esac
+-
+-_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0],
+-    [Whether or not to add -lc for building shared libraries])
+-_LT_TAGDECL([allow_libtool_libs_with_static_runtimes],
+-    [enable_shared_with_static_runtimes], [0],
+-    [Whether or not to disallow shared libs when runtime libs are static])
+-_LT_TAGDECL([], [export_dynamic_flag_spec], [1],
+-    [Compiler flag to allow reflexive dlopens])
+-_LT_TAGDECL([], [whole_archive_flag_spec], [1],
+-    [Compiler flag to generate shared objects directly from archives])
+-_LT_TAGDECL([], [compiler_needs_object], [1],
+-    [Whether the compiler copes with passing no objects directly])
+-_LT_TAGDECL([], [old_archive_from_new_cmds], [2],
+-    [Create an old-style archive from a shared archive])
+-_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2],
+-    [Create a temporary old-style archive to link instead of a shared archive])
+-_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive])
+-_LT_TAGDECL([], [archive_expsym_cmds], [2])
+-_LT_TAGDECL([], [module_cmds], [2],
+-    [Commands used to build a loadable module if different from building
+-    a shared archive.])
+-_LT_TAGDECL([], [module_expsym_cmds], [2])
+-_LT_TAGDECL([], [with_gnu_ld], [1],
+-    [Whether we are building with GNU ld or not])
+-_LT_TAGDECL([], [allow_undefined_flag], [1],
+-    [Flag that allows shared libraries with undefined symbols to be built])
+-_LT_TAGDECL([], [no_undefined_flag], [1],
+-    [Flag that enforces no undefined symbols])
+-_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1],
+-    [Flag to hardcode $libdir into a binary during linking.
+-    This must work even if $libdir does not exist])
+-_LT_TAGDECL([], [hardcode_libdir_separator], [1],
+-    [Whether we need a single "-rpath" flag with a separated argument])
+-_LT_TAGDECL([], [hardcode_direct], [0],
+-    [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+-    DIR into the resulting binary])
+-_LT_TAGDECL([], [hardcode_direct_absolute], [0],
+-    [Set to "yes" if using DIR/libNAME$shared_ext during linking hardcodes
+-    DIR into the resulting binary and the resulting library dependency is
+-    "absolute", i.e impossible to change by setting $shlibpath_var if the
+-    library is relocated])
+-_LT_TAGDECL([], [hardcode_minus_L], [0],
+-    [Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+-    into the resulting binary])
+-_LT_TAGDECL([], [hardcode_shlibpath_var], [0],
+-    [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+-    into the resulting binary])
+-_LT_TAGDECL([], [hardcode_automatic], [0],
+-    [Set to "yes" if building a shared library automatically hardcodes DIR
+-    into the library and all subsequent libraries and executables linked
+-    against it])
+-_LT_TAGDECL([], [inherit_rpath], [0],
+-    [Set to yes if linker adds runtime paths of dependent libraries
+-    to runtime path list])
+-_LT_TAGDECL([], [link_all_deplibs], [0],
+-    [Whether libtool must link a program against all its dependency libraries])
+-_LT_TAGDECL([], [always_export_symbols], [0],
+-    [Set to "yes" if exported symbols are required])
+-_LT_TAGDECL([], [export_symbols_cmds], [2],
+-    [The commands to list exported symbols])
+-_LT_TAGDECL([], [exclude_expsyms], [1],
+-    [Symbols that should not be listed in the preloaded symbols])
+-_LT_TAGDECL([], [include_expsyms], [1],
+-    [Symbols that must always be exported])
+-_LT_TAGDECL([], [prelink_cmds], [2],
+-    [Commands necessary for linking programs (against libraries) with templates])
+-_LT_TAGDECL([], [postlink_cmds], [2],
+-    [Commands necessary for finishing linking programs])
+-_LT_TAGDECL([], [file_list_spec], [1],
+-    [Specify filename containing input files])
+-dnl FIXME: Not yet implemented
+-dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
+-dnl    [Compiler flag to generate thread safe objects])
+-])# _LT_LINKER_SHLIBS
+-
+-
+-# _LT_LANG_C_CONFIG([TAG])
+-# ------------------------
+-# Ensure that the configuration variables for a C compiler are suitably
+-# defined.  These variables are subsequently used by _LT_CONFIG to write
+-# the compiler configuration to 'libtool'.
+-m4_defun([_LT_LANG_C_CONFIG],
+-[m4_require([_LT_DECL_EGREP])dnl
+-lt_save_CC=$CC
+-AC_LANG_PUSH(C)
+-
+-# Source file extension for C test sources.
+-ac_ext=c
+-
+-# Object file extension for compiled C test sources.
+-objext=o
+-_LT_TAGVAR(objext, $1)=$objext
+-
+-# Code to be used in simple compile tests
+-lt_simple_compile_test_code="int some_variable = 0;"
+-
+-# Code to be used in simple link tests
+-lt_simple_link_test_code='int main(){return(0);}'
+-
+-_LT_TAG_COMPILER
+-# Save the default compiler, since it gets overwritten when the other
+-# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+-compiler_DEFAULT=$CC
+-
+-# save warnings/boilerplate of simple test code
+-_LT_COMPILER_BOILERPLATE
+-_LT_LINKER_BOILERPLATE
+-
+-if test -n "$compiler"; then
+-  _LT_COMPILER_NO_RTTI($1)
+-  _LT_COMPILER_PIC($1)
+-  _LT_COMPILER_C_O($1)
+-  _LT_COMPILER_FILE_LOCKS($1)
+-  _LT_LINKER_SHLIBS($1)
+-  _LT_SYS_DYNAMIC_LINKER($1)
+-  _LT_LINKER_HARDCODE_LIBPATH($1)
+-  LT_SYS_DLOPEN_SELF
+-  _LT_CMD_STRIPLIB
+-
+-  # Report what library types will actually be built
+-  AC_MSG_CHECKING([if libtool supports shared libraries])
+-  AC_MSG_RESULT([$can_build_shared])
+-
+-  AC_MSG_CHECKING([whether to build shared libraries])
+-  test no = "$can_build_shared" && enable_shared=no
+-
+-  # On AIX, shared libraries and static libraries use the same namespace, and
+-  # are all built from PIC.
+-  case $host_os in
+-  aix3*)
+-    test yes = "$enable_shared" && enable_static=no
+-    if test -n "$RANLIB"; then
+-      archive_cmds="$archive_cmds~\$RANLIB \$lib"
+-      postinstall_cmds='$RANLIB $lib'
+-    fi
+-    ;;
+-
+-  aix[[4-9]]*)
+-    if test ia64 != "$host_cpu"; then
+-      case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+-      yes,aix,yes) ;;			# shared object as lib.so file only
+-      yes,svr4,*) ;;			# shared object as lib.so archive member only
+-      yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+-      esac
+-    fi
+-    ;;
+-  esac
+-  AC_MSG_RESULT([$enable_shared])
+-
+-  AC_MSG_CHECKING([whether to build static libraries])
+-  # Make sure either enable_shared or enable_static is yes.
+-  test yes = "$enable_shared" || enable_static=yes
+-  AC_MSG_RESULT([$enable_static])
+-
+-  _LT_CONFIG($1)
+-fi
+-AC_LANG_POP
+-CC=$lt_save_CC
+-])# _LT_LANG_C_CONFIG
+-
+-
+-# _LT_LANG_CXX_CONFIG([TAG])
+-# --------------------------
+-# Ensure that the configuration variables for a C++ compiler are suitably
+-# defined.  These variables are subsequently used by _LT_CONFIG to write
+-# the compiler configuration to 'libtool'.
+-m4_defun([_LT_LANG_CXX_CONFIG],
+-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-m4_require([_LT_DECL_EGREP])dnl
+-m4_require([_LT_PATH_MANIFEST_TOOL])dnl
+-if test -n "$CXX" && ( test no != "$CXX" &&
+-    ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) ||
+-    (test g++ != "$CXX"))); then
+-  AC_PROG_CXXCPP
+-else
+-  _lt_caught_CXX_error=yes
+-fi
+-
+-AC_LANG_PUSH(C++)
+-_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-_LT_TAGVAR(allow_undefined_flag, $1)=
+-_LT_TAGVAR(always_export_symbols, $1)=no
+-_LT_TAGVAR(archive_expsym_cmds, $1)=
+-_LT_TAGVAR(compiler_needs_object, $1)=no
+-_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+-_LT_TAGVAR(hardcode_direct, $1)=no
+-_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+-_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+-_LT_TAGVAR(hardcode_libdir_separator, $1)=
+-_LT_TAGVAR(hardcode_minus_L, $1)=no
+-_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported
+-_LT_TAGVAR(hardcode_automatic, $1)=no
+-_LT_TAGVAR(inherit_rpath, $1)=no
+-_LT_TAGVAR(module_cmds, $1)=
+-_LT_TAGVAR(module_expsym_cmds, $1)=
+-_LT_TAGVAR(link_all_deplibs, $1)=unknown
+-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+-_LT_TAGVAR(reload_flag, $1)=$reload_flag
+-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+-_LT_TAGVAR(no_undefined_flag, $1)=
+-_LT_TAGVAR(whole_archive_flag_spec, $1)=
+-_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+-
+-# Source file extension for C++ test sources.
+-ac_ext=cpp
+-
+-# Object file extension for compiled C++ test sources.
+-objext=o
+-_LT_TAGVAR(objext, $1)=$objext
+-
+-# No sense in running all these tests if we already determined that
+-# the CXX compiler isn't working.  Some variables (like enable_shared)
+-# are currently assumed to apply to all compilers on this platform,
+-# and will be corrupted by setting them based on a non-working compiler.
+-if test yes != "$_lt_caught_CXX_error"; then
+-  # Code to be used in simple compile tests
+-  lt_simple_compile_test_code="int some_variable = 0;"
+-
+-  # Code to be used in simple link tests
+-  lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }'
+-
+-  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+-  _LT_TAG_COMPILER
+-
+-  # save warnings/boilerplate of simple test code
+-  _LT_COMPILER_BOILERPLATE
+-  _LT_LINKER_BOILERPLATE
+-
+-  # Allow CC to be a program name with arguments.
+-  lt_save_CC=$CC
+-  lt_save_CFLAGS=$CFLAGS
+-  lt_save_LD=$LD
+-  lt_save_GCC=$GCC
+-  GCC=$GXX
+-  lt_save_with_gnu_ld=$with_gnu_ld
+-  lt_save_path_LD=$lt_cv_path_LD
+-  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+-    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+-  else
+-    $as_unset lt_cv_prog_gnu_ld
+-  fi
+-  if test -n "${lt_cv_path_LDCXX+set}"; then
+-    lt_cv_path_LD=$lt_cv_path_LDCXX
+-  else
+-    $as_unset lt_cv_path_LD
+-  fi
+-  test -z "${LDCXX+set}" || LD=$LDCXX
+-  CC=${CXX-"c++"}
+-  CFLAGS=$CXXFLAGS
+-  compiler=$CC
+-  _LT_TAGVAR(compiler, $1)=$CC
+-  _LT_CC_BASENAME([$compiler])
+-
+-  if test -n "$compiler"; then
+-    # We don't want -fno-exception when compiling C++ code, so set the
+-    # no_builtin_flag separately
+-    if test yes = "$GXX"; then
+-      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin'
+-    else
+-      _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+-    fi
+-
+-    if test yes = "$GXX"; then
+-      # Set up default GNU C++ configuration
+-
+-      LT_PATH_LD
+-
+-      # Check if GNU C++ uses GNU ld as the underlying linker, since the
+-      # archiving commands below assume that GNU ld is being used.
+-      if test yes = "$with_gnu_ld"; then
+-        _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-        _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-
+-        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+-
+-        # If archive_cmds runs LD, not CC, wlarc should be empty
+-        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+-        #     investigate it a little bit more. (MM)
+-        wlarc='$wl'
+-
+-        # ancient GNU ld didn't support --whole-archive et. al.
+-        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+-	  $GREP 'no-whole-archive' > /dev/null; then
+-          _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+-        else
+-          _LT_TAGVAR(whole_archive_flag_spec, $1)=
+-        fi
+-      else
+-        with_gnu_ld=no
+-        wlarc=
+-
+-        # A generic and very simple default shared library creation
+-        # command for GNU C++ for the case where it uses the native
+-        # linker, instead of GNU ld.  If possible, this setting should
+-        # overridden to take advantage of the native linker features on
+-        # the platform it is being used on.
+-        _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+-      fi
+-
+-      # Commands to make compiler produce verbose output that lists
+-      # what "hidden" libraries, object files and flags are used when
+-      # linking a shared library.
+-      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+-
+-    else
+-      GXX=no
+-      with_gnu_ld=no
+-      wlarc=
+-    fi
+-
+-    # PORTME: fill in a description of your system's C++ link characteristics
+-    AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries])
+-    _LT_TAGVAR(ld_shlibs, $1)=yes
+-    case $host_os in
+-      aix3*)
+-        # FIXME: insert proper C++ library support
+-        _LT_TAGVAR(ld_shlibs, $1)=no
+-        ;;
+-      aix[[4-9]]*)
+-        if test ia64 = "$host_cpu"; then
+-          # On IA64, the linker does run time linking by default, so we don't
+-          # have to do anything special.
+-          aix_use_runtimelinking=no
+-          exp_sym_flag='-Bexport'
+-          no_entry_flag=
+-        else
+-          aix_use_runtimelinking=no
+-
+-          # Test if we are trying to use run time linking or normal
+-          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+-          # have runtime linking enabled, and use it for executables.
+-          # For shared libraries, we enable/disable runtime linking
+-          # depending on the kind of the shared library created -
+-          # when "with_aix_soname,aix_use_runtimelinking" is:
+-          # "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+-          # "aix,yes"  lib.so          shared, rtl:yes, for executables
+-          #            lib.a           static archive
+-          # "both,no"  lib.so.V(shr.o) shared, rtl:yes
+-          #            lib.a(lib.so.V) shared, rtl:no,  for executables
+-          # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+-          #            lib.a(lib.so.V) shared, rtl:no
+-          # "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+-          #            lib.a           static archive
+-          case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*)
+-	    for ld_flag in $LDFLAGS; do
+-	      case $ld_flag in
+-	      *-brtl*)
+-	        aix_use_runtimelinking=yes
+-	        break
+-	        ;;
+-	      esac
+-	    done
+-	    if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+-	      # With aix-soname=svr4, we create the lib.so.V shared archives only,
+-	      # so we don't have lib.a shared libs to link our executables.
+-	      # We have to force runtime linking in this case.
+-	      aix_use_runtimelinking=yes
+-	      LDFLAGS="$LDFLAGS -Wl,-brtl"
+-	    fi
+-	    ;;
+-          esac
+-
+-          exp_sym_flag='-bexport'
+-          no_entry_flag='-bnoentry'
+-        fi
+-
+-        # When large executables or shared objects are built, AIX ld can
+-        # have problems creating the table of contents.  If linking a library
+-        # or program results in "error TOC overflow" add -mminimal-toc to
+-        # CXXFLAGS/CFLAGS for g++/gcc.  In the cases where that is not
+-        # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS.
+-
+-        _LT_TAGVAR(archive_cmds, $1)=''
+-        _LT_TAGVAR(hardcode_direct, $1)=yes
+-        _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+-        _LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+-        _LT_TAGVAR(link_all_deplibs, $1)=yes
+-        _LT_TAGVAR(file_list_spec, $1)='$wl-f,'
+-        case $with_aix_soname,$aix_use_runtimelinking in
+-        aix,*) ;;	# no import file
+-        svr4,* | *,yes) # use import file
+-          # The Import File defines what to hardcode.
+-          _LT_TAGVAR(hardcode_direct, $1)=no
+-          _LT_TAGVAR(hardcode_direct_absolute, $1)=no
+-          ;;
+-        esac
+-
+-        if test yes = "$GXX"; then
+-          case $host_os in aix4.[[012]]|aix4.[[012]].*)
+-          # We only want to do this on AIX 4.2 and lower, the check
+-          # below for broken collect2 doesn't work under 4.3+
+-	  collect2name=`$CC -print-prog-name=collect2`
+-	  if test -f "$collect2name" &&
+-	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+-	  then
+-	    # We have reworked collect2
+-	    :
+-	  else
+-	    # We have old collect2
+-	    _LT_TAGVAR(hardcode_direct, $1)=unsupported
+-	    # It fails to find uninstalled libraries when the uninstalled
+-	    # path is not listed in the libpath.  Setting hardcode_minus_L
+-	    # to unsupported forces relinking
+-	    _LT_TAGVAR(hardcode_minus_L, $1)=yes
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-	    _LT_TAGVAR(hardcode_libdir_separator, $1)=
+-	  fi
+-          esac
+-          shared_flag='-shared'
+-	  if test yes = "$aix_use_runtimelinking"; then
+-	    shared_flag=$shared_flag' $wl-G'
+-	  fi
+-	  # Need to ensure runtime linking is disabled for the traditional
+-	  # shared library, or the linker may eventually find shared libraries
+-	  # /with/ Import File - we do not want to mix them.
+-	  shared_flag_aix='-shared'
+-	  shared_flag_svr4='-shared $wl-G'
+-        else
+-          # not using gcc
+-          if test ia64 = "$host_cpu"; then
+-	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+-	  # chokes on -Wl,-G. The following line is correct:
+-	  shared_flag='-G'
+-          else
+-	    if test yes = "$aix_use_runtimelinking"; then
+-	      shared_flag='$wl-G'
+-	    else
+-	      shared_flag='$wl-bM:SRE'
+-	    fi
+-	    shared_flag_aix='$wl-bM:SRE'
+-	    shared_flag_svr4='$wl-G'
+-          fi
+-        fi
+-
+-        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-bexpall'
+-        # It seems that -bexpall does not export symbols beginning with
+-        # underscore (_), so it is better to generate a list of symbols to
+-	# export.
+-        _LT_TAGVAR(always_export_symbols, $1)=yes
+-	if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
+-          # Warning - without using the other runtime loading flags (-brtl),
+-          # -berok will link without error, but may produce a broken library.
+-          # The "-G" linker flag allows undefined symbols.
+-          _LT_TAGVAR(no_undefined_flag, $1)='-bernotok'
+-          # Determine the default libpath from the value encoded in an empty
+-          # executable.
+-          _LT_SYS_MODULE_PATH_AIX([$1])
+-          _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+-
+-          _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
+-        else
+-          if test ia64 = "$host_cpu"; then
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $libdir:/usr/lib:/lib'
+-	    _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs"
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
+-          else
+-	    # Determine the default libpath from the value encoded in an
+-	    # empty executable.
+-	    _LT_SYS_MODULE_PATH_AIX([$1])
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-blibpath:$libdir:'"$aix_libpath"
+-	    # Warning - without using the other run time loading flags,
+-	    # -berok will link without error, but may produce a broken library.
+-	    _LT_TAGVAR(no_undefined_flag, $1)=' $wl-bernotok'
+-	    _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-berok'
+-	    if test yes = "$with_gnu_ld"; then
+-	      # We only use this code for GNU lds that support --whole-archive.
+-	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
+-	    else
+-	      # Exported symbols can be pulled into shared objects from archives
+-	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience'
+-	    fi
+-	    _LT_TAGVAR(archive_cmds_need_lc, $1)=yes
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+-	    # -brtl affects multiple linker settings, -berok does not and is overridden later
+-	    compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([[, ]]\\)%-berok\\1%g"`'
+-	    if test svr4 != "$with_aix_soname"; then
+-	      # This is similar to how AIX traditionally builds its shared
+-	      # libraries. Need -bnortl late, we may have -brtl in LDFLAGS.
+-	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+-	    fi
+-	    if test aix != "$with_aix_soname"; then
+-	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+-	    else
+-	      # used by -dlpreopen to get the symbols
+-	      _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+-	    fi
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)="$_LT_TAGVAR(archive_expsym_cmds, $1)"'~$RM -r $output_objdir/$realname.d'
+-          fi
+-        fi
+-        ;;
+-
+-      beos*)
+-	if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-	  # Joseph Beckenbach  says some releases of gcc
+-	  # support --undefined.  This deserves some investigation.  FIXME
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	else
+-	  _LT_TAGVAR(ld_shlibs, $1)=no
+-	fi
+-	;;
+-
+-      chorus*)
+-        case $cc_basename in
+-          *)
+-	  # FIXME: insert proper C++ library support
+-	  _LT_TAGVAR(ld_shlibs, $1)=no
+-	  ;;
+-        esac
+-        ;;
+-
+-      cygwin* | mingw* | pw32* | cegcc*)
+-	case $GXX,$cc_basename in
+-	,cl* | no,cl* | ,icl* | no,icl*)
+-	  # Native MSVC or ICC
+-	  # hardcode_libdir_flag_spec is actually meaningless, as there is
+-	  # no search path for DLLs.
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' '
+-	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-	  _LT_TAGVAR(always_export_symbols, $1)=yes
+-	  _LT_TAGVAR(file_list_spec, $1)='@'
+-	  # Tell ltmain to make .lib files, not .a files.
+-	  libext=lib
+-	  # Tell ltmain to make .dll files, not .so files.
+-	  shrext_cmds=.dll
+-	  # FIXME: Setting linknames here is a bad hack.
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+-              cp "$export_symbols" "$output_objdir/$soname.def";
+-              echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+-            else
+-              $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+-            fi~
+-            $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+-            linknames='
+-	  # The linker will not automatically build a static lib if we build a DLL.
+-	  # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true'
+-	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-	  # Don't use ranlib
+-	  _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib'
+-	  _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~
+-            lt_tool_outputfile="@TOOL_OUTPUT@"~
+-            case $lt_outputfile in
+-              *.exe|*.EXE) ;;
+-              *)
+-                lt_outputfile=$lt_outputfile.exe
+-                lt_tool_outputfile=$lt_tool_outputfile.exe
+-                ;;
+-            esac~
+-            func_to_tool_file "$lt_outputfile"~
+-            if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+-              $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+-              $RM "$lt_outputfile.manifest";
+-            fi'
+-	  ;;
+-	*)
+-	  # g++
+-	  # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless,
+-	  # as there is no search path for DLLs.
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-	  _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-all-symbols'
+-	  _LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-	  _LT_TAGVAR(always_export_symbols, $1)=no
+-	  _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-
+-	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+-	    # If the export-symbols file already is a .def file, use it as
+-	    # is; otherwise, prepend EXPORTS...
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='if _LT_DLL_DEF_P([$export_symbols]); then
+-              cp $export_symbols $output_objdir/$soname.def;
+-            else
+-              echo EXPORTS > $output_objdir/$soname.def;
+-              cat $export_symbols >> $output_objdir/$soname.def;
+-            fi~
+-            $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+-	  else
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	  fi
+-	  ;;
+-	esac
+-	;;
+-      darwin* | rhapsody*)
+-        _LT_DARWIN_LINKER_FEATURES($1)
+-	;;
+-
+-      os2*)
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir'
+-	_LT_TAGVAR(hardcode_minus_L, $1)=yes
+-	_LT_TAGVAR(allow_undefined_flag, $1)=unsupported
+-	shrext_cmds=.dll
+-	_LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+-	  emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+-	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	  emximp -o $lib $output_objdir/$libname.def'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+-	  prefix_cmds="$SED"~
+-	  if test EXPORTS = "`$SED 1q $export_symbols`"; then
+-	    prefix_cmds="$prefix_cmds -e 1d";
+-	  fi~
+-	  prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+-	  cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+-	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	  emximp -o $lib $output_objdir/$libname.def'
+-	_LT_TAGVAR(old_archive_From_new_cmds, $1)='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+-	_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes
+-	_LT_TAGVAR(file_list_spec, $1)='@'
+-	;;
+-
+-      dgux*)
+-        case $cc_basename in
+-          ec++*)
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-          ghcx*)
+-	    # Green Hills C++ Compiler
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-          *)
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-        esac
+-        ;;
+-
+-      freebsd2.*)
+-        # C++ shared libraries reported to be fairly broken before
+-	# switch to ELF
+-        _LT_TAGVAR(ld_shlibs, $1)=no
+-        ;;
+-
+-      freebsd-elf*)
+-        _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-        ;;
+-
+-      freebsd* | dragonfly* | midnightbsd*)
+-        # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+-        # conventions
+-        _LT_TAGVAR(ld_shlibs, $1)=yes
+-        ;;
+-
+-      haiku*)
+-        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-        _LT_TAGVAR(link_all_deplibs, $1)=yes
+-        ;;
+-
+-      hpux9*)
+-        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+-        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-        _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-        _LT_TAGVAR(hardcode_direct, $1)=yes
+-        _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+-				             # but as the default
+-				             # location of the library.
+-
+-        case $cc_basename in
+-          CC*)
+-            # FIXME: insert proper C++ library support
+-            _LT_TAGVAR(ld_shlibs, $1)=no
+-            ;;
+-          aCC*)
+-            _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+-            # Commands to make compiler produce verbose output that lists
+-            # what "hidden" libraries, object files and flags are used when
+-            # linking a shared library.
+-            #
+-            # There doesn't appear to be a way to prevent this compiler from
+-            # explicitly linking system object files so we need to strip them
+-            # from the output so that they don't get included in the library
+-            # dependencies.
+-            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+-            ;;
+-          *)
+-            if test yes = "$GXX"; then
+-              _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
+-            else
+-              # FIXME: insert proper C++ library support
+-              _LT_TAGVAR(ld_shlibs, $1)=no
+-            fi
+-            ;;
+-        esac
+-        ;;
+-
+-      hpux10*|hpux11*)
+-        if test no = "$with_gnu_ld"; then
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl+b $wl$libdir'
+-	  _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-
+-          case $host_cpu in
+-            hppa*64*|ia64*)
+-              ;;
+-            *)
+-	      _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-              ;;
+-          esac
+-        fi
+-        case $host_cpu in
+-          hppa*64*|ia64*)
+-            _LT_TAGVAR(hardcode_direct, $1)=no
+-            _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-            ;;
+-          *)
+-            _LT_TAGVAR(hardcode_direct, $1)=yes
+-            _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+-            _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH,
+-					         # but as the default
+-					         # location of the library.
+-            ;;
+-        esac
+-
+-        case $cc_basename in
+-          CC*)
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-          aCC*)
+-	    case $host_cpu in
+-	      hppa*64*)
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	        ;;
+-	      ia64*)
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	        ;;
+-	      *)
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	        ;;
+-	    esac
+-	    # Commands to make compiler produce verbose output that lists
+-	    # what "hidden" libraries, object files and flags are used when
+-	    # linking a shared library.
+-	    #
+-	    # There doesn't appear to be a way to prevent this compiler from
+-	    # explicitly linking system object files so we need to strip them
+-	    # from the output so that they don't get included in the library
+-	    # dependencies.
+-	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+-	    ;;
+-          *)
+-	    if test yes = "$GXX"; then
+-	      if test no = "$with_gnu_ld"; then
+-	        case $host_cpu in
+-	          hppa*64*)
+-	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	            ;;
+-	          ia64*)
+-	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	            ;;
+-	          *)
+-	            _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	            ;;
+-	        esac
+-	      fi
+-	    else
+-	      # FIXME: insert proper C++ library support
+-	      _LT_TAGVAR(ld_shlibs, $1)=no
+-	    fi
+-	    ;;
+-        esac
+-        ;;
+-
+-      interix[[3-9]]*)
+-	_LT_TAGVAR(hardcode_direct, $1)=no
+-	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+-	# Instead, shared libraries are loaded at an image base (0x10000000 by
+-	# default) and relocated if they conflict, which is a slow very memory
+-	# consuming and fragmenting process.  To avoid this, we pick a random,
+-	# 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+-	# time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+-	_LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+-	_LT_TAGVAR(archive_expsym_cmds, $1)='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+-	;;
+-      irix5* | irix6*)
+-        case $cc_basename in
+-          CC*)
+-	    # SGI C++
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-
+-	    # Archives containing C++ object files must be created using
+-	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
+-	    # necessary to make sure instantiated templates are included
+-	    # in the archive.
+-	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs'
+-	    ;;
+-          *)
+-	    if test yes = "$GXX"; then
+-	      if test no = "$with_gnu_ld"; then
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+-	      else
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib'
+-	      fi
+-	    fi
+-	    _LT_TAGVAR(link_all_deplibs, $1)=yes
+-	    ;;
+-        esac
+-        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-        _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-        _LT_TAGVAR(inherit_rpath, $1)=yes
+-        ;;
+-
+-      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+-        case $cc_basename in
+-          KCC*)
+-	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+-
+-	    # KCC will only create a shared library if the output file
+-	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+-	    # to its proper name (with version) after linking.
+-	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib'
+-	    # Commands to make compiler produce verbose output that lists
+-	    # what "hidden" libraries, object files and flags are used when
+-	    # linking a shared library.
+-	    #
+-	    # There doesn't appear to be a way to prevent this compiler from
+-	    # explicitly linking system object files so we need to strip them
+-	    # from the output so that they don't get included in the library
+-	    # dependencies.
+-	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+-
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+-
+-	    # Archives containing C++ object files must be created using
+-	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+-	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs'
+-	    ;;
+-	  icpc* | ecpc* )
+-	    # Intel C++
+-	    with_gnu_ld=yes
+-	    # version 8.0 and above of icpc choke on multiply defined symbols
+-	    # if we add $predep_objects and $postdep_objects, however 7.1 and
+-	    # earlier do not add the objects themselves.
+-	    case `$CC -V 2>&1` in
+-	      *"Version 7."*)
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-		;;
+-	      *)  # Version 8.0 or newer
+-	        tmp_idyn=
+-	        case $host_cpu in
+-		  ia64*) tmp_idyn=' -i_dynamic';;
+-		esac
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-		_LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-		;;
+-	    esac
+-	    _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+-	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive$convenience $wl--no-whole-archive'
+-	    ;;
+-          pgCC* | pgcpp*)
+-            # Portland Group C++ compiler
+-	    case `$CC -V` in
+-	    *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*)
+-	      _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
+-               rm -rf $tpldir~
+-               $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+-               compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+-	      _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
+-                rm -rf $tpldir~
+-                $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+-                $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+-                $RANLIB $oldlib'
+-	      _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
+-                rm -rf $tpldir~
+-                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+-                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-	      _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
+-                rm -rf $tpldir~
+-                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+-                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-	      ;;
+-	    *) # Version 6 and above use weak symbols
+-	      _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
+-	      ;;
+-	    esac
+-
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl--rpath $wl$libdir'
+-	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+-	    _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+-            ;;
+-	  cxx*)
+-	    # Compaq C++
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname  -o $lib $wl-retain-symbols-file $wl$export_symbols'
+-
+-	    runpath_var=LD_RUN_PATH
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+-	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-
+-	    # Commands to make compiler produce verbose output that lists
+-	    # what "hidden" libraries, object files and flags are used when
+-	    # linking a shared library.
+-	    #
+-	    # There doesn't appear to be a way to prevent this compiler from
+-	    # explicitly linking system object files so we need to strip them
+-	    # from the output so that they don't get included in the library
+-	    # dependencies.
+-	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+-	    ;;
+-	  xl* | mpixl* | bgxl*)
+-	    # IBM XL 8.0 on PPC, with GNU ld
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl--export-dynamic'
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	    if test yes = "$supports_anon_versioning"; then
+-	      _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+-                cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+-                echo "local: *; };" >> $output_objdir/$libname.ver~
+-                $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
+-	    fi
+-	    ;;
+-	  *)
+-	    case `$CC -V 2>&1 | $SED 5q` in
+-	    *Sun\ C*)
+-	      # Sun C++ 5.9
+-	      _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+-	      _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	      _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols'
+-	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+-	      _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
+-	      _LT_TAGVAR(compiler_needs_object, $1)=yes
+-
+-	      # Not sure whether something based on
+-	      # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+-	      # would be better.
+-	      output_verbose_link_cmd='func_echo_all'
+-
+-	      # Archives containing C++ object files must be created using
+-	      # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+-	      # necessary to make sure instantiated templates are included
+-	      # in the archive.
+-	      _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+-	      ;;
+-	    esac
+-	    ;;
+-	esac
+-	;;
+-
+-      lynxos*)
+-        # FIXME: insert proper C++ library support
+-	_LT_TAGVAR(ld_shlibs, $1)=no
+-	;;
+-
+-      m88k*)
+-        # FIXME: insert proper C++ library support
+-        _LT_TAGVAR(ld_shlibs, $1)=no
+-	;;
+-
+-      mvs*)
+-        case $cc_basename in
+-          cxx*)
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-	  *)
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-	esac
+-	;;
+-
+-      netbsd*)
+-        if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+-	  _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable  -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags'
+-	  wlarc=
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+-	  _LT_TAGVAR(hardcode_direct, $1)=yes
+-	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	fi
+-	# Workaround some broken pre-1.5 toolchains
+-	output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"'
+-	;;
+-
+-      *nto* | *qnx*)
+-        _LT_TAGVAR(ld_shlibs, $1)=yes
+-	;;
+-
+-      openbsd* | bitrig*)
+-	if test -f /usr/libexec/ld.so; then
+-	  _LT_TAGVAR(hardcode_direct, $1)=yes
+-	  _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	  _LT_TAGVAR(hardcode_direct_absolute, $1)=yes
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+-	  _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib'
+-	    _LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-E'
+-	    _LT_TAGVAR(whole_archive_flag_spec, $1)=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
+-	  fi
+-	  output_verbose_link_cmd=func_echo_all
+-	else
+-	  _LT_TAGVAR(ld_shlibs, $1)=no
+-	fi
+-	;;
+-
+-      osf3* | osf4* | osf5*)
+-        case $cc_basename in
+-          KCC*)
+-	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+-
+-	    # KCC will only create a shared library if the output file
+-	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+-	    # to its proper name (with version) after linking.
+-	    _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+-
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath,$libdir'
+-	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-
+-	    # Archives containing C++ object files must be created using
+-	    # the KAI C++ compiler.
+-	    case $host in
+-	      osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;;
+-	      *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;;
+-	    esac
+-	    ;;
+-          RCC*)
+-	    # Rational C++ 2.4.1
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-          cxx*)
+-	    case $host in
+-	      osf3*)
+-	        _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-		;;
+-	      *)
+-	        _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*'
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-	        _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+-                  echo "-hidden">> $lib.exp~
+-                  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~
+-                  $RM $lib.exp'
+-	        _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+-		;;
+-	    esac
+-
+-	    _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-
+-	    # Commands to make compiler produce verbose output that lists
+-	    # what "hidden" libraries, object files and flags are used when
+-	    # linking a shared library.
+-	    #
+-	    # There doesn't appear to be a way to prevent this compiler from
+-	    # explicitly linking system object files so we need to strip them
+-	    # from the output so that they don't get included in the library
+-	    # dependencies.
+-	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+-	    ;;
+-	  *)
+-	    if test yes,no = "$GXX,$with_gnu_ld"; then
+-	      _LT_TAGVAR(allow_undefined_flag, $1)=' $wl-expect_unresolved $wl\*'
+-	      case $host in
+-	        osf3*)
+-	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+-		  ;;
+-	        *)
+-	          _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+-		  ;;
+-	      esac
+-
+-	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-rpath $wl$libdir'
+-	      _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+-
+-	      # Commands to make compiler produce verbose output that lists
+-	      # what "hidden" libraries, object files and flags are used when
+-	      # linking a shared library.
+-	      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+-
+-	    else
+-	      # FIXME: insert proper C++ library support
+-	      _LT_TAGVAR(ld_shlibs, $1)=no
+-	    fi
+-	    ;;
+-        esac
+-        ;;
+-
+-      psos*)
+-        # FIXME: insert proper C++ library support
+-        _LT_TAGVAR(ld_shlibs, $1)=no
+-        ;;
+-
+-      sunos4*)
+-        case $cc_basename in
+-          CC*)
+-	    # Sun C++ 4.x
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-          lcc*)
+-	    # Lucid
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-          *)
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-        esac
+-        ;;
+-
+-      solaris*)
+-        case $cc_basename in
+-          CC* | sunCC*)
+-	    # Sun C++ 4.2, 5.x and Centerline C++
+-            _LT_TAGVAR(archive_cmds_need_lc,$1)=yes
+-	    _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-              $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+-
+-	    _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+-	    _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	    case $host_os in
+-	      solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+-	      *)
+-		# The compiler driver will combine and reorder linker options,
+-		# but understands '-z linker_flag'.
+-	        # Supported since Solaris 2.6 (maybe 2.5.1?)
+-		_LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract'
+-	        ;;
+-	    esac
+-	    _LT_TAGVAR(link_all_deplibs, $1)=yes
+-
+-	    output_verbose_link_cmd='func_echo_all'
+-
+-	    # Archives containing C++ object files must be created using
+-	    # "CC -xar", where "CC" is the Sun C++ compiler.  This is
+-	    # necessary to make sure instantiated templates are included
+-	    # in the archive.
+-	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs'
+-	    ;;
+-          gcx*)
+-	    # Green Hills C++ Compiler
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+-
+-	    # The C++ compiler must be used to create the archive.
+-	    _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+-	    ;;
+-          *)
+-	    # GNU C++ compiler with Solaris linker
+-	    if test yes,no = "$GXX,$with_gnu_ld"; then
+-	      _LT_TAGVAR(no_undefined_flag, $1)=' $wl-z ${wl}defs'
+-	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+-	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-                  $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+-
+-	        # Commands to make compiler produce verbose output that lists
+-	        # what "hidden" libraries, object files and flags are used when
+-	        # linking a shared library.
+-	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+-	      else
+-	        # g++ 2.7 appears to require '-G' NOT '-shared' on this
+-	        # platform.
+-	        _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
+-	        _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-                  $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+-
+-	        # Commands to make compiler produce verbose output that lists
+-	        # what "hidden" libraries, object files and flags are used when
+-	        # linking a shared library.
+-	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+-	      fi
+-
+-	      _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R $wl$libdir'
+-	      case $host_os in
+-		solaris2.[[0-5]] | solaris2.[[0-5]].*) ;;
+-		*)
+-		  _LT_TAGVAR(whole_archive_flag_spec, $1)='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
+-		  ;;
+-	      esac
+-	    fi
+-	    ;;
+-        esac
+-        ;;
+-
+-    sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*)
+-      _LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+-      _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-      _LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-      runpath_var='LD_RUN_PATH'
+-
+-      case $cc_basename in
+-        CC*)
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  ;;
+-	*)
+-	  _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  ;;
+-      esac
+-      ;;
+-
+-      sysv5* | sco3.2v5* | sco5v6*)
+-	# Note: We CANNOT use -z defs as we might desire, because we do not
+-	# link with -lc, and that would cause any symbols used from libc to
+-	# always be unresolved, which means just about no library would
+-	# ever link correctly.  If we're not using GNU ld we use -z text
+-	# though, which does catch some bad symbols but isn't as heavy-handed
+-	# as -z defs.
+-	_LT_TAGVAR(no_undefined_flag, $1)='$wl-z,text'
+-	_LT_TAGVAR(allow_undefined_flag, $1)='$wl-z,nodefs'
+-	_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-	_LT_TAGVAR(hardcode_shlibpath_var, $1)=no
+-	_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='$wl-R,$libdir'
+-	_LT_TAGVAR(hardcode_libdir_separator, $1)=':'
+-	_LT_TAGVAR(link_all_deplibs, $1)=yes
+-	_LT_TAGVAR(export_dynamic_flag_spec, $1)='$wl-Bexport'
+-	runpath_var='LD_RUN_PATH'
+-
+-	case $cc_basename in
+-          CC*)
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	    _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~
+-              '"$_LT_TAGVAR(old_archive_cmds, $1)"
+-	    _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~
+-              '"$_LT_TAGVAR(reload_cmds, $1)"
+-	    ;;
+-	  *)
+-	    _LT_TAGVAR(archive_cmds, $1)='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	    _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	    ;;
+-	esac
+-      ;;
+-
+-      tandem*)
+-        case $cc_basename in
+-          NCC*)
+-	    # NonStop-UX NCC 3.20
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-          *)
+-	    # FIXME: insert proper C++ library support
+-	    _LT_TAGVAR(ld_shlibs, $1)=no
+-	    ;;
+-        esac
+-        ;;
+-
+-      vxworks*)
+-        # FIXME: insert proper C++ library support
+-        _LT_TAGVAR(ld_shlibs, $1)=no
+-        ;;
+-
+-      *)
+-        # FIXME: insert proper C++ library support
+-        _LT_TAGVAR(ld_shlibs, $1)=no
+-        ;;
+-    esac
+-
+-    AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)])
+-    test no = "$_LT_TAGVAR(ld_shlibs, $1)" && can_build_shared=no
+-
+-    _LT_TAGVAR(GCC, $1)=$GXX
+-    _LT_TAGVAR(LD, $1)=$LD
+-
+-    ## CAVEAT EMPTOR:
+-    ## There is no encapsulation within the following macros, do not change
+-    ## the running order or otherwise move them around unless you know exactly
+-    ## what you are doing...
+-    _LT_SYS_HIDDEN_LIBDEPS($1)
+-    _LT_COMPILER_PIC($1)
+-    _LT_COMPILER_C_O($1)
+-    _LT_COMPILER_FILE_LOCKS($1)
+-    _LT_LINKER_SHLIBS($1)
+-    _LT_SYS_DYNAMIC_LINKER($1)
+-    _LT_LINKER_HARDCODE_LIBPATH($1)
+-
+-    _LT_CONFIG($1)
+-  fi # test -n "$compiler"
+-
+-  CC=$lt_save_CC
+-  CFLAGS=$lt_save_CFLAGS
+-  LDCXX=$LD
+-  LD=$lt_save_LD
+-  GCC=$lt_save_GCC
+-  with_gnu_ld=$lt_save_with_gnu_ld
+-  lt_cv_path_LDCXX=$lt_cv_path_LD
+-  lt_cv_path_LD=$lt_save_path_LD
+-  lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+-  lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+-fi # test yes != "$_lt_caught_CXX_error"
+-
+-AC_LANG_POP
+-])# _LT_LANG_CXX_CONFIG
+-
+-
+-# _LT_FUNC_STRIPNAME_CNF
+-# ----------------------
+-# func_stripname_cnf prefix suffix name
+-# strip PREFIX and SUFFIX off of NAME.
+-# PREFIX and SUFFIX must not contain globbing or regex special
+-# characters, hashes, percent signs, but SUFFIX may contain a leading
+-# dot (in which case that matches only a dot).
+-#
+-# This function is identical to the (non-XSI) version of func_stripname,
+-# except this one can be used by m4 code that may be executed by configure,
+-# rather than the libtool script.
+-m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl
+-AC_REQUIRE([_LT_DECL_SED])
+-AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])
+-func_stripname_cnf ()
+-{
+-  case @S|@2 in
+-  .*) func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%\\\\@S|@2\$%%"`;;
+-  *)  func_stripname_result=`$ECHO "@S|@3" | $SED "s%^@S|@1%%; s%@S|@2\$%%"`;;
+-  esac
+-} # func_stripname_cnf
+-])# _LT_FUNC_STRIPNAME_CNF
+-
+-
+-# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
+-# ---------------------------------
+-# Figure out "hidden" library dependencies from verbose
+-# compiler output when linking a shared library.
+-# Parse the compiler output and extract the necessary
+-# objects, libraries and library flags.
+-m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
+-[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+-AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
+-# Dependencies to place before and after the object being linked:
+-_LT_TAGVAR(predep_objects, $1)=
+-_LT_TAGVAR(postdep_objects, $1)=
+-_LT_TAGVAR(predeps, $1)=
+-_LT_TAGVAR(postdeps, $1)=
+-_LT_TAGVAR(compiler_lib_search_path, $1)=
+-
+-dnl we can't use the lt_simple_compile_test_code here,
+-dnl because it contains code intended for an executable,
+-dnl not a library.  It's possible we should let each
+-dnl tag define a new lt_????_link_test_code variable,
+-dnl but it's only used here...
+-m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
+-int a;
+-void foo (void) { a = 0; }
+-_LT_EOF
+-], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
+-class Foo
+-{
+-public:
+-  Foo (void) { a = 0; }
+-private:
+-  int a;
+-};
+-_LT_EOF
+-], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
+-      subroutine foo
+-      implicit none
+-      integer*4 a
+-      a=0
+-      return
+-      end
+-_LT_EOF
+-], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
+-      subroutine foo
+-      implicit none
+-      integer a
+-      a=0
+-      return
+-      end
+-_LT_EOF
+-], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
+-public class foo {
+-  private int a;
+-  public void bar (void) {
+-    a = 0;
+-  }
+-};
+-_LT_EOF
+-], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
+-package foo
+-func foo() {
+-}
+-_LT_EOF
+-])
+-
+-_lt_libdeps_save_CFLAGS=$CFLAGS
+-case "$CC $CFLAGS " in #(
+-*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+-*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+-*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+-esac
+-
+-dnl Parse the compiler output and extract the necessary
+-dnl objects, libraries and library flags.
+-if AC_TRY_EVAL(ac_compile); then
+-  # Parse the compiler output and extract the necessary
+-  # objects, libraries and library flags.
+-
+-  # Sentinel used to keep track of whether or not we are before
+-  # the conftest object file.
+-  pre_test_object_deps_done=no
+-
+-  for p in `eval "$output_verbose_link_cmd"`; do
+-    case $prev$p in
+-
+-    -L* | -R* | -l*)
+-       # Some compilers place space between "-{L,R}" and the path.
+-       # Remove the space.
+-       if test x-L = "$p" ||
+-          test x-R = "$p"; then
+-	 prev=$p
+-	 continue
+-       fi
+-
+-       # Expand the sysroot to ease extracting the directories later.
+-       if test -z "$prev"; then
+-         case $p in
+-         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+-         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+-         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+-         esac
+-       fi
+-       case $p in
+-       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+-       esac
+-       if test no = "$pre_test_object_deps_done"; then
+-	 case $prev in
+-	 -L | -R)
+-	   # Internal compiler library paths should come after those
+-	   # provided the user.  The postdeps already come after the
+-	   # user supplied libs so there is no need to process them.
+-	   if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then
+-	     _LT_TAGVAR(compiler_lib_search_path, $1)=$prev$p
+-	   else
+-	     _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} $prev$p"
+-	   fi
+-	   ;;
+-	 # The "-l" case would never come before the object being
+-	 # linked, so don't bother handling this case.
+-	 esac
+-       else
+-	 if test -z "$_LT_TAGVAR(postdeps, $1)"; then
+-	   _LT_TAGVAR(postdeps, $1)=$prev$p
+-	 else
+-	   _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} $prev$p"
+-	 fi
+-       fi
+-       prev=
+-       ;;
+-
+-    *.lto.$objext) ;; # Ignore GCC LTO objects
+-    *.$objext)
+-       # This assumes that the test object file only shows up
+-       # once in the compiler output.
+-       if test "$p" = "conftest.$objext"; then
+-	 pre_test_object_deps_done=yes
+-	 continue
+-       fi
+-
+-       if test no = "$pre_test_object_deps_done"; then
+-	 if test -z "$_LT_TAGVAR(predep_objects, $1)"; then
+-	   _LT_TAGVAR(predep_objects, $1)=$p
+-	 else
+-	   _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p"
+-	 fi
+-       else
+-	 if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then
+-	   _LT_TAGVAR(postdep_objects, $1)=$p
+-	 else
+-	   _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p"
+-	 fi
+-       fi
+-       ;;
+-
+-    *) ;; # Ignore the rest.
+-
+-    esac
+-  done
+-
+-  # Clean up.
+-  rm -f a.out a.exe
+-else
+-  echo "libtool.m4: error: problem compiling $1 test program"
+-fi
+-
+-$RM -f confest.$objext
+-CFLAGS=$_lt_libdeps_save_CFLAGS
+-
+-# PORTME: override above test on systems where it is broken
+-m4_if([$1], [CXX],
+-[case $host_os in
+-interix[[3-9]]*)
+-  # Interix 3.5 installs completely hosed .la files for C++, so rather than
+-  # hack all around it, let's just trust "g++" to DTRT.
+-  _LT_TAGVAR(predep_objects,$1)=
+-  _LT_TAGVAR(postdep_objects,$1)=
+-  _LT_TAGVAR(postdeps,$1)=
+-  ;;
+-esac
+-])
+-
+-case " $_LT_TAGVAR(postdeps, $1) " in
+-*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;;
+-esac
+- _LT_TAGVAR(compiler_lib_search_dirs, $1)=
+-if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then
+- _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | $SED -e 's! -L! !g' -e 's!^ !!'`
+-fi
+-_LT_TAGDECL([], [compiler_lib_search_dirs], [1],
+-    [The directories searched by this compiler when creating a shared library])
+-_LT_TAGDECL([], [predep_objects], [1],
+-    [Dependencies to place before and after the objects being linked to
+-    create a shared library])
+-_LT_TAGDECL([], [postdep_objects], [1])
+-_LT_TAGDECL([], [predeps], [1])
+-_LT_TAGDECL([], [postdeps], [1])
+-_LT_TAGDECL([], [compiler_lib_search_path], [1],
+-    [The library search path used internally by the compiler when linking
+-    a shared library])
+-])# _LT_SYS_HIDDEN_LIBDEPS
+-
+-
+-# _LT_LANG_F77_CONFIG([TAG])
+-# --------------------------
+-# Ensure that the configuration variables for a Fortran 77 compiler are
+-# suitably defined.  These variables are subsequently used by _LT_CONFIG
+-# to write the compiler configuration to 'libtool'.
+-m4_defun([_LT_LANG_F77_CONFIG],
+-[AC_LANG_PUSH(Fortran 77)
+-if test -z "$F77" || test no = "$F77"; then
+-  _lt_disable_F77=yes
+-fi
+-
+-_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-_LT_TAGVAR(allow_undefined_flag, $1)=
+-_LT_TAGVAR(always_export_symbols, $1)=no
+-_LT_TAGVAR(archive_expsym_cmds, $1)=
+-_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+-_LT_TAGVAR(hardcode_direct, $1)=no
+-_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+-_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+-_LT_TAGVAR(hardcode_libdir_separator, $1)=
+-_LT_TAGVAR(hardcode_minus_L, $1)=no
+-_LT_TAGVAR(hardcode_automatic, $1)=no
+-_LT_TAGVAR(inherit_rpath, $1)=no
+-_LT_TAGVAR(module_cmds, $1)=
+-_LT_TAGVAR(module_expsym_cmds, $1)=
+-_LT_TAGVAR(link_all_deplibs, $1)=unknown
+-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+-_LT_TAGVAR(reload_flag, $1)=$reload_flag
+-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+-_LT_TAGVAR(no_undefined_flag, $1)=
+-_LT_TAGVAR(whole_archive_flag_spec, $1)=
+-_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+-
+-# Source file extension for f77 test sources.
+-ac_ext=f
+-
+-# Object file extension for compiled f77 test sources.
+-objext=o
+-_LT_TAGVAR(objext, $1)=$objext
+-
+-# No sense in running all these tests if we already determined that
+-# the F77 compiler isn't working.  Some variables (like enable_shared)
+-# are currently assumed to apply to all compilers on this platform,
+-# and will be corrupted by setting them based on a non-working compiler.
+-if test yes != "$_lt_disable_F77"; then
+-  # Code to be used in simple compile tests
+-  lt_simple_compile_test_code="\
+-      subroutine t
+-      return
+-      end
+-"
+-
+-  # Code to be used in simple link tests
+-  lt_simple_link_test_code="\
+-      program t
+-      end
+-"
+-
+-  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+-  _LT_TAG_COMPILER
+-
+-  # save warnings/boilerplate of simple test code
+-  _LT_COMPILER_BOILERPLATE
+-  _LT_LINKER_BOILERPLATE
+-
+-  # Allow CC to be a program name with arguments.
+-  lt_save_CC=$CC
+-  lt_save_GCC=$GCC
+-  lt_save_CFLAGS=$CFLAGS
+-  CC=${F77-"f77"}
+-  CFLAGS=$FFLAGS
+-  compiler=$CC
+-  _LT_TAGVAR(compiler, $1)=$CC
+-  _LT_CC_BASENAME([$compiler])
+-  GCC=$G77
+-  if test -n "$compiler"; then
+-    AC_MSG_CHECKING([if libtool supports shared libraries])
+-    AC_MSG_RESULT([$can_build_shared])
+-
+-    AC_MSG_CHECKING([whether to build shared libraries])
+-    test no = "$can_build_shared" && enable_shared=no
+-
+-    # On AIX, shared libraries and static libraries use the same namespace, and
+-    # are all built from PIC.
+-    case $host_os in
+-      aix3*)
+-        test yes = "$enable_shared" && enable_static=no
+-        if test -n "$RANLIB"; then
+-          archive_cmds="$archive_cmds~\$RANLIB \$lib"
+-          postinstall_cmds='$RANLIB $lib'
+-        fi
+-        ;;
+-      aix[[4-9]]*)
+-	if test ia64 != "$host_cpu"; then
+-	  case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+-	  yes,aix,yes) ;;		# shared object as lib.so file only
+-	  yes,svr4,*) ;;		# shared object as lib.so archive member only
+-	  yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+-	  esac
+-	fi
+-        ;;
+-    esac
+-    AC_MSG_RESULT([$enable_shared])
+-
+-    AC_MSG_CHECKING([whether to build static libraries])
+-    # Make sure either enable_shared or enable_static is yes.
+-    test yes = "$enable_shared" || enable_static=yes
+-    AC_MSG_RESULT([$enable_static])
+-
+-    _LT_TAGVAR(GCC, $1)=$G77
+-    _LT_TAGVAR(LD, $1)=$LD
+-
+-    ## CAVEAT EMPTOR:
+-    ## There is no encapsulation within the following macros, do not change
+-    ## the running order or otherwise move them around unless you know exactly
+-    ## what you are doing...
+-    _LT_COMPILER_PIC($1)
+-    _LT_COMPILER_C_O($1)
+-    _LT_COMPILER_FILE_LOCKS($1)
+-    _LT_LINKER_SHLIBS($1)
+-    _LT_SYS_DYNAMIC_LINKER($1)
+-    _LT_LINKER_HARDCODE_LIBPATH($1)
+-
+-    _LT_CONFIG($1)
+-  fi # test -n "$compiler"
+-
+-  GCC=$lt_save_GCC
+-  CC=$lt_save_CC
+-  CFLAGS=$lt_save_CFLAGS
+-fi # test yes != "$_lt_disable_F77"
+-
+-AC_LANG_POP
+-])# _LT_LANG_F77_CONFIG
+-
+-
+-# _LT_LANG_FC_CONFIG([TAG])
+-# -------------------------
+-# Ensure that the configuration variables for a Fortran compiler are
+-# suitably defined.  These variables are subsequently used by _LT_CONFIG
+-# to write the compiler configuration to 'libtool'.
+-m4_defun([_LT_LANG_FC_CONFIG],
+-[AC_LANG_PUSH(Fortran)
+-
+-if test -z "$FC" || test no = "$FC"; then
+-  _lt_disable_FC=yes
+-fi
+-
+-_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-_LT_TAGVAR(allow_undefined_flag, $1)=
+-_LT_TAGVAR(always_export_symbols, $1)=no
+-_LT_TAGVAR(archive_expsym_cmds, $1)=
+-_LT_TAGVAR(export_dynamic_flag_spec, $1)=
+-_LT_TAGVAR(hardcode_direct, $1)=no
+-_LT_TAGVAR(hardcode_direct_absolute, $1)=no
+-_LT_TAGVAR(hardcode_libdir_flag_spec, $1)=
+-_LT_TAGVAR(hardcode_libdir_separator, $1)=
+-_LT_TAGVAR(hardcode_minus_L, $1)=no
+-_LT_TAGVAR(hardcode_automatic, $1)=no
+-_LT_TAGVAR(inherit_rpath, $1)=no
+-_LT_TAGVAR(module_cmds, $1)=
+-_LT_TAGVAR(module_expsym_cmds, $1)=
+-_LT_TAGVAR(link_all_deplibs, $1)=unknown
+-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+-_LT_TAGVAR(reload_flag, $1)=$reload_flag
+-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+-_LT_TAGVAR(no_undefined_flag, $1)=
+-_LT_TAGVAR(whole_archive_flag_spec, $1)=
+-_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no
+-
+-# Source file extension for fc test sources.
+-ac_ext=${ac_fc_srcext-f}
+-
+-# Object file extension for compiled fc test sources.
+-objext=o
+-_LT_TAGVAR(objext, $1)=$objext
+-
+-# No sense in running all these tests if we already determined that
+-# the FC compiler isn't working.  Some variables (like enable_shared)
+-# are currently assumed to apply to all compilers on this platform,
+-# and will be corrupted by setting them based on a non-working compiler.
+-if test yes != "$_lt_disable_FC"; then
+-  # Code to be used in simple compile tests
+-  lt_simple_compile_test_code="\
+-      subroutine t
+-      return
+-      end
+-"
+-
+-  # Code to be used in simple link tests
+-  lt_simple_link_test_code="\
+-      program t
+-      end
+-"
+-
+-  # ltmain only uses $CC for tagged configurations so make sure $CC is set.
+-  _LT_TAG_COMPILER
+-
+-  # save warnings/boilerplate of simple test code
+-  _LT_COMPILER_BOILERPLATE
+-  _LT_LINKER_BOILERPLATE
+-
+-  # Allow CC to be a program name with arguments.
+-  lt_save_CC=$CC
+-  lt_save_GCC=$GCC
+-  lt_save_CFLAGS=$CFLAGS
+-  CC=${FC-"f95"}
+-  CFLAGS=$FCFLAGS
+-  compiler=$CC
+-  GCC=$ac_cv_fc_compiler_gnu
+-
+-  _LT_TAGVAR(compiler, $1)=$CC
+-  _LT_CC_BASENAME([$compiler])
+-
+-  if test -n "$compiler"; then
+-    AC_MSG_CHECKING([if libtool supports shared libraries])
+-    AC_MSG_RESULT([$can_build_shared])
+-
+-    AC_MSG_CHECKING([whether to build shared libraries])
+-    test no = "$can_build_shared" && enable_shared=no
+-
+-    # On AIX, shared libraries and static libraries use the same namespace, and
+-    # are all built from PIC.
+-    case $host_os in
+-      aix3*)
+-        test yes = "$enable_shared" && enable_static=no
+-        if test -n "$RANLIB"; then
+-          archive_cmds="$archive_cmds~\$RANLIB \$lib"
+-          postinstall_cmds='$RANLIB $lib'
+-        fi
+-        ;;
+-      aix[[4-9]]*)
+-	if test ia64 != "$host_cpu"; then
+-	  case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+-	  yes,aix,yes) ;;		# shared object as lib.so file only
+-	  yes,svr4,*) ;;		# shared object as lib.so archive member only
+-	  yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+-	  esac
+-	fi
+-        ;;
+-    esac
+-    AC_MSG_RESULT([$enable_shared])
+-
+-    AC_MSG_CHECKING([whether to build static libraries])
+-    # Make sure either enable_shared or enable_static is yes.
+-    test yes = "$enable_shared" || enable_static=yes
+-    AC_MSG_RESULT([$enable_static])
+-
+-    _LT_TAGVAR(GCC, $1)=$ac_cv_fc_compiler_gnu
+-    _LT_TAGVAR(LD, $1)=$LD
+-
+-    ## CAVEAT EMPTOR:
+-    ## There is no encapsulation within the following macros, do not change
+-    ## the running order or otherwise move them around unless you know exactly
+-    ## what you are doing...
+-    _LT_SYS_HIDDEN_LIBDEPS($1)
+-    _LT_COMPILER_PIC($1)
+-    _LT_COMPILER_C_O($1)
+-    _LT_COMPILER_FILE_LOCKS($1)
+-    _LT_LINKER_SHLIBS($1)
+-    _LT_SYS_DYNAMIC_LINKER($1)
+-    _LT_LINKER_HARDCODE_LIBPATH($1)
+-
+-    _LT_CONFIG($1)
+-  fi # test -n "$compiler"
+-
+-  GCC=$lt_save_GCC
+-  CC=$lt_save_CC
+-  CFLAGS=$lt_save_CFLAGS
+-fi # test yes != "$_lt_disable_FC"
+-
+-AC_LANG_POP
+-])# _LT_LANG_FC_CONFIG
+-
+-
+-# _LT_LANG_GCJ_CONFIG([TAG])
+-# --------------------------
+-# Ensure that the configuration variables for the GNU Java Compiler compiler
+-# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+-# to write the compiler configuration to 'libtool'.
+-m4_defun([_LT_LANG_GCJ_CONFIG],
+-[AC_REQUIRE([LT_PROG_GCJ])dnl
+-AC_LANG_SAVE
+-
+-# Source file extension for Java test sources.
+-ac_ext=java
+-
+-# Object file extension for compiled Java test sources.
+-objext=o
+-_LT_TAGVAR(objext, $1)=$objext
+-
+-# Code to be used in simple compile tests
+-lt_simple_compile_test_code="class foo {}"
+-
+-# Code to be used in simple link tests
+-lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }'
+-
+-# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+-_LT_TAG_COMPILER
+-
+-# save warnings/boilerplate of simple test code
+-_LT_COMPILER_BOILERPLATE
+-_LT_LINKER_BOILERPLATE
+-
+-# Allow CC to be a program name with arguments.
+-lt_save_CC=$CC
+-lt_save_CFLAGS=$CFLAGS
+-lt_save_GCC=$GCC
+-GCC=yes
+-CC=${GCJ-"gcj"}
+-CFLAGS=$GCJFLAGS
+-compiler=$CC
+-_LT_TAGVAR(compiler, $1)=$CC
+-_LT_TAGVAR(LD, $1)=$LD
+-_LT_CC_BASENAME([$compiler])
+-
+-# GCJ did not exist at the time GCC didn't implicitly link libc in.
+-_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-
+-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+-_LT_TAGVAR(reload_flag, $1)=$reload_flag
+-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+-
+-if test -n "$compiler"; then
+-  _LT_COMPILER_NO_RTTI($1)
+-  _LT_COMPILER_PIC($1)
+-  _LT_COMPILER_C_O($1)
+-  _LT_COMPILER_FILE_LOCKS($1)
+-  _LT_LINKER_SHLIBS($1)
+-  _LT_LINKER_HARDCODE_LIBPATH($1)
+-
+-  _LT_CONFIG($1)
+-fi
+-
+-AC_LANG_RESTORE
+-
+-GCC=$lt_save_GCC
+-CC=$lt_save_CC
+-CFLAGS=$lt_save_CFLAGS
+-])# _LT_LANG_GCJ_CONFIG
+-
+-
+-# _LT_LANG_GO_CONFIG([TAG])
+-# --------------------------
+-# Ensure that the configuration variables for the GNU Go compiler
+-# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+-# to write the compiler configuration to 'libtool'.
+-m4_defun([_LT_LANG_GO_CONFIG],
+-[AC_REQUIRE([LT_PROG_GO])dnl
+-AC_LANG_SAVE
+-
+-# Source file extension for Go test sources.
+-ac_ext=go
+-
+-# Object file extension for compiled Go test sources.
+-objext=o
+-_LT_TAGVAR(objext, $1)=$objext
+-
+-# Code to be used in simple compile tests
+-lt_simple_compile_test_code="package main; func main() { }"
+-
+-# Code to be used in simple link tests
+-lt_simple_link_test_code='package main; func main() { }'
+-
+-# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+-_LT_TAG_COMPILER
+-
+-# save warnings/boilerplate of simple test code
+-_LT_COMPILER_BOILERPLATE
+-_LT_LINKER_BOILERPLATE
+-
+-# Allow CC to be a program name with arguments.
+-lt_save_CC=$CC
+-lt_save_CFLAGS=$CFLAGS
+-lt_save_GCC=$GCC
+-GCC=yes
+-CC=${GOC-"gccgo"}
+-CFLAGS=$GOFLAGS
+-compiler=$CC
+-_LT_TAGVAR(compiler, $1)=$CC
+-_LT_TAGVAR(LD, $1)=$LD
+-_LT_CC_BASENAME([$compiler])
+-
+-# Go did not exist at the time GCC didn't implicitly link libc in.
+-_LT_TAGVAR(archive_cmds_need_lc, $1)=no
+-
+-_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds
+-_LT_TAGVAR(reload_flag, $1)=$reload_flag
+-_LT_TAGVAR(reload_cmds, $1)=$reload_cmds
+-
+-if test -n "$compiler"; then
+-  _LT_COMPILER_NO_RTTI($1)
+-  _LT_COMPILER_PIC($1)
+-  _LT_COMPILER_C_O($1)
+-  _LT_COMPILER_FILE_LOCKS($1)
+-  _LT_LINKER_SHLIBS($1)
+-  _LT_LINKER_HARDCODE_LIBPATH($1)
+-
+-  _LT_CONFIG($1)
+-fi
+-
+-AC_LANG_RESTORE
+-
+-GCC=$lt_save_GCC
+-CC=$lt_save_CC
+-CFLAGS=$lt_save_CFLAGS
+-])# _LT_LANG_GO_CONFIG
+-
+-
+-# _LT_LANG_RC_CONFIG([TAG])
+-# -------------------------
+-# Ensure that the configuration variables for the Windows resource compiler
+-# are suitably defined.  These variables are subsequently used by _LT_CONFIG
+-# to write the compiler configuration to 'libtool'.
+-m4_defun([_LT_LANG_RC_CONFIG],
+-[AC_REQUIRE([LT_PROG_RC])dnl
+-AC_LANG_SAVE
+-
+-# Source file extension for RC test sources.
+-ac_ext=rc
+-
+-# Object file extension for compiled RC test sources.
+-objext=o
+-_LT_TAGVAR(objext, $1)=$objext
+-
+-# Code to be used in simple compile tests
+-lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }'
+-
+-# Code to be used in simple link tests
+-lt_simple_link_test_code=$lt_simple_compile_test_code
+-
+-# ltmain only uses $CC for tagged configurations so make sure $CC is set.
+-_LT_TAG_COMPILER
+-
+-# save warnings/boilerplate of simple test code
+-_LT_COMPILER_BOILERPLATE
+-_LT_LINKER_BOILERPLATE
+-
+-# Allow CC to be a program name with arguments.
+-lt_save_CC=$CC
+-lt_save_CFLAGS=$CFLAGS
+-lt_save_GCC=$GCC
+-GCC=
+-CC=${RC-"windres"}
+-CFLAGS=
+-compiler=$CC
+-_LT_TAGVAR(compiler, $1)=$CC
+-_LT_CC_BASENAME([$compiler])
+-_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes
+-
+-if test -n "$compiler"; then
+-  :
+-  _LT_CONFIG($1)
+-fi
+-
+-GCC=$lt_save_GCC
+-AC_LANG_RESTORE
+-CC=$lt_save_CC
+-CFLAGS=$lt_save_CFLAGS
+-])# _LT_LANG_RC_CONFIG
+-
+-
+-# LT_PROG_GCJ
+-# -----------
+-AC_DEFUN([LT_PROG_GCJ],
+-[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ],
+-  [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ],
+-    [AC_CHECK_TOOL(GCJ, gcj,)
+-      test set = "${GCJFLAGS+set}" || GCJFLAGS="-g -O2"
+-      AC_SUBST(GCJFLAGS)])])[]dnl
+-])
+-
+-# Old name:
+-AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([LT_AC_PROG_GCJ], [])
+-
+-
+-# LT_PROG_GO
+-# ----------
+-AC_DEFUN([LT_PROG_GO],
+-[AC_CHECK_TOOL(GOC, gccgo,)
+-])
+-
+-
+-# LT_PROG_RC
+-# ----------
+-AC_DEFUN([LT_PROG_RC],
+-[AC_CHECK_TOOL(RC, windres,)
+-])
+-
+-# Old name:
+-AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([LT_AC_PROG_RC], [])
+-
+-
+-# _LT_DECL_EGREP
+-# --------------
+-# If we don't have a new enough Autoconf to choose the best grep
+-# available, choose the one first in the user's PATH.
+-m4_defun([_LT_DECL_EGREP],
+-[AC_REQUIRE([AC_PROG_EGREP])dnl
+-AC_REQUIRE([AC_PROG_FGREP])dnl
+-test -z "$GREP" && GREP=grep
+-_LT_DECL([], [GREP], [1], [A grep program that handles long lines])
+-_LT_DECL([], [EGREP], [1], [An ERE matcher])
+-_LT_DECL([], [FGREP], [1], [A literal string matcher])
+-dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too
+-AC_SUBST([GREP])
+-])
+-
+-
+-# _LT_DECL_OBJDUMP
+-# --------------
+-# If we don't have a new enough Autoconf to choose the best objdump
+-# available, choose the one first in the user's PATH.
+-m4_defun([_LT_DECL_OBJDUMP],
+-[AC_CHECK_TOOL(OBJDUMP, objdump, false)
+-test -z "$OBJDUMP" && OBJDUMP=objdump
+-_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper])
+-AC_SUBST([OBJDUMP])
+-])
+-
+-# _LT_DECL_DLLTOOL
+-# ----------------
+-# Ensure DLLTOOL variable is set.
+-m4_defun([_LT_DECL_DLLTOOL],
+-[AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+-test -z "$DLLTOOL" && DLLTOOL=dlltool
+-_LT_DECL([], [DLLTOOL], [1], [DLL creation program])
+-AC_SUBST([DLLTOOL])
+-])
+-
+-# _LT_DECL_FILECMD
+-# ----------------
+-# Check for a file(cmd) program that can be used to detect file type and magic
+-m4_defun([_LT_DECL_FILECMD],
+-[AC_CHECK_TOOL([FILECMD], [file], [:])
+-_LT_DECL([], [FILECMD], [1], [A file(cmd) program that detects file types])
+-])# _LD_DECL_FILECMD
+-
+-# _LT_DECL_SED
+-# ------------
+-# Check for a fully-functional sed program, that truncates
+-# as few characters as possible.  Prefer GNU sed if found.
+-m4_defun([_LT_DECL_SED],
+-[AC_PROG_SED
+-test -z "$SED" && SED=sed
+-Xsed="$SED -e 1s/^X//"
+-_LT_DECL([], [SED], [1], [A sed program that does not truncate output])
+-_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"],
+-    [Sed that helps us avoid accidentally triggering echo(1) options like -n])
+-])# _LT_DECL_SED
+-
+-m4_ifndef([AC_PROG_SED], [
+-# NOTE: This macro has been submitted for inclusion into   #
+-#  GNU Autoconf as AC_PROG_SED.  When it is available in   #
+-#  a released version of Autoconf we should remove this    #
+-#  macro and use it instead.                               #
+-
+-m4_defun([AC_PROG_SED],
+-[AC_MSG_CHECKING([for a sed that does not truncate output])
+-AC_CACHE_VAL(lt_cv_path_SED,
+-[# Loop through the user's path and test for sed and gsed.
+-# Then use that list of sed's as ones to test for truncation.
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  test -z "$as_dir" && as_dir=.
+-  for lt_ac_prog in sed gsed; do
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-      if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then
+-        lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext"
+-      fi
+-    done
+-  done
+-done
+-IFS=$as_save_IFS
+-lt_ac_max=0
+-lt_ac_count=0
+-# Add /usr/xpg4/bin/sed as it is typically found on Solaris
+-# along with /bin/sed that truncates output.
+-for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do
+-  test ! -f "$lt_ac_sed" && continue
+-  cat /dev/null > conftest.in
+-  lt_ac_count=0
+-  echo $ECHO_N "0123456789$ECHO_C" >conftest.in
+-  # Check for GNU sed and select it if it is found.
+-  if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then
+-    lt_cv_path_SED=$lt_ac_sed
+-    break
+-  fi
+-  while true; do
+-    cat conftest.in conftest.in >conftest.tmp
+-    mv conftest.tmp conftest.in
+-    cp conftest.in conftest.nl
+-    echo >>conftest.nl
+-    $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break
+-    cmp -s conftest.out conftest.nl || break
+-    # 10000 chars as input seems more than enough
+-    test 10 -lt "$lt_ac_count" && break
+-    lt_ac_count=`expr $lt_ac_count + 1`
+-    if test "$lt_ac_count" -gt "$lt_ac_max"; then
+-      lt_ac_max=$lt_ac_count
+-      lt_cv_path_SED=$lt_ac_sed
+-    fi
+-  done
+-done
+-])
+-SED=$lt_cv_path_SED
+-AC_SUBST([SED])
+-AC_MSG_RESULT([$SED])
+-])#AC_PROG_SED
+-])#m4_ifndef
+-
+-# Old name:
+-AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED])
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([LT_AC_PROG_SED], [])
+-
+-
+-# _LT_CHECK_SHELL_FEATURES
+-# ------------------------
+-# Find out whether the shell is Bourne or XSI compatible,
+-# or has some other useful features.
+-m4_defun([_LT_CHECK_SHELL_FEATURES],
+-[if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+-  lt_unset=unset
+-else
+-  lt_unset=false
+-fi
+-_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl
+-
+-# test EBCDIC or ASCII
+-case `echo X|tr X '\101'` in
+- A) # ASCII based system
+-    # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr
+-  lt_SP2NL='tr \040 \012'
+-  lt_NL2SP='tr \015\012 \040\040'
+-  ;;
+- *) # EBCDIC based system
+-  lt_SP2NL='tr \100 \n'
+-  lt_NL2SP='tr \r\n \100\100'
+-  ;;
+-esac
+-_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl
+-_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl
+-])# _LT_CHECK_SHELL_FEATURES
+-
+-
+-# _LT_PATH_CONVERSION_FUNCTIONS
+-# -----------------------------
+-# Determine what file name conversion functions should be used by
+-# func_to_host_file (and, implicitly, by func_to_host_path).  These are needed
+-# for certain cross-compile configurations and native mingw.
+-m4_defun([_LT_PATH_CONVERSION_FUNCTIONS],
+-[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+-AC_MSG_CHECKING([how to convert $build file names to $host format])
+-AC_CACHE_VAL(lt_cv_to_host_file_cmd,
+-[case $host in
+-  *-*-mingw* )
+-    case $build in
+-      *-*-mingw* ) # actually msys
+-        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+-        ;;
+-      *-*-cygwin* )
+-        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+-        ;;
+-      * ) # otherwise, assume *nix
+-        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+-        ;;
+-    esac
+-    ;;
+-  *-*-cygwin* )
+-    case $build in
+-      *-*-mingw* ) # actually msys
+-        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+-        ;;
+-      *-*-cygwin* )
+-        lt_cv_to_host_file_cmd=func_convert_file_noop
+-        ;;
+-      * ) # otherwise, assume *nix
+-        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+-        ;;
+-    esac
+-    ;;
+-  * ) # unhandled hosts (and "normal" native builds)
+-    lt_cv_to_host_file_cmd=func_convert_file_noop
+-    ;;
+-esac
+-])
+-to_host_file_cmd=$lt_cv_to_host_file_cmd
+-AC_MSG_RESULT([$lt_cv_to_host_file_cmd])
+-_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd],
+-         [0], [convert $build file names to $host format])dnl
+-
+-AC_MSG_CHECKING([how to convert $build file names to toolchain format])
+-AC_CACHE_VAL(lt_cv_to_tool_file_cmd,
+-[#assume ordinary cross tools, or native build.
+-lt_cv_to_tool_file_cmd=func_convert_file_noop
+-case $host in
+-  *-*-mingw* )
+-    case $build in
+-      *-*-mingw* ) # actually msys
+-        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+-        ;;
+-    esac
+-    ;;
+-esac
+-])
+-to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+-AC_MSG_RESULT([$lt_cv_to_tool_file_cmd])
+-_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd],
+-         [0], [convert $build files to toolchain format])dnl
+-])# _LT_PATH_CONVERSION_FUNCTIONS
+-
+-# Helper functions for option handling.                    -*- Autoconf -*-
+-#
+-#   Copyright (C) 2004-2005, 2007-2009, 2011-2019, 2021-2022 Free
+-#   Software Foundation, Inc.
+-#   Written by Gary V. Vaughan, 2004
+-#
+-# This file is free software; the Free Software Foundation gives
+-# unlimited permission to copy and/or distribute it, with or without
+-# modifications, as long as this notice is preserved.
+-
+-# serial 8 ltoptions.m4
+-
+-# This is to help aclocal find these macros, as it can't see m4_define.
+-AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])])
+-
+-
+-# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME)
+-# ------------------------------------------
+-m4_define([_LT_MANGLE_OPTION],
+-[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])])
+-
+-
+-# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME)
+-# ---------------------------------------
+-# Set option OPTION-NAME for macro MACRO-NAME, and if there is a
+-# matching handler defined, dispatch to it.  Other OPTION-NAMEs are
+-# saved as a flag.
+-m4_define([_LT_SET_OPTION],
+-[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl
+-m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]),
+-        _LT_MANGLE_DEFUN([$1], [$2]),
+-    [m4_warning([Unknown $1 option '$2'])])[]dnl
+-])
+-
+-
+-# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET])
+-# ------------------------------------------------------------
+-# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise.
+-m4_define([_LT_IF_OPTION],
+-[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])])
+-
+-
+-# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET)
+-# -------------------------------------------------------
+-# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME
+-# are set.
+-m4_define([_LT_UNLESS_OPTIONS],
+-[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+-	    [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option),
+-		      [m4_define([$0_found])])])[]dnl
+-m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3
+-])[]dnl
+-])
+-
+-
+-# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST)
+-# ----------------------------------------
+-# OPTION-LIST is a space-separated list of Libtool options associated
+-# with MACRO-NAME.  If any OPTION has a matching handler declared with
+-# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about
+-# the unknown option and exit.
+-m4_defun([_LT_SET_OPTIONS],
+-[# Set options
+-m4_foreach([_LT_Option], m4_split(m4_normalize([$2])),
+-    [_LT_SET_OPTION([$1], _LT_Option)])
+-
+-m4_if([$1],[LT_INIT],[
+-  dnl
+-  dnl Simply set some default values (i.e off) if boolean options were not
+-  dnl specified:
+-  _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no
+-  ])
+-  _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no
+-  ])
+-  dnl
+-  dnl If no reference was made to various pairs of opposing options, then
+-  dnl we run the default mode handler for the pair.  For example, if neither
+-  dnl 'shared' nor 'disable-shared' was passed, we enable building of shared
+-  dnl archives by default:
+-  _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED])
+-  _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC])
+-  _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC])
+-  _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install],
+-		   [_LT_ENABLE_FAST_INSTALL])
+-  _LT_UNLESS_OPTIONS([LT_INIT], [aix-soname=aix aix-soname=both aix-soname=svr4],
+-		   [_LT_WITH_AIX_SONAME([aix])])
+-  ])
+-])# _LT_SET_OPTIONS
+-
+-
+-
+-# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME)
+-# -----------------------------------------
+-m4_define([_LT_MANGLE_DEFUN],
+-[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])])
+-
+-
+-# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE)
+-# -----------------------------------------------
+-m4_define([LT_OPTION_DEFINE],
+-[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl
+-])# LT_OPTION_DEFINE
+-
+-
+-# dlopen
+-# ------
+-LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes
+-])
+-
+-AU_DEFUN([AC_LIBTOOL_DLOPEN],
+-[_LT_SET_OPTION([LT_INIT], [dlopen])
+-AC_DIAGNOSE([obsolete],
+-[$0: Remove this warning and the call to _LT_SET_OPTION when you
+-put the 'dlopen' option into LT_INIT's first parameter.])
+-])
+-
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], [])
+-
+-
+-# win32-dll
+-# ---------
+-# Declare package support for building win32 dll's.
+-LT_OPTION_DEFINE([LT_INIT], [win32-dll],
+-[enable_win32_dll=yes
+-
+-case $host in
+-*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*)
+-  AC_CHECK_TOOL(AS, as, false)
+-  AC_CHECK_TOOL(DLLTOOL, dlltool, false)
+-  AC_CHECK_TOOL(OBJDUMP, objdump, false)
+-  ;;
+-esac
+-
+-test -z "$AS" && AS=as
+-_LT_DECL([], [AS],      [1], [Assembler program])dnl
+-
+-test -z "$DLLTOOL" && DLLTOOL=dlltool
+-_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl
+-
+-test -z "$OBJDUMP" && OBJDUMP=objdump
+-_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl
+-])# win32-dll
+-
+-AU_DEFUN([AC_LIBTOOL_WIN32_DLL],
+-[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+-_LT_SET_OPTION([LT_INIT], [win32-dll])
+-AC_DIAGNOSE([obsolete],
+-[$0: Remove this warning and the call to _LT_SET_OPTION when you
+-put the 'win32-dll' option into LT_INIT's first parameter.])
+-])
+-
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], [])
+-
+-
+-# _LT_ENABLE_SHARED([DEFAULT])
+-# ----------------------------
+-# implement the --enable-shared flag, and supports the 'shared' and
+-# 'disable-shared' LT_INIT options.
+-# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
+-m4_define([_LT_ENABLE_SHARED],
+-[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl
+-AC_ARG_ENABLE([shared],
+-    [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@],
+-	[build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])],
+-    [p=${PACKAGE-default}
+-    case $enableval in
+-    yes) enable_shared=yes ;;
+-    no) enable_shared=no ;;
+-    *)
+-      enable_shared=no
+-      # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+-      for pkg in $enableval; do
+-	IFS=$lt_save_ifs
+-	if test "X$pkg" = "X$p"; then
+-	  enable_shared=yes
+-	fi
+-      done
+-      IFS=$lt_save_ifs
+-      ;;
+-    esac],
+-    [enable_shared=]_LT_ENABLE_SHARED_DEFAULT)
+-
+-    _LT_DECL([build_libtool_libs], [enable_shared], [0],
+-	[Whether or not to build shared libraries])
+-])# _LT_ENABLE_SHARED
+-
+-LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])])
+-LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])])
+-
+-# Old names:
+-AC_DEFUN([AC_ENABLE_SHARED],
+-[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared])
+-])
+-
+-AC_DEFUN([AC_DISABLE_SHARED],
+-[_LT_SET_OPTION([LT_INIT], [disable-shared])
+-])
+-
+-AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)])
+-AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)])
+-
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AM_ENABLE_SHARED], [])
+-dnl AC_DEFUN([AM_DISABLE_SHARED], [])
+-
+-
+-
+-# _LT_ENABLE_STATIC([DEFAULT])
+-# ----------------------------
+-# implement the --enable-static flag, and support the 'static' and
+-# 'disable-static' LT_INIT options.
+-# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
+-m4_define([_LT_ENABLE_STATIC],
+-[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl
+-AC_ARG_ENABLE([static],
+-    [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@],
+-	[build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])],
+-    [p=${PACKAGE-default}
+-    case $enableval in
+-    yes) enable_static=yes ;;
+-    no) enable_static=no ;;
+-    *)
+-     enable_static=no
+-      # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+-      for pkg in $enableval; do
+-	IFS=$lt_save_ifs
+-	if test "X$pkg" = "X$p"; then
+-	  enable_static=yes
+-	fi
+-      done
+-      IFS=$lt_save_ifs
+-      ;;
+-    esac],
+-    [enable_static=]_LT_ENABLE_STATIC_DEFAULT)
+-
+-    _LT_DECL([build_old_libs], [enable_static], [0],
+-	[Whether or not to build static libraries])
+-])# _LT_ENABLE_STATIC
+-
+-LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])])
+-LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])])
+-
+-# Old names:
+-AC_DEFUN([AC_ENABLE_STATIC],
+-[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static])
+-])
+-
+-AC_DEFUN([AC_DISABLE_STATIC],
+-[_LT_SET_OPTION([LT_INIT], [disable-static])
+-])
+-
+-AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)])
+-AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)])
+-
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AM_ENABLE_STATIC], [])
+-dnl AC_DEFUN([AM_DISABLE_STATIC], [])
+-
+-
+-
+-# _LT_ENABLE_FAST_INSTALL([DEFAULT])
+-# ----------------------------------
+-# implement the --enable-fast-install flag, and support the 'fast-install'
+-# and 'disable-fast-install' LT_INIT options.
+-# DEFAULT is either 'yes' or 'no'.  If omitted, it defaults to 'yes'.
+-m4_define([_LT_ENABLE_FAST_INSTALL],
+-[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl
+-AC_ARG_ENABLE([fast-install],
+-    [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@],
+-    [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])],
+-    [p=${PACKAGE-default}
+-    case $enableval in
+-    yes) enable_fast_install=yes ;;
+-    no) enable_fast_install=no ;;
+-    *)
+-      enable_fast_install=no
+-      # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+-      for pkg in $enableval; do
+-	IFS=$lt_save_ifs
+-	if test "X$pkg" = "X$p"; then
+-	  enable_fast_install=yes
+-	fi
+-      done
+-      IFS=$lt_save_ifs
+-      ;;
+-    esac],
+-    [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT)
+-
+-_LT_DECL([fast_install], [enable_fast_install], [0],
+-	 [Whether or not to optimize for fast installation])dnl
+-])# _LT_ENABLE_FAST_INSTALL
+-
+-LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])])
+-LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])])
+-
+-# Old names:
+-AU_DEFUN([AC_ENABLE_FAST_INSTALL],
+-[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install])
+-AC_DIAGNOSE([obsolete],
+-[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+-the 'fast-install' option into LT_INIT's first parameter.])
+-])
+-
+-AU_DEFUN([AC_DISABLE_FAST_INSTALL],
+-[_LT_SET_OPTION([LT_INIT], [disable-fast-install])
+-AC_DIAGNOSE([obsolete],
+-[$0: Remove this warning and the call to _LT_SET_OPTION when you put
+-the 'disable-fast-install' option into LT_INIT's first parameter.])
+-])
+-
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], [])
+-dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], [])
+-
+-
+-# _LT_WITH_AIX_SONAME([DEFAULT])
+-# ----------------------------------
+-# implement the --with-aix-soname flag, and support the `aix-soname=aix'
+-# and `aix-soname=both' and `aix-soname=svr4' LT_INIT options. DEFAULT
+-# is either `aix', `both' or `svr4'.  If omitted, it defaults to `aix'.
+-m4_define([_LT_WITH_AIX_SONAME],
+-[m4_define([_LT_WITH_AIX_SONAME_DEFAULT], [m4_if($1, svr4, svr4, m4_if($1, both, both, aix))])dnl
+-shared_archive_member_spec=
+-case $host,$enable_shared in
+-power*-*-aix[[5-9]]*,yes)
+-  AC_MSG_CHECKING([which variant of shared library versioning to provide])
+-  AC_ARG_WITH([aix-soname],
+-    [AS_HELP_STRING([--with-aix-soname=aix|svr4|both],
+-      [shared library versioning (aka "SONAME") variant to provide on AIX, @<:@default=]_LT_WITH_AIX_SONAME_DEFAULT[@:>@.])],
+-    [case $withval in
+-    aix|svr4|both)
+-      ;;
+-    *)
+-      AC_MSG_ERROR([Unknown argument to --with-aix-soname])
+-      ;;
+-    esac
+-    lt_cv_with_aix_soname=$with_aix_soname],
+-    [AC_CACHE_VAL([lt_cv_with_aix_soname],
+-      [lt_cv_with_aix_soname=]_LT_WITH_AIX_SONAME_DEFAULT)
+-    with_aix_soname=$lt_cv_with_aix_soname])
+-  AC_MSG_RESULT([$with_aix_soname])
+-  if test aix != "$with_aix_soname"; then
+-    # For the AIX way of multilib, we name the shared archive member
+-    # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o',
+-    # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File.
+-    # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag,
+-    # the AIX toolchain works better with OBJECT_MODE set (default 32).
+-    if test 64 = "${OBJECT_MODE-32}"; then
+-      shared_archive_member_spec=shr_64
+-    else
+-      shared_archive_member_spec=shr
+-    fi
+-  fi
+-  ;;
+-*)
+-  with_aix_soname=aix
+-  ;;
+-esac
+-
+-_LT_DECL([], [shared_archive_member_spec], [0],
+-    [Shared archive member basename, for filename based shared library versioning on AIX])dnl
+-])# _LT_WITH_AIX_SONAME
+-
+-LT_OPTION_DEFINE([LT_INIT], [aix-soname=aix], [_LT_WITH_AIX_SONAME([aix])])
+-LT_OPTION_DEFINE([LT_INIT], [aix-soname=both], [_LT_WITH_AIX_SONAME([both])])
+-LT_OPTION_DEFINE([LT_INIT], [aix-soname=svr4], [_LT_WITH_AIX_SONAME([svr4])])
+-
+-
+-# _LT_WITH_PIC([MODE])
+-# --------------------
+-# implement the --with-pic flag, and support the 'pic-only' and 'no-pic'
+-# LT_INIT options.
+-# MODE is either 'yes' or 'no'.  If omitted, it defaults to 'both'.
+-m4_define([_LT_WITH_PIC],
+-[AC_ARG_WITH([pic],
+-    [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@],
+-	[try to use only PIC/non-PIC objects @<:@default=use both@:>@])],
+-    [lt_p=${PACKAGE-default}
+-    case $withval in
+-    yes|no) pic_mode=$withval ;;
+-    *)
+-      pic_mode=default
+-      # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+-      for lt_pkg in $withval; do
+-	IFS=$lt_save_ifs
+-	if test "X$lt_pkg" = "X$lt_p"; then
+-	  pic_mode=yes
+-	fi
+-      done
+-      IFS=$lt_save_ifs
+-      ;;
+-    esac],
+-    [pic_mode=m4_default([$1], [default])])
+-
+-_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl
+-])# _LT_WITH_PIC
+-
+-LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])])
+-LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])])
+-
+-# Old name:
+-AU_DEFUN([AC_LIBTOOL_PICMODE],
+-[_LT_SET_OPTION([LT_INIT], [pic-only])
+-AC_DIAGNOSE([obsolete],
+-[$0: Remove this warning and the call to _LT_SET_OPTION when you
+-put the 'pic-only' option into LT_INIT's first parameter.])
+-])
+-
+-dnl aclocal-1.4 backwards compatibility:
+-dnl AC_DEFUN([AC_LIBTOOL_PICMODE], [])
+-
+-
+-m4_define([_LTDL_MODE], [])
+-LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive],
+-		 [m4_define([_LTDL_MODE], [nonrecursive])])
+-LT_OPTION_DEFINE([LTDL_INIT], [recursive],
+-		 [m4_define([_LTDL_MODE], [recursive])])
+-LT_OPTION_DEFINE([LTDL_INIT], [subproject],
+-		 [m4_define([_LTDL_MODE], [subproject])])
+-
+-m4_define([_LTDL_TYPE], [])
+-LT_OPTION_DEFINE([LTDL_INIT], [installable],
+-		 [m4_define([_LTDL_TYPE], [installable])])
+-LT_OPTION_DEFINE([LTDL_INIT], [convenience],
+-		 [m4_define([_LTDL_TYPE], [convenience])])
+-
+-# ltsugar.m4 -- libtool m4 base layer.                         -*-Autoconf-*-
+-#
+-# Copyright (C) 2004-2005, 2007-2008, 2011-2019, 2021-2022 Free Software
+-# Foundation, Inc.
+-# Written by Gary V. Vaughan, 2004
+-#
+-# This file is free software; the Free Software Foundation gives
+-# unlimited permission to copy and/or distribute it, with or without
+-# modifications, as long as this notice is preserved.
+-
+-# serial 6 ltsugar.m4
+-
+-# This is to help aclocal find these macros, as it can't see m4_define.
+-AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])])
+-
+-
+-# lt_join(SEP, ARG1, [ARG2...])
+-# -----------------------------
+-# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their
+-# associated separator.
+-# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier
+-# versions in m4sugar had bugs.
+-m4_define([lt_join],
+-[m4_if([$#], [1], [],
+-       [$#], [2], [[$2]],
+-       [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])])
+-m4_define([_lt_join],
+-[m4_if([$#$2], [2], [],
+-       [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])])
+-
+-
+-# lt_car(LIST)
+-# lt_cdr(LIST)
+-# ------------
+-# Manipulate m4 lists.
+-# These macros are necessary as long as will still need to support
+-# Autoconf-2.59, which quotes differently.
+-m4_define([lt_car], [[$1]])
+-m4_define([lt_cdr],
+-[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])],
+-       [$#], 1, [],
+-       [m4_dquote(m4_shift($@))])])
+-m4_define([lt_unquote], $1)
+-
+-
+-# lt_append(MACRO-NAME, STRING, [SEPARATOR])
+-# ------------------------------------------
+-# Redefine MACRO-NAME to hold its former content plus 'SEPARATOR''STRING'.
+-# Note that neither SEPARATOR nor STRING are expanded; they are appended
+-# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked).
+-# No SEPARATOR is output if MACRO-NAME was previously undefined (different
+-# than defined and empty).
+-#
+-# This macro is needed until we can rely on Autoconf 2.62, since earlier
+-# versions of m4sugar mistakenly expanded SEPARATOR but not STRING.
+-m4_define([lt_append],
+-[m4_define([$1],
+-	   m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])])
+-
+-
+-
+-# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...])
+-# ----------------------------------------------------------
+-# Produce a SEP delimited list of all paired combinations of elements of
+-# PREFIX-LIST with SUFFIX1 through SUFFIXn.  Each element of the list
+-# has the form PREFIXmINFIXSUFFIXn.
+-# Needed until we can rely on m4_combine added in Autoconf 2.62.
+-m4_define([lt_combine],
+-[m4_if(m4_eval([$# > 3]), [1],
+-       [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl
+-[[m4_foreach([_Lt_prefix], [$2],
+-	     [m4_foreach([_Lt_suffix],
+-		]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[,
+-	[_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])])
+-
+-
+-# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ])
+-# -----------------------------------------------------------------------
+-# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited
+-# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ.
+-m4_define([lt_if_append_uniq],
+-[m4_ifdef([$1],
+-	  [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1],
+-		 [lt_append([$1], [$2], [$3])$4],
+-		 [$5])],
+-	  [lt_append([$1], [$2], [$3])$4])])
+-
+-
+-# lt_dict_add(DICT, KEY, VALUE)
+-# -----------------------------
+-m4_define([lt_dict_add],
+-[m4_define([$1($2)], [$3])])
+-
+-
+-# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE)
+-# --------------------------------------------
+-m4_define([lt_dict_add_subkey],
+-[m4_define([$1($2:$3)], [$4])])
+-
+-
+-# lt_dict_fetch(DICT, KEY, [SUBKEY])
+-# ----------------------------------
+-m4_define([lt_dict_fetch],
+-[m4_ifval([$3],
+-	m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]),
+-    m4_ifdef([$1($2)], [m4_defn([$1($2)])]))])
+-
+-
+-# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE])
+-# -----------------------------------------------------------------
+-m4_define([lt_if_dict_fetch],
+-[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4],
+-	[$5],
+-    [$6])])
+-
+-
+-# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...])
+-# --------------------------------------------------------------
+-m4_define([lt_dict_filter],
+-[m4_if([$5], [], [],
+-  [lt_join(m4_quote(m4_default([$4], [[, ]])),
+-           lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]),
+-		      [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl
+-])
+-
+-# ltversion.m4 -- version numbers			-*- Autoconf -*-
+-#
+-#   Copyright (C) 2004, 2011-2019, 2021-2022 Free Software Foundation,
+-#   Inc.
+-#   Written by Scott James Remnant, 2004
+-#
+-# This file is free software; the Free Software Foundation gives
+-# unlimited permission to copy and/or distribute it, with or without
+-# modifications, as long as this notice is preserved.
+-
+-# @configure_input@
+-
+-# serial 4245 ltversion.m4
+-# This file is part of GNU Libtool
+-
+-m4_define([LT_PACKAGE_VERSION], [2.4.7])
+-m4_define([LT_PACKAGE_REVISION], [2.4.7])
+-
+-AC_DEFUN([LTVERSION_VERSION],
+-[macro_version='2.4.7'
+-macro_revision='2.4.7'
+-_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?])
+-_LT_DECL(, macro_revision, 0)
+-])
+-
+-# lt~obsolete.m4 -- aclocal satisfying obsolete definitions.    -*-Autoconf-*-
+-#
+-#   Copyright (C) 2004-2005, 2007, 2009, 2011-2019, 2021-2022 Free
+-#   Software Foundation, Inc.
+-#   Written by Scott James Remnant, 2004.
+-#
+-# This file is free software; the Free Software Foundation gives
+-# unlimited permission to copy and/or distribute it, with or without
+-# modifications, as long as this notice is preserved.
+-
+-# serial 5 lt~obsolete.m4
+-
+-# These exist entirely to fool aclocal when bootstrapping libtool.
+-#
+-# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN),
+-# which have later been changed to m4_define as they aren't part of the
+-# exported API, or moved to Autoconf or Automake where they belong.
+-#
+-# The trouble is, aclocal is a bit thick.  It'll see the old AC_DEFUN
+-# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us
+-# using a macro with the same name in our local m4/libtool.m4 it'll
+-# pull the old libtool.m4 in (it doesn't see our shiny new m4_define
+-# and doesn't know about Autoconf macros at all.)
+-#
+-# So we provide this file, which has a silly filename so it's always
+-# included after everything else.  This provides aclocal with the
+-# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything
+-# because those macros already exist, or will be overwritten later.
+-# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6.
+-#
+-# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here.
+-# Yes, that means every name once taken will need to remain here until
+-# we give up compatibility with versions before 1.7, at which point
+-# we need to keep only those names which we still refer to.
+-
+-# This is to help aclocal find these macros, as it can't see m4_define.
+-AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])])
+-
+-m4_ifndef([AC_LIBTOOL_LINKER_OPTION],	[AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])])
+-m4_ifndef([AC_PROG_EGREP],		[AC_DEFUN([AC_PROG_EGREP])])
+-m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])])
+-m4_ifndef([_LT_AC_SHELL_INIT],		[AC_DEFUN([_LT_AC_SHELL_INIT])])
+-m4_ifndef([_LT_AC_SYS_LIBPATH_AIX],	[AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])])
+-m4_ifndef([_LT_PROG_LTMAIN],		[AC_DEFUN([_LT_PROG_LTMAIN])])
+-m4_ifndef([_LT_AC_TAGVAR],		[AC_DEFUN([_LT_AC_TAGVAR])])
+-m4_ifndef([AC_LTDL_ENABLE_INSTALL],	[AC_DEFUN([AC_LTDL_ENABLE_INSTALL])])
+-m4_ifndef([AC_LTDL_PREOPEN],		[AC_DEFUN([AC_LTDL_PREOPEN])])
+-m4_ifndef([_LT_AC_SYS_COMPILER],	[AC_DEFUN([_LT_AC_SYS_COMPILER])])
+-m4_ifndef([_LT_AC_LOCK],		[AC_DEFUN([_LT_AC_LOCK])])
+-m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE],	[AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])])
+-m4_ifndef([_LT_AC_TRY_DLOPEN_SELF],	[AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])])
+-m4_ifndef([AC_LIBTOOL_PROG_CC_C_O],	[AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])])
+-m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])])
+-m4_ifndef([AC_LIBTOOL_OBJDIR],		[AC_DEFUN([AC_LIBTOOL_OBJDIR])])
+-m4_ifndef([AC_LTDL_OBJDIR],		[AC_DEFUN([AC_LTDL_OBJDIR])])
+-m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])])
+-m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP],	[AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])])
+-m4_ifndef([AC_PATH_MAGIC],		[AC_DEFUN([AC_PATH_MAGIC])])
+-m4_ifndef([AC_PROG_LD_GNU],		[AC_DEFUN([AC_PROG_LD_GNU])])
+-m4_ifndef([AC_PROG_LD_RELOAD_FLAG],	[AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])])
+-m4_ifndef([AC_DEPLIBS_CHECK_METHOD],	[AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])])
+-m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])])
+-m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])])
+-m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])])
+-m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS],	[AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])])
+-m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP],	[AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])])
+-m4_ifndef([LT_AC_PROG_EGREP],		[AC_DEFUN([LT_AC_PROG_EGREP])])
+-m4_ifndef([LT_AC_PROG_SED],		[AC_DEFUN([LT_AC_PROG_SED])])
+-m4_ifndef([_LT_CC_BASENAME],		[AC_DEFUN([_LT_CC_BASENAME])])
+-m4_ifndef([_LT_COMPILER_BOILERPLATE],	[AC_DEFUN([_LT_COMPILER_BOILERPLATE])])
+-m4_ifndef([_LT_LINKER_BOILERPLATE],	[AC_DEFUN([_LT_LINKER_BOILERPLATE])])
+-m4_ifndef([_AC_PROG_LIBTOOL],		[AC_DEFUN([_AC_PROG_LIBTOOL])])
+-m4_ifndef([AC_LIBTOOL_SETUP],		[AC_DEFUN([AC_LIBTOOL_SETUP])])
+-m4_ifndef([_LT_AC_CHECK_DLFCN],		[AC_DEFUN([_LT_AC_CHECK_DLFCN])])
+-m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER],	[AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])])
+-m4_ifndef([_LT_AC_TAGCONFIG],		[AC_DEFUN([_LT_AC_TAGCONFIG])])
+-m4_ifndef([AC_DISABLE_FAST_INSTALL],	[AC_DEFUN([AC_DISABLE_FAST_INSTALL])])
+-m4_ifndef([_LT_AC_LANG_CXX],		[AC_DEFUN([_LT_AC_LANG_CXX])])
+-m4_ifndef([_LT_AC_LANG_F77],		[AC_DEFUN([_LT_AC_LANG_F77])])
+-m4_ifndef([_LT_AC_LANG_GCJ],		[AC_DEFUN([_LT_AC_LANG_GCJ])])
+-m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])])
+-m4_ifndef([_LT_AC_LANG_C_CONFIG],	[AC_DEFUN([_LT_AC_LANG_C_CONFIG])])
+-m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])])
+-m4_ifndef([_LT_AC_LANG_CXX_CONFIG],	[AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])])
+-m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])])
+-m4_ifndef([_LT_AC_LANG_F77_CONFIG],	[AC_DEFUN([_LT_AC_LANG_F77_CONFIG])])
+-m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])])
+-m4_ifndef([_LT_AC_LANG_GCJ_CONFIG],	[AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])])
+-m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG],	[AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])])
+-m4_ifndef([_LT_AC_LANG_RC_CONFIG],	[AC_DEFUN([_LT_AC_LANG_RC_CONFIG])])
+-m4_ifndef([AC_LIBTOOL_CONFIG],		[AC_DEFUN([AC_LIBTOOL_CONFIG])])
+-m4_ifndef([_LT_AC_FILE_LTDLL_C],	[AC_DEFUN([_LT_AC_FILE_LTDLL_C])])
+-m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS],	[AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])])
+-m4_ifndef([_LT_AC_PROG_CXXCPP],		[AC_DEFUN([_LT_AC_PROG_CXXCPP])])
+-m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS],	[AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])])
+-m4_ifndef([_LT_PROG_ECHO_BACKSLASH],	[AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])])
+-m4_ifndef([_LT_PROG_F77],		[AC_DEFUN([_LT_PROG_F77])])
+-m4_ifndef([_LT_PROG_FC],		[AC_DEFUN([_LT_PROG_FC])])
+-m4_ifndef([_LT_PROG_CXX],		[AC_DEFUN([_LT_PROG_CXX])])
+-
+-# Copyright (C) 2002-2021 Free Software Foundation, Inc.
++# Copyright (C) 2002-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9099,7 +35,7 @@ AC_DEFUN([AM_AUTOMAKE_VERSION],
+ [am__api_version='1.16'
+ dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to
+ dnl require some minimum version.  Point them to the right macro.
+-m4_if([$1], [1.16.5], [],
++m4_if([$1], [1.16.2], [],
+       [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl
+ ])
+ 
+@@ -9115,14 +51,14 @@ m4_define([_AM_AUTOCONF_VERSION], [])
+ # Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced.
+ # This function is AC_REQUIREd by AM_INIT_AUTOMAKE.
+ AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION],
+-[AM_AUTOMAKE_VERSION([1.16.5])dnl
++[AM_AUTOMAKE_VERSION([1.16.2])dnl
+ m4_ifndef([AC_AUTOCONF_VERSION],
+   [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl
+ _AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))])
+ 
+ # AM_AUX_DIR_EXPAND                                         -*- Autoconf -*-
+ 
+-# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++# Copyright (C) 2001-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9174,7 +110,7 @@ am_aux_dir=`cd "$ac_aux_dir" && pwd`
+ 
+ # AM_CONDITIONAL                                            -*- Autoconf -*-
+ 
+-# Copyright (C) 1997-2021 Free Software Foundation, Inc.
++# Copyright (C) 1997-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9205,7 +141,7 @@ AC_CONFIG_COMMANDS_PRE(
+ Usually this means the macro was only invoked conditionally.]])
+ fi])])
+ 
+-# Copyright (C) 1999-2021 Free Software Foundation, Inc.
++# Copyright (C) 1999-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9396,7 +332,7 @@ _AM_SUBST_NOTMAKE([am__nodep])dnl
+ 
+ # Generate code to set up dependency tracking.              -*- Autoconf -*-
+ 
+-# Copyright (C) 1999-2021 Free Software Foundation, Inc.
++# Copyright (C) 1999-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9464,7 +400,7 @@ AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS],
+ 
+ # Do all the work for Automake.                             -*- Autoconf -*-
+ 
+-# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++# Copyright (C) 1996-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9492,10 +428,6 @@ m4_defn([AC_PROG_CC])
+ # release and drop the old call support.
+ AC_DEFUN([AM_INIT_AUTOMAKE],
+ [AC_PREREQ([2.65])dnl
+-m4_ifdef([_$0_ALREADY_INIT],
+-  [m4_fatal([$0 expanded multiple times
+-]m4_defn([_$0_ALREADY_INIT]))],
+-  [m4_define([_$0_ALREADY_INIT], m4_expansion_stack)])dnl
+ dnl Autoconf wants to disallow AM_ names.  We explicitly allow
+ dnl the ones we care about.
+ m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl
+@@ -9532,7 +464,7 @@ m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl
+ [_AM_SET_OPTIONS([$1])dnl
+ dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT.
+ m4_if(
+-  m4_ifset([AC_PACKAGE_NAME], [ok]):m4_ifset([AC_PACKAGE_VERSION], [ok]),
++  m4_ifdef([AC_PACKAGE_NAME], [ok]):m4_ifdef([AC_PACKAGE_VERSION], [ok]),
+   [ok:ok],,
+   [m4_fatal([AC_INIT should be called with package and version arguments])])dnl
+  AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl
+@@ -9584,20 +516,6 @@ AC_PROVIDE_IFELSE([AC_PROG_OBJCXX],
+ 		  [m4_define([AC_PROG_OBJCXX],
+ 			     m4_defn([AC_PROG_OBJCXX])[_AM_DEPENDENCIES([OBJCXX])])])dnl
+ ])
+-# Variables for tags utilities; see am/tags.am
+-if test -z "$CTAGS"; then
+-  CTAGS=ctags
+-fi
+-AC_SUBST([CTAGS])
+-if test -z "$ETAGS"; then
+-  ETAGS=etags
+-fi
+-AC_SUBST([ETAGS])
+-if test -z "$CSCOPE"; then
+-  CSCOPE=cscope
+-fi
+-AC_SUBST([CSCOPE])
+-
+ AC_REQUIRE([AM_SILENT_RULES])dnl
+ dnl The testsuite driver may need to know about EXEEXT, so add the
+ dnl 'am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen.  This
+@@ -9679,7 +597,7 @@ for _am_header in $config_headers :; do
+ done
+ echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count])
+ 
+-# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++# Copyright (C) 2001-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9700,29 +618,10 @@ if test x"${install_sh+set}" != xset; then
+ fi
+ AC_SUBST([install_sh])])
+ 
+-# Copyright (C) 2003-2021 Free Software Foundation, Inc.
+-#
+-# This file is free software; the Free Software Foundation
+-# gives unlimited permission to copy and/or distribute it,
+-# with or without modifications, as long as this notice is preserved.
+-
+-# Check whether the underlying file-system supports filenames
+-# with a leading dot.  For instance MS-DOS doesn't.
+-AC_DEFUN([AM_SET_LEADING_DOT],
+-[rm -rf .tst 2>/dev/null
+-mkdir .tst 2>/dev/null
+-if test -d .tst; then
+-  am__leading_dot=.
+-else
+-  am__leading_dot=_
+-fi
+-rmdir .tst 2>/dev/null
+-AC_SUBST([am__leading_dot])])
+-
+ # Add --enable-maintainer-mode option to configure.         -*- Autoconf -*-
+ # From Jim Meyering
+ 
+-# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++# Copyright (C) 1996-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9757,7 +656,7 @@ AC_MSG_CHECKING([whether to enable maintainer-specific portions of Makefiles])
+ 
+ # Check to see how 'make' treats includes.	            -*- Autoconf -*-
+ 
+-# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++# Copyright (C) 2001-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9800,7 +699,7 @@ AC_SUBST([am__quote])])
+ 
+ # Fake the existence of programs that GNU maintainers use.  -*- Autoconf -*-
+ 
+-# Copyright (C) 1997-2021 Free Software Foundation, Inc.
++# Copyright (C) 1997-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9821,7 +720,12 @@ AC_DEFUN([AM_MISSING_HAS_RUN],
+ [AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl
+ AC_REQUIRE_AUX_FILE([missing])dnl
+ if test x"${MISSING+set}" != xset; then
+-  MISSING="\${SHELL} '$am_aux_dir/missing'"
++  case $am_aux_dir in
++  *\ * | *\	*)
++    MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
++  *)
++    MISSING="\${SHELL} $am_aux_dir/missing" ;;
++  esac
+ fi
+ # Use eval to expand $SHELL
+ if eval "$MISSING --is-lightweight"; then
+@@ -9834,7 +738,7 @@ fi
+ 
+ # Helper functions for option handling.                     -*- Autoconf -*-
+ 
+-# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++# Copyright (C) 2001-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9863,7 +767,7 @@ AC_DEFUN([_AM_SET_OPTIONS],
+ AC_DEFUN([_AM_IF_OPTION],
+ [m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])])
+ 
+-# Copyright (C) 1999-2021 Free Software Foundation, Inc.
++# Copyright (C) 1999-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9910,7 +814,7 @@ AC_LANG_POP([C])])
+ # For backward compatibility.
+ AC_DEFUN_ONCE([AM_PROG_CC_C_O], [AC_REQUIRE([AC_PROG_CC])])
+ 
+-# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++# Copyright (C) 2001-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -9929,7 +833,7 @@ AC_DEFUN([AM_RUN_LOG],
+ 
+ # Check to make sure that the build environment is sane.    -*- Autoconf -*-
+ 
+-# Copyright (C) 1996-2021 Free Software Foundation, Inc.
++# Copyright (C) 1996-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -10010,7 +914,7 @@ AC_CONFIG_COMMANDS_PRE(
+ rm -f conftest.file
+ ])
+ 
+-# Copyright (C) 2009-2021 Free Software Foundation, Inc.
++# Copyright (C) 2009-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -10070,7 +974,7 @@ AC_SUBST([AM_BACKSLASH])dnl
+ _AM_SUBST_NOTMAKE([AM_BACKSLASH])dnl
+ ])
+ 
+-# Copyright (C) 2001-2021 Free Software Foundation, Inc.
++# Copyright (C) 2001-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -10098,7 +1002,7 @@ fi
+ INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+ AC_SUBST([INSTALL_STRIP_PROGRAM])])
+ 
+-# Copyright (C) 2006-2021 Free Software Foundation, Inc.
++# Copyright (C) 2006-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -10117,7 +1021,7 @@ AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)])
+ 
+ # Check how to create a tarball.                            -*- Autoconf -*-
+ 
+-# Copyright (C) 2004-2021 Free Software Foundation, Inc.
++# Copyright (C) 2004-2020 Free Software Foundation, Inc.
+ #
+ # This file is free software; the Free Software Foundation
+ # gives unlimited permission to copy and/or distribute it,
+@@ -10248,3 +1152,14 @@ AC_SUBST([am__tar])
+ AC_SUBST([am__untar])
+ ]) # _AM_PROG_TAR
+ 
++m4_include([../config/acx.m4])
++m4_include([../config/depstand.m4])
++m4_include([../config/lead-dot.m4])
++m4_include([../config/lthostflags.m4])
++m4_include([../config/override.m4])
++m4_include([../config/warnings.m4])
++m4_include([../libtool.m4])
++m4_include([../ltoptions.m4])
++m4_include([../ltsugar.m4])
++m4_include([../ltversion.m4])
++m4_include([../lt~obsolete.m4])
+diff --git a/bolt-plugin/config.h.in b/bolt-plugin/config.h.in
+index 9e9d316ec..ddbde7619 100644
+--- a/bolt-plugin/config.h.in
++++ b/bolt-plugin/config.h.in
+@@ -6,15 +6,12 @@
+ /* Define to 1 if you have the  header file. */
+ #undef HAVE_INTTYPES_H
+ 
+-/* Define to 1 if you have the  header file. */
+-#undef HAVE_MINIX_CONFIG_H
++/* Define to 1 if you have the  header file. */
++#undef HAVE_MEMORY_H
+ 
+ /* Define to 1 if you have the  header file. */
+ #undef HAVE_STDINT_H
+ 
+-/* Define to 1 if you have the  header file. */
+-#undef HAVE_STDIO_H
+-
+ /* Define to 1 if you have the  header file. */
+ #undef HAVE_STDLIB_H
+ 
+@@ -36,10 +33,8 @@
+ /* Define to 1 if you have the  header file. */
+ #undef HAVE_UNISTD_H
+ 
+-/* Define to 1 if you have the  header file. */
+-#undef HAVE_WCHAR_H
+-
+-/* Define to the sub-directory where libtool stores uninstalled libraries. */
++/* Define to the sub-directory in which libtool stores uninstalled libraries.
++   */
+ #undef LT_OBJDIR
+ 
+ /* Name of package */
+@@ -63,108 +58,55 @@
+ /* Define to the version of this package. */
+ #undef PACKAGE_VERSION
+ 
+-/* Define to 1 if all of the C90 standard headers exist (not just the ones
+-   required in a freestanding environment). This macro is provided for
+-   backward compatibility; new code need not use it. */
++/* Define to 1 if you have the ANSI C header files. */
+ #undef STDC_HEADERS
+ 
+ /* Enable extensions on AIX 3, Interix.  */
+ #ifndef _ALL_SOURCE
+ # undef _ALL_SOURCE
+ #endif
+-/* Enable general extensions on macOS.  */
+-#ifndef _DARWIN_C_SOURCE
+-# undef _DARWIN_C_SOURCE
+-#endif
+-/* Enable general extensions on Solaris.  */
+-#ifndef __EXTENSIONS__
+-# undef __EXTENSIONS__
+-#endif
+ /* Enable GNU extensions on systems that have them.  */
+ #ifndef _GNU_SOURCE
+ # undef _GNU_SOURCE
+ #endif
+-/* Enable X/Open compliant socket functions that do not require linking
+-   with -lxnet on HP-UX 11.11.  */
+-#ifndef _HPUX_ALT_XOPEN_SOCKET_API
+-# undef _HPUX_ALT_XOPEN_SOCKET_API
+-#endif
+-/* Identify the host operating system as Minix.
+-   This macro does not affect the system headers' behavior.
+-   A future release of Autoconf may stop defining this macro.  */
+-#ifndef _MINIX
+-# undef _MINIX
+-#endif
+-/* Enable general extensions on NetBSD.
+-   Enable NetBSD compatibility extensions on Minix.  */
+-#ifndef _NETBSD_SOURCE
+-# undef _NETBSD_SOURCE
+-#endif
+-/* Enable OpenBSD compatibility extensions on NetBSD.
+-   Oddly enough, this does nothing on OpenBSD.  */
+-#ifndef _OPENBSD_SOURCE
+-# undef _OPENBSD_SOURCE
+-#endif
+-/* Define to 1 if needed for POSIX-compatible behavior.  */
+-#ifndef _POSIX_SOURCE
+-# undef _POSIX_SOURCE
+-#endif
+-/* Define to 2 if needed for POSIX-compatible behavior.  */
+-#ifndef _POSIX_1_SOURCE
+-# undef _POSIX_1_SOURCE
+-#endif
+-/* Enable POSIX-compatible threading on Solaris.  */
++/* Enable threading extensions on Solaris.  */
+ #ifndef _POSIX_PTHREAD_SEMANTICS
+ # undef _POSIX_PTHREAD_SEMANTICS
+ #endif
+-/* Enable extensions specified by ISO/IEC TS 18661-5:2014.  */
+-#ifndef __STDC_WANT_IEC_60559_ATTRIBS_EXT__
+-# undef __STDC_WANT_IEC_60559_ATTRIBS_EXT__
+-#endif
+-/* Enable extensions specified by ISO/IEC TS 18661-1:2014.  */
+-#ifndef __STDC_WANT_IEC_60559_BFP_EXT__
+-# undef __STDC_WANT_IEC_60559_BFP_EXT__
+-#endif
+-/* Enable extensions specified by ISO/IEC TS 18661-2:2015.  */
+-#ifndef __STDC_WANT_IEC_60559_DFP_EXT__
+-# undef __STDC_WANT_IEC_60559_DFP_EXT__
+-#endif
+-/* Enable extensions specified by ISO/IEC TS 18661-4:2015.  */
+-#ifndef __STDC_WANT_IEC_60559_FUNCS_EXT__
+-# undef __STDC_WANT_IEC_60559_FUNCS_EXT__
+-#endif
+-/* Enable extensions specified by ISO/IEC TS 18661-3:2015.  */
+-#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__
+-# undef __STDC_WANT_IEC_60559_TYPES_EXT__
+-#endif
+-/* Enable extensions specified by ISO/IEC TR 24731-2:2010.  */
+-#ifndef __STDC_WANT_LIB_EXT2__
+-# undef __STDC_WANT_LIB_EXT2__
+-#endif
+-/* Enable extensions specified by ISO/IEC 24747:2009.  */
+-#ifndef __STDC_WANT_MATH_SPEC_FUNCS__
+-# undef __STDC_WANT_MATH_SPEC_FUNCS__
+-#endif
+ /* Enable extensions on HP NonStop.  */
+ #ifndef _TANDEM_SOURCE
+ # undef _TANDEM_SOURCE
+ #endif
+-/* Enable X/Open extensions.  Define to 500 only if necessary
+-   to make mbstate_t available.  */
+-#ifndef _XOPEN_SOURCE
+-# undef _XOPEN_SOURCE
++/* Enable general extensions on Solaris.  */
++#ifndef __EXTENSIONS__
++# undef __EXTENSIONS__
+ #endif
+ 
+ 
+ /* Version number of package */
+ #undef VERSION
+ 
++/* Enable large inode numbers on Mac OS X 10.5.  */
++#ifndef _DARWIN_USE_64_BIT_INODE
++# define _DARWIN_USE_64_BIT_INODE 1
++#endif
++
+ /* Number of bits in a file offset, on hosts where this is settable. */
+ #undef _FILE_OFFSET_BITS
+ 
+ /* Define for large files, on AIX-style hosts. */
+ #undef _LARGE_FILES
+ 
++/* Define to 1 if on MINIX. */
++#undef _MINIX
++
++/* Define to 2 if the system does not provide POSIX.1 features except with
++   this defined. */
++#undef _POSIX_1_SOURCE
++
++/* Define to 1 if you need to in order for `stat' and other things to work. */
++#undef _POSIX_SOURCE
++
+ /* Define for Solaris 2.5.1 so the uint64_t typedef from ,
+    , or  is not used. If the typedef were allowed, the
+    #define below would cause a syntax error. */
+diff --git a/bolt-plugin/configure b/bolt-plugin/configure
+index 63bde9a41..78e7e57c3 100755
+--- a/bolt-plugin/configure
++++ b/bolt-plugin/configure
+@@ -1,10 +1,9 @@
+ #! /bin/sh
+ # Guess values for system-dependent variables and create Makefiles.
+-# Generated by GNU Autoconf 2.71 for bolt plugin for ld 0.1.
++# Generated by GNU Autoconf 2.69 for bolt plugin for ld 0.1.
+ #
+ #
+-# Copyright (C) 1992-1996, 1998-2017, 2020-2021 Free Software Foundation,
+-# Inc.
++# Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc.
+ #
+ #
+ # This configure script is free software; the Free Software Foundation
+@@ -15,16 +14,14 @@
+ 
+ # Be more Bourne compatible
+ DUALCASE=1; export DUALCASE # for MKS sh
+-as_nop=:
+-if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+-then :
++if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+   emulate sh
+   NULLCMD=:
+   # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+   # is contrary to our usage.  Disable this feature.
+   alias -g '${1+"$@"}'='"$@"'
+   setopt NO_GLOB_SUBST
+-else $as_nop
++else
+   case `(set -o) 2>/dev/null` in #(
+   *posix*) :
+     set -o posix ;; #(
+@@ -34,46 +31,46 @@ esac
+ fi
+ 
+ 
+-
+-# Reset variables that may have inherited troublesome values from
+-# the environment.
+-
+-# IFS needs to be set, to space, tab, and newline, in precisely that order.
+-# (If _AS_PATH_WALK were called with IFS unset, it would have the
+-# side effect of setting IFS to empty, thus disabling word splitting.)
+-# Quoting is to prevent editors from complaining about space-tab.
+ as_nl='
+ '
+ export as_nl
+-IFS=" ""	$as_nl"
+-
+-PS1='$ '
+-PS2='> '
+-PS4='+ '
+-
+-# Ensure predictable behavior from utilities with locale-dependent output.
+-LC_ALL=C
+-export LC_ALL
+-LANGUAGE=C
+-export LANGUAGE
+-
+-# We cannot yet rely on "unset" to work, but we need these variables
+-# to be unset--not just set to an empty or harmless value--now, to
+-# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh).  This construct
+-# also avoids known problems related to "unset" and subshell syntax
+-# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
+-for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
+-do eval test \${$as_var+y} \
+-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+-done
+-
+-# Ensure that fds 0, 1, and 2 are open.
+-if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
+-if (exec 3>&2)            ; then :; else exec 2>/dev/null; fi
++# Printing a long string crashes Solaris 7 /usr/bin/printf.
++as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
++# Prefer a ksh shell builtin over an external printf program on Solaris,
++# but without wasting forks for bash or zsh.
++if test -z "$BASH_VERSION$ZSH_VERSION" \
++    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
++  as_echo='print -r --'
++  as_echo_n='print -rn --'
++elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
++  as_echo='printf %s\n'
++  as_echo_n='printf %s'
++else
++  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
++    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
++    as_echo_n='/usr/ucb/echo -n'
++  else
++    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
++    as_echo_n_body='eval
++      arg=$1;
++      case $arg in #(
++      *"$as_nl"*)
++	expr "X$arg" : "X\\(.*\\)$as_nl";
++	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
++      esac;
++      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
++    '
++    export as_echo_n_body
++    as_echo_n='sh -c $as_echo_n_body as_echo'
++  fi
++  export as_echo_body
++  as_echo='sh -c $as_echo_body as_echo'
++fi
+ 
+ # The user is always right.
+-if ${PATH_SEPARATOR+false} :; then
++if test "${PATH_SEPARATOR+set}" != set; then
+   PATH_SEPARATOR=:
+   (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+     (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+@@ -82,6 +79,13 @@ if ${PATH_SEPARATOR+false} :; then
+ fi
+ 
+ 
++# IFS
++# We need space, tab and new line, in precisely that order.  Quoting is
++# there to prevent editors from complaining about space-tab.
++# (If _AS_PATH_WALK were called with IFS unset, it would disable word
++# splitting by setting IFS to empty value.)
++IFS=" ""	$as_nl"
++
+ # Find who we are.  Look in the path if we contain no directory separator.
+ as_myself=
+ case $0 in #((
+@@ -90,12 +94,8 @@ case $0 in #((
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    test -r "$as_dir$0" && as_myself=$as_dir$0 && break
++  test -z "$as_dir" && as_dir=.
++    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+   done
+ IFS=$as_save_IFS
+ 
+@@ -107,10 +107,30 @@ if test "x$as_myself" = x; then
+   as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+-  printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
++  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+   exit 1
+ fi
+ 
++# Unset variables that we do not need and which cause bugs (e.g. in
++# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
++# suppresses any "Segmentation fault" message there.  '((' could
++# trigger a bug in pdksh 5.2.14.
++for as_var in BASH_ENV ENV MAIL MAILPATH
++do eval test x\${$as_var+set} = xset \
++  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
++done
++PS1='$ '
++PS2='> '
++PS4='+ '
++
++# NLS nuisances.
++LC_ALL=C
++export LC_ALL
++LANGUAGE=C
++export LANGUAGE
++
++# CDPATH.
++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+ 
+ # Use a proper internal environment variable to ensure we don't fall
+   # into an infinite loop, continuously re-executing ourselves.
+@@ -132,22 +152,20 @@ esac
+ exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+ # Admittedly, this is quite paranoid, since all the known shells bail
+ # out after a failed `exec'.
+-printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
+-exit 255
++$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
++as_fn_exit 255
+   fi
+   # We don't want this to propagate to other subprocesses.
+           { _as_can_reexec=; unset _as_can_reexec;}
+ if test "x$CONFIG_SHELL" = x; then
+-  as_bourne_compatible="as_nop=:
+-if test \${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+-then :
++  as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then :
+   emulate sh
+   NULLCMD=:
+   # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which
+   # is contrary to our usage.  Disable this feature.
+   alias -g '\${1+\"\$@\"}'='\"\$@\"'
+   setopt NO_GLOB_SUBST
+-else \$as_nop
++else
+   case \`(set -o) 2>/dev/null\` in #(
+   *posix*) :
+     set -o posix ;; #(
+@@ -167,20 +185,18 @@ as_fn_success || { exitcode=1; echo as_fn_success failed.; }
+ as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; }
+ as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; }
+ as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; }
+-if ( set x; as_fn_ret_success y && test x = \"\$1\" )
+-then :
++if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then :
+ 
+-else \$as_nop
++else
+   exitcode=1; echo positional parameters were not saved.
+ fi
+ test x\$exitcode = x0 || exit 1
+-blah=\$(echo \$(echo blah))
+-test x\"\$blah\" = xblah || exit 1
+ test -x / || exit 1"
+   as_suggested="  as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO
+   as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO
+   eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" &&
+   test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1
++test \$(( 1 + 1 )) = 2 || exit 1
+ 
+   test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || (
+     ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
+@@ -188,40 +204,31 @@ test -x / || exit 1"
+     ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO
+     PATH=/empty FPATH=/empty; export PATH FPATH
+     test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\
+-      || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1
+-test \$(( 1 + 1 )) = 2 || exit 1"
+-  if (eval "$as_required") 2>/dev/null
+-then :
++      || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1"
++  if (eval "$as_required") 2>/dev/null; then :
+   as_have_required=yes
+-else $as_nop
++else
+   as_have_required=no
+ fi
+-  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null
+-then :
++  if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then :
+ 
+-else $as_nop
++else
+   as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ as_found=false
+ for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+   as_found=:
+   case $as_dir in #(
+ 	 /*)
+ 	   for as_base in sh bash ksh sh5; do
+ 	     # Try only shells that exist, to save several forks.
+-	     as_shell=$as_dir$as_base
++	     as_shell=$as_dir/$as_base
+ 	     if { test -f "$as_shell" || test -f "$as_shell.exe"; } &&
+-		    as_run=a "$as_shell" -c "$as_bourne_compatible""$as_required" 2>/dev/null
+-then :
++		    { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then :
+   CONFIG_SHELL=$as_shell as_have_required=yes
+-		   if as_run=a "$as_shell" -c "$as_bourne_compatible""$as_suggested" 2>/dev/null
+-then :
++		   if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then :
+   break 2
+ fi
+ fi
+@@ -229,21 +236,14 @@ fi
+        esac
+   as_found=false
+ done
+-IFS=$as_save_IFS
+-if $as_found
+-then :
+-
+-else $as_nop
+-  if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
+-	      as_run=a "$SHELL" -c "$as_bourne_compatible""$as_required" 2>/dev/null
+-then :
++$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } &&
++	      { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then :
+   CONFIG_SHELL=$SHELL as_have_required=yes
+-fi
+-fi
++fi; }
++IFS=$as_save_IFS
+ 
+ 
+-      if test "x$CONFIG_SHELL" != x
+-then :
++      if test "x$CONFIG_SHELL" != x; then :
+   export CONFIG_SHELL
+              # We cannot yet assume a decent shell, so we have to provide a
+ # neutralization value for shells without unset; and this also
+@@ -261,19 +261,18 @@ esac
+ exec $CONFIG_SHELL $as_opts "$as_myself" ${1+"$@"}
+ # Admittedly, this is quite paranoid, since all the known shells bail
+ # out after a failed `exec'.
+-printf "%s\n" "$0: could not re-execute with $CONFIG_SHELL" >&2
++$as_echo "$0: could not re-execute with $CONFIG_SHELL" >&2
+ exit 255
+ fi
+ 
+-    if test x$as_have_required = xno
+-then :
+-  printf "%s\n" "$0: This script requires a shell more modern than all"
+-  printf "%s\n" "$0: the shells that I found on your system."
+-  if test ${ZSH_VERSION+y} ; then
+-    printf "%s\n" "$0: In particular, zsh $ZSH_VERSION has bugs and should"
+-    printf "%s\n" "$0: be upgraded to zsh 4.3.4 or later."
++    if test x$as_have_required = xno; then :
++  $as_echo "$0: This script requires a shell more modern than all"
++  $as_echo "$0: the shells that I found on your system."
++  if test x${ZSH_VERSION+set} = xset ; then
++    $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should"
++    $as_echo "$0: be upgraded to zsh 4.3.4 or later."
+   else
+-    printf "%s\n" "$0: Please tell bug-autoconf@gnu.org about your system,
++    $as_echo "$0: Please tell bug-autoconf@gnu.org about your system,
+ $0: including any error possibly output before this
+ $0: message. Then install a modern shell, or manually run
+ $0: the script under such a shell if you do have one."
+@@ -300,7 +299,6 @@ as_fn_unset ()
+ }
+ as_unset=as_fn_unset
+ 
+-
+ # as_fn_set_status STATUS
+ # -----------------------
+ # Set $? to STATUS, without forking.
+@@ -318,14 +316,6 @@ as_fn_exit ()
+   as_fn_set_status $1
+   exit $1
+ } # as_fn_exit
+-# as_fn_nop
+-# ---------
+-# Do nothing but, unlike ":", preserve the value of $?.
+-as_fn_nop ()
+-{
+-  return $?
+-}
+-as_nop=as_fn_nop
+ 
+ # as_fn_mkdir_p
+ # -------------
+@@ -340,7 +330,7 @@ as_fn_mkdir_p ()
+     as_dirs=
+     while :; do
+       case $as_dir in #(
+-      *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
++      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+       *) as_qdir=$as_dir;;
+       esac
+       as_dirs="'$as_qdir' $as_dirs"
+@@ -349,7 +339,7 @@ $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ 	 X"$as_dir" : 'X\(//\)[^/]' \| \
+ 	 X"$as_dir" : 'X\(//\)$' \| \
+ 	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X"$as_dir" |
++$as_echo X"$as_dir" |
+     sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -388,13 +378,12 @@ as_fn_executable_p ()
+ # advantage of any shell optimizations that allow amortized linear growth over
+ # repeated appends, instead of the typical quadratic growth present in naive
+ # implementations.
+-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
+-then :
++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+   eval 'as_fn_append ()
+   {
+     eval $1+=\$2
+   }'
+-else $as_nop
++else
+   as_fn_append ()
+   {
+     eval $1=\$$1\$2
+@@ -406,27 +395,18 @@ fi # as_fn_append
+ # Perform arithmetic evaluation on the ARGs, and store the result in the
+ # global $as_val. Take advantage of shells that can avoid forks. The arguments
+ # must be portable across $(()) and expr.
+-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
+-then :
++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+   eval 'as_fn_arith ()
+   {
+     as_val=$(( $* ))
+   }'
+-else $as_nop
++else
+   as_fn_arith ()
+   {
+     as_val=`expr "$@" || test $? -eq 1`
+   }
+ fi # as_fn_arith
+ 
+-# as_fn_nop
+-# ---------
+-# Do nothing but, unlike ":", preserve the value of $?.
+-as_fn_nop ()
+-{
+-  return $?
+-}
+-as_nop=as_fn_nop
+ 
+ # as_fn_error STATUS ERROR [LINENO LOG_FD]
+ # ----------------------------------------
+@@ -438,9 +418,9 @@ as_fn_error ()
+   as_status=$1; test $as_status -eq 0 && as_status=1
+   if test "$4"; then
+     as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
++    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+   fi
+-  printf "%s\n" "$as_me: error: $2" >&2
++  $as_echo "$as_me: error: $2" >&2
+   as_fn_exit $as_status
+ } # as_fn_error
+ 
+@@ -467,7 +447,7 @@ as_me=`$as_basename -- "$0" ||
+ $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ 	 X"$0" : 'X\(//\)$' \| \
+ 	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X/"$0" |
++$as_echo X/"$0" |
+     sed '/^.*\/\([^/][^/]*\)\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -511,7 +491,7 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits
+       s/-\n.*//
+     ' >$as_me.lineno &&
+   chmod +x "$as_me.lineno" ||
+-    { printf "%s\n" "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
++    { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; }
+ 
+   # If we had to re-execute with $CONFIG_SHELL, we're ensured to have
+   # already done that, so ensure we don't try to do so again and fall
+@@ -525,10 +505,6 @@ as_cr_alnum=$as_cr_Letters$as_cr_digits
+   exit
+ }
+ 
+-
+-# Determine whether it's possible to make 'echo' print without a newline.
+-# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
+-# for compatibility with existing Makefiles.
+ ECHO_C= ECHO_N= ECHO_T=
+ case `echo -n x` in #(((((
+ -n*)
+@@ -542,13 +518,6 @@ case `echo -n x` in #(((((
+   ECHO_N='-n';;
+ esac
+ 
+-# For backward compatibility with old third-party macros, we provide
+-# the shell variables $as_echo and $as_echo_n.  New code should use
+-# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
+-as_echo='printf %s\n'
+-as_echo_n='printf %s'
+-
+-
+ rm -f conf$$ conf$$.exe conf$$.file
+ if test -d conf$$.dir; then
+   rm -f conf$$.dir/conf$$.file
+@@ -625,75 +594,78 @@ PACKAGE_URL=''
+ 
+ # Factoring default headers for most tests.
+ ac_includes_default="\
+-#include 
+-#ifdef HAVE_STDIO_H
+-# include 
++#include 
++#ifdef HAVE_SYS_TYPES_H
++# include 
+ #endif
+-#ifdef HAVE_STDLIB_H
++#ifdef HAVE_SYS_STAT_H
++# include 
++#endif
++#ifdef STDC_HEADERS
+ # include 
++# include 
++#else
++# ifdef HAVE_STDLIB_H
++#  include 
++# endif
+ #endif
+ #ifdef HAVE_STRING_H
++# if !defined STDC_HEADERS && defined HAVE_MEMORY_H
++#  include 
++# endif
+ # include 
+ #endif
++#ifdef HAVE_STRINGS_H
++# include 
++#endif
+ #ifdef HAVE_INTTYPES_H
+ # include 
+ #endif
+ #ifdef HAVE_STDINT_H
+ # include 
+ #endif
+-#ifdef HAVE_STRINGS_H
+-# include 
+-#endif
+-#ifdef HAVE_SYS_TYPES_H
+-# include 
+-#endif
+-#ifdef HAVE_SYS_STAT_H
+-# include 
+-#endif
+ #ifdef HAVE_UNISTD_H
+ # include 
+ #endif"
+ 
+-ac_header_c_list=
+ ac_subst_vars='am__EXEEXT_FALSE
+ am__EXEEXT_TRUE
+ LTLIBOBJS
+ LIBOBJS
+ target_noncanonical
++lt_host_flags
+ CXXCPP
+-LT_SYS_LIBRARY_PATH
+ OTOOL64
+ OTOOL
+ LIPO
+ NMEDIT
+ DSYMUTIL
+-MANIFEST_TOOL
+ RANLIB
+-ac_ct_AR
+ AR
+-DLLTOOL
+ OBJDUMP
+-FILECMD
+ LN_S
+ NM
+ ac_ct_DUMPBIN
+ DUMPBIN
+ LD
+ FGREP
+-EGREP
+-GREP
+ SED
+ LIBTOOL
++get_gcc_base_ver
+ real_target_noncanonical
+ accel_dir_suffix
+ gcc_build_dir
+ ac_bolt_plugin_ldflags
++ac_bolt_plugin_warn_cflags
+ am__fastdepCXX_FALSE
+ am__fastdepCXX_TRUE
+ CXXDEPMODE
+ ac_ct_CXX
+ CXXFLAGS
+ CXX
++EGREP
++GREP
++CPP
+ am__fastdepCC_FALSE
+ am__fastdepCC_TRUE
+ CCDEPMODE
+@@ -718,9 +690,6 @@ AM_BACKSLASH
+ AM_DEFAULT_VERBOSITY
+ AM_DEFAULT_V
+ AM_V
+-CSCOPE
+-ETAGS
+-CTAGS
+ am__untar
+ am__tar
+ AMTAR
+@@ -744,6 +713,10 @@ am__isrc
+ INSTALL_DATA
+ INSTALL_SCRIPT
+ INSTALL_PROGRAM
++target_subdir
++host_subdir
++build_subdir
++build_libsubdir
+ target_os
+ target_vendor
+ target_cpu
+@@ -775,7 +748,6 @@ infodir
+ docdir
+ oldincludedir
+ includedir
+-runstatedir
+ localstatedir
+ sharedstatedir
+ sysconfdir
+@@ -799,18 +771,18 @@ am__quote'
+ ac_subst_files=''
+ ac_user_opts='
+ enable_option_checking
++with_build_libsubdir
+ enable_silent_rules
+ enable_maintainer_mode
+ with_libiberty
+ enable_dependency_tracking
+ enable_largefile
++with_gcc_major_version_only
+ enable_shared
+ enable_static
+ with_pic
+ enable_fast_install
+-with_aix_soname
+ with_gnu_ld
+-with_sysroot
+ enable_libtool_lock
+ '
+       ac_precious_vars='build_alias
+@@ -821,10 +793,10 @@ CFLAGS
+ LDFLAGS
+ LIBS
+ CPPFLAGS
++CPP
+ CXX
+ CXXFLAGS
+ CCC
+-LT_SYS_LIBRARY_PATH
+ CXXCPP'
+ 
+ 
+@@ -864,7 +836,6 @@ datadir='${datarootdir}'
+ sysconfdir='${prefix}/etc'
+ sharedstatedir='${prefix}/com'
+ localstatedir='${prefix}/var'
+-runstatedir='${localstatedir}/run'
+ includedir='${prefix}/include'
+ oldincludedir='/usr/include'
+ docdir='${datarootdir}/doc/${PACKAGE_TARNAME}'
+@@ -894,6 +865,8 @@ do
+   *)    ac_optarg=yes ;;
+   esac
+ 
++  # Accept the important Cygnus configure options, so we can diagnose typos.
++
+   case $ac_dashdash$ac_option in
+   --)
+     ac_dashdash=yes ;;
+@@ -934,9 +907,9 @@ do
+     ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'`
+     # Reject names that are not valid shell variable names.
+     expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+-      as_fn_error $? "invalid feature name: \`$ac_useropt'"
++      as_fn_error $? "invalid feature name: $ac_useropt"
+     ac_useropt_orig=$ac_useropt
+-    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+     case $ac_user_opts in
+       *"
+ "enable_$ac_useropt"
+@@ -960,9 +933,9 @@ do
+     ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'`
+     # Reject names that are not valid shell variable names.
+     expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+-      as_fn_error $? "invalid feature name: \`$ac_useropt'"
++      as_fn_error $? "invalid feature name: $ac_useropt"
+     ac_useropt_orig=$ac_useropt
+-    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+     case $ac_user_opts in
+       *"
+ "enable_$ac_useropt"
+@@ -1115,15 +1088,6 @@ do
+   | -silent | --silent | --silen | --sile | --sil)
+     silent=yes ;;
+ 
+-  -runstatedir | --runstatedir | --runstatedi | --runstated \
+-  | --runstate | --runstat | --runsta | --runst | --runs \
+-  | --run | --ru | --r)
+-    ac_prev=runstatedir ;;
+-  -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \
+-  | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \
+-  | --run=* | --ru=* | --r=*)
+-    runstatedir=$ac_optarg ;;
+-
+   -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb)
+     ac_prev=sbindir ;;
+   -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \
+@@ -1173,9 +1137,9 @@ do
+     ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'`
+     # Reject names that are not valid shell variable names.
+     expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+-      as_fn_error $? "invalid package name: \`$ac_useropt'"
++      as_fn_error $? "invalid package name: $ac_useropt"
+     ac_useropt_orig=$ac_useropt
+-    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+     case $ac_user_opts in
+       *"
+ "with_$ac_useropt"
+@@ -1189,9 +1153,9 @@ do
+     ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'`
+     # Reject names that are not valid shell variable names.
+     expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null &&
+-      as_fn_error $? "invalid package name: \`$ac_useropt'"
++      as_fn_error $? "invalid package name: $ac_useropt"
+     ac_useropt_orig=$ac_useropt
+-    ac_useropt=`printf "%s\n" "$ac_useropt" | sed 's/[-+.]/_/g'`
++    ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'`
+     case $ac_user_opts in
+       *"
+ "with_$ac_useropt"
+@@ -1235,9 +1199,9 @@ Try \`$0 --help' for more information"
+ 
+   *)
+     # FIXME: should be removed in autoconf 3.0.
+-    printf "%s\n" "$as_me: WARNING: you should use --build, --host, --target" >&2
++    $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2
+     expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null &&
+-      printf "%s\n" "$as_me: WARNING: invalid host type: $ac_option" >&2
++      $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2
+     : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}"
+     ;;
+ 
+@@ -1253,7 +1217,7 @@ if test -n "$ac_unrecognized_opts"; then
+   case $enable_option_checking in
+     no) ;;
+     fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;;
+-    *)     printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
++    *)     $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;;
+   esac
+ fi
+ 
+@@ -1261,7 +1225,7 @@ fi
+ for ac_var in	exec_prefix prefix bindir sbindir libexecdir datarootdir \
+ 		datadir sysconfdir sharedstatedir localstatedir includedir \
+ 		oldincludedir docdir infodir htmldir dvidir pdfdir psdir \
+-		libdir localedir mandir runstatedir
++		libdir localedir mandir
+ do
+   eval ac_val=\$$ac_var
+   # Remove trailing slashes.
+@@ -1317,7 +1281,7 @@ $as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ 	 X"$as_myself" : 'X\(//\)[^/]' \| \
+ 	 X"$as_myself" : 'X\(//\)$' \| \
+ 	 X"$as_myself" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X"$as_myself" |
++$as_echo X"$as_myself" |
+     sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -1414,7 +1378,6 @@ Fine tuning of the installation directories:
+   --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+   --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+   --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+-  --runstatedir=DIR       modifiable per-process data [LOCALSTATEDIR/run]
+   --libdir=DIR            object code libraries [EPREFIX/lib]
+   --includedir=DIR        C header files [PREFIX/include]
+   --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+@@ -1473,16 +1436,14 @@ Optional Features:
+ Optional Packages:
+   --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+   --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
++  --with-build-libsubdir=DIR  Directory where to find libraries for build system
+   --with-libiberty=PATH   specify the directory where to find libiberty
+                           [../libiberty]
+-  --with-pic[=PKGS]       try to use only PIC/non-PIC objects [default=use
++  --with-gcc-major-version-only
++                          use only GCC major number in filesystem paths
++  --with-pic              try to use only PIC/non-PIC objects [default=use
+                           both]
+-  --with-aix-soname=aix|svr4|both
+-                          shared library versioning (aka "SONAME") variant to
+-                          provide on AIX, [default=aix].
+   --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
+-  --with-sysroot[=DIR]    Search for dependent libraries within DIR (or the
+-                          compiler's sysroot if not specified).
+ 
+ Some influential environment variables:
+   CC          C compiler command
+@@ -1492,10 +1453,9 @@ Some influential environment variables:
+   LIBS        libraries to pass to the linker, e.g. -l
+   CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I if
+               you have headers in a nonstandard directory 
++  CPP         C preprocessor
+   CXX         C++ compiler command
+   CXXFLAGS    C++ compiler flags
+-  LT_SYS_LIBRARY_PATH
+-              User-defined run-time library search path.
+   CXXCPP      C++ preprocessor
+ 
+ Use these variables to override the choices made by `configure' or to help
+@@ -1517,9 +1477,9 @@ if test "$ac_init_help" = "recursive"; then
+ case "$ac_dir" in
+ .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *)
+-  ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
++  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+   # A ".." for each directory in $ac_dir_suffix.
+-  ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
++  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+   case $ac_top_builddir_sub in
+   "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+   *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+@@ -1547,8 +1507,7 @@ esac
+ ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+ 
+     cd "$ac_dir" || { ac_status=$?; continue; }
+-    # Check for configure.gnu first; this name is used for a wrapper for
+-    # Metaconfig's "Configure" on case-insensitive file systems.
++    # Check for guested configure.
+     if test -f "$ac_srcdir/configure.gnu"; then
+       echo &&
+       $SHELL "$ac_srcdir/configure.gnu" --help=recursive
+@@ -1556,7 +1515,7 @@ ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix
+       echo &&
+       $SHELL "$ac_srcdir/configure" --help=recursive
+     else
+-      printf "%s\n" "$as_me: WARNING: no configuration information is in $ac_dir" >&2
++      $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2
+     fi || ac_status=$?
+     cd "$ac_pwd" || { ac_status=$?; break; }
+   done
+@@ -1566,9 +1525,9 @@ test -n "$ac_init_help" && exit $ac_status
+ if $ac_init_version; then
+   cat <<\_ACEOF
+ bolt plugin for ld configure 0.1
+-generated by GNU Autoconf 2.71
++generated by GNU Autoconf 2.69
+ 
+-Copyright (C) 2021 Free Software Foundation, Inc.
++Copyright (C) 2012 Free Software Foundation, Inc.
+ This configure script is free software; the Free Software Foundation
+ gives unlimited permission to copy, distribute and modify it.
+ _ACEOF
+@@ -1585,14 +1544,14 @@ fi
+ ac_fn_c_try_compile ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  rm -f conftest.$ac_objext conftest.beam
++  rm -f conftest.$ac_objext
+   if { { ac_try="$ac_compile"
+ case "(($ac_try" in
+   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_compile") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -1600,15 +1559,14 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+     mv -f conftest.er1 conftest.err
+   fi
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } && {
+ 	 test -z "$ac_c_werror_flag" ||
+ 	 test ! -s conftest.err
+-       } && test -s conftest.$ac_objext
+-then :
++       } && test -s conftest.$ac_objext; then :
+   ac_retval=0
+-else $as_nop
+-  printf "%s\n" "$as_me: failed program was:" >&5
++else
++  $as_echo "$as_me: failed program was:" >&5
+ sed 's/^/| /' conftest.$ac_ext >&5
+ 
+ 	ac_retval=1
+@@ -1618,6 +1576,172 @@ fi
+ 
+ } # ac_fn_c_try_compile
+ 
++# ac_fn_c_try_cpp LINENO
++# ----------------------
++# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
++ac_fn_c_try_cpp ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  if { { ac_try="$ac_cpp conftest.$ac_ext"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++$as_echo "$ac_try_echo"; } >&5
++  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
++  ac_status=$?
++  if test -s conftest.err; then
++    grep -v '^ *+' conftest.err >conftest.er1
++    cat conftest.er1 >&5
++    mv -f conftest.er1 conftest.err
++  fi
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } > conftest.i && {
++	 test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
++	 test ! -s conftest.err
++       }; then :
++  ac_retval=0
++else
++  $as_echo "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++    ac_retval=1
++fi
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++  as_fn_set_status $ac_retval
++
++} # ac_fn_c_try_cpp
++
++# ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES
++# -------------------------------------------------------
++# Tests whether HEADER exists, giving a warning if it cannot be compiled using
++# the include files in INCLUDES and setting the cache variable VAR
++# accordingly.
++ac_fn_c_check_header_mongrel ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  if eval \${$3+:} false; then :
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
++$as_echo_n "checking for $2... " >&6; }
++if eval \${$3+:} false; then :
++  $as_echo_n "(cached) " >&6
++fi
++eval ac_res=\$$3
++	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++$as_echo "$ac_res" >&6; }
++else
++  # Is the header compilable?
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 usability" >&5
++$as_echo_n "checking $2 usability... " >&6; }
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++$4
++#include <$2>
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"; then :
++  ac_header_compiler=yes
++else
++  ac_header_compiler=no
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_compiler" >&5
++$as_echo "$ac_header_compiler" >&6; }
++
++# Is the header present?
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking $2 presence" >&5
++$as_echo_n "checking $2 presence... " >&6; }
++cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include <$2>
++_ACEOF
++if ac_fn_c_try_cpp "$LINENO"; then :
++  ac_header_preproc=yes
++else
++  ac_header_preproc=no
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_header_preproc" >&5
++$as_echo "$ac_header_preproc" >&6; }
++
++# So?  What about this header?
++case $ac_header_compiler:$ac_header_preproc:$ac_c_preproc_warn_flag in #((
++  yes:no: )
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&5
++$as_echo "$as_me: WARNING: $2: accepted by the compiler, rejected by the preprocessor!" >&2;}
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
++$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
++    ;;
++  no:yes:* )
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: present but cannot be compiled" >&5
++$as_echo "$as_me: WARNING: $2: present but cannot be compiled" >&2;}
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     check for missing prerequisite headers?" >&5
++$as_echo "$as_me: WARNING: $2:     check for missing prerequisite headers?" >&2;}
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: see the Autoconf documentation" >&5
++$as_echo "$as_me: WARNING: $2: see the Autoconf documentation" >&2;}
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&5
++$as_echo "$as_me: WARNING: $2:     section \"Present But Cannot Be Compiled\"" >&2;}
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $2: proceeding with the compiler's result" >&5
++$as_echo "$as_me: WARNING: $2: proceeding with the compiler's result" >&2;}
++    ;;
++esac
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
++$as_echo_n "checking for $2... " >&6; }
++if eval \${$3+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  eval "$3=\$ac_header_compiler"
++fi
++eval ac_res=\$$3
++	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++$as_echo "$ac_res" >&6; }
++fi
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++
++} # ac_fn_c_check_header_mongrel
++
++# ac_fn_c_try_run LINENO
++# ----------------------
++# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
++# that executables *can* be run.
++ac_fn_c_try_run ()
++{
++  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
++  if { { ac_try="$ac_link"
++case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++$as_echo "$ac_try_echo"; } >&5
++  (eval "$ac_link") 2>&5
++  ac_status=$?
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
++  { { case "(($ac_try" in
++  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
++  *) ac_try_echo=$ac_try;;
++esac
++eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
++$as_echo "$ac_try_echo"; } >&5
++  (eval "$ac_try") 2>&5
++  ac_status=$?
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; }; then :
++  ac_retval=0
++else
++  $as_echo "$as_me: program exited with status $ac_status" >&5
++       $as_echo "$as_me: failed program was:" >&5
++sed 's/^/| /' conftest.$ac_ext >&5
++
++       ac_retval=$ac_status
++fi
++  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
++  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
++  as_fn_set_status $ac_retval
++
++} # ac_fn_c_try_run
++
+ # ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES
+ # -------------------------------------------------------
+ # Tests whether HEADER exists and can be compiled using the include files in
+@@ -1625,28 +1749,26 @@ fi
+ ac_fn_c_check_header_compile ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+-printf %s "checking for $2... " >&6; }
+-if eval test \${$3+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
++$as_echo_n "checking for $2... " >&6; }
++if eval \${$3+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ $4
+ #include <$2>
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   eval "$3=yes"
+-else $as_nop
++else
+   eval "$3=no"
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+ eval ac_res=\$$3
+-	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+-printf "%s\n" "$ac_res" >&6; }
++	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++$as_echo "$ac_res" >&6; }
+   eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ 
+ } # ac_fn_c_check_header_compile
+@@ -1657,14 +1779,14 @@ printf "%s\n" "$ac_res" >&6; }
+ ac_fn_cxx_try_compile ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  rm -f conftest.$ac_objext conftest.beam
++  rm -f conftest.$ac_objext
+   if { { ac_try="$ac_compile"
+ case "(($ac_try" in
+   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_compile") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -1672,15 +1794,14 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+     mv -f conftest.er1 conftest.err
+   fi
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } && {
+ 	 test -z "$ac_cxx_werror_flag" ||
+ 	 test ! -s conftest.err
+-       } && test -s conftest.$ac_objext
+-then :
++       } && test -s conftest.$ac_objext; then :
+   ac_retval=0
+-else $as_nop
+-  printf "%s\n" "$as_me: failed program was:" >&5
++else
++  $as_echo "$as_me: failed program was:" >&5
+ sed 's/^/| /' conftest.$ac_ext >&5
+ 
+ 	ac_retval=1
+@@ -1696,14 +1817,14 @@ fi
+ ac_fn_c_try_link ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext
++  rm -f conftest.$ac_objext conftest$ac_exeext
+   if { { ac_try="$ac_link"
+ case "(($ac_try" in
+   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_link") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -1711,18 +1832,17 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+     mv -f conftest.er1 conftest.err
+   fi
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } && {
+ 	 test -z "$ac_c_werror_flag" ||
+ 	 test ! -s conftest.err
+        } && test -s conftest$ac_exeext && {
+ 	 test "$cross_compiling" = yes ||
+ 	 test -x conftest$ac_exeext
+-       }
+-then :
++       }; then :
+   ac_retval=0
+-else $as_nop
+-  printf "%s\n" "$as_me: failed program was:" >&5
++else
++  $as_echo "$as_me: failed program was:" >&5
+ sed 's/^/| /' conftest.$ac_ext >&5
+ 
+ 	ac_retval=1
+@@ -1743,12 +1863,11 @@ fi
+ ac_fn_c_check_func ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+-printf %s "checking for $2... " >&6; }
+-if eval test \${$3+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
++$as_echo_n "checking for $2... " >&6; }
++if eval \${$3+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ /* Define $2 to an innocuous variant, in case  declares $2.
+@@ -1756,9 +1875,16 @@ else $as_nop
+ #define $2 innocuous_$2
+ 
+ /* System header to define __stub macros and hopefully few prototypes,
+-   which can conflict with char $2 (); below.  */
++    which can conflict with char $2 (); below.
++    Prefer  to  if __STDC__ is defined, since
++     exists even on freestanding compilers.  */
++
++#ifdef __STDC__
++# include 
++#else
++# include 
++#endif
+ 
+-#include 
+ #undef $2
+ 
+ /* Override any GCC internal prototype to avoid an error.
+@@ -1776,25 +1902,24 @@ choke me
+ #endif
+ 
+ int
+-main (void)
++main ()
+ {
+ return $2 ();
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   eval "$3=yes"
+-else $as_nop
++else
+   eval "$3=no"
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+ fi
+ eval ac_res=\$$3
+-	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+-printf "%s\n" "$ac_res" >&6; }
++	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++$as_echo "$ac_res" >&6; }
+   eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ 
+ } # ac_fn_c_check_func
+@@ -1811,7 +1936,7 @@ case "(($ac_try" in
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -1819,15 +1944,14 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+     mv -f conftest.er1 conftest.err
+   fi
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } > conftest.i && {
+ 	 test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+ 	 test ! -s conftest.err
+-       }
+-then :
++       }; then :
+   ac_retval=0
+-else $as_nop
+-  printf "%s\n" "$as_me: failed program was:" >&5
++else
++  $as_echo "$as_me: failed program was:" >&5
+ sed 's/^/| /' conftest.$ac_ext >&5
+ 
+     ac_retval=1
+@@ -1843,14 +1967,14 @@ fi
+ ac_fn_cxx_try_link ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  rm -f conftest.$ac_objext conftest.beam conftest$ac_exeext
++  rm -f conftest.$ac_objext conftest$ac_exeext
+   if { { ac_try="$ac_link"
+ case "(($ac_try" in
+   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_link") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -1858,18 +1982,17 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+     mv -f conftest.er1 conftest.err
+   fi
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } && {
+ 	 test -z "$ac_cxx_werror_flag" ||
+ 	 test ! -s conftest.err
+        } && test -s conftest$ac_exeext && {
+ 	 test "$cross_compiling" = yes ||
+ 	 test -x conftest$ac_exeext
+-       }
+-then :
++       }; then :
+   ac_retval=0
+-else $as_nop
+-  printf "%s\n" "$as_me: failed program was:" >&5
++else
++  $as_echo "$as_me: failed program was:" >&5
+ sed 's/^/| /' conftest.$ac_ext >&5
+ 
+ 	ac_retval=1
+@@ -1891,12 +2014,11 @@ fi
+ ac_fn_c_find_intX_t ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5
+-printf %s "checking for int$2_t... " >&6; }
+-if eval test \${$3+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for int$2_t" >&5
++$as_echo_n "checking for int$2_t... " >&6; }
++if eval \${$3+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   eval "$3=no"
+      # Order is important - never check a type that is potentially smaller
+      # than half of the expected target width.
+@@ -1907,7 +2029,7 @@ else $as_nop
+ $ac_includes_default
+ 	     enum { N = $2 / 2 - 1 };
+ int
+-main (void)
++main ()
+ {
+ static int test_array [1 - 2 * !(0 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1))];
+ test_array [0] = 0;
+@@ -1917,14 +2039,13 @@ return test_array [0];
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ $ac_includes_default
+ 	        enum { N = $2 / 2 - 1 };
+ int
+-main (void)
++main ()
+ {
+ static int test_array [1 - 2 * !(($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 1)
+ 		 < ($ac_type) ((((($ac_type) 1 << N) << N) - 1) * 2 + 2))];
+@@ -1935,10 +2056,9 @@ return test_array [0];
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+ 
+-else $as_nop
++else
+   case $ac_type in #(
+   int$2_t) :
+     eval "$3=yes" ;; #(
+@@ -1946,20 +2066,19 @@ else $as_nop
+     eval "$3=\$ac_type" ;;
+ esac
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+-       if eval test \"x\$"$3"\" = x"no"
+-then :
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
++       if eval test \"x\$"$3"\" = x"no"; then :
+ 
+-else $as_nop
++else
+   break
+ fi
+      done
+ fi
+ eval ac_res=\$$3
+-	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+-printf "%s\n" "$ac_res" >&6; }
++	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++$as_echo "$ac_res" >&6; }
+   eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ 
+ } # ac_fn_c_find_intX_t
+@@ -1971,12 +2090,11 @@ printf "%s\n" "$ac_res" >&6; }
+ ac_fn_c_find_uintX_t ()
+ {
+   as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5
+-printf %s "checking for uint$2_t... " >&6; }
+-if eval test \${$3+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uint$2_t" >&5
++$as_echo_n "checking for uint$2_t... " >&6; }
++if eval \${$3+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   eval "$3=no"
+      # Order is important - never check a type that is potentially smaller
+      # than half of the expected target width.
+@@ -1986,7 +2104,7 @@ else $as_nop
+ /* end confdefs.h.  */
+ $ac_includes_default
+ int
+-main (void)
++main ()
+ {
+ static int test_array [1 - 2 * !((($ac_type) -1 >> ($2 / 2 - 1)) >> ($2 / 2 - 1) == 3)];
+ test_array [0] = 0;
+@@ -1996,8 +2114,7 @@ return test_array [0];
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   case $ac_type in #(
+   uint$2_t) :
+     eval "$3=yes" ;; #(
+@@ -2005,49 +2122,28 @@ then :
+     eval "$3=\$ac_type" ;;
+ esac
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+-       if eval test \"x\$"$3"\" = x"no"
+-then :
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
++       if eval test \"x\$"$3"\" = x"no"; then :
+ 
+-else $as_nop
++else
+   break
+ fi
+      done
+ fi
+ eval ac_res=\$$3
+-	       { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+-printf "%s\n" "$ac_res" >&6; }
++	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++$as_echo "$ac_res" >&6; }
+   eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+ 
+ } # ac_fn_c_find_uintX_t
+-ac_configure_args_raw=
+-for ac_arg
+-do
+-  case $ac_arg in
+-  *\'*)
+-    ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+-  esac
+-  as_fn_append ac_configure_args_raw " '$ac_arg'"
+-done
+-
+-case $ac_configure_args_raw in
+-  *$as_nl*)
+-    ac_safe_unquote= ;;
+-  *)
+-    ac_unsafe_z='|&;<>()$`\\"*?[ ''	' # This string ends in space, tab.
+-    ac_unsafe_a="$ac_unsafe_z#~"
+-    ac_safe_unquote="s/ '\\([^$ac_unsafe_a][^$ac_unsafe_z]*\\)'/ \\1/g"
+-    ac_configure_args_raw=`      printf "%s\n" "$ac_configure_args_raw" | sed "$ac_safe_unquote"`;;
+-esac
+-
+ cat >config.log <<_ACEOF
+ This file contains any messages produced by compilers while
+ running configure, to aid debugging if configure makes a mistake.
+ 
+ It was created by bolt plugin for ld $as_me 0.1, which was
+-generated by GNU Autoconf 2.71.  Invocation command line was
++generated by GNU Autoconf 2.69.  Invocation command line was
+ 
+-  $ $0$ac_configure_args_raw
++  $ $0 $@
+ 
+ _ACEOF
+ exec 5>>config.log
+@@ -2080,12 +2176,8 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    printf "%s\n" "PATH: $as_dir"
++  test -z "$as_dir" && as_dir=.
++    $as_echo "PATH: $as_dir"
+   done
+ IFS=$as_save_IFS
+ 
+@@ -2120,7 +2212,7 @@ do
+     | -silent | --silent | --silen | --sile | --sil)
+       continue ;;
+     *\'*)
+-      ac_arg=`printf "%s\n" "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
++      ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;;
+     esac
+     case $ac_pass in
+     1) as_fn_append ac_configure_args0 " '$ac_arg'" ;;
+@@ -2155,13 +2247,11 @@ done
+ # WARNING: Use '\'' to represent an apostrophe within the trap.
+ # WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug.
+ trap 'exit_status=$?
+-  # Sanitize IFS.
+-  IFS=" ""	$as_nl"
+   # Save into config.log some information that might help in debugging.
+   {
+     echo
+ 
+-    printf "%s\n" "## ---------------- ##
++    $as_echo "## ---------------- ##
+ ## Cache variables. ##
+ ## ---------------- ##"
+     echo
+@@ -2172,8 +2262,8 @@ trap 'exit_status=$?
+     case $ac_val in #(
+     *${as_nl}*)
+       case $ac_var in #(
+-      *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+-printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
++      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
++$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+       esac
+       case $ac_var in #(
+       _ | IFS | as_nl) ;; #(
+@@ -2197,7 +2287,7 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;}
+ )
+     echo
+ 
+-    printf "%s\n" "## ----------------- ##
++    $as_echo "## ----------------- ##
+ ## Output variables. ##
+ ## ----------------- ##"
+     echo
+@@ -2205,14 +2295,14 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;}
+     do
+       eval ac_val=\$$ac_var
+       case $ac_val in
+-      *\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
++      *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+       esac
+-      printf "%s\n" "$ac_var='\''$ac_val'\''"
++      $as_echo "$ac_var='\''$ac_val'\''"
+     done | sort
+     echo
+ 
+     if test -n "$ac_subst_files"; then
+-      printf "%s\n" "## ------------------- ##
++      $as_echo "## ------------------- ##
+ ## File substitutions. ##
+ ## ------------------- ##"
+       echo
+@@ -2220,15 +2310,15 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;}
+       do
+ 	eval ac_val=\$$ac_var
+ 	case $ac_val in
+-	*\'\''*) ac_val=`printf "%s\n" "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
++	*\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;;
+ 	esac
+-	printf "%s\n" "$ac_var='\''$ac_val'\''"
++	$as_echo "$ac_var='\''$ac_val'\''"
+       done | sort
+       echo
+     fi
+ 
+     if test -s confdefs.h; then
+-      printf "%s\n" "## ----------- ##
++      $as_echo "## ----------- ##
+ ## confdefs.h. ##
+ ## ----------- ##"
+       echo
+@@ -2236,8 +2326,8 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;}
+       echo
+     fi
+     test "$ac_signal" != 0 &&
+-      printf "%s\n" "$as_me: caught signal $ac_signal"
+-    printf "%s\n" "$as_me: exit $exit_status"
++      $as_echo "$as_me: caught signal $ac_signal"
++    $as_echo "$as_me: exit $exit_status"
+   } >&5
+   rm -f core *.core core.conftest.* &&
+     rm -f -r conftest* confdefs* conf$$* $ac_clean_files &&
+@@ -2251,48 +2341,63 @@ ac_signal=0
+ # confdefs.h avoids OS command line length limits that DEFS can exceed.
+ rm -f -r conftest* confdefs.h
+ 
+-printf "%s\n" "/* confdefs.h */" > confdefs.h
++$as_echo "/* confdefs.h */" > confdefs.h
+ 
+ # Predefined preprocessor variables.
+ 
+-printf "%s\n" "#define PACKAGE_NAME \"$PACKAGE_NAME\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define PACKAGE_NAME "$PACKAGE_NAME"
++_ACEOF
+ 
+-printf "%s\n" "#define PACKAGE_TARNAME \"$PACKAGE_TARNAME\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define PACKAGE_TARNAME "$PACKAGE_TARNAME"
++_ACEOF
+ 
+-printf "%s\n" "#define PACKAGE_VERSION \"$PACKAGE_VERSION\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define PACKAGE_VERSION "$PACKAGE_VERSION"
++_ACEOF
+ 
+-printf "%s\n" "#define PACKAGE_STRING \"$PACKAGE_STRING\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define PACKAGE_STRING "$PACKAGE_STRING"
++_ACEOF
+ 
+-printf "%s\n" "#define PACKAGE_BUGREPORT \"$PACKAGE_BUGREPORT\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT"
++_ACEOF
+ 
+-printf "%s\n" "#define PACKAGE_URL \"$PACKAGE_URL\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define PACKAGE_URL "$PACKAGE_URL"
++_ACEOF
+ 
+ 
+ # Let the site file select an alternate cache file if it wants to.
+ # Prefer an explicitly selected file to automatically selected ones.
++ac_site_file1=NONE
++ac_site_file2=NONE
+ if test -n "$CONFIG_SITE"; then
+-  ac_site_files="$CONFIG_SITE"
++  # We do not want a PATH search for config.site.
++  case $CONFIG_SITE in #((
++    -*)  ac_site_file1=./$CONFIG_SITE;;
++    */*) ac_site_file1=$CONFIG_SITE;;
++    *)   ac_site_file1=./$CONFIG_SITE;;
++  esac
+ elif test "x$prefix" != xNONE; then
+-  ac_site_files="$prefix/share/config.site $prefix/etc/config.site"
++  ac_site_file1=$prefix/share/config.site
++  ac_site_file2=$prefix/etc/config.site
+ else
+-  ac_site_files="$ac_default_prefix/share/config.site $ac_default_prefix/etc/config.site"
++  ac_site_file1=$ac_default_prefix/share/config.site
++  ac_site_file2=$ac_default_prefix/etc/config.site
+ fi
+-
+-for ac_site_file in $ac_site_files
++for ac_site_file in "$ac_site_file1" "$ac_site_file2"
+ do
+-  case $ac_site_file in #(
+-  */*) :
+-     ;; #(
+-  *) :
+-    ac_site_file=./$ac_site_file ;;
+-esac
+-  if test -f "$ac_site_file" && test -r "$ac_site_file"; then
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
+-printf "%s\n" "$as_me: loading site script $ac_site_file" >&6;}
++  test "x$ac_site_file" = xNONE && continue
++  if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then
++    { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5
++$as_echo "$as_me: loading site script $ac_site_file" >&6;}
+     sed 's/^/| /' "$ac_site_file" >&5
+     . "$ac_site_file" \
+-      || { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++      || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error $? "failed to load site script $ac_site_file
+ See \`config.log' for more details" "$LINENO" 5; }
+   fi
+@@ -2302,745 +2407,139 @@ if test -r "$cache_file"; then
+   # Some versions of bash will fail to source /dev/null (special files
+   # actually), so we avoid doing that.  DJGPP emulates it as a regular file.
+   if test /dev/null != "$cache_file" && test -f "$cache_file"; then
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
+-printf "%s\n" "$as_me: loading cache $cache_file" >&6;}
++    { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5
++$as_echo "$as_me: loading cache $cache_file" >&6;}
+     case $cache_file in
+       [\\/]* | ?:[\\/]* ) . "$cache_file";;
+       *)                      . "./$cache_file";;
+     esac
+   fi
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
+-printf "%s\n" "$as_me: creating cache $cache_file" >&6;}
++  { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5
++$as_echo "$as_me: creating cache $cache_file" >&6;}
+   >$cache_file
+ fi
+ 
+-as_fn_append ac_header_c_list " stdio.h stdio_h HAVE_STDIO_H"
+-# Test code for whether the C compiler supports C89 (global declarations)
+-ac_c_conftest_c89_globals='
+-/* Does the compiler advertise C89 conformance?
+-   Do not test the value of __STDC__, because some compilers set it to 0
+-   while being otherwise adequately conformant. */
+-#if !defined __STDC__
+-# error "Compiler does not advertise C89 conformance"
+-#endif
++# Check that the precious variables saved in the cache have kept the same
++# value.
++ac_cache_corrupted=false
++for ac_var in $ac_precious_vars; do
++  eval ac_old_set=\$ac_cv_env_${ac_var}_set
++  eval ac_new_set=\$ac_env_${ac_var}_set
++  eval ac_old_val=\$ac_cv_env_${ac_var}_value
++  eval ac_new_val=\$ac_env_${ac_var}_value
++  case $ac_old_set,$ac_new_set in
++    set,)
++      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
++$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
++      ac_cache_corrupted=: ;;
++    ,set)
++      { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
++$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
++      ac_cache_corrupted=: ;;
++    ,);;
++    *)
++      if test "x$ac_old_val" != "x$ac_new_val"; then
++	# differences in whitespace do not lead to failure.
++	ac_old_val_w=`echo x $ac_old_val`
++	ac_new_val_w=`echo x $ac_new_val`
++	if test "$ac_old_val_w" != "$ac_new_val_w"; then
++	  { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
++$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
++	  ac_cache_corrupted=:
++	else
++	  { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
++$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
++	  eval $ac_var=\$ac_old_val
++	fi
++	{ $as_echo "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
++$as_echo "$as_me:   former value:  \`$ac_old_val'" >&2;}
++	{ $as_echo "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
++$as_echo "$as_me:   current value: \`$ac_new_val'" >&2;}
++      fi;;
++  esac
++  # Pass precious variables to config.status.
++  if test "$ac_new_set" = set; then
++    case $ac_new_val in
++    *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
++    *) ac_arg=$ac_var=$ac_new_val ;;
++    esac
++    case " $ac_configure_args " in
++      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
++      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
++    esac
++  fi
++done
++if $ac_cache_corrupted; then
++  { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
++  { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
++$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;}
++  as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5
++fi
++## -------------------- ##
++## Main body of script. ##
++## -------------------- ##
+ 
+-#include 
+-#include 
+-struct stat;
+-/* Most of the following tests are stolen from RCS 5.7 src/conf.sh.  */
+-struct buf { int x; };
+-struct buf * (*rcsopen) (struct buf *, struct stat *, int);
+-static char *e (p, i)
+-     char **p;
+-     int i;
+-{
+-  return p[i];
+-}
+-static char *f (char * (*g) (char **, int), char **p, ...)
+-{
+-  char *s;
+-  va_list v;
+-  va_start (v,p);
+-  s = g (p, va_arg (v,int));
+-  va_end (v);
+-  return s;
+-}
+-
+-/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
+-   function prototypes and stuff, but not \xHH hex character constants.
+-   These do not provoke an error unfortunately, instead are silently treated
+-   as an "x".  The following induces an error, until -std is added to get
+-   proper ANSI mode.  Curiously \x00 != x always comes out true, for an
+-   array size at least.  It is necessary to write \x00 == 0 to get something
+-   that is true only with -std.  */
+-int osf4_cc_array ['\''\x00'\'' == 0 ? 1 : -1];
+-
+-/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
+-   inside strings and character constants.  */
+-#define FOO(x) '\''x'\''
+-int xlc6_cc_array[FOO(a) == '\''x'\'' ? 1 : -1];
+-
+-int test (int i, double x);
+-struct s1 {int (*f) (int a);};
+-struct s2 {int (*f) (double a);};
+-int pairnames (int, char **, int *(*)(struct buf *, struct stat *, int),
+-               int, int);'
+-
+-# Test code for whether the C compiler supports C89 (body of main).
+-ac_c_conftest_c89_main='
+-ok |= (argc == 0 || f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]);
+-'
+-
+-# Test code for whether the C compiler supports C99 (global declarations)
+-ac_c_conftest_c99_globals='
+-// Does the compiler advertise C99 conformance?
+-#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 199901L
+-# error "Compiler does not advertise C99 conformance"
+-#endif
+-
+-#include 
+-extern int puts (const char *);
+-extern int printf (const char *, ...);
+-extern int dprintf (int, const char *, ...);
+-extern void *malloc (size_t);
+-
+-// Check varargs macros.  These examples are taken from C99 6.10.3.5.
+-// dprintf is used instead of fprintf to avoid needing to declare
+-// FILE and stderr.
+-#define debug(...) dprintf (2, __VA_ARGS__)
+-#define showlist(...) puts (#__VA_ARGS__)
+-#define report(test,...) ((test) ? puts (#test) : printf (__VA_ARGS__))
+-static void
+-test_varargs_macros (void)
+-{
+-  int x = 1234;
+-  int y = 5678;
+-  debug ("Flag");
+-  debug ("X = %d\n", x);
+-  showlist (The first, second, and third items.);
+-  report (x>y, "x is %d but y is %d", x, y);
+-}
+-
+-// Check long long types.
+-#define BIG64 18446744073709551615ull
+-#define BIG32 4294967295ul
+-#define BIG_OK (BIG64 / BIG32 == 4294967297ull && BIG64 % BIG32 == 0)
+-#if !BIG_OK
+-  #error "your preprocessor is broken"
+-#endif
+-#if BIG_OK
+-#else
+-  #error "your preprocessor is broken"
+-#endif
+-static long long int bignum = -9223372036854775807LL;
+-static unsigned long long int ubignum = BIG64;
+-
+-struct incomplete_array
+-{
+-  int datasize;
+-  double data[];
+-};
+-
+-struct named_init {
+-  int number;
+-  const wchar_t *name;
+-  double average;
+-};
+-
+-typedef const char *ccp;
+-
+-static inline int
+-test_restrict (ccp restrict text)
+-{
+-  // See if C++-style comments work.
+-  // Iterate through items via the restricted pointer.
+-  // Also check for declarations in for loops.
+-  for (unsigned int i = 0; *(text+i) != '\''\0'\''; ++i)
+-    continue;
+-  return 0;
+-}
+-
+-// Check varargs and va_copy.
+-static bool
+-test_varargs (const char *format, ...)
+-{
+-  va_list args;
+-  va_start (args, format);
+-  va_list args_copy;
+-  va_copy (args_copy, args);
+-
+-  const char *str = "";
+-  int number = 0;
+-  float fnumber = 0;
+-
+-  while (*format)
+-    {
+-      switch (*format++)
+-	{
+-	case '\''s'\'': // string
+-	  str = va_arg (args_copy, const char *);
+-	  break;
+-	case '\''d'\'': // int
+-	  number = va_arg (args_copy, int);
+-	  break;
+-	case '\''f'\'': // float
+-	  fnumber = va_arg (args_copy, double);
+-	  break;
+-	default:
+-	  break;
+-	}
+-    }
+-  va_end (args_copy);
+-  va_end (args);
+-
+-  return *str && number && fnumber;
+-}
+-'
+-
+-# Test code for whether the C compiler supports C99 (body of main).
+-ac_c_conftest_c99_main='
+-  // Check bool.
+-  _Bool success = false;
+-  success |= (argc != 0);
+-
+-  // Check restrict.
+-  if (test_restrict ("String literal") == 0)
+-    success = true;
+-  char *restrict newvar = "Another string";
+-
+-  // Check varargs.
+-  success &= test_varargs ("s, d'\'' f .", "string", 65, 34.234);
+-  test_varargs_macros ();
+-
+-  // Check flexible array members.
+-  struct incomplete_array *ia =
+-    malloc (sizeof (struct incomplete_array) + (sizeof (double) * 10));
+-  ia->datasize = 10;
+-  for (int i = 0; i < ia->datasize; ++i)
+-    ia->data[i] = i * 1.234;
+-
+-  // Check named initializers.
+-  struct named_init ni = {
+-    .number = 34,
+-    .name = L"Test wide string",
+-    .average = 543.34343,
+-  };
+-
+-  ni.number = 58;
+-
+-  int dynamic_array[ni.number];
+-  dynamic_array[0] = argv[0][0];
+-  dynamic_array[ni.number - 1] = 543;
+-
+-  // work around unused variable warnings
+-  ok |= (!success || bignum == 0LL || ubignum == 0uLL || newvar[0] == '\''x'\''
+-	 || dynamic_array[ni.number - 1] != 543);
+-'
+-
+-# Test code for whether the C compiler supports C11 (global declarations)
+-ac_c_conftest_c11_globals='
+-// Does the compiler advertise C11 conformance?
+-#if !defined __STDC_VERSION__ || __STDC_VERSION__ < 201112L
+-# error "Compiler does not advertise C11 conformance"
+-#endif
+-
+-// Check _Alignas.
+-char _Alignas (double) aligned_as_double;
+-char _Alignas (0) no_special_alignment;
+-extern char aligned_as_int;
+-char _Alignas (0) _Alignas (int) aligned_as_int;
+-
+-// Check _Alignof.
+-enum
+-{
+-  int_alignment = _Alignof (int),
+-  int_array_alignment = _Alignof (int[100]),
+-  char_alignment = _Alignof (char)
+-};
+-_Static_assert (0 < -_Alignof (int), "_Alignof is signed");
+-
+-// Check _Noreturn.
+-int _Noreturn does_not_return (void) { for (;;) continue; }
+-
+-// Check _Static_assert.
+-struct test_static_assert
+-{
+-  int x;
+-  _Static_assert (sizeof (int) <= sizeof (long int),
+-                  "_Static_assert does not work in struct");
+-  long int y;
+-};
+-
+-// Check UTF-8 literals.
+-#define u8 syntax error!
+-char const utf8_literal[] = u8"happens to be ASCII" "another string";
+-
+-// Check duplicate typedefs.
+-typedef long *long_ptr;
+-typedef long int *long_ptr;
+-typedef long_ptr long_ptr;
+-
+-// Anonymous structures and unions -- taken from C11 6.7.2.1 Example 1.
+-struct anonymous
+-{
+-  union {
+-    struct { int i; int j; };
+-    struct { int k; long int l; } w;
+-  };
+-  int m;
+-} v1;
+-'
+-
+-# Test code for whether the C compiler supports C11 (body of main).
+-ac_c_conftest_c11_main='
+-  _Static_assert ((offsetof (struct anonymous, i)
+-		   == offsetof (struct anonymous, w.k)),
+-		  "Anonymous union alignment botch");
+-  v1.i = 2;
+-  v1.w.k = 5;
+-  ok |= v1.i != 5;
+-'
+-
+-# Test code for whether the C compiler supports C11 (complete).
+-ac_c_conftest_c11_program="${ac_c_conftest_c89_globals}
+-${ac_c_conftest_c99_globals}
+-${ac_c_conftest_c11_globals}
+-
+-int
+-main (int argc, char **argv)
+-{
+-  int ok = 0;
+-  ${ac_c_conftest_c89_main}
+-  ${ac_c_conftest_c99_main}
+-  ${ac_c_conftest_c11_main}
+-  return ok;
+-}
+-"
+-
+-# Test code for whether the C compiler supports C99 (complete).
+-ac_c_conftest_c99_program="${ac_c_conftest_c89_globals}
+-${ac_c_conftest_c99_globals}
+-
+-int
+-main (int argc, char **argv)
+-{
+-  int ok = 0;
+-  ${ac_c_conftest_c89_main}
+-  ${ac_c_conftest_c99_main}
+-  return ok;
+-}
+-"
+-
+-# Test code for whether the C compiler supports C89 (complete).
+-ac_c_conftest_c89_program="${ac_c_conftest_c89_globals}
+-
+-int
+-main (int argc, char **argv)
+-{
+-  int ok = 0;
+-  ${ac_c_conftest_c89_main}
+-  return ok;
+-}
+-"
+-
+-as_fn_append ac_header_c_list " stdlib.h stdlib_h HAVE_STDLIB_H"
+-as_fn_append ac_header_c_list " string.h string_h HAVE_STRING_H"
+-as_fn_append ac_header_c_list " inttypes.h inttypes_h HAVE_INTTYPES_H"
+-as_fn_append ac_header_c_list " stdint.h stdint_h HAVE_STDINT_H"
+-as_fn_append ac_header_c_list " strings.h strings_h HAVE_STRINGS_H"
+-as_fn_append ac_header_c_list " sys/stat.h sys_stat_h HAVE_SYS_STAT_H"
+-as_fn_append ac_header_c_list " sys/types.h sys_types_h HAVE_SYS_TYPES_H"
+-as_fn_append ac_header_c_list " unistd.h unistd_h HAVE_UNISTD_H"
+-as_fn_append ac_header_c_list " wchar.h wchar_h HAVE_WCHAR_H"
+-as_fn_append ac_header_c_list " minix/config.h minix_config_h HAVE_MINIX_CONFIG_H"
+-# Test code for whether the C++ compiler supports C++98 (global declarations)
+-ac_cxx_conftest_cxx98_globals='
+-// Does the compiler advertise C++98 conformance?
+-#if !defined __cplusplus || __cplusplus < 199711L
+-# error "Compiler does not advertise C++98 conformance"
+-#endif
+-
+-// These inclusions are to reject old compilers that
+-// lack the unsuffixed header files.
+-#include 
+-#include 
+-
+-//  and  are *not* freestanding headers in C++98.
+-extern void assert (int);
+-namespace std {
+-  extern int strcmp (const char *, const char *);
+-}
+-
+-// Namespaces, exceptions, and templates were all added after "C++ 2.0".
+-using std::exception;
+-using std::strcmp;
+-
+-namespace {
+-
+-void test_exception_syntax()
+-{
+-  try {
+-    throw "test";
+-  } catch (const char *s) {
+-    // Extra parentheses suppress a warning when building autoconf itself,
+-    // due to lint rules shared with more typical C programs.
+-    assert (!(strcmp) (s, "test"));
+-  }
+-}
+-
+-template  struct test_template
+-{
+-  T const val;
+-  explicit test_template(T t) : val(t) {}
+-  template  T add(U u) { return static_cast(u) + val; }
+-};
+-
+-} // anonymous namespace
+-'
+-
+-# Test code for whether the C++ compiler supports C++98 (body of main)
+-ac_cxx_conftest_cxx98_main='
+-  assert (argc);
+-  assert (! argv[0]);
+-{
+-  test_exception_syntax ();
+-  test_template tt (2.0);
+-  assert (tt.add (4) == 6.0);
+-  assert (true && !false);
+-}
+-'
+-
+-# Test code for whether the C++ compiler supports C++11 (global declarations)
+-ac_cxx_conftest_cxx11_globals='
+-// Does the compiler advertise C++ 2011 conformance?
+-#if !defined __cplusplus || __cplusplus < 201103L
+-# error "Compiler does not advertise C++11 conformance"
+-#endif
+-
+-namespace cxx11test
+-{
+-  constexpr int get_val() { return 20; }
+-
+-  struct testinit
+-  {
+-    int i;
+-    double d;
+-  };
+-
+-  class delegate
+-  {
+-  public:
+-    delegate(int n) : n(n) {}
+-    delegate(): delegate(2354) {}
+-
+-    virtual int getval() { return this->n; };
+-  protected:
+-    int n;
+-  };
+-
+-  class overridden : public delegate
+-  {
+-  public:
+-    overridden(int n): delegate(n) {}
+-    virtual int getval() override final { return this->n * 2; }
+-  };
+-
+-  class nocopy
+-  {
+-  public:
+-    nocopy(int i): i(i) {}
+-    nocopy() = default;
+-    nocopy(const nocopy&) = delete;
+-    nocopy & operator=(const nocopy&) = delete;
+-  private:
+-    int i;
+-  };
+-
+-  // for testing lambda expressions
+-  template  Ret eval(Fn f, Ret v)
+-  {
+-    return f(v);
+-  }
+-
+-  // for testing variadic templates and trailing return types
+-  template  auto sum(V first) -> V
+-  {
+-    return first;
+-  }
+-  template  auto sum(V first, Args... rest) -> V
+-  {
+-    return first + sum(rest...);
+-  }
+-}
+-'
+-
+-# Test code for whether the C++ compiler supports C++11 (body of main)
+-ac_cxx_conftest_cxx11_main='
+-{
+-  // Test auto and decltype
+-  auto a1 = 6538;
+-  auto a2 = 48573953.4;
+-  auto a3 = "String literal";
+-
+-  int total = 0;
+-  for (auto i = a3; *i; ++i) { total += *i; }
+-
+-  decltype(a2) a4 = 34895.034;
+-}
+-{
+-  // Test constexpr
+-  short sa[cxx11test::get_val()] = { 0 };
+-}
+-{
+-  // Test initializer lists
+-  cxx11test::testinit il = { 4323, 435234.23544 };
+-}
+-{
+-  // Test range-based for
+-  int array[] = {9, 7, 13, 15, 4, 18, 12, 10, 5, 3,
+-                 14, 19, 17, 8, 6, 20, 16, 2, 11, 1};
+-  for (auto &x : array) { x += 23; }
+-}
+-{
+-  // Test lambda expressions
+-  using cxx11test::eval;
+-  assert (eval ([](int x) { return x*2; }, 21) == 42);
+-  double d = 2.0;
+-  assert (eval ([&](double x) { return d += x; }, 3.0) == 5.0);
+-  assert (d == 5.0);
+-  assert (eval ([=](double x) mutable { return d += x; }, 4.0) == 9.0);
+-  assert (d == 5.0);
+-}
+-{
+-  // Test use of variadic templates
+-  using cxx11test::sum;
+-  auto a = sum(1);
+-  auto b = sum(1, 2);
+-  auto c = sum(1.0, 2.0, 3.0);
+-}
+-{
+-  // Test constructor delegation
+-  cxx11test::delegate d1;
+-  cxx11test::delegate d2();
+-  cxx11test::delegate d3(45);
+-}
+-{
+-  // Test override and final
+-  cxx11test::overridden o1(55464);
+-}
+-{
+-  // Test nullptr
+-  char *c = nullptr;
+-}
+-{
+-  // Test template brackets
+-  test_template<::test_template> v(test_template(12));
+-}
+-{
+-  // Unicode literals
+-  char const *utf8 = u8"UTF-8 string \u2500";
+-  char16_t const *utf16 = u"UTF-8 string \u2500";
+-  char32_t const *utf32 = U"UTF-32 string \u2500";
+-}
+-'
+-
+-# Test code for whether the C compiler supports C++11 (complete).
+-ac_cxx_conftest_cxx11_program="${ac_cxx_conftest_cxx98_globals}
+-${ac_cxx_conftest_cxx11_globals}
+-
+-int
+-main (int argc, char **argv)
+-{
+-  int ok = 0;
+-  ${ac_cxx_conftest_cxx98_main}
+-  ${ac_cxx_conftest_cxx11_main}
+-  return ok;
+-}
+-"
+-
+-# Test code for whether the C compiler supports C++98 (complete).
+-ac_cxx_conftest_cxx98_program="${ac_cxx_conftest_cxx98_globals}
+-int
+-main (int argc, char **argv)
+-{
+-  int ok = 0;
+-  ${ac_cxx_conftest_cxx98_main}
+-  return ok;
+-}
+-"
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+ 
+-# Auxiliary files required by this configure script.
+-ac_aux_files="ltmain.sh compile missing install-sh config.guess config.sub"
+ 
+-# Locations in which to look for auxiliary files.
+-ac_aux_dir_candidates="${srcdir}${PATH_SEPARATOR}${srcdir}/..${PATH_SEPARATOR}${srcdir}/../.."
+ 
+-# Search for a directory containing all of the required auxiliary files,
+-# $ac_aux_files, from the $PATH-style list $ac_aux_dir_candidates.
+-# If we don't find one directory that contains all the files we need,
+-# we report the set of missing files from the *first* directory in
+-# $ac_aux_dir_candidates and give up.
+-ac_missing_aux_files=""
+-ac_first_candidate=:
+-printf "%s\n" "$as_me:${as_lineno-$LINENO}: looking for aux files: $ac_aux_files" >&5
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-as_found=false
+-for as_dir in $ac_aux_dir_candidates
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-  as_found=:
+ 
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}:  trying $as_dir" >&5
+-  ac_aux_dir_found=yes
+-  ac_install_sh=
+-  for ac_aux in $ac_aux_files
+-  do
+-    # As a special case, if "install-sh" is required, that requirement
+-    # can be satisfied by any of "install-sh", "install.sh", or "shtool",
+-    # and $ac_install_sh is set appropriately for whichever one is found.
+-    if test x"$ac_aux" = x"install-sh"
+-    then
+-      if test -f "${as_dir}install-sh"; then
+-        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}install-sh found" >&5
+-        ac_install_sh="${as_dir}install-sh -c"
+-      elif test -f "${as_dir}install.sh"; then
+-        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}install.sh found" >&5
+-        ac_install_sh="${as_dir}install.sh -c"
+-      elif test -f "${as_dir}shtool"; then
+-        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}shtool found" >&5
+-        ac_install_sh="${as_dir}shtool install -c"
+-      else
+-        ac_aux_dir_found=no
+-        if $ac_first_candidate; then
+-          ac_missing_aux_files="${ac_missing_aux_files} install-sh"
+-        else
+-          break
+-        fi
+-      fi
+-    else
+-      if test -f "${as_dir}${ac_aux}"; then
+-        printf "%s\n" "$as_me:${as_lineno-$LINENO}:   ${as_dir}${ac_aux} found" >&5
+-      else
+-        ac_aux_dir_found=no
+-        if $ac_first_candidate; then
+-          ac_missing_aux_files="${ac_missing_aux_files} ${ac_aux}"
+-        else
+-          break
+-        fi
+-      fi
+-    fi
+-  done
+-  if test "$ac_aux_dir_found" = yes; then
+-    ac_aux_dir="$as_dir"
++ac_aux_dir=
++for ac_dir in "$srcdir" "$srcdir/.." "$srcdir/../.."; do
++  if test -f "$ac_dir/install-sh"; then
++    ac_aux_dir=$ac_dir
++    ac_install_sh="$ac_aux_dir/install-sh -c"
++    break
++  elif test -f "$ac_dir/install.sh"; then
++    ac_aux_dir=$ac_dir
++    ac_install_sh="$ac_aux_dir/install.sh -c"
++    break
++  elif test -f "$ac_dir/shtool"; then
++    ac_aux_dir=$ac_dir
++    ac_install_sh="$ac_aux_dir/shtool install -c"
+     break
+   fi
+-  ac_first_candidate=false
+-
+-  as_found=false
+ done
+-IFS=$as_save_IFS
+-if $as_found
+-then :
+-
+-else $as_nop
+-  as_fn_error $? "cannot find required auxiliary files:$ac_missing_aux_files" "$LINENO" 5
++if test -z "$ac_aux_dir"; then
++  as_fn_error $? "cannot find install-sh, install.sh, or shtool in \"$srcdir\" \"$srcdir/..\" \"$srcdir/../..\"" "$LINENO" 5
+ fi
+ 
+-
+ # These three variables are undocumented and unsupported,
+ # and are intended to be withdrawn in a future Autoconf release.
+ # They can cause serious problems if a builder's source tree is in a directory
+ # whose full name contains unusual characters.
+-if test -f "${ac_aux_dir}config.guess"; then
+-  ac_config_guess="$SHELL ${ac_aux_dir}config.guess"
+-fi
+-if test -f "${ac_aux_dir}config.sub"; then
+-  ac_config_sub="$SHELL ${ac_aux_dir}config.sub"
+-fi
+-if test -f "$ac_aux_dir/configure"; then
+-  ac_configure="$SHELL ${ac_aux_dir}configure"
+-fi
++ac_config_guess="$SHELL $ac_aux_dir/config.guess"  # Please don't use this var.
++ac_config_sub="$SHELL $ac_aux_dir/config.sub"  # Please don't use this var.
++ac_configure="$SHELL $ac_aux_dir/configure"  # Please don't use this var.
+ 
+-# Check that the precious variables saved in the cache have kept the same
+-# value.
+-ac_cache_corrupted=false
+-for ac_var in $ac_precious_vars; do
+-  eval ac_old_set=\$ac_cv_env_${ac_var}_set
+-  eval ac_new_set=\$ac_env_${ac_var}_set
+-  eval ac_old_val=\$ac_cv_env_${ac_var}_value
+-  eval ac_new_val=\$ac_env_${ac_var}_value
+-  case $ac_old_set,$ac_new_set in
+-    set,)
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5
+-printf "%s\n" "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;}
+-      ac_cache_corrupted=: ;;
+-    ,set)
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5
+-printf "%s\n" "$as_me: error: \`$ac_var' was not set in the previous run" >&2;}
+-      ac_cache_corrupted=: ;;
+-    ,);;
+-    *)
+-      if test "x$ac_old_val" != "x$ac_new_val"; then
+-	# differences in whitespace do not lead to failure.
+-	ac_old_val_w=`echo x $ac_old_val`
+-	ac_new_val_w=`echo x $ac_new_val`
+-	if test "$ac_old_val_w" != "$ac_new_val_w"; then
+-	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5
+-printf "%s\n" "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;}
+-	  ac_cache_corrupted=:
+-	else
+-	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5
+-printf "%s\n" "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;}
+-	  eval $ac_var=\$ac_old_val
+-	fi
+-	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}:   former value:  \`$ac_old_val'" >&5
+-printf "%s\n" "$as_me:   former value:  \`$ac_old_val'" >&2;}
+-	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}:   current value: \`$ac_new_val'" >&5
+-printf "%s\n" "$as_me:   current value: \`$ac_new_val'" >&2;}
+-      fi;;
+-  esac
+-  # Pass precious variables to config.status.
+-  if test "$ac_new_set" = set; then
+-    case $ac_new_val in
+-    *\'*) ac_arg=$ac_var=`printf "%s\n" "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;;
+-    *) ac_arg=$ac_var=$ac_new_val ;;
+-    esac
+-    case " $ac_configure_args " in
+-      *" '$ac_arg' "*) ;; # Avoid dups.  Use of quotes ensures accuracy.
+-      *) as_fn_append ac_configure_args " '$ac_arg'" ;;
+-    esac
+-  fi
+-done
+-if $ac_cache_corrupted; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5
+-printf "%s\n" "$as_me: error: changes in the environment can compromise the build" >&2;}
+-  as_fn_error $? "run \`${MAKE-make} distclean' and/or \`rm $cache_file'
+-	    and start over" "$LINENO" 5
+-fi
+-## -------------------- ##
+-## Main body of script. ##
+-## -------------------- ##
+-
+-ac_ext=c
+-ac_cpp='$CPP $CPPFLAGS'
+-ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+-ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+-ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
++# Make sure we can run config.sub.
++$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 ||
++  as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5
+ 
+-
+-
+-
+-  # Make sure we can run config.sub.
+-$SHELL "${ac_aux_dir}config.sub" sun4 >/dev/null 2>&1 ||
+-  as_fn_error $? "cannot run $SHELL ${ac_aux_dir}config.sub" "$LINENO" 5
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
+-printf %s "checking build system type... " >&6; }
+-if test ${ac_cv_build+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5
++$as_echo_n "checking build system type... " >&6; }
++if ${ac_cv_build+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_build_alias=$build_alias
+ test "x$ac_build_alias" = x &&
+-  ac_build_alias=`$SHELL "${ac_aux_dir}config.guess"`
++  ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"`
+ test "x$ac_build_alias" = x &&
+   as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5
+-ac_cv_build=`$SHELL "${ac_aux_dir}config.sub" $ac_build_alias` ||
+-  as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $ac_build_alias failed" "$LINENO" 5
++ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` ||
++  as_fn_error $? "$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
+-printf "%s\n" "$ac_cv_build" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5
++$as_echo "$ac_cv_build" >&6; }
+ case $ac_cv_build in
+ *-*-*) ;;
+ *) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;;
+@@ -3059,22 +2558,21 @@ IFS=$ac_save_IFS
+ case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
+-printf %s "checking host system type... " >&6; }
+-if test ${ac_cv_host+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5
++$as_echo_n "checking host system type... " >&6; }
++if ${ac_cv_host+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test "x$host_alias" = x; then
+   ac_cv_host=$ac_cv_build
+ else
+-  ac_cv_host=`$SHELL "${ac_aux_dir}config.sub" $host_alias` ||
+-    as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $host_alias failed" "$LINENO" 5
++  ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` ||
++    as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5
+ fi
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
+-printf "%s\n" "$ac_cv_host" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5
++$as_echo "$ac_cv_host" >&6; }
+ case $ac_cv_host in
+ *-*-*) ;;
+ *) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;;
+@@ -3093,22 +2591,21 @@ IFS=$ac_save_IFS
+ case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking target system type" >&5
+-printf %s "checking target system type... " >&6; }
+-if test ${ac_cv_target+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking target system type" >&5
++$as_echo_n "checking target system type... " >&6; }
++if ${ac_cv_target+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test "x$target_alias" = x; then
+   ac_cv_target=$ac_cv_host
+ else
+-  ac_cv_target=`$SHELL "${ac_aux_dir}config.sub" $target_alias` ||
+-    as_fn_error $? "$SHELL ${ac_aux_dir}config.sub $target_alias failed" "$LINENO" 5
++  ac_cv_target=`$SHELL "$ac_aux_dir/config.sub" $target_alias` ||
++    as_fn_error $? "$SHELL $ac_aux_dir/config.sub $target_alias failed" "$LINENO" 5
+ fi
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5
+-printf "%s\n" "$ac_cv_target" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_target" >&5
++$as_echo "$ac_cv_target" >&6; }
+ case $ac_cv_target in
+ *-*-*) ;;
+ *) as_fn_error $? "invalid value of canonical target" "$LINENO" 5;;
+@@ -3133,11 +2630,54 @@ test -n "$target_alias" &&
+   test "$program_prefix$program_suffix$program_transform_name" = \
+     NONENONEs,x,x, &&
+   program_prefix=${target_alias}-
+-GCC_TOPLEV_SUBDIRS
+-am__api_version='1.16'
++ case ${build_alias} in
++  "") build_noncanonical=${build} ;;
++  *) build_noncanonical=${build_alias} ;;
++esac
++
++ case ${host_alias} in
++  "") host_noncanonical=${build_noncanonical} ;;
++  *) host_noncanonical=${host_alias} ;;
++esac
++
++ case ${target_alias} in
++  "") target_noncanonical=${host_noncanonical} ;;
++  *) target_noncanonical=${target_alias} ;;
++esac
++
++
++# post-stage1 host modules use a different CC_FOR_BUILD so, in order to
++# have matching libraries, they should use host libraries: Makefile.tpl
++# arranges to pass --with-build-libsubdir=$(HOST_SUBDIR).
++# However, they still use the build modules, because the corresponding
++# host modules (e.g. bison) are only built for the host when bootstrap
++# finishes. So:
++# - build_subdir is where we find build modules, and never changes.
++# - build_libsubdir is where we find build libraries, and can be overridden.
++
++# Prefix 'build-' so this never conflicts with target_subdir.
++build_subdir="build-${build_noncanonical}"
++
++# Check whether --with-build-libsubdir was given.
++if test "${with_build_libsubdir+set}" = set; then :
++  withval=$with_build_libsubdir; build_libsubdir="$withval"
++else
++  build_libsubdir="$build_subdir"
++fi
++
++# --srcdir=. covers the toplevel, while "test -d" covers the subdirectories
++if ( test $srcdir = . && test -d gcc ) \
++   || test -d $srcdir/../host-${host_noncanonical}; then
++  host_subdir="host-${host_noncanonical}"
++else
++  host_subdir=.
++fi
++# No prefix.
++target_subdir=${target_noncanonical}
+ 
++am__api_version='1.16'
+ 
+-  # Find a good install program.  We prefer a C program (faster),
++# Find a good install program.  We prefer a C program (faster),
+ # so one script is as good as another.  But avoid the broken or
+ # incompatible versions:
+ # SysV /etc/install, /usr/sbin/install
+@@ -3151,25 +2691,20 @@ am__api_version='1.16'
+ # OS/2's system install, which has a completely different semantic
+ # ./install, which can be erroneously created by make from ./install.sh.
+ # Reject install programs that cannot install multiple files.
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
+-printf %s "checking for a BSD-compatible install... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5
++$as_echo_n "checking for a BSD-compatible install... " >&6; }
+ if test -z "$INSTALL"; then
+-if test ${ac_cv_path_install+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++if ${ac_cv_path_install+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    # Account for fact that we put trailing slashes in our PATH walk.
+-case $as_dir in #((
+-  ./ | /[cC]/* | \
++  test -z "$as_dir" && as_dir=.
++    # Account for people who put trailing slashes in PATH elements.
++case $as_dir/ in #((
++  ./ | .// | /[cC]/* | \
+   /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \
+   ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \
+   /usr/ucb/* ) ;;
+@@ -3179,13 +2714,13 @@ case $as_dir in #((
+     # by default.
+     for ac_prog in ginstall scoinst install; do
+       for ac_exec_ext in '' $ac_executable_extensions; do
+-	if as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext"; then
++	if as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext"; then
+ 	  if test $ac_prog = install &&
+-	    grep dspmsg "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
++	    grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ 	    # AIX install.  It has an incompatible calling convention.
+ 	    :
+ 	  elif test $ac_prog = install &&
+-	    grep pwplus "$as_dir$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
++	    grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then
+ 	    # program-specific install script used by HP pwplus--don't use.
+ 	    :
+ 	  else
+@@ -3193,12 +2728,12 @@ case $as_dir in #((
+ 	    echo one > conftest.one
+ 	    echo two > conftest.two
+ 	    mkdir conftest.dir
+-	    if "$as_dir$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir/" &&
++	    if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" &&
+ 	      test -s conftest.one && test -s conftest.two &&
+ 	      test -s conftest.dir/conftest.one &&
+ 	      test -s conftest.dir/conftest.two
+ 	    then
+-	      ac_cv_path_install="$as_dir$ac_prog$ac_exec_ext -c"
++	      ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c"
+ 	      break 3
+ 	    fi
+ 	  fi
+@@ -3214,7 +2749,7 @@ IFS=$as_save_IFS
+ rm -rf conftest.one conftest.two conftest.dir
+ 
+ fi
+-  if test ${ac_cv_path_install+y}; then
++  if test "${ac_cv_path_install+set}" = set; then
+     INSTALL=$ac_cv_path_install
+   else
+     # As a last resort, use the slow shell script.  Don't cache a
+@@ -3224,8 +2759,8 @@ fi
+     INSTALL=$ac_install_sh
+   fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
+-printf "%s\n" "$INSTALL" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5
++$as_echo "$INSTALL" >&6; }
+ 
+ # Use test -z because SunOS4 sh mishandles braces in ${var-val}.
+ # It thinks the first close brace ends the variable substitution.
+@@ -3235,8 +2770,8 @@ test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}'
+ 
+ test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644'
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
+-printf %s "checking whether build environment is sane... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5
++$as_echo_n "checking whether build environment is sane... " >&6; }
+ # Reject unsafe characters in $srcdir or the absolute working directory
+ # name.  Accept space and tab only in the latter.
+ am_lf='
+@@ -3290,8 +2825,8 @@ else
+    as_fn_error $? "newly created file is older than distributed files!
+ Check your system clock" "$LINENO" 5
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+-printf "%s\n" "yes" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++$as_echo "yes" >&6; }
+ # If we didn't sleep, we still need to ensure time stamps of config.status and
+ # generated files are strictly newer.
+ am_sleep_pid=
+@@ -3310,23 +2845,26 @@ test "$program_suffix" != NONE &&
+ # Double any \ or $.
+ # By default was `s,x,x', remove it if useless.
+ ac_script='s/[\\$]/&&/g;s/;s,x,x,$//'
+-program_transform_name=`printf "%s\n" "$program_transform_name" | sed "$ac_script"`
+-
++program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"`
+ 
+ # Expand $ac_aux_dir to an absolute path.
+ am_aux_dir=`cd "$ac_aux_dir" && pwd`
+ 
+-
+-  if test x"${MISSING+set}" != xset; then
+-  MISSING="\${SHELL} '$am_aux_dir/missing'"
++if test x"${MISSING+set}" != xset; then
++  case $am_aux_dir in
++  *\ * | *\	*)
++    MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;;
++  *)
++    MISSING="\${SHELL} $am_aux_dir/missing" ;;
++  esac
+ fi
+ # Use eval to expand $SHELL
+ if eval "$MISSING --is-lightweight"; then
+   am_missing_run="$MISSING "
+ else
+   am_missing_run=
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
+-printf "%s\n" "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
++  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: 'missing' script is too old or missing" >&5
++$as_echo "$as_me: WARNING: 'missing' script is too old or missing" >&2;}
+ fi
+ 
+ if test x"${install_sh+set}" != xset; then
+@@ -3346,12 +2884,11 @@ if test "$cross_compiling" != no; then
+   if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}strip; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_STRIP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_STRIP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$STRIP"; then
+   ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+ else
+@@ -3359,15 +2896,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -3378,11 +2911,11 @@ fi
+ fi
+ STRIP=$ac_cv_prog_STRIP
+ if test -n "$STRIP"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+-printf "%s\n" "$STRIP" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
++$as_echo "$STRIP" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -3391,12 +2924,11 @@ if test -z "$ac_cv_prog_STRIP"; then
+   ac_ct_STRIP=$STRIP
+   # Extract the first word of "strip", so it can be a program name with args.
+ set dummy strip; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_STRIP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_STRIP"; then
+   ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+ else
+@@ -3404,15 +2936,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_STRIP="strip"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -3423,11 +2951,11 @@ fi
+ fi
+ ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+ if test -n "$ac_ct_STRIP"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+-printf "%s\n" "$ac_ct_STRIP" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
++$as_echo "$ac_ct_STRIP" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_STRIP" = x; then
+@@ -3435,8 +2963,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     STRIP=$ac_ct_STRIP
+@@ -3448,31 +2976,25 @@ fi
+ fi
+ INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s"
+ 
+-
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a race-free mkdir -p" >&5
+-printf %s "checking for a race-free mkdir -p... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5
++$as_echo_n "checking for a thread-safe mkdir -p... " >&6; }
+ if test -z "$MKDIR_P"; then
+-  if test ${ac_cv_path_mkdir+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  if ${ac_cv_path_mkdir+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_prog in mkdir gmkdir; do
+ 	 for ac_exec_ext in '' $ac_executable_extensions; do
+-	   as_fn_executable_p "$as_dir$ac_prog$ac_exec_ext" || continue
+-	   case `"$as_dir$ac_prog$ac_exec_ext" --version 2>&1` in #(
+-	     'mkdir ('*'coreutils) '* | \
+-	     'BusyBox '* | \
++	   as_fn_executable_p "$as_dir/$ac_prog$ac_exec_ext" || continue
++	   case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #(
++	     'mkdir (GNU coreutils) '* | \
++	     'mkdir (coreutils) '* | \
+ 	     'mkdir (fileutils) '4.1*)
+-	       ac_cv_path_mkdir=$as_dir$ac_prog$ac_exec_ext
++	       ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext
+ 	       break 3;;
+ 	   esac
+ 	 done
+@@ -3483,7 +3005,7 @@ IFS=$as_save_IFS
+ fi
+ 
+   test -d ./--version && rmdir ./--version
+-  if test ${ac_cv_path_mkdir+y}; then
++  if test "${ac_cv_path_mkdir+set}" = set; then
+     MKDIR_P="$ac_cv_path_mkdir -p"
+   else
+     # As a last resort, use the slow shell script.  Don't cache a
+@@ -3493,19 +3015,18 @@ fi
+     MKDIR_P="$ac_install_sh -d"
+   fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
+-printf "%s\n" "$MKDIR_P" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5
++$as_echo "$MKDIR_P" >&6; }
+ 
+ for ac_prog in gawk mawk nawk awk
+ do
+   # Extract the first word of "$ac_prog", so it can be a program name with args.
+ set dummy $ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_AWK+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_AWK+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$AWK"; then
+   ac_cv_prog_AWK="$AWK" # Let the user override the test.
+ else
+@@ -3513,15 +3034,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_AWK="$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -3532,25 +3049,24 @@ fi
+ fi
+ AWK=$ac_cv_prog_AWK
+ if test -n "$AWK"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
+-printf "%s\n" "$AWK" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5
++$as_echo "$AWK" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+   test -n "$AWK" && break
+ done
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
+-printf %s "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5
++$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; }
+ set x ${MAKE-make}
+-ac_make=`printf "%s\n" "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
+-if eval test \${ac_cv_prog_make_${ac_make}_set+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'`
++if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat >conftest.make <<\_ACEOF
+ SHELL = /bin/sh
+ all:
+@@ -3566,12 +3082,12 @@ esac
+ rm -f conftest.make
+ fi
+ if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+-printf "%s\n" "yes" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++$as_echo "yes" >&6; }
+   SET_MAKE=
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+   SET_MAKE="MAKE=${MAKE-make}"
+ fi
+ 
+@@ -3585,8 +3101,7 @@ fi
+ rmdir .tst 2>/dev/null
+ 
+ # Check whether --enable-silent-rules was given.
+-if test ${enable_silent_rules+y}
+-then :
++if test "${enable_silent_rules+set}" = set; then :
+   enableval=$enable_silent_rules;
+ fi
+ 
+@@ -3596,13 +3111,12 @@ case $enable_silent_rules in # (((
+     *) AM_DEFAULT_VERBOSITY=1;;
+ esac
+ am_make=${MAKE-make}
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
+-printf %s "checking whether $am_make supports nested variables... " >&6; }
+-if test ${am_cv_make_support_nested_variables+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if printf "%s\n" 'TRUE=$(BAR$(V))
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $am_make supports nested variables" >&5
++$as_echo_n "checking whether $am_make supports nested variables... " >&6; }
++if ${am_cv_make_support_nested_variables+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  if $as_echo 'TRUE=$(BAR$(V))
+ BAR0=false
+ BAR1=true
+ V=1
+@@ -3614,8 +3128,8 @@ else
+   am_cv_make_support_nested_variables=no
+ fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
+-printf "%s\n" "$am_cv_make_support_nested_variables" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_make_support_nested_variables" >&5
++$as_echo "$am_cv_make_support_nested_variables" >&6; }
+ if test $am_cv_make_support_nested_variables = yes; then
+     AM_V='$(V)'
+   AM_DEFAULT_V='$(AM_DEFAULT_VERBOSITY)'
+@@ -3650,10 +3164,14 @@ fi
+  VERSION='0.1'
+ 
+ 
+-printf "%s\n" "#define PACKAGE \"$PACKAGE\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define PACKAGE "$PACKAGE"
++_ACEOF
+ 
+ 
+-printf "%s\n" "#define VERSION \"$VERSION\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define VERSION "$VERSION"
++_ACEOF
+ 
+ # Some tools Automake needs.
+ 
+@@ -3693,20 +3211,6 @@ am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'
+ 
+ 
+ 
+-# Variables for tags utilities; see am/tags.am
+-if test -z "$CTAGS"; then
+-  CTAGS=ctags
+-fi
+-
+-if test -z "$ETAGS"; then
+-  ETAGS=etags
+-fi
+-
+-if test -z "$CSCOPE"; then
+-  CSCOPE=cscope
+-fi
+-
+-
+ 
+ # POSIX will say in a future version that running "rm -f" with no argument
+ # is OK; and we want to be able to make that assumption in our Makefile
+@@ -3751,18 +3255,17 @@ END
+ fi
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5
+-printf %s "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to enable maintainer-specific portions of Makefiles" >&5
++$as_echo_n "checking whether to enable maintainer-specific portions of Makefiles... " >&6; }
+     # Check whether --enable-maintainer-mode was given.
+-if test ${enable_maintainer_mode+y}
+-then :
++if test "${enable_maintainer_mode+set}" = set; then :
+   enableval=$enable_maintainer_mode; USE_MAINTAINER_MODE=$enableval
+-else $as_nop
++else
+   USE_MAINTAINER_MODE=no
+ fi
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5
+-printf "%s\n" "$USE_MAINTAINER_MODE" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $USE_MAINTAINER_MODE" >&5
++$as_echo "$USE_MAINTAINER_MODE" >&6; }
+    if test $USE_MAINTAINER_MODE = yes; then
+   MAINTAINER_MODE_TRUE=
+   MAINTAINER_MODE_FALSE='#'
+@@ -3776,29 +3279,19 @@ fi
+ 
+ 
+ # Check whether --with-libiberty was given.
+-if test ${with_libiberty+y}
+-then :
++if test "${with_libiberty+set}" = set; then :
+   withval=$with_libiberty;
+-else $as_nop
++else
+   with_libiberty=../libiberty
+ fi
+ 
+ 
+-
+-
+-
+-
+-
+-
+-
+-
+-
+ DEPDIR="${am__leading_dot}deps"
+ 
+ ac_config_commands="$ac_config_commands depfiles"
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5
+-printf %s "checking whether ${MAKE-make} supports the include directive... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} supports the include directive" >&5
++$as_echo_n "checking whether ${MAKE-make} supports the include directive... " >&6; }
+ cat > confinc.mk << 'END'
+ am__doit:
+ 	@echo this is the am__doit target >confinc.out
+@@ -3834,12 +3327,11 @@ esac
+   fi
+ done
+ rm -f confinc.* confmf.*
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5
+-printf "%s\n" "${_am_result}" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: ${_am_result}" >&5
++$as_echo "${_am_result}" >&6; }
+ 
+ # Check whether --enable-dependency-tracking was given.
+-if test ${enable_dependency_tracking+y}
+-then :
++if test "${enable_dependency_tracking+set}" = set; then :
+   enableval=$enable_dependency_tracking;
+ fi
+ 
+@@ -3865,12 +3357,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}gcc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -3878,15 +3369,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_CC="${ac_tool_prefix}gcc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -3897,11 +3384,11 @@ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -3910,12 +3397,11 @@ if test -z "$ac_cv_prog_CC"; then
+   ac_ct_CC=$CC
+   # Extract the first word of "gcc", so it can be a program name with args.
+ set dummy gcc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_CC"; then
+   ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+ else
+@@ -3923,15 +3409,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_CC="gcc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -3942,11 +3424,11 @@ fi
+ fi
+ ac_ct_CC=$ac_cv_prog_ac_ct_CC
+ if test -n "$ac_ct_CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+-printf "%s\n" "$ac_ct_CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++$as_echo "$ac_ct_CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_CC" = x; then
+@@ -3954,8 +3436,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     CC=$ac_ct_CC
+@@ -3968,12 +3450,11 @@ if test -z "$CC"; then
+           if test -n "$ac_tool_prefix"; then
+     # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}cc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -3981,15 +3462,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_CC="${ac_tool_prefix}cc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -4000,11 +3477,11 @@ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -4013,12 +3490,11 @@ fi
+ if test -z "$CC"; then
+   # Extract the first word of "cc", so it can be a program name with args.
+ set dummy cc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -4027,19 +3503,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
++    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+        ac_prog_rejected=yes
+        continue
+      fi
+     ac_cv_prog_CC="cc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -4055,18 +3527,18 @@ if test $ac_prog_rejected = yes; then
+     # However, it has the same basename, so the bogon will be chosen
+     # first if we set CC to just the basename; use the full file name.
+     shift
+-    ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@"
++    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+   fi
+ fi
+ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -4077,12 +3549,11 @@ if test -z "$CC"; then
+   do
+     # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+ set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -4090,15 +3561,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -4109,11 +3576,11 @@ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -4126,118 +3593,11 @@ if test -z "$CC"; then
+ do
+   # Extract the first word of "$ac_prog", so it can be a program name with args.
+ set dummy $ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$ac_ct_CC"; then
+-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_ac_ct_CC="$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-ac_ct_CC=$ac_cv_prog_ac_ct_CC
+-if test -n "$ac_ct_CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+-printf "%s\n" "$ac_ct_CC" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-
+-  test -n "$ac_ct_CC" && break
+-done
+-
+-  if test "x$ac_ct_CC" = x; then
+-    CC=""
+-  else
+-    case $cross_compiling:$ac_tool_warned in
+-yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+-ac_tool_warned=yes ;;
+-esac
+-    CC=$ac_ct_CC
+-  fi
+-fi
+-
+-fi
+-if test -z "$CC"; then
+-  if test -n "$ac_tool_prefix"; then
+-  # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args.
+-set dummy ${ac_tool_prefix}clang; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$CC"; then
+-  ac_cv_prog_CC="$CC" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_CC="${ac_tool_prefix}clang"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-CC=$ac_cv_prog_CC
+-if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-
+-fi
+-if test -z "$ac_cv_prog_CC"; then
+-  ac_ct_CC=$CC
+-  # Extract the first word of "clang", so it can be a program name with args.
+-set dummy clang; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_CC"; then
+   ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+ else
+@@ -4245,15 +3605,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_ac_ct_CC="clang"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_CC="$ac_prog"
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -4264,48 +3620,50 @@ fi
+ fi
+ ac_ct_CC=$ac_cv_prog_ac_ct_CC
+ if test -n "$ac_ct_CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+-printf "%s\n" "$ac_ct_CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++$as_echo "$ac_ct_CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
++
++  test -n "$ac_ct_CC" && break
++done
++
+   if test "x$ac_ct_CC" = x; then
+     CC=""
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     CC=$ac_ct_CC
+   fi
+-else
+-  CC="$ac_cv_prog_CC"
+ fi
+ 
+ fi
+ 
+ 
+-test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error $? "no acceptable C compiler found in \$PATH
+ See \`config.log' for more details" "$LINENO" 5; }
+ 
+ # Provide some information about the compiler.
+-printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
++$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+ set X $ac_compile
+ ac_compiler=$2
+-for ac_option in --version -v -V -qversion -version; do
++for ac_option in --version -v -V -qversion; do
+   { { ac_try="$ac_compiler $ac_option >&5"
+ case "(($ac_try" in
+   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -4315,7 +3673,7 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+   fi
+   rm -f conftest.er1 conftest.err
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }
+ done
+ 
+@@ -4323,7 +3681,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+@@ -4335,9 +3693,9 @@ ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out"
+ # Try to create an executable without -o first, disregard a.out.
+ # It will help us diagnose broken compilers, and finding out an intuition
+ # of exeext.
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
+-printf %s "checking whether the C compiler works... " >&6; }
+-ac_link_default=`printf "%s\n" "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5
++$as_echo_n "checking whether the C compiler works... " >&6; }
++ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'`
+ 
+ # The possible output files:
+ ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*"
+@@ -4358,12 +3716,11 @@ case "(($ac_try" in
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_link_default") 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }
+-then :
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then :
+   # Autoconf-2.13 could set the ac_cv_exeext variable to `no'.
+ # So ignore a value of `no', otherwise this would lead to `EXEEXT = no'
+ # in a Makefile.  We should not override ac_cv_exeext if it was cached,
+@@ -4380,7 +3737,7 @@ do
+ 	# certainly right.
+ 	break;;
+     *.* )
+-	if test ${ac_cv_exeext+y} && test "$ac_cv_exeext" != no;
++	if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no;
+ 	then :; else
+ 	   ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'`
+ 	fi
+@@ -4396,46 +3753,44 @@ do
+ done
+ test "$ac_cv_exeext" = no && ac_cv_exeext=
+ 
+-else $as_nop
++else
+   ac_file=''
+ fi
+-if test -z "$ac_file"
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-printf "%s\n" "$as_me: failed program was:" >&5
++if test -z "$ac_file"; then :
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
++$as_echo "$as_me: failed program was:" >&5
+ sed 's/^/| /' conftest.$ac_ext >&5
+ 
+-{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error 77 "C compiler cannot create executables
+ See \`config.log' for more details" "$LINENO" 5; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+-printf "%s\n" "yes" >&6; }
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
+-printf %s "checking for C compiler default output file name... " >&6; }
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
+-printf "%s\n" "$ac_file" >&6; }
++else
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++$as_echo "yes" >&6; }
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5
++$as_echo_n "checking for C compiler default output file name... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5
++$as_echo "$ac_file" >&6; }
+ ac_exeext=$ac_cv_exeext
+ 
+ rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out
+ ac_clean_files=$ac_clean_files_save
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
+-printf %s "checking for suffix of executables... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5
++$as_echo_n "checking for suffix of executables... " >&6; }
+ if { { ac_try="$ac_link"
+ case "(($ac_try" in
+   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_link") 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }
+-then :
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then :
+   # If both `conftest.exe' and `conftest' are `present' (well, observable)
+ # catch `conftest.exe'.  For instance with Cygwin, `ls conftest' will
+ # work properly (i.e., refer to `conftest.exe'), while it won't with
+@@ -4449,15 +3804,15 @@ for ac_file in conftest.exe conftest conftest.*; do
+     * ) break;;
+   esac
+ done
+-else $as_nop
+-  { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++else
++  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error $? "cannot compute suffix of executables: cannot compile and link
+ See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ rm -f conftest conftest$ac_cv_exeext
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
+-printf "%s\n" "$ac_cv_exeext" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5
++$as_echo "$ac_cv_exeext" >&6; }
+ 
+ rm -f conftest.$ac_ext
+ EXEEXT=$ac_cv_exeext
+@@ -4466,7 +3821,7 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ #include 
+ int
+-main (void)
++main ()
+ {
+ FILE *f = fopen ("conftest.out", "w");
+  return ferror (f) || fclose (f) != 0;
+@@ -4478,8 +3833,8 @@ _ACEOF
+ ac_clean_files="$ac_clean_files conftest.out"
+ # Check that the compiler produces executables we can run.  If not, either
+ # the compiler is broken, or we cross compile.
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
+-printf %s "checking whether we are cross compiling... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5
++$as_echo_n "checking whether we are cross compiling... " >&6; }
+ if test "$cross_compiling" != yes; then
+   { { ac_try="$ac_link"
+ case "(($ac_try" in
+@@ -4487,10 +3842,10 @@ case "(($ac_try" in
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_link") 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }
+   if { ac_try='./conftest$ac_cv_exeext'
+   { { case "(($ac_try" in
+@@ -4498,40 +3853,39 @@ printf "%s\n" "$ac_try_echo"; } >&5
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_try") 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }; }; then
+     cross_compiling=no
+   else
+     if test "$cross_compiling" = maybe; then
+ 	cross_compiling=yes
+     else
+-	{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
+-as_fn_error 77 "cannot run C compiled programs.
++	{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "cannot run C compiled programs.
+ If you meant to cross compile, use \`--host'.
+ See \`config.log' for more details" "$LINENO" 5; }
+     fi
+   fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
+-printf "%s\n" "$cross_compiling" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5
++$as_echo "$cross_compiling" >&6; }
+ 
+ rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out
+ ac_clean_files=$ac_clean_files_save
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
+-printf %s "checking for suffix of object files... " >&6; }
+-if test ${ac_cv_objext+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5
++$as_echo_n "checking for suffix of object files... " >&6; }
++if ${ac_cv_objext+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+@@ -4545,12 +3899,11 @@ case "(($ac_try" in
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_compile") 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }
+-then :
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; then :
+   for ac_file in conftest.o conftest.obj conftest.*; do
+   test -f "$ac_file" || continue;
+   case $ac_file in
+@@ -4559,32 +3912,31 @@ then :
+        break;;
+   esac
+ done
+-else $as_nop
+-  printf "%s\n" "$as_me: failed program was:" >&5
++else
++  $as_echo "$as_me: failed program was:" >&5
+ sed 's/^/| /' conftest.$ac_ext >&5
+ 
+-{ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error $? "cannot compute suffix of object files: cannot compile
+ See \`config.log' for more details" "$LINENO" 5; }
+ fi
+ rm -f conftest.$ac_cv_objext conftest.$ac_ext
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
+-printf "%s\n" "$ac_cv_objext" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5
++$as_echo "$ac_cv_objext" >&6; }
+ OBJEXT=$ac_cv_objext
+ ac_objext=$OBJEXT
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5
+-printf %s "checking whether the compiler supports GNU C... " >&6; }
+-if test ${ac_cv_c_compiler_gnu+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
++$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
++if ${ac_cv_c_compiler_gnu+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ #ifndef __GNUC__
+        choke me
+@@ -4594,33 +3946,29 @@ main (void)
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_compiler_gnu=yes
+-else $as_nop
++else
+   ac_compiler_gnu=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cv_c_compiler_gnu=$ac_compiler_gnu
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+-printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; }
+-ac_compiler_gnu=$ac_cv_c_compiler_gnu
+-
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
++$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+ if test $ac_compiler_gnu = yes; then
+   GCC=yes
+ else
+   GCC=
+ fi
+-ac_test_CFLAGS=${CFLAGS+y}
++ac_test_CFLAGS=${CFLAGS+set}
+ ac_save_CFLAGS=$CFLAGS
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+-printf %s "checking whether $CC accepts -g... " >&6; }
+-if test ${ac_cv_prog_cc_g+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
++$as_echo_n "checking whether $CC accepts -g... " >&6; }
++if ${ac_cv_prog_cc_g+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_save_c_werror_flag=$ac_c_werror_flag
+    ac_c_werror_flag=yes
+    ac_cv_prog_cc_g=no
+@@ -4629,60 +3977,57 @@ else $as_nop
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_prog_cc_g=yes
+-else $as_nop
++else
+   CFLAGS=""
+       cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+ 
+-else $as_nop
++else
+   ac_c_werror_flag=$ac_save_c_werror_flag
+ 	 CFLAGS="-g"
+ 	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_prog_cc_g=yes
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    ac_c_werror_flag=$ac_save_c_werror_flag
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+-printf "%s\n" "$ac_cv_prog_cc_g" >&6; }
+-if test $ac_test_CFLAGS; then
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
++$as_echo "$ac_cv_prog_cc_g" >&6; }
++if test "$ac_test_CFLAGS" = set; then
+   CFLAGS=$ac_save_CFLAGS
+ elif test $ac_cv_prog_cc_g = yes; then
+   if test "$GCC" = yes; then
+@@ -4697,144 +4042,94 @@ else
+     CFLAGS=
+   fi
+ fi
+-ac_prog_cc_stdc=no
+-if test x$ac_prog_cc_stdc = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5
+-printf %s "checking for $CC option to enable C11 features... " >&6; }
+-if test ${ac_cv_prog_cc_c11+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_prog_cc_c11=no
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
++$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
++if ${ac_cv_prog_cc_c89+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  ac_cv_prog_cc_c89=no
+ ac_save_CC=$CC
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+-$ac_c_conftest_c11_program
+-_ACEOF
+-for ac_arg in '' -std=gnu11
+-do
+-  CC="$ac_save_CC $ac_arg"
+-  if ac_fn_c_try_compile "$LINENO"
+-then :
+-  ac_cv_prog_cc_c11=$ac_arg
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
+-  test "x$ac_cv_prog_cc_c11" != "xno" && break
+-done
+-rm -f conftest.$ac_ext
+-CC=$ac_save_CC
+-fi
++#include 
++#include 
++struct stat;
++/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
++struct buf { int x; };
++FILE * (*rcsopen) (struct buf *, struct stat *, int);
++static char *e (p, i)
++     char **p;
++     int i;
++{
++  return p[i];
++}
++static char *f (char * (*g) (char **, int), char **p, ...)
++{
++  char *s;
++  va_list v;
++  va_start (v,p);
++  s = g (p, va_arg (v,int));
++  va_end (v);
++  return s;
++}
+ 
+-if test "x$ac_cv_prog_cc_c11" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cc_c11" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5
+-printf "%s\n" "$ac_cv_prog_cc_c11" >&6; }
+-     CC="$CC $ac_cv_prog_cc_c11"
+-fi
+-  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11
+-  ac_prog_cc_stdc=c11
+-fi
+-fi
+-if test x$ac_prog_cc_stdc = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5
+-printf %s "checking for $CC option to enable C99 features... " >&6; }
+-if test ${ac_cv_prog_cc_c99+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_prog_cc_c99=no
+-ac_save_CC=$CC
+-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h.  */
+-$ac_c_conftest_c99_program
+-_ACEOF
+-for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99=
+-do
+-  CC="$ac_save_CC $ac_arg"
+-  if ac_fn_c_try_compile "$LINENO"
+-then :
+-  ac_cv_prog_cc_c99=$ac_arg
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
+-  test "x$ac_cv_prog_cc_c99" != "xno" && break
+-done
+-rm -f conftest.$ac_ext
+-CC=$ac_save_CC
+-fi
++/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
++   function prototypes and stuff, but not '\xHH' hex character constants.
++   These don't provoke an error unfortunately, instead are silently treated
++   as 'x'.  The following induces an error, until -std is added to get
++   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
++   array size at least.  It's necessary to write '\x00'==0 to get something
++   that's true only with -std.  */
++int osf4_cc_array ['\x00' == 0 ? 1 : -1];
+ 
+-if test "x$ac_cv_prog_cc_c99" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cc_c99" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5
+-printf "%s\n" "$ac_cv_prog_cc_c99" >&6; }
+-     CC="$CC $ac_cv_prog_cc_c99"
+-fi
+-  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99
+-  ac_prog_cc_stdc=c99
+-fi
+-fi
+-if test x$ac_prog_cc_stdc = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5
+-printf %s "checking for $CC option to enable C89 features... " >&6; }
+-if test ${ac_cv_prog_cc_c89+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_prog_cc_c89=no
+-ac_save_CC=$CC
+-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h.  */
+-$ac_c_conftest_c89_program
++/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
++   inside strings and character constants.  */
++#define FOO(x) 'x'
++int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
++
++int test (int i, double x);
++struct s1 {int (*f) (int a);};
++struct s2 {int (*f) (double a);};
++int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
++int argc;
++char **argv;
++int
++main ()
++{
++return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
++  ;
++  return 0;
++}
+ _ACEOF
+-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
++	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+ do
+   CC="$ac_save_CC $ac_arg"
+-  if ac_fn_c_try_compile "$LINENO"
+-then :
++  if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_prog_cc_c89=$ac_arg
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
++rm -f core conftest.err conftest.$ac_objext
+   test "x$ac_cv_prog_cc_c89" != "xno" && break
+ done
+ rm -f conftest.$ac_ext
+ CC=$ac_save_CC
+-fi
+ 
+-if test "x$ac_cv_prog_cc_c89" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cc_c89" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+-printf "%s\n" "$ac_cv_prog_cc_c89" >&6; }
+-     CC="$CC $ac_cv_prog_cc_c89"
+-fi
+-  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89
+-  ac_prog_cc_stdc=c89
+ fi
++# AC_CACHE_VAL
++case "x$ac_cv_prog_cc_c89" in
++  x)
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++$as_echo "none needed" >&6; } ;;
++  xno)
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++$as_echo "unsupported" >&6; } ;;
++  *)
++    CC="$CC $ac_cv_prog_cc_c89"
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
++$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
++esac
++if test "x$ac_cv_prog_cc_c89" != xno; then :
++
+ fi
+ 
+ ac_ext=c
+@@ -4843,23 +4138,21 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+-
+-  ac_ext=c
++ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+-printf %s "checking whether $CC understands -c and -o together... " >&6; }
+-if test ${am_cv_prog_cc_c_o+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
++$as_echo_n "checking whether $CC understands -c and -o together... " >&6; }
++if ${am_cv_prog_cc_c_o+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+@@ -4887,8 +4180,8 @@ _ACEOF
+   rm -f core conftest*
+   unset am_i
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+-printf "%s\n" "$am_cv_prog_cc_c_o" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
++$as_echo "$am_cv_prog_cc_c_o" >&6; }
+ if test "$am_cv_prog_cc_c_o" != yes; then
+    # Losing compiler, so override with the script.
+    # FIXME: It is wrong to rewrite CC.
+@@ -4906,12 +4199,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+ depcc="$CC"   am_compiler_list=
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+-printf %s "checking dependency style of $depcc... " >&6; }
+-if test ${am_cv_CC_dependencies_compiler_type+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
++$as_echo_n "checking dependency style of $depcc... " >&6; }
++if ${am_cv_CC_dependencies_compiler_type+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+   # We make a subdir and do the tests there.  Otherwise we can end up
+   # making bogus files that we don't know about and never remove.  For
+@@ -5014,195 +4306,483 @@ else $as_nop
+   cd ..
+   rm -rf conftest.dir
+ else
+-  am_cv_CC_dependencies_compiler_type=none
+-fi
+-
++  am_cv_CC_dependencies_compiler_type=none
++fi
++
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
++$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
++CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
++
++ if
++  test "x$enable_dependency_tracking" != xno \
++  && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
++  am__fastdepCC_TRUE=
++  am__fastdepCC_FALSE='#'
++else
++  am__fastdepCC_TRUE='#'
++  am__fastdepCC_FALSE=
++fi
++
++
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5
++$as_echo_n "checking how to run the C preprocessor... " >&6; }
++# On Suns, sometimes $CPP names a directory.
++if test -n "$CPP" && test -d "$CPP"; then
++  CPP=
++fi
++if test -z "$CPP"; then
++  if ${ac_cv_prog_CPP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++      # Double quotes because CPP needs to be expanded
++    for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp"
++    do
++      ac_preproc_ok=false
++for ac_c_preproc_warn_flag in '' yes
++do
++  # Use a header file that comes with gcc, so configuring glibc
++  # with a fresh cross-compiler works.
++  # Prefer  to  if __STDC__ is defined, since
++  #  exists even on freestanding compilers.
++  # On the NeXT, cc -E runs the code through the compiler's parser,
++  # not just through cpp. "Syntax error" is here to catch this case.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#ifdef __STDC__
++# include 
++#else
++# include 
++#endif
++		     Syntax error
++_ACEOF
++if ac_fn_c_try_cpp "$LINENO"; then :
++
++else
++  # Broken: fails on valid input.
++continue
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++  # OK, works on sane cases.  Now check whether nonexistent headers
++  # can be detected and how.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
++_ACEOF
++if ac_fn_c_try_cpp "$LINENO"; then :
++  # Broken: success on invalid input.
++continue
++else
++  # Passes both tests.
++ac_preproc_ok=:
++break
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++done
++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
++rm -f conftest.i conftest.err conftest.$ac_ext
++if $ac_preproc_ok; then :
++  break
++fi
++
++    done
++    ac_cv_prog_CPP=$CPP
++
++fi
++  CPP=$ac_cv_prog_CPP
++else
++  ac_cv_prog_CPP=$CPP
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5
++$as_echo "$CPP" >&6; }
++ac_preproc_ok=false
++for ac_c_preproc_warn_flag in '' yes
++do
++  # Use a header file that comes with gcc, so configuring glibc
++  # with a fresh cross-compiler works.
++  # Prefer  to  if __STDC__ is defined, since
++  #  exists even on freestanding compilers.
++  # On the NeXT, cc -E runs the code through the compiler's parser,
++  # not just through cpp. "Syntax error" is here to catch this case.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#ifdef __STDC__
++# include 
++#else
++# include 
++#endif
++		     Syntax error
++_ACEOF
++if ac_fn_c_try_cpp "$LINENO"; then :
++
++else
++  # Broken: fails on valid input.
++continue
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++  # OK, works on sane cases.  Now check whether nonexistent headers
++  # can be detected and how.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
++_ACEOF
++if ac_fn_c_try_cpp "$LINENO"; then :
++  # Broken: success on invalid input.
++continue
++else
++  # Passes both tests.
++ac_preproc_ok=:
++break
++fi
++rm -f conftest.err conftest.i conftest.$ac_ext
++
++done
++# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
++rm -f conftest.i conftest.err conftest.$ac_ext
++if $ac_preproc_ok; then :
++
++else
++  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
++as_fn_error $? "C preprocessor \"$CPP\" fails sanity check
++See \`config.log' for more details" "$LINENO" 5; }
++fi
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
++$as_echo_n "checking for grep that handles long lines and -e... " >&6; }
++if ${ac_cv_path_GREP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  if test -z "$GREP"; then
++  ac_path_GREP_found=false
++  # Loop through the user's path and test for each of PROGNAME-LIST
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
++do
++  IFS=$as_save_IFS
++  test -z "$as_dir" && as_dir=.
++    for ac_prog in grep ggrep; do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext"
++      as_fn_executable_p "$ac_path_GREP" || continue
++# Check for GNU ac_path_GREP and select it if it is found.
++  # Check for GNU $ac_path_GREP
++case `"$ac_path_GREP" --version 2>&1` in
++*GNU*)
++  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
++*)
++  ac_count=0
++  $as_echo_n 0123456789 >"conftest.in"
++  while :
++  do
++    cat "conftest.in" "conftest.in" >"conftest.tmp"
++    mv "conftest.tmp" "conftest.in"
++    cp "conftest.in" "conftest.nl"
++    $as_echo 'GREP' >> "conftest.nl"
++    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
++    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
++    as_fn_arith $ac_count + 1 && ac_count=$as_val
++    if test $ac_count -gt ${ac_path_GREP_max-0}; then
++      # Best one so far, save it but keep looking for a better one
++      ac_cv_path_GREP="$ac_path_GREP"
++      ac_path_GREP_max=$ac_count
++    fi
++    # 10*(2^10) chars as input seems more than enough
++    test $ac_count -gt 10 && break
++  done
++  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
++esac
++
++      $ac_path_GREP_found && break 3
++    done
++  done
++  done
++IFS=$as_save_IFS
++  if test -z "$ac_cv_path_GREP"; then
++    as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
++  fi
++else
++  ac_cv_path_GREP=$GREP
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+-printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; }
+-CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+ 
+- if
+-  test "x$enable_dependency_tracking" != xno \
+-  && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then
+-  am__fastdepCC_TRUE=
+-  am__fastdepCC_FALSE='#'
+-else
+-  am__fastdepCC_TRUE='#'
+-  am__fastdepCC_FALSE=
+ fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
++$as_echo "$ac_cv_path_GREP" >&6; }
++ GREP="$ac_cv_path_GREP"
+ 
+ 
+-
+-ac_header= ac_cache=
+-for ac_item in $ac_header_c_list
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
++$as_echo_n "checking for egrep... " >&6; }
++if ${ac_cv_path_EGREP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
++   then ac_cv_path_EGREP="$GREP -E"
++   else
++     if test -z "$EGREP"; then
++  ac_path_EGREP_found=false
++  # Loop through the user's path and test for each of PROGNAME-LIST
++  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
++for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+ do
+-  if test $ac_cache; then
+-    ac_fn_c_check_header_compile "$LINENO" $ac_header ac_cv_header_$ac_cache "$ac_includes_default"
+-    if eval test \"x\$ac_cv_header_$ac_cache\" = xyes; then
+-      printf "%s\n" "#define $ac_item 1" >> confdefs.h
++  IFS=$as_save_IFS
++  test -z "$as_dir" && as_dir=.
++    for ac_prog in egrep; do
++    for ac_exec_ext in '' $ac_executable_extensions; do
++      ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext"
++      as_fn_executable_p "$ac_path_EGREP" || continue
++# Check for GNU ac_path_EGREP and select it if it is found.
++  # Check for GNU $ac_path_EGREP
++case `"$ac_path_EGREP" --version 2>&1` in
++*GNU*)
++  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
++*)
++  ac_count=0
++  $as_echo_n 0123456789 >"conftest.in"
++  while :
++  do
++    cat "conftest.in" "conftest.in" >"conftest.tmp"
++    mv "conftest.tmp" "conftest.in"
++    cp "conftest.in" "conftest.nl"
++    $as_echo 'EGREP' >> "conftest.nl"
++    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
++    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
++    as_fn_arith $ac_count + 1 && ac_count=$as_val
++    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
++      # Best one so far, save it but keep looking for a better one
++      ac_cv_path_EGREP="$ac_path_EGREP"
++      ac_path_EGREP_max=$ac_count
+     fi
+-    ac_header= ac_cache=
+-  elif test $ac_header; then
+-    ac_cache=$ac_item
+-  else
+-    ac_header=$ac_item
+-  fi
+-done
+-
+-
+-
+-
+-
+-
+-
+-
+-if test $ac_cv_header_stdlib_h = yes && test $ac_cv_header_string_h = yes
+-then :
+-
+-printf "%s\n" "#define STDC_HEADERS 1" >>confdefs.h
++    # 10*(2^10) chars as input seems more than enough
++    test $ac_count -gt 10 && break
++  done
++  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
++esac
+ 
++      $ac_path_EGREP_found && break 3
++    done
++  done
++  done
++IFS=$as_save_IFS
++  if test -z "$ac_cv_path_EGREP"; then
++    as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
++  fi
++else
++  ac_cv_path_EGREP=$EGREP
+ fi
+ 
++   fi
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
++$as_echo "$ac_cv_path_EGREP" >&6; }
++ EGREP="$ac_cv_path_EGREP"
+ 
+ 
+-
+-
+-
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5
+-printf %s "checking whether it is safe to define __EXTENSIONS__... " >&6; }
+-if test ${ac_cv_safe_to_define___extensions__+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5
++$as_echo_n "checking for ANSI C header files... " >&6; }
++if ${ac_cv_header_stdc+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
++#include 
++#include 
++#include 
++#include 
+ 
+-#         define __EXTENSIONS__ 1
+-          $ac_includes_default
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
+-  ac_cv_safe_to_define___extensions__=yes
+-else $as_nop
+-  ac_cv_safe_to_define___extensions__=no
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++if ac_fn_c_try_compile "$LINENO"; then :
++  ac_cv_header_stdc=yes
++else
++  ac_cv_header_stdc=no
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5
+-printf "%s\n" "$ac_cv_safe_to_define___extensions__" >&6; }
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether _XOPEN_SOURCE should be defined" >&5
+-printf %s "checking whether _XOPEN_SOURCE should be defined... " >&6; }
+-if test ${ac_cv_should_define__xopen_source+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_should_define__xopen_source=no
+-    if test $ac_cv_header_wchar_h = yes
+-then :
++if test $ac_cv_header_stdc = yes; then
++  # SunOS 4.x string.h does not declare mem*, contrary to ANSI.
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
++#include 
+ 
+-          #include 
+-          mbstate_t x;
+-int
+-main (void)
+-{
++_ACEOF
++if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
++  $EGREP "memchr" >/dev/null 2>&1; then :
++
++else
++  ac_cv_header_stdc=no
++fi
++rm -f conftest*
++
++fi
++
++if test $ac_cv_header_stdc = yes; then
++  # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI.
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++#include 
+ 
+-  ;
+-  return 0;
+-}
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if (eval "$ac_cpp conftest.$ac_ext") 2>&5 |
++  $EGREP "free" >/dev/null 2>&1; then :
++
++else
++  ac_cv_header_stdc=no
++fi
++rm -f conftest*
+ 
+-else $as_nop
++fi
++
++if test $ac_cv_header_stdc = yes; then
++  # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi.
++  if test "$cross_compiling" = yes; then :
++  :
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
++#include 
++#include 
++#if ((' ' & 0x0FF) == 0x020)
++# define ISLOWER(c) ('a' <= (c) && (c) <= 'z')
++# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c))
++#else
++# define ISLOWER(c) \
++		   (('a' <= (c) && (c) <= 'i') \
++		     || ('j' <= (c) && (c) <= 'r') \
++		     || ('s' <= (c) && (c) <= 'z'))
++# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c))
++#endif
+ 
+-            #define _XOPEN_SOURCE 500
+-            #include 
+-            mbstate_t x;
++#define XOR(e, f) (((e) && !(f)) || (!(e) && (f)))
+ int
+-main (void)
++main ()
+ {
+-
+-  ;
++  int i;
++  for (i = 0; i < 256; i++)
++    if (XOR (islower (i), ISLOWER (i))
++	|| toupper (i) != TOUPPER (i))
++      return 2;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
+-  ac_cv_should_define__xopen_source=yes
++if ac_fn_c_try_run "$LINENO"; then :
++
++else
++  ac_cv_header_stdc=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \
++  conftest.$ac_objext conftest.beam conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++
+ fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_should_define__xopen_source" >&5
+-printf "%s\n" "$ac_cv_should_define__xopen_source" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5
++$as_echo "$ac_cv_header_stdc" >&6; }
++if test $ac_cv_header_stdc = yes; then
+ 
+-  printf "%s\n" "#define _ALL_SOURCE 1" >>confdefs.h
++$as_echo "#define STDC_HEADERS 1" >>confdefs.h
++
++fi
+ 
+-  printf "%s\n" "#define _DARWIN_C_SOURCE 1" >>confdefs.h
++# On IRIX 5.3, sys/types and inttypes.h are conflicting.
++for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \
++		  inttypes.h stdint.h unistd.h
++do :
++  as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh`
++ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default
++"
++if eval test \"x\$"$as_ac_Header"\" = x"yes"; then :
++  cat >>confdefs.h <<_ACEOF
++#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1
++_ACEOF
+ 
+-  printf "%s\n" "#define _GNU_SOURCE 1" >>confdefs.h
++fi
+ 
+-  printf "%s\n" "#define _HPUX_ALT_XOPEN_SOCKET_API 1" >>confdefs.h
++done
+ 
+-  printf "%s\n" "#define _NETBSD_SOURCE 1" >>confdefs.h
+ 
+-  printf "%s\n" "#define _OPENBSD_SOURCE 1" >>confdefs.h
+ 
+-  printf "%s\n" "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h
++  ac_fn_c_check_header_mongrel "$LINENO" "minix/config.h" "ac_cv_header_minix_config_h" "$ac_includes_default"
++if test "x$ac_cv_header_minix_config_h" = xyes; then :
++  MINIX=yes
++else
++  MINIX=
++fi
+ 
+-  printf "%s\n" "#define __STDC_WANT_IEC_60559_ATTRIBS_EXT__ 1" >>confdefs.h
+ 
+-  printf "%s\n" "#define __STDC_WANT_IEC_60559_BFP_EXT__ 1" >>confdefs.h
++  if test "$MINIX" = yes; then
+ 
+-  printf "%s\n" "#define __STDC_WANT_IEC_60559_DFP_EXT__ 1" >>confdefs.h
++$as_echo "#define _POSIX_SOURCE 1" >>confdefs.h
+ 
+-  printf "%s\n" "#define __STDC_WANT_IEC_60559_FUNCS_EXT__ 1" >>confdefs.h
+ 
+-  printf "%s\n" "#define __STDC_WANT_IEC_60559_TYPES_EXT__ 1" >>confdefs.h
++$as_echo "#define _POSIX_1_SOURCE 2" >>confdefs.h
+ 
+-  printf "%s\n" "#define __STDC_WANT_LIB_EXT2__ 1" >>confdefs.h
+ 
+-  printf "%s\n" "#define __STDC_WANT_MATH_SPEC_FUNCS__ 1" >>confdefs.h
++$as_echo "#define _MINIX 1" >>confdefs.h
+ 
+-  printf "%s\n" "#define _TANDEM_SOURCE 1" >>confdefs.h
++  fi
+ 
+-  if test $ac_cv_header_minix_config_h = yes
+-then :
+-  MINIX=yes
+-    printf "%s\n" "#define _MINIX 1" >>confdefs.h
+ 
+-    printf "%s\n" "#define _POSIX_SOURCE 1" >>confdefs.h
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether it is safe to define __EXTENSIONS__" >&5
++$as_echo_n "checking whether it is safe to define __EXTENSIONS__... " >&6; }
++if ${ac_cv_safe_to_define___extensions__+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
+ 
+-    printf "%s\n" "#define _POSIX_1_SOURCE 2" >>confdefs.h
++#         define __EXTENSIONS__ 1
++          $ac_includes_default
++int
++main ()
++{
+ 
+-else $as_nop
+-  MINIX=
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"; then :
++  ac_cv_safe_to_define___extensions__=yes
++else
++  ac_cv_safe_to_define___extensions__=no
+ fi
+-  if test $ac_cv_safe_to_define___extensions__ = yes
+-then :
+-  printf "%s\n" "#define __EXTENSIONS__ 1" >>confdefs.h
+-
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-  if test $ac_cv_should_define__xopen_source = yes
+-then :
+-  printf "%s\n" "#define _XOPEN_SOURCE 500" >>confdefs.h
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_safe_to_define___extensions__" >&5
++$as_echo "$ac_cv_safe_to_define___extensions__" >&6; }
++  test $ac_cv_safe_to_define___extensions__ = yes &&
++    $as_echo "#define __EXTENSIONS__ 1" >>confdefs.h
++
++  $as_echo "#define _ALL_SOURCE 1" >>confdefs.h
++
++  $as_echo "#define _GNU_SOURCE 1" >>confdefs.h
++
++  $as_echo "#define _POSIX_PTHREAD_SEMANTICS 1" >>confdefs.h
++
++  $as_echo "#define _TANDEM_SOURCE 1" >>confdefs.h
+ 
+-fi
+ 
+ ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+@@ -5212,12 +4792,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}gcc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -5225,15 +4804,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_CC="${ac_tool_prefix}gcc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -5244,11 +4819,11 @@ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -5257,12 +4832,11 @@ if test -z "$ac_cv_prog_CC"; then
+   ac_ct_CC=$CC
+   # Extract the first word of "gcc", so it can be a program name with args.
+ set dummy gcc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_CC"; then
+   ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+ else
+@@ -5270,15 +4844,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_CC="gcc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -5289,11 +4859,11 @@ fi
+ fi
+ ac_ct_CC=$ac_cv_prog_ac_ct_CC
+ if test -n "$ac_ct_CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+-printf "%s\n" "$ac_ct_CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++$as_echo "$ac_ct_CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_CC" = x; then
+@@ -5301,8 +4871,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     CC=$ac_ct_CC
+@@ -5315,12 +4885,11 @@ if test -z "$CC"; then
+           if test -n "$ac_tool_prefix"; then
+     # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}cc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -5328,15 +4897,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_CC="${ac_tool_prefix}cc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -5347,11 +4912,11 @@ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -5360,12 +4925,11 @@ fi
+ if test -z "$CC"; then
+   # Extract the first word of "cc", so it can be a program name with args.
+ set dummy cc; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -5374,19 +4938,15 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    if test "$as_dir$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
++    if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then
+        ac_prog_rejected=yes
+        continue
+      fi
+     ac_cv_prog_CC="cc"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -5402,18 +4962,18 @@ if test $ac_prog_rejected = yes; then
+     # However, it has the same basename, so the bogon will be chosen
+     # first if we set CC to just the basename; use the full file name.
+     shift
+-    ac_cv_prog_CC="$as_dir$ac_word${1+' '}$@"
++    ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@"
+   fi
+ fi
+ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -5424,12 +4984,11 @@ if test -z "$CC"; then
+   do
+     # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+ set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CC"; then
+   ac_cv_prog_CC="$CC" # Let the user override the test.
+ else
+@@ -5437,15 +4996,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_CC="$ac_tool_prefix$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -5456,11 +5011,11 @@ fi
+ fi
+ CC=$ac_cv_prog_CC
+ if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
++$as_echo "$CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -5473,12 +5028,11 @@ if test -z "$CC"; then
+ do
+   # Extract the first word of "$ac_prog", so it can be a program name with args.
+ set dummy $ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_CC"; then
+   ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+ else
+@@ -5486,15 +5040,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_CC="$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -5505,11 +5055,11 @@ fi
+ fi
+ ac_ct_CC=$ac_cv_prog_ac_ct_CC
+ if test -n "$ac_ct_CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+-printf "%s\n" "$ac_ct_CC" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
++$as_echo "$ac_ct_CC" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -5521,138 +5071,34 @@ done
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+-ac_tool_warned=yes ;;
+-esac
+-    CC=$ac_ct_CC
+-  fi
+-fi
+-
+-fi
+-if test -z "$CC"; then
+-  if test -n "$ac_tool_prefix"; then
+-  # Extract the first word of "${ac_tool_prefix}clang", so it can be a program name with args.
+-set dummy ${ac_tool_prefix}clang; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$CC"; then
+-  ac_cv_prog_CC="$CC" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_CC="${ac_tool_prefix}clang"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-CC=$ac_cv_prog_CC
+-if test -n "$CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CC" >&5
+-printf "%s\n" "$CC" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-
+-fi
+-if test -z "$ac_cv_prog_CC"; then
+-  ac_ct_CC=$CC
+-  # Extract the first word of "clang", so it can be a program name with args.
+-set dummy clang; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$ac_ct_CC"; then
+-  ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_ac_ct_CC="clang"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-ac_ct_CC=$ac_cv_prog_ac_ct_CC
+-if test -n "$ac_ct_CC"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5
+-printf "%s\n" "$ac_ct_CC" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-  if test "x$ac_ct_CC" = x; then
+-    CC=""
+-  else
+-    case $cross_compiling:$ac_tool_warned in
+-yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     CC=$ac_ct_CC
+   fi
+-else
+-  CC="$ac_cv_prog_CC"
+ fi
+ 
+ fi
+ 
+ 
+-test -z "$CC" && { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error $? "no acceptable C compiler found in \$PATH
+ See \`config.log' for more details" "$LINENO" 5; }
+ 
+ # Provide some information about the compiler.
+-printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
++$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5
+ set X $ac_compile
+ ac_compiler=$2
+-for ac_option in --version -v -V -qversion -version; do
++for ac_option in --version -v -V -qversion; do
+   { { ac_try="$ac_compiler $ac_option >&5"
+ case "(($ac_try" in
+   *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -5662,21 +5108,20 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+   fi
+   rm -f conftest.er1 conftest.err
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }
+ done
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C" >&5
+-printf %s "checking whether the compiler supports GNU C... " >&6; }
+-if test ${ac_cv_c_compiler_gnu+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5
++$as_echo_n "checking whether we are using the GNU C compiler... " >&6; }
++if ${ac_cv_c_compiler_gnu+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ #ifndef __GNUC__
+        choke me
+@@ -5686,33 +5131,29 @@ main (void)
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_compiler_gnu=yes
+-else $as_nop
++else
+   ac_compiler_gnu=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cv_c_compiler_gnu=$ac_compiler_gnu
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
+-printf "%s\n" "$ac_cv_c_compiler_gnu" >&6; }
+-ac_compiler_gnu=$ac_cv_c_compiler_gnu
+-
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5
++$as_echo "$ac_cv_c_compiler_gnu" >&6; }
+ if test $ac_compiler_gnu = yes; then
+   GCC=yes
+ else
+   GCC=
+ fi
+-ac_test_CFLAGS=${CFLAGS+y}
++ac_test_CFLAGS=${CFLAGS+set}
+ ac_save_CFLAGS=$CFLAGS
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
+-printf %s "checking whether $CC accepts -g... " >&6; }
+-if test ${ac_cv_prog_cc_g+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5
++$as_echo_n "checking whether $CC accepts -g... " >&6; }
++if ${ac_cv_prog_cc_g+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_save_c_werror_flag=$ac_c_werror_flag
+    ac_c_werror_flag=yes
+    ac_cv_prog_cc_g=no
+@@ -5721,60 +5162,57 @@ else $as_nop
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_prog_cc_g=yes
+-else $as_nop
++else
+   CFLAGS=""
+       cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+ 
+-else $as_nop
++else
+   ac_c_werror_flag=$ac_save_c_werror_flag
+ 	 CFLAGS="-g"
+ 	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_prog_cc_g=yes
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    ac_c_werror_flag=$ac_save_c_werror_flag
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
+-printf "%s\n" "$ac_cv_prog_cc_g" >&6; }
+-if test $ac_test_CFLAGS; then
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5
++$as_echo "$ac_cv_prog_cc_g" >&6; }
++if test "$ac_test_CFLAGS" = set; then
+   CFLAGS=$ac_save_CFLAGS
+ elif test $ac_cv_prog_cc_g = yes; then
+   if test "$GCC" = yes; then
+@@ -5786,147 +5224,97 @@ else
+   if test "$GCC" = yes; then
+     CFLAGS="-O2"
+   else
+-    CFLAGS=
+-  fi
+-fi
+-ac_prog_cc_stdc=no
+-if test x$ac_prog_cc_stdc = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C11 features" >&5
+-printf %s "checking for $CC option to enable C11 features... " >&6; }
+-if test ${ac_cv_prog_cc_c11+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_prog_cc_c11=no
+-ac_save_CC=$CC
+-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h.  */
+-$ac_c_conftest_c11_program
+-_ACEOF
+-for ac_arg in '' -std=gnu11
+-do
+-  CC="$ac_save_CC $ac_arg"
+-  if ac_fn_c_try_compile "$LINENO"
+-then :
+-  ac_cv_prog_cc_c11=$ac_arg
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
+-  test "x$ac_cv_prog_cc_c11" != "xno" && break
+-done
+-rm -f conftest.$ac_ext
+-CC=$ac_save_CC
+-fi
+-
+-if test "x$ac_cv_prog_cc_c11" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cc_c11" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c11" >&5
+-printf "%s\n" "$ac_cv_prog_cc_c11" >&6; }
+-     CC="$CC $ac_cv_prog_cc_c11"
+-fi
+-  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c11
+-  ac_prog_cc_stdc=c11
+-fi
+-fi
+-if test x$ac_prog_cc_stdc = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C99 features" >&5
+-printf %s "checking for $CC option to enable C99 features... " >&6; }
+-if test ${ac_cv_prog_cc_c99+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_prog_cc_c99=no
+-ac_save_CC=$CC
+-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h.  */
+-$ac_c_conftest_c99_program
+-_ACEOF
+-for ac_arg in '' -std=gnu99 -std=c99 -c99 -qlanglvl=extc1x -qlanglvl=extc99 -AC99 -D_STDC_C99=
+-do
+-  CC="$ac_save_CC $ac_arg"
+-  if ac_fn_c_try_compile "$LINENO"
+-then :
+-  ac_cv_prog_cc_c99=$ac_arg
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
+-  test "x$ac_cv_prog_cc_c99" != "xno" && break
+-done
+-rm -f conftest.$ac_ext
+-CC=$ac_save_CC
++    CFLAGS=
++  fi
+ fi
+-
+-if test "x$ac_cv_prog_cc_c99" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cc_c99" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c99" >&5
+-printf "%s\n" "$ac_cv_prog_cc_c99" >&6; }
+-     CC="$CC $ac_cv_prog_cc_c99"
+-fi
+-  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c99
+-  ac_prog_cc_stdc=c99
+-fi
+-fi
+-if test x$ac_prog_cc_stdc = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CC option to enable C89 features" >&5
+-printf %s "checking for $CC option to enable C89 features... " >&6; }
+-if test ${ac_cv_prog_cc_c89+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5
++$as_echo_n "checking for $CC option to accept ISO C89... " >&6; }
++if ${ac_cv_prog_cc_c89+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_cv_prog_cc_c89=no
+ ac_save_CC=$CC
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+-$ac_c_conftest_c89_program
++#include <stdarg.h>
++#include <stdio.h>
++struct stat;
++/* Most of the following tests are stolen from RCS 5.7's src/conf.sh.  */
++struct buf { int x; };
++FILE * (*rcsopen) (struct buf *, struct stat *, int);
++static char *e (p, i)
++     char **p;
++     int i;
++{
++  return p[i];
++}
++static char *f (char * (*g) (char **, int), char **p, ...)
++{
++  char *s;
++  va_list v;
++  va_start (v,p);
++  s = g (p, va_arg (v,int));
++  va_end (v);
++  return s;
++}
++
++/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default.  It has
++   function prototypes and stuff, but not '\xHH' hex character constants.
++   These don't provoke an error unfortunately, instead are silently treated
++   as 'x'.  The following induces an error, until -std is added to get
++   proper ANSI mode.  Curiously '\x00'!='x' always comes out true, for an
++   array size at least.  It's necessary to write '\x00'==0 to get something
++   that's true only with -std.  */
++int osf4_cc_array ['\x00' == 0 ? 1 : -1];
++
++/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters
++   inside strings and character constants.  */
++#define FOO(x) 'x'
++int xlc6_cc_array[FOO(a) == 'x' ? 1 : -1];
++
++int test (int i, double x);
++struct s1 {int (*f) (int a);};
++struct s2 {int (*f) (double a);};
++int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int);
++int argc;
++char **argv;
++int
++main ()
++{
++return f (e, argv, 0) != argv[0]  ||  f (e, argv, 1) != argv[1];
++  ;
++  return 0;
++}
+ _ACEOF
+-for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
++for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \
++	-Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__"
+ do
+   CC="$ac_save_CC $ac_arg"
+-  if ac_fn_c_try_compile "$LINENO"
+-then :
++  if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_prog_cc_c89=$ac_arg
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
++rm -f core conftest.err conftest.$ac_objext
+   test "x$ac_cv_prog_cc_c89" != "xno" && break
+ done
+ rm -f conftest.$ac_ext
+ CC=$ac_save_CC
+-fi
+ 
+-if test "x$ac_cv_prog_cc_c89" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cc_c89" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
+-printf "%s\n" "$ac_cv_prog_cc_c89" >&6; }
+-     CC="$CC $ac_cv_prog_cc_c89"
+-fi
+-  ac_cv_prog_cc_stdc=$ac_cv_prog_cc_c89
+-  ac_prog_cc_stdc=c89
+ fi
++# AC_CACHE_VAL
++case "x$ac_cv_prog_cc_c89" in
++  x)
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
++$as_echo "none needed" >&6; } ;;
++  xno)
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
++$as_echo "unsupported" >&6; } ;;
++  *)
++    CC="$CC $ac_cv_prog_cc_c89"
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5
++$as_echo "$ac_cv_prog_cc_c89" >&6; } ;;
++esac
++if test "x$ac_cv_prog_cc_c89" != xno; then :
++
+ fi
+ 
+ ac_ext=c
+@@ -5935,23 +5323,21 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+-
+-  ac_ext=c
++ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
+-printf %s "checking whether $CC understands -c and -o together... " >&6; }
+-if test ${am_cv_prog_cc_c_o+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC understands -c and -o together" >&5
++$as_echo_n "checking whether $CC understands -c and -o together... " >&6; }
++if ${am_cv_prog_cc_c_o+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+@@ -5979,8 +5365,8 @@ _ACEOF
+   rm -f core conftest*
+   unset am_i
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
+-printf "%s\n" "$am_cv_prog_cc_c_o" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_prog_cc_c_o" >&5
++$as_echo "$am_cv_prog_cc_c_o" >&6; }
+ if test "$am_cv_prog_cc_c_o" != yes; then
+    # Losing compiler, so override with the script.
+    # FIXME: It is wrong to rewrite CC.
+@@ -5998,12 +5384,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+ depcc="$CC"   am_compiler_list=
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+-printf %s "checking dependency style of $depcc... " >&6; }
+-if test ${am_cv_CC_dependencies_compiler_type+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
++$as_echo_n "checking dependency style of $depcc... " >&6; }
++if ${am_cv_CC_dependencies_compiler_type+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+   # We make a subdir and do the tests there.  Otherwise we can end up
+   # making bogus files that we don't know about and never remove.  For
+@@ -6110,8 +5495,8 @@ else
+ fi
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
+-printf "%s\n" "$am_cv_CC_dependencies_compiler_type" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5
++$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; }
+ CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type
+ 
+  if
+@@ -6125,12 +5510,6 @@ else
+ fi
+ 
+ 
+-
+-
+-
+-
+-
+-
+ ac_ext=cpp
+ ac_cpp='$CXXCPP $CPPFLAGS'
+ ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+@@ -6141,16 +5520,15 @@ if test -z "$CXX"; then
+     CXX=$CCC
+   else
+     if test -n "$ac_tool_prefix"; then
+-  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++
++  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+   do
+     # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+ set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_CXX+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$CXX"; then
+   ac_cv_prog_CXX="$CXX" # Let the user override the test.
+ else
+@@ -6158,15 +5536,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_CXX="$ac_tool_prefix$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -6177,11 +5551,11 @@ fi
+ fi
+ CXX=$ac_cv_prog_CXX
+ if test -n "$CXX"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
+-printf "%s\n" "$CXX" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5
++$as_echo "$CXX" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -6190,16 +5564,15 @@ fi
+ fi
+ if test -z "$CXX"; then
+   ac_ct_CXX=$CXX
+-  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC clang++
++  for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC
+ do
+   # Extract the first word of "$ac_prog", so it can be a program name with args.
+ set dummy $ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_CXX+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_CXX"; then
+   ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test.
+ else
+@@ -6207,15 +5580,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_CXX="$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -6226,11 +5595,11 @@ fi
+ fi
+ ac_ct_CXX=$ac_cv_prog_ac_ct_CXX
+ if test -n "$ac_ct_CXX"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
+-printf "%s\n" "$ac_ct_CXX" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5
++$as_echo "$ac_ct_CXX" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -6242,8 +5611,8 @@ done
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     CXX=$ac_ct_CXX
+@@ -6253,7 +5622,7 @@ fi
+   fi
+ fi
+ # Provide some information about the compiler.
+-printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
++$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5
+ set X $ac_compile
+ ac_compiler=$2
+ for ac_option in --version -v -V -qversion; do
+@@ -6263,7 +5632,7 @@ case "(($ac_try" in
+   *) ac_try_echo=$ac_try;;
+ esac
+ eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+-printf "%s\n" "$ac_try_echo"; } >&5
++$as_echo "$ac_try_echo"; } >&5
+   (eval "$ac_compiler $ac_option >&5") 2>conftest.err
+   ac_status=$?
+   if test -s conftest.err; then
+@@ -6273,21 +5642,20 @@ printf "%s\n" "$ac_try_echo"; } >&5
+     cat conftest.er1 >&5
+   fi
+   rm -f conftest.er1 conftest.err
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }
+ done
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the compiler supports GNU C++" >&5
+-printf %s "checking whether the compiler supports GNU C++... " >&6; }
+-if test ${ac_cv_cxx_compiler_gnu+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5
++$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; }
++if ${ac_cv_cxx_compiler_gnu+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ #ifndef __GNUC__
+        choke me
+@@ -6297,33 +5665,29 @@ main (void)
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_cxx_try_compile "$LINENO"
+-then :
++if ac_fn_cxx_try_compile "$LINENO"; then :
+   ac_compiler_gnu=yes
+-else $as_nop
++else
+   ac_compiler_gnu=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ ac_cv_cxx_compiler_gnu=$ac_compiler_gnu
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
+-printf "%s\n" "$ac_cv_cxx_compiler_gnu" >&6; }
+-ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+-
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5
++$as_echo "$ac_cv_cxx_compiler_gnu" >&6; }
+ if test $ac_compiler_gnu = yes; then
+   GXX=yes
+ else
+   GXX=
+ fi
+-ac_test_CXXFLAGS=${CXXFLAGS+y}
++ac_test_CXXFLAGS=${CXXFLAGS+set}
+ ac_save_CXXFLAGS=$CXXFLAGS
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
+-printf %s "checking whether $CXX accepts -g... " >&6; }
+-if test ${ac_cv_prog_cxx_g+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5
++$as_echo_n "checking whether $CXX accepts -g... " >&6; }
++if ${ac_cv_prog_cxx_g+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_save_cxx_werror_flag=$ac_cxx_werror_flag
+    ac_cxx_werror_flag=yes
+    ac_cv_prog_cxx_g=no
+@@ -6332,60 +5696,57 @@ else $as_nop
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_cxx_try_compile "$LINENO"
+-then :
++if ac_fn_cxx_try_compile "$LINENO"; then :
+   ac_cv_prog_cxx_g=yes
+-else $as_nop
++else
+   CXXFLAGS=""
+       cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_cxx_try_compile "$LINENO"
+-then :
++if ac_fn_cxx_try_compile "$LINENO"; then :
+ 
+-else $as_nop
++else
+   ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ 	 CXXFLAGS="-g"
+ 	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_cxx_try_compile "$LINENO"
+-then :
++if ac_fn_cxx_try_compile "$LINENO"; then :
+   ac_cv_prog_cxx_g=yes
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+    ac_cxx_werror_flag=$ac_save_cxx_werror_flag
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
+-printf "%s\n" "$ac_cv_prog_cxx_g" >&6; }
+-if test $ac_test_CXXFLAGS; then
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5
++$as_echo "$ac_cv_prog_cxx_g" >&6; }
++if test "$ac_test_CXXFLAGS" = set; then
+   CXXFLAGS=$ac_save_CXXFLAGS
+ elif test $ac_cv_prog_cxx_g = yes; then
+   if test "$GXX" = yes; then
+@@ -6400,100 +5761,6 @@ else
+     CXXFLAGS=
+   fi
+ fi
+-ac_prog_cxx_stdcxx=no
+-if test x$ac_prog_cxx_stdcxx = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++11 features" >&5
+-printf %s "checking for $CXX option to enable C++11 features... " >&6; }
+-if test ${ac_cv_prog_cxx_cxx11+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_prog_cxx_cxx11=no
+-ac_save_CXX=$CXX
+-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h.  */
+-$ac_cxx_conftest_cxx11_program
+-_ACEOF
+-for ac_arg in '' -std=gnu++11 -std=gnu++0x -std=c++11 -std=c++0x -qlanglvl=extended0x -AA
+-do
+-  CXX="$ac_save_CXX $ac_arg"
+-  if ac_fn_cxx_try_compile "$LINENO"
+-then :
+-  ac_cv_prog_cxx_cxx11=$ac_arg
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
+-  test "x$ac_cv_prog_cxx_cxx11" != "xno" && break
+-done
+-rm -f conftest.$ac_ext
+-CXX=$ac_save_CXX
+-fi
+-
+-if test "x$ac_cv_prog_cxx_cxx11" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cxx_cxx11" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx11" >&5
+-printf "%s\n" "$ac_cv_prog_cxx_cxx11" >&6; }
+-     CXX="$CXX $ac_cv_prog_cxx_cxx11"
+-fi
+-  ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx11
+-  ac_prog_cxx_stdcxx=cxx11
+-fi
+-fi
+-if test x$ac_prog_cxx_stdcxx = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $CXX option to enable C++98 features" >&5
+-printf %s "checking for $CXX option to enable C++98 features... " >&6; }
+-if test ${ac_cv_prog_cxx_cxx98+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  ac_cv_prog_cxx_cxx98=no
+-ac_save_CXX=$CXX
+-cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h.  */
+-$ac_cxx_conftest_cxx98_program
+-_ACEOF
+-for ac_arg in '' -std=gnu++98 -std=c++98 -qlanglvl=extended -AA
+-do
+-  CXX="$ac_save_CXX $ac_arg"
+-  if ac_fn_cxx_try_compile "$LINENO"
+-then :
+-  ac_cv_prog_cxx_cxx98=$ac_arg
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
+-  test "x$ac_cv_prog_cxx_cxx98" != "xno" && break
+-done
+-rm -f conftest.$ac_ext
+-CXX=$ac_save_CXX
+-fi
+-
+-if test "x$ac_cv_prog_cxx_cxx98" = xno
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5
+-printf "%s\n" "unsupported" >&6; }
+-else $as_nop
+-  if test "x$ac_cv_prog_cxx_cxx98" = x
+-then :
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none needed" >&5
+-printf "%s\n" "none needed" >&6; }
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_cxx98" >&5
+-printf "%s\n" "$ac_cv_prog_cxx_cxx98" >&6; }
+-     CXX="$CXX $ac_cv_prog_cxx_cxx98"
+-fi
+-  ac_cv_prog_cxx_stdcxx=$ac_cv_prog_cxx_cxx98
+-  ac_prog_cxx_stdcxx=cxx98
+-fi
+-fi
+-
+ ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+@@ -6502,12 +5769,11 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+ depcc="$CXX"  am_compiler_list=
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
+-printf %s "checking dependency style of $depcc... " >&6; }
+-if test ${am_cv_CXX_dependencies_compiler_type+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5
++$as_echo_n "checking dependency style of $depcc... " >&6; }
++if ${am_cv_CXX_dependencies_compiler_type+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then
+   # We make a subdir and do the tests there.  Otherwise we can end up
+   # making bogus files that we don't know about and never remove.  For
+@@ -6614,8 +5880,8 @@ else
+ fi
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
+-printf "%s\n" "$am_cv_CXX_dependencies_compiler_type" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5
++$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; }
+ CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type
+ 
+  if
+@@ -6630,19 +5896,17 @@ fi
+ 
+ 
+ # Check whether --enable-largefile was given.
+-if test ${enable_largefile+y}
+-then :
++if test "${enable_largefile+set}" = set; then :
+   enableval=$enable_largefile;
+ fi
+ 
+ if test "$enable_largefile" != no; then
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5
+-printf %s "checking for special C compiler options needed for large files... " >&6; }
+-if test ${ac_cv_sys_largefile_CC+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5
++$as_echo_n "checking for special C compiler options needed for large files... " >&6; }
++if ${ac_cv_sys_largefile_CC+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_cv_sys_largefile_CC=no
+      if test "$GCC" != yes; then
+        ac_save_CC=$CC
+@@ -6656,47 +5920,44 @@ else $as_nop
+     We can't simply define LARGE_OFF_T to be 9223372036854775807,
+     since some C++ compilers masquerading as C compilers
+     incorrectly reject 9223372036854775807.  */
+-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+   int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ 		       && LARGE_OFF_T % 2147483647 == 1)
+ 		      ? 1 : -1];
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-	 if ac_fn_c_try_compile "$LINENO"
+-then :
++	 if ac_fn_c_try_compile "$LINENO"; then :
+   break
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
++rm -f core conftest.err conftest.$ac_objext
+ 	 CC="$CC -n32"
+-	 if ac_fn_c_try_compile "$LINENO"
+-then :
++	 if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_sys_largefile_CC=' -n32'; break
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam
++rm -f core conftest.err conftest.$ac_objext
+ 	 break
+        done
+        CC=$ac_save_CC
+        rm -f conftest.$ac_ext
+     fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5
+-printf "%s\n" "$ac_cv_sys_largefile_CC" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5
++$as_echo "$ac_cv_sys_largefile_CC" >&6; }
+   if test "$ac_cv_sys_largefile_CC" != no; then
+     CC=$CC$ac_cv_sys_largefile_CC
+   fi
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5
+-printf %s "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; }
+-if test ${ac_cv_sys_file_offset_bits+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5
++$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; }
++if ${ac_cv_sys_file_offset_bits+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   while :; do
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+@@ -6705,23 +5966,22 @@ else $as_nop
+     We can't simply define LARGE_OFF_T to be 9223372036854775807,
+     since some C++ compilers masquerading as C compilers
+     incorrectly reject 9223372036854775807.  */
+-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+   int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ 		       && LARGE_OFF_T % 2147483647 == 1)
+ 		      ? 1 : -1];
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_sys_file_offset_bits=no; break
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ #define _FILE_OFFSET_BITS 64
+@@ -6730,43 +5990,43 @@ rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+     We can't simply define LARGE_OFF_T to be 9223372036854775807,
+     since some C++ compilers masquerading as C compilers
+     incorrectly reject 9223372036854775807.  */
+-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+   int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ 		       && LARGE_OFF_T % 2147483647 == 1)
+ 		      ? 1 : -1];
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_sys_file_offset_bits=64; break
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_cv_sys_file_offset_bits=unknown
+   break
+ done
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5
+-printf "%s\n" "$ac_cv_sys_file_offset_bits" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5
++$as_echo "$ac_cv_sys_file_offset_bits" >&6; }
+ case $ac_cv_sys_file_offset_bits in #(
+   no | unknown) ;;
+   *)
+-printf "%s\n" "#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits
++_ACEOF
+ ;;
+ esac
+ rm -rf conftest*
+   if test $ac_cv_sys_file_offset_bits = unknown; then
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5
+-printf %s "checking for _LARGE_FILES value needed for large files... " >&6; }
+-if test ${ac_cv_sys_large_files+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5
++$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; }
++if ${ac_cv_sys_large_files+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   while :; do
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+@@ -6775,23 +6035,22 @@ else $as_nop
+     We can't simply define LARGE_OFF_T to be 9223372036854775807,
+     since some C++ compilers masquerading as C compilers
+     incorrectly reject 9223372036854775807.  */
+-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+   int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ 		       && LARGE_OFF_T % 2147483647 == 1)
+ 		      ? 1 : -1];
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_sys_large_files=no; break
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ #define _LARGE_FILES 1
+@@ -6800,60 +6059,119 @@ rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+     We can't simply define LARGE_OFF_T to be 9223372036854775807,
+     since some C++ compilers masquerading as C compilers
+     incorrectly reject 9223372036854775807.  */
+-#define LARGE_OFF_T (((off_t) 1 << 31 << 31) - 1 + ((off_t) 1 << 31 << 31))
++#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62))
+   int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721
+ 		       && LARGE_OFF_T % 2147483647 == 1)
+ 		      ? 1 : -1];
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_sys_large_files=1; break
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+   ac_cv_sys_large_files=unknown
+   break
+ done
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5
+-printf "%s\n" "$ac_cv_sys_large_files" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5
++$as_echo "$ac_cv_sys_large_files" >&6; }
+ case $ac_cv_sys_large_files in #(
+   no | unknown) ;;
+   *)
+-printf "%s\n" "#define _LARGE_FILES $ac_cv_sys_large_files" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define _LARGE_FILES $ac_cv_sys_large_files
++_ACEOF
+ ;;
+ esac
+ rm -rf conftest*
+   fi
++
++
++fi
++
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
++ac_bolt_plugin_warn_cflags=
++save_CFLAGS="$CFLAGS"
++for real_option in -Wall; do
++  # Do the check with the no- prefix removed since gcc silently
++  # accepts any -Wno-* option on purpose
++  case $real_option in
++    -Wno-*) option=-W`expr x$real_option : 'x-Wno-\(.*\)'` ;;
++    *) option=$real_option ;;
++  esac
++  as_acx_Woption=`$as_echo "acx_cv_prog_cc_warning_$option" | $as_tr_sh`
++
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports $option" >&5
++$as_echo_n "checking whether $CC supports $option... " >&6; }
++if eval \${$as_acx_Woption+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  CFLAGS="$option"
++    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++/* end confdefs.h.  */
++
++int
++main ()
++{
++
++  ;
++  return 0;
++}
++_ACEOF
++if ac_fn_c_try_compile "$LINENO"; then :
++  eval "$as_acx_Woption=yes"
++else
++  eval "$as_acx_Woption=no"
++fi
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
++
+ fi
++eval ac_res=\$$as_acx_Woption
++	       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
++$as_echo "$ac_res" >&6; }
++  if test `eval 'as_val=${'$as_acx_Woption'};$as_echo "$as_val"'` = yes; then :
++  ac_bolt_plugin_warn_cflags="$ac_bolt_plugin_warn_cflags${ac_bolt_plugin_warn_cflags:+ }$real_option"
++fi
++  done
++CFLAGS="$save_CFLAGS"
++ac_ext=c
++ac_cpp='$CPP $CPPFLAGS'
++ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
++ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
++ac_compiler_gnu=$ac_cv_c_compiler_gnu
++
+ 
+ 
+ # Check whether -static-libgcc is supported.
+ saved_LDFLAGS="$LDFLAGS"
+ LDFLAGS="$LDFLAGS -static-libgcc"
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -static-libgcc" >&5
+-printf %s "checking for -static-libgcc... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for -static-libgcc" >&5
++$as_echo_n "checking for -static-libgcc... " >&6; }
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+   int main() {}
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   have_static_libgcc=yes
+-else $as_nop
++else
+   have_static_libgcc=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $have_static_libgcc" >&5
+-printf "%s\n" "$have_static_libgcc" >&6; };
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $have_static_libgcc" >&5
++$as_echo "$have_static_libgcc" >&6; };
+ LDFLAGS="$saved_LDFLAGS"
+ # Need -Wc to get it through libtool.
+ if test "x$have_static_libgcc" = xyes; then
+@@ -6879,19 +6197,30 @@ fi
+ 
+ 
+ # Determine what GCC version number to use in filesystem paths.
+-GCC_BASE_VER
++
++  get_gcc_base_ver="cat"
++
++# Check whether --with-gcc-major-version-only was given.
++if test "${with_gcc_major_version_only+set}" = set; then :
++  withval=$with_gcc_major_version_only; if test x$with_gcc_major_version_only = xyes ; then
++        get_gcc_base_ver="sed -e 's/^\([0-9]*\).*/\1/'"
++      fi
++
++fi
++
++
++
+ 
+ case `pwd` in
+   *\ * | *\	*)
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
+-printf "%s\n" "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5
++$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;;
+ esac
+ 
+ 
+ 
+-macro_version='2.4.7'
+-macro_revision='2.4.7'
+-
++macro_version='2.2.7a'
++macro_revision='1.3134'
+ 
+ 
+ 
+@@ -6905,7 +6234,7 @@ macro_revision='2.4.7'
+ 
+ 
+ 
+-ltmain=$ac_aux_dir/ltmain.sh
++ltmain="$ac_aux_dir/ltmain.sh"
+ 
+ # Backslashify metacharacters that are still active within
+ # double-quoted strings.
+@@ -6928,10 +6257,10 @@ ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\
+ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO
+ ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5
+-printf %s "checking how to print strings... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5
++$as_echo_n "checking how to print strings... " >&6; }
+ # Test print first, because it will be a builtin if present.
+-if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \
++if test "X`print -r -- -n 2>/dev/null`" = X-n && \
+    test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then
+   ECHO='print -r --'
+ elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then
+@@ -6954,13 +6283,13 @@ func_echo_all ()
+     $ECHO ""
+ }
+ 
+-case $ECHO in
+-  printf*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: printf" >&5
+-printf "%s\n" "printf" >&6; } ;;
+-  print*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: print -r" >&5
+-printf "%s\n" "print -r" >&6; } ;;
+-  *) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: cat" >&5
+-printf "%s\n" "cat" >&6; } ;;
++case "$ECHO" in
++  printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5
++$as_echo "printf" >&6; } ;;
++  print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5
++$as_echo "print -r" >&6; } ;;
++  *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5
++$as_echo "cat" >&6; } ;;
+ esac
+ 
+ 
+@@ -6976,12 +6305,11 @@ esac
+ 
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
+-printf %s "checking for a sed that does not truncate output... " >&6; }
+-if test ${ac_cv_path_SED+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5
++$as_echo_n "checking for a sed that does not truncate output... " >&6; }
++if ${ac_cv_path_SED+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+             ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/
+      for ac_i in 1 2 3 4 5 6 7; do
+        ac_script="$ac_script$as_nl$ac_script"
+@@ -6995,15 +6323,10 @@ else $as_nop
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_prog in sed gsed
+-   do
++  test -z "$as_dir" && as_dir=.
++    for ac_prog in sed gsed; do
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-      ac_path_SED="$as_dir$ac_prog$ac_exec_ext"
++      ac_path_SED="$as_dir/$ac_prog$ac_exec_ext"
+       as_fn_executable_p "$ac_path_SED" || continue
+ # Check for GNU ac_path_SED and select it if it is found.
+   # Check for GNU $ac_path_SED
+@@ -7012,13 +6335,13 @@ case `"$ac_path_SED" --version 2>&1` in
+   ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;;
+ *)
+   ac_count=0
+-  printf %s 0123456789 >"conftest.in"
++  $as_echo_n 0123456789 >"conftest.in"
+   while :
+   do
+     cat "conftest.in" "conftest.in" >"conftest.tmp"
+     mv "conftest.tmp" "conftest.in"
+     cp "conftest.in" "conftest.nl"
+-    printf "%s\n" '' >> "conftest.nl"
++    $as_echo '' >> "conftest.nl"
+     "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break
+     diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+     as_fn_arith $ac_count + 1 && ac_count=$as_val
+@@ -7046,172 +6369,29 @@ else
+ fi
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5
+-printf "%s\n" "$ac_cv_path_SED" >&6; }
+- SED="$ac_cv_path_SED"
+-  rm -f conftest.sed
+-
+-test -z "$SED" && SED=sed
+-Xsed="$SED -e 1s/^X//"
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5
+-printf %s "checking for grep that handles long lines and -e... " >&6; }
+-if test ${ac_cv_path_GREP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -z "$GREP"; then
+-  ac_path_GREP_found=false
+-  # Loop through the user's path and test for each of PROGNAME-LIST
+-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_prog in grep ggrep
+-   do
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-      ac_path_GREP="$as_dir$ac_prog$ac_exec_ext"
+-      as_fn_executable_p "$ac_path_GREP" || continue
+-# Check for GNU ac_path_GREP and select it if it is found.
+-  # Check for GNU $ac_path_GREP
+-case `"$ac_path_GREP" --version 2>&1` in
+-*GNU*)
+-  ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;;
+-*)
+-  ac_count=0
+-  printf %s 0123456789 >"conftest.in"
+-  while :
+-  do
+-    cat "conftest.in" "conftest.in" >"conftest.tmp"
+-    mv "conftest.tmp" "conftest.in"
+-    cp "conftest.in" "conftest.nl"
+-    printf "%s\n" 'GREP' >> "conftest.nl"
+-    "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+-    as_fn_arith $ac_count + 1 && ac_count=$as_val
+-    if test $ac_count -gt ${ac_path_GREP_max-0}; then
+-      # Best one so far, save it but keep looking for a better one
+-      ac_cv_path_GREP="$ac_path_GREP"
+-      ac_path_GREP_max=$ac_count
+-    fi
+-    # 10*(2^10) chars as input seems more than enough
+-    test $ac_count -gt 10 && break
+-  done
+-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+-esac
+-
+-      $ac_path_GREP_found && break 3
+-    done
+-  done
+-  done
+-IFS=$as_save_IFS
+-  if test -z "$ac_cv_path_GREP"; then
+-    as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+-  fi
+-else
+-  ac_cv_path_GREP=$GREP
+-fi
+-
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5
+-printf "%s\n" "$ac_cv_path_GREP" >&6; }
+- GREP="$ac_cv_path_GREP"
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5
+-printf %s "checking for egrep... " >&6; }
+-if test ${ac_cv_path_EGREP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if echo a | $GREP -E '(a|b)' >/dev/null 2>&1
+-   then ac_cv_path_EGREP="$GREP -E"
+-   else
+-     if test -z "$EGREP"; then
+-  ac_path_EGREP_found=false
+-  # Loop through the user's path and test for each of PROGNAME-LIST
+-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_prog in egrep
+-   do
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-      ac_path_EGREP="$as_dir$ac_prog$ac_exec_ext"
+-      as_fn_executable_p "$ac_path_EGREP" || continue
+-# Check for GNU ac_path_EGREP and select it if it is found.
+-  # Check for GNU $ac_path_EGREP
+-case `"$ac_path_EGREP" --version 2>&1` in
+-*GNU*)
+-  ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;;
+-*)
+-  ac_count=0
+-  printf %s 0123456789 >"conftest.in"
+-  while :
+-  do
+-    cat "conftest.in" "conftest.in" >"conftest.tmp"
+-    mv "conftest.tmp" "conftest.in"
+-    cp "conftest.in" "conftest.nl"
+-    printf "%s\n" 'EGREP' >> "conftest.nl"
+-    "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break
+-    diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+-    as_fn_arith $ac_count + 1 && ac_count=$as_val
+-    if test $ac_count -gt ${ac_path_EGREP_max-0}; then
+-      # Best one so far, save it but keep looking for a better one
+-      ac_cv_path_EGREP="$ac_path_EGREP"
+-      ac_path_EGREP_max=$ac_count
+-    fi
+-    # 10*(2^10) chars as input seems more than enough
+-    test $ac_count -gt 10 && break
+-  done
+-  rm -f conftest.in conftest.tmp conftest.nl conftest.out;;
+-esac
+-
+-      $ac_path_EGREP_found && break 3
+-    done
+-  done
+-  done
+-IFS=$as_save_IFS
+-  if test -z "$ac_cv_path_EGREP"; then
+-    as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5
+-  fi
+-else
+-  ac_cv_path_EGREP=$EGREP
+-fi
+-
+-   fi
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5
+-printf "%s\n" "$ac_cv_path_EGREP" >&6; }
+- EGREP="$ac_cv_path_EGREP"
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5
++$as_echo "$ac_cv_path_SED" >&6; }
++ SED="$ac_cv_path_SED"
++  rm -f conftest.sed
++
++test -z "$SED" && SED=sed
++Xsed="$SED -e 1s/^X//"
++
++
++
++
++
++
++
++
++
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5
+-printf %s "checking for fgrep... " >&6; }
+-if test ${ac_cv_path_FGREP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5
++$as_echo_n "checking for fgrep... " >&6; }
++if ${ac_cv_path_FGREP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1
+    then ac_cv_path_FGREP="$GREP -F"
+    else
+@@ -7222,15 +6402,10 @@ else $as_nop
+ for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_prog in fgrep
+-   do
++  test -z "$as_dir" && as_dir=.
++    for ac_prog in fgrep; do
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-      ac_path_FGREP="$as_dir$ac_prog$ac_exec_ext"
++      ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext"
+       as_fn_executable_p "$ac_path_FGREP" || continue
+ # Check for GNU ac_path_FGREP and select it if it is found.
+   # Check for GNU $ac_path_FGREP
+@@ -7239,13 +6414,13 @@ case `"$ac_path_FGREP" --version 2>&1` in
+   ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;;
+ *)
+   ac_count=0
+-  printf %s 0123456789 >"conftest.in"
++  $as_echo_n 0123456789 >"conftest.in"
+   while :
+   do
+     cat "conftest.in" "conftest.in" >"conftest.tmp"
+     mv "conftest.tmp" "conftest.in"
+     cp "conftest.in" "conftest.nl"
+-    printf "%s\n" 'FGREP' >> "conftest.nl"
++    $as_echo 'FGREP' >> "conftest.nl"
+     "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break
+     diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break
+     as_fn_arith $ac_count + 1 && ac_count=$as_val
+@@ -7274,8 +6449,8 @@ fi
+ 
+    fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5
+-printf "%s\n" "$ac_cv_path_FGREP" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5
++$as_echo "$ac_cv_path_FGREP" >&6; }
+  FGREP="$ac_cv_path_FGREP"
+ 
+ 
+@@ -7300,21 +6475,20 @@ test -z "$GREP" && GREP=grep
+ 
+ 
+ # Check whether --with-gnu-ld was given.
+-if test ${with_gnu_ld+y}
+-then :
+-  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+-else $as_nop
++if test "${with_gnu_ld+set}" = set; then :
++  withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
++else
+   with_gnu_ld=no
+ fi
+ 
+ ac_prog=ld
+-if test yes = "$GCC"; then
++if test "$GCC" = yes; then
+   # Check if gcc -print-prog-name=ld gives a path.
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+-printf %s "checking for ld used by $CC... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
++$as_echo_n "checking for ld used by $CC... " >&6; }
+   case $host in
+   *-*-mingw*)
+-    # gcc leaves a trailing carriage return, which upsets mingw
++    # gcc leaves a trailing carriage return which upsets mingw
+     ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+   *)
+     ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+@@ -7328,7 +6502,7 @@ printf %s "checking for ld used by $CC... " >&6; }
+       while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ 	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+       done
+-      test -z "$LD" && LD=$ac_prog
++      test -z "$LD" && LD="$ac_prog"
+       ;;
+   "")
+     # If it fails, then pretend we aren't using GCC.
+@@ -7339,58 +6513,56 @@ printf %s "checking for ld used by $CC... " >&6; }
+     with_gnu_ld=unknown
+     ;;
+   esac
+-elif test yes = "$with_gnu_ld"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+-printf %s "checking for GNU ld... " >&6; }
++elif test "$with_gnu_ld" = yes; then
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
++$as_echo_n "checking for GNU ld... " >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+-printf %s "checking for non-GNU ld... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
++$as_echo_n "checking for non-GNU ld... " >&6; }
+ fi
+-if test ${lt_cv_path_LD+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++if ${lt_cv_path_LD+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -z "$LD"; then
+-  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+   for ac_dir in $PATH; do
+-    IFS=$lt_save_ifs
++    IFS="$lt_save_ifs"
+     test -z "$ac_dir" && ac_dir=.
+     if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+-      lt_cv_path_LD=$ac_dir/$ac_prog
++      lt_cv_path_LD="$ac_dir/$ac_prog"
+       # Check to see if the program is GNU ld.  I'd rather use --version,
+       # but apparently some variants of GNU ld only accept -v.
+       # Break only if it was the GNU/non-GNU ld that we prefer.
+       case `"$lt_cv_path_LD" -v 2>&1 &5
+-printf "%s\n" "$LD" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
++$as_echo "$LD" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+-printf %s "checking if the linker ($LD) is GNU ld... " >&6; }
+-if test ${lt_cv_prog_gnu_ld+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
++$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
++if ${lt_cv_prog_gnu_ld+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   # I'd rather use --version here, but apparently some GNU lds only accept -v.
+ case `$LD -v 2>&1 &1 &5
+-printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
++$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+ with_gnu_ld=$lt_cv_prog_gnu_ld
+ 
+ 
+@@ -7413,46 +6585,40 @@ with_gnu_ld=$lt_cv_prog_gnu_ld
+ 
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
+-printf %s "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
+-if test ${lt_cv_path_NM+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5
++$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; }
++if ${lt_cv_path_NM+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$NM"; then
+   # Let the user override the test.
+-  lt_cv_path_NM=$NM
++  lt_cv_path_NM="$NM"
+ else
+-  lt_nm_to_check=${ac_tool_prefix}nm
++  lt_nm_to_check="${ac_tool_prefix}nm"
+   if test -n "$ac_tool_prefix" && test "$build" = "$host"; then
+     lt_nm_to_check="$lt_nm_to_check nm"
+   fi
+   for lt_tmp_nm in $lt_nm_to_check; do
+-    lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++    lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+     for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do
+-      IFS=$lt_save_ifs
++      IFS="$lt_save_ifs"
+       test -z "$ac_dir" && ac_dir=.
+-      tmp_nm=$ac_dir/$lt_tmp_nm
+-      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext"; then
++      tmp_nm="$ac_dir/$lt_tmp_nm"
++      if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then
+ 	# Check to see if the nm accepts a BSD-compat flag.
+-	# Adding the 'sed 1q' prevents false positives on HP-UX, which says:
++	# Adding the `sed 1q' prevents false positives on HP-UX, which says:
+ 	#   nm: unknown option "B" ignored
+ 	# Tru64's nm complains that /dev/null is an invalid object file
+-	# MSYS converts /dev/null to NUL, MinGW nm treats NUL as empty
+-	case $build_os in
+-	mingw*) lt_bad_file=conftest.nm/nofile ;;
+-	*) lt_bad_file=/dev/null ;;
+-	esac
+-	case `"$tmp_nm" -B $lt_bad_file 2>&1 | $SED '1q'` in
+-	*$lt_bad_file* | *'Invalid file or object type'*)
++	case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in
++	*/dev/null* | *'Invalid file or object type'*)
+ 	  lt_cv_path_NM="$tmp_nm -B"
+-	  break 2
++	  break
+ 	  ;;
+ 	*)
+-	  case `"$tmp_nm" -p /dev/null 2>&1 | $SED '1q'` in
++	  case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in
+ 	  */dev/null*)
+ 	    lt_cv_path_NM="$tmp_nm -p"
+-	    break 2
++	    break
+ 	    ;;
+ 	  *)
+ 	    lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but
+@@ -7463,15 +6629,15 @@ else
+ 	esac
+       fi
+     done
+-    IFS=$lt_save_ifs
++    IFS="$lt_save_ifs"
+   done
+   : ${lt_cv_path_NM=no}
+ fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5
+-printf "%s\n" "$lt_cv_path_NM" >&6; }
+-if test no != "$lt_cv_path_NM"; then
+-  NM=$lt_cv_path_NM
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5
++$as_echo "$lt_cv_path_NM" >&6; }
++if test "$lt_cv_path_NM" != "no"; then
++  NM="$lt_cv_path_NM"
+ else
+   # Didn't find any BSD compatible name lister, look for dumpbin.
+   if test -n "$DUMPBIN"; then :
+@@ -7482,12 +6648,11 @@ else
+   do
+     # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+ set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_DUMPBIN+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_DUMPBIN+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$DUMPBIN"; then
+   ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test.
+ else
+@@ -7495,15 +6660,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -7514,11 +6675,11 @@ fi
+ fi
+ DUMPBIN=$ac_cv_prog_DUMPBIN
+ if test -n "$DUMPBIN"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5
+-printf "%s\n" "$DUMPBIN" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5
++$as_echo "$DUMPBIN" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -7531,12 +6692,11 @@ if test -z "$DUMPBIN"; then
+ do
+   # Extract the first word of "$ac_prog", so it can be a program name with args.
+ set dummy $ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_DUMPBIN+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_DUMPBIN"; then
+   ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test.
+ else
+@@ -7544,15 +6704,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_DUMPBIN="$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -7563,11 +6719,11 @@ fi
+ fi
+ ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN
+ if test -n "$ac_ct_DUMPBIN"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5
+-printf "%s\n" "$ac_ct_DUMPBIN" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5
++$as_echo "$ac_ct_DUMPBIN" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -7579,17 +6735,17 @@ done
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     DUMPBIN=$ac_ct_DUMPBIN
+   fi
+ fi
+ 
+-    case `$DUMPBIN -symbols -headers /dev/null 2>&1 | $SED '1q'` in
++    case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in
+     *COFF*)
+-      DUMPBIN="$DUMPBIN -symbols -headers"
++      DUMPBIN="$DUMPBIN -symbols"
+       ;;
+     *)
+       DUMPBIN=:
+@@ -7597,8 +6753,8 @@ fi
+     esac
+   fi
+ 
+-  if test : != "$DUMPBIN"; then
+-    NM=$DUMPBIN
++  if test "$DUMPBIN" != ":"; then
++    NM="$DUMPBIN"
+   fi
+ fi
+ test -z "$NM" && NM=nm
+@@ -7608,12 +6764,11 @@ test -z "$NM" && NM=nm
+ 
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5
+-printf %s "checking the name lister ($NM) interface... " >&6; }
+-if test ${lt_cv_nm_interface+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5
++$as_echo_n "checking the name lister ($NM) interface... " >&6; }
++if ${lt_cv_nm_interface+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_nm_interface="BSD nm"
+   echo "int some_variable = 0;" > conftest.$ac_ext
+   (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5)
+@@ -7629,29 +6784,28 @@ else $as_nop
+   fi
+   rm -f conftest*
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5
+-printf "%s\n" "$lt_cv_nm_interface" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5
++$as_echo "$lt_cv_nm_interface" >&6; }
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5
+-printf %s "checking whether ln -s works... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5
++$as_echo_n "checking whether ln -s works... " >&6; }
+ LN_S=$as_ln_s
+ if test "$LN_S" = "ln -s"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+-printf "%s\n" "yes" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++$as_echo "yes" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5
+-printf "%s\n" "no, using $LN_S" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5
++$as_echo "no, using $LN_S" >&6; }
+ fi
+ 
+ # find the maximum length of command line arguments
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5
+-printf %s "checking the maximum length of command line arguments... " >&6; }
+-if test ${lt_cv_sys_max_cmd_len+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5
++$as_echo_n "checking the maximum length of command line arguments... " >&6; }
++if ${lt_cv_sys_max_cmd_len+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+     i=0
+-  teststring=ABCD
++  teststring="ABCD"
+ 
+   case $build_os in
+   msdosdjgpp*)
+@@ -7691,7 +6845,7 @@ else $as_nop
+     lt_cv_sys_max_cmd_len=8192;
+     ;;
+ 
+-  bitrig* | darwin* | dragonfly* | freebsd* | midnightbsd* | netbsd* | openbsd*)
++  netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+     # This has been around since 386BSD, at least.  Likely further.
+     if test -x /sbin/sysctl; then
+       lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+@@ -7710,11 +6864,6 @@ else $as_nop
+     lt_cv_sys_max_cmd_len=196608
+     ;;
+ 
+-  os2*)
+-    # The test takes a long time on OS/2.
+-    lt_cv_sys_max_cmd_len=8192
+-    ;;
+-
+   osf*)
+     # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+     # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+@@ -7734,30 +6883,29 @@ else $as_nop
+   sysv5* | sco5v6* | sysv4.2uw2*)
+     kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+     if test -n "$kargmax"; then
+-      lt_cv_sys_max_cmd_len=`echo $kargmax | $SED 's/.*[	 ]//'`
++      lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[	 ]//'`
+     else
+       lt_cv_sys_max_cmd_len=32768
+     fi
+     ;;
+   *)
+     lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+-    if test -n "$lt_cv_sys_max_cmd_len" && \
+-       test undefined != "$lt_cv_sys_max_cmd_len"; then
++    if test -n "$lt_cv_sys_max_cmd_len"; then
+       lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+       lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+     else
+       # Make teststring a little bigger before we do anything with it.
+       # a 1K string should be a reasonable start.
+-      for i in 1 2 3 4 5 6 7 8; do
++      for i in 1 2 3 4 5 6 7 8 ; do
+         teststring=$teststring$teststring
+       done
+       SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+       # If test is not a shell built-in, we'll probably end up computing a
+       # maximum length that is only half of the actual maximum length, but
+       # we can't tell.
+-      while { test X`env echo "$teststring$teststring" 2>/dev/null` \
++      while { test "X"`func_fallback_echo "$teststring$teststring" 2>/dev/null` \
+ 	         = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+-	      test 17 != "$i" # 1/2 MB should be enough
++	      test $i != 17 # 1/2 MB should be enough
+       do
+         i=`expr $i + 1`
+         teststring=$teststring$teststring
+@@ -7775,12 +6923,12 @@ else $as_nop
+ 
+ fi
+ 
+-if test -n "$lt_cv_sys_max_cmd_len"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5
+-printf "%s\n" "$lt_cv_sys_max_cmd_len" >&6; }
++if test -n $lt_cv_sys_max_cmd_len ; then
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5
++$as_echo "$lt_cv_sys_max_cmd_len" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: none" >&5
+-printf "%s\n" "none" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5
++$as_echo "none" >&6; }
+ fi
+ max_cmd_len=$lt_cv_sys_max_cmd_len
+ 
+@@ -7793,6 +6941,30 @@ max_cmd_len=$lt_cv_sys_max_cmd_len
+ : ${MV="mv -f"}
+ : ${RM="rm -f"}
+ 
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5
++$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; }
++# Try some XSI features
++xsi_shell=no
++( _lt_dummy="a/b/c"
++  test "${_lt_dummy##*/},${_lt_dummy%/*},"${_lt_dummy%"$_lt_dummy"}, \
++      = c,a/b,, \
++    && eval 'test $(( 1 + 1 )) -eq 2 \
++    && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \
++  && xsi_shell=yes
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5
++$as_echo "$xsi_shell" >&6; }
++
++
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5
++$as_echo_n "checking whether the shell understands \"+=\"... " >&6; }
++lt_shell_append=no
++( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \
++    >/dev/null 2>&1 \
++  && lt_shell_append=yes
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5
++$as_echo "$lt_shell_append" >&6; }
++
++
+ if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then
+   lt_unset=unset
+ else
+@@ -7824,92 +6996,15 @@ esac
+ 
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5
+-printf %s "checking how to convert $build file names to $host format... " >&6; }
+-if test ${lt_cv_to_host_file_cmd+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  case $host in
+-  *-*-mingw* )
+-    case $build in
+-      *-*-mingw* ) # actually msys
+-        lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32
+-        ;;
+-      *-*-cygwin* )
+-        lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32
+-        ;;
+-      * ) # otherwise, assume *nix
+-        lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32
+-        ;;
+-    esac
+-    ;;
+-  *-*-cygwin* )
+-    case $build in
+-      *-*-mingw* ) # actually msys
+-        lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin
+-        ;;
+-      *-*-cygwin* )
+-        lt_cv_to_host_file_cmd=func_convert_file_noop
+-        ;;
+-      * ) # otherwise, assume *nix
+-        lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin
+-        ;;
+-    esac
+-    ;;
+-  * ) # unhandled hosts (and "normal" native builds)
+-    lt_cv_to_host_file_cmd=func_convert_file_noop
+-    ;;
+-esac
+-
+-fi
+-
+-to_host_file_cmd=$lt_cv_to_host_file_cmd
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5
+-printf "%s\n" "$lt_cv_to_host_file_cmd" >&6; }
+-
+-
+-
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5
+-printf %s "checking how to convert $build file names to toolchain format... " >&6; }
+-if test ${lt_cv_to_tool_file_cmd+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  #assume ordinary cross tools, or native build.
+-lt_cv_to_tool_file_cmd=func_convert_file_noop
+-case $host in
+-  *-*-mingw* )
+-    case $build in
+-      *-*-mingw* ) # actually msys
+-        lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32
+-        ;;
+-    esac
+-    ;;
+-esac
+-
+-fi
+-
+-to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5
+-printf "%s\n" "$lt_cv_to_tool_file_cmd" >&6; }
+-
+-
+-
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5
+-printf %s "checking for $LD option to reload object files... " >&6; }
+-if test ${lt_cv_ld_reload_flag+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5
++$as_echo_n "checking for $LD option to reload object files... " >&6; }
++if ${lt_cv_ld_reload_flag+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_ld_reload_flag='-r'
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5
+-printf "%s\n" "$lt_cv_ld_reload_flag" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5
++$as_echo "$lt_cv_ld_reload_flag" >&6; }
+ reload_flag=$lt_cv_ld_reload_flag
+ case $reload_flag in
+ "" | " "*) ;;
+@@ -7917,14 +7012,9 @@ case $reload_flag in
+ esac
+ reload_cmds='$LD$reload_flag -o $output$reload_objs'
+ case $host_os in
+-  cygwin* | mingw* | pw32* | cegcc*)
+-    if test yes != "$GCC"; then
+-      reload_cmds=false
+-    fi
+-    ;;
+   darwin*)
+-    if test yes = "$GCC"; then
+-      reload_cmds='$LTCC $LTCFLAGS -nostdlib $wl-r -o $output$reload_objs'
++    if test "$GCC" = yes; then
++      reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+     else
+       reload_cmds='$LD$reload_flag -o $output$reload_objs'
+     fi
+@@ -7939,123 +7029,14 @@ esac
+ 
+ 
+ 
+-if test -n "$ac_tool_prefix"; then
+-  # Extract the first word of "${ac_tool_prefix}file", so it can be a program name with args.
+-set dummy ${ac_tool_prefix}file; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_FILECMD+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$FILECMD"; then
+-  ac_cv_prog_FILECMD="$FILECMD" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_FILECMD="${ac_tool_prefix}file"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-FILECMD=$ac_cv_prog_FILECMD
+-if test -n "$FILECMD"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $FILECMD" >&5
+-printf "%s\n" "$FILECMD" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-
+-fi
+-if test -z "$ac_cv_prog_FILECMD"; then
+-  ac_ct_FILECMD=$FILECMD
+-  # Extract the first word of "file", so it can be a program name with args.
+-set dummy file; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_FILECMD+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$ac_ct_FILECMD"; then
+-  ac_cv_prog_ac_ct_FILECMD="$ac_ct_FILECMD" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_ac_ct_FILECMD="file"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-ac_ct_FILECMD=$ac_cv_prog_ac_ct_FILECMD
+-if test -n "$ac_ct_FILECMD"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_FILECMD" >&5
+-printf "%s\n" "$ac_ct_FILECMD" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-  if test "x$ac_ct_FILECMD" = x; then
+-    FILECMD=":"
+-  else
+-    case $cross_compiling:$ac_tool_warned in
+-yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+-ac_tool_warned=yes ;;
+-esac
+-    FILECMD=$ac_ct_FILECMD
+-  fi
+-else
+-  FILECMD="$ac_cv_prog_FILECMD"
+-fi
+-
+-
+-
+-
+-
+-
+-
+ if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}objdump; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_OBJDUMP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_OBJDUMP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$OBJDUMP"; then
+   ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test.
+ else
+@@ -8063,15 +7044,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8082,11 +7059,11 @@ fi
+ fi
+ OBJDUMP=$ac_cv_prog_OBJDUMP
+ if test -n "$OBJDUMP"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5
+-printf "%s\n" "$OBJDUMP" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5
++$as_echo "$OBJDUMP" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -8095,12 +7072,11 @@ if test -z "$ac_cv_prog_OBJDUMP"; then
+   ac_ct_OBJDUMP=$OBJDUMP
+   # Extract the first word of "objdump", so it can be a program name with args.
+ set dummy objdump; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_OBJDUMP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_OBJDUMP"; then
+   ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test.
+ else
+@@ -8108,15 +7084,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_OBJDUMP="objdump"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8127,11 +7099,11 @@ fi
+ fi
+ ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP
+ if test -n "$ac_ct_OBJDUMP"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5
+-printf "%s\n" "$ac_ct_OBJDUMP" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5
++$as_echo "$ac_ct_OBJDUMP" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_OBJDUMP" = x; then
+@@ -8139,8 +7111,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     OBJDUMP=$ac_ct_OBJDUMP
+@@ -8159,25 +7131,24 @@ test -z "$OBJDUMP" && OBJDUMP=objdump
+ 
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5
+-printf %s "checking how to recognize dependent libraries... " >&6; }
+-if test ${lt_cv_deplibs_check_method+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5
++$as_echo_n "checking how to recognize dependent libraries... " >&6; }
++if ${lt_cv_deplibs_check_method+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_file_magic_cmd='$MAGIC_CMD'
+ lt_cv_file_magic_test_file=
+ lt_cv_deplibs_check_method='unknown'
+ # Need to set the preceding variable on all platforms that support
+ # interlibrary dependencies.
+ # 'none' -- dependencies not supported.
+-# 'unknown' -- same as none, but documents that we really don't know.
++# `unknown' -- same as none, but documents that we really don't know.
+ # 'pass_all' -- all dependencies passed with no checks.
+ # 'test_compile' -- check by making test program.
+ # 'file_magic [[regex]]' -- check by looking for files in library path
+-# that responds to the $file_magic_cmd with a given extended regex.
+-# If you have 'file' or equivalent on your system and you're not sure
+-# whether 'pass_all' will *always* work, you probably want this one.
++# which responds to the $file_magic_cmd with a given extended regex.
++# If you have `file' or equivalent on your system and you're not sure
++# whether `pass_all' will *always* work, you probably want this one.
+ 
+ case $host_os in
+ aix[4-9]*)
+@@ -8190,7 +7161,7 @@ beos*)
+ 
+ bsdi[45]*)
+   lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)'
+-  lt_cv_file_magic_cmd='$FILECMD -L'
++  lt_cv_file_magic_cmd='/usr/bin/file -L'
+   lt_cv_file_magic_test_file=/shlib/libc.so
+   ;;
+ 
+@@ -8204,12 +7175,12 @@ mingw* | pw32*)
+   # Base MSYS/MinGW do not provide the 'file' command needed by
+   # func_win32_libid shell function, so use a weaker test based on 'objdump',
+   # unless we find 'file', for example because we are cross-compiling.
+-  if ( file / ) >/dev/null 2>&1; then
++  # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
++  if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
+     lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+     lt_cv_file_magic_cmd='func_win32_libid'
+   else
+-    # Keep this pattern in sync with the one in func_win32_libid.
+-    lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
++    lt_cv_deplibs_check_method='file_magic file format pei*-i386(.*architecture: i386)?'
+     lt_cv_file_magic_cmd='$OBJDUMP -f'
+   fi
+   ;;
+@@ -8224,14 +7195,14 @@ darwin* | rhapsody*)
+   lt_cv_deplibs_check_method=pass_all
+   ;;
+ 
+-freebsd* | dragonfly* | midnightbsd*)
++freebsd* | dragonfly*)
+   if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then
+     case $host_cpu in
+     i*86 )
+       # Not sure whether the presence of OpenBSD here was a mistake.
+       # Let's accept both of them until this is cleared up.
+       lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library'
+-      lt_cv_file_magic_cmd=$FILECMD
++      lt_cv_file_magic_cmd=/usr/bin/file
+       lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*`
+       ;;
+     esac
+@@ -8240,12 +7211,16 @@ freebsd* | dragonfly* | midnightbsd*)
+   fi
+   ;;
+ 
++gnu*)
++  lt_cv_deplibs_check_method=pass_all
++  ;;
++
+ haiku*)
+   lt_cv_deplibs_check_method=pass_all
+   ;;
+ 
+ hpux10.20* | hpux11*)
+-  lt_cv_file_magic_cmd=$FILECMD
++  lt_cv_file_magic_cmd=/usr/bin/file
+   case $host_cpu in
+   ia64*)
+     lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64'
+@@ -8277,8 +7252,8 @@ irix5* | irix6* | nonstopux*)
+   lt_cv_deplibs_check_method=pass_all
+   ;;
+ 
+-# This must be glibc/ELF.
+-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++# This must be Linux ELF.
++linux* | k*bsd*-gnu | kopensolaris*-gnu | uclinuxfdpiceabi)
+   lt_cv_deplibs_check_method=pass_all
+   ;;
+ 
+@@ -8292,7 +7267,7 @@ netbsd*)
+ 
+ newos6*)
+   lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)'
+-  lt_cv_file_magic_cmd=$FILECMD
++  lt_cv_file_magic_cmd=/usr/bin/file
+   lt_cv_file_magic_test_file=/usr/lib/libnls.so
+   ;;
+ 
+@@ -8300,8 +7275,8 @@ newos6*)
+   lt_cv_deplibs_check_method=pass_all
+   ;;
+ 
+-openbsd* | bitrig*)
+-  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++openbsd*)
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+     lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$'
+   else
+     lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$'
+@@ -8333,220 +7308,40 @@ sysv4 | sysv4.3*)
+   ncr)
+     lt_cv_deplibs_check_method=pass_all
+     ;;
+-  sequent)
+-    lt_cv_file_magic_cmd='/bin/file'
+-    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
+-    ;;
+-  sni)
+-    lt_cv_file_magic_cmd='/bin/file'
+-    lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
+-    lt_cv_file_magic_test_file=/lib/libc.so
+-    ;;
+-  siemens)
+-    lt_cv_deplibs_check_method=pass_all
+-    ;;
+-  pc)
+-    lt_cv_deplibs_check_method=pass_all
+-    ;;
+-  esac
+-  ;;
+-
+-tpf*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-os2*)
+-  lt_cv_deplibs_check_method=pass_all
+-  ;;
+-esac
+-
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5
+-printf "%s\n" "$lt_cv_deplibs_check_method" >&6; }
+-
+-file_magic_glob=
+-want_nocaseglob=no
+-if test "$build" = "$host"; then
+-  case $host_os in
+-  mingw* | pw32*)
+-    if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then
+-      want_nocaseglob=yes
+-    else
+-      file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"`
+-    fi
+-    ;;
+-  esac
+-fi
+-
+-file_magic_cmd=$lt_cv_file_magic_cmd
+-deplibs_check_method=$lt_cv_deplibs_check_method
+-test -z "$deplibs_check_method" && deplibs_check_method=unknown
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-if test -n "$ac_tool_prefix"; then
+-  # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args.
+-set dummy ${ac_tool_prefix}dlltool; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_DLLTOOL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$DLLTOOL"; then
+-  ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-DLLTOOL=$ac_cv_prog_DLLTOOL
+-if test -n "$DLLTOOL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5
+-printf "%s\n" "$DLLTOOL" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-
+-fi
+-if test -z "$ac_cv_prog_DLLTOOL"; then
+-  ac_ct_DLLTOOL=$DLLTOOL
+-  # Extract the first word of "dlltool", so it can be a program name with args.
+-set dummy dlltool; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_DLLTOOL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$ac_ct_DLLTOOL"; then
+-  ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_ac_ct_DLLTOOL="dlltool"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL
+-if test -n "$ac_ct_DLLTOOL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5
+-printf "%s\n" "$ac_ct_DLLTOOL" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-  if test "x$ac_ct_DLLTOOL" = x; then
+-    DLLTOOL="false"
+-  else
+-    case $cross_compiling:$ac_tool_warned in
+-yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+-ac_tool_warned=yes ;;
+-esac
+-    DLLTOOL=$ac_ct_DLLTOOL
+-  fi
+-else
+-  DLLTOOL="$ac_cv_prog_DLLTOOL"
+-fi
+-
+-test -z "$DLLTOOL" && DLLTOOL=dlltool
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5
+-printf %s "checking how to associate runtime and link libraries... " >&6; }
+-if test ${lt_cv_sharedlib_from_linklib_cmd+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  lt_cv_sharedlib_from_linklib_cmd='unknown'
+-
+-case $host_os in
+-cygwin* | mingw* | pw32* | cegcc*)
+-  # two different shell functions defined in ltmain.sh;
+-  # decide which one to use based on capabilities of $DLLTOOL
+-  case `$DLLTOOL --help 2>&1` in
+-  *--identify-strict*)
+-    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+-    ;;
+-  *)
+-    lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
++  sequent)
++    lt_cv_file_magic_cmd='/bin/file'
++    lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )'
++    ;;
++  sni)
++    lt_cv_file_magic_cmd='/bin/file'
++    lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib"
++    lt_cv_file_magic_test_file=/lib/libc.so
++    ;;
++  siemens)
++    lt_cv_deplibs_check_method=pass_all
++    ;;
++  pc)
++    lt_cv_deplibs_check_method=pass_all
+     ;;
+   esac
+   ;;
+-*)
+-  # fallback: assume linklib IS sharedlib
+-  lt_cv_sharedlib_from_linklib_cmd=$ECHO
++
++tpf*)
++  lt_cv_deplibs_check_method=pass_all
+   ;;
+ esac
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5
+-printf "%s\n" "$lt_cv_sharedlib_from_linklib_cmd" >&6; }
+-sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+-test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5
++$as_echo "$lt_cv_deplibs_check_method" >&6; }
++file_magic_cmd=$lt_cv_file_magic_cmd
++deplibs_check_method=$lt_cv_deplibs_check_method
++test -z "$deplibs_check_method" && deplibs_check_method=unknown
++
++
++
++
++
+ 
+ 
+ 
+@@ -8555,16 +7350,13 @@ test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+ 
+ 
+ if test -n "$ac_tool_prefix"; then
+-  for ac_prog in ar
+-  do
+-    # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args.
+-set dummy $ac_tool_prefix$ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_AR+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  # Extract the first word of "${ac_tool_prefix}ar", so it can be a program name with args.
++set dummy ${ac_tool_prefix}ar; ac_word=$2
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_AR+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$AR"; then
+   ac_cv_prog_AR="$AR" # Let the user override the test.
+ else
+@@ -8572,15 +7364,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_AR="$ac_tool_prefix$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
++    ac_cv_prog_AR="${ac_tool_prefix}ar"
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8591,29 +7379,24 @@ fi
+ fi
+ AR=$ac_cv_prog_AR
+ if test -n "$AR"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
+-printf "%s\n" "$AR" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5
++$as_echo "$AR" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+-    test -n "$AR" && break
+-  done
+ fi
+-if test -z "$AR"; then
++if test -z "$ac_cv_prog_AR"; then
+   ac_ct_AR=$AR
+-  for ac_prog in ar
+-do
+-  # Extract the first word of "$ac_prog", so it can be a program name with args.
+-set dummy $ac_prog; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_AR+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  # Extract the first word of "ar", so it can be a program name with args.
++set dummy ar; ac_word=$2
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_AR+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_AR"; then
+   ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test.
+ else
+@@ -8621,15 +7404,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_ac_ct_AR="$ac_prog"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
++    ac_cv_prog_ac_ct_AR="ar"
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8640,112 +7419,34 @@ fi
+ fi
+ ac_ct_AR=$ac_cv_prog_ac_ct_AR
+ if test -n "$ac_ct_AR"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
+-printf "%s\n" "$ac_ct_AR" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5
++$as_echo "$ac_ct_AR" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+-
+-  test -n "$ac_ct_AR" && break
+-done
+-
+   if test "x$ac_ct_AR" = x; then
+     AR="false"
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     AR=$ac_ct_AR
+   fi
++else
++  AR="$ac_cv_prog_AR"
+ fi
+ 
+-: ${AR=ar}
+-
+-
+-
+-
+-
+-
+-# Use ARFLAGS variable as AR's operation code to sync the variable naming with
+-# Automake.  If both AR_FLAGS and ARFLAGS are specified, AR_FLAGS should have
+-# higher priority because thats what people were doing historically (setting
+-# ARFLAGS for automake and AR_FLAGS for libtool).  FIXME: Make the AR_FLAGS
+-# variable obsoleted/removed.
+-
+-test ${AR_FLAGS+y} || AR_FLAGS=${ARFLAGS-cr}
+-lt_ar_flags=$AR_FLAGS
+-
+-
+-
+-
+-
+-
+-# Make AR_FLAGS overridable by 'make ARFLAGS='.  Don't try to run-time override
+-# by AR_FLAGS because that was never working and AR_FLAGS is about to die.
+-
+-
+-
+-
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5
+-printf %s "checking for archiver @FILE support... " >&6; }
+-if test ${lt_cv_ar_at_file+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  lt_cv_ar_at_file=no
+-   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+-/* end confdefs.h.  */
+-
+-int
+-main (void)
+-{
++test -z "$AR" && AR=ar
++test -z "$AR_FLAGS" && AR_FLAGS=cru
+ 
+-  ;
+-  return 0;
+-}
+-_ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
+-  echo conftest.$ac_objext > conftest.lst
+-      lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5'
+-      { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+-  (eval $lt_ar_try) 2>&5
+-  ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }
+-      if test 0 -eq "$ac_status"; then
+-	# Ensure the archiver fails upon bogus file names.
+-	rm -f conftest.$ac_objext libconftest.a
+-	{ { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5
+-  (eval $lt_ar_try) 2>&5
+-  ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }
+-	if test 0 -ne "$ac_status"; then
+-          lt_cv_ar_at_file=@
+-        fi
+-      fi
+-      rm -f conftest.* libconftest.a
+ 
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
+ 
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5
+-printf "%s\n" "$lt_cv_ar_at_file" >&6; }
+ 
+-if test no = "$lt_cv_ar_at_file"; then
+-  archiver_list_spec=
+-else
+-  archiver_list_spec=$lt_cv_ar_at_file
+-fi
+ 
+ 
+ 
+@@ -8756,12 +7457,11 @@ fi
+ if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}strip; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_STRIP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_STRIP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$STRIP"; then
+   ac_cv_prog_STRIP="$STRIP" # Let the user override the test.
+ else
+@@ -8769,15 +7469,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_STRIP="${ac_tool_prefix}strip"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8788,11 +7484,11 @@ fi
+ fi
+ STRIP=$ac_cv_prog_STRIP
+ if test -n "$STRIP"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
+-printf "%s\n" "$STRIP" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5
++$as_echo "$STRIP" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -8801,12 +7497,11 @@ if test -z "$ac_cv_prog_STRIP"; then
+   ac_ct_STRIP=$STRIP
+   # Extract the first word of "strip", so it can be a program name with args.
+ set dummy strip; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_STRIP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_STRIP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_STRIP"; then
+   ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test.
+ else
+@@ -8814,15 +7509,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_STRIP="strip"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8833,11 +7524,11 @@ fi
+ fi
+ ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP
+ if test -n "$ac_ct_STRIP"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
+-printf "%s\n" "$ac_ct_STRIP" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5
++$as_echo "$ac_ct_STRIP" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_STRIP" = x; then
+@@ -8845,8 +7536,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     STRIP=$ac_ct_STRIP
+@@ -8865,12 +7556,11 @@ test -z "$STRIP" && STRIP=:
+ if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}ranlib; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_RANLIB+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_RANLIB+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$RANLIB"; then
+   ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test.
+ else
+@@ -8878,15 +7568,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8897,11 +7583,11 @@ fi
+ fi
+ RANLIB=$ac_cv_prog_RANLIB
+ if test -n "$RANLIB"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
+-printf "%s\n" "$RANLIB" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5
++$as_echo "$RANLIB" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -8910,12 +7596,11 @@ if test -z "$ac_cv_prog_RANLIB"; then
+   ac_ct_RANLIB=$RANLIB
+   # Extract the first word of "ranlib", so it can be a program name with args.
+ set dummy ranlib; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_RANLIB+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_RANLIB+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_RANLIB"; then
+   ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test.
+ else
+@@ -8923,15 +7608,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_RANLIB="ranlib"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -8942,11 +7623,11 @@ fi
+ fi
+ ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB
+ if test -n "$ac_ct_RANLIB"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
+-printf "%s\n" "$ac_ct_RANLIB" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5
++$as_echo "$ac_ct_RANLIB" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_RANLIB" = x; then
+@@ -8954,8 +7635,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     RANLIB=$ac_ct_RANLIB
+@@ -8978,14 +7659,14 @@ old_postuninstall_cmds=
+ 
+ if test -n "$RANLIB"; then
+   case $host_os in
+-  bitrig* | openbsd*)
+-    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib"
++  openbsd*)
++    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$oldlib"
+     ;;
+   *)
+-    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib"
++    old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$oldlib"
+     ;;
+   esac
+-  old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib"
++  old_archive_cmds="$old_archive_cmds~\$RANLIB \$oldlib"
+ fi
+ 
+ case $host_os in
+@@ -9044,12 +7725,11 @@ compiler=$CC
+ 
+ 
+ # Check for command to grab the raw symbol name followed by C symbol from nm.
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5
+-printf %s "checking command to parse $NM output from $compiler object... " >&6; }
+-if test ${lt_cv_sys_global_symbol_pipe+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5
++$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; }
++if ${lt_cv_sys_global_symbol_pipe+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+ 
+ # These are sane defaults that work on at least a few old systems.
+ # [They come from Ultrix.  What could be older than Ultrix?!! ;)]
+@@ -9069,7 +7749,7 @@ cygwin* | mingw* | pw32* | cegcc*)
+   symcode='[ABCDGISTW]'
+   ;;
+ hpux*)
+-  if test ia64 = "$host_cpu"; then
++  if test "$host_cpu" = ia64; then
+     symcode='[ABCDEGRST]'
+   fi
+   ;;
+@@ -9102,44 +7782,14 @@ case `$NM -V 2>&1` in
+   symcode='[ABCDGIRSTW]' ;;
+ esac
+ 
+-if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+-  # Gets list of data symbols to import.
+-  lt_cv_sys_global_symbol_to_import="$SED -n -e 's/^I .* \(.*\)$/\1/p'"
+-  # Adjust the below global symbol transforms to fixup imported variables.
+-  lt_cdecl_hook=" -e 's/^I .* \(.*\)$/extern __declspec(dllimport) char \1;/p'"
+-  lt_c_name_hook=" -e 's/^I .* \(.*\)$/  {\"\1\", (void *) 0},/p'"
+-  lt_c_name_lib_hook="\
+-  -e 's/^I .* \(lib.*\)$/  {\"\1\", (void *) 0},/p'\
+-  -e 's/^I .* \(.*\)$/  {\"lib\1\", (void *) 0},/p'"
+-else
+-  # Disable hooks by default.
+-  lt_cv_sys_global_symbol_to_import=
+-  lt_cdecl_hook=
+-  lt_c_name_hook=
+-  lt_c_name_lib_hook=
+-fi
+-
+ # Transform an extracted symbol line into a proper C declaration.
+ # Some systems (esp. on ia64) link data and code symbols differently,
+ # so use this general approach.
+-lt_cv_sys_global_symbol_to_cdecl="$SED -n"\
+-$lt_cdecl_hook\
+-" -e 's/^T .* \(.*\)$/extern int \1();/p'"\
+-" -e 's/^$symcode$symcode* .* \(.*\)$/extern char \1;/p'"
++lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'"
+ 
+ # Transform an extracted symbol line into symbol name and symbol address
+-lt_cv_sys_global_symbol_to_c_name_address="$SED -n"\
+-$lt_c_name_hook\
+-" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+-" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/p'"
+-
+-# Transform an extracted symbol line into symbol name with lib prefix and
+-# symbol address.
+-lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="$SED -n"\
+-$lt_c_name_lib_hook\
+-" -e 's/^: \(.*\) .*$/  {\"\1\", (void *) 0},/p'"\
+-" -e 's/^$symcode$symcode* .* \(lib.*\)$/  {\"\1\", (void *) \&\1},/p'"\
+-" -e 's/^$symcode$symcode* .* \(.*\)$/  {\"lib\1\", (void *) \&\1},/p'"
++lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\) $/  {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/  {\"\2\", (void *) \&\2},/p'"
++lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\) $/  {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/  {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/  {\"lib\2\", (void *) \&\2},/p'"
+ 
+ # Handle CRLF in mingw tool chain
+ opt_cr=
+@@ -9157,29 +7807,24 @@ for ac_symprfx in "" "_"; do
+ 
+   # Write the raw and C identifiers.
+   if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+-    # Fake it for dumpbin and say T for any non-static function,
+-    # D for any global variable and I for any imported variable.
+-    # Also find C++ and __fastcall symbols from MSVC++ or ICC,
++    # Fake it for dumpbin and say T for any non-static function
++    # and D for any global variable.
++    # Also find C++ and __fastcall symbols from MSVC++,
+     # which start with @ or ?.
+     lt_cv_sys_global_symbol_pipe="$AWK '"\
+ "     {last_section=section; section=\$ 3};"\
+-"     /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\
+ "     /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\
+-"     /^ *Symbol name *: /{split(\$ 0,sn,\":\"); si=substr(sn[2],2)};"\
+-"     /^ *Type *: code/{print \"T\",si,substr(si,length(prfx))};"\
+-"     /^ *Type *: data/{print \"I\",si,substr(si,length(prfx))};"\
+ "     \$ 0!~/External *\|/{next};"\
+ "     / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\
+ "     {if(hide[section]) next};"\
+-"     {f=\"D\"}; \$ 0~/\(\).*\|/{f=\"T\"};"\
+-"     {split(\$ 0,a,/\||\r/); split(a[2],s)};"\
+-"     s[1]~/^[@?]/{print f,s[1],s[1]; next};"\
+-"     s[1]~prfx {split(s[1],t,\"@\"); print f,t[1],substr(t[1],length(prfx))}"\
++"     {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? \"T \" : \"D \"};"\
++"     {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
++"     s[1]~/^[@?]/{print s[1], s[1]; next};"\
++"     s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+ "     ' prfx=^$ac_symprfx"
+   else
+-    lt_cv_sys_global_symbol_pipe="$SED -n -e 's/^.*[	 ]\($symcode$symcode*\)[	 ][	 ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
++    lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[	 ]\($symcode$symcode*\)[	 ][	 ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+   fi
+-  lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | $SED '/ __gnu_lto/d'"
+ 
+   # Check to see that the pipe works correctly.
+   pipe_works=no
+@@ -9201,14 +7846,14 @@ _LT_EOF
+   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }; then
+     # Now try to grab the symbols.
+     nlist=conftest.nm
+     if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5
+   (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } && test -s "$nlist"; then
+       # Try sorting and uniquifying the output.
+       if sort "$nlist" | uniq > "$nlist"T; then
+@@ -9221,18 +7866,6 @@ _LT_EOF
+       if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+ 	if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+ 	  cat <<_LT_EOF > conftest.$ac_ext
+-/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests.  */
+-#if defined _WIN32 || defined __CYGWIN__ || defined _WIN32_WCE
+-/* DATA imports from DLLs on WIN32 can't be const, because runtime
+-   relocations are performed -- see ld's documentation on pseudo-relocs.  */
+-# define LT_DLSYM_CONST
+-#elif defined __osf__
+-/* This system does not cope well with relocations in const data.  */
+-# define LT_DLSYM_CONST
+-#else
+-# define LT_DLSYM_CONST const
+-#endif
+-
+ #ifdef __cplusplus
+ extern "C" {
+ #endif
+@@ -9244,7 +7877,7 @@ _LT_EOF
+ 	  cat <<_LT_EOF >> conftest.$ac_ext
+ 
+ /* The mapping between symbol names and symbols.  */
+-LT_DLSYM_CONST struct {
++const struct {
+   const char *name;
+   void       *address;
+ }
+@@ -9252,7 +7885,7 @@ lt__PROGRAM__LTX_preloaded_symbols[] =
+ {
+   { "@PROGRAM@", (void *) 0 },
+ _LT_EOF
+-	  $SED "s/^$symcode$symcode* .* \(.*\)$/  {\"\1\", (void *) \&\1},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
++	  $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/  {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+ 	  cat <<\_LT_EOF >> conftest.$ac_ext
+   {0, (void *) 0}
+ };
+@@ -9270,19 +7903,19 @@ static const void *lt_preloaded_setup() {
+ _LT_EOF
+ 	  # Now try linking the two files.
+ 	  mv conftest.$ac_objext conftstm.$ac_objext
+-	  lt_globsym_save_LIBS=$LIBS
+-	  lt_globsym_save_CFLAGS=$CFLAGS
+-	  LIBS=conftstm.$ac_objext
++	  lt_save_LIBS="$LIBS"
++	  lt_save_CFLAGS="$CFLAGS"
++	  LIBS="conftstm.$ac_objext"
+ 	  CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag"
+ 	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+   (eval $ac_link) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; } && test -s conftest$ac_exeext; then
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && test -s conftest${ac_exeext}; then
+ 	    pipe_works=yes
+ 	  fi
+-	  LIBS=$lt_globsym_save_LIBS
+-	  CFLAGS=$lt_globsym_save_CFLAGS
++	  LIBS="$lt_save_LIBS"
++	  CFLAGS="$lt_save_CFLAGS"
+ 	else
+ 	  echo "cannot find nm_test_func in $nlist" >&5
+ 	fi
+@@ -9299,7 +7932,7 @@ _LT_EOF
+   rm -rf conftest* conftst*
+ 
+   # Do not use the global_symbol_pipe unless it works.
+-  if test yes = "$pipe_works"; then
++  if test "$pipe_works" = yes; then
+     break
+   else
+     lt_cv_sys_global_symbol_pipe=
+@@ -9312,18 +7945,11 @@ if test -z "$lt_cv_sys_global_symbol_pipe"; then
+   lt_cv_sys_global_symbol_to_cdecl=
+ fi
+ if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: failed" >&5
+-printf "%s\n" "failed" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5
++$as_echo "failed" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ok" >&5
+-printf "%s\n" "ok" >&6; }
+-fi
+-
+-# Response file support.
+-if test "$lt_cv_nm_interface" = "MS dumpbin"; then
+-  nm_file_list_spec='@'
+-elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then
+-  nm_file_list_spec='@'
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5
++$as_echo "ok" >&6; }
+ fi
+ 
+ 
+@@ -9346,189 +7972,46 @@ fi
+ 
+ 
+ 
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5
+-printf %s "checking for sysroot... " >&6; }
+-
+-# Check whether --with-sysroot was given.
+-if test ${with_sysroot+y}
+-then :
+-  withval=$with_sysroot;
+-else $as_nop
+-  with_sysroot=no
+-fi
+-
+-
+-lt_sysroot=
+-case $with_sysroot in #(
+- yes)
+-   if test yes = "$GCC"; then
+-     lt_sysroot=`$CC --print-sysroot 2>/dev/null`
+-   fi
+-   ;; #(
+- /*)
+-   lt_sysroot=`echo "$with_sysroot" | $SED -e "$sed_quote_subst"`
+-   ;; #(
+- no|'')
+-   ;; #(
+- *)
+-   { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_sysroot" >&5
+-printf "%s\n" "$with_sysroot" >&6; }
+-   as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5
+-   ;;
+-esac
+-
+- { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5
+-printf "%s\n" "${lt_sysroot:-no}" >&6; }
+-
+-
+-
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for a working dd" >&5
+-printf %s "checking for a working dd... " >&6; }
+-if test ${ac_cv_path_lt_DD+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  printf 0123456789abcdef0123456789abcdef >conftest.i
+-cat conftest.i conftest.i >conftest2.i
+-: ${lt_DD:=$DD}
+-if test -z "$lt_DD"; then
+-  ac_path_lt_DD_found=false
+-  # Loop through the user's path and test for each of PROGNAME-LIST
+-  as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_prog in dd
+-   do
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-      ac_path_lt_DD="$as_dir$ac_prog$ac_exec_ext"
+-      as_fn_executable_p "$ac_path_lt_DD" || continue
+-if "$ac_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
+-  cmp -s conftest.i conftest.out \
+-  && ac_cv_path_lt_DD="$ac_path_lt_DD" ac_path_lt_DD_found=:
+-fi
+-      $ac_path_lt_DD_found && break 3
+-    done
+-  done
+-  done
+-IFS=$as_save_IFS
+-  if test -z "$ac_cv_path_lt_DD"; then
+-    :
+-  fi
+-else
+-  ac_cv_path_lt_DD=$lt_DD
+-fi
+-
+-rm -f conftest.i conftest2.i conftest.out
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_lt_DD" >&5
+-printf "%s\n" "$ac_cv_path_lt_DD" >&6; }
+-
+-
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to truncate binary pipes" >&5
+-printf %s "checking how to truncate binary pipes... " >&6; }
+-if test ${lt_cv_truncate_bin+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  printf 0123456789abcdef0123456789abcdef >conftest.i
+-cat conftest.i conftest.i >conftest2.i
+-lt_cv_truncate_bin=
+-if "$ac_cv_path_lt_DD" bs=32 count=1 conftest.out 2>/dev/null; then
+-  cmp -s conftest.i conftest.out \
+-  && lt_cv_truncate_bin="$ac_cv_path_lt_DD bs=4096 count=1"
+-fi
+-rm -f conftest.i conftest2.i conftest.out
+-test -z "$lt_cv_truncate_bin" && lt_cv_truncate_bin="$SED -e 4q"
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_truncate_bin" >&5
+-printf "%s\n" "$lt_cv_truncate_bin" >&6; }
+-
+-
+-
+-
+-
+-
+-
+-# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+-func_cc_basename ()
+-{
+-    for cc_temp in $*""; do
+-      case $cc_temp in
+-        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+-        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+-        \-*) ;;
+-        *) break;;
+-      esac
+-    done
+-    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+-}
+ 
+ # Check whether --enable-libtool-lock was given.
+-if test ${enable_libtool_lock+y}
+-then :
++if test "${enable_libtool_lock+set}" = set; then :
+   enableval=$enable_libtool_lock;
+ fi
+ 
+-test no = "$enable_libtool_lock" || enable_libtool_lock=yes
++test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes
+ 
+ # Some flags need to be propagated to the compiler or linker for good
+ # libtool support.
+ case $host in
+ ia64-*-hpux*)
+-  # Find out what ABI is being produced by ac_compile, and set mode
+-  # options accordingly.
++  # Find out which ABI we are using.
+   echo 'int i;' > conftest.$ac_ext
+   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }; then
+-    case `$FILECMD conftest.$ac_objext` in
++    case `/usr/bin/file conftest.$ac_objext` in
+       *ELF-32*)
+-	HPUX_IA64_MODE=32
++	HPUX_IA64_MODE="32"
+ 	;;
+       *ELF-64*)
+-	HPUX_IA64_MODE=64
++	HPUX_IA64_MODE="64"
+ 	;;
+     esac
+   fi
+   rm -rf conftest*
+   ;;
+ *-*-irix6*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.
++  # Find out which ABI we are using.
+   echo '#line '$LINENO' "configure"' > conftest.$ac_ext
+   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }; then
+-    if test yes = "$lt_cv_prog_gnu_ld"; then
+-      case `$FILECMD conftest.$ac_objext` in
++    if test "$lt_cv_prog_gnu_ld" = yes; then
++      case `/usr/bin/file conftest.$ac_objext` in
+ 	*32-bit*)
+ 	  LD="${LD-ld} -melf32bsmip"
+ 	  ;;
+@@ -9540,7 +8023,7 @@ ia64-*-hpux*)
+ 	;;
+       esac
+     else
+-      case `$FILECMD conftest.$ac_objext` in
++      case `/usr/bin/file conftest.$ac_objext` in
+ 	*32-bit*)
+ 	  LD="${LD-ld} -32"
+ 	  ;;
+@@ -9556,64 +8039,23 @@ ia64-*-hpux*)
+   rm -rf conftest*
+   ;;
+ 
+-mips64*-*linux*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.
+-  echo '#line '$LINENO' "configure"' > conftest.$ac_ext
+-  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+-  (eval $ac_compile) 2>&5
+-  ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }; then
+-    emul=elf
+-    case `$FILECMD conftest.$ac_objext` in
+-      *32-bit*)
+-	emul="${emul}32"
+-	;;
+-      *64-bit*)
+-	emul="${emul}64"
+-	;;
+-    esac
+-    case `$FILECMD conftest.$ac_objext` in
+-      *MSB*)
+-	emul="${emul}btsmip"
+-	;;
+-      *LSB*)
+-	emul="${emul}ltsmip"
+-	;;
+-    esac
+-    case `$FILECMD conftest.$ac_objext` in
+-      *N32*)
+-	emul="${emul}n32"
+-	;;
+-    esac
+-    LD="${LD-ld} -m $emul"
+-  fi
+-  rm -rf conftest*
+-  ;;
+-
+ x86_64-*kfreebsd*-gnu|x86_64-*linux*|powerpc*-*linux*| \
+ s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.  Note that the listed cases only cover the
+-  # situations where additional linker options are needed (such as when
+-  # doing 32-bit compilation for a host where ld defaults to 64-bit, or
+-  # vice versa); the common cases where no linker options are needed do
+-  # not appear in the list.
++  # Find out which ABI we are using.
+   echo 'int i;' > conftest.$ac_ext
+   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }; then
+-    case `$FILECMD conftest.o` in
++    case `/usr/bin/file conftest.o` in
+       *32-bit*)
+ 	case $host in
+ 	  x86_64-*kfreebsd*-gnu)
+ 	    LD="${LD-ld} -m elf_i386_fbsd"
+ 	    ;;
+ 	  x86_64-*linux*)
+-	    case `$FILECMD conftest.o` in
++	    case `/usr/bin/file conftest.o` in
+ 	      *x86-64*)
+ 		LD="${LD-ld} -m elf32_x86_64"
+ 		;;
+@@ -9665,14 +8107,13 @@ s390*-*linux*|s390*-*tpf*|sparc*-*linux*)
+ 
+ *-*-sco3.2v5*)
+   # On SCO OpenServer 5, we need -belf to get full-featured binaries.
+-  SAVE_CFLAGS=$CFLAGS
++  SAVE_CFLAGS="$CFLAGS"
+   CFLAGS="$CFLAGS -belf"
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5
+-printf %s "checking whether the C compiler needs -belf... " >&6; }
+-if test ${lt_cv_cc_needs_belf+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5
++$as_echo_n "checking whether the C compiler needs -belf... " >&6; }
++if ${lt_cv_cc_needs_belf+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+@@ -9683,20 +8124,19 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   lt_cv_cc_needs_belf=yes
+-else $as_nop
++else
+   lt_cv_cc_needs_belf=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+      ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+@@ -9705,39 +8145,25 @@ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $
+ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5
+-printf "%s\n" "$lt_cv_cc_needs_belf" >&6; }
+-  if test yes != "$lt_cv_cc_needs_belf"; then
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5
++$as_echo "$lt_cv_cc_needs_belf" >&6; }
++  if test x"$lt_cv_cc_needs_belf" != x"yes"; then
+     # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf
+-    CFLAGS=$SAVE_CFLAGS
++    CFLAGS="$SAVE_CFLAGS"
+   fi
+   ;;
+-*-*solaris*)
+-  # Find out what ABI is being produced by ac_compile, and set linker
+-  # options accordingly.
++sparc*-*solaris*)
++  # Find out which ABI we are using.
+   echo 'int i;' > conftest.$ac_ext
+   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }; then
+-    case `$FILECMD conftest.o` in
++    case `/usr/bin/file conftest.o` in
+     *64-bit*)
+       case $lt_cv_prog_gnu_ld in
+-      yes*)
+-        case $host in
+-        i?86-*-solaris*|x86_64-*-solaris*)
+-          LD="${LD-ld} -m elf_x86_64"
+-          ;;
+-        sparc*-*-solaris*)
+-          LD="${LD-ld} -m elf64_sparc"
+-          ;;
+-        esac
+-        # GNU ld 2.21 introduced _sol2 emulations.  Use them if available.
+-        if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then
+-          LD=${LD-ld}_sol2
+-        fi
+-        ;;
++      yes*) LD="${LD-ld} -m elf64_sparc" ;;
+       *)
+ 	if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then
+ 	  LD="${LD-ld} -64"
+@@ -9751,135 +8177,7 @@ printf "%s\n" "$lt_cv_cc_needs_belf" >&6; }
+   ;;
+ esac
+ 
+-need_locks=$enable_libtool_lock
+-
+-if test -n "$ac_tool_prefix"; then
+-  # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args.
+-set dummy ${ac_tool_prefix}mt; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_MANIFEST_TOOL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$MANIFEST_TOOL"; then
+-  ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL
+-if test -n "$MANIFEST_TOOL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5
+-printf "%s\n" "$MANIFEST_TOOL" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-
+-fi
+-if test -z "$ac_cv_prog_MANIFEST_TOOL"; then
+-  ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL
+-  # Extract the first word of "mt", so it can be a program name with args.
+-set dummy mt; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_MANIFEST_TOOL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  if test -n "$ac_ct_MANIFEST_TOOL"; then
+-  ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test.
+-else
+-as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+-for as_dir in $PATH
+-do
+-  IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
+-    ac_cv_prog_ac_ct_MANIFEST_TOOL="mt"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
+-    break 2
+-  fi
+-done
+-  done
+-IFS=$as_save_IFS
+-
+-fi
+-fi
+-ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL
+-if test -n "$ac_ct_MANIFEST_TOOL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5
+-printf "%s\n" "$ac_ct_MANIFEST_TOOL" >&6; }
+-else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-fi
+-
+-  if test "x$ac_ct_MANIFEST_TOOL" = x; then
+-    MANIFEST_TOOL=":"
+-  else
+-    case $cross_compiling:$ac_tool_warned in
+-yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+-ac_tool_warned=yes ;;
+-esac
+-    MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL
+-  fi
+-else
+-  MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL"
+-fi
+-
+-test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5
+-printf %s "checking if $MANIFEST_TOOL is a manifest tool... " >&6; }
+-if test ${lt_cv_path_mainfest_tool+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  lt_cv_path_mainfest_tool=no
+-  echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5
+-  $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+-  cat conftest.err >&5
+-  if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+-    lt_cv_path_mainfest_tool=yes
+-  fi
+-  rm -f conftest*
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5
+-printf "%s\n" "$lt_cv_path_mainfest_tool" >&6; }
+-if test yes != "$lt_cv_path_mainfest_tool"; then
+-  MANIFEST_TOOL=:
+-fi
+-
+-
+-
+-
++need_locks="$enable_libtool_lock"
+ 
+ 
+   case $host_os in
+@@ -9887,12 +8185,11 @@ fi
+     if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}dsymutil; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_DSYMUTIL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_DSYMUTIL+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$DSYMUTIL"; then
+   ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test.
+ else
+@@ -9900,15 +8197,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -9919,11 +8212,11 @@ fi
+ fi
+ DSYMUTIL=$ac_cv_prog_DSYMUTIL
+ if test -n "$DSYMUTIL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5
+-printf "%s\n" "$DSYMUTIL" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5
++$as_echo "$DSYMUTIL" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -9932,12 +8225,11 @@ if test -z "$ac_cv_prog_DSYMUTIL"; then
+   ac_ct_DSYMUTIL=$DSYMUTIL
+   # Extract the first word of "dsymutil", so it can be a program name with args.
+ set dummy dsymutil; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_DSYMUTIL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_DSYMUTIL"; then
+   ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test.
+ else
+@@ -9945,15 +8237,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_DSYMUTIL="dsymutil"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -9964,11 +8252,11 @@ fi
+ fi
+ ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL
+ if test -n "$ac_ct_DSYMUTIL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5
+-printf "%s\n" "$ac_ct_DSYMUTIL" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5
++$as_echo "$ac_ct_DSYMUTIL" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_DSYMUTIL" = x; then
+@@ -9976,8 +8264,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     DSYMUTIL=$ac_ct_DSYMUTIL
+@@ -9989,12 +8277,11 @@ fi
+     if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}nmedit; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_NMEDIT+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_NMEDIT+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$NMEDIT"; then
+   ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test.
+ else
+@@ -10002,15 +8289,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10021,11 +8304,11 @@ fi
+ fi
+ NMEDIT=$ac_cv_prog_NMEDIT
+ if test -n "$NMEDIT"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5
+-printf "%s\n" "$NMEDIT" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5
++$as_echo "$NMEDIT" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -10034,12 +8317,11 @@ if test -z "$ac_cv_prog_NMEDIT"; then
+   ac_ct_NMEDIT=$NMEDIT
+   # Extract the first word of "nmedit", so it can be a program name with args.
+ set dummy nmedit; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_NMEDIT+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_NMEDIT"; then
+   ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test.
+ else
+@@ -10047,15 +8329,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_NMEDIT="nmedit"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10066,11 +8344,11 @@ fi
+ fi
+ ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT
+ if test -n "$ac_ct_NMEDIT"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5
+-printf "%s\n" "$ac_ct_NMEDIT" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5
++$as_echo "$ac_ct_NMEDIT" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_NMEDIT" = x; then
+@@ -10078,8 +8356,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     NMEDIT=$ac_ct_NMEDIT
+@@ -10091,12 +8369,11 @@ fi
+     if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}lipo; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_LIPO+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_LIPO+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$LIPO"; then
+   ac_cv_prog_LIPO="$LIPO" # Let the user override the test.
+ else
+@@ -10104,15 +8381,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_LIPO="${ac_tool_prefix}lipo"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10123,11 +8396,11 @@ fi
+ fi
+ LIPO=$ac_cv_prog_LIPO
+ if test -n "$LIPO"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5
+-printf "%s\n" "$LIPO" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5
++$as_echo "$LIPO" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -10136,12 +8409,11 @@ if test -z "$ac_cv_prog_LIPO"; then
+   ac_ct_LIPO=$LIPO
+   # Extract the first word of "lipo", so it can be a program name with args.
+ set dummy lipo; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_LIPO+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_LIPO+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_LIPO"; then
+   ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test.
+ else
+@@ -10149,15 +8421,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_LIPO="lipo"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10168,11 +8436,11 @@ fi
+ fi
+ ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO
+ if test -n "$ac_ct_LIPO"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5
+-printf "%s\n" "$ac_ct_LIPO" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5
++$as_echo "$ac_ct_LIPO" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_LIPO" = x; then
+@@ -10180,8 +8448,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     LIPO=$ac_ct_LIPO
+@@ -10193,12 +8461,11 @@ fi
+     if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}otool; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_OTOOL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_OTOOL+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$OTOOL"; then
+   ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test.
+ else
+@@ -10206,15 +8473,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_OTOOL="${ac_tool_prefix}otool"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10225,11 +8488,11 @@ fi
+ fi
+ OTOOL=$ac_cv_prog_OTOOL
+ if test -n "$OTOOL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5
+-printf "%s\n" "$OTOOL" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5
++$as_echo "$OTOOL" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -10238,12 +8501,11 @@ if test -z "$ac_cv_prog_OTOOL"; then
+   ac_ct_OTOOL=$OTOOL
+   # Extract the first word of "otool", so it can be a program name with args.
+ set dummy otool; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_OTOOL+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_OTOOL+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_OTOOL"; then
+   ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test.
+ else
+@@ -10251,15 +8513,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_OTOOL="otool"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10270,11 +8528,11 @@ fi
+ fi
+ ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL
+ if test -n "$ac_ct_OTOOL"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5
+-printf "%s\n" "$ac_ct_OTOOL" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5
++$as_echo "$ac_ct_OTOOL" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_OTOOL" = x; then
+@@ -10282,8 +8540,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     OTOOL=$ac_ct_OTOOL
+@@ -10295,12 +8553,11 @@ fi
+     if test -n "$ac_tool_prefix"; then
+   # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args.
+ set dummy ${ac_tool_prefix}otool64; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_OTOOL64+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_OTOOL64+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$OTOOL64"; then
+   ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test.
+ else
+@@ -10308,15 +8565,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10327,11 +8580,11 @@ fi
+ fi
+ OTOOL64=$ac_cv_prog_OTOOL64
+ if test -n "$OTOOL64"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5
+-printf "%s\n" "$OTOOL64" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5
++$as_echo "$OTOOL64" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -10340,12 +8593,11 @@ if test -z "$ac_cv_prog_OTOOL64"; then
+   ac_ct_OTOOL64=$OTOOL64
+   # Extract the first word of "otool64", so it can be a program name with args.
+ set dummy otool64; ac_word=$2
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
+-printf %s "checking for $ac_word... " >&6; }
+-if test ${ac_cv_prog_ac_ct_OTOOL64+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5
++$as_echo_n "checking for $ac_word... " >&6; }
++if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -n "$ac_ct_OTOOL64"; then
+   ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test.
+ else
+@@ -10353,15 +8605,11 @@ as_save_IFS=$IFS; IFS=$PATH_SEPARATOR
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
++  test -z "$as_dir" && as_dir=.
+     for ac_exec_ext in '' $ac_executable_extensions; do
+-  if as_fn_executable_p "$as_dir$ac_word$ac_exec_ext"; then
++  if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then
+     ac_cv_prog_ac_ct_OTOOL64="otool64"
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: found $as_dir$ac_word$ac_exec_ext" >&5
++    $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5
+     break 2
+   fi
+ done
+@@ -10372,11 +8620,11 @@ fi
+ fi
+ ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64
+ if test -n "$ac_ct_OTOOL64"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5
+-printf "%s\n" "$ac_ct_OTOOL64" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5
++$as_echo "$ac_ct_OTOOL64" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+   if test "x$ac_ct_OTOOL64" = x; then
+@@ -10384,8 +8632,8 @@ fi
+   else
+     case $cross_compiling:$ac_tool_warned in
+ yes:)
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
+-printf "%s\n" "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5
++$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;}
+ ac_tool_warned=yes ;;
+ esac
+     OTOOL64=$ac_ct_OTOOL64
+@@ -10420,14 +8668,13 @@ fi
+ 
+ 
+ 
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5
+-printf %s "checking for -single_module linker flag... " >&6; }
+-if test ${lt_cv_apple_cc_single_mod+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5
++$as_echo_n "checking for -single_module linker flag... " >&6; }
++if ${lt_cv_apple_cc_single_mod+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_apple_cc_single_mod=no
+-      if test -z "$LT_MULTI_MODULE"; then
++      if test -z "${LT_MULTI_MODULE}"; then
+ 	# By default we will add the -single_module flag. You can override
+ 	# by either setting the environment variable LT_MULTI_MODULE
+ 	# non-empty at configure time, or by adding -multi_module to the
+@@ -10439,13 +8686,7 @@ else $as_nop
+ 	$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \
+ 	  -dynamiclib -Wl,-single_module conftest.c 2>conftest.err
+         _lt_result=$?
+-	# If there is a non-empty error log, and "single_module"
+-	# appears in it, assume the flag caused a linker warning
+-        if test -s conftest.err && $GREP single_module conftest.err; then
+-	  cat conftest.err >&5
+-	# Otherwise, if the output was created with a 0 exit code from
+-	# the compiler, it worked.
+-	elif test -f libconftest.dylib && test 0 = "$_lt_result"; then
++	if test -f libconftest.dylib && test ! -s conftest.err && test $_lt_result = 0; then
+ 	  lt_cv_apple_cc_single_mod=yes
+ 	else
+ 	  cat conftest.err >&5
+@@ -10454,15 +8695,13 @@ else $as_nop
+ 	rm -f conftest.*
+       fi
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5
+-printf "%s\n" "$lt_cv_apple_cc_single_mod" >&6; }
+-
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5
+-printf %s "checking for -exported_symbols_list linker flag... " >&6; }
+-if test ${lt_cv_ld_exported_symbols_list+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5
++$as_echo "$lt_cv_apple_cc_single_mod" >&6; }
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5
++$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; }
++if ${lt_cv_ld_exported_symbols_list+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_ld_exported_symbols_list=no
+       save_LDFLAGS=$LDFLAGS
+       echo "_main" > conftest.sym
+@@ -10471,52 +8710,45 @@ else $as_nop
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   lt_cv_ld_exported_symbols_list=yes
+-else $as_nop
++else
+   lt_cv_ld_exported_symbols_list=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+-	LDFLAGS=$save_LDFLAGS
++	LDFLAGS="$save_LDFLAGS"
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5
+-printf "%s\n" "$lt_cv_ld_exported_symbols_list" >&6; }
+-
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5
+-printf %s "checking for -force_load linker flag... " >&6; }
+-if test ${lt_cv_ld_force_load+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5
++$as_echo "$lt_cv_ld_exported_symbols_list" >&6; }
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5
++$as_echo_n "checking for -force_load linker flag... " >&6; }
++if ${lt_cv_ld_force_load+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_ld_force_load=no
+       cat > conftest.c << _LT_EOF
+ int forced_loaded() { return 2;}
+ _LT_EOF
+       echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5
+       $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5
+-      echo "$AR $AR_FLAGS libconftest.a conftest.o" >&5
+-      $AR $AR_FLAGS libconftest.a conftest.o 2>&5
+-      echo "$RANLIB libconftest.a" >&5
+-      $RANLIB libconftest.a 2>&5
++      echo "$AR cru libconftest.a conftest.o" >&5
++      $AR cru libconftest.a conftest.o 2>&5
+       cat > conftest.c << _LT_EOF
+ int main() { return 0;}
+ _LT_EOF
+       echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5
+       $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err
+       _lt_result=$?
+-      if test -s conftest.err && $GREP force_load conftest.err; then
+-	cat conftest.err >&5
+-      elif test -f conftest && test 0 = "$_lt_result" && $GREP forced_load conftest >/dev/null 2>&1; then
++      if test -f conftest && test ! -s conftest.err && test $_lt_result = 0 && $GREP forced_load conftest 2>&1 >/dev/null; then
+ 	lt_cv_ld_force_load=yes
+       else
+ 	cat conftest.err >&5
+@@ -10525,31 +8757,38 @@ _LT_EOF
+         rm -rf conftest.dSYM
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5
+-printf "%s\n" "$lt_cv_ld_force_load" >&6; }
+-    case $host_os in
+-    rhapsody* | darwin1.[012])
+-      _lt_dar_allow_undefined='$wl-undefined ${wl}suppress' ;;
+-    darwin1.*)
+-      _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+-    darwin*)
+-      case $MACOSX_DEPLOYMENT_TARGET,$host in
+-        10.[012],*|,*powerpc*-darwin[5-8]*)
+-          _lt_dar_allow_undefined='$wl-flat_namespace $wl-undefined ${wl}suppress' ;;
+-        *)
+-          _lt_dar_allow_undefined='$wl-undefined ${wl}dynamic_lookup' ;;
+-      esac
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5
++$as_echo "$lt_cv_ld_force_load" >&6; }
++    # Allow for Darwin 4-7 (macOS 10.0-10.3) although these are not expect to
++    # build without first building modern cctools / linker.
++    case $host_cpu-$host_os in
++    *-rhapsody* | *-darwin1.[012])
++      _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;;
++    *-darwin1.*)
++      _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;;
++    *-darwin*)
++      # darwin 5.x (macOS 10.1) onwards we only need to adjust when the
++      # deployment target is forced to an earlier version.
++      case ${MACOSX_DEPLOYMENT_TARGET-UNSET},$host in
++	UNSET,*-darwin[89]*|UNSET,*-darwin[12][0123456789]*)
++	  ;;
++	10.[012][,.]*)
++	  _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress'
++	  ;;
++	*)
++	  ;;
++     esac
+     ;;
+   esac
+-    if test yes = "$lt_cv_apple_cc_single_mod"; then
++    if test "$lt_cv_apple_cc_single_mod" = "yes"; then
+       _lt_dar_single_mod='$single_module'
+     fi
+-    if test yes = "$lt_cv_ld_exported_symbols_list"; then
+-      _lt_dar_export_syms=' $wl-exported_symbols_list,$output_objdir/$libname-symbols.expsym'
++    if test "$lt_cv_ld_exported_symbols_list" = "yes"; then
++      _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym'
+     else
+-      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/$libname-symbols.expsym $lib'
++      _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}'
+     fi
+-    if test : != "$DSYMUTIL" && test no = "$lt_cv_ld_force_load"; then
++    if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then
+       _lt_dsymutil='~$DSYMUTIL $lib || :'
+     else
+       _lt_dsymutil=
+@@ -10557,61 +8796,21 @@ printf "%s\n" "$lt_cv_ld_force_load" >&6; }
+     ;;
+   esac
+ 
+-# func_munge_path_list VARIABLE PATH
+-# -----------------------------------
+-# VARIABLE is name of variable containing _space_ separated list of
+-# directories to be munged by the contents of PATH, which is string
+-# having a format:
+-# "DIR[:DIR]:"
+-#       string "DIR[ DIR]" will be prepended to VARIABLE
+-# ":DIR[:DIR]"
+-#       string "DIR[ DIR]" will be appended to VARIABLE
+-# "DIRP[:DIRP]::[DIRA:]DIRA"
+-#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
+-#       "DIRA[ DIRA]" will be appended to VARIABLE
+-# "DIR[:DIR]"
+-#       VARIABLE will be replaced by "DIR[ DIR]"
+-func_munge_path_list ()
+-{
+-    case x$2 in
+-    x)
+-        ;;
+-    *:)
+-        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\"
+-        ;;
+-    x:*)
+-        eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\"
+-        ;;
+-    *::*)
+-        eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
+-        eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
+-        ;;
+-    *)
+-        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\"
+-        ;;
+-    esac
+-}
+-
+-ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default
++for ac_header in dlfcn.h
++do :
++  ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default
+ "
+-if test "x$ac_cv_header_dlfcn_h" = xyes
+-then :
+-  printf "%s\n" "#define HAVE_DLFCN_H 1" >>confdefs.h
++if test "x$ac_cv_header_dlfcn_h" = xyes; then :
++  cat >>confdefs.h <<_ACEOF
++#define HAVE_DLFCN_H 1
++_ACEOF
+ 
+ fi
+ 
++done
+ 
+ 
+ 
+-func_stripname_cnf ()
+-{
+-  case $2 in
+-  .*) func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%\\\\$2\$%%"`;;
+-  *)  func_stripname_result=`$ECHO "$3" | $SED "s%^$1%%; s%$2\$%%"`;;
+-  esac
+-} # func_stripname_cnf
+-
+-
+ 
+ 
+ 
+@@ -10626,8 +8825,7 @@ func_stripname_cnf ()
+ 
+ 
+             # Check whether --enable-shared was given.
+-if test ${enable_shared+y}
+-then :
++if test "${enable_shared+set}" = set; then :
+   enableval=$enable_shared; p=${PACKAGE-default}
+     case $enableval in
+     yes) enable_shared=yes ;;
+@@ -10635,17 +8833,17 @@ then :
+     *)
+       enable_shared=no
+       # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+       for pkg in $enableval; do
+-	IFS=$lt_save_ifs
++	IFS="$lt_save_ifs"
+ 	if test "X$pkg" = "X$p"; then
+ 	  enable_shared=yes
+ 	fi
+       done
+-      IFS=$lt_save_ifs
++      IFS="$lt_save_ifs"
+       ;;
+     esac
+-else $as_nop
++else
+   enable_shared=yes
+ fi
+ 
+@@ -10658,8 +8856,7 @@ fi
+ 
+ 
+   # Check whether --enable-static was given.
+-if test ${enable_static+y}
+-then :
++if test "${enable_static+set}" = set; then :
+   enableval=$enable_static; p=${PACKAGE-default}
+     case $enableval in
+     yes) enable_static=yes ;;
+@@ -10667,17 +8864,17 @@ then :
+     *)
+      enable_static=no
+       # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+       for pkg in $enableval; do
+-	IFS=$lt_save_ifs
++	IFS="$lt_save_ifs"
+ 	if test "X$pkg" = "X$p"; then
+ 	  enable_static=yes
+ 	fi
+       done
+-      IFS=$lt_save_ifs
++      IFS="$lt_save_ifs"
+       ;;
+     esac
+-else $as_nop
++else
+   enable_static=yes
+ fi
+ 
+@@ -10691,29 +8888,15 @@ fi
+ 
+ 
+ # Check whether --with-pic was given.
+-if test ${with_pic+y}
+-then :
+-  withval=$with_pic; lt_p=${PACKAGE-default}
+-    case $withval in
+-    yes|no) pic_mode=$withval ;;
+-    *)
+-      pic_mode=default
+-      # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
+-      for lt_pkg in $withval; do
+-	IFS=$lt_save_ifs
+-	if test "X$lt_pkg" = "X$lt_p"; then
+-	  pic_mode=yes
+-	fi
+-      done
+-      IFS=$lt_save_ifs
+-      ;;
+-    esac
+-else $as_nop
++if test "${with_pic+set}" = set; then :
++  withval=$with_pic; pic_mode="$withval"
++else
+   pic_mode=default
+ fi
+ 
+ 
++test -z "$pic_mode" && pic_mode=default
++
+ 
+ 
+ 
+@@ -10721,8 +8904,7 @@ fi
+ 
+ 
+   # Check whether --enable-fast-install was given.
+-if test ${enable_fast_install+y}
+-then :
++if test "${enable_fast_install+set}" = set; then :
+   enableval=$enable_fast_install; p=${PACKAGE-default}
+     case $enableval in
+     yes) enable_fast_install=yes ;;
+@@ -10730,17 +8912,17 @@ then :
+     *)
+       enable_fast_install=no
+       # Look at the argument we got.  We use all the common list separators.
+-      lt_save_ifs=$IFS; IFS=$IFS$PATH_SEPARATOR,
++      lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR,"
+       for pkg in $enableval; do
+-	IFS=$lt_save_ifs
++	IFS="$lt_save_ifs"
+ 	if test "X$pkg" = "X$p"; then
+ 	  enable_fast_install=yes
+ 	fi
+       done
+-      IFS=$lt_save_ifs
++      IFS="$lt_save_ifs"
+       ;;
+     esac
+-else $as_nop
++else
+   enable_fast_install=yes
+ fi
+ 
+@@ -10751,65 +8933,11 @@ fi
+ 
+ 
+ 
+-  shared_archive_member_spec=
+-case $host,$enable_shared in
+-power*-*-aix[5-9]*,yes)
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking which variant of shared library versioning to provide" >&5
+-printf %s "checking which variant of shared library versioning to provide... " >&6; }
+-
+-# Check whether --with-aix-soname was given.
+-if test ${with_aix_soname+y}
+-then :
+-  withval=$with_aix_soname; case $withval in
+-    aix|svr4|both)
+-      ;;
+-    *)
+-      as_fn_error $? "Unknown argument to --with-aix-soname" "$LINENO" 5
+-      ;;
+-    esac
+-    lt_cv_with_aix_soname=$with_aix_soname
+-else $as_nop
+-  if test ${lt_cv_with_aix_soname+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  lt_cv_with_aix_soname=aix
+-fi
+-
+-    with_aix_soname=$lt_cv_with_aix_soname
+-fi
+-
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $with_aix_soname" >&5
+-printf "%s\n" "$with_aix_soname" >&6; }
+-  if test aix != "$with_aix_soname"; then
+-    # For the AIX way of multilib, we name the shared archive member
+-    # based on the bitwidth used, traditionally 'shr.o' or 'shr_64.o',
+-    # and 'shr.imp' or 'shr_64.imp', respectively, for the Import File.
+-    # Even when GNU compilers ignore OBJECT_MODE but need '-maix64' flag,
+-    # the AIX toolchain works better with OBJECT_MODE set (default 32).
+-    if test 64 = "${OBJECT_MODE-32}"; then
+-      shared_archive_member_spec=shr_64
+-    else
+-      shared_archive_member_spec=shr
+-    fi
+-  fi
+-  ;;
+-*)
+-  with_aix_soname=aix
+-  ;;
+-esac
+-
+-
+-
+-
+-
+-
+-
+ 
+ 
+ 
+ # This can be used to rebuild libtool when needed
+-LIBTOOL_DEPS=$ltmain
++LIBTOOL_DEPS="$ltmain"
+ 
+ # Always use our own libtool.
+ LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+@@ -10834,10 +8962,6 @@ LIBTOOL='$(SHELL) $(top_builddir)/libtool'
+ 
+ 
+ 
+-
+-
+-
+-
+ 
+ 
+ 
+@@ -10858,16 +8982,15 @@ test -z "$LN_S" && LN_S="ln -s"
+ 
+ 
+ 
+-if test -n "${ZSH_VERSION+set}"; then
++if test -n "${ZSH_VERSION+set}" ; then
+    setopt NO_GLOB_SUBST
+ fi
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5
+-printf %s "checking for objdir... " >&6; }
+-if test ${lt_cv_objdir+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5
++$as_echo_n "checking for objdir... " >&6; }
++if ${lt_cv_objdir+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   rm -f .libs 2>/dev/null
+ mkdir .libs 2>/dev/null
+ if test -d .libs; then
+@@ -10878,15 +9001,17 @@ else
+ fi
+ rmdir .libs 2>/dev/null
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5
+-printf "%s\n" "$lt_cv_objdir" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5
++$as_echo "$lt_cv_objdir" >&6; }
+ objdir=$lt_cv_objdir
+ 
+ 
+ 
+ 
+ 
+-printf "%s\n" "#define LT_OBJDIR \"$lt_cv_objdir/\"" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define LT_OBJDIR "$lt_cv_objdir/"
++_ACEOF
+ 
+ 
+ 
+@@ -10896,7 +9021,7 @@ aix3*)
+   # AIX sometimes has problems with the GCC collect2 program.  For some
+   # reason, if we set the COLLECT_NAMES environment variable, the problems
+   # vanish in a puff of smoke.
+-  if test set != "${COLLECT_NAMES+set}"; then
++  if test "X${COLLECT_NAMES+set}" != Xset; then
+     COLLECT_NAMES=
+     export COLLECT_NAMES
+   fi
+@@ -10907,14 +9032,14 @@ esac
+ ofile=libtool
+ can_build_shared=yes
+ 
+-# All known linkers require a '.a' archive for static linking (except MSVC and
+-# ICC, which need '.lib').
++# All known linkers require a `.a' archive for static linking (except MSVC,
++# which needs '.lib').
+ libext=a
+ 
+-with_gnu_ld=$lt_cv_prog_gnu_ld
++with_gnu_ld="$lt_cv_prog_gnu_ld"
+ 
+-old_CC=$CC
+-old_CFLAGS=$CFLAGS
++old_CC="$CC"
++old_CFLAGS="$CFLAGS"
+ 
+ # Set sane defaults for various variables
+ test -z "$CC" && CC=cc
+@@ -10923,8 +9048,15 @@ test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS
+ test -z "$LD" && LD=ld
+ test -z "$ac_objext" && ac_objext=o
+ 
+-func_cc_basename $compiler
+-cc_basename=$func_cc_basename_result
++for cc_temp in $compiler""; do
++  case $cc_temp in
++    compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
++    distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
++    \-*) ;;
++    *) break;;
++  esac
++done
++cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+ 
+ 
+ # Only perform the check for file, if the check method requires it
+@@ -10932,30 +9064,29 @@ test -z "$MAGIC_CMD" && MAGIC_CMD=file
+ case $deplibs_check_method in
+ file_magic*)
+   if test "$file_magic_cmd" = '$MAGIC_CMD'; then
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5
+-printf %s "checking for ${ac_tool_prefix}file... " >&6; }
+-if test ${lt_cv_path_MAGIC_CMD+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5
++$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; }
++if ${lt_cv_path_MAGIC_CMD+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   case $MAGIC_CMD in
+ [\\/*] |  ?:[\\/]*)
+-  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
++  lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+   ;;
+ *)
+-  lt_save_MAGIC_CMD=$MAGIC_CMD
+-  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  lt_save_MAGIC_CMD="$MAGIC_CMD"
++  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+   ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+   for ac_dir in $ac_dummy; do
+-    IFS=$lt_save_ifs
++    IFS="$lt_save_ifs"
+     test -z "$ac_dir" && ac_dir=.
+-    if test -f "$ac_dir/${ac_tool_prefix}file"; then
+-      lt_cv_path_MAGIC_CMD=$ac_dir/"${ac_tool_prefix}file"
++    if test -f $ac_dir/${ac_tool_prefix}file; then
++      lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file"
+       if test -n "$file_magic_test_file"; then
+ 	case $deplibs_check_method in
+ 	"file_magic "*)
+ 	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+-	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++	  MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ 	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ 	    $EGREP "$file_magic_regex" > /dev/null; then
+ 	    :
+@@ -10978,19 +9109,19 @@ _LT_EOF
+       break
+     fi
+   done
+-  IFS=$lt_save_ifs
+-  MAGIC_CMD=$lt_save_MAGIC_CMD
++  IFS="$lt_save_ifs"
++  MAGIC_CMD="$lt_save_MAGIC_CMD"
+   ;;
+ esac
+ fi
+ 
+-MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if test -n "$MAGIC_CMD"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+-printf "%s\n" "$MAGIC_CMD" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
++$as_echo "$MAGIC_CMD" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -10999,30 +9130,29 @@ fi
+ 
+ if test -z "$lt_cv_path_MAGIC_CMD"; then
+   if test -n "$ac_tool_prefix"; then
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for file" >&5
+-printf %s "checking for file... " >&6; }
+-if test ${lt_cv_path_MAGIC_CMD+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5
++$as_echo_n "checking for file... " >&6; }
++if ${lt_cv_path_MAGIC_CMD+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   case $MAGIC_CMD in
+ [\\/*] |  ?:[\\/]*)
+-  lt_cv_path_MAGIC_CMD=$MAGIC_CMD # Let the user override the test with a path.
++  lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path.
+   ;;
+ *)
+-  lt_save_MAGIC_CMD=$MAGIC_CMD
+-  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  lt_save_MAGIC_CMD="$MAGIC_CMD"
++  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+   ac_dummy="/usr/bin$PATH_SEPARATOR$PATH"
+   for ac_dir in $ac_dummy; do
+-    IFS=$lt_save_ifs
++    IFS="$lt_save_ifs"
+     test -z "$ac_dir" && ac_dir=.
+-    if test -f "$ac_dir/file"; then
+-      lt_cv_path_MAGIC_CMD=$ac_dir/"file"
++    if test -f $ac_dir/file; then
++      lt_cv_path_MAGIC_CMD="$ac_dir/file"
+       if test -n "$file_magic_test_file"; then
+ 	case $deplibs_check_method in
+ 	"file_magic "*)
+ 	  file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"`
+-	  MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++	  MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ 	  if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null |
+ 	    $EGREP "$file_magic_regex" > /dev/null; then
+ 	    :
+@@ -11045,19 +9175,19 @@ _LT_EOF
+       break
+     fi
+   done
+-  IFS=$lt_save_ifs
+-  MAGIC_CMD=$lt_save_MAGIC_CMD
++  IFS="$lt_save_ifs"
++  MAGIC_CMD="$lt_save_MAGIC_CMD"
+   ;;
+ esac
+ fi
+ 
+-MAGIC_CMD=$lt_cv_path_MAGIC_CMD
++MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+ if test -n "$MAGIC_CMD"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
+-printf "%s\n" "$MAGIC_CMD" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5
++$as_echo "$MAGIC_CMD" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ 
+ 
+@@ -11072,7 +9202,7 @@ esac
+ 
+ # Use C for the default configuration in the libtool script
+ 
+-lt_save_CC=$CC
++lt_save_CC="$CC"
+ ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+@@ -11126,11 +9256,15 @@ _lt_linker_boilerplate=`cat conftest.err`
+ $RM -r conftest*
+ 
+ 
++## CAVEAT EMPTOR:
++## There is no encapsulation within the following macros, do not change
++## the running order or otherwise move them around unless you know exactly
++## what you are doing...
+ if test -n "$compiler"; then
+ 
+ lt_prog_compiler_no_builtin_flag=
+ 
+-if test yes = "$GCC"; then
++if test "$GCC" = yes; then
+   case $cc_basename in
+   nvcc*)
+     lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;;
+@@ -11138,16 +9272,15 @@ if test yes = "$GCC"; then
+     lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;;
+   esac
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
+-printf %s "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
+-if test ${lt_cv_prog_compiler_rtti_exceptions+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5
++$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... " >&6; }
++if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_rtti_exceptions=no
+    ac_outfile=conftest.$ac_objext
+    echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+-   lt_compiler_flag="-fno-rtti -fno-exceptions"  ## exclude from sc_useless_quotes_in_assignment
++   lt_compiler_flag="-fno-rtti -fno-exceptions"
+    # Insert the option either (1) after the last *FLAGS variable, or
+    # (2) before a word containing "conftest.", or (3) at the end.
+    # Note that $ac_compile itself does not contain backslashes and begins
+@@ -11174,10 +9307,10 @@ else $as_nop
+    $RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_rtti_exceptions" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5
++$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; }
+ 
+-if test yes = "$lt_cv_prog_compiler_rtti_exceptions"; then
++if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then
+     lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions"
+ else
+     :
+@@ -11194,15 +9327,17 @@ fi
+ lt_prog_compiler_pic=
+ lt_prog_compiler_static=
+ 
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
++$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+ 
+-  if test yes = "$GCC"; then
++  if test "$GCC" = yes; then
+     lt_prog_compiler_wl='-Wl,'
+     lt_prog_compiler_static='-static'
+ 
+     case $host_os in
+       aix*)
+       # All AIX code is PIC.
+-      if test ia64 = "$host_cpu"; then
++      if test "$host_cpu" = ia64; then
+ 	# AIX 5 now supports IA64 processor
+ 	lt_prog_compiler_static='-Bstatic'
+       fi
+@@ -11217,8 +9352,8 @@ lt_prog_compiler_static=
+         ;;
+       m68k)
+             # FIXME: we need at least 68020 code to build shared libraries, but
+-            # adding the '-m68020' flag to GCC prevents building anything better,
+-            # like '-m68040'.
++            # adding the `-m68020' flag to GCC prevents building anything better,
++            # like `-m68040'.
+             lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4'
+         ;;
+       esac
+@@ -11234,11 +9369,6 @@ lt_prog_compiler_static=
+       # Although the cygwin gcc ignores -fPIC, still need this for old-style
+       # (--disable-auto-import) libraries
+       lt_prog_compiler_pic='-DDLL_EXPORT'
+-      case $host_os in
+-      os2*)
+-	lt_prog_compiler_static='$wl-static'
+-	;;
+-      esac
+       ;;
+ 
+     darwin* | rhapsody*)
+@@ -11299,9 +9429,7 @@ lt_prog_compiler_static=
+     case $cc_basename in
+     nvcc*) # Cuda Compiler Driver 2.2
+       lt_prog_compiler_wl='-Xlinker '
+-      if test -n "$lt_prog_compiler_pic"; then
+-        lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic"
+-      fi
++      lt_prog_compiler_pic='-Xcompiler -fPIC'
+       ;;
+     esac
+   else
+@@ -11309,7 +9437,7 @@ lt_prog_compiler_static=
+     case $host_os in
+     aix*)
+       lt_prog_compiler_wl='-Wl,'
+-      if test ia64 = "$host_cpu"; then
++      if test "$host_cpu" = ia64; then
+ 	# AIX 5 now supports IA64 processor
+ 	lt_prog_compiler_static='-Bstatic'
+       else
+@@ -11317,29 +9445,10 @@ lt_prog_compiler_static=
+       fi
+       ;;
+ 
+-    darwin* | rhapsody*)
+-      # PIC is the default on this platform
+-      # Common symbols not allowed in MH_DYLIB files
+-      lt_prog_compiler_pic='-fno-common'
+-      case $cc_basename in
+-      nagfor*)
+-        # NAG Fortran compiler
+-        lt_prog_compiler_wl='-Wl,-Wl,,'
+-        lt_prog_compiler_pic='-PIC'
+-        lt_prog_compiler_static='-Bstatic'
+-        ;;
+-      esac
+-      ;;
+-
+     mingw* | cygwin* | pw32* | os2* | cegcc*)
+       # This hack is so that the source file can tell whether it is being
+       # built for inclusion in a dll (and should export symbols for example).
+       lt_prog_compiler_pic='-DDLL_EXPORT'
+-      case $host_os in
+-      os2*)
+-	lt_prog_compiler_static='$wl-static'
+-	;;
+-      esac
+       ;;
+ 
+     hpux9* | hpux10* | hpux11*)
+@@ -11355,7 +9464,7 @@ lt_prog_compiler_static=
+ 	;;
+       esac
+       # Is there a better lt_prog_compiler_static that works with the bundled CC?
+-      lt_prog_compiler_static='$wl-a ${wl}archive'
++      lt_prog_compiler_static='${wl}-a ${wl}archive'
+       ;;
+ 
+     irix5* | irix6* | nonstopux*)
+@@ -11364,9 +9473,9 @@ lt_prog_compiler_static=
+       lt_prog_compiler_static='-non_shared'
+       ;;
+ 
+-    linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++    linux* | k*bsd*-gnu | kopensolaris*-gnu)
+       case $cc_basename in
+-      # old Intel for x86_64, which still supported -KPIC.
++      # old Intel for x86_64 which still supported -KPIC.
+       ecc*)
+ 	lt_prog_compiler_wl='-Wl,'
+ 	lt_prog_compiler_pic='-KPIC'
+@@ -11385,18 +9494,6 @@ lt_prog_compiler_static=
+ 	lt_prog_compiler_pic='--shared'
+ 	lt_prog_compiler_static='--static'
+ 	;;
+-      nagfor*)
+-	# NAG Fortran compiler
+-	lt_prog_compiler_wl='-Wl,-Wl,,'
+-	lt_prog_compiler_pic='-PIC'
+-	lt_prog_compiler_static='-Bstatic'
+-	;;
+-      tcc*)
+-	# Fabrice Bellard et al's Tiny C Compiler
+-	lt_prog_compiler_wl='-Wl,'
+-	lt_prog_compiler_pic='-fPIC'
+-	lt_prog_compiler_static='-static'
+-	;;
+       pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*)
+         # Portland Group compilers (*not* the Pentium gcc compiler,
+ 	# which looks to be a dead project)
+@@ -11416,34 +9513,19 @@ lt_prog_compiler_static=
+ 	lt_prog_compiler_static='-qstaticlink'
+ 	;;
+       *)
+-	case `$CC -V 2>&1 | $SED 5q` in
+-	*Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*)
++	case `$CC -V 2>&1 | sed 5q` in
++	*Sun\ F* | *Sun*Fortran*)
+ 	  # Sun Fortran 8.3 passes all unrecognized flags to the linker
+ 	  lt_prog_compiler_pic='-KPIC'
+ 	  lt_prog_compiler_static='-Bstatic'
+ 	  lt_prog_compiler_wl=''
+ 	  ;;
+-	*Sun\ F* | *Sun*Fortran*)
+-	  lt_prog_compiler_pic='-KPIC'
+-	  lt_prog_compiler_static='-Bstatic'
+-	  lt_prog_compiler_wl='-Qoption ld '
+-	  ;;
+ 	*Sun\ C*)
+ 	  # Sun C 5.9
+ 	  lt_prog_compiler_pic='-KPIC'
+ 	  lt_prog_compiler_static='-Bstatic'
+ 	  lt_prog_compiler_wl='-Wl,'
+ 	  ;;
+-        *Intel*\ [CF]*Compiler*)
+-	  lt_prog_compiler_wl='-Wl,'
+-	  lt_prog_compiler_pic='-fPIC'
+-	  lt_prog_compiler_static='-static'
+-	  ;;
+-	*Portland\ Group*)
+-	  lt_prog_compiler_wl='-Wl,'
+-	  lt_prog_compiler_pic='-fpic'
+-	  lt_prog_compiler_static='-Bstatic'
+-	  ;;
+ 	esac
+ 	;;
+       esac
+@@ -11474,7 +9556,7 @@ lt_prog_compiler_static=
+       lt_prog_compiler_pic='-KPIC'
+       lt_prog_compiler_static='-Bstatic'
+       case $cc_basename in
+-      f77* | f90* | f95* | sunf77* | sunf90* | sunf95*)
++      f77* | f90* | f95*)
+ 	lt_prog_compiler_wl='-Qoption ld ';;
+       *)
+ 	lt_prog_compiler_wl='-Wl,';;
+@@ -11494,7 +9576,7 @@ lt_prog_compiler_static=
+       ;;
+ 
+     sysv4*MP*)
+-      if test -d /usr/nec; then
++      if test -d /usr/nec ;then
+ 	lt_prog_compiler_pic='-Kconform_pic'
+ 	lt_prog_compiler_static='-Bstatic'
+       fi
+@@ -11523,7 +9605,7 @@ lt_prog_compiler_static=
+   fi
+ 
+ case $host_os in
+-  # For platforms that do not support PIC, -DPIC is meaningless:
++  # For platforms which do not support PIC, -DPIC is meaningless:
+   *djgpp*)
+     lt_prog_compiler_pic=
+     ;;
+@@ -11531,33 +9613,27 @@ case $host_os in
+     lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC"
+     ;;
+ esac
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic" >&5
++$as_echo "$lt_prog_compiler_pic" >&6; }
++
++
++
++
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+-printf %s "checking for $compiler option to produce PIC... " >&6; }
+-if test ${lt_cv_prog_compiler_pic+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  lt_cv_prog_compiler_pic=$lt_prog_compiler_pic
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_pic" >&6; }
+-lt_prog_compiler_pic=$lt_cv_prog_compiler_pic
+ 
+ #
+ # Check to make sure the PIC flag actually works.
+ #
+ if test -n "$lt_prog_compiler_pic"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
+-printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
+-if test ${lt_cv_prog_compiler_pic_works+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5
++$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... " >&6; }
++if ${lt_cv_prog_compiler_pic_works+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_pic_works=no
+    ac_outfile=conftest.$ac_objext
+    echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+-   lt_compiler_flag="$lt_prog_compiler_pic -DPIC"  ## exclude from sc_useless_quotes_in_assignment
++   lt_compiler_flag="$lt_prog_compiler_pic -DPIC"
+    # Insert the option either (1) after the last *FLAGS variable, or
+    # (2) before a word containing "conftest.", or (3) at the end.
+    # Note that $ac_compile itself does not contain backslashes and begins
+@@ -11584,10 +9660,10 @@ else $as_nop
+    $RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_pic_works" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5
++$as_echo "$lt_cv_prog_compiler_pic_works" >&6; }
+ 
+-if test yes = "$lt_cv_prog_compiler_pic_works"; then
++if test x"$lt_cv_prog_compiler_pic_works" = xyes; then
+     case $lt_prog_compiler_pic in
+      "" | " "*) ;;
+      *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;;
+@@ -11604,23 +9680,17 @@ fi
+ 
+ 
+ 
+-
+-
+-
+-
+-
+ #
+ # Check to make sure the static flag actually works.
+ #
+ wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\"
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+-printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+-if test ${lt_cv_prog_compiler_static_works+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
++$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
++if ${lt_cv_prog_compiler_static_works+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_static_works=no
+-   save_LDFLAGS=$LDFLAGS
++   save_LDFLAGS="$LDFLAGS"
+    LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+    echo "$lt_simple_link_test_code" > conftest.$ac_ext
+    if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+@@ -11639,13 +9709,13 @@ else $as_nop
+      fi
+    fi
+    $RM -r conftest*
+-   LDFLAGS=$save_LDFLAGS
++   LDFLAGS="$save_LDFLAGS"
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_static_works" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5
++$as_echo "$lt_cv_prog_compiler_static_works" >&6; }
+ 
+-if test yes = "$lt_cv_prog_compiler_static_works"; then
++if test x"$lt_cv_prog_compiler_static_works" = xyes; then
+     :
+ else
+     lt_prog_compiler_static=
+@@ -11657,12 +9727,11 @@ fi
+ 
+ 
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+-printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+-if test ${lt_cv_prog_compiler_c_o+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if ${lt_cv_prog_compiler_c_o+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_c_o=no
+    $RM -r conftest 2>/dev/null
+    mkdir conftest
+@@ -11705,20 +9774,19 @@ else $as_nop
+    $RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
++$as_echo "$lt_cv_prog_compiler_c_o" >&6; }
+ 
+ 
+ 
+ 
+ 
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+-printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+-if test ${lt_cv_prog_compiler_c_o+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if ${lt_cv_prog_compiler_c_o+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_c_o=no
+    $RM -r conftest 2>/dev/null
+    mkdir conftest
+@@ -11761,28 +9829,28 @@ else $as_nop
+    $RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_c_o" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5
++$as_echo "$lt_cv_prog_compiler_c_o" >&6; }
+ 
+ 
+ 
+ 
+-hard_links=nottested
+-if test no = "$lt_cv_prog_compiler_c_o" && test no != "$need_locks"; then
++hard_links="nottested"
++if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then
+   # do not overwrite the value of need_locks provided by the user
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+-printf %s "checking if we can lock with hard links... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
++$as_echo_n "checking if we can lock with hard links... " >&6; }
+   hard_links=yes
+   $RM conftest*
+   ln conftest.a conftest.b 2>/dev/null && hard_links=no
+   touch conftest.a
+   ln conftest.a conftest.b 2>&5 || hard_links=no
+   ln conftest.a conftest.b 2>/dev/null && hard_links=no
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+-printf "%s\n" "$hard_links" >&6; }
+-  if test no = "$hard_links"; then
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5
+-printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;}
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
++$as_echo "$hard_links" >&6; }
++  if test "$hard_links" = no; then
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
++$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+     need_locks=warn
+   fi
+ else
+@@ -11794,8 +9862,8 @@ fi
+ 
+ 
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+-printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
++$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+ 
+   runpath_var=
+   allow_undefined_flag=
+@@ -11810,6 +9878,7 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+   hardcode_direct=no
+   hardcode_direct_absolute=no
+   hardcode_libdir_flag_spec=
++  hardcode_libdir_flag_spec_ld=
+   hardcode_libdir_separator=
+   hardcode_minus_L=no
+   hardcode_shlibpath_var=unsupported
+@@ -11825,9 +9894,9 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+   # included in the symbol list
+   include_expsyms=
+   # exclude_expsyms can be an extended regexp of symbols to exclude
+-  # it will be wrapped by ' (' and ')$', so one must not match beginning or
+-  # end of line.  Example: 'a|bc|.*d.*' will exclude the symbols 'a' and 'bc',
+-  # as well as any symbol that contains 'd'.
++  # it will be wrapped by ` (' and `)$', so one must not match beginning or
++  # end of line.  Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc',
++  # as well as any symbol that contains `d'.
+   exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+   # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out
+   # platforms (ab)use it in PIC code, but their linkers get confused if
+@@ -11839,18 +9908,18 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+ 
+   case $host_os in
+   cygwin* | mingw* | pw32* | cegcc*)
+-    # FIXME: the MSVC++ and ICC port hasn't been tested in a loooong time
++    # FIXME: the MSVC++ port hasn't been tested in a loooong time
+     # When not using gcc, we currently assume that we are using
+-    # Microsoft Visual C++ or Intel C++ Compiler.
+-    if test yes != "$GCC"; then
++    # Microsoft Visual C++.
++    if test "$GCC" != yes; then
+       with_gnu_ld=no
+     fi
+     ;;
+   interix*)
+-    # we just hope/assume this is gcc and not c89 (= MSVC++ or ICC)
++    # we just hope/assume this is gcc and not c89 (= MSVC++)
+     with_gnu_ld=yes
+     ;;
+-  openbsd* | bitrig*)
++  openbsd*)
+     with_gnu_ld=no
+     ;;
+   esac
+@@ -11860,7 +9929,7 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+   # On some targets, GNU ld is compatible enough with the native linker
+   # that we're better off using the native interface for both.
+   lt_use_gnu_ld_interface=no
+-  if test yes = "$with_gnu_ld"; then
++  if test "$with_gnu_ld" = yes; then
+     case $host_os in
+       aix*)
+ 	# The AIX port of GNU ld has always aspired to compatibility
+@@ -11882,24 +9951,24 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+     esac
+   fi
+ 
+-  if test yes = "$lt_use_gnu_ld_interface"; then
++  if test "$lt_use_gnu_ld_interface" = yes; then
+     # If archive_cmds runs LD, not CC, wlarc should be empty
+-    wlarc='$wl'
++    wlarc='${wl}'
+ 
+     # Set some defaults for GNU ld with shared library support. These
+     # are reset later if shared libraries are not supported. Putting them
+     # here allows them to be overridden if necessary.
+     runpath_var=LD_RUN_PATH
+-    hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+-    export_dynamic_flag_spec='$wl--export-dynamic'
++    hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
++    export_dynamic_flag_spec='${wl}--export-dynamic'
+     # ancient GNU ld didn't support --whole-archive et. al.
+     if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then
+-      whole_archive_flag_spec=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++      whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+     else
+       whole_archive_flag_spec=
+     fi
+     supports_anon_versioning=no
+-    case `$LD -v | $SED -e 's/([^)]\+)\s\+//' 2>&1` in
++    case `$LD -v 2>&1` in
+       *GNU\ gold*) supports_anon_versioning=yes ;;
+       *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11
+       *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ...
+@@ -11912,7 +9981,7 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+     case $host_os in
+     aix[3-9]*)
+       # On AIX/PPC, the GNU linker is very broken
+-      if test ia64 != "$host_cpu"; then
++      if test "$host_cpu" != ia64; then
+ 	ld_shlibs=no
+ 	cat <<_LT_EOF 1>&2
+ 
+@@ -11931,7 +10000,7 @@ _LT_EOF
+       case $host_cpu in
+       powerpc)
+             # see comment about AmigaOS4 .so support
+-            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+             archive_expsym_cmds=''
+         ;;
+       m68k)
+@@ -11947,7 +10016,7 @@ _LT_EOF
+ 	allow_undefined_flag=unsupported
+ 	# Joseph Beckenbach  says some releases of gcc
+ 	# support --undefined.  This deserves some investigation.  FIXME
+-	archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+       else
+ 	ld_shlibs=no
+       fi
+@@ -11957,98 +10026,68 @@ _LT_EOF
+       # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless,
+       # as there is no search path for DLLs.
+       hardcode_libdir_flag_spec='-L$libdir'
+-      export_dynamic_flag_spec='$wl--export-all-symbols'
++      export_dynamic_flag_spec='${wl}--export-all-symbols'
+       allow_undefined_flag=unsupported
+       always_export_symbols=no
+       enable_shared_with_static_runtimes=yes
+-      export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+-      exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
++      export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+ 
+       if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+-        archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+-	# If the export-symbols file already is a .def file, use it as
+-	# is; otherwise, prepend EXPORTS...
+-	archive_expsym_cmds='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+-          cp $export_symbols $output_objdir/$soname.def;
+-        else
+-          echo EXPORTS > $output_objdir/$soname.def;
+-          cat $export_symbols >> $output_objdir/$soname.def;
+-        fi~
+-        $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++        archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++	# If the export-symbols file already is a .def file (1st line
++	# is EXPORTS), use it as is; otherwise, prepend...
++	archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
++	  cp $export_symbols $output_objdir/$soname.def;
++	else
++	  echo EXPORTS > $output_objdir/$soname.def;
++	  cat $export_symbols >> $output_objdir/$soname.def;
++	fi~
++	$CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+       else
+ 	ld_shlibs=no
+       fi
+       ;;
+ 
+     haiku*)
+-      archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++      archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+       link_all_deplibs=yes
+       ;;
+ 
+-    os2*)
+-      hardcode_libdir_flag_spec='-L$libdir'
+-      hardcode_minus_L=yes
+-      allow_undefined_flag=unsupported
+-      shrext_cmds=.dll
+-      archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	prefix_cmds="$SED"~
+-	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+-	  prefix_cmds="$prefix_cmds -e 1d";
+-	fi~
+-	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+-	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+-      enable_shared_with_static_runtimes=yes
+-      file_list_spec='@'
+-      ;;
+-
+     interix[3-9]*)
+       hardcode_direct=no
+       hardcode_shlibpath_var=no
+-      hardcode_libdir_flag_spec='$wl-rpath,$libdir'
+-      export_dynamic_flag_spec='$wl-E'
++      hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
++      export_dynamic_flag_spec='${wl}-E'
+       # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+       # Instead, shared libraries are loaded at an image base (0x10000000 by
+       # default) and relocated if they conflict, which is a slow very memory
+       # consuming and fragmenting process.  To avoid this, we pick a random,
+       # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+       # time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+-      archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+-      archive_expsym_cmds='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++      archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++      archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+       ;;
+ 
+-    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu)
++    gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu | uclinuxfdpiceabi)
+       tmp_diet=no
+-      if test linux-dietlibc = "$host_os"; then
++      if test "$host_os" = linux-dietlibc; then
+ 	case $cc_basename in
+ 	  diet\ *) tmp_diet=yes;;	# linux-dietlibc with static linking (!diet-dyn)
+ 	esac
+       fi
+       if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \
+-	 && test no = "$tmp_diet"
++	 && test "$tmp_diet" = no
+       then
+ 	tmp_addflag=' $pic_flag'
+ 	tmp_sharedflag='-shared'
+ 	case $cc_basename,$host_cpu in
+         pgcc*)				# Portland Group C compiler
+-	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ 	  tmp_addflag=' $pic_flag'
+ 	  ;;
+ 	pgf77* | pgf90* | pgf95* | pgfortran*)
+ 					# Portland Group f77 and f90 compilers
+-	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ 	  tmp_addflag=' $pic_flag -Mnomain' ;;
+ 	ecc*,ia64* | icc*,ia64*)	# Intel C compiler on ia64
+ 	  tmp_addflag=' -i_dynamic' ;;
+@@ -12059,47 +10098,43 @@ _LT_EOF
+ 	lf95*)				# Lahey Fortran 8.1
+ 	  whole_archive_flag_spec=
+ 	  tmp_sharedflag='--shared' ;;
+-        nagfor*)                        # NAGFOR 5.3
+-          tmp_sharedflag='-Wl,-shared' ;;
+ 	xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below)
+ 	  tmp_sharedflag='-qmkshrobj'
+ 	  tmp_addflag= ;;
+ 	nvcc*)	# Cuda Compiler Driver 2.2
+-	  whole_archive_flag_spec='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ 	  compiler_needs_object=yes
+ 	  ;;
+ 	esac
+-	case `$CC -V 2>&1 | $SED 5q` in
++	case `$CC -V 2>&1 | sed 5q` in
+ 	*Sun\ C*)			# Sun C 5.9
+-	  whole_archive_flag_spec='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	  whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ 	  compiler_needs_object=yes
+ 	  tmp_sharedflag='-G' ;;
+ 	*Sun\ F*)			# Sun Fortran 8.3
+ 	  tmp_sharedflag='-G' ;;
+ 	esac
+-	archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ 
+-        if test yes = "$supports_anon_versioning"; then
++        if test "x$supports_anon_versioning" = xyes; then
+           archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+-            cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+-            echo "local: *; };" >> $output_objdir/$libname.ver~
+-            $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
++	    cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++	    echo "local: *; };" >> $output_objdir/$libname.ver~
++	    $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+         fi
+ 
+ 	case $cc_basename in
+-	tcc*)
+-	  export_dynamic_flag_spec='-rdynamic'
+-	  ;;
+ 	xlf* | bgf* | bgxlf* | mpixlf*)
+ 	  # IBM XL Fortran 10.1 on PPC cannot create shared libs itself
+ 	  whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive'
+-	  hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+-	  archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib'
+-	  if test yes = "$supports_anon_versioning"; then
++	  hardcode_libdir_flag_spec=
++	  hardcode_libdir_flag_spec_ld='-rpath $libdir'
++	  archive_cmds='$LD -shared $libobjs $deplibs $compiler_flags -soname $soname -o $lib'
++	  if test "x$supports_anon_versioning" = xyes; then
+ 	    archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~
+-              cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+-              echo "local: *; };" >> $output_objdir/$libname.ver~
+-              $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
++	      cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++	      echo "local: *; };" >> $output_objdir/$libname.ver~
++	      $LD -shared $libobjs $deplibs $compiler_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib'
+ 	  fi
+ 	  ;;
+ 	esac
+@@ -12113,8 +10148,8 @@ _LT_EOF
+ 	archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib'
+ 	wlarc=
+       else
+-	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
++	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+       fi
+       ;;
+ 
+@@ -12132,8 +10167,8 @@ _LT_EOF
+ 
+ _LT_EOF
+       elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
++	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+       else
+ 	ld_shlibs=no
+       fi
+@@ -12145,7 +10180,7 @@ _LT_EOF
+ 	ld_shlibs=no
+ 	cat <<_LT_EOF 1>&2
+ 
+-*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 cannot
++*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not
+ *** reliably create shared libraries on SCO systems.  Therefore, libtool
+ *** is disabling shared libraries support.  We urge you to upgrade GNU
+ *** binutils to release 2.16.91.0.3 or newer.  Another option is to modify
+@@ -12160,9 +10195,9 @@ _LT_EOF
+ 	  # DT_RUNPATH tag from executables and libraries.  But doing so
+ 	  # requires that you compile everything twice, which is a pain.
+ 	  if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	    hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
+-	    archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	    archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	    hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
++	    archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
++	    archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ 	  else
+ 	    ld_shlibs=no
+ 	  fi
+@@ -12179,15 +10214,15 @@ _LT_EOF
+ 
+     *)
+       if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then
+-	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
++	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+       else
+ 	ld_shlibs=no
+       fi
+       ;;
+     esac
+ 
+-    if test no = "$ld_shlibs"; then
++    if test "$ld_shlibs" = no; then
+       runpath_var=
+       hardcode_libdir_flag_spec=
+       export_dynamic_flag_spec=
+@@ -12203,7 +10238,7 @@ _LT_EOF
+       # Note: this linker hardcodes the directories in LIBPATH if there
+       # are no directories specified by -L.
+       hardcode_minus_L=yes
+-      if test yes = "$GCC" && test -z "$lt_prog_compiler_static"; then
++      if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then
+ 	# Neither direct hardcoding nor static linking is supported with a
+ 	# broken collect2.
+ 	hardcode_direct=unsupported
+@@ -12211,57 +10246,34 @@ _LT_EOF
+       ;;
+ 
+     aix[4-9]*)
+-      if test ia64 = "$host_cpu"; then
++      if test "$host_cpu" = ia64; then
+ 	# On IA64, the linker does run time linking by default, so we don't
+ 	# have to do anything special.
+ 	aix_use_runtimelinking=no
+ 	exp_sym_flag='-Bexport'
+-	no_entry_flag=
++	no_entry_flag=""
+       else
+ 	# If we're using GNU nm, then we don't want the "-C" option.
+-	# -C means demangle to GNU nm, but means don't demangle to AIX nm.
+-	# Without the "-l" option, or with the "-B" option, AIX nm treats
+-	# weak defined symbols like other global defined symbols, whereas
+-	# GNU nm marks them as "W".
+-	# While the 'weak' keyword is ignored in the Export File, we need
+-	# it in the Import File for the 'aix-soname' feature, so we have
+-	# to replace the "-B" option with "-P" for AIX nm.
++	# -C means demangle to AIX nm, but means don't demangle with GNU nm
++	# Also, AIX nm treats weak defined symbols like other global
++	# defined symbols, whereas GNU nm marks them as "W".
+ 	if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+-	  export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
++	  export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ 	else
+-	  export_symbols_cmds='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
++	  export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+ 	fi
+ 	aix_use_runtimelinking=no
+ 
+ 	# Test if we are trying to use run time linking or normal
+ 	# AIX style linking. If -brtl is somewhere in LDFLAGS, we
+-	# have runtime linking enabled, and use it for executables.
+-	# For shared libraries, we enable/disable runtime linking
+-	# depending on the kind of the shared library created -
+-	# when "with_aix_soname,aix_use_runtimelinking" is:
+-	# "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+-	# "aix,yes"  lib.so          shared, rtl:yes, for executables
+-	#            lib.a           static archive
+-	# "both,no"  lib.so.V(shr.o) shared, rtl:yes
+-	#            lib.a(lib.so.V) shared, rtl:no,  for executables
+-	# "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+-	#            lib.a(lib.so.V) shared, rtl:no
+-	# "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+-	#            lib.a           static archive
++	# need to do runtime linking.
+ 	case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+ 	  for ld_flag in $LDFLAGS; do
+-	  if (test x-brtl = "x$ld_flag" || test x-Wl,-brtl = "x$ld_flag"); then
++	  if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then
+ 	    aix_use_runtimelinking=yes
+ 	    break
+ 	  fi
+ 	  done
+-	  if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+-	    # With aix-soname=svr4, we create the lib.so.V shared archives only,
+-	    # so we don't have lib.a shared libs to link our executables.
+-	    # We have to force runtime linking in this case.
+-	    aix_use_runtimelinking=yes
+-	    LDFLAGS="$LDFLAGS -Wl,-brtl"
+-	  fi
+ 	  ;;
+ 	esac
+ 
+@@ -12280,21 +10292,13 @@ _LT_EOF
+       hardcode_direct_absolute=yes
+       hardcode_libdir_separator=':'
+       link_all_deplibs=yes
+-      file_list_spec='$wl-f,'
+-      case $with_aix_soname,$aix_use_runtimelinking in
+-      aix,*) ;; # traditional, no import file
+-      svr4,* | *,yes) # use import file
+-	# The Import File defines what to hardcode.
+-	hardcode_direct=no
+-	hardcode_direct_absolute=no
+-	;;
+-      esac
++      file_list_spec='${wl}-f,'
+ 
+-      if test yes = "$GCC"; then
++      if test "$GCC" = yes; then
+ 	case $host_os in aix4.[012]|aix4.[012].*)
+ 	# We only want to do this on AIX 4.2 and lower, the check
+ 	# below for broken collect2 doesn't work under 4.3+
+-	  collect2name=`$CC -print-prog-name=collect2`
++	  collect2name=`${CC} -print-prog-name=collect2`
+ 	  if test -f "$collect2name" &&
+ 	   strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ 	  then
+@@ -12313,168 +10317,119 @@ _LT_EOF
+ 	  ;;
+ 	esac
+ 	shared_flag='-shared'
+-	if test yes = "$aix_use_runtimelinking"; then
+-	  shared_flag="$shared_flag "'$wl-G'
++	if test "$aix_use_runtimelinking" = yes; then
++	  shared_flag="$shared_flag "'${wl}-G'
+ 	fi
+-	# Need to ensure runtime linking is disabled for the traditional
+-	# shared library, or the linker may eventually find shared libraries
+-	# /with/ Import File - we do not want to mix them.
+-	shared_flag_aix='-shared'
+-	shared_flag_svr4='-shared $wl-G'
+       else
+ 	# not using gcc
+-	if test ia64 = "$host_cpu"; then
++	if test "$host_cpu" = ia64; then
+ 	# VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ 	# chokes on -Wl,-G. The following line is correct:
+ 	  shared_flag='-G'
+ 	else
+-	  if test yes = "$aix_use_runtimelinking"; then
+-	    shared_flag='$wl-G'
++	  if test "$aix_use_runtimelinking" = yes; then
++	    shared_flag='${wl}-G'
+ 	  else
+-	    shared_flag='$wl-bM:SRE'
++	    shared_flag='${wl}-bM:SRE'
+ 	  fi
+-	  shared_flag_aix='$wl-bM:SRE'
+-	  shared_flag_svr4='$wl-G'
+ 	fi
+       fi
+ 
+-      export_dynamic_flag_spec='$wl-bexpall'
++      export_dynamic_flag_spec='${wl}-bexpall'
+       # It seems that -bexpall does not export symbols beginning with
+       # underscore (_), so it is better to generate a list of symbols to export.
+       always_export_symbols=yes
+-      if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
++      if test "$aix_use_runtimelinking" = yes; then
+ 	# Warning - without using the other runtime loading flags (-brtl),
+ 	# -berok will link without error, but may produce a broken library.
+ 	allow_undefined_flag='-berok'
+         # Determine the default libpath from the value encoded in an
+         # empty executable.
+-        if test set = "${lt_cv_aix_libpath+set}"; then
+-  aix_libpath=$lt_cv_aix_libpath
+-else
+-  if test ${lt_cv_aix_libpath_+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++        cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
+-
+-  lt_aix_libpath_sed='
+-      /Import File Strings/,/^$/ {
+-	  /^0/ {
+-	      s/^0  *\([^ ]*\) *$/\1/
+-	      p
+-	  }
+-      }'
+-  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  # Check for a 64-bit object if we didn't find anything.
+-  if test -z "$lt_cv_aix_libpath_"; then
+-    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  fi
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
+-    conftest$ac_exeext conftest.$ac_ext
+-  if test -z "$lt_cv_aix_libpath_"; then
+-    lt_cv_aix_libpath_=/usr/lib:/lib
+-  fi
++if ac_fn_c_try_link "$LINENO"; then :
+ 
++lt_aix_libpath_sed='
++    /Import File Strings/,/^$/ {
++	/^0/ {
++	    s/^0  *\(.*\)$/\1/
++	    p
++	}
++    }'
++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++# Check for a 64-bit object if we didn't find anything.
++if test -z "$aix_libpath"; then
++  aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+-
+-  aix_libpath=$lt_cv_aix_libpath_
+ fi
++rm -f core conftest.err conftest.$ac_objext \
++    conftest$ac_exeext conftest.$ac_ext
++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+ 
+-        hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath"
+-        archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
++        hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
++        archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+       else
+-	if test ia64 = "$host_cpu"; then
+-	  hardcode_libdir_flag_spec='$wl-R $libdir:/usr/lib:/lib'
++	if test "$host_cpu" = ia64; then
++	  hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib'
+ 	  allow_undefined_flag="-z nodefs"
+-	  archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
++	  archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+ 	else
+ 	 # Determine the default libpath from the value encoded in an
+ 	 # empty executable.
+-	 if test set = "${lt_cv_aix_libpath+set}"; then
+-  aix_libpath=$lt_cv_aix_libpath
+-else
+-  if test ${lt_cv_aix_libpath_+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++	 cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
+-
+-  lt_aix_libpath_sed='
+-      /Import File Strings/,/^$/ {
+-	  /^0/ {
+-	      s/^0  *\([^ ]*\) *$/\1/
+-	      p
+-	  }
+-      }'
+-  lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  # Check for a 64-bit object if we didn't find anything.
+-  if test -z "$lt_cv_aix_libpath_"; then
+-    lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  fi
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
+-    conftest$ac_exeext conftest.$ac_ext
+-  if test -z "$lt_cv_aix_libpath_"; then
+-    lt_cv_aix_libpath_=/usr/lib:/lib
+-  fi
++if ac_fn_c_try_link "$LINENO"; then :
+ 
++lt_aix_libpath_sed='
++    /Import File Strings/,/^$/ {
++	/^0/ {
++	    s/^0  *\(.*\)$/\1/
++	    p
++	}
++    }'
++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++# Check for a 64-bit object if we didn't find anything.
++if test -z "$aix_libpath"; then
++  aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+-
+-  aix_libpath=$lt_cv_aix_libpath_
+ fi
++rm -f core conftest.err conftest.$ac_objext \
++    conftest$ac_exeext conftest.$ac_ext
++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+ 
+-	 hardcode_libdir_flag_spec='$wl-blibpath:$libdir:'"$aix_libpath"
++	 hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath"
+ 	  # Warning - without using the other run time loading flags,
+ 	  # -berok will link without error, but may produce a broken library.
+-	  no_undefined_flag=' $wl-bernotok'
+-	  allow_undefined_flag=' $wl-berok'
+-	  if test yes = "$with_gnu_ld"; then
++	  no_undefined_flag=' ${wl}-bernotok'
++	  allow_undefined_flag=' ${wl}-berok'
++	  if test "$with_gnu_ld" = yes; then
+ 	    # We only use this code for GNU lds that support --whole-archive.
+-	    whole_archive_flag_spec='$wl--whole-archive$convenience $wl--no-whole-archive'
++	    whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ 	  else
+ 	    # Exported symbols can be pulled into shared objects from archives
+ 	    whole_archive_flag_spec='$convenience'
+ 	  fi
+ 	  archive_cmds_need_lc=yes
+-	  archive_expsym_cmds='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+-	  # -brtl affects multiple linker settings, -berok does not and is overridden later
+-	  compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`'
+-	  if test svr4 != "$with_aix_soname"; then
+-	    # This is similar to how AIX traditionally builds its shared libraries.
+-	    archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+-	  fi
+-	  if test aix != "$with_aix_soname"; then
+-	    archive_expsym_cmds="$archive_expsym_cmds"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+-	  else
+-	    # used by -dlpreopen to get the symbols
+-	    archive_expsym_cmds="$archive_expsym_cmds"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+-	  fi
+-	  archive_expsym_cmds="$archive_expsym_cmds"'~$RM -r $output_objdir/$realname.d'
++	  # This is similar to how AIX traditionally builds its shared libraries.
++	  archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+ 	fi
+       fi
+       ;;
+@@ -12483,7 +10438,7 @@ fi
+       case $host_cpu in
+       powerpc)
+             # see comment about AmigaOS4 .so support
+-            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++            archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+             archive_expsym_cmds=''
+         ;;
+       m68k)
+@@ -12500,68 +10455,23 @@ fi
+ 
+     cygwin* | mingw* | pw32* | cegcc*)
+       # When not using gcc, we currently assume that we are using
+-      # Microsoft Visual C++ or Intel C++ Compiler.
++      # Microsoft Visual C++.
+       # hardcode_libdir_flag_spec is actually meaningless, as there is
+       # no search path for DLLs.
+-      case $cc_basename in
+-      cl* | icl*)
+-	# Native MSVC or ICC
+-	hardcode_libdir_flag_spec=' '
+-	allow_undefined_flag=unsupported
+-	always_export_symbols=yes
+-	file_list_spec='@'
+-	# Tell ltmain to make .lib files, not .a files.
+-	libext=lib
+-	# Tell ltmain to make .dll files, not .so files.
+-	shrext_cmds=.dll
+-	# FIXME: Setting linknames here is a bad hack.
+-	archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+-	archive_expsym_cmds='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+-            cp "$export_symbols" "$output_objdir/$soname.def";
+-            echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+-          else
+-            $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+-          fi~
+-          $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+-          linknames='
+-	# The linker will not automatically build a static lib if we build a DLL.
+-	# _LT_TAGVAR(old_archive_from_new_cmds, )='true'
+-	enable_shared_with_static_runtimes=yes
+-	exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+-	export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols'
+-	# Don't use ranlib
+-	old_postinstall_cmds='chmod 644 $oldlib'
+-	postlink_cmds='lt_outputfile="@OUTPUT@"~
+-          lt_tool_outputfile="@TOOL_OUTPUT@"~
+-          case $lt_outputfile in
+-            *.exe|*.EXE) ;;
+-            *)
+-              lt_outputfile=$lt_outputfile.exe
+-              lt_tool_outputfile=$lt_tool_outputfile.exe
+-              ;;
+-          esac~
+-          if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+-            $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+-            $RM "$lt_outputfile.manifest";
+-          fi'
+-	;;
+-      *)
+-	# Assume MSVC and ICC wrapper
+-	hardcode_libdir_flag_spec=' '
+-	allow_undefined_flag=unsupported
+-	# Tell ltmain to make .lib files, not .a files.
+-	libext=lib
+-	# Tell ltmain to make .dll files, not .so files.
+-	shrext_cmds=.dll
+-	# FIXME: Setting linknames here is a bad hack.
+-	archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
+-	# The linker will automatically build a .lib file if we build a DLL.
+-	old_archive_from_new_cmds='true'
+-	# FIXME: Should let the user specify the lib program.
+-	old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
+-	enable_shared_with_static_runtimes=yes
+-	;;
+-      esac
++      hardcode_libdir_flag_spec=' '
++      allow_undefined_flag=unsupported
++      # Tell ltmain to make .lib files, not .a files.
++      libext=lib
++      # Tell ltmain to make .dll files, not .so files.
++      shrext_cmds=".dll"
++      # FIXME: Setting linknames here is a bad hack.
++      archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames='
++      # The linker will automatically build a .lib file if we build a DLL.
++      old_archive_from_new_cmds='true'
++      # FIXME: Should let the user specify the lib program.
++      old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs'
++      fix_srcfile_path='`cygpath -w "$srcfile"`'
++      enable_shared_with_static_runtimes=yes
+       ;;
+ 
+     darwin* | rhapsody*)
+@@ -12571,24 +10481,23 @@ fi
+   hardcode_direct=no
+   hardcode_automatic=yes
+   hardcode_shlibpath_var=unsupported
+-  if test yes = "$lt_cv_ld_force_load"; then
+-    whole_archive_flag_spec='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+-
++  if test "$lt_cv_ld_force_load" = "yes"; then
++    whole_archive_flag_spec='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+   else
+     whole_archive_flag_spec=''
+   fi
+   link_all_deplibs=yes
+-  allow_undefined_flag=$_lt_dar_allow_undefined
++  allow_undefined_flag="$_lt_dar_allow_undefined"
+   case $cc_basename in
+-     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
++     ifort*) _lt_dar_can_shared=yes ;;
+      *) _lt_dar_can_shared=$GCC ;;
+   esac
+-  if test yes = "$_lt_dar_can_shared"; then
++  if test "$_lt_dar_can_shared" = "yes"; then
+     output_verbose_link_cmd=func_echo_all
+-    archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
+-    module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
+-    archive_expsym_cmds="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
+-    module_expsym_cmds="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
++    archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
++    module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
++    archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
++    module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
+ 
+   else
+   ld_shlibs=no
+@@ -12622,41 +10531,42 @@ fi
+       ;;
+ 
+     # FreeBSD 3 and greater uses gcc -shared to do shared libraries.
+-    freebsd* | dragonfly* | midnightbsd*)
+-      archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++    freebsd* | dragonfly*)
++      archive_cmds='$CC -shared -o $lib $libobjs $deplibs $compiler_flags'
+       hardcode_libdir_flag_spec='-R$libdir'
+       hardcode_direct=yes
+       hardcode_shlibpath_var=no
+       ;;
+ 
+     hpux9*)
+-      if test yes = "$GCC"; then
+-	archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++      if test "$GCC" = yes; then
++	archive_cmds='$RM $output_objdir/$soname~$CC -shared -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+       else
+-	archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++	archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+       fi
+-      hardcode_libdir_flag_spec='$wl+b $wl$libdir'
++      hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+       hardcode_libdir_separator=:
+       hardcode_direct=yes
+ 
+       # hardcode_minus_L: Not really in the search PATH,
+       # but as the default location of the library.
+       hardcode_minus_L=yes
+-      export_dynamic_flag_spec='$wl-E'
++      export_dynamic_flag_spec='${wl}-E'
+       ;;
+ 
+     hpux10*)
+-      if test yes,no = "$GCC,$with_gnu_ld"; then
+-	archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
++	archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+       else
+ 	archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+       fi
+-      if test no = "$with_gnu_ld"; then
+-	hardcode_libdir_flag_spec='$wl+b $wl$libdir'
++      if test "$with_gnu_ld" = no; then
++	hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
++	hardcode_libdir_flag_spec_ld='+b $libdir'
+ 	hardcode_libdir_separator=:
+ 	hardcode_direct=yes
+ 	hardcode_direct_absolute=yes
+-	export_dynamic_flag_spec='$wl-E'
++	export_dynamic_flag_spec='${wl}-E'
+ 	# hardcode_minus_L: Not really in the search PATH,
+ 	# but as the default location of the library.
+ 	hardcode_minus_L=yes
+@@ -12664,38 +10574,37 @@ fi
+       ;;
+ 
+     hpux11*)
+-      if test yes,no = "$GCC,$with_gnu_ld"; then
++      if test "$GCC" = yes && test "$with_gnu_ld" = no; then
+ 	case $host_cpu in
+ 	hppa*64*)
+-	  archive_cmds='$CC -shared $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_cmds='$CC -shared ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ 	  ;;
+ 	ia64*)
+-	  archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ 	  ;;
+ 	*)
+-	  archive_cmds='$CC -shared $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_cmds='$CC -shared -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ 	  ;;
+ 	esac
+       else
+ 	case $host_cpu in
+ 	hppa*64*)
+-	  archive_cmds='$CC -b $wl+h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ 	  ;;
+ 	ia64*)
+-	  archive_cmds='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags'
+ 	  ;;
+ 	*)
+ 
+ 	  # Older versions of the 11.00 compiler do not understand -b yet
+ 	  # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does)
+-	  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5
+-printf %s "checking if $CC understands -b... " >&6; }
+-if test ${lt_cv_prog_compiler__b+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++	  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5
++$as_echo_n "checking if $CC understands -b... " >&6; }
++if ${lt_cv_prog_compiler__b+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler__b=no
+-   save_LDFLAGS=$LDFLAGS
++   save_LDFLAGS="$LDFLAGS"
+    LDFLAGS="$LDFLAGS -b"
+    echo "$lt_simple_link_test_code" > conftest.$ac_ext
+    if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+@@ -12714,14 +10623,14 @@ else $as_nop
+      fi
+    fi
+    $RM -r conftest*
+-   LDFLAGS=$save_LDFLAGS
++   LDFLAGS="$save_LDFLAGS"
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5
+-printf "%s\n" "$lt_cv_prog_compiler__b" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5
++$as_echo "$lt_cv_prog_compiler__b" >&6; }
+ 
+-if test yes = "$lt_cv_prog_compiler__b"; then
+-    archive_cmds='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
++if test x"$lt_cv_prog_compiler__b" = xyes; then
++    archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'
+ else
+     archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'
+ fi
+@@ -12729,8 +10638,8 @@ fi
+ 	  ;;
+ 	esac
+       fi
+-      if test no = "$with_gnu_ld"; then
+-	hardcode_libdir_flag_spec='$wl+b $wl$libdir'
++      if test "$with_gnu_ld" = no; then
++	hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir'
+ 	hardcode_libdir_separator=:
+ 
+ 	case $host_cpu in
+@@ -12741,7 +10650,7 @@ fi
+ 	*)
+ 	  hardcode_direct=yes
+ 	  hardcode_direct_absolute=yes
+-	  export_dynamic_flag_spec='$wl-E'
++	  export_dynamic_flag_spec='${wl}-E'
+ 
+ 	  # hardcode_minus_L: Not really in the search PATH,
+ 	  # but as the default location of the library.
+@@ -12752,60 +10661,35 @@ fi
+       ;;
+ 
+     irix5* | irix6* | nonstopux*)
+-      if test yes = "$GCC"; then
+-	archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++      if test "$GCC" = yes; then
++	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ 	# Try to use the -exported_symbol ld option, if it does not
+ 	# work, assume that -exports_file does not work either and
+ 	# implicitly export all symbols.
+-	# This should be the same for all languages, so no per-tag cache variable.
+-	{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5
+-printf %s "checking whether the $host_os linker accepts -exported_symbol... " >&6; }
+-if test ${lt_cv_irix_exported_symbol+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  save_LDFLAGS=$LDFLAGS
+-	   LDFLAGS="$LDFLAGS -shared $wl-exported_symbol ${wl}foo $wl-update_registry $wl/dev/null"
+-	   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++        save_LDFLAGS="$LDFLAGS"
++        LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null"
++        cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+-int foo (void) { return 0; }
++int foo(void) {}
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
+-  lt_cv_irix_exported_symbol=yes
+-else $as_nop
+-  lt_cv_irix_exported_symbol=no
++if ac_fn_c_try_link "$LINENO"; then :
++  archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib'
++
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+-           LDFLAGS=$save_LDFLAGS
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5
+-printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+-	if test yes = "$lt_cv_irix_exported_symbol"; then
+-          archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations $wl-exports_file $wl$export_symbols -o $lib'
+-	fi
++        LDFLAGS="$save_LDFLAGS"
+       else
+-	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -exports_file $export_symbols -o $lib'
++	archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
++	archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib'
+       fi
+       archive_cmds_need_lc='no'
+-      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+       hardcode_libdir_separator=:
+       inherit_rpath=yes
+       link_all_deplibs=yes
+       ;;
+ 
+-    linux*)
+-      case $cc_basename in
+-      tcc*)
+-	# Fabrice Bellard et al's Tiny C Compiler
+-	ld_shlibs=yes
+-	archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+-	;;
+-      esac
+-      ;;
+-
+     netbsd*)
+       if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+ 	archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'  # a.out
+@@ -12820,7 +10704,7 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+     newsos6)
+       archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags'
+       hardcode_direct=yes
+-      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+       hardcode_libdir_separator=:
+       hardcode_shlibpath_var=no
+       ;;
+@@ -12828,19 +10712,27 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+     *nto* | *qnx*)
+       ;;
+ 
+-    openbsd* | bitrig*)
++    openbsd*)
+       if test -f /usr/libexec/ld.so; then
+ 	hardcode_direct=yes
+ 	hardcode_shlibpath_var=no
+ 	hardcode_direct_absolute=yes
+-	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
++	if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
+ 	  archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+-	  archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags $wl-retain-symbols-file,$export_symbols'
+-	  hardcode_libdir_flag_spec='$wl-rpath,$libdir'
+-	  export_dynamic_flag_spec='$wl-E'
++	  archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols'
++	  hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
++	  export_dynamic_flag_spec='${wl}-E'
+ 	else
+-	  archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
+-	  hardcode_libdir_flag_spec='$wl-rpath,$libdir'
++	  case $host_os in
++	   openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*)
++	     archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags'
++	     hardcode_libdir_flag_spec='-R$libdir'
++	     ;;
++	   *)
++	     archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags'
++	     hardcode_libdir_flag_spec='${wl}-rpath,$libdir'
++	     ;;
++	  esac
+ 	fi
+       else
+ 	ld_shlibs=no
+@@ -12851,54 +10743,33 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+       hardcode_libdir_flag_spec='-L$libdir'
+       hardcode_minus_L=yes
+       allow_undefined_flag=unsupported
+-      shrext_cmds=.dll
+-      archive_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      archive_expsym_cmds='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	$ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	$ECHO EXPORTS >> $output_objdir/$libname.def~
+-	prefix_cmds="$SED"~
+-	if test EXPORTS = "`$SED 1q $export_symbols`"; then
+-	  prefix_cmds="$prefix_cmds -e 1d";
+-	fi~
+-	prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+-	cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+-	$CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	emximp -o $lib $output_objdir/$libname.def'
+-      old_archive_From_new_cmds='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+-      enable_shared_with_static_runtimes=yes
+-      file_list_spec='@'
++      archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def'
++      old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def'
+       ;;
+ 
+     osf3*)
+-      if test yes = "$GCC"; then
+-	allow_undefined_flag=' $wl-expect_unresolved $wl\*'
+-	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++      if test "$GCC" = yes; then
++	allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
++	archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+       else
+ 	allow_undefined_flag=' -expect_unresolved \*'
+-	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+       fi
+       archive_cmds_need_lc='no'
+-      hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+       hardcode_libdir_separator=:
+       ;;
+ 
+     osf4* | osf5*)	# as osf3* with the addition of -msym flag
+-      if test yes = "$GCC"; then
+-	allow_undefined_flag=' $wl-expect_unresolved $wl\*'
+-	archive_cmds='$CC -shared$allow_undefined_flag $pic_flag $libobjs $deplibs $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
+-	hardcode_libdir_flag_spec='$wl-rpath $wl$libdir'
++      if test "$GCC" = yes; then
++	allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*'
++	archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
++	hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir'
+       else
+ 	allow_undefined_flag=' -expect_unresolved \*'
+-	archive_cmds='$CC -shared$allow_undefined_flag $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ 	archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~
+-          $CC -shared$allow_undefined_flag $wl-input $wl$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~$RM $lib.exp'
++	$CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp'
+ 
+ 	# Both c and cxx compiler support -rpath directly
+ 	hardcode_libdir_flag_spec='-rpath $libdir'
+@@ -12909,24 +10780,24 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+ 
+     solaris*)
+       no_undefined_flag=' -z defs'
+-      if test yes = "$GCC"; then
+-	wlarc='$wl'
+-	archive_cmds='$CC -shared $pic_flag $wl-z ${wl}text $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags'
++      if test "$GCC" = yes; then
++	wlarc='${wl}'
++	archive_cmds='$CC -shared ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags'
+ 	archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-          $CC -shared $pic_flag $wl-z ${wl}text $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
++	  $CC -shared ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+       else
+ 	case `$CC -V 2>&1` in
+ 	*"Compilers 5.0"*)
+ 	  wlarc=''
+-	  archive_cmds='$LD -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $linker_flags'
++	  archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags'
+ 	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-            $LD -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
++	  $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp'
+ 	  ;;
+ 	*)
+-	  wlarc='$wl'
+-	  archive_cmds='$CC -G$allow_undefined_flag -h $soname -o $lib $libobjs $deplibs $compiler_flags'
++	  wlarc='${wl}'
++	  archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags'
+ 	  archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-            $CC -G$allow_undefined_flag -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
++	  $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp'
+ 	  ;;
+ 	esac
+       fi
+@@ -12936,11 +10807,11 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+       solaris2.[0-5] | solaris2.[0-5].*) ;;
+       *)
+ 	# The compiler driver will combine and reorder linker options,
+-	# but understands '-z linker_flag'.  GCC discards it without '$wl',
++	# but understands `-z linker_flag'.  GCC discards it without `$wl',
+ 	# but is careful enough not to reorder.
+ 	# Supported since Solaris 2.6 (maybe 2.5.1?)
+-	if test yes = "$GCC"; then
+-	  whole_archive_flag_spec='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
++	if test "$GCC" = yes; then
++	  whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ 	else
+ 	  whole_archive_flag_spec='-z allextract$convenience -z defaultextract'
+ 	fi
+@@ -12950,10 +10821,10 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+       ;;
+ 
+     sunos4*)
+-      if test sequent = "$host_vendor"; then
++      if test "x$host_vendor" = xsequent; then
+ 	# Use $CC to link under sequent, because it throws in some extra .o
+ 	# files that make .init and .fini sections work.
+-	archive_cmds='$CC -G $wl-h $soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags'
+       else
+ 	archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags'
+       fi
+@@ -13002,43 +10873,43 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+       ;;
+ 
+     sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+-      no_undefined_flag='$wl-z,text'
++      no_undefined_flag='${wl}-z,text'
+       archive_cmds_need_lc=no
+       hardcode_shlibpath_var=no
+       runpath_var='LD_RUN_PATH'
+ 
+-      if test yes = "$GCC"; then
+-	archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      if test "$GCC" = yes; then
++	archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+       else
+-	archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+       fi
+       ;;
+ 
+     sysv5* | sco3.2v5* | sco5v6*)
+-      # Note: We CANNOT use -z defs as we might desire, because we do not
++      # Note: We can NOT use -z defs as we might desire, because we do not
+       # link with -lc, and that would cause any symbols used from libc to
+       # always be unresolved, which means just about no library would
+       # ever link correctly.  If we're not using GNU ld we use -z text
+       # though, which does catch some bad symbols but isn't as heavy-handed
+       # as -z defs.
+-      no_undefined_flag='$wl-z,text'
+-      allow_undefined_flag='$wl-z,nodefs'
++      no_undefined_flag='${wl}-z,text'
++      allow_undefined_flag='${wl}-z,nodefs'
+       archive_cmds_need_lc=no
+       hardcode_shlibpath_var=no
+-      hardcode_libdir_flag_spec='$wl-R,$libdir'
++      hardcode_libdir_flag_spec='${wl}-R,$libdir'
+       hardcode_libdir_separator=':'
+       link_all_deplibs=yes
+-      export_dynamic_flag_spec='$wl-Bexport'
++      export_dynamic_flag_spec='${wl}-Bexport'
+       runpath_var='LD_RUN_PATH'
+ 
+-      if test yes = "$GCC"; then
+-	archive_cmds='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	archive_expsym_cmds='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++      if test "$GCC" = yes; then
++	archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+       else
+-	archive_cmds='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	archive_expsym_cmds='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+       fi
+       ;;
+ 
+@@ -13053,18 +10924,18 @@ printf "%s\n" "$lt_cv_irix_exported_symbol" >&6; }
+       ;;
+     esac
+ 
+-    if test sni = "$host_vendor"; then
++    if test x$host_vendor = xsni; then
+       case $host in
+       sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*)
+-	export_dynamic_flag_spec='$wl-Blargedynsym'
++	export_dynamic_flag_spec='${wl}-Blargedynsym'
+ 	;;
+       esac
+     fi
+   fi
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5
+-printf "%s\n" "$ld_shlibs" >&6; }
+-test no = "$ld_shlibs" && can_build_shared=no
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5
++$as_echo "$ld_shlibs" >&6; }
++test "$ld_shlibs" = no && can_build_shared=no
+ 
+ with_gnu_ld=$with_gnu_ld
+ 
+@@ -13090,7 +10961,7 @@ x|xyes)
+   # Assume -lc should be added
+   archive_cmds_need_lc=yes
+ 
+-  if test yes,yes = "$GCC,$enable_shared"; then
++  if test "$enable_shared" = yes && test "$GCC" = yes; then
+     case $archive_cmds in
+     *'~'*)
+       # FIXME: we may have to deal with multi-command sequences.
+@@ -13099,19 +10970,18 @@ x|xyes)
+       # Test whether the compiler implicitly links with -lc since on some
+       # systems, -lgcc has to come before -lc. If gcc already passes -lc
+       # to ld, don't add -lc before -lgcc.
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+-printf %s "checking whether -lc should be explicitly linked in... " >&6; }
+-if test ${lt_cv_archive_cmds_need_lc+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
++$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
++if ${lt_cv_archive_cmds_need_lc+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   $RM conftest*
+ 	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ 
+ 	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } 2>conftest.err; then
+ 	  soname=conftest
+ 	  lib=conftest
+@@ -13129,7 +10999,7 @@ else $as_nop
+ 	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+   (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }
+ 	  then
+ 	    lt_cv_archive_cmds_need_lc=no
+@@ -13143,8 +11013,8 @@ else $as_nop
+ 	$RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5
+-printf "%s\n" "$lt_cv_archive_cmds_need_lc" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5
++$as_echo "$lt_cv_archive_cmds_need_lc" >&6; }
+       archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc
+       ;;
+     esac
+@@ -13303,17 +11173,22 @@ esac
+ 
+ 
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+-printf %s "checking dynamic linker characteristics... " >&6; }
+ 
+-if test yes = "$GCC"; then
++
++
++
++
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
++$as_echo_n "checking dynamic linker characteristics... " >&6; }
++
++if test "$GCC" = yes; then
+   case $host_os in
+-    darwin*) lt_awk_arg='/^libraries:/,/LR/' ;;
+-    *) lt_awk_arg='/^libraries:/' ;;
++    darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
++    *) lt_awk_arg="/^libraries:/" ;;
+   esac
+   case $host_os in
+-    mingw* | cegcc*) lt_sed_strip_eq='s|=\([A-Za-z]:\)|\1|g' ;;
+-    *) lt_sed_strip_eq='s|=/|/|g' ;;
++    mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;;
++    *) lt_sed_strip_eq="s,=/,/,g" ;;
+   esac
+   lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+   case $lt_search_path_spec in
+@@ -13329,35 +11204,28 @@ if test yes = "$GCC"; then
+     ;;
+   esac
+   # Ok, now we have the path, separated by spaces, we can step through it
+-  # and add multilib dir if necessary...
++  # and add multilib dir if necessary.
+   lt_tmp_lt_search_path_spec=
+-  lt_multi_os_dir=/`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+-  # ...but if some path component already ends with the multilib dir we assume
+-  # that all is fine and trust -print-search-dirs as is (GCC 4.2? or newer).
+-  case "$lt_multi_os_dir; $lt_search_path_spec " in
+-  "/; "* | "/.; "* | "/./; "* | *"$lt_multi_os_dir "* | *"$lt_multi_os_dir/ "*)
+-    lt_multi_os_dir=
+-    ;;
+-  esac
++  lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
+   for lt_sys_path in $lt_search_path_spec; do
+-    if test -d "$lt_sys_path$lt_multi_os_dir"; then
+-      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path$lt_multi_os_dir"
+-    elif test -n "$lt_multi_os_dir"; then
++    if test -d "$lt_sys_path/$lt_multi_os_dir"; then
++      lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
++    else
+       test -d "$lt_sys_path" && \
+ 	lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path"
+     fi
+   done
+   lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk '
+-BEGIN {RS = " "; FS = "/|\n";} {
+-  lt_foo = "";
+-  lt_count = 0;
++BEGIN {RS=" "; FS="/|\n";} {
++  lt_foo="";
++  lt_count=0;
+   for (lt_i = NF; lt_i > 0; lt_i--) {
+     if ($lt_i != "" && $lt_i != ".") {
+       if ($lt_i == "..") {
+         lt_count++;
+       } else {
+         if (lt_count == 0) {
+-          lt_foo = "/" $lt_i lt_foo;
++          lt_foo="/" $lt_i lt_foo;
+         } else {
+           lt_count--;
+         }
+@@ -13371,7 +11239,7 @@ BEGIN {RS = " "; FS = "/|\n";} {
+   # for these hosts.
+   case $host_os in
+     mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\
+-      $SED 's|/\([A-Za-z]:\)|\1|g'` ;;
++      $SED 's,/\([A-Za-z]:\),\1,g'` ;;
+   esac
+   sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP`
+ else
+@@ -13380,7 +11248,7 @@ fi
+ library_names_spec=
+ libname_spec='lib$name'
+ soname_spec=
+-shrext_cmds=.so
++shrext_cmds=".so"
+ postinstall_cmds=
+ postuninstall_cmds=
+ finish_cmds=
+@@ -13397,108 +11265,56 @@ hardcode_into_libs=no
+ # flags to be left without arguments
+ need_version=unknown
+ 
+-
+-
+ case $host_os in
+ aix3*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+   shlibpath_var=LIBPATH
+ 
+   # AIX 3 has no versioning support, so we append a major version to the name.
+-  soname_spec='$libname$release$shared_ext$major'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   ;;
+ 
+ aix[4-9]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+   hardcode_into_libs=yes
+-  if test ia64 = "$host_cpu"; then
++  if test "$host_cpu" = ia64; then
+     # AIX 5 supports IA64
+-    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
++    library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+     shlibpath_var=LD_LIBRARY_PATH
+   else
+     # With GCC up to 2.95.x, collect2 would create an import file
+     # for dependence libraries.  The import file would start with
+-    # the line '#! .'.  This would cause the generated library to
+-    # depend on '.', always an invalid library.  This was fixed in
++    # the line `#! .'.  This would cause the generated library to
++    # depend on `.', always an invalid library.  This was fixed in
+     # development snapshots of GCC prior to 3.0.
+     case $host_os in
+       aix4 | aix4.[01] | aix4.[01].*)
+       if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ 	   echo ' yes '
+-	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
++	   echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+ 	:
+       else
+ 	can_build_shared=no
+       fi
+       ;;
+     esac
+-    # Using Import Files as archive members, it is possible to support
+-    # filename-based versioning of shared library archives on AIX. While
+-    # this would work for both with and without runtime linking, it will
+-    # prevent static linking of such archives. So we do filename-based
+-    # shared library versioning with .so extension only, which is used
+-    # when both runtime linking and shared linking is enabled.
+-    # Unfortunately, runtime linking may impact performance, so we do
+-    # not want this to be the default eventually. Also, we use the
+-    # versioned .so libs for executables only if there is the -brtl
+-    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
+-    # To allow for filename-based versioning support, we need to create
+-    # libNAME.so.V as an archive file, containing:
+-    # *) an Import File, referring to the versioned filename of the
+-    #    archive as well as the shared archive member, telling the
+-    #    bitwidth (32 or 64) of that shared object, and providing the
+-    #    list of exported symbols of that shared object, eventually
+-    #    decorated with the 'weak' keyword
+-    # *) the shared object with the F_LOADONLY flag set, to really avoid
+-    #    it being seen by the linker.
+-    # At run time we better use the real file rather than another symlink,
+-    # but for link time we create the symlink libNAME.so -> libNAME.so.V
+-
+-    case $with_aix_soname,$aix_use_runtimelinking in
+-    # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct
++    # AIX (on Power*) has no versioning support, so currently we can not hardcode correct
+     # soname into executable. Probably we can add versioning support to
+     # collect2, so additional links can be useful in future.
+-    aix,yes) # traditional libtool
+-      dynamic_linker='AIX unversionable lib.so'
++    if test "$aix_use_runtimelinking" = yes; then
+       # If using run time linking (on AIX 4.2 or later) use lib.so
+       # instead of lib.a to let people know that these are not
+       # typical AIX shared libraries.
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-      ;;
+-    aix,no) # traditional AIX only
+-      dynamic_linker='AIX lib.a(lib.so.V)'
++      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    else
+       # We preserve .a as extension for shared libraries through AIX4.2
+       # and later when we are not doing run time linking.
+-      library_names_spec='$libname$release.a $libname.a'
+-      soname_spec='$libname$release$shared_ext$major'
+-      ;;
+-    svr4,*) # full svr4 only
+-      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)"
+-      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+-      # We do not specify a path in Import Files, so LIBPATH fires.
+-      shlibpath_overrides_runpath=yes
+-      ;;
+-    *,yes) # both, prefer svr4
+-      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)"
+-      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+-      # unpreferred sharedlib libNAME.a needs extra handling
+-      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
+-      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
+-      # We do not specify a path in Import Files, so LIBPATH fires.
+-      shlibpath_overrides_runpath=yes
+-      ;;
+-    *,no) # both, prefer aix
+-      dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)"
+-      library_names_spec='$libname$release.a $libname.a'
+-      soname_spec='$libname$release$shared_ext$major'
+-      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
+-      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
+-      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
+-      ;;
+-    esac
++      library_names_spec='${libname}${release}.a $libname.a'
++      soname_spec='${libname}${release}${shared_ext}$major'
++    fi
+     shlibpath_var=LIBPATH
+   fi
+   ;;
+@@ -13508,27 +11324,27 @@ amigaos*)
+   powerpc)
+     # Since July 2007 AmigaOS4 officially supports .so libraries.
+     # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+     ;;
+   m68k)
+     library_names_spec='$libname.ixlibrary $libname.a'
+     # Create ${libname}_ixlibrary.a entries in /sys/libs.
+-    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
++    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+     ;;
+   esac
+   ;;
+ 
+ beos*)
+-  library_names_spec='$libname$shared_ext'
++  library_names_spec='${libname}${shared_ext}'
+   dynamic_linker="$host_os ld.so"
+   shlibpath_var=LIBRARY_PATH
+   ;;
+ 
+ bsdi[45]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+   shlibpath_var=LD_LIBRARY_PATH
+   sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+@@ -13540,17 +11356,16 @@ bsdi[45]*)
+ 
+ cygwin* | mingw* | pw32* | cegcc*)
+   version_type=windows
+-  shrext_cmds=.dll
++  shrext_cmds=".dll"
+   need_version=no
+   need_lib_prefix=no
+ 
+-  case $GCC,$cc_basename in
+-  yes,*)
+-    # gcc
++  case $GCC,$host_os in
++  yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*)
+     library_names_spec='$libname.dll.a'
+     # DLL is installed to $(libdir)/../bin by postinstall_cmds
+-    postinstall_cmds='base_file=`basename \$file`~
+-      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++    postinstall_cmds='base_file=`basename \${file}`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+       dldir=$destdir/`dirname \$dlpath`~
+       test -d \$dldir || mkdir -p \$dldir~
+       $install_prog $dir/$dlname \$dldir/$dlname~
+@@ -13566,84 +11381,26 @@ cygwin* | mingw* | pw32* | cegcc*)
+     case $host_os in
+     cygwin*)
+       # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+-      soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ 
+       sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"
+       ;;
+     mingw* | cegcc*)
+       # MinGW DLLs use traditional 'lib' prefix
+-      soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+       ;;
+     pw32*)
+       # pw32 DLLs use 'pw' prefix rather than 'lib'
+-      library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+-      ;;
+-    esac
+-    dynamic_linker='Win32 ld.exe'
+-    ;;
+-
+-  *,cl* | *,icl*)
+-    # Native MSVC or ICC
+-    libname_spec='$name'
+-    soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+-    library_names_spec='$libname.dll.lib'
+-
+-    case $build_os in
+-    mingw*)
+-      sys_lib_search_path_spec=
+-      lt_save_ifs=$IFS
+-      IFS=';'
+-      for lt_path in $LIB
+-      do
+-        IFS=$lt_save_ifs
+-        # Let DOS variable expansion print the short 8.3 style file name.
+-        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+-        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+-      done
+-      IFS=$lt_save_ifs
+-      # Convert to MSYS style.
+-      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+-      ;;
+-    cygwin*)
+-      # Convert to unix form, then to dos form, then back to unix form
+-      # but this time dos style (no spaces!) so that the unix form looks
+-      # like /cygdrive/c/PROGRA~1:/cygdr...
+-      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+-      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+-      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+-      ;;
+-    *)
+-      sys_lib_search_path_spec=$LIB
+-      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+-        # It is most probably a Windows format PATH.
+-        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+-      else
+-        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+-      fi
+-      # FIXME: find the short name or the path components, as spaces are
+-      # common. (e.g. "Program Files" -> "PROGRA~1")
++      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+       ;;
+     esac
+-
+-    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+-    postinstall_cmds='base_file=`basename \$file`~
+-      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+-      dldir=$destdir/`dirname \$dlpath`~
+-      test -d \$dldir || mkdir -p \$dldir~
+-      $install_prog $dir/$dlname \$dldir/$dlname'
+-    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+-      dlpath=$dir/\$dldll~
+-       $RM \$dlpath'
+-    shlibpath_overrides_runpath=yes
+-    dynamic_linker='Win32 link.exe'
+     ;;
+ 
+   *)
+-    # Assume MSVC and ICC wrapper
+-    library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib'
+-    dynamic_linker='Win32 ld.exe'
++    library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+     ;;
+   esac
++  dynamic_linker='Win32 ld.exe'
+   # FIXME: first we should search . and the directory the executable is in
+   shlibpath_var=PATH
+   ;;
+@@ -13653,8 +11410,8 @@ darwin* | rhapsody*)
+   version_type=darwin
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
+-  soname_spec='$libname$release$major$shared_ext'
++  library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
++  soname_spec='${libname}${release}${major}$shared_ext'
+   shlibpath_overrides_runpath=yes
+   shlibpath_var=DYLD_LIBRARY_PATH
+   shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+@@ -13664,15 +11421,15 @@ darwin* | rhapsody*)
+   ;;
+ 
+ dgux*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   ;;
+ 
+-freebsd* | dragonfly* | midnightbsd*)
++freebsd* | dragonfly*)
+   # DragonFly does not have aout.  When/if they implement a new
+   # versioning mechanism, adjust this.
+   if test -x /usr/bin/objformat; then
+@@ -13686,13 +11443,12 @@ freebsd* | dragonfly* | midnightbsd*)
+   version_type=freebsd-$objformat
+   case $version_type in
+     freebsd-elf*)
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-      soname_spec='$libname$release$shared_ext$major'
++      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+       need_version=no
+       need_lib_prefix=no
+       ;;
+     freebsd-*)
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++      library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+       need_version=yes
+       ;;
+   esac
+@@ -13718,15 +11474,15 @@ freebsd* | dragonfly* | midnightbsd*)
+   ;;
+ 
+ haiku*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+   dynamic_linker="$host_os runtime_loader"
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LIBRARY_PATH
+-  shlibpath_overrides_runpath=no
+-  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
++  shlibpath_overrides_runpath=yes
++  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib'
+   hardcode_into_libs=yes
+   ;;
+ 
+@@ -13743,15 +11499,14 @@ hpux9* | hpux10* | hpux11*)
+     dynamic_linker="$host_os dld.so"
+     shlibpath_var=LD_LIBRARY_PATH
+     shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
+-    if test 32 = "$HPUX_IA64_MODE"; then
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
++    if test "X$HPUX_IA64_MODE" = X32; then
+       sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+-      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
+     else
+       sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+-      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
+     fi
++    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+     ;;
+   hppa*64*)
+     shrext_cmds='.sl'
+@@ -13759,8 +11514,8 @@ hpux9* | hpux10* | hpux11*)
+     dynamic_linker="$host_os dld.sl"
+     shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+     shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
+     sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+     sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+     ;;
+@@ -13769,8 +11524,8 @@ hpux9* | hpux10* | hpux11*)
+     dynamic_linker="$host_os dld.sl"
+     shlibpath_var=SHLIB_PATH
+     shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
+     ;;
+   esac
+   # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+@@ -13780,11 +11535,11 @@ hpux9* | hpux10* | hpux11*)
+   ;;
+ 
+ interix[3-9]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+@@ -13795,16 +11550,16 @@ irix5* | irix6* | nonstopux*)
+   case $host_os in
+     nonstopux*) version_type=nonstopux ;;
+     *)
+-	if test yes = "$lt_cv_prog_gnu_ld"; then
+-		version_type=linux # correct to gnu/linux during the next big refactor
++	if test "$lt_cv_prog_gnu_ld" = yes; then
++		version_type=linux
+ 	else
+ 		version_type=irix
+ 	fi ;;
+   esac
+   need_lib_prefix=no
+   need_version=no
+-  soname_spec='$libname$release$shared_ext$major'
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
++  soname_spec='${libname}${release}${shared_ext}$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+   case $host_os in
+   irix5* | nonstopux*)
+     libsuff= shlibsuff=
+@@ -13823,8 +11578,8 @@ irix5* | irix6* | nonstopux*)
+   esac
+   shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+   shlibpath_overrides_runpath=no
+-  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
+-  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
++  sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
++  sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+   hardcode_into_libs=yes
+   ;;
+ 
+@@ -13833,42 +11588,26 @@ linux*oldld* | linux*aout* | linux*coff*)
+   dynamic_linker=no
+   ;;
+ 
+-linux*android*)
+-  version_type=none # Android doesn't support versioned libraries.
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext'
+-  soname_spec='$libname$release$shared_ext'
+-  finish_cmds=
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-
+-  # This implies no fast_install, which is unacceptable.
+-  # Some rework will be needed to allow for fast_install
+-  # before this can be enabled.
+-  hardcode_into_libs=yes
+-
+-  dynamic_linker='Android linker'
+-  # Don't embed -rpath directories since the linker doesn't support them.
+-  hardcode_libdir_flag_spec='-L$libdir'
+-  ;;
++# This must be Linux ELF.
+ 
+-# This must be glibc/ELF.
+-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++# uclinux* changes (here and below) have been submitted to the libtool
++# project, but have not yet been accepted: they are GCC-local changes
++# for the time being.  (See
++# https://lists.gnu.org/archive/html/libtool-patches/2018-05/msg00000.html)
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu* | uclinuxfdpiceabi)
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+ 
+   # Some binutils ld are patched to set DT_RUNPATH
+-  if test ${lt_cv_shlibpath_overrides_runpath+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_shlibpath_overrides_runpath=no
+     save_LDFLAGS=$LDFLAGS
+     save_libdir=$libdir
+@@ -13878,21 +11617,19 @@ else $as_nop
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
+-  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
++  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
+   lt_cv_shlibpath_overrides_runpath=yes
+ fi
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+     LDFLAGS=$save_LDFLAGS
+     libdir=$save_libdir
+@@ -13906,18 +11643,10 @@ fi
+   # before this can be enabled.
+   hardcode_into_libs=yes
+ 
+-  # Add ABI-specific directories to the system library path.
+-  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"
+-
+-  # Ideally, we could use ldconfig to report *all* directores which are
+-  # searched for libraries, however this is still not possible.  Aside from not
+-  # being certain /sbin/ldconfig is available, command
+-  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
+-  # even though it is searched at run-time.  Try to do the best guess by
+-  # appending ld.so.conf contents (and includes) to the search path.
++  # Append ld.so.conf contents to the search path
+   if test -f /etc/ld.so.conf; then
+     lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+-    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
++    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+   fi
+ 
+   # We used to test for /lib/ld.so.1 and disable shared libraries on
+@@ -13934,12 +11663,12 @@ netbsd*)
+   need_lib_prefix=no
+   need_version=no
+   if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+     finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+     dynamic_linker='NetBSD (a.out) ld.so'
+   else
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
+     dynamic_linker='NetBSD ld.elf_so'
+   fi
+   shlibpath_var=LD_LIBRARY_PATH
+@@ -13948,8 +11677,8 @@ netbsd*)
+   ;;
+ 
+ newsos6)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+   ;;
+@@ -13958,68 +11687,58 @@ newsos6)
+   version_type=qnx
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+   hardcode_into_libs=yes
+   dynamic_linker='ldqnx.so'
+   ;;
+ 
+-openbsd* | bitrig*)
++openbsd*)
+   version_type=sunos
+-  sys_lib_dlsearch_path_spec=/usr/lib
++  sys_lib_dlsearch_path_spec="/usr/lib"
+   need_lib_prefix=no
+-  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+-    need_version=no
++  # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
++  case $host_os in
++    openbsd3.3 | openbsd3.3.*)	need_version=yes ;;
++    *)				need_version=no  ;;
++  esac
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
++  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
++  shlibpath_var=LD_LIBRARY_PATH
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
++    case $host_os in
++      openbsd2.[89] | openbsd2.[89].*)
++	shlibpath_overrides_runpath=no
++	;;
++      *)
++	shlibpath_overrides_runpath=yes
++	;;
++      esac
+   else
+-    need_version=yes
++    shlibpath_overrides_runpath=yes
+   fi
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
+-  finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+   ;;
+ 
+ os2*)
+   libname_spec='$name'
+-  version_type=windows
+-  shrext_cmds=.dll
+-  need_version=no
++  shrext_cmds=".dll"
+   need_lib_prefix=no
+-  # OS/2 can only load a DLL with a base name of 8 characters or less.
+-  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
+-    v=$($ECHO $release$versuffix | tr -d .-);
+-    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
+-    $ECHO $n$v`$shared_ext'
+-  library_names_spec='${libname}_dll.$libext'
++  library_names_spec='$libname${shared_ext} $libname.a'
+   dynamic_linker='OS/2 ld.exe'
+-  shlibpath_var=BEGINLIBPATH
+-  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+-  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+-  postinstall_cmds='base_file=`basename \$file`~
+-    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
+-    dldir=$destdir/`dirname \$dlpath`~
+-    test -d \$dldir || mkdir -p \$dldir~
+-    $install_prog $dir/$dlname \$dldir/$dlname~
+-    chmod a+x \$dldir/$dlname~
+-    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+-      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+-    fi'
+-  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
+-    dlpath=$dir/\$dldll~
+-    $RM \$dlpath'
++  shlibpath_var=LIBPATH
+   ;;
+ 
+ osf3* | osf4* | osf5*)
+   version_type=osf
+   need_lib_prefix=no
+   need_version=no
+-  soname_spec='$libname$release$shared_ext$major'
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='${libname}${release}${shared_ext}$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+   shlibpath_var=LD_LIBRARY_PATH
+   sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+-  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+   ;;
+ 
+ rdos*)
+@@ -14027,11 +11746,11 @@ rdos*)
+   ;;
+ 
+ solaris*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+   hardcode_into_libs=yes
+@@ -14041,20 +11760,20 @@ solaris*)
+ 
+ sunos4*)
+   version_type=sunos
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+   finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+-  if test yes = "$with_gnu_ld"; then
++  if test "$with_gnu_ld" = yes; then
+     need_lib_prefix=no
+   fi
+   need_version=yes
+   ;;
+ 
+ sysv4 | sysv4.3*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   case $host_vendor in
+     sni)
+@@ -14075,24 +11794,24 @@ sysv4 | sysv4.3*)
+   ;;
+ 
+ sysv4*MP*)
+-  if test -d /usr/nec; then
+-    version_type=linux # correct to gnu/linux during the next big refactor
+-    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
+-    soname_spec='$libname$shared_ext.$major'
++  if test -d /usr/nec ;then
++    version_type=linux
++    library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
++    soname_spec='$libname${shared_ext}.$major'
+     shlibpath_var=LD_LIBRARY_PATH
+   fi
+   ;;
+ 
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+-  version_type=sco
++  version_type=freebsd-elf
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+   hardcode_into_libs=yes
+-  if test yes = "$with_gnu_ld"; then
++  if test "$with_gnu_ld" = yes; then
+     sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+   else
+     sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+@@ -14107,19 +11826,19 @@ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ 
+ tpf*)
+   # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+   hardcode_into_libs=yes
+   ;;
+ 
+ uts4*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   ;;
+ 
+@@ -14127,33 +11846,22 @@ uts4*)
+   dynamic_linker=no
+   ;;
+ esac
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+-printf "%s\n" "$dynamic_linker" >&6; }
+-test no = "$dynamic_linker" && can_build_shared=no
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
++$as_echo "$dynamic_linker" >&6; }
++test "$dynamic_linker" = no && can_build_shared=no
+ 
+ variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+-if test yes = "$GCC"; then
++if test "$GCC" = yes; then
+   variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+ fi
+ 
+-if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
+-  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
++if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
++  sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+ fi
+-
+-if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
+-  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
++if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
++  sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+ fi
+ 
+-# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
+-configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
+-
+-# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
+-func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
+-
+-# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
+-configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+-
+-
+ 
+ 
+ 
+@@ -14245,24 +11953,20 @@ configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+ 
+ 
+ 
+-
+-
+-
+-
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+-printf %s "checking how to hardcode library paths into programs... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
++$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+ hardcode_action=
+ if test -n "$hardcode_libdir_flag_spec" ||
+    test -n "$runpath_var" ||
+-   test yes = "$hardcode_automatic"; then
++   test "X$hardcode_automatic" = "Xyes" ; then
+ 
+   # We can hardcode non-existent directories.
+-  if test no != "$hardcode_direct" &&
++  if test "$hardcode_direct" != no &&
+      # If the only mechanism to avoid hardcoding is shlibpath_var, we
+      # have to relink, otherwise we might link with an installed library
+      # when we should be linking with a yet-to-be-installed one
+-     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, )" &&
+-     test no != "$hardcode_minus_L"; then
++     ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no &&
++     test "$hardcode_minus_L" != no; then
+     # Linking always hardcodes the temporary library directory.
+     hardcode_action=relink
+   else
+@@ -14274,15 +11978,15 @@ else
+   # directories.
+   hardcode_action=unsupported
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5
+-printf "%s\n" "$hardcode_action" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5
++$as_echo "$hardcode_action" >&6; }
+ 
+-if test relink = "$hardcode_action" ||
+-   test yes = "$inherit_rpath"; then
++if test "$hardcode_action" = relink ||
++   test "$inherit_rpath" = yes; then
+   # Fast installation is not supported
+   enable_fast_install=no
+-elif test yes = "$shlibpath_overrides_runpath" ||
+-     test no = "$enable_shared"; then
++elif test "$shlibpath_overrides_runpath" = yes ||
++     test "$enable_shared" = no; then
+   # Fast installation is not necessary
+   enable_fast_install=needless
+ fi
+@@ -14292,7 +11996,7 @@ fi
+ 
+ 
+ 
+-  if test yes != "$enable_dlopen"; then
++  if test "x$enable_dlopen" != xyes; then
+   enable_dlopen=unknown
+   enable_dlopen_self=unknown
+   enable_dlopen_self_static=unknown
+@@ -14302,29 +12006,28 @@ else
+ 
+   case $host_os in
+   beos*)
+-    lt_cv_dlopen=load_add_on
++    lt_cv_dlopen="load_add_on"
+     lt_cv_dlopen_libs=
+     lt_cv_dlopen_self=yes
+     ;;
+ 
+   mingw* | pw32* | cegcc*)
+-    lt_cv_dlopen=LoadLibrary
++    lt_cv_dlopen="LoadLibrary"
+     lt_cv_dlopen_libs=
+     ;;
+ 
+   cygwin*)
+-    lt_cv_dlopen=dlopen
++    lt_cv_dlopen="dlopen"
+     lt_cv_dlopen_libs=
+     ;;
+ 
+   darwin*)
+-    # if libdl is installed we need to link against it
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+-printf %s "checking for dlopen in -ldl... " >&6; }
+-if test ${ac_cv_lib_dl_dlopen+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  # if libdl is installed we need to link against it
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
++$as_echo_n "checking for dlopen in -ldl... " >&6; }
++if ${ac_cv_lib_dl_dlopen+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_check_lib_save_LIBS=$LIBS
+ LIBS="-ldl  $LIBS"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+@@ -14333,33 +12036,34 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* Override any GCC internal prototype to avoid an error.
+    Use char because int might match the return type of a GCC
+    builtin and then its argument prototype would still apply.  */
++#ifdef __cplusplus
++extern "C"
++#endif
+ char dlopen ();
+ int
+-main (void)
++main ()
+ {
+ return dlopen ();
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   ac_cv_lib_dl_dlopen=yes
+-else $as_nop
++else
+   ac_cv_lib_dl_dlopen=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+ LIBS=$ac_check_lib_save_LIBS
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+-printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; }
+-if test "x$ac_cv_lib_dl_dlopen" = xyes
+-then :
+-  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
++$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
++if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
++  lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
++else
+ 
+-    lt_cv_dlopen=dyld
++    lt_cv_dlopen="dyld"
+     lt_cv_dlopen_libs=
+     lt_cv_dlopen_self=yes
+ 
+@@ -14367,26 +12071,16 @@ fi
+ 
+     ;;
+ 
+-  tpf*)
+-    # Don't try to run any link tests for TPF.  We know it's impossible
+-    # because TPF is a cross-compiler, and we know how we open DSOs.
+-    lt_cv_dlopen=dlopen
+-    lt_cv_dlopen_libs=
+-    lt_cv_dlopen_self=no
+-    ;;
+-
+   *)
+     ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load"
+-if test "x$ac_cv_func_shl_load" = xyes
+-then :
+-  lt_cv_dlopen=shl_load
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
+-printf %s "checking for shl_load in -ldld... " >&6; }
+-if test ${ac_cv_lib_dld_shl_load+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++if test "x$ac_cv_func_shl_load" = xyes; then :
++  lt_cv_dlopen="shl_load"
++else
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5
++$as_echo_n "checking for shl_load in -ldld... " >&6; }
++if ${ac_cv_lib_dld_shl_load+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_check_lib_save_LIBS=$LIBS
+ LIBS="-ldld  $LIBS"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+@@ -14395,42 +12089,41 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* Override any GCC internal prototype to avoid an error.
+    Use char because int might match the return type of a GCC
+    builtin and then its argument prototype would still apply.  */
++#ifdef __cplusplus
++extern "C"
++#endif
+ char shl_load ();
+ int
+-main (void)
++main ()
+ {
+ return shl_load ();
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   ac_cv_lib_dld_shl_load=yes
+-else $as_nop
++else
+   ac_cv_lib_dld_shl_load=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+ LIBS=$ac_check_lib_save_LIBS
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
+-printf "%s\n" "$ac_cv_lib_dld_shl_load" >&6; }
+-if test "x$ac_cv_lib_dld_shl_load" = xyes
+-then :
+-  lt_cv_dlopen=shl_load lt_cv_dlopen_libs=-ldld
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5
++$as_echo "$ac_cv_lib_dld_shl_load" >&6; }
++if test "x$ac_cv_lib_dld_shl_load" = xyes; then :
++  lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"
++else
+   ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen"
+-if test "x$ac_cv_func_dlopen" = xyes
+-then :
+-  lt_cv_dlopen=dlopen
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
+-printf %s "checking for dlopen in -ldl... " >&6; }
+-if test ${ac_cv_lib_dl_dlopen+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++if test "x$ac_cv_func_dlopen" = xyes; then :
++  lt_cv_dlopen="dlopen"
++else
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5
++$as_echo_n "checking for dlopen in -ldl... " >&6; }
++if ${ac_cv_lib_dl_dlopen+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_check_lib_save_LIBS=$LIBS
+ LIBS="-ldl  $LIBS"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+@@ -14439,37 +12132,37 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* Override any GCC internal prototype to avoid an error.
+    Use char because int might match the return type of a GCC
+    builtin and then its argument prototype would still apply.  */
++#ifdef __cplusplus
++extern "C"
++#endif
+ char dlopen ();
+ int
+-main (void)
++main ()
+ {
+ return dlopen ();
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   ac_cv_lib_dl_dlopen=yes
+-else $as_nop
++else
+   ac_cv_lib_dl_dlopen=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+ LIBS=$ac_check_lib_save_LIBS
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
+-printf "%s\n" "$ac_cv_lib_dl_dlopen" >&6; }
+-if test "x$ac_cv_lib_dl_dlopen" = xyes
+-then :
+-  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-ldl
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5
+-printf %s "checking for dlopen in -lsvld... " >&6; }
+-if test ${ac_cv_lib_svld_dlopen+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5
++$as_echo "$ac_cv_lib_dl_dlopen" >&6; }
++if test "x$ac_cv_lib_dl_dlopen" = xyes; then :
++  lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"
++else
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5
++$as_echo_n "checking for dlopen in -lsvld... " >&6; }
++if ${ac_cv_lib_svld_dlopen+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_check_lib_save_LIBS=$LIBS
+ LIBS="-lsvld  $LIBS"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+@@ -14478,37 +12171,37 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* Override any GCC internal prototype to avoid an error.
+    Use char because int might match the return type of a GCC
+    builtin and then its argument prototype would still apply.  */
++#ifdef __cplusplus
++extern "C"
++#endif
+ char dlopen ();
+ int
+-main (void)
++main ()
+ {
+ return dlopen ();
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   ac_cv_lib_svld_dlopen=yes
+-else $as_nop
++else
+   ac_cv_lib_svld_dlopen=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+ LIBS=$ac_check_lib_save_LIBS
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5
+-printf "%s\n" "$ac_cv_lib_svld_dlopen" >&6; }
+-if test "x$ac_cv_lib_svld_dlopen" = xyes
+-then :
+-  lt_cv_dlopen=dlopen lt_cv_dlopen_libs=-lsvld
+-else $as_nop
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5
+-printf %s "checking for dld_link in -ldld... " >&6; }
+-if test ${ac_cv_lib_dld_dld_link+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5
++$as_echo "$ac_cv_lib_svld_dlopen" >&6; }
++if test "x$ac_cv_lib_svld_dlopen" = xyes; then :
++  lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"
++else
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5
++$as_echo_n "checking for dld_link in -ldld... " >&6; }
++if ${ac_cv_lib_dld_dld_link+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   ac_check_lib_save_LIBS=$LIBS
+ LIBS="-ldld  $LIBS"
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+@@ -14517,30 +12210,31 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* Override any GCC internal prototype to avoid an error.
+    Use char because int might match the return type of a GCC
+    builtin and then its argument prototype would still apply.  */
++#ifdef __cplusplus
++extern "C"
++#endif
+ char dld_link ();
+ int
+-main (void)
++main ()
+ {
+ return dld_link ();
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_link "$LINENO"
+-then :
++if ac_fn_c_try_link "$LINENO"; then :
+   ac_cv_lib_dld_dld_link=yes
+-else $as_nop
++else
+   ac_cv_lib_dld_dld_link=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+ LIBS=$ac_check_lib_save_LIBS
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
+-printf "%s\n" "$ac_cv_lib_dld_dld_link" >&6; }
+-if test "x$ac_cv_lib_dld_dld_link" = xyes
+-then :
+-  lt_cv_dlopen=dld_link lt_cv_dlopen_libs=-ldld
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
++$as_echo "$ac_cv_lib_dld_dld_link" >&6; }
++if test "x$ac_cv_lib_dld_dld_link" = xyes; then :
++  lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
+ fi
+ 
+ 
+@@ -14561,36 +12255,35 @@ fi
+     ;;
+   esac
+ 
+-  if test no = "$lt_cv_dlopen"; then
+-    enable_dlopen=no
+-  else
++  if test "x$lt_cv_dlopen" != xno; then
+     enable_dlopen=yes
++  else
++    enable_dlopen=no
+   fi
+ 
+   case $lt_cv_dlopen in
+   dlopen)
+-    save_CPPFLAGS=$CPPFLAGS
+-    test yes = "$ac_cv_header_dlfcn_h" && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
++    save_CPPFLAGS="$CPPFLAGS"
++    test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+ 
+-    save_LDFLAGS=$LDFLAGS
++    save_LDFLAGS="$LDFLAGS"
+     wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+ 
+-    save_LIBS=$LIBS
++    save_LIBS="$LIBS"
+     LIBS="$lt_cv_dlopen_libs $LIBS"
+ 
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
+-printf %s "checking whether a program can dlopen itself... " >&6; }
+-if test ${lt_cv_dlopen_self+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  	  if test yes = "$cross_compiling"; then :
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
++$as_echo_n "checking whether a program can dlopen itself... " >&6; }
++if ${lt_cv_dlopen_self+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  	  if test "$cross_compiling" = yes; then :
+   lt_cv_dlopen_self=cross
+ else
+   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+   lt_status=$lt_dlunknown
+   cat > conftest.$ac_ext <<_LT_EOF
+-#line $LINENO "configure"
++#line 12286 "configure"
+ #include "confdefs.h"
+ 
+ #if HAVE_DLFCN_H
+@@ -14631,13 +12324,13 @@ else
+ #  endif
+ #endif
+ 
+-/* When -fvisibility=hidden is used, assume the code has been annotated
++/* When -fvisbility=hidden is used, assume the code has been annotated
+    correspondingly for the symbols needed.  */
+-#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+-int fnord () __attribute__((visibility("default")));
++#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
++void fnord () __attribute__((visibility("default")));
+ #endif
+ 
+-int fnord () { return 42; }
++void fnord () { int i=42; }
+ int main ()
+ {
+   void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+@@ -14662,8 +12355,8 @@ _LT_EOF
+   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+   (eval $ac_link) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+     (./conftest; exit; ) >&5 2>/dev/null
+     lt_status=$?
+     case x$lt_status in
+@@ -14680,24 +12373,23 @@ rm -fr conftest*
+ 
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
+-printf "%s\n" "$lt_cv_dlopen_self" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
++$as_echo "$lt_cv_dlopen_self" >&6; }
+ 
+-    if test yes = "$lt_cv_dlopen_self"; then
++    if test "x$lt_cv_dlopen_self" = xyes; then
+       wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
+-printf %s "checking whether a statically linked program can dlopen itself... " >&6; }
+-if test ${lt_cv_dlopen_self_static+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  	  if test yes = "$cross_compiling"; then :
++      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
++$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; }
++if ${lt_cv_dlopen_self_static+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  	  if test "$cross_compiling" = yes; then :
+   lt_cv_dlopen_self_static=cross
+ else
+   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+   lt_status=$lt_dlunknown
+   cat > conftest.$ac_ext <<_LT_EOF
+-#line $LINENO "configure"
++#line 12392 "configure"
+ #include "confdefs.h"
+ 
+ #if HAVE_DLFCN_H
+@@ -14738,13 +12430,13 @@ else
+ #  endif
+ #endif
+ 
+-/* When -fvisibility=hidden is used, assume the code has been annotated
++/* When -fvisbility=hidden is used, assume the code has been annotated
+    correspondingly for the symbols needed.  */
+-#if defined __GNUC__ && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+-int fnord () __attribute__((visibility("default")));
++#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
++void fnord () __attribute__((visibility("default")));
+ #endif
+ 
+-int fnord () { return 42; }
++void fnord () { int i=42; }
+ int main ()
+ {
+   void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+@@ -14769,8 +12461,8 @@ _LT_EOF
+   if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+   (eval $ac_link) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; } && test -s "conftest$ac_exeext" 2>/dev/null; then
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+     (./conftest; exit; ) >&5 2>/dev/null
+     lt_status=$?
+     case x$lt_status in
+@@ -14787,13 +12479,13 @@ rm -fr conftest*
+ 
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5
+-printf "%s\n" "$lt_cv_dlopen_self_static" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5
++$as_echo "$lt_cv_dlopen_self_static" >&6; }
+     fi
+ 
+-    CPPFLAGS=$save_CPPFLAGS
+-    LDFLAGS=$save_LDFLAGS
+-    LIBS=$save_LIBS
++    CPPFLAGS="$save_CPPFLAGS"
++    LDFLAGS="$save_LDFLAGS"
++    LIBS="$save_LIBS"
+     ;;
+   esac
+ 
+@@ -14826,43 +12518,32 @@ fi
+ 
+ striplib=
+ old_striplib=
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5
+-printf %s "checking whether stripping libraries is possible... " >&6; }
+-if test -z "$STRIP"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-else
+-  if $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+-    old_striplib="$STRIP --strip-debug"
+-    striplib="$STRIP --strip-unneeded"
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+-printf "%s\n" "yes" >&6; }
+-  else
+-    case $host_os in
+-    darwin*)
+-      # FIXME - insert some real tests, host_os isn't really good enough
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5
++$as_echo_n "checking whether stripping libraries is possible... " >&6; }
++if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
++  test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
++  test -z "$striplib" && striplib="$STRIP --strip-unneeded"
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++$as_echo "yes" >&6; }
++else
++# FIXME - insert some real tests, host_os isn't really good enough
++  case $host_os in
++  darwin*)
++    if test -n "$STRIP" ; then
+       striplib="$STRIP -x"
+       old_striplib="$STRIP -S"
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+-printf "%s\n" "yes" >&6; }
+-      ;;
+-    freebsd*)
+-      if $STRIP -V 2>&1 | $GREP "elftoolchain" >/dev/null; then
+-        old_striplib="$STRIP --strip-debug"
+-        striplib="$STRIP --strip-unneeded"
+-        { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: yes" >&5
+-printf "%s\n" "yes" >&6; }
+-      else
+-        { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-      fi
+-      ;;
+-    *)
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
+-      ;;
+-    esac
+-  fi
++      { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5
++$as_echo "yes" >&6; }
++    else
++      { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
++    fi
++    ;;
++  *)
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
++    ;;
++  esac
+ fi
+ 
+ 
+@@ -14876,21 +12557,21 @@ fi
+ 
+ 
+ 
+-  # Report what library types will actually be built
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5
+-printf %s "checking if libtool supports shared libraries... " >&6; }
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5
+-printf "%s\n" "$can_build_shared" >&6; }
++  # Report which library types will actually be built
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5
++$as_echo_n "checking if libtool supports shared libraries... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5
++$as_echo "$can_build_shared" >&6; }
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5
+-printf %s "checking whether to build shared libraries... " >&6; }
+-  test no = "$can_build_shared" && enable_shared=no
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5
++$as_echo_n "checking whether to build shared libraries... " >&6; }
++  test "$can_build_shared" = "no" && enable_shared=no
+ 
+   # On AIX, shared libraries and static libraries use the same namespace, and
+   # are all built from PIC.
+   case $host_os in
+   aix3*)
+-    test yes = "$enable_shared" && enable_static=no
++    test "$enable_shared" = yes && enable_static=no
+     if test -n "$RANLIB"; then
+       archive_cmds="$archive_cmds~\$RANLIB \$lib"
+       postinstall_cmds='$RANLIB $lib'
+@@ -14898,24 +12579,20 @@ printf %s "checking whether to build shared libraries... " >&6; }
+     ;;
+ 
+   aix[4-9]*)
+-    if test ia64 != "$host_cpu"; then
+-      case $enable_shared,$with_aix_soname,$aix_use_runtimelinking in
+-      yes,aix,yes) ;;			# shared object as lib.so file only
+-      yes,svr4,*) ;;			# shared object as lib.so archive member only
+-      yes,*) enable_static=no ;;	# shared object in lib.a archive as well
+-      esac
++    if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then
++      test "$enable_shared" = yes && enable_static=no
+     fi
+     ;;
+   esac
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5
+-printf "%s\n" "$enable_shared" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5
++$as_echo "$enable_shared" >&6; }
+ 
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5
+-printf %s "checking whether to build static libraries... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5
++$as_echo_n "checking whether to build static libraries... " >&6; }
+   # Make sure either enable_shared or enable_static is yes.
+-  test yes = "$enable_shared" || enable_static=yes
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5
+-printf "%s\n" "$enable_static" >&6; }
++  test "$enable_shared" = yes || enable_static=yes
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5
++$as_echo "$enable_static" >&6; }
+ 
+ 
+ 
+@@ -14927,42 +12604,46 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+-CC=$lt_save_CC
++CC="$lt_save_CC"
+ 
+-      if test -n "$CXX" && ( test no != "$CXX" &&
+-    ( (test g++ = "$CXX" && `g++ -v >/dev/null 2>&1` ) ||
+-    (test g++ != "$CXX"))); then
++      if test -n "$CXX" && ( test "X$CXX" != "Xno" &&
++    ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) ||
++    (test "X$CXX" != "Xg++"))) ; then
+   ac_ext=cpp
+ ac_cpp='$CXXCPP $CPPFLAGS'
+ ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5'
+ ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5'
+ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
+-printf %s "checking how to run the C++ preprocessor... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5
++$as_echo_n "checking how to run the C++ preprocessor... " >&6; }
+ if test -z "$CXXCPP"; then
+-  if test ${ac_cv_prog_CXXCPP+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-      # Double quotes because $CXX needs to be expanded
+-    for CXXCPP in "$CXX -E" cpp /lib/cpp
++  if ${ac_cv_prog_CXXCPP+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++      # Double quotes because CXXCPP needs to be expanded
++    for CXXCPP in "$CXX -E" "/lib/cpp"
+     do
+       ac_preproc_ok=false
+ for ac_cxx_preproc_warn_flag in '' yes
+ do
+   # Use a header file that comes with gcc, so configuring glibc
+   # with a fresh cross-compiler works.
++  # Prefer  to  if __STDC__ is defined, since
++  #  exists even on freestanding compilers.
+   # On the NeXT, cc -E runs the code through the compiler's parser,
+   # not just through cpp. "Syntax error" is here to catch this case.
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+-#include 
++#ifdef __STDC__
++# include 
++#else
++# include 
++#endif
+ 		     Syntax error
+ _ACEOF
+-if ac_fn_cxx_try_cpp "$LINENO"
+-then :
++if ac_fn_cxx_try_cpp "$LINENO"; then :
+ 
+-else $as_nop
++else
+   # Broken: fails on valid input.
+ continue
+ fi
+@@ -14974,11 +12655,10 @@ rm -f conftest.err conftest.i conftest.$ac_ext
+ /* end confdefs.h.  */
+ #include 
+ _ACEOF
+-if ac_fn_cxx_try_cpp "$LINENO"
+-then :
++if ac_fn_cxx_try_cpp "$LINENO"; then :
+   # Broken: success on invalid input.
+ continue
+-else $as_nop
++else
+   # Passes both tests.
+ ac_preproc_ok=:
+ break
+@@ -14988,8 +12668,7 @@ rm -f conftest.err conftest.i conftest.$ac_ext
+ done
+ # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+ rm -f conftest.i conftest.err conftest.$ac_ext
+-if $ac_preproc_ok
+-then :
++if $ac_preproc_ok; then :
+   break
+ fi
+ 
+@@ -15001,24 +12680,29 @@ fi
+ else
+   ac_cv_prog_CXXCPP=$CXXCPP
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
+-printf "%s\n" "$CXXCPP" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5
++$as_echo "$CXXCPP" >&6; }
+ ac_preproc_ok=false
+ for ac_cxx_preproc_warn_flag in '' yes
+ do
+   # Use a header file that comes with gcc, so configuring glibc
+   # with a fresh cross-compiler works.
++  # Prefer  to  if __STDC__ is defined, since
++  #  exists even on freestanding compilers.
+   # On the NeXT, cc -E runs the code through the compiler's parser,
+   # not just through cpp. "Syntax error" is here to catch this case.
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+-#include 
++#ifdef __STDC__
++# include 
++#else
++# include 
++#endif
+ 		     Syntax error
+ _ACEOF
+-if ac_fn_cxx_try_cpp "$LINENO"
+-then :
++if ac_fn_cxx_try_cpp "$LINENO"; then :
+ 
+-else $as_nop
++else
+   # Broken: fails on valid input.
+ continue
+ fi
+@@ -15030,11 +12714,10 @@ rm -f conftest.err conftest.i conftest.$ac_ext
+ /* end confdefs.h.  */
+ #include 
+ _ACEOF
+-if ac_fn_cxx_try_cpp "$LINENO"
+-then :
++if ac_fn_cxx_try_cpp "$LINENO"; then :
+   # Broken: success on invalid input.
+ continue
+-else $as_nop
++else
+   # Passes both tests.
+ ac_preproc_ok=:
+ break
+@@ -15044,12 +12727,11 @@ rm -f conftest.err conftest.i conftest.$ac_ext
+ done
+ # Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped.
+ rm -f conftest.i conftest.err conftest.$ac_ext
+-if $ac_preproc_ok
+-then :
++if $ac_preproc_ok; then :
+ 
+-else $as_nop
+-  { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++else
++  { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check
+ See \`config.log' for more details" "$LINENO" 5; }
+ fi
+@@ -15079,6 +12761,7 @@ export_dynamic_flag_spec_CXX=
+ hardcode_direct_CXX=no
+ hardcode_direct_absolute_CXX=no
+ hardcode_libdir_flag_spec_CXX=
++hardcode_libdir_flag_spec_ld_CXX=
+ hardcode_libdir_separator_CXX=
+ hardcode_minus_L_CXX=no
+ hardcode_shlibpath_var_CXX=unsupported
+@@ -15105,7 +12788,7 @@ objext_CXX=$objext
+ # the CXX compiler isn't working.  Some variables (like enable_shared)
+ # are currently assumed to apply to all compilers on this platform,
+ # and will be corrupted by setting them based on a non-working compiler.
+-if test yes != "$_lt_caught_CXX_error"; then
++if test "$_lt_caught_CXX_error" != yes; then
+   # Code to be used in simple compile tests
+   lt_simple_compile_test_code="int some_variable = 0;"
+ 
+@@ -15145,7 +12828,6 @@ $RM -r conftest*
+ 
+   # Allow CC to be a program name with arguments.
+   lt_save_CC=$CC
+-  lt_save_CFLAGS=$CFLAGS
+   lt_save_LD=$LD
+   lt_save_GCC=$GCC
+   GCC=$GXX
+@@ -15163,43 +12845,48 @@ $RM -r conftest*
+   fi
+   test -z "${LDCXX+set}" || LD=$LDCXX
+   CC=${CXX-"c++"}
+-  CFLAGS=$CXXFLAGS
+   compiler=$CC
+   compiler_CXX=$CC
+-  func_cc_basename $compiler
+-cc_basename=$func_cc_basename_result
++  for cc_temp in $compiler""; do
++  case $cc_temp in
++    compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
++    distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
++    \-*) ;;
++    *) break;;
++  esac
++done
++cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+ 
+ 
+   if test -n "$compiler"; then
+     # We don't want -fno-exception when compiling C++ code, so set the
+     # no_builtin_flag separately
+-    if test yes = "$GXX"; then
++    if test "$GXX" = yes; then
+       lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+     else
+       lt_prog_compiler_no_builtin_flag_CXX=
+     fi
+ 
+-    if test yes = "$GXX"; then
++    if test "$GXX" = yes; then
+       # Set up default GNU C++ configuration
+ 
+ 
+ 
+ # Check whether --with-gnu-ld was given.
+-if test ${with_gnu_ld+y}
+-then :
+-  withval=$with_gnu_ld; test no = "$withval" || with_gnu_ld=yes
+-else $as_nop
++if test "${with_gnu_ld+set}" = set; then :
++  withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
++else
+   with_gnu_ld=no
+ fi
+ 
+ ac_prog=ld
+-if test yes = "$GCC"; then
++if test "$GCC" = yes; then
+   # Check if gcc -print-prog-name=ld gives a path.
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+-printf %s "checking for ld used by $CC... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
++$as_echo_n "checking for ld used by $CC... " >&6; }
+   case $host in
+   *-*-mingw*)
+-    # gcc leaves a trailing carriage return, which upsets mingw
++    # gcc leaves a trailing carriage return which upsets mingw
+     ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+   *)
+     ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+@@ -15213,7 +12900,7 @@ printf %s "checking for ld used by $CC... " >&6; }
+       while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ 	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+       done
+-      test -z "$LD" && LD=$ac_prog
++      test -z "$LD" && LD="$ac_prog"
+       ;;
+   "")
+     # If it fails, then pretend we aren't using GCC.
+@@ -15224,58 +12911,56 @@ printf %s "checking for ld used by $CC... " >&6; }
+     with_gnu_ld=unknown
+     ;;
+   esac
+-elif test yes = "$with_gnu_ld"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+-printf %s "checking for GNU ld... " >&6; }
++elif test "$with_gnu_ld" = yes; then
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
++$as_echo_n "checking for GNU ld... " >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+-printf %s "checking for non-GNU ld... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
++$as_echo_n "checking for non-GNU ld... " >&6; }
+ fi
+-if test ${lt_cv_path_LD+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++if ${lt_cv_path_LD+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   if test -z "$LD"; then
+-  lt_save_ifs=$IFS; IFS=$PATH_SEPARATOR
++  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+   for ac_dir in $PATH; do
+-    IFS=$lt_save_ifs
++    IFS="$lt_save_ifs"
+     test -z "$ac_dir" && ac_dir=.
+     if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+-      lt_cv_path_LD=$ac_dir/$ac_prog
++      lt_cv_path_LD="$ac_dir/$ac_prog"
+       # Check to see if the program is GNU ld.  I'd rather use --version,
+       # but apparently some variants of GNU ld only accept -v.
+       # Break only if it was the GNU/non-GNU ld that we prefer.
+       case `"$lt_cv_path_LD" -v 2>&1 &5
+-printf "%s\n" "$LD" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
++$as_echo "$LD" >&6; }
+ else
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: no" >&5
+-printf "%s\n" "no" >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
++$as_echo "no" >&6; }
+ fi
+ test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+-printf %s "checking if the linker ($LD) is GNU ld... " >&6; }
+-if test ${lt_cv_prog_gnu_ld+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
++$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
++if ${lt_cv_prog_gnu_ld+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   # I'd rather use --version here, but apparently some GNU lds only accept -v.
+ case `$LD -v 2>&1 &1 &5
+-printf "%s\n" "$lt_cv_prog_gnu_ld" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
++$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+ with_gnu_ld=$lt_cv_prog_gnu_ld
+ 
+ 
+@@ -15298,22 +12983,22 @@ with_gnu_ld=$lt_cv_prog_gnu_ld
+ 
+       # Check if GNU C++ uses GNU ld as the underlying linker, since the
+       # archiving commands below assume that GNU ld is being used.
+-      if test yes = "$with_gnu_ld"; then
+-        archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-        archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++      if test "$with_gnu_ld" = yes; then
++        archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
++        archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ 
+-        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+-        export_dynamic_flag_spec_CXX='$wl--export-dynamic'
++        hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
++        export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+ 
+         # If archive_cmds runs LD, not CC, wlarc should be empty
+         # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+         #     investigate it a little bit more. (MM)
+-        wlarc='$wl'
++        wlarc='${wl}'
+ 
+         # ancient GNU ld didn't support --whole-archive et. al.
+         if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+ 	  $GREP 'no-whole-archive' > /dev/null; then
+-          whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++          whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+         else
+           whole_archive_flag_spec_CXX=
+         fi
+@@ -15341,8 +13026,8 @@ with_gnu_ld=$lt_cv_prog_gnu_ld
+     fi
+ 
+     # PORTME: fill in a description of your system's C++ link characteristics
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+-printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
++$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+     ld_shlibs_CXX=yes
+     case $host_os in
+       aix3*)
+@@ -15350,30 +13035,18 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+         ld_shlibs_CXX=no
+         ;;
+       aix[4-9]*)
+-        if test ia64 = "$host_cpu"; then
++        if test "$host_cpu" = ia64; then
+           # On IA64, the linker does run time linking by default, so we don't
+           # have to do anything special.
+           aix_use_runtimelinking=no
+           exp_sym_flag='-Bexport'
+-          no_entry_flag=
++          no_entry_flag=""
+         else
+           aix_use_runtimelinking=no
+ 
+           # Test if we are trying to use run time linking or normal
+           # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+-          # have runtime linking enabled, and use it for executables.
+-          # For shared libraries, we enable/disable runtime linking
+-          # depending on the kind of the shared library created -
+-          # when "with_aix_soname,aix_use_runtimelinking" is:
+-          # "aix,no"   lib.a(lib.so.V) shared, rtl:no,  for executables
+-          # "aix,yes"  lib.so          shared, rtl:yes, for executables
+-          #            lib.a           static archive
+-          # "both,no"  lib.so.V(shr.o) shared, rtl:yes
+-          #            lib.a(lib.so.V) shared, rtl:no,  for executables
+-          # "both,yes" lib.so.V(shr.o) shared, rtl:yes, for executables
+-          #            lib.a(lib.so.V) shared, rtl:no
+-          # "svr4,*"   lib.so.V(shr.o) shared, rtl:yes, for executables
+-          #            lib.a           static archive
++          # need to do runtime linking.
+           case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+ 	    for ld_flag in $LDFLAGS; do
+ 	      case $ld_flag in
+@@ -15383,13 +13056,6 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+ 	        ;;
+ 	      esac
+ 	    done
+-	    if test svr4,no = "$with_aix_soname,$aix_use_runtimelinking"; then
+-	      # With aix-soname=svr4, we create the lib.so.V shared archives only,
+-	      # so we don't have lib.a shared libs to link our executables.
+-	      # We have to force runtime linking in this case.
+-	      aix_use_runtimelinking=yes
+-	      LDFLAGS="$LDFLAGS -Wl,-brtl"
+-	    fi
+ 	    ;;
+           esac
+ 
+@@ -15408,21 +13074,13 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+         hardcode_direct_absolute_CXX=yes
+         hardcode_libdir_separator_CXX=':'
+         link_all_deplibs_CXX=yes
+-        file_list_spec_CXX='$wl-f,'
+-        case $with_aix_soname,$aix_use_runtimelinking in
+-        aix,*) ;;	# no import file
+-        svr4,* | *,yes) # use import file
+-          # The Import File defines what to hardcode.
+-          hardcode_direct_CXX=no
+-          hardcode_direct_absolute_CXX=no
+-          ;;
+-        esac
++        file_list_spec_CXX='${wl}-f,'
+ 
+-        if test yes = "$GXX"; then
++        if test "$GXX" = yes; then
+           case $host_os in aix4.[012]|aix4.[012].*)
+           # We only want to do this on AIX 4.2 and lower, the check
+           # below for broken collect2 doesn't work under 4.3+
+-	  collect2name=`$CC -print-prog-name=collect2`
++	  collect2name=`${CC} -print-prog-name=collect2`
+ 	  if test -f "$collect2name" &&
+ 	     strings "$collect2name" | $GREP resolve_lib_name >/dev/null
+ 	  then
+@@ -15440,172 +13098,122 @@ printf %s "checking whether the $compiler linker ($LD) supports shared libraries
+ 	  fi
+           esac
+           shared_flag='-shared'
+-	  if test yes = "$aix_use_runtimelinking"; then
+-	    shared_flag=$shared_flag' $wl-G'
++	  if test "$aix_use_runtimelinking" = yes; then
++	    shared_flag="$shared_flag "'${wl}-G'
+ 	  fi
+-	  # Need to ensure runtime linking is disabled for the traditional
+-	  # shared library, or the linker may eventually find shared libraries
+-	  # /with/ Import File - we do not want to mix them.
+-	  shared_flag_aix='-shared'
+-	  shared_flag_svr4='-shared $wl-G'
+         else
+           # not using gcc
+-          if test ia64 = "$host_cpu"; then
++          if test "$host_cpu" = ia64; then
+ 	  # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release
+ 	  # chokes on -Wl,-G. The following line is correct:
+ 	  shared_flag='-G'
+           else
+-	    if test yes = "$aix_use_runtimelinking"; then
+-	      shared_flag='$wl-G'
++	    if test "$aix_use_runtimelinking" = yes; then
++	      shared_flag='${wl}-G'
+ 	    else
+-	      shared_flag='$wl-bM:SRE'
++	      shared_flag='${wl}-bM:SRE'
+ 	    fi
+-	    shared_flag_aix='$wl-bM:SRE'
+-	    shared_flag_svr4='$wl-G'
+           fi
+         fi
+ 
+-        export_dynamic_flag_spec_CXX='$wl-bexpall'
++        export_dynamic_flag_spec_CXX='${wl}-bexpall'
+         # It seems that -bexpall does not export symbols beginning with
+         # underscore (_), so it is better to generate a list of symbols to
+ 	# export.
+         always_export_symbols_CXX=yes
+-	if test aix,yes = "$with_aix_soname,$aix_use_runtimelinking"; then
++        if test "$aix_use_runtimelinking" = yes; then
+           # Warning - without using the other runtime loading flags (-brtl),
+           # -berok will link without error, but may produce a broken library.
+-          # The "-G" linker flag allows undefined symbols.
+-          no_undefined_flag_CXX='-bernotok'
++          allow_undefined_flag_CXX='-berok'
+           # Determine the default libpath from the value encoded in an empty
+           # executable.
+-          if test set = "${lt_cv_aix_libpath+set}"; then
+-  aix_libpath=$lt_cv_aix_libpath
+-else
+-  if test ${lt_cv_aix_libpath__CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++          cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_cxx_try_link "$LINENO"
+-then :
+-
+-  lt_aix_libpath_sed='
+-      /Import File Strings/,/^$/ {
+-	  /^0/ {
+-	      s/^0  *\([^ ]*\) *$/\1/
+-	      p
+-	  }
+-      }'
+-  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  # Check for a 64-bit object if we didn't find anything.
+-  if test -z "$lt_cv_aix_libpath__CXX"; then
+-    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  fi
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
+-    conftest$ac_exeext conftest.$ac_ext
+-  if test -z "$lt_cv_aix_libpath__CXX"; then
+-    lt_cv_aix_libpath__CXX=/usr/lib:/lib
+-  fi
++if ac_fn_cxx_try_link "$LINENO"; then :
+ 
++lt_aix_libpath_sed='
++    /Import File Strings/,/^$/ {
++	/^0/ {
++	    s/^0  *\(.*\)$/\1/
++	    p
++	}
++    }'
++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++# Check for a 64-bit object if we didn't find anything.
++if test -z "$aix_libpath"; then
++  aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+-
+-  aix_libpath=$lt_cv_aix_libpath__CXX
+ fi
++rm -f core conftest.err conftest.$ac_objext \
++    conftest$ac_exeext conftest.$ac_ext
++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+ 
+-          hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath"
++          hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath"
+ 
+-          archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs $wl'$no_entry_flag' $compiler_flags `if test -n "$allow_undefined_flag"; then func_echo_all "$wl$allow_undefined_flag"; else :; fi` $wl'$exp_sym_flag:\$export_symbols' '$shared_flag
++          archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag"
+         else
+-          if test ia64 = "$host_cpu"; then
+-	    hardcode_libdir_flag_spec_CXX='$wl-R $libdir:/usr/lib:/lib'
++          if test "$host_cpu" = ia64; then
++	    hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib'
+ 	    allow_undefined_flag_CXX="-z nodefs"
+-	    archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\$wl$no_entry_flag"' $compiler_flags $wl$allow_undefined_flag '"\$wl$exp_sym_flag:\$export_symbols"
++	    archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols"
+           else
+ 	    # Determine the default libpath from the value encoded in an
+ 	    # empty executable.
+-	    if test set = "${lt_cv_aix_libpath+set}"; then
+-  aix_libpath=$lt_cv_aix_libpath
+-else
+-  if test ${lt_cv_aix_libpath__CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
++	    cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_cxx_try_link "$LINENO"
+-then :
+-
+-  lt_aix_libpath_sed='
+-      /Import File Strings/,/^$/ {
+-	  /^0/ {
+-	      s/^0  *\([^ ]*\) *$/\1/
+-	      p
+-	  }
+-      }'
+-  lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  # Check for a 64-bit object if we didn't find anything.
+-  if test -z "$lt_cv_aix_libpath__CXX"; then
+-    lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+-  fi
+-fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
+-    conftest$ac_exeext conftest.$ac_ext
+-  if test -z "$lt_cv_aix_libpath__CXX"; then
+-    lt_cv_aix_libpath__CXX=/usr/lib:/lib
+-  fi
++if ac_fn_cxx_try_link "$LINENO"; then :
+ 
++lt_aix_libpath_sed='
++    /Import File Strings/,/^$/ {
++	/^0/ {
++	    s/^0  *\(.*\)$/\1/
++	    p
++	}
++    }'
++aix_libpath=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
++# Check for a 64-bit object if we didn't find anything.
++if test -z "$aix_libpath"; then
++  aix_libpath=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"`
+ fi
+-
+-  aix_libpath=$lt_cv_aix_libpath__CXX
+ fi
++rm -f core conftest.err conftest.$ac_objext \
++    conftest$ac_exeext conftest.$ac_ext
++if test -z "$aix_libpath"; then aix_libpath="/usr/lib:/lib"; fi
+ 
+-	    hardcode_libdir_flag_spec_CXX='$wl-blibpath:$libdir:'"$aix_libpath"
++	    hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath"
+ 	    # Warning - without using the other run time loading flags,
+ 	    # -berok will link without error, but may produce a broken library.
+-	    no_undefined_flag_CXX=' $wl-bernotok'
+-	    allow_undefined_flag_CXX=' $wl-berok'
+-	    if test yes = "$with_gnu_ld"; then
++	    no_undefined_flag_CXX=' ${wl}-bernotok'
++	    allow_undefined_flag_CXX=' ${wl}-berok'
++	    if test "$with_gnu_ld" = yes; then
+ 	      # We only use this code for GNU lds that support --whole-archive.
+-	      whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive'
++	      whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ 	    else
+ 	      # Exported symbols can be pulled into shared objects from archives
+ 	      whole_archive_flag_spec_CXX='$convenience'
+ 	    fi
+ 	    archive_cmds_need_lc_CXX=yes
+-	    archive_expsym_cmds_CXX='$RM -r $output_objdir/$realname.d~$MKDIR $output_objdir/$realname.d'
+-	    # -brtl affects multiple linker settings, -berok does not and is overridden later
+-	    compiler_flags_filtered='`func_echo_all "$compiler_flags " | $SED -e "s%-brtl\\([, ]\\)%-berok\\1%g"`'
+-	    if test svr4 != "$with_aix_soname"; then
+-	      # This is similar to how AIX traditionally builds its shared
+-	      # libraries. Need -bnortl late, we may have -brtl in LDFLAGS.
+-	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_aix' -o $output_objdir/$realname.d/$soname $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$realname.d/$soname'
+-	    fi
+-	    if test aix != "$with_aix_soname"; then
+-	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$CC '$shared_flag_svr4' -o $output_objdir/$realname.d/$shared_archive_member_spec.o $libobjs $deplibs $wl-bnoentry '$compiler_flags_filtered'$wl-bE:$export_symbols$allow_undefined_flag~$STRIP -e $output_objdir/$realname.d/$shared_archive_member_spec.o~( func_echo_all "#! $soname($shared_archive_member_spec.o)"; if test shr_64 = "$shared_archive_member_spec"; then func_echo_all "# 64"; else func_echo_all "# 32"; fi; cat $export_symbols ) > $output_objdir/$realname.d/$shared_archive_member_spec.imp~$AR $AR_FLAGS $output_objdir/$soname $output_objdir/$realname.d/$shared_archive_member_spec.o $output_objdir/$realname.d/$shared_archive_member_spec.imp'
+-	    else
+-	      # used by -dlpreopen to get the symbols
+-	      archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$MV  $output_objdir/$realname.d/$soname $output_objdir'
+-	    fi
+-	    archive_expsym_cmds_CXX="$archive_expsym_cmds_CXX"'~$RM -r $output_objdir/$realname.d'
++	    # This is similar to how AIX traditionally builds its shared
++	    # libraries.
++	    archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname'
+           fi
+         fi
+         ;;
+@@ -15615,7 +13223,7 @@ fi
+ 	  allow_undefined_flag_CXX=unsupported
+ 	  # Joseph Beckenbach  says some releases of gcc
+ 	  # support --undefined.  This deserves some investigation.  FIXME
+-	  archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++	  archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ 	else
+ 	  ld_shlibs_CXX=no
+ 	fi
+@@ -15631,76 +13239,29 @@ fi
+         ;;
+ 
+       cygwin* | mingw* | pw32* | cegcc*)
+-	case $GXX,$cc_basename in
+-	,cl* | no,cl* | ,icl* | no,icl*)
+-	  # Native MSVC or ICC
+-	  # hardcode_libdir_flag_spec is actually meaningless, as there is
+-	  # no search path for DLLs.
+-	  hardcode_libdir_flag_spec_CXX=' '
+-	  allow_undefined_flag_CXX=unsupported
+-	  always_export_symbols_CXX=yes
+-	  file_list_spec_CXX='@'
+-	  # Tell ltmain to make .lib files, not .a files.
+-	  libext=lib
+-	  # Tell ltmain to make .dll files, not .so files.
+-	  shrext_cmds=.dll
+-	  # FIXME: Setting linknames here is a bad hack.
+-	  archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~linknames='
+-	  archive_expsym_cmds_CXX='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+-              cp "$export_symbols" "$output_objdir/$soname.def";
+-              echo "$tool_output_objdir$soname.def" > "$output_objdir/$soname.exp";
+-            else
+-              $SED -e '\''s/^/-link -EXPORT:/'\'' < $export_symbols > $output_objdir/$soname.exp;
+-            fi~
+-            $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~
+-            linknames='
+-	  # The linker will not automatically build a static lib if we build a DLL.
+-	  # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true'
+-	  enable_shared_with_static_runtimes_CXX=yes
+-	  # Don't use ranlib
+-	  old_postinstall_cmds_CXX='chmod 644 $oldlib'
+-	  postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~
+-            lt_tool_outputfile="@TOOL_OUTPUT@"~
+-            case $lt_outputfile in
+-              *.exe|*.EXE) ;;
+-              *)
+-                lt_outputfile=$lt_outputfile.exe
+-                lt_tool_outputfile=$lt_tool_outputfile.exe
+-                ;;
+-            esac~
+-            func_to_tool_file "$lt_outputfile"~
+-            if test : != "$MANIFEST_TOOL" && test -f "$lt_outputfile.manifest"; then
+-              $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1;
+-              $RM "$lt_outputfile.manifest";
+-            fi'
+-	  ;;
+-	*)
+-	  # g++
+-	  # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless,
+-	  # as there is no search path for DLLs.
+-	  hardcode_libdir_flag_spec_CXX='-L$libdir'
+-	  export_dynamic_flag_spec_CXX='$wl--export-all-symbols'
+-	  allow_undefined_flag_CXX=unsupported
+-	  always_export_symbols_CXX=no
+-	  enable_shared_with_static_runtimes_CXX=yes
+-
+-	  if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
+-	    archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+-	    # If the export-symbols file already is a .def file, use it as
+-	    # is; otherwise, prepend EXPORTS...
+-	    archive_expsym_cmds_CXX='if   test DEF = "`$SED -n     -e '\''s/^[	 ]*//'\''     -e '\''/^\(;.*\)*$/d'\''     -e '\''s/^\(EXPORTS\|LIBRARY\)\([	 ].*\)*$/DEF/p'\''     -e q     $export_symbols`" ; then
+-              cp $export_symbols $output_objdir/$soname.def;
+-            else
+-              echo EXPORTS > $output_objdir/$soname.def;
+-              cat $export_symbols >> $output_objdir/$soname.def;
+-            fi~
+-            $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname $wl--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
+-	  else
+-	    ld_shlibs_CXX=no
+-	  fi
+-	  ;;
+-	esac
+-	;;
++        # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless,
++        # as there is no search path for DLLs.
++        hardcode_libdir_flag_spec_CXX='-L$libdir'
++        export_dynamic_flag_spec_CXX='${wl}--export-all-symbols'
++        allow_undefined_flag_CXX=unsupported
++        always_export_symbols_CXX=no
++        enable_shared_with_static_runtimes_CXX=yes
++
++        if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then
++          archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++          # If the export-symbols file already is a .def file (1st line
++          # is EXPORTS), use it as is; otherwise, prepend...
++          archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then
++	    cp $export_symbols $output_objdir/$soname.def;
++          else
++	    echo EXPORTS > $output_objdir/$soname.def;
++	    cat $export_symbols >> $output_objdir/$soname.def;
++          fi~
++          $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib'
++        else
++          ld_shlibs_CXX=no
++        fi
++        ;;
+       darwin* | rhapsody*)
+ 
+ 
+@@ -15708,27 +13269,26 @@ fi
+   hardcode_direct_CXX=no
+   hardcode_automatic_CXX=yes
+   hardcode_shlibpath_var_CXX=unsupported
+-  if test yes = "$lt_cv_ld_force_load"; then
+-    whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience $wl-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+-
++  if test "$lt_cv_ld_force_load" = "yes"; then
++    whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`'
+   else
+     whole_archive_flag_spec_CXX=''
+   fi
+   link_all_deplibs_CXX=yes
+-  allow_undefined_flag_CXX=$_lt_dar_allow_undefined
++  allow_undefined_flag_CXX="$_lt_dar_allow_undefined"
+   case $cc_basename in
+-     ifort*|nagfor*) _lt_dar_can_shared=yes ;;
++     ifort*) _lt_dar_can_shared=yes ;;
+      *) _lt_dar_can_shared=$GCC ;;
+   esac
+-  if test yes = "$_lt_dar_can_shared"; then
++  if test "$_lt_dar_can_shared" = "yes"; then
+     output_verbose_link_cmd=func_echo_all
+-    archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dsymutil"
+-    module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dsymutil"
+-    archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod$_lt_dar_export_syms$_lt_dsymutil"
+-    module_expsym_cmds_CXX="$SED -e 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags$_lt_dar_export_syms$_lt_dsymutil"
+-       if test yes != "$lt_cv_apple_cc_single_mod"; then
+-      archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dsymutil"
+-      archive_expsym_cmds_CXX="$SED 's|^|_|' < \$export_symbols > \$output_objdir/\$libname-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \$lib-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$lib-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring$_lt_dar_export_syms$_lt_dsymutil"
++    archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}"
++    module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}"
++    archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}"
++    module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}"
++       if test "$lt_cv_apple_cc_single_mod" != "yes"; then
++      archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}"
++      archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}"
+     fi
+ 
+   else
+@@ -15737,35 +13297,6 @@ fi
+ 
+ 	;;
+ 
+-      os2*)
+-	hardcode_libdir_flag_spec_CXX='-L$libdir'
+-	hardcode_minus_L_CXX=yes
+-	allow_undefined_flag_CXX=unsupported
+-	shrext_cmds=.dll
+-	archive_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+-	  emxexp $libobjs | $SED /"_DLL_InitTerm"/d >> $output_objdir/$libname.def~
+-	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	  emximp -o $lib $output_objdir/$libname.def'
+-	archive_expsym_cmds_CXX='$ECHO "LIBRARY ${soname%$shared_ext} INITINSTANCE TERMINSTANCE" > $output_objdir/$libname.def~
+-	  $ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~
+-	  $ECHO "DATA MULTIPLE NONSHARED" >> $output_objdir/$libname.def~
+-	  $ECHO EXPORTS >> $output_objdir/$libname.def~
+-	  prefix_cmds="$SED"~
+-	  if test EXPORTS = "`$SED 1q $export_symbols`"; then
+-	    prefix_cmds="$prefix_cmds -e 1d";
+-	  fi~
+-	  prefix_cmds="$prefix_cmds -e \"s/^\(.*\)$/_\1/g\""~
+-	  cat $export_symbols | $prefix_cmds >> $output_objdir/$libname.def~
+-	  $CC -Zdll -Zcrtdll -o $output_objdir/$soname $libobjs $deplibs $compiler_flags $output_objdir/$libname.def~
+-	  emximp -o $lib $output_objdir/$libname.def'
+-	old_archive_From_new_cmds_CXX='emximp -o $output_objdir/${libname}_dll.a $output_objdir/$libname.def'
+-	enable_shared_with_static_runtimes_CXX=yes
+-	file_list_spec_CXX='@'
+-	;;
+-
+       dgux*)
+         case $cc_basename in
+           ec++*)
+@@ -15794,21 +13325,24 @@ fi
+         archive_cmds_need_lc_CXX=no
+         ;;
+ 
+-      freebsd* | dragonfly* | midnightbsd*)
++      freebsd* | dragonfly*)
+         # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF
+         # conventions
+         ld_shlibs_CXX=yes
+         ;;
+ 
++      gnu*)
++        ;;
++
+       haiku*)
+-        archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
++        archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+         link_all_deplibs_CXX=yes
+         ;;
+ 
+       hpux9*)
+-        hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir'
++        hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir'
+         hardcode_libdir_separator_CXX=:
+-        export_dynamic_flag_spec_CXX='$wl-E'
++        export_dynamic_flag_spec_CXX='${wl}-E'
+         hardcode_direct_CXX=yes
+         hardcode_minus_L_CXX=yes # Not in the search PATH,
+ 				             # but as the default
+@@ -15820,7 +13354,7 @@ fi
+             ld_shlibs_CXX=no
+             ;;
+           aCC*)
+-            archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++            archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+             # Commands to make compiler produce verbose output that lists
+             # what "hidden" libraries, object files and flags are used when
+             # linking a shared library.
+@@ -15829,11 +13363,11 @@ fi
+             # explicitly linking system object files so we need to strip them
+             # from the output so that they don't get included in the library
+             # dependencies.
+-            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++            output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+             ;;
+           *)
+-            if test yes = "$GXX"; then
+-              archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag $wl+b $wl$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test "x$output_objdir/$soname" = "x$lib" || mv $output_objdir/$soname $lib'
++            if test "$GXX" = yes; then
++              archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib -fPIC ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib'
+             else
+               # FIXME: insert proper C++ library support
+               ld_shlibs_CXX=no
+@@ -15843,15 +13377,15 @@ fi
+         ;;
+ 
+       hpux10*|hpux11*)
+-        if test no = "$with_gnu_ld"; then
+-	  hardcode_libdir_flag_spec_CXX='$wl+b $wl$libdir'
++        if test $with_gnu_ld = no; then
++	  hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir'
+ 	  hardcode_libdir_separator_CXX=:
+ 
+           case $host_cpu in
+             hppa*64*|ia64*)
+               ;;
+             *)
+-	      export_dynamic_flag_spec_CXX='$wl-E'
++	      export_dynamic_flag_spec_CXX='${wl}-E'
+               ;;
+           esac
+         fi
+@@ -15877,13 +13411,13 @@ fi
+           aCC*)
+ 	    case $host_cpu in
+ 	      hppa*64*)
+-	        archive_cmds_CXX='$CC -b $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ 	        ;;
+ 	      ia64*)
+-	        archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ 	        ;;
+ 	      *)
+-	        archive_cmds_CXX='$CC -b $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	        archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ 	        ;;
+ 	    esac
+ 	    # Commands to make compiler produce verbose output that lists
+@@ -15894,20 +13428,20 @@ fi
+ 	    # explicitly linking system object files so we need to strip them
+ 	    # from the output so that they don't get included in the library
+ 	    # dependencies.
+-	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++	    output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ 	    ;;
+           *)
+-	    if test yes = "$GXX"; then
+-	      if test no = "$with_gnu_ld"; then
++	    if test "$GXX" = yes; then
++	      if test $with_gnu_ld = no; then
+ 	        case $host_cpu in
+ 	          hppa*64*)
+-	            archive_cmds_CXX='$CC -shared -nostdlib -fPIC $wl+h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ 	            ;;
+ 	          ia64*)
+-	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ 	            ;;
+ 	          *)
+-	            archive_cmds_CXX='$CC -shared -nostdlib $pic_flag $wl+h $wl$soname $wl+b $wl$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	            archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ 	            ;;
+ 	        esac
+ 	      fi
+@@ -15922,22 +13456,22 @@ fi
+       interix[3-9]*)
+ 	hardcode_direct_CXX=no
+ 	hardcode_shlibpath_var_CXX=no
+-	hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+-	export_dynamic_flag_spec_CXX='$wl-E'
++	hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
++	export_dynamic_flag_spec_CXX='${wl}-E'
+ 	# Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc.
+ 	# Instead, shared libraries are loaded at an image base (0x10000000 by
+ 	# default) and relocated if they conflict, which is a slow very memory
+ 	# consuming and fragmenting process.  To avoid this, we pick a random,
+ 	# 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link
+ 	# time.  Moving up from 0x10000000 also allows more sbrk(2) space.
+-	archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+-	archive_expsym_cmds_CXX='$SED "s|^|_|" $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags $wl-h,$soname $wl--retain-symbols-file,$output_objdir/$soname.expsym $wl--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++	archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
++	archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib'
+ 	;;
+       irix5* | irix6*)
+         case $cc_basename in
+           CC*)
+ 	    # SGI C++
+-	    archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	    archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ 
+ 	    # Archives containing C++ object files must be created using
+ 	    # "CC -ar", where "CC" is the IRIX C++ compiler.  This is
+@@ -15946,22 +13480,22 @@ fi
+ 	    old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs'
+ 	    ;;
+           *)
+-	    if test yes = "$GXX"; then
+-	      if test no = "$with_gnu_ld"; then
+-	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	    if test "$GXX" = yes; then
++	      if test "$with_gnu_ld" = no; then
++	        archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ 	      else
+-	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` -o $lib'
++	        archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib'
+ 	      fi
+ 	    fi
+ 	    link_all_deplibs_CXX=yes
+ 	    ;;
+         esac
+-        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++        hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+         hardcode_libdir_separator_CXX=:
+         inherit_rpath_CXX=yes
+         ;;
+ 
+-      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++      linux* | k*bsd*-gnu | kopensolaris*-gnu)
+         case $cc_basename in
+           KCC*)
+ 	    # Kuck and Associates, Inc. (KAI) C++ Compiler
+@@ -15969,8 +13503,8 @@ fi
+ 	    # KCC will only create a shared library if the output file
+ 	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ 	    # to its proper name (with version) after linking.
+-	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+-	    archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib $wl-retain-symbols-file,$export_symbols; mv \$templib $lib'
++	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
++	    archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib'
+ 	    # Commands to make compiler produce verbose output that lists
+ 	    # what "hidden" libraries, object files and flags are used when
+ 	    # linking a shared library.
+@@ -15979,10 +13513,10 @@ fi
+ 	    # explicitly linking system object files so we need to strip them
+ 	    # from the output so that they don't get included in the library
+ 	    # dependencies.
+-	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++	    output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ 
+-	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+-	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
++	    hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
++	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+ 
+ 	    # Archives containing C++ object files must be created using
+ 	    # "CC -Bstatic", where "CC" is the KAI C++ compiler.
+@@ -15996,59 +13530,59 @@ fi
+ 	    # earlier do not add the objects themselves.
+ 	    case `$CC -V 2>&1` in
+ 	      *"Version 7."*)
+-	        archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-		archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	        archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
++		archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ 		;;
+ 	      *)  # Version 8.0 or newer
+ 	        tmp_idyn=
+ 	        case $host_cpu in
+ 		  ia64*) tmp_idyn=' -i_dynamic';;
+ 		esac
+-	        archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-		archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	        archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
++		archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ 		;;
+ 	    esac
+ 	    archive_cmds_need_lc_CXX=no
+-	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+-	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+-	    whole_archive_flag_spec_CXX='$wl--whole-archive$convenience $wl--no-whole-archive'
++	    hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
++	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
++	    whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ 	    ;;
+           pgCC* | pgcpp*)
+             # Portland Group C++ compiler
+ 	    case `$CC -V` in
+ 	    *pgCC\ [1-5].* | *pgcpp\ [1-5].*)
+ 	      prelink_cmds_CXX='tpldir=Template.dir~
+-               rm -rf $tpldir~
+-               $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+-               compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
++		rm -rf $tpldir~
++		$CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
++		compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+ 	      old_archive_cmds_CXX='tpldir=Template.dir~
+-                rm -rf $tpldir~
+-                $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+-                $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+-                $RANLIB $oldlib'
++		rm -rf $tpldir~
++		$CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
++		$AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
++		$RANLIB $oldlib'
+ 	      archive_cmds_CXX='tpldir=Template.dir~
+-                rm -rf $tpldir~
+-                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+-                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
++		rm -rf $tpldir~
++		$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
++		$CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ 	      archive_expsym_cmds_CXX='tpldir=Template.dir~
+-                rm -rf $tpldir~
+-                $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+-                $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++		rm -rf $tpldir~
++		$CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
++		$CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ 	      ;;
+ 	    *) # Version 6 and above use weak symbols
+-	      archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-	      archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname $wl-retain-symbols-file $wl$export_symbols -o $lib'
++	      archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
++	      archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ 	      ;;
+ 	    esac
+ 
+-	    hardcode_libdir_flag_spec_CXX='$wl--rpath $wl$libdir'
+-	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+-	    whole_archive_flag_spec_CXX='$wl--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	    hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir'
++	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
++	    whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test  -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+             ;;
+ 	  cxx*)
+ 	    # Compaq C++
+-	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname -o $lib'
+-	    archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname  -o $lib $wl-retain-symbols-file $wl$export_symbols'
++	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
++	    archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname  -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
+ 
+ 	    runpath_var=LD_RUN_PATH
+ 	    hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+@@ -16062,29 +13596,29 @@ fi
+ 	    # explicitly linking system object files so we need to strip them
+ 	    # from the output so that they don't get included in the library
+ 	    # dependencies.
+-	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
++	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+ 	    ;;
+ 	  xl* | mpixl* | bgxl*)
+ 	    # IBM XL 8.0 on PPC, with GNU ld
+-	    hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
+-	    export_dynamic_flag_spec_CXX='$wl--export-dynamic'
+-	    archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname -o $lib'
+-	    if test yes = "$supports_anon_versioning"; then
++	    hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
++	    export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
++	    archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
++	    if test "x$supports_anon_versioning" = xyes; then
+ 	      archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~
+-                cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+-                echo "local: *; };" >> $output_objdir/$libname.ver~
+-                $CC -qmkshrobj $libobjs $deplibs $compiler_flags $wl-soname $wl$soname $wl-version-script $wl$output_objdir/$libname.ver -o $lib'
++		cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
++		echo "local: *; };" >> $output_objdir/$libname.ver~
++		$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ 	    fi
+ 	    ;;
+ 	  *)
+-	    case `$CC -V 2>&1 | $SED 5q` in
++	    case `$CC -V 2>&1 | sed 5q` in
+ 	    *Sun\ C*)
+ 	      # Sun C++ 5.9
+ 	      no_undefined_flag_CXX=' -zdefs'
+-	      archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+-	      archive_expsym_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file $wl$export_symbols'
++	      archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	      archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
+ 	      hardcode_libdir_flag_spec_CXX='-R$libdir'
+-	      whole_archive_flag_spec_CXX='$wl--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` $wl--no-whole-archive'
++	      whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ 	      compiler_needs_object_CXX=yes
+ 
+ 	      # Not sure whether something based on
+@@ -16142,17 +13676,22 @@ fi
+         ld_shlibs_CXX=yes
+ 	;;
+ 
+-      openbsd* | bitrig*)
++      openbsd2*)
++        # C++ shared libraries are fairly broken
++	ld_shlibs_CXX=no
++	;;
++
++      openbsd*)
+ 	if test -f /usr/libexec/ld.so; then
+ 	  hardcode_direct_CXX=yes
+ 	  hardcode_shlibpath_var_CXX=no
+ 	  hardcode_direct_absolute_CXX=yes
+ 	  archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+-	  hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
+-	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`"; then
+-	    archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-retain-symbols-file,$export_symbols -o $lib'
+-	    export_dynamic_flag_spec_CXX='$wl-E'
+-	    whole_archive_flag_spec_CXX=$wlarc'--whole-archive$convenience '$wlarc'--no-whole-archive'
++	  hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
++	  if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
++	    archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib'
++	    export_dynamic_flag_spec_CXX='${wl}-E'
++	    whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+ 	  fi
+ 	  output_verbose_link_cmd=func_echo_all
+ 	else
+@@ -16168,9 +13707,9 @@ fi
+ 	    # KCC will only create a shared library if the output file
+ 	    # ends with ".so" (or ".sl" for HP-UX), so rename the library
+ 	    # to its proper name (with version) after linking.
+-	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\$tempext\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
++	    archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib'
+ 
+-	    hardcode_libdir_flag_spec_CXX='$wl-rpath,$libdir'
++	    hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir'
+ 	    hardcode_libdir_separator_CXX=:
+ 
+ 	    # Archives containing C++ object files must be created using
+@@ -16188,17 +13727,17 @@ fi
+           cxx*)
+ 	    case $host in
+ 	      osf3*)
+-	        allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*'
+-	        archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $soname `test -n "$verstring" && func_echo_all "$wl-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
+-	        hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++	        allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*'
++	        archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
++	        hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+ 		;;
+ 	      *)
+ 	        allow_undefined_flag_CXX=' -expect_unresolved \*'
+-	        archive_cmds_CXX='$CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib'
++	        archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib'
+ 	        archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~
+-                  echo "-hidden">> $lib.exp~
+-                  $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname $wl-input $wl$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry $output_objdir/so_locations -o $lib~
+-                  $RM $lib.exp'
++	          echo "-hidden">> $lib.exp~
++	          $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp  `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~
++	          $RM $lib.exp'
+ 	        hardcode_libdir_flag_spec_CXX='-rpath $libdir'
+ 		;;
+ 	    esac
+@@ -16213,21 +13752,21 @@ fi
+ 	    # explicitly linking system object files so we need to strip them
+ 	    # from the output so that they don't get included in the library
+ 	    # dependencies.
+-	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list= ; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
++	    output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"'
+ 	    ;;
+ 	  *)
+-	    if test yes,no = "$GXX,$with_gnu_ld"; then
+-	      allow_undefined_flag_CXX=' $wl-expect_unresolved $wl\*'
++	    if test "$GXX" = yes && test "$with_gnu_ld" = no; then
++	      allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*'
+ 	      case $host in
+ 	        osf3*)
+-	          archive_cmds_CXX='$CC -shared -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	          archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ 		  ;;
+ 	        *)
+-	          archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-msym $wl-soname $wl$soname `test -n "$verstring" && func_echo_all "$wl-set_version $wl$verstring"` $wl-update_registry $wl$output_objdir/so_locations -o $lib'
++	          archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib'
+ 		  ;;
+ 	      esac
+ 
+-	      hardcode_libdir_flag_spec_CXX='$wl-rpath $wl$libdir'
++	      hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+ 	      hardcode_libdir_separator_CXX=:
+ 
+ 	      # Commands to make compiler produce verbose output that lists
+@@ -16269,13 +13808,13 @@ fi
+ 
+       solaris*)
+         case $cc_basename in
+-          CC* | sunCC*)
++          CC*)
+ 	    # Sun C++ 4.2, 5.x and Centerline C++
+             archive_cmds_need_lc_CXX=yes
+ 	    no_undefined_flag_CXX=' -zdefs'
+-	    archive_cmds_CXX='$CC -G$allow_undefined_flag -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
++	    archive_cmds_CXX='$CC -G${allow_undefined_flag}  -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ 	    archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-              $CC -G$allow_undefined_flag $wl-M $wl$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++	      $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+ 
+ 	    hardcode_libdir_flag_spec_CXX='-R$libdir'
+ 	    hardcode_shlibpath_var_CXX=no
+@@ -16283,7 +13822,7 @@ fi
+ 	      solaris2.[0-5] | solaris2.[0-5].*) ;;
+ 	      *)
+ 		# The compiler driver will combine and reorder linker options,
+-		# but understands '-z linker_flag'.
++		# but understands `-z linker_flag'.
+ 	        # Supported since Solaris 2.6 (maybe 2.5.1?)
+ 		whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract'
+ 	        ;;
+@@ -16300,30 +13839,30 @@ fi
+ 	    ;;
+           gcx*)
+ 	    # Green Hills C++ Compiler
+-	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++	    archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ 
+ 	    # The C++ compiler must be used to create the archive.
+ 	    old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs'
+ 	    ;;
+           *)
+ 	    # GNU C++ compiler with Solaris linker
+-	    if test yes,no = "$GXX,$with_gnu_ld"; then
+-	      no_undefined_flag_CXX=' $wl-z ${wl}defs'
++	    if test "$GXX" = yes && test "$with_gnu_ld" = no; then
++	      no_undefined_flag_CXX=' ${wl}-z ${wl}defs'
+ 	      if $CC --version | $GREP -v '^2\.7' > /dev/null; then
+-	        archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++	        archive_cmds_CXX='$CC -shared -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ 	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-                  $CC -shared $pic_flag -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++		  $CC -shared -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+ 
+ 	        # Commands to make compiler produce verbose output that lists
+ 	        # what "hidden" libraries, object files and flags are used when
+ 	        # linking a shared library.
+ 	        output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+ 	      else
+-	        # g++ 2.7 appears to require '-G' NOT '-shared' on this
++	        # g++ 2.7 appears to require `-G' NOT `-shared' on this
+ 	        # platform.
+-	        archive_cmds_CXX='$CC -G -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags $wl-h $wl$soname -o $lib'
++	        archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib'
+ 	        archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~
+-                  $CC -G -nostdlib $wl-M $wl$lib.exp $wl-h $wl$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
++		  $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp'
+ 
+ 	        # Commands to make compiler produce verbose output that lists
+ 	        # what "hidden" libraries, object files and flags are used when
+@@ -16331,11 +13870,11 @@ fi
+ 	        output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+ 	      fi
+ 
+-	      hardcode_libdir_flag_spec_CXX='$wl-R $wl$libdir'
++	      hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir'
+ 	      case $host_os in
+ 		solaris2.[0-5] | solaris2.[0-5].*) ;;
+ 		*)
+-		  whole_archive_flag_spec_CXX='$wl-z ${wl}allextract$convenience $wl-z ${wl}defaultextract'
++		  whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract'
+ 		  ;;
+ 	      esac
+ 	    fi
+@@ -16344,52 +13883,52 @@ fi
+         ;;
+ 
+     sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*)
+-      no_undefined_flag_CXX='$wl-z,text'
++      no_undefined_flag_CXX='${wl}-z,text'
+       archive_cmds_need_lc_CXX=no
+       hardcode_shlibpath_var_CXX=no
+       runpath_var='LD_RUN_PATH'
+ 
+       case $cc_basename in
+         CC*)
+-	  archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ 	  ;;
+ 	*)
+-	  archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	  archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	  archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ 	  ;;
+       esac
+       ;;
+ 
+       sysv5* | sco3.2v5* | sco5v6*)
+-	# Note: We CANNOT use -z defs as we might desire, because we do not
++	# Note: We can NOT use -z defs as we might desire, because we do not
+ 	# link with -lc, and that would cause any symbols used from libc to
+ 	# always be unresolved, which means just about no library would
+ 	# ever link correctly.  If we're not using GNU ld we use -z text
+ 	# though, which does catch some bad symbols but isn't as heavy-handed
+ 	# as -z defs.
+-	no_undefined_flag_CXX='$wl-z,text'
+-	allow_undefined_flag_CXX='$wl-z,nodefs'
++	no_undefined_flag_CXX='${wl}-z,text'
++	allow_undefined_flag_CXX='${wl}-z,nodefs'
+ 	archive_cmds_need_lc_CXX=no
+ 	hardcode_shlibpath_var_CXX=no
+-	hardcode_libdir_flag_spec_CXX='$wl-R,$libdir'
++	hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir'
+ 	hardcode_libdir_separator_CXX=':'
+ 	link_all_deplibs_CXX=yes
+-	export_dynamic_flag_spec_CXX='$wl-Bexport'
++	export_dynamic_flag_spec_CXX='${wl}-Bexport'
+ 	runpath_var='LD_RUN_PATH'
+ 
+ 	case $cc_basename in
+           CC*)
+-	    archive_cmds_CXX='$CC -G $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	    archive_expsym_cmds_CXX='$CC -G $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ 	    old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~
+-              '"$old_archive_cmds_CXX"
++	      '"$old_archive_cmds_CXX"
+ 	    reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~
+-              '"$reload_cmds_CXX"
++	      '"$reload_cmds_CXX"
+ 	    ;;
+ 	  *)
+-	    archive_cmds_CXX='$CC -shared $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+-	    archive_expsym_cmds_CXX='$CC -shared $wl-Bexport:$export_symbols $wl-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
++	    archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags'
+ 	    ;;
+ 	esac
+       ;;
+@@ -16419,12 +13958,12 @@ fi
+         ;;
+     esac
+ 
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+-printf "%s\n" "$ld_shlibs_CXX" >&6; }
+-    test no = "$ld_shlibs_CXX" && can_build_shared=no
++    { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
++$as_echo "$ld_shlibs_CXX" >&6; }
++    test "$ld_shlibs_CXX" = no && can_build_shared=no
+ 
+-    GCC_CXX=$GXX
+-    LD_CXX=$LD
++    GCC_CXX="$GXX"
++    LD_CXX="$LD"
+ 
+     ## CAVEAT EMPTOR:
+     ## There is no encapsulation within the following macros, do not change
+@@ -16447,18 +13986,10 @@ private:
+ };
+ _LT_EOF
+ 
+-
+-_lt_libdeps_save_CFLAGS=$CFLAGS
+-case "$CC $CFLAGS " in #(
+-*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+-*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+-*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+-esac
+-
+ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }; then
+   # Parse the compiler output and extract the necessary
+   # objects, libraries and library flags.
+@@ -16468,38 +13999,29 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   pre_test_object_deps_done=no
+ 
+   for p in `eval "$output_verbose_link_cmd"`; do
+-    case $prev$p in
++    case $p in
+ 
+     -L* | -R* | -l*)
+        # Some compilers place space between "-{L,R}" and the path.
+        # Remove the space.
+-       if test x-L = "$p" ||
+-          test x-R = "$p"; then
++       if test $p = "-L" ||
++          test $p = "-R"; then
+ 	 prev=$p
+ 	 continue
++       else
++	 prev=
+        fi
+ 
+-       # Expand the sysroot to ease extracting the directories later.
+-       if test -z "$prev"; then
+-         case $p in
+-         -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+-         -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+-         -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+-         esac
+-       fi
+-       case $p in
+-       =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+-       esac
+-       if test no = "$pre_test_object_deps_done"; then
+-	 case $prev in
+-	 -L | -R)
++       if test "$pre_test_object_deps_done" = no; then
++	 case $p in
++	 -L* | -R*)
+ 	   # Internal compiler library paths should come after those
+ 	   # provided the user.  The postdeps already come after the
+ 	   # user supplied libs so there is no need to process them.
+ 	   if test -z "$compiler_lib_search_path_CXX"; then
+-	     compiler_lib_search_path_CXX=$prev$p
++	     compiler_lib_search_path_CXX="${prev}${p}"
+ 	   else
+-	     compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} $prev$p"
++	     compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}"
+ 	   fi
+ 	   ;;
+ 	 # The "-l" case would never come before the object being
+@@ -16507,15 +14029,13 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ 	 esac
+        else
+ 	 if test -z "$postdeps_CXX"; then
+-	   postdeps_CXX=$prev$p
++	   postdeps_CXX="${prev}${p}"
+ 	 else
+-	   postdeps_CXX="${postdeps_CXX} $prev$p"
++	   postdeps_CXX="${postdeps_CXX} ${prev}${p}"
+ 	 fi
+        fi
+-       prev=
+        ;;
+ 
+-    *.lto.$objext) ;; # Ignore GCC LTO objects
+     *.$objext)
+        # This assumes that the test object file only shows up
+        # once in the compiler output.
+@@ -16524,15 +14044,15 @@ if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+ 	 continue
+        fi
+ 
+-       if test no = "$pre_test_object_deps_done"; then
++       if test "$pre_test_object_deps_done" = no; then
+ 	 if test -z "$predep_objects_CXX"; then
+-	   predep_objects_CXX=$p
++	   predep_objects_CXX="$p"
+ 	 else
+ 	   predep_objects_CXX="$predep_objects_CXX $p"
+ 	 fi
+        else
+ 	 if test -z "$postdep_objects_CXX"; then
+-	   postdep_objects_CXX=$p
++	   postdep_objects_CXX="$p"
+ 	 else
+ 	   postdep_objects_CXX="$postdep_objects_CXX $p"
+ 	 fi
+@@ -16551,7 +14071,6 @@ else
+ fi
+ 
+ $RM -f confest.$objext
+-CFLAGS=$_lt_libdeps_save_CFLAGS
+ 
+ # PORTME: override above test on systems where it is broken
+ case $host_os in
+@@ -16562,6 +14081,51 @@ interix[3-9]*)
+   postdep_objects_CXX=
+   postdeps_CXX=
+   ;;
++
++linux*)
++  case `$CC -V 2>&1 | sed 5q` in
++  *Sun\ C*)
++    # Sun C++ 5.9
++
++    # The more standards-conforming stlport4 library is
++    # incompatible with the Cstd library. Avoid specifying
++    # it if it's in CXXFLAGS. Ignore libCrun as
++    # -library=stlport4 depends on it.
++    case " $CXX $CXXFLAGS " in
++    *" -library=stlport4 "*)
++      solaris_use_stlport4=yes
++      ;;
++    esac
++
++    if test "$solaris_use_stlport4" != yes; then
++      postdeps_CXX='-library=Cstd -library=Crun'
++    fi
++    ;;
++  esac
++  ;;
++
++solaris*)
++  case $cc_basename in
++  CC*)
++    # The more standards-conforming stlport4 library is
++    # incompatible with the Cstd library. Avoid specifying
++    # it if it's in CXXFLAGS. Ignore libCrun as
++    # -library=stlport4 depends on it.
++    case " $CXX $CXXFLAGS " in
++    *" -library=stlport4 "*)
++      solaris_use_stlport4=yes
++      ;;
++    esac
++
++    # Adding this requires a known-good setup of shared libraries for
++    # Sun compiler versions before 5.6, else PIC objects from an old
++    # archive will be linked into the output, leading to subtle bugs.
++    if test "$solaris_use_stlport4" != yes; then
++      postdeps_CXX='-library=Cstd -library=Crun'
++    fi
++    ;;
++  esac
++  ;;
+ esac
+ 
+ 
+@@ -16570,7 +14134,7 @@ case " $postdeps_CXX " in
+ esac
+  compiler_lib_search_dirs_CXX=
+ if test -n "${compiler_lib_search_path_CXX}"; then
+- compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | $SED -e 's! -L! !g' -e 's!^ !!'`
++ compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'`
+ fi
+ 
+ 
+@@ -16607,16 +14171,18 @@ fi
+ lt_prog_compiler_pic_CXX=
+ lt_prog_compiler_static_CXX=
+ 
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
++$as_echo_n "checking for $compiler option to produce PIC... " >&6; }
+ 
+   # C++ specific cases for pic, static, wl, etc.
+-  if test yes = "$GXX"; then
++  if test "$GXX" = yes; then
+     lt_prog_compiler_wl_CXX='-Wl,'
+     lt_prog_compiler_static_CXX='-static'
+ 
+     case $host_os in
+     aix*)
+       # All AIX code is PIC.
+-      if test ia64 = "$host_cpu"; then
++      if test "$host_cpu" = ia64; then
+ 	# AIX 5 now supports IA64 processor
+ 	lt_prog_compiler_static_CXX='-Bstatic'
+       fi
+@@ -16631,8 +14197,8 @@ lt_prog_compiler_static_CXX=
+         ;;
+       m68k)
+             # FIXME: we need at least 68020 code to build shared libraries, but
+-            # adding the '-m68020' flag to GCC prevents building anything better,
+-            # like '-m68040'.
++            # adding the `-m68020' flag to GCC prevents building anything better,
++            # like `-m68040'.
+             lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4'
+         ;;
+       esac
+@@ -16647,11 +14213,6 @@ lt_prog_compiler_static_CXX=
+       # Although the cygwin gcc ignores -fPIC, still need this for old-style
+       # (--disable-auto-import) libraries
+       lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+-      case $host_os in
+-      os2*)
+-	lt_prog_compiler_static_CXX='$wl-static'
+-	;;
+-      esac
+       ;;
+     darwin* | rhapsody*)
+       # PIC is the default on this platform
+@@ -16701,7 +14262,7 @@ lt_prog_compiler_static_CXX=
+     case $host_os in
+       aix[4-9]*)
+ 	# All AIX code is PIC.
+-	if test ia64 = "$host_cpu"; then
++	if test "$host_cpu" = ia64; then
+ 	  # AIX 5 now supports IA64 processor
+ 	  lt_prog_compiler_static_CXX='-Bstatic'
+ 	else
+@@ -16716,11 +14277,6 @@ lt_prog_compiler_static_CXX=
+ 	  ;;
+ 	esac
+ 	;;
+-      mingw* | cygwin* | os2* | pw32* | cegcc*)
+-	# This hack is so that the source file can tell whether it is being
+-	# built for inclusion in a dll (and should export symbols for example).
+-	lt_prog_compiler_pic_CXX='-DDLL_EXPORT'
+-	;;
+       dgux*)
+ 	case $cc_basename in
+ 	  ec++*)
+@@ -16734,21 +14290,21 @@ lt_prog_compiler_static_CXX=
+ 	    ;;
+ 	esac
+ 	;;
+-      freebsd* | dragonfly* | midnightbsd*)
++      freebsd* | dragonfly*)
+ 	# FreeBSD uses GNU C++
+ 	;;
+       hpux9* | hpux10* | hpux11*)
+ 	case $cc_basename in
+ 	  CC*)
+ 	    lt_prog_compiler_wl_CXX='-Wl,'
+-	    lt_prog_compiler_static_CXX='$wl-a ${wl}archive'
+-	    if test ia64 != "$host_cpu"; then
++	    lt_prog_compiler_static_CXX='${wl}-a ${wl}archive'
++	    if test "$host_cpu" != ia64; then
+ 	      lt_prog_compiler_pic_CXX='+Z'
+ 	    fi
+ 	    ;;
+ 	  aCC*)
+ 	    lt_prog_compiler_wl_CXX='-Wl,'
+-	    lt_prog_compiler_static_CXX='$wl-a ${wl}archive'
++	    lt_prog_compiler_static_CXX='${wl}-a ${wl}archive'
+ 	    case $host_cpu in
+ 	    hppa*64*|ia64*)
+ 	      # +Z the default
+@@ -16777,7 +14333,7 @@ lt_prog_compiler_static_CXX=
+ 	    ;;
+ 	esac
+ 	;;
+-      linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
++      linux* | k*bsd*-gnu | kopensolaris*-gnu)
+ 	case $cc_basename in
+ 	  KCC*)
+ 	    # KAI C++ Compiler
+@@ -16785,7 +14341,7 @@ lt_prog_compiler_static_CXX=
+ 	    lt_prog_compiler_pic_CXX='-fPIC'
+ 	    ;;
+ 	  ecpc* )
+-	    # old Intel C++ for x86_64, which still supported -KPIC.
++	    # old Intel C++ for x86_64 which still supported -KPIC.
+ 	    lt_prog_compiler_wl_CXX='-Wl,'
+ 	    lt_prog_compiler_pic_CXX='-KPIC'
+ 	    lt_prog_compiler_static_CXX='-static'
+@@ -16817,7 +14373,7 @@ lt_prog_compiler_static_CXX=
+ 	    lt_prog_compiler_static_CXX='-qstaticlink'
+ 	    ;;
+ 	  *)
+-	    case `$CC -V 2>&1 | $SED 5q` in
++	    case `$CC -V 2>&1 | sed 5q` in
+ 	    *Sun\ C*)
+ 	      # Sun C++ 5.9
+ 	      lt_prog_compiler_pic_CXX='-KPIC'
+@@ -16873,7 +14429,7 @@ lt_prog_compiler_static_CXX=
+ 	;;
+       solaris*)
+ 	case $cc_basename in
+-	  CC* | sunCC*)
++	  CC*)
+ 	    # Sun C++ 4.2, 5.x and Centerline C++
+ 	    lt_prog_compiler_pic_CXX='-KPIC'
+ 	    lt_prog_compiler_static_CXX='-Bstatic'
+@@ -16930,7 +14486,7 @@ lt_prog_compiler_static_CXX=
+   fi
+ 
+ case $host_os in
+-  # For platforms that do not support PIC, -DPIC is meaningless:
++  # For platforms which do not support PIC, -DPIC is meaningless:
+   *djgpp*)
+     lt_prog_compiler_pic_CXX=
+     ;;
+@@ -16938,33 +14494,24 @@ case $host_os in
+     lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC"
+     ;;
+ esac
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_prog_compiler_pic_CXX" >&5
++$as_echo "$lt_prog_compiler_pic_CXX" >&6; }
++
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5
+-printf %s "checking for $compiler option to produce PIC... " >&6; }
+-if test ${lt_cv_prog_compiler_pic_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
+-  lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX
+-fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_pic_CXX" >&6; }
+-lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX
+ 
+ #
+ # Check to make sure the PIC flag actually works.
+ #
+ if test -n "$lt_prog_compiler_pic_CXX"; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5
+-printf %s "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; }
+-if test ${lt_cv_prog_compiler_pic_works_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5
++$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; }
++if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_pic_works_CXX=no
+    ac_outfile=conftest.$ac_objext
+    echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+-   lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC"  ## exclude from sc_useless_quotes_in_assignment
++   lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC"
+    # Insert the option either (1) after the last *FLAGS variable, or
+    # (2) before a word containing "conftest.", or (3) at the end.
+    # Note that $ac_compile itself does not contain backslashes and begins
+@@ -16991,10 +14538,10 @@ else $as_nop
+    $RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_pic_works_CXX" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5
++$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; }
+ 
+-if test yes = "$lt_cv_prog_compiler_pic_works_CXX"; then
++if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then
+     case $lt_prog_compiler_pic_CXX in
+      "" | " "*) ;;
+      *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;;
+@@ -17008,20 +14555,17 @@ fi
+ 
+ 
+ 
+-
+-
+ #
+ # Check to make sure the static flag actually works.
+ #
+ wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\"
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
+-printf %s "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
+-if test ${lt_cv_prog_compiler_static_works_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5
++$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; }
++if ${lt_cv_prog_compiler_static_works_CXX+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_static_works_CXX=no
+-   save_LDFLAGS=$LDFLAGS
++   save_LDFLAGS="$LDFLAGS"
+    LDFLAGS="$LDFLAGS $lt_tmp_static_flag"
+    echo "$lt_simple_link_test_code" > conftest.$ac_ext
+    if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then
+@@ -17040,13 +14584,13 @@ else $as_nop
+      fi
+    fi
+    $RM -r conftest*
+-   LDFLAGS=$save_LDFLAGS
++   LDFLAGS="$save_LDFLAGS"
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_static_works_CXX" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5
++$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; }
+ 
+-if test yes = "$lt_cv_prog_compiler_static_works_CXX"; then
++if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then
+     :
+ else
+     lt_prog_compiler_static_CXX=
+@@ -17055,12 +14599,11 @@ fi
+ 
+ 
+ 
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+-printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+-if test ${lt_cv_prog_compiler_c_o_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if ${lt_cv_prog_compiler_c_o_CXX+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_c_o_CXX=no
+    $RM -r conftest 2>/dev/null
+    mkdir conftest
+@@ -17103,17 +14646,16 @@ else $as_nop
+    $RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
++$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; }
+ 
+ 
+ 
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
+-printf %s "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
+-if test ${lt_cv_prog_compiler_c_o_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5
++$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; }
++if ${lt_cv_prog_compiler_c_o_CXX+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_prog_compiler_c_o_CXX=no
+    $RM -r conftest 2>/dev/null
+    mkdir conftest
+@@ -17156,28 +14698,28 @@ else $as_nop
+    $RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
+-printf "%s\n" "$lt_cv_prog_compiler_c_o_CXX" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5
++$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; }
+ 
+ 
+ 
+ 
+-hard_links=nottested
+-if test no = "$lt_cv_prog_compiler_c_o_CXX" && test no != "$need_locks"; then
++hard_links="nottested"
++if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then
+   # do not overwrite the value of need_locks provided by the user
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
+-printf %s "checking if we can lock with hard links... " >&6; }
++  { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5
++$as_echo_n "checking if we can lock with hard links... " >&6; }
+   hard_links=yes
+   $RM conftest*
+   ln conftest.a conftest.b 2>/dev/null && hard_links=no
+   touch conftest.a
+   ln conftest.a conftest.b 2>&5 || hard_links=no
+   ln conftest.a conftest.b 2>/dev/null && hard_links=no
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
+-printf "%s\n" "$hard_links" >&6; }
+-  if test no = "$hard_links"; then
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&5
+-printf "%s\n" "$as_me: WARNING: '$CC' does not support '-c -o', so 'make -j' may be unsafe" >&2;}
++  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5
++$as_echo "$hard_links" >&6; }
++  if test "$hard_links" = no; then
++    { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5
++$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;}
+     need_locks=warn
+   fi
+ else
+@@ -17186,49 +14728,37 @@ fi
+ 
+ 
+ 
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+-printf %s "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
++$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+ 
+   export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+-  exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+   case $host_os in
+   aix[4-9]*)
+     # If we're using GNU nm, then we don't want the "-C" option.
+-    # -C means demangle to GNU nm, but means don't demangle to AIX nm.
+-    # Without the "-l" option, or with the "-B" option, AIX nm treats
+-    # weak defined symbols like other global defined symbols, whereas
+-    # GNU nm marks them as "W".
+-    # While the 'weak' keyword is ignored in the Export File, we need
+-    # it in the Import File for the 'aix-soname' feature, so we have
+-    # to replace the "-B" option with "-P" for AIX nm.
++    # -C means demangle to AIX nm, but means don't demangle with GNU nm
++    # Also, AIX nm treats weak defined symbols like other global defined
++    # symbols, whereas GNU nm marks them as "W".
+     if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then
+-      export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { if (\$ 2 == "W") { print \$ 3 " weak" } else { print \$ 3 } } }'\'' | sort -u > $export_symbols'
++      export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+     else
+-      export_symbols_cmds_CXX='`func_echo_all $NM | $SED -e '\''s/B\([^B]*\)$/P\1/'\''` -PCpgl $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L") || (\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) && (substr(\$ 1,1,1) != ".")) { if ((\$ 2 == "W") || (\$ 2 == "V") || (\$ 2 == "Z")) { print \$ 1 " weak" } else { print \$ 1 } } }'\'' | sort -u > $export_symbols'
++      export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "L")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols'
+     fi
+     ;;
+   pw32*)
+-    export_symbols_cmds_CXX=$ltdll_cmds
+-    ;;
++    export_symbols_cmds_CXX="$ltdll_cmds"
++  ;;
+   cygwin* | mingw* | cegcc*)
+-    case $cc_basename in
+-    cl* | icl*)
+-      exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*'
+-      ;;
+-    *)
+-      export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
+-      exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'
+-      ;;
+-    esac
+-    ;;
++    export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;/^.*[ ]__nm__/s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols'
++  ;;
+   *)
+     export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols'
+-    ;;
++  ;;
+   esac
++  exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
+-printf "%s\n" "$ld_shlibs_CXX" >&6; }
+-test no = "$ld_shlibs_CXX" && can_build_shared=no
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5
++$as_echo "$ld_shlibs_CXX" >&6; }
++test "$ld_shlibs_CXX" = no && can_build_shared=no
+ 
+ with_gnu_ld_CXX=$with_gnu_ld
+ 
+@@ -17245,7 +14775,7 @@ x|xyes)
+   # Assume -lc should be added
+   archive_cmds_need_lc_CXX=yes
+ 
+-  if test yes,yes = "$GCC,$enable_shared"; then
++  if test "$enable_shared" = yes && test "$GCC" = yes; then
+     case $archive_cmds_CXX in
+     *'~'*)
+       # FIXME: we may have to deal with multi-command sequences.
+@@ -17254,19 +14784,18 @@ x|xyes)
+       # Test whether the compiler implicitly links with -lc since on some
+       # systems, -lgcc has to come before -lc. If gcc already passes -lc
+       # to ld, don't add -lc before -lgcc.
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
+-printf %s "checking whether -lc should be explicitly linked in... " >&6; }
+-if test ${lt_cv_archive_cmds_need_lc_CXX+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5
++$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; }
++if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   $RM conftest*
+ 	echo "$lt_simple_compile_test_code" > conftest.$ac_ext
+ 
+ 	if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5
+   (eval $ac_compile) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; } 2>conftest.err; then
+ 	  soname=conftest
+ 	  lib=conftest
+@@ -17284,7 +14813,7 @@ else $as_nop
+ 	  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5
+   (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5
+   ac_status=$?
+-  printf "%s\n" "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+   test $ac_status = 0; }
+ 	  then
+ 	    lt_cv_archive_cmds_need_lc_CXX=no
+@@ -17298,8 +14827,8 @@ else $as_nop
+ 	$RM conftest*
+ 
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5
+-printf "%s\n" "$lt_cv_archive_cmds_need_lc_CXX" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5
++$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; }
+       archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX
+       ;;
+     esac
+@@ -17368,13 +14897,15 @@ esac
+ 
+ 
+ 
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
+-printf %s "checking dynamic linker characteristics... " >&6; }
++
++
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5
++$as_echo_n "checking dynamic linker characteristics... " >&6; }
+ 
+ library_names_spec=
+ libname_spec='lib$name'
+ soname_spec=
+-shrext_cmds=.so
++shrext_cmds=".so"
+ postinstall_cmds=
+ postuninstall_cmds=
+ finish_cmds=
+@@ -17391,108 +14922,56 @@ hardcode_into_libs=no
+ # flags to be left without arguments
+ need_version=unknown
+ 
+-
+-
+ case $host_os in
+ aix3*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname.a'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a'
+   shlibpath_var=LIBPATH
+ 
+   # AIX 3 has no versioning support, so we append a major version to the name.
+-  soname_spec='$libname$release$shared_ext$major'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   ;;
+ 
+ aix[4-9]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+   hardcode_into_libs=yes
+-  if test ia64 = "$host_cpu"; then
++  if test "$host_cpu" = ia64; then
+     # AIX 5 supports IA64
+-    library_names_spec='$libname$release$shared_ext$major $libname$release$shared_ext$versuffix $libname$shared_ext'
++    library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}'
+     shlibpath_var=LD_LIBRARY_PATH
+   else
+     # With GCC up to 2.95.x, collect2 would create an import file
+     # for dependence libraries.  The import file would start with
+-    # the line '#! .'.  This would cause the generated library to
+-    # depend on '.', always an invalid library.  This was fixed in
++    # the line `#! .'.  This would cause the generated library to
++    # depend on `.', always an invalid library.  This was fixed in
+     # development snapshots of GCC prior to 3.0.
+     case $host_os in
+       aix4 | aix4.[01] | aix4.[01].*)
+       if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)'
+ 	   echo ' yes '
+-	   echo '#endif'; } | $CC -E - | $GREP yes > /dev/null; then
++	   echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then
+ 	:
+       else
+ 	can_build_shared=no
+       fi
+       ;;
+     esac
+-    # Using Import Files as archive members, it is possible to support
+-    # filename-based versioning of shared library archives on AIX. While
+-    # this would work for both with and without runtime linking, it will
+-    # prevent static linking of such archives. So we do filename-based
+-    # shared library versioning with .so extension only, which is used
+-    # when both runtime linking and shared linking is enabled.
+-    # Unfortunately, runtime linking may impact performance, so we do
+-    # not want this to be the default eventually. Also, we use the
+-    # versioned .so libs for executables only if there is the -brtl
+-    # linker flag in LDFLAGS as well, or --with-aix-soname=svr4 only.
+-    # To allow for filename-based versioning support, we need to create
+-    # libNAME.so.V as an archive file, containing:
+-    # *) an Import File, referring to the versioned filename of the
+-    #    archive as well as the shared archive member, telling the
+-    #    bitwidth (32 or 64) of that shared object, and providing the
+-    #    list of exported symbols of that shared object, eventually
+-    #    decorated with the 'weak' keyword
+-    # *) the shared object with the F_LOADONLY flag set, to really avoid
+-    #    it being seen by the linker.
+-    # At run time we better use the real file rather than another symlink,
+-    # but for link time we create the symlink libNAME.so -> libNAME.so.V
+-
+-    case $with_aix_soname,$aix_use_runtimelinking in
+-    # AIX (on Power*) has no versioning support, so currently we cannot hardcode correct
++    # AIX (on Power*) has no versioning support, so currently we can not hardcode correct
+     # soname into executable. Probably we can add versioning support to
+     # collect2, so additional links can be useful in future.
+-    aix,yes) # traditional libtool
+-      dynamic_linker='AIX unversionable lib.so'
++    if test "$aix_use_runtimelinking" = yes; then
+       # If using run time linking (on AIX 4.2 or later) use lib.so
+       # instead of lib.a to let people know that these are not
+       # typical AIX shared libraries.
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-      ;;
+-    aix,no) # traditional AIX only
+-      dynamic_linker='AIX lib.a(lib.so.V)'
++      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    else
+       # We preserve .a as extension for shared libraries through AIX4.2
+       # and later when we are not doing run time linking.
+-      library_names_spec='$libname$release.a $libname.a'
+-      soname_spec='$libname$release$shared_ext$major'
+-      ;;
+-    svr4,*) # full svr4 only
+-      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o)"
+-      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+-      # We do not specify a path in Import Files, so LIBPATH fires.
+-      shlibpath_overrides_runpath=yes
+-      ;;
+-    *,yes) # both, prefer svr4
+-      dynamic_linker="AIX lib.so.V($shared_archive_member_spec.o), lib.a(lib.so.V)"
+-      library_names_spec='$libname$release$shared_ext$major $libname$shared_ext'
+-      # unpreferred sharedlib libNAME.a needs extra handling
+-      postinstall_cmds='test -n "$linkname" || linkname="$realname"~func_stripname "" ".so" "$linkname"~$install_shared_prog "$dir/$func_stripname_result.$libext" "$destdir/$func_stripname_result.$libext"~test -z "$tstripme" || test -z "$striplib" || $striplib "$destdir/$func_stripname_result.$libext"'
+-      postuninstall_cmds='for n in $library_names $old_library; do :; done~func_stripname "" ".so" "$n"~test "$func_stripname_result" = "$n" || func_append rmfiles " $odir/$func_stripname_result.$libext"'
+-      # We do not specify a path in Import Files, so LIBPATH fires.
+-      shlibpath_overrides_runpath=yes
+-      ;;
+-    *,no) # both, prefer aix
+-      dynamic_linker="AIX lib.a(lib.so.V), lib.so.V($shared_archive_member_spec.o)"
+-      library_names_spec='$libname$release.a $libname.a'
+-      soname_spec='$libname$release$shared_ext$major'
+-      # unpreferred sharedlib libNAME.so.V and symlink libNAME.so need extra handling
+-      postinstall_cmds='test -z "$dlname" || $install_shared_prog $dir/$dlname $destdir/$dlname~test -z "$tstripme" || test -z "$striplib" || $striplib $destdir/$dlname~test -n "$linkname" || linkname=$realname~func_stripname "" ".a" "$linkname"~(cd "$destdir" && $LN_S -f $dlname $func_stripname_result.so)'
+-      postuninstall_cmds='test -z "$dlname" || func_append rmfiles " $odir/$dlname"~for n in $old_library $library_names; do :; done~func_stripname "" ".a" "$n"~func_append rmfiles " $odir/$func_stripname_result.so"'
+-      ;;
+-    esac
++      library_names_spec='${libname}${release}.a $libname.a'
++      soname_spec='${libname}${release}${shared_ext}$major'
++    fi
+     shlibpath_var=LIBPATH
+   fi
+   ;;
+@@ -17502,27 +14981,27 @@ amigaos*)
+   powerpc)
+     # Since July 2007 AmigaOS4 officially supports .so libraries.
+     # When compiling the executable, add -use-dynld -Lsobjs: to the compileline.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+     ;;
+   m68k)
+     library_names_spec='$libname.ixlibrary $libname.a'
+     # Create ${libname}_ixlibrary.a entries in /sys/libs.
+-    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
++    finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done'
+     ;;
+   esac
+   ;;
+ 
+ beos*)
+-  library_names_spec='$libname$shared_ext'
++  library_names_spec='${libname}${shared_ext}'
+   dynamic_linker="$host_os ld.so"
+   shlibpath_var=LIBRARY_PATH
+   ;;
+ 
+ bsdi[45]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir'
+   shlibpath_var=LD_LIBRARY_PATH
+   sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib"
+@@ -17534,17 +15013,16 @@ bsdi[45]*)
+ 
+ cygwin* | mingw* | pw32* | cegcc*)
+   version_type=windows
+-  shrext_cmds=.dll
++  shrext_cmds=".dll"
+   need_version=no
+   need_lib_prefix=no
+ 
+-  case $GCC,$cc_basename in
+-  yes,*)
+-    # gcc
++  case $GCC,$host_os in
++  yes,cygwin* | yes,mingw* | yes,pw32* | yes,cegcc*)
+     library_names_spec='$libname.dll.a'
+     # DLL is installed to $(libdir)/../bin by postinstall_cmds
+-    postinstall_cmds='base_file=`basename \$file`~
+-      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
++    postinstall_cmds='base_file=`basename \${file}`~
++      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~
+       dldir=$destdir/`dirname \$dlpath`~
+       test -d \$dldir || mkdir -p \$dldir~
+       $install_prog $dir/$dlname \$dldir/$dlname~
+@@ -17560,83 +15038,25 @@ cygwin* | mingw* | pw32* | cegcc*)
+     case $host_os in
+     cygwin*)
+       # Cygwin DLLs use 'cyg' prefix rather than 'lib'
+-      soname_spec='`echo $libname | $SED -e 's/^lib/cyg/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+ 
+       ;;
+     mingw* | cegcc*)
+       # MinGW DLLs use traditional 'lib' prefix
+-      soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
++      soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+       ;;
+     pw32*)
+       # pw32 DLLs use 'pw' prefix rather than 'lib'
+-      library_names_spec='`echo $libname | $SED -e 's/^lib/pw/'``echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+-      ;;
+-    esac
+-    dynamic_linker='Win32 ld.exe'
+-    ;;
+-
+-  *,cl* | *,icl*)
+-    # Native MSVC or ICC
+-    libname_spec='$name'
+-    soname_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext'
+-    library_names_spec='$libname.dll.lib'
+-
+-    case $build_os in
+-    mingw*)
+-      sys_lib_search_path_spec=
+-      lt_save_ifs=$IFS
+-      IFS=';'
+-      for lt_path in $LIB
+-      do
+-        IFS=$lt_save_ifs
+-        # Let DOS variable expansion print the short 8.3 style file name.
+-        lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"`
+-        sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path"
+-      done
+-      IFS=$lt_save_ifs
+-      # Convert to MSYS style.
+-      sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'`
+-      ;;
+-    cygwin*)
+-      # Convert to unix form, then to dos form, then back to unix form
+-      # but this time dos style (no spaces!) so that the unix form looks
+-      # like /cygdrive/c/PROGRA~1:/cygdr...
+-      sys_lib_search_path_spec=`cygpath --path --unix "$LIB"`
+-      sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null`
+-      sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+-      ;;
+-    *)
+-      sys_lib_search_path_spec=$LIB
+-      if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then
+-        # It is most probably a Windows format PATH.
+-        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'`
+-      else
+-        sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"`
+-      fi
+-      # FIXME: find the short name or the path components, as spaces are
+-      # common. (e.g. "Program Files" -> "PROGRA~1")
++      library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}'
+       ;;
+     esac
+-
+-    # DLL is installed to $(libdir)/../bin by postinstall_cmds
+-    postinstall_cmds='base_file=`basename \$file`~
+-      dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; echo \$dlname'\''`~
+-      dldir=$destdir/`dirname \$dlpath`~
+-      test -d \$dldir || mkdir -p \$dldir~
+-      $install_prog $dir/$dlname \$dldir/$dlname'
+-    postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~
+-      dlpath=$dir/\$dldll~
+-       $RM \$dlpath'
+-    shlibpath_overrides_runpath=yes
+-    dynamic_linker='Win32 link.exe'
+     ;;
+ 
+   *)
+-    # Assume MSVC and ICC wrapper
+-    library_names_spec='$libname`echo $release | $SED -e 's/[.]/-/g'`$versuffix$shared_ext $libname.lib'
+-    dynamic_linker='Win32 ld.exe'
++    library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib'
+     ;;
+   esac
++  dynamic_linker='Win32 ld.exe'
+   # FIXME: first we should search . and the directory the executable is in
+   shlibpath_var=PATH
+   ;;
+@@ -17646,8 +15066,8 @@ darwin* | rhapsody*)
+   version_type=darwin
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$major$shared_ext $libname$shared_ext'
+-  soname_spec='$libname$release$major$shared_ext'
++  library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext'
++  soname_spec='${libname}${release}${major}$shared_ext'
+   shlibpath_overrides_runpath=yes
+   shlibpath_var=DYLD_LIBRARY_PATH
+   shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`'
+@@ -17656,15 +15076,15 @@ darwin* | rhapsody*)
+   ;;
+ 
+ dgux*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   ;;
+ 
+-freebsd* | dragonfly* | midnightbsd*)
++freebsd* | dragonfly*)
+   # DragonFly does not have aout.  When/if they implement a new
+   # versioning mechanism, adjust this.
+   if test -x /usr/bin/objformat; then
+@@ -17678,13 +15098,12 @@ freebsd* | dragonfly* | midnightbsd*)
+   version_type=freebsd-$objformat
+   case $version_type in
+     freebsd-elf*)
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-      soname_spec='$libname$release$shared_ext$major'
++      library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
+       need_version=no
+       need_lib_prefix=no
+       ;;
+     freebsd-*)
+-      library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++      library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix'
+       need_version=yes
+       ;;
+   esac
+@@ -17710,15 +15129,15 @@ freebsd* | dragonfly* | midnightbsd*)
+   ;;
+ 
+ haiku*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+   dynamic_linker="$host_os runtime_loader"
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LIBRARY_PATH
+-  shlibpath_overrides_runpath=no
+-  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib'
++  shlibpath_overrides_runpath=yes
++  sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/beos/system/lib'
+   hardcode_into_libs=yes
+   ;;
+ 
+@@ -17735,15 +15154,14 @@ hpux9* | hpux10* | hpux11*)
+     dynamic_linker="$host_os dld.so"
+     shlibpath_var=LD_LIBRARY_PATH
+     shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
+-    if test 32 = "$HPUX_IA64_MODE"; then
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
++    if test "X$HPUX_IA64_MODE" = X32; then
+       sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib"
+-      sys_lib_dlsearch_path_spec=/usr/lib/hpux32
+     else
+       sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64"
+-      sys_lib_dlsearch_path_spec=/usr/lib/hpux64
+     fi
++    sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+     ;;
+   hppa*64*)
+     shrext_cmds='.sl'
+@@ -17751,8 +15169,8 @@ hpux9* | hpux10* | hpux11*)
+     dynamic_linker="$host_os dld.sl"
+     shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH
+     shlibpath_overrides_runpath=yes # Unless +noenvvar is specified.
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
+     sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64"
+     sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+     ;;
+@@ -17761,8 +15179,8 @@ hpux9* | hpux10* | hpux11*)
+     dynamic_linker="$host_os dld.sl"
+     shlibpath_var=SHLIB_PATH
+     shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
+     ;;
+   esac
+   # HP-UX runs *really* slowly unless shared libraries are mode 555, ...
+@@ -17772,11 +15190,11 @@ hpux9* | hpux10* | hpux11*)
+   ;;
+ 
+ interix[3-9]*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+@@ -17787,16 +15205,16 @@ irix5* | irix6* | nonstopux*)
+   case $host_os in
+     nonstopux*) version_type=nonstopux ;;
+     *)
+-	if test yes = "$lt_cv_prog_gnu_ld"; then
+-		version_type=linux # correct to gnu/linux during the next big refactor
++	if test "$lt_cv_prog_gnu_ld" = yes; then
++		version_type=linux
+ 	else
+ 		version_type=irix
+ 	fi ;;
+   esac
+   need_lib_prefix=no
+   need_version=no
+-  soname_spec='$libname$release$shared_ext$major'
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$release$shared_ext $libname$shared_ext'
++  soname_spec='${libname}${release}${shared_ext}$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}'
+   case $host_os in
+   irix5* | nonstopux*)
+     libsuff= shlibsuff=
+@@ -17811,56 +15229,40 @@ irix5* | irix6* | nonstopux*)
+       libsuff=64 shlibsuff=64 libmagic=64-bit;;
+     *) libsuff= shlibsuff= libmagic=never-match;;
+     esac
+-    ;;
+-  esac
+-  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
+-  shlibpath_overrides_runpath=no
+-  sys_lib_search_path_spec="/usr/lib$libsuff /lib$libsuff /usr/local/lib$libsuff"
+-  sys_lib_dlsearch_path_spec="/usr/lib$libsuff /lib$libsuff"
+-  hardcode_into_libs=yes
+-  ;;
+-
+-# No shared lib support for Linux oldld, aout, or coff.
+-linux*oldld* | linux*aout* | linux*coff*)
+-  dynamic_linker=no
+-  ;;
+-
+-linux*android*)
+-  version_type=none # Android doesn't support versioned libraries.
+-  need_lib_prefix=no
+-  need_version=no
+-  library_names_spec='$libname$release$shared_ext'
+-  soname_spec='$libname$release$shared_ext'
+-  finish_cmds=
+-  shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
+-
+-  # This implies no fast_install, which is unacceptable.
+-  # Some rework will be needed to allow for fast_install
+-  # before this can be enabled.
++    ;;
++  esac
++  shlibpath_var=LD_LIBRARY${shlibsuff}_PATH
++  shlibpath_overrides_runpath=no
++  sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}"
++  sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}"
+   hardcode_into_libs=yes
++  ;;
+ 
+-  dynamic_linker='Android linker'
+-  # Don't embed -rpath directories since the linker doesn't support them.
+-  hardcode_libdir_flag_spec_CXX='-L$libdir'
++# No shared lib support for Linux oldld, aout, or coff.
++linux*oldld* | linux*aout* | linux*coff*)
++  dynamic_linker=no
+   ;;
+ 
+-# This must be glibc/ELF.
+-linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++# This must be Linux ELF.
++
++# uclinux* changes (here and below) have been submitted to the libtool
++# project, but have not yet been accepted: they are GCC-local changes
++# for the time being.  (See
++# https://lists.gnu.org/archive/html/libtool-patches/2018-05/msg00000.html)
++linux* | k*bsd*-gnu | kopensolaris*-gnu | gnu* | uclinuxfdpiceabi)
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+ 
+   # Some binutils ld are patched to set DT_RUNPATH
+-  if test ${lt_cv_shlibpath_overrides_runpath+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++  if ${lt_cv_shlibpath_overrides_runpath+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   lt_cv_shlibpath_overrides_runpath=no
+     save_LDFLAGS=$LDFLAGS
+     save_libdir=$libdir
+@@ -17870,21 +15272,19 @@ else $as_nop
+ /* end confdefs.h.  */
+ 
+ int
+-main (void)
++main ()
+ {
+ 
+   ;
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_cxx_try_link "$LINENO"
+-then :
+-  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null
+-then :
++if ac_fn_cxx_try_link "$LINENO"; then :
++  if  ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then :
+   lt_cv_shlibpath_overrides_runpath=yes
+ fi
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam \
++rm -f core conftest.err conftest.$ac_objext \
+     conftest$ac_exeext conftest.$ac_ext
+     LDFLAGS=$save_LDFLAGS
+     libdir=$save_libdir
+@@ -17898,18 +15298,10 @@ fi
+   # before this can be enabled.
+   hardcode_into_libs=yes
+ 
+-  # Add ABI-specific directories to the system library path.
+-  sys_lib_dlsearch_path_spec="/lib64 /usr/lib64 /lib /usr/lib"
+-
+-  # Ideally, we could use ldconfig to report *all* directores which are
+-  # searched for libraries, however this is still not possible.  Aside from not
+-  # being certain /sbin/ldconfig is available, command
+-  # 'ldconfig -N -X -v | grep ^/' on 64bit Fedora does not report /usr/lib64,
+-  # even though it is searched at run-time.  Try to do the best guess by
+-  # appending ld.so.conf contents (and includes) to the search path.
++  # Append ld.so.conf contents to the search path
+   if test -f /etc/ld.so.conf; then
+     lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[	 ]*hwcap[	 ]/d;s/[:,	]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '`
+-    sys_lib_dlsearch_path_spec="$sys_lib_dlsearch_path_spec $lt_ld_extra"
++    sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra"
+   fi
+ 
+   # We used to test for /lib/ld.so.1 and disable shared libraries on
+@@ -17926,12 +15318,12 @@ netbsd*)
+   need_lib_prefix=no
+   need_version=no
+   if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+     finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+     dynamic_linker='NetBSD (a.out) ld.so'
+   else
+-    library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-    soname_spec='$libname$release$shared_ext$major'
++    library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}'
++    soname_spec='${libname}${release}${shared_ext}$major'
+     dynamic_linker='NetBSD ld.elf_so'
+   fi
+   shlibpath_var=LD_LIBRARY_PATH
+@@ -17940,8 +15332,8 @@ netbsd*)
+   ;;
+ 
+ newsos6)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+   ;;
+@@ -17950,68 +15342,58 @@ newsos6)
+   version_type=qnx
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+   hardcode_into_libs=yes
+   dynamic_linker='ldqnx.so'
+   ;;
+ 
+-openbsd* | bitrig*)
++openbsd*)
+   version_type=sunos
+-  sys_lib_dlsearch_path_spec=/usr/lib
++  sys_lib_dlsearch_path_spec="/usr/lib"
+   need_lib_prefix=no
+-  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`"; then
+-    need_version=no
+-  else
+-    need_version=yes
+-  fi
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs.
++  case $host_os in
++    openbsd3.3 | openbsd3.3.*)	need_version=yes ;;
++    *)				need_version=no  ;;
++  esac
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+   finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir'
+   shlibpath_var=LD_LIBRARY_PATH
+-  shlibpath_overrides_runpath=yes
++  if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then
++    case $host_os in
++      openbsd2.[89] | openbsd2.[89].*)
++	shlibpath_overrides_runpath=no
++	;;
++      *)
++	shlibpath_overrides_runpath=yes
++	;;
++      esac
++  else
++    shlibpath_overrides_runpath=yes
++  fi
+   ;;
+ 
+ os2*)
+   libname_spec='$name'
+-  version_type=windows
+-  shrext_cmds=.dll
+-  need_version=no
++  shrext_cmds=".dll"
+   need_lib_prefix=no
+-  # OS/2 can only load a DLL with a base name of 8 characters or less.
+-  soname_spec='`test -n "$os2dllname" && libname="$os2dllname";
+-    v=$($ECHO $release$versuffix | tr -d .-);
+-    n=$($ECHO $libname | cut -b -$((8 - ${#v})) | tr . _);
+-    $ECHO $n$v`$shared_ext'
+-  library_names_spec='${libname}_dll.$libext'
++  library_names_spec='$libname${shared_ext} $libname.a'
+   dynamic_linker='OS/2 ld.exe'
+-  shlibpath_var=BEGINLIBPATH
+-  sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib"
+-  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
+-  postinstall_cmds='base_file=`basename \$file`~
+-    dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\$base_file'\''i; $ECHO \$dlname'\''`~
+-    dldir=$destdir/`dirname \$dlpath`~
+-    test -d \$dldir || mkdir -p \$dldir~
+-    $install_prog $dir/$dlname \$dldir/$dlname~
+-    chmod a+x \$dldir/$dlname~
+-    if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then
+-      eval '\''$striplib \$dldir/$dlname'\'' || exit \$?;
+-    fi'
+-  postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; $ECHO \$dlname'\''`~
+-    dlpath=$dir/\$dldll~
+-    $RM \$dlpath'
++  shlibpath_var=LIBPATH
+   ;;
+ 
+ osf3* | osf4* | osf5*)
+   version_type=osf
+   need_lib_prefix=no
+   need_version=no
+-  soname_spec='$libname$release$shared_ext$major'
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  soname_spec='${libname}${release}${shared_ext}$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+   shlibpath_var=LD_LIBRARY_PATH
+   sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib"
+-  sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec
++  sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec"
+   ;;
+ 
+ rdos*)
+@@ -18019,11 +15401,11 @@ rdos*)
+   ;;
+ 
+ solaris*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+   hardcode_into_libs=yes
+@@ -18033,20 +15415,20 @@ solaris*)
+ 
+ sunos4*)
+   version_type=sunos
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$shared_ext$versuffix'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix'
+   finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+-  if test yes = "$with_gnu_ld"; then
++  if test "$with_gnu_ld" = yes; then
+     need_lib_prefix=no
+   fi
+   need_version=yes
+   ;;
+ 
+ sysv4 | sysv4.3*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   case $host_vendor in
+     sni)
+@@ -18067,24 +15449,24 @@ sysv4 | sysv4.3*)
+   ;;
+ 
+ sysv4*MP*)
+-  if test -d /usr/nec; then
+-    version_type=linux # correct to gnu/linux during the next big refactor
+-    library_names_spec='$libname$shared_ext.$versuffix $libname$shared_ext.$major $libname$shared_ext'
+-    soname_spec='$libname$shared_ext.$major'
++  if test -d /usr/nec ;then
++    version_type=linux
++    library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}'
++    soname_spec='$libname${shared_ext}.$major'
+     shlibpath_var=LD_LIBRARY_PATH
+   fi
+   ;;
+ 
+ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+-  version_type=sco
++  version_type=freebsd-elf
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=yes
+   hardcode_into_libs=yes
+-  if test yes = "$with_gnu_ld"; then
++  if test "$with_gnu_ld" = yes; then
+     sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib /usr/ccs/lib /usr/lib /lib'
+   else
+     sys_lib_search_path_spec='/usr/ccs/lib /usr/lib'
+@@ -18099,19 +15481,19 @@ sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*)
+ 
+ tpf*)
+   # TPF is a cross-target only.  Preferred cross-host = GNU/Linux.
+-  version_type=linux # correct to gnu/linux during the next big refactor
++  version_type=linux
+   need_lib_prefix=no
+   need_version=no
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
+   shlibpath_var=LD_LIBRARY_PATH
+   shlibpath_overrides_runpath=no
+   hardcode_into_libs=yes
+   ;;
+ 
+ uts4*)
+-  version_type=linux # correct to gnu/linux during the next big refactor
+-  library_names_spec='$libname$release$shared_ext$versuffix $libname$release$shared_ext$major $libname$shared_ext'
+-  soname_spec='$libname$release$shared_ext$major'
++  version_type=linux
++  library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}'
++  soname_spec='${libname}${release}${shared_ext}$major'
+   shlibpath_var=LD_LIBRARY_PATH
+   ;;
+ 
+@@ -18119,32 +15501,22 @@ uts4*)
+   dynamic_linker=no
+   ;;
+ esac
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
+-printf "%s\n" "$dynamic_linker" >&6; }
+-test no = "$dynamic_linker" && can_build_shared=no
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5
++$as_echo "$dynamic_linker" >&6; }
++test "$dynamic_linker" = no && can_build_shared=no
+ 
+ variables_saved_for_relink="PATH $shlibpath_var $runpath_var"
+-if test yes = "$GCC"; then
++if test "$GCC" = yes; then
+   variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH"
+ fi
+ 
+-if test set = "${lt_cv_sys_lib_search_path_spec+set}"; then
+-  sys_lib_search_path_spec=$lt_cv_sys_lib_search_path_spec
++if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then
++  sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec"
+ fi
+-
+-if test set = "${lt_cv_sys_lib_dlsearch_path_spec+set}"; then
+-  sys_lib_dlsearch_path_spec=$lt_cv_sys_lib_dlsearch_path_spec
++if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then
++  sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec"
+ fi
+ 
+-# remember unaugmented sys_lib_dlsearch_path content for libtool script decls...
+-configure_time_dlsearch_path=$sys_lib_dlsearch_path_spec
+-
+-# ... but it needs LT_SYS_LIBRARY_PATH munging for other configure-time code
+-func_munge_path_list sys_lib_dlsearch_path_spec "$LT_SYS_LIBRARY_PATH"
+-
+-# to be used as default LT_SYS_LIBRARY_PATH value in generated libtool
+-configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+-
+ 
+ 
+ 
+@@ -18182,22 +15554,20 @@ configure_time_lt_sys_library_path=$LT_SYS_LIBRARY_PATH
+ 
+ 
+ 
+-
+-
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
+-printf %s "checking how to hardcode library paths into programs... " >&6; }
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5
++$as_echo_n "checking how to hardcode library paths into programs... " >&6; }
+ hardcode_action_CXX=
+ if test -n "$hardcode_libdir_flag_spec_CXX" ||
+    test -n "$runpath_var_CXX" ||
+-   test yes = "$hardcode_automatic_CXX"; then
++   test "X$hardcode_automatic_CXX" = "Xyes" ; then
+ 
+   # We can hardcode non-existent directories.
+-  if test no != "$hardcode_direct_CXX" &&
++  if test "$hardcode_direct_CXX" != no &&
+      # If the only mechanism to avoid hardcoding is shlibpath_var, we
+      # have to relink, otherwise we might link with an installed library
+      # when we should be linking with a yet-to-be-installed one
+-     ## test no != "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" &&
+-     test no != "$hardcode_minus_L_CXX"; then
++     ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no &&
++     test "$hardcode_minus_L_CXX" != no; then
+     # Linking always hardcodes the temporary library directory.
+     hardcode_action_CXX=relink
+   else
+@@ -18209,15 +15579,15 @@ else
+   # directories.
+   hardcode_action_CXX=unsupported
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5
+-printf "%s\n" "$hardcode_action_CXX" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5
++$as_echo "$hardcode_action_CXX" >&6; }
+ 
+-if test relink = "$hardcode_action_CXX" ||
+-   test yes = "$inherit_rpath_CXX"; then
++if test "$hardcode_action_CXX" = relink ||
++   test "$inherit_rpath_CXX" = yes; then
+   # Fast installation is not supported
+   enable_fast_install=no
+-elif test yes = "$shlibpath_overrides_runpath" ||
+-     test no = "$enable_shared"; then
++elif test "$shlibpath_overrides_runpath" = yes ||
++     test "$enable_shared" = no; then
+   # Fast installation is not necessary
+   enable_fast_install=needless
+ fi
+@@ -18231,7 +15601,6 @@ fi
+   fi # test -n "$compiler"
+ 
+   CC=$lt_save_CC
+-  CFLAGS=$lt_save_CFLAGS
+   LDCXX=$LD
+   LD=$lt_save_LD
+   GCC=$lt_save_GCC
+@@ -18240,7 +15609,7 @@ fi
+   lt_cv_path_LD=$lt_save_path_LD
+   lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld
+   lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld
+-fi # test yes != "$_lt_caught_CXX_error"
++fi # test "$_lt_caught_CXX_error" != yes
+ 
+ ac_ext=c
+ ac_cpp='$CPP $CPPFLAGS'
+@@ -18260,8 +15629,6 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ 
+ 
+ 
+-
+-
+         ac_config_commands="$ac_config_commands libtool"
+ 
+ 
+@@ -18270,14 +15637,38 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+ # Only expand once:
+ 
+ 
+-ACX_LT_HOST_FLAGS
++
++
++
++
++case $host in
++  *-cygwin* | *-mingw*)
++    # 'host' will be top-level target in the case of a target lib,
++    # we must compare to with_cross_host to decide if this is a native
++    # or cross-compiler and select where to install dlls appropriately.
++    if test -n "$with_cross_host" &&
++	test x"$with_cross_host" != x"no"; then
++      lt_host_flags='-no-undefined -bindir "$(toolexeclibdir)"';
++    else
++      lt_host_flags='-no-undefined -bindir "$(bindir)"';
++    fi
++    ;;
++  *)
++    lt_host_flags=
++    ;;
++esac
++
++
++
+ 
+ ac_fn_c_find_intX_t "$LINENO" "64" "ac_cv_c_int64_t"
+ case $ac_cv_c_int64_t in #(
+   no|yes) ;; #(
+   *)
+ 
+-printf "%s\n" "#define int64_t $ac_cv_c_int64_t" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define int64_t $ac_cv_c_int64_t
++_ACEOF
+ ;;
+ esac
+ 
+@@ -18286,19 +15677,20 @@ case $ac_cv_c_uint64_t in #(
+   no|yes) ;; #(
+   *)
+ 
+-printf "%s\n" "#define _UINT64_T 1" >>confdefs.h
++$as_echo "#define _UINT64_T 1" >>confdefs.h
+ 
+ 
+-printf "%s\n" "#define uint64_t $ac_cv_c_uint64_t" >>confdefs.h
++cat >>confdefs.h <<_ACEOF
++#define uint64_t $ac_cv_c_uint64_t
++_ACEOF
+ ;;
+   esac
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5
+-printf %s "checking for sys/wait.h that is POSIX.1 compatible... " >&6; }
+-if test ${ac_cv_header_sys_wait_h+y}
+-then :
+-  printf %s "(cached) " >&6
+-else $as_nop
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sys/wait.h that is POSIX.1 compatible" >&5
++$as_echo_n "checking for sys/wait.h that is POSIX.1 compatible... " >&6; }
++if ${ac_cv_header_sys_wait_h+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
+   cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+ /* end confdefs.h.  */
+ #include 
+@@ -18311,7 +15703,7 @@ else $as_nop
+ #endif
+ 
+ int
+-main (void)
++main ()
+ {
+   int s;
+   wait (&s);
+@@ -18320,19 +15712,18 @@ main (void)
+   return 0;
+ }
+ _ACEOF
+-if ac_fn_c_try_compile "$LINENO"
+-then :
++if ac_fn_c_try_compile "$LINENO"; then :
+   ac_cv_header_sys_wait_h=yes
+-else $as_nop
++else
+   ac_cv_header_sys_wait_h=no
+ fi
+-rm -f core conftest.err conftest.$ac_objext conftest.beam conftest.$ac_ext
++rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+ fi
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5
+-printf "%s\n" "$ac_cv_header_sys_wait_h" >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_sys_wait_h" >&5
++$as_echo "$ac_cv_header_sys_wait_h" >&6; }
+ if test $ac_cv_header_sys_wait_h = yes; then
+ 
+-printf "%s\n" "#define HAVE_SYS_WAIT_H 1" >>confdefs.h
++$as_echo "#define HAVE_SYS_WAIT_H 1" >>confdefs.h
+ 
+ fi
+ 
+@@ -18367,8 +15758,8 @@ _ACEOF
+     case $ac_val in #(
+     *${as_nl}*)
+       case $ac_var in #(
+-      *_cv_*) { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
+-printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
++      *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5
++$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;;
+       esac
+       case $ac_var in #(
+       _ | IFS | as_nl) ;; #(
+@@ -18398,15 +15789,15 @@ printf "%s\n" "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;}
+      /^ac_cv_env_/b end
+      t clear
+      :clear
+-     s/^\([^=]*\)=\(.*[{}].*\)$/test ${\1+y} || &/
++     s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/
+      t end
+      s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/
+      :end' >>confcache
+ if diff "$cache_file" confcache >/dev/null 2>&1; then :; else
+   if test -w "$cache_file"; then
+     if test "x$cache_file" != "x/dev/null"; then
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
+-printf "%s\n" "$as_me: updating cache $cache_file" >&6;}
++      { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5
++$as_echo "$as_me: updating cache $cache_file" >&6;}
+       if test ! -f "$cache_file" || test -h "$cache_file"; then
+ 	cat confcache >"$cache_file"
+       else
+@@ -18420,8 +15811,8 @@ printf "%s\n" "$as_me: updating cache $cache_file" >&6;}
+       fi
+     fi
+   else
+-    { printf "%s\n" "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
+-printf "%s\n" "$as_me: not updating unwritable cache $cache_file" >&6;}
++    { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5
++$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;}
+   fi
+ fi
+ rm -f confcache
+@@ -18438,7 +15829,7 @@ U=
+ for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue
+   # 1. Remove the extension, and $U if already installed.
+   ac_script='s/\$U\././;s/\.o$//;s/\.obj$//'
+-  ac_i=`printf "%s\n" "$ac_i" | sed "$ac_script"`
++  ac_i=`$as_echo "$ac_i" | sed "$ac_script"`
+   # 2. Prepend LIBOBJDIR.  When used with automake>=1.10 LIBOBJDIR
+   #    will be set to the directory where LIBOBJS objects are built.
+   as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext"
+@@ -18449,14 +15840,14 @@ LIBOBJS=$ac_libobjs
+ LTLIBOBJS=$ac_ltlibobjs
+ 
+ 
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
+-printf %s "checking that generated files are newer than configure... " >&6; }
++{ $as_echo "$as_me:${as_lineno-$LINENO}: checking that generated files are newer than configure" >&5
++$as_echo_n "checking that generated files are newer than configure... " >&6; }
+    if test -n "$am_sleep_pid"; then
+      # Hide warnings about reused PIDs.
+      wait $am_sleep_pid 2>/dev/null
+    fi
+-   { printf "%s\n" "$as_me:${as_lineno-$LINENO}: result: done" >&5
+-printf "%s\n" "done" >&6; }
++   { $as_echo "$as_me:${as_lineno-$LINENO}: result: done" >&5
++$as_echo "done" >&6; }
+  if test -n "$EXEEXT"; then
+   am__EXEEXT_TRUE=
+   am__EXEEXT_FALSE='#'
+@@ -18490,8 +15881,8 @@ fi
+ ac_write_fail=0
+ ac_clean_files_save=$ac_clean_files
+ ac_clean_files="$ac_clean_files $CONFIG_STATUS"
+-{ printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
+-printf "%s\n" "$as_me: creating $CONFIG_STATUS" >&6;}
++{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5
++$as_echo "$as_me: creating $CONFIG_STATUS" >&6;}
+ as_write_fail=0
+ cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1
+ #! $SHELL
+@@ -18514,16 +15905,14 @@ cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1
+ 
+ # Be more Bourne compatible
+ DUALCASE=1; export DUALCASE # for MKS sh
+-as_nop=:
+-if test ${ZSH_VERSION+y} && (emulate sh) >/dev/null 2>&1
+-then :
++if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then :
+   emulate sh
+   NULLCMD=:
+   # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which
+   # is contrary to our usage.  Disable this feature.
+   alias -g '${1+"$@"}'='"$@"'
+   setopt NO_GLOB_SUBST
+-else $as_nop
++else
+   case `(set -o) 2>/dev/null` in #(
+   *posix*) :
+     set -o posix ;; #(
+@@ -18533,46 +15922,46 @@ esac
+ fi
+ 
+ 
+-
+-# Reset variables that may have inherited troublesome values from
+-# the environment.
+-
+-# IFS needs to be set, to space, tab, and newline, in precisely that order.
+-# (If _AS_PATH_WALK were called with IFS unset, it would have the
+-# side effect of setting IFS to empty, thus disabling word splitting.)
+-# Quoting is to prevent editors from complaining about space-tab.
+ as_nl='
+ '
+ export as_nl
+-IFS=" ""	$as_nl"
+-
+-PS1='$ '
+-PS2='> '
+-PS4='+ '
+-
+-# Ensure predictable behavior from utilities with locale-dependent output.
+-LC_ALL=C
+-export LC_ALL
+-LANGUAGE=C
+-export LANGUAGE
+-
+-# We cannot yet rely on "unset" to work, but we need these variables
+-# to be unset--not just set to an empty or harmless value--now, to
+-# avoid bugs in old shells (e.g. pre-3.0 UWIN ksh).  This construct
+-# also avoids known problems related to "unset" and subshell syntax
+-# in other old shells (e.g. bash 2.01 and pdksh 5.2.14).
+-for as_var in BASH_ENV ENV MAIL MAILPATH CDPATH
+-do eval test \${$as_var+y} \
+-  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
+-done
+-
+-# Ensure that fds 0, 1, and 2 are open.
+-if (exec 3>&0) 2>/dev/null; then :; else exec 0&1) 2>/dev/null; then :; else exec 1>/dev/null; fi
+-if (exec 3>&2)            ; then :; else exec 2>/dev/null; fi
++# Printing a long string crashes Solaris 7 /usr/bin/printf.
++as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\'
++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo
++as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo
++# Prefer a ksh shell builtin over an external printf program on Solaris,
++# but without wasting forks for bash or zsh.
++if test -z "$BASH_VERSION$ZSH_VERSION" \
++    && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then
++  as_echo='print -r --'
++  as_echo_n='print -rn --'
++elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then
++  as_echo='printf %s\n'
++  as_echo_n='printf %s'
++else
++  if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then
++    as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"'
++    as_echo_n='/usr/ucb/echo -n'
++  else
++    as_echo_body='eval expr "X$1" : "X\\(.*\\)"'
++    as_echo_n_body='eval
++      arg=$1;
++      case $arg in #(
++      *"$as_nl"*)
++	expr "X$arg" : "X\\(.*\\)$as_nl";
++	arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;;
++      esac;
++      expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl"
++    '
++    export as_echo_n_body
++    as_echo_n='sh -c $as_echo_n_body as_echo'
++  fi
++  export as_echo_body
++  as_echo='sh -c $as_echo_body as_echo'
++fi
+ 
+ # The user is always right.
+-if ${PATH_SEPARATOR+false} :; then
++if test "${PATH_SEPARATOR+set}" != set; then
+   PATH_SEPARATOR=:
+   (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && {
+     (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 ||
+@@ -18581,6 +15970,13 @@ if ${PATH_SEPARATOR+false} :; then
+ fi
+ 
+ 
++# IFS
++# We need space, tab and new line, in precisely that order.  Quoting is
++# there to prevent editors from complaining about space-tab.
++# (If _AS_PATH_WALK were called with IFS unset, it would disable word
++# splitting by setting IFS to empty value.)
++IFS=" ""	$as_nl"
++
+ # Find who we are.  Look in the path if we contain no directory separator.
+ as_myself=
+ case $0 in #((
+@@ -18589,12 +15985,8 @@ case $0 in #((
+ for as_dir in $PATH
+ do
+   IFS=$as_save_IFS
+-  case $as_dir in #(((
+-    '') as_dir=./ ;;
+-    */) ;;
+-    *) as_dir=$as_dir/ ;;
+-  esac
+-    test -r "$as_dir$0" && as_myself=$as_dir$0 && break
++  test -z "$as_dir" && as_dir=.
++    test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break
+   done
+ IFS=$as_save_IFS
+ 
+@@ -18606,10 +15998,30 @@ if test "x$as_myself" = x; then
+   as_myself=$0
+ fi
+ if test ! -f "$as_myself"; then
+-  printf "%s\n" "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
++  $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2
+   exit 1
+ fi
+ 
++# Unset variables that we do not need and which cause bugs (e.g. in
++# pre-3.0 UWIN ksh).  But do not cause bugs in bash 2.01; the "|| exit 1"
++# suppresses any "Segmentation fault" message there.  '((' could
++# trigger a bug in pdksh 5.2.14.
++for as_var in BASH_ENV ENV MAIL MAILPATH
++do eval test x\${$as_var+set} = xset \
++  && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || :
++done
++PS1='$ '
++PS2='> '
++PS4='+ '
++
++# NLS nuisances.
++LC_ALL=C
++export LC_ALL
++LANGUAGE=C
++export LANGUAGE
++
++# CDPATH.
++(unset CDPATH) >/dev/null 2>&1 && unset CDPATH
+ 
+ 
+ # as_fn_error STATUS ERROR [LINENO LOG_FD]
+@@ -18622,14 +16034,13 @@ as_fn_error ()
+   as_status=$1; test $as_status -eq 0 && as_status=1
+   if test "$4"; then
+     as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+-    printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
++    $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4
+   fi
+-  printf "%s\n" "$as_me: error: $2" >&2
++  $as_echo "$as_me: error: $2" >&2
+   as_fn_exit $as_status
+ } # as_fn_error
+ 
+ 
+-
+ # as_fn_set_status STATUS
+ # -----------------------
+ # Set $? to STATUS, without forking.
+@@ -18656,20 +16067,18 @@ as_fn_unset ()
+   { eval $1=; unset $1;}
+ }
+ as_unset=as_fn_unset
+-
+ # as_fn_append VAR VALUE
+ # ----------------------
+ # Append the text in VALUE to the end of the definition contained in VAR. Take
+ # advantage of any shell optimizations that allow amortized linear growth over
+ # repeated appends, instead of the typical quadratic growth present in naive
+ # implementations.
+-if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null
+-then :
++if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then :
+   eval 'as_fn_append ()
+   {
+     eval $1+=\$2
+   }'
+-else $as_nop
++else
+   as_fn_append ()
+   {
+     eval $1=\$$1\$2
+@@ -18681,13 +16090,12 @@ fi # as_fn_append
+ # Perform arithmetic evaluation on the ARGs, and store the result in the
+ # global $as_val. Take advantage of shells that can avoid forks. The arguments
+ # must be portable across $(()) and expr.
+-if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null
+-then :
++if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then :
+   eval 'as_fn_arith ()
+   {
+     as_val=$(( $* ))
+   }'
+-else $as_nop
++else
+   as_fn_arith ()
+   {
+     as_val=`expr "$@" || test $? -eq 1`
+@@ -18718,7 +16126,7 @@ as_me=`$as_basename -- "$0" ||
+ $as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \
+ 	 X"$0" : 'X\(//\)$' \| \
+ 	 X"$0" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X/"$0" |
++$as_echo X/"$0" |
+     sed '/^.*\/\([^/][^/]*\)\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -18740,10 +16148,6 @@ as_cr_Letters=$as_cr_letters$as_cr_LETTERS
+ as_cr_digits='0123456789'
+ as_cr_alnum=$as_cr_Letters$as_cr_digits
+ 
+-
+-# Determine whether it's possible to make 'echo' print without a newline.
+-# These variables are no longer used directly by Autoconf, but are AC_SUBSTed
+-# for compatibility with existing Makefiles.
+ ECHO_C= ECHO_N= ECHO_T=
+ case `echo -n x` in #(((((
+ -n*)
+@@ -18757,12 +16161,6 @@ case `echo -n x` in #(((((
+   ECHO_N='-n';;
+ esac
+ 
+-# For backward compatibility with old third-party macros, we provide
+-# the shell variables $as_echo and $as_echo_n.  New code should use
+-# AS_ECHO(["message"]) and AS_ECHO_N(["message"]), respectively.
+-as_echo='printf %s\n'
+-as_echo_n='printf %s'
+-
+ rm -f conf$$ conf$$.exe conf$$.file
+ if test -d conf$$.dir; then
+   rm -f conf$$.dir/conf$$.file
+@@ -18804,7 +16202,7 @@ as_fn_mkdir_p ()
+     as_dirs=
+     while :; do
+       case $as_dir in #(
+-      *\'*) as_qdir=`printf "%s\n" "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
++      *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'(
+       *) as_qdir=$as_dir;;
+       esac
+       as_dirs="'$as_qdir' $as_dirs"
+@@ -18813,7 +16211,7 @@ $as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ 	 X"$as_dir" : 'X\(//\)[^/]' \| \
+ 	 X"$as_dir" : 'X\(//\)$' \| \
+ 	 X"$as_dir" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X"$as_dir" |
++$as_echo X"$as_dir" |
+     sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -18876,7 +16274,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1
+ # values after options handling.
+ ac_log="
+ This file was extended by bolt plugin for ld $as_me 0.1, which was
+-generated by GNU Autoconf 2.71.  Invocation command line was
++generated by GNU Autoconf 2.69.  Invocation command line was
+ 
+   CONFIG_FILES    = $CONFIG_FILES
+   CONFIG_HEADERS  = $CONFIG_HEADERS
+@@ -18938,16 +16336,14 @@ $config_commands
+ Report bugs to the package provider."
+ 
+ _ACEOF
+-ac_cs_config=`printf "%s\n" "$ac_configure_args" | sed "$ac_safe_unquote"`
+-ac_cs_config_escaped=`printf "%s\n" "$ac_cs_config" | sed "s/^ //; s/'/'\\\\\\\\''/g"`
+ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+-ac_cs_config='$ac_cs_config_escaped'
++ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`"
+ ac_cs_version="\\
+ bolt plugin for ld config.status 0.1
+-configured by $0, generated by GNU Autoconf 2.71,
++configured by $0, generated by GNU Autoconf 2.69,
+   with options \\"\$ac_cs_config\\"
+ 
+-Copyright (C) 2021 Free Software Foundation, Inc.
++Copyright (C) 2012 Free Software Foundation, Inc.
+ This config.status script is free software; the Free Software Foundation
+ gives unlimited permission to copy, distribute and modify it."
+ 
+@@ -18987,15 +16383,15 @@ do
+   -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r)
+     ac_cs_recheck=: ;;
+   --version | --versio | --versi | --vers | --ver | --ve | --v | -V )
+-    printf "%s\n" "$ac_cs_version"; exit ;;
++    $as_echo "$ac_cs_version"; exit ;;
+   --config | --confi | --conf | --con | --co | --c )
+-    printf "%s\n" "$ac_cs_config"; exit ;;
++    $as_echo "$ac_cs_config"; exit ;;
+   --debug | --debu | --deb | --de | --d | -d )
+     debug=: ;;
+   --file | --fil | --fi | --f )
+     $ac_shift
+     case $ac_optarg in
+-    *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
++    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+     '') as_fn_error $? "missing file argument" ;;
+     esac
+     as_fn_append CONFIG_FILES " '$ac_optarg'"
+@@ -19003,7 +16399,7 @@ do
+   --header | --heade | --head | --hea )
+     $ac_shift
+     case $ac_optarg in
+-    *\'*) ac_optarg=`printf "%s\n" "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
++    *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;;
+     esac
+     as_fn_append CONFIG_HEADERS " '$ac_optarg'"
+     ac_need_defaults=false;;
+@@ -19012,7 +16408,7 @@ do
+     as_fn_error $? "ambiguous option: \`$1'
+ Try \`$0 --help' for more information.";;
+   --help | --hel | -h )
+-    printf "%s\n" "$ac_cs_usage"; exit ;;
++    $as_echo "$ac_cs_usage"; exit ;;
+   -q | -quiet | --quiet | --quie | --qui | --qu | --q \
+   | -silent | --silent | --silen | --sile | --sil | --si | --s)
+     ac_cs_silent=: ;;
+@@ -19040,7 +16436,7 @@ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+ if \$ac_cs_recheck; then
+   set X $SHELL '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion
+   shift
+-  \printf "%s\n" "running CONFIG_SHELL=$SHELL \$*" >&6
++  \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6
+   CONFIG_SHELL='$SHELL'
+   export CONFIG_SHELL
+   exec "\$@"
+@@ -19054,7 +16450,7 @@ exec 5>>config.log
+   sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX
+ ## Running $as_me. ##
+ _ASBOX
+-  printf "%s\n" "$ac_log"
++  $as_echo "$ac_log"
+ } >&5
+ 
+ _ACEOF
+@@ -19078,10 +16474,8 @@ enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`'
+ enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`'
+ pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`'
+ enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`'
+-shared_archive_member_spec='`$ECHO "$shared_archive_member_spec" | $SED "$delay_single_quote_subst"`'
+ SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`'
+ ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`'
+-PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`'
+ host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`'
+ host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`'
+ host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`'
+@@ -19102,22 +16496,13 @@ exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`'
+ lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`'
+ lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`'
+ lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`'
+-lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`'
+-lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`'
+ reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`'
+ reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`'
+-FILECMD='`$ECHO "$FILECMD" | $SED "$delay_single_quote_subst"`'
+ OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`'
+ deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`'
+ file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`'
+-file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`'
+-want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`'
+-DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`'
+-sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`'
+ AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`'
+-lt_ar_flags='`$ECHO "$lt_ar_flags" | $SED "$delay_single_quote_subst"`'
+ AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`'
+-archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`'
+ STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`'
+ RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`'
+ old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`'
+@@ -19130,22 +16515,16 @@ compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`'
+ GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`'
+ lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`'
+ lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`'
+-lt_cv_sys_global_symbol_to_import='`$ECHO "$lt_cv_sys_global_symbol_to_import" | $SED "$delay_single_quote_subst"`'
+ lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`'
+ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`'
+-lt_cv_nm_interface='`$ECHO "$lt_cv_nm_interface" | $SED "$delay_single_quote_subst"`'
+-nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`'
+-lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`'
+-lt_cv_truncate_bin='`$ECHO "$lt_cv_truncate_bin" | $SED "$delay_single_quote_subst"`'
+ objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`'
+ MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`'
+ lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`'
+-lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`'
+ lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`'
+ lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`'
+ lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`'
+ need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`'
+-MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`'
+ DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`'
+ NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`'
+ LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`'
+@@ -19169,6 +16548,7 @@ with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`'
+ allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED "$delay_single_quote_subst"`'
+ no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`'
+ hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`'
++hardcode_libdir_flag_spec_ld='`$ECHO "$hardcode_libdir_flag_spec_ld" | $SED "$delay_single_quote_subst"`'
+ hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`'
+ hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`'
+ hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`'
+@@ -19177,12 +16557,12 @@ hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_q
+ hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`'
+ inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`'
+ link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`'
++fix_srcfile_path='`$ECHO "$fix_srcfile_path" | $SED "$delay_single_quote_subst"`'
+ always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`'
+ export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`'
+ exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`'
+ include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`'
+ prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`'
+-postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`'
+ file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`'
+ variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`'
+ need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`'
+@@ -19201,8 +16581,7 @@ finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`'
+ finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`'
+ hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`'
+ sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`'
+-configure_time_dlsearch_path='`$ECHO "$configure_time_dlsearch_path" | $SED "$delay_single_quote_subst"`'
+-configure_time_lt_sys_library_path='`$ECHO "$configure_time_lt_sys_library_path" | $SED "$delay_single_quote_subst"`'
++sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`'
+ hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`'
+ enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`'
+ enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`'
+@@ -19222,8 +16601,8 @@ old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote
+ compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`'
+ GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`'
+ lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`'
+-lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`'
+ lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`'
++lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`'
+ lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`'
+ lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`'
+ archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`'
+@@ -19241,6 +16620,7 @@ with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`'
+ allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+ no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`'
+ hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`'
++hardcode_libdir_flag_spec_ld_CXX='`$ECHO "$hardcode_libdir_flag_spec_ld_CXX" | $SED "$delay_single_quote_subst"`'
+ hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`'
+ hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`'
+ hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`'
+@@ -19249,12 +16629,12 @@ hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_
+ hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`'
+ inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`'
+ link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`'
++fix_srcfile_path_CXX='`$ECHO "$fix_srcfile_path_CXX" | $SED "$delay_single_quote_subst"`'
+ always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`'
+ export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+ exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+ include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`'
+ prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+-postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`'
+ file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`'
+ hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`'
+ compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`'
+@@ -19279,7 +16659,6 @@ _LTECHO_EOF'
+ # Quote evaled strings.
+ for var in SHELL \
+ ECHO \
+-PATH_SEPARATOR \
+ SED \
+ GREP \
+ EGREP \
+@@ -19290,16 +16669,11 @@ LN_S \
+ lt_SP2NL \
+ lt_NL2SP \
+ reload_flag \
+-FILECMD \
+ OBJDUMP \
+ deplibs_check_method \
+ file_magic_cmd \
+-file_magic_glob \
+-want_nocaseglob \
+-DLLTOOL \
+-sharedlib_from_linklib_cmd \
+ AR \
+-archiver_list_spec \
++AR_FLAGS \
+ STRIP \
+ RANLIB \
+ CC \
+@@ -19307,19 +16681,14 @@ CFLAGS \
+ compiler \
+ lt_cv_sys_global_symbol_pipe \
+ lt_cv_sys_global_symbol_to_cdecl \
+-lt_cv_sys_global_symbol_to_import \
+ lt_cv_sys_global_symbol_to_c_name_address \
+ lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \
+-lt_cv_nm_interface \
+-nm_file_list_spec \
+-lt_cv_truncate_bin \
+ lt_prog_compiler_no_builtin_flag \
+-lt_prog_compiler_pic \
+ lt_prog_compiler_wl \
++lt_prog_compiler_pic \
+ lt_prog_compiler_static \
+ lt_cv_prog_compiler_c_o \
+ need_locks \
+-MANIFEST_TOOL \
+ DSYMUTIL \
+ NMEDIT \
+ LIPO \
+@@ -19333,7 +16702,9 @@ with_gnu_ld \
+ allow_undefined_flag \
+ no_undefined_flag \
+ hardcode_libdir_flag_spec \
++hardcode_libdir_flag_spec_ld \
+ hardcode_libdir_separator \
++fix_srcfile_path \
+ exclude_expsyms \
+ include_expsyms \
+ file_list_spec \
+@@ -19355,8 +16726,8 @@ LD_CXX \
+ reload_flag_CXX \
+ compiler_CXX \
+ lt_prog_compiler_no_builtin_flag_CXX \
+-lt_prog_compiler_pic_CXX \
+ lt_prog_compiler_wl_CXX \
++lt_prog_compiler_pic_CXX \
+ lt_prog_compiler_static_CXX \
+ lt_cv_prog_compiler_c_o_CXX \
+ export_dynamic_flag_spec_CXX \
+@@ -19366,7 +16737,9 @@ with_gnu_ld_CXX \
+ allow_undefined_flag_CXX \
+ no_undefined_flag_CXX \
+ hardcode_libdir_flag_spec_CXX \
++hardcode_libdir_flag_spec_ld_CXX \
+ hardcode_libdir_separator_CXX \
++fix_srcfile_path_CXX \
+ exclude_expsyms_CXX \
+ include_expsyms_CXX \
+ file_list_spec_CXX \
+@@ -19378,7 +16751,7 @@ postdeps_CXX \
+ compiler_lib_search_path_CXX; do
+     case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+     *[\\\\\\\`\\"\\\$]*)
+-      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
++      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\""
+       ;;
+     *)
+       eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+@@ -19400,13 +16773,11 @@ module_cmds \
+ module_expsym_cmds \
+ export_symbols_cmds \
+ prelink_cmds \
+-postlink_cmds \
+ postinstall_cmds \
+ postuninstall_cmds \
+ finish_cmds \
+ sys_lib_search_path_spec \
+-configure_time_dlsearch_path \
+-configure_time_lt_sys_library_path \
++sys_lib_dlsearch_path_spec \
+ reload_cmds_CXX \
+ old_archive_cmds_CXX \
+ old_archive_from_new_cmds_CXX \
+@@ -19416,11 +16787,10 @@ archive_expsym_cmds_CXX \
+ module_cmds_CXX \
+ module_expsym_cmds_CXX \
+ export_symbols_cmds_CXX \
+-prelink_cmds_CXX \
+-postlink_cmds_CXX; do
++prelink_cmds_CXX; do
+     case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in
+     *[\\\\\\\`\\"\\\$]*)
+-      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" ## exclude from sc_prohibit_nested_quotes
++      eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\""
+       ;;
+     *)
+       eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\""
+@@ -19429,16 +16799,19 @@ postlink_cmds_CXX; do
+ done
+ 
+ ac_aux_dir='$ac_aux_dir'
++xsi_shell='$xsi_shell'
++lt_shell_append='$lt_shell_append'
+ 
+-# See if we are running on zsh, and set the options that allow our
++# See if we are running on zsh, and set the options which allow our
+ # commands through without removal of \ escapes INIT.
+-if test -n "\${ZSH_VERSION+set}"; then
++if test -n "\${ZSH_VERSION+set}" ; then
+    setopt NO_GLOB_SUBST
+ fi
+ 
+ 
+     PACKAGE='$PACKAGE'
+     VERSION='$VERSION'
++    TIMESTAMP='$TIMESTAMP'
+     RM='$RM'
+     ofile='$ofile'
+ 
+@@ -19470,9 +16843,9 @@ done
+ # We use the long form for the default assignment because of an extremely
+ # bizarre bug on SunOS 4.1.3.
+ if $ac_need_defaults; then
+-  test ${CONFIG_FILES+y} || CONFIG_FILES=$config_files
+-  test ${CONFIG_HEADERS+y} || CONFIG_HEADERS=$config_headers
+-  test ${CONFIG_COMMANDS+y} || CONFIG_COMMANDS=$config_commands
++  test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files
++  test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers
++  test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands
+ fi
+ 
+ # Have a temporary directory for convenience.  Make it in the build tree
+@@ -19808,7 +17181,7 @@ do
+ 	   esac ||
+ 	   as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;;
+       esac
+-      case $ac_f in *\'*) ac_f=`printf "%s\n" "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
++      case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac
+       as_fn_append ac_file_inputs " '$ac_f'"
+     done
+ 
+@@ -19816,17 +17189,17 @@ do
+     # use $as_me), people would be surprised to read:
+     #    /* config.h.  Generated by config.status.  */
+     configure_input='Generated from '`
+-	  printf "%s\n" "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
++	  $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g'
+ 	`' by configure.'
+     if test x"$ac_file" != x-; then
+       configure_input="$ac_file.  $configure_input"
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
+-printf "%s\n" "$as_me: creating $ac_file" >&6;}
++      { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5
++$as_echo "$as_me: creating $ac_file" >&6;}
+     fi
+     # Neutralize special characters interpreted by sed in replacement strings.
+     case $configure_input in #(
+     *\&* | *\|* | *\\* )
+-       ac_sed_conf_input=`printf "%s\n" "$configure_input" |
++       ac_sed_conf_input=`$as_echo "$configure_input" |
+        sed 's/[\\\\&|]/\\\\&/g'`;; #(
+     *) ac_sed_conf_input=$configure_input;;
+     esac
+@@ -19843,7 +17216,7 @@ $as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ 	 X"$ac_file" : 'X\(//\)[^/]' \| \
+ 	 X"$ac_file" : 'X\(//\)$' \| \
+ 	 X"$ac_file" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X"$ac_file" |
++$as_echo X"$ac_file" |
+     sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -19867,9 +17240,9 @@ printf "%s\n" X"$ac_file" |
+ case "$ac_dir" in
+ .) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;;
+ *)
+-  ac_dir_suffix=/`printf "%s\n" "$ac_dir" | sed 's|^\.[\\/]||'`
++  ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'`
+   # A ".." for each directory in $ac_dir_suffix.
+-  ac_top_builddir_sub=`printf "%s\n" "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
++  ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'`
+   case $ac_top_builddir_sub in
+   "") ac_top_builddir_sub=. ac_top_build_prefix= ;;
+   *)  ac_top_build_prefix=$ac_top_builddir_sub/ ;;
+@@ -19931,8 +17304,8 @@ ac_sed_dataroot='
+ case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in
+ *datarootdir*) ac_datarootdir_seen=yes;;
+ *@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*)
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
+-printf "%s\n" "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
++  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5
++$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;}
+ _ACEOF
+ cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1
+   ac_datarootdir_hack='
+@@ -19976,9 +17349,9 @@ test -z "$ac_datarootdir_hack$ac_datarootdir_seen" &&
+   { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } &&
+   { ac_out=`sed -n '/^[	 ]*datarootdir[	 ]*:*=/p' \
+       "$ac_tmp/out"`; test -z "$ac_out"; } &&
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
++  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+ which seems to be undefined.  Please make sure it is defined" >&5
+-printf "%s\n" "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
++$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir'
+ which seems to be undefined.  Please make sure it is defined" >&2;}
+ 
+   rm -f "$ac_tmp/stdin"
+@@ -19994,20 +17367,20 @@ which seems to be undefined.  Please make sure it is defined" >&2;}
+   #
+   if test x"$ac_file" != x-; then
+     {
+-      printf "%s\n" "/* $configure_input  */" >&1 \
++      $as_echo "/* $configure_input  */" \
+       && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs"
+     } >"$ac_tmp/config.h" \
+       || as_fn_error $? "could not create $ac_file" "$LINENO" 5
+     if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then
+-      { printf "%s\n" "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
+-printf "%s\n" "$as_me: $ac_file is unchanged" >&6;}
++      { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5
++$as_echo "$as_me: $ac_file is unchanged" >&6;}
+     else
+       rm -f "$ac_file"
+       mv "$ac_tmp/config.h" "$ac_file" \
+ 	|| as_fn_error $? "could not create $ac_file" "$LINENO" 5
+     fi
+   else
+-    printf "%s\n" "/* $configure_input  */" >&1 \
++    $as_echo "/* $configure_input  */" \
+       && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \
+       || as_fn_error $? "could not create -" "$LINENO" 5
+   fi
+@@ -20027,7 +17400,7 @@ $as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ 	 X"$_am_arg" : 'X\(//\)[^/]' \| \
+ 	 X"$_am_arg" : 'X\(//\)$' \| \
+ 	 X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X"$_am_arg" |
++$as_echo X"$_am_arg" |
+     sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -20047,8 +17420,8 @@ printf "%s\n" X"$_am_arg" |
+ 	  s/.*/./; q'`/stamp-h$_am_stamp_count
+  ;;
+ 
+-  :C)  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
+-printf "%s\n" "$as_me: executing $ac_file commands" >&6;}
++  :C)  { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5
++$as_echo "$as_me: executing $ac_file commands" >&6;}
+  ;;
+   esac
+ 
+@@ -20074,7 +17447,7 @@ esac
+   for am_mf
+   do
+     # Strip MF so we end up with the name of the file.
+-    am_mf=`printf "%s\n" "$am_mf" | sed -e 's/:.*$//'`
++    am_mf=`$as_echo "$am_mf" | sed -e 's/:.*$//'`
+     # Check whether this is an Automake generated Makefile which includes
+     # dependency-tracking related rules and includes.
+     # Grep'ing the whole file directly is not great: AIX grep has a line
+@@ -20086,7 +17459,7 @@ $as_expr X"$am_mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
+ 	 X"$am_mf" : 'X\(//\)[^/]' \| \
+ 	 X"$am_mf" : 'X\(//\)$' \| \
+ 	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X"$am_mf" |
++$as_echo X"$am_mf" |
+     sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -20108,7 +17481,7 @@ printf "%s\n" X"$am_mf" |
+ $as_expr X/"$am_mf" : '.*/\([^/][^/]*\)/*$' \| \
+ 	 X"$am_mf" : 'X\(//\)$' \| \
+ 	 X"$am_mf" : 'X\(/\)' \| . 2>/dev/null ||
+-printf "%s\n" X/"$am_mf" |
++$as_echo X/"$am_mf" |
+     sed '/^.*\/\([^/][^/]*\)\/*$/{
+ 	    s//\1/
+ 	    q
+@@ -20133,8 +17506,8 @@ printf "%s\n" X/"$am_mf" |
+    (exit $ac_status); } || am_rc=$?
+   done
+   if test $am_rc -ne 0; then
+-    { { printf "%s\n" "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
+-printf "%s\n" "$as_me: error: in \`$ac_pwd':" >&2;}
++    { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5
++$as_echo "$as_me: error: in \`$ac_pwd':" >&2;}
+ as_fn_error $? "Something went wrong bootstrapping makefile fragments
+     for automatic dependency tracking.  If GNU make was not used, consider
+     re-running the configure script with MAKE=\"gmake\" (or whatever is
+@@ -20152,53 +17525,54 @@ See \`config.log' for more details" "$LINENO" 5; }
+  ;;
+     "libtool":C)
+ 
+-    # See if we are running on zsh, and set the options that allow our
++    # See if we are running on zsh, and set the options which allow our
+     # commands through without removal of \ escapes.
+-    if test -n "${ZSH_VERSION+set}"; then
++    if test -n "${ZSH_VERSION+set}" ; then
+       setopt NO_GLOB_SUBST
+     fi
+ 
+-    cfgfile=${ofile}T
++    cfgfile="${ofile}T"
+     trap "$RM \"$cfgfile\"; exit 1" 1 2 15
+     $RM "$cfgfile"
+ 
+     cat <<_LT_EOF >> "$cfgfile"
+ #! $SHELL
+-# Generated automatically by $as_me ($PACKAGE) $VERSION
++
++# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services.
++# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION
+ # Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`:
+ # NOTE: Changes made to this file will be lost: look at ltmain.sh.
+-
+-# Provide generalized library-building support services.
+-# Written by Gordon Matzigkeit, 1996
+-
+-# Copyright (C) 2014 Free Software Foundation, Inc.
+-# This is free software; see the source for copying conditions.  There is NO
+-# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+-
+-# GNU Libtool is free software; you can redistribute it and/or modify
+-# it under the terms of the GNU General Public License as published by
+-# the Free Software Foundation; either version 2 of of the License, or
+-# (at your option) any later version.
+ #
+-# As a special exception to the GNU General Public License, if you
+-# distribute this file as part of a program or library that is built
+-# using GNU Libtool, you may include this file under the  same
+-# distribution terms that you use for the rest of that program.
++#   Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005,
++#                 2006, 2007, 2008, 2009 Free Software Foundation, Inc.
++#   Written by Gordon Matzigkeit, 1996
+ #
+-# GNU Libtool is distributed in the hope that it will be useful, but
+-# WITHOUT ANY WARRANTY; without even the implied warranty of
++#   This file is part of GNU Libtool.
++#
++# GNU Libtool is free software; you can redistribute it and/or
++# modify it under the terms of the GNU General Public License as
++# published by the Free Software Foundation; either version 2 of
++# the License, or (at your option) any later version.
++#
++# As a special exception to the GNU General Public License,
++# if you distribute this file as part of a program or library that
++# is built using GNU Libtool, you may include this file under the
++# same distribution terms that you use for the rest of that program.
++#
++# GNU Libtool is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
+ # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ # GNU General Public License for more details.
+ #
+ # You should have received a copy of the GNU General Public License
+-# along with this program.  If not, see .
++# along with GNU Libtool; see the file COPYING.  If not, a copy
++# can be downloaded from http://www.gnu.org/licenses/gpl.html, or
++# obtained by writing to the Free Software Foundation, Inc.,
++# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ 
+ 
+ # The names of the tagged configurations supported by this script.
+-available_tags='CXX '
+-
+-# Configured defaults for sys_lib_dlsearch_path munging.
+-: \${LT_SYS_LIBRARY_PATH="$configure_time_lt_sys_library_path"}
++available_tags="CXX "
+ 
+ # ### BEGIN LIBTOOL CONFIG
+ 
+@@ -20218,18 +17592,12 @@ pic_mode=$pic_mode
+ # Whether or not to optimize for fast installation.
+ fast_install=$enable_fast_install
+ 
+-# Shared archive member basename,for filename based shared library versioning on AIX.
+-shared_archive_member_spec=$shared_archive_member_spec
+-
+ # Shell to use when invoking shell scripts.
+ SHELL=$lt_SHELL
+ 
+ # An echo program that protects backslashes.
+ ECHO=$lt_ECHO
+ 
+-# The PATH separator for the build system.
+-PATH_SEPARATOR=$lt_PATH_SEPARATOR
+-
+ # The host system.
+ host_alias=$host_alias
+ host=$host
+@@ -20279,47 +17647,18 @@ SP2NL=$lt_lt_SP2NL
+ # turn newlines into spaces.
+ NL2SP=$lt_lt_NL2SP
+ 
+-# convert \$build file names to \$host format.
+-to_host_file_cmd=$lt_cv_to_host_file_cmd
+-
+-# convert \$build files to toolchain format.
+-to_tool_file_cmd=$lt_cv_to_tool_file_cmd
+-
+-# A file(cmd) program that detects file types.
+-FILECMD=$lt_FILECMD
+-
+ # An object symbol dumper.
+ OBJDUMP=$lt_OBJDUMP
+ 
+ # Method to check whether dependent libraries are shared objects.
+ deplibs_check_method=$lt_deplibs_check_method
+ 
+-# Command to use when deplibs_check_method = "file_magic".
++# Command to use when deplibs_check_method == "file_magic".
+ file_magic_cmd=$lt_file_magic_cmd
+ 
+-# How to find potential files when deplibs_check_method = "file_magic".
+-file_magic_glob=$lt_file_magic_glob
+-
+-# Find potential files using nocaseglob when deplibs_check_method = "file_magic".
+-want_nocaseglob=$lt_want_nocaseglob
+-
+-# DLL creation program.
+-DLLTOOL=$lt_DLLTOOL
+-
+-# Command to associate shared and link libraries.
+-sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd
+-
+ # The archiver.
+ AR=$lt_AR
+-
+-# Flags to create an archive (by configure).
+-lt_ar_flags=$lt_ar_flags
+-
+-# Flags to create an archive.
+-AR_FLAGS=\${ARFLAGS-"\$lt_ar_flags"}
+-
+-# How to feed a file listing to the archiver.
+-archiver_list_spec=$lt_archiver_list_spec
++AR_FLAGS=$lt_AR_FLAGS
+ 
+ # A symbol stripping program.
+ STRIP=$lt_STRIP
+@@ -20344,27 +17683,12 @@ global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe
+ # Transform the output of nm in a proper C declaration.
+ global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl
+ 
+-# Transform the output of nm into a list of symbols to manually relocate.
+-global_symbol_to_import=$lt_lt_cv_sys_global_symbol_to_import
+-
+ # Transform the output of nm in a C name address pair.
+ global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address
+ 
+ # Transform the output of nm in a C name address pair when lib prefix is needed.
+ global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix
+ 
+-# The name lister interface.
+-nm_interface=$lt_lt_cv_nm_interface
+-
+-# Specify filename containing input files for \$NM.
+-nm_file_list_spec=$lt_nm_file_list_spec
+-
+-# The root where to search for dependent libraries,and where our libraries should be installed.
+-lt_sysroot=$lt_sysroot
+-
+-# Command to truncate a binary pipe.
+-lt_truncate_bin=$lt_lt_cv_truncate_bin
+-
+ # The name of the directory that contains temporary libtool files.
+ objdir=$objdir
+ 
+@@ -20374,9 +17698,6 @@ MAGIC_CMD=$MAGIC_CMD
+ # Must we lock files when doing compilation?
+ need_locks=$lt_need_locks
+ 
+-# Manifest tool.
+-MANIFEST_TOOL=$lt_MANIFEST_TOOL
+-
+ # Tool to manipulate archived DWARF debug symbol files on Mac OS X.
+ DSYMUTIL=$lt_DSYMUTIL
+ 
+@@ -20455,11 +17776,8 @@ hardcode_into_libs=$hardcode_into_libs
+ # Compile-time system search path for libraries.
+ sys_lib_search_path_spec=$lt_sys_lib_search_path_spec
+ 
+-# Detected run-time system search path for libraries.
+-sys_lib_dlsearch_path_spec=$lt_configure_time_dlsearch_path
+-
+-# Explicit LT_SYS_LIBRARY_PATH set during ./configure time.
+-configure_time_lt_sys_library_path=$lt_configure_time_lt_sys_library_path
++# Run-time system search path for libraries.
++sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec
+ 
+ # Whether dlopen is supported.
+ dlopen_support=$enable_dlopen
+@@ -20494,12 +17812,12 @@ with_gcc=$GCC
+ # Compiler flag to turn off builtin functions.
+ no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag
+ 
+-# Additional compiler flags for building library objects.
+-pic_flag=$lt_lt_prog_compiler_pic
+-
+ # How to pass a linker flag through the compiler.
+ wl=$lt_lt_prog_compiler_wl
+ 
++# Additional compiler flags for building library objects.
++pic_flag=$lt_lt_prog_compiler_pic
++
+ # Compiler flag to prevent dynamic linking.
+ link_static_flag=$lt_lt_prog_compiler_static
+ 
+@@ -20549,16 +17867,20 @@ no_undefined_flag=$lt_no_undefined_flag
+ # This must work even if \$libdir does not exist
+ hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec
+ 
++# If ld is used when linking, flag to hardcode \$libdir into a binary
++# during linking.  This must work even if \$libdir does not exist.
++hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld
++
+ # Whether we need a single "-rpath" flag with a separated argument.
+ hardcode_libdir_separator=$lt_hardcode_libdir_separator
+ 
+-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+ # DIR into the resulting binary.
+ hardcode_direct=$hardcode_direct
+ 
+-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+ # DIR into the resulting binary and the resulting library dependency is
+-# "absolute",i.e impossible to change by setting \$shlibpath_var if the
++# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+ # library is relocated.
+ hardcode_direct_absolute=$hardcode_direct_absolute
+ 
+@@ -20582,6 +17904,9 @@ inherit_rpath=$inherit_rpath
+ # Whether libtool must link a program against all its dependency libraries.
+ link_all_deplibs=$link_all_deplibs
+ 
++# Fix the shell variable \$srcfile for the compiler.
++fix_srcfile_path=$lt_fix_srcfile_path
++
+ # Set to "yes" if exported symbols are required.
+ always_export_symbols=$always_export_symbols
+ 
+@@ -20597,9 +17922,6 @@ include_expsyms=$lt_include_expsyms
+ # Commands necessary for linking programs (against libraries) with templates.
+ prelink_cmds=$lt_prelink_cmds
+ 
+-# Commands necessary for finishing linking programs.
+-postlink_cmds=$lt_postlink_cmds
+-
+ # Specify filename containing input files.
+ file_list_spec=$lt_file_list_spec
+ 
+@@ -20622,65 +17944,6 @@ compiler_lib_search_path=$lt_compiler_lib_search_path
+ 
+ # ### END LIBTOOL CONFIG
+ 
+-_LT_EOF
+-
+-    cat <<'_LT_EOF' >> "$cfgfile"
+-
+-# ### BEGIN FUNCTIONS SHARED WITH CONFIGURE
+-
+-# func_munge_path_list VARIABLE PATH
+-# -----------------------------------
+-# VARIABLE is name of variable containing _space_ separated list of
+-# directories to be munged by the contents of PATH, which is string
+-# having a format:
+-# "DIR[:DIR]:"
+-#       string "DIR[ DIR]" will be prepended to VARIABLE
+-# ":DIR[:DIR]"
+-#       string "DIR[ DIR]" will be appended to VARIABLE
+-# "DIRP[:DIRP]::[DIRA:]DIRA"
+-#       string "DIRP[ DIRP]" will be prepended to VARIABLE and string
+-#       "DIRA[ DIRA]" will be appended to VARIABLE
+-# "DIR[:DIR]"
+-#       VARIABLE will be replaced by "DIR[ DIR]"
+-func_munge_path_list ()
+-{
+-    case x$2 in
+-    x)
+-        ;;
+-    *:)
+-        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'` \$$1\"
+-        ;;
+-    x:*)
+-        eval $1=\"\$$1 `$ECHO $2 | $SED 's/:/ /g'`\"
+-        ;;
+-    *::*)
+-        eval $1=\"\$$1\ `$ECHO $2 | $SED -e 's/.*:://' -e 's/:/ /g'`\"
+-        eval $1=\"`$ECHO $2 | $SED -e 's/::.*//' -e 's/:/ /g'`\ \$$1\"
+-        ;;
+-    *)
+-        eval $1=\"`$ECHO $2 | $SED 's/:/ /g'`\"
+-        ;;
+-    esac
+-}
+-
+-
+-# Calculate cc_basename.  Skip known compiler wrappers and cross-prefix.
+-func_cc_basename ()
+-{
+-    for cc_temp in $*""; do
+-      case $cc_temp in
+-        compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+-        distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+-        \-*) ;;
+-        *) break;;
+-      esac
+-    done
+-    func_cc_basename_result=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+-}
+-
+-
+-# ### END FUNCTIONS SHARED WITH CONFIGURE
+-
+ _LT_EOF
+ 
+   case $host_os in
+@@ -20689,7 +17952,7 @@ _LT_EOF
+ # AIX sometimes has problems with the GCC collect2 program.  For some
+ # reason, if we set the COLLECT_NAMES environment variable, the problems
+ # vanish in a puff of smoke.
+-if test set != "${COLLECT_NAMES+set}"; then
++if test "X${COLLECT_NAMES+set}" != Xset; then
+   COLLECT_NAMES=
+   export COLLECT_NAMES
+ fi
+@@ -20698,18 +17961,217 @@ _LT_EOF
+   esac
+ 
+ 
+-
+-ltmain=$ac_aux_dir/ltmain.sh
++ltmain="$ac_aux_dir/ltmain.sh"
+ 
+ 
+   # We use sed instead of cat because bash on DJGPP gets confused if
+   # if finds mixed CR/LF and LF-only lines.  Since sed operates in
+   # text mode, it properly converts lines to CR/LF.  This bash problem
+   # is reportedly fixed, but why not run on old versions too?
+-  $SED '$q' "$ltmain" >> "$cfgfile" \
+-     || (rm -f "$cfgfile"; exit 1)
++  sed '/^# Generated shell functions inserted here/q' "$ltmain" >> "$cfgfile" \
++    || (rm -f "$cfgfile"; exit 1)
++
++  case $xsi_shell in
++  yes)
++    cat << \_LT_EOF >> "$cfgfile"
++
++# func_dirname file append nondir_replacement
++# Compute the dirname of FILE.  If nonempty, add APPEND to the result,
++# otherwise set result to NONDIR_REPLACEMENT.
++func_dirname ()
++{
++  case ${1} in
++    */*) func_dirname_result="${1%/*}${2}" ;;
++    *  ) func_dirname_result="${3}" ;;
++  esac
++}
++
++# func_basename file
++func_basename ()
++{
++  func_basename_result="${1##*/}"
++}
++
++# func_dirname_and_basename file append nondir_replacement
++# perform func_basename and func_dirname in a single function
++# call:
++#   dirname:  Compute the dirname of FILE.  If nonempty,
++#             add APPEND to the result, otherwise set result
++#             to NONDIR_REPLACEMENT.
++#             value returned in "$func_dirname_result"
++#   basename: Compute filename of FILE.
++#             value retuned in "$func_basename_result"
++# Implementation must be kept synchronized with func_dirname
++# and func_basename. For efficiency, we do not delegate to
++# those functions but instead duplicate the functionality here.
++func_dirname_and_basename ()
++{
++  case ${1} in
++    */*) func_dirname_result="${1%/*}${2}" ;;
++    *  ) func_dirname_result="${3}" ;;
++  esac
++  func_basename_result="${1##*/}"
++}
++
++# func_stripname prefix suffix name
++# strip PREFIX and SUFFIX off of NAME.
++# PREFIX and SUFFIX must not contain globbing or regex special
++# characters, hashes, percent signs, but SUFFIX may contain a leading
++# dot (in which case that matches only a dot).
++func_stripname ()
++{
++  # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are
++  # positional parameters, so assign one to ordinary parameter first.
++  func_stripname_result=${3}
++  func_stripname_result=${func_stripname_result#"${1}"}
++  func_stripname_result=${func_stripname_result%"${2}"}
++}
++
++# func_opt_split
++func_opt_split ()
++{
++  func_opt_split_opt=${1%%=*}
++  func_opt_split_arg=${1#*=}
++}
++
++# func_lo2o object
++func_lo2o ()
++{
++  case ${1} in
++    *.lo) func_lo2o_result=${1%.lo}.${objext} ;;
++    *)    func_lo2o_result=${1} ;;
++  esac
++}
++
++# func_xform libobj-or-source
++func_xform ()
++{
++  func_xform_result=${1%.*}.lo
++}
++
++# func_arith arithmetic-term...
++func_arith ()
++{
++  func_arith_result=$(( $* ))
++}
+ 
+-   mv -f "$cfgfile" "$ofile" ||
++# func_len string
++# STRING may not start with a hyphen.
++func_len ()
++{
++  func_len_result=${#1}
++}
++
++_LT_EOF
++    ;;
++  *) # Bourne compatible functions.
++    cat << \_LT_EOF >> "$cfgfile"
++
++# func_dirname file append nondir_replacement
++# Compute the dirname of FILE.  If nonempty, add APPEND to the result,
++# otherwise set result to NONDIR_REPLACEMENT.
++func_dirname ()
++{
++  # Extract subdirectory from the argument.
++  func_dirname_result=`$ECHO "${1}" | $SED "$dirname"`
++  if test "X$func_dirname_result" = "X${1}"; then
++    func_dirname_result="${3}"
++  else
++    func_dirname_result="$func_dirname_result${2}"
++  fi
++}
++
++# func_basename file
++func_basename ()
++{
++  func_basename_result=`$ECHO "${1}" | $SED "$basename"`
++}
++
++
++# func_stripname prefix suffix name
++# strip PREFIX and SUFFIX off of NAME.
++# PREFIX and SUFFIX must not contain globbing or regex special
++# characters, hashes, percent signs, but SUFFIX may contain a leading
++# dot (in which case that matches only a dot).
++# func_strip_suffix prefix name
++func_stripname ()
++{
++  case ${2} in
++    .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
++    *)  func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
++  esac
++}
++
++# sed scripts:
++my_sed_long_opt='1s/^\(-[^=]*\)=.*/\1/;q'
++my_sed_long_arg='1s/^-[^=]*=//'
++
++# func_opt_split
++func_opt_split ()
++{
++  func_opt_split_opt=`$ECHO "${1}" | $SED "$my_sed_long_opt"`
++  func_opt_split_arg=`$ECHO "${1}" | $SED "$my_sed_long_arg"`
++}
++
++# func_lo2o object
++func_lo2o ()
++{
++  func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"`
++}
++
++# func_xform libobj-or-source
++func_xform ()
++{
++  func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'`
++}
++
++# func_arith arithmetic-term...
++func_arith ()
++{
++  func_arith_result=`expr "$@"`
++}
++
++# func_len string
++# STRING may not start with a hyphen.
++func_len ()
++{
++  func_len_result=`expr "$1" : ".*" 2>/dev/null || echo $max_cmd_len`
++}
++
++_LT_EOF
++esac
++
++case $lt_shell_append in
++  yes)
++    cat << \_LT_EOF >> "$cfgfile"
++
++# func_append var value
++# Append VALUE to the end of shell variable VAR.
++func_append ()
++{
++  eval "$1+=\$2"
++}
++_LT_EOF
++    ;;
++  *)
++    cat << \_LT_EOF >> "$cfgfile"
++
++# func_append var value
++# Append VALUE to the end of shell variable VAR.
++func_append ()
++{
++  eval "$1=\$$1\$2"
++}
++
++_LT_EOF
++    ;;
++  esac
++
++
++  sed -n '/^# Generated shell functions inserted here/,$p' "$ltmain" >> "$cfgfile" \
++    || (rm -f "$cfgfile"; exit 1)
++
++  mv -f "$cfgfile" "$ofile" ||
+     (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile")
+   chmod +x "$ofile"
+ 
+@@ -20737,12 +18199,12 @@ with_gcc=$GCC_CXX
+ # Compiler flag to turn off builtin functions.
+ no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX
+ 
+-# Additional compiler flags for building library objects.
+-pic_flag=$lt_lt_prog_compiler_pic_CXX
+-
+ # How to pass a linker flag through the compiler.
+ wl=$lt_lt_prog_compiler_wl_CXX
+ 
++# Additional compiler flags for building library objects.
++pic_flag=$lt_lt_prog_compiler_pic_CXX
++
+ # Compiler flag to prevent dynamic linking.
+ link_static_flag=$lt_lt_prog_compiler_static_CXX
+ 
+@@ -20792,16 +18254,20 @@ no_undefined_flag=$lt_no_undefined_flag_CXX
+ # This must work even if \$libdir does not exist
+ hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX
+ 
++# If ld is used when linking, flag to hardcode \$libdir into a binary
++# during linking.  This must work even if \$libdir does not exist.
++hardcode_libdir_flag_spec_ld=$lt_hardcode_libdir_flag_spec_ld_CXX
++
+ # Whether we need a single "-rpath" flag with a separated argument.
+ hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX
+ 
+-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+ # DIR into the resulting binary.
+ hardcode_direct=$hardcode_direct_CXX
+ 
+-# Set to "yes" if using DIR/libNAME\$shared_ext during linking hardcodes
++# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes
+ # DIR into the resulting binary and the resulting library dependency is
+-# "absolute",i.e impossible to change by setting \$shlibpath_var if the
++# "absolute",i.e impossible to change by setting \${shlibpath_var} if the
+ # library is relocated.
+ hardcode_direct_absolute=$hardcode_direct_absolute_CXX
+ 
+@@ -20825,6 +18291,9 @@ inherit_rpath=$inherit_rpath_CXX
+ # Whether libtool must link a program against all its dependency libraries.
+ link_all_deplibs=$link_all_deplibs_CXX
+ 
++# Fix the shell variable \$srcfile for the compiler.
++fix_srcfile_path=$lt_fix_srcfile_path_CXX
++
+ # Set to "yes" if exported symbols are required.
+ always_export_symbols=$always_export_symbols_CXX
+ 
+@@ -20840,9 +18309,6 @@ include_expsyms=$lt_include_expsyms_CXX
+ # Commands necessary for linking programs (against libraries) with templates.
+ prelink_cmds=$lt_prelink_cmds_CXX
+ 
+-# Commands necessary for finishing linking programs.
+-postlink_cmds=$lt_postlink_cmds_CXX
+-
+ # Specify filename containing input files.
+ file_list_spec=$lt_file_list_spec_CXX
+ 
+@@ -20901,9 +18367,8 @@ if test "$no_create" != yes; then
+   $ac_cs_success || as_fn_exit 1
+ fi
+ if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then
+-  { printf "%s\n" "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
+-printf "%s\n" "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
++  { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5
++$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;}
+ fi
+ 
+ 
+-
+-- 
+2.33.0
+
diff --git a/0088-LoongArch-Expand-left-rotate-to-right-rotate-with-ne.patch b/0088-LoongArch-Expand-left-rotate-to-right-rotate-with-ne.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ac9932e027052b366895d21cb810623be7ea7733
--- /dev/null
+++ b/0088-LoongArch-Expand-left-rotate-to-right-rotate-with-ne.patch
@@ -0,0 +1,253 @@
+From a2cc86c9b5e44c3dcdb8c52d6ae5f535442ec1d4 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 17 Dec 2023 05:38:20 +0800
+Subject: [PATCH 088/188] LoongArch: Expand left rotate to right rotate with
+ negated amount
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (rotl3):
+	New define_expand.
+	* config/loongarch/simd.md (vrotl3): Likewise.
+	(rotl3): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/rotl-with-rotr.c: New test.
+	* gcc.target/loongarch/rotl-with-vrotr-b.c: New test.
+	* gcc.target/loongarch/rotl-with-vrotr-h.c: New test.
+	* gcc.target/loongarch/rotl-with-vrotr-w.c: New test.
+	* gcc.target/loongarch/rotl-with-vrotr-d.c: New test.
+	* gcc.target/loongarch/rotl-with-xvrotr-b.c: New test.
+	* gcc.target/loongarch/rotl-with-xvrotr-h.c: New test.
+	* gcc.target/loongarch/rotl-with-xvrotr-w.c: New test.
+	* gcc.target/loongarch/rotl-with-xvrotr-d.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 12 ++++++++
+ gcc/config/loongarch/simd.md                  | 29 +++++++++++++++++++
+ .../gcc.target/loongarch/rotl-with-rotr.c     |  9 ++++++
+ .../gcc.target/loongarch/rotl-with-vrotr-b.c  |  7 +++++
+ .../gcc.target/loongarch/rotl-with-vrotr-d.c  |  7 +++++
+ .../gcc.target/loongarch/rotl-with-vrotr-h.c  |  7 +++++
+ .../gcc.target/loongarch/rotl-with-vrotr-w.c  | 28 ++++++++++++++++++
+ .../gcc.target/loongarch/rotl-with-xvrotr-b.c |  7 +++++
+ .../gcc.target/loongarch/rotl-with-xvrotr-d.c |  7 +++++
+ .../gcc.target/loongarch/rotl-with-xvrotr-h.c |  7 +++++
+ .../gcc.target/loongarch/rotl-with-xvrotr-w.c |  7 +++++
+ 11 files changed, 127 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-rotr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-b.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-h.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-w.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-b.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-h.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-w.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 3d5b75825..ed4d4b906 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2903,6 +2903,18 @@
+   [(set_attr "type" "shift,shift")
+    (set_attr "mode" "SI")])
+ 
++;; Expand left rotate to right rotate.
++(define_expand "rotl3"
++  [(set (match_dup 3)
++	(neg:SI (match_operand:SI 2 "register_operand")))
++   (set (match_operand:GPR 0 "register_operand")
++	(rotatert:GPR (match_operand:GPR 1 "register_operand")
++		      (match_dup 3)))]
++  ""
++  {
++    operands[3] = gen_reg_rtx (SImode);
++  });
++
+ ;; The following templates were added to generate "bstrpick.d + alsl.d"
+ ;; instruction pairs.
+ ;; It is required that the values of const_immalsl_operand and
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index 13202f79b..93fb39abc 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -268,6 +268,35 @@
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "")])
+ 
++;; Expand left rotate to right rotate.
++(define_expand "vrotl3"
++  [(set (match_dup 3)
++	(neg:IVEC (match_operand:IVEC 2 "register_operand")))
++   (set (match_operand:IVEC 0 "register_operand")
++	(rotatert:IVEC (match_operand:IVEC 1 "register_operand")
++		       (match_dup 3)))]
++  ""
++  {
++    operands[3] = gen_reg_rtx (mode);
++  });
++
++;; Expand left rotate with a scalar amount to right rotate: negate the
++;; scalar before broadcasting it because scalar negation is cheaper than
++;; vector negation.
++(define_expand "rotl3"
++  [(set (match_dup 3)
++	(neg:SI (match_operand:SI 2 "register_operand")))
++   (set (match_dup 4)
++	(vec_duplicate:IVEC (subreg: (match_dup 3) 0)))
++   (set (match_operand:IVEC 0 "register_operand")
++	(rotatert:IVEC (match_operand:IVEC 1 "register_operand")
++		       (match_dup 4)))]
++  ""
++  {
++    operands[3] = gen_reg_rtx (SImode);
++    operands[4] = gen_reg_rtx (mode);
++  });
++
+ ;; vrotri.{b/h/w/d}
+ 
+ (define_insn "rotr3"
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-rotr.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-rotr.c
+new file mode 100644
+index 000000000..84cc53cec
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-rotr.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++/* { dg-final { scan-assembler "rotr\\.w" } } */
++
++unsigned
++t (unsigned a, unsigned b)
++{
++  return a << b | a >> (32 - b);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-b.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-b.c
+new file mode 100644
+index 000000000..14298bf9e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-b.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "vrotr\\.b" 2 } } */
++/* { dg-final { scan-assembler-times "vneg\\.b" 1 } } */
++
++#define TYPE char
++#include "rotl-with-vrotr-w.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-d.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-d.c
+new file mode 100644
+index 000000000..0e971b323
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-d.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "vrotr\\.d" 2 } } */
++/* { dg-final { scan-assembler-times "vneg\\.d" 1 } } */
++
++#define TYPE long long
++#include "rotl-with-vrotr-w.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-h.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-h.c
+new file mode 100644
+index 000000000..93216ebc2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-h.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "vrotr\\.h" 2 } } */
++/* { dg-final { scan-assembler-times "vneg\\.h" 1 } } */
++
++#define TYPE short
++#include "rotl-with-vrotr-w.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-w.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-w.c
+new file mode 100644
+index 000000000..d05b86f47
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-vrotr-w.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "vrotr\\.w" 2 } } */
++/* { dg-final { scan-assembler-times "vneg\\.w" 1 } } */
++
++#ifndef VLEN
++#define VLEN 16
++#endif
++
++#ifndef TYPE
++#define TYPE int
++#endif
++
++typedef unsigned TYPE V __attribute__ ((vector_size (VLEN)));
++V a, b, c;
++
++void
++test (int x)
++{
++  b = a << x | a >> ((int)sizeof (TYPE) * __CHAR_BIT__ - x);
++}
++
++void
++test2 (void)
++{
++  for (int i = 0; i < VLEN / sizeof (TYPE); i++)
++    c[i] = a[i] << b[i] | a[i] >> ((int)sizeof (TYPE) * __CHAR_BIT__ - b[i]);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-b.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-b.c
+new file mode 100644
+index 000000000..2674b1b61
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-b.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "xvrotr\\.b" 2 } } */
++/* { dg-final { scan-assembler-times "xvneg\\.b" 1 } } */
++
++#define VLEN 32
++#include "rotl-with-vrotr-b.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-d.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-d.c
+new file mode 100644
+index 000000000..e94403315
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-d.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "xvrotr\\.d" 2 } } */
++/* { dg-final { scan-assembler-times "xvneg\\.d" 1 } } */
++
++#define VLEN 32
++#include "rotl-with-vrotr-d.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-h.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-h.c
+new file mode 100644
+index 000000000..3d998941f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-h.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "xvrotr\\.h" 2 } } */
++/* { dg-final { scan-assembler-times "xvneg\\.h" 1 } } */
++
++#define VLEN 32
++#include "rotl-with-vrotr-h.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-w.c b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-w.c
+new file mode 100644
+index 000000000..ca6aa7bae
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/rotl-with-xvrotr-w.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -fno-vect-cost-model" } */
++/* { dg-final { scan-assembler-times "xvrotr\\.w" 2 } } */
++/* { dg-final { scan-assembler-times "xvneg\\.w" 1 } } */
++
++#define VLEN 32
++#include "rotl-with-vrotr-w.c"
+-- 
+2.43.0
+
diff --git a/0089-LoongArch-Fix-infinite-secondary-reloading-of-FCCmod.patch b/0089-LoongArch-Fix-infinite-secondary-reloading-of-FCCmod.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cda180541470380131fa69a0ec49abca06334990
--- /dev/null
+++ b/0089-LoongArch-Fix-infinite-secondary-reloading-of-FCCmod.patch
@@ -0,0 +1,104 @@
+From 1e389ec3bad94888fadd153f191fe8862448f258 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 27 Dec 2023 04:28:56 +0800
+Subject: [PATCH 089/188] LoongArch: Fix infinite secondary reloading of
+ FCCmode [PR113148]
+
+The GCC internal doc says:
+
+     X might be a pseudo-register or a 'subreg' of a pseudo-register,
+     which could either be in a hard register or in memory.  Use
+     'true_regnum' to find out; it will return -1 if the pseudo is in
+     memory and the hard register number if it is in a register.
+
+So "MEM_P (x)" is not enough for checking if we are reloading from/to
+the memory.  This bug has caused reload pass to stall and finally ICE
+complaining with "maximum number of generated reload insns per insn
+achieved", since r14-6814.
+
+Check if "true_regnum (x)" is -1 besides "MEM_P (x)" to fix the issue.
+
+gcc/ChangeLog:
+
+	PR target/113148
+	* config/loongarch/loongarch.cc (loongarch_secondary_reload):
+	Check if regno == -1 besides MEM_P (x) for reloading FCCmode
+	from/to FPR to/from memory.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/113148
+	* gcc.target/loongarch/pr113148.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             |  3 +-
+ gcc/testsuite/gcc.target/loongarch/pr113148.c | 44 +++++++++++++++++++
+ 2 files changed, 46 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr113148.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 5c278386a..2e305f940 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6902,7 +6902,8 @@ loongarch_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+ 	  return NO_REGS;
+ 	}
+ 
+-      if (reg_class_subset_p (rclass, FP_REGS) && MEM_P (x))
++      if (reg_class_subset_p (rclass, FP_REGS)
++	  && (regno == -1 || MEM_P (x)))
+ 	return GR_REGS;
+ 
+       return NO_REGS;
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr113148.c b/gcc/testsuite/gcc.target/loongarch/pr113148.c
+new file mode 100644
+index 000000000..cf48e5520
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr113148.c
+@@ -0,0 +1,44 @@
++/* PR 113148: ICE caused by infinite reloading */
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=la464 -mfpu=64 -mabi=lp64d" } */
++
++struct bound
++{
++  double max;
++} drawQuadrant_bound;
++double w4, innerXfromXY_y, computeBound_right_0;
++struct arc_def
++{
++  double w, h;
++  double a0, a1;
++};
++static void drawQuadrant (struct arc_def *);
++static void
++computeBound (struct arc_def *def, struct bound *bound)
++{
++  double ellipsex_1, ellipsex_0;
++  bound->max = def->a1 ?: __builtin_sin (w4) * def->h;
++  if (def->a0 == 5 && def->w == def->h)
++    ;
++  else
++    ellipsex_0 = def->a0 == 0.0 ?: __builtin_cos (w4);
++  if (def->a1 == 5 && def->w == def->h)
++    ellipsex_1 = bound->max;
++  __builtin_sqrt (ellipsex_1 * innerXfromXY_y * innerXfromXY_y * w4);
++  computeBound_right_0 = ellipsex_0;
++}
++void
++drawArc ()
++{
++  struct arc_def foo;
++  for (;;)
++    drawQuadrant (&foo);
++}
++void
++drawQuadrant (struct arc_def *def)
++{
++  int y, miny;
++  computeBound (def, &drawQuadrant_bound);
++  while (y >= miny)
++    ;
++}
+-- 
+2.43.0
+
diff --git a/0089-StructReorderFields-Fix-gimple-call-not-rewritten.patch b/0089-StructReorderFields-Fix-gimple-call-not-rewritten.patch
new file mode 100644
index 0000000000000000000000000000000000000000..76697e2a7118797125c20773ac6b2d05ad523662
--- /dev/null
+++ b/0089-StructReorderFields-Fix-gimple-call-not-rewritten.patch
@@ -0,0 +1,48 @@
+From 302b7e15d6308c29c215db4c9901342e1106381a Mon Sep 17 00:00:00 2001
+From: huang-xiaoquan 
+Date: Mon, 29 Apr 2024 11:00:12 +0800
+Subject: [PATCH] [StructReorderFields] Fix gimple call not rewritten due to
+ empty function node
+
+Add parameter type escape for empty functions or inline functions.
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index e08577c0c..2257d3528 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -4366,6 +4366,17 @@ ipa_struct_reorg::maybe_record_call (cgraph_node *node, gcall *stmt)
+ 
+       argtype = argtype ? TREE_CHAIN (argtype) : NULL_TREE;
+     }
++
++  /* Types escapes via a argument at empty or inlined function.  */
++  cgraph_node *callee = node->get_edge (stmt)->callee;
++  if (!gimple_call_builtin_p (stmt, BUILT_IN_FREE)
++      && gimple_call_num_args (stmt)
++      && callee && (!callee->has_gimple_body_p () || callee->inlined_to))
++    {
++      for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
++	mark_type_as_escape (TREE_TYPE (gimple_call_arg (stmt, i)),
++			      escape_var_arg_function);
++    }
+ }
+ 
+ void
+@@ -8068,6 +8079,11 @@ ipa_struct_reorg::rewrite_functions (void)
+ 	      if (dump_file && (dump_flags & TDF_DETAILS))
+ 		{
+ 		  fprintf (dump_file, "\nNo rewrite:\n");
++		  if (current_function_decl == NULL)
++		    {
++		      fprintf (dump_file, "\ncurrent_function_decl == NULL\n");
++		      continue;
++		    }
+ 		  if (current_function_decl)
+ 		    dump_function_to_file (current_function_decl, dump_file,
+ 					   dump_flags | TDF_VOPS);
+-- 
+2.33.0
+
diff --git a/0090-LoongArch-Replace-mexplicit-relocs-auto-simple-used-.patch b/0090-LoongArch-Replace-mexplicit-relocs-auto-simple-used-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ab8c3f64df11e5e1107a47e456a2a1194c34f781
--- /dev/null
+++ b/0090-LoongArch-Replace-mexplicit-relocs-auto-simple-used-.patch
@@ -0,0 +1,305 @@
+From 294893b352898328d804f2d07981f6bf1e54f8b6 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 12 Dec 2023 04:54:21 +0800
+Subject: [PATCH 090/188] LoongArch: Replace -mexplicit-relocs=auto simple-used
+ address peephole2 with combine
+
+The problem with peephole2 is it uses a naive sliding-window algorithm
+and misses many cases.  For example:
+
+    float a[10000];
+    float t() { return a[0] + a[8000]; }
+
+is compiled to:
+
+    la.local    $r13,a
+    la.local    $r12,a+32768
+    fld.s       $f1,$r13,0
+    fld.s       $f0,$r12,-768
+    fadd.s      $f0,$f1,$f0
+
+by trunk.  But as we've explained in r14-4851, the following would be
+better with -mexplicit-relocs=auto:
+
+    pcalau12i   $r13,%pc_hi20(a)
+    pcalau12i   $r12,%pc_hi20(a+32000)
+    fld.s       $f1,$r13,%pc_lo12(a)
+    fld.s       $f0,$r12,%pc_lo12(a+32000)
+    fadd.s      $f0,$f1,$f0
+
+However the sliding-window algorithm just won't detect the pcalau12i/fld
+pair to be optimized.  Use a define_insn_and_rewrite in combine pass
+will work around the issue.
+
+gcc/ChangeLog:
+
+	* config/loongarch/predicates.md
+	(symbolic_pcrel_offset_operand): New define_predicate.
+	(mem_simple_ldst_operand): Likewise.
+	* config/loongarch/loongarch-protos.h
+	(loongarch_rewrite_mem_for_simple_ldst): Declare.
+	* config/loongarch/loongarch.cc
+	(loongarch_rewrite_mem_for_simple_ldst): Implement.
+	* config/loongarch/loongarch.md (simple_load): New
+	define_insn_and_rewrite.
+	(simple_load_ext): Likewise.
+	(simple_store): Likewise.
+	(define_peephole2): Remove la.local/[f]ld peepholes.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/explicit-relocs-auto-single-load-store-2.c:
+	New test.
+	* gcc.target/loongarch/explicit-relocs-auto-single-load-store-3.c:
+	New test.
+---
+ gcc/config/loongarch/loongarch-protos.h       |   1 +
+ gcc/config/loongarch/loongarch.cc             |  16 +++
+ gcc/config/loongarch/loongarch.md             | 114 +++++-------------
+ gcc/config/loongarch/predicates.md            |  13 ++
+ ...explicit-relocs-auto-single-load-store-2.c |  11 ++
+ ...explicit-relocs-auto-single-load-store-3.c |  18 +++
+ 6 files changed, 86 insertions(+), 87 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-3.c
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 2067e50c3..5060efbb6 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -163,6 +163,7 @@ extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+ extern bool loongarch_check_zero_div_p (void);
+ extern bool loongarch_pre_reload_split (void);
+ extern int loongarch_use_bstrins_for_ior_with_mask (machine_mode, rtx *);
++extern rtx loongarch_rewrite_mem_for_simple_ldst (rtx);
+ 
+ union loongarch_gen_fn_ptrs
+ {
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 2e305f940..c6318bee9 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5713,6 +5713,22 @@ loongarch_use_bstrins_for_ior_with_mask (machine_mode mode, rtx *op)
+   return 0;
+ }
+ 
++/* Rewrite a MEM for simple load/store under -mexplicit-relocs=auto
++   -mcmodel={normal/medium}.  */
++rtx
++loongarch_rewrite_mem_for_simple_ldst (rtx mem)
++{
++  rtx addr = XEXP (mem, 0);
++  rtx hi = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, addr),
++			   UNSPEC_PCALAU12I_GR);
++  rtx new_mem;
++
++  addr = gen_rtx_LO_SUM (Pmode, force_reg (Pmode, hi), addr);
++  new_mem = gen_rtx_MEM (GET_MODE (mem), addr);
++  MEM_COPY_ATTRIBUTES (new_mem, mem);
++  return new_mem;
++}
++
+ /* Print the text for PRINT_OPERAND punctation character CH to FILE.
+    The punctuation characters are:
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index ed4d4b906..3c61a0cf4 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -4135,101 +4135,41 @@
+ ;;
+ ;; And if the pseudo op cannot be relaxed, we'll get a worse result (with
+ ;; 3 instructions).
+-(define_peephole2
+-  [(set (match_operand:P 0 "register_operand")
+-	(match_operand:P 1 "symbolic_pcrel_operand"))
+-   (set (match_operand:LD_AT_LEAST_32_BIT 2 "register_operand")
+-	(mem:LD_AT_LEAST_32_BIT (match_dup 0)))]
+-  "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \
+-   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \
+-   && (peep2_reg_dead_p (2, operands[0]) \
+-       || REGNO (operands[0]) == REGNO (operands[2]))"
+-  [(set (match_dup 2)
+-	(mem:LD_AT_LEAST_32_BIT (lo_sum:P (match_dup 0) (match_dup 1))))]
+-  {
+-    emit_insn (gen_pcalau12i_gr (operands[0], operands[1]));
+-  })
+-
+-(define_peephole2
+-  [(set (match_operand:P 0 "register_operand")
+-	(match_operand:P 1 "symbolic_pcrel_operand"))
+-   (set (match_operand:LD_AT_LEAST_32_BIT 2 "register_operand")
+-	(mem:LD_AT_LEAST_32_BIT (plus (match_dup 0)
+-				(match_operand 3 "const_int_operand"))))]
+-  "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \
+-   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \
+-   && (peep2_reg_dead_p (2, operands[0]) \
+-       || REGNO (operands[0]) == REGNO (operands[2]))"
+-  [(set (match_dup 2)
+-	(mem:LD_AT_LEAST_32_BIT (lo_sum:P (match_dup 0) (match_dup 1))))]
+-  {
+-    operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3]));
+-    emit_insn (gen_pcalau12i_gr (operands[0], operands[1]));
+-  })
+-
+-(define_peephole2
+-  [(set (match_operand:P 0 "register_operand")
+-	(match_operand:P 1 "symbolic_pcrel_operand"))
+-   (set (match_operand:GPR 2 "register_operand")
+-	(any_extend:GPR (mem:SUBDI (match_dup 0))))]
+-  "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \
+-   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \
+-   && (peep2_reg_dead_p (2, operands[0]) \
+-       || REGNO (operands[0]) == REGNO (operands[2]))"
+-  [(set (match_dup 2)
+-	(any_extend:GPR (mem:SUBDI (lo_sum:P (match_dup 0)
+-					     (match_dup 1)))))]
++(define_insn_and_rewrite "simple_load"
++  [(set (match_operand:LD_AT_LEAST_32_BIT 0 "register_operand" "=r,f")
++	(match_operand:LD_AT_LEAST_32_BIT 1 "mem_simple_ldst_operand" ""))]
++  "loongarch_pre_reload_split ()
++   && la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO
++   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM)"
++  "#"
++  "&& true"
+   {
+-    emit_insn (gen_pcalau12i_gr (operands[0], operands[1]));
++    operands[1] = loongarch_rewrite_mem_for_simple_ldst (operands[1]);
+   })
+ 
+-(define_peephole2
+-  [(set (match_operand:P 0 "register_operand")
+-	(match_operand:P 1 "symbolic_pcrel_operand"))
+-   (set (match_operand:GPR 2 "register_operand")
++(define_insn_and_rewrite "simple_load_ext"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(any_extend:GPR
+-	  (mem:SUBDI (plus (match_dup 0)
+-			   (match_operand 3 "const_int_operand")))))]
+-  "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \
+-   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \
+-   && (peep2_reg_dead_p (2, operands[0]) \
+-       || REGNO (operands[0]) == REGNO (operands[2]))"
+-  [(set (match_dup 2)
+-	(any_extend:GPR (mem:SUBDI (lo_sum:P (match_dup 0)
+-					     (match_dup 1)))))]
+-  {
+-    operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3]));
+-    emit_insn (gen_pcalau12i_gr (operands[0], operands[1]));
+-  })
+-
+-(define_peephole2
+-  [(set (match_operand:P 0 "register_operand")
+-	(match_operand:P 1 "symbolic_pcrel_operand"))
+-   (set (mem:ST_ANY (match_dup 0))
+-	(match_operand:ST_ANY 2 "register_operand"))]
+-  "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \
+-   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \
+-   && (peep2_reg_dead_p (2, operands[0])) \
+-   && REGNO (operands[0]) != REGNO (operands[2])"
+-  [(set (mem:ST_ANY (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))]
++	  (match_operand:SUBDI 1 "mem_simple_ldst_operand" "")))]
++  "loongarch_pre_reload_split ()
++   && la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO
++   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM)"
++  "#"
++  "&& true"
+   {
+-    emit_insn (gen_pcalau12i_gr (operands[0], operands[1]));
++    operands[1] = loongarch_rewrite_mem_for_simple_ldst (operands[1]);
+   })
+ 
+-(define_peephole2
+-  [(set (match_operand:P 0 "register_operand")
+-	(match_operand:P 1 "symbolic_pcrel_operand"))
+-   (set (mem:ST_ANY (plus (match_dup 0)
+-			  (match_operand 3 "const_int_operand")))
+-	(match_operand:ST_ANY 2 "register_operand"))]
+-  "la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO \
+-   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM) \
+-   && (peep2_reg_dead_p (2, operands[0])) \
+-   && REGNO (operands[0]) != REGNO (operands[2])"
+-  [(set (mem:ST_ANY (lo_sum:P (match_dup 0) (match_dup 1))) (match_dup 2))]
++(define_insn_and_rewrite "simple_store"
++  [(set (match_operand:ST_ANY 0 "mem_simple_ldst_operand" "")
++	(match_operand:ST_ANY 1 "reg_or_0_operand" "r,f"))]
++  "loongarch_pre_reload_split ()
++   && la_opt_explicit_relocs == EXPLICIT_RELOCS_AUTO
++   && (TARGET_CMODEL_NORMAL || TARGET_CMODEL_MEDIUM)"
++  "#"
++  "&& true"
+   {
+-    operands[1] = plus_constant (Pmode, operands[1], INTVAL (operands[3]));
+-    emit_insn (gen_pcalau12i_gr (operands[0], operands[1]));
++    operands[0] = loongarch_rewrite_mem_for_simple_ldst (operands[0]);
+   })
+ 
+ ;; Synchronization instructions.
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 58f9a7826..3698b9103 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -579,6 +579,19 @@
+   return loongarch_symbolic_constant_p (op, &type) && type == SYMBOL_PCREL;
+ })
+ 
++(define_predicate "symbolic_pcrel_offset_operand"
++  (and (match_code "plus")
++       (match_operand 0 "symbolic_pcrel_operand")
++       (match_operand 1 "const_int_operand")))
++
++(define_predicate "mem_simple_ldst_operand"
++  (match_code "mem")
++{
++  op = XEXP (op, 0);
++  return (symbolic_pcrel_operand (op, Pmode)
++	  || symbolic_pcrel_offset_operand (op, Pmode));
++})
++
+ (define_predicate "equality_operator"
+   (match_code "eq,ne"))
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-2.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-2.c
+new file mode 100644
+index 000000000..42cb966d1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-2.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d -mexplicit-relocs=auto" } */
++
++float a[8001];
++float
++t (void)
++{
++  return a[0] + a[8000];
++}
++
++/* { dg-final { scan-assembler-not "la.local" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-3.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-3.c
+new file mode 100644
+index 000000000..32aa5383d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-single-load-store-3.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mexplicit-relocs=auto -fdump-rtl-final" } */
++/* { dg-final { scan-rtl-dump-times "mem/v/c" 2 "final" } } */
++/* { dg-final { scan-assembler-not "la\\.local" } } */
++
++volatile unsigned long counter;
++
++unsigned long
++read (void)
++{
++  return counter;
++}
++
++void
++clear (void)
++{
++  counter = 0;
++}
+-- 
+2.43.0
+
diff --git a/0090-double-sized-mul-testsuite-Add-march-armv8.2-a-for-d.patch b/0090-double-sized-mul-testsuite-Add-march-armv8.2-a-for-d.patch
new file mode 100644
index 0000000000000000000000000000000000000000..41ed9212d71bbd4ee4842e30910f57c2c37dcc55
--- /dev/null
+++ b/0090-double-sized-mul-testsuite-Add-march-armv8.2-a-for-d.patch
@@ -0,0 +1,40 @@
+From 01517aa2397f854ffa96128a0fb23dd5542be709 Mon Sep 17 00:00:00 2001
+From: Chernonog Viacheslav 
+Date: Tue, 30 Apr 2024 18:43:32 +0800
+Subject: [PATCH 1/4] [double-sized-mul][testsuite] Add march armv8.2-a for dg
+ tests
+
+---
+ gcc/testsuite/gcc.dg/double_sized_mul-1.c | 2 +-
+ gcc/testsuite/gcc.dg/double_sized_mul-2.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-1.c b/gcc/testsuite/gcc.dg/double_sized_mul-1.c
+index 4d475cc8a..d32a25223 100644
+--- a/gcc/testsuite/gcc.dg/double_sized_mul-1.c
++++ b/gcc/testsuite/gcc.dg/double_sized_mul-1.c
+@@ -1,7 +1,7 @@
+ /* { dg-do compile } */
+ /* fif-conversion-gimple and fuaddsub-overflow-match-all are required for
+    proper overflow detection in some cases.  */
+-/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++/* { dg-options "-O2 -fif-conversion-gimple -march=armv8.2-a -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
+ #include 
+ 
+ typedef unsigned __int128 uint128_t;
+diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-2.c b/gcc/testsuite/gcc.dg/double_sized_mul-2.c
+index cc6e5af25..ff35902b7 100644
+--- a/gcc/testsuite/gcc.dg/double_sized_mul-2.c
++++ b/gcc/testsuite/gcc.dg/double_sized_mul-2.c
+@@ -1,7 +1,7 @@
+ /* { dg-do compile } */
+ /* fif-conversion-gimple is required for proper overflow detection
+    in some cases.  */
+-/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++/* { dg-options "-O2 -fif-conversion-gimple -march=armv8.2-a -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
+ #include 
+ 
+ typedef unsigned __int128 uint128_t;
+-- 
+2.33.0
+
diff --git a/0091-IPA-Bugfix-Fix-fails-in-IPA-prefetch-src-openEuler-g.patch b/0091-IPA-Bugfix-Fix-fails-in-IPA-prefetch-src-openEuler-g.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5ed724848a08eb6bed05e860483cf88639a43ba5
--- /dev/null
+++ b/0091-IPA-Bugfix-Fix-fails-in-IPA-prefetch-src-openEuler-g.patch
@@ -0,0 +1,34 @@
+From b84a896e2df214b08d6519a097cc410d3e582add Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Wed, 8 May 2024 21:28:32 +0800
+Subject: [PATCH 2/4] [IPA][Bugfix] Fix fails in IPA prefetch
+ (src-openEuler/gcc: I9J6N6)
+
+---
+ gcc/ipa-prefetch.cc | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index 1ceb5137f..94290ea9c 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -1432,8 +1432,14 @@ remap_gimple_op_r (tree *tp, int *walk_subtrees, void *data)
+ 	  TREE_THIS_VOLATILE (*tp) = TREE_THIS_VOLATILE (old);
+ 	  TREE_SIDE_EFFECTS (*tp) = TREE_SIDE_EFFECTS (old);
+ 	  TREE_NO_WARNING (*tp) = TREE_NO_WARNING (old);
+-	  /* TODO: maybe support this case.  */
+-	  gcc_assert (MR_DEPENDENCE_CLIQUE (old) == 0);
++	  if (MR_DEPENDENCE_CLIQUE (old) != 0)
++	    {
++	      MR_DEPENDENCE_CLIQUE (*tp) = MR_DEPENDENCE_CLIQUE (old);
++	      MR_DEPENDENCE_BASE (*tp) = MR_DEPENDENCE_BASE (old);
++	      if (dump_file)
++		fprintf (dump_file, "Copy clique=%d base=%d info.\n",
++			 MR_DEPENDENCE_CLIQUE (old), MR_DEPENDENCE_BASE (old));
++	    }
+ 	  /* We cannot propagate the TREE_THIS_NOTRAP flag if we have
+ 	     remapped a parameter as the property might be valid only
+ 	     for the parameter itself.  */
+-- 
+2.33.0
+
diff --git a/0091-LoongArch-Fix-the-format-of-bstrins_-mode-_for_ior_m.patch b/0091-LoongArch-Fix-the-format-of-bstrins_-mode-_for_ior_m.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f9134155aa0e74f34d4a1822d425acccdd19d462
--- /dev/null
+++ b/0091-LoongArch-Fix-the-format-of-bstrins_-mode-_for_ior_m.patch
@@ -0,0 +1,33 @@
+From 4d569c5fde85ca426eecf57119048ec25f048758 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 29 Dec 2023 20:04:34 +0800
+Subject: [PATCH 091/188] LoongArch: Fix the format of
+ bstrins__for_ior_mask condition (NFC)
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (bstrins__for_ior_mask):
+	For the condition, remove unneeded trailing "\" and move "&&" to
+	follow GNU coding style.  NFC.
+---
+ gcc/config/loongarch/loongarch.md | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 3c61a0cf4..996df66e8 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1486,8 +1486,8 @@
+                           (match_operand:GPR 2 "const_int_operand"))
+ 		 (and:GPR (match_operand:GPR 3 "register_operand")
+ 			  (match_operand:GPR 4 "const_int_operand"))))]
+-  "loongarch_pre_reload_split () && \
+-   loongarch_use_bstrins_for_ior_with_mask (mode, operands)"
++  "loongarch_pre_reload_split ()
++   && loongarch_use_bstrins_for_ior_with_mask (mode, operands)"
+   "#"
+   "&& true"
+   [(set (match_dup 0) (match_dup 1))
+-- 
+2.43.0
+
diff --git a/0092-AES-Bugfix-Change-set_of-to-reg_set_p-and-add-check-.patch b/0092-AES-Bugfix-Change-set_of-to-reg_set_p-and-add-check-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c9bf39eb26f253d23a276d2b251e4f5f0bc025a6
--- /dev/null
+++ b/0092-AES-Bugfix-Change-set_of-to-reg_set_p-and-add-check-.patch
@@ -0,0 +1,29 @@
+From acb6bbf0612aead00a879892ba8ed816c90fe788 Mon Sep 17 00:00:00 2001
+From: Chernonog Viacheslav 
+Date: Wed, 8 May 2024 19:24:27 +0800
+Subject: [PATCH 3/4] [AES][Bugfix] Change set_of to reg_set_p, and add check
+ for global_regs fix for I9JDHE
+
+---
+ gcc/rtl-matcher.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/rtl-matcher.h b/gcc/rtl-matcher.h
+index 6aed8d98d..5310f6266 100644
+--- a/gcc/rtl-matcher.h
++++ b/gcc/rtl-matcher.h
+@@ -56,8 +56,9 @@ check_def_chain_ref (df_ref ref, rtx reg)
+   if (!ref || !DF_REF_INSN_INFO (ref))
+     return false;
+ 
+-  return !global_regs[REGNO (reg)]
+-    || set_of (reg, DF_REF_INSN (ref));
++  return !(REGNO (reg) < FIRST_PSEUDO_REGISTER
++	   && global_regs[REGNO (reg)])
++    || reg_set_p (reg, DF_REF_INSN (ref));
+ }
+ 
+ /* Get the single def instruction of the reg being used in the insn.  */
+-- 
+2.33.0
+
diff --git a/0092-LoongArch-Added-TLS-Le-Relax-support.patch b/0092-LoongArch-Added-TLS-Le-Relax-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ebb7466c5f53ec6f3c172c7c527caa952a343186
--- /dev/null
+++ b/0092-LoongArch-Added-TLS-Le-Relax-support.patch
@@ -0,0 +1,280 @@
+From 58d41ffad306a359ecd2902ec19d582506f14b10 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 12 Dec 2023 16:32:31 +0800
+Subject: [PATCH 092/188] LoongArch: Added TLS Le Relax support.
+
+Check whether the assembler supports tls le relax. If it supports it, the assembly
+instruction sequence of tls le relax will be generated by default.
+
+The original way to obtain the tls le symbol address:
+    lu12i.w $rd, %le_hi20(sym)
+    ori $rd, $rd, %le_lo12(sym)
+    add.{w/d} $rd, $rd, $tp
+
+If the assembler supports tls le relax, the following sequence is generated:
+
+    lu12i.w $rd, %le_hi20_r(sym)
+    add.{w/d} $rd,$rd,$tp,%le_add_r(sym)
+    addi.{w/d} $rd,$rd,%le_lo12_r(sym)
+
+gcc/ChangeLog:
+
+	* config.in: Regenerate.
+	* config/loongarch/loongarch-opts.h (HAVE_AS_TLS_LE_RELAXATION): Define.
+	* config/loongarch/loongarch.cc (loongarch_legitimize_tls_address):
+	Added TLS Le Relax support.
+	(loongarch_print_operand_reloc): Add the output string of TLS Le Relax.
+	* config/loongarch/loongarch.md (@add_tls_le_relax): New template.
+	* configure: Regenerate.
+	* configure.ac: Check if binutils supports TLS le relax.
+
+gcc/testsuite/ChangeLog:
+
+	* lib/target-supports.exp: Add a function to check whether binutil supports
+	TLS Le Relax.
+	* gcc.target/loongarch/tls-le-relax.c: New test.
+---
+ gcc/config.in                                 |  6 +++
+ gcc/config/loongarch/loongarch-opts.h         |  4 ++
+ gcc/config/loongarch/loongarch.cc             | 46 +++++++++++++++++--
+ gcc/config/loongarch/loongarch.md             | 12 +++++
+ gcc/configure                                 | 31 +++++++++++++
+ gcc/configure.ac                              |  5 ++
+ .../gcc.target/loongarch/tls-le-relax.c       | 12 +++++
+ gcc/testsuite/lib/target-supports.exp         | 12 +++++
+ 8 files changed, 125 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-le-relax.c
+
+diff --git a/gcc/config.in b/gcc/config.in
+index 033cfb98b..7220b2b2b 100644
+--- a/gcc/config.in
++++ b/gcc/config.in
+@@ -771,6 +771,12 @@
+ #endif
+ 
+ 
++/* Define if your assembler supports tls le relocation. */
++#ifndef USED_FOR_TARGET
++#undef HAVE_AS_TLS_LE_RELAXATION
++#endif
++
++
+ /* Define if your assembler supports vl/vst/vlm/vstm with an optional
+    alignment hint argument. */
+ #ifndef USED_FOR_TARGET
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 639ed50bd..8491bee0d 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -114,4 +114,8 @@ struct loongarch_flags {
+ #define HAVE_AS_TLS 0
+ #endif
+ 
++#ifndef HAVE_AS_TLS_LE_RELAXATION
++#define HAVE_AS_TLS_LE_RELAXATION 0
++#endif
++
+ #endif /* LOONGARCH_OPTS_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index c6318bee9..d1b1950dc 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2993,7 +2993,29 @@ loongarch_legitimize_tls_address (rtx loc)
+ 
+     case TLS_MODEL_LOCAL_EXEC:
+ 	{
+-	  /* la.tls.le; tp-relative add.  */
++	  /* la.tls.le; tp-relative add.
++
++	     normal:
++	      lu12i.w $rd, %le_hi20(sym)
++	      ori $rd, $rd, %le_lo12(sym)
++	      add.{w/d} $rd, $rd, $tp
++	      (st.{w/d}/ld.{w/d} $rs, $rd, 0)
++
++	     tls le relax:
++	      lu12i.w $rd, %le_hi20_r(sym)
++	      add.{w/d} $rd,$rd,$tp
++	      addi.{w/d} $rd,$rd,%le_lo12_r(sym)
++	      (st.{w/d}/ld.{w/d} $rs, $rd, 0)
++
++	     extreme (When the code model is set to extreme, the TLS le Relax
++	     instruction sequence is not generated):
++	      lu12i.w $rd, %le_hi20(sym)
++	      ori $rd, $rd, %le_lo12(sym)
++	      lu32i.d $rd, %le64_lo20(sym)
++	      lu52i.d $rd, $rd, %le64_hi12(sym)
++	      add.d $rd, $rd, $tp
++	      (st.{w/d}/ld.{w/d} $rs, $rd, 0)  */
++
+ 	  tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+ 	  tmp1 = gen_reg_rtx (Pmode);
+ 	  dest = gen_reg_rtx (Pmode);
+@@ -3004,7 +3026,20 @@ loongarch_legitimize_tls_address (rtx loc)
+ 	      tmp3 = gen_reg_rtx (Pmode);
+ 	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+ 	      high = loongarch_force_temporary (tmp3, high);
+-	      emit_insn (gen_ori_l_lo12 (Pmode, tmp1, high, tmp2));
++
++	      /* The assembler does not implement tls le relax support when the
++		 code model is extreme, so when the code model is extreme, the
++		 old symbol address acquisition method is still used.  */
++	      if (HAVE_AS_TLS_LE_RELAXATION && !TARGET_CMODEL_EXTREME)
++		{
++		  emit_insn (gen_add_tls_le_relax (Pmode, dest, high,
++						   tp, loc));
++		  loongarch_emit_move (dest,
++				       gen_rtx_LO_SUM (Pmode, dest, tmp2));
++		  return dest;
++		}
++	      else
++		emit_insn (gen_ori_l_lo12 (Pmode, tmp1, high, tmp2));
+ 
+ 	      if (TARGET_CMODEL_EXTREME)
+ 		{
+@@ -5936,7 +5971,12 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+ 	    gcc_unreachable ();
+ 	}
+       else
+-	reloc = hi_reloc ? "%le_hi20" : "%le_lo12";
++	{
++	  if (HAVE_AS_TLS_LE_RELAXATION && !TARGET_CMODEL_EXTREME)
++	    reloc = hi_reloc ? "%le_hi20_r" : "%le_lo12_r";
++	  else
++	    reloc = hi_reloc ? "%le_hi20" : "%le_lo12";
++	}
+       break;
+ 
+     case SYMBOL_TLSGD:
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 996df66e8..02c537d4c 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -73,6 +73,7 @@
+   UNSPEC_LOAD_FROM_GOT
+   UNSPEC_PCALAU12I
+   UNSPEC_PCALAU12I_GR
++  UNSPEC_ADD_TLS_LE_RELAX
+   UNSPEC_ORI_L_LO12
+   UNSPEC_LUI_L_HI20
+   UNSPEC_LUI_H_LO20
+@@ -2503,6 +2504,17 @@
+   "pcalau12i\t%0,%%pc_hi20(%1)"
+   [(set_attr "type" "move")])
+ 
++(define_insn "@add_tls_le_relax"
++  [(set (match_operand:P 0 "register_operand" "=r")
++	(unspec:P [(match_operand:P 1 "register_operand" "r")
++		   (match_operand:P 2 "register_operand" "r")
++		   (match_operand:P 3 "symbolic_operand")]
++	  UNSPEC_ADD_TLS_LE_RELAX))]
++  "HAVE_AS_TLS_LE_RELAXATION"
++  "add.\t%0,%1,%2,%%le_add_r(%3)"
++  [(set_attr "type" "move")]
++)
++
+ (define_insn "@ori_l_lo12"
+   [(set (match_operand:P 0 "register_operand" "=r")
+ 	(unspec:P [(match_operand:P 1 "register_operand" "r")
+diff --git a/gcc/configure b/gcc/configure
+index 5842e7a18..eecfe60d6 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -28968,6 +28968,37 @@ if test $gcc_cv_as_loongarch_cond_branch_relax = yes; then
+ 
+ $as_echo "#define HAVE_AS_COND_BRANCH_RELAXATION 1" >>confdefs.h
+ 
++fi
++
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for tls le relaxation support" >&5
++$as_echo_n "checking assembler for tls le relaxation support... " >&6; }
++if ${gcc_cv_as_loongarch_tls_le_relaxation_support+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  gcc_cv_as_loongarch_tls_le_relaxation_support=no
++  if test x$gcc_cv_as != x; then
++    $as_echo 'lu12i.w $t0,%le_hi20_r(a)' > conftest.s
++    if { ac_try='$gcc_cv_as $gcc_cv_as_flags  -o conftest.o conftest.s >&5'
++  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
++  (eval $ac_try) 2>&5
++  ac_status=$?
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; }
++    then
++	gcc_cv_as_loongarch_tls_le_relaxation_support=yes
++    else
++      echo "configure: failed program was" >&5
++      cat conftest.s >&5
++    fi
++    rm -f conftest.o conftest.s
++  fi
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_tls_le_relaxation_support" >&5
++$as_echo "$gcc_cv_as_loongarch_tls_le_relaxation_support" >&6; }
++if test $gcc_cv_as_loongarch_tls_le_relaxation_support = yes; then
++
++$as_echo "#define HAVE_AS_TLS_LE_RELAXATION 1" >>confdefs.h
++
+ fi
+ 
+     ;;
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index 9c3fd3ad6..d1032440d 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -5357,6 +5357,11 @@ x:
+        beq $a0,$a1,a],,
+       [AC_DEFINE(HAVE_AS_COND_BRANCH_RELAXATION, 1,
+ 		[Define if your assembler supports conditional branch relaxation.])])
++    gcc_GAS_CHECK_FEATURE([tls le relaxation support],
++      gcc_cv_as_loongarch_tls_le_relaxation_support,,
++      [lu12i.w $t0,%le_hi20_r(a)],,
++      [AC_DEFINE(HAVE_AS_TLS_LE_RELAXATION, 1,
++	  [Define if your assembler supports tls le relocation.])])
+     ;;
+     s390*-*-*)
+     gcc_GAS_CHECK_FEATURE([.gnu_attribute support],
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-le-relax.c b/gcc/testsuite/gcc.target/loongarch/tls-le-relax.c
+new file mode 100644
+index 000000000..a9a404fc7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/tls-le-relax.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mcmodel=normal -mexplicit-relocs" } */
++/* { dg-final { scan-assembler "%le_add_r" { target tls_le_relax } } } */
++
++__attribute__ ((tls_model ("local-exec"))) __thread int a;
++
++void
++test (void)
++{
++  a = 10;
++}
++
+diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
+index b8bff1a31..20fbd43ee 100644
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -10582,6 +10582,18 @@ proc check_effective_target_loongarch_call36_support { } {
+   } ""]
+ }
+ 
++# Returns 1 if binutils supports TLS le Relax, 0 otherwise.
++proc check_effective_target_tls_le_relax { } {
++  if [check_effective_target_tls_native] {
++    return [check_no_compiler_messages loongarch_tls_le_relax object {
++        /* Assembly code */
++   lu12i.w $r12, %le_hi20_r(a)
++    }]
++  }
++
++  return 0;
++}
++
+ # Return 1 if the target does *not* require strict alignment.
+ 
+ proc check_effective_target_non_strict_align {} {
+-- 
+2.43.0
+
diff --git a/0093-LoongArch-Provide-fmin-fmax-RTL-pattern-for-vectors.patch b/0093-LoongArch-Provide-fmin-fmax-RTL-pattern-for-vectors.patch
new file mode 100644
index 0000000000000000000000000000000000000000..db34f7ad43329bbc09df4366a6f3c67df97df8b7
--- /dev/null
+++ b/0093-LoongArch-Provide-fmin-fmax-RTL-pattern-for-vectors.patch
@@ -0,0 +1,112 @@
+From 97081ba053424e35b1869a00d6ac0e84362d09ea Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 30 Dec 2023 21:40:11 +0800
+Subject: [PATCH 093/188] LoongArch: Provide fmin/fmax RTL pattern for vectors
+
+We already had smin/smax RTL pattern using vfmin/vfmax instructions.
+But for smin/smax, it's unspecified what will happen if either operand
+contains any NaN operands.  So we would not vectorize the loop with
+-fno-finite-math-only (the default for all optimization levels expect
+-Ofast).
+
+But, LoongArch vfmin/vfmax instruction is IEEE-754-2008 conformant so we
+can also use them and vectorize the loop.
+
+gcc/ChangeLog:
+
+	* config/loongarch/simd.md (fmax3): New define_insn.
+	(fmin3): Likewise.
+	(reduc_fmax_scal_3): New define_expand.
+	(reduc_fmin_scal_3): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vfmax-vfmin.c: New test.
+---
+ gcc/config/loongarch/simd.md                  | 31 +++++++++++++++++++
+ .../gcc.target/loongarch/vfmax-vfmin.c        | 31 +++++++++++++++++++
+ 2 files changed, 62 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vfmax-vfmin.c
+
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index 93fb39abc..8ac1d75a8 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -426,6 +426,37 @@
+   [(set_attr "type" "simd_fcmp")
+    (set_attr "mode" "")])
+ 
++; [x]vf{min/max} instructions are IEEE-754-2008 conforming, use them for
++; the corresponding IEEE-754-2008 operations.  We must use UNSPEC instead
++; of smin/smax though, see PR105414 and PR107013.
++
++(define_int_iterator UNSPEC_FMAXMIN [UNSPEC_FMAX UNSPEC_FMIN])
++(define_int_attr fmaxmin [(UNSPEC_FMAX "fmax") (UNSPEC_FMIN "fmin")])
++
++(define_insn "3"
++  [(set (match_operand:FVEC 0 "register_operand" "=f")
++	(unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")
++		      (match_operand:FVEC 2 "register_operand" "f")]
++		     UNSPEC_FMAXMIN))]
++  ""
++  "v.\t%0,%1,%2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++;; ... and also reduc operations.
++(define_expand "reduc__scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:FVEC 1 "register_operand")
++   (const_int UNSPEC_FMAXMIN)]
++  ""
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
+ ; The LoongArch SX Instructions.
+ (include "lsx.md")
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vfmax-vfmin.c b/gcc/testsuite/gcc.target/loongarch/vfmax-vfmin.c
+new file mode 100644
+index 000000000..811fee361
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vfmax-vfmin.c
+@@ -0,0 +1,31 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mtune=la464 -mlasx" } */
++/* { dg-final { scan-assembler "\tvfmin\\.d" } } */
++/* { dg-final { scan-assembler "\tvfmax\\.d" } } */
++/* { dg-final { scan-assembler "\txvfmin\\.d" } } */
++/* { dg-final { scan-assembler "\txvfmax\\.d" } } */
++/* { dg-final { scan-assembler "\tvfmin\\.s" } } */
++/* { dg-final { scan-assembler "\tvfmax\\.s" } } */
++/* { dg-final { scan-assembler "\txvfmin\\.s" } } */
++/* { dg-final { scan-assembler "\txvfmax\\.s" } } */
++
++#define T(OP) __typeof__ (__builtin_##OP (0, 0))
++
++#define TEST(OP, LEN) \
++void \
++test_##OP##LEN (T (OP) *restrict dest, \
++		const T (OP) *restrict src1, \
++		const T (OP) *restrict src2) \
++{ \
++  for (int i = 0; i < LEN / sizeof (T(OP)); i++) \
++    dest[i] = __builtin_##OP (src1[i], src2[i]); \
++}
++
++TEST(fmin, 16)
++TEST(fmax, 16)
++TEST(fmin, 32)
++TEST(fmax, 32)
++TEST(fminf, 16)
++TEST(fmaxf, 16)
++TEST(fminf, 32)
++TEST(fmaxf, 32)
+-- 
+2.43.0
+
diff --git a/0093-fix-bugs-within-pointer-compression-and-DFE.patch b/0093-fix-bugs-within-pointer-compression-and-DFE.patch
new file mode 100644
index 0000000000000000000000000000000000000000..64b35fef7c6f7749ca70078d8048902947ce3ee9
--- /dev/null
+++ b/0093-fix-bugs-within-pointer-compression-and-DFE.patch
@@ -0,0 +1,26 @@
+From 48724ee73cd58b67d59962ee4d56ac85db797e61 Mon Sep 17 00:00:00 2001
+From: tiancheng-bao 
+Date: Fri, 10 May 2024 17:52:27 +0800
+Subject: [PATCH 4/4] fix bugs within pointer compression and DFE
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 3 ---
+ 1 file changed, 3 deletions(-)
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 2257d3528..1a169c635 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -7472,9 +7472,6 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	    continue;
+ 	  tree lhs_expr = newlhs[i] ? newlhs[i] : lhs;
+ 	  tree rhs_expr = newrhs[i] ? newrhs[i] : rhs;
+-	  if (!useless_type_conversion_p (TREE_TYPE (lhs_expr),
+-					  TREE_TYPE (rhs_expr)))
+-	    rhs_expr = gimplify_build1 (gsi, NOP_EXPR, TREE_TYPE (lhs_expr), rhs_expr);  
+ 	  gimple *newstmt = gimple_build_assign (lhs_expr, rhs_expr);
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+ 	    {
+-- 
+2.33.0
+
diff --git a/0094-BUGFIX-AutoBOLT-function-miss-bind-type.patch b/0094-BUGFIX-AutoBOLT-function-miss-bind-type.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4b0e6a5240a46fc07d1eec32030ec74fd2a0c200
--- /dev/null
+++ b/0094-BUGFIX-AutoBOLT-function-miss-bind-type.patch
@@ -0,0 +1,28 @@
+From 4861c3db991e947060de54a4d20c1a13747a6024 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Wed, 15 May 2024 14:41:45 +0800
+Subject: [PATCH] [BUGFIX] AutoBOLT function miss bind type
+
+---
+ gcc/final.cc | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/final.cc b/gcc/final.cc
+index af4e529bb..c440846f7 100644
+--- a/gcc/final.cc
++++ b/gcc/final.cc
+@@ -4272,9 +4272,9 @@ leaf_renumber_regs_insn (rtx in_rtx)
+ 
+ #define ASM_FDO_CALLER_FLAG ".fdo.caller "
+ #define ASM_FDO_CALLER_SIZE_FLAG ".fdo.caller.size "
+-#define ASM_FDO_CALLER_BIND_FLAG ".fdo.caller.bind"
++#define ASM_FDO_CALLER_BIND_FLAG ".fdo.caller.bind "
+ 
+-#define ASM_FDO_CALLEE_FLAG ".fdo.callee"
++#define ASM_FDO_CALLEE_FLAG ".fdo.callee "
+ 
+ /* Return the relative offset address of the start instruction of BB,
+    return -1 if it is empty instruction.    */
+-- 
+2.33.0
+
diff --git a/0094-LoongArch-Merge-constant-vector-permuatation-impleme.patch b/0094-LoongArch-Merge-constant-vector-permuatation-impleme.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4c6ae006d49a21a97d7e16203e7649de8566cd64
--- /dev/null
+++ b/0094-LoongArch-Merge-constant-vector-permuatation-impleme.patch
@@ -0,0 +1,1484 @@
+From 06a6a571fd557b53f805d990dd1a40a2ab7c1e5c Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Thu, 28 Dec 2023 20:26:46 +0800
+Subject: [PATCH 094/188] LoongArch: Merge constant vector permuatation
+ implementations.
+
+There are currently two versions of the implementations of constant
+vector permutation: loongarch_expand_vec_perm_const_1 and
+loongarch_expand_vec_perm_const_2.  The implementations of the two
+versions are different. Currently, only the implementation of
+loongarch_expand_vec_perm_const_1 is used for 256-bit vectors.  We
+hope to streamline the code as much as possible while retaining the
+better-performing implementation of the two.  By repeatedly testing
+spec2006 and spec2017, we got the following Merged version.
+Compared with the pre-merger version, the number of lines of code
+in loongarch.cc has been reduced by 888 lines.  At the same time,
+the performance of SPECint2006 under Ofast has been improved by 0.97%,
+and the performance of SPEC2017 fprate has been improved by 0.27%.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_is_odd_extraction):
+	Remove useless forward declaration.
+	(loongarch_is_even_extraction): Remove useless forward declaration.
+	(loongarch_try_expand_lsx_vshuf_const): Removed.
+	(loongarch_expand_vec_perm_const_1): Merged.
+	(loongarch_is_double_duplicate): Removed.
+	(loongarch_is_center_extraction): Ditto.
+	(loongarch_is_reversing_permutation): Ditto.
+	(loongarch_is_di_misalign_extract): Ditto.
+	(loongarch_is_si_misalign_extract): Ditto.
+	(loongarch_is_lasx_lowpart_extract): Ditto.
+	(loongarch_is_op_reverse_perm): Ditto.
+	(loongarch_is_single_op_perm): Ditto.
+	(loongarch_is_divisible_perm): Ditto.
+	(loongarch_is_triple_stride_extract): Ditto.
+	(loongarch_expand_vec_perm_const_2): Merged.
+	(loongarch_expand_vec_perm_const): New.
+	(loongarch_vectorize_vec_perm_const): Adjust.
+---
+ gcc/config/loongarch/loongarch.cc | 1308 +++++------------------------
+ 1 file changed, 210 insertions(+), 1098 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index d1b1950dc..9d2374a46 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -8823,143 +8823,6 @@ loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
+     }
+ }
+ 
+-static bool
+-loongarch_is_odd_extraction (struct expand_vec_perm_d *);
+-
+-static bool
+-loongarch_is_even_extraction (struct expand_vec_perm_d *);
+-
+-static bool
+-loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
+-{
+-  int i;
+-  rtx target, op0, op1, sel, tmp;
+-  rtx rperm[MAX_VECT_LEN];
+-
+-  if (d->vmode == E_V2DImode || d->vmode == E_V2DFmode
+-	|| d->vmode == E_V4SImode || d->vmode == E_V4SFmode
+-	|| d->vmode == E_V8HImode || d->vmode == E_V16QImode)
+-    {
+-      target = d->target;
+-      op0 = d->op0;
+-      op1 = d->one_vector_p ? d->op0 : d->op1;
+-
+-      if (GET_MODE (op0) != GET_MODE (op1)
+-	  || GET_MODE (op0) != GET_MODE (target))
+-	return false;
+-
+-      if (d->testing_p)
+-	return true;
+-
+-      /* If match extract-even and extract-odd permutations pattern, use
+-       * vselect much better than vshuf.  */
+-      if (loongarch_is_odd_extraction (d)
+-	  || loongarch_is_even_extraction (d))
+-	{
+-	  if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
+-						d->perm, d->nelt))
+-	    return true;
+-
+-	  unsigned char perm2[MAX_VECT_LEN];
+-	  for (i = 0; i < d->nelt; ++i)
+-	    perm2[i] = (d->perm[i] + d->nelt) & (2 * d->nelt - 1);
+-
+-	  if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0,
+-						perm2, d->nelt))
+-	    return true;
+-	}
+-
+-      for (i = 0; i < d->nelt; i += 1)
+-	{
+-	  rperm[i] = GEN_INT (d->perm[i]);
+-	}
+-
+-      if (d->vmode == E_V2DFmode)
+-	{
+-	  sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm));
+-	  tmp = simplify_gen_subreg (E_V2DImode, d->target, d->vmode, 0);
+-	  emit_move_insn (tmp, sel);
+-	}
+-      else if (d->vmode == E_V4SFmode)
+-	{
+-	  sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm));
+-	  tmp = simplify_gen_subreg (E_V4SImode, d->target, d->vmode, 0);
+-	  emit_move_insn (tmp, sel);
+-	}
+-      else
+-	{
+-	  sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm));
+-	  emit_move_insn (d->target, sel);
+-	}
+-
+-      switch (d->vmode)
+-	{
+-	case E_V2DFmode:
+-	  emit_insn (gen_lsx_vshuf_d_f (target, target, op1, op0));
+-	  break;
+-	case E_V2DImode:
+-	  emit_insn (gen_lsx_vshuf_d (target, target, op1, op0));
+-	  break;
+-	case E_V4SFmode:
+-	  emit_insn (gen_lsx_vshuf_w_f (target, target, op1, op0));
+-	  break;
+-	case E_V4SImode:
+-	  emit_insn (gen_lsx_vshuf_w (target, target, op1, op0));
+-	  break;
+-	case E_V8HImode:
+-	  emit_insn (gen_lsx_vshuf_h (target, target, op1, op0));
+-	  break;
+-	case E_V16QImode:
+-	  emit_insn (gen_lsx_vshuf_b (target, op1, op0, target));
+-	  break;
+-	default:
+-	  break;
+-	}
+-
+-      return true;
+-    }
+-  return false;
+-}
+-
+-static bool
+-loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
+-{
+-  unsigned int i, nelt = d->nelt;
+-  unsigned char perm2[MAX_VECT_LEN];
+-
+-  if (d->one_vector_p)
+-    {
+-      /* Try interleave with alternating operands.  */
+-      memcpy (perm2, d->perm, sizeof (perm2));
+-      for (i = 1; i < nelt; i += 2)
+-	perm2[i] += nelt;
+-      if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2,
+-					    nelt))
+-	return true;
+-    }
+-  else
+-    {
+-      if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
+-					    d->perm, nelt))
+-	return true;
+-
+-      /* Try again with swapped operands.  */
+-      for (i = 0; i < nelt; ++i)
+-	perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
+-      if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2,
+-					    nelt))
+-	return true;
+-    }
+-
+-  if (loongarch_expand_lsx_shuffle (d))
+-    return true;
+-  if (loongarch_expand_vec_perm_even_odd (d))
+-    return true;
+-  if (loongarch_expand_vec_perm_interleave (d))
+-    return true;
+-  return false;
+-}
+-
+ /* Following are the assist function for const vector permutation support.  */
+ static bool
+ loongarch_is_quad_duplicate (struct expand_vec_perm_d *d)
+@@ -8991,36 +8854,6 @@ loongarch_is_quad_duplicate (struct expand_vec_perm_d *d)
+   return result;
+ }
+ 
+-static bool
+-loongarch_is_double_duplicate (struct expand_vec_perm_d *d)
+-{
+-  if (!d->one_vector_p)
+-    return false;
+-
+-  if (d->nelt < 8)
+-    return false;
+-
+-  bool result = true;
+-  unsigned char buf = d->perm[0];
+-
+-  for (int i = 1; i < d->nelt; i += 2)
+-    {
+-      if (d->perm[i] != buf)
+-	{
+-	  result = false;
+-	  break;
+-	}
+-      if (d->perm[i - 1] != d->perm[i])
+-	{
+-	  result = false;
+-	  break;
+-	}
+-      buf += d->nelt / 4;
+-    }
+-
+-  return result;
+-}
+-
+ static bool
+ loongarch_is_odd_extraction (struct expand_vec_perm_d *d)
+ {
+@@ -9081,110 +8914,6 @@ loongarch_is_extraction_permutation (struct expand_vec_perm_d *d)
+   return result;
+ }
+ 
+-static bool
+-loongarch_is_center_extraction (struct expand_vec_perm_d *d)
+-{
+-  bool result = true;
+-  unsigned buf = d->nelt / 2;
+-
+-  for (int i = 0; i < d->nelt; i += 1)
+-    {
+-      if (buf != d->perm[i])
+-	{
+-	  result = false;
+-	  break;
+-	}
+-      buf += 1;
+-    }
+-
+-  return result;
+-}
+-
+-static bool
+-loongarch_is_reversing_permutation (struct expand_vec_perm_d *d)
+-{
+-  if (!d->one_vector_p)
+-    return false;
+-
+-  bool result = true;
+-  unsigned char buf = d->nelt - 1;
+-
+-  for (int i = 0; i < d->nelt; i += 1)
+-    {
+-      if (d->perm[i] != buf)
+-	{
+-	  result = false;
+-	  break;
+-	}
+-
+-      buf -= 1;
+-    }
+-
+-  return result;
+-}
+-
+-static bool
+-loongarch_is_di_misalign_extract (struct expand_vec_perm_d *d)
+-{
+-  if (d->nelt != 4 && d->nelt != 8)
+-    return false;
+-
+-  bool result = true;
+-  unsigned char buf;
+-
+-  if (d->nelt == 4)
+-    {
+-      buf = 1;
+-      for (int i = 0; i < d->nelt; i += 1)
+-	{
+-	  if (buf != d->perm[i])
+-	    {
+-	      result = false;
+-	      break;
+-	    }
+-
+-	  buf += 1;
+-	}
+-    }
+-  else if (d->nelt == 8)
+-    {
+-      buf = 2;
+-      for (int i = 0; i < d->nelt; i += 1)
+-	{
+-	  if (buf != d->perm[i])
+-	    {
+-	      result = false;
+-	      break;
+-	    }
+-
+-	  buf += 1;
+-	}
+-    }
+-
+-  return result;
+-}
+-
+-static bool
+-loongarch_is_si_misalign_extract (struct expand_vec_perm_d *d)
+-{
+-  if (d->vmode != E_V8SImode && d->vmode != E_V8SFmode)
+-    return false;
+-  bool result = true;
+-  unsigned char buf = 1;
+-
+-  for (int i = 0; i < d->nelt; i += 1)
+-    {
+-      if (buf != d->perm[i])
+-	{
+-	  result = false;
+-	  break;
+-	}
+-      buf += 1;
+-    }
+-
+-  return result;
+-}
+-
+ static bool
+ loongarch_is_lasx_lowpart_interleave (struct expand_vec_perm_d *d)
+ {
+@@ -9247,39 +8976,6 @@ loongarch_is_lasx_lowpart_interleave_2 (struct expand_vec_perm_d *d)
+   return result;
+ }
+ 
+-static bool
+-loongarch_is_lasx_lowpart_extract (struct expand_vec_perm_d *d)
+-{
+-  bool result = true;
+-  unsigned char buf = 0;
+-
+-  for (int i = 0; i < d->nelt / 2; i += 1)
+-    {
+-      if (buf != d->perm[i])
+-	{
+-	  result = false;
+-	  break;
+-	}
+-      buf += 1;
+-    }
+-
+-  if (result)
+-    {
+-      buf = d->nelt;
+-      for (int i = d->nelt / 2; i < d->nelt; i += 1)
+-	{
+-	  if (buf != d->perm[i])
+-	    {
+-	      result = false;
+-	      break;
+-	    }
+-	  buf += 1;
+-	}
+-    }
+-
+-  return result;
+-}
+-
+ static bool
+ loongarch_is_lasx_highpart_interleave (expand_vec_perm_d *d)
+ {
+@@ -9361,538 +9057,195 @@ loongarch_is_elem_duplicate (struct expand_vec_perm_d *d)
+   return result;
+ }
+ 
+-inline bool
+-loongarch_is_op_reverse_perm (struct expand_vec_perm_d *d)
+-{
+-  return (d->vmode == E_V4DFmode)
+-    && d->perm[0] == 2 && d->perm[1] == 3
+-    && d->perm[2] == 0 && d->perm[3] == 1;
+-}
++/* In LASX, some permutation insn does not have the behavior that gcc expects
++   when compiler wants to emit a vector permutation.
++
++   1.  What GCC provides via vectorize_vec_perm_const ()'s paramater:
++   When GCC wants to performs a vector permutation, it provides two op
++   reigster, one target register, and a selector.
++   In const vector permutation case, GCC provides selector as a char array
++   that contains original value; in variable vector permuatation
++   (performs via vec_perm insn template), it provides a vector register.
++   We assume that nelt is the elements numbers inside single vector in current
++   256bit vector mode.
++
++   2.  What GCC expects to perform:
++   Two op registers (op0, op1) will "combine" into a 512bit temp vector storage
++   that has 2*nelt elements inside it; the low 256bit is op0, and high 256bit
++   is op1, then the elements are indexed as below:
++   0 ~ nelt - 1		nelt ~ 2 * nelt - 1
++   |-------------------------|-------------------------|
++   Low 256bit (op0)	High 256bit (op1)
++   For example, the second element in op1 (V8SImode) will be indexed with 9.
++   Selector is a vector that has the same mode and number of elements  with
++   op0,op1 and target, it's look like this:
++   0 ~ nelt - 1
++   |-------------------------|
++   256bit (selector)
++   It describes which element from 512bit temp vector storage will fit into
++   target's every element slot.
++   GCC expects that every element in selector can be ANY indices of 512bit
++   vector storage (Selector can pick literally any element from op0 and op1, and
++   then fits into any place of target register). This is also what LSX 128bit
++   vshuf.* instruction do similarly, so we can handle 128bit vector permutation
++   by single instruction easily.
++
++   3.  What LASX permutation instruction does:
++   In short, it just execute two independent 128bit vector permuatation, and
++   it's the reason that we need to do the jobs below.  We will explain it.
++   op0, op1, target, and selector will be separate into high 128bit and low
++   128bit, and do permutation as the description below:
++
++   a) op0's low 128bit and op1's low 128bit "combines" into a 256bit temp
++   vector storage (TVS1), elements are indexed as below:
++   0 ~ nelt / 2 - 1	  nelt / 2 ~ nelt - 1
++   |---------------------|---------------------| TVS1
++   op0's low 128bit      op1's low 128bit
++   op0's high 128bit and op1's high 128bit are "combined" into TVS2 in the
++   same way.
++   0 ~ nelt / 2 - 1	  nelt / 2 ~ nelt - 1
++   |---------------------|---------------------| TVS2
++   op0's high 128bit	op1's high 128bit
++   b) Selector's low 128bit describes which elements from TVS1 will fit into
++   target vector's low 128bit.  No TVS2 elements are allowed.
++   c) Selector's high 128bit describes which elements from TVS2 will fit into
++   target vector's high 128bit.  No TVS1 elements are allowed.
++
++   As we can see, if we want to handle vector permutation correctly, we can
++   achieve it in three ways:
++   a) Modify selector's elements, to make sure that every elements can inform
++   correct value that will put into target vector.
++   b) Generate extra instruction before/after permutation instruction, for
++   adjusting op vector or target vector, to make sure target vector's value is
++   what GCC expects.
++   c) Use other instructions to process op and put correct result into target.
++   */
++
++/* Implementation of constant vector permuatation.  This function identifies
++   recognized pattern of permuation selector argument, and use one or more
++   instruction (s) to finish the permutation job correctly.  For unsupported
++   patterns, it will return false.  */
+ 
+ static bool
+-loongarch_is_single_op_perm (struct expand_vec_perm_d *d)
++loongarch_expand_vec_perm_const (struct expand_vec_perm_d *d)
+ {
+-  bool result = true;
++  bool flag = false;
++  unsigned int i;
++  unsigned char idx;
++  rtx target, op0, op1, sel, tmp;
++  rtx rperm[MAX_VECT_LEN];
++  unsigned int remapped[MAX_VECT_LEN];
++  unsigned char perm2[MAX_VECT_LEN];
+ 
+-  for (int i = 0; i < d->nelt; i += 1)
++  if (GET_MODE_SIZE (d->vmode) == 16)
++    return loongarch_expand_lsx_shuffle (d);
++  else
+     {
+-      if (d->perm[i] >= d->nelt)
++      if (d->one_vector_p)
+ 	{
+-	  result = false;
+-	  break;
++	  /* Try interleave with alternating operands.  */
++	  memcpy (perm2, d->perm, sizeof (perm2));
++	  for (i = 1; i < d->nelt; i += 2)
++	    perm2[i] += d->nelt;
++	  if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
++						perm2, d->nelt))
++	    return true;
+ 	}
+-    }
+-
+-  return result;
+-}
+-
+-static bool
+-loongarch_is_divisible_perm (struct expand_vec_perm_d *d)
+-{
+-  bool result = true;
+-
+-  for (int i = 0; i < d->nelt / 2; i += 1)
+-    {
+-      if (d->perm[i] >= d->nelt)
++      else
+ 	{
+-	  result = false;
+-	  break;
+-	}
+-    }
+-
+-  if (result)
+-    {
+-      for (int i = d->nelt / 2; i < d->nelt; i += 1)
+-	{
+-	  if (d->perm[i] < d->nelt)
+-	    {
+-	      result = false;
+-	      break;
+-	    }
+-	}
+-    }
+-
+-  return result;
+-}
+-
+-inline bool
+-loongarch_is_triple_stride_extract (struct expand_vec_perm_d *d)
+-{
+-  return (d->vmode == E_V4DImode || d->vmode == E_V4DFmode)
+-    && d->perm[0] == 1 && d->perm[1] == 4
+-    && d->perm[2] == 7 && d->perm[3] == 0;
+-}
+-
+-/* In LASX, some permutation insn does not have the behavior that gcc expects
+- * when compiler wants to emit a vector permutation.
+- *
+- * 1. What GCC provides via vectorize_vec_perm_const ()'s paramater:
+- * When GCC wants to performs a vector permutation, it provides two op
+- * reigster, one target register, and a selector.
+- * In const vector permutation case, GCC provides selector as a char array
+- * that contains original value; in variable vector permuatation
+- * (performs via vec_perm insn template), it provides a vector register.
+- * We assume that nelt is the elements numbers inside single vector in current
+- * 256bit vector mode.
+- *
+- * 2. What GCC expects to perform:
+- * Two op registers (op0, op1) will "combine" into a 512bit temp vector storage
+- * that has 2*nelt elements inside it; the low 256bit is op0, and high 256bit
+- * is op1, then the elements are indexed as below:
+- *		  0 ~ nelt - 1		nelt ~ 2 * nelt - 1
+- *	  |-------------------------|-------------------------|
+- *		Low 256bit (op0)	High 256bit (op1)
+- * For example, the second element in op1 (V8SImode) will be indexed with 9.
+- * Selector is a vector that has the same mode and number of elements  with
+- * op0,op1 and target, it's look like this:
+- *	      0 ~ nelt - 1
+- *	  |-------------------------|
+- *	      256bit (selector)
+- * It describes which element from 512bit temp vector storage will fit into
+- * target's every element slot.
+- * GCC expects that every element in selector can be ANY indices of 512bit
+- * vector storage (Selector can pick literally any element from op0 and op1, and
+- * then fits into any place of target register). This is also what LSX 128bit
+- * vshuf.* instruction do similarly, so we can handle 128bit vector permutation
+- * by single instruction easily.
+- *
+- * 3. What LASX permutation instruction does:
+- * In short, it just execute two independent 128bit vector permuatation, and
+- * it's the reason that we need to do the jobs below.  We will explain it.
+- * op0, op1, target, and selector will be separate into high 128bit and low
+- * 128bit, and do permutation as the description below:
+- *
+- *  a) op0's low 128bit and op1's low 128bit "combines" into a 256bit temp
+- * vector storage (TVS1), elements are indexed as below:
+- *	    0 ~ nelt / 2 - 1	  nelt / 2 ~ nelt - 1
+- *	|---------------------|---------------------| TVS1
+- *	    op0's low 128bit      op1's low 128bit
+- *    op0's high 128bit and op1's high 128bit are "combined" into TVS2 in the
+- *    same way.
+- *	    0 ~ nelt / 2 - 1	  nelt / 2 ~ nelt - 1
+- *	|---------------------|---------------------| TVS2
+- *	    op0's high 128bit	op1's high 128bit
+- *  b) Selector's low 128bit describes which elements from TVS1 will fit into
+- *  target vector's low 128bit.  No TVS2 elements are allowed.
+- *  c) Selector's high 128bit describes which elements from TVS2 will fit into
+- *  target vector's high 128bit.  No TVS1 elements are allowed.
+- *
+- * As we can see, if we want to handle vector permutation correctly, we can
+- * achieve it in three ways:
+- *  a) Modify selector's elements, to make sure that every elements can inform
+- *  correct value that will put into target vector.
+-    b) Generate extra instruction before/after permutation instruction, for
+-    adjusting op vector or target vector, to make sure target vector's value is
+-    what GCC expects.
+-    c) Use other instructions to process op and put correct result into target.
+- */
+-
+-/* Implementation of constant vector permuatation.  This function identifies
+- * recognized pattern of permuation selector argument, and use one or more
+- * instruction(s) to finish the permutation job correctly.  For unsupported
+- * patterns, it will return false.  */
+-
+-static bool
+-loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+-{
+-  /* Although we have the LSX vec_perm template, there's still some
+-     128bit vector permuatation operations send to vectorize_vec_perm_const.
+-     In this case, we just simpliy wrap them by single vshuf.* instruction,
+-     because LSX vshuf.* instruction just have the same behavior that GCC
+-     expects.  */
+-  if (GET_MODE_SIZE (d->vmode) == 16)
+-    return loongarch_try_expand_lsx_vshuf_const (d);
+-  else
+-    return false;
+-
+-  bool ok = false, reverse_hi_lo = false, extract_ev_od = false,
+-       use_alt_op = false;
+-  unsigned char idx;
+-  int i;
+-  rtx target, op0, op1, sel, tmp;
+-  rtx op0_alt = NULL_RTX, op1_alt = NULL_RTX;
+-  rtx rperm[MAX_VECT_LEN];
+-  unsigned int remapped[MAX_VECT_LEN];
+-
+-  /* Try to figure out whether is a recognized permutation selector pattern, if
+-     yes, we will reassign some elements with new value in selector argument,
+-     and in some cases we will generate some assist insn to complete the
+-     permutation. (Even in some cases, we use other insn to impl permutation
+-     instead of xvshuf!)
++	  if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
++						d->perm, d->nelt))
++	    return true;
+ 
+-     Make sure to check d->testing_p is false everytime if you want to emit new
+-     insn, unless you want to crash into ICE directly.  */
+-  if (loongarch_is_quad_duplicate (d))
+-    {
+-      /* Selector example: E_V8SImode, { 0, 0, 0, 0, 4, 4, 4, 4 }
+-	 copy first elem from original selector to all elem in new selector.  */
+-      idx = d->perm[0];
+-      for (i = 0; i < d->nelt; i += 1)
+-	{
+-	  remapped[i] = idx;
+-	}
+-      /* Selector after: { 0, 0, 0, 0, 0, 0, 0, 0 }.  */
+-    }
+-  else if (loongarch_is_double_duplicate (d))
+-    {
+-      /* Selector example: E_V8SImode, { 1, 1, 3, 3, 5, 5, 7, 7 }
+-	 one_vector_p == true.  */
+-      for (i = 0; i < d->nelt / 2; i += 1)
+-	{
+-	  idx = d->perm[i];
+-	  remapped[i] = idx;
+-	  remapped[i + d->nelt / 2] = idx;
++	  /* Try again with swapped operands.  */
++	  for (i = 0; i < d->nelt; ++i)
++	    perm2[i] = (d->perm[i] + d->nelt) & (2 * d->nelt - 1);
++	  if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0,
++						perm2, d->nelt))
++	    return true;
+ 	}
+-      /* Selector after: { 1, 1, 3, 3, 1, 1, 3, 3 }.  */
+-    }
+-  else if (loongarch_is_odd_extraction (d)
+-	   || loongarch_is_even_extraction (d))
+-    {
+-      /* Odd extraction selector sample: E_V4DImode, { 1, 3, 5, 7 }
+-	 Selector after: { 1, 3, 1, 3 }.
+-	 Even extraction selector sample: E_V4DImode, { 0, 2, 4, 6 }
+-	 Selector after: { 0, 2, 0, 2 }.  */
+ 
+-      /* Better implement of extract-even and extract-odd permutations.  */
+-      if (loongarch_expand_vec_perm_even_odd (d))
++      if (loongarch_expand_lsx_shuffle (d))
+ 	return true;
+ 
+-      for (i = 0; i < d->nelt / 2; i += 1)
+-	{
+-	  idx = d->perm[i];
+-	  remapped[i] = idx;
+-	  remapped[i + d->nelt / 2] = idx;
+-	}
+-      /* Additional insn is required for correct result.  See codes below.  */
+-      extract_ev_od = true;
+-    }
+-  else if (loongarch_is_extraction_permutation (d))
+-    {
+-      /* Selector sample: E_V8SImode, { 0, 1, 2, 3, 4, 5, 6, 7 }.  */
+-      if (d->perm[0] == 0)
++      if (loongarch_is_odd_extraction (d)
++	  || loongarch_is_even_extraction (d))
+ 	{
+-	  for (i = 0; i < d->nelt / 2; i += 1)
+-	    {
+-	      remapped[i] = i;
+-	      remapped[i + d->nelt / 2] = i;
+-	    }
++	  if (loongarch_expand_vec_perm_even_odd (d))
++	    return true;
+ 	}
+-      else
++
++      if (loongarch_is_lasx_lowpart_interleave (d)
++	  || loongarch_is_lasx_lowpart_interleave_2 (d)
++	  || loongarch_is_lasx_highpart_interleave (d)
++	  || loongarch_is_lasx_highpart_interleave_2 (d))
+ 	{
+-	  /* { 8, 9, 10, 11, 12, 13, 14, 15 }.  */
+-	  for (i = 0; i < d->nelt / 2; i += 1)
+-	    {
+-	      idx = i + d->nelt / 2;
+-	      remapped[i] = idx;
+-	      remapped[i + d->nelt / 2] = idx;
+-	    }
++	  if (loongarch_expand_vec_perm_interleave (d))
++	    return true;
+ 	}
+-      /* Selector after: { 0, 1, 2, 3, 0, 1, 2, 3 }
+-	 { 8, 9, 10, 11, 8, 9, 10, 11 }  */
+-    }
+-  else if (loongarch_is_center_extraction (d))
+-    {
+-      /* sample: E_V4DImode, { 2, 3, 4, 5 }
+-	 In this condition, we can just copy high 128bit of op0 and low 128bit
+-	 of op1 to the target register by using xvpermi.q insn.  */
+-      if (!d->testing_p)
++
++      if (loongarch_is_quad_duplicate (d))
+ 	{
+-	  emit_move_insn (d->target, d->op1);
+-	  switch (d->vmode)
++	  if (d->testing_p)
++	    return true;
++	  /* Selector example: E_V8SImode, { 0, 0, 0, 0, 4, 4, 4, 4 }.  */
++	  for (i = 0; i < d->nelt; i += 1)
+ 	    {
+-	      case E_V4DImode:
+-		emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target,
+-						    d->op0, GEN_INT (0x21)));
+-		break;
+-	      case E_V4DFmode:
+-		emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target,
+-						    d->op0, GEN_INT (0x21)));
+-		break;
+-	      case E_V8SImode:
+-		emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target,
+-						    d->op0, GEN_INT (0x21)));
+-		break;
+-	      case E_V8SFmode:
+-		emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target,
+-						    d->op0, GEN_INT (0x21)));
+-		break;
+-	      case E_V16HImode:
+-		emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target,
+-						     d->op0, GEN_INT (0x21)));
+-		break;
+-	      case E_V32QImode:
+-		emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target,
+-						     d->op0, GEN_INT (0x21)));
+-		break;
+-	      default:
+-		break;
++	      rperm[i] = GEN_INT (d->perm[0]);
+ 	    }
++	  /* Selector after: { 0, 0, 0, 0, 0, 0, 0, 0 }.  */
++	  flag = true;
++	  goto expand_perm_const_end;
+ 	}
+-      ok = true;
+-      /* Finish the funtion directly.  */
+-      goto expand_perm_const_2_end;
+-    }
+-  else if (loongarch_is_reversing_permutation (d))
+-    {
+-      /* Selector sample: E_V8SImode, { 7, 6, 5, 4, 3, 2, 1, 0 }
+-	 one_vector_p == true  */
+-      idx = d->nelt / 2 - 1;
+-      for (i = 0; i < d->nelt / 2; i += 1)
+-	{
+-	  remapped[i] = idx;
+-	  remapped[i + d->nelt / 2] = idx;
+-	  idx -= 1;
+-	}
+-      /* Selector after: { 3, 2, 1, 0, 3, 2, 1, 0 }
+-	 Additional insn will be generated to swap hi and lo 128bit of target
+-	 register.  */
+-      reverse_hi_lo = true;
+-    }
+-  else if (loongarch_is_di_misalign_extract (d)
+-	   || loongarch_is_si_misalign_extract (d))
+-    {
+-      /* Selector Sample:
+-	 DI misalign: E_V4DImode, { 1, 2, 3, 4 }
+-	 SI misalign: E_V8SImode, { 1, 2, 3, 4, 5, 6, 7, 8 }  */
+-      if (!d->testing_p)
+-	{
+-	  /* Copy original op0/op1 value to new temp register.
+-	     In some cases, operand register may be used in multiple place, so
+-	     we need new regiter instead modify original one, to avoid runtime
+-	     crashing or wrong value after execution.  */
+-	  use_alt_op = true;
+-	  op1_alt = gen_reg_rtx (d->vmode);
+-	  emit_move_insn (op1_alt, d->op1);
+-
+-	  /* Adjust op1 for selecting correct value in high 128bit of target
+-	     register.
+-	     op1: E_V4DImode, { 4, 5, 6, 7 } -> { 2, 3, 4, 5 }.  */
+-	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+-					      conv_op0, GEN_INT (0x21)));
+ 
+-	  for (i = 0; i < d->nelt / 2; i += 1)
+-	    {
+-	      remapped[i] = d->perm[i];
+-	      remapped[i + d->nelt / 2] = d->perm[i];
+-	    }
+-	  /* Selector after:
+-	     DI misalign: { 1, 2, 1, 2 }
+-	     SI misalign: { 1, 2, 3, 4, 1, 2, 3, 4 }  */
+-	}
+-    }
+-  else if (loongarch_is_lasx_lowpart_interleave (d))
+-    {
+-      /* Elements from op0's low 18bit and op1's 128bit are inserted into
+-	 target register alternately.
+-	 sample: E_V4DImode, { 0, 4, 1, 5 }  */
+-      if (!d->testing_p)
+-	{
+-	  /* Prepare temp register instead of modify original op.  */
+-	  use_alt_op = true;
+-	  op1_alt = gen_reg_rtx (d->vmode);
+-	  op0_alt = gen_reg_rtx (d->vmode);
+-	  emit_move_insn (op1_alt, d->op1);
+-	  emit_move_insn (op0_alt, d->op0);
+-
+-	  /* Generate subreg for fitting into insn gen function.  */
+-	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
+-
+-	  /* Adjust op value in temp register.
+-	     op0 = {0,1,2,3}, op1 = {4,5,0,1}  */
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+-					      conv_op0, GEN_INT (0x02)));
+-	  /* op0 = {0,1,4,5}, op1 = {4,5,0,1}  */
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
+-					      conv_op1, GEN_INT (0x01)));
+-
+-	  /* Remap indices in selector based on the location of index inside
+-	     selector, and vector element numbers in current vector mode.  */
+-
+-	  /* Filling low 128bit of new selector.  */
+-	  for (i = 0; i < d->nelt / 2; i += 1)
+-	    {
+-	      /* value in odd-indexed slot of low 128bit part of selector
+-		 vector.  */
+-	      remapped[i] = i % 2 != 0 ? d->perm[i] - d->nelt / 2 : d->perm[i];
+-	    }
+-	  /* Then filling the high 128bit.  */
+-	  for (i = d->nelt / 2; i < d->nelt; i += 1)
++      if (loongarch_is_extraction_permutation (d))
++	{
++	  if (d->testing_p)
++	    return true;
++	  /* Selector sample: E_V8SImode, { 0, 1, 2, 3, 4, 5, 6, 7 }.  */
++	  if (d->perm[0] == 0)
+ 	    {
+-	      /* value in even-indexed slot of high 128bit part of
+-		 selector vector.  */
+-	      remapped[i] = i % 2 == 0
+-		? d->perm[i] + (d->nelt / 2) * 3 : d->perm[i];
++	      for (i = 0; i < d->nelt / 2; i += 1)
++		{
++		  remapped[i] = i;
++		  remapped[i + d->nelt / 2] = i;
++		}
+ 	    }
+-	}
+-    }
+-  else if (loongarch_is_lasx_lowpart_interleave_2 (d))
+-    {
+-      /* Special lowpart interleave case in V32QI vector mode.  It does the same
+-	 thing as we can see in if branch that above this line.
+-	 Selector sample: E_V32QImode,
+-	 {0, 1, 2, 3, 4, 5, 6, 7, 32, 33, 34, 35, 36, 37, 38, 39, 8,
+-	 9, 10, 11, 12, 13, 14, 15, 40, 41, 42, 43, 44, 45, 46, 47}  */
+-      if (!d->testing_p)
+-	{
+-	  /* Solution for this case in very simple - covert op into V4DI mode,
+-	     and do same thing as previous if branch.  */
+-	  op1_alt = gen_reg_rtx (d->vmode);
+-	  op0_alt = gen_reg_rtx (d->vmode);
+-	  emit_move_insn (op1_alt, d->op1);
+-	  emit_move_insn (op0_alt, d->op0);
+-
+-	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
+-	  rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target,
+-						 d->vmode, 0);
+-
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+-					      conv_op0, GEN_INT (0x02)));
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
+-					      conv_op1, GEN_INT (0x01)));
+-	  remapped[0] = 0;
+-	  remapped[1] = 4;
+-	  remapped[2] = 1;
+-	  remapped[3] = 5;
+-
+-	  for (i = 0; i < d->nelt; i += 1)
++	  else
+ 	    {
+-	      rperm[i] = GEN_INT (remapped[i]);
++	      /* { 8, 9, 10, 11, 12, 13, 14, 15 }.  */
++	      for (i = 0; i < d->nelt / 2; i += 1)
++		{
++		  idx = i + d->nelt / 2;
++		  remapped[i] = idx;
++		  remapped[i + d->nelt / 2] = idx;
++		}
+ 	    }
++	  /* Selector after: { 0, 1, 2, 3, 0, 1, 2, 3 }
++	     { 8, 9, 10, 11, 8, 9, 10, 11 }  */
+ 
+-	  sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (4, rperm));
+-	  sel = force_reg (E_V4DImode, sel);
+-	  emit_insn (gen_lasx_xvshuf_d (conv_target, sel,
+-					conv_op1, conv_op0));
+-	}
+-
+-      ok = true;
+-      goto expand_perm_const_2_end;
+-    }
+-  else if (loongarch_is_lasx_lowpart_extract (d))
+-    {
+-      /* Copy op0's low 128bit to target's low 128bit, and copy op1's low
+-	 128bit to target's high 128bit.
+-	 Selector sample: E_V4DImode, { 0, 1, 4 ,5 }  */
+-      if (!d->testing_p)
+-	{
+-	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0);
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
+-	  rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target,
+-						 d->vmode, 0);
+-
+-	  /* We can achieve the expectation by using sinple xvpermi.q insn.  */
+-	  emit_move_insn (conv_target, conv_op1);
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_target, conv_target,
+-					      conv_op0, GEN_INT (0x20)));
+-	}
+-
+-      ok = true;
+-      goto expand_perm_const_2_end;
+-    }
+-  else if (loongarch_is_lasx_highpart_interleave (d))
+-    {
+-      /* Similar to lowpart interleave, elements from op0's high 128bit and
+-	 op1's high 128bit are inserted into target regiter alternately.
+-	 Selector sample: E_V8SImode, { 4, 12, 5, 13, 6, 14, 7, 15 }  */
+-      if (!d->testing_p)
+-	{
+-	  /* Prepare temp op register.  */
+-	  use_alt_op = true;
+-	  op1_alt = gen_reg_rtx (d->vmode);
+-	  op0_alt = gen_reg_rtx (d->vmode);
+-	  emit_move_insn (op1_alt, d->op1);
+-	  emit_move_insn (op0_alt, d->op0);
+-
+-	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
+-	  /* Adjust op value in temp regiter.
+-	     op0 = { 0, 1, 2, 3 }, op1 = { 6, 7, 2, 3 }  */
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+-					      conv_op0, GEN_INT (0x13)));
+-	  /* op0 = { 2, 3, 6, 7 }, op1 = { 6, 7, 2, 3 }  */
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
+-					      conv_op1, GEN_INT (0x01)));
+-	  /* Remap indices in selector based on the location of index inside
+-	     selector, and vector element numbers in current vector mode.  */
+-
+-	  /* Filling low 128bit of new selector.  */
+-	 for (i = 0; i < d->nelt / 2; i += 1)
+-	   {
+-	     /* value in even-indexed slot of low 128bit part of selector
+-		vector.  */
+-	     remapped[i] = i % 2 == 0 ? d->perm[i] - d->nelt / 2 : d->perm[i];
+-	   }
+-	  /* Then filling the high 128bit.  */
+-	 for (i = d->nelt / 2; i < d->nelt; i += 1)
+-	   {
+-	     /* value in odd-indexed slot of high 128bit part of selector
+-		vector.  */
+-	      remapped[i] = i % 2 != 0
+-		? d->perm[i] - (d->nelt / 2) * 3 : d->perm[i];
+-	   }
+-	}
+-    }
+-  else if (loongarch_is_lasx_highpart_interleave_2 (d))
+-    {
+-      /* Special highpart interleave case in V32QI vector mode.  It does the
+-	 same thing as the normal version above.
+-	 Selector sample: E_V32QImode,
+-	 {16, 17, 18, 19, 20, 21, 22, 23, 48, 49, 50, 51, 52, 53, 54, 55,
+-	 24, 25, 26, 27, 28, 29, 30, 31, 56, 57, 58, 59, 60, 61, 62, 63}
+-      */
+-      if (!d->testing_p)
+-	{
+-	  /* Convert op into V4DImode and do the things.  */
+-	  op1_alt = gen_reg_rtx (d->vmode);
+-	  op0_alt = gen_reg_rtx (d->vmode);
+-	  emit_move_insn (op1_alt, d->op1);
+-	  emit_move_insn (op0_alt, d->op0);
+-
+-	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, op1_alt, d->vmode, 0);
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, op0_alt, d->vmode, 0);
+-	  rtx conv_target = simplify_gen_subreg (E_V4DImode, d->target,
+-						 d->vmode, 0);
+-
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
+-					      conv_op0, GEN_INT (0x13)));
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
+-					      conv_op1, GEN_INT (0x01)));
+-	  remapped[0] = 2;
+-	  remapped[1] = 6;
+-	  remapped[2] = 3;
+-	  remapped[3] = 7;
+-
++	  /* Convert remapped selector array to RTL array.  */
+ 	  for (i = 0; i < d->nelt; i += 1)
+ 	    {
+ 	      rperm[i] = GEN_INT (remapped[i]);
+ 	    }
+ 
+-	  sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (4, rperm));
+-	  sel = force_reg (E_V4DImode, sel);
+-	  emit_insn (gen_lasx_xvshuf_d (conv_target, sel,
+-					conv_op1, conv_op0));
++	  flag = true;
++	  goto expand_perm_const_end;
+ 	}
+ 
+-	ok = true;
+-	goto expand_perm_const_2_end;
+-    }
+-  else if (loongarch_is_elem_duplicate (d))
+-    {
+-      /* Brocast single element (from op0 or op1) to all slot of target
+-	 register.
+-	 Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 }  */
+-      if (!d->testing_p)
++      if (loongarch_is_elem_duplicate (d))
+ 	{
++	  if (d->testing_p)
++	    return true;
++	  /* Brocast single element (from op0 or op1) to all slot of target
++	     register.
++	     Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 }  */
+ 	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0);
+ 	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
+ 	  rtx temp_reg = gen_reg_rtx (d->vmode);
+ 	  rtx conv_temp = simplify_gen_subreg (E_V4DImode, temp_reg,
+ 					       d->vmode, 0);
+-
+ 	  emit_move_insn (temp_reg, d->op0);
+ 
+ 	  idx = d->perm[0];
+@@ -9901,7 +9254,7 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	     value that we need to broardcast, because xvrepl128vei does the
+ 	     broardcast job from every 128bit of source register to
+ 	     corresponded part of target register! (A deep sigh.)  */
+-	  if (/*idx >= 0 &&*/ idx < d->nelt / 2)
++	  if (idx < d->nelt / 2)
+ 	    {
+ 	      emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp,
+ 						  conv_op0, GEN_INT (0x0)));
+@@ -9956,310 +9309,75 @@ loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+ 	      break;
+ 	    }
+ 
+-	  /* finish func directly.  */
+-	  ok = true;
+-	  goto expand_perm_const_2_end;
+-	}
+-    }
+-  else if (loongarch_is_op_reverse_perm (d))
+-    {
+-      /* reverse high 128bit and low 128bit in op0.
+-	 Selector sample: E_V4DFmode, { 2, 3, 0, 1 }
+-	 Use xvpermi.q for doing this job.  */
+-      if (!d->testing_p)
+-	{
+-	  if (d->vmode == E_V4DImode)
+-	    {
+-	      emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->op0,
+-						  GEN_INT (0x01)));
+-	    }
+-	  else if (d->vmode == E_V4DFmode)
+-	    {
+-	      emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->op0,
+-						  GEN_INT (0x01)));
+-	    }
+-	  else
+-	    {
+-	      gcc_unreachable ();
+-	    }
+-	}
+-
+-      ok = true;
+-      goto expand_perm_const_2_end;
+-    }
+-  else if (loongarch_is_single_op_perm (d))
+-    {
+-      /* Permutation that only select elements from op0.  */
+-      if (!d->testing_p)
+-	{
+-	  /* Prepare temp register instead of modify original op.  */
+-	  use_alt_op = true;
+-	  op0_alt = gen_reg_rtx (d->vmode);
+-	  op1_alt = gen_reg_rtx (d->vmode);
+-
+-	  emit_move_insn (op0_alt, d->op0);
+-	  emit_move_insn (op1_alt, d->op1);
+-
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
+-	  rtx conv_op0a = simplify_gen_subreg (E_V4DImode, op0_alt,
+-					       d->vmode, 0);
+-	  rtx conv_op1a = simplify_gen_subreg (E_V4DImode, op1_alt,
+-					       d->vmode, 0);
+-
+-	  /* Duplicate op0's low 128bit in op0, then duplicate high 128bit
+-	     in op1.  After this, xvshuf.* insn's selector argument can
+-	     access all elements we need for correct permutation result.  */
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op0,
+-					      GEN_INT (0x00)));
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0,
+-					      GEN_INT (0x11)));
+-
+-	  /* In this case, there's no need to remap selector's indices.  */
+-	  for (i = 0; i < d->nelt; i += 1)
+-	    {
+-	      remapped[i] = d->perm[i];
+-	    }
++	  return true;
+ 	}
+-    }
+-  else if (loongarch_is_divisible_perm (d))
+-    {
+-      /* Divisible perm:
+-	 Low 128bit of selector only selects elements of op0,
+-	 and high 128bit of selector only selects elements of op1.  */
+ 
+-      if (!d->testing_p)
++expand_perm_const_end:
++      if (flag)
+ 	{
+-	  /* Prepare temp register instead of modify original op.  */
+-	  use_alt_op = true;
+-	  op0_alt = gen_reg_rtx (d->vmode);
+-	  op1_alt = gen_reg_rtx (d->vmode);
+-
+-	  emit_move_insn (op0_alt, d->op0);
+-	  emit_move_insn (op1_alt, d->op1);
+-
+-	  rtx conv_op0a = simplify_gen_subreg (E_V4DImode, op0_alt,
+-					       d->vmode, 0);
+-	  rtx conv_op1a = simplify_gen_subreg (E_V4DImode, op1_alt,
+-					       d->vmode, 0);
+-	  rtx conv_op0 = simplify_gen_subreg (E_V4DImode, d->op0, d->vmode, 0);
+-	  rtx conv_op1 = simplify_gen_subreg (E_V4DImode, d->op1, d->vmode, 0);
+-
+-	  /* Reorganize op0's hi/lo 128bit and op1's hi/lo 128bit, to make sure
+-	     that selector's low 128bit can access all op0's elements, and
+-	     selector's high 128bit can access all op1's elements.  */
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op1,
+-					      GEN_INT (0x02)));
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0,
+-					      GEN_INT (0x31)));
+-
+-	  /* No need to modify indices.  */
+-	  for (i = 0; i < d->nelt;i += 1)
++	  /* Copy selector vector from memory to vector register for later insn
++	     gen function.
++	     If vector's element in floating point value, we cannot fit
++	     selector argument into insn gen function directly, because of the
++	     insn template definition.  As a solution, generate a integral mode
++	     subreg of target, then copy selector vector (that is in integral
++	     mode) to this subreg.  */
++	  switch (d->vmode)
+ 	    {
+-	      remapped[i] = d->perm[i];
++	    case E_V4DFmode:
++	      sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt,
++								   rperm));
++	      tmp = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0);
++	      emit_move_insn (tmp, sel);
++	      break;
++	    case E_V8SFmode:
++	      sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt,
++								   rperm));
++	      tmp = simplify_gen_subreg (E_V8SImode, d->target, d->vmode, 0);
++	      emit_move_insn (tmp, sel);
++	      break;
++	    default:
++	      sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt,
++								 rperm));
++	      emit_move_insn (d->target, sel);
++	      break;
+ 	    }
+-	}
+-    }
+-  else if (loongarch_is_triple_stride_extract (d))
+-    {
+-      /* Selector sample: E_V4DFmode, { 1, 4, 7, 0 }.  */
+-      if (!d->testing_p)
+-	{
+-	  /* Resolve it with brute force modification.  */
+-	  remapped[0] = 1;
+-	  remapped[1] = 2;
+-	  remapped[2] = 3;
+-	  remapped[3] = 0;
+-	}
+-    }
+-  else
+-    {
+-      /* When all of the detections above are failed, we will try last
+-	 strategy.
+-	 The for loop tries to detect following rules based on indices' value,
+-	 its position inside of selector vector ,and strange behavior of
+-	 xvshuf.* insn; Then we take corresponding action. (Replace with new
+-	 value, or give up whole permutation expansion.)  */
+-      for (i = 0; i < d->nelt; i += 1)
+-	{
+-	  /* % (2 * d->nelt)  */
+-	  idx = d->perm[i];
+ 
+-	  /* if index is located in low 128bit of selector vector.  */
+-	  if (i < d->nelt / 2)
+-	    {
+-	      /* Fail case 1: index tries to reach element that located in op0's
+-		 high 128bit.  */
+-	      if (idx >= d->nelt / 2 && idx < d->nelt)
+-		{
+-		  goto expand_perm_const_2_end;
+-		}
+-	      /* Fail case 2: index tries to reach element that located in
+-		 op1's high 128bit.  */
+-	      if (idx >= (d->nelt + d->nelt / 2))
+-		{
+-		  goto expand_perm_const_2_end;
+-		}
++	  target = d->target;
++	  op0 = d->op0;
++	  op1 = d->one_vector_p ? d->op0 : d->op1;
+ 
+-	      /* Success case: index tries to reach elements that located in
+-		 op1's low 128bit.  Apply - (nelt / 2) offset to original
+-		 value.  */
+-	      if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2))
+-		{
+-		  idx -= d->nelt / 2;
+-		}
+-	    }
+-	  /* if index is located in high 128bit of selector vector.  */
+-	  else
++	  /* We FINALLY can generate xvshuf.* insn.  */
++	  switch (d->vmode)
+ 	    {
+-	      /* Fail case 1: index tries to reach element that located in
+-		 op1's low 128bit.  */
+-	      if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2))
+-		{
+-		  goto expand_perm_const_2_end;
+-		}
+-	      /* Fail case 2: index tries to reach element that located in
+-		 op0's low 128bit.  */
+-	      if (idx < (d->nelt / 2))
+-		{
+-		  goto expand_perm_const_2_end;
+-		}
+-	      /* Success case: index tries to reach element that located in
+-		 op0's high 128bit.  */
+-	      if (idx >= d->nelt / 2 && idx < d->nelt)
+-		{
+-		  idx -= d->nelt / 2;
+-		}
++	    case E_V4DFmode:
++	      emit_insn (gen_lasx_xvshuf_d_f (target, target, op1, op0));
++	      break;
++	    case E_V4DImode:
++	      emit_insn (gen_lasx_xvshuf_d (target, target, op1, op0));
++	      break;
++	    case E_V8SFmode:
++	      emit_insn (gen_lasx_xvshuf_w_f (target, target, op1, op0));
++	      break;
++	    case E_V8SImode:
++	      emit_insn (gen_lasx_xvshuf_w (target, target, op1, op0));
++	      break;
++	    case E_V16HImode:
++	      emit_insn (gen_lasx_xvshuf_h (target, target, op1, op0));
++	      break;
++	    case E_V32QImode:
++	      emit_insn (gen_lasx_xvshuf_b (target, op1, op0, target));
++	      break;
++	    default:
++	      gcc_unreachable ();
++	      break;
+ 	    }
+-	  /* No need to process other case that we did not mentioned.  */
+-
+-	  /* Assign with original or processed value.  */
+-	  remapped[i] = idx;
+-	}
+-    }
+-
+-  ok = true;
+-  /* If testing_p is true, compiler is trying to figure out that backend can
+-     handle this permutation, but doesn't want to generate actual insn.  So
+-     if true, exit directly.  */
+-  if (d->testing_p)
+-    {
+-      goto expand_perm_const_2_end;
+-    }
+-
+-  /* Convert remapped selector array to RTL array.  */
+-  for (i = 0; i < d->nelt; i += 1)
+-    {
+-      rperm[i] = GEN_INT (remapped[i]);
+-    }
+-
+-  /* Copy selector vector from memory to vector regiter for later insn gen
+-     function.
+-     If vector's element in floating point value, we cannot fit selector
+-     argument into insn gen function directly, because of the insn template
+-     definition.  As a solution, generate a integral mode subreg of target,
+-     then copy selector vector (that is in integral mode) to this subreg.  */
+-  switch (d->vmode)
+-    {
+-    case E_V4DFmode:
+-      sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt, rperm));
+-      tmp = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0);
+-      emit_move_insn (tmp, sel);
+-      break;
+-    case E_V8SFmode:
+-      sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt, rperm));
+-      tmp = simplify_gen_subreg (E_V8SImode, d->target, d->vmode, 0);
+-      emit_move_insn (tmp, sel);
+-      break;
+-    default:
+-      sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm));
+-      emit_move_insn (d->target, sel);
+-      break;
+-    }
+-
+-  target = d->target;
+-  /* If temp op registers are requested in previous if branch, then use temp
+-     register intead of original one.  */
+-  if (use_alt_op)
+-    {
+-      op0 = op0_alt != NULL_RTX ? op0_alt : d->op0;
+-      op1 = op1_alt != NULL_RTX ? op1_alt : d->op1;
+-    }
+-  else
+-    {
+-      op0 = d->op0;
+-      op1 = d->one_vector_p ? d->op0 : d->op1;
+-    }
+-
+-  /* We FINALLY can generate xvshuf.* insn.  */
+-  switch (d->vmode)
+-    {
+-    case E_V4DFmode:
+-      emit_insn (gen_lasx_xvshuf_d_f (target, target, op1, op0));
+-      break;
+-    case E_V4DImode:
+-      emit_insn (gen_lasx_xvshuf_d (target, target, op1, op0));
+-      break;
+-    case E_V8SFmode:
+-      emit_insn (gen_lasx_xvshuf_w_f (target, target, op1, op0));
+-      break;
+-    case E_V8SImode:
+-      emit_insn (gen_lasx_xvshuf_w (target, target, op1, op0));
+-      break;
+-    case E_V16HImode:
+-      emit_insn (gen_lasx_xvshuf_h (target, target, op1, op0));
+-      break;
+-    case E_V32QImode:
+-      emit_insn (gen_lasx_xvshuf_b (target, op1, op0, target));
+-      break;
+-    default:
+-      gcc_unreachable ();
+-      break;
+-    }
+ 
+-  /* Extra insn for swapping the hi/lo 128bit of target vector register.  */
+-  if (reverse_hi_lo)
+-    {
+-      switch (d->vmode)
+-	{
+-	case E_V4DFmode:
+-	  emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target,
+-					      d->target, GEN_INT (0x1)));
+-	  break;
+-	case E_V4DImode:
+-	  emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target,
+-					      d->target, GEN_INT (0x1)));
+-	  break;
+-	case E_V8SFmode:
+-	  emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target,
+-					      d->target, GEN_INT (0x1)));
+-	  break;
+-	case E_V8SImode:
+-	  emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target,
+-					      d->target, GEN_INT (0x1)));
+-	  break;
+-	case E_V16HImode:
+-	  emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target,
+-					       d->target, GEN_INT (0x1)));
+-	  break;
+-	case E_V32QImode:
+-	  emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target,
+-					       d->target, GEN_INT (0x1)));
+-	  break;
+-	default:
+-	  break;
++	  return true;
+ 	}
+     }
+-  /* Extra insn required by odd/even extraction.  Swapping the second and third
+-     64bit in target vector register.  */
+-  else if (extract_ev_od)
+-    {
+-      rtx converted = simplify_gen_subreg (E_V4DImode, d->target, d->vmode, 0);
+-      emit_insn (gen_lasx_xvpermi_d_v4di (converted, converted,
+-					  GEN_INT (0xD8)));
+-    }
+ 
+-expand_perm_const_2_end:
+-  return ok;
++  return false;
+ }
+ 
+ /* Implement TARGET_VECTORIZE_VEC_PERM_CONST.  */
+@@ -10340,25 +9458,19 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode,
+       if (!d.one_vector_p)
+ 	d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
+ 
+-      ok = loongarch_expand_vec_perm_const_2 (&d);
+-      if (ok)
+-	return ok;
+-
+       start_sequence ();
+-      ok = loongarch_expand_vec_perm_const_1 (&d);
++      ok = loongarch_expand_vec_perm_const (&d);
+       end_sequence ();
+       return ok;
+     }
+ 
+-  ok = loongarch_expand_vec_perm_const_2 (&d);
+-  if (!ok)
+-    ok = loongarch_expand_vec_perm_const_1 (&d);
++  ok = loongarch_expand_vec_perm_const (&d);
+ 
+   /* If we were given a two-vector permutation which just happened to
+      have both input vectors equal, we folded this into a one-vector
+      permutation.  There are several loongson patterns that are matched
+      via direct vec_select+vec_concat expansion, but we do not have
+-     support in loongarch_expand_vec_perm_const_1 to guess the adjustment
++     support in loongarch_expand_vec_perm_const to guess the adjustment
+      that should be made for a single operand.  Just try again with
+      the original permutation.  */
+   if (!ok && which == 3)
+@@ -10367,7 +9479,7 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode,
+       d.op1 = op1;
+       d.one_vector_p = false;
+       memcpy (d.perm, orig_perm, MAX_VECT_LEN);
+-      ok = loongarch_expand_vec_perm_const_1 (&d);
++      ok = loongarch_expand_vec_perm_const (&d);
+     }
+ 
+   return ok;
+-- 
+2.43.0
+
diff --git a/0095-LoongArch-testsuite-Fix-FAIL-in-lasx-xvstelm.c-file.patch b/0095-LoongArch-testsuite-Fix-FAIL-in-lasx-xvstelm.c-file.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c6064928f65585fa5f06d3e254add05753142316
--- /dev/null
+++ b/0095-LoongArch-testsuite-Fix-FAIL-in-lasx-xvstelm.c-file.patch
@@ -0,0 +1,34 @@
+From 6263acd411b9685ebc7b16d19b91aad39cb7e184 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Fri, 29 Dec 2023 09:45:15 +0800
+Subject: [PATCH 095/188] LoongArch: testsuite:Fix FAIL in lasx-xvstelm.c file.
+
+After implementing the cost model on the LoongArch architecture, the GCC
+compiler code has this feature turned on by default, which causes the
+lasx-xvstelm.c file test to fail. Through analysis, this test case can
+generate vectorization instructions required for detection only after
+disabling the functionality of the cost model with the "-fno-vect-cost-model"
+compilation option.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvstelm.c:Add compile
+	option "-fno-vect-cost-model" to dg-options.
+---
+ gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c
+index 1a7b0e86f..4b846204a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -mlasx" } */
++/* { dg-options "-O3 -mlasx -fno-vect-cost-model" } */
+ /* { dg-final { scan-assembler-times "xvstelm.w" 8} } */
+ 
+ #define LEN 256
+-- 
+2.43.0
+
diff --git a/0095-STABS-remove-gstabs-and-gxcoff-functionality.patch b/0095-STABS-remove-gstabs-and-gxcoff-functionality.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e76e8c31eba210448edb45d305322d46084e7e55
--- /dev/null
+++ b/0095-STABS-remove-gstabs-and-gxcoff-functionality.patch
@@ -0,0 +1,9044 @@
+From 97b011574a0b4133c79f2f1ceea2f9ae1d42d044 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Thu, 16 May 2024 10:31:40 +0800
+Subject: [PATCH] STABS: remove -gstabs and -gxcoff functionality
+
+gcc/ChangeLog:
+
+	* Makefile.in: Remove -gstabs option support, DBX-related
+	  macros and DBX debugging info support.
+	* collect2.cc (scan_prog_file): Likewise.
+	* common.opt: Likewise.
+	* config.gcc: Likewise.
+	* config.in: Likewise.
+	* config/aarch64/aarch64-elf.h (DBX_DEBUGGING_INFO): Likewise.
+	* config/alpha/alpha.cc: Likewise.
+	* config/alpha/elf.h (ASM_SPEC): Likewise.
+	* config/arc/arc.h (DBX_DEBUGGING_INFO): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	* config/arm/aout.h (DBX_DEBUGGING_INFO): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	* config/arm/netbsd-elf.h (DBX_CONTIN_LENGTH): Likewise.
+	* config/darwin.h (DSYMUTIL_SPEC): Likewise.
+	(ASM_DEBUG_SPEC): Likewise.
+	(DBX_DEBUGGING_INFO): Likewise.
+	(DBX_USE_BINCL): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	(DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END): Likewise.
+	* config/epiphany/epiphany.h (DBX_DEBUGGING_INFO): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	* config/i386/bsd.h (DBX_NO_XREFS): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	* config/i386/gas.h (DBX_NO_XREFS): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	* config/ia64/ia64.h: Likewise.
+	* config/ia64/sysv4.h (DBX_DEBUGGING_INFO): Likewise.
+	* config/m68k/linux.h (DBX_CONTIN_LENGTH): Likewise.
+	* config/m68k/openbsd.h (DBX_DEBUGGING_INFO): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	(DBX_CONTIN_CHAR): Likewise.
+	* config/mips/mips.cc (mips_output_filename): Likewise.
+	(mips_option_override): Likewise.
+	* config/mips/mips.h (SUBTARGET_ASM_DEBUGGING_SPEC): Likewise.
+	(DBX_DEBUGGING_INFO): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	(DBX_REGISTER_NUMBER): Likewise.
+	(GP_DBX_FIRST): Likewise.
+	(FP_DBX_FIRST): Likewise.
+	(MD_DBX_FIRST): Likewise.
+	* config/nvptx/nvptx.cc: Likewise.
+	* config/openbsd.h (DBX_NO_XREFS): Likewise.
+	* config/pa/pa-64.h (DBX_DEBUGGING_INFO): Likewise.
+	* config/pa/pa.h (ASSEMBLER_DIALECT): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	* config/pa/som.h (PREFERRED_DEBUGGING_TYPE): Likewise.
+	(DBX_USE_BINCL): Likewise.
+	(DBX_LINES_FUNCTION_RELATIVE): Likewise.
+	(DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END): Likewise.
+	* config/pdp11/pdp11.cc: Likewise.
+	* config/pdp11/pdp11.h (DBX_DEBUGGING_INFO): Likewise.
+	(PREFERRED_DEBUGGING_TYPE): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	* config/rs6000/rs6000-builtin.cc: Likewise.
+	* config/rs6000/rs6000-call.cc: Likewise.
+	* config/rs6000/rs6000-logue.cc (defined): Likewise.
+	* config/rs6000/rs6000.cc (rs6000_option_override_internal): Likewise.
+	(HAVE_XCOFF_DWARF_EXTRAS): Likewise.
+	(rs6000_xcoff_declare_function_name): Likewise.
+	* config/rs6000/sysv4.h (DBX_DEBUGGING_INFO): Likewise.
+	(DBX_FUNCTION_FIRST): Likewise.
+	* config/rs6000/xcoff.h (XCOFF_DEBUGGING_INFO): Likewise.
+	* config/rx/rx.h (DBX_DEBUGGING_INFO): Likewise.
+	* config/sh/elf.h (DBX_LINES_FUNCTION_RELATIVE): Likewise.
+	(DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END): Likewise.
+	* config/sol2.h (NO_DBX_BNSYM_ENSYM): Likewise.
+	* config/sparc/freebsd.h (DBX_CONTIN_CHAR): Likewise.
+	* config/sparc/netbsd-elf.h (DBX_CONTIN_CHAR): Likewise.
+	* config/sparc/sparc.h (DBX_CONTIN_LENGTH): Likewise.
+	* config/vax/vax.cc (vax_file_start): Likewise.
+	* config/vax/vax.h (DBX_DEBUGGING_INFO): Likewise.
+	(DBX_CONTIN_LENGTH): Likewise.
+	(DBX_CONTIN_CHAR): Likewise.
+	(DBX_NO_XREFS): Likewise.
+	(DBX_STATIC_STAB_DATA_SECTION): Likewise.
+	* config/vx-common.h (DBX_DEBUGGING_INFO): Likewise.
+	(XCOFF_DEBUGGING_INFO): Likewise.
+	* configure: Regenerate. Likewise.
+	* configure.ac: Likewise.
+	* debug.h: Likewise.
+	* doc/install.texi: Likewise.
+	* doc/invoke.texi: Likewise.
+	* doc/passes.texi: Likewise.
+	* doc/tm.texi: Likewise.
+	* doc/tm.texi.in: Likewise.
+	* dwarf2asm.cc (XCOFF_DEBUGGING_INFO): Likewise.
+	(dw2_asm_output_nstring): Likewise.
+	(USE_LINKONCE_INDIRECT): Likewise.
+	* dwarf2out.cc (XCOFF_DEBUGGING_INFO): Likewise.
+	(HAVE_XCOFF_DWARF_EXTRAS): Likewise.
+	(output_fde): Likewise.
+	(output_call_frame_info): Likewise.
+	(have_macinfo): Likewise.
+	(add_AT_loc_list): Likewise.
+	(add_AT_view_list): Likewise.
+	(output_compilation_unit_header): Likewise.
+	(output_pubnames): Likewise.
+	(output_aranges): Likewise.
+	(output_line_info): Likewise.
+	(output_macinfo): Likewise.
+	(dwarf2out_finish): Likewise.
+	(dwarf2out_early_finish): Likewise.
+	* final.cc (final_scan_insn_1): Likewise.
+	(rest_of_handle_final): Likewise.
+	* flag-types.h (enum debug_info_type): Likewise.
+	(DBX_DEBUG): Likewise.
+	(XCOFF_DEBUG): Likewise.
+	* function.cc (defined): Likewise.
+	* gcc.cc (defined): Likewise.
+	(ASM_DEBUG_SPEC): Likewise.
+	(ASM_DEBUG_OPTION_SPEC): Likewise.
+	* opts.cc (common_handle_option): Likewise.
+	(set_debug_level): Likewise.
+	* system.h (fancy_abort): Likewise.
+	* target-def.h (TARGET_ASM_CONSTRUCTOR): Likewise.
+	(TARGET_ASM_DESTRUCTOR): Likewise.
+	* toplev.cc (defined): Likewise.
+	* varasm.cc: Likewise.
+	* config/dbxcoff.h: Removed.
+	* config/dbxelf.h: Removed.
+	* dbxout.cc: Removed.
+	* dbxout.h: Removed.
+	* gstab.h: Removed.
+	* stab.def: Removed.
+	* xcoffout.cc: Removed.
+	* xcoffout.h: Removed.
+
+gcc/go/ChangeLog:
+
+	* go-lang.cc (go_langhook_pushdecl): Remove -gstabs option support, DBX-related
+	  macros and DBX debugging info support.
+	* gospec.cc (lang_specific_driver): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* lib/gcc-dg.exp: Remove -gstabs option support, DBX-related
+	  macros and DBX debugging info support.
+	* lib/gfortran-dg.exp: Likewise.
+	* lib/target-supports.exp: Likewise.
+	* g++.dg/cpp0x/alias-decl-debug-0.C: Removed.
+	* g++.dg/other/PR23205.C: Removed.
+	* g++.dg/other/pr23205-2.C: Removed.
+	* gcc.dg/20040813-1.c: Removed.
+	* gcc.dg/darwin-20040809-2.c: Removed.
+	* gcc.dg/debug/pr35154.c: Removed.
+	* gcc.dg/pr69471-2.c: Removed.
+	* gcc.target/powerpc/stabs-attrib-vect-darwin.c: Removed.
+	* gcc.target/s390/20041216-1.c: Removed.
+	* gfortran.dg/debug/pr35154-stabs.f: Removed.
+	* objc.dg/stabs-1.m: Removed.
+---
+ gcc/Makefile.in                               |    4 +-
+ gcc/collect2.cc                               |    7 -
+ gcc/common.opt                                |   26 +-
+ gcc/config.gcc                                |  232 +-
+ gcc/config.in                                 |   19 -
+ gcc/config/aarch64/aarch64-elf.h              |    3 -
+ gcc/config/alpha/alpha.cc                     |    4 -
+ gcc/config/alpha/elf.h                        |    2 +-
+ gcc/config/arc/arc.h                          |   10 +-
+ gcc/config/arm/aout.h                         |    9 -
+ gcc/config/arm/netbsd-elf.h                   |    3 -
+ gcc/config/darwin.h                           |   27 +-
+ gcc/config/dbxcoff.h                          |   56 -
+ gcc/config/dbxelf.h                           |   68 -
+ gcc/config/epiphany/epiphany.h                |    7 +-
+ gcc/config/i386/bsd.h                         |    6 -
+ gcc/config/i386/gas.h                         |    4 -
+ gcc/config/ia64/ia64.h                        |    2 -
+ gcc/config/ia64/sysv4.h                       |    3 -
+ gcc/config/m68k/linux.h                       |    4 -
+ gcc/config/m68k/openbsd.h                     |   12 -
+ gcc/config/mips/mips.cc                       |   16 -
+ gcc/config/mips/mips.h                        |   17 +-
+ gcc/config/nvptx/nvptx.cc                     |    1 -
+ gcc/config/openbsd.h                          |   11 -
+ gcc/config/pa/pa-64.h                         |    4 -
+ gcc/config/pa/pa.h                            |   10 -
+ gcc/config/pa/som.h                           |   15 -
+ gcc/config/pdp11/pdp11.cc                     |    1 -
+ gcc/config/pdp11/pdp11.h                      |    9 -
+ gcc/config/rs6000/rs6000-builtin.cc           |    3 -
+ gcc/config/rs6000/rs6000-call.cc              |    8 -
+ gcc/config/rs6000/rs6000-logue.cc             |   12 -
+ gcc/config/rs6000/rs6000.cc                   |   24 +-
+ gcc/config/rs6000/sysv4.h                     |    8 -
+ gcc/config/rs6000/xcoff.h                     |    3 -
+ gcc/config/rx/rx.h                            |    1 -
+ gcc/config/sh/elf.h                           |    3 -
+ gcc/config/sol2.h                             |    4 -
+ gcc/config/sparc/freebsd.h                    |    6 -
+ gcc/config/sparc/netbsd-elf.h                 |    5 -
+ gcc/config/sparc/sparc.h                      |    8 -
+ gcc/config/vax/vax.cc                         |    3 -
+ gcc/config/vax/vax.h                          |   21 -
+ gcc/config/vx-common.h                        |    2 -
+ gcc/configure                                 |   99 -
+ gcc/configure.ac                              |   20 -
+ gcc/dbxout.cc                                 | 3936 -----------------
+ gcc/dbxout.h                                  |   60 -
+ gcc/debug.h                                   |    1 -
+ gcc/doc/install.texi                          |    3 +-
+ gcc/doc/invoke.texi                           |   42 +-
+ gcc/doc/passes.texi                           |    2 +-
+ gcc/doc/tm.texi                               |  220 +-
+ gcc/doc/tm.texi.in                            |  220 +-
+ gcc/dwarf2asm.cc                              |   13 +-
+ gcc/dwarf2out.cc                              |  103 +-
+ gcc/final.cc                                  |   44 -
+ gcc/flag-types.h                              |   18 +-
+ gcc/function.cc                               |    8 -
+ gcc/gcc.cc                                    |   24 +-
+ gcc/go/go-lang.cc                             |    3 +-
+ gcc/go/gospec.cc                              |    4 -
+ gcc/gstab.h                                   |   35 -
+ gcc/opts.cc                                   |   18 +-
+ gcc/stab.def                                  |  239 -
+ gcc/system.h                                  |   14 +-
+ gcc/target-def.h                              |    4 -
+ .../g++.dg/cpp0x/alias-decl-debug-0.C         |   12 -
+ gcc/testsuite/g++.dg/other/PR23205.C          |   17 -
+ gcc/testsuite/g++.dg/other/pr23205-2.C        |   17 -
+ gcc/testsuite/gcc.dg/20040813-1.c             |   14 -
+ gcc/testsuite/gcc.dg/darwin-20040809-2.c      |   15 -
+ gcc/testsuite/gcc.dg/debug/pr35154.c          |   35 -
+ gcc/testsuite/gcc.dg/pr69471-2.c              |    8 -
+ .../powerpc/stabs-attrib-vect-darwin.c        |   12 -
+ gcc/testsuite/gcc.target/s390/20041216-1.c    |   23 -
+ .../gfortran.dg/debug/pr35154-stabs.f         |   35 -
+ gcc/testsuite/lib/gcc-dg.exp                  |    2 +-
+ gcc/testsuite/lib/gfortran-dg.exp             |    2 +-
+ gcc/testsuite/lib/target-supports.exp         |    9 -
+ gcc/testsuite/objc.dg/stabs-1.m               |   19 -
+ gcc/toplev.cc                                 |   21 -
+ gcc/varasm.cc                                 |    4 -
+ gcc/xcoffout.cc                               |  494 ---
+ gcc/xcoffout.h                                |  194 -
+ 86 files changed, 211 insertions(+), 6555 deletions(-)
+ delete mode 100644 gcc/config/dbxcoff.h
+ delete mode 100644 gcc/config/dbxelf.h
+ delete mode 100644 gcc/dbxout.cc
+ delete mode 100644 gcc/dbxout.h
+ delete mode 100644 gcc/gstab.h
+ delete mode 100644 gcc/stab.def
+ delete mode 100644 gcc/testsuite/g++.dg/cpp0x/alias-decl-debug-0.C
+ delete mode 100644 gcc/testsuite/g++.dg/other/PR23205.C
+ delete mode 100644 gcc/testsuite/g++.dg/other/pr23205-2.C
+ delete mode 100644 gcc/testsuite/gcc.dg/20040813-1.c
+ delete mode 100644 gcc/testsuite/gcc.dg/darwin-20040809-2.c
+ delete mode 100644 gcc/testsuite/gcc.dg/debug/pr35154.c
+ delete mode 100644 gcc/testsuite/gcc.dg/pr69471-2.c
+ delete mode 100644 gcc/testsuite/gcc.target/powerpc/stabs-attrib-vect-darwin.c
+ delete mode 100644 gcc/testsuite/gcc.target/s390/20041216-1.c
+ delete mode 100644 gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
+ delete mode 100644 gcc/testsuite/objc.dg/stabs-1.m
+ delete mode 100644 gcc/xcoffout.cc
+ delete mode 100644 gcc/xcoffout.h
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index 298bc9c92..5cd838270 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1347,7 +1347,6 @@ OBJS = \
+ 	data-streamer.o \
+ 	data-streamer-in.o \
+ 	data-streamer-out.o \
+-	dbxout.o \
+ 	dbgcnt.o \
+ 	dce.o \
+ 	ddg.o \
+@@ -1720,7 +1719,6 @@ OBJS = \
+ 	web.o \
+ 	wide-int.o \
+ 	wide-int-print.o \
+-	xcoffout.o \
+ 	$(out_object_file) \
+ 	$(ANALYZER_OBJS) \
+ 	$(EXTRA_OBJS) \
+@@ -2665,7 +2663,7 @@ GTFILES = $(CPPLIB_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
+   $(srcdir)/reload.h $(srcdir)/caller-save.cc $(srcdir)/symtab.cc \
+   $(srcdir)/alias.cc $(srcdir)/bitmap.cc $(srcdir)/cselib.cc $(srcdir)/cgraph.cc \
+   $(srcdir)/ipa-prop.cc $(srcdir)/ipa-cp.cc $(srcdir)/ipa-utils.h \
+-  $(srcdir)/ipa-param-manipulation.h $(srcdir)/ipa-sra.cc $(srcdir)/dbxout.cc \
++  $(srcdir)/ipa-param-manipulation.h $(srcdir)/ipa-sra.cc \
+   $(srcdir)/ipa-modref.h $(srcdir)/ipa-modref.cc \
+   $(srcdir)/ipa-modref-tree.h \
+   $(srcdir)/signop.h \
+diff --git a/gcc/collect2.cc b/gcc/collect2.cc
+index d81c7f28f..9715e8eee 100644
+--- a/gcc/collect2.cc
++++ b/gcc/collect2.cc
+@@ -2784,13 +2784,6 @@ scan_prog_file (const char *prog_name, scanpass which_pass,
+ 		      if ((name = ldgetname (ldptr, &symbol)) == NULL)
+ 			continue;		/* Should never happen.  */
+ 
+-#ifdef XCOFF_DEBUGGING_INFO
+-		      /* All AIX function names have a duplicate entry
+-			 beginning with a dot.  */
+-		      if (*name == '.')
+-			++name;
+-#endif
+-
+ 		      switch (is_ctor_dtor (name))
+ 			{
+ #if TARGET_AIX_VERSION
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 4367d458d..b18f0b944 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -122,12 +122,6 @@ uint32_t write_symbols = NO_DEBUG
+ Variable
+ enum debug_info_levels debug_info_level = DINFO_LEVEL_NONE
+ 
+-; Nonzero means use GNU-only extensions in the generated symbolic
+-; debugging information.  Currently, this only has an effect when
+-; write_symbols is set to DBX_DEBUG or XCOFF_DEBUG.
+-Variable
+-bool use_gnu_debug_info_extensions
+-
+ ; Level of CTF debugging information we are producing.  See flag-types.h
+ ; for the definitions of the different possible levels.
+ Variable
+@@ -3410,7 +3404,7 @@ Common Driver JoinedOrMissing Negative(gdwarf-)
+ Generate debug information in default version of DWARF format.
+ 
+ gdwarf-
+-Common Driver Joined UInteger Var(dwarf_version) Init(5) Negative(gstabs)
++Common Driver Joined UInteger Var(dwarf_version) Init(5)
+ Generate debug information in DWARF v2 (or later) format.
+ 
+ gdwarf32
+@@ -3458,12 +3452,12 @@ Common Driver Var(dwarf_split_debug_info) Init(0)
+ Generate debug information in separate .dwo files.
+ 
+ gstabs
+-Common Driver JoinedOrMissing Negative(gstabs+)
+-Generate debug information in STABS format.
++Common Driver WarnRemoved
++Does nothing.  Preserved for backward compatibility.
+ 
+ gstabs+
+-Common Driver JoinedOrMissing Negative(gvms)
+-Generate debug information in extended STABS format.
++Common Driver WarnRemoved
++Does nothing.  Preserved for backward compatibility.
+ 
+ gstatement-frontiers
+ Common Driver Var(debug_nonbind_markers_p) PerFunction
+@@ -3489,16 +3483,16 @@ gvariable-location-views=incompat5
+ Common Driver RejectNegative Var(debug_variable_location_views, -1) Init(2)
+ 
+ gvms
+-Common Driver JoinedOrMissing Negative(gxcoff)
++Common Driver JoinedOrMissing
+ Generate debug information in VMS format.
+ 
+ gxcoff
+-Common Driver JoinedOrMissing Negative(gxcoff+)
+-Generate debug information in XCOFF format.
++Common Driver WarnRemoved
++Does nothing.  Preserved for backward compatibility.
+ 
+ gxcoff+
+-Common Driver JoinedOrMissing Negative(gdwarf)
+-Generate debug information in extended XCOFF format.
++Common Driver JoinedOrMissing  WarnRemoved
++Does nothing.  Preserved for backward compatibility.
+ 
+ Enum
+ Name(compressed_debug_sections) Type(int)
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 3108ac4eb..8fdde1576 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -72,10 +72,10 @@
+ #  tm_file		A list of target macro files, if different from
+ #			"$cpu_type/$cpu_type.h". Usually it's constructed
+ #			per target in a way like this:
+-#			tm_file="${tm_file} dbxelf.h elfos.h ${cpu_type.h}/elf.h"
++#			tm_file="${tm_file} elfos.h ${cpu_type.h}/elf.h"
+ #			Note that the preferred order is:
+ #			- specific target header "${cpu_type}/${cpu_type.h}"
+-#			- generic headers like dbxelf.h elfos.h, etc.
++#			- generic headers like elfos.h, etc.
+ #			- specializing target headers like ${cpu_type.h}/elf.h
+ #			This helps to keep OS specific stuff out of the CPU
+ #			defining header ${cpu_type}/${cpu_type.h}.
+@@ -969,7 +969,7 @@ case ${target} in
+ *-*-solaris2*)
+   # i?86-*-solaris2* needs to insert headers between cpu default and
+   # Solaris 2 specific ones.
+-  sol2_tm_file_head="dbxelf.h elfos.h ${cpu_type}/sysv4.h"
++  sol2_tm_file_head="elfos.h ${cpu_type}/sysv4.h"
+   sol2_tm_file_tail="${cpu_type}/sol2.h sol2.h"
+   sol2_tm_file="${sol2_tm_file_head} ${sol2_tm_file_tail}"
+   case ${target} in
+@@ -1093,7 +1093,7 @@ esac
+ 
+ case ${target} in
+ aarch64*-*-elf | aarch64*-*-fuchsia* | aarch64*-*-rtems*)
+-	tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h"
++	tm_file="${tm_file} elfos.h newlib-stdint.h"
+ 	tm_file="${tm_file} aarch64/aarch64-elf.h aarch64/aarch64-errata.h aarch64/aarch64-elf-raw.h"
+ 	tmake_file="${tmake_file} aarch64/t-aarch64"
+ 	case $target in
+@@ -1130,19 +1130,19 @@ aarch64*-*-elf | aarch64*-*-fuchsia* | aarch64*-*-rtems*)
+ 	TM_MULTILIB_CONFIG=`echo $TM_MULTILIB_CONFIG | sed 's/^,//'`
+ 	;;
+ aarch64*-*-freebsd*)
+-	tm_file="${tm_file} dbxelf.h elfos.h ${fbsd_tm_file}"
++	tm_file="${tm_file} elfos.h ${fbsd_tm_file}"
+ 	tm_file="${tm_file} aarch64/aarch64-elf.h aarch64/aarch64-errata.h aarch64/aarch64-freebsd.h"
+ 	tmake_file="${tmake_file} aarch64/t-aarch64 aarch64/t-aarch64-freebsd"
+ 	tm_defines="${tm_defines}  TARGET_DEFAULT_ASYNC_UNWIND_TABLES=1"
+ 	;;
+ aarch64*-*-netbsd*)
+-	tm_file="${tm_file} dbxelf.h elfos.h ${nbsd_tm_file}"
++	tm_file="${tm_file} elfos.h ${nbsd_tm_file}"
+ 	tm_file="${tm_file} aarch64/aarch64-elf.h aarch64/aarch64-errata.h aarch64/aarch64-netbsd.h"
+ 	tmake_file="${tmake_file} aarch64/t-aarch64 aarch64/t-aarch64-netbsd"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	;;
+ aarch64*-*-linux*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h"
++	tm_file="${tm_file} elfos.h gnu-user.h linux.h glibc-stdint.h"
+ 	tm_file="${tm_file} aarch64/aarch64-elf.h aarch64/aarch64-errata.h aarch64/aarch64-linux.h"
+ 	tmake_file="${tmake_file} aarch64/t-aarch64 aarch64/t-aarch64-linux"
+ 	tm_defines="${tm_defines}  TARGET_DEFAULT_ASYNC_UNWIND_TABLES=1"
+@@ -1198,7 +1198,7 @@ alpha*-dec-*vms*)
+ 	tmake_file="${tmake_file} alpha/t-vms alpha/t-alpha"
+ 	;;
+ arc*-*-elf*)
+-	tm_file="arc/arc-arch.h dbxelf.h elfos.h newlib-stdint.h arc/elf.h ${tm_file}"
++	tm_file="arc/arc-arch.h elfos.h newlib-stdint.h arc/elf.h ${tm_file}"
+ 	tmake_file="arc/t-multilib arc/t-arc"
+ 	extra_gcc_objs="driver-arc.o"
+ 	if test "x$with_cpu" != x; then
+@@ -1219,7 +1219,7 @@ arc*-*-elf*)
+ 	esac
+ 	;;
+ arc*-*-linux*)
+-	tm_file="arc/arc-arch.h dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h arc/linux.h ${tm_file}"
++	tm_file="arc/arc-arch.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h arc/linux.h ${tm_file}"
+ 	tmake_file="${tmake_file} arc/t-multilib-linux arc/t-arc"
+ 	extra_gcc_objs="driver-arc.o"
+ 	if test "x$with_cpu" != x; then
+@@ -1255,7 +1255,7 @@ arm-wrs-vxworks7*)
+ 	need_64bit_hwint=yes
+ 	;;
+ arm*-*-freebsd*)                # ARM FreeBSD EABI
+-	tm_file="dbxelf.h elfos.h ${fbsd_tm_file} arm/elf.h"
++	tm_file="elfos.h ${fbsd_tm_file} arm/elf.h"
+ 	case $target in
+ 	arm*b-*-freebsd*)
+ 	    tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=1"
+@@ -1283,7 +1283,7 @@ arm*-*-freebsd*)                # ARM FreeBSD EABI
+ arm*-*-netbsdelf*)
+ 	target_cpu_cname="strongarm"
+ 	tmake_file="${tmake_file} arm/t-arm"
+-	tm_file="dbxelf.h elfos.h ${nbsd_tm_file} arm/elf.h"
++	tm_file="elfos.h ${nbsd_tm_file} arm/elf.h"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	case ${target} in
+ 	arm*eb-*) tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=1" ;;
+@@ -1312,7 +1312,7 @@ arm*-*-netbsdelf*)
+ 	esac
+ 	;;
+ arm*-*-linux-* | arm*-*-uclinuxfdpiceabi)
+-	tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h arm/elf.h arm/linux-gas.h arm/linux-elf.h"
++	tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h arm/elf.h arm/linux-gas.h arm/linux-elf.h"
+ 	extra_options="${extra_options} linux-android.opt"
+ 	case $target in
+ 	arm*b-*-linux*)
+@@ -1343,7 +1343,7 @@ arm*-*-linux-* | arm*-*-uclinuxfdpiceabi)
+ 	with_tls=${with_tls:-gnu}
+ 	;;
+ arm*-*-uclinux*eabi*)		# ARM ucLinux
+-	tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/linux-gas.h arm/uclinux-elf.h glibc-stdint.h"
++	tm_file="elfos.h arm/unknown-elf.h arm/elf.h arm/linux-gas.h arm/uclinux-elf.h glibc-stdint.h"
+ 	tmake_file="${tmake_file} arm/t-arm arm/t-arm-elf arm/t-bpabi"
+ 	tm_file="$tm_file arm/bpabi.h arm/uclinux-eabi.h arm/aout.h arm/arm.h"
+ 	target_cpu_cname="arm7tdmi"
+@@ -1363,7 +1363,7 @@ arm*-*-eabi* | arm*-*-symbianelf* | arm*-*-rtems* | arm*-*-fuchsia*)
+ 	  tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=1"
+ 	esac
+ 	default_use_cxa_atexit=yes
+-	tm_file="dbxelf.h elfos.h arm/unknown-elf.h arm/elf.h arm/bpabi.h"
++	tm_file="elfos.h arm/unknown-elf.h arm/elf.h arm/bpabi.h"
+ 	tmake_file="${tmake_file} arm/t-arm arm/t-arm-elf"
+ 	target_cpu_cname="arm7tdmi"
+ 	case ${target} in
+@@ -1392,7 +1392,7 @@ arm*-*-eabi* | arm*-*-symbianelf* | arm*-*-rtems* | arm*-*-fuchsia*)
+ 	tm_file="${tm_file} arm/aout.h arm/arm.h"
+ 	;;
+ avr-*-*)
+-	tm_file="elfos.h avr/elf.h avr/avr-arch.h avr/avr.h avr/specs.h dbxelf.h avr/avr-stdint.h"
++	tm_file="elfos.h avr/elf.h avr/avr-arch.h avr/avr.h avr/specs.h avr/avr-stdint.h"
+ 	if test x${with_avrlibc} != xno; then
+ 	    tm_file="${tm_file} ${cpu_type}/avrlibc.h"
+ 	    tm_defines="${tm_defines} WITH_AVRLIBC"
+@@ -1525,26 +1525,26 @@ avr-*-*)
+ 	extra_objs="avr-devices.o avr-log.o"
+ 	;;
+ bfin*-elf*)
+-	tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h bfin/elf.h"
++	tm_file="${tm_file} elfos.h newlib-stdint.h bfin/elf.h"
+ 	tmake_file=bfin/t-bfin-elf
+ 	use_collect2=no
+ 	;;
+ bfin*-uclinux*)
+-	tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h gnu-user.h linux.h glibc-stdint.h bfin/uclinux.h"
++	tm_file="${tm_file} elfos.h bfin/elf.h gnu-user.h linux.h glibc-stdint.h bfin/uclinux.h"
+ 	tmake_file=bfin/t-bfin-uclinux
+ 	use_collect2=no
+ 	;;
+ bfin*-linux-uclibc*)
+-	tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h gnu-user.h linux.h glibc-stdint.h bfin/linux.h ./linux-sysroot-suffix.h"
++	tm_file="${tm_file} elfos.h bfin/elf.h gnu-user.h linux.h glibc-stdint.h bfin/linux.h ./linux-sysroot-suffix.h"
+ 	tmake_file="${tmake_file} bfin/t-bfin-linux"
+ 	use_collect2=no
+ 	;;
+ bfin*-rtems*)
+-	tm_file="${tm_file} dbxelf.h elfos.h bfin/elf.h bfin/rtems.h rtems.h newlib-stdint.h"
++	tm_file="${tm_file} elfos.h bfin/elf.h bfin/rtems.h rtems.h newlib-stdint.h"
+ 	tmake_file="${tmake_file} bfin/t-rtems"
+ 	;;
+ bfin*-*)
+-	tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h bfin/elf.h"
++	tm_file="${tm_file} elfos.h newlib-stdint.h bfin/elf.h"
+ 	use_collect2=no
+ 	use_gcc_stdint=wrap
+ 	;;
+@@ -1600,13 +1600,13 @@ csky-*-*)
+ 
+ 	case ${target} in
+ 	    csky-*-elf*)
+-		tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file} csky/csky-elf.h"
++		tm_file="elfos.h newlib-stdint.h ${tm_file} csky/csky-elf.h"
+ 		tmake_file="csky/t-csky csky/t-csky-elf"
+ 		default_use_cxa_atexit=no
+ 		use_gcc_stdint=wrap
+ 		;;
+ 	    csky-*-linux*)
+-		tm_file="dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} csky/csky-linux-elf.h"
++		tm_file="elfos.h gnu-user.h linux.h glibc-stdint.h ${tm_file} csky/csky-linux-elf.h"
+ 		tmake_file="${tmake_file} csky/t-csky csky/t-csky-linux"
+ 
+ 		case ${target} in
+@@ -1633,7 +1633,7 @@ csky-*-*)
+ 	esac
+ 	;;
+ epiphany-*-elf | epiphany-*-rtems*)
+-	tm_file="dbxelf.h elfos.h ${tm_file}"
++	tm_file="elfos.h ${tm_file}"
+ 	tmake_file="${tmake_file} epiphany/t-epiphany"
+ 	case ${target} in
+ 	epiphany-*-rtems*)
+@@ -1649,21 +1649,21 @@ epiphany-*-elf | epiphany-*-rtems*)
+ 	extra_headers="epiphany_intrinsics.h"
+ 	;;
+ fr30-*-elf)
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file}"
++	tm_file="elfos.h newlib-stdint.h ${tm_file}"
+ 	;;
+ frv-*-elf)
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file}"
++	tm_file="elfos.h newlib-stdint.h ${tm_file}"
+ 	tmake_file=frv/t-frv
+ 	;;
+ frv-*-*linux*)
+-	tm_file="dbxelf.h elfos.h ${tm_file} \
++	tm_file="elfos.h ${tm_file} \
+ 	         gnu-user.h linux.h glibc-stdint.h frv/linux.h"
+ 	tmake_file="${tmake_file} frv/t-frv frv/t-linux"
+ 	;;
+ ft32-*-elf)
+ 	gas=yes
+ 	gnu_ld=yes
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file}"
++	tm_file="elfos.h newlib-stdint.h ${tm_file}"
+ 	tmake_file="${tmake_file} ft32/t-ft32"
+ 	;;
+ amdgcn-*-amdhsa)
+@@ -1691,23 +1691,23 @@ amdgcn-*-amdhsa)
+ moxie-*-elf)
+ 	gas=yes
+ 	gnu_ld=yes
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file}"
++	tm_file="elfos.h newlib-stdint.h ${tm_file}"
+ 	tmake_file="${tmake_file} moxie/t-moxie"
+ 	;;
+ moxie-*-uclinux*)
+ 	gas=yes
+ 	gnu_ld=yes
+-	tm_file="dbxelf.h elfos.h ${tm_file} gnu-user.h linux.h glibc-stdint.h moxie/uclinux.h"
++	tm_file="elfos.h ${tm_file} gnu-user.h linux.h glibc-stdint.h moxie/uclinux.h"
+ 	tmake_file="${tmake_file} moxie/t-moxie"
+ 	;;
+ moxie-*-rtems*)
+ 	tmake_file="${tmake_file} moxie/t-moxie"
+-	tm_file="moxie/moxie.h dbxelf.h elfos.h moxie/rtems.h rtems.h newlib-stdint.h"
++	tm_file="moxie/moxie.h elfos.h moxie/rtems.h rtems.h newlib-stdint.h"
+ 	;;
+ moxie-*-moxiebox*)
+ 	gas=yes
+ 	gnu_ld=yes
+-	tm_file="${tm_file} dbxelf.h elfos.h moxie/moxiebox.h newlib-stdint.h"
++	tm_file="${tm_file} elfos.h moxie/moxiebox.h newlib-stdint.h"
+ 	tmake_file="${tmake_file} moxie/t-moxiebox"
+ 	;;
+ h8300-*-elf*)
+@@ -1720,7 +1720,7 @@ h8300-*-linux*)
+ 	;;
+ hppa*64*-*-linux*)
+ 	target_cpu_default="MASK_PA_11|MASK_PA_20"
+-	tm_file="pa/pa64-start.h ${tm_file} dbxelf.h elfos.h gnu-user.h linux.h \
++	tm_file="pa/pa64-start.h ${tm_file} elfos.h gnu-user.h linux.h \
+ 		 glibc-stdint.h pa/pa-linux.h pa/pa64-regs.h pa/pa-64.h \
+ 		 pa/pa64-linux.h"
+ 	tmake_file="${tmake_file} pa/t-pa pa/t-linux"
+@@ -1729,14 +1729,14 @@ hppa*64*-*-linux*)
+ 	;;
+ hppa*-*-linux*)
+ 	target_cpu_default="MASK_PA_11|MASK_NO_SPACE_REGS|MASK_CALLER_COPIES"
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h pa/pa-linux.h \
++	tm_file="${tm_file} elfos.h gnu-user.h linux.h glibc-stdint.h pa/pa-linux.h \
+ 		 pa/pa32-regs.h pa/pa32-linux.h"
+ 	tmake_file="${tmake_file} pa/t-pa pa/t-linux"
+ 	d_target_objs="${d_target_objs} pa-d.o"
+ 	;;
+ hppa*-*-openbsd*)
+ 	target_cpu_default="MASK_PA_11"
+-	tm_file="${tm_file} dbxelf.h elfos.h openbsd.h openbsd-stdint.h openbsd-libpthread.h \
++	tm_file="${tm_file} elfos.h openbsd.h openbsd-stdint.h openbsd-libpthread.h \
+ 		 pa/pa-openbsd.h pa/pa32-regs.h pa/pa32-openbsd.h"
+ 	extra_options="${extra_options} openbsd.opt"
+ 	tmake_file="pa/t-pa"
+@@ -1746,7 +1746,7 @@ hppa*-*-openbsd*)
+ 	;;
+ hppa*-*-netbsd*)
+ 	target_cpu_default="MASK_PA_11|MASK_NO_SPACE_REGS"
+-	tm_file="${tm_file} dbxelf.h elfos.h ${nbsd_tm_file} \
++	tm_file="${tm_file} elfos.h ${nbsd_tm_file} \
+ 		 pa/pa-netbsd.h pa/pa32-regs.h pa/pa32-netbsd.h"
+ 	tmake_file="${tmake_file}"
+ 	tm_defines="${tm_defines} CHAR_FAST8=1 SHORT_FAST16=1"
+@@ -1794,7 +1794,7 @@ hppa*64*-*-hpux11*)
+ 	then
+ 		target_cpu_default="${target_cpu_default}|MASK_GNU_LD"
+ 	fi
+-	tm_file="pa/pa64-start.h ${tm_file} dbxelf.h elfos.h \
++	tm_file="pa/pa64-start.h ${tm_file} elfos.h \
+ 		 pa/pa64-regs.h pa/pa-hpux.h pa/pa-hpux1010.h \
+ 		 pa/pa-hpux11.h"
+ 	case ${target} in
+@@ -1898,55 +1898,55 @@ x86_64-*-darwin*)
+ 	tm_file="${cpu_type}/darwin64-biarch.h ${tm_file} "
+ 	;;
+ i[34567]86-*-elfiamcu)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/iamcu.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h newlib-stdint.h i386/iamcu.h"
+ 	;;
+ i[34567]86-*-elf*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h newlib-stdint.h i386/i386elf.h"
+ 	;;
+ x86_64-*-elf*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h"
+ 	;;
+ x86_64-*-rtems*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h i386/rtemself.h rtems.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h i386/rtemself.h rtems.h"
+ 	;;
+ i[34567]86-*-rdos*)
+-    tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/rdos.h"
++    tm_file="${tm_file} i386/unix.h i386/att.h elfos.h newlib-stdint.h i386/i386elf.h i386/rdos.h"
+     ;;
+ x86_64-*-rdos*)
+-    tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h i386/rdos.h i386/rdos64.h"
++    tm_file="${tm_file} i386/unix.h i386/att.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h i386/rdos.h i386/rdos64.h"
+     tmake_file="i386/t-i386elf t-svr4"
+     ;;
+ i[34567]86-*-dragonfly*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h dragonfly.h dragonfly-stdint.h i386/dragonfly.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h dragonfly.h dragonfly-stdint.h i386/dragonfly.h"
+ 	tmake_file="${tmake_file} i386/t-crtstuff"
+ 	;;
+ x86_64-*-dragonfly*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h dragonfly.h dragonfly-stdint.h i386/x86-64.h i386/dragonfly.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h dragonfly.h dragonfly-stdint.h i386/x86-64.h i386/dragonfly.h"
+ 	tmake_file="${tmake_file} i386/t-crtstuff"
+ 	;;
+ i[34567]86-*-freebsd*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h ${fbsd_tm_file} i386/freebsd.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h ${fbsd_tm_file} i386/freebsd.h"
+ 	;;
+ x86_64-*-freebsd*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h ${fbsd_tm_file} i386/x86-64.h i386/freebsd.h i386/freebsd64.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h ${fbsd_tm_file} i386/x86-64.h i386/freebsd.h i386/freebsd64.h"
+ 	;;
+ i[34567]86-*-netbsdelf*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h ${nbsd_tm_file} i386/netbsd-elf.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h ${nbsd_tm_file} i386/netbsd-elf.h"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	;;
+ x86_64-*-netbsd*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h ${nbsd_tm_file} i386/x86-64.h i386/netbsd64.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h ${nbsd_tm_file} i386/x86-64.h i386/netbsd64.h"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	;;
+ i[34567]86-*-openbsd*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h"
+ 	tm_file="${tm_file} openbsd.h openbsd-stdint.h openbsd-libpthread.h i386/openbsdelf.h"
+ 	extra_options="${extra_options} openbsd.opt"
+ 	gas=yes
+ 	gnu_ld=yes
+ 	;;
+ x86_64-*-openbsd*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h"
+ 	tm_file="${tm_file} openbsd.h openbsd-stdint.h openbsd-libpthread.h i386/x86-64.h i386/openbsdelf.h"
+ 	extra_options="${extra_options} openbsd.opt"
+ 	gas=yes
+@@ -1955,7 +1955,7 @@ x86_64-*-openbsd*)
+ i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-gnu* | i[34567]86-*-kopensolaris*-gnu)
+ 			# Intel 80386's running GNU/*
+ 			# with ELF format using glibc 2
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h gnu-user.h glibc-stdint.h"
+ 	case ${target} in
+ 	i[34567]86-*-linux*)
+ 		tm_file="${tm_file} linux.h linux-android.h"
+@@ -2012,7 +2012,7 @@ i[34567]86-*-linux* | i[34567]86-*-kfreebsd*-gnu | i[34567]86-*-gnu* | i[34567]8
+ 	esac
+ 	;;
+ x86_64-*-linux* | x86_64-*-kfreebsd*-gnu)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h gnu-user.h glibc-stdint.h \
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h gnu-user.h glibc-stdint.h \
+ 		 i386/x86-64.h i386/gnu-user-common.h i386/gnu-user64.h"
+ 	case ${target} in
+ 	x86_64-*-linux*)
+@@ -2050,7 +2050,7 @@ x86_64-*-linux* | x86_64-*-kfreebsd*-gnu)
+ 	;;
+ i[34567]86-pc-msdosdjgpp*)
+ 	xm_file=i386/xm-djgpp.h
+-	tm_file="dbxcoff.h ${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/djgpp.h i386/djgpp-stdint.h"
++	tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/djgpp.h i386/djgpp-stdint.h"
+ 	native_system_header_dir=/dev/env/DJDIR/include
+ 	extra_options="${extra_options} i386/djgpp.opt"
+ 	gnu_ld=yes
+@@ -2059,7 +2059,7 @@ i[34567]86-pc-msdosdjgpp*)
+ 	;;
+ i[34567]86-*-lynxos*)
+ 	xm_defines=POSIX
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/lynx.h lynx.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h i386/lynx.h lynx.h"
+ 	tmake_file="${tmake_file} t-lynx"
+ 	extra_options="${extra_options} lynx.opt"
+ 	thread_file=lynx
+@@ -2067,13 +2067,13 @@ i[34567]86-*-lynxos*)
+ 	gas=yes
+ 	;;
+ i[34567]86-*-nto-qnx*)
+-	tm_file="${tm_file} i386/att.h dbxelf.h tm-dwarf2.h elfos.h i386/unix.h i386/nto.h"
++	tm_file="${tm_file} i386/att.h tm-dwarf2.h elfos.h i386/unix.h i386/nto.h"
+ 	extra_options="${extra_options} i386/nto.opt"
+ 	gnu_ld=yes
+ 	gas=yes
+ 	;;
+ i[34567]86-*-rtems*)
+-	tm_file="${tm_file} i386/unix.h i386/att.h dbxelf.h elfos.h i386/i386elf.h i386/rtemself.h rtems.h newlib-stdint.h"
++	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h i386/i386elf.h i386/rtemself.h rtems.h newlib-stdint.h"
+ 	tmake_file="${tmake_file} i386/t-rtems"
+ 	;;
+ i[34567]86-*-solaris2* | x86_64-*-solaris2*)
+@@ -2122,7 +2122,7 @@ i[4567]86-wrs-vxworks*|x86_64-wrs-vxworks7*)
+ 	esac
+ 	;;
+ i[34567]86-*-cygwin*)
+-	tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h dbxcoff.h i386/cygming.h i386/cygwin.h i386/cygwin-stdint.h"
++	tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/cygming.h i386/cygwin.h i386/cygwin-stdint.h"
+ 	xm_file=i386/xm-cygwin.h
+ 	tmake_file="${tmake_file} i386/t-cygming t-slibgcc"
+ 	target_gtfiles="$target_gtfiles \$(srcdir)/config/i386/winnt.cc"
+@@ -2140,7 +2140,7 @@ i[34567]86-*-cygwin*)
+ 	;;
+ x86_64-*-cygwin*)
+ 	need_64bit_isa=yes
+-	tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h dbxcoff.h i386/cygming.h i386/cygwin.h i386/cygwin-w64.h i386/cygwin-stdint.h"
++	tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/cygming.h i386/cygwin.h i386/cygwin-w64.h i386/cygwin-stdint.h"
+ 	xm_file=i386/xm-cygwin.h
+ 	tmake_file="${tmake_file} i386/t-cygming t-slibgcc i386/t-cygwin-w64"
+ 	target_gtfiles="$target_gtfiles \$(srcdir)/config/i386/winnt.cc"
+@@ -2158,7 +2158,7 @@ x86_64-*-cygwin*)
+ 	tm_defines="${tm_defines} TARGET_CYGWIN64=1"
+ 	;;
+ i[34567]86-*-mingw* | x86_64-*-mingw*)
+-	tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h dbxcoff.h i386/cygming.h"
++	tm_file="${tm_file} i386/unix.h i386/bsd.h i386/gas.h i386/cygming.h"
+ 	xm_file=i386/xm-mingw32.h
+ 	c_target_objs="${c_target_objs} winnt-c.o"
+ 	cxx_target_objs="${cxx_target_objs} winnt-c.o"
+@@ -2252,7 +2252,7 @@ x86_64-*-fuchsia*)
+ 	tm_file="${tm_file} i386/unix.h i386/att.h elfos.h newlib-stdint.h i386/i386elf.h i386/x86-64.h fuchsia.h"
+ 	;;
+ ia64*-*-elf*)
+-	tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h ia64/sysv4.h ia64/elf.h"
++	tm_file="${tm_file} elfos.h newlib-stdint.h ia64/sysv4.h ia64/elf.h"
+ 	tmake_file="ia64/t-ia64"
+ 	target_cpu_default="0"
+ 	if test x$gas = xyes
+@@ -2265,17 +2265,17 @@ ia64*-*-elf*)
+ 	fi
+ 	;;
+ ia64*-*-freebsd*)
+-	tm_file="${tm_file} dbxelf.h elfos.h ${fbsd_tm_file} ia64/sysv4.h ia64/freebsd.h"
++	tm_file="${tm_file} elfos.h ${fbsd_tm_file} ia64/sysv4.h ia64/freebsd.h"
+ 	target_cpu_default="MASK_GNU_AS|MASK_GNU_LD"
+ 	tmake_file="${tmake_file} ia64/t-ia64"
+ 	;;
+ ia64*-*-linux*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h ia64/sysv4.h ia64/linux.h"
++	tm_file="${tm_file} elfos.h gnu-user.h linux.h glibc-stdint.h ia64/sysv4.h ia64/linux.h"
+ 	tmake_file="${tmake_file} ia64/t-ia64 ia64/t-linux t-libunwind"
+ 	target_cpu_default="MASK_GNU_AS|MASK_GNU_LD"
+ 	;;
+ ia64*-*-hpux*)
+-	tm_file="${tm_file} dbxelf.h elfos.h ia64/sysv4.h ia64/hpux.h"
++	tm_file="${tm_file} elfos.h ia64/sysv4.h ia64/hpux.h"
+ 	tmake_file="ia64/t-ia64 ia64/t-hpux t-slibgcc"
+ 	target_cpu_default="MASK_GNU_AS"
+ 	case x$enable_threads in
+@@ -2311,16 +2311,16 @@ iq2000*-*-elf*)
+         md_file=iq2000/iq2000.md
+         ;;
+ lm32-*-elf*)
+-        tm_file="dbxelf.h elfos.h ${tm_file} newlib-stdint.h"
++        tm_file="elfos.h ${tm_file} newlib-stdint.h"
+ 	tmake_file="${tmake_file} lm32/t-lm32"
+         ;;
+ lm32-*-rtems*)
+-	tm_file="dbxelf.h elfos.h ${tm_file} lm32/rtems.h rtems.h newlib-stdint.h"
++	tm_file="elfos.h ${tm_file} lm32/rtems.h rtems.h newlib-stdint.h"
+ 	tmake_file="${tmake_file} lm32/t-lm32"
+ 	tmake_file="${tmake_file} lm32/t-rtems"
+          ;;
+ lm32-*-uclinux*)
+-        tm_file="dbxelf.h elfos.h ${tm_file} gnu-user.h linux.h lm32/uclinux-elf.h"
++        tm_file="elfos.h ${tm_file} gnu-user.h linux.h lm32/uclinux-elf.h"
+ 	tmake_file="${tmake_file} lm32/t-lm32"
+         ;;
+ m32r-*-elf*)
+@@ -2348,7 +2348,7 @@ m68k-*-elf* | fido-*-elf*)
+ 		default_cf_cpu=5206
+ 		;;
+ 	esac
+-	tm_file="${tm_file} m68k/m68k-none.h m68k/m68kelf.h dbxelf.h elfos.h newlib-stdint.h m68k/m68kemb.h m68k/m68020-elf.h"
++	tm_file="${tm_file} m68k/m68k-none.h m68k/m68kelf.h elfos.h newlib-stdint.h m68k/m68kemb.h m68k/m68020-elf.h"
+ 	tm_defines="${tm_defines} MOTOROLA=1"
+ 	tmake_file="m68k/t-floatlib m68k/t-m68kbare m68k/t-m68kelf"
+ 	# Add multilibs for targets other than fido.
+@@ -2363,7 +2363,7 @@ m68k-*-elf* | fido-*-elf*)
+ m68k*-*-netbsdelf*)
+ 	default_m68k_cpu=68020
+ 	default_cf_cpu=5475
+-	tm_file="${tm_file} dbxelf.h elfos.h ${nbsd_tm_file} m68k/netbsd-elf.h"
++	tm_file="${tm_file} elfos.h ${nbsd_tm_file} m68k/netbsd-elf.h"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	tm_defines="${tm_defines} MOTOROLA=1 CHAR_FAST8=1 SHORT_FAST16=1"
+ 	;;
+@@ -2372,7 +2372,7 @@ m68k-*-uclinux*)		# Motorola m68k/ColdFire running uClinux
+ 				# ABI.
+ 	default_m68k_cpu=68020
+ 	default_cf_cpu=5206
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h flat.h m68k/linux.h m68k/uclinux.h ./sysroot-suffix.h"
++	tm_file="${tm_file} elfos.h gnu-user.h linux.h glibc-stdint.h flat.h m68k/linux.h m68k/uclinux.h ./sysroot-suffix.h"
+ 	extra_options="${extra_options} m68k/uclinux.opt"
+  	tm_defines="${tm_defines} MOTOROLA=1"
+ 	tmake_file="m68k/t-floatlib m68k/t-uclinux m68k/t-mlibs"
+@@ -2383,7 +2383,7 @@ m68k-*-linux*)			# Motorola m68k's running GNU/Linux
+ 	default_m68k_cpu=68020
+ 	default_cf_cpu=5475
+ 	with_arch=${with_arch:-m68k}
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h m68k/linux.h ./sysroot-suffix.h"
++	tm_file="${tm_file} elfos.h gnu-user.h linux.h glibc-stdint.h m68k/linux.h ./sysroot-suffix.h"
+ 	extra_options="${extra_options} m68k/ieee.opt"
+ 	tm_defines="${tm_defines} MOTOROLA=1"
+ 	tmake_file="${tmake_file} m68k/t-floatlib m68k/t-linux m68k/t-mlibs"
+@@ -2392,11 +2392,11 @@ m68k-*-rtems*)
+ 	default_m68k_cpu=68020
+ 	default_cf_cpu=5206
+ 	tmake_file="${tmake_file} m68k/t-floatlib m68k/t-m68kbare m68k/t-crtstuff m68k/t-rtems m68k/t-mlibs"
+-	tm_file="${tm_file} m68k/m68k-none.h m68k/m68kelf.h dbxelf.h elfos.h m68k/m68kemb.h m68k/m68020-elf.h m68k/rtemself.h rtems.h newlib-stdint.h"
++	tm_file="${tm_file} m68k/m68k-none.h m68k/m68kelf.h elfos.h m68k/m68kemb.h m68k/m68020-elf.h m68k/rtemself.h rtems.h newlib-stdint.h"
+ 	tm_defines="${tm_defines} MOTOROLA=1"
+ 	;;
+ mcore-*-elf)
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file} mcore/mcore-elf.h"
++	tm_file="elfos.h newlib-stdint.h ${tm_file} mcore/mcore-elf.h"
+ 	tmake_file=mcore/t-mcore
+ 	inhibit_libc=true
+ 	;;
+@@ -2409,7 +2409,7 @@ microblaze*-linux*)
+ 			tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=4321"
+ 			;;
+ 	esac
+-	tm_file="${tm_file} dbxelf.h gnu-user.h linux.h microblaze/linux.h"
++	tm_file="${tm_file} gnu-user.h linux.h microblaze/linux.h"
+ 	tm_file="${tm_file} glibc-stdint.h"
+ 	c_target_objs="${c_target_objs} microblaze-c.o"
+ 	cxx_target_objs="${cxx_target_objs} microblaze-c.o"
+@@ -2425,7 +2425,7 @@ microblaze*-*-rtems*)
+ 			tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=4321"
+ 			;;
+ 	esac
+-	tm_file="${tm_file} dbxelf.h"
++	tm_file="${tm_file}"
+ 	tm_file="${tm_file} microblaze/rtems.h rtems.h newlib-stdint.h"
+ 	c_target_objs="${c_target_objs} microblaze-c.o"
+ 	cxx_target_objs="${cxx_target_objs} microblaze-c.o"
+@@ -2441,7 +2441,7 @@ microblaze*-*-elf)
+ 			tm_defines="${tm_defines} TARGET_BIG_ENDIAN_DEFAULT=4321"
+ 			;;
+ 	esac
+-	tm_file="${tm_file} dbxelf.h newlib-stdint.h"
++	tm_file="${tm_file} newlib-stdint.h"
+ 	c_target_objs="${c_target_objs} microblaze-c.o"
+ 	cxx_target_objs="${cxx_target_objs} microblaze-c.o"
+ 	tmake_file="${tmake_file} microblaze/t-microblaze"
+@@ -2509,7 +2509,7 @@ riscv*-*-freebsd*)
+ 	;;
+ 
+ loongarch*-*-linux*)
+-	tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}"
++	tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}"
+ 	tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h"
+ 	extra_options="${extra_options} linux-android.opt"
+ 	tmake_file="${tmake_file} loongarch/t-linux"
+@@ -2527,7 +2527,7 @@ mips*-*-netbsd*)			# NetBSD/mips, either endian.
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	;;
+ mips*-img-linux*)
+-	tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h mips/linux-common.h mips/mti-linux.h"
++	tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h mips/linux-common.h mips/mti-linux.h"
+ 	extra_options="${extra_options} linux-android.opt"
+ 	tmake_file="${tmake_file} mips/t-img-linux"
+ 	tm_defines="${tm_defines} MIPS_ISA_DEFAULT=MIPS_ISA_MIPS32R6 MIPS_ABI_DEFAULT=ABI_32"
+@@ -2537,7 +2537,7 @@ mips*-img-linux*)
+ 	gas=yes
+ 	;;
+ mips*-mti-linux*)
+-	tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h mips/linux-common.h mips/mti-linux.h"
++	tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h mips/linux-common.h mips/mti-linux.h"
+ 	extra_options="${extra_options} linux-android.opt"
+ 	tmake_file="${tmake_file} mips/t-mti-linux"
+ 	tm_defines="${tm_defines} MIPS_ISA_DEFAULT=MIPS_ISA_MIPS32R2 MIPS_ABI_DEFAULT=ABI_32"
+@@ -2547,7 +2547,7 @@ mips*-mti-linux*)
+ 	gas=yes
+ 	;;
+ mips*-*-linux*)				# Linux MIPS, either endian.
+-	tm_file="dbxelf.h elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h mips/linux-common.h"
++	tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file} mips/gnu-user.h mips/linux.h mips/linux-common.h"
+ 	extra_options="${extra_options} linux-android.opt"
+ 	case ${target} in
+ 		mipsisa32r6*)
+@@ -2759,7 +2759,7 @@ mn10300-*-*)
+ 	use_gcc_stdint=wrap
+ 	;;
+ msp430-*-*)
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file}"
++	tm_file="elfos.h newlib-stdint.h ${tm_file}"
+ 	c_target_objs="msp430-c.o"
+ 	cxx_target_objs="msp430-c.o"
+ 	tmake_file="${tmake_file} msp430/t-msp430"
+@@ -2797,11 +2797,11 @@ nds32*-*-*)
+ 	esac
+ 	case ${target} in
+ 	  nds32*-*-elf*)
+-	    tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file} nds32/elf.h nds32/nds32_intrinsic.h"
++	    tm_file="elfos.h newlib-stdint.h ${tm_file} nds32/elf.h nds32/nds32_intrinsic.h"
+ 	    tmake_file="nds32/t-nds32 nds32/t-elf"
+ 	    ;;
+ 	  nds32*-*-linux*)
+-	    tm_file="dbxelf.h elfos.h ${tm_file} gnu-user.h linux.h glibc-stdint.h nds32/linux.h nds32/nds32_intrinsic.h"
++	    tm_file="elfos.h ${tm_file} gnu-user.h linux.h glibc-stdint.h nds32/linux.h nds32/nds32_intrinsic.h"
+ 	    tmake_file="${tmake_file} nds32/t-nds32 nds32/t-linux"
+ 	    gcc_cv_initfini_array=yes
+ 	    ;;
+@@ -2922,7 +2922,7 @@ powerpc64-*-darwin*)
+ 	tm_file="${tm_file} ${cpu_type}/darwin64-biarch.h"
+ 	;;
+ powerpc*-*-freebsd*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h ${fbsd_tm_file} rs6000/sysv4.h"
++	tm_file="${tm_file} elfos.h gnu-user.h ${fbsd_tm_file} rs6000/sysv4.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcos ${tmake_file} rs6000/t-ppccomm"
+ 	case ${target} in
+@@ -2947,47 +2947,47 @@ powerpc*-*-freebsd*)
+ 	esac
+ 	;;
+ powerpc-*-netbsd*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h ${nbsd_tm_file} freebsd-spec.h rs6000/sysv4.h rs6000/netbsd.h"
++	tm_file="${tm_file} elfos.h gnu-user.h ${nbsd_tm_file} freebsd-spec.h rs6000/sysv4.h rs6000/netbsd.h"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	tmake_file="${tmake_file} rs6000/t-netbsd"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	;;
+ powerpc-*-eabisimaltivec*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h rs6000/eabisim.h rs6000/eabialtivec.h"
++	tm_file="${tm_file} elfos.h gnu-user.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h rs6000/eabisim.h rs6000/eabialtivec.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcendian rs6000/t-ppccomm"
+ 	use_gcc_stdint=wrap
+ 	;;
+ powerpc-*-eabisim*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h rs6000/eabisim.h"
++	tm_file="${tm_file} elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h rs6000/eabisim.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcgas rs6000/t-ppccomm"
+ 	use_gcc_stdint=wrap
+ 	;;
+ powerpc-*-elf*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h"
++	tm_file="${tm_file} elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcgas rs6000/t-ppccomm"
+ 	;;
+ powerpc-*-eabialtivec*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h rs6000/eabialtivec.h"
++	tm_file="${tm_file} elfos.h gnu-user.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h rs6000/eabialtivec.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcendian rs6000/t-ppccomm"
+ 	use_gcc_stdint=wrap
+ 	;;
+ powerpc-*-eabi*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h"
++	tm_file="${tm_file} elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/eabi.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcgas rs6000/t-ppccomm"
+ 	use_gcc_stdint=wrap
+ 	;;
+ powerpc-*-rtems*)
+-	tm_file="rs6000/biarch64.h ${tm_file} dbxelf.h elfos.h gnu-user.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/rtems.h rtems.h"
++	tm_file="rs6000/biarch64.h ${tm_file} elfos.h gnu-user.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/rtems.h rtems.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt rs6000/linux64.opt"
+ 	tmake_file="${tmake_file} rs6000/t-fprules rs6000/t-rtems rs6000/t-ppccomm"
+ 	;;
+ powerpc*-*-linux*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h freebsd-spec.h rs6000/sysv4.h"
++	tm_file="${tm_file} elfos.h gnu-user.h linux.h freebsd-spec.h rs6000/sysv4.h"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	tmake_file="${tmake_file} rs6000/t-fprules rs6000/t-ppccomm"
+ 	extra_objs="$extra_objs rs6000-linux.o"
+@@ -3094,7 +3094,7 @@ powerpc-wrs-vxworks*)
+ 	;;
+ powerpc-*-lynxos*)
+ 	xm_defines=POSIX
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h rs6000/sysv4.h rs6000/lynx.h lynx.h"
++	tm_file="${tm_file} elfos.h gnu-user.h rs6000/sysv4.h rs6000/lynx.h lynx.h"
+ 	tmake_file="t-lynx rs6000/t-lynx"
+ 	extra_options="${extra_options} rs6000/sysv4.opt lynx.opt"
+ 	thread_file=lynx
+@@ -3102,18 +3102,18 @@ powerpc-*-lynxos*)
+ 	gas=yes
+ 	;;
+ powerpcle-*-elf*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/sysv4le.h"
++	tm_file="${tm_file} elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/sysv4le.h"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcgas rs6000/t-ppccomm"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	;;
+ powerpcle-*-eabisim*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/eabisim.h"
++	tm_file="${tm_file} elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h rs6000/eabisim.h"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcgas rs6000/t-ppccomm"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	use_gcc_stdint=wrap
+ 	;;
+ powerpcle-*-eabi*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h"
++	tm_file="${tm_file} elfos.h gnu-user.h usegas.h freebsd-spec.h newlib-stdint.h rs6000/sysv4.h rs6000/sysv4le.h rs6000/eabi.h"
+ 	tmake_file="rs6000/t-fprules rs6000/t-ppcgas rs6000/t-ppccomm"
+ 	extra_options="${extra_options} rs6000/sysv4.opt"
+ 	use_gcc_stdint=wrap
+@@ -3171,7 +3171,7 @@ rl78-*-elf*)
+ 	tmake_file="${tmake_file} rl78/t-rl78"
+ 	;;
+ rx-*-elf*)
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h ${tm_file}"
++	tm_file="elfos.h newlib-stdint.h ${tm_file}"
+ 	tmake_file="${tmake_file} rx/t-rx"
+ 	extra_options="${extra_options} rx/elf.opt"
+ 	;;
+@@ -3180,7 +3180,7 @@ rx-*-linux*)
+ 	tmake_file="${tmake_file} rx/t-linux"
+ 	;;
+ s390-*-linux*)
+-	tm_file="s390/s390.h dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h s390/linux.h"
++	tm_file="s390/s390.h elfos.h gnu-user.h linux.h glibc-stdint.h s390/linux.h"
+ 	c_target_objs="${c_target_objs} s390-c.o"
+ 	cxx_target_objs="${cxx_target_objs} s390-c.o"
+ 	if test x$enable_targets = xall; then
+@@ -3189,7 +3189,7 @@ s390-*-linux*)
+ 	tmake_file="${tmake_file} s390/t-s390"
+ 	;;
+ s390x-*-linux*)
+-	tm_file="s390/s390x.h s390/s390.h dbxelf.h elfos.h gnu-user.h linux.h glibc-stdint.h s390/linux.h"
++	tm_file="s390/s390x.h s390/s390.h elfos.h gnu-user.h linux.h glibc-stdint.h s390/linux.h"
+ 	tm_p_file="linux-protos.h s390/s390-protos.h"
+ 	c_target_objs="${c_target_objs} s390-c.o"
+ 	cxx_target_objs="${cxx_target_objs} s390-c.o"
+@@ -3199,7 +3199,7 @@ s390x-*-linux*)
+ 	tmake_file="${tmake_file} s390/t-linux64 s390/t-s390"
+ 	;;
+ s390x-ibm-tpf*)
+-	tm_file="s390/s390x.h s390/s390.h dbxelf.h elfos.h glibc-stdint.h s390/tpf.h"
++	tm_file="s390/s390x.h s390/s390.h elfos.h glibc-stdint.h s390/tpf.h"
+ 	tm_p_file=s390/s390-protos.h
+ 	c_target_objs="${c_target_objs} s390-c.o"
+ 	cxx_target_objs="${cxx_target_objs} s390-c.o"
+@@ -3238,7 +3238,7 @@ sh-*-elf* | sh[12346l]*-*-elf* | \
+ 	case ${with_endian} in
+ 	little*)	tm_file="sh/little.h ${tm_file}" ;;
+ 	esac
+-	tm_file="${tm_file} dbxelf.h elfos.h sh/elf.h"
++	tm_file="${tm_file} elfos.h sh/elf.h"
+ 	case ${target} in
+ 	sh*-*-linux*)	tmake_file="${tmake_file} sh/t-linux"
+ 			if test x$enable_fdpic = xyes; then
+@@ -3359,14 +3359,14 @@ sh-*-elf* | sh[12346l]*-*-elf* | \
+ 	;;
+ sh-*-rtems*)
+ 	tmake_file="${tmake_file} sh/t-sh sh/t-rtems"
+-	tm_file="${tm_file} dbxelf.h elfos.h sh/elf.h sh/embed-elf.h sh/rtemself.h rtems.h newlib-stdint.h"
++	tm_file="${tm_file} elfos.h sh/elf.h sh/embed-elf.h sh/rtemself.h rtems.h newlib-stdint.h"
+ 	;;
+ sh-wrs-vxworks)
+ 	tmake_file="$tmake_file sh/t-sh sh/t-vxworks"
+ 	tm_file="${tm_file} elfos.h sh/elf.h sh/embed-elf.h vx-common.h vxworks.h sh/vxworks.h"
+ 	;;
+ sparc-*-elf*)
+-	tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h sparc/sysv4.h sparc/sp-elf.h"
++	tm_file="${tm_file} elfos.h newlib-stdint.h sparc/sysv4.h sparc/sp-elf.h"
+ 	case ${target} in
+ 	    *-leon-*)
+ 		tmake_file="sparc/t-sparc sparc/t-leon"
+@@ -3380,11 +3380,11 @@ sparc-*-elf*)
+ 	esac
+ 	;;
+ sparc-*-rtems*)
+-	tm_file="${tm_file} dbxelf.h elfos.h sparc/sysv4.h sparc/sp-elf.h sparc/rtemself.h rtems.h newlib-stdint.h"
++	tm_file="${tm_file} elfos.h sparc/sysv4.h sparc/sp-elf.h sparc/rtemself.h rtems.h newlib-stdint.h"
+ 	tmake_file="${tmake_file} sparc/t-sparc sparc/t-rtems"
+ 	;;
+ sparc-*-linux*)
+-	tm_file="${tm_file} dbxelf.h elfos.h sparc/sysv4.h gnu-user.h linux.h glibc-stdint.h sparc/tso.h"
++	tm_file="${tm_file} elfos.h sparc/sysv4.h gnu-user.h linux.h glibc-stdint.h sparc/tso.h"
+ 	extra_options="${extra_options} sparc/long-double-switch.opt"
+ 	case ${target} in
+ 	    *-leon-*)
+@@ -3406,7 +3406,7 @@ sparc-*-linux*)
+ 	fi
+ 	;;
+ sparc-*-netbsdelf*)
+-	tm_file="${tm_file} dbxelf.h elfos.h sparc/sysv4.h ${nbsd_tm_file} sparc/netbsd-elf.h"
++	tm_file="${tm_file} elfos.h sparc/sysv4.h ${nbsd_tm_file} sparc/netbsd-elf.h"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	extra_options="${extra_options} sparc/long-double-switch.opt"
+ 	tmake_file="${tmake_file} sparc/t-sparc"
+@@ -3428,22 +3428,22 @@ sparc-wrs-vxworks)
+ 	tmake_file="${tmake_file} sparc/t-sparc sparc/t-vxworks"
+ 	;;
+ sparc64-*-elf*)
+-	tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h sparc/sysv4.h sparc/sp64-elf.h"
++	tm_file="${tm_file} elfos.h newlib-stdint.h sparc/sysv4.h sparc/sp64-elf.h"
+ 	extra_options="${extra_options}"
+ 	tmake_file="${tmake_file} sparc/t-sparc"
+ 	;;
+ sparc64-*-rtems*)
+-	tm_file="${tm_file} dbxelf.h elfos.h newlib-stdint.h sparc/sysv4.h sparc/sp64-elf.h sparc/rtemself.h rtems.h"
++	tm_file="${tm_file} elfos.h newlib-stdint.h sparc/sysv4.h sparc/sp64-elf.h sparc/rtemself.h rtems.h"
+ 	extra_options="${extra_options}"
+ 	tmake_file="${tmake_file} sparc/t-sparc sparc/t-rtems-64"
+ 	;;
+ sparc64-*-linux*)
+-	tm_file="sparc/biarch64.h ${tm_file} dbxelf.h elfos.h sparc/sysv4.h gnu-user.h linux.h glibc-stdint.h sparc/default64.h sparc/linux64.h sparc/tso.h"
++	tm_file="sparc/biarch64.h ${tm_file} elfos.h sparc/sysv4.h gnu-user.h linux.h glibc-stdint.h sparc/default64.h sparc/linux64.h sparc/tso.h"
+ 	extra_options="${extra_options} sparc/long-double-switch.opt"
+ 	tmake_file="${tmake_file} sparc/t-sparc sparc/t-linux64"
+ 	;;
+ sparc64-*-freebsd*|ultrasparc-*-freebsd*)
+-	tm_file="${tm_file} ${fbsd_tm_file} dbxelf.h elfos.h sparc/sysv4.h sparc/freebsd.h"
++	tm_file="${tm_file} ${fbsd_tm_file} elfos.h sparc/sysv4.h sparc/freebsd.h"
+ 	extra_options="${extra_options} sparc/long-double-switch.opt"
+ 	case "x$with_cpu" in
+ 		xultrasparc) ;;
+@@ -3454,13 +3454,13 @@ sparc64-*-freebsd*|ultrasparc-*-freebsd*)
+ 	;;
+ sparc64-*-netbsd*)
+ 	tm_file="sparc/biarch64.h ${tm_file}"
+-	tm_file="${tm_file} dbxelf.h elfos.h sparc/sysv4.h ${nbsd_tm_file} sparc/netbsd-elf.h"
++	tm_file="${tm_file} elfos.h sparc/sysv4.h ${nbsd_tm_file} sparc/netbsd-elf.h"
+ 	extra_options="${extra_options} netbsd.opt netbsd-elf.opt"
+ 	extra_options="${extra_options} sparc/long-double-switch.opt"
+ 	tmake_file="${tmake_file} sparc/t-sparc sparc/t-netbsd64"
+ 	;;
+ sparc64-*-openbsd*)
+-	tm_file="sparc/openbsd1-64.h ${tm_file} dbxelf.h elfos.h sparc/sysv4.h sparc/sp64-elf.h"
++	tm_file="sparc/openbsd1-64.h ${tm_file} elfos.h sparc/sysv4.h sparc/sp64-elf.h"
+ 	tm_file="${tm_file} openbsd.h openbsd-stdint.h openbsd-libpthread.h sparc/openbsd64.h"
+ 	extra_options="${extra_options} openbsd.opt"
+ 	extra_options="${extra_options}"
+@@ -3470,13 +3470,13 @@ sparc64-*-openbsd*)
+ 	;;
+ tic6x-*-elf)
+ 	tm_file="elfos.h ${tm_file} c6x/elf-common.h c6x/elf.h"
+-	tm_file="${tm_file} dbxelf.h tm-dwarf2.h newlib-stdint.h"
++	tm_file="${tm_file} tm-dwarf2.h newlib-stdint.h"
+ 	tmake_file="c6x/t-c6x c6x/t-c6x-elf"
+ 	use_collect2=no
+ 	;;
+ tic6x-*-uclinux)
+ 	tm_file="elfos.h ${tm_file} gnu-user.h linux.h c6x/elf-common.h c6x/uclinux-elf.h"
+-	tm_file="${tm_file} dbxelf.h tm-dwarf2.h glibc-stdint.h"
++	tm_file="${tm_file} tm-dwarf2.h glibc-stdint.h"
+ 	tm_file="${tm_file} ./sysroot-suffix.h"
+ 	tmake_file="t-sysroot-suffix t-slibgcc"
+ 	tmake_file="${tmake_file} c6x/t-c6x c6x/t-c6x-elf c6x/t-c6x-uclinux"
+@@ -3541,7 +3541,7 @@ v850*-*-*)
+ 	use_gcc_stdint=wrap
+ 	;;
+ vax-*-linux*)
+-	tm_file="${tm_file} dbxelf.h elfos.h gnu-user.h linux.h vax/elf.h vax/linux.h"
++	tm_file="${tm_file} elfos.h gnu-user.h linux.h vax/elf.h vax/linux.h"
+ 	extra_options="${extra_options} vax/elf.opt"
+ 	;;
+ vax-*-netbsdelf*)
+@@ -3550,12 +3550,12 @@ vax-*-netbsdelf*)
+ 	tm_defines="${tm_defines} CHAR_FAST8=1 SHORT_FAST16=1"
+ 	;;
+ visium-*-elf*)
+-	tm_file="dbxelf.h elfos.h ${tm_file} visium/elf.h newlib-stdint.h"
++	tm_file="elfos.h ${tm_file} visium/elf.h newlib-stdint.h"
+ 	tmake_file="visium/t-visium visium/t-crtstuff"
+ 	;;
+ xstormy16-*-elf)
+ 	# For historical reasons, the target files omit the 'x'.
+-	tm_file="dbxelf.h elfos.h newlib-stdint.h stormy16/stormy16.h"
++	tm_file="elfos.h newlib-stdint.h stormy16/stormy16.h"
+ 	tm_p_file=stormy16/stormy16-protos.h
+ 	md_file=stormy16/stormy16.md
+ 	out_file=stormy16/stormy16.cc
+diff --git a/gcc/config.in b/gcc/config.in
+index 6bb25b25b..91328572b 100644
+--- a/gcc/config.in
++++ b/gcc/config.in
+@@ -453,12 +453,6 @@
+ #endif
+ 
+ 
+-/* Define if your assembler supports the --gstabs option. */
+-#ifndef USED_FOR_TARGET
+-#undef HAVE_AS_GSTABS_DEBUG_FLAG
+-#endif
+-
+-
+ /* Define if your assembler supports the Sun syntax for cmov. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_AS_IX86_CMOV_SUN_SYNTAX
+@@ -747,12 +741,6 @@
+ #endif
+ 
+ 
+-/* Define if your assembler supports .stabs. */
+-#ifndef USED_FOR_TARGET
+-#undef HAVE_AS_STABS_DIRECTIVE
+-#endif
+-
+-
+ /* Define if your assembler and linker support thread-local storage. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_AS_TLS
+@@ -2178,13 +2166,6 @@
+ #endif
+ 
+ 
+-/* Define if your assembler supports AIX debug frame section label reference.
+-   */
+-#ifndef USED_FOR_TARGET
+-#undef HAVE_XCOFF_DWARF_EXTRAS
+-#endif
+-
+-
+ /* Define if you have a working  header file. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_ZSTD_H
+diff --git a/gcc/config/aarch64/aarch64-elf.h b/gcc/config/aarch64/aarch64-elf.h
+index 410a40b51..8e05b1f1c 100644
+--- a/gcc/config/aarch64/aarch64-elf.h
++++ b/gcc/config/aarch64/aarch64-elf.h
+@@ -144,7 +144,4 @@ ASM_MABI_SPEC
+ #undef TYPE_OPERAND_FMT
+ #define TYPE_OPERAND_FMT	"%%%s"
+ 
+-/* Stabs debug not required.  */
+-#undef DBX_DEBUGGING_INFO
+-
+ #endif /* GCC_AARCH64_ELF_H */
+diff --git a/gcc/config/alpha/alpha.cc b/gcc/config/alpha/alpha.cc
+index 0a85e66fa..66c17149d 100644
+--- a/gcc/config/alpha/alpha.cc
++++ b/gcc/config/alpha/alpha.cc
+@@ -8458,10 +8458,6 @@ alpha_output_mi_thunk_osf (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+ }
+ #endif /* TARGET_ABI_OSF */
+ 
+-/* Debugging support.  */
+-
+-#include "gstab.h"
+-
+ /* Name of the file containing the current function.  */
+ 
+ static const char *current_function_file = "";
+diff --git a/gcc/config/alpha/elf.h b/gcc/config/alpha/elf.h
+index 4447a7f24..c9cd42e69 100644
+--- a/gcc/config/alpha/elf.h
++++ b/gcc/config/alpha/elf.h
+@@ -22,7 +22,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define CC1_SPEC  "%{G*}"
+ 
+ #undef  ASM_SPEC
+-#define ASM_SPEC  "%{G*} %{relax:-relax} %{!gstabs*:-no-mdebug}%{gstabs*:-mdebug} %{mcpu=*:-m%*}"
++#define ASM_SPEC  "%{G*} %{relax:-relax} %{mcpu=*:-m%*}"
+ 
+ /* Do not output a .file directive at the beginning of the input file.  */
+  
+diff --git a/gcc/config/arc/arc.h b/gcc/config/arc/arc.h
+index 539a16620..0cb560b8a 100644
+--- a/gcc/config/arc/arc.h
++++ b/gcc/config/arc/arc.h
+@@ -1330,12 +1330,7 @@ do { \
+ 
+ /* Debugging information.  */
+ 
+-/* Generate DBX and DWARF debugging information.  */
+-#ifdef DBX_DEBUGGING_INFO
+-#undef DBX_DEBUGGING_INFO
+-#endif
+-#define DBX_DEBUGGING_INFO
+-
++/* Generate DWARF debugging information.  */
+ #ifdef DWARF2_DEBUGGING_INFO
+ #undef DWARF2_DEBUGGING_INFO
+ #endif
+@@ -1385,9 +1380,6 @@ do { \
+ 
+ #define EH_RETURN_STACKADJ_RTX   gen_rtx_REG (Pmode, 2)
+ 
+-/* Turn off splitting of long stabs.  */
+-#define DBX_CONTIN_LENGTH 0
+-
+ /* Miscellaneous.  */
+ 
+ /* Specify the machine mode that this machine uses
+diff --git a/gcc/config/arm/aout.h b/gcc/config/arm/aout.h
+index b918ad378..cfb8db52c 100644
+--- a/gcc/config/arm/aout.h
++++ b/gcc/config/arm/aout.h
+@@ -145,15 +145,6 @@
+ #define NO_DOLLAR_IN_LABEL 1
+ #endif
+ 
+-/* Generate DBX debugging information.  riscix.h will undefine this because
+-   the native assembler does not support stabs.  */
+-#define DBX_DEBUGGING_INFO 1
+-
+-/* Acorn dbx moans about continuation chars, so don't use any.  */
+-#ifndef DBX_CONTIN_LENGTH
+-#define DBX_CONTIN_LENGTH  0
+-#endif
+-
+ /* Output a function label definition.  */
+ #ifndef ASM_DECLARE_FUNCTION_NAME
+ #define ASM_DECLARE_FUNCTION_NAME(STREAM, NAME, DECL)	\
+diff --git a/gcc/config/arm/netbsd-elf.h b/gcc/config/arm/netbsd-elf.h
+index d239c734c..ef1bba280 100644
+--- a/gcc/config/arm/netbsd-elf.h
++++ b/gcc/config/arm/netbsd-elf.h
+@@ -85,9 +85,6 @@
+ #undef PTRDIFF_TYPE
+ #define PTRDIFF_TYPE "long int"
+ 
+-/* We don't have any limit on the length as out debugger is GDB.  */
+-#undef DBX_CONTIN_LENGTH
+-
+ /* NetBSD does its profiling differently to the Acorn compiler. We
+    don't need a word following the mcount call; and to skip it
+    requires either an assembly stub or use of fomit-frame-pointer when
+diff --git a/gcc/config/darwin.h b/gcc/config/darwin.h
+index 51e257dc6..0485b6b45 100644
+--- a/gcc/config/darwin.h
++++ b/gcc/config/darwin.h
+@@ -396,10 +396,10 @@ extern GTY(()) int darwin_ms_struct;
+ 
+ #define DSYMUTIL_SPEC \
+   "%{!c:%{!E:%{!S:%{!r:%{!M:%{!MM:%{!fsyntax-only:%{!fdump=*:\
+-     %{g*:%{!gctf:%{!gbtf:%{!gstabs*:%{%:debug-level-gt(0): -idsym \
++     %{g*:%{!gctf:%{!gbtf:%{%:debug-level-gt(0): -idsym \
+        %{.c|.cc|.C|.cpp|.cp|.c++|.cxx|.CPP|.m|.mm|.s|.f|.f90|\
+ 	 .f95|.f03|.f77|.for|.F|.F90|.F95|.F03|.d: -dsym }\
+-      }}}}}\
++      }}}}\
+    }}}}}}}}"
+ 
+ #define LINK_COMMAND_SPEC LINK_COMMAND_SPEC_A DSYMUTIL_SPEC
+@@ -594,14 +594,7 @@ extern GTY(()) int darwin_ms_struct;
+ "%{static} -arch %(darwin_arch) " \
+ ASM_OPTIONS ASM_MMACOSX_VERSION_MIN_SPEC
+ 
+-#ifdef HAVE_AS_STABS_DIRECTIVE
+-/* We only pass a debug option to the assembler if that supports stabs, since
+-   dwarf is not uniformly supported in the assemblers.  */
+-#define ASM_DEBUG_SPEC  "%{g*:%{%:debug-level-gt(0):%{!gdwarf*:--gstabs}}}"
+-#else
+ #define ASM_DEBUG_SPEC  ""
+-#endif
+-
+ #undef  ASM_DEBUG_OPTION_SPEC
+ #define ASM_DEBUG_OPTION_SPEC	""
+ 
+@@ -615,10 +608,6 @@ ASM_OPTIONS ASM_MMACOSX_VERSION_MIN_SPEC
+ #define DWARF2_DEBUGGING_INFO 1
+ #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+ 
+-#ifdef HAVE_AS_STABS_DIRECTIVE
+-#define DBX_DEBUGGING_INFO 1
+-#endif
+-
+ #define DEBUG_FRAME_SECTION	  "__DWARF,__debug_frame,regular,debug"
+ #define DEBUG_INFO_SECTION	  "__DWARF,__debug_info,regular,debug"
+ #define DEBUG_ABBREV_SECTION	  "__DWARF,__debug_abbrev,regular,debug"
+@@ -650,18 +639,6 @@ ASM_OPTIONS ASM_MMACOSX_VERSION_MIN_SPEC
+                                ? "__DWARF,__debug_gnu_pubt,regular,debug" \
+                                : "__DWARF,__debug_pubtypes,regular,debug")
+ 
+-/* When generating stabs debugging, use N_BINCL entries.  */
+-
+-#define DBX_USE_BINCL
+-
+-/* There is no limit to the length of stabs strings.  */
+-
+-#define DBX_CONTIN_LENGTH 0
+-
+-/* gdb needs a null N_SO at the end of each file for scattered loading.  */
+-
+-#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+-
+ /* GCC's definition of 'one_only' is the same as its definition of 'weak'.  */
+ #define MAKE_DECL_ONE_ONLY(DECL) (DECL_WEAK (DECL) = 1)
+ 
+diff --git a/gcc/config/dbxcoff.h b/gcc/config/dbxcoff.h
+deleted file mode 100644
+index 02b78c6bf..000000000
+--- a/gcc/config/dbxcoff.h
++++ /dev/null
+@@ -1,56 +0,0 @@
+-/* Definitions needed when using stabs embedded in COFF sections.
+-   Copyright (C) 1996-2022 Free Software Foundation, Inc.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify
+-it under the terms of the GNU General Public License as published by
+-the Free Software Foundation; either version 3, or (at your option)
+-any later version.
+-
+-GCC is distributed in the hope that it will be useful,
+-but WITHOUT ANY WARRANTY; without even the implied warranty of
+-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-GNU General Public License for more details.
+-
+-You should have received a copy of the GNU General Public License
+-along with GCC; see the file COPYING3.  If not see
+-.  */
+-
+-/* This file may be included by any COFF target which wishes to
+-   support -gstabs generating stabs in sections, as produced by gas
+-   and understood by gdb.  */
+-
+-/* Output DBX (stabs) debugging information if doing -gstabs.  */
+-
+-#define DBX_DEBUGGING_INFO 1
+-
+-/* Be function-relative for block and source line stab directives.  */
+-
+-#define DBX_BLOCKS_FUNCTION_RELATIVE 1
+-
+-/* but, to make this work, functions must appear prior to line info.  */
+-
+-#define DBX_FUNCTION_FIRST
+-
+-/* Generate a blank trailing N_SO to mark the end of the .o file, since
+-   we can't depend upon the linker to mark .o file boundaries with
+-   embedded stabs.  */
+-
+-#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+-
+-/* Like block addresses, stabs line numbers are relative to the
+-   current function.  */
+-
+-#define DBX_LINES_FUNCTION_RELATIVE 1
+-
+-/* When generating stabs debugging, use N_BINCL entries.  */
+-
+-#undef DBX_USE_BINCL
+-#define DBX_USE_BINCL
+-
+-/* There is no limit to the length of stabs strings.  */
+-
+-#ifndef DBX_CONTIN_LENGTH
+-#define DBX_CONTIN_LENGTH 0
+-#endif
+diff --git a/gcc/config/dbxelf.h b/gcc/config/dbxelf.h
+deleted file mode 100644
+index 4b90e95bc..000000000
+--- a/gcc/config/dbxelf.h
++++ /dev/null
+@@ -1,68 +0,0 @@
+-/* Definitions needed when using stabs embedded in ELF sections.
+-   Copyright (C) 1999-2022 Free Software Foundation, Inc.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify
+-it under the terms of the GNU General Public License as published by
+-the Free Software Foundation; either version 3, or (at your option)
+-any later version.
+-
+-GCC is distributed in the hope that it will be useful,
+-but WITHOUT ANY WARRANTY; without even the implied warranty of
+-MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+-GNU General Public License for more details.
+-
+-Under Section 7 of GPL version 3, you are granted additional
+-permissions described in the GCC Runtime Library Exception, version
+-3.1, as published by the Free Software Foundation.
+-
+-You should have received a copy of the GNU General Public License and
+-a copy of the GCC Runtime Library Exception along with this program;
+-see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+-.  */
+-
+-/* This file may be included by any ELF target which wishes to
+-   support -gstabs generating stabs in sections, as produced by gas
+-   and understood by gdb.  */
+-
+-#ifndef GCC_DBX_ELF_H
+-#define GCC_DBX_ELF_H
+-
+-/* Output DBX (stabs) debugging information if doing -gstabs.  */
+-
+-#define DBX_DEBUGGING_INFO 1
+-
+-/* Make LBRAC and RBRAC addresses relative to the start of the
+-   function.  The native Solaris stabs debugging format works this
+-   way, gdb expects it, and it reduces the number of relocation
+-   entries...  */
+-
+-#define DBX_BLOCKS_FUNCTION_RELATIVE 1
+-
+-/* ... but, to make this work, functions must appear prior to line info.  */
+-
+-#define DBX_FUNCTION_FIRST
+-
+-/* When generating stabs debugging, use N_BINCL entries.  */
+-
+-#define DBX_USE_BINCL
+-
+-/* There is no limit to the length of stabs strings.  */
+-
+-#ifndef DBX_CONTIN_LENGTH
+-#define DBX_CONTIN_LENGTH 0
+-#endif
+-
+-/* Like block addresses, stabs line numbers are relative to the
+-   current function.  */
+-
+-#define DBX_LINES_FUNCTION_RELATIVE 1
+-
+-/* Generate a blank trailing N_SO to mark the end of the .o file, since
+-   we can't depend upon the linker to mark .o file boundaries with
+-   embedded stabs.  */
+-
+-#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+-
+-#endif /* ! GCC_DBX_ELF_H */
+diff --git a/gcc/config/epiphany/epiphany.h b/gcc/config/epiphany/epiphany.h
+index 8c723845a..7c6a7f33d 100644
+--- a/gcc/config/epiphany/epiphany.h
++++ b/gcc/config/epiphany/epiphany.h
+@@ -795,14 +795,9 @@ do \
+ 
+ /* Debugging information.  */
+ 
+-/* Generate DBX and DWARF debugging information.  */
+-#define DBX_DEBUGGING_INFO 1
+-
++/* Generate DWARF debugging information.  */
+ #undef PREFERRED_DEBUGGING_TYPE
+ #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+-
+-/* Turn off splitting of long stabs.  */
+-#define DBX_CONTIN_LENGTH 0
+ 
+ /* Miscellaneous.  */
+ 
+diff --git a/gcc/config/i386/bsd.h b/gcc/config/i386/bsd.h
+index d19ad2773..98d2810e9 100644
+--- a/gcc/config/i386/bsd.h
++++ b/gcc/config/i386/bsd.h
+@@ -91,9 +91,3 @@ along with GCC; see the file COPYING3.  If not see
+ /* The prefix to add to user-visible assembler symbols.  */
+ 
+ #define USER_LABEL_PREFIX "_"
+-
+-/* Sequent has some changes in the format of DBX symbols.  */
+-#define DBX_NO_XREFS 1
+-
+-/* Don't split DBX symbols into continuations.  */
+-#define DBX_CONTIN_LENGTH 0
+diff --git a/gcc/config/i386/gas.h b/gcc/config/i386/gas.h
+index 3bac8eb68..e0ffc75dc 100644
+--- a/gcc/config/i386/gas.h
++++ b/gcc/config/i386/gas.h
+@@ -36,10 +36,6 @@ along with GCC; see the file COPYING3.  If not see
+  * people who want both form will have to compile twice.
+  */
+ 
+-/* these come from i386/bsd.h, but are specific to sequent */
+-#undef DBX_NO_XREFS
+-#undef DBX_CONTIN_LENGTH
+-
+ /* Output #ident as a .ident.  */
+ 
+ #undef TARGET_ASM_OUTPUT_IDENT
+diff --git a/gcc/config/ia64/ia64.h b/gcc/config/ia64/ia64.h
+index bd0ef35e9..69646625e 100644
+--- a/gcc/config/ia64/ia64.h
++++ b/gcc/config/ia64/ia64.h
+@@ -1426,8 +1426,6 @@ do {									\
+ 
+ /* Specific Options for DBX Output.  */
+ 
+-/* This is handled by dbxelf.h.  */
+-
+ 
+ /* Open ended Hooks for DBX Output.  */
+ 
+diff --git a/gcc/config/ia64/sysv4.h b/gcc/config/ia64/sysv4.h
+index 045752af0..046c51101 100644
+--- a/gcc/config/ia64/sysv4.h
++++ b/gcc/config/ia64/sysv4.h
+@@ -30,9 +30,6 @@ see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
+ #undef PREFERRED_DEBUGGING_TYPE
+ #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+ 
+-/* Stabs does not work properly for 64-bit targets.  */
+-#undef DBX_DEBUGGING_INFO
+-
+ /* Various pseudo-ops for which the Intel assembler uses non-standard
+    definitions.  */
+ 
+diff --git a/gcc/config/m68k/linux.h b/gcc/config/m68k/linux.h
+index 05661bf35..0c66c91f8 100644
+--- a/gcc/config/m68k/linux.h
++++ b/gcc/config/m68k/linux.h
+@@ -146,10 +146,6 @@ along with GCC; see the file COPYING3.  If not see
+     fprintf (FILE, "\tjbsr _mcount\n");					\
+ }
+ 
+-/* Do not break .stabs pseudos into continuations.  */
+-
+-#define DBX_CONTIN_LENGTH 0
+-
+ /* 1 if N is a possible register number for a function value.  For
+    m68k/SVR4 allow d0, a0, or fp0 as return registers, for integral,
+    pointer, or floating types, respectively.  Reject fp0 if not using
+diff --git a/gcc/config/m68k/openbsd.h b/gcc/config/m68k/openbsd.h
+index 63c57fdcc..ab998177d 100644
+--- a/gcc/config/m68k/openbsd.h
++++ b/gcc/config/m68k/openbsd.h
+@@ -60,18 +60,6 @@ along with GCC; see the file COPYING3.  If not see
+ /* Every structure or union's size must be a multiple of 2 bytes.  */
+ #define STRUCTURE_SIZE_BOUNDARY 16
+ 
+-/* Specific options for DBX Output.  */
+-
+-/* This is BSD, so it wants DBX format.  */
+-#define DBX_DEBUGGING_INFO 1
+-
+-/* Do not break .stabs pseudos into continuations.  */
+-#define DBX_CONTIN_LENGTH 0
+-
+-/* This is the char to use for continuation (in case we need to turn
+-   continuation back on).  */
+-#define DBX_CONTIN_CHAR '?'
+-
+ /* Stack & calling: aggregate returns.  */
+ 
+ /* ??? This is traditional, but quite possibly wrong.  It appears to
+diff --git a/gcc/config/mips/mips.cc b/gcc/config/mips/mips.cc
+index e64928f41..02d11ddbf 100644
+--- a/gcc/config/mips/mips.cc
++++ b/gcc/config/mips/mips.cc
+@@ -445,7 +445,6 @@ int num_source_filenames;
+ const char *current_function_file = "";
+ 
+ /* Arrays that map GCC register numbers to debugger register numbers.  */
+-int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
+ int mips_dwarf_regno[FIRST_PSEUDO_REGISTER];
+ 
+ /* Information about the current function's epilogue, used only while
+@@ -9595,10 +9594,6 @@ mips_output_filename (FILE *stream, const char *name)
+       output_quoted_string (stream, name);
+       putc ('\n', stream);
+     }
+-  /* If we are emitting stabs, let dbxout.cc handle this (except for
+-     the mips_output_filename_first_time case).  */
+-  else if (write_symbols == DBX_DEBUG)
+-    return;
+   else if (name != current_function_file
+ 	   && strcmp (name, current_function_file) != 0)
+     {
+@@ -20505,24 +20500,13 @@ mips_option_override (void)
+ 
+   for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+     {
+-      mips_dbx_regno[i] = IGNORED_DWARF_REGNUM;
+       if (GP_REG_P (i) || FP_REG_P (i) || ALL_COP_REG_P (i))
+ 	mips_dwarf_regno[i] = i;
+       else
+ 	mips_dwarf_regno[i] = INVALID_REGNUM;
+     }
+ 
+-  start = GP_DBX_FIRST - GP_REG_FIRST;
+-  for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
+-    mips_dbx_regno[i] = i + start;
+-
+-  start = FP_DBX_FIRST - FP_REG_FIRST;
+-  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
+-    mips_dbx_regno[i] = i + start;
+-
+   /* Accumulator debug registers use big-endian ordering.  */
+-  mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
+-  mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
+   mips_dwarf_regno[HI_REGNUM] = MD_REG_FIRST + 0;
+   mips_dwarf_regno[LO_REGNUM] = MD_REG_FIRST + 1;
+   for (i = DSP_ACC_REG_FIRST; i <= DSP_ACC_REG_LAST; i += 2)
+diff --git a/gcc/config/mips/mips.h b/gcc/config/mips/mips.h
+index 02562d3b6..fa0676acd 100644
+--- a/gcc/config/mips/mips.h
++++ b/gcc/config/mips/mips.h
+@@ -1380,9 +1380,7 @@ struct mips_cpu_info {
+ #ifndef SUBTARGET_ASM_DEBUGGING_SPEC
+ #define SUBTARGET_ASM_DEBUGGING_SPEC "\
+ %{g} %{g0} %{g1} %{g2} %{g3} \
+-%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3} \
+-%{gstabs:-g} %{gstabs0:-g0} %{gstabs1:-g1} %{gstabs2:-g2} %{gstabs3:-g3} \
+-%{gstabs+:-g} %{gstabs+0:-g0} %{gstabs+1:-g1} %{gstabs+2:-g2} %{gstabs+3:-g3}"
++%{ggdb:-g} %{ggdb0:-g0} %{ggdb1:-g1} %{ggdb2:-g2} %{ggdb3:-g3}"
+ #endif
+ 
+ /* FP_ASM_SPEC represents the floating-point options that must be passed
+@@ -1504,7 +1502,6 @@ FP_ASM_SPEC "\
+ #define SUBTARGET_EXTRA_SPECS
+ #endif
+ 
+-#define DBX_DEBUGGING_INFO 1		/* generate stabs (OSF/rose) */
+ #define DWARF2_DEBUGGING_INFO 1         /* dwarf2 debugging info */
+ 
+ #ifndef PREFERRED_DEBUGGING_TYPE
+@@ -1544,14 +1541,6 @@ FP_ASM_SPEC "\
+ #define USER_LABEL_PREFIX	""
+ #endif
+ 
+-/* On Sun 4, this limit is 2048.  We use 1500 to be safe,
+-   since the length can run past this up to a continuation point.  */
+-#undef DBX_CONTIN_LENGTH
+-#define DBX_CONTIN_LENGTH 1500
+-
+-/* How to renumber registers for dbx and gdb.  */
+-#define DBX_REGISTER_NUMBER(REGNO) mips_dbx_regno[REGNO]
+-
+ /* The mapping from gcc register number to DWARF 2 CFA column number.  */
+ #define DWARF_FRAME_REGNUM(REGNO) mips_dwarf_regno[REGNO]
+ 
+@@ -1865,7 +1854,6 @@ FP_ASM_SPEC "\
+ #define GP_REG_FIRST 0
+ #define GP_REG_LAST  31
+ #define GP_REG_NUM   (GP_REG_LAST - GP_REG_FIRST + 1)
+-#define GP_DBX_FIRST 0
+ #define K0_REG_NUM   (GP_REG_FIRST + 26)
+ #define K1_REG_NUM   (GP_REG_FIRST + 27)
+ #define KERNEL_REG_P(REGNO)	(IN_RANGE (REGNO, K0_REG_NUM, K1_REG_NUM))
+@@ -1873,12 +1861,10 @@ FP_ASM_SPEC "\
+ #define FP_REG_FIRST 32
+ #define FP_REG_LAST  63
+ #define FP_REG_NUM   (FP_REG_LAST - FP_REG_FIRST + 1)
+-#define FP_DBX_FIRST ((write_symbols == DBX_DEBUG) ? 38 : 32)
+ 
+ #define MD_REG_FIRST 64
+ #define MD_REG_LAST  65
+ #define MD_REG_NUM   (MD_REG_LAST - MD_REG_FIRST + 1)
+-#define MD_DBX_FIRST (FP_DBX_FIRST + FP_REG_NUM)
+ 
+ #define MSA_REG_FIRST FP_REG_FIRST
+ #define MSA_REG_LAST  FP_REG_LAST
+@@ -3217,7 +3203,6 @@ extern int num_source_filenames;	/* current .file # */
+ extern struct mips_asm_switch mips_noreorder;
+ extern struct mips_asm_switch mips_nomacro;
+ extern struct mips_asm_switch mips_noat;
+-extern int mips_dbx_regno[];
+ extern int mips_dwarf_regno[];
+ extern bool mips_split_p[];
+ extern bool mips_split_hi_p[];
+diff --git a/gcc/config/nvptx/nvptx.cc b/gcc/config/nvptx/nvptx.cc
+index e4297e2d6..3634a49de 100644
+--- a/gcc/config/nvptx/nvptx.cc
++++ b/gcc/config/nvptx/nvptx.cc
+@@ -52,7 +52,6 @@
+ #include "tm-preds.h"
+ #include "tm-constrs.h"
+ #include "langhooks.h"
+-#include "dbxout.h"
+ #include "cfgrtl.h"
+ #include "gimple.h"
+ #include "stor-layout.h"
+diff --git a/gcc/config/openbsd.h b/gcc/config/openbsd.h
+index 54be22254..aa90ef734 100644
+--- a/gcc/config/openbsd.h
++++ b/gcc/config/openbsd.h
+@@ -150,17 +150,6 @@ while (0)
+ #undef TARGET_LIBC_HAS_FUNCTION
+ #define TARGET_LIBC_HAS_FUNCTION default_libc_has_function
+ 
+-
+-/* Runtime target specification.  */
+-
+-/* Miscellaneous parameters.  */
+-
+-/* Controlling debugging info: dbx options.  */
+-
+-/* Don't use the `xsTAG;' construct in DBX output; OpenBSD systems that
+-   use DBX don't support it.  */
+-#define DBX_NO_XREFS
+-
+ 
+ /* - we use . - _func instead of a local label,
+    - we put extra spaces in expressions such as 
+diff --git a/gcc/config/pa/pa-64.h b/gcc/config/pa/pa-64.h
+index bf505768a..5157b7f30 100644
+--- a/gcc/config/pa/pa-64.h
++++ b/gcc/config/pa/pa-64.h
+@@ -65,10 +65,6 @@ along with GCC; see the file COPYING3.  If not see
+ #undef LONG_DOUBLE_TYPE_SIZE
+ #define LONG_DOUBLE_TYPE_SIZE 128
+ 
+-/* Temporary until we figure out what to do with those *(&@$ 32bit
+-   relocs which appear in stabs.  */
+-#undef DBX_DEBUGGING_INFO
+-
+ /* ?!? This needs to be made compile-time selectable.
+ 
+    The PA64 runtime model has arguments that grow to higher addresses
+diff --git a/gcc/config/pa/pa.h b/gcc/config/pa/pa.h
+index 1ce6635ae..bafdf6021 100644
+--- a/gcc/config/pa/pa.h
++++ b/gcc/config/pa/pa.h
+@@ -130,8 +130,6 @@ extern unsigned long total_code_bytes;
+    and the old mnemonics are dialect zero.  */
+ #define ASSEMBLER_DIALECT (TARGET_PA_20 ? 1 : 0)
+ 
+-/* Override some settings from dbxelf.h.  */
+-
+ /* We do not have to be compatible with dbx, so we enable gdb extensions
+    by default.  */
+ #define DEFAULT_GDB_EXTENSIONS 1
+@@ -139,14 +137,6 @@ extern unsigned long total_code_bytes;
+ /* Select dwarf2 as the preferred debug format.  */
+ #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+ 
+-/* This used to be zero (no max length), but big enums and such can
+-   cause huge strings which killed gas.
+-
+-   We also have to avoid lossage in dbxout.cc -- it does not compute the
+-   string size accurately, so we are real conservative here.  */
+-#undef DBX_CONTIN_LENGTH
+-#define DBX_CONTIN_LENGTH 3000
+-
+ /* GDB always assumes the current function's frame begins at the value
+    of the stack pointer upon entry to the current function.  Accessing
+    local variables and parameters passed on the stack is done using the
+diff --git a/gcc/config/pa/som.h b/gcc/config/pa/som.h
+index d2510e9b9..3efae0e1f 100644
+--- a/gcc/config/pa/som.h
++++ b/gcc/config/pa/som.h
+@@ -21,21 +21,6 @@ along with GCC; see the file COPYING3.  If not see
+ #undef TARGET_SOM
+ #define TARGET_SOM 1
+ 
+-/* With SOM we can only do STABS.  */
+-#undef PREFERRED_DEBUGGING_TYPE
+-#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+-
+-/* We do not use BINCL stabs in SOM.
+-   ??? If it does not hurt, we probably should to avoid useless divergence
+-   from other embedded stabs implementations.  */
+-#undef DBX_USE_BINCL
+-
+-#define DBX_LINES_FUNCTION_RELATIVE 1
+-
+-/* gdb needs a null N_SO at the end of each file for scattered loading.  */
+-
+-#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+-
+ /* HPUX has a program 'chatr' to list the dependencies of dynamically
+    linked executables and shared libraries.  */
+ #define LDD_SUFFIX "chatr"
+diff --git a/gcc/config/pdp11/pdp11.cc b/gcc/config/pdp11/pdp11.cc
+index f7482df18..380223439 100644
+--- a/gcc/config/pdp11/pdp11.cc
++++ b/gcc/config/pdp11/pdp11.cc
+@@ -44,7 +44,6 @@ along with GCC; see the file COPYING3.  If not see
+ #include "calls.h"
+ #include "expr.h"
+ #include "builtins.h"
+-#include "dbxout.h"
+ #include "explow.h"
+ #include "expmed.h"
+ 
+diff --git a/gcc/config/pdp11/pdp11.h b/gcc/config/pdp11/pdp11.h
+index b7d66c3bc..1fa52e8c7 100644
+--- a/gcc/config/pdp11/pdp11.h
++++ b/gcc/config/pdp11/pdp11.h
+@@ -51,12 +51,6 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ 
+ /* Generate DBX debugging information.  */
+-
+-#define DBX_DEBUGGING_INFO
+-
+-#undef PREFERRED_DEBUGGING_TYPE
+-#define PREFERRED_DEBUGGING_TYPE DBX_DEBUG
+-
+ #define TARGET_40_PLUS		(TARGET_40 || TARGET_45)
+ #define TARGET_10		(! TARGET_40_PLUS)
+ 
+@@ -489,9 +483,6 @@ extern int current_first_parm_offset;
+ /* Nonzero if access to memory by byte is no faster than by word.  */
+ #define SLOW_BYTE_ACCESS 1
+ 
+-/* Do not break .stabs pseudos into continuations.  */
+-#define DBX_CONTIN_LENGTH 0
+-
+ /* Give a comparison code (EQ, NE etc) and the first operand of a COMPARE,
+    return the mode to be used for the comparison.  */
+ 
+diff --git a/gcc/config/rs6000/rs6000-builtin.cc b/gcc/config/rs6000/rs6000-builtin.cc
+index cc385a2b2..3bf3b4779 100644
+--- a/gcc/config/rs6000/rs6000-builtin.cc
++++ b/gcc/config/rs6000/rs6000-builtin.cc
+@@ -51,9 +51,6 @@
+ #include "tree-ssa-propagate.h"
+ #include "builtins.h"
+ #include "tree-vector-builder.h"
+-#if TARGET_XCOFF
+-#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
+-#endif
+ #include "ppc-auxv.h"
+ #include "rs6000-internal.h"
+ 
+diff --git a/gcc/config/rs6000/rs6000-call.cc b/gcc/config/rs6000/rs6000-call.cc
+index d27df7b25..4dfe033a4 100644
+--- a/gcc/config/rs6000/rs6000-call.cc
++++ b/gcc/config/rs6000/rs6000-call.cc
+@@ -61,20 +61,12 @@
+ #include "tree-ssa-propagate.h"
+ #include "builtins.h"
+ #include "tree-vector-builder.h"
+-#if TARGET_XCOFF
+-#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
+-#endif
+ #include "ppc-auxv.h"
+ #include "targhooks.h"
+ #include "opts.h"
+ 
+ #include "rs6000-internal.h"
+ 
+-#if TARGET_MACHO
+-#include "gstab.h"  /* for N_SLINE */
+-#include "dbxout.h" /* dbxout_ */
+-#endif
+-
+ #ifndef TARGET_PROFILE_KERNEL
+ #define TARGET_PROFILE_KERNEL 0
+ #endif
+diff --git a/gcc/config/rs6000/rs6000-logue.cc b/gcc/config/rs6000/rs6000-logue.cc
+index a868ede24..a11d020cc 100644
+--- a/gcc/config/rs6000/rs6000-logue.cc
++++ b/gcc/config/rs6000/rs6000-logue.cc
+@@ -47,10 +47,6 @@
+ #include "diagnostic-core.h"
+ #include "alias.h"
+ #include "rs6000-internal.h"
+-#if TARGET_MACHO
+-#include "gstab.h"  /* for N_SLINE */
+-#include "dbxout.h" /* dbxout_ */
+-#endif
+ 
+ static int rs6000_ra_ever_killed (void);
+ static void is_altivec_return_reg (rtx, void *);
+@@ -5144,10 +5140,6 @@ macho_branch_islands (void)
+ 	}
+       strcpy (tmp_buf, "\n");
+       strcat (tmp_buf, label);
+-#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+-      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+-	dbxout_stabd (N_SLINE, bi->line_number);
+-#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+       if (flag_pic)
+ 	{
+ 	  strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
+@@ -5181,10 +5173,6 @@ macho_branch_islands (void)
+ 	  strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
+ 	}
+       output_asm_insn (tmp_buf, 0);
+-#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+-      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+-	dbxout_stabd (N_SLINE, bi->line_number);
+-#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+       branch_islands->pop ();
+     }
+ }
+diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
+index d9a4c3dff..0b75861bb 100644
+--- a/gcc/config/rs6000/rs6000.cc
++++ b/gcc/config/rs6000/rs6000.cc
+@@ -75,9 +75,6 @@
+ #include "ipa-prop.h"
+ #include "ipa-fnsummary.h"
+ #include "except.h"
+-#if TARGET_XCOFF
+-#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
+-#endif
+ #include "case-cfn-macros.h"
+ #include "ppc-auxv.h"
+ #include "rs6000-internal.h"
+@@ -3899,12 +3896,6 @@ rs6000_option_override_internal (bool global_init_p)
+   if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
+     rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
+ 
+-#ifdef XCOFF_DEBUGGING_INFO
+-  /* For AIX default to 64-bit DWARF.  */
+-  if (!OPTION_SET_P (dwarf_offset_size))
+-    dwarf_offset_size = POINTER_SIZE_UNITS;
+-#endif
+-
+   /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
+      off all of the options that depend on those flags.  */
+   ignore_masks = rs6000_disable_incompatible_switches ();
+@@ -21022,9 +21013,14 @@ rs6000_elf_file_end (void)
+ 
+ #if TARGET_XCOFF
+ 
+-#ifndef HAVE_XCOFF_DWARF_EXTRAS
+-#define HAVE_XCOFF_DWARF_EXTRAS 0
+-#endif
++/* Names of bss and data sections.  These should be unique names for each
++   compilation unit.  */
++
++char *xcoff_bss_section_name;
++char *xcoff_private_data_section_name;
++char *xcoff_private_rodata_section_name;
++char *xcoff_tls_data_section_name;
++char *xcoff_read_only_section_name;
+ 
+ static enum unwind_info_type
+ rs6000_xcoff_debug_unwind_info (void)
+@@ -21539,9 +21535,7 @@ rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
+ 							&data, true);
+   if (!DECL_IGNORED_P (decl))
+     {
+-      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
+-	xcoffout_declare_function (file, decl, buffer);
+-      else if (dwarf_debuginfo_p ())
++      if (dwarf_debuginfo_p ())
+ 	{
+ 	  name = (*targetm.strip_name_encoding) (name);
+ 	  fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
+diff --git a/gcc/config/rs6000/sysv4.h b/gcc/config/rs6000/sysv4.h
+index 7e2519de5..c8b7eb633 100644
+--- a/gcc/config/rs6000/sysv4.h
++++ b/gcc/config/rs6000/sysv4.h
+@@ -504,9 +504,6 @@ extern int fixuplabelno;
+ #undef  PREFERRED_DEBUGGING_TYPE
+ #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+ 
+-/* Historically we have also supported stabs debugging.  */
+-#define DBX_DEBUGGING_INFO 1
+-
+ #define TARGET_ENCODE_SECTION_INFO  rs6000_elf_encode_section_info
+ #define TARGET_IN_SMALL_DATA_P  rs6000_elf_in_small_data_p
+ 
+@@ -515,11 +512,6 @@ extern int fixuplabelno;
+ #define	RS6000_OUTPUT_BASENAME(FILE, NAME)	\
+     assemble_name (FILE, NAME)
+ 
+-/* We have to output the stabs for the function name *first*, before
+-   outputting its label.  */
+-
+-#define	DBX_FUNCTION_FIRST
+-
+ /* This is the end of what might become sysv4dbx.h.  */
+ 
+ #define TARGET_OS_SYSV_CPP_BUILTINS()		\
+diff --git a/gcc/config/rs6000/xcoff.h b/gcc/config/rs6000/xcoff.h
+index cd0f99cb9..bafc57df5 100644
+--- a/gcc/config/rs6000/xcoff.h
++++ b/gcc/config/rs6000/xcoff.h
+@@ -21,9 +21,6 @@
+ 
+ #define TARGET_OBJECT_FORMAT OBJECT_XCOFF
+ 
+-/* The RS/6000 uses the XCOFF format.  */
+-#define XCOFF_DEBUGGING_INFO 1
+-
+ /* Define if the object format being used is COFF or a superset.  */
+ #define OBJECT_FORMAT_COFF
+ 
+diff --git a/gcc/config/rx/rx.h b/gcc/config/rx/rx.h
+index ce9c2ff12..77f84039c 100644
+--- a/gcc/config/rx/rx.h
++++ b/gcc/config/rx/rx.h
+@@ -623,7 +623,6 @@ typedef unsigned int CUMULATIVE_ARGS;
+ #undef  PREFERRED_DEBUGGING_TYPE
+ #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+ 
+-#define DBX_DEBUGGING_INFO 1
+ #define DWARF2_DEBUGGING_INFO 1
+ 
+ #define INCOMING_FRAME_SP_OFFSET		4
+diff --git a/gcc/config/sh/elf.h b/gcc/config/sh/elf.h
+index afb3bc353..f0fd19f88 100644
+--- a/gcc/config/sh/elf.h
++++ b/gcc/config/sh/elf.h
+@@ -67,9 +67,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define ASM_GENERATE_INTERNAL_LABEL(STRING, PREFIX, NUM) \
+   sprintf ((STRING), "*%s%s%ld", LOCAL_LABEL_PREFIX, (PREFIX), (long)(NUM))
+ 
+-#define DBX_LINES_FUNCTION_RELATIVE 1
+-#define DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+-
+ #undef STARTFILE_SPEC
+ #define STARTFILE_SPEC \
+   "%{!shared: crt1.o%s} crti.o%s \
+diff --git a/gcc/config/sol2.h b/gcc/config/sol2.h
+index e22c70c45..fc7033082 100644
+--- a/gcc/config/sol2.h
++++ b/gcc/config/sol2.h
+@@ -498,11 +498,7 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ #define AS_NEEDS_DASH_FOR_PIPED_INPUT
+ 
+-/* The Solaris assembler cannot grok .stabd directives.  */
+-#undef NO_DBX_BNSYM_ENSYM
+-#define NO_DBX_BNSYM_ENSYM 1
+ #endif
+-
+ /* Solaris has an implementation of __enable_execute_stack.  */
+ #define HAVE_ENABLE_EXECUTE_STACK
+ 
+diff --git a/gcc/config/sparc/freebsd.h b/gcc/config/sparc/freebsd.h
+index 98319c528..73850a31f 100644
+--- a/gcc/config/sparc/freebsd.h
++++ b/gcc/config/sparc/freebsd.h
+@@ -109,12 +109,6 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ /************************[  Debugger stuff  ]*********************************/
+ 
+-/* This is the char to use for continuation (in case we need to turn
+-   continuation back on).  */
+-
+-#undef  DBX_CONTIN_CHAR
+-#define DBX_CONTIN_CHAR	'?'
+-
+ /* DWARF bits.  */
+ 
+ /* Follow Irix 6 and not the Dwarf2 draft in using 64-bit offsets. 
+diff --git a/gcc/config/sparc/netbsd-elf.h b/gcc/config/sparc/netbsd-elf.h
+index af194df3e..bee71fc18 100644
+--- a/gcc/config/sparc/netbsd-elf.h
++++ b/gcc/config/sparc/netbsd-elf.h
+@@ -46,11 +46,6 @@ along with GCC; see the file COPYING3.  If not see
+ #undef PTRDIFF_TYPE
+ #define PTRDIFF_TYPE "long int"
+ 
+-/* This is the char to use for continuation (in case we need to turn
+-   continuation back on).  */
+-#undef DBX_CONTIN_CHAR
+-#define DBX_CONTIN_CHAR '?'
+-
+ #undef  LOCAL_LABEL_PREFIX
+ #define LOCAL_LABEL_PREFIX  "."
+ 
+diff --git a/gcc/config/sparc/sparc.h b/gcc/config/sparc/sparc.h
+index 91917c3ea..155e1da7a 100644
+--- a/gcc/config/sparc/sparc.h
++++ b/gcc/config/sparc/sparc.h
+@@ -1506,14 +1506,6 @@ do {									   \
+ #define ADDITIONAL_REGISTER_NAMES \
+ {{"ccr", SPARC_ICC_REG}, {"cc", SPARC_ICC_REG}}
+ 
+-/* On Sun 4, this limit is 2048.  We use 1000 to be safe, since the length
+-   can run past this up to a continuation point.  Once we used 1500, but
+-   a single entry in C++ can run more than 500 bytes, due to the length of
+-   mangled symbol names.  dbxout.cc should really be fixed to do
+-   continuations when they are actually needed instead of trying to
+-   guess...  */
+-#define DBX_CONTIN_LENGTH 1000
+-
+ /* This is how to output a command to make the user-level label named NAME
+    defined for reference from other files.  */
+ 
+diff --git a/gcc/config/vax/vax.cc b/gcc/config/vax/vax.cc
+index f44e23d17..28c1af59a 100644
+--- a/gcc/config/vax/vax.cc
++++ b/gcc/config/vax/vax.cc
+@@ -247,9 +247,6 @@ static void
+ vax_file_start (void)
+ {
+   default_file_start ();
+-
+-  if (write_symbols == DBX_DEBUG)
+-    fprintf (asm_out_file, "___vax_%c_doubles:\n", ASM_DOUBLE_CHAR);
+ }
+ 
+ /* We can use the BSD C library routines for the libgcc calls that are
+diff --git a/gcc/config/vax/vax.h b/gcc/config/vax/vax.h
+index 45c0e7563..12c51e53d 100644
+--- a/gcc/config/vax/vax.h
++++ b/gcc/config/vax/vax.h
+@@ -508,27 +508,6 @@ enum reg_class { NO_REGS, ALL_REGS, LIM_REG_CLASSES };
+     "r8", "r9", "r10", "r11", "ap", "fp", "sp", "pc",	\
+     "psl" }
+ 
+-/* This is BSD, so it wants DBX format.  */
+-
+-#define DBX_DEBUGGING_INFO 1
+-
+-/* Do not break .stabs pseudos into continuations.  */
+-
+-#define DBX_CONTIN_LENGTH 0
+-
+-/* This is the char to use for continuation (in case we need to turn
+-   continuation back on).  */
+-
+-#define DBX_CONTIN_CHAR '?'
+-
+-/* Don't use the `xsfoo;' construct in DBX output; this system
+-   doesn't support it.  */
+-
+-#define DBX_NO_XREFS
+-
+-/* Output the .stabs for a C `static' variable in the data section.  */
+-#define DBX_STATIC_STAB_DATA_SECTION
+-
+ /* VAX specific: which type character is used for type double?  */
+ 
+ #define ASM_DOUBLE_CHAR (TARGET_G_FLOAT ? 'g' : 'd')
+diff --git a/gcc/config/vx-common.h b/gcc/config/vx-common.h
+index aaae4f78b..bc2768437 100644
+--- a/gcc/config/vx-common.h
++++ b/gcc/config/vx-common.h
+@@ -94,8 +94,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define PREFERRED_DEBUGGING_TYPE DWARF2_DEBUG
+ 
+ /* None of these other formats is supported.  */
+-#undef DBX_DEBUGGING_INFO
+-#undef XCOFF_DEBUGGING_INFO
+ #undef VMS_DEBUGGING_INFO
+ 
+ /* ------------------------ Misc configuration bits ----------------------  */
+diff --git a/gcc/configure b/gcc/configure
+index 30f386789..7e64599b0 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -25078,38 +25078,6 @@ cat >>confdefs.h <<_ACEOF
+ _ACEOF
+ 
+ 
+-{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for stabs directive" >&5
+-$as_echo_n "checking assembler for stabs directive... " >&6; }
+-if ${gcc_cv_as_stabs_directive+:} false; then :
+-  $as_echo_n "(cached) " >&6
+-else
+-  gcc_cv_as_stabs_directive=no
+-  if test x$gcc_cv_as != x; then
+-    $as_echo '.stabs "gcc2_compiled.",60,0,0,0' > conftest.s
+-    if { ac_try='$gcc_cv_as $gcc_cv_as_flags  -o conftest.o conftest.s >&5'
+-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+-  (eval $ac_try) 2>&5
+-  ac_status=$?
+-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }; }
+-    then
+-	gcc_cv_as_stabs_directive=yes
+-    else
+-      echo "configure: failed program was" >&5
+-      cat conftest.s >&5
+-    fi
+-    rm -f conftest.o conftest.s
+-  fi
+-fi
+-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_stabs_directive" >&5
+-$as_echo "$gcc_cv_as_stabs_directive" >&6; }
+-if test $gcc_cv_as_stabs_directive = yes; then
+-
+-$as_echo "#define HAVE_AS_STABS_DIRECTIVE 1" >>confdefs.h
+-
+-fi
+-
+-
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for COMDAT group support (GNU as)" >&5
+ $as_echo_n "checking assembler for COMDAT group support (GNU as)... " >&6; }
+ if ${gcc_cv_as_comdat_group+:} false; then :
+@@ -28186,41 +28154,6 @@ if test $gcc_cv_as_aix_ref = yes; then
+ 
+ $as_echo "#define HAVE_AS_REF 1" >>confdefs.h
+ 
+-fi
+-
+-
+-	{ $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for AIX DWARF location lists section support" >&5
+-$as_echo_n "checking assembler for AIX DWARF location lists section support... " >&6; }
+-if ${gcc_cv_as_aix_dwloc+:} false; then :
+-  $as_echo_n "(cached) " >&6
+-else
+-  gcc_cv_as_aix_dwloc=no
+-  if test x$gcc_cv_as != x; then
+-    $as_echo '	.dwsect 0xA0000
+-	Lframe..0:
+-		.vbyte 4,Lframe..0
+-	  ' > conftest.s
+-    if { ac_try='$gcc_cv_as $gcc_cv_as_flags  -o conftest.o conftest.s >&5'
+-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+-  (eval $ac_try) 2>&5
+-  ac_status=$?
+-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }; }
+-    then
+-	gcc_cv_as_aix_dwloc=yes
+-    else
+-      echo "configure: failed program was" >&5
+-      cat conftest.s >&5
+-    fi
+-    rm -f conftest.o conftest.s
+-  fi
+-fi
+-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_aix_dwloc" >&5
+-$as_echo "$gcc_cv_as_aix_dwloc" >&6; }
+-if test $gcc_cv_as_aix_dwloc = yes; then
+-
+-$as_echo "#define HAVE_XCOFF_DWARF_EXTRAS 1" >>confdefs.h
+-
+ fi
+ 
+ 	;;
+@@ -30008,38 +29941,6 @@ $as_echo "#define HAVE_AS_WORKING_DWARF_N_FLAG 1" >>confdefs.h
+    fi
+  fi
+ 
+- { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for --gstabs option" >&5
+-$as_echo_n "checking assembler for --gstabs option... " >&6; }
+-if ${gcc_cv_as_gstabs_flag+:} false; then :
+-  $as_echo_n "(cached) " >&6
+-else
+-  gcc_cv_as_gstabs_flag=no
+-  if test x$gcc_cv_as != x; then
+-    $as_echo "$insn" > conftest.s
+-    if { ac_try='$gcc_cv_as $gcc_cv_as_flags --gstabs -o conftest.o conftest.s >&5'
+-  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+-  (eval $ac_try) 2>&5
+-  ac_status=$?
+-  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+-  test $ac_status = 0; }; }
+-    then
+-	gcc_cv_as_gstabs_flag=yes
+-    else
+-      echo "configure: failed program was" >&5
+-      cat conftest.s >&5
+-    fi
+-    rm -f conftest.o conftest.s
+-  fi
+-fi
+-{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_gstabs_flag" >&5
+-$as_echo "$gcc_cv_as_gstabs_flag" >&6; }
+-if test $gcc_cv_as_gstabs_flag = yes; then
+-
+-$as_echo "#define HAVE_AS_GSTABS_DEBUG_FLAG 1" >>confdefs.h
+-
+-fi
+-
+-
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for --debug-prefix-map option" >&5
+ $as_echo_n "checking assembler for --debug-prefix-map option... " >&6; }
+ if ${gcc_cv_as_debug_prefix_map_flag+:} false; then :
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index dd6cd60f8..708ec3fd3 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -3509,11 +3509,6 @@ AC_DEFINE_UNQUOTED(HAVE_LD_ALIGNED_SHF_MERGE,
+   [`if test $gcc_cv_ld_aligned_shf_merge = yes; then echo 1; else echo 0; fi`],
+ [Define 0/1 if your linker supports the SHF_MERGE flag with section alignment > 1.])
+ 
+-gcc_GAS_CHECK_FEATURE([stabs directive], gcc_cv_as_stabs_directive,,
+-[.stabs "gcc2_compiled.",60,0,0,0],,
+-[AC_DEFINE(HAVE_AS_STABS_DIRECTIVE, 1,
+-  [Define if your assembler supports .stabs.])])
+-
+ gcc_GAS_CHECK_FEATURE([COMDAT group support (GNU as)],
+  gcc_cv_as_comdat_group,
+  [--fatal-warnings],
+@@ -5100,15 +5095,6 @@ LCF0:
+ 	  ],,
+ 	  [AC_DEFINE(HAVE_AS_REF, 1,
+ 	    [Define if your assembler supports .ref])])
+-
+-	gcc_GAS_CHECK_FEATURE([AIX DWARF location lists section support],
+-	  gcc_cv_as_aix_dwloc,,
+-	  [	.dwsect 0xA0000
+-	Lframe..0:
+-		.vbyte 4,Lframe..0
+-	  ],,
+-	  [AC_DEFINE(HAVE_XCOFF_DWARF_EXTRAS, 1,
+-	    [Define if your assembler supports AIX debug frame section label reference.])])
+ 	;;
+     esac
+     ;;
+@@ -5841,12 +5827,6 @@ foo:
+    fi
+  fi
+ 
+- gcc_GAS_CHECK_FEATURE([--gstabs option],
+-  gcc_cv_as_gstabs_flag,
+-  [--gstabs], [$insn],,
+-  [AC_DEFINE(HAVE_AS_GSTABS_DEBUG_FLAG, 1,
+-[Define if your assembler supports the --gstabs option.])])
+-
+  gcc_GAS_CHECK_FEATURE([--debug-prefix-map option],
+   gcc_cv_as_debug_prefix_map_flag,
+   [--debug-prefix-map /a=/b], [$insn],,
+diff --git a/gcc/dbxout.cc b/gcc/dbxout.cc
+deleted file mode 100644
+index 878d528dc..000000000
+--- a/gcc/dbxout.cc
++++ /dev/null
+@@ -1,3936 +0,0 @@
+-/* Output dbx-format symbol table information from GNU compiler.
+-   Copyright (C) 1987-2022 Free Software Foundation, Inc.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify it under
+-the terms of the GNU General Public License as published by the Free
+-Software Foundation; either version 3, or (at your option) any later
+-version.
+-
+-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+-WARRANTY; without even the implied warranty of MERCHANTABILITY or
+-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+-for more details.
+-
+-You should have received a copy of the GNU General Public License
+-along with GCC; see the file COPYING3.  If not see
+-.  */
+-
+-
+-/* Output dbx-format symbol table data.
+-   This consists of many symbol table entries, each of them
+-   a .stabs assembler pseudo-op with four operands:
+-   a "name" which is really a description of one symbol and its type,
+-   a "code", which is a symbol defined in stab.h whose name starts with N_,
+-   an unused operand always 0,
+-   and a "value" which is an address or an offset.
+-   The name is enclosed in doublequote characters.
+-
+-   Each function, variable, typedef, and structure tag
+-   has a symbol table entry to define it.
+-   The beginning and end of each level of name scoping within
+-   a function are also marked by special symbol table entries.
+-
+-   The "name" consists of the symbol name, a colon, a kind-of-symbol letter,
+-   and a data type number.  The data type number may be followed by
+-   "=" and a type definition; normally this will happen the first time
+-   the type number is mentioned.  The type definition may refer to
+-   other types by number, and those type numbers may be followed
+-   by "=" and nested definitions.
+-
+-   This can make the "name" quite long.
+-   When a name is more than 80 characters, we split the .stabs pseudo-op
+-   into two .stabs pseudo-ops, both sharing the same "code" and "value".
+-   The first one is marked as continued with a double-backslash at the
+-   end of its "name".
+-
+-   The kind-of-symbol letter distinguished function names from global
+-   variables from file-scope variables from parameters from auto
+-   variables in memory from typedef names from register variables.
+-   See `dbxout_symbol'.
+-
+-   The "code" is mostly redundant with the kind-of-symbol letter
+-   that goes in the "name", but not entirely: for symbols located
+-   in static storage, the "code" says which segment the address is in,
+-   which controls how it is relocated.
+-
+-   The "value" for a symbol in static storage
+-   is the core address of the symbol (actually, the assembler
+-   label for the symbol).  For a symbol located in a stack slot
+-   it is the stack offset; for one in a register, the register number.
+-   For a typedef symbol, it is zero.
+-
+-   If DEBUG_SYMS_TEXT is defined, all debugging symbols must be
+-   output while in the text section.
+-
+-   For more on data type definitions, see `dbxout_type'.  */
+-
+-#include "config.h"
+-#include "system.h"
+-#include "coretypes.h"
+-#include "target.h"
+-#include "function.h"
+-#include "rtl.h"
+-#include "tree.h"
+-#include "memmodel.h"
+-#include "tm_p.h"
+-#include "stringpool.h"
+-#include "insn-config.h"
+-#include "emit-rtl.h"
+-#include "cgraph.h"
+-#include "diagnostic-core.h"
+-#include "fold-const.h"
+-#include "varasm.h"
+-#include "stor-layout.h"
+-#include "reload.h"
+-#include "output.h"
+-#include "dbxout.h"
+-#include "toplev.h"
+-#include "debug.h"
+-#include "common/common-target.h"
+-#include "langhooks.h"
+-#include "expr.h"
+-#include "file-prefix-map.h" /* remap_debug_filename()  */
+-#include "flags.h"
+-
+-#ifdef XCOFF_DEBUGGING_INFO
+-#include "xcoffout.h"
+-#endif
+-
+-#ifndef ASM_STABS_OP
+-# ifdef XCOFF_DEBUGGING_INFO
+-#  define ASM_STABS_OP "\t.stabx\t"
+-# else
+-#  define ASM_STABS_OP "\t.stabs\t"
+-# endif
+-#endif
+-
+-#ifndef ASM_STABN_OP
+-#define ASM_STABN_OP "\t.stabn\t"
+-#endif
+-
+-#ifndef ASM_STABD_OP
+-#define ASM_STABD_OP "\t.stabd\t"
+-#endif
+-
+-#ifndef DBX_TYPE_DECL_STABS_CODE
+-#define DBX_TYPE_DECL_STABS_CODE N_LSYM
+-#endif
+-
+-#ifndef DBX_STATIC_CONST_VAR_CODE
+-#define DBX_STATIC_CONST_VAR_CODE N_FUN
+-#endif
+-
+-#ifndef DBX_REGPARM_STABS_CODE
+-#define DBX_REGPARM_STABS_CODE N_RSYM
+-#endif
+-
+-#ifndef DBX_REGPARM_STABS_LETTER
+-#define DBX_REGPARM_STABS_LETTER 'P'
+-#endif
+-
+-#ifndef NO_DBX_FUNCTION_END
+-#define NO_DBX_FUNCTION_END 0
+-#endif
+-
+-#ifndef NO_DBX_BNSYM_ENSYM
+-#define NO_DBX_BNSYM_ENSYM 0
+-#endif
+-
+-#ifndef NO_DBX_MAIN_SOURCE_DIRECTORY
+-#define NO_DBX_MAIN_SOURCE_DIRECTORY 0
+-#endif
+-
+-#ifndef DBX_BLOCKS_FUNCTION_RELATIVE
+-#define DBX_BLOCKS_FUNCTION_RELATIVE 0
+-#endif
+-
+-#ifndef DBX_LINES_FUNCTION_RELATIVE
+-#define DBX_LINES_FUNCTION_RELATIVE 0
+-#endif
+-
+-#ifndef DBX_CONTIN_LENGTH
+-#define DBX_CONTIN_LENGTH 80
+-#endif
+-
+-#ifndef DBX_CONTIN_CHAR
+-#define DBX_CONTIN_CHAR '\\'
+-#endif
+-
+-enum typestatus {TYPE_UNSEEN, TYPE_XREF, TYPE_DEFINED};
+-
+-/* Structure recording information about a C data type.
+-   The status element says whether we have yet output
+-   the definition of the type.  TYPE_XREF says we have
+-   output it as a cross-reference only.
+-   The file_number and type_number elements are used if DBX_USE_BINCL
+-   is defined.  */
+-
+-struct GTY(()) typeinfo {
+-  enum typestatus status;
+-  int file_number;
+-  int type_number;
+-};
+-
+-/* Vector recording information about C data types.
+-   When we first notice a data type (a tree node),
+-   we assign it a number using next_type_number.
+-   That is its index in this vector.  */
+-
+-static GTY ((length ("typevec_len"))) struct typeinfo *typevec;
+-
+-/* Number of elements of space allocated in `typevec'.  */
+-
+-static GTY(()) int typevec_len;
+-
+-/* In dbx output, each type gets a unique number.
+-   This is the number for the next type output.
+-   The number, once assigned, is in the TYPE_SYMTAB_ADDRESS field.  */
+-
+-static GTY(()) int next_type_number;
+-
+-/* The C front end may call dbxout_symbol before dbxout_init runs.
+-   We save all such decls in this list and output them when we get
+-   to dbxout_init.  */
+-
+-static GTY(()) tree preinit_symbols;
+-
+-enum binclstatus {BINCL_NOT_REQUIRED, BINCL_PENDING, BINCL_PROCESSED};
+-
+-/* When using N_BINCL in dbx output, each type number is actually a
+-   pair of the file number and the type number within the file.
+-   This is a stack of input files.  */
+-
+-struct dbx_file
+-{
+-  struct dbx_file *next;
+-  int file_number;
+-  int next_type_number;
+-  enum binclstatus bincl_status;  /* Keep track of lazy bincl.  */
+-  const char *pending_bincl_name; /* Name of bincl.  */
+-  struct dbx_file *prev;          /* Chain to traverse all pending bincls.  */
+-};
+-
+-/* This is the top of the stack.
+-
+-   This is not saved for PCH, because restoring a PCH should not change it.
+-   next_file_number does have to be saved, because the PCH may use some
+-   file numbers; however, just before restoring a PCH, next_file_number
+-   should always be 0 because we should not have needed any file numbers
+-   yet.  */
+-
+-#if (defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)) \
+-    && defined (DBX_USE_BINCL)
+-static struct dbx_file *current_file;
+-#endif
+-
+-/* This is the next file number to use.  */
+-
+-static GTY(()) int next_file_number;
+-
+-/* A counter for dbxout_function_end.  */
+-
+-static GTY(()) int scope_labelno;
+-
+-/* A counter for dbxout_source_line.  */
+-
+-static GTY(()) int dbxout_source_line_counter;
+-
+-/* Number for the next N_SOL filename stabs label.  The number 0 is reserved
+-   for the N_SO filename stabs label.  */
+-
+-static GTY(()) int source_label_number = 1;
+-
+-/* Last source file name mentioned in a NOTE insn.  */
+-
+-static GTY(()) const char *lastfile;
+-
+-/* Last line number mentioned in a NOTE insn.  */
+-
+-static GTY(()) unsigned int lastlineno;
+-
+-/* Used by PCH machinery to detect if 'lastfile' should be reset to
+-   base_input_file.  */
+-static GTY(()) int lastfile_is_base;
+-
+-/* Typical USG systems don't have stab.h, and they also have
+-   no use for DBX-format debugging info.  */
+-
+-#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+-
+-#ifdef DBX_USE_BINCL
+-/* If zero then there is no pending BINCL.  */
+-static int pending_bincls = 0;
+-#endif
+-
+-/* The original input file name.  */
+-static const char *base_input_file;
+-
+-#ifdef DEBUG_SYMS_TEXT
+-#define FORCE_TEXT switch_to_section (current_function_section ())
+-#else
+-#define FORCE_TEXT
+-#endif
+-
+-#include "gstab.h"
+-
+-/* 1 if PARM is passed to this function in memory.  */
+-
+-#define PARM_PASSED_IN_MEMORY(PARM) \
+- (MEM_P (DECL_INCOMING_RTL (PARM)))
+-
+-/* A C expression for the integer offset value of an automatic variable
+-   (N_LSYM) having address X (an RTX).  */
+-#ifndef DEBUGGER_AUTO_OFFSET
+-#define DEBUGGER_AUTO_OFFSET(X) \
+-  (GET_CODE (X) == PLUS ? INTVAL (XEXP (X, 1)) : 0)
+-#endif
+-
+-/* A C expression for the integer offset value of an argument (N_PSYM)
+-   having address X (an RTX).  The nominal offset is OFFSET.
+-   Note that we use OFFSET + 0 here to avoid the self-assign warning
+-   when the macro is called in a context like
+-   number = DEBUGGER_ARG_OFFSET(number, X)  */
+-#ifndef DEBUGGER_ARG_OFFSET
+-#define DEBUGGER_ARG_OFFSET(OFFSET, X) (OFFSET + 0)
+-#endif
+-
+-/* This obstack holds the stab string currently being constructed.  We
+-   build it up here, then write it out, so we can split long lines up
+-   properly (see dbxout_finish_complex_stabs).  */
+-static struct obstack stabstr_ob;
+-static size_t stabstr_last_contin_point;
+-
+-#ifdef DBX_USE_BINCL
+-static void emit_bincl_stab             (const char *c);
+-static void emit_pending_bincls         (void);
+-#endif
+-static inline void emit_pending_bincls_if_required (void);
+-
+-static void dbxout_init (const char *);
+-
+-static void dbxout_finish (const char *);
+-static void dbxout_start_source_file (unsigned, const char *);
+-static void dbxout_end_source_file (unsigned);
+-static void dbxout_typedefs (tree);
+-static void dbxout_type_index (tree);
+-static void dbxout_args (tree);
+-static void dbxout_type_fields (tree);
+-static void dbxout_type_method_1 (tree);
+-static void dbxout_type_methods (tree);
+-static void dbxout_range_type (tree, tree, tree);
+-static void dbxout_type (tree, int);
+-static bool print_int_cst_bounds_in_octal_p (tree, tree, tree);
+-static bool is_fortran (void);
+-static void dbxout_type_name (tree);
+-static void dbxout_class_name_qualifiers (tree);
+-static int dbxout_symbol_location (tree, tree, const char *, rtx);
+-static void dbxout_symbol_name (tree, const char *, int);
+-static void dbxout_common_name (tree, const char *, stab_code_type);
+-static const char *dbxout_common_check (tree, int *);
+-static void dbxout_early_global_decl (tree);
+-static void dbxout_late_global_decl (tree);
+-static void dbxout_type_decl (tree, int);
+-static void dbxout_handle_pch (unsigned);
+-static void debug_free_queue (void);
+-
+-/* The debug hooks structure.  */
+-#if defined (DBX_DEBUGGING_INFO)
+-
+-static void dbxout_source_line (unsigned int, unsigned int, const char *,
+-				int, bool);
+-static void dbxout_switch_text_section (void);
+-static void dbxout_begin_prologue (unsigned int, unsigned int, const char *);
+-static void dbxout_source_file (const char *);
+-static void dbxout_function_end (tree);
+-static void dbxout_begin_function (tree);
+-static void dbxout_begin_block (unsigned, unsigned);
+-static void dbxout_end_block (unsigned, unsigned);
+-static void dbxout_function_decl (tree);
+-
+-const struct gcc_debug_hooks dbx_debug_hooks =
+-{
+-  dbxout_init,
+-  dbxout_finish,
+-  debug_nothing_charstar,
+-  debug_nothing_void,
+-  debug_nothing_int_charstar,
+-  debug_nothing_int_charstar,
+-  dbxout_start_source_file,
+-  dbxout_end_source_file,
+-  dbxout_begin_block,
+-  dbxout_end_block,
+-  debug_true_const_tree,	         /* ignore_block */
+-  dbxout_source_line,		         /* source_line */
+-  debug_nothing_int_int_charstar,	 /* set_ignored_loc */
+-  dbxout_begin_prologue,	         /* begin_prologue */
+-  debug_nothing_int_charstar,	         /* end_prologue */
+-  debug_nothing_int_charstar,	         /* begin_epilogue */
+-  debug_nothing_int_charstar,	         /* end_epilogue */
+-#ifdef DBX_FUNCTION_FIRST
+-  dbxout_begin_function,
+-#else
+-  debug_nothing_tree,		         /* begin_function */
+-#endif
+-  debug_nothing_int,		         /* end_function */
+-  debug_nothing_tree,			 /* register_main_translation_unit */
+-  dbxout_function_decl,
+-  dbxout_early_global_decl,		 /* early_global_decl */
+-  dbxout_late_global_decl,		 /* late_global_decl */
+-  dbxout_type_decl,			 /* type_decl */
+-  debug_nothing_tree_tree_tree_bool_bool,/* imported_module_or_decl */
+-  debug_false_tree_charstarstar_uhwistar,/* die_ref_for_decl */
+-  debug_nothing_tree_charstar_uhwi,      /* register_external_die */
+-  debug_nothing_tree,		         /* deferred_inline_function */
+-  debug_nothing_tree,		         /* outlining_inline_function */
+-  debug_nothing_rtx_code_label,	         /* label */
+-  dbxout_handle_pch,		         /* handle_pch */
+-  debug_nothing_rtx_insn,	         /* var_location */
+-  debug_nothing_tree,	         	 /* inline_entry */
+-  debug_nothing_tree,			 /* size_function */
+-  dbxout_switch_text_section,            /* switch_text_section */
+-  debug_nothing_tree_tree,		 /* set_name */
+-  0,                                     /* start_end_main_source_file */
+-  TYPE_SYMTAB_IS_ADDRESS                 /* tree_type_symtab_field */
+-};
+-#endif /* DBX_DEBUGGING_INFO  */
+-
+-#if defined (XCOFF_DEBUGGING_INFO)
+-const struct gcc_debug_hooks xcoff_debug_hooks =
+-{
+-  dbxout_init,
+-  dbxout_finish,
+-  debug_nothing_charstar,
+-  debug_nothing_void,
+-  debug_nothing_int_charstar,
+-  debug_nothing_int_charstar,
+-  dbxout_start_source_file,
+-  dbxout_end_source_file,
+-  xcoffout_begin_block,
+-  xcoffout_end_block,
+-  debug_true_const_tree,	         /* ignore_block */
+-  xcoffout_source_line,
+-  debug_nothing_int_int_charstar,	 /* set_ignored_loc */
+-  xcoffout_begin_prologue,	         /* begin_prologue */
+-  debug_nothing_int_charstar,	         /* end_prologue */
+-  debug_nothing_int_charstar,	         /* begin_epilogue */
+-  xcoffout_end_epilogue,
+-  debug_nothing_tree,		         /* begin_function */
+-  xcoffout_end_function,
+-  debug_nothing_tree,			 /* register_main_translation_unit */
+-  debug_nothing_tree,		         /* function_decl */
+-  dbxout_early_global_decl,		 /* early_global_decl */
+-  dbxout_late_global_decl,		 /* late_global_decl */
+-  dbxout_type_decl,			 /* type_decl */
+-  debug_nothing_tree_tree_tree_bool_bool,/* imported_module_or_decl */
+-  debug_false_tree_charstarstar_uhwistar,/* die_ref_for_decl */
+-  debug_nothing_tree_charstar_uhwi,      /* register_external_die */
+-  debug_nothing_tree,		         /* deferred_inline_function */
+-  debug_nothing_tree,		         /* outlining_inline_function */
+-  debug_nothing_rtx_code_label,	         /* label */
+-  dbxout_handle_pch,		         /* handle_pch */
+-  debug_nothing_rtx_insn,	         /* var_location */
+-  debug_nothing_tree,	         	 /* inline_entry */
+-  debug_nothing_tree,			 /* size_function */
+-  debug_nothing_void,                    /* switch_text_section */
+-  debug_nothing_tree_tree,	         /* set_name */
+-  0,                                     /* start_end_main_source_file */
+-  TYPE_SYMTAB_IS_ADDRESS                 /* tree_type_symtab_field */
+-};
+-#endif /* XCOFF_DEBUGGING_INFO  */
+-
+-/* Numeric formatting helper macro.  Note that this does not handle
+-   hexadecimal.  */
+-#define NUMBER_FMT_LOOP(P, NUM, BASE)		\
+-  do						\
+-    {						\
+-      int digit = NUM % BASE;			\
+-      NUM /= BASE;				\
+-      *--P = digit + '0';			\
+-    }						\
+-  while (NUM > 0)
+-
+-/* Utility: write a decimal integer NUM to asm_out_file.  */
+-void
+-dbxout_int (int num)
+-{
+-  char buf[64];
+-  char *p = buf + sizeof buf;
+-  unsigned int unum;
+-
+-  if (num == 0)
+-    {
+-      putc ('0', asm_out_file);
+-      return;
+-    }
+-  if (num < 0)
+-    {
+-      putc ('-', asm_out_file);
+-      unum = -(unsigned int) num;
+-    }
+-  else
+-    unum = num;
+-
+-  NUMBER_FMT_LOOP (p, unum, 10);
+-
+-  while (p < buf + sizeof buf)
+-    {
+-      putc (*p, asm_out_file);
+-      p++;
+-    }
+-}
+-
+-
+-/* Primitives for emitting simple stabs directives.  All other stabs
+-   routines should use these functions instead of directly emitting
+-   stabs.  They are exported because machine-dependent code may need
+-   to invoke them, e.g. in a DBX_OUTPUT_* macro whose definition
+-   forwards to code in CPU.c.  */
+-
+-/* The following functions should all be called immediately after one
+-   of the dbxout_begin_stab* functions (below).  They write out
+-   various things as the value of a stab.  */
+-
+-/* Write out a literal zero as the value of a stab.  */
+-void
+-dbxout_stab_value_zero (void)
+-{
+-  fputs ("0\n", asm_out_file);
+-}
+-
+-/* Write out the label LABEL as the value of a stab.  */
+-void
+-dbxout_stab_value_label (const char *label)
+-{
+-  assemble_name (asm_out_file, label);
+-  putc ('\n', asm_out_file);
+-}
+-
+-/* Write out the difference of two labels, LABEL - BASE, as the value
+-   of a stab.  */
+-void
+-dbxout_stab_value_label_diff (const char *label, const char *base)
+-{
+-  assemble_name (asm_out_file, label);
+-  putc ('-', asm_out_file);
+-  assemble_name (asm_out_file, base);
+-  putc ('\n', asm_out_file);
+-}
+-
+-/* Write out an internal label as the value of a stab, and immediately
+-   emit that internal label.  This should be used only when
+-   dbxout_stabd will not work.  STEM is the name stem of the label,
+-   COUNTERP is a pointer to a counter variable which will be used to
+-   guarantee label uniqueness.  */
+-void
+-dbxout_stab_value_internal_label (const char *stem, int *counterp)
+-{
+-  char label[100];
+-  int counter = counterp ? (*counterp)++ : 0;
+-
+-  ASM_GENERATE_INTERNAL_LABEL (label, stem, counter);
+-  dbxout_stab_value_label (label);
+-  targetm.asm_out.internal_label (asm_out_file, stem, counter);
+-}
+-
+-/* Write out the difference between BASE and an internal label as the
+-   value of a stab, and immediately emit that internal label.  STEM and
+-   COUNTERP are as for dbxout_stab_value_internal_label.  */
+-void
+-dbxout_stab_value_internal_label_diff (const char *stem, int *counterp,
+-				       const char *base)
+-{
+-  char label[100];
+-  int counter = counterp ? (*counterp)++ : 0;
+-
+-  ASM_GENERATE_INTERNAL_LABEL (label, stem, counter);
+-  dbxout_stab_value_label_diff (label, base);
+-  targetm.asm_out.internal_label (asm_out_file, stem, counter);
+-}
+-
+-/* The following functions produce specific kinds of stab directives.  */
+-
+-/* Write a .stabd directive with type STYPE and desc SDESC to asm_out_file.  */
+-void
+-dbxout_stabd (int stype, int sdesc)
+-{
+-  fputs (ASM_STABD_OP, asm_out_file);
+-  dbxout_int (stype);
+-  fputs (",0,", asm_out_file);
+-  dbxout_int (sdesc);
+-  putc ('\n', asm_out_file);
+-}
+-
+-/* Write a .stabn directive with type STYPE.  This function stops
+-   short of emitting the value field, which is the responsibility of
+-   the caller (normally it will be either a symbol or the difference
+-   of two symbols).  */
+-
+-void
+-dbxout_begin_stabn (int stype)
+-{
+-  fputs (ASM_STABN_OP, asm_out_file);
+-  dbxout_int (stype);
+-  fputs (",0,0,", asm_out_file);
+-}
+-
+-/* Write a .stabn directive with type N_SLINE and desc LINE.  As above,
+-   the value field is the responsibility of the caller.  */
+-void
+-dbxout_begin_stabn_sline (int lineno)
+-{
+-  fputs (ASM_STABN_OP, asm_out_file);
+-  dbxout_int (N_SLINE);
+-  fputs (",0,", asm_out_file);
+-  dbxout_int (lineno);
+-  putc (',', asm_out_file);
+-}
+-
+-/* Begin a .stabs directive with string "", type STYPE, and desc and
+-   other fields 0.  The value field is the responsibility of the
+-   caller.  This function cannot be used for .stabx directives.  */
+-void
+-dbxout_begin_empty_stabs (int stype)
+-{
+-  fputs (ASM_STABS_OP, asm_out_file);
+-  fputs ("\"\",", asm_out_file);
+-  dbxout_int (stype);
+-  fputs (",0,0,", asm_out_file);
+-}
+-
+-/* Begin a .stabs directive with string STR, type STYPE, and desc 0.
+-   The value field is the responsibility of the caller.  */
+-void
+-dbxout_begin_simple_stabs (const char *str, int stype)
+-{
+-  fputs (ASM_STABS_OP, asm_out_file);
+-  output_quoted_string (asm_out_file, str);
+-  putc (',', asm_out_file);
+-  dbxout_int (stype);
+-  fputs (",0,0,", asm_out_file);
+-}
+-
+-/* As above but use SDESC for the desc field.  */
+-void
+-dbxout_begin_simple_stabs_desc (const char *str, int stype, int sdesc)
+-{
+-  fputs (ASM_STABS_OP, asm_out_file);
+-  output_quoted_string (asm_out_file, str);
+-  putc (',', asm_out_file);
+-  dbxout_int (stype);
+-  fputs (",0,", asm_out_file);
+-  dbxout_int (sdesc);
+-  putc (',', asm_out_file);
+-}
+-
+-/* The next set of functions are entirely concerned with production of
+-   "complex" .stabs directives: that is, .stabs directives whose
+-   strings have to be constructed piecemeal.  dbxout_type,
+-   dbxout_symbol, etc. use these routines heavily.  The string is queued
+-   up in an obstack, then written out by dbxout_finish_complex_stabs, which
+-   is also responsible for splitting it up if it exceeds DBX_CONTIN_LENGTH.
+-   (You might think it would be more efficient to go straight to stdio
+-   when DBX_CONTIN_LENGTH is 0 (i.e. no length limit) but that turns
+-   out not to be the case, and anyway this needs fewer #ifdefs.)  */
+-
+-/* Begin a complex .stabs directive.  If we can, write the initial
+-   ASM_STABS_OP to the asm_out_file.  */
+-
+-static void
+-dbxout_begin_complex_stabs (void)
+-{
+-  emit_pending_bincls_if_required ();
+-  FORCE_TEXT;
+-  fputs (ASM_STABS_OP, asm_out_file);
+-  putc ('"', asm_out_file);
+-  gcc_assert (stabstr_last_contin_point == 0);
+-}
+-
+-/* As above, but do not force text or emit pending bincls.  This is
+-   used by dbxout_symbol_location, which needs to do something else.  */
+-static void
+-dbxout_begin_complex_stabs_noforcetext (void)
+-{
+-  fputs (ASM_STABS_OP, asm_out_file);
+-  putc ('"', asm_out_file);
+-  gcc_assert (stabstr_last_contin_point == 0);
+-}
+-
+-/* Add CHR, a single character, to the string being built.  */
+-#define stabstr_C(chr) obstack_1grow (&stabstr_ob, chr)
+-
+-/* Add STR, a normal C string, to the string being built.  */
+-#define stabstr_S(str) obstack_grow (&stabstr_ob, str, strlen (str))
+-
+-/* Add the text of ID, an IDENTIFIER_NODE, to the string being built.  */
+-#define stabstr_I(id) obstack_grow (&stabstr_ob, \
+-                                    IDENTIFIER_POINTER (id), \
+-                                    IDENTIFIER_LENGTH (id))
+-
+-/* Add NUM, a signed decimal number, to the string being built.  */
+-static void
+-stabstr_D (HOST_WIDE_INT num)
+-{
+-  char buf[64];
+-  char *p = buf + sizeof buf;
+-  unsigned HOST_WIDE_INT unum;
+-
+-  if (num == 0)
+-    {
+-      stabstr_C ('0');
+-      return;
+-    }
+-  if (num < 0)
+-    {
+-      stabstr_C ('-');
+-      unum = -(unsigned HOST_WIDE_INT) num;
+-    }
+-  else
+-    unum = num;
+-
+-  NUMBER_FMT_LOOP (p, unum, 10);
+-
+-  obstack_grow (&stabstr_ob, p, (buf + sizeof buf) - p);
+-}
+-
+-/* Add NUM, an unsigned decimal number, to the string being built.  */
+-static void
+-stabstr_U (unsigned HOST_WIDE_INT num)
+-{
+-  char buf[64];
+-  char *p = buf + sizeof buf;
+-  if (num == 0)
+-    {
+-      stabstr_C ('0');
+-      return;
+-    }
+-  NUMBER_FMT_LOOP (p, num, 10);
+-  obstack_grow (&stabstr_ob, p, (buf + sizeof buf) - p);
+-}
+-
+-/* Add CST, an INTEGER_CST tree, to the string being built as an
+-   unsigned octal number.  This routine handles values which are
+-   larger than a single HOST_WIDE_INT.  */
+-static void
+-stabstr_O (tree cst)
+-{
+-  int prec = TYPE_PRECISION (TREE_TYPE (cst));
+-  int res_pres = prec % 3;
+-  int i;
+-  unsigned int digit;
+-
+-  /* Leading zero for base indicator.  */
+-  stabstr_C ('0');
+-
+-  /* If the value is zero, the base indicator will serve as the value
+-     all by itself.  */
+-  if (wi::to_wide (cst) == 0)
+-    return;
+-
+-  /* GDB wants constants with no extra leading "1" bits, so
+-     we need to remove any sign-extension that might be
+-     present.  */
+-  if (res_pres == 1)
+-    {
+-      digit = wi::extract_uhwi (wi::to_wide (cst), prec - 1, 1);
+-      stabstr_C ('0' + digit);
+-    }
+-  else if (res_pres == 2)
+-    {
+-      digit = wi::extract_uhwi (wi::to_wide (cst), prec - 2, 2);
+-      stabstr_C ('0' + digit);
+-    }
+-
+-  prec -= res_pres;
+-  for (i = prec - 3; i >= 0; i = i - 3)
+-    {
+-      digit = wi::extract_uhwi (wi::to_wide (cst), i, 3);
+-      stabstr_C ('0' + digit);
+-    }
+-}
+-
+-/* Called whenever it is safe to break a stabs string into multiple
+-   .stabs directives.  If the current string has exceeded the limit
+-   set by DBX_CONTIN_LENGTH, mark the current position in the buffer
+-   as a continuation point by inserting DBX_CONTIN_CHAR (doubled if
+-   it is a backslash) and a null character.  */
+-static inline void
+-stabstr_continue (void)
+-{
+-  if (DBX_CONTIN_LENGTH > 0
+-      && obstack_object_size (&stabstr_ob) - stabstr_last_contin_point
+-	 > DBX_CONTIN_LENGTH)
+-    {
+-      if (DBX_CONTIN_CHAR == '\\')
+-	obstack_1grow (&stabstr_ob, '\\');
+-      obstack_1grow (&stabstr_ob, DBX_CONTIN_CHAR);
+-      obstack_1grow (&stabstr_ob, '\0');
+-      stabstr_last_contin_point = obstack_object_size (&stabstr_ob);
+-    }
+-}
+-#define CONTIN stabstr_continue ()
+-
+-/* Macro subroutine of dbxout_finish_complex_stabs, which emits
+-   all of the arguments to the .stabs directive after the string.
+-   Overridden by xcoffout.h.  CODE is the stabs code for this symbol;
+-   LINE is the source line to write into the desc field (in extended
+-   mode); SYM is the symbol itself.
+-
+-   ADDR, LABEL, and NUMBER are three different ways to represent the
+-   stabs value field.  At most one of these should be nonzero.
+-
+-     ADDR is used most of the time; it represents the value as an
+-     RTL address constant.
+-
+-     LABEL is used (currently) only for N_CATCH stabs; it represents
+-     the value as a string suitable for assemble_name.
+-
+-     NUMBER is used when the value is an offset from an implicit base
+-     pointer (e.g. for a stack variable), or an index (e.g. for a
+-     register variable).  It represents the value as a decimal integer.  */
+-
+-#ifndef DBX_FINISH_STABS
+-#define DBX_FINISH_STABS(SYM, CODE, LINE, ADDR, LABEL, NUMBER)	\
+-do {								\
+-  int line_ = use_gnu_debug_info_extensions ? LINE : 0;		\
+-								\
+-  dbxout_int (CODE);						\
+-  fputs (",0,", asm_out_file);					\
+-  dbxout_int (line_);						\
+-  putc (',', asm_out_file);					\
+-  if (ADDR)							\
+-    output_addr_const (asm_out_file, ADDR);			\
+-  else if (LABEL)						\
+-    assemble_name (asm_out_file, LABEL);			\
+-  else								\
+-    dbxout_int (NUMBER);					\
+-  putc ('\n', asm_out_file);					\
+-} while (0)
+-#endif
+-
+-/* Finish the emission of a complex .stabs directive.  When DBX_CONTIN_LENGTH
+-   is zero, this has only to emit the close quote and the remainder of
+-   the arguments.  When it is nonzero, the string has been marshalled in
+-   stabstr_ob, and this routine is responsible for breaking it up into
+-   DBX_CONTIN_LENGTH-sized chunks.
+-
+-   SYM is the DECL of the symbol under consideration; it is used only
+-   for its DECL_SOURCE_LINE.  The other arguments are all passed directly
+-   to DBX_FINISH_STABS; see above for details.  */
+-
+-static void
+-dbxout_finish_complex_stabs (tree sym, stab_code_type code,
+-			     rtx addr, const char *label, int number)
+-{
+-  int line ATTRIBUTE_UNUSED;
+-  char *str;
+-  size_t len;
+-
+-  line = sym ? DECL_SOURCE_LINE (sym) : 0;
+-  if (DBX_CONTIN_LENGTH > 0)
+-    {
+-      char *chunk;
+-      size_t chunklen;
+-
+-      /* Nul-terminate the growing string, then get its size and
+-	 address.  */
+-      obstack_1grow (&stabstr_ob, '\0');
+-
+-      len = obstack_object_size (&stabstr_ob);
+-      chunk = str = XOBFINISH (&stabstr_ob, char *);
+-
+-      /* Within the buffer are a sequence of NUL-separated strings,
+-	 each of which is to be written out as a separate stab
+-	 directive.  */
+-      for (;;)
+-	{
+-	  chunklen = strlen (chunk);
+-	  fwrite (chunk, 1, chunklen, asm_out_file);
+-	  fputs ("\",", asm_out_file);
+-
+-	  /* Must add an extra byte to account for the NUL separator.  */
+-	  chunk += chunklen + 1;
+-	  len   -= chunklen + 1;
+-
+-	  /* Only put a line number on the last stab in the sequence.  */
+-	  DBX_FINISH_STABS (sym, code, len == 0 ? line : 0,
+-			    addr, label, number);
+-	  if (len == 0)
+-	    break;
+-
+-	  fputs (ASM_STABS_OP, asm_out_file);
+-	  putc ('"', asm_out_file);
+-	}
+-      stabstr_last_contin_point = 0;
+-    }
+-  else
+-    {
+-      /* No continuations - we can put the whole string out at once.
+-	 It is faster to augment the string with the close quote and
+-	 comma than to do a two-character fputs.  */
+-      obstack_grow (&stabstr_ob, "\",", 2);
+-      len = obstack_object_size (&stabstr_ob);
+-      str = XOBFINISH (&stabstr_ob, char *);
+-
+-      fwrite (str, 1, len, asm_out_file);
+-      DBX_FINISH_STABS (sym, code, line, addr, label, number);
+-    }
+-  obstack_free (&stabstr_ob, str);
+-}
+-
+-#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
+-
+-/* When -gused is used, emit debug info for only used symbols. But in
+-   addition to the standard intercepted debug_hooks there are some
+-   direct calls into this file, i.e., dbxout_symbol, dbxout_parms, and
+-   dbxout_reg_params.  Those routines may also be called from a higher
+-   level intercepted routine. So to prevent recording data for an inner
+-   call to one of these for an intercept, we maintain an intercept
+-   nesting counter (debug_nesting). We only save the intercepted
+-   arguments if the nesting is 1.  */
+-static int debug_nesting = 0;
+-
+-static tree *symbol_queue;
+-static int symbol_queue_index = 0;
+-static int symbol_queue_size = 0;
+-
+-#define DBXOUT_DECR_NESTING \
+-  if (--debug_nesting == 0 && symbol_queue_index > 0) \
+-    { emit_pending_bincls_if_required (); debug_flush_symbol_queue (); }
+-
+-#define DBXOUT_DECR_NESTING_AND_RETURN(x) \
+-  do {--debug_nesting; return (x);} while (0)
+-
+-#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+-
+-#if defined (DBX_DEBUGGING_INFO)
+-
+-static void
+-dbxout_function_end (tree decl ATTRIBUTE_UNUSED)
+-{
+-  char lscope_label_name[100];
+-
+-  /* The Lscope label must be emitted even if we aren't doing anything
+-     else; dbxout_block needs it.  */
+-  switch_to_section (current_function_section ());
+-
+-  /* Convert Lscope into the appropriate format for local labels in case
+-     the system doesn't insert underscores in front of user generated
+-     labels.  */
+-  ASM_GENERATE_INTERNAL_LABEL (lscope_label_name, "Lscope", scope_labelno);
+-  targetm.asm_out.internal_label (asm_out_file, "Lscope", scope_labelno);
+-
+-  /* The N_FUN tag at the end of the function is a GNU extension,
+-     which may be undesirable, and is unnecessary if we do not have
+-     named sections.  */
+-  if (!use_gnu_debug_info_extensions
+-      || NO_DBX_FUNCTION_END
+-      || !targetm_common.have_named_sections)
+-    return;
+-
+-  /* By convention, GCC will mark the end of a function with an N_FUN
+-     symbol and an empty string.  */
+-  if (crtl->has_bb_partition)
+-    {
+-      dbxout_begin_empty_stabs (N_FUN);
+-      if (in_cold_section_p)
+-	dbxout_stab_value_label_diff (crtl->subsections.cold_section_end_label,
+-				      crtl->subsections.cold_section_label);
+-      else
+-	dbxout_stab_value_label_diff (crtl->subsections.hot_section_end_label,
+-				      crtl->subsections.hot_section_label);
+-    }
+-  else
+-    {
+-      char begin_label[20];
+-      /* Reference current function start using LFBB.  */
+-      ASM_GENERATE_INTERNAL_LABEL (begin_label, "LFBB", scope_labelno);
+-      dbxout_begin_empty_stabs (N_FUN);
+-      dbxout_stab_value_label_diff (lscope_label_name, begin_label);
+-    }
+-
+-  if (!NO_DBX_BNSYM_ENSYM && !flag_debug_only_used_symbols)
+-    dbxout_stabd (N_ENSYM, 0);
+-}
+-#endif /* DBX_DEBUGGING_INFO */
+-
+-/* Get lang description for N_SO stab.  */
+-static unsigned int ATTRIBUTE_UNUSED
+-get_lang_number (void)
+-{
+-  const char *language_string = lang_hooks.name;
+-  if (lang_GNU_C ())
+-    return N_SO_C;
+-  else if (lang_GNU_CXX ())
+-    return N_SO_CC;
+-  else if (strcmp (language_string, "GNU F77") == 0)
+-    return N_SO_FORTRAN;
+-  else if (lang_GNU_Fortran ())
+-    return N_SO_FORTRAN90; /* CHECKME */
+-  else if (strcmp (language_string, "GNU Objective-C") == 0)
+-    return N_SO_OBJC;
+-  else if (strcmp (language_string, "GNU Objective-C++") == 0)
+-    return N_SO_OBJCPLUS;
+-  else
+-    return 0;
+-
+-}
+-
+-static bool
+-is_fortran (void)
+-{
+-   unsigned int lang = get_lang_number ();
+-
+-   return (lang == N_SO_FORTRAN) || (lang == N_SO_FORTRAN90);
+-}
+-
+-/* At the beginning of compilation, start writing the symbol table.
+-   Initialize `typevec' and output the standard data types of C.  */
+-
+-static void
+-dbxout_init (const char *input_file_name)
+-{
+-  char ltext_label_name[100];
+-  bool used_ltext_label_name = false;
+-  tree syms = lang_hooks.decls.getdecls ();
+-  const char *mapped_name;
+-
+-  typevec_len = 100;
+-  typevec = ggc_cleared_vec_alloc (typevec_len);
+-
+-  /* stabstr_ob contains one string, which will be just fine with
+-     1-byte alignment.  */
+-  obstack_specify_allocation (&stabstr_ob, 0, 1, xmalloc, free);
+-
+-  /* Convert Ltext into the appropriate format for local labels in case
+-     the system doesn't insert underscores in front of user generated
+-     labels.  */
+-  ASM_GENERATE_INTERNAL_LABEL (ltext_label_name, "Ltext", 0);
+-
+-  /* Put the current working directory in an N_SO symbol.  */
+-  if (use_gnu_debug_info_extensions && !NO_DBX_MAIN_SOURCE_DIRECTORY)
+-    {
+-      static const char *cwd;
+-
+-      if (!cwd)
+-	{
+-	  cwd = get_src_pwd ();
+-	  if (cwd[0] == '\0')
+-	    cwd = "/";
+-	  else if (!IS_DIR_SEPARATOR (cwd[strlen (cwd) - 1]))
+-	    cwd = concat (cwd, "/", NULL);
+-	  cwd = remap_debug_filename (cwd);
+-	}
+-#ifdef DBX_OUTPUT_MAIN_SOURCE_DIRECTORY
+-      DBX_OUTPUT_MAIN_SOURCE_DIRECTORY (asm_out_file, cwd);
+-#else /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */
+-      dbxout_begin_simple_stabs_desc (cwd, N_SO, get_lang_number ());
+-      dbxout_stab_value_label (ltext_label_name);
+-      used_ltext_label_name = true;
+-#endif /* no DBX_OUTPUT_MAIN_SOURCE_DIRECTORY */
+-    }
+-
+-  mapped_name = remap_debug_filename (input_file_name);
+-#ifdef DBX_OUTPUT_MAIN_SOURCE_FILENAME
+-  DBX_OUTPUT_MAIN_SOURCE_FILENAME (asm_out_file, mapped_name);
+-#else
+-  dbxout_begin_simple_stabs_desc (mapped_name, N_SO, get_lang_number ());
+-  dbxout_stab_value_label (ltext_label_name);
+-  used_ltext_label_name = true;
+-#endif
+-
+-  if (used_ltext_label_name)
+-    {
+-      switch_to_section (text_section);
+-      targetm.asm_out.internal_label (asm_out_file, "Ltext", 0);
+-    }
+-
+-  /* Emit an N_OPT stab to indicate that this file was compiled by GCC.
+-     The string used is historical.  */
+-#ifndef NO_DBX_GCC_MARKER
+-  dbxout_begin_simple_stabs ("gcc2_compiled.", N_OPT);
+-  dbxout_stab_value_zero ();
+-#endif
+-
+-  base_input_file = lastfile = input_file_name;
+-
+-  next_type_number = 1;
+-
+-#ifdef DBX_USE_BINCL
+-  current_file = XNEW (struct dbx_file);
+-  current_file->next = NULL;
+-  current_file->file_number = 0;
+-  current_file->next_type_number = 1;
+-  next_file_number = 1;
+-  current_file->prev = NULL;
+-  current_file->bincl_status = BINCL_NOT_REQUIRED;
+-  current_file->pending_bincl_name = NULL;
+-#endif
+-
+-  /* Get all permanent types that have typedef names, and output them
+-     all, except for those already output.  Some language front ends
+-     put these declarations in the top-level scope; some do not;
+-     the latter are responsible for calling debug_hooks->type_decl from
+-     their record_builtin_type function.  */
+-  dbxout_typedefs (syms);
+-
+-  if (preinit_symbols)
+-    {
+-      tree t;
+-      for (t = nreverse (preinit_symbols); t; t = TREE_CHAIN (t))
+-	dbxout_symbol (TREE_VALUE (t), 0);
+-      preinit_symbols = 0;
+-    }
+-}
+-
+-/* Output any typedef names for types described by TYPE_DECLs in SYMS.  */
+-
+-static void
+-dbxout_typedefs (tree syms)
+-{
+-  for (; syms != NULL_TREE; syms = DECL_CHAIN (syms))
+-    {
+-      if (TREE_CODE (syms) == TYPE_DECL)
+-	{
+-	  tree type = TREE_TYPE (syms);
+-	  if (TYPE_NAME (type)
+-	      && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+-	      && COMPLETE_OR_VOID_TYPE_P (type)
+-	      && ! TREE_ASM_WRITTEN (TYPE_NAME (type)))
+-	    dbxout_symbol (TYPE_NAME (type), 0);
+-	}
+-    }
+-}
+-
+-#ifdef DBX_USE_BINCL
+-/* Emit BINCL stab using given name.  */
+-static void
+-emit_bincl_stab (const char *name)
+-{
+-  dbxout_begin_simple_stabs (name, N_BINCL);
+-  dbxout_stab_value_zero ();
+-}
+-
+-/* If there are pending bincls then it is time to emit all of them.  */
+-
+-static inline void
+-emit_pending_bincls_if_required (void)
+-{
+-  if (pending_bincls)
+-    emit_pending_bincls ();
+-}
+-
+-/* Emit all pending bincls.  */
+-
+-static void
+-emit_pending_bincls (void)
+-{
+-  struct dbx_file *f = current_file;
+-
+-  /* Find first pending bincl.  */
+-  while (f->bincl_status == BINCL_PENDING)
+-    f = f->next;
+-
+-  /* Now emit all bincls.  */
+-  f = f->prev;
+-
+-  while (f)
+-    {
+-      if (f->bincl_status == BINCL_PENDING)
+-        {
+-          emit_bincl_stab (f->pending_bincl_name);
+-
+-	  /* Update file number and status.  */
+-          f->file_number = next_file_number++;
+-          f->bincl_status = BINCL_PROCESSED;
+-        }
+-      if (f == current_file)
+-        break;
+-      f = f->prev;
+-    }
+-
+-  /* All pending bincls have been emitted.  */
+-  pending_bincls = 0;
+-}
+-
+-#else
+-
+-static inline void
+-emit_pending_bincls_if_required (void) {}
+-#endif
+-
+-/* Change to reading from a new source file.  Generate a N_BINCL stab.  */
+-
+-static void
+-dbxout_start_source_file (unsigned int line ATTRIBUTE_UNUSED,
+-			  const char *filename ATTRIBUTE_UNUSED)
+-{
+-#ifdef DBX_USE_BINCL
+-  struct dbx_file *n = XNEW (struct dbx_file);
+-
+-  n->next = current_file;
+-  n->next_type_number = 1;
+-  /* Do not assign file number now.
+-     Delay it until we actually emit BINCL.  */
+-  n->file_number = 0;
+-  n->prev = NULL;
+-  current_file->prev = n;
+-  n->bincl_status = BINCL_PENDING;
+-  n->pending_bincl_name = remap_debug_filename (filename);
+-  pending_bincls = 1;
+-  current_file = n;
+-#endif
+-}
+-
+-/* Revert to reading a previous source file.  Generate a N_EINCL stab.  */
+-
+-static void
+-dbxout_end_source_file (unsigned int line ATTRIBUTE_UNUSED)
+-{
+-#ifdef DBX_USE_BINCL
+-  /* Emit EINCL stab only if BINCL is not pending.  */
+-  if (current_file->bincl_status == BINCL_PROCESSED)
+-    {
+-      dbxout_begin_stabn (N_EINCL);
+-      dbxout_stab_value_zero ();
+-    }
+-  current_file->bincl_status = BINCL_NOT_REQUIRED;
+-  current_file = current_file->next;
+-#endif
+-}
+-
+-/* Handle a few odd cases that occur when trying to make PCH files work.  */
+-
+-static void
+-dbxout_handle_pch (unsigned at_end)
+-{
+-  if (! at_end)
+-    {
+-      /* When using the PCH, this file will be included, so we need to output
+-	 a BINCL.  */
+-      dbxout_start_source_file (0, lastfile);
+-
+-      /* The base file when using the PCH won't be the same as
+-	 the base file when it's being generated.  */
+-      lastfile = NULL;
+-    }
+-  else
+-    {
+-      /* ... and an EINCL.  */
+-      dbxout_end_source_file (0);
+-
+-      /* Deal with cases where 'lastfile' was never actually changed.  */
+-      lastfile_is_base = lastfile == NULL;
+-    }
+-}
+-
+-#if defined (DBX_DEBUGGING_INFO)
+-
+-static bool dbxout_block (tree, int, tree, int);
+-
+-/* Output debugging info to FILE to switch to sourcefile FILENAME.  */
+-
+-static void
+-dbxout_source_file (const char *filename)
+-{
+-  if (lastfile == 0 && lastfile_is_base)
+-    {
+-      lastfile = base_input_file;
+-      lastfile_is_base = 0;
+-    }
+-
+-  if (filename && (lastfile == 0 || strcmp (filename, lastfile)))
+-    {
+-      /* Don't change section amid function.  */
+-      if (current_function_decl == NULL_TREE)
+-	switch_to_section (text_section);
+-
+-      dbxout_begin_simple_stabs (remap_debug_filename (filename), N_SOL);
+-      dbxout_stab_value_internal_label ("Ltext", &source_label_number);
+-      lastfile = filename;
+-    }
+-}
+-
+-/* Output N_BNSYM, line number symbol entry, and local symbol at
+-   function scope  */
+-
+-static void
+-dbxout_begin_prologue (unsigned int lineno,
+-		       unsigned int column ATTRIBUTE_UNUSED,
+-		       const char *filename)
+-{
+-  if (use_gnu_debug_info_extensions
+-      && !NO_DBX_FUNCTION_END
+-      && !NO_DBX_BNSYM_ENSYM
+-      && !flag_debug_only_used_symbols)
+-    dbxout_stabd (N_BNSYM, 0);
+-
+-  /* pre-increment the scope counter */
+-  scope_labelno++;
+-
+-  dbxout_source_line (lineno, 0, filename, 0, true);
+-  /* Output function begin block at function scope, referenced
+-     by dbxout_block, dbxout_source_line and dbxout_function_end.  */
+-  emit_pending_bincls_if_required ();
+-  targetm.asm_out.internal_label (asm_out_file, "LFBB", scope_labelno);
+-}
+-
+-/* Output a line number symbol entry for source file FILENAME and line
+-   number LINENO.  */
+-
+-static void
+-dbxout_source_line (unsigned int lineno, unsigned int column ATTRIBUTE_UNUSED,
+-		    const char *filename, int discriminator ATTRIBUTE_UNUSED,
+-                    bool is_stmt ATTRIBUTE_UNUSED)
+-{
+-  dbxout_source_file (filename);
+-
+-#ifdef DBX_OUTPUT_SOURCE_LINE
+-  DBX_OUTPUT_SOURCE_LINE (asm_out_file, lineno, dbxout_source_line_counter);
+-#else
+-  if (DBX_LINES_FUNCTION_RELATIVE)
+-    {
+-      char begin_label[20];
+-      dbxout_begin_stabn_sline (lineno);
+-      /* Reference current function start using LFBB.  */
+-      ASM_GENERATE_INTERNAL_LABEL (begin_label, "LFBB", scope_labelno);
+-      dbxout_stab_value_internal_label_diff ("LM", &dbxout_source_line_counter,
+-					     begin_label);
+-    }
+-  else
+-    dbxout_stabd (N_SLINE, lineno);
+-#endif
+-  lastlineno = lineno;
+-}
+-
+-/* Unfortunately, at least when emitting relative addresses, STABS
+-   has no way to express multiple partitions.  Represent a function
+-   as two functions in this case.  */
+-
+-static void
+-dbxout_switch_text_section (void)
+-{
+-  /* The N_FUN tag at the end of the function is a GNU extension,
+-     which may be undesirable, and is unnecessary if we do not have
+-     named sections.  */
+-  in_cold_section_p = !in_cold_section_p;
+-  switch_to_section (current_function_section ());
+-  dbxout_block (DECL_INITIAL (current_function_decl), 0,
+-		DECL_ARGUMENTS (current_function_decl), -1);
+-  dbxout_function_end (current_function_decl);
+-  in_cold_section_p = !in_cold_section_p;
+-
+-  switch_to_section (current_function_section ());
+-
+-  tree context = decl_function_context (current_function_decl);
+-  extern tree cold_function_name;
+-
+-  dbxout_begin_complex_stabs ();
+-  stabstr_I (cold_function_name);
+-  stabstr_S (":f");
+-
+-  tree type = TREE_TYPE (current_function_decl);
+-  if (TREE_TYPE (type))
+-    dbxout_type (TREE_TYPE (type), 0);
+-  else
+-    dbxout_type (void_type_node, 0);
+-
+-  if (context != 0)
+-    {
+-      stabstr_C (',');
+-      stabstr_I (cold_function_name);
+-      stabstr_C (',');
+-      stabstr_I (DECL_NAME (context));
+-    }
+-
+-  dbxout_finish_complex_stabs (current_function_decl, N_FUN, 0,
+-			       crtl->subsections.cold_section_label, 0);
+-
+-  /* pre-increment the scope counter */
+-  scope_labelno++;
+-
+-  dbxout_source_line (lastlineno, 0, lastfile, 0, true);
+-  /* Output function begin block at function scope, referenced
+-     by dbxout_block, dbxout_source_line and dbxout_function_end.  */
+-  emit_pending_bincls_if_required ();
+-  targetm.asm_out.internal_label (asm_out_file, "LFBB", scope_labelno);
+-}
+-
+-/* Describe the beginning of an internal block within a function.  */
+-
+-static void
+-dbxout_begin_block (unsigned int line ATTRIBUTE_UNUSED, unsigned int n)
+-{
+-  emit_pending_bincls_if_required ();
+-  targetm.asm_out.internal_label (asm_out_file, "LBB", n);
+-}
+-
+-/* Describe the end line-number of an internal block within a function.  */
+-
+-static void
+-dbxout_end_block (unsigned int line ATTRIBUTE_UNUSED, unsigned int n)
+-{
+-  emit_pending_bincls_if_required ();
+-  targetm.asm_out.internal_label (asm_out_file, "LBE", n);
+-}
+-
+-/* Output dbx data for a function definition.
+-   This includes a definition of the function name itself (a symbol),
+-   definitions of the parameters (locating them in the parameter list)
+-   and then output the block that makes up the function's body
+-   (including all the auto variables of the function).  */
+-
+-static void
+-dbxout_function_decl (tree decl)
+-{
+-  emit_pending_bincls_if_required ();
+-#ifndef DBX_FUNCTION_FIRST
+-  dbxout_begin_function (decl);
+-#endif
+-  dbxout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl), -1);
+-  dbxout_function_end (decl);
+-}
+-
+-#endif /* DBX_DEBUGGING_INFO  */
+-
+-static void
+-dbxout_early_global_decl (tree decl ATTRIBUTE_UNUSED)
+-{
+-  /* NYI for non-dwarf.  */
+-}
+-
+-/* Debug information for a global DECL.  Called from toplev.cc after
+-   compilation proper has finished.  */
+-static void
+-dbxout_late_global_decl (tree decl)
+-{
+-  if (VAR_P (decl) && !DECL_EXTERNAL (decl))
+-    {
+-      int saved_tree_used = TREE_USED (decl);
+-      TREE_USED (decl) = 1;
+-      dbxout_symbol (decl, 0);
+-      TREE_USED (decl) = saved_tree_used;
+-    }
+-}
+-
+-/* This is just a function-type adapter; dbxout_symbol does exactly
+-   what we want but returns an int.  */
+-static void
+-dbxout_type_decl (tree decl, int local)
+-{
+-  dbxout_symbol (decl, local);
+-}
+-
+-/* At the end of compilation, finish writing the symbol table.
+-   The default is to call debug_free_queue but do nothing else.  */
+-
+-static void
+-dbxout_finish (const char *filename ATTRIBUTE_UNUSED)
+-{
+-#ifdef DBX_OUTPUT_MAIN_SOURCE_FILE_END
+-  DBX_OUTPUT_MAIN_SOURCE_FILE_END (asm_out_file, filename);
+-#elif defined DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+- {
+-   switch_to_section (text_section);
+-   dbxout_begin_empty_stabs (N_SO);
+-   dbxout_stab_value_internal_label ("Letext", 0);
+- }
+-#endif
+-  debug_free_queue ();
+-}
+-
+-/* Output the index of a type.  */
+-
+-static void
+-dbxout_type_index (tree type)
+-{
+-#ifndef DBX_USE_BINCL
+-  stabstr_D (TYPE_SYMTAB_ADDRESS (type));
+-#else
+-  struct typeinfo *t = &typevec[TYPE_SYMTAB_ADDRESS (type)];
+-  stabstr_C ('(');
+-  stabstr_D (t->file_number);
+-  stabstr_C (',');
+-  stabstr_D (t->type_number);
+-  stabstr_C (')');
+-#endif
+-}
+-
+-
+-/* Generate the symbols for any queued up type symbols we encountered
+-   while generating the type info for some originally used symbol.
+-   This might generate additional entries in the queue.  Only when
+-   the nesting depth goes to 0 is this routine called.  */
+-
+-static void
+-debug_flush_symbol_queue (void)
+-{
+-  int i;
+-
+-  /* Make sure that additionally queued items are not flushed
+-     prematurely.  */
+-
+-  ++debug_nesting;
+-
+-  for (i = 0; i < symbol_queue_index; ++i)
+-    {
+-      /* If we pushed queued symbols then such symbols must be
+-         output no matter what anyone else says.  Specifically,
+-         we need to make sure dbxout_symbol() thinks the symbol was
+-         used and also we need to override TYPE_DECL_SUPPRESS_DEBUG
+-         which may be set for outside reasons.  */
+-      int saved_tree_used = TREE_USED (symbol_queue[i]);
+-      int saved_suppress_debug = TYPE_DECL_SUPPRESS_DEBUG (symbol_queue[i]);
+-      TREE_USED (symbol_queue[i]) = 1;
+-      TYPE_DECL_SUPPRESS_DEBUG (symbol_queue[i]) = 0;
+-
+-#ifdef DBX_DEBUGGING_INFO
+-      dbxout_symbol (symbol_queue[i], 0);
+-#endif
+-
+-      TREE_USED (symbol_queue[i]) = saved_tree_used;
+-      TYPE_DECL_SUPPRESS_DEBUG (symbol_queue[i]) = saved_suppress_debug;
+-    }
+-
+-  symbol_queue_index = 0;
+-  --debug_nesting;
+-}
+-
+-/* Queue a type symbol needed as part of the definition of a decl
+-   symbol.  These symbols are generated when debug_flush_symbol_queue()
+-   is called.  */
+-
+-static void
+-debug_queue_symbol (tree decl)
+-{
+-  if (symbol_queue_index >= symbol_queue_size)
+-    {
+-      symbol_queue_size += 10;
+-      symbol_queue = XRESIZEVEC (tree, symbol_queue, symbol_queue_size);
+-    }
+-
+-  symbol_queue[symbol_queue_index++] = decl;
+-}
+-
+-/* Free symbol queue.  */
+-static void
+-debug_free_queue (void)
+-{
+-  if (symbol_queue)
+-    {
+-      free (symbol_queue);
+-      symbol_queue = NULL;
+-      symbol_queue_size = 0;
+-    }
+-}
+-
+-/* Used in several places: evaluates to '0' for a private decl,
+-   '1' for a protected decl, '2' for a public decl.  */
+-#define DECL_ACCESSIBILITY_CHAR(DECL) \
+-(TREE_PRIVATE (DECL) ? '0' : TREE_PROTECTED (DECL) ? '1' : '2')
+-
+-/* Subroutine of `dbxout_type'.  Output the type fields of TYPE.
+-   This must be a separate function because anonymous unions require
+-   recursive calls.  */
+-
+-static void
+-dbxout_type_fields (tree type)
+-{
+-  tree tem;
+-
+-  /* Output the name, type, position (in bits), size (in bits) of each
+-     field that we can support.  */
+-  for (tem = TYPE_FIELDS (type); tem; tem = DECL_CHAIN (tem))
+-    {
+-      /* If one of the nodes is an error_mark or its type is then
+-	 return early.  */
+-      if (error_operand_p (tem))
+-	return;
+-
+-      /* Omit here local type decls until we know how to support them.  */
+-      if (TREE_CODE (tem) == TYPE_DECL
+-	  || TREE_CODE (tem) == TEMPLATE_DECL
+-	  /* Member functions emitted after fields.  */
+-	  || TREE_CODE (tem) == FUNCTION_DECL
+-	  /* Omit here the nameless fields that are used to skip bits.  */
+-	  || DECL_IGNORED_P (tem)
+-	  /* Omit fields whose position or size are variable or too large to
+-	     represent.  */
+-	  || (TREE_CODE (tem) == FIELD_DECL
+-	      && (! tree_fits_shwi_p (bit_position (tem))
+-		  || ! DECL_SIZE (tem)
+-		  || ! tree_fits_uhwi_p (DECL_SIZE (tem)))))
+-	continue;
+-
+-      else if (TREE_CODE (tem) != CONST_DECL)
+-	{
+-	  /* Continue the line if necessary,
+-	     but not before the first field.  */
+-	  if (tem != TYPE_FIELDS (type))
+-	    CONTIN;
+-
+-	  if (DECL_NAME (tem))
+-	    stabstr_I (DECL_NAME (tem));
+-	  stabstr_C (':');
+-
+-	  if (use_gnu_debug_info_extensions
+-	      && (TREE_PRIVATE (tem) || TREE_PROTECTED (tem)
+-		  || TREE_CODE (tem) != FIELD_DECL))
+-	    {
+-	      stabstr_C ('/');
+-	      stabstr_C (DECL_ACCESSIBILITY_CHAR (tem));
+-	    }
+-
+-	  dbxout_type ((TREE_CODE (tem) == FIELD_DECL
+-			&& DECL_BIT_FIELD_TYPE (tem))
+-		       ? DECL_BIT_FIELD_TYPE (tem) : TREE_TYPE (tem), 0);
+-
+-	  if (VAR_P (tem))
+-	    {
+-	      if (TREE_STATIC (tem) && use_gnu_debug_info_extensions)
+-		{
+-		  tree name = DECL_ASSEMBLER_NAME (tem);
+-
+-		  stabstr_C (':');
+-		  stabstr_I (name);
+-		  stabstr_C (';');
+-		}
+-	      else
+-		/* If TEM is non-static, GDB won't understand it.  */
+-		stabstr_S (",0,0;");
+-	    }
+-	  else
+-	    {
+-	      stabstr_C (',');
+-	      stabstr_D (int_bit_position (tem));
+-	      stabstr_C (',');
+-	      stabstr_D (tree_to_uhwi (DECL_SIZE (tem)));
+-	      stabstr_C (';');
+-	    }
+-	}
+-    }
+-}
+-
+-/* Subroutine of `dbxout_type_methods'.  Output debug info about the
+-   method described DECL.  */
+-
+-static void
+-dbxout_type_method_1 (tree decl)
+-{
+-  char c1 = 'A', c2;
+-
+-  if (TREE_CODE (TREE_TYPE (decl)) == FUNCTION_TYPE)
+-    c2 = '?';
+-  else /* it's a METHOD_TYPE.  */
+-    {
+-      tree firstarg = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (decl)));
+-      /* A for normal functions.
+-	 B for `const' member functions.
+-	 C for `volatile' member functions.
+-	 D for `const volatile' member functions.  */
+-      if (TYPE_READONLY (TREE_TYPE (firstarg)))
+-	c1 += 1;
+-      if (TYPE_VOLATILE (TREE_TYPE (firstarg)))
+-	c1 += 2;
+-
+-      if (DECL_VINDEX (decl))
+-	c2 = '*';
+-      else
+-	c2 = '.';
+-    }
+-
+-  /* ??? Output the mangled name, which contains an encoding of the
+-     method's type signature.  May not be necessary anymore.  */
+-  stabstr_C (':');
+-  stabstr_I (DECL_ASSEMBLER_NAME (decl));
+-  stabstr_C (';');
+-  stabstr_C (DECL_ACCESSIBILITY_CHAR (decl));
+-  stabstr_C (c1);
+-  stabstr_C (c2);
+-
+-  if (DECL_VINDEX (decl) && tree_fits_shwi_p (DECL_VINDEX (decl)))
+-    {
+-      stabstr_D (tree_to_shwi (DECL_VINDEX (decl)));
+-      stabstr_C (';');
+-      dbxout_type (DECL_CONTEXT (decl), 0);
+-      stabstr_C (';');
+-    }
+-}
+-
+-/* Subroutine of `dbxout_type'.  Output debug info about the member
+-   functions defined in TYPE.  */
+-
+-static void
+-dbxout_type_methods (tree type)
+-{
+-  for (tree fndecl = TYPE_FIELDS (type); fndecl;)
+-    {
+-      int need_prefix = 1;
+-
+-      /* Group together all the methods for the same operation.
+-	 These differ in the types of the arguments.  */
+-      for (tree last = NULL_TREE;
+-	   fndecl && (last == NULL_TREE || DECL_NAME (fndecl) == DECL_NAME (last));
+-	   fndecl = DECL_CHAIN (fndecl))
+-	/* Output the name of the field (after overloading), as
+-	   well as the name of the field before overloading, along
+-	   with its parameter list */
+-	{
+-	  /* Skip non-functions.  */
+-	  if (TREE_CODE (fndecl) != FUNCTION_DECL)
+-	    continue;
+-
+-	  /* Also ignore abstract methods; those are only interesting to
+-	     the DWARF backends.  */
+-	  if (DECL_IGNORED_P (fndecl) || DECL_ABSTRACT_P (fndecl))
+-	    continue;
+-
+-	  CONTIN;
+-
+-	  last = fndecl;
+-
+-	  /* Redundantly output the plain name, since that's what gdb
+-	     expects.  */
+-	  if (need_prefix)
+-	    {
+-	      stabstr_I (DECL_NAME (fndecl));
+-	      stabstr_S ("::");
+-	      need_prefix = 0;
+-	    }
+-
+-	  dbxout_type (TREE_TYPE (fndecl), 0);
+-	  dbxout_type_method_1 (fndecl);
+-	}
+-      if (!need_prefix)
+-	stabstr_C (';');
+-    }
+-}
+-
+-/* Emit a "range" type specification, which has the form:
+-   "r;;;".
+-   TYPE is an INTEGER_TYPE, LOW and HIGH are the bounds.  */
+-
+-static void
+-dbxout_range_type (tree type, tree low, tree high)
+-{
+-  stabstr_C ('r');
+-  if (TREE_TYPE (type))
+-    dbxout_type (TREE_TYPE (type), 0);
+-  else if (TREE_CODE (type) != INTEGER_TYPE)
+-    dbxout_type (type, 0);
+-  else
+-    {
+-      /* Traditionally, we made sure 'int' was type 1, and builtin types
+-	 were defined to be sub-ranges of int.  Unfortunately, this
+-	 does not allow us to distinguish true sub-ranges from integer
+-	 types.  So, instead we define integer (non-sub-range) types as
+-	 sub-ranges of themselves.  This matters for Chill.  If this isn't
+-	 a subrange type, then we want to define it in terms of itself.
+-	 However, in C, this may be an anonymous integer type, and we don't
+-	 want to emit debug info referring to it.  Just calling
+-	 dbxout_type_index won't work anyways, because the type hasn't been
+-	 defined yet.  We make this work for both cases by checked to see
+-	 whether this is a defined type, referring to it if it is, and using
+-	 'int' otherwise.  */
+-      if (TYPE_SYMTAB_ADDRESS (type) != 0)
+-	dbxout_type_index (type);
+-      else
+-	dbxout_type_index (integer_type_node);
+-    }
+-
+-  stabstr_C (';');
+-  if (low && tree_fits_shwi_p (low))
+-    {
+-      if (print_int_cst_bounds_in_octal_p (type, low, high))
+-        stabstr_O (low);
+-      else
+-        stabstr_D (tree_to_shwi (low));
+-    }
+-  else
+-    stabstr_C ('0');
+-
+-  stabstr_C (';');
+-  if (high && tree_fits_shwi_p (high))
+-    {
+-      if (print_int_cst_bounds_in_octal_p (type, low, high))
+-        stabstr_O (high);
+-      else
+-        stabstr_D (tree_to_shwi (high));
+-      stabstr_C (';');
+-    }
+-  else
+-    stabstr_S ("-1;");
+-}
+-
+-
+-/* Output a reference to a type.  If the type has not yet been
+-   described in the dbx output, output its definition now.
+-   For a type already defined, just refer to its definition
+-   using the type number.
+-
+-   If FULL is nonzero, and the type has been described only with
+-   a forward-reference, output the definition now.
+-   If FULL is zero in this case, just refer to the forward-reference
+-   using the number previously allocated.  */
+-
+-static void
+-dbxout_type (tree type, int full)
+-{
+-  static int anonymous_type_number = 0;
+-  tree tem, main_variant, low, high;
+-
+-  if (TREE_CODE (type) == INTEGER_TYPE)
+-    {
+-      if (TREE_TYPE (type) == 0)
+-	{
+-	  low = TYPE_MIN_VALUE (type);
+-	  high = TYPE_MAX_VALUE (type);
+-	}
+-
+-      else if (subrange_type_for_debug_p (type, &low, &high))
+-	;
+-
+-      /* If this is a subtype that should not be emitted as a subrange type,
+-	 use the base type.  */
+-      else
+-	{
+-	  type = TREE_TYPE (type);
+-	  low = TYPE_MIN_VALUE (type);
+-	  high = TYPE_MAX_VALUE (type);
+-	}
+-    }
+-
+-  /* If there was an input error and we don't really have a type,
+-     avoid crashing and write something that is at least valid
+-     by assuming `int'.  */
+-  if (type == error_mark_node)
+-    type = integer_type_node;
+-  else
+-    {
+-      if (TYPE_NAME (type)
+-	  && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+-	  && TYPE_DECL_SUPPRESS_DEBUG (TYPE_NAME (type)))
+-	full = 0;
+-    }
+-
+-  /* Try to find the "main variant" with the same name.  */
+-  if (TYPE_NAME (type) && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+-      && DECL_ORIGINAL_TYPE (TYPE_NAME (type)))
+-    main_variant = TREE_TYPE (TYPE_NAME (type));
+-  else
+-    main_variant = TYPE_MAIN_VARIANT (type);
+-
+-  /* If we are not using extensions, stabs does not distinguish const and
+-     volatile, so there is no need to make them separate types.  */
+-  if (!use_gnu_debug_info_extensions)
+-    type = main_variant;
+-
+-  if (TYPE_SYMTAB_ADDRESS (type) == 0)
+-    {
+-      /* Type has no dbx number assigned.  Assign next available number.  */
+-      TYPE_SYMTAB_ADDRESS (type) = next_type_number++;
+-
+-      /* Make sure type vector is long enough to record about this type.  */
+-
+-      if (next_type_number == typevec_len)
+-	{
+-	  typevec = GGC_RESIZEVEC (struct typeinfo, typevec, typevec_len * 2);
+-	  memset (typevec + typevec_len, 0, typevec_len * sizeof typevec[0]);
+-	  typevec_len *= 2;
+-	}
+-
+-#ifdef DBX_USE_BINCL
+-      emit_pending_bincls_if_required ();
+-      typevec[TYPE_SYMTAB_ADDRESS (type)].file_number
+-	= current_file->file_number;
+-      typevec[TYPE_SYMTAB_ADDRESS (type)].type_number
+-	= current_file->next_type_number++;
+-#endif
+-    }
+-
+-  if (flag_debug_only_used_symbols)
+-    {
+-      if ((TREE_CODE (type) == RECORD_TYPE
+-	   || TREE_CODE (type) == UNION_TYPE
+-	   || TREE_CODE (type) == QUAL_UNION_TYPE
+-	   || TREE_CODE (type) == ENUMERAL_TYPE)
+-	  && TYPE_STUB_DECL (type)
+-	  && DECL_P (TYPE_STUB_DECL (type))
+-	  && ! DECL_IGNORED_P (TYPE_STUB_DECL (type)))
+-	debug_queue_symbol (TYPE_STUB_DECL (type));
+-      else if (TYPE_NAME (type)
+-	       && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+-	debug_queue_symbol (TYPE_NAME (type));
+-    }
+-
+-  /* Output the number of this type, to refer to it.  */
+-  dbxout_type_index (type);
+-
+-#ifdef DBX_TYPE_DEFINED
+-  if (DBX_TYPE_DEFINED (type))
+-    return;
+-#endif
+-
+-  /* If this type's definition has been output or is now being output,
+-     that is all.  */
+-
+-  switch (typevec[TYPE_SYMTAB_ADDRESS (type)].status)
+-    {
+-    case TYPE_UNSEEN:
+-      break;
+-    case TYPE_XREF:
+-      /* If we have already had a cross reference,
+-	 and either that's all we want or that's the best we could do,
+-	 don't repeat the cross reference.
+-	 Sun dbx crashes if we do.  */
+-      if (! full || !COMPLETE_TYPE_P (type)
+-	  /* No way in DBX fmt to describe a variable size.  */
+-	  || ! tree_fits_uhwi_p (TYPE_SIZE (type)))
+-	return;
+-      break;
+-    case TYPE_DEFINED:
+-      return;
+-    }
+-
+-#ifdef DBX_NO_XREFS
+-  /* For systems where dbx output does not allow the `=xsNAME:' syntax,
+-     leave the type-number completely undefined rather than output
+-     a cross-reference.  If we have already used GNU debug info extensions,
+-     then it is OK to output a cross reference.  This is necessary to get
+-     proper C++ debug output.  */
+-  if ((TREE_CODE (type) == RECORD_TYPE || TREE_CODE (type) == UNION_TYPE
+-       || TREE_CODE (type) == QUAL_UNION_TYPE
+-       || TREE_CODE (type) == ENUMERAL_TYPE)
+-      && ! use_gnu_debug_info_extensions)
+-    /* We must use the same test here as we use twice below when deciding
+-       whether to emit a cross-reference.  */
+-    if ((TYPE_NAME (type) != 0
+-	 && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+-	       && DECL_IGNORED_P (TYPE_NAME (type)))
+-	 && !full)
+-	|| !COMPLETE_TYPE_P (type)
+-	/* No way in DBX fmt to describe a variable size.  */
+-	|| ! tree_fits_uhwi_p (TYPE_SIZE (type)))
+-      {
+-	typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF;
+-	return;
+-      }
+-#endif
+-
+-  /* Output a definition now.  */
+-  stabstr_C ('=');
+-
+-  /* Mark it as defined, so that if it is self-referent
+-     we will not get into an infinite recursion of definitions.  */
+-
+-  typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_DEFINED;
+-
+-  /* If this type is a variant of some other, hand off.  Types with
+-     different names are usefully distinguished.  We only distinguish
+-     cv-qualified types if we're using extensions.  */
+-  if (TYPE_READONLY (type) > TYPE_READONLY (main_variant))
+-    {
+-      stabstr_C ('k');
+-      dbxout_type (build_type_variant (type, 0, TYPE_VOLATILE (type)), 0);
+-      return;
+-    }
+-  else if (TYPE_VOLATILE (type) > TYPE_VOLATILE (main_variant))
+-    {
+-      stabstr_C ('B');
+-      dbxout_type (build_type_variant (type, TYPE_READONLY (type), 0), 0);
+-      return;
+-    }
+-  else if (main_variant != TYPE_MAIN_VARIANT (type))
+-    {
+-      if (flag_debug_only_used_symbols)
+-        {
+-          tree orig_type = DECL_ORIGINAL_TYPE (TYPE_NAME (type));
+-
+-          if ((TREE_CODE (orig_type) == RECORD_TYPE
+-               || TREE_CODE (orig_type) == UNION_TYPE
+-               || TREE_CODE (orig_type) == QUAL_UNION_TYPE
+-               || TREE_CODE (orig_type) == ENUMERAL_TYPE)
+-              && TYPE_STUB_DECL (orig_type)
+-              && ! DECL_IGNORED_P (TYPE_STUB_DECL (orig_type)))
+-            debug_queue_symbol (TYPE_STUB_DECL (orig_type));
+-        }
+-      /* 'type' is a typedef; output the type it refers to.  */
+-      dbxout_type (DECL_ORIGINAL_TYPE (TYPE_NAME (type)), 0);
+-      return;
+-    }
+-  /* else continue.  */
+-
+-  switch (TREE_CODE (type))
+-    {
+-    case VOID_TYPE:
+-    case NULLPTR_TYPE:
+-    case LANG_TYPE:
+-    case OPAQUE_TYPE:
+-      /* For a void type, just define it as itself; i.e., "5=5".
+-	 This makes us consider it defined
+-	 without saying what it is.  The debugger will make it
+-	 a void type when the reference is seen, and nothing will
+-	 ever override that default.  */
+-      dbxout_type_index (type);
+-      break;
+-
+-    case INTEGER_TYPE:
+-      if (type == char_type_node && ! TYPE_UNSIGNED (type))
+-	{
+-	  /* Output the type `char' as a subrange of itself!
+-	     I don't understand this definition, just copied it
+-	     from the output of pcc.
+-	     This used to use `r2' explicitly and we used to
+-	     take care to make sure that `char' was type number 2.  */
+-	  stabstr_C ('r');
+-	  dbxout_type_index (type);
+-	  stabstr_S (";0;127;");
+-	}
+-
+-      /* If this is a subtype of another integer type, always prefer to
+-	 write it as a subtype.  */
+-      else if (TREE_TYPE (type) != 0
+-	       && TREE_CODE (TREE_TYPE (type)) == INTEGER_TYPE)
+-	{
+-	  /* If the size is non-standard, say what it is if we can use
+-	     GDB extensions.  */
+-
+-	  if (use_gnu_debug_info_extensions
+-	      && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node))
+-	    {
+-	      stabstr_S ("@s");
+-	      stabstr_D (TYPE_PRECISION (type));
+-	      stabstr_C (';');
+-	    }
+-
+-	  dbxout_range_type (type, low, high);
+-	}
+-
+-      else
+-	{
+-	  /* If the size is non-standard, say what it is if we can use
+-	     GDB extensions.  */
+-
+-	  if (use_gnu_debug_info_extensions
+-	      && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node))
+-	    {
+-	      stabstr_S ("@s");
+-	      stabstr_D (TYPE_PRECISION (type));
+-	      stabstr_C (';');
+-	    }
+-
+-	  if (print_int_cst_bounds_in_octal_p (type, low, high))
+-	    {
+-	      stabstr_C ('r');
+-
+-              /* If this type derives from another type, output type index of
+-		 parent type. This is particularly important when parent type
+-		 is an enumerated type, because not generating the parent type
+-		 index would transform the definition of this enumerated type
+-		 into a plain unsigned type.  */
+-              if (TREE_TYPE (type) != 0)
+-                dbxout_type_index (TREE_TYPE (type));
+-              else
+-                dbxout_type_index (type);
+-
+-	      stabstr_C (';');
+-	      stabstr_O (low);
+-	      stabstr_C (';');
+-	      stabstr_O (high);
+-	      stabstr_C (';');
+-	    }
+-
+-	  else
+-	    /* Output other integer types as subranges of `int'.  */
+-	    dbxout_range_type (type, low, high);
+-	}
+-
+-      break;
+-
+-    case REAL_TYPE:
+-    case FIXED_POINT_TYPE:
+-      /* This used to say `r1' and we used to take care
+-	 to make sure that `int' was type number 1.  */
+-      stabstr_C ('r');
+-      dbxout_type_index (integer_type_node);
+-      stabstr_C (';');
+-      stabstr_D (int_size_in_bytes (type));
+-      stabstr_S (";0;");
+-      break;
+-
+-    case BOOLEAN_TYPE:
+-      if (use_gnu_debug_info_extensions)
+-	{
+-	  stabstr_S ("@s");
+-	  stabstr_D (BITS_PER_UNIT * int_size_in_bytes (type));
+-	  stabstr_S (";-16;");
+-	}
+-      else /* Define as enumeral type (False, True) */
+-	stabstr_S ("eFalse:0,True:1,;");
+-      break;
+-
+-    case COMPLEX_TYPE:
+-      /* Differs from the REAL_TYPE by its new data type number.
+-	 R3 is NF_COMPLEX.  We don't try to use any of the other NF_*
+-	 codes since gdb doesn't care anyway.  */
+-
+-      if (TREE_CODE (TREE_TYPE (type)) == REAL_TYPE)
+-	{
+-	  stabstr_S ("R3;");
+-	  stabstr_D (2 * int_size_in_bytes (TREE_TYPE (type)));
+-	  stabstr_S (";0;");
+-	}
+-      else
+-	{
+-	  /* Output a complex integer type as a structure,
+-	     pending some other way to do it.  */
+-	  stabstr_C ('s');
+-	  stabstr_D (int_size_in_bytes (type));
+-
+-	  stabstr_S ("real:");
+-	  dbxout_type (TREE_TYPE (type), 0);
+-	  stabstr_S (",0,");
+-	  stabstr_D (TYPE_PRECISION (TREE_TYPE (type)));
+-
+-	  stabstr_S (";imag:");
+-	  dbxout_type (TREE_TYPE (type), 0);
+-	  stabstr_C (',');
+-	  stabstr_D (TYPE_PRECISION (TREE_TYPE (type)));
+-	  stabstr_C (',');
+-	  stabstr_D (TYPE_PRECISION (TREE_TYPE (type)));
+-	  stabstr_S (";;");
+-	}
+-      break;
+-
+-    case ARRAY_TYPE:
+-      /* Make arrays of packed bits look like bitstrings for chill.  */
+-      if (TYPE_PACKED (type) && use_gnu_debug_info_extensions)
+-	{
+-	  stabstr_S ("@s");
+-	  stabstr_D (BITS_PER_UNIT * int_size_in_bytes (type));
+-	  stabstr_S (";@S;S");
+-	  dbxout_type (TYPE_DOMAIN (type), 0);
+-	  break;
+-	}
+-
+-      /* Output "a" followed by a range type definition
+-	 for the index type of the array
+-	 followed by a reference to the target-type.
+-	 ar1;0;N;M for a C array of type M and size N+1.  */
+-      /* Check if a character string type, which in Chill is
+-	 different from an array of characters.  */
+-      if (TYPE_STRING_FLAG (type) && use_gnu_debug_info_extensions)
+-	{
+-	  stabstr_S ("@S;");
+-	}
+-      tem = TYPE_DOMAIN (type);
+-      if (tem == NULL)
+-	{
+-	  stabstr_S ("ar");
+-	  dbxout_type_index (integer_type_node);
+-	  stabstr_S (";0;-1;");
+-	}
+-      else
+-	{
+-	  stabstr_C ('a');
+-	  dbxout_range_type (tem, TYPE_MIN_VALUE (tem), TYPE_MAX_VALUE (tem));
+-	}
+-
+-      dbxout_type (TREE_TYPE (type), 0);
+-      break;
+-
+-    case VECTOR_TYPE:
+-      /* Make vectors look like an array.  */
+-      if (use_gnu_debug_info_extensions)
+-	stabstr_S ("@V;");
+-
+-      /* Output "a" followed by a range type definition
+-	 for the index type of the array
+-	 followed by a reference to the target-type.
+-	 ar1;0;N;M for a C array of type M and size N+1.  */
+-      stabstr_C ('a');
+-      dbxout_range_type (integer_type_node, size_zero_node,
+-			 size_int (TYPE_VECTOR_SUBPARTS (type) - 1));
+-
+-      dbxout_type (TREE_TYPE (type), 0);
+-      break;
+-
+-    case RECORD_TYPE:
+-    case UNION_TYPE:
+-    case QUAL_UNION_TYPE:
+-      {
+-	tree binfo = TYPE_BINFO (type);
+-
+-	/* Output a structure type.  We must use the same test here as we
+-	   use in the DBX_NO_XREFS case above.  */
+-	if ((TYPE_NAME (type) != 0
+-	     && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+-		   && DECL_IGNORED_P (TYPE_NAME (type)))
+-	     && !full)
+-	    || !COMPLETE_TYPE_P (type)
+-	    /* No way in DBX fmt to describe a variable size.  */
+-	    || ! tree_fits_uhwi_p (TYPE_SIZE (type)))
+-	  {
+-	    /* If the type is just a cross reference, output one
+-	       and mark the type as partially described.
+-	       If it later becomes defined, we will output
+-	       its real definition.
+-	       If the type has a name, don't nest its definition within
+-	       another type's definition; instead, output an xref
+-	       and let the definition come when the name is defined.  */
+-	    stabstr_S ((TREE_CODE (type) == RECORD_TYPE) ? "xs" : "xu");
+-	    if (TYPE_IDENTIFIER (type))
+-	      {
+-		/* Note that the C frontend creates for anonymous variable
+-		   length records/unions TYPE_NAME with DECL_NAME NULL.  */
+-		dbxout_type_name (type);
+-	      }
+-	    else
+-	      {
+-		stabstr_S ("$$");
+-		stabstr_D (anonymous_type_number++);
+-	      }
+-
+-	    stabstr_C (':');
+-	    typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF;
+-	    break;
+-	  }
+-
+-	/* Identify record or union, and print its size.  */
+-	stabstr_C ((TREE_CODE (type) == RECORD_TYPE) ? 's' : 'u');
+-	stabstr_D (int_size_in_bytes (type));
+-
+-	if (binfo)
+-	  {
+-	    int i;
+-	    tree child;
+-	    vec *accesses = BINFO_BASE_ACCESSES (binfo);
+-
+-	    if (use_gnu_debug_info_extensions)
+-	      {
+-		if (BINFO_N_BASE_BINFOS (binfo))
+-		  {
+-		    stabstr_C ('!');
+-		    stabstr_U (BINFO_N_BASE_BINFOS (binfo));
+-		    stabstr_C (',');
+-		  }
+-	      }
+-	    for (i = 0; BINFO_BASE_ITERATE (binfo, i, child); i++)
+-	      {
+-		tree access = (accesses ? (*accesses)[i] : access_public_node);
+-
+-		if (use_gnu_debug_info_extensions)
+-		  {
+-		    stabstr_C (BINFO_VIRTUAL_P (child) ? '1' : '0');
+-		    stabstr_C (access == access_public_node ? '2' :
+-				   access == access_protected_node
+-				   ? '1' :'0');
+-		    if (BINFO_VIRTUAL_P (child)
+-			&& (lang_GNU_CXX ()
+-			    || strcmp (lang_hooks.name, "GNU Objective-C++") == 0))
+-		      /* For a virtual base, print the (negative)
+-		     	 offset within the vtable where we must look
+-		     	 to find the necessary adjustment.  */
+-		      stabstr_D
+-			(tree_to_shwi (BINFO_VPTR_FIELD (child))
+-			 * BITS_PER_UNIT);
+-		    else
+-		      stabstr_D (tree_to_shwi (BINFO_OFFSET (child))
+-				       * BITS_PER_UNIT);
+-		    stabstr_C (',');
+-		    dbxout_type (BINFO_TYPE (child), 0);
+-		    stabstr_C (';');
+-		  }
+-		else
+-		  {
+-		    /* Print out the base class information with
+-		       fields which have the same names at the types
+-		       they hold.  */
+-		    dbxout_type_name (BINFO_TYPE (child));
+-		    stabstr_C (':');
+-		    dbxout_type (BINFO_TYPE (child), full);
+-		    stabstr_C (',');
+-		    stabstr_D (tree_to_shwi (BINFO_OFFSET (child))
+-				     * BITS_PER_UNIT);
+-		    stabstr_C (',');
+-		    stabstr_D
+-		      (tree_to_shwi (TYPE_SIZE (BINFO_TYPE (child)))
+-		       * BITS_PER_UNIT);
+-		    stabstr_C (';');
+-		  }
+-	      }
+-	  }
+-      }
+-
+-      /* Write out the field declarations.  */
+-      dbxout_type_fields (type);
+-      if (use_gnu_debug_info_extensions)
+-	dbxout_type_methods (type);
+-
+-      stabstr_C (';');
+-
+-      if (use_gnu_debug_info_extensions && TREE_CODE (type) == RECORD_TYPE
+-	  /* Avoid the ~ if we don't really need it--it confuses dbx.  */
+-	  && TYPE_VFIELD (type))
+-	{
+-
+-	  /* We need to write out info about what field this class
+-	     uses as its "main" vtable pointer field, because if this
+-	     field is inherited from a base class, GDB cannot necessarily
+-	     figure out which field it's using in time.  */
+-	  stabstr_S ("~%");
+-	  dbxout_type (DECL_FCONTEXT (TYPE_VFIELD (type)), 0);
+-	  stabstr_C (';');
+-	}
+-      break;
+-
+-    case ENUMERAL_TYPE:
+-      /* We must use the same test here as we use in the DBX_NO_XREFS case
+-	 above.  We simplify it a bit since an enum will never have a variable
+-	 size.  */
+-      if ((TYPE_NAME (type) != 0
+-	   && ! (TREE_CODE (TYPE_NAME (type)) == TYPE_DECL
+-		 && DECL_IGNORED_P (TYPE_NAME (type)))
+-	   && !full)
+-	  || !COMPLETE_TYPE_P (type))
+-	{
+-	  stabstr_S ("xe");
+-	  dbxout_type_name (type);
+-	  typevec[TYPE_SYMTAB_ADDRESS (type)].status = TYPE_XREF;
+-	  stabstr_C (':');
+-	  return;
+-	}
+-      if (use_gnu_debug_info_extensions
+-	  && TYPE_PRECISION (type) != TYPE_PRECISION (integer_type_node))
+-	{
+-	  stabstr_S ("@s");
+-	  stabstr_D (TYPE_PRECISION (type));
+-	  stabstr_C (';');
+-	}
+-
+-      stabstr_C ('e');
+-      for (tem = TYPE_VALUES (type); tem; tem = TREE_CHAIN (tem))
+-	{
+-          tree value = TREE_VALUE (tem);
+-
+-	  stabstr_I (TREE_PURPOSE (tem));
+-	  stabstr_C (':');
+-
+-          if (TREE_CODE (value) == CONST_DECL)
+-            value = DECL_INITIAL (value);
+-
+-	  if (cst_and_fits_in_hwi (value))
+-	    stabstr_D (TREE_INT_CST_LOW (value));
+-	  else
+-	    stabstr_O (value);
+-
+-	  stabstr_C (',');
+-	  if (TREE_CHAIN (tem) != 0)
+-	    CONTIN;
+-	}
+-
+-      stabstr_C (';');
+-      break;
+-
+-    case POINTER_TYPE:
+-      stabstr_C ('*');
+-      dbxout_type (TREE_TYPE (type), 0);
+-      break;
+-
+-    case METHOD_TYPE:
+-      if (use_gnu_debug_info_extensions)
+-	{
+-	  stabstr_C ('#');
+-
+-	  /* Write the argument types out longhand.  */
+-	  dbxout_type (TYPE_METHOD_BASETYPE (type), 0);
+-	  stabstr_C (',');
+-	  dbxout_type (TREE_TYPE (type), 0);
+-	  dbxout_args (TYPE_ARG_TYPES (type));
+-	  stabstr_C (';');
+-	}
+-      else
+-	/* Treat it as a function type.  */
+-	dbxout_type (TREE_TYPE (type), 0);
+-      break;
+-
+-    case OFFSET_TYPE:
+-      if (use_gnu_debug_info_extensions)
+-	{
+-	  stabstr_C ('@');
+-	  dbxout_type (TYPE_OFFSET_BASETYPE (type), 0);
+-	  stabstr_C (',');
+-	  dbxout_type (TREE_TYPE (type), 0);
+-	}
+-      else
+-	/* Should print as an int, because it is really just an offset.  */
+-	dbxout_type (integer_type_node, 0);
+-      break;
+-
+-    case REFERENCE_TYPE:
+-      if (use_gnu_debug_info_extensions)
+-	{
+-	  stabstr_C ('&');
+-	}
+-      else
+-	stabstr_C ('*');
+-      dbxout_type (TREE_TYPE (type), 0);
+-      break;
+-
+-    case FUNCTION_TYPE:
+-      stabstr_C ('f');
+-      dbxout_type (TREE_TYPE (type), 0);
+-      break;
+-
+-    default:
+-      /* A C++ function with deduced return type can have a TEMPLATE_TYPE_PARM
+-	 named 'auto' in its type.
+-	 No debug info for TEMPLATE_TYPE_PARM type supported yet.  */
+-      if (lang_GNU_CXX ())
+-	{
+-	  tree name = TYPE_IDENTIFIER (type);
+-	  if (name == get_identifier ("auto")
+-	      || name == get_identifier ("decltype(auto)"))
+-	    break;
+-	}
+-
+-      gcc_unreachable ();
+-    }
+-}
+-
+-/* Return nonzero if the given type represents an integer whose bounds
+-   should be printed in octal format.  */
+-
+-static bool
+-print_int_cst_bounds_in_octal_p (tree type, tree low, tree high)
+-{
+-  /* If we can use GDB extensions and the size is wider than a long
+-     (the size used by GDB to read them) or we may have trouble writing
+-     the bounds the usual way, write them in octal.  Note the test is for
+-     the *target's* size of "long", not that of the host.  The host test
+-     is just to make sure we can write it out in case the host wide int
+-     is narrower than the target "long".
+-
+-     For unsigned types, we use octal if they are the same size or larger.
+-     This is because we print the bounds as signed decimal, and hence they
+-     can't span same size unsigned types.  */
+-
+-  if (use_gnu_debug_info_extensions
+-      && low && TREE_CODE (low) == INTEGER_CST
+-      && high && TREE_CODE (high) == INTEGER_CST
+-      && (TYPE_PRECISION (type) > TYPE_PRECISION (integer_type_node)
+-	  || ((TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
+-	      && TYPE_UNSIGNED (type))
+-	  || TYPE_PRECISION (type) > HOST_BITS_PER_WIDE_INT
+-	  || (TYPE_PRECISION (type) == HOST_BITS_PER_WIDE_INT
+-	      && TYPE_UNSIGNED (type))))
+-    return TRUE;
+-  else
+-    return FALSE;
+-}
+-
+-/* Output the name of type TYPE, with no punctuation.
+-   Such names can be set up either by typedef declarations
+-   or by struct, enum and union tags.  */
+-
+-static void
+-dbxout_type_name (tree type)
+-{
+-  tree t = TYPE_NAME (type);
+-
+-  gcc_assert (t);
+-  switch (TREE_CODE (t))
+-    {
+-    case IDENTIFIER_NODE:
+-      break;
+-    case TYPE_DECL:
+-      t = DECL_NAME (t);
+-      break;
+-    default:
+-      gcc_unreachable ();
+-    }
+-
+-  stabstr_I (t);
+-}
+-
+-/* Output leading struct or class names needed for qualifying type
+-   whose scope is limited to a struct or class.  */
+-
+-static void
+-dbxout_class_name_qualifiers (tree decl)
+-{
+-  tree context = decl_type_context (decl);
+-
+-  if (context != NULL_TREE
+-      && TREE_CODE (context) == RECORD_TYPE
+-      && TYPE_NAME (context) != 0
+-      && (TREE_CODE (TYPE_NAME (context)) == IDENTIFIER_NODE
+-          || (DECL_NAME (TYPE_NAME (context)) != 0)))
+-    {
+-      tree name = TYPE_NAME (context);
+-
+-      if (TREE_CODE (name) == TYPE_DECL)
+-	{
+-	  dbxout_class_name_qualifiers (name);
+-	  name = DECL_NAME (name);
+-	}
+-      stabstr_I (name);
+-      stabstr_S ("::");
+-    }
+-}
+-
+-/* This is a specialized subset of expand_expr for use by dbxout_symbol in
+-   evaluating DECL_VALUE_EXPR.  In particular, we stop if we find decls that
+-   haven't been expanded, or if the expression is getting so complex we won't
+-   be able to represent it in stabs anyway.  Returns NULL on failure.  */
+-
+-static rtx
+-dbxout_expand_expr (tree expr)
+-{
+-  switch (TREE_CODE (expr))
+-    {
+-    case VAR_DECL:
+-      /* We can't handle emulated tls variables, because the address is an
+-	 offset to the return value of __emutls_get_address, and there is no
+-	 way to express that in stabs.  Also, there are name mangling issues
+-	 here.  We end up with references to undefined symbols if we don't
+-	 disable debug info for these variables.  */
+-      if (!targetm.have_tls && DECL_THREAD_LOCAL_P (expr))
+-	return NULL;
+-      if (TREE_STATIC (expr)
+-	  && !TREE_ASM_WRITTEN (expr)
+-	  && !DECL_HAS_VALUE_EXPR_P (expr)
+-	  && !TREE_PUBLIC (expr)
+-	  && DECL_RTL_SET_P (expr)
+-	  && MEM_P (DECL_RTL (expr)))
+-	{
+-	  /* If this is a var that might not be actually output,
+-	     return NULL, otherwise stabs might reference an undefined
+-	     symbol.  */
+-	  varpool_node *node = varpool_node::get (expr);
+-	  if (!node || !node->definition)
+-	    return NULL;
+-	}
+-      /* FALLTHRU */
+-
+-    case PARM_DECL:
+-    case RESULT_DECL:
+-      if (DECL_HAS_VALUE_EXPR_P (expr))
+-	return dbxout_expand_expr (DECL_VALUE_EXPR (expr));
+-      /* FALLTHRU */
+-
+-    case CONST_DECL:
+-      return DECL_RTL_IF_SET (expr);
+-
+-    case INTEGER_CST:
+-      return expand_expr (expr, NULL_RTX, VOIDmode, EXPAND_INITIALIZER);
+-
+-    case COMPONENT_REF:
+-    case ARRAY_REF:
+-    case ARRAY_RANGE_REF:
+-    case BIT_FIELD_REF:
+-      {
+-	machine_mode mode;
+-	poly_int64 bitsize, bitpos;
+-	tree offset, tem;
+-	int unsignedp, reversep, volatilep = 0;
+-	rtx x;
+-
+-	tem = get_inner_reference (expr, &bitsize, &bitpos, &offset, &mode,
+-				   &unsignedp, &reversep, &volatilep);
+-
+-	x = dbxout_expand_expr (tem);
+-	if (x == NULL || !MEM_P (x))
+-	  return NULL;
+-	if (offset != NULL)
+-	  {
+-	    if (!tree_fits_shwi_p (offset))
+-	      return NULL;
+-	    x = adjust_address_nv (x, mode, tree_to_shwi (offset));
+-	  }
+-	if (maybe_ne (bitpos, 0))
+-	  x = adjust_address_nv (x, mode, bits_to_bytes_round_down (bitpos));
+-
+-	return x;
+-      }
+-
+-    default:
+-      return NULL;
+-    }
+-}
+-
+-/* Helper function for output_used_types.  Queue one entry from the
+-   used types hash to be output.  */
+-
+-bool
+-output_used_types_helper (tree const &type, vec *types_p)
+-{
+-  if ((TREE_CODE (type) == RECORD_TYPE
+-       || TREE_CODE (type) == UNION_TYPE
+-       || TREE_CODE (type) == QUAL_UNION_TYPE
+-       || TREE_CODE (type) == ENUMERAL_TYPE)
+-      && TYPE_STUB_DECL (type)
+-      && DECL_P (TYPE_STUB_DECL (type))
+-      && ! DECL_IGNORED_P (TYPE_STUB_DECL (type)))
+-    types_p->quick_push (TYPE_STUB_DECL (type));
+-  else if (TYPE_NAME (type)
+-	   && TREE_CODE (TYPE_NAME (type)) == TYPE_DECL)
+-    types_p->quick_push (TYPE_NAME (type));
+-
+-  return true;
+-}
+-
+-/* This is a qsort callback which sorts types and declarations into a
+-   predictable order (types, then declarations, sorted by UID
+-   within).  */
+-
+-static int
+-output_types_sort (const void *pa, const void *pb)
+-{
+-  const tree lhs = *((const tree *)pa);
+-  const tree rhs = *((const tree *)pb);
+-
+-  if (TYPE_P (lhs))
+-    {
+-      if (TYPE_P (rhs))
+-	return TYPE_UID (lhs) - TYPE_UID (rhs);
+-      else
+-	return 1;
+-    }
+-  else
+-    {
+-      if (TYPE_P (rhs))
+-	return -1;
+-      else
+-	return DECL_UID (lhs) - DECL_UID (rhs);
+-    }
+-}
+-
+-
+-/* Force all types used by this function to be output in debug
+-   information.  */
+-
+-static void
+-output_used_types (void)
+-{
+-  if (cfun && cfun->used_types_hash)
+-    {
+-      vec types;
+-      int i;
+-      tree type;
+-
+-      types.create (cfun->used_types_hash->elements ());
+-      cfun->used_types_hash->traverse *, output_used_types_helper>
+-       	(&types);
+-
+-      /* Sort by UID to prevent dependence on hash table ordering.  */
+-      types.qsort (output_types_sort);
+-
+-      FOR_EACH_VEC_ELT (types, i, type)
+-	debug_queue_symbol (type);
+-
+-      types.release ();
+-    }
+-}
+-
+-/* Output a .stabs for the symbol defined by DECL,
+-   which must be a ..._DECL node in the normal namespace.
+-   It may be a CONST_DECL, a FUNCTION_DECL, a PARM_DECL or a VAR_DECL.
+-   LOCAL is nonzero if the scope is less than the entire file.
+-   Return 1 if a stabs might have been emitted.  */
+-
+-int
+-dbxout_symbol (tree decl, int local ATTRIBUTE_UNUSED)
+-{
+-  tree type = TREE_TYPE (decl);
+-  tree context = NULL_TREE;
+-  int result = 0;
+-  rtx decl_rtl;
+-
+-  /* "Intercept" dbxout_symbol() calls like we do all debug_hooks.  */
+-  ++debug_nesting;
+-
+-  /* Ignore nameless syms, but don't ignore type tags.  */
+-
+-  if ((DECL_NAME (decl) == 0 && TREE_CODE (decl) != TYPE_DECL)
+-      || DECL_IGNORED_P (decl))
+-    DBXOUT_DECR_NESTING_AND_RETURN (0);
+-
+-  /* If we are to generate only the symbols actually used then such
+-     symbol nodes are flagged with TREE_USED.  Ignore any that
+-     aren't flagged as TREE_USED.  */
+-
+-  if (flag_debug_only_used_symbols
+-      && (!TREE_USED (decl)
+-          && (!VAR_P (decl) || !DECL_INITIAL (decl))))
+-    DBXOUT_DECR_NESTING_AND_RETURN (0);
+-
+-  /* If dbxout_init has not yet run, queue this symbol for later.  */
+-  if (!typevec)
+-    {
+-      preinit_symbols = tree_cons (0, decl, preinit_symbols);
+-      DBXOUT_DECR_NESTING_AND_RETURN (0);
+-    }
+-
+-  if (flag_debug_only_used_symbols)
+-    {
+-      tree t;
+-
+-      /* We now have a used symbol.  We need to generate the info for
+-         the symbol's type in addition to the symbol itself.  These
+-         type symbols are queued to be generated after were done with
+-         the symbol itself (otherwise they would fight over the
+-         stabstr obstack).
+-
+-         Note, because the TREE_TYPE(type) might be something like a
+-         pointer to a named type we need to look for the first name
+-         we see following the TREE_TYPE chain.  */
+-
+-      t = type;
+-      while (POINTER_TYPE_P (t))
+-        t = TREE_TYPE (t);
+-
+-      /* RECORD_TYPE, UNION_TYPE, QUAL_UNION_TYPE, and ENUMERAL_TYPE
+-         need special treatment.  The TYPE_STUB_DECL field in these
+-         types generally represents the tag name type we want to
+-         output.  In addition there  could be a typedef type with
+-         a different name.  In that case we also want to output
+-         that.  */
+-
+-      if (TREE_CODE (t) == RECORD_TYPE
+-           || TREE_CODE (t) == UNION_TYPE
+-           || TREE_CODE (t) == QUAL_UNION_TYPE
+-           || TREE_CODE (t) == ENUMERAL_TYPE)
+-        {
+-	    if (TYPE_STUB_DECL (t)
+-		&& TYPE_STUB_DECL (t) != decl
+-		&& DECL_P (TYPE_STUB_DECL (t))
+-		&& ! DECL_IGNORED_P (TYPE_STUB_DECL (t)))
+-	    {
+-	      debug_queue_symbol (TYPE_STUB_DECL (t));
+-	      if (TYPE_NAME (t)
+-		  && TYPE_NAME (t) != TYPE_STUB_DECL (t)
+-		  && TYPE_NAME (t) != decl
+-		  && DECL_P (TYPE_NAME (t)))
+-		debug_queue_symbol (TYPE_NAME (t));
+-	    }
+-	}
+-      else if (TYPE_NAME (t)
+-	       && TYPE_NAME (t) != decl
+-	       && DECL_P (TYPE_NAME (t)))
+-        debug_queue_symbol (TYPE_NAME (t));
+-    }
+-
+-  emit_pending_bincls_if_required ();
+-
+-  switch (TREE_CODE (decl))
+-    {
+-    case CONST_DECL:
+-      /* Enum values are defined by defining the enum type.  */
+-      break;
+-
+-    case FUNCTION_DECL:
+-      decl_rtl = DECL_RTL_IF_SET (decl);
+-      if (!decl_rtl)
+-	DBXOUT_DECR_NESTING_AND_RETURN (0);
+-      if (DECL_EXTERNAL (decl))
+-	break;
+-      /* Don't mention a nested function under its parent.  */
+-      context = decl_function_context (decl);
+-      if (context == current_function_decl)
+-	break;
+-      /* Don't mention an inline instance of a nested function.  */
+-      if (context && DECL_FROM_INLINE (decl))
+-	break;
+-      if (!MEM_P (decl_rtl)
+-	  || GET_CODE (XEXP (decl_rtl, 0)) != SYMBOL_REF)
+-	break;
+-
+-      if (flag_debug_only_used_symbols)
+-	output_used_types ();
+-
+-      dbxout_begin_complex_stabs ();
+-      stabstr_I (DECL_ASSEMBLER_NAME (decl));
+-      stabstr_S (TREE_PUBLIC (decl) ? ":F" : ":f");
+-      result = 1;
+-
+-      if (TREE_TYPE (type))
+-	dbxout_type (TREE_TYPE (type), 0);
+-      else
+-	dbxout_type (void_type_node, 0);
+-
+-      /* For a nested function, when that function is compiled,
+-	 mention the containing function name
+-	 as well as (since dbx wants it) our own assembler-name.  */
+-      if (context != 0)
+-	{
+-	  stabstr_C (',');
+-	  stabstr_I (DECL_ASSEMBLER_NAME (decl));
+-	  stabstr_C (',');
+-	  stabstr_I (DECL_NAME (context));
+-	}
+-
+-      dbxout_finish_complex_stabs (decl, N_FUN, XEXP (decl_rtl, 0), 0, 0);
+-      break;
+-
+-    case TYPE_DECL:
+-      /* Don't output the same typedef twice.
+-         And don't output what language-specific stuff doesn't want output.  */
+-      if (TREE_ASM_WRITTEN (decl) || TYPE_DECL_SUPPRESS_DEBUG (decl))
+-	DBXOUT_DECR_NESTING_AND_RETURN (0);
+-
+-      /* Don't output typedefs for types with magic type numbers (XCOFF).  */
+-#ifdef DBX_ASSIGN_FUNDAMENTAL_TYPE_NUMBER
+-      {
+-	int fundamental_type_number =
+-	  DBX_ASSIGN_FUNDAMENTAL_TYPE_NUMBER (decl);
+-
+-	if (fundamental_type_number != 0)
+-	  {
+-	    TREE_ASM_WRITTEN (decl) = 1;
+-	    TYPE_SYMTAB_ADDRESS (TREE_TYPE (decl)) = fundamental_type_number;
+-	    DBXOUT_DECR_NESTING_AND_RETURN (0);
+-	  }
+-      }
+-#endif
+-      FORCE_TEXT;
+-      result = 1;
+-      {
+-	int tag_needed = 1;
+-	int did_output = 0;
+-
+-	if (DECL_NAME (decl))
+-	  {
+-	    /* Nonzero means we must output a tag as well as a typedef.  */
+-	    tag_needed = 0;
+-
+-	    /* Handle the case of a C++ structure or union
+-	       where the TYPE_NAME is a TYPE_DECL
+-	       which gives both a typedef name and a tag.  */
+-	    /* dbx requires the tag first and the typedef second.  */
+-	    if ((TREE_CODE (type) == RECORD_TYPE
+-		 || TREE_CODE (type) == UNION_TYPE
+-		 || TREE_CODE (type) == QUAL_UNION_TYPE)
+-		&& TYPE_NAME (type) == decl
+-		&& !use_gnu_debug_info_extensions
+-		&& !TREE_ASM_WRITTEN (TYPE_NAME (type))
+-		/* Distinguish the implicit typedefs of C++
+-		   from explicit ones that might be found in C.  */
+-		&& DECL_ARTIFICIAL (decl)
+-                /* Do not generate a tag for incomplete records.  */
+-                && COMPLETE_TYPE_P (type)
+-		/* Do not generate a tag for records of variable size,
+-		   since this type cannot be properly described in the
+-		   DBX format, and it confuses some tools such as objdump.  */
+-		&& tree_fits_uhwi_p (TYPE_SIZE (type)))
+-	      {
+-		tree name = TYPE_IDENTIFIER (type);
+-
+-		dbxout_begin_complex_stabs ();
+-		stabstr_I (name);
+-		stabstr_S (":T");
+-		dbxout_type (type, 1);
+-		dbxout_finish_complex_stabs (0, DBX_TYPE_DECL_STABS_CODE,
+-					     0, 0, 0);
+-	      }
+-
+-	    dbxout_begin_complex_stabs ();
+-
+-	    /* Output leading class/struct qualifiers.  */
+-	    if (use_gnu_debug_info_extensions)
+-	      dbxout_class_name_qualifiers (decl);
+-
+-	    /* Output typedef name.  */
+-	    stabstr_I (DECL_NAME (decl));
+-	    stabstr_C (':');
+-
+-	    /* Short cut way to output a tag also.  */
+-	    if ((TREE_CODE (type) == RECORD_TYPE
+-		 || TREE_CODE (type) == UNION_TYPE
+-		 || TREE_CODE (type) == QUAL_UNION_TYPE)
+-		&& TYPE_NAME (type) == decl
+-		/* Distinguish the implicit typedefs of C++
+-		   from explicit ones that might be found in C.  */
+-		&& DECL_ARTIFICIAL (decl))
+-	      {
+-		if (use_gnu_debug_info_extensions)
+-		  {
+-		    stabstr_C ('T');
+-		    TREE_ASM_WRITTEN (TYPE_NAME (type)) = 1;
+-		  }
+-	      }
+-
+-	    stabstr_C ('t');
+-	    dbxout_type (type, 1);
+-	    dbxout_finish_complex_stabs (decl, DBX_TYPE_DECL_STABS_CODE,
+-					 0, 0, 0);
+-	    did_output = 1;
+-	  }
+-
+-	/* Don't output a tag if this is an incomplete type.  This prevents
+-	   the sun4 Sun OS 4.x dbx from crashing.  */
+-
+-	if (tag_needed && TYPE_NAME (type) != 0
+-	    && (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE
+-		|| (DECL_NAME (TYPE_NAME (type)) != 0))
+-	    && COMPLETE_TYPE_P (type)
+-	    && !TREE_ASM_WRITTEN (TYPE_NAME (type)))
+-	  {
+-	    /* For a TYPE_DECL with no name, but the type has a name,
+-	       output a tag.
+-	       This is what represents `struct foo' with no typedef.  */
+-	    /* In C++, the name of a type is the corresponding typedef.
+-	       In C, it is an IDENTIFIER_NODE.  */
+-	    tree name = TYPE_IDENTIFIER (type);
+-
+-	    dbxout_begin_complex_stabs ();
+-	    stabstr_I (name);
+-	    stabstr_S (":T");
+-	    dbxout_type (type, 1);
+-	    dbxout_finish_complex_stabs (0, DBX_TYPE_DECL_STABS_CODE, 0, 0, 0);
+-	    did_output = 1;
+-	  }
+-
+-	/* If an enum type has no name, it cannot be referred to, but
+-	   we must output it anyway, to record the enumeration
+-	   constants.  */
+-
+-	if (!did_output && TREE_CODE (type) == ENUMERAL_TYPE)
+-	  {
+-	    dbxout_begin_complex_stabs ();
+-	    /* Some debuggers fail when given NULL names, so give this a
+-	       harmless name of " " (Why not "(anon)"?).  */
+-	    stabstr_S (" :T");
+-	    dbxout_type (type, 1);
+-	    dbxout_finish_complex_stabs (0, DBX_TYPE_DECL_STABS_CODE, 0, 0, 0);
+-	  }
+-
+-	/* Prevent duplicate output of a typedef.  */
+-	TREE_ASM_WRITTEN (decl) = 1;
+-	break;
+-      }
+-
+-    case PARM_DECL:
+-      if (DECL_HAS_VALUE_EXPR_P (decl))
+-	decl = DECL_VALUE_EXPR (decl);
+-
+-      /* PARM_DECLs go in their own separate chain and are output by
+-	 dbxout_reg_parms and dbxout_parms, except for those that are
+-	 disguised VAR_DECLs like Out parameters in Ada.  */
+-      gcc_assert (VAR_P (decl));
+-
+-      /* fall through */
+-
+-    case RESULT_DECL:
+-    case VAR_DECL:
+-      /* Don't mention a variable that is external.
+-	 Let the file that defines it describe it.  */
+-      if (DECL_EXTERNAL (decl))
+-	break;
+-
+-      /* If the variable is really a constant
+-	 and not written in memory, inform the debugger.
+-
+-	 ??? Why do we skip emitting the type and location in this case?  */
+-      if (TREE_STATIC (decl) && TREE_READONLY (decl)
+-	  && DECL_INITIAL (decl) != 0
+-	  && tree_fits_shwi_p (DECL_INITIAL (decl))
+-	  && ! TREE_ASM_WRITTEN (decl)
+-	  && (DECL_FILE_SCOPE_P (decl)
+-	      || TREE_CODE (DECL_CONTEXT (decl)) == BLOCK
+-	      || TREE_CODE (DECL_CONTEXT (decl)) == NAMESPACE_DECL)
+-	  && TREE_PUBLIC (decl) == 0)
+-	{
+-	  /* The sun4 assembler does not grok this.  */
+-
+-	  if (TREE_CODE (TREE_TYPE (decl)) == INTEGER_TYPE
+-	      || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE)
+-	    {
+-	      HOST_WIDE_INT ival = tree_to_shwi (DECL_INITIAL (decl));
+-
+-	      dbxout_begin_complex_stabs ();
+-	      dbxout_symbol_name (decl, NULL, 'c');
+-	      stabstr_S ("=i");
+-	      stabstr_D (ival);
+-	      dbxout_finish_complex_stabs (0, N_LSYM, 0, 0, 0);
+-	      DBXOUT_DECR_NESTING;
+-	      return 1;
+-	    }
+-	  else
+-	    break;
+-	}
+-      /* else it is something we handle like a normal variable.  */
+-
+-      decl_rtl = dbxout_expand_expr (decl);
+-      if (!decl_rtl)
+-	DBXOUT_DECR_NESTING_AND_RETURN (0);
+-
+-      if (!is_global_var (decl))
+-	decl_rtl = eliminate_regs (decl_rtl, VOIDmode, NULL_RTX);
+-#ifdef LEAF_REG_REMAP
+-      if (crtl->uses_only_leaf_regs)
+-	leaf_renumber_regs_insn (decl_rtl);
+-#endif
+-
+-      result = dbxout_symbol_location (decl, type, 0, decl_rtl);
+-      break;
+-
+-    default:
+-      break;
+-    }
+-  DBXOUT_DECR_NESTING;
+-  return result;
+-}
+-
+-/* Output the stab for DECL, a VAR_DECL, RESULT_DECL or PARM_DECL.
+-   Add SUFFIX to its name, if SUFFIX is not 0.
+-   Describe the variable as residing in HOME
+-   (usually HOME is DECL_RTL (DECL), but not always).
+-   Returns 1 if the stab was really emitted.  */
+-
+-static int
+-dbxout_symbol_location (tree decl, tree type, const char *suffix, rtx home)
+-{
+-  int letter = 0;
+-  stab_code_type code;
+-  rtx addr = 0;
+-  int number = 0;
+-  int regno = -1;
+-
+-  /* Don't mention a variable at all
+-     if it was completely optimized into nothingness.
+-
+-     If the decl was from an inline function, then its rtl
+-     is not identically the rtl that was used in this
+-     particular compilation.  */
+-  if (GET_CODE (home) == SUBREG)
+-    {
+-      rtx value = home;
+-
+-      while (GET_CODE (value) == SUBREG)
+-	value = SUBREG_REG (value);
+-      if (REG_P (value))
+-	{
+-	  if (REGNO (value) >= FIRST_PSEUDO_REGISTER)
+-	    return 0;
+-	}
+-      home = alter_subreg (&home, true);
+-    }
+-  if (REG_P (home))
+-    {
+-      regno = REGNO (home);
+-      if (regno >= FIRST_PSEUDO_REGISTER)
+-	return 0;
+-    }
+-
+-  /* The kind-of-variable letter depends on where
+-     the variable is and on the scope of its name:
+-     G and N_GSYM for static storage and global scope,
+-     S for static storage and file scope,
+-     V for static storage and local scope,
+-     for those two, use N_LCSYM if data is in bss segment,
+-     N_STSYM if in data segment, N_FUN otherwise.
+-     (We used N_FUN originally, then changed to N_STSYM
+-     to please GDB.  However, it seems that confused ld.
+-     Now GDB has been fixed to like N_FUN, says Kingdon.)
+-     no letter at all, and N_LSYM, for auto variable,
+-     r and N_RSYM for register variable.  */
+-
+-  if (MEM_P (home) && GET_CODE (XEXP (home, 0)) == SYMBOL_REF)
+-    {
+-      if (TREE_PUBLIC (decl))
+-	{
+-	  int offs;
+-	  letter = 'G';
+-	  code = N_GSYM;
+-	  if (dbxout_common_check (decl, &offs) != NULL)
+-	    {
+-	      letter = 'V';
+-	      addr = 0;
+-	      number = offs;
+-	    }
+-	}
+-      else
+-	{
+-	  addr = XEXP (home, 0);
+-
+-	  letter = decl_function_context (decl) ? 'V' : 'S';
+-
+-	  /* Some ports can transform a symbol ref into a label ref,
+-	     because the symbol ref is too far away and has to be
+-	     dumped into a constant pool.  Alternatively, the symbol
+-	     in the constant pool might be referenced by a different
+-	     symbol.  */
+-	  if (GET_CODE (addr) == SYMBOL_REF
+-	      && CONSTANT_POOL_ADDRESS_P (addr))
+-	    {
+-	      bool marked;
+-	      rtx tmp = get_pool_constant_mark (addr, &marked);
+-
+-	      if (GET_CODE (tmp) == SYMBOL_REF)
+-		{
+-		  addr = tmp;
+-		  if (CONSTANT_POOL_ADDRESS_P (addr))
+-		    get_pool_constant_mark (addr, &marked);
+-		  else
+-		    marked = true;
+-		}
+-	      else if (GET_CODE (tmp) == LABEL_REF)
+-		{
+-		  addr = tmp;
+-		  marked = true;
+-		}
+-
+-	      /* If all references to the constant pool were optimized
+-		 out, we just ignore the symbol.  */
+-	      if (!marked)
+-		return 0;
+-	    }
+-
+-	  /* This should be the same condition as in assemble_variable, but
+-	     we don't have access to dont_output_data here.  So, instead,
+-	     we rely on the fact that error_mark_node initializers always
+-	     end up in bss for C++ and never end up in bss for C.  */
+-	  if (DECL_INITIAL (decl) == 0
+-	      || (lang_GNU_CXX ()
+-		  && DECL_INITIAL (decl) == error_mark_node))
+-	    {
+-	      int offs;
+-	      code = N_LCSYM;
+-	      if (dbxout_common_check (decl, &offs) != NULL)
+-	        {
+-		  addr = 0;
+-		  number = offs;
+-		  letter = 'V';
+-		  code = N_GSYM;
+-		}
+-	    }
+-	  else if (DECL_IN_TEXT_SECTION (decl))
+-	    /* This is not quite right, but it's the closest
+-	       of all the codes that Unix defines.  */
+-	    code = DBX_STATIC_CONST_VAR_CODE;
+-	  else
+-	    {
+-	      /* Ultrix `as' seems to need this.  */
+-#ifdef DBX_STATIC_STAB_DATA_SECTION
+-	      switch_to_section (data_section);
+-#endif
+-	      code = N_STSYM;
+-	    }
+-	}
+-    }
+-  else if (regno >= 0)
+-    {
+-      letter = 'r';
+-      code = N_RSYM;
+-      number = DBX_REGISTER_NUMBER (regno);
+-    }
+-  else if (MEM_P (home)
+-	   && (MEM_P (XEXP (home, 0))
+-	       || (REG_P (XEXP (home, 0))
+-		   && REGNO (XEXP (home, 0)) != HARD_FRAME_POINTER_REGNUM
+-		   && REGNO (XEXP (home, 0)) != STACK_POINTER_REGNUM
+-#if !HARD_FRAME_POINTER_IS_ARG_POINTER
+-		   && REGNO (XEXP (home, 0)) != ARG_POINTER_REGNUM
+-#endif
+-		   )))
+-    /* If the value is indirect by memory or by a register
+-       that isn't the frame pointer
+-       then it means the object is variable-sized and address through
+-       that register or stack slot.  DBX has no way to represent this
+-       so all we can do is output the variable as a pointer.
+-       If it's not a parameter, ignore it.  */
+-    {
+-      if (REG_P (XEXP (home, 0)))
+-	{
+-	  letter = 'r';
+-	  code = N_RSYM;
+-	  if (REGNO (XEXP (home, 0)) >= FIRST_PSEUDO_REGISTER)
+-	    return 0;
+-	  number = DBX_REGISTER_NUMBER (REGNO (XEXP (home, 0)));
+-	}
+-      else
+-	{
+-	  code = N_LSYM;
+-	  /* RTL looks like (MEM (MEM (PLUS (REG...) (CONST_INT...)))).
+-	     We want the value of that CONST_INT.  */
+-	  number = DEBUGGER_AUTO_OFFSET (XEXP (XEXP (home, 0), 0));
+-	}
+-
+-      /* Effectively do build_pointer_type, but don't cache this type,
+-	 since it might be temporary whereas the type it points to
+-	 might have been saved for inlining.  */
+-      /* Don't use REFERENCE_TYPE because dbx can't handle that.  */
+-      type = make_node (POINTER_TYPE);
+-      TREE_TYPE (type) = TREE_TYPE (decl);
+-    }
+-  else if (MEM_P (home)
+-	   && REG_P (XEXP (home, 0)))
+-    {
+-      code = N_LSYM;
+-      number = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
+-    }
+-  else if (MEM_P (home)
+-	   && GET_CODE (XEXP (home, 0)) == PLUS
+-	   && CONST_INT_P (XEXP (XEXP (home, 0), 1)))
+-    {
+-      code = N_LSYM;
+-      /* RTL looks like (MEM (PLUS (REG...) (CONST_INT...)))
+-	 We want the value of that CONST_INT.  */
+-      number = DEBUGGER_AUTO_OFFSET (XEXP (home, 0));
+-    }
+-  else if (MEM_P (home)
+-	   && GET_CODE (XEXP (home, 0)) == CONST)
+-    {
+-      /* Handle an obscure case which can arise when optimizing and
+-	 when there are few available registers.  (This is *always*
+-	 the case for i386/i486 targets).  The RTL looks like
+-	 (MEM (CONST ...)) even though this variable is a local `auto'
+-	 or a local `register' variable.  In effect, what has happened
+-	 is that the reload pass has seen that all assignments and
+-	 references for one such a local variable can be replaced by
+-	 equivalent assignments and references to some static storage
+-	 variable, thereby avoiding the need for a register.  In such
+-	 cases we're forced to lie to debuggers and tell them that
+-	 this variable was itself `static'.  */
+-      int offs;
+-      code = N_LCSYM;
+-      letter = 'V';
+-      if (dbxout_common_check (decl, &offs) == NULL)
+-        addr = XEXP (XEXP (home, 0), 0);
+-      else
+-        {
+-	  addr = 0;
+-	  number = offs;
+-	  code = N_GSYM;
+-	}
+-    }
+-  else if (GET_CODE (home) == CONCAT)
+-    {
+-      tree subtype;
+-
+-      /* If TYPE is not a COMPLEX_TYPE (it might be a RECORD_TYPE,
+-	 for example), then there is no easy way to figure out
+-	 what SUBTYPE should be.  So, we give up.  */
+-      if (TREE_CODE (type) != COMPLEX_TYPE)
+-	return 0;
+-
+-      subtype = TREE_TYPE (type);
+-
+-      /* If the variable's storage is in two parts,
+-	 output each as a separate stab with a modified name.  */
+-      if (WORDS_BIG_ENDIAN)
+-	dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 0));
+-      else
+-	dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 0));
+-
+-      if (WORDS_BIG_ENDIAN)
+-	dbxout_symbol_location (decl, subtype, "$real", XEXP (home, 1));
+-      else
+-	dbxout_symbol_location (decl, subtype, "$imag", XEXP (home, 1));
+-      return 1;
+-    }
+-  else
+-    /* Address might be a MEM, when DECL is a variable-sized object.
+-       Or it might be const0_rtx, meaning previous passes
+-       want us to ignore this variable.  */
+-    return 0;
+-
+-  /* Ok, start a symtab entry and output the variable name.  */
+-  emit_pending_bincls_if_required ();
+-  FORCE_TEXT;
+-
+-#ifdef DBX_STATIC_BLOCK_START
+-  DBX_STATIC_BLOCK_START (asm_out_file, code);
+-#endif
+-
+-  dbxout_begin_complex_stabs_noforcetext ();
+-  dbxout_symbol_name (decl, suffix, letter);
+-  dbxout_type (type, 0);
+-  dbxout_finish_complex_stabs (decl, code, addr, 0, number);
+-
+-#ifdef DBX_STATIC_BLOCK_END
+-  DBX_STATIC_BLOCK_END (asm_out_file, code);
+-#endif
+-  return 1;
+-}
+-
+-/* Output the symbol name of DECL for a stabs, with suffix SUFFIX.
+-   Then output LETTER to indicate the kind of location the symbol has.  */
+-
+-static void
+-dbxout_symbol_name (tree decl, const char *suffix, int letter)
+-{
+-  tree name;
+-
+-  if (DECL_CONTEXT (decl)
+-      && (TYPE_P (DECL_CONTEXT (decl))
+-	  || TREE_CODE (DECL_CONTEXT (decl)) == NAMESPACE_DECL))
+-    /* One slight hitch: if this is a VAR_DECL which is a class member
+-       or a namespace member, we must put out the mangled name instead of the
+-       DECL_NAME.  Note also that static member (variable) names DO NOT begin
+-       with underscores in .stabs directives.  */
+-    name = DECL_ASSEMBLER_NAME (decl);
+-  else
+-    /* ...but if we're function-local, we don't want to include the junk
+-       added by ASM_FORMAT_PRIVATE_NAME.  */
+-    name = DECL_NAME (decl);
+-
+-  if (name)
+-    stabstr_I (name);
+-  else
+-    stabstr_S ("(anon)");
+-
+-  if (suffix)
+-    stabstr_S (suffix);
+-  stabstr_C (':');
+-  if (letter)
+-    stabstr_C (letter);
+-}
+-
+-
+-/* Output the common block name for DECL in a stabs.
+-
+-   Symbols in global common (.comm) get wrapped with an N_BCOMM/N_ECOMM pair
+-   around each group of symbols in the same .comm area.  The N_GSYM stabs
+-   that are emitted only contain the offset in the common area.  This routine
+-   emits the N_BCOMM and N_ECOMM stabs.  */
+-
+-static void
+-dbxout_common_name (tree decl, const char *name, stab_code_type op)
+-{
+-  dbxout_begin_complex_stabs ();
+-  stabstr_S (name);
+-  dbxout_finish_complex_stabs (decl, op, NULL_RTX, NULL, 0);
+-}
+-
+-/* Check decl to determine whether it is a VAR_DECL destined for storage in a
+-   common area.  If it is, the return value will be a non-null string giving
+-   the name of the common storage block it will go into.  If non-null, the
+-   value is the offset into the common block for that symbol's storage.  */
+-
+-static const char *
+-dbxout_common_check (tree decl, int *value)
+-{
+-  rtx home;
+-  rtx sym_addr;
+-  const char *name = NULL;
+-
+-  /* If the decl isn't a VAR_DECL, or if it isn't static, or if
+-     it does not have a value (the offset into the common area), or if it
+-     is thread local (as opposed to global) then it isn't common, and shouldn't
+-     be handled as such.
+-
+-     ??? DECL_THREAD_LOCAL_P check prevents problems with improper .stabs
+-     for thread-local symbols.  Can be handled via same mechanism as used
+-     in dwarf2out.cc.  */
+-  if (!VAR_P (decl)
+-      || !TREE_STATIC (decl)
+-      || !DECL_HAS_VALUE_EXPR_P (decl)
+-      || DECL_THREAD_LOCAL_P (decl)
+-      || !is_fortran ())
+-    return NULL;
+-
+-  home = DECL_RTL (decl);
+-  if (home == NULL_RTX || GET_CODE (home) != MEM)
+-    return NULL;
+-
+-  sym_addr = dbxout_expand_expr (DECL_VALUE_EXPR (decl));
+-  if (sym_addr == NULL_RTX || GET_CODE (sym_addr) != MEM)
+-    return NULL;
+-
+-  sym_addr = XEXP (sym_addr, 0);
+-  if (GET_CODE (sym_addr) == CONST)
+-    sym_addr = XEXP (sym_addr, 0);
+-  if ((GET_CODE (sym_addr) == SYMBOL_REF || GET_CODE (sym_addr) == PLUS)
+-      && DECL_INITIAL (decl) == 0)
+-    {
+-
+-      /* We have a sym that will go into a common area, meaning that it
+-         will get storage reserved with a .comm/.lcomm assembler pseudo-op.
+-
+-         Determine name of common area this symbol will be an offset into,
+-         and offset into that area.  Also retrieve the decl for the area
+-         that the symbol is offset into.  */
+-      tree cdecl = NULL;
+-
+-      switch (GET_CODE (sym_addr))
+-        {
+-        case PLUS:
+-          if (CONST_INT_P (XEXP (sym_addr, 0)))
+-            {
+-              name =
+-                targetm.strip_name_encoding (XSTR (XEXP (sym_addr, 1), 0));
+-              *value = INTVAL (XEXP (sym_addr, 0));
+-              cdecl = SYMBOL_REF_DECL (XEXP (sym_addr, 1));
+-            }
+-          else
+-            {
+-              name =
+-                targetm.strip_name_encoding (XSTR (XEXP (sym_addr, 0), 0));
+-              *value = INTVAL (XEXP (sym_addr, 1));
+-              cdecl = SYMBOL_REF_DECL (XEXP (sym_addr, 0));
+-            }
+-          break;
+-
+-        case SYMBOL_REF:
+-          name = targetm.strip_name_encoding (XSTR (sym_addr, 0));
+-          *value = 0;
+-          cdecl = SYMBOL_REF_DECL (sym_addr);
+-          break;
+-
+-        default:
+-          error ("common symbol debug info is not structured as "
+-                 "symbol+offset");
+-        }
+-
+-      /* Check area common symbol is offset into.  If this is not public, then
+-         it is not a symbol in a common block.  It must be a .lcomm symbol, not
+-         a .comm symbol.  */
+-      if (cdecl == NULL || !TREE_PUBLIC (cdecl))
+-        name = NULL;
+-    }
+-  else
+-    name = NULL;
+-
+-  return name;
+-}
+-
+-/* Output definitions of all the decls in a chain. Return nonzero if
+-   anything was output */
+-
+-int
+-dbxout_syms (tree syms)
+-{
+-  int result = 0;
+-  const char *comm_prev = NULL;
+-  tree syms_prev = NULL;
+-
+-  while (syms)
+-    {
+-      int temp, copen, cclos;
+-      const char *comm_new;
+-
+-      /* Check for common symbol, and then progression into a new/different
+-         block of common symbols.  Emit closing/opening common bracket if
+-         necessary.  */
+-      comm_new = dbxout_common_check (syms, &temp);
+-      copen = comm_new != NULL
+-              && (comm_prev == NULL || strcmp (comm_new, comm_prev));
+-      cclos = comm_prev != NULL
+-              && (comm_new == NULL || strcmp (comm_new, comm_prev));
+-      if (cclos)
+-        dbxout_common_name (syms_prev, comm_prev, N_ECOMM);
+-      if (copen)
+-        {
+-          dbxout_common_name (syms, comm_new, N_BCOMM);
+-          syms_prev = syms;
+-        }
+-      comm_prev = comm_new;
+-
+-      result += dbxout_symbol (syms, 1);
+-      syms = DECL_CHAIN (syms);
+-    }
+-
+-  if (comm_prev != NULL)
+-    dbxout_common_name (syms_prev, comm_prev, N_ECOMM);
+-
+-  return result;
+-}
+-
+-/* The following two functions output definitions of function parameters.
+-   Each parameter gets a definition locating it in the parameter list.
+-   Each parameter that is a register variable gets a second definition
+-   locating it in the register.
+-
+-   Printing or argument lists in gdb uses the definitions that
+-   locate in the parameter list.  But reference to the variable in
+-   expressions uses preferentially the definition as a register.  */
+-
+-/* Output definitions, referring to storage in the parmlist,
+-   of all the parms in PARMS, which is a chain of PARM_DECL nodes.  */
+-
+-void
+-dbxout_parms (tree parms)
+-{
+-  ++debug_nesting;
+-  emit_pending_bincls_if_required ();
+-  fixed_size_mode rtl_mode, type_mode;
+-
+-  for (; parms; parms = DECL_CHAIN (parms))
+-    if (DECL_NAME (parms)
+-	&& TREE_TYPE (parms) != error_mark_node
+-	&& DECL_RTL_SET_P (parms)
+-	&& DECL_INCOMING_RTL (parms)
+-	/* We can't represent variable-sized types in this format.  */
+-	&& is_a  (TYPE_MODE (TREE_TYPE (parms)), &type_mode)
+-	&& is_a  (GET_MODE (DECL_RTL (parms)), &rtl_mode))
+-      {
+-	tree eff_type;
+-	char letter;
+-	stab_code_type code;
+-	int number;
+-
+-	/* Perform any necessary register eliminations on the parameter's rtl,
+-	   so that the debugging output will be accurate.  */
+-	DECL_INCOMING_RTL (parms)
+-	  = eliminate_regs (DECL_INCOMING_RTL (parms), VOIDmode, NULL_RTX);
+-	SET_DECL_RTL (parms,
+-		      eliminate_regs (DECL_RTL (parms), VOIDmode, NULL_RTX));
+-#ifdef LEAF_REG_REMAP
+-	if (crtl->uses_only_leaf_regs)
+-	  {
+-	    leaf_renumber_regs_insn (DECL_INCOMING_RTL (parms));
+-	    leaf_renumber_regs_insn (DECL_RTL (parms));
+-	  }
+-#endif
+-
+-	if (PARM_PASSED_IN_MEMORY (parms))
+-	  {
+-	    rtx inrtl = XEXP (DECL_INCOMING_RTL (parms), 0);
+-
+-	    /* ??? Here we assume that the parm address is indexed
+-	       off the frame pointer or arg pointer.
+-	       If that is not true, we produce meaningless results,
+-	       but do not crash.  */
+-	    if (GET_CODE (inrtl) == PLUS
+-		&& CONST_INT_P (XEXP (inrtl, 1)))
+-	      number = INTVAL (XEXP (inrtl, 1));
+-	    else
+-	      number = 0;
+-
+-	    code = N_PSYM;
+-	    number = DEBUGGER_ARG_OFFSET (number, inrtl);
+-	    letter = 'p';
+-
+-	    /* It is quite tempting to use TREE_TYPE (parms) instead
+-	       of DECL_ARG_TYPE (parms) for the eff_type, so that gcc
+-	       reports the actual type of the parameter, rather than
+-	       the promoted type.  This certainly makes GDB's life
+-	       easier, at least for some ports.  The change is a bad
+-	       idea however, since GDB expects to be able access the
+-	       type without performing any conversions.  So for
+-	       example, if we were passing a float to an unprototyped
+-	       function, gcc will store a double on the stack, but if
+-	       we emit a stab saying the type is a float, then gdb
+-	       will only read in a single value, and this will produce
+-	       an erroneous value.  */
+-	    eff_type = DECL_ARG_TYPE (parms);
+-	  }
+-	else if (REG_P (DECL_RTL (parms)))
+-	  {
+-	    rtx best_rtl;
+-
+-	    /* Parm passed in registers and lives in registers or nowhere.  */
+-	    code = DBX_REGPARM_STABS_CODE;
+-	    letter = DBX_REGPARM_STABS_LETTER;
+-
+-	    /* For parms passed in registers, it is better to use the
+-	       declared type of the variable, not the type it arrived in.  */
+-	    eff_type = TREE_TYPE (parms);
+-
+-	    /* If parm lives in a register, use that register; pretend
+-	       the parm was passed there.  It would be more consistent
+-	       to describe the register where the parm was passed, but
+-	       in practice that register usually holds something else.
+-	       If the parm lives nowhere, use the register where it
+-	       was passed.  */
+-	    if (REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+-	      best_rtl = DECL_RTL (parms);
+-	    else if (GET_CODE (DECL_INCOMING_RTL (parms)) == PARALLEL)
+-	      best_rtl = XEXP (XVECEXP (DECL_INCOMING_RTL (parms), 0, 0), 0);
+-	    else
+-	      best_rtl = DECL_INCOMING_RTL (parms);
+-
+-	    number = DBX_REGISTER_NUMBER (REGNO (best_rtl));
+-	  }
+-	else if (MEM_P (DECL_RTL (parms))
+-		 && REG_P (XEXP (DECL_RTL (parms), 0))
+-		 && REGNO (XEXP (DECL_RTL (parms), 0)) != HARD_FRAME_POINTER_REGNUM
+-		 && REGNO (XEXP (DECL_RTL (parms), 0)) != STACK_POINTER_REGNUM
+-#if !HARD_FRAME_POINTER_IS_ARG_POINTER
+-		 && REGNO (XEXP (DECL_RTL (parms), 0)) != ARG_POINTER_REGNUM
+-#endif
+-		 )
+-	  {
+-	    /* Parm was passed via invisible reference.
+-	       That is, its address was passed in a register.
+-	       Output it as if it lived in that register.
+-	       The debugger will know from the type
+-	       that it was actually passed by invisible reference.  */
+-
+-	    code = DBX_REGPARM_STABS_CODE;
+-
+-	    /* GDB likes this marked with a special letter.  */
+-	    letter = (use_gnu_debug_info_extensions
+-		      ? 'a' : DBX_REGPARM_STABS_LETTER);
+-	    eff_type = TREE_TYPE (parms);
+-
+-	    /* DECL_RTL looks like (MEM (REG...).  Get the register number.
+-	       If it is an unallocated pseudo-reg, then use the register where
+-	       it was passed instead.
+-	       ??? Why is DBX_REGISTER_NUMBER not used here?  */
+-
+-	    if (REGNO (XEXP (DECL_RTL (parms), 0)) < FIRST_PSEUDO_REGISTER)
+-	      number = REGNO (XEXP (DECL_RTL (parms), 0));
+-	    else
+-	      number = REGNO (DECL_INCOMING_RTL (parms));
+-	  }
+-	else if (MEM_P (DECL_RTL (parms))
+-		 && MEM_P (XEXP (DECL_RTL (parms), 0)))
+-	  {
+-	    /* Parm was passed via invisible reference, with the reference
+-	       living on the stack.  DECL_RTL looks like
+-	       (MEM (MEM (PLUS (REG ...) (CONST_INT ...)))) or it
+-	       could look like (MEM (MEM (REG))).  */
+-
+-	    code = N_PSYM;
+-	    letter = 'v';
+-	    eff_type = TREE_TYPE (parms);
+-
+-	    if (!REG_P (XEXP (XEXP (DECL_RTL (parms), 0), 0)))
+-	      number = INTVAL (XEXP (XEXP (XEXP (DECL_RTL (parms), 0), 0), 1));
+-	    else
+-	      number = 0;
+-
+-	    number = DEBUGGER_ARG_OFFSET (number,
+-					  XEXP (XEXP (DECL_RTL (parms), 0), 0));
+-	  }
+-	else if (MEM_P (DECL_RTL (parms))
+-		 && XEXP (DECL_RTL (parms), 0) != const0_rtx
+-		 /* ??? A constant address for a parm can happen
+-		    when the reg it lives in is equiv to a constant in memory.
+-		    Should make this not happen, after 2.4.  */
+-		 && ! CONSTANT_P (XEXP (DECL_RTL (parms), 0)))
+-	  {
+-	    /* Parm was passed in registers but lives on the stack.  */
+-
+-	    code = N_PSYM;
+-	    letter = 'p';
+-	    eff_type = TREE_TYPE (parms);
+-
+-	    /* DECL_RTL looks like (MEM (PLUS (REG...) (CONST_INT...))),
+-	       in which case we want the value of that CONST_INT,
+-	       or (MEM (REG ...)),
+-	       in which case we use a value of zero.  */
+-	    if (!REG_P (XEXP (DECL_RTL (parms), 0)))
+-	      number = INTVAL (XEXP (XEXP (DECL_RTL (parms), 0), 1));
+-	    else
+-	      number = 0;
+-
+-	    /* Make a big endian correction if the mode of the type of the
+-	       parameter is not the same as the mode of the rtl.  */
+-	    if (BYTES_BIG_ENDIAN
+-		&& type_mode != rtl_mode
+-		&& GET_MODE_SIZE (type_mode) < UNITS_PER_WORD)
+-	      number += GET_MODE_SIZE (rtl_mode) - GET_MODE_SIZE (type_mode);
+-	  }
+-	else
+-	  /* ??? We don't know how to represent this argument.  */
+-	  continue;
+-
+-	dbxout_begin_complex_stabs ();
+-
+-	if (DECL_NAME (parms))
+-	  {
+-	    stabstr_I (DECL_NAME (parms));
+-	    stabstr_C (':');
+-	  }
+-	else
+-	  stabstr_S ("(anon):");
+-	stabstr_C (letter);
+-	dbxout_type (eff_type, 0);
+-	dbxout_finish_complex_stabs (parms, code, 0, 0, number);
+-      }
+-  DBXOUT_DECR_NESTING;
+-}
+-
+-/* Output definitions for the places where parms live during the function,
+-   when different from where they were passed, when the parms were passed
+-   in memory.
+-
+-   It is not useful to do this for parms passed in registers
+-   that live during the function in different registers, because it is
+-   impossible to look in the passed register for the passed value,
+-   so we use the within-the-function register to begin with.
+-
+-   PARMS is a chain of PARM_DECL nodes.  */
+-
+-void
+-dbxout_reg_parms (tree parms)
+-{
+-  ++debug_nesting;
+-
+-  for (; parms; parms = DECL_CHAIN (parms))
+-    if (DECL_NAME (parms) && PARM_PASSED_IN_MEMORY (parms))
+-      {
+-	/* Report parms that live in registers during the function
+-	   but were passed in memory.  */
+-	if (REG_P (DECL_RTL (parms))
+-	    && REGNO (DECL_RTL (parms)) < FIRST_PSEUDO_REGISTER)
+-	  dbxout_symbol_location (parms, TREE_TYPE (parms),
+-				  0, DECL_RTL (parms));
+-	else if (GET_CODE (DECL_RTL (parms)) == CONCAT)
+-	  dbxout_symbol_location (parms, TREE_TYPE (parms),
+-				  0, DECL_RTL (parms));
+-	/* Report parms that live in memory but not where they were passed.  */
+-	else if (MEM_P (DECL_RTL (parms))
+-		 && ! rtx_equal_p (DECL_RTL (parms), DECL_INCOMING_RTL (parms)))
+-	  dbxout_symbol_location (parms, TREE_TYPE (parms),
+-				  0, DECL_RTL (parms));
+-      }
+-  DBXOUT_DECR_NESTING;
+-}
+-
+-/* Given a chain of ..._TYPE nodes (as come in a parameter list),
+-   output definitions of those names, in raw form */
+-
+-static void
+-dbxout_args (tree args)
+-{
+-  while (args)
+-    {
+-      stabstr_C (',');
+-      dbxout_type (TREE_VALUE (args), 0);
+-      args = TREE_CHAIN (args);
+-    }
+-}
+-
+-#if defined (DBX_DEBUGGING_INFO)
+-
+-/* Subroutine of dbxout_block.  Emit an N_LBRAC stab referencing LABEL.
+-   BEGIN_LABEL is the name of the beginning of the function, which may
+-   be required.  */
+-static void
+-dbx_output_lbrac (const char *label,
+-		  const char *begin_label ATTRIBUTE_UNUSED)
+-{
+-  dbxout_begin_stabn (N_LBRAC);
+-  if (DBX_BLOCKS_FUNCTION_RELATIVE)
+-    dbxout_stab_value_label_diff (label, begin_label);
+-  else
+-    dbxout_stab_value_label (label);
+-}
+-
+-/* Subroutine of dbxout_block.  Emit an N_RBRAC stab referencing LABEL.
+-   BEGIN_LABEL is the name of the beginning of the function, which may
+-   be required.  */
+-static void
+-dbx_output_rbrac (const char *label,
+-		  const char *begin_label ATTRIBUTE_UNUSED)
+-{
+-  dbxout_begin_stabn (N_RBRAC);
+-  if (DBX_BLOCKS_FUNCTION_RELATIVE)
+-    dbxout_stab_value_label_diff (label, begin_label);
+-  else
+-    dbxout_stab_value_label (label);
+-}
+-
+-/* Return true if at least one block among BLOCK, its children or siblings
+-   has TREE_USED, TREE_ASM_WRITTEN and BLOCK_IN_COLD_SECTION_P
+-   set.  If there is none, clear TREE_USED bit on such blocks.  */
+-
+-static bool
+-dbx_block_with_cold_children (tree block)
+-{
+-  bool ret = false;
+-  for (; block; block = BLOCK_CHAIN (block))
+-    if (TREE_USED (block) && TREE_ASM_WRITTEN (block))
+-      {
+-	bool children = dbx_block_with_cold_children (BLOCK_SUBBLOCKS (block));
+-	if (BLOCK_IN_COLD_SECTION_P (block) || children)
+-	  ret = true;
+-	else
+-	  TREE_USED (block) = false;
+-      }
+-  return ret;
+-}
+-
+-/* Output everything about a symbol block (a BLOCK node
+-   that represents a scope level),
+-   including recursive output of contained blocks.
+-
+-   BLOCK is the BLOCK node.
+-   DEPTH is its depth within containing symbol blocks.
+-   ARGS is usually zero; but for the outermost block of the
+-   body of a function, it is a chain of PARM_DECLs for the function parameters.
+-   We output definitions of all the register parms
+-   as if they were local variables of that block.
+-
+-   If -g1 was used, we count blocks just the same, but output nothing
+-   except for the outermost block.
+-
+-   Actually, BLOCK may be several blocks chained together.
+-   We handle them all in sequence.
+-
+-   Return true if we emitted any LBRAC/RBRAC.  */
+-
+-static bool
+-dbxout_block (tree block, int depth, tree args, int parent_blocknum)
+-{
+-  bool ret = false;
+-  char begin_label[20];
+-  /* Reference current function start using LFBB.  */
+-  ASM_GENERATE_INTERNAL_LABEL (begin_label, "LFBB", scope_labelno);
+-
+-  /* If called for the second partition, ignore blocks that don't have
+-     any children in the second partition.  */
+-  if (crtl->has_bb_partition && in_cold_section_p && depth == 0)
+-    dbx_block_with_cold_children (block);
+-
+-  for (; block; block = BLOCK_CHAIN (block))
+-    {
+-      /* Ignore blocks never expanded or otherwise marked as real.  */
+-      if (TREE_USED (block) && TREE_ASM_WRITTEN (block))
+-	{
+-	  int did_output;
+-	  int blocknum = BLOCK_NUMBER (block);
+-	  int this_parent = parent_blocknum;
+-
+-	  /* In dbx format, the syms of a block come before the N_LBRAC.
+-	     If nothing is output, we don't need the N_LBRAC, either.  */
+-	  did_output = 0;
+-	  if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+-	    did_output = dbxout_syms (BLOCK_VARS (block));
+-	  if (args)
+-	    dbxout_reg_parms (args);
+-
+-	  /* Now output an N_LBRAC symbol to represent the beginning of
+-	     the block.  Use the block's tree-walk order to generate
+-	     the assembler symbols LBBn and LBEn
+-	     that final will define around the code in this block.  */
+-	  if (did_output
+-	      && BLOCK_IN_COLD_SECTION_P (block) == in_cold_section_p)
+-	    {
+-	      char buf[20];
+-	      const char *scope_start;
+-
+-	      ret = true;
+-	      if (depth == 0)
+-		/* The outermost block doesn't get LBB labels; use
+-		   the LFBB local symbol emitted by dbxout_begin_prologue.  */
+-		scope_start = begin_label;
+-	      else
+-		{
+-		  ASM_GENERATE_INTERNAL_LABEL (buf, "LBB", blocknum);
+-		  scope_start = buf;
+-		  this_parent = blocknum;
+-		}
+-
+-	      dbx_output_lbrac (scope_start, begin_label);
+-	    }
+-
+-	  /* Output the subblocks.  */
+-	  bool children
+-	    = dbxout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE,
+-			    this_parent);
+-	  ret |= children;
+-
+-	  /* Refer to the marker for the end of the block.  */
+-	  if (did_output
+-	      && BLOCK_IN_COLD_SECTION_P (block) == in_cold_section_p)
+-	    {
+-	      char buf[100];
+-	      if (depth == 0)
+-		/* The outermost block doesn't get LBE labels;
+-		   use the "scope" label which will be emitted
+-		   by dbxout_function_end.  */
+-		ASM_GENERATE_INTERNAL_LABEL (buf, "Lscope", scope_labelno);
+-	      else
+-		ASM_GENERATE_INTERNAL_LABEL (buf, "LBE", blocknum);
+-
+-	      dbx_output_rbrac (buf, begin_label);
+-	    }
+-	  else if (did_output && !children)
+-	    {
+-	      /* If we emitted any vars and didn't output any LBRAC/RBRAC,
+-		 either at this level or any lower level, we need to emit
+-		 an empty LBRAC/RBRAC pair now.  */
+-	      char buf[30];
+-	      const char *scope_start;
+-
+-	      ret = true;
+-	      if (parent_blocknum == -1)
+-		scope_start = begin_label;
+-	      else
+-		{
+-		  ASM_GENERATE_INTERNAL_LABEL (buf, "LBB", parent_blocknum);
+-		  scope_start = buf;
+-		}
+-
+-	      dbx_output_lbrac (scope_start, begin_label);
+-	      dbx_output_rbrac (scope_start, begin_label);
+-	    }
+-	}
+-    }
+-  return ret;
+-}
+-
+-/* Output the information about a function and its arguments and result.
+-   Usually this follows the function's code,
+-   but on some systems, it comes before.  */
+-
+-static void
+-dbxout_begin_function (tree decl)
+-{
+-  int saved_tree_used1;
+-
+-  saved_tree_used1 = TREE_USED (decl);
+-  TREE_USED (decl) = 1;
+-  if (DECL_NAME (DECL_RESULT (decl)) != 0)
+-    {
+-      int saved_tree_used2 = TREE_USED (DECL_RESULT (decl));
+-      TREE_USED (DECL_RESULT (decl)) = 1;
+-      dbxout_symbol (decl, 0);
+-      TREE_USED (DECL_RESULT (decl)) = saved_tree_used2;
+-    }
+-  else
+-    dbxout_symbol (decl, 0);
+-  TREE_USED (decl) = saved_tree_used1;
+-
+-  dbxout_parms (DECL_ARGUMENTS (decl));
+-  if (DECL_NAME (DECL_RESULT (decl)) != 0)
+-    dbxout_symbol (DECL_RESULT (decl), 1);
+-}
+-#endif /* DBX_DEBUGGING_INFO */
+-
+-#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
+-
+-/* Record an element in the table of global destructors.  SYMBOL is
+-   a SYMBOL_REF of the function to be called; PRIORITY is a number
+-   between 0 and MAX_INIT_PRIORITY.  */
+-
+-void
+-default_stabs_asm_out_destructor (rtx symbol ATTRIBUTE_UNUSED,
+-				  int priority ATTRIBUTE_UNUSED)
+-{
+-#if defined DBX_DEBUGGING_INFO || defined XCOFF_DEBUGGING_INFO
+-  /* Tell GNU LD that this is part of the static destructor set.
+-     This will work for any system that uses stabs, most usefully
+-     aout systems.  */
+-  dbxout_begin_simple_stabs ("___DTOR_LIST__", 22 /* N_SETT */);
+-  dbxout_stab_value_label (XSTR (symbol, 0));
+-#else
+-  sorry ("global destructors not supported on this target");
+-#endif
+-}
+-
+-/* Likewise for global constructors.  */
+-
+-void
+-default_stabs_asm_out_constructor (rtx symbol ATTRIBUTE_UNUSED,
+-				   int priority ATTRIBUTE_UNUSED)
+-{
+-#if defined DBX_DEBUGGING_INFO || defined XCOFF_DEBUGGING_INFO
+-  /* Tell GNU LD that this is part of the static destructor set.
+-     This will work for any system that uses stabs, most usefully
+-     aout systems.  */
+-  dbxout_begin_simple_stabs ("___CTOR_LIST__", 22 /* N_SETT */);
+-  dbxout_stab_value_label (XSTR (symbol, 0));
+-#else
+-  sorry ("global constructors not supported on this target");
+-#endif
+-}
+-
+-#include "gt-dbxout.h"
+diff --git a/gcc/dbxout.h b/gcc/dbxout.h
+deleted file mode 100644
+index 2c38e76c2..000000000
+--- a/gcc/dbxout.h
++++ /dev/null
+@@ -1,60 +0,0 @@
+-/* dbxout.h - Various declarations for functions found in dbxout.cc
+-   Copyright (C) 1998-2022 Free Software Foundation, Inc.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify it under
+-the terms of the GNU General Public License as published by the Free
+-Software Foundation; either version 3, or (at your option) any later
+-version.
+-
+-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+-WARRANTY; without even the implied warranty of MERCHANTABILITY or
+-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+-for more details.
+-
+-You should have received a copy of the GNU General Public License
+-along with GCC; see the file COPYING3.  If not see
+-.  */
+-
+-#ifndef GCC_DBXOUT_H
+-#define GCC_DBXOUT_H
+-
+-extern int dbxout_symbol (tree, int);
+-extern void dbxout_parms (tree);
+-extern void dbxout_reg_parms (tree);
+-extern int dbxout_syms (tree);
+-
+-extern void default_stabs_asm_out_destructor (rtx, int);
+-extern void default_stabs_asm_out_constructor (rtx, int);
+-
+-/* dbxout helper functions */
+-#if defined DBX_DEBUGGING_INFO || defined XCOFF_DEBUGGING_INFO
+-
+-extern void dbxout_int (int);
+-extern void dbxout_stabd (int, int);
+-extern void dbxout_begin_stabn (int);
+-extern void dbxout_begin_stabn_sline (int);
+-extern void dbxout_begin_empty_stabs (int);
+-extern void dbxout_begin_simple_stabs (const char *, int);
+-extern void dbxout_begin_simple_stabs_desc (const char *, int, int);
+-
+-extern void dbxout_stab_value_zero (void);
+-extern void dbxout_stab_value_label (const char *);
+-extern void dbxout_stab_value_label_diff (const char *, const char *);
+-extern void dbxout_stab_value_internal_label (const char *, int *);
+-extern void dbxout_stab_value_internal_label_diff (const char *, int *,
+-						   const char *);
+-#endif
+-
+-/* Language description for N_SO stabs.  */
+-#define N_SO_AS          1
+-#define N_SO_C           2
+-#define N_SO_ANSI_C      3
+-#define N_SO_CC          4 /* c++*/
+-#define N_SO_FORTRAN     5
+-#define N_SO_FORTRAN90   7
+-#define N_SO_OBJC        50
+-#define N_SO_OBJCPLUS    51
+-
+-#endif /* GCC_DBXOUT_H */
+diff --git a/gcc/debug.h b/gcc/debug.h
+index 17a7e4862..fe85115d5 100644
+--- a/gcc/debug.h
++++ b/gcc/debug.h
+@@ -238,7 +238,6 @@ extern void debug_nothing_tree_charstar_uhwi (tree, const char *,
+ 
+ /* Hooks for various debug formats.  */
+ extern const struct gcc_debug_hooks do_nothing_debug_hooks;
+-extern const struct gcc_debug_hooks dbx_debug_hooks;
+ extern const struct gcc_debug_hooks xcoff_debug_hooks;
+ extern const struct gcc_debug_hooks dwarf2_debug_hooks;
+ extern const struct gcc_debug_hooks dwarf2_lineno_debug_hooks;
+diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi
+index 19d073256..a650f60c7 100644
+--- a/gcc/doc/install.texi
++++ b/gcc/doc/install.texi
+@@ -3982,8 +3982,7 @@ on FreeBSD 7 or later) and the use of @code{__cxa_atexit} by default
+ by GCC 4.5 and above.
+ 
+ We support FreeBSD using the ELF file format with DWARF 2 debugging
+-for all CPU architectures.  You may use @option{-gstabs} instead of
+-@option{-g}, if you really want the old debugging format.  There are
++for all CPU architectures.  There are
+ no known issues with mixing object files and libraries with different
+ debugging formats.  Otherwise, this release of GCC should now match
+ more of the configuration used in the stock FreeBSD configuration of
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 4d3eccdb2..7ca60dd64 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -45,7 +45,7 @@ remainder.  @command{g++} accepts mostly the same options as @command{gcc}.
+ @c man end
+ @c man begin SEEALSO
+ gpl(7), gfdl(7), fsf-funding(7),
+-cpp(1), gcov(1), as(1), ld(1), gdb(1), dbx(1)
++cpp(1), gcov(1), as(1), ld(1), gdb(1)
+ and the Info entries for @file{gcc}, @file{cpp}, @file{as},
+ @file{ld}, @file{binutils} and @file{gdb}.
+ @c man end
+@@ -480,7 +480,7 @@ Objective-C and Objective-C++ Dialects}.
+ @gccoptlist{-g  -g@var{level}  -gdwarf  -gdwarf-@var{version} @gol
+ -gbtf -gctf  -gctf@var{level} @gol
+ -ggdb  -grecord-gcc-switches  -gno-record-gcc-switches @gol
+--gstabs  -gstabs+  -gstrict-dwarf  -gno-strict-dwarf @gol
++-gstrict-dwarf  -gno-strict-dwarf @gol
+ -gas-loc-support  -gno-as-loc-support @gol
+ -gas-locview-support  -gno-as-locview-support @gol
+ -gcolumn-info  -gno-column-info  -gdwarf32  -gdwarf64 @gol
+@@ -488,7 +488,7 @@ Objective-C and Objective-C++ Dialects}.
+ -gvariable-location-views  -gno-variable-location-views @gol
+ -ginternal-reset-location-views  -gno-internal-reset-location-views @gol
+ -ginline-points  -gno-inline-points @gol
+--gvms  -gxcoff  -gxcoff+  -gz@r{[}=@var{type}@r{]} @gol
++-gvms -gz@r{[}=@var{type}@r{]} @gol
+ -gsplit-dwarf  -gdescribe-dies  -gno-describe-dies @gol
+ -fdebug-prefix-map=@var{old}=@var{new}  -fdebug-types-section @gol
+ -fno-eliminate-unused-debug-types @gol
+@@ -10276,10 +10276,8 @@ information.
+ On most systems that use stabs format, @option{-g} enables use of extra
+ debugging information that only GDB can use; this extra information
+ makes debugging work better in GDB but probably makes other debuggers
+-crash or
+-refuse to read the program.  If you want to control for certain whether
+-to generate the extra information, use @option{-gstabs+}, @option{-gstabs},
+-@option{-gxcoff+}, @option{-gxcoff}, or @option{-gvms} (see below).
++crash or refuse to read the program.  If you want to control for certain whether
++to generate the extra information, use @option{-gvms} (see below).
+ 
+ @item -ggdb
+ @opindex ggdb
+@@ -10336,34 +10334,6 @@ information, but does not include type information.
+ Level 2 produces type information for entities (functions, data objects etc.)
+ at file-scope or global-scope only.
+ 
+-@item -gstabs
+-@opindex gstabs
+-Produce debugging information in stabs format (if that is supported),
+-without GDB extensions.  This is the format used by DBX on most BSD
+-systems.  On MIPS, Alpha and System V Release 4 systems this option
+-produces stabs debugging output that is not understood by DBX@.
+-On System V Release 4 systems this option requires the GNU assembler.
+-
+-@item -gstabs+
+-@opindex gstabs+
+-Produce debugging information in stabs format (if that is supported),
+-using GNU extensions understood only by the GNU debugger (GDB)@.  The
+-use of these extensions is likely to make other debuggers crash or
+-refuse to read the program.
+-
+-@item -gxcoff
+-@opindex gxcoff
+-Produce debugging information in XCOFF format (if that is supported).
+-This is the format used by the DBX debugger on IBM RS/6000 systems.
+-
+-@item -gxcoff+
+-@opindex gxcoff+
+-Produce debugging information in XCOFF format (if that is supported),
+-using GNU extensions understood only by the GNU debugger (GDB)@.  The
+-use of these extensions is likely to make other debuggers crash or
+-refuse to read the program, and may cause assemblers other than the GNU
+-assembler (GAS) to fail with an error.
+-
+ @item -gvms
+ @opindex gvms
+ Produce debugging information in Alpha/VMS debug format (if that is
+@@ -10371,8 +10341,6 @@ supported).  This is the format used by DEBUG on Alpha/VMS systems.
+ 
+ @item -g@var{level}
+ @itemx -ggdb@var{level}
+-@itemx -gstabs@var{level}
+-@itemx -gxcoff@var{level}
+ @itemx -gvms@var{level}
+ Request debugging information and also use @var{level} to specify how
+ much information.  The default level is 2.
+diff --git a/gcc/doc/passes.texi b/gcc/doc/passes.texi
+index 1e821d4e5..9e8b4f50a 100644
+--- a/gcc/doc/passes.texi
++++ b/gcc/doc/passes.texi
+@@ -1184,7 +1184,7 @@ these files.
+ 
+ This is run after final because it must output the stack slot offsets
+ for pseudo registers that did not get hard registers.  Source files
+-are @file{dbxout.cc} for DBX symbol table format, @file{dwarfout.c} for
++are @file{dwarfout.c} for
+ DWARF symbol table format, files @file{dwarf2out.cc} and @file{dwarf2asm.cc}
+ for DWARF2 symbol table format, and @file{vmsdbgout.cc} for VMS debug
+ symbol table format.
+diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
+index 2aba523bb..851d31c18 100644
+--- a/gcc/doc/tm.texi
++++ b/gcc/doc/tm.texi
+@@ -9912,9 +9912,6 @@ This describes how to specify debugging information.
+ 
+ @menu
+ * All Debuggers::      Macros that affect all debugging formats uniformly.
+-* DBX Options::        Macros enabling specific options in DBX format.
+-* DBX Hooks::          Hook macros for varying DBX format.
+-* File Names and DBX:: Macros controlling output of file names in DBX format.
+ * DWARF::              Macros for DWARF format.
+ * VMS Debug::          Macros for VMS debug format.
+ * CTF Debug::          Macros for CTF debug format.
+@@ -9965,35 +9962,18 @@ having address @var{x} (an RTL expression).  The nominal offset is
+ A C expression that returns the type of debugging output GCC should
+ produce when the user specifies just @option{-g}.  Define
+ this if you have arranged for GCC to support more than one format of
+-debugging output.  Currently, the allowable values are @code{DBX_DEBUG},
+-@code{DWARF2_DEBUG}, @code{XCOFF_DEBUG}, @code{VMS_DEBUG},
++debugging output.  Currently, the allowable values are
++@code{DWARF2_DEBUG}, @code{VMS_DEBUG},
+ and @code{VMS_AND_DWARF2_DEBUG}.
+ 
+ When the user specifies @option{-ggdb}, GCC normally also uses the
+ value of this macro to select the debugging output format, but with two
+ exceptions.  If @code{DWARF2_DEBUGGING_INFO} is defined, GCC uses the
+-value @code{DWARF2_DEBUG}.  Otherwise, if @code{DBX_DEBUGGING_INFO} is
+-defined, GCC uses @code{DBX_DEBUG}.
++value @code{DWARF2_DEBUG}.
+ 
+ The value of this macro only affects the default debugging output; the
+-user can always get a specific type of output by using @option{-gstabs},
+-@option{-gdwarf-2}, @option{-gxcoff}, or @option{-gvms}.
+-@end defmac
+-
+-@node DBX Options
+-@subsection Specific Options for DBX Output
+-
+-@c prevent bad page break with this line
+-These are specific options for DBX output.
+-
+-@defmac DBX_DEBUGGING_INFO
+-Define this macro if GCC should produce debugging output for DBX
+-in response to the @option{-g} option.
+-@end defmac
+-
+-@defmac XCOFF_DEBUGGING_INFO
+-Define this macro if GCC should produce XCOFF format debugging output
+-in response to the @option{-g} option.  This is a variant of DBX format.
++user can always get a specific type of output by using  @option{-gdwarf-2},
++or @option{-gvms}.
+ @end defmac
+ 
+ @defmac DEFAULT_GDB_EXTENSIONS
+@@ -10004,196 +9984,6 @@ macro, the default is 1: always generate the extended information
+ if there is any occasion to.
+ @end defmac
+ 
+-@defmac DEBUG_SYMS_TEXT
+-Define this macro if all @code{.stabs} commands should be output while
+-in the text section.
+-@end defmac
+-
+-@defmac ASM_STABS_OP
+-A C string constant, including spacing, naming the assembler pseudo op to
+-use instead of @code{"\t.stabs\t"} to define an ordinary debugging symbol.
+-If you don't define this macro, @code{"\t.stabs\t"} is used.  This macro
+-applies only to DBX debugging information format.
+-@end defmac
+-
+-@defmac ASM_STABD_OP
+-A C string constant, including spacing, naming the assembler pseudo op to
+-use instead of @code{"\t.stabd\t"} to define a debugging symbol whose
+-value is the current location.  If you don't define this macro,
+-@code{"\t.stabd\t"} is used.  This macro applies only to DBX debugging
+-information format.
+-@end defmac
+-
+-@defmac ASM_STABN_OP
+-A C string constant, including spacing, naming the assembler pseudo op to
+-use instead of @code{"\t.stabn\t"} to define a debugging symbol with no
+-name.  If you don't define this macro, @code{"\t.stabn\t"} is used.  This
+-macro applies only to DBX debugging information format.
+-@end defmac
+-
+-@defmac DBX_NO_XREFS
+-Define this macro if DBX on your system does not support the construct
+-@samp{xs@var{tagname}}.  On some systems, this construct is used to
+-describe a forward reference to a structure named @var{tagname}.
+-On other systems, this construct is not supported at all.
+-@end defmac
+-
+-@defmac DBX_CONTIN_LENGTH
+-A symbol name in DBX-format debugging information is normally
+-continued (split into two separate @code{.stabs} directives) when it
+-exceeds a certain length (by default, 80 characters).  On some
+-operating systems, DBX requires this splitting; on others, splitting
+-must not be done.  You can inhibit splitting by defining this macro
+-with the value zero.  You can override the default splitting-length by
+-defining this macro as an expression for the length you desire.
+-@end defmac
+-
+-@defmac DBX_CONTIN_CHAR
+-Normally continuation is indicated by adding a @samp{\} character to
+-the end of a @code{.stabs} string when a continuation follows.  To use
+-a different character instead, define this macro as a character
+-constant for the character you want to use.  Do not define this macro
+-if backslash is correct for your system.
+-@end defmac
+-
+-@defmac DBX_STATIC_STAB_DATA_SECTION
+-Define this macro if it is necessary to go to the data section before
+-outputting the @samp{.stabs} pseudo-op for a non-global static
+-variable.
+-@end defmac
+-
+-@defmac DBX_TYPE_DECL_STABS_CODE
+-The value to use in the ``code'' field of the @code{.stabs} directive
+-for a typedef.  The default is @code{N_LSYM}.
+-@end defmac
+-
+-@defmac DBX_STATIC_CONST_VAR_CODE
+-The value to use in the ``code'' field of the @code{.stabs} directive
+-for a static variable located in the text section.  DBX format does not
+-provide any ``right'' way to do this.  The default is @code{N_FUN}.
+-@end defmac
+-
+-@defmac DBX_REGPARM_STABS_CODE
+-The value to use in the ``code'' field of the @code{.stabs} directive
+-for a parameter passed in registers.  DBX format does not provide any
+-``right'' way to do this.  The default is @code{N_RSYM}.
+-@end defmac
+-
+-@defmac DBX_REGPARM_STABS_LETTER
+-The letter to use in DBX symbol data to identify a symbol as a parameter
+-passed in registers.  DBX format does not customarily provide any way to
+-do this.  The default is @code{'P'}.
+-@end defmac
+-
+-@defmac DBX_FUNCTION_FIRST
+-Define this macro if the DBX information for a function and its
+-arguments should precede the assembler code for the function.  Normally,
+-in DBX format, the debugging information entirely follows the assembler
+-code.
+-@end defmac
+-
+-@defmac DBX_BLOCKS_FUNCTION_RELATIVE
+-Define this macro, with value 1, if the value of a symbol describing
+-the scope of a block (@code{N_LBRAC} or @code{N_RBRAC}) should be
+-relative to the start of the enclosing function.  Normally, GCC uses
+-an absolute address.
+-@end defmac
+-
+-@defmac DBX_LINES_FUNCTION_RELATIVE
+-Define this macro, with value 1, if the value of a symbol indicating
+-the current line number (@code{N_SLINE}) should be relative to the
+-start of the enclosing function.  Normally, GCC uses an absolute address.
+-@end defmac
+-
+-@defmac DBX_USE_BINCL
+-Define this macro if GCC should generate @code{N_BINCL} and
+-@code{N_EINCL} stabs for included header files, as on Sun systems.  This
+-macro also directs GCC to output a type number as a pair of a file
+-number and a type number within the file.  Normally, GCC does not
+-generate @code{N_BINCL} or @code{N_EINCL} stabs, and it outputs a single
+-number for a type number.
+-@end defmac
+-
+-@node DBX Hooks
+-@subsection Open-Ended Hooks for DBX Format
+-
+-@c prevent bad page break with this line
+-These are hooks for DBX format.
+-
+-@defmac DBX_OUTPUT_SOURCE_LINE (@var{stream}, @var{line}, @var{counter})
+-A C statement to output DBX debugging information before code for line
+-number @var{line} of the current source file to the stdio stream
+-@var{stream}.  @var{counter} is the number of time the macro was
+-invoked, including the current invocation; it is intended to generate
+-unique labels in the assembly output.
+-
+-This macro should not be defined if the default output is correct, or
+-if it can be made correct by defining @code{DBX_LINES_FUNCTION_RELATIVE}.
+-@end defmac
+-
+-@defmac NO_DBX_FUNCTION_END
+-Some stabs encapsulation formats (in particular ECOFF), cannot handle the
+-@code{.stabs "",N_FUN,,0,0,Lscope-function-1} gdb dbx extension construct.
+-On those machines, define this macro to turn this feature off without
+-disturbing the rest of the gdb extensions.
+-@end defmac
+-
+-@defmac NO_DBX_BNSYM_ENSYM
+-Some assemblers cannot handle the @code{.stabd BNSYM/ENSYM,0,0} gdb dbx
+-extension construct.  On those machines, define this macro to turn this
+-feature off without disturbing the rest of the gdb extensions.
+-@end defmac
+-
+-@node File Names and DBX
+-@subsection File Names in DBX Format
+-
+-@c prevent bad page break with this line
+-This describes file names in DBX format.
+-
+-@defmac DBX_OUTPUT_MAIN_SOURCE_FILENAME (@var{stream}, @var{name})
+-A C statement to output DBX debugging information to the stdio stream
+-@var{stream}, which indicates that file @var{name} is the main source
+-file---the file specified as the input file for compilation.
+-This macro is called only once, at the beginning of compilation.
+-
+-This macro need not be defined if the standard form of output
+-for DBX debugging information is appropriate.
+-
+-It may be necessary to refer to a label equal to the beginning of the
+-text section.  You can use @samp{assemble_name (stream, ltext_label_name)}
+-to do so.  If you do this, you must also set the variable
+-@var{used_ltext_label_name} to @code{true}.
+-@end defmac
+-
+-@defmac NO_DBX_MAIN_SOURCE_DIRECTORY
+-Define this macro, with value 1, if GCC should not emit an indication
+-of the current directory for compilation and current source language at
+-the beginning of the file.
+-@end defmac
+-
+-@defmac NO_DBX_GCC_MARKER
+-Define this macro, with value 1, if GCC should not emit an indication
+-that this object file was compiled by GCC@.  The default is to emit
+-an @code{N_OPT} stab at the beginning of every source file, with
+-@samp{gcc2_compiled.} for the string and value 0.
+-@end defmac
+-
+-@defmac DBX_OUTPUT_MAIN_SOURCE_FILE_END (@var{stream}, @var{name})
+-A C statement to output DBX debugging information at the end of
+-compilation of the main source file @var{name}.  Output should be
+-written to the stdio stream @var{stream}.
+-
+-If you don't define this macro, nothing special is output at the end
+-of compilation, which is correct for most machines.
+-@end defmac
+-
+-@defmac DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+-Define this macro @emph{instead of} defining
+-@code{DBX_OUTPUT_MAIN_SOURCE_FILE_END}, if what needs to be output at
+-the end of compilation is an @code{N_SO} stab with an empty string,
+-whose value is the highest absolute text address in the file.
+-@end defmac
+-
+ @need 2000
+ @node DWARF
+ @subsection Macros for DWARF Output
+diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
+index 817d586ff..ac95cdf7a 100644
+--- a/gcc/doc/tm.texi.in
++++ b/gcc/doc/tm.texi.in
+@@ -6603,9 +6603,6 @@ This describes how to specify debugging information.
+ 
+ @menu
+ * All Debuggers::      Macros that affect all debugging formats uniformly.
+-* DBX Options::        Macros enabling specific options in DBX format.
+-* DBX Hooks::          Hook macros for varying DBX format.
+-* File Names and DBX:: Macros controlling output of file names in DBX format.
+ * DWARF::              Macros for DWARF format.
+ * VMS Debug::          Macros for VMS debug format.
+ * CTF Debug::          Macros for CTF debug format.
+@@ -6656,35 +6653,18 @@ having address @var{x} (an RTL expression).  The nominal offset is
+ A C expression that returns the type of debugging output GCC should
+ produce when the user specifies just @option{-g}.  Define
+ this if you have arranged for GCC to support more than one format of
+-debugging output.  Currently, the allowable values are @code{DBX_DEBUG},
+-@code{DWARF2_DEBUG}, @code{XCOFF_DEBUG}, @code{VMS_DEBUG},
++debugging output.  Currently, the allowable values are
++@code{DWARF2_DEBUG}, @code{VMS_DEBUG},
+ and @code{VMS_AND_DWARF2_DEBUG}.
+ 
+ When the user specifies @option{-ggdb}, GCC normally also uses the
+ value of this macro to select the debugging output format, but with two
+ exceptions.  If @code{DWARF2_DEBUGGING_INFO} is defined, GCC uses the
+-value @code{DWARF2_DEBUG}.  Otherwise, if @code{DBX_DEBUGGING_INFO} is
+-defined, GCC uses @code{DBX_DEBUG}.
++value @code{DWARF2_DEBUG}.
+ 
+ The value of this macro only affects the default debugging output; the
+-user can always get a specific type of output by using @option{-gstabs},
+-@option{-gdwarf-2}, @option{-gxcoff}, or @option{-gvms}.
+-@end defmac
+-
+-@node DBX Options
+-@subsection Specific Options for DBX Output
+-
+-@c prevent bad page break with this line
+-These are specific options for DBX output.
+-
+-@defmac DBX_DEBUGGING_INFO
+-Define this macro if GCC should produce debugging output for DBX
+-in response to the @option{-g} option.
+-@end defmac
+-
+-@defmac XCOFF_DEBUGGING_INFO
+-Define this macro if GCC should produce XCOFF format debugging output
+-in response to the @option{-g} option.  This is a variant of DBX format.
++user can always get a specific type of output by using  @option{-gdwarf-2},
++or @option{-gvms}.
+ @end defmac
+ 
+ @defmac DEFAULT_GDB_EXTENSIONS
+@@ -6695,196 +6675,6 @@ macro, the default is 1: always generate the extended information
+ if there is any occasion to.
+ @end defmac
+ 
+-@defmac DEBUG_SYMS_TEXT
+-Define this macro if all @code{.stabs} commands should be output while
+-in the text section.
+-@end defmac
+-
+-@defmac ASM_STABS_OP
+-A C string constant, including spacing, naming the assembler pseudo op to
+-use instead of @code{"\t.stabs\t"} to define an ordinary debugging symbol.
+-If you don't define this macro, @code{"\t.stabs\t"} is used.  This macro
+-applies only to DBX debugging information format.
+-@end defmac
+-
+-@defmac ASM_STABD_OP
+-A C string constant, including spacing, naming the assembler pseudo op to
+-use instead of @code{"\t.stabd\t"} to define a debugging symbol whose
+-value is the current location.  If you don't define this macro,
+-@code{"\t.stabd\t"} is used.  This macro applies only to DBX debugging
+-information format.
+-@end defmac
+-
+-@defmac ASM_STABN_OP
+-A C string constant, including spacing, naming the assembler pseudo op to
+-use instead of @code{"\t.stabn\t"} to define a debugging symbol with no
+-name.  If you don't define this macro, @code{"\t.stabn\t"} is used.  This
+-macro applies only to DBX debugging information format.
+-@end defmac
+-
+-@defmac DBX_NO_XREFS
+-Define this macro if DBX on your system does not support the construct
+-@samp{xs@var{tagname}}.  On some systems, this construct is used to
+-describe a forward reference to a structure named @var{tagname}.
+-On other systems, this construct is not supported at all.
+-@end defmac
+-
+-@defmac DBX_CONTIN_LENGTH
+-A symbol name in DBX-format debugging information is normally
+-continued (split into two separate @code{.stabs} directives) when it
+-exceeds a certain length (by default, 80 characters).  On some
+-operating systems, DBX requires this splitting; on others, splitting
+-must not be done.  You can inhibit splitting by defining this macro
+-with the value zero.  You can override the default splitting-length by
+-defining this macro as an expression for the length you desire.
+-@end defmac
+-
+-@defmac DBX_CONTIN_CHAR
+-Normally continuation is indicated by adding a @samp{\} character to
+-the end of a @code{.stabs} string when a continuation follows.  To use
+-a different character instead, define this macro as a character
+-constant for the character you want to use.  Do not define this macro
+-if backslash is correct for your system.
+-@end defmac
+-
+-@defmac DBX_STATIC_STAB_DATA_SECTION
+-Define this macro if it is necessary to go to the data section before
+-outputting the @samp{.stabs} pseudo-op for a non-global static
+-variable.
+-@end defmac
+-
+-@defmac DBX_TYPE_DECL_STABS_CODE
+-The value to use in the ``code'' field of the @code{.stabs} directive
+-for a typedef.  The default is @code{N_LSYM}.
+-@end defmac
+-
+-@defmac DBX_STATIC_CONST_VAR_CODE
+-The value to use in the ``code'' field of the @code{.stabs} directive
+-for a static variable located in the text section.  DBX format does not
+-provide any ``right'' way to do this.  The default is @code{N_FUN}.
+-@end defmac
+-
+-@defmac DBX_REGPARM_STABS_CODE
+-The value to use in the ``code'' field of the @code{.stabs} directive
+-for a parameter passed in registers.  DBX format does not provide any
+-``right'' way to do this.  The default is @code{N_RSYM}.
+-@end defmac
+-
+-@defmac DBX_REGPARM_STABS_LETTER
+-The letter to use in DBX symbol data to identify a symbol as a parameter
+-passed in registers.  DBX format does not customarily provide any way to
+-do this.  The default is @code{'P'}.
+-@end defmac
+-
+-@defmac DBX_FUNCTION_FIRST
+-Define this macro if the DBX information for a function and its
+-arguments should precede the assembler code for the function.  Normally,
+-in DBX format, the debugging information entirely follows the assembler
+-code.
+-@end defmac
+-
+-@defmac DBX_BLOCKS_FUNCTION_RELATIVE
+-Define this macro, with value 1, if the value of a symbol describing
+-the scope of a block (@code{N_LBRAC} or @code{N_RBRAC}) should be
+-relative to the start of the enclosing function.  Normally, GCC uses
+-an absolute address.
+-@end defmac
+-
+-@defmac DBX_LINES_FUNCTION_RELATIVE
+-Define this macro, with value 1, if the value of a symbol indicating
+-the current line number (@code{N_SLINE}) should be relative to the
+-start of the enclosing function.  Normally, GCC uses an absolute address.
+-@end defmac
+-
+-@defmac DBX_USE_BINCL
+-Define this macro if GCC should generate @code{N_BINCL} and
+-@code{N_EINCL} stabs for included header files, as on Sun systems.  This
+-macro also directs GCC to output a type number as a pair of a file
+-number and a type number within the file.  Normally, GCC does not
+-generate @code{N_BINCL} or @code{N_EINCL} stabs, and it outputs a single
+-number for a type number.
+-@end defmac
+-
+-@node DBX Hooks
+-@subsection Open-Ended Hooks for DBX Format
+-
+-@c prevent bad page break with this line
+-These are hooks for DBX format.
+-
+-@defmac DBX_OUTPUT_SOURCE_LINE (@var{stream}, @var{line}, @var{counter})
+-A C statement to output DBX debugging information before code for line
+-number @var{line} of the current source file to the stdio stream
+-@var{stream}.  @var{counter} is the number of time the macro was
+-invoked, including the current invocation; it is intended to generate
+-unique labels in the assembly output.
+-
+-This macro should not be defined if the default output is correct, or
+-if it can be made correct by defining @code{DBX_LINES_FUNCTION_RELATIVE}.
+-@end defmac
+-
+-@defmac NO_DBX_FUNCTION_END
+-Some stabs encapsulation formats (in particular ECOFF), cannot handle the
+-@code{.stabs "",N_FUN,,0,0,Lscope-function-1} gdb dbx extension construct.
+-On those machines, define this macro to turn this feature off without
+-disturbing the rest of the gdb extensions.
+-@end defmac
+-
+-@defmac NO_DBX_BNSYM_ENSYM
+-Some assemblers cannot handle the @code{.stabd BNSYM/ENSYM,0,0} gdb dbx
+-extension construct.  On those machines, define this macro to turn this
+-feature off without disturbing the rest of the gdb extensions.
+-@end defmac
+-
+-@node File Names and DBX
+-@subsection File Names in DBX Format
+-
+-@c prevent bad page break with this line
+-This describes file names in DBX format.
+-
+-@defmac DBX_OUTPUT_MAIN_SOURCE_FILENAME (@var{stream}, @var{name})
+-A C statement to output DBX debugging information to the stdio stream
+-@var{stream}, which indicates that file @var{name} is the main source
+-file---the file specified as the input file for compilation.
+-This macro is called only once, at the beginning of compilation.
+-
+-This macro need not be defined if the standard form of output
+-for DBX debugging information is appropriate.
+-
+-It may be necessary to refer to a label equal to the beginning of the
+-text section.  You can use @samp{assemble_name (stream, ltext_label_name)}
+-to do so.  If you do this, you must also set the variable
+-@var{used_ltext_label_name} to @code{true}.
+-@end defmac
+-
+-@defmac NO_DBX_MAIN_SOURCE_DIRECTORY
+-Define this macro, with value 1, if GCC should not emit an indication
+-of the current directory for compilation and current source language at
+-the beginning of the file.
+-@end defmac
+-
+-@defmac NO_DBX_GCC_MARKER
+-Define this macro, with value 1, if GCC should not emit an indication
+-that this object file was compiled by GCC@.  The default is to emit
+-an @code{N_OPT} stab at the beginning of every source file, with
+-@samp{gcc2_compiled.} for the string and value 0.
+-@end defmac
+-
+-@defmac DBX_OUTPUT_MAIN_SOURCE_FILE_END (@var{stream}, @var{name})
+-A C statement to output DBX debugging information at the end of
+-compilation of the main source file @var{name}.  Output should be
+-written to the stdio stream @var{stream}.
+-
+-If you don't define this macro, nothing special is output at the end
+-of compilation, which is correct for most machines.
+-@end defmac
+-
+-@defmac DBX_OUTPUT_NULL_N_SO_AT_MAIN_SOURCE_FILE_END
+-Define this macro @emph{instead of} defining
+-@code{DBX_OUTPUT_MAIN_SOURCE_FILE_END}, if what needs to be output at
+-the end of compilation is an @code{N_SO} stab with an empty string,
+-whose value is the highest absolute text address in the file.
+-@end defmac
+-
+ @need 2000
+ @node DWARF
+ @subsection Macros for DWARF Output
+diff --git a/gcc/dwarf2asm.cc b/gcc/dwarf2asm.cc
+index 274f574f2..7eac83f7b 100644
+--- a/gcc/dwarf2asm.cc
++++ b/gcc/dwarf2asm.cc
+@@ -35,10 +35,6 @@ along with GCC; see the file COPYING3.  If not see
+ #include "emit-rtl.h"
+ #include "fold-const.h"
+ 
+-#ifndef XCOFF_DEBUGGING_INFO
+-#define XCOFF_DEBUGGING_INFO 0
+-#endif
+-
+ 
+ /* Output an unaligned integer with the given value and size.  Prefer not
+    to print a newline, since the caller may want to add a comment.  */
+@@ -384,16 +380,13 @@ dw2_asm_output_nstring (const char *str, size_t orig_len,
+ 
+   if (flag_debug_asm && comment)
+     {
+-      if (XCOFF_DEBUGGING_INFO)
+-	fputs ("\t.byte \"", asm_out_file);
+-      else
+-	fputs ("\t.ascii \"", asm_out_file);
++      fputs ("\t.ascii \"", asm_out_file);
+ 
+       for (i = 0; i < len; i++)
+ 	{
+ 	  int c = str[i];
+ 	  if (c == '\"')
+-	    fputc (XCOFF_DEBUGGING_INFO ? '\"' : '\\', asm_out_file);
++	    fputc ('\\', asm_out_file);
+ 	  else if (c == '\\')
+ 	    fputc ('\\', asm_out_file);
+ 	  if (ISPRINT (c))
+@@ -913,7 +906,7 @@ static GTY(()) hash_map *indirect_pool;
+ static GTY(()) int dw2_const_labelno;
+ 
+ #if defined(HAVE_GAS_HIDDEN)
+-# define USE_LINKONCE_INDIRECT (SUPPORTS_ONE_ONLY && !XCOFF_DEBUGGING_INFO)
++# define USE_LINKONCE_INDIRECT (SUPPORTS_ONE_ONLY)
+ #else
+ # define USE_LINKONCE_INDIRECT 0
+ #endif
+diff --git a/gcc/dwarf2out.cc b/gcc/dwarf2out.cc
+index f0f6f4fd4..380da2589 100644
+--- a/gcc/dwarf2out.cc
++++ b/gcc/dwarf2out.cc
+@@ -105,14 +105,6 @@ static rtx_insn *cached_next_real_insn;
+ static void dwarf2out_decl (tree);
+ static bool is_redundant_typedef (const_tree);
+ 
+-#ifndef XCOFF_DEBUGGING_INFO
+-#define XCOFF_DEBUGGING_INFO 0
+-#endif
+-
+-#ifndef HAVE_XCOFF_DWARF_EXTRAS
+-#define HAVE_XCOFF_DWARF_EXTRAS 0
+-#endif
+-
+ #ifdef VMS_DEBUGGING_INFO
+ int vms_file_stats_name (const char *, long long *, long *, char *, int *);
+ 
+@@ -608,14 +600,11 @@ output_fde (dw_fde_ref fde, bool for_eh, bool second,
+ 				  for_eh + j);
+   ASM_GENERATE_INTERNAL_LABEL (l1, FDE_AFTER_SIZE_LABEL, for_eh + j);
+   ASM_GENERATE_INTERNAL_LABEL (l2, FDE_END_LABEL, for_eh + j);
+-  if (!XCOFF_DEBUGGING_INFO || for_eh)
+-    {
+-      if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4 && !for_eh)
+-	dw2_asm_output_data (4, 0xffffffff, "Initial length escape value"
+-			     " indicating 64-bit DWARF extension");
+-      dw2_asm_output_delta (for_eh ? 4 : dwarf_offset_size, l2, l1,
+-			    "FDE Length");
+-    }
++  if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4 && !for_eh)
++    dw2_asm_output_data (4, 0xffffffff, "Initial length escape value"
++			 " indicating 64-bit DWARF extension");
++  dw2_asm_output_delta (for_eh ? 4 : dwarf_offset_size, l2, l1,
++			"FDE Length");
+   ASM_OUTPUT_LABEL (asm_out_file, l1);
+ 
+   if (for_eh)
+@@ -812,14 +801,11 @@ output_call_frame_info (int for_eh)
+   /* Output the CIE.  */
+   ASM_GENERATE_INTERNAL_LABEL (l1, CIE_AFTER_SIZE_LABEL, for_eh);
+   ASM_GENERATE_INTERNAL_LABEL (l2, CIE_END_LABEL, for_eh);
+-  if (!XCOFF_DEBUGGING_INFO || for_eh)
+-    {
+-      if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4 && !for_eh)
+-	dw2_asm_output_data (4, 0xffffffff,
+-	  "Initial length escape value indicating 64-bit DWARF extension");
+-      dw2_asm_output_delta (for_eh ? 4 : dwarf_offset_size, l2, l1,
+-			    "Length of Common Information Entry");
+-    }
++  if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4 && !for_eh)
++    dw2_asm_output_data (4, 0xffffffff,
++			 "Initial length escape value indicating 64-bit DWARF extension");
++  dw2_asm_output_delta (for_eh ? 4 : dwarf_offset_size, l2, l1,
++			"Length of Common Information Entry");
+   ASM_OUTPUT_LABEL (asm_out_file, l1);
+ 
+   /* Now that the CIE pointer is PC-relative for EH,
+@@ -3679,8 +3665,7 @@ static GTY (()) vec *macinfo_table;
+ /* True if .debug_macinfo or .debug_macros section is going to be
+    emitted.  */
+ #define have_macinfo \
+-  ((!XCOFF_DEBUGGING_INFO || HAVE_XCOFF_DWARF_EXTRAS) \
+-   && debug_info_level >= DINFO_LEVEL_VERBOSE \
++   (debug_info_level >= DINFO_LEVEL_VERBOSE \
+    && !macinfo_table->is_empty ())
+ 
+ /* Vector of dies for which we should generate .debug_ranges info.  */
+@@ -4982,9 +4967,6 @@ add_AT_loc_list (dw_die_ref die, enum dwarf_attribute attr_kind, dw_loc_list_ref
+ {
+   dw_attr_node attr;
+ 
+-  if (XCOFF_DEBUGGING_INFO && !HAVE_XCOFF_DWARF_EXTRAS)
+-    return;
+-
+   attr.dw_attr = attr_kind;
+   attr.dw_attr_val.val_class = dw_val_class_loc_list;
+   attr.dw_attr_val.val_entry = NULL;
+@@ -5008,9 +4990,6 @@ add_AT_view_list (dw_die_ref die, enum dwarf_attribute attr_kind)
+ {
+   dw_attr_node attr;
+ 
+-  if (XCOFF_DEBUGGING_INFO && !HAVE_XCOFF_DWARF_EXTRAS)
+-    return;
+-
+   attr.dw_attr = attr_kind;
+   attr.dw_attr_val.val_class = dw_val_class_view_list;
+   attr.dw_attr_val.val_entry = NULL;
+@@ -11167,15 +11146,12 @@ output_dwarf_version ()
+ static void
+ output_compilation_unit_header (enum dwarf_unit_type ut)
+ {
+-  if (!XCOFF_DEBUGGING_INFO)
+-    {
+-      if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
+-	dw2_asm_output_data (4, 0xffffffff,
+-	  "Initial length escape value indicating 64-bit DWARF extension");
+-      dw2_asm_output_data (dwarf_offset_size,
+-			   next_die_offset - DWARF_INITIAL_LENGTH_SIZE,
+-			   "Length of Compilation Unit Info");
+-    }
++  if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
++    dw2_asm_output_data (4, 0xffffffff,
++      "Initial length escape value indicating 64-bit DWARF extension");
++  dw2_asm_output_data (dwarf_offset_size,
++		       next_die_offset - DWARF_INITIAL_LENGTH_SIZE,
++		       "Length of Compilation Unit Info");
+ 
+   output_dwarf_version ();
+   if (dwarf_version >= 5)
+@@ -11684,14 +11660,11 @@ output_pubnames (vec *names)
+   unsigned long pubnames_length = size_of_pubnames (names);
+   pubname_entry *pub;
+ 
+-  if (!XCOFF_DEBUGGING_INFO)
+-    {
+-      if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
+-	dw2_asm_output_data (4, 0xffffffff,
+-	  "Initial length escape value indicating 64-bit DWARF extension");
+-      dw2_asm_output_data (dwarf_offset_size, pubnames_length,
+-			   "Pub Info Length");
+-    }
++  if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
++    dw2_asm_output_data (4, 0xffffffff,
++			 "Initial length escape value indicating 64-bit DWARF extension");
++  dw2_asm_output_data (dwarf_offset_size, pubnames_length,
++		       "Pub Info Length");
+ 
+   /* Version number for pubnames/pubtypes is independent of dwarf version.  */
+   dw2_asm_output_data (2, 2, "DWARF pubnames/pubtypes version");
+@@ -11766,14 +11739,11 @@ output_aranges (void)
+   unsigned i;
+   unsigned long aranges_length = size_of_aranges ();
+   
+-  if (!XCOFF_DEBUGGING_INFO)
+-    {
+-      if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
+-	dw2_asm_output_data (4, 0xffffffff,
+-	  "Initial length escape value indicating 64-bit DWARF extension");
+-      dw2_asm_output_data (dwarf_offset_size, aranges_length,
+-			   "Length of Address Ranges Info");
+-    }
++  if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
++    dw2_asm_output_data (4, 0xffffffff,
++			 "Initial length escape value indicating 64-bit DWARF extension");
++  dw2_asm_output_data (dwarf_offset_size, aranges_length,
++		       "Length of Address Ranges Info");
+ 
+   /* Version number for aranges is still 2, even up to DWARF5.  */
+   dw2_asm_output_data (2, 2, "DWARF aranges version");
+@@ -13067,14 +13037,11 @@ output_line_info (bool prologue_only)
+   ASM_GENERATE_INTERNAL_LABEL (p2, LN_PROLOG_END_LABEL,
+ 			       output_line_info_generation++);
+ 
+-  if (!XCOFF_DEBUGGING_INFO)
+-    {
+-      if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
+-	dw2_asm_output_data (4, 0xffffffff,
+-	  "Initial length escape value indicating 64-bit DWARF extension");
+-      dw2_asm_output_delta (dwarf_offset_size, l2, l1,
+-			    "Length of Source Line Info");
+-    }
++  if (DWARF_INITIAL_LENGTH_SIZE - dwarf_offset_size == 4)
++    dw2_asm_output_data (4, 0xffffffff,
++			 "Initial length escape value indicating 64-bit DWARF extension");
++  dw2_asm_output_delta (dwarf_offset_size, l2, l1,
++			"Length of Source Line Info");
+ 
+   ASM_OUTPUT_LABEL (asm_out_file, l1);
+ 
+@@ -29144,8 +29111,6 @@ output_macinfo (const char *debug_line_label, bool early_lto_debug)
+   /* AIX Assembler inserts the length, so adjust the reference to match the
+      offset expected by debuggers.  */
+   strcpy (dl_section_ref, debug_line_label);
+-  if (XCOFF_DEBUGGING_INFO)
+-    strcat (dl_section_ref, DWARF_INITIAL_LENGTH_SIZE_STR);
+ 
+   /* For .debug_macro emit the section header.  */
+   if (!dwarf_strict || dwarf_version >= 5)
+@@ -32350,8 +32315,6 @@ dwarf2out_finish (const char *filename)
+   /* AIX Assembler inserts the length, so adjust the reference to match the
+      offset expected by debuggers.  */
+   strcpy (dl_section_ref, debug_line_section_label);
+-  if (XCOFF_DEBUGGING_INFO)
+-    strcat (dl_section_ref, DWARF_INITIAL_LENGTH_SIZE_STR);
+ 
+   if (debug_info_level >= DINFO_LEVEL_TERSE)
+     add_AT_lineptr (main_comp_unit_die, DW_AT_stmt_list,
+@@ -33067,8 +33030,6 @@ dwarf2out_early_finish (const char *filename)
+   /* AIX Assembler inserts the length, so adjust the reference to match the
+      offset expected by debuggers.  */
+   strcpy (dl_section_ref, debug_line_section_label);
+-  if (XCOFF_DEBUGGING_INFO)
+-    strcat (dl_section_ref, DWARF_INITIAL_LENGTH_SIZE_STR);
+ 
+   if (debug_info_level >= DINFO_LEVEL_TERSE)
+     add_AT_lineptr (comp_unit_die (), DW_AT_stmt_list, dl_section_ref);
+diff --git a/gcc/final.cc b/gcc/final.cc
+index af4e529bb..822b2db43 100644
+--- a/gcc/final.cc
++++ b/gcc/final.cc
+@@ -84,16 +84,8 @@ along with GCC; see the file COPYING3.  If not see
+ #include "insn-codes.h"
+ #include "common/common-target.h"
+ 
+-#ifdef XCOFF_DEBUGGING_INFO
+-#include "xcoffout.h"		/* Needed for external data declarations.  */
+-#endif
+-
+ #include "dwarf2out.h"
+ 
+-#ifdef DBX_DEBUGGING_INFO
+-#include "dbxout.h"
+-#endif
+-
+ /* Most ports don't need to define CC_STATUS_INIT.
+    So define a null default for it to save conditionalization later.  */
+ #ifndef CC_STATUS_INIT
+@@ -2326,19 +2318,6 @@ final_scan_insn_1 (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED,
+ 	      TREE_ASM_WRITTEN (NOTE_BLOCK (insn)) = 1;
+ 	      BLOCK_IN_COLD_SECTION_P (NOTE_BLOCK (insn)) = in_cold_section_p;
+ 	    }
+-	  if (write_symbols == DBX_DEBUG)
+-	    {
+-	      location_t *locus_ptr
+-		= block_nonartificial_location (NOTE_BLOCK (insn));
+-
+-	      if (locus_ptr != NULL)
+-		{
+-		  override_filename = LOCATION_FILE (*locus_ptr);
+-		  override_linenum = LOCATION_LINE (*locus_ptr);
+-		  override_columnnum = LOCATION_COLUMN (*locus_ptr);
+-		  override_discriminator = compute_discriminator (*locus_ptr);
+-		}
+-	    }
+ 	  break;
+ 
+ 	case NOTE_INSN_BLOCK_END:
+@@ -2361,27 +2340,6 @@ final_scan_insn_1 (rtx_insn *insn, FILE *file, int optimize_p ATTRIBUTE_UNUSED,
+ 	      gcc_assert (BLOCK_IN_COLD_SECTION_P (NOTE_BLOCK (insn))
+ 			  == in_cold_section_p);
+ 	    }
+-	  if (write_symbols == DBX_DEBUG)
+-	    {
+-	      tree outer_block = BLOCK_SUPERCONTEXT (NOTE_BLOCK (insn));
+-	      location_t *locus_ptr
+-		= block_nonartificial_location (outer_block);
+-
+-	      if (locus_ptr != NULL)
+-		{
+-		  override_filename = LOCATION_FILE (*locus_ptr);
+-		  override_linenum = LOCATION_LINE (*locus_ptr);
+-		  override_columnnum = LOCATION_COLUMN (*locus_ptr);
+-		  override_discriminator = compute_discriminator (*locus_ptr);
+-		}
+-	      else
+-		{
+-		  override_filename = NULL;
+-		  override_linenum = 0;
+-		  override_columnnum = 0;
+-		  override_discriminator = 0;
+-		}
+-	    }
+ 	  break;
+ 
+ 	case NOTE_INSN_DELETED_LABEL:
+@@ -4708,8 +4666,6 @@ rest_of_handle_final (void)
+   if (! quiet_flag)
+     fflush (asm_out_file);
+ 
+-  /* Write DBX symbols if requested.  */
+-
+   /* Note that for those inline functions where we don't initially
+      know for certain that we will be generating an out-of-line copy,
+      the first invocation of this routine (rest_of_compilation) will
+diff --git a/gcc/flag-types.h b/gcc/flag-types.h
+index 64c64eb32..e6121d75e 100644
+--- a/gcc/flag-types.h
++++ b/gcc/flag-types.h
+@@ -24,24 +24,18 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ enum debug_info_type
+ {
+-  DINFO_TYPE_NONE = 0,		  /* No debug info.  */
+-  DINFO_TYPE_DBX = 1,		  /* BSD .stabs for DBX.  */
+-  DINFO_TYPE_DWARF2 = 2,	  /* Dwarf v2 debug info.  */
+-  DINFO_TYPE_XCOFF = 3,		  /* IBM/Xcoff debug info.  */
+-  DINFO_TYPE_VMS = 4,		  /* VMS debug info.  */
+-  DINFO_TYPE_CTF = 5,		  /* CTF debug info.  */
+-  DINFO_TYPE_BTF = 6,		  /* BTF debug info.  */
+-  DINFO_TYPE_BTF_WITH_CORE = 7,	  /* BTF debug info with CO-RE relocations.  */
++  DINFO_TYPE_NONE,		  /* No debug info.  */
++  DINFO_TYPE_DWARF2,		  /* Dwarf v2 debug info.  */
++  DINFO_TYPE_VMS,		  /* VMS debug info.  */
++  DINFO_TYPE_CTF,		  /* CTF debug info.  */
++  DINFO_TYPE_BTF,		  /* BTF debug info.  */
++  DINFO_TYPE_BTF_WITH_CORE,	  /* BTF debug info with CO-RE relocations.  */
+   DINFO_TYPE_MAX = DINFO_TYPE_BTF_WITH_CORE /* Marker only.  */
+ };
+ 
+ #define NO_DEBUG      (0U)
+-/* Write DBX debug info (using dbxout.cc).  */
+-#define DBX_DEBUG     (1U << DINFO_TYPE_DBX)
+ /* Write DWARF2 debug info (using dwarf2out.cc).  */
+ #define DWARF2_DEBUG  (1U << DINFO_TYPE_DWARF2)
+-/* Write IBM/XCOFF debug info (using dbxout.cc).  */
+-#define XCOFF_DEBUG   (1U << DINFO_TYPE_XCOFF)
+ /* Write VMS debug info (using vmsdbgout.cc).  */
+ #define VMS_DEBUG     (1U << DINFO_TYPE_VMS)
+ /* Write CTF debug info (using ctfout.cc).  */
+diff --git a/gcc/function.cc b/gcc/function.cc
+index d84a3240e..49c7ccf4b 100644
+--- a/gcc/function.cc
++++ b/gcc/function.cc
+@@ -4628,14 +4628,6 @@ number_blocks (tree fn)
+   int n_blocks;
+   tree *block_vector;
+ 
+-  /* For XCOFF debugging output, we start numbering the blocks
+-     from 1 within each function, rather than keeping a running
+-     count.  */
+-#if defined (XCOFF_DEBUGGING_INFO)
+-  if (write_symbols == XCOFF_DEBUG)
+-    next_block_index = 1;
+-#endif
+-
+   block_vector = get_block_vector (DECL_INITIAL (fn), &n_blocks);
+ 
+   /* The top-level BLOCK isn't numbered at all.  */
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index b0d03430e..32e45adc2 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -927,26 +927,11 @@ proper position among the other output files.  */
+ # else
+ #  define ASM_DEBUG_DWARF_OPTION "--gdwarf2"
+ # endif
+-# if defined(DBX_DEBUGGING_INFO) && defined(DWARF2_DEBUGGING_INFO) \
+-     && defined(HAVE_AS_GDWARF2_DEBUG_FLAG) && defined(HAVE_AS_GSTABS_DEBUG_FLAG)
+-#  define ASM_DEBUG_SPEC						\
+-      (PREFERRED_DEBUGGING_TYPE == DBX_DEBUG				\
+-       ? "%{%:debug-level-gt(0):"					\
+-	 "%{gdwarf*:" ASM_DEBUG_DWARF_OPTION "};"			\
+-	 ":%{g*:--gstabs}}" ASM_MAP					\
+-       : "%{%:debug-level-gt(0):"					\
+-	 "%{gstabs*:--gstabs;"						\
+-	 ":%{g*:" ASM_DEBUG_DWARF_OPTION "}}}" ASM_MAP)
+-# else
+-#  if defined(DBX_DEBUGGING_INFO) && defined(HAVE_AS_GSTABS_DEBUG_FLAG)
+-#   define ASM_DEBUG_SPEC "%{g*:%{%:debug-level-gt(0):--gstabs}}" ASM_MAP
+-#  endif
+ #  if defined(DWARF2_DEBUGGING_INFO) && defined(HAVE_AS_GDWARF2_DEBUG_FLAG)
+ #   define ASM_DEBUG_SPEC "%{g*:%{%:debug-level-gt(0):" \
+ 	ASM_DEBUG_DWARF_OPTION "}}" ASM_MAP
+ #  endif
+ # endif
+-#endif
+ #ifndef ASM_DEBUG_SPEC
+ # define ASM_DEBUG_SPEC ""
+ #endif
+@@ -960,14 +945,7 @@ proper position among the other output files.  */
+ 	"%:dwarf-version-gt(3):--gdwarf-4 ;"				\
+ 	"%:dwarf-version-gt(2):--gdwarf-3 ;"				\
+ 	":--gdwarf2 }"
+-#  if defined(DBX_DEBUGGING_INFO) && defined(DWARF2_DEBUGGING_INFO)
+-#  define ASM_DEBUG_OPTION_SPEC						\
+-      (PREFERRED_DEBUGGING_TYPE == DBX_DEBUG				\
+-       ? "%{%:debug-level-gt(0):"					\
+-	 "%{gdwarf*:" ASM_DEBUG_OPTION_DWARF_OPT "}}" 			\
+-       : "%{%:debug-level-gt(0):"					\
+-	 "%{!gstabs*:%{g*:" ASM_DEBUG_OPTION_DWARF_OPT "}}}")
+-# elif defined(DWARF2_DEBUGGING_INFO)
++# if defined(DWARF2_DEBUGGING_INFO)
+ #   define ASM_DEBUG_OPTION_SPEC "%{g*:%{%:debug-level-gt(0):" \
+ 	ASM_DEBUG_OPTION_DWARF_OPT "}}"
+ #  endif
+diff --git a/gcc/go/go-lang.cc b/gcc/go/go-lang.cc
+index c8365d259..faaf52341 100644
+--- a/gcc/go/go-lang.cc
++++ b/gcc/go/go-lang.cc
+@@ -507,8 +507,7 @@ go_langhook_pushdecl (tree decl ATTRIBUTE_UNUSED)
+ }
+ 
+ /* This hook is used to get the current list of declarations as trees.
+-   We don't support that; instead we use the write_globals hook.  This
+-   can't simply crash because it is called by -gstabs.  */
++   We don't support that; instead we use the write_globals hook.  */
+ 
+ static tree
+ go_langhook_getdecls (void)
+diff --git a/gcc/go/gospec.cc b/gcc/go/gospec.cc
+index df92b62d8..1e5140768 100644
+--- a/gcc/go/gospec.cc
++++ b/gcc/go/gospec.cc
+@@ -215,11 +215,7 @@ lang_specific_driver (struct cl_decoded_option **in_decoded_options,
+ 	case OPT_gdwarf:
+ 	case OPT_gdwarf_:
+ 	case OPT_ggdb:
+-	case OPT_gstabs:
+-	case OPT_gstabs_:
+ 	case OPT_gvms:
+-	case OPT_gxcoff:
+-	case OPT_gxcoff_:
+ 	  saw_opt_g = true;
+ 	  break;
+ 
+diff --git a/gcc/gstab.h b/gcc/gstab.h
+deleted file mode 100644
+index c597d1200..000000000
+--- a/gcc/gstab.h
++++ /dev/null
+@@ -1,35 +0,0 @@
+-/* Copyright (C) 1997-2022 Free Software Foundation, Inc.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify it under
+-the terms of the GNU General Public License as published by the Free
+-Software Foundation; either version 3, or (at your option) any later
+-version.
+-
+-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+-WARRANTY; without even the implied warranty of MERCHANTABILITY or
+-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+-for more details.
+-
+-You should have received a copy of the GNU General Public License
+-along with GCC; see the file COPYING3.  If not see
+-.  */
+-
+-#ifndef GCC_GSTAB_H
+-#define GCC_GSTAB_H
+-
+-#define __define_stab(NAME, CODE, STRING) NAME=CODE,
+-
+-enum
+-{
+-#include "stab.def"
+-LAST_UNUSED_STAB_CODE
+-};
+-
+-/* stabs debug codes really are integers with expressive names.  */
+-typedef int stab_code_type;
+-
+-#undef __define_stab
+-
+-#endif /* ! GCC_GSTAB_H */
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 4b4925331..e34e5ee8e 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -53,7 +53,7 @@ const char *const debug_type_names[] =
+ 
+ static uint32_t debug_type_masks[] =
+ {
+-  NO_DEBUG, DBX_DEBUG, DWARF2_DEBUG, XCOFF_DEBUG, VMS_DEBUG,
++  NO_DEBUG, DWARF2_DEBUG, VMS_DEBUG,
+   CTF_DEBUG, BTF_DEBUG
+ };
+ 
+@@ -3186,22 +3186,10 @@ common_handle_option (struct gcc_options *opts,
+       set_debug_level (NO_DEBUG, 2, arg, opts, opts_set, loc);
+       break;
+ 
+-    case OPT_gstabs:
+-    case OPT_gstabs_:
+-      set_debug_level (DBX_DEBUG, code == OPT_gstabs_, arg, opts, opts_set,
+-		       loc);
+-      break;
+-
+     case OPT_gvms:
+       set_debug_level (VMS_DEBUG, false, arg, opts, opts_set, loc);
+       break;
+ 
+-    case OPT_gxcoff:
+-    case OPT_gxcoff_:
+-      set_debug_level (XCOFF_DEBUG, code == OPT_gxcoff_, arg, opts, opts_set,
+-		       loc);
+-      break;
+-
+     case OPT_gz:
+     case OPT_gz_:
+       /* Handled completely via specs.  */
+@@ -3503,8 +3491,6 @@ set_debug_level (uint32_t dinfo, int extended, const char *arg,
+ 		 struct gcc_options *opts, struct gcc_options *opts_set,
+ 		 location_t loc)
+ {
+-  opts->x_use_gnu_debug_info_extensions = extended;
+-
+   if (dinfo == NO_DEBUG)
+     {
+       if (opts->x_write_symbols == NO_DEBUG)
+@@ -3518,8 +3504,6 @@ set_debug_level (uint32_t dinfo, int extended, const char *arg,
+ 		opts->x_write_symbols |= DWARF2_DEBUG;
+ 	      else
+ 		opts->x_write_symbols = DWARF2_DEBUG;
+-#elif defined DBX_DEBUGGING_INFO
+-	      opts->x_write_symbols = DBX_DEBUG;
+ #endif
+ 	    }
+ 
+diff --git a/gcc/stab.def b/gcc/stab.def
+deleted file mode 100644
+index e5af19b2b..000000000
+--- a/gcc/stab.def
++++ /dev/null
+@@ -1,239 +0,0 @@
+-/* Table of DBX symbol codes for the GNU system.
+-   Copyright (C) 1988-2022 Free Software Foundation, Inc.
+-   This file is part of the GNU C Library.
+-
+-   The GNU C Library is free software; you can redistribute it and/or
+-   modify it under the terms of the GNU Library General Public License as
+-   published by the Free Software Foundation; either version 3 of the
+-   License, or (at your option) any later version.
+-
+-   The GNU C Library is distributed in the hope that it will be useful,
+-   but WITHOUT ANY WARRANTY; without even the implied warranty of
+-   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+-   Library General Public License for more details.
+-
+-   You should have received a copy of the GNU Library General Public
+-   License along with the GNU C Library; see the file COPYING3.  If
+-   not see .  */
+-
+-/* This contains contribution from Cygnus Support.  */
+-
+-/* Global variable.  Only the name is significant.
+-   To find the address, look in the corresponding external symbol.  */
+-__define_stab (N_GSYM, 0x20, "GSYM")
+-
+-/* Function name for BSD Fortran.  Only the name is significant.
+-   To find the address, look in the corresponding external symbol.  */
+-__define_stab (N_FNAME, 0x22, "FNAME")
+-
+-/* Function name or text-segment variable for C.  Value is its address.
+-   Desc is supposedly starting line number, but GCC doesn't set it
+-   and DBX seems not to miss it.  */
+-__define_stab (N_FUN, 0x24, "FUN")
+-
+-/* Data-segment variable with internal linkage.  Value is its address.
+-   "Static Sym".  */
+-__define_stab (N_STSYM, 0x26, "STSYM")
+-
+-/* BSS-segment variable with internal linkage.  Value is its address.  */
+-__define_stab (N_LCSYM, 0x28, "LCSYM")
+-
+-/* Begin function marker.  */
+-__define_stab (N_BNSYM, 0x2e, "BNSYM")
+-
+-/* End function marker.  */
+-__define_stab (N_ENSYM, 0x4e, "ENSYM")
+-
+-/* Name of main routine.  Only the name is significant.
+-   This is not used in C.  */
+-__define_stab (N_MAIN, 0x2a, "MAIN")
+-
+-/* Global symbol in Pascal.
+-   Supposedly the value is its line number; I'm skeptical.  */
+-__define_stab (N_PC, 0x30, "PC")
+-
+-/* Number of symbols:  0, files,,funcs,lines according to Ultrix V4.0.  */
+-__define_stab (N_NSYMS, 0x32, "NSYMS")
+-
+-/* "No DST map for sym: name, ,0,type,ignored"  according to Ultrix V4.0.  */
+-__define_stab (N_NOMAP, 0x34, "NOMAP")
+-
+-/* New stab from Solaris.  I don't know what it means, but it
+-   don't seem to contain useful information.  */
+-__define_stab (N_OBJ, 0x38, "OBJ")
+-
+-/* New stab from Solaris.  I don't know what it means, but it
+-   don't seem to contain useful information.  Possibly related to the
+-   optimization flags used in this module.  */
+-__define_stab (N_OPT, 0x3c, "OPT")
+-
+-/* Register variable.  Value is number of register.  */
+-__define_stab (N_RSYM, 0x40, "RSYM")
+-
+-/* Modula-2 compilation unit.  Can someone say what info it contains?  */
+-__define_stab (N_M2C, 0x42, "M2C")
+-
+-/* Line number in text segment.  Desc is the line number;
+-   value is corresponding address.  */
+-__define_stab (N_SLINE, 0x44, "SLINE")
+-
+-/* Similar, for data segment.  */
+-__define_stab (N_DSLINE, 0x46, "DSLINE")
+-
+-/* Similar, for bss segment.  */
+-__define_stab (N_BSLINE, 0x48, "BSLINE")
+-
+-/* Sun's source-code browser stabs.  ?? Don't know what the fields are.
+-   Supposedly the field is "path to associated .cb file".  THIS VALUE
+-   OVERLAPS WITH N_BSLINE!  */
+-__define_stab (N_BROWS, 0x48, "BROWS")
+-
+-/* GNU Modula-2 definition module dependency.  Value is the modification time
+-   of the definition file.  Other is nonzero if it is imported with the
+-   GNU M2 keyword %INITIALIZE.  Perhaps N_M2C can be used if there
+-   are enough empty fields? */
+-__define_stab(N_DEFD, 0x4a, "DEFD")
+-
+-/* THE FOLLOWING TWO STAB VALUES CONFLICT.  Happily, one is for Modula-2
+-   and one is for C++.   Still,...  */
+-/* GNU C++ exception variable.  Name is variable name.  */
+-__define_stab (N_EHDECL, 0x50, "EHDECL")
+-/* Modula2 info "for imc":  name,,0,0,0  according to Ultrix V4.0.  */
+-__define_stab (N_MOD2, 0x50, "MOD2")
+-
+-/* GNU C++ `catch' clause.  Value is its address.  Desc is nonzero if
+-   this entry is immediately followed by a CAUGHT stab saying what exception
+-   was caught.  Multiple CAUGHT stabs means that multiple exceptions
+-   can be caught here.  If Desc is 0, it means all exceptions are caught
+-   here.  */
+-__define_stab (N_CATCH, 0x54, "CATCH")
+-
+-/* Structure or union element.  Value is offset in the structure.  */
+-__define_stab (N_SSYM, 0x60, "SSYM")
+-
+-/* Name of main source file.
+-   Value is starting text address of the compilation.  */
+-__define_stab (N_SO, 0x64, "SO")
+-
+-/* Automatic variable in the stack.  Value is offset from frame pointer.
+-   Also used for type descriptions.  */
+-__define_stab (N_LSYM, 0x80, "LSYM")
+-
+-/* Beginning of an include file.  Only Sun uses this.
+-   In an object file, only the name is significant.
+-   The Sun linker puts data into some of the other fields.  */
+-__define_stab (N_BINCL, 0x82, "BINCL")
+-
+-/* Name of sub-source file (#include file).
+-   Value is starting text address of the compilation.  */
+-__define_stab (N_SOL, 0x84, "SOL")
+-
+-/* Parameter variable.  Value is offset from argument pointer.
+-   (On most machines the argument pointer is the same as the frame pointer.  */
+-__define_stab (N_PSYM, 0xa0, "PSYM")
+-
+-/* End of an include file.  No name.
+-   This and N_BINCL act as brackets around the file's output.
+-   In an object file, there is no significant data in this entry.
+-   The Sun linker puts data into some of the fields.  */
+-__define_stab (N_EINCL, 0xa2, "EINCL")
+-
+-/* Alternate entry point.  Value is its address.  */
+-__define_stab (N_ENTRY, 0xa4, "ENTRY")
+-
+-/* Beginning of lexical block.
+-   The desc is the nesting level in lexical blocks.
+-   The value is the address of the start of the text for the block.
+-   The variables declared inside the block *precede* the N_LBRAC symbol.  */
+-__define_stab (N_LBRAC, 0xc0, "LBRAC")
+-
+-/* Place holder for deleted include file.  Replaces a N_BINCL and everything
+-   up to the corresponding N_EINCL.  The Sun linker generates these when
+-   it finds multiple identical copies of the symbols from an include file.
+-   This appears only in output from the Sun linker.  */
+-__define_stab (N_EXCL, 0xc2, "EXCL")
+-
+-/* Modula-2 scope information.  Can someone say what info it contains?  */
+-__define_stab (N_SCOPE, 0xc4, "SCOPE")
+-
+-/* End of a lexical block.  Desc matches the N_LBRAC's desc.
+-   The value is the address of the end of the text for the block.  */
+-__define_stab (N_RBRAC, 0xe0, "RBRAC")
+-
+-/* Begin named common block.  Only the name is significant.  */
+-__define_stab (N_BCOMM, 0xe2, "BCOMM")
+-
+-/* End named common block.  Only the name is significant
+-   (and it should match the N_BCOMM).  */
+-__define_stab (N_ECOMM, 0xe4, "ECOMM")
+-
+-/* End common (local name): value is address.
+-   I'm not sure how this is used.  */
+-__define_stab (N_ECOML, 0xe8, "ECOML")
+-
+-/* These STAB's are used on Gould systems for Non-Base register symbols
+-   or something like that.  FIXME.  I have assigned the values at random
+-   since I don't have a Gould here.  Fixups from Gould folk welcome...  */
+-__define_stab (N_NBTEXT, 0xF0, "NBTEXT")
+-__define_stab (N_NBDATA, 0xF2, "NBDATA")
+-__define_stab (N_NBBSS,  0xF4, "NBBSS")
+-__define_stab (N_NBSTS,  0xF6, "NBSTS")
+-__define_stab (N_NBLCS,  0xF8, "NBLCS")
+-
+-/* Second symbol entry containing a length-value for the preceding entry.
+-   The value is the length.  */
+-__define_stab (N_LENG, 0xfe, "LENG")
+-
+-/* The above information, in matrix format.
+-
+-			STAB MATRIX
+-	_________________________________________________
+-	| 00 - 1F are not dbx stab symbols		|
+-	| In most cases, the low bit is the EXTernal bit|
+-
+-	| 00 UNDEF  | 02 ABS	| 04 TEXT   | 06 DATA	|
+-	| 01  |EXT  | 03  |EXT	| 05  |EXT  | 07  |EXT	|
+-
+-	| 08 BSS    | 0A INDR	| 0C FN_SEQ | 0E   	|
+-	| 09  |EXT  | 0B 	| 0D	    | 0F	|
+-
+-	| 10 	    | 12 COMM	| 14 SETA   | 16 SETT	|
+-	| 11	    | 13	| 15 	    | 17	|
+-
+-	| 18 SETD   | 1A SETB	| 1C SETV   | 1E WARNING|
+-	| 19	    | 1B	| 1D 	    | 1F FN	|
+-
+-	|_______________________________________________|
+-	| Debug entries with bit 01 set are unused.	|
+-	| 20 GSYM   | 22 FNAME	| 24 FUN    | 26 STSYM	|
+-	| 28 LCSYM  | 2A MAIN	| 2C	    | 2E BNSYM	|
+-	| 30 PC	    | 32 NSYMS	| 34 NOMAP  | 36	|
+-	| 38 OBJ    | 3A	| 3C OPT    | 3E	|
+-	| 40 RSYM   | 42 M2C	| 44 SLINE  | 46 DSLINE |
+-	| 48 BSLINE*| 4A DEFD	| 4C        | 4E ENSYM	|
+-	| 50 EHDECL*| 52	| 54 CATCH  | 56        |
+-	| 58        | 5A        | 5C        | 5E	|
+-	| 60 SSYM   | 62	| 64 SO	    | 66 	|
+-	| 68 	    | 6A	| 6C	    | 6E	|
+-	| 70	    | 72	| 74	    | 76	|
+-	| 78	    | 7A	| 7C	    | 7E	|
+-	| 80 LSYM   | 82 BINCL	| 84 SOL    | 86	|
+-	| 88	    | 8A	| 8C	    | 8E	|
+-	| 90	    | 92	| 94	    | 96	|
+-	| 98	    | 9A	| 9C	    | 9E	|
+-	| A0 PSYM   | A2 EINCL	| A4 ENTRY  | A6	|
+-	| A8	    | AA	| AC	    | AE	|
+-	| B0	    | B2	| B4	    | B6	|
+-	| B8	    | BA	| BC	    | BE	|
+-	| C0 LBRAC  | C2 EXCL	| C4 SCOPE  | C6	|
+-	| C8	    | CA	| CC	    | CE	|
+-	| D0	    | D2	| D4	    | D6	|
+-	| D8	    | DA	| DC	    | DE	|
+-	| E0 RBRAC  | E2 BCOMM	| E4 ECOMM  | E6	|
+-	| E8 ECOML  | EA	| EC	    | EE	|
+-	| F0	    | F2	| F4	    | F6	|
+-	| F8	    | FA	| FC	    | FE LENG	|
+-	+-----------------------------------------------+
+- * 50 EHDECL is also MOD2.
+- * 48 BSLINE is also BROWS.
+- */
+diff --git a/gcc/system.h b/gcc/system.h
+index e10c34f70..2f56411c0 100644
+--- a/gcc/system.h
++++ b/gcc/system.h
+@@ -1005,8 +1005,7 @@ extern void fancy_abort (const char *, int, const char *)
+ 	ASM_OUTPUT_DEFINE_LABEL_DIFFERENCE_SYMBOL HOST_WORDS_BIG_ENDIAN	   \
+ 	OBJC_PROLOGUE ALLOCATE_TRAMPOLINE HANDLE_PRAGMA ROUND_TYPE_SIZE	   \
+ 	ROUND_TYPE_SIZE_UNIT CONST_SECTION_ASM_OP CRT_GET_RFIB_TEXT	   \
+-	DBX_LBRAC_FIRST DBX_OUTPUT_ENUM DBX_OUTPUT_SOURCE_FILENAME	   \
+-	DBX_WORKING_DIRECTORY INSN_CACHE_DEPTH INSN_CACHE_SIZE		   \
++	INSN_CACHE_DEPTH INSN_CACHE_SIZE				   \
+ 	INSN_CACHE_LINE_WIDTH INIT_SECTION_PREAMBLE NEED_ATEXIT ON_EXIT	   \
+ 	EXIT_BODY OBJECT_FORMAT_ROSE MULTIBYTE_CHARS MAP_CHARACTER	   \
+ 	LIBGCC_NEEDS_DOUBLE FINAL_PRESCAN_LABEL DEFAULT_CALLER_SAVES	   \
+@@ -1019,15 +1018,14 @@ extern void fancy_abort (const char *, int, const char *)
+ 	MAX_WCHAR_TYPE_SIZE SHARED_SECTION_ASM_OP INTEGRATE_THRESHOLD      \
+ 	FINAL_REG_PARM_STACK_SPACE MAYBE_REG_PARM_STACK_SPACE		   \
+ 	TRADITIONAL_PIPELINE_INTERFACE DFA_PIPELINE_INTERFACE		   \
+-	DBX_OUTPUT_STANDARD_TYPES BUILTIN_SETJMP_FRAME_VALUE		   \
++	BUILTIN_SETJMP_FRAME_VALUE					   \
+ 	SUNOS4_SHARED_LIBRARIES PROMOTE_FOR_CALL_ONLY			   \
+ 	SPACE_AFTER_L_OPTION NO_RECURSIVE_FUNCTION_CSE			   \
+ 	DEFAULT_MAIN_RETURN TARGET_MEM_FUNCTIONS EXPAND_BUILTIN_VA_ARG	   \
+ 	COLLECT_PARSE_FLAG DWARF2_GENERATE_TEXT_SECTION_LABEL WINNING_GDB  \
+ 	ASM_OUTPUT_FILENAME ASM_OUTPUT_SOURCE_LINE FILE_NAME_JOINER	   \
+-	GDB_INV_REF_REGPARM_STABS_LETTER DBX_MEMPARM_STABS_LETTER	   \
+-	PUT_SDB_SRC_FILE STABS_GCC_MARKER DBX_OUTPUT_FUNCTION_END	   \
+-	DBX_OUTPUT_GCC_MARKER DBX_FINISH_SYMBOL SDB_GENERATE_FAKE	   \
++	GDB_INV_REF_REGPARM_STABS_LETTER				   \
++	PUT_SDB_SRC_FILE STABS_GCC_MARKER SDB_GENERATE_FAKE		   \
+ 	NON_SAVING_SETJMP TARGET_LATE_RTL_PROLOGUE_EPILOGUE		   \
+ 	CASE_DROPS_THROUGH TARGET_BELL TARGET_BS TARGET_CR TARGET_DIGIT0   \
+         TARGET_ESC TARGET_FF TARGET_NEWLINE TARGET_TAB TARGET_VT	   \
+@@ -1052,8 +1050,8 @@ extern void fancy_abort (const char *, int, const char *)
+ 	PREFERRED_OUTPUT_RELOAD_CLASS SYSTEM_INCLUDE_DIR		   \
+ 	STANDARD_INCLUDE_DIR STANDARD_INCLUDE_COMPONENT			   \
+ 	LINK_ELIMINATE_DUPLICATE_LDIRECTORIES MIPS_DEBUGGING_INFO	   \
+-	IDENT_ASM_OP ALL_COP_ADDITIONAL_REGISTER_NAMES DBX_OUTPUT_LBRAC	   \
+-	DBX_OUTPUT_NFUN DBX_OUTPUT_RBRAC RANGE_TEST_NON_SHORT_CIRCUIT	   \
++	IDENT_ASM_OP ALL_COP_ADDITIONAL_REGISTER_NAMES			   \
++	RANGE_TEST_NON_SHORT_CIRCUIT					   \
+ 	REAL_VALUE_TRUNCATE REVERSE_CONDEXEC_PREDICATES_P		   \
+ 	TARGET_ALIGN_ANON_BITFIELDS TARGET_NARROW_VOLATILE_BITFIELDS	   \
+ 	IDENT_ASM_OP UNALIGNED_SHORT_ASM_OP UNALIGNED_INT_ASM_OP	   \
+diff --git a/gcc/target-def.h b/gcc/target-def.h
+index 1c4aa2963..f81f8fe3b 100644
+--- a/gcc/target-def.h
++++ b/gcc/target-def.h
+@@ -62,8 +62,6 @@
+ # else
+ #  ifdef TARGET_ASM_NAMED_SECTION
+ #   define TARGET_ASM_CONSTRUCTOR default_named_section_asm_out_constructor
+-#  else
+-#   define TARGET_ASM_CONSTRUCTOR default_stabs_asm_out_constructor
+ #  endif
+ # endif
+ #endif
+@@ -74,8 +72,6 @@
+ # else
+ #  ifdef TARGET_ASM_NAMED_SECTION
+ #   define TARGET_ASM_DESTRUCTOR default_named_section_asm_out_destructor
+-#  else
+-#   define TARGET_ASM_DESTRUCTOR default_stabs_asm_out_destructor
+ #  endif
+ # endif
+ #endif
+diff --git a/gcc/testsuite/g++.dg/cpp0x/alias-decl-debug-0.C b/gcc/testsuite/g++.dg/cpp0x/alias-decl-debug-0.C
+deleted file mode 100644
+index 8464aa35a..000000000
+--- a/gcc/testsuite/g++.dg/cpp0x/alias-decl-debug-0.C
++++ /dev/null
+@@ -1,12 +0,0 @@
+-// Origin: PR c++/51032
+-// { dg-do compile { target { c++11 && stabs } } }
+-// { dg-options "-gstabs+" }
+-
+-template 
+-struct A {
+-    template using B = U*;
+-    int a;
+-};
+-
+-A a;
+-
+diff --git a/gcc/testsuite/g++.dg/other/PR23205.C b/gcc/testsuite/g++.dg/other/PR23205.C
+deleted file mode 100644
+index 65ba1f6f3..000000000
+--- a/gcc/testsuite/g++.dg/other/PR23205.C
++++ /dev/null
+@@ -1,17 +0,0 @@
+-/* { dg-do compile { target stabs } } */
+-/* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types" } */
+-
+-const int foobar = 4;
+-int foo ()
+-{
+-        return foobar + 1;
+-}
+-
+-int main()
+-{
+-        int i;
+-        i = foo();
+-        return i;
+-}
+-
+-/* { dg-final { scan-assembler ".stabs.*foobar:(c=i|S)" } } */
+diff --git a/gcc/testsuite/g++.dg/other/pr23205-2.C b/gcc/testsuite/g++.dg/other/pr23205-2.C
+deleted file mode 100644
+index 7b25c071a..000000000
+--- a/gcc/testsuite/g++.dg/other/pr23205-2.C
++++ /dev/null
+@@ -1,17 +0,0 @@
+-/* { dg-do compile { target stabs } } */
+-/* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types -ftoplevel-reorder" } */
+-
+-const int foobar = 4;
+-int foo ()
+-{
+-        return foobar + 1;
+-}
+-
+-int main()
+-{
+-        int i;
+-        i = foo();
+-        return i;
+-}
+-
+-/* { dg-final { scan-assembler ".stabs.*foobar:c=i" } } */
+diff --git a/gcc/testsuite/gcc.dg/20040813-1.c b/gcc/testsuite/gcc.dg/20040813-1.c
+deleted file mode 100644
+index 9cf664dd7..000000000
+--- a/gcc/testsuite/gcc.dg/20040813-1.c
++++ /dev/null
+@@ -1,14 +0,0 @@
+-/* Test lang in N_SO stab.  */
+-/* Contributed by Devang Patel    */
+-
+-/* { dg-do compile { target stabs } } */
+-/* { dg-options "-gstabs" } */
+-
+-int
+-main ()
+-{
+-  return 0;
+-}
+-
+-/* { dg-final { scan-assembler ".stabs.*100,0,2" } } */
+-
+diff --git a/gcc/testsuite/gcc.dg/darwin-20040809-2.c b/gcc/testsuite/gcc.dg/darwin-20040809-2.c
+deleted file mode 100644
+index 98d571276..000000000
+--- a/gcc/testsuite/gcc.dg/darwin-20040809-2.c
++++ /dev/null
+@@ -1,15 +0,0 @@
+-/* Test dead code strip support.  */
+-/* Contributed by Devang Patel    */
+-
+-/* { dg-do compile { target { *-*-darwin* && stabs } } } */
+-/* { dg-options "-gstabs+ -fno-eliminate-unused-debug-symbols" } */
+-
+-int
+-main ()
+-{
+-  return 0;
+-}
+-
+-/* { dg-final { scan-assembler ".stabd.46,0,0" } } */
+-/* { dg-final { scan-assembler ".stabd.78,0,0" } } */
+-
+diff --git a/gcc/testsuite/gcc.dg/debug/pr35154.c b/gcc/testsuite/gcc.dg/debug/pr35154.c
+deleted file mode 100644
+index 08eefaf66..000000000
+--- a/gcc/testsuite/gcc.dg/debug/pr35154.c
++++ /dev/null
+@@ -1,35 +0,0 @@
+-/* Test to make sure that stabs for C symbols that go into .comm have the
+-   proper structure.  These should be lettered G for the struct that gives
+-   the name to the .comm, and should be V or S for .lcomm symbols.  */
+-
+-__attribute__ ((used))
+-static char i_outer;
+-struct {
+-   char f1;
+-   char f2;
+-} opta;
+-struct {
+-   char f1;
+-   char f2;
+-} optb;
+-
+-int
+-main()
+-{
+-   static char i_inner[2] __attribute__ ((used));
+-   i_inner[0] = 'a'; i_inner[1] = 'b';
+-   opta.f1 = 'c';
+-   opta.f2 = 'd';
+-   optb.f1 = 'C';
+-   optb.f2 = 'D';
+-   i_outer = 'e';
+-/* { dg-do compile } */
+-/* { dg-skip-if "No stabs" { mmix-*-* alpha*-*-* hppa*64*-*-* ia64-*-* *-*-vxworks* } } */
+-/* { dg-skip-if "stabs only" { *-*-* } { "*" } { "-gstabs" } } */
+-   return 0;
+-}
+-
+-/* { dg-final { scan-assembler ".stabs.*i_inner:V" } } */
+-/* { dg-final { scan-assembler ".stabs.*i_outer:S" } } */
+-/* { dg-final { scan-assembler ".stabs.*opta:G" } } */
+-/* { dg-final { scan-assembler ".stabs.*optb:G" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr69471-2.c b/gcc/testsuite/gcc.dg/pr69471-2.c
+deleted file mode 100644
+index d5799604b..000000000
+--- a/gcc/testsuite/gcc.dg/pr69471-2.c
++++ /dev/null
+@@ -1,8 +0,0 @@
+-/* { dg-do compile } */
+-/* { dg-options "-gstabs2 -gdwarf-4 -gstabs3" } */
+-/* { dg-error "conflicts with prior selectio" "" { target *-*-* } 0 } */
+-
+-void
+-foo (void)
+-{
+-}
+diff --git a/gcc/testsuite/gcc.target/powerpc/stabs-attrib-vect-darwin.c b/gcc/testsuite/gcc.target/powerpc/stabs-attrib-vect-darwin.c
+deleted file mode 100644
+index 5c7acf18a..000000000
+--- a/gcc/testsuite/gcc.target/powerpc/stabs-attrib-vect-darwin.c
++++ /dev/null
+@@ -1,12 +0,0 @@
+-/* Test Attribute Vector associated with vector type stabs.  */
+-/* { dg-do compile { target powerpc*-*-darwin* } } */
+-/* { dg-require-effective-target stabs } */
+-/* { dg-options "-gstabs+ -fno-eliminate-unused-debug-types -faltivec" } */
+-
+-int main ()
+-{
+-  vector int vi = { 6,7,8,9 };
+-  return 0;
+-}
+-
+-/* { dg-final { scan-assembler ".stabs.*vi\:\\(0,\[0-9\]+\\)=\@V" } } */
+diff --git a/gcc/testsuite/gcc.target/s390/20041216-1.c b/gcc/testsuite/gcc.target/s390/20041216-1.c
+deleted file mode 100644
+index 492ee6c18..000000000
+--- a/gcc/testsuite/gcc.target/s390/20041216-1.c
++++ /dev/null
+@@ -1,23 +0,0 @@
+-/* This test case would get an unresolved symbol during link
+-   because stabs referred to an optimized-away literal pool
+-   entry.  */
+-
+-/* { dg-do run } */
+-/* { dg-options "-O2 -fno-omit-frame-pointer -gstabs" } */
+-
+-int main (void)
+-{
+-  static char buf[4096];
+-  char *p;
+-
+-  do
+-    {
+-      p = buf;
+-      asm volatile ("" : : : "memory", "0", "1", "2", "3", "4", "5", "6",
+-				       "7", "8", "9", "10", "12");
+-    }
+-  while (*p);
+-
+-  return 0;
+-}
+-
+diff --git a/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f b/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
+deleted file mode 100644
+index 5e5c42e9b..000000000
+--- a/gcc/testsuite/gfortran.dg/debug/pr35154-stabs.f
++++ /dev/null
+@@ -1,35 +0,0 @@
+-C     Test program for common block debugging.  G. Helffrich 11 July 2004.
+-C { dg-do compile }
+-C { dg-skip-if "No stabs" { aarch64*-*-* mmix-*-* alpha*-*-* hppa*64*-*-* ia64-*-* *-*-vxworks* } }
+-C { dg-skip-if "No stabs" { *-*-* } { "*" } { "-gstabs" } }
+-      common i,j
+-      common /label/l,m
+-      i = 1
+-      j = 2
+-      k = 3
+-      l = 4
+-      m = 5
+-      call sub
+-      end
+-      subroutine sub
+-      common /label/l,m
+-      logical first
+-      save n
+-      data first /.true./
+-      if (first) then
+-         n = 0
+-	 first = .false.
+-      endif
+-      n = n + 1
+-      l = l + 1
+-      return
+-      end
+-
+-C { dg-final { scan-assembler ".stabs.*\"__BLNK__\",226" } }
+-C { dg-final { scan-assembler ".stabs.*\"i:V.*\",.*,0" } }
+-C { dg-final { scan-assembler ".stabs.*\"j:V.*\",.*,4" } }
+-C { dg-final { scan-assembler ".stabs.*\"__BLNK__\",228" } }
+-C { dg-final { scan-assembler ".stabs.*\"label_\",226" } }
+-C { dg-final { scan-assembler ".stabs.*\"l:V.*\",.*,0" } }
+-C { dg-final { scan-assembler ".stabs.*\"m:V.*\",.*,4" } }
+-C { dg-final { scan-assembler ".stabs.*\"label_\",228" } }
+diff --git a/gcc/testsuite/lib/gcc-dg.exp b/gcc/testsuite/lib/gcc-dg.exp
+index 8c28997dd..5e8acb28b 100644
+--- a/gcc/testsuite/lib/gcc-dg.exp
++++ b/gcc/testsuite/lib/gcc-dg.exp
+@@ -655,7 +655,7 @@ proc gcc-dg-target-supports-debug-format { target_compile trivial type } {
+ proc gcc-dg-debug-runtest { target_compile trivial opt_opts testcases } {
+     if ![info exists DEBUG_TORTURE_OPTIONS] {
+ 	set DEBUG_TORTURE_OPTIONS ""
+-	foreach type {-gctf -gdwarf-2 -gstabs -gstabs+ -gxcoff -gxcoff+} {
++	foreach type {-gctf -gdwarf-2} {
+ 	    if [expr [gcc-dg-target-supports-debug-format \
+ 		      $target_compile $trivial $type]] {
+ 		if { $type == "-gctf" } {
+diff --git a/gcc/testsuite/lib/gfortran-dg.exp b/gcc/testsuite/lib/gfortran-dg.exp
+index 7407be4b8..d4a245e0b 100644
+--- a/gcc/testsuite/lib/gfortran-dg.exp
++++ b/gcc/testsuite/lib/gfortran-dg.exp
+@@ -170,7 +170,7 @@ proc gfortran-dg-debug-runtest { target_compile trivial opt_opts testcases } {
+ 
+     if ![info exists DEBUG_TORTURE_OPTIONS] {
+        set DEBUG_TORTURE_OPTIONS ""
+-       set type_list [list "-gstabs" "-gstabs+" "-gxcoff" "-gxcoff+" "-gdwarf-2" ]
++       set type_list [list "-gdwarf-2" ]
+        foreach type $type_list {
+            set comp_output [$target_compile \
+                    "$srcdir/$subdir/$trivial" "trivial.S" assembly \
+diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
+index d404058fd..bd89d4f52 100644
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -1126,15 +1126,6 @@ proc check_effective_target_pthread {} {
+     } "-pthread"]
+ }
+ 
+-# Return 1 if compilation with -gstabs is error-free for trivial
+-# code, 0 otherwise.
+-
+-proc check_effective_target_stabs {} {
+-    return [check_no_compiler_messages stabs object {
+-	void foo (void) { }
+-    } "-gstabs"]
+-}
+-
+ # Return 1 if compilation with -mpe-aligned-commons is error-free
+ # for trivial code, 0 otherwise.
+ 
+diff --git a/gcc/testsuite/objc.dg/stabs-1.m b/gcc/testsuite/objc.dg/stabs-1.m
+deleted file mode 100644
+index b97e4d6a7..000000000
+--- a/gcc/testsuite/objc.dg/stabs-1.m
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/* Check if the final SO STABS record goes into the .text section.  */
+-/* Contributed by Ziemowit Laski  */
+-
+-/* { dg-do compile { target stabs } } */
+-/* { dg-options "-gstabs" } */
+-/* { dg-additional-options "-Wno-objc-root-class" } */
+-
+-@interface MyClass
+-+ newWithArg: arg;
+-@end
+-
+-@implementation MyClass
+-+ newWithArg: arg
+-{
+-}
+-@end
+-
+-/* See PR target/52152 for the xfail.  */
+-/* { dg-final { scan-assembler "(.SUBSPA.*\[\$\]CODE\[\$\]|.text\"?)\n\t.stabs.*100,0,0,(\[\.\$\])?L?L\[\$\]?etext\[0-9\]*\n(\[\.\$\])?L?L\[\$\]?etext" { xfail mips*-*-elf* } } } */
+diff --git a/gcc/toplev.cc b/gcc/toplev.cc
+index 055e0642f..f00a166df 100644
+--- a/gcc/toplev.cc
++++ b/gcc/toplev.cc
+@@ -89,14 +89,6 @@ along with GCC; see the file COPYING3.  If not see
+ #include "ipa-param-manipulation.h"
+ #include "dbgcnt.h"
+ 
+-#if defined(DBX_DEBUGGING_INFO) || defined(XCOFF_DEBUGGING_INFO)
+-#include "dbxout.h"
+-#endif
+-
+-#ifdef XCOFF_DEBUGGING_INFO
+-#include "xcoffout.h"		/* Needed for external data declarations. */
+-#endif
+-
+ #include "selftest.h"
+ 
+ #ifdef HAVE_isl
+@@ -1415,21 +1407,8 @@ process_options (bool no_backend)
+       && ctf_debug_info_level == CTFINFO_LEVEL_NONE)
+     write_symbols = NO_DEBUG;
+ 
+-  /* Warn if STABS debug gets enabled and is not the default.  */
+-  if (PREFERRED_DEBUGGING_TYPE != DBX_DEBUG && (write_symbols & DBX_DEBUG))
+-    warning (0, "STABS debugging information is obsolete and not "
+-	     "supported anymore");
+-
+   if (write_symbols == NO_DEBUG)
+     ;
+-#if defined(DBX_DEBUGGING_INFO)
+-  else if (write_symbols == DBX_DEBUG)
+-    debug_hooks = &dbx_debug_hooks;
+-#endif
+-#if defined(XCOFF_DEBUGGING_INFO)
+-  else if (write_symbols == XCOFF_DEBUG)
+-    debug_hooks = &xcoff_debug_hooks;
+-#endif
+ #ifdef DWARF2_DEBUGGING_INFO
+   else if (dwarf_debuginfo_p ())
+     debug_hooks = &dwarf2_debug_hooks;
+diff --git a/gcc/varasm.cc b/gcc/varasm.cc
+index a4b1cc686..3f69b47a7 100644
+--- a/gcc/varasm.cc
++++ b/gcc/varasm.cc
+@@ -62,10 +62,6 @@ along with GCC; see the file COPYING3.  If not see
+ #include "toplev.h"
+ #include "opts.h"
+ 
+-#ifdef XCOFF_DEBUGGING_INFO
+-#include "xcoffout.h"		/* Needed for external data declarations.  */
+-#endif
+-
+ /* The (assembler) name of the first globally-visible object output.  */
+ extern GTY(()) const char *first_global_object_name;
+ extern GTY(()) const char *weak_global_object_name;
+diff --git a/gcc/xcoffout.cc b/gcc/xcoffout.cc
+deleted file mode 100644
+index bafd35524..000000000
+--- a/gcc/xcoffout.cc
++++ /dev/null
+@@ -1,494 +0,0 @@
+-/* Output xcoff-format symbol table information from GNU compiler.
+-   Copyright (C) 1992-2022 Free Software Foundation, Inc.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify it under
+-the terms of the GNU General Public License as published by the Free
+-Software Foundation; either version 3, or (at your option) any later
+-version.
+-
+-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+-WARRANTY; without even the implied warranty of MERCHANTABILITY or
+-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+-for more details.
+-
+-You should have received a copy of the GNU General Public License
+-along with GCC; see the file COPYING3.  If not see
+-.  */
+-
+-/* Output xcoff-format symbol table data.  The main functionality is contained
+-   in dbxout.cc.  This file implements the sdbout-like parts of the xcoff
+-   interface.  Many functions are very similar to their counterparts in
+-   the former sdbout.c file.  */
+-
+-#include "config.h"
+-#include "system.h"
+-#include "coretypes.h"
+-#include "target.h"
+-#include "rtl.h"
+-#include "tree.h"
+-#include "diagnostic-core.h"
+-#include "varasm.h"
+-#include "output.h"
+-#include "debug.h"
+-#include "file-prefix-map.h" /* remap_debug_filename()  */
+-
+-#ifdef XCOFF_DEBUGGING_INFO
+-
+-/* This defines the C_* storage classes.  */
+-#include "xcoff.h"
+-#include "xcoffout.h"
+-#include "dbxout.h"
+-#include "gstab.h"
+-
+-/* Line number of beginning of current function, minus one.
+-   Negative means not in a function or not using xcoff.  */
+-
+-static int xcoff_begin_function_line = -1;
+-static int xcoff_inlining = 0;
+-
+-/* Name of the current include file.  */
+-
+-const char *xcoff_current_include_file;
+-
+-/* Name of the current function file.  This is the file the `.bf' is
+-   emitted from.  In case a line is emitted from a different file,
+-   (by including that file of course), then the line number will be
+-   absolute.  */
+-
+-static const char *xcoff_current_function_file;
+-
+-/* Names of bss and data sections.  These should be unique names for each
+-   compilation unit.  */
+-
+-char *xcoff_bss_section_name;
+-char *xcoff_private_data_section_name;
+-char *xcoff_private_rodata_section_name;
+-char *xcoff_tls_data_section_name;
+-char *xcoff_read_only_section_name;
+-
+-/* Last source file name mentioned in a NOTE insn.  */
+-
+-const char *xcoff_lastfile;
+-
+-/* Macro definitions used below.  */
+-
+-#define ABS_OR_RELATIVE_LINENO(LINENO)		\
+-((xcoff_inlining) ? (LINENO) : (LINENO) - xcoff_begin_function_line)
+-
+-/* Output source line numbers via ".line".  */
+-#define ASM_OUTPUT_LINE(FILE,LINENUM)					   \
+-  do									   \
+-    {									   \
+-      /* Make sure we're in a function and prevent output of .line 0, as   \
+-	 line # 0 is meant for symbol addresses in xcoff.  Additionally,   \
+-	 line numbers are 'unsigned short' in 32-bit mode.  */		   \
+-      if (xcoff_begin_function_line >= 0)				   \
+-	{								   \
+-	  int lno = ABS_OR_RELATIVE_LINENO (LINENUM);			   \
+-	  if (lno > 0 && (TARGET_64BIT || lno <= (int)USHRT_MAX))	   \
+-	    fprintf (FILE, "\t.line\t%d\n", lno);			   \
+-	}								   \
+-    }									   \
+-  while (0)
+-
+-#define ASM_OUTPUT_LFB(FILE,LINENUM) \
+-{						\
+-  if (xcoff_begin_function_line == -1)		\
+-    {						\
+-      xcoff_begin_function_line = (LINENUM) - 1;\
+-      fprintf (FILE, "\t.bf\t%d\n", (LINENUM));	\
+-    }						\
+-  xcoff_current_function_file			\
+-    = (xcoff_current_include_file		\
+-       ? xcoff_current_include_file : main_input_filename); \
+-}
+-
+-#define ASM_OUTPUT_LFE(FILE,LINENUM)		\
+-  do						\
+-    {						\
+-      fprintf (FILE, "\t.ef\t%d\n", (LINENUM));	\
+-      xcoff_begin_function_line = -1;		\
+-    }						\
+-  while (0)
+-
+-#define ASM_OUTPUT_LBB(FILE,LINENUM,BLOCKNUM) \
+-  fprintf (FILE, "\t.bb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM))
+-
+-#define ASM_OUTPUT_LBE(FILE,LINENUM,BLOCKNUM) \
+-  fprintf (FILE, "\t.eb\t%d\n", ABS_OR_RELATIVE_LINENO (LINENUM))
+-
+-static void xcoffout_block (tree, int, tree);
+-static void xcoffout_source_file (FILE *, const char *, int);
+-
+-/* Support routines for XCOFF debugging info.  */
+-
+-struct xcoff_type_number
+-{
+-  const char *name;
+-  int number;
+-};
+-static const struct xcoff_type_number xcoff_type_numbers[] = {
+-  { "int", -1 },
+-  { "char", -2 },
+-  { "short int", -3 },
+-  { "long int", -4 },  /* fiddled to -31 if 64 bits */
+-  { "unsigned char", -5 },
+-  { "signed char", -6 },
+-  { "short unsigned int", -7 },
+-  { "unsigned int", -8 },
+-  /* No such type "unsigned".  */
+-  { "long unsigned int", -10 }, /* fiddled to -32 if 64 bits */
+-  { "void", -11 },
+-  { "float", -12 },
+-  { "double", -13 },
+-  { "long double", -14 },
+-  /* Fortran types run from -15 to -29.  */
+-  { "wchar", -30 },  /* XXX Should be "wchar_t" ? */
+-  { "long long int", -31 },
+-  { "long long unsigned int", -32 },
+-  /* Additional Fortran types run from -33 to -37.  */
+-
+-  /* ??? Should also handle built-in C++ and Obj-C types.  There perhaps
+-     aren't any that C doesn't already have.  */
+-};
+-
+-/* Returns an XCOFF fundamental type number for DECL (assumed to be a
+-   TYPE_DECL), or 0 if dbxout.cc should assign a type number normally.  */
+-int
+-xcoff_assign_fundamental_type_number (tree decl)
+-{
+-  const char *name;
+-  size_t i;
+-
+-  /* Do not waste time searching the list for non-intrinsic types.  */
+-  if (DECL_NAME (decl) == 0 || ! DECL_IS_UNDECLARED_BUILTIN (decl))
+-    return 0;
+-
+-  name = IDENTIFIER_POINTER (DECL_NAME (decl));
+-
+-  /* Linear search, blech, but the list is too small to bother
+-     doing anything else.  */
+-  for (i = 0; i < ARRAY_SIZE (xcoff_type_numbers); i++)
+-    if (!strcmp (xcoff_type_numbers[i].name, name))
+-      goto found;
+-  return 0;
+-
+- found:
+-  /* -4 and -10 should be replaced with -31 and -32, respectively,
+-     when used for a 64-bit type.  */
+-  if (int_size_in_bytes (TREE_TYPE (decl)) == 8)
+-    {
+-      if (xcoff_type_numbers[i].number == -4)
+-	return -31;
+-      if (xcoff_type_numbers[i].number == -10)
+-	return -32;
+-    }
+-  return xcoff_type_numbers[i].number;
+-}
+-
+-/* Print an error message for unrecognized stab codes.  */
+-
+-#define UNKNOWN_STAB(STR)	\
+-  internal_error ("no sclass for %s stab (0x%x)", STR, stab)
+-
+-/* Conversion routine from BSD stabs to AIX storage classes.  */
+-
+-int
+-stab_to_sclass (int stab)
+-{
+-  switch (stab)
+-    {
+-    case N_GSYM:
+-      return C_GSYM;
+-
+-    case N_FNAME:
+-      UNKNOWN_STAB ("N_FNAME");
+-
+-    case N_FUN:
+-      return C_FUN;
+-
+-    case N_STSYM:
+-    case N_LCSYM:
+-      return C_STSYM;
+-
+-    case N_MAIN:
+-      UNKNOWN_STAB ("N_MAIN");
+-
+-    case N_RSYM:
+-      return C_RSYM;
+-
+-    case N_SSYM:
+-      UNKNOWN_STAB ("N_SSYM");
+-
+-    case N_RPSYM:
+-      return C_RPSYM;
+-
+-    case N_PSYM:
+-      return C_PSYM;
+-    case N_LSYM:
+-      return C_LSYM;
+-    case N_DECL:
+-      return C_DECL;
+-    case N_ENTRY:
+-      return C_ENTRY;
+-
+-    case N_SO:
+-      UNKNOWN_STAB ("N_SO");
+-
+-    case N_SOL:
+-      UNKNOWN_STAB ("N_SOL");
+-
+-    case N_SLINE:
+-      UNKNOWN_STAB ("N_SLINE");
+-
+-    case N_DSLINE:
+-      UNKNOWN_STAB ("N_DSLINE");
+-
+-    case N_BSLINE:
+-      UNKNOWN_STAB ("N_BSLINE");
+-
+-    case N_BINCL:
+-      UNKNOWN_STAB ("N_BINCL");
+-
+-    case N_EINCL:
+-      UNKNOWN_STAB ("N_EINCL");
+-
+-    case N_EXCL:
+-      UNKNOWN_STAB ("N_EXCL");
+-
+-    case N_LBRAC:
+-      UNKNOWN_STAB ("N_LBRAC");
+-
+-    case N_RBRAC:
+-      UNKNOWN_STAB ("N_RBRAC");
+-
+-    case N_BCOMM:
+-      return C_BCOMM;
+-    case N_ECOMM:
+-      return C_ECOMM;
+-    case N_ECOML:
+-      return C_ECOML;
+-
+-    case N_LENG:
+-      UNKNOWN_STAB ("N_LENG");
+-
+-    case N_PC:
+-      UNKNOWN_STAB ("N_PC");
+-
+-    case N_M2C:
+-      UNKNOWN_STAB ("N_M2C");
+-
+-    case N_SCOPE:
+-      UNKNOWN_STAB ("N_SCOPE");
+-
+-    case N_CATCH:
+-      UNKNOWN_STAB ("N_CATCH");
+-
+-    case N_OPT:
+-      UNKNOWN_STAB ("N_OPT");
+-
+-    default:
+-      UNKNOWN_STAB ("?");
+-    }
+-}
+-
+-/* Output debugging info to FILE to switch to sourcefile FILENAME.
+-   INLINE_P is true if this is from an inlined function.  */
+-
+-static void
+-xcoffout_source_file (FILE *file, const char *filename, int inline_p)
+-{
+-  if (filename
+-      && (xcoff_lastfile == 0 || strcmp (filename, xcoff_lastfile)
+-	  || (inline_p && ! xcoff_inlining)
+-	  || (! inline_p && xcoff_inlining)))
+-    {
+-      if (xcoff_current_include_file)
+-	{
+-	  fprintf (file, "\t.ei\t");
+-	  output_quoted_string (file,
+-	      remap_debug_filename (xcoff_current_include_file));
+-	  fprintf (file, "\n");
+-	  xcoff_current_include_file = NULL;
+-	}
+-      xcoff_inlining = inline_p;
+-      if (strcmp (main_input_filename, filename) || inline_p)
+-	{
+-	  fprintf (file, "\t.bi\t");
+-	  output_quoted_string (file, remap_debug_filename (filename));
+-	  fprintf (file, "\n");
+-	  xcoff_current_include_file = filename;
+-	}
+-      xcoff_lastfile = filename;
+-    }
+-}
+-
+-/* Output a line number symbol entry for location (FILENAME, LINE).  */
+-
+-void
+-xcoffout_source_line (unsigned int line, unsigned int column ATTRIBUTE_UNUSED,
+-		      const char *filename, int discriminator ATTRIBUTE_UNUSED,
+-                      bool is_stmt ATTRIBUTE_UNUSED)
+-{
+-  bool inline_p = (strcmp (xcoff_current_function_file, filename) != 0
+-		   || (int) line < xcoff_begin_function_line);
+-
+-  xcoffout_source_file (asm_out_file, filename, inline_p);
+-
+-  ASM_OUTPUT_LINE (asm_out_file, line);
+-}
+-
+-/* Output the symbols defined in block number DO_BLOCK.
+-
+-   This function works by walking the tree structure of blocks,
+-   counting blocks until it finds the desired block.  */
+-
+-static unsigned int do_block = 0;
+-
+-static void
+-xcoffout_block (tree block, int depth, tree args)
+-{
+-  while (block)
+-    {
+-      /* Ignore blocks never expanded or otherwise marked as real.  */
+-      if (TREE_USED (block))
+-	{
+-	  /* When we reach the specified block, output its symbols.  */
+-	  if (BLOCK_NUMBER (block) == do_block)
+-	    {
+-	      /* Output the syms of the block.  */
+-	      if (debug_info_level != DINFO_LEVEL_TERSE || depth == 0)
+-		dbxout_syms (BLOCK_VARS (block));
+-	      if (args)
+-		dbxout_reg_parms (args);
+-
+-	      /* We are now done with the block.  Don't go to inner blocks.  */
+-	      return;
+-	    }
+-	  /* If we are past the specified block, stop the scan.  */
+-	  else if (BLOCK_NUMBER (block) >= do_block)
+-	    return;
+-
+-	  /* Output the subblocks.  */
+-	  xcoffout_block (BLOCK_SUBBLOCKS (block), depth + 1, NULL_TREE);
+-	}
+-      block = BLOCK_CHAIN (block);
+-    }
+-}
+-
+-/* Describe the beginning of an internal block within a function.
+-   Also output descriptions of variables defined in this block.
+-
+-   N is the number of the block, by order of beginning, counting from 1,
+-   and not counting the outermost (function top-level) block.
+-   The blocks match the BLOCKs in DECL_INITIAL (current_function_decl),
+-   if the count starts at 0 for the outermost one.  */
+-
+-void
+-xcoffout_begin_block (unsigned int line, unsigned int n)
+-{
+-  tree decl = current_function_decl;
+-
+-  /* The IBM AIX compiler does not emit a .bb for the function level scope,
+-     so we avoid it here also.  */
+-  if (n != 1)
+-    ASM_OUTPUT_LBB (asm_out_file, line, n);
+-
+-  do_block = n;
+-  xcoffout_block (DECL_INITIAL (decl), 0, DECL_ARGUMENTS (decl));
+-}
+-
+-/* Describe the end line-number of an internal block within a function.  */
+-
+-void
+-xcoffout_end_block (unsigned int line, unsigned int n)
+-{
+-  if (n != 1)
+-    ASM_OUTPUT_LBE (asm_out_file, line, n);
+-}
+-
+-/* Called at beginning of function (before prologue).
+-   Declare function as needed for debugging.  */
+-
+-void
+-xcoffout_declare_function (FILE *file, tree decl, const char *name)
+-{
+-  size_t len;
+-
+-  if (*name == '*')
+-    name++;
+-  len = strlen (name);
+-  if (name[len - 1] == ']')
+-    {
+-      char *n = XALLOCAVEC (char, len - 3);
+-      memcpy (n, name, len - 4);
+-      n[len - 4] = '\0';
+-      name = n;
+-    }
+-
+-  /* Any pending .bi or .ei must occur before the .function pseudo op.
+-     Otherwise debuggers will think that the function is in the previous
+-     file and/or at the wrong line number.  */
+-  xcoffout_source_file (file, DECL_SOURCE_FILE (decl), 0);
+-  dbxout_symbol (decl, 0);
+-
+-  /* .function NAME, TOP, MAPPING, TYPE, SIZE
+-     16 and 044 are placeholders for backwards compatibility */
+-  fprintf (file, "\t.function .%s,.%s,16,044,FE..%s-.%s\n",
+-	   name, name, name, name);
+-}
+-
+-/* Called at beginning of function body (at start of prologue).
+-   Record the function's starting line number, so we can output
+-   relative line numbers for the other lines.
+-   Record the file name that this function is contained in.  */
+-
+-void
+-xcoffout_begin_prologue (unsigned int line,
+-			 unsigned int column ATTRIBUTE_UNUSED,
+-			 const char *file ATTRIBUTE_UNUSED)
+-{
+-  ASM_OUTPUT_LFB (asm_out_file, line);
+-  dbxout_parms (DECL_ARGUMENTS (current_function_decl));
+-
+-  /* Emit the symbols for the outermost BLOCK's variables.  sdbout.c did this
+-     in sdbout_begin_block, but there is no guarantee that there will be any
+-     inner block 1, so we must do it here.  This gives a result similar to
+-     dbxout, so it does make some sense.  */
+-  do_block = BLOCK_NUMBER (DECL_INITIAL (current_function_decl));
+-  xcoffout_block (DECL_INITIAL (current_function_decl), 0,
+-		  DECL_ARGUMENTS (current_function_decl));
+-
+-  ASM_OUTPUT_LINE (asm_out_file, line);
+-}
+-
+-/* Called at end of function (before epilogue).
+-   Describe end of outermost block.  */
+-
+-void
+-xcoffout_end_function (unsigned int last_linenum)
+-{
+-  ASM_OUTPUT_LFE (asm_out_file, last_linenum);
+-}
+-
+-/* Output xcoff info for the absolute end of a function.
+-   Called after the epilogue is output.  */
+-
+-void
+-xcoffout_end_epilogue (unsigned int line ATTRIBUTE_UNUSED,
+-		       const char *file ATTRIBUTE_UNUSED)
+-{
+-  /* We need to pass the correct function size to .function, otherwise,
+-     the xas assembler can't figure out the correct size for the function
+-     aux entry.  So, we emit a label after the last instruction which can
+-     be used by the .function pseudo op to calculate the function size.  */
+-
+-  const char *fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
+-  if (*fname == '*')
+-    ++fname;
+-  fprintf (asm_out_file, "FE..");
+-  ASM_OUTPUT_LABEL (asm_out_file, fname);
+-}
+-#endif /* XCOFF_DEBUGGING_INFO */
+diff --git a/gcc/xcoffout.h b/gcc/xcoffout.h
+deleted file mode 100644
+index f28e83ea9..000000000
+--- a/gcc/xcoffout.h
++++ /dev/null
+@@ -1,194 +0,0 @@
+-/* XCOFF definitions.  These are needed in dbxout.cc, final.cc,
+-   and xcoffout.h.
+-   Copyright (C) 1998-2022 Free Software Foundation, Inc.
+-
+-This file is part of GCC.
+-
+-GCC is free software; you can redistribute it and/or modify it under
+-the terms of the GNU General Public License as published by the Free
+-Software Foundation; either version 3, or (at your option) any later
+-version.
+-
+-GCC is distributed in the hope that it will be useful, but WITHOUT ANY
+-WARRANTY; without even the implied warranty of MERCHANTABILITY or
+-FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
+-for more details.
+-
+-You should have received a copy of the GNU General Public License
+-along with GCC; see the file COPYING3.  If not see
+-.  */
+-
+-#ifndef GCC_XCOFFOUT_H
+-#define GCC_XCOFFOUT_H
+-
+-/* Tags and typedefs are C_DECL in XCOFF, not C_LSYM.  */
+-
+-#define DBX_TYPE_DECL_STABS_CODE N_DECL
+-
+-/* Use the XCOFF predefined type numbers.  */
+-
+-#define DBX_ASSIGN_FUNDAMENTAL_TYPE_NUMBER(TYPE) \
+-  xcoff_assign_fundamental_type_number (TYPE)
+-
+-/* Any type with a negative type index has already been output.  */
+-
+-#define DBX_TYPE_DEFINED(TYPE) (TYPE_SYMTAB_ADDRESS (TYPE) < 0)
+-
+-/* Must use N_STSYM for static const variables (those in the text section)
+-   instead of N_FUN.  */
+-
+-#define DBX_STATIC_CONST_VAR_CODE N_STSYM
+-
+-/* For static variables, output code to define the start of a static block.  */
+-
+-#define DBX_STATIC_BLOCK_START(ASMFILE,CODE)				\
+-{									\
+-  if ((CODE) == N_STSYM)						\
+-    fprintf ((ASMFILE), "\t.bs\t%s[RW]\n", xcoff_private_data_section_name);\
+-  else if ((CODE) == N_LCSYM)						\
+-    fprintf ((ASMFILE), "\t.bs\t%s\n", xcoff_bss_section_name);	\
+-}
+-
+-/* For static variables, output code to define the end of a static block.  */
+-
+-#define DBX_STATIC_BLOCK_END(ASMFILE,CODE)				\
+-{									\
+-  if ((CODE) == N_STSYM || (CODE) == N_LCSYM)				\
+-    fputs ("\t.es\n", (ASMFILE));					\
+-}
+-
+-/* We must use N_RPYSM instead of N_RSYM for register parameters.  */
+-
+-#define DBX_REGPARM_STABS_CODE N_RPSYM
+-
+-/* We must use 'R' instead of 'P' for register parameters.  */
+-
+-#define DBX_REGPARM_STABS_LETTER 'R'
+-
+-/* Define our own finish symbol function, since xcoff stabs have their
+-   own different format.  */
+-
+-#define DBX_FINISH_STABS(SYM, CODE, LINE, ADDR, LABEL, NUMBER) do {	\
+-  if (ADDR)								\
+-    {									\
+-      /* If we are writing a function name, we must emit a dot in	\
+-	 order to refer to the function code, not its descriptor.  */	\
+-      if (CODE == N_FUN)						\
+-	putc ('.', asm_out_file);					\
+-									\
+-      /* If we are writing a function name, we must ensure that		\
+-	 there is no storage-class suffix on the name.  */		\
+-      if (CODE == N_FUN && GET_CODE (ADDR) == SYMBOL_REF)		\
+-	{								\
+-	  const char *_p = XSTR (ADDR, 0);				\
+-	  if (*_p == '*')						\
+-	    fputs (_p+1, asm_out_file);					\
+-	  else								\
+-	    for (; *_p != '[' && *_p; _p++)				\
+-	      putc (*_p != '$' ? *_p : '_', asm_out_file);		\
+-	}								\
+-      else								\
+-	output_addr_const (asm_out_file, ADDR);				\
+-    }									\
+-  /* Another special case: N_GSYM always gets the symbol name,		\
+-     whether or not LABEL or NUMBER are set.  */			\
+-  else if (CODE == N_GSYM)						\
+-    assemble_name (asm_out_file, XSTR (XEXP (DECL_RTL (SYM), 0), 0));	\
+-  else if (LABEL)							\
+-    assemble_name (asm_out_file, LABEL);				\
+-  else									\
+-    dbxout_int (NUMBER);						\
+-  putc (',', asm_out_file);						\
+-  dbxout_int (stab_to_sclass (CODE));					\
+-  fputs (",0\n", asm_out_file);						\
+-} while (0)
+-
+-/* These are IBM XCOFF extensions we need to reference in dbxout.cc
+-   and xcoffout.cc.  */
+-
+-/* AIX XCOFF uses this for typedefs.  This can have any value, since it is
+-   only used for translation into a C_DECL storage class.  */
+-#ifndef N_DECL
+-#define N_DECL 0x8c
+-#endif
+-/* AIX XCOFF uses this for parameters passed in registers.  This can have
+-   any value, since it is only used for translation into a C_RPSYM storage
+-   class.  */
+-#ifndef N_RPSYM
+-#define N_RPSYM 0x8e
+-#endif
+-
+-/* Name of the current include file.  */
+-
+-extern const char *xcoff_current_include_file;
+-
+-/* Names of bss and data sections.  These should be unique names for each
+-   compilation unit.  */
+-
+-extern char *xcoff_bss_section_name;
+-extern char *xcoff_private_data_section_name;
+-extern char *xcoff_private_rodata_section_name;
+-extern char *xcoff_tls_data_section_name;
+-extern char *xcoff_read_only_section_name;
+-
+-/* Last source file name mentioned in a NOTE insn.  */
+-
+-extern const char *xcoff_lastfile;
+-
+-/* Don't write out path name for main source file.  */
+-#define NO_DBX_MAIN_SOURCE_DIRECTORY 1
+-
+-/* Write out main source file name using ".file" rather than ".stabs".
+-   We don't actually do this here, because the assembler gets confused if there
+-   is more than one .file directive.  rs6000_xcoff_file_start is already
+-   emitting a .file directory, so we don't output one here also.
+-   Initialize xcoff_lastfile.  */
+-#define DBX_OUTPUT_MAIN_SOURCE_FILENAME(FILE,FILENAME) \
+-  xcoff_lastfile = (FILENAME)
+-
+-/* If we are still in an include file, its end must be marked.  */
+-#define DBX_OUTPUT_MAIN_SOURCE_FILE_END(FILE, FILENAME)	\
+-do {							\
+-  if (xcoff_current_include_file)			\
+-    {							\
+-      fputs ("\t.ei\t", (FILE));			\
+-      output_quoted_string ((FILE), xcoff_current_include_file);	\
+-      putc ('\n', (FILE));				\
+-      xcoff_current_include_file = NULL;		\
+-    }							\
+-} while (0)
+-
+-/* Do not emit any marker for XCOFF until assembler allows XFT_CV.  */
+-#define NO_DBX_GCC_MARKER
+-
+-/* XCOFF32 maximum length is 64K; XLC limits to 16K.  */
+-#define DBX_CONTIN_LENGTH 16384
+-
+-/* XLC uses '?' as continuation character.  */
+-#define DBX_CONTIN_CHAR '?'
+-
+-/* Don't try to use the `x' type-cross-reference character in DBX data.
+-   Also has the consequence of putting each struct, union or enum
+-   into a separate .stabs, containing only cross-refs to the others.  */
+-#define DBX_NO_XREFS
+-
+-/* We must put stabs in the text section.  If we don't the assembler
+-   won't handle them correctly; it will sometimes put stabs where gdb
+-   can't find them.  */
+-
+-#define DEBUG_SYMS_TEXT
+-
+-/* Prototype functions in xcoffout.cc.  */
+-
+-extern int stab_to_sclass (int);
+-extern void xcoffout_begin_prologue (unsigned int, unsigned int, const char *);
+-extern void xcoffout_begin_block (unsigned, unsigned);
+-extern void xcoffout_end_epilogue (unsigned int, const char *);
+-extern void xcoffout_end_function (unsigned int);
+-extern void xcoffout_end_block (unsigned, unsigned);
+-extern int xcoff_assign_fundamental_type_number (tree);
+-extern void xcoffout_declare_function (FILE *, tree, const char *);
+-extern void xcoffout_source_line (unsigned int, unsigned int, const char *,
+-				  int, bool);
+-
+-#endif /* GCC_XCOFFOUT_H */
+-- 
+2.33.0
+
diff --git a/0096-Bugfix-Autofdo-use-PMU-sampling-set-num-eauals-den.patch b/0096-Bugfix-Autofdo-use-PMU-sampling-set-num-eauals-den.patch
new file mode 100644
index 0000000000000000000000000000000000000000..20cf27b093ef9c242126da3133ed08b5a00f7dce
--- /dev/null
+++ b/0096-Bugfix-Autofdo-use-PMU-sampling-set-num-eauals-den.patch
@@ -0,0 +1,45 @@
+From 06e86b362f74ba0706fb5d8377f78d24b658c300 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Sat, 18 May 2024 12:22:23 +0800
+Subject: [PATCH] [Bugfix] Autofdo use PMU sampling set num equals den
+
+---
+ gcc/final.cc    | 2 +-
+ gcc/tree-cfg.cc | 8 ++++++++
+ 2 files changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/final.cc b/gcc/final.cc
+index f66c9d155..e4bfceabc 100644
+--- a/gcc/final.cc
++++ b/gcc/final.cc
+@@ -4604,7 +4604,7 @@ dump_profile_to_elf_sections ()
+   /* Return if no feedback data.    */
+   if (!flag_profile_use && !flag_auto_profile)
+     {
+-      error ("-fauto-bolt should use with -profile-use or -fauto-profile");
++      error ("-fauto-bolt should use with -fprofile-use or -fauto-profile");
+       return;
+     }
+   
+diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
+index 05fc45147..48b52f785 100644
+--- a/gcc/tree-cfg.cc
++++ b/gcc/tree-cfg.cc
+@@ -9741,6 +9741,14 @@ execute_fixup_cfg (void)
+   /* Same scaling is also done by ipa_merge_profiles.  */
+   profile_count num = node->count;
+   profile_count den = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
++  /* When autofdo uses PMU as the sampling unit, the number of
++     node can not be obtained directly, sometimes it will be zero,
++     but the execution number for function should at least be 1. We
++     set num be den here to make sure the num will not decrease.  */
++  if (num == profile_count::zero ().afdo () && den.quality () == profile_quality::AFDO)
++    {
++      num = den;
++    }
+   bool scale = num.initialized_p () && !(num == den);
+   auto_bitmap dce_ssa_names;
+ 
+-- 
+2.33.0
+
diff --git a/0096-LoongArch-testsuite-Modify-the-test-behavior-of-the-.patch b/0096-LoongArch-testsuite-Modify-the-test-behavior-of-the-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ba208db4459146688248f7a392423f89ec46e3fb
--- /dev/null
+++ b/0096-LoongArch-testsuite-Modify-the-test-behavior-of-the-.patch
@@ -0,0 +1,47 @@
+From c21f2c7e6c2385a3783977bbca79ebe178d0d141 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Fri, 5 Jan 2024 11:43:24 +0800
+Subject: [PATCH 096/188] LoongArch: testsuite:Modify the test behavior of the
+ vect-bic-bitmask-{12, 23}.c file.
+
+Before modifying the test behavior of the program, dg-do is set to assemble in
+vect-bic-bitmask-{12,23}.c. However, when the binutils library does not support
+the vector instruction set, it will FAIL to recognize the vector instruction
+and fail item will appear in the assembly stage. So set the program's dg-do to
+compile.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.dg/vect/vect-bic-bitmask-12.c: Change the default
+	setting of assembly to compile.
+	* gcc.dg/vect/vect-bic-bitmask-23.c: Dito.
+---
+ gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-12.c | 2 +-
+ gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-23.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-12.c b/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-12.c
+index 36ec5a8b1..213e4c2a4 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-12.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-12.c
+@@ -1,5 +1,5 @@
+ /* { dg-skip-if "missing optab for vectorization" { sparc*-*-* } } */
+-/* { dg-do assemble } */
++/* { dg-do compile } */
+ /* { dg-additional-options "-O3 -fdump-tree-dce -w" } */
+ 
+ #include 
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-23.c b/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-23.c
+index 5b4c3b6e1..5dceb4bbc 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-23.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-bic-bitmask-23.c
+@@ -1,5 +1,5 @@
+ /* { dg-skip-if "missing optab for vectorization" { sparc*-*-* } } */
+-/* { dg-do assemble } */
++/* { dg-do compile } */
+ /* { dg-additional-options "-O1 -fdump-tree-dce -w" } */
+ 
+ #include 
+-- 
+2.43.0
+
diff --git a/0097-Improve-non-loop-disambiguation.patch b/0097-Improve-non-loop-disambiguation.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ae609d29474c7a05896508e06227959e405b2ad5
--- /dev/null
+++ b/0097-Improve-non-loop-disambiguation.patch
@@ -0,0 +1,101 @@
+From 6de2e0d400cbe46da482a672810c37b1832c408c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=83=91=E6=99=A8=E5=8D=89?= 
+Date: Thu, 25 Jul 2024 19:45:43 +0800
+Subject: [PATCH] Improve non-loop disambiguation
+
+This optimization is brought from https://gcc.gnu.org/git/gitweb.cgi?p=gcc.git;h=038b077689bb5310386b04d40a2cea234f01e6aa.
+
+When dr_may_alias_p is called without a loop context, it tries
+to use the tree-affine interface to calculate the difference
+between the two addresses and use that difference to check whether
+the gap between the accesses is known at compile time.  However, as the
+example in the PR shows, this doesn't expand SSA_NAMEs and so can easily
+be defeated by things like reassociation.
+
+One fix would have been to use aff_combination_expand to expand the
+SSA_NAMEs, but we'd then need some way of maintaining the associated
+cache.  This patch instead reuses the innermost_loop_behavior fields
+(which exist even when no loop context is provided).
+
+It might still be useful to do the aff_combination_expand thing too,
+if an example turns out to need it.
+---
+ gcc/common.opt                              |  4 ++++
+ gcc/testsuite/gcc.dg/vect/bb-slp-pr106019.c | 16 +++++++++++++++
+ gcc/tree-data-ref.cc                        | 22 +++++++++++++++++++++
+ 3 files changed, 42 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/vect/bb-slp-pr106019.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index b18f0b944..75bf9c9c1 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -3217,6 +3217,10 @@ ftree-loop-vectorize
+ Common Var(flag_tree_loop_vectorize) Optimization EnabledBy(ftree-vectorize)
+ Enable loop vectorization on trees.
+ 
++falias-analysis-expand-ssa
++Common Var(flag_alias_analysis_expand_ssa) Init(0)
++Enable expanded SSA name analysis during alias analysis.
++
+ ftree-slp-vectorize
+ Common Var(flag_tree_slp_vectorize) Optimization EnabledBy(ftree-vectorize)
+ Enable basic block vectorization (SLP) on trees.
+diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-pr106019.c b/gcc/testsuite/gcc.dg/vect/bb-slp-pr106019.c
+new file mode 100644
+index 000000000..5ff8a8a62
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/bb-slp-pr106019.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-falias-analysis-expand-ssa" } */
++
++void f(double *p, long i)
++{
++    p[i+0] += 1;
++    p[i+1] += 1;
++}
++void g(double *p, long i)
++{
++    double *q = p + i;
++    q[0] += 1;
++    q[1] += 1;
++}
++
++/* { dg-final { scan-tree-dump-not "can't determine dependence" slp2 } } */
+diff --git a/gcc/tree-data-ref.cc b/gcc/tree-data-ref.cc
+index e6ae9e847..a05073c51 100644
+--- a/gcc/tree-data-ref.cc
++++ b/gcc/tree-data-ref.cc
+@@ -2993,6 +2993,28 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
+      disambiguation.  */
+   if (!loop_nest)
+     {
++      if (flag_alias_analysis_expand_ssa)
++	{
++	  tree tree_size_a = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (a)));
++	  tree tree_size_b = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (b)));
++
++	  if (DR_BASE_ADDRESS (a)
++	     && DR_BASE_ADDRESS (b)
++	     && operand_equal_p (DR_BASE_ADDRESS (a), DR_BASE_ADDRESS (b))
++	     && operand_equal_p (DR_OFFSET (a), DR_OFFSET (b))
++	     && poly_int_tree_p (tree_size_a)
++	     && poly_int_tree_p (tree_size_b)
++	     && !ranges_maybe_overlap_p (wi::to_widest (DR_INIT (a)),
++					 wi::to_widest (tree_size_a),
++					 wi::to_widest (DR_INIT (b)),
++					 wi::to_widest (tree_size_b)))
++	     {
++	       gcc_assert (integer_zerop (DR_STEP (a))
++	     		   && integer_zerop (DR_STEP (b)));
++	       return false;
++	     }
++	}
++
+       aff_tree off1, off2;
+       poly_widest_int size1, size2;
+       get_inner_reference_aff (DR_REF (a), &off1, &size1);
+-- 
+2.33.0
+
diff --git a/0097-LoongArch-testsuite-Delete-the-default-run-behavior-.patch b/0097-LoongArch-testsuite-Delete-the-default-run-behavior-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f87269fc5d2320894d0090495fe95175da7f6b73
--- /dev/null
+++ b/0097-LoongArch-testsuite-Delete-the-default-run-behavior-.patch
@@ -0,0 +1,31 @@
+From cdee2d1e7391d95bf6fd471fddcb86ee81247929 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Fri, 5 Jan 2024 11:43:27 +0800
+Subject: [PATCH 097/188] LoongArch: testsuite:Delete the default run behavior
+ in pr60510.f.
+
+When binutils does not support vector instruction sets, the test program fails
+because it does not recognize vectorization at the assembly stage. Therefore,
+the default run behavior of the program is deleted, so that the behavior of
+the program depends on whether the software supports vectorization.
+
+gcc/testsuite/ChangeLog:
+
+	* gfortran.dg/vect/pr60510.f: Delete the default behavior of the
+	program.
+---
+ gcc/testsuite/gfortran.dg/vect/pr60510.f | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/gcc/testsuite/gfortran.dg/vect/pr60510.f b/gcc/testsuite/gfortran.dg/vect/pr60510.f
+index ecd50dd55..c1e11b27d 100644
+--- a/gcc/testsuite/gfortran.dg/vect/pr60510.f
++++ b/gcc/testsuite/gfortran.dg/vect/pr60510.f
+@@ -1,4 +1,3 @@
+-! { dg-do run }
+ ! { dg-require-effective-target vect_double }
+ ! { dg-require-effective-target vect_intdouble_cvt }
+ ! { dg-additional-options "-fno-inline -ffast-math" }
+-- 
+2.43.0
+
diff --git a/0098-CHREC-multiplication-and-undefined-overflow.patch b/0098-CHREC-multiplication-and-undefined-overflow.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9f9a6b7410fd4a910d9eb899401a81f62500a797
--- /dev/null
+++ b/0098-CHREC-multiplication-and-undefined-overflow.patch
@@ -0,0 +1,265 @@
+From c4e4fef145c1e402f0558cc35f6c1ed0a08beffb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=83=91=E6=99=A8=E5=8D=89?= 
+Date: Thu, 25 Jul 2024 20:16:52 +0800
+Subject: [PATCH] CHREC multiplication and undefined overflow
+
+This optimization is brought from https://gcc.gnu.org/pipermail/gcc-patches/2024-February/646531.html
+
+When folding a multiply CHRECs are handled like {a, +, b} * c
+is {a*c, +, b*c} but that isn't generally correct when overflow
+invokes undefined behavior.  The following uses unsigned arithmetic
+unless either a is zero or a and b have the same sign.
+
+I've used simple early outs for INTEGER_CSTs and otherwise use
+a range-query since we lack a tree_expr_nonpositive_p and
+get_range_pos_neg isn't a good fit.
+---
+ gcc/common.opt                          |  4 ++
+ gcc/testsuite/gcc.dg/pr68317.c          |  6 +-
+ gcc/testsuite/gcc.dg/torture/pr114074.c | 31 ++++++++++
+ gcc/tree-chrec.cc                       | 81 +++++++++++++++++++++----
+ gcc/tree-chrec.h                        |  2 +-
+ gcc/value-range.cc                      | 12 ++++
+ gcc/value-range.h                       |  2 +
+ 7 files changed, 123 insertions(+), 15 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/torture/pr114074.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index b18f0b944..d3af3ba39 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1771,6 +1771,10 @@ floop-interchange
+ Common Var(flag_loop_interchange) Optimization
+ Enable loop interchange on trees.
+ 
++fchrec-mul-fold-strict-overflow
++Common Var(flag_chrec_mul_fold_strict_overflow) Init(0)
++Enable strict overflow handling during constant folding of multiply CHRECs.
++
+ floop-block
+ Common Alias(floop-nest-optimize)
+ Enable loop nest transforms.  Same as -floop-nest-optimize.
+diff --git a/gcc/testsuite/gcc.dg/pr68317.c b/gcc/testsuite/gcc.dg/pr68317.c
+index bd053a752..671a67d95 100644
+--- a/gcc/testsuite/gcc.dg/pr68317.c
++++ b/gcc/testsuite/gcc.dg/pr68317.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-ethread" } */
++/* { dg-options "-O2 -fdisable-tree-ethread -fchrec-mul-fold-strict-overflow" } */
+ 
+ /* Note: Threader will collapse loop.  */
+ 
+@@ -12,8 +12,8 @@ foo ()
+ {
+  int32_t index = 0;
+ 
+- for (index; index <= 10; index--) // expected warning here
++ for (index; index <= 10; index--) /* { dg-warning "iteration \[0-9\]+ invokes undefined behavior" } */
+    /* Result of the following multiply will overflow
+       when converted to signed int32_t.  */
+-   bar ((0xcafe + index) * 0xdead);  /* { dg-warning "iteration \[0-9\]+ invokes undefined behavior" } */
++   bar ((0xcafe + index) * 0xdead);
+ }
+diff --git a/gcc/testsuite/gcc.dg/torture/pr114074.c b/gcc/testsuite/gcc.dg/torture/pr114074.c
+new file mode 100644
+index 000000000..9a383d8fc
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/torture/pr114074.c
+@@ -0,0 +1,31 @@
++/* { dg-do run } */
++<<<<<<< HEAD
++/* { dg-options "-fchrec-mul-fold-strict-overflow" } */
++=======
++/* { dg-options "-fchrec-mul-fold-strict-overflow"" } */
++>>>>>>> 47092575e7696f5a21cf75284fe3d4feb0c813ab
++int a, b, d;
++
++__attribute__((noipa)) void
++foo (void)
++{
++  ++d;
++}
++
++int
++main ()
++{
++  for (a = 0; a > -3; a -= 2)
++    {
++      int c = a;
++      b = __INT_MAX__ - 3000;
++      a = ~c * b;
++      foo ();
++      if (!a)
++	break;
++      a = c;
++    }
++  if (d != 2)
++    __builtin_abort ();
++  return 0;
++}
+diff --git a/gcc/tree-chrec.cc b/gcc/tree-chrec.cc
+index c44cea754..3323901bc 100644
+--- a/gcc/tree-chrec.cc
++++ b/gcc/tree-chrec.cc
+@@ -38,6 +38,8 @@ along with GCC; see the file COPYING3.  If not see
+ #include "gimple.h"
+ #include "tree-ssa-loop.h"
+ #include "dumpfile.h"
++#include "value-range.h"
++#include "value-query.h"
+ #include "tree-scalar-evolution.h"
+ 
+ /* Extended folder for chrecs.  */
+@@ -404,6 +406,13 @@ chrec_fold_multiply (tree type,
+       || automatically_generated_chrec_p (op1))
+     return chrec_fold_automatically_generated_operands (op0, op1);
+ 
++  if (flag_chrec_mul_fold_strict_overflow)
++    {
++      if (TREE_CODE (op0) != POLYNOMIAL_CHREC
++	  && TREE_CODE (op1) == POLYNOMIAL_CHREC)
++	std::swap (op0, op1);
++    }
++
+   switch (TREE_CODE (op0))
+     {
+     case POLYNOMIAL_CHREC:
+@@ -428,10 +437,53 @@ chrec_fold_multiply (tree type,
+ 	  if (integer_zerop (op1))
+ 	    return build_int_cst (type, 0);
+ 
+-	  return build_polynomial_chrec
+-	    (CHREC_VARIABLE (op0),
+-	     chrec_fold_multiply (type, CHREC_LEFT (op0), op1),
+-	     chrec_fold_multiply (type, CHREC_RIGHT (op0), op1));
++	  if (flag_chrec_mul_fold_strict_overflow)
++	    {
++	      /* When overflow is undefined and CHREC_LEFT/RIGHT do not have the
++		 same sign or CHREC_LEFT is zero then folding the multiply into
++		 the addition does not have the same behavior on overflow.  Use
++		 unsigned arithmetic in that case.  */
++	      value_range rl, rr;
++	      if (!ANY_INTEGRAL_TYPE_P (type)
++		  || TYPE_OVERFLOW_WRAPS (type)
++		  || integer_zerop (CHREC_LEFT (op0))
++		  || (TREE_CODE (CHREC_LEFT (op0)) == INTEGER_CST
++		  && TREE_CODE (CHREC_RIGHT (op0)) == INTEGER_CST
++		  && (tree_int_cst_sgn (CHREC_LEFT (op0))
++		      == tree_int_cst_sgn (CHREC_RIGHT (op0))))
++		  || (get_range_query (cfun)->range_of_expr (rl, CHREC_LEFT (op0))
++		  && !rl.undefined_p ()
++		  && (rl.nonpositive_p () || rl.nonnegative_p ())
++		  && get_range_query (cfun)->range_of_expr (rr,
++							CHREC_RIGHT (op0))
++		  && !rr.undefined_p ()
++		  && ((rl.nonpositive_p () && rr.nonpositive_p ())
++		  || (rl.nonnegative_p () && rr.nonnegative_p ()))))
++		{
++		  tree left = chrec_fold_multiply (type, CHREC_LEFT (op0), op1);
++		  tree right = chrec_fold_multiply (type, CHREC_RIGHT (op0), op1);
++		  return build_polynomial_chrec (CHREC_VARIABLE (op0), left, right);
++		}
++	      else
++		{
++		  tree utype = unsigned_type_for (type);
++		  tree uop1 = chrec_convert_rhs (utype, op1);
++		  tree uleft0 = chrec_convert_rhs (utype, CHREC_LEFT (op0));
++		  tree uright0 = chrec_convert_rhs (utype, CHREC_RIGHT (op0));
++		  tree left = chrec_fold_multiply (utype, uleft0, uop1);
++		  tree right = chrec_fold_multiply (utype, uright0, uop1);
++		  tree tem = build_polynomial_chrec (CHREC_VARIABLE (op0),
++							left, right);
++		  return chrec_convert_rhs (type, tem);
++		}
++	     }
++	   else
++	     {
++	       return build_polynomial_chrec
++		  (CHREC_VARIABLE (op0),
++		   chrec_fold_multiply (type, CHREC_LEFT (op0), op1),
++		   chrec_fold_multiply (type, CHREC_RIGHT (op0), op1));
++	     }
+ 	}
+ 
+     CASE_CONVERT:
+@@ -449,13 +501,20 @@ chrec_fold_multiply (tree type,
+       switch (TREE_CODE (op1))
+ 	{
+ 	case POLYNOMIAL_CHREC:
+-	  gcc_checking_assert
+-	    (!chrec_contains_symbols_defined_in_loop (op1,
+-						      CHREC_VARIABLE (op1)));
+-	  return build_polynomial_chrec
+-	    (CHREC_VARIABLE (op1),
+-	     chrec_fold_multiply (type, CHREC_LEFT (op1), op0),
+-	     chrec_fold_multiply (type, CHREC_RIGHT (op1), op0));
++	  if (flag_chrec_mul_fold_strict_overflow)
++	    {
++	      gcc_unreachable ();
++	    }
++	  else
++	   {
++	     gcc_checking_assert
++		(!chrec_contains_symbols_defined_in_loop (op1,
++				CHREC_VARIABLE (op1)));
++	     return build_polynomial_chrec
++		(CHREC_VARIABLE (op1),
++		 chrec_fold_multiply (type, CHREC_LEFT (op1), op0),
++		 chrec_fold_multiply (type, CHREC_RIGHT (op1), op0));
++	    }
+ 
+ 	CASE_CONVERT:
+ 	  if (tree_contains_chrecs (op1, NULL))
+diff --git a/gcc/tree-chrec.h b/gcc/tree-chrec.h
+index fcf41710d..cdc97d5d9 100644
+--- a/gcc/tree-chrec.h
++++ b/gcc/tree-chrec.h
+@@ -63,7 +63,7 @@ extern tree chrec_fold_plus (tree, tree, tree);
+ extern tree chrec_fold_minus (tree, tree, tree);
+ extern tree chrec_fold_multiply (tree, tree, tree);
+ extern tree chrec_convert (tree, tree, gimple *, bool = true, tree = NULL);
+-extern tree chrec_convert_rhs (tree, tree, gimple *);
++extern tree chrec_convert_rhs (tree, tree, gimple * = NULL);
+ extern tree chrec_convert_aggressive (tree, tree, bool *);
+ 
+ /* Operations.  */
+diff --git a/gcc/value-range.cc b/gcc/value-range.cc
+index 000bbcf89..a1dc10a24 100644
+--- a/gcc/value-range.cc
++++ b/gcc/value-range.cc
+@@ -656,6 +656,18 @@ irange::contains_p (tree cst) const
+ 
+   return false;
+ }
++bool
++irange::nonnegative_p () const
++{
++  return wi::ge_p (lower_bound (), 0, TYPE_SIGN (type ()));
++}
++
++bool
++irange::nonpositive_p () const
++{
++  return wi::le_p (upper_bound (), 0, TYPE_SIGN (type ()));
++}
++
+ 
+ 
+ /* Normalize addresses into constants.  */
+diff --git a/gcc/value-range.h b/gcc/value-range.h
+index d4cba22d5..2dc0907de 100644
+--- a/gcc/value-range.h
++++ b/gcc/value-range.h
+@@ -69,6 +69,8 @@ public:
+   bool varying_p () const;
+   bool singleton_p (tree *result = NULL) const;
+   bool contains_p (tree) const;
++  bool nonnegative_p () const;
++  bool nonpositive_p () const;
+ 
+   // In-place operators.
+   void union_ (const irange &);
+-- 
+2.33.0
+
diff --git a/0098-LoongArch-testsuite-Added-additional-vectorization-m.patch b/0098-LoongArch-testsuite-Added-additional-vectorization-m.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ea25625a1fc77ea57b0754cd837e4611f063c89c
--- /dev/null
+++ b/0098-LoongArch-testsuite-Added-additional-vectorization-m.patch
@@ -0,0 +1,157 @@
+From c8fa8efa3297ebced55da8a69cf44f314573be7c Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Fri, 5 Jan 2024 11:43:28 +0800
+Subject: [PATCH 098/188] LoongArch: testsuite:Added additional vectorization
+ "-mlasx" compilation option.
+
+In the LoongArch architecture, the reason for not adding the 128-bit
+vector-width-*hi* instruction template in the GCC back end is that it causes
+program performance loss, so we can only add the "-mlasx" compilation option
+to use 256-bit vectorization functions in test files.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.dg/vect/bb-slp-pattern-1.c: If you are testing on the
+	LoongArch architecture, you need to add the "-mlasx" compilation
+	option to generate vectorized code.
+	* gcc.dg/vect/slp-widen-mult-half.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-const-s16.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-const-u16.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-half-u8.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-half.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-u16.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-u8-s16-s32.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-u8-u32.c: Dito.
+	* gcc.dg/vect/vect-widen-mult-u8.c: Dito.
+---
+ gcc/testsuite/gcc.dg/vect/bb-slp-pattern-1.c           | 1 +
+ gcc/testsuite/gcc.dg/vect/slp-widen-mult-half.c        | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-s16.c  | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-u16.c  | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-half-u8.c    | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-half.c       | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-u16.c        | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-s16-s32.c | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-u32.c     | 1 +
+ gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8.c         | 1 +
+ 10 files changed, 10 insertions(+)
+
+diff --git a/gcc/testsuite/gcc.dg/vect/bb-slp-pattern-1.c b/gcc/testsuite/gcc.dg/vect/bb-slp-pattern-1.c
+index 47b1a4366..52ffca82a 100644
+--- a/gcc/testsuite/gcc.dg/vect/bb-slp-pattern-1.c
++++ b/gcc/testsuite/gcc.dg/vect/bb-slp-pattern-1.c
+@@ -1,4 +1,5 @@
+ /* { dg-require-effective-target vect_int } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-* } } */
+ 
+ #include 
+ #include "tree-vect.h"
+diff --git a/gcc/testsuite/gcc.dg/vect/slp-widen-mult-half.c b/gcc/testsuite/gcc.dg/vect/slp-widen-mult-half.c
+index e3bfee333..cd44e551f 100644
+--- a/gcc/testsuite/gcc.dg/vect/slp-widen-mult-half.c
++++ b/gcc/testsuite/gcc.dg/vect/slp-widen-mult-half.c
+@@ -1,6 +1,7 @@
+ /* Disabling epilogues until we find a better way to deal with scans.  */
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-* } } */
+ 
+ #include "tree-vect.h"
+ 
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-s16.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-s16.c
+index 4c95dd201..082c758cb 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-s16.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-s16.c
+@@ -2,6 +2,7 @@
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
+ /* { dg-additional-options "-fno-ipa-icf" } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-*} } */
+ 
+ #include "tree-vect.h"
+ 
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-u16.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-u16.c
+index 4075f815c..a95e617ad 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-u16.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-const-u16.c
+@@ -2,6 +2,7 @@
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
+ /* { dg-additional-options "-fno-ipa-icf" } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-*} } */
+ 
+ #include "tree-vect.h"
+ 
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half-u8.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half-u8.c
+index c4ac88e18..14d96645a 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half-u8.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half-u8.c
+@@ -2,6 +2,7 @@
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
+ /* { dg-additional-options "-fno-ipa-icf" } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-*} } */
+ 
+ #include "tree-vect.h"
+ 
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half.c
+index ebbf4f5e8..7901dae85 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-half.c
+@@ -1,6 +1,7 @@
+ /* Disabling epilogues until we find a better way to deal with scans.  */
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-*} } */
+ 
+ #include "tree-vect.h"
+ 
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u16.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u16.c
+index 2e28baae0..21b39953e 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u16.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u16.c
+@@ -1,6 +1,7 @@
+ /* Disabling epilogues until we find a better way to deal with scans.  */
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-*} } */
+ 
+ #include 
+ #include "tree-vect.h"
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-s16-s32.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-s16-s32.c
+index d277f0b2b..4827e11b2 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-s16-s32.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-s16-s32.c
+@@ -1,6 +1,7 @@
+ /* Disabling epilogues until we find a better way to deal with scans.  */
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-*} } */
+ 
+ #include 
+ #include "tree-vect.h"
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-u32.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-u32.c
+index f50358802..87eb9e0cb 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-u32.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8-u32.c
+@@ -1,5 +1,6 @@
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-* } } */
+ 
+ #include 
+ #include "tree-vect.h"
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8.c b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8.c
+index 03d137941..507d30c35 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8.c
++++ b/gcc/testsuite/gcc.dg/vect/vect-widen-mult-u8.c
+@@ -1,5 +1,6 @@
+ /* { dg-additional-options "--param vect-epilogues-nomask=0" } */
+ /* { dg-require-effective-target vect_int } */
++/* { dg-additional-options "-mlasx" { target loongarch*-*-*} } */
+ 
+ #include 
+ #include "tree-vect.h"
+-- 
+2.43.0
+
diff --git a/0099-Enable-Transposed-SLP.patch b/0099-Enable-Transposed-SLP.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b4e8b24b669790890da83fa4966a18efb18f90ae
--- /dev/null
+++ b/0099-Enable-Transposed-SLP.patch
@@ -0,0 +1,5624 @@
+From 0dd3b8532f35486bd5db2c71342c8dfed4c0893a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=83=91=E6=99=A8=E5=8D=89?= 
+Date: Thu, 25 Jul 2024 17:25:23 +0800
+Subject: [PATCH] Enable Transposed SLP.
+
+---
+ gcc/common.opt                          |    4 +
+ gcc/testsuite/gcc.dg/vect/transpose-1.c |   53 +
+ gcc/testsuite/gcc.dg/vect/transpose-2.c |   50 +
+ gcc/testsuite/gcc.dg/vect/transpose-3.c |   54 +
+ gcc/testsuite/gcc.dg/vect/transpose-4.c |   53 +
+ gcc/testsuite/gcc.dg/vect/transpose-5.c |   74 ++
+ gcc/testsuite/gcc.dg/vect/transpose-6.c |   67 +
+ gcc/testsuite/gcc.dg/vect/transpose-7.c |   53 +
+ gcc/testsuite/gcc.dg/vect/transpose-8.c |   53 +
+ gcc/testsuite/gcc.dg/vect/vect.exp      |    7 +
+ gcc/tree-loop-distribution.cc           | 1464 ++++++++++++++++++++-
+ gcc/tree-vect-data-refs.cc              |  237 ++++
+ gcc/tree-vect-loop.cc                   |   42 +-
+ gcc/tree-vect-patterns.cc               |    4 +-
+ gcc/tree-vect-slp.cc                    | 1553 ++++++++++++++++++++---
+ gcc/tree-vect-stmts.cc                  |  973 +++++++++++++-
+ gcc/tree-vectorizer.h                   |   96 +-
+ 17 files changed, 4648 insertions(+), 189 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-1.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-2.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-3.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-4.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-5.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-6.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-7.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/transpose-8.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index b18f0b944..5958c4e0b 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -3221,6 +3221,10 @@ ftree-slp-vectorize
+ Common Var(flag_tree_slp_vectorize) Optimization EnabledBy(ftree-vectorize)
+ Enable basic block vectorization (SLP) on trees.
+ 
++ftree-slp-transpose-vectorize
++Common Var(flag_tree_slp_transpose_vectorize) Optimization Init(0)
++Enable basic block vectorization (SLP) for transposed stores and loads on trees.
++
+ fvect-cost-model=
+ Common Joined RejectNegative Enum(vect_cost_model) Var(flag_vect_cost_model) Init(VECT_COST_MODEL_DEFAULT) Optimization
+ -fvect-cost-model=[unlimited|dynamic|cheap|very-cheap]	Specifies the cost model for vectorization.
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-1.c b/gcc/testsuite/gcc.dg/vect/transpose-1.c
+new file mode 100644
+index 000000000..8237a8b9e
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-1.c
+@@ -0,0 +1,53 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-require-effective-target vect_int } */
++#include 
++#include 
++#include "tree-vect.h"
++
++#define N 4
++#define M 256
++
++int foo (unsigned char *pix1, int i_pix1, unsigned char *pix2, int i_pix2)
++{
++  int i = 0;
++  int sum = 0;
++  unsigned c0[N], c1[N], c2[N], c3[N], c4[N], c5[N], c6[N], c7[N];
++  for (i = 0; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++      c2[i] = pix1[2] - pix2[2];
++      c3[i] = pix1[3] - pix2[3];
++      c4[i] = pix1[4] - pix2[4];
++      c5[i] = pix1[5] - pix2[5];
++      c6[i] = pix1[6] - pix2[6];
++      c7[i] = pix1[7] - pix2[7];
++    }
++  for (int i = 0; i < N; i++)
++    {
++      sum += c0[i] + c1[i] + c2[i] + c3[i] + c4[i] + c5[i] + c6[i] + c7[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned char input1[M];
++  unsigned char input2[M];
++  int i1 = 16;
++  int i2 = 8;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 2;
++	input2[i] = i;
++    }
++  int sum = foo (input1, i1, input2, i2);
++  if (sum != 1264)
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-2.c b/gcc/testsuite/gcc.dg/vect/transpose-2.c
+new file mode 100644
+index 000000000..fdf4dbd96
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-2.c
+@@ -0,0 +1,50 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-additional-options "-fno-tree-loop-vectorize -fno-tree-dse" } */
++/* { dg-require-effective-target vect_int } */
++#include 
++#include 
++#include "tree-vect.h"
++
++#define N 8
++#define M 256
++
++int foo (unsigned char *pix1, int i_pix1, unsigned char *pix2, int i_pix2)
++{
++  int i = 0;
++  int sum = 0;
++  unsigned short c0[N], c1[N], c2[N], c3[N], c4[N], c5[N], c6[N], c7[N];
++  for (i = 0; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++      c2[i] = pix1[2] - pix2[2];
++      c3[i] = pix1[3] - pix2[3];
++    }
++  for (int i = 0; i < N; i++)
++    {
++      sum += c0[i] + c1[i] + c2[i] + c3[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned char input1[M];
++  unsigned char input2[M];
++  int i1 = 5;
++  int i2 = 4;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 4;
++	input2[i] = i * 2;
++    }
++  int sum = foo (input1, i1, input2, i2);
++  if (sum != 1440)
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-3.c b/gcc/testsuite/gcc.dg/vect/transpose-3.c
+new file mode 100644
+index 000000000..e492e3717
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-3.c
+@@ -0,0 +1,54 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-additional-options "-fno-tree-loop-vectorize -fno-tree-dse -fno-tree-fre" } */
++/* { dg-require-effective-target vect_int } */
++#include 
++#include 
++#include "tree-vect.h"
++
++#define N 4
++#define M 256
++
++int foo (unsigned short *pix1, int i_pix1, unsigned short *pix2, int i_pix2)
++{
++  int i = 0;
++  int sum = 0;
++  unsigned c0[N], c1[N], c2[N], c3[N], c4[N], c5[N], c6[N], c7[N];
++  for (i = 0; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++      c2[i] = pix1[2] - pix2[2];
++      c3[i] = pix1[3] - pix2[3];
++      c4[i] = pix1[4] - pix2[4];
++      c5[i] = pix1[5] - pix2[5];
++      c6[i] = pix1[6] - pix2[6];
++      c7[i] = pix1[7] - pix2[7];
++    }
++  for (int i = 0; i < N; i++)
++     {
++      sum += c0[i] + c1[i] + c2[i] + c3[i] + c4[i] + c5[i] + c6[i] + c7[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned short input1[M];
++  unsigned short input2[M];
++  int i1 = 8;
++  int i2 = 4;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 4;
++	input2[i] = i;
++    }
++  int sum = foo (input1, i1, input2, i2);
++  if (sum != 1680)
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-4.c b/gcc/testsuite/gcc.dg/vect/transpose-4.c
+new file mode 100644
+index 000000000..0b4adea9b
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-4.c
+@@ -0,0 +1,53 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-require-effective-target vect_int } */
++#include <stdio.h>
++#include <stdlib.h>
++#include "tree-vect.h"
++
++#define N 4
++#define M 256
++
++int foo (unsigned *pix1, int i_pix1, unsigned *pix2, int i_pix2)
++{
++  int i = 0;
++  int sum = 0;
++  unsigned c0[N], c1[N], c2[N], c3[N], c4[N], c5[N], c6[N], c7[N];
++  for (i = 0; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++      c2[i] = pix1[2] - pix2[2];
++      c3[i] = pix1[3] - pix2[3];
++      c4[i] = pix1[4] - pix2[4];
++      c5[i] = pix1[5] - pix2[5];
++      c6[i] = pix1[6] - pix2[6];
++      c7[i] = pix1[7] - pix2[7];
++    }
++  for (int i = 0; i < N; i++)
++     {
++      sum += c0[i] + c1[i] + c2[i] + c3[i] + c4[i] + c5[i] + c6[i] + c7[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned input1[M];
++  unsigned input2[M];
++  int i1 = 12;
++  int i2 = 6;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 7;
++	input2[i] = i * 3;
++    }
++  int sum = foo (input1, i1, input2, i2);
++  if (sum != 3616)
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-5.c b/gcc/testsuite/gcc.dg/vect/transpose-5.c
+new file mode 100644
+index 000000000..040dedf1b
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-5.c
+@@ -0,0 +1,74 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-additional-options "-fno-tree-dse -fno-tree-fre" } */
++/* { dg-require-effective-target vect_int } */
++#include <stdio.h>
++#include <stdlib.h>
++#include <math.h>
++#include "tree-vect.h"
++
++#define N 4
++#define M 256
++#define eps 1e-8
++
++double foo (unsigned char *pix1, int i_pix1, unsigned char *pix2, int i_pix2)
++{
++  unsigned a0[N];
++  unsigned a1[N];
++  unsigned a2[N];
++  unsigned a3[N];
++
++  int b0[N];
++  int b1[N];
++  int b2[N];
++  int b3[N];
++
++  for (int i = 0; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      a0[i] = (pix1[0] - pix2[0]) + ((pix1[4] + pix2[4]) << 16);
++      a1[i] = (pix1[1] - pix2[1]) + ((pix1[5] + pix2[5]) << 16);
++      a2[i] = (pix1[2] - pix2[2]) + ((pix1[6] + pix2[6]) << 16);
++      a3[i] = (pix1[3] - pix2[3]) + ((pix1[7] + pix2[7]) << 16);
++    }
++
++  for (int i = 0; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      b0[i] = (pix1[0] - pix2[0]) + (pix1[4] + pix2[4]);
++      b1[i] = (pix1[1] - pix2[1]) + (pix1[5] + pix2[5]);
++      b2[i] = (pix1[2] - pix2[2]) + (pix1[6] + pix2[6]);
++      b3[i] = (pix1[3] - pix2[3]) + (pix1[7] + pix2[7]);
++    }
++
++  double sum = 0;
++  for (int i = 0; i < N; i++)
++    {
++      sum += a0[i] + a1[i] + a2[i] + a3[i] + b0[i] + b1[i] + b2[i] + b3[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned char input1[M];
++  unsigned char input2[M];
++  int i1 = 8;
++  int i2 = 3;
++  unsigned char m = 2;
++  unsigned short n = 12;
++  float t = 3.0;
++  double k = 4.2;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 6;
++	input2[i] = i * 3;
++    }
++  double sum = foo (input1, i1, input2, i2);
++  if (fabs (sum - 78648144) > eps)
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
++/* { dg-final { scan-tree-dump-times "vectorizable_store for slp transpose" 2 "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-6.c b/gcc/testsuite/gcc.dg/vect/transpose-6.c
+new file mode 100644
+index 000000000..3e134ac02
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-6.c
+@@ -0,0 +1,67 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-require-effective-target vect_int } */
++/* { dg-require-effective-target vect_float } */
++#include <stdio.h>
++#include <stdlib.h>
++#include <math.h>
++#include "tree-vect.h"
++
++#define N 4
++#define M 256
++#define eps 1e-8
++
++float foo (unsigned char *pix1, int i_pix1, unsigned char *pix2, int i_pix2)
++{
++  unsigned a0[N];
++  unsigned a1[N];
++  unsigned a2[N];
++  unsigned a3[N];
++
++  float c0[N];
++  float c1[N];
++  float c2[N];
++  float c3[N];
++
++  for (int i = 0; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      a0[i] = (pix1[0] - pix2[0]) + ((pix1[4] - pix2[4]) << 16);
++      a1[i] = (pix1[1] - pix2[1]) + ((pix1[5] - pix2[5]) << 16);
++      a2[i] = (pix1[2] - pix2[2]) + ((pix1[6] - pix2[6]) << 16);
++      a3[i] = (pix1[3] - pix2[3]) + ((pix1[7] - pix2[7]) << 16);
++
++      c0[i] = (pix1[0] * pix2[0]) + (pix1[4] * pix2[4]);
++      c1[i] = (pix1[1] * pix2[1]) + (pix1[5] * pix2[5]);
++      c2[i] = (pix1[2] * pix2[2]) + (pix1[6] * pix2[6]);
++      c3[i] = (pix1[3] * pix2[3]) + (pix1[7] * pix2[7]);
++    }
++
++  float sum = 0;
++  for (int i = 0; i < N; i++)
++    {
++      sum += a0[i] + a1[i] + a2[i] + a3[i] + c0[i] + c1[i] + c2[i] + c3[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned char input1[M];
++  unsigned char input2[M];
++  int i1 = 18;
++  int i2 = 6;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 4;
++	input2[i] = i * 2;
++    }
++  float sum = foo (input1, i1, input2, i2);
++  if (fabs (sum - 106041168) > eps) 
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
++/* { dg-final { scan-tree-dump-times "vectorizable_store for slp transpose" 2 "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-7.c b/gcc/testsuite/gcc.dg/vect/transpose-7.c
+new file mode 100644
+index 000000000..8ba1b1b6d
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-7.c
+@@ -0,0 +1,53 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-additional-options "-fno-tree-loop-vectorize -fno-tree-dse" } */
++/* { dg-require-effective-target vect_int } */
++#include <stdio.h>
++#include <stdlib.h>
++#include "tree-vect.h"
++
++#define N 16
++#define M 256
++
++int foo (unsigned char *pix1, int i_pix1, unsigned char *pix2, int i_pix2)
++{
++  int i = 0;
++  int sum = 0;
++  unsigned char c0[N], c1[N];
++  for (int i = 0; i < N/2; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++    }
++  for (int i = N/2; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++   }
++  for (int i = 0; i < N; i++)
++    {
++      sum += c0[i] + c1[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned char input1[M];
++  unsigned char input2[M];
++  int i1 = 6;
++  int i2 = 4;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 5;
++	input2[i] = i * 2;
++    }
++  int sum = foo (input1, i1, input2, i2);
++  if (sum != 3280)
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/transpose-8.c b/gcc/testsuite/gcc.dg/vect/transpose-8.c
+new file mode 100644
+index 000000000..a154f012a
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/transpose-8.c
+@@ -0,0 +1,53 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-additional-options "-fno-tree-loop-vectorize" } */
++/* { dg-require-effective-target vect_int } */
++#include <stdio.h>
++#include <stdlib.h>
++#include "tree-vect.h"
++
++#define N 32
++#define M 256
++
++int foo (unsigned char *pix1, int i_pix1, unsigned char *pix2, int i_pix2)
++{
++  int i = 0;
++  int sum = 0;
++  unsigned char c0[N], c1[N];
++  for (int i = 0; i < N/2; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++    }
++  for (int i = N/2; i < N; i++, pix1 += i_pix1, pix2 += i_pix2)
++    {
++      c0[i] = pix1[0] - pix2[0];
++      c1[i] = pix1[1] - pix2[1];
++   }
++  for (int i = 0; i < N; i++)
++    {
++      sum += c0[i] + c1[i];
++    }
++  return sum;
++}
++
++int main (int argc, const char* argv[])
++{
++  unsigned char input1[M];
++  unsigned char input2[M];
++  int i1 = 6;
++  int i2 = 4;
++  check_vect ();
++  for (int i = 0; i < M; i++)
++    {
++	input1[i] = i * 5;
++	input2[i] = i * 2;
++    }
++  int sum = foo (input1, i1, input2, i2);
++  if (sum != 7584)
++    {
++      abort ();
++    }
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump "vectorized using transposed version" "slp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/vect.exp b/gcc/testsuite/gcc.dg/vect/vect.exp
+index dcaef1e0a..ae5212411 100644
+--- a/gcc/testsuite/gcc.dg/vect/vect.exp
++++ b/gcc/testsuite/gcc.dg/vect/vect.exp
+@@ -117,6 +117,13 @@ et-dg-runtest dg-runtest [lsort \
+ 	[glob -nocomplain $srcdir/$subdir/no-vfa-*.\[cS\]]] \
+ 	"" $DEFAULT_VECTCFLAGS
+ 
++# -ftree-slp-transpose-vectorize SLP tests
++set VECT_SLP_CFLAGS $SAVED_VECT_SLP_CFLAGS
++lappend VECT_SLP_CFLAGS "-ftree-slp-transpose-vectorize"
++et-dg-runtest dg-runtest [lsort \
++	[glob -nocomplain $srcdir/$subdir/transpose-*.\[cS\]]] \
++	"" "-ftree-slp-transpose-vectorize -fdump-tree-slp-details -O3"
++
+ # -ffast-math tests
+ set DEFAULT_VECTCFLAGS $SAVED_DEFAULT_VECTCFLAGS
+ lappend DEFAULT_VECTCFLAGS "-ffast-math"
+diff --git a/gcc/tree-loop-distribution.cc b/gcc/tree-loop-distribution.cc
+index 606eb05e6..8d118e987 100644
+--- a/gcc/tree-loop-distribution.cc
++++ b/gcc/tree-loop-distribution.cc
+@@ -36,6 +36,47 @@ along with GCC; see the file COPYING3.  If not see
+    |   D(I) = A(I-1)*E
+    |ENDDO
+ 
++   If an unvectorizable loop has grouped loads, and calculations from grouped
++   loads are isomorphic, build temp arrays using stmts where isomorphic
++   calculations end.  After distribution, the partition built from temp
++   arrays can be vectorized in pass SLP after loop unrolling.  For example,
++
++   |DO I = 1, N
++   |    A = FOO (ARG_1);
++   |    B = FOO (ARG_2);
++   |    C = BAR_0 (A);
++   |    D = BAR_1 (B);
++   |ENDDO
++
++   is transformed to
++
++   |DO I = 1, N
++   |    J = FOO (ARG_1);
++   |    K = FOO (ARG_2);
++   |    X[I] = J;
++   |    Y[I] = K;
++   |    A = X[I];
++   |    B = Y[I];
++   |    C = BAR_0 (A);
++   |    D = BAR_1 (B);
++   |ENDDO
++
++   and is then distributed to
++
++   |DO I = 1, N
++   |    J = FOO (ARG_1);
++   |    K = FOO (ARG_2);
++   |    X[I] = J;
++   |    Y[I] = K;
++   |ENDDO
++
++   |DO I = 1, N
++   |    A = X[I];
++   |    B = Y[I];
++   |    C = BAR_0 (A);
++   |    D = BAR_1 (B);
++   |ENDDO
++
+    Loop distribution is the dual of loop fusion.  It separates statements
+    of a loop (or loop nest) into multiple loops (or loop nests) with the
+    same loop header.  The major goal is to separate statements which may
+@@ -44,7 +85,9 @@ along with GCC; see the file COPYING3.  If not see
+ 
+      1) Seed partitions with specific type statements.  For now we support
+ 	two types seed statements: statement defining variable used outside
+-	of loop; statement storing to memory.
++	of loop; statement storing to memory.  Moreover, for unvectorizable
++	loops, we try to find isomorphic stmts from grouped load and build
++	temp arrays as new seed statements.
+      2) Build reduced dependence graph (RDG) for loop to be distributed.
+ 	The vertices (RDG:V) model all statements in the loop and the edges
+ 	(RDG:E) model flow and control dependencies between statements.
+@@ -90,6 +133,8 @@ along with GCC; see the file COPYING3.  If not see
+ 	data reuse.  */
+ 
+ #include "config.h"
++#define INCLUDE_MAP
++#define INCLUDE_ALGORITHM
+ #include "system.h"
+ #include "coretypes.h"
+ #include "backend.h"
+@@ -115,6 +160,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tree-vectorizer.h"
+ #include "tree-eh.h"
+ #include "gimple-fold.h"
++#include "optabs-tree.h"
+ #include "tree-affine.h"
+ #include "intl.h"
+ #include "rtl.h"
+@@ -188,6 +234,52 @@ struct rdg_vertex
+ #define RDG_MEM_WRITE_STMT(RDG, I) RDGV_HAS_MEM_WRITE (&(RDG->vertices[I]))
+ #define RDG_MEM_READS_STMT(RDG, I) RDGV_HAS_MEM_READS (&(RDG->vertices[I]))
+ 
++/* Results of isomorphic group analysis.  */
++#define UNINITIALIZED	(0)
++#define ISOMORPHIC	(1)
++#define HETEROGENEOUS	(1 << 1)
++#define UNCERTAIN	(1 << 2)
++
++/* Information of a stmt while analyzing isomorphic use in group.  */
++
++typedef struct _group_info
++{
++  gimple *stmt;
++
++  /* True if stmt can be a cut point.  */
++  bool cut_point;
++
++  /* For use_stmt with two rhses, one of which is the lhs of stmt.
++     If the other is unknown to be isomorphic, mark it uncertain.  */
++  bool uncertain;
++
++  /* Searching of isomorphic stmt reaches heterogeneous groups or reaches
++     MEM stmts.  */
++  bool done;
++
++  _group_info ()
++    {
++      stmt = NULL;
++      cut_point = false;
++      uncertain = false;
++      done = false;
++    }
++} *group_info;
++
++/* PAIR of cut points and corresponding profit.  */
++typedef std::pair<vec<gimple *> *, int> stmts_profit;
++
++/* MAP of vector factor VF and corresponding stmts_profit PAIR.  */
++typedef std::map<unsigned, stmts_profit> vf_stmts_profit_map;
++
++/* PAIR of group_num and iteration_num.  We consider rhses from the same
++   group and iteration are isomorphic.  */
++typedef std::pair<unsigned, unsigned> group_iteration;
++
++/* An isomorphic stmt is determined by lhs of use_stmt, group_num and
++   the iteration_num when we insert this stmt to this map.  */
++typedef std::map<tree, group_iteration> isomer_stmt_lhs;
++
+ /* Data dependence type.  */
+ 
+ enum rdg_dep_type
+@@ -600,13 +692,14 @@ class loop_distribution
+   /* Returns true when PARTITION1 and PARTITION2 access the same memory
+      object in RDG.  */
+   bool share_memory_accesses (struct graph *rdg,
+-			      partition *partition1, partition *partition2);
++			      partition *partition1, partition *partition2,
++			      hash_set<tree> *excluded_arrays);
+ 
+   /* For each seed statement in STARTING_STMTS, this function builds
+      partition for it by adding depended statements according to RDG.
+      All partitions are recorded in PARTITIONS.  */
+   void rdg_build_partitions (struct graph *rdg,
+-			     vec<gimple *> starting_stmts,
++			     vec<gimple *> *starting_stmts,
+ 			     vec *partitions);
+ 
+   /* Compute partition dependence created by the data references in DRS1
+@@ -643,15 +736,50 @@ class loop_distribution
+ 
+   /* Fuse PARTITIONS of LOOP if necessary before finalizing distribution.
+      ALIAS_DDRS contains ddrs which need runtime alias check.  */
+-  void finalize_partitions (class loop *loop, vec<struct partition *>
+-			    *partitions, vec<ddr_p> *alias_ddrs);
++  void finalize_partitions (class loop *loop,
++			    vec<struct partition *> *partitions,
++			    vec<ddr_p> *alias_ddrs, bitmap producers);
++
++  /* Analyze loop form and if it's vectorizable to decide if we need to
++     insert temp arrays to distribute it.  */
++  bool may_insert_temp_arrays (loop_p loop, struct graph *&rdg,
++			       control_dependences *cd);
++
++  /* Reset gimple_uid of GIMPLE_DEBUG and GIMPLE_LABEL to -1.  */
++  void reset_gimple_uid (loop_p loop);
++
++  bool check_loop_vectorizable (loop_p loop);
++
++  inline void rebuild_rdg (loop_p loop, struct graph *&rdg,
++			   control_dependences *cd);
++
++  /* If loop is not distributed, remove inserted temp arrays.  */
++  void remove_insertion (loop_p loop, struct graph *flow_only_rdg,
++			 bitmap producers, struct partition *partition);
++
++  /* Insert temp arrays if isomorphic computation exists.  Temp arrays will be
++     regarded as SEED_STMTS for building partitions in succeeding processes.  */
++  bool insert_temp_arrays (loop_p loop, vec<gimple *> seed_stmts,
++			   hash_set<tree> *tmp_array_vars, bitmap producers);
++
++  void build_producers (loop_p loop, bitmap producers,
++			vec<gimple *> &transformed);
++
++  void do_insertion (loop_p loop, struct graph *flow_only_rdg, tree iv,
++		     bitmap cut_points, hash_set<tree> *tmp_array_vars,
++		     bitmap producers);
++
++  /* Fuse PARTITIONS built from inserted temp arrays into one partition,
++     fuse the rest into another.  */
++  void merge_remaining_partitions (vec<struct partition *> *partitions,
++				   bitmap producers);
+ 
+   /* Distributes the code from LOOP in such a way that producer statements
+      are placed before consumer statements.  Tries to separate only the
+      statements from STMTS into separate loops.  Returns the number of
+      distributed loops.  Set NB_CALLS to number of generated builtin calls.
+      Set *DESTROY_P to whether LOOP needs to be destroyed.  */
+-  int distribute_loop (class loop *loop, const vec<gimple *> &stmts,
++  int distribute_loop (class loop *loop, vec<gimple *> &stmts,
+ 		       control_dependences *cd, int *nb_calls, bool *destroy_p,
+ 		       bool only_patterns_p);
+ 
+@@ -1893,7 +2021,8 @@ loop_distribution::classify_partition (loop_p loop,
+ 
+ bool
+ loop_distribution::share_memory_accesses (struct graph *rdg,
+-		       partition *partition1, partition *partition2)
++		       partition *partition1, partition *partition2,
++		       hash_set<tree> *excluded_arrays)
+ {
+   unsigned i, j;
+   bitmap_iterator bi, bj;
+@@ -1927,7 +2056,10 @@ loop_distribution::share_memory_accesses (struct graph *rdg,
+ 	  if (operand_equal_p (DR_BASE_ADDRESS (dr1), DR_BASE_ADDRESS (dr2), 0)
+ 	      && operand_equal_p (DR_OFFSET (dr1), DR_OFFSET (dr2), 0)
+ 	      && operand_equal_p (DR_INIT (dr1), DR_INIT (dr2), 0)
+-	      && operand_equal_p (DR_STEP (dr1), DR_STEP (dr2), 0))
++	      && operand_equal_p (DR_STEP (dr1), DR_STEP (dr2), 0)
++	      /* An exception, if PARTITION1 and PARTITION2 contain the
++		 temp array we inserted, do not merge them.  */
++	      && !excluded_arrays->contains (DR_REF (dr1)))
+ 	    return true;
+ 	}
+     }
+@@ -1941,14 +2073,14 @@ loop_distribution::share_memory_accesses (struct graph *rdg,
+ 
+ void
+ loop_distribution::rdg_build_partitions (struct graph *rdg,
+-					 vec<gimple *> starting_stmts,
++					 vec<gimple *> *starting_stmts,
+ 					 vec *partitions)
+ {
+   auto_bitmap processed;
+   int i;
+   gimple *stmt;
+ 
+-  FOR_EACH_VEC_ELT (starting_stmts, i, stmt)
++  FOR_EACH_VEC_ELT (*starting_stmts, i, stmt)
+     {
+       int v = rdg_vertex_for_stmt (rdg, stmt);
+ 
+@@ -2912,13 +3044,47 @@ fuse_memset_builtins (vec *partitions)
+     }
+ }
+ 
++void
++loop_distribution::merge_remaining_partitions
++			(vec<struct partition *> *partitions,
++			 bitmap producers)
++{
++  struct partition *partition = NULL;
++  struct partition *p1 = NULL, *p2 = NULL;
++  for (unsigned i = 0; partitions->iterate (i, &partition); i++)
++    {
++      if (bitmap_intersect_p (producers, partition->stmts))
++	{
++	  if (p1 == NULL)
++	    {
++	      p1 = partition;
++	      continue;
++	    }
++	  partition_merge_into (NULL, p1, partition, FUSE_FINALIZE);
++	}
++      else
++	{
++	  if (p2 == NULL)
++	    {
++	      p2 = partition;
++	      continue;
++	    }
++	  partition_merge_into (NULL, p2, partition, FUSE_FINALIZE);
++	}
++      partitions->unordered_remove (i);
++      partition_free (partition);
++      i--;
++    }
++}
++
+ void
+ loop_distribution::finalize_partitions (class loop *loop,
+ 					vec<struct partition *> *partitions,
+-					vec<ddr_p> *alias_ddrs)
++					vec<ddr_p> *alias_ddrs,
++					bitmap producers)
+ {
+   unsigned i;
+-  struct partition *partition, *a;
++  struct partition *partition;
+ 
+   if (partitions->length () == 1
+       || alias_ddrs->length () > 0)
+@@ -2950,13 +3116,7 @@ loop_distribution::finalize_partitions (class loop *loop,
+       || (loop->inner == NULL
+ 	  && i >= NUM_PARTITION_THRESHOLD && num_normal > num_builtin))
+     {
+-      a = (*partitions)[0];
+-      for (i = 1; partitions->iterate (i, &partition); ++i)
+-	{
+-	  partition_merge_into (NULL, a, partition, FUSE_FINALIZE);
+-	  partition_free (partition);
+-	}
+-      partitions->truncate (1);
++      merge_remaining_partitions (partitions, producers);
+     }
+ 
+   /* Fuse memset builtins if possible.  */
+@@ -2964,6 +3124,1216 @@ loop_distribution::finalize_partitions (class loop *loop,
+     fuse_memset_builtins (partitions);
+ }
+ 
++/* Gimple uids of GIMPLE_DEBUG and GIMPLE_LABEL were changed during function
++   vect_analyze_loop, reset them to -1.  */
++
++void
++loop_distribution::reset_gimple_uid (loop_p loop)
++{
++  basic_block *bbs = get_loop_body_in_custom_order (loop, this,
++						    bb_top_order_cmp_r);
++  for (int i = 0; i < int (loop->num_nodes); i++)
++    {
++      basic_block bb = bbs[i];
++      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
++	   gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++	  if (is_gimple_debug (stmt) || gimple_code (stmt) == GIMPLE_LABEL)
++	    gimple_set_uid (stmt, -1);
++	}
++    }
++  free (bbs);
++}
++
++bool
++loop_distribution::check_loop_vectorizable (loop_p loop)
++{
++  vec_info_shared shared;
++  vect_analyze_loop (loop, &shared, true);
++  loop_vec_info vinfo = loop_vec_info_for_loop (loop);
++  reset_gimple_uid (loop);
++  if (vinfo == NULL)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file,
++		 "Loop %d no temp array insertion: bad data access pattern,"
++		 " unable to generate loop_vinfo.\n", loop->num);
++      return false;
++    }
++  if (vinfo->vectorizable)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Loop %d no temp array insertion: original loop"
++			    " can be vectorized without distribution.\n",
++			    loop->num);
++      delete vinfo;
++      loop->aux = NULL;
++      return false;
++    }
++  if (vinfo->grouped_loads.length () == 0)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Loop %d no temp array insertion: original loop"
++			    " has no grouped loads.\n" , loop->num);
++      delete vinfo;
++      loop->aux = NULL;
++      return false;
++    }
++  return true;
++}
++
++inline void
++loop_distribution::rebuild_rdg (loop_p loop, struct graph *&rdg,
++				control_dependences *cd)
++{
++  free_rdg (rdg);
++  rdg = build_rdg (loop, cd);
++  gcc_checking_assert (rdg != NULL);
++}
++
++bool
++loop_distribution::may_insert_temp_arrays (loop_p loop, struct graph *&rdg,
++					   control_dependences *cd)
++{
++  if (!(flag_tree_slp_transpose_vectorize && flag_tree_loop_vectorize))
++    return false;
++
++  /* Only loops with two basic blocks HEADER and LATCH are supported.  HEADER
++     is the main body of a LOOP and LATCH is the basic block that controls the
++     LOOP execution.  Size of temp array is determined by loop execution time,
++     so it must be a const.  */
++  tree loop_extent = number_of_latch_executions (loop);
++  if (loop->inner != NULL || loop->num_nodes > 2
++      || TREE_CODE (loop_extent) != INTEGER_CST)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Loop %d: no temp array insertion: bad loop"
++			    " form.\n", loop->num);
++      return false;
++    }
++
++  if (loop->dont_vectorize)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Loop %d: no temp array insertion: this loop"
++			    " should never be vectorized.\n",
++			    loop->num);
++      return false;
++    }
++
++  /* Do not distribute a LOOP that is able to be vectorized without
++     distribution.  */
++  if (!check_loop_vectorizable (loop))
++    {
++      rebuild_rdg (loop, rdg, cd);
++      return false;
++    }
++
++  rebuild_rdg (loop, rdg, cd);
++  return true;
++}
++
++/* Return max grouped loads' length if all groups' lengths satisfy len = 2 ^ n.
++   Otherwise, return 0.  */
++
++static unsigned
++get_max_vf (loop_vec_info vinfo)
++{
++  unsigned size = 0;
++  unsigned max = 0;
++  stmt_vec_info stmt_info;
++  unsigned i = 0;
++  FOR_EACH_VEC_ELT (vinfo->grouped_loads, i, stmt_info)
++    {
++      size = stmt_info->size;
++      if (!pow2p_hwi (size))
++	return 0;
++      max = size > max ? size : max;
++    }
++  return max;
++}
++
++/* Convert grouped_loads from linked list to vector with length vf.  Init
++   group_info of each stmt in the same group and put them into a vector.  And
++   these vectors consist WORKLISTS.  We will re-analyze a group if it is
++   uncertain, so we regard WORKLISTS as a circular queue.  */
++
++static unsigned
++build_queue (loop_vec_info vinfo, unsigned vf,
++	     vec<vec<group_info> *> &worklists)
++{
++  stmt_vec_info stmt_info;
++  unsigned i = 0;
++  group_info ginfo = NULL;
++  vec<group_info> *worklist = NULL;
++  FOR_EACH_VEC_ELT (vinfo->grouped_loads, i, stmt_info)
++    {
++      unsigned group_size = stmt_info->size;
++      stmt_vec_info c_stmt_info = stmt_info;
++      bool succ = true;
++      while (group_size >= vf)
++	{
++	  vec_alloc (worklist, vf);
++	  for (unsigned j = 0; j < vf; ++j)
++	    {
++	      if (c_stmt_info == NULL)
++		{
++		  succ = false;
++		  break;
++		}
++	      ginfo = new _group_info ();
++	      ginfo->stmt = c_stmt_info->stmt;
++	      worklist->safe_push (ginfo);
++	      c_stmt_info = c_stmt_info->next_element;
++	    }
++	  if (!succ)
++	    {
++	      unsigned k = 0;
++	      ginfo = NULL;
++	      FOR_EACH_VEC_ELT (*worklist, k, ginfo)
++		delete ginfo;
++	      vec_free (worklist);
++	      break;
++	    }
++	  worklists.safe_push (worklist);
++	  group_size -= vf;
++	}
++    }
++  return worklists.length ();
++}
++
++static bool
++check_same_oprand_type (tree op1, tree op2)
++{
++  tree type1 = TREE_TYPE (op1);
++  tree type2 = TREE_TYPE (op2);
++  if (TREE_CODE (type1) != INTEGER_TYPE && TREE_CODE (type1) != REAL_TYPE)
++    return false;
++
++  return (TREE_CODE (type1) == TREE_CODE (type2)
++	  && TYPE_UNSIGNED (type1) == TYPE_UNSIGNED (type2)
++	  && TYPE_PRECISION (type1) == TYPE_PRECISION (type2));
++}
++
++static bool
++bit_field_p (gimple *stmt)
++{
++  unsigned i = 0;
++  auto_vec<data_reference_p> datarefs_vec;
++  data_reference_p dr;
++  if (!find_data_references_in_stmt (NULL, stmt, &datarefs_vec))
++    return true;
++
++  FOR_EACH_VEC_ELT (datarefs_vec, i, dr)
++    {
++      if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
++	  && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
++	return true;
++    }
++  return false;
++}
++
++static inline bool
++shift_operation (enum tree_code op)
++{
++  return op == LSHIFT_EXPR || op == RSHIFT_EXPR || op == LROTATE_EXPR
++	 || op == RROTATE_EXPR;
++}
++
++/* Return relationship between USE_STMT and the first use_stmt of the group.
++   RHS1 is the lhs of stmt recorded in group_info.  If another rhs of use_stmt
++   is not a constant, return UNCERTAIN and re-check it later.  */
++
++static unsigned
++check_isomorphic (gimple *use_stmt, gimple *first,
++		  tree rhs1, vec<tree> &hetero_lhs)
++{
++  /* Check same operation.  */
++  enum tree_code rhs_code_first = gimple_assign_rhs_code (first);
++  enum tree_code rhs_code_current = gimple_assign_rhs_code (use_stmt);
++  if (rhs_code_first != rhs_code_current)
++    return HETEROGENEOUS;
++
++  /* For shift operations, oprands should be equal.  */
++  if (shift_operation (rhs_code_current))
++    {
++      tree shift_op_first = gimple_assign_rhs2 (first);
++      tree shift_op_current = gimple_assign_rhs2 (use_stmt);
++      if (!operand_equal_p (shift_op_first, shift_op_current, 0)
++	  || !TREE_CONSTANT (shift_op_first))
++	return HETEROGENEOUS;
++
++      return ISOMORPHIC;
++    }
++  /* Type conversion expr or assignment.  */
++  if (gimple_num_ops (first) == 2)
++    return (rhs_code_first == NOP_EXPR || rhs_code_first == CONVERT_EXPR
++	      || rhs_code_first == SSA_NAME) ? ISOMORPHIC : HETEROGENEOUS;
++
++  /* We find USE_STMT from lhs of a stmt, denote it as rhs1 of USE_STMT and
++     the other one as rhs2.  Check if define-stmt of current rhs2 is isomorphic
++     with define-stmt of rhs2 in the first USE_STMT at this group.  */
++  tree rhs2_first = gimple_assign_rhs1 (use_stmt) == rhs1
++		    ? gimple_assign_rhs2 (first) : gimple_assign_rhs1 (first);
++  tree rhs2_curr = gimple_assign_rhs1 (use_stmt) == rhs1
++	      ? gimple_assign_rhs2 (use_stmt) : gimple_assign_rhs1 (use_stmt);
++
++  if (check_same_oprand_type (rhs2_first, rhs2_curr))
++    {
++      if (TREE_CONSTANT (rhs2_curr))
++	return ISOMORPHIC;
++      else if (hetero_lhs.contains (rhs2_curr))
++	return HETEROGENEOUS;
++
++      /* Provisionally set the stmt as uncertain and analyze the whole group
++	 in function CHECK_UNCERTAIN later if all use_stmts are uncertain.  */
++      return UNCERTAIN;
++    }
++  return HETEROGENEOUS;
++}
++
++static bool
++unsupported_operations (gimple *stmt)
++{
++  enum tree_code code = gimple_assign_rhs_code (stmt);
++  return code == COND_EXPR;
++}
++
++/* Check if the single use_stmt of STMT is isomorphic with the first one's
++   use_stmt in current group.  */
++
++static unsigned
++check_use_stmt (group_info elmt, gimple *&first,
++		vec<gimple *> &tmp_stmts, vec<tree> &hetero_lhs)
++{
++  if (gimple_code (elmt->stmt) != GIMPLE_ASSIGN)
++    return HETEROGENEOUS;
++  use_operand_p dummy;
++  tree lhs = gimple_assign_lhs (elmt->stmt);
++  gimple *use_stmt = NULL;
++  single_imm_use (lhs, &dummy, &use_stmt);
++  /* STMTs with three rhs are not supported, e.g., GIMPLE_COND.  */
++  if (use_stmt == NULL || gimple_code (use_stmt) != GIMPLE_ASSIGN
++      || unsupported_operations (use_stmt) || bit_field_p (use_stmt))
++    return HETEROGENEOUS;
++  tmp_stmts.safe_push (use_stmt);
++  if (first == NULL)
++    {
++      first = use_stmt;
++      return UNINITIALIZED;
++    }
++  /* Check if current use_stmt and the first member's use_stmt in the group
++     are of the same type.  */
++  tree first_lhs = gimple_assign_lhs (first);
++  tree curr_lhs = gimple_assign_lhs (use_stmt);
++  if (!check_same_oprand_type (first_lhs, curr_lhs))
++    return HETEROGENEOUS;
++  return check_isomorphic (use_stmt, first, lhs, hetero_lhs);
++}
++
++/* Replace stmt field in group with stmts in TMP_STMTS, and insert their
++   lhs_info to ISOMER_LHS.  */
++
++static void
++update_isomer_lhs (vec<group_info> *group, unsigned group_num,
++		   unsigned iteration, isomer_stmt_lhs &isomer_lhs,
++		   vec<gimple *> &tmp_stmts, int &profit,
++		   vec<unsigned> &merged_groups)
++{
++  group_info elmt = NULL;
++  /* Do not insert temp array if isomorphic stmts from grouped load have
++     only casting operations.  Once isomorphic calculation has 3 operands,
++     such as plus operation, this group can be regarded as cut point.  */
++  bool operated = (gimple_num_ops (tmp_stmts[0]) == 3);
++  /* Do not insert temp arrays if search of isomorphic stmts reaches
++     MEM stmts.  */
++  bool has_vdef = gimple_vdef (tmp_stmts[0]) != NULL;
++  bool merge = false;
++  for (unsigned i = 0; i < group->length (); i++)
++    {
++      elmt = (*group)[i];
++      elmt->stmt = has_vdef ? NULL : tmp_stmts[i];
++      elmt->cut_point = has_vdef ? false : (elmt->cut_point || operated);
++      elmt->uncertain = false;
++      elmt->done = has_vdef;
++      tree lhs = gimple_assign_lhs (tmp_stmts[i]);
++      if (isomer_lhs.find (lhs) != isomer_lhs.end ())
++	{
++	  merge = true;
++	  continue;
++	}
++      isomer_lhs[lhs] = std::make_pair (group_num, iteration);
++    }
++  if (merge)
++    {
++      merged_groups.safe_push (group_num);
++      profit = 0;
++      return;
++    }
++  enum vect_cost_for_stmt kind = scalar_stmt;
++  int scalar_cost = builtin_vectorization_cost (kind, NULL_TREE, 0);
++  profit = (tmp_stmts.length () - 1) * scalar_cost;
++}
++
++/* Try to find rhs2 in ISOMER_LHS, if all rhs2 were found and their group_num
++   and iteration are same, GROUP is isomorphic.  */
++
++static unsigned
++check_isomorphic_rhs (vec<group_info> *group, vec<gimple *> &tmp_stmts,
++		      isomer_stmt_lhs &isomer_lhs)
++{
++  group_info elmt = NULL;
++  gimple *stmt = NULL;
++  unsigned j = 0;
++  unsigned group_num = -1u;
++  unsigned iteration = -1u;
++  tree rhs1 = NULL;
++  tree rhs2 = NULL;
++  unsigned status = UNINITIALIZED;
++  FOR_EACH_VEC_ELT (*group, j, elmt)
++    {
++      rhs1 = gimple_assign_lhs (elmt->stmt);
++      stmt = tmp_stmts[j];
++      rhs2 = (rhs1 == gimple_assign_rhs1 (stmt))
++	     ? gimple_assign_rhs2 (stmt) : gimple_assign_rhs1 (stmt);
++      isomer_stmt_lhs::iterator iter = isomer_lhs.find (rhs2);
++      if (iter != isomer_lhs.end ())
++	{
++	  if (group_num == -1u)
++	    {
++	      group_num = iter->second.first;
++	      iteration = iter->second.second;
++	      status |= ISOMORPHIC;
++	      continue;
++	    }
++	  if (iter->second.first == group_num
++	      && iter->second.second == iteration)
++	    {
++	      status |= ISOMORPHIC;
++	      continue;
++	    }
++	  return HETEROGENEOUS;
++	}
++      else
++	status |= UNCERTAIN;
++    }
++  return status;
++}
++
++/* Update group_info for uncertain groups.  */
++
++static void
++update_uncertain_stmts (vec *group, unsigned group_num,
++			 unsigned iteration, vec &tmp_stmts)
++{
++  unsigned j = 0;
++  group_info elmt = NULL;
++  FOR_EACH_VEC_ELT (*group, j, elmt)
++    {
++      elmt->uncertain = true;
++      elmt->done = false;
++    }
++}
++
++/* Push stmts in TMP_STMTS into HETERO_LHS.  */
++
++static void
++set_hetero (vec *group, vec &hetero_lhs,
++	    vec &tmp_stmts)
++{
++  group_info elmt = NULL;
++  unsigned i = 0;
++  for (i = 0; i < group->length (); i++)
++    {
++      elmt = (*group)[i];
++      elmt->uncertain = false;
++      elmt->done = true;
++    }
++  gimple *stmt = NULL;
++  FOR_EACH_VEC_ELT (tmp_stmts, i, stmt)
++    if (stmt != NULL)
++      hetero_lhs.safe_push (gimple_assign_lhs (stmt));
++}
++
++/* Given an uncertain group, TMP_STMTS are use_stmts of stmts in GROUP.
++   Rhs1 is the lhs of stmt in GROUP, rhs2 is the other rhs of USE_STMT.
++
++   Try to find rhs2 in ISOMER_LHS, if all found rhs2 have same group_num
++   and iteration, this uncertain group is isomorphic.
++
++   If no rhs matched, this GROUP remains uncertain and update group_info.
++
++   Otherwise, this GROUP is heterogeneous and return true to end analysis
++   for this group.  */
++
++static bool
++check_uncertain (vec *group, unsigned group_num,
++		 unsigned iteration, int &profit,
++		 vec &tmp_stmts, isomer_stmt_lhs &isomer_lhs,
++		 vec &hetero_lhs, vec &merged_groups)
++{
++  unsigned status = check_isomorphic_rhs (group, tmp_stmts, isomer_lhs);
++  bool done = false;
++  switch (status)
++    {
++      case UNCERTAIN:
++	update_uncertain_stmts (group, group_num, iteration, tmp_stmts);
++	break;
++      case ISOMORPHIC:
++	update_isomer_lhs (group, group_num, iteration, isomer_lhs,
++			   tmp_stmts, profit, merged_groups);
++	break;
++      default:
++	set_hetero (group, hetero_lhs, tmp_stmts);
++	done = true;
++    }
++  return done;
++}
++
++/* Return false if analysis of this group is not finished, e.g., isomorphic or
++   uncertain.  Calculate the profit if vectorized.  */
++
++static bool
++check_group (vec *group, unsigned group_num, unsigned iteration,
++	     int &profit, vec &merged_groups,
++	     isomer_stmt_lhs &isomer_lhs, vec &hetero_lhs)
++{
++  unsigned j = 0;
++  group_info elmt = NULL;
++  gimple *first = NULL;
++  unsigned res = 0;
++  /* Record single use stmts in TMP_STMTS and decide whether replace stmts in
++     ginfo in succeeding processes.  */
++  auto_vec tmp_stmts;
++  FOR_EACH_VEC_ELT (*group, j, elmt)
++    {
++      if (merged_groups.contains (group_num))
++	return true;
++      res |= check_use_stmt (elmt, first, tmp_stmts, hetero_lhs);
++    }
++
++  /* Update each group member according to RES.  */
++  switch (res)
++    {
++      case ISOMORPHIC:
++	update_isomer_lhs (group, group_num, iteration, isomer_lhs,
++			   tmp_stmts, profit, merged_groups);
++	return false;
++      case UNCERTAIN:
++	return check_uncertain (group, group_num, iteration, profit,
++				tmp_stmts, isomer_lhs, hetero_lhs,
++				merged_groups);
++      default:
++	set_hetero (group, hetero_lhs, tmp_stmts);
++	return true;
++    }
++}
++
++/* Return true if all analyses are done except uncertain groups.  */
++
++static bool
++end_of_search (vec *> &circular_queue,
++	       vec &merged_groups)
++{
++  unsigned i = 0;
++  vec *group = NULL;
++  group_info elmt = NULL;
++  FOR_EACH_VEC_ELT (circular_queue, i, group)
++    {
++      if (merged_groups.contains (i))
++	continue;
++      elmt = (*group)[0];
++      /* If there is any isomorphic use_stmts, continue analysis of isomorphic
++	 use_stmts.  */
++      if (!elmt->done && !elmt->uncertain)
++	return false;
++    }
++  return true;
++}
++
++/* Push valid stmts to STMTS as cutpoints.  */
++
++static bool
++check_any_cutpoints (vec *> &circular_queue,
++		     vec *&stmts, vec &merged_groups)
++{
++  unsigned front = 0;
++  vec *group = NULL;
++  group_info elmt = NULL;
++  unsigned max = circular_queue.length () * circular_queue[0]->length ();
++  vec_alloc (stmts, max);
++  while (front < circular_queue.length ())
++    {
++      unsigned i = 0;
++      if (merged_groups.contains (front))
++	{
++	  front++;
++	  continue;
++	}
++      group = circular_queue[front++];
++      FOR_EACH_VEC_ELT (*group, i, elmt)
++	if (elmt->stmt != NULL && elmt->done && elmt->cut_point)
++	  stmts->safe_push (elmt->stmt);
++    }
++  return stmts->length () != 0;
++}
++
++/* Grouped loads are isomorphic.  Make pair for group number and iteration,
++   map load stmt to this pair.  We set iteration 0 here.  */
++
++static void
++init_isomer_lhs (vec *> &groups, isomer_stmt_lhs &isomer_lhs)
++{
++  vec *group = NULL;
++  group_info elmt = NULL;
++  unsigned i = 0;
++  FOR_EACH_VEC_ELT (groups, i, group)
++    {
++      unsigned j = 0;
++      FOR_EACH_VEC_ELT (*group, j, elmt)
++	isomer_lhs[gimple_assign_lhs (elmt->stmt)] = std::make_pair (i, 0);
++    }
++}
++
++/* It's not a strict analysis of load/store profit.  Assume scalar and vector
++   load/store are of the same cost.  The result PROFIT equals profit from
++   vectorizing of scalar loads/stores minus cost of a vectorized load/store.  */
++
++static int
++load_store_profit (unsigned scalar_mem_ops, unsigned vf, unsigned new_mem_ops)
++{
++  int profit = 0;
++  enum vect_cost_for_stmt kind = scalar_load;
++  int scalar_cost = builtin_vectorization_cost (kind, NULL_TREE, 0);
++  profit += (scalar_mem_ops - (scalar_mem_ops / vf)) * scalar_cost;
++  profit -= new_mem_ops / vf * scalar_cost;
++  kind = scalar_store;
++  scalar_cost = builtin_vectorization_cost (kind, NULL_TREE, 0);
++  profit -= new_mem_ops / vf * scalar_cost;
++  return profit;
++}
++
++/* Breadth first search the graph consisting of define-use chain starting from
++   the circular queue initialized by function BUILD_QUEUE.  Find single use of
++   each stmt in group and check if they are isomorphic.  Isomorphic is defined
++   as same rhs type, same operator, and isomorphic calculation of each rhs
++   starting from load.  If another rhs is uncertain to be isomorphic, put it
++   at the end of circular queue and re-analyze it during the next iteration.
++   If a group shares the same use_stmt with another group, skip one of them in
++   succeeding processes as merged.  Iterate the circular queue until all
++   remaining groups are heterogeneous or reach MEM stmts.  If all other groups
++   have finished the analysis, and the remaining groups are uncertain,
++   return false to avoid endless loop.  */
++
++bool
++bfs_find_isomer_stmts (vec *> &circular_queue,
++		       stmts_profit &profit_pair, unsigned vf,
++		       bool &reach_vdef)
++{
++  isomer_stmt_lhs isomer_lhs;
++  auto_vec hetero_lhs;
++  auto_vec merged_groups;
++  vec *group = NULL;
++  /* True if analysis finishes.  */
++  bool done = false;
++  int profit_sum = 0;
++  vec *stmts = NULL;
++  init_isomer_lhs (circular_queue, isomer_lhs);
++  for (unsigned i = 1; !done; ++i)
++    {
++      unsigned front = 0;
++      /* Re-initialize DONE to TRUE while a new iteration begins.  */
++      done = true;
++      while (front < circular_queue.length ())
++	{
++	  int profit = 0;
++	  group = circular_queue[front];
++	  done &= check_group (group, front, i, profit, merged_groups,
++			       isomer_lhs, hetero_lhs);
++	  profit_sum += profit;
++	  if (profit != 0 && (*group)[0]->stmt == NULL)
++	    {
++	      reach_vdef = true;
++	      return false;
++	    }
++	  ++front;
++	}
++      /* Uncertain result, return.  */
++      if (!done && end_of_search (circular_queue, merged_groups))
++	return false;
++    }
++  if (check_any_cutpoints (circular_queue, stmts, merged_groups))
++    {
++      profit_pair.first = stmts;
++      unsigned loads = circular_queue.length () * circular_queue[0]->length ();
++      profit_pair.second = profit_sum + load_store_profit (loads, vf,
++							   stmts->length ());
++      if (profit_pair.second > 0)
++	return true;
++    }
++  return false;
++}
++
++/* Free memory allocated by ginfo.  */
++
++static void
++free_ginfos (vec *> &worklists)
++{
++  vec *worklist;
++  unsigned i = 0;
++  while (i < worklists.length ())
++    {
++      worklist = worklists[i++];
++      group_info ginfo;
++      unsigned j = 0;
++      FOR_EACH_VEC_ELT (*worklist, j, ginfo)
++	delete ginfo;
++      vec_free (worklist);
++    }
++}
++
++static void
++release_tmp_stmts (vf_stmts_profit_map &candi_stmts)
++{
++  vf_stmts_profit_map::iterator iter;
++  for (iter = candi_stmts.begin (); iter != candi_stmts.end (); ++iter)
++    iter->second.first->release ();
++}
++
++/* Choose the group of stmts with maximum profit.  */
++
++static bool
++decide_stmts_by_profit (vf_stmts_profit_map &candi_stmts, vec &stmts)
++{
++  vf_stmts_profit_map::iterator iter;
++  int profit = 0;
++  int max = 0;
++  vec *tmp = NULL;
++  for (iter = candi_stmts.begin (); iter != candi_stmts.end (); ++iter)
++    {
++      profit = iter->second.second;
++      if (profit > max)
++	{
++	  tmp = iter->second.first;
++	  max = profit;
++	}
++    }
++  if (max == 0)
++    {
++      release_tmp_stmts (candi_stmts);
++      return false;
++    }
++  unsigned i = 0;
++  gimple *stmt = NULL;
++  FOR_EACH_VEC_ELT (*tmp, i, stmt)
++    stmts.safe_push (stmt);
++  release_tmp_stmts (candi_stmts);
++  return stmts.length () != 0;
++}
++
++/* Find isomorphic stmts from grouped loads with vector factor VF.
++
++   Given source code as follows and ignore casting.
++
++   a0 = (a[0] + b[0]) + ((a[4] - b[4]) << 16);
++   a1 = (a[1] + b[1]) + ((a[5] - b[5]) << 16);
++   a2 = (a[2] + b[2]) + ((a[6] - b[6]) << 16);
++   a3 = (a[3] + b[3]) + ((a[7] - b[7]) << 16);
++
++   We get grouped loads in VINFO as
++
++   GROUP_1		GROUP_2
++   _1 = *a		_11 = *b
++   _2 = *(a + 1)	_12 = *(b + 1)
++   _3 = *(a + 2)	_13 = *(b + 2)
++   _4 = *(a + 3)	_14 = *(b + 3)
++   _5 = *(a + 4)	_15 = *(b + 4)
++   _6 = *(a + 5)	_16 = *(b + 5)
++   _7 = *(a + 6)	_17 = *(b + 6)
++   _8 = *(a + 7)	_18 = *(b + 7)
++
++   First we try VF = 8, we get two worklists
++
++   WORKLIST_1		WORKLIST_2
++   _1 = *a		_11 = *b
++   _2 = *(a + 1)	_12 = *(b + 1)
++   _3 = *(a + 2)	_13 = *(b + 2)
++   _4 = *(a + 3)	_14 = *(b + 3)
++   _5 = *(a + 4)	_15 = *(b + 4)
++   _6 = *(a + 5)	_16 = *(b + 5)
++   _7 = *(a + 6)	_17 = *(b + 6)
++   _8 = *(a + 7)	_18 = *(b + 7)
++
++   We find _111 = _1 + _11 and _115 = _5 - _15 are not isomorphic,
++   so we try VF = VF / 2.
++
++   GROUP_1		GROUP_2
++   _1 = *a		_5 = *(a + 4)
++   _2 = *(a + 1)	_6 = *(a + 5)
++   _3 = *(a + 2)	_7 = *(a + 6)
++   _4 = *(a + 3)	_8 = *(a + 7)
++
++   GROUP_3		GROUP_4
++   _11 = *b		_15 = *(b + 4)
++   _12 = *(b + 1)	_16 = *(b + 5)
++   _13 = *(b + 2)	_17 = *(b + 6)
++   _14 = *(b + 3)	_18 = *(b + 7)
++
++   We first analyze group_1, and find all operations are isomorphic, then
++   replace stmts in group_1 with their use_stmts.  Group_2 as well.
++
++   GROUP_1		GROUP_2
++   _111 = _1 + _11	_115 = _5 - _15
++   _112 = _2 + _12	_116 = _6 - _16
++   _113 = _3 + _13	_117 = _7 - _17
++   _114 = _4 + _14	_118 = _8 - _18
++
++   When analyzing group_3 and group_4, we find their use_stmts are the same
++   as group_1 and group_2.  So group_3 is regarded as being merged to group_1
++   and group_4 being merged to group_2.  In future procedures, we will skip
++   group_3 and group_4.
++
++   We repeat such processing until operations are not isomorphic or searching
++   reaches MEM stmts.  In our given case, searching end up at a0, a1, a2 and
++   a3.  */
++
++static bool
++find_isomorphic_stmts (loop_vec_info vinfo, vec &stmts)
++{
++  unsigned vf = get_max_vf (vinfo);
++  if (vf == 0)
++    return false;
++  auto_vec *> circular_queue;
++  /* Map of vector factor and corresponding vectorizing profit.  */
++  stmts_profit profit_map;
++  /* Map of cut_points and vector factor.  */
++  vf_stmts_profit_map candi_stmts;
++  bool reach_vdef = false;
++  while (vf > 2)
++    {
++      if (build_queue (vinfo, vf, circular_queue) == 0)
++	return false;
++      if (!bfs_find_isomer_stmts (circular_queue, profit_map, vf, reach_vdef))
++	{
++	  if (reach_vdef)
++	    {
++	      release_tmp_stmts (candi_stmts);
++	      free_ginfos (circular_queue);
++	      circular_queue.release ();
++	      return false;
++	    }
++	  vf /= 2;
++	  free_ginfos (circular_queue);
++	  circular_queue.release ();
++	  continue;
++	}
++      candi_stmts[vf] = profit_map;
++      free_ginfos (circular_queue);
++      vf /= 2;
++      circular_queue.release ();
++    }
++  return decide_stmts_by_profit (candi_stmts, stmts);
++}
++
++/* Get iv from SEED_STMTS and make sure each seed_stmt has only one iv as index
++   and all indices are the same.  */
++
++static tree
++find_index (vec seed_stmts)
++{
++  if (seed_stmts.length () == 0)
++    return NULL;
++  bool found_index = false;
++  tree index = NULL;
++  unsigned ui = 0;
++  for (ui = 0; ui < seed_stmts.length (); ui++)
++    {
++      if (!gimple_vdef (seed_stmts[ui]))
++	return NULL;
++      tree lhs = gimple_assign_lhs (seed_stmts[ui]);
++      unsigned num_index = 0;
++      while (TREE_CODE (lhs) == ARRAY_REF)
++	{
++	  if (TREE_CODE (TREE_OPERAND (lhs, 1)) == SSA_NAME)
++	    {
++	      num_index++;
++	      if (num_index > 1)
++		return NULL;
++	      if (index == NULL)
++		{
++		  index = TREE_OPERAND (lhs, 1);
++		  found_index = true;
++		}
++	      else if (index != TREE_OPERAND (lhs, 1))
++		return NULL;
++	    }
++	  lhs = TREE_OPERAND (lhs, 0);
++	}
++      if (!found_index)
++	return NULL;
++    }
++  return index;
++}
++
++/* Check if expression of phi is an increment of a const.  */
++
++static void
++check_phi_inc (struct vertex *v_phi, struct graph *rdg, bool &found_inc)
++{
++  struct graph_edge *e_phi;
++  for (e_phi = v_phi->succ; e_phi; e_phi = e_phi->succ_next)
++    {
++      struct vertex *v_inc = &(rdg->vertices[e_phi->dest]);
++      if (!is_gimple_assign (RDGV_STMT (v_inc))
++	  || gimple_expr_code (RDGV_STMT (v_inc)) != PLUS_EXPR)
++	continue;
++      tree rhs1 = gimple_assign_rhs1 (RDGV_STMT (v_inc));
++      tree rhs2 = gimple_assign_rhs2 (RDGV_STMT (v_inc));
++      if (!(integer_onep (rhs1) || integer_onep (rhs2)))
++	continue;
++      struct graph_edge *e_inc;
++      /* find cycle with only two vertices inc and phi: inc <--> phi.  */
++      bool found_cycle = false;
++      for (e_inc = v_inc->succ; e_inc; e_inc = e_inc->succ_next)
++	{
++	  if (e_inc->dest == e_phi->src)
++	    {
++	      found_cycle = true;
++	      break;
++	    }
++	}
++      if (!found_cycle)
++	continue;
++      found_inc = true;
++    }
++}
++
++/* Check if phi satisfies form like PHI <0, i>.  */
++
++static inline bool
++iv_check_phi_stmt (gimple *phi_stmt)
++{
++  return gimple_phi_num_args (phi_stmt) == 2
++	 && (integer_zerop (gimple_phi_arg_def (phi_stmt, 0))
++	     || integer_zerop (gimple_phi_arg_def (phi_stmt, 1)));
++}
++
++/* Make sure the iteration variable is a phi.  */
++
++static tree
++get_iv_from_seed (struct graph *flow_only_rdg, vec seed_stmts)
++{
++  tree index = find_index (seed_stmts);
++  if (index == NULL)
++    return NULL;
++  for (int i = 0; i < flow_only_rdg->n_vertices; i++)
++    {
++      struct vertex *v = &(flow_only_rdg->vertices[i]);
++      if (RDGV_STMT (v) != seed_stmts[0])
++	continue;
++      struct graph_edge *e;
++      bool found_phi = false;
++      for (e = v->pred; e; e = e->pred_next)
++	{
++	  struct vertex *v_phi = &(flow_only_rdg->vertices[e->src]);
++	  gimple *phi_stmt = RDGV_STMT (v_phi);
++	  if (gimple_code (phi_stmt) != GIMPLE_PHI
++	      || gimple_phi_result (phi_stmt) != index)
++	    continue;
++	  if (!iv_check_phi_stmt (phi_stmt))
++	    return NULL;
++	  /* find inc expr in succ of phi.  */
++	  bool found_inc = false;
++	  check_phi_inc (v_phi, flow_only_rdg, found_inc);
++	  if (!found_inc)
++	    return NULL;
++	  found_phi = true;
++	  break;
++	}
++      if (!found_phi)
++	return NULL;
++      break;
++    }
++  return index;
++}
++
++/* Do not distribute loop if vertexes in ROOT_MAP have antidependence with in
++   FLOW_ONLY_RDG.  */
++
++static bool
++check_no_dependency (struct graph *flow_only_rdg, bitmap root_map)
++{
++  bitmap_iterator bi;
++  unsigned ui;
++  auto_vec visited_nodes;
++  auto_bitmap visited_map;
++  EXECUTE_IF_SET_IN_BITMAP (root_map, 0, ui, bi)
++    visited_nodes.safe_push (ui);
++  for (ui = 0; ui < visited_nodes.length (); ui++)
++    {
++      struct vertex *v = &(flow_only_rdg->vertices[visited_nodes[ui]]);
++      struct graph_edge *e;
++      for (e = v->succ; e; e = e->succ_next)
++	{
++	  if (bitmap_bit_p (root_map, e->dest))
++	    return false;
++	  if (bitmap_bit_p (visited_map, e->dest))
++	    continue;
++	  visited_nodes.safe_push (e->dest);
++	  bitmap_set_bit (visited_map, e->dest);
++	}
++    }
++  return true;
++}
++
++/* Find isomorphic stmts from GROUPED_LOADS in VINFO and make sure
++   there is no dependency among those STMT we found.  */
++
++static unsigned
++get_cut_points (struct graph *flow_only_rdg, bitmap cut_points,
++		loop_vec_info vinfo)
++{
++  unsigned n_stmts = 0;
++
++  /* STMTS that may be CUT_POINTS.  */
++  auto_vec stmts;
++  if (!find_isomorphic_stmts (vinfo, stmts))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "No temp array insertion: no isomorphic stmts"
++			    " were found.\n");
++      return 0;
++    }
++
++  for (int i = 0; i < flow_only_rdg->n_vertices; i++)
++    {
++      if (stmts.contains (RDG_STMT (flow_only_rdg, i)))
++	bitmap_set_bit (cut_points, i);
++    }
++  n_stmts = bitmap_count_bits (cut_points);
++
++  bool succ = check_no_dependency (flow_only_rdg, cut_points);
++  if (!succ)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "No temp array inserted: data dependency"
++			    " among isomorphic stmts.\n");
++      return 0;
++    }
++  return n_stmts;
++}
++
++static void
++build_temp_array (struct vertex *v, gimple_stmt_iterator &gsi,
++		  poly_uint64 array_extent, tree iv,
++		  hash_set *tmp_array_vars, vec *transformed)
++{
++  gimple *stmt = RDGV_STMT (v);
++  tree lhs = gimple_assign_lhs (stmt);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "original stmt:\t");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS|TDF_MEMSYMS);
++    }
++  tree var_ssa = duplicate_ssa_name (lhs, stmt);
++  gimple_assign_set_lhs (stmt, var_ssa);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "changed to:\t");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS | TDF_MEMSYMS);
++    }
++  gimple_set_uid (gsi_stmt (gsi), -1);
++  tree vect_elt_type = TREE_TYPE (lhs);
++  tree array_type = build_array_type_nelts (vect_elt_type, array_extent);
++  tree array = create_tmp_var (array_type);
++  tree array_ssa = build4 (ARRAY_REF, vect_elt_type, array, iv, NULL, NULL);
++  tmp_array_vars->add (array_ssa);
++  gimple *store = gimple_build_assign (array_ssa, var_ssa);
++  tree new_vdef = make_ssa_name (gimple_vop (cfun), store);
++  gsi_insert_after (&gsi, store, GSI_NEW_STMT);
++  gimple_set_vdef (store, new_vdef);
++  transformed->safe_push (store);
++  gimple_set_uid (gsi_stmt (gsi), -1);
++  tree array_ssa2 = build4 (ARRAY_REF, vect_elt_type, array, iv, NULL, NULL);
++  tmp_array_vars->add (array_ssa2);
++  gimple *load = gimple_build_assign (lhs, array_ssa2);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "insert stmt:\t");
++      print_gimple_stmt (dump_file, store, 0, TDF_VOPS|TDF_MEMSYMS);
++      fprintf (dump_file, " and stmt:\t");
++      print_gimple_stmt (dump_file, load, 0, TDF_VOPS|TDF_MEMSYMS);
++    }
++  gimple_set_vuse (load, new_vdef);
++  gsi_insert_after (&gsi, load, GSI_NEW_STMT);
++  gimple_set_uid (gsi_stmt (gsi), -1);
++}
++
++/* Set bitmap PRODUCERS based on vec TRANSFORMED.  */
++
++void
++loop_distribution::build_producers (loop_p loop, bitmap producers,
++				    vec &transformed)
++{
++  auto_vec stmts;
++  stmts_from_loop (loop, &stmts);
++  int i = 0;
++  gimple *stmt = NULL;
++
++  FOR_EACH_VEC_ELT (stmts, i, stmt)
++    gimple_set_uid (stmt, i);
++  i = 0;
++  FOR_EACH_VEC_ELT (transformed, i, stmt)
++    bitmap_set_bit (producers, stmt->uid);
++}
++
++/* Transform stmt
++
++   A = FOO (ARG_1);
++
++   to
++
++   STMT_1: A1 = FOO (ARG_1);
++   STMT_2: X[I] = A1;
++   STMT_3: A = X[I];
++
++   Producer is STMT_2 who defines the temp array and consumer is
++   STMT_3 who uses the temp array.  */
++
++void
++loop_distribution::do_insertion (loop_p loop, struct graph *flow_only_rdg,
++				 tree iv, bitmap cut_points,
++				 hash_set *tmp_array_vars,
++				 bitmap producers)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "=== do insertion ===\n");
++
++  auto_vec transformed;
++
++  /* Execution times of loop.  */
++  poly_uint64 array_extent
++    = tree_to_poly_uint64 (number_of_latch_executions (loop)) + 1;
++
++  basic_block *bbs = get_loop_body_in_custom_order (loop, this,
++						    bb_top_order_cmp_r);
++
++  for (int i = 0; i < int (loop->num_nodes); i++)
++    {
++      basic_block bb = bbs[i];
++
++      /* Find all cut points in bb and transform them.  */
++      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
++	   gsi_next (&gsi))
++	{
++	  unsigned j = gimple_uid (gsi_stmt (gsi));
++	  if (bitmap_bit_p (cut_points, j))
++	    {
++	      struct vertex *v = &(flow_only_rdg->vertices[j]);
++	      build_temp_array (v, gsi, array_extent, iv, tmp_array_vars,
++				&transformed);
++	    }
++	}
++    }
++  build_producers (loop, producers, transformed);
++  update_ssa (TODO_update_ssa);
++  free (bbs);
++}
++
++/* After temp array insertion, given stmts
++   STMT_1: M = FOO (ARG_1);
++   STMT_2: X[I] = M;
++   STMT_3: A = X[I];
++   STMT_2 is the producer, STMT_1 is its prev and STMT_3 is its next.
++   Replace M with A, and remove STMT_2 and STMT_3.  */
++
++static void
++reset_gimple_assign (struct graph *flow_only_rdg, struct partition *partition,
++		     gimple_stmt_iterator &gsi, int j)
++{
++  struct vertex *v = &(flow_only_rdg->vertices[j]);
++  gimple *stmt = RDGV_STMT (v);
++  gimple *prev = stmt->prev;
++  gimple *next = stmt->next;
++  tree n_lhs = gimple_assign_lhs (next);
++  gimple_assign_set_lhs (prev, n_lhs);
++  unlink_stmt_vdef (stmt);
++  if (partition)
++    bitmap_clear_bit (partition->stmts, gimple_uid (gsi_stmt (gsi)));
++  gsi_remove (&gsi, true);
++  release_defs (stmt);
++  if (partition)
++    bitmap_clear_bit (partition->stmts, gimple_uid (gsi_stmt (gsi)));
++  gsi_remove (&gsi, true);
++}
++
++void
++loop_distribution::remove_insertion (loop_p loop, struct graph *flow_only_rdg,
++		  bitmap producers, struct partition *partition)
++{
++  basic_block *bbs = get_loop_body_in_custom_order (loop, this,
++						    bb_top_order_cmp_r);
++  for (int i = 0; i < int (loop->num_nodes); i++)
++    {
++      basic_block bb = bbs[i];
++      for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
++	   gsi_next (&gsi))
++	{
++	  unsigned j = gimple_uid (gsi_stmt (gsi));
++	  if (bitmap_bit_p (producers, j))
++	    reset_gimple_assign (flow_only_rdg, partition, gsi, j);
++	}
++    }
++  update_ssa (TODO_update_ssa);
++  free (bbs);
++}
++
++/* Insert temp arrays if isomorphic computation exists.  Temp arrays will be
++   regarded as SEED_STMTS for building partitions in succeeding processes.  */
++
++bool
++loop_distribution::insert_temp_arrays (loop_p loop, vec seed_stmts,
++			hash_set *tmp_array_vars, bitmap producers)
++{
++  struct graph *flow_only_rdg = build_rdg (loop, NULL);
++  gcc_checking_assert (flow_only_rdg != NULL);
++  tree iv = get_iv_from_seed (flow_only_rdg, seed_stmts);
++  if (iv == NULL)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Loop %d no temp array insertion: failed to get"
++			    " iteration variable.\n", loop->num);
++      free_rdg (flow_only_rdg);
++      return false;
++  }
++  auto_bitmap cut_points;
++  loop_vec_info vinfo = loop_vec_info_for_loop (loop);
++  unsigned n_cut_points = get_cut_points (flow_only_rdg, cut_points, vinfo);
++  delete vinfo;
++  loop->aux = NULL;
++  if (n_cut_points == 0)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Loop %d no temp array insertion: no cut points"
++			    " found.\n", loop->num);
++      free_rdg (flow_only_rdg);
++      return false;
++    }
++  do_insertion (loop, flow_only_rdg, iv, cut_points, tmp_array_vars, producers);
++  if (dump_enabled_p ())
++    {
++      dump_user_location_t loc = find_loop_location (loop);
++      dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc, "Insertion done:"
++		       " %d temp arrays inserted in Loop %d.\n",
++		       n_cut_points, loop->num);
++    }
++  free_rdg (flow_only_rdg);
++  return true;
++}
++
++static bool find_seed_stmts_for_distribution (class loop *, vec *);
++
+ /* Distributes the code from LOOP in such a way that producer statements
+    are placed before consumer statements.  Tries to separate only the
+    statements from STMTS into separate loops.  Returns the number of
+@@ -2972,7 +4342,7 @@ loop_distribution::finalize_partitions (class loop *loop,
+ 
+ int
+ loop_distribution::distribute_loop (class loop *loop,
+-		 const vec &stmts,
++		 vec &stmts,
+ 		 control_dependences *cd, int *nb_calls, bool *destroy_p,
+ 		 bool only_patterns_p)
+ {
+@@ -3021,6 +4391,33 @@ loop_distribution::distribute_loop (class loop *loop,
+       return 0;
+     }
+ 
++  /* Try to distribute LOOP if LOOP is simple enough and unable to vectorize.
++     If LOOP has grouped loads, recursively find isomorphic stmts and insert
++     temp arrays, rebuild RDG and call find_seed_stmts_for_distribution
++     to replace STMTS.  */
++
++  hash_set tmp_array_vars;
++
++  /* STMTs that define those inserted TMP_ARRAYs.  */
++  auto_bitmap producers;
++
++  /* New SEED_STMTS after insertion.  */
++  auto_vec work_list;
++  bool insert_success = false;
++  if (may_insert_temp_arrays (loop, rdg, cd))
++    {
++      if (insert_temp_arrays (loop, stmts, &tmp_array_vars, producers))
++	{
++	  if (find_seed_stmts_for_distribution (loop, &work_list))
++	    {
++	      insert_success = true;
++	    }
++	  else
++	    remove_insertion (loop, rdg, producers, NULL);
++	  rebuild_rdg (loop, rdg, cd);
++	}
++     }
++
+   data_reference_p dref;
+   for (i = 0; datarefs_vec.iterate (i, &dref); ++i)
+     dref->aux = (void *) (uintptr_t) i;
+@@ -3029,7 +4426,10 @@ loop_distribution::distribute_loop (class loop *loop,
+     dump_rdg (dump_file, rdg);
+ 
+   auto_vec partitions;
+-  rdg_build_partitions (rdg, stmts, &partitions);
++  if (work_list.length() > stmts.length())
++	rdg_build_partitions (rdg, &work_list, &partitions);
++  else
++	rdg_build_partitions (rdg, &stmts, &partitions);
+ 
+   auto_vec alias_ddrs;
+ 
+@@ -3101,7 +4501,7 @@ loop_distribution::distribute_loop (class loop *loop,
+       for (int j = i + 1;
+ 	   partitions.iterate (j, &partition); ++j)
+ 	{
+-	  if (share_memory_accesses (rdg, into, partition))
++	  if (share_memory_accesses (rdg, into, partition, &tmp_array_vars))
+ 	    {
+ 	      partition_merge_into (rdg, into, partition, FUSE_SHARE_REF);
+ 	      partitions.unordered_remove (j);
+@@ -3151,7 +4551,7 @@ loop_distribution::distribute_loop (class loop *loop,
+ 	}
+     }
+ 
+-  finalize_partitions (loop, &partitions, &alias_ddrs);
++  finalize_partitions (loop, &partitions, &alias_ddrs, producers);
+ 
+   /* If there is a reduction in all partitions make sure the last one
+      is not classified for builtin code generation.  */
+@@ -3169,6 +4569,24 @@ loop_distribution::distribute_loop (class loop *loop,
+     }
+ 
+   nbp = partitions.length ();
++
++  /* If we have inserted TMP_ARRAYs but there is only one partition left in
++     the succeeding processes, remove those inserted TMP_ARRAYs back to the
++     original version.  */
++
++  if (nbp == 1 && insert_success)
++    {
++      struct partition *partition = NULL;
++      partitions.iterate (0, &partition);
++      remove_insertion (loop, rdg, producers, partition);
++      if (dump_enabled_p ())
++	{
++	  dump_user_location_t loc = find_loop_location (loop);
++	  dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, loc, "Insertion removed:"
++			   " unable to distribute loop %d.\n", loop->num);
++	}
++    }
++
+   if (nbp == 0
+       || (nbp == 1 && !partition_builtin_p (partitions[0]))
+       || (nbp > 1 && partition_contains_all_rw (rdg, partitions)))
+diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc
+index 04e68f621..aae7f62f3 100644
+--- a/gcc/tree-vect-data-refs.cc
++++ b/gcc/tree-vect-data-refs.cc
+@@ -2791,6 +2791,9 @@ vect_analyze_group_access_1 (vec_info *vinfo, dr_vec_info *dr_info)
+       DR_GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
+ 
+       DR_GROUP_SIZE (stmt_info) = groupsize;
++
++      DR_GROUP_SLP_TRANSPOSE (stmt_info) = false;
++
+       if (dump_enabled_p ())
+ 	{
+ 	  dump_printf_loc (MSG_NOTE, vect_location,
+@@ -2820,6 +2823,20 @@ vect_analyze_group_access_1 (vec_info *vinfo, dr_vec_info *dr_info)
+ 			     DR_GROUP_GAP (stmt_info));
+ 	}
+ 
++      /* SLP: create an SLP data structure for every interleaving group of
++	 loads for further analysis in vect_analyse_slp.  */
++      if (DR_IS_READ (dr) && !slp_impossible)
++	{
++	  if (loop_vinfo)
++	    {
++	      LOOP_VINFO_GROUPED_LOADS (loop_vinfo).safe_push (stmt_info);
++	    }
++	  if (bb_vinfo)
++	    {
++	      BB_VINFO_GROUPED_LOADS (bb_vinfo).safe_push (stmt_info);
++	    }
++	}
++
+       /* SLP: create an SLP data structure for every interleaving group of
+ 	 stores for further analysis in vect_analyse_slp.  */
+       if (DR_IS_WRITE (dr) && !slp_impossible)
+@@ -5636,6 +5653,226 @@ vect_permute_store_chain (vec_info *vinfo, vec &dr_chain,
+     }
+ }
+ 
++/* Encoding the PERM_MASK_FIRST.  */
++
++static void
++vect_indices_encoding_first (tree vectype, unsigned int array_num,
++			     tree &perm_mask_high_first,
++			     tree &perm_mask_low_first)
++{
++  unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
++  vec_perm_builder sel (nelt, nelt, 1);
++  sel.quick_grow (nelt);
++  unsigned int group_num = nelt / array_num;
++  unsigned int index = 0;
++  unsigned int array = 0;
++  unsigned int group = 0;
++
++  /* The encoding has 1 pattern in the first stage.  */
++  for (array = 0; array < array_num / 2; array++)
++    {
++      for (group = 0; group < group_num * 2; group++)
++	{
++	  sel[index++] = array + array_num * group;
++	}
++    }
++  vec_perm_indices indices (sel, 2, nelt);
++  perm_mask_high_first = vect_gen_perm_mask_checked (vectype, indices);
++
++  index = 0;
++  for (array = array_num / 2; array < array_num; array++)
++    {
++      for (group = 0; group < group_num * 2; group++)
++	{
++	  sel[index++] = array + array_num * group;
++	}
++    }
++  indices.new_vector (sel, 2, nelt);
++  perm_mask_low_first = vect_gen_perm_mask_checked (vectype, indices);
++}
++
++/* Encoding the PERM_MASK.  */
++
++static void
++vect_indices_encoding (tree vectype, unsigned int array_num,
++		       tree &perm_mask_high, tree &perm_mask_low)
++{
++  unsigned int nelt = TYPE_VECTOR_SUBPARTS (vectype).to_constant ();
++  vec_perm_builder sel (nelt, nelt, 1);
++  sel.quick_grow (nelt);
++  unsigned int group_num = nelt / array_num;
++  unsigned int index = 0;
++  unsigned int array = 0;
++  unsigned int group = 0;
++
++  /* The encoding has 2 patterns in the following stages.  */
++  for (array = 0; array < array_num / 2; array++)
++    {
++      for (group = 0; group < group_num; group++)
++	{
++	  sel[index++] = group + group_num * array;
++	}
++      for (group = 0; group < group_num; group++)
++	{
++	  sel[index++] = nelt + group + group_num * array;
++	}
++    }
++  vec_perm_indices indices (sel, 2, nelt);
++  perm_mask_high = vect_gen_perm_mask_checked (vectype, indices);
++
++  index = 0;
++  for (array = array_num / 2; array < array_num; array++)
++    {
++      for (group = 0; group < group_num; group++)
++	{
++	  sel[index++] = group + group_num * array;
++	}
++      for (group = 0; group < group_num; group++)
++	{
++	  sel[index++] = nelt + group + group_num * array;
++	}
++    }
++  indices.new_vector (sel, 2, nelt);
++  perm_mask_low = vect_gen_perm_mask_checked (vectype, indices);
++}
++
++/* Function vect_transpose_store_chain.
++
++   Given a chain of interleaved stores in DR_CHAIN of LENGTH and ARRAY_NUM that
++   must be a power of 2.  Generate interleave_high/low stmts to reorder
++   the data correctly for the stores.  Return the final references for stores
++   in RESULT_CHAIN.  This function is similar to vect_permute_store_chain (),
++   we interleave the contents of the vectors in their order.
++
++   E.g., LENGTH is 4, the scalar type is short (i.e., VF is 8) and ARRAY_NUM
++   is 4.  That is, the input is 4 vectors each containing 8 elements.
++   And 2 (VF / ARRAY_NUM) of 8 elements come from the same array.  we interleave
++   the contents of the four vectors in their order.  We assign a number to each
++   element, the input sequence is:
++
++   1st vec:   0  1  2  3  4  5  6  7
++   2nd vec:   8  9 10 11 12 13 14 15
++   3rd vec:  16 17 18 19 20 21 22 23
++   4th vec:  24 25 26 27 28 29 30 31
++
++   The output sequence should be:
++
++   1st vec:   0  4  8 12 16 20 24 28
++   2nd vec:   1  5  9 13 17 21 25 29
++   3rd vec:   2  6 10 14 18 22 26 30
++   4th vec:   3  7 11 15 19 23 27 31
++
++   In our example,
++   We get 2 (VF / ARRAY_NUM) elements together in every vector.
++
++   I1:   0  4  1  5  2  6  3  7
++   I2:   8 12  9 13 10 14 11 15
++   I3:  16 20 17 21 18 22 19 23
++   I4:  24 28 25 29 26 30 27 31
++
++   Then, we use interleave_high/low instructions to create such output.
++   Every 2 (VF / ARRAY_NUM) elements are regarded as a whole.  The permutation
++   is done in log LENGTH stages.
++
++   I1: interleave_high (1st vec, 3rd vec)
++   I2: interleave_low (1st vec, 3rd vec)
++   I3: interleave_high (2nd vec, 4th vec)
++   I4: interleave_low (2nd vec, 4th vec)
++
++   The first stage of the sequence should be:
++
++   I1:   0  4 16 20  1  5 17 21
++   I2:   2  6 18 22  3  7 19 23
++   I3:   8 12 24 28  9 13 25 29
++   I4:  10 14 26 30 11 15 27 31
++
++   The following stage sequence should be, i.e. the final result is:
++
++   I1:   0  4  8 12 16 20 24 28
++   I2:   1  5  9 13 17 21 25 29
++   I3:   2  6 10 14 18 22 26 30
++   I4:   3  7 11 15 19 23 27 31.  */
++
++void
++vect_transpose_store_chain (vec_info *vinfo, vec<tree> dr_chain,
++			    unsigned int length, unsigned int array_num,
++			    stmt_vec_info stmt_info, gimple_stmt_iterator *gsi,
++			    vec<tree> *result_chain)
++{
++  gimple *perm_stmt = NULL;
++  tree vectype = STMT_VINFO_VECTYPE (stmt_info);
++  tree perm_mask_low_first = NULL;
++  tree perm_mask_high_first = NULL;
++  tree perm_mask_low = NULL;
++  tree perm_mask_high = NULL;
++  unsigned int log_length = exact_log2 (length);
++
++  /* Only power of 2 is supported.  */
++  gcc_assert (pow2p_hwi (length));
++
++  /* The encoding has 2 types, one for the grouped pattern in the first stage,
++     another for the interleaved patterns in the following stages.  */
++  gcc_assert (array_num != 0);
++
++  /* Create grouped stmt (in the first stage):
++	group = nelt / array_num;
++	high_first = VEC_PERM_EXPR 
++	low_first = VEC_PERM_EXPR   */
++  vect_indices_encoding_first (vectype, array_num, perm_mask_high_first,
++			       perm_mask_low_first);
++
++  /* Create interleaving stmt (in the following stages):
++	high = VEC_PERM_EXPR 
++	low = VEC_PERM_EXPR   */
++  vect_indices_encoding (vectype, array_num, perm_mask_high, perm_mask_low);
++
++  for (unsigned int perm_time = 0; perm_time < log_length; perm_time++)
++    {
++      for (unsigned int index = 0; index < length / 2; index++)
++	{
++	  tree vect1 = dr_chain[index];
++	  tree vect2 = dr_chain[index + length / 2];
++
++	  tree high = make_temp_ssa_name (vectype, NULL, "vect_inter_high");
++	  perm_stmt = gimple_build_assign (high, VEC_PERM_EXPR, vect1, vect2,
++					   perm_time == 0 ? perm_mask_high_first
++							  : perm_mask_high);
++	  vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
++	  (*result_chain)[2 * index] = high;
++
++	  tree low = make_temp_ssa_name (vectype, NULL, "vect_inter_low");
++	  perm_stmt = gimple_build_assign (low, VEC_PERM_EXPR, vect1, vect2,
++					   perm_time == 0 ? perm_mask_low_first
++							  : perm_mask_low);
++	  vect_finish_stmt_generation (vinfo, stmt_info, perm_stmt, gsi);
++	  (*result_chain)[2 * index+1] = low;
++	}
++      memcpy (dr_chain.address (), result_chain->address (),
++	      length * sizeof (tree));
++    }
++}
++
+ /* Function vect_setup_realignment
+ 
+    This function is called when vectorizing an unaligned load using
+diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
+index 3435f9378..f296e9415 100644
+--- a/gcc/tree-vect-loop.cc
++++ b/gcc/tree-vect-loop.cc
+@@ -2856,7 +2856,7 @@ vect_analyze_loop_1 (class loop *loop, vec_info_shared *shared,
+ 		     loop_vec_info main_loop_vinfo,
+ 		     const vector_modes &vector_modes, unsigned &mode_i,
+ 		     machine_mode &autodetected_vector_mode,
+-		     bool &fatal)
++		     bool &fatal, bool result_only_p)
+ {
+   loop_vec_info loop_vinfo
+     = vect_create_loop_vinfo (loop, shared, loop_form_info, main_loop_vinfo);
+@@ -2865,6 +2865,8 @@ vect_analyze_loop_1 (class loop *loop, vec_info_shared *shared,
+   loop_vinfo->vector_mode = vector_mode;
+   unsigned int suggested_unroll_factor = 1;
+ 
++  /* Loop_vinfo for loop-distribution pass.  */
++  opt_loop_vec_info fail_loop_vinfo = opt_loop_vec_info::success (NULL);
+   /* Run the main analysis.  */
+   opt_result res = vect_analyze_loop_2 (loop_vinfo, fatal,
+ 					&suggested_unroll_factor);
+@@ -2933,7 +2935,21 @@ vect_analyze_loop_1 (class loop *loop, vec_info_shared *shared,
+ 
+   if (!res)
+     {
+-      delete loop_vinfo;
++
++	/* If current analysis shows LOOP is unable to vectorize, loop_vinfo
++	will be deleted.  If LOOP is under ldist analysis, backup it before
++	it is deleted and return it if all modes are analyzed and still
++	fail to vectorize.  */
++      if (result_only_p && (mode_i == vector_modes.length ()
++	    || autodetected_vector_mode == VOIDmode))
++	{
++	    fail_loop_vinfo = opt_loop_vec_info::success (loop_vinfo);
++	    loop->aux = (loop_vec_info) fail_loop_vinfo;
++	}
++      else
++	{
++	    delete loop_vinfo;
++	}
+       if (fatal)
+ 	gcc_checking_assert (main_loop_vinfo == NULL);
+       return opt_loop_vec_info::propagate_failure (res);
+@@ -2946,9 +2962,11 @@ vect_analyze_loop_1 (class loop *loop, vec_info_shared *shared,
+ 
+    Apply a set of analyses on LOOP, and create a loop_vec_info struct
+    for it.  The different analyses will record information in the
+-   loop_vec_info struct.  */
++   loop_vec_info struct.  When RESULT_ONLY_P is true, quit analysis
++   if loop is vectorizable, otherwise, do not delete vinfo. */
+ opt_loop_vec_info
+-vect_analyze_loop (class loop *loop, vec_info_shared *shared)
++vect_analyze_loop (class loop *loop, vec_info_shared *shared,
++		   bool result_only_p)
+ {
+   DUMP_VECT_SCOPE ("analyze_loop_nest");
+ 
+@@ -2996,6 +3014,12 @@ vect_analyze_loop (class loop *loop, vec_info_shared *shared)
+ 			     && !unlimited_cost_model (loop));
+   machine_mode autodetected_vector_mode = VOIDmode;
+   opt_loop_vec_info first_loop_vinfo = opt_loop_vec_info::success (NULL);
++  /* Loop_vinfo for loop-distribution pass.  */
++  opt_loop_vec_info fail_loop_vinfo = opt_loop_vec_info::success (NULL);
++  if (result_only_p)
++  {
++     vect_slp_init ();
++  }
+   unsigned int mode_i = 0;
+   unsigned HOST_WIDE_INT simdlen = loop->simdlen;
+ 
+@@ -3019,10 +3043,16 @@ vect_analyze_loop (class loop *loop, vec_info_shared *shared)
+       opt_loop_vec_info loop_vinfo
+ 	= vect_analyze_loop_1 (loop, shared, &loop_form_info,
+ 			       NULL, vector_modes, mode_i,
+-			       autodetected_vector_mode, fatal);
++			       autodetected_vector_mode, fatal, result_only_p);
+       if (fatal)
+ 	break;
+ 
++      if (result_only_p && (mode_i == vector_modes.length ()
++	  || autodetected_vector_mode == VOIDmode))
++	{
++		return loop_vinfo;
++	}
++
+       if (loop_vinfo)
+ 	{
+ 	  /*  Analyzis has been successful so update the VF value.  The
+@@ -3132,7 +3162,7 @@ vect_analyze_loop (class loop *loop, vec_info_shared *shared)
+ 	= vect_analyze_loop_1 (loop, shared, &loop_form_info,
+ 			       first_loop_vinfo,
+ 			       vector_modes, mode_i,
+-			       autodetected_vector_mode, fatal);
++			       autodetected_vector_mode, fatal, result_only_p);
+       if (fatal)
+ 	break;
+ 
+diff --git a/gcc/tree-vect-patterns.cc b/gcc/tree-vect-patterns.cc
+index e1bcab0f7..c0c15773d 100644
+--- a/gcc/tree-vect-patterns.cc
++++ b/gcc/tree-vect-patterns.cc
+@@ -5632,8 +5632,8 @@ static vect_recog_func vect_vect_recog_func_ptrs[] = {
+      internal functions.  */
+   { vect_recog_gather_scatter_pattern, "gather_scatter" },
+   { vect_recog_mask_conversion_pattern, "mask_conversion" },
+-  { vect_recog_widen_plus_pattern, "widen_plus" },
+-  { vect_recog_widen_minus_pattern, "widen_minus" },
++  // { vect_recog_widen_plus_pattern, "widen_plus" },
++  // { vect_recog_widen_minus_pattern, "widen_minus" },
+ };
+ 
+ const unsigned int NUM_PATTERNS = ARRAY_SIZE (vect_vect_recog_func_ptrs);
+diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
+index af477c31a..6cbf8085f 100644
+--- a/gcc/tree-vect-slp.cc
++++ b/gcc/tree-vect-slp.cc
+@@ -49,6 +49,8 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tree-eh.h"
+ #include "tree-cfg.h"
+ #include "alloc-pool.h"
++#include "print-tree.h"
++#include "gimple-pretty-print.h"
+ 
+ static bool vectorizable_slp_permutation (vec_info *, gimple_stmt_iterator *,
+ 					  slp_tree, stmt_vector_for_cost *);
+@@ -994,6 +996,21 @@ vect_build_slp_tree_1 (vec_info *vinfo, unsigned char *swap,
+ 	}
+ 
+       gcc_assert (vectype);
++      if (!STMT_VINFO_VECTYPE (stmt_info))
++	STMT_VINFO_VECTYPE (stmt_info) = vectype;
++      if (dump_file)
++	{
++	  fprintf (dump_file, "vect_build_slp_tree_1: %p\n", stmt_info);
++	  print_gimple_stmt (dump_file, stmt, 0);
++	  fprintf (dump_file, "vect_build_slp_tree_1: vectype=");
++	  if (vectype)
++	    print_generic_expr (dump_file, vectype);
++	  fprintf (dump_file, "\n");
++	  fprintf (dump_file, "internal vectype=");
++	  if (STMT_VINFO_VECTYPE (stmt_info))
++	    print_generic_expr (dump_file, STMT_VINFO_VECTYPE (stmt_info));
++	  fprintf (dump_file, "\n");
++	}
+ 
+       gcall *call_stmt = dyn_cast <gcall *> (stmt);
+       if (call_stmt)
+@@ -1575,10 +1592,10 @@ vect_build_slp_tree (vec_info *vinfo,
+ 	dump_printf_loc (MSG_NOTE, vect_location,
+ 			 "SLP discovery for node %p succeeded\n", res);
+       gcc_assert (res_ == res);
+-      res->max_nunits = this_max_nunits;
++      res_->max_nunits = this_max_nunits;
+       vect_update_max_nunits (max_nunits, this_max_nunits);
+       /* Keep a reference for the bst_map use.  */
+-      SLP_TREE_REF_COUNT (res)++;
++      SLP_TREE_REF_COUNT (res_)++;
+     }
+   return res_;
+ }
+@@ -3190,8 +3207,10 @@ vect_build_slp_instance (vec_info *vinfo,
+ 
+       /* For basic block SLP, try to break the group up into multiples of
+ 	 a vector size.  */
++      bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
+       if (is_a <bb_vec_info> (vinfo)
+-	  && (i > 1 && i < group_size))
++	  && (i > 1 && i < group_size)
++	  && !bb_vinfo->transposed)
+ 	{
+ 	  tree scalar_type
+ 	    = TREE_TYPE (DR_REF (STMT_VINFO_DATA_REF (stmt_info)));
+@@ -3301,84 +3320,1034 @@ vect_analyze_slp_instance (vec_info *vinfo,
+       scalar_stmts.create (DR_GROUP_SIZE (stmt_info));
+       while (next_info)
+ 	{
+-	  scalar_stmts.quick_push (vect_stmt_to_vectorize (next_info));
+-	  next_info = DR_GROUP_NEXT_ELEMENT (next_info);
++	  scalar_stmts.quick_push (vect_stmt_to_vectorize (next_info));
++	  next_info = DR_GROUP_NEXT_ELEMENT (next_info);
++	}
++    }
++  else if (kind == slp_inst_kind_reduc_chain)
++    {
++      /* Collect the reduction stmts and store them in scalar_stmts.  */
++      scalar_stmts.create (REDUC_GROUP_SIZE (stmt_info));
++      while (next_info)
++	{
++	  scalar_stmts.quick_push (vect_stmt_to_vectorize (next_info));
++	  next_info = REDUC_GROUP_NEXT_ELEMENT (next_info);
++	}
++      /* Mark the first element of the reduction chain as reduction to properly
++	 transform the node.  In the reduction analysis phase only the last
++	 element of the chain is marked as reduction.  */
++      STMT_VINFO_DEF_TYPE (stmt_info)
++	= STMT_VINFO_DEF_TYPE (scalar_stmts.last ());
++      STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info))
++	= STMT_VINFO_REDUC_DEF (vect_orig_stmt (scalar_stmts.last ()));
++    }
++  else if (kind == slp_inst_kind_ctor)
++    {
++      tree rhs = gimple_assign_rhs1 (stmt_info->stmt);
++      tree val;
++      scalar_stmts.create (CONSTRUCTOR_NELTS (rhs));
++      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), i, val)
++	{
++	  stmt_vec_info def_info = vinfo->lookup_def (val);
++	  def_info = vect_stmt_to_vectorize (def_info);
++	  scalar_stmts.quick_push (def_info);
++	}
++      if (dump_enabled_p ())
++	dump_printf_loc (MSG_NOTE, vect_location,
++			 "Analyzing vectorizable constructor: %G\n",
++			 stmt_info->stmt);
++    }
++  else if (kind == slp_inst_kind_reduc_group)
++    {
++      /* Collect reduction statements.  */
++      const vec<stmt_vec_info> &reductions
++	= as_a <loop_vec_info> (vinfo)->reductions;
++      scalar_stmts.create (reductions.length ());
++      for (i = 0; reductions.iterate (i, &next_info); i++)
++	if ((STMT_VINFO_RELEVANT_P (next_info)
++	     || STMT_VINFO_LIVE_P (next_info))
++	    /* ???  Make sure we didn't skip a conversion around a reduction
++	       path.  In that case we'd have to reverse engineer that conversion
++	       stmt following the chain using reduc_idx and from the PHI
++	       using reduc_def.  */
++	    && STMT_VINFO_DEF_TYPE (next_info) == vect_reduction_def)
++	  scalar_stmts.quick_push (next_info);
++      /* If less than two were relevant/live there's nothing to SLP.  */
++      if (scalar_stmts.length () < 2)
++	return false;
++    }
++  else
++    gcc_unreachable ();
++
++  vec<stmt_vec_info> roots = vNULL;
++  if (kind == slp_inst_kind_ctor)
++    {
++      roots.create (1);
++      roots.quick_push (stmt_info);
++    }
++  /* Build the tree for the SLP instance.  */
++  bool res = vect_build_slp_instance (vinfo, kind, scalar_stmts,
++				      roots,
++				      max_tree_size, limit, bst_map,
++				      kind == slp_inst_kind_store
++				      ? stmt_info : NULL);
++  if (!res)
++    roots.release ();
++
++  /* ???  If this is slp_inst_kind_store and the above succeeded here's
++     where we should do store group splitting.  */
++
++  return res;
++}
++
++static inline bool
++is_const_assign (stmt_vec_info store_elem)
++{
++  if (store_elem == NULL)
++    {
++      gcc_unreachable ();
++    }
++  gimple *stmt = store_elem->stmt;
++  gimple_rhs_class rhs_class = gimple_assign_rhs_class (stmt);
++  return rhs_class == GIMPLE_SINGLE_RHS
++	 && TREE_CONSTANT (gimple_assign_rhs1 (store_elem->stmt));
++}
++
++/* Push inits to INNERMOST_INITS and check const assign.  */
++
++static bool
++record_innermost (vec<tree> &innermost_inits,
++		  vec<tree> &innermost_offsets,
++		  stmt_vec_info stmt_vinfo)
++{
++  if (!stmt_vinfo)
++    {
++      return false;
++    }
++  stmt_vec_info next_info = stmt_vinfo;
++  while (next_info)
++    {
++      /* No need to vectorize constant assign in a transposed version.  */
++      if (is_const_assign (next_info))
++	{
++	  if (dump_enabled_p ())
++	    {
++	      dump_printf_loc (MSG_NOTE, vect_location,
++			      "no need to vectorize, store is const assign: %G",
++			      next_info->stmt);
++	    }
++	  return false;
++	}
++      innermost_inits.safe_push (STMT_VINFO_DR_INIT (next_info));
++      innermost_offsets.safe_push (STMT_VINFO_DR_OFFSET (next_info));
++      next_info = DR_GROUP_NEXT_ELEMENT (next_info);
++    }
++  return true;
++}
++
++/* Compare inits to INNERMOST_INITS, return FALSE if inits do not match
++   the first grouped_store.  And check const assign meanwhile.  */
++
++static bool
++compare_innermost (const vec<tree> &innermost_inits,
++		   const vec<tree> &innermost_offsets,
++		   stmt_vec_info stmt_vinfo)
++{
++  if (!stmt_vinfo || innermost_inits.length () != stmt_vinfo->size)
++    {
++      return false;
++    }
++  stmt_vec_info next_info = stmt_vinfo;
++  unsigned int i = 0;
++  while (next_info)
++    {
++      if (is_const_assign (next_info))
++	{
++	  if (dump_enabled_p ())
++	    {
++	      dump_printf_loc (MSG_NOTE, vect_location,
++			       "no need to vectorize, store is const "
++			       "assign: %G", next_info->stmt);
++	    }
++	  return false;
++	}
++      if (innermost_inits[i] != STMT_VINFO_DR_INIT (next_info)
++	  || innermost_offsets[i] != STMT_VINFO_DR_OFFSET (next_info))
++	{
++	  return false;
++	}
++      next_info = DR_GROUP_NEXT_ELEMENT (next_info);
++      i++;
++    }
++  return true;
++}
++
++static bool
++check_same_bb (stmt_vec_info grp1, stmt_vec_info grp2)
++{
++  if (grp1->stmt->bb->index == grp2->stmt->bb->index)
++    {
++       return true;
++    }
++  return false;
++}
++
++/* Check if grouped stores are of same type.
++   input: t1/t2 = TREE_TYPE (gimple_assign_lhs (first_element->stmt))
++   output: 0 if same, 1 or -1 else.  */
++
++static int
++tree_type_cmp (const tree t1, const tree t2)
++{
++  gcc_checking_assert (t1 != NULL && t2 != NULL);
++  if (t1 != t2)
++    {
++      if (TREE_CODE (t1) != TREE_CODE (t2))
++	{
++	  return TREE_CODE (t1) > TREE_CODE (t2) ? 1 : -1;
++	}
++      if (TYPE_UNSIGNED (t1) != TYPE_UNSIGNED (t2))
++	{
++	  return TYPE_UNSIGNED (t1) > TYPE_UNSIGNED (t2) ? 1 : -1;
++	}
++      if (TYPE_PRECISION (t1) != TYPE_PRECISION (t2))
++	{
++	  return TYPE_PRECISION (t1) > TYPE_PRECISION (t2) ? 1 : -1;
++	}
++    }
++  return 0;
++}
++
++/* Check if 2 grouped stores are of same type that
++   we can analyze them in a transpose group.  */
++static int
++check_same_store_type (stmt_vec_info grp1, stmt_vec_info grp2)
++{
++  if (grp1 == grp2)
++    {
++      return 0;
++    }
++  if (grp1->size != grp2->size)
++    {
++      return grp1->size > grp2->size ? 1 : -1;
++    }
++  tree lhs1 = gimple_assign_lhs (grp1->stmt);
++  tree lhs2 = gimple_assign_lhs (grp2->stmt);
++  if (TREE_CODE (lhs1) != TREE_CODE (lhs2))
++    {
++      return TREE_CODE (lhs1) > TREE_CODE (lhs2) ? 1 : -1;
++    }
++  tree grp_type1 = TREE_TYPE (gimple_assign_lhs (grp1->stmt));
++  tree grp_type2 = TREE_TYPE (gimple_assign_lhs (grp2->stmt));
++  int cmp = tree_type_cmp (grp_type1, grp_type2);
++  return cmp;
++}
++
++/* Sort grouped stores according to group_size and store_type.
++   output: 0 if same, 1 if grp1 > grp2, -1 otherwise.  */
++
++static int
++grouped_store_cmp (const void *grp1_, const void *grp2_)
++{
++  stmt_vec_info grp1 = *(stmt_vec_info *)const_cast<void *>(grp1_);
++  stmt_vec_info grp2 = *(stmt_vec_info *)const_cast<void *>(grp2_);
++  return check_same_store_type (grp1, grp2);
++}
++
++/* Transposing is based on permutation in registers.  Permutation requires
++   vector length being power of 2 and satisfying the vector mode.  */
++
++static inline bool
++check_filling_reg (stmt_vec_info current_element)
++{
++  if (current_element->size == 0)
++    {
++      return false;
++    }
++  /* If the gimple STMT was already vectorized in vect pass, it's unable to
++     conduct transpose analysis, skip it.  */
++  bool lhs_vectorized
++	= TREE_CODE (TREE_TYPE (gimple_get_lhs (current_element->stmt)))
++	  == VECTOR_TYPE;
++  bool rhs_vectorized
++	= TREE_CODE (TREE_TYPE (gimple_assign_rhs1 (current_element->stmt)))
++	  == VECTOR_TYPE;
++  if (lhs_vectorized || rhs_vectorized)
++    {
++      return false;
++    }
++  unsigned int store_precision
++    = TYPE_PRECISION (TREE_TYPE (gimple_get_lhs (current_element->stmt)));
++  auto_vector_modes vector_modes;
++  targetm.vectorize.autovectorize_vector_modes (&vector_modes, false);
++  unsigned min_mode_size = -1u;
++  for (unsigned i = 0; i < vector_modes.length (); i++)
++    {
++      unsigned mode_bit_size = (GET_MODE_BITSIZE (vector_modes[i])).coeffs[0];
++      min_mode_size = mode_bit_size < min_mode_size
++			? mode_bit_size : min_mode_size;
++    }
++  return store_precision != 0
++	 && pow2p_hwi (current_element->size)
++	 && (current_element->size * store_precision % min_mode_size == 0);
++}
++
++/* Check if previous groups are suitable to transpose, if not, set their
++   group number to -1, reduce grp_num and clear current_groups.
++   Otherwise, just clear current_groups.  */
++
++static void
++check_and_clear_groups (vec<stmt_vec_info> &current_groups,
++			unsigned int &grp_num)
++{
++  stmt_vec_info first_element;
++  if (current_groups.length () == 1
++      || (current_groups.length () != 0
++	  && !pow2p_hwi (current_groups.length ())))
++    {
++      while (current_groups.length () != 0)
++	{
++	  first_element = current_groups.pop ();
++	  first_element->group_number = -1;
++	}
++      grp_num--;
++    }
++  else
++    {
++      while (current_groups.length ())
++	{
++	  current_groups.pop ();
++	}
++    }
++}
++
++
++/* Make sure that transpose slp vectorization is conducted only if grouped
++   stores are one dimension array ref.  */
++
++static bool
++is_store_one_dim_array (gimple *stmt)
++{
++  tree op = gimple_get_lhs (stmt);
++  if (TREE_CODE (op) != ARRAY_REF)
++    return false;
++  return TREE_OPERAND_LENGTH (op) > 0
++	 && TREE_OPERAND_LENGTH (TREE_OPERAND (op, 0)) == 0;
++}
++
++/* Set grouped_stores with similar MEM_REF to the same group and mark their
++   grp_num.  Groups with same grp_num consist the minimum unit to analyze
++   transpose.  Return num of such units.  */
++
++static unsigned
++vect_prepare_transpose (bb_vec_info bb_vinfo)
++{
++  stmt_vec_info current_element = NULL;
++  stmt_vec_info first_element = NULL;
++  unsigned int i = 0;
++  unsigned int grp_num = 0;
++  /* Use arrays to record MEM_REF data in different GROUPED_STORES.  */
++  auto_vec<tree> innermost_inits;
++  auto_vec<tree> innermost_offsets;
++
++  /* A set of stmt_vec_info with same store type.  Analyze them if their size
++     is suitable to transpose.  */
++  auto_vec<stmt_vec_info> current_groups;
++
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_stores, i, current_element)
++    {
++      /* Compare current grouped_store to the first one if first_element exists,
++	 push current_element to current_groups if they are similar on innermost
++	 behavior of MEM_REF.  */
++      if (first_element != NULL
++	  && !check_same_store_type (first_element, current_element)
++	  && compare_innermost (innermost_inits, innermost_offsets,
++				current_element)
++	  && check_same_bb (first_element, current_element))
++	{
++	  current_groups.safe_push (current_element);
++	  current_element->group_number = grp_num;
++	  /* If current_element is the last element in grouped_stores, continue
++	     will exit the loop and leave the last group unanalyzed.  */
++	  if (i == bb_vinfo->grouped_stores.length () - 1)
++	    {
++	      check_and_clear_groups (current_groups, grp_num);
++	    }
++	  continue;
++	}
++      check_and_clear_groups (current_groups, grp_num);
++      innermost_inits.release ();
++      innermost_offsets.release ();
++      /* Beginning of a new group to analyze whether they are able to consist
++	 a unit to conduct transpose analysis.  */
++      first_element = NULL;
++      if (is_store_one_dim_array (current_element->stmt)
++	  && check_filling_reg (current_element)
++	  && record_innermost (innermost_inits, innermost_offsets,
++			       current_element))
++	{
++	  first_element = current_element;
++	  current_groups.safe_push (current_element);
++	  current_element->group_number = ++grp_num;
++	  if (i == bb_vinfo->grouped_stores.length () - 1)
++	    {
++	      check_and_clear_groups (current_groups, grp_num);
++	    }
++	  continue;
++	}
++      current_element->group_number = -1;
++    }
++  return grp_num;
++}
++
++/* Return a flag to transpose grouped stores before building slp tree.
++   Add bool may_transpose in class vec_info.  */
++
++static bool
++vect_may_transpose (bb_vec_info bb_vinfo)
++{
++  if (targetm.vectorize.vec_perm_const == NULL)
++    {
++      return false;
++    }
++
++  if (bb_vinfo->grouped_stores.length () < 2)
++    {
++      return false;
++    }
++
++  DUMP_VECT_SCOPE ("analyze if grouped stores may transpose to slp");
++  /* Sort grouped_stores according to size and type for function
++     vect_prepare_transpose ().  */
++  bb_vinfo->grouped_stores.qsort (grouped_store_cmp);
++
++  int groups = vect_prepare_transpose (bb_vinfo);
++  BB_VINFO_TRANS_GROUPS (bb_vinfo) = groups;
++  if (dump_enabled_p ())
++      dump_printf_loc (MSG_NOTE, vect_location,
++		       "%d groups to analyze transposed slp.\n", groups);
++  return groups != 0;
++}
++
++/* Get the base address of STMT_INFO.  */
++
++static tree
++get_op_base_address (stmt_vec_info stmt_info)
++{
++  struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
++  tree op = DR_BASE_ADDRESS (dr);
++  while (TREE_OPERAND_LENGTH (op) > 0)
++    {
++      op = TREE_OPERAND (op, 0);
++    }
++  return op;
++}
++
++/* Compare the UID of the two stmt_info STMTINFO_A and STMTINFO_B.
++   Sorting them in ascending order.  */
++
++static int
++dr_group_cmp (const void *stmtinfo_a_, const void *stmtinfo_b_)
++{
++  stmt_vec_info stmtinfo_a
++	= *(stmt_vec_info *) const_cast<void *> (stmtinfo_a_);
++  stmt_vec_info stmtinfo_b
++	= *(stmt_vec_info *) const_cast<void *> (stmtinfo_b_);
++
++  /* Stabilize sort.  */
++  if (stmtinfo_a == stmtinfo_b)
++    {
++      return 0;
++    }
++  return gimple_uid (stmtinfo_a->stmt) < gimple_uid (stmtinfo_b->stmt) ? -1 : 1;
++}
++
++/* Find the first elements of the grouped loads which are required to merge.  */
++
++static void
++vect_slp_grouped_load_find (bb_vec_info bb_vinfo, vec<bool> &visited,
++			    vec<stmt_vec_info> &res)
++{
++  unsigned int i = 0;
++  stmt_vec_info merge_first_element = NULL;
++  stmt_vec_info first_element = NULL;
++  tree opa = NULL;
++  unsigned int grp_size_a = 0;
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_loads, i, first_element)
++    {
++      if (visited[i])
++	{
++	  continue;
++	}
++      if (!STMT_VINFO_GROUPED_ACCESS (first_element)
++	  || !pow2p_hwi (DR_GROUP_SIZE (first_element)))
++	{
++	  /* Non-conforming grouped load should be grouped separately.  */
++	  if (merge_first_element == NULL)
++	    {
++	      visited[i] = true;
++	      res.safe_push (first_element);
++	      return;
++	    }
++	}
++      if (merge_first_element == NULL)
++	{
++	  merge_first_element = first_element;
++	  opa = get_op_base_address (first_element);
++	  grp_size_a = DR_GROUP_SIZE (first_element);
++	  res.safe_push (first_element);
++	  visited[i] = true;
++	  continue;
++	}
++
++      /* If the two first elements are of the same base address and group size,
++	 these two grouped loads need to be merged.  */
++      tree opb = get_op_base_address (first_element);
++      unsigned int grp_size_b = DR_GROUP_SIZE (first_element);
++      if (opa == opb && grp_size_a == grp_size_b)
++	{
++	  res.safe_push (first_element);
++	  visited[i] = true;
++	}
++    }
++}
++
++/* Merge the grouped loads that are found from
++   vect_slp_grouped_load_find ().  */
++
++static stmt_vec_info
++vect_slp_grouped_load_merge (vec<stmt_vec_info> &res)
++{
++  stmt_vec_info stmt_info = res[0];
++  if (res.length () == 1)
++    {
++      return stmt_info;
++    }
++  unsigned int i = 0;
++  unsigned int size = DR_GROUP_SIZE (res[0]);
++  unsigned int new_group_size = size * res.length ();
++  stmt_vec_info first_element = NULL;
++  stmt_vec_info merge_first_element = NULL;
++  stmt_vec_info last_element = NULL;
++  FOR_EACH_VEC_ELT (res, i, first_element)
++    {
++      if (merge_first_element == NULL)
++	{
++	  merge_first_element = first_element;
++	  last_element = merge_first_element;
++	  size = DR_GROUP_SIZE (merge_first_element);
++	}
++
++      if (last_element != first_element
++	  && !DR_GROUP_NEXT_ELEMENT (last_element))
++	{
++	  DR_GROUP_NEXT_ELEMENT (last_element) = first_element;
++	  /* Store the gap from the previous member of the group.  If there is
++	     no gap in the access, DR_GROUP_GAP is always 1.  */
++	  DR_GROUP_GAP_TRANS (first_element) = DR_GROUP_GAP (first_element);
++	  DR_GROUP_GAP (first_element) = 1;
++	}
++      for (stmt_info = first_element; stmt_info;
++	   stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info))
++	{
++	  DR_GROUP_FIRST_ELEMENT (stmt_info) = merge_first_element;
++	  DR_GROUP_SIZE_TRANS (stmt_info) = DR_GROUP_SIZE (stmt_info);
++	  DR_GROUP_SIZE (stmt_info) = new_group_size;
++	  last_element = stmt_info;
++	}
++    }
++  DR_GROUP_SIZE (merge_first_element) = new_group_size;
++  DR_GROUP_SLP_TRANSPOSE (merge_first_element) = true;
++  DR_GROUP_NEXT_ELEMENT (last_element) = NULL;
++  return merge_first_element;
++}
++
++/* Merge the grouped loads which have the same base address and group size.
++   For example, for grouped loads (opa_1, opa_2, opb_1, opb_2):
++     opa_1: a0->a1->a2->a3
++     opa_2: a8->a9->a10->a11
++     opb_1: b0->b1
++     opb_2: b16->b17
++   we can probably get two merged grouped loads:
++     opa: a0->a1->a2->a3->a8->a9->a10->a11
++     opb: b0->b1->b16->b17.  */
++
++static bool
++vect_merge_slp_grouped_loads (bb_vec_info bb_vinfo)
++{
++  if (bb_vinfo->grouped_loads.length () <= 0)
++    {
++      if (dump_enabled_p ())
++	{
++	  dump_printf_loc (MSG_NOTE, vect_location,
++			   "The number of grouped loads is 0.\n");
++	}
++      return false;
++    }
++  bb_vinfo->grouped_loads.qsort (dr_group_cmp);
++  auto_vec<bool> visited (bb_vinfo->grouped_loads.length ());
++  auto_vec<stmt_vec_info> grouped_loads_merge;
++  for (unsigned int i = 0; i < bb_vinfo->grouped_loads.length (); i++)
++    {
++      visited.safe_push (false);
++    }
++  while (1)
++    {
++      /* Find grouped loads which are required to merge.  */
++      auto_vec<stmt_vec_info> res;
++      vect_slp_grouped_load_find (bb_vinfo, visited, res);
++      if (res.is_empty ())
++	{
++	  break;
++	}
++      /* Merge the required grouped loads into one group.  */
++      grouped_loads_merge.safe_push (vect_slp_grouped_load_merge (res));
++    }
++  if (grouped_loads_merge.length () == bb_vinfo->grouped_loads.length ())
++    {
++      if (dump_enabled_p ())
++	{
++	  dump_printf_loc (MSG_NOTE, vect_location,
++			   "No grouped loads need to be merged.\n");
++	}
++      return false;
++    }
++  if (dump_enabled_p ())
++    {
++      dump_printf_loc (MSG_NOTE, vect_location,
++		       "Merging grouped loads successfully.\n");
++    }
++  BB_VINFO_GROUPED_LOADS (bb_vinfo).release ();
++  for (unsigned int i = 0; i < grouped_loads_merge.length (); i++)
++    {
++      BB_VINFO_GROUPED_LOADS (bb_vinfo).safe_push (grouped_loads_merge[i]);
++    }
++  return true;
++}
++
++/* Find the first elements of the grouped stores
++   which are required to transpose and merge.  */
++
++static void
++vect_slp_grouped_store_find (bb_vec_info bb_vinfo, vec<bool> &visited,
++			     vec<stmt_vec_info> &res)
++{
++  stmt_vec_info first_element = NULL;
++  stmt_vec_info merge_first_element = NULL;
++  unsigned int k = 0;
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_stores, k, first_element)
++    {
++      if (visited[k])
++	{
++	  continue;
++	}
++      /* Non-conforming grouped store should be grouped separately.  */
++      if (!STMT_VINFO_GROUPED_ACCESS (first_element)
++	  || first_element->group_number == -1)
++	{
++	  if (merge_first_element == NULL)
++	    {
++	      visited[k] = true;
++	      res.safe_push (first_element);
++	      return;
++	    }
++	}
++      if (first_element->group_number != -1
++	  && merge_first_element == NULL)
++	{
++	  merge_first_element = first_element;
++	}
++      if (merge_first_element->group_number == first_element->group_number)
++	{
++	  visited[k] = true;
++	  res.safe_push (first_element);
++	}
++    }
++}
++
++/* Transpose and merge the grouped stores that are found from
++   vect_slp_grouped_store_find ().  */
++
++static stmt_vec_info
++vect_slp_grouped_store_transform (vec<stmt_vec_info> &res)
++{
++  stmt_vec_info stmt_info = res[0];
++  if (res.length () == 1)
++    {
++      return stmt_info;
++    }
++  stmt_vec_info rearrange_first_element = stmt_info;
++  stmt_vec_info last_element = rearrange_first_element;
++
++  unsigned int size = DR_GROUP_SIZE (rearrange_first_element);
++  unsigned int new_group_size = size * res.length ();
++  for (unsigned int i = 1; i < res.length (); i++)
++    {
++      /* Store the gap from the previous member of the group.  If there is no
++	 gap in the access, DR_GROUP_GAP is always 1.  */
++      DR_GROUP_GAP_TRANS (res[i]) = DR_GROUP_GAP (res[i]);
++      DR_GROUP_GAP (res[i]) = 1;
++    }
++  while (!res.is_empty ())
++    {
++      stmt_info = res[0];
++      res.ordered_remove (0);
++      if (DR_GROUP_NEXT_ELEMENT (stmt_info))
++	{
++	  res.safe_push (DR_GROUP_NEXT_ELEMENT (stmt_info));
++	}
++      DR_GROUP_FIRST_ELEMENT (stmt_info) = rearrange_first_element;
++      DR_GROUP_NEXT_ELEMENT (last_element) = stmt_info;
++      DR_GROUP_SIZE_TRANS (stmt_info) = DR_GROUP_SIZE (stmt_info);
++      DR_GROUP_SIZE (stmt_info) = new_group_size;
++      last_element = stmt_info;
++    }
++
++  DR_GROUP_SIZE (rearrange_first_element) = new_group_size;
++  DR_GROUP_SLP_TRANSPOSE (rearrange_first_element) = true;
++  DR_GROUP_NEXT_ELEMENT (last_element) = NULL;
++  return rearrange_first_element;
++}
++
++/* Save the STMT_INFO in the grouped stores to BB_VINFO_SCALAR_STORES for
++   transposing back grouped stores.  */
++
++static void
++get_scalar_stores (bb_vec_info bb_vinfo)
++{
++  unsigned int k = 0;
++  stmt_vec_info first_element = NULL;
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_stores, k, first_element)
++    {
++      /* Filter the grouped store which is unnecessary for transposing.  */
++      if (!STMT_VINFO_GROUPED_ACCESS (first_element)
++	  || first_element->group_number == -1)
++	{
++	  continue;
++	}
++      vec<stmt_vec_info> tmp_scalar_store;
++      tmp_scalar_store.create (DR_GROUP_SIZE (first_element));
++      for (stmt_vec_info stmt_info = first_element; stmt_info;
++	   stmt_info = DR_GROUP_NEXT_ELEMENT (stmt_info))
++	{
++	  tmp_scalar_store.safe_push (stmt_info);
++	}
++      BB_VINFO_SCALAR_STORES (bb_vinfo).safe_push (tmp_scalar_store);
++    }
++}
++
++/* Transpose and merge the grouped stores which have the same group number.
++   For example, for grouped stores (opa_0, opa_1, opa_2, opa_3):
++     opa_0: a00->a01->a02->a03
++     opa_1: a10->a11->a12->a13
++     opa_2: a20->a21->a22->a23
++     opa_2: a30->a31->a32->a33
++   we can probably get the merged grouped store:
++     opa: a00->a10->a20->a30
++	->a01->a11->a21->a31
++	->a02->a12->a22->a32
++	->a03->a13->a23->a33.  */
++
++static bool
++vect_transform_slp_grouped_stores (bb_vec_info bb_vinfo)
++{
++  if (bb_vinfo->grouped_stores.length () <= 0)
++    {
++      if (dump_enabled_p ())
++	{
++	  dump_printf_loc (MSG_NOTE, vect_location,
++			   "The number of grouped stores is 0.\n");
++	}
++      return false;
++    }
++
++  bb_vinfo->grouped_stores.qsort (dr_group_cmp);
++  auto_vec<stmt_vec_info> grouped_stores_merge;
++  auto_vec<bool> visited (bb_vinfo->grouped_stores.length ());
++  unsigned int i = 0;
++  for (i = 0; i < bb_vinfo->grouped_stores.length (); i++)
++    {
++      visited.safe_push (false);
++    }
++
++  /* Get scalar stores for the following transposition recovery.  */
++  get_scalar_stores (bb_vinfo);
++
++  while (1)
++    {
++      /* Find grouped stores which are required to transpose and merge.  */
++      auto_vec<stmt_vec_info> res;
++      vect_slp_grouped_store_find (bb_vinfo, visited, res);
++      if (res.is_empty ())
++	{
++	  break;
++	}
++      /* Transpose and merge the required grouped stores into one group.  */
++      grouped_stores_merge.safe_push (vect_slp_grouped_store_transform (res));
++    }
++
++  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
++  for (i = 0; i < grouped_stores_merge.length (); i++)
++    {
++      BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (grouped_stores_merge[i]);
++    }
++
++  if (dump_enabled_p ())
++    {
++      dump_printf_loc (MSG_NOTE, vect_location,
++		       "Transposing grouped stores successfully.\n");
++    }
++  return true;
++}
++
++/* A helper function for vect_transform_back_slp_grouped_stores ().  */
++
++static auto_vec<stmt_vec_info>
++vect_transform_back_slp_grouped_store (bb_vec_info bb_vinfo,
++				       stmt_vec_info first_stmt_info)
++{
++  auto_vec<stmt_vec_info> grouped_stores_split;
++  for (unsigned int i = 0; i < bb_vinfo->scalar_stores.length (); i++)
++    {
++      vec<stmt_vec_info> scalar_tmp = bb_vinfo->scalar_stores[i];
++      if (scalar_tmp.length () > 1
++	  && scalar_tmp[0]->group_number != first_stmt_info->group_number)
++	{
++	  continue;
++	}
++      stmt_vec_info cur_stmt_info = NULL;
++      stmt_vec_info cur_first_stmt_info = NULL;
++      stmt_vec_info last_stmt_info = NULL;
++      unsigned int k = 0;
++      FOR_EACH_VEC_ELT (scalar_tmp, k, cur_stmt_info)
++	{
++	  if (k == 0)
++	    {
++	      cur_first_stmt_info = cur_stmt_info;
++	      last_stmt_info = cur_stmt_info;
++	    }
++	  DR_GROUP_FIRST_ELEMENT (cur_stmt_info) = cur_first_stmt_info;
++	  DR_GROUP_NEXT_ELEMENT (last_stmt_info) = cur_stmt_info;
++	  last_stmt_info = cur_stmt_info;
++	}
++      DR_GROUP_SIZE (cur_first_stmt_info) = k;
++      DR_GROUP_NEXT_ELEMENT (last_stmt_info) = NULL;
++      if (first_stmt_info != cur_first_stmt_info)
++	{
++	  DR_GROUP_GAP (cur_first_stmt_info)
++		= DR_GROUP_GAP_TRANS (cur_first_stmt_info);
++	  DR_GROUP_SLP_TRANSPOSE (cur_first_stmt_info) = false;
++	  DR_GROUP_NUMBER (cur_first_stmt_info) = -1;
++	}
++      grouped_stores_split.safe_push (cur_first_stmt_info);
++    }
++  return grouped_stores_split;
++}
++
++/* Transform the grouped store back.  */
++
++void
++vect_transform_back_slp_grouped_stores (bb_vec_info bb_vinfo,
++					stmt_vec_info first_stmt_info)
++{
++  if (first_stmt_info->group_number == -1)
++    {
++      return;
++    }
++  /* Transform back.  */
++  auto_vec<stmt_vec_info> grouped_stores_split
++	= vect_transform_back_slp_grouped_store (bb_vinfo, first_stmt_info);
++
++  /* Add the remaining grouped stores to grouped_stores_split.  */
++  stmt_vec_info first_element = NULL;
++  unsigned int i = 0;
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_stores, i, first_element)
++    {
++      if (first_element->group_number != first_stmt_info->group_number)
++	{
++	  grouped_stores_split.safe_push (first_element);
++	}
++    }
++  DR_GROUP_SLP_TRANSPOSE (first_stmt_info) = false;
++  DR_GROUP_NUMBER (first_stmt_info) = -1;
++  BB_VINFO_GROUPED_STORES (bb_vinfo).release ();
++  for (i = 0; i < grouped_stores_split.length (); i++)
++    {
++      BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (grouped_stores_split[i]);
++    }
++}
++
++/* Function check_for_slp_vectype
++
++   Restriction for grouped stores by checking their vectype.
++   If the vectype of the grouped store is changed, it need transform back.
++   If all grouped stores need to be transformed back, return FALSE.  */
++
++static bool
++check_for_slp_vectype (bb_vec_info bb_vinfo)
++{
++  if (dump_file)
++    fprintf (dump_file, "check_for_slp_vectype: enter\n");
++  stmt_vec_info first_element = NULL;
++  unsigned int i = 0;
++  int count = 0;
++  auto_vec<stmt_vec_info> grouped_stores_check;
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_stores, i, first_element)
++    {
++      grouped_stores_check.safe_push (first_element);
++    }
++  FOR_EACH_VEC_ELT (grouped_stores_check, i, first_element)
++    {
++      if (STMT_VINFO_GROUPED_ACCESS (first_element)
++	  && first_element->group_number != -1)
++	{
++	  unsigned int group_size_b
++			= DR_GROUP_SIZE_TRANS (first_element);
++	  tree vectype = STMT_VINFO_VECTYPE (first_element);
++	  gimple *stmt = STMT_VINFO_STMT (first_element);
++	  tree lhs = gimple_get_lhs (stmt);
++	  tree type = TREE_TYPE (lhs);
++#if 0
++	  if (!vectype && !type)
++	    {
++	      if (dump_file)
++		fprintf (dump_file, "check_for_slp_vectype: no vectype/stmt type\n");
++	      continue;
++	    }
++
++	  if (!vectype)
++	    vectype = type;
++#endif
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "check_for_slp_vectype: %p\n", first_element);
++	      print_gimple_stmt (dump_file, stmt, 0);
++	      fprintf (dump_file, "check_for_slp_vectype: vectype=");
++	      if (vectype)
++		print_generic_expr (dump_file, vectype);
++	      fprintf (dump_file, "\n");
++	    }
++#if 0
++	  if (!vectype || !VECTOR_TYPE_P (vectype))
++	    continue;
++#endif
++	  poly_uint64 nunits = TYPE_VECTOR_SUBPARTS (vectype);
++	  if (nunits.to_constant () > group_size_b)
++	    {
++	      count++;
++	      /* If the vectype is changed, this grouped store need
++		 to be transformed back.  */
++	      vect_transform_back_slp_grouped_stores (bb_vinfo, first_element);
++	      if (dump_enabled_p ())
++		{
++		  dump_printf_loc (MSG_NOTE, vect_location,
++				   "No supported: only supported for"
++				   " group_size geq than nunits.\n");
++		}
++	    }
++	}
++    }
++  if (count == BB_VINFO_TRANS_GROUPS (bb_vinfo))
++    {
++      return false;
++    }
++  if (dump_file)
++    fprintf (dump_file, "check_for_slp_vectype: True\n");
++  return true;
++}
++
++/* Function check_for_dr_alignment
++
++   Check the alignment of the slp instance loads.
++   Return FALSE if a load cannot be vectorized.  */
++
++static bool
++check_for_dr_alignment (bb_vec_info bb_vinfo, slp_instance instance)
++{
++  slp_tree node = NULL;
++  unsigned int i = 0;
++  FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
++    {
++      stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
++      dr_vec_info *first_dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "check_for_dr_alignment: %p\n", first_stmt_info);
++
++	  gimple *stmt = STMT_VINFO_STMT (first_stmt_info);
++	  tree lhs = gimple_get_lhs (stmt);
++	  tree type = TREE_TYPE (lhs);
++	  print_gimple_stmt (dump_file, stmt, 0);
++	}
++
++      tree vectype = STMT_VINFO_VECTYPE (first_stmt_info);
++      int malign = dr_misalignment (first_dr_info, vectype);
++      enum dr_alignment_support supportable_dr_alignment
++	= vect_supportable_dr_alignment (bb_vinfo, first_dr_info,
++					 vectype, malign);
++      if (supportable_dr_alignment == dr_explicit_realign_optimized
++	  || supportable_dr_alignment == dr_explicit_realign)
++	{
++	  return false;
+ 	}
+     }
+-  else if (kind == slp_inst_kind_reduc_chain)
++  return true;
++}
++
++/* Initialize slp_transpose flag before transposing.  */
++
++static void
++init_stmt_info_slp_transpose (bb_vec_info bb_vinfo)
++{
++  stmt_vec_info first_element = NULL;
++  unsigned int k = 0;
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_stores, k, first_element)
+     {
+-      /* Collect the reduction stmts and store them in scalar_stmts.  */
+-      scalar_stmts.create (REDUC_GROUP_SIZE (stmt_info));
+-      while (next_info)
++      if (STMT_VINFO_GROUPED_ACCESS (first_element))
+ 	{
+-	  scalar_stmts.quick_push (vect_stmt_to_vectorize (next_info));
+-	  next_info = REDUC_GROUP_NEXT_ELEMENT (next_info);
++	  DR_GROUP_SLP_TRANSPOSE (first_element) = false;
+ 	}
+-      /* Mark the first element of the reduction chain as reduction to properly
+-	 transform the node.  In the reduction analysis phase only the last
+-	 element of the chain is marked as reduction.  */
+-      STMT_VINFO_DEF_TYPE (stmt_info)
+-	= STMT_VINFO_DEF_TYPE (scalar_stmts.last ());
+-      STMT_VINFO_REDUC_DEF (vect_orig_stmt (stmt_info))
+-	= STMT_VINFO_REDUC_DEF (vect_orig_stmt (scalar_stmts.last ()));
+     }
+-  else if (kind == slp_inst_kind_ctor)
++  FOR_EACH_VEC_ELT (bb_vinfo->grouped_loads, k, first_element)
+     {
+-      tree rhs = gimple_assign_rhs1 (stmt_info->stmt);
+-      tree val;
+-      scalar_stmts.create (CONSTRUCTOR_NELTS (rhs));
+-      FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (rhs), i, val)
++      if (STMT_VINFO_GROUPED_ACCESS (first_element))
+ 	{
+-	  stmt_vec_info def_info = vinfo->lookup_def (val);
+-	  def_info = vect_stmt_to_vectorize (def_info);
+-	  scalar_stmts.quick_push (def_info);
++	  DR_GROUP_SLP_TRANSPOSE (first_element) = false;
+ 	}
+-      if (dump_enabled_p ())
+-	dump_printf_loc (MSG_NOTE, vect_location,
+-			 "Analyzing vectorizable constructor: %G\n",
+-			 stmt_info->stmt);
+     }
+-  else if (kind == slp_inst_kind_reduc_group)
++}
++
++/* Analyze and transpose the stmts before building the SLP tree.  */
++
++static bool
++vect_analyze_transpose (bb_vec_info bb_vinfo)
++{
++  DUMP_VECT_SCOPE ("vect_analyze_transpose");
++
++  if (!vect_may_transpose (bb_vinfo))
+     {
+-      /* Collect reduction statements.  */
+-      const vec<stmt_vec_info> &reductions
+-	= as_a <loop_vec_info> (vinfo)->reductions;
+-      scalar_stmts.create (reductions.length ());
+-      for (i = 0; reductions.iterate (i, &next_info); i++)
+-	if ((STMT_VINFO_RELEVANT_P (next_info)
+-	     || STMT_VINFO_LIVE_P (next_info))
+-	    /* ???  Make sure we didn't skip a conversion around a reduction
+-	       path.  In that case we'd have to reverse engineer that conversion
+-	       stmt following the chain using reduc_idx and from the PHI
+-	       using reduc_def.  */
+-	    && STMT_VINFO_DEF_TYPE (next_info) == vect_reduction_def)
+-	  scalar_stmts.quick_push (next_info);
+-      /* If less than two were relevant/live there's nothing to SLP.  */
+-      if (scalar_stmts.length () < 2)
+-	return false;
++      return false;
+     }
+-  else
+-    gcc_unreachable ();
+ 
+-  vec<stmt_vec_info> roots = vNULL;
+-  if (kind == slp_inst_kind_ctor)
++  /* For basic block SLP, try to merge the grouped stores and loads
++     into one group.  */
++  init_stmt_info_slp_transpose (bb_vinfo);
++  if (vect_transform_slp_grouped_stores (bb_vinfo)
++      && vect_merge_slp_grouped_loads (bb_vinfo))
+     {
+-      roots.create (1);
+-      roots.quick_push (stmt_info);
++      if (dump_enabled_p ())
++	{
++	  dump_printf_loc (MSG_NOTE, vect_location,
++			   "Analysis succeeded with SLP transposed.\n");
++	}
++      return true;
+     }
+-  /* Build the tree for the SLP instance.  */
+-  bool res = vect_build_slp_instance (vinfo, kind, scalar_stmts,
+-				      roots,
+-				      max_tree_size, limit, bst_map,
+-				      kind == slp_inst_kind_store
+-				      ? stmt_info : NULL);
+-  if (!res)
+-    roots.release ();
+-
+-  /* ???  If this is slp_inst_kind_store and the above succeeded here's
+-     where we should do store group splitting.  */
+-
+-  return res;
++  if (dump_enabled_p ())
++    {
++      dump_printf_loc (MSG_NOTE, vect_location,
++		       "Analysis failed with SLP transposed.\n");
++    }
++  return false;
+ }
+ 
+ /* Check if there are stmts in the loop can be vectorized using SLP.  Build SLP
+@@ -4963,7 +5932,7 @@ vect_slp_analyze_operations (vec_info *vinfo)
+ 	  /* Check we can vectorize the reduction.  */
+ 	  || (SLP_INSTANCE_KIND (instance) == slp_inst_kind_bb_reduc
+ 	      && !vectorizable_bb_reduc_epilogue (instance, &cost_vec)))
+-        {
++	{
+ 	  slp_tree node = SLP_INSTANCE_TREE (instance);
+ 	  stmt_vec_info stmt_info;
+ 	  if (!SLP_INSTANCE_ROOT_STMTS (instance).is_empty ())
+@@ -4975,7 +5944,7 @@ vect_slp_analyze_operations (vec_info *vinfo)
+ 			     "removing SLP instance operations starting from: %G",
+ 			     stmt_info->stmt);
+ 	  vect_free_slp_instance (instance);
+-          vinfo->slp_instances.ordered_remove (i);
++	  vinfo->slp_instances.ordered_remove (i);
+ 	  cost_vec.release ();
+ 	  while (!visited_vec.is_empty ())
+ 	    visited.remove (visited_vec.pop ());
+@@ -5204,7 +6173,7 @@ vect_bb_slp_scalar_cost (vec_info *vinfo,
+       gimple *orig_stmt = orig_stmt_info->stmt;
+ 
+       /* If there is a non-vectorized use of the defs then the scalar
+-         stmt is kept live in which case we do not account it or any
++	 stmt is kept live in which case we do not account it or any
+ 	 required defs in the SLP children in the scalar cost.  This
+ 	 way we make the vectorization more costly when compared to
+ 	 the scalar cost.  */
+@@ -5481,7 +6450,11 @@ vect_bb_vectorization_profitable_p (bb_vec_info bb_vinfo,
+ 
+       vec_outside_cost = vec_prologue_cost + vec_epilogue_cost;
+ 
+-      if (dump_enabled_p ())
++      BB_VINFO_VEC_INSIDE_COST (bb_vinfo) = vec_inside_cost;
++      BB_VINFO_VEC_OUTSIDE_COST (bb_vinfo) = vec_outside_cost;
++      BB_VINFO_SCALAR_COST (bb_vinfo) = scalar_cost;
++
++      if (!unlimited_cost_model (NULL) && dump_enabled_p ())
+ 	{
+ 	  dump_printf_loc (MSG_NOTE, vect_location,
+ 			   "Cost model analysis for part in loop %d:\n", sl);
+@@ -5819,7 +6792,7 @@ vect_slp_analyze_bb_1 (bb_vec_info bb_vinfo, int n_stmts, bool &fatal,
+   if (!vect_analyze_data_refs (bb_vinfo, &min_vf, NULL))
+     {
+       if (dump_enabled_p ())
+-        dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++	dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+ 			 "not vectorized: unhandled data-ref in basic "
+ 			 "block.\n");
+       return false;
+@@ -5854,6 +6827,22 @@ vect_slp_analyze_bb_1 (bb_vec_info bb_vinfo, int n_stmts, bool &fatal,
+ 
+   vect_pattern_recog (bb_vinfo);
+ 
++  /* Transpose grouped stores and loads for better vectorizable version.  */
++  if (bb_vinfo->transposed)
++    {
++      if (!vect_analyze_transpose (bb_vinfo))
++	{
++	  if (dump_enabled_p ())
++	    {
++	       dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++				"not vectorized: unhandled slp transposed in "
++				"basic block.\n");
++	    }
++	  return false;
++	}
++    }
++  bb_vinfo->before_slp = true;
++
+   /* Update store groups from pattern processing.  */
+   vect_fixup_store_groups_with_patterns (bb_vinfo);
+ 
+@@ -5872,6 +6861,20 @@ vect_slp_analyze_bb_1 (bb_vec_info bb_vinfo, int n_stmts, bool &fatal,
+       return false;
+     }
+ 
++  /* Check if the vectype is suitable for SLP transposed.  */
++  if (bb_vinfo->transposed && !check_for_slp_vectype (bb_vinfo))
++    {
++      if (dump_enabled_p ())
++	{
++	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++			   "Failed to SLP transposed in the basic block.\n");
++	  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++			   "not vectorized: vectype is not suitable for "
++			   "SLP transposed in basic block.\n");
++	}
++      return false;
++    }
++
+   /* Optimize permutations.  */
+   vect_optimize_slp (bb_vinfo);
+ 
+@@ -5914,6 +6917,27 @@ vect_slp_analyze_bb_1 (bb_vec_info bb_vinfo, int n_stmts, bool &fatal,
+   if (! BB_VINFO_SLP_INSTANCES (bb_vinfo).length ())
+     return false;
+ 
++  /* Check if the alignment is suitable for SLP transposed.  */
++  if (bb_vinfo->transposed)
++    {
++      for (i = 0; BB_VINFO_SLP_INSTANCES (bb_vinfo).iterate (i, &instance); i++)
++	{
++	  if (!check_for_dr_alignment (bb_vinfo, instance))
++	    {
++	      if (dump_enabled_p ())
++		{
++		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++				   "Failed to SLP transposed in the basic "
++				   "block.\n");
++		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++				   "not vectorized: alignment is not suitable "
++				   "for SLP transposed in basic block.\n");
++		}
++	      return false;
++	    }
++	}
++    }
++
+   if (!vect_slp_analyze_operations (bb_vinfo))
+     {
+       if (dump_enabled_p ())
+@@ -5923,7 +6947,88 @@ vect_slp_analyze_bb_1 (bb_vec_info bb_vinfo, int n_stmts, bool &fatal,
+     }
+ 
+   vect_bb_partition_graph (bb_vinfo);
++  return true;
++}
++
++static bool
++may_new_transpose_bbvinfo (bb_vec_info bb_vinfo_ori, bool res_ori,
++			   loop_p orig_loop)
++{
++  /* If the flag is false or the slp analysis is broken before
++     vect_analyze_slp, we don't try to analyze the transposed SLP version.  */
++  if (!flag_tree_slp_transpose_vectorize
++      || !BB_VINFO_BEFORE_SLP (bb_vinfo_ori))
++    {
++      return false;
++    }
++
++  /* If the original bb_vinfo can't be vectorized, try to new a bb_vinfo
++     of the transposed version.  */
++  if (!res_ori)
++    {
++      return true;
++    }
++
++  /* Calculate the cost of the original bb_vinfo.  */
++  if (unlimited_cost_model (NULL))
++    {
++      vec<slp_instance> &instances = BB_VINFO_SLP_INSTANCES (bb_vinfo_ori);
++      vect_bb_vectorization_profitable_p (bb_vinfo_ori, instances, orig_loop);
++    }
++  /* If the vec cost and scalar cost are not much difference (here we set the
++     threshold to 4), we try to new a bb_vinfo of the transposed version.  */
++  if (BB_VINFO_SCALAR_COST (bb_vinfo_ori)
++      < 4 * (BB_VINFO_VEC_INSIDE_COST (bb_vinfo_ori)
++	     + BB_VINFO_VEC_OUTSIDE_COST (bb_vinfo_ori)))
++    {
++      return true;
++    }
++  return false;
++}
+ 
++static bool
++may_choose_transpose_bbvinfo (bb_vec_info bb_vinfo_trans, bool res_trans,
++			      bb_vec_info bb_vinfo_ori, bool res_ori,
++			      loop_p orig_loop)
++{
++  /* The original bb_vinfo is chosen if the transposed bb_vinfo
++     can't be vectorized.  */
++  if (!res_trans)
++    {
++      return false;
++    }
++  /* Calculate the cost of the transposed bb_vinfo.  */
++  if (unlimited_cost_model (NULL))
++    {
++      vec<slp_instance> &instances = BB_VINFO_SLP_INSTANCES (bb_vinfo_trans);
++      vect_bb_vectorization_profitable_p (bb_vinfo_trans, instances,
++					  orig_loop);
++    }
++  int diff_bb_cost = -1;
++  int diff_bb_cost_trans = -1;
++  if (res_ori)
++    {
++      diff_bb_cost = BB_VINFO_SCALAR_COST (bb_vinfo_ori)
++		     - BB_VINFO_VEC_INSIDE_COST (bb_vinfo_ori)
++		     - BB_VINFO_VEC_OUTSIDE_COST (bb_vinfo_ori);
++    }
++  if (res_trans)
++    {
++      diff_bb_cost_trans = BB_VINFO_SCALAR_COST (bb_vinfo_trans)
++			   - BB_VINFO_VEC_INSIDE_COST (bb_vinfo_trans)
++			   - BB_VINFO_VEC_OUTSIDE_COST (bb_vinfo_trans);
++    }
++  /* The original bb_vinfo is chosen when one of the following conditions
++     is satisfied:
++	1) The cost of the original version is better than the transposed one.
++	2) The vec cost is similar to scalar cost in the transposed version.  */
++  if ((res_ori && res_trans && diff_bb_cost >= diff_bb_cost_trans)
++      || (res_trans && BB_VINFO_SCALAR_COST (bb_vinfo_trans)
++		       <= (BB_VINFO_VEC_INSIDE_COST (bb_vinfo_trans)
++			  + BB_VINFO_VEC_OUTSIDE_COST (bb_vinfo_trans))))
++    {
++      return false;
++    }
+   return true;
+ }
+ 
+@@ -5937,6 +7042,7 @@ vect_slp_region (vec bbs, vec datarefs,
+ 		 loop_p orig_loop)
+ {
+   bb_vec_info bb_vinfo;
++  bb_vec_info bb_vinfo_trans = NULL;
+   auto_vector_modes vector_modes;
+ 
+   /* Autodetect first vector size we try.  */
+@@ -5951,6 +7057,10 @@ vect_slp_region (vec bbs, vec datarefs,
+     {
+       bool vectorized = false;
+       bool fatal = false;
++      bool res_bb_vinfo_ori = false;
++      bool res_bb_vinfo_trans = false;
++
++      /* New a bb_vinfo of the original version.  */
+       bb_vinfo = new _bb_vec_info (bbs, &shared);
+ 
+       bool first_time_p = shared.datarefs.is_empty ();
+@@ -5960,8 +7070,113 @@ vect_slp_region (vec bbs, vec datarefs,
+       else
+ 	bb_vinfo->shared->check_datarefs ();
+       bb_vinfo->vector_mode = next_vector_mode;
++      bb_vinfo->transposed = false;
++      bb_vinfo->before_slp = false;
++
++      res_bb_vinfo_ori = vect_slp_analyze_bb_1 (bb_vinfo, n_stmts, fatal,
++						dataref_groups);
++      auto_vec<slp_instance> profitable_subgraphs;
++      auto_vec<slp_instance> profitable_subgraphs_trans;
++      for (slp_instance instance : BB_VINFO_SLP_INSTANCES (bb_vinfo))
++	{
++	  if (instance->subgraph_entries.is_empty ())
++	    continue;
++
++	    vect_location = instance->location ();
++	    if (!unlimited_cost_model (NULL)
++		&& !vect_bb_vectorization_profitable_p
++		      (bb_vinfo, instance->subgraph_entries, orig_loop))
++	      {
++		if (dump_enabled_p ())
++		  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++				   "not vectorized: vectorization is not "
++				   "profitable.\n");
++		  continue;
++	      }
++	    if (res_bb_vinfo_ori)
++	      {
++		if (!dbg_cnt (vect_slp))
++		  continue;
++		profitable_subgraphs.safe_push (instance);
++	      }
++	}
++
++      /* Analyze and new a transposed bb_vinfo.  */
++      if (may_new_transpose_bbvinfo (bb_vinfo, res_bb_vinfo_ori, orig_loop))
++	{
++	  bool fatal_trans = false;
++	  bb_vinfo_trans
++	    = new _bb_vec_info (bbs, &shared);
++	  bool first_time_p = shared.datarefs.is_empty ();
++	  BB_VINFO_DATAREFS (bb_vinfo_trans) = datarefs;
++	  if (first_time_p)
++	    {
++	      bb_vinfo_trans->shared->save_datarefs ();
++	    }
++	  else
++	    {
++	      bb_vinfo_trans->shared->check_datarefs ();
++	    }
++	  bb_vinfo_trans->vector_mode = next_vector_mode;
++	  bb_vinfo_trans->transposed = true;
++	  bb_vinfo_trans->before_slp = false;
++
++	  res_bb_vinfo_trans
++	    = vect_slp_analyze_bb_1 (bb_vinfo_trans, n_stmts, fatal_trans,
++				     dataref_groups);
++	  if (res_bb_vinfo_trans)
++	    {
++	      for (slp_instance instance : BB_VINFO_SLP_INSTANCES (bb_vinfo_trans))
++		{
++		  if (instance->subgraph_entries.is_empty ())
++		    continue;
++
++		  vect_location = instance->location ();
++		  if (!unlimited_cost_model (NULL)
++		      && !vect_bb_vectorization_profitable_p
++			(bb_vinfo_trans, instance->subgraph_entries, orig_loop))
++		    {
++		      if (dump_enabled_p ())
++			  dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
++					   "not vectorized: transpose vectorization is not "
++					   "profitable.\n");
++		      res_bb_vinfo_trans = false;
++		      continue;
++		     }
++		  if (res_bb_vinfo_trans)
++		    {
++		      if (!dbg_cnt (vect_slp))
++			continue;
++		      profitable_subgraphs_trans.safe_push (instance);
++		    }
++		}
++	    }
++	  if (may_choose_transpose_bbvinfo (bb_vinfo_trans,
++					    res_bb_vinfo_trans,
++					    bb_vinfo, res_bb_vinfo_ori,
++					    orig_loop))
++	    {
++	      bb_vinfo = bb_vinfo_trans;
++	      fatal = fatal_trans;
++	      if (dump_enabled_p ())
++		{
++		  dump_printf_loc (MSG_NOTE, vect_location,
++				   "Basic block part vectorized "
++				   "using transposed version.\n");
++		}
++	    }
++	  else
++	    {
++	      if (dump_enabled_p ())
++		{
++		  dump_printf_loc (MSG_NOTE, vect_location,
++				   "Basic block part vectorized "
++				   "\n");
++		}
++	    }
++	}
+ 
+-      if (vect_slp_analyze_bb_1 (bb_vinfo, n_stmts, fatal, dataref_groups))
++      if (res_bb_vinfo_ori || res_bb_vinfo_trans)
+ 	{
+ 	  if (dump_enabled_p ())
+ 	    {
+@@ -5972,90 +7187,129 @@ vect_slp_region (vec bbs, vec datarefs,
+ 	    }
+ 
+ 	  bb_vinfo->shared->check_datarefs ();
+-
+-	  auto_vec<slp_instance> profitable_subgraphs;
+-	  for (slp_instance instance : BB_VINFO_SLP_INSTANCES (bb_vinfo))
++	  if (!res_bb_vinfo_trans)
+ 	    {
+-	      if (instance->subgraph_entries.is_empty ())
+-		continue;
+-
+-	      vect_location = instance->location ();
+-	      if (!unlimited_cost_model (NULL)
+-		  && !vect_bb_vectorization_profitable_p
+-			(bb_vinfo, instance->subgraph_entries, orig_loop))
++	      /* When we're vectorizing an if-converted loop body make sure
++		 we vectorized all if-converted code.  */
++	      if (!profitable_subgraphs.is_empty ()
++		  && orig_loop)
+ 		{
+-		  if (dump_enabled_p ())
+-		    dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
+-				     "not vectorized: vectorization is not "
+-				     "profitable.\n");
+-		  continue;
++		  gcc_assert (bb_vinfo->bbs.length () == 1);
++		  for (gimple_stmt_iterator gsi = gsi_start_bb (bb_vinfo->bbs[0]);
++		       !gsi_end_p (gsi); gsi_next (&gsi))
++		    {
++		      /* The costing above left us with DCEable vectorized scalar
++			 stmts having the visited flag set on profitable
++			 subgraphs.  Do the delayed clearing of the flag here.  */
++		      if (gimple_visited_p (gsi_stmt (gsi)))
++			{
++			  gimple_set_visited (gsi_stmt (gsi), false);
++			  continue;
++			}
++		      if (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED)
++			continue;
++
++	      if (gassign *ass = dyn_cast <gassign *> (gsi_stmt (gsi)))
++		       if (gimple_assign_rhs_code (ass) == COND_EXPR)
++			 {
++			   if (!profitable_subgraphs.is_empty ()
++			       && dump_enabled_p ())
++			     dump_printf_loc (MSG_NOTE, vect_location,
++					      "not profitable because of "
++					      "unprofitable if-converted scalar "
++					      "code\n");
++			   profitable_subgraphs.truncate (0);
++			 }
++		    }
+ 		}
+ 
+-	      if (!dbg_cnt (vect_slp))
+-		continue;
++	      /* Finally schedule the profitable subgraphs.  */
++	      for (slp_instance instance : profitable_subgraphs)
++		{
++		  if (!vectorized && dump_enabled_p ())
++		    dump_printf_loc (MSG_NOTE, vect_location,
++				     "Basic block will be vectorized "
++				     "using SLP\n");
++		  vectorized = true;
+ 
+-	      profitable_subgraphs.safe_push (instance);
+-	    }
++		  vect_schedule_slp (bb_vinfo, instance->subgraph_entries);
+ 
+-	  /* When we're vectorizing an if-converted loop body make sure
+-	     we vectorized all if-converted code.  */
+-	  if (!profitable_subgraphs.is_empty ()
+-	      && orig_loop)
++		  unsigned HOST_WIDE_INT bytes;
++		  if (dump_enabled_p ())
++		    {
++		      if (GET_MODE_SIZE
++			   (bb_vinfo->vector_mode).is_constant (&bytes))
++			 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
++					  "basic block part vectorized using %wu "
++					  "byte vectors\n", bytes);
++		      else
++			 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
++					  "basic block part vectorized using "
++					  "variable length vectors\n");
++		    }
++		}
++	    }
++	  else
+ 	    {
+-	      gcc_assert (bb_vinfo->bbs.length () == 1);
+-	      for (gimple_stmt_iterator gsi = gsi_start_bb (bb_vinfo->bbs[0]);
+-		   !gsi_end_p (gsi); gsi_next (&gsi))
++	      if (!profitable_subgraphs_trans.is_empty ()
++		  && orig_loop)
+ 		{
+-		  /* The costing above left us with DCEable vectorized scalar
+-		     stmts having the visited flag set on profitable
+-		     subgraphs.  Do the delayed clearing of the flag here.  */
+-		  if (gimple_visited_p (gsi_stmt (gsi)))
++		  gcc_assert (bb_vinfo->bbs.length () == 1);
++		  for (gimple_stmt_iterator gsi = gsi_start_bb (bb_vinfo->bbs[0]);
++		       !gsi_end_p (gsi); gsi_next (&gsi))
+ 		    {
+-		      gimple_set_visited (gsi_stmt (gsi), false);
+-		      continue;
++		      /* The costing above left us with DCEable vectorized scalar
++			 stmts having the visited flag set on profitable
++			 subgraphs.  Do the delayed clearing of the flag here.  */
++		      if (gimple_visited_p (gsi_stmt (gsi)))
++			{
++			  gimple_set_visited (gsi_stmt (gsi), false);
++			  continue;
++			}
++		       if (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED)
++			 continue;
++
++		       if (gassign *ass = dyn_cast <gassign *> (gsi_stmt (gsi)))
++			if (gimple_assign_rhs_code (ass) == COND_EXPR)
++			 {
++			   if (!profitable_subgraphs_trans.is_empty ()
++			       && dump_enabled_p ())
++			     dump_printf_loc (MSG_NOTE, vect_location,
++					      "not profitable because of "
++					      "unprofitable if-converted scalar "
++					      "code\n");
++			   profitable_subgraphs_trans.truncate (0);
++			 }
+ 		    }
+-		  if (flag_vect_cost_model == VECT_COST_MODEL_UNLIMITED)
+-		    continue;
+-
+-		  if (gassign *ass = dyn_cast <gassign *> (gsi_stmt (gsi)))
+-		    if (gimple_assign_rhs_code (ass) == COND_EXPR)
+-		      {
+-			if (!profitable_subgraphs.is_empty ()
+-			    && dump_enabled_p ())
+-			  dump_printf_loc (MSG_NOTE, vect_location,
+-					   "not profitable because of "
+-					   "unprofitable if-converted scalar "
+-					   "code\n");
+-			profitable_subgraphs.truncate (0);
+-		      }
+ 		}
+-	    }
+ 
+-	  /* Finally schedule the profitable subgraphs.  */
+-	  for (slp_instance instance : profitable_subgraphs)
+-	    {
+-	      if (!vectorized && dump_enabled_p ())
+-		dump_printf_loc (MSG_NOTE, vect_location,
+-				 "Basic block will be vectorized "
+-				 "using SLP\n");
+-	      vectorized = true;
++	      /* Finally schedule the profitable subgraphs.  */
++	      for (slp_instance instance : profitable_subgraphs_trans)
++		{
++		  if (!vectorized && dump_enabled_p ())
++		    dump_printf_loc (MSG_NOTE, vect_location,
++				     "Basic block will be vectorized "
++				     "using SLP\n");
++		  vectorized = true;
+ 
+-	      vect_schedule_slp (bb_vinfo, instance->subgraph_entries);
++		  vect_schedule_slp (bb_vinfo, instance->subgraph_entries);
+ 
+-	      unsigned HOST_WIDE_INT bytes;
+-	      if (dump_enabled_p ())
+-		{
+-		  if (GET_MODE_SIZE
+-			(bb_vinfo->vector_mode).is_constant (&bytes))
+-		    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+-				     "basic block part vectorized using %wu "
+-				     "byte vectors\n", bytes);
+-		  else
+-		    dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
+-				     "basic block part vectorized using "
+-				     "variable length vectors\n");
++		  unsigned HOST_WIDE_INT bytes;
++		  if (dump_enabled_p ())
++		    {
++		      if (GET_MODE_SIZE
++			   (bb_vinfo->vector_mode).is_constant (&bytes))
++			 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
++					  "basic block part vectorized using %wu "
++					  "byte vectors\n", bytes);
++		      else
++			 dump_printf_loc (MSG_OPTIMIZED_LOCATIONS, vect_location,
++					  "basic block part vectorized using "
++					  "variable length vectors\n");
++		    }
+ 		}
+ 	    }
++
+ 	}
+       else
+ 	{
+@@ -6081,6 +7335,10 @@ vect_slp_region (vec bbs, vec datarefs,
+ 	  }
+ 
+       delete bb_vinfo;
++      if (bb_vinfo_trans)
++	{
++	  bb_vinfo_trans = NULL;
++	}
+ 
+       if (mode_i < vector_modes.length ()
+ 	  && VECTOR_MODE_P (autodetected_vector_mode)
+@@ -7244,10 +8502,17 @@ vect_schedule_slp_node (vec_info *vinfo,
+ 	 ready early, vectorized stores go before the last scalar
+ 	 stmt which is where all uses are ready.  */
+       stmt_vec_info last_stmt_info = NULL;
+-      if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
+-	last_stmt_info = vect_find_first_scalar_stmt_in_slp (node);
+-      else /* DR_IS_WRITE */
+-	last_stmt_info = vect_find_last_scalar_stmt_in_slp (node);
++
++      if (DR_GROUP_FIRST_ELEMENT (stmt_info)
++	  && DR_GROUP_SLP_TRANSPOSE (DR_GROUP_FIRST_ELEMENT (stmt_info)))
++	 last_stmt_info = vect_find_last_scalar_stmt_in_slp (node);
++      else
++	{
++	   if (DR_IS_READ (STMT_VINFO_DATA_REF (stmt_info)))
++		last_stmt_info = vect_find_first_scalar_stmt_in_slp (node);
++	   else /* DR_IS_WRITE */
++		last_stmt_info = vect_find_last_scalar_stmt_in_slp (node);
++	}
+       si = gsi_for_stmt (last_stmt_info->stmt);
+     }
+   else if ((STMT_VINFO_TYPE (stmt_info) == cycle_phi_info_type
+diff --git a/gcc/tree-vect-stmts.cc b/gcc/tree-vect-stmts.cc
+index 349200411..3099f6743 100644
+--- a/gcc/tree-vect-stmts.cc
++++ b/gcc/tree-vect-stmts.cc
+@@ -1369,10 +1369,10 @@ vect_get_load_cost (vec_info *, stmt_vec_info stmt_info, int ncopies,
+ 
+ static void
+ vect_init_vector_1 (vec_info *vinfo, stmt_vec_info stmt_vinfo, gimple *new_stmt,
+-		    gimple_stmt_iterator *gsi)
++		    gimple_stmt_iterator *gsi, bool transpose=false)
+ {
+   if (gsi)
+-    vect_finish_stmt_generation (vinfo, stmt_vinfo, new_stmt, gsi);
++    vect_finish_stmt_generation (vinfo, stmt_vinfo, new_stmt, gsi, transpose);
+   else
+     vinfo->insert_on_entry (stmt_vinfo, new_stmt);
+ 
+@@ -1393,7 +1393,7 @@ vect_init_vector_1 (vec_info *vinfo, stmt_vec_info stmt_vinfo, gimple *new_stmt,
+ 
+ tree
+ vect_init_vector (vec_info *vinfo, stmt_vec_info stmt_info, tree val, tree type,
+-		  gimple_stmt_iterator *gsi)
++		  gimple_stmt_iterator *gsi, bool transpose)
+ {
+   gimple *init_stmt;
+   tree new_temp;
+@@ -1418,7 +1418,7 @@ vect_init_vector (vec_info *vinfo, stmt_vec_info stmt_info, tree val, tree type,
+ 		  new_temp = make_ssa_name (TREE_TYPE (type));
+ 		  init_stmt = gimple_build_assign (new_temp, COND_EXPR,
+ 						   val, true_val, false_val);
+-		  vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
++		  vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi, transpose);
+ 		  val = new_temp;
+ 		}
+ 	    }
+@@ -1437,7 +1437,7 @@ vect_init_vector (vec_info *vinfo, stmt_vec_info stmt_info, tree val, tree type,
+ 		{
+ 		  init_stmt = gsi_stmt (gsi2);
+ 		  gsi_remove (&gsi2, false);
+-		  vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
++		  vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi, transpose);
+ 		}
+ 	    }
+ 	}
+@@ -1446,7 +1446,7 @@ vect_init_vector (vec_info *vinfo, stmt_vec_info stmt_info, tree val, tree type,
+ 
+   new_temp = vect_get_new_ssa_name (type, vect_simple_var, "cst_");
+   init_stmt = gimple_build_assign (new_temp, val);
+-  vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi);
++  vect_init_vector_1 (vinfo, stmt_info, init_stmt, gsi, transpose);
+   return new_temp;
+ }
+ 
+@@ -1572,9 +1572,11 @@ vect_get_vec_defs (vec_info *vinfo, stmt_vec_info stmt_info, slp_tree slp_node,
+    statement and create and return a stmt_vec_info for it.  */
+ 
+ static void
+-vect_finish_stmt_generation_1 (vec_info *,
+-			       stmt_vec_info stmt_info, gimple *vec_stmt)
++vect_finish_stmt_generation_1 (vec_info *vinfo,
++			       stmt_vec_info stmt_info, gimple *vec_stmt, bool transpose=false)
+ {
++  if (transpose)
++    stmt_vec_info vec_stmt_info = vinfo->add_pattern_stmt (vec_stmt, NULL);
+   if (dump_enabled_p ())
+     dump_printf_loc (MSG_NOTE, vect_location, "add new stmt: %G", vec_stmt);
+ 
+@@ -1616,7 +1618,7 @@ vect_finish_replace_stmt (vec_info *vinfo,
+ void
+ vect_finish_stmt_generation (vec_info *vinfo,
+ 			     stmt_vec_info stmt_info, gimple *vec_stmt,
+-			     gimple_stmt_iterator *gsi)
++			     gimple_stmt_iterator *gsi, bool transpose)
+ {
+   gcc_assert (!stmt_info || gimple_code (stmt_info->stmt) != GIMPLE_LABEL);
+ 
+@@ -1648,7 +1650,7 @@ vect_finish_stmt_generation (vec_info *vinfo,
+ 	}
+     }
+   gsi_insert_before (gsi, vec_stmt, GSI_SAME_STMT);
+-  vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt);
++  vect_finish_stmt_generation_1 (vinfo, stmt_info, vec_stmt, transpose);
+ }
+ 
+ /* We want to vectorize a call to combined function CFN with function
+@@ -2159,6 +2161,173 @@ vector_vector_composition_type (tree vtype, poly_uint64 nelts, tree *ptype)
+   return NULL_TREE;
+ }
+ 
++/* Check successor BB, BB without load is regarded as empty BB.  Ignore empty
++   BB in DFS.  */
++
++static unsigned
++mem_refs_in_bb (basic_block bb, vec &stmts)
++{
++  unsigned num = 0;
++  for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
++       !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      gimple *stmt = gsi_stmt (gsi);
++      if (is_gimple_debug (stmt))
++	continue;
++      if (is_gimple_assign (stmt) && gimple_has_mem_ops (stmt)
++	  && !gimple_has_volatile_ops (stmt))
++	{
++	  if (gimple_assign_rhs_code (stmt) == MEM_REF
++	      || gimple_assign_rhs_code (stmt) == ARRAY_REF)
++	    {
++	      stmts.safe_push (stmt);
++	      num++;
++	    }
++	  else if (TREE_CODE (gimple_get_lhs (stmt)) == MEM_REF
++		   || TREE_CODE (gimple_get_lhs (stmt)) == ARRAY_REF)
++	    num++;
++	}
++    }
++  return num;
++}
++
++static bool
++check_same_base (vec *datarefs, data_reference_p dr)
++{
++  for (unsigned ui = 0; ui < datarefs->length (); ui++)
++    {
++      tree op1 = TREE_OPERAND (DR_BASE_OBJECT (dr), 0);
++      tree op2 = TREE_OPERAND (DR_BASE_OBJECT ((*datarefs)[ui]), 0);
++      if (TREE_CODE (op1) != TREE_CODE (op2))
++	continue;
++      if (TREE_CODE (op1) == ADDR_EXPR)
++	{
++	  op1 = TREE_OPERAND (op1, 0);
++	  op2 = TREE_OPERAND (op2, 0);
++	}
++      enum tree_code code = TREE_CODE (op1);
++      switch (code)
++	{
++	case VAR_DECL:
++	  if (DECL_NAME (op1) == DECL_NAME (op2)
++	      && DR_IS_READ ((*datarefs)[ui]))
++	    return true;
++	  break;
++	case SSA_NAME:
++	  if (SSA_NAME_VERSION (op1) == SSA_NAME_VERSION (op2)
++	      && DR_IS_READ ((*datarefs)[ui]))
++	    return true;
++	  break;
++	default:
++	  break;
++	}
++    }
++  return false;
++}
++
++/* Iterate all load STMTS, if satisfying same base vectorized stmt, then return,
++   Otherwise, set false to SUCCESS.  */
++
++static void
++check_vec_use (loop_vec_info loop_vinfo, vec &stmts,
++	       stmt_vec_info stmt_info, bool &success)
++{
++  if (stmt_info == NULL)
++    {
++      success = false;
++      return;
++    }
++  if (DR_IS_READ (stmt_info->dr_aux.dr))
++    {
++      success = false;
++      return;
++    }
++  unsigned ui = 0;
++  gimple *candidate = NULL;
++  FOR_EACH_VEC_ELT (stmts, ui, candidate)
++    {
++      if (TREE_CODE (TREE_TYPE (gimple_get_lhs (candidate))) != VECTOR_TYPE)
++	continue;
++
++      if (candidate->bb != candidate->bb->loop_father->header)
++	{
++	  success = false;
++	  return;
++	}
++      auto_vec datarefs;
++      tree res = find_data_references_in_bb (candidate->bb->loop_father,
++					     candidate->bb, &datarefs);
++      if (res == chrec_dont_know)
++	{
++	  success = false;
++	  return;
++	}
++      if (check_same_base (&datarefs, stmt_info->dr_aux.dr))
++	return;
++    }
++  success = false;
++}
++
++/* Depth-first search from present BB.  If successor has load STMTS,
++   stop further searching.  */
++
++static void
++dfs_check_bb (loop_vec_info loop_vinfo, basic_block bb, stmt_vec_info stmt_info,
++	      bool &success, vec &visited_bbs)
++{
++  if (bb == cfun->cfg->x_exit_block_ptr)
++    {
++      success = false;
++      return;
++    }
++  if (!success || visited_bbs.contains (bb) || bb == loop_vinfo->loop->latch)
++    return;
++
++  visited_bbs.safe_push (bb);
++  auto_vec stmts;
++  unsigned num = mem_refs_in_bb (bb, stmts);
++  /* Empty BB.  */
++  if (num == 0)
++    {
++      edge e;
++      edge_iterator ei;
++      FOR_EACH_EDGE (e, ei, bb->succs)
++	{
++	  dfs_check_bb (loop_vinfo, e->dest, stmt_info, success, visited_bbs);
++	  if (!success)
++	    return;
++	}
++      return;
++    }
++  /* Non-empty BB.  */
++  check_vec_use (loop_vinfo, stmts, stmt_info, success);
++}
++
++/* For grouped store, check if all successors of present BB have vectorized
++   load from same base of store.  If so, set memory_access_type using
++   VMAT_CONTIGUOUS_PERMUTE instead of VMAT_LOAD_STORE_LANES.  */
++
++static bool
++conti_perm (stmt_vec_info stmt_vinfo, loop_vec_info loop_vinfo)
++{
++  gimple *stmt = stmt_vinfo->stmt;
++  if (gimple_code (stmt) != GIMPLE_ASSIGN)
++    return false;
++
++  if (DR_IS_READ (stmt_vinfo->dr_aux.dr))
++    return false;
++
++  basic_block bb = stmt->bb;
++  bool success = true;
++  auto_vec visited_bbs;
++  visited_bbs.safe_push (bb);
++  edge e;
++  edge_iterator ei;
++  FOR_EACH_EDGE (e, ei, bb->succs)
++    dfs_check_bb (loop_vinfo, e->dest, stmt_vinfo, success, visited_bbs);
++  return success;
++}
++
+ /* A subroutine of get_load_store_type, with a subset of the same
+    arguments.  Handle the case where STMT_INFO is part of a grouped load
+    or store.
+@@ -2373,6 +2542,20 @@ get_group_load_store_type (vec_info *vinfo, stmt_vec_info stmt_info,
+ 	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
+ 	      overrun_p = would_overrun_p;
+ 	    }
++
++	  if (*memory_access_type == VMAT_LOAD_STORE_LANES
++	      && TREE_CODE (loop_vinfo->num_iters) == INTEGER_CST
++	      && maybe_eq (tree_to_shwi (loop_vinfo->num_iters),
++			   loop_vinfo->vectorization_factor)
++	      && conti_perm (stmt_info, loop_vinfo)
++	      && (vls_type == VLS_LOAD
++		  ? vect_grouped_load_supported (vectype, single_element_p,
++						 group_size)
++		  : vect_grouped_store_supported (vectype, group_size)))
++	    {
++	      *memory_access_type = VMAT_CONTIGUOUS_PERMUTE;
++	      overrun_p = would_overrun_p;
++	    }
+ 	}
+ 
+       /* As a last resort, trying using a gather load or scatter store.
+@@ -7456,6 +7639,154 @@ vectorizable_scan_store (vec_info *vinfo,
+   return true;
+ }
+ 
++/* Function vect_permute_store_chains
++
++   Call function vect_permute_store_chain ().
++   Given a chain of interleaved stores in DR_CHAIN, generate
++   interleave_high/low stmts to reorder the data correctly.
++   Return the final references for stores in RESULT_CHAIN.  */
++
++static void
++vect_permute_store_chains (vec_info *vinfo, vec dr_chain,
++			   unsigned int num_each, stmt_vec_info stmt_info,
++			   gimple_stmt_iterator *gsi, vec *result_chain,
++			   unsigned int group)
++{
++  unsigned int k = 0;
++  unsigned int t = 0;
++
++  /* Divide vectors into GROUP parts.  And permute every NUM_EACH vectors
++     together.  */
++  for (k = 0; k < group; k++)
++    {
++      auto_vec dr_chain_transposed (num_each);
++      auto_vec result_chain_transposed (num_each);
++      for (t = k; t < dr_chain.length (); t = t + group)
++	{
++	  dr_chain_transposed.quick_push (dr_chain[t]);
++	}
++      vect_permute_store_chain (vinfo, dr_chain_transposed, num_each,
++				stmt_info, gsi, &result_chain_transposed);
++      for (t = 0; t < num_each; t++)
++	{
++	  result_chain->quick_push (result_chain_transposed[t]);
++	}
++    }
++}
++
++/* Function transpose_oprnd_store
++
++    Calculate the transposed results from VEC_OPRNDS (VEC_STMT)
++    for vectorizable_store.  */
++
++static void
++transpose_oprnd_store (vec_info *vinfo, vecvec_oprnds,
++		       vec *result_chain, unsigned int vec_num,
++		       unsigned int const_nunits, unsigned int array_num,
++		       stmt_vec_info first_stmt_info,
++		       gimple_stmt_iterator *gsi)
++{
++  unsigned int group_for_transform = 0;
++  unsigned int num_each = 0;
++
++  /* Transpose back for vec_oprnds.  */
++  /* vec = {vec1, vec2, ...}  */
++  if (array_num < const_nunits
++      && const_nunits % array_num == 0)
++    {
++      vect_transpose_store_chain (vinfo, vec_oprnds,
++				  vec_num, array_num,
++				  first_stmt_info,
++				  gsi, result_chain);
++    }
++   /* vec1 = {vec_part1}, vec2 = {vec_part2}, ...  */
++  else if (array_num >= const_nunits
++	   && array_num % const_nunits == 0)
++    {
++      group_for_transform = array_num / const_nunits;
++      num_each = vec_oprnds.length () / group_for_transform;
++      vect_permute_store_chains (vinfo, vec_oprnds,
++				 num_each, first_stmt_info,
++				 gsi, result_chain,
++				 group_for_transform);
++    }
++  else
++    {
++      gcc_unreachable ();
++    }
++}
++
++static dr_vec_info *
++get_dr_info (stmt_vec_info stmt_info)
++{
++  dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
++  if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
++    {
++      SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
++    }
++  return dr_info;
++}
++
++static unsigned
++dr_align_vect_store (vec_info *vinfo, dr_vec_info *cur_first_dr_info,
++		     tree vectype, unsigned HOST_WIDE_INT &align)
++{
++  unsigned misalign = 0;
++  align = known_alignment (DR_TARGET_ALIGNMENT (cur_first_dr_info));
++  if (aligned_access_p (cur_first_dr_info, vectype))
++    {
++      return misalign;
++    }
++  else if (cur_first_dr_info->misalignment == -1)
++    {
++      align = dr_alignment (vect_dr_behavior (vinfo, cur_first_dr_info));
++    }
++  else
++    {
++      misalign = cur_first_dr_info->misalignment;
++    }
++  return misalign;
++}
++
++static void
++add_new_stmt_vect_store (vec_info *vinfo, tree vectype, tree dataref_ptr,
++			 tree dataref_offset, tree ref_type,
++			 dr_vec_info *cur_first_dr_info, tree vec_oprnd,
++			 gimple_stmt_iterator *gsi, stmt_vec_info stmt_info)
++{
++  /* Data align.  */
++  unsigned HOST_WIDE_INT align;
++  unsigned misalign = dr_align_vect_store (vinfo, cur_first_dr_info,
++					   vectype, align);
++
++  if (dataref_offset == NULL_TREE && TREE_CODE (dataref_ptr) == SSA_NAME)
++    {
++      set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, misalign);
++    }
++
++  /* Get data_ref.  */
++  tree offset = dataref_offset ? dataref_offset : build_int_cst (ref_type, 0);
++  tree data_ref = fold_build2 (MEM_REF, vectype, dataref_ptr, offset);
++  if (aligned_access_p (cur_first_dr_info, vectype))
++    {
++      ;
++    }
++  else if (cur_first_dr_info->misalignment == -1)
++    {
++      TREE_TYPE (data_ref) = build_aligned_type (TREE_TYPE (data_ref),
++						 align * BITS_PER_UNIT);
++    }
++  else
++    {
++      tree elem_type = TREE_TYPE (vectype);
++      TREE_TYPE (data_ref) = build_aligned_type (TREE_TYPE (data_ref),
++						 TYPE_ALIGN (elem_type));
++    }
++  /* Add new stmt.  */
++  vect_copy_ref_info (data_ref, DR_REF (cur_first_dr_info->dr));
++  gassign *new_stmt = gimple_build_assign (data_ref, vec_oprnd);
++  vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi, true);
++}
+ 
+ /* Function vectorizable_store.
+ 
+@@ -8333,6 +8664,16 @@ vectorizable_store (vec_info *vinfo,
+ 					   &vec_offsets);
+ 	      vec_offset = vec_offsets[0];
+ 	    }
++	  /* If the stmt_info needs transposed recovery, dataref_ptr
++	     will be calculated later.  */
++	  else if (memory_access_type == VMAT_CONTIGUOUS
++		   && is_a  (vinfo)
++		   && STMT_VINFO_GROUPED_ACCESS (stmt_info)
++		   && DR_GROUP_SLP_TRANSPOSE (
++			DR_GROUP_FIRST_ELEMENT (stmt_info)))
++	    {
++	      dataref_ptr = NULL_TREE;
++	    }
+ 	  else
+ 	    dataref_ptr
+ 	      = vect_create_data_ref_ptr (vinfo, first_stmt_info, aggr_type,
+@@ -8423,6 +8764,75 @@ vectorizable_store (vec_info *vinfo,
+ 	}
+       else
+ 	{
++	  /* group_size: the size of group after transposing and merging.
++	     group_size_b: the size of group before transposing and merging,
++			 and only group_size_b >= const_nunits is supported.
++	     array_num: the number of arrays.
++	     const_nunits: TYPE_VECTOR_SUBPARTS (vectype).
++	     ncontinues: group_size_b / const_nunits, it means the number of
++			 times an array is stored in memory.  */
++	  if (slp && is_a  (vinfo)
++	      && STMT_VINFO_GROUPED_ACCESS (stmt_info)
++	      && DR_GROUP_SLP_TRANSPOSE (DR_GROUP_FIRST_ELEMENT (stmt_info)))
++	    {
++	      if (dump_enabled_p ())
++		{
++		  dump_printf_loc (MSG_NOTE, vect_location,
++				   "vectorizable_store for slp transpose.\n");
++		}
++	      /* Transpose back for grouped stores.  */
++	      vect_transform_back_slp_grouped_stores (bb_vinfo,
++						      first_stmt_info);
++
++	      result_chain.create (vec_oprnds.length ());
++	      unsigned int const_nunits = nunits.to_constant ();
++	      unsigned int group_size_b = DR_GROUP_SIZE_TRANS (first_stmt_info);
++	      unsigned int array_num = group_size / group_size_b;
++	      transpose_oprnd_store (vinfo, vec_oprnds, &result_chain, vec_num,
++				     const_nunits, array_num,
++				     first_stmt_info, gsi);
++
++	      /* For every store group, not for every vec, because transposing
++		 and merging have changed the data reference access.  */
++	      gcc_assert (group_size_b >= const_nunits);
++	      unsigned int ncontinues = group_size_b / const_nunits;
++
++	      unsigned int k = 0;
++	      for (i = 0; i < array_num; i++)
++		{
++		  stmt_vec_info first_stmt_b;
++		  BB_VINFO_GROUPED_STORES (vinfo).iterate (i, &first_stmt_b);
++		  bool simd_lane_access_p
++			= STMT_VINFO_SIMD_LANE_ACCESS_P (first_stmt_b) != 0;
++		  tree ref_type = get_group_alias_ptr_type (first_stmt_b);
++		  dataref_ptr = vect_create_data_ref_ptr (
++				 vinfo, first_stmt_b, aggr_type,
++				 simd_lane_access_p ? loop : NULL,
++				 offset, &dummy, gsi, &ptr_incr,
++				 simd_lane_access_p, bump);
++		  dr_vec_info *cur_first_dr_info = get_dr_info (first_stmt_b);
++		  for (unsigned int t = 0; t < ncontinues; t++)
++		    {
++		      vec_oprnd = result_chain[k];
++		      k++;
++		      if (t > 0)
++			{
++			  /* Bump the vector pointer.  */
++			  dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr,
++							 ptr_incr, gsi,
++							 first_stmt_b, bump);
++			}
++		      add_new_stmt_vect_store (vinfo, vectype, dataref_ptr,
++					       dataref_offset, ref_type,
++					       cur_first_dr_info, vec_oprnd,
++					       gsi, first_stmt_b);
++		    }
++		}
++	      oprnds.release ();
++	      result_chain.release ();
++	      vec_oprnds.release ();
++	      return true;
++	    }
+ 	  new_stmt = NULL;
+ 	  if (grouped_store)
+ 	    {
+@@ -8719,6 +9129,451 @@ hoist_defs_of_uses (stmt_vec_info stmt_info, class loop *loop)
+   return true;
+ }
+ 
++static tree
++calculate_new_type (tree vectype, unsigned int const_nunits,
++		    unsigned int group_size_b, unsigned int &nloads,
++		    unsigned int &ncontinues, tree &lvectype)
++{
++  tree ltype = TREE_TYPE (vectype);
++  /* nloads is the number of ARRAYs in a vector.
++     vectemp = {a[], b[], ...}  */
++  if (group_size_b < const_nunits)
++    {
++      tree ptype;
++      tree vtype
++	= vector_vector_composition_type (vectype,
++					  const_nunits / group_size_b,
++					  &ptype);
++      if (vtype != NULL_TREE)
++	{
++	  nloads = const_nunits / group_size_b;
++	  lvectype = vtype;
++	  ltype = ptype;
++	  ncontinues = 1;
++	}
++    }
++  /* ncontinues is the number of vectors from an ARRAY.
++     vectemp1 = {a[0], a[1], ...}
++     ...
++     vectempm = {a[k], a[k+1], ...}  */
++  else
++    {
++      nloads = 1;
++      ltype = vectype;
++      ncontinues = group_size_b / const_nunits;
++    }
++  ltype = build_aligned_type (ltype, TYPE_ALIGN (TREE_TYPE (vectype)));
++  return ltype;
++}
++
++static void
++generate_old_load_permutations (slp_tree slp_node, unsigned int group_size,
++				vec &old_load_permutation)
++{
++  /* Generate the old load permutations from the slp_node.  */
++  unsigned i = 0;
++  unsigned k = 0;
++
++  /* If SLP_NODE has load_permutation, we copy it to old_load_permutation.
++     Otherwise, we generate a permutation sequentially.  */
++  if (SLP_TREE_LOAD_PERMUTATION (slp_node).exists ())
++    {
++      FOR_EACH_VEC_ELT (SLP_TREE_LOAD_PERMUTATION (slp_node), i, k)
++	{
++	  old_load_permutation.safe_push (k);
++	}
++    }
++  else
++    {
++      for (unsigned i = 0; i < group_size; i++)
++	{
++	  old_load_permutation.safe_push (i);
++	}
++    }
++}
++
++static void
++generate_new_load_permutation_mapping (unsigned slp_node_length,
++				       vec &group_idx,
++				       const vec &load_permutation,
++				       unsigned int group_size_b,
++				       unsigned &new_group_size,
++				       vec &group_from)
++{
++  /* group_num_vec: only stores the group_loads IDs which are calculated from
++     load_permutation.  */
++  auto_vec group_num_vec;
++
++  /* Calculate which group_loads are the stmts in SLP_NODE from.  */
++  unsigned i = 0;
++  unsigned k = 0;
++  FOR_EACH_VEC_ELT (load_permutation, i, k)
++    {
++      unsigned int t0 = k / group_size_b;
++      if (!group_num_vec.contains (t0))
++	{
++	  group_num_vec.safe_push (t0);
++	}
++      group_from.safe_push (t0);
++    }
++  group_num_vec.qsort (cmp_for_group_num);
++  /* n_groups: the number of group_loads.  */
++  unsigned int n_groups = group_num_vec.length ();
++  new_group_size = n_groups * group_size_b;
++  for (i = 0; i < n_groups; i++)
++    {
++      group_idx.safe_push (group_num_vec[i] * group_size_b);
++    }
++  /* A new mapping from group_ind_vec to group_from.
++      For example:
++	Origin: group_from = {1,1,3,3,5,5,7,7};
++	After mapping: group_from = {0,0,1,1,2,2,2,2};  */
++  auto_vec group_ind_vec (n_groups);
++  for (k = 0; k < n_groups; k++)
++    {
++      group_ind_vec.safe_push (k);
++    }
++  for (i = 0; i < slp_node_length; i++)
++    {
++      for (k = 0; k < n_groups; k++)
++	{
++	  if (group_from[i] == group_num_vec[k])
++	    {
++	      group_from[i] = group_ind_vec[k];
++	      break;
++	    }
++	}
++    }
++}
++
++static void
++generate_new_load_permutation (vec &new_load_permutation,
++			       const vec &old_load_permutation,
++			       slp_tree slp_node, bool &this_load_permuted,
++			       const vec &group_from,
++			       unsigned int group_size_b)
++{
++  unsigned slp_node_length = SLP_TREE_SCALAR_STMTS (slp_node).length ();
++  /* Generate the new load permutation from the new mapping.  */
++  new_load_permutation.create (slp_node_length);
++  unsigned i = 0;
++  unsigned k = 0;
++  FOR_EACH_VEC_ELT (old_load_permutation, i, k)
++    {
++      /* t1 is the new permutation of k in the old permutation.
++	 t1 = base_address + offset:
++	 base_address = group_from[i] * group_size_b;
++	 offset = k % group_size_b.  */
++      unsigned int t1
++	= group_from[i] * group_size_b + k % group_size_b;
++      new_load_permutation.safe_push (t1);
++      if (t1 != k)
++	{
++	  this_load_permuted = true;
++	}
++    }
++}
++
++static bool
++is_slp_perm (bool slp_perm, bool this_load_permuted, poly_uint64 nunits,
++	     unsigned int group_size, stmt_vec_info first_stmt_info)
++{
++  /* Calculate the unrolling factor based on the smallest type.  */
++  poly_uint64 unrolling_factor
++    = exact_div (common_multiple (nunits, group_size), group_size);
++  /* The load requires permutation when unrolling exposes
++     a gap either because the group is larger than the SLP
++     group-size or because there is a gap between the groups.  */
++  if (!slp_perm && !this_load_permuted
++      && (known_eq (unrolling_factor, 1U)
++	  || (group_size == DR_GROUP_SIZE (first_stmt_info)
++	      && DR_GROUP_GAP (first_stmt_info) == 0)))
++    {
++      return false;
++    }
++  else
++    {
++      return true;
++    }
++}
++
++static void
++generate_load_permutation (slp_tree slp_node, unsigned &new_group_size,
++			   unsigned int group_size, unsigned int group_size_b,
++			   bool &this_load_permuted, vec &group_idx,
++			   vec &new_load_permutation)
++{
++  /* Generate the old load permutations from SLP_NODE.  */
++  vec old_load_permutation;
++  old_load_permutation.create (group_size);
++  generate_old_load_permutations (slp_node, group_size, old_load_permutation);
++
++  /* Calculate which group_loads are the stmts in SLP_NODE from.  */
++  unsigned slp_node_length = SLP_TREE_SCALAR_STMTS (slp_node).length ();
++  /* group_from: stores the group_loads ID for every stmt in SLP_NODE.  */
++  vec group_from;
++  group_from.create (slp_node_length);
++  generate_new_load_permutation_mapping (slp_node_length, group_idx,
++					 old_load_permutation,
++					 group_size_b, new_group_size,
++					 group_from);
++
++  /* Generate the new load permutation from the new mapping and calculate
++     this_load_permuted flag.  If this_load_permuted is true, we need execute
++     slp permutation by using new load permutation.  */
++  generate_new_load_permutation (new_load_permutation, old_load_permutation,
++				 slp_node, this_load_permuted, group_from,
++				 group_size_b);
++  old_load_permutation.release ();
++  group_from.release ();
++}
++
++static unsigned int
++dr_align_vect_load (vec_info *vinfo, dr_vec_info *cur_first_dr_info,
++		    tree vectype, unsigned HOST_WIDE_INT &align,
++		    enum dr_alignment_support alignment_support_scheme)
++{
++  unsigned int misalign = 0;
++
++  align = known_alignment (DR_TARGET_ALIGNMENT (cur_first_dr_info));
++  if (alignment_support_scheme == dr_aligned)
++    {
++      gcc_assert (aligned_access_p (cur_first_dr_info, vectype));
++    }
++  else if (cur_first_dr_info->misalignment == -1)
++    {
++      align = dr_alignment (vect_dr_behavior (vinfo, cur_first_dr_info));
++    }
++  else
++    {
++      misalign = cur_first_dr_info->misalignment;
++    }
++  return misalign;
++}
++
++static stmt_vec_info
++add_new_stmt_vect_load (vec_info *vinfo, tree vectype, tree dataref_ptr,
++			tree dataref_offset, tree ref_type, tree ltype,
++			gassign *(&new_stmt), dr_vec_info *cur_first_dr_info,
++			gimple_stmt_iterator *gsi, stmt_vec_info stmt_info)
++{
++  /* Data align.  */
++  int malign = dr_misalignment (cur_first_dr_info, vectype);
++  enum dr_alignment_support alignment_support_scheme
++	= vect_supportable_dr_alignment (vinfo, cur_first_dr_info,
++					 vectype, malign);
++  unsigned HOST_WIDE_INT align;
++  unsigned int misalign = dr_align_vect_load (vinfo, cur_first_dr_info,
++					      vectype, align,
++					      alignment_support_scheme);
++  if (dataref_offset == NULL_TREE && TREE_CODE (dataref_ptr) == SSA_NAME)
++    {
++      set_ptr_info_alignment (get_ptr_info (dataref_ptr), align, misalign);
++    }
++
++  /* Get data_ref.  */
++  tree offset = dataref_offset ? dataref_offset : build_int_cst (ref_type, 0);
++  tree data_ref = fold_build2 (MEM_REF, ltype, dataref_ptr, offset);
++  if (alignment_support_scheme == dr_aligned)
++    {
++      ;
++    }
++  else if (cur_first_dr_info->misalignment == -1)
++    {
++      TREE_TYPE (data_ref)
++	= build_aligned_type (TREE_TYPE (data_ref), align * BITS_PER_UNIT);
++    }
++  else
++    {
++      tree elem_type = TREE_TYPE (vectype);
++      TREE_TYPE (data_ref)
++	= build_aligned_type (TREE_TYPE (data_ref), TYPE_ALIGN (elem_type));
++    }
++
++  /* Add new stmt.  */
++  vect_copy_ref_info (data_ref, DR_REF (cur_first_dr_info->dr));
++  new_stmt = gimple_build_assign (make_ssa_name (ltype), data_ref);
++  vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi, true);
++  stmt_vec_info vec_stmt_info = vinfo->lookup_stmt (new_stmt);
++  return vec_stmt_info;
++}
++
++static void
++push_new_stmt_to_dr_chain (bool slp_perm, stmt_vec_info new_stmt_info,
++			   vec dr_chain, slp_tree slp_node)
++{
++  if (slp_perm)
++    dr_chain.quick_push (gimple_assign_lhs (new_stmt_info->stmt));
++  else
++    SLP_TREE_VEC_STMTS (slp_node).quick_push (new_stmt_info->stmt);
++}
++
++static stmt_vec_info
++get_first_stmt_info_before_transpose (stmt_vec_info first_stmt_info,
++				      unsigned int group_el,
++				      unsigned int group_size)
++{
++  stmt_vec_info last_stmt_info = first_stmt_info;
++  unsigned int count = 0;
++  gcc_assert (group_el < group_size);
++  while (count < group_el)
++    {
++      last_stmt_info = DR_GROUP_NEXT_ELEMENT (last_stmt_info);
++      count++;
++    }
++  return last_stmt_info;
++}
++
++static stmt_vec_info
++add_new_stmt_for_nloads_greater_than_one (vec_info *vinfo, tree lvectype,
++					  tree vectype,
++					  vec *v,
++					  stmt_vec_info stmt_info,
++					  gimple_stmt_iterator *gsi)
++{
++  tree vec_inv = build_constructor (lvectype, v);
++  tree new_temp = vect_init_vector (vinfo, stmt_info, vec_inv, lvectype, gsi, true);
++  stmt_vec_info new_stmt_info = vinfo->lookup_def (new_temp);
++  if (lvectype != vectype)
++    {
++      gassign *new_stmt = gimple_build_assign (make_ssa_name (vectype),
++					       VIEW_CONVERT_EXPR,
++					       build1 (VIEW_CONVERT_EXPR,
++						       vectype, new_temp));
++      vect_finish_stmt_generation (vinfo, stmt_info, new_stmt, gsi, true);
++      new_stmt_info = vinfo->lookup_stmt (new_stmt);
++    }
++  return new_stmt_info;
++}
++
++/* Function new_vect_stmt_for_nloads.
++
++   New a VEC_STMT when nloads Arrays are merged into a vector.
++
++   ncopies is the number of vectors that need to be loaded from memory.
++   nloads is the number of ARRAYs in a vector.
++   vectemp = {a[], b[], ...}  */
++
++static void
++new_vect_stmt_for_nloads (vec_info *vinfo, unsigned int ncopies,
++			  unsigned int nloads, const vec &group_idx,
++			  stmt_vec_info stmt_info, offset_info *offset_info,
++			  vectype_info *vectype_info,
++			  vect_memory_access_type memory_access_type,
++			  bool slp_perm, vec dr_chain, slp_tree slp_node,
++			  gimple_stmt_iterator *gsi)
++{
++  vec *v = NULL;
++  stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
++  unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
++  stmt_vec_info first_stmt_info_b = NULL;
++  stmt_vec_info new_stmt_info = NULL;
++  tree dataref_ptr = NULL_TREE;
++  tree dummy;
++  gimple *ptr_incr = NULL;
++  unsigned int n = 0;
++  for (unsigned int i = 0; i < ncopies; i++)
++    {
++      vec_alloc (v, nloads);
++      for (unsigned int t = 0; t < nloads; t++)
++	{
++	  first_stmt_info_b = get_first_stmt_info_before_transpose (
++				first_stmt_info, group_idx[n++], group_size);
++	  dr_vec_info* cur_first_dr_info = get_dr_info (first_stmt_info_b);
++	  tree bump = vect_get_data_ptr_increment (vinfo, cur_first_dr_info,
++						   vectype_info->ltype,
++						   memory_access_type);
++	  bool simd_lane_access_p
++		= STMT_VINFO_SIMD_LANE_ACCESS_P (first_stmt_info_b) != 0;
++
++	  /* Create dataref_ptr which is point to init_address.  */
++	  dataref_ptr = vect_create_data_ref_ptr (
++			 vinfo, first_stmt_info_b, vectype_info->ltype, NULL,
++			 offset_info->offset, &dummy, gsi, &ptr_incr,
++			 simd_lane_access_p, bump);
++
++	  gassign *new_stmt = NULL;
++	  new_stmt_info = add_new_stmt_vect_load (vinfo, vectype_info->vectype, dataref_ptr,
++				  offset_info->dataref_offset,
++				  vectype_info->ref_type,  vectype_info->ltype,
++				  new_stmt, cur_first_dr_info, gsi,
++				  first_stmt_info_b);
++
++	  CONSTRUCTOR_APPEND_ELT (v, NULL_TREE, gimple_assign_lhs (new_stmt));
++	}
++	new_stmt_info = add_new_stmt_for_nloads_greater_than_one (
++				 vinfo, vectype_info->lvectype,
++				 vectype_info->vectype, v,
++				 first_stmt_info_b, gsi);
++	push_new_stmt_to_dr_chain (slp_perm, new_stmt_info,
++				   dr_chain, slp_node);
++    }
++}
++
++/* Function new_vect_stmt_for_ncontinues.
++
++   New a VEC_STMTs when an Array is divided into several vectors.
++
++   n_groups is the number of ARRAYs.
++   ncontinues is the number of vectors from an ARRAY.
++   vectemp1 = {a[0], a[1], ...}
++   ...
++   vectempm = {a[k], a[k+1], ...}  */
++
++static void
++new_vect_stmt_for_ncontinues (vec_info *vinfo, unsigned int ncontinues,
++			      const vec<unsigned> &group_idx,
++			      stmt_vec_info stmt_info,
++			      offset_info* offset_info,
++			      vectype_info* vectype_info,
++			      vect_memory_access_type memory_access_type,
++			      bool slp_perm, vec<tree> &dr_chain,
++			      slp_tree slp_node,
++			      gimple_stmt_iterator *gsi)
++{
++  stmt_vec_info first_stmt_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
++  unsigned int group_size = DR_GROUP_SIZE (first_stmt_info);
++  stmt_vec_info new_stmt_info = NULL;
++  tree dataref_ptr = NULL_TREE;
++  tree dummy;
++  gimple *ptr_incr = NULL;
++  unsigned int n_groups = group_idx.length ();
++  for (unsigned int i = 0; i < n_groups; i++)
++    {
++      stmt_vec_info first_stmt_info_b = get_first_stmt_info_before_transpose (
++				first_stmt_info, group_idx[i], group_size);
++      dr_vec_info* cur_first_dr_info = get_dr_info (first_stmt_info_b);
++      tree bump = vect_get_data_ptr_increment (vinfo, cur_first_dr_info,
++			vectype_info->ltype, memory_access_type);
++      bool simd_lane_access_p
++		= STMT_VINFO_SIMD_LANE_ACCESS_P (first_stmt_info_b) != 0;
++      for (unsigned int k = 0; k < ncontinues; k++)
++	{
++	  /* Create dataref_ptr which is point to init_address.  */
++	  if (k == 0)
++	    {
++	      dataref_ptr = vect_create_data_ref_ptr (
++			 vinfo, first_stmt_info_b, vectype_info->ltype, NULL,
++			 offset_info->offset, &dummy, gsi, &ptr_incr,
++			 simd_lane_access_p, bump);
++	    }
++	  else
++	    {
++	      dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr,
++					     gsi, first_stmt_info_b, bump);
++	    }
++	  gassign *new_stmt = NULL;
++	  new_stmt_info = add_new_stmt_vect_load (vinfo, vectype_info->vectype, dataref_ptr,
++				  offset_info->dataref_offset,
++				  vectype_info->ref_type, vectype_info->ltype,
++				  new_stmt, cur_first_dr_info, gsi,
++				  first_stmt_info_b);
++	  push_new_stmt_to_dr_chain (slp_perm, new_stmt_info,
++	  		dr_chain, slp_node);
++	}
++    }
++}
++
+ /* vectorizable_load.
+ 
+    Check if STMT_INFO reads a non scalar data-ref (array/pointer/structure)
+@@ -9338,6 +10193,8 @@ vectorizable_load (vec_info *vinfo,
+       if (bb_vinfo)
+ 	first_stmt_info_for_drptr
+ 	  = vect_find_first_scalar_stmt_in_slp (slp_node);
++  // first_stmt_info_for_drptr = SLP_TREE_SCALAR_STMTS (slp_node)[0];
++
+ 
+       /* Check if the chain of loads is already vectorized.  */
+       if (STMT_VINFO_VEC_STMTS (first_stmt_info).exists ()
+@@ -9601,6 +10458,9 @@ vectorizable_load (vec_info *vinfo,
+     }
+   tree vec_mask = NULL_TREE;
+   poly_uint64 group_elt = 0;
++  unsigned new_group_size = 0;
++  vec<unsigned> new_load_permutation;
++
+   for (j = 0; j < ncopies; j++)
+     {
+       /* 1. Create the vector or array pointer update chain.  */
+@@ -9621,6 +10481,15 @@ vectorizable_load (vec_info *vinfo,
+ 	      dataref_ptr = unshare_expr (DR_BASE_ADDRESS (first_dr_info->dr));
+ 	      dataref_offset = build_int_cst (ref_type, 0);
+ 	    }
++	  /* If the stmt_info need to be transposed recovery, dataref_ptr
++	     will be calculated later.  */
++	  else if (slp && is_a <bb_vec_info> (vinfo)
++		   && STMT_VINFO_GROUPED_ACCESS (stmt_info)
++		   && DR_GROUP_SLP_TRANSPOSE (
++			DR_GROUP_FIRST_ELEMENT (stmt_info)))
++	    {
++	      dataref_ptr = NULL_TREE;
++	    }
+ 	  else if (diff_first_stmt_info)
+ 	    {
+ 	      dataref_ptr
+@@ -9731,6 +10600,63 @@ vectorizable_load (vec_info *vinfo,
+ 	  /* Record that VEC_ARRAY is now dead.  */
+ 	  vect_clobber_variable (vinfo, stmt_info, gsi, vec_array);
+ 	}
++      else if (slp && is_a <bb_vec_info> (vinfo)
++	       && STMT_VINFO_GROUPED_ACCESS (stmt_info)
++	       && DR_GROUP_SLP_TRANSPOSE (DR_GROUP_FIRST_ELEMENT (stmt_info)))
++	{
++	  if (dump_enabled_p ())
++	    {
++	      dump_printf_loc (MSG_NOTE, vect_location,
++			       "vectorizable_load for slp transpose.\n");
++	    }
++	  /* group_size: the size of group after merging.
++	     group_size_b: the size of group before merging.
++	     const_nunits: TYPE_VECTOR_SUBPARTS (vectype), it is the number of
++		elements in a vector.
++	     nloads: const_nunits / group_size_b or 1, it means the number
++		of ARRAYs in a vector.
++	     ncontinues: group_size_b / const_nunits or 1, it means the number
++		of vectors from an ARRAY.  */
++	  unsigned int group_size_b = DR_GROUP_SIZE_TRANS (first_stmt_info);
++	  unsigned int const_nunits = nunits.to_constant ();
++	  unsigned int nloads = const_nunits;
++	  unsigned int ncontinues = group_size_b;
++	  tree lvectype = vectype;
++	  tree ltype = calculate_new_type (vectype, const_nunits,
++					   group_size_b, nloads,
++					   ncontinues, lvectype);
++	  bool this_load_permuted = false;
++	  auto_vec<unsigned> group_idx;
++	  generate_load_permutation (slp_node, new_group_size, group_size,
++				     group_size_b, this_load_permuted,
++				     group_idx, new_load_permutation);
++	  slp_perm = is_slp_perm (slp_perm, this_load_permuted, nunits,
++				  group_size, first_stmt_info);
++
++	  /* ncopies: the number of vectors that need to be loaded from
++		 memory.  */
++	  unsigned int ncopies = new_group_size / const_nunits;
++	  offset_info offset_info = {offset, NULL_TREE, dataref_offset};
++	  vectype_info vectype_info = {vectype, ltype, lvectype, ref_type};
++	  if (slp_perm)
++	    {
++	       dr_chain.create (ncopies);
++	    }
++	  if (nloads > 1 && ncontinues == 1)
++	    {
++	      new_vect_stmt_for_nloads (vinfo, ncopies, nloads, group_idx,
++					stmt_info, &offset_info, &vectype_info,
++					memory_access_type, slp_perm, dr_chain,
++					slp_node, gsi);
++	    }
++	  else
++	    {
++	      new_vect_stmt_for_ncontinues (vinfo, ncontinues, group_idx,
++					    stmt_info, &offset_info,
++					    &vectype_info, memory_access_type,
++					    slp_perm, dr_chain, slp_node, gsi);
++	    }
++	}
+       else
+ 	{
+ 	  for (i = 0; i < vec_num; i++)
+@@ -10177,7 +11103,32 @@ vectorizable_load (vec_info *vinfo,
+       if (slp && !slp_perm)
+ 	continue;
+ 
+-      if (slp_perm)
++      /* Using the new load permutation to generate vector permute statements
++	 from a list of loads in DR_CHAIN.  */
++      if (slp && slp_perm && is_a <bb_vec_info> (vinfo)
++	  && STMT_VINFO_GROUPED_ACCESS (stmt_info)
++	  && DR_GROUP_SLP_TRANSPOSE (DR_GROUP_FIRST_ELEMENT (stmt_info)))
++	{
++	  unsigned n_perms;
++	  stmt_vec_info stmt_info_ = SLP_TREE_SCALAR_STMTS (slp_node)[0];
++	  unsigned int old_size = DR_GROUP_SIZE (stmt_info);
++	  DR_GROUP_SIZE (stmt_info_) = new_group_size;
++	  vec<unsigned> old_load_permutation
++			  = SLP_TREE_LOAD_PERMUTATION (slp_node);
++	  SLP_TREE_LOAD_PERMUTATION (slp_node) = new_load_permutation;
++	  bool perm_load_success = vect_transform_slp_perm_load (
++				     vinfo, slp_node, dr_chain, gsi, vf,
++				     false, &n_perms);
++	  DR_GROUP_SIZE (stmt_info_) = old_size;
++	  SLP_TREE_LOAD_PERMUTATION (slp_node) = old_load_permutation;
++	  new_load_permutation.release ();
++	  if (!perm_load_success)
++	    {
++	      dr_chain.release ();
++	      return false;
++	    }
++	}
++      else if (slp_perm)
+         {
+ 	  unsigned n_perms;
+ 	  /* For SLP we know we've seen all possible uses of dr_chain so
+diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
+index 642eb0aeb..e13bc6c99 100644
+--- a/gcc/tree-vectorizer.h
++++ b/gcc/tree-vectorizer.h
+@@ -412,6 +412,21 @@ public:
+   vec<ddr_p> ddrs;
+ };
+ 
++/* Information about offset in vectorizable_load.  */
++struct offset_info {
++  tree offset;
++  tree byte_offset;
++  tree dataref_offset;
++};
++
++/* Information about vectype in vectorizable_load.  */
++struct vectype_info {
++  tree vectype;
++  tree ltype;
++  tree lvectype;
++  tree ref_type;
++};
++
+ /* Vectorizer state common between loop and basic-block vectorization.  */
+ class vec_info {
+ public:
+@@ -455,6 +470,14 @@ public:
+      stmt in the chain.  */
+   auto_vec<stmt_vec_info> grouped_stores;
+ 
++  /* All interleaving chains of loads, represented by the first
++     stmt in the chain.  */
++  auto_vec<stmt_vec_info> grouped_loads;
++
++  /* All interleaving chains of stores (before transposed), represented by all
++     stmt in the chain.  */
++  auto_vec<vec<stmt_vec_info> > scalar_stores;
++
+   /* The set of vector modes used in the vectorized region.  */
+   mode_set used_vector_modes;
+ 
+@@ -899,6 +922,8 @@ public:
+ #define LOOP_VINFO_CHECK_NONZERO(L)        (L)->check_nonzero
+ #define LOOP_VINFO_LOWER_BOUNDS(L)         (L)->lower_bounds
+ #define LOOP_VINFO_GROUPED_STORES(L)       (L)->grouped_stores
++#define LOOP_VINFO_GROUPED_LOADS(L)	    (L)->grouped_loads
++#define LOOP_VINFO_SCALAR_STORES(L)	    (L)->scalar_stores
+ #define LOOP_VINFO_SLP_INSTANCES(L)        (L)->slp_instances
+ #define LOOP_VINFO_SLP_UNROLLING_FACTOR(L) (L)->slp_unrolling_factor
+ #define LOOP_VINFO_REDUCTIONS(L)           (L)->reductions
+@@ -982,6 +1007,25 @@ public:
+   vec<basic_block> bbs;
+ 
+   vec<slp_root> roots;
++
++  /* True, if bb_vinfo can goto vect_analyze_slp.  */
++  bool before_slp;
++
++  /* True, if bb_vinfo is a transposed version.  */
++  bool transposed;
++
++  /* The number of transposed groups.  */
++  int transposed_group;
++
++  /* The cost of the scalar iterations.  */
++  int scalar_cost;
++
++  /* The cost of the vector prologue and epilogue, including peeled
++     iterations and set-up code.  */
++  int vec_outside_cost;
++
++  /* The cost of the vector loop body.  */
++  int vec_inside_cost;
+ } *bb_vec_info;
+ 
+ #define BB_VINFO_BB(B)               (B)->bb
+@@ -989,6 +1033,14 @@ public:
+ #define BB_VINFO_SLP_INSTANCES(B)    (B)->slp_instances
+ #define BB_VINFO_DATAREFS(B)         (B)->shared->datarefs
+ #define BB_VINFO_DDRS(B)             (B)->shared->ddrs
++#define BB_VINFO_GROUPED_LOADS(B)    (B)->grouped_loads
++#define BB_VINFO_SCALAR_STORES(B)    (B)->scalar_stores
++#define BB_VINFO_VEC_OUTSIDE_COST(B) (B)->vec_outside_cost
++#define BB_VINFO_VEC_INSIDE_COST(B)  (B)->vec_inside_cost
++#define BB_VINFO_SCALAR_COST(B)      (B)->scalar_cost
++#define BB_VINFO_SLP_TRANSPOSED(B)   (B)->transposed
++#define BB_VINFO_BEFORE_SLP(B)       (B)->before_slp
++#define BB_VINFO_TRANS_GROUPS(B)     (B)->transposed_group
+ 
+ /*-----------------------------------------------------------------*/
+ /* Info on vectorized defs.                                        */
+@@ -1219,6 +1271,17 @@ public:
+   stmt_vec_info next_element;
+   /* The size of the group.  */
+   unsigned int size;
++
++  /* The size of the group before transposed.  */
++  unsigned int size_before_transpose;
++
++  /* If true, the stmt_info is slp transposed.  */
++  bool slp_transpose;
++
++  /* Mark the group store number for rebuild interleaving chain
++     during transpose phase.  Value -1 represents unable to transpose.  */
++  int group_number;
++
+   /* For stores, number of stores from this group seen. We vectorize the last
+      one.  */
+   unsigned int store_count;
+@@ -1226,6 +1289,9 @@ public:
+      is 1.  */
+   unsigned int gap;
+ 
++  /* The gap before transposed.  */
++  unsigned int gap_before_transpose;
++
+   /* The minimum negative dependence distance this stmt participates in
+      or zero if none.  */
+   unsigned int min_neg_dist;
+@@ -1427,6 +1493,12 @@ struct gather_scatter_info {
+ #define STMT_VINFO_SLP_VECT_ONLY(S)     (S)->slp_vect_only_p
+ #define STMT_VINFO_SLP_VECT_ONLY_PATTERN(S) (S)->slp_vect_pattern_only_p
+ 
++#define DR_GROUP_SLP_TRANSPOSE(S) \
++  (gcc_checking_assert ((S)->dr_aux.dr), (S)->slp_transpose)
++#define DR_GROUP_SIZE_TRANS(S) \
++  (gcc_checking_assert ((S)->dr_aux.dr), (S)->size_before_transpose)
++#define DR_GROUP_NUMBER(S) \
++  (gcc_checking_assert ((S)->dr_aux.dr), (S)->group_number)
+ #define DR_GROUP_FIRST_ELEMENT(S) \
+   (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
+ #define DR_GROUP_NEXT_ELEMENT(S) \
+@@ -1437,6 +1509,8 @@ struct gather_scatter_info {
+   (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
+ #define DR_GROUP_GAP(S) \
+   (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
++#define DR_GROUP_GAP_TRANS(S) \
++  (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap_before_transpose)
+ 
+ #define REDUC_GROUP_FIRST_ELEMENT(S) \
+   (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
+@@ -2033,6 +2107,17 @@ vect_get_scalar_dr_size (dr_vec_info *dr_info)
+   return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
+ }
+ 
++/* Compare two unsigned int A and B.
++   Sorting them in ascending order.  */
++
++static inline int
++cmp_for_group_num (const void *a_, const void *b_)
++{
++  unsigned int a = *(unsigned int *)const_cast<void *> (a_);
++  unsigned int b = *(unsigned int *)const_cast<void *> (b_);
++  return a < b ? -1 : 1;
++}
++
+ /* Return true if LOOP_VINFO requires a runtime check for whether the
+    vector loop is profitable.  */
+ 
+@@ -2152,7 +2237,7 @@ record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
+ 
+ extern void vect_finish_replace_stmt (vec_info *, stmt_vec_info, gimple *);
+ extern void vect_finish_stmt_generation (vec_info *, stmt_vec_info, gimple *,
+-					 gimple_stmt_iterator *);
++					 gimple_stmt_iterator *,bool transpose=false);
+ extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *);
+ extern tree vect_get_store_rhs (stmt_vec_info);
+ void vect_get_vec_defs_for_operand (vec_info *vinfo, stmt_vec_info, unsigned,
+@@ -2168,7 +2253,7 @@ void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned,
+ 			tree = NULL, vec * = NULL, tree = NULL,
+ 			tree = NULL, vec * = NULL, tree = NULL);
+ extern tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree,
+-                              gimple_stmt_iterator *);
++			      gimple_stmt_iterator *, bool transpose=false);
+ extern tree vect_get_slp_vect_def (slp_tree, unsigned);
+ extern bool vect_transform_stmt (vec_info *, stmt_vec_info,
+ 				 gimple_stmt_iterator *,
+@@ -2235,6 +2320,9 @@ extern bool vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
+ extern void vect_permute_store_chain (vec_info *, vec &,
+ 				      unsigned int, stmt_vec_info,
+ 				      gimple_stmt_iterator *, vec *);
++extern void vect_transpose_store_chain (vec_info *, vec<tree>, unsigned int,
++					unsigned int, stmt_vec_info,
++					gimple_stmt_iterator *, vec *);
+ extern tree vect_setup_realignment (vec_info *,
+ 				    stmt_vec_info, gimple_stmt_iterator *,
+ 				    tree *, enum dr_alignment_support, tree,
+@@ -2262,7 +2350,8 @@ extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
+ 				  enum tree_code);
+ extern bool needs_fold_left_reduction_p (tree, code_helper);
+ /* Drive for loop analysis stage.  */
+-extern opt_loop_vec_info vect_analyze_loop (class loop *, vec_info_shared *);
++extern opt_loop_vec_info vect_analyze_loop (class loop *, vec_info_shared *,
++					    bool result_only_p = false);
+ extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
+ extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
+ 					 tree *, bool);
+@@ -2331,6 +2420,7 @@ extern bool vect_transform_slp_perm_load (vec_info *, slp_tree, const vec
+ 					  gimple_stmt_iterator *, poly_uint64,
+ 					  bool, unsigned *,
+ 					  unsigned * = nullptr, bool = false);
++extern void vect_transform_back_slp_grouped_stores (bb_vec_info, stmt_vec_info);
+ extern bool vect_slp_analyze_operations (vec_info *);
+ extern void vect_schedule_slp (vec_info *, const vec &);
+ extern opt_result vect_analyze_slp (vec_info *, unsigned);
+-- 
+2.33.0
+
diff --git a/0099-LoongArch-testsuite-Give-up-the-detection-of-the-gcc.patch b/0099-LoongArch-testsuite-Give-up-the-detection-of-the-gcc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6243babb884670f7aad61db8f541e870236ea878
--- /dev/null
+++ b/0099-LoongArch-testsuite-Give-up-the-detection-of-the-gcc.patch
@@ -0,0 +1,80 @@
+From df18d0c85049402b8f2f44c3c4e013a0b6d91cee Mon Sep 17 00:00:00 2001
+From: chenxiaolong <chenxiaolong@loongson.cn>
+Date: Fri, 5 Jan 2024 11:43:29 +0800
+Subject: [PATCH 099/188] LoongArch: testsuite:Give up the detection of the
+ gcc.dg/fma-{3, 4, 6, 7}.c file.
+
+On the LoongArch architecture, the above four test cases need to be waived
+during testing. There are two situations:
+
+1. The function of fma-{3,6}.c test is to find the value of c-a*b, but on
+the LoongArch architecture, the function of the existing fnmsub instruction
+is to find the value of -(a*b - c);
+
+2. The function of fma-{4,7}.c test is to find the value of -(a*b)-c, but on
+the LoongArch architecture, the function of the existing fnmadd instruction
+is to find the value of -(a*b + c);
+
+Through the analysis of the above two cases, there will be positive and
+negative zero inequality.
+
+gcc/testsuite/ChangeLog
+
+	* gcc.dg/fma-3.c: The intermediate file corresponding to the
+	function does not produce the corresponding FNMA symbol, so the test
+	rules should be skipped when testing.
+	* gcc.dg/fma-4.c: The intermediate file corresponding to the
+	function does not produce the corresponding FNMS symbol, so skip the
+	test rules when testing.
+	* gcc.dg/fma-6.c: The cause is the same as fma-3.c.
+	* gcc.dg/fma-7.c: The cause is the same as fma-4.c
+---
+ gcc/testsuite/gcc.dg/fma-3.c | 2 +-
+ gcc/testsuite/gcc.dg/fma-4.c | 2 +-
+ gcc/testsuite/gcc.dg/fma-6.c | 2 +-
+ gcc/testsuite/gcc.dg/fma-7.c | 2 +-
+ 4 files changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.dg/fma-3.c b/gcc/testsuite/gcc.dg/fma-3.c
+index 699aa2c95..6649b54b6 100644
+--- a/gcc/testsuite/gcc.dg/fma-3.c
++++ b/gcc/testsuite/gcc.dg/fma-3.c
+@@ -12,4 +12,4 @@ f2 (double a, double b, double c)
+   return c - a * b;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times { = \.FNMA \(} 2 "widening_mul" { target scalar_all_fma } } } */
++/* { dg-final { scan-tree-dump-times { = \.FNMA \(} 2 "widening_mul" { target { scalar_all_fma && { ! loongarch*-*-* } } } } } */
+diff --git a/gcc/testsuite/gcc.dg/fma-4.c b/gcc/testsuite/gcc.dg/fma-4.c
+index bff928f1f..f1701c196 100644
+--- a/gcc/testsuite/gcc.dg/fma-4.c
++++ b/gcc/testsuite/gcc.dg/fma-4.c
+@@ -12,4 +12,4 @@ f2 (double a, double b, double c)
+   return -(a * b) - c;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times { = \.FNMS \(} 2 "widening_mul" { target scalar_all_fma } } } */
++/* { dg-final { scan-tree-dump-times { = \.FNMS \(} 2 "widening_mul" { target { scalar_all_fma && { ! loongarch*-*-* } } } } } */
+diff --git a/gcc/testsuite/gcc.dg/fma-6.c b/gcc/testsuite/gcc.dg/fma-6.c
+index 87258cec4..9e49b62b6 100644
+--- a/gcc/testsuite/gcc.dg/fma-6.c
++++ b/gcc/testsuite/gcc.dg/fma-6.c
+@@ -64,4 +64,4 @@ f10 (double a, double b, double c)
+   return -__builtin_fma (a, b, -c);
+ }
+ 
+-/* { dg-final { scan-tree-dump-times { = \.FNMA \(} 14 "optimized" { target scalar_all_fma } } } */
++/* { dg-final { scan-tree-dump-times { = \.FNMA \(} 14 "optimized" { target { scalar_all_fma && { ! loongarch*-*-* } } } } } */
+diff --git a/gcc/testsuite/gcc.dg/fma-7.c b/gcc/testsuite/gcc.dg/fma-7.c
+index f409cc8ee..86aacad7b 100644
+--- a/gcc/testsuite/gcc.dg/fma-7.c
++++ b/gcc/testsuite/gcc.dg/fma-7.c
+@@ -64,4 +64,4 @@ f10 (double a, double b, double c)
+   return -__builtin_fma (a, b, c);
+ }
+ 
+-/* { dg-final { scan-tree-dump-times { = \.FNMS \(} 14 "optimized" { target scalar_all_fma } } } */
++/* { dg-final { scan-tree-dump-times { = \.FNMS \(} 14 "optimized" { target { scalar_all_fma && { ! loongarch*-*-* } } } } } */
+-- 
+2.43.0
+
diff --git a/0100-Add-hip09-machine-discribtion.patch b/0100-Add-hip09-machine-discribtion.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c2221d937ecabd58db19b1e0f7782a57a83ac638
--- /dev/null
+++ b/0100-Add-hip09-machine-discribtion.patch
@@ -0,0 +1,882 @@
+From d9131757175667d35e74d9ee84689039990af768 Mon Sep 17 00:00:00 2001
+From: xingyushuai <xingyushuai@huawei.com>
+Date: Fri, 3 Mar 2023 09:31:04 +0800
+Subject: [PATCH 001/157] Add hip09 machine discribtion
+
+Here is the patch introducing hip09 machine model
+for the scheduler.
+---
+ gcc/config/aarch64/aarch64-cores.def     |   1 +
+ gcc/config/aarch64/aarch64-cost-tables.h | 104 +++++
+ gcc/config/aarch64/aarch64-tune.md       |   2 +-
+ gcc/config/aarch64/aarch64.cc            | 109 +++++
+ gcc/config/aarch64/aarch64.md            |   1 +
+ gcc/config/aarch64/hip09.md              | 558 +++++++++++++++++++++++
+ 6 files changed, 774 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/config/aarch64/hip09.md
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 70b11eb80..a854bdb24 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -130,6 +130,7 @@ AARCH64_CORE("a64fx", a64fx, a64fx, 8_2A,  AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_F
+ 
+ /* HiSilicon ('H') cores. */
+ AARCH64_CORE("tsv110",  tsv110, tsv110, 8_2A,  AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_CRYPTO | AARCH64_FL_F16 | AARCH64_FL_AES | AARCH64_FL_SHA2, tsv110,   0x48, 0xd01, -1)
++AARCH64_CORE("hip09", hip09, hip09, 8_5A, AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_SVE | AARCH64_FL_I8MM | AARCH64_FL_F32MM | AARCH64_FL_F64MM | AARCH64_FL_PROFILE | AARCH64_FL_PREDRES, hip09, 0x48, 0xd02, 0x0)
+ 
+ /* ARMv8.3-A Architecture Processors.  */
+ 
+diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h
+index 48522606f..fc5a3cbe4 100644
+--- a/gcc/config/aarch64/aarch64-cost-tables.h
++++ b/gcc/config/aarch64/aarch64-cost-tables.h
+@@ -668,6 +668,110 @@ const struct cpu_cost_table a64fx_extra_costs =
+   }
+ };
+ 
++const struct cpu_cost_table hip09_extra_costs =
++{
++  /* ALU */
++  {
++    0,                 /* arith.  */
++    0,                 /* logical.  */
++    0,                 /* shift.  */
++    0,                 /* shift_reg.  */
++    COSTS_N_INSNS (1), /* arith_shift.  */
++    COSTS_N_INSNS (1), /* arith_shift_reg.  */
++    COSTS_N_INSNS (1), /* log_shift.  */
++    COSTS_N_INSNS (1), /* log_shift_reg.  */
++    0,                 /* extend.  */
++    COSTS_N_INSNS (1), /* extend_arith.  */
++    0,                 /* bfi.  */
++    0,                 /* bfx.  */
++    0,                 /* clz.  */
++    0,                 /* rev.  */
++    0,                 /* non_exec.  */
++    true               /* non_exec_costs_exec.  */
++  },
++
++  {
++    /* MULT SImode */
++    {
++      COSTS_N_INSNS (2),       /* simple.  */
++      COSTS_N_INSNS (2),       /* flag_setting.  */
++      COSTS_N_INSNS (2),       /* extend.  */
++      COSTS_N_INSNS (2),       /* add.  */
++      COSTS_N_INSNS (2),       /* extend_add.  */
++      COSTS_N_INSNS (11)       /* idiv.  */
++    },
++        /* MULT DImode */
++    {
++      COSTS_N_INSNS (3),       /* simple.  */
++      0,                       /* flag_setting (N/A).  */
++      COSTS_N_INSNS (3),       /* extend.  */
++      COSTS_N_INSNS (3),       /* add.  */
++      COSTS_N_INSNS (3),       /* extend_add.  */
++      COSTS_N_INSNS (19)       /* idiv.  */
++    }
++  },
++  /* LD/ST */
++  {
++    COSTS_N_INSNS (3),         /* load.  */
++    COSTS_N_INSNS (4),         /* load_sign_extend.  */
++    COSTS_N_INSNS (3),         /* ldrd.  */
++    COSTS_N_INSNS (3),         /* ldm_1st.  */
++    1,                         /* ldm_regs_per_insn_1st.  */
++    2,                         /* ldm_regs_per_insn_subsequent.  */
++    COSTS_N_INSNS (4),         /* loadf.  */
++    COSTS_N_INSNS (4),         /* loadd.  */
++    COSTS_N_INSNS (4),         /* load_unaligned.  */
++    0,                         /* store.  */
++    0,                         /* strd.  */
++    0,                         /* stm_1st.  */
++    1,                         /* stm_regs_per_insn_1st.  */
++    2,                         /* stm_regs_per_insn_subsequent.  */
++    0,                         /* storef.  */
++    0,                         /* stored.  */
++    COSTS_N_INSNS (1),         /* store_unaligned.  */
++    COSTS_N_INSNS (4),         /* loadv.  */
++    COSTS_N_INSNS (4)          /* storev.  */
++  },
++  {
++    /* FP SFmode */
++    {
++      COSTS_N_INSNS (10),      /* div.  */
++      COSTS_N_INSNS (4),       /* mult.  */
++      COSTS_N_INSNS (4),       /* mult_addsub.  */
++      COSTS_N_INSNS (4),       /* fma.  */
++      COSTS_N_INSNS (4),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      COSTS_N_INSNS (1),       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (1),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    },
++    /* FP DFmode */
++    {
++      COSTS_N_INSNS (17),      /* div.  */
++      COSTS_N_INSNS (4),       /* mult.  */
++      COSTS_N_INSNS (6),       /* mult_addsub.  */
++      COSTS_N_INSNS (6),       /* fma.  */
++      COSTS_N_INSNS (3),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      COSTS_N_INSNS (1),       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (1),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    }
++  },
++  /* Vector */
++  {
++    COSTS_N_INSNS (1)  /* alu.  */
++  }
++};
++
+ const struct cpu_cost_table ampere1_extra_costs =
+ {
+   /* ALU */
+diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
+index 9dc9adc70..238bb6e31 100644
+--- a/gcc/config/aarch64/aarch64-tune.md
++++ b/gcc/config/aarch64/aarch64-tune.md
+@@ -1,5 +1,5 @@
+ ;; -*- buffer-read-only: t -*-
+ ;; Generated automatically by gentune.sh from aarch64-cores.def
+ (define_attr "tune"
+-	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,demeter,neoversev2"
++	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,demeter,neoversev2"
+ 	(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 5537a537c..e9b3980c4 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -465,6 +465,22 @@ static const struct cpu_addrcost_table tsv110_addrcost_table =
+   0, /* imm_offset  */
+ };
+ 
++static const struct cpu_addrcost_table hip09_addrcost_table =
++{
++    {
++        1, /* hi  */
++        0, /* si  */
++        0, /* di  */
++        1, /* ti  */
++    },
++  0, /* pre_modify  */
++  0, /* post_modify  */
++  0, /* register_offset  */
++  1, /* register_sextend  */
++  1, /* register_zextend  */
++  0, /* imm_offset  */
++};
++
+ static const struct cpu_addrcost_table qdf24xx_addrcost_table =
+ {
+     {
+@@ -660,6 +676,16 @@ static const struct cpu_regmove_cost a64fx_regmove_cost =
+   2 /* FP2FP  */
+ };
+ 
++static const struct cpu_regmove_cost hip09_regmove_cost =
++{
++  1, /* GP2GP  */
++  /* Avoid the use of slow int<->fp moves for spilling by setting
++     their cost higher than memmov_cost.  */
++  2, /* GP2FP  */
++  3, /* FP2GP  */
++  2  /* FP2FP  */
++};
++
+ static const struct cpu_regmove_cost neoversen2_regmove_cost =
+ {
+   1, /* GP2GP  */
+@@ -947,6 +973,43 @@ static const struct cpu_vector_cost tsv110_vector_cost =
+   nullptr /* issue_info  */
+ };
+ 
++static const advsimd_vec_cost hip09_advsimd_vector_cost =
++{
++  2, /* int_stmt_cost  */
++  2, /* fp_stmt_cost  */
++  0, /* ld2_st2_permute_cost  */
++  0, /* ld3_st3_permute_cost  */
++  0, /* ld4_st4_permute_cost  */
++  2, /* permute_cost  */
++  3, /* reduc_i8_cost  */
++  3, /* reduc_i16_cost  */
++  3, /* reduc_i32_cost  */
++  3, /* reduc_i64_cost  */
++  3, /* reduc_f16_cost  */
++  3, /* reduc_f32_cost  */
++  3, /* reduc_f64_cost  */
++  3, /* store_elt_extra_cost  */
++  3, /* vec_to_scalar_cost  */
++  2, /* scalar_to_vec_cost  */
++  5, /* align_load_cost  */
++  5, /* unalign_load_cost  */
++  1, /* unalign_store_cost  */
++  1  /* store_cost  */
++};
++
++static const struct cpu_vector_cost hip09_vector_cost =
++{
++  1, /* scalar_int_stmt_cost  */
++  1, /* scalar_fp_stmt_cost  */
++  5, /* scalar_load_cost  */
++  1, /* scalar_store_cost  */
++  1, /* cond_taken_branch_cost  */
++  1, /* cond_not_taken_branch_cost  */
++  &hip09_advsimd_vector_cost, /* advsimd  */
++  nullptr, /* sve  */
++  nullptr /* issue_info  */
++};
++
+ static const advsimd_vec_cost cortexa57_advsimd_vector_cost =
+ {
+   2, /* int_stmt_cost  */
+@@ -1293,6 +1356,18 @@ static const cpu_prefetch_tune tsv110_prefetch_tune =
+   -1                    /* default_opt_level  */
+ };
+ 
++
++static const cpu_prefetch_tune hip09_prefetch_tune =
++{
++  0,                    /* num_slots  */
++  64,                   /* l1_cache_size  */
++  64,                   /* l1_cache_line_size  */
++  512,                  /* l2_cache_size  */
++  true,                 /* prefetch_dynamic_strides */
++  -1,                   /* minimum_stride */
++  -1                    /* default_opt_level  */
++};
++
+ static const cpu_prefetch_tune xgene1_prefetch_tune =
+ {
+   8,			/* num_slots  */
+@@ -1658,6 +1733,40 @@ static const struct tune_params tsv110_tunings =
+   &tsv110_prefetch_tune
+ };
+ 
++static const struct tune_params hip09_tunings =
++{
++  &hip09_extra_costs,
++  &hip09_addrcost_table,
++  &hip09_regmove_cost,
++  &hip09_vector_cost,
++  &generic_branch_cost,
++  &generic_approx_modes,
++  SVE_256, /* sve_width  */
++  { 4, /* load_int.  */
++    4, /* store_int.  */
++    4, /* load_fp.  */
++    4, /* store_fp.  */
++    4, /* load_pred.  */
++    4 /* store_pred.  */
++  }, /* memmov_cost.  */
++  4,    /* issue_rate  */
++  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
++   | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
++  "16", /* function_align.  */
++  "4",  /* jump_align.  */
++  "8",  /* loop_align.  */
++  2,    /* int_reassoc_width.  */
++  4,    /* fp_reassoc_width.  */
++  1,    /* vec_reassoc_width.  */
++  2,    /* min_div_recip_mul_sf.  */
++  2,    /* min_div_recip_mul_df.  */
++  0,    /* max_case_values.  */
++  tune_params::AUTOPREFETCHER_WEAK,     /* autoprefetcher_model.  */
++  (AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
++   | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT),     /* tune_flags.  */
++  &hip09_prefetch_tune
++};
++
+ static const struct tune_params xgene1_tunings =
+ {
+   &xgene1_extra_costs,
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index d24c8afcf..cf699e4c7 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -477,6 +477,7 @@
+ (include "thunderx2t99.md")
+ (include "tsv110.md")
+ (include "thunderx3t110.md")
++(include "hip09.md")
+ 
+ ;; -------------------------------------------------------------------
+ ;; Jumps and other miscellaneous insns
+diff --git a/gcc/config/aarch64/hip09.md b/gcc/config/aarch64/hip09.md
+new file mode 100644
+index 000000000..25428de9a
+--- /dev/null
++++ b/gcc/config/aarch64/hip09.md
+@@ -0,0 +1,558 @@
++;; hip09 pipeline description
++;; Copyright (C) 2023 Free Software Foundation, Inc.
++;;
++;;Contributed by Yushuai Xing
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_automaton "hip09")
++(define_automaton "hip09_ldst")
++(define_automaton "hip09_fsu")
++
++(define_attr "hip09_type"
++  "hip09_neon_abs, hip09_neon_fp_arith, hip09_neon_mul, hip09_neon_mla,
++   hip09_neon_dot, hip09_neon_fp_div, hip09_neon_fp_sqrt,
++   hip09_neon_ins, hip09_neon_load1, hip09_neon_load1_lanes,
++   hip09_neon_load2and4, hip09_neon_load3_3reg,
++   hip09_neon_load4_4reg, hip09_neon_store1and2,
++   hip09_neon_store1_1reg, hip09_neon_store1_2reg,
++   hip09_neon_store1_3reg, hip09_neon_store1_4reg,
++   hip09_neon_store3and4_lane, hip09_neon_store3_3reg,
++   hip09_neon_store4_4reg, unknown"
++  (cond [
++         (eq_attr "type" "neon_abs,neon_abs_q,neon_add,neon_add_q,\
++                  neon_neg,neon_neg_q,neon_sub,neon_sub_q,neon_add_widen,\
++                  neon_sub_widen,neon_qadd,neon_qadd_q,\
++                  neon_add_long,neon_sub_long,\
++                  neon_qabs,neon_qabs_q,neon_qneg,\
++                  neon_qneg_q,neon_qsub,neon_qsub_q,neon_compare,\
++                  neon_compare_q,neon_compare_zero,\
++                  neon_compare_zero_q,neon_logic,neon_logic_q,\
++                  neon_minmax,neon_minmax_q,neon_tst,\
++                  neon_tst_q,neon_bsl,neon_bsl_q,\
++                  neon_cls,neon_cls_q,neon_ext,\
++                  neon_ext_q,neon_rev,neon_rev_q,\
++                  neon_tbl1,neon_tbl1_q,neon_fp_abs_s,\
++                  neon_fp_abs_s_q,neon_fp_abs_d,\
++                  neon_fp_neg_s,neon_fp_neg_s_q,\
++                  neon_fp_neg_d,neon_fp_neg_d_q,\
++                  neon_shift_imm_narrow_q,neon_move,neon_move_q")
++           (const_string "hip09_neon_abs")
++         (eq_attr "type" "neon_abd,neon_abd_q,\
++                  neon_arith_acc,neon_arith_acc_q,\
++                  neon_add_halve,neon_add_halve_q,\
++                  neon_sub_halve,neon_sub_halve_q,\
++                  neon_add_halve_narrow_q,\
++                  neon_sub_halve_narrow_q,neon_reduc_add,\
++                  neon_reduc_add_q,\
++                  neon_sat_mul_b,neon_sat_mul_b_q,\
++                  neon_sat_mul_b_long,neon_mul_b,neon_mul_b_q,\
++                  neon_mul_b_long,neon_mla_b,neon_mla_b_q,\
++                  neon_mla_b_long,neon_sat_mla_b_long,\
++                  neon_sat_shift_imm,\
++                  neon_sat_shift_imm_q,neon_shift_imm_long,\
++                  neon_shift_imm,neon_shift_imm_q,neon_cnt,\
++                  neon_cnt_q,neon_fp_recpe_s,neon_fp_recpe_s_q,\
++                  neon_fp_recpe_d,neon_fp_recpe_d_q,\
++                  neon_fp_rsqrte_s,neon_fp_rsqrte_s_q,\
++                  neon_fp_rsqrte_d,neon_fp_rsqrte_d_q,\
++                  neon_fp_recpx_s,neon_fp_recpx_s_q,\
++                  neon_fp_recpx_d,neon_fp_recpx_d_q,\
++                  neon_tbl2,neon_tbl2_q,neon_to_gp,\
++                  neon_to_gp_q,neon_fp_abd_s,neon_fp_abd_s_q,\
++                  neon_fp_abd_d,neon_fp_abd_d_q,\
++                  neon_fp_addsub_s,neon_fp_addsub_s_q,\
++                  neon_fp_addsub_d,neon_fp_addsub_d_q,\
++                  neon_fp_compare_s,neon_fp_compare_s_q,\
++                  neon_fp_compare_d,neon_fp_compare_d_q,\
++                  neon_fp_cvt_widen_s,neon_fp_to_int_s,\
++                  neon_fp_to_int_s_q,neon_fp_to_int_d,\
++                  neon_fp_to_int_d_q,neon_fp_minmax_s,\
++                  neon_fp_minmax_s_q,neon_fp_minmax_d,\
++                  neon_fp_minmax_d_q,neon_fp_round_s,\
++                  neon_fp_round_s_q,neon_fp_cvt_narrow_d_q,\
++                  neon_fp_round_d,neon_fp_round_d_q,\
++                  neon_fp_cvt_narrow_s_q")
++           (const_string "hip09_neon_fp_arith")
++         (eq_attr "type" "neon_sat_mul_h,neon_sat_mul_h_q,\
++                  neon_sat_mul_s,neon_sat_mul_s_q,\
++                  neon_sat_mul_h_scalar,neon_sat_mul_s_scalar,\
++                  neon_sat_mul_h_scalar_q,neon_sat_mul_h_long,\
++                  neon_sat_mul_s_long,neon_sat_mul_h_scalar_long,\
++                  neon_sat_mul_s_scalar_long,neon_mul_h,neon_mul_h_q,\
++                  neon_mul_s,neon_mul_s_q,neon_mul_h_long,\
++                  neon_mul_s_long,neon_mul_h_scalar_long,\
++                  neon_mul_s_scalar_long,neon_mla_h,neon_mla_h_q,\
++                  neon_mla_s,neon_mla_h_scalar,\
++                  neon_mla_h_scalar_q,neon_mla_s_scalar,\
++                  neon_mla_h_long,\
++                  neon_mla_s_long,neon_sat_mla_h_long,\
++                  neon_sat_mla_s_long,neon_sat_mla_h_scalar_long,\
++                  neon_sat_mla_s_scalar_long,neon_mla_s_scalar_long,\
++                  neon_mla_h_scalar_long,neon_mla_s_scalar_q,\
++                  neon_shift_acc,neon_shift_acc_q,neon_shift_reg,\
++                  neon_shift_reg_q,neon_sat_shift_reg,\
++                  neon_sat_shift_reg_q,neon_sat_shift_imm_narrow_q,\
++                  neon_tbl3,neon_tbl3_q,neon_fp_reduc_add_s,\
++                  neon_fp_reduc_add_s_q,neon_fp_reduc_add_d,\
++                  neon_fp_reduc_add_d_q,neon_fp_reduc_minmax_s,\
++                  neon_fp_reduc_minmax_d,neon_fp_reduc_minmax_s_q,\
++                  neon_fp_reduc_minmax_d_q,\
++                  neon_fp_mul_s_q,\
++                  neon_fp_mul_d,neon_fp_mul_d_q,\
++                  neon_fp_mul_d_scalar_q,neon_fp_mul_s_scalar,\
++                  neon_fp_mul_s_scalar_q")
++           (const_string "hip09_neon_mul")
++         (eq_attr "type" "neon_mla_s_q,neon_reduc_minmax,\
++                  neon_reduc_minmax_q,neon_fp_recps_s,\
++                  neon_fp_recps_s_q,neon_fp_recps_d,\
++                  neon_fp_recps_d_q,neon_tbl4,neon_tbl4_q,\
++                  neon_fp_mla_s,\
++                  neon_fp_mla_d,neon_fp_mla_d_q,\
++                  neon_fp_mla_s_scalar,neon_fp_mla_s_scalar_q,\
++                  neon_fp_mla_d_scalar_q")
++           (const_string "hip09_neon_mla")
++         (eq_attr "type" "neon_dot,neon_dot_q")
++           (const_string "hip09_neon_dot")
++         (eq_attr "type" "neon_fp_div_s,neon_fp_div_s_q,\
++                   neon_fp_div_d,neon_fp_div_d_q")
++           (const_string "hip09_neon_fp_div")
++         (eq_attr "type" "neon_fp_sqrt_s,neon_fp_sqrt_s_q,\
++                   neon_fp_sqrt_d,neon_fp_sqrt_d_q")
++           (const_string "hip09_neon_fp_sqrt")
++         (eq_attr "type" "neon_dup,neon_dup_q,\
++                   neon_ins,neon_ins_q")
++           (const_string "hip09_neon_ins")
++         (eq_attr "type" "neon_load1_1reg,neon_load1_1reg_q,\
++                   neon_load1_2reg,neon_load1_2reg_q,\
++                   neon_load1_3reg,neon_load1_3reg_q,\
++                   neon_load1_4reg,neon_load1_4reg_q")
++           (const_string "hip09_neon_load1")
++         (eq_attr "type" "neon_load1_one_lane,\
++                   neon_load1_one_lane_q,\
++                   neon_load1_all_lanes,neon_load1_all_lanes_q")
++           (const_string "hip09_neon_load1_lanes")
++         (eq_attr "type" "neon_load2_all_lanes,\
++                   neon_load2_all_lanes_q,\
++                   neon_load2_one_lane,neon_load2_2reg,\
++                   neon_load2_2reg_q,neon_load3_one_lane,\
++                   neon_load3_all_lanes,neon_load3_all_lanes_q,\
++                   neon_load4_one_lane,neon_load4_all_lanes,\
++                   neon_load4_all_lanes_q")
++           (const_string "hip09_neon_load2and4")
++         (eq_attr "type" "neon_load3_3reg,neon_load3_3reg_q")
++           (const_string "hip09_neon_load3_3reg")
++         (eq_attr "type" "neon_load4_4reg,neon_load4_4reg_q")
++           (const_string "hip09_neon_load4_4reg")
++         (eq_attr "type" "neon_store1_one_lane,\
++                   neon_store1_one_lane_q,neon_store2_one_lane,\
++                   neon_store2_one_lane_q,neon_store2_2reg,\
++                   neon_store2_2reg_q")
++           (const_string "hip09_neon_store1and2")
++         (eq_attr "type" "neon_store1_1reg,neon_store1_1reg_q")
++           (const_string "hip09_neon_store1_1reg")
++         (eq_attr "type" "neon_store1_2reg,neon_store1_2reg_q")
++           (const_string "hip09_neon_store1_2reg")
++         (eq_attr "type" "neon_store1_3reg,neon_store1_3reg_q")
++           (const_string "hip09_neon_store1_3reg")
++         (eq_attr "type" "neon_store1_4reg,neon_store1_4reg_q")
++           (const_string "hip09_neon_store1_4reg")
++         (eq_attr "type" "neon_store3_one_lane,\
++                   neon_store3_one_lane_q,neon_store4_one_lane,\
++                   neon_store4_one_lane_q")
++           (const_string "hip09_neon_store3and4_lane")
++         (eq_attr "type" "neon_store3_3reg,\
++                  neon_store3_3reg_q")
++           (const_string "hip09_neon_store3_3reg")
++         (eq_attr "type" "neon_store4_4reg,\
++                   neon_store4_4reg_q")
++           (const_string "hip09_neon_store4_4reg")]
++  (const_string "unknown")))
++
++; The hip09 core is modelled as an issue pipeline that has
++; the following functional units.
++; 1.  Two pipelines for branch micro operations: BRU1, BRU2
++
++(define_cpu_unit "hip09_bru0" "hip09")
++(define_cpu_unit "hip09_bru1" "hip09")
++
++(define_reservation "hip09_bru01" "hip09_bru0|hip09_bru1")
++
++; 2.  Four pipelines for single cycle integer micro operations: ALUs1, ALUs2, ALUs3, ALUs4
++
++(define_cpu_unit "hip09_alus0" "hip09")
++(define_cpu_unit "hip09_alus1" "hip09")
++(define_cpu_unit "hip09_alus2" "hip09")
++(define_cpu_unit "hip09_alus3" "hip09")
++
++(define_reservation "hip09_alus0123" "hip09_alus0|hip09_alus1|hip09_alus2|hip09_alus3")
++(define_reservation "hip09_alus01" "hip09_alus0|hip09_alus1")
++(define_reservation "hip09_alus23" "hip09_alus2|hip09_alus3")
++
++; 3. Two pipelines for multi cycles integer micro operations: ALUm1, ALUm2
++
++(define_cpu_unit "hip09_alum0" "hip09")
++(define_cpu_unit "hip09_alum1" "hip09")
++
++(define_reservation "hip09_alum01" "hip09_alum0|hip09_alum1")
++
++; 4. Two pipelines for load micro operations: Load1, Load2
++
++(define_cpu_unit "hip09_load0" "hip09_ldst")
++(define_cpu_unit "hip09_load1" "hip09_ldst")
++
++(define_reservation "hip09_ld01" "hip09_load0|hip09_load1")
++
++; 5. Two pipelines for store micro operations: Store1, Store2
++
++(define_cpu_unit "hip09_store0" "hip09_ldst")
++(define_cpu_unit "hip09_store1" "hip09_ldst")
++
++(define_reservation "hip09_st01" "hip09_store0|hip09_store1")
++
++; 6. Two pipelines for store data micro operations: STD0,STD1
++
++(define_cpu_unit "hip09_store_data0" "hip09_ldst")
++(define_cpu_unit "hip09_store_data1" "hip09_ldst")
++
++(define_reservation "hip09_std01" "hip09_store_data0|hip09_store_data1")
++
++; 7.  Four asymmetric pipelines for Asimd and FP micro operations: FSU1, FSU2, FSU3, FSU4
++
++(define_cpu_unit "hip09_fsu0" "hip09_fsu")
++(define_cpu_unit "hip09_fsu1" "hip09_fsu")
++(define_cpu_unit "hip09_fsu2" "hip09_fsu")
++(define_cpu_unit "hip09_fsu3" "hip09_fsu")
++
++(define_reservation "hip09_fsu0123" "hip09_fsu0|hip09_fsu1|hip09_fsu2|hip09_fsu3")
++(define_reservation "hip09_fsu02" "hip09_fsu0|hip09_fsu2")
++
++
++; 8. Two pipelines for SVE operations, shared with FSU1 and FSU3: SVE1, SVE2
++
++;; Simple Execution Unit:
++;
++;; Simple ALU without shift
++(define_insn_reservation "hip09_alu" 1
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "alu_imm,logic_imm,\
++            adc_imm,adc_reg,\
++            alu_sreg,logic_reg,\
++            mov_imm,mov_reg,\
++            csel,rotate_imm,bfm,mov_imm,\
++            clz,rbit,rev"))
++  "hip09_alus0123")
++
++(define_insn_reservation "hip09_alus" 1
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "alus_sreg,alus_imm,\
++            adcs_reg,adcs_imm,\
++            logics_imm,logics_reg,adr"))
++  "hip09_alus23")
++
++;; ALU ops with shift and extend
++(define_insn_reservation "hip09_alu_ext_shift" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "alu_ext,alus_ext,\
++        logics_shift_imm,logics_shift_reg,\
++        logic_shift_reg,logic_shift_imm,\
++        "))
++  "hip09_alum01")
++
++;; Multiplies instructions
++(define_insn_reservation "hip09_mult" 3
++  (and (eq_attr "tune" "hip09")
++       (ior (eq_attr "mul32" "yes")
++       (eq_attr "widen_mul64" "yes")))
++  "hip09_alum01")
++
++;; Integer divide
++(define_insn_reservation "hip09_div" 10
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "udiv,sdiv"))
++  "hip09_alum0")
++
++;; Branch execution Unit
++;
++; Branches take two issue slots.
++; No latency as there is no result
++(define_insn_reservation "hip09_branch" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "branch,call"))
++  "hip09_bru01 + hip09_alus23")
++
++;; Load execution Unit
++;
++; Loads of up to two words.
++(define_insn_reservation "hip09_load1" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "load_4,load_8"))
++  "hip09_ld01")
++
++; Stores of up to two words.
++(define_insn_reservation "hip09_store1" 1
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "store_4,store_8"))
++  "hip09_st01")
++
++;; FP data processing instructions.
++
++(define_insn_reservation "hip09_fp_arith" 1
++   (and (eq_attr "tune" "hip09")
++        (eq_attr "type" "ffariths,ffarithd,fmov,fconsts,fconstd,\
++         f_mrc"))
++   "hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_cmp" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fcmps,fcmpd"))
++  "hip09_fsu0123+hip09_alus23")
++
++(define_insn_reservation "hip09_fp_ccmp" 7
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fccmps,fccmpd"))
++  "hip09_alus01+hip09_fsu0123+hip09_alus23")
++
++(define_insn_reservation "hip09_fp_csel" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fcsel,f_mcr"))
++  "hip09_alus01+hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_divs" 7
++  (and (eq_attr "tune" "hip09")
++  (eq_attr "type" "fdivs"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_divd" 10
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fdivd"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_sqrts" 9
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fsqrts"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_sqrtd" 15
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fsqrtd"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_mul" 3
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fmuls,fmuld"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_add" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fadds,faddd,f_minmaxs,f_minmaxd,f_cvt,\
++       f_rints,f_rintd"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_fp_mac" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "fmacs,fmacd"))
++  "hip09_fsu0123")
++
++;; FP miscellaneous instructions.
++
++(define_insn_reservation "hip09_fp_cvt" 5
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "f_cvtf2i"))
++  "hip09_fsu0123+hip09_alus23")
++
++(define_insn_reservation "hip09_fp_cvt2" 5
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "f_cvti2f"))
++  "hip09_alus01+hip09_fsu0123")
++
++;; FP Load Instructions 
++
++(define_insn_reservation "hip09_fp_load" 7
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "f_loads,f_loadd"))
++  "hip09_ld01")
++
++(define_insn_reservation "hip09_fp_load2" 6
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "neon_ldp_q,neon_ldp"))
++  "hip09_ld01+hip09_alus01")
++
++;; FP store instructions
++
++(define_insn_reservation "hip09_fp_store" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "f_stores,f_stored"))
++  "hip09_st01+hip09_std01")
++
++;; ASIMD integer instructions
++
++(define_insn_reservation "hip09_asimd_base1" 1
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_abs"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_base2" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_fp_arith"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_base3" 3
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_mul"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_base4" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_mla"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_base5" 5
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "neon_fp_mul_s"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_dot" 6
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_dot"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_bfmmla" 9
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "neon_fp_mla_s_q"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_fdiv" 15
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_fp_div"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_fsqrt" 25
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_fp_sqrt"))
++  "hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_pmull" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "crypto_pmull"))
++  "hip09_fsu2")
++
++(define_insn_reservation "hip09_asimd_dup" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_ins"))
++  "hip09_alus01+hip09_fsu0123")
++
++;; ASIMD load instructions
++
++(define_insn_reservation "hip09_asimd_ld1_reg" 6
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_load1"))
++  "hip09_ld01")
++
++(define_insn_reservation "hip09_asimd_ld1_lane" 7
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_load1_lanes"))
++  "hip09_ld01+hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_ld23" 8
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_load2and4"))
++"hip09_ld01+hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_ld3_mtp" 9
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_load3_3reg"))
++  "hip09_ld01+hip09_fsu0123")
++
++(define_insn_reservation "hip09_asimd_ld4_mtp" 13
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_load4_4reg"))
++  "hip09_ld01+hip09_fsu0123")
++
++;; ASIMD store instructions
++
++(define_insn_reservation "hip09_asimd_st12" 1
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store1and2"))
++  "hip09_st01+hip09_std01")
++
++(define_insn_reservation "hip09_asimd_st1_1reg" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store1_1reg"))
++  "hip09_st01+hip09_std01")
++
++(define_insn_reservation "hip09_asimd_st1_2reg" 3
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store1_2reg"))
++  "hip09_st01+hip09_std01")
++
++(define_insn_reservation "hip09_asimd_st1_3reg" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store1_3reg"))
++  "hip09_st01+hip09_std01")
++
++(define_insn_reservation "hip09_asimd_st1_4reg" 5
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store1_4reg"))
++  "hip09_st01+hip09_std01")
++
++(define_insn_reservation "hip09_asimd_st34_lane" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store3and4_lane"))
++  "hip09_fsu0123+hip09_st01+hip09_std01")
++
++(define_insn_reservation "hip09_asimd_st3_mtp" 7
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store3_3reg"))
++  "hip09_fsu0123+hip09_st01+hip09_std01")
++
++(define_insn_reservation "hip09_asimd_st4_mtp" 10
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "hip09_type" "hip09_neon_store4_4reg"))
++  "hip09_fsu0123+hip09_st01+hip09_std01")
++
++;; Cryptography extensions
++
++(define_insn_reservation "hip09_asimd_aes" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "crypto_aese,crypto_aesmc"))
++  "hip09_fsu02")
++
++(define_insn_reservation "hip09_asimd_sha3" 1
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "crypto_sha3"))
++  "hip09_fsu2")
++
++(define_insn_reservation "hip09_asimd_sha1" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "crypto_sha1_fast,crypto_sha1_xor,\
++       crypto_sha256_fast,crypto_sha512,\
++       crypto_sm3"))
++  "hip09_fsu2")
++
++(define_insn_reservation "hip09_asimd_sha1_and256" 4
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "crypto_sha1_slow,crypto_sha256_slow,\
++       crypto_sm4"))
++  "hip09_fsu2")
++
++;; CRC extension.
++
++(define_insn_reservation "hip09_crc" 2
++  (and (eq_attr "tune" "hip09")
++       (eq_attr "type" "crc"))
++  "hip09_alum01")
+-- 
+2.33.0
+
diff --git a/0100-LoongArch-Fixed-the-problem-of-incorrect-judgment-of.patch b/0100-LoongArch-Fixed-the-problem-of-incorrect-judgment-of.patch
new file mode 100644
index 0000000000000000000000000000000000000000..af4278b58969f37ae6a7ab1ff4b04155c9d9547b
--- /dev/null
+++ b/0100-LoongArch-Fixed-the-problem-of-incorrect-judgment-of.patch
@@ -0,0 +1,206 @@
+From 90db6906a92b685403d9220e94f779737d2dd100 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Thu, 4 Jan 2024 10:37:53 +0800
+Subject: [PATCH 100/188] LoongArch: Fixed the problem of incorrect judgment of
+ the immediate field of the [x]vld/[x]vst instruction.
+
+The [x]vld/[x]vst directive is defined as follows:
+  [x]vld/[x]vst {x/v}d, rj, si12
+
+When not modified, the immediate field of [x]vld/[x]vst is between 10 and
+14 bits depending on the type. However, in loongarch_valid_offset_p, the
+immediate field is restricted first, so there is no error. However, in
+some cases redundant instructions will be generated, see test cases.
+Now modify it according to the description in the instruction manual.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (lasx_mxld_<lasxfmt_f>):
+	Modify the method of determining the memory offset of [x]vld/[x]vst.
+	(lasx_mxst_<lasxfmt_f>): Likewise.
+	* config/loongarch/loongarch.cc (loongarch_valid_offset_p): Delete.
+	(loongarch_address_insns): Likewise.
+	* config/loongarch/lsx.md (lsx_ld_<lsxfmt_f>): Likewise.
+	(lsx_st_<lsxfmt_f>): Likewise.
+	* config/loongarch/predicates.md (aq10b_operand): Likewise.
+	(aq10h_operand): Likewise.
+	(aq10w_operand): Likewise.
+	(aq10d_operand): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vect-ld-st-imm12.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 26 -------------------
+ gcc/config/loongarch/loongarch.cc             | 19 +++-----------
+ gcc/config/loongarch/lsx.md                   | 26 -------------------
+ gcc/config/loongarch/predicates.md            | 16 ------------
+ .../gcc.target/loongarch/vect-ld-st-imm12.c   | 15 +++++++++++
+ 5 files changed, 19 insertions(+), 83 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-ld-st-imm12.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index dbbf5a136..95c6bae20 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -846,32 +846,6 @@
+   DONE;
+ })
+ 
+-;; Offset load
+-(define_expand "lasx_mxld_<lasxfmt_f>"
+-  [(match_operand:LASX 0 "register_operand")
+-   (match_operand 1 "pmode_register_operand")
+-   (match_operand 2 "aq10<lasxfmt>_operand")]
+-  "ISA_HAS_LASX"
+-{
+-  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
+-				      INTVAL (operands[2]));
+-  loongarch_emit_move (operands[0], gen_rtx_MEM (<MODE>mode, addr));
+-  DONE;
+-})
+-
+-;; Offset store
+-(define_expand "lasx_mxst_<lasxfmt_f>"
+-  [(match_operand:LASX 0 "register_operand")
+-   (match_operand 1 "pmode_register_operand")
+-   (match_operand 2 "aq10<lasxfmt>_operand")]
+-  "ISA_HAS_LASX"
+-{
+-  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
+-			    INTVAL (operands[2]));
+-  loongarch_emit_move (gen_rtx_MEM (<MODE>mode, addr), operands[0]);
+-  DONE;
+-})
+-
+ ;; LASX
+ (define_insn "add<mode>3"
+   [(set (match_operand:ILASX 0 "register_operand" "=f,f,f")
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 9d2374a46..ddb32cea2 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2123,21 +2123,11 @@ loongarch_valid_offset_p (rtx x, machine_mode mode)
+ 
+   /* We may need to split multiword moves, so make sure that every word
+      is accessible.  */
+-  if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
++  if (!(LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))
++      && GET_MODE_SIZE (mode) > UNITS_PER_WORD
+       && !IMM12_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
+     return false;
+ 
+-  /* LSX LD.* and ST.* supports 10-bit signed offsets.  */
+-  if (LSX_SUPPORTED_MODE_P (mode)
+-      && !loongarch_signed_immediate_p (INTVAL (x), 10,
+-					loongarch_ldst_scaled_shift (mode)))
+-    return false;
+-
+-  /* LASX XVLD.B and XVST.B supports 10-bit signed offsets without shift.  */
+-  if (LASX_SUPPORTED_MODE_P (mode)
+-      && !loongarch_signed_immediate_p (INTVAL (x), 10, 0))
+-    return false;
+-
+   return true;
+ }
+ 
+@@ -2372,9 +2362,8 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p)
+       case ADDRESS_REG:
+ 	if (lsx_p)
+ 	  {
+-	    /* LSX LD.* and ST.* supports 10-bit signed offsets.  */
+-	    if (loongarch_signed_immediate_p (INTVAL (addr.offset), 10,
+-					      loongarch_ldst_scaled_shift (mode)))
++	    /* LSX LD.* and ST.* supports 12-bit signed offsets.  */
++	    if (IMM12_OPERAND (INTVAL (addr.offset)))
+ 	      return 1;
+ 	    else
+ 	      return 0;
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 3e3248ef4..02e89247b 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -812,32 +812,6 @@
+   DONE;
+ })
+ 
+-;; Offset load
+-(define_expand "lsx_ld_<lsxfmt_f>"
+-  [(match_operand:LSX 0 "register_operand")
+-   (match_operand 1 "pmode_register_operand")
+-   (match_operand 2 "aq10<lsxfmt>_operand")]
+-  "ISA_HAS_LSX"
+-{
+-  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
+-			    INTVAL (operands[2]));
+-  loongarch_emit_move (operands[0], gen_rtx_MEM (<MODE>mode, addr));
+-  DONE;
+-})
+-
+-;; Offset store
+-(define_expand "lsx_st_<lsxfmt_f>"
+-  [(match_operand:LSX 0 "register_operand")
+-   (match_operand 1 "pmode_register_operand")
+-   (match_operand 2 "aq10<lsxfmt>_operand")]
+-  "ISA_HAS_LSX"
+-{
+-  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
+-			    INTVAL (operands[2]));
+-  loongarch_emit_move (gen_rtx_MEM (<MODE>mode, addr), operands[0]);
+-  DONE;
+-})
+-
+ ;; Integer operations
+ (define_insn "add<mode>3"
+   [(set (match_operand:ILSX 0 "register_operand" "=f,f,f")
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 3698b9103..824a85b36 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -167,22 +167,6 @@
+   (and (match_code "const_int")
+        (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)")))
+ 
+-(define_predicate "aq10b_operand"
+-  (and (match_code "const_int")
+-       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 0)")))
+-
+-(define_predicate "aq10h_operand"
+-  (and (match_code "const_int")
+-       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 1)")))
+-
+-(define_predicate "aq10w_operand"
+-  (and (match_code "const_int")
+-       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)")))
+-
+-(define_predicate "aq10d_operand"
+-  (and (match_code "const_int")
+-       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 3)")))
+-
+ (define_predicate "aq12b_operand"
+   (and (match_code "const_int")
+        (match_test "loongarch_signed_immediate_p (INTVAL (op), 12, 0)")))
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-ld-st-imm12.c b/gcc/testsuite/gcc.target/loongarch/vect-ld-st-imm12.c
+new file mode 100644
+index 000000000..bfc208e4f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-ld-st-imm12.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d -mlasx -O2" } */
++/* { dg-final { scan-assembler-not "addi.d" } } */
++
++extern short a[1000];
++extern short b[1000];
++extern short c[1000];
++
++void
++test (void)
++{
++  for (int i = 501; i < 517; i++)
++    ((int *)(c + 1))[i] = ((int *)(a + 1))[i] + ((int *)(b + 1))[i];
++}
++
+-- 
+2.43.0
+
diff --git a/0101-Add-hip11-CPU-pipeline-scheduling.patch b/0101-Add-hip11-CPU-pipeline-scheduling.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7b89890555fa6f0ccb95e7d657c0ba87ebb745d2
--- /dev/null
+++ b/0101-Add-hip11-CPU-pipeline-scheduling.patch
@@ -0,0 +1,755 @@
+From 824fccdab1d3c5e87fb88b31f0eeb7abd1b35c1f Mon Sep 17 00:00:00 2001
+From: XingYuShuai <1150775134@qq.com>
+Date: Mon, 26 Feb 2024 20:34:06 +0800
+Subject: [PATCH 002/157] Add hip11 CPU pipeline scheduling
+
+This patch adds an mcpu: hip11. It has been tested on aarch64
+and no regressions from this patch.
+---
+ gcc/config/aarch64/aarch64-cores.def     |   1 +
+ gcc/config/aarch64/aarch64-cost-tables.h | 104 ++++++
+ gcc/config/aarch64/aarch64-tune.md       |   2 +-
+ gcc/config/aarch64/aarch64.cc            | 108 ++++++
+ gcc/config/aarch64/aarch64.md            |   1 +
+ gcc/config/aarch64/hip11.md              | 418 +++++++++++++++++++++++
+ gcc/doc/invoke.texi                      |   2 +-
+ 7 files changed, 634 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/config/aarch64/hip11.md
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index a854bdb24..601b72abb 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -173,6 +173,7 @@ AARCH64_CORE("cortex-a710",  cortexa710, cortexa57, 9A,  AARCH64_FL_FOR_ARCH9 |
+ AARCH64_CORE("cortex-x2",  cortexx2, cortexa57, 9A,  AARCH64_FL_FOR_ARCH9 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_MEMTAG | AARCH64_FL_I8MM | AARCH64_FL_BF16, neoversen2, 0x41, 0xd48, -1)
+ 
+ AARCH64_CORE("neoverse-n2", neoversen2, cortexa57, 9A, AARCH64_FL_FOR_ARCH9 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG | AARCH64_FL_PROFILE, neoversen2, 0x41, 0xd49, -1)
++AARCH64_CORE("hip11", hip11, hip11, 8_5A, AARCH64_FL_FOR_ARCH8_5| AARCH64_FL_SVE | AARCH64_FL_SVE2 | AARCH64_FL_F16, hip11, 0x48, 0xd22, -1)
+ 
+ AARCH64_CORE("demeter", demeter, cortexa57, 9A, AARCH64_FL_FOR_ARCH9 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG | AARCH64_FL_PROFILE, neoversev2, 0x41, 0xd4f, -1)
+ AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, 9A, AARCH64_FL_FOR_ARCH9 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG | AARCH64_FL_PROFILE, neoversev2, 0x41, 0xd4f, -1)
+diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h
+index fc5a3cbe4..0ee427b61 100644
+--- a/gcc/config/aarch64/aarch64-cost-tables.h
++++ b/gcc/config/aarch64/aarch64-cost-tables.h
+@@ -561,6 +561,110 @@ const struct cpu_cost_table tsv110_extra_costs =
+   }
+ };
+ 
++const struct cpu_cost_table hip11_extra_costs =
++{
++  /* ALU */
++  {
++    0,                 /* arith.  */
++    0,                 /* logical.  */
++    0,                 /* shift.  */
++    0,                 /* shift_reg.  */
++    COSTS_N_INSNS (1), /* arith_shift.  */
++    COSTS_N_INSNS (1), /* arith_shift_reg.  */
++    COSTS_N_INSNS (1), /* log_shift.  */
++    COSTS_N_INSNS (1), /* log_shift_reg.  */
++    0,                 /* extend.  */
++    COSTS_N_INSNS (1), /* extend_arith.  */
++    0,                 /* bfi.  */
++    0,                 /* bfx.  */
++    0,                 /* clz.  */
++    0,                 /* rev.  */
++    0,                 /* non_exec.  */
++    true               /* non_exec_costs_exec.  */
++  },
++
++  {
++    /* MULT SImode */
++    {
++      COSTS_N_INSNS (2),       /* simple.  */
++      COSTS_N_INSNS (2),       /* flag_setting.  */
++      COSTS_N_INSNS (2),       /* extend.  */
++      COSTS_N_INSNS (2),       /* add.  */
++      COSTS_N_INSNS (2),       /* extend_add.  */
++      COSTS_N_INSNS (11)       /* idiv.  */
++    },
++    /* MULT DImode */
++    {
++      COSTS_N_INSNS (3),       /* simple.  */
++      0,                       /* flag_setting (N/A).  */
++      COSTS_N_INSNS (3),       /* extend.  */
++      COSTS_N_INSNS (3),       /* add.  */
++      COSTS_N_INSNS (3),       /* extend_add.  */
++      COSTS_N_INSNS (19)       /* idiv.  */
++    }
++  },
++  /* LD/ST */
++  {
++    COSTS_N_INSNS (3),         /* load.  */
++    COSTS_N_INSNS (4),         /* load_sign_extend.  */
++    COSTS_N_INSNS (3),         /* ldrd.  */
++    COSTS_N_INSNS (3),         /* ldm_1st.  */
++    1,                         /* ldm_regs_per_insn_1st.  */
++    2,                         /* ldm_regs_per_insn_subsequent.  */
++    COSTS_N_INSNS (4),         /* loadf.  */
++    COSTS_N_INSNS (4),         /* loadd.  */
++    COSTS_N_INSNS (4),         /* load_unaligned.  */
++    0,                         /* store.  */
++    0,                         /* strd.  */
++    0,                         /* stm_1st.  */
++    1,                         /* stm_regs_per_insn_1st.  */
++    2,                         /* stm_regs_per_insn_subsequent.  */
++    0,                         /* storef.  */
++    0,                         /* stored.  */
++    COSTS_N_INSNS (1),         /* store_unaligned.  */
++    COSTS_N_INSNS (4),         /* loadv.  */
++    COSTS_N_INSNS (4)          /* storev.  */
++  },
++  {
++    /* FP SFmode */
++    {
++      COSTS_N_INSNS (10),      /* div.  */
++      COSTS_N_INSNS (4),       /* mult.  */
++      COSTS_N_INSNS (4),       /* mult_addsub.  */
++      COSTS_N_INSNS (4),       /* fma.  */
++      COSTS_N_INSNS (4),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      COSTS_N_INSNS (1),       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (1),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    },
++    /* FP DFmode */
++    {
++      COSTS_N_INSNS (17),      /* div.  */
++      COSTS_N_INSNS (4),       /* mult.  */
++      COSTS_N_INSNS (6),       /* mult_addsub.  */
++      COSTS_N_INSNS (6),       /* fma.  */
++      COSTS_N_INSNS (3),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      COSTS_N_INSNS (1),       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (1),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    }
++  },
++  /* Vector */
++  {
++    COSTS_N_INSNS (1)  /* alu.  */
++  }
++};
++
+ const struct cpu_cost_table a64fx_extra_costs =
+ {
+   /* ALU */
+diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
+index 238bb6e31..511422081 100644
+--- a/gcc/config/aarch64/aarch64-tune.md
++++ b/gcc/config/aarch64/aarch64-tune.md
+@@ -1,5 +1,5 @@
+ ;; -*- buffer-read-only: t -*-
+ ;; Generated automatically by gentune.sh from aarch64-cores.def
+ (define_attr "tune"
+-	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,demeter,neoversev2"
++	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2"
+ 	(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index e9b3980c4..7c62ddb2a 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -481,6 +481,22 @@ static const struct cpu_addrcost_table hip09_addrcost_table =
+   0, /* imm_offset  */
+ };
+ 
++static const struct cpu_addrcost_table hip11_addrcost_table =
++{
++    {
++      1, /* hi  */
++      0, /* si  */
++      0, /* di  */
++      1, /* ti  */
++    },
++  0, /* pre_modify  */
++  0, /* post_modify  */
++  0, /* register_offset  */
++  1, /* register_sextend  */
++  1, /* register_zextend  */
++  0, /* imm_offset  */
++};
++
+ static const struct cpu_addrcost_table qdf24xx_addrcost_table =
+ {
+     {
+@@ -666,6 +682,16 @@ static const struct cpu_regmove_cost tsv110_regmove_cost =
+   2  /* FP2FP  */
+ };
+ 
++static const struct cpu_regmove_cost hip11_regmove_cost =
++{
++  1, /* GP2GP  */
++  /* Avoid the use of slow int<->fp moves for spilling by setting
++     their cost higher than memmov_cost.  */
++  2, /* GP2FP  */
++  3, /* FP2GP  */
++  2  /* FP2FP  */
++};
++
+ static const struct cpu_regmove_cost a64fx_regmove_cost =
+ {
+   1, /* GP2GP  */
+@@ -1010,6 +1036,43 @@ static const struct cpu_vector_cost hip09_vector_cost =
+   nullptr /* issue_info  */
+ };
+ 
++static const advsimd_vec_cost hip11_advsimd_vector_cost =
++{
++  2, /* int_stmt_cost  */
++  2, /* fp_stmt_cost  */
++  0, /* ld2_st2_permute_cost  */
++  0, /* ld3_st3_permute_cost  */
++  0, /* ld4_st4_permute_cost  */
++  2, /* permute_cost  */
++  3, /* reduc_i8_cost  */
++  3, /* reduc_i16_cost  */
++  3, /* reduc_i32_cost  */
++  3, /* reduc_i64_cost  */
++  3, /* reduc_f16_cost  */
++  3, /* reduc_f32_cost  */
++  3, /* reduc_f64_cost  */
++  3, /* store_elt_extra_cost  */
++  5, /* vec_to_scalar_cost  */
++  5, /* scalar_to_vec_cost  */
++  5, /* align_load_cost  */
++  5, /* unalign_load_cost  */
++  1, /* unalign_store_cost  */
++  1  /* store_cost  */
++};
++
++static const struct cpu_vector_cost hip11_vector_cost =
++{
++  1, /* scalar_int_stmt_cost  */
++  1, /* scalar_fp_stmt_cost  */
++  5, /* scalar_load_cost  */
++  1, /* scalar_store_cost  */
++  1, /* cond_taken_branch_cost  */
++  1, /* cond_not_taken_branch_cost  */
++  &hip11_advsimd_vector_cost, /* advsimd  */
++  nullptr, /* sve  */
++  nullptr /* issue_info  */
++};
++
+ static const advsimd_vec_cost cortexa57_advsimd_vector_cost =
+ {
+   2, /* int_stmt_cost  */
+@@ -1368,6 +1431,17 @@ static const cpu_prefetch_tune hip09_prefetch_tune =
+   -1                    /* default_opt_level  */
+ };
+ 
++static const cpu_prefetch_tune hip11_prefetch_tune =
++{
++  0,                    /* num_slots  */
++  64,                   /* l1_cache_size  */
++  64,                   /* l1_cache_line_size  */
++  512,                  /* l2_cache_size  */
++  true,                 /* prefetch_dynamic_strides */
++  -1,                   /* minimum_stride */
++  -1                    /* default_opt_level  */
++};
++
+ static const cpu_prefetch_tune xgene1_prefetch_tune =
+ {
+   8,			/* num_slots  */
+@@ -1767,6 +1841,40 @@ static const struct tune_params hip09_tunings =
+   &hip09_prefetch_tune
+ };
+ 
++static const struct tune_params hip11_tunings =
++{
++  &hip11_extra_costs,
++  &hip11_addrcost_table,
++  &hip11_regmove_cost,
++  &hip11_vector_cost,
++  &generic_branch_cost,
++  &generic_approx_modes,
++  SVE_512, /* sve_width  */
++  { 4, /* load_int.  */
++    4, /* store_int.  */
++    4, /* load_fp.  */
++    4, /* store_fp.  */
++    4, /* load_pred.  */
++    4 /* store_pred.  */
++  }, /* memmov_cost.  */
++  4,    /* issue_rate  */
++  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
++   | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
++  "16", /* function_align.  */
++  "4",  /* jump_align.  */
++  "8",  /* loop_align.  */
++  2,    /* int_reassoc_width.  */
++  4,    /* fp_reassoc_width.  */
++  1,    /* vec_reassoc_width.  */
++  2,    /* min_div_recip_mul_sf.  */
++  2,    /* min_div_recip_mul_df.  */
++  0,    /* max_case_values.  */
++  tune_params::AUTOPREFETCHER_WEAK,     /* autoprefetcher_model.  */
++  (AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
++   | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT),     /* tune_flags.  */
++  &hip11_prefetch_tune
++};
++
+ static const struct tune_params xgene1_tunings =
+ {
+   &xgene1_extra_costs,
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index cf699e4c7..c0c64a798 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -478,6 +478,7 @@
+ (include "tsv110.md")
+ (include "thunderx3t110.md")
+ (include "hip09.md")
++(include "hip11.md")
+ 
+ ;; -------------------------------------------------------------------
+ ;; Jumps and other miscellaneous insns
+diff --git a/gcc/config/aarch64/hip11.md b/gcc/config/aarch64/hip11.md
+new file mode 100644
+index 000000000..45f91e65b
+--- /dev/null
++++ b/gcc/config/aarch64/hip11.md
+@@ -0,0 +1,418 @@
++;; hip11 pipeline description
++;; Copyright (C) 2018-2024 Free Software Foundation, Inc.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; .
++
++(define_automaton "hip11")
++
++;; The hip11 core is modelled as issues pipeline that has
++;; the following functional units.
++;; 1.  Three pipelines for integer operations: ALU1, ALU2, ALU3
++
++(define_cpu_unit "hip11_alu1_issue" "hip11")
++(define_reservation "hip11_alu1" "hip11_alu1_issue")
++
++(define_cpu_unit "hip11_alu2_issue" "hip11")
++(define_reservation "hip11_alu2" "hip11_alu2_issue")
++
++(define_cpu_unit "hip11_alu3_issue" "hip11")
++(define_reservation "hip11_alu3" "hip11_alu3_issue")
++
++(define_reservation "hip11alu" "hip11_alu1|hip11_alu2|hip11_alu3")
++
++;; 2.  One pipeline for complex integer operations: MDU
++
++(define_cpu_unit "hip11_mdu_issue" "hip11")
++(define_reservation "hip11_mdu" "hip11_mdu_issue")
++
++;; 3.  Two asymmetric pipelines for Asimd and FP operations: FSU1, FSU2
++(define_automaton "hip11_fsu")
++
++(define_cpu_unit "hip11_fsu1_issue"
++		 "hip11_fsu")
++(define_cpu_unit "hip11_fsu2_issue"
++		 "hip11_fsu")
++
++(define_reservation "hip11_fsu1" "hip11_fsu1_issue")
++(define_reservation "hip11_fsu2" "hip11_fsu2_issue")
++(define_reservation "hip11_fsu_pipe" "hip11_fsu1|hip11_fsu2")
++
++;; 4.  Two pipeline for branch operations but same with alu2 and alu3: BRU1, BRU2
++
++;; 5.  Two pipelines for load and store operations: LS1, LS2.
++
++(define_cpu_unit "hip11_ls1_issue" "hip11")
++(define_cpu_unit "hip11_ls2_issue" "hip11")
++(define_reservation "hip11_ls1" "hip11_ls1_issue")
++(define_reservation "hip11_ls2" "hip11_ls2_issue")
++
++;; Block all issue queues.
++
++(define_reservation "hip11_block" "hip11_fsu1_issue + hip11_fsu2_issue
++				  + hip11_mdu_issue + hip11_alu1_issue
++				  + hip11_alu2_issue + hip11_alu3_issue + hip11_ls1_issue + hip11_ls2_issue")
++
++;; Branch execution Unit
++;;
++(define_insn_reservation "hip11_branch" 1
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "branch"))
++  "hip11_alu2|hip11_alu3")
++
++(define_insn_reservation "hip11_return_from_subroutine" 6
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "branch")
++       (eq_attr "sls_length" "retbr"))
++  "hip11_mdu,(hip11_alu2|hip11_alu3)")
++
++  ;; Simple Execution Unit:
++;;
++;; Simple ALU without shift
++(define_insn_reservation "hip11_alu" 1
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "alu_imm,logic_imm,\
++			alu_sreg,logic_reg,\
++			adc_imm,adc_reg,\
++			adr,bfm,clz,rbit,rev,\
++			shift_imm,shift_reg,\
++			mov_imm,mov_reg,\
++			mvn_imm,mvn_reg,\
++			mrs,multiple,csel,\
++            rotate_imm"))
++  "hip11_alu1|hip11_alu2|hip11_alu3")
++  
++(define_insn_reservation "hip11_alus" 1
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "alus_imm,logics_imm,\
++			alus_sreg,logics_reg,\
++			adcs_imm,adcs_reg"))
++  "hip11_alu2|hip11_alu3")
++
++;; ALU ops with shift
++(define_insn_reservation "hip11_alu_shift" 2
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "extend,\
++			alu_shift_imm_lsl_1to4,alu_shift_imm_other,alu_shift_reg,\
++			crc,logic_shift_imm,logic_shift_reg,\
++			mov_shift,mvn_shift,\
++			mov_shift_reg,mvn_shift_reg"))
++  "hip11_mdu")
++  
++(define_insn_reservation "hip11_alus_shift" 2
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "alus_shift_imm,alus_shift_reg,\
++			logics_shift_imm,logics_shift_reg"))
++  "hip11_alu2|hip11_alu3")
++
++;; Multiplies instructions
++(define_insn_reservation "hip11_mult" 3
++  (and (eq_attr "tune" "hip11")
++       (ior (eq_attr "mul32" "yes")
++	    (eq_attr "widen_mul64" "yes")))
++  "hip11_mdu")
++
++;; Integer divide
++(define_insn_reservation "hip11_div" 10
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "udiv,sdiv"))
++  "hip11_mdu")
++
++(define_insn_reservation "hip11_mla" 4
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "mla,smlal,umlal,smull,umull"))
++  "hip11_mdu")
++
++;; Block all issue pipes for a cycle
++(define_insn_reservation "hip11_block" 1
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "block"))
++  "hip11_block")
++
++;; Load-store execution Unit
++;;
++(define_insn_reservation "hip11_load1" 4
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "load_4,load_8,load_16"))
++  "hip11_ls1|hip11_ls2")
++
++(define_insn_reservation "hip11_fp_load" 5
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "f_loads,f_loadd"))
++  "hip11_ls1|hip11_ls2")
++
++(define_insn_reservation "hip11_neon_ld1_single" 7
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load1_one_lane,neon_load1_one_lane_q,\
++       neon_load1_all_lanes,neon_load1_all_lanes_q"))
++  "(hip11_ls1|hip11_ls2)+hip11_fsu1")
++
++(define_insn_reservation "hip11_neon_ld1_1reg" 5
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load1_1reg,neon_load1_1reg_q"))
++  "hip11_ls1|hip11_ls2")
++
++(define_insn_reservation "hip11_neon_ld1_2reg" 6
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load1_2reg,neon_load1_2reg_q"))
++  "hip11_ls1|hip11_ls2")
++
++(define_insn_reservation "hip11_neon_ld1_3reg" 7
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load1_3reg,neon_load1_3reg_q"))
++  "hip11_ls1|hip11_ls2")
++
++(define_insn_reservation "hip11_neon_ld1_4reg" 8
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load1_4reg,neon_load1_4reg_q"))
++  "hip11_ls1|hip11_ls2")
++
++(define_insn_reservation "hip11_neon_ld2" 8
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load2_one_lane,neon_load2_one_lane_q,\
++       neon_load2_all_lanes,neon_load2_all_lanes_q,\
++       neon_load2_2reg,neon_load2_2reg_q,\
++       neon_load2_4reg,neon_load2_4reg_q"))
++  "(hip11_ls1|hip11_ls2)+hip11_fsu1")
++
++(define_insn_reservation "hip11_neon_ld3_single" 9
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load3_one_lane,neon_load3_one_lane_q,\
++       neon_load3_all_lanes,neon_load3_all_lanes_q"))
++  "(hip11_ls1|hip11_ls2)+hip11_fsu1")
++
++(define_insn_reservation "hip11_neon_ld3_multiple" 13
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load3_3reg,neon_load3_3reg_q"))
++  "(hip11_ls1|hip11_ls2)+hip11_fsu1")
++
++(define_insn_reservation "hip11_neon_ld4_single" 10
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load4_one_lane,neon_load4_one_lane_q,\
++       neon_load4_all_lanes,neon_load4_all_lanes_q"))
++  "(hip11_ls1|hip11_ls2)+hip11_fsu1")
++
++(define_insn_reservation "hip11_neon_ld4_multiple" 11
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_load4_4reg,neon_load4_4reg_q"))
++  "(hip11_ls1|hip11_ls2)+hip11_fsu1")
++
++;; Stores of up to two words.
++(define_insn_reservation "hip11_store1" 1
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "store_4,store_8,store_16,\
++       f_stored,f_stores"))
++  "hip11_ls1|hip11_ls2")
++
++;; Floating-Point Operations.
++(define_insn_reservation "hip11_fp_arith" 2
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "ffariths,ffarithd,f_minmaxs,\
++       f_minmaxd,fadds,faddd,neon_fcadd"))
++  "hip11_fsu_pipe")
++
++(define_insn_reservation "hip11_fp_mul" 3
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_fp_mul_d,neon_fp_mul_d_q,\
++       neon_fp_mul_s_scalar,neon_fp_mul_s_scalar_q,\
++       neon_fp_mul_d_scalar_q,fmuld,fmuls"))
++  "hip11_fsu_pipe")
++
++(define_insn_reservation "hip11_fp_cmp" 2
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fccmpd,fccmps"))
++  "hip11alu,hip11_fsu_pipe")
++
++(define_insn_reservation "hip11_fp_csel" 2
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fcsel"))
++  "hip11alu,hip11_fsu1")
++
++(define_insn_reservation "hip11_fp_fcmp" 1
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fcmpd,fcmps"))
++  "hip11_fsu_pipe")
++
++(define_insn_reservation "hip11_fp_divs" 7
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fdivs"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_fp_divd" 10
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fdivd"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_fp_sqrts" 9
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fsqrts"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_fp_sqrtd" 15
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fsqrtd"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_fp_mac" 4
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fmacs,ffmas,fmacd,ffmad"))
++  "hip11_fsu_pipe")
++
++(define_insn_reservation "hip11_fp_mov" 1
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "fmov,neon_dup,neon_dup_q,\
++       neon_from_gp,neon_from_gp_q,\
++       neon_ins,neon_ins_q,\
++       neon_to_gp,neon_to_gp_q,\
++       neon_move,neon_move_q,\
++       neon_rev,neon_rev_q,\
++       neon_permute,neon_permute_q,\
++       neon_shift_imm_narrow_q,\
++       neon_ext,neon_ext_q,\
++       neon_rbit,\
++       crypto_sha3,neon_tbl1,neon_tbl1_q,\
++       neon_tbl2_q,f_mcr,neon_tst,neon_tst_q,\
++       neon_move_narrow_q"))
++  "hip11_fsu1")
++
++;; ASIMD instructions
++(define_insn_reservation "hip11_asimd_simple_arithmetic" 2
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_abs,neon_abs_q,neon_neg,neon_neg_q,\
++       neon_abd,neon_abd_q,\
++       neon_add_long,neon_sub_long,neon_sub_widen,neon_add_widen,\
++       neon_add_halve_narrow_q,neon_sub_halve_narrow_q,\
++       neon_arith_acc,neon_arith_acc_q,\
++       neon_compare,neon_compare_q,\
++       neon_compare_zero,neon_compare_zero_q,\
++       neon_minmax,neon_minmax_q,\
++       neon_logic,neon_logic_q,\
++       neon_reduc_add,neon_reduc_add_q,\
++       neon_reduc_minmax,neon_reduc_minmax_q,\
++       neon_fp_to_int_s,neon_fp_to_int_s_q,\
++       neon_fp_to_int_d,neon_fp_to_int_d_q,\
++       neon_fp_cvt_widen_s,\
++       neon_fp_cvt_narrow_d_q,\
++       neon_cls,neon_cls_q,\
++       neon_cnt,neon_cnt_q,\
++       f_rints,f_rintd,f_cvtf2i,f_cvt,\
++       neon_tbl3,neon_fp_round_s,neon_fp_round_s_q,\
++       neon_fp_round_d,neon_fp_round_d_q,\
++       neon_int_to_fp_s,neon_fp_recpe_s,neon_fp_recpe_s_q,\
++       neon_fp_recpe_d,neon_fp_recpe_d_q,\
++       neon_fp_cvt_narrow_s_q,\
++       crypto_aese,crypto_aesmc,\
++       crypto_sha1_fast,crypto_sha1_xor,\
++       crypto_sha1_slow,\
++       crypto_sha256_fast,\
++       crypto_sha512,crypto_sm3,\
++       neon_qabs,neon_qabs_q,\
++       neon_qneg,neon_qneg_q,\
++       neon_qadd,neon_qadd_q,\
++       neon_qsub,neon_qsub_q,\
++       neon_add_halve,neon_add_halve_q,\
++       neon_sub_halve,neon_sub_halve_q,\
++       neon_fp_reduc_minmax_s,neon_fp_reduc_minmax_s_q,\
++       neon_fp_reduc_minmax_d,neon_fp_reduc_minmax_d_q,\
++       neon_fp_rsqrte_s,neon_fp_rsqrte_s_q,\
++       neon_fp_rsqrte_d,neon_fp_rsqrte_d_q"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_asimd_complex_arithmetic" 4
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_mul_b,neon_mul_b_q,\
++       neon_mul_h,neon_mul_h_q,\
++       neon_mul_s,neon_mul_s_q,\
++       neon_mla_b,neon_mla_b_q,\
++       neon_mla_h,neon_mla_h_q,\
++       neon_mla_s,\
++       neon_mla_h_scalar,neon_mla_h_scalar_q,\
++       neon_mla_s_scalar,neon_mla_s_scalar_q,\
++       neon_sat_mul_h_scalar,neon_sat_mul_h_scalar_q,\
++       neon_sat_mul_s_scalar,neon_sat_mul_s_scalar_q,\
++       neon_sat_mul_b,neon_sat_mul_b_q,\
++       neon_sat_mul_h,neon_sat_mul_h_q,\
++       neon_sat_mul_s,neon_sat_mul_s_q,\
++       neon_mla_b_long,neon_mla_h_long,neon_mla_s_long,\
++       neon_mul_b_long,neon_mul_h_long,neon_mul_s_long,\
++       neon_sat_mla_b_long,neon_sat_mla_h_long,neon_sat_mla_s_long,\
++       neon_sat_mla_h_scalar_long,neon_sat_mla_s_scalar_long,\
++       neon_sat_mul_b_long,neon_sat_mul_h_long,neon_sat_mul_s_long,\
++       neon_sat_mul_h_scalar_long,neon_sat_mul_s_scalar_long,\
++       crypto_pmull,\
++       neon_sat_shift_reg,neon_sat_shift_reg_q,\
++       neon_shift_reg,neon_shift_reg_q,\
++       neon_shift_imm,neon_shift_imm_q,\
++       neon_shift_imm_long,\
++       neon_sat_shift_imm,neon_sat_shift_imm_q,\
++       neon_sat_shift_imm_narrow_q,\
++       neon_shift_acc,neon_shift_acc_q,\
++       crypto_sha256_slow"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_asimd_fp_compare" 2
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_fp_abs_s,neon_fp_abs_s_q,\
++       neon_fp_abs_d,neon_fp_abs_d_q,\
++       neon_fp_neg_s,neon_fp_neg_s_q,\
++       neon_fp_neg_d,neon_fp_neg_d_q,\
++       neon_fp_compare_s,neon_fp_compare_s_q,\
++       neon_fp_compare_d,neon_fp_compare_d_q,\
++       neon_fp_minmax_s,neon_fp_minmax_s_q,\
++       neon_fp_minmax_d,neon_fp_minmax_d_q,\
++       neon_fp_addsub_s,neon_fp_addsub_s_q,\
++       neon_fp_addsub_d,neon_fp_addsub_d_q,\
++       neon_fp_reduc_add_s,neon_fp_reduc_add_s_q,\
++       neon_fp_reduc_add_d,neon_fp_reduc_add_d_q,\
++       neon_fp_abd_s,neon_fp_abd_s_q,\
++       neon_fp_abd_d,neon_fp_abd_d_q"))
++  "hip11_fsu_pipe")
++
++(define_insn_reservation "hip11_asimd_fdiv" 10
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_fp_div_s,neon_fp_div_s_q,\
++       neon_fp_div_d,neon_fp_div_d_q"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_asimd_fsqrt" 15
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_fp_sqrt_s,neon_fp_sqrt_s_q,\
++       neon_fp_sqrt_d,neon_fp_sqrt_d_q"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_asimd_fp_multiply_add" 4
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_fp_mla_s,neon_fp_mla_s_q,\
++       neon_fp_mla_d,neon_fp_mla_d_q,\
++       neon_fp_mla_s_scalar,neon_fp_mla_s_scalar_q,\
++       neon_fp_mul_s,neon_fp_mul_s_q,neon_fcmla,\
++       neon_fp_recps_s,neon_fp_recps_s_q,\
++       neon_fp_recps_d,neon_fp_recps_d_q,\
++       neon_fp_rsqrts_s,neon_fp_rsqrts_s_q,\
++       neon_fp_rsqrts_d,neon_fp_rsqrts_d_q"))
++  "hip11_fsu_pipe")
++
++(define_insn_reservation "hip11_asimd_frecpx" 3
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_fp_recpx_s,neon_fp_recpx_s_q,\
++       neon_fp_recpx_d,neon_fp_recpx_d_q,neon_tbl4,\
++       neon_dot,neon_dot_q"))
++  "hip11_fsu1")
++
++(define_insn_reservation "hip11_asimd_mmla" 6
++  (and (eq_attr "tune" "hip11")
++       (eq_attr "type" "neon_mla_s_q"))
++  "hip11_fsu1")
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 7ca60dd64..17d9e4126 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -19212,7 +19212,7 @@ performance of the code.  Permissible values for this option are:
+ @samp{octeontx2}, @samp{octeontx2t98}, @samp{octeontx2t96}
+ @samp{octeontx2t93}, @samp{octeontx2f95}, @samp{octeontx2f95n},
+ @samp{octeontx2f95mm},
+-@samp{a64fx},
++@samp{a64fx},@samp{hip11}
+ @samp{thunderx}, @samp{thunderxt88},
+ @samp{thunderxt88p1}, @samp{thunderxt81}, @samp{tsv110},
+ @samp{thunderxt83}, @samp{thunderx2t99}, @samp{thunderx3t110}, @samp{zeus},
+-- 
+2.33.0
+
diff --git a/0101-LoongArch-Improve-lasx_xvpermi_q_-LASX-mode-insn-pat.patch b/0101-LoongArch-Improve-lasx_xvpermi_q_-LASX-mode-insn-pat.patch
new file mode 100644
index 0000000000000000000000000000000000000000..94733f6fb023d7ebd89de3005b3914254be3028f
--- /dev/null
+++ b/0101-LoongArch-Improve-lasx_xvpermi_q_-LASX-mode-insn-pat.patch
@@ -0,0 +1,150 @@
+From f5355c67104cb5d150e1fd3b58807b2ad4e67b7c Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Fri, 5 Jan 2024 15:37:13 +0800
+Subject: [PATCH 101/188] LoongArch: Improve lasx_xvpermi_q_ insn
+ pattern
+
+For instruction xvpermi.q, unused bits in operands[3] need be set to 0 to avoid
+causing undefined behavior on LA464.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md: Set the unused bits in operand[3] to 0.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvpremi.c: Removed.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  |  9 ++-
+ .../loongarch/vector/lasx/lasx-xvpermi_q.c    | 64 +++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvpremi.c      | 19 ------
+ 3 files changed, 72 insertions(+), 20 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c
+ delete mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 95c6bae20..b4aa8e261 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -635,6 +635,8 @@
+    (set_attr "mode" "")])
+ 
+ ;; xvpermi.q
++;; Unused bits in operands[3] need be set to 0 to avoid
++;; causing undefined behavior on LA464.
+ (define_insn "lasx_xvpermi_q_"
+   [(set (match_operand:LASX 0 "register_operand" "=f")
+ 	(unspec:LASX
+@@ -643,7 +645,12 @@
+ 	   (match_operand     3 "const_uimm8_operand")]
+ 	  UNSPEC_LASX_XVPERMI_Q))]
+   "ISA_HAS_LASX"
+-  "xvpermi.q\t%u0,%u2,%3"
++{
++  int mask = 0x33;
++  mask &= INTVAL (operands[3]);
++  operands[3] = GEN_INT (mask);
++  return "xvpermi.q\t%u0,%u2,%3";
++}
+   [(set_attr "type" "simd_splat")
+    (set_attr "mode" "")])
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c
+new file mode 100644
+index 000000000..dbc29d2fb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c
+@@ -0,0 +1,64 @@
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long*)& __m256i_op0[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long*)& __m256i_op0[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long*)& __m256i_op0[0]) = 0x7fff7fff7fff0000;
++  *((unsigned long*)& __m256i_op1[3]) = 0x7575757575757575;
++  *((unsigned long*)& __m256i_op1[2]) = 0x7575757575757575;
++  *((unsigned long*)& __m256i_op1[1]) = 0x7575757575757575;
++  *((unsigned long*)& __m256i_op1[0]) = 0x7575757575757575;
++  *((unsigned long*)& __m256i_result[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000;
++  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[2]) = 0x000000000019001c;
++  *((unsigned long*)& __m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op0[0]) = 0x000000000019001c;
++  *((unsigned long*)& __m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[2]) = 0x00000000000001fe;
++  *((unsigned long*)& __m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_op1[0]) = 0x00000000000001fe;
++  *((unsigned long*)& __m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[2]) = 0x000000000019001c;
++  *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fe;
++  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0xb9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long*)& __m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long*)& __m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long*)& __m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long*)& __m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[2]) = 0xffff0000ffff0000;
++  *((unsigned long*)& __m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long*)& __m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff;
++  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0xca);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+deleted file mode 100644
+index e9fc1d7d3..000000000
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
++++ /dev/null
+@@ -1,19 +0,0 @@
+-/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
+-#include "../simd_correctness_check.h"
+-#include 
+-
+-int
+-main ()
+-{
+-  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
+-  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
+-  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
+-
+-  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
+-  long int long_op0, long_op1, long_op2, lont_out, lont_result;
+-  long int long_int_out, long_int_result;
+-  unsigned int unsigned_int_out, unsigned_int_result;
+-  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
+-
+-  return 0;
+-}
+-- 
+2.43.0
+
diff --git a/0102-Add-Crc32-Optimization-in-Gzip-For-crc32-algorithm-i.patch b/0102-Add-Crc32-Optimization-in-Gzip-For-crc32-algorithm-i.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6fb0cef7ac11ea7483c88f6e86c2936dd0dac366
--- /dev/null
+++ b/0102-Add-Crc32-Optimization-in-Gzip-For-crc32-algorithm-i.patch
@@ -0,0 +1,2164 @@
+From 8fa9788ac64a9ea5dc92c61c8f2ec11075cd17ec Mon Sep 17 00:00:00 2001
+From: XingYushuai 
+Date: Thu, 15 Dec 2022 14:34:16 +0800
+Subject: [PATCH 003/157] Add Crc32 Optimization in Gzip For crc32 algorithm in
+ APBC int_gzip.
+
+Match the crc32 lookup table algorithm. An example of a crc32 lookup table loop,
+e.g.: ```c do { c = crc_32_tab[((int)c ^ (*s++)) & 0xff] ^ (c >> 8); } while (--n);```
+
+Usage: `gcc -O3 -march=armv8.1-a -floop-crc yourfile.c`
+Note: The CPU you use needs to support the crc32 instructions.
+---
+ gcc/Makefile.in                               |    1 +
+ gcc/common.opt                                |    4 +
+ gcc/config/aarch64/aarch64-builtins.cc        |   30 +
+ gcc/config/aarch64/aarch64-protos.h           |    1 +
+ gcc/config/aarch64/aarch64.cc                 |   12 +
+ gcc/doc/invoke.texi                           |    6 +-
+ gcc/doc/tm.texi                               |    9 +
+ gcc/doc/tm.texi.in                            |    2 +
+ gcc/match.pd                                  |   23 +
+ gcc/passes.def                                |    1 +
+ gcc/target.def                                |   14 +
+ .../tree-ssa/loop-crc-loop-condition-fail.c   |   85 ++
+ .../tree-ssa/loop-crc-loop-form-fail-2.c      |   90 ++
+ .../gcc.dg/tree-ssa/loop-crc-loop-form-fail.c |  112 ++
+ .../gcc.dg/tree-ssa/loop-crc-sucess.c         |   83 +
+ .../tree-ssa/loop-crc-table-check-fail.c      |  114 ++
+ gcc/timevar.def                               |    1 +
+ gcc/tree-pass.h                               |    1 +
+ gcc/tree-ssa-loop-crc.cc                      | 1333 +++++++++++++++++
+ 19 files changed, 1921 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-condition-fail.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail-2.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/loop-crc-sucess.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/loop-crc-table-check-fail.c
+ create mode 100644 gcc/tree-ssa-loop-crc.cc
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index 5cd838270..2b9f025dc 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1649,6 +1649,7 @@ OBJS = \
+ 	tree-ssa-ifcombine.o \
+ 	tree-ssa-live.o \
+ 	tree-ssa-loop-ch.o \
++	tree-ssa-loop-crc.o \
+ 	tree-ssa-loop-im.o \
+ 	tree-ssa-loop-ivcanon.o \
+ 	tree-ssa-loop-ivopts.o \
+diff --git a/gcc/common.opt b/gcc/common.opt
+index b18f0b944..42fb2fc19 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1119,6 +1119,10 @@ fcrypto-accel-aes
+ Common Var(flag_crypto_accel_aes) Init(0) Optimization
+ Perform crypto acceleration AES pattern matching.
+ 
++floop-crc
++Common Var(flag_loop_crc) Optimization
++Do the loop crc conversion.
++
+ fauto-inc-dec
+ Common Var(flag_auto_inc_dec) Init(1) Optimization
+ Generate auto-inc/dec instructions.
+diff --git a/gcc/config/aarch64/aarch64-builtins.cc b/gcc/config/aarch64/aarch64-builtins.cc
+index 42276e7ca..3b952ef39 100644
+--- a/gcc/config/aarch64/aarch64-builtins.cc
++++ b/gcc/config/aarch64/aarch64-builtins.cc
+@@ -551,6 +551,12 @@ typedef struct
+ #define VAR1(T, N, MAP, FLAG, A) \
+   AARCH64_SIMD_BUILTIN_##T##_##N##A,
+ 
++enum aarch64_crc_builtins{
++  AARCH64_BUILTIN_CRC32B,
++  AARCH64_BUILTIN_CRC32H,
++  AARCH64_BUILTIN_CRC32W,
++};
++
+ enum aarch64_builtins
+ {
+   AARCH64_BUILTIN_MIN,
+@@ -1812,6 +1818,30 @@ aarch64_general_builtin_decl (unsigned code, bool)
+   return aarch64_builtin_decls[code];
+ }
+ 
++/* Implement TARGET_GET_CRC_BUILTIN_CODE  */
++unsigned 
++get_crc_builtin_code(unsigned code, bool)
++{
++  if (code > AARCH64_BUILTIN_CRC32W)
++    return AARCH64_BUILTIN_MIN;
++
++  unsigned res = AARCH64_BUILTIN_MIN;
++  switch (code) {
++    case AARCH64_BUILTIN_CRC32B:
++      res = AARCH64_BUILTIN_crc32b;
++      break;
++    case AARCH64_BUILTIN_CRC32H:
++      res = AARCH64_BUILTIN_crc32h;
++      break;
++    case AARCH64_BUILTIN_CRC32W:
++      res = AARCH64_BUILTIN_crc32w;
++      break;
++    default:
++      break;
++  }
++  return res;
++}
++
+ typedef enum
+ {
+   SIMD_ARG_COPY_TO_REG,
+diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
+index 475d174dd..853197ee9 100644
+--- a/gcc/config/aarch64/aarch64-protos.h
++++ b/gcc/config/aarch64/aarch64-protos.h
+@@ -994,6 +994,7 @@ gimple *aarch64_general_gimple_fold_builtin (unsigned int, gcall *,
+ 					     gimple_stmt_iterator *);
+ rtx aarch64_general_expand_builtin (unsigned int, tree, rtx, int);
+ tree aarch64_general_builtin_decl (unsigned, bool);
++unsigned  get_crc_builtin_code(unsigned , bool);
+ tree aarch64_general_builtin_rsqrt (unsigned int);
+ tree aarch64_builtin_vectorized_function (unsigned int, tree, tree);
+ void handle_arm_acle_h (void);
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 5537a537c..280e0b618 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -15210,6 +15210,15 @@ aarch64_builtin_decl (unsigned int code, bool initialize_p)
+   gcc_unreachable ();
+ }
+ 
++/* Implement TARGET_GET_CRC_BUILTIN_CODE.  */
++static unsigned 
++aarch64_get_crc_builtin_code(unsigned code, bool initialize_p)
++{
++  unsigned subcode = get_crc_builtin_code(code,initialize_p);
++  unsigned res = subcode << AARCH64_BUILTIN_SHIFT;
++  return res;
++}
++
+ /* Return true if it is safe and beneficial to use the approximate rsqrt optabs
+    to optimize 1.0/sqrt.  */
+ 
+@@ -27677,6 +27686,9 @@ aarch64_get_v16qi_mode ()
+ #undef TARGET_BUILTIN_DECL
+ #define TARGET_BUILTIN_DECL aarch64_builtin_decl
+ 
++#undef TARGET_GET_CRC_BUILTIN_CODE
++#define TARGET_GET_CRC_BUILTIN_CODE aarch64_get_crc_builtin_code
++
+ #undef TARGET_BUILTIN_RECIPROCAL
+ #define TARGET_BUILTIN_RECIPROCAL aarch64_builtin_reciprocal
+ 
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 7ca60dd64..c3ce148b0 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -537,7 +537,7 @@ Objective-C and Objective-C++ Dialects}.
+ -fisolate-erroneous-paths-dereference  -fisolate-erroneous-paths-attribute @gol
+ -fivopts  -fkeep-inline-functions  -fkeep-static-functions @gol
+ -fkeep-static-consts  -flimit-function-alignment  -flive-range-shrinkage @gol
+--floop-block  -floop-interchange  -floop-strip-mine @gol
++-floop-block  -floop-crc  -floop-interchange  -floop-strip-mine @gol
+ -floop-unroll-and-jam  -floop-nest-optimize @gol
+ -floop-parallelize-all  -flra-remat  -flto  -flto-compression-level @gol
+ -flto-partition=@var{alg}  -fmerge-all-constants @gol
+@@ -12159,6 +12159,10 @@ GIMPLE -> GRAPHITE -> GIMPLE transformation.  Some minimal optimizations
+ are also performed by the code generator isl, like index splitting and
+ dead code elimination in loops.
+ 
++@item -floop-crc 
++@opindex floop-crc
++Do the loop crc conversion.
++
+ @item -floop-nest-optimize
+ @opindex floop-nest-optimize
+ Enable the isl based loop nest optimizer.  This is a generic loop nest
+diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
+index 851d31c18..5a1e0fe43 100644
+--- a/gcc/doc/tm.texi
++++ b/gcc/doc/tm.texi
+@@ -11658,6 +11658,15 @@ If @var{code} is out of range the function should return
+ @code{error_mark_node}.
+ @end deftypefn
+ 
++@deftypefn {Target Hook} unsigned TARGET_GET_CRC_BUILTIN_CODE (unsigned @var{code}, bool @var{initialize_p})
++Define this hook to get crc32 builtin code.  It should be a function that
++returns the crc32 builtin function code @var{code}.
++If there is no such builtin and it cannot be initialized at this time
++if @var{initialize_p} is true the function should return @code{NULL_TREE}.
++If @var{code} is out of range the function should return
++@code{error_mark_node}.
++@end deftypefn
++
+ @deftypefn {Target Hook} rtx TARGET_EXPAND_BUILTIN (tree @var{exp}, rtx @var{target}, rtx @var{subtarget}, machine_mode @var{mode}, int @var{ignore})
+ 
+ Expand a call to a machine specific built-in function that was set up by
+diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
+index ac95cdf7a..6ff0eff66 100644
+--- a/gcc/doc/tm.texi.in
++++ b/gcc/doc/tm.texi.in
+@@ -7704,6 +7704,8 @@ to by @var{ce_info}.
+ 
+ @hook TARGET_BUILTIN_DECL
+ 
++@hook TARGET_GET_CRC_BUILTIN_CODE
++
+ @hook TARGET_EXPAND_BUILTIN
+ 
+ @hook TARGET_RESOLVE_OVERLOADED_BUILTIN
+diff --git a/gcc/match.pd b/gcc/match.pd
+index aee58e47b..1f42090a2 100644
+--- a/gcc/match.pd
++++ b/gcc/match.pd
+@@ -4409,6 +4409,29 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ )
+ #endif
+ 
++#if GIMPLE
++/* Try to match
++     _4 = (int) _3;      NOP_EXPR (SSA_NAME @2)
++     _5 = _4 ^ c_10;     BIT_XOR_EXPR (SSA_NAME@1, SSA_NAME)
++     _6 = _5 & 255;      BIT_AND_EXPR (SSA_NAME, INTEGER_CST@3)
++*/
++(match (crc_match_index @1 @2 @3)
++   (bit_and (bit_xor (nop SSA_NAME@2) SSA_NAME@1) INTEGER_CST@3)
++   (if (INTEGRAL_TYPE_P (type) && tree_to_uhwi(@3) == 255))
++)
++#endif
++
++#if GIMPLE
++/* Try to match
++     _8 = c_12 >> 8;      RSHIFT_EXPR (SSA_NAME @1, INTEGER_CST @2)
++     c_19 = _7 ^ _8;      BIT_XOR_EXPR (SSA_NAME@3, SSA_NAME)
++*/
++(match (crc_match_res @1 @2 @3)
++   (bit_xor SSA_NAME@3  (rshift SSA_NAME@1 INTEGER_CST@2))
++   (if (INTEGRAL_TYPE_P (type) && tree_to_uhwi(@2) == 8))
++)
++#endif
++
+ /* Simplification moved from fold_cond_expr_with_comparison.  It may also
+    be extended.  */
+ /* This pattern implements two kinds simplification:
+diff --git a/gcc/passes.def b/gcc/passes.def
+index cdc600298..89d6889e5 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -95,6 +95,7 @@ along with GCC; see the file COPYING3.  If not see
+ 	  NEXT_PASS (pass_cd_dce, false /* update_address_taken_p */);
+ 	  NEXT_PASS (pass_phiopt, true /* early_p */);
+ 	  NEXT_PASS (pass_array_widen_compare);
++	  NEXT_PASS (pass_loop_crc);
+ 	  NEXT_PASS (pass_tail_recursion);
+ 	  NEXT_PASS (pass_if_to_switch);
+ 	  NEXT_PASS (pass_convert_switch);
+diff --git a/gcc/target.def b/gcc/target.def
+index c9bb2b4c2..8abf49f0a 100644
+--- a/gcc/target.def
++++ b/gcc/target.def
+@@ -2413,6 +2413,20 @@ If @var{code} is out of range the function should return\n\
+ @code{error_mark_node}.",
+  tree, (unsigned code, bool initialize_p), NULL)
+ 
++/* Initialize (if INITIALIZE_P is true) and return the real code of
++   target-specific built-in function.
++   Return NULL if that is not possible.  Return error_mark_node if CODE
++   is outside of the range of valid crc32 codes.  */
++DEFHOOK
++(get_crc_builtin_code,
++ "Define this hook to get crc32 builtin code.  It should be a function that\n\
++returns the crc32 builtin function code @var{code}.\n\
++If there is no such builtin and it cannot be initialized at this time\n\
++if @var{initialize_p} is true the function should return @code{NULL_TREE}.\n\
++If @var{code} is out of range the function should return\n\
++@code{error_mark_node}.",
++ unsigned , (unsigned code, bool initialize_p), NULL)
++
+ /* Expand a target-specific builtin.  */
+ DEFHOOK
+ (expand_builtin,
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-condition-fail.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-condition-fail.c
+new file mode 100644
+index 000000000..3620e92f7
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-condition-fail.c
+@@ -0,0 +1,85 @@
++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */
++/* { dg-options "-O3 -march=armv8.1-a -mabi=lp64 -floop-crc -fdump-tree-loop_crc-details" } */
++
++#include 
++#include 
++typedef unsigned long ulg;
++typedef unsigned char uch;
++
++static const ulg crc_32_tab[] = {
++  0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
++  0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
++  0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
++  0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
++  0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
++  0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
++  0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
++  0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
++  0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
++  0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
++  0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
++  0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
++  0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
++  0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
++  0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
++  0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
++  0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
++  0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
++  0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
++  0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
++  0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
++  0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
++  0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
++  0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
++  0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
++  0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
++  0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
++  0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
++  0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
++  0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
++  0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
++  0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
++  0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
++  0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
++  0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
++  0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
++  0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
++  0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
++  0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
++  0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
++  0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
++  0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
++  0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
++  0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
++  0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
++  0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
++  0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
++  0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
++  0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
++  0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
++  0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
++  0x2d02ef8dL
++};
++
++ulg updcrc (s, n)
++    uch *s;                 /* pointer to bytes to pump through */
++    unsigned n;             /* number of bytes in s[] */
++{
++    register ulg c;         /* temporary variable */
++
++    static ulg crc = (ulg)0xffffffffL; /* shift register contents */
++
++    if (s == NULL) {
++        c = 0xffffffffL;
++    } else {
++        c = crc;
++        if (n) 
++        if (n) do {
++            c = crc_32_tab[((int)c ^ (*s++)) & 0xff] ^ (c >> 8);
++        } while (--n || c != 0) ;
++    }
++    crc = c;
++exit1:
++    return c ^ 0xffffffffL;       /* (instead of ~c for 64-bit machines) */
++}
++/* { dg-final { scan-tree-dump-times "Wrong loop form for crc matching." 1 "loop_crc"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail-2.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail-2.c
+new file mode 100644
+index 000000000..fac759c67
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail-2.c
+@@ -0,0 +1,90 @@
++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */
++/* { dg-options "-O3 -march=armv8.1-a -mabi=lp64 -floop-crc -fdump-tree-loop_crc-details" } */
++
++#include 
++#include 
++typedef unsigned long ulg;
++typedef unsigned char uch;
++
++static const ulg crc_32_tab[] = {
++  0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
++  0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
++  0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
++  0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
++  0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
++  0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
++  0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
++  0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
++  0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
++  0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
++  0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
++  0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
++  0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
++  0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
++  0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
++  0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
++  0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
++  0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
++  0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
++  0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
++  0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
++  0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
++  0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
++  0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
++  0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
++  0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
++  0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
++  0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
++  0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
++  0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
++  0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
++  0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
++  0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
++  0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
++  0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
++  0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
++  0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
++  0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
++  0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
++  0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
++  0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
++  0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
++  0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
++  0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
++  0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
++  0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
++  0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
++  0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
++  0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
++  0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
++  0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
++  0x2d02ef8dL
++};
++int test[5] = {0};
++
++ulg updcrc (s, n)
++    uch *s;                 /* pointer to bytes to pump through */
++    unsigned n;             /* number of bytes in s[] */
++{
++    register ulg c;         /* temporary variable */
++
++    static ulg crc = (ulg)0xffffffffL; /* shift register contents */
++
++    if (s == NULL) {
++        c = 0xffffffffL;
++    } else {
++        c = crc;
++        if (n) 
++        if (n) do {
++            c = crc_32_tab[((int)c ^ (*s++)) & 0xff] ^ (c >> 8);
++        } while (--n) ;
++    }
++    do {
++        c = crc_32_tab[(c ^ (*s++)) & 0xff] ^ (c >> 8);
++        test[c%5] = c;
++    } while (--n) ;
++    crc = c;
++    return c ^ 0xffffffffL;       /* (instead of ~c for 64-bit machines) */
++}
++/* { dg-final { scan-tree-dump-times "Table check fail. not only single array is read." 1 "loop_crc"} } */
++/* { dg-final { scan-tree-dump-times "Wrong crc table for crc matching." 1 "loop_crc"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail.c
+new file mode 100644
+index 000000000..ba9e5bb95
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-loop-form-fail.c
+@@ -0,0 +1,112 @@
++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */
++/* { dg-options "-O3 -march=armv8.1-a -mabi=lp64 -floop-crc -fdump-tree-loop_crc-details" } */
++
++#include 
++#include 
++typedef unsigned long ulg;
++typedef unsigned char uch;
++
++static const ulg crc_32_tab[] = {
++  0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
++  0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
++  0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
++  0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
++  0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
++  0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
++  0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
++  0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
++  0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
++  0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
++  0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
++  0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
++  0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
++  0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
++  0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
++  0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
++  0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
++  0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
++  0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
++  0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
++  0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
++  0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
++  0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
++  0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
++  0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
++  0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
++  0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
++  0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
++  0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
++  0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
++  0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
++  0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
++  0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
++  0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
++  0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
++  0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
++  0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
++  0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
++  0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
++  0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
++  0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
++  0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
++  0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
++  0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
++  0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
++  0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
++  0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
++  0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
++  0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
++  0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
++  0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
++  0x2d02ef8dL
++};
++
++/* check when the loop have a innor loop, should fail.  */
++ulg updcrc (s, n)
++    uch *s;                 /* pointer to bytes to pump through */
++    unsigned n;             /* number of bytes in s[] */
++{
++    register ulg c;         /* temporary variable */
++
++    static ulg crc = (ulg)0xffffffffL; /* shift register contents */
++
++    if (s == NULL) {
++        c = 0xffffffffL;
++    } else {
++        c = crc;
++        if (n) 
++        do {
++            c = crc_32_tab[(c ^ (*s++)) & 0xff] ^ (c >> 8);
++            for (int i = 0; i < 5; i++) {
++                c++;
++            }
++            
++        } while (--n);
++    }
++    crc = c;
++    return c ^ 0xffffffffL;       /* (instead of ~c for 64-bit machines) */
++}
++
++/* Check when the loop has a second backedge; should fail.  */
++ulg updcrc1(s, n)
++    uch *s;                 /* pointer to bytes to pump through */
++    unsigned n;             /* number of bytes in s[] */
++{
++    register ulg c;         /* temporary variable */
++
++    static ulg crc = (ulg)0xffffffffL; /* shift register contents */
++
++    if (s == NULL) {
++        c = 0xffffffffL;
++    } else {
++        c = crc;
++        if (n) 
++        do {
++            c = crc_32_tab[(c ^ (*s++)) & 0xff] ^ (c >> 8);     
++        } while (--n || c != 0) ;
++    }
++    crc = c;
++    return c ^ 0xffffffffL;       /* (instead of ~c for 64-bit machines) */
++}
++/* { dg-final { scan-tree-dump-times "Wrong crc table for crc matching." 1 "loop_crc"} } */
++/* { dg-final { scan-tree-dump-times "Table check fail. not only single array is read." 1 "loop_crc"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-sucess.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-sucess.c
+new file mode 100644
+index 000000000..dad7bdbfc
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-sucess.c
+@@ -0,0 +1,83 @@
++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */
++/* { dg-options "-O3 -march=armv8.1-a -mabi=lp64 -floop-crc -fdump-tree-loop_crc-details" } */
++
++#include 
++#include 
++typedef unsigned long ulg;
++typedef unsigned char uch;
++
++static const ulg crc_32_tab[] = {
++  0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
++  0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
++  0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
++  0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
++  0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
++  0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
++  0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
++  0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
++  0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
++  0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
++  0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
++  0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
++  0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
++  0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
++  0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
++  0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
++  0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
++  0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
++  0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
++  0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
++  0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
++  0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
++  0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
++  0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
++  0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
++  0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
++  0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
++  0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
++  0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
++  0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
++  0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
++  0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
++  0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
++  0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
++  0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
++  0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
++  0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
++  0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
++  0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
++  0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
++  0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
++  0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
++  0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
++  0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
++  0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
++  0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
++  0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
++  0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
++  0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
++  0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
++  0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
++  0x2d02ef8dL
++};
++
++ulg updcrc (s, n)
++    uch *s;                 /* pointer to bytes to pump through */
++    unsigned n;             /* number of bytes in s[] */
++{
++    register ulg c;         /* temporary variable */
++
++    static ulg crc = (ulg)0xffffffffL; /* shift register contents */
++
++    if (s == NULL) {
++        c = 0xffffffffL;
++    } else {
++        c = crc;
++        if (n) do {
++            c = crc_32_tab[((int)c ^ (*s++)) & 0xff] ^ (c >> 8);
++        } while (--n);
++    }
++    crc = c;
++    return c ^ 0xffffffffL;       /* (instead of ~c for 64-bit machines) */
++}
++/* { dg-final { scan-tree-dump-times "The 1th loop form is success matched,and the loop can be optimized." 1 "loop_crc"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-table-check-fail.c b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-table-check-fail.c
+new file mode 100644
+index 000000000..523a7740c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/loop-crc-table-check-fail.c
+@@ -0,0 +1,114 @@
++/* { dg-do compile { target {{ aarch64*-*-linux* } && lp64 } } } */
++/* { dg-options "-O3 -march=armv8.1-a -mabi=lp64 -floop-crc -fdump-tree-loop_crc-details" } */
++
++#include 
++#include 
++typedef unsigned long ulg;
++typedef unsigned char uch;
++
++static const ulg crc_32_tab[] = {
++  0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
++  0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
++  0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
++  0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
++  0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
++  0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
++  0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
++  0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
++  0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
++  0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
++  0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
++  0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
++  0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
++  0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
++  0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
++  0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
++  0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
++  0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
++  0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
++  0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
++  0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
++  0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
++  0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
++  0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
++  0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
++  0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
++  0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
++  0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
++  0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
++  0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
++  0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
++  0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
++  0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
++  0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
++  0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
++  0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
++  0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
++  0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
++  0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
++  0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
++  0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
++  0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
++  0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
++  0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
++  0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
++  0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
++  0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
++  0x37d83bf1L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
++  0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
++  0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
++  0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
++  0x2d02ef8dL
++};
++int test[5] = {0};
++
++/* Check when the loop does more than one array read, or writes an array; both should fail.  */
++ulg updcrc (s, n)
++    uch *s;                 /* pointer to bytes to pump through */
++    unsigned n;             /* number of bytes in s[] */
++{
++    register ulg c;         /* temporary variable */
++
++    static ulg crc = (ulg)0xffffffffL; /* shift register contents */
++
++    if (s == NULL) {
++        c = 0xffffffffL;
++    } else {
++        c = crc;
++        if (n) 
++        do {
++            c = crc_32_tab[(c ^ (*s++)) & 0xff] ^ (c >> 8) * test[c%5];
++        } while (--n) ;
++    }
++    do {
++        c = crc_32_tab[(c ^ (*s++)) & 0xff] ^ (c >> 8);
++        test[c%5] = c;
++    } while (--n) ;
++    crc = c;
++    return c ^ 0xffffffffL;       /* (instead of ~c for 64-bit machines) */
++}
++
++/* Check when the loop is not working on a correct crc_table; should fail.  */
++ulg updcrc1(s, n)
++    uch *s;                 /* pointer to bytes to pump through */
++    unsigned n;             /* number of bytes in s[] */
++{
++    register ulg c;         /* temporary variable */
++
++    static ulg crc = (ulg)0xffffffffL; /* shift register contents */
++
++    if (s == NULL) {
++        c = 0xffffffffL;
++    } else {
++        c = crc;
++        if (n) 
++        do {
++            c = crc_32_tab[(c ^ (*s++)) & 0xff] ^ (c >> 8);
++        } while (--n) ;
++    }
++    crc = c;
++    return c ^ 0xffffffffL;       /* (instead of ~c for 64-bit machines) */
++}
++/* { dg-final { scan-tree-dump-times "Table check fail. not only single array is read." 2 "loop_crc"} } */
++/* { dg-final { scan-tree-dump-times "Wrong crc table for crc matching." 3 "loop_crc"} } */
++/* { dg-final { scan-tree-dump-times "Table check fail.  Table not matching." 1 "loop_crc"} } */
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 8e7510eb3..8341b9ffd 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -220,6 +220,7 @@ DEFTIMEVAR (TV_TREE_COPY_RENAME	     , "tree rename SSA copies")
+ DEFTIMEVAR (TV_TREE_SSA_VERIFY       , "tree SSA verifier")
+ DEFTIMEVAR (TV_TREE_STMT_VERIFY      , "tree STMT verifier")
+ DEFTIMEVAR (TV_TREE_ARRAY_WIDEN_COMPARE, "tree array widen compare")
++DEFTIMEVAR (TV_TREE_LOOP_CRC         , "tree loop crc")
+ DEFTIMEVAR (TV_TREE_SWITCH_CONVERSION, "tree switch conversion")
+ DEFTIMEVAR (TV_TREE_SWITCH_LOWERING,   "tree switch lowering")
+ DEFTIMEVAR (TV_TREE_RECIP            , "gimple CSE reciprocals")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 34e60bc38..6cd679e10 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -454,6 +454,7 @@ extern gimple_opt_pass *make_pass_phiopt (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_forwprop (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_phiprop (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_array_widen_compare (gcc::context *ctxt);
++extern gimple_opt_pass *make_pass_loop_crc (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_tree_ifcombine (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_dse (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_nrv (gcc::context *ctxt);
+diff --git a/gcc/tree-ssa-loop-crc.cc b/gcc/tree-ssa-loop-crc.cc
+new file mode 100644
+index 000000000..b9c2f71ca
+--- /dev/null
++++ b/gcc/tree-ssa-loop-crc.cc
+@@ -0,0 +1,1333 @@
++/* This pass converts special loops that implement CRC algorithms into
++   simple CRC instructions on AArch64.
++   Copyright (C) 2023-2023 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it
++under the terms of the GNU General Public License as published by the
++Free Software Foundation; either version 3, or (at your option) any
++later version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT
++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "backend.h"
++#include "target.h"
++#include "tree.h"
++#include "gimple.h"
++#include "tree-pass.h"
++#include "gimple-ssa.h"
++#include "tree-pretty-print.h"
++#include "fold-const.h"
++#include "gimplify.h"
++#include "gimple-iterator.h"
++#include "tree-ssa-loop-manip.h"
++#include "tree-ssa-loop.h"
++#include "ssa.h"
++#include "tree-into-ssa.h"
++#include "cfganal.h"
++#include "cfgloop.h"
++#include "gimple-pretty-print.h"
++#include "tree-cfg.h"
++#include "cgraph.h"
++#include "print-tree.h"
++#include "cfghooks.h"
++#include "gimple-fold.h"
++#include "diagnostic-core.h"
++
++/* This pass handles scenarios similar to the following:
++ulg updcrc (s, n)
++    uch *s;
++    unsigned n;
++{
++    register ulg c;
++
++    static ulg crc = (ulg)0xffffffffL; 
++
++    if (s == NULL)
++    {
++        c = 0xffffffffL;
++    }
++    else
++    {
++        c = crc;
++        if (n)
++        do
++        {
++            c = crc_32_tab[((int)c ^ (*s++)) & 0xff] ^ (c >> 8);
++        } while (--n);
++    }
++    crc = c;
++    return c ^ 0xffffffffL;
++}
++
++If the hardware supports the crc instruction, then the pass completes the
++conversion of the above scenario into:
++
++#define SIZE_U32 sizeof(uint32_t)
++unsigned long updcrc(s, n)
++    unsigned char *s;
++    unsigned n;
++{
++  register unsigned long c;
++
++  static unsigned long crc = (unsigned long)0xffffffffL;
++
++  if (s == NULL)
++  {
++    c = 0xffffffffL;
++  }
++  else
++  {
++    c = crc;
++    if (n)
++    {
++      uint32_t nn = n/SIZE_U32;
++      do
++      {
++        c = __crc32w (c,*((uint32_t *)s));
++        s += SIZE_U32;
++      } while(--nn);
++    }
++  }
++  if (n & sizeof (uint16_t))
++  {
++    c = __crc32h (c, *((uint16_t *)s));
++    s += sizeof (uint16_t);
++  }
++  if (n & sizeof (uint8_t))
++    c = __crc32b (c, *s);
++  crc = c;
++  return c ^ 0xffffffffL;
++}
++
++This pass is to complete the conversion of such scenarios from
++the internal perspective of the compiler:
++1) match_crc_loop: The function completes the screening of such
++   scenarios;
++2) convert_to_new_loop: The function completes the conversion of
++   origin_loop to new loops, and removes origin_loop;
++3) origin_loop_info: The structure is used to record important
++   information of origin_loop: such as loop exit, initial value
++   of induction variable, etc;
++4) create_new_loops: The function is used as the key content
++   of the pass to complete the creation of new loops.  */
++
++extern bool gimple_crc_match_index (tree, tree *, tree (*)(tree));
++extern bool gimple_crc_match_res (tree, tree *, tree (*)(tree));
++
++static gimple *crc_table_read_stmt = NULL;
++
++static gphi *phi_s = NULL;
++static gphi *phi_c = NULL;
++static tree nn_tree = NULL;
++
++enum aarch64_crc_builtins
++{
++  AARCH64_BUILTIN_CRC32B,
++  AARCH64_BUILTIN_CRC32H,
++  AARCH64_BUILTIN_CRC32W,
++};
++
++/* The useful information of origin loop.  */
++struct origin_loop_info
++{
++  tree limit;       /* The limit index of the array in the old loop.  */
++  tree base_n;      /* The initial value of the old loop.  */
++  tree base_s;      /* The initial value of the old loop.  */
++  tree base_c;      /* The initial value of the old loop.  */
++  edge entry_edge;  /* The edge into the old loop.  */
++  edge exit_edge;   /* The edge outto the old loop.  */
++  basic_block exit_bb;
++};
++
++typedef struct origin_loop_info origin_loop_info;
++
++static origin_loop_info origin_loop;
++hash_map  n_map;
++hash_map  nn_map;
++hash_map  s_map;
++hash_map  c_map;
++hash_map  crc_map;
++
++/* Initialize the origin_loop structure.  */
++static void
++init_origin_loop_structure ()
++{
++  origin_loop.entry_edge = NULL;
++  origin_loop.exit_edge = NULL;
++  origin_loop.exit_bb = NULL;
++  origin_loop.limit = NULL;
++  origin_loop.base_n = NULL;
++  origin_loop.base_s = NULL;
++  origin_loop.base_c = NULL;
++}
++
++/* Get the edge that first entered the loop.  */
++static edge
++get_loop_preheader_edge (class loop *loop)
++{
++  edge e;
++  edge_iterator ei;
++
++  FOR_EACH_EDGE (e, ei, loop->header->preds)
++    if (e->src != loop->latch)
++      break;
++
++  return e;
++}
++
++/* Returns true if t is SSA_NAME and user variable exists.  */
++
++static bool
++ssa_name_var_p (tree t)
++{
++  if (!t || TREE_CODE (t) != SSA_NAME)
++    return false;
++  if (SSA_NAME_VAR (t))
++    return true;
++  return false;
++}
++
++/* Returns true if t1 and t2 are SSA_NAME and belong to the same variable.  */
++
++static bool
++same_ssa_name_var_p (tree t1, tree t2)
++{
++  if (!ssa_name_var_p (t1) || !ssa_name_var_p (t2))
++    return false;
++  if (SSA_NAME_VAR (t1) == SSA_NAME_VAR (t2))
++    return true;
++  return false;
++}
++
++/* Get origin loop induction variable upper bound.  */
++
++static bool
++get_iv_upper_bound (gimple *stmt)
++{
++  if (origin_loop.limit != NULL || origin_loop.base_n != NULL)
++    return false;
++
++  tree lhs = gimple_cond_lhs (stmt);
++  tree rhs = gimple_cond_rhs (stmt);
++
++  if (TREE_CODE (TREE_TYPE (lhs)) != INTEGER_TYPE
++      || TREE_CODE (TREE_TYPE (rhs)) != INTEGER_TYPE)
++    return false;
++
++  /* TODO: Currently, the input restrictions on lhs and rhs are implemented
++     through PARM_DECL.  We may consider relax the restrictions later, and
++     we need to consider the overall adaptation scenario and adding test
++     cases.  */
++  if (ssa_name_var_p (lhs) && TREE_CODE (SSA_NAME_VAR (lhs)) == PARM_DECL)
++  {
++    origin_loop.limit = rhs;
++    origin_loop.base_n = lhs;
++  }
++  else
++    return false;
++
++  if (origin_loop.limit != NULL && origin_loop.base_n != NULL)
++    return true;
++
++  return false;
++}
++
++/* Get origin loop info.  */
++static bool
++get_origin_loop_info (class loop *loop)
++{
++  auto_vec edges = get_loop_exit_edges (loop);
++  origin_loop.exit_edge = edges[0];
++  origin_loop.exit_bb = origin_loop.exit_edge->dest;
++  origin_loop.entry_edge = get_loop_preheader_edge (loop);
++  origin_loop.base_s = PHI_ARG_DEF_FROM_EDGE (phi_s,origin_loop.entry_edge);
++  origin_loop.base_c = PHI_ARG_DEF_FROM_EDGE (phi_c,origin_loop.entry_edge);
++
++  basic_block preheader_bb = origin_loop.entry_edge->src;
++
++  if (preheader_bb->preds->length () != 1)
++    return false;
++
++  edge entry_pre_bb_edge = EDGE_PRED (preheader_bb, 0);
++
++  basic_block pre_preheader_bb = entry_pre_bb_edge->src;
++
++  gimple_stmt_iterator gsi;
++  gimple *stmt;
++  bool get_upper_bound = false;
++  for (gsi = gsi_start_bb (pre_preheader_bb); !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      stmt = gsi_stmt (gsi);
++      if (stmt && gimple_code (stmt) == GIMPLE_COND
++          && get_iv_upper_bound (stmt))
++        {
++          get_upper_bound = true;
++          break;
++        }
++    }
++
++  return get_upper_bound;
++}
++
++/* The loop form check will check the entire loop control flow
++   It should be a loop that:
++   1. a do-while loop with header and latch only with no other control flow
++      inside the loop
++   2. have only one exiting edge
++   3. have only one back edge and one entry edge
++*/
++static bool
++crc_loop_form_check (class loop *loop)
++{
++  if (loop->num_nodes > 2 || loop->inner)
++    return false;
++  // Should only have 1 exit edge
++  auto_vec edges = get_loop_exit_edges (loop);
++  if (edges.length() != 1)
++    return false;
++
++  // The header should have only 2 incoming edges
++  // One of them is the preheader edge and the other is the backedge from the
++  // latch
++  if (EDGE_COUNT (loop->header->preds) != 2)
++    return false;
++  edge e1 = EDGE_PRED (loop->header, 0);
++  edge e2 = EDGE_PRED (loop->header, 1);
++
++  if ((e1->src == loop->latch && e2->src->loop_father != loop)
++      || (e2->src == loop->latch && e1->src->loop_father != loop))
++    return true;
++
++  return false;
++}
++
++/* Check there is only one array is read in the loop.
++   Return the only array as crc_table.  */
++static bool
++only_one_array_read (class loop *loop, tree &crc_table)
++{
++  gimple_stmt_iterator gsi;
++  gimple *stmt;
++  bool res = false;
++  for (gsi = gsi_start_bb (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      stmt = gsi_stmt (gsi);
++      if (stmt == NULL)
++        return false;
++
++      if (gimple_code (stmt) == GIMPLE_ASSIGN
++          && TREE_CODE (gimple_assign_lhs (stmt)) == ARRAY_REF)
++        return false;
++
++      /* Only one-dimensional integer arrays meet the condition.  */
++      if (gimple_code (stmt) == GIMPLE_ASSIGN
++          && TREE_CODE (gimple_assign_rhs1 (stmt)) == ARRAY_REF
++          && TREE_CODE (TREE_OPERAND (gimple_assign_rhs1 (stmt), 0)) == VAR_DECL
++          && TREE_CODE (TREE_TYPE (gimple_assign_rhs1 (stmt))) == INTEGER_TYPE)
++        {
++          if (crc_table == NULL
++              && TREE_READONLY (gimple_assign_rhs1 (stmt)))
++            {
++              crc_table = gimple_assign_rhs1 (stmt);
++              crc_table_read_stmt = stmt;
++              res = true;
++            }
++          else
++            return false;
++        }
++    }
++  return res;
++}
++
++static const unsigned HOST_WIDE_INT crc_32_tab[] = {
++  0x00000000L, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x076dc419L,
++  0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0x0edb8832L, 0x79dcb8a4L,
++  0xe0d5e91eL, 0x97d2d988L, 0x09b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
++  0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
++  0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
++  0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
++  0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
++  0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
++  0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
++  0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
++  0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
++  0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
++  0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
++  0x01db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x06b6b51fL,
++  0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0x0f00f934L, 0x9609a88eL,
++  0xe10e9818L, 0x7f6a0dbbL, 0x086d3d2dL, 0x91646c97L, 0xe6635c01L,
++  0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
++  0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
++  0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
++  0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
++  0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
++  0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
++  0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
++  0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
++  0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
++  0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
++  0x03b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x04db2615L,
++  0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0x0d6d6a3eL, 0x7a6a5aa8L,
++  0xe40ecf0bL, 0x9309ff9dL, 0x0a00ae27L, 0x7d079eb1L, 0xf00f9344L,
++  0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
++  0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
++  0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
++  0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
++  0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
++  0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
++  0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
++  0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
++  0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
++  0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
++  0x026d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x05005713L,
++  0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0x0cb61b38L, 0x92d28e9bL,
++  0xe5d5be0dL, 0x7cdcefb7L, 0x0bdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
++  0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
++  0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
++  0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
++  0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
++  0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
++  0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
++  0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
++  0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
++  0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
++  0x2d02ef8dL
++};
++
++/* Check the content of the array.  */
++static bool
++match_crc_table (tree crc_table)
++{
++  const unsigned LOW_BOUND = 0;
++  const unsigned UP_BOUND = 255;
++  const unsigned ELEMENT_SIZE = 8;
++  tree low_bound = array_ref_low_bound (crc_table);
++  tree up_bound = array_ref_up_bound (crc_table);
++  tree element_size = array_ref_element_size (crc_table);
++  if (!tree_fits_uhwi_p(low_bound) || !tree_fits_uhwi_p(up_bound) ||
++      !tree_fits_uhwi_p(element_size))
++    return false;
++  unsigned HOST_WIDE_INT lb = tree_to_uhwi (low_bound);
++  unsigned HOST_WIDE_INT ub = tree_to_uhwi (up_bound);
++  unsigned HOST_WIDE_INT es = tree_to_uhwi (element_size);
++  if (lb != LOW_BOUND || ub != UP_BOUND || es != ELEMENT_SIZE)
++    return false;
++
++  tree decl = TREE_OPERAND (crc_table, 0);
++  tree ctor = ctor_for_folding(decl);
++  for (int i = lb; i <= ub; i++)
++    {
++      unsigned HOST_WIDE_INT val = tree_to_uhwi (CONSTRUCTOR_ELT (ctor,
++                                                                  i)->value);
++      if (crc_32_tab[i] != val)
++        return false;
++    }
++  return true;
++}
++
++/* Check the crc table.  The loop should have only one data reference. 
++   And match the data reference with the predefined array.  */
++static bool
++crc_table_check (class loop *loop)
++{
++  tree crc_table = NULL;
++  if (!only_one_array_read (loop, crc_table))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\nTable check fail. not only single array "
++                            "is read.\n");
++      return false;
++    }
++  if (!match_crc_table (crc_table))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\nTable check fail.  Table not matching.\n");
++      return false;
++    }
++  return  true;
++}
++
++/* Check whether the evolution pattern of phi is phi = SSA_NAME + target.  */
++static bool
++evolution_pattern_plus_with_p (class loop *loop, gphi *phi,
++                               unsigned HOST_WIDE_INT target)
++{
++  edge backedge = find_edge (loop->latch, loop->header);
++  if (backedge == NULL)
++    return false;
++  tree evolution_node = PHI_ARG_DEF_FROM_EDGE (phi, backedge);
++  gimple *evolution_expr = SSA_NAME_DEF_STMT (evolution_node);
++
++  if (evolution_expr && (gimple_assign_rhs_code (evolution_expr) == PLUS_EXPR ||
++                         gimple_assign_rhs_code (evolution_expr)
++                             == POINTER_PLUS_EXPR))
++    {
++      tree rhs1 = gimple_assign_rhs1 (evolution_expr);
++      tree rhs2 = gimple_assign_rhs2 (evolution_expr);
++      if (TREE_CODE (rhs1) == SSA_NAME && TREE_CODE (rhs2) == INTEGER_CST
++          && tree_to_uhwi (rhs2) == target)
++        return true;
++    }
++  return false;
++}
++
++/* Check whether there are only 3 phi nodes in the header block.
++   Return 3 phi nodes in the capture.  */
++static bool
++check_num_of_phi (basic_block header,  gphi *capture[])
++{
++  gphi *phi;
++  gphi_iterator gsi;
++  int num_of_phi = 0;
++
++  for (gsi = gsi_start_phis (header); !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      phi = gsi.phi ();
++      if (phi)
++        num_of_phi++;
++      if (num_of_phi > 3)
++        return false;
++      capture[num_of_phi - 1] = phi;
++    }
++  /* Phi node should be exactly 3.  */
++  return num_of_phi == 3;   
++}
++
++/* Check the evolution pattern of the three phi nodes.
++   One node should increase by 1 each iteration (s), one should decrease
++   by 1 each iteration (n), and the third should do neither (c).  Return
++   the 3 phi nodes in the capture in the order s, n, c.  */
++static bool
++check_evolution_pattern (class loop *loop,  gphi *capture[])
++{
++  gphi *s = NULL;
++  gphi *n = NULL;
++  gphi *c = NULL;
++
++  for (int i = 0; i < 3; i++)
++    {
++      if (evolution_pattern_plus_with_p (loop, capture[i], 1))
++        {
++          if (s != NULL)
++            return false;
++          s = capture[i];
++          phi_s = s;
++        }
++      else if (evolution_pattern_plus_with_p (loop, capture[i], 4294967295))
++        {
++          if (n != NULL)
++            return false;
++          n = capture[i];
++        }
++      else
++        {
++          if (c != NULL)
++            return false;
++          c = capture[i];
++          phi_c = c;
++        }
++    }
++
++  // Some evolution pattern could not be found.
++  if (!n || !s || !c)
++    return false;
++
++  capture[0] = s;
++  capture[1] = n;
++  capture[2] = c;
++  return true;
++}
++/* Check the calculation pattern before and after the crc_table array read stmt.
++   _7 = crc_32_tab[_6];
++   The calculation of index _6 should be the result of a sequence of
++   calculations on s and c.
++   The result of the array read _7 should be used to calculate the new c.  */
++static bool
++check_calculation_pattern (class loop *loop,  gphi *capture[])
++{
++  gphi *s = capture[0];
++  gphi *c = capture[2];
++  tree res_ops[3];
++  tree index = TREE_OPERAND (gimple_assign_rhs1 (crc_table_read_stmt), 1);
++
++  /* Try to match
++  _4 = (int) _3; //NOP_EXPR (SSA_NAME @2)
++  _5 =  _4 ^ c_10; //BIT_XOR_EXPR (SSA_NAME, PHI @1)
++  _6 = _5 & 255; //BIT_XOR_EXPR (SSA_NAME, INTEGER_CST@3)
++  */
++  if (!gimple_crc_match_index (index, res_ops, NULL))
++    return false;
++  gimple *s_res_stmt = SSA_NAME_DEF_STMT (res_ops[0]);
++  if (!s_res_stmt)
++    return false;
++  gimple *s_def_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (s_res_stmt));
++  if (!s_def_stmt)
++    return false;
++  tree s_res = TREE_OPERAND (gimple_assign_rhs1 (s_def_stmt), 0);
++  if (res_ops[1] != gimple_phi_result (c) || s_res != gimple_phi_result (s))
++    return false;
++
++  /* Try to match 
++  _8 = c_12 >> 8; // RSHIFT_EXPR (SSA_NAME @1, INTEGER_CST @2)
++  c_19 = _7 ^ _8; // BIT_XOR_EXPR (SSA_NAME@3, SSA_NAME)
++  */
++  edge backedge = find_edge (loop->latch, loop->header);
++  tree updated_c = PHI_ARG_DEF_FROM_EDGE (c, backedge);
++  if (!gimple_crc_match_res (updated_c, res_ops, NULL))
++    return false;
++  if (res_ops[0] != gimple_phi_result (c)
++      || res_ops[2] != gimple_assign_lhs (crc_table_read_stmt))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\n gimple_crc_match_res pattern check failed.\n");
++      return false;
++    }
++
++  return true;
++}
++
++/* Check the exit condition is n != 0.  */
++static bool
++check_exit_condition (class loop *loop,  gphi *n)
++{
++  edge backedge = find_edge (loop->latch, loop->header);
++  gimple *cond_stmt = gsi_stmt (gsi_last_bb (loop->header));
++  if (!cond_stmt || gimple_code (cond_stmt) != GIMPLE_COND 
++      || gimple_cond_code (cond_stmt) != NE_EXPR
++      || gimple_cond_lhs (cond_stmt) != PHI_ARG_DEF_FROM_EDGE (n, backedge)
++      || tree_to_uhwi (gimple_cond_rhs (cond_stmt)) != 0)
++    return false;
++  
++  return  true;
++}
++
++/* Check the loop body. The loop body we are trying to match is
++
++# s_10 = PHI 
++# n_11 = PHI 
++# c_12 = PHI 
++_1 = (int) c_12;
++s_18 = s_10 + 1;
++_3 = *s_10;
++_4 = (int) _3;
++_5 = _1 ^ _4;
++_6 = _5 & 255;
++_7 = crc_32_tab[_6];
++_8 = c_12 >> 8;
++c_19 = _7 ^ _8;
++n_20 = n_11 + 4294967295;
++if (n_20 != 0)
++  goto ; [INV]
++else
++  goto ; [INV]
++
++which is doing a very simple calculation
++do {
++        c = crc_32_tab[(c ^ (*s++)) & 0xff] ^ (c >> 8);
++} while (--n);
++
++In this case, we don't want this loop to have any other operation inside,
++so the matching conditions are:
++1. There are only 3 loop variants during each iteration, namely s, c, n,
++   which is enforced by requiring that the loop has exactly 3 phi nodes.
++2. The 3 loop variants should have an evolution pattern where one of the
++   3 nodes is increased by 1 every iteration, one is decreased by 1 every
++   iteration, and the 3rd one is neither.  These three SSA values will be
++   captured for the later arithmetic pattern matching.
++3. Pattern matching for the index of crc_table.
++4. Pattern matching for the result of c calculation after read from crc_table.
++5. The exit condition matching.
++  */
++static bool
++crc_loop_body_check (class loop *loop)
++{
++  basic_block header = loop->header;
++  gphi *capture[3];
++  if (!check_num_of_phi(header, capture))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\n num of phi noeds check failed.\n");
++      return false;
++    }
++  if (!check_evolution_pattern (loop, capture))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\n evolution pattern check failed.\n");
++      return false;
++    }
++  if (!check_calculation_pattern (loop, capture))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\n calculation pattern check failed.\n");
++      return false;
++    }
++  if (!check_exit_condition (loop, capture[1]))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\n exit condition check failed.\n");
++      return false;
++    }
++  return true;
++}
++
++static bool check_prev_bb (basic_block prev_bb, enum tree_code code)
++{
++  gimple_stmt_iterator gsi;
++  gimple *stmt;
++  for (gsi = gsi_start_bb (prev_bb); !gsi_end_p (gsi);
++       gsi_next (&gsi))
++    {
++      stmt = gsi_stmt (gsi);
++      if (stmt == NULL)
++        return false;
++
++      if (gimple_code (stmt) == GIMPLE_COND
++          && gimple_cond_code (stmt) == code
++          && TREE_CODE (gimple_cond_rhs (stmt)) == INTEGER_CST
++          && tree_int_cst_sgn (gimple_cond_rhs (stmt)) == 0)
++        return true;
++    }
++  return false;
++}
++
++/* Check the prev_bb of prev_bb of loop header.  The prev_bb we are trying to
++match is
++
++c_15 = crc;
++if (n_16 (D) != 0)
++  goto ; [INV]
++else
++  goto ; [INV]
++
++  In this case, we must be sure that n is not zero,
++  so the match condition is
++  1. n is not zero.
++
++   :
++if (s_13 (D) == 0B)
++  goto ; [INV]
++else
++  goto ; [INV]
++
++  In this case, we must be sure that s is not NULL,
++  so the match condition is
++  1. s is not NULL.
++*/
++static bool
++crc_prev_bb_of_loop_header_check (class loop *loop)
++{
++  basic_block header = loop->header;
++  basic_block prev_header_bb = header->prev_bb;
++  if (NULL == prev_header_bb)
++    return false;
++
++  basic_block prev_prev_header_bb = prev_header_bb->prev_bb;
++  if (NULL == prev_prev_header_bb)
++    return false;
++
++  if (!check_prev_bb (prev_prev_header_bb, NE_EXPR))
++    return false;
++
++  basic_block first_bb = prev_prev_header_bb->prev_bb;
++  if (NULL == first_bb)
++    return false;
++
++  if (!check_prev_bb (first_bb, EQ_EXPR))
++    return false;
++
++  return true;
++}
++
++static bool
++match_crc_loop (class loop *loop)
++{
++  if (!crc_loop_form_check (loop))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\nWrong loop form for crc matching.\n");
++      return false;
++    }
++  if (!crc_table_check (loop))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\nWrong crc table for crc matching.\n");
++      return false;
++    }
++  if (!crc_loop_body_check (loop))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\nWrong loop body for crc matching.\n");
++      return false;
++    }
++  if (!crc_prev_bb_of_loop_header_check (loop))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "\nWrong prev basic_blocks of loop header for"
++                            " crc matching.\n");
++      return false;
++    }
++    
++  init_origin_loop_structure ();
++  if (!get_origin_loop_info (loop))
++    return false;
++
++  return true;
++}
++
++static void
++create_new_bb (basic_block &new_bb, basic_block after_bb,
++               basic_block dominator_bb, class loop *outer)
++{
++  new_bb = create_empty_bb (after_bb);
++  add_bb_to_loop (new_bb, outer);
++  set_immediate_dominator (CDI_DOMINATORS, new_bb, dominator_bb);
++}
++
++static void
++change_preheader_bb (edge entry_edge)
++{
++  gimple_seq stmts = NULL;
++  gimple_stmt_iterator gsi;
++  gimple *g;
++  tree lhs1;
++
++  lhs1 = create_tmp_var (TREE_TYPE (origin_loop.base_n),"nn");
++  lhs1 = make_ssa_name (lhs1);
++  gsi = gsi_last_bb (entry_edge->src);
++  g = gimple_build_assign (lhs1, RSHIFT_EXPR, origin_loop.base_n,
++                           build_int_cst (TREE_TYPE (origin_loop.base_n), 2));
++  gimple_seq_add_stmt (&stmts, g);
++  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++  nn_tree = lhs1;
++  set_current_def (nn_tree, lhs1);
++  nn_map.put (entry_edge->src, lhs1);
++}
++
++static gphi *
++create_phi_node_for_bb (tree old_name, basic_block bb)
++{
++  gphi *phi = create_phi_node (NULL_TREE, bb);
++  create_new_def_for (old_name, phi, gimple_phi_result_ptr (phi));
++  return phi;
++}
++
++static gimple *
++call_builtin_fun (int code,tree &lhs, tree arg1, tree arg2)
++{
++  unsigned int builtin_code = targetm.get_crc_builtin_code (code, true);
++  // Get the decl of __builtin_aarch64_crc32w
++  tree fn = targetm.builtin_decl (builtin_code, true);
++  if (!fn || fn == error_mark_node)
++    fatal_error (input_location,
++                 "target specific builtin not available");
++  gimple *call_builtin = gimple_build_call (fn, 2, arg1, arg2);
++  lhs = make_ssa_name (unsigned_type_node);
++  gimple_call_set_lhs (call_builtin, lhs);
++
++  return call_builtin;
++}
++
++/* Create loop_header and loop_latch for new loop
++    :
++   # s_14 = PHI 
++   # c_16 = PHI 
++   # nn_19 = PHI 
++   _1 = (unsigned int) c_16;
++   _2 = MEM[(uint32_t *)s_14];
++   _40 = __builtin_aarch64_crc32w (_1, _2);
++   c_29 = (long unsigned int) _40;
++   s_30 = s_14 + 4;
++   nn_31 = nn_19 + 4294967295;
++   if (nn_31 != 0)
++   The IR of bb is as above.  */
++static void
++create_loop_bb (basic_block &loop_bb, basic_block after_bb,
++                basic_block dominator_bb, class loop *outer, edge entry_edge)
++{
++  gimple_seq stmts = NULL;
++  gimple_stmt_iterator gsi;
++  gimple *g;
++  gphi *phi_s_loop;
++  gphi *phi_c_loop;
++  gphi *phi_nn_loop;
++
++  create_new_bb (loop_bb, after_bb, dominator_bb, outer);
++  redirect_edge_and_branch (entry_edge, loop_bb);
++  gsi = gsi_last_bb (loop_bb);
++  tree entry_nn = get_current_def (nn_tree);
++  phi_s_loop = create_phi_node_for_bb (origin_loop.base_s, loop_bb);
++  phi_c_loop = create_phi_node_for_bb (origin_loop.base_c, loop_bb);
++  phi_nn_loop = create_phi_node_for_bb (entry_nn, loop_bb);
++
++  tree res_s = gimple_phi_result (phi_s_loop);
++  tree res_nn = gimple_phi_result (phi_nn_loop);
++  tree lhs1 = gimple_build (&stmts, NOP_EXPR, unsigned_type_node,
++                            gimple_phi_result (phi_c_loop));
++  g = gimple_build_assign (make_ssa_name (unsigned_type_node),
++                           fold_build2 (MEM_REF, unsigned_type_node, res_s,
++                                        build_int_cst (
++                                            build_pointer_type (
++                                                unsigned_type_node),0)));
++  gimple_seq_add_stmt (&stmts, g);
++  tree lhs2 = gimple_assign_lhs (g);  // _2 = MEM[(uint32_t *)s_14];
++  unsigned int code = AARCH64_BUILTIN_CRC32W;
++  tree lhs3;
++  gimple *build_crc32w = call_builtin_fun (code, lhs3, lhs1, lhs2);
++  crc_map.put (loop_bb, lhs3);
++  gimple_seq_add_stmt (&stmts, build_crc32w);
++
++  tree lhs4 = copy_ssa_name (origin_loop.base_c);
++  g = gimple_build_assign (lhs4, NOP_EXPR, lhs3);
++  gimple_seq_add_stmt (&stmts, g);
++  c_map.put (loop_bb, lhs4);
++
++  tree lhs5 = copy_ssa_name (origin_loop.base_s);
++  g = gimple_build_assign (lhs5, POINTER_PLUS_EXPR, res_s,
++                           build_int_cst (sizetype, 4));
++  gimple_seq_add_stmt (&stmts, g);
++  s_map.put (loop_bb, lhs5);
++
++  tree lhs6 = copy_ssa_name (nn_tree);
++  g = gimple_build_assign (lhs6, PLUS_EXPR, res_nn,
++                           build_int_cst (TREE_TYPE (res_nn), 4294967295));
++  gimple_seq_add_stmt (&stmts,g);
++  nn_map.put (loop_bb, lhs6);
++
++  gcond *cond_stmt = gimple_build_cond (NE_EXPR, lhs6, origin_loop.limit,
++                                        NULL_TREE, NULL_TREE);
++  gimple_seq_add_stmt (&stmts, cond_stmt);
++  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++}
++
++/*   :
++    # c_6 = PHI 
++    # s_46 = PHI 
++    _44 = n_26(D) & 2;
++    if (_44 != 0)
++    The IR of bb is as above.  */
++static void
++create_cond_bb (basic_block &cond_bb, basic_block after_bb,
++                basic_block dominator_bb, class loop *outer)
++{
++  gimple_seq stmts = NULL;
++  gimple_stmt_iterator gsi;
++  gphi *phi_s_loop;
++  gphi *phi_c_loop;
++
++  create_new_bb (cond_bb, after_bb, dominator_bb, outer);
++  gsi = gsi_last_bb (cond_bb);
++  tree entry_nn = get_current_def (nn_tree);
++  phi_s_loop = create_phi_node_for_bb (origin_loop.base_s, cond_bb);
++  phi_c_loop = create_phi_node_for_bb (origin_loop.base_c, cond_bb);
++  tree res_s = gimple_phi_result (phi_s_loop);
++  set_current_def (origin_loop.base_s, res_s);
++  s_map.put (cond_bb, res_s);
++  tree res_c = gimple_phi_result (phi_c_loop);
++  set_current_def (origin_loop.base_c, res_c);
++  c_map.put (cond_bb, res_c);
++
++  tree lhs1 = gimple_build (&stmts, BIT_AND_EXPR,
++                            TREE_TYPE (origin_loop.base_n), origin_loop.base_n,
++                            build_int_cst (TREE_TYPE (origin_loop.base_n), 2));
++  gcond *cond_stmt = gimple_build_cond (NE_EXPR, lhs1, origin_loop.limit,
++                                        NULL_TREE, NULL_TREE);
++  gimple_seq_add_stmt (&stmts, cond_stmt);
++  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++}
++
++/*   :
++    _7 = MEM[(uint16_t *)s_46];
++    _41 = __builtin_aarch64_crc32h (_8, _7);
++    c_33 = (long unsigned int) _41;
++    s_34 = s_30 + 2;
++    The IR of bb is as above.  */
++static void
++create_cond_true_bb (basic_block &cond_true_bb, basic_block after_bb,
++                     basic_block dominator_bb, class loop *outer)
++{
++  gimple_seq stmts = NULL;
++  gimple *g;
++  gimple_stmt_iterator gsi;
++
++  create_new_bb (cond_true_bb, after_bb, dominator_bb, outer);
++  gsi = gsi_last_bb (cond_true_bb);
++  tree s_46 = *(s_map.get (after_bb));
++  tree type = build_pointer_type (short_unsigned_type_node);
++  g = gimple_build_assign (make_ssa_name (short_unsigned_type_node),
++                           fold_build2 (MEM_REF, short_unsigned_type_node, s_46,
++                                        build_int_cst (type, 0)));
++  gimple_seq_add_stmt (&stmts,g);
++  tree lhs1 = gimple_assign_lhs (g);  // _7 = MEM[(uint16_t *)s_46];
++  unsigned int code = AARCH64_BUILTIN_CRC32H;
++  tree lhs2;
++  gimple *call_builtin = call_builtin_fun (code, lhs2,
++                             *(crc_map.get (
++                                  cond_true_bb->prev_bb->prev_bb)), lhs1);
++  crc_map.put (cond_true_bb,lhs2);
++  gimple_seq_add_stmt (&stmts, call_builtin);
++
++  tree lhs3 = copy_ssa_name (origin_loop.base_c);
++  g = gimple_build_assign (lhs3, NOP_EXPR, lhs2);
++  gimple_seq_add_stmt (&stmts, g);
++  c_map.put (cond_true_bb, lhs3);
++
++  tree lhs5 = copy_ssa_name (s_46);
++  g = gimple_build_assign (lhs5, POINTER_PLUS_EXPR, s_46,
++                           build_int_cst (sizetype, 2)); //  s_30 + 2;
++  gimple_seq_add_stmt (&stmts, g);
++  s_map.put (cond_true_bb, lhs5);
++
++  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++  s_map.put (cond_true_bb, lhs5);
++}
++
++/*  :
++  # s_15 = PHI 
++  # c_17 = PHI 
++  _3 = n_26(D) & 1;
++  if (_3 != 0)
++   The IR of bb is as above.  */
++static void
++create_cond_false_bb (basic_block &cond_false_bb, basic_block after_bb,
++                      basic_block dominator_bb, class loop *outer)
++{
++  gimple_seq stmts = NULL;
++  gimple_stmt_iterator gsi;
++  gphi *phi_s_cond_true_bb;
++  gphi *phi_c_cond_true_bb;
++
++  create_new_bb (cond_false_bb, after_bb, dominator_bb, outer);
++  make_single_succ_edge (after_bb, cond_false_bb, EDGE_FALLTHRU);
++
++  tree entry_s = get_current_def (origin_loop.base_s);
++  phi_s_cond_true_bb = create_phi_node_for_bb (entry_s, cond_false_bb);
++  tree entry_c = get_current_def (origin_loop.base_c);
++  phi_c_cond_true_bb = create_phi_node_for_bb (entry_c, cond_false_bb);
++  tree res_s = gimple_phi_result (phi_s_cond_true_bb);
++  set_current_def (origin_loop.base_s, res_s);
++  s_map.put (cond_false_bb, res_s);
++  tree res_c = gimple_phi_result (phi_c_cond_true_bb);
++  set_current_def (origin_loop.base_c, res_c);
++  c_map.put (cond_false_bb, res_c);
++
++  gsi = gsi_last_bb (cond_false_bb);
++  tree lhs1 = gimple_build (&stmts, BIT_AND_EXPR,
++                            TREE_TYPE (origin_loop.base_n), origin_loop.base_n,
++                            build_int_cst (TREE_TYPE (origin_loop.base_n), 1));
++  gcond *cond_stmt = gimple_build_cond (NE_EXPR, lhs1, origin_loop.limit,
++                                        NULL_TREE, NULL_TREE);
++  gimple_seq_add_stmt (&stmts, cond_stmt);
++  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++}
++
++/*   :
++  _11 = (unsigned int) c_17;
++  _12 = *s_15;
++  _42 = __builtin_aarch64_crc32b (_11, _12);
++  c_36 = (long unsigned int) _42;
++  The IR of bb is as above.  */
++static void
++create_lastcond_true_bb (basic_block &new_bb, basic_block after_bb,
++                         basic_block dominator_bb, class loop *outer)
++{
++  gimple_seq stmts = NULL;
++  gimple_stmt_iterator gsi;
++  gimple *g;
++
++  create_new_bb (new_bb, after_bb, dominator_bb, outer);
++  gsi = gsi_last_bb (new_bb);
++
++  tree lhs1 = gimple_build (&stmts, NOP_EXPR, unsigned_type_node,
++                            get_current_def (origin_loop.base_c));
++  tree lhs2;
++  tree s_15 = get_current_def (origin_loop.base_s);
++  g = gimple_build_assign (make_ssa_name (unsigned_char_type_node),
++                           fold_build2 (MEM_REF, unsigned_char_type_node, s_15,
++                                        build_int_cst (TREE_TYPE (s_15), 0)));
++  gimple_seq_add_stmt (&stmts, g);
++  lhs2 = gimple_assign_lhs (g);
++
++  unsigned int code = AARCH64_BUILTIN_CRC32B;
++  tree lhs3;
++  gimple *call_builtin = call_builtin_fun (code, lhs3, lhs1, lhs2);
++  crc_map.put (new_bb,lhs3);
++  gimple_seq_add_stmt (&stmts,call_builtin);
++
++  tree lhs4 = copy_ssa_name (origin_loop.base_c);
++  g = gimple_build_assign (lhs4, NOP_EXPR, lhs3);
++  gimple_seq_add_stmt (&stmts, g);
++  c_map.put (new_bb, lhs4);
++
++  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++}
++
++static bool
++optional_add_phi_arg (gphi * phi, tree phi_res, tree phi_arg, edge e)
++{
++  location_t loc;
++  if (same_ssa_name_var_p (phi_arg, phi_res))
++    {
++      if (virtual_operand_p (phi_arg))
++        loc = UNKNOWN_LOCATION;
++      else
++        loc = gimple_location (SSA_NAME_DEF_STMT (phi_arg));
++      add_phi_arg (phi, phi_arg, e, loc);
++
++      return true;
++    }
++
++  return false;
++}
++
++/* Add phi_arg for bb with phi node.  */
++static void
++update_phi_nodes (basic_block bb)
++{
++  edge e;
++  edge_iterator ei;
++  gphi *phi;
++  gphi_iterator gsi;
++  tree res;
++
++  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      phi = gsi.phi ();
++      res = gimple_phi_result (phi);
++
++      FOR_EACH_EDGE (e, ei, bb->preds)
++      {
++        if (PHI_ARG_DEF_FROM_EDGE (phi, e))
++          continue;
++        tree var_c;
++        tree *ptr_var_c = c_map.get (e->src);
++        if (ptr_var_c == NULL)
++          var_c = origin_loop.base_c;
++        else
++          var_c = *ptr_var_c;
++        if (optional_add_phi_arg (phi, res, var_c, e))
++          continue;
++
++        tree var_nn;
++        tree *ptr_var_nn = nn_map.get (e->src);
++        if (ptr_var_nn == NULL)
++          var_nn = nn_tree;
++        else
++          var_nn = *ptr_var_nn;
++        if (optional_add_phi_arg (phi, res, var_nn, e))
++          continue;
++
++        tree var_s;
++        tree *ptr_var_s = s_map.get (e->src);
++        if (ptr_var_s == NULL)
++          var_s = origin_loop.base_s;
++        else
++          var_s = *ptr_var_s;
++        if (optional_add_phi_arg (phi, res, var_s, e))
++          continue;
++      }
++    }
++}
++
++static void
++create_new_loops (edge entry_edge)
++{
++  class loop *new_loop = NULL;
++  basic_block loop_bb, cond_bb, cond_true_bb, cond_false_bb, lastcond_true_bb;
++  class loop *outer = entry_edge->src->loop_father;
++  change_preheader_bb (entry_edge);
++
++  create_loop_bb (loop_bb, entry_edge->src, entry_edge->src, outer, entry_edge);
++  create_cond_bb (cond_bb, loop_bb, loop_bb, outer);
++  make_edge (loop_bb, loop_bb, EDGE_TRUE_VALUE);
++  make_edge (loop_bb, cond_bb, EDGE_FALSE_VALUE);
++  update_phi_nodes (loop_bb);
++
++  new_loop = alloc_loop ();
++  new_loop->header = loop_bb;
++  new_loop->latch = loop_bb;
++  add_loop (new_loop, outer);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nPrint byte new loop %d:\n", new_loop->num);
++      flow_loop_dump (new_loop, dump_file, NULL, 1);
++      fprintf (dump_file, "\n\n");
++    }
++
++  create_cond_true_bb (cond_true_bb, cond_bb, cond_bb, outer);
++  make_edge (cond_bb, cond_true_bb, EDGE_TRUE_VALUE);
++  create_cond_false_bb (cond_false_bb, cond_true_bb, cond_bb, outer);
++  make_edge (cond_bb, cond_false_bb, EDGE_FALSE_VALUE);
++  update_phi_nodes (cond_bb);
++  update_phi_nodes (cond_false_bb);
++  create_lastcond_true_bb (lastcond_true_bb, cond_false_bb,
++                           cond_false_bb, outer);
++  make_edge (cond_false_bb, lastcond_true_bb, EDGE_TRUE_VALUE);
++  make_edge (cond_false_bb, origin_loop.exit_bb, EDGE_FALSE_VALUE);
++  make_single_succ_edge (lastcond_true_bb, origin_loop.exit_bb, EDGE_FALLTHRU);
++
++  update_phi_nodes (origin_loop.exit_bb);
++  remove_edge (origin_loop.exit_edge);
++}
++
++/* Clear information about the original loop.  */
++static void
++remove_origin_loop (class loop *loop)
++{
++  basic_block *body = get_loop_body_in_dom_order (loop);
++  unsigned n = loop->num_nodes;
++  for (int i = 0; i < n; ++i)
++    delete_basic_block (body[i]);
++  free (body);
++  delete_loop (loop);
++}
++
++/* Make sure that the dominance relationship of the newly inserted cfg
++   is not missing.  */
++static void
++update_loop_dominator (cdi_direction dir)
++{
++  gcc_assert (dom_info_available_p (dir));
++
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++  {
++    basic_block imm_bb = get_immediate_dominator (dir, bb);
++    if (!imm_bb || bb == origin_loop.exit_bb)
++      {
++        set_immediate_dominator (CDI_DOMINATORS, bb,
++                               recompute_dominator (CDI_DOMINATORS, bb));
++        continue;
++      }
++  }
++}
++
++/* Perform the conversion of origin_loop to new_loop.  */
++static void
++convert_to_new_loop (class loop *loop)
++{
++  create_new_loops (origin_loop.entry_edge);
++  remove_origin_loop (loop);
++  update_loop_dominator (CDI_DOMINATORS);
++  update_ssa (TODO_update_ssa);
++}
++
++/* The main entry of loop crc optimizes.  */
++static unsigned int
++tree_ssa_loop_crc ()
++{
++  if (TARGET_CRC32 == false)
++    {
++      warning (OPT____,"The loop-crc optimization is not working." \
++                      "You should make sure that the specified architecture" \
++                      "supports crc:-march=armv8.1-a");
++      return 0;
++    }
++  unsigned int todo = 0;
++  class loop *loop;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      flow_loops_dump (dump_file, NULL, 1);
++      fprintf (dump_file, "\nStarting the loop_crc pass\n");
++    }
++
++  enum li_flags LI = LI_FROM_INNERMOST;
++  for (auto loop : loops_list (cfun, LI))
++  {
++    if (dump_file && (dump_flags & TDF_DETAILS))
++      {
++        fprintf (dump_file, "======================================\n");
++        fprintf (dump_file, "Processing loop %d:\n", loop->num);
++        fprintf (dump_file, "======================================\n");
++        flow_loop_dump (loop, dump_file, NULL, 1);
++        fprintf (dump_file, "\n\n");
++      }
++
++    if (match_crc_loop (loop))
++      {
++        if (dump_file && (dump_flags & TDF_DETAILS))
++          {
++             fprintf (dump_file, "The %dth loop form is success matched,"
++                                "and the loop can be optimized.\n",
++                     loop->num);
++          }
++
++        convert_to_new_loop (loop);
++        todo |= (TODO_update_ssa);
++      }
++  }
++  return todo;
++}
++
++/* Loop crc.  */
++
++namespace {
++const pass_data pass_data_tree_loop_crc =
++{
++  GIMPLE_PASS,
++  "loop_crc",
++  OPTGROUP_LOOP,
++  TV_TREE_LOOP_CRC,
++  (PROP_cfg | PROP_ssa),
++  0,
++  0,
++  0,
++  (TODO_update_ssa | TODO_verify_all)
++};
++
++class pass_loop_crc : public gimple_opt_pass
++{
++public:
++  pass_loop_crc (gcc::context *ctxt)
++    : gimple_opt_pass (pass_data_tree_loop_crc, ctxt)
++  {}
++
++  /* Opt_pass methods: */
++  virtual bool gate (function *);
++  virtual unsigned int execute (function *);
++}; // Class pass_loop_crc
++
++bool
++pass_loop_crc::gate (function *)
++{
++  return (flag_loop_crc > 0 && optimize >= 3);
++}
++
++unsigned int
++pass_loop_crc::execute (function *fun)
++{
++  if (number_of_loops (fun) <= 1)
++    return 0;
++
++  /* Only supports LP64 data mode.  */
++  if (TYPE_PRECISION (long_integer_type_node) != 64
++      || POINTER_SIZE != 64 || TYPE_PRECISION (integer_type_node) != 32)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++        fprintf (dump_file, "The current data mode is not supported,"
++                            "only the LP64 date mode is supported.\n");
++      return 0;
++    }
++
++  return tree_ssa_loop_crc ();
++}
++
++} // Anon namespace
++
++gimple_opt_pass *
++make_pass_loop_crc (gcc::context *ctxt)
++{
++  return new pass_loop_crc (ctxt);
++}
+-- 
+2.33.0
+
diff --git a/0102-LoongArch-Implement-vec_init-M-N-where-N-is-a-LSX-ve.patch b/0102-LoongArch-Implement-vec_init-M-N-where-N-is-a-LSX-ve.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b493452ed7480aa20ee06fecde416b1c4231814f
--- /dev/null
+++ b/0102-LoongArch-Implement-vec_init-M-N-where-N-is-a-LSX-ve.patch
@@ -0,0 +1,253 @@
+From a321a294407781b2694fe9a3be0099fe38ccf13a Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Fri, 5 Jan 2024 15:38:25 +0800
+Subject: [PATCH 102/188] LoongArch: Implement vec_init where N is a LSX
+ vector mode
+
+This patch implements more vec_init optabs that can handle two LSX vectors producing a LASX
+vector by concatenating them. When an lsx vector is concatenated with an LSX const_vector of
+zeroes, the vec_concatz pattern can be used effectively. For example as below
+
+typedef short v8hi __attribute__ ((vector_size (16)));
+typedef short v16hi __attribute__ ((vector_size (32)));
+v8hi a, b;
+
+v16hi vec_initv16hiv8hi ()
+{
+ return __builtin_shufflevector (a, b, 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15);
+}
+
+Before this patch:
+
+vec_initv16hiv8hi:
+    addi.d  $r3,$r3,-64
+    .cfi_def_cfa_offset 64
+    xvrepli.h   $xr0,0
+    la.local    $r12,.LANCHOR0
+    xvst    $xr0,$r3,0
+    xvst    $xr0,$r3,32
+    vld $vr0,$r12,0
+    vst $vr0,$r3,0
+    vld $vr0,$r12,16
+    vst $vr0,$r3,32
+    xvld    $xr1,$r3,32
+    xvld    $xr2,$r3,32
+    xvld    $xr0,$r3,0
+    xvilvh.h    $xr0,$xr1,$xr0
+    xvld    $xr1,$r3,0
+    xvilvl.h    $xr1,$xr2,$xr1
+    addi.d  $r3,$r3,64
+    .cfi_def_cfa_offset 0
+    xvpermi.q   $xr0,$xr1,32
+    jr  $r1
+
+After this patch:
+
+vec_initv16hiv8hi:
+    la.local        $r12,.LANCHOR0
+    vld     $vr0,$r12,32
+    vld     $vr2,$r12,48
+    xvilvh.h        $xr1,$xr2,$xr0
+    xvilvl.h        $xr0,$xr2,$xr0
+    xvpermi.q       $xr1,$xr0,32
+    xvst    $xr1,$r4,0
+    jr      $r1
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (vec_initv32qiv16qi): Rename to ..
+	(vec_init): .. this, and extend to mode.
+	(@vec_concatz): New insn pattern.
+	* config/loongarch/loongarch.cc (loongarch_expand_vector_group_init):
+	Handle VALS containing two vectors.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-vec-init-2.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 26 +++++++-
+ gcc/config/loongarch/loongarch.cc             | 44 +++++++++++--
+ .../loongarch/vector/lasx/lasx-vec-init-2.c   | 65 +++++++++++++++++++
+ 3 files changed, 128 insertions(+), 7 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-2.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index b4aa8e261..803c5dd93 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -465,6 +465,11 @@
+    (V16HI "w")
+    (V32QI "w")])
+ 
++;; Half modes of all LASX vector modes, in lower-case.
++(define_mode_attr lasxhalf [(V32QI "v16qi")  (V16HI "v8hi")
++             (V8SI "v4si")  (V4DI  "v2di")
++             (V8SF  "v4sf") (V4DF  "v2df")])
++
+ (define_expand "vec_init"
+   [(match_operand:LASX 0 "register_operand")
+    (match_operand:LASX 1 "")]
+@@ -474,9 +479,9 @@
+   DONE;
+ })
+ 
+-(define_expand "vec_initv32qiv16qi"
+- [(match_operand:V32QI 0 "register_operand")
+-  (match_operand:V16QI 1 "")]
++(define_expand "vec_init"
++ [(match_operand:LASX 0 "register_operand")
++  (match_operand: 1 "")]
+   "ISA_HAS_LASX"
+ {
+   loongarch_expand_vector_group_init (operands[0], operands[1]);
+@@ -577,6 +582,21 @@
+   [(set_attr "type" "simd_insert")
+    (set_attr "mode" "")])
+ 
++(define_insn "@vec_concatz"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++    (vec_concat:LASX
++      (match_operand: 1 "nonimmediate_operand")
++      (match_operand: 2 "const_0_operand")))]
++  "ISA_HAS_LASX"
++{
++  if (MEM_P (operands[1]))
++    return "vld\t%w0,%1";
++  else
++    return "vori.b\t%w0,%w1,0";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
+ (define_insn "vec_concat"
+   [(set (match_operand:LASX 0 "register_operand" "=f")
+ 	(vec_concat:LASX
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index ddb32cea2..fccdc21a8 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -9842,10 +9842,46 @@ loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val)
+ void
+ loongarch_expand_vector_group_init (rtx target, rtx vals)
+ {
+-  rtx ops[2] = { force_reg (E_V16QImode, XVECEXP (vals, 0, 0)),
+-      force_reg (E_V16QImode, XVECEXP (vals, 0, 1)) };
+-  emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (E_V32QImode, ops[0],
+-						      ops[1])));
++  machine_mode vmode = GET_MODE (target);
++  machine_mode half_mode = VOIDmode;
++  rtx low = XVECEXP (vals, 0, 0);
++  rtx high = XVECEXP (vals, 0, 1);
++
++  switch (vmode)
++    {
++    case E_V32QImode:
++      half_mode = V16QImode;
++      break;
++    case E_V16HImode:
++      half_mode = V8HImode;
++      break;
++    case E_V8SImode:
++      half_mode = V4SImode;
++      break;
++    case E_V4DImode:
++      half_mode = V2DImode;
++      break;
++    case E_V8SFmode:
++      half_mode = V4SFmode;
++      break;
++    case E_V4DFmode:
++      half_mode = V2DFmode;
++      break;
++    default:
++      gcc_unreachable ();
++    }
++
++  if (high == CONST0_RTX (half_mode))
++    emit_insn (gen_vec_concatz (vmode, target, low, high));
++  else
++    {
++      if (!register_operand (low, half_mode))
++	low = force_reg (half_mode, low);
++      if (!register_operand (high, half_mode))
++	high = force_reg (half_mode, high);
++      emit_insn (gen_rtx_SET (target,
++			      gen_rtx_VEC_CONCAT (vmode, low, high)));
++    }
+ }
+ 
+ /* Expand initialization of a vector which has all same elements.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-2.c
+new file mode 100644
+index 000000000..7592198c4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-vec-init-2.c
+@@ -0,0 +1,65 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fno-vect-cost-model -mlasx" } */
++/* { dg-final { scan-assembler-times "vld" 12 } } */
++
++
++typedef char v16qi __attribute__ ((vector_size (16)));
++typedef char v32qi __attribute__ ((vector_size (32)));
++
++typedef short v8hi __attribute__ ((vector_size (16)));
++typedef short v16hi __attribute__ ((vector_size (32)));
++
++typedef int v4si __attribute__ ((vector_size (16)));
++typedef int v8si __attribute__ ((vector_size (32)));
++
++typedef long v2di __attribute__ ((vector_size (16)));
++typedef long v4di __attribute__ ((vector_size (32)));
++
++typedef float v4sf __attribute__ ((vector_size (16)));
++typedef float v8sf __attribute__ ((vector_size (32)));
++
++typedef double v2df __attribute__ ((vector_size (16)));
++typedef double v4df __attribute__ ((vector_size (32)));
++
++v16qi a_qi, b_qi;
++v8hi  a_hi, b_hi;
++v4si  a_si, b_si;
++v2di  a_di, b_di;
++v4sf  a_sf, b_sf;
++v2df  a_df, b_df;
++
++v32qi
++foo_v32qi ()
++{
++  return __builtin_shufflevector (a_qi, b_qi, 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23, 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31);
++}
++
++v16hi
++foo_v16qi ()
++{
++  return __builtin_shufflevector (a_hi, b_hi, 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15);
++}
++
++v8si
++foo_v8si ()
++{
++  return __builtin_shufflevector (a_si, b_si, 0, 4, 1, 5, 2, 6, 3, 7);
++}
++
++v4di
++foo_v4di ()
++{
++  return __builtin_shufflevector (a_di, b_di, 0, 2, 1, 3);
++}
++
++v8sf
++foo_v8sf ()
++{
++  return __builtin_shufflevector (a_sf, b_sf, 0, 4, 1, 5, 2, 6, 3, 7);
++}
++
++v4df
++foo_v4df ()
++{
++  return __builtin_shufflevector (a_df, b_df, 0, 2, 1, 3);
++}
+-- 
+2.43.0
+
diff --git a/0103-LoongArch-Handle-ISA-evolution-switches-along-with-o.patch b/0103-LoongArch-Handle-ISA-evolution-switches-along-with-o.patch
new file mode 100644
index 0000000000000000000000000000000000000000..354c3b0ddf9cf025b9d2d9fbf15d96458c5f6bca
--- /dev/null
+++ b/0103-LoongArch-Handle-ISA-evolution-switches-along-with-o.patch
@@ -0,0 +1,533 @@
+From 901663758281d4ce87a75e4d6e45de621b65f0cb Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 8 Jan 2024 09:14:07 +0800
+Subject: [PATCH 103/188] LoongArch: Handle ISA evolution switches along with
+ other options
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/genstr.sh: Prepend the isa_evolution
+	variable with the common la_ prefix.
+	* config/loongarch/genopts/loongarch.opt.in: Mark ISA evolution
+	flags as saved using TargetVariable.
+	* config/loongarch/loongarch.opt: Same.
+	* config/loongarch/loongarch-def.h: Define evolution_set to
+	mark changes to the -march default.
+	* config/loongarch/loongarch-driver.cc: Same.
+	* config/loongarch/loongarch-opts.cc: Same.
+	* config/loongarch/loongarch-opts.h: Define and use ISA evolution
+	conditions around the la_target structure.
+	* config/loongarch/loongarch.cc: Same.
+	* config/loongarch/loongarch.md: Same.
+	* config/loongarch/loongarch-builtins.cc: Same.
+	* config/loongarch/loongarch-c.cc: Same.
+	* config/loongarch/lasx.md: Same.
+	* config/loongarch/lsx.md: Same.
+	* config/loongarch/sync.md: Same.
+---
+ gcc/config/loongarch/genopts/genstr.sh        |  2 +-
+ gcc/config/loongarch/genopts/loongarch.opt.in |  6 ++---
+ gcc/config/loongarch/lasx.md                  |  4 ++--
+ gcc/config/loongarch/loongarch-builtins.cc    |  6 ++---
+ gcc/config/loongarch/loongarch-c.cc           |  2 +-
+ gcc/config/loongarch/loongarch-def.h          |  5 +++-
+ gcc/config/loongarch/loongarch-driver.cc      |  5 ++--
+ gcc/config/loongarch/loongarch-opts.cc        | 17 ++++++++++++-
+ gcc/config/loongarch/loongarch-opts.h         | 24 +++++++++++++++----
+ gcc/config/loongarch/loongarch.cc             | 24 ++++++++-----------
+ gcc/config/loongarch/loongarch.md             | 12 +++++-----
+ gcc/config/loongarch/loongarch.opt            | 16 ++++++-------
+ gcc/config/loongarch/lsx.md                   |  4 ++--
+ gcc/config/loongarch/sync.md                  | 22 ++++++++---------
+ 14 files changed, 90 insertions(+), 59 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/genstr.sh b/gcc/config/loongarch/genopts/genstr.sh
+index bcc616e98..391eca121 100755
+--- a/gcc/config/loongarch/genopts/genstr.sh
++++ b/gcc/config/loongarch/genopts/genstr.sh
+@@ -107,7 +107,7 @@ EOF
+       print("")
+       print("m"$3)
+       gsub(/-/, "_", $3)
+-      print("Target Mask(ISA_"toupper($3)") Var(isa_evolution)")
++      print("Target Mask(ISA_"toupper($3)") Var(la_isa_evolution)")
+       $1=""; $2=""; $3=""
+       sub(/^ */, "", $0)
+       print($0)
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 102202b03..a866dab84 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -259,6 +259,6 @@ default value is 4.
+ ; Features added during ISA evolution.  This concept is different from ISA
+ ; extension, read Section 1.5 of LoongArch v1.10 Volume 1 for the
+ ; explanation.  These features may be implemented and enumerated with
+-; CPUCFG independantly, so we use bit flags to specify them.
+-Variable
+-HOST_WIDE_INT isa_evolution = 0
++; CPUCFG independently, so we use bit flags to specify them.
++TargetVariable
++HOST_WIDE_INT la_isa_evolution = 0
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 803c5dd93..fdfd65e4a 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -1540,7 +1540,7 @@
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+     (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+ 		  UNSPEC_LASX_XVFRECIPE))]
+-  "ISA_HAS_LASX && TARGET_FRECIPE"
++  "ISA_HAS_LASX && ISA_HAS_FRECIPE"
+   "xvfrecipe.\t%u0,%u1"
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+@@ -1573,7 +1573,7 @@
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+     (unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
+ 		  UNSPEC_LASX_XVFRSQRTE))]
+-  "ISA_HAS_LASX && TARGET_FRECIPE"
++  "ISA_HAS_LASX && ISA_HAS_FRECIPE"
+   "xvfrsqrte.\t%u0,%u1"
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index 85849ed29..e3b4dbc52 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -120,9 +120,9 @@ struct loongarch_builtin_description
+ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
+ AVAIL_ALL (lsx, ISA_HAS_LSX)
+ AVAIL_ALL (lasx, ISA_HAS_LASX)
+-AVAIL_ALL (frecipe, TARGET_FRECIPE && TARGET_HARD_FLOAT_ABI)
+-AVAIL_ALL (lsx_frecipe, ISA_HAS_LSX && TARGET_FRECIPE)
+-AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && TARGET_FRECIPE)
++AVAIL_ALL (frecipe, ISA_HAS_FRECIPE && TARGET_HARD_FLOAT_ABI)
++AVAIL_ALL (lsx_frecipe, ISA_HAS_LSX && ISA_HAS_FRECIPE)
++AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && ISA_HAS_FRECIPE)
+ 
+ /* Construct a loongarch_builtin_description from the given arguments.
+ 
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index a89477a74..df2a482ad 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -102,7 +102,7 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+   else
+     builtin_define ("__loongarch_frlen=0");
+ 
+-  if (TARGET_HARD_FLOAT && TARGET_FRECIPE)
++  if (TARGET_HARD_FLOAT && ISA_HAS_FRECIPE)
+     builtin_define ("__loongarch_frecipe");
+ 
+   if (ISA_HAS_LSX)
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index f8f36f0e2..9e5eee0e2 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -132,8 +132,11 @@ struct loongarch_isa
+ 
+      Using int64_t instead of HOST_WIDE_INT for C compatibility.  */
+   int64_t evolution;
++  int64_t evolution_set;
+ 
+-  loongarch_isa () : base (0), fpu (0), simd (0), evolution (0) {}
++  loongarch_isa () :
++    base (0), fpu (0), simd (0), evolution (0), evolution_set (0)
++  {}
+   loongarch_isa base_ (int _base) { base = _base; return *this; }
+   loongarch_isa fpu_ (int _fpu) { fpu = _fpu; return *this; }
+   loongarch_isa simd_ (int _simd) { simd = _simd; return *this; }
+diff --git a/gcc/config/loongarch/loongarch-driver.cc b/gcc/config/loongarch/loongarch-driver.cc
+index b3626984d..b84a6eaf7 100644
+--- a/gcc/config/loongarch/loongarch-driver.cc
++++ b/gcc/config/loongarch/loongarch-driver.cc
+@@ -42,9 +42,10 @@ extern struct obstack opts_obstack;
+ const char*
+ la_driver_init (int argc ATTRIBUTE_UNUSED, const char **argv ATTRIBUTE_UNUSED)
+ {
+-  /* Initialize all fields of la_target to -1 */
++  /* Initialize all fields of la_target.  */
+   loongarch_init_target (&la_target, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET,
+-			 M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET);
++			 M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET,
++			 0, 0);
+   return "";
+ }
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index d31becc67..935d09f45 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -140,7 +140,9 @@ static int with_default_simd = 0;
+ void
+ loongarch_init_target (struct loongarch_target *target,
+ 		       int cpu_arch, int cpu_tune, int fpu, int simd,
+-		       int abi_base, int abi_ext, int cmodel)
++		       int abi_base, int abi_ext, int cmodel,
++		       HOST_WIDE_INT isa_evolution,
++		       HOST_WIDE_INT isa_evolution_set)
+ {
+   if (!target)
+     return;
+@@ -148,6 +150,8 @@ loongarch_init_target (struct loongarch_target *target,
+   target->cpu_tune = cpu_tune;
+   target->isa.fpu = fpu;
+   target->isa.simd = simd;
++  target->isa.evolution = isa_evolution;
++  target->isa.evolution_set = isa_evolution_set;
+   target->abi.base = abi_base;
+   target->abi.ext = abi_ext;
+   target->cmodel = cmodel;
+@@ -184,6 +188,9 @@ loongarch_config_target (struct loongarch_target *target,
+       M_OPT_ABSENT (target->abi.base)	  ? 0 : 1,
+   };
+ 
++  int64_t isa_evolution = target->isa.evolution;
++  int64_t isa_evolution_set = target->isa.evolution_set;
++
+   /* 1.  Target ABI */
+   if (constrained.abi_base)
+     t.abi.base = target->abi.base;
+@@ -394,6 +401,13 @@ config_target_isa:
+ 	}
+     }
+ 
++  /* Apply the ISA evolution feature switches from the user.  */
++  HOST_WIDE_INT isa_evolution_orig = t.isa.evolution;
++  t.isa.evolution &= ~(~isa_evolution & isa_evolution_set);
++  t.isa.evolution |= isa_evolution & isa_evolution_set;
++
++  /* evolution_set means "what's different from the -march default".  */
++  t.isa.evolution_set = isa_evolution_orig ^ t.isa.evolution;
+ 
+   /* 4.  ABI-ISA compatibility */
+   /* Note:
+@@ -774,4 +788,5 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+   /* status of -mfpu */
+   opts->x_la_opt_fpu = target->isa.fpu;
+   opts->x_la_opt_simd = target->isa.simd;
++  opts->x_la_isa_evolution = target->isa.evolution;
+ }
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 8491bee0d..204338553 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -34,7 +34,9 @@ extern struct loongarch_target la_target;
+ void
+ loongarch_init_target (struct loongarch_target *target,
+ 		       int cpu_arch, int cpu_tune, int fpu, int simd,
+-		       int abi_base, int abi_ext, int cmodel);
++		       int abi_base, int abi_ext, int cmodel,
++		       HOST_WIDE_INT isa_evolutions,
++		       HOST_WIDE_INT isa_evolutions_set);
+ 
+ 
+ /* Handler for "-m" option combinations,
+@@ -82,9 +84,23 @@ struct loongarch_flags {
+ 				   || la_target.abi.base == ABI_BASE_LP64F \
+ 				   || la_target.abi.base == ABI_BASE_LP64S)
+ 
+-#define ISA_HAS_LSX		  (la_target.isa.simd == ISA_EXT_SIMD_LSX \
+-				   || la_target.isa.simd == ISA_EXT_SIMD_LASX)
+-#define ISA_HAS_LASX		  (la_target.isa.simd == ISA_EXT_SIMD_LASX)
++#define ISA_HAS_LSX \
++  (la_target.isa.simd == ISA_EXT_SIMD_LSX \
++   || la_target.isa.simd == ISA_EXT_SIMD_LASX)
++
++#define ISA_HAS_LASX \
++  (la_target.isa.simd == ISA_EXT_SIMD_LASX)
++
++#define ISA_HAS_FRECIPE \
++  (la_target.isa.evolution & OPTION_MASK_ISA_FRECIPE)
++#define ISA_HAS_DIV32 \
++  (la_target.isa.evolution & OPTION_MASK_ISA_DIV32)
++#define ISA_HAS_LAM_BH \
++  (la_target.isa.evolution & OPTION_MASK_ISA_LAM_BH)
++#define ISA_HAS_LAMCAS \
++  (la_target.isa.evolution & OPTION_MASK_ISA_LAMCAS)
++#define ISA_HAS_LD_SEQ_SA \
++  (la_target.isa.evolution & OPTION_MASK_ISA_LD_SEQ_SA)
+ 
+ /* TARGET_ macros for use in *.md template conditionals */
+ #define TARGET_uARCH_LA464	  (la_target.cpu_tune == CPU_LA464)
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index fccdc21a8..b0bb67d60 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3859,7 +3859,7 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code,
+       else
+ 	{
+ 	  *total = loongarch_cost->int_div_si;
+-	  if (TARGET_64BIT && !TARGET_DIV32)
++	  if (TARGET_64BIT && !ISA_HAS_DIV32)
+ 	    *total += COSTS_N_INSNS (2);
+ 	}
+ 
+@@ -6107,7 +6107,7 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+       if (loongarch_cas_failure_memorder_needs_acquire (
+ 	    memmodel_from_int (INTVAL (op))))
+ 	fputs ("dbar\t0b10100", file);
+-      else if (!TARGET_LD_SEQ_SA)
++      else if (!ISA_HAS_LD_SEQ_SA)
+ 	fputs ("dbar\t0x700", file);
+       break;
+ 
+@@ -7509,7 +7509,8 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   loongarch_init_target (&la_target,
+ 			 la_opt_cpu_arch, la_opt_cpu_tune, la_opt_fpu,
+ 			 la_opt_simd, la_opt_abi_base, la_opt_abi_ext,
+-			 la_opt_cmodel);
++			 la_opt_cmodel, opts->x_la_isa_evolution,
++			 opts_set->x_la_isa_evolution);
+ 
+   /* Handle target-specific options: compute defaults/conflicts etc.  */
+   loongarch_config_target (&la_target, NULL, 0);
+@@ -7550,11 +7551,6 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   if (loongarch_branch_cost == 0)
+     loongarch_branch_cost = loongarch_cost->branch_cost;
+ 
+-  /* If the user hasn't disabled a feature added during ISA evolution,
+-     use the processor's default.  */
+-  isa_evolution |= (la_target.isa.evolution &
+-		    ~global_options_set.x_isa_evolution);
+-
+   /* Enable sw prefetching at -O3 and higher.  */
+   if (opts->x_flag_prefetch_loop_arrays < 0
+       && (opts->x_optimize >= 3 || opts->x_flag_profile_use)
+@@ -7685,7 +7681,7 @@ loongarch_option_override_internal (struct gcc_options *opts,
+     }
+   if (loongarch_recip)
+     recip_mask |= RECIP_MASK_ALL;
+-  if (!TARGET_FRECIPE)
++  if (!ISA_HAS_FRECIPE)
+     recip_mask = RECIP_MASK_NONE;
+ }
+ 
+@@ -10875,11 +10871,11 @@ loongarch_asm_code_end (void)
+ 	       loongarch_cpu_strings [la_target.cpu_tune]);
+       fprintf (asm_out_file, "%s Base ISA: %s\n", ASM_COMMENT_START,
+ 	       loongarch_isa_base_strings [la_target.isa.base]);
+-      DUMP_FEATURE (TARGET_FRECIPE);
+-      DUMP_FEATURE (TARGET_DIV32);
+-      DUMP_FEATURE (TARGET_LAM_BH);
+-      DUMP_FEATURE (TARGET_LAMCAS);
+-      DUMP_FEATURE (TARGET_LD_SEQ_SA);
++      DUMP_FEATURE (ISA_HAS_FRECIPE);
++      DUMP_FEATURE (ISA_HAS_DIV32);
++      DUMP_FEATURE (ISA_HAS_LAM_BH);
++      DUMP_FEATURE (ISA_HAS_LAMCAS);
++      DUMP_FEATURE (ISA_HAS_LD_SEQ_SA);
+     }
+ 
+   fputs ("\n\n", asm_out_file);
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 02c537d4c..23653a2b0 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -425,7 +425,7 @@
+ 
+ ;; A mode for anything legal as a input of a div or mod instruction.
+ (define_mode_iterator DIV [(DI "TARGET_64BIT")
+-			   (SI "!TARGET_64BIT || TARGET_DIV32")])
++			   (SI "!TARGET_64BIT || ISA_HAS_DIV32")])
+ 
+ ;; In GPR templates, a string like "mul." will expand to "mul.w" in the
+ ;; 32-bit version and "mul.d" in the 64-bit version.
+@@ -941,7 +941,7 @@
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+     (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
+ 	     UNSPEC_RECIPE))]
+-  "TARGET_FRECIPE"
++  "ISA_HAS_FRECIPE"
+   "frecipe.\t%0,%1"
+   [(set_attr "type" "frecipe")
+    (set_attr "mode" "")
+@@ -954,7 +954,7 @@
+ 		     (match_operand:GPR 2 "register_operand")))]
+   ""
+ {
+- if (GET_MODE (operands[0]) == SImode && TARGET_64BIT && !TARGET_DIV32)
++ if (GET_MODE (operands[0]) == SImode && TARGET_64BIT && !ISA_HAS_DIV32)
+   {
+     rtx reg1 = gen_reg_rtx (DImode);
+     rtx reg2 = gen_reg_rtx (DImode);
+@@ -994,7 +994,7 @@
+ 	(sign_extend
+ 	  (any_div:SI (match_operand:SI 1 "register_operand" "r,r,0")
+ 		      (match_operand:SI 2 "register_operand" "r,r,r"))))]
+-  "TARGET_64BIT && TARGET_DIV32"
++  "TARGET_64BIT && ISA_HAS_DIV32"
+ {
+   return loongarch_output_division (".w\t%0,%1,%2", operands);
+ }
+@@ -1014,7 +1014,7 @@
+ 	     (any_div:DI (match_operand:DI 1 "register_operand" "r,r,0")
+ 			 (match_operand:DI 2 "register_operand" "r,r,r")) 0)]
+ 	  UNSPEC_FAKE_ANY_DIV)))]
+-  "TARGET_64BIT && !TARGET_DIV32"
++  "TARGET_64BIT && !ISA_HAS_DIV32"
+ {
+   return loongarch_output_division (".w\t%0,%1,%2", operands);
+ }
+@@ -1197,7 +1197,7 @@
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+     (unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
+ 		 UNSPEC_RSQRTE))]
+-  "TARGET_FRECIPE"
++  "ISA_HAS_FRECIPE"
+   "frsqrte.\t%0,%1"
+   [(set_attr "type" "frsqrte")
+    (set_attr "mode" "")])
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 56f6a9564..b5a46df4e 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -267,26 +267,26 @@ default value is 4.
+ ; Features added during ISA evolution.  This concept is different from ISA
+ ; extension, read Section 1.5 of LoongArch v1.10 Volume 1 for the
+ ; explanation.  These features may be implemented and enumerated with
+-; CPUCFG independantly, so we use bit flags to specify them.
+-Variable
+-HOST_WIDE_INT isa_evolution = 0
++; CPUCFG independently, so we use bit flags to specify them.
++TargetVariable
++HOST_WIDE_INT la_isa_evolution = 0
+ 
+ mfrecipe
+-Target Mask(ISA_FRECIPE) Var(isa_evolution)
++Target Mask(ISA_FRECIPE) Var(la_isa_evolution)
+ Support frecipe.{s/d} and frsqrte.{s/d} instructions.
+ 
+ mdiv32
+-Target Mask(ISA_DIV32) Var(isa_evolution)
++Target Mask(ISA_DIV32) Var(la_isa_evolution)
+ Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
+ 
+ mlam-bh
+-Target Mask(ISA_LAM_BH) Var(isa_evolution)
++Target Mask(ISA_LAM_BH) Var(la_isa_evolution)
+ Support am{swap/add}[_db].{b/h} instructions.
+ 
+ mlamcas
+-Target Mask(ISA_LAMCAS) Var(isa_evolution)
++Target Mask(ISA_LAMCAS) Var(la_isa_evolution)
+ Support amcas[_db].{b/h/w/d} instructions.
+ 
+ mld-seq-sa
+-Target Mask(ISA_LD_SEQ_SA) Var(isa_evolution)
++Target Mask(ISA_LD_SEQ_SA) Var(la_isa_evolution)
+ Do not need load-load barriers (dbar 0x700).
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 02e89247b..612377436 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -1479,7 +1479,7 @@
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+     (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+ 		 UNSPEC_LSX_VFRECIPE))]
+-  "ISA_HAS_LSX && TARGET_FRECIPE"
++  "ISA_HAS_LSX && ISA_HAS_FRECIPE"
+   "vfrecipe.\t%w0,%w1"
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+@@ -1512,7 +1512,7 @@
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+     (unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+ 		 UNSPEC_LSX_VFRSQRTE))]
+-  "ISA_HAS_LSX && TARGET_FRECIPE"
++  "ISA_HAS_LSX && ISA_HAS_FRECIPE"
+   "vfrsqrte.\t%w0,%w1"
+   [(set_attr "type" "simd_fdiv")
+    (set_attr "mode" "")])
+diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
+index a678e7131..5da5c2780 100644
+--- a/gcc/config/loongarch/sync.md
++++ b/gcc/config/loongarch/sync.md
+@@ -124,9 +124,9 @@
+       return "ld.\t%0,%1\\n\\t"
+ 	     "dbar\t0x14";
+     case MEMMODEL_RELAXED:
+-      return TARGET_LD_SEQ_SA ? "ld.\t%0,%1"
+-			      : "ld.\t%0,%1\\n\\t"
+-				"dbar\t0x700";
++      return ISA_HAS_LD_SEQ_SA ? "ld.\t%0,%1"
++			       : "ld.\t%0,%1\\n\\t"
++				 "dbar\t0x700";
+ 
+     default:
+       /* The valid memory order variants are __ATOMIC_RELAXED, __ATOMIC_SEQ_CST,
+@@ -193,7 +193,7 @@
+ 		       (match_operand:SHORT 1 "reg_or_0_operand" "rJ"))
+ 	   (match_operand:SI 2 "const_int_operand")] ;; model
+ 	 UNSPEC_SYNC_OLD_OP))]
+-  "TARGET_LAM_BH"
++  "ISA_HAS_LAM_BH"
+   "amadd%A2.\t$zero,%z1,%0"
+   [(set (attr "length") (const_int 4))])
+ 
+@@ -230,7 +230,7 @@
+ 	  UNSPEC_SYNC_EXCHANGE))
+    (set (match_dup 1)
+ 	(match_operand:SHORT 2 "register_operand" "r"))]
+-  "TARGET_LAM_BH"
++  "ISA_HAS_LAM_BH"
+   "amswap%A3.\t%0,%z2,%1"
+   [(set (attr "length") (const_int 4))])
+ 
+@@ -266,7 +266,7 @@
+ 			       (match_operand:QHWD 3 "reg_or_0_operand" "rJ")
+ 			       (match_operand:SI 4 "const_int_operand")]  ;; mod_s
+ 	 UNSPEC_COMPARE_AND_SWAP))]
+-  "TARGET_LAMCAS"
++  "ISA_HAS_LAMCAS"
+   "ori\t%0,%z2,0\n\tamcas%A4.\t%0,%z3,%1"
+   [(set (attr "length") (const_int 8))])
+ 
+@@ -296,7 +296,7 @@
+ 
+   operands[6] = mod_s;
+ 
+-  if (TARGET_LAMCAS)
++  if (ISA_HAS_LAMCAS)
+     emit_insn (gen_atomic_cas_value_strong_amcas (operands[1], operands[2],
+ 							 operands[3], operands[4],
+ 							 operands[6]));
+@@ -422,7 +422,7 @@
+ 
+   operands[6] = mod_s;
+ 
+-  if (TARGET_LAMCAS)
++  if (ISA_HAS_LAMCAS)
+     emit_insn (gen_atomic_cas_value_strong_amcas (operands[1], operands[2],
+ 						       operands[3], operands[4],
+ 						       operands[6]));
+@@ -642,7 +642,7 @@
+ 	(match_operand:SHORT 2 "register_operand"))]
+   ""
+ {
+-  if (TARGET_LAM_BH)
++  if (ISA_HAS_LAM_BH)
+     emit_insn (gen_atomic_exchange_short (operands[0], operands[1], operands[2], operands[3]));
+   else
+     {
+@@ -663,7 +663,7 @@
+ 		     (match_operand:SHORT 2 "reg_or_0_operand" "rJ"))
+ 	   (match_operand:SI 3 "const_int_operand")] ;; model
+ 	 UNSPEC_SYNC_OLD_OP))]
+-  "TARGET_LAM_BH"
++  "ISA_HAS_LAM_BH"
+   "amadd%A3.\t%0,%z2,%1"
+   [(set (attr "length") (const_int 4))])
+ 
+@@ -678,7 +678,7 @@
+ 	 UNSPEC_SYNC_OLD_OP))]
+   ""
+ {
+-  if (TARGET_LAM_BH)
++  if (ISA_HAS_LAM_BH)
+     emit_insn (gen_atomic_fetch_add_short (operands[0], operands[1],
+ 					     operands[2], operands[3]));
+   else
+-- 
+2.43.0
+
diff --git a/0103-SME-Remove-hip09-and-hip11-in-aarch64-cores.def-to-b.patch b/0103-SME-Remove-hip09-and-hip11-in-aarch64-cores.def-to-b.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5589f82dbb1c1b19cf8b1a12816e2f7e0e2b5cf3
--- /dev/null
+++ b/0103-SME-Remove-hip09-and-hip11-in-aarch64-cores.def-to-b.patch
@@ -0,0 +1,34 @@
+From 72c48ade495ef99ef032a6c44365eb102b74888e Mon Sep 17 00:00:00 2001
+From: xiezhiheng 
+Date: Fri, 23 Aug 2024 15:14:04 +0800
+Subject: [PATCH 004/157] [SME] Remove hip09 and hip11 in aarch64-cores.def to
+ backport SME
+
+Will apply it in the end.
+---
+ gcc/config/aarch64/aarch64-cores.def | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 601b72abb..70b11eb80 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -130,7 +130,6 @@ AARCH64_CORE("a64fx", a64fx, a64fx, 8_2A,  AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_F
+ 
+ /* HiSilicon ('H') cores. */
+ AARCH64_CORE("tsv110",  tsv110, tsv110, 8_2A,  AARCH64_FL_FOR_ARCH8_2 | AARCH64_FL_CRYPTO | AARCH64_FL_F16 | AARCH64_FL_AES | AARCH64_FL_SHA2, tsv110,   0x48, 0xd01, -1)
+-AARCH64_CORE("hip09", hip09, hip09, 8_5A, AARCH64_FL_FOR_ARCH8_5 | AARCH64_FL_SVE | AARCH64_FL_I8MM | AARCH64_FL_F32MM | AARCH64_FL_F64MM | AARCH64_FL_PROFILE | AARCH64_FL_PREDRES, hip09, 0x48, 0xd02, 0x0)
+ 
+ /* ARMv8.3-A Architecture Processors.  */
+ 
+@@ -173,7 +172,6 @@ AARCH64_CORE("cortex-a710",  cortexa710, cortexa57, 9A,  AARCH64_FL_FOR_ARCH9 |
+ AARCH64_CORE("cortex-x2",  cortexx2, cortexa57, 9A,  AARCH64_FL_FOR_ARCH9 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_MEMTAG | AARCH64_FL_I8MM | AARCH64_FL_BF16, neoversen2, 0x41, 0xd48, -1)
+ 
+ AARCH64_CORE("neoverse-n2", neoversen2, cortexa57, 9A, AARCH64_FL_FOR_ARCH9 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG | AARCH64_FL_PROFILE, neoversen2, 0x41, 0xd49, -1)
+-AARCH64_CORE("hip11", hip11, hip11, 8_5A, AARCH64_FL_FOR_ARCH8_5| AARCH64_FL_SVE | AARCH64_FL_SVE2 | AARCH64_FL_F16, hip11, 0x48, 0xd22, -1)
+ 
+ AARCH64_CORE("demeter", demeter, cortexa57, 9A, AARCH64_FL_FOR_ARCH9 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG | AARCH64_FL_PROFILE, neoversev2, 0x41, 0xd4f, -1)
+ AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, 9A, AARCH64_FL_FOR_ARCH9 | AARCH64_FL_I8MM | AARCH64_FL_BF16 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_RNG | AARCH64_FL_MEMTAG | AARCH64_FL_PROFILE, neoversev2, 0x41, 0xd4f, -1)
+-- 
+2.33.0
+
diff --git a/SME-0001-AArch64-Cleanup-CPU-option-processing-code.patch b/0104-Backport-SME-AArch64-Cleanup-CPU-option-processing-c.patch
similarity index 93%
rename from SME-0001-AArch64-Cleanup-CPU-option-processing-code.patch
rename to 0104-Backport-SME-AArch64-Cleanup-CPU-option-processing-c.patch
index d73a5ec1e733352ba363c9430175d64331c743c2..1f506ac47df7208e15ecca5a465259507a376c56 100644
--- a/SME-0001-AArch64-Cleanup-CPU-option-processing-code.patch
+++ b/0104-Backport-SME-AArch64-Cleanup-CPU-option-processing-c.patch
@@ -1,7 +1,8 @@
-From 5432c32dbee52fc289cf17f57e3bbd36d2e1db07 Mon Sep 17 00:00:00 2001
+From 9a36ca4e9188ee402327ec908d4f6860f2ee67eb Mon Sep 17 00:00:00 2001
 From: Wilco Dijkstra 
 Date: Wed, 18 May 2022 16:02:12 +0100
-Subject: [PATCH 001/144] AArch64: Cleanup CPU option processing code
+Subject: [PATCH 005/157] [Backport][SME] AArch64: Cleanup CPU option
+ processing code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=1be715f31605976d8e4336973d3b81c5b7cea79f
 
@@ -34,10 +35,10 @@ gcc/
  3 files changed, 32 insertions(+), 135 deletions(-)
 
 diff --git a/gcc/config.gcc b/gcc/config.gcc
-index 5c378c698..0d43b3f19 100644
+index 8fdde1576..3be450471 100644
 --- a/gcc/config.gcc
 +++ b/gcc/config.gcc
-@@ -4188,8 +4188,6 @@ case "${target}" in
+@@ -4190,8 +4190,6 @@ case "${target}" in
  			  pattern=AARCH64_CORE
  			fi
  
@@ -46,7 +47,7 @@ index 5c378c698..0d43b3f19 100644
  			# Find the base CPU or ARCH id in aarch64-cores.def or
  			# aarch64-arches.def
  			if [ x"$base_val" = x ] \
-@@ -4197,23 +4195,6 @@ case "${target}" in
+@@ -4199,23 +4197,6 @@ case "${target}" in
  				    ${srcdir}/config/aarch64/$def \
  				    > /dev/null; then
  
@@ -70,7 +71,7 @@ index 5c378c698..0d43b3f19 100644
  			  # Disallow extensions in --with-tune=cortex-a53+crc.
  			  if [ $which = tune ] && [ x"$ext_val" != x ]; then
  			    echo "Architecture extensions not supported in --with-$which=$val" 1>&2
-@@ -4244,25 +4225,7 @@ case "${target}" in
+@@ -4246,25 +4227,7 @@ case "${target}" in
  					grep "^\"$base_ext\""`
  
  				if [ x"$base_ext" = x ] \
@@ -97,7 +98,7 @@ index 5c378c698..0d43b3f19 100644
  				  true
  				else
  				  echo "Unknown extension used in --with-$which=$val" 1>&2
-@@ -4271,10 +4234,6 @@ case "${target}" in
+@@ -4273,10 +4236,6 @@ case "${target}" in
  				ext_val=`echo $ext_val | sed -e 's/[a-z0-9]\+//'`
  			  done
  
@@ -109,10 +110,10 @@ index 5c378c698..0d43b3f19 100644
  			else
  			  # Allow --with-$which=native.
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 226dc9dff..f43da0661 100644
+index 7c62ddb2a..ba888beb0 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2797,8 +2797,6 @@ static const struct attribute_spec aarch64_attribute_table[] =
+@@ -3014,8 +3014,6 @@ static const struct attribute_spec aarch64_attribute_table[] =
    { NULL,                 0, 0, false, false, false, false, NULL, NULL }
  };
  
@@ -121,7 +122,7 @@ index 226dc9dff..f43da0661 100644
  /* An ISA extension in the co-processor and main instruction set space.  */
  struct aarch64_option_extension
  {
-@@ -18179,39 +18177,24 @@ aarch64_validate_mtune (const char *str, const struct processor **res)
+@@ -18411,39 +18409,24 @@ aarch64_validate_mtune (const char *str, const struct processor **res)
    return false;
  }
  
@@ -167,7 +168,7 @@ index 226dc9dff..f43da0661 100644
  }
  
  /* Return the VG value associated with -msve-vector-bits= value VALUE.  */
-@@ -18249,10 +18232,6 @@ aarch64_override_options (void)
+@@ -18481,10 +18464,6 @@ aarch64_override_options (void)
    uint64_t arch_isa = 0;
    aarch64_isa_flags = 0;
  
@@ -178,7 +179,7 @@ index 226dc9dff..f43da0661 100644
    selected_cpu = NULL;
    selected_arch = NULL;
    selected_tune = NULL;
-@@ -18267,77 +18246,56 @@ aarch64_override_options (void)
+@@ -18499,77 +18478,56 @@ aarch64_override_options (void)
       If either of -march or -mtune is given, they override their
       respective component of -mcpu.  */
    if (aarch64_cpu_string)
@@ -279,7 +280,7 @@ index 226dc9dff..f43da0661 100644
  
    if (aarch64_enable_bti == 2)
      {
-@@ -18373,15 +18331,6 @@ aarch64_override_options (void)
+@@ -18605,15 +18563,6 @@ aarch64_override_options (void)
    if (aarch64_ra_sign_scope != AARCH64_FUNCTION_NONE && TARGET_ILP32)
      sorry ("return address signing is only supported for %<-mabi=lp64%>");
  
@@ -295,7 +296,7 @@ index 226dc9dff..f43da0661 100644
    /* The pass to insert speculation tracking runs before
       shrink-wrapping and the latter does not know how to update the
       tracking status.  So disable it in this case.  */
-@@ -18487,11 +18436,7 @@ aarch64_option_restore (struct gcc_options *opts,
+@@ -18719,11 +18668,7 @@ aarch64_option_restore (struct gcc_options *opts,
    opts->x_explicit_arch = ptr->x_explicit_arch;
    selected_arch = aarch64_get_arch (ptr->x_explicit_arch);
    opts->x_explicit_tune_core = ptr->x_explicit_tune_core;
@@ -331,5 +332,5 @@ index 6834c3e99..14e2af054 100644
  
  /* If inserting NOP before a mult-accumulate insn remember to adjust the
 -- 
-2.19.1
+2.33.0
 
diff --git a/0104-LoongArch-Rename-ISA_BASE_LA64V100-to-ISA_BASE_LA64.patch b/0104-LoongArch-Rename-ISA_BASE_LA64V100-to-ISA_BASE_LA64.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d739b013214ef673939782c35b2ca2d88bee5392
--- /dev/null
+++ b/0104-LoongArch-Rename-ISA_BASE_LA64V100-to-ISA_BASE_LA64.patch
@@ -0,0 +1,220 @@
+From 282b0847a86fab49fb3582371647fa4cb2d941ed Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 8 Jan 2024 09:14:08 +0800
+Subject: [PATCH 104/188] LoongArch: Rename ISA_BASE_LA64V100 to ISA_BASE_LA64
+
+LoongArch ISA manual v1.10 suggests that software should not depend on
+the ISA version number for marking processor features.  The ISA version
+number is now defined as a collective name of individual ISA evolutions.
+Since there is a independent ISA evolution mask now, we can drop the
+version information from the base ISA.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch-strings: Rename.
+	* config/loongarch/genopts/loongarch.opt.in: Same.
+	* config/loongarch/loongarch-cpu.cc: Same.
+	* config/loongarch/loongarch-def.cc: Same.
+	* config/loongarch/loongarch-def.h: Same.
+	* config/loongarch/loongarch-opts.cc: Same.
+	* config/loongarch/loongarch-opts.h: Same.
+	* config/loongarch/loongarch-str.h: Same.
+	* config/loongarch/loongarch.opt: Same.
+---
+ gcc/config/loongarch/genopts/loongarch-strings |  2 +-
+ gcc/config/loongarch/genopts/loongarch.opt.in  |  2 +-
+ gcc/config/loongarch/loongarch-cpu.cc          |  2 +-
+ gcc/config/loongarch/loongarch-def.cc          | 14 +++++++-------
+ gcc/config/loongarch/loongarch-def.h           |  6 +++---
+ gcc/config/loongarch/loongarch-opts.cc         | 10 +++++-----
+ gcc/config/loongarch/loongarch-opts.h          |  2 +-
+ gcc/config/loongarch/loongarch-str.h           |  2 +-
+ gcc/config/loongarch/loongarch.opt             |  2 +-
+ 9 files changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index 411ad5696..ce70b8b9c 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -29,7 +29,7 @@ STR_CPU_LA464	      la464
+ STR_CPU_LA664	      la664
+ 
+ # Base architecture
+-STR_ISA_BASE_LA64V100 la64
++STR_ISA_BASE_LA64 la64
+ 
+ # -mfpu
+ OPTSTR_ISA_EXT_FPU    fpu
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index a866dab84..851d8d1f3 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -33,7 +33,7 @@ Name(isa_base) Type(int)
+ Basic ISAs of LoongArch:
+ 
+ EnumValue
+-Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100)
++Enum(isa_base) String(@@STR_ISA_BASE_LA64@@) Value(ISA_BASE_LA64)
+ 
+ ;; ISA extensions / adjustments
+ Enum
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index 7e0625835..551d4f72c 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -133,7 +133,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	switch (cpucfg_cache[1] & 0x3)
+ 	  {
+ 	    case 0x02:
+-	      tmp = ISA_BASE_LA64V100;
++	      tmp = ISA_BASE_LA64;
+ 	      break;
+ 
+ 	    default:
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index 843be78e4..533dd0af2 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -48,16 +48,16 @@ array_arch loongarch_cpu_default_isa =
+   array_arch ()
+     .set (CPU_LOONGARCH64,
+ 	  loongarch_isa ()
+-	    .base_ (ISA_BASE_LA64V100)
++	    .base_ (ISA_BASE_LA64)
+ 	    .fpu_ (ISA_EXT_FPU64))
+     .set (CPU_LA464,
+ 	  loongarch_isa ()
+-	    .base_ (ISA_BASE_LA64V100)
++	    .base_ (ISA_BASE_LA64)
+ 	    .fpu_ (ISA_EXT_FPU64)
+ 	    .simd_ (ISA_EXT_SIMD_LASX))
+     .set (CPU_LA664,
+ 	  loongarch_isa ()
+-	    .base_ (ISA_BASE_LA64V100)
++	    .base_ (ISA_BASE_LA64)
+ 	    .fpu_ (ISA_EXT_FPU64)
+ 	    .simd_ (ISA_EXT_SIMD_LASX)
+ 	    .evolution_ (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA
+@@ -153,7 +153,7 @@ array_tune loongarch_cpu_multipass_dfa_lookahead = array_tune ()
+ 
+ array loongarch_isa_base_strings =
+   array ()
+-    .set (ISA_BASE_LA64V100, STR_ISA_BASE_LA64V100);
++    .set (ISA_BASE_LA64, STR_ISA_BASE_LA64);
+ 
+ array loongarch_isa_ext_strings =
+   array ()
+@@ -189,15 +189,15 @@ array, N_ABI_BASE_TYPES>
+ 	  array ()
+ 	    .set (ABI_EXT_BASE,
+ 		  loongarch_isa ()
+-		    .base_ (ISA_BASE_LA64V100)
++		    .base_ (ISA_BASE_LA64)
+ 		    .fpu_ (ISA_EXT_FPU64)))
+     .set (ABI_BASE_LP64F,
+ 	  array ()
+ 	    .set (ABI_EXT_BASE,
+ 		  loongarch_isa ()
+-		    .base_ (ISA_BASE_LA64V100)
++		    .base_ (ISA_BASE_LA64)
+ 		    .fpu_ (ISA_EXT_FPU32)))
+     .set (ABI_BASE_LP64S,
+ 	  array ()
+ 	    .set (ABI_EXT_BASE,
+-		  loongarch_isa ().base_ (ISA_BASE_LA64V100)));
++		  loongarch_isa ().base_ (ISA_BASE_LA64)));
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index 9e5eee0e2..a133ea265 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -55,9 +55,9 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ /* enum isa_base */
+ 
+-/* LoongArch V1.00.  */
+-#define ISA_BASE_LA64V100	0
+-#define N_ISA_BASE_TYPES	1
++/* LoongArch64 */
++#define ISA_BASE_LA64	      0
++#define N_ISA_BASE_TYPES      1
+ extern loongarch_def_array
+   loongarch_isa_base_strings;
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 935d09f45..cf4c7bc93 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -567,17 +567,17 @@ isa_default_abi (const struct loongarch_isa *isa)
+   switch (isa->fpu)
+     {
+       case ISA_EXT_FPU64:
+-	if (isa->base >= ISA_BASE_LA64V100)
++	if (isa->base >= ISA_BASE_LA64)
+ 	  abi.base = ABI_BASE_LP64D;
+ 	break;
+ 
+       case ISA_EXT_FPU32:
+-	if (isa->base >= ISA_BASE_LA64V100)
++	if (isa->base >= ISA_BASE_LA64)
+ 	  abi.base = ABI_BASE_LP64F;
+ 	break;
+ 
+       case ISA_EXT_NONE:
+-	if (isa->base >= ISA_BASE_LA64V100)
++	if (isa->base >= ISA_BASE_LA64)
+ 	  abi.base = ABI_BASE_LP64S;
+ 	break;
+ 
+@@ -596,8 +596,8 @@ isa_base_compat_p (const struct loongarch_isa *set1,
+ {
+   switch (set2->base)
+     {
+-      case ISA_BASE_LA64V100:
+-	return (set1->base >= ISA_BASE_LA64V100);
++      case ISA_BASE_LA64:
++	return (set1->base >= ISA_BASE_LA64);
+ 
+       default:
+ 	gcc_unreachable ();
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 204338553..463812136 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -79,7 +79,7 @@ struct loongarch_flags {
+ #define TARGET_DOUBLE_FLOAT	  (la_target.isa.fpu == ISA_EXT_FPU64)
+ #define TARGET_DOUBLE_FLOAT_ABI	  (la_target.abi.base == ABI_BASE_LP64D)
+ 
+-#define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64V100)
++#define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64)
+ #define TARGET_ABI_LP64		  (la_target.abi.base == ABI_BASE_LP64D	\
+ 				   || la_target.abi.base == ABI_BASE_LP64F \
+ 				   || la_target.abi.base == ABI_BASE_LP64S)
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index a8821acb0..2251df38b 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -32,7 +32,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_CPU_LA464 "la464"
+ #define STR_CPU_LA664 "la664"
+ 
+-#define STR_ISA_BASE_LA64V100 "la64"
++#define STR_ISA_BASE_LA64 "la64"
+ 
+ #define OPTSTR_ISA_EXT_FPU "fpu"
+ #define STR_NONE "none"
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index b5a46df4e..df7314973 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -41,7 +41,7 @@ Name(isa_base) Type(int)
+ Basic ISAs of LoongArch:
+ 
+ EnumValue
+-Enum(isa_base) String(la64) Value(ISA_BASE_LA64V100)
++Enum(isa_base) String(la64) Value(ISA_BASE_LA64)
+ 
+ ;; ISA extensions / adjustments
+ Enum
+-- 
+2.43.0
+
diff --git a/SME-0002-AArch64-Cleanup-option-processing-code.patch b/0105-Backport-SME-AArch64-Cleanup-option-processing-code.patch
similarity index 93%
rename from SME-0002-AArch64-Cleanup-option-processing-code.patch
rename to 0105-Backport-SME-AArch64-Cleanup-option-processing-code.patch
index 30095fe9b9b168407cf6d1125e2c212533b10bc4..c945c81578d600d15f0315eed6b11659c8fe5878 100644
--- a/SME-0002-AArch64-Cleanup-option-processing-code.patch
+++ b/0105-Backport-SME-AArch64-Cleanup-option-processing-code.patch
@@ -1,7 +1,8 @@
-From 5b1e1c6e7fbe35aa3196531f10ae1cd774a90bef Mon Sep 17 00:00:00 2001
+From ba32885874fc6caa90f6ae5e264bc3d51f64a26e Mon Sep 17 00:00:00 2001
 From: Wilco Dijkstra 
 Date: Wed, 1 Jun 2022 16:46:36 +0100
-Subject: [PATCH 002/144] AArch64: Cleanup option processing code
+Subject: [PATCH 006/157] [Backport][SME] AArch64: Cleanup option processing
+ code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ae54c1b09963779c5c3914782324ff48af32e2f1
 
@@ -93,7 +94,7 @@ index 475d174dd..e60ce3c36 100644
  
  /* The available SVE predicate patterns, known in the ACLE as "svpattern".  */
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index f43da0661..33289c2a1 100644
+index ba888beb0..254ecfaa2 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -306,9 +306,6 @@ static bool aarch64_print_address_internal (FILE*, machine_mode, rtx,
@@ -106,7 +107,7 @@ index f43da0661..33289c2a1 100644
  /* The processor for which instructions should be scheduled.  */
  enum aarch64_processor aarch64_tune = cortexa53;
  
-@@ -2714,7 +2711,6 @@ struct processor
+@@ -2931,7 +2928,6 @@ struct processor
    enum aarch64_processor ident;
    enum aarch64_processor sched_core;
    enum aarch64_arch arch;
@@ -114,7 +115,7 @@ index f43da0661..33289c2a1 100644
    const uint64_t flags;
    const struct tune_params *const tune;
  };
-@@ -2723,9 +2719,9 @@ struct processor
+@@ -2940,9 +2936,9 @@ struct processor
  static const struct processor all_architectures[] =
  {
  #define AARCH64_ARCH(NAME, CORE, ARCH_IDENT, ARCH_REV, FLAGS) \
@@ -126,7 +127,7 @@ index f43da0661..33289c2a1 100644
  };
  
  /* Processor cores implementing AArch64.  */
-@@ -2733,23 +2729,13 @@ static const struct processor all_cores[] =
+@@ -2950,23 +2946,13 @@ static const struct processor all_cores[] =
  {
  #define AARCH64_CORE(NAME, IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
    {NAME, IDENT, SCHED, AARCH64_ARCH_##ARCH,				\
@@ -152,7 +153,7 @@ index f43da0661..33289c2a1 100644
  /* The current tuning set.  */
  struct tune_params aarch64_tune_params = generic_tunings;
  
-@@ -10416,8 +10402,8 @@ aarch64_case_values_threshold (void)
+@@ -10633,8 +10619,8 @@ aarch64_case_values_threshold (void)
    /* Use the specified limit for the number of cases before using jump
       tables at higher optimization levels.  */
    if (optimize > 2
@@ -163,7 +164,7 @@ index f43da0661..33289c2a1 100644
    else
      return optimize_size ? 8 : 11;
  }
-@@ -17537,6 +17523,26 @@ initialize_aarch64_tls_size (struct gcc_options *opts)
+@@ -17769,6 +17755,26 @@ initialize_aarch64_tls_size (struct gcc_options *opts)
    return;
  }
  
@@ -190,7 +191,7 @@ index f43da0661..33289c2a1 100644
  /* Parse STRING looking for options in the format:
       string	:: option:string
       option	:: name=substring
-@@ -17647,18 +17653,18 @@ aarch64_override_options_after_change_1 (struct gcc_options *opts)
+@@ -17879,18 +17885,18 @@ aarch64_override_options_after_change_1 (struct gcc_options *opts)
  void
  aarch64_override_options_internal (struct gcc_options *opts)
  {
@@ -215,7 +216,7 @@ index f43da0661..33289c2a1 100644
  
    /* This target defaults to strict volatile bitfields.  */
    if (opts->x_flag_strict_volatile_bitfields < 0 && abi_version_at_least (2))
-@@ -17819,13 +17825,6 @@ aarch64_override_options_internal (struct gcc_options *opts)
+@@ -18051,13 +18057,6 @@ aarch64_override_options_internal (struct gcc_options *opts)
        && opts->x_optimize >= aarch64_tune_params.prefetch->default_opt_level)
      opts->x_flag_prefetch_loop_arrays = 1;
  
@@ -229,7 +230,7 @@ index f43da0661..33289c2a1 100644
    aarch64_override_options_after_change_1 (opts);
  }
  
-@@ -18177,26 +18176,6 @@ aarch64_validate_mtune (const char *str, const struct processor **res)
+@@ -18409,26 +18408,6 @@ aarch64_validate_mtune (const char *str, const struct processor **res)
    return false;
  }
  
@@ -256,7 +257,7 @@ index f43da0661..33289c2a1 100644
  /* Return the VG value associated with -msve-vector-bits= value VALUE.  */
  
  static poly_uint16
-@@ -18232,9 +18211,9 @@ aarch64_override_options (void)
+@@ -18464,9 +18443,9 @@ aarch64_override_options (void)
    uint64_t arch_isa = 0;
    aarch64_isa_flags = 0;
  
@@ -269,7 +270,7 @@ index f43da0661..33289c2a1 100644
  
    if (aarch64_harden_sls_string)
      aarch64_validate_sls_mitigation (aarch64_harden_sls_string);
-@@ -18246,56 +18225,52 @@ aarch64_override_options (void)
+@@ -18478,56 +18457,52 @@ aarch64_override_options (void)
       If either of -march or -mtune is given, they override their
       respective component of -mcpu.  */
    if (aarch64_cpu_string)
@@ -341,7 +342,7 @@ index f43da0661..33289c2a1 100644
  
    if (aarch64_enable_bti == 2)
      {
-@@ -18414,38 +18389,14 @@ initialize_aarch64_code_model (struct gcc_options *opts)
+@@ -18646,38 +18621,14 @@ initialize_aarch64_code_model (struct gcc_options *opts)
      }
  }
  
@@ -382,7 +383,7 @@ index f43da0661..33289c2a1 100644
    aarch64_override_options_internal (opts);
  }
  
-@@ -18455,11 +18406,11 @@ static void
+@@ -18687,11 +18638,11 @@ static void
  aarch64_option_print (FILE *file, int indent, struct cl_target_option *ptr)
  {
    const struct processor *cpu
@@ -398,7 +399,7 @@ index f43da0661..33289c2a1 100644
  
    fprintf (file, "%*sselected tune = %s\n", indent, "", cpu->name);
    fprintf (file, "%*sselected arch = %s%s\n", indent, "",
-@@ -18572,8 +18523,7 @@ aarch64_handle_attr_arch (const char *str)
+@@ -18804,8 +18755,7 @@ aarch64_handle_attr_arch (const char *str)
    if (parse_res == AARCH64_PARSE_OK)
      {
        gcc_assert (tmp_arch);
@@ -408,7 +409,7 @@ index f43da0661..33289c2a1 100644
        return true;
      }
  
-@@ -18611,11 +18561,8 @@ aarch64_handle_attr_cpu (const char *str)
+@@ -18843,11 +18793,8 @@ aarch64_handle_attr_cpu (const char *str)
    if (parse_res == AARCH64_PARSE_OK)
      {
        gcc_assert (tmp_cpu);
@@ -422,7 +423,7 @@ index f43da0661..33289c2a1 100644
        return true;
      }
  
-@@ -18683,8 +18630,7 @@ aarch64_handle_attr_tune (const char *str)
+@@ -18915,8 +18862,7 @@ aarch64_handle_attr_tune (const char *str)
    if (parse_res == AARCH64_PARSE_OK)
      {
        gcc_assert (tmp_tune);
@@ -432,7 +433,7 @@ index f43da0661..33289c2a1 100644
        return true;
      }
  
-@@ -22589,7 +22535,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
+@@ -22821,7 +22767,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
    gcc_assert (targ_options);
  
    const struct processor *this_arch
@@ -441,7 +442,7 @@ index f43da0661..33289c2a1 100644
  
    uint64_t isa_flags = targ_options->x_aarch64_isa_flags;
    std::string extension
-@@ -22608,7 +22554,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
+@@ -22840,7 +22786,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
       useful to readers of the generated asm.  Do it only when it changes
       from function to function and verbose assembly is requested.  */
    const struct processor *this_tune
@@ -450,7 +451,7 @@ index f43da0661..33289c2a1 100644
  
    if (flag_debug_asm && aarch64_last_printed_tune_string != this_tune->name)
      {
-@@ -22720,7 +22666,7 @@ aarch64_start_file (void)
+@@ -22952,7 +22898,7 @@ aarch64_start_file (void)
      = TREE_TARGET_OPTION (target_option_default_node);
  
    const struct processor *default_arch
@@ -459,7 +460,7 @@ index f43da0661..33289c2a1 100644
    uint64_t default_isa_flags = default_options->x_aarch64_isa_flags;
    std::string extension
      = aarch64_get_extension_string_for_isa_flags (default_isa_flags,
-@@ -27627,9 +27573,6 @@ aarch64_libgcc_floating_mode_supported_p
+@@ -27950,9 +27896,6 @@ aarch64_libgcc_floating_mode_supported_p
  #undef TARGET_OFFLOAD_OPTIONS
  #define TARGET_OFFLOAD_OPTIONS aarch64_offload_options
  
@@ -484,7 +485,7 @@ index 14e2af054..7d73689e4 100644
  
  /* Bit values used to identify processor capabilities.  */
 diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
-index 92220b26e..d8e1f42a3 100644
+index 101664c7c..836a3c784 100644
 --- a/gcc/config/aarch64/aarch64.opt
 +++ b/gcc/config/aarch64/aarch64.opt
 @@ -22,13 +22,10 @@ HeaderInclude
@@ -523,5 +524,5 @@ index 92220b26e..d8e1f42a3 100644
  
  Enum
 -- 
-2.19.1
+2.33.0
 
diff --git a/0105-LoongArch-Use-enums-for-constants.patch b/0105-LoongArch-Use-enums-for-constants.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9ce3b97b55116d1aaccd87078e1160e6c5e14269
--- /dev/null
+++ b/0105-LoongArch-Use-enums-for-constants.patch
@@ -0,0 +1,181 @@
+From 907b35525c8abcdfe22152ebce6640dbe3905cce Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 8 Jan 2024 09:14:09 +0800
+Subject: [PATCH 105/188] LoongArch: Use enums for constants
+
+Target features constants from loongarch-def.h are currently defined as macros.
+Switch to enums for better look in the debugger.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-def.h: Define constants with
+	enums instead of Macros.
+---
+ gcc/config/loongarch/loongarch-def.h | 115 ++++++++++++++++-----------
+ 1 file changed, 67 insertions(+), 48 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index a133ea265..28da3ae5f 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -23,12 +23,10 @@ along with GCC; see the file COPYING3.  If not see
+     - ISA extensions		(isa_ext),
+     - base ABI types		(abi_base),
+     - ABI extension types	(abi_ext).
+-
+-    - code models		      (cmodel)
+-    - other command-line switches     (switch)
++    - code models		(cmodel)
+ 
+    These values are primarily used for implementing option handling
+-   logic in "loongarch.opt", "loongarch-driver.c" and "loongarch-opt.c".
++   logic in "loongarch.opt", "loongarch-driver.cc" and "loongarch-opt.cc".
+ 
+    As for the result of this option handling process, the following
+    scheme is adopted to represent the final configuration:
+@@ -53,30 +51,40 @@ along with GCC; see the file COPYING3.  If not see
+ #include "loongarch-def-array.h"
+ #include "loongarch-tune.h"
+ 
+-/* enum isa_base */
+ 
+-/* LoongArch64 */
+-#define ISA_BASE_LA64	      0
+-#define N_ISA_BASE_TYPES      1
++/* ISA base */
++enum {
++  ISA_BASE_LA64		= 0,  /* LoongArch64 */
++  N_ISA_BASE_TYPES	= 1
++};
++
+ extern loongarch_def_array
+   loongarch_isa_base_strings;
+ 
+-/* enum isa_ext_* */
+-#define ISA_EXT_NONE	      0
+-#define ISA_EXT_FPU32	      1
+-#define ISA_EXT_FPU64	      2
+-#define N_ISA_EXT_FPU_TYPES   3
+-#define ISA_EXT_SIMD_LSX      3
+-#define ISA_EXT_SIMD_LASX     4
+-#define N_ISA_EXT_TYPES	      5
++
++/* ISA extensions */
++enum {
++  ISA_EXT_NONE		= 0,
++  ISA_EXT_FPU32		= 1,
++  ISA_EXT_FPU64		= 2,
++  N_ISA_EXT_FPU_TYPES   = 3,
++  ISA_EXT_SIMD_LSX      = 3,
++  ISA_EXT_SIMD_LASX     = 4,
++  N_ISA_EXT_TYPES	= 5
++};
++
+ extern loongarch_def_array
+   loongarch_isa_ext_strings;
+ 
+-/* enum abi_base */
+-#define ABI_BASE_LP64D	      0
+-#define ABI_BASE_LP64F	      1
+-#define ABI_BASE_LP64S	      2
+-#define N_ABI_BASE_TYPES      3
++
++/* Base ABI */
++enum {
++  ABI_BASE_LP64D	= 0,
++  ABI_BASE_LP64F	= 1,
++  ABI_BASE_LP64S	= 2,
++  N_ABI_BASE_TYPES	= 3
++};
++
+ extern loongarch_def_array
+   loongarch_abi_base_strings;
+ 
+@@ -90,28 +98,38 @@ extern loongarch_def_array
+   (abi_base == ABI_BASE_LP64S)
+ 
+ 
+-/* enum abi_ext */
+-#define ABI_EXT_BASE	      0
+-#define N_ABI_EXT_TYPES	      1
++/* ABI Extension */
++enum {
++  ABI_EXT_BASE		= 0,
++  N_ABI_EXT_TYPES	= 1
++};
++
+ extern loongarch_def_array
+   loongarch_abi_ext_strings;
+ 
+-/* enum cmodel */
+-#define CMODEL_NORMAL	      0
+-#define CMODEL_TINY	      1
+-#define CMODEL_TINY_STATIC    2
+-#define CMODEL_MEDIUM	      3
+-#define CMODEL_LARGE	      4
+-#define CMODEL_EXTREME	      5
+-#define N_CMODEL_TYPES	      6
++
++/* Code Model */
++enum {
++  CMODEL_NORMAL		= 0,
++  CMODEL_TINY		= 1,
++  CMODEL_TINY_STATIC	= 2,
++  CMODEL_MEDIUM		= 3,
++  CMODEL_LARGE		= 4,
++  CMODEL_EXTREME	= 5,
++  N_CMODEL_TYPES	= 6
++};
++
+ extern loongarch_def_array
+   loongarch_cmodel_strings;
+ 
+-/* enum explicit_relocs */
+-#define EXPLICIT_RELOCS_AUTO	0
+-#define EXPLICIT_RELOCS_NONE	1
+-#define EXPLICIT_RELOCS_ALWAYS	2
+-#define N_EXPLICIT_RELOCS_TYPES	3
++
++/* Explicit Reloc Type */
++enum {
++  EXPLICIT_RELOCS_AUTO	    = 0,
++  EXPLICIT_RELOCS_NONE	    = 1,
++  EXPLICIT_RELOCS_ALWAYS    = 2,
++  N_EXPLICIT_RELOCS_TYPES   = 3
++};
+ 
+ /* The common default value for variables whose assignments
+    are triggered by command-line options.  */
+@@ -159,17 +177,18 @@ struct loongarch_target
+   int cmodel;	    /* CMODEL_ */
+ };
+ 
+-/* CPU properties.  */
+-/* index */
+-#define CPU_NATIVE	  0
+-#define CPU_ABI_DEFAULT   1
+-#define CPU_LOONGARCH64	  2
+-#define CPU_LA464	  3
+-#define CPU_LA664	  4
+-#define N_ARCH_TYPES	  5
+-#define N_TUNE_TYPES	  5
+-
+-/* parallel tables.  */
++/* CPU model */
++enum {
++  CPU_NATIVE	    = 0,
++  CPU_ABI_DEFAULT   = 1,
++  CPU_LOONGARCH64   = 2,
++  CPU_LA464	    = 3,
++  CPU_LA664	    = 4,
++  N_ARCH_TYPES	    = 5,
++  N_TUNE_TYPES	    = 5
++};
++
++/* CPU model properties */
+ extern loongarch_def_array
+   loongarch_cpu_strings;
+ extern loongarch_def_array
+-- 
+2.43.0
+
diff --git a/SME-0003-aarch64-Add-march-support-for-Armv9.1-A-Armv9.2-A-Ar.patch b/0106-Backport-SME-aarch64-Add-march-support-for-Armv9.1-A.patch
similarity index 93%
rename from SME-0003-aarch64-Add-march-support-for-Armv9.1-A-Armv9.2-A-Ar.patch
rename to 0106-Backport-SME-aarch64-Add-march-support-for-Armv9.1-A.patch
index caec078928bcff001884853a195ff6a837b7f73b..1655fab70ca27308192fc3af64de11093c4ee32c 100644
--- a/SME-0003-aarch64-Add-march-support-for-Armv9.1-A-Armv9.2-A-Ar.patch
+++ b/0106-Backport-SME-aarch64-Add-march-support-for-Armv9.1-A.patch
@@ -1,8 +1,8 @@
-From 5c2cc55316d0605ad3652e95e275dd0e20e010f7 Mon Sep 17 00:00:00 2001
+From 0bfb7b0b745d0a9af13772ad48ccc102e557f95a Mon Sep 17 00:00:00 2001
 From: Kyrylo Tkachov 
 Date: Mon, 26 Sep 2022 10:10:25 +0100
-Subject: [PATCH 003/144] aarch64: Add -march support for Armv9.1-A, Armv9.2-A,
- Armv9.3-A
+Subject: [PATCH 007/157] [Backport][SME] aarch64: Add -march support for
+ Armv9.1-A, Armv9.2-A, Armv9.3-A
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c33e12fa479c01848f4a288883bf1ef848c94ca3
 
@@ -90,10 +90,10 @@ index 7d73689e4..42aae37ef 100644
  #define AARCH64_ISA_LS64	   (aarch64_isa_flags & AARCH64_FL_LS64)
  
 diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
-index 7b43742ba..e65d4f991 100644
+index 17d9e4126..53709b246 100644
 --- a/gcc/doc/invoke.texi
 +++ b/gcc/doc/invoke.texi
-@@ -19172,6 +19172,9 @@ and the features that they enable by default:
+@@ -19176,6 +19176,9 @@ and the features that they enable by default:
  @item @samp{armv8.7-a} @tab Armv8.7-A @tab @samp{armv8.6-a}, @samp{+ls64}
  @item @samp{armv8.8-a} @tab Armv8.8-a @tab @samp{armv8.7-a}, @samp{+mops}
  @item @samp{armv9-a} @tab Armv9-A @tab @samp{armv8.5-a}, @samp{+sve}, @samp{+sve2}
@@ -104,5 +104,5 @@ index 7b43742ba..e65d4f991 100644
  @end multitable
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0106-LoongArch-Simplify-mexplicit-reloc-definitions.patch b/0106-LoongArch-Simplify-mexplicit-reloc-definitions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e0ac56fde6f24af4c473fb9008273ccf64ba5852
--- /dev/null
+++ b/0106-LoongArch-Simplify-mexplicit-reloc-definitions.patch
@@ -0,0 +1,124 @@
+From dc572aebb3a2c9062014ec50764bbc702dbb8a20 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 8 Jan 2024 09:14:10 +0800
+Subject: [PATCH 106/188] LoongArch: Simplify -mexplicit-reloc definitions
+
+Since we do not need printing or manual parsing of this option,
+(whether in the driver or for target attributes to be supported later)
+it can be handled in the .opt file framework.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch-strings: Remove explicit-reloc
+	argument string definitions.
+	* config/loongarch/loongarch-str.h: Same.
+	* config/loongarch/genopts/loongarch.opt.in: Mark -m[no-]explicit-relocs
+	as aliases to -mexplicit-relocs={always,none}
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/loongarch.cc: Same.
+---
+ gcc/config/loongarch/genopts/loongarch-strings |  6 ------
+ gcc/config/loongarch/genopts/loongarch.opt.in  |  8 ++++----
+ gcc/config/loongarch/loongarch-str.h           |  5 -----
+ gcc/config/loongarch/loongarch.cc              | 12 ------------
+ gcc/config/loongarch/loongarch.opt             |  2 +-
+ 5 files changed, 5 insertions(+), 28 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index ce70b8b9c..99fd4e7cd 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -64,9 +64,3 @@ STR_CMODEL_TS	      tiny-static
+ STR_CMODEL_MEDIUM     medium
+ STR_CMODEL_LARGE      large
+ STR_CMODEL_EXTREME    extreme
+-
+-# -mexplicit-relocs
+-OPTSTR_EXPLICIT_RELOCS		explicit-relocs
+-STR_EXPLICIT_RELOCS_AUTO	auto
+-STR_EXPLICIT_RELOCS_NONE	none
+-STR_EXPLICIT_RELOCS_ALWAYS	always
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 851d8d1f3..f2055b55e 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -181,20 +181,20 @@ Name(explicit_relocs) Type(int)
+ The code model option names for -mexplicit-relocs:
+ 
+ EnumValue
+-Enum(explicit_relocs) String(@@STR_EXPLICIT_RELOCS_AUTO@@) Value(EXPLICIT_RELOCS_AUTO)
++Enum(explicit_relocs) String(auto) Value(EXPLICIT_RELOCS_AUTO)
+ 
+ EnumValue
+-Enum(explicit_relocs) String(@@STR_EXPLICIT_RELOCS_NONE@@) Value(EXPLICIT_RELOCS_NONE)
++Enum(explicit_relocs) String(none) Value(EXPLICIT_RELOCS_NONE)
+ 
+ EnumValue
+-Enum(explicit_relocs) String(@@STR_EXPLICIT_RELOCS_ALWAYS@@) Value(EXPLICIT_RELOCS_ALWAYS)
++Enum(explicit_relocs) String(always) Value(EXPLICIT_RELOCS_ALWAYS)
+ 
+ mexplicit-relocs=
+ Target RejectNegative Joined Enum(explicit_relocs) Var(la_opt_explicit_relocs) Init(M_OPT_UNSET)
+ Use %reloc() assembly operators.
+ 
+ mexplicit-relocs
+-Target Var(la_opt_explicit_relocs_backward) Init(M_OPT_UNSET)
++Target Alias(mexplicit-relocs=, always, none)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
+ mrecip
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index 2251df38b..cacae38c0 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -63,11 +63,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_CMODEL_LARGE "large"
+ #define STR_CMODEL_EXTREME "extreme"
+ 
+-#define OPTSTR_EXPLICIT_RELOCS "explicit-relocs"
+-#define STR_EXPLICIT_RELOCS_AUTO "auto"
+-#define STR_EXPLICIT_RELOCS_NONE "none"
+-#define STR_EXPLICIT_RELOCS_ALWAYS "always"
+-
+ #define OPTSTR_FRECIPE "frecipe"
+ #define OPTSTR_DIV32   "div32"
+ #define OPTSTR_LAM_BH  "lam-bh"
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index b0bb67d60..8cd703caa 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -7518,18 +7518,6 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   loongarch_update_gcc_opt_status (&la_target, opts, opts_set);
+   loongarch_cpu_option_override (&la_target, opts, opts_set);
+ 
+-  if (la_opt_explicit_relocs != M_OPT_UNSET
+-      && la_opt_explicit_relocs_backward != M_OPT_UNSET)
+-    error ("do not use %qs (with %qs) and %qs (without %qs) together",
+-	   "-mexplicit-relocs=", "=",
+-	   la_opt_explicit_relocs_backward ? "-mexplicit-relocs"
+-					   : "-mno-explicit-relocs", "=");
+-
+-  if (la_opt_explicit_relocs_backward != M_OPT_UNSET)
+-    la_opt_explicit_relocs = (la_opt_explicit_relocs_backward
+-			      ? EXPLICIT_RELOCS_ALWAYS
+-			      : EXPLICIT_RELOCS_NONE);
+-
+   if (la_opt_explicit_relocs == M_OPT_UNSET)
+     la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS
+ 			      ? (loongarch_mrelax
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index df7314973..d6e337ac2 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -202,7 +202,7 @@ Target RejectNegative Joined Enum(explicit_relocs) Var(la_opt_explicit_relocs) I
+ Use %reloc() assembly operators.
+ 
+ mexplicit-relocs
+-Target Var(la_opt_explicit_relocs_backward) Init(M_OPT_UNSET)
++Target Alias(mexplicit-relocs=, always, none)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
+ mrecip
+-- 
+2.43.0
+
diff --git a/SME-0004-Revert-aarch64-Define-__ARM_FEATURE_RCPC.patch b/0107-Backport-SME-Revert-aarch64-Define-__ARM_FEATURE_RCP.patch
similarity index 97%
rename from SME-0004-Revert-aarch64-Define-__ARM_FEATURE_RCPC.patch
rename to 0107-Backport-SME-Revert-aarch64-Define-__ARM_FEATURE_RCP.patch
index 31527bcb177e14acafdd14711fa075619c404c1a..4de737cdb8b81f024884cb34821cb1ac93861038 100644
--- a/SME-0004-Revert-aarch64-Define-__ARM_FEATURE_RCPC.patch
+++ b/0107-Backport-SME-Revert-aarch64-Define-__ARM_FEATURE_RCP.patch
@@ -1,7 +1,8 @@
-From f77a0d9a57c39bd39ab159d2139b4611d936e0ae Mon Sep 17 00:00:00 2001
+From b36c8c41cab42d3df45197bb287f06381d660001 Mon Sep 17 00:00:00 2001
 From: xiezhiheng 
 Date: Mon, 19 Feb 2024 19:27:29 +0800
-Subject: [PATCH 004/144] Revert "aarch64: Define __ARM_FEATURE_RCPC"
+Subject: [PATCH 008/157] [Backport][SME] Revert "aarch64: Define
+ __ARM_FEATURE_RCPC"
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=40a727379f3e8e6a83aea4e94c38dfa5dd8ef33d
 
@@ -107,5 +108,5 @@ index 307fa3d67..bfb044f5d 100644
  foo (int a)
  {
 -- 
-2.19.1
+2.33.0
 
diff --git a/0107-LoongArch-testsuite-Add-loongarch-support-to-slp-21..patch b/0107-LoongArch-testsuite-Add-loongarch-support-to-slp-21..patch
new file mode 100644
index 0000000000000000000000000000000000000000..a5f2b12b4d35ad04e618f91cc77e708aaa9d7d9d
--- /dev/null
+++ b/0107-LoongArch-testsuite-Add-loongarch-support-to-slp-21..patch
@@ -0,0 +1,35 @@
+From f90e31b6dc8c99f6670dee9a120c5dd9fa9a18d9 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Wed, 10 Jan 2024 15:25:21 +0800
+Subject: [PATCH 107/188] LoongArch: testsuite: Add loongarch support to
+ slp-21.c.
+
+The function of this test is to check that the compiler supports vectorization
+using SLP and vec_{load/store/*}_lanes. However, vec_{load/store/*}_lanes are
+not supported on LoongArch, such as the corresponding "st4/ld4" directives on
+aarch64.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.dg/vect/slp-21.c: Add loongarch.
+---
+ gcc/testsuite/gcc.dg/vect/slp-21.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.dg/vect/slp-21.c b/gcc/testsuite/gcc.dg/vect/slp-21.c
+index 4b83adb98..3b7e92fe8 100644
+--- a/gcc/testsuite/gcc.dg/vect/slp-21.c
++++ b/gcc/testsuite/gcc.dg/vect/slp-21.c
+@@ -210,7 +210,7 @@ int main (void)
+ 
+    Not all vect_perm targets support that, and it's a bit too specific to have
+    its own effective-target selector, so we just test targets directly.  */
+-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 4 "vect" { target { powerpc64*-*-* s390*-*-* } } } } */
+-/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { vect_strided4 && { ! { powerpc64*-*-* s390*-*-* } } } } } } */
++/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 4 "vect" { target { powerpc64*-*-* s390*-*-* loongarch*-*-* } } } } */
++/* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 2 "vect" { target { vect_strided4 && { ! { powerpc64*-*-* s390*-*-* loongarch*-*-* } } } } } } */
+ /* { dg-final { scan-tree-dump-times "vectorizing stmts using SLP" 0 "vect"  { target { ! { vect_strided4 } } } } } */
+   
+-- 
+2.43.0
+
diff --git a/SME-0005-Revert-Ampere-1-and-Ampere-1A-core-definition-in-aar.patch b/0108-Backport-SME-Revert-Ampere-1-and-Ampere-1A-core-defi.patch
similarity index 92%
rename from SME-0005-Revert-Ampere-1-and-Ampere-1A-core-definition-in-aar.patch
rename to 0108-Backport-SME-Revert-Ampere-1-and-Ampere-1A-core-defi.patch
index ca4a1c4d74ae2c8305adfa63ff43bc1ecf42a69c..a70376dd99819d40931d4594c6babe2169f18159 100644
--- a/SME-0005-Revert-Ampere-1-and-Ampere-1A-core-definition-in-aar.patch
+++ b/0108-Backport-SME-Revert-Ampere-1-and-Ampere-1A-core-defi.patch
@@ -1,8 +1,8 @@
-From 69a6019803924ea02e798a4291953181cda85fde Mon Sep 17 00:00:00 2001
+From 34374de5edde59f27a1b3b443e8a163fc5b528d7 Mon Sep 17 00:00:00 2001
 From: xiezhiheng 
 Date: Tue, 20 Feb 2024 10:13:06 +0800
-Subject: [PATCH 005/144] Revert "Ampere-1 and Ampere-1A core definition in
- aarch64-cores.def"
+Subject: [PATCH 009/157] [Backport][SME] Revert "Ampere-1 and Ampere-1A core
+ definition in aarch64-cores.def"
 
 Revert it to solve conflicts with later patches, and will apply it
 later. It's introduced by commit 3668a59ae22a and e9f0d974600e.
@@ -35,5 +35,5 @@ index 842d64932..0402bfb74 100644
  AARCH64_CORE("cortex-a710",  cortexa710, cortexa57, 9A,  AARCH64_FL_FOR_ARCH9 | AARCH64_FL_SVE2_BITPERM | AARCH64_FL_MEMTAG | AARCH64_FL_I8MM | AARCH64_FL_BF16, neoversen2, 0x41, 0xd47, -1)
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0108-LoongArch-Optimized-some-of-the-symbolic-expansion-i.patch b/0108-LoongArch-Optimized-some-of-the-symbolic-expansion-i.patch
new file mode 100644
index 0000000000000000000000000000000000000000..858ca17697625d8cdcbd648e10ca3b7f45a6ecf6
--- /dev/null
+++ b/0108-LoongArch-Optimized-some-of-the-symbolic-expansion-i.patch
@@ -0,0 +1,228 @@
+From 9b19eb071fe3826aa61567b927fc95a37f6560f7 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 8 Dec 2023 10:16:48 +0800
+Subject: [PATCH 108/188] LoongArch: Optimized some of the symbolic expansion
+ instructions generated during bitwise operations.
+
+There are two mode iterators defined in the loongarch.md:
+	(define_mode_iterator GPR [SI (DI "TARGET_64BIT")])
+  and
+	(define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")])
+Replace the mode in the bit arithmetic from GPR to X.
+
+Since the bitwise operation instruction does not distinguish between 64-bit,
+32-bit, etc., it is necessary to perform symbolic expansion if the bitwise
+operation is less than 64 bits.
+The original definition would have generated a lot of redundant symbolic
+extension instructions. This problem is optimized with reference to the
+implementation of RISCV.
+
+Add this patch spec2017 500.perlbench performance improvement by 1.8%
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (one_cmpl2): Replace GPR with X.
+	(*nor3): Likewise.
+	(nor3): Likewise.
+	(*negsi2_extended): New template.
+	(*si3_internal): Likewise.
+	(*one_cmplsi2_internal): Likewise.
+	(*norsi3_internal): Likewise.
+	(*nsi_internal): Likewise.
+	(bytepick_w__extend): Modify this template according to the
+	modified bit operation to make the optimization work.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/sign-extend-bitwise.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 93 ++++++++++++++-----
+ .../loongarch/sign-extend-bitwise.c           | 21 +++++
+ 2 files changed, 90 insertions(+), 24 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/sign-extend-bitwise.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 23653a2b0..6ebf33cbe 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -736,7 +736,7 @@
+ 
+ (define_insn "sub3"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+-	(minus:GPR (match_operand:GPR 1 "register_operand" "rJ")
++	(minus:GPR (match_operand:GPR 1 "register_operand" "r")
+ 		   (match_operand:GPR 2 "register_operand" "r")))]
+   ""
+   "sub.\t%0,%z1,%2"
+@@ -1412,13 +1412,13 @@
+   [(set_attr "alu_type"	"sub")
+    (set_attr "mode" "")])
+ 
+-(define_insn "one_cmpl2"
+-  [(set (match_operand:GPR 0 "register_operand" "=r")
+-	(not:GPR (match_operand:GPR 1 "register_operand" "r")))]
+-  ""
+-  "nor\t%0,%.,%1"
+-  [(set_attr "alu_type" "not")
+-   (set_attr "mode" "")])
++(define_insn "*negsi2_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(sign_extend:DI (neg:SI (match_operand:SI 1 "register_operand" "r"))))]
++  "TARGET_64BIT"
++  "sub.w\t%0,%.,%1"
++  [(set_attr "alu_type" "sub")
++   (set_attr "mode" "SI")])
+ 
+ (define_insn "neg2"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+@@ -1438,14 +1438,39 @@
+ ;;
+ 
+ (define_insn "3"
+-  [(set (match_operand:GPR 0 "register_operand" "=r,r")
+-	(any_bitwise:GPR (match_operand:GPR 1 "register_operand" "%r,r")
+-			 (match_operand:GPR 2 "uns_arith_operand" "r,K")))]
++  [(set (match_operand:X 0 "register_operand" "=r,r")
++	(any_bitwise:X (match_operand:X 1 "register_operand" "%r,r")
++		       (match_operand:X 2 "uns_arith_operand" "r,K")))]
+   ""
+   "%i2\t%0,%1,%2"
+   [(set_attr "type" "logical")
+    (set_attr "mode" "")])
+ 
++(define_insn "*si3_internal"
++  [(set (match_operand:SI 0 "register_operand" "=r,r")
++	(any_bitwise:SI (match_operand:SI 1 "register_operand" "%r,r")
++			(match_operand:SI 2 "uns_arith_operand"    " r,K")))]
++  "TARGET_64BIT"
++  "%i2\t%0,%1,%2"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "SI")])
++
++(define_insn "one_cmpl2"
++  [(set (match_operand:X 0 "register_operand" "=r")
++	(not:X (match_operand:X 1 "register_operand" "r")))]
++  ""
++  "nor\t%0,%.,%1"
++  [(set_attr "alu_type" "not")
++   (set_attr "mode" "")])
++
++(define_insn "*one_cmplsi2_internal"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(not:SI (match_operand:SI 1 "register_operand" " r")))]
++  "TARGET_64BIT"
++  "nor\t%0,%.,%1"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "SI")])
++
+ (define_insn "and3_extended"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(and:GPR (match_operand:GPR 1 "nonimmediate_operand" "r")
+@@ -1561,25 +1586,43 @@
+   [(set_attr "type" "logical")
+    (set_attr "mode" "HI")])
+ 
+-(define_insn "*nor3"
+-  [(set (match_operand:GPR 0 "register_operand" "=r")
+-	(and:GPR (not:GPR (match_operand:GPR 1 "register_operand" "%r"))
+-		 (not:GPR (match_operand:GPR 2 "register_operand" "r"))))]
++(define_insn "nor3"
++  [(set (match_operand:X 0 "register_operand" "=r")
++	(and:X (not:X (match_operand:X 1 "register_operand" "%r"))
++		 (not:X (match_operand:X 2 "register_operand" "r"))))]
+   ""
+   "nor\t%0,%1,%2"
+   [(set_attr "type" "logical")
+    (set_attr "mode" "")])
+ 
++(define_insn "*norsi3_internal"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(and:SI (not:SI (match_operand:SI 1 "register_operand" "%r"))
++		 (not:SI (match_operand:SI 2 "register_operand" "r"))))]
++  "TARGET_64BIT"
++  "nor\t%0,%1,%2"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "SI")])
++
+ (define_insn "n"
+-  [(set (match_operand:GPR 0 "register_operand" "=r")
+-	(neg_bitwise:GPR
+-	    (not:GPR (match_operand:GPR 1 "register_operand" "r"))
+-	    (match_operand:GPR 2 "register_operand" "r")))]
++  [(set (match_operand:X 0 "register_operand" "=r")
++	(neg_bitwise:X
++	    (not:X (match_operand:X 1 "register_operand" "r"))
++	    (match_operand:X 2 "register_operand" "r")))]
+   ""
+   "n\t%0,%2,%1"
+   [(set_attr "type" "logical")
+    (set_attr "mode" "")])
+ 
++(define_insn "*nsi_internal"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(neg_bitwise:SI
++	    (not:SI (match_operand:SI 1 "register_operand" "r"))
++	    (match_operand:SI 2 "register_operand" "r")))]
++  "TARGET_64BIT"
++  "n\t%0,%2,%1"
++  [(set_attr "type" "logical")
++   (set_attr "mode" "SI")])
+ 
+ ;;
+ ;;  ....................
+@@ -3167,7 +3210,6 @@
+ 		      (label_ref (match_operand 1))
+ 		      (pc)))])
+ 
+-
+ 
+ ;;
+ ;;  ....................
+@@ -3967,10 +4009,13 @@
+ (define_insn "bytepick_w__extend"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(sign_extend:DI
+-	  (ior:SI (lshiftrt (match_operand:SI 1 "register_operand" "r")
+-			    (const_int ))
+-		  (ashift (match_operand:SI 2 "register_operand" "r")
+-			  (const_int bytepick_w_ashift_amount)))))]
++	 (subreg:SI
++	  (ior:DI (subreg:DI (lshiftrt
++			      (match_operand:SI 1 "register_operand" "r")
++			      (const_int )) 0)
++		  (subreg:DI (ashift
++			      (match_operand:SI 2 "register_operand" "r")
++			      (const_int bytepick_w_ashift_amount)) 0)) 0)))]
+   "TARGET_64BIT"
+   "bytepick.w\t%0,%1,%2,"
+   [(set_attr "mode" "SI")])
+diff --git a/gcc/testsuite/gcc.target/loongarch/sign-extend-bitwise.c b/gcc/testsuite/gcc.target/loongarch/sign-extend-bitwise.c
+new file mode 100644
+index 000000000..5753ef69d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/sign-extend-bitwise.c
+@@ -0,0 +1,21 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2" } */
++/* { dg-final { scan-assembler-not "slli.w\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,0" } } */
++
++struct pmop
++{
++  unsigned int op_pmflags;
++  unsigned int op_pmpermflags;
++};
++unsigned int PL_hints;
++
++struct pmop *pmop;
++void
++Perl_newPMOP (int type, int flags)
++{
++  if (PL_hints & 0x00100000)
++    pmop->op_pmpermflags |= 0x0001;
++  if (PL_hints & 0x00000004)
++    pmop->op_pmpermflags |= 0x0800;
++  pmop->op_pmflags = pmop->op_pmpermflags;
++}
+-- 
+2.43.0
+
diff --git a/SME-0006-aarch64-Rename-AARCH64_ISA-architecture-level-macros.patch b/0109-Backport-SME-aarch64-Rename-AARCH64_ISA-architecture.patch
similarity index 96%
rename from SME-0006-aarch64-Rename-AARCH64_ISA-architecture-level-macros.patch
rename to 0109-Backport-SME-aarch64-Rename-AARCH64_ISA-architecture.patch
index 8aefdc2d2032cea2ce9f5b48a0fad3720839cb02..9b541def9d8a8d089821315bd7b4121caafa064d 100644
--- a/SME-0006-aarch64-Rename-AARCH64_ISA-architecture-level-macros.patch
+++ b/0109-Backport-SME-aarch64-Rename-AARCH64_ISA-architecture.patch
@@ -1,7 +1,8 @@
-From 4afcd45cfcaf8bcd335d45cdc821d49e73d28c4e Mon Sep 17 00:00:00 2001
+From 244780570ebc85c44806559ba165d4a70a2333d1 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:50 +0100
-Subject: [PATCH 006/144] aarch64: Rename AARCH64_ISA architecture-level macros
+Subject: [PATCH 010/157] [Backport][SME] aarch64: Rename AARCH64_ISA
+ architecture-level macros
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=2a4788ac3bae1467b0379852d5a6690a8496d0c9
 
@@ -34,10 +35,10 @@ gcc/
  3 files changed, 17 insertions(+), 17 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index dfda5b837..3fe1786f9 100644
+index 85ce8133b..3dc020f0c 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -430,7 +430,7 @@ aarch64_get_extension_string_for_isa_flags (uint64_t isa_flags,
+@@ -506,7 +506,7 @@ aarch64_get_extension_string_for_isa_flags (uint64_t isa_flags,
  
        Note that assemblers with Armv8-R AArch64 support should not have this
        issue, so we don't need this fix when targeting Armv8-R.  */
@@ -152,5 +153,5 @@ index 7c090c8f2..356a263b2 100644
  /* I8MM instructions are enabled through +i8mm.  */
  #define TARGET_I8MM (AARCH64_ISA_I8MM)
 -- 
-2.19.1
+2.33.0
 
diff --git a/0109-LoongArch-Implement-option-save-restore.patch b/0109-LoongArch-Implement-option-save-restore.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5f670a0ea2da6d9977ad10a0ea966807d20ff230
--- /dev/null
+++ b/0109-LoongArch-Implement-option-save-restore.patch
@@ -0,0 +1,467 @@
+From 146c85fa8b32d88acacf8645096d004e0c6f2f9c Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Thu, 11 Jan 2024 09:07:10 +0800
+Subject: [PATCH 109/188] LoongArch: Implement option save/restore
+
+LTO option streaming and target attributes both require per-function
+target configuration, which is achieved via option save/restore.
+
+We implement TARGET_OPTION_{SAVE,RESTORE} to switch the la_target
+context in addition to other automatically maintained option states
+(via the "Save" option property in the .opt files).
+
+Tested on loongarch64-linux-gnu without regression.
+
+	PR target/113233
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch.opt.in: Mark options with
+	the "Save" property.
+	* config/loongarch/loongarch.opt: Same.
+	* config/loongarch/loongarch-opts.cc: Refresh -mcmodel= state
+	according to la_target.
+	* config/loongarch/loongarch.cc: Implement TARGET_OPTION_{SAVE,
+	RESTORE} for the la_target structure; Rename option conditions
+	to have the same "la_" prefix.
+	* config/loongarch/loongarch.h: Same.
+---
+ gcc/config/loongarch/genopts/loongarch.opt.in | 38 ++++-----
+ gcc/config/loongarch/loongarch-opts.cc        |  7 ++
+ gcc/config/loongarch/loongarch.cc             | 80 +++++++++++++++----
+ gcc/config/loongarch/loongarch.h              |  2 +-
+ gcc/config/loongarch/loongarch.opt            | 38 ++++-----
+ 5 files changed, 111 insertions(+), 54 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index f2055b55e..4d6b1902d 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -50,7 +50,7 @@ EnumValue
+ Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU64@@) Value(ISA_EXT_FPU64)
+ 
+ m@@OPTSTR_ISA_EXT_FPU@@=
+-Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET)
++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) Save
+ -m@@OPTSTR_ISA_EXT_FPU@@=FPU	Generate code for the given FPU.
+ 
+ m@@OPTSTR_ISA_EXT_FPU@@=@@STR_ISA_EXT_FPU0@@
+@@ -82,7 +82,7 @@ EnumValue
+ Enum(isa_ext_simd) String(@@STR_ISA_EXT_LASX@@) Value(ISA_EXT_SIMD_LASX)
+ 
+ m@@OPTSTR_ISA_EXT_SIMD@@=
+-Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET)
++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) Save
+ -m@@OPTSTR_ISA_EXT_SIMD@@=SIMD	Generate code for the given SIMD extension.
+ 
+ m@@STR_ISA_EXT_LSX@@
+@@ -114,11 +114,11 @@ EnumValue
+ Enum(cpu_type) String(@@STR_CPU_LA664@@) Value(CPU_LA664)
+ 
+ m@@OPTSTR_ARCH@@=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET)
++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save
+ -m@@OPTSTR_ARCH@@=PROCESSOR	Generate code for the given PROCESSOR ISA.
+ 
+ m@@OPTSTR_TUNE@@=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET)
++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save
+ -m@@OPTSTR_TUNE@@=PROCESSOR	Generate optimized code for PROCESSOR.
+ 
+ 
+@@ -149,31 +149,31 @@ Variable
+ int la_opt_abi_ext = M_OPT_UNSET
+ 
+ mbranch-cost=
+-Target RejectNegative Joined UInteger Var(loongarch_branch_cost)
++Target RejectNegative Joined UInteger Var(la_branch_cost) Save
+ -mbranch-cost=COST	Set the cost of branches to roughly COST instructions.
+ 
+ mcheck-zero-division
+-Target Mask(CHECK_ZERO_DIV)
++Target Mask(CHECK_ZERO_DIV) Save
+ Trap on integer divide by zero.
+ 
+ mcond-move-int
+-Target Var(TARGET_COND_MOVE_INT) Init(1)
++Target Mask(COND_MOVE_INT) Save
+ Conditional moves for integral are enabled.
+ 
+ mcond-move-float
+-Target Var(TARGET_COND_MOVE_FLOAT) Init(1)
++Target Mask(COND_MOVE_FLOAT) Save
+ Conditional moves for float are enabled.
+ 
+ mmemcpy
+-Target Mask(MEMCPY)
++Target Mask(MEMCPY) Save
+ Prevent optimizing block moves, which is also the default behavior of -Os.
+ 
+ mstrict-align
+-Target Var(TARGET_STRICT_ALIGN) Init(0)
++Target Mask(STRICT_ALIGN) Save
+ Do not generate unaligned memory accesses.
+ 
+ mmax-inline-memcpy-size=
+-Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024)
++Target Joined RejectNegative UInteger Var(la_max_inline_memcpy_size) Init(1024) Save
+ -mmax-inline-memcpy-size=SIZE	Set the max size of memcpy to inline, default is 1024.
+ 
+ Enum
+@@ -198,11 +198,11 @@ Target Alias(mexplicit-relocs=, always, none)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
+ mrecip
+-Target RejectNegative Var(loongarch_recip)
++Target RejectNegative Var(la_recip) Save
+ Generate approximate reciprocal divide and square root for better throughput.
+ 
+ mrecip=
+-Target RejectNegative Joined Var(loongarch_recip_name)
++Target RejectNegative Joined Var(la_recip_name) Save
+ Control generation of reciprocal estimates.
+ 
+ ; The code model option names for -mcmodel.
+@@ -229,29 +229,29 @@ EnumValue
+ Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME)
+ 
+ mcmodel=
+-Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET)
++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) Save
+ Specify the code model.
+ 
+ mdirect-extern-access
+-Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0)
++Target Mask(DIRECT_EXTERN_ACCESS) Save
+ Avoid using the GOT to access external symbols.
+ 
+ mrelax
+-Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION)
++Target Mask(LINKER_RELAXATION)
+ Take advantage of linker relaxations to reduce the number of instructions
+ required to materialize symbol addresses.
+ 
+ mpass-mrelax-to-as
+-Target Var(loongarch_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
++Driver Var(la_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
+ Pass -mrelax or -mno-relax option to the assembler.
+ 
+ -param=loongarch-vect-unroll-limit=
+-Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
++Target Joined UInteger Var(la_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
+ Used to limit unroll factor which indicates how much the autovectorizer may
+ unroll a loop.  The default value is 6.
+ 
+ -param=loongarch-vect-issue-info=
+-Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) IntegerRange(1, 64) Param
++Target Undocumented Joined UInteger Var(la_vect_issue_info) Init(4) IntegerRange(1, 64) Param
+ Indicate how many non memory access vector instructions can be issued per
+ cycle, it's used in unroll factor determination for autovectorizer.  The
+ default value is 4.
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index cf4c7bc93..a2b069d83 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -785,8 +785,15 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+   opts->x_la_opt_cpu_arch = target->cpu_arch;
+   opts->x_la_opt_cpu_tune = target->cpu_tune;
+ 
++  /* status of -mcmodel */
++  opts->x_la_opt_cmodel = target->cmodel;
++
+   /* status of -mfpu */
+   opts->x_la_opt_fpu = target->isa.fpu;
++
++  /* status of -msimd */
+   opts->x_la_opt_simd = target->isa.simd;
++
++  /* ISA evolution features */
+   opts->x_la_isa_evolution = target->isa.evolution;
+ }
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 8cd703caa..533bae5b2 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4079,10 +4079,10 @@ loongarch_vector_costs::determine_suggested_unroll_factor (loop_vec_info loop_vi
+ 
+   /* Use this simple hardware resource model that how many non vld/vst
+      vector instructions can be issued per cycle.  */
+-  unsigned int issue_info = loongarch_vect_issue_info;
++  unsigned int issue_info = la_vect_issue_info;
+   unsigned int reduc_factor = m_reduc_factor > 1 ? m_reduc_factor : 1;
+   unsigned int uf = CEIL (reduc_factor * issue_info, nstmts_nonldst);
+-  uf = MIN ((unsigned int) loongarch_vect_unroll_limit, uf);
++  uf = MIN ((unsigned int) la_vect_unroll_limit, uf);
+ 
+   return 1 << ceil_log2 (uf);
+ }
+@@ -5540,7 +5540,7 @@ loongarch_expand_block_move (rtx dest, rtx src, rtx r_length, rtx r_align)
+     return false;
+ 
+   HOST_WIDE_INT length = INTVAL (r_length);
+-  if (length > loongarch_max_inline_memcpy_size)
++  if (length > la_max_inline_memcpy_size)
+     return false;
+ 
+   HOST_WIDE_INT align = INTVAL (r_align);
+@@ -7518,13 +7518,6 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   loongarch_update_gcc_opt_status (&la_target, opts, opts_set);
+   loongarch_cpu_option_override (&la_target, opts, opts_set);
+ 
+-  if (la_opt_explicit_relocs == M_OPT_UNSET)
+-    la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS
+-			      ? (loongarch_mrelax
+-				 ? EXPLICIT_RELOCS_AUTO
+-				 : EXPLICIT_RELOCS_ALWAYS)
+-			      : EXPLICIT_RELOCS_NONE);
+-
+   if (TARGET_ABI_LP64)
+     flag_pcc_struct_return = 0;
+ 
+@@ -7536,8 +7529,8 @@ loongarch_option_override_internal (struct gcc_options *opts,
+ 
+   /* If the user hasn't specified a branch cost, use the processor's
+      default.  */
+-  if (loongarch_branch_cost == 0)
+-    loongarch_branch_cost = loongarch_cost->branch_cost;
++  if (la_branch_cost == 0)
++    la_branch_cost = loongarch_cost->branch_cost;
+ 
+   /* Enable sw prefetching at -O3 and higher.  */
+   if (opts->x_flag_prefetch_loop_arrays < 0
+@@ -7624,9 +7617,9 @@ loongarch_option_override_internal (struct gcc_options *opts,
+ 	{ "vec-rsqrt", RECIP_MASK_VEC_RSQRT },
+   };
+ 
+-  if (loongarch_recip_name)
++  if (la_recip_name)
+     {
+-      char *p = ASTRDUP (loongarch_recip_name);
++      char *p = ASTRDUP (la_recip_name);
+       char *q;
+       unsigned int mask, i;
+       bool invert;
+@@ -7667,10 +7660,38 @@ loongarch_option_override_internal (struct gcc_options *opts,
+ 	    recip_mask |= mask;
+ 	}
+     }
+-  if (loongarch_recip)
++  if (la_recip)
+     recip_mask |= RECIP_MASK_ALL;
+   if (!ISA_HAS_FRECIPE)
+     recip_mask = RECIP_MASK_NONE;
++
++#define INIT_TARGET_FLAG(NAME, INIT) \
++  { \
++    if (!(target_flags_explicit & MASK_##NAME)) \
++      { \
++	if (INIT) \
++	  target_flags |= MASK_##NAME; \
++	else \
++	  target_flags &= ~MASK_##NAME; \
++      } \
++  }
++
++  /* Enable conditional moves for int and float by default.  */
++  INIT_TARGET_FLAG (COND_MOVE_INT, 1)
++  INIT_TARGET_FLAG (COND_MOVE_FLOAT, 1)
++
++  /* Set mrelax default.  */
++  INIT_TARGET_FLAG (LINKER_RELAXATION,
++		    HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION)
++
++#undef INIT_TARGET_FLAG
++
++  if (la_opt_explicit_relocs == M_OPT_UNSET)
++    la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS
++			      ? (TARGET_LINKER_RELAXATION
++				 ? EXPLICIT_RELOCS_AUTO
++				 : EXPLICIT_RELOCS_ALWAYS)
++			      : EXPLICIT_RELOCS_NONE);
+ }
+ 
+ 
+@@ -7682,6 +7703,31 @@ loongarch_option_override (void)
+   loongarch_option_override_internal (&global_options, &global_options_set);
+ }
+ 
++/* Implement TARGET_OPTION_SAVE.  */
++static void
++loongarch_option_save (struct cl_target_option *,
++		       struct gcc_options *opts,
++		       struct gcc_options *opts_set)
++{
++  loongarch_update_gcc_opt_status (&la_target, opts, opts_set);
++}
++
++/* Implement TARGET_OPTION_RESTORE.  */
++static void
++loongarch_option_restore (struct gcc_options *,
++			  struct gcc_options *,
++			  struct cl_target_option *ptr)
++{
++  la_target.cpu_arch = ptr->x_la_opt_cpu_arch;
++  la_target.cpu_tune = ptr->x_la_opt_cpu_tune;
++
++  la_target.isa.fpu = ptr->x_la_opt_fpu;
++  la_target.isa.simd = ptr->x_la_opt_simd;
++  la_target.isa.evolution = ptr->x_la_isa_evolution;
++
++  la_target.cmodel = ptr->x_la_opt_cmodel;
++}
++
+ /* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */
+ 
+ static void
+@@ -10880,6 +10926,10 @@ loongarch_asm_code_end (void)
+ 
+ #undef TARGET_OPTION_OVERRIDE
+ #define TARGET_OPTION_OVERRIDE loongarch_option_override
++#undef TARGET_OPTION_SAVE
++#define TARGET_OPTION_SAVE loongarch_option_save
++#undef TARGET_OPTION_RESTORE
++#define TARGET_OPTION_RESTORE loongarch_option_restore
+ 
+ #undef TARGET_LEGITIMIZE_ADDRESS
+ #define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index fbc0f53e4..f54b078b1 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -868,7 +868,7 @@ typedef struct {
+ /* A C expression for the cost of a branch instruction.  A value of
+    1 is the default; other values are interpreted relative to that.  */
+ 
+-#define BRANCH_COST(speed_p, predictable_p) loongarch_branch_cost
++#define BRANCH_COST(speed_p, predictable_p) la_branch_cost
+ 
+ /* Return the asm template for a conditional branch instruction.
+    OPCODE is the opcode's mnemonic and OPERANDS is the asm template for
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index d6e337ac2..75d230067 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -58,7 +58,7 @@ EnumValue
+ Enum(isa_ext_fpu) String(64) Value(ISA_EXT_FPU64)
+ 
+ mfpu=
+-Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET)
++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET) Save
+ -mfpu=FPU	Generate code for the given FPU.
+ 
+ mfpu=0
+@@ -90,7 +90,7 @@ EnumValue
+ Enum(isa_ext_simd) String(lasx) Value(ISA_EXT_SIMD_LASX)
+ 
+ msimd=
+-Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET)
++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET) Save
+ -msimd=SIMD	Generate code for the given SIMD extension.
+ 
+ mlsx
+@@ -122,11 +122,11 @@ EnumValue
+ Enum(cpu_type) String(la664) Value(CPU_LA664)
+ 
+ march=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET)
++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save
+ -march=PROCESSOR	Generate code for the given PROCESSOR ISA.
+ 
+ mtune=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET)
++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save
+ -mtune=PROCESSOR	Generate optimized code for PROCESSOR.
+ 
+ 
+@@ -157,31 +157,31 @@ Variable
+ int la_opt_abi_ext = M_OPT_UNSET
+ 
+ mbranch-cost=
+-Target RejectNegative Joined UInteger Var(loongarch_branch_cost)
++Target RejectNegative Joined UInteger Var(la_branch_cost) Save
+ -mbranch-cost=COST	Set the cost of branches to roughly COST instructions.
+ 
+ mcheck-zero-division
+-Target Mask(CHECK_ZERO_DIV)
++Target Mask(CHECK_ZERO_DIV) Save
+ Trap on integer divide by zero.
+ 
+ mcond-move-int
+-Target Var(TARGET_COND_MOVE_INT) Init(1)
++Target Mask(COND_MOVE_INT) Save
+ Conditional moves for integral are enabled.
+ 
+ mcond-move-float
+-Target Var(TARGET_COND_MOVE_FLOAT) Init(1)
++Target Mask(COND_MOVE_FLOAT) Save
+ Conditional moves for float are enabled.
+ 
+ mmemcpy
+-Target Mask(MEMCPY)
++Target Mask(MEMCPY) Save
+ Prevent optimizing block moves, which is also the default behavior of -Os.
+ 
+ mstrict-align
+-Target Var(TARGET_STRICT_ALIGN) Init(0)
++Target Mask(STRICT_ALIGN) Save
+ Do not generate unaligned memory accesses.
+ 
+ mmax-inline-memcpy-size=
+-Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024)
++Target Joined RejectNegative UInteger Var(la_max_inline_memcpy_size) Init(1024) Save
+ -mmax-inline-memcpy-size=SIZE	Set the max size of memcpy to inline, default is 1024.
+ 
+ Enum
+@@ -206,11 +206,11 @@ Target Alias(mexplicit-relocs=, always, none)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
+ mrecip
+-Target RejectNegative Var(loongarch_recip)
++Target RejectNegative Var(la_recip) Save
+ Generate approximate reciprocal divide and square root for better throughput.
+ 
+ mrecip=
+-Target RejectNegative Joined Var(loongarch_recip_name)
++Target RejectNegative Joined Var(la_recip_name) Save
+ Control generation of reciprocal estimates.
+ 
+ ; The code model option names for -mcmodel.
+@@ -237,29 +237,29 @@ EnumValue
+ Enum(cmodel) String(extreme) Value(CMODEL_EXTREME)
+ 
+ mcmodel=
+-Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET)
++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET) Save
+ Specify the code model.
+ 
+ mdirect-extern-access
+-Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0)
++Target Mask(DIRECT_EXTERN_ACCESS) Save
+ Avoid using the GOT to access external symbols.
+ 
+ mrelax
+-Target Var(loongarch_mrelax) Init(HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION)
++Target Mask(LINKER_RELAXATION)
+ Take advantage of linker relaxations to reduce the number of instructions
+ required to materialize symbol addresses.
+ 
+ mpass-mrelax-to-as
+-Target Var(loongarch_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
++Driver Var(la_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
+ Pass -mrelax or -mno-relax option to the assembler.
+ 
+ -param=loongarch-vect-unroll-limit=
+-Target Joined UInteger Var(loongarch_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
++Target Joined UInteger Var(la_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
+ Used to limit unroll factor which indicates how much the autovectorizer may
+ unroll a loop.  The default value is 6.
+ 
+ -param=loongarch-vect-issue-info=
+-Target Undocumented Joined UInteger Var(loongarch_vect_issue_info) Init(4) IntegerRange(1, 64) Param
++Target Undocumented Joined UInteger Var(la_vect_issue_info) Init(4) IntegerRange(1, 64) Param
+ Indicate how many non memory access vector instructions can be issued per
+ cycle, it's used in unroll factor determination for autovectorizer.  The
+ default value is 4.
+-- 
+2.43.0
+
diff --git a/SME-0007-aarch64-Rename-AARCH64_FL-architecture-level-macros.patch b/0110-Backport-SME-aarch64-Rename-AARCH64_FL-architecture-.patch
similarity index 98%
rename from SME-0007-aarch64-Rename-AARCH64_FL-architecture-level-macros.patch
rename to 0110-Backport-SME-aarch64-Rename-AARCH64_FL-architecture-.patch
index 7ed1842fb99b5d61adc373fad49bb00de1d87ff9..99317e4681d827ab6bc6813700f77f4a2969eb9e 100644
--- a/SME-0007-aarch64-Rename-AARCH64_FL-architecture-level-macros.patch
+++ b/0110-Backport-SME-aarch64-Rename-AARCH64_FL-architecture-.patch
@@ -1,7 +1,8 @@
-From df48669369ee6ca788f8c8096926bb164d5492dd Mon Sep 17 00:00:00 2001
+From e1b067871c4c39565bf6059b4924a810923c6eeb Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:51 +0100
-Subject: [PATCH 007/144] aarch64: Rename AARCH64_FL architecture-level macros
+Subject: [PATCH 011/157] [Backport][SME] aarch64: Rename AARCH64_FL
+ architecture-level macros
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=78aaafc3d4dc0ef997b4747349d3836ca2f7e301
 
@@ -215,5 +216,5 @@ index 356a263b2..5a91dfdd2 100644
  #define AARCH64_ISA_LS64	   (aarch64_isa_flags & AARCH64_FL_LS64)
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0110-LoongArch-Redundant-sign-extension-elimination-optim.patch b/0110-LoongArch-Redundant-sign-extension-elimination-optim.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6671733b4a09d1f6d6b4944b9fb04204facd29f0
--- /dev/null
+++ b/0110-LoongArch-Redundant-sign-extension-elimination-optim.patch
@@ -0,0 +1,234 @@
+From 54786cec1f52854a70369a3060ed22b1e070f000 Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Thu, 11 Jan 2024 19:36:19 +0800
+Subject: [PATCH 110/188] LoongArch: Redundant sign extension elimination
+ optimization.
+
+We found that the current combine optimization pass in gcc cannot handle
+the following redundant sign extension situations:
+
+(insn 77 76 78 5 (set (reg:SI 143)
+        (plus:SI (subreg/s/u:SI (reg/v:DI 104 [ len ]) 0)
+            (const_int 1 [0x1]))) {addsi3}
+    (expr_list:REG_DEAD (reg/v:DI 104 [ len ])
+        (nil)))
+(insn 78 77 82 5 (set (reg/v:DI 104 [ len ])
+        (sign_extend:DI (reg:SI 143))) {extendsidi2}
+        (nil))
+
+Because reg:SI 143 is not died or set in insn 78, no replacement merge will
+be performed for the insn sequence. We adjusted the add template to eliminate
+redundant sign extensions during the expand pass.
+Adjusted based on upstream comments:
+https://gcc.gnu.org/pipermail/gcc-patches/2024-January/641988.html
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (add3): Removed.
+	(*addsi3): New.
+	(addsi3): Ditto.
+	(adddi3): Ditto.
+	(*addsi3_extended): Removed.
+	(addsi3_extended): New.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/sign-extend.c: Moved to...
+	* gcc.target/loongarch/sign-extend-1.c: ...here.
+	* gcc.target/loongarch/sign-extend-2.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 93 ++++++++++++++-----
+ .../{sign-extend.c => sign-extend-1.c}        |  0
+ .../gcc.target/loongarch/sign-extend-2.c      | 59 ++++++++++++
+ 3 files changed, 128 insertions(+), 24 deletions(-)
+ rename gcc/testsuite/gcc.target/loongarch/{sign-extend.c => sign-extend-1.c} (100%)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/sign-extend-2.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 6ebf33cbe..4c7e28ace 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -657,42 +657,87 @@
+   [(set_attr "type" "fadd")
+    (set_attr "mode" "")])
+ 
+-(define_insn_and_split "add3"
+-  [(set (match_operand:GPR 0 "register_operand" "=r,r,r,r,r,r,r")
+-	(plus:GPR (match_operand:GPR 1 "register_operand" "r,r,r,r,r,r,r")
+-		  (match_operand:GPR 2 "plus__operand"
+-				       "r,I,La,Lb,Lc,Ld,Le")))]
++(define_insn_and_split "*addsi3"
++  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
++	(plus:SI (match_operand:SI 1 "register_operand" "r,r,r,r,r")
++		  (match_operand:SI 2 "plus_si_operand"
++				       "r,I,La,Lb,Le")))]
+   ""
+   "@
+-   add.\t%0,%1,%2
+-   addi.\t%0,%1,%2
++   add.w\t%0,%1,%2
++   addi.w\t%0,%1,%2
+    #
+    * operands[2] = GEN_INT (INTVAL (operands[2]) / 65536); \
+      return \"addu16i.d\t%0,%1,%2\";
++   #"
++  "CONST_INT_P (operands[2]) && !IMM12_INT (operands[2]) \
++   && !ADDU16I_OPERAND (INTVAL (operands[2]))"
++  [(set (match_dup 0) (plus:SI (match_dup 1) (match_dup 3)))
++   (set (match_dup 0) (plus:SI (match_dup 0) (match_dup 4)))]
++  {
++    loongarch_split_plus_constant (&operands[2], SImode);
++  }
++  [(set_attr "alu_type" "add")
++   (set_attr "mode" "SI")
++   (set_attr "insn_count" "1,1,2,1,2")])
++
++(define_expand "addsi3"
++  [(set (match_operand:SI 0 "register_operand" "=r,r,r,r,r")
++	(plus:SI (match_operand:SI 1 "register_operand" "r,r,r,r,r")
++		 (match_operand:SI 2 "plus_si_operand"  "r,I,La,Le,Lb")))]
++  "TARGET_64BIT"
++{
++  if (CONST_INT_P (operands[2]) && !IMM12_INT (operands[2])
++      && ADDU16I_OPERAND (INTVAL (operands[2])))
++    {
++      rtx t1 = gen_reg_rtx (DImode);
++      rtx t2 = gen_reg_rtx (DImode);
++      rtx t3 = gen_reg_rtx (DImode);
++      emit_insn (gen_extend_insn (t1, operands[1], DImode, SImode, 0));
++      t2 = operands[2];
++      emit_insn (gen_adddi3 (t3, t1, t2));
++      t3 = gen_lowpart (SImode, t3);
++      emit_move_insn (operands[0], t3);
++      DONE;
++    }
++  else
++    {
++      rtx t = gen_reg_rtx (DImode);
++      emit_insn (gen_addsi3_extended (t, operands[1], operands[2]));
++      t = gen_lowpart (SImode, t);
++      SUBREG_PROMOTED_VAR_P (t) = 1;
++      SUBREG_PROMOTED_SET (t, SRP_SIGNED);
++      emit_move_insn (operands[0], t);
++      DONE;
++    }
++})
++
++(define_insn_and_split "adddi3"
++  [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r,r")
++	(plus:DI (match_operand:DI 1 "register_operand" "r,r,r,r,r,r")
++		  (match_operand:DI 2 "plus_di_operand"
++				       "r,I,La,Lb,Lc,Ld")))]
++  "TARGET_64BIT"
++  "@
++   add.d\t%0,%1,%2
++   addi.d\t%0,%1,%2
+    #
++   * operands[2] = GEN_INT (INTVAL (operands[2]) / 65536); \
++     return \"addu16i.d\t%0,%1,%2\";
+    #
+    #"
+-  "CONST_INT_P (operands[2]) && !IMM12_INT (operands[2]) \
++  "&& CONST_INT_P (operands[2]) && !IMM12_INT (operands[2]) \
+    && !ADDU16I_OPERAND (INTVAL (operands[2]))"
+-  [(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3)))
+-   (set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))]
++  [(set (match_dup 0) (plus:DI (match_dup 1) (match_dup 3)))
++   (set (match_dup 0) (plus:DI (match_dup 0) (match_dup 4)))]
+   {
+-    loongarch_split_plus_constant (&operands[2], mode);
++    loongarch_split_plus_constant (&operands[2], DImode);
+   }
+   [(set_attr "alu_type" "add")
+-   (set_attr "mode" "")
+-   (set_attr "insn_count" "1,1,2,1,2,2,2")
+-   (set (attr "enabled")
+-      (cond
+-	[(match_test "mode != DImode && which_alternative == 4")
+-	 (const_string "no")
+-	 (match_test "mode != DImode && which_alternative == 5")
+-	 (const_string "no")
+-	 (match_test "mode != SImode && which_alternative == 6")
+-	 (const_string "no")]
+-	(const_string "yes")))])
+-
+-(define_insn_and_split "*addsi3_extended"
++   (set_attr "mode" "DI")
++   (set_attr "insn_count" "1,1,2,1,2,2")])
++
++(define_insn_and_split "addsi3_extended"
+   [(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
+ 	(sign_extend:DI
+ 	     (plus:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
+diff --git a/gcc/testsuite/gcc.target/loongarch/sign-extend.c b/gcc/testsuite/gcc.target/loongarch/sign-extend-1.c
+similarity index 100%
+rename from gcc/testsuite/gcc.target/loongarch/sign-extend.c
+rename to gcc/testsuite/gcc.target/loongarch/sign-extend-1.c
+diff --git a/gcc/testsuite/gcc.target/loongarch/sign-extend-2.c b/gcc/testsuite/gcc.target/loongarch/sign-extend-2.c
+new file mode 100644
+index 000000000..a45dde4f7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/sign-extend-2.c
+@@ -0,0 +1,59 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2" } */
++/* { dg-final { scan-assembler-times "slli.w\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,0" 1 } } */
++
++#include 
++#define my_min(x, y) ((x) < (y) ? (x) : (y))
++
++void
++bt_skip_func (const uint32_t len_limit, const uint32_t pos,
++              const uint8_t *const cur, uint32_t cur_match,
++              uint32_t *const son, const uint32_t cyclic_pos,
++              const uint32_t cyclic_size)
++{
++  uint32_t *ptr0 = son + (cyclic_pos << 1) + 1;
++  uint32_t *ptr1 = son + (cyclic_pos << 1);
++
++  uint32_t len0 = 0;
++  uint32_t len1 = 0;
++
++  while (1)
++    {
++      const uint32_t delta = pos - cur_match;
++      uint32_t *pair
++          = son
++            + ((cyclic_pos - delta + (delta > cyclic_pos ? cyclic_size : 0))
++               << 1);
++      const uint8_t *pb = cur - delta;
++      uint32_t len = my_min (len0, len1);
++
++      if (pb[len] == cur[len])
++        {
++          while (++len != len_limit)
++            if (pb[len] != cur[len])
++              break;
++
++          if (len == len_limit)
++            {
++              *ptr1 = pair[0];
++              *ptr0 = pair[1];
++              return;
++            }
++        }
++
++      if (pb[len] < cur[len])
++        {
++          *ptr1 = cur_match;
++          ptr1 = pair + 1;
++          cur_match = *ptr1;
++          len1 = len;
++        }
++      else
++        {
++          *ptr0 = cur_match;
++          ptr0 = pair;
++          cur_match = *ptr0;
++          len0 = len;
++        }
++    }
++}
+-- 
+2.43.0
+
diff --git a/SME-0008-aarch64-Rename-AARCH64_FL_FOR_ARCH-macros.patch b/0111-Backport-SME-aarch64-Rename-AARCH64_FL_FOR_ARCH-macr.patch
similarity index 98%
rename from SME-0008-aarch64-Rename-AARCH64_FL_FOR_ARCH-macros.patch
rename to 0111-Backport-SME-aarch64-Rename-AARCH64_FL_FOR_ARCH-macr.patch
index aa3d318224df50c415e3eaa9325c427f0ed7bb70..77737f576a3dc90a371241ba4616444aa889dc34 100644
--- a/SME-0008-aarch64-Rename-AARCH64_FL_FOR_ARCH-macros.patch
+++ b/0111-Backport-SME-aarch64-Rename-AARCH64_FL_FOR_ARCH-macr.patch
@@ -1,7 +1,8 @@
-From fcd46adfc9768dd16ab3fd0cd39eed82ee52e3c5 Mon Sep 17 00:00:00 2001
+From 7da27deb7413d7d1fd2c543617640e2de5b10db0 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:51 +0100
-Subject: [PATCH 008/144] aarch64: Rename AARCH64_FL_FOR_ARCH macros
+Subject: [PATCH 012/157] [Backport][SME] aarch64: Rename AARCH64_FL_FOR_ARCH
+ macros
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=0f833d1900176509e16b6f5563cfe58508fef5d2
 
@@ -55,10 +56,10 @@ gcc/
  5 files changed, 109 insertions(+), 109 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index 3fe1786f9..58cfe7561 100644
+index 3dc020f0c..0461201a5 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -177,7 +177,7 @@ static const struct processor_name_to_arch all_cores[] =
+@@ -253,7 +253,7 @@ static const struct processor_name_to_arch all_cores[] =
  #define AARCH64_CORE(NAME, X, IDENT, ARCH_IDENT, FLAGS, COSTS, IMP, PART, VARIANT) \
    {NAME, AARCH64_ARCH_##ARCH_IDENT, FLAGS},
  #include "config/aarch64/aarch64-cores.def"
@@ -308,10 +309,10 @@ index 0402bfb74..c4038c641 100644
  
  #undef AARCH64_CORE
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 33289c2a1..e476e38dc 100644
+index 254ecfaa2..3714c1047 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2732,7 +2732,7 @@ static const struct processor all_cores[] =
+@@ -2949,7 +2949,7 @@ static const struct processor all_cores[] =
    FLAGS, &COSTS##_tunings},
  #include "aarch64-cores.def"
    {"generic", generic, cortexa53, AARCH64_ARCH_8A,
@@ -393,5 +394,5 @@ index 5a91dfdd2..918a14193 100644
  /* Macros to test ISA flags.  */
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0111-LoongArch-Redundant-sign-extension-elimination-optim.patch b/0111-LoongArch-Redundant-sign-extension-elimination-optim.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2015969fba51ac3f50248130c0f1679bdfc26308
--- /dev/null
+++ b/0111-LoongArch-Redundant-sign-extension-elimination-optim.patch
@@ -0,0 +1,56 @@
+From 7bb1a356ca9eefab48d64bd3deeaac081c1ae7ea Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Thu, 11 Jan 2024 19:36:33 +0800
+Subject: [PATCH 111/188] LoongArch: Redundant sign extension elimination
+ optimization 2.
+
+Eliminate the redundant sign extension that exists after the conditional
+move when the target register is SImode.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_expand_conditional_move):
+	Adjust.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/sign-extend-2.c: Adjust.
+---
+ gcc/config/loongarch/loongarch.cc                  | 6 ++++++
+ gcc/testsuite/gcc.target/loongarch/sign-extend-2.c | 5 +++--
+ 2 files changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 533bae5b2..13481130b 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5367,6 +5367,12 @@ loongarch_expand_conditional_move (rtx *operands)
+ 	  rtx temp3 = gen_reg_rtx (mode);
+ 	  emit_insn (gen_rtx_SET (temp3, gen_rtx_IOR (mode, temp, temp2)));
+ 	  temp3 = gen_lowpart (GET_MODE (operands[0]), temp3);
++	  /* Nonzero in a subreg if it was made when accessing an object that
++	     was promoted to a wider mode in accord with the PROMOTED_MODE
++	     machine description macro.  */
++	  SUBREG_PROMOTED_VAR_P (temp3) = 1;
++	  /* Sets promoted mode for SUBREG_PROMOTED_VAR_P.  */
++	  SUBREG_PROMOTED_SET (temp3, SRP_SIGNED);
+ 	  loongarch_emit_move (operands[0], temp3);
+ 	}
+       else
+diff --git a/gcc/testsuite/gcc.target/loongarch/sign-extend-2.c b/gcc/testsuite/gcc.target/loongarch/sign-extend-2.c
+index a45dde4f7..e57a2727d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/sign-extend-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/sign-extend-2.c
+@@ -1,6 +1,7 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O2" } */
+-/* { dg-final { scan-assembler-times "slli.w\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,0" 1 } } */
++/* { dg-options "-mabi=lp64d -O2 -fdump-rtl-expand" } */
++/* { dg-final { scan-rtl-dump "subreg/s" "expand" } } */
++/* { dg-final { scan-assembler-not "slli.w\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,0" } } */
+ 
+ #include 
+ #define my_min(x, y) ((x) < (y) ? (x) : (y))
+-- 
+2.43.0
+
diff --git a/SME-0009-aarch64-Add-V-to-aarch64-arches.def-names.patch b/0112-Backport-SME-aarch64-Add-V-to-aarch64-arches.def-nam.patch
similarity index 98%
rename from SME-0009-aarch64-Add-V-to-aarch64-arches.def-names.patch
rename to 0112-Backport-SME-aarch64-Add-V-to-aarch64-arches.def-nam.patch
index 073a677e9b9ee88c0c3b2d5c01afd3ce5772c44b..d1b1db8941e9a1a7451170ed869b5abbd9ba32cf 100644
--- a/SME-0009-aarch64-Add-V-to-aarch64-arches.def-names.patch
+++ b/0112-Backport-SME-aarch64-Add-V-to-aarch64-arches.def-nam.patch
@@ -1,7 +1,8 @@
-From b150a25175dea07922d114b67d41da461b050d83 Mon Sep 17 00:00:00 2001
+From ed8ce0b31f2b608f0360af1ffd5375ea7809aba7 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:52 +0100
-Subject: [PATCH 009/144] aarch64: Add "V" to aarch64-arches.def names
+Subject: [PATCH 013/157] [Backport][SME] aarch64: Add "V" to
+ aarch64-arches.def names
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=00c22ba69d8e738a4789b30165ff9c925c508fc1
 
@@ -29,10 +30,10 @@ gcc/
  5 files changed, 83 insertions(+), 82 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index 58cfe7561..d4def6452 100644
+index 0461201a5..6ca89d31f 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -177,7 +177,7 @@ static const struct processor_name_to_arch all_cores[] =
+@@ -253,7 +253,7 @@ static const struct processor_name_to_arch all_cores[] =
  #define AARCH64_CORE(NAME, X, IDENT, ARCH_IDENT, FLAGS, COSTS, IMP, PART, VARIANT) \
    {NAME, AARCH64_ARCH_##ARCH_IDENT, FLAGS},
  #include "config/aarch64/aarch64-cores.def"
@@ -282,10 +283,10 @@ index c4038c641..f4c2f4ea4 100644
  
  #undef AARCH64_CORE
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index e476e38dc..69713b20c 100644
+index 3714c1047..22b51e12f 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2731,7 +2731,7 @@ static const struct processor all_cores[] =
+@@ -2948,7 +2948,7 @@ static const struct processor all_cores[] =
    {NAME, IDENT, SCHED, AARCH64_ARCH_##ARCH,				\
    FLAGS, &COSTS##_tunings},
  #include "aarch64-cores.def"
@@ -310,5 +311,5 @@ index d714a8bda..644780ef2 100644
  static struct aarch64_arch_driver_info aarch64_arches[] =
  {
 -- 
-2.19.1
+2.33.0
 
diff --git a/0112-LoongArch-Assign-the-u-attribute-to-the-mem-to-which.patch b/0112-LoongArch-Assign-the-u-attribute-to-the-mem-to-which.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9df4bccdcd3db3a59753dfd0dfe6a76c7d6461f0
--- /dev/null
+++ b/0112-LoongArch-Assign-the-u-attribute-to-the-mem-to-which.patch
@@ -0,0 +1,64 @@
+From 191675bdfd4cef0fbcf642f53da82a49bd23a3bf Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 12 Jan 2024 17:06:30 +0800
+Subject: [PATCH 112/188] LoongArch: Assign the '/u' attribute to the mem to
+ which the global offset table belongs.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_split_symbol):
+	Assign the '/u' attribute to the mem.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/loongarch/got-load.C: New test.
+---
+ gcc/config/loongarch/loongarch.cc             |  5 +++++
+ gcc/testsuite/g++.target/loongarch/got-load.C | 19 +++++++++++++++++++
+ 2 files changed, 24 insertions(+)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/got-load.C
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 13481130b..7da00c132 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3198,6 +3198,11 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+ 	      rtx mem = gen_rtx_MEM (Pmode, low);
+ 	      *low_out = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, mem),
+ 					 UNSPEC_LOAD_FROM_GOT);
++
++	      /* Nonzero in a mem, if the memory is statically allocated and
++		 read-only.  A common example of the later is a shared library’s
++		 global offset table.  */
++	      MEM_READONLY_P (mem) = 1;
+ 	    }
+ 
+ 	  break;
+diff --git a/gcc/testsuite/g++.target/loongarch/got-load.C b/gcc/testsuite/g++.target/loongarch/got-load.C
+new file mode 100644
+index 000000000..20924c739
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/got-load.C
+@@ -0,0 +1,19 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2 -mexplicit-relocs -mcmodel=normal -fdump-rtl-expand" } */
++/* { dg-final { scan-rtl-dump-times "mem/u" 2 "expand" } } */
++
++#include 
++
++using namespace std;
++
++int lr[100005][2];
++
++void
++test(void)
++{
++  int n;
++
++  cin >> n;
++  for (int i = 0; i < n; ++i)
++    cin >> lr[i][0] >> lr[i][1];
++}
+-- 
+2.43.0
+
diff --git a/SME-0010-aarch64-Small-config.gcc-cleanups.patch b/0113-Backport-SME-aarch64-Small-config.gcc-cleanups.patch
similarity index 90%
rename from SME-0010-aarch64-Small-config.gcc-cleanups.patch
rename to 0113-Backport-SME-aarch64-Small-config.gcc-cleanups.patch
index 38747fdfb710cede686e3c948328ef0f94fb9ef7..1b14c18545bbeab4caf4bbe017f0718b3413813a 100644
--- a/SME-0010-aarch64-Small-config.gcc-cleanups.patch
+++ b/0113-Backport-SME-aarch64-Small-config.gcc-cleanups.patch
@@ -1,7 +1,7 @@
-From b693da739d7279dbafef28a55d918f6a6fcdf95a Mon Sep 17 00:00:00 2001
+From aac2b2d4191d08a107c3ff8d98602355988a5558 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:52 +0100
-Subject: [PATCH 010/144] aarch64: Small config.gcc cleanups
+Subject: [PATCH 014/157] [Backport][SME] aarch64: Small config.gcc cleanups
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=0af214b447529453b356e8e480d7d35b3e642f0e
 
@@ -19,10 +19,10 @@ gcc/
  2 files changed, 1 insertion(+), 9 deletions(-)
 
 diff --git a/gcc/config.gcc b/gcc/config.gcc
-index 0d43b3f19..48273d902 100644
+index 3be450471..da66603cd 100644
 --- a/gcc/config.gcc
 +++ b/gcc/config.gcc
-@@ -4208,14 +4208,6 @@ case "${target}" in
+@@ -4210,14 +4210,6 @@ case "${target}" in
  			  options_parsed="`$ac_cv_prog_CPP -D"$opt_macro" -x c \
  				${srcdir}/config/aarch64/aarch64-option-extensions.def`"
  
@@ -51,5 +51,5 @@ index e42202822..ece96e22a 100644
  AARCH64_ARCH("armv8-a",	      generic,	     V8A,	8,  AARCH64_FL_FOR_V8A)
  AARCH64_ARCH("armv8.1-a",     generic,	     V8_1A,	8,  AARCH64_FL_FOR_V8_1A)
 -- 
-2.19.1
+2.33.0
 
diff --git a/0113-LoongArch-testsuite-Fix-fail-in-gen-vect-2-25-.c-fil.patch b/0113-LoongArch-testsuite-Fix-fail-in-gen-vect-2-25-.c-fil.patch
new file mode 100644
index 0000000000000000000000000000000000000000..574df99593fd22e4ef9166622befe652e495d8e3
--- /dev/null
+++ b/0113-LoongArch-testsuite-Fix-fail-in-gen-vect-2-25-.c-fil.patch
@@ -0,0 +1,51 @@
+From 1576f83f8cae0ead9de533566ec5f21e7a01f842 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Sat, 13 Jan 2024 15:28:34 +0800
+Subject: [PATCH 113/188] LoongArch: testsuite:Fix fail in gen-vect-{2,25}.c
+ file.
+
+1.Added  dg-do compile on LoongArch.
+  When binutils does not support vector instruction sets, an error occurs
+because the assembler does not recognize vector instructions.
+
+2.Added "-mlsx" option for vectorization on LoongArch.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.dg/tree-ssa/gen-vect-2.c: Added detection of compilation
+	behavior and "-mlsx" option on LoongArch.
+	* gcc.dg/tree-ssa/gen-vect-25.c: Dito.
+---
+ gcc/testsuite/gcc.dg/tree-ssa/gen-vect-2.c  | 2 ++
+ gcc/testsuite/gcc.dg/tree-ssa/gen-vect-25.c | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-2.c b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-2.c
+index 42171a2fb..395d6f7ee 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-2.c
+@@ -1,6 +1,8 @@
+ /* { dg-do run { target vect_cmdline_needed } } */
++/* { dg-do compile { target { loongarch_sx && {! loongarch_sx_hw } } } } */
+ /* { dg-options "-O2 -fno-tree-loop-distribute-patterns -ftree-vectorize -fdump-tree-vect-details -fvect-cost-model=dynamic" } */
+ /* { dg-additional-options "-mno-sse" { target { i?86-*-* x86_64-*-* } } } */
++/* { dg-additional-options "-mlsx" { target { loongarch*-*-* } } } */
+ 
+ #include 
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-25.c b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-25.c
+index 60ec27054..cea7f246a 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-25.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/gen-vect-25.c
+@@ -1,6 +1,8 @@
+ /* { dg-do run { target vect_cmdline_needed } } */
++/* { dg-do compile { target { loongarch_sx && {! loongarch_sx_hw } } } } */
+ /* { dg-options "-O2 -ftree-vectorize -fdump-tree-vect-details -fvect-cost-model=dynamic" } */
+ /* { dg-options "-O2 -ftree-vectorize -fdump-tree-vect-details -fvect-cost-model=dynamic -mno-sse" { target { i?86-*-* x86_64-*-* } } } */
++/* { dg-additional-options "-mlsx" { target { loongarch*-*-* } } } */
+ 
+ #include 
+ 
+-- 
+2.43.0
+
diff --git a/SME-0011-aarch64-Avoid-redundancy-in-aarch64-cores.def.patch b/0114-Backport-SME-aarch64-Avoid-redundancy-in-aarch64-cor.patch
similarity index 98%
rename from SME-0011-aarch64-Avoid-redundancy-in-aarch64-cores.def.patch
rename to 0114-Backport-SME-aarch64-Avoid-redundancy-in-aarch64-cor.patch
index c5ce514833b6301085dbd73b7c3fd306eb98843a..ba9f2abea421cc4fe2d98ffea23aa77c6957c8f5 100644
--- a/SME-0011-aarch64-Avoid-redundancy-in-aarch64-cores.def.patch
+++ b/0114-Backport-SME-aarch64-Avoid-redundancy-in-aarch64-cor.patch
@@ -1,7 +1,8 @@
-From fc073e8dd96bd6179cd0c997db616598c0460a5d Mon Sep 17 00:00:00 2001
+From f6f28c50045f672a35f5b7344b556fc45dc0b3a1 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:53 +0100
-Subject: [PATCH 011/144] aarch64: Avoid redundancy in aarch64-cores.def
+Subject: [PATCH 015/157] [Backport][SME] aarch64: Avoid redundancy in
+ aarch64-cores.def
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=198bb6ed327c74eb2b0450bf978e4e6a64a6406c
 
@@ -27,10 +28,10 @@ gcc/
  4 files changed, 68 insertions(+), 68 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index d4def6452..1739555de 100644
+index 6ca89d31f..a965ac660 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -175,7 +175,7 @@ struct arch_to_arch_name
+@@ -251,7 +251,7 @@ struct arch_to_arch_name
  static const struct processor_name_to_arch all_cores[] =
  {
  #define AARCH64_CORE(NAME, X, IDENT, ARCH_IDENT, FLAGS, COSTS, IMP, PART, VARIANT) \
@@ -242,10 +243,10 @@ index f4c2f4ea4..008b0b8c1 100644
  
  #undef AARCH64_CORE
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 69713b20c..fafffcf6b 100644
+index 22b51e12f..f975aad07 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2729,7 +2729,7 @@ static const struct processor all_cores[] =
+@@ -2946,7 +2946,7 @@ static const struct processor all_cores[] =
  {
  #define AARCH64_CORE(NAME, IDENT, SCHED, ARCH, FLAGS, COSTS, IMP, PART, VARIANT) \
    {NAME, IDENT, SCHED, AARCH64_ARCH_##ARCH,				\
@@ -268,5 +269,5 @@ index 644780ef2..97690de62 100644
  static struct aarch64_core_data aarch64_cpu_data[] =
  {
 -- 
-2.19.1
+2.33.0
 
diff --git a/0114-LoongArch-Remove-constraint-z-from-movsi_internal.patch b/0114-LoongArch-Remove-constraint-z-from-movsi_internal.patch
new file mode 100644
index 0000000000000000000000000000000000000000..17ab11aae1f0633c7f173e88d6c6e1690da2e5f6
--- /dev/null
+++ b/0114-LoongArch-Remove-constraint-z-from-movsi_internal.patch
@@ -0,0 +1,43 @@
+From 167a3f34b308d3d56e816559701c3fb1c4f88c7b Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 25 Oct 2024 03:30:35 +0000
+Subject: [PATCH 114/188] LoongArch: Remove constraint z from movsi_internal
+
+We don't allow SImode in FCC, so constraint z is never really used
+here.
+
+gcc/ChangeLog:
+
+        * config/loongarch/loongarch.md (movsi_internal): Remove
+        constraint z.
+---
+ gcc/config/loongarch/loongarch.md | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 4c7e28ace..23d8dc126 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2197,8 +2197,8 @@
+ })
+ 
+ (define_insn_and_split "*movsi_internal"
+-  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m,*r,*z")
+-	(match_operand:SI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f,*z,*r"))]
++  [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,w,*f,f,*r,*m")
++   (match_operand:SI 1 "move_operand" "r,Yd,w,rJ,*r*J,m,*f,*f"))]
+   "(register_operand (operands[0], SImode)
+     || reg_or_0_operand (operands[1], SImode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
+@@ -2211,7 +2211,7 @@
+   DONE;
+ }
+   "
+-  [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf")
++  [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore")
+    (set_attr "mode" "SI")])
+ 
+ ;; 16-bit Integer moves
+-- 
+2.43.0
+
diff --git a/SME-0012-aarch64-Remove-AARCH64_FL_RCPC8_4-PR107025.patch b/0115-Backport-SME-aarch64-Remove-AARCH64_FL_RCPC8_4-PR107.patch
similarity index 96%
rename from SME-0012-aarch64-Remove-AARCH64_FL_RCPC8_4-PR107025.patch
rename to 0115-Backport-SME-aarch64-Remove-AARCH64_FL_RCPC8_4-PR107.patch
index e362bcccc13efccb40fd71ce93023454c81969d8..f65a31bc11a7eeae811f113d3fc6652644ca39ae 100644
--- a/SME-0012-aarch64-Remove-AARCH64_FL_RCPC8_4-PR107025.patch
+++ b/0115-Backport-SME-aarch64-Remove-AARCH64_FL_RCPC8_4-PR107.patch
@@ -1,7 +1,8 @@
-From d2cffbc7ed1cae0cb46f452a4ba327335f66fbdc Mon Sep 17 00:00:00 2001
+From f6137d5be2761caea75dcc1c98d941ceec161456 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:53 +0100
-Subject: [PATCH 012/144] aarch64: Remove AARCH64_FL_RCPC8_4 [PR107025]
+Subject: [PATCH 016/157] [Backport][SME] aarch64: Remove AARCH64_FL_RCPC8_4
+ [PR107025]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=0f244d848cffeda68f0eb4c5bb9c7e629bf2e957
 
@@ -78,5 +79,5 @@ index 918a14193..f4e0cd148 100644
  #define AARCH64_ISA_V8_5A	   (aarch64_isa_flags & AARCH64_FL_V8_5A)
  #define AARCH64_ISA_TME		   (aarch64_isa_flags & AARCH64_FL_TME)
 -- 
-2.19.1
+2.33.0
 
diff --git a/0115-LoongArch-doc-Add-attribute-descriptions-defined-in-.patch b/0115-LoongArch-doc-Add-attribute-descriptions-defined-in-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f46094afe9f56d051cee6644c4a6692fb29899e9
--- /dev/null
+++ b/0115-LoongArch-doc-Add-attribute-descriptions-defined-in-.patch
@@ -0,0 +1,47 @@
+From 0929961b9dd57e0dd18e4cccc6ba760706e74f77 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Wed, 17 Jan 2024 09:24:06 +0800
+Subject: [PATCH 115/188] LoongArch: doc: Add attribute descriptions defined in
+ the target-supports.exp.
+
+gcc/ChangeLog:
+
+	* doc/sourcebuild.texi: Add attributes for keywords.
+---
+ gcc/doc/sourcebuild.texi | 20 ++++++++++++++++++++
+ 1 file changed, 20 insertions(+)
+
+diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
+index 71c04841d..a1ab0a1cb 100644
+--- a/gcc/doc/sourcebuild.texi
++++ b/gcc/doc/sourcebuild.texi
+@@ -2292,6 +2292,26 @@ AArch64 target that is able to generate and execute armv8.3-a FJCVTZS
+ instruction.
+ @end table
+ 
++@subsubsection LoongArch specific attributes
++
++@table @code
++@item loongarch_sx
++LoongArch target that generates instructions for SX.
++
++@item loongarch_asx
++LoongArch target that generates instructions for ASX.
++
++@item loongarch_sx_hw
++LoongArch target that is able to generate and execute SX code.
++
++@item loongarch_asx_hw
++LoongArch target that is able to generate and execute ASX code.
++
++@item loongarch_call36_support
++LoongArch binutils supports call36 relocation.
++
++@end table
++
+ @subsubsection MIPS-specific attributes
+ 
+ @table @code
+-- 
+2.43.0
+
diff --git a/SME-0013-aarch64-Fix-transitive-closure-of-features.patch b/0116-Backport-SME-aarch64-Fix-transitive-closure-of-featu.patch
similarity index 97%
rename from SME-0013-aarch64-Fix-transitive-closure-of-features.patch
rename to 0116-Backport-SME-aarch64-Fix-transitive-closure-of-featu.patch
index 1ad36b66220db65ad6c74aab4f3636fe75738727..ed2a375aae2388d7726d95e32fcfbf22b89bbdc3 100644
--- a/SME-0013-aarch64-Fix-transitive-closure-of-features.patch
+++ b/0116-Backport-SME-aarch64-Fix-transitive-closure-of-featu.patch
@@ -1,7 +1,8 @@
-From 61d76d5b2a0a9c517fc2b62c03b646b96c1d4db6 Mon Sep 17 00:00:00 2001
+From c6698a5feb07fc0cda89a54a0ee4006295ac6dbe Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:53 +0100
-Subject: [PATCH 013/144] aarch64: Fix transitive closure of features
+Subject: [PATCH 017/157] [Backport][SME] aarch64: Fix transitive closure of
+ features
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=b754d32d3053a4ba2a82361ac0f2739797a811f1
 
@@ -149,5 +150,5 @@ index 000000000..0e6461fa4
 +#error Foo
 +#endif
 -- 
-2.19.1
+2.33.0
 
diff --git a/0116-LoongArch-Disable-explicit-reloc-for-TLS-LD-GD-with-.patch b/0116-LoongArch-Disable-explicit-reloc-for-TLS-LD-GD-with-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9720e9d02771f44b046fc925c50501e7e83fdd6a
--- /dev/null
+++ b/0116-LoongArch-Disable-explicit-reloc-for-TLS-LD-GD-with-.patch
@@ -0,0 +1,70 @@
+From c0b63b89a03c11bf6383f0175b60614d73295463 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Mon, 22 Jan 2024 18:07:42 +0800
+Subject: [PATCH 116/188] LoongArch: Disable explicit reloc for TLS LD/GD with
+ -mexplicit-relocs=auto
+
+Binutils 2.42 supports TLS LD/GD relaxation which requires the assembler
+macro.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_explicit_relocs_p):
+	If la_opt_explicit_relocs is EXPLICIT_RELOCS_AUTO, return false
+	for SYMBOL_TLS_LDM and SYMBOL_TLS_GD.
+	(loongarch_call_tls_get_addr): Do not split symbols of
+	SYMBOL_TLS_LDM or SYMBOL_TLS_GD if la_opt_explicit_relocs is
+	EXPLICIT_RELOCS_AUTO.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c: Check
+	for la.tls.ld and la.tls.gd.
+---
+ gcc/config/loongarch/loongarch.cc                      | 10 +++++-----
+ .../loongarch/explicit-relocs-auto-tls-ld-gd.c         |  3 ++-
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 7da00c132..5f22b9dd8 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1967,11 +1967,11 @@ loongarch_explicit_relocs_p (enum loongarch_symbol_type type)
+     {
+       case SYMBOL_TLS_IE:
+       case SYMBOL_TLS_LE:
+-      case SYMBOL_TLSGD:
+-      case SYMBOL_TLSLDM:
+       case SYMBOL_PCREL64:
+-	/* The linker don't know how to relax TLS accesses or 64-bit
+-	   pc-relative accesses.  */
++	/* TLS IE cannot be relaxed.  TLS LE relaxation is different from
++	   the normal R_LARCH_RELAX-based relaxation and it **requires**
++	   using the explicit %le_{lo12,hi20,add}_r relocs.  The linker
++	   does not relax 64-bit pc-relative accesses as at now.  */
+ 	return true;
+       case SYMBOL_GOT_DISP:
+ 	/* The linker don't know how to relax GOT accesses in extreme
+@@ -2785,7 +2785,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+   start_sequence ();
+ 
+-  if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
++  if (la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS)
+     {
+       /* Split tls symbol to high and low.  */
+       rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc));
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c
+index 957ff98df..ca55fcfc5 100644
+--- a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c
+@@ -6,4 +6,5 @@ extern __thread int b __attribute__((visibility("default")));
+ 
+ int test() { return a + b; }
+ 
+-/* { dg-final { scan-assembler-not "la.tls" { target tls_native } } } */
++/* { dg-final { scan-assembler "la\\.tls\\.ld" { target tls_native } } } */
++/* { dg-final { scan-assembler "la\\.tls\\.gd" { target tls_native } } } */
+-- 
+2.43.0
+
diff --git a/SME-0014-aarch64-Reorder-an-entry-in-aarch64-option-extension.patch b/0117-Backport-SME-aarch64-Reorder-an-entry-in-aarch64-opt.patch
similarity index 98%
rename from SME-0014-aarch64-Reorder-an-entry-in-aarch64-option-extension.patch
rename to 0117-Backport-SME-aarch64-Reorder-an-entry-in-aarch64-opt.patch
index c4ffbdcf2c629eb7e3f9065a05d7faa98d83af29..f4ef8449df655b8d623883c282250e8e246d5662 100644
--- a/SME-0014-aarch64-Reorder-an-entry-in-aarch64-option-extension.patch
+++ b/0117-Backport-SME-aarch64-Reorder-an-entry-in-aarch64-opt.patch
@@ -1,7 +1,7 @@
-From 76928293f634dfdf96ab623a8b22321cb90af24a Mon Sep 17 00:00:00 2001
+From 4a2d0bdf5c9a5f4ee615c1d0768cb2e8a3dfef4a Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:54 +0100
-Subject: [PATCH 014/144] aarch64: Reorder an entry in
+Subject: [PATCH 018/157] [Backport][SME] aarch64: Reorder an entry in
  aarch64-option-extensions.def
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c067c474f85b1e9c56fb34dd51ef0eec9221b766
@@ -190,5 +190,5 @@ index 15514bfe9..5370e02e1 100644
 \ No newline at end of file
 +   grouping is kept. */
 -- 
-2.19.1
+2.33.0
 
diff --git a/0117-LoongArch-testsuite-Disable-stack-protector-for-got-.patch b/0117-LoongArch-testsuite-Disable-stack-protector-for-got-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..85f81c7c930df657fcda9288a13a87fa274e7948
--- /dev/null
+++ b/0117-LoongArch-testsuite-Disable-stack-protector-for-got-.patch
@@ -0,0 +1,35 @@
+From 7e10f7b95a598e9471bd1bc77454af43a69eb506 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 23 Jan 2024 19:32:38 +0800
+Subject: [PATCH 117/188] LoongArch: testsuite: Disable stack protector for
+ got-load.C
+
+When building GCC with --enable-default-ssp, the stack protector is
+enabled for got-load.C, causing additional GOT loads for
+__stack_chk_guard.  So mem/u will be matched more than 2 times and the
+test will fail.
+
+Disable stack protector to fix this issue.
+
+gcc/testsuite:
+
+	* g++.target/loongarch/got-load.C (dg-options): Add
+	-fno-stack-protector.
+---
+ gcc/testsuite/g++.target/loongarch/got-load.C | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/testsuite/g++.target/loongarch/got-load.C b/gcc/testsuite/g++.target/loongarch/got-load.C
+index 20924c739..17870176a 100644
+--- a/gcc/testsuite/g++.target/loongarch/got-load.C
++++ b/gcc/testsuite/g++.target/loongarch/got-load.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O2 -mexplicit-relocs -mcmodel=normal -fdump-rtl-expand" } */
++/* { dg-options "-mabi=lp64d -O2 -mexplicit-relocs -mcmodel=normal -fdump-rtl-expand -fno-stack-protector" } */
+ /* { dg-final { scan-rtl-dump-times "mem/u" 2 "expand" } } */
+ 
+ #include 
+-- 
+2.43.0
+
diff --git a/SME-0015-aarch64-Simplify-feature-definitions.patch b/0118-Backport-SME-aarch64-Simplify-feature-definitions.patch
similarity index 99%
rename from SME-0015-aarch64-Simplify-feature-definitions.patch
rename to 0118-Backport-SME-aarch64-Simplify-feature-definitions.patch
index 80c467e5e5ede52f4f0afcdaf2c3ec02efc2f4d7..ffff47d605d117472f61b0220f0dee01d142de8a 100644
--- a/SME-0015-aarch64-Simplify-feature-definitions.patch
+++ b/0118-Backport-SME-aarch64-Simplify-feature-definitions.patch
@@ -1,7 +1,7 @@
-From 3687c4828ca71698a5d2d786f9554dc4b789c58d Mon Sep 17 00:00:00 2001
+From deb18d5083d8f9edbdafac184c010a6720dc8dda Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:54 +0100
-Subject: [PATCH 015/144] aarch64: Simplify feature definitions
+Subject: [PATCH 019/157] [Backport][SME] aarch64: Simplify feature definitions
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=11a113d501ff64fa4843e28d0a21b3f4e9d0d3de
 
@@ -106,7 +106,7 @@ gcc/
  create mode 100644 gcc/config/aarch64/aarch64-feature-deps.h
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index 1739555de..2e9100611 100644
+index a965ac660..74729bb30 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
 @@ -30,6 +30,7 @@
@@ -117,7 +117,7 @@ index 1739555de..2e9100611 100644
  
  #ifdef  TARGET_BIG_ENDIAN_DEFAULT
  #undef  TARGET_DEFAULT_TARGET_FLAGS
-@@ -138,9 +139,12 @@ struct aarch64_option_extension
+@@ -214,9 +215,12 @@ struct aarch64_option_extension
  /* ISA extensions in AArch64.  */
  static const struct aarch64_option_extension all_extensions[] =
  {
@@ -133,7 +133,7 @@ index 1739555de..2e9100611 100644
  #include "config/aarch64/aarch64-option-extensions.def"
    {NULL, 0, 0, 0, false}
  };
-@@ -149,9 +153,12 @@ static const struct aarch64_option_extension all_extensions[] =
+@@ -225,9 +229,12 @@ static const struct aarch64_option_extension all_extensions[] =
     bits and extension turned on.  Cached for efficiency.  */
  static struct aarch64_option_extension all_extensions_by_on[] =
  {
@@ -149,7 +149,7 @@ index 1739555de..2e9100611 100644
  #include "config/aarch64/aarch64-option-extensions.def"
    {NULL, 0, 0, 0, false}
  };
-@@ -174,18 +181,18 @@ struct arch_to_arch_name
+@@ -250,18 +257,18 @@ struct arch_to_arch_name
     the default set of architectural feature flags they support.  */
  static const struct processor_name_to_arch all_cores[] =
  {
@@ -914,7 +914,7 @@ index 12d9beee4..c06e99339 100644
    };
  
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index fafffcf6b..118e14a21 100644
+index f975aad07..1363873b1 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -81,6 +81,7 @@
@@ -925,7 +925,7 @@ index fafffcf6b..118e14a21 100644
  
  /* This file should be included last.  */
  #include "target-def.h"
-@@ -2718,8 +2719,9 @@ struct processor
+@@ -2935,8 +2936,9 @@ struct processor
  /* Architectures implementing AArch64.  */
  static const struct processor all_architectures[] =
  {
@@ -937,7 +937,7 @@ index fafffcf6b..118e14a21 100644
  #include "aarch64-arches.def"
    {NULL, aarch64_none, aarch64_none, aarch64_no_arch, 0, NULL}
  };
-@@ -2727,12 +2729,12 @@ static const struct processor all_architectures[] =
+@@ -2944,12 +2946,12 @@ static const struct processor all_architectures[] =
  /* Processor cores implementing AArch64.  */
  static const struct processor all_cores[] =
  {
@@ -1172,5 +1172,5 @@ index 97690de62..ddfc9451f 100644
  static struct aarch64_arch_driver_info aarch64_arches[] =
  {
 -- 
-2.19.1
+2.33.0
 
diff --git a/0118-LoongArch-Disable-TLS-type-symbols-from-generating-n.patch b/0118-LoongArch-Disable-TLS-type-symbols-from-generating-n.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b45cfff82d5a7ca3e59cac9a3d066b7a8d4b6e58
--- /dev/null
+++ b/0118-LoongArch-Disable-TLS-type-symbols-from-generating-n.patch
@@ -0,0 +1,65 @@
+From 8cab312a44efc4711fb7adad65f1314349295591 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 23 Jan 2024 11:28:09 +0800
+Subject: [PATCH 118/188] LoongArch: Disable TLS type symbols from generating
+ non-zero offsets.
+
+TLS gd ld and ie type symbols will generate corresponding GOT entries,
+so non-zero offsets cannot be generated.
+The address of TLS le type symbol+addend is not implemented in binutils,
+so non-zero offset is not generated here for the time being.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_symbolic_constant_p):
+	For symbols of type tls, non-zero Offset is not generated.
+---
+ gcc/config/loongarch/loongarch.cc | 18 +++++++++---------
+ 1 file changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 5f22b9dd8..9cdd4ed15 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1921,11 +1921,7 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type)
+       x = UNSPEC_ADDRESS (x);
+     }
+   else if (SYMBOL_REF_P (x) || LABEL_REF_P (x))
+-    {
+-      *symbol_type = loongarch_classify_symbol (x);
+-      if (*symbol_type == SYMBOL_TLS)
+-	return true;
+-    }
++    *symbol_type = loongarch_classify_symbol (x);
+   else
+     return false;
+ 
+@@ -1936,17 +1932,21 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type)
+      relocations.  */
+   switch (*symbol_type)
+     {
+-    case SYMBOL_TLS_IE:
+-    case SYMBOL_TLS_LE:
+-    case SYMBOL_TLSGD:
+-    case SYMBOL_TLSLDM:
+     case SYMBOL_PCREL:
+     case SYMBOL_PCREL64:
+       /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
+       return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);
+ 
++    /* The following symbol types do not allow non-zero offsets.  */
+     case SYMBOL_GOT_DISP:
++    case SYMBOL_TLS_IE:
++    case SYMBOL_TLSGD:
++    case SYMBOL_TLSLDM:
+     case SYMBOL_TLS:
++    /* From an implementation perspective, tls_le symbols are allowed to
++       have non-zero offsets, but currently binutils has not added support,
++       so the generation of non-zero offsets is prohibited here.  */
++    case SYMBOL_TLS_LE:
+       return false;
+     }
+   gcc_unreachable ();
+-- 
+2.43.0
+
diff --git a/SME-0016-aarch64-Simplify-generation-of-.arch-strings.patch b/0119-Backport-SME-aarch64-Simplify-generation-of-.arch-st.patch
similarity index 97%
rename from SME-0016-aarch64-Simplify-generation-of-.arch-strings.patch
rename to 0119-Backport-SME-aarch64-Simplify-generation-of-.arch-st.patch
index 9e830e2777b8e78d77e0b5e2c8d3cfc1be3e92c9..fb3f7a8d1bd71c8338fd24822745719104daa8e7 100644
--- a/SME-0016-aarch64-Simplify-generation-of-.arch-strings.patch
+++ b/0119-Backport-SME-aarch64-Simplify-generation-of-.arch-st.patch
@@ -1,7 +1,8 @@
-From e26667263ab4890be69f44d1ff0e734583ee4ea5 Mon Sep 17 00:00:00 2001
+From e7ebc54e809e8647ff054a02fbaf946b41414004 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:55 +0100
-Subject: [PATCH 016/144] aarch64: Simplify generation of .arch strings
+Subject: [PATCH 020/157] [Backport][SME] aarch64: Simplify generation of .arch
+ strings
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=4ebf56f283ae5a98ae4c43079b7e8459945ef18d
 
@@ -76,7 +77,7 @@ gcc/testsuite/
  9 files changed, 55 insertions(+), 205 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index 2e9100611..85fb5f26d 100644
+index 74729bb30..057dc094d 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
 @@ -42,8 +42,6 @@
@@ -86,9 +87,9 @@ index 2e9100611..85fb5f26d 100644
 -#undef TARGET_OPTION_INIT_STRUCT
 -#define TARGET_OPTION_INIT_STRUCT aarch64_option_init_struct
  
- /* Set default optimization options.  */
- static const struct default_options aarch_option_optimization_table[] =
-@@ -133,7 +131,6 @@ struct aarch64_option_extension
+ #define INVALID_IMP ((unsigned) -1)
+ 
+@@ -209,7 +207,6 @@ struct aarch64_option_extension
    const uint64_t flag_canonical;
    const uint64_t flags_on;
    const uint64_t flags_off;
@@ -96,7 +97,7 @@ index 2e9100611..85fb5f26d 100644
  };
  
  /* ISA extensions in AArch64.  */
-@@ -143,24 +140,9 @@ static const struct aarch64_option_extension all_extensions[] =
+@@ -219,24 +216,9 @@ static const struct aarch64_option_extension all_extensions[] =
    {NAME, AARCH64_FL_##IDENT, \
     feature_deps::IDENT ().explicit_on & ~AARCH64_FL_##IDENT, \
     feature_deps::get_flags_off (feature_deps::root_off_##IDENT) \
@@ -123,7 +124,7 @@ index 2e9100611..85fb5f26d 100644
  };
  
  struct processor_name_to_arch
-@@ -277,79 +259,6 @@ aarch64_get_all_extension_candidates (auto_vec *candidates)
+@@ -353,79 +335,6 @@ aarch64_get_all_extension_candidates (auto_vec *candidates)
      candidates->safe_push (opt->name);
  }
  
@@ -203,7 +204,7 @@ index 2e9100611..85fb5f26d 100644
  /* Return a string representation of ISA_FLAGS.  DEFAULT_ARCH_FLAGS
     gives the default set of flags which are implied by whatever -march
     we'd put out.  Our job is to figure out the minimal set of "+" and
-@@ -360,118 +269,59 @@ std::string
+@@ -436,118 +345,59 @@ std::string
  aarch64_get_extension_string_for_isa_flags (uint64_t isa_flags,
  					    uint64_t default_arch_flags)
  {
@@ -462,5 +463,5 @@ index 108b372e4..069a00108 100644
 -/* { dg-final { scan-assembler-times "\\.arch armv8-a\\+nofp\\+nosimd\n" 1 } } */
 +/* { dg-final { scan-assembler-times "\\.arch armv8-a\\+nofp\n" 1 } } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/0119-LoongArch-Remove-vec_concatz-mode-pattern.patch b/0119-LoongArch-Remove-vec_concatz-mode-pattern.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f9df3a68d01b12f5f84b5bb56672524fdfce1a42
--- /dev/null
+++ b/0119-LoongArch-Remove-vec_concatz-mode-pattern.patch
@@ -0,0 +1,75 @@
+From e19c5ba24839d7446f1874b0b33bd61e27e36905 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Wed, 24 Jan 2024 17:19:13 +0800
+Subject: [PATCH 119/188] LoongArch: Remove vec_concatz pattern.
+
+It is incorrect to use vld/vori to implement the vec_concatz because when the LSX
+instruction is used to update the value of the vector register, the upper 128 bits of
+the vector register will not be zeroed.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (@vec_concatz): Remove this define_insn pattern.
+	* config/loongarch/loongarch.cc (loongarch_expand_vector_group_init): Use vec_concat.
+---
+ gcc/config/loongarch/lasx.md      | 15 ---------------
+ gcc/config/loongarch/loongarch.cc | 17 ++++++-----------
+ 2 files changed, 6 insertions(+), 26 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index fdfd65e4a..a5128c30c 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -582,21 +582,6 @@
+   [(set_attr "type" "simd_insert")
+    (set_attr "mode" "")])
+ 
+-(define_insn "@vec_concatz"
+-  [(set (match_operand:LASX 0 "register_operand" "=f")
+-    (vec_concat:LASX
+-      (match_operand: 1 "nonimmediate_operand")
+-      (match_operand: 2 "const_0_operand")))]
+-  "ISA_HAS_LASX"
+-{
+-  if (MEM_P (operands[1]))
+-    return "vld\t%w0,%1";
+-  else
+-    return "vori.b\t%w0,%w1,0";
+-}
+-  [(set_attr "type" "simd_splat")
+-   (set_attr "mode" "")])
+-
+ (define_insn "vec_concat"
+   [(set (match_operand:LASX 0 "register_operand" "=f")
+ 	(vec_concat:LASX
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 9cdd4ed15..9bd931549 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -9912,17 +9912,12 @@ loongarch_expand_vector_group_init (rtx target, rtx vals)
+       gcc_unreachable ();
+     }
+ 
+-  if (high == CONST0_RTX (half_mode))
+-    emit_insn (gen_vec_concatz (vmode, target, low, high));
+-  else
+-    {
+-      if (!register_operand (low, half_mode))
+-	low = force_reg (half_mode, low);
+-      if (!register_operand (high, half_mode))
+-	high = force_reg (half_mode, high);
+-      emit_insn (gen_rtx_SET (target,
+-			      gen_rtx_VEC_CONCAT (vmode, low, high)));
+-    }
++  if (!register_operand (low, half_mode))
++    low = force_reg (half_mode, low);
++  if (!register_operand (high, half_mode))
++    high = force_reg (half_mode, high);
++  emit_insn (gen_rtx_SET (target,
++			  gen_rtx_VEC_CONCAT (vmode, low, high)));
+ }
+ 
+ /* Expand initialization of a vector which has all same elements.  */
+-- 
+2.43.0
+
diff --git a/SME-0017-aarch64-Avoid-std-string-in-static-data.patch b/0120-Backport-SME-aarch64-Avoid-std-string-in-static-data.patch
similarity index 78%
rename from SME-0017-aarch64-Avoid-std-string-in-static-data.patch
rename to 0120-Backport-SME-aarch64-Avoid-std-string-in-static-data.patch
index 2256ccfc6a70d19af435cc9e6dff566db699b153..619342b8e2ce45ce62437f018c9715836ffe1dd0 100644
--- a/SME-0017-aarch64-Avoid-std-string-in-static-data.patch
+++ b/0120-Backport-SME-aarch64-Avoid-std-string-in-static-data.patch
@@ -1,7 +1,8 @@
-From c5975ec839a8ed01dbe16990ad22222466060b2e Mon Sep 17 00:00:00 2001
+From 7096be1673a10da5218a8620fb40b4b26e61c1d4 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:55 +0100
-Subject: [PATCH 017/144] aarch64: Avoid std::string in static data
+Subject: [PATCH 021/157] [Backport][SME] aarch64: Avoid std::string in static
+ data
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=13af9e9fda391f4f0566ad8f0b4d0448a7e984d0
 
@@ -16,10 +17,10 @@ gcc/
  1 file changed, 2 insertions(+), 2 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index 85fb5f26d..8760e0920 100644
+index 057dc094d..2bdf51b8b 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -147,7 +147,7 @@ static const struct aarch64_option_extension all_extensions[] =
+@@ -223,7 +223,7 @@ static const struct aarch64_option_extension all_extensions[] =
  
  struct processor_name_to_arch
  {
@@ -28,7 +29,7 @@ index 85fb5f26d..8760e0920 100644
    const enum aarch64_arch arch;
    const uint64_t flags;
  };
-@@ -155,7 +155,7 @@ struct processor_name_to_arch
+@@ -231,7 +231,7 @@ struct processor_name_to_arch
  struct arch_to_arch_name
  {
    const enum aarch64_arch arch;
@@ -38,5 +39,5 @@ index 85fb5f26d..8760e0920 100644
  };
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0120-LoongArch-Optimize-implementation-of-single-precisio.patch b/0120-LoongArch-Optimize-implementation-of-single-precisio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..35c09d8ccd4fa1e1ad4c7c9fb06e4d23db3f7528
--- /dev/null
+++ b/0120-LoongArch-Optimize-implementation-of-single-precisio.patch
@@ -0,0 +1,107 @@
+From cb9180ef1fb7e7b97a60adc3d3908b9684771cd8 Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Wed, 24 Jan 2024 17:44:17 +0800
+Subject: [PATCH 120/188] LoongArch: Optimize implementation of
+ single-precision floating-point approximate division.
+
+We found that in the spec17 521.wrf program, some loop invariant code generated
+from single-precision floating-point approximate division calculation failed to
+propose a loop. This is because the pseudo-register that stores the
+intermediate temporary calculation results is rewritten in the implementation
+of single-precision floating-point approximate division, failing to propose
+invariants in the loop2_invariant pass. To this end, the intermediate temporary
+calculation results are stored in new pseudo-registers without destroying the
+read-write dependency, so that they could be recognized as loop invariants in
+the loop2_invariant pass.
+After optimization, the number of instructions of 521.wrf is reduced by 0.18%
+compared with before optimization (1716612948501 -> 1713471771364).
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_emit_swdivsf): Adjust.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/invariant-recip.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 19 +++++++----
+ .../gcc.target/loongarch/invariant-recip.c    | 33 +++++++++++++++++++
+ 2 files changed, 46 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/invariant-recip.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 9bd931549..5877b0acf 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -10842,16 +10842,23 @@ void loongarch_emit_swdivsf (rtx res, rtx a, rtx b, machine_mode mode)
+   /* x0 = 1./b estimate.  */
+   emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, b),
+ 					      unspec)));
+-  /* 2.0 - b * x0  */
++  /* e0 = 2.0 - b * x0.  */
+   emit_insn (gen_rtx_SET (e0, gen_rtx_FMA (mode,
+ 					   gen_rtx_NEG (mode, b), x0, mtwo)));
+ 
+-  /* x0 = a * x0  */
+   if (a != CONST1_RTX (mode))
+-    emit_insn (gen_rtx_SET (x0, gen_rtx_MULT (mode, a, x0)));
+-
+-  /* res = e0 * x0  */
+-  emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e0, x0)));
++    {
++      rtx e1 = gen_reg_rtx (mode);
++      /* e1 = a * x0.  */
++      emit_insn (gen_rtx_SET (e1, gen_rtx_MULT (mode, a, x0)));
++      /* res = e0 * e1.  */
++      emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e0, e1)));
++    }
++  else
++    {
++      /* res = e0 * x0.  */
++      emit_insn (gen_rtx_SET (res, gen_rtx_MULT (mode, e0, x0)));
++    }
+ }
+ 
+ static bool
+diff --git a/gcc/testsuite/gcc.target/loongarch/invariant-recip.c b/gcc/testsuite/gcc.target/loongarch/invariant-recip.c
+new file mode 100644
+index 000000000..2f64f6ed5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/invariant-recip.c
+@@ -0,0 +1,33 @@
++/* { dg-do compile } */
++/* { dg-options "-Ofast -march=loongarch64 -mabi=lp64d -mrecip -mfrecipe -fdump-rtl-loop2_invariant " } */
++/* { dg-final { scan-rtl-dump "Decided to move dependent invariant" "loop2_invariant" } } */
++
++void
++nislfv_rain_plm (int im, int km, float dzl[im][km], float rql[im][km],
++                 float dt)
++{
++  int i, k;
++  float con1, decfl;
++  float dz[km], qn[km], wi[km + 1];
++
++  for (i = 0; i < im; i++)
++    {
++      for (k = 0; k < km; k++)
++        {
++          dz[k] = dzl[i][k];
++        }
++      con1 = 0.05;
++      for (k = km - 1; k >= 0; k--)
++        {
++          decfl = (wi[k + 1] - wi[k]) * dt / dz[k];
++          if (decfl > con1)
++            {
++              wi[k] = wi[k + 1] - con1 * dz[k] / dt;
++            }
++        }
++      for (k = 0; k < km; k++)
++        {
++          rql[i][k] = qn[k];
++        }
++    }
++}
+-- 
+2.43.0
+
diff --git a/SME-0018-aarch64-Tweak-constness-of-option-related-data.patch b/0121-Backport-SME-aarch64-Tweak-constness-of-option-relat.patch
similarity index 93%
rename from SME-0018-aarch64-Tweak-constness-of-option-related-data.patch
rename to 0121-Backport-SME-aarch64-Tweak-constness-of-option-relat.patch
index ea3a84de42f5c978b0fe8effffefe6307a489897..4ababc222daef55a94136f466660811cdf2cf2e3 100644
--- a/SME-0018-aarch64-Tweak-constness-of-option-related-data.patch
+++ b/0121-Backport-SME-aarch64-Tweak-constness-of-option-relat.patch
@@ -1,7 +1,8 @@
-From c368d02ecf6ff92a782abbf1c175482123aa9246 Mon Sep 17 00:00:00 2001
+From 99c5eb58e898417632b6d9a7b2b3d288b50e9b65 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:55 +0100
-Subject: [PATCH 018/144] aarch64: Tweak constness of option-related data
+Subject: [PATCH 022/157] [Backport][SME] aarch64: Tweak constness of
+ option-related data
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=60dee638c8a7ae59c033868de7e7638c88b38ed2
 
@@ -33,10 +34,10 @@ gcc/
  3 files changed, 27 insertions(+), 28 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index 8760e0920..918ac844d 100644
+index 2bdf51b8b..ac3486d71 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -127,14 +127,14 @@ aarch64_handle_option (struct gcc_options *opts,
+@@ -203,14 +203,14 @@ aarch64_handle_option (struct gcc_options *opts,
  /* An ISA extension in the co-processor and main instruction set space.  */
  struct aarch64_option_extension
  {
@@ -56,7 +57,7 @@ index 8760e0920..918ac844d 100644
  {
  #define AARCH64_OPT_EXTENSION(NAME, IDENT, C, D, E, F) \
    {NAME, AARCH64_FL_##IDENT, \
-@@ -147,21 +147,21 @@ static const struct aarch64_option_extension all_extensions[] =
+@@ -223,21 +223,21 @@ static const struct aarch64_option_extension all_extensions[] =
  
  struct processor_name_to_arch
  {
@@ -85,7 +86,7 @@ index 8760e0920..918ac844d 100644
  {
  #define AARCH64_CORE(NAME, CORE_IDENT, C, ARCH_IDENT, E, F, G, H, I) \
    {NAME, AARCH64_ARCH_##ARCH_IDENT, feature_deps::cpu_##CORE_IDENT},
-@@ -171,7 +171,7 @@ static const struct processor_name_to_arch all_cores[] =
+@@ -247,7 +247,7 @@ static const struct processor_name_to_arch all_cores[] =
  };
  
  /* Map architecture revisions to their string representation.  */
@@ -95,10 +96,10 @@ index 8760e0920..918ac844d 100644
  #define AARCH64_ARCH(NAME, B, ARCH_IDENT, D, E)	\
    {AARCH64_ARCH_##ARCH_IDENT, NAME, feature_deps::ARCH_IDENT ().enable},
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 118e14a21..db89765c9 100644
+index 1363873b1..71db7ace1 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2708,16 +2708,16 @@ aarch64_tuning_override_functions[] =
+@@ -2925,16 +2925,16 @@ aarch64_tuning_override_functions[] =
  /* A processor implementing AArch64.  */
  struct processor
  {
@@ -190,5 +191,5 @@ index ddfc9451f..ee9cb65a5 100644
  	  /* We got some arch indentifier that's not in aarch64-arches.def?  */
  	  if (!arch_info)
 -- 
-2.19.1
+2.33.0
 
diff --git a/0121-LoongArch-Define-LOGICAL_OP_NON_SHORT_CIRCUIT.patch b/0121-LoongArch-Define-LOGICAL_OP_NON_SHORT_CIRCUIT.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2d14acb43dcdc6d00d847e9156c2abfd16ec72b9
--- /dev/null
+++ b/0121-LoongArch-Define-LOGICAL_OP_NON_SHORT_CIRCUIT.patch
@@ -0,0 +1,71 @@
+From a2baa4807fdfd381c543eb7ea85edf14dc6c8a20 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Tue, 16 Jan 2024 10:32:31 +0800
+Subject: [PATCH 121/188] LoongArch: Define LOGICAL_OP_NON_SHORT_CIRCUIT
+
+Define LOGICAL_OP_NON_SHORT_CIRCUIT as 0, for a short-circuit branch, use the
+short-circuit operation instead of the non-short-circuit operation.
+
+SPEC2017 performance evaluation shows 1% performance improvement for fprate
+GEOMEAN and no obvious regression for others. Especially, 526.blender_r +10.6%
+on 3A6000.
+
+This modification will introduce the following FAIL items:
+
+FAIL: gcc.dg/tree-ssa/copy-headers-8.c scan-tree-dump-times ch2 "Conditional combines static and invariant" 1
+FAIL: gcc.dg/tree-ssa/copy-headers-8.c scan-tree-dump-times ch2 "Will duplicate bb" 2
+FAIL: gcc.dg/tree-ssa/update-threading.c scan-tree-dump-times optimized "Invalid sum" 0
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (LOGICAL_OP_NON_SHORT_CIRCUIT): Define.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/short-circuit.c: New test.
+---
+ gcc/config/loongarch/loongarch.h              |  1 +
+ .../gcc.target/loongarch/short-circuit.c      | 19 +++++++++++++++++++
+ 2 files changed, 20 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/short-circuit.c
+
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index f54b078b1..15261fdc0 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -869,6 +869,7 @@ typedef struct {
+    1 is the default; other values are interpreted relative to that.  */
+ 
+ #define BRANCH_COST(speed_p, predictable_p) la_branch_cost
++#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
+ 
+ /* Return the asm template for a conditional branch instruction.
+    OPCODE is the opcode's mnemonic and OPERANDS is the asm template for
+diff --git a/gcc/testsuite/gcc.target/loongarch/short-circuit.c b/gcc/testsuite/gcc.target/loongarch/short-circuit.c
+new file mode 100644
+index 000000000..bed585ee1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/short-circuit.c
+@@ -0,0 +1,19 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -fdump-tree-gimple" } */
++
++int
++short_circuit (float *a)
++{
++  float t1x = a[0];
++  float t2x = a[1];
++  float t1y = a[2];
++  float t2y = a[3];
++  float t1z = a[4];
++  float t2z = a[5];
++
++  if (t1x > t2y  || t2x < t1y  || t1x > t2z || t2x < t1z || t1y > t2z || t2y < t1z)
++    return 0;
++
++  return 1;
++}
++/* { dg-final { scan-tree-dump-times "if" 6 "gimple" } } */
+-- 
+2.43.0
+
diff --git a/SME-0019-aarch64-Make-more-use-of-aarch64_feature_flags.patch b/0122-Backport-SME-aarch64-Make-more-use-of-aarch64_featur.patch
similarity index 91%
rename from SME-0019-aarch64-Make-more-use-of-aarch64_feature_flags.patch
rename to 0122-Backport-SME-aarch64-Make-more-use-of-aarch64_featur.patch
index b453b11983b5b8471981904b64506cf6f615d4fd..31f8b7a8d7e698cc02eaea2fe4cb3b0ade8a4d96 100644
--- a/SME-0019-aarch64-Make-more-use-of-aarch64_feature_flags.patch
+++ b/0122-Backport-SME-aarch64-Make-more-use-of-aarch64_featur.patch
@@ -1,7 +1,8 @@
-From 3b5f17e8cf3ccae6a6d6243a370aede6b43f954c Mon Sep 17 00:00:00 2001
+From bdb91009cf250fb22c21ae7f5072263492f2b08c Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:56 +0100
-Subject: [PATCH 019/144] aarch64: Make more use of aarch64_feature_flags
+Subject: [PATCH 023/157] [Backport][SME] aarch64: Make more use of
+ aarch64_feature_flags
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=fed55a60e5b230bc159617f26e33611073c672fd
 
@@ -29,10 +30,10 @@ gcc/
  7 files changed, 56 insertions(+), 47 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index 918ac844d..bebcfd4c9 100644
+index ac3486d71..3efa57b26 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -128,9 +128,9 @@ aarch64_handle_option (struct gcc_options *opts,
+@@ -204,9 +204,9 @@ aarch64_handle_option (struct gcc_options *opts,
  struct aarch64_option_extension
  {
    const char *name;
@@ -45,7 +46,7 @@ index 918ac844d..bebcfd4c9 100644
  };
  
  /* ISA extensions in AArch64.  */
-@@ -149,14 +149,14 @@ struct processor_name_to_arch
+@@ -225,14 +225,14 @@ struct processor_name_to_arch
  {
    const char *processor_name;
    aarch64_arch arch;
@@ -62,7 +63,7 @@ index 918ac844d..bebcfd4c9 100644
  };
  
  /* Map processor names to the architecture revision they implement and
-@@ -186,7 +186,7 @@ static constexpr arch_to_arch_name all_architectures[] =
+@@ -262,7 +262,7 @@ static constexpr arch_to_arch_name all_architectures[] =
     a copy of the string is created and stored to INVALID_EXTENSION.  */
  
  enum aarch64_parse_opt_result
@@ -71,7 +72,7 @@ index 918ac844d..bebcfd4c9 100644
  			 std::string *invalid_extension)
  {
    /* The extension string is parsed left to right.  */
-@@ -266,8 +266,9 @@ aarch64_get_all_extension_candidates (auto_vec *candidates)
+@@ -342,8 +342,9 @@ aarch64_get_all_extension_candidates (auto_vec *candidates)
     that all the "+" flags come before the "+no" flags.  */
  
  std::string
@@ -83,7 +84,7 @@ index 918ac844d..bebcfd4c9 100644
  {
    std::string outstr = "";
  
-@@ -375,7 +376,7 @@ aarch64_rewrite_selected_cpu (const char *name)
+@@ -451,7 +452,7 @@ aarch64_rewrite_selected_cpu (const char *name)
        || a_to_an->arch == aarch64_no_arch)
      fatal_error (input_location, "unknown value %qs for %<-mcpu%>", name);
  
@@ -217,10 +218,10 @@ index 24594d584..63d1db776 100644
    /* The function type to use for functions that are resolved by
       function_resolver.  */
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index db89765c9..c887a187f 100644
+index 71db7ace1..8cb820767 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2712,7 +2712,7 @@ struct processor
+@@ -2929,7 +2929,7 @@ struct processor
    aarch64_processor ident;
    aarch64_processor sched_core;
    aarch64_arch arch;
@@ -229,7 +230,7 @@ index db89765c9..c887a187f 100644
    const tune_params *tune;
  };
  
-@@ -17196,7 +17196,8 @@ static void initialize_aarch64_code_model (struct gcc_options *);
+@@ -17428,7 +17428,8 @@ static void initialize_aarch64_code_model (struct gcc_options *);
  
  static enum aarch64_parse_opt_result
  aarch64_parse_arch (const char *to_parse, const struct processor **res,
@@ -239,7 +240,7 @@ index db89765c9..c887a187f 100644
  {
    const char *ext;
    const struct processor *arch;
-@@ -17219,7 +17220,7 @@ aarch64_parse_arch (const char *to_parse, const struct processor **res,
+@@ -17451,7 +17452,7 @@ aarch64_parse_arch (const char *to_parse, const struct processor **res,
        if (strlen (arch->name) == len
  	  && strncmp (arch->name, to_parse, len) == 0)
  	{
@@ -248,7 +249,7 @@ index db89765c9..c887a187f 100644
  
  	  if (ext != NULL)
  	    {
-@@ -17251,7 +17252,8 @@ aarch64_parse_arch (const char *to_parse, const struct processor **res,
+@@ -17483,7 +17484,8 @@ aarch64_parse_arch (const char *to_parse, const struct processor **res,
  
  static enum aarch64_parse_opt_result
  aarch64_parse_cpu (const char *to_parse, const struct processor **res,
@@ -258,7 +259,7 @@ index db89765c9..c887a187f 100644
  {
    const char *ext;
    const struct processor *cpu;
-@@ -17273,8 +17275,7 @@ aarch64_parse_cpu (const char *to_parse, const struct processor **res,
+@@ -17505,8 +17507,7 @@ aarch64_parse_cpu (const char *to_parse, const struct processor **res,
      {
        if (strlen (cpu->name) == len && strncmp (cpu->name, to_parse, len) == 0)
  	{
@@ -268,7 +269,7 @@ index db89765c9..c887a187f 100644
  
  	  if (ext != NULL)
  	    {
-@@ -17905,7 +17906,7 @@ aarch64_print_hint_for_extensions (const std::string &str)
+@@ -18137,7 +18138,7 @@ aarch64_print_hint_for_extensions (const std::string &str)
  
  static bool
  aarch64_validate_mcpu (const char *str, const struct processor **res,
@@ -277,7 +278,7 @@ index db89765c9..c887a187f 100644
  {
    std::string invalid_extension;
    enum aarch64_parse_opt_result parse_res
-@@ -18119,7 +18120,7 @@ aarch64_validate_mbranch_protection (const char *const_str)
+@@ -18351,7 +18352,7 @@ aarch64_validate_mbranch_protection (const char *const_str)
  
  static bool
  aarch64_validate_march (const char *str, const struct processor **res,
@@ -286,7 +287,7 @@ index db89765c9..c887a187f 100644
  {
    std::string invalid_extension;
    enum aarch64_parse_opt_result parse_res
-@@ -18209,8 +18210,8 @@ aarch64_convert_sve_vector_bits (aarch64_sve_vector_bits_enum value)
+@@ -18441,8 +18442,8 @@ aarch64_convert_sve_vector_bits (aarch64_sve_vector_bits_enum value)
  static void
  aarch64_override_options (void)
  {
@@ -297,7 +298,7 @@ index db89765c9..c887a187f 100644
    aarch64_isa_flags = 0;
  
    const struct processor *cpu = NULL;
-@@ -18658,7 +18659,7 @@ static bool
+@@ -18890,7 +18891,7 @@ static bool
  aarch64_handle_attr_isa_flags (char *str)
  {
    enum aarch64_parse_opt_result parse_res;
@@ -306,7 +307,7 @@ index db89765c9..c887a187f 100644
  
    /* We allow "+nothing" in the beginning to clear out all architectural
       features if the user wants to handpick specific features.  */
-@@ -18930,7 +18931,7 @@ aarch64_process_target_attr (tree args)
+@@ -19162,7 +19163,7 @@ aarch64_process_target_attr (tree args)
  	{
  	  /* Check if token is possibly an arch extension without
  	     leading '+'.  */
@@ -315,7 +316,7 @@ index db89765c9..c887a187f 100644
  	  auto with_plus = std::string ("+") + token;
  	  enum aarch64_parse_opt_result ext_res
  	    = aarch64_parse_extension (with_plus.c_str (), &isa_temp, nullptr);
-@@ -22539,7 +22540,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
+@@ -22771,7 +22772,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
    const struct processor *this_arch
      = aarch64_get_arch (targ_options->x_selected_arch);
  
@@ -324,7 +325,7 @@ index db89765c9..c887a187f 100644
    std::string extension
      = aarch64_get_extension_string_for_isa_flags (isa_flags,
  						  this_arch->flags);
-@@ -22669,7 +22670,7 @@ aarch64_start_file (void)
+@@ -22901,7 +22902,7 @@ aarch64_start_file (void)
  
    const struct processor *default_arch
      = aarch64_get_arch (default_options->x_selected_arch);
@@ -334,7 +335,7 @@ index db89765c9..c887a187f 100644
      = aarch64_get_extension_string_for_isa_flags (default_isa_flags,
  						  default_arch->flags);
 diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
-index d8e1f42a3..f6ae4499a 100644
+index 836a3c784..47ec7824f 100644
 --- a/gcc/config/aarch64/aarch64.opt
 +++ b/gcc/config/aarch64/aarch64.opt
 @@ -28,7 +28,7 @@ TargetVariable
@@ -389,5 +390,5 @@ index ee9cb65a5..2ae47c020 100644
    size_t sep_pos = -1;
    char *fcpu_info;
 -- 
-2.19.1
+2.33.0
 
diff --git a/0122-LoongArch-Split-vec_selects-of-bottom-elements-into-.patch b/0122-LoongArch-Split-vec_selects-of-bottom-elements-into-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..32b646180542b95d782c4192e5aa70fbd1a2744c
--- /dev/null
+++ b/0122-LoongArch-Split-vec_selects-of-bottom-elements-into-.patch
@@ -0,0 +1,84 @@
+From 5cab5d1a9fb9cfaa0d12d229aa0ee19e0dd55cc5 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Tue, 16 Jan 2024 10:23:20 +0800
+Subject: [PATCH 122/188] LoongArch: Split vec_selects of bottom elements into
+ simple move
+
+For below pattern, can be treated as a simple move because floating point
+and vector share a common register on loongarch64.
+
+(set (reg/v:SF 32 $f0 [orig:93 res ] [93])
+      (vec_select:SF (reg:V8SF 32 $f0 [115])
+          (parallel [
+                  (const_int 0 [0])
+              ])))
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (vec_extract_0):
+	New define_insn_and_split patten.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vect-extract.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 15 ++++++++++++++
+ .../gcc.target/loongarch/vect-extract.c       | 20 +++++++++++++++++++
+ 2 files changed, 35 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vect-extract.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index a5128c30c..946811e1a 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -746,6 +746,21 @@
+   DONE;
+ })
+ 
++(define_insn_and_split "vec_extract_0"
++  [(set (match_operand: 0 "register_operand" "=f")
++        (vec_select:
++          (match_operand:FLASX 1 "register_operand" "f")
++          (parallel [(const_int 0)])))]
++  "ISA_HAS_LSX"
++  "#"
++  "&& reload_completed"
++  [(set (match_dup 0) (match_dup 1))]
++{
++  operands[1] = gen_rtx_REG (mode, REGNO (operands[1]));
++}
++  [(set_attr "move_type" "fmove")
++   (set_attr "mode" "")])
++
+ (define_expand "vec_perm"
+  [(match_operand:LASX 0 "register_operand")
+   (match_operand:LASX 1 "register_operand")
+diff --git a/gcc/testsuite/gcc.target/loongarch/vect-extract.c b/gcc/testsuite/gcc.target/loongarch/vect-extract.c
+new file mode 100644
+index 000000000..ce126e3a4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vect-extract.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -ffast-math -mlasx -fno-vect-cost-model -fno-unroll-loops" } */
++/* { dg-final { scan-assembler-not "xvpickve.w" } } */
++/* { dg-final { scan-assembler-not "xvpickve.d" } } */
++
++float
++sum_float (float *a, int n) {
++  float res = 0.0;
++  for (int i = 0; i < n; i++)
++    res += a[i];
++  return res;
++}
++
++double
++sum_double (double *a, int n) {
++  double res = 0.0;
++  for (int i = 0; i < n; i++)
++    res += a[i];
++  return res;
++}
+-- 
+2.43.0
+
diff --git a/SME-0020-aarch64-Tweak-contents-of-flags_on-off-fields.patch b/0123-Backport-SME-aarch64-Tweak-contents-of-flags_on-off-.patch
similarity index 83%
rename from SME-0020-aarch64-Tweak-contents-of-flags_on-off-fields.patch
rename to 0123-Backport-SME-aarch64-Tweak-contents-of-flags_on-off-.patch
index dfd5d62a063d72010e012ab5a790a44ccf0d79fe..ec1c5f684aeeb757bcccf2f4050ea8fbe4a4f007 100644
--- a/SME-0020-aarch64-Tweak-contents-of-flags_on-off-fields.patch
+++ b/0123-Backport-SME-aarch64-Tweak-contents-of-flags_on-off-.patch
@@ -1,7 +1,8 @@
-From eb93652ec668f109b0a6cdf6dd8ddcc56f6c1569 Mon Sep 17 00:00:00 2001
+From eb92c185c1c71edcbd83b1c66fe4f9e7d52a98b3 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:56 +0100
-Subject: [PATCH 020/144] aarch64: Tweak contents of flags_on/off fields
+Subject: [PATCH 024/157] [Backport][SME] aarch64: Tweak contents of
+ flags_on/off fields
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=bb7f43b62a58a0f0326fd3060f0bd43e6f3ef971
 
@@ -18,10 +19,10 @@ gcc/
  1 file changed, 6 insertions(+), 8 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index bebcfd4c9..ffa83bb47 100644
+index 3efa57b26..752ba5632 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -137,10 +137,8 @@ struct aarch64_option_extension
+@@ -213,10 +213,8 @@ struct aarch64_option_extension
  static constexpr aarch64_option_extension all_extensions[] =
  {
  #define AARCH64_OPT_EXTENSION(NAME, IDENT, C, D, E, F) \
@@ -34,7 +35,7 @@ index bebcfd4c9..ffa83bb47 100644
  #include "config/aarch64/aarch64-option-extensions.def"
    {NULL, 0, 0, 0}
  };
-@@ -228,9 +226,9 @@ aarch64_parse_extension (const char *str, aarch64_feature_flags *isa_flags,
+@@ -304,9 +302,9 @@ aarch64_parse_extension (const char *str, aarch64_feature_flags *isa_flags,
  	    {
  	      /* Add or remove the extension.  */
  	      if (adding_ext)
@@ -46,7 +47,7 @@ index bebcfd4c9..ffa83bb47 100644
  	      break;
  	    }
  	}
-@@ -304,7 +302,7 @@ aarch64_get_extension_string_for_isa_flags
+@@ -380,7 +378,7 @@ aarch64_get_extension_string_for_isa_flags
  
        if ((flags & isa_flags & (explicit_flags | ~current_flags)) == flags)
  	{
@@ -55,7 +56,7 @@ index bebcfd4c9..ffa83bb47 100644
  	  added |= opt.flag_canonical;
  	}
      }
-@@ -319,7 +317,7 @@ aarch64_get_extension_string_for_isa_flags
+@@ -395,7 +393,7 @@ aarch64_get_extension_string_for_isa_flags
    for (auto &opt : all_extensions)
      if (opt.flag_canonical & current_flags & ~isa_flags)
        {
@@ -65,5 +66,5 @@ index bebcfd4c9..ffa83bb47 100644
  	outstr += opt.name;
        }
 -- 
-2.19.1
+2.33.0
 
diff --git a/0123-LoongArch-Modify-the-address-calculation-logic-for-o.patch b/0123-LoongArch-Modify-the-address-calculation-logic-for-o.patch
new file mode 100644
index 0000000000000000000000000000000000000000..44a0fdda9d8409e3c52480b9a1a3d66c41c5c821
--- /dev/null
+++ b/0123-LoongArch-Modify-the-address-calculation-logic-for-o.patch
@@ -0,0 +1,112 @@
+From c4815d70715bed71b8e89888ef19eb43e9171229 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 30 Jan 2024 15:02:32 +0800
+Subject: [PATCH 123/188] LoongArch: Modify the address calculation logic for
+ obtaining array element values through fp.
+
+Modify address calculation logic from (((a x C) + fp) + offset) to ((fp + offset) + a x C).
+Thereby modifying the register dependencies and optimizing the code.
+The value of C is 2 4 or 8.
+
+The following is the assembly code before and after a loop modification in spec2006 401.bzip:
+
+                 old                      |                 new
+ 735 .L71:                                |  735 .L71:
+ 736         slli.d  $r12,$r15,2          |  736         slli.d  $r12,$r15,2
+ 737         ldx.w   $r13,$r22,$r12       |  737         ldx.w   $r13,$r22,$r12
+ 738         addi.d  $r15,$r15,-1         |  738         addi.d  $r15,$r15,-1
+ 739         slli.w  $r16,$r15,0          |  739         slli.w  $r16,$r15,0
+ 740         addi.w  $r13,$r13,-1         |  740         addi.w  $r13,$r13,-1
+ 741         slti    $r14,$r13,0          |  741         slti    $r14,$r13,0
+ 742         add.w   $r12,$r26,$r13       |  742         add.w   $r12,$r26,$r13
+ 743         maskeqz $r12,$r12,$r14       |  743         maskeqz $r12,$r12,$r14
+ 744         masknez $r14,$r13,$r14       |  744         masknez $r14,$r13,$r14
+ 745         or      $r12,$r12,$r14       |  745         or      $r12,$r12,$r14
+ 746         ldx.bu  $r14,$r30,$r12       |  746         ldx.bu  $r14,$r30,$r12
+ 747         lu12i.w $r13,4096>>12        |  747         alsl.d  $r14,$r14,$r18,2
+ 748         ori     $r13,$r13,432        |  748         ldptr.w $r13,$r14,0
+ 749         add.d   $r13,$r13,$r3        |  749         addi.w  $r17,$r13,-1
+ 750         alsl.d  $r14,$r14,$r13,2     |  750         stptr.w $r17,$r14,0
+ 751         ldptr.w $r13,$r14,-1968      |  751         slli.d  $r13,$r13,2
+ 752         addi.w  $r17,$r13,-1         |  752         stx.w   $r12,$r22,$r13
+ 753         st.w    $r17,$r14,-1968      |  753         ldptr.w $r12,$r19,0
+ 754         slli.d  $r13,$r13,2          |  754         blt     $r12,$r16,.L71
+ 755         stx.w   $r12,$r22,$r13       |  755         .align  4
+ 756         ldptr.w $r12,$r18,-2048      |  756
+ 757         blt     $r12,$r16,.L71       |  757
+ 758         .align  4                    |  758
+
+This patch is ported from riscv's commit r14-3111.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (mem_shadd_or_shadd_rtx_p): New function.
+	(loongarch_legitimize_address): Add logical transformation code.
+---
+ gcc/config/loongarch/loongarch.cc | 43 +++++++++++++++++++++++++++++++
+ 1 file changed, 43 insertions(+)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 5877b0acf..612a9c138 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3215,6 +3215,22 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+   return true;
+ }
+ 
++/* Helper loongarch_legitimize_address.  Given X, return true if it
++   is a left shift by 1, 2 or 3 positions or a multiply by 2, 4 or 8.
++
++   This respectively represent canonical shift-add rtxs or scaled
++   memory addresses.  */
++static bool
++mem_shadd_or_shadd_rtx_p (rtx x)
++{
++  return ((GET_CODE (x) == ASHIFT
++	   || GET_CODE (x) == MULT)
++	  && CONST_INT_P (XEXP (x, 1))
++	  && ((GET_CODE (x) == ASHIFT && IN_RANGE (INTVAL (XEXP (x, 1)), 1, 3))
++	      || (GET_CODE (x) == MULT
++		  && IN_RANGE (exact_log2 (INTVAL (XEXP (x, 1))), 1, 3))));
++}
++
+ /* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
+    be legitimized in a way that the generic machinery might not expect,
+    return a new address, otherwise return NULL.  MODE is the mode of
+@@ -3238,6 +3254,33 @@ loongarch_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+   loongarch_split_plus (x, &base, &offset);
+   if (offset != 0)
+     {
++      /* Handle (plus (plus (mult (a) (mem_shadd_constant)) (fp)) (C)) case.  */
++      if (GET_CODE (base) == PLUS && mem_shadd_or_shadd_rtx_p (XEXP (base, 0))
++	  && IMM12_OPERAND (offset))
++	{
++	  rtx index = XEXP (base, 0);
++	  rtx fp = XEXP (base, 1);
++
++	  if (REG_P (fp) && REGNO (fp) == VIRTUAL_STACK_VARS_REGNUM)
++	    {
++	      /* If we were given a MULT, we must fix the constant
++		 as we're going to create the ASHIFT form.  */
++	      int shift_val = INTVAL (XEXP (index, 1));
++	      if (GET_CODE (index) == MULT)
++		shift_val = exact_log2 (shift_val);
++
++	      rtx reg1 = gen_reg_rtx (Pmode);
++	      rtx reg3 = gen_reg_rtx (Pmode);
++	      loongarch_emit_binary (PLUS, reg1, fp, GEN_INT (offset));
++	      loongarch_emit_binary (PLUS, reg3,
++				     gen_rtx_ASHIFT (Pmode, XEXP (index, 0),
++						     GEN_INT (shift_val)),
++				     reg1);
++
++	      return reg3;
++	    }
++	}
++
+       if (!loongarch_valid_base_register_p (base, mode, false))
+ 	base = copy_to_mode_reg (Pmode, base);
+       addr = loongarch_add_offset (NULL, base, offset);
+-- 
+2.43.0
+
diff --git a/SME-0021-aarch64-Tweak-handling-of-mgeneral-regs-only.patch b/0124-Backport-SME-aarch64-Tweak-handling-of-mgeneral-regs.patch
similarity index 92%
rename from SME-0021-aarch64-Tweak-handling-of-mgeneral-regs-only.patch
rename to 0124-Backport-SME-aarch64-Tweak-handling-of-mgeneral-regs.patch
index 742e493a304f5ddff9bc175c15a0a33c026f3d72..55c27b3666afe2b0ab955b8c4fe07b219bf97a2c 100644
--- a/SME-0021-aarch64-Tweak-handling-of-mgeneral-regs-only.patch
+++ b/0124-Backport-SME-aarch64-Tweak-handling-of-mgeneral-regs.patch
@@ -1,7 +1,8 @@
-From 5297da8c14534a4e96e0b85f1f2c6937ce87f0bf Mon Sep 17 00:00:00 2001
+From 91f7471cbc7dec42673b58a1896330d64eb6be2a Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:57 +0100
-Subject: [PATCH 021/144] aarch64: Tweak handling of -mgeneral-regs-only
+Subject: [PATCH 025/157] [Backport][SME] aarch64: Tweak handling of
+ -mgeneral-regs-only
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=2a269bda9e7b8f9353699d0c965e7e9246500aa0
 
@@ -56,12 +57,12 @@ gcc/
  7 files changed, 68 insertions(+), 22 deletions(-)
 
 diff --git a/gcc/common/config/aarch64/aarch64-common.cc b/gcc/common/config/aarch64/aarch64-common.cc
-index ffa83bb47..61007839d 100644
+index 752ba5632..c64b4987e 100644
 --- a/gcc/common/config/aarch64/aarch64-common.cc
 +++ b/gcc/common/config/aarch64/aarch64-common.cc
-@@ -64,6 +64,17 @@ static const struct default_options aarch_option_optimization_table[] =
-     { OPT_LEVELS_NONE, 0, NULL, 0 }
-   };
+@@ -137,6 +137,17 @@ reset_tsv110_option ()
+     }
+ }
  
 +/* Set OPTS->x_aarch64_asm_isa_flags to FLAGS and update
 +   OPTS->x_aarch64_isa_flags accordingly.  */
@@ -77,7 +78,7 @@ index ffa83bb47..61007839d 100644
  /* Implement TARGET_HANDLE_OPTION.
     This function handles the target specific options for CPU/target selection.
  
-@@ -98,6 +109,7 @@ aarch64_handle_option (struct gcc_options *opts,
+@@ -174,6 +185,7 @@ aarch64_handle_option (struct gcc_options *opts,
  
      case OPT_mgeneral_regs_only:
        opts->x_target_flags |= MASK_GENERAL_REGS_ONLY;
@@ -151,10 +152,10 @@ index b927a886e..a70e3a6b4 100644
      return check_required_registers (location, fndecl);
  
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index c887a187f..6b76ef7db 100644
+index 8cb820767..3e83e48ec 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -18200,10 +18200,19 @@ aarch64_convert_sve_vector_bits (aarch64_sve_vector_bits_enum value)
+@@ -18432,10 +18432,19 @@ aarch64_convert_sve_vector_bits (aarch64_sve_vector_bits_enum value)
      return (int) value / 64;
  }
  
@@ -175,7 +176,7 @@ index c887a187f..6b76ef7db 100644
     decisions.  It must also set selected_arch as this will be used to
     output the .arch asm tags for each function.  */
  
-@@ -18212,7 +18221,7 @@ aarch64_override_options (void)
+@@ -18444,7 +18453,7 @@ aarch64_override_options (void)
  {
    aarch64_feature_flags cpu_isa = 0;
    aarch64_feature_flags arch_isa = 0;
@@ -184,7 +185,7 @@ index c887a187f..6b76ef7db 100644
  
    const struct processor *cpu = NULL;
    const struct processor *arch = NULL;
-@@ -18252,25 +18261,25 @@ aarch64_override_options (void)
+@@ -18484,25 +18493,25 @@ aarch64_override_options (void)
  	}
  
        selected_arch = arch->arch;
@@ -214,7 +215,7 @@ index c887a187f..6b76ef7db 100644
      }
  
    selected_tune = tune ? tune->ident : cpu->ident;
-@@ -18412,7 +18421,7 @@ aarch64_option_print (FILE *file, int indent, struct cl_target_option *ptr)
+@@ -18644,7 +18653,7 @@ aarch64_option_print (FILE *file, int indent, struct cl_target_option *ptr)
      = aarch64_get_tune_cpu (ptr->x_selected_tune);
    const struct processor *arch = aarch64_get_arch (ptr->x_selected_arch);
    std::string extension
@@ -223,7 +224,7 @@ index c887a187f..6b76ef7db 100644
  						  arch->flags);
  
    fprintf (file, "%*sselected tune = %s\n", indent, "", cpu->name);
-@@ -18520,13 +18529,15 @@ aarch64_handle_attr_arch (const char *str)
+@@ -18752,13 +18761,15 @@ aarch64_handle_attr_arch (const char *str)
  {
    const struct processor *tmp_arch = NULL;
    std::string invalid_extension;
@@ -240,7 +241,7 @@ index c887a187f..6b76ef7db 100644
        return true;
      }
  
-@@ -18558,14 +18569,16 @@ aarch64_handle_attr_cpu (const char *str)
+@@ -18790,14 +18801,16 @@ aarch64_handle_attr_cpu (const char *str)
  {
    const struct processor *tmp_cpu = NULL;
    std::string invalid_extension;
@@ -258,7 +259,7 @@ index c887a187f..6b76ef7db 100644
        return true;
      }
  
-@@ -18659,7 +18672,7 @@ static bool
+@@ -18891,7 +18904,7 @@ static bool
  aarch64_handle_attr_isa_flags (char *str)
  {
    enum aarch64_parse_opt_result parse_res;
@@ -267,7 +268,7 @@ index c887a187f..6b76ef7db 100644
  
    /* We allow "+nothing" in the beginning to clear out all architectural
       features if the user wants to handpick specific features.  */
-@@ -18674,7 +18687,7 @@ aarch64_handle_attr_isa_flags (char *str)
+@@ -18906,7 +18919,7 @@ aarch64_handle_attr_isa_flags (char *str)
  
    if (parse_res == AARCH64_PARSE_OK)
      {
@@ -276,7 +277,7 @@ index c887a187f..6b76ef7db 100644
        return true;
      }
  
-@@ -19096,8 +19109,12 @@ aarch64_can_inline_p (tree caller, tree callee)
+@@ -19328,8 +19341,12 @@ aarch64_can_inline_p (tree caller, tree callee)
  					   : target_option_default_node);
  
    /* Callee's ISA flags should be a subset of the caller's.  */
@@ -290,7 +291,7 @@ index c887a187f..6b76ef7db 100644
      return false;
  
    /* Allow non-strict aligned functions inlining into strict
-@@ -22540,7 +22557,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
+@@ -22772,7 +22789,7 @@ aarch64_declare_function_name (FILE *stream, const char* name,
    const struct processor *this_arch
      = aarch64_get_arch (targ_options->x_selected_arch);
  
@@ -299,7 +300,7 @@ index c887a187f..6b76ef7db 100644
    std::string extension
      = aarch64_get_extension_string_for_isa_flags (isa_flags,
  						  this_arch->flags);
-@@ -22670,7 +22687,7 @@ aarch64_start_file (void)
+@@ -22902,7 +22919,7 @@ aarch64_start_file (void)
  
    const struct processor *default_arch
      = aarch64_get_arch (default_options->x_selected_arch);
@@ -351,7 +352,7 @@ index 50a2ef444..521031efe 100644
  /* SVE2 instructions, enabled through +sve2.  */
  #define TARGET_SVE2 (TARGET_SVE && AARCH64_ISA_SVE2)
 diff --git a/gcc/config/aarch64/aarch64.opt b/gcc/config/aarch64/aarch64.opt
-index f6ae4499a..b89b20450 100644
+index 47ec7824f..5f507abd4 100644
 --- a/gcc/config/aarch64/aarch64.opt
 +++ b/gcc/config/aarch64/aarch64.opt
 @@ -27,6 +27,9 @@ enum aarch64_processor selected_tune = aarch64_none
@@ -365,5 +366,5 @@ index f6ae4499a..b89b20450 100644
  aarch64_feature_flags aarch64_isa_flags = 0
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0124-LoongArch-Merge-template-got_load_tls_-ld-gd-le-ie.patch b/0124-LoongArch-Merge-template-got_load_tls_-ld-gd-le-ie.patch
new file mode 100644
index 0000000000000000000000000000000000000000..51606bc2538851c51a0b84f0aa83daaec5a7c752
--- /dev/null
+++ b/0124-LoongArch-Merge-template-got_load_tls_-ld-gd-le-ie.patch
@@ -0,0 +1,214 @@
+From 3f45bbfe924ffe38832b2ad0050589b9f188422e Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 25 Jan 2024 14:44:39 +0800
+Subject: [PATCH 124/188] LoongArch: Merge template got_load_tls_{ld/gd/le/ie}.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_load_tls):
+	Load all types of tls symbols through one function.
+	(loongarch_got_load_tls_gd): Delete.
+	(loongarch_got_load_tls_ld): Delete.
+	(loongarch_got_load_tls_ie): Delete.
+	(loongarch_got_load_tls_le): Delete.
+	(loongarch_call_tls_get_addr): Modify the called function name.
+	(loongarch_legitimize_tls_address): Likewise.
+	* config/loongarch/loongarch.md (@got_load_tls_gd): Delete.
+	(@load_tls): New template.
+	(@got_load_tls_ld): Delete.
+	(@got_load_tls_le): Delete.
+	(@got_load_tls_ie): Delete.
+---
+ gcc/config/loongarch/loongarch.cc | 47 +++++-------------------
+ gcc/config/loongarch/loongarch.md | 59 ++++++++++++-------------------
+ 2 files changed, 30 insertions(+), 76 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 612a9c138..ced7e58c2 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2732,36 +2732,12 @@ loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
+ /* The __tls_get_attr symbol.  */
+ static GTY (()) rtx loongarch_tls_symbol;
+ 
+-/* Load an entry from the GOT for a TLS GD access.  */
++/* Load an entry for a TLS access.  */
+ 
+ static rtx
+-loongarch_got_load_tls_gd (rtx dest, rtx sym)
++loongarch_load_tls (rtx dest, rtx sym)
+ {
+-  return gen_got_load_tls_gd (Pmode, dest, sym);
+-}
+-
+-/* Load an entry from the GOT for a TLS LD access.  */
+-
+-static rtx
+-loongarch_got_load_tls_ld (rtx dest, rtx sym)
+-{
+-  return gen_got_load_tls_ld (Pmode, dest, sym);
+-}
+-
+-/* Load an entry from the GOT for a TLS IE access.  */
+-
+-static rtx
+-loongarch_got_load_tls_ie (rtx dest, rtx sym)
+-{
+-  return gen_got_load_tls_ie (Pmode, dest, sym);
+-}
+-
+-/* Add in the thread pointer for a TLS LE access.  */
+-
+-static rtx
+-loongarch_got_load_tls_le (rtx dest, rtx sym)
+-{
+-  return gen_got_load_tls_le (Pmode, dest, sym);
++  return gen_load_tls (Pmode, dest, sym);
+ }
+ 
+ /* Return an instruction sequence that calls __tls_get_addr.  SYM is
+@@ -2805,14 +2781,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 	emit_insn (gen_tls_low (Pmode, a0, high, loc));
+     }
+   else
+-    {
+-      if (type == SYMBOL_TLSLDM)
+-	emit_insn (loongarch_got_load_tls_ld (a0, loc));
+-      else if (type == SYMBOL_TLSGD)
+-	emit_insn (loongarch_got_load_tls_gd (a0, loc));
+-      else
+-	gcc_unreachable ();
+-    }
++    emit_insn (loongarch_load_tls (a0, loc));
+ 
+   if (flag_plt)
+     {
+@@ -2949,10 +2918,10 @@ loongarch_legitimize_tls_address (rtx loc)
+ 	  /* la.tls.ie; tp-relative add.  */
+ 	  tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+ 	  tmp1 = gen_reg_rtx (Pmode);
++	  tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_IE);
+ 	  dest = gen_reg_rtx (Pmode);
+ 	  if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
+ 	    {
+-	      tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_IE);
+ 	      tmp3 = gen_reg_rtx (Pmode);
+ 	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+ 	      high = loongarch_force_temporary (tmp3, high);
+@@ -2975,7 +2944,7 @@ loongarch_legitimize_tls_address (rtx loc)
+ 		emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2));
+ 	    }
+ 	  else
+-	    emit_insn (loongarch_got_load_tls_ie (tmp1, loc));
++	    emit_insn (loongarch_load_tls (tmp1, tmp2));
+ 	  emit_insn (gen_add3_insn (dest, tmp1, tp));
+ 	}
+       break;
+@@ -3007,11 +2976,11 @@ loongarch_legitimize_tls_address (rtx loc)
+ 
+ 	  tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+ 	  tmp1 = gen_reg_rtx (Pmode);
++	  tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_LE);
+ 	  dest = gen_reg_rtx (Pmode);
+ 
+ 	  if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
+ 	    {
+-	      tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_LE);
+ 	      tmp3 = gen_reg_rtx (Pmode);
+ 	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+ 	      high = loongarch_force_temporary (tmp3, high);
+@@ -3039,7 +3008,7 @@ loongarch_legitimize_tls_address (rtx loc)
+ 		}
+ 	    }
+ 	  else
+-	    emit_insn (loongarch_got_load_tls_le (tmp1, loc));
++	    emit_insn (loongarch_load_tls (tmp1, tmp2));
+ 	  emit_insn (gen_add3_insn (dest, tmp1, tp));
+ 	}
+       break;
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 23d8dc126..4f9a92334 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -51,10 +51,7 @@
+   UNSPEC_BITREV_8B
+ 
+   ;; TLS
+-  UNSPEC_TLS_GD
+-  UNSPEC_TLS_LD
+-  UNSPEC_TLS_LE
+-  UNSPEC_TLS_IE
++  UNSPEC_TLS
+ 
+   ;; Stack tie
+   UNSPEC_TIE
+@@ -2701,45 +2698,33 @@
+ 
+ ;; Thread-Local Storage
+ 
+-(define_insn "@got_load_tls_gd"
++(define_insn "@load_tls"
+   [(set (match_operand:P 0 "register_operand" "=r")
+ 	(unspec:P
+ 	    [(match_operand:P 1 "symbolic_operand" "")]
+-	    UNSPEC_TLS_GD))]
++	    UNSPEC_TLS))]
+   ""
+-  "la.tls.gd\t%0,%1"
+-  [(set_attr "got" "load")
+-   (set_attr "mode" "")])
+-
+-(define_insn "@got_load_tls_ld"
+-  [(set (match_operand:P 0 "register_operand" "=r")
+-	(unspec:P
+-	    [(match_operand:P 1 "symbolic_operand" "")]
+-	    UNSPEC_TLS_LD))]
+-  ""
+-  "la.tls.ld\t%0,%1"
+-  [(set_attr "got" "load")
+-   (set_attr "mode" "")])
++{
++  enum loongarch_symbol_type symbol_type;
++  gcc_assert (loongarch_symbolic_constant_p (operands[1], &symbol_type));
+ 
+-(define_insn "@got_load_tls_le"
+-  [(set (match_operand:P 0 "register_operand" "=r")
+-	(unspec:P
+-	    [(match_operand:P 1 "symbolic_operand" "")]
+-	    UNSPEC_TLS_LE))]
+-  ""
+-  "la.tls.le\t%0,%1"
+-  [(set_attr "got" "load")
+-   (set_attr "mode" "")])
++  switch (symbol_type)
++    {
++    case SYMBOL_TLS_LE:
++      return "la.tls.le\t%0,%1";
++    case SYMBOL_TLS_IE:
++      return "la.tls.ie\t%0,%1";
++    case SYMBOL_TLSLDM:
++      return "la.tls.ld\t%0,%1";
++    case SYMBOL_TLSGD:
++      return "la.tls.gd\t%0,%1";
+ 
+-(define_insn "@got_load_tls_ie"
+-  [(set (match_operand:P 0 "register_operand" "=r")
+-	(unspec:P
+-	    [(match_operand:P 1 "symbolic_operand" "")]
+-	    UNSPEC_TLS_IE))]
+-  ""
+-  "la.tls.ie\t%0,%1"
+-  [(set_attr "got" "load")
+-   (set_attr "mode" "")])
++    default:
++      gcc_unreachable ();
++    }
++}
++  [(set_attr "mode" "")
++   (set_attr "insn_count" "2")])
+ 
+ ;; Move operand 1 to the high word of operand 0 using movgr2frh.w, preserving the
+ ;; value in the low word.
+-- 
+2.43.0
+
diff --git a/SME-0022-aarch64-Remove-redundant-TARGET_-checks.patch b/0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch
similarity index 94%
rename from SME-0022-aarch64-Remove-redundant-TARGET_-checks.patch
rename to 0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch
index 499338efbe6247c3704efc988e971b6526d1cd4b..f935069c3a0b8231d8e464c65aa2e18d54403764 100644
--- a/SME-0022-aarch64-Remove-redundant-TARGET_-checks.patch
+++ b/0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch
@@ -1,7 +1,8 @@
-From 5fb250e2dcffd0e7abbee8f4bea8d82150fd7e79 Mon Sep 17 00:00:00 2001
+From 77a86d955dd1c9cd8c7fc35e6caf0cb707799129 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:57 +0100
-Subject: [PATCH 022/144] aarch64: Remove redundant TARGET_* checks
+Subject: [PATCH 026/157] [Backport][SME] aarch64: Remove redundant TARGET_*
+ checks
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=a31641840af2c40cf36036fa472df34d4a4402c3
 
@@ -71,7 +72,7 @@ index 18c9b975b..2dfe2b8f8 100644
        builtin_define_with_int_value ("__ARM_FP", 0x0E);
        builtin_define ("__ARM_FP16_FORMAT_IEEE");
 diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
-index ee7f0b89c..4d742a493 100644
+index de92802f5..a47b39281 100644
 --- a/gcc/config/aarch64/aarch64-simd.md
 +++ b/gcc/config/aarch64/aarch64-simd.md
 @@ -693,7 +693,7 @@
@@ -83,7 +84,7 @@ index ee7f0b89c..4d742a493 100644
  {
    rtx v_bitmask = gen_reg_rtx (mode);
    int bits = GET_MODE_UNIT_BITSIZE (mode) - 1;
-@@ -8048,7 +8048,7 @@
+@@ -8352,7 +8352,7 @@
  		 (match_operand:V16QI 1 "register_operand" "%0")
  		 (match_operand:V16QI 2 "register_operand" "w"))]
           CRYPTO_AES))]
@@ -92,7 +93,7 @@ index ee7f0b89c..4d742a493 100644
    "aes\\t%0.16b, %2.16b"
    [(set_attr "type" "crypto_aese")]
  )
-@@ -8057,7 +8057,7 @@
+@@ -8361,7 +8361,7 @@
    [(set (match_operand:V16QI 0 "register_operand" "=w")
  	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
  	 CRYPTO_AESMC))]
@@ -101,7 +102,7 @@ index ee7f0b89c..4d742a493 100644
    "aes\\t%0.16b, %1.16b"
    [(set_attr "type" "crypto_aesmc")]
  )
-@@ -8076,7 +8076,7 @@
+@@ -8380,7 +8380,7 @@
  		(match_operand:V16QI 2 "register_operand" "w"))]
  	     UNSPEC_AESE)]
  	UNSPEC_AESMC))]
@@ -110,7 +111,7 @@ index ee7f0b89c..4d742a493 100644
     && aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
    "aese\\t%0.16b, %2.16b\;aesmc\\t%0.16b, %0.16b"
    [(set_attr "type" "crypto_aese")
-@@ -8097,7 +8097,7 @@
+@@ -8401,7 +8401,7 @@
  			(match_operand:V16QI 2 "register_operand" "w"))]
  		UNSPEC_AESD)]
  	  UNSPEC_AESIMC))]
@@ -119,7 +120,7 @@ index ee7f0b89c..4d742a493 100644
     && aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
    "aesd\\t%0.16b, %2.16b\;aesimc\\t%0.16b, %0.16b"
    [(set_attr "type" "crypto_aese")
-@@ -8111,7 +8111,7 @@
+@@ -8415,7 +8415,7 @@
          (unspec:SI [(match_operand:SI 1
                         "register_operand" "w")]
           UNSPEC_SHA1H))]
@@ -128,7 +129,7 @@ index ee7f0b89c..4d742a493 100644
    "sha1h\\t%s0, %s1"
    [(set_attr "type" "crypto_sha1_fast")]
  )
-@@ -8121,7 +8121,7 @@
+@@ -8425,7 +8425,7 @@
  	(unspec:SI [(vec_select:SI (match_operand:V4SI 1 "register_operand" "w")
  		     (parallel [(const_int 0)]))]
  	 UNSPEC_SHA1H))]
@@ -137,7 +138,7 @@ index ee7f0b89c..4d742a493 100644
    "sha1h\\t%s0, %s1"
    [(set_attr "type" "crypto_sha1_fast")]
  )
-@@ -8131,7 +8131,7 @@
+@@ -8435,7 +8435,7 @@
  	(unspec:SI [(vec_select:SI (match_operand:V4SI 1 "register_operand" "w")
  		     (parallel [(const_int 3)]))]
  	 UNSPEC_SHA1H))]
@@ -146,7 +147,7 @@ index ee7f0b89c..4d742a493 100644
    "sha1h\\t%s0, %s1"
    [(set_attr "type" "crypto_sha1_fast")]
  )
-@@ -8141,7 +8141,7 @@
+@@ -8445,7 +8445,7 @@
          (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
                        (match_operand:V4SI 2 "register_operand" "w")]
           UNSPEC_SHA1SU1))]
@@ -155,7 +156,7 @@ index ee7f0b89c..4d742a493 100644
    "sha1su1\\t%0.4s, %2.4s"
    [(set_attr "type" "crypto_sha1_fast")]
  )
-@@ -8152,7 +8152,7 @@
+@@ -8456,7 +8456,7 @@
                        (match_operand:SI 2 "register_operand" "w")
                        (match_operand:V4SI 3 "register_operand" "w")]
           CRYPTO_SHA1))]
@@ -164,7 +165,7 @@ index ee7f0b89c..4d742a493 100644
    "sha1\\t%q0, %s2, %3.4s"
    [(set_attr "type" "crypto_sha1_slow")]
  )
-@@ -8163,7 +8163,7 @@
+@@ -8467,7 +8467,7 @@
                        (match_operand:V4SI 2 "register_operand" "w")
                        (match_operand:V4SI 3 "register_operand" "w")]
           UNSPEC_SHA1SU0))]
@@ -173,7 +174,7 @@ index ee7f0b89c..4d742a493 100644
    "sha1su0\\t%0.4s, %2.4s, %3.4s"
    [(set_attr "type" "crypto_sha1_xor")]
  )
-@@ -8176,7 +8176,7 @@
+@@ -8480,7 +8480,7 @@
                        (match_operand:V4SI 2 "register_operand" "w")
                        (match_operand:V4SI 3 "register_operand" "w")]
           CRYPTO_SHA256))]
@@ -182,7 +183,7 @@ index ee7f0b89c..4d742a493 100644
    "sha256h\\t%q0, %q2, %3.4s"
    [(set_attr "type" "crypto_sha256_slow")]
  )
-@@ -8186,7 +8186,7 @@
+@@ -8490,7 +8490,7 @@
          (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
                        (match_operand:V4SI 2 "register_operand" "w")]
           UNSPEC_SHA256SU0))]
@@ -191,7 +192,7 @@ index ee7f0b89c..4d742a493 100644
    "sha256su0\\t%0.4s, %2.4s"
    [(set_attr "type" "crypto_sha256_fast")]
  )
-@@ -8197,7 +8197,7 @@
+@@ -8501,7 +8501,7 @@
                        (match_operand:V4SI 2 "register_operand" "w")
                        (match_operand:V4SI 3 "register_operand" "w")]
           UNSPEC_SHA256SU1))]
@@ -200,7 +201,7 @@ index ee7f0b89c..4d742a493 100644
    "sha256su1\\t%0.4s, %2.4s, %3.4s"
    [(set_attr "type" "crypto_sha256_slow")]
  )
-@@ -8210,7 +8210,7 @@
+@@ -8514,7 +8514,7 @@
                        (match_operand:V2DI 2 "register_operand" "w")
                        (match_operand:V2DI 3 "register_operand" "w")]
           CRYPTO_SHA512))]
@@ -209,7 +210,7 @@ index ee7f0b89c..4d742a493 100644
    "sha512h\\t%q0, %q2, %3.2d"
    [(set_attr "type" "crypto_sha512")]
  )
-@@ -8220,7 +8220,7 @@
+@@ -8524,7 +8524,7 @@
          (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
                        (match_operand:V2DI 2 "register_operand" "w")]
           UNSPEC_SHA512SU0))]
@@ -218,7 +219,7 @@ index ee7f0b89c..4d742a493 100644
    "sha512su0\\t%0.2d, %2.2d"
    [(set_attr "type" "crypto_sha512")]
  )
-@@ -8231,7 +8231,7 @@
+@@ -8535,7 +8535,7 @@
                        (match_operand:V2DI 2 "register_operand" "w")
                        (match_operand:V2DI 3 "register_operand" "w")]
           UNSPEC_SHA512SU1))]
@@ -227,7 +228,7 @@ index ee7f0b89c..4d742a493 100644
    "sha512su1\\t%0.2d, %2.2d, %3.2d"
    [(set_attr "type" "crypto_sha512")]
  )
-@@ -8245,7 +8245,7 @@
+@@ -8549,7 +8549,7 @@
  	  (match_operand:VQ_I 2 "register_operand" "w")
  	  (match_operand:VQ_I 3 "register_operand" "w"))
  	 (match_operand:VQ_I 1 "register_operand" "w")))]
@@ -236,7 +237,7 @@ index ee7f0b89c..4d742a493 100644
    "eor3\\t%0.16b, %1.16b, %2.16b, %3.16b"
    [(set_attr "type" "crypto_sha3")]
  )
-@@ -8257,7 +8257,7 @@
+@@ -8561,7 +8561,7 @@
  	  (match_operand:V2DI 2 "register_operand" "w")
  	  (const_int 1))
  	 (match_operand:V2DI 1 "register_operand" "w")))]
@@ -245,7 +246,7 @@ index ee7f0b89c..4d742a493 100644
    "rax1\\t%0.2d, %1.2d, %2.2d"
    [(set_attr "type" "crypto_sha3")]
  )
-@@ -8269,7 +8269,7 @@
+@@ -8573,7 +8573,7 @@
  	  (match_operand:V2DI 1 "register_operand" "%w")
  	  (match_operand:V2DI 2 "register_operand" "w"))
  	 (match_operand:SI 3 "aarch64_simd_shift_imm_di" "Usd")))]
@@ -254,7 +255,7 @@ index ee7f0b89c..4d742a493 100644
    "xar\\t%0.2d, %1.2d, %2.2d, %3"
    [(set_attr "type" "crypto_sha3")]
  )
-@@ -8281,7 +8281,7 @@
+@@ -8585,7 +8585,7 @@
  	  (not:VQ_I (match_operand:VQ_I 3 "register_operand" "w"))
  	  (match_operand:VQ_I 2 "register_operand" "w"))
  	 (match_operand:VQ_I 1 "register_operand" "w")))]
@@ -263,7 +264,7 @@ index ee7f0b89c..4d742a493 100644
    "bcax\\t%0.16b, %1.16b, %2.16b, %3.16b"
    [(set_attr "type" "crypto_sha3")]
  )
-@@ -8294,7 +8294,7 @@
+@@ -8598,7 +8598,7 @@
  		      (match_operand:V4SI 2 "register_operand" "w")
  		      (match_operand:V4SI 3 "register_operand" "w")]
  	 UNSPEC_SM3SS1))]
@@ -272,7 +273,7 @@ index ee7f0b89c..4d742a493 100644
    "sm3ss1\\t%0.4s, %1.4s, %2.4s, %3.4s"
    [(set_attr "type" "crypto_sm3")]
  )
-@@ -8307,7 +8307,7 @@
+@@ -8611,7 +8611,7 @@
  		      (match_operand:V4SI 3 "register_operand" "w")
  		      (match_operand:SI 4 "aarch64_imm2" "Ui2")]
  	 CRYPTO_SM3TT))]
@@ -281,7 +282,7 @@ index ee7f0b89c..4d742a493 100644
    "sm3tt\\t%0.4s, %2.4s, %3.4s[%4]"
    [(set_attr "type" "crypto_sm3")]
  )
-@@ -8318,7 +8318,7 @@
+@@ -8622,7 +8622,7 @@
  		      (match_operand:V4SI 2 "register_operand" "w")
  		      (match_operand:V4SI 3 "register_operand" "w")]
  	 CRYPTO_SM3PART))]
@@ -290,7 +291,7 @@ index ee7f0b89c..4d742a493 100644
    "sm3partw\\t%0.4s, %2.4s, %3.4s"
    [(set_attr "type" "crypto_sm3")]
  )
-@@ -8330,7 +8330,7 @@
+@@ -8634,7 +8634,7 @@
  	(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
  		      (match_operand:V4SI 2 "register_operand" "w")]
  	 UNSPEC_SM4E))]
@@ -299,7 +300,7 @@ index ee7f0b89c..4d742a493 100644
    "sm4e\\t%0.4s, %2.4s"
    [(set_attr "type" "crypto_sm4")]
  )
-@@ -8340,7 +8340,7 @@
+@@ -8644,7 +8644,7 @@
  	(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "w")
  		      (match_operand:V4SI 2 "register_operand" "w")]
  	 UNSPEC_SM4EKEY))]
@@ -308,7 +309,7 @@ index ee7f0b89c..4d742a493 100644
    "sm4ekey\\t%0.4s, %1.4s, %2.4s"
    [(set_attr "type" "crypto_sm4")]
  )
-@@ -8926,7 +8926,7 @@
+@@ -9230,7 +9230,7 @@
          (unspec:TI  [(match_operand:DI 1 "register_operand" "w")
  		     (match_operand:DI 2 "register_operand" "w")]
  		    UNSPEC_PMULL))]
@@ -317,7 +318,7 @@ index ee7f0b89c..4d742a493 100644
   "pmull\\t%0.1q, %1.1d, %2.1d"
    [(set_attr "type" "crypto_pmull")]
  )
-@@ -8936,7 +8936,7 @@
+@@ -9240,7 +9240,7 @@
         (unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
  		   (match_operand:V2DI 2 "register_operand" "w")]
  		  UNSPEC_PMULL2))]
@@ -408,10 +409,10 @@ index 521031efe..2a9d2d031 100644
  /* BF16 instructions are enabled through +bf16.  */
  #define TARGET_BF16_FP (AARCH64_ISA_BF16)
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index d24c8afcf..5b8bb185f 100644
+index c0c64a798..7ee26284d 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -6415,7 +6415,7 @@
+@@ -6417,7 +6417,7 @@
  (define_expand "dihf2"
    [(set (match_operand:HF 0 "register_operand")
  	(FLOATUORS:HF (match_operand:DI 1 "register_operand")))]
@@ -420,7 +421,7 @@ index d24c8afcf..5b8bb185f 100644
  {
    if (TARGET_FP_F16INST)
      emit_insn (gen_aarch64_fp16_dihf2 (operands[0], operands[1]));
-@@ -6674,7 +6674,7 @@
+@@ -6676,7 +6676,7 @@
    [(match_operand:GPF 0 "register_operand")
     (match_operand:GPF 1 "register_operand")
     (match_operand:GPF 2 "register_operand")]
@@ -429,7 +430,7 @@ index d24c8afcf..5b8bb185f 100644
  {
    rtx bitmask = gen_reg_rtx (mode);
    emit_move_insn (bitmask, GEN_INT (HOST_WIDE_INT_M1U
-@@ -6691,7 +6691,7 @@
+@@ -6693,7 +6693,7 @@
  		     (match_operand:GPF 2 "register_operand" "w,w,0,0")
  		     (match_operand: 3 "register_operand" "0,w,w,X")]
  	 UNSPEC_COPYSIGN))]
@@ -438,7 +439,7 @@ index d24c8afcf..5b8bb185f 100644
    "@
     bsl\\t%0., %2., %1.
     bit\\t%0., %2., %3.
-@@ -6712,7 +6712,7 @@
+@@ -6714,7 +6714,7 @@
    [(match_operand:GPF 0 "register_operand")
     (match_operand:GPF 1 "register_operand")
     (match_operand:GPF 2 "register_operand")]
@@ -448,5 +449,5 @@ index d24c8afcf..5b8bb185f 100644
  
    machine_mode imode = mode;
 -- 
-2.19.1
+2.33.0
 
diff --git a/0125-LoongArch-Add-the-macro-implementation-of-mcmodel-ex.patch b/0125-LoongArch-Add-the-macro-implementation-of-mcmodel-ex.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c14ee12409ee54e2ce8c2a8b5eea46cf647ffb59
--- /dev/null
+++ b/0125-LoongArch-Add-the-macro-implementation-of-mcmodel-ex.patch
@@ -0,0 +1,453 @@
+From cd177538c2a0f5248e9e7af6247b4d1ba6fe55db Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 25 Jan 2024 19:10:46 +0800
+Subject: [PATCH 125/188] LoongArch: Add the macro implementation of
+ mcmodel=extreme.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-protos.h (loongarch_symbol_extreme_p):
+	Add function declaration.
+	* config/loongarch/loongarch.cc (loongarch_symbolic_constant_p):
+	For SYMBOL_PCREL64, non-zero addend of "la.local $rd,$rt,sym+addend"
+	is not allowed
+	(loongarch_load_tls): Added macro support in extreme mode.
+	(loongarch_call_tls_get_addr): Likewise.
+	(loongarch_legitimize_tls_address): Likewise.
+	(loongarch_force_address): Likewise.
+	(loongarch_legitimize_move): Likewise.
+	(loongarch_output_mi_thunk): Likewise.
+	(loongarch_option_override_internal): Remove the code that detects
+	explicit relocs status.
+	(loongarch_handle_model_attribute): Likewise.
+	* config/loongarch/loongarch.md (movdi_symbolic_off64): New template.
+	* config/loongarch/predicates.md (symbolic_off64_operand): New predicate.
+	(symbolic_off64_or_reg_operand): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/attr-model-5.c: New test.
+	* gcc.target/loongarch/func-call-extreme-5.c: New test.
+	* gcc.target/loongarch/func-call-extreme-6.c: New test.
+	* gcc.target/loongarch/tls-extreme-macro.c: New test.
+---
+ gcc/config/loongarch/loongarch-protos.h       |   1 +
+ gcc/config/loongarch/loongarch.cc             | 110 +++++++++++-------
+ gcc/config/loongarch/loongarch.md             |  48 +++++++-
+ gcc/config/loongarch/predicates.md            |  12 ++
+ .../gcc.target/loongarch/attr-model-5.c       |   8 ++
+ .../loongarch/func-call-extreme-5.c           |   7 ++
+ .../loongarch/func-call-extreme-6.c           |   7 ++
+ .../gcc.target/loongarch/tls-extreme-macro.c  |  35 ++++++
+ 8 files changed, 184 insertions(+), 44 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-5.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-5.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-6.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 5060efbb6..87b94e8b0 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -222,4 +222,5 @@ extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool);
+ extern void loongarch_emit_swrsqrtsf (rtx, rtx, machine_mode, bool);
+ extern void loongarch_emit_swdivsf (rtx, rtx, rtx, machine_mode);
+ extern bool loongarch_explicit_relocs_p (enum loongarch_symbol_type);
++extern bool loongarch_symbol_extreme_p (enum loongarch_symbol_type);
+ #endif /* ! GCC_LOONGARCH_PROTOS_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index ced7e58c2..9cfe5bfb2 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1932,8 +1932,13 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type)
+      relocations.  */
+   switch (*symbol_type)
+     {
+-    case SYMBOL_PCREL:
+     case SYMBOL_PCREL64:
++      /* When the code model is extreme, the non-zero offset situation
++	 has not been handled well, so it is disabled here now.  */
++      if (!loongarch_explicit_relocs_p (SYMBOL_PCREL64))
++	return false;
++    /* fall through */
++    case SYMBOL_PCREL:
+       /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
+       return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);
+ 
+@@ -2735,9 +2740,15 @@ static GTY (()) rtx loongarch_tls_symbol;
+ /* Load an entry for a TLS access.  */
+ 
+ static rtx
+-loongarch_load_tls (rtx dest, rtx sym)
++loongarch_load_tls (rtx dest, rtx sym, enum loongarch_symbol_type type)
+ {
+-  return gen_load_tls (Pmode, dest, sym);
++  /* TLS LE gets a 32 or 64 bit offset here, so one register can do it.  */
++  if (type == SYMBOL_TLS_LE)
++    return gen_load_tls (Pmode, dest, sym);
++
++  return loongarch_symbol_extreme_p (type)
++    ? gen_movdi_symbolic_off64 (dest, sym, gen_reg_rtx (DImode))
++    : gen_load_tls (Pmode, dest, sym);
+ }
+ 
+ /* Return an instruction sequence that calls __tls_get_addr.  SYM is
+@@ -2769,8 +2780,6 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+       if (TARGET_CMODEL_EXTREME)
+ 	{
+-	  gcc_assert (TARGET_EXPLICIT_RELOCS);
+-
+ 	  rtx tmp1 = gen_reg_rtx (Pmode);
+ 	  emit_insn (gen_tls_low (Pmode, tmp1, gen_rtx_REG (Pmode, 0), loc));
+ 	  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loc));
+@@ -2781,7 +2790,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 	emit_insn (gen_tls_low (Pmode, a0, high, loc));
+     }
+   else
+-    emit_insn (loongarch_load_tls (a0, loc));
++    emit_insn (loongarch_load_tls (a0, loc, type));
+ 
+   if (flag_plt)
+     {
+@@ -2848,22 +2857,28 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+ 	case CMODEL_EXTREME:
+ 	    {
+-	      gcc_assert (TARGET_EXPLICIT_RELOCS);
+-
+-	      rtx tmp1 = gen_reg_rtx (Pmode);
+-	      rtx high = gen_reg_rtx (Pmode);
+-
+-	      loongarch_emit_move (high,
+-				   gen_rtx_HIGH (Pmode, loongarch_tls_symbol));
+-	      loongarch_emit_move (tmp1, gen_rtx_LO_SUM (Pmode,
+-							 gen_rtx_REG (Pmode, 0),
+-							 loongarch_tls_symbol));
+-	      emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol));
+-	      emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol));
+-	      loongarch_emit_move (dest,
+-				   gen_rtx_MEM (Pmode,
+-						gen_rtx_PLUS (Pmode,
+-							      high, tmp1)));
++	      if (loongarch_explicit_relocs_p (SYMBOL_GOT_DISP))
++		{
++		  rtx tmp1 = gen_reg_rtx (Pmode);
++		  rtx high = gen_reg_rtx (Pmode);
++
++		  loongarch_emit_move (high,
++				       gen_rtx_HIGH (Pmode,
++						     loongarch_tls_symbol));
++		  loongarch_emit_move (tmp1,
++				       gen_rtx_LO_SUM (Pmode,
++						       gen_rtx_REG (Pmode, 0),
++						       loongarch_tls_symbol));
++		  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol));
++		  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol));
++		  loongarch_emit_move (dest,
++				       gen_rtx_MEM (Pmode,
++						    gen_rtx_PLUS (Pmode,
++								  high, tmp1)));
++		}
++	      else
++	       emit_insn (gen_movdi_symbolic_off64 (dest, loongarch_tls_symbol,
++						    gen_reg_rtx (DImode)));
+ 	    }
+ 	  break;
+ 
+@@ -2928,8 +2943,6 @@ loongarch_legitimize_tls_address (rtx loc)
+ 
+ 	      if (TARGET_CMODEL_EXTREME)
+ 		{
+-		  gcc_assert (TARGET_EXPLICIT_RELOCS);
+-
+ 		  rtx tmp3 = gen_reg_rtx (Pmode);
+ 		  emit_insn (gen_tls_low (Pmode, tmp3,
+ 					  gen_rtx_REG (Pmode, 0), tmp2));
+@@ -2944,7 +2957,7 @@ loongarch_legitimize_tls_address (rtx loc)
+ 		emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2));
+ 	    }
+ 	  else
+-	    emit_insn (loongarch_load_tls (tmp1, tmp2));
++	    emit_insn (loongarch_load_tls (tmp1, tmp2, SYMBOL_TLS_IE));
+ 	  emit_insn (gen_add3_insn (dest, tmp1, tp));
+ 	}
+       break;
+@@ -3001,14 +3014,12 @@ loongarch_legitimize_tls_address (rtx loc)
+ 
+ 	      if (TARGET_CMODEL_EXTREME)
+ 		{
+-		  gcc_assert (TARGET_EXPLICIT_RELOCS);
+-
+ 		  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, tmp2));
+ 		  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, tmp2));
+ 		}
+ 	    }
+ 	  else
+-	    emit_insn (loongarch_load_tls (tmp1, tmp2));
++	    emit_insn (loongarch_load_tls (tmp1, tmp2, SYMBOL_TLS_LE));
+ 	  emit_insn (gen_add3_insn (dest, tmp1, tp));
+ 	}
+       break;
+@@ -3081,7 +3092,7 @@ loongarch_force_address (rtx x, machine_mode mode)
+   return x;
+ }
+ 
+-static bool
++bool
+ loongarch_symbol_extreme_p (enum loongarch_symbol_type type)
+ {
+   switch (type)
+@@ -3402,6 +3413,21 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src)
+       return true;
+     }
+ 
++  /* Obtain the address of the symbol through the macro instruction
++     of two registers.  */
++  enum loongarch_symbol_type symbol_type;
++  if (TARGET_64BIT && register_operand (dest, mode)
++      && loongarch_symbolic_constant_p (src, &symbol_type)
++      && loongarch_symbol_extreme_p (symbol_type))
++    {
++      gcc_assert (can_create_pseudo_p ());
++      rtx tmp_reg = gen_reg_rtx (DImode);
++      emit_insn (gen_movdi_symbolic_off64 (dest, src, tmp_reg));
++      set_unique_reg_note (get_last_insn (), REG_UNUSED, tmp_reg);
++      set_unique_reg_note (get_last_insn (), REG_EQUAL, src);
++      return true;
++    }
++
+   return false;
+ }
+ 
+@@ -7458,12 +7484,22 @@ loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+      allowed, otherwise load the address into a register first.  */
+   if (use_sibcall_p)
+     {
+-      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
++      if (TARGET_CMODEL_EXTREME)
++	{
++	  emit_insn (gen_movdi_symbolic_off64 (temp1, fnaddr, temp2));
++	  insn = emit_call_insn (gen_sibcall_internal (temp1, const0_rtx));
++	}
++      else
++	insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
+       SIBLING_CALL_P (insn) = 1;
+     }
+   else
+     {
+-      loongarch_emit_move (temp1, fnaddr);
++      if (TARGET_CMODEL_EXTREME)
++	emit_insn (gen_movdi_symbolic_off64 (temp1, fnaddr, temp2));
++      else
++	loongarch_emit_move (temp1, fnaddr);
++
+       emit_jump_insn (gen_indirect_jump (temp1));
+     }
+ 
+@@ -7568,10 +7604,6 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   switch (la_target.cmodel)
+     {
+       case CMODEL_EXTREME:
+-	if (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)
+-	  error ("code model %qs is not compatible with %s",
+-		 "extreme", "-mexplicit-relocs=none");
+-
+ 	if (opts->x_flag_plt)
+ 	  {
+ 	    if (global_options_set.x_flag_plt)
+@@ -7989,14 +8021,6 @@ loongarch_handle_model_attribute (tree *node, tree name, tree arg, int,
+ 	  *no_add_attrs = true;
+ 	  return NULL_TREE;
+ 	}
+-      if (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)
+-	{
+-	  error_at (DECL_SOURCE_LOCATION (decl),
+-		    "%qE attribute is not compatible with %s", name,
+-		    "-mexplicit-relocs=none");
+-	  *no_add_attrs = true;
+-	  return NULL_TREE;
+-	}
+ 
+       arg = TREE_VALUE (arg);
+       if (TREE_CODE (arg) != STRING_CST)
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 4f9a92334..add55e0af 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -82,6 +82,8 @@
+ 
+   UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1
+   UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1
++
++  UNSPEC_LOAD_SYMBOL_OFFSET64
+ ])
+ 
+ (define_c_enum "unspecv" [
+@@ -2182,6 +2184,46 @@
+   [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore")
+    (set_attr "mode" "DI")])
+ 
++;; Use two registers to get the global symbol address from the got table.
++;; la.global rd, rt, sym
++
++(define_insn_and_split "movdi_symbolic_off64"
++ [(set (match_operand:DI 0 "register_operand" "=r,r")
++       (match_operand:DI 1 "symbolic_off64_or_reg_operand" "Yd,r"))
++  (unspec:DI [(const_int 0)]
++    UNSPEC_LOAD_SYMBOL_OFFSET64)
++  (clobber (match_operand:DI 2 "register_operand" "=&r,r"))]
++ "TARGET_64BIT && TARGET_CMODEL_EXTREME"
++{
++  if (which_alternative == 1)
++    return "#";
++
++  enum loongarch_symbol_type symbol_type;
++  gcc_assert (loongarch_symbolic_constant_p (operands[1], &symbol_type));
++
++  switch (symbol_type)
++    {
++    case SYMBOL_PCREL64:
++      return "la.local\t%0,%2,%1";
++    case SYMBOL_GOT_DISP:
++      return "la.global\t%0,%2,%1";
++    case SYMBOL_TLS_IE:
++      return "la.tls.ie\t%0,%2,%1";
++    case SYMBOL_TLSGD:
++      return "la.tls.gd\t%0,%2,%1";
++    case SYMBOL_TLSLDM:
++      return "la.tls.ld\t%0,%2,%1";
++
++    default:
++      gcc_unreachable ();
++  }
++}
++ "&& REG_P (operands[1]) && find_reg_note (insn, REG_UNUSED, operands[2]) != 0"
++ [(set (match_dup 0) (match_dup 1))]
++ ""
++ [(set_attr "mode" "DI")
++  (set_attr "insn_count" "5")])
++
+ ;; 32-bit Integer moves
+ 
+ (define_expand "movsi"
+@@ -2724,7 +2766,11 @@
+     }
+ }
+   [(set_attr "mode" "")
+-   (set_attr "insn_count" "2")])
++   (set (attr "insn_count")
++      (if_then_else
++	(match_test "TARGET_CMODEL_EXTREME")
++	(const_int 4)
++	(const_int 2)))])
+ 
+ ;; Move operand 1 to the high word of operand 0 using movgr2frh.w, preserving the
+ ;; value in the low word.
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 824a85b36..1d9a30695 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -576,6 +576,18 @@
+ 	  || symbolic_pcrel_offset_operand (op, Pmode));
+ })
+ 
++(define_predicate "symbolic_off64_operand"
++ (match_code "const,symbol_ref,label_ref")
++{
++  enum loongarch_symbol_type type;
++  return loongarch_symbolic_constant_p (op, &type)
++	 && loongarch_symbol_extreme_p (type);
++})
++
++(define_predicate "symbolic_off64_or_reg_operand"
++ (ior (match_operand 0 "register_operand")
++      (match_operand 0 "symbolic_off64_operand")))
++
+ (define_predicate "equality_operator"
+   (match_code "eq,ne"))
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-5.c b/gcc/testsuite/gcc.target/loongarch/attr-model-5.c
+new file mode 100644
+index 000000000..5f2c3ec9e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-5.c
+@@ -0,0 +1,8 @@
++/* { dg-do compile } */
++/* { dg-options "-mexplicit-relocs=none -mcmodel=extreme -O2 -fno-pic" } */
++/* { dg-final { scan-assembler "la.local\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,x" } } */
++/* { dg-final { scan-assembler "la.local\t\\\$r\[0-9\]+,y" } } */
++/* { dg-final { scan-assembler "la.local\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,counter" } } */
++
++#define ATTR_MODEL_TEST
++#include "attr-model-test.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-5.c
+new file mode 100644
+index 000000000..b1bd9d236
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-5.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs=none -mcmodel=extreme" } */
++/* { dg-final { scan-assembler "test:.*la.global\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,g" } } */
++/* { dg-final { scan-assembler "test1:.*la.global\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,f" } } */
++/* { dg-final { scan-assembler "test2:.*la.local\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,l" } } */
++
++#include "func-call-extreme-1.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-6.c
+new file mode 100644
+index 000000000..6e6ad5c9f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-6.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs=none -mcmodel=extreme" } */
++/* { dg-final { scan-assembler "test:.*la.global\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,g" } } */
++/* { dg-final { scan-assembler "test1:.*la.local\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,f" } } */
++/* { dg-final { scan-assembler "test2:.*la.local\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,l" } } */
++
++#include "func-call-extreme-1.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c b/gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c
+new file mode 100644
+index 000000000..4341f8212
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c
+@@ -0,0 +1,35 @@
++/* { dg-do compile } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=none" } */
++/* { dg-final { scan-assembler "test_le:.*la.tls.le\t\\\$r\[0-9\]+,\\\.L" { target tls_native } } } */
++/* { dg-final { scan-assembler "test_ie:.*la.tls.ie\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,\\\.L" { target tls_native } } } */
++/* { dg-final { scan-assembler "test_ld:.*la.tls.ld\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,\\\.L.*la.global\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,__tls_get_addr" { target tls_native } } } */
++/* { dg-final { scan-assembler "test_le:.*la.tls.gd\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,\\\.L.*la.global\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,__tls_get_addr" { target tls_native } } } */
++
++__thread int c __attribute__ ((tls_model ("local-exec")));
++__thread int d __attribute__ ((tls_model ("initial-exec")));
++__thread int e __attribute__ ((tls_model ("local-dynamic")));
++__thread int f __attribute__ ((tls_model ("global-dynamic")));
++
++int
++test_le (void)
++{
++  return c;
++}
++
++int
++test_ie (void)
++{
++  return d;
++}
++
++int
++test_ld (void)
++{
++  return e;
++}
++
++int
++test_gd (void)
++{
++  return f;
++}
+-- 
+2.43.0
+
diff --git a/SME-0023-aarch64-Define-__ARM_FEATURE_RCPC.patch b/0126-Backport-SME-aarch64-Define-__ARM_FEATURE_RCPC.patch
similarity index 97%
rename from SME-0023-aarch64-Define-__ARM_FEATURE_RCPC.patch
rename to 0126-Backport-SME-aarch64-Define-__ARM_FEATURE_RCPC.patch
index cd5442bfae2b2d1de4b39ecbc87f954f0c1fb510..8fe079c83832bbf86a5b20f05c7d1e2e69051e82 100644
--- a/SME-0023-aarch64-Define-__ARM_FEATURE_RCPC.patch
+++ b/0126-Backport-SME-aarch64-Define-__ARM_FEATURE_RCPC.patch
@@ -1,7 +1,7 @@
-From 481dbc3db870363723de471541ecab52c061cf90 Mon Sep 17 00:00:00 2001
+From 53a858c0c371cbea27ed4170a94fb3918b9fcdcf Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 4 Oct 2022 16:39:18 +0100
-Subject: [PATCH 023/144] aarch64: Define __ARM_FEATURE_RCPC
+Subject: [PATCH 027/157] [Backport][SME] aarch64: Define __ARM_FEATURE_RCPC
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c1b0a767f04a8ccbaff2a7b71d5c817cdb469630
 
@@ -128,5 +128,5 @@ index bfb044f5d..307fa3d67 100644
  foo (int a)
  {
 -- 
-2.19.1
+2.33.0
 
diff --git a/0126-LoongArch-Enable-explicit-reloc-for-extreme-TLS-GD-L.patch b/0126-LoongArch-Enable-explicit-reloc-for-extreme-TLS-GD-L.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ed9d4dbf86327a62cb7a81929ba4a3a0ede57404
--- /dev/null
+++ b/0126-LoongArch-Enable-explicit-reloc-for-extreme-TLS-GD-L.patch
@@ -0,0 +1,126 @@
+From 1ccf16353b2be4308c79f3b011cb800bfa6f94f4 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 26 Jan 2024 10:46:51 +0800
+Subject: [PATCH 126/188] LoongArch: Enable explicit reloc for extreme TLS
+ GD/LD with -mexplicit-relocs=auto.
+
+Binutils does not support relaxation using four instructions to obtain
+symbol addresses
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_explicit_relocs_p):
+	When the code model of the symbol is extreme and -mexplicit-relocs=auto,
+	the macro instruction loading symbol address is not applicable.
+	(loongarch_call_tls_get_addr): Adjust code.
+	(loongarch_legitimize_tls_address): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/explicit-relocs-extreme-auto-tls-ld-gd.c: New test.
+	* gcc.target/loongarch/explicit-relocs-medium-auto-tls-ld-gd.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 19 +++++++++----------
+ .../explicit-relocs-extreme-auto-tls-ld-gd.c  |  5 +++++
+ .../explicit-relocs-medium-auto-tls-ld-gd.c   |  5 +++++
+ 3 files changed, 19 insertions(+), 10 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-auto-tls-ld-gd.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-auto-tls-ld-gd.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 9cfe5bfb2..84b949021 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1968,6 +1968,10 @@ loongarch_explicit_relocs_p (enum loongarch_symbol_type type)
+   if (la_opt_explicit_relocs != EXPLICIT_RELOCS_AUTO)
+     return la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS;
+ 
++  /* The linker don't know how to relax accesses in extreme code model.  */
++  if (loongarch_symbol_extreme_p (type))
++    return true;
++
+   switch (type)
+     {
+       case SYMBOL_TLS_IE:
+@@ -1979,11 +1983,6 @@ loongarch_explicit_relocs_p (enum loongarch_symbol_type type)
+ 	   does not relax 64-bit pc-relative accesses as at now.  */
+ 	return true;
+       case SYMBOL_GOT_DISP:
+-	/* The linker don't know how to relax GOT accesses in extreme
+-	   code model.  */
+-	if (TARGET_CMODEL_EXTREME)
+-	  return true;
+-
+ 	/* If we are performing LTO for a final link, and we have the
+ 	   linker plugin so we know the resolution of the symbols, then
+ 	   all GOT references are binding to external symbols or
+@@ -2772,7 +2771,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+   start_sequence ();
+ 
+-  if (la_opt_explicit_relocs == EXPLICIT_RELOCS_ALWAYS)
++  if (loongarch_explicit_relocs_p (type))
+     {
+       /* Split tls symbol to high and low.  */
+       rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc));
+@@ -2805,7 +2804,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 	case CMODEL_MEDIUM:
+ 	    {
+ 	      rtx reg = gen_reg_rtx (Pmode);
+-	      if (TARGET_EXPLICIT_RELOCS)
++	      if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
+ 		{
+ 		  emit_insn (gen_pcalau12i (Pmode, reg, loongarch_tls_symbol));
+ 		  rtx call = gen_call_value_internal_1 (Pmode, v0, reg,
+@@ -2841,7 +2840,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 	case CMODEL_NORMAL:
+ 	case CMODEL_MEDIUM:
+ 	    {
+-	      if (TARGET_EXPLICIT_RELOCS)
++	      if (loongarch_explicit_relocs_p (SYMBOL_GOT_DISP))
+ 		{
+ 		  rtx high = gen_reg_rtx (Pmode);
+ 		  loongarch_emit_move (high,
+@@ -2935,7 +2934,7 @@ loongarch_legitimize_tls_address (rtx loc)
+ 	  tmp1 = gen_reg_rtx (Pmode);
+ 	  tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_IE);
+ 	  dest = gen_reg_rtx (Pmode);
+-	  if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
++	  if (loongarch_explicit_relocs_p (SYMBOL_TLS_IE))
+ 	    {
+ 	      tmp3 = gen_reg_rtx (Pmode);
+ 	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+@@ -2992,7 +2991,7 @@ loongarch_legitimize_tls_address (rtx loc)
+ 	  tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_LE);
+ 	  dest = gen_reg_rtx (Pmode);
+ 
+-	  if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
++	  if (loongarch_explicit_relocs_p (SYMBOL_TLS_LE))
+ 	    {
+ 	      tmp3 = gen_reg_rtx (Pmode);
+ 	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-auto-tls-ld-gd.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-auto-tls-ld-gd.c
+new file mode 100644
+index 000000000..35bd4570a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-auto-tls-ld-gd.c
+@@ -0,0 +1,5 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto -mcmodel=extreme -fno-plt" } */
++/* { dg-final { scan-assembler-not "la.tls.\[lg\]d" { target tls_native } } } */
++
++#include "./explicit-relocs-auto-tls-ld-gd.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-auto-tls-ld-gd.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-auto-tls-ld-gd.c
+new file mode 100644
+index 000000000..47bffae8a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-auto-tls-ld-gd.c
+@@ -0,0 +1,5 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto -mcmodel=medium -fplt" } */
++/* { dg-final { scan-assembler-not "la.global" { target tls_native } } } */
++
++#include "./explicit-relocs-auto-tls-ld-gd.c"
+-- 
+2.43.0
+
diff --git a/SME-0024-Add-Ampere-1-and-Ampere-1A-core-definition-in-aarch6.patch b/0127-Backport-SME-Add-Ampere-1-and-Ampere-1A-core-definit.patch
similarity index 87%
rename from SME-0024-Add-Ampere-1-and-Ampere-1A-core-definition-in-aarch6.patch
rename to 0127-Backport-SME-Add-Ampere-1-and-Ampere-1A-core-definit.patch
index 64e0fa2c1dd5f1c4eddcd98f5f6de78afa5a7381..d3177916885e2dabd398a7df508efb88a3c0cdd6 100644
--- a/SME-0024-Add-Ampere-1-and-Ampere-1A-core-definition-in-aarch6.patch
+++ b/0127-Backport-SME-Add-Ampere-1-and-Ampere-1A-core-definit.patch
@@ -1,8 +1,8 @@
-From 98305a45ac50d86cf97b1eff50eb1dfc3b4f5ee8 Mon Sep 17 00:00:00 2001
+From f6b2917888292c694bae1debe8abb0d6c2c6f59e Mon Sep 17 00:00:00 2001
 From: xiezhiheng 
 Date: Tue, 20 Feb 2024 11:03:47 +0800
-Subject: [PATCH 024/144] Add Ampere-1 and Ampere-1A core definition in
- aarch64-cores.def
+Subject: [PATCH 028/157] [Backport][SME] Add Ampere-1 and Ampere-1A core
+ definition in aarch64-cores.def
 
 From commit db2f5d661239737157cf131de7d4df1c17d8d88d and
 590a06afbf0e96813b5879742f38f3665512c854
@@ -25,5 +25,5 @@ index b50628d6b..f069c81cf 100644
     this order is required to handle variant correctly. */
  AARCH64_CORE("emag",        emag,      xgene1,    V8A,  (CRC, CRYPTO), emag, 0x50, 0x000, 3)
 -- 
-2.19.1
+2.33.0
 
diff --git a/0127-LoongArch-Added-support-for-loading-__get_tls_addr-s.patch b/0127-LoongArch-Added-support-for-loading-__get_tls_addr-s.patch
new file mode 100644
index 0000000000000000000000000000000000000000..653a7abd8d72bf05accc309f0368117f40036fdd
--- /dev/null
+++ b/0127-LoongArch-Added-support-for-loading-__get_tls_addr-s.patch
@@ -0,0 +1,72 @@
+From d802fd5eb24bba0c274edeea5aff33e794927aaa Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 26 Jan 2024 11:14:00 +0800
+Subject: [PATCH 127/188] LoongArch: Added support for loading __get_tls_addr
+ symbol address using call36.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_call_tls_get_addr):
+	Add support for call36.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 22 ++++++++++++++-----
+ ...icit-relocs-medium-call36-auto-tls-ld-gd.c |  5 +++++
+ 2 files changed, 21 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 84b949021..0050813df 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2803,17 +2803,27 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+ 	case CMODEL_MEDIUM:
+ 	    {
+-	      rtx reg = gen_reg_rtx (Pmode);
+ 	      if (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE)
+ 		{
+-		  emit_insn (gen_pcalau12i (Pmode, reg, loongarch_tls_symbol));
+-		  rtx call = gen_call_value_internal_1 (Pmode, v0, reg,
+-							loongarch_tls_symbol,
+-							const0_rtx);
+-		  insn = emit_call_insn (call);
++		  rtx call;
++
++		 if (HAVE_AS_SUPPORT_CALL36)
++		   call = gen_call_value_internal (v0, loongarch_tls_symbol,
++						   const0_rtx);
++		 else
++		   {
++		     rtx reg = gen_reg_rtx (Pmode);
++		     emit_insn (gen_pcalau12i (Pmode, reg,
++					       loongarch_tls_symbol));
++		     call = gen_call_value_internal_1 (Pmode, v0, reg,
++						       loongarch_tls_symbol,
++						       const0_rtx);
++		   }
++		 insn = emit_call_insn (call);
+ 		}
+ 	      else
+ 		{
++		  rtx reg = gen_reg_rtx (Pmode);
+ 		  emit_move_insn (reg, loongarch_tls_symbol);
+ 		  insn = emit_call_insn (gen_call_value_internal (v0,
+ 								  reg,
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c
+new file mode 100644
+index 000000000..d1a482083
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c
+@@ -0,0 +1,5 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto -mcmodel=medium -fplt" } */
++/* { dg-final { scan-assembler "pcaddu18i\t\\\$r1,%call36\\\(__tls_get_addr\\\)" { target { tls_native && loongarch_call36_support } } } } */
++
++#include "./explicit-relocs-auto-tls-ld-gd.c"
+-- 
+2.43.0
+
diff --git a/SME-0025-aarch64-Fix-nosimd-handling-of-FPR-moves.patch b/0128-Backport-SME-aarch64-Fix-nosimd-handling-of-FPR-move.patch
similarity index 98%
rename from SME-0025-aarch64-Fix-nosimd-handling-of-FPR-moves.patch
rename to 0128-Backport-SME-aarch64-Fix-nosimd-handling-of-FPR-move.patch
index 4e3f80275b89f8903c72b095dab4d55686ea90f8..9ad166cd9f006bd57720e2d7ca0290fa1d63539e 100644
--- a/SME-0025-aarch64-Fix-nosimd-handling-of-FPR-moves.patch
+++ b/0128-Backport-SME-aarch64-Fix-nosimd-handling-of-FPR-move.patch
@@ -1,7 +1,8 @@
-From 2ab0e1cc1bc135d586ba1ed68d943a861e0da69c Mon Sep 17 00:00:00 2001
+From 81a4b464d01cf00f8b355115588e67bf2c021acd Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Wed, 7 Sep 2022 10:52:04 +0100
-Subject: [PATCH 025/144] aarch64: Fix +nosimd handling of FPR moves
+Subject: [PATCH 029/157] [Backport][SME] aarch64: Fix +nosimd handling of FPR
+ moves
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=d6106132907f6bd01109f2616d20a87edecc6fc6
 
@@ -71,10 +72,10 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/movv8qi_1.c
 
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 5b8bb185f..5473e2720 100644
+index 7ee26284d..7267a74d6 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -1199,7 +1199,7 @@
+@@ -1201,7 +1201,7 @@
  
  (define_insn "*mov_aarch64"
    [(set (match_operand:SHORT 0 "nonimmediate_operand" "=r,r,    w,r  ,r,w, m,m,r,w,w")
@@ -83,7 +84,7 @@ index 5b8bb185f..5473e2720 100644
    "(register_operand (operands[0], mode)
      || aarch64_reg_or_zero (operands[1], mode))"
  {
-@@ -1223,11 +1223,11 @@
+@@ -1225,11 +1225,11 @@
       case 7:
         return "str\t%1, %0";
       case 8:
@@ -98,7 +99,7 @@ index 5b8bb185f..5473e2720 100644
       default:
         gcc_unreachable ();
       }
-@@ -1235,7 +1235,7 @@
+@@ -1237,7 +1237,7 @@
    ;; The "mov_imm" type for CNT is just a placeholder.
    [(set_attr "type" "mov_reg,mov_imm,neon_move,mov_imm,load_4,load_4,store_4,
  		     store_4,neon_to_gp,neon_from_gp,neon_dup")
@@ -107,7 +108,7 @@ index 5b8bb185f..5473e2720 100644
  )
  
  (define_expand "mov"
-@@ -1397,14 +1397,15 @@
+@@ -1399,14 +1399,15 @@
  
  (define_insn "*movti_aarch64"
    [(set (match_operand:TI 0
@@ -125,7 +126,7 @@ index 5b8bb185f..5473e2720 100644
     #
     #
     mov\\t%0.16b, %1.16b
-@@ -1413,11 +1414,11 @@
+@@ -1415,11 +1416,11 @@
     stp\\txzr, xzr, %0
     ldr\\t%q0, %1
     str\\t%q1, %0"
@@ -140,7 +141,7 @@ index 5b8bb185f..5473e2720 100644
  )
  
  ;; Split a TImode register-register or register-immediate move into
-@@ -1456,16 +1457,19 @@
+@@ -1458,16 +1459,19 @@
  )
  
  (define_insn "*mov_aarch64"
@@ -162,7 +163,7 @@ index 5b8bb185f..5473e2720 100644
     fmov\\t%h0, %1
     * return aarch64_output_scalar_simd_mov_immediate (operands[1], HImode);
     ldr\\t%h0, %1
-@@ -1473,9 +1477,10 @@
+@@ -1475,9 +1479,10 @@
     ldrh\\t%w0, %1
     strh\\t%w1, %0
     mov\\t%w0, %w1"
@@ -176,7 +177,7 @@ index 5b8bb185f..5473e2720 100644
  )
  
  (define_insn "*movsf_aarch64"
-@@ -1528,10 +1533,11 @@
+@@ -1530,10 +1535,11 @@
  
  (define_split
    [(set (match_operand:GPF_HF 0 "nonimmediate_operand")
@@ -963,5 +964,5 @@ index 000000000..4c97e6fbc
 +  asm volatile ("" :: "r" (x0));
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/0128-LoongArch-Don-t-split-the-instructions-containing-re.patch b/0128-LoongArch-Don-t-split-the-instructions-containing-re.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9c088e0b5c2f41cb502bea68589fa6c664239e9f
--- /dev/null
+++ b/0128-LoongArch-Don-t-split-the-instructions-containing-re.patch
@@ -0,0 +1,514 @@
+From 45aace43891ccaef756f2f1356edbb0da676629b Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Mon, 29 Jan 2024 15:20:07 +0800
+Subject: [PATCH 128/188] LoongArch: Don't split the instructions containing
+ relocs for extreme code model.
+
+The ABI mandates the pcalau12i/addi.d/lu32i.d/lu52i.d instructions for
+addressing a symbol to be adjacent.  So model them as "one large
+instruction", i.e. define_insn, with two output registers.  The real
+address is the sum of these two registers.
+
+The advantage of this approach is the RTL passes can still use ldx/stx
+instructions to skip an addi.d instruction.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (unspec): Add
+	UNSPEC_LA_PCREL_64_PART1 and UNSPEC_LA_PCREL_64_PART2.
+	(la_pcrel64_two_parts): New define_insn.
+	* config/loongarch/loongarch.cc (loongarch_tls_symbol): Fix a
+	typo in the comment.
+	(loongarch_call_tls_get_addr): If -mcmodel=extreme
+	-mexplicit-relocs={always,auto}, use la_pcrel64_two_parts for
+	addressing the TLS symbol and __tls_get_addr.  Emit an REG_EQUAL
+	note to allow CSE addressing __tls_get_addr.
+	(loongarch_legitimize_tls_address): If -mcmodel=extreme
+	-mexplicit-relocs={always,auto}, address TLS IE symbols with
+	la_pcrel64_two_parts.
+	(loongarch_split_symbol): If -mcmodel=extreme
+	-mexplicit-relocs={always,auto}, address symbols with
+	la_pcrel64_two_parts.
+	(loongarch_output_mi_thunk): Clean up unreachable code.  If
+	-mcmodel=extreme -mexplicit-relocs={always,auto}, address the MI
+	thunks with la_pcrel64_two_parts.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/func-call-extreme-1.c (dg-options):
+	Use -O2 instead of -O0 to ensure the pcalau12i/addi/lu32i/lu52i
+	instruction sequences are not reordered by the compiler.
+	(NOIPA): Disallow interprocedural optimizations.
+	* gcc.target/loongarch/func-call-extreme-2.c: Remove the content
+	duplicated from func-call-extreme-1.c, include it instead.
+	(dg-options): Likewise.
+	* gcc.target/loongarch/func-call-extreme-3.c (dg-options):
+	Likewise.
+	* gcc.target/loongarch/func-call-extreme-4.c (dg-options):
+	Likewise.
+	* gcc.target/loongarch/cmodel-extreme-1.c: New test.
+	* gcc.target/loongarch/cmodel-extreme-2.c: New test.
+	* g++.target/loongarch/cmodel-extreme-mi-thunk-1.C: New test.
+	* g++.target/loongarch/cmodel-extreme-mi-thunk-2.C: New test.
+	* g++.target/loongarch/cmodel-extreme-mi-thunk-3.C: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 131 ++++++++++--------
+ gcc/config/loongarch/loongarch.md             |  20 +++
+ .../loongarch/cmodel-extreme-mi-thunk-1.C     |  11 ++
+ .../loongarch/cmodel-extreme-mi-thunk-2.C     |   6 +
+ .../loongarch/cmodel-extreme-mi-thunk-3.C     |   6 +
+ .../gcc.target/loongarch/cmodel-extreme-1.c   |  18 +++
+ .../gcc.target/loongarch/cmodel-extreme-2.c   |   7 +
+ .../loongarch/func-call-extreme-1.c           |  14 +-
+ .../loongarch/func-call-extreme-2.c           |  29 +---
+ .../loongarch/func-call-extreme-3.c           |   2 +-
+ .../loongarch/func-call-extreme-4.c           |   2 +-
+ 11 files changed, 154 insertions(+), 92 deletions(-)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-1.C
+ create mode 100644 gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-2.C
+ create mode 100644 gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-3.C
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 0050813df..b8f0291ab 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2733,7 +2733,7 @@ loongarch_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
+   return plus_constant (Pmode, reg, offset);
+ }
+ 
+-/* The __tls_get_attr symbol.  */
++/* The __tls_get_addr symbol.  */
+ static GTY (()) rtx loongarch_tls_symbol;
+ 
+ /* Load an entry for a TLS access.  */
+@@ -2773,20 +2773,22 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+   if (loongarch_explicit_relocs_p (type))
+     {
+-      /* Split tls symbol to high and low.  */
+-      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc));
+-      high = loongarch_force_temporary (tmp, high);
+-
+       if (TARGET_CMODEL_EXTREME)
+ 	{
+-	  rtx tmp1 = gen_reg_rtx (Pmode);
+-	  emit_insn (gen_tls_low (Pmode, tmp1, gen_rtx_REG (Pmode, 0), loc));
+-	  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loc));
+-	  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loc));
+-	  emit_move_insn (a0, gen_rtx_PLUS (Pmode, high, tmp1));
++	  rtx part1 = gen_reg_rtx (Pmode);
++	  rtx part2 = gen_reg_rtx (Pmode);
++
++	  emit_insn (gen_la_pcrel64_two_parts (part1, part2, loc));
++	  emit_move_insn (a0, gen_rtx_PLUS (Pmode, part1, part2));
+ 	}
+       else
+-	emit_insn (gen_tls_low (Pmode, a0, high, loc));
++	{
++	  /* Split tls symbol to high and low.  */
++	  rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc));
++
++	  high = loongarch_force_temporary (tmp, high);
++	  emit_insn (gen_tls_low (Pmode, a0, high, loc));
++	}
+     }
+   else
+     emit_insn (loongarch_load_tls (a0, loc, type));
+@@ -2868,22 +2870,28 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 	    {
+ 	      if (loongarch_explicit_relocs_p (SYMBOL_GOT_DISP))
+ 		{
+-		  rtx tmp1 = gen_reg_rtx (Pmode);
+-		  rtx high = gen_reg_rtx (Pmode);
++		  gcc_assert (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE);
+ 
+-		  loongarch_emit_move (high,
+-				       gen_rtx_HIGH (Pmode,
+-						     loongarch_tls_symbol));
+-		  loongarch_emit_move (tmp1,
+-				       gen_rtx_LO_SUM (Pmode,
+-						       gen_rtx_REG (Pmode, 0),
++		  rtx part1 = gen_reg_rtx (Pmode);
++		  rtx part2 = gen_reg_rtx (Pmode);
++
++		  emit_insn (gen_la_pcrel64_two_parts (part1, part2,
+ 						       loongarch_tls_symbol));
+-		  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol));
+-		  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol));
+-		  loongarch_emit_move (dest,
+-				       gen_rtx_MEM (Pmode,
+-						    gen_rtx_PLUS (Pmode,
+-								  high, tmp1)));
++		  loongarch_emit_move (
++		    dest,
++		    gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode,
++						      part1,
++						      part2)));
++
++		  /* Put an REG_EQUAL note here to allow CSE (storing
++		     part1 + part2, i.e. the address of tls_get_addr into
++		     a saved register and use it for multiple TLS
++		     accesses).  */
++		  rtx sum = gen_rtx_UNSPEC (
++		    Pmode, gen_rtvec (1, loongarch_tls_symbol),
++		    UNSPEC_ADDRESS_FIRST
++		    + loongarch_classify_symbol (loongarch_tls_symbol));
++		  set_unique_reg_note (get_last_insn (), REG_EQUAL, sum);
+ 		}
+ 	      else
+ 	       emit_insn (gen_movdi_symbolic_off64 (dest, loongarch_tls_symbol,
+@@ -2946,24 +2954,30 @@ loongarch_legitimize_tls_address (rtx loc)
+ 	  dest = gen_reg_rtx (Pmode);
+ 	  if (loongarch_explicit_relocs_p (SYMBOL_TLS_IE))
+ 	    {
+-	      tmp3 = gen_reg_rtx (Pmode);
+-	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+-	      high = loongarch_force_temporary (tmp3, high);
+-
+ 	      if (TARGET_CMODEL_EXTREME)
+ 		{
+-		  rtx tmp3 = gen_reg_rtx (Pmode);
+-		  emit_insn (gen_tls_low (Pmode, tmp3,
+-					  gen_rtx_REG (Pmode, 0), tmp2));
+-		  emit_insn (gen_lui_h_lo20 (tmp3, tmp3, tmp2));
+-		  emit_insn (gen_lui_h_hi12 (tmp3, tmp3, tmp2));
++		  gcc_assert (la_opt_explicit_relocs
++			      != EXPLICIT_RELOCS_NONE);
++
++		  rtx part1 = gen_reg_rtx (Pmode);
++		  rtx part2 = gen_reg_rtx (Pmode);
++
++		  emit_insn (gen_la_pcrel64_two_parts (part1, part2,
++						       tmp2));
+ 		  emit_move_insn (tmp1,
+ 				  gen_rtx_MEM (Pmode,
+ 					       gen_rtx_PLUS (Pmode,
+-							     high, tmp3)));
++							     part1,
++							     part2)));
+ 		}
+ 	      else
+-		emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2));
++		{
++		  tmp3 = gen_reg_rtx (Pmode);
++		  rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
++
++		  high = loongarch_force_temporary (tmp3, high);
++		  emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2));
++		}
+ 	    }
+ 	  else
+ 	    emit_insn (loongarch_load_tls (tmp1, tmp2, SYMBOL_TLS_IE));
+@@ -3142,24 +3156,23 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+       || !loongarch_split_symbol_type (symbol_type))
+     return false;
+ 
+-  rtx high, temp1 = NULL;
++  rtx high;
+ 
+   if (temp == NULL)
+     temp = gen_reg_rtx (Pmode);
+ 
+-  /* Get the 12-31 bits of the address.  */
+-  high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
+-  high = loongarch_force_temporary (temp, high);
+-
+   if (loongarch_symbol_extreme_p (symbol_type) && can_create_pseudo_p ())
+     {
+       gcc_assert (la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE);
+ 
+-      temp1 = gen_reg_rtx (Pmode);
+-      emit_move_insn (temp1, gen_rtx_LO_SUM (Pmode, gen_rtx_REG (Pmode, 0),
+-					     addr));
+-      emit_insn (gen_lui_h_lo20 (temp1, temp1, addr));
+-      emit_insn (gen_lui_h_hi12 (temp1, temp1, addr));
++      high = gen_reg_rtx (Pmode);
++      emit_insn (gen_la_pcrel64_two_parts (high, temp, addr));
++    }
++  else
++    {
++      /* Get the 12-31 bits of the address.  */
++      high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
++      high = loongarch_force_temporary (temp, high);
+     }
+ 
+   if (low_out)
+@@ -3168,7 +3181,7 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+       case SYMBOL_PCREL64:
+ 	if (can_create_pseudo_p ())
+ 	  {
+-	    *low_out = gen_rtx_PLUS (Pmode, high, temp1);
++	    *low_out = gen_rtx_PLUS (Pmode, high, temp);
+ 	    break;
+ 	  }
+ 	/* fall through */
+@@ -3180,7 +3193,8 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+ 	/* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
+ 	{
+ 	  if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ())
+-	    *low_out = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, high, temp1));
++	    *low_out = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, high,
++							 temp));
+ 	  else
+ 	    {
+ 	      rtx low = gen_rtx_LO_SUM (Pmode, high, addr);
+@@ -7493,21 +7507,24 @@ loongarch_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
+      allowed, otherwise load the address into a register first.  */
+   if (use_sibcall_p)
+     {
+-      if (TARGET_CMODEL_EXTREME)
+-	{
+-	  emit_insn (gen_movdi_symbolic_off64 (temp1, fnaddr, temp2));
+-	  insn = emit_call_insn (gen_sibcall_internal (temp1, const0_rtx));
+-	}
+-      else
+-	insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
++      /* If TARGET_CMODEL_EXTREME, we cannot do a direct jump at all
++	 and const_call_insn_operand should have returned false.  */
++      gcc_assert (!TARGET_CMODEL_EXTREME);
++
++      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
+       SIBLING_CALL_P (insn) = 1;
+     }
+   else
+     {
+-      if (TARGET_CMODEL_EXTREME)
++      if (!TARGET_CMODEL_EXTREME)
++	loongarch_emit_move (temp1, fnaddr);
++      else if (la_opt_explicit_relocs == EXPLICIT_RELOCS_NONE)
+ 	emit_insn (gen_movdi_symbolic_off64 (temp1, fnaddr, temp2));
+       else
+-	loongarch_emit_move (temp1, fnaddr);
++	{
++	  emit_insn (gen_la_pcrel64_two_parts (temp1, temp2, fnaddr));
++	  emit_move_insn (temp1, gen_rtx_PLUS (Pmode, temp1, temp2));
++	}
+ 
+       emit_jump_insn (gen_indirect_jump (temp1));
+     }
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index add55e0af..9356194fe 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -84,6 +84,8 @@
+   UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1
+ 
+   UNSPEC_LOAD_SYMBOL_OFFSET64
++  UNSPEC_LA_PCREL_64_PART1
++  UNSPEC_LA_PCREL_64_PART2
+ ])
+ 
+ (define_c_enum "unspecv" [
+@@ -2224,6 +2226,24 @@
+  [(set_attr "mode" "DI")
+   (set_attr "insn_count" "5")])
+ 
++;; The 64-bit PC-relative part of address loading.
++;; Note that the psABI does not allow splitting it.
++(define_insn "la_pcrel64_two_parts"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(unspec:DI [(match_operand:DI 2 "") (pc)] UNSPEC_LA_PCREL_64_PART1))
++   (set (match_operand:DI 1 "register_operand" "=r")
++	(unspec:DI [(match_dup 2) (pc)] UNSPEC_LA_PCREL_64_PART2))]
++  "TARGET_ABI_LP64 && la_opt_explicit_relocs != EXPLICIT_RELOCS_NONE"
++  {
++    return "pcalau12i\t%0,%r2\n\t"
++	   "addi.d\t%1,$r0,%L2\n\t"
++	   "lu32i.d\t%1,%R2\n\t"
++	   "lu52i.d\t%1,%1,%H2";
++  }
++  [(set_attr "move_type" "move")
++   (set_attr "mode" "DI")
++   (set_attr "length" "16")])
++
+ ;; 32-bit Integer moves
+ 
+ (define_expand "movsi"
+diff --git a/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-1.C b/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-1.C
+new file mode 100644
+index 000000000..ff1f7c165
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-1.C
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fno-inline -march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=always -mdirect-extern-access" } */
++
++struct A {
++  virtual ~A();
++};
++
++struct B : virtual A {};
++void var() { B(); }
++
++/* { dg-final { scan-assembler "pcalau12i\t\[^\n\]*%pc_hi20\\(\\.LTHUNK0\\)\n\taddi\\.d\t\[^\n\]*%pc_lo12\\(\\\.LTHUNK0\\)\n\tlu32i\\.d\t\[^\n\]*%pc64_lo20\\(\\.LTHUNK0\\)\n\tlu52i\\.d\t\[^\n\]*%pc64_hi12\\(\\.LTHUNK0\\)" } } */
+diff --git a/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-2.C b/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-2.C
+new file mode 100644
+index 000000000..c9aa16b41
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-2.C
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fno-inline -march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=auto -mdirect-extern-access" } */
++
++#include "cmodel-extreme-mi-thunk-1.C"
++
++/* { dg-final { scan-assembler "pcalau12i\t\[^\n\]*%pc_hi20\\(\\.LTHUNK0\\)\n\taddi\\.d\t\[^\n\]*%pc_lo12\\(\\\.LTHUNK0\\)\n\tlu32i\\.d\t\[^\n\]*%pc64_lo20\\(\\.LTHUNK0\\)\n\tlu52i\\.d\t\[^\n\]*%pc64_hi12\\(\\.LTHUNK0\\)" } } */
+diff --git a/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-3.C b/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-3.C
+new file mode 100644
+index 000000000..afb86c8bd
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/cmodel-extreme-mi-thunk-3.C
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fno-inline -march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=none -mdirect-extern-access" } */
++
++#include "cmodel-extreme-mi-thunk-1.C"
++
++/* { dg-final { scan-assembler "la.local\t\[^\n\]*\\.LTHUNK0" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c
+new file mode 100644
+index 000000000..564ee4017
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=always -fdump-rtl-final" } */
++
++int a;
++extern int b;
++__thread int c __attribute__ ((tls_model ("local-exec")));
++__thread int d __attribute__ ((tls_model ("initial-exec")));
++__thread int e __attribute__ ((tls_model ("local-dynamic")));
++__thread int f __attribute__ ((tls_model ("global-dynamic")));
++
++void
++test (void)
++{
++  a = b + c + d + e + f;
++}
++
++/* a, b, d, e, f, and __tls_get_addr.  */
++/* { dg-final { scan-rtl-dump-times "la_pcrel64_two_parts" 6 "final" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c
+new file mode 100644
+index 000000000..ce834805f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=auto -fdump-rtl-final" } */
++
++#include "cmodel-extreme-1.c"
++
++/* a, b, d, e, f, and __tls_get_addr.  */
++/* { dg-final { scan-rtl-dump-times "la_pcrel64_two_parts" 6 "final" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c
+index db1e0f853..fdb4cf1ff 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c
+@@ -1,31 +1,33 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */
++/* { dg-options "-mabi=lp64d -O2 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
+ /* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
+ 
++#define NOIPA __attribute__ ((noipa))
++
+ extern void g (void);
+-void
++NOIPA void
+ f (void)
+ {}
+ 
+-static void
++NOIPA static void
+ l (void)
+ {}
+ 
+-void
++NOIPA void
+ test (void)
+ {
+   g ();
+ }
+ 
+-void
++NOIPA void
+ test1 (void)
+ {
+   f ();
+ }
+ 
+-void
++NOIPA void
+ test2 (void)
+ {
+   l ();
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c
+index 21bf81ae8..dfba3882b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c
+@@ -1,32 +1,7 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */
++/* { dg-options "-mabi=lp64d -O2 -fpic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
+ /* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
+ 
+-extern void g (void);
+-void
+-f (void)
+-{}
+-
+-static void
+-l (void)
+-{}
+-
+-void
+-test (void)
+-{
+-  g ();
+-}
+-
+-void
+-test1 (void)
+-{
+-  f ();
+-}
+-
+-void
+-test2 (void)
+-{
+-  l ();
+-}
++#include "func-call-extreme-1.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c
+index a4da44b4a..1f5234f83 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs=auto -mcmodel=extreme" } */
++/* { dg-options "-mabi=lp64d -O2 -fno-pic -fno-plt -mexplicit-relocs=auto -mcmodel=extreme" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
+ /* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c
+index 16b00f4c5..c42285006 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-4.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs=auto -mcmodel=extreme" } */
++/* { dg-options "-mabi=lp64d -O2 -fpic -fno-plt -mexplicit-relocs=auto -mcmodel=extreme" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
+ /* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
+-- 
+2.43.0
+
diff --git a/SME-0026-aarch64-Commonise-some-folding-code.patch b/0129-Backport-SME-aarch64-Commonise-some-folding-code.patch
similarity index 95%
rename from SME-0026-aarch64-Commonise-some-folding-code.patch
rename to 0129-Backport-SME-aarch64-Commonise-some-folding-code.patch
index f07a74d089be7f2dc29c00babf2d949d47cee381..01fb18da1632c3410f029e46b0889731b05859a2 100644
--- a/SME-0026-aarch64-Commonise-some-folding-code.patch
+++ b/0129-Backport-SME-aarch64-Commonise-some-folding-code.patch
@@ -1,7 +1,7 @@
-From 6d5aeb2fc98941930e373bf51a46a43a85ddddac Mon Sep 17 00:00:00 2001
+From 805a7aec3ddab49b92bf2d5c1a3e288860cc14bf Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 20 Oct 2022 10:37:35 +0100
-Subject: [PATCH 026/144] aarch64: Commonise some folding code
+Subject: [PATCH 030/157] [Backport][SME] aarch64: Commonise some folding code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=df99e9e42094dee0833ac38f53e7fae09b4d133c
 
@@ -79,5 +79,5 @@ index 63d1db776..0d130b871 100644
    gimple *fold_to_ptrue ();
    gimple *fold_to_vl_pred (unsigned int);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0129-LoongArch-Adjust-cost-of-vector_stmt-that-match-mult.patch b/0129-LoongArch-Adjust-cost-of-vector_stmt-that-match-mult.patch
new file mode 100644
index 0000000000000000000000000000000000000000..11c3c469916c25d5c4e6eb801dcff2e5d73c8aa4
--- /dev/null
+++ b/0129-LoongArch-Adjust-cost-of-vector_stmt-that-match-mult.patch
@@ -0,0 +1,173 @@
+From 825847768a29ec9d50e01015167002998150cb27 Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Fri, 26 Jan 2024 16:41:11 +0800
+Subject: [PATCH 129/188] LoongArch: Adjust cost of vector_stmt that match
+ multiply-add pattern.
+
+We found that when only 128-bit vectorization was enabled, 549.fotonik3d_r
+failed to vectorize effectively. For this reason, we adjust the cost of
+128-bit vector_stmt that match the multiply-add pattern to facilitate 128-bit
+vectorization.
+The experimental results show that after the modification, 549.fotonik3d_r
+performance can be improved by 9.77% under the 128-bit vectorization option.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_multiply_add_p): New.
+	(loongarch_vector_costs::add_stmt_cost): Adjust.
+
+gcc/testsuite/ChangeLog:
+
+	* gfortran.dg/vect/vect-10.f90: New test.
+---
+ gcc/config/loongarch/loongarch.cc          | 48 +++++++++++++++
+ gcc/testsuite/gfortran.dg/vect/vect-10.f90 | 71 ++++++++++++++++++++++
+ 2 files changed, 119 insertions(+)
+ create mode 100644 gcc/testsuite/gfortran.dg/vect/vect-10.f90
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index b8f0291ab..526ea0bcb 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4153,6 +4153,37 @@ loongarch_vector_costs::determine_suggested_unroll_factor (loop_vec_info loop_vi
+   return 1 << ceil_log2 (uf);
+ }
+ 
++/* Check if assign stmt rhs op comes from a multiply-add operation.  */
++static bool
++loongarch_multiply_add_p (vec_info *vinfo, stmt_vec_info stmt_info)
++{
++  gassign *assign = dyn_cast (stmt_info->stmt);
++  if (!assign)
++    return false;
++  tree_code code = gimple_assign_rhs_code (assign);
++  if (code != PLUS_EXPR && code != MINUS_EXPR)
++    return false;
++
++  auto is_mul_result = [&](int i)
++    {
++      tree rhs = gimple_op (assign, i);
++      if (TREE_CODE (rhs) != SSA_NAME)
++	return false;
++
++      stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs);
++      if (!def_stmt_info
++	  || STMT_VINFO_DEF_TYPE (def_stmt_info) != vect_internal_def)
++	return false;
++      gassign *rhs_assign = dyn_cast (def_stmt_info->stmt);
++      if (!rhs_assign || gimple_assign_rhs_code (rhs_assign) != MULT_EXPR)
++	return false;
++
++      return true;
++    };
++
++  return is_mul_result (1) || is_mul_result (2);
++}
++
+ unsigned
+ loongarch_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
+ 				       stmt_vec_info stmt_info, slp_tree,
+@@ -4165,6 +4196,23 @@ loongarch_vector_costs::add_stmt_cost (int count, vect_cost_for_stmt kind,
+     {
+       int stmt_cost = loongarch_builtin_vectorization_cost (kind, vectype,
+ 							    misalign);
++      if (vectype && stmt_info)
++	{
++	  gassign *assign = dyn_cast (STMT_VINFO_STMT (stmt_info));
++	  machine_mode mode = TYPE_MODE (vectype);
++
++	  /* We found through testing that this strategy (the stmt that
++	     matches the multiply-add pattern) has positive returns only
++	     when applied to the 128-bit vector stmt, so this restriction
++	     is currently made.  */
++	  if (kind == vector_stmt && GET_MODE_SIZE (mode) == 16 && assign)
++	    {
++	      if (!vect_is_reduction (stmt_info)
++		  && loongarch_multiply_add_p (m_vinfo, stmt_info))
++		stmt_cost = 0;
++	    }
++	}
++
+       retval = adjust_cost_for_freq (stmt_info, where, count * stmt_cost);
+       m_costs[where] += retval;
+ 
+diff --git a/gcc/testsuite/gfortran.dg/vect/vect-10.f90 b/gcc/testsuite/gfortran.dg/vect/vect-10.f90
+new file mode 100644
+index 000000000..b85bc2702
+--- /dev/null
++++ b/gcc/testsuite/gfortran.dg/vect/vect-10.f90
+@@ -0,0 +1,71 @@
++! { dg-do compile }
++! { dg-additional-options "-Ofast -mlsx -fvect-cost-model=dynamic" { target loongarch64*-*-* } }
++
++MODULE material_mod
++
++IMPLICIT NONE
++
++integer, parameter :: dfp = selected_real_kind (13, 99)
++integer, parameter :: rfp = dfp
++
++PUBLIC Mat_updateE, iepx, iepy, iepz
++
++PRIVATE
++
++integer, dimension (:, :, :), allocatable :: iepx, iepy, iepz
++real (kind = rfp), dimension (:), allocatable :: Dbdx, Dbdy, Dbdz
++integer :: imin, jmin, kmin
++integer, dimension (6) :: Exsize
++integer, dimension (6) :: Eysize
++integer, dimension (6) :: Ezsize
++integer, dimension (6) :: Hxsize
++integer, dimension (6) :: Hysize
++integer, dimension (6) :: Hzsize
++
++CONTAINS
++
++SUBROUTINE mat_updateE (nx, ny, nz, Hx, Hy, Hz, Ex, Ey, Ez)
++
++integer, intent (in) :: nx, ny, nz
++
++real (kind = rfp), intent (inout),                                         &
++  dimension (Exsize (1) : Exsize (2), Exsize (3) : Exsize (4), Exsize (5) : Exsize (6)) :: Ex
++real (kind = rfp), intent (inout),                                         &
++  dimension (Eysize (1) : Eysize (2), Eysize (3) : Eysize (4), Eysize (5) : Eysize (6)) :: Ey
++real (kind = rfp), intent (inout),                                         &
++  dimension (Ezsize (1) : Ezsize (2), Ezsize (3) : Ezsize (4), Ezsize (5) : Ezsize (6)) :: Ez
++real (kind = rfp), intent (in),                                            &
++  dimension (Hxsize (1) : Hxsize (2), Hxsize (3) : Hxsize (4), Hxsize (5) : Hxsize (6)) :: Hx
++real (kind = rfp), intent (in),                                            &
++  dimension (Hysize (1) : Hysize (2), Hysize (3) : Hysize (4), Hysize (5) : Hysize (6)) :: Hy
++real (kind = rfp), intent (in),                                            &
++  dimension (Hzsize (1) : Hzsize (2), Hzsize (3) : Hzsize (4), Hzsize (5) : Hzsize (6)) :: Hz
++
++integer :: i, j, k, mp
++
++do k = kmin, nz
++  do j = jmin, ny
++    do i = imin, nx
++      mp = iepx (i, j, k)
++      Ex (i, j, k) = Ex (i, j, k) +                                        &
++                  Dbdy (mp) * (Hz (i, j, k  ) - Hz (i, j-1, k)) +          &
++                  Dbdz (mp) * (Hy (i, j, k-1) - Hy (i, j  , k))
++
++      mp = iepy (i, j, k)
++      Ey (i, j, k) = Ey (i, j, k) +                                        &
++                  Dbdz (mp) * (Hx (i  , j, k) - Hx (i, j, k-1)) +          &
++                  Dbdx (mp) * (Hz (i-1, j, k) - Hz (i, j, k  ))
++
++      mp = iepz (i, j, k)
++      Ez (i, j, k) = Ez (i, j, k) +                                        &
++                  Dbdx (mp) * (Hy (i, j  , k) - Hy (i-1, j, k)) +          &
++                  Dbdy (mp) * (Hx (i, j-1, k) - Hx (i  , j, k))
++    end do
++  end do
++end do
++
++END SUBROUTINE mat_updateE
++
++END MODULE material_mod
++
++! { dg-final { scan-tree-dump-times "vectorized 1 loops" 1 "vect" { target loongarch64*-*-* } } }
+-- 
+2.43.0
+
diff --git a/SME-0027-aarch64-Add-a-Z-operand-modifier-for-SVE-registers.patch b/0130-Backport-SME-aarch64-Add-a-Z-operand-modifier-for-SV.patch
similarity index 83%
rename from SME-0027-aarch64-Add-a-Z-operand-modifier-for-SVE-registers.patch
rename to 0130-Backport-SME-aarch64-Add-a-Z-operand-modifier-for-SV.patch
index 18ba914a98af528efeeb063e17ca6f9212301fdf..bea41d99e9c149e114a0d6f9bc4acaa44f42c659 100644
--- a/SME-0027-aarch64-Add-a-Z-operand-modifier-for-SVE-registers.patch
+++ b/0130-Backport-SME-aarch64-Add-a-Z-operand-modifier-for-SV.patch
@@ -1,7 +1,8 @@
-From bf20604a5581444b0ed196c54f990c08f5de38a0 Mon Sep 17 00:00:00 2001
+From 8dc1eee26c61bea8aab62080bd961825142685f9 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 7 Nov 2023 15:22:57 +0000
-Subject: [PATCH 027/144] aarch64: Add a %Z operand modifier for SVE registers
+Subject: [PATCH 031/157] [Backport][SME] aarch64: Add a %Z operand modifier
+ for SVE registers
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=f40eac535bd55192cf93daca16235efbcd91157a
 
@@ -20,10 +21,10 @@ gcc/
  1 file changed, 6 insertions(+)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 6b76ef7db..fc7540023 100644
+index 3e83e48ec..fd1114b52 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -11684,6 +11684,10 @@ sizetochar (int size)
+@@ -11901,6 +11901,10 @@ sizetochar (int size)
       'N':		Take the duplicated element in a vector constant
  			and print the negative of it in decimal.
       'b/h/s/d/q':	Print a scalar FP/SIMD register name.
@@ -34,7 +35,7 @@ index 6b76ef7db..fc7540023 100644
       'S/T/U/V':		Print a FP/SIMD register name for a register list.
  			The register printed is the FP/SIMD register name
  			of X + 0/1/2/3 for S/T/U/V.
-@@ -11856,6 +11860,8 @@ aarch64_print_operand (FILE *f, rtx x, int code)
+@@ -12073,6 +12077,8 @@ aarch64_print_operand (FILE *f, rtx x, int code)
      case 's':
      case 'd':
      case 'q':
@@ -44,5 +45,5 @@ index 6b76ef7db..fc7540023 100644
  	{
  	  output_operand_lossage ("incompatible floating point / vector register operand for '%%%c'", code);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0130-LoongArch-Fix-incorrect-return-type-for-frecipe-frsq.patch b/0130-LoongArch-Fix-incorrect-return-type-for-frecipe-frsq.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8777e833526392c0f1c33841b65b46176fa34fe7
--- /dev/null
+++ b/0130-LoongArch-Fix-incorrect-return-type-for-frecipe-frsq.patch
@@ -0,0 +1,113 @@
+From 99a48268961f05e87f4f9d6f3f22903869f50af7 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Wed, 24 Jan 2024 17:19:32 +0800
+Subject: [PATCH 130/188] LoongArch: Fix incorrect return type for
+ frecipe/frsqrte intrinsic functions
+
+gcc/ChangeLog:
+
+	* config/loongarch/larchintrin.h
+	(__frecipe_s): Update function return type.
+	(__frecipe_d): Ditto.
+	(__frsqrte_s): Ditto.
+	(__frsqrte_d): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/larch-frecipe-intrinsic.c: New test.
+---
+ gcc/config/loongarch/larchintrin.h            | 16 +++++-----
+ .../loongarch/larch-frecipe-intrinsic.c       | 30 +++++++++++++++++++
+ 2 files changed, 38 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/larch-frecipe-intrinsic.c
+
+diff --git a/gcc/config/loongarch/larchintrin.h b/gcc/config/loongarch/larchintrin.h
+index 22035e767..6582dfe49 100644
+--- a/gcc/config/loongarch/larchintrin.h
++++ b/gcc/config/loongarch/larchintrin.h
+@@ -336,38 +336,38 @@ __iocsrwr_d (unsigned long int _1, unsigned int _2)
+ #ifdef __loongarch_frecipe
+ /* Assembly instruction format: fd, fj.  */
+ /* Data types in instruction templates:  SF, SF.  */
+-extern __inline void
++extern __inline float
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frecipe_s (float _1)
+ {
+-  __builtin_loongarch_frecipe_s ((float) _1);
++  return (float) __builtin_loongarch_frecipe_s ((float) _1);
+ }
+ 
+ /* Assembly instruction format: fd, fj.  */
+ /* Data types in instruction templates:  DF, DF.  */
+-extern __inline void
++extern __inline double
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frecipe_d (double _1)
+ {
+-  __builtin_loongarch_frecipe_d ((double) _1);
++  return (double) __builtin_loongarch_frecipe_d ((double) _1);
+ }
+ 
+ /* Assembly instruction format: fd, fj.  */
+ /* Data types in instruction templates:  SF, SF.  */
+-extern __inline void
++extern __inline float
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frsqrte_s (float _1)
+ {
+-  __builtin_loongarch_frsqrte_s ((float) _1);
++  return (float) __builtin_loongarch_frsqrte_s ((float) _1);
+ }
+ 
+ /* Assembly instruction format: fd, fj.  */
+ /* Data types in instruction templates:  DF, DF.  */
+-extern __inline void
++extern __inline double
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frsqrte_d (double _1)
+ {
+-  __builtin_loongarch_frsqrte_d ((double) _1);
++  return (double) __builtin_loongarch_frsqrte_d ((double) _1);
+ }
+ #endif
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/larch-frecipe-intrinsic.c b/gcc/testsuite/gcc.target/loongarch/larch-frecipe-intrinsic.c
+new file mode 100644
+index 000000000..6ce2bde0a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/larch-frecipe-intrinsic.c
+@@ -0,0 +1,30 @@
++/* Test intrinsics for frecipe.{s/d} and frsqrte.{s/d} instructions */
++/* { dg-do compile } */
++/* { dg-options "-mfrecipe -O2" } */
++/* { dg-final { scan-assembler-times "test_frecipe_s:.*frecipe\\.s.*test_frecipe_s" 1 } } */
++/* { dg-final { scan-assembler-times "test_frecipe_d:.*frecipe\\.d.*test_frecipe_d" 1 } } */
++/* { dg-final { scan-assembler-times "test_frsqrte_s:.*frsqrte\\.s.*test_frsqrte_s" 1 } } */
++/* { dg-final { scan-assembler-times "test_frsqrte_d:.*frsqrte\\.d.*test_frsqrte_d" 1 } } */
++
++#include 
++
++float
++test_frecipe_s (float _1)
++{
++  return __frecipe_s (_1);
++}
++double
++test_frecipe_d (double _1)
++{
++  return __frecipe_d (_1);
++}
++float
++test_frsqrte_s (float _1)
++{
++  return __frsqrte_s (_1);
++}
++double
++test_frsqrte_d (double _1)
++{
++  return __frsqrte_d (_1);
++}
+-- 
+2.43.0
+
diff --git a/SME-0028-mode-switching-Remove-unused-bbnum-field.patch b/0131-Backport-SME-mode-switching-Remove-unused-bbnum-fiel.patch
similarity index 95%
rename from SME-0028-mode-switching-Remove-unused-bbnum-field.patch
rename to 0131-Backport-SME-mode-switching-Remove-unused-bbnum-fiel.patch
index b2410932b39e3ecf6c93c8cf3a9ea95a51ba685a..15c8418e09bd7e042ed41f16e756b0339ea9cc5d 100644
--- a/SME-0028-mode-switching-Remove-unused-bbnum-field.patch
+++ b/0131-Backport-SME-mode-switching-Remove-unused-bbnum-fiel.patch
@@ -1,7 +1,8 @@
-From 252608ff3afef653a70dce52d3ce69f8520a25c5 Mon Sep 17 00:00:00 2001
+From 8a43bd7885ce479cadb0643fbb0fc22d2b0ffced Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sun, 5 Nov 2023 18:28:46 +0000
-Subject: [PATCH 028/144] mode-switching: Remove unused bbnum field
+Subject: [PATCH 032/157] [Backport][SME] mode-switching: Remove unused bbnum
+ field
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=2d55ed2b8a754d7279cd002941f7cb481f0fd133
 
@@ -99,5 +100,5 @@ index 6e3f1dc65..4cf8f03a0 100644
  	      if (last_mode != no_mode)
  		for (i = 0; i < no_mode; i++)
 -- 
-2.19.1
+2.33.0
 
diff --git a/0131-LoongArch-Fix-an-ODR-violation.patch b/0131-LoongArch-Fix-an-ODR-violation.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9961ab4dfcc0118a48a0d05c2088364d95d92dc8
--- /dev/null
+++ b/0131-LoongArch-Fix-an-ODR-violation.patch
@@ -0,0 +1,60 @@
+From 89ebd7012ecf49c60bad8dd018e0aa573b58844b Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 2 Feb 2024 05:37:38 +0800
+Subject: [PATCH 131/188] LoongArch: Fix an ODR violation
+
+When bootstrapping GCC 14 with --with-build-config=bootstrap-lto, an ODR
+violation is detected:
+
+    ../../gcc/config/loongarch/loongarch-opts.cc:57: warning:
+    'abi_minimal_isa' violates the C++ One Definition Rule [-Wodr]
+    57 | abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES];
+    ../../gcc/config/loongarch/loongarch-def.cc:186: note:
+    'abi_minimal_isa' was previously declared here
+    186 |   abi_minimal_isa = array,
+    ../../gcc/config/loongarch/loongarch-def.cc:186: note:
+    code may be misoptimized unless '-fno-strict-aliasing' is used
+
+Fix it by adding a proper declaration of abi_minimal_isa into
+loongarch-def.h and remove the ODR-violating local declaration in
+loongarch-opts.cc.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-def.h (abi_minimal_isa): Declare.
+	* config/loongarch/loongarch-opts.cc (abi_minimal_isa): Remove
+	the ODR-violating locale declaration.
+---
+ gcc/config/loongarch/loongarch-def.h   | 3 +++
+ gcc/config/loongarch/loongarch-opts.cc | 2 --
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index 28da3ae5f..fdcf43fc7 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -203,5 +203,8 @@ extern loongarch_def_array
+   loongarch_cpu_align;
+ extern loongarch_def_array
+   loongarch_cpu_rtx_cost_data;
++extern loongarch_def_array<
++  loongarch_def_array,
++  N_ABI_BASE_TYPES> abi_minimal_isa;
+ 
+ #endif /* LOONGARCH_DEF_H */
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index a2b069d83..2ea3972d1 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -53,8 +53,6 @@ static const int tm_multilib_list[] = { TM_MULTILIB_LIST };
+ static int enabled_abi_types[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = { 0 };
+ 
+ #define isa_required(ABI) (abi_minimal_isa[(ABI).base][(ABI).ext])
+-extern "C" const struct loongarch_isa
+-abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES];
+ 
+ static inline int
+ is_multilib_enabled (struct loongarch_abi abi)
+-- 
+2.43.0
+
diff --git a/SME-0029-mode-switching-Tweak-the-macro-hook-documentation.patch b/0132-Backport-SME-mode-switching-Tweak-the-macro-hook-doc.patch
similarity index 96%
rename from SME-0029-mode-switching-Tweak-the-macro-hook-documentation.patch
rename to 0132-Backport-SME-mode-switching-Tweak-the-macro-hook-doc.patch
index 608d4554782cca232e6c6c475917ab87302481ce..e3c0ff77d5ad8bb78e8aeb624b8de776aa531bc0 100644
--- a/SME-0029-mode-switching-Tweak-the-macro-hook-documentation.patch
+++ b/0132-Backport-SME-mode-switching-Tweak-the-macro-hook-doc.patch
@@ -1,7 +1,8 @@
-From bc9d4a3d4cd7c608854be398cf1365badde704da Mon Sep 17 00:00:00 2001
+From c980e40d2c27ac3ee33c9b6aea6d2b0d4080852e Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:54 +0000
-Subject: [PATCH 029/144] mode-switching: Tweak the macro/hook documentation
+Subject: [PATCH 033/157] [Backport][SME] mode-switching: Tweak the macro/hook
+ documentation
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=8479a3759025961f80cf0cd6bb3f127e09d0510d
 
@@ -40,10 +41,10 @@ gcc/
  3 files changed, 84 insertions(+), 54 deletions(-)
 
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index c5006afc0..53a40e467 100644
+index 851d31c18..553aa4cf2 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -10444,7 +10444,7 @@ The following macros control mode switching optimizations:
+@@ -10234,7 +10234,7 @@ The following macros control mode switching optimizations:
  
  @defmac OPTIMIZE_MODE_SWITCHING (@var{entity})
  Define this macro if the port needs extra instructions inserted for mode
@@ -52,7 +53,7 @@ index c5006afc0..53a40e467 100644
  
  For an example, the SH4 can perform both single and double precision
  floating point operations, but to perform a single precision operation,
-@@ -10454,73 +10454,88 @@ purpose register as a scratch register, hence these FPSCR sets have to
+@@ -10244,73 +10244,88 @@ purpose register as a scratch register, hence these FPSCR sets have to
  be inserted before reload, i.e.@: you cannot put this into instruction emitting
  or @code{TARGET_MACHINE_DEPENDENT_REORG}.
  
@@ -168,10 +169,10 @@ index c5006afc0..53a40e467 100644
  (@var{entity}, @var{n}) shall be a bijection in 0 @dots{}
  @code{num_modes_for_mode_switching[@var{entity}] - 1}.
 diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
-index f869ddd5e..cdfa36564 100644
+index ac95cdf7a..9ec11b15c 100644
 --- a/gcc/doc/tm.texi.in
 +++ b/gcc/doc/tm.texi.in
-@@ -7089,7 +7089,7 @@ The following macros control mode switching optimizations:
+@@ -6879,7 +6879,7 @@ The following macros control mode switching optimizations:
  
  @defmac OPTIMIZE_MODE_SWITCHING (@var{entity})
  Define this macro if the port needs extra instructions inserted for mode
@@ -180,7 +181,7 @@ index f869ddd5e..cdfa36564 100644
  
  For an example, the SH4 can perform both single and double precision
  floating point operations, but to perform a single precision operation,
-@@ -7099,27 +7099,31 @@ purpose register as a scratch register, hence these FPSCR sets have to
+@@ -6889,27 +6889,31 @@ purpose register as a scratch register, hence these FPSCR sets have to
  be inserted before reload, i.e.@: you cannot put this into instruction emitting
  or @code{TARGET_MACHINE_DEPENDENT_REORG}.
  
@@ -223,10 +224,10 @@ index f869ddd5e..cdfa36564 100644
  
  @hook TARGET_MODE_EMIT
 diff --git a/gcc/target.def b/gcc/target.def
-index d85adf36a..be8a99172 100644
+index c9bb2b4c2..b87b0f927 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -6935,51 +6935,62 @@ DEFHOOK
+@@ -6992,51 +6992,62 @@ DEFHOOK
   "Generate one or more insns to set @var{entity} to @var{mode}.\n\
  @var{hard_reg_live} is the set of hard registers live at the point where\n\
  the insn(s) are to be inserted. @var{prev_moxde} indicates the mode\n\
@@ -306,5 +307,5 @@ index d85adf36a..be8a99172 100644
  (@var{entity}, @var{n}) shall be a bijection in 0 @dots{}\n\
  @code{num_modes_for_mode_switching[@var{entity}] - 1}.",
 -- 
-2.19.1
+2.33.0
 
diff --git a/0132-LoongArch-testsuite-Fix-gcc.dg-vect-vect-reduc-mul_-.patch b/0132-LoongArch-testsuite-Fix-gcc.dg-vect-vect-reduc-mul_-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7a85dfc104c8f6e7bac5a72da9d5993389cacfe6
--- /dev/null
+++ b/0132-LoongArch-testsuite-Fix-gcc.dg-vect-vect-reduc-mul_-.patch
@@ -0,0 +1,359 @@
+From f4a447bff86c7f5598a7461e353a3c6f4a101ed4 Mon Sep 17 00:00:00 2001
+From: Li Wei 
+Date: Fri, 2 Feb 2024 09:42:28 +0800
+Subject: [PATCH 132/188] LoongArch: testsuite: Fix
+ gcc.dg/vect/vect-reduc-mul_{1, 2}.c FAIL.
+
+This FAIL was introduced from r14-6908. The reason is that when merging
+constant vector permutation implementations, the 128-bit matching situation
+was not fully considered. In fact, the expansion of 128-bit vectors after
+merging only supports value-based 4 elements set shuffle, so this time is a
+complete implementation of the entire 128-bit vector constant permutation,
+and some structural adjustments have also been made to the code.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_expand_vselect): Adjust.
+	(loongarch_expand_vselect_vconcat): Ditto.
+	(loongarch_try_expand_lsx_vshuf_const): New, use vshuf to implement
+	all 128-bit constant permutation situations.
+	(loongarch_expand_lsx_shuffle): Adjust and rename function name.
+	(loongarch_is_imm_set_shuffle): Renamed function name.
+	(loongarch_expand_vec_perm_even_odd): Function forward declaration.
+	(loongarch_expand_vec_perm_even_odd_1): Add implement for 128-bit
+	extract-even and extract-odd permutations.
+	(loongarch_is_odd_extraction): Delete.
+	(loongarch_is_even_extraction): Ditto.
+	(loongarch_expand_vec_perm_const): Adjust.
+---
+ gcc/config/loongarch/loongarch.cc | 218 ++++++++++++++++++++++--------
+ 1 file changed, 163 insertions(+), 55 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 526ea0bcb..a0e0906af 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -8025,7 +8025,8 @@ struct expand_vec_perm_d
+ 
+ static bool
+ loongarch_expand_vselect (rtx target, rtx op0,
+-			  const unsigned char *perm, unsigned nelt)
++			  const unsigned char *perm, unsigned nelt,
++			  bool testing_p)
+ {
+   rtx rperm[MAX_VECT_LEN], x;
+   rtx_insn *insn;
+@@ -8044,6 +8045,9 @@ loongarch_expand_vselect (rtx target, rtx op0,
+       remove_insn (insn);
+       return false;
+     }
++
++  if (testing_p)
++      remove_insn (insn);
+   return true;
+ }
+ 
+@@ -8051,7 +8055,8 @@ loongarch_expand_vselect (rtx target, rtx op0,
+ 
+ static bool
+ loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
+-				  const unsigned char *perm, unsigned nelt)
++				  const unsigned char *perm, unsigned nelt,
++				  bool testing_p)
+ {
+   machine_mode v2mode;
+   rtx x;
+@@ -8059,7 +8064,7 @@ loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
+   if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode))
+     return false;
+   x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
+-  return loongarch_expand_vselect (target, x, perm, nelt);
++  return loongarch_expand_vselect (target, x, perm, nelt, testing_p);
+ }
+ 
+ static tree
+@@ -8315,11 +8320,87 @@ loongarch_set_handled_components (sbitmap components)
+ #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+ #undef TARGET_ASM_ALIGNED_DI_OP
+ #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
++
++/* Use the vshuf instruction to implement all 128-bit constant vector
++   permuatation.  */
++
++static bool
++loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
++{
++  int i;
++  rtx target, op0, op1, sel, tmp;
++  rtx rperm[MAX_VECT_LEN];
++
++  if (GET_MODE_SIZE (d->vmode) == 16)
++    {
++      target = d->target;
++      op0 = d->op0;
++      op1 = d->one_vector_p ? d->op0 : d->op1;
++
++      if (GET_MODE (op0) != GET_MODE (op1)
++	  || GET_MODE (op0) != GET_MODE (target))
++	return false;
++
++      if (d->testing_p)
++	return true;
++
++      for (i = 0; i < d->nelt; i += 1)
++	  rperm[i] = GEN_INT (d->perm[i]);
++
++      if (d->vmode == E_V2DFmode)
++	{
++	  sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm));
++	  tmp = simplify_gen_subreg (E_V2DImode, d->target, d->vmode, 0);
++	  emit_move_insn (tmp, sel);
++	}
++      else if (d->vmode == E_V4SFmode)
++	{
++	  sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm));
++	  tmp = simplify_gen_subreg (E_V4SImode, d->target, d->vmode, 0);
++	  emit_move_insn (tmp, sel);
++	}
++      else
++	{
++	  sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm));
++	  emit_move_insn (d->target, sel);
++	}
++
++      switch (d->vmode)
++	{
++	case E_V2DFmode:
++	  emit_insn (gen_lsx_vshuf_d_f (target, target, op1, op0));
++	  break;
++	case E_V2DImode:
++	  emit_insn (gen_lsx_vshuf_d (target, target, op1, op0));
++	  break;
++	case E_V4SFmode:
++	  emit_insn (gen_lsx_vshuf_w_f (target, target, op1, op0));
++	  break;
++	case E_V4SImode:
++	  emit_insn (gen_lsx_vshuf_w (target, target, op1, op0));
++	  break;
++	case E_V8HImode:
++	  emit_insn (gen_lsx_vshuf_h (target, target, op1, op0));
++	  break;
++	case E_V16QImode:
++	  emit_insn (gen_lsx_vshuf_b (target, op1, op0, target));
++	  break;
++	default:
++	  break;
++	}
++
++      return true;
++    }
++  return false;
++}
++
+ /* Construct (set target (vec_select op0 (parallel selector))) and
+-   return true if that's a valid instruction in the active ISA.  */
++   return true if that's a valid instruction in the active ISA.
++   In fact, it matches the special constant vector with repeated
++   4-element sets.  */
+ 
+ static bool
+-loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d)
++loongarch_is_imm_set_shuffle (struct expand_vec_perm_d *d)
+ {
+   rtx x, elts[MAX_VECT_LEN];
+   rtvec v;
+@@ -8338,6 +8419,9 @@ loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d)
+   if (!loongarch_const_vector_shuffle_set_p (x, d->vmode))
+     return false;
+ 
++  if (d->testing_p)
++    return true;
++
+   x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x);
+   x = gen_rtx_SET (d->target, x);
+ 
+@@ -8350,6 +8434,27 @@ loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d)
+   return true;
+ }
+ 
++static bool
++loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *);
++
++/* Try to match and expand all kinds of 128-bit const vector permutation
++   cases.  */
++
++static bool
++loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d)
++{
++  if (!ISA_HAS_LSX && GET_MODE_SIZE (d->vmode) != 16)
++    return false;
++
++  if (loongarch_is_imm_set_shuffle (d))
++      return true;
++
++  if (loongarch_expand_vec_perm_even_odd (d))
++    return true;
++
++  return loongarch_try_expand_lsx_vshuf_const (d);
++}
++
+ /* Try to simplify a two vector permutation using 2 intra-lane interleave
+    insns and cross-lane shuffle for 32-byte vectors.  */
+ 
+@@ -8442,7 +8547,7 @@ loongarch_expand_vec_perm_interleave (struct expand_vec_perm_d *d)
+   return true;
+ }
+ 
+-/* Implement extract-even and extract-odd permutations.  */
++/* Implement 128-bit and 256-bit extract-even and extract-odd permutations.  */
+ 
+ static bool
+ loongarch_expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
+@@ -8457,6 +8562,50 @@ loongarch_expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
+ 
+   switch (d->vmode)
+     {
++    /* 128 bit.  */
++    case E_V2DFmode:
++      if (odd)
++	emit_insn (gen_lsx_vilvh_d_f (d->target, d->op0, d->op1));
++      else
++	emit_insn (gen_lsx_vilvl_d_f (d->target, d->op0, d->op1));
++      break;
++
++    case E_V2DImode:
++      if (odd)
++	emit_insn (gen_lsx_vilvh_d (d->target, d->op0, d->op1));
++      else
++	emit_insn (gen_lsx_vilvl_d (d->target, d->op0, d->op1));
++      break;
++
++    case E_V4SFmode:
++      if (odd)
++	emit_insn (gen_lsx_vpickod_w_f (d->target, d->op0, d->op1));
++      else
++	emit_insn (gen_lsx_vpickev_w_f (d->target, d->op0, d->op1));
++      break;
++
++    case E_V4SImode:
++      if (odd)
++	emit_insn (gen_lsx_vpickod_w (d->target, d->op0, d->op1));
++      else
++	emit_insn (gen_lsx_vpickev_w (d->target, d->op0, d->op1));
++      break;
++
++    case E_V8HImode:
++      if (odd)
++	emit_insn (gen_lsx_vpickod_h (d->target, d->op0, d->op1));
++      else
++	emit_insn (gen_lsx_vpickev_h (d->target, d->op0, d->op1));
++      break;
++
++    case E_V16QImode:
++      if (odd)
++	emit_insn (gen_lsx_vpickod_b (d->target, d->op0, d->op1));
++      else
++	emit_insn (gen_lsx_vpickev_b (d->target, d->op0, d->op1));
++      break;
++
++    /* 256 bit.  */
+     case E_V4DFmode:
+       /* Shuffle the lanes around into { 0 4 2 6 } and { 1 5 3 7 }.  */
+       if (odd)
+@@ -8531,7 +8680,7 @@ static bool
+ loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
+ {
+   unsigned i, odd, nelt = d->nelt;
+-  if (!ISA_HAS_LASX)
++  if (!ISA_HAS_LASX && !ISA_HAS_LSX)
+     return false;
+ 
+   odd = d->perm[0];
+@@ -8994,44 +9143,6 @@ loongarch_is_quad_duplicate (struct expand_vec_perm_d *d)
+   return result;
+ }
+ 
+-static bool
+-loongarch_is_odd_extraction (struct expand_vec_perm_d *d)
+-{
+-  bool result = true;
+-  unsigned char buf = 1;
+-
+-  for (int i = 0; i < d->nelt; i += 1)
+-    {
+-      if (buf != d->perm[i])
+-	{
+-	  result = false;
+-	  break;
+-	}
+-      buf += 2;
+-    }
+-
+-  return result;
+-}
+-
+-static bool
+-loongarch_is_even_extraction (struct expand_vec_perm_d *d)
+-{
+-  bool result = true;
+-  unsigned char buf = 0;
+-
+-  for (int i = 0; i < d->nelt; i += 1)
+-    {
+-      if (buf != d->perm[i])
+-	{
+-	  result = false;
+-	  break;
+-	}
+-      buf += 2;
+-    }
+-
+-  return result;
+-}
+-
+ static bool
+ loongarch_is_extraction_permutation (struct expand_vec_perm_d *d)
+ {
+@@ -9288,32 +9399,29 @@ loongarch_expand_vec_perm_const (struct expand_vec_perm_d *d)
+ 	  for (i = 1; i < d->nelt; i += 2)
+ 	    perm2[i] += d->nelt;
+ 	  if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
+-						perm2, d->nelt))
++						perm2, d->nelt, d->testing_p))
+ 	    return true;
+ 	}
+       else
+ 	{
+ 	  if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
+-						d->perm, d->nelt))
++						d->perm, d->nelt,
++						d->testing_p))
+ 	    return true;
+ 
+ 	  /* Try again with swapped operands.  */
+ 	  for (i = 0; i < d->nelt; ++i)
+ 	    perm2[i] = (d->perm[i] + d->nelt) & (2 * d->nelt - 1);
+ 	  if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0,
+-						perm2, d->nelt))
++						perm2, d->nelt, d->testing_p))
+ 	    return true;
+ 	}
+ 
+-      if (loongarch_expand_lsx_shuffle (d))
++      if (loongarch_is_imm_set_shuffle (d))
+ 	return true;
+ 
+-      if (loongarch_is_odd_extraction (d)
+-	  || loongarch_is_even_extraction (d))
+-	{
+-	  if (loongarch_expand_vec_perm_even_odd (d))
+-	    return true;
+-	}
++      if (loongarch_expand_vec_perm_even_odd (d))
++	return true;
+ 
+       if (loongarch_is_lasx_lowpart_interleave (d)
+ 	  || loongarch_is_lasx_lowpart_interleave_2 (d)
+-- 
+2.43.0
+
diff --git a/SME-0030-mode-switching-Add-note-problem.patch b/0133-Backport-SME-mode-switching-Add-note-problem.patch
similarity index 86%
rename from SME-0030-mode-switching-Add-note-problem.patch
rename to 0133-Backport-SME-mode-switching-Add-note-problem.patch
index d396b249fc044d2c4617e39a8d91bb3ed990b0a1..21508da82e3e791c759a25f2e61f9c0cd37b8a87 100644
--- a/SME-0030-mode-switching-Add-note-problem.patch
+++ b/0133-Backport-SME-mode-switching-Add-note-problem.patch
@@ -1,7 +1,7 @@
-From 85a345f1cb0c9398959b015f53fa7428ede796bf Mon Sep 17 00:00:00 2001
+From 7ab54a765239bdd2ce548cffdd5b83f9c20f69da Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:55 +0000
-Subject: [PATCH 030/144] mode-switching: Add note problem
+Subject: [PATCH 034/157] [Backport][SME] mode-switching: Add note problem
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=3cd3a09b3f91a1d023cb180763d40598d6bb274b
 
@@ -31,5 +31,5 @@ index 4cf8f03a0..2a9f98793 100644
  
    /* Create the bitmap vectors.  */
 -- 
-2.19.1
+2.33.0
 
diff --git a/0133-LoongArch-Avoid-out-of-bounds-access-in-loongarch_sy.patch b/0133-LoongArch-Avoid-out-of-bounds-access-in-loongarch_sy.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b55704356579284cac453ba822ef9818f51e184d
--- /dev/null
+++ b/0133-LoongArch-Avoid-out-of-bounds-access-in-loongarch_sy.patch
@@ -0,0 +1,72 @@
+From 6364467c68ac1ee2b54b866f462fb670a43029fa Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 2 Feb 2024 08:51:08 +0800
+Subject: [PATCH 133/188] LoongArch: Avoid out-of-bounds access in
+ loongarch_symbol_insns
+
+We call loongarch_symbol_insns with mode = MAX_MACHINE_MODE sometimes.
+But in loongarch_symbol_insns:
+
+    if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))
+      return 0;
+
+And LSX_SUPPORTED_MODE_P is defined as:
+
+    #define LSX_SUPPORTED_MODE_P(MODE) \
+      (ISA_HAS_LSX \
+       && GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG ... ...
+
+GET_MODE_SIZE is expanded to a call to mode_to_bytes, which is defined:
+
+    ALWAYS_INLINE poly_uint16
+    mode_to_bytes (machine_mode mode)
+    {
+    #if GCC_VERSION >= 4001
+      return (__builtin_constant_p (mode)
+	  ? mode_size_inline (mode) : mode_size[mode]);
+    #else
+      return mode_size[mode];
+    #endif
+    }
+
+There is an assertion in mode_size_inline:
+
+    gcc_assert (mode >= 0 && mode < NUM_MACHINE_MODES);
+
+Note that NUM_MACHINE_MODES = MAX_MACHINE_MODE (emitted by genmodes.cc),
+thus if __builtin_constant_p (mode) is evaluated true (it happens when
+GCC is bootstrapped with LTO+PGO), the assertion will be triggered and
+cause an ICE.  OTOH if __builtin_constant_p (mode) is evaluated false,
+mode_size[mode] is still an out-of-bound array access (the length or the
+mode_size array is NUM_MACHINE_MODES).
+
+So we shouldn't call LSX_SUPPORTED_MODE_P or LASX_SUPPORTED_MODE_P with
+MAX_MACHINE_MODE in loongarch_symbol_insns.  This is very similar to a
+MIPS bug PR98491 fixed by me about 3 years ago.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_symbol_insns): Do not
+	use LSX_SUPPORTED_MODE_P or LASX_SUPPORTED_MODE_P if mode is
+	MAX_MACHINE_MODE.
+---
+ gcc/config/loongarch/loongarch.cc | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index a0e0906af..d23b09cc5 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2004,7 +2004,8 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
+ {
+   /* LSX LD.* and ST.* cannot support loading symbols via an immediate
+      operand.  */
+-  if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))
++  if (mode != MAX_MACHINE_MODE
++      && (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)))
+     return 0;
+ 
+   switch (type)
+-- 
+2.43.0
+
diff --git a/SME-0031-mode-switching-Avoid-quadractic-list-operation.patch b/0134-Backport-SME-mode-switching-Avoid-quadractic-list-op.patch
similarity index 94%
rename from SME-0031-mode-switching-Avoid-quadractic-list-operation.patch
rename to 0134-Backport-SME-mode-switching-Avoid-quadractic-list-op.patch
index ba2e5ce846028ec1d025ca936b7a165320065816..e74c104c4b2b4ccddbe074f7f24931bf467c685c 100644
--- a/SME-0031-mode-switching-Avoid-quadractic-list-operation.patch
+++ b/0134-Backport-SME-mode-switching-Avoid-quadractic-list-op.patch
@@ -1,7 +1,8 @@
-From aba7ef0e4ee1283e7d7711f666360e0dd2baf151 Mon Sep 17 00:00:00 2001
+From a2a8b560c1749293d3b6d027e20753a7ea042c80 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:55 +0000
-Subject: [PATCH 031/144] mode-switching: Avoid quadractic list operation
+Subject: [PATCH 035/157] [Backport][SME] mode-switching: Avoid quadractic list
+ operation
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=174ee5115a3004d3664165e9d619535b579111d4
 
@@ -85,5 +86,5 @@ index 2a9f98793..6a13951c9 100644
  		for (i = 0; i < no_mode; i++)
  		  clear_mode_bit (transp[bb->index], j, i);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0134-LoongArch-Fix-wrong-LSX-FP-vector-negation.patch b/0134-LoongArch-Fix-wrong-LSX-FP-vector-negation.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c4edf9872d34f9eca1d0279095a503f7fcb7532f
--- /dev/null
+++ b/0134-LoongArch-Fix-wrong-LSX-FP-vector-negation.patch
@@ -0,0 +1,122 @@
+From 659b51a6aed60f389009eff1e04645a47e55a45c Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 3 Feb 2024 03:16:14 +0800
+Subject: [PATCH 134/188] LoongArch: Fix wrong LSX FP vector negation
+
+We expanded (neg x) to (minus const0 x) for LSX FP vectors, this is
+wrong because -0.0 is not 0 - 0.0.  This causes some Python tests to
+fail when Python is built with LSX enabled.
+
+Use the vbitrevi.{d/w} instructions to simply reverse the sign bit
+instead.  We are already doing this for LASX and now we can unify them
+into simd.md.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lsx.md (neg2): Remove the
+	incorrect expand.
+	* config/loongarch/simd.md (simdfmt_as_i): New define_mode_attr.
+	(elmsgnbit): Likewise.
+	(neg2): New define_insn.
+	* config/loongarch/lasx.md (negv4df2, negv8sf2): Remove as they
+	are now instantiated in simd.md.
+---
+ gcc/config/loongarch/lasx.md | 16 ----------------
+ gcc/config/loongarch/lsx.md  | 11 -----------
+ gcc/config/loongarch/simd.md | 18 ++++++++++++++++++
+ 3 files changed, 18 insertions(+), 27 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 946811e1a..38f35bad6 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -3028,22 +3028,6 @@
+   [(set_attr "type" "simd_logic")
+    (set_attr "mode" "V8SF")])
+ 
+-(define_insn "negv4df2"
+-  [(set (match_operand:V4DF 0 "register_operand" "=f")
+-	(neg:V4DF (match_operand:V4DF 1 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-  "xvbitrevi.d\t%u0,%u1,63"
+-  [(set_attr "type" "simd_logic")
+-   (set_attr "mode" "V4DF")])
+-
+-(define_insn "negv8sf2"
+-  [(set (match_operand:V8SF 0 "register_operand" "=f")
+-	(neg:V8SF (match_operand:V8SF 1 "register_operand" "f")))]
+-  "ISA_HAS_LASX"
+-  "xvbitrevi.w\t%u0,%u1,31"
+-  [(set_attr "type" "simd_logic")
+-   (set_attr "mode" "V8SF")])
+-
+ (define_insn "xvfmadd4"
+   [(set (match_operand:FLASX 0 "register_operand" "=f")
+ 	(fma:FLASX (match_operand:FLASX 1 "register_operand" "f")
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 612377436..d5aa3f46f 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -728,17 +728,6 @@
+   DONE;
+ })
+ 
+-(define_expand "neg2"
+-  [(set (match_operand:FLSX 0 "register_operand")
+-	(neg:FLSX (match_operand:FLSX 1 "register_operand")))]
+-  "ISA_HAS_LSX"
+-{
+-  rtx reg = gen_reg_rtx (mode);
+-  emit_move_insn (reg, CONST0_RTX (mode));
+-  emit_insn (gen_sub3 (operands[0], reg, operands[1]));
+-  DONE;
+-})
+-
+ (define_expand "lsx_vrepli"
+   [(match_operand:ILSX 0 "register_operand")
+    (match_operand 1 "const_imm10_operand")]
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index 8ac1d75a8..00d4c7831 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -85,12 +85,21 @@
+ (define_mode_attr simdifmt_for_f [(V2DF "l") (V4DF "l")
+ 				  (V4SF "w") (V8SF "w")])
+ 
++;; Suffix for integer mode in LSX or LASX instructions to operating FP
++;; vectors using integer vector operations.
++(define_mode_attr simdfmt_as_i [(V2DF "d") (V4DF "d")
++				(V4SF "w") (V8SF "w")])
++
+ ;; Size of vector elements in bits.
+ (define_mode_attr elmbits [(V2DI "64") (V4DI "64")
+ 			   (V4SI "32") (V8SI "32")
+ 			   (V8HI "16") (V16HI "16")
+ 			   (V16QI "8") (V32QI "8")])
+ 
++;; The index of sign bit in FP vector elements.
++(define_mode_attr elmsgnbit [(V2DF "63") (V4DF "63")
++			     (V4SF "31") (V8SF "31")])
++
+ ;; This attribute is used to form an immediate operand constraint using
+ ;; "const__operand".
+ (define_mode_attr bitimm [(V16QI "uimm3") (V32QI "uimm3")
+@@ -457,6 +466,15 @@
+   DONE;
+ })
+ 
++;; FP negation.
++(define_insn "neg2"
++  [(set (match_operand:FVEC 0 "register_operand" "=f")
++	(neg:FVEC (match_operand:FVEC 1 "register_operand" "f")))]
++  ""
++  "vbitrevi.\t%0,%1,"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
+ ; The LoongArch SX Instructions.
+ (include "lsx.md")
+ 
+-- 
+2.43.0
+
diff --git a/SME-0032-mode-switching-Fix-the-mode-passed-to-the-emit-hook.patch b/0135-Backport-SME-mode-switching-Fix-the-mode-passed-to-t.patch
similarity index 96%
rename from SME-0032-mode-switching-Fix-the-mode-passed-to-the-emit-hook.patch
rename to 0135-Backport-SME-mode-switching-Fix-the-mode-passed-to-t.patch
index 5f45104bdb7dc8269043fc9abf243b2e84957265..c1b403655bb0069521452cc3600ba8a29140ad4a 100644
--- a/SME-0032-mode-switching-Fix-the-mode-passed-to-the-emit-hook.patch
+++ b/0135-Backport-SME-mode-switching-Fix-the-mode-passed-to-t.patch
@@ -1,7 +1,8 @@
-From 0f2777b98ac375e194d649fe26a2376cba418fd8 Mon Sep 17 00:00:00 2001
+From 194700063ed04b56d84912f7ace1b8370af6c696 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:56 +0000
-Subject: [PATCH 032/144] mode-switching: Fix the mode passed to the emit hook
+Subject: [PATCH 036/157] [Backport][SME] mode-switching: Fix the mode passed
+ to the emit hook
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=5afd208beaef50bcc43b556d4c41d41656b06436
 
@@ -131,5 +132,5 @@ index 6a13951c9..584cd4f67 100644
  		  if (mode_set != NULL_RTX)
  		    {
 -- 
-2.19.1
+2.33.0
 
diff --git a/0135-LoongArch-Fix-wrong-return-value-type-of-__iocsrrd_h.patch b/0135-LoongArch-Fix-wrong-return-value-type-of-__iocsrrd_h.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1055b9ac0ab75d1fbbc4a26138534748bb51dce1
--- /dev/null
+++ b/0135-LoongArch-Fix-wrong-return-value-type-of-__iocsrrd_h.patch
@@ -0,0 +1,30 @@
+From 539eb7639eeda8ea43149032f6aa724e5d46017c Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Mon, 5 Feb 2024 16:23:20 +0800
+Subject: [PATCH 135/188] LoongArch: Fix wrong return value type of
+ __iocsrrd_h.
+
+gcc/ChangeLog:
+
+	* config/loongarch/larchintrin.h (__iocsrrd_h): Modify the
+	function return value type to unsigned short.
+---
+ gcc/config/loongarch/larchintrin.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/larchintrin.h b/gcc/config/loongarch/larchintrin.h
+index 6582dfe49..046e042fd 100644
+--- a/gcc/config/loongarch/larchintrin.h
++++ b/gcc/config/loongarch/larchintrin.h
+@@ -268,7 +268,7 @@ __iocsrrd_b (unsigned int _1)
+ 
+ /* Assembly instruction format:	rd, rj.  */
+ /* Data types in instruction templates:  UHI, USI.  */
+-extern __inline unsigned char
++extern __inline unsigned short
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_h (unsigned int _1)
+ {
+-- 
+2.43.0
+
diff --git a/SME-0033-mode-switching-Simplify-recording-of-transparency.patch b/0136-Backport-SME-mode-switching-Simplify-recording-of-tr.patch
similarity index 95%
rename from SME-0033-mode-switching-Simplify-recording-of-transparency.patch
rename to 0136-Backport-SME-mode-switching-Simplify-recording-of-tr.patch
index c62a5b6de638f4a4088d4012de85a91c64a1bf6e..1b99d67d63c555c5b43eb2c0b2797cf0a2c3c63d 100644
--- a/SME-0033-mode-switching-Simplify-recording-of-transparency.patch
+++ b/0136-Backport-SME-mode-switching-Simplify-recording-of-tr.patch
@@ -1,7 +1,8 @@
-From 25f64be4bbd8546c96dce88b7ae7222bc44d5ba8 Mon Sep 17 00:00:00 2001
+From ac51d446ee605e942b0831d3ff617980d94bf502 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:56 +0000
-Subject: [PATCH 033/144] mode-switching: Simplify recording of transparency
+Subject: [PATCH 037/157] [Backport][SME] mode-switching: Simplify recording of
+ transparency
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=335b55f4146c5ef9e3bf4bcb7e58e887c3150b02
 
@@ -98,5 +99,5 @@ index 584cd4f67..4d2b9e284 100644
  		set_mode_bit (antic[bb->index], j, m);
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0136-LoongArch-Remove-redundant-symbol-type-conversions-i.patch b/0136-LoongArch-Remove-redundant-symbol-type-conversions-i.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5f9eb0bef535715c1925ab7d6459fa0daa96954b
--- /dev/null
+++ b/0136-LoongArch-Remove-redundant-symbol-type-conversions-i.patch
@@ -0,0 +1,337 @@
+From 868f56db1101bf679f1b2510b9934a978f503a1e Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Mon, 5 Feb 2024 16:53:01 +0800
+Subject: [PATCH 136/188] LoongArch: Remove redundant symbol type conversions
+ in larchintrin.h.
+
+gcc/ChangeLog:
+
+	* config/loongarch/larchintrin.h (__movgr2fcsr): Remove redundant
+	symbol type conversions.
+	(__cacop_d): Likewise.
+	(__cpucfg): Likewise.
+	(__asrtle_d): Likewise.
+	(__asrtgt_d): Likewise.
+	(__lddir_d): Likewise.
+	(__ldpte_d): Likewise.
+	(__crc_w_b_w): Likewise.
+	(__crc_w_h_w): Likewise.
+	(__crc_w_w_w): Likewise.
+	(__crc_w_d_w): Likewise.
+	(__crcc_w_b_w): Likewise.
+	(__crcc_w_h_w): Likewise.
+	(__crcc_w_w_w): Likewise.
+	(__crcc_w_d_w): Likewise.
+	(__csrrd_w): Likewise.
+	(__csrwr_w): Likewise.
+	(__csrxchg_w): Likewise.
+	(__csrrd_d): Likewise.
+	(__csrwr_d): Likewise.
+	(__csrxchg_d): Likewise.
+	(__iocsrrd_b): Likewise.
+	(__iocsrrd_h): Likewise.
+	(__iocsrrd_w): Likewise.
+	(__iocsrrd_d): Likewise.
+	(__iocsrwr_b): Likewise.
+	(__iocsrwr_h): Likewise.
+	(__iocsrwr_w): Likewise.
+	(__iocsrwr_d): Likewise.
+	(__frecipe_s): Likewise.
+	(__frecipe_d): Likewise.
+	(__frsqrte_s): Likewise.
+	(__frsqrte_d): Likewise.
+---
+ gcc/config/loongarch/larchintrin.h | 69 ++++++++++++++----------------
+ 1 file changed, 33 insertions(+), 36 deletions(-)
+
+diff --git a/gcc/config/loongarch/larchintrin.h b/gcc/config/loongarch/larchintrin.h
+index 046e042fd..2e94e5612 100644
+--- a/gcc/config/loongarch/larchintrin.h
++++ b/gcc/config/loongarch/larchintrin.h
+@@ -87,13 +87,13 @@ __rdtimel_w (void)
+ /* Assembly instruction format:	fcsr, rj.  */
+ /* Data types in instruction templates:  VOID, UQI, USI.  */
+ #define __movgr2fcsr(/*ui5*/ _1, _2) \
+-  __builtin_loongarch_movgr2fcsr ((_1), (unsigned int) _2);
++  __builtin_loongarch_movgr2fcsr ((_1), _2);
+ 
+ #if defined __loongarch64
+ /* Assembly instruction format:	ui5, rj, si12.  */
+ /* Data types in instruction templates:  VOID, USI, UDI, SI.  */
+ #define __cacop_d(/*ui5*/ _1, /*unsigned long int*/ _2, /*si12*/ _3) \
+-  ((void) __builtin_loongarch_cacop_d ((_1), (unsigned long int) (_2), (_3)))
++  __builtin_loongarch_cacop_d ((_1), (_2), (_3))
+ #else
+ #error "Unsupported ABI."
+ #endif
+@@ -104,7 +104,7 @@ extern __inline unsigned int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __cpucfg (unsigned int _1)
+ {
+-  return (unsigned int) __builtin_loongarch_cpucfg ((unsigned int) _1);
++  return __builtin_loongarch_cpucfg (_1);
+ }
+ 
+ #ifdef __loongarch64
+@@ -114,7 +114,7 @@ extern __inline void
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __asrtle_d (long int _1, long int _2)
+ {
+-  __builtin_loongarch_asrtle_d ((long int) _1, (long int) _2);
++  __builtin_loongarch_asrtle_d (_1, _2);
+ }
+ 
+ /* Assembly instruction format:	rj, rk.  */
+@@ -123,7 +123,7 @@ extern __inline void
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __asrtgt_d (long int _1, long int _2)
+ {
+-  __builtin_loongarch_asrtgt_d ((long int) _1, (long int) _2);
++  __builtin_loongarch_asrtgt_d (_1, _2);
+ }
+ #endif
+ 
+@@ -131,7 +131,7 @@ __asrtgt_d (long int _1, long int _2)
+ /* Assembly instruction format:	rd, rj, ui5.  */
+ /* Data types in instruction templates:  DI, DI, UQI.  */
+ #define __lddir_d(/*long int*/ _1, /*ui5*/ _2) \
+-  ((long int) __builtin_loongarch_lddir_d ((long int) (_1), (_2)))
++  __builtin_loongarch_lddir_d ((_1), (_2))
+ #else
+ #error "Unsupported ABI."
+ #endif
+@@ -140,7 +140,7 @@ __asrtgt_d (long int _1, long int _2)
+ /* Assembly instruction format:	rj, ui5.  */
+ /* Data types in instruction templates:  VOID, DI, UQI.  */
+ #define __ldpte_d(/*long int*/ _1, /*ui5*/ _2) \
+-  ((void) __builtin_loongarch_ldpte_d ((long int) (_1), (_2)))
++  __builtin_loongarch_ldpte_d ((_1), (_2))
+ #else
+ #error "Unsupported ABI."
+ #endif
+@@ -151,7 +151,7 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_b_w (char _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crc_w_b_w ((char) _1, (int) _2);
++  return __builtin_loongarch_crc_w_b_w (_1, _2);
+ }
+ 
+ /* Assembly instruction format:	rd, rj, rk.  */
+@@ -160,7 +160,7 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_h_w (short _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crc_w_h_w ((short) _1, (int) _2);
++  return __builtin_loongarch_crc_w_h_w (_1, _2);
+ }
+ 
+ /* Assembly instruction format:	rd, rj, rk.  */
+@@ -169,7 +169,7 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_w_w (int _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crc_w_w_w ((int) _1, (int) _2);
++  return __builtin_loongarch_crc_w_w_w (_1, _2);
+ }
+ 
+ #ifdef __loongarch64
+@@ -179,7 +179,7 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crc_w_d_w (long int _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crc_w_d_w ((long int) _1, (int) _2);
++  return __builtin_loongarch_crc_w_d_w (_1, _2);
+ }
+ #endif
+ 
+@@ -189,7 +189,7 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_b_w (char _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crcc_w_b_w ((char) _1, (int) _2);
++  return __builtin_loongarch_crcc_w_b_w (_1, _2);
+ }
+ 
+ /* Assembly instruction format:	rd, rj, rk.  */
+@@ -198,7 +198,7 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_h_w (short _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crcc_w_h_w ((short) _1, (int) _2);
++  return __builtin_loongarch_crcc_w_h_w (_1, _2);
+ }
+ 
+ /* Assembly instruction format:	rd, rj, rk.  */
+@@ -207,7 +207,7 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_w_w (int _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crcc_w_w_w ((int) _1, (int) _2);
++  return __builtin_loongarch_crcc_w_w_w (_1, _2);
+ }
+ 
+ #ifdef __loongarch64
+@@ -217,44 +217,41 @@ extern __inline int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __crcc_w_d_w (long int _1, int _2)
+ {
+-  return (int) __builtin_loongarch_crcc_w_d_w ((long int) _1, (int) _2);
++  return __builtin_loongarch_crcc_w_d_w (_1, _2);
+ }
+ #endif
+ 
+ /* Assembly instruction format:	rd, ui14.  */
+ /* Data types in instruction templates:  USI, USI.  */
+ #define __csrrd_w(/*ui14*/ _1) \
+-  ((unsigned int) __builtin_loongarch_csrrd_w ((_1)))
++  __builtin_loongarch_csrrd_w ((_1))
+ 
+ /* Assembly instruction format:	rd, ui14.  */
+ /* Data types in instruction templates:  USI, USI, USI.  */
+ #define __csrwr_w(/*unsigned int*/ _1, /*ui14*/ _2) \
+-  ((unsigned int) __builtin_loongarch_csrwr_w ((unsigned int) (_1), (_2)))
++  __builtin_loongarch_csrwr_w ((_1), (_2))
+ 
+ /* Assembly instruction format:	rd, rj, ui14.  */
+ /* Data types in instruction templates:  USI, USI, USI, USI.  */
+ #define __csrxchg_w(/*unsigned int*/ _1, /*unsigned int*/ _2, /*ui14*/ _3) \
+-  ((unsigned int) __builtin_loongarch_csrxchg_w ((unsigned int) (_1), \
+-					       (unsigned int) (_2), (_3)))
++  __builtin_loongarch_csrxchg_w ((_1), (_2), (_3))
+ 
+ #ifdef __loongarch64
+ /* Assembly instruction format:	rd, ui14.  */
+ /* Data types in instruction templates:  UDI, USI.  */
+ #define __csrrd_d(/*ui14*/ _1) \
+-  ((unsigned long int) __builtin_loongarch_csrrd_d ((_1)))
++  __builtin_loongarch_csrrd_d ((_1))
+ 
+ /* Assembly instruction format:	rd, ui14.  */
+ /* Data types in instruction templates:  UDI, UDI, USI.  */
+ #define __csrwr_d(/*unsigned long int*/ _1, /*ui14*/ _2) \
+-  ((unsigned long int) __builtin_loongarch_csrwr_d ((unsigned long int) (_1), \
+-						   (_2)))
++  __builtin_loongarch_csrwr_d ((_1), (_2))
+ 
+ /* Assembly instruction format:	rd, rj, ui14.  */
+ /* Data types in instruction templates:  UDI, UDI, UDI, USI.  */
+ #define __csrxchg_d(/*unsigned long int*/ _1, /*unsigned long int*/ _2, \
+ 		   /*ui14*/ _3) \
+-  ((unsigned long int) __builtin_loongarch_csrxchg_d ( \
+-    (unsigned long int) (_1), (unsigned long int) (_2), (_3)))
++  __builtin_loongarch_csrxchg_d ((_1), (_2), (_3))
+ #endif
+ 
+ /* Assembly instruction format:	rd, rj.  */
+@@ -263,7 +260,7 @@ extern __inline unsigned char
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_b (unsigned int _1)
+ {
+-  return (unsigned char) __builtin_loongarch_iocsrrd_b ((unsigned int) _1);
++  return __builtin_loongarch_iocsrrd_b (_1);
+ }
+ 
+ /* Assembly instruction format:	rd, rj.  */
+@@ -272,7 +269,7 @@ extern __inline unsigned short
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_h (unsigned int _1)
+ {
+-  return (unsigned short) __builtin_loongarch_iocsrrd_h ((unsigned int) _1);
++  return __builtin_loongarch_iocsrrd_h (_1);
+ }
+ 
+ /* Assembly instruction format:	rd, rj.  */
+@@ -281,7 +278,7 @@ extern __inline unsigned int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_w (unsigned int _1)
+ {
+-  return (unsigned int) __builtin_loongarch_iocsrrd_w ((unsigned int) _1);
++  return __builtin_loongarch_iocsrrd_w (_1);
+ }
+ 
+ #ifdef __loongarch64
+@@ -291,7 +288,7 @@ extern __inline unsigned long int
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrrd_d (unsigned int _1)
+ {
+-  return (unsigned long int) __builtin_loongarch_iocsrrd_d ((unsigned int) _1);
++  return __builtin_loongarch_iocsrrd_d (_1);
+ }
+ #endif
+ 
+@@ -301,7 +298,7 @@ extern __inline void
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_b (unsigned char _1, unsigned int _2)
+ {
+-  __builtin_loongarch_iocsrwr_b ((unsigned char) _1, (unsigned int) _2);
++  __builtin_loongarch_iocsrwr_b (_1, _2);
+ }
+ 
+ /* Assembly instruction format:	rd, rj.  */
+@@ -310,7 +307,7 @@ extern __inline void
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_h (unsigned short _1, unsigned int _2)
+ {
+-  __builtin_loongarch_iocsrwr_h ((unsigned short) _1, (unsigned int) _2);
++  __builtin_loongarch_iocsrwr_h (_1, _2);
+ }
+ 
+ /* Assembly instruction format:	rd, rj.  */
+@@ -319,7 +316,7 @@ extern __inline void
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_w (unsigned int _1, unsigned int _2)
+ {
+-  __builtin_loongarch_iocsrwr_w ((unsigned int) _1, (unsigned int) _2);
++  __builtin_loongarch_iocsrwr_w (_1, _2);
+ }
+ 
+ #ifdef __loongarch64
+@@ -329,7 +326,7 @@ extern __inline void
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __iocsrwr_d (unsigned long int _1, unsigned int _2)
+ {
+-  __builtin_loongarch_iocsrwr_d ((unsigned long int) _1, (unsigned int) _2);
++  __builtin_loongarch_iocsrwr_d (_1, _2);
+ }
+ #endif
+ 
+@@ -340,7 +337,7 @@ extern __inline float
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frecipe_s (float _1)
+ {
+-  return (float) __builtin_loongarch_frecipe_s ((float) _1);
++  return __builtin_loongarch_frecipe_s (_1);
+ }
+ 
+ /* Assembly instruction format: fd, fj.  */
+@@ -349,7 +346,7 @@ extern __inline double
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frecipe_d (double _1)
+ {
+-  return (double) __builtin_loongarch_frecipe_d ((double) _1);
++  return __builtin_loongarch_frecipe_d (_1);
+ }
+ 
+ /* Assembly instruction format: fd, fj.  */
+@@ -358,7 +355,7 @@ extern __inline float
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frsqrte_s (float _1)
+ {
+-  return (float) __builtin_loongarch_frsqrte_s ((float) _1);
++  return __builtin_loongarch_frsqrte_s (_1);
+ }
+ 
+ /* Assembly instruction format: fd, fj.  */
+@@ -367,7 +364,7 @@ extern __inline double
+ __attribute__ ((__gnu_inline__, __always_inline__, __artificial__))
+ __frsqrte_d (double _1)
+ {
+-  return (double) __builtin_loongarch_frsqrte_d ((double) _1);
++  return __builtin_loongarch_frsqrte_d (_1);
+ }
+ #endif
+ 
+-- 
+2.43.0
+
diff --git a/SME-0034-mode-switching-Tweak-entry-exit-handling.patch b/0137-Backport-SME-mode-switching-Tweak-entry-exit-handlin.patch
similarity index 95%
rename from SME-0034-mode-switching-Tweak-entry-exit-handling.patch
rename to 0137-Backport-SME-mode-switching-Tweak-entry-exit-handlin.patch
index 443646f94b9eda3e1f4ab146e731926b34b6878b..8444847cf686000fdda934ea989c4829f94de57b 100644
--- a/SME-0034-mode-switching-Tweak-entry-exit-handling.patch
+++ b/0137-Backport-SME-mode-switching-Tweak-entry-exit-handlin.patch
@@ -1,7 +1,8 @@
-From d69af0b23707f1b4dbad739eb7dce302d4843dbb Mon Sep 17 00:00:00 2001
+From c0aaf329d9c547b249ac120a8d1995d8546a1edb Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:57 +0000
-Subject: [PATCH 034/144] mode-switching: Tweak entry/exit handling
+Subject: [PATCH 038/157] [Backport][SME] mode-switching: Tweak entry/exit
+ handling
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=e59ec35276599805cdc6c3979d8a167b027d286e
 
@@ -87,5 +88,5 @@ index 4d2b9e284..4761c2ff0 100644
  	}
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0137-LoongArch-When-checking-whether-the-assembler-suppor.patch b/0137-LoongArch-When-checking-whether-the-assembler-suppor.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e1c70c11cf80ce011d5103570d179eff630ac060
--- /dev/null
+++ b/0137-LoongArch-When-checking-whether-the-assembler-suppor.patch
@@ -0,0 +1,54 @@
+From 3580ce2b8c57967117e55af48beba0aaa6257e8b Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Wed, 21 Feb 2024 11:17:14 +0800
+Subject: [PATCH 137/188] LoongArch: When checking whether the assembler
+ supports conditional branch relaxation, add compilation parameter
+ "--fatal-warnings" to the assembler.
+
+In binutils 2.40 and earlier versions, only a warning will be reported
+when a relocation immediate value is out of bounds. As a result,
+the value of the macro HAVE_AS_COND_BRANCH_RELAXATION will also be
+defined as 1 when the assembler does not support conditional branch
+relaxation. Therefore, add the compilation option "--fatal-warnings"
+to avoid this problem.
+
+gcc/ChangeLog:
+
+	* configure: Regenerate.
+	* configure.ac: Add parameter "--fatal-warnings" to assemble
+	when checking whether the assemble support conditional branch
+	relaxation.
+---
+ gcc/configure    | 2 +-
+ gcc/configure.ac | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/configure b/gcc/configure
+index eecfe60d6..f31395017 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -28947,7 +28947,7 @@ else
+        nop
+        .endr
+        beq $a0,$a1,a' > conftest.s
+-    if { ac_try='$gcc_cv_as $gcc_cv_as_flags  -o conftest.o conftest.s >&5'
++    if { ac_try='$gcc_cv_as $gcc_cv_as_flags --fatal-warnings -o conftest.o conftest.s >&5'
+   { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
+   (eval $ac_try) 2>&5
+   ac_status=$?
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index d1032440d..35f2c657f 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -5349,7 +5349,7 @@ x:
+ 		[Define if your assembler supports -mrelax option.])])
+     gcc_GAS_CHECK_FEATURE([conditional branch relaxation support],
+       gcc_cv_as_loongarch_cond_branch_relax,
+-      [],
++      [--fatal-warnings],
+       [a:
+        .rept 32769
+        nop
+-- 
+2.43.0
+
diff --git a/SME-0035-mode-switching-Allow-targets-to-set-the-mode-for-EH-.patch b/0138-Backport-SME-mode-switching-Allow-targets-to-set-the.patch
similarity index 86%
rename from SME-0035-mode-switching-Allow-targets-to-set-the-mode-for-EH-.patch
rename to 0138-Backport-SME-mode-switching-Allow-targets-to-set-the.patch
index 1c7077bae444dd86d11a4802ddd4dddfe33b6968..8d066b2f87736a088e4f6d6110e8e85aff39f308 100644
--- a/SME-0035-mode-switching-Allow-targets-to-set-the-mode-for-EH-.patch
+++ b/0138-Backport-SME-mode-switching-Allow-targets-to-set-the.patch
@@ -1,8 +1,8 @@
-From 15e311c4ba2a7052f1e4bbd548b8c95c4cdea0cb Mon Sep 17 00:00:00 2001
+From 9505464aec8f95125293c64e2eea9577e9be4700 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:57 +0000
-Subject: [PATCH 035/144] mode-switching: Allow targets to set the mode for EH
- handlers
+Subject: [PATCH 039/157] [Backport][SME] mode-switching: Allow targets to set
+ the mode for EH handlers
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=4b803fbf839439b1deca660e32d5ced211111dfa
 
@@ -25,10 +25,10 @@ gcc/
  4 files changed, 19 insertions(+), 1 deletion(-)
 
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 53a40e467..381fa2c95 100644
+index 553aa4cf2..4788b3f7a 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -10531,6 +10531,12 @@ If @code{TARGET_MODE_EXIT} is defined then @code{TARGET_MODE_ENTRY}
+@@ -10321,6 +10321,12 @@ If @code{TARGET_MODE_EXIT} is defined then @code{TARGET_MODE_ENTRY}
  must be defined.
  @end deftypefn
  
@@ -42,10 +42,10 @@ index 53a40e467..381fa2c95 100644
  This hook specifies the order in which modes for @var{entity}
  are processed. 0 is the highest priority,
 diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
-index cdfa36564..282cb9926 100644
+index 9ec11b15c..ad343504f 100644
 --- a/gcc/doc/tm.texi.in
 +++ b/gcc/doc/tm.texi.in
-@@ -7136,6 +7136,8 @@ mode or ``no mode'', depending on context.
+@@ -6926,6 +6926,8 @@ mode or ``no mode'', depending on context.
  
  @hook TARGET_MODE_EXIT
  
@@ -71,10 +71,10 @@ index 4761c2ff0..9a6ba6cca 100644
  		bitmap_clear_bit (transp_all, bb->index);
  	      }
 diff --git a/gcc/target.def b/gcc/target.def
-index be8a99172..0a9bcefba 100644
+index b87b0f927..bbb482de6 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -6985,6 +6985,13 @@ If @code{TARGET_MODE_EXIT} is defined then @code{TARGET_MODE_ENTRY}\n\
+@@ -7042,6 +7042,13 @@ If @code{TARGET_MODE_EXIT} is defined then @code{TARGET_MODE_ENTRY}\n\
  must be defined.",
   int, (int entity), NULL)
  
@@ -89,5 +89,5 @@ index be8a99172..0a9bcefba 100644
  (priority,
   "This hook specifies the order in which modes for @var{entity}\n\
 -- 
-2.19.1
+2.33.0
 
diff --git a/0138-LoongArch-Don-t-falsely-claim-gold-supported-in-topl.patch b/0138-LoongArch-Don-t-falsely-claim-gold-supported-in-topl.patch
new file mode 100644
index 0000000000000000000000000000000000000000..93fb0ad94622df2c3ea514ec67c9ab57d07fb566
--- /dev/null
+++ b/0138-LoongArch-Don-t-falsely-claim-gold-supported-in-topl.patch
@@ -0,0 +1,49 @@
+From e6968eb62b2a0adc7ef591594240582630adfc61 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 21 Feb 2024 23:54:53 +0800
+Subject: [PATCH 138/188] LoongArch: Don't falsely claim gold supported in
+ toplevel configure
+
+The gold linker has never been ported to LoongArch (and it seems
+unlikely to be ported in the future as the new architectures are
+focusing on lld and/or mold for fast linkers).
+
+ChangeLog:
+
+	* configure.ac (ENABLE_GOLD): Remove loongarch*-*-* from target
+	list.
+	* configure: Regenerate.
+---
+ configure    | 2 +-
+ configure.ac | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/configure b/configure
+index 81b4a3cec..ebdca8c62 100755
+--- a/configure
++++ b/configure
+@@ -3058,7 +3058,7 @@ case "${ENABLE_GOLD}" in
+       # Check for target supported by gold.
+       case "${target}" in
+         i?86-*-* | x86_64-*-* | sparc*-*-* | powerpc*-*-* | arm*-*-* \
+-        | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-* | loongarch*-*-*)
++        | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-*)
+ 	  configdirs="$configdirs gold"
+ 	  if test x${ENABLE_GOLD} = xdefault; then
+ 	    default_ld=gold
+diff --git a/configure.ac b/configure.ac
+index 9f8dbd319..4f45fd2ba 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -353,7 +353,7 @@ case "${ENABLE_GOLD}" in
+       # Check for target supported by gold.
+       case "${target}" in
+         i?86-*-* | x86_64-*-* | sparc*-*-* | powerpc*-*-* | arm*-*-* \
+-        | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-* | loongarch*-*-*)
++        | aarch64*-*-* | tilegx*-*-* | mips*-*-* | s390*-*-*)
+ 	  configdirs="$configdirs gold"
+ 	  if test x${ENABLE_GOLD} = xdefault; then
+ 	    default_ld=gold
+-- 
+2.43.0
+
diff --git a/SME-0036-mode-switching-Pass-set-of-live-registers-to-the-nee.patch b/0139-Backport-SME-mode-switching-Pass-set-of-live-registe.patch
similarity index 94%
rename from SME-0036-mode-switching-Pass-set-of-live-registers-to-the-nee.patch
rename to 0139-Backport-SME-mode-switching-Pass-set-of-live-registe.patch
index 339bd5fb74e607898cfcd8cf6ffd25e390cdf623..037a4f781a524dfb95ab4afb31844ae7bbb6062d 100644
--- a/SME-0036-mode-switching-Pass-set-of-live-registers-to-the-nee.patch
+++ b/0139-Backport-SME-mode-switching-Pass-set-of-live-registe.patch
@@ -1,8 +1,8 @@
-From 37f7b48232c08ec5ebbfc35373b43a5282d102e2 Mon Sep 17 00:00:00 2001
+From a6964e11c7f624cdaed2c9608565a5968292b70f Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:58 +0000
-Subject: [PATCH 036/144] mode-switching: Pass set of live registers to the
- needed hook
+Subject: [PATCH 040/157] [Backport][SME] mode-switching: Pass set of live
+ registers to the needed hook
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=29d3e1892ebec8abce784077d1f1a3e21d763218
 
@@ -79,10 +79,10 @@ index 887550a33..cacb1ce5a 100644
  	    continue;
  	  if (target_insert_mode_switch_use)
 diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
-index 462dce10e..31c7a3110 100644
+index 60f3296b0..4d591d217 100644
 --- a/gcc/config/i386/i386.cc
 +++ b/gcc/config/i386/i386.cc
-@@ -14518,7 +14518,7 @@ ix86_i387_mode_needed (int entity, rtx_insn *insn)
+@@ -14522,7 +14522,7 @@ ix86_i387_mode_needed (int entity, rtx_insn *insn)
     prior to the execution of insn.  */
  
  static int
@@ -114,10 +114,10 @@ index 03e1c04ec..85e83e12e 100644
    return recog_memoized (insn) >= 0  ? get_attr_fp_mode (insn) : FP_MODE_NONE;
  }
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 381fa2c95..50bbde30b 100644
+index 4788b3f7a..d8ac6c4d6 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -10490,12 +10490,13 @@ known.  Sets of a lower numbered entity will be emitted before
+@@ -10280,12 +10280,13 @@ known.  Sets of a lower numbered entity will be emitted before
  sets of a higher numbered entity to a mode of the same or lower priority.
  @end deftypefn
  
@@ -191,10 +191,10 @@ index 9a6ba6cca..6bbda5058 100644
  
  		  if (mode != no_mode && mode != last_mode)
 diff --git a/gcc/target.def b/gcc/target.def
-index 0a9bcefba..e155bc963 100644
+index bbb482de6..06a52bdaf 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -6946,8 +6946,9 @@ DEFHOOK
+@@ -7003,8 +7003,9 @@ DEFHOOK
  If @code{OPTIMIZE_MODE_SWITCHING} is defined, you must define this hook\n\
  to return the mode that @var{entity} must be switched into prior to the\n\
  execution of @var{insn}, or the number of modes if @var{insn} has no\n\
@@ -207,5 +207,5 @@ index 0a9bcefba..e155bc963 100644
  DEFHOOK
  (after,
 -- 
-2.19.1
+2.33.0
 
diff --git a/0139-LoongArch-NFC-Deduplicate-crc-instruction-defines.patch b/0139-LoongArch-NFC-Deduplicate-crc-instruction-defines.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a4d705b4400ef373c641e24af3af473d76c9b406
--- /dev/null
+++ b/0139-LoongArch-NFC-Deduplicate-crc-instruction-defines.patch
@@ -0,0 +1,56 @@
+From 7a4761a31454f999331e8aa5f831e26e249c4295 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 25 Feb 2024 20:40:41 +0800
+Subject: [PATCH 139/188] LoongArch: NFC: Deduplicate crc instruction defines
+
+Introduce an iterator for UNSPEC_CRC and UNSPEC_CRCC to make the next
+change easier.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (CRC): New define_int_iterator.
+	(crc): New define_int_attr.
+	(loongarch_crc_w__w, loongarch_crcc_w__w): Unify
+	into ...
+	(loongarch__w__w): ... here.
+---
+ gcc/config/loongarch/loongarch.md | 18 +++++-------------
+ 1 file changed, 5 insertions(+), 13 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 9356194fe..b5ad9eada 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -4251,24 +4251,16 @@
+ 
+ 
+ (define_mode_iterator QHSD [QI HI SI DI])
++(define_int_iterator CRC [UNSPEC_CRC UNSPEC_CRCC])
++(define_int_attr crc [(UNSPEC_CRC "crc") (UNSPEC_CRCC "crcc")])
+ 
+-(define_insn "loongarch_crc_w__w"
++(define_insn "loongarch__w__w"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(unspec:SI [(match_operand:QHSD 1 "register_operand" "r")
+ 		   (match_operand:SI 2 "register_operand" "r")]
+-		     UNSPEC_CRC))]
++		     CRC))]
+   ""
+-  "crc.w..w\t%0,%1,%2"
+-  [(set_attr "type" "unknown")
+-   (set_attr "mode" "")])
+-
+-(define_insn "loongarch_crcc_w__w"
+-  [(set (match_operand:SI 0 "register_operand" "=r")
+-	(unspec:SI [(match_operand:QHSD 1 "register_operand" "r")
+-		   (match_operand:SI 2 "register_operand" "r")]
+-		     UNSPEC_CRCC))]
+-  ""
+-  "crcc.w..w\t%0,%1,%2"
++  ".w..w\t%0,%1,%2"
+   [(set_attr "type" "unknown")
+    (set_attr "mode" "")])
+ 
+-- 
+2.43.0
+
diff --git a/SME-0037-mode-switching-Pass-the-set-of-live-registers-to-the.patch b/0140-Backport-SME-mode-switching-Pass-the-set-of-live-reg.patch
similarity index 92%
rename from SME-0037-mode-switching-Pass-the-set-of-live-registers-to-the.patch
rename to 0140-Backport-SME-mode-switching-Pass-the-set-of-live-reg.patch
index 92d17e794e6996c67349dff2a4fdc96b25b9b826..da76e1bfc0ce5a9876af0b9bc5372b779fe0a3ac 100644
--- a/SME-0037-mode-switching-Pass-the-set-of-live-registers-to-the.patch
+++ b/0140-Backport-SME-mode-switching-Pass-the-set-of-live-reg.patch
@@ -1,8 +1,8 @@
-From f9cfa274b606ce1fc4bd7a9a3271186e72833af1 Mon Sep 17 00:00:00 2001
+From 4457604c11c0a32f3736d73429d1e5fb7baae3a5 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:58 +0000
-Subject: [PATCH 037/144] mode-switching: Pass the set of live registers to the
- after hook
+Subject: [PATCH 041/157] [Backport][SME] mode-switching: Pass the set of live
+ registers to the after hook
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=93d65f39bc5c3dc318deb6da0e3633f3a4c6c34d
 
@@ -69,10 +69,10 @@ index be0fbc68c..62636b1ec 100644
    /* We have too few call-saved registers to hope to keep the masks across
       calls.  */
 diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
-index 31c7a3110..a1ba250ed 100644
+index 4d591d217..593185fa6 100644
 --- a/gcc/config/i386/i386.cc
 +++ b/gcc/config/i386/i386.cc
-@@ -14567,7 +14567,7 @@ ix86_avx_u128_mode_after (int mode, rtx_insn *insn)
+@@ -14583,7 +14583,7 @@ ix86_avx_u128_mode_after (int mode, rtx_insn *insn)
  /* Return the mode that an insn results in.  */
  
  static int
@@ -105,10 +105,10 @@ index 85e83e12e..74d61c43b 100644
    if (TARGET_HITACHI && recog_memoized (insn) >= 0 &&
        get_attr_fp_set (insn) != FP_SET_NONE)
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 50bbde30b..71b5679f5 100644
+index d8ac6c4d6..7fce485b2 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -10499,12 +10499,14 @@ such requirement.  @var{regs_live} contains the set of hard registers
+@@ -10289,12 +10289,14 @@ such requirement.  @var{regs_live} contains the set of hard registers
  that are live before @var{insn}.
  @end deftypefn
  
@@ -151,10 +151,10 @@ index 6bbda5058..4f0445894 100644
  	    }
  
 diff --git a/gcc/target.def b/gcc/target.def
-index e155bc963..b82d6a1b9 100644
+index 06a52bdaf..67c20bbb0 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -6957,6 +6957,8 @@ If this hook is defined, it is evaluated for every @var{insn} during mode\n\
+@@ -7014,6 +7014,8 @@ If this hook is defined, it is evaluated for every @var{insn} during mode\n\
  switching.  It returns the mode that @var{entity} is in after @var{insn}\n\
  has been executed.  @var{mode} is the mode that @var{entity} was in\n\
  before @var{insn} was executed, taking account of @var{TARGET_MODE_NEEDED}.\n\
@@ -163,7 +163,7 @@ index e155bc963..b82d6a1b9 100644
  \n\
  @var{mode} is equal to the number of modes defined for @var{entity}\n\
  if the mode before @var{insn} is unknown.  The hook should likewise return\n\
-@@ -6964,7 +6966,7 @@ the number of modes if it does not know what mode @var{entity} has after\n\
+@@ -7021,7 +7023,7 @@ the number of modes if it does not know what mode @var{entity} has after\n\
  @var{insn}.\n\
  \n\
  Not defining the hook is equivalent to returning @var{mode}.",
@@ -173,5 +173,5 @@ index e155bc963..b82d6a1b9 100644
  DEFHOOK
  (entry,
 -- 
-2.19.1
+2.33.0
 
diff --git a/0140-LoongArch-Remove-unneeded-sign-extension-after-crc-c.patch b/0140-LoongArch-Remove-unneeded-sign-extension-after-crc-c.patch
new file mode 100644
index 0000000000000000000000000000000000000000..404d5eaa7edeebc73cf82e29dfa05bd62a12ce41
--- /dev/null
+++ b/0140-LoongArch-Remove-unneeded-sign-extension-after-crc-c.patch
@@ -0,0 +1,70 @@
+From 946f9153a5d813301b05fb56a75e2c7ce22a6c2a Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 25 Feb 2024 20:44:34 +0800
+Subject: [PATCH 140/188] LoongArch: Remove unneeded sign extension after
+ crc/crcc instructions
+
+The specification of crc/crcc instructions is clear that the output is
+sign-extended to GRLEN.  Add a define_insn to tell the compiler this
+fact and allow it to remove the unneeded sign extension on crc/crcc
+output.  As crc/crcc instructions are usually used in a tight loop,
+this should produce a significant performance gain.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md
+	(loongarch__w__w_extended): New define_insn.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/crc-sext.c: New test;
+---
+ gcc/config/loongarch/loongarch.md             | 11 +++++++++++
+ gcc/testsuite/gcc.target/loongarch/crc-sext.c | 13 +++++++++++++
+ 2 files changed, 24 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/crc-sext.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index b5ad9eada..248ad12bb 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -4264,6 +4264,17 @@
+   [(set_attr "type" "unknown")
+    (set_attr "mode" "")])
+ 
++(define_insn "loongarch__w__w_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(sign_extend:DI
++	  (unspec:SI [(match_operand:QHSD 1 "register_operand" "r")
++		      (match_operand:SI 2 "register_operand" "r")]
++		     CRC)))]
++  "TARGET_64BIT"
++  ".w..w\t%0,%1,%2"
++  [(set_attr "type" "unknown")
++   (set_attr "mode" "")])
++
+ ;; With normal or medium code models, if the only use of a pc-relative
+ ;; address is for loading or storing a value, then relying on linker
+ ;; relaxation is not better than emitting the machine instruction directly.
+diff --git a/gcc/testsuite/gcc.target/loongarch/crc-sext.c b/gcc/testsuite/gcc.target/loongarch/crc-sext.c
+new file mode 100644
+index 000000000..9ade5a8e4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/crc-sext.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++/*
++**my_crc:
++**	crc.w.d.w	\$r4,\$r4,\$r5
++**	jr	\$r1
++*/
++int my_crc(long long dword, int crc)
++{
++	return __builtin_loongarch_crc_w_d_w(dword, crc);
++}
+-- 
+2.43.0
+
diff --git a/SME-0038-mode-switching-Use-1-based-edge-aux-fields.patch b/0141-Backport-SME-mode-switching-Use-1-based-edge-aux-fie.patch
similarity index 91%
rename from SME-0038-mode-switching-Use-1-based-edge-aux-fields.patch
rename to 0141-Backport-SME-mode-switching-Use-1-based-edge-aux-fie.patch
index 950d5c26d21ab4aa4f152aeff5ce6de5bb4508bd..0cb257e14aed9013166c170b641b07eed11345cc 100644
--- a/SME-0038-mode-switching-Use-1-based-edge-aux-fields.patch
+++ b/0141-Backport-SME-mode-switching-Use-1-based-edge-aux-fie.patch
@@ -1,7 +1,8 @@
-From a107a8a903c19ec35e1805ae4b8f43f96ebf48d0 Mon Sep 17 00:00:00 2001
+From b0d3536b2a28d3a7084e3bbb9532e719aaf2016b Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:59 +0000
-Subject: [PATCH 038/144] mode-switching: Use 1-based edge aux fields
+Subject: [PATCH 042/157] [Backport][SME] mode-switching: Use 1-based edge aux
+ fields
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=718228a6f479fe252e0e6f71933c2465b7b480a9
 
@@ -51,5 +52,5 @@ index 4f0445894..89a8494c6 100644
  		}
  	    }
 -- 
-2.19.1
+2.33.0
 
diff --git a/0141-LoongArch-Allow-s9-as-a-register-alias.patch b/0141-LoongArch-Allow-s9-as-a-register-alias.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3573204e57624f33831d2afd235343e2e9bd8501
--- /dev/null
+++ b/0141-LoongArch-Allow-s9-as-a-register-alias.patch
@@ -0,0 +1,45 @@
+From a74a85ed5f5b00018553d614b4dc57eb1dd5f5ee Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 25 Jan 2024 23:49:13 +0800
+Subject: [PATCH 141/188] LoongArch: Allow s9 as a register alias
+
+The psABI allows using s9 as an alias of r22.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (ADDITIONAL_REGISTER_NAMES): Add
+	s9 as an alias of r22.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/regname-fp-s9.c: New test.
+---
+ gcc/config/loongarch/loongarch.h                   | 1 +
+ gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c | 3 +++
+ 2 files changed, 4 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c
+
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 15261fdc0..8bcdb8729 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -931,6 +931,7 @@ typedef struct {
+   { "t8",	20 + GP_REG_FIRST },					\
+   { "x",	21 + GP_REG_FIRST },					\
+   { "fp",	22 + GP_REG_FIRST },					\
++  { "s9",	22 + GP_REG_FIRST },					\
+   { "s0",	23 + GP_REG_FIRST },					\
+   { "s1",	24 + GP_REG_FIRST },					\
+   { "s2",	25 + GP_REG_FIRST },					\
+diff --git a/gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c b/gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c
+new file mode 100644
+index 000000000..d2e3b80f8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c
+@@ -0,0 +1,3 @@
++/* { dg-do compile } */
++register long s9 asm("s9"); /* { dg-note "conflicts with 's9'" } */
++register long fp asm("fp"); /* { dg-warning "register of 'fp' used for multiple global register variables" } */
+-- 
+2.43.0
+
diff --git a/SME-0039-mode-switching-Add-a-target-configurable-confluence-.patch b/0142-Backport-SME-mode-switching-Add-a-target-configurabl.patch
similarity index 96%
rename from SME-0039-mode-switching-Add-a-target-configurable-confluence-.patch
rename to 0142-Backport-SME-mode-switching-Add-a-target-configurabl.patch
index 324f5edda7e0b57b607737b484ddb3ed616d8449..9123e409360d0705800fe3757562f70622271402 100644
--- a/SME-0039-mode-switching-Add-a-target-configurable-confluence-.patch
+++ b/0142-Backport-SME-mode-switching-Add-a-target-configurabl.patch
@@ -1,8 +1,8 @@
-From ef1679caa811a4d8f22497022eeff9dbf5ed070a Mon Sep 17 00:00:00 2001
+From 88d76baa38bb29d5cc732b3c0188b74ef9783713 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:28:59 +0000
-Subject: [PATCH 039/144] mode-switching: Add a target-configurable confluence
- operator
+Subject: [PATCH 043/157] [Backport][SME] mode-switching: Add a
+ target-configurable confluence operator
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=493b0038d7d04986c7de977074d095e4eb7d9a27
 
@@ -49,10 +49,10 @@ gcc/
  4 files changed, 186 insertions(+), 28 deletions(-)
 
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 71b5679f5..649346c49 100644
+index 7fce485b2..d7053ec9e 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -10516,6 +10516,22 @@ the number of modes if it does not know what mode @var{entity} has after
+@@ -10306,6 +10306,22 @@ the number of modes if it does not know what mode @var{entity} has after
  Not defining the hook is equivalent to returning @var{mode}.
  @end deftypefn
  
@@ -76,10 +76,10 @@ index 71b5679f5..649346c49 100644
  If this hook is defined, it is evaluated for every @var{entity} that
  needs mode switching.  It should return the mode that @var{entity} is
 diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
-index 282cb9926..f5a512385 100644
+index ad343504f..d420e62fd 100644
 --- a/gcc/doc/tm.texi.in
 +++ b/gcc/doc/tm.texi.in
-@@ -7132,6 +7132,8 @@ mode or ``no mode'', depending on context.
+@@ -6922,6 +6922,8 @@ mode or ``no mode'', depending on context.
  
  @hook TARGET_MODE_AFTER
  
@@ -305,10 +305,10 @@ index 89a8494c6..065767902 100644
  	  for (i = 0; i < no_mode; i++)
  	    if (mode_bit_p (del[bb->index], j, i))
 diff --git a/gcc/target.def b/gcc/target.def
-index b82d6a1b9..fd308d873 100644
+index 67c20bbb0..1e2091ed3 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -6968,6 +6968,23 @@ the number of modes if it does not know what mode @var{entity} has after\n\
+@@ -7025,6 +7025,23 @@ the number of modes if it does not know what mode @var{entity} has after\n\
  Not defining the hook is equivalent to returning @var{mode}.",
   int, (int entity, int mode, rtx_insn *insn, HARD_REG_SET regs_live), NULL)
  
@@ -333,5 +333,5 @@ index b82d6a1b9..fd308d873 100644
  (entry,
   "If this hook is defined, it is evaluated for every @var{entity} that\n\
 -- 
-2.19.1
+2.33.0
 
diff --git a/0142-LoongArch-testsuite-Rewrite-x-vfcmp-d-f-.c-to-avoid-.patch b/0142-LoongArch-testsuite-Rewrite-x-vfcmp-d-f-.c-to-avoid-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9f620fbfca9d04434fee94361baeabc4c0d0d6e9
--- /dev/null
+++ b/0142-LoongArch-testsuite-Rewrite-x-vfcmp-d-f-.c-to-avoid-.patch
@@ -0,0 +1,1117 @@
+From d568321f8894ed270bf0011892b86baa6d6b82bd Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 5 Mar 2024 20:46:57 +0800
+Subject: [PATCH 142/188] LoongArch: testsuite: Rewrite {x,}vfcmp-{d,f}.c to
+ avoid named registers
+
+Loops on named vector register are not vectorized (see comment 11 of
+PR113622), so the these test cases have been failing for a while.
+Rewrite them using check-function-bodies to remove hard coding register
+names.  A barrier is needed to always load the first operand before the
+second operand.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vfcmp-f.c: Rewrite to avoid named
+	registers.
+	* gcc.target/loongarch/vfcmp-d.c: Likewise.
+	* gcc.target/loongarch/xvfcmp-f.c: Likewise.
+	* gcc.target/loongarch/xvfcmp-d.c: Likewise.
+---
+ gcc/testsuite/gcc.target/loongarch/vfcmp-d.c  | 202 ++++++++--
+ gcc/testsuite/gcc.target/loongarch/vfcmp-f.c  | 347 ++++++++++++++----
+ gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c | 202 ++++++++--
+ gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c | 204 ++++++++--
+ 4 files changed, 816 insertions(+), 139 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vfcmp-d.c b/gcc/testsuite/gcc.target/loongarch/vfcmp-d.c
+index 8b870ef38..87e4ed19e 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vfcmp-d.c
++++ b/gcc/testsuite/gcc.target/loongarch/vfcmp-d.c
+@@ -1,28 +1,188 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mlsx -ffixed-f0 -ffixed-f1 -ffixed-f2 -fno-vect-cost-model" } */
++/* { dg-options "-O2 -mlsx -fno-vect-cost-model" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+ 
+ #define F double
+ #define I long long
+ 
+ #include "vfcmp-f.c"
+ 
+-/* { dg-final { scan-assembler "compare_quiet_equal:.*\tvfcmp\\.ceq\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\tvfcmp\\.cune\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater:.*\tvfcmp\\.slt\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\tvfcmp\\.sle\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less:.*\tvfcmp\\.slt\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\tvfcmp\\.sle\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\tvfcmp\\.sule\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\tvfcmp\\.sult\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_less:.*\tvfcmp\\.sule\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\tvfcmp\\.sult\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less:.*\tvfcmp\\.clt\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\tvfcmp\\.cle\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater:.*\tvfcmp\\.clt\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\tvfcmp\\.cle\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_less:.*\tvfcmp\\.cule\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\tvfcmp\\.cult\\.d\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\tvfcmp\\.cule\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\tvfcmp\\.cult\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_unordered:.*\tvfcmp\\.cun\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_ordered:.*\tvfcmp\\.cor\\.d\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_ordered\n" } } */
++/*
++** compare_quiet_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.ceq.d	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cune.d	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.slt.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sle.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.slt.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sle.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sule.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sult.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sule.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sult.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.clt.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cle.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.clt.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cle.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cule.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cult.d	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cule.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cult.d	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cun.d	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_ordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cor.d	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
+diff --git a/gcc/testsuite/gcc.target/loongarch/vfcmp-f.c b/gcc/testsuite/gcc.target/loongarch/vfcmp-f.c
+index b9110b90c..8d2671998 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vfcmp-f.c
++++ b/gcc/testsuite/gcc.target/loongarch/vfcmp-f.c
+@@ -2,7 +2,8 @@
+    For details read C23 Annex F.3 and LoongArch Vol. 1 section 3.2.2.1.  */
+ 
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mlsx -ffixed-f0 -ffixed-f1 -ffixed-f2 -fno-vect-cost-model" } */
++/* { dg-options "-O2 -mlsx -fno-vect-cost-model" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+ 
+ #ifndef F
+ #define F float
+@@ -19,160 +20,354 @@
+ typedef F VF __attribute__ ((vector_size (VL)));
+ typedef I VI __attribute__ ((vector_size (VL)));
+ 
+-register VF a asm ("f0");
+-register VF b asm ("f1");
+-register VI c asm ("f2");
++#define ARGS const VF *a, const VF *b, VI *c
+ 
+ void
+-compare_quiet_equal (void)
++compare_quiet_equal (ARGS)
+ {
+-  c = (a == b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = (_a == *b);
+ }
+ 
+ void
+-compare_quiet_not_equal (void)
++compare_quiet_not_equal (ARGS)
+ {
+-  c = (a != b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = (_a != *b);
+ }
+ 
+ void
+-compare_signaling_greater (void)
++compare_signaling_greater (ARGS)
+ {
+-  c = (a > b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = (_a > *b);
+ }
+ 
+ void
+-compare_signaling_greater_equal (void)
++compare_signaling_greater_equal (ARGS)
+ {
+-  c = (a >= b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = (_a >= *b);
+ }
+ 
+ void
+-compare_signaling_less (void)
++compare_signaling_less (ARGS)
+ {
+-  c = (a < b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = (_a < *b);
+ }
+ 
+ void
+-compare_signaling_less_equal (void)
++compare_signaling_less_equal (ARGS)
+ {
+-  c = (a <= b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = (_a <= *b);
+ }
+ 
+ void
+-compare_signaling_not_greater (void)
++compare_signaling_not_greater (ARGS)
+ {
+-  c = ~(a > b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = ~(_a > *b);
+ }
+ 
+ void
+-compare_signaling_less_unordered (void)
++compare_signaling_less_unordered (ARGS)
+ {
+-  c = ~(a >= b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = ~(_a >= *b);
+ }
+ 
+ void
+-compare_signaling_not_less (void)
++compare_signaling_not_less (ARGS)
+ {
+-  c = ~(a < b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = ~(_a < *b);
+ }
+ 
+ void
+-compare_signaling_greater_unordered (void)
++compare_signaling_greater_unordered (ARGS)
+ {
+-  c = ~(a <= b);
++  VF _a = *a;
++  asm("" ::: "memory");
++  *c = ~(_a <= *b);
+ }
+ 
+ void
+-compare_quiet_less (void)
++compare_quiet_less (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isless (a[i], b[i]) ? -1 : 0;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isless (_a[i], (*b)[i]) ? -1 : 0;
+ }
+ 
+ void
+-compare_quiet_less_equal (void)
++compare_quiet_less_equal (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_islessequal (a[i], b[i]) ? -1 : 0;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_islessequal (_a[i], (*b)[i]) ? -1 : 0;
+ }
+ 
+ void
+-compare_quiet_greater (void)
++compare_quiet_greater (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isgreater (a[i], b[i]) ? -1 : 0;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isgreater (_a[i], (*b)[i]) ? -1 : 0;
+ }
+ 
+ void
+-compare_quiet_greater_equal (void)
++compare_quiet_greater_equal (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isgreaterequal (a[i], b[i]) ? -1 : 0;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isgreaterequal (_a[i], (*b)[i]) ? -1 : 0;
+ }
+ 
+ void
+-compare_quiet_not_less (void)
++compare_quiet_not_less (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isless (a[i], b[i]) ? 0 : -1;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isless (_a[i], (*b)[i]) ? 0 : -1;
+ }
+ 
+ void
+-compare_quiet_greater_unordered (void)
++compare_quiet_greater_unordered (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_islessequal (a[i], b[i]) ? 0 : -1;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_islessequal (_a[i], (*b)[i]) ? 0 : -1;
+ }
+ 
+ void
+-compare_quiet_not_greater (void)
++compare_quiet_not_greater (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isgreater (a[i], b[i]) ? 0 : -1;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isgreater (_a[i], (*b)[i]) ? 0 : -1;
+ }
+ 
+ void
+-compare_quiet_less_unordered (void)
++compare_quiet_less_unordered (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isgreaterequal (a[i], b[i]) ? 0 : -1;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isgreaterequal (_a[i], (*b)[i]) ? 0 : -1;
+ }
+ 
+ void
+-compare_quiet_unordered (void)
++compare_quiet_unordered (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isunordered (a[i], b[i]) ? -1 : 0;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isunordered (_a[i], (*b)[i]) ? -1 : 0;
+ }
+ 
+ void
+-compare_quiet_ordered (void)
++compare_quiet_ordered (ARGS)
+ {
+-  for (int i = 0; i < sizeof (c) / sizeof (c[0]); i++)
+-    c[i] = __builtin_isunordered (a[i], b[i]) ? 0 : -1;
++  VF _a = *a;
++  asm("" ::: "memory");
++  for (int i = 0; i < sizeof (*c) / sizeof ((*c)[0]); i++)
++    (*c)[i] = __builtin_isunordered (_a[i], (*b)[i]) ? 0 : -1;
+ }
+ 
+-/* The "-" matches the .size directive after the function
+-   body, so we can ensure the instruction is in the correct function.  */
++/*
++** compare_quiet_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.ceq.s	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
+ 
+-/* { dg-final { scan-assembler "compare_quiet_equal:.*\tvfcmp\\.ceq\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\tvfcmp\\.cune\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater:.*\tvfcmp\\.slt\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\tvfcmp\\.sle\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less:.*\tvfcmp\\.slt\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\tvfcmp\\.sle\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\tvfcmp\\.sule\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\tvfcmp\\.sult\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_signaling_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_less:.*\tvfcmp\\.sule\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\tvfcmp\\.sult\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_signaling_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less:.*\tvfcmp\\.clt\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\tvfcmp\\.cle\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater:.*\tvfcmp\\.clt\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\tvfcmp\\.cle\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_less:.*\tvfcmp\\.cule\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\tvfcmp\\.cult\\.s\t\\\$vr2,\\\$vr1,\\\$vr0.*-compare_quiet_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\tvfcmp\\.cule\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\tvfcmp\\.cult\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_unordered:.*\tvfcmp\\.cun\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_ordered:.*\tvfcmp\\.cor\\.s\t\\\$vr2,\\\$vr0,\\\$vr1.*-compare_quiet_ordered\n" } } */
++/*
++** compare_quiet_not_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cune.s	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.slt.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sle.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.slt.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sle.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sule.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sult.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sule.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.sult.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.clt.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cle.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.clt.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_equal:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cle.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_less:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cule.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cult.s	(\$vr[0-9]+),\2,\1
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_greater:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cule.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cult.s	(\$vr[0-9]+),\1,\2
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_unordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cun.s	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_ordered:
++** 	vld	(\$vr[0-9]+),\$r4,0
++** 	vld	(\$vr[0-9]+),\$r5,0
++** 	vfcmp.cor.s	(\$vr[0-9]+),(\1,\2|\2,\1)
++**	vst	\3,\$r6,0
++**	jr	\$r1
++*/
+diff --git a/gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c b/gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c
+index d8017caaa..b27efebad 100644
+--- a/gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c
++++ b/gcc/testsuite/gcc.target/loongarch/xvfcmp-d.c
+@@ -1,5 +1,6 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mlasx -ffixed-f0 -ffixed-f1 -ffixed-f2 -fno-vect-cost-model" } */
++/* { dg-options "-O2 -mlasx -fno-vect-cost-model" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+ 
+ #define F double
+ #define I long long
+@@ -7,23 +8,182 @@
+ 
+ #include "vfcmp-f.c"
+ 
+-/* { dg-final { scan-assembler "compare_quiet_equal:.*\txvfcmp\\.ceq\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\txvfcmp\\.cune\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater:.*\txvfcmp\\.slt\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\txvfcmp\\.sle\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less:.*\txvfcmp\\.slt\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\txvfcmp\\.sle\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\txvfcmp\\.sule\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\txvfcmp\\.sult\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_less:.*\txvfcmp\\.sule\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\txvfcmp\\.sult\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less:.*\txvfcmp\\.clt\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\txvfcmp\\.cle\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater:.*\txvfcmp\\.clt\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\txvfcmp\\.cle\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_less:.*\txvfcmp\\.cule\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\txvfcmp\\.cult\\.d\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\txvfcmp\\.cule\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\txvfcmp\\.cult\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_unordered:.*\txvfcmp\\.cun\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_ordered:.*\txvfcmp\\.cor\\.d\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_ordered\n" } } */
++/*
++** compare_quiet_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.ceq.d	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cune.d	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.slt.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sle.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.slt.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sle.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sule.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sult.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sule.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sult.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.clt.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cle.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.clt.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cle.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cule.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cult.d	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cule.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cult.d	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cun.d	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_ordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cor.d	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
+diff --git a/gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c b/gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c
+index b54556475..1ca1e6c8b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c
++++ b/gcc/testsuite/gcc.target/loongarch/xvfcmp-f.c
+@@ -1,27 +1,189 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mlasx -ffixed-f0 -ffixed-f1 -ffixed-f2" } */
++/* { dg-options "-O2 -mlasx -fno-vect-cost-model" } */
++/* { dg-final { check-function-bodies "**" "" } } */
+ 
++#define F float
++#define I int
+ #define VL 32
+ 
+ #include "vfcmp-f.c"
+ 
+-/* { dg-final { scan-assembler "compare_quiet_equal:.*\txvfcmp\\.ceq\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_equal:.*\txvfcmp\\.cune\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater:.*\txvfcmp\\.slt\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_equal:.*\txvfcmp\\.sle\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less:.*\txvfcmp\\.slt\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_equal:.*\txvfcmp\\.sle\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_greater:.*\txvfcmp\\.sule\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_less_unordered:.*\txvfcmp\\.sult\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_signaling_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_not_less:.*\txvfcmp\\.sule\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_signaling_greater_unordered:.*\txvfcmp\\.sult\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_signaling_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less:.*\txvfcmp\\.clt\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_equal:.*\txvfcmp\\.cle\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater:.*\txvfcmp\\.clt\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_equal:.*\txvfcmp\\.cle\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_equal\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_less:.*\txvfcmp\\.cule\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_not_less\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_greater_unordered:.*\txvfcmp\\.cult\\.s\t\\\$xr2,\\\$xr1,\\\$xr0.*-compare_quiet_greater_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_not_greater:.*\txvfcmp\\.cule\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_not_greater\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_less_unordered:.*\txvfcmp\\.cult\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_less_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_unordered:.*\txvfcmp\\.cun\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_unordered\n" } } */
+-/* { dg-final { scan-assembler "compare_quiet_ordered:.*\txvfcmp\\.cor\\.s\t\\\$xr2,\\\$xr0,\\\$xr1.*-compare_quiet_ordered\n" } } */
++/*
++** compare_quiet_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.ceq.s	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cune.s	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.slt.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sle.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.slt.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sle.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sule.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_less_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sult.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_not_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sule.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_signaling_greater_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.sult.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.clt.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cle.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.clt.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_equal:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cle.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_less:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cule.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_greater_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cult.s	(\$xr[0-9]+),\2,\1
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_not_greater:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cule.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_less_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cult.s	(\$xr[0-9]+),\1,\2
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_unordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cun.s	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
++
++/*
++** compare_quiet_ordered:
++** 	xvld	(\$xr[0-9]+),\$r4,0
++** 	xvld	(\$xr[0-9]+),\$r5,0
++** 	xvfcmp.cor.s	(\$xr[0-9]+),(\1,\2|\2,\1)
++**	xvst	\3,\$r6,0
++**	jr	\$r1
++*/
+-- 
+2.43.0
+
diff --git a/SME-0040-mode-switching-Add-a-backprop-hook.patch b/0143-Backport-SME-mode-switching-Add-a-backprop-hook.patch
similarity index 97%
rename from SME-0040-mode-switching-Add-a-backprop-hook.patch
rename to 0143-Backport-SME-mode-switching-Add-a-backprop-hook.patch
index 3655b75cafc5a168d4dffc935ef75858ab873c85..8fb51ae89365c3b133d5b07ced46dc3b024e335e 100644
--- a/SME-0040-mode-switching-Add-a-backprop-hook.patch
+++ b/0143-Backport-SME-mode-switching-Add-a-backprop-hook.patch
@@ -1,7 +1,7 @@
-From cc0a429258841c45e36c64ba8fa92ee40a895404 Mon Sep 17 00:00:00 2001
+From cb4189b45a3a411958ab6aa85108f6dc7516acf5 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 11 Nov 2023 17:29:00 +0000
-Subject: [PATCH 040/144] mode-switching: Add a backprop hook
+Subject: [PATCH 044/157] [Backport][SME] mode-switching: Add a backprop hook
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=fc8458e20a524d053f576d64a606e21f8bd03b84
 
@@ -59,10 +59,10 @@ gcc/
  4 files changed, 334 insertions(+)
 
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 649346c49..00e3171ca 100644
+index d7053ec9e..5f0972356 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -10532,6 +10532,34 @@ The hook should return the number of modes if no suitable mode exists
+@@ -10322,6 +10322,34 @@ The hook should return the number of modes if no suitable mode exists
  for the given arguments.
  @end deftypefn
  
@@ -98,10 +98,10 @@ index 649346c49..00e3171ca 100644
  If this hook is defined, it is evaluated for every @var{entity} that
  needs mode switching.  It should return the mode that @var{entity} is
 diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
-index f5a512385..21f0806a5 100644
+index d420e62fd..fcab21744 100644
 --- a/gcc/doc/tm.texi.in
 +++ b/gcc/doc/tm.texi.in
-@@ -7134,6 +7134,8 @@ mode or ``no mode'', depending on context.
+@@ -6924,6 +6924,8 @@ mode or ``no mode'', depending on context.
  
  @hook TARGET_MODE_CONFLUENCE
  
@@ -439,10 +439,10 @@ index 065767902..c2a0f0294 100644
  
        /* Now output the remaining mode sets in all the segments.  */
 diff --git a/gcc/target.def b/gcc/target.def
-index fd308d873..bb7c34f49 100644
+index 1e2091ed3..4d77c1523 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -6985,6 +6985,35 @@ The hook should return the number of modes if no suitable mode exists\n\
+@@ -7042,6 +7042,35 @@ The hook should return the number of modes if no suitable mode exists\n\
  for the given arguments.",
   int, (int entity, int mode1, int mode2), NULL)
  
@@ -479,5 +479,5 @@ index fd308d873..bb7c34f49 100644
  (entry,
   "If this hook is defined, it is evaluated for every @var{entity} that\n\
 -- 
-2.19.1
+2.33.0
 
diff --git a/0143-LoongArch-Use-lib-instead-of-lib64-as-the-library-se.patch b/0143-LoongArch-Use-lib-instead-of-lib64-as-the-library-se.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7b1e7f015efb1921460fddffcc420799475dd24d
--- /dev/null
+++ b/0143-LoongArch-Use-lib-instead-of-lib64-as-the-library-se.patch
@@ -0,0 +1,80 @@
+From 415d38d84b2e363a2d512b54baac5532553f1402 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Wed, 6 Mar 2024 09:19:59 +0800
+Subject: [PATCH 143/188] LoongArch: Use /lib instead of /lib64 as the library
+ search path for MUSL.
+
+gcc/ChangeLog:
+
+	* config.gcc: Add a case for loongarch*-*-linux-musl*.
+	* config/loongarch/linux.h: Disable the multilib-compatible
+	treatment for *musl* targets.
+	* config/loongarch/musl.h: New file.
+---
+ gcc/config.gcc               |  3 +++
+ gcc/config/loongarch/linux.h |  4 +++-
+ gcc/config/loongarch/musl.h  | 23 +++++++++++++++++++++++
+ 3 files changed, 29 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/config/loongarch/musl.h
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 039187fa2..499b36b45 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -2509,6 +2509,9 @@ riscv*-*-freebsd*)
+ 
+ loongarch*-*-linux*)
+ 	tm_file="elfos.h gnu-user.h linux.h linux-android.h glibc-stdint.h ${tm_file}"
++	case ${target} in
++	  *-linux-musl*) tm_file="${tm_file} loongarch/musl.h"
++	esac
+ 	tm_file="${tm_file} loongarch/gnu-user.h loongarch/linux.h loongarch/loongarch-driver.h"
+ 	extra_options="${extra_options} linux-android.opt"
+ 	tmake_file="${tmake_file} loongarch/t-multilib loongarch/t-linux"
+diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h
+index 00039ac18..38aa4da2c 100644
+--- a/gcc/config/loongarch/linux.h
++++ b/gcc/config/loongarch/linux.h
+@@ -21,7 +21,9 @@ along with GCC; see the file COPYING3.  If not see
+  * This ensures that a compiler configured with --disable-multilib
+  * can work in a multilib environment.  */
+ 
+-#if defined(LA_DISABLE_MULTILIB) && defined(LA_DISABLE_MULTIARCH)
++#if !defined(LA_DEFAULT_TARGET_MUSL) \
++  && defined(LA_DISABLE_MULTILIB) \
++  && defined(LA_DISABLE_MULTIARCH)
+ 
+   #if DEFAULT_ABI_BASE == ABI_BASE_LP64D
+     #define ABI_LIBDIR "lib64"
+diff --git a/gcc/config/loongarch/musl.h b/gcc/config/loongarch/musl.h
+new file mode 100644
+index 000000000..fa43bc866
+--- /dev/null
++++ b/gcc/config/loongarch/musl.h
+@@ -0,0 +1,23 @@
++/* Definitions for MUSL C library support.
++   Copyright (C) 2024 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++
++#ifndef LA_DEFAULT_TARGET_MUSL
++#define LA_DEFAULT_TARGET_MUSL
++#endif
+-- 
+2.43.0
+
diff --git a/SME-0041-aarch64-Add-a-result_mode-helper-function.patch b/0144-Backport-SME-aarch64-Add-a-result_mode-helper-functi.patch
similarity index 94%
rename from SME-0041-aarch64-Add-a-result_mode-helper-function.patch
rename to 0144-Backport-SME-aarch64-Add-a-result_mode-helper-functi.patch
index 7257cb1eb72645d7233b705c07f71cd0fdb58873..ad6da472006f84e5bd0e2513cfdbfb914d5bdc30 100644
--- a/SME-0041-aarch64-Add-a-result_mode-helper-function.patch
+++ b/0144-Backport-SME-aarch64-Add-a-result_mode-helper-functi.patch
@@ -1,7 +1,8 @@
-From a1fac4c8d59aae0f172d9fc4676c29b09f4863db Mon Sep 17 00:00:00 2001
+From 4553f252c10968037edceba4abe3984dc9bbad2a Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Fri, 1 Dec 2023 08:36:15 +0000
-Subject: [PATCH 041/144] aarch64: Add a result_mode helper function
+Subject: [PATCH 045/157] [Backport][SME] aarch64: Add a result_mode helper
+ function
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=a1bc121c00e30bd1bdaa62d87cbe64eb88e74f45
 
@@ -76,5 +77,5 @@ index 0d130b871..52994cde0 100644
  
  #endif
 -- 
-2.19.1
+2.33.0
 
diff --git a/0144-LoongArch-testsuite-Fix-problems-with-incorrect-resu.patch b/0144-LoongArch-testsuite-Fix-problems-with-incorrect-resu.patch
new file mode 100644
index 0000000000000000000000000000000000000000..393d41c8a5066feaf908a515253a0fc08960e63d
--- /dev/null
+++ b/0144-LoongArch-testsuite-Fix-problems-with-incorrect-resu.patch
@@ -0,0 +1,551 @@
+From 2170e0e811cb1b592f7577571f10b5ab95da9eaa Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Fri, 25 Oct 2024 06:05:59 +0000
+Subject: [PATCH 144/188] LoongArch: testsuite:Fix problems with incorrect
+ results in  vector test cases.
+
+In simd_correctness_check.h, the role of the macro ASSERTEQ_64 is to check the
+result of the passed vector values for the 64-bit data of each array element.
+It turns out that it uses the abs() function to check only the lower 32 bits
+of the data at a time, so it replaces abs() with the llabs() function.
+
+However, the following two problems may occur after modification:
+
+1.FAIL in lasx-xvfrint_s.c and lsx-vfrint_s.c
+The reason for the error is because vector test cases that use __m{128,256} to
+define vector types are composed of 32-bit primitive types, they should use
+ASSERTEQ_32 instead of ASSERTEQ_64 to check for correctness.
+
+2.FAIL in lasx-xvshuf_b.c and lsx-vshuf.c
+The cause of the error is that the expected result of the function setting in
+the test case is incorrect.
+
+gcc/testsuite/ChangeLog:
+
+        * gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c: Replace
+        ASSERTEQ_64 with the macro ASSERTEQ_32.
+        * gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c: Modify the expected
+        test results of some functions according to the function of the vector
+        instruction.
+        * gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c: Same
+        modification as lasx-xvfrint_s.c.
+        * gcc.target/loongarch/vector/lsx/lsx-vshuf.c: Same
+        modification as lasx-xvshuf_b.c.
+        * gcc.target/loongarch/vector/simd_correctness_check.h: Use the llabs()
+        function instead of abs() to check the correctness of the results.
+---
+ .../loongarch/vector/lasx/lasx-xvfrint_s.c    | 58 +++++++++----------
+ .../loongarch/vector/lsx/lsx-vfrint_s.c       | 50 ++++++++--------
+ .../loongarch/vector/simd_correctness_check.h |  2 +-
+ 3 files changed, 55 insertions(+), 55 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+index fbfe300ea..4538528a6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+@@ -184,7 +184,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffffff;
+   *((int *)&__m256_op0[6]) = 0xffffffff;
+@@ -203,7 +203,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffffff;
+   *((int *)&__m256_op0[6]) = 0xffffffff;
+@@ -222,7 +222,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xffffffff;
+   *((int *)&__m256_result[0]) = 0xffffffff;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x01010101;
+   *((int *)&__m256_op0[6]) = 0x01010101;
+@@ -241,7 +241,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -260,7 +260,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffffff;
+   *((int *)&__m256_op0[6]) = 0xffffffff;
+@@ -279,7 +279,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffffff;
+   *((int *)&__m256_op0[6]) = 0xffffffff;
+@@ -298,7 +298,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xffffffff;
+   *((int *)&__m256_result[0]) = 0xffffffff;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x01010101;
+   *((int *)&__m256_op0[6]) = 0x01010101;
+@@ -317,7 +317,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrne_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x55555555;
+   *((int *)&__m256_op0[6]) = 0x36aaaaac;
+@@ -336,7 +336,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x55555555;
+   *((int *)&__m256_result[0]) = 0x80000000;
+   __m256_out = __lasx_xvfrintrp_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -355,7 +355,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrp_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffc741;
+   *((int *)&__m256_op0[6]) = 0x8a023680;
+@@ -374,7 +374,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrp_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0xffffffff;
+@@ -393,7 +393,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0xffffffff;
+   __m256_out = __lasx_xvfrintrp_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00200101;
+   *((int *)&__m256_op0[6]) = 0x01610000;
+@@ -412,7 +412,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x3f800000;
+   *((int *)&__m256_result[0]) = 0x3f800000;
+   __m256_out = __lasx_xvfrintrp_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -431,7 +431,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xfefefefe;
+   *((int *)&__m256_result[0]) = 0x3f800000;
+   __m256_out = __lasx_xvfrintrp_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x1c1c1c1c;
+   *((int *)&__m256_op0[6]) = 0x1c1c1c1c;
+@@ -450,7 +450,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xfffffffe;
+   *((int *)&__m256_result[0]) = 0xffffff00;
+   __m256_out = __lasx_xvfrintrp_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -469,7 +469,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrm_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -488,7 +488,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrm_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffffff;
+   *((int *)&__m256_op0[6]) = 0xffffffff;
+@@ -507,7 +507,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0xffffffff;
+   __m256_out = __lasx_xvfrintrm_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x5d20a0a1;
+   *((int *)&__m256_op0[6]) = 0x5d20a0a1;
+@@ -526,7 +526,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrm_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x001d001d;
+@@ -545,7 +545,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrm_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -564,7 +564,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrm_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -583,7 +583,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrm_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -602,7 +602,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrz_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffffff;
+   *((int *)&__m256_op0[6]) = 0xfffffffe;
+@@ -621,7 +621,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xffffffff;
+   *((int *)&__m256_result[0]) = 0xfffffffe;
+   __m256_out = __lasx_xvfrintrz_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -640,7 +640,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0x00000000;
+   __m256_out = __lasx_xvfrintrz_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x00000000;
+   *((int *)&__m256_op0[6]) = 0x00000000;
+@@ -659,7 +659,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0x00000000;
+   *((int *)&__m256_result[0]) = 0xffffffff;
+   __m256_out = __lasx_xvfrintrz_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0x80000000;
+   *((int *)&__m256_op0[6]) = 0x80000000;
+@@ -678,7 +678,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xffffffff;
+   *((int *)&__m256_result[0]) = 0xffffffff;
+   __m256_out = __lasx_xvfrintrz_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xffffffff;
+   *((int *)&__m256_op0[6]) = 0xffffffff;
+@@ -697,7 +697,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xffffffff;
+   *((int *)&__m256_result[0]) = 0xffffffff;
+   __m256_out = __lasx_xvfrintrz_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   *((int *)&__m256_op0[7]) = 0xf5fffc00;
+   *((int *)&__m256_op0[6]) = 0xfc000000;
+@@ -716,7 +716,7 @@ main ()
+   *((int *)&__m256_result[1]) = 0xf5fffc00;
+   *((int *)&__m256_result[0]) = 0xfc000000;
+   __m256_out = __lasx_xvfrintrz_s (__m256_op0);
+-  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
+ 
+   return 0;
+ }
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+index 61f28325a..5ba91ee51 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+@@ -79,7 +79,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00130013;
+   *((int *)&__m128_op0[2]) = 0x00130013;
+@@ -90,7 +90,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x20202020;
+   *((int *)&__m128_op0[2]) = 0x20202020;
+@@ -101,7 +101,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00000000;
+@@ -112,7 +112,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0xffffffff;
+   *((int *)&__m128_op0[2]) = 0xffffffff;
+@@ -123,7 +123,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xffffffff;
+   *((int *)&__m128_result[0]) = 0xffffffff;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00000001;
+@@ -134,7 +134,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00000000;
+@@ -145,7 +145,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0xfffbfffb;
+   *((int *)&__m128_op0[2]) = 0xfffbfffb;
+@@ -156,7 +156,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xfffbfffb;
+   *((int *)&__m128_result[0]) = 0xfffbfffb;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x0ff780a1;
+   *((int *)&__m128_op0[2]) = 0x0efc01af;
+@@ -167,7 +167,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0xfe7f0000;
+   __m128_out = __lsx_vfrintrne_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00000000;
+@@ -178,7 +178,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0xefffffff;
+@@ -189,7 +189,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0xffffffff;
+   *((int *)&__m128_op0[2]) = 0xffffff00;
+@@ -200,7 +200,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xffffffff;
+   *((int *)&__m128_result[0]) = 0xffffff00;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0xffffb96b;
+   *((int *)&__m128_op0[2]) = 0xffff57c9;
+@@ -211,7 +211,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xffff6080;
+   *((int *)&__m128_result[0]) = 0xffff4417;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00ff00ff;
+   *((int *)&__m128_op0[2]) = 0x00ff00ff;
+@@ -222,7 +222,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x62cbf96e;
+   *((int *)&__m128_result[0]) = 0x4acfaf40;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00002000;
+@@ -233,7 +233,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x3f800000;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0xffffffff;
+   *((int *)&__m128_op0[2]) = 0xffffffff;
+@@ -244,7 +244,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xffffffff;
+   *((int *)&__m128_result[0]) = 0xffffffff;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x63636363;
+   *((int *)&__m128_op0[2]) = 0x63abdf16;
+@@ -255,7 +255,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x42000000;
+   *((int *)&__m128_result[0]) = 0x3f800000;
+   __m128_out = __lsx_vfrintrp_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00000000;
+@@ -266,7 +266,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrm_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0xa5c4c774;
+   *((int *)&__m128_op0[2]) = 0x856ba83b;
+@@ -277,7 +277,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xbf800000;
+   *((int *)&__m128_result[0]) = 0x54691124;
+   __m128_out = __lsx_vfrintrm_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00010002;
+@@ -288,7 +288,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xffffffff;
+   *((int *)&__m128_result[0]) = 0xffd60015;
+   __m128_out = __lsx_vfrintrm_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0xffffffff;
+   *((int *)&__m128_op0[2]) = 0x3c992b2e;
+@@ -299,7 +299,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xffffffff;
+   *((int *)&__m128_result[0]) = 0xffff730f;
+   __m128_out = __lsx_vfrintrz_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00000001;
+@@ -310,7 +310,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrz_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x18171615;
+   *((int *)&__m128_op0[2]) = 0x17161514;
+@@ -321,7 +321,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrz_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x62cbf96e;
+   *((int *)&__m128_op0[2]) = 0x4acfaf40;
+@@ -332,7 +332,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0xf0bc9a52;
+   *((int *)&__m128_result[0]) = 0x78285a4a;
+   __m128_out = __lsx_vfrintrz_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   *((int *)&__m128_op0[3]) = 0x00000000;
+   *((int *)&__m128_op0[2]) = 0x00000000;
+@@ -343,7 +343,7 @@ main ()
+   *((int *)&__m128_result[1]) = 0x00000000;
+   *((int *)&__m128_result[0]) = 0x00000000;
+   __m128_out = __lsx_vfrintrz_s (__m128_op0);
+-  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
+ 
+   return 0;
+ }
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
+index 551340bd5..c1adab586 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
++++ b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
+@@ -10,7 +10,7 @@
+         {                                                                     \
+           long long *temp_ref = (long long *)&ref[i],                         \
+ 		*temp_res = (long long *)&res[i];			      \
+-          if (abs (*temp_ref - *temp_res) > 0)                                \
++          if (llabs (*temp_ref - *temp_res) > 0)                                \
+             {                                                                 \
+               printf (" error: %s at line %ld , expected " #ref               \
+                       "[%ld]:0x%016lx, got: 0x%016lx\n",                      \
+-- 
+2.43.0
+
diff --git a/SME-0042-rtl-Try-to-remove-EH-edges-after-pro-epi-logue-gener.patch b/0145-Backport-SME-rtl-Try-to-remove-EH-edges-after-pro-ep.patch
similarity index 93%
rename from SME-0042-rtl-Try-to-remove-EH-edges-after-pro-epi-logue-gener.patch
rename to 0145-Backport-SME-rtl-Try-to-remove-EH-edges-after-pro-ep.patch
index 47e93fe97cb0bc40c8c6674b618a9e78068eaf7c..cdaf6eeeed406375830c20cc45282ff6704bfdef 100644
--- a/SME-0042-rtl-Try-to-remove-EH-edges-after-pro-epi-logue-gener.patch
+++ b/0145-Backport-SME-rtl-Try-to-remove-EH-edges-after-pro-ep.patch
@@ -1,8 +1,8 @@
-From a466c4edf248129c8d2c403105b6c3f9ce2dfd56 Mon Sep 17 00:00:00 2001
+From 60612cbd9cdd9b5079c0505b9d53c9cd98fba4b1 Mon Sep 17 00:00:00 2001
 From: Kewen Lin 
 Date: Tue, 15 Nov 2022 20:26:07 -0600
-Subject: [PATCH 042/144] rtl: Try to remove EH edges after {pro,epi}logue
- generation [PR90259]
+Subject: [PATCH 046/157] [Backport][SME] rtl: Try to remove EH edges after
+ {pro,epi}logue generation [PR90259]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=63e1b2e767a3f4695373c2406ff719c0a60c1858
 
@@ -80,10 +80,10 @@ gcc/testsuite/ChangeLog:
  create mode 100644 gcc/testsuite/g++.target/powerpc/pr90259.C
 
 diff --git a/gcc/function.cc b/gcc/function.cc
-index d84a3240e..171c09382 100644
+index 49c7ccf4b..28de39dd6 100644
 --- a/gcc/function.cc
 +++ b/gcc/function.cc
-@@ -6537,7 +6537,7 @@ make_pass_leaf_regs (gcc::context *ctxt)
+@@ -6529,7 +6529,7 @@ make_pass_leaf_regs (gcc::context *ctxt)
  }
  
  static unsigned int
@@ -92,7 +92,7 @@ index d84a3240e..171c09382 100644
  {
    /* prepare_shrink_wrap is sensitive to the block structure of the control
       flow graph, so clean it up first.  */
-@@ -6554,6 +6554,13 @@ rest_of_handle_thread_prologue_and_epilogue (void)
+@@ -6546,6 +6546,13 @@ rest_of_handle_thread_prologue_and_epilogue (void)
       Fix that up.  */
    fixup_partitions ();
  
@@ -106,7 +106,7 @@ index d84a3240e..171c09382 100644
    /* Shrink-wrapping can result in unreachable edges in the epilogue,
       see PR57320.  */
    cleanup_cfg (optimize ? CLEANUP_EXPENSIVE : 0);
-@@ -6622,9 +6629,9 @@ public:
+@@ -6614,9 +6621,9 @@ public:
    {}
  
    /* opt_pass methods: */
@@ -228,5 +228,5 @@ index 000000000..db75ac7fe
 +  };
 +} s;
 -- 
-2.19.1
+2.33.0
 
diff --git a/0145-LoongArch-Fixed-an-issue-with-the-implementation-of-.patch b/0145-LoongArch-Fixed-an-issue-with-the-implementation-of-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4c0d0971929d47ead418384caa9dc4fce58ec127
--- /dev/null
+++ b/0145-LoongArch-Fixed-an-issue-with-the-implementation-of-.patch
@@ -0,0 +1,130 @@
+From 44a9ae67e19c0d744bd744cb0e9ae9e0069e40f1 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 5 Mar 2024 14:43:04 +0800
+Subject: [PATCH 145/188] LoongArch: Fixed an issue with the implementation of
+ the template atomic_compare_and_swapsi.
+
+If the hardware does not support LAMCAS, atomic_compare_and_swapsi needs to be
+implemented through "ll.w+sc.w". In the implementation of the instruction sequence,
+it is necessary to determine whether the two registers are equal.
+Since LoongArch's comparison instructions do not distinguish between 32-bit
+and 64-bit, the two operand registers that need to be compared are symbolically
+extended, and one of the operand registers is obtained from memory through the
+"ll.w" instruction, which can ensure that the symbolic expansion is carried out.
+However, the value of the other operand register is not guaranteed to be the
+value of the sign extension.
+
+gcc/ChangeLog:
+
+	* config/loongarch/sync.md (atomic_cas_value_strong):
+	In loongarch64, a sign extension operation is added when
+	operands[2] is a register operand and the mode is SImode.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/loongarch/atomic-cas-int.C: New test.
+---
+ gcc/config/loongarch/sync.md                  | 46 ++++++++++++++-----
+ .../g++.target/loongarch/atomic-cas-int.C     | 32 +++++++++++++
+ 2 files changed, 67 insertions(+), 11 deletions(-)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/atomic-cas-int.C
+
+diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
+index 5da5c2780..2e008c487 100644
+--- a/gcc/config/loongarch/sync.md
++++ b/gcc/config/loongarch/sync.md
+@@ -245,18 +245,42 @@
+    (clobber (match_scratch:GPR 5 "=&r"))]
+   ""
+ {
+-  return "1:\\n\\t"
+-	 "ll.\\t%0,%1\\n\\t"
+-	 "bne\\t%0,%z2,2f\\n\\t"
+-	 "or%i3\\t%5,$zero,%3\\n\\t"
+-	 "sc.\\t%5,%1\\n\\t"
+-	 "beqz\\t%5,1b\\n\\t"
+-	 "b\\t3f\\n\\t"
+-	 "2:\\n\\t"
+-	 "%G4\\n\\t"
+-	 "3:\\n\\t";
++  output_asm_insn ("1:", operands);
++  output_asm_insn ("ll.\t%0,%1", operands);
++
++  /* Like the test case atomic-cas-int.C, in loongarch64, O1 and higher, the
++     return value of the val_without_const_folding will not be truncated and
++     will be passed directly to the function compare_exchange_strong.
++     However, the instruction 'bne' does not distinguish between 32-bit and
++     64-bit operations.  so if the upper 32 bits of the register are not
++     extended by the 32nd bit symbol, then the comparison may not be valid
++     here.  This will affect the result of the operation.  */
++
++  if (TARGET_64BIT && REG_P (operands[2])
++      && GET_MODE (operands[2]) == SImode)
++    {
++      output_asm_insn ("addi.w\t%5,%2,0", operands);
++      output_asm_insn ("bne\t%0,%5,2f", operands);
++    }
++  else
++    output_asm_insn ("bne\t%0,%z2,2f", operands);
++
++  output_asm_insn ("or%i3\t%5,$zero,%3", operands);
++  output_asm_insn ("sc.\t%5,%1", operands);
++  output_asm_insn ("beqz\t%5,1b", operands);
++  output_asm_insn ("b\t3f", operands);
++  output_asm_insn ("2:", operands);
++  output_asm_insn ("%G4", operands);
++  output_asm_insn ("3:", operands);
++
++  return "";
+ }
+-  [(set (attr "length") (const_int 28))])
++  [(set (attr "length")
++     (if_then_else
++	(and (match_test "GET_MODE (operands[2]) == SImode")
++	     (match_test "REG_P (operands[2])"))
++	(const_int 32)
++	(const_int 28)))])
+ 
+ (define_insn "atomic_cas_value_strong_amcas"
+   [(set (match_operand:QHWD 0 "register_operand" "=&r")
+diff --git a/gcc/testsuite/g++.target/loongarch/atomic-cas-int.C b/gcc/testsuite/g++.target/loongarch/atomic-cas-int.C
+new file mode 100644
+index 000000000..830ce4826
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/atomic-cas-int.C
+@@ -0,0 +1,32 @@
++/* { dg-do run } */
++/* { dg-options "-O2" } */
++
++#include 
++#include 
++
++__attribute__ ((noinline)) long
++val_without_const_folding (long val)
++{
++  return val;
++}
++
++int
++main ()
++{
++  int oldval = 0xaa;
++  int newval = 0xbb;
++  std::atomic amo;
++
++  amo.store (oldval);
++
++  long longval = val_without_const_folding (0xff80000000000000 + oldval);
++  oldval = static_cast (longval);
++
++  amo.compare_exchange_strong (oldval, newval);
++
++  if (newval != amo.load (std::memory_order_relaxed))
++    __builtin_abort ();
++
++  return 0;
++}
++
+-- 
+2.43.0
+
diff --git a/SME-0043-Fix-PR-middle-end-107705-ICE-after-reclaration-error.patch b/0146-Backport-SME-Fix-PR-middle-end-107705-ICE-after-recl.patch
similarity index 90%
rename from SME-0043-Fix-PR-middle-end-107705-ICE-after-reclaration-error.patch
rename to 0146-Backport-SME-Fix-PR-middle-end-107705-ICE-after-recl.patch
index 1a32276ce197ce49c8098c3528d570428094e223..4d3168b9c5b38d3df2e61b1829b1f0ed64fe4384 100644
--- a/SME-0043-Fix-PR-middle-end-107705-ICE-after-reclaration-error.patch
+++ b/0146-Backport-SME-Fix-PR-middle-end-107705-ICE-after-recl.patch
@@ -1,7 +1,8 @@
-From 74999e9c2b8981873eeb1ede5ff053630cd6fcc6 Mon Sep 17 00:00:00 2001
+From beb962ec516f152cef482b229c9adf0390dc3b2c Mon Sep 17 00:00:00 2001
 From: Andrew Pinski 
 Date: Thu, 17 Nov 2022 22:03:08 +0000
-Subject: [PATCH 043/144] Fix PR middle-end/107705: ICE after reclaration error
+Subject: [PATCH 047/157] [Backport][SME] Fix PR middle-end/107705: ICE after
+ reclaration error
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ceba66ee230bb96b0889fc8ec7333c7ffae96d6e
 
@@ -37,7 +38,7 @@ gcc/testsuite/ChangeLog:
  create mode 100644 gcc/testsuite/gcc.dg/redecl-22.c
 
 diff --git a/gcc/function.cc b/gcc/function.cc
-index 171c09382..89a39bc8f 100644
+index 28de39dd6..99aa738eb 100644
 --- a/gcc/function.cc
 +++ b/gcc/function.cc
 @@ -2090,6 +2090,9 @@ aggregate_value_p (const_tree exp, const_tree fntype)
@@ -66,5 +67,5 @@ index 000000000..7758570fa
 +  int p = 1; // { dg-error "" }
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/0146-LoongArch-testsuite-Add-compilation-options-to-the-r.patch b/0146-LoongArch-testsuite-Add-compilation-options-to-the-r.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5b0fadf6c960e4a0384d106d0890893b23f5a944
--- /dev/null
+++ b/0146-LoongArch-testsuite-Add-compilation-options-to-the-r.patch
@@ -0,0 +1,30 @@
+From eab751e71d4f4d5e9b2eda55d793fd57541fbc56 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 7 Mar 2024 09:44:03 +0800
+Subject: [PATCH 146/188] LoongArch: testsuite: Add compilation options to the
+ regname-fp-s9.c.
+
+When the value of the macro DEFAULT_CFLAGS is set to '-ansi -pedantic-errors',
+regname-s9-fp.c will test to fail. To solve this problem, add the compilation
+option '-Wno-pedantic -std=gnu90' to this test case.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/regname-fp-s9.c: Add compilation option
+	'-Wno-pedantic -std=gnu90'.
+---
+ gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c b/gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c
+index d2e3b80f8..77a74f1f6 100644
+--- a/gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c
++++ b/gcc/testsuite/gcc.target/loongarch/regname-fp-s9.c
+@@ -1,3 +1,4 @@
+ /* { dg-do compile } */
++/* { dg-additional-options "-Wno-pedantic -std=gnu90" } */
+ register long s9 asm("s9"); /* { dg-note "conflicts with 's9'" } */
+ register long fp asm("fp"); /* { dg-warning "register of 'fp' used for multiple global register variables" } */
+-- 
+2.43.0
+
diff --git a/SME-0044-function-Change-return-type-of-predicate-function-fr.patch b/0147-Backport-SME-function-Change-return-type-of-predicat.patch
similarity index 95%
rename from SME-0044-function-Change-return-type-of-predicate-function-fr.patch
rename to 0147-Backport-SME-function-Change-return-type-of-predicat.patch
index 6281b8b658d907e86d3d25158ad026e3140cf5d8..809a73b971de58d4a66553ec611289a1138c2258 100644
--- a/SME-0044-function-Change-return-type-of-predicate-function-fr.patch
+++ b/0147-Backport-SME-function-Change-return-type-of-predicat.patch
@@ -1,8 +1,8 @@
-From a345a457f6a64da1d82025b45f4f4261030c6c5b Mon Sep 17 00:00:00 2001
+From c074871572ef22cbcca8f0f4bc493d60caeddd78 Mon Sep 17 00:00:00 2001
 From: Uros Bizjak 
 Date: Wed, 21 Jun 2023 21:55:30 +0200
-Subject: [PATCH 044/144] function: Change return type of predicate function
- from int to bool
+Subject: [PATCH 048/157] [Backport][SME] function: Change return type of
+ predicate function from int to bool
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ce47d3c2cf59bb2cc94afc4bbef88b0e4950f086
 
@@ -34,7 +34,7 @@ gcc/ChangeLog:
  2 files changed, 42 insertions(+), 45 deletions(-)
 
 diff --git a/gcc/function.cc b/gcc/function.cc
-index 89a39bc8f..1e925749c 100644
+index 99aa738eb..fc8eb5812 100644
 --- a/gcc/function.cc
 +++ b/gcc/function.cc
 @@ -578,8 +578,8 @@ public:
@@ -228,7 +228,7 @@ index 89a39bc8f..1e925749c 100644
  }
  
  /* Return true if we should assign DECL a pseudo register; false if it
-@@ -5749,26 +5747,26 @@ contains (const rtx_insn *insn, hash_table *hash)
+@@ -5741,26 +5739,26 @@ contains (const rtx_insn *insn, hash_table *hash)
    return hash->find (const_cast (insn)) != NULL;
  }
  
@@ -261,7 +261,7 @@ index 89a39bc8f..1e925749c 100644
  }
  
  void
-@@ -6394,14 +6392,13 @@ current_function_name (void)
+@@ -6386,14 +6384,13 @@ current_function_name (void)
  }
  
  
@@ -277,7 +277,7 @@ index 89a39bc8f..1e925749c 100644
  }
  
  /* Insert a TYPE into the used types hash table of CFUN.  */
-@@ -6526,7 +6523,8 @@ public:
+@@ -6518,7 +6515,8 @@ public:
    /* opt_pass methods: */
    virtual unsigned int execute (function *)
      {
@@ -287,7 +287,7 @@ index 89a39bc8f..1e925749c 100644
      }
  
  }; // class pass_leaf_regs
-@@ -6539,7 +6537,7 @@ make_pass_leaf_regs (gcc::context *ctxt)
+@@ -6531,7 +6529,7 @@ make_pass_leaf_regs (gcc::context *ctxt)
    return new pass_leaf_regs (ctxt);
  }
  
@@ -296,7 +296,7 @@ index 89a39bc8f..1e925749c 100644
  rest_of_handle_thread_prologue_and_epilogue (function *fun)
  {
    /* prepare_shrink_wrap is sensitive to the block structure of the control
-@@ -6571,8 +6569,6 @@ rest_of_handle_thread_prologue_and_epilogue (function *fun)
+@@ -6563,8 +6561,6 @@ rest_of_handle_thread_prologue_and_epilogue (function *fun)
    /* The stack usage info is finalized during prologue expansion.  */
    if (flag_stack_usage_info || flag_callgraph_info)
      output_stack_usage ();
@@ -305,7 +305,7 @@ index 89a39bc8f..1e925749c 100644
  }
  
  /* Record a final call to CALLEE at LOCATION.  */
-@@ -6634,7 +6630,8 @@ public:
+@@ -6626,7 +6622,8 @@ public:
    /* opt_pass methods: */
    unsigned int execute (function * fun) final override
      {
@@ -347,5 +347,5 @@ index a53fb24d2..4e8131706 100644
  extern void record_epilogue_seq (rtx_insn *);
  extern void emit_return_into_block (bool simple_p, basic_block bb);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0147-LoongArch-Emit-R_LARCH_RELAX-for-TLS-IE-with-non-ext.patch b/0147-LoongArch-Emit-R_LARCH_RELAX-for-TLS-IE-with-non-ext.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e6c6463bce6fee5dd799e581701d8dc52f6945cc
--- /dev/null
+++ b/0147-LoongArch-Emit-R_LARCH_RELAX-for-TLS-IE-with-non-ext.patch
@@ -0,0 +1,137 @@
+From 465f0653b6e7bf5adb5d1f6c9e8aff2b81a3f27f Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 26 Jan 2024 18:28:32 +0800
+Subject: [PATCH 147/188] LoongArch: Emit R_LARCH_RELAX for TLS IE with
+ non-extreme code model to allow the IE to LE linker relaxation
+
+In Binutils we need to make IE to LE relaxation only allowed when there
+is an R_LARCH_RELAX after R_LARCH_TLE_IE_PC_{HI20,LO12} so an invalid
+"partial" relaxation won't happen with the extreme code model.  So if we
+are emitting %ie_pc_{hi20,lo12} in a non-extreme code model, emit an
+R_LARCH_RELAX to allow the relaxation.  The IE to LE relaxation does not
+require the pcalau12i and the ld instruction to be adjacent, so we don't
+need to limit ourselves to use the macro.
+
+For the distro maintainers backporting changes: this change depends on
+r14-8721, without r14-8721 R_LARCH_RELAX can be emitted mistakenly in
+the extreme code model.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_print_operand_reloc):
+	Support 'Q' for R_LARCH_RELAX for TLS IE.
+	(loongarch_output_move): Use 'Q' to print R_LARCH_RELAX for TLS
+	IE.
+	* config/loongarch/loongarch.md (ld_from_got): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/tls-ie-relax.c: New test.
+	* gcc.target/loongarch/tls-ie-norelax.c: New test.
+	* gcc.target/loongarch/tls-ie-extreme.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc                 | 15 ++++++++++++++-
+ gcc/config/loongarch/loongarch.md                 |  2 +-
+ .../gcc.target/loongarch/tls-ie-extreme.c         |  5 +++++
+ .../gcc.target/loongarch/tls-ie-norelax.c         |  5 +++++
+ gcc/testsuite/gcc.target/loongarch/tls-ie-relax.c | 11 +++++++++++
+ 5 files changed, 36 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-ie-extreme.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-ie-norelax.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-ie-relax.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index d23b09cc5..c1dc30b61 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4977,7 +4977,7 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  if (type == SYMBOL_TLS_LE)
+ 	    return "lu12i.w\t%0,%h1";
+ 	  else
+-	    return "pcalau12i\t%0,%h1";
++	    return "%Q1pcalau12i\t%0,%h1";
+ 	}
+ 
+       if (src_code == CONST_INT)
+@@ -6141,6 +6141,7 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+    'L'  Print the low-part relocation associated with OP.
+    'm'	Print one less than CONST_INT OP in decimal.
+    'N'	Print the inverse of the integer branch condition for comparison OP.
++   'Q'  Print R_LARCH_RELAX for TLS IE.
+    'r'  Print address 12-31bit relocation associated with OP.
+    'R'  Print address 32-51bit relocation associated with OP.
+    'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+@@ -6278,6 +6279,18 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 					    letter);
+       break;
+ 
++    case 'Q':
++      if (!TARGET_LINKER_RELAXATION)
++	break;
++
++      if (code == HIGH)
++	op = XEXP (op, 0);
++
++      if (loongarch_classify_symbolic_expression (op) == SYMBOL_TLS_IE)
++	fprintf (file, ".reloc\t.,R_LARCH_RELAX\n\t");
++
++      break;
++
+     case 'r':
+       loongarch_print_operand_reloc (file, op, false /* hi64_part */,
+ 				     true /* lo_reloc */);
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 248ad12bb..d2c7c3b05 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2620,7 +2620,7 @@
+ 				(match_operand:P 2 "symbolic_operand")))]
+ 	UNSPEC_LOAD_FROM_GOT))]
+   ""
+-  "ld.\t%0,%1,%L2"
++  "%Q2ld.\t%0,%1,%L2"
+   [(set_attr "type" "move")]
+ )
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-ie-extreme.c b/gcc/testsuite/gcc.target/loongarch/tls-ie-extreme.c
+new file mode 100644
+index 000000000..00c545a3e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/tls-ie-extreme.c
+@@ -0,0 +1,5 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d -mcmodel=extreme -mexplicit-relocs=auto -mrelax" } */
++/* { dg-final { scan-assembler-not "R_LARCH_RELAX" { target tls_native } } } */
++
++#include "tls-ie-relax.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-ie-norelax.c b/gcc/testsuite/gcc.target/loongarch/tls-ie-norelax.c
+new file mode 100644
+index 000000000..dd6bf3634
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/tls-ie-norelax.c
+@@ -0,0 +1,5 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mcmodel=normal -mexplicit-relocs -mno-relax" } */
++/* { dg-final { scan-assembler-not "R_LARCH_RELAX" { target tls_native } } } */
++
++#include "tls-ie-relax.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-ie-relax.c b/gcc/testsuite/gcc.target/loongarch/tls-ie-relax.c
+new file mode 100644
+index 000000000..e9f7569b1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/tls-ie-relax.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mcmodel=normal -mexplicit-relocs -mrelax" } */
++/* { dg-final { scan-assembler-times "R_LARCH_RELAX" 2 { target tls_native } } } */
++
++extern __thread int errno;
++
++void
++unimplemented (void)
++{
++  errno = -38;
++}
+-- 
+2.43.0
+
diff --git a/SME-0045-Allow-prologues-and-epilogues-to-be-inserted-later.patch b/0148-Backport-SME-Allow-prologues-and-epilogues-to-be-ins.patch
similarity index 90%
rename from SME-0045-Allow-prologues-and-epilogues-to-be-inserted-later.patch
rename to 0148-Backport-SME-Allow-prologues-and-epilogues-to-be-ins.patch
index dd67c47bfb490c225d3ba8ae0b555515bb655303..13dc0e3ddeca9ed9fd8632d85e5654baf4bae3f7 100644
--- a/SME-0045-Allow-prologues-and-epilogues-to-be-inserted-later.patch
+++ b/0148-Backport-SME-Allow-prologues-and-epilogues-to-be-ins.patch
@@ -1,7 +1,8 @@
-From 399d90ef766a7b99b131d0d19cdeb6669eb5ed6e Mon Sep 17 00:00:00 2001
+From 417d51e1ecf41b3ba3ddf24eaf1e07db5c1ded9e Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 09:28:46 +0000
-Subject: [PATCH 045/144] Allow prologues and epilogues to be inserted later
+Subject: [PATCH 049/157] [Backport][SME] Allow prologues and epilogues to be
+ inserted later
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=e9d2ae6b9816e61a6148040149c63faa83f54702
 
@@ -44,10 +45,10 @@ gcc/
  6 files changed, 97 insertions(+)
 
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 00e3171ca..f8109834d 100644
+index 5f0972356..d930d233d 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -11894,6 +11894,25 @@ of the if-block in the @code{struct ce_if_block} structure that is pointed
+@@ -11684,6 +11684,25 @@ of the if-block in the @code{struct ce_if_block} structure that is pointed
  to by @var{ce_info}.
  @end defmac
  
@@ -74,10 +75,10 @@ index 00e3171ca..f8109834d 100644
  If non-null, this hook performs a target-specific pass over the
  instruction stream.  The compiler will run it at all optimization levels,
 diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
-index 21f0806a5..9d72fe18d 100644
+index fcab21744..19eabec48 100644
 --- a/gcc/doc/tm.texi.in
 +++ b/gcc/doc/tm.texi.in
-@@ -7918,6 +7918,8 @@ of the if-block in the @code{struct ce_if_block} structure that is pointed
+@@ -7708,6 +7708,8 @@ of the if-block in the @code{struct ce_if_block} structure that is pointed
  to by @var{ce_info}.
  @end defmac
  
@@ -87,7 +88,7 @@ index 21f0806a5..9d72fe18d 100644
  
  @hook TARGET_INIT_BUILTINS
 diff --git a/gcc/function.cc b/gcc/function.cc
-index 1e925749c..eac179dd3 100644
+index fc8eb5812..7c90b5f23 100644
 --- a/gcc/function.cc
 +++ b/gcc/function.cc
 @@ -84,6 +84,7 @@ along with GCC; see the file COPYING3.  If not see
@@ -98,7 +99,7 @@ index 1e925749c..eac179dd3 100644
  
  /* So we can assign to cfun in this file.  */
  #undef cfun
-@@ -6628,6 +6629,11 @@ public:
+@@ -6620,6 +6621,11 @@ public:
    {}
  
    /* opt_pass methods: */
@@ -110,7 +111,7 @@ index 1e925749c..eac179dd3 100644
    unsigned int execute (function * fun) final override
      {
        rest_of_handle_thread_prologue_and_epilogue (fun);
-@@ -6636,6 +6642,44 @@ public:
+@@ -6628,6 +6634,44 @@ public:
  
  }; // class pass_thread_prologue_and_epilogue
  
@@ -155,7 +156,7 @@ index 1e925749c..eac179dd3 100644
  } // anon namespace
  
  rtl_opt_pass *
-@@ -6644,6 +6688,12 @@ make_pass_thread_prologue_and_epilogue (gcc::context *ctxt)
+@@ -6636,6 +6680,12 @@ make_pass_thread_prologue_and_epilogue (gcc::context *ctxt)
    return new pass_thread_prologue_and_epilogue (ctxt);
  }
  
@@ -169,10 +170,10 @@ index 1e925749c..eac179dd3 100644
  
  const pass_data pass_data_zero_call_used_regs =
 diff --git a/gcc/passes.def b/gcc/passes.def
-index 375d3d62d..aed7bc5c1 100644
+index cdc600298..8797f166f 100644
 --- a/gcc/passes.def
 +++ b/gcc/passes.def
-@@ -516,6 +516,9 @@ along with GCC; see the file COPYING3.  If not see
+@@ -523,6 +523,9 @@ along with GCC; see the file COPYING3.  If not see
  	      NEXT_PASS (pass_stack_regs_run);
  	  POP_INSERT_PASSES ()
        POP_INSERT_PASSES ()
@@ -183,10 +184,10 @@ index 375d3d62d..aed7bc5c1 100644
        PUSH_INSERT_PASSES_WITHIN (pass_late_compilation)
  	  NEXT_PASS (pass_zero_call_used_regs);
 diff --git a/gcc/target.def b/gcc/target.def
-index bb7c34f49..d70363da5 100644
+index 4d77c1523..fd4899612 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -4063,6 +4063,27 @@ returns @code{VOIDmode}.",
+@@ -4120,6 +4120,27 @@ returns @code{VOIDmode}.",
   machine_mode, (machine_mode m1, machine_mode m2),
   default_cc_modes_compatible)
  
@@ -215,18 +216,18 @@ index bb7c34f49..d70363da5 100644
       delayed-branch scheduling.  */
  DEFHOOK
 diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
-index 606d1d60b..793e0acdf 100644
+index 34e60bc38..1c983ef71 100644
 --- a/gcc/tree-pass.h
 +++ b/gcc/tree-pass.h
-@@ -607,6 +607,8 @@ extern rtl_opt_pass *make_pass_gcse2 (gcc::context *ctxt);
+@@ -612,6 +612,8 @@ extern rtl_opt_pass *make_pass_gcse2 (gcc::context *ctxt);
  extern rtl_opt_pass *make_pass_split_after_reload (gcc::context *ctxt);
  extern rtl_opt_pass *make_pass_thread_prologue_and_epilogue (gcc::context
  							     *ctxt);
 +extern rtl_opt_pass *make_pass_late_thread_prologue_and_epilogue (gcc::context
 +								  *ctxt);
  extern rtl_opt_pass *make_pass_zero_call_used_regs (gcc::context *ctxt);
+ extern rtl_opt_pass *make_pass_split_complex_instructions (gcc::context *ctxt);
  extern rtl_opt_pass *make_pass_stack_adjustments (gcc::context *ctxt);
- extern rtl_opt_pass *make_pass_sched_fusion (gcc::context *ctxt);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0148-LoongArch-Remove-unused-and-incorrect-sge-u-_-X-mode.patch b/0148-LoongArch-Remove-unused-and-incorrect-sge-u-_-X-mode.patch
new file mode 100644
index 0000000000000000000000000000000000000000..df1e7b17b0175e129d90d9e9d183c91c2889d787
--- /dev/null
+++ b/0148-LoongArch-Remove-unused-and-incorrect-sge-u-_-X-mode.patch
@@ -0,0 +1,57 @@
+From acc00029aab3cdd1186f1ed4a93db2205cdd166c Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 13 Mar 2024 20:44:38 +0800
+Subject: [PATCH 148/188] LoongArch: Remove unused and incorrect
+ "sge_" define_insn
+
+If this insn is really used, we'll have something like
+
+    slti $r4,$r0,$r5
+
+in the code.  The assembler will reject it because slti wants 2
+register operands and 1 immediate operand.  But we've not got any bug
+report for this, indicating this define_insn is unused at all.
+
+Note that do_store_flag (in expr.cc) is already converting x >= 1 to
+x > 0 unconditionally, so this define_insn is indeed unused and we can
+just remove it.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (any_ge): Remove.
+	(sge_): Remove.
+---
+ gcc/config/loongarch/loongarch.md | 10 ----------
+ 1 file changed, 10 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index d2c7c3b05..1b3525dde 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -517,7 +517,6 @@
+ ;; These code iterators allow the signed and unsigned scc operations to use
+ ;; the same template.
+ (define_code_iterator any_gt [gt gtu])
+-(define_code_iterator any_ge [ge geu])
+ (define_code_iterator any_lt [lt ltu])
+ (define_code_iterator any_le [le leu])
+ 
+@@ -3355,15 +3354,6 @@
+   [(set_attr "type" "slt")
+    (set_attr "mode" "")])
+ 
+-(define_insn "*sge_"
+-  [(set (match_operand:GPR 0 "register_operand" "=r")
+-	(any_ge:GPR (match_operand:X 1 "register_operand" "r")
+-		     (const_int 1)))]
+-  ""
+-  "slti\t%0,%.,%1"
+-  [(set_attr "type" "slt")
+-   (set_attr "mode" "")])
+-
+ (define_insn "*slt_"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(any_lt:GPR (match_operand:X 1 "register_operand" "r")
+-- 
+2.43.0
+
diff --git a/SME-0046-Add-a-target-hook-for-sibcall-epilogues.patch b/0149-Backport-SME-Add-a-target-hook-for-sibcall-epilogues.patch
similarity index 88%
rename from SME-0046-Add-a-target-hook-for-sibcall-epilogues.patch
rename to 0149-Backport-SME-Add-a-target-hook-for-sibcall-epilogues.patch
index 111e5913caf0f74b765f9602ae7313112f1e07f8..d8924323e524e2e9a30da480983680e0b0e63648 100644
--- a/SME-0046-Add-a-target-hook-for-sibcall-epilogues.patch
+++ b/0149-Backport-SME-Add-a-target-hook-for-sibcall-epilogues.patch
@@ -1,7 +1,8 @@
-From 10351a5dade506b85992c0d9f029cf37c8e90ef4 Mon Sep 17 00:00:00 2001
+From e906213086639df81085a0101bf88fb66c1dbc2b Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 09:35:57 +0000
-Subject: [PATCH 046/144] Add a target hook for sibcall epilogues
+Subject: [PATCH 050/157] [Backport][SME] Add a target hook for sibcall
+ epilogues
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=2e0aefa77157396acb48833407637303edba450a
 
@@ -77,10 +78,10 @@ index 86e444a60..97984f3ab 100644
  opt_machine_mode aarch64_ptrue_all_mode (rtx);
  rtx aarch64_convert_sve_data_to_pred (rtx, machine_mode, rtx);
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index fc7540023..76bb0a5dd 100644
+index fd1114b52..055b436b1 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -9829,7 +9829,7 @@ aarch64_use_return_insn_p (void)
+@@ -10046,7 +10046,7 @@ aarch64_use_return_insn_p (void)
     from a deallocated stack, and we optimize the unwind records by
     emitting them all together if possible.  */
  void
@@ -89,7 +90,7 @@ index fc7540023..76bb0a5dd 100644
  {
    poly_int64 initial_adjust = cfun->machine->frame.initial_adjust;
    HOST_WIDE_INT callee_adjust = cfun->machine->frame.callee_adjust;
-@@ -9977,7 +9977,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+@@ -10194,7 +10194,7 @@ aarch64_expand_epilogue (bool for_sibcall)
  	   explicitly authenticate.
      */
    if (aarch64_return_address_signing_enabled ()
@@ -98,7 +99,7 @@ index fc7540023..76bb0a5dd 100644
      {
        switch (aarch64_ra_sign_key)
  	{
-@@ -9995,7 +9995,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+@@ -10212,7 +10212,7 @@ aarch64_expand_epilogue (bool for_sibcall)
      }
  
    /* Stack adjustment for exception handler.  */
@@ -107,7 +108,7 @@ index fc7540023..76bb0a5dd 100644
      {
        /* We need to unwind the stack by the offset computed by
  	 EH_RETURN_STACKADJ_RTX.  We have already reset the CFA
-@@ -10006,7 +10006,7 @@ aarch64_expand_epilogue (bool for_sibcall)
+@@ -10223,7 +10223,7 @@ aarch64_expand_epilogue (bool for_sibcall)
      }
  
    emit_use (gen_rtx_REG (DImode, LR_REGNUM));
@@ -116,7 +117,7 @@ index fc7540023..76bb0a5dd 100644
      emit_jump_insn (ret_rtx);
  }
  
-@@ -27923,6 +27923,9 @@ aarch64_libgcc_floating_mode_supported_p
+@@ -28246,6 +28246,9 @@ aarch64_libgcc_floating_mode_supported_p
  #undef TARGET_HAVE_SHADOW_CALL_STACK
  #define TARGET_HAVE_SHADOW_CALL_STACK true
  
@@ -127,10 +128,10 @@ index fc7540023..76bb0a5dd 100644
  
  #include "gt-aarch64.h"
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 5473e2720..22c22b1c6 100644
+index 7267a74d6..a78476c8a 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -869,16 +869,7 @@
+@@ -871,16 +871,7 @@
    [(clobber (const_int 0))]
    ""
    "
@@ -149,10 +150,10 @@ index 5473e2720..22c22b1c6 100644
    "
  )
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index f8109834d..3601994c3 100644
+index d930d233d..369f4b8da 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -11913,6 +11913,14 @@ the hook might return true if the prologue and epilogue need to switch
+@@ -11703,6 +11703,14 @@ the hook might return true if the prologue and epilogue need to switch
  between instruction sets.
  @end deftypefn
  
@@ -168,10 +169,10 @@ index f8109834d..3601994c3 100644
  If non-null, this hook performs a target-specific pass over the
  instruction stream.  The compiler will run it at all optimization levels,
 diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
-index 9d72fe18d..8062ee79c 100644
+index 19eabec48..748b0777a 100644
 --- a/gcc/doc/tm.texi.in
 +++ b/gcc/doc/tm.texi.in
-@@ -7920,6 +7920,8 @@ to by @var{ce_info}.
+@@ -7710,6 +7710,8 @@ to by @var{ce_info}.
  
  @hook TARGET_USE_LATE_PROLOGUE_EPILOGUE
  
@@ -181,10 +182,10 @@ index 9d72fe18d..8062ee79c 100644
  
  @hook TARGET_INIT_BUILTINS
 diff --git a/gcc/function.cc b/gcc/function.cc
-index eac179dd3..94afb266e 100644
+index 7c90b5f23..ddab43ca4 100644
 --- a/gcc/function.cc
 +++ b/gcc/function.cc
-@@ -6217,7 +6217,17 @@ thread_prologue_and_epilogue_insns (void)
+@@ -6209,7 +6209,17 @@ thread_prologue_and_epilogue_insns (void)
        if (!(CALL_P (insn) && SIBLING_CALL_P (insn)))
  	continue;
  
@@ -203,7 +204,7 @@ index eac179dd3..94afb266e 100644
  	{
  	  start_sequence ();
  	  emit_note (NOTE_INSN_EPILOGUE_BEG);
-@@ -6267,7 +6277,8 @@ reposition_prologue_and_epilogue_notes (void)
+@@ -6259,7 +6269,8 @@ reposition_prologue_and_epilogue_notes (void)
  {
    if (!targetm.have_prologue ()
        && !targetm.have_epilogue ()
@@ -214,10 +215,10 @@ index eac179dd3..94afb266e 100644
  
    /* Since the hash table is created on demand, the fact that it is
 diff --git a/gcc/target.def b/gcc/target.def
-index d70363da5..609b2dff6 100644
+index fd4899612..cf9f96eba 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -4084,6 +4084,15 @@ between instruction sets.",
+@@ -4141,6 +4141,15 @@ between instruction sets.",
   bool, (),
   hook_bool_void_false)
  
@@ -234,5 +235,5 @@ index d70363da5..609b2dff6 100644
       delayed-branch scheduling.  */
  DEFHOOK
 -- 
-2.19.1
+2.33.0
 
diff --git a/0149-LoongArch-Remove-masking-process-for-operand-3-of-xv.patch b/0149-LoongArch-Remove-masking-process-for-operand-3-of-xv.patch
new file mode 100644
index 0000000000000000000000000000000000000000..42e7d2164d5af248d68c1a0b5312a50bb49930de
--- /dev/null
+++ b/0149-LoongArch-Remove-masking-process-for-operand-3-of-xv.patch
@@ -0,0 +1,85 @@
+From 0dba1a1daef3f043235382f0e8f107313b9bde07 Mon Sep 17 00:00:00 2001
+From: Chenghui Pan 
+Date: Thu, 14 Mar 2024 09:26:54 +0800
+Subject: [PATCH 149/188] LoongArch: Remove masking process for operand 3 of
+ xvpermi.q.
+
+The behavior of non-zero unused bits in xvpermi.q instruction's
+third operand is undefined on LoongArch, according to our
+discussion (https://github.com/llvm/llvm-project/pull/83540),
+we think that keeping original insn operand as unmodified
+state is better solution.
+
+This patch partially reverts 7b158e036a95b1ab40793dd53bed7dbd770ffdaf.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (lasx_xvpermi_q_):
+	Remove masking of operand 3.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c:
+	Reposition operand 3's value into instruction's defined accept range.
+---
+ gcc/config/loongarch/lasx.md                                | 5 -----
+ .../gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c       | 6 +++---
+ 2 files changed, 3 insertions(+), 8 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 38f35bad6..f3b5ea373 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -640,8 +640,6 @@
+    (set_attr "mode" "")])
+ 
+ ;; xvpermi.q
+-;; Unused bits in operands[3] need be set to 0 to avoid
+-;; causing undefined behavior on LA464.
+ (define_insn "lasx_xvpermi_q_"
+   [(set (match_operand:LASX 0 "register_operand" "=f")
+ 	(unspec:LASX
+@@ -651,9 +649,6 @@
+ 	  UNSPEC_LASX_XVPERMI_Q))]
+   "ISA_HAS_LASX"
+ {
+-  int mask = 0x33;
+-  mask &= INTVAL (operands[3]);
+-  operands[3] = GEN_INT (mask);
+   return "xvpermi.q\t%u0,%u2,%3";
+ }
+   [(set_attr "type" "simd_splat")
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c
+index dbc29d2fb..f89dfc311 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpermi_q.c
+@@ -27,7 +27,7 @@ main ()
+   *((unsigned long*)& __m256i_result[2]) = 0x7fff7fff7fff0000;
+   *((unsigned long*)& __m256i_result[1]) = 0x7fe37fe3001d001d;
+   *((unsigned long*)& __m256i_result[0]) = 0x7fff7fff7fff0000;
+-  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0x2a);
++  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0x22);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+   *((unsigned long*)& __m256i_op0[3]) = 0x0000000000000000;
+@@ -42,7 +42,7 @@ main ()
+   *((unsigned long*)& __m256i_result[2]) = 0x000000000019001c;
+   *((unsigned long*)& __m256i_result[1]) = 0x0000000000000000;
+   *((unsigned long*)& __m256i_result[0]) = 0x00000000000001fe;
+-  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0xb9);
++  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0x31);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+   *((unsigned long*)& __m256i_op0[3]) = 0x00ff00ff00ff00ff;
+@@ -57,7 +57,7 @@ main ()
+   *((unsigned long*)& __m256i_result[2]) = 0xffff0000ffff0000;
+   *((unsigned long*)& __m256i_result[1]) = 0x00ff00ff00ff00ff;
+   *((unsigned long*)& __m256i_result[0]) = 0x00ff00ff00ff00ff;
+-  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0xca);
++  __m256i_out = __lasx_xvpermi_q (__m256i_op0, __m256i_op1, 0x02);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+   return 0;
+-- 
+2.43.0
+
diff --git a/SME-0047-Add-a-new-target-hook-TARGET_START_CALL_ARGS.patch b/0150-Backport-SME-Add-a-new-target-hook-TARGET_START_CALL.patch
similarity index 97%
rename from SME-0047-Add-a-new-target-hook-TARGET_START_CALL_ARGS.patch
rename to 0150-Backport-SME-Add-a-new-target-hook-TARGET_START_CALL.patch
index 8377dc082e824c07daa0f568c21edb11960e4c4d..4c71f2a4a77c102b4c0e960b1f76e86fae6f827d 100644
--- a/SME-0047-Add-a-new-target-hook-TARGET_START_CALL_ARGS.patch
+++ b/0150-Backport-SME-Add-a-new-target-hook-TARGET_START_CALL.patch
@@ -1,7 +1,8 @@
-From 53de42896648d35d332167b3479e38e3700be150 Mon Sep 17 00:00:00 2001
+From 58adede22d9ff2368b5c24ec3fc0e53bd3ddc8bd Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 09:44:52 +0000
-Subject: [PATCH 047/144] Add a new target hook: TARGET_START_CALL_ARGS
+Subject: [PATCH 051/157] [Backport][SME] Add a new target hook:
+ TARGET_START_CALL_ARGS
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=672fad57c1f99ff893019e2da4620e26b9b31dd2
 
@@ -192,10 +193,10 @@ index c1db66883..4a8535cc6 100644
    /* For calls to `setjmp', etc., inform function.cc:setjmp_warnings
       that it should complain if nonvolatile values are live.  For
 diff --git a/gcc/config/nvptx/nvptx.cc b/gcc/config/nvptx/nvptx.cc
-index e4297e2d6..d3ac149d1 100644
+index 3634a49de..7f2103ba6 100644
 --- a/gcc/config/nvptx/nvptx.cc
 +++ b/gcc/config/nvptx/nvptx.cc
-@@ -1781,7 +1781,7 @@ nvptx_get_drap_rtx (void)
+@@ -1780,7 +1780,7 @@ nvptx_get_drap_rtx (void)
     argument to the next call.  */
  
  static void
@@ -204,7 +205,7 @@ index e4297e2d6..d3ac149d1 100644
  {
    if (!cfun->machine->doing_call)
      {
-@@ -1809,7 +1809,7 @@ nvptx_call_args (rtx arg, tree fntype)
+@@ -1808,7 +1808,7 @@ nvptx_call_args (rtx arg, tree fntype)
     information we recorded.  */
  
  static void
@@ -214,7 +215,7 @@ index e4297e2d6..d3ac149d1 100644
    cfun->machine->doing_call = false;
    free_EXPR_LIST_list (&cfun->machine->call_args);
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 3601994c3..74ba7ed93 100644
+index 369f4b8da..357c29a4d 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
 @@ -5392,26 +5392,59 @@ except the last are treated as named.
@@ -288,7 +289,7 @@ index 3601994c3..74ba7ed93 100644
  @end deftypefn
  
 diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
-index 8062ee79c..0294bfad3 100644
+index 748b0777a..4ebc9afbf 100644
 --- a/gcc/doc/tm.texi.in
 +++ b/gcc/doc/tm.texi.in
 @@ -3774,6 +3774,8 @@ These machine description macros help implement varargs:
@@ -329,10 +330,10 @@ index 1056e1e9e..e2a742f43 100644
  extern void hook_void_tree (tree);
  extern void hook_void_tree_treeptr (tree, tree *);
 diff --git a/gcc/target.def b/gcc/target.def
-index 609b2dff6..c8a793ef8 100644
+index cf9f96eba..a57e51b0d 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -4727,32 +4727,67 @@ not generate any instructions in this case.",
+@@ -4784,32 +4784,67 @@ not generate any instructions in this case.",
  	int *pretend_args_size, int second_time),
   default_setup_incoming_varargs)
  
@@ -456,5 +457,5 @@ index ecce55ebe..c6e12fc2e 100644
    (const_tree, const_tree, const_tree);
  extern void default_function_arg_advance
 -- 
-2.19.1
+2.33.0
 
diff --git a/0150-LoongArch-Fix-C23-.-functions-returning-large-aggreg.patch b/0150-LoongArch-Fix-C23-.-functions-returning-large-aggreg.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c09c6f61771d4b1825cf48994e02e5cc14081133
--- /dev/null
+++ b/0150-LoongArch-Fix-C23-.-functions-returning-large-aggreg.patch
@@ -0,0 +1,48 @@
+From 3ed698858f0ebb12a99ed1cc12c038b533f64b2c Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 25 Oct 2024 06:15:21 +0000
+Subject: [PATCH 150/188] LoongArch: Fix C23 (...) functions returning large
+ aggregates  [PR114175]
+
+We were assuming TYPE_NO_NAMED_ARGS_STDARG_P don't have any named
+arguments and there is nothing to advance, but that is not the case
+for (...) functions returning by hidden reference which have one such
+artificial argument.  This is causing gcc.dg/c23-stdarg-6.c and
+gcc.dg/c23-stdarg-8.c to fail.
+
+Fix the issue by checking if arg.type is NULL, as r14-9503 explains.
+
+gcc/ChangeLog:
+
+        PR target/114175
+        * config/loongarch/loongarch.cc
+        (loongarch_setup_incoming_varargs): Only skip
+        loongarch_function_arg_advance for TYPE_NO_NAMED_ARGS_STDARG_P
+        functions if arg.type is NULL.
+---
+ gcc/config/loongarch/loongarch.cc | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index c1dc30b61..1e3981e19 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -767,7 +767,14 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum,
+      argument.  Advance a local copy of CUM past the last "real" named
+      argument, to find out how many registers are left over.  */
+   local_cum = *get_cumulative_args (cum);
+-  loongarch_function_arg_advance (pack_cumulative_args (&local_cum), arg);
++
++  /* For a C23 variadic function w/o any named argument, and w/o an
++     artifical argument for large return value, skip advancing args.
++     There is such an artifical argument iff. arg.type is non-NULL
++     (PR 114175).  */
++  if (!TYPE_NO_NAMED_ARGS_STDARG_P (TREE_TYPE (current_function_decl))
++      || arg.type != NULL_TREE)
++    loongarch_function_arg_advance (pack_cumulative_args (&local_cum), arg);
+ 
+   /* Found out how many registers we need to save.  */
+   gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
+-- 
+2.43.0
+
diff --git a/SME-0048-Allow-targets-to-add-USEs-to-asms.patch b/0151-Backport-SME-Allow-targets-to-add-USEs-to-asms.patch
similarity index 96%
rename from SME-0048-Allow-targets-to-add-USEs-to-asms.patch
rename to 0151-Backport-SME-Allow-targets-to-add-USEs-to-asms.patch
index a8ccab7b8abf533f767b23371924f6560e6bedb5..cb0675161f01a8f02f7ea68208187125df861095 100644
--- a/SME-0048-Allow-targets-to-add-USEs-to-asms.patch
+++ b/0151-Backport-SME-Allow-targets-to-add-USEs-to-asms.patch
@@ -1,7 +1,7 @@
-From 46876661f0b40e05b4dd7fce385db7ba68a6ce33 Mon Sep 17 00:00:00 2001
+From 8684458c3faf358e5a15dfb73b4ef632341ddf0a Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 09:52:41 +0000
-Subject: [PATCH 048/144] Allow targets to add USEs to asms
+Subject: [PATCH 052/157] [Backport][SME] Allow targets to add USEs to asms
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=414d795d8a365b6e72a84257caa36cb3bed7e0ba
 
@@ -252,10 +252,10 @@ index f0017d630..3a1c85481 100644
  {
    /* For the time being, all asms clobber condition codes.
 diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
-index a1ba250ed..6847e90bd 100644
+index 593185fa6..83a0d8abb 100644
 --- a/gcc/config/i386/i386.cc
 +++ b/gcc/config/i386/i386.cc
-@@ -22234,8 +22234,9 @@ ix86_c_mode_for_suffix (char suffix)
+@@ -22252,8 +22252,9 @@ ix86_c_mode_for_suffix (char suffix)
  static rtx_insn *
  ix86_md_asm_adjust (vec &outputs, vec & /*inputs*/,
  		    vec & /*input_modes*/,
@@ -297,10 +297,10 @@ index 71fe9e8bc..27530495f 100644
    if (!flag_inline_asm_r15)
      {
 diff --git a/gcc/config/pdp11/pdp11.cc b/gcc/config/pdp11/pdp11.cc
-index f7482df18..ab3b5d9ed 100644
+index 380223439..25cf62cbc 100644
 --- a/gcc/config/pdp11/pdp11.cc
 +++ b/gcc/config/pdp11/pdp11.cc
-@@ -156,7 +156,8 @@ static int pdp11_addr_cost (rtx, machine_mode, addr_space_t, bool);
+@@ -155,7 +155,8 @@ static int pdp11_addr_cost (rtx, machine_mode, addr_space_t, bool);
  static int pdp11_insn_cost (rtx_insn *insn, bool speed);
  static rtx_insn *pdp11_md_asm_adjust (vec &, vec &,
  				      vec &, vec &,
@@ -310,7 +310,7 @@ index f7482df18..ab3b5d9ed 100644
  static bool pdp11_return_in_memory (const_tree, const_tree);
  static rtx pdp11_function_value (const_tree, const_tree, bool);
  static rtx pdp11_libcall_value (machine_mode, const_rtx);
-@@ -2138,7 +2139,8 @@ pdp11_cmp_length (rtx *operands, int words)
+@@ -2137,7 +2138,8 @@ pdp11_cmp_length (rtx *operands, int words)
  static rtx_insn *
  pdp11_md_asm_adjust (vec & /*outputs*/, vec & /*inputs*/,
  		     vec & /*input_modes*/,
@@ -321,10 +321,10 @@ index f7482df18..ab3b5d9ed 100644
  {
    clobbers.safe_push (gen_rtx_REG (CCmode, CC_REGNUM));
 diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
-index f67856107..998f4e190 100644
+index 0b75861bb..55d4ce751 100644
 --- a/gcc/config/rs6000/rs6000.cc
 +++ b/gcc/config/rs6000/rs6000.cc
-@@ -3426,7 +3426,8 @@ rs6000_builtin_mask_calculate (void)
+@@ -3443,7 +3443,8 @@ rs6000_builtin_mask_calculate (void)
  static rtx_insn *
  rs6000_md_asm_adjust (vec & /*outputs*/, vec & /*inputs*/,
  		      vec & /*input_modes*/,
@@ -349,7 +349,7 @@ index ae0cf9ef5..f1599a5c5 100644
  {
    if (!TARGET_VXE)
 diff --git a/gcc/config/vax/vax.cc b/gcc/config/vax/vax.cc
-index f44e23d17..a11e545c5 100644
+index 28c1af59a..7673a1428 100644
 --- a/gcc/config/vax/vax.cc
 +++ b/gcc/config/vax/vax.cc
 @@ -57,7 +57,8 @@ static bool vax_rtx_costs (rtx, machine_mode, int, int, int *, bool);
@@ -362,7 +362,7 @@ index f44e23d17..a11e545c5 100644
  static rtx vax_function_arg (cumulative_args_t, const function_arg_info &);
  static void vax_function_arg_advance (cumulative_args_t,
  				      const function_arg_info &);
-@@ -1182,6 +1183,7 @@ vax_md_asm_adjust (vec &outputs ATTRIBUTE_UNUSED,
+@@ -1179,6 +1180,7 @@ vax_md_asm_adjust (vec &outputs ATTRIBUTE_UNUSED,
  		   vec &inputs ATTRIBUTE_UNUSED,
  		   vec &input_modes ATTRIBUTE_UNUSED,
  		   vec &constraints ATTRIBUTE_UNUSED,
@@ -394,10 +394,10 @@ index 03c1a33e1..35b46ced9 100644
  {
    clobbers.safe_push (gen_rtx_REG (CCmode, FLAGS_REGNUM));
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 74ba7ed93..9943fe651 100644
+index 357c29a4d..4f93facf7 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -11836,10 +11836,11 @@ from shared libraries (DLLs).
+@@ -11626,10 +11626,11 @@ from shared libraries (DLLs).
  You need not define this macro if it would always evaluate to zero.
  @end defmac
  
@@ -463,10 +463,10 @@ index cd2410ab2..5b81d5e21 100644
  		gcc_assert (GET_CODE (XVECEXP (body, 0, i)) == SET);
  		if (operands)
 diff --git a/gcc/target.def b/gcc/target.def
-index c8a793ef8..562882358 100644
+index a57e51b0d..60096c60c 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
-@@ -4252,7 +4252,8 @@ DEFHOOK
+@@ -4309,7 +4309,8 @@ DEFHOOK
  (md_asm_adjust,
   "This target hook may add @dfn{clobbers} to @var{clobbers} and\n\
  @var{clobbered_regs} for any hard regs the port wishes to automatically\n\
@@ -476,7 +476,7 @@ index c8a793ef8..562882358 100644
  to avoid clobbering a register that is already used by the asm.  @var{loc}\n\
  is the source location of the asm.\n\
  \n\
-@@ -4263,7 +4264,7 @@ changes to @var{inputs} must be accompanied by the corresponding changes\n\
+@@ -4320,7 +4321,7 @@ changes to @var{inputs} must be accompanied by the corresponding changes\n\
  to @var{input_modes}.",
   rtx_insn *,
   (vec& outputs, vec& inputs, vec& input_modes,
@@ -486,5 +486,5 @@ index c8a793ef8..562882358 100644
   NULL)
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0151-LoongArch-Remove-unused-useless-definitions.patch b/0151-LoongArch-Remove-unused-useless-definitions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..44e92c2e63f9553ec123b66e2749ecddd386f985
--- /dev/null
+++ b/0151-LoongArch-Remove-unused-useless-definitions.patch
@@ -0,0 +1,123 @@
+From 6ee300fd31e000efba141ed8806e56bd03826197 Mon Sep 17 00:00:00 2001
+From: Chenghui Pan 
+Date: Fri, 15 Mar 2024 09:30:25 +0800
+Subject: [PATCH 151/188] LoongArch: Remove unused/useless definitions.
+
+This patch removes some unnecessary definitions of target hook functions
+according to the documentation of GCC.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-protos.h
+	(loongarch_cfun_has_cprestore_slot_p): Delete.
+	(loongarch_adjust_insn_length): Delete.
+	(current_section_name): Delete.
+	(loongarch_split_symbol_type): Delete.
+	* config/loongarch/loongarch.cc
+	(loongarch_case_values_threshold): Delete.
+	(loongarch_spill_class): Delete.
+	(TARGET_OPTAB_SUPPORTED_P): Delete.
+	(TARGET_CASE_VALUES_THRESHOLD): Delete.
+	(TARGET_SPILL_CLASS): Delete.
+---
+ gcc/config/loongarch/loongarch-protos.h |  5 -----
+ gcc/config/loongarch/loongarch.cc       | 26 -------------------------
+ 2 files changed, 31 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 87b94e8b0..3dac20279 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -93,7 +93,6 @@ extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx));
+ extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx);
+ extern void loongarch_split_lsx_fill_d (rtx, rtx);
+ extern const char *loongarch_output_move (rtx, rtx);
+-extern bool loongarch_cfun_has_cprestore_slot_p (void);
+ #ifdef RTX_CODE
+ extern void loongarch_expand_scc (rtx *);
+ extern bool loongarch_expand_vec_cmp (rtx *);
+@@ -135,7 +134,6 @@ extern int loongarch_class_max_nregs (enum reg_class, machine_mode);
+ extern machine_mode loongarch_hard_regno_caller_save_mode (unsigned int,
+ 							   unsigned int,
+ 							   machine_mode);
+-extern int loongarch_adjust_insn_length (rtx_insn *, int);
+ extern const char *loongarch_output_conditional_branch (rtx_insn *, rtx *,
+ 							const char *,
+ 							const char *);
+@@ -157,7 +155,6 @@ extern bool loongarch_global_symbol_noweak_p (const_rtx);
+ extern bool loongarch_weak_symbol_p (const_rtx);
+ extern bool loongarch_symbol_binds_local_p (const_rtx);
+ 
+-extern const char *current_section_name (void);
+ extern unsigned int current_section_flags (void);
+ extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+ extern bool loongarch_check_zero_div_p (void);
+@@ -198,8 +195,6 @@ extern bool loongarch_epilogue_uses (unsigned int);
+ extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool);
+ extern bool loongarch_split_symbol_type (enum loongarch_symbol_type);
+ 
+-typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx);
+-
+ extern void loongarch_register_frame_header_opt (void);
+ extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *);
+ extern void loongarch_expand_vec_cond_mask_expr (machine_mode, machine_mode,
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 1e3981e19..903c0d4ef 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -10812,23 +10812,6 @@ loongarch_expand_vec_cmp (rtx operands[])
+   return true;
+ }
+ 
+-/* Implement TARGET_CASE_VALUES_THRESHOLD.  */
+-
+-unsigned int
+-loongarch_case_values_threshold (void)
+-{
+-  return default_case_values_threshold ();
+-}
+-
+-/* Implement TARGET_SPILL_CLASS.  */
+-
+-static reg_class_t
+-loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED,
+-		       machine_mode mode ATTRIBUTE_UNUSED)
+-{
+-  return NO_REGS;
+-}
+-
+ /* Implement TARGET_PROMOTE_FUNCTION_MODE.  */
+ 
+ /* This function is equivalent to default_promote_function_mode_always_promote
+@@ -11283,9 +11266,6 @@ loongarch_asm_code_end (void)
+ #undef TARGET_FUNCTION_ARG_BOUNDARY
+ #define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary
+ 
+-#undef TARGET_OPTAB_SUPPORTED_P
+-#define TARGET_OPTAB_SUPPORTED_P loongarch_optab_supported_p
+-
+ #undef TARGET_VECTOR_MODE_SUPPORTED_P
+ #define TARGET_VECTOR_MODE_SUPPORTED_P loongarch_vector_mode_supported_p
+ 
+@@ -11355,18 +11335,12 @@ loongarch_asm_code_end (void)
+ #undef TARGET_SCHED_REASSOCIATION_WIDTH
+ #define TARGET_SCHED_REASSOCIATION_WIDTH loongarch_sched_reassociation_width
+ 
+-#undef TARGET_CASE_VALUES_THRESHOLD
+-#define TARGET_CASE_VALUES_THRESHOLD loongarch_case_values_threshold
+-
+ #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+ #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv
+ 
+ #undef TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS
+ #define TARGET_CALL_FUSAGE_CONTAINS_NON_CALLEE_CLOBBERS true
+ 
+-#undef TARGET_SPILL_CLASS
+-#define TARGET_SPILL_CLASS loongarch_spill_class
+-
+ #undef TARGET_HARD_REGNO_NREGS
+ #define TARGET_HARD_REGNO_NREGS loongarch_hard_regno_nregs
+ #undef TARGET_HARD_REGNO_MODE_OK
+-- 
+2.43.0
+
diff --git a/SME-0049-New-compact-syntax-for-insn-and-insn_split-in-Machin.patch b/0152-Backport-SME-New-compact-syntax-for-insn-and-insn_sp.patch
similarity index 99%
rename from SME-0049-New-compact-syntax-for-insn-and-insn_split-in-Machin.patch
rename to 0152-Backport-SME-New-compact-syntax-for-insn-and-insn_sp.patch
index 15c2c10b829823893f46c334894762a690b1de37..edf0b1e9c9ace4261217ba629f575408968b9337 100644
--- a/SME-0049-New-compact-syntax-for-insn-and-insn_split-in-Machin.patch
+++ b/0152-Backport-SME-New-compact-syntax-for-insn-and-insn_sp.patch
@@ -1,8 +1,8 @@
-From 3cb20a000606b57790e1e695188cc1fe507d4cf3 Mon Sep 17 00:00:00 2001
+From 763db5ed42e18cdddf979dda82056345e3af15ed Mon Sep 17 00:00:00 2001
 From: Tamar Christina 
 Date: Mon, 19 Jun 2023 15:47:46 +0100
-Subject: [PATCH 049/144] New compact syntax for insn and insn_split in Machine
- Descriptions.
+Subject: [PATCH 053/157] [Backport][SME] New compact syntax for insn and
+ insn_split in Machine Descriptions.
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=957ae90406591739b68e95ad49a0232faeb74217
 
@@ -994,5 +994,5 @@ index 9a0fd7393..a19fc1319 100644
  extern void compute_test_codes (rtx, file_location, char *);
  extern file_location get_file_location (rtx);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0152-LoongArch-Change-loongarch_expand_vec_cmp-s-return-t.patch b/0152-LoongArch-Change-loongarch_expand_vec_cmp-s-return-t.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4cf3c5d9f031aab7d90c5690d43e25c4c266e441
--- /dev/null
+++ b/0152-LoongArch-Change-loongarch_expand_vec_cmp-s-return-t.patch
@@ -0,0 +1,110 @@
+From d569e34b29faee3658014b3900e9553a4880dac0 Mon Sep 17 00:00:00 2001
+From: Chenghui Pan 
+Date: Fri, 15 Mar 2024 09:30:26 +0800
+Subject: [PATCH 152/188] LoongArch: Change loongarch_expand_vec_cmp()'s return
+ type from bool to void.
+
+This function is always return true at the end of function implementation,
+so the return value is useless.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (vec_cmp): Remove checking
+	of loongarch_expand_vec_cmp()'s return value.
+	(vec_cmpu): Ditto.
+	* config/loongarch/lsx.md (vec_cmp): Ditto.
+	(vec_cmpu): Ditto.
+	* config/loongarch/loongarch-protos.h
+	(loongarch_expand_vec_cmp): Change loongarch_expand_vec_cmp()'s return
+	type from bool to void.
+	* config/loongarch/loongarch.cc (loongarch_expand_vec_cmp): Ditto.
+---
+ gcc/config/loongarch/lasx.md            | 6 ++----
+ gcc/config/loongarch/loongarch-protos.h | 2 +-
+ gcc/config/loongarch/loongarch.cc       | 3 +--
+ gcc/config/loongarch/lsx.md             | 6 ++----
+ 4 files changed, 6 insertions(+), 11 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index f3b5ea373..45a0a8cc8 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -1378,8 +1378,7 @@
+ 	   (match_operand:LASX 3 "register_operand")]))]
+   "ISA_HAS_LASX"
+ {
+-  bool ok = loongarch_expand_vec_cmp (operands);
+-  gcc_assert (ok);
++  loongarch_expand_vec_cmp (operands);
+   DONE;
+ })
+ 
+@@ -1390,8 +1389,7 @@
+ 	   (match_operand:ILASX 3 "register_operand")]))]
+   "ISA_HAS_LASX"
+ {
+-  bool ok = loongarch_expand_vec_cmp (operands);
+-  gcc_assert (ok);
++  loongarch_expand_vec_cmp (operands);
+   DONE;
+ })
+ 
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 3dac20279..8523da8d6 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -95,7 +95,7 @@ extern void loongarch_split_lsx_fill_d (rtx, rtx);
+ extern const char *loongarch_output_move (rtx, rtx);
+ #ifdef RTX_CODE
+ extern void loongarch_expand_scc (rtx *);
+-extern bool loongarch_expand_vec_cmp (rtx *);
++extern void loongarch_expand_vec_cmp (rtx *);
+ extern void loongarch_expand_conditional_branch (rtx *);
+ extern void loongarch_expand_conditional_move (rtx *);
+ extern void loongarch_expand_conditional_trap (rtx);
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 903c0d4ef..dbb318660 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -10803,13 +10803,12 @@ loongarch_expand_vec_cond_mask_expr (machine_mode mode, machine_mode vimode,
+ }
+ 
+ /* Expand integer vector comparison */
+-bool
++void
+ loongarch_expand_vec_cmp (rtx operands[])
+ {
+ 
+   rtx_code code = GET_CODE (operands[1]);
+   loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]);
+-  return true;
+ }
+ 
+ /* Implement TARGET_PROMOTE_FUNCTION_MODE.  */
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index d5aa3f46f..dc81093e9 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -518,8 +518,7 @@
+ 	   (match_operand:LSX 3 "register_operand")]))]
+   "ISA_HAS_LSX"
+ {
+-  bool ok = loongarch_expand_vec_cmp (operands);
+-  gcc_assert (ok);
++  loongarch_expand_vec_cmp (operands);
+   DONE;
+ })
+ 
+@@ -530,8 +529,7 @@
+ 	   (match_operand:ILSX 3 "register_operand")]))]
+   "ISA_HAS_LSX"
+ {
+-  bool ok = loongarch_expand_vec_cmp (operands);
+-  gcc_assert (ok);
++  loongarch_expand_vec_cmp (operands);
+   DONE;
+ })
+ 
+-- 
+2.43.0
+
diff --git a/SME-0050-recog-Improve-parser-for-pattern-new-compact-syntax.patch b/0153-Backport-SME-recog-Improve-parser-for-pattern-new-co.patch
similarity index 94%
rename from SME-0050-recog-Improve-parser-for-pattern-new-compact-syntax.patch
rename to 0153-Backport-SME-recog-Improve-parser-for-pattern-new-co.patch
index 5ff9fb22f521675e9acc8a53eb089d6ba51b46c4..1302ea137742a332450a39fc80b8471b184fe77d 100644
--- a/SME-0050-recog-Improve-parser-for-pattern-new-compact-syntax.patch
+++ b/0153-Backport-SME-recog-Improve-parser-for-pattern-new-co.patch
@@ -1,7 +1,8 @@
-From f323c91631fd37adc37ef1684d77ceaf0cfb39e4 Mon Sep 17 00:00:00 2001
+From 35b64175c6fd622212d0bf936e7e98c635e1c618 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Wed, 13 Sep 2023 14:50:30 +0100
-Subject: [PATCH 050/144] recog: Improve parser for pattern new compact syntax
+Subject: [PATCH 054/157] [Backport][SME] recog: Improve parser for pattern new
+ compact syntax
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=dd1091fe455c1ede5993b4cdf10d0f7c461b86d7
 
@@ -99,5 +100,5 @@ index 23c61dcdd..97c614850 100644
    if (convec.size () > 0)
      add_constraints (x, loc, convec);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0153-LoongArch-Combine-UNITS_PER_FP_REG-and-UNITS_PER_FPR.patch b/0153-LoongArch-Combine-UNITS_PER_FP_REG-and-UNITS_PER_FPR.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cdabd1de5f23df5e585083fa3d5c37366900d351
--- /dev/null
+++ b/0153-LoongArch-Combine-UNITS_PER_FP_REG-and-UNITS_PER_FPR.patch
@@ -0,0 +1,104 @@
+From 6c4a2fbdabab053a2a0fb1041e3ffccc3d853c97 Mon Sep 17 00:00:00 2001
+From: Chenghui Pan 
+Date: Fri, 15 Mar 2024 09:30:27 +0800
+Subject: [PATCH 153/188] LoongArch: Combine UNITS_PER_FP_REG and
+ UNITS_PER_FPREG macros.
+
+These macros are completely same in definition, so we can keep the previous one
+and eliminate later one.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(loongarch_hard_regno_mode_ok_uncached): Combine UNITS_PER_FP_REG and
+	UNITS_PER_FPREG macros.
+	(loongarch_hard_regno_nregs): Ditto.
+	(loongarch_class_max_nregs): Ditto.
+	(loongarch_get_separate_components): Ditto.
+	(loongarch_process_components): Ditto.
+	* config/loongarch/loongarch.h (UNITS_PER_FPREG): Ditto.
+	(UNITS_PER_HWFPVALUE): Ditto.
+	(UNITS_PER_FPVALUE): Ditto.
+---
+ gcc/config/loongarch/loongarch.cc | 10 +++++-----
+ gcc/config/loongarch/loongarch.h  |  7 ++-----
+ 2 files changed, 7 insertions(+), 10 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index dbb318660..8d9cda165 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6773,7 +6773,7 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
+ 	 and TRUNC.  There's no point allowing sizes smaller than a word,
+ 	 because the FPU has no appropriate load/store instructions.  */
+       if (mclass == MODE_INT)
+-	return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FPREG;
++	return size >= MIN_UNITS_PER_WORD && size <= UNITS_PER_FP_REG;
+     }
+ 
+   return false;
+@@ -6816,7 +6816,7 @@ loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode)
+       if (LASX_SUPPORTED_MODE_P (mode))
+ 	return 1;
+ 
+-      return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
++      return (GET_MODE_SIZE (mode) + UNITS_PER_FP_REG - 1) / UNITS_PER_FP_REG;
+     }
+ 
+   /* All other registers are word-sized.  */
+@@ -6851,7 +6851,7 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode)
+ 	  else if (LSX_SUPPORTED_MODE_P (mode))
+ 	    size = MIN (size, UNITS_PER_LSX_REG);
+ 	  else
+-	    size = MIN (size, UNITS_PER_FPREG);
++	    size = MIN (size, UNITS_PER_FP_REG);
+ 	}
+       left &= ~reg_class_contents[FP_REGS];
+     }
+@@ -8227,7 +8227,7 @@ loongarch_get_separate_components (void)
+ 	if (IMM12_OPERAND (offset))
+ 	  bitmap_set_bit (components, regno);
+ 
+-	offset -= UNITS_PER_FPREG;
++	offset -= UNITS_PER_FP_REG;
+       }
+ 
+   /* Don't mess with the hard frame pointer.  */
+@@ -8306,7 +8306,7 @@ loongarch_process_components (sbitmap components, loongarch_save_restore_fn fn)
+ 	if (bitmap_bit_p (components, regno))
+ 	  loongarch_save_restore_reg (mode, regno, offset, fn);
+ 
+-	offset -= UNITS_PER_FPREG;
++	offset -= UNITS_PER_FP_REG;
+       }
+ }
+ 
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 8bcdb8729..698e42aec 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -138,19 +138,16 @@ along with GCC; see the file COPYING3.  If not see
+ /* Width of a LASX vector register in bits.  */
+ #define BITS_PER_LASX_REG (UNITS_PER_LASX_REG * BITS_PER_UNIT)
+ 
+-/* For LARCH, width of a floating point register.  */
+-#define UNITS_PER_FPREG (TARGET_DOUBLE_FLOAT ? 8 : 4)
+-
+ /* The largest size of value that can be held in floating-point
+    registers and moved with a single instruction.  */
+ #define UNITS_PER_HWFPVALUE \
+-  (TARGET_SOFT_FLOAT ? 0 : UNITS_PER_FPREG)
++  (TARGET_SOFT_FLOAT ? 0 : UNITS_PER_FP_REG)
+ 
+ /* The largest size of value that can be held in floating-point
+    registers.  */
+ #define UNITS_PER_FPVALUE \
+   (TARGET_SOFT_FLOAT ? 0 \
+-   : TARGET_SINGLE_FLOAT ? UNITS_PER_FPREG \
++   : TARGET_SINGLE_FLOAT ? UNITS_PER_FP_REG \
+ 			 : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
+ 
+ /* The number of bytes in a double.  */
+-- 
+2.43.0
+
diff --git a/SME-0051-recog-Support-space-in-cons.patch b/0154-Backport-SME-recog-Support-space-in-cons.patch
similarity index 90%
rename from SME-0051-recog-Support-space-in-cons.patch
rename to 0154-Backport-SME-recog-Support-space-in-cons.patch
index 7cede857c88f3974d78149d83092c4b8371efa4f..f11eb41b567d7c45e2ecf44c61b2bf8aa313d6df 100644
--- a/SME-0051-recog-Support-space-in-cons.patch
+++ b/0154-Backport-SME-recog-Support-space-in-cons.patch
@@ -1,7 +1,7 @@
-From c9a45692ae72f70a44b05bfb95d125d4785cc7a6 Mon Sep 17 00:00:00 2001
+From e593ad216bd1f4f75d9875898f352e0e5f978159 Mon Sep 17 00:00:00 2001
 From: Andrea Corallo 
 Date: Fri, 15 Sep 2023 10:23:02 +0200
-Subject: [PATCH 051/144] recog: Support space in "[ cons"
+Subject: [PATCH 055/157] [Backport][SME] recog: Support space in "[ cons"
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=9d31045b21324166c3997d603961d99e3c4c357d
 
@@ -45,5 +45,5 @@ index 97c614850..3d7a6d4fd 100644
  
    if (*templ != ']')
 -- 
-2.19.1
+2.33.0
 
diff --git a/0154-LoongArch-Fix-a-typo-PR-114407.patch b/0154-LoongArch-Fix-a-typo-PR-114407.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e55b604e6655ce568615e9e135e18c923fff44a7
--- /dev/null
+++ b/0154-LoongArch-Fix-a-typo-PR-114407.patch
@@ -0,0 +1,30 @@
+From 72f18deb0b8e59cc23f25cb99b59a25a0a1d99c7 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 21 Mar 2024 04:01:17 +0800
+Subject: [PATCH 154/188] LoongArch: Fix a typo [PR 114407]
+
+gcc/ChangeLog:
+
+	PR target/114407
+	* config/loongarch/loongarch-opts.cc (loongarch_config_target):
+	Fix typo in diagnostic message, enabing -> enabling.
+---
+ gcc/config/loongarch/loongarch-opts.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 2ea3972d1..bdecfaf49 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -362,7 +362,7 @@ config_target_isa:
+ 	  gcc_assert (constrained.simd);
+ 
+ 	  inform (UNKNOWN_LOCATION,
+-		  "enabing %qs promotes %<%s%s%> to %<%s%s%>",
++		  "enabling %qs promotes %<%s%s%> to %<%s%s%>",
+ 		  loongarch_isa_ext_strings[t.isa.simd],
+ 		  OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[t.isa.fpu],
+ 		  OPTSTR_ISA_EXT_FPU, loongarch_isa_ext_strings[ISA_EXT_FPU64]);
+-- 
+2.43.0
+
diff --git a/SME-0052-aarch64-Generalise-require_immediate_lane_index.patch b/0155-Backport-SME-aarch64-Generalise-require_immediate_la.patch
similarity index 97%
rename from SME-0052-aarch64-Generalise-require_immediate_lane_index.patch
rename to 0155-Backport-SME-aarch64-Generalise-require_immediate_la.patch
index db7c467377ccc56d448ca5ed1fadf46f4456424a..3a47094b929c762f339da71b7435e48dc5f3bdee 100644
--- a/SME-0052-aarch64-Generalise-require_immediate_lane_index.patch
+++ b/0155-Backport-SME-aarch64-Generalise-require_immediate_la.patch
@@ -1,7 +1,8 @@
-From 794d7e3ca908fd294efcb76d54aa41b5ca1c5a07 Mon Sep 17 00:00:00 2001
+From cb6d55f6bc7c490f72a43dd87543ab7a7ea582a8 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:18 +0000
-Subject: [PATCH 052/144] aarch64: Generalise require_immediate_lane_index
+Subject: [PATCH 056/157] [Backport][SME] aarch64: Generalise
+ require_immediate_lane_index
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c0cf2c893d54420b0c19fee7bd41ae40017d0106
 
@@ -159,5 +160,5 @@ index 52994cde0..824c31cd7 100644
  				 HOST_WIDE_INT, HOST_WIDE_INT);
    bool require_immediate_range (unsigned int, HOST_WIDE_INT, HOST_WIDE_INT);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0155-testsuite-Add-a-test-case-for-negating-FP-vectors-co.patch b/0155-testsuite-Add-a-test-case-for-negating-FP-vectors-co.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fb5caeae1ae27c26a52da7ed6d80c3a0b20d8d62
--- /dev/null
+++ b/0155-testsuite-Add-a-test-case-for-negating-FP-vectors-co.patch
@@ -0,0 +1,68 @@
+From e27123a020e7bf0845a9804a4b09fe4ce57992f0 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 6 Feb 2024 17:49:50 +0800
+Subject: [PATCH 155/188] testsuite: Add a test case for negating FP vectors
+ containing zeros
+
+Recently I've fixed two wrong FP vector negate implementation which
+caused wrong sign bits in zeros in targets (r14-8786 and r14-8801).  To
+prevent a similar issue from happening again, add a test case.
+
+Tested on x86_64 (with SSE2, AVX, AVX2, and AVX512F), AArch64, MIPS
+(with MSA), LoongArch (with LSX and LASX).
+
+gcc/testsuite:
+
+	* gcc.dg/vect/vect-neg-zero.c: New test.
+---
+ gcc/testsuite/gcc.dg/vect/vect-neg-zero.c | 38 +++++++++++++++++++++++
+ 1 file changed, 38 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/vect/vect-neg-zero.c
+
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-neg-zero.c b/gcc/testsuite/gcc.dg/vect/vect-neg-zero.c
+new file mode 100644
+index 000000000..21fa00cfa
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/vect-neg-zero.c
+@@ -0,0 +1,38 @@
++/* { dg-add-options ieee } */
++/* { dg-additional-options "-fno-associative-math -fsigned-zeros" } */
++
++double x[4] = {-0.0, 0.0, -0.0, 0.0};
++float y[8] = {-0.0, 0.0, -0.0, 0.0, -0.0, -0.0, 0.0, 0.0};
++
++static __attribute__ ((always_inline)) inline void
++test (int factor)
++{
++  double a[4];
++  float b[8];
++
++  asm ("" ::: "memory");
++
++  for (int i = 0; i < 2 * factor; i++)
++    a[i] = -x[i];
++
++  for (int i = 0; i < 4 * factor; i++)
++    b[i] = -y[i];
++
++#pragma GCC novector
++  for (int i = 0; i < 2 * factor; i++)
++    if (__builtin_signbit (a[i]) == __builtin_signbit (x[i]))
++      __builtin_abort ();
++
++#pragma GCC novector
++  for (int i = 0; i < 4 * factor; i++)
++    if (__builtin_signbit (b[i]) == __builtin_signbit (y[i]))
++      __builtin_abort ();
++}
++
++int
++main (void)
++{
++  test (1);
++  test (2);
++  return 0;
++}
+-- 
+2.43.0
+
diff --git a/SME-0053-aarch64-Add-backend-support-for-DFP.patch b/0156-Backport-SME-aarch64-Add-backend-support-for-DFP.patch
similarity index 90%
rename from SME-0053-aarch64-Add-backend-support-for-DFP.patch
rename to 0156-Backport-SME-aarch64-Add-backend-support-for-DFP.patch
index fe672d9d1a2ba3abc2caf64cb4280f9b1ec72f60..607c83c234c36b0e8a08b4d0927e0484dd9254c3 100644
--- a/SME-0053-aarch64-Add-backend-support-for-DFP.patch
+++ b/0156-Backport-SME-aarch64-Add-backend-support-for-DFP.patch
@@ -1,7 +1,7 @@
-From ec74aeafc48482197b56e9ec2deb6eb4a1cee0e4 Mon Sep 17 00:00:00 2001
+From 8394394bd26c7be6129b9a4e673d2a3530d9efde Mon Sep 17 00:00:00 2001
 From: Christophe Lyon 
 Date: Fri, 11 Mar 2022 16:21:02 +0000
-Subject: [PATCH 053/144] aarch64: Add backend support for DFP
+Subject: [PATCH 057/157] [Backport][SME] aarch64: Add backend support for DFP
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=0dc8e1e7026d9b8ec8b669c051786d426a52cd22
 
@@ -69,10 +69,10 @@ Changes	v1->v2:
  3 files changed, 89 insertions(+), 51 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 76bb0a5dd..ec97c58f6 100644
+index 055b436b1..02210ed13 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -4851,7 +4851,7 @@ aarch64_split_128bit_move (rtx dst, rtx src)
+@@ -5068,7 +5068,7 @@ aarch64_split_128bit_move (rtx dst, rtx src)
  
    machine_mode mode = GET_MODE (dst);
  
@@ -81,7 +81,7 @@ index 76bb0a5dd..ec97c58f6 100644
    gcc_assert (!(side_effects_p (src) || side_effects_p (dst)));
    gcc_assert (mode == GET_MODE (src) || GET_MODE (src) == VOIDmode);
  
-@@ -10617,6 +10617,7 @@ aarch64_mode_valid_for_sched_fusion_p (machine_mode mode)
+@@ -10834,6 +10834,7 @@ aarch64_mode_valid_for_sched_fusion_p (machine_mode mode)
  {
    return mode == SImode || mode == DImode
  	 || mode == SFmode || mode == DFmode
@@ -89,7 +89,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	 || (aarch64_vector_mode_supported_p (mode)
  	     && (known_eq (GET_MODE_SIZE (mode), 8)
  		 || (known_eq (GET_MODE_SIZE (mode), 16)
-@@ -10659,12 +10660,13 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -10876,12 +10877,13 @@ aarch64_classify_address (struct aarch64_address_info *info,
    vec_flags &= ~VEC_PARTIAL;
  
    /* On BE, we use load/store pair for all large int mode load/stores.
@@ -104,7 +104,7 @@ index 76bb0a5dd..ec97c58f6 100644
  			    || (BYTES_BIG_ENDIAN && advsimd_struct_p));
    /* If we are dealing with ADDR_QUERY_LDP_STP_N that means the incoming mode
       corresponds to the actual size of the memory being loaded/stored and the
-@@ -10738,7 +10740,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -10955,7 +10957,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
  	  info->offset = op1;
  	  info->const_offset = offset;
  
@@ -113,7 +113,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	     registers and individual Q registers.  The available
  	     address modes are:
  	     X,X: 7-bit signed scaled offset
-@@ -10747,7 +10749,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -10964,7 +10966,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
  	     When performing the check for pairs of X registers i.e.  LDP/STP
  	     pass down DImode since that is the natural size of the LDP/STP
  	     instruction memory accesses.  */
@@ -122,7 +122,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	    return (aarch64_offset_7bit_signed_scaled_p (DImode, offset)
  		    && (aarch64_offset_9bit_signed_unscaled_p (mode, offset)
  			|| offset_12bit_unsigned_scaled_p (mode, offset)));
-@@ -10870,14 +10872,14 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -11087,14 +11089,14 @@ aarch64_classify_address (struct aarch64_address_info *info,
  	  info->offset = XEXP (XEXP (x, 1), 1);
  	  info->const_offset = offset;
  
@@ -139,7 +139,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	    return (aarch64_offset_7bit_signed_scaled_p (mode, offset)
  		    && aarch64_offset_9bit_signed_unscaled_p (mode, offset));
  
-@@ -11039,9 +11041,9 @@ aarch64_legitimize_address_displacement (rtx *offset1, rtx *offset2,
+@@ -11256,9 +11258,9 @@ aarch64_legitimize_address_displacement (rtx *offset1, rtx *offset2,
  	 offset.  Use 4KB range for 1- and 2-byte accesses and a 16KB
  	 range otherwise to increase opportunities for sharing the base
  	 address of different sizes.  Unaligned accesses use the signed
@@ -151,7 +151,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	second_offset = ((const_offset + 0x100) & 0x1f8) - 0x100;
        else if ((const_offset & (size - 1)) != 0)
  	second_offset = ((const_offset + 0x100) & 0x1ff) - 0x100;
-@@ -11122,7 +11124,7 @@ aarch64_reinterpret_float_as_int (rtx value, unsigned HOST_WIDE_INT *intval)
+@@ -11339,7 +11341,7 @@ aarch64_reinterpret_float_as_int (rtx value, unsigned HOST_WIDE_INT *intval)
  		  CONST_DOUBLE_REAL_VALUE (value),
  		  REAL_MODE_FORMAT (mode));
  
@@ -160,7 +160,7 @@ index 76bb0a5dd..ec97c58f6 100644
      {
        int order = BYTES_BIG_ENDIAN ? 1 : 0;
        ival = zext_hwi (res[order], 32);
-@@ -11163,11 +11165,15 @@ aarch64_float_const_rtx_p (rtx x)
+@@ -11380,11 +11382,15 @@ aarch64_float_const_rtx_p (rtx x)
    return false;
  }
  
@@ -178,7 +178,7 @@ index 76bb0a5dd..ec97c58f6 100644
      return false;
  
    if (REAL_VALUE_MINUS_ZERO (*CONST_DOUBLE_REAL_VALUE (x)))
-@@ -11205,7 +11211,7 @@ aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode)
+@@ -11422,7 +11428,7 @@ aarch64_can_const_movi_rtx_p (rtx x, machine_mode mode)
    else
      return false;
  
@@ -187,7 +187,7 @@ index 76bb0a5dd..ec97c58f6 100644
       a 128 bit vector mode.  */
    int width = GET_MODE_BITSIZE (imode) == 64 ? 128 : 64;
  
-@@ -12411,7 +12417,7 @@ aarch64_anchor_offset (HOST_WIDE_INT offset, HOST_WIDE_INT size,
+@@ -12628,7 +12634,7 @@ aarch64_anchor_offset (HOST_WIDE_INT offset, HOST_WIDE_INT size,
    if (IN_RANGE (offset, -256, 0))
      return 0;
  
@@ -196,7 +196,7 @@ index 76bb0a5dd..ec97c58f6 100644
      return (offset + 0x100) & ~0x1ff;
  
    /* Use 12-bit offset by access size.  */
-@@ -12520,7 +12526,9 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+@@ -12737,7 +12743,9 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
  
    /* Without the TARGET_SIMD instructions we cannot move a Q register
       to a Q register directly.  We need a scratch.  */
@@ -207,7 +207,7 @@ index 76bb0a5dd..ec97c58f6 100644
        && FP_REGNUM_P (REGNO (x)) && !TARGET_SIMD
        && reg_class_subset_p (rclass, FP_REGS))
      {
-@@ -12528,14 +12536,16 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+@@ -12745,14 +12753,16 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
        return NO_REGS;
      }
  
@@ -226,7 +226,7 @@ index 76bb0a5dd..ec97c58f6 100644
        return GENERAL_REGS;
  
    return NO_REGS;
-@@ -13666,9 +13676,9 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
+@@ -13883,9 +13893,9 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
  		*cost += extra_cost->ldst.storev;
  	      else if (GET_MODE_CLASS (mode) == MODE_INT)
  		*cost += extra_cost->ldst.store;
@@ -238,7 +238,7 @@ index 76bb0a5dd..ec97c58f6 100644
  		*cost += extra_cost->ldst.stored;
  
  	      *cost +=
-@@ -13792,11 +13802,11 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
+@@ -14009,11 +14019,11 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
  	  /* mov[df,sf]_aarch64.  */
  	  if (aarch64_float_const_representable_p (x))
  	    /* FMOV (scalar immediate).  */
@@ -252,7 +252,7 @@ index 76bb0a5dd..ec97c58f6 100644
  		*cost += extra_cost->ldst.loadd;
  	      else
  		*cost += extra_cost->ldst.loadf;
-@@ -13822,9 +13832,9 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
+@@ -14039,9 +14049,9 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
  	    *cost += extra_cost->ldst.loadv;
  	  else if (GET_MODE_CLASS (mode) == MODE_INT)
  	    *cost += extra_cost->ldst.load;
@@ -264,7 +264,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	    *cost += extra_cost->ldst.loadd;
  
  	  *cost +=
-@@ -19391,7 +19401,7 @@ aarch64_legitimate_constant_p (machine_mode mode, rtx x)
+@@ -19623,7 +19633,7 @@ aarch64_legitimate_constant_p (machine_mode mode, rtx x)
  {
    /* Support CSE and rematerialization of common constants.  */
    if (CONST_INT_P (x)
@@ -273,7 +273,7 @@ index 76bb0a5dd..ec97c58f6 100644
      return true;
  
    /* Only accept variable-length vector constants if they can be
-@@ -19832,6 +19842,18 @@ aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
+@@ -20064,6 +20074,18 @@ aarch64_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
  	  field_t = long_double_type_node;
  	  field_ptr_t = long_double_ptr_type_node;
  	  break;
@@ -292,7 +292,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	case E_HFmode:
  	  field_t = aarch64_fp16_type_node;
  	  field_ptr_t = aarch64_fp16_ptr_type_node;
-@@ -20083,7 +20105,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep,
+@@ -20315,7 +20337,8 @@ aapcs_vfp_sub_candidate (const_tree type, machine_mode *modep,
      case REAL_TYPE:
        mode = TYPE_MODE (type);
        if (mode != DFmode && mode != SFmode
@@ -302,7 +302,7 @@ index 76bb0a5dd..ec97c58f6 100644
  	return -1;
  
        if (*modep == VOIDmode)
-@@ -20399,7 +20422,9 @@ aarch64_vfp_is_call_or_return_candidate (machine_mode mode,
+@@ -20631,7 +20654,9 @@ aarch64_vfp_is_call_or_return_candidate (machine_mode mode,
    machine_mode new_mode = VOIDmode;
    bool composite_p = aarch64_composite_type_p (type, mode);
  
@@ -313,7 +313,7 @@ index 76bb0a5dd..ec97c58f6 100644
        || aarch64_short_vector_p (type, mode))
      {
        *count = 1;
-@@ -23333,7 +23358,7 @@ aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
+@@ -23565,7 +23590,7 @@ aarch64_output_scalar_simd_mov_immediate (rtx immediate, scalar_int_mode mode)
      }
  
    machine_mode vmode;
@@ -322,7 +322,7 @@ index 76bb0a5dd..ec97c58f6 100644
       a 128 bit vector mode.  */
    int width = GET_MODE_BITSIZE (mode) == 64 ? 128 : 64;
  
-@@ -26185,7 +26210,7 @@ aarch64_gen_adjusted_ldpstp (rtx *operands, bool load,
+@@ -26417,7 +26442,7 @@ aarch64_gen_adjusted_ldpstp (rtx *operands, bool load,
      base_off = (off_val_1 + off_val_3) / 2;
    else
      /* However, due to issues with negative LDP/STP offset generation for
@@ -331,7 +331,7 @@ index 76bb0a5dd..ec97c58f6 100644
         addresses smaller than 9 signed unadjusted bits can store.  This
         provides the most range in this case.  */
      base_off = off_val_1;
-@@ -26463,6 +26488,9 @@ aarch64_libgcc_floating_mode_supported_p (scalar_float_mode mode)
+@@ -26695,6 +26720,9 @@ aarch64_libgcc_floating_mode_supported_p (scalar_float_mode mode)
  static bool
  aarch64_scalar_mode_supported_p (scalar_mode mode)
  {
@@ -342,10 +342,10 @@ index 76bb0a5dd..ec97c58f6 100644
  	  ? true
  	  : default_scalar_mode_supported_p (mode));
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 22c22b1c6..c12d4d648 100644
+index a78476c8a..8757a962f 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -1474,11 +1474,11 @@
+@@ -1476,11 +1476,11 @@
     (set_attr "arch" "simd,fp16,simd,*,simd,*,simd,*,fp16,simd,*,*,*,*,*")]
  )
  
@@ -362,7 +362,7 @@ index 22c22b1c6..c12d4d648 100644
    "@
     movi\\t%0.2s, #0
     fmov\\t%s0, %w1
-@@ -1498,11 +1498,11 @@
+@@ -1500,11 +1500,11 @@
     (set_attr "arch" "simd,*,*,*,*,simd,*,*,*,*,*,*")]
  )
  
@@ -379,7 +379,7 @@ index 22c22b1c6..c12d4d648 100644
    "@
     movi\\t%d0, #0
     fmov\\t%d0, %x1
-@@ -1543,13 +1543,13 @@
+@@ -1545,13 +1545,13 @@
    }
  )
  
@@ -398,7 +398,7 @@ index 22c22b1c6..c12d4d648 100644
    "@
     mov\\t%0.16b, %1.16b
     #
-@@ -1569,8 +1569,8 @@
+@@ -1571,8 +1571,8 @@
  )
  
  (define_split
@@ -410,7 +410,7 @@ index 22c22b1c6..c12d4d648 100644
    [(const_int 0)]
    {
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index 26a840d7f..6fb1af5ca 100644
+index 967e6b0b1..d0cd1b788 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
 @@ -67,14 +67,24 @@
@@ -465,5 +465,5 @@ index 26a840d7f..6fb1af5ca 100644
  ;; Single scalar modes
  (define_mode_iterator SX [SI SF])
 -- 
-2.19.1
+2.33.0
 
diff --git a/0156-LoongArch-Add-descriptions-of-the-compilation-option.patch b/0156-LoongArch-Add-descriptions-of-the-compilation-option.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4e7ef0243882de7f88bbce2e298a258f1a12d98f
--- /dev/null
+++ b/0156-LoongArch-Add-descriptions-of-the-compilation-option.patch
@@ -0,0 +1,83 @@
+From 899f1f351ddc0d76bc9d432cfe63b30cfb294860 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 25 Oct 2024 06:22:11 +0000
+Subject: [PATCH 156/188] LoongArch: Add descriptions of the compilation
+ options.
+
+Add descriptions for the compilation options '-mfrecipe' '-mdiv32'
+'-mlam-bh' '-mlamcas' and '-mld-seq-sa'.
+
+gcc/ChangeLog:
+
+        * doc/invoke.texi: Add descriptions for the compilation
+        options.
+---
+ gcc/doc/invoke.texi | 45 +++++++++++++++++++++++++++++++++++++++++++--
+ 1 file changed, 43 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 5c6515cb1..7f24fe1e2 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1008,8 +1008,9 @@ Objective-C and Objective-C++ Dialects}.
+ -mmax-inline-memcpy-size=@var{n} @gol
+ -mexplicit-relocs -mno-explicit-relocs @gol
+ -mdirect-extern-access -mno-direct-extern-access @gol
+--mcmodel=@var{code-model} -mrelax -mpass-mrelax-to-as} @gol
+--mrecip  -mrecip=@var{opt}
++-mcmodel=@var{code-model} -mrelax -mpass-mrelax-to-as @gol
++-mrecip  -mrecip=@var{opt} -mfrecipe -mno-frecipe -mdiv32 -mno-div32 @gol
++-mlam-bh -mno-lam-bh -mlamcas -mno-lamcas -mld-seq-sa -mno-ld-seq-sa}
+ 
+ @emph{M32R/D Options}
+ @gccoptlist{-m32r2  -m32rx  -m32r @gol
+@@ -24686,6 +24687,46 @@ Enable the approximation for vectorized reciprocal square root.
+ So, for example, @option{-mrecip=all,!sqrt} enables
+ all of the reciprocal approximations, except for scalar square root.
+ 
++@opindex mfrecipe
++@opindex mno-frecipe
++@item -mfrecipe
++@itemx -mno-frecipe
++Use (do not use) @code{frecipe.@{s/d@}} and @code{frsqrte.@{s/d@}}
++instructions.  When build with @option{-march=la664}, it is enabled by default.
++The default is @option{-mno-frecipe}.
++
++@opindex mdiv32
++@opindex mno-div32
++@item -mdiv32
++@itemx -mno-div32
++Use (do not use) @code{div.w[u]} and @code{mod.w[u]} instructions with input
++not sign-extended.  When build with @option{-march=la664}, it is enabled by
++default.  The default is @option{-mno-div32}.
++
++@opindex mlam-bh
++@opindex mno-lam-bh
++@item -mlam-bh
++@itemx -mno-lam-bh
++Use (do not use) @code{am@{swap/add@}[_db].@{b/h@}} instructions.  When build
++with @option{-march=la664}, it is enabled by default.  The default is
++@option{-mno-lam-bh}.
++
++@opindex mlamcas
++@opindex mno-lamcas
++@item -mlamcas
++@itemx -mno-lamcas
++Use (do not use) @code{amcas[_db].@{b/h/w/d@}} instructions.  When build with
++@option{-march=la664}, it is enabled by default.  The default is
++@option{-mno-lamcas}.
++
++@opindex mld-seq-sa
++@opindex mno-ld-seq-sa
++@item -mld-seq-sa
++@itemx -mno-ld-seq-sa
++Whether a load-load barrier (@code{dbar 0x700}) is needed.  When build with
++@option{-march=la664}, it is enabled by default.  The default is
++@option{-mno-ld-seq-sa}, the load-load barrier is needed.
++
+ @item loongarch-vect-unroll-limit
+ The vectorizer will use available tuning information to determine whether it
+ would be beneficial to unroll the main vectorized loop and by how much.  This
+-- 
+2.43.0
+
diff --git a/SME-0054-aarch64-Vector-move-fixes-for-nosimd.patch b/0157-Backport-SME-aarch64-Vector-move-fixes-for-nosimd.patch
similarity index 96%
rename from SME-0054-aarch64-Vector-move-fixes-for-nosimd.patch
rename to 0157-Backport-SME-aarch64-Vector-move-fixes-for-nosimd.patch
index e1ba2521214571092bfff9a0dc53b27bf2c2ee00..b2257eada67321f8c44efab369f4f45934871fda 100644
--- a/SME-0054-aarch64-Vector-move-fixes-for-nosimd.patch
+++ b/0157-Backport-SME-aarch64-Vector-move-fixes-for-nosimd.patch
@@ -1,7 +1,8 @@
-From 8bbb3c4a3343a9840a61f983a441c3ebfe98c36a Mon Sep 17 00:00:00 2001
+From 737d2a5f1c5e725b7e5a20075270016ebf56b44c Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 13 Sep 2022 09:28:49 +0100
-Subject: [PATCH 054/144] aarch64: Vector move fixes for +nosimd
+Subject: [PATCH 058/157] [Backport][SME] aarch64: Vector move fixes for
+ +nosimd
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=721c0fb3aca31d3bf8ad6e929eab32e29a427e60
 
@@ -251,7 +252,7 @@ index ba76a1b78..f8446e212 100644
    [(const_int 0)]
  {
 diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
-index 4d742a493..08f346e6a 100644
+index a47b39281..ef7fc4ecb 100644
 --- a/gcc/config/aarch64/aarch64-simd.md
 +++ b/gcc/config/aarch64/aarch64-simd.md
 @@ -21,7 +21,7 @@
@@ -477,7 +478,7 @@ index 4d742a493..08f346e6a 100644
     (set_attr "length" "4")]
  )
  
-@@ -4226,12 +4249,22 @@
+@@ -4322,12 +4345,22 @@
    [(set_attr "type" "neon_to_gp, neon_dup, neon_store1_one_lane")]
  )
  
@@ -501,7 +502,7 @@ index 4d742a493..08f346e6a 100644
     && aarch64_mergeable_load_pair_p (mode, operands[1], operands[2])"
    "ldr\\t%0, %1"
    [(set_attr "type" "neon_load1_1reg")]
-@@ -4261,7 +4294,7 @@
+@@ -4357,7 +4390,7 @@
  	(vec_concat:
  	   (match_operand:VDCSIF 1 "register_operand" "w, r")
  	   (match_operand:VDCSIF 2 "register_operand" "w, r")))]
@@ -510,7 +511,7 @@ index 4d742a493..08f346e6a 100644
    "@
     stp\t%1, %2, %y0
     stp\t%1, %2, %y0"
-@@ -4276,39 +4309,44 @@
+@@ -4372,39 +4405,44 @@
  ;; the register alternatives either don't accept or themselves disparage.
  
  (define_insn "*aarch64_combine_internal"
@@ -565,7 +566,7 @@ index 4d742a493..08f346e6a 100644
  )
  
  ;; In this insn, operand 1 should be low, and operand 2 the high part of the
-@@ -4319,13 +4357,12 @@
+@@ -4415,13 +4453,12 @@
  	(vec_concat:
  	  (match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")
  	  (match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")))]
@@ -581,7 +582,7 @@ index 4d742a493..08f346e6a 100644
  )
  
  (define_insn "*aarch64_combinez_be"
-@@ -4333,13 +4370,12 @@
+@@ -4429,13 +4466,12 @@
          (vec_concat:
  	  (match_operand:VDCSIF 2 "aarch64_simd_or_scalar_imm_zero")
  	  (match_operand:VDCSIF 1 "nonimmediate_operand" "w,?r,m")))]
@@ -597,7 +598,7 @@ index 4d742a493..08f346e6a 100644
  )
  
  ;; Form a vector whose first half (in array order) comes from operand 1
-@@ -4350,7 +4386,7 @@
+@@ -4446,7 +4482,7 @@
  	(vec_concat:
  	  (match_operand:VDCSIF 1 "general_operand")
  	  (match_operand:VDCSIF 2 "general_operand")))]
@@ -606,7 +607,7 @@ index 4d742a493..08f346e6a 100644
  {
    int lo = BYTES_BIG_ENDIAN ? 2 : 1;
    int hi = BYTES_BIG_ENDIAN ? 1 : 2;
-@@ -4368,7 +4404,7 @@
+@@ -4464,7 +4500,7 @@
      }
    else
      {
@@ -615,7 +616,7 @@ index 4d742a493..08f346e6a 100644
        operands[lo] = force_reg (mode, operands[lo]);
        if (!aarch64_simd_nonimmediate_operand (operands[hi], mode))
  	{
-@@ -4390,7 +4426,7 @@
+@@ -4486,7 +4522,7 @@
    [(match_operand: 0 "register_operand")
     (match_operand:VDC 1 "general_operand")
     (match_operand:VDC 2 "general_operand")]
@@ -624,7 +625,7 @@ index 4d742a493..08f346e6a 100644
  {
    if (BYTES_BIG_ENDIAN)
      std::swap (operands[1], operands[2]);
-@@ -7063,7 +7099,7 @@
+@@ -7367,7 +7403,7 @@
  (define_expand "mov"
    [(set (match_operand:VSTRUCT_QD 0 "nonimmediate_operand")
  	(match_operand:VSTRUCT_QD 1 "general_operand"))]
@@ -633,7 +634,7 @@ index 4d742a493..08f346e6a 100644
  {
    if (can_create_pseudo_p ())
      {
-@@ -7075,7 +7111,7 @@
+@@ -7379,7 +7415,7 @@
  (define_expand "mov"
    [(set (match_operand:VSTRUCT 0 "nonimmediate_operand")
  	(match_operand:VSTRUCT 1 "general_operand"))]
@@ -642,7 +643,7 @@ index 4d742a493..08f346e6a 100644
  {
    if (can_create_pseudo_p ())
      {
-@@ -7255,7 +7291,8 @@
+@@ -7559,7 +7595,8 @@
  (define_insn "*aarch64_be_mov"
    [(set (match_operand:VSTRUCT_2D 0 "nonimmediate_operand" "=w,m,w")
  	(match_operand:VSTRUCT_2D 1 "general_operand"      " w,w,m"))]
@@ -652,7 +653,7 @@ index 4d742a493..08f346e6a 100644
     && (register_operand (operands[0], mode)
         || register_operand (operands[1], mode))"
    "@
-@@ -7269,7 +7306,8 @@
+@@ -7573,7 +7610,8 @@
  (define_insn "*aarch64_be_mov"
    [(set (match_operand:VSTRUCT_2Q 0 "nonimmediate_operand" "=w,m,w")
  	(match_operand:VSTRUCT_2Q 1 "general_operand"      " w,w,m"))]
@@ -662,7 +663,7 @@ index 4d742a493..08f346e6a 100644
     && (register_operand (operands[0], mode)
         || register_operand (operands[1], mode))"
    "@
-@@ -7277,13 +7315,15 @@
+@@ -7581,13 +7619,15 @@
     stp\\t%q1, %R1, %0
     ldp\\t%q0, %R0, %1"
    [(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
@@ -679,7 +680,7 @@ index 4d742a493..08f346e6a 100644
     && (register_operand (operands[0], OImode)
         || register_operand (operands[1], OImode))"
    "@
-@@ -7291,57 +7331,66 @@
+@@ -7595,57 +7635,66 @@
     stp\\t%q1, %R1, %0
     ldp\\t%q0, %R0, %1"
    [(set_attr "type" "multiple,neon_stp_q,neon_ldp_q")
@@ -753,7 +754,7 @@ index 4d742a493..08f346e6a 100644
    [(const_int 0)]
  {
    aarch64_simd_emit_reg_reg_move (operands, mode, 2);
-@@ -7351,7 +7400,7 @@
+@@ -7655,7 +7704,7 @@
  (define_split
    [(set (match_operand:OI 0 "register_operand")
  	(match_operand:OI 1 "register_operand"))]
@@ -762,7 +763,7 @@ index 4d742a493..08f346e6a 100644
    [(const_int 0)]
  {
    aarch64_simd_emit_reg_reg_move (operands, TImode, 2);
-@@ -7361,7 +7410,7 @@
+@@ -7665,7 +7714,7 @@
  (define_split
    [(set (match_operand:VSTRUCT_3QD 0 "nonimmediate_operand")
  	(match_operand:VSTRUCT_3QD 1 "general_operand"))]
@@ -771,7 +772,7 @@ index 4d742a493..08f346e6a 100644
    [(const_int 0)]
  {
    if (register_operand (operands[0], mode)
-@@ -7370,7 +7419,7 @@
+@@ -7674,7 +7723,7 @@
        aarch64_simd_emit_reg_reg_move (operands, mode, 3);
        DONE;
      }
@@ -780,7 +781,7 @@ index 4d742a493..08f346e6a 100644
      {
        int elt_size = GET_MODE_SIZE (mode).to_constant () / ;
        machine_mode pair_mode = elt_size == 16 ? V2x16QImode : V2x8QImode;
-@@ -7397,7 +7446,7 @@
+@@ -7701,7 +7750,7 @@
  (define_split
    [(set (match_operand:CI 0 "nonimmediate_operand")
  	(match_operand:CI 1 "general_operand"))]
@@ -789,7 +790,7 @@ index 4d742a493..08f346e6a 100644
    [(const_int 0)]
  {
    if (register_operand (operands[0], CImode)
-@@ -7406,7 +7455,7 @@
+@@ -7710,7 +7759,7 @@
        aarch64_simd_emit_reg_reg_move (operands, TImode, 3);
        DONE;
      }
@@ -798,7 +799,7 @@ index 4d742a493..08f346e6a 100644
      {
        emit_move_insn (simplify_gen_subreg (OImode, operands[0], CImode, 0),
  		      simplify_gen_subreg (OImode, operands[1], CImode, 0));
-@@ -7425,7 +7474,7 @@
+@@ -7729,7 +7778,7 @@
  (define_split
    [(set (match_operand:VSTRUCT_4QD 0 "nonimmediate_operand")
  	(match_operand:VSTRUCT_4QD 1 "general_operand"))]
@@ -807,7 +808,7 @@ index 4d742a493..08f346e6a 100644
    [(const_int 0)]
  {
    if (register_operand (operands[0], mode)
-@@ -7434,7 +7483,7 @@
+@@ -7738,7 +7787,7 @@
        aarch64_simd_emit_reg_reg_move (operands, mode, 4);
        DONE;
      }
@@ -816,7 +817,7 @@ index 4d742a493..08f346e6a 100644
      {
        int elt_size = GET_MODE_SIZE (mode).to_constant () / ;
        machine_mode pair_mode = elt_size == 16 ? V2x16QImode : V2x8QImode;
-@@ -7455,7 +7504,7 @@
+@@ -7759,7 +7808,7 @@
  (define_split
    [(set (match_operand:XI 0 "nonimmediate_operand")
  	(match_operand:XI 1 "general_operand"))]
@@ -825,7 +826,7 @@ index 4d742a493..08f346e6a 100644
    [(const_int 0)]
  {
    if (register_operand (operands[0], XImode)
-@@ -7464,7 +7513,7 @@
+@@ -7768,7 +7817,7 @@
        aarch64_simd_emit_reg_reg_move (operands, TImode, 4);
        DONE;
      }
@@ -835,10 +836,10 @@ index 4d742a493..08f346e6a 100644
        emit_move_insn (simplify_gen_subreg (OImode, operands[0], XImode, 0),
  		      simplify_gen_subreg (OImode, operands[1], XImode, 0));
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index ec97c58f6..34d16959c 100644
+index 02210ed13..b4b646fa0 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -3531,7 +3531,7 @@ aarch64_classify_vector_mode (machine_mode mode)
+@@ -3748,7 +3748,7 @@ aarch64_classify_vector_mode (machine_mode mode)
      case E_OImode:
      case E_CImode:
      case E_XImode:
@@ -847,7 +848,7 @@ index ec97c58f6..34d16959c 100644
  
      /* Structures of 64-bit Advanced SIMD vectors.  */
      case E_V2x8QImode:
-@@ -3558,7 +3558,7 @@ aarch64_classify_vector_mode (machine_mode mode)
+@@ -3775,7 +3775,7 @@ aarch64_classify_vector_mode (machine_mode mode)
      case E_V4x4HFmode:
      case E_V4x2SFmode:
      case E_V4x1DFmode:
@@ -856,7 +857,7 @@ index ec97c58f6..34d16959c 100644
  
      /* Structures of 128-bit Advanced SIMD vectors.  */
      case E_V2x16QImode:
-@@ -3585,7 +3585,7 @@ aarch64_classify_vector_mode (machine_mode mode)
+@@ -3802,7 +3802,7 @@ aarch64_classify_vector_mode (machine_mode mode)
      case E_V4x8HFmode:
      case E_V4x4SFmode:
      case E_V4x2DFmode:
@@ -865,7 +866,7 @@ index ec97c58f6..34d16959c 100644
  
      /* 64-bit Advanced SIMD vectors.  */
      case E_V8QImode:
-@@ -3605,7 +3605,7 @@ aarch64_classify_vector_mode (machine_mode mode)
+@@ -3822,7 +3822,7 @@ aarch64_classify_vector_mode (machine_mode mode)
      case E_V8BFmode:
      case E_V4SFmode:
      case E_V2DFmode:
@@ -874,7 +875,7 @@ index ec97c58f6..34d16959c 100644
  
      default:
        return 0;
-@@ -3893,7 +3893,8 @@ aarch64_vectorize_related_mode (machine_mode vector_mode,
+@@ -4110,7 +4110,8 @@ aarch64_vectorize_related_mode (machine_mode vector_mode,
      }
  
    /* Prefer to use 1 128-bit vector instead of 2 64-bit vectors.  */
@@ -884,7 +885,7 @@ index ec97c58f6..34d16959c 100644
        && known_eq (nunits, 0U)
        && known_eq (GET_MODE_BITSIZE (vector_mode), 64U)
        && maybe_ge (GET_MODE_BITSIZE (element_mode)
-@@ -3991,7 +3992,7 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
+@@ -4208,7 +4209,7 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
  
    if (GP_REGNUM_P (regno))
      {
@@ -893,7 +894,7 @@ index ec97c58f6..34d16959c 100644
  	return false;
        if (known_le (GET_MODE_SIZE (mode), 8))
  	return true;
-@@ -10667,7 +10668,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -10884,7 +10885,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
  			    || mode == TImode
  			    || mode == TFmode
  			    || mode == TDmode
@@ -903,7 +904,7 @@ index ec97c58f6..34d16959c 100644
    /* If we are dealing with ADDR_QUERY_LDP_STP_N that means the incoming mode
       corresponds to the actual size of the memory being loaded/stored and the
       mode of the corresponding addressing mode is half of that.  */
-@@ -10697,6 +10699,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -10914,6 +10916,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
    /* On LE, for AdvSIMD, don't support anything other than POST_INC or
       REG addressing.  */
    if (advsimd_struct_p
@@ -911,7 +912,7 @@ index ec97c58f6..34d16959c 100644
        && !BYTES_BIG_ENDIAN
        && (code != POST_INC && code != REG))
      return false;
-@@ -10759,7 +10762,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -10976,7 +10979,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
  	            && aarch64_offset_7bit_signed_scaled_p (DImode, offset + 48));
  
  	  /* A 7bit offset check because OImode will emit a ldp/stp
@@ -920,7 +921,7 @@ index ec97c58f6..34d16959c 100644
  	     For ldp/stp instructions, the offset is scaled for the size of a
  	     single element of the pair.  */
  	  if (aarch64_advsimd_partial_struct_mode_p (mode)
-@@ -10770,7 +10773,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -10987,7 +10990,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
  	    return aarch64_offset_7bit_signed_scaled_p (TImode, offset);
  
  	  /* Three 9/12 bit offsets checks because CImode will emit three
@@ -930,7 +931,7 @@ index ec97c58f6..34d16959c 100644
  	  if (aarch64_advsimd_partial_struct_mode_p (mode)
  	      && known_eq (GET_MODE_SIZE (mode), 24))
  	    return (aarch64_offset_7bit_signed_scaled_p (DImode, offset)
-@@ -12499,18 +12503,16 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+@@ -12716,18 +12720,16 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
    /* Use aarch64_sve_reload_mem for SVE memory reloads that cannot use
       LDR and STR.  See the comment at the head of aarch64-sve.md for
       more details about the big-endian handling.  */
@@ -955,7 +956,7 @@ index ec97c58f6..34d16959c 100644
      }
  
    /* If we have to disable direct literal pool loads and stores because the
-@@ -12527,9 +12529,13 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+@@ -12744,9 +12746,13 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
    /* Without the TARGET_SIMD instructions we cannot move a Q register
       to a Q register directly.  We need a scratch.  */
    if (REG_P (x)
@@ -971,7 +972,7 @@ index ec97c58f6..34d16959c 100644
        && reg_class_subset_p (rclass, FP_REGS))
      {
        sri->icode = code_for_aarch64_reload_mov (mode);
-@@ -12551,6 +12557,28 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+@@ -12768,6 +12774,28 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
    return NO_REGS;
  }
  
@@ -1000,7 +1001,7 @@ index ec97c58f6..34d16959c 100644
  static bool
  aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
  {
-@@ -13094,7 +13122,7 @@ aarch64_rtx_mult_cost (rtx x, enum rtx_code code, int outer, bool speed)
+@@ -13311,7 +13339,7 @@ aarch64_rtx_mult_cost (rtx x, enum rtx_code code, int outer, bool speed)
    if (VECTOR_MODE_P (mode))
      {
        unsigned int vec_flags = aarch64_classify_vector_mode (mode);
@@ -1009,7 +1010,7 @@ index ec97c58f6..34d16959c 100644
  	{
  	  /* The select-operand-high-half versions of the instruction have the
  	     same cost as the three vector version - don't add the costs of the
-@@ -14040,7 +14068,7 @@ cost_minus:
+@@ -14257,7 +14285,7 @@ cost_minus:
  	  {
  	    /* SUBL2 and SUBW2.  */
  	    unsigned int vec_flags = aarch64_classify_vector_mode (mode);
@@ -1018,7 +1019,7 @@ index ec97c58f6..34d16959c 100644
  	      {
  		/* The select-operand-high-half versions of the sub instruction
  		   have the same cost as the regular three vector version -
-@@ -14127,7 +14155,7 @@ cost_plus:
+@@ -14359,7 +14387,7 @@ cost_plus:
  	  {
  	    /* ADDL2 and ADDW2.  */
  	    unsigned int vec_flags = aarch64_classify_vector_mode (mode);
@@ -1027,7 +1028,7 @@ index ec97c58f6..34d16959c 100644
  	      {
  		/* The select-operand-high-half versions of the add instruction
  		   have the same cost as the regular three vector version -
-@@ -15052,7 +15080,9 @@ aarch64_register_move_cost (machine_mode mode,
+@@ -15284,7 +15312,9 @@ aarch64_register_move_cost (machine_mode mode,
      return aarch64_register_move_cost (mode, from, GENERAL_REGS)
              + aarch64_register_move_cost (mode, GENERAL_REGS, to);
  
@@ -1038,7 +1039,7 @@ index ec97c58f6..34d16959c 100644
      {
        /* 128-bit operations on general registers require 2 instructions.  */
        if (from == GENERAL_REGS && to == GENERAL_REGS)
-@@ -15080,6 +15110,16 @@ aarch64_register_move_cost (machine_mode mode,
+@@ -15312,6 +15342,16 @@ aarch64_register_move_cost (machine_mode mode,
    else if (to == GENERAL_REGS)
      return regmove_cost->FP2GP;
  
@@ -1055,7 +1056,7 @@ index ec97c58f6..34d16959c 100644
    return regmove_cost->FP2FP;
  }
  
-@@ -21272,6 +21312,9 @@ aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
+@@ -21504,6 +21544,9 @@ aarch64_simd_valid_immediate (rtx op, simd_immediate_info *info,
    if (vec_flags == 0 || vec_flags == (VEC_ADVSIMD | VEC_STRUCT))
      return false;
  
@@ -1065,7 +1066,7 @@ index ec97c58f6..34d16959c 100644
    if (vec_flags & VEC_SVE_PRED)
      return aarch64_sve_pred_valid_immediate (op, info);
  
-@@ -24198,7 +24241,7 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
+@@ -24430,7 +24473,7 @@ aarch64_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
        std::swap (d->op0, d->op1);
      }
  
@@ -1074,7 +1075,7 @@ index ec97c58f6..34d16959c 100644
         || d->vec_flags == VEC_SVE_DATA
         || d->vec_flags == (VEC_SVE_DATA | VEC_PARTIAL)
         || d->vec_flags == VEC_SVE_PRED)
-@@ -27654,6 +27697,9 @@ aarch64_libgcc_floating_mode_supported_p
+@@ -27977,6 +28020,9 @@ aarch64_libgcc_floating_mode_supported_p
  #undef TARGET_SECONDARY_RELOAD
  #define TARGET_SECONDARY_RELOAD aarch64_secondary_reload
  
@@ -1085,7 +1086,7 @@ index ec97c58f6..34d16959c 100644
  #define TARGET_SHIFT_TRUNCATION_MASK aarch64_shift_truncation_mask
  
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index c12d4d648..bfdd5d5c0 100644
+index 8757a962f..c0cc91756 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
 @@ -374,8 +374,11 @@
@@ -1110,7 +1111,7 @@ index c12d4d648..bfdd5d5c0 100644
  	     (match_test "TARGET_SIMD"))
  
  	(and (eq_attr "arch" "fp16")
-@@ -6766,8 +6769,8 @@
+@@ -6768,8 +6771,8 @@
  )
  
  (define_expand "@aarch64_reload_mov"
@@ -1122,7 +1123,7 @@ index c12d4d648..bfdd5d5c0 100644
    ]
    "TARGET_FLOAT"
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index 6fb1af5ca..3ce654478 100644
+index d0cd1b788..a8a39b65a 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
 @@ -313,6 +313,8 @@
@@ -1819,5 +1820,5 @@ index 000000000..454ac2771
 +
 +#include "vect_unary_1.c"
 -- 
-2.19.1
+2.33.0
 
diff --git a/0157-LoongArch-Split-loongarch_option_override_internal-i.patch b/0157-LoongArch-Split-loongarch_option_override_internal-i.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2967cb09199069304cd838694f7c6f0f36d41018
--- /dev/null
+++ b/0157-LoongArch-Split-loongarch_option_override_internal-i.patch
@@ -0,0 +1,800 @@
+From 6dd3434f004dd1481a3d18fb416b3ddd4151b10f Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Sat, 30 Mar 2024 16:43:14 +0800
+Subject: [PATCH 157/188] LoongArch: Split loongarch_option_override_internal
+ into smaller procedures
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch.opt.in: Mark -m[no-]recip as
+	aliases to -mrecip={all,none}, respectively.
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/loongarch-def.h (ABI_FPU_64): Rename to...
+	(ABI_FPU64_P): ...this.
+	(ABI_FPU_32): Rename to...
+	(ABI_FPU32_P): ...this.
+	(ABI_FPU_NONE): Rename to...
+	(ABI_NOFPU_P): ...this.
+	(ABI_LP64_P): Define.
+	* config/loongarch/loongarch.cc (loongarch_init_print_operand_punct):
+	Merged into loongarch_global_init.
+	(loongarch_cpu_option_override): Renamed to
+	loongarch_target_option_override.
+	(loongarch_option_override_internal): Move the work after
+	loongarch_config_target into loongarch_target_option_override.
+	(loongarch_global_init): Define.
+	(INIT_TARGET_FLAG): Move to loongarch-opts.cc.
+	(loongarch_option_override): Call loongarch_global_init
+	separately.
+	* config/loongarch/loongarch-opts.cc (loongarch_parse_mrecip_scheme):
+	Split the parsing of -mrecip= from
+	loongarch_option_override_internal.
+	(loongarch_generate_mrecip_scheme): Define. Split from
+	loongarch_option_override_internal.
+	(loongarch_target_option_override): Define. Renamed from
+	loongarch_cpu_option_override.
+	(loongarch_init_misc_options): Define. Split from
+	loongarch_option_override_internal.
+	(INIT_TARGET_FLAG): Move from loongarch.cc.
+	* config/loongarch/loongarch-opts.h (loongarch_target_option_override):
+	New prototype.
+	(loongarch_parse_mrecip_scheme): New prototype.
+	(loongarch_init_misc_options): New prototype.
+	(TARGET_ABI_LP64): Simplify with ABI_LP64_P.
+	* config/loongarch/loongarch.h (TARGET_RECIP_DIV): Simplify.
+	Do not reference specific CPU architecture (LA664).
+	(TARGET_RECIP_SQRT): Same.
+	(TARGET_RECIP_RSQRT): Same.
+	(TARGET_RECIP_VEC_DIV): Same.
+	(TARGET_RECIP_VEC_SQRT): Same.
+	(TARGET_RECIP_VEC_RSQRT): Same.
+---
+ gcc/config/loongarch/genopts/loongarch.opt.in |   8 +-
+ gcc/config/loongarch/loongarch-def.h          |  11 +-
+ gcc/config/loongarch/loongarch-opts.cc        | 253 ++++++++++++++++++
+ gcc/config/loongarch/loongarch-opts.h         |  27 +-
+ gcc/config/loongarch/loongarch.cc             | 253 +++---------------
+ gcc/config/loongarch/loongarch.h              |  18 +-
+ gcc/config/loongarch/loongarch.opt            |   8 +-
+ 7 files changed, 342 insertions(+), 236 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 4d6b1902d..9c6f59bb8 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -197,14 +197,14 @@ mexplicit-relocs
+ Target Alias(mexplicit-relocs=, always, none)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
+-mrecip
+-Target RejectNegative Var(la_recip) Save
+-Generate approximate reciprocal divide and square root for better throughput.
+-
+ mrecip=
+ Target RejectNegative Joined Var(la_recip_name) Save
+ Control generation of reciprocal estimates.
+ 
++mrecip
++Target Alias(mrecip=, all, none)
++Generate approximate reciprocal divide and square root for better throughput.
++
+ ; The code model option names for -mcmodel.
+ Enum
+ Name(cmodel) Type(int)
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index fdcf43fc7..b1423bcfe 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -90,11 +90,16 @@ extern loongarch_def_array
+ 
+ #define TO_LP64_ABI_BASE(C) (C)
+ 
+-#define ABI_FPU_64(abi_base) \
++#define ABI_LP64_P(abi_base) \
++  (abi_base == ABI_BASE_LP64D \
++   || abi_base == ABI_BASE_LP64F \
++   || abi_base == ABI_BASE_LP64S)
++
++#define ABI_FPU64_P(abi_base) \
+   (abi_base == ABI_BASE_LP64D)
+-#define ABI_FPU_32(abi_base) \
++#define ABI_FPU32_P(abi_base) \
+   (abi_base == ABI_BASE_LP64F)
+-#define ABI_FPU_NONE(abi_base) \
++#define ABI_NOFPU_P(abi_base) \
+   (abi_base == ABI_BASE_LP64S)
+ 
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index bdecfaf49..404642a9e 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -25,6 +25,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "coretypes.h"
+ #include "tm.h"
+ #include "obstack.h"
++#include "opts.h"
+ #include "diagnostic-core.h"
+ 
+ #include "loongarch-cpu.h"
+@@ -32,8 +33,12 @@ along with GCC; see the file COPYING3.  If not see
+ #include "loongarch-str.h"
+ #include "loongarch-def.h"
+ 
++/* Target configuration */
+ struct loongarch_target la_target;
+ 
++/* RTL cost information */
++const struct loongarch_rtx_cost_data *loongarch_cost;
++
+ /* ABI-related configuration.  */
+ #define ABI_COUNT (sizeof(abi_priority_list)/sizeof(struct loongarch_abi))
+ static const struct loongarch_abi
+@@ -795,3 +800,251 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+   /* ISA evolution features */
+   opts->x_la_isa_evolution = target->isa.evolution;
+ }
++
++/* -mrecip= handling */
++static struct
++  {
++    const char *string;	    /* option name.  */
++    unsigned int mask;	    /* mask bits to set.  */
++  }
++const recip_options[] = {
++      { "all",       RECIP_MASK_ALL },
++      { "none",      RECIP_MASK_NONE },
++      { "div",       RECIP_MASK_DIV },
++      { "sqrt",      RECIP_MASK_SQRT },
++      { "rsqrt",     RECIP_MASK_RSQRT },
++      { "vec-div",   RECIP_MASK_VEC_DIV },
++      { "vec-sqrt",  RECIP_MASK_VEC_SQRT },
++      { "vec-rsqrt", RECIP_MASK_VEC_RSQRT },
++};
++
++/* Parser for -mrecip=.  */
++unsigned int
++loongarch_parse_mrecip_scheme (const char *recip_string)
++{
++  unsigned int result_mask = RECIP_MASK_NONE;
++
++  if (recip_string)
++    {
++      char *p = ASTRDUP (recip_string);
++      char *q;
++      unsigned int mask, i;
++      bool invert;
++
++      while ((q = strtok (p, ",")) != NULL)
++	{
++	  p = NULL;
++	  if (*q == '!')
++	    {
++	      invert = true;
++	      q++;
++	    }
++	  else
++	    invert = false;
++
++	  if (!strcmp (q, "default"))
++	    mask = RECIP_MASK_ALL;
++	  else
++	    {
++	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
++		if (!strcmp (q, recip_options[i].string))
++		  {
++		    mask = recip_options[i].mask;
++		    break;
++		  }
++
++	      if (i == ARRAY_SIZE (recip_options))
++		{
++		  error ("unknown option for %<-mrecip=%s%>", q);
++		  invert = false;
++		  mask = RECIP_MASK_NONE;
++		}
++	    }
++
++	  if (invert)
++	    result_mask &= ~mask;
++	  else
++	    result_mask |= mask;
++	}
++    }
++  return result_mask;
++}
++
++/* Generate -mrecip= argument based on the mask.  */
++const char*
++loongarch_generate_mrecip_scheme (unsigned int mask)
++{
++  static char recip_scheme_str[128];
++  int p = 0, tmp;
++
++  switch (mask)
++    {
++      case RECIP_MASK_ALL:
++	return "all";
++
++      case RECIP_MASK_NONE:
++	return "none";
++    }
++
++  for (unsigned long i = 2; i < ARRAY_SIZE (recip_options); i++)
++    {
++      if (mask & recip_options[i].mask)
++	{
++	  if ((tmp = strlen (recip_options[i].string) + 1) >= 127 - p)
++	    gcc_unreachable ();
++
++	  recip_scheme_str[p] = ',';
++	  strcpy (recip_scheme_str + p + 1, recip_options[i].string);
++	  p += tmp;
++	}
++    }
++  recip_scheme_str[p] = '\0';
++  return recip_scheme_str + 1;
++}
++
++
++
++/* Refresh the switches acccording to the resolved loongarch_target struct.  */
++void
++loongarch_target_option_override (struct loongarch_target *target,
++				  struct gcc_options *opts,
++				  struct gcc_options *opts_set)
++{
++  loongarch_update_gcc_opt_status (target, opts, opts_set);
++
++  /* alignments */
++  if (opts->x_flag_align_functions && !opts->x_str_align_functions)
++    opts->x_str_align_functions
++      = loongarch_cpu_align[target->cpu_tune].function;
++
++  if (opts->x_flag_align_labels && !opts->x_str_align_labels)
++    opts->x_str_align_labels = loongarch_cpu_align[target->cpu_tune].label;
++
++  /* Set up parameters to be used in prefetching algorithm.  */
++  int simultaneous_prefetches
++    = loongarch_cpu_cache[target->cpu_tune].simultaneous_prefetches;
++
++  SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches,
++		       simultaneous_prefetches);
++
++  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size,
++		       loongarch_cpu_cache[target->cpu_tune].l1d_line_size);
++
++  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size,
++		       loongarch_cpu_cache[target->cpu_tune].l1d_size);
++
++  SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size,
++		       loongarch_cpu_cache[target->cpu_tune].l2d_size);
++
++  /* Other arch-specific overrides.  */
++  switch (target->cpu_arch)
++    {
++      case CPU_LA664:
++	/* Enable -mrecipe=all for LA664 by default.  */
++	if (!opts_set->x_recip_mask)
++	  {
++	    opts->x_recip_mask = RECIP_MASK_ALL;
++	    opts_set->x_recip_mask = 1;
++	  }
++    }
++
++  /* -mrecip= */
++  opts->x_la_recip_name
++    = loongarch_generate_mrecip_scheme (opts->x_recip_mask);
++
++  /* Decide which rtx_costs structure to use.  */
++  if (opts->x_optimize_size)
++    loongarch_cost = &loongarch_rtx_cost_optimize_size;
++  else
++    loongarch_cost = &loongarch_cpu_rtx_cost_data[target->cpu_tune];
++
++  /* If the user hasn't specified a branch cost, use the processor's
++     default.  */
++  if (!opts_set->x_la_branch_cost)
++    opts->x_la_branch_cost = loongarch_cost->branch_cost;
++
++  /* other stuff */
++  if (ABI_LP64_P (target->abi.base))
++    opts->x_flag_pcc_struct_return = 0;
++
++  switch (target->cmodel)
++    {
++      case CMODEL_EXTREME:
++	if (opts->x_flag_plt)
++	  {
++	    if (opts_set->x_flag_plt)
++	      error ("code model %qs is not compatible with %s",
++		     "extreme", "-fplt");
++	    opts->x_flag_plt = 0;
++	  }
++	break;
++
++      case CMODEL_TINY_STATIC:
++      case CMODEL_MEDIUM:
++      case CMODEL_NORMAL:
++      case CMODEL_TINY:
++      case CMODEL_LARGE:
++	break;
++
++      default:
++	gcc_unreachable ();
++    }
++}
++
++
++/* Resolve options that's not covered by la_target.  */
++void
++loongarch_init_misc_options (struct gcc_options *opts,
++			     struct gcc_options *opts_set)
++{
++  if (opts->x_flag_pic)
++    opts->x_g_switch_value = 0;
++
++  /* -mrecip options.  */
++  opts->x_recip_mask = loongarch_parse_mrecip_scheme (opts->x_la_recip_name);
++
++#define INIT_TARGET_FLAG(NAME, INIT) \
++  { \
++    if (!(opts_set->x_target_flags & MASK_##NAME)) \
++      { \
++	if (INIT) \
++	  opts->x_target_flags |= MASK_##NAME; \
++	else \
++	  opts->x_target_flags &= ~MASK_##NAME; \
++      } \
++  }
++
++  /* Enable conditional moves for int and float by default.  */
++  INIT_TARGET_FLAG (COND_MOVE_INT, 1)
++  INIT_TARGET_FLAG (COND_MOVE_FLOAT, 1)
++
++  /* Set mrelax default.  */
++  INIT_TARGET_FLAG (LINKER_RELAXATION,
++		    HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION)
++
++#undef INIT_TARGET_FLAG
++
++  /* Set mexplicit-relocs default.  */
++  if (opts->x_la_opt_explicit_relocs == M_OPT_UNSET)
++    opts->x_la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS
++				      ? (TARGET_LINKER_RELAXATION
++					 ? EXPLICIT_RELOCS_AUTO
++					 : EXPLICIT_RELOCS_ALWAYS)
++				      : EXPLICIT_RELOCS_NONE);
++
++  /* Enable sw prefetching at -O3 and higher.  */
++  if (opts->x_flag_prefetch_loop_arrays < 0
++      && (opts->x_optimize >= 3 || opts->x_flag_profile_use)
++      && !opts->x_optimize_size)
++    opts->x_flag_prefetch_loop_arrays = 1;
++
++  if (TARGET_DIRECT_EXTERN_ACCESS_OPTS_P (opts) && opts->x_flag_shlib)
++    error ("%qs cannot be used for compiling a shared library",
++	   "-mdirect-extern-access");
++
++  /* Enforce that interval is the same size as size so the mid-end does the
++     right thing.  */
++  SET_OPTION_IF_UNSET (opts, opts_set,
++		       param_stack_clash_protection_probe_interval,
++		       param_stack_clash_protection_guard_size);
++}
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 463812136..177d587da 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -30,6 +30,10 @@ along with GCC; see the file COPYING3.  If not see
+ /* Target configuration */
+ extern struct loongarch_target la_target;
+ 
++/* RTL cost information */
++extern const struct loongarch_rtx_cost_data *loongarch_cost;
++
++
+ /* Initialize loongarch_target from separate option variables.  */
+ void
+ loongarch_init_target (struct loongarch_target *target,
+@@ -46,11 +50,30 @@ loongarch_config_target (struct loongarch_target *target,
+ 			 struct loongarch_flags *flags,
+ 			 int follow_multilib_list_p);
+ 
++
++/* Refresh the switches acccording to the resolved loongarch_target struct.  */
++void
++loongarch_target_option_override (struct loongarch_target *target,
++				  struct gcc_options *opts,
++				  struct gcc_options *opts_set);
++
++
+ /* option status feedback for "gcc --help=target -Q" */
+ void
+ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ 				 struct gcc_options *opts,
+ 				 struct gcc_options *opts_set);
++
++
++/* Parser for -mrecip=.  */
++unsigned int
++loongarch_parse_mrecip_scheme (const char *recip_string);
++
++
++/* Resolve options that's not covered by la_target.  */
++void
++loongarch_init_misc_options (struct gcc_options *opts,
++			     struct gcc_options *opts_set);
+ #endif
+ 
+ /* Flag status */
+@@ -80,9 +103,7 @@ struct loongarch_flags {
+ #define TARGET_DOUBLE_FLOAT_ABI	  (la_target.abi.base == ABI_BASE_LP64D)
+ 
+ #define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64)
+-#define TARGET_ABI_LP64		  (la_target.abi.base == ABI_BASE_LP64D	\
+-				   || la_target.abi.base == ABI_BASE_LP64F \
+-				   || la_target.abi.base == ABI_BASE_LP64S)
++#define TARGET_ABI_LP64		  ABI_LP64_P(la_target.abi.base)
+ 
+ #define ISA_HAS_LSX \
+   (la_target.isa.simd == ISA_EXT_SIMD_LSX \
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 8d9cda165..c2f3739d0 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -208,9 +208,6 @@ const enum reg_class loongarch_regno_to_class[FIRST_PSEUDO_REGISTER] = {
+     FRAME_REGS,	FRAME_REGS
+ };
+ 
+-/* Which cost information to use.  */
+-static const struct loongarch_rtx_cost_data *loongarch_cost;
+-
+ /* Information about a single argument.  */
+ struct loongarch_arg_info
+ {
+@@ -5908,17 +5905,6 @@ loongarch_print_operand_punctuation (FILE *file, int ch)
+     }
+ }
+ 
+-/* Initialize loongarch_print_operand_punct.  */
+-
+-static void
+-loongarch_init_print_operand_punct (void)
+-{
+-  const char *p;
+-
+-  for (p = ".$"; *p; p++)
+-    loongarch_print_operand_punct[(unsigned char) *p] = true;
+-}
+-
+ /* PRINT_OPERAND prefix LETTER refers to the integer branch instruction
+    associated with condition CODE.  Print the condition part of the
+    opcode to FILE.  */
+@@ -7622,118 +7608,15 @@ loongarch_init_machine_status (void)
+ }
+ 
+ static void
+-loongarch_cpu_option_override (struct loongarch_target *target,
+-			       struct gcc_options *opts,
+-			       struct gcc_options *opts_set)
+-{
+-  /* alignments */
+-  if (opts->x_flag_align_functions && !opts->x_str_align_functions)
+-    opts->x_str_align_functions
+-      = loongarch_cpu_align[target->cpu_tune].function;
+-
+-  if (opts->x_flag_align_labels && !opts->x_str_align_labels)
+-    opts->x_str_align_labels = loongarch_cpu_align[target->cpu_tune].label;
+-
+-  /* Set up parameters to be used in prefetching algorithm.  */
+-  int simultaneous_prefetches
+-    = loongarch_cpu_cache[target->cpu_tune].simultaneous_prefetches;
+-
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_simultaneous_prefetches,
+-		       simultaneous_prefetches);
+-
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_line_size,
+-		       loongarch_cpu_cache[target->cpu_tune].l1d_line_size);
+-
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_l1_cache_size,
+-		       loongarch_cpu_cache[target->cpu_tune].l1d_size);
+-
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_l2_cache_size,
+-		       loongarch_cpu_cache[target->cpu_tune].l2d_size);
+-}
+-
+-static void
+-loongarch_option_override_internal (struct gcc_options *opts,
+-				    struct gcc_options *opts_set)
++loongarch_global_init (void)
+ {
+-  int i, regno, mode;
+-
+-  if (flag_pic)
+-    g_switch_value = 0;
+-
+-  loongarch_init_target (&la_target,
+-			 la_opt_cpu_arch, la_opt_cpu_tune, la_opt_fpu,
+-			 la_opt_simd, la_opt_abi_base, la_opt_abi_ext,
+-			 la_opt_cmodel, opts->x_la_isa_evolution,
+-			 opts_set->x_la_isa_evolution);
+-
+-  /* Handle target-specific options: compute defaults/conflicts etc.  */
+-  loongarch_config_target (&la_target, NULL, 0);
+-
+-  loongarch_update_gcc_opt_status (&la_target, opts, opts_set);
+-  loongarch_cpu_option_override (&la_target, opts, opts_set);
+-
+-  if (TARGET_ABI_LP64)
+-    flag_pcc_struct_return = 0;
+-
+-  /* Decide which rtx_costs structure to use.  */
+-  if (optimize_size)
+-    loongarch_cost = &loongarch_rtx_cost_optimize_size;
+-  else
+-    loongarch_cost = &loongarch_cpu_rtx_cost_data[la_target.cpu_tune];
+-
+-  /* If the user hasn't specified a branch cost, use the processor's
+-     default.  */
+-  if (la_branch_cost == 0)
+-    la_branch_cost = loongarch_cost->branch_cost;
+-
+-  /* Enable sw prefetching at -O3 and higher.  */
+-  if (opts->x_flag_prefetch_loop_arrays < 0
+-      && (opts->x_optimize >= 3 || opts->x_flag_profile_use)
+-      && !opts->x_optimize_size)
+-    opts->x_flag_prefetch_loop_arrays = 1;
+-
+-  if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib)
+-    error ("%qs cannot be used for compiling a shared library",
+-	   "-mdirect-extern-access");
+-
+-  switch (la_target.cmodel)
+-    {
+-      case CMODEL_EXTREME:
+-	if (opts->x_flag_plt)
+-	  {
+-	    if (global_options_set.x_flag_plt)
+-	      error ("code model %qs is not compatible with %s",
+-		     "extreme", "-fplt");
+-	    opts->x_flag_plt = 0;
+-	  }
+-	break;
+-
+-      case CMODEL_TINY_STATIC:
+-      case CMODEL_MEDIUM:
+-      case CMODEL_NORMAL:
+-      case CMODEL_TINY:
+-      case CMODEL_LARGE:
+-	break;
+-
+-      default:
+-	gcc_unreachable ();
+-    }
+-
+-  /* Validate the guard size.  */
+-  int guard_size = param_stack_clash_protection_guard_size;
+-
+-  /* Enforce that interval is the same size as size so the mid-end does the
+-     right thing.  */
+-  SET_OPTION_IF_UNSET (opts, &global_options_set,
+-		       param_stack_clash_protection_probe_interval,
+-		       guard_size);
+-
+-  loongarch_init_print_operand_punct ();
++  /* Initialize loongarch_print_operand_punct.  */
++  for (const char *p = ".$"; *p; p++)
++    loongarch_print_operand_punct[(unsigned char) *p] = true;
+ 
+   /* Set up array to map GCC register number to debug register number.
+      Ignore the special purpose register numbers.  */
+-
+-  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
++  for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
+     {
+       if (GP_REG_P (i) || FP_REG_P (i))
+ 	loongarch_dwarf_regno[i] = i;
+@@ -7742,115 +7625,53 @@ loongarch_option_override_internal (struct gcc_options *opts,
+     }
+ 
+   /* Set up loongarch_hard_regno_mode_ok.  */
+-  for (mode = 0; mode < MAX_MACHINE_MODE; mode++)
+-    for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
++  for (int mode = 0; mode < MAX_MACHINE_MODE; mode++)
++    for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+       loongarch_hard_regno_mode_ok_p[mode][regno]
+ 	= loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode);
+ 
+   /* Function to allocate machine-dependent function status.  */
+   init_machine_status = &loongarch_init_machine_status;
++};
+ 
+-  /* -mrecip options.  */
+-  static struct
+-    {
+-      const char *string;	    /* option name.  */
+-      unsigned int mask;	    /* mask bits to set.  */
+-    }
+-  const recip_options[] = {
+-	{ "all",       RECIP_MASK_ALL },
+-	{ "none",      RECIP_MASK_NONE },
+-	{ "div",       RECIP_MASK_DIV },
+-	{ "sqrt",      RECIP_MASK_SQRT },
+-	{ "rsqrt",     RECIP_MASK_RSQRT },
+-	{ "vec-div",   RECIP_MASK_VEC_DIV },
+-	{ "vec-sqrt",  RECIP_MASK_VEC_SQRT },
+-	{ "vec-rsqrt", RECIP_MASK_VEC_RSQRT },
+-  };
+-
+-  if (la_recip_name)
+-    {
+-      char *p = ASTRDUP (la_recip_name);
+-      char *q;
+-      unsigned int mask, i;
+-      bool invert;
+-
+-      while ((q = strtok (p, ",")) != NULL)
+-	{
+-	  p = NULL;
+-	  if (*q == '!')
+-	    {
+-	      invert = true;
+-	      q++;
+-	    }
+-	  else
+-	    invert = false;
+-
+-	  if (!strcmp (q, "default"))
+-	    mask = RECIP_MASK_ALL;
+-	  else
+-	    {
+-	      for (i = 0; i < ARRAY_SIZE (recip_options); i++)
+-		if (!strcmp (q, recip_options[i].string))
+-		  {
+-		    mask = recip_options[i].mask;
+-		    break;
+-		  }
+-
+-	      if (i == ARRAY_SIZE (recip_options))
+-		{
+-		  error ("unknown option for %<-mrecip=%s%>", q);
+-		  invert = false;
+-		  mask = RECIP_MASK_NONE;
+-		}
+-	    }
+-
+-	  if (invert)
+-	    recip_mask &= ~mask;
+-	  else
+-	    recip_mask |= mask;
+-	}
+-    }
+-  if (la_recip)
+-    recip_mask |= RECIP_MASK_ALL;
+-  if (!ISA_HAS_FRECIPE)
+-    recip_mask = RECIP_MASK_NONE;
+-
+-#define INIT_TARGET_FLAG(NAME, INIT) \
+-  { \
+-    if (!(target_flags_explicit & MASK_##NAME)) \
+-      { \
+-	if (INIT) \
+-	  target_flags |= MASK_##NAME; \
+-	else \
+-	  target_flags &= ~MASK_##NAME; \
+-      } \
+-  }
+-
+-  /* Enable conditional moves for int and float by default.  */
+-  INIT_TARGET_FLAG (COND_MOVE_INT, 1)
+-  INIT_TARGET_FLAG (COND_MOVE_FLOAT, 1)
+-
+-  /* Set mrelax default.  */
+-  INIT_TARGET_FLAG (LINKER_RELAXATION,
+-		    HAVE_AS_MRELAX_OPTION && HAVE_AS_COND_BRANCH_RELAXATION)
++static void
++loongarch_option_override_internal (struct loongarch_target *target,
++				    struct gcc_options *opts,
++				    struct gcc_options *opts_set)
++{
++  /* Handle options not covered by struct loongarch_target.  */
++  loongarch_init_misc_options (opts, opts_set);
++
++  /* Resolve the target struct.  */
++  loongarch_init_target (target,
++			 opts->x_la_opt_cpu_arch,
++			 opts->x_la_opt_cpu_tune,
++			 opts->x_la_opt_fpu,
++			 opts->x_la_opt_simd,
++			 opts->x_la_opt_abi_base,
++			 opts->x_la_opt_abi_ext,
++			 opts->x_la_opt_cmodel,
++			 opts->x_la_isa_evolution,
++			 opts_set->x_la_isa_evolution);
+ 
+-#undef INIT_TARGET_FLAG
++  loongarch_config_target (target, NULL, 0);
+ 
+-  if (la_opt_explicit_relocs == M_OPT_UNSET)
+-    la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS
+-			      ? (TARGET_LINKER_RELAXATION
+-				 ? EXPLICIT_RELOCS_AUTO
+-				 : EXPLICIT_RELOCS_ALWAYS)
+-			      : EXPLICIT_RELOCS_NONE);
++  /* Override some options according to the resolved target.  */
++  loongarch_target_option_override (target, opts, opts_set);
+ }
+ 
+-
+ /* Implement TARGET_OPTION_OVERRIDE.  */
+ 
+ static void
+ loongarch_option_override (void)
+ {
+-  loongarch_option_override_internal (&global_options, &global_options_set);
++  /* Setting up the target configuration.  */
++  loongarch_option_override_internal (&la_target,
++				      &global_options,
++				      &global_options_set);
++
++  /* Global initializations.  */
++  loongarch_global_init ();
+ }
+ 
+ /* Implement TARGET_OPTION_SAVE.  */
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 698e42aec..221e8b286 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -710,12 +710,18 @@ enum reg_class
+ 			| RECIP_MASK_RSQRT | RECIP_MASK_VEC_SQRT \
+ 			| RECIP_MASK_VEC_DIV | RECIP_MASK_VEC_RSQRT)
+ 
+-#define TARGET_RECIP_DIV        ((recip_mask & RECIP_MASK_DIV) != 0 || TARGET_uARCH_LA664)
+-#define TARGET_RECIP_SQRT       ((recip_mask & RECIP_MASK_SQRT) != 0 || TARGET_uARCH_LA664)
+-#define TARGET_RECIP_RSQRT      ((recip_mask & RECIP_MASK_RSQRT) != 0 || TARGET_uARCH_LA664)
+-#define TARGET_RECIP_VEC_DIV    ((recip_mask & RECIP_MASK_VEC_DIV) != 0 || TARGET_uARCH_LA664)
+-#define TARGET_RECIP_VEC_SQRT   ((recip_mask & RECIP_MASK_VEC_SQRT) != 0 || TARGET_uARCH_LA664)
+-#define TARGET_RECIP_VEC_RSQRT  ((recip_mask & RECIP_MASK_VEC_RSQRT) != 0 || TARGET_uARCH_LA664)
++#define TARGET_RECIP_DIV \
++  ((recip_mask & RECIP_MASK_DIV) != 0 && ISA_HAS_FRECIPE)
++#define TARGET_RECIP_SQRT \
++  ((recip_mask & RECIP_MASK_SQRT) != 0 && ISA_HAS_FRECIPE)
++#define TARGET_RECIP_RSQRT \
++  ((recip_mask & RECIP_MASK_RSQRT) != 0 && ISA_HAS_FRECIPE)
++#define TARGET_RECIP_VEC_DIV \
++  ((recip_mask & RECIP_MASK_VEC_DIV) != 0 && ISA_HAS_FRECIPE)
++#define TARGET_RECIP_VEC_SQRT \
++  ((recip_mask & RECIP_MASK_VEC_SQRT) != 0 && ISA_HAS_FRECIPE)
++#define TARGET_RECIP_VEC_RSQRT \
++  ((recip_mask & RECIP_MASK_VEC_RSQRT) != 0 && ISA_HAS_FRECIPE)
+ 
+ /* 1 if N is a possible register number for function argument passing.
+    We have no FP argument registers when soft-float.  */
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 75d230067..ea848cd76 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -205,14 +205,14 @@ mexplicit-relocs
+ Target Alias(mexplicit-relocs=, always, none)
+ Use %reloc() assembly operators (for backward compatibility).
+ 
+-mrecip
+-Target RejectNegative Var(la_recip) Save
+-Generate approximate reciprocal divide and square root for better throughput.
+-
+ mrecip=
+ Target RejectNegative Joined Var(la_recip_name) Save
+ Control generation of reciprocal estimates.
+ 
++mrecip
++Target Alias(mrecip=, all, none)
++Generate approximate reciprocal divide and square root for better throughput.
++
+ ; The code model option names for -mcmodel.
+ Enum
+ Name(cmodel) Type(int)
+-- 
+2.43.0
+
diff --git a/SME-0055-aarch64-Simplify-output-template-emission-code-for-a.patch b/0158-Backport-SME-aarch64-Simplify-output-template-emissi.patch
similarity index 96%
rename from SME-0055-aarch64-Simplify-output-template-emission-code-for-a.patch
rename to 0158-Backport-SME-aarch64-Simplify-output-template-emissi.patch
index c244021e239f7a70148793148feed534ef8d0960..6d50608113df2fbb7ddacee443a8669d9de279fe 100644
--- a/SME-0055-aarch64-Simplify-output-template-emission-code-for-a.patch
+++ b/0158-Backport-SME-aarch64-Simplify-output-template-emissi.patch
@@ -1,8 +1,8 @@
-From c55dc408d65eb70301abea165f676a666d5cdfd6 Mon Sep 17 00:00:00 2001
+From b51d3b1af24758534e5a8f3a52a56106b935c485 Mon Sep 17 00:00:00 2001
 From: Kyrylo Tkachov 
 Date: Wed, 31 May 2023 11:23:23 +0100
-Subject: [PATCH 055/144] aarch64: Simplify output template emission code for a
- few patterns
+Subject: [PATCH 059/157] [Backport][SME] aarch64: Simplify output template
+ emission code for a few patterns
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=11bd9b1f8133fc07c267e6d1aee8b01e06c7a750
 
@@ -27,7 +27,7 @@ gcc/ChangeLog:
  2 files changed, 40 insertions(+), 99 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
-index 08f346e6a..845f0298e 100644
+index ef7fc4ecb..2d688edf5 100644
 --- a/gcc/config/aarch64/aarch64-simd.md
 +++ b/gcc/config/aarch64/aarch64-simd.md
 @@ -122,28 +122,16 @@
@@ -156,10 +156,10 @@ index 08f346e6a..845f0298e 100644
  )
  
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index bfdd5d5c0..d849442cc 100644
+index c0cc91756..7454a5c77 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -1196,36 +1196,18 @@
+@@ -1198,36 +1198,18 @@
  	(match_operand:SHORT 1 "aarch64_mov_operand"  " r,M,D,Usv,m,m,rZ,w,w,rZ,w"))]
    "(register_operand (operands[0], mode)
      || aarch64_reg_or_zero (operands[1], mode))"
@@ -209,5 +209,5 @@ index bfdd5d5c0..d849442cc 100644
    [(set_attr "type" "mov_reg,mov_imm,neon_move,mov_imm,load_4,load_4,store_4,
  		     store_4,neon_to_gp,neon_from_gp,neon_dup")
 -- 
-2.19.1
+2.33.0
 
diff --git a/0158-LoongArch-Regenerate-loongarch.opt.urls.patch b/0158-LoongArch-Regenerate-loongarch.opt.urls.patch
new file mode 100644
index 0000000000000000000000000000000000000000..58990adec0bd24cad7289b18f5ad5a7a96517cd0
--- /dev/null
+++ b/0158-LoongArch-Regenerate-loongarch.opt.urls.patch
@@ -0,0 +1,117 @@
+From 90a0f195830a25e4179127c67e873c80f758f29d Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 25 Oct 2024 06:25:39 +0000
+Subject: [PATCH 158/188] LoongArch: Regenerate loongarch.opt.urls.
+
+Fixes: d28ea8e5a704 ("LoongArch: Split loongarch_option_override_internal
+                      into smaller procedures")
+
+gcc/ChangeLog:
+
+        * config/loongarch/loongarch.opt.urls: Regenerate.
+---
+ gcc/config/loongarch/loongarch.opt.urls | 92 +++++++++++++++++++++++++
+ 1 file changed, 92 insertions(+)
+ create mode 100644 gcc/config/loongarch/loongarch.opt.urls
+
+diff --git a/gcc/config/loongarch/loongarch.opt.urls b/gcc/config/loongarch/loongarch.opt.urls
+new file mode 100644
+index 000000000..571c504e6
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch.opt.urls
+@@ -0,0 +1,92 @@
++; Autogenerated by regenerate-opt-urls.py from gcc/config/loongarch/loongarch.opt and generated HTML
++
++mfpu=
++UrlSuffix(gcc/LoongArch-Options.html#index-mfpu-2)
++
++msoft-float
++UrlSuffix(gcc/LoongArch-Options.html#index-msoft-float-5)
++
++msingle-float
++UrlSuffix(gcc/LoongArch-Options.html#index-msingle-float)
++
++mdouble-float
++UrlSuffix(gcc/LoongArch-Options.html#index-mdouble-float-1)
++
++msimd=
++UrlSuffix(gcc/LoongArch-Options.html#index-msimd-1)
++
++march=
++UrlSuffix(gcc/LoongArch-Options.html#index-march-7)
++
++mtune=
++UrlSuffix(gcc/LoongArch-Options.html#index-mtune-8)
++
++mabi=
++UrlSuffix(gcc/LoongArch-Options.html#index-mabi-2)
++
++mbranch-cost=
++UrlSuffix(gcc/LoongArch-Options.html#index-mbranch-cost-2)
++
++mcheck-zero-division
++UrlSuffix(gcc/LoongArch-Options.html#index-mcheck-zero-division)
++
++mcond-move-int
++UrlSuffix(gcc/LoongArch-Options.html#index-mcond-move-int)
++
++mcond-move-float
++UrlSuffix(gcc/LoongArch-Options.html#index-mcond-move-float)
++
++mmemcpy
++UrlSuffix(gcc/LoongArch-Options.html#index-mmemcpy)
++
++mstrict-align
++UrlSuffix(gcc/LoongArch-Options.html#index-mstrict-align-1)
++
++mmax-inline-memcpy-size=
++UrlSuffix(gcc/LoongArch-Options.html#index-mmax-inline-memcpy-size)
++
++mexplicit-relocs=
++UrlSuffix(gcc/LoongArch-Options.html#index-mexplicit-relocs-1)
++
++mexplicit-relocs
++UrlSuffix(gcc/LoongArch-Options.html#index-mexplicit-relocs-1)
++
++mrecip=
++UrlSuffix(gcc/LoongArch-Options.html#index-mrecip)
++
++mrecip
++UrlSuffix(gcc/LoongArch-Options.html#index-mrecip)
++
++mcmodel=
++UrlSuffix(gcc/LoongArch-Options.html#index-mcmodel_003d-1)
++
++mdirect-extern-access
++UrlSuffix(gcc/LoongArch-Options.html#index-mdirect-extern-access)
++
++mrelax
++UrlSuffix(gcc/LoongArch-Options.html#index-mrelax-2)
++
++mpass-mrelax-to-as
++UrlSuffix(gcc/LoongArch-Options.html#index-mpass-mrelax-to-as)
++
++mtls-dialect=
++UrlSuffix(gcc/LoongArch-Options.html#index-mtls-dialect-1)
++
++mannotate-tablejump
++UrlSuffix(gcc/LoongArch-Options.html#index-mannotate-tablejump)
++
++mfrecipe
++UrlSuffix(gcc/LoongArch-Options.html#index-mfrecipe)
++
++mdiv32
++UrlSuffix(gcc/LoongArch-Options.html#index-mdiv32)
++
++mlam-bh
++UrlSuffix(gcc/LoongArch-Options.html#index-mlam-bh)
++
++mlamcas
++UrlSuffix(gcc/LoongArch-Options.html#index-mlamcas)
++
++mld-seq-sa
++UrlSuffix(gcc/LoongArch-Options.html#index-mld-seq-sa)
++
+-- 
+2.43.0
+
diff --git a/SME-0056-Improve-immediate-expansion-PR106583.patch b/0159-Backport-SME-Improve-immediate-expansion-PR106583.patch
similarity index 97%
rename from SME-0056-Improve-immediate-expansion-PR106583.patch
rename to 0159-Backport-SME-Improve-immediate-expansion-PR106583.patch
index bb7277e1b66fc590e89bec8fcf731b83dddc7d56..4ecbdd890661b8cc1444b5b40500cda20fdbf249 100644
--- a/SME-0056-Improve-immediate-expansion-PR106583.patch
+++ b/0159-Backport-SME-Improve-immediate-expansion-PR106583.patch
@@ -1,7 +1,8 @@
-From 9c8ee340577b2ed6a47021f248348d5510cc2527 Mon Sep 17 00:00:00 2001
+From d5293e2a8db54245553e01ad5d791b7492ad6101 Mon Sep 17 00:00:00 2001
 From: Wilco Dijkstra 
 Date: Mon, 24 Oct 2022 15:14:14 +0100
-Subject: [PATCH 056/144] Improve immediate expansion [PR106583]
+Subject: [PATCH 060/157] [Backport][SME] Improve immediate expansion
+ [PR106583]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=a096036589d82175a0f729c2dab73c9a527d075d
 
@@ -40,7 +41,7 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/pr106583.c
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 34d16959c..04e098760 100644
+index b4b646fa0..cf7736994 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -305,7 +305,6 @@ static bool aarch64_builtin_support_vector_misalignment (machine_mode mode,
@@ -51,7 +52,7 @@ index 34d16959c..04e098760 100644
  
  /* The processor for which instructions should be scheduled.  */
  enum aarch64_processor aarch64_tune = cortexa53;
-@@ -5539,6 +5538,143 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
+@@ -5756,6 +5755,143 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
  					     factor, nelts_per_vq);
  }
  
@@ -195,7 +196,7 @@ index 34d16959c..04e098760 100644
  static int
  aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
  				scalar_int_mode mode)
-@@ -5569,7 +5705,7 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+@@ -5786,7 +5922,7 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
  	emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
  
        /* Check if we have to emit a second instruction by checking to see
@@ -204,7 +205,7 @@ index 34d16959c..04e098760 100644
        if (val == val2)
  	return 1;
  
-@@ -5605,36 +5741,43 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+@@ -5822,36 +5958,43 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
    one_match = ((~val & mask) == 0) + ((~val & (mask << 16)) == 0) +
      ((~val & (mask << 32)) == 0) + ((~val & (mask << 48)) == 0);
  
@@ -270,7 +271,7 @@ index 34d16959c..04e098760 100644
      }
  
    /* Generate 2-4 instructions, skipping 16 bits of all zeroes or ones which
-@@ -5681,6 +5824,99 @@ aarch64_mov128_immediate (rtx imm)
+@@ -5898,6 +6041,99 @@ aarch64_mov128_immediate (rtx imm)
  }
  
  
@@ -370,7 +371,7 @@ index 34d16959c..04e098760 100644
  /* Return the number of temporary registers that aarch64_add_offset_1
     would need to add OFFSET to a register.  */
  
-@@ -10162,207 +10398,6 @@ aarch64_tls_referenced_p (rtx x)
+@@ -10379,207 +10615,6 @@ aarch64_tls_referenced_p (rtx x)
  }
  
  
@@ -626,5 +627,5 @@ index 000000000..0f9315808
 +/* { dg-final { scan-assembler-times {\tmovk\t} 14 } } */
 +/* { dg-final { scan-assembler-times {\tmov\t} 7 } } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/0159-LoongArch-Add-support-for-TLS-descriptors.patch b/0159-LoongArch-Add-support-for-TLS-descriptors.patch
new file mode 100644
index 0000000000000000000000000000000000000000..eb8ea00f0c44fd459492021aafe406b89c021406
--- /dev/null
+++ b/0159-LoongArch-Add-support-for-TLS-descriptors.patch
@@ -0,0 +1,724 @@
+From 0d5ff38a94dbd655bc86e0be262458ac71726ea4 Mon Sep 17 00:00:00 2001
+From: mengqinggang 
+Date: Tue, 2 Apr 2024 09:57:20 +0800
+Subject: [PATCH 159/188] LoongArch: Add support for TLS descriptors.
+
+Add support for TLS descriptors on normal code model and extreme
+code model.
+
+Normal code model instruction sequence:
+  -mno-explicit-relocs:
+    la.tls.desc	$r4, s
+    add.d	$r12, $r4, $r2
+  -mexplicit-relocs:
+    pcalau12i	$r4,%desc_pc_hi20(s)
+    addi.d	$r4,$r4,%desc_pc_lo12(s)
+    ld.d	$r1,$r4,%desc_ld(s)
+    jirl	$r1,$r1,%desc_call(s)
+    add.d	$r12, $r4, $r2
+
+Extreme code model instruction sequence:
+  -mno-explicit-relocs:
+    la.tls.desc	$r4, $r12, s
+    add.d	$r12, $r4, $r2
+  -mexplicit-relocs:
+    pcalau12i	$r4,%desc_pc_hi20(s)
+    addi.d	$r12,$r0,%desc_pc_lo12(s)
+    lu32i.d	$r12,%desc64_pc_lo20(s)
+    lu52i.d	$r12,$r12,%desc64_pc_hi12(s)
+    add.d	$r4,$r4,$r12
+    ld.d	$r1,$r4,%desc_ld(s)
+    jirl	$r1,$r1,%desc_call(s)
+    add.d	$r12, $r4, $r2
+
+The default is still traditional TLS model, but can be configured with
+--with-tls={trad,desc}. The default can change to TLS descriptors once
+libc and LLVM support this.
+
+gcc/ChangeLog:
+
+	* config.gcc: Add --with-tls option to change TLS flavor.
+	* config/loongarch/genopts/loongarch.opt.in: Add -mtls-dialect to
+	configure TLS flavor.
+	* config/loongarch/loongarch-def.h (struct loongarch_target): Add
+	tls_dialect.
+	* config/loongarch/loongarch-driver.cc (la_driver_init): Add tls
+	flavor.
+	* config/loongarch/loongarch-opts.cc (loongarch_init_target): Add
+	tls_dialect.
+	(loongarch_config_target): Ditto.
+	(loongarch_update_gcc_opt_status): Ditto.
+	* config/loongarch/loongarch-opts.h (loongarch_init_target): Ditto.
+	(TARGET_TLS_DESC): New define.
+	* config/loongarch/loongarch.cc (loongarch_symbol_insns): Add TLS
+	DESC instructions sequence length.
+	(loongarch_legitimize_tls_address): New TLS DESC instruction sequence.
+	(loongarch_option_override_internal): Add la_opt_tls_dialect.
+	(loongarch_option_restore): Add la_target.tls_dialect.
+	* config/loongarch/loongarch.md (@got_load_tls_desc): Normal
+	code model for TLS DESC.
+	(got_load_tls_desc_off64): Extreme cmode model for TLS DESC.
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/loongarch.opt.urls: Ditto.
+	* doc/invoke.texi: Add a description of the compilation option
+	'-mtls-dialect={trad,desc}'.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/cmodel-extreme-1.c: Add -mtls-dialect=trad.
+	* gcc.target/loongarch/cmodel-extreme-2.c: Ditto.
+	* gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c: Ditto.
+	* gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c:
+	Ditto.
+	* gcc.target/loongarch/func-call-medium-1.c: Ditto.
+	* gcc.target/loongarch/func-call-medium-2.c: Ditto.
+	* gcc.target/loongarch/func-call-medium-3.c: Ditto.
+	* gcc.target/loongarch/func-call-medium-4.c: Ditto.
+	* gcc.target/loongarch/tls-extreme-macro.c: Ditto.
+	* gcc.target/loongarch/tls-gd-noplt.c: Ditto.
+	* gcc.target/loongarch/explicit-relocs-auto-extreme-tls-desc.c: New test.
+	* gcc.target/loongarch/explicit-relocs-auto-tls-desc.c: New test.
+	* gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c: New test.
+	* gcc.target/loongarch/explicit-relocs-tls-desc.c: New test.
+
+Co-authored-by: Lulu Cheng 
+Co-authored-by: Xi Ruoyao 
+---
+ gcc/config.gcc                                | 19 +++++-
+ gcc/config/loongarch/genopts/loongarch.opt.in | 14 ++++
+ gcc/config/loongarch/loongarch-def.h          |  7 ++
+ gcc/config/loongarch/loongarch-driver.cc      |  2 +-
+ gcc/config/loongarch/loongarch-opts.cc        | 12 +++-
+ gcc/config/loongarch/loongarch-opts.h         |  3 +
+ gcc/config/loongarch/loongarch.cc             | 45 ++++++++----
+ gcc/config/loongarch/loongarch.md             | 68 +++++++++++++++++++
+ gcc/config/loongarch/loongarch.opt            | 14 ++++
+ gcc/doc/invoke.texi                           | 16 ++++-
+ .../gcc.target/loongarch/cmodel-extreme-1.c   |  2 +-
+ .../gcc.target/loongarch/cmodel-extreme-2.c   |  2 +-
+ .../explicit-relocs-auto-extreme-tls-desc.c   | 10 +++
+ .../loongarch/explicit-relocs-auto-tls-desc.c | 10 +++
+ .../explicit-relocs-auto-tls-ld-gd.c          |  2 +-
+ .../explicit-relocs-extreme-tls-desc.c        | 16 +++++
+ ...icit-relocs-medium-call36-auto-tls-ld-gd.c |  2 +-
+ .../loongarch/explicit-relocs-tls-desc.c      | 13 ++++
+ .../gcc.target/loongarch/func-call-medium-1.c |  2 +-
+ .../gcc.target/loongarch/func-call-medium-2.c |  2 +-
+ .../gcc.target/loongarch/func-call-medium-3.c |  2 +-
+ .../gcc.target/loongarch/func-call-medium-4.c |  2 +-
+ .../gcc.target/loongarch/tls-extreme-macro.c  |  2 +-
+ .../gcc.target/loongarch/tls-gd-noplt.c       |  2 +-
+ 24 files changed, 243 insertions(+), 26 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-extreme-tls-desc.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-desc.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 499b36b45..1db558d4c 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -4982,7 +4982,7 @@ case "${target}" in
+ 		;;
+ 
+ 	loongarch*-*)
+-		supported_defaults="abi arch tune fpu simd multilib-default strict-align-lib"
++		supported_defaults="abi arch tune fpu simd multilib-default strict-align-lib tls"
+ 
+ 		# Local variables
+ 		unset \
+@@ -5240,6 +5240,18 @@ case "${target}" in
+ 			with_multilib_list="${abi_base}/${abi_ext}"
+ 		fi
+ 
++		# Handle --with-tls.
++		case "$with_tls" in
++		"" \
++		| trad | desc)
++		    # OK
++		    ;;
++		*)
++		    echo "Unknown TLS method used in --with-tls=$with_tls" 1>&2
++		    exit 1
++		    ;;
++		esac
++
+ 		# Check if the configured default ABI combination is included in
+ 		# ${with_multilib_list}.
+ 		loongarch_multilib_list_sane=no
+@@ -5875,6 +5887,11 @@ case ${target} in
+ 		lasx)    tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LASX" ;;
+ 		esac
+ 
++		case ${with_tls} in
++		"" | trad)	tm_defines="$tm_defines DEFAULT_TLS_TYPE=TLS_TRADITIONAL" ;;
++		desc)		tm_defines="$tm_defines DEFAULT_TLS_TYPE=TLS_DESCRIPTORS" ;;
++		esac
++
+ 		tmake_file="loongarch/t-loongarch $tmake_file"
+ 		;;
+ 
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 9c6f59bb8..f3d53f03c 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -245,6 +245,20 @@ mpass-mrelax-to-as
+ Driver Var(la_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
+ Pass -mrelax or -mno-relax option to the assembler.
+ 
++Enum
++Name(tls_type) Type(int)
++The possible TLS dialects:
++
++EnumValue
++Enum(tls_type) String(trad) Value(TLS_TRADITIONAL)
++
++EnumValue
++Enum(tls_type) String(desc) Value(TLS_DESCRIPTORS)
++
++mtls-dialect=
++Target RejectNegative Joined Enum(tls_type) Var(la_opt_tls_dialect) Init(M_OPT_UNSET) Save
++Specify TLS dialect.
++
+ -param=loongarch-vect-unroll-limit=
+ Target Joined UInteger Var(la_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
+ Used to limit unroll factor which indicates how much the autovectorizer may
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index b1423bcfe..2fe44da5a 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -180,6 +180,7 @@ struct loongarch_target
+   int cpu_arch;	    /* CPU_ */
+   int cpu_tune;	    /* same */
+   int cmodel;	    /* CMODEL_ */
++  int tls_dialect;  /* TLS_ */
+ };
+ 
+ /* CPU model */
+@@ -193,6 +194,12 @@ enum {
+   N_TUNE_TYPES	    = 5
+ };
+ 
++/* TLS types.  */
++enum {
++  TLS_TRADITIONAL = 0,
++  TLS_DESCRIPTORS = 1
++};
++
+ /* CPU model properties */
+ extern loongarch_def_array
+   loongarch_cpu_strings;
+diff --git a/gcc/config/loongarch/loongarch-driver.cc b/gcc/config/loongarch/loongarch-driver.cc
+index b84a6eaf7..8551cf94d 100644
+--- a/gcc/config/loongarch/loongarch-driver.cc
++++ b/gcc/config/loongarch/loongarch-driver.cc
+@@ -45,7 +45,7 @@ la_driver_init (int argc ATTRIBUTE_UNUSED, const char **argv ATTRIBUTE_UNUSED)
+   /* Initialize all fields of la_target.  */
+   loongarch_init_target (&la_target, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET,
+ 			 M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET,
+-			 0, 0);
++			 M_OPT_UNSET, 0, 0);
+   return "";
+ }
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 404642a9e..062d430c2 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -144,6 +144,7 @@ void
+ loongarch_init_target (struct loongarch_target *target,
+ 		       int cpu_arch, int cpu_tune, int fpu, int simd,
+ 		       int abi_base, int abi_ext, int cmodel,
++		       int tls_dialect,
+ 		       HOST_WIDE_INT isa_evolution,
+ 		       HOST_WIDE_INT isa_evolution_set)
+ {
+@@ -158,6 +159,7 @@ loongarch_init_target (struct loongarch_target *target,
+   target->abi.base = abi_base;
+   target->abi.ext = abi_ext;
+   target->cmodel = cmodel;
++  target->tls_dialect = tls_dialect;
+ }
+ 
+ 
+@@ -179,7 +181,8 @@ loongarch_config_target (struct loongarch_target *target,
+   obstack_init (&msg_obstack);
+ 
+   struct {
+-    int arch, tune, fpu, simd, abi_base, abi_ext, cmodel, abi_flt;
++    int arch, tune, fpu, simd, abi_base, abi_ext, cmodel,
++	tls_dialect, abi_flt;
+   } constrained = {
+       M_OPT_ABSENT (target->cpu_arch)	  ? 0 : 1,
+       M_OPT_ABSENT (target->cpu_tune)	  ? 0 : 1,
+@@ -188,6 +191,7 @@ loongarch_config_target (struct loongarch_target *target,
+       M_OPT_ABSENT (target->abi.base)	  ? 0 : 1,
+       M_OPT_ABSENT (target->abi.ext)	  ? 0 : 1,
+       M_OPT_ABSENT (target->cmodel)	  ? 0 : 1,
++      M_OPT_ABSENT (target->tls_dialect)  ? 0 : 1,
+       M_OPT_ABSENT (target->abi.base)	  ? 0 : 1,
+   };
+ 
+@@ -556,6 +560,9 @@ fallback:
+       gcc_unreachable ();
+     }
+ 
++  t.tls_dialect = constrained.tls_dialect ? target->tls_dialect
++	  : DEFAULT_TLS_TYPE;
++
+   /* Cleanup and return.  */
+   obstack_free (&msg_obstack, NULL);
+   *target = t;
+@@ -791,6 +798,9 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+   /* status of -mcmodel */
+   opts->x_la_opt_cmodel = target->cmodel;
+ 
++  /* status of -mtls-dialect */
++  opts->x_la_opt_tls_dialect = target->tls_dialect;
++
+   /* status of -mfpu */
+   opts->x_la_opt_fpu = target->isa.fpu;
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 177d587da..a3b467f4c 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -39,6 +39,7 @@ void
+ loongarch_init_target (struct loongarch_target *target,
+ 		       int cpu_arch, int cpu_tune, int fpu, int simd,
+ 		       int abi_base, int abi_ext, int cmodel,
++		       int tls_dialect,
+ 		       HOST_WIDE_INT isa_evolutions,
+ 		       HOST_WIDE_INT isa_evolutions_set);
+ 
+@@ -105,6 +106,8 @@ struct loongarch_flags {
+ #define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64)
+ #define TARGET_ABI_LP64		  ABI_LP64_P(la_target.abi.base)
+ 
++#define TARGET_TLS_DESC		  (la_target.tls_dialect == TLS_DESCRIPTORS)
++
+ #define ISA_HAS_LSX \
+   (la_target.isa.simd == ISA_EXT_SIMD_LSX \
+    || la_target.isa.simd == ISA_EXT_SIMD_LASX)
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index c2f3739d0..e27335b3c 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2029,7 +2029,7 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
+ 
+     case SYMBOL_TLSGD:
+     case SYMBOL_TLSLDM:
+-      return 3;
++      return TARGET_TLS_DESC ? 4 : 3;
+ 
+     case SYMBOL_PCREL64:
+       return 5;
+@@ -2930,24 +2930,43 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ static rtx
+ loongarch_legitimize_tls_address (rtx loc)
+ {
+-  rtx dest, tp, tmp, tmp1, tmp2, tmp3;
++  rtx dest, tp, tmp, tmp1, tmp2, tmp3, a0;
+   enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
+   rtx_insn *insn;
+ 
+   switch (model)
+     {
+     case TLS_MODEL_LOCAL_DYNAMIC:
+-      tmp = gen_rtx_REG (Pmode, GP_RETURN);
+-      dest = gen_reg_rtx (Pmode);
+-      insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSLDM, tmp);
+-      emit_libcall_block (insn, dest, tmp, loc);
+-      break;
+-
++      if (!TARGET_TLS_DESC)
++	{
++	  tmp = gen_rtx_REG (Pmode, GP_RETURN);
++	  dest = gen_reg_rtx (Pmode);
++	  insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSLDM, tmp);
++	  emit_libcall_block (insn, dest, tmp, loc);
++	  break;
++	}
++      /* Fall through.  */
+     case TLS_MODEL_GLOBAL_DYNAMIC:
+-      tmp = gen_rtx_REG (Pmode, GP_RETURN);
+-      dest = gen_reg_rtx (Pmode);
+-      insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSGD, tmp);
+-      emit_libcall_block (insn, dest, tmp, loc);
++      if (TARGET_TLS_DESC)
++	{
++	  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
++	  dest = gen_reg_rtx (Pmode);
++	  tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++
++	  if (TARGET_CMODEL_EXTREME)
++	    emit_insn (gen_got_load_tls_desc_off64 (loc, gen_reg_rtx (DImode)));
++	  else
++	    emit_insn (gen_got_load_tls_desc (Pmode, loc));
++
++	  emit_insn (gen_add3_insn (dest, a0, tp));
++	}
++      else
++	{
++	  tmp = gen_rtx_REG (Pmode, GP_RETURN);
++	  dest = gen_reg_rtx (Pmode);
++	  insn = loongarch_call_tls_get_addr (loc, SYMBOL_TLSGD, tmp);
++	  emit_libcall_block (insn, dest, tmp, loc);
++	}
+       break;
+ 
+     case TLS_MODEL_INITIAL_EXEC:
+@@ -7651,6 +7670,7 @@ loongarch_option_override_internal (struct loongarch_target *target,
+ 			 opts->x_la_opt_abi_base,
+ 			 opts->x_la_opt_abi_ext,
+ 			 opts->x_la_opt_cmodel,
++			 opts->x_la_opt_tls_dialect,
+ 			 opts->x_la_isa_evolution,
+ 			 opts_set->x_la_isa_evolution);
+ 
+@@ -7697,6 +7717,7 @@ loongarch_option_restore (struct gcc_options *,
+   la_target.isa.evolution = ptr->x_la_isa_evolution;
+ 
+   la_target.cmodel = ptr->x_la_opt_cmodel;
++  la_target.tls_dialect = ptr->x_la_opt_tls_dialect;
+ }
+ 
+ /* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 1b3525dde..95beb88fe 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -52,6 +52,8 @@
+ 
+   ;; TLS
+   UNSPEC_TLS
++  UNSPEC_TLS_DESC
++  UNSPEC_TLS_DESC_OFF64
+ 
+   ;; Stack tie
+   UNSPEC_TIE
+@@ -127,6 +129,15 @@
+    (T1_REGNUM			13)
+    (S0_REGNUM			23)
+ 
++   (FCC0_REGNUM			64)
++   (FCC1_REGNUM			65)
++   (FCC2_REGNUM			66)
++   (FCC3_REGNUM			67)
++   (FCC4_REGNUM			68)
++   (FCC5_REGNUM			69)
++   (FCC6_REGNUM			70)
++   (FCC7_REGNUM			71)
++
+    ;; Return path styles
+    (NORMAL_RETURN		0)
+    (SIBCALL_RETURN		1)
+@@ -2759,6 +2770,63 @@
+ 
+ ;; Thread-Local Storage
+ 
++(define_insn "@got_load_tls_desc"
++  [(set (reg:P 4)
++	(unspec:P
++	    [(match_operand:P 0 "symbolic_operand" "")]
++	    UNSPEC_TLS_DESC))
++    (clobber (reg:SI FCC0_REGNUM))
++    (clobber (reg:SI FCC1_REGNUM))
++    (clobber (reg:SI FCC2_REGNUM))
++    (clobber (reg:SI FCC3_REGNUM))
++    (clobber (reg:SI FCC4_REGNUM))
++    (clobber (reg:SI FCC5_REGNUM))
++    (clobber (reg:SI FCC6_REGNUM))
++    (clobber (reg:SI FCC7_REGNUM))
++    (clobber (reg:SI RETURN_ADDR_REGNUM))]
++  "TARGET_TLS_DESC"
++{
++  return TARGET_EXPLICIT_RELOCS
++    ? "pcalau12i\t$r4,%%desc_pc_hi20(%0)\n\t"
++      "addi.d\t$r4,$r4,%%desc_pc_lo12(%0)\n\t"
++      "ld.d\t$r1,$r4,%%desc_ld(%0)\n\t"
++      "jirl\t$r1,$r1,%%desc_call(%0)"
++    : "la.tls.desc\t$r4,%0";
++}
++  [(set_attr "got" "load")
++   (set_attr "mode" "")
++   (set_attr "length" "16")])
++
++(define_insn "got_load_tls_desc_off64"
++  [(set (reg:DI 4)
++	(unspec:DI
++	    [(match_operand:DI 0 "symbolic_operand" "")]
++	    UNSPEC_TLS_DESC_OFF64))
++    (clobber (reg:SI FCC0_REGNUM))
++    (clobber (reg:SI FCC1_REGNUM))
++    (clobber (reg:SI FCC2_REGNUM))
++    (clobber (reg:SI FCC3_REGNUM))
++    (clobber (reg:SI FCC4_REGNUM))
++    (clobber (reg:SI FCC5_REGNUM))
++    (clobber (reg:SI FCC6_REGNUM))
++    (clobber (reg:SI FCC7_REGNUM))
++    (clobber (reg:SI RETURN_ADDR_REGNUM))
++    (clobber (match_operand:DI 1 "register_operand" "=&r"))]
++  "TARGET_TLS_DESC && TARGET_CMODEL_EXTREME"
++{
++  return TARGET_EXPLICIT_RELOCS
++    ? "pcalau12i\t$r4,%%desc_pc_hi20(%0)\n\t"
++      "addi.d\t%1,$r0,%%desc_pc_lo12(%0)\n\t"
++      "lu32i.d\t%1,%%desc64_pc_lo20(%0)\n\t"
++      "lu52i.d\t%1,%1,%%desc64_pc_hi12(%0)\n\t"
++      "add.d\t$r4,$r4,%1\n\t"
++      "ld.d\t$r1,$r4,%%desc_ld(%0)\n\t"
++      "jirl\t$r1,$r1,%%desc_call(%0)"
++    : "la.tls.desc\t$r4,%1,%0";
++}
++  [(set_attr "got" "load")
++   (set_attr "length" "28")])
++
+ (define_insn "@load_tls"
+   [(set (match_operand:P 0 "register_operand" "=r")
+ 	(unspec:P
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index ea848cd76..6f730d886 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -253,6 +253,20 @@ mpass-mrelax-to-as
+ Driver Var(la_pass_mrelax_to_as) Init(HAVE_AS_MRELAX_OPTION)
+ Pass -mrelax or -mno-relax option to the assembler.
+ 
++Enum
++Name(tls_type) Type(int)
++The possible TLS dialects:
++
++EnumValue
++Enum(tls_type) String(trad) Value(TLS_TRADITIONAL)
++
++EnumValue
++Enum(tls_type) String(desc) Value(TLS_DESCRIPTORS)
++
++mtls-dialect=
++Target RejectNegative Joined Enum(tls_type) Var(la_opt_tls_dialect) Init(M_OPT_UNSET) Save
++Specify TLS dialect.
++
+ -param=loongarch-vect-unroll-limit=
+ Target Joined UInteger Var(la_vect_unroll_limit) Init(6) IntegerRange(1, 64) Param
+ Used to limit unroll factor which indicates how much the autovectorizer may
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 7f24fe1e2..c9a1969ad 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1010,7 +1010,8 @@ Objective-C and Objective-C++ Dialects}.
+ -mdirect-extern-access -mno-direct-extern-access @gol
+ -mcmodel=@var{code-model} -mrelax -mpass-mrelax-to-as @gol
+ -mrecip  -mrecip=@var{opt} -mfrecipe -mno-frecipe -mdiv32 -mno-div32 @gol
+--mlam-bh -mno-lam-bh -mlamcas -mno-lamcas -mld-seq-sa -mno-ld-seq-sa}
++-mlam-bh -mno-lam-bh -mlamcas -mno-lamcas -mld-seq-sa -mno-ld-seq-sa @gol
++-mtls-dialect=@var{opt}}
+ 
+ @emph{M32R/D Options}
+ @gccoptlist{-m32r2  -m32rx  -m32r @gol
+@@ -24727,6 +24728,19 @@ Whether a load-load barrier (@code{dbar 0x700}) is needed.  When build with
+ @option{-march=la664}, it is enabled by default.  The default is
+ @option{-mno-ld-seq-sa}, the load-load barrier is needed.
+ 
++@opindex mtls-dialect
++@item -mtls-dialect=@var{opt}
++This option controls which tls dialect may be used for general dynamic and
++local dynamic TLS models.
++
++@table @samp
++@item trad
++Use traditional TLS. This is the default.
++
++@item desc
++Use TLS descriptors.
++@end table
++
+ @item loongarch-vect-unroll-limit
+ The vectorizer will use available tuning information to determine whether it
+ would be beneficial to unroll the main vectorized loop and by how much.  This
+diff --git a/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c
+index 564ee4017..6269607e7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=always -fdump-rtl-final" } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -mtls-dialect=trad -fno-plt -mexplicit-relocs=always -fdump-rtl-final" } */
+ 
+ int a;
+ extern int b;
+diff --git a/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c
+index ce834805f..35f6ee0bb 100644
+--- a/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/cmodel-extreme-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=auto -fdump-rtl-final" } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -mtls-dialect=trad -fno-plt -mexplicit-relocs=auto -fdump-rtl-final" } */
+ 
+ #include "cmodel-extreme-1.c"
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-extreme-tls-desc.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-extreme-tls-desc.c
+new file mode 100644
+index 000000000..0fc7a1a51
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-extreme-tls-desc.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fPIC -mcmodel=extreme -mexplicit-relocs=auto -mtls-dialect=desc" } */
++
++__thread int a __attribute__((visibility("hidden")));
++extern __thread int b __attribute__((visibility("default")));
++
++int test() { return a + b; }
++
++/* { dg-final { scan-assembler "la\\.tls\\.desc\t\\\$r4,\\\$r12,\\.LANCHOR0" { target tls_native } } } */
++/* { dg-final { scan-assembler "la\\.tls\\.desc\t\\\$r4,\\\$r12,\\.LANCHOR0" { target tls_native } } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-desc.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-desc.c
+new file mode 100644
+index 000000000..37947ecfd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-desc.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto -mtls-dialect=desc" } */
++
++__thread int a __attribute__((visibility("hidden")));
++extern __thread int b __attribute__((visibility("default")));
++
++int test() { return a + b; }
++
++/* { dg-final { scan-assembler "la\\.tls\\.desc\t\\\$r4,\\.LANCHOR0" { target tls_native } } } */
++/* { dg-final { scan-assembler "la\\.tls\\.desc\t\\\$r4,\\.LANCHOR0" { target tls_native } } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c
+index ca55fcfc5..b47e37c82 100644
+--- a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-auto-tls-ld-gd.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto" } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto -mtls-dialect=trad" } */
+ 
+ __thread int a __attribute__((visibility("hidden")));
+ extern __thread int b __attribute__((visibility("default")));
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c
+new file mode 100644
+index 000000000..3797556e1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs -mtls-dialect=desc -mcmodel=extreme" } */
++
++__thread int a __attribute__((visibility("hidden")));
++extern __thread int b __attribute__((visibility("default")));
++
++int test() { return a + b; }
++
++/* { dg-final { scan-assembler "pcalau12i\t\\\$r4,%desc_pc_hi20\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "addi.d\t\\\$r12,\\\$r0,%desc_pc_lo12\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "lu32i.d\t\\\$r12,%desc64_pc_lo20\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "lu52i.d\t\\\$r12,\\\$r12,%desc64_pc_hi12\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "add.d\t\\\$r4,\\\$r4,\\\$r12" { target tls_native } } } */
++/* { dg-final { scan-assembler "ld.d\t\\\$r1,\\\$r4,%desc_ld\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "jirl\t\\\$r1,\\\$r1,%desc_call\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "add.d\t\\\$r12,\\\$r4,\\\$r2" { target tls_native } } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c
+index d1a482083..cfb855323 100644
+--- a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-medium-call36-auto-tls-ld-gd.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto -mcmodel=medium -fplt" } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs=auto -mtls-dialect=trad -mcmodel=medium -fplt" } */
+ /* { dg-final { scan-assembler "pcaddu18i\t\\\$r1,%call36\\\(__tls_get_addr\\\)" { target { tls_native && loongarch_call36_support } } } } */
+ 
+ #include "./explicit-relocs-auto-tls-ld-gd.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c
+new file mode 100644
+index 000000000..f66903091
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs -mtls-dialect=desc" } */
++
++__thread int a __attribute__((visibility("hidden")));
++extern __thread int b __attribute__((visibility("default")));
++
++int test() { return a + b; }
++
++/* { dg-final { scan-assembler "pcalau12i\t\\\$r4,%desc_pc_hi20\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "addi.d\t\\\$r4,\\\$r4,%desc_pc_lo12\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "ld.d\t\\\$r1,\\\$r4,%desc_ld\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "jirl\t\\\$r1,\\\$r1,%desc_call\\\(\\.LANCHOR0\\\)" { target tls_native } } } */
++/* { dg-final { scan-assembler "add.d\t\\\$r12,\\\$r4,\\\$r2" { target tls_native } } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c
+index 6339e832f..5e81df552 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs -mtls-dialect=trad -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c
+index a53e75e0b..d73df2dd8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs -mtls-dialect=trad -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*la\.local\t.*f\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c
+index 0da7bf98e..88a667450 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs -mtls-dialect=trad -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c
+index 0219688ae..f9dc12fea 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs -mtls-dialect=trad -mcmodel=medium" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*la\.local\t.*f\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c b/gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c
+index 4341f8212..4adda4202 100644
+--- a/gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c
++++ b/gcc/testsuite/gcc.target/loongarch/tls-extreme-macro.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -fno-plt -mexplicit-relocs=none" } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d -O2 -mcmodel=extreme -mtls-dialect=trad -fno-plt -mexplicit-relocs=none" } */
+ /* { dg-final { scan-assembler "test_le:.*la.tls.le\t\\\$r\[0-9\]+,\\\.L" { target tls_native } } } */
+ /* { dg-final { scan-assembler "test_ie:.*la.tls.ie\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,\\\.L" { target tls_native } } } */
+ /* { dg-final { scan-assembler "test_ld:.*la.tls.ld\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,\\\.L.*la.global\t\\\$r\[0-9\]+,\\\$r\[0-9\]+,__tls_get_addr" { target tls_native } } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
+index 9432c477e..dfa1bf53c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
++++ b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O0 -fno-plt -mcmodel=normal -mexplicit-relocs" } */
++/* { dg-options "-O0 -fno-plt -mcmodel=normal -mtls-dialect=trad -mexplicit-relocs" } */
+ /* { dg-final { scan-assembler "pcalau12i\t.*%got_pc_hi20\\(__tls_get_addr\\)\n\tld\.d.*%got_pc_lo12\\(__tls_get_addr\\)" { target tls_native } } } */
+ 
+ __attribute__ ((tls_model ("global-dynamic"))) __thread int a;
+-- 
+2.43.0
+
diff --git a/SME-0057-AArch64-Cleanup-move-immediate-code.patch b/0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch
similarity index 94%
rename from SME-0057-AArch64-Cleanup-move-immediate-code.patch
rename to 0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch
index 1639ebab3a1d3c01a428f65326df8f9bf469f062..dc066980537085c365c715e40a159ea2eb93e866 100644
--- a/SME-0057-AArch64-Cleanup-move-immediate-code.patch
+++ b/0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch
@@ -1,7 +1,7 @@
-From 74cb47cee7ba5e6e1e4c1508a87c15ebe561680a Mon Sep 17 00:00:00 2001
+From d76be4acadc0641cc8e795cd6b8a1c3c83b4fdb2 Mon Sep 17 00:00:00 2001
 From: Wilco Dijkstra 
 Date: Mon, 5 Dec 2022 10:49:25 +0000
-Subject: [PATCH 057/144] AArch64: Cleanup move immediate code
+Subject: [PATCH 061/157] [Backport][SME] AArch64: Cleanup move immediate code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ba1536dac780f3f92c5eab999fda6931f6247fc1
 
@@ -72,10 +72,10 @@ index 97984f3ab..3ff1a0163 100644
  const char *aarch64_output_casesi (rtx *);
  
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 04e098760..a438ea6f6 100644
+index cf7736994..acb659f53 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -5595,12 +5595,10 @@ aarch64_bitmask_imm (unsigned HOST_WIDE_INT val)
+@@ -5812,12 +5812,10 @@ aarch64_bitmask_imm (unsigned HOST_WIDE_INT val)
  
  /* Return true if VAL is a valid bitmask immediate for MODE.  */
  bool
@@ -90,7 +90,7 @@ index 04e098760..a438ea6f6 100644
  
    if (mode == SImode)
      return aarch64_bitmask_imm ((val & 0xffffffff) | (val << 32));
-@@ -5639,51 +5637,55 @@ aarch64_check_bitmask (unsigned HOST_WIDE_INT val,
+@@ -5856,51 +5854,55 @@ aarch64_check_bitmask (unsigned HOST_WIDE_INT val,
  }
  
  
@@ -173,7 +173,7 @@ index 04e098760..a438ea6f6 100644
    val = INTVAL (imm);
  
    if (aarch64_move_imm (val, mode))
-@@ -5693,31 +5695,6 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+@@ -5910,31 +5912,6 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
        return 1;
      }
  
@@ -205,7 +205,7 @@ index 04e098760..a438ea6f6 100644
    if ((val >> 32) == 0 || mode == SImode)
      {
        if (generate)
-@@ -5741,24 +5718,31 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+@@ -5958,24 +5935,31 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
    one_match = ((~val & mask) == 0) + ((~val & (mask << 16)) == 0) +
      ((~val & (mask << 32)) == 0) + ((~val & (mask << 48)) == 0);
  
@@ -252,7 +252,7 @@ index 04e098760..a438ea6f6 100644
      }
  
    /* Try a bitmask plus 2 movk to generate the immediate in 3 instructions.  */
-@@ -5827,26 +5811,24 @@ aarch64_mov128_immediate (rtx imm)
+@@ -6044,26 +6028,24 @@ aarch64_mov128_immediate (rtx imm)
  /* Return true if val can be encoded as a 12-bit unsigned immediate with
     a left shift of 0 or 12 bits.  */
  bool
@@ -285,7 +285,7 @@ index 04e098760..a438ea6f6 100644
  }
  
  
-@@ -6994,8 +6976,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
+@@ -7211,8 +7193,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
        return;
      }
  
@@ -295,7 +295,7 @@ index 04e098760..a438ea6f6 100644
  }
  
  /* Return the MEM rtx that provides the canary value that should be used
-@@ -11193,9 +11174,7 @@ aarch64_float_const_rtx_p (rtx x)
+@@ -11410,9 +11391,7 @@ aarch64_float_const_rtx_p (rtx x)
        && SCALAR_FLOAT_MODE_P (mode)
        && aarch64_reinterpret_float_as_int (x, &ival))
      {
@@ -306,7 +306,7 @@ index 04e098760..a438ea6f6 100644
        int num_instr = aarch64_internal_mov_immediate
  			(NULL_RTX, gen_int_mode (ival, imode), false, imode);
        return num_instr < 3;
-@@ -13832,10 +13811,10 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
+@@ -14049,10 +14028,10 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
  	     proportionally expensive to the number of instructions
  	     required to build that constant.  This is true whether we
  	     are compiling for SPEED or otherwise.  */
@@ -320,7 +320,7 @@ index 04e098760..a438ea6f6 100644
  	}
        return true;
  
-@@ -13851,9 +13830,8 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
+@@ -14068,9 +14047,8 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
  	  bool succeed = aarch64_reinterpret_float_as_int (x, &ival);
  	  gcc_assert (succeed);
  
@@ -333,10 +333,10 @@ index 04e098760..a438ea6f6 100644
  		(NULL_RTX, gen_int_mode (ival, imode), false, imode);
  	  *cost += COSTS_N_INSNS (ncost);
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index d849442cc..f0b1abbc9 100644
+index 7454a5c77..ea94152bf 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -1286,16 +1286,15 @@
+@@ -1288,16 +1288,15 @@
  )
  
  (define_insn_and_split "*movdi_aarch64"
@@ -356,7 +356,7 @@ index d849442cc..f0b1abbc9 100644
     #
     * return aarch64_output_sve_cnt_immediate (\"cnt\", \"%x0\", operands[1]);
     ldr\\t%x0, %1
-@@ -1317,11 +1316,11 @@
+@@ -1319,11 +1318,11 @@
         DONE;
      }"
    ;; The "mov_imm" type for CNTD is just a placeholder.
@@ -371,7 +371,7 @@ index d849442cc..f0b1abbc9 100644
  )
  
  (define_insn "insv_imm"
-@@ -1485,7 +1484,7 @@
+@@ -1487,7 +1486,7 @@
  
  (define_insn "*mov_aarch64"
    [(set (match_operand:DFD 0 "nonimmediate_operand" "=w, w  ,?r,w,w  ,w  ,w,m,r,m ,r,r")
@@ -380,7 +380,7 @@ index d849442cc..f0b1abbc9 100644
    "TARGET_FLOAT && (register_operand (operands[0], mode)
      || aarch64_reg_or_fp_zero (operands[1], mode))"
    "@
-@@ -1500,7 +1499,7 @@
+@@ -1502,7 +1501,7 @@
     ldr\\t%x0, %1
     str\\t%x1, %0
     mov\\t%x0, %x1
@@ -406,5 +406,5 @@ index ee7587cca..750a42fb1 100644
        (match_test "aarch64_move_imm (ival, DImode)")))
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0160-LoongArch-Fix-missing-plugin-header.patch b/0160-LoongArch-Fix-missing-plugin-header.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0fc9a39997b86efeea56d1829163ceebfef4b2d0
--- /dev/null
+++ b/0160-LoongArch-Fix-missing-plugin-header.patch
@@ -0,0 +1,32 @@
+From bec97638d68c760f6ee4b0a86ce4f9ffe9a691b3 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Tue, 2 Apr 2024 09:20:32 +0800
+Subject: [PATCH 160/188] LoongArch: Fix missing plugin header
+
+gcc/ChangeLog:
+
+	* config/loongarch/t-loongarch: Add loongarch-def-arrays.h
+	to OPTION_H_EXTRA.
+---
+ gcc/config/loongarch/t-loongarch | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch
+index 994f4d19c..488e8cff3 100644
+--- a/gcc/config/loongarch/t-loongarch
++++ b/gcc/config/loongarch/t-loongarch
+@@ -18,8 +18,9 @@
+ 
+ 
+ GTM_H += loongarch-multilib.h
+-OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h	\
+-		   $(srcdir)/config/loongarch/loongarch-tune.h	\
++OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h	    \
++		   $(srcdir)/config/loongarch/loongarch-def-array.h \
++		   $(srcdir)/config/loongarch/loongarch-tune.h	    \
+ 		   $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
+ 
+ # Canonical target triplet from config.gcc
+-- 
+2.43.0
+
diff --git a/SME-0058-AArch64-convert-some-patterns-to-compact-MD-syntax.patch b/0161-Backport-SME-AArch64-convert-some-patterns-to-compac.patch
similarity index 97%
rename from SME-0058-AArch64-convert-some-patterns-to-compact-MD-syntax.patch
rename to 0161-Backport-SME-AArch64-convert-some-patterns-to-compac.patch
index 03d9a119a79b408ba72d19ac5b8c7439a8cb6e3b..6ccc4c340e0c09764d261785111303fcc49f2451 100644
--- a/SME-0058-AArch64-convert-some-patterns-to-compact-MD-syntax.patch
+++ b/0161-Backport-SME-AArch64-convert-some-patterns-to-compac.patch
@@ -1,7 +1,8 @@
-From 12e20ce5c5ddde8168376fd47975d2aa0ae20b40 Mon Sep 17 00:00:00 2001
+From 5db3e7b68d5a443e908011b8d53de625ae462f82 Mon Sep 17 00:00:00 2001
 From: Tamar Christina 
 Date: Mon, 19 Jun 2023 15:55:28 +0100
-Subject: [PATCH 058/144] AArch64: convert some patterns to compact MD syntax
+Subject: [PATCH 062/157] [Backport][SME] AArch64: convert some patterns to
+ compact MD syntax
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c72a7b849853716d94e8d313be5dce3c22850113
 
@@ -20,7 +21,7 @@ gcc/ChangeLog:
  1 file changed, 78 insertions(+), 83 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index f0b1abbc9..3f09b5707 100644
+index ea94152bf..5d02da42f 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
 @@ -378,7 +378,7 @@
@@ -42,7 +43,7 @@ index f0b1abbc9..3f09b5707 100644
  	(and (eq_attr "arch" "fp16")
  	     (match_test "TARGET_FP_F16INST"))
  
-@@ -1192,26 +1195,27 @@
+@@ -1194,26 +1197,27 @@
  )
  
  (define_insn "*mov_aarch64"
@@ -88,7 +89,7 @@ index f0b1abbc9..3f09b5707 100644
  )
  
  (define_expand "mov"
-@@ -1248,79 +1252,70 @@
+@@ -1250,79 +1254,70 @@
  )
  
  (define_insn_and_split "*movsi_aarch64"
@@ -224,5 +225,5 @@ index f0b1abbc9..3f09b5707 100644
  
  (define_insn "insv_imm"
 -- 
-2.19.1
+2.33.0
 
diff --git a/0161-LoongArch-Remove-unused-code.patch b/0161-LoongArch-Remove-unused-code.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b52ccfe4085faf9a10f10566cf75095cd8835b24
--- /dev/null
+++ b/0161-LoongArch-Remove-unused-code.patch
@@ -0,0 +1,344 @@
+From 47581dd6da960172bc768435400010748b3f97eb Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Wed, 3 Apr 2024 09:38:23 +0800
+Subject: [PATCH 161/188] LoongArch: Remove unused code.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md: Remove unused code.
+	* config/loongarch/loongarch-protos.h
+	(loongarch_split_lsx_copy_d): Remove.
+	(loongarch_split_lsx_insert_d): Ditto.
+	(loongarch_split_lsx_fill_d): Ditto.
+	* config/loongarch/loongarch.cc
+	(loongarch_split_lsx_copy_d): Ditto.
+	(loongarch_split_lsx_insert_d): Ditto.
+	(loongarch_split_lsx_fill_d): Ditto.
+	* config/loongarch/lsx.md (lsx_vpickve2gr_du): Remove splitter.
+	(lsx_vpickve2gr_): Ditto.
+	(abs2): Remove expander.
+	(vabs2): Rename 2 abs2.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-abs.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 12 +--
+ gcc/config/loongarch/loongarch-protos.h       |  3 -
+ gcc/config/loongarch/loongarch.cc             | 76 ----------------
+ gcc/config/loongarch/lsx.md                   | 89 ++-----------------
+ .../gcc.target/loongarch/vector/lsx/lsx-abs.c | 26 ++++++
+ 5 files changed, 35 insertions(+), 171 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-abs.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 45a0a8cc8..44a7d58ff 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -572,12 +572,7 @@
+ 	  (match_operand 3 "const__operand" "")))]
+   "ISA_HAS_LASX"
+ {
+-#if 0
+-  if (!TARGET_64BIT && (mode == V4DImode || mode == V4DFmode))
+-    return "#";
+-  else
+-#endif
+-    return "xvinsgr2vr.\t%u0,%z1,%y3";
++  return "xvinsgr2vr.\t%u0,%z1,%y3";
+ }
+   [(set_attr "type" "simd_insert")
+    (set_attr "mode" "")])
+@@ -1446,10 +1441,7 @@
+   if (which_alternative == 1)
+     return "xvldi.b\t%u0,0" ;
+ 
+-  if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode))
+-    return "#";
+-  else
+-    return "xvreplgr2vr.\t%u0,%z1";
++  return "xvreplgr2vr.\t%u0,%z1";
+ }
+   [(set_attr "type" "simd_fill")
+    (set_attr "mode" "")
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 8523da8d6..0c31a74b7 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -89,9 +89,6 @@ extern void loongarch_split_128bit_move (rtx, rtx);
+ extern bool loongarch_split_128bit_move_p (rtx, rtx);
+ extern void loongarch_split_256bit_move (rtx, rtx);
+ extern bool loongarch_split_256bit_move_p (rtx, rtx);
+-extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx));
+-extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx);
+-extern void loongarch_split_lsx_fill_d (rtx, rtx);
+ extern const char *loongarch_output_move (rtx, rtx);
+ #ifdef RTX_CODE
+ extern void loongarch_expand_scc (rtx *);
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index e27335b3c..8d8a50b70 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4772,82 +4772,6 @@ loongarch_split_256bit_move (rtx dest, rtx src)
+     }
+ }
+ 
+-
+-/* Split a COPY_S.D with operands DEST, SRC and INDEX.  GEN is a function
+-   used to generate subregs.  */
+-
+-void
+-loongarch_split_lsx_copy_d (rtx dest, rtx src, rtx index,
+-			    rtx (*gen_fn)(rtx, rtx, rtx))
+-{
+-  gcc_assert ((GET_MODE (src) == V2DImode && GET_MODE (dest) == DImode)
+-	      || (GET_MODE (src) == V2DFmode && GET_MODE (dest) == DFmode));
+-
+-  /* Note that low is always from the lower index, and high is always
+-     from the higher index.  */
+-  rtx low = loongarch_subword (dest, false);
+-  rtx high = loongarch_subword (dest, true);
+-  rtx new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
+-
+-  emit_insn (gen_fn (low, new_src, GEN_INT (INTVAL (index) * 2)));
+-  emit_insn (gen_fn (high, new_src, GEN_INT (INTVAL (index) * 2 + 1)));
+-}
+-
+-/* Split a INSERT.D with operand DEST, SRC1.INDEX and SRC2.  */
+-
+-void
+-loongarch_split_lsx_insert_d (rtx dest, rtx src1, rtx index, rtx src2)
+-{
+-  int i;
+-  gcc_assert (GET_MODE (dest) == GET_MODE (src1));
+-  gcc_assert ((GET_MODE (dest) == V2DImode
+-	       && (GET_MODE (src2) == DImode || src2 == const0_rtx))
+-	      || (GET_MODE (dest) == V2DFmode && GET_MODE (src2) == DFmode));
+-
+-  /* Note that low is always from the lower index, and high is always
+-     from the higher index.  */
+-  rtx low = loongarch_subword (src2, false);
+-  rtx high = loongarch_subword (src2, true);
+-  rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
+-  rtx new_src1 = simplify_gen_subreg (V4SImode, src1, GET_MODE (src1), 0);
+-  i = exact_log2 (INTVAL (index));
+-  gcc_assert (i != -1);
+-
+-  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, low, new_src1,
+-				  GEN_INT (1 << (i * 2))));
+-  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest,
+-				  GEN_INT (1 << (i * 2 + 1))));
+-}
+-
+-/* Split FILL.D.  */
+-
+-void
+-loongarch_split_lsx_fill_d (rtx dest, rtx src)
+-{
+-  gcc_assert ((GET_MODE (dest) == V2DImode
+-	       && (GET_MODE (src) == DImode || src == const0_rtx))
+-	      || (GET_MODE (dest) == V2DFmode && GET_MODE (src) == DFmode));
+-
+-  /* Note that low is always from the lower index, and high is always
+-     from the higher index.  */
+-  rtx low, high;
+-  if (src == const0_rtx)
+-    {
+-      low = src;
+-      high = src;
+-    }
+-  else
+-    {
+-      low = loongarch_subword (src, false);
+-      high = loongarch_subword (src, true);
+-    }
+-  rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
+-  emit_insn (gen_lsx_vreplgr2vr_w (new_dest, low));
+-  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 1)));
+-  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 3)));
+-}
+-
+-
+ /* Return the appropriate instructions to move SRC into DEST.  Assume
+    that SRC is operand 1 and DEST is operand 0.  */
+ 
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index dc81093e9..2eac11473 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -582,28 +582,11 @@
+ 	  (match_operand 3 "const__operand" "")))]
+   "ISA_HAS_LSX"
+ {
+-  if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode))
+-    return "#";
+-  else
+-    return "vinsgr2vr.\t%w0,%z1,%y3";
++  return "vinsgr2vr.\t%w0,%z1,%y3";
+ }
+   [(set_attr "type" "simd_insert")
+    (set_attr "mode" "")])
+ 
+-(define_split
+-  [(set (match_operand:LSX_D 0 "register_operand")
+-	(vec_merge:LSX_D
+-	  (vec_duplicate:LSX_D
+-	    (match_operand: 1 "_operand"))
+-	  (match_operand:LSX_D 2 "register_operand")
+-	  (match_operand 3 "const__operand")))]
+-  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
+-  [(const_int 0)]
+-{
+-  loongarch_split_lsx_insert_d (operands[0], operands[2], operands[3], operands[1]);
+-  DONE;
+-})
+-
+ (define_insn "lsx_vextrins__internal"
+   [(set (match_operand:LSX 0 "register_operand" "=f")
+ 	(vec_merge:LSX
+@@ -653,70 +636,26 @@
+   [(set_attr "type" "simd_copy")
+    (set_attr "mode" "")])
+ 
+-(define_insn_and_split "lsx_vpickve2gr_du"
++(define_insn "lsx_vpickve2gr_du"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(vec_select:DI
+ 	  (match_operand:V2DI 1 "register_operand" "f")
+ 	  (parallel [(match_operand 2 "const_0_or_1_operand" "")])))]
+   "ISA_HAS_LSX"
+-{
+-  if (TARGET_64BIT)
+-    return "vpickve2gr.du\t%0,%w1,%2";
+-  else
+-    return "#";
+-}
+-  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
+-  [(const_int 0)]
+-{
+-  loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2],
+-			      gen_lsx_vpickve2gr_wu);
+-  DONE;
+-}
++  "vpickve2gr.du\t%0,%w1,%2"
+   [(set_attr "type" "simd_copy")
+    (set_attr "mode" "V2DI")])
+ 
+-(define_insn_and_split "lsx_vpickve2gr_"
++(define_insn "lsx_vpickve2gr_"
+   [(set (match_operand: 0 "register_operand" "=r")
+ 	(vec_select:
+ 	  (match_operand:LSX_D 1 "register_operand" "f")
+ 	  (parallel [(match_operand 2 "const__operand" "")])))]
+   "ISA_HAS_LSX"
+-{
+-  if (TARGET_64BIT)
+-    return "vpickve2gr.\t%0,%w1,%2";
+-  else
+-    return "#";
+-}
+-  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
+-  [(const_int 0)]
+-{
+-  loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2],
+-			      gen_lsx_vpickve2gr_w);
+-  DONE;
+-}
++  "vpickve2gr.\t%0,%w1,%2"
+   [(set_attr "type" "simd_copy")
+    (set_attr "mode" "")])
+ 
+-
+-(define_expand "abs2"
+-  [(match_operand:ILSX 0 "register_operand" "=f")
+-   (abs:ILSX (match_operand:ILSX 1 "register_operand" "f"))]
+-  "ISA_HAS_LSX"
+-{
+-  if (ISA_HAS_LSX)
+-  {
+-    emit_insn (gen_vabs2 (operands[0], operands[1]));
+-    DONE;
+-  }
+-  else
+-  {
+-    rtx reg = gen_reg_rtx (mode);
+-    emit_move_insn (reg, CONST0_RTX (mode));
+-    emit_insn (gen_lsx_vadda_ (operands[0], operands[1], reg));
+-    DONE;
+-  }
+-})
+-
+ (define_expand "neg2"
+   [(set (match_operand:ILSX 0 "register_operand")
+ 	(neg:ILSX (match_operand:ILSX 1 "register_operand")))]
+@@ -1369,25 +1308,11 @@
+   if (which_alternative == 1)
+     return "vldi.\t%w0,0";
+ 
+-  if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode))
+-    return "#";
+-  else
+-    return "vreplgr2vr.\t%w0,%z1";
++  return "vreplgr2vr.\t%w0,%z1";
+ }
+   [(set_attr "type" "simd_fill")
+    (set_attr "mode" "")])
+ 
+-(define_split
+-  [(set (match_operand:LSX_D 0 "register_operand")
+-	(vec_duplicate:LSX_D
+-	  (match_operand: 1 "register_operand")))]
+-  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
+-  [(const_int 0)]
+-{
+-  loongarch_split_lsx_fill_d (operands[0], operands[1]);
+-  DONE;
+-})
+-
+ (define_insn "logb2"
+   [(set (match_operand:FLSX 0 "register_operand" "=f")
+ 	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
+@@ -2428,7 +2353,7 @@
+   [(set_attr "type" "simd_logic")
+    (set_attr "mode" "")])
+ 
+-(define_insn "vabs2"
++(define_insn "abs2"
+   [(set (match_operand:ILSX 0 "register_operand" "=f")
+ 	(abs:ILSX (match_operand:ILSX 1 "register_operand" "f")))]
+   "ISA_HAS_LSX"
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-abs.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-abs.c
+new file mode 100644
+index 000000000..cf971badb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-abs.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx" } */
++/* { dg-final { scan-assembler-times "vsigncov.w" 1 } } */
++/* { dg-final { scan-assembler-times "vsigncov.d" 1 } } */
++
++int a[4], b[4];
++
++extern int abs (int);
++
++void
++foo1 (void)
++{
++  for (int i = 0; i < 4; i++)
++    a[i] = abs (b[i]);
++}
++
++long la[2], lb[2];
++
++extern long labs (long);
++
++void
++foo2 (void)
++{
++  for (int i = 0; i < 2; i++)
++    la[i] = labs (lb[i]);
++}
+-- 
+2.43.0
+
diff --git a/SME-0059-aarch64-Use-SVE-s-RDVL-instruction.patch b/0162-Backport-SME-aarch64-Use-SVE-s-RDVL-instruction.patch
similarity index 97%
rename from SME-0059-aarch64-Use-SVE-s-RDVL-instruction.patch
rename to 0162-Backport-SME-aarch64-Use-SVE-s-RDVL-instruction.patch
index f4def4e586c097764b8288c9c7210d9fbab302c7..f92df2dfe2aded2e57b743d58f7043160143ab1e 100644
--- a/SME-0059-aarch64-Use-SVE-s-RDVL-instruction.patch
+++ b/0162-Backport-SME-aarch64-Use-SVE-s-RDVL-instruction.patch
@@ -1,7 +1,7 @@
-From 831e12761e08a6d16ad59f454631abe09c24331f Mon Sep 17 00:00:00 2001
+From 46310765c05cde8732e07bfb0df9f0ec25a34018 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:18 +0000
-Subject: [PATCH 059/144] aarch64: Use SVE's RDVL instruction
+Subject: [PATCH 063/157] [Backport][SME] aarch64: Use SVE's RDVL instruction
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=80f47d7bbe38234e1530d27fe5c2f130223ca7a0
 
@@ -78,10 +78,10 @@ index 3ff1a0163..14a568140 100644
  char *aarch64_output_sve_vector_inc_dec (const char *, rtx);
  char *aarch64_output_scalar_simd_mov_immediate (rtx, scalar_int_mode);
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index a438ea6f6..b776ff6e5 100644
+index acb659f53..4194dfc70 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -5303,6 +5303,18 @@ aarch64_fold_sve_cnt_pat (aarch64_svpattern pattern, unsigned int nelts_per_vq)
+@@ -5520,6 +5520,18 @@ aarch64_fold_sve_cnt_pat (aarch64_svpattern pattern, unsigned int nelts_per_vq)
    return -1;
  }
  
@@ -100,7 +100,7 @@ index a438ea6f6..b776ff6e5 100644
  /* Return true if we can move VALUE into a register using a single
     CNT[BHWD] instruction.  */
  
-@@ -5310,11 +5322,7 @@ static bool
+@@ -5527,11 +5539,7 @@ static bool
  aarch64_sve_cnt_immediate_p (poly_int64 value)
  {
    HOST_WIDE_INT factor = value.coeffs[0];
@@ -113,7 +113,7 @@ index a438ea6f6..b776ff6e5 100644
  }
  
  /* Likewise for rtx X.  */
-@@ -5430,6 +5438,50 @@ aarch64_output_sve_scalar_inc_dec (rtx offset)
+@@ -5647,6 +5655,50 @@ aarch64_output_sve_scalar_inc_dec (rtx offset)
  					     -offset_value.coeffs[1], 0);
  }
  
@@ -164,7 +164,7 @@ index a438ea6f6..b776ff6e5 100644
  /* Return true if we can add VALUE to a register using a single ADDVL
     or ADDPL instruction.  */
  
-@@ -6010,13 +6062,13 @@ aarch64_offset_temporaries (bool add_p, poly_int64 offset)
+@@ -6227,13 +6279,13 @@ aarch64_offset_temporaries (bool add_p, poly_int64 offset)
      count += 1;
    else if (factor != 0)
      {
@@ -184,7 +184,7 @@ index a438ea6f6..b776ff6e5 100644
  	 be shifted).  */
        count += 1;
      }
-@@ -6105,85 +6157,100 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
+@@ -6322,85 +6374,100 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
    /* Otherwise use a CNT-based sequence.  */
    else if (factor != 0)
      {
@@ -332,7 +332,7 @@ index a438ea6f6..b776ff6e5 100644
        if (src != const0_rtx)
  	{
  	  val = aarch64_force_temporary (mode, temp1, val);
-@@ -6828,7 +6895,9 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
+@@ -7045,7 +7112,9 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
  	      aarch64_report_sve_required ();
  	      return;
  	    }
@@ -343,7 +343,7 @@ index a438ea6f6..b776ff6e5 100644
  	    emit_insn (gen_rtx_SET (dest, imm));
  	  else
  	    {
-@@ -21519,7 +21588,9 @@ aarch64_mov_operand_p (rtx x, machine_mode mode)
+@@ -21751,7 +21820,9 @@ aarch64_mov_operand_p (rtx x, machine_mode mode)
    if (SYMBOL_REF_P (x) && mode == DImode && CONSTANT_ADDRESS_P (x))
      return true;
  
@@ -355,10 +355,10 @@ index a438ea6f6..b776ff6e5 100644
  
    return aarch64_classify_symbolic_expression (x)
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 3f09b5707..92ce5085a 100644
+index 5d02da42f..c0977a3da 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -1205,6 +1205,7 @@
+@@ -1207,6 +1207,7 @@
       [w, D; neon_move      , simd  ] << aarch64_output_scalar_simd_mov_immediate (operands[1], mode);
       /* The "mov_imm" type for CNT is just a placeholder.  */
       [r, Usv  ; mov_imm        , sve   ] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
@@ -366,7 +366,7 @@ index 3f09b5707..92ce5085a 100644
       [r, m    ; load_4         , *     ] ldr\t%w0, %1
       [w, m    ; load_4         , *     ] ldr\t%0, %1
       [m, r Z  ; store_4        , *     ] str\\t%w1, %0
-@@ -1263,6 +1264,7 @@
+@@ -1265,6 +1266,7 @@
       [r  , n  ; mov_imm  , *   ,16] #
       /* The "mov_imm" type for CNT is just a placeholder.  */
       [r  , Usv; mov_imm  , sve , 4] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
@@ -374,7 +374,7 @@ index 3f09b5707..92ce5085a 100644
       [r  , m  ; load_4   , *   , 4] ldr\t%w0, %1
       [w  , m  ; load_4   , fp  , 4] ldr\t%s0, %1
       [m  , r Z; store_4  , *   , 4] str\t%w1, %0
-@@ -1297,6 +1299,7 @@
+@@ -1299,6 +1301,7 @@
       [r, n  ; mov_imm  , *   ,16] #
       /* The "mov_imm" type for CNT is just a placeholder.  */
       [r, Usv; mov_imm  , sve , 4] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
@@ -788,5 +788,5 @@ index 110947a6c..5de34fc61 100644
  **	...
  **	sub	sp, sp, x12
 -- 
-2.19.1
+2.33.0
 
diff --git a/0162-LoongArch-Set-default-alignment-for-functions-jumps-.patch b/0162-LoongArch-Set-default-alignment-for-functions-jumps-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..42277b06116e5f943dfea33949ce7d73f626bc31
--- /dev/null
+++ b/0162-LoongArch-Set-default-alignment-for-functions-jumps-.patch
@@ -0,0 +1,135 @@
+From 7dff9d3f7fefe074e78cd7ff6529d7c1ea6cc3b1 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 2 Apr 2024 14:29:08 +0800
+Subject: [PATCH 162/188] LoongArch: Set default alignment for functions jumps
+ and loops [PR112919].
+
+Xi Ruoyao set the alignment rules under LA464 in commit r14-1839,
+but the macro ASM_OUTPUT_ALIGN_WITH_NOP was removed in R14-4674,
+which affected the alignment rules.
+
+So I set different aligns on LA464 and LA664 again to test the
+performance of spec2006, and modify the alignment based on the test
+results.
+
+gcc/ChangeLog:
+
+	PR target/112919
+	* config/loongarch/loongarch-def.cc (la664_align): Newly defined
+	function that sets alignment rules under the LA664 microarchitecture.
+	* config/loongarch/loongarch-opts.cc
+	(loongarch_target_option_override): If not optimizing for size, set
+	the default alignment to what the target wants.
+	* config/loongarch/loongarch-tune.h (struct loongarch_align): Add
+	new member variables jump and loop.
+---
+ gcc/config/loongarch/loongarch-def.cc  | 11 ++++++++---
+ gcc/config/loongarch/loongarch-opts.cc | 19 +++++++++++++------
+ gcc/config/loongarch/loongarch-tune.h  | 22 +++++++++++++++-------
+ 3 files changed, 36 insertions(+), 16 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index 533dd0af2..a48050c5f 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -81,14 +81,19 @@ array_tune loongarch_cpu_cache =
+ 
+ static inline loongarch_align la464_align ()
+ {
+-  return loongarch_align ().function_ ("32").label_ ("16");
++  return loongarch_align ().function_ ("32").loop_ ("16").jump_ ("16");
++}
++
++static inline loongarch_align la664_align ()
++{
++  return loongarch_align ().function_ ("8").loop_ ("8").jump_ ("32");
+ }
+ 
+ array_tune loongarch_cpu_align =
+   array_tune ()
+-    .set (CPU_LOONGARCH64, la464_align ())
++    .set (CPU_LOONGARCH64, la664_align ())
+     .set (CPU_LA464, la464_align ())
+-    .set (CPU_LA664, la464_align ());
++    .set (CPU_LA664, la664_align ());
+ 
+ /* Default RTX cost initializer.  */
+ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 062d430c2..c455c5e32 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -922,13 +922,20 @@ loongarch_target_option_override (struct loongarch_target *target,
+ {
+   loongarch_update_gcc_opt_status (target, opts, opts_set);
+ 
+-  /* alignments */
+-  if (opts->x_flag_align_functions && !opts->x_str_align_functions)
+-    opts->x_str_align_functions
+-      = loongarch_cpu_align[target->cpu_tune].function;
++  /* If not optimizing for size, set the default
++     alignment to what the target wants.  */
++  if (!opts->x_optimize_size)
++    {
++      if (opts->x_flag_align_functions && !opts->x_str_align_functions)
++	opts->x_str_align_functions
++	  = loongarch_cpu_align[target->cpu_tune].function;
++
++      if (opts->x_flag_align_loops && !opts->x_str_align_loops)
++	opts->x_str_align_loops = loongarch_cpu_align[target->cpu_tune].loop;
+ 
+-  if (opts->x_flag_align_labels && !opts->x_str_align_labels)
+-    opts->x_str_align_labels = loongarch_cpu_align[target->cpu_tune].label;
++      if (opts->x_flag_align_jumps && !opts->x_str_align_jumps)
++	opts->x_str_align_jumps = loongarch_cpu_align[target->cpu_tune].jump;
++    }
+ 
+   /* Set up parameters to be used in prefetching algorithm.  */
+   int simultaneous_prefetches
+diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h
+index 26f163f0a..d286eee0b 100644
+--- a/gcc/config/loongarch/loongarch-tune.h
++++ b/gcc/config/loongarch/loongarch-tune.h
+@@ -162,14 +162,16 @@ struct loongarch_cache {
+   }
+ };
+ 
+-/* Alignment for functions and labels for best performance.  For new uarchs
+-   the value should be measured via benchmarking.  See the documentation for
+-   -falign-functions and -falign-labels in invoke.texi for the format.  */
++/* Alignment for functions loops and jumps for best performance.  For new
++   uarchs the value should be measured via benchmarking.  See the
++   documentation for -falign-functions, -falign-loops, and -falign-jumps in
++   invoke.texi for the format.  */
+ struct loongarch_align {
+   const char *function;	/* default value for -falign-functions */
+-  const char *label;	/* default value for -falign-labels */
++  const char *loop;	/* default value for -falign-loops */
++  const char *jump;	/* default value for -falign-jumps */
+ 
+-  loongarch_align () : function (nullptr), label (nullptr) {}
++  loongarch_align () : function (nullptr), loop (nullptr), jump (nullptr) {}
+ 
+   loongarch_align function_ (const char *_function)
+   {
+@@ -177,9 +179,15 @@ struct loongarch_align {
+     return *this;
+   }
+ 
+-  loongarch_align label_ (const char *_label)
++  loongarch_align loop_ (const char *_loop)
+   {
+-    label = _label;
++    loop = _loop;
++    return *this;
++  }
++
++  loongarch_align jump_ (const char *_jump)
++  {
++    jump = _jump;
+     return *this;
+   }
+ };
+-- 
+2.43.0
+
diff --git a/SME-0060-aarch64-Make-AARCH64_FL_SVE-requirements-explicit.patch b/0163-Backport-SME-aarch64-Make-AARCH64_FL_SVE-requirement.patch
similarity index 97%
rename from SME-0060-aarch64-Make-AARCH64_FL_SVE-requirements-explicit.patch
rename to 0163-Backport-SME-aarch64-Make-AARCH64_FL_SVE-requirement.patch
index cbf112388e38dae9ff97d48556df232a821b816e..97108d89a61e60e6949fa7697246abdd9a2125c0 100644
--- a/SME-0060-aarch64-Make-AARCH64_FL_SVE-requirements-explicit.patch
+++ b/0163-Backport-SME-aarch64-Make-AARCH64_FL_SVE-requirement.patch
@@ -1,7 +1,8 @@
-From 7d799a4413ff7fc64774c813be426d486fefb934 Mon Sep 17 00:00:00 2001
+From c0badff223a1f5ea5a0f75df72f5d0138d94d8e6 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:19 +0000
-Subject: [PATCH 060/144] aarch64: Make AARCH64_FL_SVE requirements explicit
+Subject: [PATCH 064/157] [Backport][SME] aarch64: Make AARCH64_FL_SVE
+ requirements explicit
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=dd7aaef62a43efe52eece525eea4d7d252b0c148
 
@@ -132,5 +133,5 @@ index 7924cdf0f..dde01f676 100644
  };
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0163-LoongArch-Enable-switchable-target.patch b/0163-LoongArch-Enable-switchable-target.patch
new file mode 100644
index 0000000000000000000000000000000000000000..465ff3b66aef3cb09d3d61fde700a25ac0800e1a
--- /dev/null
+++ b/0163-LoongArch-Enable-switchable-target.patch
@@ -0,0 +1,281 @@
+From 427d5f10951435241d883a13557f862683046ddd Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 8 Apr 2024 16:45:13 +0800
+Subject: [PATCH 163/188] LoongArch: Enable switchable target
+
+This patch fixes the back-end context switching in cases where functions
+should be built with their own target contexts instead of the
+global one, such as LTO linking and functions with target attributes (TBD).
+
+	PR target/113233
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_reg_init):
+	Reinitialize the loongarch_regno_mode_ok cache.
+	(loongarch_option_override): Same.
+	(loongarch_save_restore_target_globals): Restore target globals.
+	(loongarch_set_current_function): Restore the target contexts
+	for functions.
+	(TARGET_SET_CURRENT_FUNCTION): Define.
+	* config/loongarch/loongarch.h (SWITCHABLE_TARGET): Enable
+	switchable target context.
+	* config/loongarch/loongarch-builtins.cc (loongarch_init_builtins):
+	Initialize all builtin functions at startup.
+	(loongarch_expand_builtin): Turn assertion of builtin availability
+	into a test.
+
+gcc/testsuite/ChangeLog:
+
+	* lib/target-supports.exp: Define condition loongarch_sx_as.
+	* gcc.dg/lto/pr113233_0.c: New test.
+---
+ gcc/config/loongarch/loongarch-builtins.cc | 25 +++---
+ gcc/config/loongarch/loongarch.cc          | 91 ++++++++++++++++++++--
+ gcc/config/loongarch/loongarch.h           |  2 +
+ gcc/testsuite/gcc.dg/lto/pr113233_0.c      | 14 ++++
+ gcc/testsuite/lib/target-supports.exp      | 12 +++
+ 5 files changed, 127 insertions(+), 17 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/lto/pr113233_0.c
+
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index e3b4dbc52..51abba007 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -2507,14 +2507,11 @@ loongarch_init_builtins (void)
+   for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++)
+     {
+       d = &loongarch_builtins[i];
+-      if (d->avail ())
+-	{
+-	  type = loongarch_build_function_type (d->function_type);
+-	  loongarch_builtin_decls[i]
+-	    = add_builtin_function (d->name, type, i, BUILT_IN_MD, NULL,
+-				    NULL);
+-	  loongarch_get_builtin_decl_index[d->icode] = i;
+-	}
++      type = loongarch_build_function_type (d->function_type);
++      loongarch_builtin_decls[i]
++	= add_builtin_function (d->name, type, i, BUILT_IN_MD, NULL,
++			  NULL);
++      loongarch_get_builtin_decl_index[d->icode] = i;
+     }
+ }
+ 
+@@ -3100,15 +3097,21 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+ 			  int ignore ATTRIBUTE_UNUSED)
+ {
+   tree fndecl;
+-  unsigned int fcode, avail;
++  unsigned int fcode;
+   const struct loongarch_builtin_description *d;
+ 
+   fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
+   fcode = DECL_MD_FUNCTION_CODE (fndecl);
+   gcc_assert (fcode < ARRAY_SIZE (loongarch_builtins));
+   d = &loongarch_builtins[fcode];
+-  avail = d->avail ();
+-  gcc_assert (avail != 0);
++
++  if (!d->avail ())
++    {
++      error_at (EXPR_LOCATION (exp),
++		"built-in function %qD is not enabled", fndecl);
++      return target;
++    }
++
+   switch (d->builtin_type)
+     {
+     case LARCH_BUILTIN_DIRECT:
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 8d8a50b70..50ab6a82a 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -7567,15 +7567,19 @@ loongarch_global_init (void)
+ 	loongarch_dwarf_regno[i] = INVALID_REGNUM;
+     }
+ 
++  /* Function to allocate machine-dependent function status.  */
++  init_machine_status = &loongarch_init_machine_status;
++};
++
++static void
++loongarch_reg_init (void)
++{
+   /* Set up loongarch_hard_regno_mode_ok.  */
+   for (int mode = 0; mode < MAX_MACHINE_MODE; mode++)
+     for (int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
+       loongarch_hard_regno_mode_ok_p[mode][regno]
+ 	= loongarch_hard_regno_mode_ok_uncached (regno, (machine_mode) mode);
+-
+-  /* Function to allocate machine-dependent function status.  */
+-  init_machine_status = &loongarch_init_machine_status;
+-};
++}
+ 
+ static void
+ loongarch_option_override_internal (struct loongarch_target *target,
+@@ -7602,20 +7606,92 @@ loongarch_option_override_internal (struct loongarch_target *target,
+ 
+   /* Override some options according to the resolved target.  */
+   loongarch_target_option_override (target, opts, opts_set);
++
++  target_option_default_node = target_option_current_node
++    = build_target_option_node (opts, opts_set);
++
++  loongarch_reg_init ();
++}
++
++/* Remember the last target of loongarch_set_current_function.  */
++
++static GTY(()) tree loongarch_previous_fndecl;
++
++/* Restore or save the TREE_TARGET_GLOBALS from or to new_tree.
++   Used by loongarch_set_current_function to
++   make sure optab availability predicates are recomputed when necessary.  */
++
++static void
++loongarch_save_restore_target_globals (tree new_tree)
++{
++  if (TREE_TARGET_GLOBALS (new_tree))
++    restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
++  else if (new_tree == target_option_default_node)
++    restore_target_globals (&default_target_globals);
++  else
++    TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
++}
++
++/* Implement TARGET_SET_CURRENT_FUNCTION.  */
++
++static void
++loongarch_set_current_function (tree fndecl)
++{
++  if (fndecl == loongarch_previous_fndecl)
++    return;
++
++  tree old_tree;
++  if (loongarch_previous_fndecl == NULL_TREE)
++    old_tree = target_option_current_node;
++  else if (DECL_FUNCTION_SPECIFIC_TARGET (loongarch_previous_fndecl))
++    old_tree = DECL_FUNCTION_SPECIFIC_TARGET (loongarch_previous_fndecl);
++  else
++    old_tree = target_option_default_node;
++
++  if (fndecl == NULL_TREE)
++    {
++      if (old_tree != target_option_current_node)
++	{
++	  loongarch_previous_fndecl = NULL_TREE;
++	  cl_target_option_restore (&global_options, &global_options_set,
++				    TREE_TARGET_OPTION
++				    (target_option_current_node));
++	}
++      return;
++    }
++
++  tree new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
++  if (new_tree == NULL_TREE)
++    new_tree = target_option_default_node;
++
++  loongarch_previous_fndecl = fndecl;
++
++  if (new_tree == old_tree)
++    return;
++
++  cl_target_option_restore (&global_options, &global_options_set,
++			    TREE_TARGET_OPTION (new_tree));
++
++  loongarch_reg_init ();
++
++  loongarch_save_restore_target_globals (new_tree);
+ }
+ 
++
++
+ /* Implement TARGET_OPTION_OVERRIDE.  */
+ 
+ static void
+ loongarch_option_override (void)
+ {
++  /* Global initializations.  */
++  loongarch_global_init ();
++
+   /* Setting up the target configuration.  */
+   loongarch_option_override_internal (&la_target,
+ 				      &global_options,
+ 				      &global_options_set);
+ 
+-  /* Global initializations.  */
+-  loongarch_global_init ();
+ }
+ 
+ /* Implement TARGET_OPTION_SAVE.  */
+@@ -10931,6 +11007,9 @@ loongarch_asm_code_end (void)
+ #undef TARGET_OPTION_RESTORE
+ #define TARGET_OPTION_RESTORE loongarch_option_restore
+ 
++#undef TARGET_SET_CURRENT_FUNCTION
++#define TARGET_SET_CURRENT_FUNCTION loongarch_set_current_function
++
+ #undef TARGET_LEGITIMIZE_ADDRESS
+ #define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address
+ 
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 221e8b286..089206605 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ #include "config/loongarch/loongarch-opts.h"
+ 
++#define SWITCHABLE_TARGET 1
++
+ #define TARGET_SUPPORTS_WIDE_INT 1
+ 
+ /* Macros to silence warnings about numbers being signed in traditional
+diff --git a/gcc/testsuite/gcc.dg/lto/pr113233_0.c b/gcc/testsuite/gcc.dg/lto/pr113233_0.c
+new file mode 100644
+index 000000000..0a045c519
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/lto/pr113233_0.c
+@@ -0,0 +1,14 @@
++/* { dg-require-effective-target loongarch_sx_as } */
++/* { dg-lto-do link } */
++/* { dg-skip-if "" { ! { loongarch*-linux-* } } } */
++/* { dg-lto-options { {-mlsx } } } */
++/* { dg-suppress-ld-options { -mlsx } } */
++
++#include 
++
++int main (void)
++{
++  __m128i a, b, c;
++  c = __lsx_vand_v (a, b);
++  return 0;
++} 
+diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
+index 20fbd43ee..b673c92b5 100644
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -10549,6 +10549,18 @@ proc check_vect_support_and_set_flags { } {
+     return 1
+ }
+ 
++proc check_effective_target_loongarch_sx_as { } {
++    return [check_no_compiler_messages loongarch_sx_as object {
++        #include 
++        int main (void)
++        {
++          __m128i a, b, c;
++          c = __lsx_vand_v (a, b);
++          return 0;
++        }
++    } "-mlsx"]
++}
++
+ proc check_effective_target_loongarch_sx_hw { } {
+     return [check_runtime loongarch_sx_hw {
+    #include 
+-- 
+2.43.0
+
diff --git a/SME-0061-aarch64-Add-group-suffixes-to-SVE-intrinsics.patch b/0164-Backport-SME-aarch64-Add-group-suffixes-to-SVE-intri.patch
similarity index 99%
rename from SME-0061-aarch64-Add-group-suffixes-to-SVE-intrinsics.patch
rename to 0164-Backport-SME-aarch64-Add-group-suffixes-to-SVE-intri.patch
index a3187ea17c3534b815f8942341111a968b9094d3..44978142a2967debc9eb93c2e7e945c7d4587d84 100644
--- a/SME-0061-aarch64-Add-group-suffixes-to-SVE-intrinsics.patch
+++ b/0164-Backport-SME-aarch64-Add-group-suffixes-to-SVE-intri.patch
@@ -1,7 +1,8 @@
-From b224d967cb261d9b6a1b6bb90eb6ff1a187487b0 Mon Sep 17 00:00:00 2001
+From e99332e15895156632949f3b6c3080fc9d994b13 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:19 +0000
-Subject: [PATCH 061/144] aarch64: Add group suffixes to SVE intrinsics
+Subject: [PATCH 065/157] [Backport][SME] aarch64: Add group suffixes to SVE
+ intrinsics
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=7b607f197967e052d7d7e29f6b41eded18f8c65d
 
@@ -557,5 +558,5 @@ index 824c31cd7..374c57e93 100644
  inline tree
  function_instance::scalar_type (unsigned int i) const
 -- 
-2.19.1
+2.33.0
 
diff --git a/0164-LoongArch-Define-ISA-versions.patch b/0164-LoongArch-Define-ISA-versions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8994f2d4b17de75aa48628c1322de3c2b775d45f
--- /dev/null
+++ b/0164-LoongArch-Define-ISA-versions.patch
@@ -0,0 +1,1016 @@
+From 66c8369ff9e5987c14786692cf6fd945a94273a1 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Tue, 23 Apr 2024 10:42:47 +0800
+Subject: [PATCH 164/188] LoongArch: Define ISA versions
+
+These ISA versions are defined as -march= parameters and
+are recommended for building binaries for distribution.
+
+Detailed description of these definitions can be found at
+https://github.com/loongson/la-toolchain-conventions, which
+the LoongArch GCC port aims to conform to.
+
+gcc/ChangeLog:
+
+	* config.gcc: Make la64v1.0 the default ISA preset of the lp64d ABI.
+	* config/loongarch/genopts/loongarch-strings: Define la64v1.0, la64v1.1.
+	* config/loongarch/genopts/loongarch.opt.in: Likewise.
+	* config/loongarch/loongarch-c.cc (LARCH_CPP_SET_PROCESSOR): Likewise.
+	(loongarch_cpu_cpp_builtins): Likewise.
+	* config/loongarch/loongarch-cpu.cc (get_native_prid): Likewise.
+	(fill_native_cpu_config): Likewise.
+	* config/loongarch/loongarch-def.cc (array_tune): Likewise.
+	* config/loongarch/loongarch-def.h: Likewise.
+	* config/loongarch/loongarch-driver.cc (driver_set_m_parm): Likewise.
+	(driver_get_normalized_m_opts): Likewise.
+	* config/loongarch/loongarch-opts.cc (default_tune_for_arch): Likewise.
+	(TUNE_FOR_ARCH): Likewise.
+	(arch_str): Likewise.
+	(loongarch_target_option_override): Likewise.
+	* config/loongarch/loongarch-opts.h (TARGET_uARCH_LA464): Likewise.
+	(TARGET_uARCH_LA664): Likewise.
+	* config/loongarch/loongarch-str.h (STR_CPU_ABI_DEFAULT): Likewise.
+	(STR_ARCH_ABI_DEFAULT): Likewise.
+	(STR_TUNE_GENERIC): Likewise.
+	(STR_ARCH_LA64V1_0): Likewise.
+	(STR_ARCH_LA64V1_1): Likewise.
+	* config/loongarch/loongarch.cc (loongarch_cpu_sched_reassociation_width): Likewise.
+	(loongarch_asm_code_end): Likewise.
+	* config/loongarch/loongarch.opt: Likewise.
+	* doc/invoke.texi: Likewise.
+---
+ gcc/config.gcc                                | 34 ++++----
+ .../loongarch/genopts/loongarch-strings       |  5 +-
+ gcc/config/loongarch/genopts/loongarch.opt.in | 43 ++++++++--
+ gcc/config/loongarch/loongarch-c.cc           | 37 +++------
+ gcc/config/loongarch/loongarch-cpu.cc         | 35 ++++----
+ gcc/config/loongarch/loongarch-def.cc         | 83 +++++++++++++------
+ gcc/config/loongarch/loongarch-def.h          | 37 ++++++---
+ gcc/config/loongarch/loongarch-driver.cc      |  8 +-
+ gcc/config/loongarch/loongarch-opts.cc        | 66 +++++++++++----
+ gcc/config/loongarch/loongarch-opts.h         |  4 +-
+ gcc/config/loongarch/loongarch-str.h          |  5 +-
+ gcc/config/loongarch/loongarch.cc             | 11 +--
+ gcc/config/loongarch/loongarch.opt            | 43 ++++++++--
+ gcc/doc/invoke.texi                           | 57 ++++++++-----
+ 14 files changed, 300 insertions(+), 168 deletions(-)
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 1db558d4c..c6820d0f1 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -5035,7 +5035,7 @@ case "${target}" in
+ 
+ 		# Perform initial sanity checks on --with-* options.
+ 		case ${with_arch} in
+-		"" | abi-default | loongarch64 | la[46]64) ;; # OK, append here.
++		"" | la64v1.[01] | abi-default | loongarch64 | la[46]64) ;; # OK, append here.
+ 		native)
+ 			if test x${host} != x${target}; then
+ 				echo "--with-arch=native is illegal for cross-compiler." 1>&2
+@@ -5082,10 +5082,18 @@ case "${target}" in
+ 
+ 		# Infer ISA-related default options from the ABI: pass 1
+ 		case ${abi_base}/${abi_ext} in
+-		lp64*/base)
++		lp64d/base)
+ 			# architectures that support lp64* ABI
+-			arch_pattern="native|abi-default|loongarch64|la[46]64"
+-			# default architecture for lp64* ABI
++			arch_pattern="native|abi-default|la64v1.[01]|loongarch64|la[46]64"
++
++			# default architecture for lp64d ABI
++			arch_default="la64v1.0"
++			;;
++		lp64[fs]/base)
++			# architectures that support lp64* ABI
++			arch_pattern="native|abi-default|la64v1.[01]|loongarch64|la[46]64"
++
++			# default architecture for lp64[fs] ABI
+ 			arch_default="abi-default"
+ 			;;
+ 		*)
+@@ -5157,15 +5165,7 @@ case "${target}" in
+ 
+ 
+ 		# Check default with_tune configuration using with_arch.
+-		case ${with_arch} in
+-		loongarch64)
+-			tune_pattern="native|abi-default|loongarch64|la[46]64"
+-			;;
+-		*)
+-			# By default, $with_tune == $with_arch
+-			tune_pattern="*"
+-			;;
+-		esac
++		tune_pattern="native|generic|loongarch64|la[46]64"
+ 
+ 		case ${with_tune} in
+ 		"") ;; # OK
+@@ -5215,7 +5215,7 @@ case "${target}" in
+ 					# Fixed: use the default gcc configuration for all multilib
+ 					# builds by default.
+ 					with_multilib_default="" ;;
+-				arch,native|arch,loongarch64|arch,la[46]64) # OK, append here.
++				arch,native|arch,la64v1.[01]|arch,loongarch64|arch,la[46]64) # OK, append here.
+ 					with_multilib_default="/march=${component}" ;;
+ 				arch,*)
+ 					with_multilib_default="/march=abi-default"
+@@ -5315,7 +5315,7 @@ case "${target}" in
+ 				if test x${parse_state} = x"arch"; then
+ 					# -march option
+ 					case ${component} in
+-					native | abi-default | loongarch64 | la[46]64) # OK, append here.
++					native | abi-default | la64v1.[01] | loongarch64 | la[46]64) # OK, append here.
+ 						# Append -march spec for each multilib variant.
+ 						loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}"
+ 						parse_state="opts"
+@@ -5858,7 +5858,7 @@ case ${target} in
+ 		# See macro definitions from loongarch-opts.h and loongarch-cpu.h.
+ 
+ 		# Architecture
+-		tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_$(echo ${with_arch} | tr a-z- A-Z_)"
++		tm_defines="${tm_defines} DEFAULT_CPU_ARCH=ARCH_$(echo ${with_arch} | tr a-z.- A-Z__)"
+ 
+ 		# Base ABI type
+ 		tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_$(echo ${abi_base} | tr a-z- A-Z_)"
+@@ -5870,7 +5870,7 @@ case ${target} in
+ 
+ 		# Microarchitecture
+ 		if test x${with_tune} != x; then
+-		  tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_$(echo ${with_tune} | tr a-z- A-Z_)"
++		  tm_defines="${tm_defines} DEFAULT_CPU_TUNE=TUNE_$(echo ${with_tune} | tr a-z.- A-Z__)"
+ 		fi
+ 
+ 		# FPU adjustment
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index 99fd4e7cd..fd2f9b4f3 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -23,10 +23,13 @@ OPTSTR_ARCH	      arch
+ OPTSTR_TUNE	      tune
+ 
+ STR_CPU_NATIVE	      native
+-STR_CPU_ABI_DEFAULT   abi-default
++STR_ARCH_ABI_DEFAULT  abi-default
++STR_TUNE_GENERIC      generic
+ STR_CPU_LOONGARCH64   loongarch64
+ STR_CPU_LA464	      la464
+ STR_CPU_LA664	      la664
++STR_ARCH_LA64V1_0     la64v1.0
++STR_ARCH_LA64V1_1     la64v1.1
+ 
+ # Base architecture
+ STR_ISA_BASE_LA64 la64
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index f3d53f03c..0ecd10922 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -95,30 +95,55 @@ Enable LoongArch Advanced SIMD Extension (LASX, 256-bit).
+ 
+ ;; Base target models (implies ISA & tune parameters)
+ Enum
+-Name(cpu_type) Type(int)
+-LoongArch CPU types:
++Name(arch_type) Type(int)
++LoongArch ARCH presets:
+ 
+ EnumValue
+-Enum(cpu_type) String(@@STR_CPU_NATIVE@@) Value(CPU_NATIVE)
++Enum(arch_type) String(@@STR_CPU_NATIVE@@) Value(ARCH_NATIVE)
+ 
+ EnumValue
+-Enum(cpu_type) String(@@STR_CPU_ABI_DEFAULT@@) Value(CPU_ABI_DEFAULT)
++Enum(arch_type) String(@@STR_ARCH_ABI_DEFAULT@@) Value(ARCH_ABI_DEFAULT)
+ 
+ EnumValue
+-Enum(cpu_type) String(@@STR_CPU_LOONGARCH64@@) Value(CPU_LOONGARCH64)
++Enum(arch_type) String(@@STR_CPU_LOONGARCH64@@) Value(ARCH_LOONGARCH64)
+ 
+ EnumValue
+-Enum(cpu_type) String(@@STR_CPU_LA464@@) Value(CPU_LA464)
++Enum(arch_type) String(@@STR_CPU_LA464@@) Value(ARCH_LA464)
+ 
+ EnumValue
+-Enum(cpu_type) String(@@STR_CPU_LA664@@) Value(CPU_LA664)
++Enum(arch_type) String(@@STR_CPU_LA664@@) Value(ARCH_LA664)
++
++EnumValue
++Enum(arch_type) String(@@STR_ARCH_LA64V1_0@@) Value(ARCH_LA64V1_0)
++
++EnumValue
++Enum(arch_type) String(@@STR_ARCH_LA64V1_1@@) Value(ARCH_LA64V1_1)
+ 
+ m@@OPTSTR_ARCH@@=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save
++Target RejectNegative Joined Enum(arch_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save
+ -m@@OPTSTR_ARCH@@=PROCESSOR	Generate code for the given PROCESSOR ISA.
+ 
++Enum
++Name(tune_type) Type(int)
++LoongArch TUNE presets:
++
++EnumValue
++Enum(tune_type) String(@@STR_CPU_NATIVE@@) Value(TUNE_NATIVE)
++
++EnumValue
++Enum(tune_type) String(@@STR_TUNE_GENERIC@@) Value(TUNE_GENERIC)
++
++EnumValue
++Enum(tune_type) String(@@STR_CPU_LOONGARCH64@@) Value(TUNE_LOONGARCH64)
++
++EnumValue
++Enum(tune_type) String(@@STR_CPU_LA464@@) Value(TUNE_LA464)
++
++EnumValue
++Enum(tune_type) String(@@STR_CPU_LA664@@) Value(TUNE_LA664)
++
+ m@@OPTSTR_TUNE@@=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save
++Target RejectNegative Joined Enum(tune_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save
+ -m@@OPTSTR_TUNE@@=PROCESSOR	Generate optimized code for PROCESSOR.
+ 
+ 
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index df2a482ad..153db75b0 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -31,29 +31,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define builtin_define(TXT) cpp_define (pfile, TXT)
+ #define builtin_assert(TXT) cpp_assert (pfile, TXT)
+ 
+-/* Define preprocessor macros for the -march and -mtune options.
+-   PREFIX is either _LOONGARCH_ARCH or _LOONGARCH_TUNE, INFO is
+-   the selected processor.  If INFO's canonical name is "foo",
+-   define PREFIX to be "foo", and define an additional macro
+-   PREFIX_FOO.  */
+-#define LARCH_CPP_SET_PROCESSOR(PREFIX, CPU_TYPE)			\
+-  do									\
+-    {									\
+-      char *macro, *p;							\
+-      int cpu_type = (CPU_TYPE);					\
+-									\
+-      macro = concat ((PREFIX), "_",					\
+-		      loongarch_cpu_strings[cpu_type], NULL);		\
+-      for (p = macro; *p != 0; p++)					\
+-	*p = TOUPPER (*p);						\
+-									\
+-      builtin_define (macro);						\
+-      builtin_define_with_value ((PREFIX),				\
+-				 loongarch_cpu_strings[cpu_type], 1);	\
+-      free (macro);							\
+-    }									\
+-  while (0)
+-
+ void
+ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+ {
+@@ -61,11 +38,17 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+   builtin_assert ("cpu=loongarch");
+   builtin_define ("__loongarch__");
+ 
+-  LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch);
+-  LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune);
++  builtin_define_with_value ("__loongarch_arch",
++			     loongarch_arch_strings[la_target.cpu_arch], 1);
++
++  builtin_define_with_value ("__loongarch_tune",
++			     loongarch_tune_strings[la_target.cpu_tune], 1);
++
++  builtin_define_with_value ("_LOONGARCH_ARCH",
++			     loongarch_arch_strings[la_target.cpu_arch], 1);
+ 
+-  LARCH_CPP_SET_PROCESSOR ("__loongarch_arch", la_target.cpu_arch);
+-  LARCH_CPP_SET_PROCESSOR ("__loongarch_tune", la_target.cpu_tune);
++  builtin_define_with_value ("_LOONGARCH_TUNE",
++			     loongarch_tune_strings[la_target.cpu_tune], 1);
+ 
+   /* Base architecture / ABI.  */
+   if (TARGET_64BIT)
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index 551d4f72c..eb1eb8011 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -62,7 +62,7 @@ cache_cpucfg (void)
+ uint32_t
+ get_native_prid (void)
+ {
+-  /* Fill loongarch_cpu_default_config[CPU_NATIVE] with cpucfg data,
++  /* Fill loongarch_cpu_default_config[ARCH_NATIVE] with cpucfg data,
+      see "Loongson Architecture Reference Manual"
+      (Volume 1, Section 2.2.10.5) */
+   return cpucfg_cache[0];
+@@ -76,13 +76,14 @@ get_native_prid_str (void)
+   return (const char*) prid_str;
+ }
+ 
+-/* Fill property tables for CPU_NATIVE.  */
++/* Fill property tables for ARCH_NATIVE / TUNE_NATIVE.  */
+ void
+ fill_native_cpu_config (struct loongarch_target *tgt)
+ {
+-  int arch_native_p = tgt->cpu_arch == CPU_NATIVE;
+-  int tune_native_p = tgt->cpu_tune == CPU_NATIVE;
+-  int native_cpu_type = CPU_NATIVE;
++  int arch_native_p = tgt->cpu_arch == ARCH_NATIVE;
++  int tune_native_p = tgt->cpu_tune == TUNE_NATIVE;
++  int native_cpu_arch = ARCH_NATIVE;
++  int native_cpu_tune = TUNE_NATIVE;
+ 
+   /* Nothing needs to be done unless "-march/tune=native"
+      is given or implied.  */
+@@ -99,11 +100,13 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+   switch (cpucfg_cache[0] & 0x00ffff00)
+   {
+     case 0x0014c000:   /* LA464 */
+-      native_cpu_type = CPU_LA464;
++      native_cpu_arch = ARCH_LA464;
++      native_cpu_tune = TUNE_LA464;
+       break;
+ 
+     case 0x0014d000:   /* LA664 */
+-      native_cpu_type = CPU_LA664;
++      native_cpu_arch = ARCH_LA664;
++      native_cpu_tune = TUNE_LA664;
+       break;
+ 
+     default:
+@@ -119,7 +122,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+   if (arch_native_p)
+     {
+       int tmp;
+-      tgt->cpu_arch = native_cpu_type;
++      tgt->cpu_arch = native_cpu_arch;
+ 
+       auto &preset = loongarch_cpu_default_isa[tgt->cpu_arch];
+ 
+@@ -127,8 +130,8 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	 With: base architecture (ARCH)
+ 	 At:   cpucfg_words[1][1:0] */
+ 
+-      if (native_cpu_type != CPU_NATIVE)
+-	tmp = loongarch_cpu_default_isa[native_cpu_type].base;
++      if (native_cpu_arch != ARCH_NATIVE)
++	tmp = loongarch_cpu_default_isa[native_cpu_arch].base;
+       else
+ 	switch (cpucfg_cache[1] & 0x3)
+ 	  {
+@@ -173,7 +176,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	}
+ 
+       /* Check consistency with PRID presets.  */
+-      if (native_cpu_type != CPU_NATIVE && tmp != preset.fpu)
++      if (native_cpu_arch != ARCH_NATIVE && tmp != preset.fpu)
+ 	warning (0, "floating-point unit %qs differs from PRID preset %qs",
+ 		 loongarch_isa_ext_strings[tmp],
+ 		 loongarch_isa_ext_strings[preset.fpu]);
+@@ -182,7 +185,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+       preset.fpu = tmp;
+ 
+ 
+-      /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].simd
++      /* Fill: loongarch_cpu_default_isa[ARCH_NATIVE].simd
+ 	 With: SIMD extension type (LSX, LASX)
+ 	 At:   cpucfg_words[2][7:6] */
+ 
+@@ -212,7 +215,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+       /* Check consistency with PRID presets.  */
+ 
+       /*
+-      if (native_cpu_type != CPU_NATIVE && tmp != preset.simd)
++      if (native_cpu_arch != ARCH_NATIVE && tmp != preset.simd)
+ 	warning (0, "SIMD extension %qs differs from PRID preset %qs",
+ 		 loongarch_isa_ext_strings[tmp],
+ 		 loongarch_isa_ext_strings[preset.simd]);
+@@ -229,10 +232,10 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 	if (cpucfg_cache[entry.cpucfg_word] & entry.cpucfg_bit)
+ 	  hw_isa_evolution |= entry.isa_evolution_bit;
+ 
+-      if (native_cpu_type != CPU_NATIVE)
++      if (native_cpu_arch != ARCH_NATIVE)
+ 	{
+ 	  /* Check if the local CPU really supports the features of the base
+-	     ISA of probed native_cpu_type.  If any feature is not detected,
++	     ISA of probed native_cpu_arch.  If any feature is not detected,
+ 	     either GCC or the hardware is buggy.  */
+ 	  if ((preset.evolution & hw_isa_evolution) != hw_isa_evolution)
+ 	    warning (0,
+@@ -247,7 +250,7 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+ 
+   if (tune_native_p)
+     {
+-      tgt->cpu_tune = native_cpu_type;
++      tgt->cpu_tune = native_cpu_tune;
+ 
+       /* Fill: loongarch_cpu_cache[tgt->cpu_tune]
+ 	 With: cache size info
+diff --git a/gcc/config/loongarch/loongarch-def.cc b/gcc/config/loongarch/loongarch-def.cc
+index a48050c5f..c3f9fc6de 100644
+--- a/gcc/config/loongarch/loongarch-def.cc
++++ b/gcc/config/loongarch/loongarch-def.cc
+@@ -31,39 +31,64 @@ template 
+ using array = loongarch_def_array;
+ 
+ template 
+-using array_tune = array;
++using array_arch = array;
+ 
+ template 
+-using array_arch = array;
++using array_tune = array;
+ 
+-/* CPU property tables.  */
+-array_tune loongarch_cpu_strings = array_tune ()
+-  .set (CPU_NATIVE, STR_CPU_NATIVE)
+-  .set (CPU_ABI_DEFAULT, STR_CPU_ABI_DEFAULT)
+-  .set (CPU_LOONGARCH64, STR_CPU_LOONGARCH64)
+-  .set (CPU_LA464, STR_CPU_LA464)
+-  .set (CPU_LA664, STR_CPU_LA664);
++array_arch loongarch_arch_strings = array_arch ()
++  .set (ARCH_NATIVE, STR_CPU_NATIVE)
++  .set (ARCH_ABI_DEFAULT, STR_ARCH_ABI_DEFAULT)
++  .set (ARCH_LOONGARCH64, STR_CPU_LOONGARCH64)
++  .set (ARCH_LA464, STR_CPU_LA464)
++  .set (ARCH_LA664, STR_CPU_LA664)
++  .set (ARCH_LA64V1_0, STR_ARCH_LA64V1_0)
++  .set (ARCH_LA64V1_1, STR_ARCH_LA64V1_1);
++
++array_tune loongarch_tune_strings = array_tune ()
++  .set (TUNE_NATIVE, STR_CPU_NATIVE)
++  .set (TUNE_GENERIC, STR_TUNE_GENERIC)
++  .set (TUNE_LOONGARCH64, STR_CPU_LOONGARCH64)
++  .set (TUNE_LA464, STR_CPU_LA464)
++  .set (TUNE_LA664, STR_CPU_LA664);
+ 
+ array_arch loongarch_cpu_default_isa =
+   array_arch ()
+-    .set (CPU_LOONGARCH64,
++    .set (ARCH_LOONGARCH64,
+ 	  loongarch_isa ()
+ 	    .base_ (ISA_BASE_LA64)
+ 	    .fpu_ (ISA_EXT_FPU64))
+-    .set (CPU_LA464,
++
++    .set (ARCH_LA464,
+ 	  loongarch_isa ()
+ 	    .base_ (ISA_BASE_LA64)
+ 	    .fpu_ (ISA_EXT_FPU64)
+ 	    .simd_ (ISA_EXT_SIMD_LASX))
+-    .set (CPU_LA664,
++
++    .set (ARCH_LA664,
+ 	  loongarch_isa ()
+ 	    .base_ (ISA_BASE_LA64)
+ 	    .fpu_ (ISA_EXT_FPU64)
+ 	    .simd_ (ISA_EXT_SIMD_LASX)
++	    .evolution_ (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA
++			 | OPTION_MASK_ISA_LAM_BH | OPTION_MASK_ISA_LAMCAS
++			 | OPTION_MASK_ISA_FRECIPE))
++    .set (ARCH_LA64V1_0,
++	  loongarch_isa ()
++	    .base_ (ISA_BASE_LA64)
++	    .fpu_ (ISA_EXT_FPU64)
++	    .simd_ (ISA_EXT_SIMD_LSX))
++
++    .set (ARCH_LA64V1_1,
++	  loongarch_isa ()
++	    .base_ (ISA_BASE_LA64)
++	    .fpu_ (ISA_EXT_FPU64)
++	    .simd_ (ISA_EXT_SIMD_LSX)
+ 	    .evolution_ (OPTION_MASK_ISA_DIV32 | OPTION_MASK_ISA_LD_SEQ_SA
+ 			 | OPTION_MASK_ISA_LAM_BH | OPTION_MASK_ISA_LAMCAS
+ 			 | OPTION_MASK_ISA_FRECIPE));
+ 
++
+ static inline loongarch_cache la464_cache ()
+ {
+   return loongarch_cache ()
+@@ -75,9 +100,10 @@ static inline loongarch_cache la464_cache ()
+ 
+ array_tune loongarch_cpu_cache =
+   array_tune ()
+-    .set (CPU_LOONGARCH64, la464_cache ())
+-    .set (CPU_LA464, la464_cache ())
+-    .set (CPU_LA664, la464_cache ());
++    .set (TUNE_GENERIC, la464_cache ())
++    .set (TUNE_LOONGARCH64, la464_cache ())
++    .set (TUNE_LA464, la464_cache ())
++    .set (TUNE_LA664, la464_cache ());
+ 
+ static inline loongarch_align la464_align ()
+ {
+@@ -91,9 +117,10 @@ static inline loongarch_align la664_align ()
+ 
+ array_tune loongarch_cpu_align =
+   array_tune ()
+-    .set (CPU_LOONGARCH64, la664_align ())
+-    .set (CPU_LA464, la464_align ())
+-    .set (CPU_LA664, la664_align ());
++    .set (TUNE_GENERIC, la664_align ())
++    .set (TUNE_LOONGARCH64, la664_align ())
++    .set (TUNE_LA464, la464_align ())
++    .set (TUNE_LA664, la664_align ());
+ 
+ /* Default RTX cost initializer.  */
+ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
+@@ -117,7 +144,7 @@ loongarch_rtx_cost_data::loongarch_rtx_cost_data ()
+  any known "-mtune" type).  */
+ array_tune loongarch_cpu_rtx_cost_data =
+   array_tune ()
+-    .set (CPU_LA664,
++    .set (TUNE_LA664,
+ 	  loongarch_rtx_cost_data ()
+ 	    .movcf2gr_ (COSTS_N_INSNS (1))
+ 	    .movgr2cf_ (COSTS_N_INSNS (1)));
+@@ -140,16 +167,18 @@ const loongarch_rtx_cost_data loongarch_rtx_cost_optimize_size =
+     .movcf2gr_ (COST_COMPLEX_INSN);
+ 
+ array_tune loongarch_cpu_issue_rate = array_tune ()
+-  .set (CPU_NATIVE, 4)
+-  .set (CPU_LOONGARCH64, 4)
+-  .set (CPU_LA464, 4)
+-  .set (CPU_LA664, 6);
++  .set (TUNE_NATIVE, 4)
++  .set (TUNE_GENERIC, 4)
++  .set (TUNE_LOONGARCH64, 4)
++  .set (TUNE_LA464, 4)
++  .set (TUNE_LA664, 6);
+ 
+ array_tune loongarch_cpu_multipass_dfa_lookahead = array_tune ()
+-  .set (CPU_NATIVE, 4)
+-  .set (CPU_LOONGARCH64, 4)
+-  .set (CPU_LA464, 4)
+-  .set (CPU_LA664, 6);
++  .set (TUNE_NATIVE, 4)
++  .set (TUNE_GENERIC, 4)
++  .set (TUNE_LOONGARCH64, 4)
++  .set (TUNE_LA464, 4)
++  .set (TUNE_LA664, 6);
+ 
+ /* Wiring string definitions from loongarch-str.h to global arrays
+    with standard index values from loongarch-opts.h, so we can
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index 2fe44da5a..10b5f9ddc 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -177,21 +177,32 @@ struct loongarch_target
+ {
+   struct loongarch_isa isa;
+   struct loongarch_abi abi;
+-  int cpu_arch;	    /* CPU_ */
+-  int cpu_tune;	    /* same */
++  int cpu_arch;	    /* ARCH_ */
++  int cpu_tune;	    /* TUNE_ */
+   int cmodel;	    /* CMODEL_ */
+   int tls_dialect;  /* TLS_ */
+ };
+ 
+-/* CPU model */
++/* ISA target presets (-march=*) */
+ enum {
+-  CPU_NATIVE	    = 0,
+-  CPU_ABI_DEFAULT   = 1,
+-  CPU_LOONGARCH64   = 2,
+-  CPU_LA464	    = 3,
+-  CPU_LA664	    = 4,
+-  N_ARCH_TYPES	    = 5,
+-  N_TUNE_TYPES	    = 5
++  ARCH_NATIVE       = 0,
++  ARCH_ABI_DEFAULT  = 1,
++  ARCH_LOONGARCH64  = 2,
++  ARCH_LA464	    = 3,
++  ARCH_LA664	    = 4,
++  ARCH_LA64V1_0     = 5,
++  ARCH_LA64V1_1     = 6,
++  N_ARCH_TYPES      = 7,
++};
++
++/* Tune target presets (-mtune=*) */
++enum {
++  TUNE_NATIVE       = 0,
++  TUNE_GENERIC      = 1,
++  TUNE_LOONGARCH64  = 2,
++  TUNE_LA464	    = 3,
++  TUNE_LA664	    = 4,
++  N_TUNE_TYPES      = 5,
+ };
+ 
+ /* TLS types.  */
+@@ -200,9 +211,11 @@ enum {
+   TLS_DESCRIPTORS = 1
+ };
+ 
+-/* CPU model properties */
++/* Target preset properties */
+ extern loongarch_def_array
+-  loongarch_cpu_strings;
++  loongarch_arch_strings;
++extern loongarch_def_array
++  loongarch_tune_strings;
+ extern loongarch_def_array
+   loongarch_cpu_default_isa;
+ extern loongarch_def_array
+diff --git a/gcc/config/loongarch/loongarch-driver.cc b/gcc/config/loongarch/loongarch-driver.cc
+index 8551cf94d..9e0b79994 100644
+--- a/gcc/config/loongarch/loongarch-driver.cc
++++ b/gcc/config/loongarch/loongarch-driver.cc
+@@ -85,10 +85,10 @@ driver_set_m_parm (int argc, const char **argv)
+ 			   loongarch_isa_ext_strings, 0, N_ISA_EXT_TYPES)
+ 
+   LARCH_DRIVER_PARSE_PARM (la_target.cpu_arch, ARCH, \
+-			   loongarch_cpu_strings, 0, N_ARCH_TYPES)
++			   loongarch_arch_strings, 0, N_ARCH_TYPES)
+ 
+   LARCH_DRIVER_PARSE_PARM (la_target.cpu_tune, TUNE, \
+-			   loongarch_cpu_strings, 0, N_TUNE_TYPES)
++			   loongarch_tune_strings, 0, N_TUNE_TYPES)
+ 
+   LARCH_DRIVER_PARSE_PARM (la_target.cmodel, CMODEL, \
+ 			   loongarch_cmodel_strings, 0, N_CMODEL_TYPES)
+@@ -190,7 +190,7 @@ driver_get_normalized_m_opts (int argc, const char **argv ATTRIBUTE_UNUSED)
+   APPEND_VAL (loongarch_abi_base_strings[la_target.abi.base]);
+ 
+   APPEND_OPT (ARCH);
+-  APPEND_VAL (loongarch_cpu_strings[la_target.cpu_arch]);
++  APPEND_VAL (loongarch_arch_strings[la_target.cpu_arch]);
+ 
+   APPEND_OPT (ISA_EXT_FPU);
+   APPEND_VAL (loongarch_isa_ext_strings[la_target.isa.fpu]);
+@@ -202,7 +202,7 @@ driver_get_normalized_m_opts (int argc, const char **argv ATTRIBUTE_UNUSED)
+   APPEND_VAL (loongarch_cmodel_strings[la_target.cmodel]);
+ 
+   APPEND_OPT (TUNE);
+-  APPEND_VAL (loongarch_cpu_strings[la_target.cpu_tune]);
++  APPEND_VAL (loongarch_tune_strings[la_target.cpu_tune]);
+ 
+   obstack_1grow (&opts_obstack, '\0');
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index c455c5e32..735daeb7c 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -101,6 +101,7 @@ static int abi_compat_p (const struct loongarch_isa *isa,
+ 			 struct loongarch_abi abi);
+ static int abi_default_cpu_arch (struct loongarch_abi abi,
+ 				 struct loongarch_isa *isa);
++static int default_tune_for_arch (int arch, int fallback);
+ 
+ /* Mandatory configure-time defaults.  */
+ #ifndef DEFAULT_ABI_BASE
+@@ -259,35 +260,35 @@ loongarch_config_target (struct loongarch_target *target,
+   /* If cpu_tune is not set using neither -mtune nor --with-tune,
+      the current cpu_arch is used as its default.  */
+   t.cpu_tune = constrained.tune ? target->cpu_tune
+-    : (constrained.arch ? target->cpu_arch :
+-       (with_default_tune ? DEFAULT_CPU_TUNE : DEFAULT_CPU_ARCH));
++    : (constrained.arch
++       ? default_tune_for_arch (target->cpu_arch, with_default_tune
++				? DEFAULT_CPU_TUNE : TUNE_GENERIC)
++       : (with_default_tune ? DEFAULT_CPU_TUNE
++	  : default_tune_for_arch (DEFAULT_CPU_ARCH, TUNE_GENERIC)));
+ 
+ 
+   /* Handle -march/tune=native */
+ #ifdef __loongarch__
+   /* For native compilers, gather local CPU information
+-     and fill the "CPU_NATIVE" index of arrays defined in
+-     loongarch-cpu.c.  */
++     and fill the "ARCH_NATIVE/TUNE_NATIVE" index of arrays
++     defined in loongarch-cpu.c.  */
+ 
+   fill_native_cpu_config (&t);
+ 
+ #else
+-  if (t.cpu_arch == CPU_NATIVE)
++  if (t.cpu_arch == ARCH_NATIVE)
+     fatal_error (UNKNOWN_LOCATION,
+ 		 "%qs does not work on a cross compiler",
+ 		 "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE);
+ 
+-  else if (t.cpu_tune == CPU_NATIVE)
++  else if (t.cpu_tune == TUNE_NATIVE)
+     fatal_error (UNKNOWN_LOCATION,
+ 		 "%qs does not work on a cross compiler",
+ 		 "-m" OPTSTR_TUNE "=" STR_CPU_NATIVE);
+ #endif
+ 
+-  /* Handle -march/tune=abi-default */
+-  if (t.cpu_tune == CPU_ABI_DEFAULT)
+-    t.cpu_tune = abi_default_cpu_arch (t.abi, NULL);
+-
+-  if (t.cpu_arch == CPU_ABI_DEFAULT)
++  /* Handle -march=abi-default */
++  if (t.cpu_arch == ARCH_ABI_DEFAULT)
+     {
+       t.cpu_arch = abi_default_cpu_arch (t.abi, &(t.isa));
+       loongarch_cpu_default_isa[t.cpu_arch] = t.isa;
+@@ -438,16 +439,16 @@ config_target_isa:
+ 	 so we adjust that first if it is not constrained.  */
+       int fallback_arch = abi_default_cpu_arch (t.abi, NULL);
+ 
+-      if (t.cpu_arch == CPU_NATIVE)
++      if (t.cpu_arch == ARCH_NATIVE)
+ 	warning (0, "your native CPU architecture (%qs) "
+ 		 "does not support %qs ABI, falling back to %<-m%s=%s%>",
+ 		 arch_str (&t), abi_str (t.abi), OPTSTR_ARCH,
+-		 loongarch_cpu_strings[fallback_arch]);
++		 loongarch_arch_strings[fallback_arch]);
+       else
+ 	warning (0, "default CPU architecture (%qs) "
+ 		 "does not support %qs ABI, falling back to %<-m%s=%s%>",
+ 		 arch_str (&t), abi_str (t.abi), OPTSTR_ARCH,
+-		 loongarch_cpu_strings[fallback_arch]);
++		 loongarch_arch_strings[fallback_arch]);
+ 
+       t.cpu_arch = fallback_arch;
+       constrained.arch = 1;
+@@ -664,11 +665,40 @@ abi_default_cpu_arch (struct loongarch_abi abi,
+ 	case ABI_BASE_LP64F:
+ 	case ABI_BASE_LP64S:
+ 	  *isa = isa_required (abi);
+-	  return CPU_LOONGARCH64;
++	  return ARCH_LOONGARCH64;
+       }
+   gcc_unreachable ();
+ }
+ 
++static inline int
++default_tune_for_arch (int arch, int fallback)
++{
++  int ret;
++  switch (arch)
++    {
++
++#define TUNE_FOR_ARCH(NAME) \
++    case ARCH_##NAME: \
++      ret = TUNE_##NAME; \
++      break;
++
++    TUNE_FOR_ARCH(NATIVE)
++    TUNE_FOR_ARCH(LOONGARCH64)
++    TUNE_FOR_ARCH(LA464)
++    TUNE_FOR_ARCH(LA664)
++
++#undef TUNE_FOR_ARCH
++
++    case ARCH_ABI_DEFAULT:
++    case ARCH_LA64V1_0:
++    case ARCH_LA64V1_1:
++      ret = fallback;
++    }
++
++  gcc_assert (0 <= ret && ret < N_TUNE_TYPES);
++  return ret;
++}
++
+ static const char*
+ abi_str (struct loongarch_abi abi)
+ {
+@@ -731,7 +761,7 @@ isa_str (const struct loongarch_isa *isa, char separator)
+ static const char*
+ arch_str (const struct loongarch_target *target)
+ {
+-  if (target->cpu_arch == CPU_NATIVE)
++  if (target->cpu_arch == ARCH_NATIVE)
+     {
+       /* Describe a native CPU with unknown PRID.  */
+       const char* isa_string = isa_str (&target->isa, ',');
+@@ -741,7 +771,7 @@ arch_str (const struct loongarch_target *target)
+       APPEND_STRING (isa_string)
+     }
+   else
+-    APPEND_STRING (loongarch_cpu_strings[target->cpu_arch]);
++    APPEND_STRING (loongarch_arch_strings[target->cpu_arch]);
+ 
+   APPEND1 ('\0')
+   return XOBFINISH (&msg_obstack, const char *);
+@@ -956,7 +986,7 @@ loongarch_target_option_override (struct loongarch_target *target,
+   /* Other arch-specific overrides.  */
+   switch (target->cpu_arch)
+     {
+-      case CPU_LA664:
++      case ARCH_LA664:
+ 	/* Enable -mrecipe=all for LA664 by default.  */
+ 	if (!opts_set->x_recip_mask)
+ 	  {
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index a3b467f4c..325c1e29c 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -127,8 +127,8 @@ struct loongarch_flags {
+   (la_target.isa.evolution & OPTION_MASK_ISA_LD_SEQ_SA)
+ 
+ /* TARGET_ macros for use in *.md template conditionals */
+-#define TARGET_uARCH_LA464	  (la_target.cpu_tune == CPU_LA464)
+-#define TARGET_uARCH_LA664	  (la_target.cpu_tune == CPU_LA664)
++#define TARGET_uARCH_LA464	  (la_target.cpu_tune == TUNE_LA464)
++#define TARGET_uARCH_LA664	  (la_target.cpu_tune == TUNE_LA664)
+ 
+ /* Note: optimize_size may vary across functions,
+    while -m[no]-memcpy imposes a global constraint.  */
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index cacae38c0..3cbe12f7b 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -27,10 +27,13 @@ along with GCC; see the file COPYING3.  If not see
+ #define OPTSTR_TUNE "tune"
+ 
+ #define STR_CPU_NATIVE "native"
+-#define STR_CPU_ABI_DEFAULT "abi-default"
++#define STR_ARCH_ABI_DEFAULT "abi-default"
++#define STR_TUNE_GENERIC "generic"
+ #define STR_CPU_LOONGARCH64 "loongarch64"
+ #define STR_CPU_LA464 "la464"
+ #define STR_CPU_LA664 "la664"
++#define STR_ARCH_LA64V1_0 "la64v1.0"
++#define STR_ARCH_LA64V1_1 "la64v1.1"
+ 
+ #define STR_ISA_BASE_LA64 "la64"
+ 
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 50ab6a82a..c86a0856b 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -9605,9 +9605,10 @@ loongarch_cpu_sched_reassociation_width (struct loongarch_target *target,
+ 
+   switch (target->cpu_tune)
+     {
+-    case CPU_LOONGARCH64:
+-    case CPU_LA464:
+-    case CPU_LA664:
++    case TUNE_GENERIC:
++    case TUNE_LOONGARCH64:
++    case TUNE_LA464:
++    case TUNE_LA664:
+       /* Vector part.  */
+       if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))
+ 	{
+@@ -10976,9 +10977,9 @@ loongarch_asm_code_end (void)
+   if (flag_verbose_asm)
+     {
+       fprintf (asm_out_file, "\n%s CPU: %s\n", ASM_COMMENT_START,
+-	       loongarch_cpu_strings [la_target.cpu_arch]);
++	       loongarch_arch_strings[la_target.cpu_arch]);
+       fprintf (asm_out_file, "%s Tune: %s\n", ASM_COMMENT_START,
+-	       loongarch_cpu_strings [la_target.cpu_tune]);
++	       loongarch_tune_strings[la_target.cpu_tune]);
+       fprintf (asm_out_file, "%s Base ISA: %s\n", ASM_COMMENT_START,
+ 	       loongarch_isa_base_strings [la_target.isa.base]);
+       DUMP_FEATURE (ISA_HAS_FRECIPE);
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 6f730d886..69b3b965c 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -103,30 +103,55 @@ Enable LoongArch Advanced SIMD Extension (LASX, 256-bit).
+ 
+ ;; Base target models (implies ISA & tune parameters)
+ Enum
+-Name(cpu_type) Type(int)
+-LoongArch CPU types:
++Name(arch_type) Type(int)
++LoongArch ARCH presets:
+ 
+ EnumValue
+-Enum(cpu_type) String(native) Value(CPU_NATIVE)
++Enum(arch_type) String(native) Value(ARCH_NATIVE)
+ 
+ EnumValue
+-Enum(cpu_type) String(abi-default) Value(CPU_ABI_DEFAULT)
++Enum(arch_type) String(abi-default) Value(ARCH_ABI_DEFAULT)
+ 
+ EnumValue
+-Enum(cpu_type) String(loongarch64) Value(CPU_LOONGARCH64)
++Enum(arch_type) String(loongarch64) Value(ARCH_LOONGARCH64)
+ 
+ EnumValue
+-Enum(cpu_type) String(la464) Value(CPU_LA464)
++Enum(arch_type) String(la464) Value(ARCH_LA464)
+ 
+ EnumValue
+-Enum(cpu_type) String(la664) Value(CPU_LA664)
++Enum(arch_type) String(la664) Value(ARCH_LA664)
++
++EnumValue
++Enum(arch_type) String(la64v1.0) Value(ARCH_LA64V1_0)
++
++EnumValue
++Enum(arch_type) String(la64v1.1) Value(ARCH_LA64V1_1)
+ 
+ march=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save
++Target RejectNegative Joined Enum(arch_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET) Save
+ -march=PROCESSOR	Generate code for the given PROCESSOR ISA.
+ 
++Enum
++Name(tune_type) Type(int)
++LoongArch TUNE presets:
++
++EnumValue
++Enum(tune_type) String(native) Value(TUNE_NATIVE)
++
++EnumValue
++Enum(tune_type) String(generic) Value(TUNE_GENERIC)
++
++EnumValue
++Enum(tune_type) String(loongarch64) Value(TUNE_LOONGARCH64)
++
++EnumValue
++Enum(tune_type) String(la464) Value(TUNE_LA464)
++
++EnumValue
++Enum(tune_type) String(la664) Value(TUNE_LA664)
++
+ mtune=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save
++Target RejectNegative Joined Enum(tune_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET) Save
+ -mtune=PROCESSOR	Generate optimized code for PROCESSOR.
+ 
+ 
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index c9a1969ad..f6d59317b 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -999,7 +999,7 @@ Objective-C and Objective-C++ Dialects}.
+ -msign-extend-enabled  -muser-enabled}
+ 
+ @emph{LoongArch Options}
+-@gccoptlist{-march=@var{cpu-type}  -mtune=@var{cpu-type} -mabi=@var{base-abi-type} @gol
++@gccoptlist{-march=@var{arch-type}  -mtune=@var{tune-type} -mabi=@var{base-abi-type} @gol
+ -mfpu=@var{fpu-type} -msoft-float -msingle-float -mdouble-float @gol
+ -mbranch-cost=@var{n}  -mcheck-zero-division -mno-check-zero-division @gol
+ -mcond-move-int  -mno-cond-move-int @gol
+@@ -24455,35 +24455,52 @@ Enable user-defined instructions.
+ These command-line options are defined for LoongArch targets:
+ 
+ @table @gcctabopt
+-@item -march=@var{cpu-type}
+-@opindex -march
+-Generate instructions for the machine type @var{cpu-type}.  In contrast to
+-@option{-mtune=@var{cpu-type}}, which merely tunes the generated code
+-for the specified @var{cpu-type}, @option{-march=@var{cpu-type}} allows GCC
+-to generate code that may not run at all on processors other than the one
+-indicated.  Specifying @option{-march=@var{cpu-type}} implies
+-@option{-mtune=@var{cpu-type}}, except where noted otherwise.
++@opindex march
++@item -march=@var{arch-type}
++Generate instructions for the machine type @var{arch-type}.
++@option{-march=@var{arch-type}} allows GCC to generate code that
++may not run at all on processors other than the one indicated.
+ 
+-The choices for @var{cpu-type} are:
++The choices for @var{arch-type} are:
+ 
+ @table @samp
+ @item native
+-This selects the CPU to generate code for at compilation time by determining
+-the processor type of the compiling machine.  Using @option{-march=native}
+-enables all instruction subsets supported by the local machine (hence
+-the result might not run on different machines).  Using @option{-mtune=native}
+-produces code optimized for the local machine under the constraints
+-of the selected instruction set.
++Local processor type detected by the native compiler.
+ @item loongarch64
+-A generic CPU with 64-bit extensions.
++Generic LoongArch 64-bit processor.
+ @item la464
+-LoongArch LA464 CPU with LBT, LSX, LASX, LVZ.
++LoongArch LA464-based processor with LSX, LASX.
++@item la664
++LoongArch LA664-based processor with LSX, LASX
++and all LoongArch v1.1 instructions.
++@item la64v1.0
++LoongArch64 ISA version 1.0.
++@item la64v1.1
++LoongArch64 ISA version 1.1.
+ @end table
+ 
++More information about LoongArch ISA versions can be found at
++@uref{https://github.com/loongson/la-toolchain-conventions}.
++
+ @item -mtune=@var{cpu-type}
+ @opindex mtune
+-Optimize the output for the given processor, specified by microarchitecture
+-name.
++@item -mtune=@var{tune-type}
++Optimize the generated code for the given processor target.
++
++The choices for @var{tune-type} are:
++
++@table @samp
++@item native
++Local processor type detected by the native compiler.
++@item generic
++Generic LoongArch processor.
++@item loongarch64
++Generic LoongArch 64-bit processor.
++@item la464
++LoongArch LA464 core.
++@item la664
++LoongArch LA664 core.
++@end table
+ 
+ @item -mabi=@var{base-abi-type}
+ @opindex mabi
+-- 
+2.43.0
+
diff --git a/SME-0062-aarch64-Add-sve_type-to-SVE-builtins-code.patch b/0165-Backport-SME-aarch64-Add-sve_type-to-SVE-builtins-co.patch
similarity index 98%
rename from SME-0062-aarch64-Add-sve_type-to-SVE-builtins-code.patch
rename to 0165-Backport-SME-aarch64-Add-sve_type-to-SVE-builtins-co.patch
index 254d151c1849091ca2c409971ca297ad400a2504..5e9a7eb90acbf1b11ab05b32a1a48d18bc6dc6b1 100644
--- a/SME-0062-aarch64-Add-sve_type-to-SVE-builtins-code.patch
+++ b/0165-Backport-SME-aarch64-Add-sve_type-to-SVE-builtins-co.patch
@@ -1,7 +1,8 @@
-From 60861af08f2d9cd341548a13a62e4e1c75823dca Mon Sep 17 00:00:00 2001
+From a32a9321b3336907fe2d17148cb9e4652642a3e6 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:20 +0000
-Subject: [PATCH 062/144] aarch64: Add sve_type to SVE builtins code
+Subject: [PATCH 066/157] [Backport][SME] aarch64: Add sve_type to SVE builtins
+ code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=7f6de9861e5d7745a0af5174582519a39d545a92
 
@@ -225,5 +226,5 @@ index 374c57e93..f4f2c415f 100644
  		    type_suffix_index = NUM_TYPE_SUFFIXES,
  		    type_suffix_index = NUM_TYPE_SUFFIXES,
 -- 
-2.19.1
+2.33.0
 
diff --git a/0165-LoongArch-Define-builtin-macros-for-ISA-evolutions.patch b/0165-LoongArch-Define-builtin-macros-for-ISA-evolutions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f79b712ef1142808c2caac76153e90e3ff2e2ba1
--- /dev/null
+++ b/0165-LoongArch-Define-builtin-macros-for-ISA-evolutions.patch
@@ -0,0 +1,678 @@
+From 9af73fb7213d5c10b3683465e6682ad20f5abe64 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Tue, 23 Apr 2024 10:42:48 +0800
+Subject: [PATCH 165/188] LoongArch: Define builtin macros for ISA evolutions
+
+Detailed description of these definitions can be found at
+https://github.com/loongson/la-toolchain-conventions, which
+the LoongArch GCC port aims to conform to.
+
+gcc/ChangeLog:
+
+	* config.gcc: Add loongarch-evolution.o.
+	* config/loongarch/genopts/genstr.sh: Enable generation of
+	loongarch-evolution.[cc,h].
+	* config/loongarch/t-loongarch: Likewise.
+	* config/loongarch/genopts/gen-evolution.awk: New file.
+	* config/loongarch/genopts/isa-evolution.in: Mark ISA version
+	of introduction for each ISA evolution feature.
+	* config/loongarch/loongarch-c.cc (loongarch_cpu_cpp_builtins):
+	Define builtin macros for enabled ISA evolutions and the ISA
+	version.
+	* config/loongarch/loongarch-cpu.cc: Use loongarch-evolution.h.
+	* config/loongarch/loongarch.h: Likewise.
+	* config/loongarch/loongarch-cpucfg-map.h: Delete.
+	* config/loongarch/loongarch-evolution.cc: New file.
+	* config/loongarch/loongarch-evolution.h: New file.
+	* config/loongarch/loongarch-opts.h (ISA_HAS_FRECIPE): Define.
+	(ISA_HAS_DIV32): Likewise.
+	(ISA_HAS_LAM_BH): Likewise.
+	(ISA_HAS_LAMCAS): Likewise.
+	(ISA_HAS_LD_SEQ_SA): Likewise.
+---
+ gcc/config.gcc                                |   2 +-
+ .../loongarch/genopts/gen-evolution.awk       | 230 ++++++++++++++++++
+ gcc/config/loongarch/genopts/genstr.sh        |  82 ++-----
+ gcc/config/loongarch/genopts/isa-evolution.in |  10 +-
+ gcc/config/loongarch/loongarch-c.cc           |  23 ++
+ gcc/config/loongarch/loongarch-cpu.cc         |   2 +-
+ gcc/config/loongarch/loongarch-evolution.cc   |  60 +++++
+ ...rch-cpucfg-map.h => loongarch-evolution.h} |  46 +++-
+ gcc/config/loongarch/loongarch-opts.h         |  11 -
+ gcc/config/loongarch/loongarch.h              |   1 +
+ gcc/config/loongarch/t-loongarch              |  26 +-
+ 11 files changed, 398 insertions(+), 95 deletions(-)
+ create mode 100644 gcc/config/loongarch/genopts/gen-evolution.awk
+ create mode 100644 gcc/config/loongarch/loongarch-evolution.cc
+ rename gcc/config/loongarch/{loongarch-cpucfg-map.h => loongarch-evolution.h} (52%)
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index c6820d0f1..a405e6d2e 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -458,7 +458,7 @@ loongarch*-*-*)
+ 	cpu_type=loongarch
+ 	d_target_objs="loongarch-d.o"
+ 	extra_headers="larchintrin.h lsxintrin.h lasxintrin.h"
+-	extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
++	extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o loongarch-evolution.o"
+ 	extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
+ 	extra_options="${extra_options} g.opt fused-madd.opt"
+ 	;;
+diff --git a/gcc/config/loongarch/genopts/gen-evolution.awk b/gcc/config/loongarch/genopts/gen-evolution.awk
+new file mode 100644
+index 000000000..4d105afa9
+--- /dev/null
++++ b/gcc/config/loongarch/genopts/gen-evolution.awk
+@@ -0,0 +1,230 @@
++#!/usr/bin/gawk
++#
++# A simple script that generates loongarch-evolution.h
++# from genopts/isa-evolution.in
++#
++# Copyright (C) 2021-2024 Free Software Foundation, Inc.
++#
++# This file is part of GCC.
++#
++# GCC is free software; you can redistribute it and/or modify it under
++# the terms of the GNU General Public License as published by the Free
++# Software Foundation; either version 3, or (at your option) any later
++# version.
++#
++# GCC is distributed in the hope that it will be useful, but WITHOUT
++# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++# or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++# License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# .
++
++BEGIN {
++    # isa_version_major[]
++    # isa_version_minor[]
++    # cpucfg_word[]
++    # cpucfg_bit_in_word[]
++    # name_capitalized[]
++    # comment[]
++}
++
++{
++    cpucfg_word[NR] = $1
++    cpucfg_bit_in_word[NR] = $2
++    name[NR] = gensub(/-/, "_", "g", $3)
++    name_capitalized[NR] = toupper(name[NR])
++    isa_version_major[NR] = gensub(/^([1-9][0-9]*)\.([0-9]+)$/, "\\1", 1, $4)
++    isa_version_minor[NR] = gensub(/^([1-9][0-9]*)\.([0-9]+)$/, "\\2", 1, $4)
++
++    $1 = $2 = $3 = $4 = ""
++    sub (/^\s*/, "")
++    comment[NR] = $0
++}
++
++function copyright_header(from_year,to_year)
++{
++    print "   Copyright (C) " from_year "-" to_year \
++          " Free Software Foundation, Inc."
++    print ""
++    print "This file is part of GCC."
++    print ""
++    print "GCC is free software; you can redistribute it and/or modify"
++    print "it under the terms of the GNU General Public License as published by"
++    print "the Free Software Foundation; either version 3, or (at your option)"
++    print "any later version."
++    print ""
++    print "GCC is distributed in the hope that it will be useful,"
++    print "but WITHOUT ANY WARRANTY; without even the implied warranty of"
++    print "MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the"
++    print "GNU General Public License for more details."
++    print ""
++    print "You should have received a copy of the GNU General Public License"
++    print "along with GCC; see the file COPYING3.  If not see"
++    print "."
++}
++
++function gen_cpucfg_map()
++{
++    print "static constexpr struct {"
++    print "  int cpucfg_word;"
++    print "  unsigned int cpucfg_bit;"
++    print "  HOST_WIDE_INT isa_evolution_bit;"
++    print "} cpucfg_map[] = {"
++
++    for (i = 1; i <= NR; i++)
++    printf ("  { %d, 1u << %d, OPTION_MASK_ISA_%s },\n",
++            cpucfg_word[i], cpucfg_bit_in_word[i], name_capitalized[i])
++
++    print "};"
++}
++
++function gen_cpucfg_useful_idx()
++{
++    split("0 1 2 16 17 18 19", init_useful_idx)
++
++    delete idx_bucket
++
++    for (i in init_useful_idx)
++        idx_bucket[init_useful_idx[i]] = 1
++    delete init_useful_idx
++
++    for (i in cpucfg_word)
++        idx_bucket[cpucfg_word[i]] = 1
++
++    delete idx_list
++    for (i in idx_bucket)
++        idx_list[length(idx_list)-1] = i+0
++    delete idx_bucket
++
++    asort (idx_list)
++
++    print "static constexpr int cpucfg_useful_idx[] = {"
++    for (i in idx_list)
++        printf("  %d,\n", idx_list[i])
++    print "};"
++
++    print ""
++
++    printf ("static constexpr int N_CPUCFG_WORDS = %d;\n",
++            idx_list[length(idx_list)] + 1)
++
++    delete idx_list
++}
++
++function gen_evolution_decl()
++{
++    print "/* ISA evolution features */"
++    print "enum {"
++
++    for (i = 1; i <= NR; i++)
++    print "  EVO_" name_capitalized[i] " = " i - 1 ","
++
++    print "  N_EVO_FEATURES = " NR
++    print "};"
++    print ""
++
++    print "/* Condition macros */"
++    for (i = 1; i <= NR; i++)
++    printf ("#define ISA_HAS_%s \\\n" \
++            "  (la_target.isa.evolution & OPTION_MASK_ISA_%s)\n",
++            name_capitalized[i], name_capitalized[i])
++    print ""
++
++    print "/* Bitmasks on la_target.isa.evolution.  */"
++    print "extern int la_evo_feature_masks[N_EVO_FEATURES];"
++    print ""
++    print "/* Builtin macro names for the evolution features.  */"
++    print "extern const char* la_evo_macro_name[N_EVO_FEATURES];"
++    print ""
++    print "/* The ISA version where a specific feature is introduced.  */"
++    print "extern int la_evo_version_major[N_EVO_FEATURES];"
++    print "extern int la_evo_version_minor[N_EVO_FEATURES];"
++}
++
++function gen_full_header()
++{
++    print "/* Generated automatically by \"genstr\" from \"isa-evolution.in\"."
++    print "   Please do not edit this file directly."
++    print ""
++
++    copyright_header(2023, 2024)
++
++    print "*/"
++    print ""
++
++    print "#ifndef LOONGARCH_EVOLUTION_H"
++    print "#define LOONGARCH_EVOLUTION_H"
++    print ""
++    print "#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)"
++    print ""
++    print "#include \"options.h\""
++    print ""
++
++    gen_cpucfg_map()
++
++    print ""
++
++    gen_cpucfg_useful_idx()
++
++    print ""
++
++    gen_evolution_decl()
++
++    print ""
++    print "#endif"
++    print ""
++    print "#endif /* LOONGARCH_EVOLUTION_H */"
++}
++
++
++function gen_full_source()
++{
++    print "/* Generated automatically by \"genstr\" from \"isa-evolution.in\"."
++    print "   Please do not edit this file directly."
++    print ""
++
++    copyright_header(2023, 2024)
++
++    print "*/"
++    print ""
++    print "#include \"config.h\""
++    print "#include \"system.h\""
++    print "#include \"coretypes.h\""
++    print "#include \"options.h\""
++    print ""
++    print "#include \"loongarch-evolution.h\""
++    print ""
++
++    print "int la_evo_feature_masks[] = {";
++    for (i = 1; i <= NR; i++)
++    print "  OPTION_MASK_ISA_" name_capitalized[i] ","
++    print "};"
++    print ""
++
++    print "const char* la_evo_macro_name[] = {";
++    for (i = 1; i <= NR; i++)
++    print "  \"__loongarch_" name[i] "\","
++    print "};"
++    print ""
++
++
++    print "int la_evo_version_major[] = {"
++    for (i = 1; i <= NR; i++)
++    print "  " isa_version_major[i] ",    /* " name_capitalized[i] " */"
++    print "};"
++    print ""
++
++    print "int la_evo_version_minor[] = {"
++    for (i = 1; i <= NR; i++)
++    print "  " isa_version_minor[i] ",    /* " name_capitalized[i] " */"
++    print "};"
++}
++
++END {
++    if (header_p)
++        gen_full_header()
++    else
++        gen_full_source()
++}
+diff --git a/gcc/config/loongarch/genopts/genstr.sh b/gcc/config/loongarch/genopts/genstr.sh
+index 391eca121..3e86c8152 100755
+--- a/gcc/config/loongarch/genopts/genstr.sh
++++ b/gcc/config/loongarch/genopts/genstr.sh
+@@ -108,78 +108,30 @@ EOF
+       print("m"$3)
+       gsub(/-/, "_", $3)
+       print("Target Mask(ISA_"toupper($3)") Var(la_isa_evolution)")
+-      $1=""; $2=""; $3=""
++      $1=""; $2=""; $3=""; $4=""
+       sub(/^ */, "", $0)
+       print($0)
+     }' isa-evolution.in
+ }
+ 
+-gen_cpucfg_map() {
+-    cat <.  */
+-
+-#ifndef LOONGARCH_CPUCFG_MAP_H
+-#define LOONGARCH_CPUCFG_MAP_H
+-
+-#include "options.h"
+-
+-static constexpr struct {
+-  int cpucfg_word;
+-  unsigned int cpucfg_bit;
+-  HOST_WIDE_INT isa_evolution_bit;
+-} cpucfg_map[] = {
+-EOF
+-
+-    # Generate the strings from isa-evolution.in.
+-    awk '{
+-      gsub(/-/, "_", $3)
+-      print("  { "$1", 1u << "$2", OPTION_MASK_ISA_"toupper($3)" },")
+-    }' isa-evolution.in
+-
+-    echo "};"
+-    echo
+-    echo "static constexpr int cpucfg_useful_idx[] = {"
+-
+-    awk 'BEGIN { print("  0,\n  1,\n  2,\n  16,\n  17,\n  18,\n  19,") }
+-    {if ($1+0 > max+0) max=$1; print("  "$1",")}' \
+-   isa-evolution.in | sort -n | uniq
+-
+-    echo "};"
+-    echo ""
+-
+-    awk 'BEGIN { max=19 }
+-    { if ($1+0 > max+0) max=$1 }
+-    END { print "static constexpr int N_CPUCFG_WORDS = "1+max";" }' \
+-   isa-evolution.in
+-
+-    echo "#endif /* LOONGARCH_CPUCFG_MAP_H */"
+-}
+-
+ main() {
+     case "$1" in
+-    cpucfg-map) gen_cpucfg_map;;
+-	header) gen_defines;;
+-	opt) gen_options;;
+-    *) echo "Unknown Command: \"$1\". Available: cpucfg-map, header, opt"; exit 1;;
++   evolution_h)
++            awk -v header_p=1 -f gen-evolution.awk isa-evolution.in
++            ;;
++   evolution_c)
++            awk -v header_p=0 -f gen-evolution.awk isa-evolution.in
++            ;;
++   header)
++            gen_defines
++            ;;
++   opt)
++            gen_options
++            ;;
++   *)
++            echo "Unknown Command: \"$1\". Available: header, opt, evolution_h, evolution_c"
++            exit 1
++            ;;
+     esac
+ }
+ 
+diff --git a/gcc/config/loongarch/genopts/isa-evolution.in b/gcc/config/loongarch/genopts/isa-evolution.in
+index 11a198b64..50f72d5a0 100644
+--- a/gcc/config/loongarch/genopts/isa-evolution.in
++++ b/gcc/config/loongarch/genopts/isa-evolution.in
+@@ -1,5 +1,5 @@
+-2	25	frecipe		Support frecipe.{s/d} and frsqrte.{s/d} instructions.
+-2	26	div32		Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
+-2	27	lam-bh		Support am{swap/add}[_db].{b/h} instructions.
+-2	28	lamcas		Support amcas[_db].{b/h/w/d} instructions.
+-3	23	ld-seq-sa	Do not need load-load barriers (dbar 0x700).
++2	25	frecipe		1.1		Support frecipe.{s/d} and frsqrte.{s/d} instructions.
++2	26	div32		1.1		Support div.w[u] and mod.w[u] instructions with inputs not sign-extended.
++2	27	lam-bh		1.1		Support am{swap/add}[_db].{b/h} instructions.
++2	28	lamcas		1.1		Support amcas[_db].{b/h/w/d} instructions.
++3	23	ld-seq-sa	1.1		Do not need load-load barriers (dbar 0x700).
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index 153db75b0..4ecea6a45 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -103,6 +103,29 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+       builtin_define ("__loongarch_simd_width=256");
+     }
+ 
++  /* ISA evolution features */
++  int max_v_major = 1, max_v_minor = 0;
++ 
++  for (int i = 0; i < N_EVO_FEATURES; i++)
++    if (la_target.isa.evolution & la_evo_feature_masks[i])
++      {
++   builtin_define (la_evo_macro_name[i]);
++ 
++   int major = la_evo_version_major[i],
++       minor = la_evo_version_minor[i];
++ 
++   max_v_major = major > max_v_major ? major : max_v_major;
++   max_v_minor = major == max_v_major
++     ? (minor > max_v_minor ? minor : max_v_minor): max_v_minor;
++      }
++ 
++  /* Find the minimum ISA version required to run the target program.  */
++  if (!(max_v_major == 1 && max_v_minor <= 1 && ISA_HAS_LASX))
++    {
++      builtin_define_with_int_value ("__loongarch_version_major", max_v_major);
++      builtin_define_with_int_value ("__loongarch_version_minor", max_v_minor);
++    }
++
+   /* Native Data Sizes.  */
+   builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE);
+   builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE);
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index eb1eb8011..49107f2ae 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -28,8 +28,8 @@ along with GCC; see the file COPYING3.  If not see
+ #include "loongarch-def.h"
+ #include "loongarch-opts.h"
+ #include "loongarch-cpu.h"
+-#include "loongarch-cpucfg-map.h"
+ #include "loongarch-str.h"
++#include "loongarch-evolution.h"
+ 
+ 
+ /* Native CPU detection with "cpucfg" */
+diff --git a/gcc/config/loongarch/loongarch-evolution.cc b/gcc/config/loongarch/loongarch-evolution.cc
+new file mode 100644
+index 000000000..1fb4e3b01
+--- /dev/null
++++ b/gcc/config/loongarch/loongarch-evolution.cc
+@@ -0,0 +1,60 @@
++/* Generated automatically by "genstr" from "isa-evolution.in".
++   Please do not edit this file directly.
++
++   Copyright (C) 2023-2024 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.
++*/
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "options.h"
++
++#include "loongarch-evolution.h"
++
++int la_evo_feature_masks[] = {
++  OPTION_MASK_ISA_FRECIPE,
++  OPTION_MASK_ISA_DIV32,
++  OPTION_MASK_ISA_LAM_BH,
++  OPTION_MASK_ISA_LAMCAS,
++  OPTION_MASK_ISA_LD_SEQ_SA,
++};
++
++const char* la_evo_macro_name[] = {
++  "__loongarch_frecipe",
++  "__loongarch_div32",
++  "__loongarch_lam_bh",
++  "__loongarch_lamcas",
++  "__loongarch_ld_seq_sa",
++};
++
++int la_evo_version_major[] = {
++  1,    /* FRECIPE */
++  1,    /* DIV32 */
++  1,    /* LAM_BH */
++  1,    /* LAMCAS */
++  1,    /* LD_SEQ_SA */
++};
++
++int la_evo_version_minor[] = {
++  1,    /* FRECIPE */
++  1,    /* DIV32 */
++  1,    /* LAM_BH */
++  1,    /* LAMCAS */
++  1,    /* LD_SEQ_SA */
++};
+diff --git a/gcc/config/loongarch/loongarch-cpucfg-map.h b/gcc/config/loongarch/loongarch-evolution.h
+similarity index 52%
+rename from gcc/config/loongarch/loongarch-cpucfg-map.h
+rename to gcc/config/loongarch/loongarch-evolution.h
+index 148333c24..d64996481 100644
+--- a/gcc/config/loongarch/loongarch-cpucfg-map.h
++++ b/gcc/config/loongarch/loongarch-evolution.h
+@@ -17,10 +17,13 @@ GNU General Public License for more details.
+ 
+ You should have received a copy of the GNU General Public License
+ along with GCC; see the file COPYING3.  If not see
+-.  */
++.
++*/
+ 
+-#ifndef LOONGARCH_CPUCFG_MAP_H
+-#define LOONGARCH_CPUCFG_MAP_H
++#ifndef LOONGARCH_EVOLUTION_H
++#define LOONGARCH_EVOLUTION_H
++
++#if !defined(IN_LIBGCC2) && !defined(IN_TARGET_LIBS) && !defined(IN_RTS)
+ 
+ #include "options.h"
+ 
+@@ -48,4 +51,39 @@ static constexpr int cpucfg_useful_idx[] = {
+ };
+ 
+ static constexpr int N_CPUCFG_WORDS = 20;
+-#endif /* LOONGARCH_CPUCFG_MAP_H */
++
++/* ISA evolution features */
++enum {
++  EVO_FRECIPE = 0,
++  EVO_DIV32 = 1,
++  EVO_LAM_BH = 2,
++  EVO_LAMCAS = 3,
++  EVO_LD_SEQ_SA = 4,
++  N_EVO_FEATURES = 5
++};
++
++/* Condition macros */
++#define ISA_HAS_FRECIPE \
++  (la_target.isa.evolution & OPTION_MASK_ISA_FRECIPE)
++#define ISA_HAS_DIV32 \
++  (la_target.isa.evolution & OPTION_MASK_ISA_DIV32)
++#define ISA_HAS_LAM_BH \
++  (la_target.isa.evolution & OPTION_MASK_ISA_LAM_BH)
++#define ISA_HAS_LAMCAS \
++  (la_target.isa.evolution & OPTION_MASK_ISA_LAMCAS)
++#define ISA_HAS_LD_SEQ_SA \
++  (la_target.isa.evolution & OPTION_MASK_ISA_LD_SEQ_SA)
++
++/* Bitmasks on la_target.isa.evolution.  */
++extern int la_evo_feature_masks[N_EVO_FEATURES];
++
++/* Builtin macro names for the evolution features.  */
++extern const char* la_evo_macro_name[N_EVO_FEATURES];
++
++/* The ISA version where a specific feature is introduced.  */
++extern int la_evo_version_major[N_EVO_FEATURES];
++extern int la_evo_version_minor[N_EVO_FEATURES];
++
++#endif
++
++#endif /* LOONGARCH_EVOLUTION_H */
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 325c1e29c..19bae5a0b 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -115,17 +115,6 @@ struct loongarch_flags {
+ #define ISA_HAS_LASX \
+   (la_target.isa.simd == ISA_EXT_SIMD_LASX)
+ 
+-#define ISA_HAS_FRECIPE \
+-  (la_target.isa.evolution & OPTION_MASK_ISA_FRECIPE)
+-#define ISA_HAS_DIV32 \
+-  (la_target.isa.evolution & OPTION_MASK_ISA_DIV32)
+-#define ISA_HAS_LAM_BH \
+-  (la_target.isa.evolution & OPTION_MASK_ISA_LAM_BH)
+-#define ISA_HAS_LAMCAS \
+-  (la_target.isa.evolution & OPTION_MASK_ISA_LAMCAS)
+-#define ISA_HAS_LD_SEQ_SA \
+-  (la_target.isa.evolution & OPTION_MASK_ISA_LD_SEQ_SA)
+-
+ /* TARGET_ macros for use in *.md template conditionals */
+ #define TARGET_uARCH_LA464	  (la_target.cpu_tune == TUNE_LA464)
+ #define TARGET_uARCH_LA664	  (la_target.cpu_tune == TUNE_LA664)
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 089206605..6743d2684 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -22,6 +22,7 @@ along with GCC; see the file COPYING3.  If not see
+ /* LoongArch external variables defined in loongarch.cc.  */
+ 
+ #include "config/loongarch/loongarch-opts.h"
++#include "config/loongarch/loongarch-evolution.h"
+ 
+ #define SWITCHABLE_TARGET 1
+ 
+diff --git a/gcc/config/loongarch/t-loongarch b/gcc/config/loongarch/t-loongarch
+index 488e8cff3..53dde9ce6 100644
+--- a/gcc/config/loongarch/t-loongarch
++++ b/gcc/config/loongarch/t-loongarch
+@@ -21,7 +21,7 @@ GTM_H += loongarch-multilib.h
+ OPTIONS_H_EXTRA += $(srcdir)/config/loongarch/loongarch-def.h	    \
+ 		   $(srcdir)/config/loongarch/loongarch-def-array.h \
+ 		   $(srcdir)/config/loongarch/loongarch-tune.h	    \
+-		   $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
++		   $(srcdir)/config/loongarch/loongarch-evolution.h
+ 
+ # Canonical target triplet from config.gcc
+ LA_MULTIARCH_TRIPLET = $(patsubst LA_MULTIARCH_TRIPLET=%,%,$\
+@@ -62,7 +62,11 @@ loongarch-opts.o: $(srcdir)/config/loongarch/loongarch-opts.cc $(LA_STR_H)
+ 	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+ 
+ loongarch-cpu.o: $(srcdir)/config/loongarch/loongarch-cpu.cc $(LA_STR_H) \
+-		 $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
++		 $(srcdir)/config/loongarch/loongarch-evolution.h
++	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
++
++loongarch-evolution.o: $(srcdir)/config/loongarch/loongarch-evolution.cc $(LA_STR_H) \
++	$(srcdir)/config/loongarch/loongarch-evolution.h
+ 	$(COMPILER) -c $(ALL_COMPILERFLAGS) $(ALL_CPPFLAGS) $(INCLUDES) $<
+ 
+ loongarch-def.o: $(srcdir)/config/loongarch/loongarch-def.cc $(LA_STR_H)
+@@ -84,11 +88,17 @@ s-loongarch-opt: $(srcdir)/config/loongarch/genopts/genstr.sh \
+     $(srcdir)/config/loongarch/loongarch.opt
+ 	$(STAMP) s-loongarch-opt
+ 
+-$(srcdir)/config/loongarch/loongarch-cpucfg-map.h: s-loongarch-cpucfg-map
++$(srcdir)/config/loongarch/loongarch-evolution.h: s-loongarch-evolution
+ 	@true
+-s-loongarch-cpucfg-map: $(srcdir)/config/loongarch/genopts/genstr.sh \
+-	$(srcdir)/config/loongarch/genopts/isa-evolution.in
+-	$(SHELL) $< cpucfg-map > tmp-cpucfg.h
+-	$(SHELL) $(srcdir)/../move-if-change tmp-cpucfg.h \
+-	    $(srcdir)/config/loongarch/loongarch-cpucfg-map.h
++$(srcdir)/config/loongarch/loongarch-evolution.cc: s-loongarch-evolution
++	@true
++s-loongarch-evolution: $(srcdir)/config/loongarch/genopts/genstr.sh \
++	$(srcdir)/config/loongarch/genopts/isa-evolution.in \
++	$(srcdir)/config/loongarch/genopts/gen-evolution.awk
++	$(SHELL) $< evolution_h > tmp-isa-evo.h
++	$(SHELL) $< evolution_c > tmp-isa-evo.cc
++	$(SHELL) $(srcdir)/../move-if-change tmp-isa-evo.h \
++	    $(srcdir)/config/loongarch/loongarch-evolution.h
++	$(SHELL) $(srcdir)/../move-if-change tmp-isa-evo.cc \
++	    $(srcdir)/config/loongarch/loongarch-evolution.cc
+ 	$(STAMP) $@
+-- 
+2.43.0
+
diff --git a/SME-0063-aarch64-Generalise-some-SVE-ACLE-error-messages.patch b/0166-Backport-SME-aarch64-Generalise-some-SVE-ACLE-error-.patch
similarity index 99%
rename from SME-0063-aarch64-Generalise-some-SVE-ACLE-error-messages.patch
rename to 0166-Backport-SME-aarch64-Generalise-some-SVE-ACLE-error-.patch
index 17a1e69ca182abe9bf852fbe0036a82a3efc9285..5c7889c07cc231d435b87a86bd331b6773b15971 100644
--- a/SME-0063-aarch64-Generalise-some-SVE-ACLE-error-messages.patch
+++ b/0166-Backport-SME-aarch64-Generalise-some-SVE-ACLE-error-.patch
@@ -1,7 +1,8 @@
-From b9bd3e7de0f4d858d3476e7febb306d3ca4f4d0f Mon Sep 17 00:00:00 2001
+From 21839879d5f00db48cdacd472044a9bd4e23a2c6 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:20 +0000
-Subject: [PATCH 063/144] aarch64: Generalise some SVE ACLE error messages
+Subject: [PATCH 067/157] [Backport][SME] aarch64: Generalise some SVE ACLE
+ error messages
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=bb01ef94ff5096b907639aa3a1d77850921e7d37
 
@@ -1469,5 +1470,5 @@ index 95a97a72e..a194bd6ab 100644
    svunpklo (s8);
    svunpklo (s16);
 -- 
-2.19.1
+2.33.0
 
diff --git a/0166-LoongArch-Add-constraints-for-bit-string-operation-d.patch b/0166-LoongArch-Add-constraints-for-bit-string-operation-d.patch
new file mode 100644
index 0000000000000000000000000000000000000000..aa4dd2fbe56e6003d2a251e03524c1ccbaf2c2c9
--- /dev/null
+++ b/0166-LoongArch-Add-constraints-for-bit-string-operation-d.patch
@@ -0,0 +1,120 @@
+From 3bb46830b0f92f54d1ef529796348c0a86504065 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 26 Apr 2024 15:59:11 +0800
+Subject: [PATCH 166/188] LoongArch: Add constraints for bit string operation
+ define_insn_and_split's [PR114861]
+
+Without the constrants, the compiler attempts to use a stack slot as the
+target, causing an ICE building the kernel with -Os:
+
+    drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c:3144:1:
+    error: could not split insn
+    (insn:TI 1764 67 1745
+      (set (mem/c:DI (reg/f:DI 3 $r3) [707 %sfp+-80 S8 A64])
+           (and:DI (reg/v:DI 28 $r28 [orig:422 raster_config ] [422])
+                   (const_int -50331649 [0xfffffffffcffffff])))
+      "drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c":1386:21 111
+      {*bstrins_di_for_mask}
+      (nil))
+
+Add these constrants to fix the issue.
+
+gcc/ChangeLog:
+
+	PR target/114861
+	* config/loongarch/loongarch.md (bstrins__for_mask): Add
+	constraints for operands.
+	(bstrins__for_ior_mask): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/114861
+	* gcc.target/loongarch/pr114861.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 16 ++++----
+ gcc/testsuite/gcc.target/loongarch/pr114861.c | 39 +++++++++++++++++++
+ 2 files changed, 47 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr114861.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 95beb88fe..20494ce8a 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1543,9 +1543,9 @@
+    (set_attr "mode" "")])
+ 
+ (define_insn_and_split "*bstrins__for_mask"
+-  [(set (match_operand:GPR 0 "register_operand")
+-	(and:GPR (match_operand:GPR 1 "register_operand")
+-		 (match_operand:GPR 2 "ins_zero_bitmask_operand")))]
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(and:GPR (match_operand:GPR 1 "register_operand" "r")
++		 (match_operand:GPR 2 "ins_zero_bitmask_operand" "i")))]
+   ""
+   "#"
+   ""
+@@ -1563,11 +1563,11 @@
+   })
+ 
+ (define_insn_and_split "*bstrins__for_ior_mask"
+-  [(set (match_operand:GPR 0 "register_operand")
+-	(ior:GPR (and:GPR (match_operand:GPR 1 "register_operand")
+-                          (match_operand:GPR 2 "const_int_operand"))
+-		 (and:GPR (match_operand:GPR 3 "register_operand")
+-			  (match_operand:GPR 4 "const_int_operand"))))]
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(ior:GPR (and:GPR (match_operand:GPR 1 "register_operand" "r")
++			  (match_operand:GPR 2 "const_int_operand" "i"))
++		 (and:GPR (match_operand:GPR 3 "register_operand" "r")
++			  (match_operand:GPR 4 "const_int_operand" "i"))))]
+   "loongarch_pre_reload_split ()
+    && loongarch_use_bstrins_for_ior_with_mask (mode, operands)"
+   "#"
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr114861.c b/gcc/testsuite/gcc.target/loongarch/pr114861.c
+new file mode 100644
+index 000000000..e6507c406
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr114861.c
+@@ -0,0 +1,39 @@
++/* PR114861: ICE building the kernel with -Os
++   Reduced from linux/fs/ntfs3/attrib.c at revision c942a0cd3603.  */
++/* { dg-do compile } */
++/* { dg-options "-Os -march=loongarch64 -msoft-float -mabi=lp64s" } */
++
++long evcn, attr_collapse_range_vbo, attr_collapse_range_bytes;
++unsigned short flags;
++int attr_collapse_range_ni_0_0;
++int *attr_collapse_range_mi;
++unsigned attr_collapse_range_svcn, attr_collapse_range_vcn1;
++void ni_insert_nonresident (unsigned, unsigned short, int **);
++int mi_pack_runs (int);
++int
++attr_collapse_range (void)
++{
++  _Bool __trans_tmp_1;
++  int run = attr_collapse_range_ni_0_0;
++  unsigned evcn1, vcn, end;
++  short a_flags = flags;
++  __trans_tmp_1 = flags & (32768 | 1);
++  if (__trans_tmp_1)
++    return 2;
++  vcn = attr_collapse_range_vbo;
++  end = attr_collapse_range_bytes;
++  evcn1 = evcn;
++  for (;;)
++    if (attr_collapse_range_svcn >= end)
++      {
++        unsigned eat, next_svcn = mi_pack_runs (42);
++        attr_collapse_range_vcn1 = (vcn ? vcn : attr_collapse_range_svcn);
++        eat = (0 < end) - attr_collapse_range_vcn1;
++        mi_pack_runs (run - eat);
++        if (next_svcn + eat)
++          ni_insert_nonresident (evcn1 - eat - next_svcn, a_flags,
++                                 &attr_collapse_range_mi);
++      }
++    else
++      return 42;
++}
+-- 
+2.43.0
+
diff --git a/SME-0064-aarch64-Replace-vague-previous-arguments-message.patch b/0167-Backport-SME-aarch64-Replace-vague-previous-argument.patch
similarity index 99%
rename from SME-0064-aarch64-Replace-vague-previous-arguments-message.patch
rename to 0167-Backport-SME-aarch64-Replace-vague-previous-argument.patch
index 65cbb5bcba265151c3cf1230fa6f9c706773bbad..840ad1d4aa5912fefca0d280d79e0fc60403d503 100644
--- a/SME-0064-aarch64-Replace-vague-previous-arguments-message.patch
+++ b/0167-Backport-SME-aarch64-Replace-vague-previous-argument.patch
@@ -1,7 +1,8 @@
-From 5b23ec4d321820fce8a687b8ac8d305e2663c3ef Mon Sep 17 00:00:00 2001
+From 6a7cb5074824416ae562de0589550a930e9dbcaf Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:21 +0000
-Subject: [PATCH 064/144] aarch64: Replace vague "previous arguments" message
+Subject: [PATCH 068/157] [Backport][SME] aarch64: Replace vague "previous
+ arguments" message
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=1b52d4b66e8b91ec1e3de9c0b79aaf258824b875
 
@@ -693,5 +694,5 @@ index 7e869bda8..6ffd3d9e8 100644
    pg = svadd_x (pg, pg, pg); /* { dg-error {'svadd_x' has no form that takes 'svbool_t' arguments} } */
  }
 -- 
-2.19.1
+2.33.0
 
diff --git a/0167-LoongArch-Guard-REGNO-with-REG_P-in-loongarch_expand.patch b/0167-LoongArch-Guard-REGNO-with-REG_P-in-loongarch_expand.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bc867d91de76d647bed7cb074ff8e49f57e67725
--- /dev/null
+++ b/0167-LoongArch-Guard-REGNO-with-REG_P-in-loongarch_expand.patch
@@ -0,0 +1,67 @@
+From be1397b598a436d562e6a35a13ed2ae695531255 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 22 May 2024 09:29:43 +0800
+Subject: [PATCH 167/188] LoongArch: Guard REGNO with REG_P in
+ loongarch_expand_conditional_move [PR115169]
+
+gcc/ChangeLog:
+
+	PR target/115169
+	* config/loongarch/loongarch.cc
+	(loongarch_expand_conditional_move): Guard REGNO with REG_P.
+---
+ gcc/config/loongarch/loongarch.cc | 17 ++++++++++++-----
+ 1 file changed, 12 insertions(+), 5 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index c86a0856b..0c2c38f6f 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5341,6 +5341,7 @@ loongarch_expand_conditional_move (rtx *operands)
+   rtx op1_extend = op1;
+ 
+   /* Record whether operands[2] and operands[3] modes are promoted to word_mode.  */
++  bool promote_op[2] = {false, false};
+   bool promote_p = false;
+   machine_mode mode = GET_MODE (operands[0]);
+ 
+@@ -5348,9 +5349,15 @@ loongarch_expand_conditional_move (rtx *operands)
+     loongarch_emit_float_compare (&code, &op0, &op1);
+   else
+     {
+-      if ((REGNO (op0) == REGNO (operands[2])
+-	   || (REGNO (op1) == REGNO (operands[3]) && (op1 != const0_rtx)))
+-	  && (GET_MODE_SIZE (GET_MODE (op0)) < word_mode))
++      if (GET_MODE_SIZE (GET_MODE (op0)) < word_mode)
++	{
++	  promote_op[0] = (REG_P (op0) && REG_P (operands[2]) &&
++			   REGNO (op0) == REGNO (operands[2]));
++	  promote_op[1] = (REG_P (op1) && REG_P (operands[3]) &&
++			   REGNO (op1) == REGNO (operands[3]));
++	}
++
++      if (promote_op[0] || promote_op[1])
+ 	{
+ 	  mode = word_mode;
+ 	  promote_p = true;
+@@ -5392,7 +5399,7 @@ loongarch_expand_conditional_move (rtx *operands)
+ 
+       if (promote_p)
+ 	{
+-	  if (REGNO (XEXP (operands[1], 0)) == REGNO (operands[2]))
++	  if (promote_op[0])
+ 	    op2 = op0_extend;
+ 	  else
+ 	    {
+@@ -5400,7 +5407,7 @@ loongarch_expand_conditional_move (rtx *operands)
+ 	      op2 = force_reg (mode, op2);
+ 	    }
+ 
+-	  if (REGNO (XEXP (operands[1], 1)) == REGNO (operands[3]))
++	  if (promote_op[1])
+ 	    op3 = op1_extend;
+ 	  else
+ 	    {
+-- 
+2.43.0
+
diff --git a/SME-0065-aarch64-Make-more-use-of-sve_type-in-ACLE-code.patch b/0168-Backport-SME-aarch64-Make-more-use-of-sve_type-in-AC.patch
similarity index 98%
rename from SME-0065-aarch64-Make-more-use-of-sve_type-in-ACLE-code.patch
rename to 0168-Backport-SME-aarch64-Make-more-use-of-sve_type-in-AC.patch
index b8a999f2f2c706f029e0c932e2ac173ce23bb02c..d293e4e7cf2450c27396b31aaaef43016caf288f 100644
--- a/SME-0065-aarch64-Make-more-use-of-sve_type-in-ACLE-code.patch
+++ b/0168-Backport-SME-aarch64-Make-more-use-of-sve_type-in-AC.patch
@@ -1,7 +1,8 @@
-From 961bde1640e5edb40943b98a1579f67e961bd5ad Mon Sep 17 00:00:00 2001
+From 05dee9ad331c27345b014fe9aec0067a6f3b07d9 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:21 +0000
-Subject: [PATCH 065/144] aarch64: Make more use of sve_type in ACLE code
+Subject: [PATCH 069/157] [Backport][SME] aarch64: Make more use of sve_type in
+ ACLE code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=1f7f076ad6293cad19d35efdf726eb48cf78e3dd
 
@@ -363,5 +364,5 @@ index f7d6cc084..a7cfff7c1 100644
  				    unsigned int = SAME_SIZE);
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0168-LoongArch-Fix-mode-size-comparision-in-loongarch_exp.patch b/0168-LoongArch-Fix-mode-size-comparision-in-loongarch_exp.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f6986bb04a13c100e22e89898c55aa1f543ba8a1
--- /dev/null
+++ b/0168-LoongArch-Fix-mode-size-comparision-in-loongarch_exp.patch
@@ -0,0 +1,36 @@
+From 7675f45536691eeca7d8163020c9bfb127d5ee4f Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 12 Jun 2024 11:01:53 +0800
+Subject: [PATCH 168/188] LoongArch: Fix mode size comparision in
+ loongarch_expand_conditional_move
+
+We were comparing a mode size with word_mode, but word_mode is an enum
+value thus this does not really make any sense.  (Un)luckily E_DImode
+happens to be 8 so this seemed to work, but let's make it correct so it
+won't blow up when we add LA32 support or add another machine mode...
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(loongarch_expand_conditional_move): Compare mode size with
+	UNITS_PER_WORD instead of word_mode.
+---
+ gcc/config/loongarch/loongarch.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 0c2c38f6f..77f83ab9e 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5349,7 +5349,7 @@ loongarch_expand_conditional_move (rtx *operands)
+     loongarch_emit_float_compare (&code, &op0, &op1);
+   else
+     {
+-      if (GET_MODE_SIZE (GET_MODE (op0)) < word_mode)
++      if (GET_MODE_SIZE (GET_MODE (op0)) < UNITS_PER_WORD)
+ 	{
+ 	  promote_op[0] = (REG_P (op0) && REG_P (operands[2]) &&
+ 			   REGNO (op0) == REGNO (operands[2]));
+-- 
+2.43.0
+
diff --git a/SME-0066-aarch64-Tweak-error-message-for-tuple-vector-pairs.patch b/0169-Backport-SME-aarch64-Tweak-error-message-for-tuple-v.patch
similarity index 97%
rename from SME-0066-aarch64-Tweak-error-message-for-tuple-vector-pairs.patch
rename to 0169-Backport-SME-aarch64-Tweak-error-message-for-tuple-v.patch
index 15b5737d242ee7f231a0001ba630c144814d2fae..f2f2fd09cb350d39149ef148cc80b63dc4af5a26 100644
--- a/SME-0066-aarch64-Tweak-error-message-for-tuple-vector-pairs.patch
+++ b/0169-Backport-SME-aarch64-Tweak-error-message-for-tuple-v.patch
@@ -1,7 +1,8 @@
-From 2807e2dde8e3cbc122cf9d0d31fbef8b88e22ef7 Mon Sep 17 00:00:00 2001
+From 1abb02c636eef4f9a5f55f243bc0c4d38ee1f849 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:22 +0000
-Subject: [PATCH 066/144] aarch64: Tweak error message for (tuple,vector) pairs
+Subject: [PATCH 070/157] [Backport][SME] aarch64: Tweak error message for
+ (tuple,vector) pairs
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=5ce2e22b7e02c7fbd1ab8145b632559b67ae9958
 
@@ -101,5 +102,5 @@ index be911a731..f0696fb07 100644
    s32x4 = svset4 (s32x4, 0, s32);
    f64 = svset4 (s32x4, 0, s32); /* { dg-error {incompatible types when assigning to type 'svfloat64_t' from type 'svint32x4_t'} } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/0169-LoongArch-Use-bstrins-for-value-1u-const.patch b/0169-LoongArch-Use-bstrins-for-value-1u-const.patch
new file mode 100644
index 0000000000000000000000000000000000000000..11190c3e49e77a7637d0c6d1e9b84a78f46c1398
--- /dev/null
+++ b/0169-LoongArch-Use-bstrins-for-value-1u-const.patch
@@ -0,0 +1,135 @@
+From 7e34bede110bfa7b2f91dc657c41ed0e7b4b11f7 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 9 Jun 2024 14:43:48 +0800
+Subject: [PATCH 169/188] LoongArch: Use bstrins for "value & (-1u << const)"
+
+A move/bstrins pair is as fast as a (addi.w|lu12i.w|lu32i.d|lu52i.d)/and
+pair, and twice fast as a srli/slli pair.  When the src reg and the dst
+reg happens to be the same, the move instruction can be optimized away.
+
+gcc/ChangeLog:
+
+	* config/loongarch/predicates.md (high_bitmask_operand): New
+	predicate.
+	* config/loongarch/constraints.md (Yy): New constriant.
+	* config/loongarch/loongarch.md (and3_align): New
+	define_insn_and_split.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/bstrins-1.c: New test.
+	* gcc.target/loongarch/bstrins-2.c: New test.
+---
+ gcc/config/loongarch/constraints.md            |  5 +++++
+ gcc/config/loongarch/loongarch.md              | 17 +++++++++++++++++
+ gcc/config/loongarch/predicates.md             |  4 ++++
+ gcc/testsuite/gcc.target/loongarch/bstrins-1.c |  9 +++++++++
+ gcc/testsuite/gcc.target/loongarch/bstrins-2.c | 14 ++++++++++++++
+ 5 files changed, 49 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/bstrins-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/bstrins-2.c
+
+diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md
+index cec5d8857..f3f5776da 100644
+--- a/gcc/config/loongarch/constraints.md
++++ b/gcc/config/loongarch/constraints.md
+@@ -94,6 +94,7 @@
+ ;;       "A constant @code{move_operand} that can be safely loaded using
+ ;;	  @code{la}."
+ ;;    "Yx"
++;;    "Yy"
+ ;; "Z" -
+ ;;    "ZC"
+ ;;      "A memory operand whose address is formed by a base register and offset
+@@ -291,6 +292,10 @@
+    "@internal"
+    (match_operand 0 "low_bitmask_operand"))
+ 
++(define_constraint "Yy"
++   "@internal"
++   (match_operand 0 "high_bitmask_operand"))
++
+ (define_constraint "YI"
+   "@internal
+    A replicated vector const in which the replicated value is in the range
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 20494ce8a..55a759850 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1542,6 +1542,23 @@
+   [(set_attr "move_type" "pick_ins")
+    (set_attr "mode" "")])
+ 
++(define_insn_and_split "and3_align"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(and:GPR (match_operand:GPR 1 "register_operand" "r")
++		 (match_operand:GPR 2 "high_bitmask_operand" "Yy")))]
++  ""
++  "#"
++  ""
++  [(set (match_dup 0) (match_dup 1))
++   (set (zero_extract:GPR (match_dup 0) (match_dup 2) (const_int 0))
++	(const_int 0))]
++{
++  int len;
++
++  len = low_bitmask_len (mode, ~INTVAL (operands[2]));
++  operands[2] = GEN_INT (len);
++})
++
+ (define_insn_and_split "*bstrins__for_mask"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(and:GPR (match_operand:GPR 1 "register_operand" "r")
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 1d9a30695..95be8a4fe 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -293,6 +293,10 @@
+   (and (match_code "const_int")
+        (match_test "low_bitmask_len (mode, INTVAL (op)) > 12")))
+ 
++(define_predicate "high_bitmask_operand"
++  (and (match_code "const_int")
++       (match_test "low_bitmask_len (mode, ~INTVAL (op)) > 0")))
++
+ (define_predicate "d_operand"
+   (and (match_code "reg")
+        (match_test "GP_REG_P (REGNO (op))")))
+diff --git a/gcc/testsuite/gcc.target/loongarch/bstrins-1.c b/gcc/testsuite/gcc.target/loongarch/bstrins-1.c
+new file mode 100644
+index 000000000..7cb3a9523
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/bstrins-1.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "bstrins\\.d\t\\\$r4,\\\$r0,4,0" } } */
++
++long
++x (long a)
++{
++  return a & -32;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/bstrins-2.c b/gcc/testsuite/gcc.target/loongarch/bstrins-2.c
+new file mode 100644
+index 000000000..9777f502e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/bstrins-2.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "bstrins\\.d\t\\\$r\[0-9\]+,\\\$r0,4,0" } } */
++
++struct aligned_buffer {
++  _Alignas(32) char x[1024];
++};
++
++extern int f(char *);
++int g(void)
++{
++  struct aligned_buffer buf;
++  return f(buf.x);
++}
+-- 
+2.43.0
+
diff --git a/SME-0067-aarch64-Add-tuple-forms-of-svreinterpret.patch b/0170-Backport-SME-aarch64-Add-tuple-forms-of-svreinterpre.patch
similarity index 99%
rename from SME-0067-aarch64-Add-tuple-forms-of-svreinterpret.patch
rename to 0170-Backport-SME-aarch64-Add-tuple-forms-of-svreinterpre.patch
index 4474ad49a848dd8a42e3c7ba8a4aa030eccbecd8..b4807e2ce33885adf7442140ee53069398d3ab3b 100644
--- a/SME-0067-aarch64-Add-tuple-forms-of-svreinterpret.patch
+++ b/0170-Backport-SME-aarch64-Add-tuple-forms-of-svreinterpre.patch
@@ -1,7 +1,8 @@
-From c44a9121ed75744c13253aa483cd7a6e4531a447 Mon Sep 17 00:00:00 2001
+From 95234ef07c47dda7ac6a13f75619580a6683118c Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:22 +0000
-Subject: [PATCH 067/144] aarch64: Add tuple forms of svreinterpret
+Subject: [PATCH 071/157] [Backport][SME] aarch64: Add tuple forms of
+ svreinterpret
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=1ce9dc263c2f6d455b2013fc58932beda2a4ae92
 
@@ -324,7 +325,7 @@ index b8cc47ef5..28b73d807 100644
  	  UNSPEC_REINTERPRET))]
    "TARGET_SVE"
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index 3ce654478..bf4319aae 100644
+index a8a39b65a..8dd2035bc 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
 @@ -451,14 +451,6 @@
@@ -1231,5 +1232,5 @@ index fbf392b3e..2da61ff5c 100644
 +
  #endif
 -- 
-2.19.1
+2.33.0
 
diff --git a/0170-LoongArch-Tweak-IOR-rtx_cost-for-bstrins.patch b/0170-LoongArch-Tweak-IOR-rtx_cost-for-bstrins.patch
new file mode 100644
index 0000000000000000000000000000000000000000..32bb89c094e672f3f9e41205594e43f262fb7ce6
--- /dev/null
+++ b/0170-LoongArch-Tweak-IOR-rtx_cost-for-bstrins.patch
@@ -0,0 +1,158 @@
+From bdc189d43ef38ea53823120de8008f39ead0618d Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 15 Jun 2024 18:29:43 +0800
+Subject: [PATCH 170/188] LoongArch: Tweak IOR rtx_cost for bstrins
+
+Consider
+
+    c &= 0xfff;
+    a &= ~0xfff;
+    b &= ~0xfff;
+    a |= c;
+    b |= c;
+
+This can be done with 2 bstrins instructions.  But we need to recognize
+it in loongarch_rtx_costs or the compiler will not propagate "c & 0xfff"
+forward.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc:
+	(loongarch_use_bstrins_for_ior_with_mask): Split the main logic
+	into ...
+	(loongarch_use_bstrins_for_ior_with_mask_1): ... here.
+	(loongarch_rtx_costs): Special case for IOR those can be
+	implemented with bstrins.
+
+gcc/testsuite/ChangeLog;
+
+	* gcc.target/loongarch/bstrins-3.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 73 ++++++++++++++-----
+ .../gcc.target/loongarch/bstrins-3.c          | 16 ++++
+ 2 files changed, 72 insertions(+), 17 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/bstrins-3.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 77f83ab9e..cd9fa98dc 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3678,6 +3678,27 @@ loongarch_set_reg_reg_piece_cost (machine_mode mode, unsigned int units)
+   return COSTS_N_INSNS ((GET_MODE_SIZE (mode) + units - 1) / units);
+ }
+ 
++static int
++loongarch_use_bstrins_for_ior_with_mask_1 (machine_mode mode,
++					   unsigned HOST_WIDE_INT mask1,
++					   unsigned HOST_WIDE_INT mask2)
++{
++  if (mask1 != ~mask2 || !mask1 || !mask2)
++    return 0;
++
++  /* Try to avoid a right-shift.  */
++  if (low_bitmask_len (mode, mask1) != -1)
++    return -1;
++
++  if (low_bitmask_len (mode, mask2 >> (ffs_hwi (mask2) - 1)) != -1)
++    return 1;
++
++  if (low_bitmask_len (mode, mask1 >> (ffs_hwi (mask1) - 1)) != -1)
++    return -1;
++
++  return 0;
++}
++
+ /* Return the cost of moving between two registers of mode MODE.  */
+ 
+ static int
+@@ -3809,6 +3830,38 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code,
+       /* Fall through.  */
+ 
+     case IOR:
++      {
++	rtx op[2] = {XEXP (x, 0), XEXP (x, 1)};
++	if (GET_CODE (op[0]) == AND && GET_CODE (op[1]) == AND
++	    && (mode == SImode || (TARGET_64BIT && mode == DImode)))
++	  {
++	    rtx rtx_mask0 = XEXP (op[0], 1), rtx_mask1 = XEXP (op[1], 1);
++	    if (CONST_INT_P (rtx_mask0) && CONST_INT_P (rtx_mask1))
++	      {
++		unsigned HOST_WIDE_INT mask0 = UINTVAL (rtx_mask0);
++		unsigned HOST_WIDE_INT mask1 = UINTVAL (rtx_mask1);
++		if (loongarch_use_bstrins_for_ior_with_mask_1 (mode,
++							       mask0,
++							       mask1))
++		  {
++		    /* A bstrins instruction */
++		    *total = COSTS_N_INSNS (1);
++
++		    /* A srai instruction */
++		    if (low_bitmask_len (mode, mask0) == -1
++			&& low_bitmask_len (mode, mask1) == -1)
++		      *total += COSTS_N_INSNS (1);
++
++		    for (int i = 0; i < 2; i++)
++		      *total += set_src_cost (XEXP (op[i], 0), mode, speed);
++
++		    return true;
++		  }
++	      }
++	  }
++      }
++
++      /* Fall through.  */
+     case XOR:
+       /* Double-word operations use two single-word operations.  */
+       *total = loongarch_binary_cost (x, COSTS_N_INSNS (1), COSTS_N_INSNS (2),
+@@ -5793,23 +5846,9 @@ bool loongarch_pre_reload_split (void)
+ int
+ loongarch_use_bstrins_for_ior_with_mask (machine_mode mode, rtx *op)
+ {
+-  unsigned HOST_WIDE_INT mask1 = UINTVAL (op[2]);
+-  unsigned HOST_WIDE_INT mask2 = UINTVAL (op[4]);
+-
+-  if (mask1 != ~mask2 || !mask1 || !mask2)
+-    return 0;
+-
+-  /* Try to avoid a right-shift.  */
+-  if (low_bitmask_len (mode, mask1) != -1)
+-    return -1;
+-
+-  if (low_bitmask_len (mode, mask2 >> (ffs_hwi (mask2) - 1)) != -1)
+-    return 1;
+-
+-  if (low_bitmask_len (mode, mask1 >> (ffs_hwi (mask1) - 1)) != -1)
+-    return -1;
+-
+-  return 0;
++  return loongarch_use_bstrins_for_ior_with_mask_1 (mode,
++						    UINTVAL (op[2]),
++						    UINTVAL (op[4]));
+ }
+ 
+ /* Rewrite a MEM for simple load/store under -mexplicit-relocs=auto
+diff --git a/gcc/testsuite/gcc.target/loongarch/bstrins-3.c b/gcc/testsuite/gcc.target/loongarch/bstrins-3.c
+new file mode 100644
+index 000000000..13762bdef
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/bstrins-3.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fdump-rtl-final" } */
++/* { dg-final { scan-rtl-dump-times "insv\[sd\]i" 2 "final" } } */
++
++struct X {
++  long a, b;
++};
++
++struct X
++test (long a, long b, long c)
++{
++  c &= 0xfff;
++  a &= ~0xfff;
++  b &= ~0xfff;
++  return (struct X){.a = a | c, .b = b | c}; 
++}
+-- 
+2.43.0
+
diff --git a/SME-0068-attribs-Use-existing-traits-for-excl_hash_traits.patch b/0171-Backport-SME-attribs-Use-existing-traits-for-excl_ha.patch
similarity index 93%
rename from SME-0068-attribs-Use-existing-traits-for-excl_hash_traits.patch
rename to 0171-Backport-SME-attribs-Use-existing-traits-for-excl_ha.patch
index f7f858799860595143227dae8a9a0f3db0ec38ed..ecd15bd7358bcfa5efd43bb1ba8fe68d001a777e 100644
--- a/SME-0068-attribs-Use-existing-traits-for-excl_hash_traits.patch
+++ b/0171-Backport-SME-attribs-Use-existing-traits-for-excl_ha.patch
@@ -1,7 +1,8 @@
-From 367ce46f89de691953b2cb35752025b91cdd7dcd Mon Sep 17 00:00:00 2001
+From 11f813112629dbad432134f7b4c7c9a93551eb3c Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Mon, 27 Nov 2023 13:38:16 +0000
-Subject: [PATCH 068/144] attribs: Use existing traits for excl_hash_traits
+Subject: [PATCH 072/157] [Backport][SME] attribs: Use existing traits for
+ excl_hash_traits
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=5b33cf3a3a2025a4856f90fea8bd04884c2f6b31
 
@@ -85,5 +86,5 @@ index b219f8780..16d05b1da 100644
  
    for (size_t ti0 = 0; ti0 != ntables; ++ti0)
 -- 
-2.19.1
+2.33.0
 
diff --git a/0171-LoongArch-NFC-Dedup-and-sort-the-comment-in-loongarc.patch b/0171-LoongArch-NFC-Dedup-and-sort-the-comment-in-loongarc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4fdc8981191c795fb5f8c46323330fb6f3e9dc8d
--- /dev/null
+++ b/0171-LoongArch-NFC-Dedup-and-sort-the-comment-in-loongarc.patch
@@ -0,0 +1,44 @@
+From 51c20768fde58093794ff0281c698b6738346313 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 16 Jun 2024 12:22:40 +0800
+Subject: [PATCH 171/188] LoongArch: NFC: Dedup and sort the comment in
+ loongarch_print_operand_reloc
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_print_operand_reloc):
+	Dedup and sort the comment describing modifiers.
+---
+ gcc/config/loongarch/loongarch.cc | 10 +---------
+ 1 file changed, 1 insertion(+), 9 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index cd9fa98dc..35524b5da 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6129,21 +6129,13 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+    'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+ 	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
+    't'	Like 'T', but with the EQ/NE cases reversed
+-   'F'	Print the FPU branch condition for comparison OP.
+-   'W'	Print the inverse of the FPU branch condition for comparison OP.
+-   'w'	Print a LSX register.
+    'u'	Print a LASX register.
+-   'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+-	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
+-   't'	Like 'T', but with the EQ/NE cases reversed
+-   'Y'	Print loongarch_fp_conditions[INTVAL (OP)]
+-   'Z'	Print OP and a comma for 8CC, otherwise print nothing.
+-   'z'	Print $0 if OP is zero, otherwise print OP normally.
+    'v'	Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI,
+ 	  V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively.
+    'V'	Print exact log2 of CONST_INT OP element 0 of a replicated
+ 	  CONST_VECTOR in decimal.
+    'W'	Print the inverse of the FPU branch condition for comparison OP.
++   'w'	Print a LSX register.
+    'X'	Print CONST_INT OP in hexadecimal format.
+    'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format.
+    'Y'	Print loongarch_fp_conditions[INTVAL (OP)]
+-- 
+2.43.0
+
diff --git a/SME-0069-Allow-target-attributes-in-non-gnu-namespaces.patch b/0172-Backport-SME-Allow-target-attributes-in-non-gnu-name.patch
similarity index 98%
rename from SME-0069-Allow-target-attributes-in-non-gnu-namespaces.patch
rename to 0172-Backport-SME-Allow-target-attributes-in-non-gnu-name.patch
index a64d13b739eb890f3254c60c0c9b4ded5981b0f8..5f5b8f372bd2b761a630fef929870fd015049c31 100644
--- a/SME-0069-Allow-target-attributes-in-non-gnu-namespaces.patch
+++ b/0172-Backport-SME-Allow-target-attributes-in-non-gnu-name.patch
@@ -1,7 +1,8 @@
-From c8ff95e9194b50301c2a47de6ab99cf820450c25 Mon Sep 17 00:00:00 2001
+From 82d654912e3671055034e789a8f7110f6d87d447 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 2 Dec 2023 13:49:52 +0000
-Subject: [PATCH 069/144] Allow target attributes in non-gnu namespaces
+Subject: [PATCH 073/157] [Backport][SME] Allow target attributes in non-gnu
+ namespaces
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=7fa24687aa3a683fd105ce5ff6b176f48dca3b6c
 
@@ -869,10 +870,10 @@ index c70f0ba5a..654bd4094 100644
  namespace selftest {
    extern void run_c_tests (void);
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index b776ff6e5..dad362479 100644
+index 4194dfc70..114252a3c 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2769,7 +2769,7 @@ handle_aarch64_vector_pcs_attribute (tree *node, tree name, tree,
+@@ -2986,7 +2986,7 @@ handle_aarch64_vector_pcs_attribute (tree *node, tree name, tree,
  }
  
  /* Table of machine attributes.  */
@@ -881,7 +882,7 @@ index b776ff6e5..dad362479 100644
  {
    /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
         affects_type_identity, handler, exclude } */
-@@ -2780,9 +2780,8 @@ static const struct attribute_spec aarch64_attribute_table[] =
+@@ -2997,9 +2997,8 @@ static const struct attribute_spec aarch64_attribute_table[] =
  			  NULL },
    { "Advanced SIMD type", 1, 1, false, true,  false, true,  NULL, NULL },
    { "SVE type",		  3, 3, false, true,  false, true,  NULL, NULL },
@@ -894,7 +895,7 @@ index b776ff6e5..dad362479 100644
  /* An ISA extension in the co-processor and main instruction set space.  */
  struct aarch64_option_extension
 diff --git a/gcc/config/alpha/alpha.cc b/gcc/config/alpha/alpha.cc
-index 0a85e66fa..107bf8483 100644
+index 66c17149d..7fb491918 100644
 --- a/gcc/config/alpha/alpha.cc
 +++ b/gcc/config/alpha/alpha.cc
 @@ -7475,14 +7475,13 @@ common_object_handler (tree *node, tree name ATTRIBUTE_UNUSED,
@@ -1214,10 +1215,10 @@ index 78cf15f15..a0fa689de 100644
  
  /* Handle an attribute requiring a FUNCTION_DECL; arguments as in
 diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
-index ff44ad4e0..d646a1f34 100644
+index 86932d719..991661fe4 100644
 --- a/gcc/config/i386/i386-options.cc
 +++ b/gcc/config/i386/i386-options.cc
-@@ -3828,7 +3828,7 @@ handle_nodirect_extern_access_attribute (tree *pnode, tree name,
+@@ -3875,7 +3875,7 @@ handle_nodirect_extern_access_attribute (tree *pnode, tree name,
  }
  
  /* Table of valid machine attributes.  */
@@ -1226,7 +1227,7 @@ index ff44ad4e0..d646a1f34 100644
  {
    /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
         affects_type_identity, handler, exclude } */
-@@ -3908,10 +3908,12 @@ const struct attribute_spec ix86_attribute_table[] =
+@@ -3955,10 +3955,12 @@ const struct attribute_spec ix86_attribute_table[] =
    { "cf_check", 0, 0, true, false, false, false,
      ix86_handle_fndecl_attribute, NULL },
    { "nodirect_extern_access", 0, 0, true, false, false, false,
@@ -1256,10 +1257,10 @@ index ce4034f62..a7bdb22c0 100644
  
  #endif  /* GCC_I386_OPTIONS_H */
 diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
-index 6847e90bd..19c1b7602 100644
+index 83a0d8abb..ade965927 100644
 --- a/gcc/config/i386/i386.cc
 +++ b/gcc/config/i386/i386.cc
-@@ -24275,6 +24275,11 @@ ix86_run_selftests (void)
+@@ -24293,6 +24293,11 @@ ix86_run_selftests (void)
  
  #endif /* CHECKING_P */
  
@@ -1425,10 +1426,10 @@ index f32effecf..6b14d3e29 100644
  static int microblaze_interrupt_function_p (tree);
  
 diff --git a/gcc/config/mips/mips.cc b/gcc/config/mips/mips.cc
-index e64928f41..905c4eda7 100644
+index 02d11ddbf..5474ca152 100644
 --- a/gcc/config/mips/mips.cc
 +++ b/gcc/config/mips/mips.cc
-@@ -608,7 +608,7 @@ static tree mips_handle_use_shadow_register_set_attr (tree *, tree, tree, int,
+@@ -607,7 +607,7 @@ static tree mips_handle_use_shadow_register_set_attr (tree *, tree, tree, int,
  						      bool *);
  
  /* The value of TARGET_ATTRIBUTE_TABLE.  */
@@ -1437,7 +1438,7 @@ index e64928f41..905c4eda7 100644
    /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
         affects_type_identity, handler, exclude } */
    { "long_call",   0, 0, false, true,  true,  false, NULL, NULL },
-@@ -630,9 +630,8 @@ static const struct attribute_spec mips_attribute_table[] = {
+@@ -629,9 +629,8 @@ static const struct attribute_spec mips_attribute_table[] = {
    { "use_shadow_register_set",	0, 1, false, true,  true, false,
      mips_handle_use_shadow_register_set_attr, NULL },
    { "keep_interrupts_masked",	0, 0, false, true,  true, false, NULL, NULL },
@@ -1503,10 +1504,10 @@ index 27530495f..519b11e4c 100644
  
  /* ------------------------------------------------------------------------ */
 diff --git a/gcc/config/nvptx/nvptx.cc b/gcc/config/nvptx/nvptx.cc
-index d3ac149d1..c68df80c7 100644
+index 7f2103ba6..9a3e418f4 100644
 --- a/gcc/config/nvptx/nvptx.cc
 +++ b/gcc/config/nvptx/nvptx.cc
-@@ -5818,16 +5818,15 @@ nvptx_handle_shared_attribute (tree *node, tree name, tree ARG_UNUSED (args),
+@@ -5817,16 +5817,15 @@ nvptx_handle_shared_attribute (tree *node, tree name, tree ARG_UNUSED (args),
  }
  
  /* Table of valid machine attributes.  */
@@ -1527,7 +1528,7 @@ index d3ac149d1..c68df80c7 100644
  /* Limit vector alignments to BIGGEST_ALIGNMENT.  */
  
 diff --git a/gcc/config/riscv/riscv.cc b/gcc/config/riscv/riscv.cc
-index 4939d9964..216b34de0 100644
+index 9cf79beba..f5a27bdc9 100644
 --- a/gcc/config/riscv/riscv.cc
 +++ b/gcc/config/riscv/riscv.cc
 @@ -336,7 +336,7 @@ static tree riscv_handle_fndecl_attribute (tree *, tree, tree, int, bool *);
@@ -1579,10 +1580,10 @@ index b3727c0a8..97386c7ea 100644
  
  
 diff --git a/gcc/config/rs6000/rs6000.cc b/gcc/config/rs6000/rs6000.cc
-index 998f4e190..ad57b8a3d 100644
+index 55d4ce751..46e3d1a12 100644
 --- a/gcc/config/rs6000/rs6000.cc
 +++ b/gcc/config/rs6000/rs6000.cc
-@@ -1259,7 +1259,7 @@ static const char alt_reg_names[][8] =
+@@ -1276,7 +1276,7 @@ static const char alt_reg_names[][8] =
  
  /* Table of valid machine attributes.  */
  
@@ -1591,7 +1592,7 @@ index 998f4e190..ad57b8a3d 100644
  {
    /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
         affects_type_identity, handler, exclude } */
-@@ -1276,7 +1276,16 @@ static const struct attribute_spec rs6000_attribute_table[] =
+@@ -1293,7 +1293,16 @@ static const struct attribute_spec rs6000_attribute_table[] =
  #ifdef SUBTARGET_ATTRIBUTE_TABLE
    SUBTARGET_ATTRIBUTE_TABLE,
  #endif
@@ -1995,10 +1996,10 @@ index aedbdd80a..d4245b63b 100644
  
  extern tree d_builtin_function (tree);
 diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
-index 9943fe651..782a922ae 100644
+index 4f93facf7..95d96ce1b 100644
 --- a/gcc/doc/tm.texi
 +++ b/gcc/doc/tm.texi
-@@ -10637,12 +10637,33 @@ Target-specific attributes may be defined for functions, data and types.
+@@ -10427,12 +10427,33 @@ Target-specific attributes may be defined for functions, data and types.
  These are described using the following target hooks; they also need to
  be documented in @file{extend.texi}.
  
@@ -2266,10 +2267,10 @@ index ff999c405..e29651d35 100644
  
  #endif /* PLUGIN_H */
 diff --git a/gcc/target-def.h b/gcc/target-def.h
-index 1c4aa2963..c9070da8c 100644
+index f81f8fe3b..70fb393f3 100644
 --- a/gcc/target-def.h
 +++ b/gcc/target-def.h
-@@ -118,6 +118,20 @@
+@@ -114,6 +114,20 @@
  #define TARGET_FUNCTION_INCOMING_ARG TARGET_FUNCTION_ARG
  #endif
  
@@ -2291,7 +2292,7 @@ index 1c4aa2963..c9070da8c 100644
  
  #include "hooks.h"
 diff --git a/gcc/target.def b/gcc/target.def
-index 562882358..dd1367246 100644
+index 60096c60c..6cdc09fc2 100644
 --- a/gcc/target.def
 +++ b/gcc/target.def
 @@ -2199,15 +2199,36 @@ merging.",
@@ -2339,10 +2340,10 @@ index 562882358..dd1367246 100644
  /* Return true iff attribute NAME expects a plain identifier as its first
     argument.  */
 diff --git a/gcc/tree-inline.cc b/gcc/tree-inline.cc
-index f892cee3f..7de1825a4 100644
+index f50dbbc52..67879c2c8 100644
 --- a/gcc/tree-inline.cc
 +++ b/gcc/tree-inline.cc
-@@ -4091,17 +4091,16 @@ inline_forbidden_p (tree fndecl)
+@@ -4105,17 +4105,16 @@ inline_forbidden_p (tree fndecl)
  static bool
  function_attribute_inlinable_p (const_tree fndecl)
  {
@@ -2364,5 +2365,5 @@ index f892cee3f..7de1825a4 100644
  	}
      }
 -- 
-2.19.1
+2.33.0
 
diff --git a/0172-LoongArch-Fix-explicit-relocs-extreme-tls-desc.c-tes.patch b/0172-LoongArch-Fix-explicit-relocs-extreme-tls-desc.c-tes.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3a597184057a35334a20fdda054912dee61f348d
--- /dev/null
+++ b/0172-LoongArch-Fix-explicit-relocs-extreme-tls-desc.c-tes.patch
@@ -0,0 +1,45 @@
+From 9503e64bf304d44947791d9ff17d65a6905e59ce Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 28 Jun 2024 15:04:26 +0800
+Subject: [PATCH 172/188] LoongArch: Fix explicit-relocs-{extreme-,}tls-desc.c
+ tests.
+
+After r15-1579, ADD and LD/ST pairs will be merged into LDX/STX.
+Cause these two tests to fail. To guarantee that these two tests pass,
+add the compilation option '-fno-late-combine-instructions'.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c:
+	Add compilation options '-fno-late-combine-instructions'.
+	* gcc.target/loongarch/explicit-relocs-tls-desc.c: Likewise.
+---
+ .../gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c     | 2 +-
+ gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c   | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c
+index 3797556e1..e9eb0d6f7 100644
+--- a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-extreme-tls-desc.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fPIC -mexplicit-relocs -mtls-dialect=desc -mcmodel=extreme" } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs -mtls-dialect=desc -mcmodel=extreme -fno-late-combine-instructions" } */
+ 
+ __thread int a __attribute__((visibility("hidden")));
+ extern __thread int b __attribute__((visibility("default")));
+diff --git a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c
+index f66903091..fed478458 100644
+--- a/gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c
++++ b/gcc/testsuite/gcc.target/loongarch/explicit-relocs-tls-desc.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fPIC -mexplicit-relocs -mtls-dialect=desc" } */
++/* { dg-options "-O2 -fPIC -mexplicit-relocs -mtls-dialect=desc -fno-late-combine-instructions" } */
+ 
+ __thread int a __attribute__((visibility("hidden")));
+ extern __thread int b __attribute__((visibility("default")));
+-- 
+2.43.0
+
diff --git a/SME-0070-aarch64-Fix-plugin-header-install.patch b/0173-Backport-SME-aarch64-Fix-plugin-header-install.patch
similarity index 94%
rename from SME-0070-aarch64-Fix-plugin-header-install.patch
rename to 0173-Backport-SME-aarch64-Fix-plugin-header-install.patch
index 34efa3fb2cd7d61348e7e21b3eea1d131a458f53..4bea8a5d72b93f55f28a742894bd0a8861e84442 100644
--- a/SME-0070-aarch64-Fix-plugin-header-install.patch
+++ b/0173-Backport-SME-aarch64-Fix-plugin-header-install.patch
@@ -1,7 +1,7 @@
-From 2a1a11b4a0a13d3ff874f0be89e876d0d75ada82 Mon Sep 17 00:00:00 2001
+From b1025ef48bff0622e54822dc0974f38748e9109f Mon Sep 17 00:00:00 2001
 From: Jakub Jelinek 
 Date: Thu, 22 Dec 2022 11:15:47 +0100
-Subject: [PATCH 070/144] aarch64: Fix plugin header install
+Subject: [PATCH 074/157] [Backport][SME] aarch64: Fix plugin header install
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=5b30e9bc211fede06cf85b54e466012540bef14d
 
@@ -60,5 +60,5 @@ index ba74abc0a..6a21a248f 100644
  $(srcdir)/config/aarch64/aarch64-tune.md: s-aarch64-tune-md; @true
  s-aarch64-tune-md: $(srcdir)/config/aarch64/gentune.sh \
 -- 
-2.19.1
+2.33.0
 
diff --git a/0173-LoongArch-Define-loongarch_insn_cost-and-set-the-cos.patch b/0173-LoongArch-Define-loongarch_insn_cost-and-set-the-cos.patch
new file mode 100644
index 0000000000000000000000000000000000000000..46b78b8dc39dc1af904b7aa3db9ea39b9d6acec6
--- /dev/null
+++ b/0173-LoongArch-Define-loongarch_insn_cost-and-set-the-cos.patch
@@ -0,0 +1,70 @@
+From 727b1a2cff9cecd904545895bbf39a89fbf1ea4f Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 28 Jun 2024 15:09:48 +0800
+Subject: [PATCH 173/188] LoongArch: Define loongarch_insn_cost and set the
+ cost of movcf2gr and movgr2cf.
+
+The following two FAIL items have been fixed:
+
+FAIL: gcc.target/loongarch/movcf2gr-via-fr.c scan-assembler movcf2fr\\t\\\\\$f[0-9]+,\\\\\$fcc
+FAIL: gcc.target/loongarch/movcf2gr-via-fr.c scan-assembler movfr2gr\\\\.s\\t\\\\\$r4
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_insn_cost):
+	New function.
+	(TARGET_INSN_COST): New macro.
+---
+ gcc/config/loongarch/loongarch.cc | 29 +++++++++++++++++++++++++++++
+ 1 file changed, 29 insertions(+)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 35524b5da..958e82b86 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4369,6 +4369,33 @@ loongarch_address_cost (rtx addr, machine_mode mode,
+   return loongarch_address_insns (addr, mode, false);
+ }
+ 
++/* Implement TARGET_INSN_COST.  */
++
++static int
++loongarch_insn_cost (rtx_insn *insn, bool speed)
++{
++  rtx x = PATTERN (insn);
++  int cost = pattern_cost (x, speed);
++
++  /* On LA464, prevent movcf2fr and movfr2gr from merging into movcf2gr.  */
++  if (GET_CODE (x) == SET
++      && GET_MODE (XEXP (x, 0)) == FCCmode)
++    {
++      rtx dest, src;
++      dest = XEXP (x, 0);
++      src = XEXP (x, 1);
++
++      if (REG_P (dest) && REG_P (src))
++	{
++	  if (GP_REG_P (REGNO (dest)) && FCC_REG_P (REGNO (src)))
++	    cost = loongarch_cost->movcf2gr;
++	  else if (FCC_REG_P (REGNO (dest)) && GP_REG_P (REGNO (src)))
++	    cost = loongarch_cost->movgr2cf;
++	}
++    }
++  return cost;
++}
++
+ /* Return one word of double-word value OP, taking into account the fixed
+    endianness of certain registers.  HIGH_P is true to select the high part,
+    false to select the low part.  */
+@@ -11089,6 +11116,8 @@ loongarch_asm_code_end (void)
+ #define TARGET_RTX_COSTS loongarch_rtx_costs
+ #undef TARGET_ADDRESS_COST
+ #define TARGET_ADDRESS_COST loongarch_address_cost
++#undef TARGET_INSN_COST
++#define TARGET_INSN_COST loongarch_insn_cost
+ #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
+ #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
+   loongarch_builtin_vectorization_cost
+-- 
+2.43.0
+
diff --git a/SME-0071-aarch64-Add-arm_streaming-_compatible-attributes.patch b/0174-Backport-SME-aarch64-Add-arm_streaming-_compatible-a.patch
similarity index 96%
rename from SME-0071-aarch64-Add-arm_streaming-_compatible-attributes.patch
rename to 0174-Backport-SME-aarch64-Add-arm_streaming-_compatible-a.patch
index 15011f567202aea651f1efc8d14084a44b5c7b82..381f4ce1dcc8a645f7d7acc99a02fc3de8e14701 100644
--- a/SME-0071-aarch64-Add-arm_streaming-_compatible-attributes.patch
+++ b/0174-Backport-SME-aarch64-Add-arm_streaming-_compatible-a.patch
@@ -1,7 +1,8 @@
-From 4e06aa48dae93ac341fa25d9f50550aaff061b42 Mon Sep 17 00:00:00 2001
+From 70b732b4518dd0e44b9e6bfaaad78492b8db8f29 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:23 +0000
-Subject: [PATCH 071/144] aarch64: Add arm_streaming(_compatible) attributes
+Subject: [PATCH 075/157] [Backport][SME] aarch64: Add
+ arm_streaming(_compatible) attributes
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=2c9a54b4238308b127c3b60b01a591363131e7db
 
@@ -210,10 +211,10 @@ index 14a568140..9b03410dc 100644
  enum aarch64_symbol_type aarch64_classify_tls_symbol (rtx);
  enum reg_class aarch64_regno_regclass (unsigned);
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index dad362479..d44fcb98b 100644
+index 114252a3c..904166b21 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -2768,8 +2768,18 @@ handle_aarch64_vector_pcs_attribute (tree *node, tree name, tree,
+@@ -2985,8 +2985,18 @@ handle_aarch64_vector_pcs_attribute (tree *node, tree name, tree,
    gcc_unreachable ();
  }
  
@@ -233,7 +234,7 @@ index dad362479..d44fcb98b 100644
  {
    /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
         affects_type_identity, handler, exclude } */
-@@ -2781,7 +2791,31 @@ TARGET_GNU_ATTRIBUTES (aarch64_attribute_table,
+@@ -2998,7 +3008,31 @@ TARGET_GNU_ATTRIBUTES (aarch64_attribute_table,
    { "Advanced SIMD type", 1, 1, false, true,  false, true,  NULL, NULL },
    { "SVE type",		  3, 3, false, true,  false, true,  NULL, NULL },
    { "SVE sizeless type",  0, 0, false, true,  false, true,  NULL, NULL }
@@ -266,7 +267,7 @@ index dad362479..d44fcb98b 100644
  
  /* An ISA extension in the co-processor and main instruction set space.  */
  struct aarch64_option_extension
-@@ -4084,6 +4118,48 @@ aarch64_fntype_abi (const_tree fntype)
+@@ -4301,6 +4335,48 @@ aarch64_fntype_abi (const_tree fntype)
    return default_function_abi;
  }
  
@@ -315,7 +316,7 @@ index dad362479..d44fcb98b 100644
  /* Implement TARGET_COMPATIBLE_VECTOR_TYPES_P.  */
  
  static bool
-@@ -4146,17 +4222,46 @@ aarch64_reg_save_mode (unsigned int regno)
+@@ -4363,17 +4439,46 @@ aarch64_reg_save_mode (unsigned int regno)
    gcc_unreachable ();
  }
  
@@ -366,7 +367,7 @@ index dad362479..d44fcb98b 100644
  }
  
  /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  The callee only saves
-@@ -7900,7 +8005,7 @@ aarch64_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
+@@ -8117,7 +8222,7 @@ aarch64_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
  	      || pcum->pcs_variant == ARM_PCS_SVE);
  
    if (arg.end_marker_p ())
@@ -375,7 +376,7 @@ index dad362479..d44fcb98b 100644
  
    aarch64_layout_arg (pcum_v, arg);
    return pcum->aapcs_reg;
-@@ -7921,9 +8026,15 @@ aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
+@@ -8138,9 +8243,15 @@ aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
    pcum->aapcs_nextnvrn = 0;
    pcum->aapcs_nextnprn = 0;
    if (fntype)
@@ -393,7 +394,7 @@ index dad362479..d44fcb98b 100644
    pcum->aapcs_reg = NULL_RTX;
    pcum->aapcs_arg_processed = false;
    pcum->aapcs_stack_words = 0;
-@@ -10410,7 +10521,9 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+@@ -10627,7 +10738,9 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
      }
    funexp = XEXP (DECL_RTL (function), 0);
    funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
@@ -404,7 +405,7 @@ index dad362479..d44fcb98b 100644
    insn = emit_call_insn (gen_sibcall (funexp, const0_rtx, callee_abi));
    SIBLING_CALL_P (insn) = 1;
  
-@@ -18386,6 +18499,7 @@ aarch64_override_options (void)
+@@ -18618,6 +18731,7 @@ aarch64_override_options (void)
    SUBTARGET_OVERRIDE_OPTIONS;
  #endif
  
@@ -412,7 +413,7 @@ index dad362479..d44fcb98b 100644
    if (cpu && arch)
      {
        /* If both -mcpu and -march are specified, warn if they are not
-@@ -18398,25 +18512,25 @@ aarch64_override_options (void)
+@@ -18630,25 +18744,25 @@ aarch64_override_options (void)
  	}
  
        selected_arch = arch->arch;
@@ -442,7 +443,7 @@ index dad362479..d44fcb98b 100644
      }
  
    selected_tune = tune ? tune->ident : cpu->ident;
-@@ -18589,6 +18703,21 @@ aarch64_save_restore_target_globals (tree new_tree)
+@@ -18821,6 +18935,21 @@ aarch64_save_restore_target_globals (tree new_tree)
      TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
  }
  
@@ -464,7 +465,7 @@ index dad362479..d44fcb98b 100644
  /* Implement TARGET_SET_CURRENT_FUNCTION.  Unpack the codegen decisions
     like tuning and ISA features from the DECL_FUNCTION_SPECIFIC_TARGET
     of the function, if such exists.  This function may be called multiple
-@@ -18598,25 +18727,24 @@ aarch64_save_restore_target_globals (tree new_tree)
+@@ -18830,25 +18959,24 @@ aarch64_save_restore_target_globals (tree new_tree)
  static void
  aarch64_set_current_function (tree fndecl)
  {
@@ -503,7 +504,7 @@ index dad362479..d44fcb98b 100644
  
    aarch64_previous_fndecl = fndecl;
  
-@@ -18624,7 +18752,28 @@ aarch64_set_current_function (tree fndecl)
+@@ -18856,7 +18984,28 @@ aarch64_set_current_function (tree fndecl)
    cl_target_option_restore (&global_options, &global_options_set,
  			    TREE_TARGET_OPTION (new_tree));
  
@@ -532,7 +533,7 @@ index dad362479..d44fcb98b 100644
  }
  
  /* Enum describing the various ways we can handle attributes.
-@@ -18674,7 +18823,7 @@ aarch64_handle_attr_arch (const char *str)
+@@ -18906,7 +19055,7 @@ aarch64_handle_attr_arch (const char *str)
      {
        gcc_assert (tmp_arch);
        selected_arch = tmp_arch->arch;
@@ -541,7 +542,7 @@ index dad362479..d44fcb98b 100644
        return true;
      }
  
-@@ -18715,7 +18864,7 @@ aarch64_handle_attr_cpu (const char *str)
+@@ -18947,7 +19096,7 @@ aarch64_handle_attr_cpu (const char *str)
        gcc_assert (tmp_cpu);
        selected_tune = tmp_cpu->ident;
        selected_arch = tmp_cpu->arch;
@@ -550,7 +551,7 @@ index dad362479..d44fcb98b 100644
        return true;
      }
  
-@@ -18815,7 +18964,7 @@ aarch64_handle_attr_isa_flags (char *str)
+@@ -19047,7 +19196,7 @@ aarch64_handle_attr_isa_flags (char *str)
       features if the user wants to handpick specific features.  */
    if (strncmp ("+nothing", str, 8) == 0)
      {
@@ -559,7 +560,7 @@ index dad362479..d44fcb98b 100644
        str += 8;
      }
  
-@@ -19320,7 +19469,7 @@ aarch64_can_inline_p (tree caller, tree callee)
+@@ -19552,7 +19701,7 @@ aarch64_can_inline_p (tree caller, tree callee)
  /* Return the ID of the TLDESC ABI, initializing the descriptor if hasn't
     been already.  */
  
@@ -568,7 +569,7 @@ index dad362479..d44fcb98b 100644
  aarch64_tlsdesc_abi_id ()
  {
    predefined_function_abi &tlsdesc_abi = function_abis[ARM_PCS_TLSDESC];
-@@ -19334,7 +19483,7 @@ aarch64_tlsdesc_abi_id ()
+@@ -19566,7 +19715,7 @@ aarch64_tlsdesc_abi_id ()
  	SET_HARD_REG_BIT (full_reg_clobbers, regno);
        tlsdesc_abi.initialize (ARM_PCS_TLSDESC, full_reg_clobbers);
      }
@@ -577,7 +578,7 @@ index dad362479..d44fcb98b 100644
  }
  
  /* Return true if SYMBOL_REF X binds locally.  */
-@@ -27033,22 +27182,26 @@ aarch64_simd_clone_usable (struct cgraph_node *node)
+@@ -27270,22 +27419,26 @@ aarch64_simd_clone_usable (struct cgraph_node *node)
  static int
  aarch64_comp_type_attributes (const_tree type1, const_tree type2)
  {
@@ -674,10 +675,10 @@ index 19b82b4f3..84215c8c3 100644
    int aapcs_ncrn;		/* Next Core register number.  */
    int aapcs_nextncrn;		/* Next next core register number.  */
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 92ce5085a..7c7633770 100644
+index c0977a3da..29a665e45 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -7015,7 +7015,8 @@
+@@ -7017,7 +7017,8 @@
    {
      if (TARGET_SVE)
        {
@@ -1173,5 +1174,5 @@ index 000000000..e8be0f821
 +  ns_fn_ptr = ns_fn;
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/0174-LoongArch-TFmode-is-not-allowed-to-be-stored-in-the-.patch b/0174-LoongArch-TFmode-is-not-allowed-to-be-stored-in-the-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6f728f90b922d35a1262ddc043268df5ce339622
--- /dev/null
+++ b/0174-LoongArch-TFmode-is-not-allowed-to-be-stored-in-the-.patch
@@ -0,0 +1,73 @@
+From b6b917847a25afbaba9983e80b62b69ed3ce3983 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 4 Jul 2024 10:37:26 +0800
+Subject: [PATCH 174/188] LoongArch: TFmode is not allowed to be stored in the
+ float register.
+
+	PR target/115752
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(loongarch_hard_regno_mode_ok_uncached): Replace
+	UNITS_PER_FPVALUE with UNITS_PER_HWFPVALUE.
+	* config/loongarch/loongarch.h (UNITS_PER_FPVALUE): Delete.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/pr115752.c: New test.
+---
+ gcc/config/loongarch/loongarch.cc             | 2 +-
+ gcc/config/loongarch/loongarch.h              | 7 -------
+ gcc/testsuite/gcc.target/loongarch/pr115752.c | 8 ++++++++
+ 3 files changed, 9 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr115752.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 958e82b86..b78512e0e 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6760,7 +6760,7 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
+       if (mclass == MODE_FLOAT
+ 	  || mclass == MODE_COMPLEX_FLOAT
+ 	  || mclass == MODE_VECTOR_FLOAT)
+-	return size <= UNITS_PER_FPVALUE;
++	return size <= UNITS_PER_HWFPVALUE;
+ 
+       /* Allow integer modes that fit into a single register.  We need
+ 	 to put integers into FPRs when using instructions like CVT
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 6743d2684..a23dabde1 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -146,13 +146,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define UNITS_PER_HWFPVALUE \
+   (TARGET_SOFT_FLOAT ? 0 : UNITS_PER_FP_REG)
+ 
+-/* The largest size of value that can be held in floating-point
+-   registers.  */
+-#define UNITS_PER_FPVALUE \
+-  (TARGET_SOFT_FLOAT ? 0 \
+-   : TARGET_SINGLE_FLOAT ? UNITS_PER_FP_REG \
+-			 : LONG_DOUBLE_TYPE_SIZE / BITS_PER_UNIT)
+-
+ /* The number of bytes in a double.  */
+ #define UNITS_PER_DOUBLE (TYPE_PRECISION (double_type_node) / BITS_PER_UNIT)
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr115752.c b/gcc/testsuite/gcc.target/loongarch/pr115752.c
+new file mode 100644
+index 000000000..df4bae524
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr115752.c
+@@ -0,0 +1,8 @@
++/* { dg-do compile } */
++
++long double
++test (long double xx)
++{
++   __asm ("" :: "f"(xx)); /* { dg-error "inconsistent operand constraints in an 'asm'" } */
++   return xx + 1;
++}
+-- 
+2.43.0
+
diff --git a/SME-0072-aarch64-Add-sme.patch b/0175-Backport-SME-aarch64-Add-sme.patch
similarity index 95%
rename from SME-0072-aarch64-Add-sme.patch
rename to 0175-Backport-SME-aarch64-Add-sme.patch
index 8b3964da511f6ec5c1487462a141e6c644aa9588..fc3ef348e045c7825d3228a88ac3ea045293a268 100644
--- a/SME-0072-aarch64-Add-sme.patch
+++ b/0175-Backport-SME-aarch64-Add-sme.patch
@@ -1,7 +1,7 @@
-From 6ee4fd41340e0cfd894839ec9ef51c76bd0d59a3 Mon Sep 17 00:00:00 2001
+From c097d9ffc7dd8f90f78eb3b994f3691f4c8f812d Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:23 +0000
-Subject: [PATCH 072/144] aarch64: Add +sme
+Subject: [PATCH 076/157] [Backport][SME] aarch64: Add +sme
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=7e04bd1fadf3410c3d24b56f650a52ff53d01a3c
 
@@ -53,10 +53,10 @@ index bdf4baf30..faee64a79 100644
 +
  #undef AARCH64_OPT_EXTENSION
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index d44fcb98b..042e8002f 100644
+index 904166b21..8f8395201 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -11431,6 +11431,23 @@ aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
+@@ -11648,6 +11648,23 @@ aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
    return true;
  }
  
@@ -80,7 +80,7 @@ index d44fcb98b..042e8002f 100644
  /* This function is used by the call expanders of the machine description.
     RESULT is the register in which the result is returned.  It's NULL for
     "call" and "sibcall".
-@@ -17962,6 +17979,19 @@ aarch64_override_options_internal (struct gcc_options *opts)
+@@ -18194,6 +18211,19 @@ aarch64_override_options_internal (struct gcc_options *opts)
        && !fixed_regs[R18_REGNUM])
      error ("%<-fsanitize=shadow-call-stack%> requires %<-ffixed-x18%>");
  
@@ -100,7 +100,7 @@ index d44fcb98b..042e8002f 100644
    initialize_aarch64_code_model (opts);
    initialize_aarch64_tls_size (opts);
  
-@@ -27836,6 +27866,9 @@ aarch64_run_selftests (void)
+@@ -28159,6 +28189,9 @@ aarch64_get_v16qi_mode ()
  #undef TARGET_FUNCTION_VALUE_REGNO_P
  #define TARGET_FUNCTION_VALUE_REGNO_P aarch64_function_value_regno_p
  
@@ -134,10 +134,10 @@ index 84215c8c3..dd2de4e88 100644
  #define TARGET_ARMV8_3	(AARCH64_ISA_V8_3A)
  
 diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
-index e65d4f991..3a65affce 100644
+index 53709b246..2420b05d9 100644
 --- a/gcc/doc/invoke.texi
 +++ b/gcc/doc/invoke.texi
-@@ -19474,6 +19474,8 @@ Enable the instructions to accelerate memory operations like @code{memcpy},
+@@ -19478,6 +19478,8 @@ Enable the instructions to accelerate memory operations like @code{memcpy},
  Enable the Flag Manipulation instructions Extension.
  @item pauth
  Enable the Pointer Authentication Extension.
@@ -147,7 +147,7 @@ index e65d4f991..3a65affce 100644
  @end table
  
 diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
-index 71c04841d..cc426beb4 100644
+index 454fae11a..80936a0eb 100644
 --- a/gcc/doc/sourcebuild.texi
 +++ b/gcc/doc/sourcebuild.texi
 @@ -2277,6 +2277,8 @@ AArch64 target which generates instruction sequences for big endian.
@@ -303,10 +303,10 @@ index 000000000..50e92f2e1
 +void s_e () [[arm::streaming]];
 +void ns_e ();
 diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
-index c858bd93b..6016a0052 100644
+index bd89d4f52..e2a9ef5fa 100644
 --- a/gcc/testsuite/lib/target-supports.exp
 +++ b/gcc/testsuite/lib/target-supports.exp
-@@ -3896,6 +3896,18 @@ proc aarch64_sve_bits { } {
+@@ -3887,6 +3887,18 @@ proc aarch64_sve_bits { } {
      }]
  }
  
@@ -326,5 +326,5 @@ index c858bd93b..6016a0052 100644
  proc check_effective_target_arc_atomic { } {
      return [check_no_compiler_messages arc_atomic assembly {
 -- 
-2.19.1
+2.33.0
 
diff --git a/0175-LoongArch-Remove-unreachable-codes.patch b/0175-LoongArch-Remove-unreachable-codes.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c4a674a4e082f89fa9a209844105179dc8b3b53d
--- /dev/null
+++ b/0175-LoongArch-Remove-unreachable-codes.patch
@@ -0,0 +1,249 @@
+From a75609d90f506709e4af26701aa4fb6adce00700 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 4 Jul 2024 15:00:40 +0800
+Subject: [PATCH 175/188] LoongArch: Remove unreachable codes.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(loongarch_split_move): Delete.
+	(loongarch_hard_regno_mode_ok_uncached): Likewise.
+	* config/loongarch/loongarch.md
+	(move_doubleword_fpr): Likewise.
+	(load_low): Likewise.
+	(load_high): Likewise.
+	(store_word): Likewise.
+	(movgr2frh): Likewise.
+	(movfrh2gr): Likewise.
+---
+ gcc/config/loongarch/loongarch.cc |  47 +++----------
+ gcc/config/loongarch/loongarch.md | 109 ------------------------------
+ 2 files changed, 8 insertions(+), 148 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index b78512e0e..260dd7b5f 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4459,42 +4459,13 @@ loongarch_split_move_p (rtx dest, rtx src)
+ void
+ loongarch_split_move (rtx dest, rtx src)
+ {
+-  rtx low_dest;
+-
+   gcc_checking_assert (loongarch_split_move_p (dest, src));
+   if (LSX_SUPPORTED_MODE_P (GET_MODE (dest)))
+     loongarch_split_128bit_move (dest, src);
+   else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
+     loongarch_split_256bit_move (dest, src);
+-  else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
+-    {
+-      if (!TARGET_64BIT && GET_MODE (dest) == DImode)
+-	emit_insn (gen_move_doubleword_fprdi (dest, src));
+-      else if (!TARGET_64BIT && GET_MODE (dest) == DFmode)
+-	emit_insn (gen_move_doubleword_fprdf (dest, src));
+-      else if (TARGET_64BIT && GET_MODE (dest) == TFmode)
+-	emit_insn (gen_move_doubleword_fprtf (dest, src));
+-      else
+-	gcc_unreachable ();
+-    }
+   else
+-    {
+-      /* The operation can be split into two normal moves.  Decide in
+-	 which order to do them.  */
+-      low_dest = loongarch_subword (dest, false);
+-      if (REG_P (low_dest) && reg_overlap_mentioned_p (low_dest, src))
+-	{
+-	  loongarch_emit_move (loongarch_subword (dest, true),
+-			       loongarch_subword (src, true));
+-	  loongarch_emit_move (low_dest, loongarch_subword (src, false));
+-	}
+-      else
+-	{
+-	  loongarch_emit_move (low_dest, loongarch_subword (src, false));
+-	  loongarch_emit_move (loongarch_subword (dest, true),
+-			       loongarch_subword (src, true));
+-	}
+-    }
++    gcc_unreachable ();
+ }
+ 
+ /* Check if adding an integer constant value for a specific mode can be
+@@ -6743,20 +6714,18 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
+   size = GET_MODE_SIZE (mode);
+   mclass = GET_MODE_CLASS (mode);
+ 
+-  if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode)
++  if (GP_REG_P (regno)
++      && !LSX_SUPPORTED_MODE_P (mode)
+       && !LASX_SUPPORTED_MODE_P (mode))
+     return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
+ 
+-  /* For LSX, allow TImode and 128-bit vector modes in all FPR.  */
+-  if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode))
+-    return true;
+-
+-  /* FIXED ME: For LASX, allow TImode and 256-bit vector modes in all FPR.  */
+-  if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode))
+-    return true;
+-
+   if (FP_REG_P (regno))
+     {
++      /* Allow 128-bit or 256-bit vector modes in all FPR.  */
++      if (LSX_SUPPORTED_MODE_P (mode)
++	  || LASX_SUPPORTED_MODE_P (mode))
++	return true;
++
+       if (mclass == MODE_FLOAT
+ 	  || mclass == MODE_COMPLEX_FLOAT
+ 	  || mclass == MODE_VECTOR_FLOAT)
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 55a759850..16f9f37c8 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -400,9 +400,6 @@
+ ;; 64-bit modes for which we provide move patterns.
+ (define_mode_iterator MOVE64 [DI DF])
+ 
+-;; 128-bit modes for which we provide move patterns on 64-bit targets.
+-(define_mode_iterator MOVE128 [TI TF])
+-
+ ;; Iterator for sub-32-bit integer modes.
+ (define_mode_iterator SHORT [QI HI])
+ 
+@@ -421,12 +418,6 @@
+ (define_mode_iterator ANYFI [(SI "TARGET_HARD_FLOAT")
+ 			     (DI "TARGET_DOUBLE_FLOAT")])
+ 
+-;; A mode for which moves involving FPRs may need to be split.
+-(define_mode_iterator SPLITF
+-  [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
+-   (DI "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
+-   (TF "TARGET_64BIT && TARGET_DOUBLE_FLOAT")])
+-
+ ;; A mode for anything with 32 bits or more, and able to be loaded with
+ ;; the same addressing mode as ld.w.
+ (define_mode_iterator LD_AT_LEAST_32_BIT [GPR ANYF])
+@@ -2421,41 +2412,6 @@
+   [(set_attr "move_type" "move,load,store")
+    (set_attr "mode" "DF")])
+ 
+-;; Emit a doubleword move in which exactly one of the operands is
+-;; a floating-point register.  We can't just emit two normal moves
+-;; because of the constraints imposed by the FPU register model;
+-;; see loongarch_can_change_mode_class for details.  Instead, we keep
+-;; the FPR whole and use special patterns to refer to each word of
+-;; the other operand.
+-
+-(define_expand "move_doubleword_fpr"
+-  [(set (match_operand:SPLITF 0)
+-	(match_operand:SPLITF 1))]
+-  ""
+-{
+-  if (FP_REG_RTX_P (operands[0]))
+-    {
+-      rtx low = loongarch_subword (operands[1], 0);
+-      rtx high = loongarch_subword (operands[1], 1);
+-      emit_insn (gen_load_low (operands[0], low));
+-      if (!TARGET_64BIT)
+-       emit_insn (gen_movgr2frh (operands[0], high, operands[0]));
+-      else
+-       emit_insn (gen_load_high (operands[0], high, operands[0]));
+-    }
+-  else
+-    {
+-      rtx low = loongarch_subword (operands[0], 0);
+-      rtx high = loongarch_subword (operands[0], 1);
+-      emit_insn (gen_store_word (low, operands[1], const0_rtx));
+-      if (!TARGET_64BIT)
+-       emit_insn (gen_movfrh2gr (high, operands[1]));
+-      else
+-       emit_insn (gen_store_word (high, operands[1], const1_rtx));
+-    }
+-  DONE;
+-})
+-
+ ;; Clear one FCC register
+ 
+ (define_expand "movfcc"
+@@ -2742,49 +2698,6 @@
+   [(set_attr "type" "fcvt")
+    (set_attr "mode" "")])
+ 
+-;; Load the low word of operand 0 with operand 1.
+-(define_insn "load_low"
+-  [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
+-	(unspec:SPLITF [(match_operand: 1 "general_operand" "rJ,m")]
+-		       UNSPEC_LOAD_LOW))]
+-  "TARGET_HARD_FLOAT"
+-{
+-  operands[0] = loongarch_subword (operands[0], 0);
+-  return loongarch_output_move (operands[0], operands[1]);
+-}
+-  [(set_attr "move_type" "mgtf,fpload")
+-   (set_attr "mode" "")])
+-
+-;; Load the high word of operand 0 from operand 1, preserving the value
+-;; in the low word.
+-(define_insn "load_high"
+-  [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
+-	(unspec:SPLITF [(match_operand: 1 "general_operand" "rJ,m")
+-			(match_operand:SPLITF 2 "register_operand" "0,0")]
+-		       UNSPEC_LOAD_HIGH))]
+-  "TARGET_HARD_FLOAT"
+-{
+-  operands[0] = loongarch_subword (operands[0], 1);
+-  return loongarch_output_move (operands[0], operands[1]);
+-}
+-  [(set_attr "move_type" "mgtf,fpload")
+-   (set_attr "mode" "")])
+-
+-;; Store one word of operand 1 in operand 0.  Operand 2 is 1 to store the
+-;; high word and 0 to store the low word.
+-(define_insn "store_word"
+-  [(set (match_operand: 0 "nonimmediate_operand" "=r,m")
+-	(unspec: [(match_operand:SPLITF 1 "register_operand" "f,f")
+-			    (match_operand 2 "const_int_operand")]
+-			   UNSPEC_STORE_WORD))]
+-  "TARGET_HARD_FLOAT"
+-{
+-  operands[1] = loongarch_subword (operands[1], INTVAL (operands[2]));
+-  return loongarch_output_move (operands[0], operands[1]);
+-}
+-  [(set_attr "move_type" "mftg,fpstore")
+-   (set_attr "mode" "")])
+-
+ ;; Thread-Local Storage
+ 
+ (define_insn "@got_load_tls_desc"
+@@ -2876,28 +2789,6 @@
+ 	(const_int 4)
+ 	(const_int 2)))])
+ 
+-;; Move operand 1 to the high word of operand 0 using movgr2frh.w, preserving the
+-;; value in the low word.
+-(define_insn "movgr2frh"
+-  [(set (match_operand:SPLITF 0 "register_operand" "=f")
+-	(unspec:SPLITF [(match_operand: 1 "reg_or_0_operand" "rJ")
+-			(match_operand:SPLITF 2 "register_operand" "0")]
+-			UNSPEC_MOVGR2FRH))]
+-  "TARGET_DOUBLE_FLOAT"
+-  "movgr2frh.w\t%z1,%0"
+-  [(set_attr "move_type" "mgtf")
+-   (set_attr "mode" "")])
+-
+-;; Move high word of operand 1 to operand 0 using movfrh2gr.s.
+-(define_insn "movfrh2gr"
+-  [(set (match_operand: 0 "register_operand" "=r")
+-	(unspec: [(match_operand:SPLITF 1 "register_operand" "f")]
+-			    UNSPEC_MOVFRH2GR))]
+-  "TARGET_DOUBLE_FLOAT"
+-  "movfrh2gr.s\t%0,%1"
+-  [(set_attr "move_type" "mftg")
+-   (set_attr "mode" "")])
+-
+ 
+ ;; Expand in-line code to clear the instruction cache between operand[0] and
+ ;; operand[1].
+-- 
+2.43.0
+
diff --git a/SME-0073-aarch64-Add-r-m-and-m-r-alternatives-to-64-bit-vecto.patch b/0176-Backport-SME-aarch64-Add-r-m-and-m-r-alternatives-to.patch
similarity index 96%
rename from SME-0073-aarch64-Add-r-m-and-m-r-alternatives-to-64-bit-vecto.patch
rename to 0176-Backport-SME-aarch64-Add-r-m-and-m-r-alternatives-to.patch
index 029ed634f897dd893a8cec857797c797c9a80f92..3bd87e976b12237f21969fc9b821f94585c055a7 100644
--- a/SME-0073-aarch64-Add-r-m-and-m-r-alternatives-to-64-bit-vecto.patch
+++ b/0176-Backport-SME-aarch64-Add-r-m-and-m-r-alternatives-to.patch
@@ -1,8 +1,8 @@
-From fa4e2267ec2408f95f750167b3d542eeca288124 Mon Sep 17 00:00:00 2001
+From d8233e19aae2272c4863de5e8d61d49d3147e807 Mon Sep 17 00:00:00 2001
 From: Kyrylo Tkachov 
 Date: Thu, 1 Jun 2023 09:37:06 +0100
-Subject: [PATCH 073/144] aarch64: Add =r,m and =m,r alternatives to 64-bit
- vector move patterns
+Subject: [PATCH 077/157] [Backport][SME] aarch64: Add =r,m and =m,r
+ alternatives to 64-bit vector move patterns
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=12e71b593ea0c64d919df525cd75ea10b7be8a4b
 
@@ -30,7 +30,7 @@ gcc/testsuite/ChangeLog:
  create mode 100644 gcc/testsuite/gcc.target/aarch64/xreg-vec-modes_1.c
 
 diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
-index 845f0298e..a974b374b 100644
+index 2d688edf5..b5c52ba16 100644
 --- a/gcc/config/aarch64/aarch64-simd.md
 +++ b/gcc/config/aarch64/aarch64-simd.md
 @@ -116,26 +116,28 @@
@@ -164,5 +164,5 @@ index 000000000..fc4dcb1ad
 +  b[1] = t2;
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/0176-LoongArch-Organize-the-code-related-to-split-move-an.patch b/0176-LoongArch-Organize-the-code-related-to-split-move-an.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a34163bb34be3ff983a1f4d87c1e6fe4da3967f8
--- /dev/null
+++ b/0176-LoongArch-Organize-the-code-related-to-split-move-an.patch
@@ -0,0 +1,413 @@
+From 95089699271d235efc29ae48b78f8c7f1b6386c4 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 12 Jul 2024 09:57:40 +0800
+Subject: [PATCH 176/188] LoongArch: Organize the code related to split move
+ and merge the same functions.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-protos.h
+	(loongarch_split_128bit_move): Delete.
+	(loongarch_split_128bit_move_p): Delete.
+	(loongarch_split_256bit_move): Delete.
+	(loongarch_split_256bit_move_p): Delete.
+	(loongarch_split_vector_move): Add a function declaration.
+	* config/loongarch/loongarch.cc
+	(loongarch_vector_costs::finish_cost): Adjust the code
+	formatting.
+	(loongarch_split_vector_move_p): Merge
+	loongarch_split_128bit_move_p and loongarch_split_256bit_move_p.
+	(loongarch_split_move_p): Merge code.
+	(loongarch_split_move): Likewise.
+	(loongarch_split_128bit_move_p): Delete.
+	(loongarch_split_256bit_move_p): Delete.
+	(loongarch_split_128bit_move): Delete.
+	(loongarch_split_vector_move): Merge loongarch_split_128bit_move
+	and loongarch_split_256bit_move.
+	(loongarch_split_256bit_move): Delete.
+	(loongarch_global_init): Remove the extra semicolon at the
+	end of the function.
+	* config/loongarch/loongarch.md (*movdf_softfloat):  Added a new
+	condition TARGET_64BIT.
+---
+ gcc/config/loongarch/loongarch-protos.h |   5 +-
+ gcc/config/loongarch/loongarch.cc       | 221 ++++++------------------
+ gcc/config/loongarch/loongarch.md       |   1 +
+ 3 files changed, 58 insertions(+), 169 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 0c31a74b7..abf1a0893 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -85,10 +85,7 @@ extern bool loongarch_split_move_p (rtx, rtx);
+ extern void loongarch_split_move (rtx, rtx);
+ extern bool loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT, machine_mode);
+ extern void loongarch_split_plus_constant (rtx *, machine_mode);
+-extern void loongarch_split_128bit_move (rtx, rtx);
+-extern bool loongarch_split_128bit_move_p (rtx, rtx);
+-extern void loongarch_split_256bit_move (rtx, rtx);
+-extern bool loongarch_split_256bit_move_p (rtx, rtx);
++extern void loongarch_split_vector_move (rtx, rtx);
+ extern const char *loongarch_output_move (rtx, rtx);
+ #ifdef RTX_CODE
+ extern void loongarch_expand_scc (rtx *);
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 260dd7b5f..53bd8d7ec 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4351,10 +4351,10 @@ void
+ loongarch_vector_costs::finish_cost (const vector_costs *scalar_costs)
+ {
+   loop_vec_info loop_vinfo = dyn_cast (m_vinfo);
++
+   if (loop_vinfo)
+-    {
+-      m_suggested_unroll_factor = determine_suggested_unroll_factor (loop_vinfo);
+-    }
++    m_suggested_unroll_factor
++      = determine_suggested_unroll_factor (loop_vinfo);
+ 
+   vector_costs::finish_cost (scalar_costs);
+ }
+@@ -4420,6 +4420,7 @@ loongarch_subword (rtx op, bool high_p)
+   return simplify_gen_subreg (word_mode, op, mode, byte);
+ }
+ 
++static bool loongarch_split_vector_move_p (rtx dest, rtx src);
+ /* Return true if a move from SRC to DEST should be split into two.
+    SPLIT_TYPE describes the split condition.  */
+ 
+@@ -4441,13 +4442,11 @@ loongarch_split_move_p (rtx dest, rtx src)
+ 	return false;
+     }
+ 
+-  /* Check if LSX moves need splitting.  */
+-  if (LSX_SUPPORTED_MODE_P (GET_MODE (dest)))
+-    return loongarch_split_128bit_move_p (dest, src);
+ 
+-  /* Check if LASX moves need splitting.  */
+-  if (LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
+-    return loongarch_split_256bit_move_p (dest, src);
++  /* Check if vector moves need splitting.  */
++  if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))
++      || LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
++    return loongarch_split_vector_move_p (dest, src);
+ 
+   /* Otherwise split all multiword moves.  */
+   return size > UNITS_PER_WORD;
+@@ -4460,10 +4459,9 @@ void
+ loongarch_split_move (rtx dest, rtx src)
+ {
+   gcc_checking_assert (loongarch_split_move_p (dest, src));
+-  if (LSX_SUPPORTED_MODE_P (GET_MODE (dest)))
+-    loongarch_split_128bit_move (dest, src);
+-  else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
+-    loongarch_split_256bit_move (dest, src);
++  if (LSX_SUPPORTED_MODE_P (GET_MODE (dest))
++      || LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
++    loongarch_split_vector_move (dest, src);
+   else
+     gcc_unreachable ();
+ }
+@@ -4585,224 +4583,117 @@ loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr)
+ 
+   return insn[ldr][index-2];
+ }
+-/* Return true if a 128-bit move from SRC to DEST should be split.  */
+-
+-bool
+-loongarch_split_128bit_move_p (rtx dest, rtx src)
+-{
+-  /* LSX-to-LSX moves can be done in a single instruction.  */
+-  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
+-    return false;
+-
+-  /* Check for LSX loads and stores.  */
+-  if (FP_REG_RTX_P (dest) && MEM_P (src))
+-    return false;
+-  if (FP_REG_RTX_P (src) && MEM_P (dest))
+-    return false;
+-
+-  /* Check for LSX set to an immediate const vector with valid replicated
+-     element.  */
+-  if (FP_REG_RTX_P (dest)
+-      && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
+-    return false;
+-
+-  /* Check for LSX load zero immediate.  */
+-  if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))
+-    return false;
+-
+-  return true;
+-}
+-
+-/* Return true if a 256-bit move from SRC to DEST should be split.  */
++/* Return true if a vector move from SRC to DEST should be split.  */
+ 
+-bool
+-loongarch_split_256bit_move_p (rtx dest, rtx src)
++static bool
++loongarch_split_vector_move_p (rtx dest, rtx src)
+ {
+-  /* LSX-to-LSX moves can be done in a single instruction.  */
++  /* Vector moves can be done in a single instruction.  */
+   if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
+     return false;
+ 
+-  /* Check for LSX loads and stores.  */
++  /* Check for vector loads and stores.  */
+   if (FP_REG_RTX_P (dest) && MEM_P (src))
+     return false;
+   if (FP_REG_RTX_P (src) && MEM_P (dest))
+     return false;
+ 
+-  /* Check for LSX set to an immediate const vector with valid replicated
++  /* Check for vector set to an immediate const vector with valid replicated
+      element.  */
+   if (FP_REG_RTX_P (dest)
+       && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
+     return false;
+ 
+-  /* Check for LSX load zero immediate.  */
++  /* Check for vector load zero immediate.  */
+   if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))
+     return false;
+ 
+   return true;
+ }
+ 
+-/* Split a 128-bit move from SRC to DEST.  */
++/* Split a vector move from SRC to DEST.  */
+ 
+ void
+-loongarch_split_128bit_move (rtx dest, rtx src)
++loongarch_split_vector_move (rtx dest, rtx src)
+ {
+   int byte, index;
+-  rtx low_dest, low_src, d, s;
++  rtx s, d;
++  machine_mode mode = GET_MODE (dest);
++  bool lsx_p = LSX_SUPPORTED_MODE_P (mode);
+ 
+   if (FP_REG_RTX_P (dest))
+     {
+       gcc_assert (!MEM_P (src));
+ 
+-      rtx new_dest = dest;
+-      if (!TARGET_64BIT)
+-	{
+-	  if (GET_MODE (dest) != V4SImode)
+-	    new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
+-	}
+-      else
+-	{
+-	  if (GET_MODE (dest) != V2DImode)
+-	    new_dest = simplify_gen_subreg (V2DImode, dest, GET_MODE (dest), 0);
+-	}
+-
+-      for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
+-	   byte += UNITS_PER_WORD, index++)
+-	{
+-	  s = loongarch_subword_at_byte (src, byte);
+-	  if (!TARGET_64BIT)
+-	    emit_insn (gen_lsx_vinsgr2vr_w (new_dest, s, new_dest,
+-					    GEN_INT (1 << index)));
+-	  else
+-	    emit_insn (gen_lsx_vinsgr2vr_d (new_dest, s, new_dest,
+-					    GEN_INT (1 << index)));
+-	}
+-    }
+-  else if (FP_REG_RTX_P (src))
+-    {
+-      gcc_assert (!MEM_P (dest));
+-
+-      rtx new_src = src;
+-      if (!TARGET_64BIT)
+-	{
+-	  if (GET_MODE (src) != V4SImode)
+-	    new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
+-	}
+-      else
+-	{
+-	  if (GET_MODE (src) != V2DImode)
+-	    new_src = simplify_gen_subreg (V2DImode, src, GET_MODE (src), 0);
+-	}
++      rtx (*gen_vinsgr2vr_d) (rtx, rtx, rtx, rtx);
+ 
+-      for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
+-	   byte += UNITS_PER_WORD, index++)
+-	{
+-	  d = loongarch_subword_at_byte (dest, byte);
+-	  if (!TARGET_64BIT)
+-	    emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index)));
+-	  else
+-	    emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index)));
+-	}
+-    }
+-  else
+-    {
+-      low_dest = loongarch_subword_at_byte (dest, 0);
+-      low_src = loongarch_subword_at_byte (src, 0);
+-      gcc_assert (REG_P (low_dest) && REG_P (low_src));
+-      /* Make sure the source register is not written before reading.  */
+-      if (REGNO (low_dest) <= REGNO (low_src))
++      if (lsx_p)
+ 	{
+-	  for (byte = 0; byte < GET_MODE_SIZE (TImode);
+-	       byte += UNITS_PER_WORD)
+-	    {
+-	      d = loongarch_subword_at_byte (dest, byte);
+-	      s = loongarch_subword_at_byte (src, byte);
+-	      loongarch_emit_move (d, s);
+-	    }
++	  mode = V2DImode;
++	  gen_vinsgr2vr_d = gen_lsx_vinsgr2vr_d;
+ 	}
+       else
+ 	{
+-	  for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0;
+-	       byte -= UNITS_PER_WORD)
+-	    {
+-	      d = loongarch_subword_at_byte (dest, byte);
+-	      s = loongarch_subword_at_byte (src, byte);
+-	      loongarch_emit_move (d, s);
+-	    }
++	  mode = V4DImode;
++	  gen_vinsgr2vr_d = gen_lasx_xvinsgr2vr_d;
+ 	}
+-    }
+-}
+-
+-/* Split a 256-bit move from SRC to DEST.  */
+-
+-void
+-loongarch_split_256bit_move (rtx dest, rtx src)
+-{
+-  int byte, index;
+-  rtx low_dest, low_src, d, s;
+-
+-  if (FP_REG_RTX_P (dest))
+-    {
+-      gcc_assert (!MEM_P (src));
+ 
+       rtx new_dest = dest;
+-      if (!TARGET_64BIT)
+-	{
+-	  if (GET_MODE (dest) != V8SImode)
+-	    new_dest = simplify_gen_subreg (V8SImode, dest, GET_MODE (dest), 0);
+-	}
+-      else
+-	{
+-	  if (GET_MODE (dest) != V4DImode)
+-	    new_dest = simplify_gen_subreg (V4DImode, dest, GET_MODE (dest), 0);
+-	}
++
++      if (GET_MODE (dest) != mode)
++	new_dest = simplify_gen_subreg (mode, dest, GET_MODE (dest), 0);
+ 
+       for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (dest));
+ 	   byte += UNITS_PER_WORD, index++)
+ 	{
+ 	  s = loongarch_subword_at_byte (src, byte);
+-	  if (!TARGET_64BIT)
+-	    emit_insn (gen_lasx_xvinsgr2vr_w (new_dest, s, new_dest,
+-					      GEN_INT (1 << index)));
+-	  else
+-	    emit_insn (gen_lasx_xvinsgr2vr_d (new_dest, s, new_dest,
+-					      GEN_INT (1 << index)));
++	  emit_insn (gen_vinsgr2vr_d (new_dest, s, new_dest,
++					  GEN_INT (1 << index)));
+ 	}
+     }
+   else if (FP_REG_RTX_P (src))
+     {
+       gcc_assert (!MEM_P (dest));
+ 
+-      rtx new_src = src;
+-      if (!TARGET_64BIT)
++      rtx (*gen_vpickve2gr_d) (rtx, rtx, rtx);
++
++      if (lsx_p)
+ 	{
+-	  if (GET_MODE (src) != V8SImode)
+-	    new_src = simplify_gen_subreg (V8SImode, src, GET_MODE (src), 0);
++	  mode = V2DImode;
++	  gen_vpickve2gr_d = gen_lsx_vpickve2gr_d;
+ 	}
+       else
+ 	{
+-	  if (GET_MODE (src) != V4DImode)
+-	    new_src = simplify_gen_subreg (V4DImode, src, GET_MODE (src), 0);
++	  mode = V4DImode;
++	  gen_vpickve2gr_d = gen_lasx_xvpickve2gr_d;
+ 	}
+ 
++      rtx new_src = src;
++      if (GET_MODE (src) != mode)
++	new_src = simplify_gen_subreg (mode, src, GET_MODE (src), 0);
++
+       for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (src));
+ 	   byte += UNITS_PER_WORD, index++)
+ 	{
+ 	  d = loongarch_subword_at_byte (dest, byte);
+-	  if (!TARGET_64BIT)
+-	    emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index)));
+-	  else
+-	    emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index)));
++	  emit_insn (gen_vpickve2gr_d (d, new_src, GEN_INT (index)));
+ 	}
+     }
+   else
+     {
++      /* This part of the code is designed to handle the following situations:
++	 (set (reg:V2DI 4 $r4)
++	      (reg:V2DI 6 $r6))
++	 The trigger test case is lsx-mov-1.c.  */
++      rtx low_dest, low_src;
++
+       low_dest = loongarch_subword_at_byte (dest, 0);
+       low_src = loongarch_subword_at_byte (src, 0);
+       gcc_assert (REG_P (low_dest) && REG_P (low_src));
+       /* Make sure the source register is not written before reading.  */
+       if (REGNO (low_dest) <= REGNO (low_src))
+ 	{
+-	  for (byte = 0; byte < GET_MODE_SIZE (TImode);
++	  for (byte = 0; byte < GET_MODE_SIZE (GET_MODE (dest));
+ 	       byte += UNITS_PER_WORD)
+ 	    {
+ 	      d = loongarch_subword_at_byte (dest, byte);
+@@ -4812,8 +4703,8 @@ loongarch_split_256bit_move (rtx dest, rtx src)
+ 	}
+       else
+ 	{
+-	  for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0;
+-	       byte -= UNITS_PER_WORD)
++	  for (byte = GET_MODE_SIZE (GET_MODE (dest)) - UNITS_PER_WORD;
++	       byte >= 0; byte -= UNITS_PER_WORD)
+ 	    {
+ 	      d = loongarch_subword_at_byte (dest, byte);
+ 	      s = loongarch_subword_at_byte (src, byte);
+@@ -7603,7 +7494,7 @@ loongarch_global_init (void)
+ 
+   /* Function to allocate machine-dependent function status.  */
+   init_machine_status = &loongarch_init_machine_status;
+-};
++}
+ 
+ static void
+ loongarch_reg_init (void)
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 16f9f37c8..8bcb43042 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2406,6 +2406,7 @@
+   [(set (match_operand:DF 0 "nonimmediate_operand" "=r,r,m")
+ 	(match_operand:DF 1 "move_operand" "rG,m,rG"))]
+   "(TARGET_SOFT_FLOAT || TARGET_SINGLE_FLOAT)
++   && TARGET_64BIT
+    && (register_operand (operands[0], DFmode)
+        || reg_or_0_operand (operands[1], DFmode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
+-- 
+2.43.0
+
diff --git a/SME-0074-AArch64-Rewrite-simd-move-immediate-patterns-to-new-.patch b/0177-Backport-SME-AArch64-Rewrite-simd-move-immediate-pat.patch
similarity index 96%
rename from SME-0074-AArch64-Rewrite-simd-move-immediate-patterns-to-new-.patch
rename to 0177-Backport-SME-AArch64-Rewrite-simd-move-immediate-pat.patch
index 891e1d3f0e3e92860740d11ac233e5135b0eb408..1e89f070f9bb3df0b71e1389bd3560ea4534ffc6 100644
--- a/SME-0074-AArch64-Rewrite-simd-move-immediate-patterns-to-new-.patch
+++ b/0177-Backport-SME-AArch64-Rewrite-simd-move-immediate-pat.patch
@@ -1,8 +1,8 @@
-From 07c9e5c918695ac8dae3a1e3f269e93ca01a3a92 Mon Sep 17 00:00:00 2001
+From 7d40978965ff893871a79f5f624f54ae02a34a8b Mon Sep 17 00:00:00 2001
 From: Tamar Christina 
 Date: Wed, 18 Oct 2023 09:34:01 +0100
-Subject: [PATCH 074/144] AArch64: Rewrite simd move immediate patterns to new
- syntax
+Subject: [PATCH 078/157] [Backport][SME] AArch64: Rewrite simd move immediate
+ patterns to new syntax
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=04227acbe9e6c60d1e314a6b4f2d949c07f30baa
 
@@ -23,7 +23,7 @@ gcc/ChangeLog:
  1 file changed, 47 insertions(+), 69 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
-index a974b374b..95cdc16f7 100644
+index b5c52ba16..1f4b30642 100644
 --- a/gcc/config/aarch64/aarch64-simd.md
 +++ b/gcc/config/aarch64/aarch64-simd.md
 @@ -115,54 +115,59 @@
@@ -163,5 +163,5 @@ index a974b374b..95cdc16f7 100644
    [(set (match_operand:VQMOV 0)
  	(match_operand:VQMOV 1))]
 -- 
-2.19.1
+2.33.0
 
diff --git a/0177-LoongArch-Expand-some-SImode-operations-through-si3_.patch b/0177-LoongArch-Expand-some-SImode-operations-through-si3_.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a748134ef04b0343ea32d495db878e804677bc07
--- /dev/null
+++ b/0177-LoongArch-Expand-some-SImode-operations-through-si3_.patch
@@ -0,0 +1,364 @@
+From 34c8e935780d43a797e403ca6604115ec393f0e6 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 20 Jul 2024 20:38:13 +0800
+Subject: [PATCH 177/188] LoongArch: Expand some SImode operations through
+ "si3_extend" instructions if TARGET_64BIT
+
+We already had "si3_extend" insns and we hoped the fwprop or combine
+passes can use them to remove unnecessary sign extensions.  But this
+does not always work: for cases like x << 1 | y, the compiler
+tends to do
+
+    (sign_extend:DI
+      (ior:SI (ashift:SI (reg:SI $r4)
+                         (const_int 1))
+              (reg:SI $r5)))
+
+instead of
+
+    (ior:DI (sign_extend:DI (ashift:SI (reg:SI $r4) (const_int 1)))
+            (sign_extend:DI (reg:SI $r5)))
+
+So we cannot match the ashlsi3_extend instruction here and we get:
+
+    slli.w $r4,$r4,1
+    or     $r4,$r5,$r4
+    slli.w $r4,$r4,0    # <= redundant
+    jr	   $r1
+
+To eliminate this redundant extension we need to turn SImode shift etc.
+to DImode "si3_extend" operations earlier, when we expand the SImode
+operation.  We are already doing this for addition, now do it for
+shifts, rotates, substract, multiplication, division, and modulo as
+well.
+
+The bytepick.w definition for TARGET_64BIT needs to be adjusted so it
+won't be undone by the shift expanding.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (optab): Add (rotatert "rotr").
+	(3, 3,
+	sub3, rotr3, mul3): Add a "*" to the insn name
+	so we can redefine the names with define_expand.
+	(*si3_extend): Remove "*" so we can use them
+	in expanders.
+	(*subsi3_extended, *mulsi3_extended): Likewise, also remove the
+	trailing "ed" for consistency.
+	(*si3_extended): Add mode for sign_extend to
+	prevent an ICE using it in expanders.
+	(shift_w, arith_w): New define_code_iterator.
+	(3): New define_expand.  Expand with
+	si3_extend for SImode if TARGET_64BIT.
+	(3): Likewise.
+	(mul3): Expand to mulsi3_extended for SImode if
+	TARGET_64BIT and ISA_HAS_DIV32.
+	(3): Expand to si3_extended
+	for SImode if TARGET_64BIT.
+	(rotl3): Expand to rotrsi3_extend for SImode if
+	TARGET_64BIT.
+	(bytepick_w_): Add mode for lshiftrt and ashift.
+	(bitsize, bytepick_imm, bytepick_w_ashift_amount): New
+	define_mode_attr.
+	(bytepick_w__extend): Adjust for the RTL change
+	caused by 32-bit shift expanding.  Now bytepick_imm only covers
+	2 and 3, separate one remaining case to ...
+	(bytepick_w_1_extend): ... here, new define_insn.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/bitwise_extend.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 131 +++++++++++++++---
+ .../gcc.target/loongarch/bitwise_extend.c     |  45 ++++++
+ 2 files changed, 154 insertions(+), 22 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/bitwise_extend.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 8bcb43042..6915dab0e 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -546,6 +546,7 @@
+ (define_code_attr optab [(ashift "ashl")
+ 			 (ashiftrt "ashr")
+ 			 (lshiftrt "lshr")
++			 (rotatert "rotr")
+ 			 (ior "ior")
+ 			 (xor "xor")
+ 			 (and "and")
+@@ -624,6 +625,49 @@
+ 				 (48 "6")
+ 				 (56 "7")])
+ 
++;; Expand some 32-bit operations to si3_extend operations if TARGET_64BIT
++;; so the redundant sign extension can be removed if the output is used as
++;; an input of a bitwise operation.  Note plus, rotl, and div are handled
++;; separately.
++(define_code_iterator shift_w [any_shift rotatert])
++(define_code_iterator arith_w [minus mult])
++
++(define_expand "3"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(shift_w:GPR (match_operand:GPR 1 "register_operand" "r")
++		     (match_operand:SI 2 "arith_operand" "rI")))]
++  ""
++{
++  if (TARGET_64BIT && mode == SImode)
++    {
++      rtx t = gen_reg_rtx (DImode);
++      emit_insn (gen_si3_extend (t, operands[1], operands[2]));
++      t = gen_lowpart (SImode, t);
++      SUBREG_PROMOTED_VAR_P (t) = 1;
++      SUBREG_PROMOTED_SET (t, SRP_SIGNED);
++      emit_move_insn (operands[0], t);
++      DONE;
++    }
++})
++
++(define_expand "3"
++  [(set (match_operand:GPR 0 "register_operand" "=r")
++	(arith_w:GPR (match_operand:GPR 1 "register_operand" "r")
++		     (match_operand:GPR 2 "register_operand" "r")))]
++  ""
++{
++  if (TARGET_64BIT && mode == SImode)
++    {
++      rtx t = gen_reg_rtx (DImode);
++      emit_insn (gen_si3_extend (t, operands[1], operands[2]));
++      t = gen_lowpart (SImode, t);
++      SUBREG_PROMOTED_VAR_P (t) = 1;
++      SUBREG_PROMOTED_SET (t, SRP_SIGNED);
++      emit_move_insn (operands[0], t);
++      DONE;
++    }
++})
++
+ ;;
+ ;;  ....................
+ ;;
+@@ -781,7 +825,7 @@
+   [(set_attr "type" "fadd")
+    (set_attr "mode" "")])
+ 
+-(define_insn "sub3"
++(define_insn "*sub3"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(minus:GPR (match_operand:GPR 1 "register_operand" "r")
+ 		   (match_operand:GPR 2 "register_operand" "r")))]
+@@ -791,7 +835,7 @@
+    (set_attr "mode" "")])
+ 
+ 
+-(define_insn "*subsi3_extended"
++(define_insn "subsi3_extend"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(sign_extend:DI
+ 	    (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
+@@ -818,7 +862,7 @@
+   [(set_attr "type" "fmul")
+    (set_attr "mode" "")])
+ 
+-(define_insn "mul3"
++(define_insn "*mul3"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(mult:GPR (match_operand:GPR 1 "register_operand" "r")
+ 		  (match_operand:GPR 2 "register_operand" "r")))]
+@@ -827,7 +871,7 @@
+   [(set_attr "type" "imul")
+    (set_attr "mode" "")])
+ 
+-(define_insn "*mulsi3_extended"
++(define_insn "mulsi3_extend"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(sign_extend:DI
+ 	    (mult:SI (match_operand:SI 1 "register_operand" "r")
+@@ -1001,8 +1045,19 @@
+ 		     (match_operand:GPR 2 "register_operand")))]
+   ""
+ {
+- if (GET_MODE (operands[0]) == SImode && TARGET_64BIT && !ISA_HAS_DIV32)
++ if (GET_MODE (operands[0]) == SImode && TARGET_64BIT)
+   {
++    if (ISA_HAS_DIV32)
++      {
++        rtx t = gen_reg_rtx (DImode);
++        emit_insn (gen_si3_extended (t, operands[1], operands[2]));
++        t = gen_lowpart (SImode, t);
++        SUBREG_PROMOTED_VAR_P (t) = 1;
++        SUBREG_PROMOTED_SET (t, SRP_SIGNED);
++        emit_move_insn (operands[0], t);
++        DONE;
++      }
++
+     rtx reg1 = gen_reg_rtx (DImode);
+     rtx reg2 = gen_reg_rtx (DImode);
+     rtx rd = gen_reg_rtx (DImode);
+@@ -1038,7 +1093,7 @@
+ 
+ (define_insn "si3_extended"
+   [(set (match_operand:DI 0 "register_operand" "=r,&r,&r")
+-	(sign_extend
++	(sign_extend:DI
+ 	  (any_div:SI (match_operand:SI 1 "register_operand" "r,r,0")
+ 		      (match_operand:SI 2 "register_operand" "r,r,r"))))]
+   "TARGET_64BIT && ISA_HAS_DIV32"
+@@ -2981,7 +3036,7 @@
+ ;;
+ ;;  ....................
+ 
+-(define_insn "3"
++(define_insn "*3"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(any_shift:GPR (match_operand:GPR 1 "register_operand" "r")
+ 		       (match_operand:SI 2 "arith_operand" "rI")))]
+@@ -2996,7 +3051,7 @@
+   [(set_attr "type" "shift")
+    (set_attr "mode" "")])
+ 
+-(define_insn "*si3_extend"
++(define_insn "si3_extend"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(sign_extend:DI
+ 	   (any_shift:SI (match_operand:SI 1 "register_operand" "r")
+@@ -3011,7 +3066,7 @@
+   [(set_attr "type" "shift")
+    (set_attr "mode" "SI")])
+ 
+-(define_insn "rotr3"
++(define_insn "*rotr3"
+   [(set (match_operand:GPR 0 "register_operand" "=r,r")
+ 	(rotatert:GPR (match_operand:GPR 1 "register_operand" "r,r")
+ 		      (match_operand:SI 2 "arith_operand" "r,I")))]
+@@ -3040,6 +3095,19 @@
+   ""
+   {
+     operands[3] = gen_reg_rtx (SImode);
++
++    if (TARGET_64BIT && mode == SImode)
++      {
++	rtx t = gen_reg_rtx (DImode);
++
++	emit_insn (gen_negsi2 (operands[3], operands[2]));
++	emit_insn (gen_rotrsi3_extend (t, operands[1], operands[3]));
++	t = gen_lowpart (SImode, t);
++	SUBREG_PROMOTED_VAR_P (t) = 1;
++	SUBREG_PROMOTED_SET (t, SRP_SIGNED);
++	emit_move_insn (operands[0], t);
++	DONE;
++      }
+   });
+ 
+ ;; The following templates were added to generate "bstrpick.d + alsl.d"
+@@ -4061,26 +4129,45 @@
+ 
+ (define_insn "bytepick_w_"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+-	(ior:SI (lshiftrt (match_operand:SI 1 "register_operand" "r")
+-			  (const_int ))
+-		(ashift (match_operand:SI 2 "register_operand" "r")
+-			(const_int bytepick_w_ashift_amount))))]
++	(ior:SI (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
++			     (const_int ))
++		(ashift:SI (match_operand:SI 2 "register_operand" "r")
++			   (const_int bytepick_w_ashift_amount))))]
+   ""
+   "bytepick.w\t%0,%1,%2,"
+   [(set_attr "mode" "SI")])
+ 
++(define_mode_attr bitsize [(QI "8") (HI "16")])
++(define_mode_attr bytepick_imm [(QI "3") (HI "2")])
++(define_mode_attr bytepick_w_ashift_amount [(QI "24") (HI "16")])
++
+ (define_insn "bytepick_w__extend"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+-	(sign_extend:DI
+-	 (subreg:SI
+-	  (ior:DI (subreg:DI (lshiftrt
+-			      (match_operand:SI 1 "register_operand" "r")
+-			      (const_int )) 0)
+-		  (subreg:DI (ashift
+-			      (match_operand:SI 2 "register_operand" "r")
+-			      (const_int bytepick_w_ashift_amount)) 0)) 0)))]
++	(ior:DI
++	  (ashift:DI
++	    (sign_extend:DI
++	      (subreg:SHORT (match_operand:DI 1 "register_operand" "r") 0))
++	    (const_int ))
++	  (zero_extract:DI (match_operand:DI 2 "register_operand" "r")
++			   (const_int )
++			   (const_int ))))]
+   "TARGET_64BIT"
+-  "bytepick.w\t%0,%1,%2,"
++  "bytepick.w\t%0,%2,%1,"
++  [(set_attr "mode" "SI")])
++
++(define_insn "bytepick_w_1_extend"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(ior:DI
++	  (ashift:DI
++	    (sign_extract:DI (match_operand:DI 1 "register_operand" "r")
++                         (const_int 24)
++                         (const_int 0))
++        (const_int 8))
++	  (zero_extract:DI (match_operand:DI 2 "register_operand" "r")
++			   (const_int 8)
++			   (const_int 24))))]
++  "TARGET_64BIT"
++  "bytepick.w\t%0,%2,%1,1"
+   [(set_attr "mode" "SI")])
+ 
+ (define_insn "bytepick_d_"
+diff --git a/gcc/testsuite/gcc.target/loongarch/bitwise_extend.c b/gcc/testsuite/gcc.target/loongarch/bitwise_extend.c
+new file mode 100644
+index 000000000..c2bc489a7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/bitwise_extend.c
+@@ -0,0 +1,45 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mdiv32" } */
++/* { dg-final { scan-assembler-not "slli\\.w" } } */
++
++int
++f1 (int a, int b)
++{
++  return (a << b) | b;
++}
++
++int
++f2 (int a, int b)
++{
++  return (a - b) | b;
++}
++
++int
++f3 (int a, int b)
++{
++  return (a * b) | b;
++}
++
++int
++f4 (int a, int b)
++{
++  return (unsigned) a >> b | (unsigned) a << (32 - b) | b;
++}
++
++int
++f5 (int a, int b)
++{
++  return (unsigned) a << b | (unsigned) a >> (32 - b) | b;
++}
++
++int
++f6 (int a, int b)
++{
++  return (a % b) | b;
++}
++
++int
++f7 (int a, int b)
++{
++  return (a + b) | b;
++}
+-- 
+2.43.0
+
diff --git a/SME-0075-AArch64-remove-test-comment-from-mov-mode-_aarch64.patch b/0178-Backport-SME-AArch64-remove-test-comment-from-mov-mo.patch
similarity index 83%
rename from SME-0075-AArch64-remove-test-comment-from-mov-mode-_aarch64.patch
rename to 0178-Backport-SME-AArch64-remove-test-comment-from-mov-mo.patch
index 97e726fd1706d03e8862dd6d0774fe81c06b2601..95f88fc57e588d802c7243e1559132579a6b8dd6 100644
--- a/SME-0075-AArch64-remove-test-comment-from-mov-mode-_aarch64.patch
+++ b/0178-Backport-SME-AArch64-remove-test-comment-from-mov-mo.patch
@@ -1,7 +1,8 @@
-From 05be7084cd10cf1acf5dcdfbee4685a3c100cf2a Mon Sep 17 00:00:00 2001
+From 883af5a13e648e74cb8d8722be6d4980e8bc8f48 Mon Sep 17 00:00:00 2001
 From: Tamar Christina 
 Date: Tue, 20 Jun 2023 08:54:42 +0100
-Subject: [PATCH 075/144] AArch64: remove test comment from *mov_aarch64
+Subject: [PATCH 079/157] [Backport][SME] AArch64: remove test comment from
+ *mov_aarch64
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=36de416df8b3f109353e309011061fa66e872e3a
 
@@ -16,10 +17,10 @@ gcc/ChangeLog:
  1 file changed, 1 insertion(+), 1 deletion(-)
 
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 7c7633770..64ebe6172 100644
+index 29a665e45..1ec23fae8 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -1211,7 +1211,7 @@
+@@ -1213,7 +1213,7 @@
       [m, r Z  ; store_4        , *     ] str\\t%w1, %0
       [m, w    ; store_4        , *     ] str\t%1, %0
       [r, w    ; neon_to_gp  , simd  ] umov\t%w0, %1.[0]
@@ -29,5 +30,5 @@ index 7c7633770..64ebe6172 100644
       [w, r Z  ; neon_from_gp, nosimd] fmov\t%s0, %w1
       [w, w    ; neon_dup       , simd  ] dup\t%0, %1.[0]
 -- 
-2.19.1
+2.33.0
 
diff --git a/0178-LoongArch-Relax-ins_zero_bitmask_operand-and-remove-.patch b/0178-LoongArch-Relax-ins_zero_bitmask_operand-and-remove-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..63c05e44732f415cfc60863749e3250381a6fed5
--- /dev/null
+++ b/0178-LoongArch-Relax-ins_zero_bitmask_operand-and-remove-.patch
@@ -0,0 +1,123 @@
+From 8311e0053c8a9646b8798c53ae4a8f45d12c42c1 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 28 Jul 2024 17:02:49 +0800
+Subject: [PATCH 178/188] LoongArch: Relax ins_zero_bitmask_operand and remove
+ and3_align
+
+In r15-1207 I was too stupid to realize we just need to relax
+ins_zero_bitmask_operand to allow using bstrins for aligning, instead of
+adding a new split.  And, "> 12" in ins_zero_bitmask_operand also makes
+no sense: it rejects bstrins for things like "x & ~4l" with no good
+reason.
+
+So fix my errors now.
+
+gcc/ChangeLog:
+
+	* config/loongarch/predicates.md (ins_zero_bitmask_operand):
+	Cover more cases that bstrins can benefit.
+	(high_bitmask_operand): Remove.
+	* config/loongarch/constraints.md (Yy): Remove.
+	* config/loongarch/loongarch.md (and3_align): Remove.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/bstrins-4.c: New test.
+---
+ gcc/config/loongarch/constraints.md            |  4 ----
+ gcc/config/loongarch/loongarch.md              | 17 -----------------
+ gcc/config/loongarch/predicates.md             |  9 ++-------
+ gcc/testsuite/gcc.target/loongarch/bstrins-4.c |  9 +++++++++
+ 4 files changed, 11 insertions(+), 28 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/bstrins-4.c
+
+diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md
+index f3f5776da..d653ea82a 100644
+--- a/gcc/config/loongarch/constraints.md
++++ b/gcc/config/loongarch/constraints.md
+@@ -292,10 +292,6 @@
+    "@internal"
+    (match_operand 0 "low_bitmask_operand"))
+ 
+-(define_constraint "Yy"
+-   "@internal"
+-   (match_operand 0 "high_bitmask_operand"))
+-
+ (define_constraint "YI"
+   "@internal
+    A replicated vector const in which the replicated value is in the range
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 6915dab0e..1ebcfa0c7 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1588,23 +1588,6 @@
+   [(set_attr "move_type" "pick_ins")
+    (set_attr "mode" "")])
+ 
+-(define_insn_and_split "and3_align"
+-  [(set (match_operand:GPR 0 "register_operand" "=r")
+-	(and:GPR (match_operand:GPR 1 "register_operand" "r")
+-		 (match_operand:GPR 2 "high_bitmask_operand" "Yy")))]
+-  ""
+-  "#"
+-  ""
+-  [(set (match_dup 0) (match_dup 1))
+-   (set (zero_extract:GPR (match_dup 0) (match_dup 2) (const_int 0))
+-	(const_int 0))]
+-{
+-  int len;
+-
+-  len = low_bitmask_len (mode, ~INTVAL (operands[2]));
+-  operands[2] = GEN_INT (len);
+-})
+-
+ (define_insn_and_split "*bstrins__for_mask"
+   [(set (match_operand:GPR 0 "register_operand" "=r")
+ 	(and:GPR (match_operand:GPR 1 "register_operand" "r")
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 95be8a4fe..2b7f7ed47 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -293,10 +293,6 @@
+   (and (match_code "const_int")
+        (match_test "low_bitmask_len (mode, INTVAL (op)) > 12")))
+ 
+-(define_predicate "high_bitmask_operand"
+-  (and (match_code "const_int")
+-       (match_test "low_bitmask_len (mode, ~INTVAL (op)) > 0")))
+-
+ (define_predicate "d_operand"
+   (and (match_code "reg")
+        (match_test "GP_REG_P (REGNO (op))")))
+@@ -406,11 +402,10 @@
+ 
+ (define_predicate "ins_zero_bitmask_operand"
+   (and (match_code "const_int")
+-       (match_test "INTVAL (op) != -1")
+-       (match_test "INTVAL (op) & 1")
+        (match_test "low_bitmask_len (mode, \
+ 				     ~UINTVAL (op) | (~UINTVAL(op) - 1)) \
+-		    > 12")))
++		    > 0")
++       (not (match_operand 0 "const_uns_arith_operand"))))
+ 
+ (define_predicate "const_call_insn_operand"
+   (match_code "const,symbol_ref,label_ref")
+diff --git a/gcc/testsuite/gcc.target/loongarch/bstrins-4.c b/gcc/testsuite/gcc.target/loongarch/bstrins-4.c
+new file mode 100644
+index 000000000..0823cfc38
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/bstrins-4.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "bstrins\\.d\t\\\$r4,\\\$r0,2,2" } } */
++
++long
++x (long a)
++{
++  return a & ~4;
++}
+-- 
+2.43.0
+
diff --git a/SME-0076-aarch64-Distinguish-streaming-compatible-AdvSIMD-ins.patch b/0179-Backport-SME-aarch64-Distinguish-streaming-compatibl.patch
similarity index 98%
rename from SME-0076-aarch64-Distinguish-streaming-compatible-AdvSIMD-ins.patch
rename to 0179-Backport-SME-aarch64-Distinguish-streaming-compatibl.patch
index 47bb2f9e046f5dc239f2397250b859e3bb93b9dc..f99b246a92c0b74cefde55fac9542802bf3af4fd 100644
--- a/SME-0076-aarch64-Distinguish-streaming-compatible-AdvSIMD-ins.patch
+++ b/0179-Backport-SME-aarch64-Distinguish-streaming-compatibl.patch
@@ -1,8 +1,8 @@
-From 424b7f8e7e45240dc767bdc60495a58af676c8bb Mon Sep 17 00:00:00 2001
+From 4a0e91dc27b30ae673ba132bf2be17a74bc89f31 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:24 +0000
-Subject: [PATCH 076/144] aarch64: Distinguish streaming-compatible AdvSIMD
- insns
+Subject: [PATCH 080/157] [Backport][SME] aarch64: Distinguish
+ streaming-compatible AdvSIMD insns
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c86ee4f683e05e5809597d96b5eeb261c9c92cac
 
@@ -114,7 +114,7 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/sme/arm_neon_3.c
 
 diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
-index 95cdc16f7..8e09e3bdc 100644
+index 1f4b30642..62493cdfa 100644
 --- a/gcc/config/aarch64/aarch64-simd.md
 +++ b/gcc/config/aarch64/aarch64-simd.md
 @@ -121,19 +121,19 @@
@@ -216,10 +216,10 @@ index 95cdc16f7..8e09e3bdc 100644
  )
  
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 042e8002f..7058359c1 100644
+index 8f8395201..08a98f8ba 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -3782,7 +3782,7 @@ static bool
+@@ -3999,7 +3999,7 @@ static bool
  aarch64_array_mode_supported_p (machine_mode mode,
  				unsigned HOST_WIDE_INT nelems)
  {
@@ -228,7 +228,7 @@ index 042e8002f..7058359c1 100644
        && (AARCH64_VALID_SIMD_QREG_MODE (mode)
  	  || AARCH64_VALID_SIMD_DREG_MODE (mode))
        && (nelems >= 2 && nelems <= 4))
-@@ -12738,8 +12738,8 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+@@ -12955,8 +12955,8 @@ aarch64_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
        return NO_REGS;
      }
  
@@ -239,7 +239,7 @@ index 042e8002f..7058359c1 100644
    if (REG_P (x)
        && (mode == TFmode
  	  || mode == TImode
-@@ -15308,7 +15308,7 @@ aarch64_register_move_cost (machine_mode mode,
+@@ -15540,7 +15540,7 @@ aarch64_register_move_cost (machine_mode mode,
  	 secondary reload.  A general register is used as a scratch to move
  	 the upper DI value and the lower DI value is moved directly,
  	 hence the cost is the sum of three moves. */
@@ -248,7 +248,7 @@ index 042e8002f..7058359c1 100644
  	return regmove_cost->GP2FP + regmove_cost->FP2GP + regmove_cost->FP2FP;
  
        return regmove_cost->FP2FP;
-@@ -20875,7 +20875,7 @@ aarch64_simd_container_mode (scalar_mode mode, poly_int64 width)
+@@ -21107,7 +21107,7 @@ aarch64_simd_container_mode (scalar_mode mode, poly_int64 width)
      return aarch64_full_sve_mode (mode).else_mode (word_mode);
  
    gcc_assert (known_eq (width, 64) || known_eq (width, 128));
@@ -257,7 +257,7 @@ index 042e8002f..7058359c1 100644
      {
        if (known_eq (width, 128))
  	return aarch64_vq_mode (mode).else_mode (word_mode);
-@@ -24989,7 +24989,11 @@ aarch64_expand_cpymem (rtx *operands)
+@@ -25221,7 +25221,11 @@ aarch64_expand_cpymem (rtx *operands)
    int copy_bits = 256;
  
    /* Default to 256-bit LDP/STP on large copies, however small copies, no SIMD
@@ -301,7 +301,7 @@ index dd2de4e88..a3c83a3b1 100644
  #define AARCH64_ISA_CRC            (aarch64_isa_flags & AARCH64_FL_CRC)
  #define AARCH64_ISA_CRYPTO         (aarch64_isa_flags & AARCH64_FL_CRYPTO)
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 64ebe6172..57a0f8d00 100644
+index 1ec23fae8..079c8a3f9 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
 @@ -378,7 +378,8 @@
@@ -327,7 +327,7 @@ index 64ebe6172..57a0f8d00 100644
  	(and (eq_attr "arch" "fp_q, simd")
  	     (match_test "TARGET_SIMD"))
  
-@@ -1200,22 +1207,22 @@
+@@ -1202,22 +1209,22 @@
    "(register_operand (operands[0], mode)
      || aarch64_reg_or_zero (operands[1], mode))"
    {@ [cons: =0, 1; attrs: type, arch]
@@ -365,7 +365,7 @@ index 64ebe6172..57a0f8d00 100644
    }
  )
  
-@@ -1370,9 +1377,9 @@
+@@ -1372,9 +1379,9 @@
  
  (define_insn "*movti_aarch64"
    [(set (match_operand:TI 0
@@ -377,7 +377,7 @@ index 64ebe6172..57a0f8d00 100644
    "(register_operand (operands[0], TImode)
      || aarch64_reg_or_zero (operands[1], TImode))"
    "@
-@@ -1382,16 +1389,17 @@
+@@ -1384,16 +1391,17 @@
     #
     #
     mov\\t%0.16b, %1.16b
@@ -398,7 +398,7 @@ index 64ebe6172..57a0f8d00 100644
  )
  
  ;; Split a TImode register-register or register-immediate move into
-@@ -1527,13 +1535,14 @@
+@@ -1529,13 +1537,14 @@
  
  (define_insn "*mov_aarch64"
    [(set (match_operand:TFD 0
@@ -415,7 +415,7 @@ index 64ebe6172..57a0f8d00 100644
     #
     #
     #
-@@ -1544,10 +1553,10 @@
+@@ -1546,10 +1555,10 @@
     ldp\\t%0, %H0, %1
     stp\\t%1, %H1, %0
     stp\\txzr, xzr, %0"
@@ -429,7 +429,7 @@ index 64ebe6172..57a0f8d00 100644
  )
  
  (define_split
-@@ -1736,7 +1745,7 @@
+@@ -1738,7 +1747,7 @@
  	(match_operand:TF 1 "aarch64_mem_pair_operand" "Ump"))
     (set (match_operand:TF 2 "register_operand" "=w")
  	(match_operand:TF 3 "memory_operand" "m"))]
@@ -438,7 +438,7 @@ index 64ebe6172..57a0f8d00 100644
      && rtx_equal_p (XEXP (operands[3], 0),
  		    plus_constant (Pmode,
  				   XEXP (operands[1], 0),
-@@ -1786,11 +1795,11 @@
+@@ -1788,11 +1797,11 @@
  	(match_operand:TF 1 "register_operand" "w"))
     (set (match_operand:TF 2 "memory_operand" "=m")
  	(match_operand:TF 3 "register_operand" "w"))]
@@ -455,7 +455,7 @@ index 64ebe6172..57a0f8d00 100644
    "stp\\t%q1, %q3, %z0"
    [(set_attr "type" "neon_stp_q")
     (set_attr "fp" "yes")]
-@@ -1838,7 +1847,7 @@
+@@ -1840,7 +1849,7 @@
       (set (match_operand:TX 3 "register_operand" "=w")
            (mem:TX (plus:P (match_dup 1)
  			  (match_operand:P 5 "const_int_operand" "n"))))])]
@@ -464,7 +464,7 @@ index 64ebe6172..57a0f8d00 100644
    "ldp\\t%q2, %q3, [%1], %4"
    [(set_attr "type" "neon_ldp_q")]
  )
-@@ -1888,7 +1897,7 @@
+@@ -1890,7 +1899,7 @@
       (set (mem:TX (plus:P (match_dup 0)
  			  (match_operand:P 5 "const_int_operand" "n")))
            (match_operand:TX 3 "register_operand" "w"))])]
@@ -1548,5 +1548,5 @@ index 000000000..36794e5b0
 +  return vhaddq_s32 (x, y);
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/0179-LoongArch-Rework-bswap-hi-si-di-2-definition.patch b/0179-LoongArch-Rework-bswap-hi-si-di-2-definition.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8dcdd3e42cac08c33a2640a2988f176958f0f1fa
--- /dev/null
+++ b/0179-LoongArch-Rework-bswap-hi-si-di-2-definition.patch
@@ -0,0 +1,224 @@
+From 54bf8fc616af5cdb9e4c787a2dfb2c516c8e425a Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 28 Jul 2024 19:57:02 +0800
+Subject: [PATCH 179/188] LoongArch: Rework bswap{hi,si,di}2 definition
+
+Per a gcc-help thread we are generating sub-optimal code for
+__builtin_bswap{32,64}.  To fix it:
+
+- Use a single revb.d instruction for bswapdi2.
+- Use a single revb.2w instruction for bswapsi2 for TARGET_64BIT,
+  revb.2h + rotri.w for !TARGET_64BIT.
+- Use a single revb.2h instruction for bswapsi2 (x) r>> 16, and a single
+  revb.2w instruction for bswapdi2 (x) r>> 32.
+
+Unfortunately I cannot figure out a way to make the compiler generate
+revb.4h or revh.{2w,d} instructions.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (UNSPEC_REVB_2H, UNSPEC_REVB_4H,
+	UNSPEC_REVH_D): Remove UNSPECs.
+	(revb_4h, revh_d): Remove define_insn.
+	(revb_2h): Define as (rotatert:SI (bswap:SI x) 16) instead of
+	an UNSPEC.
+	(revb_2h_extend, revb_2w, *bswapsi2, bswapdi2): New define_insn.
+	(bswapsi2): Change to define_expand.  Only expand to revb.2h +
+	rotri.w if !TARGET_64BIT.
+	(bswapdi2): Change to define_insn of which the output is just a
+	revb.d instruction.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/revb.c: New test.
+---
+ gcc/config/loongarch/loongarch.md         | 79 ++++++++++++-----------
+ gcc/testsuite/gcc.target/loongarch/revb.c | 61 +++++++++++++++++
+ 2 files changed, 104 insertions(+), 36 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/revb.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 1ebcfa0c7..b1c828dba 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -20,11 +20,6 @@
+ ;; .
+ 
+ (define_c_enum "unspec" [
+-  ;; Integer operations that are too cumbersome to describe directly.
+-  UNSPEC_REVB_2H
+-  UNSPEC_REVB_4H
+-  UNSPEC_REVH_D
+-
+   ;; Floating-point moves.
+   UNSPEC_LOAD_LOW
+   UNSPEC_LOAD_HIGH
+@@ -3151,55 +3146,67 @@
+ 
+ ;; Reverse the order of bytes of operand 1 and store the result in operand 0.
+ 
+-(define_insn "bswaphi2"
+-  [(set (match_operand:HI 0 "register_operand" "=r")
+-	(bswap:HI (match_operand:HI 1 "register_operand" "r")))]
++(define_insn "revb_2h"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(rotatert:SI (bswap:SI (match_operand:SI 1 "register_operand" "r"))
++		     (const_int 16)))]
+   ""
+   "revb.2h\t%0,%1"
+   [(set_attr "type" "shift")])
+ 
+-(define_insn_and_split "bswapsi2"
+-  [(set (match_operand:SI 0 "register_operand" "=r")
+-	(bswap:SI (match_operand:SI 1 "register_operand" "r")))]
+-  ""
+-  "#"
+-  ""
+-  [(set (match_dup 0) (unspec:SI [(match_dup 1)] UNSPEC_REVB_2H))
+-   (set (match_dup 0) (rotatert:SI (match_dup 0) (const_int 16)))]
+-  ""
+-  [(set_attr "insn_count" "2")])
+-
+-(define_insn_and_split "bswapdi2"
++(define_insn "revb_2h_extend"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+-	(bswap:DI (match_operand:DI 1 "register_operand" "r")))]
++	(sign_extend:DI
++	  (rotatert:SI
++	    (bswap:SI (match_operand:SI 1 "register_operand" "r"))
++	    (const_int 16))))]
+   "TARGET_64BIT"
+-  "#"
+-  ""
+-  [(set (match_dup 0) (unspec:DI [(match_dup 1)] UNSPEC_REVB_4H))
+-   (set (match_dup 0) (unspec:DI [(match_dup 0)] UNSPEC_REVH_D))]
+-  ""
+-  [(set_attr "insn_count" "2")])
++  "revb.2h\t%0,%1"
++  [(set_attr "type" "shift")])
+ 
+-(define_insn "revb_2h"
+-  [(set (match_operand:SI 0 "register_operand" "=r")
+-	(unspec:SI [(match_operand:SI 1 "register_operand" "r")] UNSPEC_REVB_2H))]
++(define_insn "bswaphi2"
++  [(set (match_operand:HI 0 "register_operand" "=r")
++	(bswap:HI (match_operand:HI 1 "register_operand" "r")))]
+   ""
+   "revb.2h\t%0,%1"
+   [(set_attr "type" "shift")])
+ 
+-(define_insn "revb_4h"
++(define_insn "revb_2w"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+-	(unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_REVB_4H))]
++	(rotatert:DI (bswap:DI (match_operand:DI 1 "register_operand" "r"))
++		     (const_int 32)))]
+   "TARGET_64BIT"
+-  "revb.4h\t%0,%1"
++  "revb.2w\t%0,%1"
+   [(set_attr "type" "shift")])
+ 
+-(define_insn "revh_d"
++(define_insn "*bswapsi2"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(bswap:SI (match_operand:SI 1 "register_operand" "r")))]
++  "TARGET_64BIT"
++  "revb.2w\t%0,%1"
++  [(set_attr "type" "shift")])
++
++(define_expand "bswapsi2"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(bswap:SI (match_operand:SI 1 "register_operand" "r")))]
++  ""
++{
++  if (!TARGET_64BIT)
++    {
++      rtx t = gen_reg_rtx (SImode);
++      emit_insn (gen_revb_2h (t, operands[1]));
++      emit_insn (gen_rotrsi3 (operands[0], t, GEN_INT (16)));
++      DONE;
++    }
++})
++
++(define_insn "bswapdi2"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+-	(unspec:DI [(match_operand:DI 1 "register_operand" "r")] UNSPEC_REVH_D))]
++	(bswap:DI (match_operand:DI 1 "register_operand" "r")))]
+   "TARGET_64BIT"
+-  "revh.d\t%0,%1"
++  "revb.d\t%0,%1"
+   [(set_attr "type" "shift")])
++
+ 
+ ;;
+ ;;  ....................
+diff --git a/gcc/testsuite/gcc.target/loongarch/revb.c b/gcc/testsuite/gcc.target/loongarch/revb.c
+new file mode 100644
+index 000000000..27a5d0fc7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/revb.c
+@@ -0,0 +1,61 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mabi=lp64d" } */
++/* { dg-final { check-function-bodies "**" "" } } */
++
++/*
++**t1:
++**	revb.2w	\$r4,\$r4
++**	slli.w	\$r4,\$r4,0
++**	jr	\$r1
++*/
++unsigned int
++t1 (unsigned int x)
++{
++  return __builtin_bswap32 (x);
++}
++
++/*
++**t2:
++**	revb.d	\$r4,\$r4
++**	jr	\$r1
++*/
++unsigned long
++t2 (unsigned long x)
++{
++  return __builtin_bswap64 (x);
++}
++
++/*
++**t3:
++**	revb.2h	\$r4,\$r4
++**	jr	\$r1
++*/
++unsigned int
++t3 (unsigned int x)
++{
++  return (x >> 8) & 0xff00ff | (x << 8) & 0xff00ff00;
++}
++
++/*
++**t4:
++**	revb.2w	\$r4,\$r4
++**	jr	\$r1
++*/
++unsigned long
++t4 (unsigned long x)
++{
++  x = __builtin_bswap64 (x);
++  return x << 32 | x >> 32;
++}
++
++/*
++**t5:
++**	revb.2h	\$r4,\$r4
++**	bstrpick.w	\$r4,\$r4,15,0
++**	jr	\$r1
++*/
++unsigned short
++t5 (unsigned short x)
++{
++  return __builtin_bswap16 (x);
++}
+-- 
+2.43.0
+
diff --git a/SME-0077-aarch64-Mark-relevant-SVE-instructions-as-non-stream.patch b/0180-Backport-SME-aarch64-Mark-relevant-SVE-instructions-.patch
similarity index 99%
rename from SME-0077-aarch64-Mark-relevant-SVE-instructions-as-non-stream.patch
rename to 0180-Backport-SME-aarch64-Mark-relevant-SVE-instructions-.patch
index 80052c990ddc40f1639fc69adb2fe93ac743c4a9..679e26eb0aad689a662a46483e63b478eee8e91e 100644
--- a/SME-0077-aarch64-Mark-relevant-SVE-instructions-as-non-stream.patch
+++ b/0180-Backport-SME-aarch64-Mark-relevant-SVE-instructions-.patch
@@ -1,8 +1,8 @@
-From 971b4043d1f71aac26f194d0d90b132a60ca6cd6 Mon Sep 17 00:00:00 2001
+From 0404dfa43633a35460aba1b96d04f62cc7d6103b Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:24 +0000
-Subject: [PATCH 077/144] aarch64: Mark relevant SVE instructions as
- non-streaming
+Subject: [PATCH 081/157] [Backport][SME] aarch64: Mark relevant SVE
+ instructions as non-streaming
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=983b4365028e9a059b5fb1eef85a297bea19fc8e
 
@@ -1700,10 +1700,10 @@ index a3c83a3b1..8f0ac2cde 100644
  /* SME instructions, enabled through +sme.  Note that this does not
     imply anything about the state of PSTATE.SM.  */
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index bf4319aae..2bb08313c 100644
+index 8dd2035bc..226dea48a 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
-@@ -2705,7 +2705,7 @@
+@@ -2706,7 +2706,7 @@
  
  (define_int_iterator SVE_FP_UNARY [UNSPEC_FRECPE UNSPEC_RSQRTE])
  
@@ -1712,7 +1712,7 @@ index bf4319aae..2bb08313c 100644
  
  (define_int_iterator SVE_INT_SHIFT_IMM [UNSPEC_ASRD
  					(UNSPEC_SQSHLU "TARGET_SVE2")
-@@ -2719,7 +2719,7 @@
+@@ -2720,7 +2720,7 @@
  (define_int_iterator SVE_BFLOAT_TERNARY_LONG [UNSPEC_BFDOT
  					      UNSPEC_BFMLALB
  					      UNSPEC_BFMLALT
@@ -4502,5 +4502,5 @@ index 3c5d96de4..57d60a350 100644
  
  #include "test_sve_acle.h"
 -- 
-2.19.1
+2.33.0
 
diff --git a/0180-testsuite-fix-dg-do-preprocess-typo.patch b/0180-testsuite-fix-dg-do-preprocess-typo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8efaca35af6a901aeb86f35628f7a808f6264543
--- /dev/null
+++ b/0180-testsuite-fix-dg-do-preprocess-typo.patch
@@ -0,0 +1,26 @@
+From 35d804730dcac1b3e96db4b587de0cd77fec3504 Mon Sep 17 00:00:00 2001
+From: Sam James 
+Date: Tue, 30 Jul 2024 21:46:29 +0100
+Subject: [PATCH 180/188] testsuite: fix 'dg-do-preprocess' typo
+
+We want 'dg-do preprocess', not 'dg-do-preprocess'. Fix that.
+
+	PR target/106828
+	* g++.target/loongarch/pr106828.C: Fix 'dg-do compile' typo.
+---
+ gcc/testsuite/g++.target/loongarch/pr106828.C | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/testsuite/g++.target/loongarch/pr106828.C b/gcc/testsuite/g++.target/loongarch/pr106828.C
+index 190c1db71..0d13cbbd5 100644
+--- a/gcc/testsuite/g++.target/loongarch/pr106828.C
++++ b/gcc/testsuite/g++.target/loongarch/pr106828.C
+@@ -1,4 +1,4 @@
+-/* { dg-do-preprocess } */
++/* { dg-do preprocess } */
+ /* { dg-options "-mabi=lp64d -fsanitize=address" } */
+ 
+ /* Tests whether the compiler supports compile option '-fsanitize=address'.  */
+-- 
+2.43.0
+
diff --git a/SME-0078-AArch64-Support-new-tbranch-optab.patch b/0181-Backport-SME-AArch64-Support-new-tbranch-optab.patch
similarity index 95%
rename from SME-0078-AArch64-Support-new-tbranch-optab.patch
rename to 0181-Backport-SME-AArch64-Support-new-tbranch-optab.patch
index ced6d0dab0e61c80b150fac407435ee5b53c4a1c..77f70f40b9a1f932ced619b61362dd8e28c2ea49 100644
--- a/SME-0078-AArch64-Support-new-tbranch-optab.patch
+++ b/0181-Backport-SME-AArch64-Support-new-tbranch-optab.patch
@@ -1,7 +1,7 @@
-From a42ab8ec165df405d89184a849f787e799db9567 Mon Sep 17 00:00:00 2001
+From da06b276b6ae281efad2ec3b982e09b1f4015917 Mon Sep 17 00:00:00 2001
 From: Tamar Christina 
 Date: Mon, 12 Dec 2022 15:18:56 +0000
-Subject: [PATCH 078/144] AArch64: Support new tbranch optab.
+Subject: [PATCH 082/157] [Backport][SME] AArch64: Support new tbranch optab.
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=17ae956c0fa6baac3d22764019d5dd5ebf5c2b11
 
@@ -74,10 +74,10 @@ gcc/testsuite/ChangeLog:
  create mode 100644 gcc/testsuite/gcc.target/aarch64/tbz_1.c
 
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 57a0f8d00..d35852738 100644
+index 079c8a3f9..2becc888e 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -951,12 +951,29 @@
+@@ -953,12 +953,29 @@
  		      (const_int 1)))]
  )
  
@@ -112,7 +112,7 @@ index 57a0f8d00..d35852738 100644
  		   (const_int 0))
  	     (label_ref (match_operand 2 "" ""))
  	     (pc)))
-@@ -967,15 +984,15 @@
+@@ -969,15 +986,15 @@
        {
  	if (get_attr_far_branch (insn) == 1)
  	  return aarch64_gen_far_branch (operands, 2, "Ltb",
@@ -132,7 +132,7 @@ index 57a0f8d00..d35852738 100644
    [(set_attr "type" "branch")
     (set (attr "length")
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index 2bb08313c..1ffc49889 100644
+index 226dea48a..b616f5c9a 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
 @@ -1104,6 +1104,8 @@
@@ -246,5 +246,5 @@ index 000000000..39deb58e2
 +    h ();
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/0181-LoongArch-Remove-gawk-extension-from-a-generator-scr.patch b/0181-LoongArch-Remove-gawk-extension-from-a-generator-scr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..eae11a0d7bd966f1be01d596d684d83cb72e1589
--- /dev/null
+++ b/0181-LoongArch-Remove-gawk-extension-from-a-generator-scr.patch
@@ -0,0 +1,47 @@
+From 643248a4c60c016af44bc740b35c7ac174849029 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Tue, 23 Jul 2024 10:04:26 +0800
+Subject: [PATCH 181/188] LoongArch: Remove gawk extension from a generator
+ script.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/gen-evolution.awk: Do not use
+	"length()" to compute the size of an array.
+---
+ gcc/config/loongarch/genopts/gen-evolution.awk | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/gen-evolution.awk b/gcc/config/loongarch/genopts/gen-evolution.awk
+index 4d105afa9..1c8004e41 100644
+--- a/gcc/config/loongarch/genopts/gen-evolution.awk
++++ b/gcc/config/loongarch/genopts/gen-evolution.awk
+@@ -1,4 +1,4 @@
+-#!/usr/bin/gawk
++#!/usr/bin/awk -f
+ #
+ # A simple script that generates loongarch-evolution.h
+ # from genopts/isa-evolution.in
+@@ -94,8 +94,9 @@ function gen_cpucfg_useful_idx()
+         idx_bucket[cpucfg_word[i]] = 1
+ 
+     delete idx_list
++    j = 1
+     for (i in idx_bucket)
+-        idx_list[length(idx_list)-1] = i+0
++        idx_list[j++] = i+0
+     delete idx_bucket
+ 
+     asort (idx_list)
+@@ -108,7 +109,7 @@ function gen_cpucfg_useful_idx()
+     print ""
+ 
+     printf ("static constexpr int N_CPUCFG_WORDS = %d;\n",
+-            idx_list[length(idx_list)] + 1)
++            idx_list[j - 1] + 1)
+ 
+     delete idx_list
+ }
+-- 
+2.43.0
+
diff --git a/SME-0079-aarch64-Use-local-frame-vars-in-shrink-wrapping-code.patch b/0182-Backport-SME-aarch64-Use-local-frame-vars-in-shrink-.patch
similarity index 89%
rename from SME-0079-aarch64-Use-local-frame-vars-in-shrink-wrapping-code.patch
rename to 0182-Backport-SME-aarch64-Use-local-frame-vars-in-shrink-.patch
index dda995cc9598495dcbce7978f2693a9e17e726ce..257cdb35a5914745e23608cc7bddf91ee43fcd67 100644
--- a/SME-0079-aarch64-Use-local-frame-vars-in-shrink-wrapping-code.patch
+++ b/0182-Backport-SME-aarch64-Use-local-frame-vars-in-shrink-.patch
@@ -1,7 +1,8 @@
-From 0d240935072ac90b9b8568e2750a48a840bf1b81 Mon Sep 17 00:00:00 2001
+From 755f67b1abd70b3c3ea20076fe60c1d303bf1e0c Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:04 +0100
-Subject: [PATCH 079/144] aarch64: Use local frame vars in shrink-wrapping code
+Subject: [PATCH 083/157] [Backport][SME] aarch64: Use local frame vars in
+ shrink-wrapping code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=76d89da25af3064e80c9b7b584c678ff72b1f0bd
 
@@ -26,10 +27,10 @@ gcc/
  1 file changed, 64 insertions(+), 59 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 7058359c1..faf7db11c 100644
+index 08a98f8ba..b7da1d0be 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8734,7 +8734,7 @@ aarch64_layout_frame (void)
+@@ -8951,7 +8951,7 @@ aarch64_layout_frame (void)
    frame.is_scs_enabled
      = (!crtl->calls_eh_return
         && sanitize_flags_p (SANITIZE_SHADOW_CALL_STACK)
@@ -38,7 +39,7 @@ index 7058359c1..faf7db11c 100644
  
    /* When shadow call stack is enabled, the scs_pop in the epilogue will
       restore x30, and we don't need to pop x30 again in the traditional
-@@ -9146,6 +9146,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9363,6 +9363,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
  			   unsigned start, unsigned limit, bool skip_wb,
  			   bool hard_fp_valid_p)
  {
@@ -46,7 +47,7 @@ index 7058359c1..faf7db11c 100644
    rtx_insn *insn;
    unsigned regno;
    unsigned regno2;
-@@ -9160,8 +9161,8 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9377,8 +9378,8 @@ aarch64_save_callee_saves (poly_int64 start_offset,
        bool frame_related_p = aarch64_emit_cfi_for_reg_p (regno);
  
        if (skip_wb
@@ -57,7 +58,7 @@ index 7058359c1..faf7db11c 100644
  	continue;
  
        if (cfun->machine->reg_is_wrapped_separately[regno])
-@@ -9169,7 +9170,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9386,7 +9387,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
  
        machine_mode mode = aarch64_reg_save_mode (regno);
        reg = gen_rtx_REG (mode, regno);
@@ -66,7 +67,7 @@ index 7058359c1..faf7db11c 100644
        rtx base_rtx = stack_pointer_rtx;
        poly_int64 sp_offset = offset;
  
-@@ -9182,7 +9183,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9399,7 +9400,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
  	{
  	  gcc_assert (known_eq (start_offset, 0));
  	  poly_int64 fp_offset
@@ -75,7 +76,7 @@ index 7058359c1..faf7db11c 100644
  	  if (hard_fp_valid_p)
  	    base_rtx = hard_frame_pointer_rtx;
  	  else
-@@ -9204,8 +9205,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9421,8 +9422,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
  	  && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
  	  && !cfun->machine->reg_is_wrapped_separately[regno2]
  	  && known_eq (GET_MODE_SIZE (mode),
@@ -85,7 +86,7 @@ index 7058359c1..faf7db11c 100644
  	{
  	  rtx reg2 = gen_rtx_REG (mode, regno2);
  	  rtx mem2;
-@@ -9255,6 +9255,7 @@ static void
+@@ -9472,6 +9472,7 @@ static void
  aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
  			      unsigned limit, bool skip_wb, rtx *cfi_ops)
  {
@@ -93,7 +94,7 @@ index 7058359c1..faf7db11c 100644
    unsigned regno;
    unsigned regno2;
    poly_int64 offset;
-@@ -9271,13 +9272,13 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+@@ -9488,13 +9489,13 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
        rtx reg, mem;
  
        if (skip_wb
@@ -110,7 +111,7 @@ index 7058359c1..faf7db11c 100644
        rtx base_rtx = stack_pointer_rtx;
        if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
-@@ -9288,8 +9289,7 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+@@ -9505,8 +9506,7 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
  	  && (regno2 = aarch64_next_callee_save (regno + 1, limit)) <= limit
  	  && !cfun->machine->reg_is_wrapped_separately[regno2]
  	  && known_eq (GET_MODE_SIZE (mode),
@@ -120,7 +121,7 @@ index 7058359c1..faf7db11c 100644
  	{
  	  rtx reg2 = gen_rtx_REG (mode, regno2);
  	  rtx mem2;
-@@ -9394,6 +9394,7 @@ offset_12bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
+@@ -9611,6 +9611,7 @@ offset_12bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
  static sbitmap
  aarch64_get_separate_components (void)
  {
@@ -128,7 +129,7 @@ index 7058359c1..faf7db11c 100644
    sbitmap components = sbitmap_alloc (LAST_SAVED_REGNUM + 1);
    bitmap_clear (components);
  
-@@ -9410,18 +9411,18 @@ aarch64_get_separate_components (void)
+@@ -9627,18 +9628,18 @@ aarch64_get_separate_components (void)
  	if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	  continue;
  
@@ -150,7 +151,7 @@ index 7058359c1..faf7db11c 100644
  	else
  	  offset += crtl->outgoing_args_size;
  
-@@ -9440,11 +9441,11 @@ aarch64_get_separate_components (void)
+@@ -9657,11 +9658,11 @@ aarch64_get_separate_components (void)
    /* If the spare predicate register used by big-endian SVE code
       is call-preserved, it must be saved in the main prologue
       before any saves that use it.  */
@@ -166,7 +167,7 @@ index 7058359c1..faf7db11c 100644
    /* If registers have been chosen to be stored/restored with
       writeback don't interfere with them to avoid having to output explicit
       stack adjustment instructions.  */
-@@ -9553,6 +9554,7 @@ aarch64_get_next_set_bit (sbitmap bmp, unsigned int start)
+@@ -9770,6 +9771,7 @@ aarch64_get_next_set_bit (sbitmap bmp, unsigned int start)
  static void
  aarch64_process_components (sbitmap components, bool prologue_p)
  {
@@ -174,7 +175,7 @@ index 7058359c1..faf7db11c 100644
    rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
  			     ? HARD_FRAME_POINTER_REGNUM
  			     : STACK_POINTER_REGNUM);
-@@ -9567,9 +9569,9 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+@@ -9784,9 +9786,9 @@ aarch64_process_components (sbitmap components, bool prologue_p)
        machine_mode mode = aarch64_reg_save_mode (regno);
        
        rtx reg = gen_rtx_REG (mode, regno);
@@ -186,7 +187,7 @@ index 7058359c1..faf7db11c 100644
        else
  	offset += crtl->outgoing_args_size;
  
-@@ -9594,14 +9596,14 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+@@ -9811,14 +9813,14 @@ aarch64_process_components (sbitmap components, bool prologue_p)
  	  break;
  	}
  
@@ -203,7 +204,7 @@ index 7058359c1..faf7db11c 100644
  		       GET_MODE_SIZE (mode)))
  	{
  	  insn = emit_insn (set);
-@@ -9623,7 +9625,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+@@ -9840,7 +9842,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
        /* REGNO2 can be saved/restored in a pair with REGNO.  */
        rtx reg2 = gen_rtx_REG (mode, regno2);
        if (frame_pointer_needed)
@@ -212,7 +213,7 @@ index 7058359c1..faf7db11c 100644
        else
  	offset2 += crtl->outgoing_args_size;
        rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
-@@ -9718,6 +9720,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -9935,6 +9937,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
  					bool frame_related_p,
  					bool final_adjustment_p)
  {
@@ -220,7 +221,7 @@ index 7058359c1..faf7db11c 100644
    HOST_WIDE_INT guard_size
      = 1 << param_stack_clash_protection_guard_size;
    HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
-@@ -9738,25 +9741,25 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -9955,25 +9958,25 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
         register as a probe.  We can't assume that LR was saved at position 0
         though, so treat any space below it as unprobed.  */
    if (final_adjustment_p
@@ -252,7 +253,7 @@ index 7058359c1..faf7db11c 100644
  
        if (known_eq (frame_size, 0))
  	{
-@@ -10045,17 +10048,18 @@ aarch64_epilogue_uses (int regno)
+@@ -10262,17 +10265,18 @@ aarch64_epilogue_uses (int regno)
  void
  aarch64_expand_prologue (void)
  {
@@ -281,7 +282,7 @@ index 7058359c1..faf7db11c 100644
    rtx_insn *insn;
  
    if (flag_stack_clash_protection && known_eq (callee_adjust, 0))
-@@ -10086,7 +10090,7 @@ aarch64_expand_prologue (void)
+@@ -10303,7 +10307,7 @@ aarch64_expand_prologue (void)
      }
  
    /* Push return address to shadow call stack.  */
@@ -290,7 +291,7 @@ index 7058359c1..faf7db11c 100644
      emit_insn (gen_scs_push ());
  
    if (flag_stack_usage_info)
-@@ -10125,7 +10129,7 @@ aarch64_expand_prologue (void)
+@@ -10342,7 +10346,7 @@ aarch64_expand_prologue (void)
  
    /* The offset of the frame chain record (if any) from the current SP.  */
    poly_int64 chain_offset = (initial_adjust + callee_adjust
@@ -299,7 +300,7 @@ index 7058359c1..faf7db11c 100644
    gcc_assert (known_ge (chain_offset, 0));
  
    /* The offset of the bottom of the save area from the current SP.  */
-@@ -10228,16 +10232,17 @@ aarch64_use_return_insn_p (void)
+@@ -10445,16 +10449,17 @@ aarch64_use_return_insn_p (void)
  void
  aarch64_expand_epilogue (rtx_call_insn *sibcall)
  {
@@ -326,7 +327,7 @@ index 7058359c1..faf7db11c 100644
  			   ? R29_REGNUM : R30_REGNUM);
    rtx cfi_ops = NULL;
    rtx_insn *insn;
-@@ -10271,7 +10276,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10488,7 +10493,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
    /* We need to add memory barrier to prevent read from deallocated stack.  */
    bool need_barrier_p
      = maybe_ne (get_frame_size ()
@@ -335,7 +336,7 @@ index 7058359c1..faf7db11c 100644
  
    /* Emit a barrier to prevent loads from a deallocated stack.  */
    if (maybe_gt (final_adjust, crtl->outgoing_args_size)
-@@ -10352,7 +10357,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10569,7 +10574,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
      }
  
    /* Pop return address from shadow call stack.  */
@@ -344,7 +345,7 @@ index 7058359c1..faf7db11c 100644
      {
        machine_mode mode = aarch64_reg_save_mode (R30_REGNUM);
        rtx reg = gen_rtx_REG (mode, R30_REGNUM);
-@@ -12806,24 +12811,24 @@ aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
+@@ -13023,24 +13028,24 @@ aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
  poly_int64
  aarch64_initial_elimination_offset (unsigned from, unsigned to)
  {
@@ -376,5 +377,5 @@ index 7058359c1..faf7db11c 100644
  
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0182-LoongArch-Use-iorn-and-andn-standard-pattern-names.patch b/0182-LoongArch-Use-iorn-and-andn-standard-pattern-names.patch
new file mode 100644
index 0000000000000000000000000000000000000000..74cc6100abe486d5f3cb3d9ebb96371442c87f4c
--- /dev/null
+++ b/0182-LoongArch-Use-iorn-and-andn-standard-pattern-names.patch
@@ -0,0 +1,226 @@
+From 64560e75b4d020b6c47e07592595ceed663541af Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 1 Aug 2024 16:07:25 +0800
+Subject: [PATCH 182/188] LoongArch: Use iorn and andn standard pattern names.
+
+R15-1890 introduced the new optabs iorc and andc, and their corresponding
+internal functions BIT_{ANDC,IORC}, for targets that define such optabs
+for vector modes.  In r15-2258 iorc and andc were renamed to
+iorn and andn.
+So we changed the andn and iorn implementation templates to the standard
+template names.
+
+gcc/ChangeLog:
+
+	* config/loongarch/lasx.md (xvandn3): Rename to ...
+	(andn3): This.
+	(xvorn3): Rename to ...
+	(iorn3): This.
+	* config/loongarch/loongarch-builtins.cc
+	(CODE_FOR_lsx_vandn_v): Defined as the modified name.
+	(CODE_FOR_lsx_vorn_v): Likewise.
+	(CODE_FOR_lasx_xvandn_v): Likewise.
+	(CODE_FOR_lasx_xvorn_v): Likewise.
+	(loongarch_expand_builtin_insn): When the builtin function to be
+	called is __builtin_lasx_xvandn or __builtin_lsx_vandn, swap the
+	two operands.
+	* config/loongarch/loongarch.md (n): Rename to ...
+	(n3): This.
+	* config/loongarch/lsx.md (vandn3): Rename to ...
+	(andn3): This.
+	(vorn3): Rename to ...
+	(iorn3): This.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/lasx-andn-iorn.c: New test.
+	* gcc.target/loongarch/lsx-andn-iorn.c: New test.
+---
+ gcc/config/loongarch/lasx.md                  | 10 +++----
+ gcc/config/loongarch/loongarch-builtins.cc    | 10 ++++---
+ gcc/config/loongarch/loongarch.md             |  8 +++---
+ gcc/config/loongarch/lsx.md                   | 10 +++----
+ .../gcc.target/loongarch/lasx-andn-iorn.c     | 11 ++++++++
+ .../gcc.target/loongarch/lsx-andn-iorn.c      | 28 +++++++++++++++++++
+ 6 files changed, 59 insertions(+), 18 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/lasx-andn-iorn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/lsx-andn-iorn.c
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 44a7d58ff..3775155ca 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -2716,12 +2716,12 @@
+    (set_attr "mode" "V4DI")])
+ 
+ ;; Extend loongson-sx to loongson-asx.
+-(define_insn "xvandn3"
++(define_insn "andn3"
+   [(set (match_operand:LASX 0 "register_operand" "=f")
+-	(and:LASX (not:LASX (match_operand:LASX 1 "register_operand" "f"))
+-			    (match_operand:LASX 2 "register_operand" "f")))]
++	(and:LASX (not:LASX (match_operand:LASX 2 "register_operand" "f"))
++			    (match_operand:LASX 1 "register_operand" "f")))]
+   "ISA_HAS_LASX"
+-  "xvandn.v\t%u0,%u1,%u2"
++  "xvandn.v\t%u0,%u2,%u1"
+   [(set_attr "type" "simd_logic")
+    (set_attr "mode" "")])
+ 
+@@ -4637,7 +4637,7 @@
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "")])
+ 
+-(define_insn "xvorn3"
++(define_insn "iorn3"
+   [(set (match_operand:ILASX 0 "register_operand" "=f")
+ 	(ior:ILASX (not:ILASX (match_operand:ILASX 2 "register_operand" "f"))
+ 		   (match_operand:ILASX 1 "register_operand" "f")))]
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index 51abba007..f9ff85d2e 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -458,8 +458,8 @@ AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && ISA_HAS_FRECIPE)
+ #define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du
+ #define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s
+ #define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d
+-#define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3
+-#define CODE_FOR_lsx_vorn_v CODE_FOR_vornv16qi3
++#define CODE_FOR_lsx_vandn_v CODE_FOR_andnv16qi3
++#define CODE_FOR_lsx_vorn_v CODE_FOR_iornv16qi3
+ #define CODE_FOR_lsx_vneg_b CODE_FOR_vnegv16qi2
+ #define CODE_FOR_lsx_vneg_h CODE_FOR_vnegv8hi2
+ #define CODE_FOR_lsx_vneg_w CODE_FOR_vnegv4si2
+@@ -692,8 +692,8 @@ AVAIL_ALL (lasx_frecipe, ISA_HAS_LASX && ISA_HAS_FRECIPE)
+ #define CODE_FOR_lasx_xvrepli_w CODE_FOR_lasx_xvrepliv8si
+ #define CODE_FOR_lasx_xvrepli_d CODE_FOR_lasx_xvrepliv4di
+ 
+-#define CODE_FOR_lasx_xvandn_v CODE_FOR_xvandnv32qi3
+-#define CODE_FOR_lasx_xvorn_v CODE_FOR_xvornv32qi3
++#define CODE_FOR_lasx_xvandn_v CODE_FOR_andnv32qi3
++#define CODE_FOR_lasx_xvorn_v CODE_FOR_iornv32qi3
+ #define CODE_FOR_lasx_xvneg_b CODE_FOR_negv32qi2
+ #define CODE_FOR_lasx_xvneg_h CODE_FOR_negv16hi2
+ #define CODE_FOR_lasx_xvneg_w CODE_FOR_negv8si2
+@@ -2853,6 +2853,7 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lsx_vpickod_b:
+     case CODE_FOR_lsx_vpickod_h:
+     case CODE_FOR_lsx_vpickod_w:
++    case CODE_FOR_lsx_vandn_v:
+     case CODE_FOR_lasx_xvilvh_b:
+     case CODE_FOR_lasx_xvilvh_h:
+     case CODE_FOR_lasx_xvilvh_w:
+@@ -2873,6 +2874,7 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lasx_xvpickod_b:
+     case CODE_FOR_lasx_xvpickod_h:
+     case CODE_FOR_lasx_xvpickod_w:
++    case CODE_FOR_lasx_xvandn_v:
+       /* Swap the operands 1 and 2 for interleave operations.  Built-ins follow
+ 	 convention of ISA, which have op1 as higher component and op2 as lower
+ 	 component.  However, the VEC_PERM op in tree and vec_concat in RTL
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index b1c828dba..58c8f28ed 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1701,13 +1701,13 @@
+   [(set_attr "type" "logical")
+    (set_attr "mode" "SI")])
+ 
+-(define_insn "n"
++(define_insn "n3"
+   [(set (match_operand:X 0 "register_operand" "=r")
+ 	(neg_bitwise:X
+-	    (not:X (match_operand:X 1 "register_operand" "r"))
+-	    (match_operand:X 2 "register_operand" "r")))]
++	    (not:X (match_operand:X 2 "register_operand" "r"))
++	    (match_operand:X 1 "register_operand" "r")))]
+   ""
+-  "n\t%0,%2,%1"
++  "n\t%0,%1,%2"
+   [(set_attr "type" "logical")
+    (set_attr "mode" "")])
+ 
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index 2eac11473..c7480aafd 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -2344,12 +2344,12 @@
+ }
+   [(set_attr "mode" "V4SF")])
+ 
+-(define_insn "vandn3"
++(define_insn "andn3"
+   [(set (match_operand:LSX 0 "register_operand" "=f")
+-	(and:LSX (not:LSX (match_operand:LSX 1 "register_operand" "f"))
+-		 (match_operand:LSX 2 "register_operand" "f")))]
++	(and:LSX (not:LSX (match_operand:LSX 2 "register_operand" "f"))
++		 (match_operand:LSX 1 "register_operand" "f")))]
+   "ISA_HAS_LSX"
+-  "vandn.v\t%w0,%w1,%w2"
++  "vandn.v\t%w0,%w2,%w1"
+   [(set_attr "type" "simd_logic")
+    (set_attr "mode" "")])
+ 
+@@ -3028,7 +3028,7 @@
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "")])
+ 
+-(define_insn "vorn3"
++(define_insn "iorn3"
+   [(set (match_operand:ILSX 0 "register_operand" "=f")
+ 	(ior:ILSX (not:ILSX (match_operand:ILSX 2 "register_operand" "f"))
+ 		  (match_operand:ILSX 1 "register_operand" "f")))]
+diff --git a/gcc/testsuite/gcc.target/loongarch/lasx-andn-iorn.c b/gcc/testsuite/gcc.target/loongarch/lasx-andn-iorn.c
+new file mode 100644
+index 000000000..4aa5f19a6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/lasx-andn-iorn.c
+@@ -0,0 +1,11 @@
++#define N 8
++
++#include "./lsx-andn-iorn.c"
++
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlasx -ftree-vectorize" } */
++
++/* We should produce a BIT_ANDC and BIT_IORC here.  */
++
++/* { dg-final { scan-tree-dump ".BIT_ANDN " "optimized" } } */
++/* { dg-final { scan-tree-dump ".BIT_IORN " "optimized" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/lsx-andn-iorn.c b/gcc/testsuite/gcc.target/loongarch/lsx-andn-iorn.c
+new file mode 100644
+index 000000000..7bceccd37
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/lsx-andn-iorn.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mlsx -ftree-vectorize" } */
++
++#ifndef N
++#define N 4
++#endif
++
++extern float a[N], b[N];
++extern int c[N], d[N];
++
++void
++bar1 (void)
++{
++  for (int i = 0; i < N; i++)
++    d[i] = a[i] > b[i] ? 0 : c[i];
++}
++
++void
++bar2 (void)
++{
++  for (int i = 0; i < N; i++)
++    d[i] = a[i] > b[i] ? c[i]: -1;
++}
++
++/* We should produce a BIT_ANDC and BIT_IORC here.  */
++
++/* { dg-final { scan-tree-dump ".BIT_ANDN " "optimized" } } */
++/* { dg-final { scan-tree-dump ".BIT_IORN " "optimized" } } */
+-- 
+2.43.0
+
diff --git a/SME-0080-aarch64-Avoid-a-use-of-callee_offset.patch b/0183-Backport-SME-aarch64-Avoid-a-use-of-callee_offset.patch
similarity index 65%
rename from SME-0080-aarch64-Avoid-a-use-of-callee_offset.patch
rename to 0183-Backport-SME-aarch64-Avoid-a-use-of-callee_offset.patch
index dd832e11f3c33915b7a2c5f11144604f66174f46..7a2c9e2db5f4bce64d9f7d927171024e68f8b061 100644
--- a/SME-0080-aarch64-Avoid-a-use-of-callee_offset.patch
+++ b/0183-Backport-SME-aarch64-Avoid-a-use-of-callee_offset.patch
@@ -1,7 +1,7 @@
-From 9b3d13d40d023b24e46bf3e255a89e15ab5aafc1 Mon Sep 17 00:00:00 2001
+From 54a6e52207703a8643fc406175377105f887ebef Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:04 +0100
-Subject: [PATCH 080/144] aarch64: Avoid a use of callee_offset
+Subject: [PATCH] [Backport][SME] aarch64: Avoid a use of callee_offset
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=f9ab771fa8cd747f34786c6f33deea32c2eb828b
 
@@ -49,10 +49,17 @@ gcc/
  1 file changed, 1 insertion(+), 3 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index faf7db11c..80412b6c5 100644
+index b7da1d0be..fbd7a079a 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -10053,7 +10053,6 @@ aarch64_expand_prologue (void)
+@@ -10263,21 +10263,20 @@ aarch64_epilogue_uses (int regno)
+    current FP is also set up if it is in use.  */
+ 
+ void
+ aarch64_expand_prologue (void)
+ {
+   aarch64_frame &frame = cfun->machine->frame;
+   poly_int64 frame_size = frame.frame_size;
    poly_int64 initial_adjust = frame.initial_adjust;
    HOST_WIDE_INT callee_adjust = frame.callee_adjust;
    poly_int64 final_adjust = frame.final_adjust;
@@ -60,7 +67,21 @@ index faf7db11c..80412b6c5 100644
    poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
    poly_int64 below_hard_fp_saved_regs_size
      = frame.below_hard_fp_saved_regs_size;
-@@ -10166,8 +10165,7 @@ aarch64_expand_prologue (void)
+   unsigned reg1 = frame.wb_push_candidate1;
+   unsigned reg2 = frame.wb_push_candidate2;
+   bool emit_frame_chain = frame.emit_frame_chain;
+   rtx_insn *insn;
+ 
+   if (flag_stack_clash_protection && known_eq (callee_adjust, 0))
+     {
+@@ -10376,22 +10375,21 @@ aarch64_expand_prologue (void)
+ 	     the CFA based on the frame pointer.  We therefore need new
+ 	     DW_CFA_expressions to re-express the save slots with addresses
+ 	     based on the frame pointer.  */
+ 	  rtx_insn *insn = get_last_insn ();
+ 	  gcc_assert (RTX_FRAME_RELATED_P (insn));
+ 
+ 	  /* Add an explicit CFA definition if this was previously
  	     implicit.  */
  	  if (!find_reg_note (insn, REG_CFA_ADJUST_CFA, NULL_RTX))
  	    {
@@ -70,6 +91,13 @@ index faf7db11c..80412b6c5 100644
  	      add_reg_note (insn, REG_CFA_ADJUST_CFA,
  			    gen_rtx_SET (hard_frame_pointer_rtx, src));
  	    }
+ 
+ 	  /* Change the save slot expressions for the registers that
+ 	     we've already saved.  */
+ 	  aarch64_add_cfa_expression (insn, regno_reg_rtx[reg2],
+ 				      hard_frame_pointer_rtx, UNITS_PER_WORD);
+ 	  aarch64_add_cfa_expression (insn, regno_reg_rtx[reg1],
+ 				      hard_frame_pointer_rtx, 0);
 -- 
-2.19.1
+2.38.1.windows.1
 
diff --git a/0183-LoongArch-Drop-vcond-u-expanders.patch b/0183-LoongArch-Drop-vcond-u-expanders.patch
new file mode 100644
index 0000000000000000000000000000000000000000..30858898bb16219274e81a2aa957dd9ebdebfd70
--- /dev/null
+++ b/0183-LoongArch-Drop-vcond-u-expanders.patch
@@ -0,0 +1,127 @@
+From 8394519779553a2c59214d76054dd1ba87a380b3 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 8 Aug 2024 10:39:54 +0800
+Subject: [PATCH 183/188] LoongArch: Drop vcond{,u} expanders.
+
+Optabs vcond{,u} will be removed for GCC 15.  Since regtest shows no
+fallout, dropping the expanders, now.
+
+gcc/ChangeLog:
+
+	PR target/114189
+	* config/loongarch/lasx.md (vcondu): Delete.
+	(vcond): Likewise.
+	* config/loongarch/lsx.md (vcondu): Likewise.
+	(vcond): Likewise.
+---
+ gcc/config/loongarch/lasx.md | 37 ------------------------------------
+ gcc/config/loongarch/lsx.md  | 31 ------------------------------
+ 2 files changed, 68 deletions(-)
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+index 3775155ca..be2f6ca8e 100644
+--- a/gcc/config/loongarch/lasx.md
++++ b/gcc/config/loongarch/lasx.md
+@@ -165,9 +165,6 @@
+ ;; All vector modes with 256 bits.
+ (define_mode_iterator LASX [V4DF V8SF V4DI V8SI V16HI V32QI])
+ 
+-;; Same as LASX.  Used by vcond to iterate two modes.
+-(define_mode_iterator LASX_2 [V4DF V8SF V4DI V8SI V16HI V32QI])
+-
+ ;; Only used for splitting insert_d and copy_{u,s}.d.
+ (define_mode_iterator LASX_D [V4DI V4DF])
+ 
+@@ -762,40 +759,6 @@
+    DONE;
+ })
+ 
+-;; FIXME: 256??
+-(define_expand "vcondu"
+-  [(match_operand:LASX 0 "register_operand")
+-   (match_operand:LASX 1 "reg_or_m1_operand")
+-   (match_operand:LASX 2 "reg_or_0_operand")
+-   (match_operator 3 ""
+-    [(match_operand:ILASX 4 "register_operand")
+-     (match_operand:ILASX 5 "register_operand")])]
+-  "ISA_HAS_LASX
+-   && (GET_MODE_NUNITS (mode)
+-       == GET_MODE_NUNITS (mode))"
+-{
+-  loongarch_expand_vec_cond_expr (mode, mode,
+-				  operands);
+-  DONE;
+-})
+-
+-;; FIXME: 256??
+-(define_expand "vcond"
+-  [(match_operand:LASX 0 "register_operand")
+-   (match_operand:LASX 1 "reg_or_m1_operand")
+-   (match_operand:LASX 2 "reg_or_0_operand")
+-   (match_operator 3 ""
+-     [(match_operand:LASX_2 4 "register_operand")
+-      (match_operand:LASX_2 5 "register_operand")])]
+-  "ISA_HAS_LASX
+-   && (GET_MODE_NUNITS (mode)
+-       == GET_MODE_NUNITS (mode))"
+-{
+-  loongarch_expand_vec_cond_expr (mode, mode,
+-				  operands);
+-  DONE;
+-})
+-
+ ;; Same as vcond_
+ (define_expand "vcond_mask_"
+   [(match_operand:LASX 0 "register_operand")
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+index c7480aafd..5cb5bc61f 100644
+--- a/gcc/config/loongarch/lsx.md
++++ b/gcc/config/loongarch/lsx.md
+@@ -186,9 +186,6 @@
+ ;; All vector modes with 128 bits.
+ (define_mode_iterator LSX      [V2DF V4SF V2DI V4SI V8HI V16QI])
+ 
+-;; Same as LSX.  Used by vcond to iterate two modes.
+-(define_mode_iterator LSX_2    [V2DF V4SF V2DI V4SI V8HI V16QI])
+-
+ ;; Only used for vilvh and splitting insert_d and copy_{u,s}.d.
+ (define_mode_iterator LSX_D    [V2DI V2DF])
+ 
+@@ -533,34 +530,6 @@
+   DONE;
+ })
+ 
+-(define_expand "vcondu"
+-  [(match_operand:LSX 0 "register_operand")
+-   (match_operand:LSX 1 "reg_or_m1_operand")
+-   (match_operand:LSX 2 "reg_or_0_operand")
+-   (match_operator 3 ""
+-     [(match_operand:ILSX 4 "register_operand")
+-      (match_operand:ILSX 5 "register_operand")])]
+-  "ISA_HAS_LSX
+-   && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))"
+-{
+-  loongarch_expand_vec_cond_expr (mode, mode, operands);
+-  DONE;
+-})
+-
+-(define_expand "vcond"
+-  [(match_operand:LSX 0 "register_operand")
+-   (match_operand:LSX 1 "reg_or_m1_operand")
+-   (match_operand:LSX 2 "reg_or_0_operand")
+-   (match_operator 3 ""
+-     [(match_operand:LSX_2 4 "register_operand")
+-      (match_operand:LSX_2 5 "register_operand")])]
+-  "ISA_HAS_LSX
+-   && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))"
+-{
+-  loongarch_expand_vec_cond_expr (mode, mode, operands);
+-  DONE;
+-})
+-
+ (define_expand "vcond_mask_"
+   [(match_operand:LSX 0 "register_operand")
+    (match_operand:LSX 1 "reg_or_m1_operand")
+-- 
+2.43.0
+
diff --git a/SME-0081-aarch64-Explicitly-handle-frames-with-no-saved-regis.patch b/0184-Backport-SME-aarch64-Explicitly-handle-frames-with-n.patch
similarity index 87%
rename from SME-0081-aarch64-Explicitly-handle-frames-with-no-saved-regis.patch
rename to 0184-Backport-SME-aarch64-Explicitly-handle-frames-with-n.patch
index 14cd79a6ef5a0610e3adfc25b1ca624252952522..3af28ed5b5f043fe06de5b17d3ce5da351955409 100644
--- a/SME-0081-aarch64-Explicitly-handle-frames-with-no-saved-regis.patch
+++ b/0184-Backport-SME-aarch64-Explicitly-handle-frames-with-n.patch
@@ -1,8 +1,8 @@
-From 808d452fd0802d1252b091b202aceb7231c30f64 Mon Sep 17 00:00:00 2001
+From 82bbe6513987a7656150110164e25f44fe410796 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:05 +0100
-Subject: [PATCH 081/144] aarch64: Explicitly handle frames with no saved
- registers
+Subject: [PATCH 085/157] [Backport][SME] aarch64: Explicitly handle frames
+ with no saved registers
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c601c918c9ac01ef8315774a642ff924f77c85e5
 
@@ -28,10 +28,10 @@ gcc/
  1 file changed, 5 insertions(+), 3 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 80412b6c5..86eb1cac9 100644
+index fbd7a079a..c59af6b1c 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8761,9 +8761,11 @@ aarch64_layout_frame (void)
+@@ -8978,9 +8978,11 @@ aarch64_layout_frame (void)
  
    HOST_WIDE_INT const_size, const_outgoing_args_size, const_fp_offset;
    HOST_WIDE_INT const_saved_regs_size;
@@ -47,5 +47,5 @@ index 80412b6c5..86eb1cac9 100644
        /* Simple, small frame with no outgoing arguments:
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/0184-LoongArch-Provide-ashr-lshr-and-ashl-RTL-pattern-for.patch b/0184-LoongArch-Provide-ashr-lshr-and-ashl-RTL-pattern-for.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ec6f30880d11bd183a3204056e6251c0168aeff5
--- /dev/null
+++ b/0184-LoongArch-Provide-ashr-lshr-and-ashl-RTL-pattern-for.patch
@@ -0,0 +1,220 @@
+From d9ce0e85c8cba331413c6a521987a1ecbd94df1c Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 8 Aug 2024 09:59:28 +0800
+Subject: [PATCH 184/188] LoongArch: Provide ashr lshr and ashl RTL pattern for
+ vectors.
+
+We support vashr vlshr and vashl. However, in r15-1638 support optimize
+x < 0 ? -1 : 0 into (signed) x >> 31 and x < 0 ? 1 : 0 into (unsigned) x >> 31.
+To support this optimization, vector ashr lshr and ashl need to be implemented.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (insn): Added rotatert rotr pairs.
+	* config/loongarch/simd.md (rotr3): Remove to ...
+	(3): This.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/loongarch/vect-ashr-lshr.C: New test.
+---
+ gcc/config/loongarch/loongarch.md             |   1 +
+ gcc/config/loongarch/simd.md                  |  13 +-
+ .../g++.target/loongarch/vect-ashr-lshr.C     | 147 ++++++++++++++++++
+ 3 files changed, 155 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/vect-ashr-lshr.C
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 58c8f28ed..867977b36 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -559,6 +559,7 @@
+ (define_code_attr insn [(ashift "sll")
+ 			(ashiftrt "sra")
+ 			(lshiftrt "srl")
++			(rotatert "rotr")
+ 			(ior "or")
+ 			(xor "xor")
+ 			(and "and")
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index 00d4c7831..c28b95282 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -306,14 +306,15 @@
+     operands[4] = gen_reg_rtx (mode);
+   });
+ 
+-;; vrotri.{b/h/w/d}
++;; v{rotr/sll/sra/srl}i.{b/h/w/d}
+ 
+-(define_insn "rotr3"
++(define_insn "3"
+   [(set (match_operand:IVEC 0 "register_operand" "=f")
+-	(rotatert:IVEC (match_operand:IVEC 1 "register_operand" "f")
+-		       (match_operand:SI 2 "const__operand")))]
+-  ""
+-  "vrotri.\t%0,%1,%2";
++	(shift_w:IVEC
++	  (match_operand:IVEC 1 "register_operand" "f")
++	  (match_operand:SI 2 "const__operand")))]
++  "ISA_HAS_LSX"
++  "vi.\t%0,%1,%2"
+   [(set_attr "type" "simd_int_arith")
+    (set_attr "mode" "")])
+ 
+diff --git a/gcc/testsuite/g++.target/loongarch/vect-ashr-lshr.C b/gcc/testsuite/g++.target/loongarch/vect-ashr-lshr.C
+new file mode 100644
+index 000000000..bcef985fa
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/vect-ashr-lshr.C
+@@ -0,0 +1,147 @@
++/* { dg-do compile } */
++/* { dg-options "-mlasx -O2" } */
++/* { dg-final { scan-assembler-times "vsrli.b" 2 } } */
++/* { dg-final { scan-assembler-times "vsrli.h" 2 } } */
++/* { dg-final { scan-assembler-times "vsrli.w" 2 } } */
++/* { dg-final { scan-assembler-times "vsrli.d" 2 } } */
++/* { dg-final { scan-assembler-times "vsrai.b" 2 } } */
++/* { dg-final { scan-assembler-times "vsrai.h" 2 } } */
++/* { dg-final { scan-assembler-times "vsrai.w" 2 } } */
++/* { dg-final { scan-assembler-times "vsrai.d" 2 } } */
++
++typedef signed char v16qi __attribute__((vector_size(16)));
++typedef signed char v32qi __attribute__((vector_size(32)));
++typedef short v8hi __attribute__((vector_size(16)));
++typedef short v16hi __attribute__((vector_size(32)));
++typedef int v4si __attribute__((vector_size(16)));
++typedef int v8si __attribute__((vector_size(32)));
++typedef long long v2di __attribute__((vector_size(16)));
++typedef long long v4di __attribute__((vector_size(32)));
++
++v16qi
++foo (v16qi a)
++{
++  v16qi const1_op = __extension__(v16qi){1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
++  v16qi const0_op = __extension__(v16qi){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v32qi
++foo2 (v32qi a)
++{
++  v32qi const1_op = __extension__(v32qi){1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
++  v32qi const0_op = __extension__(v32qi){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v8hi
++foo3 (v8hi a)
++{
++  v8hi const1_op = __extension__(v8hi){1,1,1,1,1,1,1,1};
++  v8hi const0_op = __extension__(v8hi){0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v16hi
++foo4 (v16hi a)
++{
++  v16hi const1_op = __extension__(v16hi){1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1};
++  v16hi const0_op = __extension__(v16hi){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v4si
++foo5 (v4si a)
++{
++  v4si const1_op = __extension__(v4si){1,1,1,1};
++  v4si const0_op = __extension__(v4si){0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v8si
++foo6 (v8si a)
++{
++  v8si const1_op = __extension__(v8si){1,1,1,1,1,1,1,1};
++  v8si const0_op = __extension__(v8si){0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v2di
++foo7 (v2di a)
++{
++  v2di const1_op = __extension__(v2di){1,1};
++  v2di const0_op = __extension__(v2di){0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v4di
++foo8 (v4di a)
++{
++  v4di const1_op = __extension__(v4di){1,1,1,1};
++  v4di const0_op = __extension__(v4di){0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v16qi
++foo9 (v16qi a)
++{
++  v16qi const1_op = __extension__(v16qi){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
++  v16qi const0_op = __extension__(v16qi){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v32qi
++foo10 (v32qi a)
++{
++  v32qi const1_op = __extension__(v32qi){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
++  v32qi const0_op = __extension__(v32qi){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v8hi
++foo11 (v8hi a)
++{
++  v8hi const1_op = __extension__(v8hi){-1,-1,-1,-1,-1,-1,-1,-1};
++  v8hi const0_op = __extension__(v8hi){0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v16hi
++foo12 (v16hi a)
++{
++  v16hi const1_op = __extension__(v16hi){-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1};
++  v16hi const0_op = __extension__(v16hi){0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v4si
++foo13 (v4si a)
++{
++  v4si const1_op = __extension__(v4si){-1,-1,-1,-1};
++  v4si const0_op = __extension__(v4si){0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v8si
++foo14 (v8si a)
++{
++  v8si const1_op = __extension__(v8si){-1,-1,-1,-1,-1,-1,-1,-1};
++  v8si const0_op = __extension__(v8si){0,0,0,0,0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v2di
++foo15 (v2di a)
++{
++  v2di const1_op = __extension__(v2di){-1,-1};
++  v2di const0_op = __extension__(v2di){0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
++
++v4di
++foo16 (v4di a)
++{
++  v4di const1_op = __extension__(v4di){-1,-1,-1,-1};
++  v4di const0_op = __extension__(v4di){0,0,0,0};
++  return a < const0_op ? const1_op : const0_op;
++}
+-- 
+2.43.0
+
diff --git a/SME-0082-aarch64-Add-bytes_below_saved_regs-to-frame-info.patch b/0185-Backport-SME-aarch64-Add-bytes_below_saved_regs-to-f.patch
similarity index 92%
rename from SME-0082-aarch64-Add-bytes_below_saved_regs-to-frame-info.patch
rename to 0185-Backport-SME-aarch64-Add-bytes_below_saved_regs-to-f.patch
index f7322ef4762ed607356ece5220010217642fa648..f754f97ea159c2998711abd27b24917488283eb6 100644
--- a/SME-0082-aarch64-Add-bytes_below_saved_regs-to-frame-info.patch
+++ b/0185-Backport-SME-aarch64-Add-bytes_below_saved_regs-to-f.patch
@@ -1,7 +1,8 @@
-From e135b34a909f0583ad6187d009a99ae5a458b1da Mon Sep 17 00:00:00 2001
+From bf985fe08b6298218180666a7d20f4aa0b41326f Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:05 +0100
-Subject: [PATCH 082/144] aarch64: Add bytes_below_saved_regs to frame info
+Subject: [PATCH 086/157] [Backport][SME] aarch64: Add bytes_below_saved_regs
+ to frame info
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=7b792ecaa9414bc81520b3da552d40ad854be976
 
@@ -24,10 +25,10 @@ gcc/
  2 files changed, 41 insertions(+), 35 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 86eb1cac9..280fd3e16 100644
+index c59af6b1c..5533dd85b 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8600,6 +8600,8 @@ aarch64_layout_frame (void)
+@@ -8817,6 +8817,8 @@ aarch64_layout_frame (void)
    gcc_assert (crtl->is_leaf
  	      || maybe_ne (frame.reg_offset[R30_REGNUM], SLOT_NOT_REQUIRED));
  
@@ -36,7 +37,7 @@ index 86eb1cac9..280fd3e16 100644
    /* Now assign stack slots for the registers.  Start with the predicate
       registers, since predicate LDR and STR have a relatively small
       offset range.  These saves happen below the hard frame pointer.  */
-@@ -8704,18 +8706,18 @@ aarch64_layout_frame (void)
+@@ -8921,18 +8923,18 @@ aarch64_layout_frame (void)
  
    poly_int64 varargs_and_saved_regs_size = offset + frame.saved_varargs_size;
  
@@ -59,7 +60,7 @@ index 86eb1cac9..280fd3e16 100644
  
    frame.locals_offset = frame.saved_varargs_size;
  
-@@ -8759,7 +8761,7 @@ aarch64_layout_frame (void)
+@@ -8976,7 +8978,7 @@ aarch64_layout_frame (void)
    else if (frame.wb_pop_candidate1 != INVALID_REGNUM)
      max_push_offset = 256;
  
@@ -68,7 +69,7 @@ index 86eb1cac9..280fd3e16 100644
    HOST_WIDE_INT const_saved_regs_size;
    if (known_eq (frame.saved_regs_size, 0))
      frame.initial_adjust = frame.frame_size;
-@@ -8767,31 +8769,31 @@ aarch64_layout_frame (void)
+@@ -8984,31 +8986,31 @@ aarch64_layout_frame (void)
  	   && const_size < max_push_offset
  	   && known_eq (frame.hard_fp_offset, const_size))
      {
@@ -112,7 +113,7 @@ index 86eb1cac9..280fd3e16 100644
      }
    else if (saves_below_hard_fp_p
  	   && known_eq (frame.saved_regs_size,
-@@ -8801,30 +8803,29 @@ aarch64_layout_frame (void)
+@@ -9018,30 +9020,29 @@ aarch64_layout_frame (void)
  
  	 sub sp, sp, hard_fp_offset + below_hard_fp_saved_regs_size
  	 save SVE registers relative to SP
@@ -150,7 +151,7 @@ index 86eb1cac9..280fd3e16 100644
  
  	 sub sp, sp, hard_fp_offset
  	 stp x29, x30, [sp, 0]
-@@ -8832,10 +8833,10 @@ aarch64_layout_frame (void)
+@@ -9049,10 +9050,10 @@ aarch64_layout_frame (void)
  	 stp reg3, reg4, [sp, 16]
  	 [sub sp, sp, below_hard_fp_saved_regs_size]
  	 [save SVE registers relative to SP]
@@ -163,7 +164,7 @@ index 86eb1cac9..280fd3e16 100644
      }
  
    /* Make sure the individual adjustments add up to the full frame size.  */
-@@ -9426,7 +9427,7 @@ aarch64_get_separate_components (void)
+@@ -9643,7 +9644,7 @@ aarch64_get_separate_components (void)
  	if (frame_pointer_needed)
  	  offset -= frame.below_hard_fp_saved_regs_size;
  	else
@@ -172,7 +173,7 @@ index 86eb1cac9..280fd3e16 100644
  
  	/* Check that we can access the stack slot of the register with one
  	   direct load with no adjustments needed.  */
-@@ -9575,7 +9576,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+@@ -9792,7 +9793,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
        if (frame_pointer_needed)
  	offset -= frame.below_hard_fp_saved_regs_size;
        else
@@ -181,7 +182,7 @@ index 86eb1cac9..280fd3e16 100644
  
        rtx addr = plus_constant (Pmode, ptr_reg, offset);
        rtx mem = gen_frame_mem (mode, addr);
-@@ -9629,7 +9630,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+@@ -9846,7 +9847,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
        if (frame_pointer_needed)
  	offset2 -= frame.below_hard_fp_saved_regs_size;
        else
@@ -190,7 +191,7 @@ index 86eb1cac9..280fd3e16 100644
        rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
        rtx mem2 = gen_frame_mem (mode, addr2);
        rtx set2 = prologue_p ? gen_rtx_SET (mem2, reg2)
-@@ -9703,10 +9704,10 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
+@@ -9920,10 +9921,10 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
     registers.  If POLY_SIZE is not large enough to require a probe this function
     will only adjust the stack.  When allocating the stack space
     FRAME_RELATED_P is then used to indicate if the allocation is frame related.
@@ -205,7 +206,7 @@ index 86eb1cac9..280fd3e16 100644
  
     We emit barriers after each stack adjustment to prevent optimizations from
     breaking the invariant that we never drop the stack more than a page.  This
-@@ -9915,7 +9916,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -10132,7 +10133,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
    /* Handle any residuals.  Residuals of at least MIN_PROBE_THRESHOLD have to
       be probed.  This maintains the requirement that each page is probed at
       least once.  For initial probing we probe only if the allocation is
@@ -231,5 +232,5 @@ index 8f0ac2cde..9e0ca380e 100644
       are saved below the hard frame pointer.  */
    poly_int64 below_hard_fp_saved_regs_size;
 -- 
-2.19.1
+2.33.0
 
diff --git a/0185-LoongArch-Implement-scalar-isinf-isnormal-and-isfini.patch b/0185-LoongArch-Implement-scalar-isinf-isnormal-and-isfini.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b0ef74303078e9f2a514658b19efc1f21353f53e
--- /dev/null
+++ b/0185-LoongArch-Implement-scalar-isinf-isnormal-and-isfini.patch
@@ -0,0 +1,203 @@
+From 7e8e122306feaecf8d7b520b4e7c0b9908ca6fd2 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 4 Jul 2024 02:49:28 +0800
+Subject: [PATCH 185/188] LoongArch: Implement scalar isinf, isnormal, and
+ isfinite via fclass
+
+Doing so can avoid loading FP constants from the memory.  It also
+partially fixes PR 66262 as fclass does not signal on sNaN.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (extendsidi2): Add ("=r", "f")
+	alternative and use movfr2gr.s for it.  The spec clearly states
+	movfr2gr.s sign extends the value to GRLEN.
+	(fclass_): Make the result SImode instead of a floating
+	mode.  The fclass results are really not FP values.
+	(FCLASS_MASK): New define_int_iterator.
+	(fclass_optab): New define_int_attr.
+	(): New define_expand
+	template.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/fclass-compile.c: New test.
+	* gcc.target/loongarch/fclass-run.c: New test.
+---
+ gcc/config/loongarch/loongarch.md             | 53 ++++++++++++++++---
+ .../gcc.target/loongarch/fclass-compile.c     | 20 +++++++
+ .../gcc.target/loongarch/fclass-run.c         | 53 +++++++++++++++++++
+ 3 files changed, 119 insertions(+), 7 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/fclass-compile.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/fclass-run.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 867977b36..15960a79f 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1851,16 +1851,17 @@
+ ;;  ....................
+ 
+ (define_insn "extendsidi2"
+-  [(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
++  [(set (match_operand:DI 0 "register_operand" "=r,r,r,r,r")
+ 	(sign_extend:DI
+-	    (match_operand:SI 1 "nonimmediate_operand" "r,ZC,m,k")))]
++	    (match_operand:SI 1 "nonimmediate_operand" "r,ZC,m,k,f")))]
+   "TARGET_64BIT"
+   "@
+    slli.w\t%0,%1,0
+    ldptr.w\t%0,%1
+    ld.w\t%0,%1
+-   ldx.w\t%0,%1"
+-  [(set_attr "move_type" "sll0,load,load,load")
++   ldx.w\t%0,%1
++   movfr2gr.s\t%0,%1"
++  [(set_attr "move_type" "sll0,load,load,load,mftg")
+    (set_attr "mode" "DI")])
+ 
+ (define_insn "extend2"
+@@ -4110,14 +4111,52 @@
+   "movgr2fcsr\t$r%0,%1")
+ 
+ (define_insn "fclass_"
+-  [(set (match_operand:ANYF 0 "register_operand" "=f")
+-	(unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
+-		      UNSPEC_FCLASS))]
++  [(set (match_operand:SI 0 "register_operand" "=f")
++	(unspec:SI [(match_operand:ANYF 1 "register_operand" "f")]
++		   UNSPEC_FCLASS))]
+   "TARGET_HARD_FLOAT"
+   "fclass.\t%0,%1"
+   [(set_attr "type" "unknown")
+    (set_attr "mode" "")])
+ 
++(define_int_iterator FCLASS_MASK [68 136 952])
++(define_int_attr fclass_optab
++  [(68	"isinf")
++   (136	"isnormal")
++   (952	"isfinite")])
++
++(define_expand "2"
++  [(match_operand:SI   0 "register_operand" "=r")
++   (match_operand:ANYF 1 "register_operand" " f")
++   (const_int FCLASS_MASK)]
++  "TARGET_HARD_FLOAT"
++  {
++    rtx ft0 = gen_reg_rtx (SImode);
++    rtx t0 = gen_reg_rtx (word_mode);
++    rtx mask = GEN_INT ();
++
++    emit_insn (gen_fclass_ (ft0, operands[1]));
++
++    if (TARGET_64BIT)
++      emit_insn (gen_extend_insn (t0, ft0, DImode, SImode, 0));
++    else
++      emit_move_insn (t0, ft0);
++
++    emit_move_insn (t0, gen_rtx_AND (word_mode, t0, mask));
++    emit_move_insn (t0, gen_rtx_NE (word_mode, t0, const0_rtx));
++
++    if (TARGET_64BIT)
++      {
++	t0 = lowpart_subreg (SImode, t0, DImode);
++	SUBREG_PROMOTED_VAR_P (t0) = 1;
++	SUBREG_PROMOTED_SET (t0, SRP_SIGNED);
++      }
++
++    emit_move_insn (operands[0], t0);
++
++    DONE;
++  })
++
+ (define_insn "bytepick_w_"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(ior:SI (lshiftrt:SI (match_operand:SI 1 "register_operand" "r")
+diff --git a/gcc/testsuite/gcc.target/loongarch/fclass-compile.c b/gcc/testsuite/gcc.target/loongarch/fclass-compile.c
+new file mode 100644
+index 000000000..9c24d6e26
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/fclass-compile.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=loongarch64 -mfpu=64 -mabi=lp64d" } */
++/* { dg-final { scan-assembler-times "fclass\\.s" 1 } } */
++/* { dg-final { scan-assembler-times "fclass\\.d" 1 } } */
++
++__attribute__ ((noipa)) int
++test_fclass_f (float f)
++{
++  return __builtin_isinf (f)
++	 | __builtin_isnormal (f) << 1
++	 | __builtin_isfinite (f) << 2;
++}
++
++__attribute__ ((noipa)) int
++test_fclass_d (double d)
++{
++  return __builtin_isinf (d)
++	 | __builtin_isnormal (d) << 1
++	 | __builtin_isfinite (d) << 2;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/fclass-run.c b/gcc/testsuite/gcc.target/loongarch/fclass-run.c
+new file mode 100644
+index 000000000..e5585f9d5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/fclass-run.c
+@@ -0,0 +1,53 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -fsignaling-nans -D_GNU_SOURCE -std=c23" } */
++/* { dg-require-effective-target fenv_exceptions } */
++
++#include 
++#include "fclass-compile.c"
++
++#define ASSERT_EQ(x, y) (void)(x == y || (__builtin_abort (), 1))
++
++int
++main (void)
++{
++  volatile float f_inf = __builtin_inff ();
++  volatile float f_zero = 0;
++  volatile float f_normal = 114.514;
++  volatile float f_subnormal = 1e-40;
++  volatile float f_qnan = __builtin_nanf ("");
++  volatile float f_snan = __builtin_nansf ("");
++  volatile double d_inf = __builtin_inf ();
++  volatile double d_zero = 0;
++  volatile double d_normal = 1919.810;
++  volatile double d_subnormal = 1e-320;
++  volatile double d_qnan = __builtin_nan ("");
++  volatile double d_snan = __builtin_nans ("");
++
++#if __loongarch_frlen >= 64
++  /* With fclass.{s/d} we shouldn't signal, even if the input is sNaN.
++     PR 66462.  */
++  feenableexcept (FE_INVALID);
++#endif
++
++  ASSERT_EQ (test_fclass_f (f_inf), 0b001);
++  ASSERT_EQ (test_fclass_f (-f_inf), 0b001);
++  ASSERT_EQ (test_fclass_f (f_zero), 0b100);
++  ASSERT_EQ (test_fclass_f (-f_zero), 0b100);
++  ASSERT_EQ (test_fclass_f (f_normal), 0b110);
++  ASSERT_EQ (test_fclass_f (-f_normal), 0b110);
++  ASSERT_EQ (test_fclass_f (f_subnormal), 0b100);
++  ASSERT_EQ (test_fclass_f (-f_subnormal), 0b100);
++  ASSERT_EQ (test_fclass_f (f_qnan), 0);
++  ASSERT_EQ (test_fclass_f (f_snan), 0);
++
++  ASSERT_EQ (test_fclass_d (d_inf), 0b001);
++  ASSERT_EQ (test_fclass_d (-d_inf), 0b001);
++  ASSERT_EQ (test_fclass_d (d_zero), 0b100);
++  ASSERT_EQ (test_fclass_d (-d_zero), 0b100);
++  ASSERT_EQ (test_fclass_d (d_normal), 0b110);
++  ASSERT_EQ (test_fclass_d (-d_normal), 0b110);
++  ASSERT_EQ (test_fclass_d (d_subnormal), 0b100);
++  ASSERT_EQ (test_fclass_d (-d_subnormal), 0b100);
++  ASSERT_EQ (test_fclass_d (d_qnan), 0);
++  ASSERT_EQ (test_fclass_d (d_snan), 0);
++}
+-- 
+2.43.0
+
diff --git a/SME-0083-aarch64-Add-bytes_below_hard_fp-to-frame-info.patch b/0186-Backport-SME-aarch64-Add-bytes_below_hard_fp-to-fram.patch
similarity index 89%
rename from SME-0083-aarch64-Add-bytes_below_hard_fp-to-frame-info.patch
rename to 0186-Backport-SME-aarch64-Add-bytes_below_hard_fp-to-fram.patch
index aa670cbdf51725f79145ab08b29e2064eb4aaca9..5eed26e500b2e04f224f5273fea93ad54a42bc5d 100644
--- a/SME-0083-aarch64-Add-bytes_below_hard_fp-to-frame-info.patch
+++ b/0186-Backport-SME-aarch64-Add-bytes_below_hard_fp-to-fram.patch
@@ -1,7 +1,8 @@
-From ab1516fa4ceda66c4862e989e78be011a4d00c66 Mon Sep 17 00:00:00 2001
+From bd5299017c233bcdf0fcc3dd7217eec1641411fe Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:06 +0100
-Subject: [PATCH 083/144] aarch64: Add bytes_below_hard_fp to frame info
+Subject: [PATCH 087/157] [Backport][SME] aarch64: Add bytes_below_hard_fp to
+ frame info
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=99305f306246079cc57d30dae7c32107f02ff3e8
 
@@ -35,10 +36,10 @@ gcc/
  2 files changed, 8 insertions(+), 3 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 280fd3e16..a7014800f 100644
+index 5533dd85b..2bb49b9b0 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8652,6 +8652,7 @@ aarch64_layout_frame (void)
+@@ -8869,6 +8869,7 @@ aarch64_layout_frame (void)
       of the callee save area.  */
    bool saves_below_hard_fp_p = maybe_ne (offset, 0);
    frame.below_hard_fp_saved_regs_size = offset;
@@ -46,7 +47,7 @@ index 280fd3e16..a7014800f 100644
    if (frame.emit_frame_chain)
      {
        /* FP and LR are placed in the linkage record.  */
-@@ -10239,8 +10240,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10456,8 +10457,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
    poly_int64 final_adjust = frame.final_adjust;
    poly_int64 callee_offset = frame.callee_offset;
    poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
@@ -56,7 +57,7 @@ index 280fd3e16..a7014800f 100644
    unsigned reg1 = frame.wb_pop_candidate1;
    unsigned reg2 = frame.wb_pop_candidate2;
    unsigned int last_gpr = (frame.is_scs_enabled
-@@ -10298,7 +10298,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10515,7 +10515,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
         is restored on the instruction doing the writeback.  */
      aarch64_add_offset (Pmode, stack_pointer_rtx,
  			hard_frame_pointer_rtx,
@@ -82,5 +83,5 @@ index 9e0ca380e..dedc5b32f 100644
       top of the locals area.  This value is always a multiple of
       STACK_BOUNDARY.  */
 -- 
-2.19.1
+2.33.0
 
diff --git a/0186-LoongArch-Add-support-to-annotate-tablejump.patch b/0186-LoongArch-Add-support-to-annotate-tablejump.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cf41bc0dba67b43a48a0c5900d88aea779dd1796
--- /dev/null
+++ b/0186-LoongArch-Add-support-to-annotate-tablejump.patch
@@ -0,0 +1,155 @@
+From 5079c41ada379bd8d1bdb92dd2b91e72e9496ea6 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 11 Jul 2024 19:43:48 +0800
+Subject: [PATCH 186/188] LoongArch: Add support to annotate tablejump
+
+This is per the request from the kernel developers.  For generating the
+ORC unwind info, the objtool program needs to analyze the control flow
+of a .o file.  If a jump table is used, objtool has to correlate the
+jump instruction with the table.
+
+On x86 (where objtool was initially developed) it's simple: a relocation
+entry naturally correlates them because one single instruction is used
+for table-based jump.  But on an RISC machine objtool would have to
+reconstruct the data flow if it must find out the correlation on its
+own.
+
+So, emit an additional section to store the correlation info as pairs of
+addresses, each pair contains the address of a jump instruction (jr) and
+the address of the jump table.  This is very trivial to implement in
+GCC.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch.opt.in
+	(mannotate-tablejump): New option.
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/loongarch.md (tablejump): Emit
+	additional correlation info between the jump instruction and the
+	jump table, if -mannotate-tablejump.
+	* doc/invoke.texi: Document -mannotate-tablejump.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/jump-table-annotate.c: New test.
+
+Suggested-by: Tiezhu Yang 
+---
+ gcc/config/loongarch/genopts/loongarch.opt.in     |  4 ++++
+ gcc/config/loongarch/loongarch.md                 | 12 +++++++++++-
+ gcc/config/loongarch/loongarch.opt                |  4 ++++
+ gcc/doc/invoke.texi                               | 13 ++++++++++++-
+ .../gcc.target/loongarch/jump-table-annotate.c    | 15 +++++++++++++++
+ 5 files changed, 46 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/jump-table-annotate.c
+
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 0ecd10922..20795f6bd 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -301,3 +301,7 @@ default value is 4.
+ ; CPUCFG independently, so we use bit flags to specify them.
+ TargetVariable
+ HOST_WIDE_INT la_isa_evolution = 0
++
++mannotate-tablejump
++Target Mask(ANNOTATE_TABLEJUMP) Save
++Annotate table jump instruction (jr {reg}) to correlate it with the jump table.
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 15960a79f..66236a7c7 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -3496,12 +3496,22 @@
+   DONE;
+ })
+ 
++(define_mode_attr mode_size [(DI "8") (SI "4")])
++
+ (define_insn "@tablejump"
+   [(set (pc)
+ 	(match_operand:P 0 "register_operand" "e"))
+    (use (label_ref (match_operand 1 "" "")))]
+   ""
+-  "jr\t%0"
++  {
++    return TARGET_ANNOTATE_TABLEJUMP
++      ? "1:jr\t%0\n\t"
++	".pushsection\t.discard.tablejump_annotate\n\t"
++	"\t.byte\t1b\n\t"
++	"\t.byte\t%1\n\t"
++	".popsection"
++      : "jr\t%0";
++  }
+   [(set_attr "type" "jump")
+    (set_attr "mode" "none")])
+ 
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 69b3b965c..16fed6ec3 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -310,6 +310,10 @@ default value is 4.
+ TargetVariable
+ HOST_WIDE_INT la_isa_evolution = 0
+ 
++mannotate-tablejump
++Target Mask(ANNOTATE_TABLEJUMP) Save
++Annotate table jump instruction (jr {reg}) to correlate it with the jump table
++
+ mfrecipe
+ Target Mask(ISA_FRECIPE) Var(la_isa_evolution)
+ Support frecipe.{s/d} and frsqrte.{s/d} instructions.
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index f6d59317b..d2c52cdf4 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1011,7 +1011,7 @@ Objective-C and Objective-C++ Dialects}.
+ -mcmodel=@var{code-model} -mrelax -mpass-mrelax-to-as @gol
+ -mrecip  -mrecip=@var{opt} -mfrecipe -mno-frecipe -mdiv32 -mno-div32 @gol
+ -mlam-bh -mno-lam-bh -mlamcas -mno-lamcas -mld-seq-sa -mno-ld-seq-sa @gol
+--mtls-dialect=@var{opt}}
++-mtls-dialect=@var{opt} -mannotate-tablejump -mno-annotate-tablejump}
+ 
+ @emph{M32R/D Options}
+ @gccoptlist{-m32r2  -m32rx  -m32r @gol
+@@ -24750,6 +24750,17 @@ Whether a load-load barrier (@code{dbar 0x700}) is needed.  When build with
+ This option controls which tls dialect may be used for general dynamic and
+ local dynamic TLS models.
+ 
++@opindex mannotate-tablejump
++@opindex mno-annotate-tablejump
++@item -mannotate-tablejump
++@itemx -mno-annotate-tablejump
++Create an annotation section @code{.discard.tablejump_annotate} to
++correlate the @code{jirl} instruction and the jump table when a jump
++table is used to optimize the @code{switch} statement.  Some external
++tools, for example @file{objtool} of the Linux kernel building system,
++need the annotation to analyze the control flow.  The default is
++@option{-mno-annotate-tablejump}.
++
+ @table @samp
+ @item trad
+ Use traditional TLS. This is the default.
+diff --git a/gcc/testsuite/gcc.target/loongarch/jump-table-annotate.c b/gcc/testsuite/gcc.target/loongarch/jump-table-annotate.c
+new file mode 100644
+index 000000000..9d58e60e3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/jump-table-annotate.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-mannotate-tablejump" } */
++
++extern void asdf(int);
++void foo(int x) {
++  switch (x) {
++  case 0: asdf(10); break;
++  case 1: asdf(11); break;
++  case 2: asdf(12); break;
++  case 3: asdf(13); break;
++  case 4: asdf(14); break;
++  }
++}
++
++/* { dg-final { scan-assembler "\\.discard\\.tablejump_annotate" } } */
+-- 
+2.43.0
+
diff --git a/SME-0084-aarch64-Robustify-stack-tie-handling.patch b/0187-Backport-SME-aarch64-Robustify-stack-tie-handling.patch
similarity index 88%
rename from SME-0084-aarch64-Robustify-stack-tie-handling.patch
rename to 0187-Backport-SME-aarch64-Robustify-stack-tie-handling.patch
index 4f8c332718a169c96d7f0b7390b896af0f5cdbc2..cc9f87cc92d7ff5e508177e9149c874756992272 100644
--- a/SME-0084-aarch64-Robustify-stack-tie-handling.patch
+++ b/0187-Backport-SME-aarch64-Robustify-stack-tie-handling.patch
@@ -1,7 +1,7 @@
-From e149aafc6438182fc7931aa3dbcd621fedf1d40a Mon Sep 17 00:00:00 2001
+From 4dc3e578d958ceb73f973483f42247c3d33210dc Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 20 Jun 2023 21:48:38 +0100
-Subject: [PATCH 084/144] aarch64: Robustify stack tie handling
+Subject: [PATCH 088/157] [Backport][SME] aarch64: Robustify stack tie handling
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=580b74a79146e51268dd11192d3870645adb0bbb
 
@@ -46,10 +46,10 @@ gcc/
  2 files changed, 18 insertions(+), 7 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index a7014800f..838df1749 100644
+index 2bb49b9b0..4d505c6fc 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -9700,6 +9700,16 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
+@@ -9917,6 +9917,16 @@ aarch64_stack_clash_protection_alloca_probe_range (void)
    return STACK_CLASH_CALLER_GUARD;
  }
  
@@ -66,7 +66,7 @@ index a7014800f..838df1749 100644
  
  /* Allocate POLY_SIZE bytes of stack space using TEMP1 and TEMP2 as scratch
     registers.  If POLY_SIZE is not large enough to require a probe this function
-@@ -9813,7 +9823,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -10030,7 +10040,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
  	     the instruction.  */
  	  rtx stack_ptr_copy = gen_rtx_REG (Pmode, STACK_CLASH_SVE_CFA_REGNUM);
  	  emit_move_insn (stack_ptr_copy, stack_pointer_rtx);
@@ -75,7 +75,7 @@ index a7014800f..838df1749 100644
  
  	  /* We want the CFA independent of the stack pointer for the
  	     duration of the loop.  */
-@@ -10181,7 +10191,7 @@ aarch64_expand_prologue (void)
+@@ -10398,7 +10408,7 @@ aarch64_expand_prologue (void)
  	  aarch64_add_cfa_expression (insn, regno_reg_rtx[reg1],
  				      hard_frame_pointer_rtx, 0);
  	}
@@ -84,7 +84,7 @@ index a7014800f..838df1749 100644
      }
  
    aarch64_save_callee_saves (saved_regs_offset, R0_REGNUM, R30_REGNUM,
-@@ -10284,7 +10294,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10501,7 +10511,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
        || cfun->calls_alloca
        || crtl->calls_eh_return)
      {
@@ -93,7 +93,7 @@ index a7014800f..838df1749 100644
        need_barrier_p = false;
      }
  
-@@ -10323,7 +10333,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10540,7 +10550,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
  				callee_adjust != 0, &cfi_ops);
  
    if (need_barrier_p)
@@ -103,10 +103,10 @@ index a7014800f..838df1749 100644
    if (callee_adjust != 0)
      aarch64_pop_regs (reg1, reg2, callee_adjust, &cfi_ops);
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index d35852738..7e42f28ca 100644
+index 2becc888e..2ce123255 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -7086,10 +7086,11 @@
+@@ -7088,10 +7088,11 @@
  
  (define_insn "stack_tie"
    [(set (mem:BLK (scratch))
@@ -122,5 +122,5 @@ index d35852738..7e42f28ca 100644
    [(set_attr "length" "0")]
  )
 -- 
-2.19.1
+2.33.0
 
diff --git a/0187-LoongArch-Fix-up-r15-4130.patch b/0187-LoongArch-Fix-up-r15-4130.patch
new file mode 100644
index 0000000000000000000000000000000000000000..67649dbc3c41cbc750314141fb1efd0d9036289e
--- /dev/null
+++ b/0187-LoongArch-Fix-up-r15-4130.patch
@@ -0,0 +1,32 @@
+From 8cdf96cd61612746262a811b8a091ecab27bd3a1 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 10 Jul 2024 12:15:23 +0800
+Subject: [PATCH 187/188] LoongArch: Fix up r15-4130
+
+An earlier version of the patch (lacking the regeneration of some files)
+was pushed.  Fix it up now.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/loongarch.opt.urls: Regenerate.
+---
+ gcc/config/loongarch/loongarch.opt | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 16fed6ec3..f9c7bd446 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -312,7 +312,7 @@ HOST_WIDE_INT la_isa_evolution = 0
+ 
+ mannotate-tablejump
+ Target Mask(ANNOTATE_TABLEJUMP) Save
+-Annotate table jump instruction (jr {reg}) to correlate it with the jump table
++Annotate table jump instruction (jr {reg}) to correlate it with the jump table.
+ 
+ mfrecipe
+ Target Mask(ISA_FRECIPE) Var(la_isa_evolution)
+-- 
+2.43.0
+
diff --git a/SME-0085-aarch64-Tweak-aarch64_save-restore_callee_saves.patch b/0188-Backport-SME-aarch64-Tweak-aarch64_save-restore_call.patch
similarity index 89%
rename from SME-0085-aarch64-Tweak-aarch64_save-restore_callee_saves.patch
rename to 0188-Backport-SME-aarch64-Tweak-aarch64_save-restore_call.patch
index 3f75a93c4f8bcde674f45e6dbb61d3904d37943f..c302467600febf31a0d3e9a8be140171dccd27f7 100644
--- a/SME-0085-aarch64-Tweak-aarch64_save-restore_callee_saves.patch
+++ b/0188-Backport-SME-aarch64-Tweak-aarch64_save-restore_call.patch
@@ -1,7 +1,8 @@
-From ad813c743d6daace4dbabd4f2c847084be7bd22a Mon Sep 17 00:00:00 2001
+From 8e010ea1a3e122a74696250d7c6ce5660a88b8f5 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:06 +0100
-Subject: [PATCH 085/144] aarch64: Tweak aarch64_save/restore_callee_saves
+Subject: [PATCH 089/157] [Backport][SME] aarch64: Tweak
+ aarch64_save/restore_callee_saves
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=38698967268c44991e02aa1e5a2ce9382d6de9db
 
@@ -31,10 +32,10 @@ gcc/
  2 files changed, 28 insertions(+), 32 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 838df1749..178e73b29 100644
+index 4d505c6fc..a0a4c7ac3 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8726,7 +8726,6 @@ aarch64_layout_frame (void)
+@@ -8943,7 +8943,6 @@ aarch64_layout_frame (void)
    frame.final_adjust = 0;
    frame.callee_adjust = 0;
    frame.sve_callee_adjust = 0;
@@ -42,7 +43,7 @@ index 838df1749..178e73b29 100644
  
    frame.wb_pop_candidate1 = frame.wb_push_candidate1;
    frame.wb_pop_candidate2 = frame.wb_push_candidate2;
-@@ -8794,7 +8793,6 @@ aarch64_layout_frame (void)
+@@ -9011,7 +9010,6 @@ aarch64_layout_frame (void)
  	 stp reg1, reg2, [sp, bytes_below_saved_regs]
  	 stp reg3, reg4, [sp, bytes_below_saved_regs + 16]  */
        frame.initial_adjust = frame.frame_size;
@@ -50,7 +51,7 @@ index 838df1749..178e73b29 100644
      }
    else if (saves_below_hard_fp_p
  	   && known_eq (frame.saved_regs_size,
-@@ -9141,12 +9139,13 @@ aarch64_add_cfa_expression (rtx_insn *insn, rtx reg,
+@@ -9358,12 +9356,13 @@ aarch64_add_cfa_expression (rtx_insn *insn, rtx reg,
  }
  
  /* Emit code to save the callee-saved registers from register number START
@@ -68,7 +69,7 @@ index 838df1749..178e73b29 100644
  			   unsigned start, unsigned limit, bool skip_wb,
  			   bool hard_fp_valid_p)
  {
-@@ -9174,7 +9173,9 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9391,7 +9390,9 @@ aarch64_save_callee_saves (poly_int64 start_offset,
  
        machine_mode mode = aarch64_reg_save_mode (regno);
        reg = gen_rtx_REG (mode, regno);
@@ -79,7 +80,7 @@ index 838df1749..178e73b29 100644
        rtx base_rtx = stack_pointer_rtx;
        poly_int64 sp_offset = offset;
  
-@@ -9185,9 +9186,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9402,9 +9403,7 @@ aarch64_save_callee_saves (poly_int64 start_offset,
        else if (GP_REGNUM_P (regno)
  	       && (!offset.is_constant (&const_offset) || const_offset >= 512))
  	{
@@ -90,7 +91,7 @@ index 838df1749..178e73b29 100644
  	  if (hard_fp_valid_p)
  	    base_rtx = hard_frame_pointer_rtx;
  	  else
-@@ -9251,12 +9250,13 @@ aarch64_save_callee_saves (poly_int64 start_offset,
+@@ -9468,12 +9467,13 @@ aarch64_save_callee_saves (poly_int64 start_offset,
  }
  
  /* Emit code to restore the callee registers from register number START
@@ -108,7 +109,7 @@ index 838df1749..178e73b29 100644
  			      unsigned limit, bool skip_wb, rtx *cfi_ops)
  {
    aarch64_frame &frame = cfun->machine->frame;
-@@ -9282,7 +9282,9 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
+@@ -9499,7 +9499,9 @@ aarch64_restore_callee_saves (poly_int64 start_offset, unsigned start,
  
        machine_mode mode = aarch64_reg_save_mode (regno);
        reg = gen_rtx_REG (mode, regno);
@@ -119,7 +120,7 @@ index 838df1749..178e73b29 100644
        rtx base_rtx = stack_pointer_rtx;
        if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
-@@ -10068,8 +10070,6 @@ aarch64_expand_prologue (void)
+@@ -10285,8 +10287,6 @@ aarch64_expand_prologue (void)
    HOST_WIDE_INT callee_adjust = frame.callee_adjust;
    poly_int64 final_adjust = frame.final_adjust;
    poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
@@ -128,7 +129,7 @@ index 838df1749..178e73b29 100644
    unsigned reg1 = frame.wb_push_candidate1;
    unsigned reg2 = frame.wb_push_candidate2;
    bool emit_frame_chain = frame.emit_frame_chain;
-@@ -10145,8 +10145,8 @@ aarch64_expand_prologue (void)
+@@ -10362,8 +10362,8 @@ aarch64_expand_prologue (void)
  			     - frame.hard_fp_offset);
    gcc_assert (known_ge (chain_offset, 0));
  
@@ -139,7 +140,7 @@ index 838df1749..178e73b29 100644
  
    if (emit_frame_chain)
      {
-@@ -10154,7 +10154,7 @@ aarch64_expand_prologue (void)
+@@ -10371,7 +10371,7 @@ aarch64_expand_prologue (void)
  	{
  	  reg1 = R29_REGNUM;
  	  reg2 = R30_REGNUM;
@@ -148,7 +149,7 @@ index 838df1749..178e73b29 100644
  				     false, false);
  	}
        else
-@@ -10194,7 +10194,7 @@ aarch64_expand_prologue (void)
+@@ -10411,7 +10411,7 @@ aarch64_expand_prologue (void)
        aarch64_emit_stack_tie (hard_frame_pointer_rtx);
      }
  
@@ -157,7 +158,7 @@ index 838df1749..178e73b29 100644
  			     callee_adjust != 0 || emit_frame_chain,
  			     emit_frame_chain);
    if (maybe_ne (sve_callee_adjust, 0))
-@@ -10204,16 +10204,17 @@ aarch64_expand_prologue (void)
+@@ -10421,16 +10421,17 @@ aarch64_expand_prologue (void)
        aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx,
  					      sve_callee_adjust,
  					      !frame_pointer_needed, false);
@@ -178,7 +179,7 @@ index 838df1749..178e73b29 100644
    aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
  					  !frame_pointer_needed, true);
  }
-@@ -10248,7 +10249,6 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10465,7 +10466,6 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
    poly_int64 initial_adjust = frame.initial_adjust;
    HOST_WIDE_INT callee_adjust = frame.callee_adjust;
    poly_int64 final_adjust = frame.final_adjust;
@@ -186,7 +187,7 @@ index 838df1749..178e73b29 100644
    poly_int64 sve_callee_adjust = frame.sve_callee_adjust;
    poly_int64 bytes_below_hard_fp = frame.bytes_below_hard_fp;
    unsigned reg1 = frame.wb_pop_candidate1;
-@@ -10318,9 +10318,9 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10535,9 +10535,9 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
  
    /* Restore the vector registers before the predicate registers,
       so that we can use P4 as a temporary for big-endian SVE frames.  */
@@ -198,7 +199,7 @@ index 838df1749..178e73b29 100644
  				false, &cfi_ops);
    if (maybe_ne (sve_callee_adjust, 0))
      aarch64_add_sp (NULL_RTX, NULL_RTX, sve_callee_adjust, true);
-@@ -10328,7 +10328,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10545,7 +10545,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
    /* When shadow call stack is enabled, the scs_pop in the epilogue will
       restore x30, we don't need to restore x30 again in the traditional
       way.  */
@@ -223,5 +224,5 @@ index dedc5b32f..a1db4f689 100644
       SVE registers.  */
    poly_int64 sve_callee_adjust;
 -- 
-2.19.1
+2.33.0
 
diff --git a/0188-libphobos-Update-build-scripts-for-LoongArch64.patch b/0188-libphobos-Update-build-scripts-for-LoongArch64.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0af2b6b504f55db723bf7cb58ac1413a0724704d
--- /dev/null
+++ b/0188-libphobos-Update-build-scripts-for-LoongArch64.patch
@@ -0,0 +1,304 @@
+From 46e279e1c79086e930965c9a15d08b70a2c06a80 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 28 Oct 2024 01:53:57 +0000
+Subject: [PATCH 188/188] libphobos: Update build scripts for LoongArch64.
+
+libphobos/ChangeLog:
+
+        * m4/druntime/cpu.m4: Support loongarch* targets.
+        * libdruntime/Makefile.am: Same.
+        * libdruntime/Makefile.in: Regenerate.
+        * configure: Regenerate.
+---
+ libphobos/configure               | 21 ++++++-
+ libphobos/libdruntime/Makefile.am |  3 +
+ libphobos/libdruntime/Makefile.in | 94 +++++++++++++++++++------------
+ libphobos/m4/druntime/cpu.m4      |  5 ++
+ 4 files changed, 85 insertions(+), 38 deletions(-)
+
+diff --git a/libphobos/configure b/libphobos/configure
+index 9da06f087..6acb2dd89 100755
+--- a/libphobos/configure
++++ b/libphobos/configure
+@@ -696,6 +696,8 @@ DRUNTIME_CPU_POWERPC_FALSE
+ DRUNTIME_CPU_POWERPC_TRUE
+ DRUNTIME_CPU_MIPS_FALSE
+ DRUNTIME_CPU_MIPS_TRUE
++DRUNTIME_CPU_LOONGARCH_FALSE
++DRUNTIME_CPU_LOONGARCH_TRUE
+ DRUNTIME_CPU_ARM_FALSE
+ DRUNTIME_CPU_ARM_TRUE
+ DRUNTIME_CPU_AARCH64_FALSE
+@@ -11750,7 +11752,7 @@ else
+   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+   lt_status=$lt_dlunknown
+   cat > conftest.$ac_ext <<_LT_EOF
+-#line 11753 "configure"
++#line 11755 "configure"
+ #include "confdefs.h"
+ 
+ #if HAVE_DLFCN_H
+@@ -11856,7 +11858,7 @@ else
+   lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+   lt_status=$lt_dlunknown
+   cat > conftest.$ac_ext <<_LT_EOF
+-#line 11859 "configure"
++#line 11861 "configure"
+ #include "confdefs.h"
+ 
+ #if HAVE_DLFCN_H
+@@ -14137,6 +14139,9 @@ ac_compiler_gnu=$ac_cv_c_compiler_gnu
+                ;;
+       mips*)   druntime_target_cpu_parsed="mips"
+                ;;
++      loongarch*)
++               druntime_target_cpu_parsed="loongarch"
++               ;;
+       powerpc*)
+                druntime_target_cpu_parsed="powerpc"
+                ;;
+@@ -14174,6 +14179,14 @@ else
+   DRUNTIME_CPU_MIPS_FALSE=
+ fi
+ 
++   if test "$druntime_target_cpu_parsed" = "loongarch"; then
++  DRUNTIME_CPU_LOONGARCH_TRUE=
++  DRUNTIME_CPU_LOONGARCH_FALSE='#'
++else
++  DRUNTIME_CPU_LOONGARCH_TRUE='#'
++  DRUNTIME_CPU_LOONGARCH_FALSE=
++fi
++
+    if test "$druntime_target_cpu_parsed" = "powerpc"; then
+   DRUNTIME_CPU_POWERPC_TRUE=
+   DRUNTIME_CPU_POWERPC_FALSE='#'
+@@ -15738,6 +15751,10 @@ if test -z "${DRUNTIME_CPU_MIPS_TRUE}" && test -z "${DRUNTIME_CPU_MIPS_FALSE}";
+   as_fn_error $? "conditional \"DRUNTIME_CPU_MIPS\" was never defined.
+ Usually this means the macro was only invoked conditionally." "$LINENO" 5
+ fi
++if test -z "${DRUNTIME_CPU_LOONGARCH_TRUE}" && test -z "${DRUNTIME_CPU_LOONGARCH_FALSE}"; then
++  as_fn_error $? "conditional \"DRUNTIME_CPU_LOONGARCH\" was never defined.
++Usually this means the macro was only invoked conditionally." "$LINENO" 5
++fi
+ if test -z "${DRUNTIME_CPU_POWERPC_TRUE}" && test -z "${DRUNTIME_CPU_POWERPC_FALSE}"; then
+   as_fn_error $? "conditional \"DRUNTIME_CPU_POWERPC\" was never defined.
+ Usually this means the macro was only invoked conditionally." "$LINENO" 5
+diff --git a/libphobos/libdruntime/Makefile.am b/libphobos/libdruntime/Makefile.am
+index 6ca4012b7..65e3f1b44 100644
+--- a/libphobos/libdruntime/Makefile.am
++++ b/libphobos/libdruntime/Makefile.am
+@@ -86,6 +86,9 @@ endif
+ if DRUNTIME_CPU_MIPS
+     DRUNTIME_SOURCES_CONFIGURED += config/mips/switchcontext.S
+ endif
++if DRUNTIME_CPU_LOONGARCH
++    DRUNTIME_SOURCES_CONFIGURED += config/loongarch/switchcontext.S
++endif
+ if DRUNTIME_CPU_POWERPC
+     DRUNTIME_SOURCES_CONFIGURED += config/powerpc/switchcontext.S
+ endif
+diff --git a/libphobos/libdruntime/Makefile.in b/libphobos/libdruntime/Makefile.in
+index f7f78d71f..91cd65362 100644
+--- a/libphobos/libdruntime/Makefile.in
++++ b/libphobos/libdruntime/Makefile.in
+@@ -124,12 +124,13 @@ target_triplet = @target@
+ # CPU specific sources
+ @DRUNTIME_CPU_AARCH64_TRUE@am__append_11 = config/aarch64/switchcontext.S
+ @DRUNTIME_CPU_ARM_TRUE@am__append_12 = config/arm/switchcontext.S
+-@DRUNTIME_CPU_MIPS_TRUE@am__append_13 = config/mips/switchcontext.S
+-@DRUNTIME_CPU_POWERPC_TRUE@am__append_14 = config/powerpc/switchcontext.S
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__append_15 = config/mingw/switchcontext.S
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__append_16 = config/x86/switchcontext.S
+-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__append_17 = config/systemz/get_tls_offset.S
+-@DRUNTIME_CPU_S390_TRUE@am__append_18 = config/s390/get_tls_offset.S
++@DRUNTIME_CPU_LOONGARCH_TRUE@am__append_13 = config/loongarch/switchcontext.S
++@DRUNTIME_CPU_MIPS_TRUE@am__append_14 = config/mips/switchcontext.S
++@DRUNTIME_CPU_POWERPC_TRUE@am__append_15 = config/powerpc/switchcontext.S
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__append_16 = config/mingw/switchcontext.S
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__append_17 = config/x86/switchcontext.S
++@DRUNTIME_CPU_SYSTEMZ_TRUE@am__append_18 = config/systemz/get_tls_offset.S
++@DRUNTIME_CPU_S390_TRUE@am__append_19 = config/s390/get_tls_offset.S
+ subdir = libdruntime
+ ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+ am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
+@@ -474,45 +475,49 @@ am__objects_22 = core/sys/solaris/dlfcn.lo core/sys/solaris/elf.lo \
+ @DRUNTIME_OS_SOLARIS_TRUE@am__objects_23 = $(am__objects_22)
+ @DRUNTIME_CPU_AARCH64_TRUE@am__objects_24 = config/aarch64/libgdruntime_la-switchcontext.lo
+ @DRUNTIME_CPU_ARM_TRUE@am__objects_25 = config/arm/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_MIPS_TRUE@am__objects_26 = config/mips/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_POWERPC_TRUE@am__objects_27 = config/powerpc/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_28 = config/mingw/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_29 = config/x86/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_30 = config/systemz/libgdruntime_la-get_tls_offset.lo
+-@DRUNTIME_CPU_S390_TRUE@am__objects_31 = config/s390/libgdruntime_la-get_tls_offset.lo
+-am__objects_32 = $(am__objects_5) $(am__objects_7) $(am__objects_9) \
++@DRUNTIME_CPU_LOONGARCH_TRUE@am__objects_26 = config/loongarch/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_MIPS_TRUE@am__objects_27 = config/mips/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_POWERPC_TRUE@am__objects_28 = config/powerpc/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_29 = config/mingw/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_30 = config/x86/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_31 = config/systemz/libgdruntime_la-get_tls_offset.lo
++@DRUNTIME_CPU_S390_TRUE@am__objects_32 = config/s390/libgdruntime_la-get_tls_offset.lo
++am__objects_33 = $(am__objects_6) $(am__objects_8) $(am__objects_10) \
+ 	$(am__objects_11) $(am__objects_13) $(am__objects_15) \
+ 	$(am__objects_17) $(am__objects_19) $(am__objects_21) \
+ 	$(am__objects_23) $(am__objects_24) $(am__objects_25) \
+ 	$(am__objects_26) $(am__objects_27) $(am__objects_28) \
+-	$(am__objects_29) $(am__objects_30) $(am__objects_31)
+-am__objects_33 = gcc/config.lo gcc/libbacktrace.lo
+-am__objects_34 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
+-	$(am__objects_32) $(am__objects_33)
+-am_libgdruntime_la_OBJECTS = $(am__objects_34)
++	$(am__objects_29) $(am__objects_30) $(am__objects_31) \
++	$(am__objects_32)
++am__objects_34 = gcc/config.lo gcc/libbacktrace.lo
++am__objects_35 = $(am__objects_1) $(am__objects_2) $(am__objects_3) \
++	$(am__objects_33) $(am__objects_34)
++am_libgdruntime_la_OBJECTS = $(am__objects_35)
+ libgdruntime_la_OBJECTS = $(am_libgdruntime_la_OBJECTS)
+ am__DEPENDENCIES_2 = $(am__DEPENDENCIES_1) $(am__DEPENDENCIES_1)
+-am__objects_35 = core/stdc/libgdruntime_convenience_la-errno_.lo
+-@DRUNTIME_OS_MINGW_TRUE@am__objects_36 = $(am__objects_20) \
++am__objects_36 = core/stdc/libgdruntime_convenience_la-errno_.lo
++@DRUNTIME_OS_MINGW_TRUE@am__objects_37 = $(am__objects_20) \
+ @DRUNTIME_OS_MINGW_TRUE@	config/mingw/libgdruntime_convenience_la-msvc.lo
+-@DRUNTIME_CPU_AARCH64_TRUE@am__objects_37 = config/aarch64/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_ARM_TRUE@am__objects_38 = config/arm/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_MIPS_TRUE@am__objects_39 = config/mips/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_POWERPC_TRUE@am__objects_40 = config/powerpc/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_41 = config/mingw/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_42 = config/x86/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_43 = config/systemz/libgdruntime_convenience_la-get_tls_offset.lo
+-@DRUNTIME_CPU_S390_TRUE@am__objects_44 = config/s390/libgdruntime_convenience_la-get_tls_offset.lo
+-am__objects_45 = $(am__objects_5) $(am__objects_7) $(am__objects_9) \
++@DRUNTIME_CPU_AARCH64_TRUE@am__objects_38 = config/aarch64/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_ARM_TRUE@am__objects_39 = config/arm/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_LOONGARCH_TRUE@am__objects_40 = config/loongarch/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_MIPS_TRUE@am__objects_41 = config/mips/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_POWERPC_TRUE@am__objects_42 = config/powerpc/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_43 = config/mingw/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_44 = config/x86/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_45 = config/systemz/libgdruntime_convenience_la-get_tls_offset.lo
++@DRUNTIME_CPU_S390_TRUE@am__objects_46 = config/s390/libgdruntime_convenience_la-get_tls_offset.lo
++am__objects_47 = $(am__objects_5) $(am__objects_7) $(am__objects_9) \
+ 	$(am__objects_11) $(am__objects_13) $(am__objects_15) \
+ 	$(am__objects_17) $(am__objects_19) $(am__objects_36) \
+ 	$(am__objects_23) $(am__objects_37) $(am__objects_38) \
+ 	$(am__objects_39) $(am__objects_40) $(am__objects_41) \
+-	$(am__objects_42) $(am__objects_43) $(am__objects_44)
+-am__objects_46 = $(am__objects_1) $(am__objects_35) $(am__objects_3) \
+-	$(am__objects_45) $(am__objects_33)
+-am__objects_47 = $(am__objects_46)
+-am_libgdruntime_convenience_la_OBJECTS = $(am__objects_47)
++	$(am__objects_42) $(am__objects_43) $(am__objects_44) \
++	$(am__objects_45) $(am__objects_46)
++am__objects_48 = $(am__objects_1) $(am__objects_35) $(am__objects_3) \
++	$(am__objects_47) $(am__objects_33)
++am__objects_49 = $(am__objects_48)
++am_libgdruntime_convenience_la_OBJECTS = $(am__objects_49)
+ libgdruntime_convenience_la_OBJECTS =  \
+ 	$(am_libgdruntime_convenience_la_OBJECTS)
+ AM_V_P = $(am__v_P_@AM_V@)
+@@ -787,7 +792,7 @@ DRUNTIME_SOURCES_CONFIGURED = $(am__append_1) $(am__append_2) \
+ 	$(am__append_9) $(am__append_10) $(am__append_11) \
+ 	$(am__append_12) $(am__append_13) $(am__append_14) \
+ 	$(am__append_15) $(am__append_16) $(am__append_17) \
+-	$(am__append_18)
++	$(am__append_18) $(am__append_19)
+ 
+ # Provide __start_minfo, __stop_minfo if linker doesn't.
+ @DRUNTIME_OS_MINFO_BRACKETING_FALSE@DRTSTUFF = gcc/drtbegin.o gcc/drtend.o
+@@ -1900,6 +1905,11 @@ config/arm/$(am__dirstamp):
+ 	@: > config/arm/$(am__dirstamp)
+ config/arm/libgdruntime_la-switchcontext.lo:  \
+ 	config/arm/$(am__dirstamp)
++config/loongarch/$(am__dirstamp):
++	@$(MKDIR_P) config/loongarch
++	@: > config/loongarch/$(am__dirstamp)
++config/loongarch/libgdruntime_la-switchcontext.lo:  \
++	config/loongarch/$(am__dirstamp)
+ config/mips/$(am__dirstamp):
+ 	@$(MKDIR_P) config/mips
+ 	@: > config/mips/$(am__dirstamp)
+@@ -1940,6 +1950,8 @@ config/aarch64/libgdruntime_convenience_la-switchcontext.lo:  \
+ 	config/aarch64/$(am__dirstamp)
+ config/arm/libgdruntime_convenience_la-switchcontext.lo:  \
+ 	config/arm/$(am__dirstamp)
++config/loongarch/libgdruntime_convenience_la-switchcontext.lo:  \
++	config/loongarch/$(am__dirstamp)
+ config/mips/libgdruntime_convenience_la-switchcontext.lo:  \
+ 	config/mips/$(am__dirstamp)
+ config/powerpc/libgdruntime_convenience_la-switchcontext.lo:  \
+@@ -1964,6 +1976,8 @@ mostlyclean-compile:
+ 	-rm -f config/arm/*.lo
+ 	-rm -f config/mingw/*.$(OBJEXT)
+ 	-rm -f config/mingw/*.lo
++	-rm -f config/loongarch/*.$(OBJEXT)
++	-rm -f config/loongarch/*.lo
+ 	-rm -f config/mips/*.$(OBJEXT)
+ 	-rm -f config/mips/*.lo
+ 	-rm -f config/powerpc/*.$(OBJEXT)
+@@ -2087,7 +2101,10 @@ config/aarch64/libgdruntime_la-switchcontext.lo: config/aarch64/switchcontext.S
+ config/arm/libgdruntime_la-switchcontext.lo: config/arm/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/arm/libgdruntime_la-switchcontext.lo `test -f 'config/arm/switchcontext.S' || echo '$(srcdir)/'`config/arm/switchcontext.S
+ 
+-config/mips/libgdruntime_la-switchcontext.lo: config/mips/switchcontext.S
++config/loongarch/libgdruntime_la-switchcontext.lo: config/loongarch/switchcontext.S
++	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/loongarch/libgdruntime_la-switchcontext.lo `test -f 'config/loongarch/switchcontext.S' || echo '$(srcdir)/'`config/loongarch/switchcontext.S
++
++config/mips/libgdruntime_la-switchcontext.lo: config/mips/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/mips/libgdruntime_la-switchcontext.lo `test -f 'config/mips/switchcontext.S' || echo '$(srcdir)/'`config/mips/switchcontext.S
+ 
+ config/powerpc/libgdruntime_la-switchcontext.lo: config/powerpc/switchcontext.S
+@@ -2111,6 +2128,9 @@ config/aarch64/libgdruntime_convenience_la-switchcontext.lo: config/aarch64/swit
+ config/arm/libgdruntime_convenience_la-switchcontext.lo: config/arm/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/arm/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/arm/switchcontext.S' || echo '$(srcdir)/'`config/arm/switchcontext.S
+ 
++config/loongarch/libgdruntime_convenience_la-switchcontext.lo: config/loongarch/switchcontext.S
++	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/loongarch/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/loongarch/switchcontext.S' || echo '$(srcdir)/'`config/loongarch/switchcontext.S
++
+ config/mips/libgdruntime_convenience_la-switchcontext.lo: config/mips/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/mips/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/mips/switchcontext.S' || echo '$(srcdir)/'`config/mips/switchcontext.S
+ 
+@@ -2158,6 +2178,7 @@ clean-libtool:
+ 	-rm -rf config/aarch64/.libs config/aarch64/_libs
+ 	-rm -rf config/arm/.libs config/arm/_libs
+ 	-rm -rf config/mingw/.libs config/mingw/_libs
++	-rm -rf config/loongarch/.libs config/loongarch/_libs
+ 	-rm -rf config/mips/.libs config/mips/_libs
+ 	-rm -rf config/powerpc/.libs config/powerpc/_libs
+ 	-rm -rf config/s390/.libs config/s390/_libs
+@@ -2319,6 +2340,7 @@ distclean-generic:
+ 	-rm -f config/aarch64/$(am__dirstamp)
+ 	-rm -f config/arm/$(am__dirstamp)
+ 	-rm -f config/mingw/$(am__dirstamp)
++	-rm -f config/loongarch/$(am__dirstamp)
+ 	-rm -f config/mips/$(am__dirstamp)
+ 	-rm -f config/powerpc/$(am__dirstamp)
+ 	-rm -f config/s390/$(am__dirstamp)
+diff --git a/libphobos/m4/druntime/cpu.m4 b/libphobos/m4/druntime/cpu.m4
+index db3a92c15..3461b2d3c 100644
+--- a/libphobos/m4/druntime/cpu.m4
++++ b/libphobos/m4/druntime/cpu.m4
+@@ -15,6 +15,9 @@ AC_DEFUN([DRUNTIME_CPU_SOURCES],
+                ;;
+       arm*)    druntime_target_cpu_parsed="arm"
+                ;;
++      loongarch*)
++               druntime_target_cpu_parsed="loongarch"
++               ;;
+       mips*)   druntime_target_cpu_parsed="mips"
+                ;;
+       powerpc*)
+@@ -34,6 +37,8 @@ AC_DEFUN([DRUNTIME_CPU_SOURCES],
+                  [test "$druntime_target_cpu_parsed" = "aarch64"])
+   AM_CONDITIONAL([DRUNTIME_CPU_ARM],
+                  [test "$druntime_target_cpu_parsed" = "arm"])
++  AM_CONDITIONAL([DRUNTIME_CPU_LOONGARCH],
++                 [test "$druntime_target_cpu_parsed" = "loongarch"])
+   AM_CONDITIONAL([DRUNTIME_CPU_MIPS],
+                  [test "$druntime_target_cpu_parsed" = "mips"])
+   AM_CONDITIONAL([DRUNTIME_CPU_POWERPC],
+-- 
+2.43.0
+
diff --git a/SME-0086-aarch64-Only-calculate-chain_offset-if-there-is-a-ch.patch b/0189-Backport-SME-aarch64-Only-calculate-chain_offset-if-.patch
similarity index 84%
rename from SME-0086-aarch64-Only-calculate-chain_offset-if-there-is-a-ch.patch
rename to 0189-Backport-SME-aarch64-Only-calculate-chain_offset-if-.patch
index 94e4fbbbfaf8ef7f0034fc4ccc2152a616805e2c..267b3ab3dea2d3ea16f92d65925799f4b5f7b700 100644
--- a/SME-0086-aarch64-Only-calculate-chain_offset-if-there-is-a-ch.patch
+++ b/0189-Backport-SME-aarch64-Only-calculate-chain_offset-if-.patch
@@ -1,8 +1,8 @@
-From 179af93f028d9c70c4515a7e0707c4496c0b129e Mon Sep 17 00:00:00 2001
+From c8768dd861538817db8c1955dcce4b6d8ce17c48 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:07 +0100
-Subject: [PATCH 086/144] aarch64: Only calculate chain_offset if there is a
- chain
+Subject: [PATCH 090/157] [Backport][SME] aarch64: Only calculate chain_offset
+ if there is a chain
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=aa8b57ee0206e8e5ac7078692ee67fb6ead05645
 
@@ -17,10 +17,10 @@ gcc/
  1 file changed, 5 insertions(+), 5 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 178e73b29..579a2ef7d 100644
+index a0a4c7ac3..bef6a658b 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -10140,16 +10140,16 @@ aarch64_expand_prologue (void)
+@@ -10357,16 +10357,16 @@ aarch64_expand_prologue (void)
    if (callee_adjust != 0)
      aarch64_push_regs (reg1, reg2, callee_adjust);
  
@@ -43,5 +43,5 @@ index 178e73b29..579a2ef7d 100644
  	{
  	  reg1 = R29_REGNUM;
 -- 
-2.19.1
+2.33.0
 
diff --git a/0189-LoongArch-fix-building-errors.patch b/0189-LoongArch-fix-building-errors.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e6e930d1b680598cea88622878935c0a99a1fe42
--- /dev/null
+++ b/0189-LoongArch-fix-building-errors.patch
@@ -0,0 +1,273 @@
+From 142ae446cab26f1beb81a53a7da3c477ce42df40 Mon Sep 17 00:00:00 2001
+From: Peng Fan 
+Date: Mon, 28 Oct 2024 09:02:51 +0000
+Subject: [PATCH] LoongArch: fix building errors.
+
+---
+ config/mt-loongarch-mlib                   |  2 +-
+ gcc/config/loongarch/loongarch-evolution.h |  2 +-
+ gcc/config/loongarch/loongarch-opts.cc     |  1 +
+ gcc/config/loongarch/loongarch-str.h       | 11 +++---
+ gcc/config/loongarch/loongarch.cc          |  9 +----
+ gcc/config/loongarch/loongarch.md          | 44 ++++++++++++++++------
+ gcc/config/loongarch/simd.md               | 15 +++++---
+ gcc/doc/invoke.texi                        |  3 +-
+ 8 files changed, 53 insertions(+), 34 deletions(-)
+
+diff --git a/config/mt-loongarch-mlib b/config/mt-loongarch-mlib
+index 4cfe568f1..bbbba277f 100644
+--- a/config/mt-loongarch-mlib
++++ b/config/mt-loongarch-mlib
+@@ -1 +1 @@
+-FLAGS_FOR_TARGET += -fmultiflags
++FLAGS_FOR_TARGET += 
+diff --git a/gcc/config/loongarch/loongarch-evolution.h b/gcc/config/loongarch/loongarch-evolution.h
+index d64996481..7e8e602c7 100644
+--- a/gcc/config/loongarch/loongarch-evolution.h
++++ b/gcc/config/loongarch/loongarch-evolution.h
+@@ -1,7 +1,7 @@
+ /* Generated automatically by "genstr" from "isa-evolution.in".
+    Please do not edit this file directly.
+ 
+-   Copyright (C) 2023 Free Software Foundation, Inc.
++   Copyright (C) 2023-2024 Free Software Foundation, Inc.
+ 
+ This file is part of GCC.
+ 
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 735daeb7c..1d08bb6a1 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -1071,6 +1071,7 @@ loongarch_init_misc_options (struct gcc_options *opts,
+ 
+ #undef INIT_TARGET_FLAG
+ 
++#define TARGET_DIRECT_EXTERN_ACCESS_OPTS_P(opts) (((opts->x_target_flags) & MASK_DIRECT_EXTERN_ACCESS) != 0)
+   /* Set mexplicit-relocs default.  */
+   if (opts->x_la_opt_explicit_relocs == M_OPT_UNSET)
+     opts->x_la_opt_explicit_relocs = (HAVE_AS_EXPLICIT_RELOCS
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index 3cbe12f7b..13d161a8c 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -66,9 +66,10 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_CMODEL_LARGE "large"
+ #define STR_CMODEL_EXTREME "extreme"
+ 
+-#define OPTSTR_FRECIPE "frecipe"
+-#define OPTSTR_DIV32   "div32"
+-#define OPTSTR_LAM_BH  "lam-bh"
+-#define OPTSTR_LAMCAS  "lamcas"
+-#define OPTSTR_LD_SEQ_SA   "ld-seq-sa"
++#define OPTSTR_FRECIPE	"frecipe"
++#define OPTSTR_DIV32	"div32"
++#define OPTSTR_LAM_BH	"lam-bh"
++#define OPTSTR_LAMCAS	"lamcas"
++#define OPTSTR_LD_SEQ_SA	"ld-seq-sa"
++
+ #endif /* LOONGARCH_STR_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 53bd8d7ec..6be0d80b3 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -764,14 +764,7 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum,
+      argument.  Advance a local copy of CUM past the last "real" named
+      argument, to find out how many registers are left over.  */
+   local_cum = *get_cumulative_args (cum);
+-
+-  /* For a C23 variadic function w/o any named argument, and w/o an
+-     artifical argument for large return value, skip advancing args.
+-     There is such an artifical argument iff. arg.type is non-NULL
+-     (PR 114175).  */
+-  if (!TYPE_NO_NAMED_ARGS_STDARG_P (TREE_TYPE (current_function_decl))
+-      || arg.type != NULL_TREE)
+-    loongarch_function_arg_advance (pack_cumulative_args (&local_cum), arg);
++  loongarch_function_arg_advance (pack_cumulative_args (&local_cum), arg);
+ 
+   /* Found out how many registers we need to save.  */
+   gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 66236a7c7..d8d444c7a 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -32,6 +32,7 @@
+   UNSPEC_FCLASS
+   UNSPEC_FMAX
+   UNSPEC_FMIN
++  UNSPEC_COPYSIGN
+   UNSPEC_FTINT
+   UNSPEC_FTINTRM
+   UNSPEC_FTINTRP
+@@ -415,11 +416,13 @@
+ 
+ ;; A mode for anything with 32 bits or more, and able to be loaded with
+ ;; the same addressing mode as ld.w.
+-(define_mode_iterator LD_AT_LEAST_32_BIT [GPR ANYF])
++;; (define_mode_iterator LD_AT_LEAST_32_BIT [GPR ANYF])
++(define_mode_iterator LD_AT_LEAST_32_BIT [(SI "") (DI "TARGET_64BIT") (SF "TARGET_HARD_FLOAT") (DF "TARGET_DOUBLE_FLOAT")])
+ 
+ ;; A mode for anything able to be stored with the same addressing mode as
+ ;; st.w.
+-(define_mode_iterator ST_ANY [QHWD ANYF])
++;; (define_mode_iterator ST_ANY [QHWD ANYF])
++(define_mode_iterator ST_ANY [(QI "") (HI "") (SI "") (DI "TARGET_64BIT") (SF "TARGET_HARD_FLOAT") (DF "TARGET_DOUBLE_FLOAT")])
+ 
+ ;; A mode for anything legal as a input of a div or mod instruction.
+ (define_mode_iterator DIV [(DI "TARGET_64BIT")
+@@ -590,6 +593,10 @@
+ (define_code_attr sel [(eq "masknez") (ne "maskeqz")])
+ (define_code_attr selinv [(eq "maskeqz") (ne "masknez")])
+ 
++(define_int_attr lrint_allow_inexact [(UNSPEC_FTINT "1")
++                     (UNSPEC_FTINTRM "0")
++                     (UNSPEC_FTINTRP "0")])
++
+ ;; Iterator and attributes for floating-point to fixed-point conversion
+ ;; instructions.
+ (define_int_iterator LRINT [UNSPEC_FTINT UNSPEC_FTINTRM UNSPEC_FTINTRP])
+@@ -625,7 +632,8 @@
+ ;; so the redundant sign extension can be removed if the output is used as
+ ;; an input of a bitwise operation.  Note plus, rotl, and div are handled
+ ;; separately.
+-(define_code_iterator shift_w [any_shift rotatert])
++;; (define_code_iterator shift_w [any_shift rotatert])
++(define_code_iterator shift_w [ashift ashiftrt lshiftrt rotatert])
+ (define_code_iterator arith_w [minus mult])
+ 
+ (define_expand "3"
+@@ -1324,8 +1332,9 @@
+ 
+ (define_insn "copysign3"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+-	(copysign:ANYF (match_operand:ANYF 1 "register_operand" "f")
+-		       (match_operand:ANYF 2 "register_operand" "f")))]
++	(unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")
++		       (match_operand:ANYF 2 "register_operand" "f")]
++               UNSPEC_COPYSIGN))]
+   "TARGET_HARD_FLOAT"
+   "fcopysign.\t%0,%1,%2"
+   [(set_attr "type" "fcopysign")
+@@ -2722,12 +2731,13 @@
+    (set_attr "mode" "")])
+ 
+ ;; Convert floating-point numbers to integers
++;;   ( == UNSPEC_FTINT
+ (define_insn "2"
+   [(set (match_operand:ANYFI 0 "register_operand" "=f")
+ 	(unspec:ANYFI [(match_operand:ANYF 1 "register_operand" "f")]
+ 		      LRINT))]
+   "TARGET_HARD_FLOAT &&
+-   ( == UNSPEC_FTINT
++   (
+     || flag_fp_int_builtin_inexact
+     || !flag_trapping_math)"
+   "ftint.. %0,%1"
+@@ -4135,15 +4145,26 @@
+    (136	"isnormal")
+    (952	"isfinite")])
+ 
+-(define_expand "2"
++;;(define_expand "2"
++;;  [(match_operand:SI   0 "register_operand" "=r")
++;;   (match_operand:ANYF 1 "register_operand" " f")
++;;   (const_int FCLASS_MASK)]
++;;  "TARGET_HARD_FLOAT"
++;;  {
++;;    rtx ft0 = gen_reg_rtx (SImode);
++;;    rtx t0 = gen_reg_rtx (word_mode);
++;;    rtx mask = GEN_INT ();
++
++(define_expand "fclass_optab2"
++ [(unspec:ANYF
+   [(match_operand:SI   0 "register_operand" "=r")
+-   (match_operand:ANYF 1 "register_operand" " f")
+-   (const_int FCLASS_MASK)]
++   (match_operand:ANYF 1 "register_operand" " f")]
++   UNSPEC_FCLASS)]
+   "TARGET_HARD_FLOAT"
+   {
+     rtx ft0 = gen_reg_rtx (SImode);
+     rtx t0 = gen_reg_rtx (word_mode);
+-    rtx mask = GEN_INT ();
++    rtx mask = GEN_INT (GET_MODE_MASK (mode));
+ 
+     emit_insn (gen_fclass_ (ft0, operands[1]));
+ 
+@@ -4165,7 +4186,8 @@
+     emit_move_insn (operands[0], t0);
+ 
+     DONE;
+-  })
++  }
++  [(set_attr "mode" "")])
+ 
+ (define_insn "bytepick_w_"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+diff --git a/gcc/config/loongarch/simd.md b/gcc/config/loongarch/simd.md
+index c28b95282..9e4c08196 100644
+--- a/gcc/config/loongarch/simd.md
++++ b/gcc/config/loongarch/simd.md
+@@ -30,10 +30,13 @@
+ (define_mode_iterator FLASX   [V4DF V8SF])
+ 
+ ;; All integer modes available
+-(define_mode_iterator IVEC    [(ILSX "ISA_HAS_LSX") (ILASX "ISA_HAS_LASX")])
++;; (define_mode_iterator IVEC    [(ILSX "ISA_HAS_LSX") (ILASX "ISA_HAS_LASX")])
++(define_mode_iterator IVEC    [(V2DI "ISA_HAS_LSX") (V4SI "ISA_HAS_LSX") (V8HI "ISA_HAS_LSX") (V16QI "ISA_HAS_LSX")
++                            (V4DI "ISA_HAS_LASX") (V8SI "ISA_HAS_LASX") (V16HI "ISA_HAS_LASX") (V32QI "ISA_HAS_LASX")])
+ 
+ ;; All FP modes available
+-(define_mode_iterator FVEC    [(FLSX "ISA_HAS_LSX") (FLASX "ISA_HAS_LASX")])
++(define_mode_iterator FVEC    [(V2DF "ISA_HAS_LSX") (V4SF "ISA_HAS_LSX") 
++                            (V4DF "ISA_HAS_LASX") (V8SF "ISA_HAS_LASX")])
+ 
+ ;; Mnemonic prefix, "x" for LASX modes.
+ (define_mode_attr x [(V2DI "") (V4SI "") (V8HI "") (V16QI "")
+@@ -162,12 +165,12 @@
+ ;; Expand the standard-named patterns to vfrint instructions if
+ ;; raising inexact exception is allowed.
+ 
++;;   " == UNSPEC_SIMD_FRINT ||
+ (define_expand "2"
+   [(set (match_operand:FVEC 0 "register_operand" "=f")
+ 	(unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
+ 		     SIMD_FRINT))]
+-   " == UNSPEC_SIMD_FRINT ||
+-    flag_fp_int_builtin_inexact ||
++    "flag_fp_int_builtin_inexact ||
+     !flag_trapping_math")
+ 
+ ;; ftrunc is like btrunc, but it's allowed to raise inexact exception
+@@ -221,13 +224,13 @@
+ ;; Expand the standard-named patterns to vftint instructions if
+ ;; raising inexact exception.
+ 
++;;   " == UNSPEC_SIMD_FRINT ||
+ (define_expand "l2"
+   [(set (match_operand: 0 "register_operand" "=f")
+ 	(fix:
+ 	  (unspec:FVEC [(match_operand:FVEC 1 "register_operand" "f")]
+ 		       SIMD_FRINT)))]
+-   " == UNSPEC_SIMD_FRINT ||
+-    flag_fp_int_builtin_inexact ||
++    "flag_fp_int_builtin_inexact ||
+     !flag_trapping_math")
+ 
+ ;; fix_trunc is allowed to raise inexact exception even if
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index d2c52cdf4..8a09938fc 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1006,8 +1006,7 @@ Objective-C and Objective-C++ Dialects}.
+ -mcond-move-float  -mno-cond-move-float @gol
+ -memcpy  -mno-memcpy -mstrict-align -mno-strict-align @gol
+ -mmax-inline-memcpy-size=@var{n} @gol
+--mexplicit-relocs -mno-explicit-relocs @gol
+--mdirect-extern-access -mno-direct-extern-access @gol
++-mexplicit-relocs=@var{style} -mexplicit-relocs -mno-explicit-relocs @gol
+ -mcmodel=@var{code-model} -mrelax -mpass-mrelax-to-as @gol
+ -mrecip  -mrecip=@var{opt} -mfrecipe -mno-frecipe -mdiv32 -mno-div32 @gol
+ -mlam-bh -mno-lam-bh -mlamcas -mno-lamcas -mld-seq-sa -mno-ld-seq-sa @gol
+-- 
+2.45.2
+
diff --git a/SME-0087-aarch64-Rename-locals_offset-to-bytes_above_locals.patch b/0190-Backport-SME-aarch64-Rename-locals_offset-to-bytes_a.patch
similarity index 91%
rename from SME-0087-aarch64-Rename-locals_offset-to-bytes_above_locals.patch
rename to 0190-Backport-SME-aarch64-Rename-locals_offset-to-bytes_a.patch
index b282c0f4345e6c7fc0cb5ec7d8e0a54084c7617c..8b44c65cadc892adeb952591bff8b2efbc2e9dfe 100644
--- a/SME-0087-aarch64-Rename-locals_offset-to-bytes_above_locals.patch
+++ b/0190-Backport-SME-aarch64-Rename-locals_offset-to-bytes_a.patch
@@ -1,7 +1,8 @@
-From b6ab5da4043983b90b3e612f05c638149d63578f Mon Sep 17 00:00:00 2001
+From 43dc03de6d608e10d83cc7994d127e3764bfbcf7 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:07 +0100
-Subject: [PATCH 087/144] aarch64: Rename locals_offset to bytes_above_locals
+Subject: [PATCH 091/157] [Backport][SME] aarch64: Rename locals_offset to
+ bytes_above_locals
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -42,10 +43,10 @@ gcc/
  2 files changed, 6 insertions(+), 6 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 579a2ef7d..df81f77e2 100644
+index bef6a658b..992f71bbd 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8720,7 +8720,7 @@ aarch64_layout_frame (void)
+@@ -8937,7 +8937,7 @@ aarch64_layout_frame (void)
  			  STACK_BOUNDARY / BITS_PER_UNIT));
    frame.frame_size = saved_regs_and_above + frame.bytes_below_saved_regs;
  
@@ -54,7 +55,7 @@ index 579a2ef7d..df81f77e2 100644
  
    frame.initial_adjust = 0;
    frame.final_adjust = 0;
-@@ -12830,13 +12830,13 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+@@ -13047,13 +13047,13 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
  	return frame.hard_fp_offset;
  
        if (from == FRAME_POINTER_REGNUM)
@@ -89,5 +90,5 @@ index a1db4f689..2acff9a96 100644
    /* Offset from the base of the frame (incomming SP) to the
       hard_frame_pointer.  This value is always a multiple of
 -- 
-2.19.1
+2.33.0
 
diff --git a/0190-tree-optimization-110702-avoid-zero-based-memory-ref.patch b/0190-tree-optimization-110702-avoid-zero-based-memory-ref.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5f31522b824a2235c51e082ce483d62420877882
--- /dev/null
+++ b/0190-tree-optimization-110702-avoid-zero-based-memory-ref.patch
@@ -0,0 +1,119 @@
+From 13dfb01e5c30c3bd09333ac79d6ff96a617fea67 Mon Sep 17 00:00:00 2001
+From: Richard Biener 
+Date: Thu, 3 Aug 2023 13:11:12 +0200
+Subject: [PATCH] tree-optimization/110702 - avoid zero-based memory references
+ in IVOPTs
+
+Sometimes IVOPTs chooses a weird induction variable which downstream
+leads to issues.  Most of the times we can fend those off during costing
+by rejecting the candidate but it looks like the address description
+costing synthesizes is different from what we end up generating so
+the following fixes things up at code generation time.  Specifically
+we avoid the create_mem_ref_raw fallback which uses a literal zero
+address base with the actual base in index2.  For the case in question
+we have the address
+
+  type = unsigned long
+  offset = 0
+  elements = {
+    [0] = &e * -3,
+    [1] = (sizetype) a.9_30 * 232,
+    [2] = ivtmp.28_44 * 4
+  }
+
+from which we code generate the problematical
+
+  _3 = MEM[(long int *)0B + ivtmp.36_9 + ivtmp.28_44 * 4];
+
+which references the object at address zero.  The patch below
+recognizes the fallback after the fact and transforms the
+TARGET_MEM_REF memory reference into a LEA for which this form
+isn't problematic:
+
+  _24 = &MEM[(long int *)0B + ivtmp.36_34 + ivtmp.28_44 * 4];
+  _3 = *_24;
+
+hereby avoiding the correctness issue.  We'd later conclude the
+program terminates at the null pointer dereference and make the
+function pure, miscompling the main function of the testcase.
+
+	PR tree-optimization/110702
+	* tree-ssa-loop-ivopts.cc (rewrite_use_address): When
+	we created a NULL pointer based access rewrite that to
+	a LEA.
+
+	* gcc.dg/torture/pr110702.c: New testcase.
+---
+ gcc/testsuite/gcc.dg/torture/pr110702.c | 31 +++++++++++++++++++++++++
+ gcc/tree-ssa-loop-ivopts.cc             | 17 +++++++++++++-
+ 2 files changed, 47 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.dg/torture/pr110702.c
+
+diff --git a/gcc/testsuite/gcc.dg/torture/pr110702.c b/gcc/testsuite/gcc.dg/torture/pr110702.c
+new file mode 100644
+index 00000000000..aab9c7d923e
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/torture/pr110702.c
+@@ -0,0 +1,31 @@
++/* { dg-do run } */
++
++void abort (void);
++
++int a, b, c, d;
++long e[9][7][4];
++
++void f()
++{
++  for (; a >= 0; a--)
++    {
++      b = 0;
++      for (; b <= 3; b++)
++	{
++	  c = 0;
++	  for (; c <= 3; c++)
++	    {
++	      int *g = &d;
++	      *g = e[0][0][b] | e[a][b][a];
++	    }
++	}
++    }
++}
++
++int main()
++{
++  f();
++  if (a != -1)
++    abort ();
++  return 0;
++}
+diff --git a/gcc/tree-ssa-loop-ivopts.cc b/gcc/tree-ssa-loop-ivopts.cc
+index 92fc1c7d734..934897af691 100644
+--- a/gcc/tree-ssa-loop-ivopts.cc
++++ b/gcc/tree-ssa-loop-ivopts.cc
+@@ -7630,7 +7630,22 @@ rewrite_use_address (struct ivopts_data *data,
+ 				      true, GSI_SAME_STMT);
+     }
+   else
+-    copy_ref_info (ref, *use->op_p);
++    {
++      /* When we end up confused enough and have no suitable base but
++	 stuffed everything to index2 use a LEA for the address and
++	 create a plain MEM_REF to avoid basing a memory reference
++	 on address zero which create_mem_ref_raw does as fallback.  */
++      if (TREE_CODE (ref) == TARGET_MEM_REF
++	  && TMR_INDEX2 (ref) != NULL_TREE
++	  && integer_zerop (TREE_OPERAND (ref, 0)))
++	{
++	  ref = fold_build1 (ADDR_EXPR, TREE_TYPE (TREE_OPERAND (ref, 0)), ref);
++	  ref = force_gimple_operand_gsi (&bsi, ref, true, NULL_TREE,
++					  true, GSI_SAME_STMT);
++	  ref = build2 (MEM_REF, type, ref, build_zero_cst (alias_ptr_type));
++	}
++      copy_ref_info (ref, *use->op_p);
++    }
+ 
+   *use->op_p = ref;
+ }
+-- 
+2.45.2
+
diff --git a/SME-0088-aarch64-Rename-hard_fp_offset-to-bytes_above_hard_fp.patch b/0191-Backport-SME-aarch64-Rename-hard_fp_offset-to-bytes_.patch
similarity index 89%
rename from SME-0088-aarch64-Rename-hard_fp_offset-to-bytes_above_hard_fp.patch
rename to 0191-Backport-SME-aarch64-Rename-hard_fp_offset-to-bytes_.patch
index ed2dc58129275400d9aeb2c72128b6f1b44c1624..b02958e755c4a002a5c690cecba9f241035ba609 100644
--- a/SME-0088-aarch64-Rename-hard_fp_offset-to-bytes_above_hard_fp.patch
+++ b/0191-Backport-SME-aarch64-Rename-hard_fp_offset-to-bytes_.patch
@@ -1,7 +1,8 @@
-From 22b4c241054addae543a4fd3f0b46579274c6b91 Mon Sep 17 00:00:00 2001
+From e33aa6e25334fd94e1e4f2d8b6c8247029657a54 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:08 +0100
-Subject: [PATCH 088/144] aarch64: Rename hard_fp_offset to bytes_above_hard_fp
+Subject: [PATCH 092/157] [Backport][SME] aarch64: Rename hard_fp_offset to
+ bytes_above_hard_fp
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -32,10 +33,10 @@ gcc/
  2 files changed, 16 insertions(+), 16 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index df81f77e2..5975630d9 100644
+index 992f71bbd..67199a026 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8712,7 +8712,7 @@ aarch64_layout_frame (void)
+@@ -8929,7 +8929,7 @@ aarch64_layout_frame (void)
  			   + get_frame_size (),
  			   STACK_BOUNDARY / BITS_PER_UNIT);
  
@@ -44,7 +45,7 @@ index df81f77e2..5975630d9 100644
      = saved_regs_and_above - frame.below_hard_fp_saved_regs_size;
  
    /* Both these values are already aligned.  */
-@@ -8761,13 +8761,13 @@ aarch64_layout_frame (void)
+@@ -8978,13 +8978,13 @@ aarch64_layout_frame (void)
    else if (frame.wb_pop_candidate1 != INVALID_REGNUM)
      max_push_offset = 256;
  
@@ -60,7 +61,7 @@ index df81f77e2..5975630d9 100644
      {
        /* Simple, small frame with no data below the saved registers.
  
-@@ -8784,8 +8784,8 @@ aarch64_layout_frame (void)
+@@ -9001,8 +9001,8 @@ aarch64_layout_frame (void)
  	      case that it hardly seems worth the effort though.  */
  	   && (!saves_below_hard_fp_p || const_below_saved_regs == 0)
  	   && !(cfun->calls_alloca
@@ -71,7 +72,7 @@ index df81f77e2..5975630d9 100644
      {
        /* Frame with small area below the saved registers:
  
-@@ -8803,12 +8803,12 @@ aarch64_layout_frame (void)
+@@ -9020,12 +9020,12 @@ aarch64_layout_frame (void)
  	 sub sp, sp, hard_fp_offset + below_hard_fp_saved_regs_size
  	 save SVE registers relative to SP
  	 sub sp, sp, bytes_below_saved_regs  */
@@ -87,7 +88,7 @@ index df81f77e2..5975630d9 100644
      {
        /* Frame with large area below the saved registers, or with SVE saves,
  	 but with a small area above:
-@@ -8818,7 +8818,7 @@ aarch64_layout_frame (void)
+@@ -9035,7 +9035,7 @@ aarch64_layout_frame (void)
  	 [sub sp, sp, below_hard_fp_saved_regs_size]
  	 [save SVE registers relative to SP]
  	 sub sp, sp, bytes_below_saved_regs  */
@@ -96,7 +97,7 @@ index df81f77e2..5975630d9 100644
        frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
        frame.final_adjust = frame.bytes_below_saved_regs;
      }
-@@ -8833,7 +8833,7 @@ aarch64_layout_frame (void)
+@@ -9050,7 +9050,7 @@ aarch64_layout_frame (void)
  	 [sub sp, sp, below_hard_fp_saved_regs_size]
  	 [save SVE registers relative to SP]
  	 sub sp, sp, bytes_below_saved_regs  */
@@ -105,7 +106,7 @@ index df81f77e2..5975630d9 100644
        frame.sve_callee_adjust = frame.below_hard_fp_saved_regs_size;
        frame.final_adjust = frame.bytes_below_saved_regs;
      }
-@@ -10147,7 +10147,7 @@ aarch64_expand_prologue (void)
+@@ -10364,7 +10364,7 @@ aarch64_expand_prologue (void)
      {
        /* The offset of the frame chain record (if any) from the current SP.  */
        poly_int64 chain_offset = (initial_adjust + callee_adjust
@@ -114,7 +115,7 @@ index df81f77e2..5975630d9 100644
        gcc_assert (known_ge (chain_offset, 0));
  
        if (callee_adjust == 0)
-@@ -12827,10 +12827,10 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
+@@ -13044,10 +13044,10 @@ aarch64_initial_elimination_offset (unsigned from, unsigned to)
    if (to == HARD_FRAME_POINTER_REGNUM)
      {
        if (from == ARG_POINTER_REGNUM)
@@ -146,5 +147,5 @@ index 2acff9a96..0f7822c3d 100644
    /* The size of the frame.  This value is the offset from base of the
       frame (incomming SP) to the stack_pointer.  This value is always
 -- 
-2.19.1
+2.33.0
 
diff --git a/0191-LoongArch-Change-OSDIR-for-distribution.patch b/0191-LoongArch-Change-OSDIR-for-distribution.patch
new file mode 100644
index 0000000000000000000000000000000000000000..374588c09694bce5a2d6c5cdbb3a6dedf268dcc2
--- /dev/null
+++ b/0191-LoongArch-Change-OSDIR-for-distribution.patch
@@ -0,0 +1,25 @@
+From 25423cf92026221b7c8798533c40d3e6269a1d7c Mon Sep 17 00:00:00 2001
+From: Peng Fan 
+Date: Thu, 31 Oct 2024 02:01:49 +0000
+Subject: [PATCH] LoongArch: Change OSDIR for distribution
+
+Signed-off-by: Peng Fan 
+---
+ gcc/config/loongarch/t-linux | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/gcc/config/loongarch/t-linux b/gcc/config/loongarch/t-linux
+index 7cd7cde25..1d1f42596 100644
+--- a/gcc/config/loongarch/t-linux
++++ b/gcc/config/loongarch/t-linux
+@@ -28,4 +28,7 @@ ifeq ($(filter LA_DISABLE_MULTILIB,$(tm_defines)),)
+     MULTILIB_OSDIRNAMES += mabi.lp64f=$(MULTIOSDIR_lp64f)
+     MULTILIB_OSDIRNAMES += mabi.lp64s=$(MULTIOSDIR_lp64s)
+ 
++else
++    MULTILIB_OSDIRNAMES := ../lib64
++
+ endif
+-- 
+2.45.2
+
diff --git a/SME-0089-aarch64-Tweak-frame_size-comment.patch b/0192-Backport-SME-aarch64-Tweak-frame_size-comment.patch
similarity index 89%
rename from SME-0089-aarch64-Tweak-frame_size-comment.patch
rename to 0192-Backport-SME-aarch64-Tweak-frame_size-comment.patch
index f99a43c81fadd0ca0859c6f4ff646cb8b73a2b42..def17bb4fd22ad9441f4343ec20b882901972b1e 100644
--- a/SME-0089-aarch64-Tweak-frame_size-comment.patch
+++ b/0192-Backport-SME-aarch64-Tweak-frame_size-comment.patch
@@ -1,7 +1,7 @@
-From af54058b9c446fcb22f21e03d76692f327332590 Mon Sep 17 00:00:00 2001
+From 6aa0db727b6e3a7fed95b014f25f3f022d1f46e2 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:08 +0100
-Subject: [PATCH 089/144] aarch64: Tweak frame_size comment
+Subject: [PATCH 093/157] [Backport][SME] aarch64: Tweak frame_size comment
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -33,5 +33,5 @@ index 0f7822c3d..39abca051 100644
    poly_int64 frame_size;
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0090-aarch64-Measure-reg_offset-from-the-bottom-of-the-fr.patch b/0193-Backport-SME-aarch64-Measure-reg_offset-from-the-bot.patch
similarity index 89%
rename from SME-0090-aarch64-Measure-reg_offset-from-the-bottom-of-the-fr.patch
rename to 0193-Backport-SME-aarch64-Measure-reg_offset-from-the-bot.patch
index 3f87bee65e644410a04a6803e4dee1a13e52b57e..c03673181d24617e23c6bbab86b38ba9f3f07f8e 100644
--- a/SME-0090-aarch64-Measure-reg_offset-from-the-bottom-of-the-fr.patch
+++ b/0193-Backport-SME-aarch64-Measure-reg_offset-from-the-bot.patch
@@ -1,8 +1,8 @@
-From 8ed9d487ce593df13b8f7e588ac75bce35f40f22 Mon Sep 17 00:00:00 2001
+From 3b10711c6a5610bf8e2287b9491557268ee148da Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:09 +0100
-Subject: [PATCH 090/144] aarch64: Measure reg_offset from the bottom of the
- frame
+Subject: [PATCH 094/157] [Backport][SME] aarch64: Measure reg_offset from the
+ bottom of the frame
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=67a36b6f5d6be11d280081b461e72910aca2fc54
 
@@ -34,10 +34,10 @@ gcc/
  2 files changed, 27 insertions(+), 29 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 5975630d9..829052373 100644
+index 67199a026..df8a83b04 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8522,7 +8522,6 @@ aarch64_needs_frame_chain (void)
+@@ -8739,7 +8739,6 @@ aarch64_needs_frame_chain (void)
  static void
  aarch64_layout_frame (void)
  {
@@ -45,7 +45,7 @@ index 5975630d9..829052373 100644
    int regno, last_fp_reg = INVALID_REGNUM;
    machine_mode vector_save_mode = aarch64_reg_save_mode (V8_REGNUM);
    poly_int64 vector_save_size = GET_MODE_SIZE (vector_save_mode);
-@@ -8600,7 +8599,9 @@ aarch64_layout_frame (void)
+@@ -8817,7 +8816,9 @@ aarch64_layout_frame (void)
    gcc_assert (crtl->is_leaf
  	      || maybe_ne (frame.reg_offset[R30_REGNUM], SLOT_NOT_REQUIRED));
  
@@ -56,7 +56,7 @@ index 5975630d9..829052373 100644
  
    /* Now assign stack slots for the registers.  Start with the predicate
       registers, since predicate LDR and STR have a relatively small
-@@ -8612,7 +8613,8 @@ aarch64_layout_frame (void)
+@@ -8829,7 +8830,8 @@ aarch64_layout_frame (void)
  	offset += BYTES_PER_SVE_PRED;
        }
  
@@ -66,7 +66,7 @@ index 5975630d9..829052373 100644
      {
        /* If we have any vector registers to save above the predicate registers,
  	 the offset of the vector register save slots need to be a multiple
-@@ -8630,10 +8632,10 @@ aarch64_layout_frame (void)
+@@ -8847,10 +8849,10 @@ aarch64_layout_frame (void)
  	offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
        else
  	{
@@ -81,7 +81,7 @@ index 5975630d9..829052373 100644
  	  else
  	    gcc_unreachable ();
  	}
-@@ -8650,9 +8652,10 @@ aarch64_layout_frame (void)
+@@ -8867,9 +8869,10 @@ aarch64_layout_frame (void)
  
    /* OFFSET is now the offset of the hard frame pointer from the bottom
       of the callee save area.  */
@@ -95,7 +95,7 @@ index 5975630d9..829052373 100644
    if (frame.emit_frame_chain)
      {
        /* FP and LR are placed in the linkage record.  */
-@@ -8703,9 +8706,10 @@ aarch64_layout_frame (void)
+@@ -8920,9 +8923,10 @@ aarch64_layout_frame (void)
  
    offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
  
@@ -108,7 +108,7 @@ index 5975630d9..829052373 100644
  
    poly_int64 saved_regs_and_above
      = aligned_upper_bound (varargs_and_saved_regs_size
-@@ -9173,9 +9177,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9390,9 +9394,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
  
        machine_mode mode = aarch64_reg_save_mode (regno);
        reg = gen_rtx_REG (mode, regno);
@@ -119,7 +119,7 @@ index 5975630d9..829052373 100644
        rtx base_rtx = stack_pointer_rtx;
        poly_int64 sp_offset = offset;
  
-@@ -9282,9 +9284,7 @@ aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
+@@ -9499,9 +9501,7 @@ aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
  
        machine_mode mode = aarch64_reg_save_mode (regno);
        reg = gen_rtx_REG (mode, regno);
@@ -130,7 +130,7 @@ index 5975630d9..829052373 100644
        rtx base_rtx = stack_pointer_rtx;
        if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
-@@ -9423,14 +9423,12 @@ aarch64_get_separate_components (void)
+@@ -9640,14 +9640,12 @@ aarch64_get_separate_components (void)
  	   it as a stack probe for -fstack-clash-protection.  */
  	if (flag_stack_clash_protection
  	    && maybe_ne (frame.below_hard_fp_saved_regs_size, 0)
@@ -147,7 +147,7 @@ index 5975630d9..829052373 100644
  
  	/* Check that we can access the stack slot of the register with one
  	   direct load with no adjustments needed.  */
-@@ -9577,9 +9575,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+@@ -9794,9 +9792,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
        rtx reg = gen_rtx_REG (mode, regno);
        poly_int64 offset = frame.reg_offset[regno];
        if (frame_pointer_needed)
@@ -158,7 +158,7 @@ index 5975630d9..829052373 100644
  
        rtx addr = plus_constant (Pmode, ptr_reg, offset);
        rtx mem = gen_frame_mem (mode, addr);
-@@ -9631,9 +9627,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
+@@ -9848,9 +9844,7 @@ aarch64_process_components (sbitmap components, bool prologue_p)
        /* REGNO2 can be saved/restored in a pair with REGNO.  */
        rtx reg2 = gen_rtx_REG (mode, regno2);
        if (frame_pointer_needed)
@@ -169,7 +169,7 @@ index 5975630d9..829052373 100644
        rtx addr2 = plus_constant (Pmode, ptr_reg, offset2);
        rtx mem2 = gen_frame_mem (mode, addr2);
        rtx set2 = prologue_p ? gen_rtx_SET (mem2, reg2)
-@@ -9759,7 +9753,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -9976,7 +9970,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
    if (final_adjustment_p
        && known_eq (frame.below_hard_fp_saved_regs_size, 0))
      {
@@ -194,5 +194,5 @@ index 39abca051..f340237d0 100644
  
    /* The number of extra stack bytes taken up by register varargs.
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0091-aarch64-Simplify-top-of-frame-allocation.patch b/0194-Backport-SME-aarch64-Simplify-top-of-frame-allocatio.patch
similarity index 88%
rename from SME-0091-aarch64-Simplify-top-of-frame-allocation.patch
rename to 0194-Backport-SME-aarch64-Simplify-top-of-frame-allocatio.patch
index 5ae236851064debd1323f8600bb15c0e1f0c46e8..f67184104b8f3a5e82378f257e8dc109e543b421 100644
--- a/SME-0091-aarch64-Simplify-top-of-frame-allocation.patch
+++ b/0194-Backport-SME-aarch64-Simplify-top-of-frame-allocatio.patch
@@ -1,7 +1,8 @@
-From 97662d7dc8a1037c495d2cfa141754c9a0c16174 Mon Sep 17 00:00:00 2001
+From 4b8f3f194e68d0d411eaa6692699d8e5e2b4217d Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:09 +0100
-Subject: [PATCH 091/144] aarch64: Simplify top of frame allocation
+Subject: [PATCH 095/157] [Backport][SME] aarch64: Simplify top of frame
+ allocation
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=bc9dcdde80915d7585a21daa2b69f4adf4a1e3c1
 
@@ -17,10 +18,10 @@ gcc/
  1 file changed, 8 insertions(+), 15 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 829052373..c8bc837ec 100644
+index df8a83b04..3329aa364 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8708,23 +8708,16 @@ aarch64_layout_frame (void)
+@@ -8925,23 +8925,16 @@ aarch64_layout_frame (void)
  
    frame.saved_regs_size = offset - frame.bytes_below_saved_regs;
  
@@ -53,5 +54,5 @@ index 829052373..c8bc837ec 100644
    frame.initial_adjust = 0;
    frame.final_adjust = 0;
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0092-aarch64-Minor-initial-adjustment-tweak.patch b/0195-Backport-SME-aarch64-Minor-initial-adjustment-tweak.patch
similarity index 84%
rename from SME-0092-aarch64-Minor-initial-adjustment-tweak.patch
rename to 0195-Backport-SME-aarch64-Minor-initial-adjustment-tweak.patch
index 9718aeac8100d0f82c857c67cad26779e473f459..d962cb934bce9db31263fe88134ed1a24a5aa0a8 100644
--- a/SME-0092-aarch64-Minor-initial-adjustment-tweak.patch
+++ b/0195-Backport-SME-aarch64-Minor-initial-adjustment-tweak.patch
@@ -1,7 +1,8 @@
-From a880ea596f41572f3c2a372664c21492f400b6d2 Mon Sep 17 00:00:00 2001
+From 0ab484f5de7d28c0a7166439d403e0983834b120 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:10 +0100
-Subject: [PATCH 092/144] aarch64: Minor initial adjustment tweak
+Subject: [PATCH 096/157] [Backport][SME] aarch64: Minor initial adjustment
+ tweak
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ee5466ff4faca2076cc61f1f120d0b5062c8111c
 
@@ -18,10 +19,10 @@ gcc/
  1 file changed, 2 insertions(+), 3 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index c8bc837ec..7eb64f6c3 100644
+index 3329aa364..72604dd9d 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8797,11 +8797,10 @@ aarch64_layout_frame (void)
+@@ -9014,11 +9014,10 @@ aarch64_layout_frame (void)
      {
        /* Frame in which all saves are SVE saves:
  
@@ -36,5 +37,5 @@ index c8bc837ec..7eb64f6c3 100644
      }
    else if (frame.bytes_above_hard_fp.is_constant (&const_above_fp)
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0093-aarch64-Tweak-stack-clash-boundary-condition.patch b/0196-Backport-SME-aarch64-Tweak-stack-clash-boundary-cond.patch
similarity index 94%
rename from SME-0093-aarch64-Tweak-stack-clash-boundary-condition.patch
rename to 0196-Backport-SME-aarch64-Tweak-stack-clash-boundary-cond.patch
index 49808984e416a0dff82dd603743841bc714228f2..61994683efb4c69ccfcb187de01dc1bd0ec181f7 100644
--- a/SME-0093-aarch64-Tweak-stack-clash-boundary-condition.patch
+++ b/0196-Backport-SME-aarch64-Tweak-stack-clash-boundary-cond.patch
@@ -1,7 +1,8 @@
-From 32c56ac9f69640935da5ff335a5c0920582447fd Mon Sep 17 00:00:00 2001
+From b4581d1e6a7b94dfbd58871dad51d3f12889081f Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:10 +0100
-Subject: [PATCH 093/144] aarch64: Tweak stack clash boundary condition
+Subject: [PATCH 097/157] [Backport][SME] aarch64: Tweak stack clash boundary
+ condition
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=1785b8077cc03214ebd1db953c870172fcf15966
 
@@ -45,10 +46,10 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-17.c
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 7eb64f6c3..5c4489ead 100644
+index 72604dd9d..ba92a23a7 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -9726,9 +9726,11 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -9943,9 +9943,11 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
    HOST_WIDE_INT guard_size
      = 1 << param_stack_clash_protection_guard_size;
    HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
@@ -123,5 +124,5 @@ index 000000000..0d8a25d73
 +  return 1;
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0094-aarch64-Put-LR-save-probe-in-first-16-bytes.patch b/0197-Backport-SME-aarch64-Put-LR-save-probe-in-first-16-b.patch
similarity index 96%
rename from SME-0094-aarch64-Put-LR-save-probe-in-first-16-bytes.patch
rename to 0197-Backport-SME-aarch64-Put-LR-save-probe-in-first-16-b.patch
index 99942dc3f9caf8dbf96bc32f7d7f16ba39ff07ec..c848e192a27a9e0fd001d2fe8c544ee1755f54f0 100644
--- a/SME-0094-aarch64-Put-LR-save-probe-in-first-16-bytes.patch
+++ b/0197-Backport-SME-aarch64-Put-LR-save-probe-in-first-16-b.patch
@@ -1,7 +1,8 @@
-From 05641032febccd0b99258d254922d6b2edf49f2a Mon Sep 17 00:00:00 2001
+From ffd483dc6a2a4af495d56cf5ebdbbb3b9ca58820 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:11 +0100
-Subject: [PATCH 094/144] aarch64: Put LR save probe in first 16 bytes
+Subject: [PATCH 098/157] [Backport][SME] aarch64: Put LR save probe in first
+ 16 bytes
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=fee0a18abfdd4874194abd149943fa7c77a29b7c
 
@@ -72,10 +73,10 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-check-prologue-20.c
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 5c4489ead..20d9bb668 100644
+index ba92a23a7..1ba4c2f89 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8656,26 +8656,34 @@ aarch64_layout_frame (void)
+@@ -8873,26 +8873,34 @@ aarch64_layout_frame (void)
    bool saves_below_hard_fp_p
      = maybe_ne (frame.below_hard_fp_saved_regs_size, 0);
    frame.bytes_below_hard_fp = offset;
@@ -123,7 +124,7 @@ index 5c4489ead..20d9bb668 100644
  
    poly_int64 max_int_offset = offset;
    offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
-@@ -8753,10 +8761,13 @@ aarch64_layout_frame (void)
+@@ -8970,10 +8978,13 @@ aarch64_layout_frame (void)
       max_push_offset to 0, because no registers are popped at this time,
       so callee_adjust cannot be adjusted.  */
    HOST_WIDE_INT max_push_offset = 0;
@@ -141,7 +142,7 @@ index 5c4489ead..20d9bb668 100644
  
    HOST_WIDE_INT const_size, const_below_saved_regs, const_above_fp;
    HOST_WIDE_INT const_saved_regs_size;
-@@ -9732,29 +9743,6 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -9949,29 +9960,6 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
      = (final_adjustment_p
         ? guard_used_by_caller + byte_sp_alignment
         : guard_size - guard_used_by_caller);
@@ -171,7 +172,7 @@ index 5c4489ead..20d9bb668 100644
    poly_int64 frame_size = frame.frame_size;
  
    /* We should always have a positive probe threshold.  */
-@@ -9934,8 +9922,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -10151,8 +10139,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
        if (final_adjustment_p && rounded_size != 0)
  	min_probe_threshold = 0;
        /* If doing a small final adjustment, we always probe at offset 0.
@@ -404,5 +405,5 @@ index 000000000..690aae8df
 +
 +#include "stack-check-prologue-19.c"
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0095-aarch64-Simplify-probe-of-final-frame-allocation.patch b/0198-Backport-SME-aarch64-Simplify-probe-of-final-frame-a.patch
similarity index 92%
rename from SME-0095-aarch64-Simplify-probe-of-final-frame-allocation.patch
rename to 0198-Backport-SME-aarch64-Simplify-probe-of-final-frame-a.patch
index 56d346f3a5d026789664e292c2859049da9a6109..70cd64bbfc14888ca9f981fc1b91c7106c82098d 100644
--- a/SME-0095-aarch64-Simplify-probe-of-final-frame-allocation.patch
+++ b/0198-Backport-SME-aarch64-Simplify-probe-of-final-frame-a.patch
@@ -1,7 +1,8 @@
-From 7c5addc499c77b531f929a1341285ea1a2ef4f7c Mon Sep 17 00:00:00 2001
+From c12de24e57cbe26c224bab39698736fa4004f8ff Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:11 +0100
-Subject: [PATCH 095/144] aarch64: Simplify probe of final frame allocation
+Subject: [PATCH 099/157] [Backport][SME] aarch64: Simplify probe of final
+ frame allocation
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=f87028a905059573ae7fdfe526d034fd70b3bcae
 
@@ -30,10 +31,10 @@ gcc/testsuite/
  4 files changed, 9 insertions(+), 13 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 20d9bb668..23a4c34cf 100644
+index 1ba4c2f89..6d835dc8f 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -9916,16 +9916,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -10133,16 +10133,12 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
       are still safe.  */
    if (residual)
      {
@@ -52,7 +53,7 @@ index 20d9bb668..23a4c34cf 100644
  
        aarch64_sub_sp (temp1, temp2, residual, frame_related_p);
        if (residual >= min_probe_threshold)
-@@ -9936,8 +9932,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -10153,8 +10149,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
  		     HOST_WIDE_INT_PRINT_DEC " bytes, probing will be required."
  		     "\n", residual);
  
@@ -121,5 +122,5 @@ index 73ac3e4e4..562039b5e 100644
  **	bl	g
  **	...
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0096-aarch64-Explicitly-record-probe-registers-in-frame-i.patch b/0199-Backport-SME-aarch64-Explicitly-record-probe-registe.patch
similarity index 93%
rename from SME-0096-aarch64-Explicitly-record-probe-registers-in-frame-i.patch
rename to 0199-Backport-SME-aarch64-Explicitly-record-probe-registe.patch
index aa246ae0feb4a1505f6857aaaf68e3a69090ffc5..6fe7afa547f9b6df3d27e7e4fd8c4fd2bbe8c50f 100644
--- a/SME-0096-aarch64-Explicitly-record-probe-registers-in-frame-i.patch
+++ b/0199-Backport-SME-aarch64-Explicitly-record-probe-registe.patch
@@ -1,8 +1,8 @@
-From 65d5abee34623f18d10bee25af5d9568ba69b2ed Mon Sep 17 00:00:00 2001
+From 1bf3e9a04411b483c89d2e2f9096ab66800c3b3f Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:12 +0100
-Subject: [PATCH 096/144] aarch64: Explicitly record probe registers in frame
- info
+Subject: [PATCH 100/157] [Backport][SME] aarch64: Explicitly record probe
+ registers in frame info
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=5ce957484eea15f09503fcffa4dfdfb70ad82f8f
 
@@ -75,10 +75,10 @@ gcc/testsuite/
  3 files changed, 64 insertions(+), 18 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 23a4c34cf..87a1dcc5a 100644
+index 6d835dc8f..dd80ceba8 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8593,15 +8593,11 @@ aarch64_layout_frame (void)
+@@ -8810,15 +8810,11 @@ aarch64_layout_frame (void)
  	&& !crtl->abi->clobbers_full_reg_p (regno))
        frame.reg_offset[regno] = SLOT_REQUIRED;
  
@@ -95,7 +95,7 @@ index 23a4c34cf..87a1dcc5a 100644
  
    /* Now assign stack slots for the registers.  Start with the predicate
       registers, since predicate LDR and STR have a relatively small
-@@ -8609,6 +8605,8 @@ aarch64_layout_frame (void)
+@@ -8826,6 +8822,8 @@ aarch64_layout_frame (void)
    for (regno = P0_REGNUM; regno <= P15_REGNUM; regno++)
      if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
        {
@@ -104,7 +104,7 @@ index 23a4c34cf..87a1dcc5a 100644
  	frame.reg_offset[regno] = offset;
  	offset += BYTES_PER_SVE_PRED;
        }
-@@ -8646,6 +8644,8 @@ aarch64_layout_frame (void)
+@@ -8863,6 +8861,8 @@ aarch64_layout_frame (void)
      for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
        if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
  	{
@@ -113,7 +113,7 @@ index 23a4c34cf..87a1dcc5a 100644
  	  frame.reg_offset[regno] = offset;
  	  offset += vector_save_size;
  	}
-@@ -8655,10 +8655,18 @@ aarch64_layout_frame (void)
+@@ -8872,10 +8872,18 @@ aarch64_layout_frame (void)
    frame.below_hard_fp_saved_regs_size = offset - frame.bytes_below_saved_regs;
    bool saves_below_hard_fp_p
      = maybe_ne (frame.below_hard_fp_saved_regs_size, 0);
@@ -132,7 +132,7 @@ index 23a4c34cf..87a1dcc5a 100644
        frame.reg_offset[regno] = offset;
        if (frame.wb_push_candidate1 == INVALID_REGNUM)
  	frame.wb_push_candidate1 = regno;
-@@ -8692,6 +8700,8 @@ aarch64_layout_frame (void)
+@@ -8909,6 +8917,8 @@ aarch64_layout_frame (void)
    for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
      if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
        {
@@ -141,7 +141,7 @@ index 23a4c34cf..87a1dcc5a 100644
  	/* If there is an alignment gap between integer and fp callee-saves,
  	   allocate the last fp register to it if possible.  */
  	if (regno == last_fp_reg
-@@ -8715,6 +8725,17 @@ aarch64_layout_frame (void)
+@@ -8932,6 +8942,17 @@ aarch64_layout_frame (void)
    offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
  
    frame.saved_regs_size = offset - frame.bytes_below_saved_regs;
@@ -159,7 +159,7 @@ index 23a4c34cf..87a1dcc5a 100644
  
    offset += get_frame_size ();
    offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
-@@ -8845,6 +8866,25 @@ aarch64_layout_frame (void)
+@@ -9062,6 +9083,25 @@ aarch64_layout_frame (void)
        frame.final_adjust = frame.bytes_below_saved_regs;
      }
  
@@ -185,7 +185,7 @@ index 23a4c34cf..87a1dcc5a 100644
    /* Make sure the individual adjustments add up to the full frame size.  */
    gcc_assert (known_eq (frame.initial_adjust
  			+ frame.callee_adjust
-@@ -9422,13 +9462,6 @@ aarch64_get_separate_components (void)
+@@ -9639,13 +9679,6 @@ aarch64_get_separate_components (void)
  
  	poly_int64 offset = frame.reg_offset[regno];
  
@@ -199,7 +199,7 @@ index 23a4c34cf..87a1dcc5a 100644
  	/* Get the offset relative to the register we'll use.  */
  	if (frame_pointer_needed)
  	  offset -= frame.bytes_below_hard_fp;
-@@ -9463,6 +9496,13 @@ aarch64_get_separate_components (void)
+@@ -9680,6 +9713,13 @@ aarch64_get_separate_components (void)
  
    bitmap_clear_bit (components, LR_REGNUM);
    bitmap_clear_bit (components, SP_REGNUM);
@@ -213,7 +213,7 @@ index 23a4c34cf..87a1dcc5a 100644
  
    return components;
  }
-@@ -10009,8 +10049,8 @@ aarch64_epilogue_uses (int regno)
+@@ -10226,8 +10266,8 @@ aarch64_epilogue_uses (int regno)
     When probing is needed, we emit a probe at the start of the prologue
     and every PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE bytes thereafter.
  
@@ -276,5 +276,5 @@ index 3e01ec36c..3530a0d50 100644
  **	ldr	x24, \[sp\], 32
  **	ret
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0097-aarch64-Remove-below_hard_fp_saved_regs_size.patch b/0200-Backport-SME-aarch64-Remove-below_hard_fp_saved_regs.patch
similarity index 90%
rename from SME-0097-aarch64-Remove-below_hard_fp_saved_regs_size.patch
rename to 0200-Backport-SME-aarch64-Remove-below_hard_fp_saved_regs.patch
index ce0a66f6147747ecafcda5b4b77e337d116fb5f5..26cce098c60df49b36429dfdbbfe0947834d2de0 100644
--- a/SME-0097-aarch64-Remove-below_hard_fp_saved_regs_size.patch
+++ b/0200-Backport-SME-aarch64-Remove-below_hard_fp_saved_regs.patch
@@ -1,7 +1,8 @@
-From 379d205fd2876a72a59ffa84c8b919a9298f4be1 Mon Sep 17 00:00:00 2001
+From 5c33afb2173f68a0166bd180977cd1e547df22dc Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:12 +0100
-Subject: [PATCH 097/144] aarch64: Remove below_hard_fp_saved_regs_size
+Subject: [PATCH 101/157] [Backport][SME] aarch64: Remove
+ below_hard_fp_saved_regs_size
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=2abfc867d3ba025ac2146bb21b92a93e6325dec1
 
@@ -20,10 +21,10 @@ gcc/
  2 files changed, 21 insertions(+), 31 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 87a1dcc5a..68a911730 100644
+index dd80ceba8..0894ed325 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8652,9 +8652,8 @@ aarch64_layout_frame (void)
+@@ -8869,9 +8869,8 @@ aarch64_layout_frame (void)
  
    /* OFFSET is now the offset of the hard frame pointer from the bottom
       of the callee save area.  */
@@ -35,7 +36,7 @@ index 87a1dcc5a..68a911730 100644
    gcc_assert (!saves_below_hard_fp_p
  	      || (frame.sve_save_and_probe != INVALID_REGNUM
  		  && known_eq (frame.reg_offset[frame.sve_save_and_probe],
-@@ -8724,9 +8723,8 @@ aarch64_layout_frame (void)
+@@ -8941,9 +8940,8 @@ aarch64_layout_frame (void)
  
    offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
  
@@ -47,7 +48,7 @@ index 87a1dcc5a..68a911730 100644
  	      || (frame.hard_fp_save_and_probe != INVALID_REGNUM
  		  && known_eq (frame.reg_offset[frame.hard_fp_save_and_probe],
  			       frame.bytes_below_hard_fp)));
-@@ -8735,7 +8733,7 @@ aarch64_layout_frame (void)
+@@ -8952,7 +8950,7 @@ aarch64_layout_frame (void)
       The saving of the bottommost register counts as an implicit probe,
       which allows us to maintain the invariant described in the comment
       at expand_prologue.  */
@@ -56,7 +57,7 @@ index 87a1dcc5a..68a911730 100644
  
    offset += get_frame_size ();
    offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
-@@ -8792,7 +8790,7 @@ aarch64_layout_frame (void)
+@@ -9009,7 +9007,7 @@ aarch64_layout_frame (void)
  
    HOST_WIDE_INT const_size, const_below_saved_regs, const_above_fp;
    HOST_WIDE_INT const_saved_regs_size;
@@ -65,7 +66,7 @@ index 87a1dcc5a..68a911730 100644
      frame.initial_adjust = frame.frame_size;
    else if (frame.frame_size.is_constant (&const_size)
  	   && const_size < max_push_offset
-@@ -8805,7 +8803,7 @@ aarch64_layout_frame (void)
+@@ -9022,7 +9020,7 @@ aarch64_layout_frame (void)
        frame.callee_adjust = const_size;
      }
    else if (frame.bytes_below_saved_regs.is_constant (&const_below_saved_regs)
@@ -74,7 +75,7 @@ index 87a1dcc5a..68a911730 100644
  	   && const_below_saved_regs + const_saved_regs_size < 512
  	   /* We could handle this case even with data below the saved
  	      registers, provided that that data left us with valid offsets
-@@ -8824,8 +8822,7 @@ aarch64_layout_frame (void)
+@@ -9041,8 +9039,7 @@ aarch64_layout_frame (void)
        frame.initial_adjust = frame.frame_size;
      }
    else if (saves_below_hard_fp_p
@@ -84,7 +85,7 @@ index 87a1dcc5a..68a911730 100644
      {
        /* Frame in which all saves are SVE saves:
  
-@@ -8847,7 +8844,7 @@ aarch64_layout_frame (void)
+@@ -9064,7 +9061,7 @@ aarch64_layout_frame (void)
  	 [save SVE registers relative to SP]
  	 sub sp, sp, bytes_below_saved_regs  */
        frame.callee_adjust = const_above_fp;
@@ -93,7 +94,7 @@ index 87a1dcc5a..68a911730 100644
        frame.final_adjust = frame.bytes_below_saved_regs;
      }
    else
-@@ -8862,7 +8859,7 @@ aarch64_layout_frame (void)
+@@ -9079,7 +9076,7 @@ aarch64_layout_frame (void)
  	 [save SVE registers relative to SP]
  	 sub sp, sp, bytes_below_saved_regs  */
        frame.initial_adjust = frame.bytes_above_hard_fp;
@@ -102,7 +103,7 @@ index 87a1dcc5a..68a911730 100644
        frame.final_adjust = frame.bytes_below_saved_regs;
      }
  
-@@ -10014,17 +10011,17 @@ aarch64_epilogue_uses (int regno)
+@@ -10231,17 +10228,17 @@ aarch64_epilogue_uses (int regno)
  	|  local variables              | <-- frame_pointer_rtx
  	|                               |
  	+-------------------------------+
@@ -155,5 +156,5 @@ index af480d9e8..292ef2eec 100644
       of the outgoing arguments) and the hard frame pointer.  This value is
       always a multiple of STACK_BOUNDARY.  */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0098-aarch64-Make-stack-smash-canary-protect-saved-regist.patch b/0201-Backport-SME-aarch64-Make-stack-smash-canary-protect.patch
similarity index 92%
rename from SME-0098-aarch64-Make-stack-smash-canary-protect-saved-regist.patch
rename to 0201-Backport-SME-aarch64-Make-stack-smash-canary-protect.patch
index 094f41317573e9a6a66c1b135b45808fb7b11932..c0d8bf6c9ff124c4230cee09b2e3900528d66cf3 100644
--- a/SME-0098-aarch64-Make-stack-smash-canary-protect-saved-regist.patch
+++ b/0201-Backport-SME-aarch64-Make-stack-smash-canary-protect.patch
@@ -1,8 +1,8 @@
-From 162d0b343b0e9f65af3043015505dcbdf3ee58d2 Mon Sep 17 00:00:00 2001
+From b225443d64481bc225e29bf119d99b719c69cd3c Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Sep 2023 16:05:13 +0100
-Subject: [PATCH 098/144] aarch64: Make stack smash canary protect saved
- registers
+Subject: [PATCH 102/157] [Backport][SME] aarch64: Make stack smash canary
+ protect saved registers
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=3e4afea3b192c205c9a9da99f4cac65c68087eaf
 
@@ -41,10 +41,10 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/stack-protector-9.c
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 68a911730..feb6a106e 100644
+index 0894ed325..8d4dd2891 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8516,6 +8516,20 @@ aarch64_needs_frame_chain (void)
+@@ -8733,6 +8733,20 @@ aarch64_needs_frame_chain (void)
    return aarch64_use_frame_pointer;
  }
  
@@ -65,7 +65,7 @@ index 68a911730..feb6a106e 100644
  /* Mark the registers that need to be saved by the callee and calculate
     the size of the callee-saved registers area and frame record (both FP
     and LR may be omitted).  */
-@@ -8527,6 +8541,7 @@ aarch64_layout_frame (void)
+@@ -8744,6 +8758,7 @@ aarch64_layout_frame (void)
    poly_int64 vector_save_size = GET_MODE_SIZE (vector_save_mode);
    bool frame_related_fp_reg_p = false;
    aarch64_frame &frame = cfun->machine->frame;
@@ -73,7 +73,7 @@ index 68a911730..feb6a106e 100644
  
    frame.emit_frame_chain = aarch64_needs_frame_chain ();
  
-@@ -8593,9 +8608,16 @@ aarch64_layout_frame (void)
+@@ -8810,9 +8825,16 @@ aarch64_layout_frame (void)
  	&& !crtl->abi->clobbers_full_reg_p (regno))
        frame.reg_offset[regno] = SLOT_REQUIRED;
  
@@ -90,7 +90,7 @@ index 68a911730..feb6a106e 100644
    frame.bytes_below_saved_regs = offset;
    frame.sve_save_and_probe = INVALID_REGNUM;
  
-@@ -8735,15 +8757,18 @@ aarch64_layout_frame (void)
+@@ -8952,15 +8974,18 @@ aarch64_layout_frame (void)
       at expand_prologue.  */
    gcc_assert (crtl->is_leaf || maybe_ne (saved_regs_size, 0));
  
@@ -113,7 +113,7 @@ index 68a911730..feb6a106e 100644
    frame.bytes_above_locals = frame.frame_size - top_of_locals;
  
    frame.initial_adjust = 0;
-@@ -10008,10 +10033,10 @@ aarch64_epilogue_uses (int regno)
+@@ -10225,10 +10250,10 @@ aarch64_epilogue_uses (int regno)
  	|  for register varargs         |
  	|                               |
  	+-------------------------------+
@@ -126,7 +126,7 @@ index 68a911730..feb6a106e 100644
  	+-------------------------------+
  	|  callee-saved registers       |
  	+-------------------------------+
-@@ -10023,6 +10048,10 @@ aarch64_epilogue_uses (int regno)
+@@ -10240,6 +10265,10 @@ aarch64_epilogue_uses (int regno)
  	+-------------------------------+
  	|  SVE predicate registers      |
  	+-------------------------------+
@@ -137,7 +137,7 @@ index 68a911730..feb6a106e 100644
  	|  dynamic allocation           |
  	+-------------------------------+
  	|  padding                      |
-@@ -10032,6 +10061,9 @@ aarch64_epilogue_uses (int regno)
+@@ -10249,6 +10278,9 @@ aarch64_epilogue_uses (int regno)
  	+-------------------------------+
  	|                               | <-- stack_pointer_rtx (aligned)
  
@@ -147,7 +147,7 @@ index 68a911730..feb6a106e 100644
     Dynamic stack allocations via alloca() decrease stack_pointer_rtx
     but leave frame_pointer_rtx and hard_frame_pointer_rtx
     unchanged.
-@@ -10227,6 +10259,8 @@ aarch64_expand_prologue (void)
+@@ -10444,6 +10476,8 @@ aarch64_expand_prologue (void)
    gcc_assert (known_eq (bytes_below_sp, final_adjust));
    aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
  					  !frame_pointer_needed, true);
@@ -297,5 +297,5 @@ index 000000000..58f322aa4
 +  return 0;
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0099-Handle-epilogues-that-contain-jumps.patch b/0202-Backport-SME-Handle-epilogues-that-contain-jumps.patch
similarity index 95%
rename from SME-0099-Handle-epilogues-that-contain-jumps.patch
rename to 0202-Backport-SME-Handle-epilogues-that-contain-jumps.patch
index 9d2a9c93649d20c3734ab5f54fec81c0850adf7f..6348a4997b45c0eebb92b69e83392b274d1f050a 100644
--- a/SME-0099-Handle-epilogues-that-contain-jumps.patch
+++ b/0202-Backport-SME-Handle-epilogues-that-contain-jumps.patch
@@ -1,7 +1,7 @@
-From 76de5fbb92f76bebe592ad024447254c87584e91 Mon Sep 17 00:00:00 2001
+From 31433584b018cb2dc81e2366351a57bf5e1c4e44 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 17 Oct 2023 23:45:43 +0100
-Subject: [PATCH 099/144] Handle epilogues that contain jumps
+Subject: [PATCH 103/157] [Backport][SME] Handle epilogues that contain jumps
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=aeb3f0436f8ae84e593eda9641fe4e6fdf0afb3e
 
@@ -175,10 +175,10 @@ index 85145da7f..53543bb75 100644
  
  #endif /* GCC_CFGBUILD_H */
 diff --git a/gcc/function.cc b/gcc/function.cc
-index 94afb266e..35e7f663f 100644
+index ddab43ca4..f4fc211a0 100644
 --- a/gcc/function.cc
 +++ b/gcc/function.cc
-@@ -6134,6 +6134,8 @@ thread_prologue_and_epilogue_insns (void)
+@@ -6126,6 +6126,8 @@ thread_prologue_and_epilogue_insns (void)
  		  && returnjump_p (BB_END (e->src)))
  		e->flags &= ~EDGE_FALLTHRU;
  	    }
@@ -187,7 +187,7 @@ index 94afb266e..35e7f663f 100644
  	}
        else if (next_active_insn (BB_END (exit_fallthru_edge->src)))
  	{
-@@ -6242,6 +6244,8 @@ thread_prologue_and_epilogue_insns (void)
+@@ -6234,6 +6236,8 @@ thread_prologue_and_epilogue_insns (void)
  	  set_insn_locations (seq, epilogue_location);
  
  	  emit_insn_before (seq, insn);
@@ -197,5 +197,5 @@ index 94afb266e..35e7f663f 100644
      }
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0100-aarch64-Use-vecs-to-store-register-save-order.patch b/0203-Backport-SME-aarch64-Use-vecs-to-store-register-save.patch
similarity index 94%
rename from SME-0100-aarch64-Use-vecs-to-store-register-save-order.patch
rename to 0203-Backport-SME-aarch64-Use-vecs-to-store-register-save.patch
index 26e72bcae831c465add199a1514510c4117dd88d..b9e9c9355489e26a0e5d0b13ca7ab39595f30dbf 100644
--- a/SME-0100-aarch64-Use-vecs-to-store-register-save-order.patch
+++ b/0203-Backport-SME-aarch64-Use-vecs-to-store-register-save.patch
@@ -1,7 +1,8 @@
-From c028e2d377d4cd671551f9b7f097ec6a2b1e058f Mon Sep 17 00:00:00 2001
+From 554c83414c10909c39e0ad30026ffa4821dd9698 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 17 Oct 2023 23:46:33 +0100
-Subject: [PATCH 100/144] aarch64: Use vecs to store register save order
+Subject: [PATCH 104/157] [Backport][SME] aarch64: Use vecs to store register
+ save order
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=575858508090b18dcbc176db285c9f55227ca4c0
 
@@ -51,10 +52,10 @@ gcc/testsuite/
  8 files changed, 128 insertions(+), 120 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index feb6a106e..24762bcfc 100644
+index 8d4dd2891..e10c9d763 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8536,13 +8536,17 @@ aarch64_save_regs_above_locals_p ()
+@@ -8753,13 +8753,17 @@ aarch64_save_regs_above_locals_p ()
  static void
  aarch64_layout_frame (void)
  {
@@ -73,7 +74,7 @@ index feb6a106e..24762bcfc 100644
    frame.emit_frame_chain = aarch64_needs_frame_chain ();
  
    /* Adjust the outgoing arguments size if required.  Keep it in sync with what
-@@ -8627,6 +8631,7 @@ aarch64_layout_frame (void)
+@@ -8844,6 +8848,7 @@ aarch64_layout_frame (void)
    for (regno = P0_REGNUM; regno <= P15_REGNUM; regno++)
      if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
        {
@@ -81,7 +82,7 @@ index feb6a106e..24762bcfc 100644
  	if (frame.sve_save_and_probe == INVALID_REGNUM)
  	  frame.sve_save_and_probe = regno;
  	frame.reg_offset[regno] = offset;
-@@ -8648,7 +8653,7 @@ aarch64_layout_frame (void)
+@@ -8865,7 +8870,7 @@ aarch64_layout_frame (void)
  	 If we don't have any vector registers to save, and we know how
  	 big the predicate save area is, we can just round it up to the
  	 next 16-byte boundary.  */
@@ -90,7 +91,7 @@ index feb6a106e..24762bcfc 100644
  	offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
        else
  	{
-@@ -8662,10 +8667,11 @@ aarch64_layout_frame (void)
+@@ -8879,10 +8884,11 @@ aarch64_layout_frame (void)
      }
  
    /* If we need to save any SVE vector registers, add them next.  */
@@ -103,7 +104,7 @@ index feb6a106e..24762bcfc 100644
  	  if (frame.sve_save_and_probe == INVALID_REGNUM)
  	    frame.sve_save_and_probe = regno;
  	  frame.reg_offset[regno] = offset;
-@@ -8686,13 +8692,8 @@ aarch64_layout_frame (void)
+@@ -8903,13 +8909,8 @@ aarch64_layout_frame (void)
  
    auto allocate_gpr_slot = [&](unsigned int regno)
      {
@@ -118,7 +119,7 @@ index feb6a106e..24762bcfc 100644
        offset += UNITS_PER_WORD;
      };
  
-@@ -8721,8 +8722,7 @@ aarch64_layout_frame (void)
+@@ -8938,8 +8939,7 @@ aarch64_layout_frame (void)
    for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
      if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
        {
@@ -128,7 +129,7 @@ index feb6a106e..24762bcfc 100644
  	/* If there is an alignment gap between integer and fp callee-saves,
  	   allocate the last fp register to it if possible.  */
  	if (regno == last_fp_reg
-@@ -8735,21 +8735,25 @@ aarch64_layout_frame (void)
+@@ -8952,21 +8952,25 @@ aarch64_layout_frame (void)
  	  }
  
  	frame.reg_offset[regno] = offset;
@@ -164,7 +165,7 @@ index feb6a106e..24762bcfc 100644
  
    /* With stack-clash, a register must be saved in non-leaf functions.
       The saving of the bottommost register counts as an implicit probe,
-@@ -8913,12 +8917,14 @@ aarch64_layout_frame (void)
+@@ -9130,12 +9134,14 @@ aarch64_layout_frame (void)
  			+ frame.sve_callee_adjust
  			+ frame.final_adjust, frame.frame_size));
  
@@ -184,7 +185,7 @@ index feb6a106e..24762bcfc 100644
      }
  
    frame.laid_out = true;
-@@ -8933,17 +8939,6 @@ aarch64_register_saved_on_entry (int regno)
+@@ -9150,17 +9156,6 @@ aarch64_register_saved_on_entry (int regno)
    return known_ge (cfun->machine->frame.reg_offset[regno], 0);
  }
  
@@ -202,7 +203,7 @@ index feb6a106e..24762bcfc 100644
  /* Push the register number REGNO of mode MODE to the stack with write-back
     adjusting the stack by ADJUSTMENT.  */
  
-@@ -9207,41 +9202,46 @@ aarch64_add_cfa_expression (rtx_insn *insn, rtx reg,
+@@ -9424,41 +9419,46 @@ aarch64_add_cfa_expression (rtx_insn *insn, rtx reg,
    add_reg_note (insn, REG_CFA_EXPRESSION, gen_rtx_SET (mem, reg));
  }
  
@@ -268,7 +269,7 @@ index feb6a106e..24762bcfc 100644
        offset = frame.reg_offset[regno] - bytes_below_sp;
        rtx base_rtx = stack_pointer_rtx;
        poly_int64 sp_offset = offset;
-@@ -9268,12 +9268,13 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9485,12 +9485,13 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
  	    }
  	  offset -= fp_offset;
  	}
@@ -285,7 +286,7 @@ index feb6a106e..24762bcfc 100644
  	  && known_eq (GET_MODE_SIZE (mode),
  		       frame.reg_offset[regno2] - frame.reg_offset[regno]))
  	{
-@@ -9299,6 +9300,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9516,6 +9517,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
  	    }
  
  	  regno = regno2;
@@ -293,7 +294,7 @@ index feb6a106e..24762bcfc 100644
  	}
        else if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	{
-@@ -9316,49 +9318,57 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9533,49 +9535,57 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
      }
  }
  
@@ -373,7 +374,7 @@ index feb6a106e..24762bcfc 100644
  	  && known_eq (GET_MODE_SIZE (mode),
  		       frame.reg_offset[regno2] - frame.reg_offset[regno]))
  	{
-@@ -9371,6 +9381,7 @@ aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
+@@ -9588,6 +9598,7 @@ aarch64_restore_callee_saves (poly_int64 bytes_below_sp, unsigned start,
  
  	  *cfi_ops = alloc_reg_note (REG_CFA_RESTORE, reg2, *cfi_ops);
  	  regno = regno2;
@@ -381,7 +382,7 @@ index feb6a106e..24762bcfc 100644
  	}
        else if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	emit_insn (gen_aarch64_pred_mov (mode, reg, ptrue, mem));
-@@ -10192,13 +10203,10 @@ aarch64_expand_prologue (void)
+@@ -10409,13 +10420,10 @@ aarch64_expand_prologue (void)
  				 - frame.bytes_above_hard_fp);
        gcc_assert (known_ge (chain_offset, 0));
  
@@ -398,7 +399,7 @@ index feb6a106e..24762bcfc 100644
        else
  	gcc_assert (known_eq (chain_offset, 0));
        aarch64_add_offset (Pmode, hard_frame_pointer_rtx,
-@@ -10236,8 +10244,7 @@ aarch64_expand_prologue (void)
+@@ -10453,8 +10461,7 @@ aarch64_expand_prologue (void)
        aarch64_emit_stack_tie (hard_frame_pointer_rtx);
      }
  
@@ -408,7 +409,7 @@ index feb6a106e..24762bcfc 100644
  			     emit_frame_chain);
    if (maybe_ne (sve_callee_adjust, 0))
      {
-@@ -10248,10 +10255,9 @@ aarch64_expand_prologue (void)
+@@ -10465,10 +10472,9 @@ aarch64_expand_prologue (void)
  					      !frame_pointer_needed, false);
        bytes_below_sp -= sve_callee_adjust;
      }
@@ -422,7 +423,7 @@ index feb6a106e..24762bcfc 100644
  			     emit_frame_chain);
  
    /* We may need to probe the final adjustment if it is larger than the guard
-@@ -10297,8 +10303,6 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10514,8 +10520,6 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
    poly_int64 bytes_below_hard_fp = frame.bytes_below_hard_fp;
    unsigned reg1 = frame.wb_pop_candidate1;
    unsigned reg2 = frame.wb_pop_candidate2;
@@ -431,7 +432,7 @@ index feb6a106e..24762bcfc 100644
    rtx cfi_ops = NULL;
    rtx_insn *insn;
    /* A stack clash protection prologue may not have left EP0_REGNUM or
-@@ -10362,10 +10366,8 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10579,10 +10583,8 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
  
    /* Restore the vector registers before the predicate registers,
       so that we can use P4 as a temporary for big-endian SVE frames.  */
@@ -444,7 +445,7 @@ index feb6a106e..24762bcfc 100644
    if (maybe_ne (sve_callee_adjust, 0))
      aarch64_add_sp (NULL_RTX, NULL_RTX, sve_callee_adjust, true);
  
-@@ -10373,8 +10375,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -10590,8 +10592,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
       restore x30, we don't need to restore x30 again in the traditional
       way.  */
    aarch64_restore_callee_saves (final_adjust + sve_callee_adjust,
@@ -704,5 +705,5 @@ index dc7df8e6b..2c76ccecd 100644
  **	add	sp, sp, #?126976
  **	ret
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0101-aarch64-Put-LR-save-slot-first-in-more-cases.patch b/0204-Backport-SME-aarch64-Put-LR-save-slot-first-in-more-.patch
similarity index 95%
rename from SME-0101-aarch64-Put-LR-save-slot-first-in-more-cases.patch
rename to 0204-Backport-SME-aarch64-Put-LR-save-slot-first-in-more-.patch
index 5f5cadc2edb92624a3efa243e40ea3b05630212b..25a3b3681e8b3f87279dd7d67d0811ac1906d659 100644
--- a/SME-0101-aarch64-Put-LR-save-slot-first-in-more-cases.patch
+++ b/0204-Backport-SME-aarch64-Put-LR-save-slot-first-in-more-.patch
@@ -1,7 +1,8 @@
-From ced5ca8fbcf315c3a60aaeb539ada607a810b17f Mon Sep 17 00:00:00 2001
+From ccc3ca614bbaa242fe25ec82b903dfcac03fe2de Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 17 Oct 2023 23:46:33 +0100
-Subject: [PATCH 101/144] aarch64: Put LR save slot first in more cases
+Subject: [PATCH 105/157] [Backport][SME] aarch64: Put LR save slot first in
+ more cases
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=773306e9ef4ea1407f89686eb513a50602493666
 
@@ -37,10 +38,10 @@ gcc/testsuite/
  5 files changed, 9 insertions(+), 9 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 24762bcfc..96809b03e 100644
+index e10c9d763..1c127192d 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8703,7 +8703,7 @@ aarch64_layout_frame (void)
+@@ -8920,7 +8920,7 @@ aarch64_layout_frame (void)
        allocate_gpr_slot (R29_REGNUM);
        allocate_gpr_slot (R30_REGNUM);
      }
@@ -102,5 +103,5 @@ index 964527949..5702656a5 100644
 +/* { dg-final { scan-assembler "ldp\tx30, x19, \\\[sp\\\]" } } */
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0102-aarch64-Switch-PSTATE.SM-around-calls.patch b/0205-Backport-SME-aarch64-Switch-PSTATE.SM-around-calls.patch
similarity index 97%
rename from SME-0102-aarch64-Switch-PSTATE.SM-around-calls.patch
rename to 0205-Backport-SME-aarch64-Switch-PSTATE.SM-around-calls.patch
index 61fc67286e475e60662268f608516dec0b4d4061..d0a23d21b013d0b142347a3cf3f3c2d567b11a4c 100644
--- a/SME-0102-aarch64-Switch-PSTATE.SM-around-calls.patch
+++ b/0205-Backport-SME-aarch64-Switch-PSTATE.SM-around-calls.patch
@@ -1,7 +1,8 @@
-From 32eaf4ea2556d6841dfcdc1dd00895f6e1d21a93 Mon Sep 17 00:00:00 2001
+From 88a41bc24eb793eee27aa9f4ef6b763b3c3e76e6 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:25 +0000
-Subject: [PATCH 102/144] aarch64: Switch PSTATE.SM around calls
+Subject: [PATCH 106/157] [Backport][SME] aarch64: Switch PSTATE.SM around
+ calls
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=dd8090f40079fa41ee58d9f76b2e50ed4f95c6bf
 
@@ -352,7 +353,7 @@ index 000000000..52427b4f1
 +  "smstop\tsm"
 +)
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 96809b03e..7e5a21cf7 100644
+index 1c127192d..82f8e574e 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -82,6 +82,8 @@
@@ -364,7 +365,7 @@ index 96809b03e..7e5a21cf7 100644
  
  /* This file should be included last.  */
  #include "target-def.h"
-@@ -4160,6 +4162,26 @@ aarch64_fndecl_isa_mode (const_tree fndecl)
+@@ -4377,6 +4379,26 @@ aarch64_fndecl_isa_mode (const_tree fndecl)
    return aarch64_fndecl_pstate_sm (fndecl);
  }
  
@@ -391,7 +392,7 @@ index 96809b03e..7e5a21cf7 100644
  /* Implement TARGET_COMPATIBLE_VECTOR_TYPES_P.  */
  
  static bool
-@@ -4183,7 +4205,7 @@ aarch64_emit_cfi_for_reg_p (unsigned int regno)
+@@ -4400,7 +4422,7 @@ aarch64_emit_cfi_for_reg_p (unsigned int regno)
  static machine_mode
  aarch64_reg_save_mode (unsigned int regno)
  {
@@ -400,7 +401,7 @@ index 96809b03e..7e5a21cf7 100644
      return DImode;
  
    if (FP_REGNUM_P (regno))
-@@ -4242,6 +4264,16 @@ aarch64_callee_abi (rtx cookie)
+@@ -4459,6 +4481,16 @@ aarch64_callee_abi (rtx cookie)
    return function_abis[UINTVAL (cookie) >> AARCH64_NUM_ISA_MODES];
  }
  
@@ -417,7 +418,7 @@ index 96809b03e..7e5a21cf7 100644
  /* INSN is a call instruction.  Return the CONST_INT stored in its
     UNSPEC_CALLEE_ABI rtx.  */
  
-@@ -4264,6 +4296,15 @@ aarch64_insn_callee_abi (const rtx_insn *insn)
+@@ -4481,6 +4513,15 @@ aarch64_insn_callee_abi (const rtx_insn *insn)
    return aarch64_callee_abi (aarch64_insn_callee_cookie (insn));
  }
  
@@ -433,7 +434,7 @@ index 96809b03e..7e5a21cf7 100644
  /* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  The callee only saves
     the lower 64 bits of a 128-bit register.  Tell the compiler the callee
     clobbers the top 64 bits when restoring the bottom 64 bits.  */
-@@ -6428,6 +6469,437 @@ aarch64_sub_sp (rtx temp1, rtx temp2, poly_int64 delta, bool frame_related_p,
+@@ -6645,6 +6686,437 @@ aarch64_sub_sp (rtx temp1, rtx temp2, poly_int64 delta, bool frame_related_p,
  		      temp1, temp2, frame_related_p, emit_move_imm);
  }
  
@@ -871,7 +872,7 @@ index 96809b03e..7e5a21cf7 100644
  /* Set DEST to (vec_series BASE STEP).  */
  
  static void
-@@ -7994,6 +8466,40 @@ on_stack:
+@@ -8211,6 +8683,40 @@ on_stack:
    return;
  }
  
@@ -912,7 +913,7 @@ index 96809b03e..7e5a21cf7 100644
  /* Implement TARGET_FUNCTION_ARG.  */
  
  static rtx
-@@ -8005,7 +8511,13 @@ aarch64_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
+@@ -8222,7 +8728,13 @@ aarch64_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
  	      || pcum->pcs_variant == ARM_PCS_SVE);
  
    if (arg.end_marker_p ())
@@ -927,7 +928,7 @@ index 96809b03e..7e5a21cf7 100644
  
    aarch64_layout_arg (pcum_v, arg);
    return pcum->aapcs_reg;
-@@ -8040,6 +8552,7 @@ aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
+@@ -8257,6 +8769,7 @@ aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
    pcum->aapcs_stack_words = 0;
    pcum->aapcs_stack_size = 0;
    pcum->silent_p = silent_p;
@@ -935,7 +936,7 @@ index 96809b03e..7e5a21cf7 100644
  
    if (!silent_p
        && !TARGET_FLOAT
-@@ -8080,6 +8593,10 @@ aarch64_function_arg_advance (cumulative_args_t pcum_v,
+@@ -8297,6 +8810,10 @@ aarch64_function_arg_advance (cumulative_args_t pcum_v,
        aarch64_layout_arg (pcum_v, arg);
        gcc_assert ((pcum->aapcs_reg != NULL_RTX)
  		  != (pcum->aapcs_stack_words != 0));
@@ -946,7 +947,7 @@ index 96809b03e..7e5a21cf7 100644
        pcum->aapcs_arg_processed = false;
        pcum->aapcs_ncrn = pcum->aapcs_nextncrn;
        pcum->aapcs_nvrn = pcum->aapcs_nextnvrn;
-@@ -8530,6 +9047,30 @@ aarch64_save_regs_above_locals_p ()
+@@ -8747,6 +9264,30 @@ aarch64_save_regs_above_locals_p ()
    return crtl->stack_protect_guard;
  }
  
@@ -977,7 +978,7 @@ index 96809b03e..7e5a21cf7 100644
  /* Mark the registers that need to be saved by the callee and calculate
     the size of the callee-saved registers area and frame record (both FP
     and LR may be omitted).  */
-@@ -8563,6 +9104,7 @@ aarch64_layout_frame (void)
+@@ -8780,6 +9321,7 @@ aarch64_layout_frame (void)
    /* First mark all the registers that really need to be saved...  */
    for (regno = 0; regno <= LAST_SAVED_REGNUM; regno++)
      frame.reg_offset[regno] = SLOT_NOT_REQUIRED;
@@ -985,7 +986,7 @@ index 96809b03e..7e5a21cf7 100644
  
    /* ... that includes the eh data registers (if needed)...  */
    if (crtl->calls_eh_return)
-@@ -8715,6 +9257,21 @@ aarch64_layout_frame (void)
+@@ -8932,6 +9474,21 @@ aarch64_layout_frame (void)
      if (known_eq (frame.reg_offset[regno], SLOT_REQUIRED))
        allocate_gpr_slot (regno);
  
@@ -1007,7 +1008,7 @@ index 96809b03e..7e5a21cf7 100644
    poly_int64 max_int_offset = offset;
    offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
    bool has_align_gap = maybe_ne (offset, max_int_offset);
-@@ -8752,8 +9309,6 @@ aarch64_layout_frame (void)
+@@ -8969,8 +9526,6 @@ aarch64_layout_frame (void)
        if (push_regs.size () > 1)
  	frame.wb_push_candidate2 = push_regs[1];
      }
@@ -1016,7 +1017,7 @@ index 96809b03e..7e5a21cf7 100644
  
    /* With stack-clash, a register must be saved in non-leaf functions.
       The saving of the bottommost register counts as an implicit probe,
-@@ -8861,7 +9416,8 @@ aarch64_layout_frame (void)
+@@ -9078,7 +9633,8 @@ aarch64_layout_frame (void)
        frame.initial_adjust = frame.frame_size - frame.bytes_below_saved_regs;
        frame.final_adjust = frame.bytes_below_saved_regs;
      }
@@ -1026,7 +1027,7 @@ index 96809b03e..7e5a21cf7 100644
  	   && const_above_fp < max_push_offset)
      {
        /* Frame with large area below the saved registers, or with SVE saves,
-@@ -9242,7 +9798,13 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9459,7 +10015,13 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
  
        machine_mode mode = aarch64_reg_save_mode (regno);
        rtx reg = gen_rtx_REG (mode, regno);
@@ -1040,7 +1041,7 @@ index 96809b03e..7e5a21cf7 100644
        rtx base_rtx = stack_pointer_rtx;
        poly_int64 sp_offset = offset;
  
-@@ -9250,7 +9812,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9467,7 +10029,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
        if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	aarch64_adjust_sve_callee_save_base (mode, base_rtx, anchor_reg,
  					     offset, ptrue);
@@ -1049,7 +1050,7 @@ index 96809b03e..7e5a21cf7 100644
  	       && (!offset.is_constant (&const_offset) || const_offset >= 512))
  	{
  	  poly_int64 fp_offset = frame.bytes_below_hard_fp - bytes_below_sp;
-@@ -9273,6 +9835,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9490,6 +10052,7 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
  
        unsigned int regno2;
        if (!aarch64_sve_mode_p (mode)
@@ -1057,7 +1058,7 @@ index 96809b03e..7e5a21cf7 100644
  	  && i + 1 < regs.size ()
  	  && (regno2 = regs[i + 1], !skip_save_p (regno2))
  	  && known_eq (GET_MODE_SIZE (mode),
-@@ -9304,17 +9867,24 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
+@@ -9521,17 +10084,24 @@ aarch64_save_callee_saves (poly_int64 bytes_below_sp,
  	}
        else if (mode == VNx2DImode && BYTES_BIG_ENDIAN)
  	{
@@ -1085,7 +1086,7 @@ index 96809b03e..7e5a21cf7 100644
      }
  }
  
-@@ -9537,6 +10107,10 @@ aarch64_get_separate_components (void)
+@@ -9754,6 +10324,10 @@ aarch64_get_separate_components (void)
  	bitmap_clear_bit (components, frame.hard_fp_save_and_probe);
      }
  
@@ -1096,7 +1097,7 @@ index 96809b03e..7e5a21cf7 100644
    return components;
  }
  
-@@ -10032,6 +10606,47 @@ aarch64_epilogue_uses (int regno)
+@@ -10249,6 +10823,47 @@ aarch64_epilogue_uses (int regno)
    return 0;
  }
  
@@ -1144,7 +1145,7 @@ index 96809b03e..7e5a21cf7 100644
  /* AArch64 stack frames generated by this compiler look like:
  
  	+-------------------------------+
-@@ -10246,6 +10861,12 @@ aarch64_expand_prologue (void)
+@@ -10463,6 +11078,12 @@ aarch64_expand_prologue (void)
  
    aarch64_save_callee_saves (bytes_below_sp, frame.saved_gprs, true,
  			     emit_frame_chain);
@@ -1157,7 +1158,7 @@ index 96809b03e..7e5a21cf7 100644
    if (maybe_ne (sve_callee_adjust, 0))
      {
        gcc_assert (!flag_stack_clash_protection
-@@ -10267,6 +10888,40 @@ aarch64_expand_prologue (void)
+@@ -10484,6 +11105,40 @@ aarch64_expand_prologue (void)
  					  !frame_pointer_needed, true);
    if (emit_frame_chain && maybe_ne (final_adjust, 0))
      aarch64_emit_stack_tie (hard_frame_pointer_rtx);
@@ -1198,7 +1199,7 @@ index 96809b03e..7e5a21cf7 100644
  }
  
  /* Return TRUE if we can use a simple_return insn.
-@@ -11513,17 +12168,33 @@ aarch64_start_call_args (cumulative_args_t ca_v)
+@@ -11730,17 +12385,33 @@ aarch64_start_call_args (cumulative_args_t ca_v)
     RESULT is the register in which the result is returned.  It's NULL for
     "call" and "sibcall".
     MEM is the location of the function call.
@@ -1234,7 +1235,7 @@ index 96809b03e..7e5a21cf7 100644
    gcc_assert (MEM_P (mem));
    callee = XEXP (mem, 0);
    mode = GET_MODE (callee);
-@@ -11548,26 +12219,75 @@ aarch64_expand_call (rtx result, rtx mem, rtx callee_abi, bool sibcall)
+@@ -11765,26 +12436,75 @@ aarch64_expand_call (rtx result, rtx mem, rtx callee_abi, bool sibcall)
    else
      tmp = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNUM));
  
@@ -1314,7 +1315,7 @@ index 96809b03e..7e5a21cf7 100644
  }
  
  machine_mode
-@@ -12852,6 +13572,16 @@ aarch64_secondary_memory_needed (machine_mode mode, reg_class_t class1,
+@@ -13069,6 +13789,16 @@ aarch64_secondary_memory_needed (machine_mode mode, reg_class_t class1,
    return false;
  }
  
@@ -1331,7 +1332,7 @@ index 96809b03e..7e5a21cf7 100644
  static bool
  aarch64_can_eliminate (const int from ATTRIBUTE_UNUSED, const int to)
  {
-@@ -20375,7 +21105,8 @@ aarch64_conditional_register_usage (void)
+@@ -20607,7 +21337,8 @@ aarch64_conditional_register_usage (void)
  	call_used_regs[i] = 1;
        }
  
@@ -1341,7 +1342,7 @@ index 96809b03e..7e5a21cf7 100644
    CLEAR_HARD_REG_BIT (operand_reg_set, FFR_REGNUM);
    CLEAR_HARD_REG_BIT (operand_reg_set, FFRT_REGNUM);
  
-@@ -27666,6 +28397,123 @@ aarch64_indirect_call_asm (rtx addr)
+@@ -27903,6 +28634,123 @@ aarch64_indirect_call_asm (rtx addr)
    return "";
  }
  
@@ -1465,7 +1466,7 @@ index 96809b03e..7e5a21cf7 100644
  /* Target-specific selftests.  */
  
  #if CHECKING_P
-@@ -27853,6 +28701,9 @@ aarch64_run_selftests (void)
+@@ -28176,6 +29024,9 @@ aarch64_get_v16qi_mode ()
  #undef TARGET_CALLEE_COPIES
  #define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_arg_info_false
  
@@ -1540,10 +1541,10 @@ index 1591cde8b..6bfe55968 100644
  #endif
  
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 7e42f28ca..2dac19a74 100644
+index 2ce123255..bb867de74 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -968,7 +968,7 @@
+@@ -970,7 +970,7 @@
  					 operands[1]);
  })
  
@@ -1552,7 +1553,7 @@ index 7e42f28ca..2dac19a74 100644
    [(set (pc) (if_then_else
  	      (EQL (zero_extract:GPI (match_operand:ALLI 0 "register_operand" "r")
  				     (const_int 1)
-@@ -1055,7 +1055,7 @@
+@@ -1057,7 +1057,7 @@
    [(parallel
       [(call (match_operand 0 "memory_operand")
  	    (match_operand 1 "general_operand"))
@@ -1561,7 +1562,7 @@ index 7e42f28ca..2dac19a74 100644
        (clobber (reg:DI LR_REGNUM))])]
    ""
    "
-@@ -1081,7 +1081,7 @@
+@@ -1083,7 +1083,7 @@
       [(set (match_operand 0 "")
  	   (call (match_operand 1 "memory_operand")
  		 (match_operand 2 "general_operand")))
@@ -1570,7 +1571,7 @@ index 7e42f28ca..2dac19a74 100644
       (clobber (reg:DI LR_REGNUM))])]
    ""
    "
-@@ -1108,7 +1108,7 @@
+@@ -1110,7 +1110,7 @@
    [(parallel
       [(call (match_operand 0 "memory_operand")
  	    (match_operand 1 "general_operand"))
@@ -1579,7 +1580,7 @@ index 7e42f28ca..2dac19a74 100644
        (return)])]
    ""
    {
-@@ -1122,7 +1122,7 @@
+@@ -1124,7 +1124,7 @@
       [(set (match_operand 0 "")
  	   (call (match_operand 1 "memory_operand")
  		 (match_operand 2 "general_operand")))
@@ -1588,7 +1589,7 @@ index 7e42f28ca..2dac19a74 100644
        (return)])]
    ""
    {
-@@ -7745,3 +7745,6 @@
+@@ -7747,3 +7747,6 @@
  
  ;; SVE2.
  (include "aarch64-sve2.md")
@@ -3265,5 +3266,5 @@ index 000000000..83b4073ee
 +  consume_p0 (res);
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0103-aarch64-Add-support-for-SME-ZA-attributes.patch b/0206-Backport-SME-aarch64-Add-support-for-SME-ZA-attribut.patch
similarity index 98%
rename from SME-0103-aarch64-Add-support-for-SME-ZA-attributes.patch
rename to 0206-Backport-SME-aarch64-Add-support-for-SME-ZA-attribut.patch
index 1afad98169c33f08581764994bcfe11a4ef916e8..f15e7f63a9a00476cfbd58ef217de60cff8c8180 100644
--- a/SME-0103-aarch64-Add-support-for-SME-ZA-attributes.patch
+++ b/0206-Backport-SME-aarch64-Add-support-for-SME-ZA-attribut.patch
@@ -1,7 +1,8 @@
-From 9e0af200b7a9626605adf31d5096a2ee73c68047 Mon Sep 17 00:00:00 2001
+From 1efd433c779f66440facc8ba5cd23bdbdd6672ba Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:26 +0000
-Subject: [PATCH 103/144] aarch64: Add support for SME ZA attributes
+Subject: [PATCH 107/157] [Backport][SME] aarch64: Add support for SME ZA
+ attributes
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=3af9ceb631b741095d8eabd055ff7c23d4a69e6f
 
@@ -593,7 +594,7 @@ index 52427b4f1..d4973098e 100644
 +  }
 +)
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 7e5a21cf7..3365d2464 100644
+index 82f8e574e..a6e996c5b 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -91,6 +91,26 @@
@@ -623,7 +624,7 @@ index 7e5a21cf7..3365d2464 100644
  /* Information about a legitimate vector immediate operand.  */
  struct simd_immediate_info
  {
-@@ -2742,6 +2762,151 @@ static const struct processor all_cores[] =
+@@ -2959,6 +2979,151 @@ static const struct processor all_cores[] =
  /* The current tuning set.  */
  struct tune_params aarch64_tune_params = generic_tunings;
  
@@ -775,7 +776,7 @@ index 7e5a21cf7..3365d2464 100644
  /* Check whether an 'aarch64_vector_pcs' attribute is valid.  */
  
  static tree
-@@ -2770,6 +2935,101 @@ handle_aarch64_vector_pcs_attribute (tree *node, tree name, tree,
+@@ -2987,6 +3152,101 @@ handle_aarch64_vector_pcs_attribute (tree *node, tree name, tree,
    gcc_unreachable ();
  }
  
@@ -877,7 +878,7 @@ index 7e5a21cf7..3365d2464 100644
  /* Mutually-exclusive function type attributes for controlling PSTATE.SM.  */
  static const struct attribute_spec::exclusions attr_streaming_exclusions[] =
  {
-@@ -2806,6 +3066,16 @@ static const attribute_spec aarch64_arm_attributes[] =
+@@ -3023,6 +3283,16 @@ static const attribute_spec aarch64_arm_attributes[] =
  			  NULL, attr_streaming_exclusions },
    { "streaming_compatible", 0, 0, false, true,  true,  true,
  			  NULL, attr_streaming_exclusions },
@@ -894,7 +895,7 @@ index 7e5a21cf7..3365d2464 100644
  };
  
  static const scoped_attribute_specs aarch64_arm_attribute_table =
-@@ -3985,6 +4255,7 @@ aarch64_hard_regno_nregs (unsigned regno, machine_mode mode)
+@@ -4202,6 +4472,7 @@ aarch64_hard_regno_nregs (unsigned regno, machine_mode mode)
      case PR_HI_REGS:
      case FFR_REGS:
      case PR_AND_FFR_REGS:
@@ -902,7 +903,7 @@ index 7e5a21cf7..3365d2464 100644
        return 1;
      default:
        return CEIL (lowest_size, UNITS_PER_WORD);
-@@ -4015,6 +4286,10 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
+@@ -4232,6 +4503,10 @@ aarch64_hard_regno_mode_ok (unsigned regno, machine_mode mode)
    if (pr_or_ffr_regnum_p (regno))
      return false;
  
@@ -913,7 +914,7 @@ index 7e5a21cf7..3365d2464 100644
    if (regno == SP_REGNUM)
      /* The purpose of comparing with ptr_mode is to support the
         global register variable associated with the stack pointer
-@@ -4135,12 +4410,34 @@ aarch64_fntype_pstate_sm (const_tree fntype)
+@@ -4352,12 +4627,34 @@ aarch64_fntype_pstate_sm (const_tree fntype)
    return AARCH64_FL_SM_OFF;
  }
  
@@ -949,7 +950,7 @@ index 7e5a21cf7..3365d2464 100644
  }
  
  /* Return the state of PSTATE.SM when compiling the body of
-@@ -4153,13 +4450,37 @@ aarch64_fndecl_pstate_sm (const_tree fndecl)
+@@ -4370,13 +4667,37 @@ aarch64_fndecl_pstate_sm (const_tree fndecl)
    return aarch64_fntype_pstate_sm (TREE_TYPE (fndecl));
  }
  
@@ -988,7 +989,7 @@ index 7e5a21cf7..3365d2464 100644
  }
  
  /* Return the state of PSTATE.SM on entry to the current function.
-@@ -4172,6 +4493,44 @@ aarch64_cfun_incoming_pstate_sm ()
+@@ -4389,6 +4710,44 @@ aarch64_cfun_incoming_pstate_sm ()
    return aarch64_fntype_pstate_sm (TREE_TYPE (cfun->decl));
  }
  
@@ -1033,7 +1034,7 @@ index 7e5a21cf7..3365d2464 100644
  /* Return true if a call from the current function to a function with
     ISA mode CALLEE_MODE would involve a change to PSTATE.SM around
     the BL instruction.  */
-@@ -5735,6 +6094,74 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
+@@ -5952,6 +6311,74 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
  					     factor, nelts_per_vq);
  }
  
@@ -1108,7 +1109,7 @@ index 7e5a21cf7..3365d2464 100644
  /* Multipliers for repeating bitmasks of width 32, 16, 8, 4, and 2.  */
  
  static const unsigned HOST_WIDE_INT bitmask_imm_mul[] =
-@@ -7500,6 +7927,15 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
+@@ -7717,6 +8144,15 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
  	  return;
  	}
  
@@ -1124,7 +1125,7 @@ index 7e5a21cf7..3365d2464 100644
        sty = aarch64_classify_symbol (base, const_offset);
        switch (sty)
  	{
-@@ -8515,8 +8951,10 @@ aarch64_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
+@@ -8732,8 +9168,10 @@ aarch64_function_arg (cumulative_args_t pcum_v, const function_arg_info &arg)
        rtx abi_cookie = aarch64_gen_callee_cookie (pcum->isa_mode,
  						  pcum->pcs_variant);
        rtx sme_mode_switch_args = aarch64_finish_sme_mode_switch_args (pcum);
@@ -1137,7 +1138,7 @@ index 7e5a21cf7..3365d2464 100644
      }
  
    aarch64_layout_arg (pcum_v, arg);
-@@ -8527,7 +8965,7 @@ void
+@@ -8744,7 +9182,7 @@ void
  aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
  			      const_tree fntype,
  			      rtx libname ATTRIBUTE_UNUSED,
@@ -1146,7 +1147,7 @@ index 7e5a21cf7..3365d2464 100644
  			      unsigned n_named ATTRIBUTE_UNUSED,
  			      bool silent_p)
  {
-@@ -8552,6 +8990,8 @@ aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
+@@ -8769,6 +9207,8 @@ aarch64_init_cumulative_args (CUMULATIVE_ARGS *pcum,
    pcum->aapcs_stack_words = 0;
    pcum->aapcs_stack_size = 0;
    pcum->silent_p = silent_p;
@@ -1155,7 +1156,7 @@ index 7e5a21cf7..3365d2464 100644
    pcum->num_sme_mode_switch_args = 0;
  
    if (!silent_p
-@@ -10586,14 +11026,31 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -10803,14 +11243,31 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
      }
  }
  
@@ -1192,7 +1193,7 @@ index 7e5a21cf7..3365d2464 100644
  
  int
  aarch64_epilogue_uses (int regno)
-@@ -10603,6 +11060,18 @@ aarch64_epilogue_uses (int regno)
+@@ -10820,6 +11277,18 @@ aarch64_epilogue_uses (int regno)
        if (regno == LR_REGNUM)
  	return 1;
      }
@@ -1211,7 +1212,7 @@ index 7e5a21cf7..3365d2464 100644
    return 0;
  }
  
-@@ -11284,8 +11753,10 @@ aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
+@@ -11501,8 +11970,10 @@ aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
  
    /* There's no way to calculate VL-based values using relocations.  */
    subrtx_iterator::array_type array;
@@ -1223,7 +1224,7 @@ index 7e5a21cf7..3365d2464 100644
        return true;
  
    poly_int64 offset;
-@@ -12147,6 +12618,72 @@ aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
+@@ -12364,6 +12835,72 @@ aarch64_fixed_condition_code_regs (unsigned int *p1, unsigned int *p2)
    return true;
  }
  
@@ -1296,7 +1297,7 @@ index 7e5a21cf7..3365d2464 100644
  /* Implement TARGET_START_CALL_ARGS.  */
  
  static void
-@@ -12162,6 +12699,20 @@ aarch64_start_call_args (cumulative_args_t ca_v)
+@@ -12379,6 +12916,20 @@ aarch64_start_call_args (cumulative_args_t ca_v)
  	      " option %<-march%>, or by using the %"
  	      " attribute or pragma", "sme");
      }
@@ -1317,7 +1318,7 @@ index 7e5a21cf7..3365d2464 100644
  }
  
  /* This function is used by the call expanders of the machine description.
-@@ -12174,6 +12725,8 @@ aarch64_start_call_args (cumulative_args_t ca_v)
+@@ -12391,6 +12942,8 @@ aarch64_start_call_args (cumulative_args_t ca_v)
         The second element is a PARALLEL that lists all the argument
         registers that need to be saved and restored around a change
         in PSTATE.SM, or const0_rtx if no such switch is needed.
@@ -1326,7 +1327,7 @@ index 7e5a21cf7..3365d2464 100644
     SIBCALL indicates whether this function call is normal call or sibling call.
     It will generate different pattern accordingly.  */
  
-@@ -12186,10 +12739,12 @@ aarch64_expand_call (rtx result, rtx mem, rtx cookie, bool sibcall)
+@@ -12403,10 +12956,12 @@ aarch64_expand_call (rtx result, rtx mem, rtx cookie, bool sibcall)
  
    rtx callee_abi = cookie;
    rtx sme_mode_switch_args = const0_rtx;
@@ -1339,7 +1340,7 @@ index 7e5a21cf7..3365d2464 100644
      }
  
    gcc_assert (CONST_INT_P (callee_abi));
-@@ -12209,6 +12764,41 @@ aarch64_expand_call (rtx result, rtx mem, rtx cookie, bool sibcall)
+@@ -12426,6 +12981,41 @@ aarch64_expand_call (rtx result, rtx mem, rtx cookie, bool sibcall)
        : !REG_P (callee))
      XEXP (mem, 0) = force_reg (mode, callee);
  
@@ -1381,7 +1382,7 @@ index 7e5a21cf7..3365d2464 100644
    call = gen_rtx_CALL (VOIDmode, mem, const0_rtx);
  
    if (result != NULL_RTX)
-@@ -12275,6 +12865,50 @@ aarch64_expand_call (rtx result, rtx mem, rtx cookie, bool sibcall)
+@@ -12492,6 +13082,50 @@ aarch64_expand_call (rtx result, rtx mem, rtx cookie, bool sibcall)
  
        cfun->machine->call_switches_pstate_sm = true;
      }
@@ -1432,7 +1433,7 @@ index 7e5a21cf7..3365d2464 100644
  }
  
  /* Emit call insn with PAT and do aarch64-specific handling.  */
-@@ -13385,6 +14019,9 @@ aarch64_regno_regclass (unsigned regno)
+@@ -13602,6 +14236,9 @@ aarch64_regno_regclass (unsigned regno)
    if (regno == FFR_REGNUM || regno == FFRT_REGNUM)
      return FFR_REGS;
  
@@ -1442,7 +1443,7 @@ index 7e5a21cf7..3365d2464 100644
    return NO_REGS;
  }
  
-@@ -13740,12 +14377,14 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
+@@ -13957,12 +14594,14 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
        return (vec_flags & VEC_ADVSIMD
  	      ? CEIL (lowest_size, UNITS_PER_VREG)
  	      : CEIL (lowest_size, UNITS_PER_WORD));
@@ -1457,7 +1458,7 @@ index 7e5a21cf7..3365d2464 100644
        return 1;
  
      case NO_REGS:
-@@ -18770,10 +19409,14 @@ aarch64_override_options_internal (struct gcc_options *opts)
+@@ -19002,10 +19641,14 @@ aarch64_override_options_internal (struct gcc_options *opts)
        && !fixed_regs[R18_REGNUM])
      error ("%<-fsanitize=shadow-call-stack%> requires %<-ffixed-x18%>");
  
@@ -1474,7 +1475,7 @@ index 7e5a21cf7..3365d2464 100644
        inform (input_location, "you can enable %qs using the command-line"
  	      " option %<-march%>, or by using the %"
  	      " attribute or pragma", "sme");
-@@ -21109,6 +21752,8 @@ aarch64_conditional_register_usage (void)
+@@ -21341,6 +21984,8 @@ aarch64_conditional_register_usage (void)
    CLEAR_HARD_REG_BIT (operand_reg_set, VG_REGNUM);
    CLEAR_HARD_REG_BIT (operand_reg_set, FFR_REGNUM);
    CLEAR_HARD_REG_BIT (operand_reg_set, FFRT_REGNUM);
@@ -1483,7 +1484,7 @@ index 7e5a21cf7..3365d2464 100644
  
    /* When tracking speculation, we need a couple of call-clobbered registers
       to track the speculation state.  It would be nice to just use
-@@ -22563,6 +23208,9 @@ aarch64_mov_operand_p (rtx x, machine_mode mode)
+@@ -22795,6 +23440,9 @@ aarch64_mov_operand_p (rtx x, machine_mode mode)
  	  || aarch64_sve_rdvl_immediate_p (x)))
      return true;
  
@@ -1493,7 +1494,7 @@ index 7e5a21cf7..3365d2464 100644
    return aarch64_classify_symbolic_expression (x)
      == SYMBOL_TINY_ABSOLUTE;
  }
-@@ -28029,9 +28677,45 @@ aarch64_comp_type_attributes (const_tree type1, const_tree type2)
+@@ -28266,9 +28914,45 @@ aarch64_comp_type_attributes (const_tree type1, const_tree type2)
      return 0;
    if (!check_attr ("arm", "streaming_compatible"))
      return 0;
@@ -1539,7 +1540,7 @@ index 7e5a21cf7..3365d2464 100644
  /* Implement TARGET_GET_MULTILIB_ABI_NAME */
  
  static const char *
-@@ -28397,6 +29081,629 @@ aarch64_indirect_call_asm (rtx addr)
+@@ -28634,6 +29318,629 @@ aarch64_indirect_call_asm (rtx addr)
    return "";
  }
  
@@ -2169,7 +2170,7 @@ index 7e5a21cf7..3365d2464 100644
  /* If CALL involves a change in PSTATE.SM, emit the instructions needed
     to switch to the new mode and the instructions needed to restore the
     original mode.  Return true if something changed.  */
-@@ -28785,6 +30092,9 @@ aarch64_run_selftests (void)
+@@ -29108,6 +30415,9 @@ aarch64_get_v16qi_mode ()
  #undef TARGET_START_CALL_ARGS
  #define TARGET_START_CALL_ARGS aarch64_start_call_args
  
@@ -2179,7 +2180,7 @@ index 7e5a21cf7..3365d2464 100644
  #undef TARGET_GIMPLE_FOLD_BUILTIN
  #define TARGET_GIMPLE_FOLD_BUILTIN aarch64_gimple_fold_builtin
  
-@@ -29150,6 +30460,9 @@ aarch64_libgcc_floating_mode_supported_p
+@@ -29473,6 +30783,9 @@ aarch64_libgcc_floating_mode_supported_p
  #undef TARGET_COMP_TYPE_ATTRIBUTES
  #define TARGET_COMP_TYPE_ATTRIBUTES aarch64_comp_type_attributes
  
@@ -2189,7 +2190,7 @@ index 7e5a21cf7..3365d2464 100644
  #undef TARGET_GET_MULTILIB_ABI_NAME
  #define TARGET_GET_MULTILIB_ABI_NAME aarch64_get_multilib_abi_name
  
-@@ -29170,8 +30483,35 @@ aarch64_libgcc_floating_mode_supported_p
+@@ -29493,8 +30806,35 @@ aarch64_libgcc_floating_mode_supported_p
  #undef TARGET_STRICT_ARGUMENT_NAMING
  #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
  
@@ -2226,7 +2227,7 @@ index 7e5a21cf7..3365d2464 100644
  
  #undef TARGET_ASM_FILE_END
  #define TARGET_ASM_FILE_END aarch64_asm_file_end
-@@ -29182,6 +30522,9 @@ aarch64_libgcc_floating_mode_supported_p
+@@ -29505,6 +30845,9 @@ aarch64_libgcc_floating_mode_supported_p
  #undef TARGET_HAVE_SHADOW_CALL_STACK
  #define TARGET_HAVE_SHADOW_CALL_STACK true
  
@@ -2442,7 +2443,7 @@ index 6bfe55968..89d30b9bf 100644
 +
  #endif /* GCC_AARCH64_H */
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 2dac19a74..7b6ac256c 100644
+index bb867de74..05a7c6675 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
 @@ -111,6 +111,56 @@
@@ -2536,7 +2537,7 @@ index 2dac19a74..7b6ac256c 100644
      (const_string "yes")
      (const_string "no")))
  
-@@ -926,7 +984,7 @@
+@@ -928,7 +986,7 @@
     (set_attr "sls_length" "retbr")]
  )
  
@@ -2545,7 +2546,7 @@ index 2dac19a74..7b6ac256c 100644
    [(set (pc) (if_then_else (EQL (match_operand:GPI 0 "register_operand" "r")
  				(const_int 0))
  			   (label_ref (match_operand 1 "" ""))
-@@ -1289,6 +1347,7 @@
+@@ -1291,6 +1349,7 @@
       /* The "mov_imm" type for CNT is just a placeholder.  */
       [r  , Usv; mov_imm  , sve , 4] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
       [r  , Usr; mov_imm  , sve,  4] << aarch64_output_sve_rdvl (operands[1]);
@@ -2553,7 +2554,7 @@ index 2dac19a74..7b6ac256c 100644
       [r  , m  ; load_4   , *   , 4] ldr\t%w0, %1
       [w  , m  ; load_4   , fp  , 4] ldr\t%s0, %1
       [m  , r Z; store_4  , *   , 4] str\t%w1, %0
-@@ -1324,6 +1383,7 @@
+@@ -1326,6 +1385,7 @@
       /* The "mov_imm" type for CNT is just a placeholder.  */
       [r, Usv; mov_imm  , sve , 4] << aarch64_output_sve_cnt_immediate ("cnt", "%x0", operands[1]);
       [r, Usr; mov_imm  , sve,  4] << aarch64_output_sve_rdvl (operands[1]);
@@ -2561,7 +2562,7 @@ index 2dac19a74..7b6ac256c 100644
       [r, m  ; load_8   , *   , 4] ldr\t%x0, %1
       [w, m  ; load_8   , fp  , 4] ldr\t%d0, %1
       [m, r Z; store_8  , *   , 4] str\t%x1, %0
-@@ -7731,6 +7791,21 @@
+@@ -7733,6 +7793,21 @@
    [(set (attr "length") (symbol_ref "INTVAL (operands[0])"))]
  )
  
@@ -4319,5 +4320,5 @@ index 000000000..d5b226ae1
 +// { dg-final { scan-assembler {\tsmstop\tza\n} } }
 +// { dg-final { scan-assembler-not {\tsub\tsp, sp, x[0-9]+\n} } }
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0104-aarch64-Add-a-register-class-for-w12-w15.patch b/0207-Backport-SME-aarch64-Add-a-register-class-for-w12-w1.patch
similarity index 89%
rename from SME-0104-aarch64-Add-a-register-class-for-w12-w15.patch
rename to 0207-Backport-SME-aarch64-Add-a-register-class-for-w12-w1.patch
index 2b9fd664e452e5d715fe30dd7f0dd22ccf80cf86..3d69ca57ae1db35f0c40cadc7768076110136951 100644
--- a/SME-0104-aarch64-Add-a-register-class-for-w12-w15.patch
+++ b/0207-Backport-SME-aarch64-Add-a-register-class-for-w12-w1.patch
@@ -1,7 +1,8 @@
-From 514dc1c8c9ec22378724d5d23119edfeff37878f Mon Sep 17 00:00:00 2001
+From 9866b4c1d85d88fd9e25ff3ac5224b69d4e0f0b2 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:26 +0000
-Subject: [PATCH 104/144] aarch64: Add a register class for w12-w15
+Subject: [PATCH 108/157] [Backport][SME] aarch64: Add a register class for
+ w12-w15
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=084122adb5792a9c8e7f7876e2c1d59ba80c228b
 
@@ -21,10 +22,10 @@ gcc/
  2 files changed, 13 insertions(+), 5 deletions(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 3365d2464..0ef0887bf 100644
+index a6e996c5b..112dfeabb 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -13996,6 +13996,9 @@ aarch64_label_mentioned_p (rtx x)
+@@ -14213,6 +14213,9 @@ aarch64_label_mentioned_p (rtx x)
  enum reg_class
  aarch64_regno_regclass (unsigned regno)
  {
@@ -34,7 +35,7 @@ index 3365d2464..0ef0887bf 100644
    if (STUB_REGNUM_P (regno))
      return STUB_REGS;
  
-@@ -14360,6 +14363,7 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
+@@ -14577,6 +14580,7 @@ aarch64_class_max_nregs (reg_class_t regclass, machine_mode mode)
    unsigned int nregs, vec_flags;
    switch (regclass)
      {
@@ -42,7 +43,7 @@ index 3365d2464..0ef0887bf 100644
      case STUB_REGS:
      case TAILCALL_ADDR_REGS:
      case POINTER_REGS:
-@@ -16694,13 +16698,11 @@ aarch64_register_move_cost (machine_mode mode,
+@@ -16926,13 +16930,11 @@ aarch64_register_move_cost (machine_mode mode,
    const struct cpu_regmove_cost *regmove_cost
      = aarch64_tune_params.regmove_cost;
  
@@ -98,5 +99,5 @@ index 89d30b9bf..8b21faf34 100644
    { 0x3ffcffff, 0x00000000, 0x00000000 },	/* STUB_REGS */		\
    { 0x7fffffff, 0x00000000, 0x00000003 },	/* GENERAL_REGS */	\
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0105-aarch64-Add-a-VNx1TI-mode.patch b/0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch
similarity index 95%
rename from SME-0105-aarch64-Add-a-VNx1TI-mode.patch
rename to 0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch
index 7b9bd2e20535bb32cc76b4d5cf334ea75bbcd3da..eefe7d8cfedcbe31b27450813201046c9a134bc4 100644
--- a/SME-0105-aarch64-Add-a-VNx1TI-mode.patch
+++ b/0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch
@@ -1,7 +1,7 @@
-From 5f2d6cb21ff04d65e3ac26aa73f3a2404185234d Mon Sep 17 00:00:00 2001
+From 8310c0df319a86bc2f63b8d3198dd1c394827bac Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:27 +0000
-Subject: [PATCH 105/144] aarch64: Add a VNx1TI mode
+Subject: [PATCH 109/157] [Backport][SME] aarch64: Add a VNx1TI mode
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=80fc055cf00fee4b1f9f19f77c8880b12226e086
 
@@ -68,5 +68,5 @@ index 8f399225a..8fa66fdb3 100644
  /* Partial SVE vectors:
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0106-aarch64-Generalise-unspec_based_function_base.patch b/0209-Backport-SME-aarch64-Generalise-unspec_based_functio.patch
similarity index 96%
rename from SME-0106-aarch64-Generalise-unspec_based_function_base.patch
rename to 0209-Backport-SME-aarch64-Generalise-unspec_based_functio.patch
index 92f759cc0e85e448e9a62e5e339fa272d38b3561..1c2ac4e21c40ee5723e31af0f4f208b55986d2fd 100644
--- a/SME-0106-aarch64-Generalise-unspec_based_function_base.patch
+++ b/0209-Backport-SME-aarch64-Generalise-unspec_based_functio.patch
@@ -1,7 +1,8 @@
-From 5afacf56fd7d19f7be83ada5a0ccb6c58182626d Mon Sep 17 00:00:00 2001
+From e3c0d3d98ab1f60900533f3f75c598f899f37c9f Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:27 +0000
-Subject: [PATCH 106/144] aarch64: Generalise unspec_based_function_base
+Subject: [PATCH 110/157] [Backport][SME] aarch64: Generalise
+ unspec_based_function_base
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=1ec23d5a29bc5d89cef60e2aba2fe4095ee12a8f
 
@@ -113,5 +114,5 @@ index 94a6d1207..f5fa4030c 100644
    }
  };
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0107-aarch64-Generalise-_m-rules-for-SVE-intrinsics.patch b/0210-Backport-SME-aarch64-Generalise-_m-rules-for-SVE-int.patch
similarity index 96%
rename from SME-0107-aarch64-Generalise-_m-rules-for-SVE-intrinsics.patch
rename to 0210-Backport-SME-aarch64-Generalise-_m-rules-for-SVE-int.patch
index 2a90ec6f8d6a748e1b03bede90503ee0d45d58e4..75de62ed56118931c1642c62aa70142cfd9b708e 100644
--- a/SME-0107-aarch64-Generalise-_m-rules-for-SVE-intrinsics.patch
+++ b/0210-Backport-SME-aarch64-Generalise-_m-rules-for-SVE-int.patch
@@ -1,7 +1,8 @@
-From 73d024e06fda360f8ce8ac3b6a282c07302f1f83 Mon Sep 17 00:00:00 2001
+From 3d721b42c97baba562b77988cec0fec229217519 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:28 +0000
-Subject: [PATCH 107/144] aarch64: Generalise _m rules for SVE intrinsics
+Subject: [PATCH 111/157] [Backport][SME] aarch64: Generalise _m rules for SVE
+ intrinsics
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=8de9304d94d4ec42863a25c1cb1a1ba9a1e3e0fe
 
@@ -112,5 +113,5 @@ index 7132b6e77..f16ac3947 100644
  inline machine_mode
  function_expander::result_mode () const
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0108-aarch64-Add-support-for-arm_sme.h.patch b/0211-Backport-SME-aarch64-Add-support-for-arm_sme.h.patch
similarity index 99%
rename from SME-0108-aarch64-Add-support-for-arm_sme.h.patch
rename to 0211-Backport-SME-aarch64-Add-support-for-arm_sme.h.patch
index c476e0f5e7d7584428cbd51d03fea0b7099d3321..b83e594aa9a762d398693994598c4454b0d137db 100644
--- a/SME-0108-aarch64-Add-support-for-arm_sme.h.patch
+++ b/0211-Backport-SME-aarch64-Add-support-for-arm_sme.h.patch
@@ -1,7 +1,7 @@
-From 6307f55921dba21cc6e9c7671abf813d7f79dd99 Mon Sep 17 00:00:00 2001
+From 6c651a11f8e68244c4c53ad7b29983f54a3bc737 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:28 +0000
-Subject: [PATCH 108/144] aarch64: Add support for 
+Subject: [PATCH 112/157] [Backport][SME] aarch64: Add support for 
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=4f6ab9537051e156d52bd8e9df40107ba6685895
 
@@ -446,7 +446,7 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/write_za_m_1.c
 
 diff --git a/gcc/config.gcc b/gcc/config.gcc
-index 48273d902..94f5b9f93 100644
+index da66603cd..19b21a280 100644
 --- a/gcc/config.gcc
 +++ b/gcc/config.gcc
 @@ -325,11 +325,11 @@ m32c*-*-*)
@@ -2744,10 +2744,10 @@ index f16ac3947..6ef6bb93f 100644
     in which the displacement is measured, otherwise return UNITS_none.  */
  inline units_index
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 0ef0887bf..6e876ed68 100644
+index 112dfeabb..113784e31 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -5943,15 +5943,26 @@ aarch64_output_sve_scalar_inc_dec (rtx offset)
+@@ -6160,15 +6160,26 @@ aarch64_output_sve_scalar_inc_dec (rtx offset)
  }
  
  /* Return true if a single RDVL instruction can multiply FACTOR by the
@@ -2776,7 +2776,7 @@ index 0ef0887bf..6e876ed68 100644
  /* Return true if we can move VALUE into a register using a single
     RDVL instruction.  */
  
-@@ -5959,7 +5970,7 @@ static bool
+@@ -6176,7 +6187,7 @@ static bool
  aarch64_sve_rdvl_immediate_p (poly_int64 value)
  {
    HOST_WIDE_INT factor = value.coeffs[0];
@@ -2785,7 +2785,7 @@ index 0ef0887bf..6e876ed68 100644
  }
  
  /* Likewise for rtx X.  */
-@@ -5995,10 +6006,8 @@ aarch64_sve_addvl_addpl_immediate_p (poly_int64 value)
+@@ -6212,10 +6223,8 @@ aarch64_sve_addvl_addpl_immediate_p (poly_int64 value)
    HOST_WIDE_INT factor = value.coeffs[0];
    if (factor == 0 || value.coeffs[1] != factor)
      return false;
@@ -2798,7 +2798,7 @@ index 0ef0887bf..6e876ed68 100644
  }
  
  /* Likewise for rtx X.  */
-@@ -6098,11 +6107,11 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
+@@ -6315,11 +6324,11 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
     number of 128-bit quadwords in an SME vector.  ISA_MODE is the
     ISA mode in which the calculation is being performed.  */
  
@@ -2812,7 +2812,7 @@ index 0ef0887bf..6e876ed68 100644
    if (isa_mode & AARCH64_FL_SM_ON)
      /* We're in streaming mode, so we can use normal poly-int values.  */
      return gen_int_mode ({ factor, factor }, mode);
-@@ -6145,7 +6154,7 @@ aarch64_rdsvl_immediate_p (const_rtx x)
+@@ -6362,7 +6371,7 @@ aarch64_rdsvl_immediate_p (const_rtx x)
  {
    HOST_WIDE_INT factor;
    return (aarch64_sme_vq_unspec_p (x, &factor)
@@ -2821,7 +2821,7 @@ index 0ef0887bf..6e876ed68 100644
  }
  
  /* Return the asm string for an RDSVL instruction that calculates X,
-@@ -6162,6 +6171,38 @@ aarch64_output_rdsvl (const_rtx x)
+@@ -6379,6 +6388,38 @@ aarch64_output_rdsvl (const_rtx x)
    return buffer;
  }
  
@@ -2860,7 +2860,7 @@ index 0ef0887bf..6e876ed68 100644
  /* Multipliers for repeating bitmasks of width 32, 16, 8, 4, and 2.  */
  
  static const unsigned HOST_WIDE_INT bitmask_imm_mul[] =
-@@ -6748,7 +6789,7 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
+@@ -6965,7 +7006,7 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
  	{
  	  /* Try to use an unshifted CNT[BHWD] or RDVL.  */
  	  if (aarch64_sve_cnt_factor_p (factor)
@@ -2869,7 +2869,7 @@ index 0ef0887bf..6e876ed68 100644
  	    {
  	      val = gen_int_mode (poly_int64 (factor, factor), mode);
  	      shift = 0;
-@@ -11968,7 +12009,7 @@ aarch64_classify_index (struct aarch64_address_info *info, rtx x,
+@@ -12185,7 +12226,7 @@ aarch64_classify_index (struct aarch64_address_info *info, rtx x,
        && contains_reg_of_mode[GENERAL_REGS][GET_MODE (SUBREG_REG (index))])
      index = SUBREG_REG (index);
  
@@ -2878,7 +2878,7 @@ index 0ef0887bf..6e876ed68 100644
      {
        if (type != ADDRESS_REG_REG
  	  || (1 << shift) != GET_MODE_UNIT_SIZE (mode))
-@@ -12071,7 +12112,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -12288,7 +12329,8 @@ aarch64_classify_address (struct aarch64_address_info *info,
  			    && ((vec_flags == 0
  				 && known_lt (GET_MODE_SIZE (mode), 16))
  				|| vec_flags == VEC_ADVSIMD
@@ -2888,7 +2888,7 @@ index 0ef0887bf..6e876ed68 100644
  
    /* For SVE, only accept [Rn], [Rn, #offset, MUL VL] and [Rn, Rm, LSL #shift].
       The latter is not valid for SVE predicates, and that's rejected through
-@@ -12190,7 +12232,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
+@@ -12407,7 +12449,7 @@ aarch64_classify_address (struct aarch64_address_info *info,
  	  /* Make "m" use the LD1 offset range for SVE data modes, so
  	     that pre-RTL optimizers like ivopts will work to that
  	     instead of the wider LDR/STR range.  */
@@ -2897,7 +2897,7 @@ index 0ef0887bf..6e876ed68 100644
  	    return (type == ADDR_QUERY_M
  		    ? offset_4bit_signed_scaled_p (mode, offset)
  		    : offset_9bit_signed_scaled_p (mode, offset));
-@@ -14533,6 +14575,51 @@ aarch64_output_casesi (rtx *operands)
+@@ -14750,6 +14792,51 @@ aarch64_output_casesi (rtx *operands)
    return "";
  }
  
@@ -2949,7 +2949,7 @@ index 0ef0887bf..6e876ed68 100644
  
  /* Return size in bits of an arithmetic operand which is shifted/scaled and
     masked such that it is suitable for a UXTB, UXTH, or UXTW extend
-@@ -23524,6 +23611,31 @@ aarch64_sve_struct_memory_operand_p (rtx op)
+@@ -23756,6 +23843,31 @@ aarch64_sve_struct_memory_operand_p (rtx op)
  	  && offset_4bit_signed_scaled_p (SVE_BYTE_MODE, last));
  }
  
@@ -3029,10 +3029,10 @@ index 8b21faf34..50fdf2f50 100644
  #define TARGET_ARMV8_3	(AARCH64_ISA_V8_3A)
  
 diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
-index 7b6ac256c..770633a73 100644
+index 05a7c6675..6b4341866 100644
 --- a/gcc/config/aarch64/aarch64.md
 +++ b/gcc/config/aarch64/aarch64.md
-@@ -2145,10 +2145,10 @@
+@@ -2147,10 +2147,10 @@
  
  (define_insn "*add3_aarch64"
    [(set
@@ -3046,7 +3046,7 @@ index 7b6ac256c..770633a73 100644
    ""
    "@
    add\\t%0, %1, %2
-@@ -2157,10 +2157,11 @@
+@@ -2159,10 +2159,11 @@
    sub\\t%0, %1, #%n2
    #
    * return aarch64_output_sve_scalar_inc_dec (operands[2]);
@@ -3140,7 +3140,7 @@ index 88fb9a07c..2da423779 100644
    "@internal
     A constraint that matches a VG-based constant that can be added by
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index 1ffc49889..17e84a48f 100644
+index b616f5c9a..152d28f6b 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
 @@ -450,6 +450,7 @@
@@ -3233,7 +3233,7 @@ index 1ffc49889..17e84a48f 100644
  			  (VNx32QI "b") (VNx48QI "b") (VNx64QI "b")
  			  (VNx16HI "h") (VNx24HI "h") (VNx32HI "h")
  			  (VNx16HF "h") (VNx24HF "h") (VNx32HF "h")
-@@ -2051,6 +2093,7 @@
+@@ -2052,6 +2094,7 @@
  			 (VNx4SF "VNx4BI") (VNx2SF "VNx2BI")
  			 (VNx2DI "VNx2BI")
  			 (VNx2DF "VNx2BI")
@@ -3241,7 +3241,7 @@ index 1ffc49889..17e84a48f 100644
  			 (VNx32QI "VNx16BI")
  			 (VNx16HI "VNx8BI") (VNx16HF "VNx8BI")
  			 (VNx16BF "VNx8BI")
-@@ -2131,6 +2174,8 @@
+@@ -2132,6 +2175,8 @@
  ;; The constraint to use for an SVE FCMLA lane index.
  (define_mode_attr sve_lane_pair_con [(VNx8HF "y") (VNx4SF "x")])
  
@@ -3250,7 +3250,7 @@ index 1ffc49889..17e84a48f 100644
  ;; -------------------------------------------------------------------
  ;; Code Iterators
  ;; -------------------------------------------------------------------
-@@ -3158,6 +3203,20 @@
+@@ -3159,6 +3204,20 @@
  (define_int_iterator FCMUL_OP [UNSPEC_FCMUL
  			       UNSPEC_FCMUL_CONJ])
  
@@ -3271,7 +3271,7 @@ index 1ffc49889..17e84a48f 100644
  ;; Iterators for atomic operations.
  
  (define_int_iterator ATOMIC_LDOP
-@@ -3230,6 +3289,26 @@
+@@ -3231,6 +3290,26 @@
  			(UNSPEC_PMULLT "pmullt")
  			(UNSPEC_PMULLT_PAIR "pmullt_pair")
  			(UNSPEC_SMATMUL "smatmul")
@@ -3298,7 +3298,7 @@ index 1ffc49889..17e84a48f 100644
  			(UNSPEC_SQCADD90 "sqcadd90")
  			(UNSPEC_SQCADD270 "sqcadd270")
  			(UNSPEC_SQRDCMLAH "sqrdcmlah")
-@@ -3999,6 +4078,15 @@
+@@ -4000,6 +4079,15 @@
  (define_int_attr unspec [(UNSPEC_WHILERW "UNSPEC_WHILERW")
  			 (UNSPEC_WHILEWR "UNSPEC_WHILEWR")])
  
@@ -3315,10 +3315,10 @@ index 1ffc49889..17e84a48f 100644
  
  (define_int_iterator GET_FPSCR
 diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
-index c308015ac..9e4a70ad9 100644
+index 1b8496c07..3ec9e9103 100644
 --- a/gcc/config/aarch64/predicates.md
 +++ b/gcc/config/aarch64/predicates.md
-@@ -168,11 +168,17 @@
+@@ -212,11 +212,17 @@
    (and (match_code "const_poly_int")
         (match_test "aarch64_add_offset_temporaries (op) == 1")))
  
@@ -3380,10 +3380,10 @@ index 49731ba92..be60cc003 100644
  	$(srcdir)/config/aarch64/iterators.md
  	$(SHELL) $(srcdir)/config/aarch64/geniterators.sh \
 diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
-index 3a65affce..9f189b3e6 100644
+index 2420b05d9..47fff9c90 100644
 --- a/gcc/doc/invoke.texi
 +++ b/gcc/doc/invoke.texi
-@@ -19476,6 +19476,10 @@ Enable the Flag Manipulation instructions Extension.
+@@ -19480,6 +19480,10 @@ Enable the Flag Manipulation instructions Extension.
  Enable the Pointer Authentication Extension.
  @item sme
  Enable the Scalable Matrix Extension.
@@ -15937,10 +15937,10 @@ index 000000000..af79c406b
 +  svwrite_ver_za8_m (0, 0, pg, s8); /* { dg-error {ACLE function 'svwrite_ver_za8_s8_m' can only be called when SME streaming mode is enabled} } */
 +}
 diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
-index 6016a0052..95f1f9e13 100644
+index e2a9ef5fa..292737dae 100644
 --- a/gcc/testsuite/lib/target-supports.exp
 +++ b/gcc/testsuite/lib/target-supports.exp
-@@ -10609,7 +10609,8 @@ proc check_effective_target_aarch64_tiny { } {
+@@ -10622,7 +10622,8 @@ proc check_effective_target_aarch64_tiny { } {
  # various architecture extensions via the .arch_extension pseudo-op.
  
  foreach { aarch64_ext } { "fp" "simd" "crypto" "crc" "lse" "dotprod" "sve"
@@ -15951,5 +15951,5 @@ index 6016a0052..95f1f9e13 100644
  	proc check_effective_target_aarch64_asm_FUNC_ok { } {
  	  if { [istarget aarch64*-*-*] } {
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0109-aarch64-Add-support-for-__arm_locally_streaming.patch b/0212-Backport-SME-aarch64-Add-support-for-__arm_locally_s.patch
similarity index 94%
rename from SME-0109-aarch64-Add-support-for-__arm_locally_streaming.patch
rename to 0212-Backport-SME-aarch64-Add-support-for-__arm_locally_s.patch
index 61c8b64040c3e6a9604a88543556bc3261c7bd3e..72576e309024452c0068ef15f1265a3bb92ac0e7 100644
--- a/SME-0109-aarch64-Add-support-for-__arm_locally_streaming.patch
+++ b/0212-Backport-SME-aarch64-Add-support-for-__arm_locally_s.patch
@@ -1,7 +1,8 @@
-From 38ac64682cdfc9c6a4f374c56f27c2b48a210886 Mon Sep 17 00:00:00 2001
+From 0ad41f11bea5c303ff39c54cae8e46afdfae6070 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:29 +0000
-Subject: [PATCH 109/144] aarch64: Add support for __arm_locally_streaming
+Subject: [PATCH 113/157] [Backport][SME] aarch64: Add support for
+ __arm_locally_streaming
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=3f6e5991fab507aa79121dc44d1afcd622c78744
 
@@ -74,10 +75,10 @@ index cb8a6c2fc..745719d8b 100644
  #undef DEFINE_ARM_KEYWORD_MACRO
  
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 6e876ed68..f28a45d2b 100644
+index 113784e31..4cb43c2e2 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -3066,6 +3066,7 @@ static const attribute_spec aarch64_arm_attributes[] =
+@@ -3283,6 +3283,7 @@ static const attribute_spec aarch64_arm_attributes[] =
  			  NULL, attr_streaming_exclusions },
    { "streaming_compatible", 0, 0, false, true,  true,  true,
  			  NULL, attr_streaming_exclusions },
@@ -85,7 +86,7 @@ index 6e876ed68..f28a45d2b 100644
    { "new",		  1, -1, true, false, false, false,
  			  handle_arm_new, NULL },
    { "preserves",	  1, -1, false, true,  true,  true,
-@@ -4440,6 +4441,16 @@ aarch64_fntype_isa_mode (const_tree fntype)
+@@ -4657,6 +4658,16 @@ aarch64_fntype_isa_mode (const_tree fntype)
  	  | aarch64_fntype_pstate_za (fntype));
  }
  
@@ -102,7 +103,7 @@ index 6e876ed68..f28a45d2b 100644
  /* Return the state of PSTATE.SM when compiling the body of
     function FNDECL.  This might be different from the state of
     PSTATE.SM on entry.  */
-@@ -4447,6 +4458,9 @@ aarch64_fntype_isa_mode (const_tree fntype)
+@@ -4664,6 +4675,9 @@ aarch64_fntype_isa_mode (const_tree fntype)
  static aarch64_feature_flags
  aarch64_fndecl_pstate_sm (const_tree fndecl)
  {
@@ -112,7 +113,7 @@ index 6e876ed68..f28a45d2b 100644
    return aarch64_fntype_pstate_sm (TREE_TYPE (fndecl));
  }
  
-@@ -4522,6 +4536,16 @@ aarch64_cfun_has_new_state (const char *state_name)
+@@ -4739,6 +4753,16 @@ aarch64_cfun_has_new_state (const char *state_name)
    return aarch64_fndecl_has_new_state (cfun->decl, state_name);
  }
  
@@ -129,7 +130,7 @@ index 6e876ed68..f28a45d2b 100644
  /* Return true if the current function has state STATE_NAME, either by
     creating new state itself or by sharing state with callers.  */
  
-@@ -6714,6 +6738,10 @@ aarch64_add_offset_temporaries (rtx x)
+@@ -6931,6 +6955,10 @@ aarch64_add_offset_temporaries (rtx x)
     TEMP2, if nonnull, is a second temporary register that doesn't
     overlap either DEST or REG.
  
@@ -140,7 +141,7 @@ index 6e876ed68..f28a45d2b 100644
     Since this function may be used to adjust the stack pointer, we must
     ensure that it cannot cause transient stack deallocation (for example
     by first incrementing SP and then decrementing when adjusting by a
-@@ -6722,6 +6750,7 @@ aarch64_add_offset_temporaries (rtx x)
+@@ -6939,6 +6967,7 @@ aarch64_add_offset_temporaries (rtx x)
  static void
  aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
  		    poly_int64 offset, rtx temp1, rtx temp2,
@@ -148,7 +149,7 @@ index 6e876ed68..f28a45d2b 100644
  		    bool frame_related_p, bool emit_move_imm = true)
  {
    gcc_assert (emit_move_imm || temp1 != NULL_RTX);
-@@ -6734,9 +6763,18 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
+@@ -6951,9 +6980,18 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
    /* Try using ADDVL or ADDPL to add the whole value.  */
    if (src != const0_rtx && aarch64_sve_addvl_addpl_immediate_p (offset))
      {
@@ -168,7 +169,7 @@ index 6e876ed68..f28a45d2b 100644
        return;
      }
  
-@@ -6752,11 +6790,19 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
+@@ -6969,11 +7007,19 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
    if (src != const0_rtx
        && aarch64_sve_addvl_addpl_immediate_p (poly_offset))
      {
@@ -189,7 +190,7 @@ index 6e876ed68..f28a45d2b 100644
  	  src = dest;
  	}
        else
-@@ -6787,9 +6833,19 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
+@@ -7004,9 +7050,19 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
        rtx val;
        if (IN_RANGE (rel_factor, -32, 31))
  	{
@@ -211,7 +212,7 @@ index 6e876ed68..f28a45d2b 100644
  	    {
  	      val = gen_int_mode (poly_int64 (factor, factor), mode);
  	      shift = 0;
-@@ -6819,11 +6875,18 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
+@@ -7036,11 +7092,18 @@ aarch64_add_offset (scalar_int_mode mode, rtx dest, rtx src,
  	     a shift and add sequence for the multiplication.
  	     If CNTB << SHIFT is out of range, stick with the current
  	     shift factor.  */
@@ -231,7 +232,7 @@ index 6e876ed68..f28a45d2b 100644
  	  else
  	    val = gen_int_mode (BYTES_PER_SVE_VECTOR, mode);
  
-@@ -6911,30 +6974,34 @@ aarch64_split_add_offset (scalar_int_mode mode, rtx dest, rtx src,
+@@ -7128,30 +7191,34 @@ aarch64_split_add_offset (scalar_int_mode mode, rtx dest, rtx src,
  			  rtx offset_rtx, rtx temp1, rtx temp2)
  {
    aarch64_add_offset (mode, dest, src, rtx_to_poly_int64 (offset_rtx),
@@ -276,7 +277,7 @@ index 6e876ed68..f28a45d2b 100644
  }
  
  /* A streaming-compatible function needs to switch temporarily to the known
-@@ -7959,11 +8026,11 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
+@@ -8176,11 +8243,11 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
  		{
  		  base = aarch64_force_temporary (int_mode, dest, base);
  		  aarch64_add_offset (int_mode, dest, base, offset,
@@ -290,7 +291,7 @@ index 6e876ed68..f28a45d2b 100644
  	    }
  	  return;
  	}
-@@ -7990,7 +8057,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
+@@ -8207,7 +8274,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
  	      gcc_assert (can_create_pseudo_p ());
  	      base = aarch64_force_temporary (int_mode, dest, base);
  	      aarch64_add_offset (int_mode, dest, base, const_offset,
@@ -299,7 +300,7 @@ index 6e876ed68..f28a45d2b 100644
  	      return;
  	    }
  
-@@ -8030,7 +8097,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
+@@ -8247,7 +8314,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
  	      gcc_assert(can_create_pseudo_p ());
  	      base = aarch64_force_temporary (int_mode, dest, base);
  	      aarch64_add_offset (int_mode, dest, base, const_offset,
@@ -308,7 +309,7 @@ index 6e876ed68..f28a45d2b 100644
  	      return;
  	    }
  	  /* FALLTHRU */
-@@ -9538,6 +9605,9 @@ aarch64_need_old_pstate_sm ()
+@@ -9755,6 +9822,9 @@ aarch64_need_old_pstate_sm ()
    if (aarch64_cfun_incoming_pstate_sm () != 0)
      return false;
  
@@ -318,7 +319,7 @@ index 6e876ed68..f28a45d2b 100644
    if (cfun->machine->call_switches_pstate_sm)
      for (auto insn = get_insns (); insn; insn = NEXT_INSN (insn))
        if (auto *call = dyn_cast (insn))
-@@ -9564,6 +9634,7 @@ aarch64_layout_frame (void)
+@@ -9781,6 +9851,7 @@ aarch64_layout_frame (void)
    bool frame_related_fp_reg_p = false;
    aarch64_frame &frame = cfun->machine->frame;
    poly_int64 top_of_locals = -1;
@@ -326,7 +327,7 @@ index 6e876ed68..f28a45d2b 100644
  
    vec_safe_truncate (frame.saved_gprs, 0);
    vec_safe_truncate (frame.saved_fprs, 0);
-@@ -9601,7 +9672,7 @@ aarch64_layout_frame (void)
+@@ -9818,7 +9889,7 @@ aarch64_layout_frame (void)
        frame.reg_offset[regno] = SLOT_REQUIRED;
  
    for (regno = V0_REGNUM; regno <= V31_REGNUM; regno++)
@@ -335,7 +336,7 @@ index 6e876ed68..f28a45d2b 100644
  	&& !fixed_regs[regno]
  	&& !crtl->abi->clobbers_full_reg_p (regno))
        {
-@@ -9630,7 +9701,7 @@ aarch64_layout_frame (void)
+@@ -9847,7 +9918,7 @@ aarch64_layout_frame (void)
      }
  
    for (regno = P0_REGNUM; regno <= P15_REGNUM; regno++)
@@ -344,7 +345,7 @@ index 6e876ed68..f28a45d2b 100644
  	&& !fixed_regs[regno]
  	&& !crtl->abi->clobbers_full_reg_p (regno))
        frame.reg_offset[regno] = SLOT_REQUIRED;
-@@ -9747,7 +9818,8 @@ aarch64_layout_frame (void)
+@@ -9964,7 +10035,8 @@ aarch64_layout_frame (void)
    /* If the current function changes the SVE vector length, ensure that the
       old value of the DWARF VG register is saved and available in the CFI,
       so that outer frames with VL-sized offsets can be processed correctly.  */
@@ -354,7 +355,7 @@ index 6e876ed68..f28a45d2b 100644
      {
        frame.reg_offset[VG_REGNUM] = offset;
        offset += UNITS_PER_WORD;
-@@ -10532,9 +10604,16 @@ aarch64_get_separate_components (void)
+@@ -10749,9 +10821,16 @@ aarch64_get_separate_components (void)
    bitmap_clear (components);
  
    /* The registers we need saved to the frame.  */
@@ -371,7 +372,7 @@ index 6e876ed68..f28a45d2b 100644
  	/* Punt on saves and restores that use ST1D and LD1D.  We could
  	   try to be smarter, but it would involve making sure that the
  	   spare predicate register itself is safe to use at the save
-@@ -10853,11 +10932,16 @@ aarch64_emit_stack_tie (rtx reg)
+@@ -11070,11 +11149,16 @@ aarch64_emit_stack_tie (rtx reg)
     events, e.g. if we were to allow the stack to be dropped by more than a page
     and then have multiple probes up and we take a signal somewhere in between
     then the signal handler doesn't know the state of the stack and can make no
@@ -389,7 +390,7 @@ index 6e876ed68..f28a45d2b 100644
  					bool frame_related_p,
  					bool final_adjustment_p)
  {
-@@ -10899,7 +10983,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -11116,7 +11200,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
    if (known_lt (poly_size, min_probe_threshold)
        || !flag_stack_clash_protection)
      {
@@ -399,7 +400,7 @@ index 6e876ed68..f28a45d2b 100644
        return;
      }
  
-@@ -10916,7 +11001,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -11133,7 +11218,8 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
  
        /* First calculate the amount of bytes we're actually spilling.  */
        aarch64_add_offset (Pmode, temp1, CONST0_RTX (Pmode),
@@ -409,7 +410,7 @@ index 6e876ed68..f28a45d2b 100644
  
        rtx_insn *insn = get_last_insn ();
  
-@@ -10974,7 +11060,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -11191,7 +11277,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
      {
        for (HOST_WIDE_INT i = 0; i < rounded_size; i += guard_size)
  	{
@@ -418,7 +419,7 @@ index 6e876ed68..f28a45d2b 100644
  	  emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
  					   guard_used_by_caller));
  	  emit_insn (gen_blockage ());
-@@ -10985,7 +11071,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -11202,7 +11288,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
      {
        /* Compute the ending address.  */
        aarch64_add_offset (Pmode, temp1, stack_pointer_rtx, -rounded_size,
@@ -427,7 +428,7 @@ index 6e876ed68..f28a45d2b 100644
        rtx_insn *insn = get_last_insn ();
  
        /* For the initial allocation, we don't have a frame pointer
-@@ -11051,7 +11137,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
+@@ -11268,7 +11354,7 @@ aarch64_allocate_and_probe_stack_space (rtx temp1, rtx temp2,
        if (final_adjustment_p && rounded_size != 0)
  	min_probe_threshold = 0;
  
@@ -436,7 +437,7 @@ index 6e876ed68..f28a45d2b 100644
        if (residual >= min_probe_threshold)
  	{
  	  if (dump_file)
-@@ -11116,6 +11202,14 @@ aarch64_epilogue_uses (int regno)
+@@ -11333,6 +11419,14 @@ aarch64_epilogue_uses (int regno)
    return 0;
  }
  
@@ -451,7 +452,7 @@ index 6e876ed68..f28a45d2b 100644
  /* The current function's frame has a save slot for the incoming state
     of SVCR.  Return a legitimate memory for the slot, based on the hard
     frame pointer.  */
-@@ -11252,6 +11346,9 @@ aarch64_expand_prologue (void)
+@@ -11469,6 +11563,9 @@ aarch64_expand_prologue (void)
    unsigned reg2 = frame.wb_push_candidate2;
    bool emit_frame_chain = frame.emit_frame_chain;
    rtx_insn *insn;
@@ -461,7 +462,7 @@ index 6e876ed68..f28a45d2b 100644
  
    if (flag_stack_clash_protection && known_eq (callee_adjust, 0))
      {
-@@ -11313,7 +11410,7 @@ aarch64_expand_prologue (void)
+@@ -11530,7 +11627,7 @@ aarch64_expand_prologue (void)
       less the amount of the guard reserved for use by the caller's
       outgoing args.  */
    aarch64_allocate_and_probe_stack_space (tmp0_rtx, tmp1_rtx, initial_adjust,
@@ -470,7 +471,7 @@ index 6e876ed68..f28a45d2b 100644
  
    if (callee_adjust != 0)
      aarch64_push_regs (reg1, reg2, callee_adjust);
-@@ -11336,7 +11433,8 @@ aarch64_expand_prologue (void)
+@@ -11553,7 +11650,8 @@ aarch64_expand_prologue (void)
  	gcc_assert (known_eq (chain_offset, 0));
        aarch64_add_offset (Pmode, hard_frame_pointer_rtx,
  			  stack_pointer_rtx, chain_offset,
@@ -480,7 +481,7 @@ index 6e876ed68..f28a45d2b 100644
        if (frame_pointer_needed && !frame_size.is_constant ())
  	{
  	  /* Variable-sized frames need to describe the save slot
-@@ -11383,6 +11481,7 @@ aarch64_expand_prologue (void)
+@@ -11600,6 +11698,7 @@ aarch64_expand_prologue (void)
  		  || known_eq (initial_adjust, 0));
        aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx,
  					      sve_callee_adjust,
@@ -488,7 +489,7 @@ index 6e876ed68..f28a45d2b 100644
  					      !frame_pointer_needed, false);
        bytes_below_sp -= sve_callee_adjust;
      }
-@@ -11395,12 +11494,15 @@ aarch64_expand_prologue (void)
+@@ -11612,12 +11711,15 @@ aarch64_expand_prologue (void)
       that is assumed by the called.  */
    gcc_assert (known_eq (bytes_below_sp, final_adjust));
    aarch64_allocate_and_probe_stack_space (tmp1_rtx, tmp0_rtx, final_adjust,
@@ -506,7 +507,7 @@ index 6e876ed68..f28a45d2b 100644
      {
        rtx mem = aarch64_old_svcr_mem ();
        MEM_VOLATILE_P (mem) = 1;
-@@ -11432,6 +11534,34 @@ aarch64_expand_prologue (void)
+@@ -11649,6 +11751,34 @@ aarch64_expand_prologue (void)
  	    emit_move_insn (gen_rtx_REG (DImode, R1_REGNUM), old_r1);
  	}
      }
@@ -541,7 +542,7 @@ index 6e876ed68..f28a45d2b 100644
  }
  
  /* Return TRUE if we can use a simple_return insn.
-@@ -11478,6 +11608,9 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -11695,6 +11825,9 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
    HOST_WIDE_INT guard_size
      = 1 << param_stack_clash_protection_guard_size;
    HOST_WIDE_INT guard_used_by_caller = STACK_CLASH_CALLER_GUARD;
@@ -551,7 +552,7 @@ index 6e876ed68..f28a45d2b 100644
  
    /* We can re-use the registers when:
  
-@@ -11502,6 +11635,24 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -11719,6 +11852,24 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
      = maybe_ne (get_frame_size ()
  		+ frame.saved_varargs_size, 0);
  
@@ -576,7 +577,7 @@ index 6e876ed68..f28a45d2b 100644
    /* Emit a barrier to prevent loads from a deallocated stack.  */
    if (maybe_gt (final_adjust, crtl->outgoing_args_size)
        || cfun->calls_alloca
-@@ -11522,19 +11673,21 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -11739,19 +11890,21 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
      aarch64_add_offset (Pmode, stack_pointer_rtx,
  			hard_frame_pointer_rtx,
  			-bytes_below_hard_fp + final_adjust,
@@ -601,7 +602,7 @@ index 6e876ed68..f28a45d2b 100644
  
    /* When shadow call stack is enabled, the scs_pop in the epilogue will
       restore x30, we don't need to restore x30 again in the traditional
-@@ -11564,7 +11717,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -11781,7 +11934,7 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
  
    /* Liveness of EP0_REGNUM can not be trusted across function calls either, so
       add restriction on emit_move optimization to leaf functions.  */
@@ -610,7 +611,7 @@ index 6e876ed68..f28a45d2b 100644
  		  (!can_inherit_p || !crtl->is_leaf
  		   || df_regs_ever_live_p (EP0_REGNUM)));
  
-@@ -11697,7 +11850,8 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+@@ -11914,7 +12067,8 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
    temp1 = gen_rtx_REG (Pmode, EP1_REGNUM);
  
    if (vcall_offset == 0)
@@ -620,7 +621,7 @@ index 6e876ed68..f28a45d2b 100644
    else
      {
        gcc_assert ((vcall_offset & (POINTER_BYTES - 1)) == 0);
-@@ -11710,7 +11864,7 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
+@@ -11927,7 +12081,7 @@ aarch64_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
  				       plus_constant (Pmode, this_rtx, delta));
  	  else
  	    aarch64_add_offset (Pmode, this_rtx, this_rtx, delta,
@@ -629,7 +630,7 @@ index 6e876ed68..f28a45d2b 100644
  	}
  
        if (Pmode == ptr_mode)
-@@ -30639,6 +30793,9 @@ aarch64_libgcc_floating_mode_supported_p
+@@ -30962,6 +31116,9 @@ aarch64_libgcc_floating_mode_supported_p
  #undef TARGET_EXTRA_LIVE_ON_ENTRY
  #define TARGET_EXTRA_LIVE_ON_ENTRY aarch64_extra_live_on_entry
  
@@ -1743,5 +1744,5 @@ index 000000000..42adeb152
 +  asm ("");
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0110-aarch64-Handle-PSTATE.SM-across-abnormal-edges.patch b/0213-Backport-SME-aarch64-Handle-PSTATE.SM-across-abnorma.patch
similarity index 97%
rename from SME-0110-aarch64-Handle-PSTATE.SM-across-abnormal-edges.patch
rename to 0213-Backport-SME-aarch64-Handle-PSTATE.SM-across-abnorma.patch
index 1609edc7f90d880b33f49790b8166c00d96e7219..f120a98b7c6895e6566cfe9f3a12bf25bbe7d42d 100644
--- a/SME-0110-aarch64-Handle-PSTATE.SM-across-abnormal-edges.patch
+++ b/0213-Backport-SME-aarch64-Handle-PSTATE.SM-across-abnorma.patch
@@ -1,7 +1,8 @@
-From 3af169eb331d5cac307879897c7983f503b1eea1 Mon Sep 17 00:00:00 2001
+From ef9c800309fa326ca56dd9d9affd7d5498624bb8 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:29 +0000
-Subject: [PATCH 110/144] aarch64: Handle PSTATE.SM across abnormal edges
+Subject: [PATCH 114/157] [Backport][SME] aarch64: Handle PSTATE.SM across
+ abnormal edges
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=275706fc59b5fdcc26d46d9f19951fc86b40c515
 
@@ -57,7 +58,7 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/sme/nonlocal_goto_7.c
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index f28a45d2b..f57c8bff3 100644
+index 4cb43c2e2..effb567c2 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -82,6 +82,7 @@
@@ -68,7 +69,7 @@ index f28a45d2b..f57c8bff3 100644
  #include "tree-pass.h"
  #include "cfgbuild.h"
  
-@@ -7078,6 +7079,8 @@ public:
+@@ -7295,6 +7296,8 @@ public:
    void add_reg (machine_mode, unsigned int);
    void add_call_args (rtx_call_insn *);
    void add_call_result (rtx_call_insn *);
@@ -77,7 +78,7 @@ index f28a45d2b..f57c8bff3 100644
  
    void emit_prologue ();
    void emit_epilogue ();
-@@ -7210,6 +7213,46 @@ aarch64_sme_mode_switch_regs::add_call_result (rtx_call_insn *call_insn)
+@@ -7427,6 +7430,46 @@ aarch64_sme_mode_switch_regs::add_call_result (rtx_call_insn *call_insn)
      add_reg (GET_MODE (dest), REGNO (dest));
  }
  
@@ -124,7 +125,7 @@ index f28a45d2b..f57c8bff3 100644
  /* Emit code to save registers before the mode switch.  */
  
  void
-@@ -9608,6 +9651,23 @@ aarch64_need_old_pstate_sm ()
+@@ -9825,6 +9868,23 @@ aarch64_need_old_pstate_sm ()
    if (aarch64_cfun_enables_pstate_sm ())
      return true;
  
@@ -148,7 +149,7 @@ index f28a45d2b..f57c8bff3 100644
    if (cfun->machine->call_switches_pstate_sm)
      for (auto insn = get_insns (); insn; insn = NEXT_INSN (insn))
        if (auto *call = dyn_cast (insn))
-@@ -29972,6 +30032,59 @@ aarch64_md_asm_adjust (vec &outputs, vec &inputs,
+@@ -30209,6 +30269,59 @@ aarch64_md_asm_adjust (vec &outputs, vec &inputs,
    return seq;
  }
  
@@ -208,7 +209,7 @@ index f28a45d2b..f57c8bff3 100644
  /* If CALL involves a change in PSTATE.SM, emit the instructions needed
     to switch to the new mode and the instructions needed to restore the
     original mode.  Return true if something changed.  */
-@@ -30055,9 +30168,10 @@ public:
+@@ -30292,9 +30405,10 @@ public:
  };
  
  bool
@@ -221,7 +222,7 @@ index f28a45d2b..f57c8bff3 100644
  }
  
  /* Emit any instructions needed to switch PSTATE.SM.  */
-@@ -30070,11 +30184,24 @@ pass_switch_pstate_sm::execute (function *fn)
+@@ -30307,11 +30421,24 @@ pass_switch_pstate_sm::execute (function *fn)
    bitmap_clear (blocks);
    FOR_EACH_BB_FN (bb, fn)
      {
@@ -703,5 +704,5 @@ index 000000000..9cc3ad5d2
 +// { dg-final { scan-assembler-not {\tsmstart\t} } }
 +// { dg-final { scan-assembler-not {\tsmstop\t} } }
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0111-aarch64-Enforce-inlining-restrictions-for-SME.patch b/0214-Backport-SME-aarch64-Enforce-inlining-restrictions-f.patch
similarity index 98%
rename from SME-0111-aarch64-Enforce-inlining-restrictions-for-SME.patch
rename to 0214-Backport-SME-aarch64-Enforce-inlining-restrictions-f.patch
index a6d3df2b06f5ca02ed0670324e561c9b53b3dd61..db7c5b3b0507c948153d164875faa03e0e1a29aa 100644
--- a/SME-0111-aarch64-Enforce-inlining-restrictions-for-SME.patch
+++ b/0214-Backport-SME-aarch64-Enforce-inlining-restrictions-f.patch
@@ -1,7 +1,8 @@
-From 9bb96e981f15dae914483df0fbc1c49564352ead Mon Sep 17 00:00:00 2001
+From c4578108ab766178fe7ebd51421c1ac9f317b675 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:30 +0000
-Subject: [PATCH 111/144] aarch64: Enforce inlining restrictions for SME
+Subject: [PATCH 115/157] [Backport][SME] aarch64: Enforce inlining
+ restrictions for SME
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=0e9aa05df6c643610a3821af52eda642a525a886
 
@@ -77,7 +78,7 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/sme/inlining_9.c
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index f57c8bff3..6d04aee5c 100644
+index effb567c2..eab94d5c2 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -85,6 +85,9 @@
@@ -90,7 +91,7 @@ index f57c8bff3..6d04aee5c 100644
  
  /* This file should be included last.  */
  #include "target-def.h"
-@@ -21119,6 +21122,17 @@ aarch64_option_valid_attribute_p (tree fndecl, tree, tree args, int)
+@@ -21351,6 +21354,17 @@ aarch64_option_valid_attribute_p (tree fndecl, tree, tree args, int)
    return ret;
  }
  
@@ -108,7 +109,7 @@ index f57c8bff3..6d04aee5c 100644
  /* Helper for aarch64_can_inline_p.  In the case where CALLER and CALLEE are
     tri-bool options (yes, no, don't care) and the default value is
     DEF, determine whether to reject inlining.  */
-@@ -21140,6 +21154,60 @@ aarch64_tribools_ok_for_inlining_p (int caller, int callee,
+@@ -21372,6 +21386,60 @@ aarch64_tribools_ok_for_inlining_p (int caller, int callee,
    return (callee == caller || callee == def);
  }
  
@@ -169,7 +170,7 @@ index f57c8bff3..6d04aee5c 100644
  /* Implement TARGET_CAN_INLINE_P.  Decide whether it is valid
     to inline CALLEE into CALLER based on target-specific info.
     Make sure that the caller and callee have compatible architectural
-@@ -21162,12 +21230,56 @@ aarch64_can_inline_p (tree caller, tree callee)
+@@ -21394,12 +21462,56 @@ aarch64_can_inline_p (tree caller, tree callee)
  					   : target_option_default_node);
  
    /* Callee's ISA flags should be a subset of the caller's.  */
@@ -231,7 +232,7 @@ index f57c8bff3..6d04aee5c 100644
      return false;
  
    /* Allow non-strict aligned functions inlining into strict
-@@ -30409,6 +30521,16 @@ aarch64_run_selftests (void)
+@@ -30732,6 +30844,16 @@ aarch64_get_v16qi_mode ()
  #undef TARGET_CAN_ELIMINATE
  #define TARGET_CAN_ELIMINATE aarch64_can_eliminate
  
@@ -908,5 +909,5 @@ index 000000000..91520e378
 +  call_svst1_za ();
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0112-aarch64-Update-sibcall-handling-for-SME.patch b/0215-Backport-SME-aarch64-Update-sibcall-handling-for-SME.patch
similarity index 97%
rename from SME-0112-aarch64-Update-sibcall-handling-for-SME.patch
rename to 0215-Backport-SME-aarch64-Update-sibcall-handling-for-SME.patch
index edb237345e56afc38a6a246c18f6b121464397ef..aa57f9cb419ae0b82bc39fabadde6fbb0a299ec5 100644
--- a/SME-0112-aarch64-Update-sibcall-handling-for-SME.patch
+++ b/0215-Backport-SME-aarch64-Update-sibcall-handling-for-SME.patch
@@ -1,7 +1,8 @@
-From f0ff2426c884e222c89b87fc053778d78e106085 Mon Sep 17 00:00:00 2001
+From 08b6cbe756ede25b16b8e9ff9ee32f76c4f8430f Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 10:11:30 +0000
-Subject: [PATCH 112/144] aarch64: Update sibcall handling for SME
+Subject: [PATCH 116/157] [Backport][SME] aarch64: Update sibcall handling for
+ SME
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=0e7fee57c00ae17611651e0b057dc03b6e276b82
 
@@ -48,10 +49,10 @@ gcc/testsuite/
  create mode 100644 gcc/testsuite/gcc.target/aarch64/sme/sibcall_8.c
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 6d04aee5c..5854f878e 100644
+index eab94d5c2..b8e540b6e 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -8443,6 +8443,11 @@ aarch64_function_ok_for_sibcall (tree, tree exp)
+@@ -8660,6 +8660,11 @@ aarch64_function_ok_for_sibcall (tree, tree exp)
    if (crtl->abi->id () != expr_callee_abi (exp).id ())
      return false;
  
@@ -63,7 +64,7 @@ index 6d04aee5c..5854f878e 100644
    return true;
  }
  
-@@ -11706,7 +11711,9 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
+@@ -11923,7 +11928,9 @@ aarch64_expand_epilogue (rtx_call_insn *sibcall)
  	guard_label = aarch64_guard_switch_pstate_sm (IP0_REGNUM,
  						      aarch64_isa_flags);
        aarch64_sme_mode_switch_regs return_switch;
@@ -419,5 +420,5 @@ index 000000000..33370f7a8
 +}
 +/* { dg-final { scan-assembler {\tb\tnormal_callee} } } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0113-libgcc-aarch64-Configure-check-for-.variant_pcs-supp.patch b/0216-Backport-SME-libgcc-aarch64-Configure-check-for-.var.patch
similarity index 94%
rename from SME-0113-libgcc-aarch64-Configure-check-for-.variant_pcs-supp.patch
rename to 0216-Backport-SME-libgcc-aarch64-Configure-check-for-.var.patch
index 3a7cf4832dbf1a750e2ceebe60b388ebad9de0f4..6e8467ea12552fc05250941afda57ed665229259 100644
--- a/SME-0113-libgcc-aarch64-Configure-check-for-.variant_pcs-supp.patch
+++ b/0216-Backport-SME-libgcc-aarch64-Configure-check-for-.var.patch
@@ -1,8 +1,8 @@
-From 57f7b2ea30835edc04634722200ba78ee468a1c5 Mon Sep 17 00:00:00 2001
+From e0da78a258a34c26488b7ae623f9ae8727c2b264 Mon Sep 17 00:00:00 2001
 From: Szabolcs Nagy 
 Date: Mon, 14 Nov 2022 17:14:18 +0000
-Subject: [PATCH 113/144] libgcc: aarch64: Configure check for .variant_pcs
- support
+Subject: [PATCH 117/157] [Backport][SME] libgcc: aarch64: Configure check for
+ .variant_pcs support
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=3ebb591c65b4fbe0cddd23ebc0ca2d9f7aef2cec
 
@@ -113,5 +113,5 @@ index 2fc9d5d7c..abc398c91 100644
  AC_CACHE_CHECK(for init priority support, libgcc_cv_init_priority, [
  AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0114-libgcc-aarch64-Configure-check-for-__getauxval.patch b/0217-Backport-SME-libgcc-aarch64-Configure-check-for-__ge.patch
similarity index 95%
rename from SME-0114-libgcc-aarch64-Configure-check-for-__getauxval.patch
rename to 0217-Backport-SME-libgcc-aarch64-Configure-check-for-__ge.patch
index 7fad931656bf526837d797f9a70cd6e8a9673a56..07c141792b4d27b1fd8ac9dff9d8f81a98cf3106 100644
--- a/SME-0114-libgcc-aarch64-Configure-check-for-__getauxval.patch
+++ b/0217-Backport-SME-libgcc-aarch64-Configure-check-for-__ge.patch
@@ -1,7 +1,8 @@
-From 1904a9a53f8c3500aced93fab78b9da311454143 Mon Sep 17 00:00:00 2001
+From 66d4035958e1dee2d16f9290004921674eb492b3 Mon Sep 17 00:00:00 2001
 From: Szabolcs Nagy 
 Date: Mon, 4 Dec 2023 10:52:52 +0000
-Subject: [PATCH 114/144] libgcc: aarch64: Configure check for __getauxval
+Subject: [PATCH 118/157] [Backport][SME] libgcc: aarch64: Configure check for
+ __getauxval
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=dbbfb52b0e9c66ee9d05b8fd17c4f44655e48463
 
@@ -112,5 +113,5 @@ index abc398c91..64b45ae14 100644
  AC_CACHE_CHECK(for init priority support, libgcc_cv_init_priority, [
  AC_COMPILE_IFELSE([AC_LANG_PROGRAM(,
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0115-libgcc-aarch64-Add-SME-runtime-support.patch b/0218-Backport-SME-libgcc-aarch64-Add-SME-runtime-support.patch
similarity index 99%
rename from SME-0115-libgcc-aarch64-Add-SME-runtime-support.patch
rename to 0218-Backport-SME-libgcc-aarch64-Add-SME-runtime-support.patch
index 4af4cb0a7f6c621bfd3f5acd892e9c8fa99fb0d0..30b85d99c35d7f4c719ca2fc08d05d9c690c388c 100644
--- a/SME-0115-libgcc-aarch64-Add-SME-runtime-support.patch
+++ b/0218-Backport-SME-libgcc-aarch64-Add-SME-runtime-support.patch
@@ -1,7 +1,8 @@
-From c8c7d460ecc23bf1bcdc61b606f444c28ca9aa93 Mon Sep 17 00:00:00 2001
+From 1e111ac2d71c5469dc526559de009542acaeb16f Mon Sep 17 00:00:00 2001
 From: Szabolcs Nagy 
 Date: Tue, 15 Nov 2022 14:08:55 +0000
-Subject: [PATCH 115/144] libgcc: aarch64: Add SME runtime support
+Subject: [PATCH 119/157] [Backport][SME] libgcc: aarch64: Add SME runtime
+ support
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=328c17af772207cb03740809c05ba2c3abfb86be
 
@@ -622,5 +623,5 @@ index 8ca803bd3..5a8feb184 100644
 +
 +SHLIB_MAPFILES += $(srcdir)/config/aarch64/libgcc-sme.ver
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0116-libgcc-aarch64-Add-SME-unwinder-support.patch b/0219-Backport-SME-libgcc-aarch64-Add-SME-unwinder-support.patch
similarity index 93%
rename from SME-0116-libgcc-aarch64-Add-SME-unwinder-support.patch
rename to 0219-Backport-SME-libgcc-aarch64-Add-SME-unwinder-support.patch
index f3c62742d63c4bec4b3ad0d54ee6fff83494cb68..91c5d4bec31f0d2ab12ac97d5b8ce07c8c25f927 100644
--- a/SME-0116-libgcc-aarch64-Add-SME-unwinder-support.patch
+++ b/0219-Backport-SME-libgcc-aarch64-Add-SME-unwinder-support.patch
@@ -1,7 +1,8 @@
-From 22a0477290415babe08a3a70e125aa37418a2da2 Mon Sep 17 00:00:00 2001
+From 310c8b5aaedad1430146fed9d8992201278164a6 Mon Sep 17 00:00:00 2001
 From: Szabolcs Nagy 
 Date: Fri, 29 Sep 2023 13:55:51 +0100
-Subject: [PATCH 116/144] libgcc: aarch64: Add SME unwinder support
+Subject: [PATCH 120/157] [Backport][SME] libgcc: aarch64: Add SME unwinder
+ support
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=91d68665b8b7a5dffd0bbf8cd1f74c3c41d4c2d8
 
@@ -65,5 +66,5 @@ index 40b22d3c2..bfa695dcb 100644
 +
  #endif /* defined AARCH64_UNWIND_H && defined __ILP32__ */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0117-libgcc-Fix-config.in.patch b/0220-Backport-SME-libgcc-Fix-config.in.patch
similarity index 90%
rename from SME-0117-libgcc-Fix-config.in.patch
rename to 0220-Backport-SME-libgcc-Fix-config.in.patch
index e5224a9e4e8f37c2cce244420125b9cfd8e4a260..58b57e1a09833f6ed6d4f828a50e7d754ed3bd0b 100644
--- a/SME-0117-libgcc-Fix-config.in.patch
+++ b/0220-Backport-SME-libgcc-Fix-config.in.patch
@@ -1,7 +1,7 @@
-From 85b1fc6f6b21ddce73f7277a6e9141aea9e79e2a Mon Sep 17 00:00:00 2001
+From b20b75158d1230a8b6cbabb36e3b128cbd9ec86f Mon Sep 17 00:00:00 2001
 From: Szabolcs Nagy 
 Date: Fri, 8 Dec 2023 12:22:54 +0000
-Subject: [PATCH 117/144] libgcc: Fix config.in
+Subject: [PATCH 121/157] [Backport][SME] libgcc: Fix config.in
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=47575ec9edcd3078f066aa54ba428420be796bef
 
@@ -47,5 +47,5 @@ index 441d4d39b..8f7dd437b 100644
  #undef PACKAGE_BUGREPORT
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0118-aarch64-Add-funwind-tables-to-some-tests.patch b/0221-Backport-SME-aarch64-Add-funwind-tables-to-some-test.patch
similarity index 93%
rename from SME-0118-aarch64-Add-funwind-tables-to-some-tests.patch
rename to 0221-Backport-SME-aarch64-Add-funwind-tables-to-some-test.patch
index 3088d40885049a0c8824b34bc016af2cce96e7c1..2bd32688a7022404a04d985b5ef0ad6c971ccdb5 100644
--- a/SME-0118-aarch64-Add-funwind-tables-to-some-tests.patch
+++ b/0221-Backport-SME-aarch64-Add-funwind-tables-to-some-test.patch
@@ -1,7 +1,8 @@
-From 2d76dc4a7ba67f3d6ca9676282880501eea3d251 Mon Sep 17 00:00:00 2001
+From 0214ca06a182481851ed90aae21f460f87d26084 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sun, 10 Dec 2023 19:46:05 +0000
-Subject: [PATCH 118/144] aarch64: Add -funwind-tables to some tests
+Subject: [PATCH 122/157] [Backport][SME] aarch64: Add -funwind-tables to some
+ tests
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=02ecdaab7a50f4505fd905effb6d238d773dc813
 
@@ -49,5 +50,5 @@ index be9b5cc04..e3d9bc274 100644
  
  #include 
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0119-aarch64-Skip-some-SME-register-save-tests-on-BE.patch b/0222-Backport-SME-aarch64-Skip-some-SME-register-save-tes.patch
similarity index 95%
rename from SME-0119-aarch64-Skip-some-SME-register-save-tests-on-BE.patch
rename to 0222-Backport-SME-aarch64-Skip-some-SME-register-save-tes.patch
index 34cb0692bb08e2a6e26b86e711f52d7bff1d532e..0b0dbd97beb6c13b7b8c5584e8bc37e35a882466 100644
--- a/SME-0119-aarch64-Skip-some-SME-register-save-tests-on-BE.patch
+++ b/0222-Backport-SME-aarch64-Skip-some-SME-register-save-tes.patch
@@ -1,7 +1,8 @@
-From f21e12507b0b1363a4dd05cb32f68b92965d4df3 Mon Sep 17 00:00:00 2001
+From cc2e901eccd40992432f74270a9ebc1b708b6eb1 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sun, 10 Dec 2023 19:46:05 +0000
-Subject: [PATCH 119/144] aarch64: Skip some SME register save tests on BE
+Subject: [PATCH 123/157] [Backport][SME] aarch64: Skip some SME register save
+ tests on BE
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=23ea0bc2cf042d74c4adfe26a57cf96b1d837a91
 
@@ -101,5 +102,5 @@ index 20ff4b87d..4bb637f47 100644
  **	cntd	x16
  **	str	x16, \[sp\]
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0120-Add-OPTIONS_H_EXTRA-to-GTFILES.patch b/0223-Backport-SME-Add-OPTIONS_H_EXTRA-to-GTFILES.patch
similarity index 81%
rename from SME-0120-Add-OPTIONS_H_EXTRA-to-GTFILES.patch
rename to 0223-Backport-SME-Add-OPTIONS_H_EXTRA-to-GTFILES.patch
index 61a2b687727204e543abfaf92290a1a7cb36ff97..edbbd5a1b7f5bc099b108c04d28732f5c7afb9d6 100644
--- a/SME-0120-Add-OPTIONS_H_EXTRA-to-GTFILES.patch
+++ b/0223-Backport-SME-Add-OPTIONS_H_EXTRA-to-GTFILES.patch
@@ -1,7 +1,7 @@
-From 34f10bf4b065ac0664fc241933f766c743718380 Mon Sep 17 00:00:00 2001
+From ab7a2c3b74c65d62d661621c56ef984cfb72f985 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Sep 2022 11:32:50 +0100
-Subject: [PATCH 120/144] Add OPTIONS_H_EXTRA to GTFILES
+Subject: [PATCH 124/157] [Backport][SME] Add OPTIONS_H_EXTRA to GTFILES
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c1e1fa054970a30844eb94d726b4954dcb8b9063
 
@@ -21,10 +21,10 @@ gcc/
  1 file changed, 1 insertion(+)
 
 diff --git a/gcc/Makefile.in b/gcc/Makefile.in
-index 31ff95500..ab25c7d54 100644
+index 5cd838270..fcfa54697 100644
 --- a/gcc/Makefile.in
 +++ b/gcc/Makefile.in
-@@ -2647,6 +2647,7 @@ s-match: build/genmatch$(build_exeext) $(srcdir)/match.pd cfn-operators.pd
+@@ -2648,6 +2648,7 @@ s-match: build/genmatch$(build_exeext) $(srcdir)/match.pd cfn-operators.pd
  
  GTFILES = $(CPPLIB_H) $(srcdir)/input.h $(srcdir)/coretypes.h \
    $(host_xm_file_list) \
@@ -33,5 +33,5 @@ index 31ff95500..ab25c7d54 100644
    $(srcdir)/wide-int.h $(srcdir)/alias.h \
    $(srcdir)/coverage.cc  $(srcdir)/rtl.h \
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0121-aarch64-Add-V1DI-mode.patch b/0224-Backport-SME-aarch64-Add-V1DI-mode.patch
similarity index 95%
rename from SME-0121-aarch64-Add-V1DI-mode.patch
rename to 0224-Backport-SME-aarch64-Add-V1DI-mode.patch
index 5853a0462913047aae79e83cbe26adf9863b5c97..401391c6c720f668924487ec3cf1b8f2531ab335 100644
--- a/SME-0121-aarch64-Add-V1DI-mode.patch
+++ b/0224-Backport-SME-aarch64-Add-V1DI-mode.patch
@@ -1,7 +1,7 @@
-From a0010961085d8c126d823f8c8a738582066dfd94 Mon Sep 17 00:00:00 2001
+From 21f9190106f8324be42e3e8e0510467386dd68a0 Mon Sep 17 00:00:00 2001
 From: Andrew Carlotti 
 Date: Fri, 15 Jul 2022 15:25:53 +0100
-Subject: [PATCH 121/144] aarch64: Add V1DI mode
+Subject: [PATCH 125/157] [Backport][SME] aarch64: Add V1DI mode
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=5ba864c5d11a1c20891a1e054cb7814ec23de5c9
 
@@ -89,10 +89,10 @@ index 248e51e96..405455814 100644
    ENTRY (Float16x4_t, V4HF, none, 13)
    ENTRY (Float16x8_t, V8HF, none, 13)
 diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
-index 8e09e3bdc..63fbc0a80 100644
+index 62493cdfa..04592fc90 100644
 --- a/gcc/config/aarch64/aarch64-simd.md
 +++ b/gcc/config/aarch64/aarch64-simd.md
-@@ -8022,16 +8022,16 @@
+@@ -8326,16 +8326,16 @@
  })
  
  ;; Extract a single-element 64-bit vector from one half of a 128-bit vector.
@@ -117,10 +117,10 @@ index 8e09e3bdc..63fbc0a80 100644
  })
  
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 5854f878e..6d7b8fff3 100644
+index b8e540b6e..f7285555b 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -3900,7 +3900,7 @@ aarch64_classify_vector_mode (machine_mode mode)
+@@ -4117,7 +4117,7 @@ aarch64_classify_vector_mode (machine_mode mode)
      case E_V8QImode:
      case E_V4HImode:
      case E_V2SImode:
@@ -130,7 +130,7 @@ index 5854f878e..6d7b8fff3 100644
      case E_V4BFmode:
      case E_V2SFmode:
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index 17e84a48f..6d219bd1a 100644
+index 152d28f6b..94db8c53f 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
 @@ -138,6 +138,9 @@
@@ -173,5 +173,5 @@ index 17e84a48f..6d219bd1a 100644
  (define_mode_attr VDBL [(V8QI "V16QI") (V4HI "V8HI")
  			(V4HF "V8HF")  (V4BF "V8BF")
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0122-Allow-md-iterators-to-include-other-iterators.patch b/0225-Backport-SME-Allow-md-iterators-to-include-other-ite.patch
similarity index 97%
rename from SME-0122-Allow-md-iterators-to-include-other-iterators.patch
rename to 0225-Backport-SME-Allow-md-iterators-to-include-other-ite.patch
index 5ffde68b27c679c560306f7b71f31063189846dd..fa5b8874c081d18b5543dc129debe8b4728e7f95 100644
--- a/SME-0122-Allow-md-iterators-to-include-other-iterators.patch
+++ b/0225-Backport-SME-Allow-md-iterators-to-include-other-ite.patch
@@ -1,7 +1,8 @@
-From a45bb8051326f33719c5de67ec0b279e56c1ac97 Mon Sep 17 00:00:00 2001
+From eaea26e2218ee61a9be0e2933548c752167dcdb5 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Fri, 10 Nov 2023 15:46:21 +0000
-Subject: [PATCH 122/144] Allow md iterators to include other iterators
+Subject: [PATCH 126/157] [Backport][SME] Allow md iterators to include other
+ iterators
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=5dbaf4851bbf56b6176dca1f1e7d38a16b5b84ee
 
@@ -35,7 +36,7 @@ gcc/
  3 files changed, 46 insertions(+), 46 deletions(-)
 
 diff --git a/gcc/config/aarch64/iterators.md b/gcc/config/aarch64/iterators.md
-index 6d219bd1a..9e07ca862 100644
+index 94db8c53f..a1659dfba 100644
 --- a/gcc/config/aarch64/iterators.md
 +++ b/gcc/config/aarch64/iterators.md
 @@ -106,7 +106,7 @@
@@ -212,5 +213,5 @@ index 798d24859..cdfa9e7b8 100644
      }
    while (c != ']');
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0123-riscv-Add-support-for-strlen-inline-expansion.patch b/0226-Backport-SME-riscv-Add-support-for-strlen-inline-exp.patch
similarity index 96%
rename from SME-0123-riscv-Add-support-for-strlen-inline-expansion.patch
rename to 0226-Backport-SME-riscv-Add-support-for-strlen-inline-exp.patch
index eeae2dece4f57a2cf14f18168c220e68b06ba2e1..94d12774d644d36e2589b6a7657b643616c7ad0e 100644
--- a/SME-0123-riscv-Add-support-for-strlen-inline-expansion.patch
+++ b/0226-Backport-SME-riscv-Add-support-for-strlen-inline-exp.patch
@@ -1,7 +1,8 @@
-From a750dd0c5f49fbaaa57024d6dfea93b071b12817 Mon Sep 17 00:00:00 2001
+From 637e6469f2225b6f6f6b0c84b4e7abcd8dfd7ca4 Mon Sep 17 00:00:00 2001
 From: =?UTF-8?q?Christoph=20M=C3=BCllner?= 
 Date: Wed, 28 Sep 2022 11:19:06 +0200
-Subject: [PATCH 123/144] riscv: Add support for strlen inline expansion
+Subject: [PATCH 127/157] [Backport][SME] riscv: Add support for strlen inline
+ expansion
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -137,5 +138,5 @@ index 488016bb4..a0db225cb 100644
  extern rtx_code_label *emit_label (rtx);
  extern rtx_jump_table_data *emit_jump_table_data (rtx);
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0124-attribs-Add-overloads-with-namespace-name.patch b/0227-Backport-SME-attribs-Add-overloads-with-namespace-na.patch
similarity index 97%
rename from SME-0124-attribs-Add-overloads-with-namespace-name.patch
rename to 0227-Backport-SME-attribs-Add-overloads-with-namespace-na.patch
index 401489c8b93b944b441ce839848c9d14596a4aab..bdb59663d410cc39e65403ec1819c0982d522808 100644
--- a/SME-0124-attribs-Add-overloads-with-namespace-name.patch
+++ b/0227-Backport-SME-attribs-Add-overloads-with-namespace-na.patch
@@ -1,7 +1,8 @@
-From 6ec98ae66ad553db18f31d07726e1cb2b3b87c2d Mon Sep 17 00:00:00 2001
+From 8c6ffb4c6f86231eee318ceeb8546a53037edfe9 Mon Sep 17 00:00:00 2001
 From: Jakub Jelinek 
 Date: Tue, 4 Oct 2022 23:13:15 +0200
-Subject: [PATCH 124/144] attribs: Add overloads with namespace name
+Subject: [PATCH 128/157] [Backport][SME] attribs: Add overloads with namespace
+ name
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=0764dc8537a4f87089ecd32391cb5f8803b43c96
 
@@ -184,5 +185,5 @@ index 0856f98fb..9ad530fcb 100644
     return a pointer to the attribute's list first element if the attribute
     starts with ATTR_NAME.  ATTR_NAME must be in the form 'text' (not
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0125-vec-Add-array_slice-constructors-from-non-const-and-.patch b/0228-Backport-SME-vec-Add-array_slice-constructors-from-n.patch
similarity index 88%
rename from SME-0125-vec-Add-array_slice-constructors-from-non-const-and-.patch
rename to 0228-Backport-SME-vec-Add-array_slice-constructors-from-n.patch
index 53695898a1945aa5adc497d08c4e67d79ab4c69d..625c065a18242d2e3309e5f18926f4aa7fb1cab6 100644
--- a/SME-0125-vec-Add-array_slice-constructors-from-non-const-and-.patch
+++ b/0228-Backport-SME-vec-Add-array_slice-constructors-from-n.patch
@@ -1,8 +1,8 @@
-From 95781ae75523b7c2c15bfe83139d0a954c7065da Mon Sep 17 00:00:00 2001
+From 044dc671f7eb723df5b6ce2364d6ae579c0cc984 Mon Sep 17 00:00:00 2001
 From: Martin Jambor 
 Date: Tue, 30 Aug 2022 18:50:35 +0200
-Subject: [PATCH 125/144] vec: Add array_slice constructors from non-const and
- gc vectors
+Subject: [PATCH 129/157] [Backport][SME] vec: Add array_slice constructors
+ from non-const and gc vectors
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=15433c214df295f2281a90fcf283355b21beca0e
 
@@ -43,5 +43,5 @@ index 3ba7ea7ed..fc3b10c85 100644
    iterator end () { return m_base + m_size; }
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0126-A-couple-of-va_gc_atomic-tweaks.patch b/0229-Backport-SME-A-couple-of-va_gc_atomic-tweaks.patch
similarity index 96%
rename from SME-0126-A-couple-of-va_gc_atomic-tweaks.patch
rename to 0229-Backport-SME-A-couple-of-va_gc_atomic-tweaks.patch
index f91522703c7d13f5d7999afa17202501871dd2e6..0fd733c2a7d1742f64dc4281cde87e9903612a98 100644
--- a/SME-0126-A-couple-of-va_gc_atomic-tweaks.patch
+++ b/0229-Backport-SME-A-couple-of-va_gc_atomic-tweaks.patch
@@ -1,7 +1,7 @@
-From 8b23f93ad1be508d59b701dc56b109712948b945 Mon Sep 17 00:00:00 2001
+From 12dd36f06e13ee9cd684c00732caa684f49b3610 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 29 Jun 2023 08:48:17 +0100
-Subject: [PATCH 126/144] A couple of va_gc_atomic tweaks
+Subject: [PATCH 130/157] [Backport][SME] A couple of va_gc_atomic tweaks
 MIME-Version: 1.0
 Content-Type: text/plain; charset=UTF-8
 Content-Transfer-Encoding: 8bit
@@ -136,5 +136,5 @@ index fc3b10c85..592d3f7e0 100644
  
    iterator begin () { return m_base; }
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0127-middle-end-Fix-issue-of-poly_uint16-1-1-in-self-test.patch b/0230-Backport-SME-middle-end-Fix-issue-of-poly_uint16-1-1.patch
similarity index 85%
rename from SME-0127-middle-end-Fix-issue-of-poly_uint16-1-1-in-self-test.patch
rename to 0230-Backport-SME-middle-end-Fix-issue-of-poly_uint16-1-1.patch
index 069d376966d1a090515356cb6ea5652c102a4e5c..588ea4098694979fe377ea922fbbcd52ab34335f 100644
--- a/SME-0127-middle-end-Fix-issue-of-poly_uint16-1-1-in-self-test.patch
+++ b/0230-Backport-SME-middle-end-Fix-issue-of-poly_uint16-1-1.patch
@@ -1,8 +1,8 @@
-From afa0b869a89ad797dd9b1314ebf0197210091fbb Mon Sep 17 00:00:00 2001
+From bb15d4c4476e3ba303c5afe0adae0d86ab5f0a9b Mon Sep 17 00:00:00 2001
 From: zhongjuzhe 
 Date: Mon, 22 Aug 2022 10:15:31 +0100
-Subject: [PATCH 127/144] middle-end: Fix issue of poly_uint16 (1, 1) in self
- test
+Subject: [PATCH 131/157] [Backport][SME] middle-end: Fix issue of poly_uint16
+ (1, 1) in self test
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=21e7d87a901d45f0cb5e5510d22bfbdb0d0ac6a1
 
@@ -30,5 +30,5 @@ index e152918b0..fc79a2e2e 100644
  
    test_vector_subregs_modes (x);
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0128-Add-missing-header-file-in-aarch64.cc.patch b/0231-SME-Add-missing-header-file-in-aarch64.cc.patch
similarity index 73%
rename from SME-0128-Add-missing-header-file-in-aarch64.cc.patch
rename to 0231-SME-Add-missing-header-file-in-aarch64.cc.patch
index 7b8f2a163b08d2e1ce402dfbba856750894eb1d0..6aa3d84e2bcef98b3e3333e71ffc8acafd4148f8 100644
--- a/SME-0128-Add-missing-header-file-in-aarch64.cc.patch
+++ b/0231-SME-Add-missing-header-file-in-aarch64.cc.patch
@@ -1,14 +1,14 @@
-From 886d01c0053e6b354bb65c8e0eab44c91ec69872 Mon Sep 17 00:00:00 2001
+From cce05b3365c3986ca74c04f442662a21b4f03a61 Mon Sep 17 00:00:00 2001
 From: xiezhiheng 
 Date: Mon, 4 Mar 2024 14:39:36 +0800
-Subject: [PATCH 128/144] Add missing header file in `aarch64.cc`
+Subject: [PATCH 132/157] [SME] Add missing header file in `aarch64.cc`
 
 ---
  gcc/config/aarch64/aarch64.cc | 1 +
  1 file changed, 1 insertion(+)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index 6d7b8fff3..f486802da 100644
+index f7285555b..0117a3e12 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
 @@ -86,6 +86,7 @@
@@ -20,5 +20,5 @@ index 6d7b8fff3..f486802da 100644
  #include "ipa-fnsummary.h"
  
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0129-c-Add-support-for-__extension__.patch b/0232-Backport-SME-c-Add-support-for-__extension__.patch
similarity index 98%
rename from SME-0129-c-Add-support-for-__extension__.patch
rename to 0232-Backport-SME-c-Add-support-for-__extension__.patch
index 163b9db88b78b18952d2c4ef786ac607ff80bd1e..5ba9828d913fcfe6ccfafc2ed4cb01c99010b0af 100644
--- a/SME-0129-c-Add-support-for-__extension__.patch
+++ b/0232-Backport-SME-c-Add-support-for-__extension__.patch
@@ -1,7 +1,8 @@
-From b6ede08224e3d517d9f1f8484b6d1e726a1309cb Mon Sep 17 00:00:00 2001
+From 3714cfb47fafef884aa2ff330935fb44b7966909 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 24 Aug 2023 11:49:58 +0100
-Subject: [PATCH 129/144] c: Add support for [[__extension__ ...]]
+Subject: [PATCH 133/157] [Backport][SME] c: Add support for [[__extension__
+ ...]]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=207a5daa9dcf31e367152163ad2a2ab4a0858967
 
@@ -150,10 +151,10 @@ index 78a313fe3..486f46e1c 100644
    c_parser_skip_until_found (parser, CPP_CLOSE_SQUARE, "expected %<]%>");
    return nreverse (attributes);
 diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
-index 33a776a79..367a00b9c 100644
+index 674db2f1a..3cfecee53 100644
 --- a/gcc/doc/extend.texi
 +++ b/gcc/doc/extend.texi
-@@ -11711,10 +11711,29 @@ macros to replace them with the customary keywords.  It looks like this:
+@@ -11726,10 +11726,29 @@ macros to replace them with the customary keywords.  It looks like this:
  @findex __extension__
  @opindex pedantic
  @option{-pedantic} and other options cause warnings for many GNU C extensions.
@@ -322,5 +323,5 @@ index 000000000..702f733b1
 +/* { dg-error {'gnu' attribute ignored} "" { target *-*-* } .-1 } */
 +/* { dg-warning {attributes before C2X} "" { target *-*-* } .-2 } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0130-lra-Updates-of-biggest-mode-for-hard-regs-PR112278.patch b/0233-Backport-SME-lra-Updates-of-biggest-mode-for-hard-re.patch
similarity index 96%
rename from SME-0130-lra-Updates-of-biggest-mode-for-hard-regs-PR112278.patch
rename to 0233-Backport-SME-lra-Updates-of-biggest-mode-for-hard-re.patch
index 39f96772215cef9cd9b1e5988a584237e50a6817..95b50de1e3795e57d49ffccdcff78303d17992ea 100644
--- a/SME-0130-lra-Updates-of-biggest-mode-for-hard-regs-PR112278.patch
+++ b/0233-Backport-SME-lra-Updates-of-biggest-mode-for-hard-re.patch
@@ -1,7 +1,8 @@
-From f8439ba42d5d0486ac6cf016e4cafe5f528f6941 Mon Sep 17 00:00:00 2001
+From 29a71fc5cbfc3b5e4649abf51740daed5ea243bd Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 5 Dec 2023 09:20:55 +0000
-Subject: [PATCH 130/144] lra: Updates of biggest mode for hard regs [PR112278]
+Subject: [PATCH 134/157] [Backport][SME] lra: Updates of biggest mode for hard
+ regs [PR112278]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=6e2e0ce6795c863e295eb33559f8dc0500297da3
 
@@ -135,5 +136,5 @@ index 000000000..4f56add2b
 +  }
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0131-c-Support-C2x-empty-initializer-braces.patch b/0234-Backport-SME-c-Support-C2x-empty-initializer-braces.patch
similarity index 99%
rename from SME-0131-c-Support-C2x-empty-initializer-braces.patch
rename to 0234-Backport-SME-c-Support-C2x-empty-initializer-braces.patch
index 58a468b9611d42e84a9c7789ed1e5cdc43b83f78..f4b1efde682a96985402d74048f00a1307a6e65c 100644
--- a/SME-0131-c-Support-C2x-empty-initializer-braces.patch
+++ b/0234-Backport-SME-c-Support-C2x-empty-initializer-braces.patch
@@ -1,7 +1,8 @@
-From 8095127d3c9157dc54bdcd5e6222ea4670ab678b Mon Sep 17 00:00:00 2001
+From 0a34bb6b18cdf34cb9d4f34b1697e1bcfcff139b Mon Sep 17 00:00:00 2001
 From: Joseph Myers 
 Date: Thu, 25 Aug 2022 21:02:57 +0000
-Subject: [PATCH 131/144] c: Support C2x empty initializer braces
+Subject: [PATCH 135/157] [Backport][SME] c: Support C2x empty initializer
+ braces
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=14cfa01755a66afbae2539f8b5796c960ddcecc6
 
@@ -667,5 +668,5 @@ index 55e1de69c..a854f1268 100644
  void foo(int i) { char a[][i] = {""}; } /* { dg-error "variable-sized object may not be initialized" } */
 -/* { dg-error "array size missing in 'a'" "extra error" { target *-*-* } .-1 } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0132-aarch64-Update-sizeless-tests-for-recent-GNU-C-chang.patch b/0235-Backport-SME-aarch64-Update-sizeless-tests-for-recen.patch
similarity index 97%
rename from SME-0132-aarch64-Update-sizeless-tests-for-recent-GNU-C-chang.patch
rename to 0235-Backport-SME-aarch64-Update-sizeless-tests-for-recen.patch
index de8681fe928423c60a7e6381bbe2b9603c7053b7..04a80d1cdb96b5985d0d9dbc8a8f6cd85c52c16e 100644
--- a/SME-0132-aarch64-Update-sizeless-tests-for-recent-GNU-C-chang.patch
+++ b/0235-Backport-SME-aarch64-Update-sizeless-tests-for-recen.patch
@@ -1,8 +1,8 @@
-From e2cbfe4a24b2b5c4836d2c27b199c3182106e474 Mon Sep 17 00:00:00 2001
+From 67001778883e10110c505dd8876a447a19d1ac5e Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Wed, 31 Aug 2022 15:39:27 +0100
-Subject: [PATCH 132/144] aarch64: Update sizeless tests for recent GNU C
- changes
+Subject: [PATCH 136/157] [Backport][SME] aarch64: Update sizeless tests for
+ recent GNU C changes
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=de9805c08121a84ce368dccfe043a3f44c3ff13b
 
@@ -111,5 +111,5 @@ index c575492c1..34dfd598e 100644
  
    (int) { sve_sc1 }; /* { dg-error {incompatible types when initializing type 'int' using type 'svint8_t'} } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0133-attribs-Namespace-aware-lookup_attribute_spec.patch b/0236-Backport-SME-attribs-Namespace-aware-lookup_attribut.patch
similarity index 92%
rename from SME-0133-attribs-Namespace-aware-lookup_attribute_spec.patch
rename to 0236-Backport-SME-attribs-Namespace-aware-lookup_attribut.patch
index 27e0eab78e15dcab2d72bf52f28a5b0b529244d7..f7a909f804e5da500cf98f605e4bd08dc3b4bc13 100644
--- a/SME-0133-attribs-Namespace-aware-lookup_attribute_spec.patch
+++ b/0236-Backport-SME-attribs-Namespace-aware-lookup_attribut.patch
@@ -1,7 +1,8 @@
-From fca8d51badc062dc7caea9feb8eff1e9a2f97996 Mon Sep 17 00:00:00 2001
+From dbe5a29054d4eb1e0f5173c8f2291569eac71c96 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Sat, 2 Dec 2023 13:49:55 +0000
-Subject: [PATCH 133/144] attribs: Namespace-aware lookup_attribute_spec
+Subject: [PATCH 137/157] [Backport][SME] attribs: Namespace-aware
+ lookup_attribute_spec
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=df4643f90c45db2501c731d4fded60dc1426b484
 
@@ -53,5 +54,5 @@ index 8e2696bc5..1dbc30a95 100644
        if (!predicate (attr, as))
  	end = attr;
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0134-c-family-ICE-with-gnu-nocf_check-PR106937.patch b/0237-Backport-SME-c-family-ICE-with-gnu-nocf_check-PR1069.patch
similarity index 98%
rename from SME-0134-c-family-ICE-with-gnu-nocf_check-PR106937.patch
rename to 0237-Backport-SME-c-family-ICE-with-gnu-nocf_check-PR1069.patch
index 6383e0b7aa6dc9189c0f37a0057532437e9ea8dc..3d8cde6f8d0e4d2829efaf45ecbec5e1edcdae7c 100644
--- a/SME-0134-c-family-ICE-with-gnu-nocf_check-PR106937.patch
+++ b/0237-Backport-SME-c-family-ICE-with-gnu-nocf_check-PR1069.patch
@@ -1,7 +1,8 @@
-From 4a3e657ae59905fcc28ee6e8a2c36501eab55cc2 Mon Sep 17 00:00:00 2001
+From 6f42edc5035b7f7e96730dca19757b148e1be70c Mon Sep 17 00:00:00 2001
 From: Marek Polacek 
 Date: Thu, 29 Sep 2022 17:49:32 -0400
-Subject: [PATCH 134/144] c-family: ICE with [[gnu::nocf_check]] [PR106937]
+Subject: [PATCH 138/157] [Backport][SME] c-family: ICE with
+ [[gnu::nocf_check]] [PR106937]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=67efffec943656a509e036cd3c785a5c3d6885e1
 
@@ -276,5 +277,5 @@ index 000000000..975885462
 +  FP2 p4 = p2; // { dg-warning {aka 'void \(\*\)\(int\) \[\[gnu::nocf_check\]\]'} }
 +}
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0135-AArch64-Fix-assert-in-aarch64_move_imm-PR108006.patch b/0238-Backport-SME-AArch64-Fix-assert-in-aarch64_move_imm-.patch
similarity index 80%
rename from SME-0135-AArch64-Fix-assert-in-aarch64_move_imm-PR108006.patch
rename to 0238-Backport-SME-AArch64-Fix-assert-in-aarch64_move_imm-.patch
index 35c13aa8e15d1a7faa850b93c740c2760fffd02b..58c1071ba0b64f187c23da4d9eed0cd6e80758bd 100644
--- a/SME-0135-AArch64-Fix-assert-in-aarch64_move_imm-PR108006.patch
+++ b/0238-Backport-SME-AArch64-Fix-assert-in-aarch64_move_imm-.patch
@@ -1,7 +1,8 @@
-From 167d21961e20b530068a36c51f1ab9f99f4a7e37 Mon Sep 17 00:00:00 2001
+From d13efe98cafa04aeb24f8e0f695e648887986228 Mon Sep 17 00:00:00 2001
 From: Wilco Dijkstra 
 Date: Wed, 7 Dec 2022 14:16:24 +0000
-Subject: [PATCH 135/144] AArch64: Fix assert in aarch64_move_imm [PR108006]
+Subject: [PATCH 139/157] [Backport][SME] AArch64: Fix assert in
+ aarch64_move_imm [PR108006]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=952c8a1dc6235dc49ab207a7f18f63d2bc97fbc9
 
@@ -16,10 +17,10 @@ gcc/
  1 file changed, 2 insertions(+), 1 deletion(-)
 
 diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
-index f486802da..ae485e429 100644
+index 0117a3e12..309ecc3d9 100644
 --- a/gcc/config/aarch64/aarch64.cc
 +++ b/gcc/config/aarch64/aarch64.cc
-@@ -7708,7 +7708,8 @@ aarch64_expand_sve_const_vector (rtx target, rtx src)
+@@ -7925,7 +7925,8 @@ aarch64_expand_sve_const_vector (rtx target, rtx src)
  	  /* If the integer can be moved into a general register by a
  	     single instruction, do that and duplicate the result.  */
  	  if (CONST_INT_P (elt_value)
@@ -30,5 +31,5 @@ index f486802da..ae485e429 100644
  	      elt_value = force_reg (elt_mode, elt_value);
  	      return expand_vector_broadcast (mode, elt_value);
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0136-testsuite-Only-run-fcf-protection-test-on-i-86-x86_6.patch b/0239-Backport-SME-testsuite-Only-run-fcf-protection-test-.patch
similarity index 85%
rename from SME-0136-testsuite-Only-run-fcf-protection-test-on-i-86-x86_6.patch
rename to 0239-Backport-SME-testsuite-Only-run-fcf-protection-test-.patch
index 492ceb545aebd2a0d2140324a1665c17a65353a9..df5e047c1beda7a37b41bf7820dd29322f6d0829 100644
--- a/SME-0136-testsuite-Only-run-fcf-protection-test-on-i-86-x86_6.patch
+++ b/0239-Backport-SME-testsuite-Only-run-fcf-protection-test-.patch
@@ -1,8 +1,8 @@
-From 6c8eb7979b54163a49e6a7cf9f2eade1563128f5 Mon Sep 17 00:00:00 2001
+From 071f26ce18db5a09cbae0607b065028a09a856ac Mon Sep 17 00:00:00 2001
 From: Marek Polacek 
 Date: Tue, 11 Oct 2022 12:51:40 -0400
-Subject: [PATCH 136/144] testsuite: Only run -fcf-protection test on
- i?86/x86_64 [PR107213]
+Subject: [PATCH 140/157] [Backport][SME] testsuite: Only run -fcf-protection
+ test on i?86/x86_64 [PR107213]
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=cc694f45087c892e69ebbb177203c708f00b1bc7
 
@@ -33,5 +33,5 @@ index 975885462..e2f948d82 100644
  /* { dg-additional-options "-std=c++11 -fpermissive" { target c++ } } */
  /* Test printing a pointer to function with attribute.  */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0137-Fix-PRs-106764-106765-and-107307-all-ICE-after-inval.patch b/0240-Backport-SME-Fix-PRs-106764-106765-and-107307-all-IC.patch
similarity index 94%
rename from SME-0137-Fix-PRs-106764-106765-and-107307-all-ICE-after-inval.patch
rename to 0240-Backport-SME-Fix-PRs-106764-106765-and-107307-all-IC.patch
index 1281884622022411bc7c1e744a9386af10b00df4..d048c484ccf15069bb2df03257b38a0789f7980d 100644
--- a/SME-0137-Fix-PRs-106764-106765-and-107307-all-ICE-after-inval.patch
+++ b/0240-Backport-SME-Fix-PRs-106764-106765-and-107307-all-IC.patch
@@ -1,8 +1,8 @@
-From 873b3fe9c84d3d1399362838c29522d298c4f309 Mon Sep 17 00:00:00 2001
+From 202ebc25e509ae0a2ac7d05c822cf6a8a817e49a Mon Sep 17 00:00:00 2001
 From: Andrew Pinski 
 Date: Thu, 17 Nov 2022 22:08:07 +0000
-Subject: [PATCH 137/144] Fix PRs 106764, 106765, and 107307, all ICE after
- invalid re-declaration
+Subject: [PATCH 141/157] [Backport][SME] Fix PRs 106764, 106765, and 107307,
+ all ICE after invalid re-declaration
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=bd0c9d9e706adaeea0d96152daade0a6819a8715
 
@@ -109,5 +109,5 @@ index 000000000..2f2a6548a
 +  a.b;
 +  d a; // { dg-error "" }
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0138-aarch64-Remove-expected-error-for-compound-literals.patch b/0241-Backport-SME-aarch64-Remove-expected-error-for-compo.patch
similarity index 91%
rename from SME-0138-aarch64-Remove-expected-error-for-compound-literals.patch
rename to 0241-Backport-SME-aarch64-Remove-expected-error-for-compo.patch
index cf2402d0442d6f37a0c1ec64cc4116799b9dcd92..5b03a5d67a222ea0e81353ac40c444a210ab4e27 100644
--- a/SME-0138-aarch64-Remove-expected-error-for-compound-literals.patch
+++ b/0241-Backport-SME-aarch64-Remove-expected-error-for-compo.patch
@@ -1,7 +1,8 @@
-From 1ea47e283122fadd18a57a86c2196e11b780ca43 Mon Sep 17 00:00:00 2001
+From bc42a8bdab7b2ffeb81441c7c8a9a1215d8502ee Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Thu, 26 Jan 2023 15:51:00 +0000
-Subject: [PATCH 138/144] aarch64: Remove expected error for compound literals
+Subject: [PATCH 142/157] [Backport][SME] aarch64: Remove expected error for
+ compound literals
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=96fbe541481fcc7d1a8884fb8dbefd7979eb9543
 
@@ -38,5 +39,5 @@ index 7c9188cf2..f4ae68028 100644
    (bfloat16_t) { 0 }; /* { dg-error {invalid conversion to type 'bfloat16_t'} } */
    (bfloat16_t) { 0.1 }; /* { dg-error {invalid conversion to type 'bfloat16_t'} } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0139-aarch64-Remove-redundant-builtins-code.patch b/0242-Backport-SME-aarch64-Remove-redundant-builtins-code.patch
similarity index 97%
rename from SME-0139-aarch64-Remove-redundant-builtins-code.patch
rename to 0242-Backport-SME-aarch64-Remove-redundant-builtins-code.patch
index 26b376599a023d9052c27fe2557cfbba34d03218..85f8c59e976ecba503dc44f3e3892b27a829fa7b 100644
--- a/SME-0139-aarch64-Remove-redundant-builtins-code.patch
+++ b/0242-Backport-SME-aarch64-Remove-redundant-builtins-code.patch
@@ -1,7 +1,8 @@
-From c265b0a00c4c2e398fab1bb921282e58c0fca99f Mon Sep 17 00:00:00 2001
+From 42bfa9a26205da222cebbe830168b6f0b5e668b4 Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Tue, 12 Jul 2022 12:59:25 +0100
-Subject: [PATCH 139/144] aarch64: Remove redundant builtins code
+Subject: [PATCH 143/157] [Backport][SME] aarch64: Remove redundant builtins
+ code
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=e80daf04c8888f527d2fc7f6cbcd1b4c853dcd04
 
@@ -259,5 +260,5 @@ index 000000000..8516808be
 +*/
 +TEST4 (int, ctz, int)
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0140-AArch64-Fix-Armv9-a-warnings-that-get-emitted-whenev.patch b/0243-Backport-SME-AArch64-Fix-Armv9-a-warnings-that-get-e.patch
similarity index 91%
rename from SME-0140-AArch64-Fix-Armv9-a-warnings-that-get-emitted-whenev.patch
rename to 0243-Backport-SME-AArch64-Fix-Armv9-a-warnings-that-get-e.patch
index 5c93e6ec6d52c91532650472c083638ff8c947ff..0e4b80ba00dcc0cd423d217e5f460812c12a9664 100644
--- a/SME-0140-AArch64-Fix-Armv9-a-warnings-that-get-emitted-whenev.patch
+++ b/0243-Backport-SME-AArch64-Fix-Armv9-a-warnings-that-get-e.patch
@@ -1,8 +1,8 @@
-From 615a59c6c0ffc469ea0b0b4781adb6bab592cfc5 Mon Sep 17 00:00:00 2001
+From a1ba437195286af3389ba9f2d43b8cb6c73ba3d8 Mon Sep 17 00:00:00 2001
 From: Tamar Christina 
 Date: Thu, 12 Oct 2023 15:55:58 +0100
-Subject: [PATCH 140/144] AArch64: Fix Armv9-a warnings that get emitted
- whenever a ACLE header is used.
+Subject: [PATCH 144/157] [Backport][SME] AArch64: Fix Armv9-a warnings that
+ get emitted whenever a ACLE header is used.
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=de593b3cffdc9c030c5e697ad9092b1b085dabc4
 
@@ -59,5 +59,5 @@ index 000000000..35690d5bc
 +#include 
 +
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0141-Canonicalize-X-Y-as-X-Y-in-match.pd-when-Y-is-0-1.patch b/0244-Backport-SME-Canonicalize-X-Y-as-X-Y-in-match.pd-whe.patch
similarity index 97%
rename from SME-0141-Canonicalize-X-Y-as-X-Y-in-match.pd-when-Y-is-0-1.patch
rename to 0244-Backport-SME-Canonicalize-X-Y-as-X-Y-in-match.pd-whe.patch
index 2706465e276236c3d5c0b2b0b4051cc9541046b4..aab1bb6998df9e4e54e113555bb6ba1cb988521b 100644
--- a/SME-0141-Canonicalize-X-Y-as-X-Y-in-match.pd-when-Y-is-0-1.patch
+++ b/0244-Backport-SME-Canonicalize-X-Y-as-X-Y-in-match.pd-whe.patch
@@ -1,7 +1,8 @@
-From fa77e4d53322650dbc9fb339d9c4dfa5ee936f16 Mon Sep 17 00:00:00 2001
+From f6652dbebf81372884e9fd8b68627fc7a94d8d3b Mon Sep 17 00:00:00 2001
 From: Roger Sayle 
 Date: Fri, 27 May 2022 08:57:46 +0100
-Subject: [PATCH 141/144] Canonicalize X&-Y as X*Y in match.pd when Y is [0,1].
+Subject: [PATCH 145/157] [Backport][SME] Canonicalize X&-Y as X*Y in match.pd
+ when Y is [0,1].
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=8fb94fc6097c0a934aac0d89c9c5e2038da67655
 
@@ -47,7 +48,7 @@ gcc/testsuite/ChangeLog
  create mode 100644 gcc/testsuite/gcc.dg/pr98865.c
 
 diff --git a/gcc/match.pd b/gcc/match.pd
-index fc2833bbd..108378f63 100644
+index aee58e47b..6d3165bcd 100644
 --- a/gcc/match.pd
 +++ b/gcc/match.pd
 @@ -285,14 +285,6 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
@@ -179,5 +180,5 @@ index 000000000..95f727033
 +
 +/* { dg-final { scan-tree-dump-times " \\* " 2 "optimized" } } */
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0142-middle-end-Add-new-tbranch-optab-to-add-support-for-.patch b/0245-Backport-SME-middle-end-Add-new-tbranch-optab-to-add.patch
similarity index 98%
rename from SME-0142-middle-end-Add-new-tbranch-optab-to-add-support-for-.patch
rename to 0245-Backport-SME-middle-end-Add-new-tbranch-optab-to-add.patch
index 8701d3fa01e867a05cb1d184692579936ead189d..229b7976d6076add30414e3b1f68481ef5e3119f 100644
--- a/SME-0142-middle-end-Add-new-tbranch-optab-to-add-support-for-.patch
+++ b/0245-Backport-SME-middle-end-Add-new-tbranch-optab-to-add.patch
@@ -1,8 +1,8 @@
-From 7c3f7e92e49a9a7d39f4f1681aad7522511ad93c Mon Sep 17 00:00:00 2001
+From a8f10b4b73c2624599765edf7ff19d53eca15135 Mon Sep 17 00:00:00 2001
 From: Tamar Christina 
 Date: Mon, 12 Dec 2022 15:16:50 +0000
-Subject: [PATCH 142/144] middle-end: Add new tbranch optab to add support for
- bit-test-and-branch operations
+Subject: [PATCH 146/157] [Backport][SME] middle-end: Add new tbranch optab to
+ add support for bit-test-and-branch operations
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=dc582d2ef32e2d3723c68d111f4e49607631f34d
 
@@ -413,5 +413,5 @@ index 3ff7732dc..07af584d6 100644
  extern tree build_pointer_type_for_mode (tree, machine_mode, bool);
  extern tree build_pointer_type (tree);
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0143-explow-Allow-dynamic-allocations-after-vregs.patch b/0246-Backport-SME-explow-Allow-dynamic-allocations-after-.patch
similarity index 95%
rename from SME-0143-explow-Allow-dynamic-allocations-after-vregs.patch
rename to 0246-Backport-SME-explow-Allow-dynamic-allocations-after-.patch
index bf02d62f16bba3b559c87b0ee6d1649c43eef33a..fdefe488637a4023d500c969d39700059dcf8b72 100644
--- a/SME-0143-explow-Allow-dynamic-allocations-after-vregs.patch
+++ b/0246-Backport-SME-explow-Allow-dynamic-allocations-after-.patch
@@ -1,7 +1,8 @@
-From 582b03e490375bf6c43f4b5bc51e83594417c4cb Mon Sep 17 00:00:00 2001
+From fe64cc72e6221cf05e40bc868287bd1fcf07479f Mon Sep 17 00:00:00 2001
 From: Richard Sandiford 
 Date: Mon, 6 Nov 2023 10:49:58 +0000
-Subject: [PATCH 143/144] explow: Allow dynamic allocations after vregs
+Subject: [PATCH 147/157] [Backport][SME] explow: Allow dynamic allocations
+ after vregs
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=a7a45364dddef399bfb550235166df51108a3142
 
@@ -63,7 +64,7 @@ index d8aa75ee9..124e8f49e 100644
  
    suppress_reg_args_size = false;
 diff --git a/gcc/function.cc b/gcc/function.cc
-index 35e7f663f..f2106acb4 100644
+index f4fc211a0..e62b2a8d6 100644
 --- a/gcc/function.cc
 +++ b/gcc/function.cc
 @@ -1945,6 +1945,16 @@ instantiate_decls (tree fndecl)
@@ -105,5 +106,5 @@ index 4e8131706..a5846465a 100644
  /* Returns the name of the current function.  */
  extern const char *fndecl_name (tree);
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0144-PR105169-Fix-references-to-discarded-sections.patch b/0247-Backport-SME-PR105169-Fix-references-to-discarded-se.patch
similarity index 94%
rename from SME-0144-PR105169-Fix-references-to-discarded-sections.patch
rename to 0247-Backport-SME-PR105169-Fix-references-to-discarded-se.patch
index abbb04b9a6e1f1b079048ecb3d13af91780e8490..93a7e3a4cff3bed20479f41dd0769020eb30e57a 100644
--- a/SME-0144-PR105169-Fix-references-to-discarded-sections.patch
+++ b/0247-Backport-SME-PR105169-Fix-references-to-discarded-se.patch
@@ -1,7 +1,8 @@
-From 672ae1dc91e1c9e3db2e89a228db1ead6e23b485 Mon Sep 17 00:00:00 2001
+From 6cc7bcadadbc2521a2db4c02adfe066d805e37ef Mon Sep 17 00:00:00 2001
 From: Giuliano Belinassi 
 Date: Fri, 6 May 2022 23:37:52 -0300
-Subject: [PATCH] PR105169 Fix references to discarded sections
+Subject: [PATCH 148/157] [Backport][SME] PR105169 Fix references to discarded
+ sections
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=7a3f38a966a52893fb5bae301a1a3d56961358fb
 
@@ -135,10 +136,10 @@ index 000000000..5f8b00dfe
 +{
 +}
 diff --git a/gcc/varasm.cc b/gcc/varasm.cc
-index a4b1cc686..31d474b15 100644
+index 3f69b47a7..bae935694 100644
 --- a/gcc/varasm.cc
 +++ b/gcc/varasm.cc
-@@ -8463,25 +8463,21 @@ default_asm_output_ident_directive (const char *ident_str)
+@@ -8459,25 +8459,21 @@ default_asm_output_ident_directive (const char *ident_str)
      fprintf (asm_out_file, "%s\"%s\"\n", ident_asm_op, ident_str);
  }
  
@@ -169,7 +170,7 @@ index a4b1cc686..31d474b15 100644
    in_section = sect;
  #else
    /* Neither OBJECT_FORMAT_PE, nor OBJECT_FORMAT_COFF is set here.
-@@ -8496,18 +8492,18 @@ handle_vtv_comdat_section (section *sect, const_tree decl ATTRIBUTE_UNUSED)
+@@ -8492,18 +8488,18 @@ handle_vtv_comdat_section (section *sect, const_tree decl ATTRIBUTE_UNUSED)
      {
        char *name;
  
@@ -192,7 +193,7 @@ index a4b1cc686..31d474b15 100644
        in_section = sect;
      }
    else
-@@ -8515,4 +8511,15 @@ handle_vtv_comdat_section (section *sect, const_tree decl ATTRIBUTE_UNUSED)
+@@ -8511,4 +8507,15 @@ handle_vtv_comdat_section (section *sect, const_tree decl ATTRIBUTE_UNUSED)
  #endif
  }
  
@@ -220,5 +221,5 @@ index d5d8c4e55..8ba8374e7 100644
 +
  #endif  // GCC_VARASM_H
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0145-RISC-V-autovec-Verify-that-GET_MODE_NUNITS-is-a-mult.patch b/0248-Backport-SME-RISC-V-autovec-Verify-that-GET_MODE_NUN.patch
similarity index 83%
rename from SME-0145-RISC-V-autovec-Verify-that-GET_MODE_NUNITS-is-a-mult.patch
rename to 0248-Backport-SME-RISC-V-autovec-Verify-that-GET_MODE_NUN.patch
index b53042f6e891a3cd93c7bbb86dc72cc598e22013..df79d84ef2c506aa7adbfd02cbe90821668f1e43 100644
--- a/SME-0145-RISC-V-autovec-Verify-that-GET_MODE_NUNITS-is-a-mult.patch
+++ b/0248-Backport-SME-RISC-V-autovec-Verify-that-GET_MODE_NUN.patch
@@ -1,8 +1,8 @@
-From 58c3ee1f6886490fd8149147553ce3aac82a31eb Mon Sep 17 00:00:00 2001
+From a3b4a0ac472415a52ce836e8997f7a69a06fad33 Mon Sep 17 00:00:00 2001
 From: Michael Collison 
 Date: Sat, 6 May 2023 12:37:50 -0600
-Subject: [PATCH 1/3] RISC-V: autovec: Verify that GET_MODE_NUNITS is a
- multiple of 2.
+Subject: [PATCH 149/157] [Backport][SME] RISC-V: autovec: Verify that
+ GET_MODE_NUNITS is a multiple of 2.
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=730909fa858bd691095bc23655077aa13b7941a9
 
@@ -21,10 +21,10 @@ gcc/
  1 file changed, 5 insertions(+), 2 deletions(-)
 
 diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
-index af477c31a..39c0955e1 100644
+index 6cbf8085f..d02f0ce37 100644
 --- a/gcc/tree-vect-slp.cc
 +++ b/gcc/tree-vect-slp.cc
-@@ -399,10 +399,13 @@ can_duplicate_and_interleave_p (vec_info *vinfo, unsigned int count,
+@@ -401,10 +401,13 @@ can_duplicate_and_interleave_p (vec_info *vinfo, unsigned int count,
  	    (GET_MODE_BITSIZE (int_mode), 1);
  	  tree vector_type
  	    = get_vectype_for_scalar_type (vinfo, int_type, count);
@@ -39,7 +39,7 @@ index af477c31a..39c0955e1 100644
  	    {
  	      /* Try fusing consecutive sequences of COUNT / NVECTORS elements
  		 together into elements of type INT_TYPE and using the result
-@@ -410,7 +413,7 @@ can_duplicate_and_interleave_p (vec_info *vinfo, unsigned int count,
+@@ -412,7 +415,7 @@ can_duplicate_and_interleave_p (vec_info *vinfo, unsigned int count,
  	      poly_uint64 nelts = GET_MODE_NUNITS (TYPE_MODE (vector_type));
  	      vec_perm_builder sel1 (nelts, 2, 3);
  	      vec_perm_builder sel2 (nelts, 2, 3);
@@ -49,5 +49,5 @@ index af477c31a..39c0955e1 100644
  		{
  		  sel1.quick_push (i);
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0146-Add-operator-to-gimple_stmt_iterator-and-gphi_iterat.patch b/0249-Backport-SME-Add-operator-to-gimple_stmt_iterator-an.patch
similarity index 86%
rename from SME-0146-Add-operator-to-gimple_stmt_iterator-and-gphi_iterat.patch
rename to 0249-Backport-SME-Add-operator-to-gimple_stmt_iterator-an.patch
index cbef79812d894ac9aea4e43157ca03dbdd700f7c..293df254baa78e3dd55c503b67ab0a80be326726 100644
--- a/SME-0146-Add-operator-to-gimple_stmt_iterator-and-gphi_iterat.patch
+++ b/0249-Backport-SME-Add-operator-to-gimple_stmt_iterator-an.patch
@@ -1,7 +1,8 @@
-From 97fba4337709aaaaa08375e6990887ea314bf8e3 Mon Sep 17 00:00:00 2001
+From b0ca9a6eb1406a60eec566cf302790bee89879af Mon Sep 17 00:00:00 2001
 From: Richard Biener 
 Date: Tue, 18 Apr 2023 16:58:26 +0200
-Subject: [PATCH 2/3] Add operator* to gimple_stmt_iterator and gphi_iterator
+Subject: [PATCH 150/157] [Backport][SME] Add operator* to gimple_stmt_iterator
+ and gphi_iterator
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=c39cdd9e654540f74cd2478019c40f1611554a44
 
@@ -37,5 +38,5 @@ index 216ebee24..5d281e4f7 100644
    {
      return as_a  (ptr);
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-0147-tree-optimization-110221-SLP-and-loop-mask-len.patch b/0250-Backport-SME-tree-optimization-110221-SLP-and-loop-m.patch
similarity index 90%
rename from SME-0147-tree-optimization-110221-SLP-and-loop-mask-len.patch
rename to 0250-Backport-SME-tree-optimization-110221-SLP-and-loop-m.patch
index b6eb11c7dec1a6fe433d37a12d7a1b913b892e91..16a75f8b1bb1cfc25af66a0f88e2c42b1f13b99e 100644
--- a/SME-0147-tree-optimization-110221-SLP-and-loop-mask-len.patch
+++ b/0250-Backport-SME-tree-optimization-110221-SLP-and-loop-m.patch
@@ -1,7 +1,8 @@
-From 2379b38302ea3548d8c1ee19f90c28b411ba48b5 Mon Sep 17 00:00:00 2001
+From 90518c07dfb770b680fd8bdba76dd1b39103277d Mon Sep 17 00:00:00 2001
 From: Richard Biener 
 Date: Fri, 10 Nov 2023 12:39:11 +0100
-Subject: [PATCH 3/3] tree-optimization/110221 - SLP and loop mask/len
+Subject: [PATCH 151/157] [Backport][SME] tree-optimization/110221 - SLP and
+ loop mask/len
 
 Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=e5f1956498251a4973d52c8aad3faf34d0443169
 
@@ -49,10 +50,10 @@ index 000000000..8b5738431
 +70      CDJ(K)=-(K+V0)/Z*CBJ(K)+CBJ(K-1)
 +      END
 diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
-index 39c0955e1..71f20cf56 100644
+index d02f0ce37..e3e246977 100644
 --- a/gcc/tree-vect-slp.cc
 +++ b/gcc/tree-vect-slp.cc
-@@ -7266,6 +7266,16 @@ vect_schedule_slp_node (vec_info *vinfo,
+@@ -8531,6 +8531,16 @@ vect_schedule_slp_node (vec_info *vinfo,
        /* Emit other stmts after the children vectorized defs which is
  	 earliest possible.  */
        gimple *last_stmt = NULL;
@@ -70,5 +71,5 @@ index 39c0955e1..71f20cf56 100644
        FOR_EACH_VEC_ELT (SLP_TREE_CHILDREN (node), i, child)
  	if (SLP_TREE_DEF_TYPE (child) == vect_internal_def)
 -- 
-2.19.1
+2.33.0
 
diff --git a/SME-Adapt-some-testsuites.patch b/0251-SME-Adapt-some-testsuites.patch
similarity index 97%
rename from SME-Adapt-some-testsuites.patch
rename to 0251-SME-Adapt-some-testsuites.patch
index 37d7da070e25f029a3d795e0496608452bef5ee4..1653f532a107f92263e53740988dabf9f42ced07 100644
--- a/SME-Adapt-some-testsuites.patch
+++ b/0251-SME-Adapt-some-testsuites.patch
@@ -1,7 +1,7 @@
-From e5ba70dfbdb72ee117e37442827541f6a07b8903 Mon Sep 17 00:00:00 2001
+From b60c29e6658c8620f1116ce5a38a6eb823af64e6 Mon Sep 17 00:00:00 2001
 From: xiezhiheng 
 Date: Thu, 7 Mar 2024 10:22:39 +0800
-Subject: [PATCH 144/144] Adapt some testsuites
+Subject: [PATCH 152/157] [SME] Adapt some testsuites
 
 gcc.target/aarch64/sme/aarch64-sme-acle-asm.exp:
   GCC 12.3.0 do not support -std=c23 and -std=gnu23
@@ -112,5 +112,5 @@ index cec0abf0e..a764a7c89 100644
  **	add	(x[0-9]+), x29, #?16
  **	msr	tpidr2_el0, \4
 -- 
-2.19.1
+2.33.0
 
diff --git a/0252-SME-Fix-error-by-backported-patches-and-IPA-prefetch.patch b/0252-SME-Fix-error-by-backported-patches-and-IPA-prefetch.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6018ee769eca6dd1fd9253486f27406cf32d5de6
--- /dev/null
+++ b/0252-SME-Fix-error-by-backported-patches-and-IPA-prefetch.patch
@@ -0,0 +1,43 @@
+From ce53aec1f43f79c093db662a2e8e3062462757b4 Mon Sep 17 00:00:00 2001
+From: xiezhiheng 
+Date: Thu, 22 Aug 2024 16:35:28 +0800
+Subject: [PATCH 153/157] [SME] Fix error by backported patches and IPA
+ prefetch
+
+Fix
+gtype-desc.cc: In function 'void gt_pch_p_30vec_cgraph_node__va_gc_atomic_(void*, void*, gt_pointer_operator, void*)':
+gtype-desc.cc:11032:35: error: call of overloaded 'gt_pch_nx(vec*, void (*&)(void*, void*, void*), void*&)' is ambiguous
+11032 |     gt_pch_nx (&((*x)), op, cookie);
+      |                                   ^
+In file included from ../../gcc/hash-table.h:248,
+                 from ../../gcc/coretypes.h:486,
+                 from gtype-desc.cc:23:
+../../gcc/vec.h:1395:1: note: candidate: 'void gt_pch_nx(vec*, gt_pointer_operator, void*) [with T = cgraph_node; A = va_gc_atomic; gt_pointer_operator = void (*)(void*, void*, void*)]'
+ 1395 | gt_pch_nx (vec *v, gt_pointer_operator op, void *cookie)
+      | ^~~~~~~~~
+../../gcc/vec.h:1403:1: note: candidate: 'void gt_pch_nx(vec*, gt_pointer_operator, void*) [with T = cgraph_node*; A = va_gc_atomic; gt_pointer_operator = void (*)(void*, void*, void*)]'
+ 1403 | gt_pch_nx (vec *v, gt_pointer_operator op, void *cookie)
+      | ^~~~~~~~~
+../../gcc/vec.h:1412:1: note: candidate: 'void gt_pch_nx(vec*, gt_pointer_operator, void*) [with T = cgraph_node*; gt_pointer_operator = void (*)(void*, void*, void*)]'
+ 1412 | gt_pch_nx (vec *, gt_pointer_operator, void *)
+      | ^~~~~~~~~
+---
+ gcc/cgraph.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/cgraph.h b/gcc/cgraph.h
+index b84ff2f98..2332539e5 100644
+--- a/gcc/cgraph.h
++++ b/gcc/cgraph.h
+@@ -1660,7 +1660,7 @@ public:
+   /* ECF flags determined from the caller.  */
+   int ecf_flags;
+   /* Vector of potential call targets determined by analysis.  */
+-  vec *targets;
++  vec *targets;
+ 
+   /* Number of speculative call targets, it's less than GCOV_TOPN_VALUES.  */
+   unsigned num_speculative_call_targets : 16;
+-- 
+2.33.0
+
diff --git a/0253-aarch64-Fix-return-register-handling-in-untyped_call.patch b/0253-aarch64-Fix-return-register-handling-in-untyped_call.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0bddd1aa8e3a49a8a38639d66dbf88a45bd1659c
--- /dev/null
+++ b/0253-aarch64-Fix-return-register-handling-in-untyped_call.patch
@@ -0,0 +1,66 @@
+From 38d0605ac8bc90324170041676fc05e7e595769e Mon Sep 17 00:00:00 2001
+From: Richard Sandiford 
+Date: Wed, 20 Sep 2023 11:13:19 +0100
+Subject: [PATCH] aarch64: Fix return register handling in untyped_call
+
+While working on another patch, I hit a problem with the aarch64
+expansion of untyped_call.  The expander emits the usual:
+
+  (set (mem ...) (reg resN))
+
+instructions to store the result registers to memory, but it didn't
+say in RTL where those resN results came from.  This eventually led
+to a failure of gcc.dg/torture/stackalign/builtin-return-2.c,
+via regrename.
+
+This patch turns the untyped call from a plain call to a call_value,
+to represent that the call returns (or might return) a useful value.
+The patch also uses a PARALLEL return rtx to represent all the possible
+return registers.
+
+gcc/
+	* config/aarch64/aarch64.md (untyped_call): Emit a call_value
+	rather than a call.  List each possible destination register
+	in the call pattern.
+
+(cherry picked from commit 629efe27744d13c3b83bbe8338b84c37c83dbe4f)
+---
+ gcc/config/aarch64/aarch64.md | 20 +++++++++++++++++++-
+ 1 file changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index b9b3ba4f9164..cd568f3daa0f 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -1159,9 +1159,27 @@
+ {
+   int i;
+ 
++  /* Generate a PARALLEL that contains all of the register results.
++     The offsets are somewhat arbitrary, since we don't know the
++     actual return type.  The main thing we need to avoid is having
++     overlapping byte ranges, since those might give the impression
++     that two registers are known to have data in common.  */
++  rtvec rets = rtvec_alloc (XVECLEN (operands[2], 0));
++  poly_int64 offset = 0;
++  for (i = 0; i < XVECLEN (operands[2], 0); i++)
++    {
++      rtx reg = SET_SRC (XVECEXP (operands[2], 0, i));
++      gcc_assert (REG_P (reg));
++      rtx offset_rtx = gen_int_mode (offset, Pmode);
++      rtx piece = gen_rtx_EXPR_LIST (VOIDmode, reg, offset_rtx);
++      RTVEC_ELT (rets, i) = piece;
++      offset += GET_MODE_SIZE (GET_MODE (reg));
++    }
++  rtx ret = gen_rtx_PARALLEL (VOIDmode, rets);
++
+   /* Untyped calls always use the default ABI.  It's only possible to use
+      ABI variants if we know the type of the target function.  */
+-  emit_call_insn (gen_call (operands[0], const0_rtx, const0_rtx));
++  emit_call_insn (gen_call_value (ret, operands[0], const0_rtx, const0_rtx));
+ 
+   for (i = 0; i < XVECLEN (operands[2], 0); i++)
+     {
+-- 
+2.43.5
+
diff --git a/0254-aarch64-Fix-loose-ldpstp-check.patch b/0254-aarch64-Fix-loose-ldpstp-check.patch
new file mode 100644
index 0000000000000000000000000000000000000000..756a444a9ce4dafff8f8bacc405548f80e2b606b
--- /dev/null
+++ b/0254-aarch64-Fix-loose-ldpstp-check.patch
@@ -0,0 +1,119 @@
+From 74f99f1adc696f446115f36974a3f94f66294a53 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford 
+Date: Wed, 20 Sep 2023 11:13:20 +0100
+Subject: [PATCH] aarch64: Fix loose ldpstp check [PR111411]
+
+aarch64_operands_ok_for_ldpstp contained the code:
+
+  /* One of the memory accesses must be a mempair operand.
+     If it is not the first one, they need to be swapped by the
+     peephole.  */
+  if (!aarch64_mem_pair_operand (mem_1, GET_MODE (mem_1))
+       && !aarch64_mem_pair_operand (mem_2, GET_MODE (mem_2)))
+    return false;
+
+But the requirement isn't just that one of the accesses must be a
+valid mempair operand.  It's that the lower access must be, since
+that's the access that will be used for the instruction operand.
+
+gcc/
+	PR target/111411
+	* config/aarch64/aarch64.cc (aarch64_operands_ok_for_ldpstp): Require
+	the lower memory access to a mem-pair operand.
+
+gcc/testsuite/
+	PR target/111411
+	* gcc.dg/rtl/aarch64/pr111411.c: New test.
+
+(cherry picked from commit 2d38f45bcca62ca0c7afef4b579f82c5c2a01610)
+---
+ gcc/config/aarch64/aarch64.cc               |  8 ++-
+ gcc/testsuite/gcc.dg/rtl/aarch64/pr111411.c | 57 +++++++++++++++++++++
+ 2 files changed, 60 insertions(+), 5 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/rtl/aarch64/pr111411.c
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 96c3f48fdc49..a979accd90a9 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -26031,11 +26031,9 @@ aarch64_operands_ok_for_ldpstp (rtx *operands, bool load,
+   gcc_assert (known_eq (GET_MODE_SIZE (GET_MODE (mem_1)),
+ 			GET_MODE_SIZE (GET_MODE (mem_2))));
+ 
+-  /* One of the memory accesses must be a mempair operand.
+-     If it is not the first one, they need to be swapped by the
+-     peephole.  */
+-  if (!aarch64_mem_pair_operand (mem_1, GET_MODE (mem_1))
+-       && !aarch64_mem_pair_operand (mem_2, GET_MODE (mem_2)))
++  /* The lower memory access must be a mem-pair operand.  */
++  rtx lower_mem = reversed ? mem_2 : mem_1;
++  if (!aarch64_mem_pair_operand (lower_mem, GET_MODE (lower_mem)))
+     return false;
+ 
+   if (REG_P (reg_1) && FP_REGNUM_P (REGNO (reg_1)))
+diff --git a/gcc/testsuite/gcc.dg/rtl/aarch64/pr111411.c b/gcc/testsuite/gcc.dg/rtl/aarch64/pr111411.c
+new file mode 100644
+index 000000000000..ad07e9c6c893
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/rtl/aarch64/pr111411.c
+@@ -0,0 +1,57 @@
++/* { dg-do compile { target aarch64*-*-* } } */
++/* { dg-require-effective-target lp64 } */
++/* { dg-options "-O -fdisable-rtl-postreload -fpeephole2 -fno-schedule-fusion" } */
++
++extern int data[];
++
++void __RTL (startwith ("ira")) foo (void *ptr)
++{
++  (function "foo"
++    (param "ptr"
++      (DECL_RTL (reg/v:DI <0> [ ptr ]))
++      (DECL_RTL_INCOMING (reg/v:DI x0 [ ptr ]))
++    ) ;; param "ptr"
++    (insn-chain
++      (block 2
++	(edge-from entry (flags "FALLTHRU"))
++	(cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++	(insn 4 (set (reg:DI <0>) (reg:DI x0)))
++	(insn 5 (set (reg:DI <1>)
++		     (plus:DI (reg:DI <0>) (const_int 768))))
++	(insn 6 (set (mem:SI (plus:DI (reg:DI <0>)
++				      (const_int 508)) [1 &data+508 S4 A4])
++		     (const_int 0)))
++	(insn 7 (set (mem:SI (plus:DI (reg:DI <1>)
++				      (const_int -256)) [1 &data+512 S4 A4])
++		     (const_int 0)))
++	(edge-to exit (flags "FALLTHRU"))
++      ) ;; block 2
++    ) ;; insn-chain
++  ) ;; function
++}
++
++void __RTL (startwith ("ira")) bar (void *ptr)
++{
++  (function "bar"
++    (param "ptr"
++      (DECL_RTL (reg/v:DI <0> [ ptr ]))
++      (DECL_RTL_INCOMING (reg/v:DI x0 [ ptr ]))
++    ) ;; param "ptr"
++    (insn-chain
++      (block 2
++	(edge-from entry (flags "FALLTHRU"))
++	(cnote 3 [bb 2] NOTE_INSN_BASIC_BLOCK)
++	(insn 4 (set (reg:DI <0>) (reg:DI x0)))
++	(insn 5 (set (reg:DI <1>)
++		     (plus:DI (reg:DI <0>) (const_int 768))))
++	(insn 6 (set (mem:SI (plus:DI (reg:DI <1>)
++				      (const_int -256)) [1 &data+512 S4 A4])
++		     (const_int 0)))
++	(insn 7 (set (mem:SI (plus:DI (reg:DI <0>)
++				      (const_int 508)) [1 &data+508 S4 A4])
++		     (const_int 0)))
++	(edge-to exit (flags "FALLTHRU"))
++      ) ;; block 2
++    ) ;; insn-chain
++  ) ;; function
++}
+-- 
+2.43.5
+
diff --git a/0255-x86-Add-a-new-option-mdaz-ftz-to-enable-FTZ-and-DAZ-.patch b/0255-x86-Add-a-new-option-mdaz-ftz-to-enable-FTZ-and-DAZ-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d96f5522cbd50867bcc32a75bebd762023166466
--- /dev/null
+++ b/0255-x86-Add-a-new-option-mdaz-ftz-to-enable-FTZ-and-DAZ-.patch
@@ -0,0 +1,135 @@
+From 1649f9fbbc5267de2a675336d3ac665528a03db8 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Wed, 10 May 2023 15:16:58 +0800
+Subject: [PATCH 01/28] x86: Add a new option -mdaz-ftz to enable FTZ and DAZ
+ flags in MXCSR.
+
+    if (mdaz-ftz)
+      link crtfastmath.o
+    else if ((Ofast || ffast-math || funsafe-math-optimizations)
+             && !mno-daz-ftz)
+      link crtfastmath.o
+    else
+      Don't link crtfastmath.o
+
+gcc/ChangeLog:
+
+	* config/i386/cygwin.h (ENDFILE_SPEC): Link crtfastmath.o
+	whenever -mdaz-ftz is specified. Don't link crtfastmath.o
+	when -mno-daz-ftz is specified.
+	* config/i386/darwin.h (ENDFILE_SPEC): Ditto.
+	* config/i386/gnu-user-common.h
+	(GNU_USER_TARGET_MATHFILE_SPEC): Ditto.
+	* config/i386/mingw32.h (ENDFILE_SPEC): Ditto.
+	* config/i386/i386.opt (mdaz-ftz): New option.
+	* doc/invoke.texi (x86 options): Document mftz-daz.
+---
+ gcc/config/i386/cygwin.h          |  2 +-
+ gcc/config/i386/darwin.h          |  4 ++--
+ gcc/config/i386/gnu-user-common.h |  2 +-
+ gcc/config/i386/i386.opt          |  4 ++++
+ gcc/config/i386/mingw32.h         |  2 +-
+ gcc/doc/invoke.texi               | 11 ++++++++++-
+ 6 files changed, 19 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/config/i386/cygwin.h b/gcc/config/i386/cygwin.h
+index d06eda369..5412c5d44 100644
+--- a/gcc/config/i386/cygwin.h
++++ b/gcc/config/i386/cygwin.h
+@@ -57,7 +57,7 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ #undef ENDFILE_SPEC
+ #define ENDFILE_SPEC \
+-  "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}\
++  "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+    %{!shared:%:if-exists(default-manifest.o%s)}\
+    %{fvtable-verify=none:%s; \
+     fvtable-verify=preinit:vtv_end.o%s; \
+diff --git a/gcc/config/i386/darwin.h b/gcc/config/i386/darwin.h
+index a55f6b2b8..2f773924d 100644
+--- a/gcc/config/i386/darwin.h
++++ b/gcc/config/i386/darwin.h
+@@ -109,8 +109,8 @@ along with GCC; see the file COPYING3.  If not see
+ "%{!force_cpusubtype_ALL:-force_cpusubtype_ALL} "
+ 
+ #undef ENDFILE_SPEC
+-#define ENDFILE_SPEC \
+-  "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
++#define ENDFILE_SPEC
++\  "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+    %{mpc32:crtprec32.o%s} \
+    %{mpc64:crtprec64.o%s} \
+    %{mpc80:crtprec80.o%s}" TM_DESTRUCTOR
+diff --git a/gcc/config/i386/gnu-user-common.h b/gcc/config/i386/gnu-user-common.h
+index 23b54c5be..3d2a33f17 100644
+--- a/gcc/config/i386/gnu-user-common.h
++++ b/gcc/config/i386/gnu-user-common.h
+@@ -47,7 +47,7 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ /* Similar to standard GNU userspace, but adding -ffast-math support.  */
+ #define GNU_USER_TARGET_MATHFILE_SPEC \
+-  "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
++  "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+    %{mpc32:crtprec32.o%s} \
+    %{mpc64:crtprec64.o%s} \
+    %{mpc80:crtprec80.o%s}"
+diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
+index fc1b944ac..498fb454d 100644
+--- a/gcc/config/i386/i386.opt
++++ b/gcc/config/i386/i386.opt
+@@ -420,6 +420,10 @@ mpc80
+ Target RejectNegative
+ Set 80387 floating-point precision to 80-bit.
+ 
++mdaz-ftz
++Target
++Set the FTZ and DAZ Flags.
++
+ mpreferred-stack-boundary=
+ Target RejectNegative Joined UInteger Var(ix86_preferred_stack_boundary_arg)
+ Attempt to keep stack aligned to this power of 2.
+diff --git a/gcc/config/i386/mingw32.h b/gcc/config/i386/mingw32.h
+index d3ca0cd02..ddbe6a405 100644
+--- a/gcc/config/i386/mingw32.h
++++ b/gcc/config/i386/mingw32.h
+@@ -197,7 +197,7 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ #undef ENDFILE_SPEC
+ #define ENDFILE_SPEC \
+-  "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s} \
++  "%{mdaz-ftz:crtfastmath.o%s;Ofast|ffast-math|funsafe-math-optimizations:%{!mno-daz-ftz:crtfastmath.o%s}} \
+    %{!shared:%:if-exists(default-manifest.o%s)}\
+    %{fvtable-verify=none:%s; \
+     fvtable-verify=preinit:vtv_end.o%s; \
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 2b376e0e9..3a48655e5 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1437,7 +1437,7 @@ See RS/6000 and PowerPC Options.
+ -m96bit-long-double  -mlong-double-64  -mlong-double-80  -mlong-double-128 @gol
+ -mregparm=@var{num}  -msseregparm @gol
+ -mveclibabi=@var{type}  -mvect8-ret-in-mem @gol
+--mpc32  -mpc64  -mpc80  -mstackrealign @gol
++-mpc32  -mpc64  -mpc80 -mdaz-ftz -mstackrealign @gol
+ -momit-leaf-frame-pointer  -mno-red-zone  -mno-tls-direct-seg-refs @gol
+ -mcmodel=@var{code-model}  -mabi=@var{name}  -maddress-mode=@var{mode} @gol
+ -m32  -m64  -mx32  -m16  -miamcu  -mlarge-data-threshold=@var{num} @gol
+@@ -32122,6 +32122,15 @@ are enabled by default; routines in such libraries could suffer significant
+ loss of accuracy, typically through so-called ``catastrophic cancellation'',
+ when this option is used to set the precision to less than extended precision.
+ 
++@item -mdaz-ftz
++@opindex mdaz-ftz
++
++The flush-to-zero (FTZ) and denormals-are-zero (DAZ) flags in the MXCSR register
+are used to control floating-point calculations. SSE and AVX instructions
++including scalar and vector instructions could benefit from enabling the FTZ
++and DAZ flags when @option{-mdaz-ftz} is specified. Don't set FTZ/DAZ flags
++when @option{-mno-daz-ftz} is specified.
++
+ @item -mstackrealign
+ @opindex mstackrealign
+ Realign the stack at entry.  On the x86, the @option{-mstackrealign}
+-- 
+2.31.1
+
diff --git a/0256-Explicitly-view_convert_expr-mask-to-signed-type-whe.patch b/0256-Explicitly-view_convert_expr-mask-to-signed-type-whe.patch
new file mode 100644
index 0000000000000000000000000000000000000000..39c89169c317fc2f55c2a5458bf3db55cb332b66
--- /dev/null
+++ b/0256-Explicitly-view_convert_expr-mask-to-signed-type-whe.patch
@@ -0,0 +1,65 @@
+From e70fa730dcfcb3a7b1d56a2e166752d4299f0504 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Mon, 5 Jun 2023 12:38:41 +0800
+Subject: [PATCH 02/28] Explicitly view_convert_expr mask to signed type when
+ folding pblendvb builtins.
+
+Since mask < 0 will be always false for vector char when
+-funsigned-char, but vpblendvb needs to check the most significant
+bit. The patch explicitly VCE to vector signed char.
+
+gcc/ChangeLog:
+
+	PR target/110108
+	* config/i386/i386.cc (ix86_gimple_fold_builtin): Explicitly
+	view_convert_expr mask to signed type when folding pblendvb
+	builtins.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr110108-2.c: New test.
+---
+ gcc/config/i386/i386.cc                    |  4 +++-
+ gcc/testsuite/gcc.target/i386/pr110108-2.c | 14 ++++++++++++++
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr110108-2.c
+
+diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
+index 462dce10e..479fc6010 100644
+--- a/gcc/config/i386/i386.cc
++++ b/gcc/config/i386/i386.cc
+@@ -18396,8 +18396,10 @@ ix86_gimple_fold_builtin (gimple_stmt_iterator *gsi)
+ 	      tree itype = GET_MODE_INNER (TYPE_MODE (type)) == E_SFmode
+ 		? intSI_type_node : intDI_type_node;
+ 	      type = get_same_sized_vectype (itype, type);
+-	      arg2 = gimple_build (&stmts, VIEW_CONVERT_EXPR, type, arg2);
+ 	    }
++	  else
++	    type = signed_type_for (type);
++	  arg2 = gimple_build (&stmts, VIEW_CONVERT_EXPR, type, arg2);
+ 	  tree zero_vec = build_zero_cst (type);
+ 	  tree cmp_type = truth_type_for (type);
+ 	  tree cmp = gimple_build (&stmts, LT_EXPR, cmp_type, arg2, zero_vec);
+diff --git a/gcc/testsuite/gcc.target/i386/pr110108-2.c b/gcc/testsuite/gcc.target/i386/pr110108-2.c
+new file mode 100644
+index 000000000..2d1d2fd49
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr110108-2.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx2 -O2 -funsigned-char" } */
++/* { dg-final { scan-assembler-times "vpblendvb" 2 } } */
++
++#include 
++__m128i do_stuff_128(__m128i X0, __m128i X1, __m128i X2) {
++  __m128i Result = _mm_blendv_epi8(X0, X1, X2);
++  return Result;
++}
++
++__m256i do_stuff_256(__m256i X0, __m256i X1, __m256i X2) {
++  __m256i Result = _mm256_blendv_epi8(X0, X1, X2);
++  return Result;
++}
+-- 
+2.31.1
+
diff --git a/0257-Make-option-mvzeroupper-independent-of-optimization-.patch b/0257-Make-option-mvzeroupper-independent-of-optimization-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ffdffb163293589aa92a1bc71bb4e1f38e48f289
--- /dev/null
+++ b/0257-Make-option-mvzeroupper-independent-of-optimization-.patch
@@ -0,0 +1,138 @@
+From 48715f03ad08f185153bfb0ff4c0802ab2d9579c Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Mon, 26 Jun 2023 09:50:25 +0800
+Subject: [PATCH 03/28] Make option mvzeroupper independent of optimization
+ level.
+
+pass_insert_vzeroupper is under condition
+
+TARGET_AVX && TARGET_VZEROUPPER
+&& flag_expensive_optimizations && !optimize_size
+
+But the document of mvzeroupper doesn't mention the insertion
+required -O2 and above, it may confuse users when they explicitly
+use -Os -mvzeroupper.
+
+------------
+mvzeroupper
+Target Mask(VZEROUPPER) Save
+Generate vzeroupper instruction before a transfer of control flow out of
+the function.
+------------
+
+The patch moves flag_expensive_optimizations && !optimize_size to
+ix86_option_override_internal. It makes -mvzeroupper independent of
+optimization level, but still keeps the behavior of architecture
+tuning(emit_vzeroupper) unchanged.
+
+gcc/ChangeLog:
+
+	* config/i386/i386-features.cc (pass_insert_vzeroupper:gate):
+	Move flag_expensive_optimizations && !optimize_size to ..
+	* config/i386/i386-options.cc (ix86_option_override_internal):
+	.. this, it makes -mvzeroupper independent of optimization
+	level, but still keeps the behavior of architecture
+	tuning(emit_vzeroupper) unchanged.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/avx-vzeroupper-29.c: New testcase.
+	* gcc.target/i386/avx-vzeroupper-12.c: Adjust testcase.
+	* gcc.target/i386/avx-vzeroupper-7.c: Ditto.
+	* gcc.target/i386/avx-vzeroupper-9.c: Ditto.
+---
+ gcc/config/i386/i386-features.cc                  |  3 +--
+ gcc/config/i386/i386-options.cc                   |  4 +++-
+ gcc/testsuite/gcc.target/i386/avx-vzeroupper-12.c |  3 ++-
+ gcc/testsuite/gcc.target/i386/avx-vzeroupper-29.c | 14 ++++++++++++++
+ gcc/testsuite/gcc.target/i386/avx-vzeroupper-7.c  |  3 ++-
+ gcc/testsuite/gcc.target/i386/avx-vzeroupper-9.c  |  3 ++-
+ 6 files changed, 24 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx-vzeroupper-29.c
+
+diff --git a/gcc/config/i386/i386-features.cc b/gcc/config/i386/i386-features.cc
+index 6fe41c3c2..6a2444eb6 100644
+--- a/gcc/config/i386/i386-features.cc
++++ b/gcc/config/i386/i386-features.cc
+@@ -1875,8 +1875,7 @@ public:
+   /* opt_pass methods: */
+   virtual bool gate (function *)
+     {
+-      return TARGET_AVX && TARGET_VZEROUPPER
+-	&& flag_expensive_optimizations && !optimize_size;
++      return TARGET_AVX && TARGET_VZEROUPPER;
+     }
+ 
+   virtual unsigned int execute (function *)
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index ff44ad4e0..74e969b68 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -2702,7 +2702,9 @@ ix86_option_override_internal (bool main_args_p,
+     sorry ("%<-mcall-ms2sysv-xlogues%> isn%'t currently supported with SEH");
+ 
+   if (!(opts_set->x_target_flags & MASK_VZEROUPPER)
+-      && TARGET_EMIT_VZEROUPPER)
++      && TARGET_EMIT_VZEROUPPER
++      && flag_expensive_optimizations
++      && !optimize_size)
+     opts->x_target_flags |= MASK_VZEROUPPER;
+   if (!(opts_set->x_target_flags & MASK_STV))
+     opts->x_target_flags |= MASK_STV;
+diff --git a/gcc/testsuite/gcc.target/i386/avx-vzeroupper-12.c b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-12.c
+index e694d4048..5a40e8783 100644
+--- a/gcc/testsuite/gcc.target/i386/avx-vzeroupper-12.c
++++ b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-12.c
+@@ -16,5 +16,6 @@ foo ()
+   _mm256_zeroupper ();
+ }
+ 
+-/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 { target ia32 } } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 5 { target { ! ia32 } } } } */
+ /* { dg-final { scan-assembler-times "\\*avx_vzeroall" 1 } } */
+diff --git a/gcc/testsuite/gcc.target/i386/avx-vzeroupper-29.c b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-29.c
+new file mode 100644
+index 000000000..4af637757
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-29.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O0 -mavx -mtune=generic -mvzeroupper -dp" } */
++
++#include 
++
++extern __m256 x, y;
++
++void
++foo ()
++{
++  x = y;
++}
++
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 1 } } */
+diff --git a/gcc/testsuite/gcc.target/i386/avx-vzeroupper-7.c b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-7.c
+index ab6d68779..75fe58897 100644
+--- a/gcc/testsuite/gcc.target/i386/avx-vzeroupper-7.c
++++ b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-7.c
+@@ -12,4 +12,5 @@ foo ()
+   _mm256_zeroupper ();
+ }
+ 
+-/* { dg-final { scan-assembler-times "avx_vzeroupper" 1 } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 1 { target ia32 } } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 2 { target { ! ia32 } } } } */
+diff --git a/gcc/testsuite/gcc.target/i386/avx-vzeroupper-9.c b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-9.c
+index 974e1626a..fa0a6dfca 100644
+--- a/gcc/testsuite/gcc.target/i386/avx-vzeroupper-9.c
++++ b/gcc/testsuite/gcc.target/i386/avx-vzeroupper-9.c
+@@ -15,4 +15,5 @@ foo ()
+   _mm256_zeroupper ();
+ }
+ 
+-/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 4 { target ia32 } } } */
++/* { dg-final { scan-assembler-times "avx_vzeroupper" 5 { target { ! ia32 } } } } */
+-- 
+2.31.1
+
diff --git a/0258-i386-Sync-tune_string-with-arch_string-for-target-at.patch b/0258-i386-Sync-tune_string-with-arch_string-for-target-at.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d36524685ae33d03009fdf5b4f1316dedb1677d9
--- /dev/null
+++ b/0258-i386-Sync-tune_string-with-arch_string-for-target-at.patch
@@ -0,0 +1,68 @@
+From 8039d773354360ed8ff2f25c63843fc637eacc67 Mon Sep 17 00:00:00 2001
+From: Hongyu Wang 
+Date: Sun, 25 Jun 2023 09:50:21 +0800
+Subject: [PATCH 04/28] i386: Sync tune_string with arch_string for target
+ attribute
+
+arch=*
+
+For function with target attribute arch=*, current logic will set its
+tune to -mtune from command line so all target_clones will get same
+tuning flags which would affect the performance for each clone. Override
+tune with arch if tune was not explicitly specified to get proper tuning
+flags for target_clones.
+
+gcc/ChangeLog:
+
+	* config/i386/i386-options.cc (ix86_valid_target_attribute_tree):
+	Override tune_string with arch_string if tune_string is not
+	explicitly specified.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/mvc17.c: New test.
+
+(cherry picked from commit 2916278d14e9ac28c361c396a67256acbebda6e8)
+---
+ gcc/config/i386/i386-options.cc       |  6 +++++-
+ gcc/testsuite/gcc.target/i386/mvc17.c | 11 +++++++++++
+ 2 files changed, 16 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/mvc17.c
+
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 74e969b68..fb2ed942f 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -1378,7 +1378,11 @@ ix86_valid_target_attribute_tree (tree fndecl, tree args,
+       if (option_strings[IX86_FUNCTION_SPECIFIC_TUNE])
+ 	opts->x_ix86_tune_string
+ 	  = ggc_strdup (option_strings[IX86_FUNCTION_SPECIFIC_TUNE]);
+-      else if (orig_tune_defaulted)
++      /* If we have explicit arch string and no tune string specified, set
++	 tune_string to NULL and later it will be overriden by arch_string
++	 so target clones can get proper optimization.  */
++      else if (option_strings[IX86_FUNCTION_SPECIFIC_ARCH]
++	       || orig_tune_defaulted)
+ 	opts->x_ix86_tune_string = NULL;
+ 
+       /* If fpmath= is not set, and we now have sse2 on 32-bit, use it.  */
+diff --git a/gcc/testsuite/gcc.target/i386/mvc17.c b/gcc/testsuite/gcc.target/i386/mvc17.c
+new file mode 100644
+index 000000000..8b83c1aec
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/mvc17.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-require-ifunc "" } */
++/* { dg-options "-O2 -march=x86-64" } */
++/* { dg-final { scan-assembler-times "rep mov" 1 } } */
++
++__attribute__((target_clones("default","arch=icelake-server")))
++void
++foo (char *a, char *b, int size)
++{
++  __builtin_memcpy (a, b, size & 0x7F);
++}
+-- 
+2.31.1
+
diff --git a/0259-Refine-maskloadmn-pattern-with-UNSPEC_MASKLOAD.patch b/0259-Refine-maskloadmn-pattern-with-UNSPEC_MASKLOAD.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2918d3e883fbd6a916fe97803e03c270a02fc4a0
--- /dev/null
+++ b/0259-Refine-maskloadmn-pattern-with-UNSPEC_MASKLOAD.patch
@@ -0,0 +1,111 @@
+From fbcb1a5899b1bd3964aed78ed74041121e618d36 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Tue, 20 Jun 2023 15:41:00 +0800
+Subject: [PATCH 05/28] Refine maskloadmn pattern with UNSPEC_MASKLOAD.
+
+If mem_addr points to a memory region with less than whole vector size
+bytes of accessible memory and k is a mask that would prevent reading
+the inaccessible bytes from mem_addr, add UNSPEC_MASKLOAD to prevent
+it to be transformed to vpblendd.
+
+gcc/ChangeLog:
+
+	PR target/110309
+	* config/i386/sse.md (maskload):
+	Refine pattern with UNSPEC_MASKLOAD.
+	(maskload): Ditto.
+	(*_load_mask): Extend mode iterator to
+	VI12HF_AVX512VL.
+	(*_load): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr110309.c: New test.
+---
+ gcc/config/i386/sse.md                   | 32 +++++++++++++-----------
+ gcc/testsuite/gcc.target/i386/pr110309.c | 10 ++++++++
+ 2 files changed, 28 insertions(+), 14 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr110309.c
+
+diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
+index eb767e56c..b30e96cb1 100644
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -1411,12 +1411,12 @@
+ })
+ 
+ (define_insn "*_load_mask"
+-  [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v")
+-	(vec_merge:VI12_AVX512VL
+-	  (unspec:VI12_AVX512VL
+-	    [(match_operand:VI12_AVX512VL 1 "memory_operand" "m")]
++  [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v")
++	(vec_merge:VI12HF_AVX512VL
++	  (unspec:VI12HF_AVX512VL
++	    [(match_operand:VI12HF_AVX512VL 1 "memory_operand" "m")]
+ 	    UNSPEC_MASKLOAD)
+-	  (match_operand:VI12_AVX512VL 2 "nonimm_or_0_operand" "0C")
++	  (match_operand:VI12HF_AVX512VL 2 "nonimm_or_0_operand" "0C")
+ 	  (match_operand: 3 "register_operand" "Yk")))]
+   "TARGET_AVX512BW"
+   "vmovdqu\t{%1, %0%{%3%}%N2|%0%{%3%}%N2, %1}"
+@@ -1425,9 +1425,9 @@
+    (set_attr "mode" "")])
+ 
+ (define_insn_and_split "*_load"
+-  [(set (match_operand:VI12_AVX512VL 0 "register_operand" "=v")
+-	(unspec:VI12_AVX512VL
+-	  [(match_operand:VI12_AVX512VL 1 "memory_operand" "m")]
++  [(set (match_operand:VI12HF_AVX512VL 0 "register_operand" "=v")
++	(unspec:VI12HF_AVX512VL
++	  [(match_operand:VI12HF_AVX512VL 1 "memory_operand" "m")]
+ 	  UNSPEC_MASKLOAD))]
+   "TARGET_AVX512BW"
+   "#"
+@@ -25973,17 +25973,21 @@
+   "TARGET_AVX")
+ 
+ (define_expand "maskload"
+-  [(set (match_operand:V48H_AVX512VL 0 "register_operand")
+-	(vec_merge:V48H_AVX512VL
+-	  (match_operand:V48H_AVX512VL 1 "memory_operand")
++  [(set (match_operand:V48_AVX512VL 0 "register_operand")
++	(vec_merge:V48_AVX512VL
++	  (unspec:V48_AVX512VL
++	    [(match_operand:V48_AVX512VL 1 "memory_operand")]
++	    UNSPEC_MASKLOAD)
+ 	  (match_dup 0)
+ 	  (match_operand: 2 "register_operand")))]
+   "TARGET_AVX512F")
+ 
+ (define_expand "maskload"
+-  [(set (match_operand:VI12_AVX512VL 0 "register_operand")
+-	(vec_merge:VI12_AVX512VL
+-	  (match_operand:VI12_AVX512VL 1 "memory_operand")
++  [(set (match_operand:VI12HF_AVX512VL 0 "register_operand")
++	(vec_merge:VI12HF_AVX512VL
++	  (unspec:VI12HF_AVX512VL
++	    [(match_operand:VI12HF_AVX512VL 1 "memory_operand")]
++	    UNSPEC_MASKLOAD)
+ 	  (match_dup 0)
+ 	  (match_operand: 2 "register_operand")))]
+   "TARGET_AVX512BW")
+diff --git a/gcc/testsuite/gcc.target/i386/pr110309.c b/gcc/testsuite/gcc.target/i386/pr110309.c
+new file mode 100644
+index 000000000..f6e9e9c3c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr110309.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 --param vect-partial-vector-usage=1 -march=znver4 -mprefer-vector-width=256" } */
++/* { dg-final { scan-assembler-not {(?n)vpblendd.*ymm} } } */
++
++
++void foo (int * __restrict a, int *b)
++{
++  for (int i = 0; i < 6; ++i)
++    a[i] = b[i] + 42;
++}
+-- 
+2.31.1
+
diff --git a/0260-Refine-maskstore-patterns-with-UNSPEC_MASKMOV.patch b/0260-Refine-maskstore-patterns-with-UNSPEC_MASKMOV.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e0546c3b73ece06acdb38e181e6f457a656ff281
--- /dev/null
+++ b/0260-Refine-maskstore-patterns-with-UNSPEC_MASKMOV.patch
@@ -0,0 +1,126 @@
+From 5ad28ef4010c1248b4d94396d03f863705f7b0db Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Mon, 26 Jun 2023 21:07:09 +0800
+Subject: [PATCH 06/28] Refine maskstore patterns with UNSPEC_MASKMOV.
+
+Similar like r14-2070-gc79476da46728e
+
+If mem_addr points to a memory region with less than whole vector size
+bytes of accessible memory and k is a mask that would prevent reading
+the inaccessible bytes from mem_addr, add UNSPEC_MASKMOV to prevent
+it to be transformed to any other whole memory access instructions.
+
+gcc/ChangeLog:
+
+	PR rtl-optimization/110237
+	* config/i386/sse.md (_store_mask): Refine with
+	UNSPEC_MASKMOV.
+	(maskstore_store_mask): New define_insn, it's renamed
+	from original _store_mask.
+---
+ gcc/config/i386/sse.md | 69 ++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 57 insertions(+), 12 deletions(-)
+
+diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
+index b30e96cb1..3af159896 100644
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -1554,7 +1554,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "")])
+ 
+-(define_insn "_store_mask"
++(define_insn "*_store_mask"
+   [(set (match_operand:V48_AVX512VL 0 "memory_operand" "=m")
+ 	(vec_merge:V48_AVX512VL
+ 	  (match_operand:V48_AVX512VL 1 "register_operand" "v")
+@@ -1582,7 +1582,7 @@
+    (set_attr "memory" "store")
+    (set_attr "mode" "")])
+ 
+-(define_insn "_store_mask"
++(define_insn "*_store_mask"
+   [(set (match_operand:VI12HF_AVX512VL 0 "memory_operand" "=m")
+ 	(vec_merge:VI12HF_AVX512VL
+ 	  (match_operand:VI12HF_AVX512VL 1 "register_operand" "v")
+@@ -26002,21 +26002,66 @@
+   "TARGET_AVX")
+ 
+ (define_expand "maskstore"
+-  [(set (match_operand:V48H_AVX512VL 0 "memory_operand")
+-	(vec_merge:V48H_AVX512VL
+-	  (match_operand:V48H_AVX512VL 1 "register_operand")
+-	  (match_dup 0)
+-	  (match_operand: 2 "register_operand")))]
++  [(set (match_operand:V48_AVX512VL 0 "memory_operand")
++	(unspec:V48_AVX512VL
++	  [(match_operand:V48_AVX512VL 1 "register_operand")
++	   (match_dup 0)
++	   (match_operand: 2 "register_operand")]
++	  UNSPEC_MASKMOV))]
+   "TARGET_AVX512F")
+ 
+ (define_expand "maskstore"
+-  [(set (match_operand:VI12_AVX512VL 0 "memory_operand")
+-	(vec_merge:VI12_AVX512VL
+-	  (match_operand:VI12_AVX512VL 1 "register_operand")
+-	  (match_dup 0)
+-	  (match_operand: 2 "register_operand")))]
++  [(set (match_operand:VI12HF_AVX512VL 0 "memory_operand")
++	(unspec:VI12HF_AVX512VL
++	  [(match_operand:VI12HF_AVX512VL 1 "register_operand")
++	   (match_dup 0)
++	   (match_operand: 2 "register_operand")]
++	  UNSPEC_MASKMOV))]
+   "TARGET_AVX512BW")
+ 
++(define_insn "_store_mask"
++  [(set (match_operand:V48_AVX512VL 0 "memory_operand" "=m")
++	(unspec:V48_AVX512VL
++	  [(match_operand:V48_AVX512VL 1 "register_operand" "v")
++	   (match_dup 0)
++	   (match_operand: 2 "register_operand" "Yk")]
++	  UNSPEC_MASKMOV))]
++  "TARGET_AVX512F"
++{
++  if (FLOAT_MODE_P (GET_MODE_INNER (mode)))
++    {
++      if (misaligned_operand (operands[0], mode))
++	return "vmovu\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++      else
++	return "vmova\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++    }
++  else
++    {
++      if (misaligned_operand (operands[0], mode))
++	return "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++      else
++	return "vmovdqa\t{%1, %0%{%2%}|%0%{%2%}, %1}";
++    }
++}
++  [(set_attr "type" "ssemov")
++   (set_attr "prefix" "evex")
++   (set_attr "memory" "store")
++   (set_attr "mode" "")])
++
++(define_insn "_store_mask"
++  [(set (match_operand:VI12HF_AVX512VL 0 "memory_operand" "=m")
++	(unspec:VI12HF_AVX512VL
++	  [(match_operand:VI12HF_AVX512VL 1 "register_operand" "v")
++	   (match_dup 0)
++	   (match_operand: 2 "register_operand" "Yk")]
++	   UNSPEC_MASKMOV))]
++  "TARGET_AVX512BW"
++  "vmovdqu\t{%1, %0%{%2%}|%0%{%2%}, %1}"
++  [(set_attr "type" "ssemov")
++   (set_attr "prefix" "evex")
++   (set_attr "memory" "store")
++   (set_attr "mode" "")])
++
+ (define_expand "cbranch4"
+   [(set (reg:CC FLAGS_REG)
+ 	(compare:CC (match_operand:VI48_AVX 1 "register_operand")
+-- 
+2.31.1
+
diff --git a/0261-x86-Update-model-values-for-Alderlake-and-Rocketlake.patch b/0261-x86-Update-model-values-for-Alderlake-and-Rocketlake.patch
new file mode 100644
index 0000000000000000000000000000000000000000..889bbdc79c94762611b0c58321e0348536e9f67d
--- /dev/null
+++ b/0261-x86-Update-model-values-for-Alderlake-and-Rocketlake.patch
@@ -0,0 +1,38 @@
+From 50757adc93ef32a97a8a1083f5d53a9c00da6ac8 Mon Sep 17 00:00:00 2001
+From: "Cui, Lili" 
+Date: Thu, 29 Jun 2023 03:10:35 +0000
+Subject: [PATCH 07/28] x86: Update model values for Alderlake and Rocketlake.
+
+Update model values for Alderlake and Rocketlake according to SDM.
+
+gcc/ChangeLog
+
+	* common/config/i386/cpuinfo.h (get_intel_cpu): Remove model value 0xa8
+	from Rocketlake, remove model value 0xbf from Alderlake.
+---
+ gcc/common/config/i386/cpuinfo.h | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 0333da56b..28b2ff0b0 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -435,7 +435,6 @@ get_intel_cpu (struct __processor_model *cpu_model,
+       cpu_model->__cpu_subtype = INTEL_COREI7_SKYLAKE;
+       break;
+     case 0xa7:
+-    case 0xa8:
+       /* Rocket Lake.  */
+       cpu = "rocketlake";
+       CHECK___builtin_cpu_is ("corei7");
+@@ -508,7 +507,6 @@ get_intel_cpu (struct __processor_model *cpu_model,
+       break;
+     case 0x97:
+     case 0x9a:
+-    case 0xbf:
+       /* Alder Lake.  */
+       cpu = "alderlake";
+       CHECK___builtin_cpu_is ("corei7");
+-- 
+2.31.1
+
diff --git a/0262-Workaround-possible-CPUID-bug-in-Sandy-Bridge.patch b/0262-Workaround-possible-CPUID-bug-in-Sandy-Bridge.patch
new file mode 100644
index 0000000000000000000000000000000000000000..046351c61d9d3948304b4c92837d81a909559ef3
--- /dev/null
+++ b/0262-Workaround-possible-CPUID-bug-in-Sandy-Bridge.patch
@@ -0,0 +1,78 @@
+From 60364b439a80c217174e1830e0b7507d6f4538c4 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Fri, 4 Aug 2023 09:27:39 +0800
+Subject: [PATCH 08/28] Workaround possible CPUID bug in Sandy Bridge.
+
+Don't access leaf 7 subleaf 1 unless subleaf 0 says it is
+supported via EAX.
+
+Intel documentation says invalid subleaves return 0. We had been
+relying on that behavior instead of checking the max sublef number.
+
+It appears that some Sandy Bridge CPUs return at least the subleaf 0
+EDX value for subleaf 1. Best guess is that this is a bug in a
+microcode patch since all of the bits we're seeing set in EDX were
+introduced after Sandy Bridge was originally released.
+
+This is causing avxvnniint16 to be incorrectly enabled with
+-march=native on these CPUs.
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h (get_available_features): Check
+	max_subleaf_level for valid subleaf before use CPUID.
+---
+ gcc/common/config/i386/cpuinfo.h | 29 +++++++++++++++++------------
+ 1 file changed, 17 insertions(+), 12 deletions(-)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 28b2ff0b0..316ad3cb3 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -647,7 +647,9 @@ get_available_features (struct __processor_model *cpu_model,
+   /* Get Advanced Features at level 7 (eax = 7, ecx = 0/1). */
+   if (max_cpuid_level >= 7)
+     {
+-      __cpuid_count (7, 0, eax, ebx, ecx, edx);
++      unsigned int max_subleaf_level;
++
++      __cpuid_count (7, 0, max_subleaf_level, ebx, ecx, edx);
+       if (ebx & bit_BMI)
+ 	set_feature (FEATURE_BMI);
+       if (ebx & bit_SGX)
+@@ -759,18 +761,21 @@ get_available_features (struct __processor_model *cpu_model,
+ 	    set_feature (FEATURE_AVX512FP16);
+ 	}
+ 
+-      __cpuid_count (7, 1, eax, ebx, ecx, edx);
+-      if (eax & bit_HRESET)
+-	set_feature (FEATURE_HRESET);
+-      if (avx_usable)
+-	{
+-	  if (eax & bit_AVXVNNI)
+-	    set_feature (FEATURE_AVXVNNI);
+-	}
+-      if (avx512_usable)
++      if (max_subleaf_level >= 1)
+ 	{
+-	  if (eax & bit_AVX512BF16)
+-	    set_feature (FEATURE_AVX512BF16);
++	  __cpuid_count (7, 1, eax, ebx, ecx, edx);
++	  if (eax & bit_HRESET)
++	    set_feature (FEATURE_HRESET);
++	  if (avx_usable)
++	    {
++	      if (eax & bit_AVXVNNI)
++		set_feature (FEATURE_AVXVNNI);
++	    }
++	  if (avx512_usable)
++	    {
++	      if (eax & bit_AVX512BF16)
++		set_feature (FEATURE_AVX512BF16);
++	    }
+ 	}
+     }
+ 
+-- 
+2.31.1
+
diff --git a/0263-Software-mitigation-Disable-gather-generation-in-vec.patch b/0263-Software-mitigation-Disable-gather-generation-in-vec.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2a1e4e7928d606a9096cae0329d568cee39fccd2
--- /dev/null
+++ b/0263-Software-mitigation-Disable-gather-generation-in-vec.patch
@@ -0,0 +1,220 @@
+From cfffbec938afdc45c31db5ec282ce21ad1ba2dc7 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Thu, 10 Aug 2023 11:41:39 +0800
+Subject: [PATCH 09/28] Software mitigation: Disable gather generation in
+ vectorization for GDS affected Intel Processors.
+
+For more details of GDS (Gather Data Sampling), refer to
+https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/gather-data-sampling.html
+
+After microcode update, there's performance regression. To avoid that,
+the patch disables gather generation in autovectorization but uses
+gather scalar emulation instead.
+
+gcc/ChangeLog:
+
+	* config/i386/i386-options.cc (m_GDS): New macro.
+	* config/i386/x86-tune.def (X86_TUNE_USE_GATHER_2PARTS): Don't
+	enable for m_GDS.
+	(X86_TUNE_USE_GATHER_4PARTS): Ditto.
+	(X86_TUNE_USE_GATHER): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/avx2-gather-2.c: Adjust options to keep
+	gather vectorization.
+	* gcc.target/i386/avx2-gather-6.c: Ditto.
+	* gcc.target/i386/avx512f-pr88464-1.c: Ditto.
+	* gcc.target/i386/avx512f-pr88464-5.c: Ditto.
+	* gcc.target/i386/avx512vl-pr88464-1.c: Ditto.
+	* gcc.target/i386/avx512vl-pr88464-11.c: Ditto.
+	* gcc.target/i386/avx512vl-pr88464-3.c: Ditto.
+	* gcc.target/i386/avx512vl-pr88464-9.c: Ditto.
+	* gcc.target/i386/pr88531-1b.c: Ditto.
+	* gcc.target/i386/pr88531-1c.c: Ditto.
+
+(cherry picked from commit 3064d1f5c48cb6ce1b4133570dd08ecca8abb52d)
+---
+ gcc/config/i386/i386-options.cc                     | 5 +++++
+ gcc/config/i386/x86-tune.def                        | 9 ++++++---
+ gcc/testsuite/gcc.target/i386/avx2-gather-2.c       | 2 +-
+ gcc/testsuite/gcc.target/i386/avx2-gather-6.c       | 2 +-
+ gcc/testsuite/gcc.target/i386/avx512f-pr88464-1.c   | 2 +-
+ gcc/testsuite/gcc.target/i386/avx512f-pr88464-5.c   | 2 +-
+ gcc/testsuite/gcc.target/i386/avx512vl-pr88464-1.c  | 2 +-
+ gcc/testsuite/gcc.target/i386/avx512vl-pr88464-11.c | 2 +-
+ gcc/testsuite/gcc.target/i386/avx512vl-pr88464-3.c  | 2 +-
+ gcc/testsuite/gcc.target/i386/avx512vl-pr88464-9.c  | 2 +-
+ gcc/testsuite/gcc.target/i386/pr88531-1b.c          | 2 +-
+ gcc/testsuite/gcc.target/i386/pr88531-1c.c          | 2 +-
+ 12 files changed, 21 insertions(+), 13 deletions(-)
+
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index fb2ed942f..9617fc162 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -137,6 +137,11 @@ along with GCC; see the file COPYING3.  If not see
+ #define m_GOLDMONT_PLUS (HOST_WIDE_INT_1U<
+Date: Thu, 10 Aug 2023 16:26:13 +0800
+Subject: [PATCH 10/28] Support -m[no-]gather -m[no-]scatter to enable/disable
+ vectorization for all gather/scatter instructions
+
+Rename original use_gather to use_gather_8parts, Support
+-mtune-ctrl={,^}use_gather to set/clear tune features
+use_gather_{2parts, 4parts, 8parts}. Support the new option -mgather
+as alias of -mtune-ctrl=, use_gather, ^use_gather.
+
+Similar for use_scatter.
+
+gcc/ChangeLog:
+
+	* config/i386/i386-builtins.cc
+	(ix86_vectorize_builtin_gather): Adjust for use_gather_8parts.
+	* config/i386/i386-options.cc (parse_mtune_ctrl_str):
+	Set/Clear tune features use_{gather,scatter}_{2parts, 4parts,
+	8parts} for -mtune-crtl={,^}{use_gather,use_scatter}.
+	* config/i386/i386.cc (ix86_vectorize_builtin_scatter): Adjust
+	for use_scatter_8parts
+	* config/i386/i386.h (TARGET_USE_GATHER): Rename to ..
+	(TARGET_USE_GATHER_8PARTS): .. this.
+	(TARGET_USE_SCATTER): Rename to ..
+	(TARGET_USE_SCATTER_8PARTS): .. this.
+	* config/i386/x86-tune.def (X86_TUNE_USE_GATHER): Rename to
+	(X86_TUNE_USE_GATHER_8PARTS): .. this.
+	(X86_TUNE_USE_SCATTER): Rename to
+	(X86_TUNE_USE_SCATTER_8PARTS): .. this.
+	* config/i386/i386.opt: Add new options mgather, mscatter.
+
+(cherry picked from commit b2a927fb5343db363ea4361da0d6bcee227b6737)
+---
+ gcc/config/i386/i386-builtins.cc |  2 +-
+ gcc/config/i386/i386-options.cc  | 54 +++++++++++++++++++++++---------
+ gcc/config/i386/i386.cc          |  2 +-
+ gcc/config/i386/i386.h           |  8 ++---
+ gcc/config/i386/i386.opt         |  4 +++
+ gcc/config/i386/x86-tune.def     |  4 +--
+ 6 files changed, 52 insertions(+), 22 deletions(-)
+
+diff --git a/gcc/config/i386/i386-builtins.cc b/gcc/config/i386/i386-builtins.cc
+index 050c6228a..8ed32e14f 100644
+--- a/gcc/config/i386/i386-builtins.cc
++++ b/gcc/config/i386/i386-builtins.cc
+@@ -1790,7 +1790,7 @@ ix86_vectorize_builtin_gather (const_tree mem_vectype,
+ 	  ? !TARGET_USE_GATHER_2PARTS
+ 	  : (known_eq (TYPE_VECTOR_SUBPARTS (mem_vectype), 4u)
+ 	     ? !TARGET_USE_GATHER_4PARTS
+-	     : !TARGET_USE_GATHER)))
++	     : !TARGET_USE_GATHER_8PARTS)))
+     return NULL_TREE;
+ 
+   if ((TREE_CODE (index_type) != INTEGER_TYPE
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 9617fc162..3df1f0c41 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -1705,20 +1705,46 @@ parse_mtune_ctrl_str (struct gcc_options *opts, bool dump)
+           curr_feature_string++;
+           clear = true;
+         }
+-      for (i = 0; i < X86_TUNE_LAST; i++)
+-        {
+-          if (!strcmp (curr_feature_string, ix86_tune_feature_names[i]))
+-            {
+-              ix86_tune_features[i] = !clear;
+-              if (dump)
+-                fprintf (stderr, "Explicitly %s feature %s\n",
+-                         clear ? "clear" : "set", ix86_tune_feature_names[i]);
+-              break;
+-            }
+-        }
+-      if (i == X86_TUNE_LAST)
+-	error ("unknown parameter to option %<-mtune-ctrl%>: %s",
+-	       clear ? curr_feature_string - 1 : curr_feature_string);
++
++      if (!strcmp (curr_feature_string, "use_gather"))
++	{
++	  ix86_tune_features[X86_TUNE_USE_GATHER_2PARTS] = !clear;
++	  ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS] = !clear;
++	  ix86_tune_features[X86_TUNE_USE_GATHER_8PARTS] = !clear;
++	  if (dump)
++	    fprintf (stderr, "Explicitly %s features use_gather_2parts,"
++		     " use_gather_4parts, use_gather_8parts\n",
++		     clear ? "clear" : "set");
++
++	}
++      else if (!strcmp (curr_feature_string, "use_scatter"))
++	{
++	  ix86_tune_features[X86_TUNE_USE_SCATTER_2PARTS] = !clear;
++	  ix86_tune_features[X86_TUNE_USE_SCATTER_4PARTS] = !clear;
++	  ix86_tune_features[X86_TUNE_USE_SCATTER_8PARTS] = !clear;
++	  if (dump)
++	    fprintf (stderr, "Explicitly %s features use_scatter_2parts,"
++		     " use_scatter_4parts, use_scatter_8parts\n",
++		     clear ? "clear" : "set");
++	}
++      else
++	{
++	  for (i = 0; i < X86_TUNE_LAST; i++)
++	    {
++	      if (!strcmp (curr_feature_string, ix86_tune_feature_names[i]))
++		{
++		  ix86_tune_features[i] = !clear;
++		  if (dump)
++		    fprintf (stderr, "Explicitly %s feature %s\n",
++			     clear ? "clear" : "set", ix86_tune_feature_names[i]);
++		  break;
++		}
++	    }
++
++	  if (i == X86_TUNE_LAST)
++	    error ("unknown parameter to option %<-mtune-ctrl%>: %s",
++		   clear ? curr_feature_string - 1 : curr_feature_string);
++	}
+       curr_feature_string = next_feature_string;
+     }
+   while (curr_feature_string);
+diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
+index 479fc6010..e75d37023 100644
+--- a/gcc/config/i386/i386.cc
++++ b/gcc/config/i386/i386.cc
+@@ -18937,7 +18937,7 @@ ix86_vectorize_builtin_scatter (const_tree vectype,
+       ? !TARGET_USE_SCATTER_2PARTS
+       : (known_eq (TYPE_VECTOR_SUBPARTS (vectype), 4u)
+ 	 ? !TARGET_USE_SCATTER_4PARTS
+-	 : !TARGET_USE_SCATTER))
++	 : !TARGET_USE_SCATTER_8PARTS))
+     return NULL_TREE;
+ 
+   if ((TREE_CODE (index_type) != INTEGER_TYPE
+diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
+index 688aaabd3..aaa136ba0 100644
+--- a/gcc/config/i386/i386.h
++++ b/gcc/config/i386/i386.h
+@@ -403,10 +403,10 @@ extern unsigned char ix86_tune_features[X86_TUNE_LAST];
+ 	ix86_tune_features[X86_TUNE_USE_GATHER_4PARTS]
+ #define TARGET_USE_SCATTER_4PARTS \
+ 	ix86_tune_features[X86_TUNE_USE_SCATTER_4PARTS]
+-#define TARGET_USE_GATHER \
+-	ix86_tune_features[X86_TUNE_USE_GATHER]
+-#define TARGET_USE_SCATTER \
+-	ix86_tune_features[X86_TUNE_USE_SCATTER]
++#define TARGET_USE_GATHER_8PARTS \
++	ix86_tune_features[X86_TUNE_USE_GATHER_8PARTS]
++#define TARGET_USE_SCATTER_8PARTS \
++	ix86_tune_features[X86_TUNE_USE_SCATTER_8PARTS]
+ #define TARGET_FUSE_CMP_AND_BRANCH_32 \
+ 	ix86_tune_features[X86_TUNE_FUSE_CMP_AND_BRANCH_32]
+ #define TARGET_FUSE_CMP_AND_BRANCH_64 \
+diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
+index 498fb454d..b154110d8 100644
+--- a/gcc/config/i386/i386.opt
++++ b/gcc/config/i386/i386.opt
+@@ -1222,3 +1222,7 @@ Instructions number above which STFL stall penalty can be compensated.
+ munroll-only-small-loops
+ Target Var(ix86_unroll_only_small_loops) Init(0) Save
+ Enable conservative small loop unrolling.
++
++mscatter
++Target Alias(mtune-ctrl=, use_scatter, ^use_scatter)
++Enable vectorization for scatter instruction.
+diff --git a/gcc/config/i386/x86-tune.def b/gcc/config/i386/x86-tune.def
+index 4392709fc..bdb455d20 100644
+--- a/gcc/config/i386/x86-tune.def
++++ b/gcc/config/i386/x86-tune.def
+@@ -488,13 +488,13 @@ DEF_TUNE (X86_TUNE_USE_SCATTER_4PARTS, "use_scatter_4parts",
+ 
+ /* X86_TUNE_USE_GATHER: Use gather instructions for vectors with 8 or more
+    elements.  */
+-DEF_TUNE (X86_TUNE_USE_GATHER, "use_gather",
++DEF_TUNE (X86_TUNE_USE_GATHER_8PARTS, "use_gather_8parts",
+ 	  ~(m_ZNVER1 | m_ZNVER2 | m_ZNVER4 | m_ALDERLAKE
+ 	    | m_GENERIC | m_GDS))
+ 
+ /* X86_TUNE_USE_SCATTER: Use scater instructions for vectors with 8 or more
+    elements.  */
+-DEF_TUNE (X86_TUNE_USE_SCATTER, "use_scatter",
++DEF_TUNE (X86_TUNE_USE_SCATTER_8PARTS, "use_scatter_8parts",
+ 	  ~(m_ZNVER4))
+ 
+ /* X86_TUNE_AVOID_128FMA_CHAINS: Avoid creating loops with tight 128bit or
+-- 
+2.31.1
+
diff --git a/0265-Remove-constraint-modifier-for-fcmaddcph-fmaddcph-fc.patch b/0265-Remove-constraint-modifier-for-fcmaddcph-fmaddcph-fc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e2084f6e88c0d07d076589219446c76b84f5ec1c
--- /dev/null
+++ b/0265-Remove-constraint-modifier-for-fcmaddcph-fmaddcph-fc.patch
@@ -0,0 +1,129 @@
+From 764518a35e90a3e13c469275da9c3c7002fe1982 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Fri, 8 Sep 2023 09:22:43 +0800
+Subject: [PATCH 11/28] Remove constraint modifier % for
+ fcmaddcph/fmaddcph/fcmulcph since there're not commutative.
+
+gcc/ChangeLog:
+
+	PR target/111306
+	PR target/111335
+	* config/i386/sse.md (int_comm): New int_attr.
+	(fma__):
+	Remove % for Complex conjugate operations since they're not
+	commutative.
+	(fma___pair): Ditto.
+	(___mask): Ditto.
+	(cmul3): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr111306.c: New test.
+
+(cherry picked from commit f197392a16ffb1327f1d12ff8ff05f9295e015cb)
+---
+ gcc/config/i386/sse.md                   | 16 ++++++++---
+ gcc/testsuite/gcc.target/i386/pr111306.c | 36 ++++++++++++++++++++++++
+ 2 files changed, 48 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr111306.c
+
+diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
+index 3af159896..f25dd5f2b 100644
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -6318,6 +6318,14 @@
+ 	[(UNSPEC_COMPLEX_FMA_PAIR "fmaddc")
+ 	 (UNSPEC_COMPLEX_FCMA_PAIR "fcmaddc")])
+ 
++(define_int_attr int_comm
++	[(UNSPEC_COMPLEX_FMA "")
++	 (UNSPEC_COMPLEX_FMA_PAIR "")
++	 (UNSPEC_COMPLEX_FCMA "")
++	 (UNSPEC_COMPLEX_FCMA_PAIR "")
++	 (UNSPEC_COMPLEX_FMUL "%")
++	 (UNSPEC_COMPLEX_FCMUL "")])
++
+ (define_int_attr conj_op
+ 	[(UNSPEC_COMPLEX_FMA "")
+ 	 (UNSPEC_COMPLEX_FCMA "_conj")
+@@ -6431,7 +6439,7 @@
+ (define_insn "fma__"
+   [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v")
+ 	(unspec:VF_AVX512FP16VL
+-	  [(match_operand:VF_AVX512FP16VL 1 "" "%v")
++	  [(match_operand:VF_AVX512FP16VL 1 "" "v")
+ 	   (match_operand:VF_AVX512FP16VL 2 "" "")
+ 	   (match_operand:VF_AVX512FP16VL 3 "" "0")]
+ 	   UNSPEC_COMPLEX_F_C_MA))]
+@@ -6495,7 +6503,7 @@
+ (define_insn "fma___pair"
+  [(set (match_operand:VF1_AVX512VL 0 "register_operand" "=&v")
+        (unspec:VF1_AVX512VL
+-	 [(match_operand:VF1_AVX512VL 1 "vector_operand" "%v")
++	 [(match_operand:VF1_AVX512VL 1 "vector_operand" "v")
+ 	  (match_operand:VF1_AVX512VL 2 "bcst_vector_operand" "vmBr")
+ 	  (match_operand:VF1_AVX512VL 3 "vector_operand" "0")]
+ 	  UNSPEC_COMPLEX_F_C_MA_PAIR))]
+@@ -6562,7 +6570,7 @@
+   [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v")
+ 	(vec_merge:VF_AVX512FP16VL
+ 	  (unspec:VF_AVX512FP16VL
+-	    [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "%v")
++	    [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "v")
+ 	     (match_operand:VF_AVX512FP16VL 2 "nonimmediate_operand" "")
+ 	     (match_operand:VF_AVX512FP16VL 3 "register_operand" "0")]
+ 	     UNSPEC_COMPLEX_F_C_MA)
+@@ -6586,7 +6594,7 @@
+ (define_insn "__"
+   [(set (match_operand:VF_AVX512FP16VL 0 "register_operand" "=&v")
+ 	  (unspec:VF_AVX512FP16VL
+-	    [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "%v")
++	    [(match_operand:VF_AVX512FP16VL 1 "nonimmediate_operand" "v")
+ 	     (match_operand:VF_AVX512FP16VL 2 "nonimmediate_operand" "")]
+ 	     UNSPEC_COMPLEX_F_C_MUL))]
+   "TARGET_AVX512FP16 && "
+diff --git a/gcc/testsuite/gcc.target/i386/pr111306.c b/gcc/testsuite/gcc.target/i386/pr111306.c
+new file mode 100644
+index 000000000..541725ebd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr111306.c
+@@ -0,0 +1,36 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -mavx512fp16 -mavx512vl" } */
++/* { dg-require-effective-target avx512fp16 } */
++
++#define AVX512FP16
++#include "avx512f-helper.h"
++
++__attribute__((optimize("O2"),noipa))
++void func1(_Float16 *a, _Float16 *b, int n, _Float16 *c) {
++  __m512h rA = _mm512_loadu_ph(a);
++  for (int i = 0; i < n; i += 32) {
++    __m512h rB = _mm512_loadu_ph(b + i);
++    _mm512_storeu_ph(c + i, _mm512_fcmul_pch(rB, rA));
++  }
++}
++
++void
++test_512 (void)
++{
++  int n = 32;
++  _Float16 a[n], b[n], c[n];
++  _Float16 exp[n];
++  for (int i = 1; i <= n; i++) {
++    a[i - 1] = i & 1 ? -i : i;
++    b[i - 1] = i;
++  }
++
++  func1(a, b, n, c);
++  for (int i = 0; i < n / 32; i += 2) {
++    if (c[i] != a[i] * b[i] + a[i+1] * b[i+1]
++	|| c[i+1] != a[i] * b[i+1] - a[i+1]*b[i])
++      __builtin_abort ();
++    }
++}
++
++
+-- 
+2.31.1
+
diff --git a/0266-Disparage-slightly-for-the-alternative-which-move-DF.patch b/0266-Disparage-slightly-for-the-alternative-which-move-DF.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5e8bd6773bbf1b2f5a3344bd6b7bdc33a2486d97
--- /dev/null
+++ b/0266-Disparage-slightly-for-the-alternative-which-move-DF.patch
@@ -0,0 +1,106 @@
+From afd539adfe762adb57863299a11987b7e20e7987 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Wed, 5 Jul 2023 13:45:11 +0800
+Subject: [PATCH 12/28] Disparage slightly for the alternative which move
+ DFmode between SSE_REGS and GENERAL_REGS.
+
+For testcase
+
+void __cond_swap(double* __x, double* __y) {
+  bool __r = (*__x < *__y);
+  auto __tmp = __r ? *__x : *__y;
+  *__y = __r ? *__y : *__x;
+  *__x = __tmp;
+}
+
+GCC-14 with -O2 and -march=x86-64 options generates the following code:
+
+__cond_swap(double*, double*):
+        movsd   xmm1, QWORD PTR [rdi]
+        movsd   xmm0, QWORD PTR [rsi]
+        comisd  xmm0, xmm1
+        jbe     .L2
+        movq    rax, xmm1
+        movapd  xmm1, xmm0
+        movq    xmm0, rax
+.L2:
+        movsd   QWORD PTR [rsi], xmm1
+        movsd   QWORD PTR [rdi], xmm0
+        ret
+
+rax is used to save and restore DFmode value. In RA both GENERAL_REGS
+and SSE_REGS cost zero since we didn't disparage the
+alternative in movdf_internal pattern, according to register
+allocation order, GENERAL_REGS is allocated. The patch add ? for
+alternative (r,v) and (v,r) just like we did for movsf/hf/bf_internal
+pattern, after that we get optimal RA.
+
+__cond_swap:
+.LFB0:
+	.cfi_startproc
+	movsd	(%rdi), %xmm1
+	movsd	(%rsi), %xmm0
+	comisd	%xmm1, %xmm0
+	jbe	.L2
+	movapd	%xmm1, %xmm2
+	movapd	%xmm0, %xmm1
+	movapd	%xmm2, %xmm0
+.L2:
+	movsd	%xmm1, (%rsi)
+	movsd	%xmm0, (%rdi)
+	ret
+
+gcc/ChangeLog:
+
+	PR target/110170
+	* config/i386/i386.md (movdf_internal): Disparage slightly for
+	2 alternatives (r,v) and (v,r) by adding constraint modifier
+	'?'.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr110170-3.c: New test.
+
+(cherry picked from commit 37a231cc7594d12ba0822077018aad751a6fb94e)
+---
+ gcc/config/i386/i386.md                    |  4 ++--
+ gcc/testsuite/gcc.target/i386/pr110170-3.c | 11 +++++++++++
+ 2 files changed, 13 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr110170-3.c
+
+diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
+index be07be10d..71691f598 100644
+--- a/gcc/config/i386/i386.md
++++ b/gcc/config/i386/i386.md
+@@ -3582,9 +3582,9 @@
+ ;; Possible store forwarding (partial memory) stall in alternatives 4, 6 and 7.
+ (define_insn "*movdf_internal"
+   [(set (match_operand:DF 0 "nonimmediate_operand"
+-    "=Yf*f,m   ,Yf*f,?r ,!o,?*r ,!o,!o,?r,?m,?r,?r,v,v,v,m,*x,*x,*x,m ,r ,v,r  ,o ,r  ,m")
++    "=Yf*f,m   ,Yf*f,?r ,!o,?*r ,!o,!o,?r,?m,?r,?r,v,v,v,m,*x,*x,*x,m ,?r,?v,r  ,o ,r  ,m")
+ 	(match_operand:DF 1 "general_operand"
+-    "Yf*fm,Yf*f,G   ,roF,r ,*roF,*r,F ,rm,rC,C ,F ,C,v,m,v,C ,*x,m ,*x,v,r ,roF,rF,rmF,rC"))]
++    "Yf*fm,Yf*f,G   ,roF,r ,*roF,*r,F ,rm,rC,C ,F ,C,v,m,v,C ,*x,m ,*x, v, r,roF,rF,rmF,rC"))]
+   "!(MEM_P (operands[0]) && MEM_P (operands[1]))
+    && (lra_in_progress || reload_completed
+        || !CONST_DOUBLE_P (operands[1])
+diff --git a/gcc/testsuite/gcc.target/i386/pr110170-3.c b/gcc/testsuite/gcc.target/i386/pr110170-3.c
+new file mode 100644
+index 000000000..70daa89e9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr110170-3.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-O2 -fno-if-conversion -fno-if-conversion2" } */
++/* { dg-final { scan-assembler-not {(?n)movq.*r} } } */
++
++void __cond_swap(double* __x, double* __y) {
++  _Bool __r = (*__x < *__y);
++  double __tmp = __r ? *__x : *__y;
++  *__y = __r ? *__y : *__x;
++  *__x = __tmp;
++}
++
+-- 
+2.31.1
+
diff --git a/0267-Fix-wrong-code-due-to-vec_merge-pcmp-to-blendvb-spli.patch b/0267-Fix-wrong-code-due-to-vec_merge-pcmp-to-blendvb-spli.patch
new file mode 100644
index 0000000000000000000000000000000000000000..32ce46d7816d8b2f7ee8dd465fe9f59a369e847d
--- /dev/null
+++ b/0267-Fix-wrong-code-due-to-vec_merge-pcmp-to-blendvb-spli.patch
@@ -0,0 +1,163 @@
+From 88516507757932c1e67ce99d240596935971d2d0 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Thu, 9 Nov 2023 13:20:05 +0800
+Subject: [PATCH 13/28] Fix wrong code due to vec_merge + pcmp to blendvb
+ splitter.
+
+gcc/ChangeLog:
+
+	PR target/112443
+	* config/i386/sse.md (*avx2_pcmp3_4): Fix swap condition
+	from LT to GT since there's not in the pattern.
+	(*avx2_pcmp3_5): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/i386/pr112443.C: New test.
+
+(cherry picked from commit 9a0cc04b9c9b02426762892b88efc5c44ba546bd)
+---
+ gcc/config/i386/sse.md                   |   4 +-
+ gcc/testsuite/g++.target/i386/pr112443.C | 108 +++++++++++++++++++++++
+ 2 files changed, 110 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/g++.target/i386/pr112443.C
+
+diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
+index f25dd5f2b..23b858ab2 100644
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -16358,7 +16358,7 @@
+ 	     (match_dup 4))]
+ 	     UNSPEC_BLENDV))]
+ {
+-  if (INTVAL (operands[5]) == 1)
++  if (INTVAL (operands[5]) == 5)
+     std::swap (operands[1], operands[2]);
+   operands[3] = gen_lowpart (mode, operands[3]);
+ })
+@@ -16388,7 +16388,7 @@
+ 	     (match_dup 4))]
+ 	     UNSPEC_BLENDV))]
+ {
+-  if (INTVAL (operands[5]) == 1)
++  if (INTVAL (operands[5]) == 5)
+     std::swap (operands[1], operands[2]);
+ })
+ 
+diff --git a/gcc/testsuite/g++.target/i386/pr112443.C b/gcc/testsuite/g++.target/i386/pr112443.C
+new file mode 100644
+index 000000000..ebfa9b4a7
+--- /dev/null
++++ b/gcc/testsuite/g++.target/i386/pr112443.C
+@@ -0,0 +1,108 @@
++/* { dg-do run } */
++/* { dg-require-effective-target avx512bw } */
++/* { dg-require-effective-target avx512vl } */
++/* { dg-options "-O2 -std=c++17 -mavx512bw -mavx512vl" } */
++
++#include 
++#include 
++#include 
++#include 
++
++#define AVX512BW
++#define AVX512VL
++
++#include "avx512f-helper.h"
++
++struct TensorIteratorBase{
++  char* in;
++  char* out;
++
++  void for_each(std::function loop){
++    loop(out, in, 32);
++  }    
++};
++
++class Vectorized {
++protected:
++  __m256i values;
++
++  static inline __m256i invert(const __m256i& v) {
++    const auto ones = _mm256_set1_epi64x(-1);
++    return _mm256_xor_si256(ones, v);
++  }
++public:
++  operator __m256i() const {
++    return values;
++  }
++
++  static constexpr int size() {
++    return 32;
++  }
++
++  Vectorized() {}
++  Vectorized(__m256i v) : values(v) {}
++  Vectorized(uint8_t v) { values = _mm256_set1_epi8(v); }
++  static Vectorized blendv(const Vectorized& a, const Vectorized& b,
++			   const Vectorized& mask) {
++    return _mm256_blendv_epi8(a, b, mask);
++  }
++  static Vectorized loadu(const void* ptr) {
++    return _mm256_loadu_si256(reinterpret_cast(ptr));
++  }
++  void store(void* ptr) const {
++    _mm256_storeu_si256(reinterpret_cast<__m256i*>(ptr), values);
++  }
++
++  Vectorized operator<(const Vectorized& other) const {
++    __m256i max = _mm256_max_epu8(values, other);
++    return invert(_mm256_cmpeq_epi8(max, values));
++  }
++  Vectorized operator-(const Vectorized& b) {
++    return _mm256_sub_epi8(values, b);
++  }
++};
++
++std::ostream& operator<<(std::ostream& stream, const Vectorized& vec) {
++  uint8_t buf[Vectorized::size()];
++  vec.store(buf);
++  stream << "vec[";
++  for (int i = 0; i != Vectorized::size(); i++) {
++    if (i != 0)
++      stream << ", ";
++    stream << buf[i]*1;
++  }
++  stream << "]";
++  return stream;
++}
++
++void run(TensorIteratorBase iter){
++  Vectorized zero_vec(0);
++  Vectorized one_vec(1);
++
++  iter.for_each([=](char* out, char* in, int64_t size) {
++    for (int64_t i = 0; i <= size - Vectorized::size(); i += Vectorized::size()) {
++      auto self_vec = Vectorized::loadu(in + i);
++      auto left = Vectorized::blendv(zero_vec, one_vec, zero_vec < self_vec);
++      auto right = Vectorized::blendv(zero_vec, one_vec, self_vec < zero_vec);
++      auto outv = left - right;
++      outv.store(out + i);
++    }
++  });
++}
++
++void
++test_256 (){
++  char in[32];
++  char out[32];
++  for(auto& x: in) x = 1;
++  run(TensorIteratorBase{in, out});
++  Vectorized::loadu (out);
++  for (int i = 0; i != 32; i++)
++    if (out[i] != 1)
++      __builtin_abort ();
++}
++
++void
++test_128 ()
++{
++}
+-- 
+2.31.1
+
diff --git a/0268-Don-t-assume-it-s-AVX_U128_CLEAN-after-call_insn-who.patch b/0268-Don-t-assume-it-s-AVX_U128_CLEAN-after-call_insn-who.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3d2f9bb0d765b894e1d7f5a17fc964d2e914f9cc
--- /dev/null
+++ b/0268-Don-t-assume-it-s-AVX_U128_CLEAN-after-call_insn-who.patch
@@ -0,0 +1,151 @@
+From 204ffa7f503411ccac0161c951726274648b6374 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Thu, 7 Dec 2023 09:17:27 +0800
+Subject: [PATCH 14/28] Don't assume it's AVX_U128_CLEAN after call_insn whose
+ abi.mode_clobber(V4DImode) deosn't contains all SSE_REGS.
+
+If the function desn't clobber any sse registers or only clobber
+128-bit part, then vzeroupper isn't issued before the function exit.
+the status not CLEAN but ANY after the function.
+
+Also for sibling_call, it's safe to issue an vzeroupper. Also there
+could be missing vzeroupper since there's no mode_exit for
+sibling_call_p.
+
+gcc/ChangeLog:
+
+	PR target/112891
+	* config/i386/i386.cc (ix86_avx_u128_mode_after): Return
+	AVX_U128_ANY if callee_abi doesn't clobber all_sse_regs to
+	align with ix86_avx_u128_mode_needed.
+	(ix86_avx_u128_mode_needed): Return AVX_U128_ClEAN for
+	sibling_call.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr112891.c: New test.
+	* gcc.target/i386/pr112891-2.c: New test.
+
+(cherry picked from commit fc189a08f5b7ad5889bd4c6b320c1dd99dd5d642)
+---
+ gcc/config/i386/i386.cc                    | 22 +++++++++++++---
+ gcc/testsuite/gcc.target/i386/pr112891-2.c | 30 ++++++++++++++++++++++
+ gcc/testsuite/gcc.target/i386/pr112891.c   | 29 +++++++++++++++++++++
+ 3 files changed, 78 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr112891-2.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr112891.c
+
+diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
+index e75d37023..60f3296b0 100644
+--- a/gcc/config/i386/i386.cc
++++ b/gcc/config/i386/i386.cc
+@@ -14416,8 +14416,12 @@ ix86_avx_u128_mode_needed (rtx_insn *insn)
+ 	 modes wider than 256 bits.  It's only safe to issue a
+ 	 vzeroupper if all SSE registers are clobbered.  */
+       const function_abi &abi = insn_callee_abi (insn);
+-      if (!hard_reg_set_subset_p (reg_class_contents[SSE_REGS],
+-				  abi.mode_clobbers (V4DImode)))
++      /* Should be safe to issue an vzeroupper before sibling_call_p.
++	 Also there not mode_exit for sibling_call, so there could be
++	 missing vzeroupper for that.  */
++      if (!(SIBLING_CALL_P (insn)
++	    || hard_reg_set_subset_p (reg_class_contents[SSE_REGS],
++				      abi.mode_clobbers (V4DImode))))
+ 	return AVX_U128_ANY;
+ 
+       return AVX_U128_CLEAN;
+@@ -14555,7 +14559,19 @@ ix86_avx_u128_mode_after (int mode, rtx_insn *insn)
+       bool avx_upper_reg_found = false;
+       note_stores (insn, ix86_check_avx_upper_stores, &avx_upper_reg_found);
+ 
+-      return avx_upper_reg_found ? AVX_U128_DIRTY : AVX_U128_CLEAN;
++      if (avx_upper_reg_found)
++	return AVX_U128_DIRTY;
++
++      /* If the function desn't clobber any sse registers or only clobber
++	 128-bit part, Then vzeroupper isn't issued before the function exit.
++	 the status not CLEAN but ANY after the function.  */
++      const function_abi &abi = insn_callee_abi (insn);
++      if (!(SIBLING_CALL_P (insn)
++	    || hard_reg_set_subset_p (reg_class_contents[SSE_REGS],
++				      abi.mode_clobbers (V4DImode))))
++	return AVX_U128_ANY;
++
++      return  AVX_U128_CLEAN;
+     }
+ 
+   /* Otherwise, return current mode.  Remember that if insn
+diff --git a/gcc/testsuite/gcc.target/i386/pr112891-2.c b/gcc/testsuite/gcc.target/i386/pr112891-2.c
+new file mode 100644
+index 000000000..164c3985d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr112891-2.c
+@@ -0,0 +1,30 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx2 -O3" } */
++/* { dg-final { scan-assembler-times "vzeroupper" 1 } } */
++
++void
++__attribute__((noinline))
++bar (double* a)
++{
++  a[0] = 1.0;
++  a[1] = 2.0;
++}
++
++double
++__attribute__((noinline))
++foo (double* __restrict a, double* b)
++{
++  a[0] += b[0];
++  a[1] += b[1];
++  a[2] += b[2];
++  a[3] += b[3];
++  bar (b);
++  return a[5] + b[5];
++}
++
++double
++foo1 (double* __restrict a, double* b)
++{
++  double c = foo (a, b);
++  return __builtin_exp (c);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/pr112891.c b/gcc/testsuite/gcc.target/i386/pr112891.c
+new file mode 100644
+index 000000000..dbf6c6794
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr112891.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx2 -O3" } */
++/* { dg-final { scan-assembler-times "vzeroupper" 1 } } */
++
++void
++__attribute__((noinline))
++bar (double* a)
++{
++  a[0] = 1.0;
++  a[1] = 2.0;
++}
++
++void
++__attribute__((noinline))
++foo (double* __restrict a, double* b)
++{
++  a[0] += b[0];
++  a[1] += b[1];
++  a[2] += b[2];
++  a[3] += b[3];
++  bar (b);
++}
++
++double
++foo1 (double* __restrict a, double* b)
++{
++  foo (a, b);
++  return __builtin_exp (b[1]);
++}
+-- 
+2.31.1
+
diff --git a/0269-Disable-FMADD-in-chains-for-Zen4-and-generic.patch b/0269-Disable-FMADD-in-chains-for-Zen4-and-generic.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b0de50431d13f24788162d4d8c595ba5c4db5e92
--- /dev/null
+++ b/0269-Disable-FMADD-in-chains-for-Zen4-and-generic.patch
@@ -0,0 +1,142 @@
+From 19ee37b11702c86d7ed271e9e1d00e23cc4ab93c Mon Sep 17 00:00:00 2001
+From: Jan Hubicka 
+Date: Fri, 29 Dec 2023 23:51:03 +0100
+Subject: [PATCH 15/28] Disable FMADD in chains for Zen4 and generic
+
+this patch disables use of FMA in matrix multiplication loop for generic (for
+x86-64-v3) and zen4.  I tested this on zen4 and Xeon Gold 6212U.
+
+For Intel this is neutral both on the matrix multiplication microbenchmark
+(attached) and spec2k17 where the difference was within noise for Core.
+
+On core the micro-benchmark runs as follows:
+
+With FMA:
+
+       578,500,241      cycles:u                         #    3.645 GHz
+                ( +-  0.12% )
+       753,318,477      instructions:u                   #    1.30  insn per
+cycle              ( +-  0.00% )
+       125,417,701      branches:u                       #  790.227 M/sec
+                ( +-  0.00% )
+          0.159146 +- 0.000363 seconds time elapsed  ( +-  0.23% )
+
+No FMA:
+
+       577,573,960      cycles:u                         #    3.514 GHz
+                ( +-  0.15% )
+       878,318,479      instructions:u                   #    1.52  insn per
+cycle              ( +-  0.00% )
+       125,417,702      branches:u                       #  763.035 M/sec
+                ( +-  0.00% )
+          0.164734 +- 0.000321 seconds time elapsed  ( +-  0.19% )
+
+So the cycle count is unchanged and discrete multiply+add takes same time as
+FMA.
+
+While on zen:
+
+With FMA:
+         484875179      cycles:u                         #    3.599 GHz
+             ( +-  0.05% )  (82.11%)
+         752031517      instructions:u                   #    1.55  insn per
+cycle
+         125106525      branches:u                       #  928.712 M/sec
+             ( +-  0.03% )  (85.09%)
+            128356      branch-misses:u                  #    0.10% of all
+branches          ( +-  0.06% )  (83.58%)
+
+No FMA:
+         375875209      cycles:u                         #    3.592 GHz
+             ( +-  0.08% )  (80.74%)
+         875725341      instructions:u                   #    2.33  insn per
+cycle
+         124903825      branches:u                       #    1.194 G/sec
+             ( +-  0.04% )  (84.59%)
+          0.105203 +- 0.000188 seconds time elapsed  ( +-  0.18% )
+
+The difference is that Cores understand the fact that fmadd does not need
+all three parameters to start computation, while Zen cores don't.
+
+Since this seems noticeable win on zen and not loss on Core it seems like good
+default for generic.
+
+float a[SIZE][SIZE];
+float b[SIZE][SIZE];
+float c[SIZE][SIZE];
+
+void init(void)
+{
+   int i, j, k;
+   for(i=0; i
+Date: Fri, 16 Sep 2022 13:59:01 +0800
+Subject: [PATCH 16/28] Initial Raptorlake Support
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h:
+	(get_intel_cpu): Handle Raptorlake.
+	* common/config/i386/i386-common.cc:
+	(processor_alias_table): Add Raptorlake.
+
+(cherry picked from commit 470a0659b508d684148f362c4dc0eccf5a83a23e)
+---
+ gcc/common/config/i386/cpuinfo.h      | 2 ++
+ gcc/common/config/i386/i386-common.cc | 2 ++
+ 2 files changed, 4 insertions(+)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 316ad3cb3..13d0f4cd8 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -508,6 +508,8 @@ get_intel_cpu (struct __processor_model *cpu_model,
+     case 0x97:
+     case 0x9a:
+       /* Alder Lake.  */
++    case 0xb7:
++      /* Raptor Lake.  */
+       cpu = "alderlake";
+       CHECK___builtin_cpu_is ("corei7");
+       CHECK___builtin_cpu_is ("alderlake");
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index f650e255f..c1d700f89 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -1939,6 +1939,8 @@ const pta processor_alias_table[] =
+     M_CPU_SUBTYPE (INTEL_COREI7_SAPPHIRERAPIDS), P_PROC_AVX512F},
+   {"alderlake", PROCESSOR_ALDERLAKE, CPU_HASWELL, PTA_ALDERLAKE,
+     M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
++  {"raptorlake", PROCESSOR_ALDERLAKE, CPU_HASWELL, PTA_ALDERLAKE,
++    M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
+   {"bonnell", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+     M_CPU_TYPE (INTEL_BONNELL), P_PROC_SSSE3},
+   {"atom", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+-- 
+2.31.1
+
diff --git a/0271-Initial-Meteorlake-Support.patch b/0271-Initial-Meteorlake-Support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c2e825b4f0a7a1c95120ddb3a8b803a2dfbfe2f2
--- /dev/null
+++ b/0271-Initial-Meteorlake-Support.patch
@@ -0,0 +1,49 @@
+From 87cea29ede520f4a5af01dff7071ab1d23bd47b5 Mon Sep 17 00:00:00 2001
+From: "Hu, Lin1" 
+Date: Fri, 16 Sep 2022 11:25:13 +0800
+Subject: [PATCH 17/28] Initial Meteorlake Support
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h:
+	(get_intel_cpu): Handle Meteorlake.
+	* common/config/i386/i386-common.cc:
+	(processor_alias_table): Add Meteorlake.
+
+(cherry picked from commit fd206f0e95fb6f41b96eaaaab1dc0c30378e5e08)
+---
+ gcc/common/config/i386/cpuinfo.h      | 4 ++++
+ gcc/common/config/i386/i386-common.cc | 2 ++
+ 2 files changed, 6 insertions(+)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 13d0f4cd8..37af92d6b 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -510,6 +510,10 @@ get_intel_cpu (struct __processor_model *cpu_model,
+       /* Alder Lake.  */
+     case 0xb7:
+       /* Raptor Lake.  */
++    case 0xb5:
++    case 0xaa:
++    case 0xac:
++      /* Meteor Lake.  */
+       cpu = "alderlake";
+       CHECK___builtin_cpu_is ("corei7");
+       CHECK___builtin_cpu_is ("alderlake");
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index c1d700f89..cfee672fb 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -1941,6 +1941,8 @@ const pta processor_alias_table[] =
+     M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
+   {"raptorlake", PROCESSOR_ALDERLAKE, CPU_HASWELL, PTA_ALDERLAKE,
+     M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
++  {"meteorlake", PROCESSOR_ALDERLAKE, CPU_HASWELL, PTA_ALDERLAKE,
++    M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
+   {"bonnell", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+     M_CPU_TYPE (INTEL_BONNELL), P_PROC_SSSE3},
+   {"atom", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+-- 
+2.31.1
+
diff --git a/0272-Support-Intel-AMX-FP16-ISA.patch b/0272-Support-Intel-AMX-FP16-ISA.patch
new file mode 100644
index 0000000000000000000000000000000000000000..67eda4d955a252c35efc8e024351082d7c005584
--- /dev/null
+++ b/0272-Support-Intel-AMX-FP16-ISA.patch
@@ -0,0 +1,691 @@
+From c11301c7780213ddf46a0bcdb06079af485f431c Mon Sep 17 00:00:00 2001
+From: Hongyu Wang 
+Date: Fri, 4 Nov 2022 15:50:55 +0800
+Subject: [PATCH 18/28] Support Intel AMX-FP16 ISA
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h (get_available_features): Detect
+	amx-fp16.
+	* common/config/i386/i386-common.cc (OPTION_MASK_ISA2_AMX_FP16_SET,
+	OPTION_MASK_ISA2_AMX_FP16_UNSET): New macros.
+	(ix86_handle_option): Handle -mamx-fp16.
+	* common/config/i386/i386-cpuinfo.h (enum processor_features):
+	Add FEATURE_AMX_FP16.
+	* common/config/i386/i386-isas.h: Add ISA_NAME_TABLE_ENTRY for
+	amx-fp16.
+	* config.gcc: Add amxfp16intrin.h.
+	* config/i386/cpuid.h (bit_AMX_FP16): New.
+	* config/i386/i386-c.cc (ix86_target_macros_internal): Define
+	__AMX_FP16__.
+	* config/i386/i386-isa.def: Add DEF_PTA for AMX_FP16.
+	* config/i386/i386-options.cc (isa2_opts): Add -mamx-fp16.
+	(ix86_valid_target_attribute_inner_p): Add new ATTR.
+	(ix86_option_override_internal): Handle AMX-FP16.
+	* config/i386/i386.opt: Add -mamx-fp16.
+	* config/i386/immintrin.h: Include amxfp16intrin.h.
+	* doc/extend.texi: Document -mamx-fp16.
+	* doc/invoke.texi: Document amx-fp16.
+	* doc/sourcebuild.texi: Document amx_fp16.
+	* config/i386/amxfp16intrin.h: New file.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.dg/other/i386-2.C: Add -mamx-fp16.
+	* g++.dg/other/i386-3.C: Ditto.
+	* gcc.target/i386/sse-12.c: Ditto.
+	* gcc.target/i386/sse-13.c: Ditto.
+	* gcc.target/i386/sse-14.c: Ditto.
+	* gcc.target/i386/sse-22.c: Ditto.
+	* gcc.target/i386/sse-23.c: Ditto.
+	* lib/target-supports.exp: (check_effective_target_amx_fp16):
+	New proc.
+	* gcc.target/i386/funcspec-56.inc: Add new target attribute.
+	* gcc.target/i386/amx-check.h: Add AMX_FP16.
+	* gcc.target/i386/amx-helper.h: New file to support amx-fp16.
+	* gcc.target/i386/amxfp16-asmatt-1.c: New test.
+	* gcc.target/i386/amxfp16-asmintel-1.c: Ditto.
+	* gcc.target/i386/amxfp16-dpfp16ps-2.c: Ditto.
+
+Co-authored-by: Haochen Jiang 
+
+(cherry picked from commit 2b4a03962a0fe18cadc944d90f1fb85a40004226)
+---
+ gcc/common/config/i386/cpuinfo.h              |  5 ++
+ gcc/common/config/i386/i386-common.cc         | 15 +++++
+ gcc/common/config/i386/i386-cpuinfo.h         |  1 +
+ gcc/common/config/i386/i386-isas.h            |  1 +
+ gcc/config.gcc                                |  3 +-
+ gcc/config/i386/amxfp16intrin.h               | 46 ++++++++++++++
+ gcc/config/i386/cpuid.h                       |  1 +
+ gcc/config/i386/i386-c.cc                     |  2 +
+ gcc/config/i386/i386-isa.def                  |  1 +
+ gcc/config/i386/i386-options.cc               |  4 +-
+ gcc/config/i386/i386.opt                      |  4 ++
+ gcc/config/i386/immintrin.h                   |  2 +
+ gcc/doc/extend.texi                           |  5 ++
+ gcc/doc/invoke.texi                           |  9 ++-
+ gcc/doc/sourcebuild.texi                      |  3 +
+ gcc/testsuite/g++.dg/other/i386-2.C           |  2 +-
+ gcc/testsuite/g++.dg/other/i386-3.C           |  2 +-
+ gcc/testsuite/gcc.target/i386/amx-check.h     |  3 +
+ gcc/testsuite/gcc.target/i386/amx-helper.h    | 61 +++++++++++++++++++
+ .../gcc.target/i386/amxfp16-asmatt-1.c        | 13 ++++
+ .../gcc.target/i386/amxfp16-asmintel-1.c      | 10 +++
+ .../gcc.target/i386/amxfp16-dpfp16ps-2.c      | 57 +++++++++++++++++
+ gcc/testsuite/gcc.target/i386/funcspec-56.inc |  2 +
+ gcc/testsuite/gcc.target/i386/sse-12.c        |  2 +-
+ gcc/testsuite/gcc.target/i386/sse-13.c        |  2 +-
+ gcc/testsuite/gcc.target/i386/sse-14.c        |  2 +-
+ gcc/testsuite/gcc.target/i386/sse-22.c        |  4 +-
+ gcc/testsuite/gcc.target/i386/sse-23.c        |  2 +-
+ gcc/testsuite/lib/target-supports.exp         | 11 ++++
+ 29 files changed, 262 insertions(+), 13 deletions(-)
+ create mode 100644 gcc/config/i386/amxfp16intrin.h
+ create mode 100644 gcc/testsuite/gcc.target/i386/amx-helper.h
+ create mode 100644 gcc/testsuite/gcc.target/i386/amxfp16-asmatt-1.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/amxfp16-asmintel-1.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/amxfp16-dpfp16ps-2.c
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 37af92d6b..5951a30aa 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -783,6 +783,11 @@ get_available_features (struct __processor_model *cpu_model,
+ 		set_feature (FEATURE_AVX512BF16);
+ 	    }
+ 	}
++      if (amx_usable)
++	{
++	  if (eax & bit_AMX_FP16)
++	    set_feature (FEATURE_AMX_FP16);
++	}
+     }
+ 
+   /* Get Advanced Features at level 0xd (eax = 0xd, ecx = 1). */
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index cfee672fb..922db33ee 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -107,6 +107,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define OPTION_MASK_ISA2_AMX_TILE_SET OPTION_MASK_ISA2_AMX_TILE
+ #define OPTION_MASK_ISA2_AMX_INT8_SET OPTION_MASK_ISA2_AMX_INT8
+ #define OPTION_MASK_ISA2_AMX_BF16_SET OPTION_MASK_ISA2_AMX_BF16
++#define OPTION_MASK_ISA2_AMX_FP16_SET OPTION_MASK_ISA2_AMX_FP16
+ 
+ /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
+    as -msse4.2.  */
+@@ -275,6 +276,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define OPTION_MASK_ISA2_KL_UNSET \
+   (OPTION_MASK_ISA2_KL | OPTION_MASK_ISA2_WIDEKL_UNSET)
+ #define OPTION_MASK_ISA2_WIDEKL_UNSET OPTION_MASK_ISA2_WIDEKL
++#define OPTION_MASK_ISA2_AMX_FP16_UNSET OPTION_MASK_ISA2_AMX_FP16
+ 
+ /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should the same
+    as -mno-sse4.1. */
+@@ -1125,6 +1127,19 @@ ix86_handle_option (struct gcc_options *opts,
+ 	}
+       return true;
+ 
++    case OPT_mamx_fp16:
++      if (value)
++	{
++	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AMX_FP16_SET;
++	  opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AMX_FP16_SET;
++	}
++      else
++	{
++	  opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_AMX_FP16_UNSET;
++	  opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AMX_FP16_UNSET;
++	}
++      return true;
++
+     case OPT_mfma:
+       if (value)
+ 	{
+diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h
+index 82996ebb3..8f22897de 100644
+--- a/gcc/common/config/i386/i386-cpuinfo.h
++++ b/gcc/common/config/i386/i386-cpuinfo.h
+@@ -240,6 +240,7 @@ enum processor_features
+   FEATURE_X86_64_V2,
+   FEATURE_X86_64_V3,
+   FEATURE_X86_64_V4,
++  FEATURE_AMX_FP16,
+   CPU_FEATURE_MAX
+ };
+ 
+diff --git a/gcc/common/config/i386/i386-isas.h b/gcc/common/config/i386/i386-isas.h
+index 2d0646a68..95bab6da2 100644
+--- a/gcc/common/config/i386/i386-isas.h
++++ b/gcc/common/config/i386/i386-isas.h
+@@ -175,4 +175,5 @@ ISA_NAMES_TABLE_START
+   ISA_NAMES_TABLE_ENTRY("x86-64-v2", FEATURE_X86_64_V2, P_X86_64_V2, NULL)
+   ISA_NAMES_TABLE_ENTRY("x86-64-v3", FEATURE_X86_64_V3, P_X86_64_V3, NULL)
+   ISA_NAMES_TABLE_ENTRY("x86-64-v4", FEATURE_X86_64_V4, P_X86_64_V4, NULL)
++  ISA_NAMES_TABLE_ENTRY("amx-fp16", FEATURE_AMX_FP16, P_NONE, "-mamx-fp16")
+ ISA_NAMES_TABLE_END
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 4a0ae9328..e2b4a23dc 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -423,7 +423,8 @@ i[34567]86-*-* | x86_64-*-*)
+ 		       tsxldtrkintrin.h amxtileintrin.h amxint8intrin.h
+ 		       amxbf16intrin.h x86gprintrin.h uintrintrin.h
+ 		       hresetintrin.h keylockerintrin.h avxvnniintrin.h
+-		       mwaitintrin.h avx512fp16intrin.h avx512fp16vlintrin.h"
++		       mwaitintrin.h avx512fp16intrin.h avx512fp16vlintrin.h
++		       amxfp16intrin.h"
+ 	;;
+ ia64-*-*)
+ 	extra_headers=ia64intrin.h
+diff --git a/gcc/config/i386/amxfp16intrin.h b/gcc/config/i386/amxfp16intrin.h
+new file mode 100644
+index 000000000..6a114741a
+--- /dev/null
++++ b/gcc/config/i386/amxfp16intrin.h
+@@ -0,0 +1,46 @@
++/* Copyright (C) 2020 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   GCC is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   .  */
++
++#if !defined _IMMINTRIN_H_INCLUDED
++#error "Never use  directly; include  instead."
++#endif
++
++#ifndef _AMXFP16INTRIN_H_INCLUDED
++#define _AMXFP16INTRIN_H_INCLUDED
++
++#if defined(__x86_64__)
++#define _tile_dpfp16ps_internal(dst,src1,src2)			\
++  __asm__ volatile \
++  ("{tdpfp16ps\t%%tmm"#src2", %%tmm"#src1", %%tmm"#dst"|tdpfp16ps\t%%tmm"#dst", %%tmm"#src1", %%tmm"#src2"}" ::)
++
++#define _tile_dpfp16ps(dst,src1,src2)				\
++  _tile_dpfp16ps_internal (dst,src1,src2)
++
++#endif
++
++#ifdef __DISABLE_AMX_FP16__
++#undef __DISABLE_AMX_FP16__
++#pragma GCC pop_options
++#endif /* __DISABLE_AMX_FP16__ */
++
++#endif /* _AMXFP16INTRIN_H_INCLUDED */
+diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
+index 8b3dc2b1d..d6cd8d1bf 100644
+--- a/gcc/config/i386/cpuid.h
++++ b/gcc/config/i386/cpuid.h
+@@ -27,6 +27,7 @@
+ /* %eax */
+ #define bit_AVXVNNI	(1 << 4)
+ #define bit_AVX512BF16	(1 << 5)
++#define bit_AMX_FP16	(1 << 21)
+ #define bit_HRESET	(1 << 22)
+ 
+ /* %ecx */
+diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc
+index 3fec4c7e2..4269f29e6 100644
+--- a/gcc/config/i386/i386-c.cc
++++ b/gcc/config/i386/i386-c.cc
+@@ -633,6 +633,8 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+     def_or_undef (parse_in, "__WIDEKL__");
+   if (isa_flag2 & OPTION_MASK_ISA2_AVXVNNI)
+     def_or_undef (parse_in, "__AVXVNNI__");
++  if (isa_flag2 & OPTION_MASK_ISA2_AMX_FP16)
++    def_or_undef (parse_in, "__AMX_FP16__");
+   if (TARGET_IAMCU)
+     {
+       def_or_undef (parse_in, "__iamcu");
+diff --git a/gcc/config/i386/i386-isa.def b/gcc/config/i386/i386-isa.def
+index 83659d0be..c7305c01b 100644
+--- a/gcc/config/i386/i386-isa.def
++++ b/gcc/config/i386/i386-isa.def
+@@ -109,3 +109,4 @@ DEF_PTA(KL)
+ DEF_PTA(WIDEKL)
+ DEF_PTA(AVXVNNI)
+ DEF_PTA(AVX512FP16)
++DEF_PTA(AMX_FP16)
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 3df1f0c41..3edb7094e 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -230,7 +230,8 @@ static struct ix86_target_opts isa2_opts[] =
+   { "-mkl",		OPTION_MASK_ISA2_KL },
+   { "-mwidekl", 	OPTION_MASK_ISA2_WIDEKL },
+   { "-mavxvnni",	OPTION_MASK_ISA2_AVXVNNI },
+-  { "-mavx512fp16",	OPTION_MASK_ISA2_AVX512FP16 }
++  { "-mavx512fp16",	OPTION_MASK_ISA2_AVX512FP16 },
++  { "-mamx-fp16",       OPTION_MASK_ISA2_AMX_FP16 }
+ };
+ static struct ix86_target_opts isa_opts[] =
+ {
+@@ -1074,6 +1075,7 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[],
+     IX86_ATTR_ISA ("hreset", OPT_mhreset),
+     IX86_ATTR_ISA ("avxvnni",   OPT_mavxvnni),
+     IX86_ATTR_ISA ("avx512fp16", OPT_mavx512fp16),
++    IX86_ATTR_ISA ("amx-fp16", OPT_mamx_fp16),
+ 
+     /* enum options */
+     IX86_ATTR_ENUM ("fpmath=",	OPT_mfpmath_),
+diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
+index b154110d8..52c6f02ee 100644
+--- a/gcc/config/i386/i386.opt
++++ b/gcc/config/i386/i386.opt
+@@ -1226,3 +1226,7 @@ Enable conservative small loop unrolling.
+ mscatter
+ Target Alias(mtune-ctrl=, use_scatter, ^use_scatter)
+ Enable vectorization for scatter instruction.
++
++mamx-fp16
++Target Mask(ISA2_AMX_FP16) Var(ix86_isa_flags2) Save
++Support AMX-FP16 built-in functions and code generation.
+diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h
+index 6afd78c2b..0447ca4b2 100644
+--- a/gcc/config/i386/immintrin.h
++++ b/gcc/config/i386/immintrin.h
+@@ -128,4 +128,6 @@
+ 
+ #include 
+ 
++#include 
++
+ #endif /* _IMMINTRIN_H_INCLUDED */
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index 33a776a79..4ba9d34cd 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -7038,6 +7038,11 @@ Enable/disable the generation of the WIDEKL instructions.
+ @cindex @code{target("avxvnni")} function attribute, x86
+ Enable/disable the generation of the AVXVNNI instructions.
+ 
++@item amx-fp16
++@itemx no-amx-fp16
++@cindex @code{target("amx-fp16")} function attribute, x86
++Enable/disable the generation of the AMX-FP16 instructions.
++
+ @item cld
+ @itemx no-cld
+ @cindex @code{target("cld")} function attribute, x86
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 3a48655e5..d25f13217 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1428,7 +1428,7 @@ See RS/6000 and PowerPC Options.
+ -mavx5124fmaps  -mavx512vnni  -mavx5124vnniw  -mprfchw  -mrdpid @gol
+ -mrdseed  -msgx -mavx512vp2intersect -mserialize -mtsxldtrk@gol
+ -mamx-tile  -mamx-int8  -mamx-bf16 -muintr -mhreset -mavxvnni@gol
+--mavx512fp16 @gol
++-mavx512fp16 -mamx-fp16 @gol
+ -mcldemote  -mms-bitfields  -mno-align-stringops  -minline-all-stringops @gol
+ -minline-stringops-dynamically  -mstringop-strategy=@var{alg} @gol
+ -mkl -mwidekl @gol
+@@ -32442,6 +32442,9 @@ preferred alignment to @option{-mpreferred-stack-boundary=2}.
+ @need 200
+ @itemx -mwidekl
+ @opindex mwidekl
++@need 200
++@itemx -mamx-fp16
++@opindex mamx-fp16
+ These switches enable the use of instructions in the MMX, SSE,
+ SSE2, SSE3, SSSE3, SSE4, SSE4A, SSE4.1, SSE4.2, AVX, AVX2, AVX512F, AVX512PF,
+ AVX512ER, AVX512CD, AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, SHA,
+@@ -32451,8 +32454,8 @@ WBNOINVD, FMA4, PREFETCHW, RDPID, PREFETCHWT1, RDSEED, SGX, XOP, LWP,
+ XSAVEOPT, XSAVEC, XSAVES, RTM, HLE, TBM, MWAITX, CLZERO, PKU, AVX512VBMI2,
+ GFNI, VAES, WAITPKG, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B, AVX512BF16,
+ ENQCMD, AVX512VPOPCNTDQ, AVX5124FMAPS, AVX512VNNI, AVX5124VNNIW, SERIALIZE,
+-UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16
+-or CLDEMOTE extended instruction sets. Each has a corresponding
++UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16,
++AMX-FP16 or CLDEMOTE extended instruction sets. Each has a corresponding
+ @option{-mno-} option to disable use of these instructions.
+ 
+ These extensions are also available as built-in functions: see
+diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
+index 71c04841d..b64b62dee 100644
+--- a/gcc/doc/sourcebuild.texi
++++ b/gcc/doc/sourcebuild.texi
+@@ -2472,6 +2472,9 @@ Target supports the execution of @code{amx-int8} instructions.
+ @item amx_bf16
+ Target supports the execution of @code{amx-bf16} instructions.
+ 
++@item amx_fp16
++Target supports the execution of @code{amx-fp16} instructions.
++
+ @item cell_hw
+ Test system can execute AltiVec and Cell PPU instructions.
+ 
+diff --git a/gcc/testsuite/g++.dg/other/i386-2.C b/gcc/testsuite/g++.dg/other/i386-2.C
+index fba3d1ac6..57a6357aa 100644
+--- a/gcc/testsuite/g++.dg/other/i386-2.C
++++ b/gcc/testsuite/g++.dg/other/i386-2.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+-/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt  -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16" } */
++/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt  -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
+ 
+ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
+    xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
+diff --git a/gcc/testsuite/g++.dg/other/i386-3.C b/gcc/testsuite/g++.dg/other/i386-3.C
+index 5cc0fa834..1947547d6 100644
+--- a/gcc/testsuite/g++.dg/other/i386-3.C
++++ b/gcc/testsuite/g++.dg/other/i386-3.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+-/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16" } */
++/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
+ 
+ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
+    xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
+diff --git a/gcc/testsuite/gcc.target/i386/amx-check.h b/gcc/testsuite/gcc.target/i386/amx-check.h
+index 6fff5ff46..27dd37bf9 100644
+--- a/gcc/testsuite/gcc.target/i386/amx-check.h
++++ b/gcc/testsuite/gcc.target/i386/amx-check.h
+@@ -213,6 +213,9 @@ main ()
+ #ifdef AMX_BF16
+       && __builtin_cpu_supports ("amx-bf16")
+ #endif
++#ifdef AMX_FP16
++      && __builtin_cpu_supports ("amx-fp16")
++#endif
+ #ifdef __linux__
+       && request_perm_xtile_data ()
+ #endif
+diff --git a/gcc/testsuite/gcc.target/i386/amx-helper.h b/gcc/testsuite/gcc.target/i386/amx-helper.h
+new file mode 100644
+index 000000000..fe24d7067
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amx-helper.h
+@@ -0,0 +1,61 @@
++#ifndef AMX_HELPER_H_INCLUDED
++#define AMX_HELPER_H_INCLUDED
++#if defined(AMX_FP16)
++#include 
++#include 
++#endif
++#include "amx-check.h"
++
++typedef union
++{
++  _Float16 f16;
++  uint16_t u;
++} union16f_uw;
++
++#if defined(AMX_FP16)
++/* Transformation functions between fp16/float */
++static uint16_t make_f32_fp16 (float f)
++{
++  union16f_uw tmp;
++  __m128 b = _mm_set_ss (f);
++  __m128h a;
++  tmp.f16 = _mm_cvtsh_h (_mm_cvtss_sh (a, b));
++  return tmp.u;
++}
++
++static float make_fp16_f32 (uint16_t fp)
++{
++  union16f_uw tmp;
++  tmp.u = fp;
++  __m128h b = _mm_set_sh (tmp.f16);
++  __m128 a;
++  return _mm_cvtss_f32 (_mm_cvtsh_ss (a, b));
++}
++
++/* Init tile buffer with fp16 pairs */
++void init_fp16_max_tile_buffer (uint8_t* buf)
++{
++  int i, j;
++  uint16_t* ptr = (uint16_t *) buf;
++
++  for (i = 0; i < 16; i++)
++    for (j = 0; j < 32; j++)
++    {
++      float f = 2.5f * i + 1.25f * j;
++      ptr[i * 32 + j] = make_f32_fp16 (f);
++    }
++}
++
++/* Init tile fp16 pair buffer with zero */
++void init_fp16_max_tile_zero_buffer (uint8_t* buf)
++{
++  int i, j;
++  uint16_t* ptr = (uint16_t *) buf;
++
++  for (i = 0; i < 16; i++)
++    for (j = 0; j < 32; j++)
++      ptr[i * 32 + j] = make_f32_fp16 (0.0f);
++}
++#endif
++
++#endif
+diff --git a/gcc/testsuite/gcc.target/i386/amxfp16-asmatt-1.c b/gcc/testsuite/gcc.target/i386/amxfp16-asmatt-1.c
+new file mode 100644
+index 000000000..09ae6d408
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amxfp16-asmatt-1.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-O2 -mamx-fp16" } */
++/* { dg-final { scan-assembler "tdpfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1"  } } */
++#include 
++
++#define TMM1 1
++#define TMM2 2
++#define TMM3 3
++
++void TEST ()
++{
++  _tile_dpfp16ps (TMM1, TMM2, TMM3);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/amxfp16-asmintel-1.c b/gcc/testsuite/gcc.target/i386/amxfp16-asmintel-1.c
+new file mode 100644
+index 000000000..a8dff945f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amxfp16-asmintel-1.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-require-effective-target masm_intel } */
++/* { dg-options "-O2 -mamx-fp16 -masm=intel" } */
++/* { dg-final { scan-assembler "tdpfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3"  } } */
++#include 
++
++void TEST ()
++{
++  _tile_dpfp16ps (1, 2, 3);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/amxfp16-dpfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxfp16-dpfp16ps-2.c
+new file mode 100644
+index 000000000..2d359a689
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amxfp16-dpfp16ps-2.c
+@@ -0,0 +1,57 @@
++/* { dg-do run { target { ! ia32 } } } */
++/* { dg-require-effective-target amx_tile } */
++/* { dg-require-effective-target amx_fp16 } */
++/* { dg-require-effective-target avx512fp16 } */
++/* { dg-options "-O2 -mamx-tile -mamx-fp16 -mavx512fp16" } */
++#define AMX_FP16
++#define DO_TEST test_amx_fp16_dpfp16ps
++void test_amx_fp16_dpfp16ps ();
++#include "amx-helper.h"
++
++void calc_matrix_dpfp16ps (__tile *dst, __tile *src1, __tile *src2)
++{
++  uint16_t *src1_buf = (uint16_t *)src1->buf;
++  uint16_t *src2_buf = (uint16_t *)src2->buf;
++  float *dst_buf = (float *)dst->buf;
++  
++  int M = src1->rows;
++  int N = src1->colsb / 4;
++  int K = src2->colsb / 4;
++  int i, j, k, t;
++
++  for (i = 0; i < M; i++)
++    for (j = 0; j < N; j++)
++      for (k = 0; k < K; k++)
++	for (t = 0; t < 2; t+=2)
++	  {    
++	    dst_buf[i * K + k] += 
++	      (make_fp16_f32 (src1_buf[i * 2 * N + 2 * j + t]) *
++	      make_fp16_f32 (src2_buf[j * 2 * K + 2 * k + t])) +
++	      (make_fp16_f32 (src1_buf[i * 2 * N + 2 * j + t + 1]) *
++	      make_fp16_f32 (src2_buf[j * 2 * K + 2 * k + t + 1]));
++	  }
++
++}
++
++void test_amx_fp16_dpfp16ps ()
++{
++  __tilecfg_u cfg;
++  __tile dst, dst_ref, src1, src2;
++  uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024];
++
++  init_fp16_max_tile_buffer (tmp_dst_buf);
++  init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf);
++
++  init_tile_config (&cfg);
++  init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf);
++  init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf);
++  init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf);
++
++  calc_matrix_dpfp16ps (&dst, &src1, &src2);
++  
++  _tile_dpfp16ps (1, 2, 3);
++  _tile_stored (1, dst_ref.buf, _STRIDE);
++
++  if (!check_float_tile_register (&dst_ref, &dst))
++    abort ();
++}
+diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+index f34e7a977..b00cfff03 100644
+--- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc
++++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+@@ -80,6 +80,7 @@ extern void test_keylocker (void)		__attribute__((__target__("kl")));
+ extern void test_widekl (void)			__attribute__((__target__("widekl")));
+ extern void test_avxvnni (void)			__attribute__((__target__("avxvnni")));
+ extern void test_avx512fp16 (void)		__attribute__((__target__("avx512fp16")));
++extern void test_amx_fp16 (void)		__attribute__((__target__("amx-fp16")));
+ 
+ extern void test_no_sgx (void)			__attribute__((__target__("no-sgx")));
+ extern void test_no_avx5124fmaps(void)		__attribute__((__target__("no-avx5124fmaps")));
+@@ -161,6 +162,7 @@ extern void test_no_keylocker (void)		__attribute__((__target__("no-kl")));
+ extern void test_no_widekl (void)		__attribute__((__target__("no-widekl")));
+ extern void test_no_avxvnni (void)		__attribute__((__target__("no-avxvnni")));
+ extern void test_no_avx512fp16 (void)		__attribute__((__target__("no-avx512fp16")));
++extern void test_no_amx_fp16 (void)		__attribute__((__target__("no-amx-fp16")));
+ 
+ extern void test_arch_nocona (void)		__attribute__((__target__("arch=nocona")));
+ extern void test_arch_core2 (void)		__attribute__((__target__("arch=core2")));
+diff --git a/gcc/testsuite/gcc.target/i386/sse-12.c b/gcc/testsuite/gcc.target/i386/sse-12.c
+index 375d4d1b4..9ab4a7e0c 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-12.c
++++ b/gcc/testsuite/gcc.target/i386/sse-12.c
+@@ -3,7 +3,7 @@
+    popcntintrin.h gfniintrin.h and mm_malloc.h are usable
+    with -O -std=c89 -pedantic-errors.  */
+ /* { dg-do compile } */
+-/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512bw -mavx512dq -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni" } */
++/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512bw -mavx512dq -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mamx-fp16" } */
+ 
+ #include 
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
+index e285c307d..a1e453a98 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-13.c
++++ b/gcc/testsuite/gcc.target/i386/sse-13.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512vl -mavx512dq -mavx512bw -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16" } */
++/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512vl -mavx512dq -mavx512bw -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
+ /* { dg-add-options bind_pic_locally } */
+ 
+ #include 
+diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
+index f41493b93..eaa1a8d81 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-14.c
++++ b/gcc/testsuite/gcc.target/i386/sse-14.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -mavx512vl -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16" } */
++/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -mavx512vl -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
+ /* { dg-add-options bind_pic_locally } */
+ 
+ #include 
+diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
+index 31492ef36..19afe639d 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-22.c
++++ b/gcc/testsuite/gcc.target/i386/sse-22.c
+@@ -103,7 +103,7 @@
+ 
+ 
+ #ifndef DIFFERENT_PRAGMAS
+-#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,avx512vl,avx512bw,avx512dq,avx512vbmi,avx512vbmi2,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16")
++#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,avx512vl,avx512bw,avx512dq,avx512vbmi,avx512vbmi2,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16")
+ #endif
+ 
+ /* Following intrinsics require immediate arguments.  They
+@@ -220,7 +220,7 @@ test_4 (_mm_cmpestrz, int, __m128i, int, __m128i, int, 1)
+ 
+ /* immintrin.h (AVX/AVX2/RDRND/FSGSBASE/F16C/RTM/AVX512F/SHA) */
+ #ifdef DIFFERENT_PRAGMAS
+-#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,avx512f,avx512er,avx512cd,avx512pf,sha,avx512vl,avx512bw,avx512dq,avx512ifma,avx512vbmi,avx512vbmi2,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16")
++#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,avx512f,avx512er,avx512cd,avx512pf,sha,avx512vl,avx512bw,avx512dq,avx512ifma,avx512vbmi,avx512vbmi2,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16")
+ #endif
+ #include 
+ test_1 (_cvtss_sh, unsigned short, float, 1)
+diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
+index b398fd144..151201d97 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-23.c
++++ b/gcc/testsuite/gcc.target/i386/sse-23.c
+@@ -843,6 +843,6 @@
+ #define __builtin_ia32_vpclmulqdq_v2di(A, B, C)  __builtin_ia32_vpclmulqdq_v2di(A, B, 1) 
+ #define __builtin_ia32_vpclmulqdq_v8di(A, B, C)  __builtin_ia32_vpclmulqdq_v8di(A, B, 1) 
+ 
+-#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,xsavec,xsaves,clflushopt,avx512bw,avx512dq,avx512vl,avx512vbmi,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,avx512vbmi2,vpclmulqdq,avx512bitalg,pconfig,wbnoinvd,avx512bf16,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16")
++#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,xsavec,xsaves,clflushopt,avx512bw,avx512dq,avx512vl,avx512vbmi,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,avx512vbmi2,vpclmulqdq,avx512bitalg,pconfig,wbnoinvd,avx512bf16,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16")
+ 
+ #include 
+diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
+index c858bd93b..0d83c780c 100644
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -9972,6 +9972,17 @@ proc check_effective_target_amx_bf16 { } {
+     } "-mamx-bf16" ]
+ }
+ 
++# Return 1 if amx-fp16 instructions can be compiled.
++proc check_effective_target_amx_fp16 { } {
++    return [check_no_compiler_messages amx_fp16 object {
++	void
++	foo ()
++	{
++	    __asm__ volatile ("tdpfp16ps\t%%tmm1, %%tmm2, %%tmm3" ::);
++	}
++    } "-mamx-fp16" ]
++}
++
+ # Return 1 if vpclmulqdq instructions can be compiled.
+ proc check_effective_target_vpclmulqdq { } {
+     return [check_no_compiler_messages vpclmulqdq object {
+-- 
+2.31.1
+
diff --git a/0273-Support-Intel-prefetchit0-t1.patch b/0273-Support-Intel-prefetchit0-t1.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a6c777f5159518432f6dae2f5ec52f9173865edc
--- /dev/null
+++ b/0273-Support-Intel-prefetchit0-t1.patch
@@ -0,0 +1,902 @@
+From 42a38c8abaa28f67e26b9af3f434fe0107894e7d Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Fri, 4 Nov 2022 15:01:05 +0800
+Subject: [PATCH 19/28] Support Intel prefetchit0/t1
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h (get_available_features):
+	Detect PREFETCHI.
+	* common/config/i386/i386-common.cc
+	(OPTION_MASK_ISA2_PREFETCHI_SET,
+	OPTION_MASK_ISA2_PREFETCHI_UNSET): New.
+	(ix86_handle_option): Handle -mprefetchi.
+	* common/config/i386/i386-cpuinfo.h
+	(enum processor_features): Add FEATURE_PREFETCHI.
+	* common/config/i386/i386-isas.h: Add ISA_NAME_TABLE_ENTRY
+	for prefetchi.
+	* config.gcc: Add prfchiintrin.h.
+	* config/i386/cpuid.h (bit_PREFETCHI): New.
+	* config/i386/i386-builtin-types.def:
+	Add DEF_FUNCTION_TYPE (VOID, PCVOID, INT)
+	and DEF_FUNCTION_TYPE (VOID, PCVOID, INT, INT, INT).
+	* config/i386/i386-builtin.def (BDESC): Add new builtins.
+	* config/i386/i386-c.cc (ix86_target_macros_internal):
+	Define __PREFETCHI__.
+	* config/i386/i386-expand.cc: Handle new builtins.
+	* config/i386/i386-isa.def (PREFETCHI):
+	Add DEF_PTA(PREFETCHI).
+	* config/i386/i386-options.cc
+	(ix86_valid_target_attribute_inner_p): Handle prefetchi.
+	* config/i386/i386.md (prefetchi): New define_insn.
+	* config/i386/i386.opt: Add option -mprefetchi.
+	* config/i386/predicates.md (local_func_symbolic_operand):
+	New predicates.
+	* config/i386/x86gprintrin.h: Include prfchiintrin.h.
+	* config/i386/xmmintrin.h (enum _mm_hint): New enum for
+	prefetchi.
+	(_mm_prefetch): Handle the highest bit of enum.
+	* doc/extend.texi: Document prefetchi.
+	* doc/invoke.texi: Document -mprefetchi.
+	* doc/sourcebuild.texi: Document target prefetchi.
+	* config/i386/prfchiintrin.h: New file.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.dg/other/i386-2.C: Add -mprefetchi.
+	* g++.dg/other/i386-3.C: Ditto.
+	* gcc.target/i386/avx-1.c: Ditto.
+	* gcc.target/i386/funcspec-56.inc: Add new target attribute.
+	* gcc.target/i386/sse-13.c: Add -mprefetchi.
+	* gcc.target/i386/sse-23.c: Ditto.
+	* gcc.target/i386/x86gprintrin-1.c: Ditto.
+	* gcc.target/i386/x86gprintrin-2.c: Ditto.
+	* gcc.target/i386/x86gprintrin-3.c: Ditto.
+	* gcc.target/i386/x86gprintrin-4.c: Ditto.
+	* gcc.target/i386/x86gprintrin-5.c: Ditto.
+	* gcc.target/i386/prefetchi-1.c: New test.
+	* gcc.target/i386/prefetchi-2.c: Ditto.
+	* gcc.target/i386/prefetchi-3.c: Ditto.
+	* gcc.target/i386/prefetchi-4.c: Ditto.
+
+Co-authored-by: Hongtao Liu 
+---
+ gcc/common/config/i386/cpuinfo.h              |  2 +
+ gcc/common/config/i386/i386-common.cc         | 15 ++++
+ gcc/common/config/i386/i386-cpuinfo.h         |  1 +
+ gcc/common/config/i386/i386-isas.h            |  1 +
+ gcc/config.gcc                                |  2 +-
+ gcc/config/i386/cpuid.h                       |  1 +
+ gcc/config/i386/i386-builtin-types.def        |  4 +
+ gcc/config/i386/i386-builtin.def              |  4 +
+ gcc/config/i386/i386-c.cc                     |  2 +
+ gcc/config/i386/i386-expand.cc                | 77 +++++++++++++++++++
+ gcc/config/i386/i386-isa.def                  |  1 +
+ gcc/config/i386/i386-options.cc               |  4 +-
+ gcc/config/i386/i386.md                       | 23 ++++++
+ gcc/config/i386/i386.opt                      |  4 +
+ gcc/config/i386/predicates.md                 | 15 ++++
+ gcc/config/i386/prfchiintrin.h                | 49 ++++++++++++
+ gcc/config/i386/x86gprintrin.h                |  2 +
+ gcc/config/i386/xmmintrin.h                   |  7 +-
+ gcc/doc/extend.texi                           |  5 ++
+ gcc/doc/invoke.texi                           |  7 +-
+ gcc/doc/sourcebuild.texi                      |  3 +
+ gcc/testsuite/g++.dg/other/i386-2.C           |  2 +-
+ gcc/testsuite/g++.dg/other/i386-3.C           |  2 +-
+ gcc/testsuite/gcc.target/i386/avx-1.c         |  4 +-
+ gcc/testsuite/gcc.target/i386/funcspec-56.inc |  2 +
+ gcc/testsuite/gcc.target/i386/prefetchi-1.c   | 40 ++++++++++
+ gcc/testsuite/gcc.target/i386/prefetchi-2.c   | 26 +++++++
+ gcc/testsuite/gcc.target/i386/prefetchi-3.c   | 20 +++++
+ gcc/testsuite/gcc.target/i386/prefetchi-4.c   | 19 +++++
+ gcc/testsuite/gcc.target/i386/sse-13.c        |  4 +-
+ gcc/testsuite/gcc.target/i386/sse-23.c        |  4 +-
+ .../gcc.target/i386/x86gprintrin-1.c          |  2 +-
+ .../gcc.target/i386/x86gprintrin-2.c          |  2 +-
+ .../gcc.target/i386/x86gprintrin-3.c          |  2 +-
+ .../gcc.target/i386/x86gprintrin-4.c          |  2 +-
+ .../gcc.target/i386/x86gprintrin-5.c          |  2 +-
+ 36 files changed, 343 insertions(+), 19 deletions(-)
+ create mode 100644 gcc/config/i386/prfchiintrin.h
+ create mode 100644 gcc/testsuite/gcc.target/i386/prefetchi-1.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/prefetchi-2.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/prefetchi-3.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/prefetchi-4.c
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 5951a30aa..f17e88144 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -772,6 +772,8 @@ get_available_features (struct __processor_model *cpu_model,
+ 	  __cpuid_count (7, 1, eax, ebx, ecx, edx);
+ 	  if (eax & bit_HRESET)
+ 	    set_feature (FEATURE_HRESET);
++	  if (edx & bit_PREFETCHI)
++	    set_feature (FEATURE_PREFETCHI);
+ 	  if (avx_usable)
+ 	    {
+ 	      if (eax & bit_AVXVNNI)
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index 922db33ee..c8cf532cf 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -108,6 +108,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define OPTION_MASK_ISA2_AMX_INT8_SET OPTION_MASK_ISA2_AMX_INT8
+ #define OPTION_MASK_ISA2_AMX_BF16_SET OPTION_MASK_ISA2_AMX_BF16
+ #define OPTION_MASK_ISA2_AMX_FP16_SET OPTION_MASK_ISA2_AMX_FP16
++#define OPTION_MASK_ISA2_PREFETCHI_SET OPTION_MASK_ISA2_PREFETCHI
+ 
+ /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
+    as -msse4.2.  */
+@@ -277,6 +278,7 @@ along with GCC; see the file COPYING3.  If not see
+   (OPTION_MASK_ISA2_KL | OPTION_MASK_ISA2_WIDEKL_UNSET)
+ #define OPTION_MASK_ISA2_WIDEKL_UNSET OPTION_MASK_ISA2_WIDEKL
+ #define OPTION_MASK_ISA2_AMX_FP16_UNSET OPTION_MASK_ISA2_AMX_FP16
++#define OPTION_MASK_ISA2_PREFETCHI_UNSET OPTION_MASK_ISA2_PREFETCHI
+ 
+ /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should the same
+    as -mno-sse4.1. */
+@@ -1140,6 +1142,19 @@ ix86_handle_option (struct gcc_options *opts,
+ 	}
+       return true;
+ 
++    case OPT_mprefetchi:
++      if (value)
++	{
++	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_PREFETCHI_SET;
++	  opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_PREFETCHI_SET;
++	}
++      else
++	{
++	  opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_PREFETCHI_UNSET;
++	  opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_PREFETCHI_UNSET;
++	}
++      return true;
++
+     case OPT_mfma:
+       if (value)
+ 	{
+diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h
+index 8f22897de..95b078acf 100644
+--- a/gcc/common/config/i386/i386-cpuinfo.h
++++ b/gcc/common/config/i386/i386-cpuinfo.h
+@@ -241,6 +241,7 @@ enum processor_features
+   FEATURE_X86_64_V3,
+   FEATURE_X86_64_V4,
+   FEATURE_AMX_FP16,
++  FEATURE_PREFETCHI,
+   CPU_FEATURE_MAX
+ };
+ 
+diff --git a/gcc/common/config/i386/i386-isas.h b/gcc/common/config/i386/i386-isas.h
+index 95bab6da2..6caf06249 100644
+--- a/gcc/common/config/i386/i386-isas.h
++++ b/gcc/common/config/i386/i386-isas.h
+@@ -176,4 +176,5 @@ ISA_NAMES_TABLE_START
+   ISA_NAMES_TABLE_ENTRY("x86-64-v3", FEATURE_X86_64_V3, P_X86_64_V3, NULL)
+   ISA_NAMES_TABLE_ENTRY("x86-64-v4", FEATURE_X86_64_V4, P_X86_64_V4, NULL)
+   ISA_NAMES_TABLE_ENTRY("amx-fp16", FEATURE_AMX_FP16, P_NONE, "-mamx-fp16")
++  ISA_NAMES_TABLE_ENTRY("prefetchi", FEATURE_PREFETCHI, P_NONE, "-mprefetchi")
+ ISA_NAMES_TABLE_END
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index e2b4a23dc..81012c651 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -424,7 +424,7 @@ i[34567]86-*-* | x86_64-*-*)
+ 		       amxbf16intrin.h x86gprintrin.h uintrintrin.h
+ 		       hresetintrin.h keylockerintrin.h avxvnniintrin.h
+ 		       mwaitintrin.h avx512fp16intrin.h avx512fp16vlintrin.h
+-		       amxfp16intrin.h"
++		       amxfp16intrin.h prfchiintrin.h"
+ 	;;
+ ia64-*-*)
+ 	extra_headers=ia64intrin.h
+diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
+index d6cd8d1bf..21100149a 100644
+--- a/gcc/config/i386/cpuid.h
++++ b/gcc/config/i386/cpuid.h
+@@ -50,6 +50,7 @@
+ 
+ /* %edx */
+ #define bit_CMPXCHG8B	(1 << 8)
++#define bit_PREFETCHI	(1 << 14)
+ #define bit_CMOV	(1 << 15)
+ #define bit_MMX		(1 << 23)
+ #define bit_FXSAVE	(1 << 24)
+diff --git a/gcc/config/i386/i386-builtin-types.def b/gcc/config/i386/i386-builtin-types.def
+index e33f06ab3..ff3b0af84 100644
+--- a/gcc/config/i386/i386-builtin-types.def
++++ b/gcc/config/i386/i386-builtin-types.def
+@@ -1387,3 +1387,7 @@ DEF_FUNCTION_TYPE (V32HF, V32HF)
+ DEF_FUNCTION_TYPE_ALIAS (V8HF_FTYPE_V8HF, ROUND)
+ DEF_FUNCTION_TYPE_ALIAS (V16HF_FTYPE_V16HF, ROUND)
+ DEF_FUNCTION_TYPE_ALIAS (V32HF_FTYPE_V32HF, ROUND)
++
++# PREFETCHI builtins
++DEF_FUNCTION_TYPE (VOID, PCVOID, INT)
++DEF_FUNCTION_TYPE (VOID, PCVOID, INT, INT, INT)
+diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
+index 2b1d6c733..d3ab21eea 100644
+--- a/gcc/config/i386/i386-builtin.def
++++ b/gcc/config/i386/i386-builtin.def
+@@ -469,6 +469,10 @@ BDESC (0, OPTION_MASK_ISA2_WIDEKL, CODE_FOR_nothing, "__builtin_ia32_aesdecwide2
+ BDESC (0, OPTION_MASK_ISA2_WIDEKL, CODE_FOR_nothing, "__builtin_ia32_aesencwide128kl_u8", IX86_BUILTIN_AESENCWIDE128KLU8, UNKNOWN, (int) UINT8_FTYPE_PV2DI_PCV2DI_PCVOID)
+ BDESC (0, OPTION_MASK_ISA2_WIDEKL, CODE_FOR_nothing, "__builtin_ia32_aesencwide256kl_u8", IX86_BUILTIN_AESENCWIDE256KLU8, UNKNOWN, (int) UINT8_FTYPE_PV2DI_PCV2DI_PCVOID)
+ 
++/* PREFETCHI */
++BDESC (0, 0, CODE_FOR_prefetchi, "__builtin_ia32_prefetchi", IX86_BUILTIN_PREFETCHI, UNKNOWN, (int) VOID_FTYPE_PCVOID_INT)
++BDESC (0, 0, CODE_FOR_nothing, "__builtin_ia32_prefetch", IX86_BUILTIN_PREFETCH, UNKNOWN, (int) VOID_FTYPE_PCVOID_INT_INT_INT)
++
+ BDESC_END (SPECIAL_ARGS, PURE_ARGS)
+ 
+ /* AVX */
+diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc
+index 4269f29e6..00880bd17 100644
+--- a/gcc/config/i386/i386-c.cc
++++ b/gcc/config/i386/i386-c.cc
+@@ -635,6 +635,8 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+     def_or_undef (parse_in, "__AVXVNNI__");
+   if (isa_flag2 & OPTION_MASK_ISA2_AMX_FP16)
+     def_or_undef (parse_in, "__AMX_FP16__");
++  if (isa_flag2 & OPTION_MASK_ISA2_PREFETCHI)
++    def_or_undef (parse_in, "__PREFETCHI__");
+   if (TARGET_IAMCU)
+     {
+       def_or_undef (parse_in, "__iamcu");
+diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
+index 77dda5dd4..bc2e61980 100644
+--- a/gcc/config/i386/i386-expand.cc
++++ b/gcc/config/i386/i386-expand.cc
+@@ -12850,6 +12850,83 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
+ 	return target;
+       }
+ 
++    case IX86_BUILTIN_PREFETCH:
++      {
++	arg0 = CALL_EXPR_ARG (exp, 0); // const void *
++	arg1 = CALL_EXPR_ARG (exp, 1); // const int
++	arg2 = CALL_EXPR_ARG (exp, 2); // const int
++	arg3 = CALL_EXPR_ARG (exp, 3); // const int
++
++	op0 = expand_normal (arg0);
++	op1 = expand_normal (arg1);
++	op2 = expand_normal (arg2);
++	op3 = expand_normal (arg3);
++
++	if (!CONST_INT_P (op1) || !CONST_INT_P (op2) || !CONST_INT_P (op3))
++	  {
++	    error ("second, third and fourth argument must be a const");
++	    return const0_rtx;
++	  }
++
++	if (INTVAL (op3) == 1)
++	  {
++	    if (TARGET_64BIT
++		&& local_func_symbolic_operand (op0, GET_MODE (op0)))
++	      emit_insn (gen_prefetchi (op0, op2));
++	    else
++	      {
++		warning (0, "instruction prefetch applies when in 64-bit mode"
++			    " with RIP-relative addressing and"
++			    " option %<-mprefetchi%>;"
++			    " they stay NOPs otherwise");
++		emit_insn (gen_nop ());
++	      }
++	  }
++	else
++	  {
++	    if (!address_operand (op0, VOIDmode))
++	      {
++		op0 = convert_memory_address (Pmode, op0);
++		op0 = copy_addr_to_reg (op0);
++	      }
++	    emit_insn (gen_prefetch (op0, op1, op2));
++	  }
++
++	return 0;
++      }
++
++    case IX86_BUILTIN_PREFETCHI:
++      {
++	arg0 = CALL_EXPR_ARG (exp, 0); // const void *
++	arg1 = CALL_EXPR_ARG (exp, 1); // const int
++
++	op0 = expand_normal (arg0);
++	op1 = expand_normal (arg1);
++
++	if (!CONST_INT_P (op1))
++	  {
++	    error ("second argument must be a const");
++	    return const0_rtx;
++	  }
++
++	/* GOT/PLT_PIC should not be available for instruction prefetch.
++	   It must be real instruction address.  */
++	if (TARGET_64BIT
++	    && local_func_symbolic_operand (op0, GET_MODE (op0)))
++	  emit_insn (gen_prefetchi (op0, op1));
++	else
++	  {
++	    /* Ignore the hint.  */
++	    warning (0, "instruction prefetch applies when in 64-bit mode"
++			" with RIP-relative addressing and"
++			" option %<-mprefetchi%>;"
++			" they stay NOPs otherwise");
++	    emit_insn (gen_nop ());
++	  }
++
++	return 0;
++      }
++
+     case IX86_BUILTIN_VEC_INIT_V2SI:
+     case IX86_BUILTIN_VEC_INIT_V4HI:
+     case IX86_BUILTIN_VEC_INIT_V8QI:
+diff --git a/gcc/config/i386/i386-isa.def b/gcc/config/i386/i386-isa.def
+index c7305c01b..744a7df85 100644
+--- a/gcc/config/i386/i386-isa.def
++++ b/gcc/config/i386/i386-isa.def
+@@ -110,3 +110,4 @@ DEF_PTA(WIDEKL)
+ DEF_PTA(AVXVNNI)
+ DEF_PTA(AVX512FP16)
+ DEF_PTA(AMX_FP16)
++DEF_PTA(PREFETCHI)
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 3edb7094e..724375f02 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -231,7 +231,8 @@ static struct ix86_target_opts isa2_opts[] =
+   { "-mwidekl", 	OPTION_MASK_ISA2_WIDEKL },
+   { "-mavxvnni",	OPTION_MASK_ISA2_AVXVNNI },
+   { "-mavx512fp16",	OPTION_MASK_ISA2_AVX512FP16 },
+-  { "-mamx-fp16",       OPTION_MASK_ISA2_AMX_FP16 }
++  { "-mamx-fp16",       OPTION_MASK_ISA2_AMX_FP16 },
++  { "-mprefetchi",      OPTION_MASK_ISA2_PREFETCHI }
+ };
+ static struct ix86_target_opts isa_opts[] =
+ {
+@@ -1076,6 +1077,7 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[],
+     IX86_ATTR_ISA ("avxvnni",   OPT_mavxvnni),
+     IX86_ATTR_ISA ("avx512fp16", OPT_mavx512fp16),
+     IX86_ATTR_ISA ("amx-fp16", OPT_mamx_fp16),
++    IX86_ATTR_ISA ("prefetchi",   OPT_mprefetchi),
+ 
+     /* enum options */
+     IX86_ATTR_ENUM ("fpmath=",	OPT_mfpmath_),
+diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
+index 71691f598..f08c2cfb1 100644
+--- a/gcc/config/i386/i386.md
++++ b/gcc/config/i386/i386.md
+@@ -329,6 +329,9 @@
+ 
+   ;; For HRESET support
+   UNSPECV_HRESET
++
++  ;; For PREFETCHI support
++  UNSPECV_PREFETCHI
+ ])
+ 
+ ;; Constants to represent rounding modes in the ROUND instruction
+@@ -22907,6 +22910,26 @@
+ 	(symbol_ref "memory_address_length (operands[0], false)"))
+    (set_attr "memory" "none")])
+ 
++(define_insn "prefetchi"
++  [(unspec_volatile [(match_operand 0 "local_func_symbolic_operand" "p")
++		     (match_operand:SI 1 "const_int_operand")]
++		    UNSPECV_PREFETCHI)]
++  "TARGET_PREFETCHI && TARGET_64BIT"
++{
++  static const char * const patterns[2] = {
++    "prefetchit1\t%0", "prefetchit0\t%0"
++  };
++
++  int locality = INTVAL (operands[1]);
++  gcc_assert (IN_RANGE (locality, 2, 3));
++
++  return patterns[locality - 2];
++}
++  [(set_attr "type" "sse")
++   (set (attr "length_address")
++	(symbol_ref "memory_address_length (operands[0], false)"))
++   (set_attr "memory" "none")])
++
+ (define_expand "stack_protect_set"
+   [(match_operand 0 "memory_operand")
+    (match_operand 1 "memory_operand")]
+diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
+index 52c6f02ee..50cd114f6 100644
+--- a/gcc/config/i386/i386.opt
++++ b/gcc/config/i386/i386.opt
+@@ -1230,3 +1230,7 @@ Enable vectorization for scatter instruction.
+ mamx-fp16
+ Target Mask(ISA2_AMX_FP16) Var(ix86_isa_flags2) Save
+ Support AMX-FP16 built-in functions and code generation.
++
++mprefetchi
++Target Mask(ISA2_PREFETCHI) Var(ix86_isa_flags2) Save
++Support PREFETCHI built-in functions and code generation.
+diff --git a/gcc/config/i386/predicates.md b/gcc/config/i386/predicates.md
+index ac02c61ac..774178b78 100644
+--- a/gcc/config/i386/predicates.md
++++ b/gcc/config/i386/predicates.md
+@@ -610,6 +610,21 @@
+   return false;
+ })
+ 
++(define_predicate "local_func_symbolic_operand"
++  (match_operand 0 "local_symbolic_operand")
++{
++  if (GET_CODE (op) == CONST
++      && GET_CODE (XEXP (op, 0)) == PLUS
++      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
++    op = XEXP (XEXP (op, 0), 0);
++
++  if (GET_CODE (op) == SYMBOL_REF
++      && !SYMBOL_REF_FUNCTION_P (op))
++    return false;
++
++  return true;
++})
++
+ ;; Test for a legitimate @GOTOFF operand.
+ ;;
+ ;; VxWorks does not impose a fixed gap between segments; the run-time
+diff --git a/gcc/config/i386/prfchiintrin.h b/gcc/config/i386/prfchiintrin.h
+new file mode 100644
+index 000000000..06deef488
+--- /dev/null
++++ b/gcc/config/i386/prfchiintrin.h
+@@ -0,0 +1,49 @@
++/* Copyright (C) 2022 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   GCC is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   .  */
++
++#if !defined _X86GPRINTRIN_H_INCLUDED
++# error "Never use  directly; include  instead."
++#endif
++
++#ifndef _PRFCHIINTRIN_H_INCLUDED
++#define _PRFCHIINTRIN_H_INCLUDED
++
++#ifdef __x86_64__
++
++extern __inline void
++__attribute__((__gnu_inline__, __always_inline__, __artificial__))
++_m_prefetchit0 (void* __P)
++{
++  __builtin_ia32_prefetchi (__P, 3);
++}
++
++extern __inline void
++__attribute__((__gnu_inline__, __always_inline__, __artificial__))
++_m_prefetchit1 (void* __P)
++{
++  __builtin_ia32_prefetchi (__P, 2);
++}
++
++#endif
++
++#endif /* _PRFCHIINTRIN_H_INCLUDED */
+diff --git a/gcc/config/i386/x86gprintrin.h b/gcc/config/i386/x86gprintrin.h
+index e0be01d5e..0768aa0d7 100644
+--- a/gcc/config/i386/x86gprintrin.h
++++ b/gcc/config/i386/x86gprintrin.h
+@@ -72,6 +72,8 @@
+ 
+ #include 
+ 
++#include 
++
+ #include 
+ 
+ #include 
+diff --git a/gcc/config/i386/xmmintrin.h b/gcc/config/i386/xmmintrin.h
+index f1c704a2d..7fb179430 100644
+--- a/gcc/config/i386/xmmintrin.h
++++ b/gcc/config/i386/xmmintrin.h
+@@ -36,6 +36,8 @@
+ /* Constants for use with _mm_prefetch.  */
+ enum _mm_hint
+ {
++  _MM_HINT_IT0 = 19,
++  _MM_HINT_IT1 = 18,
+   /* _MM_HINT_ET is _MM_HINT_T with set 3rd bit.  */
+   _MM_HINT_ET0 = 7,
+   _MM_HINT_ET1 = 6,
+@@ -51,11 +53,12 @@ enum _mm_hint
+ extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_prefetch (const void *__P, enum _mm_hint __I)
+ {
+-  __builtin_prefetch (__P, (__I & 0x4) >> 2, __I & 0x3);
++  __builtin_ia32_prefetch (__P, (__I & 0x4) >> 2,
++			   __I & 0x3, (__I & 0x10) >> 4);
+ }
+ #else
+ #define _mm_prefetch(P, I) \
+-  __builtin_prefetch ((P), ((I & 0x4) >> 2), (I & 0x3))
++  __builtin_ia32_prefetch ((P), ((I) & 0x4) >> 2, ((I) & 0x3), ((I) & 0x10) >> 4)
+ #endif
+ 
+ #ifndef __SSE__
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index 4ba9d34cd..cb987f469 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -7043,6 +7043,11 @@ Enable/disable the generation of the AVXVNNI instructions.
+ @cindex @code{target("amx-fp16")} function attribute, x86
+ Enable/disable the generation of the AMX-FP16 instructions.
+ 
++@item prefetchi
++@itemx no-prefetchi
++@cindex @code{target("prefetchi")} function attribute, x86
++Enable/disable the generation of the PREFETCHI instructions.
++
+ @item cld
+ @itemx no-cld
+ @cindex @code{target("cld")} function attribute, x86
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index d25f13217..211b970c0 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1428,7 +1428,7 @@ See RS/6000 and PowerPC Options.
+ -mavx5124fmaps  -mavx512vnni  -mavx5124vnniw  -mprfchw  -mrdpid @gol
+ -mrdseed  -msgx -mavx512vp2intersect -mserialize -mtsxldtrk@gol
+ -mamx-tile  -mamx-int8  -mamx-bf16 -muintr -mhreset -mavxvnni@gol
+--mavx512fp16 -mamx-fp16 @gol
++-mavx512fp16 -mamx-fp16 -mprefetchi @gol
+ -mcldemote  -mms-bitfields  -mno-align-stringops  -minline-all-stringops @gol
+ -minline-stringops-dynamically  -mstringop-strategy=@var{alg} @gol
+ -mkl -mwidekl @gol
+@@ -32445,6 +32445,9 @@ preferred alignment to @option{-mpreferred-stack-boundary=2}.
+ @need 200
+ @itemx -mamx-fp16
+ @opindex mamx-fp16
++@need 200
++@itemx -mprefetchi
++@opindex mprefetchi
+ These switches enable the use of instructions in the MMX, SSE,
+ SSE2, SSE3, SSSE3, SSE4, SSE4A, SSE4.1, SSE4.2, AVX, AVX2, AVX512F, AVX512PF,
+ AVX512ER, AVX512CD, AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, SHA,
+@@ -32455,7 +32458,7 @@ XSAVEOPT, XSAVEC, XSAVES, RTM, HLE, TBM, MWAITX, CLZERO, PKU, AVX512VBMI2,
+ GFNI, VAES, WAITPKG, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B, AVX512BF16,
+ ENQCMD, AVX512VPOPCNTDQ, AVX5124FMAPS, AVX512VNNI, AVX5124VNNIW, SERIALIZE,
+ UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16,
+-AMX-FP16 or CLDEMOTE extended instruction sets. Each has a corresponding
++AMX-FP16, PREFETCHI or CLDEMOTE extended instruction sets. Each has a corresponding
+ @option{-mno-} option to disable use of these instructions.
+ 
+ These extensions are also available as built-in functions: see
+diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
+index b64b62dee..c68e492dc 100644
+--- a/gcc/doc/sourcebuild.texi
++++ b/gcc/doc/sourcebuild.texi
+@@ -2496,6 +2496,9 @@ Target does not require strict alignment.
+ @item pie_copyreloc
+ The x86-64 target linker supports PIE with copy reloc.
+ 
++@item prefetchi
++Target supports the execution of @code{prefetchi} instructions.
++
+ @item rdrand
+ Target supports x86 @code{rdrand} instruction.
+ 
+diff --git a/gcc/testsuite/g++.dg/other/i386-2.C b/gcc/testsuite/g++.dg/other/i386-2.C
+index 57a6357aa..72ed5fed0 100644
+--- a/gcc/testsuite/g++.dg/other/i386-2.C
++++ b/gcc/testsuite/g++.dg/other/i386-2.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+-/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt  -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
++/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt  -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi" } */
+ 
+ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
+    xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
+diff --git a/gcc/testsuite/g++.dg/other/i386-3.C b/gcc/testsuite/g++.dg/other/i386-3.C
+index 1947547d6..9dd53653f 100644
+--- a/gcc/testsuite/g++.dg/other/i386-3.C
++++ b/gcc/testsuite/g++.dg/other/i386-3.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+-/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
++/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi" } */
+ 
+ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
+    xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
+diff --git a/gcc/testsuite/gcc.target/i386/avx-1.c b/gcc/testsuite/gcc.target/i386/avx-1.c
+index 154e7b3b1..2b46e1b87 100644
+--- a/gcc/testsuite/gcc.target/i386/avx-1.c
++++ b/gcc/testsuite/gcc.target/i386/avx-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni -mavx512bw -mavx512fp16 -mavx512vl" } */
++/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -m3dnow -mavx -mavx2 -maes -mpclmul -mgfni -mavx512bw -mavx512fp16 -mavx512vl -mprefetchi" } */
+ /* { dg-add-options bind_pic_locally } */
+ 
+ #include <mm_malloc.h>
+@@ -153,7 +153,7 @@
+ #define __builtin_ia32_shufpd(A, B, N) __builtin_ia32_shufpd(A, B, 0)
+ 
+ /* xmmintrin.h */
+-#define __builtin_prefetch(P, A, I) __builtin_prefetch(P, 0, _MM_HINT_NTA)
++#define __builtin_ia32_prefetch(A, B, C, D) __builtin_ia32_prefetch(A, 0, 3, 0)
+ #define __builtin_ia32_pshufw(A, N) __builtin_ia32_pshufw(A, 0)
+ #define __builtin_ia32_vec_set_v4hi(A, D, N) \
+   __builtin_ia32_vec_set_v4hi(A, D, 0)
+diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+index b00cfff03..9f073f78c 100644
+--- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc
++++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+@@ -81,6 +81,7 @@ extern void test_widekl (void)			__attribute__((__target__("widekl")));
+ extern void test_avxvnni (void)			__attribute__((__target__("avxvnni")));
+ extern void test_avx512fp16 (void)		__attribute__((__target__("avx512fp16")));
+ extern void test_amx_fp16 (void)		__attribute__((__target__("amx-fp16")));
++extern void test_prefetchi (void)               __attribute__((__target__("prefetchi")));
+ 
+ extern void test_no_sgx (void)			__attribute__((__target__("no-sgx")));
+ extern void test_no_avx5124fmaps(void)		__attribute__((__target__("no-avx5124fmaps")));
+@@ -163,6 +164,7 @@ extern void test_no_widekl (void)		__attribute__((__target__("no-widekl")));
+ extern void test_no_avxvnni (void)		__attribute__((__target__("no-avxvnni")));
+ extern void test_no_avx512fp16 (void)		__attribute__((__target__("no-avx512fp16")));
+ extern void test_no_amx_fp16 (void)		__attribute__((__target__("no-amx-fp16")));
++extern void test_no_prefetchi (void)            __attribute__((__target__("no-prefetchi")));
+ 
+ extern void test_arch_nocona (void)		__attribute__((__target__("arch=nocona")));
+ extern void test_arch_core2 (void)		__attribute__((__target__("arch=core2")));
+diff --git a/gcc/testsuite/gcc.target/i386/prefetchi-1.c b/gcc/testsuite/gcc.target/i386/prefetchi-1.c
+new file mode 100644
+index 000000000..80f25e70e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/prefetchi-1.c
+@@ -0,0 +1,40 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-mprefetchi -O2" } */
++/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit0\[ \\t\]+" 2 } } */
++/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit1\[ \\t\]+" 2 } } */
++
++#include <x86intrin.h>
++
++int
++bar (int a)
++{
++  return a + 1;
++}
++
++int
++foo1 (int b)
++{
++  _mm_prefetch (bar, _MM_HINT_IT0);
++  return bar (b) + 1;
++}
++
++int
++foo2 (int b)
++{
++  _mm_prefetch (bar, _MM_HINT_IT1);
++  return bar (b) + 1;
++}
++
++int
++foo3 (int b)
++{
++  _m_prefetchit0 (bar);
++  return bar (b) + 1;
++}
++
++int
++foo4 (int b)
++{
++  _m_prefetchit1 (bar);
++  return bar (b) + 1;
++}
+diff --git a/gcc/testsuite/gcc.target/i386/prefetchi-2.c b/gcc/testsuite/gcc.target/i386/prefetchi-2.c
+new file mode 100644
+index 000000000..e05ce9c73
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/prefetchi-2.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile { target { ia32 } } } */
++/* { dg-options "-mprefetchi -O2" } */
++/* { dg-final { scan-assembler-not "\[ \\t\]+prefetchit0" } } */
++/* { dg-final { scan-assembler-not "\[ \\t\]+prefetchit1" } } */
++
++#include <x86intrin.h>
++
++int
++bar (int a)
++{
++  return a + 1;
++}
++
++int
++foo1 (int b)
++{
++  __builtin_ia32_prefetch (bar, 0, 3, 1); /* { dg-warning "instruction prefetch applies when in 64-bit mode with RIP-relative addressing and option '-mprefetchi'; they stay NOPs otherwise" } */
++  return bar (b) + 1;
++}
++
++int
++foo2 (int b)
++{
++  __builtin_ia32_prefetchi (bar, 2); /* { dg-warning "instruction prefetch applies when in 64-bit mode with RIP-relative addressing and option '-mprefetchi'; they stay NOPs otherwise" } */
++  return bar (b) + 1;
++}
+diff --git a/gcc/testsuite/gcc.target/i386/prefetchi-3.c b/gcc/testsuite/gcc.target/i386/prefetchi-3.c
+new file mode 100644
+index 000000000..f0a4173d2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/prefetchi-3.c
+@@ -0,0 +1,20 @@
++/* { dg-do compile } */
++/* { dg-options "-mprefetchi -O2" } */
++/* { dg-final { scan-assembler-not "prefetchit0" } } */
++/* { dg-final { scan-assembler-not "prefetchit1" } } */
++
++#include <x86intrin.h>
++
++void* p;
++
++void extern
++prefetchi_test1 (void)
++{
++  __builtin_ia32_prefetchi (p, 2); /* { dg-warning "instruction prefetch applies when in 64-bit mode with RIP-relative addressing and option '-mprefetchi'; they stay NOPs otherwise" } */
++}
++
++void extern
++prefetchi_test2 (void)
++{
++  __builtin_ia32_prefetch (p, 0, 3, 1); /* { dg-warning "instruction prefetch applies when in 64-bit mode with RIP-relative addressing and option '-mprefetchi'; they stay NOPs otherwise" } */
++} 
+diff --git a/gcc/testsuite/gcc.target/i386/prefetchi-4.c b/gcc/testsuite/gcc.target/i386/prefetchi-4.c
+new file mode 100644
+index 000000000..73ae596d1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/prefetchi-4.c
+@@ -0,0 +1,19 @@
++/* { dg-do compile } */
++/* { dg-options "-O0" } */
++
++#include <x86intrin.h>
++
++void* p;
++
++void extern
++prefetch_test (void)
++{
++  __builtin_ia32_prefetch (p, 0, 3, 0);
++  __builtin_ia32_prefetch (p, 0, 2, 0);
++  __builtin_ia32_prefetch (p, 0, 1, 0);
++  __builtin_ia32_prefetch (p, 0, 0, 0);
++  __builtin_ia32_prefetch (p, 1, 3, 0);
++  __builtin_ia32_prefetch (p, 1, 2, 0);
++  __builtin_ia32_prefetch (p, 1, 1, 0);
++  __builtin_ia32_prefetch (p, 1, 0, 0);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
+index a1e453a98..db7c0fc7a 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-13.c
++++ b/gcc/testsuite/gcc.target/i386/sse-13.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512vl -mavx512dq -mavx512bw -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
++/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512vl -mavx512dq -mavx512bw -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi" } */
+ /* { dg-add-options bind_pic_locally } */
+ 
+ #include <mm_malloc.h>
+@@ -125,7 +125,7 @@
+ #define __builtin_ia32_shufpd(A, B, N) __builtin_ia32_shufpd(A, B, 0)
+ 
+ /* xmmintrin.h */
+-#define __builtin_prefetch(P, A, I) __builtin_prefetch(P, 0, _MM_HINT_NTA)
++#define __builtin_ia32_prefetch(A, B, C, D) __builtin_ia32_prefetch(A, 0, 3, 0)
+ #define __builtin_ia32_pshufw(A, N) __builtin_ia32_pshufw(A, 0)
+ #define __builtin_ia32_vec_set_v4hi(A, D, N) \
+   __builtin_ia32_vec_set_v4hi(A, D, 0)
+diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
+index 151201d97..741694e87 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-23.c
++++ b/gcc/testsuite/gcc.target/i386/sse-23.c
+@@ -94,7 +94,7 @@
+ #define __builtin_ia32_shufpd(A, B, N) __builtin_ia32_shufpd(A, B, 0)
+ 
+ /* xmmintrin.h */
+-#define __builtin_prefetch(P, A, I) __builtin_prefetch(P, 0, _MM_HINT_NTA)
++#define __builtin_ia32_prefetch(A, B, C, D) __builtin_ia32_prefetch(A, 0, 3, 0)
+ #define __builtin_ia32_pshufw(A, N) __builtin_ia32_pshufw(A, 0)
+ #define __builtin_ia32_vec_set_v4hi(A, D, N) \
+   __builtin_ia32_vec_set_v4hi(A, D, 0)
+@@ -843,6 +843,6 @@
+ #define __builtin_ia32_vpclmulqdq_v2di(A, B, C)  __builtin_ia32_vpclmulqdq_v2di(A, B, 1) 
+ #define __builtin_ia32_vpclmulqdq_v8di(A, B, C)  __builtin_ia32_vpclmulqdq_v8di(A, B, 1) 
+ 
+-#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,xsavec,xsaves,clflushopt,avx512bw,avx512dq,avx512vl,avx512vbmi,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,avx512vbmi2,vpclmulqdq,avx512bitalg,pconfig,wbnoinvd,avx512bf16,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16")
++#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,xsavec,xsaves,clflushopt,avx512bw,avx512dq,avx512vl,avx512vbmi,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,avx512vbmi2,vpclmulqdq,avx512bitalg,pconfig,wbnoinvd,avx512bf16,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16,prefetchi")
+ 
+ #include <x86intrin.h>
+diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c
+index 293be094b..efe7df13b 100644
+--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c
++++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-1.c
+@@ -1,7 +1,7 @@
+ /* Test that <x86gprintrin.h> is usable with -O -std=c89 -pedantic-errors.  */
+ /* { dg-do compile } */
+ /* { dg-options "-O -std=c89 -pedantic-errors -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mno-sse -mno-mmx" } */
+-/* { dg-additional-options "-muintr" { target { ! ia32 } } }  */
++/* { dg-additional-options "-muintr -mprefetchi" { target { ! ia32 } } }  */
+ 
+ #include <x86gprintrin.h>
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c
+index c63302757..5f6970df6 100644
+--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c
++++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-2.c
+@@ -1,7 +1,7 @@
+ /* { dg-do compile } */
+ /* { dg-options "-O2 -Werror-implicit-function-declaration -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mno-sse -mno-mmx" } */
+ /* { dg-add-options bind_pic_locally } */
+-/* { dg-additional-options "-muintr" { target { ! ia32 } } }  */
++/* { dg-additional-options "-muintr -mprefetchi" { target { ! ia32 } } }  */
+ 
+ /* Test that the intrinsics in <x86gprintrin.h> compile with optimization.
+    All of them are defined as inline functions that reference the proper
+diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c
+index 3a7e1f4a1..5c075c375 100644
+--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c
++++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-3.c
+@@ -1,7 +1,7 @@
+ /* { dg-do compile } */
+ /* { dg-options "-O0 -Werror-implicit-function-declaration -march=x86-64 -madx -mbmi -mbmi2 -mcldemote -mclflushopt -mclwb -mclzero -menqcmd -mfsgsbase -mfxsr -mhreset -mlzcnt -mlwp -mmovdiri -mmwaitx -mpconfig -mpopcnt -mpku -mptwrite -mrdpid -mrdrnd -mrdseed -mrtm -mserialize -msgx -mshstk -mtbm -mtsxldtrk -mwaitpkg -mwbnoinvd -mxsave -mxsavec -mxsaveopt -mxsaves -mno-sse -mno-mmx" } */
+ /* { dg-add-options bind_pic_locally } */
+-/* { dg-additional-options "-muintr" { target { ! ia32 } } }  */
++/* { dg-additional-options "-muintr -mprefetchi" { target { ! ia32 } } }  */
+ 
+ /* Test that the intrinsics in <x86gprintrin.h> compile without optimization.
+    All of them are defined as inline functions that reference the proper
+diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c
+index d8a6126e5..bda4ecea3 100644
+--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c
++++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-4.c
+@@ -15,7 +15,7 @@
+ 
+ #ifndef DIFFERENT_PRAGMAS
+ #ifdef __x86_64__
+-#pragma GCC target ("adx,bmi,bmi2,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,uintr,xsaveopt")
++#pragma GCC target ("adx,bmi,bmi2,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,prefetchi,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,uintr,xsaveopt")
+ #else
+ #pragma GCC target ("adx,bmi,bmi2,fsgsbase,fxsr,hreset,lwp,lzcnt,popcnt,rdrnd,rdseed,tbm,rtm,serialize,tsxldtrk,xsaveopt")
+ #endif
+diff --git a/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c b/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c
+index 9ef66fdad..4aadfd0b3 100644
+--- a/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c
++++ b/gcc/testsuite/gcc.target/i386/x86gprintrin-5.c
+@@ -28,7 +28,7 @@
+ #define __builtin_ia32_xabort(M) __builtin_ia32_xabort(1)
+ 
+ #ifdef __x86_64__
+-#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,uintr,xsavec,xsaveopt,xsaves,wbnoinvd")
++#pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,prefetchi,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,uintr,xsavec,xsaveopt,xsaves,wbnoinvd")
+ #else
+ #pragma GCC target ("adx,bmi,bmi2,clflushopt,clwb,clzero,enqcmd,fsgsbase,fxsr,hreset,lwp,lzcnt,mwaitx,pconfig,pku,popcnt,rdpid,rdrnd,rdseed,tbm,rtm,serialize,sgx,tsxldtrk,xsavec,xsaveopt,xsaves,wbnoinvd")
+ #endif
+-- 
+2.31.1
+
diff --git a/0274-Initial-Granite-Rapids-Support.patch b/0274-Initial-Granite-Rapids-Support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..db7832924816e555145426fb16d700b4fd47629e
--- /dev/null
+++ b/0274-Initial-Granite-Rapids-Support.patch
@@ -0,0 +1,277 @@
+From 7f0f8b585cf60b4c09bca42b5339995c2cc74633 Mon Sep 17 00:00:00 2001
+From: Haochen Jiang <haochen.jiang@intel.com>
+Date: Mon, 7 Nov 2022 11:04:57 +0800
+Subject: [PATCH 20/28] Initial Granite Rapids Support
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h
+	(get_intel_cpu): Handle Granite Rapids.
+	* common/config/i386/i386-common.cc:
+	(processor_names): Add graniterapids.
+	(processor_alias_table): Ditto.
+	* common/config/i386/i386-cpuinfo.h
+	(enum processor_subtypes): Add INTEL_GRANTIERAPIDS.
+	* config.gcc: Add -march=graniterapids.
+	* config/i386/driver-i386.cc (host_detect_local_cpu):
+	Handle graniterapids.
+	* config/i386/i386-c.cc (ix86_target_macros_internal):
+	Ditto.
+	* config/i386/i386-options.cc (m_GRANITERAPIDS): New.
+	(processor_cost_table): Add graniterapids.
+	* config/i386/i386.h (enum processor_type):
+	Add PROCESSOR_GRANITERAPIDS.
+	(PTA_GRANITERAPIDS): Ditto.
+	* doc/extend.texi: Add graniterapids.
+	* doc/invoke.texi: Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/i386/mv16.C: Add graniterapids.
+	* gcc.target/i386/funcspec-56.inc: Handle new march.
+
+(cherry picked from commit 339ffc5a792dd66647392a235f2f7f6344c5359e)
+---
+ gcc/common/config/i386/cpuinfo.h              |  9 +++++++++
+ gcc/common/config/i386/i386-common.cc         |  3 +++
+ gcc/common/config/i386/i386-cpuinfo.h         |  1 +
+ gcc/config.gcc                                |  2 +-
+ gcc/config/i386/driver-i386.cc                |  5 ++++-
+ gcc/config/i386/i386-c.cc                     |  7 +++++++
+ gcc/config/i386/i386-options.cc               |  4 +++-
+ gcc/config/i386/i386.h                        |  3 +++
+ gcc/doc/extend.texi                           |  3 +++
+ gcc/doc/invoke.texi                           | 11 +++++++++++
+ gcc/testsuite/g++.target/i386/mv16.C          |  6 ++++++
+ gcc/testsuite/gcc.target/i386/funcspec-56.inc |  1 +
+ 12 files changed, 52 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index f17e88144..1f75ff1ca 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -528,6 +528,15 @@ get_intel_cpu (struct __processor_model *cpu_model,
+       cpu_model->__cpu_type = INTEL_COREI7;
+       cpu_model->__cpu_subtype = INTEL_COREI7_SAPPHIRERAPIDS;
+       break;
++    case 0xad:
++    case 0xae:
++      /* Granite Rapids.  */
++      cpu = "graniterapids";
++      CHECK___builtin_cpu_is ("corei7");
++      CHECK___builtin_cpu_is ("graniterapids");
++      cpu_model->__cpu_type = INTEL_COREI7;
++      cpu_model->__cpu_subtype = INTEL_COREI7_GRANITERAPIDS;
++      break;
+     case 0x17:
+     case 0x1d:
+       /* Penryn.  */
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index c8cf532cf..1aa163463 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -1855,6 +1855,7 @@ const char *const processor_names[] =
+   "sapphirerapids",
+   "alderlake",
+   "rocketlake",
++  "graniterapids",
+   "intel",
+   "geode",
+   "k6",
+@@ -1973,6 +1974,8 @@ const pta processor_alias_table[] =
+     M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
+   {"meteorlake", PROCESSOR_ALDERLAKE, CPU_HASWELL, PTA_ALDERLAKE,
+     M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
++  {"graniterapids", PROCESSOR_GRANITERAPIDS, CPU_HASWELL, PTA_GRANITERAPIDS,
++    M_CPU_SUBTYPE (INTEL_COREI7_GRANITERAPIDS), P_PROC_AVX512F},
+   {"bonnell", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+     M_CPU_TYPE (INTEL_BONNELL), P_PROC_SSSE3},
+   {"atom", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h
+index 95b078acf..7b2d4d242 100644
+--- a/gcc/common/config/i386/i386-cpuinfo.h
++++ b/gcc/common/config/i386/i386-cpuinfo.h
+@@ -92,6 +92,7 @@ enum processor_subtypes
+   AMDFAM19H_ZNVER3,
+   INTEL_COREI7_ROCKETLAKE,
+   AMDFAM19H_ZNVER4,
++  INTEL_COREI7_GRANITERAPIDS,
+   CPU_SUBTYPE_MAX
+ };
+ 
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 81012c651..9bad238e3 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -670,7 +670,7 @@ slm nehalem westmere sandybridge ivybridge haswell broadwell bonnell \
+ silvermont knl knm skylake-avx512 cannonlake icelake-client icelake-server \
+ skylake goldmont goldmont-plus tremont cascadelake tigerlake cooperlake \
+ sapphirerapids alderlake rocketlake eden-x2 nano nano-1000 nano-2000 nano-3000 \
+-nano-x2 eden-x4 nano-x4 x86-64 x86-64-v2 x86-64-v3 x86-64-v4 native"
++nano-x2 eden-x4 nano-x4 x86-64 x86-64-v2 x86-64-v3 x86-64-v4 graniterapids native"
+ 
+ # Additional x86 processors supported by --with-cpu=.  Each processor
+ # MUST be separated by exactly one space.
+diff --git a/gcc/config/i386/driver-i386.cc b/gcc/config/i386/driver-i386.cc
+index 3b5161aed..ea8c3d8d1 100644
+--- a/gcc/config/i386/driver-i386.cc
++++ b/gcc/config/i386/driver-i386.cc
+@@ -576,8 +576,11 @@ const char *host_detect_local_cpu (int argc, const char **argv)
+ 	      /* This is unknown family 0x6 CPU.  */
+ 	      if (has_feature (FEATURE_AVX))
+ 		{
++		  /* Assume Granite Rapids.  */
++		  if (has_feature (FEATURE_AMX_FP16))
++		    cpu = "graniterapids";
+ 		  /* Assume Tiger Lake */
+-		  if (has_feature (FEATURE_AVX512VP2INTERSECT))
++		  else if (has_feature (FEATURE_AVX512VP2INTERSECT))
+ 		    cpu = "tigerlake";
+ 		  /* Assume Sapphire Rapids.  */
+ 		  else if (has_feature (FEATURE_TSXLDTRK))
+diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc
+index 00880bd17..04f1dd682 100644
+--- a/gcc/config/i386/i386-c.cc
++++ b/gcc/config/i386/i386-c.cc
+@@ -242,6 +242,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+       def_or_undef (parse_in, "__sapphirerapids");
+       def_or_undef (parse_in, "__sapphirerapids__");
+       break;
++    case PROCESSOR_GRANITERAPIDS:
++      def_or_undef (parse_in, "__graniterapids");
++      def_or_undef (parse_in, "__graniterapids__");
++      break;
+     case PROCESSOR_ALDERLAKE:
+       def_or_undef (parse_in, "__alderlake");
+       def_or_undef (parse_in, "__alderlake__");
+@@ -419,6 +423,9 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+     case PROCESSOR_ROCKETLAKE:
+       def_or_undef (parse_in, "__tune_rocketlake__");
+       break;
++    case PROCESSOR_GRANITERAPIDS:
++      def_or_undef (parse_in, "__tune_graniterapids__");
++      break;
+     case PROCESSOR_INTEL:
+     case PROCESSOR_GENERIC:
+       break;
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 724375f02..6645e3259 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -127,10 +127,11 @@ along with GCC; see the file COPYING3.  If not see
+ #define m_SAPPHIRERAPIDS (HOST_WIDE_INT_1U<<PROCESSOR_SAPPHIRERAPIDS)
+Date: Wed, 31 May 2023 10:45:00 +0800
+Subject: [PATCH 21/28] Support Intel AMX-COMPLEX
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h (get_available_features):
+	Detect AMX-COMPLEX.
+	* common/config/i386/i386-common.cc
+	(OPTION_MASK_ISA2_AMX_COMPLEX_SET,
+	OPTION_MASK_ISA2_AMX_COMPLEX_UNSET): New.
+	(ix86_handle_option): Handle -mamx-complex.
+	* common/config/i386/i386-cpuinfo.h (enum processor_features):
+	Add FEATURE_AMX_COMPLEX.
+	* common/config/i386/i386-isas.h: Add ISA_NAME_TABLE_ENTRY for
+	amx-complex.
+	* config.gcc: Add amxcomplexintrin.h.
+	* config/i386/cpuid.h (bit_AMX_COMPLEX): New.
+	* config/i386/i386-c.cc (ix86_target_macros_internal): Define
+	__AMX_COMPLEX__.
+	* config/i386/i386-isa.def (AMX_COMPLEX): Add DEF_PTA(AMX_COMPLEX).
+	* config/i386/i386-options.cc (ix86_valid_target_attribute_inner_p):
+	Handle amx-complex.
+	* config/i386/i386.opt: Add option -mamx-complex.
+	* config/i386/immintrin.h: Include amxcomplexintrin.h.
+	* doc/extend.texi: Document amx-complex.
+	* doc/invoke.texi: Document -mamx-complex.
+	* doc/sourcebuild.texi: Document target amx-complex.
+	* config/i386/amxcomplexintrin.h: New file.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.dg/other/i386-2.C: Add -mamx-complex.
+	* g++.dg/other/i386-3.C: Ditto.
+	* gcc.target/i386/amx-check.h: Add cpu check for AMX-COMPLEX.
+	* gcc.target/i386/amx-helper.h: Add amx-complex support.
+	* gcc.target/i386/funcspec-56.inc: Add new target attribute.
+	* gcc.target/i386/sse-12.c: Add -mamx-complex.
+	* gcc.target/i386/sse-13.c: Ditto.
+	* gcc.target/i386/sse-14.c: Ditto.
+	* gcc.target/i386/sse-22.c: Add amx-complex.
+	* gcc.target/i386/sse-23.c: Ditto.
+	* lib/target-supports.exp (check_effective_target_amx_complex): New.
+	* gcc.target/i386/amxcomplex-asmatt-1.c: New test.
+	* gcc.target/i386/amxcomplex-asmintel-1.c: Ditto.
+	* gcc.target/i386/amxcomplex-cmmimfp16ps-2.c: Ditto.
+	* gcc.target/i386/amxcomplex-cmmrlfp16ps-2.c: Ditto.
+---
+ gcc/common/config/i386/cpuinfo.h              |  2 +
+ gcc/common/config/i386/i386-common.cc         | 19 +++++-
+ gcc/common/config/i386/i386-cpuinfo.h         |  1 +
+ gcc/common/config/i386/i386-isas.h            |  2 +
+ gcc/config.gcc                                |  2 +-
+ gcc/config/i386/amxcomplexintrin.h            | 59 +++++++++++++++++++
+ gcc/config/i386/cpuid.h                       |  1 +
+ gcc/config/i386/i386-c.cc                     |  2 +
+ gcc/config/i386/i386-isa.def                  |  1 +
+ gcc/config/i386/i386-options.cc               |  4 +-
+ gcc/config/i386/i386.opt                      |  4 ++
+ gcc/config/i386/immintrin.h                   |  2 +
+ gcc/doc/extend.texi                           |  5 ++
+ gcc/doc/invoke.texi                           |  7 ++-
+ gcc/doc/sourcebuild.texi                      |  3 +
+ gcc/testsuite/g++.dg/other/i386-2.C           |  2 +-
+ gcc/testsuite/g++.dg/other/i386-3.C           |  2 +-
+ gcc/testsuite/gcc.target/i386/amx-check.h     |  3 +
+ gcc/testsuite/gcc.target/i386/amx-helper.h    |  4 +-
+ .../gcc.target/i386/amxcomplex-asmatt-1.c     | 15 +++++
+ .../gcc.target/i386/amxcomplex-asmintel-1.c   | 12 ++++
+ .../i386/amxcomplex-cmmimfp16ps-2.c           | 53 +++++++++++++++++
+ .../i386/amxcomplex-cmmrlfp16ps-2.c           | 53 +++++++++++++++++
+ gcc/testsuite/gcc.target/i386/funcspec-56.inc |  2 +
+ gcc/testsuite/gcc.target/i386/sse-12.c        |  2 +-
+ gcc/testsuite/gcc.target/i386/sse-13.c        |  2 +-
+ gcc/testsuite/gcc.target/i386/sse-14.c        |  2 +-
+ gcc/testsuite/gcc.target/i386/sse-22.c        |  4 +-
+ gcc/testsuite/gcc.target/i386/sse-23.c        |  2 +-
+ gcc/testsuite/lib/target-supports.exp         | 11 ++++
+ 30 files changed, 268 insertions(+), 15 deletions(-)
+ create mode 100644 gcc/config/i386/amxcomplexintrin.h
+ create mode 100644 gcc/testsuite/gcc.target/i386/amxcomplex-asmatt-1.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/amxcomplex-asmintel-1.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/amxcomplex-cmmimfp16ps-2.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/amxcomplex-cmmrlfp16ps-2.c
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 1f75ff1ca..39d3351db 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -798,6 +798,8 @@ get_available_features (struct __processor_model *cpu_model,
+ 	{
+ 	  if (eax & bit_AMX_FP16)
+ 	    set_feature (FEATURE_AMX_FP16);
++	  if (edx & bit_AMX_COMPLEX)
++	    set_feature (FEATURE_AMX_COMPLEX);
+ 	}
+     }
+ 
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index 1aa163463..87e8afe9b 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -109,6 +109,8 @@ along with GCC; see the file COPYING3.  If not see
+ #define OPTION_MASK_ISA2_AMX_BF16_SET OPTION_MASK_ISA2_AMX_BF16
+ #define OPTION_MASK_ISA2_AMX_FP16_SET OPTION_MASK_ISA2_AMX_FP16
+ #define OPTION_MASK_ISA2_PREFETCHI_SET OPTION_MASK_ISA2_PREFETCHI
++#define OPTION_MASK_ISA2_AMX_COMPLEX_SET \
++  (OPTION_MASK_ISA2_AMX_TILE | OPTION_MASK_ISA2_AMX_COMPLEX)
+ 
+ /* SSE4 includes both SSE4.1 and SSE4.2. -msse4 should be the same
+    as -msse4.2.  */
+@@ -269,7 +271,8 @@ along with GCC; see the file COPYING3.  If not see
+ #define OPTION_MASK_ISA2_SERIALIZE_UNSET OPTION_MASK_ISA2_SERIALIZE
+ #define OPTION_MASK_ISA2_AVX512VP2INTERSECT_UNSET OPTION_MASK_ISA2_AVX512VP2INTERSECT
+ #define OPTION_MASK_ISA2_TSXLDTRK_UNSET OPTION_MASK_ISA2_TSXLDTRK
+-#define OPTION_MASK_ISA2_AMX_TILE_UNSET OPTION_MASK_ISA2_AMX_TILE
++#define OPTION_MASK_ISA2_AMX_TILE_UNSET \
++  (OPTION_MASK_ISA2_AMX_TILE | OPTION_MASK_ISA2_AMX_COMPLEX_UNSET)
+ #define OPTION_MASK_ISA2_AMX_INT8_UNSET OPTION_MASK_ISA2_AMX_INT8
+ #define OPTION_MASK_ISA2_AMX_BF16_UNSET OPTION_MASK_ISA2_AMX_BF16
+ #define OPTION_MASK_ISA2_UINTR_UNSET OPTION_MASK_ISA2_UINTR
+@@ -279,6 +282,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define OPTION_MASK_ISA2_WIDEKL_UNSET OPTION_MASK_ISA2_WIDEKL
+ #define OPTION_MASK_ISA2_AMX_FP16_UNSET OPTION_MASK_ISA2_AMX_FP16
+ #define OPTION_MASK_ISA2_PREFETCHI_UNSET OPTION_MASK_ISA2_PREFETCHI
++#define OPTION_MASK_ISA2_AMX_COMPLEX_UNSET OPTION_MASK_ISA2_AMX_COMPLEX
+ 
+ /* SSE4 includes both SSE4.1 and SSE4.2.  -mno-sse4 should the same
+    as -mno-sse4.1. */
+@@ -1155,6 +1159,19 @@ ix86_handle_option (struct gcc_options *opts,
+ 	}
+       return true;
+ 
++    case OPT_mamx_complex:
++      if (value)
++	{
++	  opts->x_ix86_isa_flags2 |= OPTION_MASK_ISA2_AMX_COMPLEX_SET;
++	  opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AMX_COMPLEX_SET;
++	}
++      else
++	{
++	  opts->x_ix86_isa_flags2 &= ~OPTION_MASK_ISA2_AMX_COMPLEX_UNSET;
++	  opts->x_ix86_isa_flags2_explicit |= OPTION_MASK_ISA2_AMX_COMPLEX_UNSET;
++	}
++      return true;
++
+     case OPT_mfma:
+       if (value)
+ 	{
+diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h
+index 7b2d4d242..56020faac 100644
+--- a/gcc/common/config/i386/i386-cpuinfo.h
++++ b/gcc/common/config/i386/i386-cpuinfo.h
+@@ -243,6 +243,7 @@ enum processor_features
+   FEATURE_X86_64_V4,
+   FEATURE_AMX_FP16,
+   FEATURE_PREFETCHI,
++  FEATURE_AMX_COMPLEX,
+   CPU_FEATURE_MAX
+ };
+ 
+diff --git a/gcc/common/config/i386/i386-isas.h b/gcc/common/config/i386/i386-isas.h
+index 6caf06249..cbef68479 100644
+--- a/gcc/common/config/i386/i386-isas.h
++++ b/gcc/common/config/i386/i386-isas.h
+@@ -177,4 +177,6 @@ ISA_NAMES_TABLE_START
+   ISA_NAMES_TABLE_ENTRY("x86-64-v4", FEATURE_X86_64_V4, P_X86_64_V4, NULL)
+   ISA_NAMES_TABLE_ENTRY("amx-fp16", FEATURE_AMX_FP16, P_NONE, "-mamx-fp16")
+   ISA_NAMES_TABLE_ENTRY("prefetchi", FEATURE_PREFETCHI, P_NONE, "-mprefetchi")
++  ISA_NAMES_TABLE_ENTRY("amx-complex", FEATURE_AMX_COMPLEX,
++			P_NONE, "-mamx-complex")
+ ISA_NAMES_TABLE_END
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 9bad238e3..ca5c8f8a0 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -424,7 +424,7 @@ i[34567]86-*-* | x86_64-*-*)
+ 		       amxbf16intrin.h x86gprintrin.h uintrintrin.h
+ 		       hresetintrin.h keylockerintrin.h avxvnniintrin.h
+ 		       mwaitintrin.h avx512fp16intrin.h avx512fp16vlintrin.h
+-		       amxfp16intrin.h prfchiintrin.h"
++		       amxfp16intrin.h prfchiintrin.h amxcomplexintrin.h"
+ 	;;
+ ia64-*-*)
+ 	extra_headers=ia64intrin.h
+diff --git a/gcc/config/i386/amxcomplexintrin.h b/gcc/config/i386/amxcomplexintrin.h
+new file mode 100644
+index 000000000..6ea1eca04
+--- /dev/null
++++ b/gcc/config/i386/amxcomplexintrin.h
+@@ -0,0 +1,59 @@
++/* Copyright (C) 2023 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify
++   it under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3, or (at your option)
++   any later version.
++
++   GCC is distributed in the hope that it will be useful,
++   but WITHOUT ANY WARRANTY; without even the implied warranty of
++   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++   GNU General Public License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   <http://www.gnu.org/licenses/>.  */
++
++#if !defined _IMMINTRIN_H_INCLUDED
++#error "Never use <amxcomplexintrin.h> directly; include <immintrin.h> instead."
++#endif
++
++#ifndef _AMXCOMPLEXINTRIN_H_INCLUDED
++#define _AMXCOMPLEXINTRIN_H_INCLUDED
++
++#if !defined(__AMX_COMPLEX__)
++#pragma GCC push_options
++#pragma GCC target("amx-complex")
++#define __DISABLE_AMX_COMPLEX__
++#endif /* __AMX_COMPLEX__ */
++
++#if defined(__x86_64__)
++#define _tile_cmmimfp16ps_internal(src1_dst,src2,src3)				\
++  __asm__ volatile\
++  ("{tcmmimfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|tcmmimfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::)
++
++#define _tile_cmmrlfp16ps_internal(src1_dst,src2,src3)				\
++  __asm__ volatile\
++  ("{tcmmrlfp16ps\t%%tmm"#src3", %%tmm"#src2", %%tmm"#src1_dst"|tcmmrlfp16ps\t%%tmm"#src1_dst", %%tmm"#src2", %%tmm"#src3"}" ::)
++
++#define _tile_cmmimfp16ps(src1_dst,src2,src3)					\
++  _tile_cmmimfp16ps_internal (src1_dst, src2, src3)
++
++#define _tile_cmmrlfp16ps(src1_dst,src2,src3)					\
++  _tile_cmmrlfp16ps_internal (src1_dst, src2, src3)
++
++#endif
++
++#ifdef __DISABLE_AMX_COMPLEX__
++#undef __DISABLE_AMX_COMPLEX__
++#pragma GCC pop_options
++#endif /* __DISABLE_AMX_COMPLEX__ */
++
++#endif /* _AMXCOMPLEXINTRIN_H_INCLUDED */
+diff --git a/gcc/config/i386/cpuid.h b/gcc/config/i386/cpuid.h
+index 21100149a..530a45fad 100644
+--- a/gcc/config/i386/cpuid.h
++++ b/gcc/config/i386/cpuid.h
+@@ -136,6 +136,7 @@
+ #define bit_AMX_BF16    (1 << 22)
+ #define bit_AMX_TILE    (1 << 24)
+ #define bit_AMX_INT8    (1 << 25)
++#define bit_AMX_COMPLEX (1 << 8)
+ 
+ /* Extended State Enumeration Sub-leaf (%eax == 0xd, %ecx == 1) */
+ #define bit_XSAVEOPT	(1 << 0)
+diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc
+index 04f1dd682..5e0ac278c 100644
+--- a/gcc/config/i386/i386-c.cc
++++ b/gcc/config/i386/i386-c.cc
+@@ -644,6 +644,8 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+     def_or_undef (parse_in, "__AMX_FP16__");
+   if (isa_flag2 & OPTION_MASK_ISA2_PREFETCHI)
+     def_or_undef (parse_in, "__PREFETCHI__");
++  if (isa_flag2 & OPTION_MASK_ISA2_AMX_COMPLEX)
++    def_or_undef (parse_in, "__AMX_COMPLEX__");
+   if (TARGET_IAMCU)
+     {
+       def_or_undef (parse_in, "__iamcu");
+diff --git a/gcc/config/i386/i386-isa.def b/gcc/config/i386/i386-isa.def
+index 744a7df85..7445b1bf7 100644
+--- a/gcc/config/i386/i386-isa.def
++++ b/gcc/config/i386/i386-isa.def
+@@ -111,3 +111,4 @@ DEF_PTA(AVXVNNI)
+ DEF_PTA(AVX512FP16)
+ DEF_PTA(AMX_FP16)
+ DEF_PTA(PREFETCHI)
++DEF_PTA(AMX_COMPLEX)
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 6645e3259..7efd25084 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -233,7 +233,8 @@ static struct ix86_target_opts isa2_opts[] =
+   { "-mavxvnni",	OPTION_MASK_ISA2_AVXVNNI },
+   { "-mavx512fp16",	OPTION_MASK_ISA2_AVX512FP16 },
+   { "-mamx-fp16",       OPTION_MASK_ISA2_AMX_FP16 },
+-  { "-mprefetchi",      OPTION_MASK_ISA2_PREFETCHI }
++  { "-mprefetchi",      OPTION_MASK_ISA2_PREFETCHI },
++  { "-mamx-complex",	OPTION_MASK_ISA2_AMX_COMPLEX }
+ };
+ static struct ix86_target_opts isa_opts[] =
+ {
+@@ -1080,6 +1081,7 @@ ix86_valid_target_attribute_inner_p (tree fndecl, tree args, char *p_strings[],
+     IX86_ATTR_ISA ("avx512fp16", OPT_mavx512fp16),
+     IX86_ATTR_ISA ("amx-fp16", OPT_mamx_fp16),
+     IX86_ATTR_ISA ("prefetchi",   OPT_mprefetchi),
++    IX86_ATTR_ISA ("amx-complex", OPT_mamx_complex),
+ 
+     /* enum options */
+     IX86_ATTR_ENUM ("fpmath=",	OPT_mfpmath_),
+diff --git a/gcc/config/i386/i386.opt b/gcc/config/i386/i386.opt
+index 50cd114f6..fba94f3f6 100644
+--- a/gcc/config/i386/i386.opt
++++ b/gcc/config/i386/i386.opt
+@@ -1234,3 +1234,7 @@ Support AMX-FP16 built-in functions and code generation.
+ mprefetchi
+ Target Mask(ISA2_PREFETCHI) Var(ix86_isa_flags2) Save
+ Support PREFETCHI built-in functions and code generation.
++
++mamx-complex
++Target Mask(ISA2_AMX_COMPLEX) Var(ix86_isa_flags2) Save
++Support AMX-COMPLEX built-in functions and code generation.
+diff --git a/gcc/config/i386/immintrin.h b/gcc/config/i386/immintrin.h
+index 0447ca4b2..bd819c7f4 100644
+--- a/gcc/config/i386/immintrin.h
++++ b/gcc/config/i386/immintrin.h
+@@ -124,6 +124,8 @@
+ 
+ #include <amxfp16intrin.h>
+ 
++#include <amxcomplexintrin.h>
++
+ #include <prfchwintrin.h>
+ 
+ #include <keylockerintrin.h>
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index ba9faf4b2..d7b0bc802 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -7048,6 +7048,11 @@ Enable/disable the generation of the AMX-FP16 instructions.
+ @cindex @code{target("prefetchi")} function attribute, x86
+ Enable/disable the generation of the PREFETCHI instructions.
+ 
++@cindex @code{target("amx-complex")} function attribute, x86
++@item amx-complex
++@itemx no-amx-complex
++Enable/disable the generation of the AMX-COMPLEX instructions.
++
+ @item cld
+ @itemx no-cld
+ @cindex @code{target("cld")} function attribute, x86
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 8ca831dc1..186b33481 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1428,7 +1428,7 @@ See RS/6000 and PowerPC Options.
+ -mavx5124fmaps  -mavx512vnni  -mavx5124vnniw  -mprfchw  -mrdpid @gol
+ -mrdseed  -msgx -mavx512vp2intersect -mserialize -mtsxldtrk@gol
+ -mamx-tile  -mamx-int8  -mamx-bf16 -muintr -mhreset -mavxvnni@gol
+--mavx512fp16 -mamx-fp16 -mprefetchi @gol
++-mavx512fp16 -mamx-fp16 -mprefetchi -mamx-complex @gol
+ -mcldemote  -mms-bitfields  -mno-align-stringops  -minline-all-stringops @gol
+ -minline-stringops-dynamically  -mstringop-strategy=@var{alg} @gol
+ -mkl -mwidekl @gol
+@@ -32459,6 +32459,9 @@ preferred alignment to @option{-mpreferred-stack-boundary=2}.
+ @need 200
+ @itemx -mprefetchi
+ @opindex mprefetchi
++@need 200
++@opindex mamx-complex
++@itemx -mamx-complex
+ These switches enable the use of instructions in the MMX, SSE,
+ SSE2, SSE3, SSSE3, SSE4, SSE4A, SSE4.1, SSE4.2, AVX, AVX2, AVX512F, AVX512PF,
+ AVX512ER, AVX512CD, AVX512VL, AVX512BW, AVX512DQ, AVX512IFMA, AVX512VBMI, SHA,
+@@ -32469,7 +32472,7 @@ XSAVEOPT, XSAVEC, XSAVES, RTM, HLE, TBM, MWAITX, CLZERO, PKU, AVX512VBMI2,
+ GFNI, VAES, WAITPKG, VPCLMULQDQ, AVX512BITALG, MOVDIRI, MOVDIR64B, AVX512BF16,
+ ENQCMD, AVX512VPOPCNTDQ, AVX5124FMAPS, AVX512VNNI, AVX5124VNNIW, SERIALIZE,
+ UINTR, HRESET, AMXTILE, AMXINT8, AMXBF16, KL, WIDEKL, AVXVNNI, AVX512-FP16,
+-AMX-FP16, PREFETCHI or CLDEMOTE extended instruction sets. Each has a corresponding
++AMX-FP16, PREFETCHI, AMX-COMPLEX or CLDEMOTE extended instruction sets. Each has a corresponding
+ @option{-mno-} option to disable use of these instructions.
+ 
+ These extensions are also available as built-in functions: see
+diff --git a/gcc/doc/sourcebuild.texi b/gcc/doc/sourcebuild.texi
+index c68e492dc..454fae11a 100644
+--- a/gcc/doc/sourcebuild.texi
++++ b/gcc/doc/sourcebuild.texi
+@@ -2472,6 +2472,9 @@ Target supports the execution of @code{amx-int8} instructions.
+ @item amx_bf16
+ Target supports the execution of @code{amx-bf16} instructions.
+ 
++@item amx_complex
++Target supports the execution of @code{amx-complex} instructions.
++
+ @item amx_fp16
+ Target supports the execution of @code{amx-fp16} instructions.
+ 
+diff --git a/gcc/testsuite/g++.dg/other/i386-2.C b/gcc/testsuite/g++.dg/other/i386-2.C
+index 72ed5fed0..ae1b8f632 100644
+--- a/gcc/testsuite/g++.dg/other/i386-2.C
++++ b/gcc/testsuite/g++.dg/other/i386-2.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+-/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt  -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi" } */
++/* { dg-options "-O -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt  -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi -mamx-complex" } */
+ 
+ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
+    xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
+diff --git a/gcc/testsuite/g++.dg/other/i386-3.C b/gcc/testsuite/g++.dg/other/i386-3.C
+index 9dd53653f..783e35774 100644
+--- a/gcc/testsuite/g++.dg/other/i386-3.C
++++ b/gcc/testsuite/g++.dg/other/i386-3.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target i?86-*-* x86_64-*-* } } */
+-/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi" } */
++/* { dg-options "-O -fkeep-inline-functions -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi -mamx-complex" } */
+ 
+ /* Test that {,x,e,p,t,s,w,a,b,i}mmintrin.h, mm3dnow.h, fma4intrin.h,
+    xopintrin.h, abmintrin.h, bmiintrin.h, tbmintrin.h, lwpintrin.h,
+diff --git a/gcc/testsuite/gcc.target/i386/amx-check.h b/gcc/testsuite/gcc.target/i386/amx-check.h
+index 27dd37bf9..f1a04cf1f 100644
+--- a/gcc/testsuite/gcc.target/i386/amx-check.h
++++ b/gcc/testsuite/gcc.target/i386/amx-check.h
+@@ -216,6 +216,9 @@ main ()
+ #ifdef AMX_FP16
+       && __builtin_cpu_supports ("amx-fp16")
+ #endif
++#ifdef AMX_COMPLEX
++      && __builtin_cpu_supports ("amx-complex")
++#endif
+ #ifdef __linux__
+       && request_perm_xtile_data ()
+ #endif
+diff --git a/gcc/testsuite/gcc.target/i386/amx-helper.h b/gcc/testsuite/gcc.target/i386/amx-helper.h
+index fe24d7067..6ed9f5eb3 100644
+--- a/gcc/testsuite/gcc.target/i386/amx-helper.h
++++ b/gcc/testsuite/gcc.target/i386/amx-helper.h
+@@ -1,6 +1,6 @@
+ #ifndef AMX_HELPER_H_INCLUDED
+ #define AMX_HELPER_H_INCLUDED
+-#if defined(AMX_FP16)
++#if defined(AMX_FP16) || defined(AMX_COMPLEX)
+ #include <immintrin.h>
+ #include <xmmintrin.h>
+ #endif
+@@ -12,7 +12,7 @@ typedef union
+   uint16_t u;
+ } union16f_uw;
+ 
+-#if defined(AMX_FP16)
++#if defined(AMX_FP16) || defined(AMX_COMPLEX)
+ /* Transformation functions between fp16/float */
+ static uint16_t make_f32_fp16 (float f)
+ {
+diff --git a/gcc/testsuite/gcc.target/i386/amxcomplex-asmatt-1.c b/gcc/testsuite/gcc.target/i386/amxcomplex-asmatt-1.c
+new file mode 100644
+index 000000000..b6745e34b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amxcomplex-asmatt-1.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-O2 -mamx-complex" } */
++/* { dg-final { scan-assembler "tcmmimfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1"  } } */
++/* { dg-final { scan-assembler "tcmmrlfp16ps\[ \\t]+\[^\n\]*%tmm3+\[^\n\]*%tmm2+\[^\n\]*%tmm1"  } } */
++#include <immintrin.h>
++
++#define TMM1 1
++#define TMM2 2
++#define TMM3 3
++
++void TEST()
++{
++  _tile_cmmimfp16ps (TMM1, TMM2, TMM3);
++  _tile_cmmrlfp16ps (TMM1, TMM2, TMM3);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/amxcomplex-asmintel-1.c b/gcc/testsuite/gcc.target/i386/amxcomplex-asmintel-1.c
+new file mode 100644
+index 000000000..305465e88
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amxcomplex-asmintel-1.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-require-effective-target masm_intel } */
++/* { dg-options "-O2 -mamx-complex -masm=intel" } */
++/* { dg-final { scan-assembler "tcmmimfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3"  } } */
++/* { dg-final { scan-assembler "tcmmrlfp16ps\[ \\t]+\[^\n\]*%tmm1+\[^\n\]*%tmm2+\[^\n\]*%tmm3"  } } */
++#include <immintrin.h>
++
++void TEST()
++{
++  _tile_cmmimfp16ps (1, 2, 3);
++  _tile_cmmrlfp16ps (1, 2, 3);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/amxcomplex-cmmimfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxcomplex-cmmimfp16ps-2.c
+new file mode 100644
+index 000000000..6e3762c9f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amxcomplex-cmmimfp16ps-2.c
+@@ -0,0 +1,53 @@
++/* { dg-do run { target { ! ia32 } } } */
++/* { dg-require-effective-target amx_complex } */
++/* { dg-require-effective-target avx512fp16 } */
++/* { dg-options "-O2 -mamx-complex -mavx512fp16" } */
++#define AMX_COMPLEX
++#define DO_TEST test_amx_complex_cmmimfp16ps
++void test_amx_complex_cmmimfp16ps ();
++#include "amx-helper.h"
++
++void calc_matrix_cmmimfp16ps (__tile *dst, __tile *src1, __tile *src2)
++{
++  uint16_t *src1_buf = (uint16_t *) src1->buf;
++  uint16_t *src2_buf = (uint16_t *) src2->buf;
++  float *dst_buf = (float *) dst->buf;
++  
++  int M = src1->rows;
++  int N = src1->colsb / 4;
++  int K = src2->colsb / 4;
++  int i, j, k, t;
++
++  for (i = 0; i < M; i++)
++    for (j = 0; j < N; j++)
++      for (k = 0; k < K; k++)
++	for (t = 0; t < 2; t+=2)
++	  dst_buf[i * N + k] +=
++	  (make_fp16_f32(src1_buf[i * 2 * N + 2 * j + t]) *
++	    make_fp16_f32(src2_buf[j * 2 * K + 2 * k + t + 1])) +
++	  (make_fp16_f32(src1_buf[i * 2 * N + 2 * j + t + 1]) *
++	    make_fp16_f32(src2_buf[j * 2 * K + 2 * k + t]));
++}
++
++void test_amx_complex_cmmimfp16ps ()
++{
++  __tilecfg_u cfg;
++  __tile dst, dst_ref, src1, src2;
++  uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024];
++
++  init_fp16_max_tile_buffer (tmp_dst_buf);
++  init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf);
++
++  init_tile_config (&cfg);
++  init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf);
++  init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf);
++  init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf);
++
++  calc_matrix_cmmimfp16ps (&dst, &src1, &src2);
++  
++  _tile_cmmimfp16ps (1, 2, 3);
++  _tile_stored (1, dst_ref.buf, _STRIDE);
++
++  if (!check_tile_register (&dst_ref, &dst))
++        abort ();
++}
+diff --git a/gcc/testsuite/gcc.target/i386/amxcomplex-cmmrlfp16ps-2.c b/gcc/testsuite/gcc.target/i386/amxcomplex-cmmrlfp16ps-2.c
+new file mode 100644
+index 000000000..15940708a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/amxcomplex-cmmrlfp16ps-2.c
+@@ -0,0 +1,53 @@
++/* { dg-do run { target { ! ia32 } } } */
++/* { dg-require-effective-target amx_complex } */
++/* { dg-require-effective-target avx512fp16 } */
++/* { dg-options "-O2 -mamx-complex -mavx512fp16" } */
++#define AMX_COMPLEX
++#define DO_TEST test_amx_complex_cmmrlfp16ps
++void test_amx_complex_cmmrlfp16ps();
++#include "amx-helper.h"
++
++void calc_matrix_cmmrlfp16ps (__tile *dst, __tile *src1, __tile *src2)
++{
++  uint16_t *src1_buf = (uint16_t *) src1->buf;
++  uint16_t *src2_buf = (uint16_t *) src2->buf;
++  float *dst_buf = (float *) dst->buf;
++  
++  int M = src1->rows;
++  int N = src1->colsb / 4;
++  int K = src2->colsb / 4;
++  int i, j, k, t;
++
++  for (i = 0; i < M; i++)
++    for (j = 0; j < N; j++)
++      for (k = 0; k < K; k++)
++	for (t = 0; t < 2; t+=2)
++	  dst_buf[i * N + k] += 
++	    (make_fp16_f32 (src1_buf[i * 2 * N + 2 * j + t]) *
++	      make_fp16_f32 (src2_buf[j * 2 * K + 2 * k + t])) -
++	    (make_fp16_f32 (src1_buf[i * 2 * N + 2 * j + t + 1]) *
++	      make_fp16_f32 (src2_buf[j * 2 * K + 2 * k + t + 1]));
++}
++
++void test_amx_complex_cmmrlfp16ps ()
++{
++  __tilecfg_u cfg;
++  __tile dst, dst_ref, src1, src2;
++  uint8_t tmp_dst_buf[1024], tmp_dst_zero_buf[1024];
++
++  init_fp16_max_tile_buffer (tmp_dst_buf);
++  init_fp16_max_tile_zero_buffer (tmp_dst_zero_buf);
++
++  init_tile_config (&cfg);
++  init_tile_reg_and_src_with_buffer (1, dst, tmp_dst_zero_buf);
++  init_tile_reg_and_src_with_buffer (2, src1, tmp_dst_buf);
++  init_tile_reg_and_src_with_buffer (3, src2, tmp_dst_buf);
++
++  calc_matrix_cmmrlfp16ps (&dst, &src1, &src2);
++  
++  _tile_cmmrlfp16ps (1, 2, 3);
++  _tile_stored (1, dst_ref.buf, _STRIDE);
++
++  if (!check_tile_register (&dst_ref, &dst))
++        abort ();
++}
+diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+index bdcfdbc88..1a2f3b83d 100644
+--- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc
++++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+@@ -82,6 +82,7 @@ extern void test_avxvnni (void)			__attribute__((__target__("avxvnni")));
+ extern void test_avx512fp16 (void)		__attribute__((__target__("avx512fp16")));
+ extern void test_amx_fp16 (void)		__attribute__((__target__("amx-fp16")));
+ extern void test_prefetchi (void)               __attribute__((__target__("prefetchi")));
++extern void test_amx_complex (void)		__attribute__((__target__("amx-complex")));
+ 
+ extern void test_no_sgx (void)			__attribute__((__target__("no-sgx")));
+ extern void test_no_avx5124fmaps(void)		__attribute__((__target__("no-avx5124fmaps")));
+@@ -165,6 +166,7 @@ extern void test_no_avxvnni (void)		__attribute__((__target__("no-avxvnni")));
+ extern void test_no_avx512fp16 (void)		__attribute__((__target__("no-avx512fp16")));
+ extern void test_no_amx_fp16 (void)		__attribute__((__target__("no-amx-fp16")));
+ extern void test_no_prefetchi (void)            __attribute__((__target__("no-prefetchi")));
++extern void test_no_amx_complex (void)		__attribute__((__target__("no-amx-complex")));
+ 
+ extern void test_arch_nocona (void)		__attribute__((__target__("arch=nocona")));
+ extern void test_arch_core2 (void)		__attribute__((__target__("arch=core2")));
+diff --git a/gcc/testsuite/gcc.target/i386/sse-12.c b/gcc/testsuite/gcc.target/i386/sse-12.c
+index 9ab4a7e0c..d2aadd506 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-12.c
++++ b/gcc/testsuite/gcc.target/i386/sse-12.c
+@@ -3,7 +3,7 @@
+    popcntintrin.h gfniintrin.h and mm_malloc.h are usable
+    with -O -std=c89 -pedantic-errors.  */
+ /* { dg-do compile } */
+-/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512bw -mavx512dq -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mamx-fp16" } */
++/* { dg-options "-O -std=c89 -pedantic-errors -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512bw -mavx512dq -mavx512vl -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mamx-fp16 -mamx-complex" } */
+ 
+ #include <x86intrin.h>
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/sse-13.c b/gcc/testsuite/gcc.target/i386/sse-13.c
+index db7c0fc7a..c39382836 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-13.c
++++ b/gcc/testsuite/gcc.target/i386/sse-13.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512vl -mavx512dq -mavx512bw -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi" } */
++/* { dg-options "-O2 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512vl -mavx512dq -mavx512bw -mavx512vbmi -mavx512vbmi2 -mavx512ifma -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mavx512vp2intersect -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mavx512bitalg -mpconfig -mwbnoinvd -mavx512bf16 -menqcmd -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mprefetchi -mamx-complex" } */
+ /* { dg-add-options bind_pic_locally } */
+ 
+ #include <x86intrin.h>
+diff --git a/gcc/testsuite/gcc.target/i386/sse-14.c b/gcc/testsuite/gcc.target/i386/sse-14.c
+index eaa1a8d81..c34ac1aec 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-14.c
++++ b/gcc/testsuite/gcc.target/i386/sse-14.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -mavx512vl -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16" } */
++/* { dg-options "-O0 -Werror-implicit-function-declaration -march=k8 -msse4a -m3dnow -mavx -mavx2 -mfma4 -mxop -maes -mpclmul -mpopcnt -mabm -mlzcnt -mbmi -mbmi2 -mtbm -mlwp -mfsgsbase -mrdrnd -mf16c -mfma -mrtm -mrdseed -mprfchw -madx -mfxsr -mxsaveopt -mavx512f -mavx512er -mavx512cd -mavx512pf -msha -mprefetchwt1 -mxsavec -mxsaves -mclflushopt -mavx512dq -mavx512bw -mavx512vl -mavx512ifma -mavx512vbmi -mavx512vbmi2 -mavx5124fmaps -mavx5124vnniw -mavx512vpopcntdq -mclwb -mmwaitx -mclzero -mpku -msgx -mrdpid -mgfni -mpconfig -mwbnoinvd -mavx512vl -mavx512bf16 -menqcmd -mavx512vp2intersect -mserialize -mtsxldtrk -mamx-tile -mamx-int8 -mamx-bf16 -mkl -mwidekl -mavxvnni -mavx512fp16 -mamx-fp16 -mamx-complex" } */
+ /* { dg-add-options bind_pic_locally } */
+ 
+ #include <x86intrin.h>
+diff --git a/gcc/testsuite/gcc.target/i386/sse-22.c b/gcc/testsuite/gcc.target/i386/sse-22.c
+index 19afe639d..c3667b829 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-22.c
++++ b/gcc/testsuite/gcc.target/i386/sse-22.c
+@@ -103,7 +103,7 @@
+ 
+ 
+ #ifndef DIFFERENT_PRAGMAS
+-#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,avx512vl,avx512bw,avx512dq,avx512vbmi,avx512vbmi2,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16")
++#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,avx512vl,avx512bw,avx512dq,avx512vbmi,avx512vbmi2,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16,amx-complex")
+ #endif
+ 
+ /* Following intrinsics require immediate arguments.  They
+@@ -220,7 +220,7 @@ test_4 (_mm_cmpestrz, int, __m128i, int, __m128i, int, 1)
+ 
+ /* immintrin.h (AVX/AVX2/RDRND/FSGSBASE/F16C/RTM/AVX512F/SHA) */
+ #ifdef DIFFERENT_PRAGMAS
+-#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,avx512f,avx512er,avx512cd,avx512pf,sha,avx512vl,avx512bw,avx512dq,avx512ifma,avx512vbmi,avx512vbmi2,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16")
++#pragma GCC target ("avx,avx2,rdrnd,fsgsbase,f16c,rtm,avx512f,avx512er,avx512cd,avx512pf,sha,avx512vl,avx512bw,avx512dq,avx512ifma,avx512vbmi,avx512vbmi2,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,gfni,avx512bitalg,avx512bf16,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16,amx-complex")
+ #endif
+ #include <immintrin.h>
+ test_1 (_cvtss_sh, unsigned short, float, 1)
+diff --git a/gcc/testsuite/gcc.target/i386/sse-23.c b/gcc/testsuite/gcc.target/i386/sse-23.c
+index 741694e87..756b6eb9c 100644
+--- a/gcc/testsuite/gcc.target/i386/sse-23.c
++++ b/gcc/testsuite/gcc.target/i386/sse-23.c
+@@ -843,6 +843,6 @@
+ #define __builtin_ia32_vpclmulqdq_v2di(A, B, C)  __builtin_ia32_vpclmulqdq_v2di(A, B, 1) 
+ #define __builtin_ia32_vpclmulqdq_v8di(A, B, C)  __builtin_ia32_vpclmulqdq_v8di(A, B, 1) 
+ 
+-#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,xsavec,xsaves,clflushopt,avx512bw,avx512dq,avx512vl,avx512vbmi,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,avx512vbmi2,vpclmulqdq,avx512bitalg,pconfig,wbnoinvd,avx512bf16,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16,prefetchi")
++#pragma GCC target ("sse4a,3dnow,avx,avx2,fma4,xop,aes,pclmul,popcnt,abm,lzcnt,bmi,bmi2,tbm,lwp,fsgsbase,rdrnd,f16c,fma,rtm,rdseed,prfchw,adx,fxsr,xsaveopt,avx512f,avx512er,avx512cd,avx512pf,sha,prefetchwt1,xsavec,xsaves,clflushopt,avx512bw,avx512dq,avx512vl,avx512vbmi,avx512ifma,avx5124fmaps,avx5124vnniw,avx512vpopcntdq,clwb,mwaitx,clzero,pku,sgx,rdpid,gfni,avx512vbmi2,vpclmulqdq,avx512bitalg,pconfig,wbnoinvd,avx512bf16,enqcmd,avx512vp2intersect,serialize,tsxldtrk,amx-tile,amx-int8,amx-bf16,kl,widekl,avxvnni,avx512fp16,amx-fp16,prefetchi,amx-complex")
+ 
+ #include <x86intrin.h>
+diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
+index 0d83c780c..d404058fd 100644
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -9421,6 +9421,17 @@ proc check_effective_target_avxvnni { } {
+     } "-mavxvnni" ]
+ }
+ 
++# Return 1 if amx-complex instructions can be compiled.
++proc check_effective_target_amx_complex { } {
++    return [check_no_compiler_messages amx_complex object {
++	void
++	foo ()
++	{
++	  __asm__ volatile ("tcmmimfp16ps\t%%tmm1, %%tmm2, %%tmm3" ::);
++	}
++    } "-mamx-complex" ]
++}
++
+ # Return 1 if sse instructions can be compiled.
+ proc check_effective_target_sse { } {
+     return [check_no_compiler_messages sse object {
+-- 
+2.31.1
+
diff --git a/0276-i386-Add-AMX-COMPLEX-to-Granite-Rapids.patch b/0276-i386-Add-AMX-COMPLEX-to-Granite-Rapids.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5c69affde2d13687a27e6124dc8cc53bdb19d909
--- /dev/null
+++ b/0276-i386-Add-AMX-COMPLEX-to-Granite-Rapids.patch
@@ -0,0 +1,30 @@
+From 40469a6119085e4c4741bcaeb9418606d28b40c4 Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Fri, 31 Mar 2023 10:49:14 +0800
+Subject: [PATCH 22/28] i386: Add AMX-COMPLEX to Granite Rapids
+
+gcc/Changelog:
+
+	* config/i386/i386.h (PTA_GRANITERAPIDS): Add PTA_AMX_COMPLEX.
+
+(cherry picked from commit afa87bd5f7b126e20268aa959441cde2e02bba0e)
+---
+ gcc/config/i386/i386.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
+index 75953defc..56d7794dc 100644
+--- a/gcc/config/i386/i386.h
++++ b/gcc/config/i386/i386.h
+@@ -2358,7 +2358,7 @@ constexpr wide_int_bitmask PTA_ALDERLAKE = PTA_TREMONT | PTA_ADX | PTA_AVX
+   | PTA_PCONFIG | PTA_PKU | PTA_VAES | PTA_VPCLMULQDQ | PTA_SERIALIZE
+   | PTA_HRESET | PTA_KL | PTA_WIDEKL | PTA_AVXVNNI;
+ constexpr wide_int_bitmask PTA_GRANITERAPIDS = PTA_SAPPHIRERAPIDS | PTA_AMX_FP16
+-  | PTA_PREFETCHI;
++  | PTA_PREFETCHI | PTA_AMX_COMPLEX;
+ constexpr wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW
+   | PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ;
+ constexpr wide_int_bitmask PTA_ZNVER1 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
+-- 
+2.31.1
+
diff --git a/0277-Initial-Granite-Rapids-D-Support.patch b/0277-Initial-Granite-Rapids-D-Support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..def6e9633a2a7a399ff1078aec37165eca6d25ce
--- /dev/null
+++ b/0277-Initial-Granite-Rapids-D-Support.patch
@@ -0,0 +1,212 @@
+From 125e5d448538f7534e0fe3df9b7947cf41605b51 Mon Sep 17 00:00:00 2001
+From: "Mo, Zewei" 
+Date: Mon, 3 Jul 2023 11:00:26 +0800
+Subject: [PATCH 23/28] Initial Granite Rapids D Support
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h
+	(get_intel_cpu): Handle Granite Rapids D.
+	* common/config/i386/i386-common.cc:
+	(processor_alias_table): Add graniterapids-d.
+	* common/config/i386/i386-cpuinfo.h
+	(enum processor_subtypes): Add INTEL_COREI7_GRANITERAPIDS_D.
+	* config.gcc: Add -march=graniterapids-d.
+	* config/i386/driver-i386.cc (host_detect_local_cpu):
+	Handle graniterapids-d.
+	* config/i386/i386.h: (PTA_GRANITERAPIDS_D): New.
+	* doc/extend.texi: Add graniterapids-d.
+	* doc/invoke.texi: Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/i386/mv16.C: Add graniterapids-d.
+	* gcc.target/i386/funcspec-56.inc: Handle new march.
+
+(cherry picked from commit a0cb65d34cc141571e870fb3b53b3ff47ae3338d)
+---
+ gcc/common/config/i386/cpuinfo.h              |  9 ++++++++-
+ gcc/common/config/i386/i386-common.cc         |  2 ++
+ gcc/common/config/i386/i386-cpuinfo.h         |  1 +
+ gcc/config.gcc                                |  3 ++-
+ gcc/config/i386/driver-i386.cc                |  5 ++++-
+ gcc/config/i386/i386.h                        |  4 +++-
+ gcc/doc/extend.texi                           |  3 +++
+ gcc/doc/invoke.texi                           | 11 +++++++++++
+ gcc/testsuite/g++.target/i386/mv16.C          |  6 ++++++
+ gcc/testsuite/gcc.target/i386/funcspec-56.inc |  1 +
+ 10 files changed, 41 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 39d3351db..1e53248ef 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -529,7 +529,6 @@ get_intel_cpu (struct __processor_model *cpu_model,
+       cpu_model->__cpu_subtype = INTEL_COREI7_SAPPHIRERAPIDS;
+       break;
+     case 0xad:
+-    case 0xae:
+       /* Granite Rapids.  */
+       cpu = "graniterapids";
+       CHECK___builtin_cpu_is ("corei7");
+@@ -537,6 +536,14 @@ get_intel_cpu (struct __processor_model *cpu_model,
+       cpu_model->__cpu_type = INTEL_COREI7;
+       cpu_model->__cpu_subtype = INTEL_COREI7_GRANITERAPIDS;
+       break;
++    case 0xae:
++      /* Granite Rapids D.  */
++      cpu = "graniterapids-d";
++      CHECK___builtin_cpu_is ("corei7");
++      CHECK___builtin_cpu_is ("graniterapids-d");
++      cpu_model->__cpu_type = INTEL_COREI7;
++      cpu_model->__cpu_subtype = INTEL_COREI7_GRANITERAPIDS_D;
++      break;
+     case 0x17:
+     case 0x1d:
+       /* Penryn.  */
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index 87e8afe9b..28f468f48 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -1993,6 +1993,8 @@ const pta processor_alias_table[] =
+     M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
+   {"graniterapids", PROCESSOR_GRANITERAPIDS, CPU_HASWELL, PTA_GRANITERAPIDS,
+     M_CPU_SUBTYPE (INTEL_COREI7_GRANITERAPIDS), P_PROC_AVX512F},
++  {"graniterapids-d", PROCESSOR_GRANITERAPIDS, CPU_HASWELL, PTA_GRANITERAPIDS_D,
++    M_CPU_SUBTYPE (INTEL_COREI7_GRANITERAPIDS_D), P_PROC_AVX512F},
+   {"bonnell", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+     M_CPU_TYPE (INTEL_BONNELL), P_PROC_SSSE3},
+   {"atom", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+diff --git a/gcc/common/config/i386/i386-cpuinfo.h b/gcc/common/config/i386/i386-cpuinfo.h
+index 56020faac..a32f32c97 100644
+--- a/gcc/common/config/i386/i386-cpuinfo.h
++++ b/gcc/common/config/i386/i386-cpuinfo.h
+@@ -93,6 +93,7 @@ enum processor_subtypes
+   INTEL_COREI7_ROCKETLAKE,
+   AMDFAM19H_ZNVER4,
+   INTEL_COREI7_GRANITERAPIDS,
++  INTEL_COREI7_GRANITERAPIDS_D,
+   CPU_SUBTYPE_MAX
+ };
+ 
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index ca5c8f8a0..3108ac4eb 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -670,7 +670,8 @@ slm nehalem westmere sandybridge ivybridge haswell broadwell bonnell \
+ silvermont knl knm skylake-avx512 cannonlake icelake-client icelake-server \
+ skylake goldmont goldmont-plus tremont cascadelake tigerlake cooperlake \
+ sapphirerapids alderlake rocketlake eden-x2 nano nano-1000 nano-2000 nano-3000 \
+-nano-x2 eden-x4 nano-x4 x86-64 x86-64-v2 x86-64-v3 x86-64-v4 graniterapids native"
++nano-x2 eden-x4 nano-x4 x86-64 x86-64-v2 x86-64-v3 x86-64-v4 graniterapids \
++graniterapids-d native"
+ 
+ # Additional x86 processors supported by --with-cpu=.  Each processor
+ # MUST be separated by exactly one space.
+diff --git a/gcc/config/i386/driver-i386.cc b/gcc/config/i386/driver-i386.cc
+index ea8c3d8d1..e3bca4b49 100644
+--- a/gcc/config/i386/driver-i386.cc
++++ b/gcc/config/i386/driver-i386.cc
+@@ -576,8 +576,11 @@ const char *host_detect_local_cpu (int argc, const char **argv)
+ 	      /* This is unknown family 0x6 CPU.  */
+ 	      if (has_feature (FEATURE_AVX))
+ 		{
++		  /* Assume Granite Rapids D.  */
++		  if (has_feature (FEATURE_AMX_COMPLEX))
++		    cpu = "graniterapids-d";
+ 		  /* Assume Granite Rapids.  */
+-		  if (has_feature (FEATURE_AMX_FP16))
++		  else if (has_feature (FEATURE_AMX_FP16))
+ 		    cpu = "graniterapids";
+ 		  /* Assume Tiger Lake */
+ 		  else if (has_feature (FEATURE_AVX512VP2INTERSECT))
+diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
+index 56d7794dc..eda3e5e5b 100644
+--- a/gcc/config/i386/i386.h
++++ b/gcc/config/i386/i386.h
+@@ -2358,7 +2358,9 @@ constexpr wide_int_bitmask PTA_ALDERLAKE = PTA_TREMONT | PTA_ADX | PTA_AVX
+   | PTA_PCONFIG | PTA_PKU | PTA_VAES | PTA_VPCLMULQDQ | PTA_SERIALIZE
+   | PTA_HRESET | PTA_KL | PTA_WIDEKL | PTA_AVXVNNI;
+ constexpr wide_int_bitmask PTA_GRANITERAPIDS = PTA_SAPPHIRERAPIDS | PTA_AMX_FP16
+-  | PTA_PREFETCHI | PTA_AMX_COMPLEX;
++  | PTA_PREFETCHI;
++constexpr wide_int_bitmask PTA_GRANITERAPIDS_D = PTA_GRANITERAPIDS
++  | PTA_AMX_COMPLEX;
+ constexpr wide_int_bitmask PTA_KNM = PTA_KNL | PTA_AVX5124VNNIW
+   | PTA_AVX5124FMAPS | PTA_AVX512VPOPCNTDQ;
+ constexpr wide_int_bitmask PTA_ZNVER1 = PTA_64BIT | PTA_MMX | PTA_SSE | PTA_SSE2
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index d7b0bc802..674db2f1a 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -21837,6 +21837,9 @@ Intel Core i7 Rocketlake CPU.
+ @item graniterapids
+ Intel Core i7 graniterapids CPU.
+ 
++@item graniterapids-d
++Intel Core i7 graniterapids D CPU.
++
+ @item bonnell
+ Intel Atom Bonnell CPU.
+ 
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 186b33481..a2ec060fd 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -31626,6 +31626,17 @@ MOVDIRI, MOVDIR64B, AVX512VP2INTERSECT, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG,
+ SERIALIZE, TSXLDTRK, UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512FP16,
+ AVX512BF16, AMX-FP16 and PREFETCHI instruction set support.
+ 
++@item graniterapids-d
++Intel graniterapids D CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
++SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
++RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
++AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
++AVX512CD, PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2,
++VPCLMULQDQ, AVX512BITALG, RDPID, AVX512VPOPCNTDQ, PCONFIG, WBNOINVD, CLWB,
++MOVDIRI, MOVDIR64B, AVX512VP2INTERSECT, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG,
++SERIALIZE, TSXLDTRK, UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512FP16,
++AVX512BF16, AMX-FP16, PREFETCHI and AMX-COMPLEX instruction set support.
++
+ @item k6
+ AMD K6 CPU with MMX instruction set support.
+ 
+diff --git a/gcc/testsuite/g++.target/i386/mv16.C b/gcc/testsuite/g++.target/i386/mv16.C
+index 65cc24f32..17b1fc722 100644
+--- a/gcc/testsuite/g++.target/i386/mv16.C
++++ b/gcc/testsuite/g++.target/i386/mv16.C
+@@ -96,6 +96,10 @@ int __attribute__ ((target("arch=graniterapids"))) foo () {
+   return 26;
+ }
+ 
++int __attribute__ ((target("arch=graniterapids-d"))) foo () {
++  return 28;
++}
++
+ int main ()
+ {
+   int val = foo ();
+@@ -136,6 +140,8 @@ int main ()
+     assert (val == 24);
+   else if (__builtin_cpu_is ("graniterapids"))
+     assert (val == 25);
++  else if (__builtin_cpu_is ("graniterapids-d"))
++    assert (val == 26);
+   else
+     assert (val == 0);
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/funcspec-56.inc b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+index 1a2f3b83d..f0f3397a7 100644
+--- a/gcc/testsuite/gcc.target/i386/funcspec-56.inc
++++ b/gcc/testsuite/gcc.target/i386/funcspec-56.inc
+@@ -191,6 +191,7 @@ extern void test_arch_sapphirerapids (void)	__attribute__((__target__("arch=sapp
+ extern void test_arch_alderlake (void)          __attribute__((__target__("arch=alderlake")));
+ extern void test_arch_rocketlake (void)         __attribute__((__target__("arch=rocketlake")));
+ extern void test_arch_graniterapids (void)	__attribute__((__target__("arch=graniterapids")));
++extern void test_arch_graniterapids_d (void)	__attribute__((__target__("arch=graniterapids-d")));
+ extern void test_arch_k8 (void)			__attribute__((__target__("arch=k8")));
+ extern void test_arch_k8_sse3 (void)		__attribute__((__target__("arch=k8-sse3")));
+ extern void test_arch_opteron (void)		__attribute__((__target__("arch=opteron")));
+-- 
+2.31.1
+
diff --git a/0278-Correct-Granite-Rapids-D-documentation.patch b/0278-Correct-Granite-Rapids-D-documentation.patch
new file mode 100644
index 0000000000000000000000000000000000000000..18e756a5f51d16f3c69d18448f1cfd24db55a3fc
--- /dev/null
+++ b/0278-Correct-Granite-Rapids-D-documentation.patch
@@ -0,0 +1,48 @@
+From a809a6a416af4d08f7feeadfdd5d1f5a76a830b5 Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Thu, 20 Jul 2023 10:47:18 +0800
+Subject: [PATCH 24/28] Correct Granite Rapids{, D} documentation
+
+gcc/Changelog:
+
+	* doc/invoke.texi: Remove AVX512VP2INTERSECT in
+	Granite Rapids{, D} from documentation.
+
+(cherry picked from commit 38daaaa91438d3f635a10bf5d5181c3b29f07df9)
+---
+ gcc/doc/invoke.texi | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index a2ec060fd..4d3eccdb2 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -31622,9 +31622,9 @@ RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+ AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
+ AVX512CD, PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2,
+ VPCLMULQDQ, AVX512BITALG, RDPID, AVX512VPOPCNTDQ, PCONFIG, WBNOINVD, CLWB,
+-MOVDIRI, MOVDIR64B, AVX512VP2INTERSECT, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG,
+-SERIALIZE, TSXLDTRK, UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512FP16,
+-AVX512BF16, AMX-FP16 and PREFETCHI instruction set support.
++MOVDIRI, MOVDIR64B, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG, SERIALIZE, TSXLDTRK,
++UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512-FP16, AVX512BF16, AMX-FP16
++and PREFETCHI instruction set support.
+ 
+ @item graniterapids-d
+ Intel graniterapids D CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+@@ -31633,9 +31633,9 @@ RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+ AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
+ AVX512CD, PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2,
+ VPCLMULQDQ, AVX512BITALG, RDPID, AVX512VPOPCNTDQ, PCONFIG, WBNOINVD, CLWB,
+-MOVDIRI, MOVDIR64B, AVX512VP2INTERSECT, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG,
+-SERIALIZE, TSXLDTRK, UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512FP16,
+-AVX512BF16, AMX-FP16, PREFETCHI and AMX-COMPLEX instruction set support.
++MOVDIRI, MOVDIR64B, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG, SERIALIZE, TSXLDTRK,
++UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512FP16, AVX512BF16, AMX-FP16,
++PREFETCHI and AMX-COMPLEX instruction set support.
+ 
+ @item k6
+ AMD K6 CPU with MMX instruction set support.
+-- 
+2.31.1
+
diff --git a/0279-i386-Remove-Meteorlake-s-family_model.patch b/0279-i386-Remove-Meteorlake-s-family_model.patch
new file mode 100644
index 0000000000000000000000000000000000000000..13e5dc9beaaac26d831868bee6f4aa466da22269
--- /dev/null
+++ b/0279-i386-Remove-Meteorlake-s-family_model.patch
@@ -0,0 +1,30 @@
+From 62852213bc6d3e56804ca05826bb95a3a2fe4eba Mon Sep 17 00:00:00 2001
+From: "Hu, Lin1" 
+Date: Thu, 15 Dec 2022 15:51:18 +0800
+Subject: [PATCH 25/28] i386: Remove Meteorlake's family_model
+
+gcc/ChangeLog:
+
+	* common/config/i386/cpuinfo.h (get_intel_cpu): Remove case 0xb5
+	for meteorlake.
+
+(cherry picked from commit 9e74b7ec0b218364905e3e7de5c41e8148ffc61b)
+---
+ gcc/common/config/i386/cpuinfo.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 1e53248ef..348bc0c12 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -510,7 +510,6 @@ get_intel_cpu (struct __processor_model *cpu_model,
+       /* Alder Lake.  */
+     case 0xb7:
+       /* Raptor Lake.  */
+-    case 0xb5:
+     case 0xaa:
+     case 0xac:
+       /* Meteor Lake.  */
+-- 
+2.31.1
+
diff --git a/0280-x86-Update-model-values-for-Alderlake-Rocketlake-and.patch b/0280-x86-Update-model-values-for-Alderlake-Rocketlake-and.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4abff3ff906452f9d9b6573815312b307ecaf9d1
--- /dev/null
+++ b/0280-x86-Update-model-values-for-Alderlake-Rocketlake-and.patch
@@ -0,0 +1,33 @@
+From 73042aa18fe70aa30a9c7c760b08e642560ecccd Mon Sep 17 00:00:00 2001
+From: "Cui, Lili" 
+Date: Thu, 29 Jun 2023 03:10:35 +0000
+Subject: [PATCH 26/28] x86: Update model values for Alderlake, Rocketlake and
+ Raptorlake.
+
+Update model values for Alderlake, Rocketlake and Raptorlake according to SDM.
+
+gcc/ChangeLog
+
+	* common/config/i386/cpuinfo.h (get_intel_cpu): Remove model value 0xa8
+	from Rocketlake, move model value 0xbf from Alderlake to Raptorlake.
+
+(cherry picked from commit e510c3be13a8ccdf1fc1b27c2501c126d493f335)
+---
+ gcc/common/config/i386/cpuinfo.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index 348bc0c12..f9bcb6fad 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -509,6 +509,7 @@ get_intel_cpu (struct __processor_model *cpu_model,
+     case 0x9a:
+       /* Alder Lake.  */
+     case 0xb7:
++    case 0xbf:
+       /* Raptor Lake.  */
+     case 0xaa:
+     case 0xac:
+-- 
+2.31.1
+
diff --git a/0281-x86-Update-model-values-for-Raptorlake.patch b/0281-x86-Update-model-values-for-Raptorlake.patch
new file mode 100644
index 0000000000000000000000000000000000000000..eace6b337dfc15f4e25edc8f832720d85877a8f8
--- /dev/null
+++ b/0281-x86-Update-model-values-for-Raptorlake.patch
@@ -0,0 +1,32 @@
+From 3dbe28984e0f9c24d6670cfba42983bc32c08b0a Mon Sep 17 00:00:00 2001
+From: "Cui, Lili" 
+Date: Mon, 14 Aug 2023 02:06:00 +0000
+Subject: [PATCH 27/28] x86: Update model values for Raptorlake.
+
+Update model values for Raptorlake according to SDM.
+
+gcc/ChangeLog
+
+	* common/config/i386/cpuinfo.h (get_intel_cpu): Add model value 0xba
+	to Raptorlake.
+
+(cherry picked from commit 614052dd4ea083e086712809c754ffebd9361316)
+---
+ gcc/common/config/i386/cpuinfo.h | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/gcc/common/config/i386/cpuinfo.h b/gcc/common/config/i386/cpuinfo.h
+index f9bcb6fad..da1568fd1 100644
+--- a/gcc/common/config/i386/cpuinfo.h
++++ b/gcc/common/config/i386/cpuinfo.h
+@@ -509,6 +509,7 @@ get_intel_cpu (struct __processor_model *cpu_model,
+     case 0x9a:
+       /* Alder Lake.  */
+     case 0xb7:
++    case 0xba:
+     case 0xbf:
+       /* Raptor Lake.  */
+     case 0xaa:
+-- 
+2.31.1
+
diff --git a/0282-Fix-target_clone-arch-graniterapids-d.patch b/0282-Fix-target_clone-arch-graniterapids-d.patch
new file mode 100644
index 0000000000000000000000000000000000000000..54abd2eeb0356913d6f70823b5c70fd5a0aa6483
--- /dev/null
+++ b/0282-Fix-target_clone-arch-graniterapids-d.patch
@@ -0,0 +1,159 @@
+From 8db0f3cd29bd7f937ffa01dd1100360fbbf5b6f4 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Tue, 22 Aug 2023 18:18:31 +0800
+Subject: [PATCH 28/28] Fix target_clone ("arch=graniterapids-d")
+
+Both "graniterapids-d" and "graniterapids" are attached with
+PROCESSOR_GRANITERAPIDS in processor_alias_table but mapped to
+different __cpu_subtype in get_intel_cpu.
+
+And get_builtin_code_for_version will try to match the first
+PROCESSOR_GRANITERAPIDS in processor_alias_table which maps to
+"graniterapids" here.
+
+1861      else if (new_target->arch_specified && new_target->arch > 0)
+1862        for (i = 0; i < pta_size; i++)
+1863          if (processor_alias_table[i].processor == new_target->arch)
+1864            {
+1865              const pta *arch_info = &processor_alias_table[i];
+1866              switch (arch_info->priority)
+1867                {
+1868                default:
+1869                  arg_str = arch_info->name;
+
+This mismatch makes dispatch_function_versions check the predicate
+of __builtin_cpu_is ("graniterapids") for "graniterapids-d" and causes
+the issue.
+The patch explicitly adds PROCESSOR_GRANITERAPIDS_D to make a distinction.
+
+For "alderlake","raptorlake", "meteorlake" they share same isa, cost,
+tuning, and mapped to the same __cpu_type/__cpu_subtype in
+get_intel_cpu, so no need to add PROCESSOR_RAPTORLAKE and others.
+
+gcc/ChangeLog:
+
+	* common/config/i386/i386-common.cc (processor_names): Add new
+	member graniterapids-d.
+	* config/i386/i386-options.cc (processor_alias_table): Update
+	table with PROCESSOR_GRANITERAPIDS_D.
+	(m_GRANITERAPIDS_D): New macro.
+	(m_CORE_AVX512): Add m_GRANITERAPIDS_D.
+	(processor_cost_table): Add icelake_cost for
+	PROCESSOR_GRANITERAPIDS_D.
+	* config/i386/i386.h (enum processor_type): Add new member
+	PROCESSOR_GRANITERAPIDS_D.
+	* config/i386/i386-c.cc (ix86_target_macros_internal): Handle
+	PROCESSOR_GRANITERAPIDS_D.
+---
+ gcc/common/config/i386/i386-common.cc | 6 ++++--
+ gcc/config/i386/i386-c.cc             | 8 ++++++++
+ gcc/config/i386/i386-options.cc       | 4 +++-
+ gcc/config/i386/i386.h                | 3 ++-
+ 4 files changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/common/config/i386/i386-common.cc b/gcc/common/config/i386/i386-common.cc
+index 28f468f48..bec6801ce 100644
+--- a/gcc/common/config/i386/i386-common.cc
++++ b/gcc/common/config/i386/i386-common.cc
+@@ -1873,6 +1873,7 @@ const char *const processor_names[] =
+   "alderlake",
+   "rocketlake",
+   "graniterapids",
++  "graniterapids-d",
+   "intel",
+   "geode",
+   "k6",
+@@ -1993,8 +1994,9 @@ const pta processor_alias_table[] =
+     M_CPU_SUBTYPE (INTEL_COREI7_ALDERLAKE), P_PROC_AVX2},
+   {"graniterapids", PROCESSOR_GRANITERAPIDS, CPU_HASWELL, PTA_GRANITERAPIDS,
+     M_CPU_SUBTYPE (INTEL_COREI7_GRANITERAPIDS), P_PROC_AVX512F},
+-  {"graniterapids-d", PROCESSOR_GRANITERAPIDS, CPU_HASWELL, PTA_GRANITERAPIDS_D,
+-    M_CPU_SUBTYPE (INTEL_COREI7_GRANITERAPIDS_D), P_PROC_AVX512F},
++  {"graniterapids-d", PROCESSOR_GRANITERAPIDS_D, CPU_HASWELL,
++    PTA_GRANITERAPIDS_D, M_CPU_SUBTYPE (INTEL_COREI7_GRANITERAPIDS_D),
++    P_PROC_AVX512F},
+   {"bonnell", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+     M_CPU_TYPE (INTEL_BONNELL), P_PROC_SSSE3},
+   {"atom", PROCESSOR_BONNELL, CPU_ATOM, PTA_BONNELL,
+diff --git a/gcc/config/i386/i386-c.cc b/gcc/config/i386/i386-c.cc
+index 5e0ac278c..49f0db2b8 100644
+--- a/gcc/config/i386/i386-c.cc
++++ b/gcc/config/i386/i386-c.cc
+@@ -246,6 +246,10 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+       def_or_undef (parse_in, "__graniterapids");
+       def_or_undef (parse_in, "__graniterapids__");
+       break;
++    case PROCESSOR_GRANITERAPIDS_D:
++      def_or_undef (parse_in, "__graniterapids_d");
++      def_or_undef (parse_in, "__graniterapids_d__");
++      break;
+     case PROCESSOR_ALDERLAKE:
+       def_or_undef (parse_in, "__alderlake");
+       def_or_undef (parse_in, "__alderlake__");
+@@ -254,6 +258,7 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+       def_or_undef (parse_in, "__rocketlake");
+       def_or_undef (parse_in, "__rocketlake__");
+       break;
++
+     /* use PROCESSOR_max to not set/unset the arch macro.  */
+     case PROCESSOR_max:
+       break;
+@@ -426,6 +431,9 @@ ix86_target_macros_internal (HOST_WIDE_INT isa_flag,
+     case PROCESSOR_GRANITERAPIDS:
+       def_or_undef (parse_in, "__tune_graniterapids__");
+       break;
++    case PROCESSOR_GRANITERAPIDS_D:
++      def_or_undef (parse_in, "__tune_graniterapids_d__");
++      break;
+     case PROCESSOR_INTEL:
+     case PROCESSOR_GENERIC:
+       break;
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 7efd25084..86932d719 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -128,10 +128,11 @@ along with GCC; see the file COPYING3.  If not see
+ #define m_ALDERLAKE (HOST_WIDE_INT_1U<
+Date: Mon, 22 Jul 2024 14:06:18 +0800
+Subject: [PATCH 1/2] i386: Change prefetchi output template
+
+For prefetchi instructions, RIP-relative address is explicitly mentioned
+for operand and assembler obeys that rule strictly. This makes
+instruction like:
+
+	prefetchit0	bar
+
+got illegal for assembler, which should be a broad usage for prefetchi.
+
+Change to %a to explicitly add (%rip) after function label to make it
+legal in assembler so that it could pass to linker to get the real address.
+
+gcc/ChangeLog:
+
+	* config/i386/i386.md (prefetchi): Change to %a.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/prefetchi-1.c: Check (%rip).
+
+Reference:
+https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=
+062e46a813799684c6f900815fd22451d6294ae1
+---
+ gcc/config/i386/i386.md                     | 2 +-
+ gcc/testsuite/gcc.target/i386/prefetchi-1.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/i386/i386.md b/gcc/config/i386/i386.md
+index f08c2cfb1..1b733008e 100644
+--- a/gcc/config/i386/i386.md
++++ b/gcc/config/i386/i386.md
+@@ -22917,7 +22917,7 @@
+   "TARGET_PREFETCHI && TARGET_64BIT"
+ {
+   static const char * const patterns[2] = {
+-    "prefetchit1\t%0", "prefetchit0\t%0"
++    "prefetchit1\t%a0", "prefetchit0\t%a0"
+   };
+ 
+   int locality = INTVAL (operands[1]);
+diff --git a/gcc/testsuite/gcc.target/i386/prefetchi-1.c b/gcc/testsuite/gcc.target/i386/prefetchi-1.c
+index 80f25e70e..03dfdc55e 100644
+--- a/gcc/testsuite/gcc.target/i386/prefetchi-1.c
++++ b/gcc/testsuite/gcc.target/i386/prefetchi-1.c
+@@ -1,7 +1,7 @@
+ /* { dg-do compile { target { ! ia32 } } } */
+ /* { dg-options "-mprefetchi -O2" } */
+-/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit0\[ \\t\]+" 2 } } */
+-/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit1\[ \\t\]+" 2 } } */
++/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit0\[ \\t\]+bar\\(%rip\\)" 2 } } */
++/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit1\[ \\t\]+bar\\(%rip\\)" 2 } } */
+ 
+ #include 
+ 
+-- 
+2.31.1
+
diff --git a/0284-i386-Add-non-optimize-prefetchi-intrins.patch b/0284-i386-Add-non-optimize-prefetchi-intrins.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2a450e8dba7dd7c27af7cd073db2afe4f6b0c393
--- /dev/null
+++ b/0284-i386-Add-non-optimize-prefetchi-intrins.patch
@@ -0,0 +1,92 @@
+From c19afda0ee549d294fd5714c63db24bcd4570d03 Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Thu, 25 Jul 2024 16:16:05 +0800
+Subject: [PATCH 2/2] i386: Add non-optimize prefetchi intrins
+
+Under -O0, with the "newly" introduced intrins, the variable will be
+transformed as mem instead of the origin symbol_ref. The compiler will
+then treat the operand as invalid and turn the operation into nop, which
+is not expected. Use macro for non-optimize to keep the variable as
+symbol_ref just as how prefetch intrin does.
+
+gcc/ChangeLog:
+
+	* config/i386/prfchiintrin.h
+	(_m_prefetchit0): Add macro for non-optimized option.
+	(_m_prefetchit1): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/prefetchi-1b.c: New test.
+
+Reference:
+https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=
+b4524c4430ba9771265bd9fc31e69a3f35dfe117
+---
+ gcc/config/i386/prfchiintrin.h               |  9 +++++++
+ gcc/testsuite/gcc.target/i386/prefetchi-1b.c | 26 ++++++++++++++++++++
+ 2 files changed, 35 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/i386/prefetchi-1b.c
+
+diff --git a/gcc/config/i386/prfchiintrin.h b/gcc/config/i386/prfchiintrin.h
+index 06deef488..1e3d42dc3 100644
+--- a/gcc/config/i386/prfchiintrin.h
++++ b/gcc/config/i386/prfchiintrin.h
+@@ -30,6 +30,7 @@
+ 
+ #ifdef __x86_64__
+ 
++#ifdef __OPTIMIZE__
+ extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_prefetchit0 (void* __P)
+@@ -43,6 +44,14 @@ _m_prefetchit1 (void* __P)
+ {
+   __builtin_ia32_prefetchi (__P, 2);
+ }
++#else
++#define _m_prefetchit0(P)	\
++  __builtin_ia32_prefetchi(P, 3)
++
++#define _m_prefetchit1(P)	\
++  __builtin_ia32_prefetchi(P, 2)
++
++#endif
+ 
+ #endif
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/prefetchi-1b.c b/gcc/testsuite/gcc.target/i386/prefetchi-1b.c
+new file mode 100644
+index 000000000..93139554d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/prefetchi-1b.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-mprefetchi -O0" } */
++/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit0\[ \\t\]+bar\\(%rip\\)" 1 } } */
++/* { dg-final { scan-assembler-times "\[ \\t\]+prefetchit1\[ \\t\]+bar\\(%rip\\)" 1 } } */
++
++#include 
++
++int
++bar (int a)
++{
++  return a + 1;
++}
++
++int
++foo1 (int b)
++{
++  _m_prefetchit0 (bar);
++  return bar (b) + 1;
++}
++
++int
++foo2 (int b)
++{
++  _m_prefetchit1 (bar);
++  return bar (b) + 1;
++}
+-- 
+2.31.1
+
diff --git a/0285-SME-Recover-hip09-and-hip11-in-aarch64-cores.def.patch b/0285-SME-Recover-hip09-and-hip11-in-aarch64-cores.def.patch
new file mode 100644
index 0000000000000000000000000000000000000000..24dacd1eb01c3309f74cfa17af6c94d333dcbcef
--- /dev/null
+++ b/0285-SME-Recover-hip09-and-hip11-in-aarch64-cores.def.patch
@@ -0,0 +1,32 @@
+From 239f0637307ff2f6afb1473e99d0bb0eaf8946b2 Mon Sep 17 00:00:00 2001
+From: xiezhiheng 
+Date: Fri, 23 Aug 2024 15:37:17 +0800
+Subject: [PATCH 154/157] [SME] Recover hip09 and hip11 in aarch64-cores.def
+
+---
+ gcc/config/aarch64/aarch64-cores.def | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index f069c81cf..3337fd1a0 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -130,6 +130,7 @@ AARCH64_CORE("a64fx", a64fx, a64fx, V8_2A,  (F16, SVE), a64fx, 0x46, 0x001, -1)
+ 
+ /* HiSilicon ('H') cores. */
+ AARCH64_CORE("tsv110",  tsv110, tsv110, V8_2A,  (CRYPTO, F16), tsv110,   0x48, 0xd01, -1)
++AARCH64_CORE("hip09", hip09, hip09, V8_5A,  (SVE, I8MM, F32MM, F64MM, PROFILE, PREDRES), hip09, 0x48, 0xd02, 0x0)
+ 
+ /* ARMv8.3-A Architecture Processors.  */
+ 
+@@ -171,6 +172,7 @@ AARCH64_CORE("cortex-a710",  cortexa710, cortexa57, V9A,  (SVE2_BITPERM, MEMTAG,
+ AARCH64_CORE("cortex-x2",  cortexx2, cortexa57, V9A,  (SVE2_BITPERM, MEMTAG, I8MM, BF16), neoversen2, 0x41, 0xd48, -1)
+ 
+ AARCH64_CORE("neoverse-n2", neoversen2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversen2, 0x41, 0xd49, -1)
++AARCH64_CORE("hip11", hip11, hip11, V8_5A,  (SVE, SVE2, F16), hip11, 0x48, 0xd22, -1)
+ 
+ AARCH64_CORE("demeter", demeter, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+ AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+-- 
+2.33.0
+
diff --git a/0286-Try-to-use-AI-model-to-guide-optimization.patch b/0286-Try-to-use-AI-model-to-guide-optimization.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a697dcc7815d3697b9a09a95881de29f6e1ae30e
--- /dev/null
+++ b/0286-Try-to-use-AI-model-to-guide-optimization.patch
@@ -0,0 +1,671 @@
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index fcfa54697..f42aeb8e8 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1449,6 +1449,7 @@ OBJS = \
+ 	inchash.o \
+ 	incpath.o \
+ 	init-regs.o \
++	ipa-hardware-detection.o \
+ 	internal-fn.o \
+ 	ipa-struct-reorg/ipa-struct-reorg.o \
+ 	ipa-cp.o \
+diff --git a/gcc/common.opt b/gcc/common.opt
+index fd98382fa..99e626641 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -185,6 +185,9 @@ const char *main_input_basename
+ Variable
+ int main_input_baselength
+ 
++Variable
++bool optimize_maximum
++
+ ; The base name used for auxiliary output files.
+ ; dump_base_name minus dump_base_ext.
+ 
+@@ -469,6 +472,10 @@ Ofast
+ Common Optimization
+ Optimize for speed disregarding exact standards compliance.
+ 
++Om
++Common Optimization
++Optimize for maximizing radical optimization.
++
+ Og
+ Common Optimization
+ Optimize for debugging experience rather than speed or size.
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 309ecc3d9..ad853af9a 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -18637,6 +18637,134 @@ aarch64_sve_adjust_stmt_cost (class vec_info *vinfo, vect_cost_for_stmt kind,
+   return stmt_cost;
+ }
+ 
++/* Check whether in C language or LTO with only C language.  */
++extern bool lang_c_p (void);
++
++static void
++override_C_optimize_options (struct gcc_options *opts)
++{
++  opts->x_flag_ipa_reorder_fields = 1;
++  opts->x_flag_ipa_struct_reorg = 6;
++  opts->x_struct_layout_optimize_level = 6;
++  opts->x_flag_gnu89_inline = 1;
++  opts->x_flag_ccmp2 = 1;
++  opts->x_flag_array_widen_compare = 1;
++  opts->x_flag_convert_minmax = 1;
++  opts->x_flag_tree_slp_transpose_vectorize = 1;
++  opts->x_param_max_inline_insns_auto = 64;
++  opts->x_param_inline_unit_growth = 96;
++}
++
++/* Check whether in CPP language or LTO with only CPP language.  */
++static bool
++lang_cpp_p (void)
++{
++  const char *language_string = lang_hooks.name;
++  if (!language_string)
++    {
++      return false;
++    }
++  if (lang_GNU_CXX ())
++    {
++      return true;
++    }
++  else if (strcmp (language_string, "GNU GIMPLE") == 0) // for LTO check
++    {
++      unsigned i = 0;
++      tree t = NULL_TREE;
++      FOR_EACH_VEC_SAFE_ELT (all_translation_units, i, t)
++	{
++	  language_string = TRANSLATION_UNIT_LANGUAGE (t);
++	  if (language_string == NULL
++	      || strncmp (lang_hooks.name, "GNU C++", 7))
++	    {
++	      return false;
++	    }
++	}
++      return true;
++    }
++  return false;
++}
++
++static void
++override_CPP_optimize_options (struct gcc_options *opts)
++{
++  opts->x_flag_finite_loops = 1;
++  opts->x_flag_omit_frame_pointer = 1;
++  opts->x_flag_sized_deallocation = 0;
++  opts->x_flag_loop_elim = 1;
++  opts->x_flag_convert_minmax = 1;
++  opts->x_param_early_inlining_insns = 256;
++  opts->x_param_max_inline_insns_auto = 128;
++  opts->x_param_inline_unit_growth = 256;
++  opts->x_flag_cmlt_arith = 1;
++}
++
++static void
++override_optimize_options_1 (struct gcc_options *opts)
++{
++  opts->x_flag_split_ldp_stp = 1;
++  opts->x_flag_if_conversion_gimple = 1;
++  opts->x_flag_ifcvt_allow_complicated_cmps = 1;
++  opts->x_param_ifcvt_allow_register_renaming = 2;
++  opts->x_param_max_rtl_if_conversion_unpredictable_cost = 48;
++  opts->x_param_max_rtl_if_conversion_predictable_cost = 48;
++}
++
++static void
++override_Fortran_optimize_options (struct gcc_options *opts)
++{
++  opts->x_flag_unroll_loops = 1;
++  opts->x_flag_unconstrained_commons = 1;
++  opts->x_param_ipa_cp_eval_threshold = 1;
++  opts->x_param_ipa_cp_unit_growth = 80;
++  opts->x_param_ipa_cp_max_recursive_depth = 8;
++  opts->x_param_large_unit_insns = 30000;
++  opts->x_flag_ira_loop_pressure = 1;
++  opts->x_flag_inline_functions_called_once = 0;
++  opts->x_flag_ira_algorithm = IRA_ALGORITHM_PRIORITY;
++  opts->x_flag_delayed_branch = 1;
++  opts->x_flag_gcse_las = 1;
++  opts->x_flag_gcse_sm = 1;
++  opts->x_flag_ipa_pta = 1;
++  opts->x_flag_reorder_blocks_and_partition = 1;
++  opts->x_flag_reorder_blocks = 1;
++  opts->x_flag_crypto_accel_aes = 1;
++  opts->x_param_flexible_seg_len = 1;
++}
++
++/* Reset the optimize option.
++   After checking the model result, this function can
++   reset the more appropriate options.  */
++static void
++reset_machine_option (struct gcc_options *opts)
++{
++  if (!(opts->x_optimize_maximum)
++      || strstr (opts->x_aarch64_tune_string, "hip09") == NULL)
++    {
++      return;
++    }
++
++  const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
++  if (ai_infer_level)
++    {
++      override_optimize_options_1 (opts);
++      if (lang_c_p ())
++	{
++	  override_C_optimize_options (opts);
++	}
++      else if (lang_cpp_p ())
++	{
++	  override_CPP_optimize_options (opts);
++	}
++      else if (lang_GNU_Fortran ())
++	{
++	  override_Fortran_optimize_options (opts);
++	}
++    }
++}
++
++
+ /* STMT_COST is the cost calculated for STMT_INFO, which has cost kind KIND
+    and which when vectorized would operate on vector type VECTYPE.  Add the
+    cost of any embedded operations.  */
+@@ -20089,6 +20217,7 @@ aarch64_override_options_internal (struct gcc_options *opts)
+       && opts->x_optimize >= aarch64_tune_params.prefetch->default_opt_level)
+     opts->x_flag_prefetch_loop_arrays = 1;
+ 
++  reset_machine_option (opts);
+   aarch64_override_options_after_change_1 (opts);
+ }
+ 
+diff --git a/gcc/ipa-hardware-detection.cc b/gcc/ipa-hardware-detection.cc
+new file mode 100644
+index 000000000..8085a8c65
+--- /dev/null
++++ b/gcc/ipa-hardware-detection.cc
+@@ -0,0 +1,243 @@
++/* Hardware Detection.
++   Copyright (C) 2024-2024 Free Software Foundation, Inc.
++This file is part of GCC.
++GCC is free software; you can redistribute it and/or modify it
++under the terms of the GNU General Public License as published by the
++Free Software Foundation; either version 3, or (at your option) any
++later version.
++GCC is distributed in the hope that it will be useful, but WITHOUT
++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "backend.h"
++#include "target.h"
++#include "tree.h"
++#include "gimple.h"
++#include "tree-pass.h"
++#include "gimple-ssa.h"
++#include "tree-pretty-print.h"
++#include "fold-const.h"
++#include "gimplify.h"
++#include "gimple-iterator.h"
++#include "tree-ssa-loop-manip.h"
++#include "tree-ssa-loop.h"
++#include "ssa.h"
++#include "tree-into-ssa.h"
++#include "cfganal.h"
++#include "cfgloop.h"
++#include "gimple-pretty-print.h"
++#include "tree-cfg.h"
++#include "cgraph.h"
++#include "print-tree.h"
++#include "cfghooks.h"
++#include "gimple-fold.h"
++#include "gimplify-me.h"
++
++namespace {
++
++/* Build a binary operation and gimplify it.  Emit code before GSI.
++   Return the gimple_val holding the result.  */
++
++static tree
++gimplify_build2 (gimple_stmt_iterator *gsi, enum tree_code code,
++		 tree type, tree a, tree b)
++{
++  tree ret;
++
++  ret = fold_build2_loc (gimple_location (gsi_stmt (*gsi)), code, type, a, b);
++  return force_gimple_operand_gsi (gsi, ret, true, NULL, true,
++				   GSI_SAME_STMT);
++}
++
++static basic_block
++create_abort_bb (basic_block last_bb)
++{
++  basic_block bb = create_empty_bb (last_bb);
++  if (last_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (bb, last_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  gimple_stmt_iterator gsi = gsi_last_bb (bb);
++  tree fn = builtin_decl_implicit (BUILT_IN_ABORT);
++  gimple *g = gimple_build_call (fn, 0);
++  gsi_insert_after (&gsi, g, GSI_NEW_STMT);
++  return bb;
++}
++
++static basic_block
++create_part_bb (basic_block last_bb, tree part_base)
++{
++  basic_block bb = create_empty_bb (last_bb);
++  if (last_bb->loop_father != NULL)
++    {
++      add_bb_to_loop (bb, last_bb->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  gimple_stmt_iterator gsi = gsi_last_bb (bb);
++  gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT);
++  /* This number is used to efficiently identify the supported part range.  */
++  tree part_cond = gimplify_build2 (
++		     &gsi, PLUS_EXPR, unsigned_type_node, part_base,
++		     build_int_cst (unsigned_type_node, 4294963967));
++  gcond *cond = gimple_build_cond (LE_EXPR, part_cond,
++				   build_int_cst (unsigned_type_node, 2),
++				   NULL_TREE, NULL_TREE);
++  gimple_set_location (cond, input_location);
++  gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
++  gsi_remove (&gsi, true);
++  return bb;
++}
++
++static void
++create_detection_bb ()
++{
++  edge old_e = single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun));
++  basic_block ret_bb = old_e->dest;
++
++  basic_block detection_bb = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));
++  if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father != NULL)
++    {
++      add_bb_to_loop (detection_bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->loop_father);
++      loops_state_set (LOOPS_NEED_FIXUP);
++    }
++  tree cpuid_decl = build_decl (input_location, VAR_DECL,
++				get_identifier ("cpuid"), unsigned_type_node);
++  add_local_decl (cfun, cpuid_decl);
++
++  gimple_stmt_iterator gsi = gsi_last_bb (detection_bb);
++  vec *outputs = NULL;
++  tree purpose = build_string (strlen ("=r"), "=r");
++  tree output = build_tree_list (
++		  build_tree_list (NULL_TREE, purpose), cpuid_decl);
++  vec_safe_push (outputs, output);
++  gasm *asm_stmt = gimple_build_asm_vec (
++		     "mrs %0, MIDR_EL1", NULL, outputs, NULL, NULL);
++  gsi_insert_after (&gsi, asm_stmt, GSI_NEW_STMT);
++  gsi_insert_after (&gsi, gimple_build_nop (), GSI_NEW_STMT);
++
++  tree implementer = gimplify_build2 (
++		       &gsi, RSHIFT_EXPR, unsigned_type_node, cpuid_decl,
++		       build_int_cst (unsigned_type_node, 24));
++  tree part_base = gimplify_build2 (
++		     &gsi, RSHIFT_EXPR, unsigned_type_node, cpuid_decl,
++		     build_int_cst (unsigned_type_node, 4));
++  tree part = gimplify_build2 (
++		&gsi, BIT_AND_EXPR, unsigned_type_node, part_base,
++		build_int_cst (unsigned_type_node, 4095));
++  gcond *implementer_cond = gimple_build_cond (
++			      EQ_EXPR, implementer,
++			      build_int_cst (unsigned_type_node, 72),
++			      NULL_TREE, NULL_TREE);
++  gimple_set_location (implementer_cond, input_location);
++  gsi_insert_before (&gsi, implementer_cond, GSI_SAME_STMT);
++  gsi_remove (&gsi, true);
++
++  basic_block part_bb = create_part_bb (detection_bb, part);
++  basic_block abort_bb = create_abort_bb (part_bb);
++
++  remove_edge_raw (old_e);
++  make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun),
++			 detection_bb, EDGE_FALLTHRU);
++  edge etrue = make_edge (detection_bb, part_bb, EDGE_TRUE_VALUE);
++  etrue->probability = profile_probability::likely ();
++  edge efalse = make_edge (detection_bb, abort_bb, EDGE_FALSE_VALUE);
++  efalse->probability = profile_probability::unlikely ();
++  edge part_true = make_edge (part_bb, ret_bb, EDGE_TRUE_VALUE);
++  part_true->probability = profile_probability::likely ();
++  edge part_false = make_edge (part_bb, abort_bb, EDGE_FALSE_VALUE);
++  part_false->probability = profile_probability::unlikely ();
++  make_single_succ_edge (abort_bb, ret_bb, EDGE_FALLTHRU);
++  if (dom_info_available_p (CDI_DOMINATORS))
++    {
++      set_immediate_dominator (CDI_DOMINATORS, part_bb, detection_bb);
++      set_immediate_dominator (CDI_DOMINATORS, ret_bb, detection_bb);
++      set_immediate_dominator (CDI_DOMINATORS, abort_bb, detection_bb);
++    }
++}
++
++const pass_data pass_data_ipa_hardware_detection =
++{
++  SIMPLE_IPA_PASS,
++  "hardware_detection",
++  OPTGROUP_NONE,
++  TV_IPA_HARDWARE_DETECTION,
++  (PROP_cfg | PROP_ssa),
++  0,
++  0,
++  0,
++  (TODO_update_ssa | TODO_verify_all)
++};
++
++class pass_ipa_hardware_detection : public simple_ipa_opt_pass
++{
++public:
++  pass_ipa_hardware_detection (gcc::context *ctxt)
++    : simple_ipa_opt_pass (pass_data_ipa_hardware_detection, ctxt)
++  {}
++
++  virtual bool gate (function *);
++  virtual unsigned int execute (function *);
++}; // class pass_ipa_hardware_detection
++
++bool
++pass_ipa_hardware_detection::gate (function *)
++{
++  const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
++  return (ai_infer_level
++	  && optimize_maximum > 0
++	  /* Only enable in lto or whole_program.  */
++	  && (in_lto_p || flag_whole_program));
++}
++
++unsigned int
++pass_ipa_hardware_detection::execute (function *)
++{
++  unsigned int ret = 0;
++  cgraph_node *cnode;
++  FOR_EACH_FUNCTION (cnode)
++    {
++      if (!cnode->real_symbol_p ())
++	{
++	  continue;
++	}
++      if (cnode->definition)
++	{
++	  if (!cnode->has_gimple_body_p () || cnode->inlined_to)
++	      continue;
++
++	  cnode->get_body ();
++	  function *fn = DECL_STRUCT_FUNCTION (cnode->decl);
++	  if (!fn)
++	      continue;
++
++	  if (DECL_NAME (cnode->decl)
++      	      && MAIN_NAME_P (DECL_NAME (cnode->decl)))
++	    {
++	      push_cfun (fn);
++	      calculate_dominance_info (CDI_DOMINATORS);
++
++	      create_detection_bb ();
++
++	      cgraph_edge::rebuild_edges ();
++	      free_dominance_info (CDI_DOMINATORS);
++	      pop_cfun ();
++	    }
++	}
++    }
++  return ret;
++}
++} // anon namespace
++
++simple_ipa_opt_pass *
++make_pass_ipa_hardware_detection (gcc::context *ctxt)
++{
++  return new pass_ipa_hardware_detection (ctxt);
++}
+diff --git a/gcc/opts-common.cc b/gcc/opts-common.cc
+index 489a6e02a..12c3f7299 100644
+--- a/gcc/opts-common.cc
++++ b/gcc/opts-common.cc
+@@ -992,6 +992,158 @@ opts_concat (const char *first, ...)
+   return newstr;
+ }
+ 
++typedef int64_t (*run_ai_model_func)(int, const char **,
++				     const char *, int, int64_t *);
++#define PTR_UNION_TYPE(TOTYPE) union { void *_q; TOTYPE _nq; }
++#define PTR_UNION_AS_VOID_PTR(NAME) (NAME._q)
++#define PTR_UNION_AS_CAST_PTR(NAME) (NAME._nq)
++
++static int64_t
++ai_infer_optimization (int argc, const char **argv,
++		       const char *mcpu_option,
++		       int argc_hw, int64_t *argv_hw)
++{
++  /* Load dependent AI-framework libraries.  */
++  void *onnxruntime_lib_handle = NULL;
++  const char *onnxruntime_lib_path = "libonnxruntime.so";
++
++  onnxruntime_lib_handle = dlopen (onnxruntime_lib_path,
++				   RTLD_LAZY | RTLD_GLOBAL);
++  if (!onnxruntime_lib_handle)
++    {
++      return -1;
++    }
++
++  void *ai4c_lib_handle = NULL;
++  const char *ai4c_lib_path = "libONNXRunner.so";
++
++  ai4c_lib_handle = dlopen (ai4c_lib_path, RTLD_LAZY | RTLD_GLOBAL);
++  if (!ai4c_lib_handle)
++    {
++      return -1;
++    }
++
++  /* Clear any existing error.  */
++  dlerror ();
++
++  /* Run AI4Compiler model.  */
++  if (ai4c_lib_handle == NULL || onnxruntime_lib_handle == NULL)
++    {
++      return -1;
++    }
++
++  run_ai_model_func run_ai_model;
++  PTR_UNION_TYPE (run_ai_model_func) run_ai_model_func_union;
++  PTR_UNION_AS_VOID_PTR (run_ai_model_func_union)
++    = dlsym (ai4c_lib_handle, "runONNXModelOptimizer");
++  run_ai_model = PTR_UNION_AS_CAST_PTR (run_ai_model_func_union);
++  if (!run_ai_model)
++    {
++      dlclose (ai4c_lib_handle);
++      dlclose (onnxruntime_lib_handle);
++      return -1;
++    }
++  int64_t model_pred = (*run_ai_model) (argc, argv,
++					mcpu_option, argc_hw, argv_hw);
++
++  if (ai4c_lib_handle)
++    dlclose (ai4c_lib_handle);
++
++  if (onnxruntime_lib_handle)
++    dlclose (onnxruntime_lib_handle);
++
++  if (model_pred == 1)
++    putenv ("AI_INFER_LEVEL=1");
++  return model_pred;
++}
++
++static int
++handle_lto_option (unsigned int lang_mask,
++		   unsigned int num_decoded_options,
++		   unsigned int argc,
++		   const char **argv,
++		   struct cl_decoded_option *&opt_array)
++{
++  int ret = 0;
++  char *lan = "";
++  char *compiler = xstrdup (argv[0]);
++  lan = strrchr (compiler, '/');
++  if (lan != NULL)
++    lan ++;
++  else
++    lan = compiler;
++  if (strstr (lan, "gcc") != NULL)
++    {
++      opt_array = XRESIZEVEC (struct cl_decoded_option, opt_array, argc + 2);
++      const char* lto_flag = "-flto=8";
++      decode_cmdline_option (<o_flag, lang_mask,
++			     &opt_array[num_decoded_options]);
++      ret++;
++      const char* ltopartition_flag = "-flto-partition=one";
++      decode_cmdline_option (<opartition_flag, lang_mask,
++			     &opt_array[num_decoded_options + 1]);
++      ret++;
++    }
++  else if (strstr (lan, "g++") != NULL
++	   || strstr (lan, "gfortran") != NULL)
++    {
++      opt_array = XRESIZEVEC (struct cl_decoded_option, opt_array, argc + 1);
++      const char* lto_flag = "-flto=8";
++      decode_cmdline_option (<o_flag, lang_mask,
++			     &opt_array[num_decoded_options]);
++      ret++;
++    }
++  if (compiler)
++    free (compiler);
++  return ret;
++}
++
++static int
++handle_machine_option (unsigned int lang_mask,
++		       unsigned int num_decoded_options,
++		       unsigned int argc,
++		       const char **argv,
++		       struct cl_decoded_option *&opt_array)
++{
++  int ret = 0;
++  bool flag_Om = false;
++  bool flag_hip09 = false;
++  for (unsigned i = 1; i < argc; i ++)
++    {
++      if (strcmp (argv[i], "-Om") == 0)
++	flag_Om = true;
++      if (strstr (argv[i], "mcpu=hip09") != NULL)
++	flag_hip09 = true;
++    }
++  if (!flag_hip09 || !flag_Om)
++    {
++      return ret;
++    }
++
++  const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
++  if (ai_infer_level)
++    {
++      return ret;
++    }
++  int argc_hw = 6;
++  int64_t argv_hw[argc_hw] = {
++    global_options.x_param_simultaneous_prefetches,
++    global_options.x_param_l1_cache_size,
++    global_options.x_param_l1_cache_line_size,
++    global_options.x_param_l2_cache_size,
++    global_options.x_param_prefetch_latency,
++    global_options.x_param_ipa_prefetch_distance_factor};
++  int64_t output_pred = ai_infer_optimization (
++			  argc, argv, "hip09", argc_hw, argv_hw);
++  if (output_pred != 1)
++    {
++      return ret;
++    }
++
++  return handle_lto_option (lang_mask, num_decoded_options,
++			    argc, argv, opt_array);
++}
++
+ /* Decode command-line options (ARGC and ARGV being the arguments of
+    main) into an array, setting *DECODED_OPTIONS to a pointer to that
+    array and *DECODED_OPTIONS_COUNT to the number of entries in the
+@@ -1090,6 +1242,9 @@ decode_cmdline_options_to_array (unsigned int argc, const char **argv,
+       num_decoded_options++;
+     }
+ 
++  num_decoded_options += handle_machine_option (lang_mask, num_decoded_options,
++						argc, argv, opt_array);
++
+   *decoded_options = opt_array;
+   *decoded_options_count = num_decoded_options;
+   prune_options (decoded_options, decoded_options_count, lang_mask);
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index e34e5ee8e..d97f6079f 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -780,6 +780,14 @@ default_options_optimization (struct gcc_options *opts,
+ 	  opts->x_optimize_debug = 1;
+ 	  break;
+ 
++	case OPT_Om:
++	  /* -Om adds flags to -O3.  */
++	  opts->x_optimize_size = 0;
++	  opts->x_optimize = 3;
++	  opts->x_optimize_maximum = true;
++	  opts->x_optimize_debug = 0;
++	  break;
++
+ 	case OPT_fopenacc:
+ 	  if (opt->value)
+ 	    openacc_mode = true;
+@@ -2733,6 +2741,8 @@ common_handle_option (struct gcc_options *opts,
+ 	  &= ~(SANITIZE_UNDEFINED | SANITIZE_UNDEFINED_NONDEFAULT);
+       break;
+ 
++    case OPT_Om:
++      break;
+     case OPT_O:
+     case OPT_Os:
+     case OPT_Ofast:
+diff --git a/gcc/passes.def b/gcc/passes.def
+index 8797f166f..690d344c0 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -179,6 +179,7 @@ along with GCC; see the file COPYING3.  If not see
+      passes are executed after partitioning and thus see just parts of the
+      compiled unit.  */
+   INSERT_PASSES_AFTER (all_late_ipa_passes)
++  NEXT_PASS (pass_ipa_hardware_detection);
+   NEXT_PASS (pass_ipa_pta);
+   /* FIXME: this should be a normal IP pass.  */
+   NEXT_PASS (pass_ipa_struct_reorg);
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 8e7510eb3..bd8c9a4f7 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -81,6 +81,7 @@ DEFTIMEVAR (TV_IPA_CONSTANT_PROP     , "ipa cp")
+ DEFTIMEVAR (TV_IPA_INLINING          , "ipa inlining heuristics")
+ DEFTIMEVAR (TV_IPA_FNSPLIT           , "ipa function splitting")
+ DEFTIMEVAR (TV_IPA_COMDATS	     , "ipa comdats")
++DEFTIMEVAR (TV_IPA_HARDWARE_DETECTION, "ipa detection")
+ DEFTIMEVAR (TV_IPA_PREFETCH	     , "ipa prefetch")
+ DEFTIMEVAR (TV_IPA_STRUCT_REORG      , "ipa struct reorg optimization")
+ DEFTIMEVAR (TV_IPA_OPT		     , "ipa various optimizations")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 1c983ef71..ee873f0b2 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -528,6 +528,8 @@ extern ipa_opt_pass_d *make_pass_ipa_icp (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt);
++extern simple_ipa_opt_pass *make_pass_ipa_hardware_detection (gcc::context *
++							      ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_prefetch (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_struct_reorg (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt);
diff --git a/0287-Add-dynamic-memory-access-checks.patch b/0287-Add-dynamic-memory-access-checks.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e23d8f64c0d87f6c3d65f9d5ead4bdace2fdca5d
--- /dev/null
+++ b/0287-Add-dynamic-memory-access-checks.patch
@@ -0,0 +1,774 @@
+From 08fb60d0a0707af4004b20358f4a921e4ae6cca6 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Thu, 22 Aug 2024 15:23:36 +0800
+Subject: [PATCH 156/157] Add dynamic memory access checks
+
+Signed-off-by: Diachkov Ilia 
+---
+ gcc/ipa-prefetch.cc | 622 +++++++++++++++++++++++++++++++++++++-------
+ gcc/params.opt      |   4 +
+ 2 files changed, 525 insertions(+), 101 deletions(-)
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index 94290ea9c..b000d4d75 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -368,6 +368,7 @@ typedef std::map memref_tree_map;
+ typedef std::set stmt_set;
+ typedef std::set tree_set;
+ typedef std::map tree_map;
++typedef std::map tree_poly_offset_map;
+ 
+ tree_memref_map *tm_map;
+ funct_mrs_map *fmrs_map;
+@@ -710,6 +711,20 @@ get_mem_ref_address_ssa_name (tree mem, tree base)
+   return NULL_TREE;
+ }
+ 
++static void
++dump_base_addr (tree base_addr)
++{
++  if (base_addr)
++    {
++      fprintf (dump_file, "Base addr (%s): ",
++	      get_tree_code_name (TREE_CODE (base_addr)));
++      print_generic_expr (dump_file, base_addr);
++    }
++  else
++    fprintf (dump_file, "Base addr (%s): ", "null");
++  fprintf (dump_file, "\n");
++}
++
+ static void
+ analyse_mem_ref (gimple *stmt, tree mem, memref_t* mr)
+ {
+@@ -736,14 +751,7 @@ analyse_mem_ref (gimple *stmt, tree mem, memref_t* mr)
+       {
+ 	tree base_addr = get_mem_ref_address_ssa_name (mem, base);
+ 	if (dump_file)
+-	  {
+-	    fprintf (dump_file, "Base addr (%s): ",
+-		     base_addr ? get_tree_code_name (TREE_CODE (base_addr))
+-			       : "null");
+-	    if (base_addr)
+-	      print_generic_expr (dump_file, base_addr);
+-	    fprintf (dump_file, "\n");
+-	  }
++	  dump_base_addr (base_addr);
+ 	if (base_addr)
+ 	  {
+ 	    mr->base = analyse_addr_eval (base_addr, mr);
+@@ -1187,7 +1195,7 @@ reduce_memref_set (memref_set *set, vec &vec)
+ }
+ 
+ static void
+-find_nearest_common_dominator (memref_t *mr, basic_block &dom)
++find_nearest_common_post_dominator (memref_t *mr, basic_block &dom)
+ {
+   for (unsigned int i = 0; i < mr->stmts.length (); i++)
+     {
+@@ -1196,7 +1204,7 @@ find_nearest_common_dominator (memref_t *mr, basic_block &dom)
+       if (dom == bb)
+ 	continue;
+       if (dom)
+-	dom = nearest_common_dominator (CDI_DOMINATORS, dom, bb);
++	dom = nearest_common_dominator (CDI_POST_DOMINATORS, dom, bb);
+       else
+ 	dom = bb;
+     }
+@@ -1495,10 +1503,13 @@ gimple_copy_and_remap (gimple *stmt)
+ 
+ static gimple *
+ gimple_copy_and_remap_memref_stmts (memref_t *mr, gimple_seq &stmts,
+-				    int last_idx, stmt_set &processed)
++				    int first_idx, int last_idx,
++				    stmt_set &processed)
+ {
+   gimple *last_stmt = NULL;
+-  for (int i = mr->stmts.length () - 1; i >= last_idx ; i--)
++  if (first_idx == 0)
++    first_idx = mr->stmts.length () - 1;
++  for (int i = first_idx; i >= last_idx; i--)
+     {
+       if (processed.count (mr->stmts[i]))
+ 	continue;
+@@ -1515,6 +1526,436 @@ gimple_copy_and_remap_memref_stmts (memref_t *mr, gimple_seq &stmts,
+   return last_stmt;
+ }
+ 
++/* Check if prefetch insertion may be always unsafe in this case.  For now
++   reject cases with access to arrays with no domain or with no elements.  */
++
++static bool
++check_prefetch_safety (vec &mrs, memref_t *cmr)
++{
++  for (unsigned int i = 0; i < mrs.length (); i++)
++    {
++      memref_t *mr = mrs[i];
++      if (mr == cmr || mr->used_mrs.empty ())
++	continue;
++      bool is_store;
++      tree *mem = simple_mem_ref_in_stmt (mr->stmts[0], &is_store);
++      if (mem == NULL || TREE_CODE (*mem) != ARRAY_REF)
++	continue;
++      tree array = TREE_OPERAND (*mem, 0);
++      tree atype = TREE_TYPE (array);
++      gcc_assert (atype);
++      tree domain = TYPE_DOMAIN (atype);
++      if (!domain || !tree_fits_uhwi_p (TYPE_MIN_VALUE (domain))
++	  || !tree_fits_uhwi_p (TYPE_MAX_VALUE (domain)))
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Unsupported array type: ");
++	      print_generic_expr (dump_file, atype);
++	      fprintf (dump_file, "\n");
++	    }
++	  return false;
++	}
++      unsigned HOST_WIDE_INT min_val = tree_to_uhwi (TYPE_MIN_VALUE (domain));
++      unsigned HOST_WIDE_INT max_val = tree_to_uhwi (TYPE_MAX_VALUE (domain));
++      if (min_val == 0 && max_val == 0)
++	{
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Unsupported array type's bounds: ");
++	      print_generic_expr (dump_file, atype);
++	      fprintf (dump_file, "\n");
++	    }
++	  return false;
++	}
++    }
++  return true;
++}
++
++/* Collect base addresses which we need to check.  */
++
++static void
++collect_base_addresses (vec &used_mr_vec, HOST_WIDE_INT dist_val,
++			memref_t *comp_mr, tree_poly_offset_map &offset_map)
++{
++  if (dump_file)
++    fprintf (dump_file, "Collect base addresses which we need to check.\n");
++  for (unsigned int i = 0; i < used_mr_vec.length (); i++)
++    {
++      memref_t *mr = used_mr_vec[i];
++      if (mr == comp_mr || mr->used_mrs.empty ())
++	continue;
++      bool is_store;
++      tree *mem = simple_mem_ref_in_stmt (mr->stmts[0], &is_store);
++      if (mem == NULL || TREE_CODE (*mem) != MEM_REF)
++	continue;
++      tree base = get_base_address (*mem);
++      tree base_addr = get_mem_ref_address_ssa_name (*mem, base);
++      if (!base_addr)
++	continue;
++      if (dump_file)
++	{
++	  dump_base_addr (base_addr);
++	  if (base)
++	    {
++	      fprintf (dump_file, "Base:");
++	      print_generic_expr (dump_file, base);
++	      fprintf (dump_file, "\n");
++	    }
++	}
++      if (!TREE_OPERAND (base, 1))
++	continue;
++      poly_offset_int curr_offset = mem_ref_offset (base);
++      poly_offset_int saved_offset = 0;
++      if (offset_map.count (base_addr))
++	{
++	  saved_offset = offset_map[base_addr];
++	  if ((dist_val > 0 && known_gt (curr_offset, saved_offset))
++	      || (dist_val < 0 && known_lt (curr_offset, saved_offset)))
++	    offset_map[base_addr] = curr_offset;
++	  else if (dump_file)
++	    fprintf (dump_file, "Off: step=%ld gt=%d lt=%d\n", dist_val,
++		     known_gt (curr_offset, saved_offset),
++		     known_lt (curr_offset, saved_offset));
++	}
++      else
++	offset_map[base_addr] = curr_offset;
++    }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Final list of base addresses:\n");
++      for (tree_poly_offset_map::iterator it1 = offset_map.begin ();
++	   it1 != offset_map.end (); ++it1)
++	{
++	  tree base_addr = it1->first;
++	  poly_offset_int off = it1->second;
++	  fprintf (dump_file, "Base:");
++	  print_generic_expr (dump_file, base_addr);
++	  HOST_WIDE_INT val = estimated_poly_value (off.force_shwi (),
++						    POLY_VALUE_LIKELY);
++	  fprintf (dump_file, "\nOff: %ld\n", val);
++	}
++      fprintf (dump_file, "Finish collecting base addresses.\n");
++    }
++}
++
++/* Return true if we need page check to access memory at this address.  */
++
++static bool
++need_page_check (tree base_addr, tree_set &checked_base_addrs)
++{
++  if (dump_file)
++    dump_base_addr (base_addr);
++  if (base_addr == NULL)
++    {
++      if (dump_file)
++	fprintf (dump_file, "Base address not found\n");
++      return false;
++    }
++  if (checked_base_addrs.count (base_addr))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Base address is already checked\n");
++      return false;
++    }
++  return true;
++}
++
++/* Insert instructions to check the original address and newly evaluated
++   adress for prefetch correspond the same page.  */
++
++static gimple *
++insert_page_check (tree addr, tree_poly_offset_map &offset_map,
++		   gimple_seq &stmts)
++{
++  poly_offset_int offset = 0;
++  if (offset_map.count (addr))
++    offset = offset_map[addr];
++  tree addr_type = TREE_TYPE (addr);
++  tree utype = unsigned_type_for (addr_type);
++  tree new_addr = build_int_cst (addr_type, 0);
++  if (decl_map->count (addr))
++    new_addr = (*decl_map)[addr];
++  tree t1 = make_ssa_name (utype);
++  tree t2 = make_ssa_name (utype);
++  unsigned long long pmask = ~(param_ipa_prefetch_pagesize - 1);
++  tree pmask_cst = build_int_cst (utype, pmask);
++  tree off_tree = wide_int_to_tree (sizetype, offset);
++  gcc_assert (TREE_CODE (addr_type) == POINTER_TYPE);
++  tree addr_with_offset = gimple_build (&stmts, POINTER_PLUS_EXPR,
++					addr_type, addr, off_tree);
++  tree conv_addr = make_ssa_name (utype);
++  tree conv_new_addr = make_ssa_name (utype);
++  gimple *conv1 = gimple_build_assign (conv_addr,
++				       fold_convert (utype, addr_with_offset));
++  gimple *conv2 = gimple_build_assign (conv_new_addr,
++				       fold_convert (utype, new_addr));
++  gimple *paddr = gimple_build_assign (t1, BIT_AND_EXPR,
++				       conv_addr, pmask_cst);
++  gimple *new_paddr = gimple_build_assign (t2, BIT_AND_EXPR,
++					   conv_new_addr, pmask_cst);
++  gcond *cond = gimple_build_cond (EQ_EXPR, t1, t2, NULL, NULL);
++  gimple_seq_add_stmt (&stmts, conv1);
++  gimple_seq_add_stmt (&stmts, paddr);
++  gimple_seq_add_stmt (&stmts, conv2);
++  gimple_seq_add_stmt (&stmts, new_paddr);
++  gimple_seq_add_stmt (&stmts, cond);
++  return cond;
++}
++
++/* Check if this array access needs dynamic address verification.  Support only
++   arrays with 1-d indexing.  */
++
++static bool
++need_array_index_check (tree mem)
++{
++  /* Check pattern: t1 = (type) t0; ld/st array[t1].  If any index of type (t0)
++     does not go beyond the bounds of the array, we don't need the check.  */
++  tree array = TREE_OPERAND (mem, 0);
++  tree atype = TREE_TYPE (array);
++  tree index = TREE_OPERAND (mem, 1);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Array ind: ");
++      print_generic_expr (dump_file, index);
++      fprintf (dump_file, "\nMem: ");
++      print_generic_expr (dump_file, array);
++      fprintf (dump_file, "\nInd type: ");
++      print_generic_expr (dump_file, TREE_TYPE (index));
++      fprintf (dump_file, "\nMem type: ");
++      print_generic_expr (dump_file, atype);
++      fprintf (dump_file, "\n");
++    }
++  tree domain = TYPE_DOMAIN (atype);
++  if (!domain || !tree_fits_uhwi_p (TYPE_MIN_VALUE (domain))
++      || !tree_fits_uhwi_p (TYPE_MAX_VALUE (domain)))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Unsupported array type domain.\n");
++      return true;
++    }
++  unsigned HOST_WIDE_INT min_val = tree_to_uhwi (TYPE_MIN_VALUE (domain));
++  unsigned HOST_WIDE_INT max_val = tree_to_uhwi (TYPE_MAX_VALUE (domain));
++  if (dump_file)
++    fprintf (dump_file, "Array bounds (%ld, %ld)\n", min_val, max_val);
++  if (TREE_CODE (index) != SSA_NAME)
++    return true;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (index);
++  if (!is_gimple_assign (stmt))
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Is not assign, stop analysis: ");
++	  print_gimple_stmt (dump_file, stmt, 3, TDF_DETAILS);
++	}
++      return true;
++    }
++  tree *lhs = gimple_assign_lhs_ptr (stmt);
++  tree *rhs = gimple_assign_rhs1_ptr (stmt);
++  tree lhs_type = TREE_TYPE (*lhs);
++  tree rhs_type = TREE_TYPE (*rhs);
++  tree ind_type = (TYPE_PRECISION (lhs_type) < TYPE_PRECISION (rhs_type))
++		  ? lhs_type : rhs_type;
++  if (!ind_type || !tree_fits_uhwi_p (TYPE_MIN_VALUE (ind_type))
++      || !tree_fits_uhwi_p (TYPE_MAX_VALUE (ind_type)))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Unsupported index type.\n");
++      return true;
++    }
++  int prec = tree_to_uhwi (TYPE_SIZE (ind_type));
++  unsigned HOST_WIDE_INT t_max_val = tree_to_uhwi (TYPE_MAX_VALUE (ind_type));
++  unsigned HOST_WIDE_INT t_min_val = tree_to_uhwi (TYPE_MIN_VALUE (ind_type));
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Index type (%d, %ld, %ld): ", prec,
++	       t_min_val, t_max_val);
++      print_generic_expr (dump_file, ind_type);
++      fprintf (dump_file, "\n");
++    }
++  return !((t_max_val <= max_val) && (t_min_val >= min_val));
++}
++
++/* Insert instructions to check the new index is within the array bounds.  */
++
++static gimple *
++insert_index_check (tree mem, gimple_seq &stmts)
++{
++  if (dump_file)
++    fprintf (dump_file, "Insert array index check\n");
++  tree atype = TREE_TYPE (TREE_OPERAND (mem, 0));
++  tree ind = TREE_OPERAND (mem, 1);
++  if (decl_map->count (ind))
++    ind = (*decl_map)[ind];
++  tree domain = TYPE_DOMAIN (atype);
++  gcc_assert (domain && tree_fits_uhwi_p (TYPE_MIN_VALUE (domain))
++	      && tree_fits_uhwi_p (TYPE_MAX_VALUE (domain)));
++
++  tree ind_min_val = TYPE_MIN_VALUE (domain);
++  tree ind_max_val = TYPE_MAX_VALUE (domain);
++  tree t1 = make_ssa_name (boolean_type_node);
++  tree t2 = make_ssa_name (boolean_type_node);
++  tree t3 = make_ssa_name (boolean_type_node);
++  t1 = fold_build2 (LE_EXPR, boolean_type_node, ind, ind_max_val);
++  t2 = fold_build2 (GE_EXPR, boolean_type_node, ind, ind_min_val);
++  t3 = fold_build2 (TRUTH_ANDIF_EXPR, boolean_type_node, t1, t2);
++  gcond *cond = gimple_build_cond (EQ_EXPR, t3, boolean_true_node, NULL, NULL);
++  gimple_seq_add_stmt (&stmts, cond);
++  return cond;
++}
++
++/* Insert safety checks for memory access stmts newly created to evaluate
++   prefetch addresses.  */
++
++static void
++process_used_mr (memref_t *mr, tree_poly_offset_map &offset_map,
++		 tree_set &checked_base_addrs, gimple_seq &stmts,
++		 vec &bbends)
++{
++  bool is_store;
++  tree *mem = simple_mem_ref_in_stmt (mr->stmts[0], &is_store);
++  if (mem == NULL)
++    return;
++  if (dump_file)
++    {
++      fprintf (dump_file, "MR (%d) maybe need to insert address check: ",
++	       mr->mr_id);
++      print_generic_expr (dump_file, *mem);
++      fprintf (dump_file, "\n");
++    }
++  gimple *bbend = NULL;
++  if (TREE_CODE (*mem) == MEM_REF)
++    {
++      tree base = get_base_address (*mem);
++      tree base_addr = get_mem_ref_address_ssa_name (*mem, base);
++      if (!need_page_check (base_addr, checked_base_addrs))
++	return;
++      bbend = insert_page_check (base_addr, offset_map, stmts);
++      checked_base_addrs.insert (base_addr);
++    }
++  else if (TREE_CODE (*mem) == ARRAY_REF && need_array_index_check (*mem))
++    bbend = insert_index_check (*mem, stmts);
++  if (bbend)
++    bbends.safe_push (bbend);
++}
++
++/* Create new variables and insert new stmts to evaluate prefetch addresses.  */
++
++static void
++create_stmts_for_used_mrs (vec &used_mr_vec, vec &bbends,
++			   gimple_seq &stmts, stmt_set &processed_stmts,
++			   HOST_WIDE_INT dist_val, memref_t *comp_mr)
++{
++  tree_poly_offset_map offset_map;
++  collect_base_addresses (used_mr_vec, dist_val, comp_mr, offset_map);
++
++  /* Insert stmts to evaluate prefetch addresses.  */
++  tree_set checked_base_addrs;
++  for (unsigned int i = 0; i < used_mr_vec.length (); i++)
++    {
++      memref_t *mr = used_mr_vec[i];
++      if (mr == comp_mr)
++	continue;
++      gimple *last_stmt = gimple_copy_and_remap_memref_stmts (mr, stmts, 0, 1,
++							      processed_stmts);
++      if (last_stmt && dump_file)
++	{
++	  fprintf (dump_file, "MR (%d) new mem: ", mr->mr_id);
++	  print_generic_expr (dump_file, gimple_assign_lhs (last_stmt));
++	  fprintf (dump_file, "\n");
++	}
++      if (!mr->used_mrs.empty ())
++	process_used_mr (mr, offset_map, checked_base_addrs, stmts, bbends);
++      last_stmt = gimple_copy_and_remap_memref_stmts (mr, stmts, 0, 0,
++						      processed_stmts);
++    }
++}
++
++/* Insert prefetch instructions.  */
++
++static void
++insert_prefetch_stmts (vec &pcalls, gimple_seq &stmts,
++		       gimple *&last_pref, vec &vmrs,
++		       stmt_set &processed_stmts)
++{
++  if (dump_file)
++    fprintf (dump_file, "Evaluate addresses and insert prefetch insns.\n");
++
++  tree local;
++  switch (param_ipa_prefetch_locality)
++    {
++    case 0:
++      local = integer_zero_node;
++      break;
++    case 1:
++      local = integer_one_node;
++      break;
++    case 2:
++      local = build_int_cst (integer_type_node, 2);
++      break;
++    default:
++    case 3:
++      local = integer_three_node;
++      break;
++    }
++  tree_set prefetched_addrs;
++  for (unsigned int i = 0; i < vmrs.length (); i++)
++    {
++      memref_t *mr = vmrs[i];
++      /* Don't need to copy the last stmt, since we insert prefetch insn
++	 instead of it.  */
++      gimple_copy_and_remap_memref_stmts (mr, stmts, 0, 1, processed_stmts);
++      gimple *last_stmt = mr->stmts[0];
++      gcc_assert (last_stmt);
++
++      tree old_addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE);
++      tree new_addr = old_addr;
++      if (decl_map->count (old_addr))
++	new_addr = (*decl_map)[old_addr];
++      if (prefetched_addrs.count (new_addr))
++	continue;
++      /* Insert prefetch intrinsic call.  */
++      tree write_p = mr->is_store ? integer_one_node : integer_zero_node;
++      last_pref = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
++				     3, new_addr, write_p, local);
++      pcalls.safe_push (last_pref);
++      gimple_seq_add_stmt (&stmts, last_pref);
++      prefetched_addrs.insert (new_addr);
++
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Insert %d prefetch stmt:\n", i);
++	  print_gimple_stmt (dump_file, last_pref, 0);
++	}
++    }
++}
++
++/* Split bbs after condition stmts and fix control flow graph.  */
++
++static void
++correct_cfg (vec &bbends, gimple *last_pref, basic_block &dom_bb)
++{
++  edge e_last = split_block (dom_bb, last_pref);
++  if (!bbends.length () || last_pref == NULL)
++    return;
++  for (int i = bbends.length () - 1; i >= 0; i--)
++    {
++      gimple *bbend = bbends[i];
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Split dom_bb after condition stmts:\n");
++	  print_gimple_stmt (dump_file, bbend, 0);
++	}
++      basic_block last_bb = e_last->dest;
++      edge e = split_block (dom_bb, bbend);
++      e->flags &= ~EDGE_FALLTHRU;
++      e->flags |= EDGE_TRUE_VALUE;
++      edge e_false = make_edge (dom_bb, last_bb, EDGE_FALSE_VALUE);
++      e_false->probability = profile_probability::never ();
++    }
++}
++
+ static void
+ create_cgraph_edge (cgraph_node *n, gimple *stmt)
+ {
+@@ -1529,6 +1970,17 @@ create_cgraph_edge (cgraph_node *n, gimple *stmt)
+   ipa_call_summaries->get_create (e);
+ }
+ 
++/* Modify cgraph inserting calls to prefetch intrinsics.  */
++
++static void
++modify_ipa_info (cgraph_node *n, vec &pcalls)
++{
++  for (unsigned i = 0; i < pcalls.length (); i++)
++    create_cgraph_edge (n, pcalls[i]);
++  ipa_update_overall_fn_summary (n);
++  renumber_gimple_stmt_uids (DECL_STRUCT_FUNCTION (n->decl));
++}
++
+ /* Insert prefetch intrinsics in this function, return nonzero on success.  */
+ 
+ static int
+@@ -1607,6 +2059,18 @@ optimize_function (cgraph_node *n, function *fn)
+       return 0;
+     }
+ 
++  vec used_mr_vec = vNULL;
++  for (memref_set::const_iterator it = used_mrs.begin ();
++       it != used_mrs.end (); it++)
++    used_mr_vec.safe_push (*it);
++  used_mr_vec.qsort (memref_id_cmp);
++  if (!check_prefetch_safety (used_mr_vec, comp_mr))
++    {
++      if (dump_file)
++	fprintf (dump_file, "Prefetching may be unsafe.  Skip the case.\n");
++      return 0;
++    }
++
+   /* Filter out memrefs with the same memory references.
+      TODO: maybe do the same with used mrs.  */
+   vec vmrs = vNULL;
+@@ -1616,18 +2080,18 @@ optimize_function (cgraph_node *n, function *fn)
+   /* TODO: maybe it is useful to process also used_mrs.  */
+   basic_block dom_bb = NULL;
+   for (unsigned int i = 0; i < vmrs.length (); i++)
+-    find_nearest_common_dominator (vmrs[i], dom_bb);
++    find_nearest_common_post_dominator (vmrs[i], dom_bb);
+ 
+   if (!dom_bb)
+     {
+       if (dump_file)
+-	fprintf (dump_file, "Dominator bb for MRs is not found.  "
++	fprintf (dump_file, "Post dominator bb for MRs is not found.  "
+ 		 "Skip the case.\n");
+       return 0;
+     }
+   else if (dump_file)
+     {
+-      fprintf (dump_file, "Dominator bb %d for MRs:\n", dom_bb->index);
++      fprintf (dump_file, "Post dominator bb %d for MRs:\n", dom_bb->index);
+       gimple_dump_bb (dump_file, dom_bb, 0, dump_flags);
+       fprintf (dump_file, "\n");
+     }
+@@ -1636,19 +2100,33 @@ optimize_function (cgraph_node *n, function *fn)
+   gimple *last_used = NULL;
+   for (gimple_stmt_iterator si = gsi_last_bb (dom_bb); !gsi_end_p (si);
+        gsi_prev (&si))
+-    if (comp_mr->stmts[0] == gsi_stmt (si))
+-      {
+-	last_used = gsi_stmt (si);
+-	if (dump_file)
++    {
++      bool found = false;
++      for (unsigned int i = 0; i < vmrs.length (); i++)
++	/* TODO: take into account only those MRs that should be
++	   checked memory.  */
++	if (vmrs[i]->stmts[0] == gsi_stmt (si))
+ 	  {
+-	    fprintf (dump_file, "Last used stmt in dominator bb:\n");
+-	    print_gimple_stmt (dump_file, last_used, 0);
++	    found = true;
++	    break;
+ 	  }
+-	break;
+-      }
++      if (found || comp_mr->stmts[0] == gsi_stmt (si))
++	{
++	  last_used = gsi_stmt (si);
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Last used stmt in post dominator bb:\n");
++	      print_gimple_stmt (dump_file, last_used, 0);
++	    }
++	  break;
++	}
++    }
+ 
+-  split_block (dom_bb, last_used);
+-  gimple_stmt_iterator gsi = gsi_last_bb (dom_bb);
++  gimple_stmt_iterator gsi;
++  if (last_used)
++    gsi = gsi_for_stmt (last_used);
++  else
++    gsi = gsi_last_bb (dom_bb);
+ 
+   /* Create new inc var.  Insert new_var = old_var + step * factor.  */
+   decl_map = new tree_map;
+@@ -1660,7 +2138,7 @@ optimize_function (cgraph_node *n, function *fn)
+   stmt_set processed_stmts;
+   if (!dominated_by_p (CDI_DOMINATORS, dom_bb, gimple_bb (comp_mr->stmts[0])))
+     {
+-      gimple *tmp = gimple_copy_and_remap_memref_stmts (comp_mr, stmts, 0,
++      gimple *tmp = gimple_copy_and_remap_memref_stmts (comp_mr, stmts, 0, 0,
+ 							processed_stmts);
+       inc_var = gimple_assign_lhs (tmp);
+     }
+@@ -1683,86 +2161,26 @@ optimize_function (cgraph_node *n, function *fn)
+       fprintf (dump_file, "\n");
+     }
+ 
+-  /* Create other new vars.  Insert new stmts.  */
+-  vec used_mr_vec = vNULL;
+-  for (memref_set::const_iterator it = used_mrs.begin ();
+-       it != used_mrs.end (); it++)
+-    used_mr_vec.safe_push (*it);
+-  used_mr_vec.qsort (memref_id_cmp);
+-
+-  for (unsigned int j = 0; j < used_mr_vec.length (); j++)
+-    {
+-      memref_t *mr = used_mr_vec[j];
+-      if (mr == comp_mr)
+-	continue;
+-      gimple *last_stmt = gimple_copy_and_remap_memref_stmts (mr, stmts, 0,
+-							      processed_stmts);
+-      gcc_assert (last_stmt);
+-      if (dump_file)
+-	{
+-	  fprintf (dump_file, "MR (%d) new mem: ", mr->mr_id);
+-	  print_generic_expr (dump_file, gimple_assign_lhs (last_stmt));
+-	  fprintf (dump_file, "\n");
+-	}
+-    }
+-  /* On new load check page fault.  */
+-  /* Insert prefetch instructions.  */
+-  if (dump_file)
+-    fprintf (dump_file, "Evaluate addresses and insert prefetch insn.\n");
++  vec bbends = vNULL;
++  create_stmts_for_used_mrs (used_mr_vec, bbends, stmts, processed_stmts,
++			     dist_val, comp_mr);
+ 
+   vec pcalls = vNULL;
+-  tree local;
+-  switch (param_ipa_prefetch_locality)
+-    {
+-    case 0:
+-      local = integer_zero_node;
+-      break;
+-    case 1:
+-      local = integer_one_node;
+-      break;
+-    case 2:
+-      local = build_int_cst (integer_type_node, 2);
+-      break;
+-    default:
+-    case 3:
+-      local = integer_three_node;
+-      break;
+-    }
+-  tree_set prefetched_addrs;
+-  for (unsigned int j = 0; j < vmrs.length (); j++)
+-    {
+-      memref_t *mr = vmrs[j];
+-      /* Don't need to copy the last stmt, since we insert prefetch insn
+-	 instead of it.  */
+-      gimple_copy_and_remap_memref_stmts (mr, stmts, 1, processed_stmts);
+-      gimple *last_stmt = mr->stmts[0];
+-      gcc_assert (last_stmt);
+-      tree write_p = mr->is_store ? integer_one_node : integer_zero_node;
+-      tree addr = get_mem_ref_address_ssa_name (mr->mem, NULL_TREE);
+-      if (decl_map->count (addr))
+-	addr = (*decl_map)[addr];
+-      if (prefetched_addrs.count (addr))
+-	continue;
+-      last_stmt = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
+-				     3, addr, write_p, local);
+-      pcalls.safe_push (last_stmt);
+-      gimple_seq_add_stmt (&stmts, last_stmt);
+-      prefetched_addrs.insert (addr);
+-      if (dump_file)
+-	{
+-	  fprintf (dump_file, "Insert %d prefetch stmt:\n", j);
+-	  print_gimple_stmt (dump_file, last_stmt, 0);
+-	}
+-    }
+-
++  gimple *last_pref = NULL;
++  insert_prefetch_stmts (pcalls, stmts, last_pref, vmrs, processed_stmts);
+   gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++
++  correct_cfg (bbends, last_pref, dom_bb);
++
+   delete decl_map;
+ 
+-  /* Modify cgraph inserting calls to prefetch intrinsics.  */
+-  for (unsigned i = 0; i < pcalls.length (); i++)
+-    create_cgraph_edge (n, pcalls[i]);
+-  ipa_update_overall_fn_summary (n);
+-  renumber_gimple_stmt_uids (DECL_STRUCT_FUNCTION (n->decl));
++  modify_ipa_info (n, pcalls);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "After optimization:\n");
++      dump_function_to_file (cfun->decl, dump_file, (dump_flags_t)0);
++    }
+ 
+   return 1;
+ }
+@@ -1781,8 +2199,10 @@ insert_prefetch ()
+ 	fprintf (dump_file, "Optimize function %s\n", n->dump_name ());
+       push_cfun (DECL_STRUCT_FUNCTION (n->decl));
+       calculate_dominance_info (CDI_DOMINATORS);
++      calculate_dominance_info (CDI_POST_DOMINATORS);
+       res |= optimize_function (n, fn);
+       free_dominance_info (CDI_DOMINATORS);
++      free_dominance_info (CDI_POST_DOMINATORS);
+       pop_cfun ();
+     }
+   return res;
+diff --git a/gcc/params.opt b/gcc/params.opt
+index 747d0f829..fc700ab79 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -317,6 +317,10 @@ The factor represents the number of inductive variable incrementations to evalua
+ Common Joined UInteger Var(param_ipa_prefetch_locality) Init(3) IntegerRange(0, 3) Param Optimization
+ The flag represents temporal locality value between 0 and 3, the higher value means the higher temporal locality in the data.
+ 
++-param=ipa-prefetch-pagesize=
++Common Joined UInteger Var(param_ipa_prefetch_pagesize) Init(4096) Param Optimization
++The flag represents the current page size used for runtime checks of memory access addresses.
++
+ -param=ira-loop-reserved-regs=
+ Common Joined UInteger Var(param_ira_loop_reserved_regs) Init(2) Param Optimization
+ The number of registers in each class kept unused by loop invariant motion.
+-- 
+2.33.0
+
diff --git a/0288-Enable-macro-use-commandline.patch b/0288-Enable-macro-use-commandline.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cafe01b5c6851ce46b31cb2e97b778be3ddb029e
--- /dev/null
+++ b/0288-Enable-macro-use-commandline.patch
@@ -0,0 +1,207 @@
+From 7a578a8725f8fd7d92fcbbac14841ea7e8d0870f Mon Sep 17 00:00:00 2001
+From: zhangxiaohua 
+Date: Sun, 25 Aug 2024 23:08:53 +0800
+Subject: [PATCH 157/157] Enable macro-use-commandline
+
+Signed-off-by: zhangxiaohua 
+---
+ gcc/c-family/c-opts.cc                        |  4 +++
+ gcc/c-family/c.opt                            |  4 +++
+ gcc/doc/cppopts.texi                          |  4 +++
+ gcc/doc/invoke.texi                           |  1 +
+ .../gcc.dg/cpp/macro-use-cmdline-1.c          | 26 ++++++++++++++
+ .../gcc.dg/cpp/macro-use-cmdline-2.c          | 34 +++++++++++++++++++
+ libcpp/include/cpplib.h                       |  3 ++
+ libcpp/init.cc                                |  1 +
+ libcpp/macro.cc                               | 16 ++++++++-
+ 9 files changed, 92 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-1.c
+ create mode 100644 gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-2.c
+
+diff --git a/gcc/c-family/c-opts.cc b/gcc/c-family/c-opts.cc
+index 5134f6128..744b54dc3 100644
+--- a/gcc/c-family/c-opts.cc
++++ b/gcc/c-family/c-opts.cc
+@@ -527,6 +527,10 @@ c_common_handle_option (size_t scode, const char *arg, HOST_WIDE_INT value,
+ 	cpp_opts->track_macro_expansion = 2;
+       break;
+ 
++    case OPT_fmacro_use_commandline:
++      cpp_opts->macro_use_commandline = 1;
++      break;
++
+     case OPT_fexec_charset_:
+       cpp_opts->narrow_charset = arg;
+       break;
+diff --git a/gcc/c-family/c.opt b/gcc/c-family/c.opt
+index 07da40ef4..a36c27f07 100644
+--- a/gcc/c-family/c.opt
++++ b/gcc/c-family/c.opt
+@@ -2012,6 +2012,10 @@ ftrack-macro-expansion=
+ C ObjC C++ ObjC++ JoinedOrMissing RejectNegative UInteger
+ -ftrack-macro-expansion=<0|1|2>	Track locations of tokens coming from macro expansion and display them in error messages.
+ 
++fmacro-use-commandline
++C ObjC C++ ObjC++ JoinedOrMissing RejectNegative UInteger
++Preferentially use options from the command line.
++
+ fpretty-templates
+ C++ ObjC++ Var(flag_pretty_templates) Init(1)
+ Do not pretty-print template specializations as the template signature followed by the arguments.
+diff --git a/gcc/doc/cppopts.texi b/gcc/doc/cppopts.texi
+index c0a92b370..8c8a81eac 100644
+--- a/gcc/doc/cppopts.texi
++++ b/gcc/doc/cppopts.texi
+@@ -277,6 +277,10 @@ correct column numbers in warnings or errors, even if tabs appear on the
+ line.  If the value is less than 1 or greater than 100, the option is
+ ignored.  The default is 8.
+ 
++@item -fmacro-use-commandline
++@opindex fmacro-use-commandline
++Preferentially use options from the command line.
++
+ @item -ftrack-macro-expansion@r{[}=@var{level}@r{]}
+ @opindex ftrack-macro-expansion
+ Track locations of tokens across macro expansions. This allows the
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index bdd8b9429..2ff7d860d 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -630,6 +630,7 @@ Objective-C and Objective-C++ Dialects}.
+ -fexec-charset=@var{charset}  -fextended-identifiers  @gol
+ -finput-charset=@var{charset}  -flarge-source-files  @gol
+ -fmacro-prefix-map=@var{old}=@var{new} -fmax-include-depth=@var{depth} @gol
++-fmacro-use-commandline @gol
+ -fno-canonical-system-headers  -fpch-deps  -fpch-preprocess  @gol
+ -fpreprocessed  -ftabstop=@var{width}  -ftrack-macro-expansion  @gol
+ -fwide-exec-charset=@var{charset}  -fworking-directory @gol
+diff --git a/gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-1.c b/gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-1.c
+new file mode 100644
+index 000000000..f85d9c268
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-1.c
+@@ -0,0 +1,26 @@
++/*
++   { dg-options "-fmacro-use-commandline -DTEST_MACRO=1 -DTEST_MACRO=20" }
++   { dg-do compile }
++   { dg-do run }
++*/
++
++/* { dg-warning "-:redefined" "redef TEST_MACRO"      { target *-*-* } 0  }
++   { dg-message "-:previous"  "prev def TEST_MACRO"   { target *-*-* } 0  }
++*/
++
++#if DEBUG
++extern int puts (const char *);
++#else
++#define puts(X)
++#endif
++extern void abort (void);
++
++#define err(str) do { puts(str); abort(); } while (0)
++
++int main (int argc, char *argv[])
++{
++  int macroValue = TEST_MACRO;
++  if (macroValue != 20)
++    err("macroValue");
++  return 0;
++}
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-2.c b/gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-2.c
+new file mode 100644
+index 000000000..99d92d1e4
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/cpp/macro-use-cmdline-2.c
+@@ -0,0 +1,34 @@
++/*
++   { dg-options "-fmacro-use-commandline -DTEST_MACRO=1" }
++   { dg-do compile }
++   { dg-do run }
++*/
++
++#define TEST_MACRO 300
++#define TEST_MACRO_1 400
++/*
++   { dg-warning "-:redefined" "redef TEST_MACRO"      { target *-*-* } 7  }
++   { dg-message "-:previous"  "prev def TEST_MACRO"   { target *-*-* } 0  }
++*/
++
++#if DEBUG
++extern int puts (const char *);
++#else
++#define puts(X)
++#endif
++
++extern void abort (void);
++
++#define err(str) do { puts(str); abort(); } while (0)
++
++int main (int argc, char *argv[])
++{
++  int macroValue = TEST_MACRO;
++  if (macroValue != 1)
++    err("macroValue");
++
++  int macroValue1 = TEST_MACRO_1;
++  if (macroValue1 != 400)
++    err("macroValue1");
++  return 0;
++}
+\ No newline at end of file
+diff --git a/libcpp/include/cpplib.h b/libcpp/include/cpplib.h
+index 3eba6f74b..c6101ca01 100644
+--- a/libcpp/include/cpplib.h
++++ b/libcpp/include/cpplib.h
+@@ -471,6 +471,9 @@ struct cpp_options
+      consumes the highest amount of memory.  */
+   unsigned char track_macro_expansion;
+ 
++  /* Use the options on the command line first.  */
++  unsigned char macro_use_commandline;
++
+   /* Nonzero means handle C++ alternate operator names.  */
+   unsigned char operator_names;
+ 
+diff --git a/libcpp/init.cc b/libcpp/init.cc
+index f4ab83d21..47be60a36 100644
+--- a/libcpp/init.cc
++++ b/libcpp/init.cc
+@@ -215,6 +215,7 @@ cpp_create_reader (enum c_lang lang, cpp_hash_table *table,
+      cpp_options::track_macro_expansion to learn about the other
+      values.  */
+   CPP_OPTION (pfile, track_macro_expansion) = 2;
++  CPP_OPTION (pfile, macro_use_commandline) = 0;
+   CPP_OPTION (pfile, warn_normalize) = normalized_C;
+   CPP_OPTION (pfile, warn_literal_suffix) = 1;
+   CPP_OPTION (pfile, canonical_system_headers)
+diff --git a/libcpp/macro.cc b/libcpp/macro.cc
+index 8ebf360c0..aa9e4ffa6 100644
+--- a/libcpp/macro.cc
++++ b/libcpp/macro.cc
+@@ -3852,7 +3852,21 @@ _cpp_create_definition (cpp_reader *pfile, cpp_hashnode *node)
+ 				 node->value.macro->line, 0,
+ 			 "this is the location of the previous definition");
+ 	}
+-      _cpp_free_definition (node);
++#define LOCATION_FROM_LINEMAP 0
++#define MIN_LINE_OF_MACRO_BEEN_OVERRIDDEN 96
++#define MAX_LINE_OF_MACRO_BEEN_OVERRIDDEN 128
++     if (CPP_OPTION (pfile, macro_use_commandline)
++	    && node->value.macro->line >= MIN_LINE_OF_MACRO_BEEN_OVERRIDDEN
++	    && node->value.macro->line <= MAX_LINE_OF_MACRO_BEEN_OVERRIDDEN
++	    && pfile->forced_token_location == LOCATION_FROM_LINEMAP)
++	{
++	  cpp_pedwarning_with_line (pfile, CPP_W_NONE,
++	    node->value.macro->line, 0,
++	    "use the previous definition from commandline");
++	    return false;
++	}
++	else
++	   _cpp_free_definition (node);
+     }
+ 
+   /* Enter definition in hash table.  */
+-- 
+2.33.0
+
diff --git a/0289-tree-ssa-loop-crc.cc-TARGET_CRC32-may-be-not-defined.patch b/0289-tree-ssa-loop-crc.cc-TARGET_CRC32-may-be-not-defined.patch
new file mode 100644
index 0000000000000000000000000000000000000000..05818083d7a37e65e0e4e43ca980d3f49391cc39
--- /dev/null
+++ b/0289-tree-ssa-loop-crc.cc-TARGET_CRC32-may-be-not-defined.patch
@@ -0,0 +1,35 @@
+From 63f99f46e851aecc070496a0e688a0d118c820a4 Mon Sep 17 00:00:00 2001
+From: YunQiang Su 
+Date: Mon, 2 Sep 2024 17:57:52 +0800
+Subject: [PATCH] tree-ssa-loop-crc.cc: TARGET_CRC32 may be not defined
+
+TARGET_CRC32 may be not defined on some architectures, RISC-V is one example.
+---
+ gcc/tree-ssa-loop-crc.cc | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/gcc/tree-ssa-loop-crc.cc b/gcc/tree-ssa-loop-crc.cc
+index b9c2f71ca..7eee9446d 100644
+--- a/gcc/tree-ssa-loop-crc.cc
++++ b/gcc/tree-ssa-loop-crc.cc
+@@ -1227,6 +1227,9 @@ convert_to_new_loop (class loop *loop)
+ static unsigned int
+ tree_ssa_loop_crc ()
+ {
++#ifndef TARGET_CRC32
++  return 0;
++#else
+   if (TARGET_CRC32 == false)
+     {
+       warning (OPT____,"The loop-crc optimization is not working." \
+@@ -1269,6 +1272,7 @@ tree_ssa_loop_crc ()
+       }
+   }
+   return todo;
++#endif
+ }
+ 
+ /* Loop crc.  */
+-- 
+2.33.0
+
diff --git a/0290-Add-ipa-prefetch-test-for-gcc-s-case.patch b/0290-Add-ipa-prefetch-test-for-gcc-s-case.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4545420167bc764595b22b12d7ce486786325429
--- /dev/null
+++ b/0290-Add-ipa-prefetch-test-for-gcc-s-case.patch
@@ -0,0 +1,209 @@
+From 0534ae05fc313c0d449b48ffe3e01642b644e6d2 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Fri, 6 Sep 2024 10:40:50 +0800
+Subject: [PATCH 1/2] Add ipa-prefetch test for gcc's case
+
+---
+ gcc/ipa-prefetch.cc                         |   4 +-
+ gcc/testsuite/gcc.dg/ipa/ipa-prefetch-gcc.c | 167 ++++++++++++++++++++
+ 2 files changed, 170 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.dg/ipa/ipa-prefetch-gcc.c
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index b000d4d75..8e628390b 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -1668,6 +1668,8 @@ static gimple *
+ insert_page_check (tree addr, tree_poly_offset_map &offset_map,
+ 		   gimple_seq &stmts)
+ {
++  if (dump_file)
++    fprintf (dump_file, "Insert page check.\n");
+   poly_offset_int offset = 0;
+   if (offset_map.count (addr))
+     offset = offset_map[addr];
+@@ -1783,7 +1785,7 @@ static gimple *
+ insert_index_check (tree mem, gimple_seq &stmts)
+ {
+   if (dump_file)
+-    fprintf (dump_file, "Insert array index check\n");
++    fprintf (dump_file, "Insert array index check.\n");
+   tree atype = TREE_TYPE (TREE_OPERAND (mem, 0));
+   tree ind = TREE_OPERAND (mem, 1);
+   if (decl_map->count (ind))
+diff --git a/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-gcc.c b/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-gcc.c
+new file mode 100644
+index 000000000..f1001c350
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/ipa/ipa-prefetch-gcc.c
+@@ -0,0 +1,167 @@
++/* { dg-do link } */
++/* { dg-options "-O3 -fipa-prefetch -flto -flto-partition=one -fdump-ipa-ipa_prefetch" } */
++/* { dg-require-effective-target lto } */
++
++/* Based on opensource gcc code.  */
++
++#include 
++#include 
++#include 
++
++#define SPARSESET_ELT_TYPE unsigned int
++#define ALLOCNO_NUM(A) ((A)->num)
++
++typedef struct sparseset_def
++{
++  SPARSESET_ELT_TYPE *dense;	/* Dense array.  */
++  SPARSESET_ELT_TYPE *sparse;	/* Sparse array.  */
++  SPARSESET_ELT_TYPE members;	/* Number of elements.  */
++  SPARSESET_ELT_TYPE size;	/* Maximum number of elements.  */
++  SPARSESET_ELT_TYPE iter;	/* Iterator index.  */
++  unsigned char iter_inc;	/* Iteration increment amount.  */
++  bool iterating;
++  SPARSESET_ELT_TYPE elms[2];   /* Combined dense and sparse arrays.  */
++} *sparseset;
++
++struct ira_allocno
++{
++  /* The allocno order number starting with 0.  Each allocno has an
++     unique number and the number is never changed for the
++     allocno.  */
++  int num;
++  /* Regno for allocno or cap.  */
++  int regno;
++  /*...*/
++};
++
++typedef struct ira_allocno_live_range *allocno_live_range_t;
++typedef struct ira_allocno *ira_allocno_t;
++
++struct ira_allocno_live_range
++{
++  /* Allocno whose live range is described by given structure.  */
++  ira_allocno_t allocno;
++  /* Program point range.  */
++  int start, finish;
++  /* Next structure describing program points where the allocno
++     lives.  */
++  allocno_live_range_t next;
++  /* Pointer to structures with the same start/finish.  */
++  allocno_live_range_t start_next, finish_next;
++};
++
++bool
++sparseset_bit_p (sparseset s, SPARSESET_ELT_TYPE e)
++{
++  SPARSESET_ELT_TYPE idx;
++
++  idx = s->sparse[e];
++
++  return idx < s->members && s->dense[idx] == e;
++}
++
++bool new_pseudos_p;
++int ira_max_point, ira_allocnos_num;
++allocno_live_range_t *ira_finish_point_ranges;
++
++static inline void
++sparseset_clear (sparseset s)
++{
++  s->members = 0;
++  s->iterating = false;
++}
++
++sparseset
++sparseset_alloc (SPARSESET_ELT_TYPE n_elms)
++{
++  unsigned int n_bytes = sizeof (struct sparseset_def)
++			 + ((n_elms - 1) * 2 * sizeof (SPARSESET_ELT_TYPE));
++
++  /* We use xcalloc rather than xmalloc to silence some valgrind uninitialized
++     read errors when accessing set->sparse[n] when "n" is not, and never has
++     been, in the set.  These uninitialized reads are expected, by design and
++     harmless.  If this turns into a performance problem due to some future
++     additional users of sparseset, we can revisit this decision.  */
++  sparseset set = (sparseset) calloc (1, n_bytes);
++  set->dense = &(set->elms[0]);
++  set->sparse = &(set->elms[n_elms]);
++  set->size = n_elms;
++  sparseset_clear (set);
++  return set;
++}
++
++void
++sparseset_insert_bit (sparseset s, SPARSESET_ELT_TYPE e, SPARSESET_ELT_TYPE idx)
++{
++  s->sparse[e] = idx;
++  s->dense[idx] = e;
++}
++
++void
++sparseset_swap (sparseset s, SPARSESET_ELT_TYPE idx1, SPARSESET_ELT_TYPE idx2)
++{
++  SPARSESET_ELT_TYPE tmp = s->dense[idx2];
++  sparseset_insert_bit (s, s->dense[idx1], idx2);
++  sparseset_insert_bit (s, tmp, idx1);
++}
++
++void __attribute__ ((noinline))
++sparseset_clear_bit (sparseset s, SPARSESET_ELT_TYPE e)
++{
++  if (sparseset_bit_p (s, e))
++    {
++      SPARSESET_ELT_TYPE idx = s->sparse[e];
++      SPARSESET_ELT_TYPE iter = s->iter;
++      SPARSESET_ELT_TYPE mem = s->members - 1;
++
++      /* If we are iterating over this set and we want to delete a
++	 member we've already visited, then we swap the element we
++	 want to delete with the element at the current iteration
++	 index so that it plays well together with the code below
++	 that actually removes the element.  */
++      if (s->iterating && idx <= iter)
++	{
++	  if (idx < iter)
++	    {
++	      sparseset_swap (s, idx, iter);
++	      idx = iter;
++	    }
++	  s->iter_inc = 0;
++	}
++
++      /* Replace the element we want to delete with the last element
++	 in the dense array and then decrement s->members, effectively
++	 removing the element we want to delete.  */
++      sparseset_insert_bit (s, s->dense[mem], idx);
++      s->members = mem;
++    }
++}
++
++allocno_live_range_t r;
++sparseset allocnos_live;
++
++void
++ira_flattening ()
++{
++  int i;
++
++  if (new_pseudos_p)
++    {
++      allocnos_live = sparseset_alloc (ira_allocnos_num);
++      for (i = 0; i < ira_max_point; i++)
++	{
++	  for (r = ira_finish_point_ranges[i]; r != NULL; r = r->finish_next)
++	    sparseset_clear_bit (allocnos_live, ALLOCNO_NUM (r->allocno));
++	}
++    }
++}
++
++int main()
++{
++  ira_flattening ();
++  return 0;
++}
++
++/* { dg-final { scan-wpa-ipa-dump-times "Insert page check" 1 "ipa_prefetch"} } */
++/* { dg-final { scan-wpa-ipa-dump-times "Insert 0 prefetch stmt:" 1 "ipa_prefetch"} } */
++/* { dg-final { scan-wpa-ipa-dump-times "Split dom_bb after condition stmts:" 1 "ipa_prefetch"} } */
+-- 
+2.33.0
+
diff --git a/0291-Fix-settings-for-wide-operations-tests.patch b/0291-Fix-settings-for-wide-operations-tests.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1e368b6d4a9ee7dc54af81a9af071f73f3c96ad5
--- /dev/null
+++ b/0291-Fix-settings-for-wide-operations-tests.patch
@@ -0,0 +1,73 @@
+From 411792b0bbb63715d8e90d46eb4f0d9c810ce8ba Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 00812787 
+Date: Tue, 3 Sep 2024 21:26:03 +0800
+Subject: [PATCH 2/2] Fix settings for wide operations tests
+
+Signed-off-by: lin-houzhong 
+---
+ gcc/testsuite/gcc.dg/double_sized_mul-1.c | 8 +++++---
+ gcc/testsuite/gcc.dg/double_sized_mul-2.c | 9 +++++----
+ gcc/testsuite/gcc.dg/uaddsub.c            | 6 ++++--
+ 3 files changed, 14 insertions(+), 9 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-1.c b/gcc/testsuite/gcc.dg/double_sized_mul-1.c
+index d32a25223..b848e02de 100644
+--- a/gcc/testsuite/gcc.dg/double_sized_mul-1.c
++++ b/gcc/testsuite/gcc.dg/double_sized_mul-1.c
+@@ -1,7 +1,8 @@
+-/* { dg-do compile } */
++/* { dg-do compile { target aarch64*-*-* x86_64*-*-*} } */
+ /* fif-conversion-gimple and fuaddsub-overflow-match-all are required for
+    proper overflow detection in some cases.  */
+-/* { dg-options "-O2 -fif-conversion-gimple -march=armv8.2-a -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++/* { dg-additional-options "-march=armv8.2-a" { target aarch64*-*-* } } */
+ #include 
+ 
+ typedef unsigned __int128 uint128_t;
+@@ -138,4 +139,5 @@ uint128_t mul128_perm (uint64_t a, uint64_t b)
+   return res;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "double sized mul optimized: 1" 6 "widening_mul" } } */
++/* { dg-final { scan-tree-dump-times "double sized mul optimized: 1" 6 "widening_mul" { target aarch64*-*-* } } } */
++/* { dg-final { scan-tree-dump-times "double sized mul optimized: 1" 4 "widening_mul" { target x86_64*-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/double_sized_mul-2.c b/gcc/testsuite/gcc.dg/double_sized_mul-2.c
+index ff35902b7..cf8f0aedd 100644
+--- a/gcc/testsuite/gcc.dg/double_sized_mul-2.c
++++ b/gcc/testsuite/gcc.dg/double_sized_mul-2.c
+@@ -1,7 +1,8 @@
+-/* { dg-do compile } */
+-/* fif-conversion-gimple is required for proper overflow detection
+-   in some cases.  */
+-/* { dg-options "-O2 -fif-conversion-gimple -march=armv8.2-a -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++/* { dg-do compile { target aarch64*-*-* x86_64*-*-*} } */
++/* fif-conversion-gimple and fuaddsub-overflow-match-all are required for
++   proper overflow detection in some cases.  */
++/* { dg-options "-O2 -fif-conversion-gimple -fuaddsub-overflow-match-all -fdump-tree-widening_mul-stats" } */
++/* { dg-additional-options "-march=armv8.2-a" { target aarch64*-*-* } } */
+ #include 
+ 
+ typedef unsigned __int128 uint128_t;
+diff --git a/gcc/testsuite/gcc.dg/uaddsub.c b/gcc/testsuite/gcc.dg/uaddsub.c
+index 96c26d308..dcb587fc8 100644
+--- a/gcc/testsuite/gcc.dg/uaddsub.c
++++ b/gcc/testsuite/gcc.dg/uaddsub.c
+@@ -1,5 +1,6 @@
+-/* { dg-do compile } */
++/* { dg-do compile { target aarch64*-*-* x86_64-*-* } } */
+ /* { dg-options "-O2 -fuaddsub-overflow-match-all -fdump-tree-optimized" } */
++/* { dg-additional-options "-march=armv8.2-a" { target aarch64*-*-* } } */
+ #include 
+ 
+ typedef unsigned __int128 uint128_t;
+@@ -140,4 +141,5 @@ uint256_t sub256 (uint128_t a, uint128_t b)
+ }
+ 
+ /* { dg-final { scan-tree-dump-times "= .ADD_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 5 "optimized" } } */
+-/* { dg-final { scan-tree-dump-times "= .SUB_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 5 "optimized" } } */
++/* { dg-final { scan-tree-dump-times "= .SUB_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 5 "optimized" { target aarch64*-*-* } } } */
++/* { dg-final { scan-tree-dump-times "= .SUB_OVERFLOW \\(a_\[0-9\]+\\(D\\), b_\[0-9\]+\\(D\\)\\)" 4 "optimized" { target x86_64*-*-* } } } */
+-- 
+2.33.0
+
diff --git a/0292-Fix-errors-in-ipa-prefetch-IAORPF-and-IAOSJ0.patch b/0292-Fix-errors-in-ipa-prefetch-IAORPF-and-IAOSJ0.patch
new file mode 100644
index 0000000000000000000000000000000000000000..13341df6672c078b4bc6e3cfba77b18e2c763634
--- /dev/null
+++ b/0292-Fix-errors-in-ipa-prefetch-IAORPF-and-IAOSJ0.patch
@@ -0,0 +1,42 @@
+From 808294bf0f32aaff1cc7e56a756b246d328b3402 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Fri, 6 Sep 2024 11:10:03 +0800
+Subject: [PATCH 2/3] Fix errors in ipa-prefetch (IAORPF and IAOSJ0)
+
+Signed-off-by: Diachkov Ilia 
+---
+ gcc/ipa-prefetch.cc | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index b000d4d75..74af55af0 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -1681,7 +1681,8 @@ insert_page_check (tree addr, tree_poly_offset_map &offset_map,
+   unsigned long long pmask = ~(param_ipa_prefetch_pagesize - 1);
+   tree pmask_cst = build_int_cst (utype, pmask);
+   tree off_tree = wide_int_to_tree (sizetype, offset);
+-  gcc_assert (TREE_CODE (addr_type) == POINTER_TYPE);
++  gcc_assert (TREE_CODE (addr_type) == POINTER_TYPE
++	      || TREE_CODE (addr_type) == REFERENCE_TYPE);
+   tree addr_with_offset = gimple_build (&stmts, POINTER_PLUS_EXPR,
+ 					addr_type, addr, off_tree);
+   tree conv_addr = make_ssa_name (utype);
+@@ -2082,11 +2083,11 @@ optimize_function (cgraph_node *n, function *fn)
+   for (unsigned int i = 0; i < vmrs.length (); i++)
+     find_nearest_common_post_dominator (vmrs[i], dom_bb);
+ 
+-  if (!dom_bb)
++  if (!dom_bb || dom_bb->index == ENTRY_BLOCK || dom_bb->index == EXIT_BLOCK)
+     {
+       if (dump_file)
+-	fprintf (dump_file, "Post dominator bb for MRs is not found.  "
+-		 "Skip the case.\n");
++	fprintf (dump_file, "Post dominator bb for MRs is not found or "
++		 "it's an entry/exit block.  Skip the case.\n");
+       return 0;
+     }
+   else if (dump_file)
+-- 
+2.33.0
+
diff --git a/0293-Fix-error-with-stmts-insertion-in-ipa-prefetch-for-I.patch b/0293-Fix-error-with-stmts-insertion-in-ipa-prefetch-for-I.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3c9ec2575784c8c42d58935fa61dd66807e1686f
--- /dev/null
+++ b/0293-Fix-error-with-stmts-insertion-in-ipa-prefetch-for-I.patch
@@ -0,0 +1,51 @@
+From bfb77997f423ffe3bdcbd8bb8d7f739fe51ce4f5 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Fri, 6 Sep 2024 11:36:11 +0800
+Subject: [PATCH 3/3] Fix error with stmts insertion in ipa-prefetch (for
+ IAO6R3)
+
+Signed-off-by: Diachkov Ilia 
+---
+ gcc/ipa-prefetch.cc | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index b000d4d75..6190c2ebb 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -2096,7 +2096,7 @@ optimize_function (cgraph_node *n, function *fn)
+       fprintf (dump_file, "\n");
+     }
+ 
+-  /* Try to find comp_mr's stmt in the dominator bb.  */
++  /* Try to find comp_mr's stmt in the post dominator bb.  */
+   gimple *last_used = NULL;
+   for (gimple_stmt_iterator si = gsi_last_bb (dom_bb); !gsi_end_p (si);
+        gsi_prev (&si))
+@@ -2168,7 +2168,22 @@ optimize_function (cgraph_node *n, function *fn)
+   vec pcalls = vNULL;
+   gimple *last_pref = NULL;
+   insert_prefetch_stmts (pcalls, stmts, last_pref, vmrs, processed_stmts);
+-  gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++
++  gimple *gstmt = gsi_stmt (gsi);
++  bool insert_after = last_used || gstmt == NULL || !is_ctrl_stmt (gstmt);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Insert prefetch sequence %s stmt:\n",
++	       insert_after ? "after": "before");
++      if (gstmt)
++	print_gimple_stmt (dump_file, gstmt, 0);
++      else
++	fprintf (dump_file, "(no stmts)\n");
++    }
++  if (insert_after)
++    gsi_insert_seq_after (&gsi, stmts, GSI_NEW_STMT);
++  else
++    gsi_insert_seq_before (&gsi, stmts, GSI_NEW_STMT);
+ 
+   correct_cfg (bbends, last_pref, dom_bb);
+ 
+-- 
+2.33.0
+
diff --git a/0294-Fix-errors-in-ipa-prefetch-IAO50J-and-IAO5H7.patch b/0294-Fix-errors-in-ipa-prefetch-IAO50J-and-IAO5H7.patch
new file mode 100644
index 0000000000000000000000000000000000000000..43a88b8a4f5c5dd482deb6f23f77e4d47885141d
--- /dev/null
+++ b/0294-Fix-errors-in-ipa-prefetch-IAO50J-and-IAO5H7.patch
@@ -0,0 +1,80 @@
+From cd79fc29d2cdb73836f8699355113e94b833e0e0 Mon Sep 17 00:00:00 2001
+From: Diachkov Ilia 
+Date: Wed, 11 Sep 2024 17:18:58 +0800
+Subject: [PATCH 2/2] Fix errors in ipa-prefetch (IAO50J and IAO5H7)
+
+Signed-off-by: Diachkov Ilia 
+---
+ gcc/ipa-prefetch.cc | 35 ++++++++++++++++++++++++++++++-----
+ 1 file changed, 30 insertions(+), 5 deletions(-)
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index 5184687aa..685f9c267 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -2099,6 +2099,18 @@ optimize_function (cgraph_node *n, function *fn)
+       fprintf (dump_file, "\n");
+     }
+ 
++  /* Check that all used mrs dominate found post dominator bb.  This case
++     may be supported later by copying MR evaluation to the bb.  */
++  for (unsigned int i = 0; i < used_mr_vec.length (); i++)
++    if (!dominated_by_p (CDI_DOMINATORS, dom_bb,
++			 gimple_bb (used_mr_vec[i]->stmts[0])))
++      {
++	if (dump_file)
++	  fprintf (dump_file, "MR's (%d) bb is not dominate the found bb %d.  "
++		   "Skip the case.\n", used_mr_vec[i]->mr_id, dom_bb->index);
++	return 0;
++      }
++
+   /* Try to find comp_mr's stmt in the post dominator bb.  */
+   gimple *last_used = NULL;
+   for (gimple_stmt_iterator si = gsi_last_bb (dom_bb); !gsi_end_p (si);
+@@ -2133,17 +2145,29 @@ optimize_function (cgraph_node *n, function *fn)
+ 
+   /* Create new inc var.  Insert new_var = old_var + step * factor.  */
+   decl_map = new tree_map;
+-  gcc_assert (comp_mr->stmts[0] && gimple_assign_single_p (comp_mr->stmts[0]));
+-  tree inc_var = gimple_assign_lhs (comp_mr->stmts[0]);
++  gimple *old_inc_stmt = comp_mr->stmts[0];
++  gcc_assert (old_inc_stmt && gimple_assign_single_p (old_inc_stmt));
++  tree inc_var = gimple_assign_lhs (old_inc_stmt);
++  if (dump_file)
++    {
++      fprintf (dump_file, "Old inc stmt: ");
++      print_gimple_stmt (dump_file, old_inc_stmt, 0);
++    }
+   /* If old_var definition dominates the current use, just use it, otherwise
+      evaluate it just before new inc var evaluation.  */
+   gimple_seq stmts = NULL;
+   stmt_set processed_stmts;
+-  if (!dominated_by_p (CDI_DOMINATORS, dom_bb, gimple_bb (comp_mr->stmts[0])))
++  tree local_inc_var = inc_var;
++  if (!dominated_by_p (CDI_DOMINATORS, dom_bb, gimple_bb (old_inc_stmt)))
+     {
+       gimple *tmp = gimple_copy_and_remap_memref_stmts (comp_mr, stmts, 0, 0,
+ 							processed_stmts);
+-      inc_var = gimple_assign_lhs (tmp);
++      local_inc_var = gimple_assign_lhs (tmp);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Localized old inc stmt: ");
++	  print_gimple_stmt (dump_file, tmp, 0);
++	}
+     }
+   tree var_type = TREE_TYPE (inc_var);
+   enum tree_code inc_code;
+@@ -2155,7 +2179,8 @@ optimize_function (cgraph_node *n, function *fn)
+   HOST_WIDE_INT dist_val = tree_to_shwi (step)
+ 			   * param_ipa_prefetch_distance_factor;
+   tree dist = build_int_cst (TREE_TYPE (step), dist_val);
+-  tree new_inc_var = gimple_build (&stmts, inc_code, var_type, inc_var, dist);
++  tree new_inc_var = gimple_build (&stmts, inc_code, var_type, local_inc_var,
++				   dist);
+   (*decl_map)[inc_var] = new_inc_var;
+   if (dump_file)
+     {
+-- 
+2.33.0
+
diff --git a/0295-Fix-error-with-grouped_load-merge-in-slp-transpose-v.patch b/0295-Fix-error-with-grouped_load-merge-in-slp-transpose-v.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8540cd4aca03f8077c98480d442cc194836ea137
--- /dev/null
+++ b/0295-Fix-error-with-grouped_load-merge-in-slp-transpose-v.patch
@@ -0,0 +1,30 @@
+From 7b4cce4896cefefedba9545a9633585e086b7621 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=83=91=E6=99=A8=E5=8D=89?= 
+Date: Wed, 11 Sep 2024 18:26:22 +0800
+Subject: [PATCH 1/2] Fix error with grouped_load merge in
+ slp-transpose-vectorize (for IALR8B)
+
+---
+ gcc/tree-vect-slp.cc | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
+index e3e246977..d4870de43 100644
+--- a/gcc/tree-vect-slp.cc
++++ b/gcc/tree-vect-slp.cc
+@@ -3807,7 +3807,11 @@ vect_slp_grouped_load_find (bb_vec_info bb_vinfo, vec &visited,
+ 	 these two grouped loads need to be merged.  */
+       tree opb = get_op_base_address (first_element);
+       unsigned int grp_size_b = DR_GROUP_SIZE (first_element);
+-      if (opa == opb && grp_size_a == grp_size_b)
++      /* Ensure that the elements merge to load group meet the alignment condition (dr_misalignment) */
++      HOST_WIDE_INT diff = 0;
++      diff = (TREE_INT_CST_LOW (DR_INIT (first_element->dr_aux.dr))
++	      - TREE_INT_CST_LOW (DR_INIT (merge_first_element->dr_aux.dr)));
++      if (opa == opb && grp_size_a == grp_size_b && diff >= 0)
+ 	{
+ 	  res.safe_push (first_element);
+ 	  visited[i] = true;
+-- 
+2.33.0
+
diff --git a/0296-Fix-error-in-slp-transpose-vectorize-for-IAQFM3.patch b/0296-Fix-error-in-slp-transpose-vectorize-for-IAQFM3.patch
new file mode 100644
index 0000000000000000000000000000000000000000..34862f283b12674816bd1fd597c62a6101312055
--- /dev/null
+++ b/0296-Fix-error-in-slp-transpose-vectorize-for-IAQFM3.patch
@@ -0,0 +1,28 @@
+From b3a6a170bf1dc0e460e98a7fd02c92e6b036784a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=83=91=E6=99=A8=E5=8D=89?= 
+Date: Fri, 13 Sep 2024 14:13:07 +0800
+Subject: [PATCH 2/2] Fix error in slp-transpose-vectorize (for IAQFM3)
+
+---
+ gcc/tree-vect-slp.cc | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
+index d4870de43..d7e198dff 100644
+--- a/gcc/tree-vect-slp.cc
++++ b/gcc/tree-vect-slp.cc
+@@ -3811,7 +3811,10 @@ vect_slp_grouped_load_find (bb_vec_info bb_vinfo, vec &visited,
+       HOST_WIDE_INT diff = 0;
+       diff = (TREE_INT_CST_LOW (DR_INIT (first_element->dr_aux.dr))
+ 	      - TREE_INT_CST_LOW (DR_INIT (merge_first_element->dr_aux.dr)));
+-      if (opa == opb && grp_size_a == grp_size_b && diff >= 0)
++      if (opa == opb
++	  && grp_size_a == grp_size_b
++	  && diff >= 0
++	  && check_same_bb (first_element, merge_first_element))
+ 	{
+ 	  res.safe_push (first_element);
+ 	  visited[i] = true;
+-- 
+2.33.0
+
diff --git a/0297-Fix-grouped-load-merging-error-in-SLP-transpose-vectorization.patch b/0297-Fix-grouped-load-merging-error-in-SLP-transpose-vectorization.patch
new file mode 100644
index 0000000000000000000000000000000000000000..21a24c0f4bcaa8f6dce6f75c5be868871a9c1ea0
--- /dev/null
+++ b/0297-Fix-grouped-load-merging-error-in-SLP-transpose-vectorization.patch
@@ -0,0 +1,26 @@
+From 8b30d71f881e15bfbc514f9b65fee178610e1536 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=83=91=E6=99=A8=E5=8D=89?= 
+Date: Wed, 18 Sep 2024 10:48:55 +0800
+Subject: [PATCH] Fix error in slp-transpose-vectorize (for IARHFM)
+
+---
+ gcc/tree-vect-slp.cc | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
+index d7e198dff..fbd638333 100644
+--- a/gcc/tree-vect-slp.cc
++++ b/gcc/tree-vect-slp.cc
+@@ -3814,7 +3814,8 @@ vect_slp_grouped_load_find (bb_vec_info bb_vinfo, vec &visited,
+       if (opa == opb
+ 	  && grp_size_a == grp_size_b
+ 	  && diff >= 0
+-	  && check_same_bb (first_element, merge_first_element))
++	  && check_same_bb (first_element, merge_first_element)
++	  && DR_PTR_INFO (first_element->dr_aux.dr) != DR_PTR_INFO (merge_first_element->dr_aux.dr))
+ 	{
+ 	  res.safe_push (first_element);
+ 	  visited[i] = true;
+-- 
+2.33.0
+
diff --git a/0298-Mark-prefetch-builtin-as-willreturn.patch b/0298-Mark-prefetch-builtin-as-willreturn.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7a489a5d9b0d7e1c7454a66a02ef59ac805532a6
--- /dev/null
+++ b/0298-Mark-prefetch-builtin-as-willreturn.patch
@@ -0,0 +1,99 @@
+From a252bbd11d22481a1e719ed36d800e2192abb369 Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 
+Date: Thu, 31 Oct 2024 15:49:27 +0800
+Subject: [PATCH 1/6] Mark prefetch builtin as willreturn
+
+Signed-off-by: Pronin Alexander 
+---
+ gcc/common.opt      |  4 ++++
+ gcc/gimple.cc       | 30 ++++++++++++++++++++++++++++++
+ gcc/gimple.h        |  1 +
+ gcc/tree-ssa-pre.cc |  4 +---
+ 4 files changed, 36 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 688d65e4d..be5fcc681 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1313,6 +1313,10 @@ fdelete-null-pointer-checks
+ Common Var(flag_delete_null_pointer_checks) Init(-1) Optimization
+ Delete useless null pointer checks.
+ 
++fbuiltin-will-return
++Common Var(flag_builtin_will_return) Optimization
++Consider some of the builtins as definitely returning.
++
+ fdevirtualize-at-ltrans
+ Common Var(flag_ltrans_devirtualize)
+ Stream extra data to support more aggressive devirtualization in LTO local transformation mode.
+diff --git a/gcc/gimple.cc b/gcc/gimple.cc
+index 9e62da426..04ca9f161 100644
+--- a/gcc/gimple.cc
++++ b/gcc/gimple.cc
+@@ -2998,6 +2998,36 @@ nonbarrier_call_p (gimple *call)
+   return false;
+ }
+ 
++static inline bool
++will_return_builtin_p (gimple *call)
++{
++  if (!flag_builtin_will_return)
++    return false;
++
++  if (!gimple_call_builtin_p (call, BUILT_IN_NORMAL))
++    return false;
++
++  switch (DECL_FUNCTION_CODE (gimple_call_fndecl (call)))
++    {
++    case BUILT_IN_PREFETCH:
++      return true;
++    default:
++      return false;
++    }
++}
++
++bool
++will_return_call_p (gimple *call, function *fun)
++{
++  int flags = gimple_call_flags (call);
++  if (!(flags & (ECF_CONST|ECF_PURE))
++      || (flags & ECF_LOOPING_CONST_OR_PURE)
++      || stmt_can_throw_external (fun, call))
++    return will_return_builtin_p (call);
++
++  return true;
++}
++
+ /* Callback for walk_stmt_load_store_ops.
+  
+    Return TRUE if OP will dereference the tree stored in DATA, FALSE
+diff --git a/gcc/gimple.h b/gcc/gimple.h
+index 77a5a07e9..bb05a7664 100644
+--- a/gcc/gimple.h
++++ b/gcc/gimple.h
+@@ -1628,6 +1628,7 @@ extern bool gimple_asm_clobbers_memory_p (const gasm *);
+ extern void dump_decl_set (FILE *, bitmap);
+ extern bool nonfreeing_call_p (gimple *);
+ extern bool nonbarrier_call_p (gimple *);
++extern bool will_return_call_p (gimple *, function *);
+ extern bool infer_nonnull_range (gimple *, tree);
+ extern bool infer_nonnull_range_by_dereference (gimple *, tree);
+ extern bool infer_nonnull_range_by_attribute (gimple *, tree);
+diff --git a/gcc/tree-ssa-pre.cc b/gcc/tree-ssa-pre.cc
+index 98134b5d3..b5264133a 100644
+--- a/gcc/tree-ssa-pre.cc
++++ b/gcc/tree-ssa-pre.cc
+@@ -3988,9 +3988,7 @@ compute_avail (function *fun)
+ 		 that forbids hoisting possibly trapping expressions
+ 		 before it.  */
+ 	      int flags = gimple_call_flags (stmt);
+-	      if (!(flags & (ECF_CONST|ECF_PURE))
+-		  || (flags & ECF_LOOPING_CONST_OR_PURE)
+-		  || stmt_can_throw_external (fun, stmt))
++	      if (!will_return_call_p (stmt, fun))
+ 		/* Defer setting of BB_MAY_NOTRETURN to avoid it
+ 		   influencing the processing of the call itself.  */
+ 		set_bb_may_notreturn = true;
+-- 
+2.33.0
+
diff --git a/0299-Backport-Disallow-pointer-operands-for-and-partly-PR.patch b/0299-Backport-Disallow-pointer-operands-for-and-partly-PR.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c0a733c1ad49595a8e148a4f51f1df371a84eb46
--- /dev/null
+++ b/0299-Backport-Disallow-pointer-operands-for-and-partly-PR.patch
@@ -0,0 +1,156 @@
+From 3b109376d057342a31267ea4c9bd422d940874cb Mon Sep 17 00:00:00 2001
+From: Jakub Jelinek 
+Date: Thu, 31 Oct 2024 16:09:43 +0800
+Subject: [PATCH 2/6] [Backport]Disallow pointer operands for |,^ and partly
+ &[PR106878]
+
+Signed-off-by: Jakub Jelinek 
+---
+ gcc/match.pd                                  |  6 ++++-
+ .../gcc.c-torture/compile/pr106878.c          | 15 +++++++++++++
+ gcc/tree-cfg.cc                               | 22 ++++++++++++++++---
+ gcc/tree-ssa-reassoc.cc                       | 16 +++++++++++++-
+ 4 files changed, 54 insertions(+), 5 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.c-torture/compile/pr106878.c
+
+diff --git a/gcc/match.pd b/gcc/match.pd
+index 8f41c292f..822e065e8 100644
+--- a/gcc/match.pd
++++ b/gcc/match.pd
+@@ -1655,6 +1655,8 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ 	 && (int_fits_type_p (@1, TREE_TYPE (@0))
+ 	     || tree_nop_conversion_p (TREE_TYPE (@0), type)))
+ 	|| types_match (@0, @1))
++       && !POINTER_TYPE_P (TREE_TYPE (@0))
++       && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE
+        /* ???  This transform conflicts with fold-const.cc doing
+ 	  Convert (T)(x & c) into (T)x & (T)c, if c is an integer
+ 	  constants (if x has signed type, the sign bit cannot be set
+@@ -1691,7 +1693,9 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+   (if (GIMPLE
+        && TREE_CODE (@1) != INTEGER_CST
+        && tree_nop_conversion_p (type, TREE_TYPE (@2))
+-       && types_match (type, @0))
++       && types_match (type, @0)
++       && !POINTER_TYPE_P (TREE_TYPE (@0))
++       && TREE_CODE (TREE_TYPE (@0)) != OFFSET_TYPE)
+    (bitop @0 (convert @1)))))
+ 
+ (for bitop (bit_and bit_ior)
+diff --git a/gcc/testsuite/gcc.c-torture/compile/pr106878.c b/gcc/testsuite/gcc.c-torture/compile/pr106878.c
+new file mode 100644
+index 000000000..c84571894
+--- /dev/null
++++ b/gcc/testsuite/gcc.c-torture/compile/pr106878.c
+@@ -0,0 +1,15 @@
++/* PR tree-optimization/106878 */
++
++typedef __INTPTR_TYPE__ intptr_t;
++typedef __UINTPTR_TYPE__ uintptr_t;
++int a;
++
++int
++foo (const int *c)
++{
++  uintptr_t d = ((intptr_t) c | (intptr_t) &a) & 65535 << 16;
++  intptr_t e = (intptr_t) c;
++  if (d != (e & 65535 << 16))
++    return 1;
++  return 0;
++}
+diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
+index 48b52f785..d33aaec8c 100644
+--- a/gcc/tree-cfg.cc
++++ b/gcc/tree-cfg.cc
+@@ -4163,7 +4163,9 @@ verify_gimple_assign_binary (gassign *stmt)
+     case ROUND_MOD_EXPR:
+     case RDIV_EXPR:
+     case EXACT_DIV_EXPR:
+-      /* Disallow pointer and offset types for many of the binary gimple. */
++    case BIT_IOR_EXPR:
++    case BIT_XOR_EXPR:
++      /* Disallow pointer and offset types for many of the binary gimple.  */
+       if (POINTER_TYPE_P (lhs_type)
+ 	  || TREE_CODE (lhs_type) == OFFSET_TYPE)
+ 	{
+@@ -4178,9 +4180,23 @@ verify_gimple_assign_binary (gassign *stmt)
+ 
+     case MIN_EXPR:
+     case MAX_EXPR:
+-    case BIT_IOR_EXPR:
+-    case BIT_XOR_EXPR:
++      /* Continue with generic binary expression handling.  */
++      break;
++
+     case BIT_AND_EXPR:
++      if (POINTER_TYPE_P (lhs_type)
++	  && TREE_CODE (rhs2) == INTEGER_CST)
++	break;
++      /* Disallow pointer and offset types for many of the binary gimple.  */
++      if (POINTER_TYPE_P (lhs_type)
++	  || TREE_CODE (lhs_type) == OFFSET_TYPE)
++	{
++	  error ("invalid types for %qs", code_name);
++	  debug_generic_expr (lhs_type);
++	  debug_generic_expr (rhs1_type);
++	  debug_generic_expr (rhs2_type);
++	  return true;
++	}
+       /* Continue with generic binary expression handling.  */
+       break;
+ 
+diff --git a/gcc/tree-ssa-reassoc.cc b/gcc/tree-ssa-reassoc.cc
+index e3d521e32..6baef4764 100644
+--- a/gcc/tree-ssa-reassoc.cc
++++ b/gcc/tree-ssa-reassoc.cc
+@@ -3617,10 +3617,14 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
+ 	tree type2 = NULL_TREE;
+ 	bool strict_overflow_p = false;
+ 	candidates.truncate (0);
++	if (POINTER_TYPE_P (type1))
++	  type1 = pointer_sized_int_node;
+ 	for (j = i; j; j = chains[j - 1])
+ 	  {
+ 	    tree type = TREE_TYPE (ranges[j - 1].exp);
+ 	    strict_overflow_p |= ranges[j - 1].strict_overflow_p;
++	    if (POINTER_TYPE_P (type))
++	      type = pointer_sized_int_node;
+ 	    if ((b % 4) == 3)
+ 	      {
+ 		/* For the signed < 0 cases, the types should be
+@@ -3651,6 +3655,8 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
+ 	    tree type = TREE_TYPE (ranges[j - 1].exp);
+ 	    if (j == k)
+ 	      continue;
++	    if (POINTER_TYPE_P (type))
++	      type = pointer_sized_int_node;
+ 	    if ((b % 4) == 3)
+ 	      {
+ 		if (!useless_type_conversion_p (type1, type))
+@@ -3680,7 +3686,7 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
+ 		op = r->exp;
+ 		continue;
+ 	      }
+-	    if (id == l)
++	    if (id == l || POINTER_TYPE_P (TREE_TYPE (op)))
+ 	      {
+ 		code = (b % 4) == 3 ? BIT_NOT_EXPR : NOP_EXPR;
+ 		g = gimple_build_assign (make_ssa_name (type1), code, op);
+@@ -3704,6 +3710,14 @@ optimize_range_tests_cmp_bitwise (enum tree_code opcode, int first, int length,
+ 	    gimple_seq_add_stmt_without_update (&seq, g);
+ 	    op = gimple_assign_lhs (g);
+ 	  }
++	type1 = TREE_TYPE (ranges[k - 1].exp);
++	if (POINTER_TYPE_P (type1))
++	  {
++	    gimple *g
++	      = gimple_build_assign (make_ssa_name (type1), NOP_EXPR, op);
++	    gimple_seq_add_stmt_without_update (&seq, g);
++	    op = gimple_assign_lhs (g);
++	  }
+ 	candidates.pop ();
+ 	if (update_range_test (&ranges[k - 1], NULL, candidates.address (),
+ 			       candidates.length (), opcode, ops, op,
+-- 
+2.33.0
+
diff --git a/0300-Remove-erroneous-pattern-from-gimple-ifcvt.patch b/0300-Remove-erroneous-pattern-from-gimple-ifcvt.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0eca175156e2190e6135a0e1fb80b979df4f8a7b
--- /dev/null
+++ b/0300-Remove-erroneous-pattern-from-gimple-ifcvt.patch
@@ -0,0 +1,55 @@
+From 91ef8899a80e493042fd2687ad89064c9f90cf17 Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 
+Date: Thu, 31 Oct 2024 16:14:34 +0800
+Subject: [PATCH 3/6] Remove erroneous pattern from gimple ifcvt
+
+Signed-off-by: Pronin Alexander  
+---
+ gcc/match.pd                          |  2 +-
+ gcc/testsuite/gcc.dg/ifcvt-gimple-1.c | 21 +++++++++++++++++++++
+ 2 files changed, 22 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.dg/ifcvt-gimple-1.c
+
+diff --git a/gcc/match.pd b/gcc/match.pd
+index 8f41c292f..2dd6581d1 100644
+--- a/gcc/match.pd
++++ b/gcc/match.pd
+@@ -4276,7 +4276,7 @@ DEFINE_INT_AND_FLOAT_ROUND_FN (RINT)
+ )
+ 
+ (if (flag_if_conversion_gimple)
+- (for simple_op (plus minus bit_and bit_ior bit_xor)
++ (for simple_op (plus minus bit_ior bit_xor)
+   (simplify
+    (cond @0 (simple_op @1 INTEGER_CST@2) @1)
+    (switch
+diff --git a/gcc/testsuite/gcc.dg/ifcvt-gimple-1.c b/gcc/testsuite/gcc.dg/ifcvt-gimple-1.c
+new file mode 100644
+index 000000000..381a4ad51
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/ifcvt-gimple-1.c
+@@ -0,0 +1,21 @@
++/* { dg-do run } */
++/* { dg-options "-O2 -fno-inline -fif-conversion-gimple" } */
++
++#include 
++
++void foo(int a, int *p) {
++    *p = a;
++}
++
++void verify (int a) {
++    if (a != 3)
++        abort ();
++}
++
++int main() {
++    int a = 0;
++    foo (3, &a);
++    int tmp = (a > 7) ? a & 1 : a;
++    verify (tmp);
++    return 0;
++}
+-- 
+2.33.0
+
diff --git a/0301-Add-required-check-for-iteration-through-uses.patch b/0301-Add-required-check-for-iteration-through-uses.patch
new file mode 100644
index 0000000000000000000000000000000000000000..105f4f75616777a5f8e3437645f41c75bc7b5d2b
--- /dev/null
+++ b/0301-Add-required-check-for-iteration-through-uses.patch
@@ -0,0 +1,33 @@
+From ca24d352e98e357f4f7b8f0d262201765705a08a Mon Sep 17 00:00:00 2001
+From: Pronin Alexander 
+Date: Thu, 31 Oct 2024 16:31:33 +0800
+Subject: [PATCH 4/6] Add required check for iteration through uses
+
+Signed-off-by: Pronin Alexander  
+---
+ gcc/tree-ssa-math-opts.cc | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/tree-ssa-math-opts.cc b/gcc/tree-ssa-math-opts.cc
+index 2c06b8a60..80c06fa01 100644
+--- a/gcc/tree-ssa-math-opts.cc
++++ b/gcc/tree-ssa-math-opts.cc
+@@ -4938,8 +4938,13 @@ convert_double_size_mul (gimple_stmt_iterator *gsi, gimple *stmt)
+ 
+   /* Find the mult low part getter.  */
+   FOR_EACH_IMM_USE_STMT (use_stmt, use_iter, match[3])
+-    if (gimple_assign_rhs_code (use_stmt) == REALPART_EXPR)
+-      break;
++    {
++      if (!is_gimple_assign (use_stmt))
++	continue;
++
++      if (gimple_assign_rhs_code (use_stmt) == REALPART_EXPR)
++	break;
++    }
+ 
+   /* Create high and low (if needed) parts extractors.  */
+   /* Low part.  */
+-- 
+2.33.0
+
diff --git a/0302-Added-param-for-optimization-for-merging-bb-s-with-c.patch b/0302-Added-param-for-optimization-for-merging-bb-s-with-c.patch
new file mode 100644
index 0000000000000000000000000000000000000000..da25a9e25c950f625e6e27963f2ab5c54f33d32f
--- /dev/null
+++ b/0302-Added-param-for-optimization-for-merging-bb-s-with-c.patch
@@ -0,0 +1,158 @@
+From 210147e28d542a03588ba3c3fa473301a03bb687 Mon Sep 17 00:00:00 2001
+From: Gmyrikov Konstantin 
+Date: Thu, 31 Oct 2024 16:45:15 +0800
+Subject: [PATCH 6/6] Added param for optimization for merging bb's with cheap
+ insns. Zero param means the optimization is turned off (default implementation);
+ one means it is turned on
+
+Signed-off-by: Gmyrikov Konstantin  
+---
+ gcc/params.opt                  |  4 +++
+ gcc/testsuite/gcc.dg/if_comb1.c | 13 +++++++++
+ gcc/testsuite/gcc.dg/if_comb2.c | 13 +++++++++
+ gcc/testsuite/gcc.dg/if_comb3.c | 12 +++++++++
+ gcc/tree-ssa-ifcombine.cc       | 47 ++++++++++++++++++++++++++++++---
+ 5 files changed, 86 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/if_comb1.c
+ create mode 100644 gcc/testsuite/gcc.dg/if_comb2.c
+ create mode 100644 gcc/testsuite/gcc.dg/if_comb3.c
+
+diff --git a/gcc/params.opt b/gcc/params.opt
+index fc700ab79..3ddfaf5b2 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -789,6 +789,10 @@ Maximum number of VALUEs handled during a single find_base_term call.
+ Common Joined UInteger Var(param_max_vrp_switch_assertions) Init(10) Param Optimization
+ Maximum number of assertions to add along the default edge of a switch statement during VRP.
+ 
++-param=merge-assign-stmts-ifcombine=
++Common Joined UInteger Var(param_merge_assign_stmts_ifcombine) Init(0) IntegerRange(0, 1) Param Optimization
++Whether bb's with cheap gimple_assign stmts should be merged in the ifcombine pass.
++
+ -param=min-crossjump-insns=
+ Common Joined UInteger Var(param_min_crossjump_insns) Init(5) IntegerRange(1, 65536) Param Optimization
+ The minimum number of matching instructions to consider for crossjumping.
+diff --git a/gcc/testsuite/gcc.dg/if_comb1.c b/gcc/testsuite/gcc.dg/if_comb1.c
+new file mode 100644
+index 000000000..e00adc37d
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/if_comb1.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-Ofast -S --param=merge-assign-stmts-ifcombine=1 -fdump-tree-ifcombine" } */
++
++int foo (double a, double b, int c)
++{
++    if (c < 10 || a - b > 1.0)
++        return 0;
++    else 
++        return 1;
++}
++
++/* { dg-final { scan-tree-dump "optimizing two comparisons" "ifcombine"} } */
++/* { dg-final { scan-tree-dump "Merging blocks" "ifcombine"} } */
+diff --git a/gcc/testsuite/gcc.dg/if_comb2.c b/gcc/testsuite/gcc.dg/if_comb2.c
+new file mode 100644
+index 000000000..176e7e726
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/if_comb2.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-Ofast -S --param=merge-assign-stmts-ifcombine=1 -fdump-tree-ifcombine" } */
++
++int foo (int a, int b, int c)
++{
++    if (a > 1 || b * c < 10)
++        return 0;
++    else 
++        return 1;
++}
++
++/* { dg-final { scan-tree-dump "optimizing two comparisons" "ifcombine"} } */
++/* { dg-final { scan-tree-dump "Merging blocks" "ifcombine"} } */
+diff --git a/gcc/testsuite/gcc.dg/if_comb3.c b/gcc/testsuite/gcc.dg/if_comb3.c
+new file mode 100644
+index 000000000..aa2e4510c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/if_comb3.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-Ofast -S --param=merge-assign-stmts-ifcombine=1 -fdump-tree-ifcombine" } */
++
++int foo (int a, int b, int c)
++{
++    if (a > 1 && b + c < 10)
++        a++;
++    return a;
++}
++
++/* { dg-final { scan-tree-dump "optimizing two comparisons" "ifcombine"} } */
++/* { dg-final { scan-tree-dump "Merging blocks" "ifcombine"} } */
+diff --git a/gcc/tree-ssa-ifcombine.cc b/gcc/tree-ssa-ifcombine.cc
+index ce9bbebf9..264a8bcae 100644
+--- a/gcc/tree-ssa-ifcombine.cc
++++ b/gcc/tree-ssa-ifcombine.cc
+@@ -110,6 +110,18 @@ recognize_if_then_else (basic_block cond_bb,
+   return true;
+ }
+ 
++/* Verify if gimple insn cheap for param=merge-assign-stmts-ifcombine
++   optimization.  */
++
++bool is_insn_cheap (enum tree_code t)
++{
++  static enum tree_code cheap_insns[] = {MULT_EXPR, PLUS_EXPR, MINUS_EXPR};
++  for (int i = 0; i < sizeof (cheap_insns)/sizeof (enum tree_code); i++)
++    if (t == cheap_insns[i])
++      return 1;
++  return 0;
++}
++
+ /* Verify if the basic block BB does not have side-effects.  Return
+    true in this case, else false.  */
+ 
+@@ -572,9 +584,38 @@ ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
+ 	      = param_logical_op_non_short_circuit;
+ 	  if (!logical_op_non_short_circuit || sanitize_coverage_p ())
+ 	    return false;
+-	  /* Only do this optimization if the inner bb contains only the conditional. */
+-	  if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
+-	    return false;
++	  if (param_merge_assign_stmts_ifcombine)
++	    {
++	      int number_cheap_insns = 0;
++	      int number_conds = 0;
++	      for (auto i = gsi_start_nondebug_after_labels_bb
++	           (outer_cond_bb); !gsi_end_p (i); gsi_next_nondebug (&i))
++	        if (gimple_code (gsi_stmt (i)) == GIMPLE_ASSIGN
++	            && is_insn_cheap (gimple_assign_rhs_code (gsi_stmt (i))))
++	          number_cheap_insns++;
++	        else if (gimple_code (gsi_stmt (i)) == GIMPLE_COND)
++	          number_conds++;
++	      for (auto i = gsi_start_nondebug_after_labels_bb
++	           (inner_cond_bb); !gsi_end_p (i); gsi_next_nondebug (&i))
++	        if (gimple_code (gsi_stmt (i)) == GIMPLE_ASSIGN
++	            && is_insn_cheap (gimple_assign_rhs_code (gsi_stmt (i))))
++	          number_cheap_insns++;
++	        else if (gimple_code (gsi_stmt (i)) == GIMPLE_COND)
++	          number_conds++;
++	      if (!(number_cheap_insns == 1 && number_conds == 2)
++	          && !gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb
++	          (inner_cond_bb)))
++	        return false;
++	    }
++	  else
++	    {
++	    /* Only do this optimization if the inner bb contains
++	    only the conditional.  */
++	      if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb
++	          (inner_cond_bb)))
++	        return false;
++	    }
++
+ 	  t1 = fold_build2_loc (gimple_location (inner_cond),
+ 				inner_cond_code,
+ 				boolean_type_node,
+-- 
+2.33.0
+
diff --git a/0303-Add-generation-of-stream-in-functions-for-pre-versio.patch b/0303-Add-generation-of-stream-in-functions-for-pre-versio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..dfa965b684d5c45f8f78e4e66a40e2926f5a5260
--- /dev/null
+++ b/0303-Add-generation-of-stream-in-functions-for-pre-versio.patch
@@ -0,0 +1,6263 @@
+From 4789a6eae616df0b7d07901114c91a2099e4d56d Mon Sep 17 00:00:00 2001
+From: wangchunyang 
+Date: Wed, 13 Nov 2024 11:26:16 +0800
+Subject: [PATCH 1/2] Add generation of stream in functions for pre-version lto
+ objects
+
+---
+ gcc/lto-streamer.h    |    6 +
+ gcc/opt-read.awk      |    1 +
+ gcc/optc-save-gen.awk | 6044 ++++++++++++++++++++++++++++++++++++++++-
+ 3 files changed, 6050 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/lto-streamer.h b/gcc/lto-streamer.h
+index 597e9e405..9db1a20b3 100644
+--- a/gcc/lto-streamer.h
++++ b/gcc/lto-streamer.h
+@@ -943,12 +943,18 @@ void cl_target_option_stream_in (class data_in *,
+ 				 struct bitpack_d *,
+ 				 struct cl_target_option *);
+ 
++void cl_target_option_stream_in_prev (class data_in *,
++				 struct bitpack_d *,
++				 struct cl_target_option *);
++
+ void cl_optimization_stream_out (struct output_block *,
+ 				 struct bitpack_d *, struct cl_optimization *);
+ 
+ void cl_optimization_stream_in (class data_in *,
+ 				struct bitpack_d *, struct cl_optimization *);
+ 
++void cl_optimization_stream_in_prev (class data_in *,
++				struct bitpack_d *, struct cl_optimization *);
+ 
+ 
+ /* In lto-opts.cc.  */
+diff --git a/gcc/opt-read.awk b/gcc/opt-read.awk
+index ce3617c8d..624cf6e3d 100644
+--- a/gcc/opt-read.awk
++++ b/gcc/opt-read.awk
+@@ -71,6 +71,7 @@ BEGIN {
+ 			n_target_save++
+ 
+ 			extra_target_vars[n_extra_target_vars] = name
++			extra_target_vars_set[name] = 1
+ 			extra_target_var_types[n_extra_target_vars] = type
+ 			n_extra_target_vars++
+ 		}
+diff --git a/gcc/optc-save-gen.awk b/gcc/optc-save-gen.awk
+index 76e9b3cb9..7c012dd4e 100644
+--- a/gcc/optc-save-gen.awk
++++ b/gcc/optc-save-gen.awk
+@@ -174,6 +174,8 @@ print "  unsigned HOST_WIDE_INT mask = 0;";
+ j = 0;
+ k = 0;
+ for (i = 0; i < n_opt_other; i++) {
++	var_opt_other_j[var_opt_other[i]] = j;
++	var_opt_other_k[var_opt_other[i]] = k;
+ 	print "  if (opts_set->x_" var_opt_other[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -185,6 +187,8 @@ for (i = 0; i < n_opt_other; i++) {
+ }
+ 
+ for (i = 0; i < n_opt_int; i++) {
++	var_opt_int_j[var_opt_int[i]] = j;
++	var_opt_int_k[var_opt_int[i]] = k;
+ 	print "  if (opts_set->x_" var_opt_int[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -196,6 +200,8 @@ for (i = 0; i < n_opt_int; i++) {
+ }
+ 
+ for (i = 0; i < n_opt_enum; i++) {
++	var_opt_enum_j[var_opt_enum[i]] = j;
++	var_opt_enum_k[var_opt_enum[i]] = k;
+ 	print "  if (opts_set->x_" var_opt_enum[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -207,6 +213,8 @@ for (i = 0; i < n_opt_enum; i++) {
+ }
+ 
+ for (i = 0; i < n_opt_short; i++) {
++	var_opt_short_j[var_opt_short[i]] = j;
++	var_opt_short_k[var_opt_short[i]] = k;
+ 	print "  if (opts_set->x_" var_opt_short[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -218,6 +226,8 @@ for (i = 0; i < n_opt_short; i++) {
+ }
+ 
+ for (i = 0; i < n_opt_char; i++) {
++	var_opt_char_j[var_opt_char[i]] = j;
++	var_opt_char_k[var_opt_char[i]] = k;
+ 	print "  if (opts_set->x_" var_opt_char[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -229,6 +239,8 @@ for (i = 0; i < n_opt_char; i++) {
+ }
+ 
+ for (i = 0; i < n_opt_string; i++) {
++	var_opt_string_j[var_opt_string[i]] = j;
++	var_opt_string_k[var_opt_string[i]] = k;
+ 	print "  if (opts_set->x_" var_opt_string[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -604,6 +616,8 @@ for (i = 0; i < n_extra_target_vars; i++) {
+ 	if (j == 0 && k == 0) {
+ 		print "  unsigned HOST_WIDE_INT mask = 0;";
+ 	}
++	extra_target_vars_j[extra_target_vars[i]] = j;
++	extra_target_vars_k[extra_target_vars[i]] = k;
+ 	print "  if (opts_set->x_" extra_target_vars[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -622,6 +636,8 @@ for (i = 0; i < n_target_other; i++) {
+ 	if (j == 0 && k == 0) {
+ 		print "  unsigned HOST_WIDE_INT mask = 0;";
+ 	}
++	var_target_other_j[var_target_other[i]] = j;
++	var_target_other_k[var_target_other[i]] = k;
+ 	print "  if (opts_set->x_" var_target_other[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -636,6 +652,8 @@ for (i = 0; i < n_target_enum; i++) {
+ 	if (j == 0 && k == 0) {
+ 		print "  unsigned HOST_WIDE_INT mask = 0;";
+ 	}
++	var_target_enum_j[var_target_enum[i]] = j;
++	var_target_enum_k[var_target_enum[i]] = k;
+ 	print "  if (opts_set->x_" var_target_enum[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -654,6 +672,8 @@ for (i = 0; i < n_target_int; i++) {
+ 	if (j == 0 && k == 0) {
+ 		print "  unsigned HOST_WIDE_INT mask = 0;";
+ 	}
++	var_target_int_j[var_target_int[i]] = j;
++	var_target_int_k[var_target_int[i]] = k;
+ 	print "  if (opts_set->x_" var_target_int[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -668,6 +688,8 @@ for (i = 0; i < n_target_short; i++) {
+ 	if (j == 0 && k == 0) {
+ 		print "  unsigned HOST_WIDE_INT mask = 0;";
+ 	}
++	var_target_short_j[var_target_short[i]] = j;
++	var_target_short_k[var_target_short[i]] = k;
+ 	print "  if (opts_set->x_" var_target_short[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -682,6 +704,8 @@ for (i = 0; i < n_target_char; i++) {
+ 	if (j == 0 && k == 0) {
+ 		print "  unsigned HOST_WIDE_INT mask = 0;";
+ 	}
++	var_target_char_j[var_target_char[i]] = j;
++	var_target_char_k[var_target_char[i]] = k;
+ 	print "  if (opts_set->x_" var_target_char[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -696,6 +720,8 @@ for (i = 0; i < n_target_string; i++) {
+ 	if (j == 0 && k == 0) {
+ 		print "  unsigned HOST_WIDE_INT mask = 0;";
+ 	}
++	var_target_string_j[var_target_string[i]] = j;
++	var_target_string_k[var_target_string[i]] = k;
+ 	print "  if (opts_set->x_" var_target_string[i] ") mask |= HOST_WIDE_INT_1U << " j ";";
+ 	j++;
+ 	if (j == 64) {
+@@ -1038,6 +1064,7 @@ for (i = 0; i < n_target_save; i++) {
+ 	sub(" *" name "$", "", type)
+ 	if (target_save_decl[i] ~ "^const char \\*+[_" alnum "]+$") {
+ 		var_target_str[n_target_str++] = name;
++		var_target_str_set[name] = 1;
+ 		string_options_names[name]++
+ 	}
+ 	else {
+@@ -1048,12 +1075,14 @@ for (i = 0; i < n_target_save; i++) {
+ 			sub("\\[.+", "", name)
+ 			sub(" [^ ]+$", "", type)
+ 			var_target_array[n_target_array] = name
++			var_target_array_set[name] = 1
+ 			var_target_array_type[n_target_array] = type
+ 			var_target_array_size[n_target_array++] = size
+ 		}
+ 		else {
+ 			var_target_val_type[n_target_val] = type;
+ 			var_target_val[n_target_val++] = name;
++			var_target_val_set[name] = 1;
+ 		}
+ 	}
+ }
+@@ -1069,17 +1098,21 @@ if (have_save) {
+ 
+ 			var_list_seen[name]++;
+ 			otype = var_type_struct(flags[i])
+-			if (otype ~ "^const char \\**$")
++			if (otype ~ "^const char \\**$") {
+ 				var_target_str[n_target_str++] = "x_" name;
++				var_target_str_set["x_" name] = 1;
++			}
+ 			else {
+ 				var_target_val_type[n_target_val] = otype;
+ 				var_target_val[n_target_val++] = "x_" name;
++				var_target_val_set["x_" name];
+ 			}
+ 		}
+ 	}
+ } else {
+ 	var_target_val_type[n_target_val] = "int";
+ 	var_target_val[n_target_val++] = "x_target_flags";
++	var_target_val_set["x_target_flags"] = 1;
+ }
+ 
+ for (i = 0; i < n_target_str; i++) {
+@@ -1253,6 +1286,224 @@ for (i = 0; i < n_target_int; i++) {
+ 
+ print "}";
+ 
++print "";
++print "/* Stream in target options  */";
++print "void";
++print "cl_target_option_stream_in_prev (struct data_in *data_in ATTRIBUTE_UNUSED,";
++print "                                 struct bitpack_d *bp ATTRIBUTE_UNUSED,";
++print "                                 struct cl_target_option *ptr ATTRIBUTE_UNUSED)";
++print "{";
++if ("x_aarch64_branch_protection_string" in var_target_str_set) {
++  print "  ptr->x_aarch64_branch_protection_string = bp_unpack_string (data_in, bp);"
++  print "  if (ptr->x_aarch64_branch_protection_string)"
++  print "  ptr->x_aarch64_branch_protection_string = xstrdup (ptr->x_aarch64_branch_protection_string);"
++}
++else {
++  print "  bp_unpack_string (data_in, bp);"
++}
++if ("x_aarch64_override_tune_string" in var_target_str_set) {
++  print "  ptr->x_aarch64_override_tune_string = bp_unpack_string (data_in, bp);"
++  print "  if (ptr->x_aarch64_override_tune_string)"
++  print "  ptr->x_aarch64_override_tune_string = xstrdup (ptr->x_aarch64_override_tune_string);"
++}
++else {
++  print "  bp_unpack_string (data_in, bp);"
++}
++if ("x_aarch64_asm_isa_flags" in var_target_val_set) {
++  print "  ptr->x_aarch64_asm_isa_flags = (aarch64_feature_flags) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_isa_flags" in var_target_val_set) {
++  print "  ptr->x_aarch64_isa_flags = (aarch64_feature_flags) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_selected_arch" in var_target_val_set) {
++  print "  ptr->x_selected_arch = (enum aarch64_arch) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_ra_sign_key" in var_target_val_set) {
++  print "  ptr->x_aarch64_ra_sign_key = (enum aarch64_key_type) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_selected_tune" in var_target_val_set) {
++  print "  ptr->x_selected_tune = (enum aarch64_processor) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_stack_protector_guard_offset" in var_target_val_set) {
++  print "  ptr->x_aarch64_stack_protector_guard_offset = (long) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_enable_bti" in var_target_val_set) {
++  print "  ptr->x_aarch64_enable_bti = (unsigned) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_cmodel_var" in var_target_val_set) {
++  print "  ptr->x_aarch64_cmodel_var = (enum aarch64_code_model ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_fix_a53_err835769" in var_target_val_set) {
++  print "  ptr->x_aarch64_fix_a53_err835769 = (signed char ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_fix_a53_err843419" in var_target_val_set) {
++  print "  ptr->x_aarch64_fix_a53_err843419 = (signed char ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_target_flags" in var_target_val_set) {
++  print "  ptr->x_target_flags = (/* - */ int ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_flag_omit_leaf_frame_pointer" in var_target_val_set) {
++  print "  ptr->x_flag_omit_leaf_frame_pointer = (signed char ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_flag_outline_atomics" in var_target_val_set) {
++  print "  ptr->x_aarch64_flag_outline_atomics = (signed char ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_pcrelative_literal_loads" in var_target_val_set) {
++  print "  ptr->x_pcrelative_literal_loads = (signed char ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_ra_sign_scope" in var_target_val_set) {
++  print "  ptr->x_aarch64_ra_sign_scope = (enum aarch64_function_type ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++if ("x_aarch64_tls_dialect" in var_target_val_set) {
++  print "  ptr->x_aarch64_tls_dialect = (enum aarch64_tls_type ) bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++print "  unsigned HOST_WIDE_INT explicit_mask_prev[1];"
++print "  for (size_t i = 0; i < 1; i++)"
++print "    explicit_mask_prev[i] = bp_unpack_value (bp, 64);"
++print "  for (size_t i = 0; i < sizeof (ptr->explicit_mask) / sizeof (ptr->explicit_mask[0]); i++)"
++print "    ptr->explicit_mask[i] = 0;"
++if ("aarch64_asm_isa_flags" in extra_target_vars_k) {
++  k = extra_target_vars_k["aarch64_asm_isa_flags"]
++  j = extra_target_vars_j["aarch64_asm_isa_flags"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_isa_flags" in extra_target_vars_k) {
++  k = extra_target_vars_k["aarch64_isa_flags"]
++  j = extra_target_vars_j["aarch64_isa_flags"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("selected_arch" in extra_target_vars_k) {
++  k = extra_target_vars_k["selected_arch"]
++  j = extra_target_vars_j["selected_arch"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_ra_sign_key" in extra_target_vars_k) {
++  k = extra_target_vars_k["aarch64_ra_sign_key"]
++  j = extra_target_vars_j["aarch64_ra_sign_key"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("selected_tune" in extra_target_vars_k) {
++  k = extra_target_vars_k["selected_tune"]
++  j = extra_target_vars_j["selected_tune"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_stack_protector_guard_offset" in extra_target_vars_k) {
++  k = extra_target_vars_k["aarch64_stack_protector_guard_offset"]
++  j = extra_target_vars_j["aarch64_stack_protector_guard_offset"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_enable_bti" in extra_target_vars_k) {
++  k = extra_target_vars_k["aarch64_enable_bti"]
++  j = extra_target_vars_j["aarch64_enable_bti"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_cmodel_var" in var_target_enum_k) {
++  k = var_target_enum_k["aarch64_cmodel_var"]
++  j = var_target_enum_j["aarch64_cmodel_var"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_ra_sign_scope" in var_target_enum_k) {
++  k = var_target_enum_k["aarch64_ra_sign_scope"]
++  j = var_target_enum_j["aarch64_ra_sign_scope"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_tls_dialect" in var_target_enum_k) {
++  k = var_target_enum_k["aarch64_tls_dialect"]
++  j = var_target_enum_j["aarch64_tls_dialect"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_fix_a53_err835769" in var_target_char_k) {
++  k = var_target_char_k["aarch64_fix_a53_err835769"]
++  j = var_target_char_j["aarch64_fix_a53_err835769"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_fix_a53_err843419" in var_target_char_k) {
++  k = var_target_char_k["aarch64_fix_a53_err843419"]
++  j = var_target_char_j["aarch64_fix_a53_err843419"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_omit_leaf_frame_pointer" in var_target_char_k) {
++  k = var_target_char_k["flag_omit_leaf_frame_pointer"]
++  j = var_target_char_j["flag_omit_leaf_frame_pointer"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_flag_outline_atomics" in var_target_char_k) {
++  k = var_target_char_k["aarch64_flag_outline_atomics"]
++  j = var_target_char_j["aarch64_flag_outline_atomics"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("pcrelative_literal_loads" in var_target_char_k) {
++  k = var_target_char_k["pcrelative_literal_loads"]
++  j = var_target_char_j["pcrelative_literal_loads"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_branch_protection_string" in var_target_string_k) {
++  k = var_target_string_k["aarch64_branch_protection_string"]
++  j = var_target_string_j["aarch64_branch_protection_string"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("aarch64_override_tune_string" in var_target_string_k) {
++  k = var_target_string_k["aarch64_override_tune_string"]
++  j = var_target_string_j["aarch64_override_tune_string"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("target_flags" in var_target_explicit_mask) {
++  print "  ptr->explicit_mask_target_flags = bp_unpack_value (bp, 64);"
++}
++else {
++  print "  bp_unpack_value (bp, 64);"
++}
++print "}";
++print "";
++
+ print "/* free heap memory used by target options  */";
+ print "void";
+ print "cl_target_option_free (struct cl_target_option *ptr ATTRIBUTE_UNUSED)";
+@@ -1266,15 +1517,19 @@ print "}";
+ 
+ n_opt_val = 4;
+ var_opt_val[0] = "x_optimize"
++var_opt_val_set["x_optimize"] = 1
+ var_opt_val_type[0] = "char "
+ var_opt_hash[0] = 1;
+ var_opt_val[1] = "x_optimize_size"
++var_opt_val_set["x_optimize_size"] = 1
+ var_opt_val_type[1] = "char "
+ var_opt_hash[1] = 1;
+ var_opt_val[2] = "x_optimize_debug"
++var_opt_val_set["x_optimize_debug"] = 1
+ var_opt_val_type[2] = "char "
+ var_opt_hash[2] = 1;
+ var_opt_val[3] = "x_optimize_fast"
++var_opt_val_set["x_optimize_fast"] = 1
+ var_opt_val_type[3] = "char "
+ var_opt_hash[3] = 1;
+ for (i = 0; i < n_opts; i++) {
+@@ -1291,6 +1546,7 @@ for (i = 0; i < n_opts; i++) {
+ 		otype = var_type_struct(flags[i])
+ 		var_opt_val_type[n_opt_val] = otype;
+ 		var_opt_val[n_opt_val] = "x_" name;
++		var_opt_val_set["x_" name] = 1;
+ 		var_opt_hash[n_opt_val] = flag_set_p("Optimization", flags[i]);
+ 		var_opt_init[n_opt_val] = opt_args("Init", flags[i]);
+ 		n_opt_val++;
+@@ -1415,6 +1671,5792 @@ for (i = 0; i < n_opt_val; i++) {
+ print "  for (size_t i = 0; i < sizeof (ptr->explicit_mask) / sizeof (ptr->explicit_mask[0]); i++)";
+ print "    ptr->explicit_mask[i] = bp_unpack_value (bp, 64);";
+ print "}";
++
++print "";
++print "/* Stream in optimization options  */";
++print "void";
++print "cl_optimization_stream_in_prev (struct data_in *data_in ATTRIBUTE_UNUSED,";
++print "                                struct bitpack_d *bp ATTRIBUTE_UNUSED,";
++print "                                struct cl_optimization *ptr ATTRIBUTE_UNUSED)";
++print "{";
++if ("x_optimize" in var_opt_val_set) {
++  print "  ptr->x_optimize = (char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_optimize_size" in var_opt_val_set) {
++  print "  ptr->x_optimize_size = (char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_optimize_debug" in var_opt_val_set) {
++  print "  ptr->x_optimize_debug = (char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_optimize_fast" in var_opt_val_set) {
++  print "  ptr->x_optimize_fast = (char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_align_loop_iterations" in var_opt_val_set) {
++  print "  ptr->x_param_align_loop_iterations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4 > (int ) 10)"
++  print "    ptr->x_param_align_loop_iterations ^= 4;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_align_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_align_threshold = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_align_threshold ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_asan_protect_allocas" in var_opt_val_set) {
++  print "  ptr->x_param_asan_protect_allocas = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_asan_protect_allocas ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_asan_instrument_reads" in var_opt_val_set) {
++  print "  ptr->x_param_asan_instrument_reads = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_asan_instrument_reads ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_asan_instrument_writes" in var_opt_val_set) {
++  print "  ptr->x_param_asan_instrument_writes = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_asan_instrument_writes ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_asan_instrumentation_with_call_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_asan_instrumentation_with_call_threshold = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (7000 > (int ) 10)"
++  print "    ptr->x_param_asan_instrumentation_with_call_threshold ^= 7000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_asan_memintrin" in var_opt_val_set) {
++  print "  ptr->x_param_asan_memintrin = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_asan_memintrin ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_asan_stack" in var_opt_val_set) {
++  print "  ptr->x_param_asan_stack = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_asan_stack ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_asan_use_after_return" in var_opt_val_set) {
++  print "  ptr->x_param_asan_use_after_return = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_asan_use_after_return ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_avg_loop_niter" in var_opt_val_set) {
++  print "  ptr->x_param_avg_loop_niter = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_avg_loop_niter ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_avoid_fma_max_bits" in var_opt_val_set) {
++  print "  ptr->x_param_avoid_fma_max_bits = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_builtin_expect_probability" in var_opt_val_set) {
++  print "  ptr->x_param_builtin_expect_probability = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (90 > (int ) 10)"
++  print "    ptr->x_param_builtin_expect_probability ^= 90;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_builtin_string_cmp_inline_length" in var_opt_val_set) {
++  print "  ptr->x_param_builtin_string_cmp_inline_length = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_builtin_string_cmp_inline_length ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_case_values_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_case_values_threshold = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_comdat_sharing_probability" in var_opt_val_set) {
++  print "  ptr->x_param_comdat_sharing_probability = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (20 > (int ) 10)"
++  print "    ptr->x_param_comdat_sharing_probability ^= 20;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_pointer_compression_size" in var_opt_val_set) {
++  print "  ptr->x_param_pointer_compression_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (32 > (int ) 10)"
++  print "    ptr->x_param_pointer_compression_size ^= 32;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_construct_interfere_size" in var_opt_val_set) {
++  print "  ptr->x_param_construct_interfere_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (0 > (int ) 10)"
++  print "    ptr->x_param_construct_interfere_size ^= 0;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_destruct_interfere_size" in var_opt_val_set) {
++  print "  ptr->x_param_destruct_interfere_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (0 > (int ) 10)"
++  print "    ptr->x_param_destruct_interfere_size ^= 0;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_dse_max_alias_queries_per_store" in var_opt_val_set) {
++  print "  ptr->x_param_dse_max_alias_queries_per_store = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (256 > (int ) 10)"
++  print "    ptr->x_param_dse_max_alias_queries_per_store ^= 256;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_dse_max_object_size" in var_opt_val_set) {
++  print "  ptr->x_param_dse_max_object_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (256 > (int ) 10)"
++  print "    ptr->x_param_dse_max_object_size ^= 256;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_early_inlining_insns" in var_opt_val_set) {
++  print "  ptr->x_param_early_inlining_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (6 > (int ) 10)"
++  print "    ptr->x_param_early_inlining_insns ^= 6;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_evrp_mode" in var_opt_val_set) {
++  print "  ptr->x_param_evrp_mode = (enum evrp_mode ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_evrp_sparse_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_evrp_sparse_threshold = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (800 > (int ) 10)"
++  print "    ptr->x_param_evrp_sparse_threshold ^= 800;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_evrp_switch_limit" in var_opt_val_set) {
++  print "  ptr->x_param_evrp_switch_limit = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_evrp_switch_limit ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_fsm_scale_path_blocks" in var_opt_val_set) {
++  print "  ptr->x_param_fsm_scale_path_blocks = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_fsm_scale_path_blocks ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_fsm_scale_path_stmts" in var_opt_val_set) {
++  print "  ptr->x_param_fsm_scale_path_stmts = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_fsm_scale_path_stmts ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_gcse_after_reload_critical_fraction" in var_opt_val_set) {
++  print "  ptr->x_param_gcse_after_reload_critical_fraction = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_gcse_after_reload_critical_fraction ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_gcse_after_reload_partial_fraction" in var_opt_val_set) {
++  print "  ptr->x_param_gcse_after_reload_partial_fraction = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_gcse_after_reload_partial_fraction ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_gcse_cost_distance_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_gcse_cost_distance_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_gcse_cost_distance_ratio ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_gcse_unrestricted_cost" in var_opt_val_set) {
++  print "  ptr->x_param_gcse_unrestricted_cost = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_gcse_unrestricted_cost ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_graphite_max_arrays_per_scop" in var_opt_val_set) {
++  print "  ptr->x_param_graphite_max_arrays_per_scop = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_graphite_max_arrays_per_scop ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_graphite_max_nb_scop_params" in var_opt_val_set) {
++  print "  ptr->x_param_graphite_max_nb_scop_params = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_graphite_max_nb_scop_params ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_hwasan_instrument_allocas" in var_opt_val_set) {
++  print "  ptr->x_param_hwasan_instrument_allocas = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_hwasan_instrument_allocas ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_hwasan_instrument_mem_intrinsics" in var_opt_val_set) {
++  print "  ptr->x_param_hwasan_instrument_mem_intrinsics = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_hwasan_instrument_mem_intrinsics ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_hwasan_instrument_reads" in var_opt_val_set) {
++  print "  ptr->x_param_hwasan_instrument_reads = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_hwasan_instrument_reads ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_hwasan_instrument_stack" in var_opt_val_set) {
++  print "  ptr->x_param_hwasan_instrument_stack = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_hwasan_instrument_stack ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_hwasan_instrument_writes" in var_opt_val_set) {
++  print "  ptr->x_param_hwasan_instrument_writes = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_hwasan_instrument_writes ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_hwasan_random_frame_tag" in var_opt_val_set) {
++  print "  ptr->x_param_hwasan_random_frame_tag = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_hwasan_random_frame_tag ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ifcvt_allow_register_renaming" in var_opt_val_set) {
++  print "  ptr->x_param_ifcvt_allow_register_renaming = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_inline_heuristics_hint_percent" in var_opt_val_set) {
++  print "  ptr->x_param_inline_heuristics_hint_percent = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_inline_heuristics_hint_percent ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_inline_min_speedup" in var_opt_val_set) {
++  print "  ptr->x_param_inline_min_speedup = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (30 > (int ) 10)"
++  print "    ptr->x_param_inline_min_speedup ^= 30;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_inline_unit_growth" in var_opt_val_set) {
++  print "  ptr->x_param_inline_unit_growth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_inline_unit_growth ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_eval_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_eval_threshold = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (500 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_eval_threshold ^= 500;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_large_unit_insns" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_large_unit_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16000 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_large_unit_insns ^= 16000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_loop_hint_bonus" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_loop_hint_bonus = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (64 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_loop_hint_bonus ^= 64;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_max_recursive_depth" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_max_recursive_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_max_recursive_depth ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_min_recursive_probability" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_min_recursive_probability = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_min_recursive_probability ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_profile_count_base" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_profile_count_base = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_profile_count_base ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_recursion_penalty" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_recursion_penalty = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_recursion_penalty ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_recursive_freq_factor" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_recursive_freq_factor = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (6 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_recursive_freq_factor ^= 6;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_single_call_penalty" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_single_call_penalty = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (15 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_single_call_penalty ^= 15;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_unit_growth" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_unit_growth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_unit_growth ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_cp_value_list_size" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_cp_value_list_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_ipa_cp_value_list_size ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_jump_function_lookups" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_jump_function_lookups = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_ipa_jump_function_lookups ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_max_aa_steps" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_max_aa_steps = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (25000 > (int ) 10)"
++  print "    ptr->x_param_ipa_max_aa_steps ^= 25000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_max_agg_items" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_max_agg_items = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16 > (int ) 10)"
++  print "    ptr->x_param_ipa_max_agg_items ^= 16;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_max_loop_predicates" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_max_loop_predicates = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16 > (int ) 10)"
++  print "    ptr->x_param_ipa_max_loop_predicates ^= 16;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_max_param_expr_ops" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_max_param_expr_ops = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_ipa_max_param_expr_ops ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_max_switch_predicate_bounds" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_max_switch_predicate_bounds = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (5 > (int ) 10)"
++  print "    ptr->x_param_ipa_max_switch_predicate_bounds ^= 5;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_prefetch_distance_factor" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_prefetch_distance_factor = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4 > (int ) 10)"
++  print "    ptr->x_param_ipa_prefetch_distance_factor ^= 4;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_prefetch_locality" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_prefetch_locality = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_ipa_prefetch_locality ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_prefetch_pagesize" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_prefetch_pagesize = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4096 > (int ) 10)"
++  print "    ptr->x_param_ipa_prefetch_pagesize ^= 4096;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_sra_max_replacements" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_sra_max_replacements = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_ipa_sra_max_replacements ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ipa_sra_ptr_growth_factor" in var_opt_val_set) {
++  print "  ptr->x_param_ipa_sra_ptr_growth_factor = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_ipa_sra_ptr_growth_factor ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ira_consider_dup_in_all_alts" in var_opt_val_set) {
++  print "  ptr->x_param_ira_consider_dup_in_all_alts = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_ira_consider_dup_in_all_alts ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ira_loop_reserved_regs" in var_opt_val_set) {
++  print "  ptr->x_param_ira_loop_reserved_regs = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_ira_loop_reserved_regs ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ira_max_conflict_table_size" in var_opt_val_set) {
++  print "  ptr->x_param_ira_max_conflict_table_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1000 > (int ) 10)"
++  print "    ptr->x_param_ira_max_conflict_table_size ^= 1000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ira_max_loops_num" in var_opt_val_set) {
++  print "  ptr->x_param_ira_max_loops_num = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_ira_max_loops_num ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_iv_always_prune_cand_set_bound" in var_opt_val_set) {
++  print "  ptr->x_param_iv_always_prune_cand_set_bound = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_iv_always_prune_cand_set_bound ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_iv_consider_all_candidates_bound" in var_opt_val_set) {
++  print "  ptr->x_param_iv_consider_all_candidates_bound = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_iv_consider_all_candidates_bound ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_iv_max_considered_uses" in var_opt_val_set) {
++  print "  ptr->x_param_iv_max_considered_uses = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (250 > (int ) 10)"
++  print "    ptr->x_param_iv_max_considered_uses ^= 250;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_jump_table_max_growth_ratio_for_size" in var_opt_val_set) {
++  print "  ptr->x_param_jump_table_max_growth_ratio_for_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (300 > (int ) 10)"
++  print "    ptr->x_param_jump_table_max_growth_ratio_for_size ^= 300;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_jump_table_max_growth_ratio_for_speed" in var_opt_val_set) {
++  print "  ptr->x_param_jump_table_max_growth_ratio_for_speed = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (800 > (int ) 10)"
++  print "    ptr->x_param_jump_table_max_growth_ratio_for_speed ^= 800;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_l1_cache_line_size" in var_opt_val_set) {
++  print "  ptr->x_param_l1_cache_line_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (32 > (int ) 10)"
++  print "    ptr->x_param_l1_cache_line_size ^= 32;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_l1_cache_size" in var_opt_val_set) {
++  print "  ptr->x_param_l1_cache_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (64 > (int ) 10)"
++  print "    ptr->x_param_l1_cache_size ^= 64;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_l2_cache_size" in var_opt_val_set) {
++  print "  ptr->x_param_l2_cache_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (512 > (int ) 10)"
++  print "    ptr->x_param_l2_cache_size ^= 512;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_large_function_growth" in var_opt_val_set) {
++  print "  ptr->x_param_large_function_growth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_large_function_growth ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_large_function_insns" in var_opt_val_set) {
++  print "  ptr->x_param_large_function_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2700 > (int ) 10)"
++  print "    ptr->x_param_large_function_insns ^= 2700;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_stack_frame_growth" in var_opt_val_set) {
++  print "  ptr->x_param_stack_frame_growth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1000 > (int ) 10)"
++  print "    ptr->x_param_stack_frame_growth ^= 1000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_large_stack_frame" in var_opt_val_set) {
++  print "  ptr->x_param_large_stack_frame = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (256 > (int ) 10)"
++  print "    ptr->x_param_large_stack_frame ^= 256;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_large_unit_insns" in var_opt_val_set) {
++  print "  ptr->x_param_large_unit_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10000 > (int ) 10)"
++  print "    ptr->x_param_large_unit_insns ^= 10000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_lim_expensive" in var_opt_val_set) {
++  print "  ptr->x_param_lim_expensive = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (20 > (int ) 10)"
++  print "    ptr->x_param_lim_expensive ^= 20;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_loop_block_tile_size" in var_opt_val_set) {
++  print "  ptr->x_param_loop_block_tile_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (51 > (int ) 10)"
++  print "    ptr->x_param_loop_block_tile_size ^= 51;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_loop_interchange_max_num_stmts" in var_opt_val_set) {
++  print "  ptr->x_param_loop_interchange_max_num_stmts = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (64 > (int ) 10)"
++  print "    ptr->x_param_loop_interchange_max_num_stmts ^= 64;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_loop_interchange_stride_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_loop_interchange_stride_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_loop_interchange_stride_ratio ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_loop_invariant_max_bbs_in_loop" in var_opt_val_set) {
++  print "  ptr->x_param_loop_invariant_max_bbs_in_loop = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10000 > (int ) 10)"
++  print "    ptr->x_param_loop_invariant_max_bbs_in_loop ^= 10000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_loop_max_datarefs_for_datadeps" in var_opt_val_set) {
++  print "  ptr->x_param_loop_max_datarefs_for_datadeps = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1000 > (int ) 10)"
++  print "    ptr->x_param_loop_max_datarefs_for_datadeps ^= 1000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_loop_versioning_max_inner_insns" in var_opt_val_set) {
++  print "  ptr->x_param_loop_versioning_max_inner_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_loop_versioning_max_inner_insns ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_loop_versioning_max_outer_insns" in var_opt_val_set) {
++  print "  ptr->x_param_loop_versioning_max_outer_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_loop_versioning_max_outer_insns ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_lra_inheritance_ebb_probability_cutoff" in var_opt_val_set) {
++  print "  ptr->x_param_lra_inheritance_ebb_probability_cutoff = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_lra_inheritance_ebb_probability_cutoff ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_lra_max_considered_reload_pseudos" in var_opt_val_set) {
++  print "  ptr->x_param_lra_max_considered_reload_pseudos = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (500 > (int ) 10)"
++  print "    ptr->x_param_lra_max_considered_reload_pseudos ^= 500;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_average_unrolled_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_average_unrolled_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (80 > (int ) 10)"
++  print "    ptr->x_param_max_average_unrolled_insns ^= 80;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_combine_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_combine_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4 > (int ) 10)"
++  print "    ptr->x_param_max_combine_insns ^= 4;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_unroll_iterations" in var_opt_val_set) {
++  print "  ptr->x_param_max_unroll_iterations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_max_unroll_iterations ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_completely_peel_times" in var_opt_val_set) {
++  print "  ptr->x_param_max_completely_peel_times = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16 > (int ) 10)"
++  print "    ptr->x_param_max_completely_peel_times ^= 16;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_completely_peeled_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_completely_peeled_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_max_completely_peeled_insns ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_crossjump_edges" in var_opt_val_set) {
++  print "  ptr->x_param_max_crossjump_edges = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_crossjump_edges ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_cse_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_cse_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1000 > (int ) 10)"
++  print "    ptr->x_param_max_cse_insns ^= 1000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_cse_path_length" in var_opt_val_set) {
++  print "  ptr->x_param_max_cse_path_length = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_max_cse_path_length ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_cselib_memory_locations" in var_opt_val_set) {
++  print "  ptr->x_param_max_cselib_memory_locations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (500 > (int ) 10)"
++  print "    ptr->x_param_max_cselib_memory_locations ^= 500;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_debug_marker_count" in var_opt_val_set) {
++  print "  ptr->x_param_max_debug_marker_count = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100000 > (int ) 10)"
++  print "    ptr->x_param_max_debug_marker_count ^= 100000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_delay_slot_insn_search" in var_opt_val_set) {
++  print "  ptr->x_param_max_delay_slot_insn_search = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_delay_slot_insn_search ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_delay_slot_live_search" in var_opt_val_set) {
++  print "  ptr->x_param_max_delay_slot_live_search = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (333 > (int ) 10)"
++  print "    ptr->x_param_max_delay_slot_live_search ^= 333;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_dse_active_local_stores" in var_opt_val_set) {
++  print "  ptr->x_param_max_dse_active_local_stores = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (5000 > (int ) 10)"
++  print "    ptr->x_param_max_dse_active_local_stores ^= 5000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_early_inliner_max_iterations" in var_opt_val_set) {
++  print "  ptr->x_param_early_inliner_max_iterations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_early_inliner_max_iterations ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_find_base_term_values" in var_opt_val_set) {
++  print "  ptr->x_param_max_find_base_term_values = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_max_find_base_term_values ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_fsm_thread_length" in var_opt_val_set) {
++  print "  ptr->x_param_max_fsm_thread_length = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_max_fsm_thread_length ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_fsm_thread_path_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_fsm_thread_path_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_fsm_thread_path_insns ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_gcse_insertion_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_max_gcse_insertion_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (20 > (int ) 10)"
++  print "    ptr->x_param_max_gcse_insertion_ratio ^= 20;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_gcse_memory" in var_opt_val_set) {
++  print "  ptr->x_param_max_gcse_memory = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (131072 > (int ) 10)"
++  print "    ptr->x_param_max_gcse_memory ^= 131072;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_goto_duplication_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_goto_duplication_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_max_goto_duplication_insns ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_grow_copy_bb_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_grow_copy_bb_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_max_grow_copy_bb_insns ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_hoist_depth" in var_opt_val_set) {
++  print "  ptr->x_param_max_hoist_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (30 > (int ) 10)"
++  print "    ptr->x_param_max_hoist_depth ^= 30;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_inline_functions_called_once_insns" in var_opt_val_set) {
++  print "  ptr->x_param_inline_functions_called_once_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4000 > (int ) 10)"
++  print "    ptr->x_param_inline_functions_called_once_insns ^= 4000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_inline_functions_called_once_loop_depth" in var_opt_val_set) {
++  print "  ptr->x_param_inline_functions_called_once_loop_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (6 > (int ) 10)"
++  print "    ptr->x_param_inline_functions_called_once_loop_depth ^= 6;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_insns_auto" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_insns_auto = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (15 > (int ) 10)"
++  print "    ptr->x_param_max_inline_insns_auto ^= 15;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_insns_recursive_auto" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_insns_recursive_auto = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (450 > (int ) 10)"
++  print "    ptr->x_param_max_inline_insns_recursive_auto ^= 450;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_insns_recursive" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_insns_recursive = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (450 > (int ) 10)"
++  print "    ptr->x_param_max_inline_insns_recursive ^= 450;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_insns_single" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_insns_single = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (70 > (int ) 10)"
++  print "    ptr->x_param_max_inline_insns_single ^= 70;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_insns_size" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_insns_size = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_insns_small" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_insns_small = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_recursive_depth_auto" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_recursive_depth_auto = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_max_inline_recursive_depth_auto ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_inline_recursive_depth" in var_opt_val_set) {
++  print "  ptr->x_param_max_inline_recursive_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_max_inline_recursive_depth ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_isl_operations" in var_opt_val_set) {
++  print "  ptr->x_param_max_isl_operations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (350000 > (int ) 10)"
++  print "    ptr->x_param_max_isl_operations ^= 350000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_iterations_computation_cost" in var_opt_val_set) {
++  print "  ptr->x_param_max_iterations_computation_cost = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_max_iterations_computation_cost ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_iterations_to_track" in var_opt_val_set) {
++  print "  ptr->x_param_max_iterations_to_track = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1000 > (int ) 10)"
++  print "    ptr->x_param_max_iterations_to_track ^= 1000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_jump_thread_duplication_stmts" in var_opt_val_set) {
++  print "  ptr->x_param_max_jump_thread_duplication_stmts = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (15 > (int ) 10)"
++  print "    ptr->x_param_max_jump_thread_duplication_stmts ^= 15;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_last_value_rtl" in var_opt_val_set) {
++  print "  ptr->x_param_max_last_value_rtl = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10000 > (int ) 10)"
++  print "    ptr->x_param_max_last_value_rtl ^= 10000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_loop_header_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_loop_header_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (20 > (int ) 10)"
++  print "    ptr->x_param_max_loop_header_insns ^= 20;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_modulo_backtrack_attempts" in var_opt_val_set) {
++  print "  ptr->x_param_max_modulo_backtrack_attempts = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_max_modulo_backtrack_attempts ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_partial_antic_length" in var_opt_val_set) {
++  print "  ptr->x_param_max_partial_antic_length = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_partial_antic_length ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_peel_branches" in var_opt_val_set) {
++  print "  ptr->x_param_max_peel_branches = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (32 > (int ) 10)"
++  print "    ptr->x_param_max_peel_branches ^= 32;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_peel_times" in var_opt_val_set) {
++  print "  ptr->x_param_max_peel_times = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16 > (int ) 10)"
++  print "    ptr->x_param_max_peel_times ^= 16;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_peeled_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_peeled_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_peeled_insns ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_pending_list_length" in var_opt_val_set) {
++  print "  ptr->x_param_max_pending_list_length = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (32 > (int ) 10)"
++  print "    ptr->x_param_max_pending_list_length ^= 32;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_pipeline_region_blocks" in var_opt_val_set) {
++  print "  ptr->x_param_max_pipeline_region_blocks = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (15 > (int ) 10)"
++  print "    ptr->x_param_max_pipeline_region_blocks ^= 15;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_pipeline_region_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_pipeline_region_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_max_pipeline_region_insns ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_pow_sqrt_depth" in var_opt_val_set) {
++  print "  ptr->x_param_max_pow_sqrt_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (5 > (int ) 10)"
++  print "    ptr->x_param_max_pow_sqrt_depth ^= 5;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_predicted_iterations" in var_opt_val_set) {
++  print "  ptr->x_param_max_predicted_iterations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_predicted_iterations ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_reload_search_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_reload_search_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_reload_search_insns ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_rtl_if_conversion_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_rtl_if_conversion_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_max_rtl_if_conversion_insns ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_rtl_if_conversion_predictable_cost" in var_opt_val_set) {
++  print "  ptr->x_param_max_rtl_if_conversion_predictable_cost = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (20 > (int ) 10)"
++  print "    ptr->x_param_max_rtl_if_conversion_predictable_cost ^= 20;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_rtl_if_conversion_unpredictable_cost" in var_opt_val_set) {
++  print "  ptr->x_param_max_rtl_if_conversion_unpredictable_cost = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_max_rtl_if_conversion_unpredictable_cost ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_sched_extend_regions_iters" in var_opt_val_set) {
++  print "  ptr->x_param_max_sched_extend_regions_iters = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_sched_insn_conflict_delay" in var_opt_val_set) {
++  print "  ptr->x_param_max_sched_insn_conflict_delay = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_max_sched_insn_conflict_delay ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_sched_ready_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_sched_ready_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_sched_ready_insns ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_sched_region_blocks" in var_opt_val_set) {
++  print "  ptr->x_param_max_sched_region_blocks = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_max_sched_region_blocks ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_sched_region_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_sched_region_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_max_sched_region_insns ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_slsr_candidate_scan" in var_opt_val_set) {
++  print "  ptr->x_param_max_slsr_candidate_scan = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_max_slsr_candidate_scan ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_speculative_devirt_maydefs" in var_opt_val_set) {
++  print "  ptr->x_param_max_speculative_devirt_maydefs = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_max_speculative_devirt_maydefs ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_stores_to_merge" in var_opt_val_set) {
++  print "  ptr->x_param_max_stores_to_merge = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (64 > (int ) 10)"
++  print "    ptr->x_param_max_stores_to_merge ^= 64;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_stores_to_sink" in var_opt_val_set) {
++  print "  ptr->x_param_max_stores_to_sink = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_max_stores_to_sink ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_tail_merge_comparisons" in var_opt_val_set) {
++  print "  ptr->x_param_max_tail_merge_comparisons = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_max_tail_merge_comparisons ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_tail_merge_iterations" in var_opt_val_set) {
++  print "  ptr->x_param_max_tail_merge_iterations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_max_tail_merge_iterations ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_tracked_strlens" in var_opt_val_set) {
++  print "  ptr->x_param_max_tracked_strlens = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10000 > (int ) 10)"
++  print "    ptr->x_param_max_tracked_strlens ^= 10000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_tree_if_conversion_phi_args" in var_opt_val_set) {
++  print "  ptr->x_param_max_tree_if_conversion_phi_args = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4 > (int ) 10)"
++  print "    ptr->x_param_max_tree_if_conversion_phi_args ^= 4;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_unroll_times" in var_opt_val_set) {
++  print "  ptr->x_param_max_unroll_times = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_max_unroll_times ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_unrolled_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_unrolled_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_max_unrolled_insns ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_unswitch_insns" in var_opt_val_set) {
++  print "  ptr->x_param_max_unswitch_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_max_unswitch_insns ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_unswitch_level" in var_opt_val_set) {
++  print "  ptr->x_param_max_unswitch_level = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_max_unswitch_level ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_variable_expansions" in var_opt_val_set) {
++  print "  ptr->x_param_max_variable_expansions = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_max_variable_expansions ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_vartrack_expr_depth" in var_opt_val_set) {
++  print "  ptr->x_param_max_vartrack_expr_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (12 > (int ) 10)"
++  print "    ptr->x_param_max_vartrack_expr_depth ^= 12;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_vartrack_reverse_op_size" in var_opt_val_set) {
++  print "  ptr->x_param_max_vartrack_reverse_op_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_max_vartrack_reverse_op_size ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_vartrack_size" in var_opt_val_set) {
++  print "  ptr->x_param_max_vartrack_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50000000 > (int ) 10)"
++  print "    ptr->x_param_max_vartrack_size ^= 50000000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_max_vrp_switch_assertions" in var_opt_val_set) {
++  print "  ptr->x_param_max_vrp_switch_assertions = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_max_vrp_switch_assertions ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_crossjump_insns" in var_opt_val_set) {
++  print "  ptr->x_param_min_crossjump_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (5 > (int ) 10)"
++  print "    ptr->x_param_min_crossjump_insns ^= 5;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_inline_recursive_probability" in var_opt_val_set) {
++  print "  ptr->x_param_min_inline_recursive_probability = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_min_inline_recursive_probability ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_insn_to_prefetch_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_min_insn_to_prefetch_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (9 > (int ) 10)"
++  print "    ptr->x_param_min_insn_to_prefetch_ratio ^= 9;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_loop_cond_split_prob" in var_opt_val_set) {
++  print "  ptr->x_param_min_loop_cond_split_prob = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (30 > (int ) 10)"
++  print "    ptr->x_param_min_loop_cond_split_prob ^= 30;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_pagesize" in var_opt_val_set) {
++  print "  ptr->x_param_min_pagesize = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4096 > (int ) 10)"
++  print "    ptr->x_param_min_pagesize ^= 4096;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_size_for_stack_sharing" in var_opt_val_set) {
++  print "  ptr->x_param_min_size_for_stack_sharing = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (32 > (int ) 10)"
++  print "    ptr->x_param_min_size_for_stack_sharing ^= 32;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_spec_prob" in var_opt_val_set) {
++  print "  ptr->x_param_min_spec_prob = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_min_spec_prob ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_min_vect_loop_bound" in var_opt_val_set) {
++  print "  ptr->x_param_min_vect_loop_bound = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_modref_max_accesses" in var_opt_val_set) {
++  print "  ptr->x_param_modref_max_accesses = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16 > (int ) 10)"
++  print "    ptr->x_param_modref_max_accesses ^= 16;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_modref_max_adjustments" in var_opt_val_set) {
++  print "  ptr->x_param_modref_max_adjustments = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_modref_max_adjustments ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_modref_max_bases" in var_opt_val_set) {
++  print "  ptr->x_param_modref_max_bases = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (32 > (int ) 10)"
++  print "    ptr->x_param_modref_max_bases ^= 32;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_modref_max_depth" in var_opt_val_set) {
++  print "  ptr->x_param_modref_max_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (256 > (int ) 10)"
++  print "    ptr->x_param_modref_max_depth ^= 256;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_modref_max_escape_points" in var_opt_val_set) {
++  print "  ptr->x_param_modref_max_escape_points = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (256 > (int ) 10)"
++  print "    ptr->x_param_modref_max_escape_points ^= 256;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_modref_max_refs" in var_opt_val_set) {
++  print "  ptr->x_param_modref_max_refs = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16 > (int ) 10)"
++  print "    ptr->x_param_modref_max_refs ^= 16;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_modref_max_tests" in var_opt_val_set) {
++  print "  ptr->x_param_modref_max_tests = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (64 > (int ) 10)"
++  print "    ptr->x_param_modref_max_tests ^= 64;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ldp_dependency_search_range" in var_opt_val_set) {
++  print "  ptr->x_param_ldp_dependency_search_range = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (16 > (int ) 10)"
++  print "    ptr->x_param_ldp_dependency_search_range ^= 16;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_parloops_chunk_size" in var_opt_val_set) {
++  print "  ptr->x_param_parloops_chunk_size = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_parloops_min_per_thread" in var_opt_val_set) {
++  print "  ptr->x_param_parloops_min_per_thread = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_parloops_min_per_thread ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_parloops_schedule" in var_opt_val_set) {
++  print "  ptr->x_param_parloops_schedule = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_partial_inlining_entry_probability" in var_opt_val_set) {
++  print "  ptr->x_param_partial_inlining_entry_probability = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (70 > (int ) 10)"
++  print "    ptr->x_param_partial_inlining_entry_probability ^= 70;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_predictable_branch_outcome" in var_opt_val_set) {
++  print "  ptr->x_param_predictable_branch_outcome = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_predictable_branch_outcome ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_prefetch_dynamic_strides" in var_opt_val_set) {
++  print "  ptr->x_param_prefetch_dynamic_strides = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_prefetch_dynamic_strides ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_prefetch_latency" in var_opt_val_set) {
++  print "  ptr->x_param_prefetch_latency = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_prefetch_latency ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_prefetch_min_insn_to_mem_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_prefetch_min_insn_to_mem_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_prefetch_min_insn_to_mem_ratio ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_prefetch_minimum_stride" in var_opt_val_set) {
++  print "  ptr->x_param_prefetch_minimum_stride = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (-1 > (int ) 10)"
++  print "    ptr->x_param_prefetch_minimum_stride ^= -1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ranger_debug" in var_opt_val_set) {
++  print "  ptr->x_param_ranger_debug = (enum ranger_debug ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ranger_logical_depth" in var_opt_val_set) {
++  print "  ptr->x_param_ranger_logical_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (6 > (int ) 10)"
++  print "    ptr->x_param_ranger_logical_depth ^= 6;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_relation_block_limit" in var_opt_val_set) {
++  print "  ptr->x_param_relation_block_limit = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (200 > (int ) 10)"
++  print "    ptr->x_param_relation_block_limit ^= 200;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_rpo_vn_max_loop_depth" in var_opt_val_set) {
++  print "  ptr->x_param_rpo_vn_max_loop_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (7 > (int ) 10)"
++  print "    ptr->x_param_rpo_vn_max_loop_depth ^= 7;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sccvn_max_alias_queries_per_access" in var_opt_val_set) {
++  print "  ptr->x_param_sccvn_max_alias_queries_per_access = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1000 > (int ) 10)"
++  print "    ptr->x_param_sccvn_max_alias_queries_per_access ^= 1000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_scev_max_expr_complexity" in var_opt_val_set) {
++  print "  ptr->x_param_scev_max_expr_complexity = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_scev_max_expr_complexity ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_scev_max_expr_size" in var_opt_val_set) {
++  print "  ptr->x_param_scev_max_expr_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_scev_max_expr_size ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sched_autopref_queue_depth" in var_opt_val_set) {
++  print "  ptr->x_param_sched_autopref_queue_depth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (-1 > (int ) 10)"
++  print "    ptr->x_param_sched_autopref_queue_depth ^= -1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sched_mem_true_dep_cost" in var_opt_val_set) {
++  print "  ptr->x_param_sched_mem_true_dep_cost = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_sched_mem_true_dep_cost ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sched_pressure_algorithm" in var_opt_val_set) {
++  print "  ptr->x_param_sched_pressure_algorithm = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_sched_pressure_algorithm ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sched_spec_prob_cutoff" in var_opt_val_set) {
++  print "  ptr->x_param_sched_spec_prob_cutoff = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (40 > (int ) 10)"
++  print "    ptr->x_param_sched_spec_prob_cutoff ^= 40;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sched_state_edge_prob_cutoff" in var_opt_val_set) {
++  print "  ptr->x_param_sched_state_edge_prob_cutoff = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_sched_state_edge_prob_cutoff ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_selsched_insns_to_rename" in var_opt_val_set) {
++  print "  ptr->x_param_selsched_insns_to_rename = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_selsched_insns_to_rename ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_selsched_max_lookahead" in var_opt_val_set) {
++  print "  ptr->x_param_selsched_max_lookahead = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_selsched_max_lookahead ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_selsched_max_sched_times" in var_opt_val_set) {
++  print "  ptr->x_param_selsched_max_sched_times = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_selsched_max_sched_times ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_semi_relayout_level" in var_opt_val_set) {
++  print "  ptr->x_semi_relayout_level = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_simultaneous_prefetches" in var_opt_val_set) {
++  print "  ptr->x_param_simultaneous_prefetches = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (3 > (int ) 10)"
++  print "    ptr->x_param_simultaneous_prefetches ^= 3;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sink_frequency_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_sink_frequency_threshold = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (75 > (int ) 10)"
++  print "    ptr->x_param_sink_frequency_threshold ^= 75;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sms_dfa_history" in var_opt_val_set) {
++  print "  ptr->x_param_sms_dfa_history = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sms_loop_average_count_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_sms_loop_average_count_threshold = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sms_max_ii_factor" in var_opt_val_set) {
++  print "  ptr->x_param_sms_max_ii_factor = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_sms_max_ii_factor ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sms_min_sc" in var_opt_val_set) {
++  print "  ptr->x_param_sms_min_sc = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_sms_min_sc ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sra_max_propagations" in var_opt_val_set) {
++  print "  ptr->x_param_sra_max_propagations = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (32 > (int ) 10)"
++  print "    ptr->x_param_sra_max_propagations ^= 32;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sra_max_scalarization_size_size" in var_opt_val_set) {
++  print "  ptr->x_param_sra_max_scalarization_size_size = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_sra_max_scalarization_size_speed" in var_opt_val_set) {
++  print "  ptr->x_param_sra_max_scalarization_size_speed = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ssa_name_def_chain_limit" in var_opt_val_set) {
++  print "  ptr->x_param_ssa_name_def_chain_limit = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (512 > (int ) 10)"
++  print "    ptr->x_param_ssa_name_def_chain_limit ^= 512;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_ssp_buffer_size" in var_opt_val_set) {
++  print "  ptr->x_param_ssp_buffer_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_ssp_buffer_size ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_stack_clash_protection_guard_size" in var_opt_val_set) {
++  print "  ptr->x_param_stack_clash_protection_guard_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (12 > (int ) 10)"
++  print "    ptr->x_param_stack_clash_protection_guard_size ^= 12;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_stack_clash_protection_probe_interval" in var_opt_val_set) {
++  print "  ptr->x_param_stack_clash_protection_probe_interval = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (12 > (int ) 10)"
++  print "    ptr->x_param_stack_clash_protection_probe_interval ^= 12;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_store_merging_allow_unaligned" in var_opt_val_set) {
++  print "  ptr->x_param_store_merging_allow_unaligned = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_store_merging_allow_unaligned ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_store_merging_max_size" in var_opt_val_set) {
++  print "  ptr->x_param_store_merging_max_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (65536 > (int ) 10)"
++  print "    ptr->x_param_store_merging_max_size ^= 65536;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_struct_reorg_cold_struct_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_struct_reorg_cold_struct_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_struct_reorg_cold_struct_ratio ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_switch_conversion_branch_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_switch_conversion_branch_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (8 > (int ) 10)"
++  print "    ptr->x_param_switch_conversion_branch_ratio ^= 8;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_threader_debug" in var_opt_val_set) {
++  print "  ptr->x_param_threader_debug = (enum threader_debug ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tm_max_aggregate_size" in var_opt_val_set) {
++  print "  ptr->x_param_tm_max_aggregate_size = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (9 > (int ) 10)"
++  print "    ptr->x_param_tm_max_aggregate_size ^= 9;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tracer_dynamic_coverage_feedback" in var_opt_val_set) {
++  print "  ptr->x_param_tracer_dynamic_coverage_feedback = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (95 > (int ) 10)"
++  print "    ptr->x_param_tracer_dynamic_coverage_feedback ^= 95;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tracer_dynamic_coverage" in var_opt_val_set) {
++  print "  ptr->x_param_tracer_dynamic_coverage = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (75 > (int ) 10)"
++  print "    ptr->x_param_tracer_dynamic_coverage ^= 75;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tracer_max_code_growth" in var_opt_val_set) {
++  print "  ptr->x_param_tracer_max_code_growth = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (100 > (int ) 10)"
++  print "    ptr->x_param_tracer_max_code_growth ^= 100;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tracer_min_branch_probability_feedback" in var_opt_val_set) {
++  print "  ptr->x_param_tracer_min_branch_probability_feedback = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (80 > (int ) 10)"
++  print "    ptr->x_param_tracer_min_branch_probability_feedback ^= 80;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tracer_min_branch_probability" in var_opt_val_set) {
++  print "  ptr->x_param_tracer_min_branch_probability = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_tracer_min_branch_probability ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tracer_min_branch_ratio" in var_opt_val_set) {
++  print "  ptr->x_param_tracer_min_branch_ratio = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_tracer_min_branch_ratio ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_tree_reassoc_width" in var_opt_val_set) {
++  print "  ptr->x_param_tree_reassoc_width = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_uninit_control_dep_attempts" in var_opt_val_set) {
++  print "  ptr->x_param_uninit_control_dep_attempts = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1000 > (int ) 10)"
++  print "    ptr->x_param_uninit_control_dep_attempts ^= 1000;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_uninlined_function_insns" in var_opt_val_set) {
++  print "  ptr->x_param_uninlined_function_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_uninlined_function_insns ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_uninlined_function_time" in var_opt_val_set) {
++  print "  ptr->x_param_uninlined_function_time = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_uninlined_function_thunk_insns" in var_opt_val_set) {
++  print "  ptr->x_param_uninlined_function_thunk_insns = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_uninlined_function_thunk_insns ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_uninlined_function_thunk_time" in var_opt_val_set) {
++  print "  ptr->x_param_uninlined_function_thunk_time = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_uninlined_function_thunk_time ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_unlikely_bb_count_fraction" in var_opt_val_set) {
++  print "  ptr->x_param_unlikely_bb_count_fraction = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (20 > (int ) 10)"
++  print "    ptr->x_param_unlikely_bb_count_fraction ^= 20;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_unroll_jam_max_unroll" in var_opt_val_set) {
++  print "  ptr->x_param_unroll_jam_max_unroll = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (4 > (int ) 10)"
++  print "    ptr->x_param_unroll_jam_max_unroll ^= 4;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_unroll_jam_min_percent" in var_opt_val_set) {
++  print "  ptr->x_param_unroll_jam_min_percent = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_unroll_jam_min_percent ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_use_after_scope_direct_emission_threshold" in var_opt_val_set) {
++  print "  ptr->x_param_use_after_scope_direct_emission_threshold = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (256 > (int ) 10)"
++  print "    ptr->x_param_use_after_scope_direct_emission_threshold ^= 256;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_flexible_seg_len" in var_opt_val_set) {
++  print "  ptr->x_param_flexible_seg_len = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (0 > (int ) 10)"
++  print "    ptr->x_param_flexible_seg_len ^= 0;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vect_epilogues_nomask" in var_opt_val_set) {
++  print "  ptr->x_param_vect_epilogues_nomask = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_vect_epilogues_nomask ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vect_induction_float" in var_opt_val_set) {
++  print "  ptr->x_param_vect_induction_float = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (1 > (int ) 10)"
++  print "    ptr->x_param_vect_induction_float ^= 1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vect_inner_loop_cost_factor" in var_opt_val_set) {
++  print "  ptr->x_param_vect_inner_loop_cost_factor = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (50 > (int ) 10)"
++  print "    ptr->x_param_vect_inner_loop_cost_factor ^= 50;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vect_max_peeling_for_alignment" in var_opt_val_set) {
++  print "  ptr->x_param_vect_max_peeling_for_alignment = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (-1 > (int ) 10)"
++  print "    ptr->x_param_vect_max_peeling_for_alignment ^= -1;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vect_max_version_for_alias_checks" in var_opt_val_set) {
++  print "  ptr->x_param_vect_max_version_for_alias_checks = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (10 > (int ) 10)"
++  print "    ptr->x_param_vect_max_version_for_alias_checks ^= 10;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vect_max_version_for_alignment_checks" in var_opt_val_set) {
++  print "  ptr->x_param_vect_max_version_for_alignment_checks = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (6 > (int ) 10)"
++  print "    ptr->x_param_vect_max_version_for_alignment_checks ^= 6;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vect_partial_vector_usage" in var_opt_val_set) {
++  print "  ptr->x_param_vect_partial_vector_usage = (int ) bp_unpack_var_len_int (bp);"
++  print "  if (2 > (int ) 10)"
++  print "    ptr->x_param_vect_partial_vector_usage ^= 2;"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vrp1_mode" in var_opt_val_set) {
++  print "  ptr->x_param_vrp1_mode = (enum vrp_mode ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_param_vrp2_mode" in var_opt_val_set) {
++  print "  ptr->x_param_vrp2_mode = (enum vrp_mode ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_warn_inline" in var_opt_val_set) {
++  print "  ptr->x_warn_inline = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_aggressive_loop_optimizations" in var_opt_val_set) {
++  print "  ptr->x_flag_aggressive_loop_optimizations = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_align_functions" in var_opt_val_set) {
++  print "  ptr->x_flag_align_functions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_str_align_functions" in var_opt_val_set) {
++  print "  ptr->x_str_align_functions = bp_unpack_string (data_in, bp);"
++  print "  if (ptr->x_str_align_functions)"
++  print "    ptr->x_str_align_functions = xstrdup (ptr->x_str_align_functions);"
++}
++else
++  print "  bp_unpack_string (data_in, bp);"
++if ("x_flag_align_jumps" in var_opt_val_set) {
++  print "  ptr->x_flag_align_jumps = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_str_align_jumps" in var_opt_val_set) {
++  print "  ptr->x_str_align_jumps = bp_unpack_string (data_in, bp);"
++  print "  if (ptr->x_str_align_jumps)"
++  print "    ptr->x_str_align_jumps = xstrdup (ptr->x_str_align_jumps);"
++}
++else
++  print "  bp_unpack_string (data_in, bp);"
++if ("x_flag_align_labels" in var_opt_val_set) {
++  print "  ptr->x_flag_align_labels = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_str_align_labels" in var_opt_val_set) {
++  print "  ptr->x_str_align_labels = bp_unpack_string (data_in, bp);"
++  print "  if (ptr->x_str_align_labels)"
++  print "    ptr->x_str_align_labels = xstrdup (ptr->x_str_align_labels);"
++}
++else
++  print "  bp_unpack_string (data_in, bp);"
++if ("x_flag_align_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_align_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_str_align_loops" in var_opt_val_set) {
++  print "  ptr->x_str_align_loops = bp_unpack_string (data_in, bp);"
++  print "  if (ptr->x_str_align_loops)"
++  print "    ptr->x_str_align_loops = xstrdup (ptr->x_str_align_loops);"
++}
++else
++  print "  bp_unpack_string (data_in, bp);"
++if ("x_flag_allocation_dce" in var_opt_val_set) {
++  print "  ptr->x_flag_allocation_dce = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_store_data_races" in var_opt_val_set) {
++  print "  ptr->x_flag_store_data_races = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_array_widen_compare" in var_opt_val_set) {
++  print "  ptr->x_flag_array_widen_compare = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_associative_math" in var_opt_val_set) {
++  print "  ptr->x_flag_associative_math = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_asynchronous_unwind_tables" in var_opt_val_set) {
++  print "  ptr->x_flag_asynchronous_unwind_tables = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_auto_inc_dec" in var_opt_val_set) {
++  print "  ptr->x_flag_auto_inc_dec = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_bit_tests" in var_opt_val_set) {
++  print "  ptr->x_flag_bit_tests = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_branch_on_count_reg" in var_opt_val_set) {
++  print "  ptr->x_flag_branch_on_count_reg = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_branch_probabilities" in var_opt_val_set) {
++  print "  ptr->x_flag_branch_probabilities = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_caller_saves" in var_opt_val_set) {
++  print "  ptr->x_flag_caller_saves = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ccmp2" in var_opt_val_set) {
++  print "  ptr->x_flag_ccmp2 = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_code_hoisting" in var_opt_val_set) {
++  print "  ptr->x_flag_code_hoisting = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_combine_stack_adjustments" in var_opt_val_set) {
++  print "  ptr->x_flag_combine_stack_adjustments = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_compare_elim_after_reload" in var_opt_val_set) {
++  print "  ptr->x_flag_compare_elim_after_reload = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_conserve_stack" in var_opt_val_set) {
++  print "  ptr->x_flag_conserve_stack = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_convert_minmax" in var_opt_val_set) {
++  print "  ptr->x_flag_convert_minmax = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_cprop_registers" in var_opt_val_set) {
++  print "  ptr->x_flag_cprop_registers = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_crossjumping" in var_opt_val_set) {
++  print "  ptr->x_flag_crossjumping = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_crypto_accel_aes" in var_opt_val_set) {
++  print "  ptr->x_flag_crypto_accel_aes = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_cse_follow_jumps" in var_opt_val_set) {
++  print "  ptr->x_flag_cse_follow_jumps = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_cx_fortran_rules" in var_opt_val_set) {
++  print "  ptr->x_flag_cx_fortran_rules = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_cx_limited_range" in var_opt_val_set) {
++  print "  ptr->x_flag_cx_limited_range = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_dce" in var_opt_val_set) {
++  print "  ptr->x_flag_dce = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_defer_pop" in var_opt_val_set) {
++  print "  ptr->x_flag_defer_pop = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_delayed_branch" in var_opt_val_set) {
++  print "  ptr->x_flag_delayed_branch = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_delete_dead_exceptions" in var_opt_val_set) {
++  print "  ptr->x_flag_delete_dead_exceptions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_delete_null_pointer_checks" in var_opt_val_set) {
++  print "  ptr->x_flag_delete_null_pointer_checks = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_devirtualize" in var_opt_val_set) {
++  print "  ptr->x_flag_devirtualize = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_devirtualize_speculatively" in var_opt_val_set) {
++  print "  ptr->x_flag_devirtualize_speculatively = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_dse" in var_opt_val_set) {
++  print "  ptr->x_flag_dse = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_early_inlining" in var_opt_val_set) {
++  print "  ptr->x_flag_early_inlining = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_exceptions" in var_opt_val_set) {
++  print "  ptr->x_flag_exceptions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_excess_precision" in var_opt_val_set) {
++  print "  ptr->x_flag_excess_precision = (enum excess_precision ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_expensive_optimizations" in var_opt_val_set) {
++  print "  ptr->x_flag_expensive_optimizations = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_finite_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_finite_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_finite_math_only" in var_opt_val_set) {
++  print "  ptr->x_flag_finite_math_only = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_float_store" in var_opt_val_set) {
++  print "  ptr->x_flag_float_store = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_fold_simple_inlines" in var_opt_val_set) {
++  print "  ptr->x_flag_fold_simple_inlines = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_forward_propagate" in var_opt_val_set) {
++  print "  ptr->x_flag_forward_propagate = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_fp_contract_mode" in var_opt_val_set) {
++  print "  ptr->x_flag_fp_contract_mode = (enum fp_contract_mode ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_fp_int_builtin_inexact" in var_opt_val_set) {
++  print "  ptr->x_flag_fp_int_builtin_inexact = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ftz" in var_opt_val_set) {
++  print "  ptr->x_flag_ftz = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_no_function_cse" in var_opt_val_set) {
++  print "  ptr->x_flag_no_function_cse = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_gcse" in var_opt_val_set) {
++  print "  ptr->x_flag_gcse = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_gcse_after_reload" in var_opt_val_set) {
++  print "  ptr->x_flag_gcse_after_reload = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_gcse_las" in var_opt_val_set) {
++  print "  ptr->x_flag_gcse_las = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_gcse_lm" in var_opt_val_set) {
++  print "  ptr->x_flag_gcse_lm = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_gcse_sm" in var_opt_val_set) {
++  print "  ptr->x_flag_gcse_sm = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_graphite" in var_opt_val_set) {
++  print "  ptr->x_flag_graphite = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_graphite_identity" in var_opt_val_set) {
++  print "  ptr->x_flag_graphite_identity = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_guess_branch_prob" in var_opt_val_set) {
++  print "  ptr->x_flag_guess_branch_prob = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_harden_compares" in var_opt_val_set) {
++  print "  ptr->x_flag_harden_compares = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_harden_conditional_branches" in var_opt_val_set) {
++  print "  ptr->x_flag_harden_conditional_branches = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_hoist_adjacent_loads" in var_opt_val_set) {
++  print "  ptr->x_flag_hoist_adjacent_loads = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_icp" in var_opt_val_set) {
++  print "  ptr->x_flag_icp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_icp_speculatively" in var_opt_val_set) {
++  print "  ptr->x_flag_icp_speculatively = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_if_conversion" in var_opt_val_set) {
++  print "  ptr->x_flag_if_conversion = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_if_conversion_gimple" in var_opt_val_set) {
++  print "  ptr->x_flag_if_conversion_gimple = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_if_conversion2" in var_opt_val_set) {
++  print "  ptr->x_flag_if_conversion2 = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ifcvt_allow_complicated_cmps" in var_opt_val_set) {
++  print "  ptr->x_flag_ifcvt_allow_complicated_cmps = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_indirect_inlining" in var_opt_val_set) {
++  print "  ptr->x_flag_indirect_inlining = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_no_inline" in var_opt_val_set) {
++  print "  ptr->x_flag_no_inline = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_inline_atomics" in var_opt_val_set) {
++  print "  ptr->x_flag_inline_atomics = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_inline_functions" in var_opt_val_set) {
++  print "  ptr->x_flag_inline_functions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_inline_functions_called_once" in var_opt_val_set) {
++  print "  ptr->x_flag_inline_functions_called_once = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_inline_small_functions" in var_opt_val_set) {
++  print "  ptr->x_flag_inline_small_functions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_bit_cp" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_bit_cp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_cp" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_cp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_cp_clone" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_cp_clone = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_ic" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_ic = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_icf" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_icf = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_icf_functions" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_icf_functions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_icf_variables" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_icf_variables = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_modref" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_modref = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_prefetch" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_prefetch = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_profile" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_profile = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_pta" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_pta = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_pure_const" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_pure_const = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_ra" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_ra = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_reference" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_reference = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_reference_addressable" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_reference_addressable = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_reorder_fields" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_reorder_fields = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_sra" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_sra = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_stack_alignment" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_stack_alignment = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_strict_aliasing" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_strict_aliasing = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_struct_reorg" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_struct_reorg = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ipa_vrp" in var_opt_val_set) {
++  print "  ptr->x_flag_ipa_vrp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ira_algorithm" in var_opt_val_set) {
++  print "  ptr->x_flag_ira_algorithm = (enum ira_algorithm ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ira_hoist_pressure" in var_opt_val_set) {
++  print "  ptr->x_flag_ira_hoist_pressure = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ira_loop_pressure" in var_opt_val_set) {
++  print "  ptr->x_flag_ira_loop_pressure = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ira_region" in var_opt_val_set) {
++  print "  ptr->x_flag_ira_region = (enum ira_region ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ira_share_save_slots" in var_opt_val_set) {
++  print "  ptr->x_flag_ira_share_save_slots = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ira_share_spill_slots" in var_opt_val_set) {
++  print "  ptr->x_flag_ira_share_spill_slots = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_isolate_erroneous_paths_attribute" in var_opt_val_set) {
++  print "  ptr->x_flag_isolate_erroneous_paths_attribute = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_isolate_erroneous_paths_dereference" in var_opt_val_set) {
++  print "  ptr->x_flag_isolate_erroneous_paths_dereference = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ivopts" in var_opt_val_set) {
++  print "  ptr->x_flag_ivopts = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_jump_tables" in var_opt_val_set) {
++  print "  ptr->x_flag_jump_tables = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_keep_gc_roots_live" in var_opt_val_set) {
++  print "  ptr->x_flag_keep_gc_roots_live = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_kernel_pgo" in var_opt_val_set) {
++  print "  ptr->x_flag_kernel_pgo = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_lifetime_dse" in var_opt_val_set) {
++  print "  ptr->x_flag_lifetime_dse = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_limit_function_alignment" in var_opt_val_set) {
++  print "  ptr->x_flag_limit_function_alignment = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_live_patching" in var_opt_val_set) {
++  print "  ptr->x_flag_live_patching = (enum live_patching_level ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_live_range_shrinkage" in var_opt_val_set) {
++  print "  ptr->x_flag_live_range_shrinkage = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_loop_crc" in var_opt_val_set) {
++  print "  ptr->x_flag_loop_crc = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_loop_elim" in var_opt_val_set) {
++  print "  ptr->x_flag_loop_elim = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_loop_interchange" in var_opt_val_set) {
++  print "  ptr->x_flag_loop_interchange = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_loop_nest_optimize" in var_opt_val_set) {
++  print "  ptr->x_flag_loop_nest_optimize = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_loop_parallelize_all" in var_opt_val_set) {
++  print "  ptr->x_flag_loop_parallelize_all = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_unroll_jam" in var_opt_val_set) {
++  print "  ptr->x_flag_unroll_jam = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_lra_remat" in var_opt_val_set) {
++  print "  ptr->x_flag_lra_remat = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_errno_math" in var_opt_val_set) {
++  print "  ptr->x_flag_errno_math = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_merge_mull" in var_opt_val_set) {
++  print "  ptr->x_flag_merge_mull = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_modulo_sched" in var_opt_val_set) {
++  print "  ptr->x_flag_modulo_sched = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_modulo_sched_allow_regmoves" in var_opt_val_set) {
++  print "  ptr->x_flag_modulo_sched_allow_regmoves = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_move_loop_invariants" in var_opt_val_set) {
++  print "  ptr->x_flag_move_loop_invariants = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_move_loop_stores" in var_opt_val_set) {
++  print "  ptr->x_flag_move_loop_stores = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_non_call_exceptions" in var_opt_val_set) {
++  print "  ptr->x_flag_non_call_exceptions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_nothrow_opt" in var_opt_val_set) {
++  print "  ptr->x_flag_nothrow_opt = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_omit_frame_pointer" in var_opt_val_set) {
++  print "  ptr->x_flag_omit_frame_pointer = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_opt_info" in var_opt_val_set) {
++  print "  ptr->x_flag_opt_info = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_optimize_sibling_calls" in var_opt_val_set) {
++  print "  ptr->x_flag_optimize_sibling_calls = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_optimize_strlen" in var_opt_val_set) {
++  print "  ptr->x_flag_optimize_strlen = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_fp_model" in var_opt_val_set) {
++  print "  ptr->x_flag_fp_model = (enum fp_model ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_pack_struct" in var_opt_val_set) {
++  print "  ptr->x_flag_pack_struct = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_partial_inlining" in var_opt_val_set) {
++  print "  ptr->x_flag_partial_inlining = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_patchable_function_entry" in var_opt_val_set) {
++  print "  ptr->x_flag_patchable_function_entry = bp_unpack_string (data_in, bp);"
++  print "  if (ptr->x_flag_patchable_function_entry)"
++  print "    ptr->x_flag_patchable_function_entry = xstrdup (ptr->x_flag_patchable_function_entry);"
++}
++else
++  print "  bp_unpack_string (data_in, bp);"
++if ("x_flag_peel_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_peel_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_no_peephole" in var_opt_val_set) {
++  print "  ptr->x_flag_no_peephole = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_peephole2" in var_opt_val_set) {
++  print "  ptr->x_flag_peephole2 = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_plt" in var_opt_val_set) {
++  print "  ptr->x_flag_plt = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_predictive_commoning" in var_opt_val_set) {
++  print "  ptr->x_flag_predictive_commoning = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_prefetch_loop_arrays" in var_opt_val_set) {
++  print "  ptr->x_flag_prefetch_loop_arrays = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_printf_return_value" in var_opt_val_set) {
++  print "  ptr->x_flag_printf_return_value = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_profile_partial_training" in var_opt_val_set) {
++  print "  ptr->x_flag_profile_partial_training = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_profile_reorder_functions" in var_opt_val_set) {
++  print "  ptr->x_flag_profile_reorder_functions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_reciprocal_math" in var_opt_val_set) {
++  print "  ptr->x_flag_reciprocal_math = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ree" in var_opt_val_set) {
++  print "  ptr->x_flag_ree = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_pcc_struct_return" in var_opt_val_set) {
++  print "  ptr->x_flag_pcc_struct_return = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_rename_registers" in var_opt_val_set) {
++  print "  ptr->x_flag_rename_registers = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_reorder_blocks" in var_opt_val_set) {
++  print "  ptr->x_flag_reorder_blocks = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_reorder_blocks_algorithm" in var_opt_val_set) {
++  print "  ptr->x_flag_reorder_blocks_algorithm = (enum reorder_blocks_algorithm ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_reorder_blocks_and_partition" in var_opt_val_set) {
++  print "  ptr->x_flag_reorder_blocks_and_partition = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_reorder_functions" in var_opt_val_set) {
++  print "  ptr->x_flag_reorder_functions = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_rerun_cse_after_loop" in var_opt_val_set) {
++  print "  ptr->x_flag_rerun_cse_after_loop = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_resched_modulo_sched" in var_opt_val_set) {
++  print "  ptr->x_flag_resched_modulo_sched = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_rounding_math" in var_opt_val_set) {
++  print "  ptr->x_flag_rounding_math = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_rtti" in var_opt_val_set) {
++  print "  ptr->x_flag_rtti = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_save_optimization_record" in var_opt_val_set) {
++  print "  ptr->x_flag_save_optimization_record = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_critical_path_heuristic" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_critical_path_heuristic = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_dep_count_heuristic" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_dep_count_heuristic = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_group_heuristic" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_group_heuristic = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_schedule_interblock" in var_opt_val_set) {
++  print "  ptr->x_flag_schedule_interblock = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_last_insn_heuristic" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_last_insn_heuristic = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_pressure" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_pressure = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_rank_heuristic" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_rank_heuristic = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_schedule_speculative" in var_opt_val_set) {
++  print "  ptr->x_flag_schedule_speculative = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_spec_insn_heuristic" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_spec_insn_heuristic = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_schedule_speculative_load" in var_opt_val_set) {
++  print "  ptr->x_flag_schedule_speculative_load = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_schedule_speculative_load_dangerous" in var_opt_val_set) {
++  print "  ptr->x_flag_schedule_speculative_load_dangerous = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_stalled_insns" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_stalled_insns = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched_stalled_insns_dep" in var_opt_val_set) {
++  print "  ptr->x_flag_sched_stalled_insns_dep = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sched2_use_superblocks" in var_opt_val_set) {
++  print "  ptr->x_flag_sched2_use_superblocks = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_schedule_fusion" in var_opt_val_set) {
++  print "  ptr->x_flag_schedule_fusion = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_schedule_insns" in var_opt_val_set) {
++  print "  ptr->x_flag_schedule_insns = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_schedule_insns_after_reload" in var_opt_val_set) {
++  print "  ptr->x_flag_schedule_insns_after_reload = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_section_anchors" in var_opt_val_set) {
++  print "  ptr->x_flag_section_anchors = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sel_sched_pipelining" in var_opt_val_set) {
++  print "  ptr->x_flag_sel_sched_pipelining = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sel_sched_pipelining_outer_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_sel_sched_pipelining_outer_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_sel_sched_reschedule_pipelined" in var_opt_val_set) {
++  print "  ptr->x_flag_sel_sched_reschedule_pipelined = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_selective_scheduling" in var_opt_val_set) {
++  print "  ptr->x_flag_selective_scheduling = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_selective_scheduling2" in var_opt_val_set) {
++  print "  ptr->x_flag_selective_scheduling2 = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_semantic_interposition" in var_opt_val_set) {
++  print "  ptr->x_flag_semantic_interposition = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_short_enums" in var_opt_val_set) {
++  print "  ptr->x_flag_short_enums = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_short_wchar" in var_opt_val_set) {
++  print "  ptr->x_flag_short_wchar = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_shrink_wrap" in var_opt_val_set) {
++  print "  ptr->x_flag_shrink_wrap = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_shrink_wrap_separate" in var_opt_val_set) {
++  print "  ptr->x_flag_shrink_wrap_separate = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_signaling_nans" in var_opt_val_set) {
++  print "  ptr->x_flag_signaling_nans = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_signed_zeros" in var_opt_val_set) {
++  print "  ptr->x_flag_signed_zeros = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_simd_cost_model" in var_opt_val_set) {
++  print "  ptr->x_flag_simd_cost_model = (enum vect_cost_model ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_simdmath" in var_opt_val_set) {
++  print "  ptr->x_flag_simdmath = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_single_precision_constant" in var_opt_val_set) {
++  print "  ptr->x_flag_single_precision_constant = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_split_ivs_in_unroller" in var_opt_val_set) {
++  print "  ptr->x_flag_split_ivs_in_unroller = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_split_ldp_stp" in var_opt_val_set) {
++  print "  ptr->x_flag_split_ldp_stp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_split_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_split_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_split_paths" in var_opt_val_set) {
++  print "  ptr->x_flag_split_paths = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_split_wide_types" in var_opt_val_set) {
++  print "  ptr->x_flag_split_wide_types = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_split_wide_types_early" in var_opt_val_set) {
++  print "  ptr->x_flag_split_wide_types_early = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ssa_backprop" in var_opt_val_set) {
++  print "  ptr->x_flag_ssa_backprop = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_ssa_phiopt" in var_opt_val_set) {
++  print "  ptr->x_flag_ssa_phiopt = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_stack_clash_protection" in var_opt_val_set) {
++  print "  ptr->x_flag_stack_clash_protection = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_stack_protect" in var_opt_val_set) {
++  print "  ptr->x_flag_stack_protect = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_stack_reuse" in var_opt_val_set) {
++  print "  ptr->x_flag_stack_reuse = (enum stack_reuse_level ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_stdarg_opt" in var_opt_val_set) {
++  print "  ptr->x_flag_stdarg_opt = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_store_merging" in var_opt_val_set) {
++  print "  ptr->x_flag_store_merging = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_strict_aliasing" in var_opt_val_set) {
++  print "  ptr->x_flag_strict_aliasing = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_strict_enums" in var_opt_val_set) {
++  print "  ptr->x_flag_strict_enums = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_strict_volatile_bitfields" in var_opt_val_set) {
++  print "  ptr->x_flag_strict_volatile_bitfields = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_thread_jumps" in var_opt_val_set) {
++  print "  ptr->x_flag_thread_jumps = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_threadsafe_statics" in var_opt_val_set) {
++  print "  ptr->x_flag_threadsafe_statics = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_toplevel_reorder" in var_opt_val_set) {
++  print "  ptr->x_flag_toplevel_reorder = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tracer" in var_opt_val_set) {
++  print "  ptr->x_flag_tracer = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_trapping_math" in var_opt_val_set) {
++  print "  ptr->x_flag_trapping_math = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_trapv" in var_opt_val_set) {
++  print "  ptr->x_flag_trapv = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_bit_ccp" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_bit_ccp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_builtin_call_dce" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_builtin_call_dce = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_ccp" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_ccp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_ch" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_ch = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_coalesce_vars" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_coalesce_vars = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_copy_prop" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_copy_prop = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_cselim" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_cselim = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_dce" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_dce = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_dom" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_dom = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_dse" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_dse = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_forwprop" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_forwprop = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_fre" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_fre = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_loop_distribute_patterns" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_loop_distribute_patterns = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_loop_distribution" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_loop_distribution = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_loop_if_convert" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_loop_if_convert = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_loop_im" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_loop_im = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_loop_ivcanon" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_loop_ivcanon = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_loop_optimize" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_loop_optimize = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_loop_vectorize" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_loop_vectorize = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_live_range_split" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_live_range_split = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_parallelize_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_parallelize_loops = (int ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_partial_pre" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_partial_pre = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_phiprop" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_phiprop = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_pre" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_pre = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_pta" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_pta = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_reassoc" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_reassoc = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_scev_cprop" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_scev_cprop = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_sink" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_sink = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_slp_transpose_vectorize" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_slp_transpose_vectorize = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_slp_vectorize" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_slp_vectorize = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_slsr" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_slsr = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_sra" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_sra = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_switch_conversion" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_switch_conversion = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_tail_merge" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_tail_merge = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_ter" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_ter = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_vectorize" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_vectorize = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_tree_vrp" in var_opt_val_set) {
++  print "  ptr->x_flag_tree_vrp = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_auto_var_init" in var_opt_val_set) {
++  print "  ptr->x_flag_auto_var_init = (enum auto_init_type ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_unconstrained_commons" in var_opt_val_set) {
++  print "  ptr->x_flag_unconstrained_commons = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_unroll_all_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_unroll_all_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_cunroll_grow_size" in var_opt_val_set) {
++  print "  ptr->x_flag_cunroll_grow_size = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_unroll_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_unroll_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_unsafe_math_optimizations" in var_opt_val_set) {
++  print "  ptr->x_flag_unsafe_math_optimizations = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_unswitch_loops" in var_opt_val_set) {
++  print "  ptr->x_flag_unswitch_loops = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_unwind_tables" in var_opt_val_set) {
++  print "  ptr->x_flag_unwind_tables = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_var_tracking" in var_opt_val_set) {
++  print "  ptr->x_flag_var_tracking = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_var_tracking_assignments" in var_opt_val_set) {
++  print "  ptr->x_flag_var_tracking_assignments = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_var_tracking_assignments_toggle" in var_opt_val_set) {
++  print "  ptr->x_flag_var_tracking_assignments_toggle = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_var_tracking_uninit" in var_opt_val_set) {
++  print "  ptr->x_flag_var_tracking_uninit = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_variable_expansion_in_unroller" in var_opt_val_set) {
++  print "  ptr->x_flag_variable_expansion_in_unroller = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_vect_cost_model" in var_opt_val_set) {
++  print "  ptr->x_flag_vect_cost_model = (enum vect_cost_model ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_version_loops_for_strides" in var_opt_val_set) {
++  print "  ptr->x_flag_version_loops_for_strides = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_value_profile_transformations" in var_opt_val_set) {
++  print "  ptr->x_flag_value_profile_transformations = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_web" in var_opt_val_set) {
++  print "  ptr->x_flag_web = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_wrapv" in var_opt_val_set) {
++  print "  ptr->x_flag_wrapv = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_wrapv_pointer" in var_opt_val_set) {
++  print "  ptr->x_flag_wrapv_pointer = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_debug_nonbind_markers_p" in var_opt_val_set) {
++  print "  ptr->x_debug_nonbind_markers_p = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_cmlt_arith" in var_opt_val_set) {
++  print "  ptr->x_flag_cmlt_arith = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_mlow_precision_div" in var_opt_val_set) {
++  print "  ptr->x_flag_mlow_precision_div = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_mrecip_low_precision_sqrt" in var_opt_val_set) {
++  print "  ptr->x_flag_mrecip_low_precision_sqrt = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_mlow_precision_sqrt" in var_opt_val_set) {
++  print "  ptr->x_flag_mlow_precision_sqrt = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++if ("x_flag_simdmath_64" in var_opt_val_set) {
++  print "  ptr->x_flag_simdmath_64 = (signed char ) bp_unpack_var_len_int (bp);"
++}
++else
++  print "  bp_unpack_var_len_int (bp);"
++print "  unsigned HOST_WIDE_INT explicit_mask_prev[9];"
++print "  for (size_t i = 0; i < 9; i++)"
++print "    explicit_mask_prev[i] = bp_unpack_value (bp, 64);"
++print "  for (size_t i = 0; i < sizeof (ptr->explicit_mask) / sizeof (ptr->explicit_mask[0]); i++)"
++print "    ptr->explicit_mask[i] = 0;"
++if ("param_align_loop_iterations" in var_opt_int_k) {
++  k = var_opt_int_k["param_align_loop_iterations"]
++  j = var_opt_int_j["param_align_loop_iterations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_align_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_align_threshold"]
++  j = var_opt_int_j["param_align_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_asan_protect_allocas" in var_opt_int_k) {
++  k = var_opt_int_k["param_asan_protect_allocas"]
++  j = var_opt_int_j["param_asan_protect_allocas"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_asan_instrument_reads" in var_opt_int_k) {
++  k = var_opt_int_k["param_asan_instrument_reads"]
++  j = var_opt_int_j["param_asan_instrument_reads"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_asan_instrument_writes" in var_opt_int_k) {
++  k = var_opt_int_k["param_asan_instrument_writes"]
++  j = var_opt_int_j["param_asan_instrument_writes"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_asan_instrumentation_with_call_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_asan_instrumentation_with_call_threshold"]
++  j = var_opt_int_j["param_asan_instrumentation_with_call_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_asan_memintrin" in var_opt_int_k) {
++  k = var_opt_int_k["param_asan_memintrin"]
++  j = var_opt_int_j["param_asan_memintrin"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_asan_stack" in var_opt_int_k) {
++  k = var_opt_int_k["param_asan_stack"]
++  j = var_opt_int_j["param_asan_stack"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_asan_use_after_return" in var_opt_int_k) {
++  k = var_opt_int_k["param_asan_use_after_return"]
++  j = var_opt_int_j["param_asan_use_after_return"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_avg_loop_niter" in var_opt_int_k) {
++  k = var_opt_int_k["param_avg_loop_niter"]
++  j = var_opt_int_j["param_avg_loop_niter"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_avoid_fma_max_bits" in var_opt_int_k) {
++  k = var_opt_int_k["param_avoid_fma_max_bits"]
++  j = var_opt_int_j["param_avoid_fma_max_bits"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_builtin_expect_probability" in var_opt_int_k) {
++  k = var_opt_int_k["param_builtin_expect_probability"]
++  j = var_opt_int_j["param_builtin_expect_probability"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_builtin_string_cmp_inline_length" in var_opt_int_k) {
++  k = var_opt_int_k["param_builtin_string_cmp_inline_length"]
++  j = var_opt_int_j["param_builtin_string_cmp_inline_length"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_case_values_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_case_values_threshold"]
++  j = var_opt_int_j["param_case_values_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_comdat_sharing_probability" in var_opt_int_k) {
++  k = var_opt_int_k["param_comdat_sharing_probability"]
++  j = var_opt_int_j["param_comdat_sharing_probability"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_pointer_compression_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_pointer_compression_size"]
++  j = var_opt_int_j["param_pointer_compression_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_construct_interfere_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_construct_interfere_size"]
++  j = var_opt_int_j["param_construct_interfere_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_destruct_interfere_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_destruct_interfere_size"]
++  j = var_opt_int_j["param_destruct_interfere_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_dse_max_alias_queries_per_store" in var_opt_int_k) {
++  k = var_opt_int_k["param_dse_max_alias_queries_per_store"]
++  j = var_opt_int_j["param_dse_max_alias_queries_per_store"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_dse_max_object_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_dse_max_object_size"]
++  j = var_opt_int_j["param_dse_max_object_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_early_inlining_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_early_inlining_insns"]
++  j = var_opt_int_j["param_early_inlining_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_evrp_sparse_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_evrp_sparse_threshold"]
++  j = var_opt_int_j["param_evrp_sparse_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_evrp_switch_limit" in var_opt_int_k) {
++  k = var_opt_int_k["param_evrp_switch_limit"]
++  j = var_opt_int_j["param_evrp_switch_limit"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_fsm_scale_path_blocks" in var_opt_int_k) {
++  k = var_opt_int_k["param_fsm_scale_path_blocks"]
++  j = var_opt_int_j["param_fsm_scale_path_blocks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_fsm_scale_path_stmts" in var_opt_int_k) {
++  k = var_opt_int_k["param_fsm_scale_path_stmts"]
++  j = var_opt_int_j["param_fsm_scale_path_stmts"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_gcse_after_reload_critical_fraction" in var_opt_int_k) {
++  k = var_opt_int_k["param_gcse_after_reload_critical_fraction"]
++  j = var_opt_int_j["param_gcse_after_reload_critical_fraction"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_gcse_after_reload_partial_fraction" in var_opt_int_k) {
++  k = var_opt_int_k["param_gcse_after_reload_partial_fraction"]
++  j = var_opt_int_j["param_gcse_after_reload_partial_fraction"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_gcse_cost_distance_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_gcse_cost_distance_ratio"]
++  j = var_opt_int_j["param_gcse_cost_distance_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_gcse_unrestricted_cost" in var_opt_int_k) {
++  k = var_opt_int_k["param_gcse_unrestricted_cost"]
++  j = var_opt_int_j["param_gcse_unrestricted_cost"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_graphite_max_arrays_per_scop" in var_opt_int_k) {
++  k = var_opt_int_k["param_graphite_max_arrays_per_scop"]
++  j = var_opt_int_j["param_graphite_max_arrays_per_scop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_graphite_max_nb_scop_params" in var_opt_int_k) {
++  k = var_opt_int_k["param_graphite_max_nb_scop_params"]
++  j = var_opt_int_j["param_graphite_max_nb_scop_params"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_hwasan_instrument_allocas" in var_opt_int_k) {
++  k = var_opt_int_k["param_hwasan_instrument_allocas"]
++  j = var_opt_int_j["param_hwasan_instrument_allocas"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_hwasan_instrument_mem_intrinsics" in var_opt_int_k) {
++  k = var_opt_int_k["param_hwasan_instrument_mem_intrinsics"]
++  j = var_opt_int_j["param_hwasan_instrument_mem_intrinsics"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_hwasan_instrument_reads" in var_opt_int_k) {
++  k = var_opt_int_k["param_hwasan_instrument_reads"]
++  j = var_opt_int_j["param_hwasan_instrument_reads"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_hwasan_instrument_stack" in var_opt_int_k) {
++  k = var_opt_int_k["param_hwasan_instrument_stack"]
++  j = var_opt_int_j["param_hwasan_instrument_stack"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_hwasan_instrument_writes" in var_opt_int_k) {
++  k = var_opt_int_k["param_hwasan_instrument_writes"]
++  j = var_opt_int_j["param_hwasan_instrument_writes"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_hwasan_random_frame_tag" in var_opt_int_k) {
++  k = var_opt_int_k["param_hwasan_random_frame_tag"]
++  j = var_opt_int_j["param_hwasan_random_frame_tag"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ifcvt_allow_register_renaming" in var_opt_int_k) {
++  k = var_opt_int_k["param_ifcvt_allow_register_renaming"]
++  j = var_opt_int_j["param_ifcvt_allow_register_renaming"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_inline_heuristics_hint_percent" in var_opt_int_k) {
++  k = var_opt_int_k["param_inline_heuristics_hint_percent"]
++  j = var_opt_int_j["param_inline_heuristics_hint_percent"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_inline_min_speedup" in var_opt_int_k) {
++  k = var_opt_int_k["param_inline_min_speedup"]
++  j = var_opt_int_j["param_inline_min_speedup"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_inline_unit_growth" in var_opt_int_k) {
++  k = var_opt_int_k["param_inline_unit_growth"]
++  j = var_opt_int_j["param_inline_unit_growth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_eval_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_eval_threshold"]
++  j = var_opt_int_j["param_ipa_cp_eval_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_large_unit_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_large_unit_insns"]
++  j = var_opt_int_j["param_ipa_cp_large_unit_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_loop_hint_bonus" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_loop_hint_bonus"]
++  j = var_opt_int_j["param_ipa_cp_loop_hint_bonus"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_max_recursive_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_max_recursive_depth"]
++  j = var_opt_int_j["param_ipa_cp_max_recursive_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_min_recursive_probability" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_min_recursive_probability"]
++  j = var_opt_int_j["param_ipa_cp_min_recursive_probability"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_profile_count_base" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_profile_count_base"]
++  j = var_opt_int_j["param_ipa_cp_profile_count_base"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_recursion_penalty" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_recursion_penalty"]
++  j = var_opt_int_j["param_ipa_cp_recursion_penalty"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_recursive_freq_factor" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_recursive_freq_factor"]
++  j = var_opt_int_j["param_ipa_cp_recursive_freq_factor"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_single_call_penalty" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_single_call_penalty"]
++  j = var_opt_int_j["param_ipa_cp_single_call_penalty"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_unit_growth" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_unit_growth"]
++  j = var_opt_int_j["param_ipa_cp_unit_growth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_cp_value_list_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_cp_value_list_size"]
++  j = var_opt_int_j["param_ipa_cp_value_list_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_jump_function_lookups" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_jump_function_lookups"]
++  j = var_opt_int_j["param_ipa_jump_function_lookups"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_max_aa_steps" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_max_aa_steps"]
++  j = var_opt_int_j["param_ipa_max_aa_steps"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_max_agg_items" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_max_agg_items"]
++  j = var_opt_int_j["param_ipa_max_agg_items"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_max_loop_predicates" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_max_loop_predicates"]
++  j = var_opt_int_j["param_ipa_max_loop_predicates"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_max_param_expr_ops" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_max_param_expr_ops"]
++  j = var_opt_int_j["param_ipa_max_param_expr_ops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_max_switch_predicate_bounds" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_max_switch_predicate_bounds"]
++  j = var_opt_int_j["param_ipa_max_switch_predicate_bounds"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_prefetch_distance_factor" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_prefetch_distance_factor"]
++  j = var_opt_int_j["param_ipa_prefetch_distance_factor"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_prefetch_locality" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_prefetch_locality"]
++  j = var_opt_int_j["param_ipa_prefetch_locality"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_prefetch_pagesize" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_prefetch_pagesize"]
++  j = var_opt_int_j["param_ipa_prefetch_pagesize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_sra_max_replacements" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_sra_max_replacements"]
++  j = var_opt_int_j["param_ipa_sra_max_replacements"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ipa_sra_ptr_growth_factor" in var_opt_int_k) {
++  k = var_opt_int_k["param_ipa_sra_ptr_growth_factor"]
++  j = var_opt_int_j["param_ipa_sra_ptr_growth_factor"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ira_consider_dup_in_all_alts" in var_opt_int_k) {
++  k = var_opt_int_k["param_ira_consider_dup_in_all_alts"]
++  j = var_opt_int_j["param_ira_consider_dup_in_all_alts"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[0] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ira_loop_reserved_regs" in var_opt_int_k) {
++  k = var_opt_int_k["param_ira_loop_reserved_regs"]
++  j = var_opt_int_j["param_ira_loop_reserved_regs"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ira_max_conflict_table_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_ira_max_conflict_table_size"]
++  j = var_opt_int_j["param_ira_max_conflict_table_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ira_max_loops_num" in var_opt_int_k) {
++  k = var_opt_int_k["param_ira_max_loops_num"]
++  j = var_opt_int_j["param_ira_max_loops_num"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_iv_always_prune_cand_set_bound" in var_opt_int_k) {
++  k = var_opt_int_k["param_iv_always_prune_cand_set_bound"]
++  j = var_opt_int_j["param_iv_always_prune_cand_set_bound"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_iv_consider_all_candidates_bound" in var_opt_int_k) {
++  k = var_opt_int_k["param_iv_consider_all_candidates_bound"]
++  j = var_opt_int_j["param_iv_consider_all_candidates_bound"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_iv_max_considered_uses" in var_opt_int_k) {
++  k = var_opt_int_k["param_iv_max_considered_uses"]
++  j = var_opt_int_j["param_iv_max_considered_uses"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_jump_table_max_growth_ratio_for_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_jump_table_max_growth_ratio_for_size"]
++  j = var_opt_int_j["param_jump_table_max_growth_ratio_for_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_jump_table_max_growth_ratio_for_speed" in var_opt_int_k) {
++  k = var_opt_int_k["param_jump_table_max_growth_ratio_for_speed"]
++  j = var_opt_int_j["param_jump_table_max_growth_ratio_for_speed"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_l1_cache_line_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_l1_cache_line_size"]
++  j = var_opt_int_j["param_l1_cache_line_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_l1_cache_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_l1_cache_size"]
++  j = var_opt_int_j["param_l1_cache_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_l2_cache_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_l2_cache_size"]
++  j = var_opt_int_j["param_l2_cache_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_large_function_growth" in var_opt_int_k) {
++  k = var_opt_int_k["param_large_function_growth"]
++  j = var_opt_int_j["param_large_function_growth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_large_function_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_large_function_insns"]
++  j = var_opt_int_j["param_large_function_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_stack_frame_growth" in var_opt_int_k) {
++  k = var_opt_int_k["param_stack_frame_growth"]
++  j = var_opt_int_j["param_stack_frame_growth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_large_stack_frame" in var_opt_int_k) {
++  k = var_opt_int_k["param_large_stack_frame"]
++  j = var_opt_int_j["param_large_stack_frame"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_large_unit_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_large_unit_insns"]
++  j = var_opt_int_j["param_large_unit_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_lim_expensive" in var_opt_int_k) {
++  k = var_opt_int_k["param_lim_expensive"]
++  j = var_opt_int_j["param_lim_expensive"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_loop_block_tile_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_loop_block_tile_size"]
++  j = var_opt_int_j["param_loop_block_tile_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_loop_interchange_max_num_stmts" in var_opt_int_k) {
++  k = var_opt_int_k["param_loop_interchange_max_num_stmts"]
++  j = var_opt_int_j["param_loop_interchange_max_num_stmts"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_loop_interchange_stride_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_loop_interchange_stride_ratio"]
++  j = var_opt_int_j["param_loop_interchange_stride_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_loop_invariant_max_bbs_in_loop" in var_opt_int_k) {
++  k = var_opt_int_k["param_loop_invariant_max_bbs_in_loop"]
++  j = var_opt_int_j["param_loop_invariant_max_bbs_in_loop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_loop_max_datarefs_for_datadeps" in var_opt_int_k) {
++  k = var_opt_int_k["param_loop_max_datarefs_for_datadeps"]
++  j = var_opt_int_j["param_loop_max_datarefs_for_datadeps"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_loop_versioning_max_inner_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_loop_versioning_max_inner_insns"]
++  j = var_opt_int_j["param_loop_versioning_max_inner_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_loop_versioning_max_outer_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_loop_versioning_max_outer_insns"]
++  j = var_opt_int_j["param_loop_versioning_max_outer_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_lra_inheritance_ebb_probability_cutoff" in var_opt_int_k) {
++  k = var_opt_int_k["param_lra_inheritance_ebb_probability_cutoff"]
++  j = var_opt_int_j["param_lra_inheritance_ebb_probability_cutoff"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_lra_max_considered_reload_pseudos" in var_opt_int_k) {
++  k = var_opt_int_k["param_lra_max_considered_reload_pseudos"]
++  j = var_opt_int_j["param_lra_max_considered_reload_pseudos"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_average_unrolled_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_average_unrolled_insns"]
++  j = var_opt_int_j["param_max_average_unrolled_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_combine_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_combine_insns"]
++  j = var_opt_int_j["param_max_combine_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_unroll_iterations" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_unroll_iterations"]
++  j = var_opt_int_j["param_max_unroll_iterations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_completely_peel_times" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_completely_peel_times"]
++  j = var_opt_int_j["param_max_completely_peel_times"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_completely_peeled_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_completely_peeled_insns"]
++  j = var_opt_int_j["param_max_completely_peeled_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_crossjump_edges" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_crossjump_edges"]
++  j = var_opt_int_j["param_max_crossjump_edges"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_cse_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_cse_insns"]
++  j = var_opt_int_j["param_max_cse_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_cse_path_length" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_cse_path_length"]
++  j = var_opt_int_j["param_max_cse_path_length"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_cselib_memory_locations" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_cselib_memory_locations"]
++  j = var_opt_int_j["param_max_cselib_memory_locations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_debug_marker_count" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_debug_marker_count"]
++  j = var_opt_int_j["param_max_debug_marker_count"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_delay_slot_insn_search" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_delay_slot_insn_search"]
++  j = var_opt_int_j["param_max_delay_slot_insn_search"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_delay_slot_live_search" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_delay_slot_live_search"]
++  j = var_opt_int_j["param_max_delay_slot_live_search"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_dse_active_local_stores" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_dse_active_local_stores"]
++  j = var_opt_int_j["param_max_dse_active_local_stores"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_early_inliner_max_iterations" in var_opt_int_k) {
++  k = var_opt_int_k["param_early_inliner_max_iterations"]
++  j = var_opt_int_j["param_early_inliner_max_iterations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_find_base_term_values" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_find_base_term_values"]
++  j = var_opt_int_j["param_max_find_base_term_values"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_fsm_thread_length" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_fsm_thread_length"]
++  j = var_opt_int_j["param_max_fsm_thread_length"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_fsm_thread_path_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_fsm_thread_path_insns"]
++  j = var_opt_int_j["param_max_fsm_thread_path_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_gcse_insertion_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_gcse_insertion_ratio"]
++  j = var_opt_int_j["param_max_gcse_insertion_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_gcse_memory" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_gcse_memory"]
++  j = var_opt_int_j["param_max_gcse_memory"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_goto_duplication_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_goto_duplication_insns"]
++  j = var_opt_int_j["param_max_goto_duplication_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_grow_copy_bb_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_grow_copy_bb_insns"]
++  j = var_opt_int_j["param_max_grow_copy_bb_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_hoist_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_hoist_depth"]
++  j = var_opt_int_j["param_max_hoist_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_inline_functions_called_once_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_inline_functions_called_once_insns"]
++  j = var_opt_int_j["param_inline_functions_called_once_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_inline_functions_called_once_loop_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_inline_functions_called_once_loop_depth"]
++  j = var_opt_int_j["param_inline_functions_called_once_loop_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_insns_auto" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_insns_auto"]
++  j = var_opt_int_j["param_max_inline_insns_auto"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_insns_recursive_auto" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_insns_recursive_auto"]
++  j = var_opt_int_j["param_max_inline_insns_recursive_auto"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_insns_recursive" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_insns_recursive"]
++  j = var_opt_int_j["param_max_inline_insns_recursive"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_insns_single" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_insns_single"]
++  j = var_opt_int_j["param_max_inline_insns_single"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_insns_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_insns_size"]
++  j = var_opt_int_j["param_max_inline_insns_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_insns_small" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_insns_small"]
++  j = var_opt_int_j["param_max_inline_insns_small"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_recursive_depth_auto" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_recursive_depth_auto"]
++  j = var_opt_int_j["param_max_inline_recursive_depth_auto"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_inline_recursive_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_inline_recursive_depth"]
++  j = var_opt_int_j["param_max_inline_recursive_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_isl_operations" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_isl_operations"]
++  j = var_opt_int_j["param_max_isl_operations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_iterations_computation_cost" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_iterations_computation_cost"]
++  j = var_opt_int_j["param_max_iterations_computation_cost"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_iterations_to_track" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_iterations_to_track"]
++  j = var_opt_int_j["param_max_iterations_to_track"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_jump_thread_duplication_stmts" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_jump_thread_duplication_stmts"]
++  j = var_opt_int_j["param_max_jump_thread_duplication_stmts"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_last_value_rtl" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_last_value_rtl"]
++  j = var_opt_int_j["param_max_last_value_rtl"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_loop_header_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_loop_header_insns"]
++  j = var_opt_int_j["param_max_loop_header_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[1] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_modulo_backtrack_attempts" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_modulo_backtrack_attempts"]
++  j = var_opt_int_j["param_max_modulo_backtrack_attempts"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_partial_antic_length" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_partial_antic_length"]
++  j = var_opt_int_j["param_max_partial_antic_length"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_peel_branches" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_peel_branches"]
++  j = var_opt_int_j["param_max_peel_branches"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_peel_times" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_peel_times"]
++  j = var_opt_int_j["param_max_peel_times"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_peeled_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_peeled_insns"]
++  j = var_opt_int_j["param_max_peeled_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_pending_list_length" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_pending_list_length"]
++  j = var_opt_int_j["param_max_pending_list_length"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_pipeline_region_blocks" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_pipeline_region_blocks"]
++  j = var_opt_int_j["param_max_pipeline_region_blocks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_pipeline_region_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_pipeline_region_insns"]
++  j = var_opt_int_j["param_max_pipeline_region_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_pow_sqrt_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_pow_sqrt_depth"]
++  j = var_opt_int_j["param_max_pow_sqrt_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_predicted_iterations" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_predicted_iterations"]
++  j = var_opt_int_j["param_max_predicted_iterations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_reload_search_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_reload_search_insns"]
++  j = var_opt_int_j["param_max_reload_search_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_rtl_if_conversion_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_rtl_if_conversion_insns"]
++  j = var_opt_int_j["param_max_rtl_if_conversion_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_rtl_if_conversion_predictable_cost" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_rtl_if_conversion_predictable_cost"]
++  j = var_opt_int_j["param_max_rtl_if_conversion_predictable_cost"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_rtl_if_conversion_unpredictable_cost" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_rtl_if_conversion_unpredictable_cost"]
++  j = var_opt_int_j["param_max_rtl_if_conversion_unpredictable_cost"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_sched_extend_regions_iters" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_sched_extend_regions_iters"]
++  j = var_opt_int_j["param_max_sched_extend_regions_iters"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_sched_insn_conflict_delay" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_sched_insn_conflict_delay"]
++  j = var_opt_int_j["param_max_sched_insn_conflict_delay"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_sched_ready_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_sched_ready_insns"]
++  j = var_opt_int_j["param_max_sched_ready_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_sched_region_blocks" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_sched_region_blocks"]
++  j = var_opt_int_j["param_max_sched_region_blocks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_sched_region_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_sched_region_insns"]
++  j = var_opt_int_j["param_max_sched_region_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_slsr_candidate_scan" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_slsr_candidate_scan"]
++  j = var_opt_int_j["param_max_slsr_candidate_scan"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_speculative_devirt_maydefs" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_speculative_devirt_maydefs"]
++  j = var_opt_int_j["param_max_speculative_devirt_maydefs"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_stores_to_merge" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_stores_to_merge"]
++  j = var_opt_int_j["param_max_stores_to_merge"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_stores_to_sink" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_stores_to_sink"]
++  j = var_opt_int_j["param_max_stores_to_sink"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_tail_merge_comparisons" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_tail_merge_comparisons"]
++  j = var_opt_int_j["param_max_tail_merge_comparisons"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_tail_merge_iterations" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_tail_merge_iterations"]
++  j = var_opt_int_j["param_max_tail_merge_iterations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_tracked_strlens" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_tracked_strlens"]
++  j = var_opt_int_j["param_max_tracked_strlens"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_tree_if_conversion_phi_args" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_tree_if_conversion_phi_args"]
++  j = var_opt_int_j["param_max_tree_if_conversion_phi_args"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_unroll_times" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_unroll_times"]
++  j = var_opt_int_j["param_max_unroll_times"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_unrolled_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_unrolled_insns"]
++  j = var_opt_int_j["param_max_unrolled_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_unswitch_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_unswitch_insns"]
++  j = var_opt_int_j["param_max_unswitch_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_unswitch_level" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_unswitch_level"]
++  j = var_opt_int_j["param_max_unswitch_level"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_variable_expansions" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_variable_expansions"]
++  j = var_opt_int_j["param_max_variable_expansions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_vartrack_expr_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_vartrack_expr_depth"]
++  j = var_opt_int_j["param_max_vartrack_expr_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_vartrack_reverse_op_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_vartrack_reverse_op_size"]
++  j = var_opt_int_j["param_max_vartrack_reverse_op_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_vartrack_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_vartrack_size"]
++  j = var_opt_int_j["param_max_vartrack_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_max_vrp_switch_assertions" in var_opt_int_k) {
++  k = var_opt_int_k["param_max_vrp_switch_assertions"]
++  j = var_opt_int_j["param_max_vrp_switch_assertions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_crossjump_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_crossjump_insns"]
++  j = var_opt_int_j["param_min_crossjump_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_inline_recursive_probability" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_inline_recursive_probability"]
++  j = var_opt_int_j["param_min_inline_recursive_probability"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_insn_to_prefetch_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_insn_to_prefetch_ratio"]
++  j = var_opt_int_j["param_min_insn_to_prefetch_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_loop_cond_split_prob" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_loop_cond_split_prob"]
++  j = var_opt_int_j["param_min_loop_cond_split_prob"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_pagesize" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_pagesize"]
++  j = var_opt_int_j["param_min_pagesize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_size_for_stack_sharing" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_size_for_stack_sharing"]
++  j = var_opt_int_j["param_min_size_for_stack_sharing"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_spec_prob" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_spec_prob"]
++  j = var_opt_int_j["param_min_spec_prob"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_min_vect_loop_bound" in var_opt_int_k) {
++  k = var_opt_int_k["param_min_vect_loop_bound"]
++  j = var_opt_int_j["param_min_vect_loop_bound"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_modref_max_accesses" in var_opt_int_k) {
++  k = var_opt_int_k["param_modref_max_accesses"]
++  j = var_opt_int_j["param_modref_max_accesses"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_modref_max_adjustments" in var_opt_int_k) {
++  k = var_opt_int_k["param_modref_max_adjustments"]
++  j = var_opt_int_j["param_modref_max_adjustments"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_modref_max_bases" in var_opt_int_k) {
++  k = var_opt_int_k["param_modref_max_bases"]
++  j = var_opt_int_j["param_modref_max_bases"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_modref_max_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_modref_max_depth"]
++  j = var_opt_int_j["param_modref_max_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_modref_max_escape_points" in var_opt_int_k) {
++  k = var_opt_int_k["param_modref_max_escape_points"]
++  j = var_opt_int_j["param_modref_max_escape_points"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_modref_max_refs" in var_opt_int_k) {
++  k = var_opt_int_k["param_modref_max_refs"]
++  j = var_opt_int_j["param_modref_max_refs"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_modref_max_tests" in var_opt_int_k) {
++  k = var_opt_int_k["param_modref_max_tests"]
++  j = var_opt_int_j["param_modref_max_tests"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ldp_dependency_search_range" in var_opt_int_k) {
++  k = var_opt_int_k["param_ldp_dependency_search_range"]
++  j = var_opt_int_j["param_ldp_dependency_search_range"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_parloops_chunk_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_parloops_chunk_size"]
++  j = var_opt_int_j["param_parloops_chunk_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_parloops_min_per_thread" in var_opt_int_k) {
++  k = var_opt_int_k["param_parloops_min_per_thread"]
++  j = var_opt_int_j["param_parloops_min_per_thread"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_parloops_schedule" in var_opt_int_k) {
++  k = var_opt_int_k["param_parloops_schedule"]
++  j = var_opt_int_j["param_parloops_schedule"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_partial_inlining_entry_probability" in var_opt_int_k) {
++  k = var_opt_int_k["param_partial_inlining_entry_probability"]
++  j = var_opt_int_j["param_partial_inlining_entry_probability"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_predictable_branch_outcome" in var_opt_int_k) {
++  k = var_opt_int_k["param_predictable_branch_outcome"]
++  j = var_opt_int_j["param_predictable_branch_outcome"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_prefetch_dynamic_strides" in var_opt_int_k) {
++  k = var_opt_int_k["param_prefetch_dynamic_strides"]
++  j = var_opt_int_j["param_prefetch_dynamic_strides"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_prefetch_latency" in var_opt_int_k) {
++  k = var_opt_int_k["param_prefetch_latency"]
++  j = var_opt_int_j["param_prefetch_latency"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_prefetch_min_insn_to_mem_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_prefetch_min_insn_to_mem_ratio"]
++  j = var_opt_int_j["param_prefetch_min_insn_to_mem_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_prefetch_minimum_stride" in var_opt_int_k) {
++  k = var_opt_int_k["param_prefetch_minimum_stride"]
++  j = var_opt_int_j["param_prefetch_minimum_stride"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ranger_logical_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_ranger_logical_depth"]
++  j = var_opt_int_j["param_ranger_logical_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_relation_block_limit" in var_opt_int_k) {
++  k = var_opt_int_k["param_relation_block_limit"]
++  j = var_opt_int_j["param_relation_block_limit"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_rpo_vn_max_loop_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_rpo_vn_max_loop_depth"]
++  j = var_opt_int_j["param_rpo_vn_max_loop_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[2] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sccvn_max_alias_queries_per_access" in var_opt_int_k) {
++  k = var_opt_int_k["param_sccvn_max_alias_queries_per_access"]
++  j = var_opt_int_j["param_sccvn_max_alias_queries_per_access"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_scev_max_expr_complexity" in var_opt_int_k) {
++  k = var_opt_int_k["param_scev_max_expr_complexity"]
++  j = var_opt_int_j["param_scev_max_expr_complexity"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_scev_max_expr_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_scev_max_expr_size"]
++  j = var_opt_int_j["param_scev_max_expr_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sched_autopref_queue_depth" in var_opt_int_k) {
++  k = var_opt_int_k["param_sched_autopref_queue_depth"]
++  j = var_opt_int_j["param_sched_autopref_queue_depth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sched_mem_true_dep_cost" in var_opt_int_k) {
++  k = var_opt_int_k["param_sched_mem_true_dep_cost"]
++  j = var_opt_int_j["param_sched_mem_true_dep_cost"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sched_pressure_algorithm" in var_opt_int_k) {
++  k = var_opt_int_k["param_sched_pressure_algorithm"]
++  j = var_opt_int_j["param_sched_pressure_algorithm"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sched_spec_prob_cutoff" in var_opt_int_k) {
++  k = var_opt_int_k["param_sched_spec_prob_cutoff"]
++  j = var_opt_int_j["param_sched_spec_prob_cutoff"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sched_state_edge_prob_cutoff" in var_opt_int_k) {
++  k = var_opt_int_k["param_sched_state_edge_prob_cutoff"]
++  j = var_opt_int_j["param_sched_state_edge_prob_cutoff"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_selsched_insns_to_rename" in var_opt_int_k) {
++  k = var_opt_int_k["param_selsched_insns_to_rename"]
++  j = var_opt_int_j["param_selsched_insns_to_rename"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_selsched_max_lookahead" in var_opt_int_k) {
++  k = var_opt_int_k["param_selsched_max_lookahead"]
++  j = var_opt_int_j["param_selsched_max_lookahead"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_selsched_max_sched_times" in var_opt_int_k) {
++  k = var_opt_int_k["param_selsched_max_sched_times"]
++  j = var_opt_int_j["param_selsched_max_sched_times"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("semi_relayout_level" in var_opt_int_k) {
++  k = var_opt_int_k["semi_relayout_level"]
++  j = var_opt_int_j["semi_relayout_level"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_simultaneous_prefetches" in var_opt_int_k) {
++  k = var_opt_int_k["param_simultaneous_prefetches"]
++  j = var_opt_int_j["param_simultaneous_prefetches"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sink_frequency_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_sink_frequency_threshold"]
++  j = var_opt_int_j["param_sink_frequency_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sms_dfa_history" in var_opt_int_k) {
++  k = var_opt_int_k["param_sms_dfa_history"]
++  j = var_opt_int_j["param_sms_dfa_history"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sms_loop_average_count_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_sms_loop_average_count_threshold"]
++  j = var_opt_int_j["param_sms_loop_average_count_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sms_max_ii_factor" in var_opt_int_k) {
++  k = var_opt_int_k["param_sms_max_ii_factor"]
++  j = var_opt_int_j["param_sms_max_ii_factor"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sms_min_sc" in var_opt_int_k) {
++  k = var_opt_int_k["param_sms_min_sc"]
++  j = var_opt_int_j["param_sms_min_sc"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sra_max_propagations" in var_opt_int_k) {
++  k = var_opt_int_k["param_sra_max_propagations"]
++  j = var_opt_int_j["param_sra_max_propagations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sra_max_scalarization_size_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_sra_max_scalarization_size_size"]
++  j = var_opt_int_j["param_sra_max_scalarization_size_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_sra_max_scalarization_size_speed" in var_opt_int_k) {
++  k = var_opt_int_k["param_sra_max_scalarization_size_speed"]
++  j = var_opt_int_j["param_sra_max_scalarization_size_speed"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ssa_name_def_chain_limit" in var_opt_int_k) {
++  k = var_opt_int_k["param_ssa_name_def_chain_limit"]
++  j = var_opt_int_j["param_ssa_name_def_chain_limit"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ssp_buffer_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_ssp_buffer_size"]
++  j = var_opt_int_j["param_ssp_buffer_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_stack_clash_protection_guard_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_stack_clash_protection_guard_size"]
++  j = var_opt_int_j["param_stack_clash_protection_guard_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_stack_clash_protection_probe_interval" in var_opt_int_k) {
++  k = var_opt_int_k["param_stack_clash_protection_probe_interval"]
++  j = var_opt_int_j["param_stack_clash_protection_probe_interval"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_store_merging_allow_unaligned" in var_opt_int_k) {
++  k = var_opt_int_k["param_store_merging_allow_unaligned"]
++  j = var_opt_int_j["param_store_merging_allow_unaligned"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_store_merging_max_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_store_merging_max_size"]
++  j = var_opt_int_j["param_store_merging_max_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_struct_reorg_cold_struct_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_struct_reorg_cold_struct_ratio"]
++  j = var_opt_int_j["param_struct_reorg_cold_struct_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_switch_conversion_branch_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_switch_conversion_branch_ratio"]
++  j = var_opt_int_j["param_switch_conversion_branch_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tm_max_aggregate_size" in var_opt_int_k) {
++  k = var_opt_int_k["param_tm_max_aggregate_size"]
++  j = var_opt_int_j["param_tm_max_aggregate_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tracer_dynamic_coverage_feedback" in var_opt_int_k) {
++  k = var_opt_int_k["param_tracer_dynamic_coverage_feedback"]
++  j = var_opt_int_j["param_tracer_dynamic_coverage_feedback"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tracer_dynamic_coverage" in var_opt_int_k) {
++  k = var_opt_int_k["param_tracer_dynamic_coverage"]
++  j = var_opt_int_j["param_tracer_dynamic_coverage"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tracer_max_code_growth" in var_opt_int_k) {
++  k = var_opt_int_k["param_tracer_max_code_growth"]
++  j = var_opt_int_j["param_tracer_max_code_growth"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tracer_min_branch_probability_feedback" in var_opt_int_k) {
++  k = var_opt_int_k["param_tracer_min_branch_probability_feedback"]
++  j = var_opt_int_j["param_tracer_min_branch_probability_feedback"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tracer_min_branch_probability" in var_opt_int_k) {
++  k = var_opt_int_k["param_tracer_min_branch_probability"]
++  j = var_opt_int_j["param_tracer_min_branch_probability"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tracer_min_branch_ratio" in var_opt_int_k) {
++  k = var_opt_int_k["param_tracer_min_branch_ratio"]
++  j = var_opt_int_j["param_tracer_min_branch_ratio"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_tree_reassoc_width" in var_opt_int_k) {
++  k = var_opt_int_k["param_tree_reassoc_width"]
++  j = var_opt_int_j["param_tree_reassoc_width"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_uninit_control_dep_attempts" in var_opt_int_k) {
++  k = var_opt_int_k["param_uninit_control_dep_attempts"]
++  j = var_opt_int_j["param_uninit_control_dep_attempts"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_uninlined_function_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_uninlined_function_insns"]
++  j = var_opt_int_j["param_uninlined_function_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_uninlined_function_time" in var_opt_int_k) {
++  k = var_opt_int_k["param_uninlined_function_time"]
++  j = var_opt_int_j["param_uninlined_function_time"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_uninlined_function_thunk_insns" in var_opt_int_k) {
++  k = var_opt_int_k["param_uninlined_function_thunk_insns"]
++  j = var_opt_int_j["param_uninlined_function_thunk_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_uninlined_function_thunk_time" in var_opt_int_k) {
++  k = var_opt_int_k["param_uninlined_function_thunk_time"]
++  j = var_opt_int_j["param_uninlined_function_thunk_time"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_unlikely_bb_count_fraction" in var_opt_int_k) {
++  k = var_opt_int_k["param_unlikely_bb_count_fraction"]
++  j = var_opt_int_j["param_unlikely_bb_count_fraction"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_unroll_jam_max_unroll" in var_opt_int_k) {
++  k = var_opt_int_k["param_unroll_jam_max_unroll"]
++  j = var_opt_int_j["param_unroll_jam_max_unroll"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_unroll_jam_min_percent" in var_opt_int_k) {
++  k = var_opt_int_k["param_unroll_jam_min_percent"]
++  j = var_opt_int_j["param_unroll_jam_min_percent"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_use_after_scope_direct_emission_threshold" in var_opt_int_k) {
++  k = var_opt_int_k["param_use_after_scope_direct_emission_threshold"]
++  j = var_opt_int_j["param_use_after_scope_direct_emission_threshold"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_flexible_seg_len" in var_opt_int_k) {
++  k = var_opt_int_k["param_flexible_seg_len"]
++  j = var_opt_int_j["param_flexible_seg_len"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vect_epilogues_nomask" in var_opt_int_k) {
++  k = var_opt_int_k["param_vect_epilogues_nomask"]
++  j = var_opt_int_j["param_vect_epilogues_nomask"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vect_induction_float" in var_opt_int_k) {
++  k = var_opt_int_k["param_vect_induction_float"]
++  j = var_opt_int_j["param_vect_induction_float"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vect_inner_loop_cost_factor" in var_opt_int_k) {
++  k = var_opt_int_k["param_vect_inner_loop_cost_factor"]
++  j = var_opt_int_j["param_vect_inner_loop_cost_factor"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vect_max_peeling_for_alignment" in var_opt_int_k) {
++  k = var_opt_int_k["param_vect_max_peeling_for_alignment"]
++  j = var_opt_int_j["param_vect_max_peeling_for_alignment"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vect_max_version_for_alias_checks" in var_opt_int_k) {
++  k = var_opt_int_k["param_vect_max_version_for_alias_checks"]
++  j = var_opt_int_j["param_vect_max_version_for_alias_checks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vect_max_version_for_alignment_checks" in var_opt_int_k) {
++  k = var_opt_int_k["param_vect_max_version_for_alignment_checks"]
++  j = var_opt_int_j["param_vect_max_version_for_alignment_checks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vect_partial_vector_usage" in var_opt_int_k) {
++  k = var_opt_int_k["param_vect_partial_vector_usage"]
++  j = var_opt_int_j["param_vect_partial_vector_usage"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_stalled_insns" in var_opt_int_k) {
++  k = var_opt_int_k["flag_sched_stalled_insns"]
++  j = var_opt_int_j["flag_sched_stalled_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_stalled_insns_dep" in var_opt_int_k) {
++  k = var_opt_int_k["flag_sched_stalled_insns_dep"]
++  j = var_opt_int_j["flag_sched_stalled_insns_dep"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_parallelize_loops" in var_opt_int_k) {
++  k = var_opt_int_k["flag_tree_parallelize_loops"]
++  j = var_opt_int_j["flag_tree_parallelize_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_evrp_mode" in var_opt_enum_k) {
++  k = var_opt_enum_k["param_evrp_mode"]
++  j = var_opt_enum_j["param_evrp_mode"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_ranger_debug" in var_opt_enum_k) {
++  k = var_opt_enum_k["param_ranger_debug"]
++  j = var_opt_enum_j["param_ranger_debug"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_threader_debug" in var_opt_enum_k) {
++  k = var_opt_enum_k["param_threader_debug"]
++  j = var_opt_enum_j["param_threader_debug"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vrp1_mode" in var_opt_enum_k) {
++  k = var_opt_enum_k["param_vrp1_mode"]
++  j = var_opt_enum_j["param_vrp1_mode"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("param_vrp2_mode" in var_opt_enum_k) {
++  k = var_opt_enum_k["param_vrp2_mode"]
++  j = var_opt_enum_j["param_vrp2_mode"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_excess_precision" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_excess_precision"]
++  j = var_opt_enum_j["flag_excess_precision"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_fp_contract_mode" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_fp_contract_mode"]
++  j = var_opt_enum_j["flag_fp_contract_mode"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[3] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ira_algorithm" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_ira_algorithm"]
++  j = var_opt_enum_j["flag_ira_algorithm"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ira_region" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_ira_region"]
++  j = var_opt_enum_j["flag_ira_region"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_live_patching" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_live_patching"]
++  j = var_opt_enum_j["flag_live_patching"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_fp_model" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_fp_model"]
++  j = var_opt_enum_j["flag_fp_model"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_reorder_blocks_algorithm" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_reorder_blocks_algorithm"]
++  j = var_opt_enum_j["flag_reorder_blocks_algorithm"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_simd_cost_model" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_simd_cost_model"]
++  j = var_opt_enum_j["flag_simd_cost_model"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_stack_reuse" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_stack_reuse"]
++  j = var_opt_enum_j["flag_stack_reuse"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_auto_var_init" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_auto_var_init"]
++  j = var_opt_enum_j["flag_auto_var_init"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_vect_cost_model" in var_opt_enum_k) {
++  k = var_opt_enum_k["flag_vect_cost_model"]
++  j = var_opt_enum_j["flag_vect_cost_model"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("optimize" in var_opt_char_k) {
++  k = var_opt_char_k["optimize"]
++  j = var_opt_char_j["optimize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("optimize_size" in var_opt_char_k) {
++  k = var_opt_char_k["optimize_size"]
++  j = var_opt_char_j["optimize_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("optimize_debug" in var_opt_char_k) {
++  k = var_opt_char_k["optimize_debug"]
++  j = var_opt_char_j["optimize_debug"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("optimize_fast" in var_opt_char_k) {
++  k = var_opt_char_k["optimize_fast"]
++  j = var_opt_char_j["optimize_fast"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("warn_inline" in var_opt_char_k) {
++  k = var_opt_char_k["warn_inline"]
++  j = var_opt_char_j["warn_inline"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_aggressive_loop_optimizations" in var_opt_char_k) {
++  k = var_opt_char_k["flag_aggressive_loop_optimizations"]
++  j = var_opt_char_j["flag_aggressive_loop_optimizations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_align_functions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_align_functions"]
++  j = var_opt_char_j["flag_align_functions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_align_jumps" in var_opt_char_k) {
++  k = var_opt_char_k["flag_align_jumps"]
++  j = var_opt_char_j["flag_align_jumps"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_align_labels" in var_opt_char_k) {
++  k = var_opt_char_k["flag_align_labels"]
++  j = var_opt_char_j["flag_align_labels"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_align_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_align_loops"]
++  j = var_opt_char_j["flag_align_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_allocation_dce" in var_opt_char_k) {
++  k = var_opt_char_k["flag_allocation_dce"]
++  j = var_opt_char_j["flag_allocation_dce"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_store_data_races" in var_opt_char_k) {
++  k = var_opt_char_k["flag_store_data_races"]
++  j = var_opt_char_j["flag_store_data_races"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_array_widen_compare" in var_opt_char_k) {
++  k = var_opt_char_k["flag_array_widen_compare"]
++  j = var_opt_char_j["flag_array_widen_compare"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_associative_math" in var_opt_char_k) {
++  k = var_opt_char_k["flag_associative_math"]
++  j = var_opt_char_j["flag_associative_math"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_asynchronous_unwind_tables" in var_opt_char_k) {
++  k = var_opt_char_k["flag_asynchronous_unwind_tables"]
++  j = var_opt_char_j["flag_asynchronous_unwind_tables"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_auto_inc_dec" in var_opt_char_k) {
++  k = var_opt_char_k["flag_auto_inc_dec"]
++  j = var_opt_char_j["flag_auto_inc_dec"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_bit_tests" in var_opt_char_k) {
++  k = var_opt_char_k["flag_bit_tests"]
++  j = var_opt_char_j["flag_bit_tests"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_branch_on_count_reg" in var_opt_char_k) {
++  k = var_opt_char_k["flag_branch_on_count_reg"]
++  j = var_opt_char_j["flag_branch_on_count_reg"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_branch_probabilities" in var_opt_char_k) {
++  k = var_opt_char_k["flag_branch_probabilities"]
++  j = var_opt_char_j["flag_branch_probabilities"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_caller_saves" in var_opt_char_k) {
++  k = var_opt_char_k["flag_caller_saves"]
++  j = var_opt_char_j["flag_caller_saves"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ccmp2" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ccmp2"]
++  j = var_opt_char_j["flag_ccmp2"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_code_hoisting" in var_opt_char_k) {
++  k = var_opt_char_k["flag_code_hoisting"]
++  j = var_opt_char_j["flag_code_hoisting"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_combine_stack_adjustments" in var_opt_char_k) {
++  k = var_opt_char_k["flag_combine_stack_adjustments"]
++  j = var_opt_char_j["flag_combine_stack_adjustments"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_compare_elim_after_reload" in var_opt_char_k) {
++  k = var_opt_char_k["flag_compare_elim_after_reload"]
++  j = var_opt_char_j["flag_compare_elim_after_reload"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_conserve_stack" in var_opt_char_k) {
++  k = var_opt_char_k["flag_conserve_stack"]
++  j = var_opt_char_j["flag_conserve_stack"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_convert_minmax" in var_opt_char_k) {
++  k = var_opt_char_k["flag_convert_minmax"]
++  j = var_opt_char_j["flag_convert_minmax"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_cprop_registers" in var_opt_char_k) {
++  k = var_opt_char_k["flag_cprop_registers"]
++  j = var_opt_char_j["flag_cprop_registers"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_crossjumping" in var_opt_char_k) {
++  k = var_opt_char_k["flag_crossjumping"]
++  j = var_opt_char_j["flag_crossjumping"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_crypto_accel_aes" in var_opt_char_k) {
++  k = var_opt_char_k["flag_crypto_accel_aes"]
++  j = var_opt_char_j["flag_crypto_accel_aes"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_cse_follow_jumps" in var_opt_char_k) {
++  k = var_opt_char_k["flag_cse_follow_jumps"]
++  j = var_opt_char_j["flag_cse_follow_jumps"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_cx_fortran_rules" in var_opt_char_k) {
++  k = var_opt_char_k["flag_cx_fortran_rules"]
++  j = var_opt_char_j["flag_cx_fortran_rules"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_cx_limited_range" in var_opt_char_k) {
++  k = var_opt_char_k["flag_cx_limited_range"]
++  j = var_opt_char_j["flag_cx_limited_range"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_dce" in var_opt_char_k) {
++  k = var_opt_char_k["flag_dce"]
++  j = var_opt_char_j["flag_dce"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_defer_pop" in var_opt_char_k) {
++  k = var_opt_char_k["flag_defer_pop"]
++  j = var_opt_char_j["flag_defer_pop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_delayed_branch" in var_opt_char_k) {
++  k = var_opt_char_k["flag_delayed_branch"]
++  j = var_opt_char_j["flag_delayed_branch"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_delete_dead_exceptions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_delete_dead_exceptions"]
++  j = var_opt_char_j["flag_delete_dead_exceptions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_delete_null_pointer_checks" in var_opt_char_k) {
++  k = var_opt_char_k["flag_delete_null_pointer_checks"]
++  j = var_opt_char_j["flag_delete_null_pointer_checks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_devirtualize" in var_opt_char_k) {
++  k = var_opt_char_k["flag_devirtualize"]
++  j = var_opt_char_j["flag_devirtualize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_devirtualize_speculatively" in var_opt_char_k) {
++  k = var_opt_char_k["flag_devirtualize_speculatively"]
++  j = var_opt_char_j["flag_devirtualize_speculatively"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_dse" in var_opt_char_k) {
++  k = var_opt_char_k["flag_dse"]
++  j = var_opt_char_j["flag_dse"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_early_inlining" in var_opt_char_k) {
++  k = var_opt_char_k["flag_early_inlining"]
++  j = var_opt_char_j["flag_early_inlining"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_exceptions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_exceptions"]
++  j = var_opt_char_j["flag_exceptions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_expensive_optimizations" in var_opt_char_k) {
++  k = var_opt_char_k["flag_expensive_optimizations"]
++  j = var_opt_char_j["flag_expensive_optimizations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_finite_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_finite_loops"]
++  j = var_opt_char_j["flag_finite_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_finite_math_only" in var_opt_char_k) {
++  k = var_opt_char_k["flag_finite_math_only"]
++  j = var_opt_char_j["flag_finite_math_only"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_float_store" in var_opt_char_k) {
++  k = var_opt_char_k["flag_float_store"]
++  j = var_opt_char_j["flag_float_store"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_fold_simple_inlines" in var_opt_char_k) {
++  k = var_opt_char_k["flag_fold_simple_inlines"]
++  j = var_opt_char_j["flag_fold_simple_inlines"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_forward_propagate" in var_opt_char_k) {
++  k = var_opt_char_k["flag_forward_propagate"]
++  j = var_opt_char_j["flag_forward_propagate"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_fp_int_builtin_inexact" in var_opt_char_k) {
++  k = var_opt_char_k["flag_fp_int_builtin_inexact"]
++  j = var_opt_char_j["flag_fp_int_builtin_inexact"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ftz" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ftz"]
++  j = var_opt_char_j["flag_ftz"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_no_function_cse" in var_opt_char_k) {
++  k = var_opt_char_k["flag_no_function_cse"]
++  j = var_opt_char_j["flag_no_function_cse"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_gcse" in var_opt_char_k) {
++  k = var_opt_char_k["flag_gcse"]
++  j = var_opt_char_j["flag_gcse"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_gcse_after_reload" in var_opt_char_k) {
++  k = var_opt_char_k["flag_gcse_after_reload"]
++  j = var_opt_char_j["flag_gcse_after_reload"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_gcse_las" in var_opt_char_k) {
++  k = var_opt_char_k["flag_gcse_las"]
++  j = var_opt_char_j["flag_gcse_las"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_gcse_lm" in var_opt_char_k) {
++  k = var_opt_char_k["flag_gcse_lm"]
++  j = var_opt_char_j["flag_gcse_lm"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[4] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_gcse_sm" in var_opt_char_k) {
++  k = var_opt_char_k["flag_gcse_sm"]
++  j = var_opt_char_j["flag_gcse_sm"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_graphite" in var_opt_char_k) {
++  k = var_opt_char_k["flag_graphite"]
++  j = var_opt_char_j["flag_graphite"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_graphite_identity" in var_opt_char_k) {
++  k = var_opt_char_k["flag_graphite_identity"]
++  j = var_opt_char_j["flag_graphite_identity"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_guess_branch_prob" in var_opt_char_k) {
++  k = var_opt_char_k["flag_guess_branch_prob"]
++  j = var_opt_char_j["flag_guess_branch_prob"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_harden_compares" in var_opt_char_k) {
++  k = var_opt_char_k["flag_harden_compares"]
++  j = var_opt_char_j["flag_harden_compares"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_harden_conditional_branches" in var_opt_char_k) {
++  k = var_opt_char_k["flag_harden_conditional_branches"]
++  j = var_opt_char_j["flag_harden_conditional_branches"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_hoist_adjacent_loads" in var_opt_char_k) {
++  k = var_opt_char_k["flag_hoist_adjacent_loads"]
++  j = var_opt_char_j["flag_hoist_adjacent_loads"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_icp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_icp"]
++  j = var_opt_char_j["flag_icp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_icp_speculatively" in var_opt_char_k) {
++  k = var_opt_char_k["flag_icp_speculatively"]
++  j = var_opt_char_j["flag_icp_speculatively"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_if_conversion" in var_opt_char_k) {
++  k = var_opt_char_k["flag_if_conversion"]
++  j = var_opt_char_j["flag_if_conversion"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_if_conversion_gimple" in var_opt_char_k) {
++  k = var_opt_char_k["flag_if_conversion_gimple"]
++  j = var_opt_char_j["flag_if_conversion_gimple"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_if_conversion2" in var_opt_char_k) {
++  k = var_opt_char_k["flag_if_conversion2"]
++  j = var_opt_char_j["flag_if_conversion2"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ifcvt_allow_complicated_cmps" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ifcvt_allow_complicated_cmps"]
++  j = var_opt_char_j["flag_ifcvt_allow_complicated_cmps"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_indirect_inlining" in var_opt_char_k) {
++  k = var_opt_char_k["flag_indirect_inlining"]
++  j = var_opt_char_j["flag_indirect_inlining"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_no_inline" in var_opt_char_k) {
++  k = var_opt_char_k["flag_no_inline"]
++  j = var_opt_char_j["flag_no_inline"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_inline_atomics" in var_opt_char_k) {
++  k = var_opt_char_k["flag_inline_atomics"]
++  j = var_opt_char_j["flag_inline_atomics"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_inline_functions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_inline_functions"]
++  j = var_opt_char_j["flag_inline_functions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_inline_functions_called_once" in var_opt_char_k) {
++  k = var_opt_char_k["flag_inline_functions_called_once"]
++  j = var_opt_char_j["flag_inline_functions_called_once"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_inline_small_functions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_inline_small_functions"]
++  j = var_opt_char_j["flag_inline_small_functions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_bit_cp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_bit_cp"]
++  j = var_opt_char_j["flag_ipa_bit_cp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_cp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_cp"]
++  j = var_opt_char_j["flag_ipa_cp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_cp_clone" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_cp_clone"]
++  j = var_opt_char_j["flag_ipa_cp_clone"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_ic" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_ic"]
++  j = var_opt_char_j["flag_ipa_ic"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_icf" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_icf"]
++  j = var_opt_char_j["flag_ipa_icf"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_icf_functions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_icf_functions"]
++  j = var_opt_char_j["flag_ipa_icf_functions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_icf_variables" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_icf_variables"]
++  j = var_opt_char_j["flag_ipa_icf_variables"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_modref" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_modref"]
++  j = var_opt_char_j["flag_ipa_modref"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_prefetch" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_prefetch"]
++  j = var_opt_char_j["flag_ipa_prefetch"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_profile" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_profile"]
++  j = var_opt_char_j["flag_ipa_profile"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_pta" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_pta"]
++  j = var_opt_char_j["flag_ipa_pta"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_pure_const" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_pure_const"]
++  j = var_opt_char_j["flag_ipa_pure_const"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_ra" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_ra"]
++  j = var_opt_char_j["flag_ipa_ra"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_reference" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_reference"]
++  j = var_opt_char_j["flag_ipa_reference"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_reference_addressable" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_reference_addressable"]
++  j = var_opt_char_j["flag_ipa_reference_addressable"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_reorder_fields" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_reorder_fields"]
++  j = var_opt_char_j["flag_ipa_reorder_fields"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_sra" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_sra"]
++  j = var_opt_char_j["flag_ipa_sra"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_stack_alignment" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_stack_alignment"]
++  j = var_opt_char_j["flag_ipa_stack_alignment"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_strict_aliasing" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_strict_aliasing"]
++  j = var_opt_char_j["flag_ipa_strict_aliasing"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_struct_reorg" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_struct_reorg"]
++  j = var_opt_char_j["flag_ipa_struct_reorg"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ipa_vrp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ipa_vrp"]
++  j = var_opt_char_j["flag_ipa_vrp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ira_hoist_pressure" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ira_hoist_pressure"]
++  j = var_opt_char_j["flag_ira_hoist_pressure"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ira_loop_pressure" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ira_loop_pressure"]
++  j = var_opt_char_j["flag_ira_loop_pressure"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ira_share_save_slots" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ira_share_save_slots"]
++  j = var_opt_char_j["flag_ira_share_save_slots"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ira_share_spill_slots" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ira_share_spill_slots"]
++  j = var_opt_char_j["flag_ira_share_spill_slots"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_isolate_erroneous_paths_attribute" in var_opt_char_k) {
++  k = var_opt_char_k["flag_isolate_erroneous_paths_attribute"]
++  j = var_opt_char_j["flag_isolate_erroneous_paths_attribute"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_isolate_erroneous_paths_dereference" in var_opt_char_k) {
++  k = var_opt_char_k["flag_isolate_erroneous_paths_dereference"]
++  j = var_opt_char_j["flag_isolate_erroneous_paths_dereference"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ivopts" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ivopts"]
++  j = var_opt_char_j["flag_ivopts"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_jump_tables" in var_opt_char_k) {
++  k = var_opt_char_k["flag_jump_tables"]
++  j = var_opt_char_j["flag_jump_tables"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_keep_gc_roots_live" in var_opt_char_k) {
++  k = var_opt_char_k["flag_keep_gc_roots_live"]
++  j = var_opt_char_j["flag_keep_gc_roots_live"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_kernel_pgo" in var_opt_char_k) {
++  k = var_opt_char_k["flag_kernel_pgo"]
++  j = var_opt_char_j["flag_kernel_pgo"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_lifetime_dse" in var_opt_char_k) {
++  k = var_opt_char_k["flag_lifetime_dse"]
++  j = var_opt_char_j["flag_lifetime_dse"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_limit_function_alignment" in var_opt_char_k) {
++  k = var_opt_char_k["flag_limit_function_alignment"]
++  j = var_opt_char_j["flag_limit_function_alignment"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_live_range_shrinkage" in var_opt_char_k) {
++  k = var_opt_char_k["flag_live_range_shrinkage"]
++  j = var_opt_char_j["flag_live_range_shrinkage"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_loop_crc" in var_opt_char_k) {
++  k = var_opt_char_k["flag_loop_crc"]
++  j = var_opt_char_j["flag_loop_crc"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_loop_elim" in var_opt_char_k) {
++  k = var_opt_char_k["flag_loop_elim"]
++  j = var_opt_char_j["flag_loop_elim"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_loop_interchange" in var_opt_char_k) {
++  k = var_opt_char_k["flag_loop_interchange"]
++  j = var_opt_char_j["flag_loop_interchange"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_loop_nest_optimize" in var_opt_char_k) {
++  k = var_opt_char_k["flag_loop_nest_optimize"]
++  j = var_opt_char_j["flag_loop_nest_optimize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_loop_parallelize_all" in var_opt_char_k) {
++  k = var_opt_char_k["flag_loop_parallelize_all"]
++  j = var_opt_char_j["flag_loop_parallelize_all"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_unroll_jam" in var_opt_char_k) {
++  k = var_opt_char_k["flag_unroll_jam"]
++  j = var_opt_char_j["flag_unroll_jam"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_lra_remat" in var_opt_char_k) {
++  k = var_opt_char_k["flag_lra_remat"]
++  j = var_opt_char_j["flag_lra_remat"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_errno_math" in var_opt_char_k) {
++  k = var_opt_char_k["flag_errno_math"]
++  j = var_opt_char_j["flag_errno_math"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_merge_mull" in var_opt_char_k) {
++  k = var_opt_char_k["flag_merge_mull"]
++  j = var_opt_char_j["flag_merge_mull"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_modulo_sched" in var_opt_char_k) {
++  k = var_opt_char_k["flag_modulo_sched"]
++  j = var_opt_char_j["flag_modulo_sched"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_modulo_sched_allow_regmoves" in var_opt_char_k) {
++  k = var_opt_char_k["flag_modulo_sched_allow_regmoves"]
++  j = var_opt_char_j["flag_modulo_sched_allow_regmoves"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[5] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_move_loop_invariants" in var_opt_char_k) {
++  k = var_opt_char_k["flag_move_loop_invariants"]
++  j = var_opt_char_j["flag_move_loop_invariants"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_move_loop_stores" in var_opt_char_k) {
++  k = var_opt_char_k["flag_move_loop_stores"]
++  j = var_opt_char_j["flag_move_loop_stores"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_non_call_exceptions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_non_call_exceptions"]
++  j = var_opt_char_j["flag_non_call_exceptions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_nothrow_opt" in var_opt_char_k) {
++  k = var_opt_char_k["flag_nothrow_opt"]
++  j = var_opt_char_j["flag_nothrow_opt"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_omit_frame_pointer" in var_opt_char_k) {
++  k = var_opt_char_k["flag_omit_frame_pointer"]
++  j = var_opt_char_j["flag_omit_frame_pointer"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_opt_info" in var_opt_char_k) {
++  k = var_opt_char_k["flag_opt_info"]
++  j = var_opt_char_j["flag_opt_info"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_optimize_sibling_calls" in var_opt_char_k) {
++  k = var_opt_char_k["flag_optimize_sibling_calls"]
++  j = var_opt_char_j["flag_optimize_sibling_calls"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_optimize_strlen" in var_opt_char_k) {
++  k = var_opt_char_k["flag_optimize_strlen"]
++  j = var_opt_char_j["flag_optimize_strlen"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_pack_struct" in var_opt_char_k) {
++  k = var_opt_char_k["flag_pack_struct"]
++  j = var_opt_char_j["flag_pack_struct"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_partial_inlining" in var_opt_char_k) {
++  k = var_opt_char_k["flag_partial_inlining"]
++  j = var_opt_char_j["flag_partial_inlining"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_peel_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_peel_loops"]
++  j = var_opt_char_j["flag_peel_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_no_peephole" in var_opt_char_k) {
++  k = var_opt_char_k["flag_no_peephole"]
++  j = var_opt_char_j["flag_no_peephole"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_peephole2" in var_opt_char_k) {
++  k = var_opt_char_k["flag_peephole2"]
++  j = var_opt_char_j["flag_peephole2"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_plt" in var_opt_char_k) {
++  k = var_opt_char_k["flag_plt"]
++  j = var_opt_char_j["flag_plt"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_predictive_commoning" in var_opt_char_k) {
++  k = var_opt_char_k["flag_predictive_commoning"]
++  j = var_opt_char_j["flag_predictive_commoning"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_prefetch_loop_arrays" in var_opt_char_k) {
++  k = var_opt_char_k["flag_prefetch_loop_arrays"]
++  j = var_opt_char_j["flag_prefetch_loop_arrays"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_printf_return_value" in var_opt_char_k) {
++  k = var_opt_char_k["flag_printf_return_value"]
++  j = var_opt_char_j["flag_printf_return_value"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_profile_partial_training" in var_opt_char_k) {
++  k = var_opt_char_k["flag_profile_partial_training"]
++  j = var_opt_char_j["flag_profile_partial_training"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_profile_reorder_functions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_profile_reorder_functions"]
++  j = var_opt_char_j["flag_profile_reorder_functions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_reciprocal_math" in var_opt_char_k) {
++  k = var_opt_char_k["flag_reciprocal_math"]
++  j = var_opt_char_j["flag_reciprocal_math"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ree" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ree"]
++  j = var_opt_char_j["flag_ree"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_pcc_struct_return" in var_opt_char_k) {
++  k = var_opt_char_k["flag_pcc_struct_return"]
++  j = var_opt_char_j["flag_pcc_struct_return"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_rename_registers" in var_opt_char_k) {
++  k = var_opt_char_k["flag_rename_registers"]
++  j = var_opt_char_j["flag_rename_registers"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_reorder_blocks" in var_opt_char_k) {
++  k = var_opt_char_k["flag_reorder_blocks"]
++  j = var_opt_char_j["flag_reorder_blocks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_reorder_blocks_and_partition" in var_opt_char_k) {
++  k = var_opt_char_k["flag_reorder_blocks_and_partition"]
++  j = var_opt_char_j["flag_reorder_blocks_and_partition"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_reorder_functions" in var_opt_char_k) {
++  k = var_opt_char_k["flag_reorder_functions"]
++  j = var_opt_char_j["flag_reorder_functions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_rerun_cse_after_loop" in var_opt_char_k) {
++  k = var_opt_char_k["flag_rerun_cse_after_loop"]
++  j = var_opt_char_j["flag_rerun_cse_after_loop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_resched_modulo_sched" in var_opt_char_k) {
++  k = var_opt_char_k["flag_resched_modulo_sched"]
++  j = var_opt_char_j["flag_resched_modulo_sched"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_rounding_math" in var_opt_char_k) {
++  k = var_opt_char_k["flag_rounding_math"]
++  j = var_opt_char_j["flag_rounding_math"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_rtti" in var_opt_char_k) {
++  k = var_opt_char_k["flag_rtti"]
++  j = var_opt_char_j["flag_rtti"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_save_optimization_record" in var_opt_char_k) {
++  k = var_opt_char_k["flag_save_optimization_record"]
++  j = var_opt_char_j["flag_save_optimization_record"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_critical_path_heuristic" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched_critical_path_heuristic"]
++  j = var_opt_char_j["flag_sched_critical_path_heuristic"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_dep_count_heuristic" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched_dep_count_heuristic"]
++  j = var_opt_char_j["flag_sched_dep_count_heuristic"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_group_heuristic" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched_group_heuristic"]
++  j = var_opt_char_j["flag_sched_group_heuristic"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_schedule_interblock" in var_opt_char_k) {
++  k = var_opt_char_k["flag_schedule_interblock"]
++  j = var_opt_char_j["flag_schedule_interblock"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_last_insn_heuristic" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched_last_insn_heuristic"]
++  j = var_opt_char_j["flag_sched_last_insn_heuristic"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_pressure" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched_pressure"]
++  j = var_opt_char_j["flag_sched_pressure"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_rank_heuristic" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched_rank_heuristic"]
++  j = var_opt_char_j["flag_sched_rank_heuristic"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_schedule_speculative" in var_opt_char_k) {
++  k = var_opt_char_k["flag_schedule_speculative"]
++  j = var_opt_char_j["flag_schedule_speculative"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched_spec_insn_heuristic" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched_spec_insn_heuristic"]
++  j = var_opt_char_j["flag_sched_spec_insn_heuristic"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_schedule_speculative_load" in var_opt_char_k) {
++  k = var_opt_char_k["flag_schedule_speculative_load"]
++  j = var_opt_char_j["flag_schedule_speculative_load"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_schedule_speculative_load_dangerous" in var_opt_char_k) {
++  k = var_opt_char_k["flag_schedule_speculative_load_dangerous"]
++  j = var_opt_char_j["flag_schedule_speculative_load_dangerous"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sched2_use_superblocks" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sched2_use_superblocks"]
++  j = var_opt_char_j["flag_sched2_use_superblocks"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_schedule_fusion" in var_opt_char_k) {
++  k = var_opt_char_k["flag_schedule_fusion"]
++  j = var_opt_char_j["flag_schedule_fusion"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_schedule_insns" in var_opt_char_k) {
++  k = var_opt_char_k["flag_schedule_insns"]
++  j = var_opt_char_j["flag_schedule_insns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_schedule_insns_after_reload" in var_opt_char_k) {
++  k = var_opt_char_k["flag_schedule_insns_after_reload"]
++  j = var_opt_char_j["flag_schedule_insns_after_reload"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_section_anchors" in var_opt_char_k) {
++  k = var_opt_char_k["flag_section_anchors"]
++  j = var_opt_char_j["flag_section_anchors"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sel_sched_pipelining" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sel_sched_pipelining"]
++  j = var_opt_char_j["flag_sel_sched_pipelining"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sel_sched_pipelining_outer_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sel_sched_pipelining_outer_loops"]
++  j = var_opt_char_j["flag_sel_sched_pipelining_outer_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_sel_sched_reschedule_pipelined" in var_opt_char_k) {
++  k = var_opt_char_k["flag_sel_sched_reschedule_pipelined"]
++  j = var_opt_char_j["flag_sel_sched_reschedule_pipelined"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_selective_scheduling" in var_opt_char_k) {
++  k = var_opt_char_k["flag_selective_scheduling"]
++  j = var_opt_char_j["flag_selective_scheduling"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_selective_scheduling2" in var_opt_char_k) {
++  k = var_opt_char_k["flag_selective_scheduling2"]
++  j = var_opt_char_j["flag_selective_scheduling2"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_semantic_interposition" in var_opt_char_k) {
++  k = var_opt_char_k["flag_semantic_interposition"]
++  j = var_opt_char_j["flag_semantic_interposition"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_short_enums" in var_opt_char_k) {
++  k = var_opt_char_k["flag_short_enums"]
++  j = var_opt_char_j["flag_short_enums"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_short_wchar" in var_opt_char_k) {
++  k = var_opt_char_k["flag_short_wchar"]
++  j = var_opt_char_j["flag_short_wchar"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_shrink_wrap" in var_opt_char_k) {
++  k = var_opt_char_k["flag_shrink_wrap"]
++  j = var_opt_char_j["flag_shrink_wrap"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_shrink_wrap_separate" in var_opt_char_k) {
++  k = var_opt_char_k["flag_shrink_wrap_separate"]
++  j = var_opt_char_j["flag_shrink_wrap_separate"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_signaling_nans" in var_opt_char_k) {
++  k = var_opt_char_k["flag_signaling_nans"]
++  j = var_opt_char_j["flag_signaling_nans"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_signed_zeros" in var_opt_char_k) {
++  k = var_opt_char_k["flag_signed_zeros"]
++  j = var_opt_char_j["flag_signed_zeros"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_simdmath" in var_opt_char_k) {
++  k = var_opt_char_k["flag_simdmath"]
++  j = var_opt_char_j["flag_simdmath"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_single_precision_constant" in var_opt_char_k) {
++  k = var_opt_char_k["flag_single_precision_constant"]
++  j = var_opt_char_j["flag_single_precision_constant"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_split_ivs_in_unroller" in var_opt_char_k) {
++  k = var_opt_char_k["flag_split_ivs_in_unroller"]
++  j = var_opt_char_j["flag_split_ivs_in_unroller"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_split_ldp_stp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_split_ldp_stp"]
++  j = var_opt_char_j["flag_split_ldp_stp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_split_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_split_loops"]
++  j = var_opt_char_j["flag_split_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[6] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_split_paths" in var_opt_char_k) {
++  k = var_opt_char_k["flag_split_paths"]
++  j = var_opt_char_j["flag_split_paths"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_split_wide_types" in var_opt_char_k) {
++  k = var_opt_char_k["flag_split_wide_types"]
++  j = var_opt_char_j["flag_split_wide_types"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_split_wide_types_early" in var_opt_char_k) {
++  k = var_opt_char_k["flag_split_wide_types_early"]
++  j = var_opt_char_j["flag_split_wide_types_early"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ssa_backprop" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ssa_backprop"]
++  j = var_opt_char_j["flag_ssa_backprop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_ssa_phiopt" in var_opt_char_k) {
++  k = var_opt_char_k["flag_ssa_phiopt"]
++  j = var_opt_char_j["flag_ssa_phiopt"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_stack_clash_protection" in var_opt_char_k) {
++  k = var_opt_char_k["flag_stack_clash_protection"]
++  j = var_opt_char_j["flag_stack_clash_protection"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_stack_protect" in var_opt_char_k) {
++  k = var_opt_char_k["flag_stack_protect"]
++  j = var_opt_char_j["flag_stack_protect"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_stdarg_opt" in var_opt_char_k) {
++  k = var_opt_char_k["flag_stdarg_opt"]
++  j = var_opt_char_j["flag_stdarg_opt"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_store_merging" in var_opt_char_k) {
++  k = var_opt_char_k["flag_store_merging"]
++  j = var_opt_char_j["flag_store_merging"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_strict_aliasing" in var_opt_char_k) {
++  k = var_opt_char_k["flag_strict_aliasing"]
++  j = var_opt_char_j["flag_strict_aliasing"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_strict_enums" in var_opt_char_k) {
++  k = var_opt_char_k["flag_strict_enums"]
++  j = var_opt_char_j["flag_strict_enums"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_strict_volatile_bitfields" in var_opt_char_k) {
++  k = var_opt_char_k["flag_strict_volatile_bitfields"]
++  j = var_opt_char_j["flag_strict_volatile_bitfields"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_thread_jumps" in var_opt_char_k) {
++  k = var_opt_char_k["flag_thread_jumps"]
++  j = var_opt_char_j["flag_thread_jumps"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_threadsafe_statics" in var_opt_char_k) {
++  k = var_opt_char_k["flag_threadsafe_statics"]
++  j = var_opt_char_j["flag_threadsafe_statics"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_toplevel_reorder" in var_opt_char_k) {
++  k = var_opt_char_k["flag_toplevel_reorder"]
++  j = var_opt_char_j["flag_toplevel_reorder"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tracer" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tracer"]
++  j = var_opt_char_j["flag_tracer"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_trapping_math" in var_opt_char_k) {
++  k = var_opt_char_k["flag_trapping_math"]
++  j = var_opt_char_j["flag_trapping_math"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_trapv" in var_opt_char_k) {
++  k = var_opt_char_k["flag_trapv"]
++  j = var_opt_char_j["flag_trapv"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_bit_ccp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_bit_ccp"]
++  j = var_opt_char_j["flag_tree_bit_ccp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 18) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_builtin_call_dce" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_builtin_call_dce"]
++  j = var_opt_char_j["flag_tree_builtin_call_dce"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 19) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_ccp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_ccp"]
++  j = var_opt_char_j["flag_tree_ccp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 20) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_ch" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_ch"]
++  j = var_opt_char_j["flag_tree_ch"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 21) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_coalesce_vars" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_coalesce_vars"]
++  j = var_opt_char_j["flag_tree_coalesce_vars"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 22) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_copy_prop" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_copy_prop"]
++  j = var_opt_char_j["flag_tree_copy_prop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 23) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_cselim" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_cselim"]
++  j = var_opt_char_j["flag_tree_cselim"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 24) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_dce" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_dce"]
++  j = var_opt_char_j["flag_tree_dce"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 25) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_dom" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_dom"]
++  j = var_opt_char_j["flag_tree_dom"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 26) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_dse" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_dse"]
++  j = var_opt_char_j["flag_tree_dse"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 27) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_forwprop" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_forwprop"]
++  j = var_opt_char_j["flag_tree_forwprop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 28) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_fre" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_fre"]
++  j = var_opt_char_j["flag_tree_fre"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 29) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_loop_distribute_patterns" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_loop_distribute_patterns"]
++  j = var_opt_char_j["flag_tree_loop_distribute_patterns"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 30) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_loop_distribution" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_loop_distribution"]
++  j = var_opt_char_j["flag_tree_loop_distribution"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 31) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_loop_if_convert" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_loop_if_convert"]
++  j = var_opt_char_j["flag_tree_loop_if_convert"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 32) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_loop_im" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_loop_im"]
++  j = var_opt_char_j["flag_tree_loop_im"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 33) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_loop_ivcanon" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_loop_ivcanon"]
++  j = var_opt_char_j["flag_tree_loop_ivcanon"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 34) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_loop_optimize" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_loop_optimize"]
++  j = var_opt_char_j["flag_tree_loop_optimize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 35) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_loop_vectorize" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_loop_vectorize"]
++  j = var_opt_char_j["flag_tree_loop_vectorize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 36) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_live_range_split" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_live_range_split"]
++  j = var_opt_char_j["flag_tree_live_range_split"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 37) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_partial_pre" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_partial_pre"]
++  j = var_opt_char_j["flag_tree_partial_pre"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 38) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_phiprop" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_phiprop"]
++  j = var_opt_char_j["flag_tree_phiprop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 39) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_pre" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_pre"]
++  j = var_opt_char_j["flag_tree_pre"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 40) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_pta" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_pta"]
++  j = var_opt_char_j["flag_tree_pta"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 41) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_reassoc" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_reassoc"]
++  j = var_opt_char_j["flag_tree_reassoc"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 42) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_scev_cprop" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_scev_cprop"]
++  j = var_opt_char_j["flag_tree_scev_cprop"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 43) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_sink" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_sink"]
++  j = var_opt_char_j["flag_tree_sink"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 44) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_slp_transpose_vectorize" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_slp_transpose_vectorize"]
++  j = var_opt_char_j["flag_tree_slp_transpose_vectorize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 45) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_slp_vectorize" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_slp_vectorize"]
++  j = var_opt_char_j["flag_tree_slp_vectorize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 46) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_slsr" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_slsr"]
++  j = var_opt_char_j["flag_tree_slsr"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 47) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_sra" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_sra"]
++  j = var_opt_char_j["flag_tree_sra"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 48) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_switch_conversion" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_switch_conversion"]
++  j = var_opt_char_j["flag_tree_switch_conversion"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 49) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_tail_merge" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_tail_merge"]
++  j = var_opt_char_j["flag_tree_tail_merge"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 50) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_ter" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_ter"]
++  j = var_opt_char_j["flag_tree_ter"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 51) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_vectorize" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_vectorize"]
++  j = var_opt_char_j["flag_tree_vectorize"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 52) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_tree_vrp" in var_opt_char_k) {
++  k = var_opt_char_k["flag_tree_vrp"]
++  j = var_opt_char_j["flag_tree_vrp"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 53) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_unconstrained_commons" in var_opt_char_k) {
++  k = var_opt_char_k["flag_unconstrained_commons"]
++  j = var_opt_char_j["flag_unconstrained_commons"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 54) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_unroll_all_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_unroll_all_loops"]
++  j = var_opt_char_j["flag_unroll_all_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 55) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_cunroll_grow_size" in var_opt_char_k) {
++  k = var_opt_char_k["flag_cunroll_grow_size"]
++  j = var_opt_char_j["flag_cunroll_grow_size"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 56) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_unroll_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_unroll_loops"]
++  j = var_opt_char_j["flag_unroll_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 57) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_unsafe_math_optimizations" in var_opt_char_k) {
++  k = var_opt_char_k["flag_unsafe_math_optimizations"]
++  j = var_opt_char_j["flag_unsafe_math_optimizations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 58) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_unswitch_loops" in var_opt_char_k) {
++  k = var_opt_char_k["flag_unswitch_loops"]
++  j = var_opt_char_j["flag_unswitch_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 59) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_unwind_tables" in var_opt_char_k) {
++  k = var_opt_char_k["flag_unwind_tables"]
++  j = var_opt_char_j["flag_unwind_tables"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 60) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_var_tracking" in var_opt_char_k) {
++  k = var_opt_char_k["flag_var_tracking"]
++  j = var_opt_char_j["flag_var_tracking"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 61) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_var_tracking_assignments" in var_opt_char_k) {
++  k = var_opt_char_k["flag_var_tracking_assignments"]
++  j = var_opt_char_j["flag_var_tracking_assignments"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 62) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_var_tracking_assignments_toggle" in var_opt_char_k) {
++  k = var_opt_char_k["flag_var_tracking_assignments_toggle"]
++  j = var_opt_char_j["flag_var_tracking_assignments_toggle"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[7] >> 63) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_var_tracking_uninit" in var_opt_char_k) {
++  k = var_opt_char_k["flag_var_tracking_uninit"]
++  j = var_opt_char_j["flag_var_tracking_uninit"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 0) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_variable_expansion_in_unroller" in var_opt_char_k) {
++  k = var_opt_char_k["flag_variable_expansion_in_unroller"]
++  j = var_opt_char_j["flag_variable_expansion_in_unroller"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 1) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_version_loops_for_strides" in var_opt_char_k) {
++  k = var_opt_char_k["flag_version_loops_for_strides"]
++  j = var_opt_char_j["flag_version_loops_for_strides"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 2) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_value_profile_transformations" in var_opt_char_k) {
++  k = var_opt_char_k["flag_value_profile_transformations"]
++  j = var_opt_char_j["flag_value_profile_transformations"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 3) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_web" in var_opt_char_k) {
++  k = var_opt_char_k["flag_web"]
++  j = var_opt_char_j["flag_web"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 4) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_wrapv" in var_opt_char_k) {
++  k = var_opt_char_k["flag_wrapv"]
++  j = var_opt_char_j["flag_wrapv"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 5) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_wrapv_pointer" in var_opt_char_k) {
++  k = var_opt_char_k["flag_wrapv_pointer"]
++  j = var_opt_char_j["flag_wrapv_pointer"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 6) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("debug_nonbind_markers_p" in var_opt_char_k) {
++  k = var_opt_char_k["debug_nonbind_markers_p"]
++  j = var_opt_char_j["debug_nonbind_markers_p"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 7) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_cmlt_arith" in var_opt_char_k) {
++  k = var_opt_char_k["flag_cmlt_arith"]
++  j = var_opt_char_j["flag_cmlt_arith"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 8) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_mlow_precision_div" in var_opt_char_k) {
++  k = var_opt_char_k["flag_mlow_precision_div"]
++  j = var_opt_char_j["flag_mlow_precision_div"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 9) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_mrecip_low_precision_sqrt" in var_opt_char_k) {
++  k = var_opt_char_k["flag_mrecip_low_precision_sqrt"]
++  j = var_opt_char_j["flag_mrecip_low_precision_sqrt"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 10) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_mlow_precision_sqrt" in var_opt_char_k) {
++  k = var_opt_char_k["flag_mlow_precision_sqrt"]
++  j = var_opt_char_j["flag_mlow_precision_sqrt"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 11) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_simdmath_64" in var_opt_char_k) {
++  k = var_opt_char_k["flag_simdmath_64"]
++  j = var_opt_char_j["flag_simdmath_64"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 12) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("str_align_functions" in var_opt_string_k) {
++  k = var_opt_string_k["str_align_functions"]
++  j = var_opt_string_j["str_align_functions"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 13) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("str_align_jumps" in var_opt_string_k) {
++  k = var_opt_string_k["str_align_jumps"]
++  j = var_opt_string_j["str_align_jumps"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 14) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("str_align_labels" in var_opt_string_k) {
++  k = var_opt_string_k["str_align_labels"]
++  j = var_opt_string_j["str_align_labels"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 15) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("str_align_loops" in var_opt_string_k) {
++  k = var_opt_string_k["str_align_loops"]
++  j = var_opt_string_j["str_align_loops"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 16) & HOST_WIDE_INT_1U) << "j";"
++}
++if ("flag_patchable_function_entry" in var_opt_string_k) {
++  k = var_opt_string_k["flag_patchable_function_entry"]
++  j = var_opt_string_j["flag_patchable_function_entry"]
++  print "  ptr->explicit_mask[" k "] |= ((explicit_mask_prev[8] >> 17) & HOST_WIDE_INT_1U) << "j";"
++}
++print "}";
++print "";
++
+ print "/* Free heap memory used by optimization options  */";
+ print "void";
+ print "cl_optimization_option_free (struct cl_optimization *ptr ATTRIBUTE_UNUSED)";
+-- 
+2.25.1
+
diff --git a/0304-Add-multi-version-lto-symbol-parse-cross-lto-units-i.patch b/0304-Add-multi-version-lto-symbol-parse-cross-lto-units-i.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cdd28b19d8973b19bc8d8c9fa88df194bd4ab729
--- /dev/null
+++ b/0304-Add-multi-version-lto-symbol-parse-cross-lto-units-i.patch
@@ -0,0 +1,963 @@
+From f81a5b294711e3a420fe66702f0d9221332271c4 Mon Sep 17 00:00:00 2001
+From: h00564365 
+Date: Wed, 13 Nov 2024 17:18:01 +0800
+Subject: [PATCH 2/2] Add multi-version lto symbol parse, cross lto units
+ ipa-inline extension, and lto compression algorithm specified.
+
+---
+ gcc/common.opt                               |  20 +++
+ gcc/config/aarch64/aarch64.cc                |  41 ++++++
+ gcc/doc/tm.texi                              |   6 +
+ gcc/doc/tm.texi.in                           |   2 +
+ gcc/ipa-inline.cc                            | 141 ++++++++++++++++++-
+ gcc/lto-compress.cc                          |   6 +-
+ gcc/lto-section-in.cc                        |   5 +
+ gcc/lto-streamer-out.cc                      |   7 +-
+ gcc/lto-wrapper.cc                           |   4 +
+ gcc/optc-save-gen.awk                        |  57 ++++++++
+ gcc/opth-gen.awk                             |   3 +
+ gcc/opts.cc                                  |  46 ++++++
+ gcc/target.def                               |  10 ++
+ gcc/testsuite/gcc.dg/lto/binary-inline-1_0.c |  15 ++
+ gcc/testsuite/gcc.dg/lto/binary-inline-1_1.c |   6 +
+ gcc/testsuite/gcc.dg/lto/binary-inline-2_0.c |  15 ++
+ gcc/testsuite/gcc.dg/lto/binary-inline-2_1.c |   5 +
+ gcc/testsuite/gcc.dg/lto/binary-inline-3_0.c |  15 ++
+ gcc/testsuite/gcc.dg/lto/binary-inline-3_1.c |  10 ++
+ gcc/tree-streamer-in.cc                      |  58 +++++++-
+ lto-plugin/lto-plugin.c                      |  83 +++++++++++
+ 21 files changed, 547 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/lto/binary-inline-1_0.c
+ create mode 100644 gcc/testsuite/gcc.dg/lto/binary-inline-1_1.c
+ create mode 100644 gcc/testsuite/gcc.dg/lto/binary-inline-2_0.c
+ create mode 100644 gcc/testsuite/gcc.dg/lto/binary-inline-2_1.c
+ create mode 100644 gcc/testsuite/gcc.dg/lto/binary-inline-3_0.c
+ create mode 100644 gcc/testsuite/gcc.dg/lto/binary-inline-3_1.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index be5fcc681..78cfc333a 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1928,6 +1928,21 @@ finline-atomics
+ Common Var(flag_inline_atomics) Init(1) Optimization
+ Inline __atomic operations when a lock free instruction sequence is available.
+ 
++fmulti-version-lib=
++Common Joined Var(multi_version_lib_string)
++Use specify LTO stream in mode for specified target (object or lib). If there
++are multiple target files, use commas (,) to separate them and without spaces.
++
++finline-force
++Common Var(flag_inline_force) Init(0) Optimization
++Force perform ipa inline when march options are incompatible between functions.
++
++finline-force=
++Common Joined Var(force_inline_targets_string)
++Force perform ipa inline specified target(object or lib) when march options are
++incompatible between functions.  If there are multiple target files, use commas
++(,) to separate them and without spaces.
++
+ fcf-protection
+ Common RejectNegative Alias(fcf-protection=,full)
+ 
+@@ -2168,6 +2183,11 @@ flto-partition=
+ Common Joined RejectNegative Enum(lto_partition_model) Var(flag_lto_partition) Init(LTO_PARTITION_BALANCED)
+ Specify the algorithm to partition symbols and vars at linktime.
+ 
++flto-compression-algorithm=
++Common Joined Var(lto_compression_algorithm)
++-flto-compression-algorithm= Generate lto compression in zlib/zstd
++format .
++
+ ; The initial value of -1 comes from Z_DEFAULT_COMPRESSION in zlib.h.
+ flto-compression-level=
+ Common Joined RejectNegative UInteger Var(flag_lto_compression_level) Init(-1) IntegerRange(0, 19)
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 025a3c478..f095f17aa 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -20829,6 +20829,44 @@ aarch64_option_print (FILE *file, int indent, struct cl_target_option *ptr)
+ 	   arch->name, extension.c_str ());
+ }
+ 
++/* Implement TARGET_OPTION_PRINT_DIFF.  */
++
++static void
++aarch64_option_print_diff (FILE *file, int indent,
++			   struct cl_target_option *ptr1,
++			   struct cl_target_option *ptr2)
++{
++  const char *const cpu1
++    = aarch64_get_tune_cpu (ptr1->x_selected_tune)->name;
++  const struct processor *arch1 = aarch64_get_arch (ptr1->x_selected_arch);
++  std::string extension1
++    = aarch64_get_extension_string_for_isa_flags (ptr1->x_aarch64_isa_flags,
++						  arch1->flags);
++
++  const char *const cpu2
++    = aarch64_get_tune_cpu (ptr2->x_selected_tune)->name;
++  const struct processor *arch2 = aarch64_get_arch (ptr2->x_selected_arch);
++  std::string extension2
++    = aarch64_get_extension_string_for_isa_flags (ptr2->x_aarch64_isa_flags,
++						  arch2->flags);
++
++  if (cpu1 != cpu2 && (!cpu1 || !cpu2 || strcmp (cpu1, cpu2)))
++    fprintf (file, "%*s%s (%s/%s)\n", indent, "",
++	     "cpu", cpu1 ? cpu1 : "(null)", cpu2 ? cpu2 : "(null)");
++
++  if (arch1->name != arch2->name
++      && (!arch1->name || !arch2->name || strcmp (arch1->name, arch2->name)))
++    fprintf (file, "%*s%s (%s/%s)\n", indent, "",
++	     "arch", arch1->name ? arch1->name : "(null)",
++	     arch2->name ? arch2->name : "(null)");
++
++  if (extension1 != extension2)
++    fprintf (file, "%*s%s (%s/%s)\n", indent, "",
++	     "extension",
++	     extension1.empty () ? "(null)" : extension1.c_str (),
++	     extension2.empty () ? "(null)" : extension2.c_str ());
++}
++
+ static GTY(()) tree aarch64_previous_fndecl;
+ 
+ void
+@@ -31161,6 +31199,9 @@ aarch64_libgcc_floating_mode_supported_p
+ #undef TARGET_OPTION_PRINT
+ #define TARGET_OPTION_PRINT aarch64_option_print
+ 
++#undef TARGET_OPTION_PRINT_DIFF
++#define TARGET_OPTION_PRINT_DIFF aarch64_option_print_diff
++
+ #undef TARGET_OPTION_VALID_ATTRIBUTE_P
+ #define TARGET_OPTION_VALID_ATTRIBUTE_P aarch64_option_valid_attribute_p
+ 
+diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
+index 1e96521e6..50bbbbc42 100644
+--- a/gcc/doc/tm.texi
++++ b/gcc/doc/tm.texi
+@@ -10589,6 +10589,12 @@ information in the @code{struct cl_target_option} structure for
+ function-specific options.
+ @end deftypefn
+ 
++@deftypefn {Target Hook} void TARGET_OPTION_PRINT_DIFF (FILE *@var{file}, int @var{indent}, struct cl_target_option *@var{ptr1}, struct cl_target_option *@var{ptr2})
++This hook is called to print diff additional target-specific
++information in the ptr1 and ptr2 @code{struct cl_target_option} structure for
++function-specific options.
++@end deftypefn
++
+ @deftypefn {Target Hook} bool TARGET_OPTION_PRAGMA_PARSE (tree @var{args}, tree @var{pop_target})
+ This target hook parses the options for @code{#pragma GCC target}, which
+ sets the target-specific options for functions that occur later in the
+diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
+index 2dd515659..cfda60304 100644
+--- a/gcc/doc/tm.texi.in
++++ b/gcc/doc/tm.texi.in
+@@ -6985,6 +6985,8 @@ on this implementation detail.
+ 
+ @hook TARGET_OPTION_PRINT
+ 
++@hook TARGET_OPTION_PRINT_DIFF
++
+ @hook TARGET_OPTION_PRAGMA_PARSE
+ 
+ @hook TARGET_OPTION_OVERRIDE
+diff --git a/gcc/ipa-inline.cc b/gcc/ipa-inline.cc
+index f8bb072c4..8d5cc9a84 100644
+--- a/gcc/ipa-inline.cc
++++ b/gcc/ipa-inline.cc
+@@ -90,6 +90,8 @@ along with GCC; see the file COPYING3.  If not see
+ 	 the need for offline copy of the function.  */
+ 
+ #include "config.h"
++#define INCLUDE_SET
++#define INCLUDE_STRING
+ #include "system.h"
+ #include "coretypes.h"
+ #include "backend.h"
+@@ -127,6 +129,7 @@ typedef fibonacci_node  edge_heap_node_t;
+ static int overall_size;
+ static profile_count max_count;
+ static profile_count spec_rem;
++static std::set force_inline_targets;
+ 
+ /* Return false when inlining edge E would lead to violating
+    limits on function unit growth or stack usage growth.  
+@@ -222,6 +225,38 @@ caller_growth_limits (struct cgraph_edge *e)
+   return true;
+ }
+ 
++/* Warn and prompt the user, and output only once for the file pair where
++   the function is located.  */
++
++static void
++prompt_inline_failed_target_option_reason (struct cgraph_edge *e)
++{
++  static std::set> address_pair_set;
++  if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH
++      && !cl_target_option_eq_major (target_opts_for_fn (e->caller->decl),
++	   target_opts_for_fn (e->callee->ultimate_alias_target ()->decl))
++      && e->caller->lto_file_data
++      && e->callee->ultimate_alias_target ()->lto_file_data)
++    {
++      std::pair addr_pair
++	= std::make_pair (&e->caller->lto_file_data,
++			  &e->callee->ultimate_alias_target ()->lto_file_data);
++      if (address_pair_set.find (addr_pair) != address_pair_set.end ())
++	return;
++
++      address_pair_set.insert (addr_pair);
++      warning (0, "LTO objects caller in: %s, callee in: %s, not inlinable: %s."
++	       " Try to use -finline-force=callee_object_or_lib_name to force "
++	       "inline", e->caller->lto_file_data->file_name,
++	       e->callee->ultimate_alias_target ()->lto_file_data->file_name,
++	       cgraph_inline_failed_string (CIF_TARGET_OPTION_MISMATCH));
++
++      cl_target_option_print_diff
++	(stderr, 2, target_opts_for_fn (e->caller->decl),
++	target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
++    }
++}
++
+ /* Dump info about why inlining has failed.  */
+ 
+ static void
+@@ -254,6 +289,8 @@ report_inline_failed_reason (struct cgraph_edge *e)
+ 	    (dump_file, 2, opts_for_fn (e->caller->decl),
+ 	     opts_for_fn (e->callee->ultimate_alias_target ()->decl));
+     }
++
++  prompt_inline_failed_target_option_reason (e);
+ }
+ 
+  /* Decide whether sanitizer-related attributes allow inlining. */
+@@ -310,6 +347,77 @@ sanitize_attrs_match_for_inline_p (const_tree caller, const_tree callee)
+       (opts_for_fn (caller->decl)->x_##flag		\
+        != opts_for_fn (callee->decl)->x_##flag)
+ 
++/* find related node that has lto_file_data.  */
++
++static cgraph_node *
++find_related_node_lto_file_data (cgraph_node *node)
++{
++  cgraph_node *cur = node;
++
++  while (cur->clone_of)
++    {
++      /* Switch to original node, for example xxx.constprop.x function.  */
++      cur = cur->clone_of;
++      if (cur->lto_file_data)
++	return cur;
++
++      /* Find the lto_file_data information of referring.  */
++      struct ipa_ref *ref = NULL;
++      for (int i = 0; cur->iterate_referring (i, ref); i++)
++	{
++	  struct cgraph_node *cnode = dyn_cast  (ref->referring);
++	  if (cnode && cnode->lto_file_data)
++	    return cnode;
++	}
++    }
++
++  return NULL;
++}
++
++/* Determines whether to force inline or force inline only the specified
++   object.  Use for 3 inline extensions:
++   1) CIF_TARGET_OPTION_MISMATCH: cancel the restriction that the target options
++      of different compilation units are different.
++   2) CIF_OVERWRITABLE: indicates that the function is available, which is
++      similar to the "inline" keyword indication.
++   3) CIF_OPTIMIZATION_MISMATCH: cancel the check in the case of fp_expressions,
++      which is similar to the "always_inline" attribute.
++   */
++
++static bool
++can_force_inline_p (cgraph_node *callee)
++{
++  if (!in_lto_p)
++    return false;
++  if (flag_inline_force)
++    return true;
++  if (force_inline_targets_string)
++    {
++      cgraph_node * node = callee;
++      std::string name = "";
++      if (callee->ultimate_alias_target () == NULL
++	  || callee->ultimate_alias_target ()->lto_file_data == NULL)
++	{
++	  node = find_related_node_lto_file_data (callee);
++	  if (node && node->lto_file_data)
++	    name = node->lto_file_data->file_name;
++	}
++      else
++	name = node->ultimate_alias_target ()->lto_file_data->file_name;
++      while (!name.empty () && name.back () == '/')
++	name.erase (name.length () - 1);
++      if (name.empty ())
++	return false;
++      size_t last_slash_pos = name.find_last_of ('/');
++      if (last_slash_pos != std::string::npos
++	  && last_slash_pos != name.length () - 1)
++	name = name.substr (last_slash_pos + 1);
++      if (force_inline_targets.find (name) != force_inline_targets.end ())
++	return true;
++    }
++  return false;
++}
++
+ /* Decide if we can inline the edge and possibly update
+    inline_failed reason.  
+    We check whether inlining is possible at all and whether
+@@ -352,7 +460,7 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
+       e->inline_failed = CIF_USES_COMDAT_LOCAL;
+       inlinable = false;
+     }
+-  else if (avail <= AVAIL_INTERPOSABLE)
++  else if (avail <= AVAIL_INTERPOSABLE && !can_force_inline_p (callee))
+     {
+       e->inline_failed = CIF_OVERWRITABLE;
+       inlinable = false;
+@@ -378,8 +486,8 @@ can_inline_edge_p (struct cgraph_edge *e, bool report,
+       inlinable = false;
+     }
+   /* Check compatibility of target optimization options.  */
+-  else if (!targetm.target_option.can_inline_p (caller->decl,
+-						callee->decl))
++  else if (!can_force_inline_p (callee)
++	   && !targetm.target_option.can_inline_p (caller->decl, callee->decl))
+     {
+       e->inline_failed = CIF_TARGET_OPTION_MISMATCH;
+       inlinable = false;
+@@ -495,7 +603,8 @@ can_inline_edge_by_limits_p (struct cgraph_edge *e, bool report,
+       bool always_inline =
+ 	     (DECL_DISREGARD_INLINE_LIMITS (callee->decl)
+ 	      && lookup_attribute ("always_inline",
+-				   DECL_ATTRIBUTES (callee->decl)));
++				   DECL_ATTRIBUTES (callee->decl)))
++	     || can_force_inline_p (callee);
+       ipa_fn_summary *caller_info = ipa_fn_summaries->get (caller);
+       ipa_fn_summary *callee_info = ipa_fn_summaries->get (callee);
+ 
+@@ -2652,6 +2761,27 @@ flatten_remove_node_hook (struct cgraph_node *node, void *data)
+   removed->add (node);
+ }
+ 
++/* Parse string that specify forced inlining, separated by commas.  */
++
++static void
++parse_force_inline_targets_string (const char* s)
++{
++  std::string target_string (s);
++  std::string delim = ",";
++  size_t start = 0;
++  size_t end = target_string.find (delim);
++  if (target_string.substr (start, end - start) == "")
++    return;
++
++  while (end != std::string::npos)
++    {
++      force_inline_targets.insert (target_string.substr (start, end - start));
++      start = end + delim.size ();
++      end = target_string.find (delim, start);
++    }
++  force_inline_targets.insert (target_string.substr (start, end - start));
++}
++
+ /* Decide on the inlining.  We do so in the topological order to avoid
+    expenses on updating data structures.  */
+ 
+@@ -2665,6 +2795,9 @@ ipa_inline (void)
+   int cold;
+   bool remove_functions = false;
+ 
++  if (force_inline_targets_string)
++    parse_force_inline_targets_string (force_inline_targets_string);
++
+   order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
+ 
+   if (dump_file)
+diff --git a/gcc/lto-compress.cc b/gcc/lto-compress.cc
+index 27f0992a8..f9d0722a9 100644
+--- a/gcc/lto-compress.cc
++++ b/gcc/lto-compress.cc
+@@ -305,7 +305,11 @@ void
+ lto_end_compression (struct lto_compression_stream *stream)
+ {
+ #ifdef HAVE_ZSTD_H
+-  lto_compression_zstd (stream);
++  if (lto_compression_algorithm
++      && strcmp (lto_compression_algorithm, "zstd") == 0)
++    lto_compression_zstd (stream);
++  else
++    lto_compression_zlib (stream);
+ #else
+   lto_compression_zlib (stream);
+ #endif
+diff --git a/gcc/lto-section-in.cc b/gcc/lto-section-in.cc
+index ba87c7276..947f8eb15 100644
+--- a/gcc/lto-section-in.cc
++++ b/gcc/lto-section-in.cc
+@@ -448,6 +448,11 @@ lto_free_function_in_decl_state_for_node (symtab_node *node)
+       lto_free_function_in_decl_state (*slot);
+       node->lto_file_data->function_decl_states->clear_slot (slot);
+     }
++
++  /* In force inline case, keep lto file path information.  */
++  if (in_lto_p && (flag_inline_force || force_inline_targets_string))
++    return;
++
+   node->lto_file_data = NULL;
+ }
+ 
+diff --git a/gcc/lto-streamer-out.cc b/gcc/lto-streamer-out.cc
+index 471f35c31..a574f0f1e 100644
+--- a/gcc/lto-streamer-out.cc
++++ b/gcc/lto-streamer-out.cc
+@@ -2666,7 +2666,12 @@ produce_lto_section ()
+   free (section_name);
+ 
+ #ifdef HAVE_ZSTD_H
+-  lto_compression compression = ZSTD;
++  lto_compression compression = ZLIB;
++  if (lto_compression_algorithm
++      && strcmp (lto_compression_algorithm, "zstd") == 0)
++    compression = ZSTD;
++  else
++    compression = ZLIB;
+ #else
+   lto_compression compression = ZLIB;
+ #endif
+diff --git a/gcc/lto-wrapper.cc b/gcc/lto-wrapper.cc
+index 155ccce57..2b1994652 100644
+--- a/gcc/lto-wrapper.cc
++++ b/gcc/lto-wrapper.cc
+@@ -491,6 +491,8 @@ merge_and_complain (vec &decoded_options,
+ 	|| decoded_options[j].opt_index == OPT_fpic)
+       {
+ 	/* -fno-pic in one unit implies -fno-pic everywhere.  */
++	/* The -fno-pic adjustment here should provide some information hints,
++	   but may affect the use case test of deja.  */
+ 	if (decoded_options[j].value == 0)
+ 	  j++;
+ 	/* If we have no pic option or merge in -fno-pic, we still may turn
+@@ -534,6 +536,8 @@ merge_and_complain (vec &decoded_options,
+ 	    || decoded_options[j].opt_index == OPT_fpie)
+       {
+ 	/* -fno-pie in one unit implies -fno-pie everywhere.  */
++	/* The -fno-pie adjustment here should provide some information hints,
++	   but may affect the use case test of deja.  */
+ 	if (decoded_options[j].value == 0)
+ 	  j++;
+ 	/* If we have no pie option or merge in -fno-pie, we still preserve
+diff --git a/gcc/optc-save-gen.awk b/gcc/optc-save-gen.awk
+index 7c012dd4e..94b85b331 100644
+--- a/gcc/optc-save-gen.awk
++++ b/gcc/optc-save-gen.awk
+@@ -1043,6 +1043,10 @@ for (i = 0; i < n_target_string; i++) {
+ 	print "";
+ }
+ 
++print "";
++print "  if (targetm.target_option.print_diff)";
++print "    targetm.target_option.print_diff (file, indent, ptr1, ptr2);";
++
+ print "}";
+ 
+ print "";
+@@ -1160,6 +1164,59 @@ print "  return true;";
+ 
+ print "}";
+ 
++print "";
++print "/* Compare two target major options.  */";
++print "bool";
++print "cl_target_option_eq_major (struct cl_target_option const *ptr1 ATTRIBUTE_UNUSED,";
++print "                     struct cl_target_option const *ptr2 ATTRIBUTE_UNUSED)";
++print "{";
++n_target_val_major = 0;
++
++for (i = 0; i < n_target_save; i++) {
++	var = target_save_decl[i];
++	sub (" *=.*", "", var);
++	name = var;
++	type = var;
++	sub("^.*[ *]", "", name)
++	sub(" *" name "$", "", type)
++        if (target_save_decl[i] ~ "^const char \\*+[_" alnum "]+$")
++		continue;
++        if (target_save_decl[i] ~ " .*\\[.+\\]+$")
++                continue;
++
++        var_target_val_major[n_target_val_major++] = name;
++}
++if (have_save) {
++	for (i = 0; i < n_opts; i++) {
++		if (flag_set_p("Save", flags[i])) {
++			name = var_name(flags[i])
++			if(name == "")
++				name = "target_flags";
++
++			if(name in var_list_seen)
++				continue;
++
++			var_list_seen[name]++;
++			otype = var_type_struct(flags[i])
++			if (otype ~ "^const char \\**$")
++				continue;
++			var_target_val_major[n_target_val_major++] = "x_" name;
++		}
++	}
++} else {
++	var_target_val_major[n_target_val_major++] = "x_target_flags";
++}
++
++for (i = 0; i < n_target_val_major; i++) {
++	name = var_target_val_major[i]
++	print "  if (ptr1->" name" != ptr2->" name ")";
++	print "    return false;";
++}
++
++print "  return true;";
++
++print "}";
++
+ print "";
+ print "/* Hash target options  */";
+ print "hashval_t";
+diff --git a/gcc/opth-gen.awk b/gcc/opth-gen.awk
+index 8bba8ec45..cb016e85d 100644
+--- a/gcc/opth-gen.awk
++++ b/gcc/opth-gen.awk
+@@ -330,6 +330,9 @@ print "";
+ print "/* Compare two target option variables from a structure.  */";
+ print "extern bool cl_target_option_eq (const struct cl_target_option *, const struct cl_target_option *);";
+ print "";
++print "/* Compare two target major option variables from a structure.  */";
++print "extern bool cl_target_option_eq_major (const struct cl_target_option *, const struct cl_target_option *);";
++print "";
+ print "/* Free heap memory used by target option variables.  */";
+ print "extern void cl_target_option_free (struct cl_target_option *);";
+ print "";
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index d97f6079f..d9de8747c 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -2611,6 +2611,32 @@ print_help (struct gcc_options *opts, unsigned int lang_mask,
+ 			 lang_mask);
+ }
+ 
++/* Checks whether the input forced inline string complies with the
++   restriction.  */
++
++void
++check_force_inline_targets_string (const char *arg, location_t loc)
++{
++  const int MAX_FORCE_INLINE_TARGET_LEN = 10000;
++  const int MAX_NUM_TARGET = 100;
++  __SIZE_TYPE__ length = strlen (arg);
++  int target_num = 1;
++  if (length > MAX_FORCE_INLINE_TARGET_LEN)
++    error_at (loc,
++	      "input string exceeds %d characters to %<-finline_force=%> "
++	      "option: %qs", MAX_FORCE_INLINE_TARGET_LEN, arg);
++  for (__SIZE_TYPE__ i = 0; i < length; i++)
++    {
++      if (arg[i] == ',')
++	{
++	  target_num++;
++	  if (target_num > MAX_NUM_TARGET)
++	    error_at (loc, "input target exceeds %d to %<-finline_force=%> "
++		      "option: %qs", MAX_NUM_TARGET, arg);
++	}
++    }
++}
++
+ /* Handle target- and language-independent options.  Return zero to
+    generate an "unknown option" message.  Only options that need
+    extra handling need to be listed here; if you simply want
+@@ -2952,6 +2978,14 @@ common_handle_option (struct gcc_options *opts,
+ 			   value / 2);
+       break;
+ 
++    case OPT_finline_force:
++      opts->x_force_inline_targets_string = value ? "" : NULL;
++      break;
++
++    case OPT_finline_force_:
++      check_force_inline_targets_string (arg, loc);
++      break;
++
+     case OPT_finstrument_functions_exclude_function_list_:
+       add_comma_separated_to_vector
+ 	(&opts->x_flag_instrument_functions_exclude_functions, arg);
+@@ -3226,6 +3260,18 @@ common_handle_option (struct gcc_options *opts,
+ 		  "unrecognized argument to %<-flto=%> option: %qs", arg);
+       break;
+ 
++    case OPT_flto_compression_algorithm_:
++      if (atoi (arg) == 0
++	  && strcmp (arg, "zlib") != 0
++#ifdef HAVE_ZSTD_H
++	  && strcmp (arg, "zstd") != 0
++#endif
++	)
++	error_at (loc,
++		  "unrecognized argument to %<-flto-compression-algorithm=%> "
++		  "option: %qs", arg);
++      break;
++
+     case OPT_w:
+       dc->dc_inhibit_warnings = true;
+       break;
+diff --git a/gcc/target.def b/gcc/target.def
+index 7183f363d..142858fa3 100644
+--- a/gcc/target.def
++++ b/gcc/target.def
+@@ -6644,6 +6644,16 @@ information in the @code{struct cl_target_option} structure for\n\
+ function-specific options.",
+  void, (FILE *file, int indent, struct cl_target_option *ptr), NULL)
+ 
++/* Function to print any extra target state from the target options
++   structure.  */
++DEFHOOK
++(print_diff,
++ "This hook is called to print diff additional target-specific\n\
++information in the ptr1 and ptr2 @code{struct cl_target_option} structure for\n\
++function-specific options.",
++ void, (FILE *file, int indent, struct cl_target_option *ptr1,
++ struct cl_target_option *ptr2), NULL)
++
+ /* Function to parse arguments to be validated for #pragma target, and to
+    change the state if the options are valid.  If the first argument is
+    NULL, the second argument specifies the default options to use.  Return
+diff --git a/gcc/testsuite/gcc.dg/lto/binary-inline-1_0.c b/gcc/testsuite/gcc.dg/lto/binary-inline-1_0.c
+new file mode 100644
+index 000000000..0b5cd5953
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/lto/binary-inline-1_0.c
+@@ -0,0 +1,15 @@
++/* { dg-lto-do link }  */
++/* { dg-require-effective-target shared } */
++/* { dg-extra-ld-options {-shared -finline-force=c_lto_binary-inline-1_1.o} } */
++/* { dg-lto-options {{-O3 -flto -march=armv8.2-a -fdump-ipa-inline-details}} }  */ 
++
++extern double multi_op(float x);
++
++double func_a (float x)
++{
++  double res = 0;
++  res = multi_op (x);
++  return res;
++}
++
++/* { dg-final { scan-wpa-ipa-dump "Inlined 1 calls"  "inline"  } } */
+diff --git a/gcc/testsuite/gcc.dg/lto/binary-inline-1_1.c b/gcc/testsuite/gcc.dg/lto/binary-inline-1_1.c
+new file mode 100644
+index 000000000..8181384b7
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/lto/binary-inline-1_1.c
+@@ -0,0 +1,6 @@
++/* { dg-options "-march=armv8.3-a+sve+f64mm+crc+crypto+fp16+i8mm+simd" } */
++
++double multi_op (float x)
++{
++    return x * 2 + 10;
++}
+diff --git a/gcc/testsuite/gcc.dg/lto/binary-inline-2_0.c b/gcc/testsuite/gcc.dg/lto/binary-inline-2_0.c
+new file mode 100644
+index 000000000..e873937d3
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/lto/binary-inline-2_0.c
+@@ -0,0 +1,15 @@
++/* { dg-lto-do link }  */
++/* { dg-require-effective-target shared } */
++/* { dg-extra-ld-options {-shared -finline-force=c_lto_binary-inline-2_1.o} } */
++/* { dg-lto-options {{-O3 -flto -fPIC -fdump-ipa-inline-details}} }  */ 
++
++extern double multi_op(float x);
++
++double func_a (float x)
++{
++  double res = 0;
++  res = multi_op (x);
++  return res;
++}
++
++/* { dg-final { scan-wpa-ipa-dump "Inlined 1 calls"  "inline"  } } */
+diff --git a/gcc/testsuite/gcc.dg/lto/binary-inline-2_1.c b/gcc/testsuite/gcc.dg/lto/binary-inline-2_1.c
+new file mode 100644
+index 000000000..dc7c4fd9f
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/lto/binary-inline-2_1.c
+@@ -0,0 +1,5 @@
++
++double multi_op (float x)
++{
++    return x * 2 + 10;
++}
+diff --git a/gcc/testsuite/gcc.dg/lto/binary-inline-3_0.c b/gcc/testsuite/gcc.dg/lto/binary-inline-3_0.c
+new file mode 100644
+index 000000000..c78ba066d
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/lto/binary-inline-3_0.c
+@@ -0,0 +1,15 @@
++/* { dg-lto-do link }  */
++/* { dg-require-effective-target shared } */
++/* { dg-extra-ld-options {-shared -finline-force=c_lto_binary-inline-3_1.o} } */
++/* { dg-lto-options {{-O3 -flto -fdump-ipa-inline-details}} }  */ 
++
++extern double multi_op(double x);
++
++double func_a (double x)
++{
++  double res = 0;
++  res = multi_op (x);
++  return res;
++}
++
++/* { dg-final { scan-wpa-ipa-dump "Inlined 1 calls"  "inline"  } } */
+diff --git a/gcc/testsuite/gcc.dg/lto/binary-inline-3_1.c b/gcc/testsuite/gcc.dg/lto/binary-inline-3_1.c
+new file mode 100644
+index 000000000..8b505fa0c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/lto/binary-inline-3_1.c
+@@ -0,0 +1,10 @@
++/* { dg-options "-O2 -fno-math-errno" }  */
++
++#include 
++
++double multi_op (double x)
++{
++    double a = 0;
++    a = sqrt (x);
++    return a * 2 + 10;
++}
+diff --git a/gcc/tree-streamer-in.cc b/gcc/tree-streamer-in.cc
+index a35a810f4..79f819ad8 100644
+--- a/gcc/tree-streamer-in.cc
++++ b/gcc/tree-streamer-in.cc
+@@ -20,6 +20,9 @@ along with GCC; see the file COPYING3.  If not see
+ .  */
+ 
+ #include "config.h"
++#include 
++#define INCLUDE_SET
++#define INCLUDE_STRING
+ #include "system.h"
+ #include "coretypes.h"
+ #include "backend.h"
+@@ -36,6 +39,47 @@ along with GCC; see the file COPYING3.  If not see
+ #include "asan.h"
+ #include "opts.h"
+ 
++/* Parse string that specify forced inlining, separated by commas.  */
++static std::set multi_version_libs;
++static void
++parse_multi_version_lib_string (const char* s)
++{
++  std::string target_string (s);
++  std::string delim = ",";
++  size_t start = 0;
++  size_t end = target_string.find (delim);
++  if (target_string.substr (start, end - start) == "")
++    return;
++
++  while (end != std::string::npos)
++    {
++      multi_version_libs.insert (target_string.substr (start, end - start));
++      start = end + delim.size ();
++      end = target_string.find (delim, start);
++    }
++  multi_version_libs.insert (target_string.substr (start, end - start));
++}
++
++static bool
++target_lib_p (std::string name)
++{
++  if (multi_version_libs.empty () && multi_version_lib_string)
++    parse_multi_version_lib_string (multi_version_lib_string);
++  if (multi_version_lib_string)
++    {
++      while (!name.empty () && name.back () == '/')
++	name.erase (name.length () - 1);
++      if (name.empty ())
++	return false;
++      size_t last_slash_pos = name.find_last_of ('/');
++      if (last_slash_pos != std::string::npos
++	  && last_slash_pos != name.length () - 1)
++	name = name.substr (last_slash_pos + 1);
++      if (multi_version_libs.find (name) != multi_version_libs.end ())
++	return true;
++    }
++  return false;
++}
+ 
+ /* Read a STRING_CST from the string table in DATA_IN using input
+    block IB.  */
+@@ -555,7 +599,12 @@ streamer_read_tree_bitfields (class lto_input_block *ib,
+     unpack_ts_translation_unit_decl_value_fields (data_in, &bp, expr);
+ 
+   if (CODE_CONTAINS_STRUCT (code, TS_OPTIMIZATION))
+-    cl_optimization_stream_in (data_in, &bp, TREE_OPTIMIZATION (expr));
++  {
++    if (target_lib_p (data_in->file_data->file_name))
++      cl_optimization_stream_in_prev (data_in, &bp, TREE_OPTIMIZATION (expr));
++    else
++      cl_optimization_stream_in (data_in, &bp, TREE_OPTIMIZATION (expr));
++  }
+ 
+   if (CODE_CONTAINS_STRUCT (code, TS_CONSTRUCTOR))
+     {
+@@ -569,7 +618,12 @@ streamer_read_tree_bitfields (class lto_input_block *ib,
+ #ifndef ACCEL_COMPILER
+   if (CODE_CONTAINS_STRUCT (code, TS_TARGET_OPTION))
+     {
+-      cl_target_option_stream_in (data_in, &bp, TREE_TARGET_OPTION (expr));
++      if (target_lib_p (data_in->file_data->file_name))
++	cl_target_option_stream_in_prev (
++		data_in, &bp, TREE_TARGET_OPTION (expr));
++      else
++	cl_target_option_stream_in (data_in, &bp, TREE_TARGET_OPTION (expr));
++
+       if (targetm.target_option.post_stream_in)
+ 	targetm.target_option.post_stream_in (TREE_TARGET_OPTION (expr));
+     }
+diff --git a/lto-plugin/lto-plugin.c b/lto-plugin/lto-plugin.c
+index 33d49571d..b3301a8a4 100644
+--- a/lto-plugin/lto-plugin.c
++++ b/lto-plugin/lto-plugin.c
+@@ -89,6 +89,10 @@ along with this program; see the file COPYING3.  If not see
+ 
+ #define LTO_SEGMENT_NAME "__GNU_LTO"
+ 
++#define GCC_major_version 12
++#define LTO_major_version GCC_major_version
++#define LTO_minor_version 0
++
+ /* Return true if STR string starts with PREFIX.  */
+ 
+ static inline bool
+@@ -118,6 +122,18 @@ struct plugin_symtab
+   unsigned long long id;
+ };
+ 
++/* Structure that represents LTO ELF section with information
++   about the format.  */
++
++struct lto_section
++{
++  int16_t major_version;
++  int16_t minor_version;
++  unsigned char slim_object;
++  unsigned char _padding;
++  uint16_t flags;
++};
++
+ /* Encapsulates object file data during symbol scan.  */
+ struct plugin_objfile
+ {
+@@ -126,6 +142,7 @@ struct plugin_objfile
+   simple_object_read *objfile;
+   struct plugin_symtab *out;
+   const struct ld_plugin_input_file *file;
++  struct lto_section version;
+ };
+ 
+ /* All that we have to remember about a file. */
+@@ -216,6 +233,8 @@ static int gold_version = -1;
+    (in fact, only first letter of style arg is checked.)  */
+ static enum symbol_style sym_style = ss_none;
+ 
++static bool multi_version_lto_parse = false;
++
+ static void
+ check_1 (int gate, enum ld_plugin_level level, const char *text)
+ {
+@@ -1078,6 +1097,59 @@ err:
+   return 0;
+ }
+ 
++/* Process version section of an object file.  */
++
++static int
++process_lto_version (void *data, const char *name, off_t offset, off_t length)
++{
++  struct plugin_objfile *obj = (struct plugin_objfile *)data;
++  char *s;
++  char *secdatastart, *secdata;
++
++  if (!startswith (name, ".gnu.lto_.lto"))
++    return 1;
++
++  s = strrchr (name, '.');
++  if (s)
++    sscanf (s, ".%" PRI_LL "x", &obj->out->id);
++  secdata = secdatastart = xmalloc (length);
++  offset += obj->file->offset;
++  if (offset != lseek (obj->file->fd, offset, SEEK_SET))
++    goto err;
++
++  do
++    {
++      ssize_t got = read (obj->file->fd, secdata, length);
++      if (got == 0)
++	break;
++      else if (got > 0)
++	{
++	  secdata += got;
++	  length -= got;
++	}
++      else if (errno != EINTR)
++	goto err;
++    }
++  while (length > 0);
++  if (length > 0)
++    goto err;
++
++  struct lto_section *lto_info = (struct lto_section *)secdatastart;
++  obj->version = *lto_info;
++
++  obj->found++;
++  free (secdatastart);
++  return 1;
++
++err:
++  if (message)
++    message (LDPL_FATAL, "%s: corrupt object file", obj->file->name);
++  /* Force claim_file_handler to abandon this file.  */
++  obj->found = 0;
++  free (secdatastart);
++  return 0;
++}
++
+ /* Process one section of an object file.  */
+ 
+ static int
+@@ -1223,6 +1295,15 @@ claim_file_handler (const struct ld_plugin_input_file *file, int *claimed)
+   if (obj.found == 0 && obj.offload == 0)
+     goto err;
+ 
++  if (multi_version_lto_parse)
++    {
++      simple_object_find_sections (obj.objfile, process_lto_version, &obj,
++	      &err);
++      if (obj.version.major_version != LTO_major_version
++	  || obj.version.minor_version != LTO_minor_version)
++	goto err;
++    }
++
+   if (obj.found > 1)
+     resolve_conflicts (<o_file.symtab, <o_file.conflicts);
+ 
+@@ -1366,6 +1447,8 @@ process_option (const char *option)
+     }
+   else if (startswith (option, "-ltrans-objects="))
+     ltrans_objects = xstrdup (option + strlen ("-ltrans-objects="));
++  else if (strcmp (option, "-multi-version-lto-parse") == 0)
++    multi_version_lto_parse = true;
+   else
+     {
+       int size;
+-- 
+2.25.1
+
diff --git a/0305-Backport-varasm-Handle-private-COMDAT-function-symbo.patch b/0305-Backport-varasm-Handle-private-COMDAT-function-symbo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e3bf4dc1812b555e09f277ef91d8f5202ef00589
--- /dev/null
+++ b/0305-Backport-varasm-Handle-private-COMDAT-function-symbo.patch
@@ -0,0 +1,296 @@
+From bbb4954294d010977fcfb96931384101cf015a44 Mon Sep 17 00:00:00 2001
+From: Jakub Jelinek 
+Date: Mon, 26 Feb 2024 17:55:07 +0100
+Subject: [PATCH] [Backport]varasm: Handle private COMDAT function symbol
+ reference in readonly data section [PR113617]
+
+If default_elf_select_rtx_section is called to put a reference to some
+local symbol defined in a comdat section into memory, which happens more often
+since the r14-4944 RA change, linking might fail.
+default_elf_select_rtx_section puts such constants into .data.rel.ro.local
+etc. sections and if linker chooses comdat sections from some other TU
+and discards the one to which a relocation in .data.rel.ro.local remains,
+linker diagnoses error.  References to private comdat symbols can only appear
+from functions or data objects in the same comdat group, so the following
+patch arranges using .data.rel.ro.local.pool. and similar sections.
+
+2024-02-26  Jakub Jelinek  
+	    H.J. Lu  
+
+	PR rtl-optimization/113617
+	* varasm.cc (default_elf_select_rtx_section): For
+	references to private symbols in comdat sections
+	use .data.relro.local.pool., .data.relro.pool.
+	or .rodata. comdat sections.
+
+	* g++.dg/other/pr113617.C: New test.
+	* g++.dg/other/pr113617.h: New test.
+	* g++.dg/other/pr113617-aux.cc: New test.
+---
+ gcc/testsuite/g++.dg/other/pr113617-aux.cc |   9 ++
+ gcc/testsuite/g++.dg/other/pr113617.C      |  27 +++++
+ gcc/testsuite/g++.dg/other/pr113617.h      | 132 +++++++++++++++++++++
+ gcc/varasm.cc                              |  48 +++++++-
+ 4 files changed, 215 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/g++.dg/other/pr113617-aux.cc
+ create mode 100644 gcc/testsuite/g++.dg/other/pr113617.C
+ create mode 100644 gcc/testsuite/g++.dg/other/pr113617.h
+
+diff --git a/gcc/testsuite/g++.dg/other/pr113617-aux.cc b/gcc/testsuite/g++.dg/other/pr113617-aux.cc
+new file mode 100644
+index 000000000..e6900e05a
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/other/pr113617-aux.cc
+@@ -0,0 +1,9 @@
++// PR rtl-optimization/113617
++// { dg-do link { target { c++17 && c++14_down } } }
++
++#include "pr113617.h"
++
++void qux() {
++  A a;
++  a.foo(0, 0);
++}
+diff --git a/gcc/testsuite/g++.dg/other/pr113617.C b/gcc/testsuite/g++.dg/other/pr113617.C
+new file mode 100644
+index 000000000..a02dda142
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/other/pr113617.C
+@@ -0,0 +1,27 @@
++// PR rtl-optimization/113617
++// { dg-do link { target c++11 } }
++// { dg-options "-O2" }
++// { dg-additional-options "-fPIC" { target fpic } } */
++// { dg-additional-options "-shared" { target shared } } */
++// { dg-additional-sources pr113617-aux.cc }
++
++#include "pr113617.h"
++
++int z;
++long xx1;
++void corge() {
++  A a;
++  a.foo(xx1, 0);
++}
++
++typedef unsigned long int VV __attribute__((vector_size (2 * sizeof (long))));
++VV vv;
++__attribute__((noipa)) static void fn1 (void) {}
++__attribute__((noipa)) static void fn2 (void) {}
++
++void
++fn3 ()
++{
++  VV a = { (unsigned long) &fn1, (unsigned long) &fn2 };
++  vv = a;
++}
+diff --git a/gcc/testsuite/g++.dg/other/pr113617.h b/gcc/testsuite/g++.dg/other/pr113617.h
+new file mode 100644
+index 000000000..4d30eddbc
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/other/pr113617.h
+@@ -0,0 +1,132 @@
++namespace {
++template  struct J { static constexpr int value = V; };
++template  using K = J;
++using M = K;
++template  struct L { template  using type = _Tp; };
++template  using N = typename L<_Cond>::type<_If, _Else>;
++M k;
++template  struct O { using type = _Tp; };
++template 
++struct P : N, _Up> {};
++template  struct Q { using type = typename P<_Tp>::type; };
++}
++namespace R {
++struct H;
++enum G {};
++template  class S;
++struct T { using U = bool (*) (H &, const H &, G); U F; };
++template  class B;
++template 
++struct B<_R(_A...), _F> {
++  static bool F(H &, const H &, G) { return false; }
++  __attribute__((noipa)) static _R bar(const H &) {}
++};
++template 
++struct S<_R(_A...)> : T {
++  template  using AH = B<_R(), _F>;
++  template  S(_F) {
++    using AG = AH<_F>;
++    barr = AG::bar;
++    F = AG::F;
++  }
++  using AF = _R (*)(const H &);
++  AF barr;
++};
++template  class I;
++template 
++struct I<_F(_B...)> {};
++template  using W = decltype(k);
++template  struct V {
++  typedef I::type(typename Q<_B>::type...)> type;
++};
++template 
++__attribute__((noipa)) typename V::value, _F, _B...>::type
++baz(_F, _B...) { return typename V::value, _F, _B...>::type (); }
++template  struct AJ {
++  template  struct _Ptr { using type = _Up *; };
++  using AI = typename _Ptr<_Tp>::type;
++};
++template  struct Y {
++  using AI = typename AJ<_Tp>::AI;
++  AI operator->();
++};
++}
++extern int z;
++namespace N1 {
++namespace N2 {
++namespace N3 {
++enum Z { Z1, Z2 };
++template  struct X {
++  template 
++  __attribute__((noipa)) void boo(long long, long long, long long, _F &) {}
++};
++struct AC {
++  AC(int);
++  void m1(R::S);
++};
++template 
++__attribute__((noipa)) void garply(void *, long long, long long, long long) {}
++template <>
++template 
++void X::boo(long long, long long x, long long y, _F &fi) {
++  AC pool(z);
++  for (;;) {
++    auto job = R::baz(garply<_F>, &fi, y, y, x);
++    pool.m1(job);
++  }
++}
++struct AB {
++  static AB &bleh();
++  template 
++  void boo(long first, long x, long y, _F fi) {
++    switch (ab1) {
++    case Z1:
++      ab2->boo(first, x, y, fi);
++    case Z2:
++      ab3->boo(first, x, y, fi);
++    }
++  }
++  Z ab1;
++  R::Y> ab2;
++  R::Y> ab3;
++};
++template  struct C;
++template  struct C<_F, false> {
++  __attribute__((noipa)) C(_F) {}
++  void boo(long first, long x, long y) {
++    auto u = AB::bleh();
++    u.boo(first, x, y, *this);
++  }
++};
++template  struct AA { typedef C<_F, 0> type; };
++}
++}
++}
++struct AD {
++  template 
++  static void boo(long first, long x, long y, _F f) {
++    typename N1::N2::N3::AA<_F>::type fi(f);
++    fi.boo(first, x, y);
++  }
++  template 
++  static void boo(long first, long x, _F f) {
++    boo(first, x, 0, f);
++  }
++};
++template  struct A {
++  void foo(long long, long long);
++  int *c;
++};
++namespace {
++template  struct D { __attribute__((noipa)) D(int *) {} };
++}
++template 
++void A::foo(long long x, long long y)
++{
++  int e;
++  D d(&e);
++  AD::boo(0, y, d);
++  long p;
++  for (p = 0; p < x; p++)
++    c[p] = c[p - 1];
++}
+diff --git a/gcc/varasm.cc b/gcc/varasm.cc
+index bae935694..d122730b5 100644
+--- a/gcc/varasm.cc
++++ b/gcc/varasm.cc
+@@ -7317,17 +7317,63 @@ default_elf_select_rtx_section (machine_mode mode, rtx x,
+ 				unsigned HOST_WIDE_INT align)
+ {
+   int reloc = compute_reloc_for_rtx (x);
++  tree decl = nullptr;
++  const char *prefix = nullptr;
++  int flags = 0;
++
++  /* If it is a private COMDAT function symbol reference, call
++     function_rodata_section for the read-only or relocated read-only
++     data section associated with function DECL so that the COMDAT
++     section will be used for the private COMDAT function symbol.  */
++  if (HAVE_COMDAT_GROUP)
++    {
++      if (GET_CODE (x) == CONST
++	 && GET_CODE (XEXP (x, 0)) == PLUS
++	 && CONST_INT_P (XEXP (XEXP (x, 0), 1)))
++       x = XEXP (XEXP (x, 0), 0);
++
++      if (GET_CODE (x) == SYMBOL_REF)
++       {
++	 decl = SYMBOL_REF_DECL (x);
++	 if (decl
++	     && (TREE_CODE (decl) != FUNCTION_DECL
++		 || !DECL_COMDAT_GROUP (decl)
++		 || TREE_PUBLIC (decl)))
++	   decl = nullptr;
++       }
++    }
+ 
+   /* ??? Handle small data here somehow.  */
+ 
+   if (reloc & targetm.asm_out.reloc_rw_mask ())
+     {
+-      if (reloc == 1)
++      if (decl)
++	{
++	  prefix = reloc == 1 ? ".data.rel.ro.local" : ".data.rel.ro";
++	  flags = SECTION_WRITE | SECTION_RELRO;
++	}
++      else if (reloc == 1)
+ 	return get_named_section (NULL, ".data.rel.ro.local", 1);
+       else
+ 	return get_named_section (NULL, ".data.rel.ro", 3);
+     }
+ 
++  if (decl)
++    {
++      const char *comdat = IDENTIFIER_POINTER (DECL_COMDAT_GROUP (decl));
++      if (!prefix)
++	prefix = ".rodata";
++      size_t prefix_len = strlen (prefix);
++      size_t comdat_len = strlen (comdat);
++      size_t len = prefix_len + sizeof (".pool.") + comdat_len;
++      char *name = XALLOCAVEC (char, len);
++      memcpy (name, prefix, prefix_len);
++      memcpy (name + prefix_len, ".pool.", sizeof (".pool.") - 1);
++      memcpy (name + prefix_len + sizeof (".pool.") - 1, comdat,
++	      comdat_len + 1);
++      return get_section (name, flags | SECTION_LINKONCE, decl);
++    }
++
+   return mergeable_constant_section (mode, align, 0);
+ }
+ 
+-- 
+2.33.0
+
diff --git a/0306-RISC-V-Install-libstdc-libcc1-etc-to-lib64-instead-o.patch b/0306-RISC-V-Install-libstdc-libcc1-etc-to-lib64-instead-o.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3c0c15a56ecac0fe0b025befc8b539fbe3b49834
--- /dev/null
+++ b/0306-RISC-V-Install-libstdc-libcc1-etc-to-lib64-instead-o.patch
@@ -0,0 +1,65 @@
+From 84edbc6544ed872aedb3cb6f6d0feb8647ff1d8b Mon Sep 17 00:00:00 2001
+From: YunQiang Su 
+Date: Mon, 14 Oct 2024 10:09:46 +0800
+Subject: [PATCH] RISC-V: Install libstdc++/libcc1 etc to /lib64 instead of lib
+
+The problem is that if we are configured with `--disable-multilib`,
+  gcc -print-multi-os-directory
+outputs
+  .
+Thus the dest to install libraries is set to
+  /usr/lib/.
+While other platforms (x86-64, arm64) it will be
+  /usr/lib/../lib64
+Let's sync riscv64 with them
+
+Another problem is that
+  gcc -print-file-name=libzstd.so.1
+will output
+  /usr/lib64/lp64d/../lib64/libzstd.so.1
+which also needs to be patched.
+---
+ gcc/config.gcc               | 3 +++
+ gcc/config/riscv/linux.h     | 2 ++
+ gcc/config/riscv/t-openEuler | 2 ++
+ 3 files changed, 7 insertions(+)
+ create mode 100644 gcc/config/riscv/t-openEuler
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 19b21a280..23c5bee2b 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -2453,6 +2453,9 @@ riscv*-*-linux*)
+ 	xyes) tmake_file="${tmake_file} riscv/t-linux-multilib" ;;
+ 	*) echo "Unknown value for enable_multilib"; exit 1
+ 	esac
++	case "x${target_vendor}" in
++	xopenEuler) tmake_file="${tmake_file} riscv/t-openEuler"
++	esac
+ 	tmake_file="${tmake_file} riscv/t-riscv riscv/t-linux"
+ 	gnu_ld=yes
+ 	gas=yes
+diff --git a/gcc/config/riscv/linux.h b/gcc/config/riscv/linux.h
+index b5c6c5027..a8d65f4e0 100644
+--- a/gcc/config/riscv/linux.h
++++ b/gcc/config/riscv/linux.h
+@@ -62,6 +62,8 @@ along with GCC; see the file COPYING3.  If not see
+ #define TARGET_ASM_FILE_END file_end_indicate_exec_stack
+ 
+ #define STARTFILE_PREFIX_SPEC 			\
++   "/lib" XLEN_SPEC "/ " 			\
++   "/usr/lib" XLEN_SPEC "/ " 			\
+    "/lib" XLEN_SPEC "/" ABI_SPEC "/ "		\
+    "/usr/lib" XLEN_SPEC "/" ABI_SPEC "/ "	\
+    "/lib/ "					\
+diff --git a/gcc/config/riscv/t-openEuler b/gcc/config/riscv/t-openEuler
+new file mode 100644
+index 000000000..26541dd08
+--- /dev/null
++++ b/gcc/config/riscv/t-openEuler
+@@ -0,0 +1,2 @@
++MULTILIB_OPTIONS = mabi=lp64d
++MULTILIB_DIRNAMES = ../lib64
+-- 
+2.39.5 (Apple Git-154)
+
diff --git a/0307-Set-fallback-value-for-print-multi-os-directory.patch b/0307-Set-fallback-value-for-print-multi-os-directory.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2032f6babfeec47a53f1e09b0d31d9cb66370d60
--- /dev/null
+++ b/0307-Set-fallback-value-for-print-multi-os-directory.patch
@@ -0,0 +1,105 @@
+From 0d157b14f361f8319f4694c54c6e01ac8f59d278 Mon Sep 17 00:00:00 2001
+From: YunQiang Su 
+Date: Tue, 8 Oct 2024 17:56:23 +0800
+Subject: [PATCH 1/2] Set fallback value for -print-multi-os-directory
+
+Clang doesn't support -print-multi-os-directory option.
+So let's set the fallback value (../lib64) if it is empty.
+
+This is only needed for the projects built by hostcc:
+  gcc, libcc1, libiberty
+
+The projects for targets only, will always built by gcc itself.
+---
+ gcc/configure         | 3 +++
+ libcc1/configure      | 6 ++++++
+ libcc1/configure.ac   | 3 +++
+ libiberty/Makefile.in | 5 ++++-
+ libtool.m4            | 3 +++
+ 5 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/configure b/gcc/configure
+index 7e64599b0..ef0449edd 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -18598,6 +18598,9 @@ if test "$GCC" = yes; then
+   # and add multilib dir if necessary.
+   lt_tmp_lt_search_path_spec=
+   lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
++  if [ -z "$lt_multi_os_dir" ];then
++    lt_multi_os_dir=../lib64
++  fi
+   for lt_sys_path in $lt_search_path_spec; do
+     if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+       lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+diff --git a/libcc1/configure b/libcc1/configure
+index 01cfb2806..3c437d690 100755
+--- a/libcc1/configure
++++ b/libcc1/configure
+@@ -9701,6 +9701,9 @@ if test "$GCC" = yes; then
+   # and add multilib dir if necessary.
+   lt_tmp_lt_search_path_spec=
+   lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
++  if [ -z "$lt_multi_os_dir" ];then
++    lt_multi_os_dir=../lib64
++  fi
+   for lt_sys_path in $lt_search_path_spec; do
+     if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+       lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+@@ -14865,6 +14868,9 @@ libsuffix=
+ if test "$GXX" = yes; then
+   libsuffix=`$CXX -print-multi-os-directory`
+ fi
++if [ -z "$libsuffix" ];then
++  libsuffix=../lib64
++fi
+ 
+ 
+ { $as_echo "$as_me:${as_lineno-$LINENO}: checking for socket libraries" >&5
+diff --git a/libcc1/configure.ac b/libcc1/configure.ac
+index 36f5a7e09..acd7c4c04 100644
+--- a/libcc1/configure.ac
++++ b/libcc1/configure.ac
+@@ -72,6 +72,9 @@ libsuffix=
+ if test "$GXX" = yes; then
+   libsuffix=`$CXX -print-multi-os-directory`
+ fi
++if [ -z "$libsuffix" ];then
++  libsuffix=../lib64
++fi
+ AC_SUBST(libsuffix)
+ 
+ dnl Test for -lsocket and -lnsl.  Copied from libgo/configure.ac.
+diff --git a/libiberty/Makefile.in b/libiberty/Makefile.in
+index 1b17c2e3a..2bfa00de5 100644
+--- a/libiberty/Makefile.in
++++ b/libiberty/Makefile.in
+@@ -385,7 +385,10 @@ install-strip: install
+ # multilib-specific flags, it's overridden by FLAGS_TO_PASS from the
+ # default multilib, so we have to take CFLAGS into account as well,
+ # since it will be passed the multilib flags.
+-MULTIOSDIR = `$(CC) $(CFLAGS) -print-multi-os-directory`
++MULTIOSDIR = `$(CC) $(CFLAGS) -print-multi-os-directory 2>/dev/null`
++ifeq ($(MULTIOSDIR),)
++ MULTIOSDIR = ../lib64
++endif
+ install_to_libdir: all
+ 	if test -n "${target_header_dir}"; then \
+ 		${mkinstalldirs} $(DESTDIR)$(libdir)/$(MULTIOSDIR); \
+diff --git a/libtool.m4 b/libtool.m4
+index 17f8e5f30..86fc1e705 100644
+--- a/libtool.m4
++++ b/libtool.m4
+@@ -2059,6 +2059,9 @@ if test "$GCC" = yes; then
+   # and add multilib dir if necessary.
+   lt_tmp_lt_search_path_spec=
+   lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null`
++  if [ -z "$lt_multi_os_dir" ];then
++    lt_multi_os_dir=../lib64
++  fi
+   for lt_sys_path in $lt_search_path_spec; do
+     if test -d "$lt_sys_path/$lt_multi_os_dir"; then
+       lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir"
+-- 
+2.47.0
+
diff --git a/0308-Fix-enum-INPUT-MIDDLE-FINAL-aes_stage.patch b/0308-Fix-enum-INPUT-MIDDLE-FINAL-aes_stage.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a16e93722c9066aecd6191a69bf30f3d6ecc1320
--- /dev/null
+++ b/0308-Fix-enum-INPUT-MIDDLE-FINAL-aes_stage.patch
@@ -0,0 +1,108 @@
+From 1624bdceb341e0034c22ce46bc2e422726f76cce Mon Sep 17 00:00:00 2001
+From: YunQiang Su 
+Date: Tue, 8 Oct 2024 17:59:56 +0800
+Subject: [PATCH 2/2] Fix enum { INPUT, MIDDLE, FINAL } aes_stage
+
+The FINAL is defined in ansidecl.h.
+Let's rename the elements to
+   aesINPUT, aesMIDDLE, aesFINAL
+to avoid conflicts.
+
+I found this problem when trying to build gcc with clang.
+In fact FINAL is defined to empty for clang, and `final` for gcc.
+So it coincidentally worked for gcc.
+---
+ gcc/crypto-accel.cc | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/gcc/crypto-accel.cc b/gcc/crypto-accel.cc
+index e7766a585..716c4a38b 100644
+--- a/gcc/crypto-accel.cc
++++ b/gcc/crypto-accel.cc
+@@ -1251,7 +1251,7 @@ public:
+ 
+ /* AES stage description.  Required for some specializations
+    for curtain rounds.  */
+-typedef enum { INPUT, MIDDLE, FINAL } aes_stage;
++typedef enum { aesINPUT, aesMIDDLE, aesFINAL } aes_stage;
+ 
+ /* AES entity description.  It can be both round or state inside round.
+    It provides interface for unified analysis between blocks of 4 parts:
+@@ -1356,7 +1356,7 @@ struct state_input
+ 
+ /* Input round state uses special input.  */
+ template<>
+-struct state_input
++struct state_input
+ {
+   typedef std::pair type;
+ 
+@@ -1389,7 +1389,7 @@ struct state_output
+ 
+ /* Final round state generates special output.  */
+ template<>
+-struct state_output
++struct state_output
+ {
+   typedef std::pair type;
+ 
+@@ -1409,7 +1409,7 @@ struct round_input
+ 
+ /* Input round uses special input just as its state.  */
+ template<>
+-struct round_input
++struct round_input
+ {
+   typedef std::pair type;
+ };
+@@ -1437,7 +1437,7 @@ struct round_output
+    AES encryption.  */
+ template<>
+ template<>
+-void round_output::reorder (type &out)
++void round_output::reorder (type &out)
+ {
+   gcc_assert (out.size () == 4);
+   std::swap (out[1], out[3]);
+@@ -1445,14 +1445,14 @@ void round_output::reorder (type &out)
+ 
+ template<>
+ template<>
+-void round_output::reorder (type &out)
++void round_output::reorder (type &out)
+ {
+-  round_output::reorder (out);
++  round_output::reorder (out);
+ }
+ 
+ /* Final round generates special output.  */
+ template<>
+-struct round_output : state_output
++struct round_output : state_output
+ {
+   template
+   static void finalize (type &out, const T &v)
+@@ -1644,14 +1644,14 @@ public:
+   typedef std::map > table_ref_map;
+ 
+   /* AES states typedefs.  */
+-  typedef aes_state aes_input_state;
+-  typedef aes_state, MIDDLE, T> aes_body_state;
+-  typedef aes_state, FINAL, T> aes_final_state;
++  typedef aes_state aes_input_state;
++  typedef aes_state, aesMIDDLE, T> aes_body_state;
++  typedef aes_state, aesFINAL, T> aes_final_state;
+ 
+   /* AES rounds typedefs.  */
+-  typedef aes_round aes_input_round;
+-  typedef aes_round, MIDDLE, T> aes_body_round;
+-  typedef aes_round, FINAL, T> aes_final_round;
++  typedef aes_round aes_input_round;
++  typedef aes_round, aesMIDDLE, T> aes_body_round;
++  typedef aes_round, aesFINAL, T> aes_final_round;
+ 
+   bool run ();
+ 
+-- 
+2.47.0
+
diff --git a/0309-CSPGO-Add-context-sensitive-PGO.patch b/0309-CSPGO-Add-context-sensitive-PGO.patch
new file mode 100644
index 0000000000000000000000000000000000000000..04174af2359e3f4b7e46d4f104e68068092d0782
--- /dev/null
+++ b/0309-CSPGO-Add-context-sensitive-PGO.patch
@@ -0,0 +1,1337 @@
+From 45a424e51c4c5de46062f2d7f355da8a99604d71 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Tue, 19 Nov 2024 22:06:48 +0800
+Subject: [PATCH] [CSPGO] Add context sensitive PGO
+
+As in LLVM, GCC PGO profile counts are not context sensitive. Therefore,
+CSPGO is added to collect the profile again after PGO, to obtain accurate
+execution information after inlining for better performance.
+---
+ gcc/auto-profile.cc |   2 +-
+ gcc/cgraph.cc       |  47 ++++++++++
+ gcc/cgraph.h        |   8 +-
+ gcc/cgraphunit.cc   |  63 ++++++++++++-
+ gcc/common.opt      |  20 ++++
+ gcc/coverage.cc     | 162 +++++++++++++++++++++++---------
+ gcc/coverage.h      |   5 +-
+ gcc/gcc.cc          |   5 +-
+ gcc/ipa-profile.cc  |   4 +-
+ gcc/lto-cgraph.cc   |   7 ++
+ gcc/opts.cc         |  18 ++++
+ gcc/passes.cc       |  71 ++++++++++++++
+ gcc/passes.def      |   1 +
+ gcc/profile.cc      |  27 +++---
+ gcc/profile.h       |   2 +-
+ gcc/timevar.def     |   1 +
+ gcc/tree-pass.h     |   2 +
+ gcc/tree-profile.cc | 223 +++++++++++++++++++++++++++++++++++++++++++-
+ gcc/value-prof.cc   |   9 +-
+ gcc/value-prof.h    |   4 +-
+ 20 files changed, 611 insertions(+), 70 deletions(-)
+
+diff --git a/gcc/auto-profile.cc b/gcc/auto-profile.cc
+index f45f0ec66..5e85381ce 100644
+--- a/gcc/auto-profile.cc
++++ b/gcc/auto-profile.cc
+@@ -1775,7 +1775,7 @@ auto_profile (void)
+   if (symtab->state == FINISHED)
+     return 0;
+ 
+-  init_node_map (true);
++  init_node_map (true, false);
+   profile_info = autofdo::afdo_profile_info;
+ 
+   FOR_EACH_FUNCTION (node)
+diff --git a/gcc/cgraph.cc b/gcc/cgraph.cc
+index 7d738b891..95619aefa 100644
+--- a/gcc/cgraph.cc
++++ b/gcc/cgraph.cc
+@@ -4076,6 +4076,53 @@ cgraph_node::get_body (void)
+   return updated;
+ }
+ 
++/* Prepare function body.  When doing LTO, read cgraph_node's body from disk
++   if it is not already present.  When some IPA transformations are scheduled,
++   apply them.
++   Flag is used to control only skipping or enabling cspgo.  */
++
++bool
++cgraph_node::ipa_transform_for_cspgo (bool is_cspgo)
++{
++  bool updated;
++
++  bitmap_obstack_initialize (NULL);
++  updated = get_untransformed_body ();
++
++  /* Getting transformed body makes no sense for inline clones;
++     we should never use this on real clones because they are materialized
++     early.
++     TODO: Materializing clones here will likely lead to smaller LTRANS
++     footprint.  */
++  gcc_assert (!inlined_to && !clone_of);
++  if (ipa_transforms_to_apply.exists ())
++    {
++      opt_pass *saved_current_pass = current_pass;
++      FILE *saved_dump_file = dump_file;
++      const char *saved_dump_file_name = dump_file_name;
++      dump_flags_t saved_dump_flags = dump_flags;
++      dump_file_name = NULL;
++      set_dump_file (NULL);
++
++      push_cfun (DECL_STRUCT_FUNCTION (decl));
++
++      update_ssa (TODO_update_ssa_only_virtuals);
++      execute_all_ipa_transforms_for_cspgo (is_cspgo);
++      cgraph_edge::rebuild_edges ();
++      free_dominance_info (CDI_DOMINATORS);
++      free_dominance_info (CDI_POST_DOMINATORS);
++      pop_cfun ();
++      updated = true;
++
++      current_pass = saved_current_pass;
++      set_dump_file (saved_dump_file);
++      dump_file_name = saved_dump_file_name;
++      dump_flags = saved_dump_flags;
++    }
++  bitmap_obstack_release (NULL);
++  return updated;
++}
++
+ /* Return the DECL_STRUCT_FUNCTION of the function.  */
+ 
+ struct function *
+diff --git a/gcc/cgraph.h b/gcc/cgraph.h
+index 2332539e5..3fdf36769 100644
+--- a/gcc/cgraph.h
++++ b/gcc/cgraph.h
+@@ -1097,11 +1097,17 @@ struct GTY((tag ("SYMTAB_FUNCTION"))) cgraph_node : public symtab_node
+      present.  */
+   bool get_untransformed_body ();
+ 
+-  /* Prepare function body.  When doing LTO, read cgraph_node's body from disk 
++  /* Prepare function body.  When doing LTO, read cgraph_node's body from disk
+      if it is not already present.  When some IPA transformations are scheduled,
+      apply them.  */
+   bool get_body ();
+ 
++  /* Prepare function body.  When doing LTO, read cgraph_node's body from disk
++     if it is not already present.  When some IPA transformations are scheduled,
++     apply them.
++     Flag is used to control only skipping or enabling cspgo.  */
++  bool ipa_transform_for_cspgo (bool);
++
+   void materialize_clone (void);
+ 
+   /* Release memory used to represent body of function.
+diff --git a/gcc/cgraphunit.cc b/gcc/cgraphunit.cc
+index 5aa7b57c9..37cc83eab 100644
+--- a/gcc/cgraphunit.cc
++++ b/gcc/cgraphunit.cc
+@@ -208,6 +208,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "ipa-inline.h"
+ #include "omp-offload.h"
+ #include "symtab-thunks.h"
++#include "profile.h" // for del_node_map
+ 
+ /* Queue of cgraph nodes scheduled to be added into cgraph.  This is a
+    secondary queue used during optimization to accommodate passes that
+@@ -1928,6 +1929,29 @@ tp_first_run_node_cmp (const void *pa, const void *pb)
+   return tp_first_run_a - tp_first_run_b;
+ }
+ 
++static bool
++expand_node_with_cspgo (cgraph_node *node, bool is_cspgo)
++{
++  gcc_assert (node);
++  /* Nodes in other partition, inline to, and clone of are not
++     interesting in cspgo.  */
++  if (!node->has_gimple_body_p ()
++      || node->in_other_partition
++      || node->inlined_to
++      || node->clone_of)
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] node %s will not do"
++			    " transform\n", node->dump_name ());
++      return false;
++    }
++
++  if (node->process)
++    node->ipa_transform_for_cspgo (is_cspgo);
++  return true;
++}
++
++
+ /* Expand all functions that must be output.
+ 
+    Attempt to topologically sort the nodes so function is output when
+@@ -1968,6 +1992,39 @@ expand_all_functions (void)
+   /* First output functions with time profile in specified order.  */
+   qsort (tp_first_run_order, tp_first_run_order_pos,
+ 	 sizeof (cgraph_node *), tp_first_run_node_cmp);
++
++  if (flag_csprofile_generate || flag_csprofile_use)
++    {
++      bool is_cspgo = false;
++
++      /* We need to execute loop twice.  The first performs all transforms
++	 except cspgo, and the second performs cspgo transform.  */
++      for (int idx = 0; idx < 2; idx++)
++	{
++	  for (i = 0; i < tp_first_run_order_pos; i++)
++	    {
++	      node = tp_first_run_order[i];
++	      if (!expand_node_with_cspgo (node, is_cspgo))
++		continue;
++	    }
++
++	  for (i = new_order_pos - 1; i >= 0; i--)
++	    {
++	      node = order[i];
++	      if (!expand_node_with_cspgo (node, is_cspgo))
++		continue;
++	    }
++
++	  is_cspgo = true;
++	}
++
++      if (flag_csprofile_use)
++	handle_missing_profiles ();
++
++      if (coverage_node_map_initialized_p ())
++	del_node_map ();
++    }
++
+   for (i = 0; i < tp_first_run_order_pos; i++)
+     {
+       node = tp_first_run_order[i];
+@@ -2009,6 +2066,10 @@ expand_all_functions (void)
+     fprintf (symtab->dump_file, "Expanded functions with time profile:%u/%u\n",
+              profiled_func_count, expanded_func_count);
+ 
++  /* Generate coverage variables and constructor for cspgo.  */
++  if (flag_csprofile_generate)
++    coverage_finish (true);
++
+   symtab->process_new_functions ();
+   free_gimplify_stack ();
+   delete ipa_saved_clone_sources;
+@@ -2176,7 +2237,7 @@ ipa_passes (void)
+   if (!in_lto_p)
+     {
+       /* Generate coverage variables and constructors.  */
+-      coverage_finish ();
++      coverage_finish (false);
+ 
+       /* Process new functions added.  */
+       set_cfun (NULL);
+diff --git a/gcc/common.opt b/gcc/common.opt
+index be5fcc681..fc2920cee 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2397,6 +2397,10 @@ Common Joined RejectNegative Var(profile_data_prefix)
+ Set the top-level directory for storing the profile data.
+ The default is 'pwd'.
+ 
++fcfgo-csprofile-dir=
++Common Joined RejectNegative Var(csprofile_data_prefix)
++Set the top-level directory for storing the cs profile data.
++
+ fprofile-note=
+ Common Joined RejectNegative Var(profile_note_location)
+ Select the name for storing the profile note file.
+@@ -2461,6 +2465,14 @@ fprofile-generate=
+ Common Joined RejectNegative
+ Enable common options for generating profile info for profile feedback directed optimizations, and set -fprofile-dir=.
+ 
++fcfgo-csprofile-generate
++Common Var(flag_csprofile_generate)
++Enable common options for generating context sensitive profile info for profile feedback directed optimizations.
++
++fcfgo-csprofile-generate=
++Common Joined RejectNegative
++Enable common options for generating context sensitive profile info for profile feedback directed optimizations, and set -fcfgo-csprofile-dir=.
++
+ fkernel-pgo
+ Common Var(flag_kernel_pgo) Optimization Init(0)
+ Disable TLS setting of instrumentation variables to support PGO kernel compilation in -fprofile-generate, as kernel does not support TLS.
+@@ -2485,6 +2497,14 @@ fprofile-use=
+ Common Joined RejectNegative
+ Enable common options for performing profile feedback directed optimizations, and set -fprofile-dir=.
+ 
++fcfgo-csprofile-use
++Common Var(flag_csprofile_use)
++Enable common options for performing context sensitive profile feedback directed optimizations.
++
++fcfgo-csprofile-use=
++Common Joined RejectNegative
++Enable common options for performing context sensitive profile feedback directed optimizations, and set -fcfgo-csprofile-dir=.
++
+ fprofile-values
+ Common Var(flag_profile_values)
+ Insert code to profile values of expressions.
+diff --git a/gcc/coverage.cc b/gcc/coverage.cc
+index 8ece5db68..1a7fcb5df 100644
+--- a/gcc/coverage.cc
++++ b/gcc/coverage.cc
+@@ -86,7 +86,6 @@ struct counts_entry : pointer_hash 
+ 
+ static GTY(()) struct coverage_data *functions_head = 0;
+ static struct coverage_data **functions_tail = &functions_head;
+-static unsigned no_coverage = 0;
+ 
+ /* Cumulative counter information for whole program.  */
+ static unsigned prg_ctr_mask; /* Mask of counter types generated.  */
+@@ -114,6 +113,9 @@ static unsigned bbg_file_stamp;
+ /* Name of the count data (gcda) file.  */
+ static char *da_file_name;
+ 
++/* Name of the cs count data (gcda) file.  */
++static char *cs_da_file_name;
++
+ /* The names of merge functions for counters.  */
+ #define STR(str) #str
+ #define DEF_GCOV_COUNTER(COUNTER, NAME, FN_TYPE) STR(__gcov_merge ## FN_TYPE),
+@@ -173,23 +175,28 @@ counts_entry::remove (counts_entry *entry)
+ /* Hash table of count data.  */
+ static hash_table *counts_hash;
+ 
++/* Hash table of cs count data.  */
++static hash_table *cs_counts_hash;
++
+ /* Read in the counts file, if available.  */
+ 
+ static void
+-read_counts_file (void)
++read_counts_file (bool is_cspgo)
+ {
+   gcov_unsigned_t fn_ident = 0;
+   gcov_unsigned_t tag;
+   int is_error = 0;
+   unsigned lineno_checksum = 0;
+   unsigned cfg_checksum = 0;
++  char *gcda_file_name = (is_cspgo ? cs_da_file_name : da_file_name);
+ 
+-  if (!gcov_open (da_file_name, 1))
++  if (!gcov_open (gcda_file_name, 1))
+     return;
+ 
+   if (!gcov_magic (gcov_read_unsigned (), GCOV_DATA_MAGIC))
+     {
+-      warning (0, "%qs is not a gcov data file", da_file_name);
++      warning (0, "%qs is not a %s data file", gcda_file_name,
++	       (is_cspgo ? "cs gcov" : "gcov"));
+       gcov_close ();
+       return;
+     }
+@@ -201,7 +208,7 @@ read_counts_file (void)
+       GCOV_UNSIGNED2STRING (e, GCOV_VERSION);
+ 
+       warning (0, "%qs is version %q.*s, expected version %q.*s",
+- 	       da_file_name, 4, v, 4, e);
++	       gcda_file_name, 4, v, 4, e);
+       gcov_close ();
+       return;
+     }
+@@ -213,7 +220,7 @@ read_counts_file (void)
+   /* Read checksum.  */
+   gcov_read_unsigned ();
+ 
+-  counts_hash = new hash_table (10);
++  (is_cspgo ? cs_counts_hash : counts_hash) = new hash_table (10);
+   while ((tag = gcov_read_unsigned ()))
+     {
+       gcov_unsigned_t length;
+@@ -234,9 +241,18 @@ read_counts_file (void)
+ 	}
+       else if (tag == GCOV_TAG_OBJECT_SUMMARY)
+ 	{
+-	  profile_info = XCNEW (gcov_summary);
+-	  profile_info->runs = gcov_read_unsigned ();
+-	  profile_info->sum_max = gcov_read_unsigned ();
++	  if (is_cspgo)
++	    {
++	      /* TODO: runs and sum_max need better handling for cspgo.  */
++	      gcov_unsigned_t runs = gcov_read_unsigned ();
++	      gcov_unsigned_t sum_max = gcov_read_unsigned ();
++	    }
++	  else
++	    {
++	      profile_info = XCNEW (gcov_summary);
++	      profile_info->runs = gcov_read_unsigned ();
++	      profile_info->sum_max = gcov_read_unsigned ();
++	    }
+ 	}
+       else if (GCOV_TAG_IS_COUNTER (tag) && fn_ident)
+ 	{
+@@ -249,7 +265,9 @@ read_counts_file (void)
+ 	  elt.ident = fn_ident;
+ 	  elt.ctr = GCOV_COUNTER_FOR_TAG (tag);
+ 
+-	  slot = counts_hash->find_slot (&elt, INSERT);
++	  slot = (is_cspgo ? cs_counts_hash->find_slot (&elt, INSERT) :
++			     counts_hash->find_slot (&elt, INSERT));
++
+ 	  entry = *slot;
+ 	  if (!entry)
+ 	    {
+@@ -264,12 +282,21 @@ read_counts_file (void)
+ 	  else if (entry->lineno_checksum != lineno_checksum
+ 		   || entry->cfg_checksum != cfg_checksum)
+ 	    {
+-	      error ("profile data for function %u is corrupted", fn_ident);
++	      error ("%s data for function %u is corrupted",
++		     (is_cspgo ? "cs profile" : "profile"), fn_ident);
+ 	      error ("checksum is (%x,%x) instead of (%x,%x)",
+ 		     entry->lineno_checksum, entry->cfg_checksum,
+ 		     lineno_checksum, cfg_checksum);
+-	      delete counts_hash;
+-	      counts_hash = NULL;
++	      if (is_cspgo)
++		{
++		  delete cs_counts_hash;
++		  cs_counts_hash = NULL;
++		}
++	      else
++		{
++		  delete counts_hash;
++		  counts_hash = NULL;
++		}
+ 	      break;
+ 	    }
+ 	  if (read_length > 0)
+@@ -282,9 +309,17 @@ read_counts_file (void)
+ 	  error (is_error < 0
+ 		 ? G_("%qs has overflowed")
+ 		 : G_("%qs is corrupted"),
+-		 da_file_name);
+-	  delete counts_hash;
+-	  counts_hash = NULL;
++		 gcda_file_name);
++	  if (is_cspgo)
++	    {
++	      delete cs_counts_hash;
++	      cs_counts_hash = NULL;
++	    }
++	  else
++	    {
++	      delete counts_hash;
++	      counts_hash = NULL;
++	    }
+ 	  break;
+ 	}
+     }
+@@ -296,26 +331,30 @@ read_counts_file (void)
+ 
+ gcov_type *
+ get_coverage_counts (unsigned counter, unsigned cfg_checksum,
+-		     unsigned lineno_checksum, unsigned int n_counts)
++		     unsigned lineno_checksum, unsigned int n_counts,
++		     bool is_cspgo)
+ {
+   counts_entry *entry, elt;
++  char *gcda_file_name = (is_cspgo ? cs_da_file_name : da_file_name);
+ 
+   /* No hash table, no counts.  */
+-  if (!counts_hash)
++  if ((is_cspgo ? (!cs_counts_hash) : (!counts_hash)))
+     {
+       static int warned = 0;
+ 
+       if (!warned++)
+ 	{
+ 	  warning (OPT_Wmissing_profile,
+-		   "%qs profile count data file not found",
+-		   da_file_name);
++		   "%qs %s count data file not found",
++		   gcda_file_name, (is_cspgo ? "cs profile" : "profile"));
+ 	  if (dump_enabled_p ())
+ 	    {
+ 	      dump_user_location_t loc
+ 		= dump_user_location_t::from_location_t (input_location);
+ 	      dump_printf_loc (MSG_MISSED_OPTIMIZATION, loc,
+-			       "file %s not found, %s\n", da_file_name,
++			       "%s file %s not found, %s\n",
++			       (is_cspgo ? "cs profile" : "profile"),
++			       gcda_file_name,
+ 			       (flag_guess_branch_prob
+ 				? "execution counts estimated"
+ 				: "execution counts assumed to be zero"));
+@@ -331,13 +370,14 @@ get_coverage_counts (unsigned counter, unsigned cfg_checksum,
+       elt.ident = cgraph_node::get (current_function_decl)->profile_id;
+     }
+   elt.ctr = counter;
+-  entry = counts_hash->find (&elt);
++  entry = (is_cspgo ? cs_counts_hash->find (&elt) : counts_hash->find (&elt));
+   if (!entry)
+     {
+       if (counter == GCOV_COUNTER_ARCS)
+ 	warning_at (DECL_SOURCE_LOCATION (current_function_decl),
+ 		    OPT_Wmissing_profile,
+-		    "profile for function %qD not found in profile data",
++		    "%s for function %qD not found in profile data",
++		    (is_cspgo ? "cs profile" : "profile"),
+ 		    current_function_decl);
+       /* The function was not emitted, or is weak and not chosen in the
+ 	 final executable.  Silently fail, because there's nothing we
+@@ -357,9 +397,10 @@ get_coverage_counts (unsigned counter, unsigned cfg_checksum,
+ 	warning_printed =
+ 	  warning_at (DECL_SOURCE_LOCATION (current_function_decl),
+ 		      OPT_Wcoverage_mismatch,
+-		      "number of counters in profile data for function %qD "
++		      "number of counters in %s data for function %qD "
+ 		      "does not match "
+ 		      "its profile data (counter %qs, expected %i and have %i)",
++		      (is_cspgo ? "cs profile" : "profile"),
+ 		      current_function_decl,
+ 		      ctr_names[counter], entry->n_counts, n_counts);
+       else
+@@ -367,7 +408,8 @@ get_coverage_counts (unsigned counter, unsigned cfg_checksum,
+ 	  warning_at (DECL_SOURCE_LOCATION (current_function_decl),
+ 		      OPT_Wcoverage_mismatch,
+ 		      "the control flow of function %qD does not match "
+-		      "its profile data (counter %qs)", current_function_decl,
++		      "its %s data (counter %qs)", current_function_decl,
++		      (is_cspgo ? "cs profile" : "profile"),
+ 		      ctr_names[counter]);
+       if (warning_printed && dump_enabled_p ())
+ 	{
+@@ -413,9 +455,6 @@ get_coverage_counts (unsigned counter, unsigned cfg_checksum,
+ int
+ coverage_counter_alloc (unsigned counter, unsigned num)
+ {
+-  if (no_coverage)
+-    return 0;
+-
+   if (!num)
+     return 1;
+ 
+@@ -623,7 +662,7 @@ coverage_begin_function (unsigned lineno_checksum, unsigned cfg_checksum)
+ {
+   /* We don't need to output .gcno file unless we're under -ftest-coverage
+      (e.g. -fprofile-arcs/generate/use don't need .gcno to work). */
+-  if (no_coverage || !bbg_file_name)
++  if (!bbg_file_name)
+     return 0;
+ 
+   expanded_location startloc
+@@ -981,7 +1020,8 @@ build_info_type (tree type, tree fn_info_ptr_type)
+    function info objects.  */
+ 
+ static tree
+-build_info (tree info_type, tree fn_ary, unsigned object_checksum)
++build_info (tree info_type, tree fn_ary, unsigned object_checksum,
++	    bool is_cspgo)
+ {
+   tree info_fields = TYPE_FIELDS (info_type);
+   tree merge_fn_type, n_funcs;
+@@ -1014,8 +1054,16 @@ build_info (tree info_type, tree fn_ary, unsigned object_checksum)
+   info_fields = DECL_CHAIN (info_fields);
+ 
+   /* Filename */
+-  da_file_name_len = strlen (da_file_name);
+-  filename_string = build_string (da_file_name_len + 1, da_file_name);
++  if (is_cspgo)
++    {
++      da_file_name_len = strlen (cs_da_file_name);
++      filename_string = build_string (da_file_name_len + 1, cs_da_file_name);
++    }
++  else
++    {
++      da_file_name_len = strlen (da_file_name);
++      filename_string = build_string (da_file_name_len + 1, da_file_name);
++    }
+   TREE_TYPE (filename_string) = build_array_type
+     (char_type_node, build_index_type (size_int (da_file_name_len)));
+   CONSTRUCTOR_APPEND_ELT (v1, info_fields,
+@@ -1142,7 +1190,7 @@ build_gcov_info_var_registration (tree gcov_info_type)
+    for the object.  Returns TRUE if coverage data is being emitted.  */
+ 
+ static bool
+-coverage_obj_init (void)
++coverage_obj_init (bool is_cspgo)
+ {
+   tree gcov_info_type;
+   unsigned n_counters = 0;
+@@ -1151,8 +1199,6 @@ coverage_obj_init (void)
+   struct coverage_data **fn_prev;
+   char name_buf[32];
+ 
+-  no_coverage = 1; /* Disable any further coverage.  */
+-
+   if (!prg_ctr_mask)
+     return false;
+ 
+@@ -1161,7 +1207,9 @@ coverage_obj_init (void)
+ 
+   /* Prune functions.  */
+   for (fn_prev = &functions_head; (fn = *fn_prev);)
+-    if (DECL_STRUCT_FUNCTION (fn->fn_decl))
++    /* In cspgo, the DECL_STRUCT_FUNCTION attribute has been checked in
++       csprofile_transform.  */
++    if (is_cspgo || DECL_STRUCT_FUNCTION (fn->fn_decl))
+       fn_prev = &fn->next;
+     else
+       /* The function is not being emitted, remove from list.  */
+@@ -1225,7 +1273,7 @@ coverage_obj_fn (vec *ctor, tree fn,
+ 
+ static void
+ coverage_obj_finish (vec *ctor,
+-		     unsigned object_checksum)
++		     unsigned object_checksum, bool is_cspgo)
+ {
+   unsigned n_functions = vec_safe_length (ctor);
+   tree fn_info_ary_type = build_array_type
+@@ -1242,7 +1290,8 @@ coverage_obj_finish (vec *ctor,
+   varpool_node::finalize_decl (fn_info_ary);
+   
+   DECL_INITIAL (gcov_info_var)
+-    = build_info (TREE_TYPE (gcov_info_var), fn_info_ary, object_checksum);
++    = build_info (TREE_TYPE (gcov_info_var), fn_info_ary, object_checksum,
++		  is_cspgo);
+   varpool_node::finalize_decl (gcov_info_var);
+ }
+ 
+@@ -1310,11 +1359,32 @@ coverage_init (const char *filename)
+   memcpy (da_file_name + prefix_len, filename, len);
+   strcpy (da_file_name + prefix_len + len, GCOV_DATA_SUFFIX);
+ 
++  /* Name of cspgo da file.  */
++  if (flag_csprofile_generate || flag_csprofile_use)
++    {
++      if (csprofile_data_prefix)
++	prefix_len = strlen (csprofile_data_prefix);
++
++      cs_da_file_name = XNEWVEC (char, len + strlen (GCOV_DATA_SUFFIX)
++				 + prefix_len + 2);
++
++      if (csprofile_data_prefix)
++	{
++	  memcpy (cs_da_file_name, csprofile_data_prefix, prefix_len);
++	  cs_da_file_name[prefix_len++] = *separator;
++	}
++      memcpy (cs_da_file_name + prefix_len, filename, len);
++      strcpy (cs_da_file_name + prefix_len + len, GCOV_DATA_SUFFIX);
++    }
++
+   bbg_file_stamp = local_tick;
+   if (flag_auto_profile)
+     read_autofdo_file ();
+   else if (flag_branch_probabilities)
+-    read_counts_file ();
++    read_counts_file (false);
++
++  if (flag_csprofile_use)
++    read_counts_file (true);
+ 
+   /* Name of bbg file.  */
+   if (flag_test_coverage && !flag_compare_debug)
+@@ -1354,7 +1424,7 @@ coverage_init (const char *filename)
+    variables and constructor.  */
+ 
+ void
+-coverage_finish (void)
++coverage_finish (bool is_cspgo)
+ {
+   if (bbg_file_name && gcov_close ())
+     unlink (bbg_file_name);
+@@ -1368,7 +1438,7 @@ coverage_finish (void)
+   /* Global GCDA checksum that aggregates all functions.  */
+   unsigned object_checksum = 0;
+ 
+-  if (coverage_obj_init ())
++  if (coverage_obj_init (is_cspgo))
+     {
+       vec *fn_ctor = NULL;
+       struct coverage_data *fn;
+@@ -1382,11 +1452,17 @@ coverage_finish (void)
+ 					    fn->lineno_checksum);
+ 	  object_checksum = crc32_unsigned (object_checksum, fn->cfg_checksum);
+ 	}
+-      coverage_obj_finish (fn_ctor, object_checksum);
++      coverage_obj_finish (fn_ctor, object_checksum, is_cspgo);
+     }
+ 
+-  XDELETEVEC (da_file_name);
++  if (da_file_name)
++    XDELETEVEC (da_file_name);
+   da_file_name = NULL;
++  if (is_cspgo)
++    {
++      XDELETEVEC (cs_da_file_name);
++      cs_da_file_name = NULL;
++    }
+ }
+ 
+ #include "gt-coverage.h"
+diff --git a/gcc/coverage.h b/gcc/coverage.h
+index 0ac046c88..a4e90e8bd 100644
+--- a/gcc/coverage.h
++++ b/gcc/coverage.h
+@@ -23,7 +23,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "gcov-io.h"
+ 
+ extern void coverage_init (const char *);
+-extern void coverage_finish (void);
++extern void coverage_finish (bool);
+ extern void coverage_remove_note_file (void);
+ 
+ /* Start outputting coverage information for the current
+@@ -53,7 +53,8 @@ extern tree tree_coverage_counter_addr (unsigned /*counter*/, unsigned/*num*/);
+ extern gcov_type *get_coverage_counts (unsigned /*counter*/,
+ 				       unsigned /*cfg_checksum*/,
+ 				       unsigned /*lineno_checksum*/,
+-				       unsigned /*n_counts*/);
++				       unsigned /*n_counts*/,
++				       bool /*is_cspgo*/);
+ 
+ extern tree get_gcov_type (void);
+ extern bool coverage_node_map_initialized_p (void);
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index 32e45adc2..b37b50be2 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -1147,7 +1147,8 @@ proper position among the other output files.  */
+ 	%:include(libgomp.spec)%(link_gomp)}\
+     %{fgnu-tm:%:include(libitm.spec)%(link_itm)}\
+     %(mflib) " STACK_SPLIT_SPEC "\
+-    %{fprofile-arcs|fprofile-generate*|coverage:-lgcov} " SANITIZER_SPEC " \
++    %{fprofile-arcs|fprofile-generate*|fcfgo-csprofile-generate*|coverage:-lgcov} \
++    " SANITIZER_SPEC " \
+     %{!nostdlib:%{!r:%{!nodefaultlibs:%(link_ssp) %(link_gcc_c_sequence)}}}\
+     %{!nostdlib:%{!r:%{!nostartfiles:%E}}} %{T*}  \n%(post_link) }}}}}}"
+ #endif
+@@ -1265,7 +1266,7 @@ static const char *cc1_options =
+  %{!fsyntax-only:%{S:%W{o*}%{!o*:-o %w%b.s}}}\
+  %{fsyntax-only:-o %j} %{-param*}\
+  %{coverage:-fprofile-arcs -ftest-coverage}\
+- %{fprofile-arcs|fprofile-generate*|coverage:\
++ %{fprofile-arcs|fprofile-generate*|fcfgo-csprofile-generate*|coverage:\
+    %{!fprofile-update=single:\
+      %{pthread:-fprofile-update=prefer-atomic}}}";
+ 
+diff --git a/gcc/ipa-profile.cc b/gcc/ipa-profile.cc
+index ffdcb4476..27554e507 100644
+--- a/gcc/ipa-profile.cc
++++ b/gcc/ipa-profile.cc
+@@ -827,7 +827,7 @@ ipa_profile (void)
+   if (dump_file)
+     {
+       if (!node_map_initialized)
+-	init_node_map (false);
++	init_node_map (false, false);
+       node_map_initialized = true;
+ 
+       ipa_profile_dump_all_summaries (dump_file);
+@@ -850,7 +850,7 @@ ipa_profile (void)
+ 	  if (spec_count)
+ 	    {
+ 	      if (!node_map_initialized)
+-		init_node_map (false);
++		init_node_map (false, false);
+ 	      node_map_initialized = true;
+ 	      ncommon++;
+ 
+diff --git a/gcc/lto-cgraph.cc b/gcc/lto-cgraph.cc
+index 237743ef0..fd41941d1 100644
+--- a/gcc/lto-cgraph.cc
++++ b/gcc/lto-cgraph.cc
+@@ -1677,6 +1677,13 @@ merge_profile_summaries (struct lto_file_decl_data **file_data_vec)
+   if (flag_ltrans)
+     return;
+ 
++  /* TODO: Due to an unknown bug, the max_run values obtained during the
++     cspgo GEN and USE stages differ, producing different scaling results,
++     different optimization decisions, and finally a coverage mismatch.
++     Therefore, skip the following processing steps when doing cspgo.  */
++  if (flag_csprofile_generate || flag_csprofile_use)
++    return;
++
+   /* Now compute count_materialization_scale of each node.
+      During LTRANS we already have values of count_materialization_scale
+      computed, so just update them.  */
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index d97f6079f..7900a658f 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -3016,6 +3016,15 @@ common_handle_option (struct gcc_options *opts,
+ 			     false);
+       break;
+ 
++    case OPT_fcfgo_csprofile_use_:
++      opts->x_csprofile_data_prefix = xstrdup (arg);
++      value = true;
++      /* No break here - do -fcfgo-csprofile-use processing.  */
++      /* FALLTHRU */
++    case OPT_fcfgo_csprofile_use:
++      SET_OPTION_IF_UNSET (opts, opts_set, flag_csprofile_use, value);
++      break;
++
+     case OPT_fauto_profile_:
+       opts->x_auto_profile_file = xstrdup (arg);
+       opts->x_flag_auto_profile = true;
+@@ -3059,6 +3068,15 @@ common_handle_option (struct gcc_options *opts,
+       SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_bit_cp, value);
+       break;
+ 
++    case OPT_fcfgo_csprofile_generate_:
++      opts->x_csprofile_data_prefix = xstrdup (arg);
++      value = true;
++      /* No break here - do -fcfgo-csprofile-generate processing.  */
++      /* FALLTHRU */
++    case OPT_fcfgo_csprofile_generate:
++      SET_OPTION_IF_UNSET (opts, opts_set, flag_csprofile_generate, value);
++      break;
++
+     case OPT_fprofile_info_section:
+       opts->x_profile_info_section = ".gcov_info";
+       break;
+diff --git a/gcc/passes.cc b/gcc/passes.cc
+index 36e5b4ac4..154690d02 100644
+--- a/gcc/passes.cc
++++ b/gcc/passes.cc
+@@ -2410,6 +2410,77 @@ execute_all_ipa_transforms (bool do_not_collect)
+   node->ipa_transforms_to_apply.release ();
+ }
+ 
+/* When is_cspgo is false, execute all passes except cspgo and save the cspgo
+   pass pointer for the next execution.  */
++
++void
++execute_all_ipa_transforms_for_cspgo (bool is_cspgo)
++{
++  struct cgraph_node *node;
++  ipa_opt_pass_d *cspgo_pass = NULL;
++  node = cgraph_node::get (current_function_decl);
++
++  cgraph_node *next_clone;
++  for (cgraph_node *n = node->clones; n; n = next_clone)
++    {
++      next_clone = n->next_sibling_clone;
++      if (n->decl != node->decl)
++	n->materialize_clone ();
++    }
++
++  int j = 0;
++  gcc::pass_manager *passes = g->get_passes ();
++  bool report = profile_report && (cfun->curr_properties & PROP_gimple) != 0;
++
++  if (report)
++    push_cfun (DECL_STRUCT_FUNCTION (node->decl));
++
++  for (auto p : node->ipa_transforms_to_apply)
++    {
++      /* Execute all passes except cspgo, and save the pointer of cspgo pass
++	 for the next execution.  */
++      if (!is_cspgo && strstr (p->name, "csprofile") != NULL)
++	{
++	  cspgo_pass = p;
++	  continue;
++	}
++      /* To get consistent statistics, we need to account each function
++	 to each IPA pass.  */
++      if (report)
++	{
++	  for (;j < p->static_pass_number; j++)
++	    if (passes->get_pass_for_id (j)
++		&& passes->get_pass_for_id (j)->type == IPA_PASS
++		&& ((ipa_opt_pass_d *)passes->get_pass_for_id (j))
++		   ->function_transform)
++	      {
++		check_profile_consistency (j, true);
++		account_profile (j, true);
++	      }
++	  gcc_checking_assert (passes->get_pass_for_id (j) == p);
++	}
++      execute_one_ipa_transform_pass (node, p, true);
++    }
++  /* Account remaining IPA passes.  */
++  if (report)
++    {
++      for (;!passes->get_pass_for_id (j)
++	    || passes->get_pass_for_id (j)->type != RTL_PASS; j++)
++	if (passes->get_pass_for_id (j)
++	    && passes->get_pass_for_id (j)->type == IPA_PASS
++	    && ((ipa_opt_pass_d *)passes->get_pass_for_id (j))
++	       ->function_transform)
++	  {
++	    check_profile_consistency (j, true);
++	    account_profile (j, true);
++	  }
++      pop_cfun ();
++    }
++  node->ipa_transforms_to_apply.release ();
++  if (!is_cspgo)
++    node->ipa_transforms_to_apply.safe_push (cspgo_pass);
++}
++
+ /* Check if PASS is explicitly disabled or enabled and return
+    the gate status.  FUNC is the function to be processed, and
+    GATE_STATUS is the gate status determined by pass manager by
+diff --git a/gcc/passes.def b/gcc/passes.def
+index e945af96a..862ef0d8f 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -164,6 +164,7 @@ along with GCC; see the file COPYING3.  If not see
+   NEXT_PASS (pass_ipa_cdtor_merge);
+   NEXT_PASS (pass_ipa_fn_summary);
+   NEXT_PASS (pass_ipa_inline);
++  NEXT_PASS (pass_ipa_csprofile);
+   NEXT_PASS (pass_ipa_pure_const);
+   NEXT_PASS (pass_ipa_modref);
+   NEXT_PASS (pass_ipa_free_fn_summary, false /* small_p */);
+diff --git a/gcc/profile.cc b/gcc/profile.cc
+index 40e105258..0ffc1ba4f 100644
+--- a/gcc/profile.cc
++++ b/gcc/profile.cc
+@@ -201,7 +201,7 @@ instrument_values (histogram_values values)
+    CFG_CHECKSUM is the precomputed checksum for the CFG.  */
+ 
+ static gcov_type *
+-get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
++get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum, bool is_cspgo)
+ {
+   unsigned num_edges = 0;
+   basic_block bb;
+@@ -219,7 +219,7 @@ get_exec_counts (unsigned cfg_checksum, unsigned lineno_checksum)
+     }
+ 
+   counts = get_coverage_counts (GCOV_COUNTER_ARCS, cfg_checksum,
+-				lineno_checksum, num_edges);
++				lineno_checksum, num_edges, is_cspgo);
+   if (!counts)
+     return NULL;
+ 
+@@ -418,7 +418,8 @@ cmp_stats (const void *ptr1, const void *ptr2)
+    CFG_CHECKSUM is the precomputed checksum for the CFG.  */
+ 
+ static void
+-compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
++compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum,
++			      bool is_cspgo)
+ {
+   basic_block bb;
+   int i;
+@@ -427,7 +428,8 @@ compute_branch_probabilities (unsigned cfg_checksum, unsigned lineno_checksum)
+   int passes;
+   int hist_br_prob[20];
+   int num_branches;
+-  gcov_type *exec_counts = get_exec_counts (cfg_checksum, lineno_checksum);
++  gcov_type *exec_counts = get_exec_counts (cfg_checksum, lineno_checksum,
++					    is_cspgo);
+   int inconsistent = 0;
+ 
+   /* Very simple sanity checks so we catch bugs in our profiling code.  */
+@@ -868,7 +870,7 @@ sort_hist_values (histogram_value hist)
+ 
+ static void
+ compute_value_histograms (histogram_values values, unsigned cfg_checksum,
+-                          unsigned lineno_checksum)
++			  unsigned lineno_checksum, bool is_cspgo)
+ {
+   unsigned i, j, t, any;
+   unsigned n_histogram_counters[GCOV_N_VALUE_COUNTERS];
+@@ -898,7 +900,8 @@ compute_value_histograms (histogram_values values, unsigned cfg_checksum,
+       histogram_counts[t] = get_coverage_counts (COUNTER_FOR_HIST_TYPE (t),
+ 						 cfg_checksum,
+ 						 lineno_checksum,
+-						 n_histogram_counters[t]);
++						 n_histogram_counters[t],
++						 is_cspgo);
+       if (histogram_counts[t])
+ 	any = 1;
+       act_count[t] = histogram_counts[t];
+@@ -1128,11 +1131,12 @@ compare_freqs (const void *p1, const void *p2)
+ /* Only read execution count for thunks.  */
+ 
+ void
+-read_thunk_profile (struct cgraph_node *node)
++read_thunk_profile (struct cgraph_node *node, bool is_cspgo)
+ {
+   tree old = current_function_decl;
+   current_function_decl = node->decl;
+-  gcov_type *counts = get_coverage_counts (GCOV_COUNTER_ARCS, 0, 0, 1);
++  gcov_type *counts = get_coverage_counts (GCOV_COUNTER_ARCS, 0, 0, 1,
++					   is_cspgo);
+   if (counts)
+     {
+       node->callees->count = node->count
+@@ -1164,7 +1168,7 @@ read_thunk_profile (struct cgraph_node *node)
+    Main entry point of this file.  */
+ 
+ void
+-branch_prob (bool thunk)
++branch_prob (bool thunk, bool is_cspgo)
+ {
+   basic_block bb;
+   unsigned i;
+@@ -1507,9 +1511,10 @@ branch_prob (bool thunk)
+ 
+   if (flag_branch_probabilities)
+     {
+-      compute_branch_probabilities (cfg_checksum, lineno_checksum);
++      compute_branch_probabilities (cfg_checksum, lineno_checksum, is_cspgo);
+       if (flag_profile_values)
+-	compute_value_histograms (values, cfg_checksum, lineno_checksum);
++	compute_value_histograms (values, cfg_checksum, lineno_checksum,
++				  is_cspgo);
+     }
+ 
+   remove_fake_edges ();
+diff --git a/gcc/profile.h b/gcc/profile.h
+index c5b6f4889..e92d6154c 100644
+--- a/gcc/profile.h
++++ b/gcc/profile.h
+@@ -68,7 +68,7 @@ extern void mcf_smooth_cfg (void);
+ 
+ extern gcov_type sum_edge_counts (vec *edges);
+ 
+-extern void init_node_map (bool);
++extern void init_node_map (bool, bool);
+ extern void del_node_map (void);
+ 
+ extern void get_working_sets (void);
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index fc2b1e1e7..6fdb2c767 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -104,6 +104,7 @@ DEFTIMEVAR (TV_WHOPR_PARTITIONING    , "whopr partitioning")
+ DEFTIMEVAR (TV_WHOPR_LTRANS          , "whopr ltrans")
+ DEFTIMEVAR (TV_IPA_REFERENCE         , "ipa reference")
+ DEFTIMEVAR (TV_IPA_PROFILE           , "ipa profile")
++DEFTIMEVAR (TV_IPA_CSPROFILE         , "ipa csprofile")
+ DEFTIMEVAR (TV_IPA_AUTOFDO           , "auto profile")
+ DEFTIMEVAR (TV_IPA_PURE_CONST        , "ipa pure const")
+ DEFTIMEVAR (TV_IPA_ICF		     , "ipa icf")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 18b0f8022..f9c2eed8b 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -519,6 +519,7 @@ extern simple_ipa_opt_pass *make_pass_ipa_increase_alignment (gcc::context
+ 							      *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_fn_summary (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_inline (gcc::context *ctxt);
++extern ipa_opt_pass_d *make_pass_ipa_csprofile (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_free_lang_data (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_free_fn_summary (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_cp (gcc::context *ctxt);
+@@ -667,6 +668,7 @@ extern void execute_pass_list (function *, opt_pass *);
+ extern void execute_ipa_pass_list (opt_pass *);
+ extern void execute_ipa_summary_passes (ipa_opt_pass_d *);
+ extern void execute_all_ipa_transforms (bool);
++extern void execute_all_ipa_transforms_for_cspgo (bool);
+ extern void execute_all_ipa_stmt_fixups (struct cgraph_node *, gimple **);
+ extern bool pass_init_dump_file (opt_pass *);
+ extern void pass_fini_dump_file (opt_pass *);
+diff --git a/gcc/tree-profile.cc b/gcc/tree-profile.cc
+index e7646f1a1..aa3a2b3a9 100644
+--- a/gcc/tree-profile.cc
++++ b/gcc/tree-profile.cc
+@@ -725,7 +725,7 @@ tree_profiling (void)
+      cgraphunit.cc:ipa_passes().  */
+   gcc_assert (symtab->state == IPA_SSA);
+ 
+-  init_node_map (true);
++  init_node_map (true, false);
+   parse_profile_file_filtering ();
+ 
+   FOR_EACH_DEFINED_FUNCTION (node)
+@@ -766,7 +766,7 @@ tree_profiling (void)
+ 	     time.  */
+ 	  else
+ 	    {
+-	      read_thunk_profile (node);
++	      read_thunk_profile (node, false);
+ 	      continue;
+ 	    }
+ 	}
+@@ -781,7 +781,7 @@ tree_profiling (void)
+ 	  && (execute_fixup_cfg () & TODO_cleanup_cfg))
+ 	cleanup_tree_cfg ();
+ 
+-      branch_prob (thunk);
++      branch_prob (thunk, false);
+ 
+       if (! flag_branch_probabilities
+ 	  && flag_profile_values)
+@@ -863,6 +863,170 @@ tree_profiling (void)
+   return 0;
+ }
+ 
++/* Profile all functions in the callgraph with cs profile.  */
++
++static unsigned int
++csprofile_transform (struct cgraph_node *node)
++{
++  basic_block bb;
++  bool thunk = false;
++
++  parse_profile_file_filtering ();
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "[cspgo] trying cspgo on function:\n");
++      dump_function_header (dump_file, cfun->decl, dump_flags);
++    }
++
++  if (!DECL_STRUCT_FUNCTION (current_function_decl))
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] %s without function decl, skip.\n",
++		 node->dump_name ());
++      return 0;
++    }
++
++  if (!gimple_has_body_p (node->decl) && !node->thunk)
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] %s without gimple body, skip.\n",
++		 node->dump_name ());
++      return 0;
++    }
++
++  /* Don't profile functions produced for builtin stuff.  */
++  if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION)
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] %s with BUILTINS_LOCATION, skip.\n",
++		 node->dump_name ());
++      return 0;
++    }
++
++  const char *file = LOCATION_FILE (DECL_SOURCE_LOCATION (node->decl));
++  if (!file || !include_source_file_for_profile (file))
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] %s is sub func or in filter-files, "
++			    "skip.\n", node->dump_name ());
++      return 0;
++    }
++
++  if (lookup_attribute ("no_profile_instrument_function",
++			DECL_ATTRIBUTES (node->decl)))
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] %s is no_profile_instrument_function,"
++		 " skip.\n", node->dump_name ());
++      return 0;
++    }
++
++  /* Do not instrument extern inline functions.  */
++  if (DECL_EXTERNAL (node->decl))
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] %s is DECL_EXTERNAL, skip.\n",
++			     node->dump_name ());
++      return 0;
++    }
++
++  if (!coverage_node_map_initialized_p ())
++    init_node_map (true, true);
++
++  /* Node without profile id should skip.  */
++  if (!node->profile_id)
++    {
++      if (dump_file)
++	fprintf (dump_file, "[cspgo] %s does not has profile_id, skip.\n",
++			     node->dump_name ());
++      return 0;
++    }
++
++  if (flag_csprofile_generate)
++    {
++      profile_arc_flag = 1;
++      flag_branch_probabilities = 0;
++    }
++
++  /* Process thunk function.  */
++  if (node->thunk)
++    {
++      /* We cannot expand variadic thunks to Gimple.  */
++      if (stdarg_p (TREE_TYPE (node->decl)))
++	{
++	  if (dump_file)
++	    fprintf (dump_file, "[cspgo] %s is DECL_EXTERNAL, skip.\n",
++			     node->dump_name ());
++	  return 0;
++	}
++      thunk = true;
++      /* When generate profile, expand thunk to gimple so it can be
++	 instrumented same way as other functions.  */
++      if (profile_arc_flag)
++	expand_thunk (node, false, true);
++      /* Read cgraph profile but keep function as thunk at profile-use
++	 time.  */
++      else
++	{
++	  read_thunk_profile (node, true);
++	  return 0;
++	}
++    }
++
++  /* Local pure-const may imply need to fixup the cfg.  */
++  if (gimple_has_body_p (node->decl)
++	&& (execute_fixup_cfg () & TODO_cleanup_cfg))
++    cleanup_tree_cfg ();
++
++  branch_prob (thunk, true);
++
++  if (! flag_branch_probabilities
++	&& flag_profile_values)
++    gimple_gen_ic_func_profiler ();
++
++  if (flag_branch_probabilities
++      && !thunk
++      && flag_profile_values
++      && flag_value_profile_transformations
++      && profile_status_for_fn (cfun) == PROFILE_READ)
++    gimple_value_profile_transformations ();
++
++  /* The above could hose dominator info.  Currently there is
++     none coming in, this is a safety valve.  It should be
++     easy to adjust it, if and when there is some.  */
++  free_dominance_info (CDI_DOMINATORS);
++  free_dominance_info (CDI_POST_DOMINATORS);
++
++  release_profile_file_filtering ();
++
++  if (flag_csprofile_generate)
++    {
++      profile_arc_flag = 0;
++      flag_branch_probabilities = 1;
++    }
++
++  /* Update call statements and rebuild the cgraph.  */
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      gimple_stmt_iterator gsi;
++      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++	  if (is_gimple_call (stmt))
++	    update_stmt (stmt);
++	}
++    }
++
++  /* re-merge split blocks.  */
++  cleanup_tree_cfg ();
++  update_ssa (TODO_update_ssa);
++
++  cgraph_edge::rebuild_edges ();
++
++  return 0;
++}
++
+ namespace {
+ 
+ const pass_data pass_data_ipa_tree_profile =
+@@ -910,4 +1074,57 @@ make_pass_ipa_tree_profile (gcc::context *ctxt)
+   return new pass_ipa_tree_profile (ctxt);
+ }
+ 
++namespace {
++
++const pass_data pass_data_ipa_csprofile =
++{
++  IPA_PASS, /* type */
++  "csprofile", /* name */
++  OPTGROUP_NONE, /* optinfo_flags */
++  TV_IPA_CSPROFILE, /* tv_id */
++  0, /* properties_required */
++  0, /* properties_provided */
++  0, /* properties_destroyed */
++  0, /* todo_flags_start */
++  0, /* todo_flags_finish */
++};
++
++class pass_ipa_csprofile : public ipa_opt_pass_d
++{
++public:
++  pass_ipa_csprofile (gcc::context *ctxt)
++    : ipa_opt_pass_d (pass_data_ipa_csprofile, ctxt,
++		      NULL, /* generate_summary */
++		      NULL, /* write_summary */
++		      NULL, /* read_summary */
++		      NULL, /* write_optimization_summary */
++		      NULL, /* read_optimization_summary */
++		      NULL, /* stmt_fixup */
++		      0, /* function_transform_todo_flags_start */
++		      csprofile_transform, /* function_transform */
++		      NULL) /* variable_transform */
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *)
++    {
++      return (flag_csprofile_generate || flag_csprofile_use);
++    }
++  /* The main process of cspgo is in csprofile_transform, execute does not need
++     to do anything.  */
++  virtual unsigned int execute (function *)
++    {
++      return 0;
++    }
++
++}; // class pass_ipa_csprofile
++
++} // anon namespace
++
++ipa_opt_pass_d *
++make_pass_ipa_csprofile (gcc::context *ctxt)
++{
++  return new pass_ipa_csprofile (ctxt);
++}
++
+ #include "gt-tree-profile.h"
+diff --git a/gcc/value-prof.cc b/gcc/value-prof.cc
+index c240a1863..9c7191287 100644
+--- a/gcc/value-prof.cc
++++ b/gcc/value-prof.cc
+@@ -1234,7 +1234,7 @@ coverage_node_map_initialized_p (void)
+    that the PROFILE_IDs was already assigned.  */
+ 
+ void
+-init_node_map (bool local)
++init_node_map (bool local, bool is_cspgo)
+ {
+   struct cgraph_node *n;
+   cgraph_node_map = new hash_map;
+@@ -1245,6 +1245,12 @@ init_node_map (bool local)
+ 	cgraph_node **val;
+ 	dump_user_location_t loc
+ 	  = dump_user_location_t::from_function_decl (n->decl);
++
++	/* In cspgo, inline and clone functions will not be expanded,
++	   so they are skipped.  */
++	if (is_cspgo && (n->inlined_to || n->clone_of))
++	  continue;
++
+ 	if (local)
+ 	  {
+ 	    n->profile_id = coverage_compute_profile_id (n);
+@@ -1290,6 +1296,7 @@ void
+ del_node_map (void)
+ {
+   delete cgraph_node_map;
++  cgraph_node_map = 0;
+ }
+ 
+ /* Return cgraph node for function with pid */
+diff --git a/gcc/value-prof.h b/gcc/value-prof.h
+index d852c41f3..0fe3821c3 100644
+--- a/gcc/value-prof.h
++++ b/gcc/value-prof.h
+@@ -112,8 +112,8 @@ extern struct cgraph_node* find_func_by_profile_id (int func_id);
+ 
+ /* In profile.cc.  */
+ extern void init_branch_prob (void);
+-extern void branch_prob (bool);
+-extern void read_thunk_profile (struct cgraph_node *);
++extern void branch_prob (bool, bool);
++extern void read_thunk_profile (struct cgraph_node *, bool);
+ extern void end_branch_prob (void);
+ 
+ #endif	/* GCC_VALUE_PROF_H */
+-- 
+2.25.1
+
diff --git a/0310-CFGO-Add-cfgo-pgo-optimization.patch b/0310-CFGO-Add-cfgo-pgo-optimization.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4a930aba6773ef382ee5c83821b5f3a1894aa8df
--- /dev/null
+++ b/0310-CFGO-Add-cfgo-pgo-optimization.patch
@@ -0,0 +1,168 @@
+From 84635dc65ebe285457d0c16bbb5caf995f803436 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Wed, 27 Nov 2024 18:36:27 +0800
+Subject: [PATCH] [CFGO] Add cfgo-pgo optimization
+
+Add a cfgo-pgo to better optimize with AI4C
+---
+ gcc/common.opt    | 16 +++++++++++++
+ gcc/gcc.cc        |  4 ++--
+ gcc/lto-cgraph.cc |  3 ++-
+ gcc/opts.cc       | 57 +++++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 77 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index c9baa12be..a45fbfe1b 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2485,6 +2485,14 @@ fprofile-generate=
+ Common Joined RejectNegative
+ Enable common options for generating profile info for profile feedback directed optimizations, and set -fprofile-dir=.
+ 
++fcfgo-profile-generate
++Common Var(flag_cfgo_profile_generate)
++Enable common options for generating cfgo profile info for profile feedback directed optimizations.
++
++fcfgo-profile-generate=
++Common Joined RejectNegative
++Enable common options for generating cfgo profile info for profile feedback directed optimizations, and set -fprofile-dir=.
++
+ fcfgo-csprofile-generate
+ Common Var(flag_csprofile_generate)
+ Enable common options for generating context sensitive profile info for profile feedback directed optimizations.
+@@ -2517,6 +2525,14 @@ fprofile-use=
+ Common Joined RejectNegative
+ Enable common options for performing profile feedback directed optimizations, and set -fprofile-dir=.
+ 
++fcfgo-profile-use
++Common Var(flag_cfgo_profile_use)
++Enable common options for performing profile feedback directed optimizations.
++
++fcfgo-profile-use=
++Common Joined RejectNegative
++Enable common options for performing profile feedback directed optimizations, and set -fprofile-dir=.
++
+ fcfgo-csprofile-use
+ Common Var(flag_csprofile_use)
+ Enable common options for performing context sensitive profile feedback directed optimizations.
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index b37b50be2..e5c43dd90 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -1147,7 +1147,7 @@ proper position among the other output files.  */
+ 	%:include(libgomp.spec)%(link_gomp)}\
+     %{fgnu-tm:%:include(libitm.spec)%(link_itm)}\
+     %(mflib) " STACK_SPLIT_SPEC "\
+-    %{fprofile-arcs|fprofile-generate*|fcfgo-csprofile-generate*|coverage:-lgcov} \
++    %{fprofile-arcs|fprofile-generate*|fcfgo-profile-generate*|fcfgo-csprofile-generate*|coverage:-lgcov} \
+     " SANITIZER_SPEC " \
+     %{!nostdlib:%{!r:%{!nodefaultlibs:%(link_ssp) %(link_gcc_c_sequence)}}}\
+     %{!nostdlib:%{!r:%{!nostartfiles:%E}}} %{T*}  \n%(post_link) }}}}}}"
+@@ -1266,7 +1266,7 @@ static const char *cc1_options =
+  %{!fsyntax-only:%{S:%W{o*}%{!o*:-o %w%b.s}}}\
+  %{fsyntax-only:-o %j} %{-param*}\
+  %{coverage:-fprofile-arcs -ftest-coverage}\
+- %{fprofile-arcs|fprofile-generate*|fcfgo-csprofile-generate*|coverage:\
++ %{fprofile-arcs|fprofile-generate*|fcfgo-profile-generate*|fcfgo-csprofile-generate*|coverage:\
+    %{!fprofile-update=single:\
+      %{pthread:-fprofile-update=prefer-atomic}}}";
+ 
+diff --git a/gcc/lto-cgraph.cc b/gcc/lto-cgraph.cc
+index fd41941d1..9a30f1190 100644
+--- a/gcc/lto-cgraph.cc
++++ b/gcc/lto-cgraph.cc
+@@ -1681,7 +1681,8 @@ merge_profile_summaries (struct lto_file_decl_data **file_data_vec)
+      stages with unknown bug resulted in different scaling results, which led
+      different optimization decisions and finally led to coverage mismatch.
+      Therefore, skip the following processing steps when doing cspgo.  */
+-  if (flag_csprofile_generate || flag_csprofile_use)
++  if (flag_csprofile_generate || flag_csprofile_use
++      || flag_cfgo_profile_generate || flag_cfgo_profile_use)
+     return;
+ 
+   /* Now compute count_materialization_scale of each node.
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 89d03e834..84dd8925a 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -2087,6 +2087,38 @@ enable_fdo_optimizations (struct gcc_options *opts,
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_tree_loop_distribution, value);
+ }
+ 
++/* Enable cfgo-related flags.  */
++
++static void
++enable_cfgo_optimizations (struct gcc_options *opts,
++			   struct gcc_options *opts_set,
++			   int value)
++{
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_modulo_sched, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_selective_scheduling, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_rename_registers, value);
++
++  SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 185);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_inline_unit_growth, 66);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_recursive_depth_auto,
++		       31);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_large_function_insns, 7286);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_large_function_growth, 89);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_large_unit_insns, 11783);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_eval_threshold, 864);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_loop_hint_bonus, 440);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_max_recursive_depth, 29);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_min_recursive_probability,
++		       4);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_recursive_freq_factor, 18);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_recursion_penalty, 64);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_single_call_penalty, 43);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_unit_growth, 96);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_large_unit_insns, 47631);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_value_list_size, 12);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_profile_count_base, 54);
++}
++
+ /* -f{,no-}sanitize{,-recover}= suboptions.  */
+ const struct sanitizer_opts_s sanitizer_opts[] =
+ {
+@@ -3033,6 +3065,18 @@ common_handle_option (struct gcc_options *opts,
+       /* Deferred.  */
+       break;
+ 
++    case OPT_fcfgo_profile_use_:
++      /* No break here - do -fcfgo-profile-use processing.  */
++      /* FALLTHRU */
++    case OPT_fcfgo_profile_use:
++      value = true;
++      if (value)
++	{
++	  enable_cfgo_optimizations (opts, opts_set, value);
++	  SET_OPTION_IF_UNSET (opts, opts_set, flag_cfgo_profile_use, value);
++	}
++      /* No break here - do -fprofile-use processing.  */
++      /* FALLTHRU */
+     case OPT_fprofile_use_:
+       opts->x_profile_data_prefix = xstrdup (arg);
+       opts->x_flag_profile_use = true;
+@@ -3090,6 +3134,19 @@ common_handle_option (struct gcc_options *opts,
+       SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_struct_reorg, value);
+       break;
+ 
++    case OPT_fcfgo_profile_generate_:
++      /* No break here - do -fcfgo-profile-generate processing.  */
++      /* FALLTHRU */
++    case OPT_fcfgo_profile_generate:
++      value = true;
++      if (value)
++	{
++	  enable_cfgo_optimizations (opts, opts_set, value);
++	  SET_OPTION_IF_UNSET (opts, opts_set, flag_cfgo_profile_generate,
++			       value);
++	}
++      /* No break here - do -fcfgo-profile-generate processing.  */
++      /* FALLTHRU */
+     case OPT_fprofile_generate_:
+       opts->x_profile_data_prefix = xstrdup (arg);
+       value = true;
+-- 
+2.25.1
+
diff --git a/0311-PATCH-Add-if-split-optimization-pass.patch b/0311-PATCH-Add-if-split-optimization-pass.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5e3b75e2435c857d786ded4f10a7eba6a5774987
--- /dev/null
+++ b/0311-PATCH-Add-if-split-optimization-pass.patch
@@ -0,0 +1,1203 @@
+From 899db9bca3c2ef3cd346814be761eed8b85f5e1e Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Wed, 27 Nov 2024 19:26:13 +0800
+Subject: [PATCH] [PATCH] Add if-split optimization pass
+
+This pass splits conditions like
+if (cond1  or  cond2)
+to the sequence of separate conditions.
+
+This happens only if there is a function call under the condition
+which depends on the condition variable.
+---
+ gcc/Makefile.in                             |   1 +
+ gcc/common.opt                              |   4 +
+ gcc/gimple-if-split.cc                      | 567 ++++++++++++++++++++
+ gcc/opts.cc                                 |   2 +-
+ gcc/passes.def                              |   1 +
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-1.c  |  24 +
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-10.c |  45 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-2.c  |  36 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-3.c  |  36 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-4.c  |  42 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-5.c  |  42 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-6.c  |  45 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-7.c  |  45 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-8.c  |  42 ++
+ gcc/testsuite/gcc.dg/tree-ssa/if-split-9.c  |  44 ++
+ gcc/timevar.def                             |   1 +
+ gcc/tree-cfg.h                              |   2 +
+ gcc/tree-pass.h                             |   1 +
+ gcc/tree-ssa-ifcombine.cc                   |   6 +-
+ 19 files changed, 981 insertions(+), 5 deletions(-)
+ create mode 100644 gcc/gimple-if-split.cc
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-1.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-10.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-2.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-3.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-4.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-5.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-6.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-7.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-8.c
+ create mode 100644 gcc/testsuite/gcc.dg/tree-ssa/if-split-9.c
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index bb6197a8e..683b28896 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1393,6 +1393,7 @@ OBJS = \
+ 	gimple-builder.o \
+ 	gimple-expr.o \
+ 	gimple-if-to-switch.o \
++	gimple-if-split.o \
+ 	gimple-iterator.o \
+ 	gimple-fold.o \
+ 	gimple-harden-conditionals.o \
+diff --git a/gcc/common.opt b/gcc/common.opt
+index a45fbfe1b..a52fa9814 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1981,6 +1981,10 @@ finstrument-functions-exclude-file-list=
+ Common RejectNegative Joined
+ -finstrument-functions-exclude-file-list=filename,...	Do not instrument functions listed in files.
+ 
++fif-split
++Common Var(flag_if_split) Init(0) Optimization
++Perform splitting of complex conditions into separate ones with cloning their bodies (gimple version).
++
+ fipa-cp
+ Common Var(flag_ipa_cp) Optimization
+ Perform interprocedural constant propagation.
+diff --git a/gcc/gimple-if-split.cc b/gcc/gimple-if-split.cc
+new file mode 100644
+index 000000000..3446204ea
+--- /dev/null
++++ b/gcc/gimple-if-split.cc
+@@ -0,0 +1,567 @@
++/* If-split.
++   Copyright (C) 2024 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "config.h"
++#define INCLUDE_FUNCTIONAL
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "tree-ssa.h"
++#include "tree-pass.h"
++#include "diagnostic-core.h"
++#include "function.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "gimple-pretty-print.h"
++#include "gimple-iterator.h"
++#include "cfg.h"
++#include "cfghooks.h"
++#include "ssa.h"
++#include "fold-const.h"
++#include "tree-into-ssa.h"
++#include "tree-cfg.h"
++#include "bitmap.h"
++#include "cfganal.h"
++
++/* Perform splitting if-then-else patterns, whose complex OR condition in
++cond-bb contains comparison of some variable with constant and then-bb got
++function call, whose arg list contains this var (or this variable is a
++scalar of an aggregate which is an arg of this call). We split condition on
++two separate ones and duplicate then-bb for each one, thus help ipa const
++prop to propagate corresponding constant in function calls.
++Example:
++	Before:
++		if (n == const || some_cond)
++			func (n);
++	After:
++		if (n == const)
++			func (n);
++		else if (some_cond)
++			func (n);  */
++
++//-------------------------------------------------------------------------
++// Auxiliary functions
++//-------------------------------------------------------------------------
++/* Check if arg list of call got n.  */
++bool
++got_in_args_p (gimple* call, tree n)
++{
++  unsigned num_args = gimple_call_num_args (call);
++
++  for (int i = 0; i < num_args; i++)
++    {
++      if (n == gimple_call_arg (call, i))
++  return true;
++    }
++
++  return false;
++}
++
++#define SCALAR_NESTING 2
++/* Check if call is "necessary" for n.  Call is called "necessary"
++ * for n, if n is one of call args, or n is scalar of some aggregate,
++ * which is one of this call args.  Nesting param determines how many
++ * levels of aggregate-scalar nesting we want to check.  For example,
++ * if nesting == 2, we allow only 2 levels of nesting, like
++ * outer_aggr->inner_aggr->scalar.  */
++static bool
++necessary_call_p (gimple *call, tree n, unsigned nesting)
++{
++  if (!call)
++    return false;
++
++  if (got_in_args_p (call, n))
++    return true;
++
++  /* Else we need to check if n could be a scalar of some aggregate which
++   * is one of call args.  */
++  tree scalar = n;
++  tree aggregate = NULL_TREE;
++
++  for (int i = 0; i < nesting; i++)
++    {
++      if (!scalar || TREE_CODE (scalar) != SSA_NAME)
++	return false;
++
++      gimple *scalar_def = SSA_NAME_DEF_STMT (scalar);
++
++      if (!is_gimple_assign (scalar_def)
++	  || gimple_assign_rhs_code (scalar_def) != COMPONENT_REF)
++	return false;
++
++      tree scalar_def_rhs = gimple_assign_rhs1 (scalar_def);
++      tree aggregate = TREE_OPERAND (scalar_def_rhs, 0);
++
++      if (TREE_CODE (aggregate) == MEM_REF)
++	aggregate = TREE_OPERAND (aggregate, 0);
++
++      if (aggregate && got_in_args_p (call, aggregate))
++	return true;
++
++      scalar = aggregate;
++    }
++
++  return false;
++}
++
++/* Check if bb got a "necessary" call statement.  */
++static bool
++bb_got_necessary_call_p (basic_block bb, tree n, unsigned nesting)
++{
++  gimple *stmt = NULL;
++
++  for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
++       gsi_next (&gsi))
++    {
++      gimple *stmt = gsi_stmt (gsi);
++
++      if (is_gimple_call (stmt) && necessary_call_p (stmt, n, nesting))
++	return true;
++    }
++
++  return false;
++}
++
++//-------------------------------------------------------------------------
++// Complex conditions
++//-------------------------------------------------------------------------
++/* Auxiliary struct which contains var and its constant of comparison
++ * of expr: n == cst.  */
++struct var_const
++{
++  tree n = NULL_TREE;
++  tree cst = NULL_TREE;
++};
++
++/* Check if var_def stmt got this pattern:
++ *    var = (n == const);
++ * If it does, we need to set var_cst struct.  */
++static bool
++comp_with_const_p (gimple *var_def, var_const *var_cst)
++{
++  if (gimple_expr_code (var_def) != EQ_EXPR)
++    return false;
++
++  tree var_def_rhs2 = gimple_assign_rhs2 (var_def);
++
++  if (TREE_CODE (var_def_rhs2) != INTEGER_CST)
++    return false;
++
++  var_cst->n = gimple_assign_rhs1 (var_def);
++  var_cst->cst = var_def_rhs2;
++
++  return true;
++}
++
++/* Auxiliary struct which contains definition of each part of
++ * complex condition, like:
++ *    a = ... <- a_def
++ *    b = ... <- b_def
++ *    c = a | b  <- complex_cond.  */
++struct cond_parts_defs
++{
++  gimple *a_def = NULL;
++  gimple *b_def = NULL;
++};
++
++/* Check if cond got this pattern:
++ *    a = ...; <- a_def
++ *    b = ...; <- b_def
++ *    c = a | b;
++ *    if (c != 0)
++ * and a_def or b_def is comparison with constant.  If it does,
++ * we need to set a with a_def and b with b_def.  */
++static bool
++necessary_complex_cond_p (const gimple *cond, basic_block then_bb,
++			  cond_parts_defs *defs)
++{
++  tree lhs = gimple_cond_lhs (cond);
++  tree rhs = gimple_cond_rhs (cond);
++
++  /* As we look for: if (c != 0).  */
++  if (gimple_cond_code (cond) != NE_EXPR || TREE_CODE (lhs) != SSA_NAME
++      || !integer_zerop (rhs))
++    return false;
++
++  gimple *c_def = SSA_NAME_DEF_STMT (lhs);
++
++  /* As we look for: c = a | b.  */
++  if (!c_def || !is_gimple_assign (c_def) || gimple_num_ops (c_def) != 3
++      || gimple_expr_code (c_def) != BIT_IOR_EXPR)
++    return false;
++
++  tree a_var = gimple_assign_rhs1 (c_def);
++  tree b_var = gimple_assign_rhs2 (c_def);
++  gimple *a_def = SSA_NAME_DEF_STMT (a_var);
++  gimple *b_def = SSA_NAME_DEF_STMT (b_var);
++
++  if (!a_def || !is_gimple_assign (a_def) || !b_def
++      || !is_gimple_assign (b_def))
++    return false;
++
++  var_const var_cst;
++
++  if (!(comp_with_const_p (a_def, &var_cst)
++	&& bb_got_necessary_call_p (then_bb, var_cst.n, SCALAR_NESTING))
++      && !(comp_with_const_p (b_def, &var_cst)
++	   && bb_got_necessary_call_p (then_bb, var_cst.n, SCALAR_NESTING)))
++    return false;
++
++  defs->a_def = a_def;
++  defs->b_def = b_def;
++
++  return true;
++}
++
++/* Check if our complex condition seems to be "necessary"
++ * and if it does split it on two separate ones.  Like:
++ *    a = (n == const); <- a_def
++ *    b = smth; <- b_def
++ *    c = a | b
++ *    if (c != 0)
++ *       call func (n, ...)
++ * Transform this to:
++ *    if (n == const)
++ *	 goto then
++ *    else if (b != 0)
++ *	 goto then
++ *     then:
++ *	 call func (n, ...).
++ * A complex condition is called "necessary", if it is OR of two
++ * conditions, one of them is comparison with constant and then_bb
++ * of this cond got "necessary" function_call.  To know, what
++ * "necessary" function call means look at necessary_call_p ().  */
++static void
++process_complex_cond (basic_block cond_bb, basic_block then_bb,
++		      basic_block else_bb)
++{
++  gimple *cond = last_stmt (cond_bb);
++  cond_parts_defs defs;
++
++  if (!can_duplicate_block_p (then_bb)
++      || !necessary_complex_cond_p (cond, then_bb, &defs))
++    return;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file,
++	       "Recognized necessary complex condition: ", cond_bb->index);
++      print_gimple_stmt (dump_file, cond, 0, TDF_NONE);
++    }
++
++  var_const var_cst;
++
++  /* Setting cond.  */
++  if (comp_with_const_p (defs.a_def, &var_cst))
++      /* Setting cond as: if (n == const).  */
++      gimple_cond_set_condition (as_a <gcond *> (cond), EQ_EXPR, var_cst.n,
++					var_cst.cst);
++  else
++    {
++      /* Setting cond as: if (a != 0).  */
++      tree cond_lhs = gimple_assign_lhs (defs.a_def);
++      gimple_cond_set_condition (as_a <gcond *> (cond), NE_EXPR, cond_lhs,
++      					build_zero_cst (TREE_TYPE (cond_lhs)));
++    }
++  update_stmt (cond);
++
++  /* Creating inner_cond_bb.  */
++  edge then_e = find_edge (cond_bb, then_bb);
++  edge else_e = find_edge (cond_bb, else_bb);
++  basic_block inner_cond_bb = split_edge (else_e);
++
++  /* Setting inner_cond.  */
++  gcond *inner_cond = NULL;
++  if (comp_with_const_p (defs.b_def, &var_cst))
++    {
++      /* Setting inner cond as: if (b == const).  */
++      inner_cond = gimple_build_cond (EQ_EXPR, var_cst.n, var_cst.cst,
++				      NULL_TREE, NULL_TREE);
++    }
++  else
++    {
++      /* Setting inner cond as: if (b != 0).  */
++      tree inner_cond_lhs = gimple_assign_lhs (defs.b_def);
++      inner_cond = gimple_build_cond (
++	  NE_EXPR, inner_cond_lhs, build_zero_cst (TREE_TYPE (inner_cond_lhs)),
++	  NULL_TREE, NULL_TREE);
++    }
++  gimple_stmt_iterator gsi = gsi_last_bb (inner_cond_bb);
++  gsi_insert_after (&gsi, inner_cond, GSI_NEW_STMT);
++
++  /* Configuring edges.  */
++  edge inner_cond_then_e = make_edge (inner_cond_bb, then_bb, EDGE_TRUE_VALUE);
++  edge inner_cond_else_e = find_edge (inner_cond_bb, else_bb);
++  inner_cond_else_e->flags = EDGE_FALSE_VALUE;
++
++  /* Setting phinode args in then_bb coming from inner_cond_bb the same as
++   * ones coming from cond_bb.  */
++  for (gphi_iterator psi = gsi_start_phis (then_bb); !gsi_end_p (psi);
++       gsi_next (&psi))
++    {
++      gphi *phi = psi.phi ();
++      add_phi_arg (phi, PHI_ARG_DEF_FROM_EDGE (phi, then_e), inner_cond_then_e,
++		   UNKNOWN_LOCATION);
++    }
++
++  /* Updating dominators.  */
++  set_immediate_dominator (CDI_DOMINATORS, inner_cond_bb, cond_bb);
++  basic_block cond_bb_postdominator
++      = get_immediate_dominator (CDI_POST_DOMINATORS, cond_bb);
++  set_immediate_dominator (CDI_POST_DOMINATORS, inner_cond_bb,
++			   cond_bb_postdominator);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Successfully transformed:\n (o_cond) ",
++	       cond_bb->index);
++      print_gimple_stmt (dump_file, cond, 0, TDF_NONE);
++      fprintf (dump_file, " (i_cond) ", inner_cond_bb->index);
++      print_gimple_stmt (dump_file, inner_cond, 0, TDF_NONE);
++    }
++}
++
++//-------------------------------------------------------------------------
++// Condition pairs
++//-------------------------------------------------------------------------
++/* Transforming cfg if we recognized the pattern in process_condition_pair (). */
++static basic_block
++make_two_separate_calls (basic_block outer_cond_bb, basic_block inner_cond_bb,
++			 basic_block then_bb)
++{
++  if (!can_duplicate_block_p (then_bb) || EDGE_COUNT (then_bb->succs) != 1)
++    return NULL;
++
++  edge outer_then_e = find_edge (outer_cond_bb, then_bb);
++
++  /* Making duplication of then_bb.  */
++  basic_block then_bb_dom = get_immediate_dominator (CDI_DOMINATORS, then_bb);
++  basic_block merge_bb = split_edge (single_succ_edge (then_bb));
++  basic_block then_bb1 = duplicate_block (then_bb, outer_then_e, outer_cond_bb);
++  edge outer_then1_e = find_edge (outer_cond_bb, then_bb1);
++
++  /* Setting phinode args in then_bb1 coming from outer_cond_bb by previously
++   * collected args_from_outer_cond_bb.  */
++  flush_pending_stmts (outer_then1_e);
++
++  /* Updating dominators.  */
++  if (then_bb_dom == outer_cond_bb)
++    set_immediate_dominator (CDI_DOMINATORS, then_bb, inner_cond_bb);
++
++  set_immediate_dominator (CDI_DOMINATORS, merge_bb, then_bb_dom);
++  set_immediate_dominator (CDI_DOMINATORS, then_bb1, outer_cond_bb);
++
++  set_immediate_dominator (CDI_POST_DOMINATORS, then_bb, merge_bb);
++  set_immediate_dominator (CDI_POST_DOMINATORS, then_bb1, merge_bb);
++  set_immediate_dominator (CDI_POST_DOMINATORS, merge_bb,
++			   single_succ (merge_bb));
++
++  return then_bb1;
++}
++
++/* Here we check if cond of bb got this pattern:
++ *    if (n == const)
++ * And if it does we need to set n.  */
++static bool
++got_necessary_cond_p (basic_block bb, tree *n)
++{
++  gimple *stmt = last_stmt (bb);
++  if (!stmt || gimple_code (stmt) != GIMPLE_COND)
++    return false;
++
++  gcond *cond = as_a <gcond *> (stmt);
++
++  if (gimple_cond_code (cond) != EQ_EXPR
++      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
++      || TREE_CODE (gimple_cond_rhs (cond)) != INTEGER_CST)
++    return false;
++
++  *n = gimple_cond_lhs (cond);
++
++  return true;
++}
++
++/* Recognize pattern:
++ *    if (n == const)
++ *	 goto then
++ *    else if (some_cond)
++ *	 goto then
++ *    then:
++ *	 call func (n, ...)
++ * Transform this to:
++ *    if (n == const)
++ *	 call func (n, ...)
++ *    else if (some_cond)
++ *	 call func (n, ...).  */
++static void
++process_cond_pair (basic_block outer_cond_bb, basic_block inner_cond_bb,
++		   basic_block then_bb)
++{
++  tree n = NULL_TREE;
++
++  if (inner_cond_bb == then_bb
++      || !recognize_if_then_else (outer_cond_bb, &then_bb, &inner_cond_bb)
++      || !same_phi_args_p (outer_cond_bb, inner_cond_bb, then_bb)
++      || (!got_necessary_cond_p (outer_cond_bb, &n)
++	  && !got_necessary_cond_p (inner_cond_bb, &n))
++      || !bb_got_necessary_call_p (then_bb, n, SCALAR_NESTING))
++    return;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Recognized necessary condition pair: (o_cond) ");
++      print_gimple_stmt (dump_file, last_stmt (outer_cond_bb), 0, TDF_NONE);
++      fprintf (dump_file, " (i_cond) ");
++      print_gimple_stmt (dump_file, last_stmt (inner_cond_bb), 0, TDF_NONE);
++    }
++
++  basic_block then_bb1
++      = make_two_separate_calls (outer_cond_bb, inner_cond_bb, then_bb);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      if (then_bb1)
++	fprintf (dump_file,
++		 "Successfully transformed: bb<%d> is a copy of bb<%d> \n",
++		 then_bb1->index, then_bb->index);
++      else
++	fprintf (dump_file, "No transformation: bb<%d> cannot be duplicated \n",
++		 then_bb->index);
++    }
++}
++
++//-------------------------------------------------------------------------
++// Main logic
++//-------------------------------------------------------------------------
++/* If cond_bb suits if-then-else pattern and got single pred, execute func
++ * over it and its then, else basic blocks.  */
++template <typename F>
++static void
++process_bb (basic_block cond_bb, F func)
++{
++  basic_block then_bb = NULL, else_bb = NULL;
++
++  if (!recognize_if_then_else (cond_bb, &then_bb, &else_bb))
++    return;
++
++  func (cond_bb, then_bb, else_bb);
++}
++
++/* For each block, if it has condition, execute function over it.  We walk
++ * the blocks in order that guarantees that a block with a single predecessor
++ * is processed after the predecessor.  */
++template <typename F>
++static void
++execute_function_over_conditional_bbs (F func)
++{
++  basic_block *bbs = single_pred_before_succ_order ();
++  for (int i = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
++    {
++      gimple *stmt = last_stmt (bbs[i]);
++
++      if (stmt && gimple_code (stmt) == GIMPLE_COND)
++	{
++	  process_bb (bbs[i], func);
++	}
++    }
++  update_ssa (TODO_update_ssa);
++  free (bbs);
++}
++
++static void
++process_if_split_cfun ()
++{
++  /* First pass.  Split complex conditions, so process_condition_pair_bb ()
++   * will be able to recognize more necessary patterns.  */
++  execute_function_over_conditional_bbs (process_complex_cond);
++
++  /* Second pass.  Search each basic block for condition pair we may be
++   * able to optimize.  */
++  execute_function_over_conditional_bbs (
++      [] (basic_block cond_bb, basic_block then_bb, basic_block else_bb)
++      {
++	if (!single_pred_p (cond_bb))
++	  return;
++	process_cond_pair (single_pred (cond_bb), cond_bb, then_bb);
++      });
++}
++
++namespace
++{
++
++const pass_data pass_data_if_split = {
++  GIMPLE_PASS,	    /* type.  */
++  "if-split",	    /* name.  */
++  OPTGROUP_NONE,    /* optinfo_flags.  */
++  TV_TREE_IF_SPLIT, /* tv_id.  */
++  0,		    /* properties_required.  */
++  0,		    /* properties_provided.  */
++  0,		    /* properties_destroyed.  */
++  0,		    /* todo_flags_start.  */
++  0		    /* todo_flags_finish.  */
++};
++
++class pass_if_split : public gimple_opt_pass
++{
++public:
++  pass_if_split (gcc::context *ctxt)
++      : gimple_opt_pass (pass_data_if_split, ctxt)
++  {
++  }
++
++  /* opt_pass methods: */
++  virtual bool
++  gate (function *)
++  {
++    /* Don't bother doing anything if the program has errors.  */
++    return (optimize >= 3 && flag_if_split && !seen_error ());
++  }
++
++  virtual unsigned int execute (function *);
++
++}; // class pass_if_split
++
++unsigned int
++pass_if_split::execute (function *fun)
++{
++  calculate_dominance_info (CDI_DOMINATORS);
++  calculate_dominance_info (CDI_POST_DOMINATORS);
++  initialize_original_copy_tables ();
++
++  process_if_split_cfun ();
++
++  checking_verify_ssa (true, true);
++  checking_verify_flow_info ();
++  checking_verify_dominators (CDI_DOMINATORS);
++  checking_verify_dominators (CDI_POST_DOMINATORS);
++
++  free_original_copy_tables ();
++  free_dominance_info (CDI_POST_DOMINATORS);
++  free_dominance_info (CDI_DOMINATORS);
++
++  return 0;
++}
++
++} // anon namespace
++
++gimple_opt_pass *
++make_pass_if_split (gcc::context *ctxt)
++{
++  return new pass_if_split (ctxt);
++}
+\ No newline at end of file
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 84dd8925a..4f3eb4bd4 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -3145,7 +3145,7 @@ common_handle_option (struct gcc_options *opts,
+ 	  SET_OPTION_IF_UNSET (opts, opts_set, flag_cfgo_profile_generate,
+ 			       value);
+ 	}
+-      /* No break here - do -fcfgo-profile-generate processing.  */
++      /* No break here - do -fprofile-generate processing.  */
+       /* FALLTHRU */
+     case OPT_fprofile_generate_:
+       opts->x_profile_data_prefix = xstrdup (arg);
+diff --git a/gcc/passes.def b/gcc/passes.def
+index 862ef0d8f..fbe828439 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -100,6 +100,7 @@ along with GCC; see the file COPYING3.  If not see
+ 	  NEXT_PASS (pass_if_to_switch);
+ 	  NEXT_PASS (pass_convert_switch);
+ 	  NEXT_PASS (pass_cleanup_eh);
++	  NEXT_PASS (pass_if_split);
+ 	  NEXT_PASS (pass_profile);
+ 	  NEXT_PASS (pass_local_pure_const);
+ 	  NEXT_PASS (pass_modref);
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-1.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-1.c
+new file mode 100644
+index 000000000..5909dac41
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-1.c
+@@ -0,0 +1,24 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int main(int argc, char** argv){
++    int b = argc;
++    int res = 0;
++
++    if (b == 5 || b == 52)
++        res = foo (b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 1 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-10.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-10.c
+new file mode 100644
+index 000000000..20a45116b
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-10.c
+@@ -0,0 +1,45 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct Y
++{
++    int b;
++} Y;
++
++typedef struct X
++{
++    Y y;
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_b (Y* y, int val)
++{
++    y->b = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int foo2 ();
++
++int main(int argc, char** argv){
++    X data;
++    set_b (&data.y, argc);
++    int res = 0;
++    int foo2_res = foo2();
++
++    if (data.y.b == 5 || data.y.b == 52 || foo2_res == 25)
++        res = foo (data.y.b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 2 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-2.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-2.c
+new file mode 100644
+index 000000000..1370f9474
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-2.c
+@@ -0,0 +1,36 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct X
++{
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_a (X* x, int val)
++{
++    x->a = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int main(int argc, char** argv){
++    X data;
++    set_a (&data, argc);
++    int res = 0;
++
++    if (data.a == 5 || data.a == 52)
++        res = foo (data.a);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 1 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-3.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-3.c
+new file mode 100644
+index 000000000..93a6eb6dd
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-3.c
+@@ -0,0 +1,36 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct X
++{
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_a (X* x, int val)
++{
++    x->a = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int main(int argc, char** argv){
++    X data;
++    set_a (&data, argc);
++    int res = 0;
++
++    if (data.a == 5 || data.a == 52 || data.a == 25)
++        res = foo (data.a);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 2 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-4.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-4.c
+new file mode 100644
+index 000000000..36f2a15b3
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-4.c
+@@ -0,0 +1,42 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct Y
++{
++    int b;
++} Y;
++
++typedef struct X
++{
++    Y y;
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_b (Y* y, int val)
++{
++    y->b = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int main(int argc, char** argv){
++    X data;
++    set_b (&data.y, argc);
++    int res = 0;
++
++    if (data.y.b == 5 || data.y.b == 52)
++        res = foo (data.y.b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 1 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-5.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-5.c
+new file mode 100644
+index 000000000..fbc3b0c19
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-5.c
+@@ -0,0 +1,42 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct Y
++{
++    int b;
++} Y;
++
++typedef struct X
++{
++    Y y;
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_b (Y* y, int val)
++{
++    y->b = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int main(int argc, char** argv){
++    X data;
++    set_b (&data.y, argc);
++    int res = 0;
++
++    if (data.y.b == 5 || data.y.b == 52 || data.y.b == 25)
++        res = foo (data.y.b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 2 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-6.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-6.c
+new file mode 100644
+index 000000000..185127c79
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-6.c
+@@ -0,0 +1,45 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct Y
++{
++    int b;
++} Y;
++
++typedef struct X
++{
++    Y y;
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_b (Y* y, int val)
++{
++    y->b = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int foo2 ();
++
++int main(int argc, char** argv){
++    X data;
++    set_b (&data.y, argc);
++    int res = 0;
++    int foo2_res = foo2();
++
++    if (data.y.b == 5 || foo2_res == 52)
++        res = foo (data.y.b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 1 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-7.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-7.c
+new file mode 100644
+index 000000000..23f1a8f04
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-7.c
+@@ -0,0 +1,45 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct Y
++{
++    int b;
++} Y;
++
++typedef struct X
++{
++    Y y;
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_b (Y* y, int val)
++{
++    y->b = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int foo2 ();
++
++int main(int argc, char** argv){
++    X data;
++    set_b (&data.y, argc);
++    int res = 0;
++
++    if (data.y.b == 5 || foo2() == 52)
++        res = foo (data.y.b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary complex condition:" 0 "if-split" } } */
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 0 "if-split" } } */
++/* { dg-final { scan-tree-dump-times "Successfully transformed:" 0 "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-8.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-8.c
+new file mode 100644
+index 000000000..028b6dc40
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-8.c
+@@ -0,0 +1,42 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct Y
++{
++    int b;
++} Y;
++
++typedef struct X
++{
++    Y y;
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_b (Y* y, int val)
++{
++    y->b = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int main(int argc, char** argv){
++    X data;
++    set_b (&data.y, argc);
++    int res = 0;
++
++    if (data.y.b == 5 || data.a == 52)
++        res = foo (data.y.b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 1 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/if-split-9.c b/gcc/testsuite/gcc.dg/tree-ssa/if-split-9.c
+new file mode 100644
+index 000000000..3ff7e2efc
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tree-ssa/if-split-9.c
+@@ -0,0 +1,44 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -fif-split -fdump-tree-if-split-details" } */
++
++typedef struct Y
++{
++    int b;
++} Y;
++
++typedef struct X
++{
++    Y y;
++    int a;
++} X;
++
++
++void  __attribute__ ((noinline)) set_b (Y* y, int val)
++{
++    y->b = val;
++}
++
++static __attribute__ ((noinline)) int foo (int b)
++{
++    int res = 1;
++    for (int i = 0; i < b; i++) {
++        res*=3;
++    }
++    return res;
++}
++
++int foo2 ();
++
++int main(int argc, char** argv){
++    X data;
++    set_b (&data.y, argc);
++    int res = 0;
++
++    if (data.y.b == 5 || data.y.b == 52 || foo2() == 25)
++        res = foo (data.y.b);
++
++    return res;
++}
++
++/* { dg-final { scan-tree-dump-times "Recognized necessary condition pair:" 1 "if-split" } } */
++/* { dg-final { scan-tree-dump "Successfully transformed:" "if-split" } } */
+\ No newline at end of file
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 6fdb2c767..b0d3d1188 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -306,6 +306,7 @@ DEFTIMEVAR (TV_VAR_TRACKING_DATAFLOW , "var-tracking dataflow")
+ DEFTIMEVAR (TV_VAR_TRACKING_EMIT     , "var-tracking emit")
+ DEFTIMEVAR (TV_TREE_IFCOMBINE        , "tree if-combine")
+ DEFTIMEVAR (TV_TREE_IF_TO_SWITCH     , "if to switch conversion")
++DEFTIMEVAR (TV_TREE_IF_SPLIT         , "gimple if splitting")
+ DEFTIMEVAR (TV_TREE_UNINIT           , "uninit var analysis")
+ DEFTIMEVAR (TV_PLUGIN_INIT           , "plugin initialization")
+ DEFTIMEVAR (TV_PLUGIN_RUN            , "plugin execution")
+diff --git a/gcc/tree-cfg.h b/gcc/tree-cfg.h
+index cb67cdf87..bfe44c073 100644
+--- a/gcc/tree-cfg.h
++++ b/gcc/tree-cfg.h
+@@ -112,6 +112,8 @@ extern basic_block gimple_switch_default_bb (function *, gswitch *);
+ extern edge gimple_switch_edge (function *, gswitch *, unsigned);
+ extern edge gimple_switch_default_edge (function *, gswitch *);
+ extern bool cond_only_block_p (basic_block);
++extern bool recognize_if_then_else (basic_block, basic_block *, basic_block *);
++extern bool same_phi_args_p (basic_block, basic_block, basic_block);
+ 
+ /* Return true if the LHS of a call should be removed.  */
+ 
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index f9c2eed8b..fb17b189c 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -383,6 +383,7 @@ extern gimple_opt_pass *make_pass_graphite (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_graphite_transforms (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_if_conversion (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_if_to_switch (gcc::context *ctxt);
++extern gimple_opt_pass *make_pass_if_split (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_loop_distribution (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_vectorize (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_simduid_cleanup (gcc::context *ctxt);
+diff --git a/gcc/tree-ssa-ifcombine.cc b/gcc/tree-ssa-ifcombine.cc
+index 264a8bcae..3b50fc114 100644
+--- a/gcc/tree-ssa-ifcombine.cc
++++ b/gcc/tree-ssa-ifcombine.cc
+@@ -76,8 +76,7 @@ along with GCC; see the file COPYING3.  If not see
+    match the then and else basic-blocks to make the pattern match.
+    Returns true if the pattern matched, false otherwise.  */
+ 
+-static bool
+-recognize_if_then_else (basic_block cond_bb,
++bool recognize_if_then_else (basic_block cond_bb,
+ 			basic_block *then_bb, basic_block *else_bb)
+ {
+   edge t, e;
+@@ -168,8 +167,7 @@ forwarder_block_to (basic_block bb, basic_block to_bb)
+    BB2 to DEST are the same.  This makes the CFG merge point
+    free from side-effects.  Return true in this case, else false.  */
+ 
+-static bool
+-same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
++bool same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
+ {
+   edge e1 = find_edge (bb1, dest);
+   edge e2 = find_edge (bb2, dest);
+-- 
+2.25.1
+
diff --git a/0312-Add-late-slp-vectorization-pass-with-additional-chec.patch b/0312-Add-late-slp-vectorization-pass-with-additional-chec.patch
new file mode 100644
index 0000000000000000000000000000000000000000..19e519f614622d61953da3eb4653d979248cdafd
--- /dev/null
+++ b/0312-Add-late-slp-vectorization-pass-with-additional-chec.patch
@@ -0,0 +1,320 @@
+From 9df4a0bd76299734ae47f2f4e236b10f6c156994 Mon Sep 17 00:00:00 2001
+From: d84370931 
+Date: Thu, 14 Nov 2024 17:08:40 +0800
+Subject: [PATCH 3/8] Add late slp vectorization pass with additional checks.
+
+Add expansion of data reference offset using affine trees to check
+if data references may alias.
+
+Add check if a group of interleaving data references is smaller than
+max vector register size.
+
+Add operands swap for commutative operations.
+Swapping operands is necessary for better vector constructing.
+For example for operations
+  _1 = a * b;
+  _2 = b * c;
+Constructing vectors (a, c) * (b, b) is more profitable
+than (a, b) * (b, c).
+
+Add tests and special param flags for each check:
+  --param=vect-addr-expand-for-alias-check={0,1}
+  --param=vect-swap-operands={0,1}
+  --param=vect-register-size-check={0,1}
+
+Add enabling flag for late slp pass:
+  -ftree-slp-late
+---
+ gcc/common.opt                                |  4 ++
+ gcc/params.opt                                | 12 ++++++
+ gcc/passes.def                                |  4 ++
+ gcc/testsuite/gcc.dg/vect/vect-alias-expand.c | 12 ++++++
+ gcc/testsuite/gcc.dg/vect/vect-op-swap.c      | 10 +++++
+ gcc/testsuite/gcc.dg/vect/vect-regsize.c      | 18 +++++++++
+ gcc/timevar.def                               |  1 +
+ gcc/tree-data-ref.cc                          | 12 ++++++
+ gcc/tree-pass.h                               |  1 +
+ gcc/tree-vect-data-refs.cc                    | 15 +++++++
+ gcc/tree-vect-slp.cc                          | 28 +++++++++++++
+ gcc/tree-vectorizer.cc                        | 39 +++++++++++++++++++
+ 12 files changed, 156 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/vect/vect-alias-expand.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/vect-op-swap.c
+ create mode 100644 gcc/testsuite/gcc.dg/vect/vect-regsize.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 78cfc333a..c3c64ceaf 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -3268,6 +3268,10 @@ ftree-slp-transpose-vectorize
+ Common Var(flag_tree_slp_transpose_vectorize) Optimization Init(0)
+ Enable basic block vectorization (SLP) for transposed stores and loads on trees.
+ 
++ftree-slp-late
++Common Var(flag_slp_late) Init(0) Optimization
++Enable additional SLP vectorization pass after reassociation.
++
+ fvect-cost-model=
+ Common Joined RejectNegative Enum(vect_cost_model) Var(flag_vect_cost_model) Init(VECT_COST_MODEL_DEFAULT) Optimization
+ -fvect-cost-model=[unlimited|dynamic|cheap|very-cheap]	Specifies the cost model for vectorization.
+diff --git a/gcc/params.opt b/gcc/params.opt
+index 3ddfaf5b2..bb4dc1825 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1213,6 +1213,18 @@ The maximum factor which the loop vectorizer applies to the cost of statements i
+ Common Joined UInteger Var(param_vect_induction_float) Init(1) IntegerRange(0, 1) Param Optimization
+ Enable loop vectorization of floating point inductions.
+ 
++-param=vect-swap-operands=
++Common Joined UInteger Var(param_vect_swap_operands) Init(0) IntegerRange(0, 1) Param Optimization
++Enable swapping operands for commutative operations in vectorization analysis.
++
++-param=addr-expand-for-alias-check=
++Common Joined UInteger Var(param_addr_expand_for_alias_check) Init(0) IntegerRange(0, 1) Param Optimization
++Enable data reference address expansion for alias check.
++
++-param=vect-register-size-check=
++Common Joined UInteger Var(param_vect_register_size_check) Init(0) IntegerRange(0, 1) Param Optimization
++Enable checking if a group of interleaving data references may not fit in vector register.
++
+ -param=vrp1-mode=
+ Common Joined Var(param_vrp1_mode) Enum(vrp_mode) Init(VRP_MODE_VRP) Param Optimization
+ --param=vrp1-mode=[vrp|ranger] Specifies the mode VRP1 should operate in.
+diff --git a/gcc/passes.def b/gcc/passes.def
+index e945af96a..529cc5093 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -337,6 +337,10 @@ along with GCC; see the file COPYING3.  If not see
+       NEXT_PASS (pass_lower_switch);
+       NEXT_PASS (pass_cse_reciprocals);
+       NEXT_PASS (pass_reassoc, false /* early_p */);
++      NEXT_PASS (pass_slp_vectorize_late);
++      PUSH_INSERT_PASSES_WITHIN (pass_slp_vectorize_late)
++	  NEXT_PASS (pass_slp_vectorize);
++      POP_INSERT_PASSES ()
+       NEXT_PASS (pass_strength_reduction);
+       NEXT_PASS (pass_split_paths);
+       NEXT_PASS (pass_tracer);
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-alias-expand.c b/gcc/testsuite/gcc.dg/vect/vect-alias-expand.c
+new file mode 100644
+index 000000000..a68f4baf8
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/vect-alias-expand.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -ftree-vectorize --param=addr-expand-for-alias-check=1 -fdump-tree-slp-details" } */
++
++extern float arr[2][2];
++
++void foo (int i, int j, float a, float b)
++{
++  arr[i][j] *= a;
++  arr[i][j+1] *= b;
++}
++
++/* { dg-final { scan-tree-dump "Basic block will be vectorized using SLP" "slp2" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-op-swap.c b/gcc/testsuite/gcc.dg/vect/vect-op-swap.c
+new file mode 100644
+index 000000000..4872dc414
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/vect-op-swap.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -ftree-vectorize --param=vect-swap-operands=1 -fdump-tree-slp-details" } */
++
++void foo (float *res, float a, float b, float c)
++{
++  res[0] = a * b;
++  res[1] = b * c;
++}
++
++/* { dg-final { scan-tree-dump "Swapped operands for" "slp2" } } */
+diff --git a/gcc/testsuite/gcc.dg/vect/vect-regsize.c b/gcc/testsuite/gcc.dg/vect/vect-regsize.c
+new file mode 100644
+index 000000000..bcd81e6df
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/vect/vect-regsize.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -ftree-vectorize --param=vect-register-size-check=1 -fdump-tree-slp-details" } */
++
++extern float arr[256][256][1024];
++
++void foo (int i, int j, float a, float b)
++{
++  arr[i][j][0] += a;
++  arr[i][j][1] += b;
++  arr[i][j+1][0] += a;
++  arr[i][j+1][1] += b;
++  arr[i+1][j][0] += a;
++  arr[i+1][j][1] += b;
++  arr[i+1][j+1][0] += a;
++  arr[i+1][j+1][1] += b;
++}
++
++/* { dg-final { scan-tree-dump "Basic block will be vectorized using SLP" "slp2" } } */
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index fc2b1e1e7..7560e930a 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -205,6 +205,7 @@ DEFTIMEVAR (TV_SCALAR_CLEANUP        , "scalar cleanup")
+ DEFTIMEVAR (TV_TREE_PARALLELIZE_LOOPS, "tree parallelize loops")
+ DEFTIMEVAR (TV_TREE_VECTORIZATION    , "tree vectorization")
+ DEFTIMEVAR (TV_TREE_SLP_VECTORIZATION, "tree slp vectorization")
++DEFTIMEVAR (TV_TREE_LATE_SLP         , "late slp vectorization")
+ DEFTIMEVAR (TV_GRAPHITE              , "Graphite")
+ DEFTIMEVAR (TV_GRAPHITE_TRANSFORMS   , "Graphite loop transforms")
+ DEFTIMEVAR (TV_GRAPHITE_DATA_DEPS    , "Graphite data dep analysis")
+diff --git a/gcc/tree-data-ref.cc b/gcc/tree-data-ref.cc
+index a05073c51..5eb4ac102 100644
+--- a/gcc/tree-data-ref.cc
++++ b/gcc/tree-data-ref.cc
+@@ -3021,6 +3021,18 @@ dr_may_alias_p (const struct data_reference *a, const struct data_reference *b,
+       get_inner_reference_aff (DR_REF (b), &off2, &size2);
+       aff_combination_scale (&off1, -1);
+       aff_combination_add (&off2, &off1);
++
++      if (param_addr_expand_for_alias_check)
++	{
++	  using tree_expand_map_t = hash_map;
++	  /* Cache used by aff_combination_expand.  */
++	  tree_expand_map_t *cache = NULL;
++
++	  if (off2.n)
++	    aff_combination_expand (&off2, &cache);
++	  free_affine_expand_cache (&cache);
++	}
++
+       if (aff_comb_cannot_overlap_p (&off2, size1, size2))
+ 	return false;
+     }
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 18b0f8022..2ed79f353 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -390,6 +390,7 @@ extern gimple_opt_pass *make_pass_slp_vectorize (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_complete_unroll (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_complete_unrolli (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_pre_slp_scalar_cleanup (gcc::context *ctxt);
++extern gimple_opt_pass *make_pass_slp_vectorize_late (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_parallelize_loops (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_loop_prefetch (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_iv_optimize (gcc::context *ctxt);
+diff --git a/gcc/tree-vect-data-refs.cc b/gcc/tree-vect-data-refs.cc
+index aae7f62f3..ee58c8f6c 100644
+--- a/gcc/tree-vect-data-refs.cc
++++ b/gcc/tree-vect-data-refs.cc
+@@ -3234,6 +3234,21 @@ vect_analyze_data_ref_accesses (vec_info *vinfo,
+ 		      != type_size_a))
+ 		break;
+ 
++	      if (param_vect_register_size_check)
++		{
++		  tree scalar_type = TREE_TYPE (DR_REF (dra));
++		  tree vec_type = get_related_vectype_for_scalar_type (
++		      vinfo->vector_mode, scalar_type);
++		  poly_uint64 vec_size = TYPE_VECTOR_SUBPARTS (vec_type);
++
++		  /* If we have a large interleaving group (especially a group
++		     of loads with gaps) that does not fit in vector register,
++		     we should split this group to chunks we support.  */
++		  if (maybe_ge (((unsigned HOST_WIDE_INT)init_b - init_prev)
++				/ type_size_a, vec_size))
++		    break;
++		}
++
+ 	      /* If the step (if not zero or non-constant) is smaller than the
+ 		 difference between data-refs' inits this splits groups into
+ 		 suitable sizes.  */
+diff --git a/gcc/tree-vect-slp.cc b/gcc/tree-vect-slp.cc
+index fbd638333..79026fb5b 100644
+--- a/gcc/tree-vect-slp.cc
++++ b/gcc/tree-vect-slp.cc
+@@ -687,6 +687,34 @@ vect_get_and_check_slp_defs (vec_info *vinfo, unsigned char swap,
+   if (first)
+     return 0;
+ 
++  /* If different statements in the group of commutative operations
++     have the same arguments but in different places, swap them to
++     group the same operands in one vector.
++
++     Check if swapping is enabled, operation is commutative and has
++     two operands of the same type.
++     If one of the operands in current statement match the operand
++     on another place of the first statement in the group we
++     swap operands in current statement.  */
++  if (param_vect_swap_operands && commutative_op == 0 && !first
++      && is_a  (vinfo) && number_of_oprnds == 2
++      && vect_def_types_match (dts[0], dts[1]))
++    {
++      slp_oprnd_info oprnd_info0 = (*oprnds_info)[0];
++      slp_oprnd_info oprnd_info1 = (*oprnds_info)[1];
++      if (oprnd_info1->ops[stmt_num] == oprnd_info0->ops[0]
++	  || oprnd_info0->ops[stmt_num] == oprnd_info1->ops[0])
++      {
++	std::swap (oprnd_info0->def_stmts[stmt_num],
++		   oprnd_info1->def_stmts[stmt_num]);
++	std::swap (oprnd_info0->ops[stmt_num],
++		   oprnd_info1->ops[stmt_num]);
++	if (dump_enabled_p ())
++	  dump_printf_loc (MSG_NOTE, vect_location,
++			   "Swapped operands for %G", stmt_info->stmt);
++      }
++    }
++
+   /* Now match the operand definition types to that of the first stmt.  */
+   for (i = 0; i < number_of_oprnds;)
+     {
+diff --git a/gcc/tree-vectorizer.cc b/gcc/tree-vectorizer.cc
+index a63fa3912..c363ce490 100644
+--- a/gcc/tree-vectorizer.cc
++++ b/gcc/tree-vectorizer.cc
+@@ -1524,6 +1524,45 @@ make_pass_slp_vectorize (gcc::context *ctxt)
+   return new pass_slp_vectorize (ctxt);
+ }
+ 
++/*  The late SLP vectorization pass.  */
++
++namespace {
++
++const pass_data pass_data_slp_vectorize_late =
++{
++  GIMPLE_PASS, /* type.  */
++  "slp_late", /* name.  */
++  OPTGROUP_NONE, /* optinfo_flags.  */
++  TV_TREE_LATE_SLP, /* tv_id.  */
++  PROP_cfg, /* properties_required.  */
++  0, /* properties_provided.  */
++  0, /* properties_destroyed.  */
++  0, /* todo_flags_start.  */
++  0, /* todo_flags_finish.  */
++};
++
++class pass_slp_vectorize_late : public gimple_opt_pass
++{
++public:
++  pass_slp_vectorize_late (gcc::context *ctxt)
++    : gimple_opt_pass (pass_data_slp_vectorize_late, ctxt)
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *)
++  {
++    return flag_slp_late != 0;
++  }
++
++}; // class pass_slp_vectorize_late
++
++} // anon namespace
++
++gimple_opt_pass *
++make_pass_slp_vectorize_late (gcc::context *ctxt)
++{
++  return new pass_slp_vectorize_late (ctxt);
++}
+ 
+ /* Increase alignment of global arrays to improve vectorization potential.
+    TODO:
+-- 
+2.33.0
+
diff --git a/0313-Add-tracer-transformation-for-static-probabilities.patch b/0313-Add-tracer-transformation-for-static-probabilities.patch
new file mode 100644
index 0000000000000000000000000000000000000000..53d233718de0f2c0015796ce14f49e9b1799840c
--- /dev/null
+++ b/0313-Add-tracer-transformation-for-static-probabilities.patch
@@ -0,0 +1,130 @@
+From ed300a0b07e608efb756b623263f014c2cebdf08 Mon Sep 17 00:00:00 2001
+From: Egorov Ivan WX1280859 
+Date: Tue, 26 Nov 2024 14:53:59 +0300
+Subject: [PATCH 6/8] Add tracer transformation for static probabilities
+
+---
+ gcc/common.opt                         |  4 ++++
+ gcc/opts.cc                            |  4 ++++
+ gcc/params.opt                         |  8 ++++++++
+ gcc/testsuite/gcc.dg/tracer-static-1.c | 28 ++++++++++++++++++++++++++
+ gcc/tracer.cc                          | 11 ++++++++++
+ 5 files changed, 55 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/tracer-static-1.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 96888cf1b..db35391c3 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2990,6 +2990,10 @@ ftracer
+ Common Var(flag_tracer) Optimization
+ Perform superblock formation via tail duplication.
+ 
++ftracer-static
++Common Var(flag_tracer_static) Init(0) Optimization
++Perform superblock formation via tail duplication for a given bb size.
++
+ ftrampolines
+ Common Var(flag_trampolines) Init(0)
+ For targets that normally need trampolines for nested functions, always
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 84dd8925a..34b84db8f 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -3180,6 +3180,10 @@ common_handle_option (struct gcc_options *opts,
+       }
+       break;
+ 
++    case OPT_ftracer_static:
++      SET_OPTION_IF_UNSET (opts, opts_set, flag_tracer, true);
++      break;
++
+     case OPT_ftree_vectorize:
+       /* Automatically sets -ftree-loop-vectorize and
+ 	 -ftree-slp-vectorize.  Nothing more to do here.  */
+diff --git a/gcc/params.opt b/gcc/params.opt
+index bb4dc1825..e5472dfc8 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1116,6 +1116,10 @@ The percentage of function, weighted by execution frequency, that must be covere
+ Common Joined UInteger Var(param_tracer_max_code_growth) Init(100) Param Optimization
+ Maximal code growth caused by tail duplication (in percent).
+ 
++-param=tracer-max-not-covered-insns-num=
++Common Joined UInteger Var(param_tracer_max_not_covered_insns_num) Init(12) Param Optimization
++Maximal number of instructions in the block, that must not be covered by trace formation.
++
+ -param=tracer-min-branch-probability=
+ Common Joined UInteger Var(param_tracer_min_branch_probability) Init(50) IntegerRange(0, 100) Param Optimization
+ Stop forward growth if the probability of best edge is less than this threshold (in percent). Used when profile feedback is not available.
+@@ -1128,6 +1132,10 @@ Stop forward growth if the probability of best edge is less than this threshold
+ Common Joined UInteger Var(param_tracer_min_branch_ratio) Init(10) IntegerRange(0, 100) Param Optimization
+ Stop reverse growth if the reverse probability of best edge is less than this threshold (in percent).
+ 
++-param=tracer-min-not-covered-insns-num=
++Common Joined UInteger Var(param_tracer_min_not_covered_insns_num) Init(1) Param Optimization
++Minimal number of instructions in the block, that must not be covered by trace formation.
++
+ -param=tree-reassoc-width=
+ Common Joined UInteger Var(param_tree_reassoc_width) Param Optimization
+ Set the maximum number of instructions executed in parallel in reassociated tree.  If 0, use the target dependent heuristic.
+diff --git a/gcc/testsuite/gcc.dg/tracer-static-1.c b/gcc/testsuite/gcc.dg/tracer-static-1.c
+new file mode 100644
+index 000000000..76c863b48
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/tracer-static-1.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -ftracer-static -fdump-tree-tracer" } */
++
++static __attribute__ ((noinline)) int fib (int n)
++{
++  if (n < 3)
++    return 0;
++
++  long long fib1 = 0, fib2 = 1;
++  long long currentFib = 0;
++
++  for (int i = 3; i <= n; ++i)
++    {
++      currentFib = fib1 + fib2;
++      fib1 = fib2;
++      fib2 = currentFib;
++    }
++
++  return currentFib;
++}
++
++int main (int argc, char** argv)
++{
++  int n = argc;
++  return fib (n);
++}
++
++/* { dg-final { scan-tree-dump-times "BB\\d+ with n = \\d+ will not be covered by tracer formation" 4 "tracer" } } */
+\ No newline at end of file
+diff --git a/gcc/tracer.cc b/gcc/tracer.cc
+index 4d054fe8f..9b1578cd4 100644
+--- a/gcc/tracer.cc
++++ b/gcc/tracer.cc
+@@ -304,6 +304,17 @@ tail_duplicate (void)
+     {
+       int n;
+       analyze_bb (bb, &n);
++
++    if (flag_tracer_static && n >= param_tracer_min_not_covered_insns_num
++	&& n <= param_tracer_max_not_covered_insns_num)
++      {
++	if (dump_file)
++	  fprintf (dump_file,
++		   "BB%d with n = %d will not be covered by tracer formation\n",
++		   bb->index, n);
++	continue;
++      }
++
+       if (!ignore_bb_p (bb))
+ 	blocks[bb->index] = heap.insert (-bb->count.to_frequency (cfun), bb);
+ 
+-- 
+2.33.0
+
diff --git a/0314-bugfix-Modify-the-hip09-tune-flags.patch b/0314-bugfix-Modify-the-hip09-tune-flags.patch
new file mode 100644
index 0000000000000000000000000000000000000000..180973129a9f1da8803c4145bceadd27377f199f
--- /dev/null
+++ b/0314-bugfix-Modify-the-hip09-tune-flags.patch
@@ -0,0 +1,56 @@
+From e94bf3e1ad12211ec037c9e04a1698e1ed16c87a Mon Sep 17 00:00:00 2001
+From: Mingchuan Wu 
+Date: Tue, 3 Dec 2024 21:02:39 +0800
+Subject: [PATCH 8/8] [bugfix] Modify the hip09 tune flags.
+
+---
+ gcc/config/aarch64/aarch64-tuning-flags.def |  3 +++
+ gcc/config/aarch64/aarch64.cc               | 11 +++++++++--
+ 2 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-tuning-flags.def b/gcc/config/aarch64/aarch64-tuning-flags.def
+index b4a8f99a6..293f6fb7e 100644
+--- a/gcc/config/aarch64/aarch64-tuning-flags.def
++++ b/gcc/config/aarch64/aarch64-tuning-flags.def
+@@ -49,6 +49,9 @@ AARCH64_EXTRA_TUNING_OPTION ("no_ldp_combine", NO_LDP_COMBINE)
+ 
+ AARCH64_EXTRA_TUNING_OPTION ("rename_load_regs", RENAME_LOAD_REGS)
+ 
++/* Prefer Advanced SIMD over SVE for auto-vectorization.  */
++AARCH64_EXTRA_TUNING_OPTION ("prefer_advsimd_autovec", PREFER_ADVSIMD_AUTOVEC)
++
+ AARCH64_EXTRA_TUNING_OPTION ("cse_sve_vl_constants", CSE_SVE_VL_CONSTANTS)
+ 
+ AARCH64_EXTRA_TUNING_OPTION ("use_new_vector_costs", USE_NEW_VECTOR_COSTS)
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 1d479f270..829e0da8f 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -1934,8 +1934,7 @@ static const struct tune_params hip09_tunings =
+   2,    /* min_div_recip_mul_df.  */
+   0,    /* max_case_values.  */
+   tune_params::AUTOPREFETCHER_WEAK,     /* autoprefetcher_model.  */
+-  (AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
+-   | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT),     /* tune_flags.  */
++  (AARCH64_EXTRA_TUNE_PREFER_ADVSIMD_AUTOVEC),     /* tune_flags.  */
+   &hip09_prefetch_tune
+ };
+ 
+@@ -20250,6 +20249,14 @@ aarch64_override_options_internal (struct gcc_options *opts)
+   SET_OPTION_IF_UNSET (opts, &global_options_set,
+ 		       param_sched_autopref_queue_depth, queue_depth);
+ 
++  /* If the core wants only AdvancedSIMD autovectorization, do this through
++     aarch64_autovec_preference.  If the user set it explicitly, they should
++     know what they want.  */
++  if (aarch64_tune_params.extra_tuning_flags
++      & AARCH64_EXTRA_TUNE_PREFER_ADVSIMD_AUTOVEC)
++    SET_OPTION_IF_UNSET (opts, &global_options_set,
++			 aarch64_autovec_preference, 1);
++
+   /* If using Advanced SIMD only for autovectorization disable SVE vector costs
+      comparison.  */
+   if (aarch64_autovec_preference == 1)
+-- 
+2.33.0
+
diff --git a/0315-Bugfix-Add-no-var-recored-check-for-ssa_name-in-stru.patch b/0315-Bugfix-Add-no-var-recored-check-for-ssa_name-in-stru.patch
new file mode 100644
index 0000000000000000000000000000000000000000..befe1f6746a194fcd94e9a2779a3f46017f21991
--- /dev/null
+++ b/0315-Bugfix-Add-no-var-recored-check-for-ssa_name-in-stru.patch
@@ -0,0 +1,234 @@
+From 05bece3d79daa886a469b066061f0606ca6ebed8 Mon Sep 17 00:00:00 2001
+From: huang-xioaquan 
+Date: Mon, 2 Dec 2024 17:39:11 +0800
+Subject: [PATCH 2/5] [Bugfix] Add no-var-recorded check for ssa_name in struct
+ reorg
+
+---
+ gcc/ipa-struct-reorg/escapes.def              |   1 +
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      |  44 ++++++
+ .../gcc.dg/struct/rf_void_ptr_ssa_name.c      | 125 ++++++++++++++++++
+ 3 files changed, 170 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/rf_void_ptr_ssa_name.c
+
+diff --git a/gcc/ipa-struct-reorg/escapes.def b/gcc/ipa-struct-reorg/escapes.def
+index 996a09bac..4ba9cc2d0 100644
+--- a/gcc/ipa-struct-reorg/escapes.def
++++ b/gcc/ipa-struct-reorg/escapes.def
+@@ -61,5 +61,6 @@ DEF_ESCAPE (escape_unhandled_rewrite, "Type escapes via a unhandled rewrite stmt
+ DEF_ESCAPE (escape_via_orig_escape, "Type escapes via a original escape type")
+ DEF_ESCAPE (escape_instance_field, "Type escapes via a field of instance")
+ DEF_ESCAPE (escape_via_empty_no_orig, "Type escapes via empty and no original")
++DEF_ESCAPE (escape_no_record_var, "Type escapes via no record var")
+ 
+ #undef DEF_ESCAPE
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 1a169c635..b93b8a5b5 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -1433,6 +1433,7 @@ public:
+   void propagate_escape_via_original (void);
+   void propagate_escape_via_empty_with_no_original (void);
+   void propagate_escape_via_ext_func_types (void);
++  void propagate_escape_via_no_record_var (void);
+   void analyze_types (void);
+   void clear_visited (void);
+   bool create_new_types (void);
+@@ -4467,6 +4468,13 @@ ipa_struct_reorg::check_type_and_push (tree newdecl, srdecl *decl,
+ 	}
+       /* At this point there should only be unkown void* ssa names.  */
+       gcc_assert (TREE_CODE (newdecl) == SSA_NAME);
++      tree inner = SSA_NAME_VAR (newdecl);
++      if (current_layout_opt_level >= STRUCT_REORDER_FIELDS && 
++	  inner && find_decl (inner) == NULL)
++	{
++	  type->mark_escape (escape_no_record_var, stmt);
++	  return;
++	}
+       if (dump_file && (dump_flags & TDF_DETAILS))
+ 	{
+ 	  fprintf (dump_file, "\nrecording unkown decl: ");
+@@ -5512,6 +5520,41 @@ ipa_struct_reorg::propagate_escape_via_ext_func_types (void)
+     }
+ }
+ 
++/* Escape propagation is performed on ssa_name decls that have no recorded
++   var in the function's decls.  */
++
++void
++ipa_struct_reorg::propagate_escape_via_no_record_var (void)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\n propagate_escape_via_no_record_var: \n\n");
++
++  for (unsigned i = 0; i < functions.length (); i++)
++    {
++      if (functions[i]->node)
++	set_cfun (DECL_STRUCT_FUNCTION (functions[i]->node->decl));
++
++      for (unsigned j = 0; j < functions[i]->decls.length (); j++)
++	{
++	  srdecl *decl = functions[i]->decls[j];
++	  srtype *type = decl->type;
++
++	  if (TREE_CODE (decl->decl) == SSA_NAME)
++	    {
++	      tree inner = SSA_NAME_VAR (decl->decl);
++
++	      if (inner && functions[i]->find_decl (inner) == NULL)
++		type->mark_escape (escape_no_record_var, NULL);
++	    }
++	}
++
++      set_cfun (NULL);
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\n end propagate_escape_via_no_record_var \n\n");
++}
++
+ /* Prune the escaped types and their decls from what was recorded.  */
+ 
+ void
+@@ -5530,6 +5573,7 @@ ipa_struct_reorg::prune_escaped_types (void)
+       propagate_escape_via_original ();
+       propagate_escape_via_empty_with_no_original ();
+       propagate_escape_via_ext_func_types ();
++      propagate_escape_via_no_record_var ();
+     }
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_void_ptr_ssa_name.c b/gcc/testsuite/gcc.dg/struct/rf_void_ptr_ssa_name.c
+new file mode 100644
+index 000000000..0f624b6b9
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/rf_void_ptr_ssa_name.c
+@@ -0,0 +1,125 @@
++// Add a void* ssa_name check and escape.
++/* { dg-do compile } */
++
++// includes
++#include "stdio.h"
++#include "stdlib.h"
++#include "time.h"
++#include "string.h"
++#include "limits.h"
++#include "float.h"
++
++#define JOTAI_NUM_RANDS_ 25
++
++const unsigned rand_primes[JOTAI_NUM_RANDS_] = {179, 103, 479, 647, 229, 37,
++271, 557, 263, 607, 18743, 50359, 21929, 48757, 98179, 12907, 52937, 64579,
++49957, 52567, 507163, 149939, 412157, 680861, 757751};
++
++int next_i ()
++{
++  int counter = 0;
++  return rand_primes[(++counter)%JOTAI_NUM_RANDS_];
++}
++
++float next_f ()
++{
++  int counter = 0;
++  return rand_primes[(++counter)%JOTAI_NUM_RANDS_] / 757751.0F;
++}
++
++// Usage menu
++void usage()
++{
++    printf("%s", "Usage:\n\
++    prog [ARGS]\n\
++\nARGS:\n\
++       0	    big-arr\n\
++       1	    big-arr-10x\n\
++       2	    empty\n\
++\n\
++");
++}
++
++// ------------------------------------------------------------------------- //
++
++typedef unsigned long size_t;  // Customize by platform.
++typedef long intptr_t;
++typedef unsigned long uintptr_t;
++typedef long scalar_t__;  // Either arithmetic or pointer type.
++/* By default, we understand bool (as a convenience). */
++typedef int bool;
++#define false 0
++#define true 1
++
++/* Forward declarations */
++
++/* Type definitions */
++typedef  size_t u32 ;
++struct octeon_device {int octeon_id; } ;
++
++/* Variables and functions */
++ size_t MAX_OCTEON_DEVICES ; 
++ struct octeon_device** octeon_device ; 
++
++int lio_get_device_id(void *dev)
++{
++  struct octeon_device *octeon_dev = (struct octeon_device *)dev;
++  u32 i;
++
++  for (i = 0; i < MAX_OCTEON_DEVICES; i++)
++    {
++      if (octeon_device[i] == octeon_dev)
++	return octeon_dev->octeon_id;
++    }
++  return -1;
++}
++
++// ------------------------------------------------------------------------- //
++
++int main(int argc, char *argv[])
++{
++  if (argc != 2)
++    {
++      usage();
++      return 1;
++    }
++
++  int opt = atoi(argv[1]);
++  switch(opt)
++    {
++      // big-arr
++      case 0:
++	{
++	  void * dev;
++	  int benchRet = lio_get_device_id(dev);
++	  printf("%d\n", benchRet); 
++	  break;
++	}
++
++      // big-arr-10x
++      case 1:
++	{
++	  void * dev;
++	  int benchRet = lio_get_device_id(dev);
++	  printf("%d\n", benchRet); 
++	  break;
++	}
++
++      // empty
++      case 2:
++	{
++	  void * dev;
++	  int benchRet = lio_get_device_id(dev);
++	  printf("%d\n", benchRet); 
++	  break;
++	}
++
++      default:
++	usage();
++	break;
++    }
++
++  return 0;
++}
++
++/* { dg-final { scan-ipa-dump "No structures to transform" "struct_reorg" } } */
+-- 
+2.33.0
+
diff --git a/0316-Use-ai-ability-to-guide-optimization.patch b/0316-Use-ai-ability-to-guide-optimization.patch
new file mode 100644
index 0000000000000000000000000000000000000000..40b97861fa9f30cf18b3ba656e0603001a4dc4ad
--- /dev/null
+++ b/0316-Use-ai-ability-to-guide-optimization.patch
@@ -0,0 +1,741 @@
+From 0b85ab4639e2d25314175962a6e41a841649b028 Mon Sep 17 00:00:00 2001
+From: zhenyu zhao 
+Date: Sun, 24 Nov 2024 17:29:13 +0800
+Subject: [PATCH 3/5] Use ai ability to guide optimization.
+
+---
+ gcc/Makefile.in               |   8 +-
+ gcc/ai4c-infer.cc             | 457 ++++++++++++++++++++++++++++++++++
+ gcc/ai4c-infer.h              |  29 +++
+ gcc/config/aarch64/aarch64.cc |  14 +-
+ gcc/gcc.cc                    |  32 +++
+ gcc/gcc.h                     |   1 +
+ gcc/ipa-hardware-detection.cc |   6 +-
+ gcc/onnx.fdata                |   1 +
+ gcc/opts-global.cc            |  10 +
+ 9 files changed, 550 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/ai4c-infer.cc
+ create mode 100644 gcc/ai4c-infer.h
+ create mode 100644 gcc/onnx.fdata
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index bb6197a8e..6315462aa 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1734,13 +1734,13 @@ OBJS-libcommon = diagnostic-spec.o diagnostic.o diagnostic-color.o \
+ 	pretty-print.o intl.o \
+ 	sbitmap.o \
+ 	vec.o input.o hash-table.o ggc-none.o memory-block.o \
+-	selftest.o selftest-diagnostic.o sort.o
++	ai4c-infer.o selftest.o selftest-diagnostic.o sort.o
+ 
+ # Objects in libcommon-target.a, used by drivers and by the core
+ # compiler and containing target-dependent code.
+ OBJS-libcommon-target = $(common_out_object_file) prefix.o \
+ 	opts.o opts-common.o options.o vec.o hooks.o common/common-targhooks.o \
+-	hash-table.o file-find.o spellcheck.o selftest.o opt-suggestions.o
++	hash-table.o file-find.o spellcheck.o ai4c-infer.o selftest.o opt-suggestions.o
+ 
+ # This lists all host objects for the front ends.
+ ALL_HOST_FRONTEND_OBJS = $(foreach v,$(CONFIG_LANGUAGES),$($(v)_OBJS))
+@@ -2256,7 +2256,7 @@ gcc-nm.cc: gcc-ar.cc
+ 	cp $^ $@
+ 
+ COLLECT2_OBJS = collect2.o collect2-aix.o vec.o ggc-none.o \
+-  collect-utils.o file-find.o hash-table.o selftest.o
++  collect-utils.o file-find.o hash-table.o ai4c-infer.o selftest.o
+ COLLECT2_LIBS = @COLLECT2_LIBS@
+ collect2$(exeext): $(COLLECT2_OBJS) $(LIBDEPS)
+ # Don't try modifying collect2 (aka ld) in place--it might be linking this.
+@@ -3720,6 +3720,8 @@ install-plugin: installdirs lang.install-plugin s-header-vars install-gengtype
+ 
+ # Install the compiler executables built during cross compilation.
+ install-common: native lang.install-common installdirs
++	rm -f $(DESTDIR)$(libexecdir)/onnx.fdata
++	cp $(srcdir)/onnx.fdata $(DESTDIR)$(libexecsubdir)/onnx.fdata
+ 	for file in $(COMPILERS); do \
+ 	  if [ -f $$file ] ; then \
+ 	    rm -f $(DESTDIR)$(libexecsubdir)/$$file; \
+diff --git a/gcc/ai4c-infer.cc b/gcc/ai4c-infer.cc
+new file mode 100644
+index 000000000..99f7a6b45
+--- /dev/null
++++ b/gcc/ai4c-infer.cc
+@@ -0,0 +1,457 @@
++/* Lightweight AI Inference Framework.
++   Copyright (C) 2024-2024 Free Software Foundation, Inc.
++This file is part of GCC.
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include "ai4c-infer.h"
++#include "config.h"
++#include "system.h"
++
++#define M_MODE_SIZE  6
++#define NATIVE_TUNE_SIZE 128
++#define CATS_STRINGS_ROW  12
++#define CATS_STRINGS_COL  65
++#define OFFSET_ROW  6
++#define SCALE_ROW  6
++#define UNITY_ROW 1
++#define COEFFICIENT_ROW  18
++#define COEFFICIENT_COL  100
++#define COEFFICIENT1_ROW  100
++#define COEFFICIENT1_COL  1
++#define INTERCEPTS_ROW  100
++#define INTERCEPTS1_ROW  1
++
++/* Model info.  */
++static int64_t argv_hw1[M_MODE_SIZE];
++static char native_tune[NATIVE_TUNE_SIZE];
++
++/* Intermediate computation results from the ONNX model.  */
++static char cats_strings[CATS_STRINGS_ROW][CATS_STRINGS_COL];
++static float offset[OFFSET_ROW];
++static float scale[SCALE_ROW];
++static float unity[UNITY_ROW];
++static float coefficient[COEFFICIENT_ROW][COEFFICIENT_COL];
++static float coefficient1[COEFFICIENT1_ROW][COEFFICIENT1_COL];
++static float intercepts[INTERCEPTS_ROW];
++static float intercepts1[INTERCEPTS1_ROW];
++
++/* Model result.  */
++static int64_t initialized;
++static int64_t optimize_result;
++
++void
++prepare_native_tune_str (const char *info)
++{
++  gcc_assert (strlen (info) < NATIVE_TUNE_SIZE);
++  if (info)
++    strcpy (native_tune, info);
++  return;
++}
++
++void
++set_cache_info (int prefetches, int l1_cache_size,
++		int l1_cache_line_size, int l2_cache_size,
++		int prefetch_latency, int prefetch_distance_factor)
++{
++  gcc_assert (5 < M_MODE_SIZE);
++  argv_hw1[0] = prefetches;
++  argv_hw1[1] = l1_cache_size;
++  argv_hw1[2] = l1_cache_line_size;
++  argv_hw1[3] = l2_cache_size;
++  argv_hw1[4] = prefetch_latency;
++  argv_hw1[5] = prefetch_distance_factor;
++}
++
++/* Read float from onnx.fdata.  */
++
++float static
++read_float_from_file (FILE* file)
++{
++  char hex_float[8];
++  float result;
++
++  if (!file)
++    {
++      perror ("Can not open file.");
++      return result;
++    }
++    
++  if (fscanf (file, "%8s", hex_float) != 1)
++    {
++      perror ("Can not read hex from onnx.fdata.");
++      return result;
++    }
++
++  unsigned char bytes[4];
++  for (int i = 0; i < 4; i++)
++    {
++      sscanf(hex_float + 2 * i, "%2hhx", &bytes[i]);
++    }
++
++  memcpy(&result, bytes, sizeof(float));
++  return result;
++}
++
++/* To read model parameter information from onnx.fdata and store it into the
++   appropriate arrays.  */
++
++static void
++fill_node (const char *file_name)
++{
++  FILE *file = fopen (file_name, "rb");
++
++  if (!file)
++    {
++      perror ("Can not open file.");
++      return;
++    }
++
++   /* Read cats_strings from onnx.fdata.  */
++  char hex_string[2];
++  for (int i = 0; i < CATS_STRINGS_ROW; i++)
++    {
++      for (int j = 0; j < CATS_STRINGS_COL - 1; j++)
++	{
++	  if (fscanf(file, "%2s", hex_string) != 1)
++	    {
++	      perror ("Can not read cats_strings from onnx.fdata.");
++	      return;
++	    }
++	  cats_strings[i][j] = (unsigned char)strtol(hex_string, NULL, 16);
++	}
++	cats_strings[i][CATS_STRINGS_COL - 1] = '\0';
++    }
++  
++  /* Read offset from onnx.fdata.  */
++  for (int i = 0; i < OFFSET_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      offset[i] = result;
++    }
++  
++  /* Read scale from onnx.fdata.  */
++  for (int i = 0; i < SCALE_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      scale[i] = result;
++    }
++
++  /* Read coefficient from onnx.fdata.  */
++  for (int i = 0; i < COEFFICIENT_ROW; i++)
++    for (int j = 0; j < COEFFICIENT_COL; j++)
++      {
++	float result = read_float_from_file (file);
++	coefficient[i][j] = result;
++      }
++
++  /* Read coefficient1 from onnx.fdata.  */
++  for (int i = 0; i < COEFFICIENT1_ROW; i++)
++    for (int j = 0; j < COEFFICIENT1_COL; j++)
++      {
++	float result = read_float_from_file (file);
++	coefficient1[i][j] = result;
++      }
++
++  /* Read intercepts from onnx.fdata.  */
++  for (int i = 0; i < INTERCEPTS_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      intercepts[i] = result;
++    }
++
++  /* Read intercepts1 from onnx.fdata.  */
++  for (int i = 0; i < INTERCEPTS1_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      intercepts1[i] = result;
++    }
++
++  /* Read unity from onnx.fdata.  */
++  for (int i = 0; i < UNITY_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      unity[i] = result;
++    }
++
++  fclose (file);
++  return;
++}
++
++static void
++matmul (const float *lhs, const float *rhs, int m, int k, int n, float *out)
++{
++  for (int i = 0; i < m; i++)
++    {
++      for (int j = 0; j < n; j++)
++	{
++	  out[i * n + j] = 0.0f;
++	  for (int p = 0; p < k; p++)
++	    {
++	      out[i * n + j] += lhs[i * k + p] * rhs[p * n + j];
++	    }
++	}
++    }
++}
++
++static void
++add (const float *lhs, const float *rhs, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      out[i] = lhs[i] + rhs[i];
++    }
++}
++
++static void
++sub (const float *lhs, const float *rhs, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      out[i] = lhs[i] - rhs[i];
++    }
++}
++
++static void
++sigmoid (const float *in, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      out[i] = 1.0f / (1.0f + expf (-in[i]));
++    }
++}
++
++static void
++relu (const float *data, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      if (data[i] < 0)
++	{
++	  out[i] = 0;
++	}
++      else
++	{
++	  out[i] = data[i];
++	}
++    }
++}
++
++static void
++line_concat (const float *in, int in_size, float *out, int out_size)
++{
++  for (int i = 0; i < in_size; i++)
++    out[out_size + i] = in[i];
++}
++
++static void
++one_hot_encoder (const char *in, const char (*cats)[65], float *out,
++		 int out_size)
++{
++  for (int i = 0; i < out_size; i++)
++    {
++      if (i < out_size && strcmp (cats[i], in) == 0)
++	{
++	  out[i] = 1.0f;
++	}
++      else
++	{
++	  out[i] = 0.0f;
++	}
++    }
++}
++
++static void
++imputer (const int64_t *in, int size, float *out)
++{
++  for (int i = 0; i < size; i++)
++    out[i] = in[i] * 1.0f;
++}
++
++static void
++scaler (const float *in, const float *offset, const float *scale, int size,
++	float *out)
++{
++  for (int i = 0; i < size; i++)
++    out[i] = (in[i] - offset[i]) * scale[i];
++}
++
++static int
++argmax (const float *in, int in_size)
++{
++  int out_idx = 0;
++  for (int i = 0; i < in_size; i++)
++    {
++      if (in[i] > in[out_idx])
++	out_idx = i;
++    }
++  return out_idx;
++}
++
++static void
++preprocess (int argc, int64_t *argv, int64_t *in_modes)
++{
++  int default_int_val= 0;
++  for (int i = 0; i < argc && i < M_MODE_SIZE; i++)
++    {
++      if (i < argc)
++	{
++	  in_modes[i] = argv[i];
++	}
++      else
++	{
++	  in_modes[i] = default_int_val;
++	}
++    }
++}
++
++/* The process of model inference.  */
++static int
++graph_infer (int argc, const char *argv, int argc2, int64_t *argv2)
++{
++  const char *file_name = getenv ("GCC_AI4C_ONNX_FDATA");
++
++  if (access (file_name, F_OK) == 0)
++    {
++      fill_node (file_name);
++    }
++  else
++    {
++      return 0;
++    }
++
++  int64_t in_modes[M_MODE_SIZE];
++
++  preprocess (argc2, argv2, in_modes);
++  
++  /* concat_result and encoder_out are intermediate computation results from
++     the ONNX model. concat_result is a 1 × 18 matrix, and encoder_out is a
++     1 × 12 matrix.  */
++
++  const int concat_out_size = 18;
++  float concat_result[concat_out_size];
++  const int encoder_out_size = 12;
++  float encoder_out[encoder_out_size];
++
++  one_hot_encoder (argv, cats_strings, encoder_out, encoder_out_size);
++
++  line_concat (encoder_out, encoder_out_size, concat_result, 0);
++
++  float variable[M_MODE_SIZE];
++  imputer (in_modes, M_MODE_SIZE, variable);
++
++  float variable1[M_MODE_SIZE];
++  scaler (variable, offset, scale, M_MODE_SIZE, variable1);
++  float transformed_column[concat_out_size + M_MODE_SIZE];
++  line_concat (variable1, M_MODE_SIZE, transformed_column, 0);
++  line_concat (concat_result, concat_out_size, transformed_column, 6);
++
++  /* This requires performing matrix multiplication between a 1 × 18 matrix
++     and an 18 × 100 matrix  */
++
++  const int m = 1, k = 18, n = 100;
++  float mul_result[n];
++  matmul (transformed_column, coefficient[0], m, k, n, mul_result);
++
++  float add_result[n];
++  add (mul_result, intercepts, n, add_result);
++
++  float next_activations[n];
++  relu (add_result, n, next_activations);
++
++  /* This requires performing matrix multiplication between a 1 × 100 matrix
++     and an 100 × 1 matrix  */
++
++  const int m2 = 1, k2 = 100, n2 = 1;
++  float mul_result1[n2];
++  matmul (next_activations, coefficient1[0], m2, k2, n2, mul_result1);
++
++  float add_result1[n2];
++  add (mul_result1, intercepts1, n2, add_result1);
++
++  float out_activations_result[n2];
++  sigmoid (add_result1, n2, out_activations_result);
++
++  float negative_class_proba[n2];
++  sub (unity, out_activations_result, n2, negative_class_proba);
++  const int prob_size = n2 + n2;
++  float probabilities[prob_size];
++  line_concat (negative_class_proba, n2, probabilities, 0);
++  line_concat (out_activations_result, n2, probabilities, n2);
++
++  int argmax_output = argmax (probabilities, prob_size);
++  return argmax_output;
++}
++
++void execute_sha256 (const char *input, char *output, size_t output_size)
++{
++    char command[256];
++    snprintf (command, sizeof (command), "echo -n \"%s\" | sha256sum", input);
++
++    FILE *pipe = popen (command, "r");
++    if (pipe == NULL)
++      {
++	perror ("Failed to run command.");
++	return;
++      }
++
++    fgets (output, output_size, pipe);
++    pclose (pipe);
++}
++
++int
++get_optimize_decision_from_ai4c ()
++{
++  if (initialized== 1)
++    {
++      return optimize_result;
++    }
++  if (native_tune && (strchr (native_tune, '+') != NULL))
++    {
++      char hash[65];
++      char input[64];
++      const char prefix = '=';
++      const char *start = strchr (native_tune, prefix);
++      if (start)
++	{
++	  start += 1;
++	  const char *end = strchr (start, '+');
++	  if (!end)
++	    {
++	      end = native_tune + strlen (native_tune);
++	    }
++	  size_t len = end - start;
++	  if (len >= sizeof (input))
++	    len = sizeof (input) - 1;
++	  strncpy (input, start, len);
++	  input[len] = '\0';
++	}
++      else
++	input[0] = '\0';
++
++      execute_sha256 (input, hash, sizeof (hash));
++      optimize_result = graph_infer (1, hash, M_MODE_SIZE, argv_hw1);
++      initialized = 1;
++      if (optimize_result == 1)
++	setenv ("AI_GUIDED", "1", 1);
++    }
++  return optimize_result;
++}
+diff --git a/gcc/ai4c-infer.h b/gcc/ai4c-infer.h
+new file mode 100644
+index 000000000..7fb75900b
+--- /dev/null
++++ b/gcc/ai4c-infer.h
+@@ -0,0 +1,29 @@
++/* Lightweight AI Inference Framework.
++
++   Copyright (C) 2024-2024 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify it under
++   the terms of the GNU General Public License as published by the Free
++   Software Foundation; either version 3, or (at your option) any later
++   version.
++
++   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++   WARRANTY; without even the implied warranty of MERCHANTABILITY or
++   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++   for more details.
++
++   You should have received a copy of the GNU General Public License
++   along with GCC; see the file COPYING3.  If not see
++   .  */
++
++#ifndef AI4C_INFER_H
++#define AI4C_INFER_H
++
++extern int get_optimize_decision_from_ai4c ();
++extern void set_cache_info (int prefetches, int l1_cache_size, 
++			    int l1_cache_line_size, int l2_cache_size,
++			    int prefetch_latency, int prefetch_distance_factor);
++extern void prepare_native_tune_str (const char *info);
++#endif /* AI4C_INFER_H */
+\ No newline at end of file
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 08a43541e..1d479f270 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -18764,12 +18764,14 @@ override_C_optimize_options (struct gcc_options *opts)
+   opts->x_flag_ipa_struct_reorg = 6;
+   opts->x_struct_layout_optimize_level = 6;
+   opts->x_flag_gnu89_inline = 1;
+-  opts->x_flag_ccmp2 = 1;
+-  opts->x_flag_array_widen_compare = 1;
+   opts->x_flag_convert_minmax = 1;
+   opts->x_flag_tree_slp_transpose_vectorize = 1;
+   opts->x_param_max_inline_insns_auto = 64;
+   opts->x_param_inline_unit_growth = 96;
++  opts->x_param_pointer_compression_size = 16;
++  opts->x_semi_relayout_level = 14;
++  opts->x_flag_ipa_prefetch = 1;
++  opts->x_flag_ipa_ic = 1;
+ }
+ 
+ /* Check whether in CPP language or LTO with only CPP language.  */
+@@ -18826,6 +18828,8 @@ override_optimize_options_1 (struct gcc_options *opts)
+   opts->x_param_ifcvt_allow_register_renaming = 2;
+   opts->x_param_max_rtl_if_conversion_unpredictable_cost = 48;
+   opts->x_param_max_rtl_if_conversion_predictable_cost = 48;
++  opts->x_flag_ccmp2 = 1;
++  opts->x_flag_array_widen_compare = 1;
+ }
+ 
+ static void
+@@ -18848,6 +18852,8 @@ override_Fortran_optimize_options (struct gcc_options *opts)
+   opts->x_flag_reorder_blocks = 1;
+   opts->x_flag_crypto_accel_aes = 1;
+   opts->x_param_flexible_seg_len = 1;
++  opts->x_flag_alias_analysis_expand_ssa = 1;
++  opts->x_flag_chrec_mul_fold_strict_overflow = 1;
+ }
+ 
+ /* Reset the optimize option.
+@@ -18857,7 +18863,9 @@ static void
+ reset_machine_option (struct gcc_options *opts)
+ {
+   if (!(opts->x_optimize_maximum)
+-      || strstr (opts->x_aarch64_tune_string, "hip09") == NULL)
++      || opts->x_aarch64_cpu_string == NULL
++      || (strstr (opts->x_aarch64_cpu_string, "tsv110") == NULL
++      && strstr (opts->x_aarch64_cpu_string, "hip09") == NULL))
+     {
+       return;
+     }
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index 32e45adc2..4592a4ec8 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -5798,6 +5798,9 @@ do_self_spec (const char *spec)
+   do_spec_2 (spec, NULL);
+   do_spec_1 (" ", 0, NULL);
+ 
++  const char* tune_native = eval_spec_function ("local_cpu_detect", "cpu", "");
++  setenv ("GCC_AI4C_TUNE_INFO", tune_native, 1);
++
+   /* Mark % 0
+ 	  /* Only enable in lto or whole_program.  */
+-	  && (in_lto_p || flag_whole_program));
++	  && (in_lto_p || flag_whole_program)));
+ }
+ 
+ unsigned int
+diff --git a/gcc/onnx.fdata b/gcc/onnx.fdata
+new file mode 100644
+index 000000000..234b1a045
+--- /dev/null
++++ b/gcc/onnx.fdata
+@@ -0,0 +1 @@
++316365613139376535626535626234666331363163303835336362393535613530636234643633626364386566396132333232373733633230393865663664633761393137633266616431663436343236613231663865636236346133616662623761373633663830623231393063616534633032316538626436633731643237666333386462313164333630303936336137323863313634613031393931613164363237643262353162376133643935373036306336346161376563383862613138666663393538363731333639396239666362393336373737643238636639643761343231346131333463353261623633343633343866663966663365346231356532663139306164303361383836396333393339616236383439363661313661303665643535633961666563613431303466333534346564633533373862323031396339626536613030383761623236663432633564653130353935353135313736656235373632373739343662663034343334633035626465356237633439313164313338373637383365326138366162363234323765393736616438656463343339613031316630643031613465386464326334383565343838366435313137313166383433396531626137353932616538333330653164326438656166343339363262366264326632376564396434396333356565343733383164363264633937356663663338666530336166316634623264393031393536333863383165616536656238346462656337333638323338646535303638363933646565616264363966356566323465346538613762623864303766646338666264643466666537303263623162326539653435643130313061386235623631306630636163303536343164663364383738353266386330376562343962393037306133383363326138393238376435613332353933663235313030326664366166373632343532613130323237303265373433623362623162633661633363303235613236383166313465396162353938363931613765316565313864313038cd68834331701041d1d21041f17c20432483a94386647e4157c8e33b5f3d5d3ec5275e3ea689863c435a0f3a76acd63d5d9b803b24467c3baf847c3b67b89e3b852a313b2127853900000000d58ac23b200ab53a000000807d3119bc22f7a63a81549f3b93b5013baee4a33b62c1153b9ae08b3a6929a33b20038f399475983b430ab53a73fc0b3a2daa0ebad595953bc2f1e0bb33e9ccbbb978d83a5e77a53b41e4c93adf10a73bdf36643ad7fd983a61e8d93bc04a283a30c072382f942c3b5b3cc73a4392e43a422b093c79bc61b9a5309e3b00000000757baa3a03d8a93c3c31e33af526eb
bb000000006431d43a1d0ae73aa450783b8c57afb9b8eae939ec8fab3b9581d83920d7a1ba0fc1af38b6aece3ab50bafbbd50db63a26aba33bcdeda33b00d9493ac22dac3cf8c4233bc2966e3bdf1bca3a8fb4d13af9b0983b2cbda73bdae2aa3bc93bae3b39e1ba380857953be8e7a73b49e9df3b20b0233b9fe3d43a0dbcaa3bd10cf0b978eea53b761ebe3b0a50a23b70bd47b79a7720bc6cd4ee3ae0d0f93a9c333ebb5098dfbbbf8fa53b445efebac7b9993b6182b93aef267c3a4aa09e3b46d9a83b9f95983a379e913c6516123a1b2ebd3aaf943c3a0b90803becba92bce68f673be723253c5d7f813ad779613800000080af3c65ba6999743900000080957a003d82f2fe39baab4d3b7f348c39b8d3323b3c1e253ace952dbbc9d364bc3aafaf373d0a633be8fdee3968b0fa39eb70a83a7cba4e3bdf2407bc40f50f3d94f4c3b9a828573b3f2bc3b99a5763bcccb838bb24f011bae3400dbdc3074fba30a829bb3dde6e3ad7c2caba2b2aa7b8d479a7bbebe2603a7025583b00000000017414ba680386bc9b365e3aaacb03bc000000006afd90b9a64e263980eb223c80a48ebcca9703392310573b1fd419bbf7368abc17a2083a3ceafab95eb11cbcf29995b9a64264bc8bae403bc1dc6139631c88bc12e3373c07cf0c3cdc93a6b97edbc0b917754d3b5cdc143c61ef393b40a809baf3861dbbafce623be550513b828382bc359d513afa4a25ba31394c3bb013da3a9835553bf3d9553bec2b65bcee09bab9f6343e3c03a59f39fb11053a078e7cbc5bd006bcfe23363b08d12cbb3cfb533bb98a8fbadcb99139cbd1573b24725e3b01014fb6dcbc45ba6ee024bb318db1baf39ce9b952d625bc41afddb91d7dffbbc0ba163b0387b93b2594623b00000000f60cf9ba483c983b0000008015e6c6bcbd45983b77d62ebcfbb69f3b7b5752bcc334ab3b4f9806bc9d89063cc0675a3b807426bca81a9f3b7ef56f3b6a96a13a045937bcd4a2f33cb92173bc40af783b26ac40bc5fef6b3beba6fe3b8c7207bc5e25443bfd99a33be7e7403b4c2508bc0c87bb3bb95dcd3abe228b3bac03deb91a2ab03add753bbc000000002e04703be98f1fbccef2af3b17ebe93c0000000020e37a3b46ba913b1fd7003b1f3f133df85d423bacc843bc5fada7bbc8680d3d8423503b2afc6c3b4e43033dcfcc7c3bcece053cdbb44ebc4151823ba14426bc6e942c3b3bdc4d3a34967f3b7687783bd0cd3ebcfc75053ade324ebcd10c32bc9ff9fbbb0b7430bcf60e4abcd6b6e03b295db43b25c75d3b88334fbc8d95883ac9c73ebcddf941bc2b18083c43044c3b405414bd7617963b9910a03bd5e70c3d9356f23c3a2750bc472107bce47d47bc0125243b3c41953b0f6134bc8c403bbc8fb387
3ba5e218bcae5d06bc2dfe103b758a493b43cef63cd7438d3c2bf1eb3b2d4a833cf13a43bc5d14c4bd000000002932a7bc3191e4bb00000080224e753dea87dfbb41e28a3cbeb44b3d731d8f3c1312d2bb54e44dbc232b84bc74f9d9bd033bcb3cdda410bbeeeb47bdd7e44e3b3c3e21bb435712bdb3e6413c82e770393f20a53cc6642dbc325484bc410c4e3dcb49823dc262bd3c204a563d032393bb0887753c0cad943d3946abbbcb77b3bc9151c6ba860dc0bd00000080e5880d3a2f960ebd1bba99bcce3910bd0000008037acde3be98a983bd60b7c3c66ee27bd2431aab98b2b95bded06813bc17429bdf5a9e9bc4ff297bafad924bdc14d53bc901784bcad96073cd34989bc84580fbd1e276b3ca48e513c189796bbe15f8cbb39fa473cce9c693cbdd0843a4f07443dbf40c03c38a1893c3790ab3cb48c58bcc5e9863b684448bcb5c32abc0726a6ba1def9ebb57ce273d772b84bc1925c63d2e26d8bc24460cbcb0f807bc8dd0a5bc9ba312bd6ed5393c32e1f43cf3c58bbc8a5334bc8e0c53bbf78cb13c7805793c8d5800bbc4a5c2bcfc2c85bba79d3c3df00f493db55cb73cce71c43dc030f03cc953823c79c1f13d614db73d0000000074e98f3ca415183c000000801104803d9afea83ddf9ff93d835f9bbce8d8623cd67e093c453d143d7d8c90bc1434e23d24580b3e00711d3c729b903d81a0253c82e9b53cba65123ca564a23d7a53003c2c82ec3de139f93c58f58ebce101813ba5782d3d4e198e3dbaa40fbb58e2bc3bbf92943c98421e3df32c0c3cbc235ebcc2fe443c2789033e00000080b94ca73be81815bd1758e53c5df7053b00000080f9f63f3cc7a9893cb846823c65d2143c9bb50e3cced60c3e92fb983b583593bbbbfe263e390bdf3b696887bbd13e823c207890bc1cf0c23cd688163dd14e16bdd3cb813c95a6593c70d7083cd6c6e43b6d4d9b3c9455683c876e1f3e599ff83c4b377f3c2afd953cbeedd43ccbdb163d2d78fd3bcc84363c5c7fa63c22fedf3c3318e83d0ecdba3d0ea690bc462e9a3d0b11013cf19f503da4f8813db249c0bba7300f3c2d6c223dd1d7663b56b4c43d56e5f93c4799e43b0702a73d4e15ae3de8040e3cdfad72bc0ab6593d1fb7c9bb6f90b43dbfcab83b4cd802bbbd3c993be1a91c3c8f677cbaa83420bb0000008084bf263b6336adba00000000373bd13b521cffba733ac83bee8c9bba1306f73bbf5471ba8651773bc863ac3a6ed119bb926fc43b9368e5ba34f319bba9c8ebbaa74acb3b39169ebc812d573b4764beba5815ea3b5211caba956ec23a9e107d3b64dbc4ba674ac73be88107bb5354493b688c5cbaaf4571bab3d6b3bae566603b11b0b0ba6bd1d03b000000005b6cc1ba0720833c5210c7ba85cd97
bc000000003c1fc4bab35dbbba30b6fe3b389ea2bc97eb8eba37bae43b697a293b87969abc5c9e04bb83acc2ba5f8fadbcf872c5bab03daf3ad509fe3b2f81f9ba4317863cd808bb3b0177f03b02dabdbab2efbdba3b03d83bc09f223c6030ec3b0137c13b29f5663bf195ce3bc10eff3b18cda93b35486cba7dd7c8ba0d51003c34dc93ba891be33bb785ea3bbb75a73ae04f2abba21d8f3b9065c3ba8892bbbac37d96bc6a0c9dbc596cef3bce5a063bd64cec3be62fb0baaa5cbbba1acbd03b5cdfe13b0f37e9ba48bb653cc513733bc352a0bacba0ffba469ababdf17dae3c939271bd7af718be283c113ed15fbb3d00000080920e90badfa4d63d00000080df70d63c18f5c03dbbb677bde981e83d5a78d2bd0985093e663cffbd9a12803d3fc0b33d65b388bd50a0dd3d011acc3d2df0203ed04095bd5c9a8abd7294323dd404b33dabc5bdbd8042a33d93cb6f3d3b81f7bd4e2e823dfdba273d83ea863d7d0f3cbeaff2133e565a3d3d0d66ca3d035bea398af8073ec3b79abd00000000d078ad3dbe475c3c9267013e1db874bd000000809e1db03d23f9cb3d902b14be16ea52bdc41ac33db52abbbdf07981bed9442ebd4d94a83de0f7a93d745d41bd4cebb33d01f57c3da16adbbd8ae0b63d2d6c763cc1f40ebe098cbebddc59b83d256bb13d359fa9bdf886e5bd0dfcbbbdd68d0dbd726807be83579fbd05c9dcbdd8e8983ccd620d3edcf69f3dc980e8bdcb6c923e1cdcb3bdcc6ec1bde600823d030d993d9b3a1d3d420bcd3d754dde3df8132cbdfabb85bdce89c3bdefd054beafc6c8bd931e7c3d30fed83dbef795bd7fb1b3bd6d9eb73de344993cceb603bebd216b3de45c803d70a1953e1dfb62bea91d2a3ed284113f544008bfc50498be00000000d460e13b0d05b2be0000000066f28ebe9adb9ebe4ee6323efd01c7be63c7ac3e888dfabe44e5e23ec6f631bed07890beb0fe4a3e6217babee93daabecf501abfd1b9573e7bd53a3f805fe2be10a48cbecac09a3e52c67bbef48823be9a9dd83eb74340be75deeabd3fbf42be9d1c3c3f74e10abf923b05beaec4a4be66229fbbb35ff6be8a146c3e00000080638d84be04cb14be4dcfe7be53ec253f00000000208c89be7039a8bef1170c3f70ac0d3fdcd59bbe68f4973e6b1f903f16a3eb3e6d0a86be361a81be4cba023f82c88cbe01132ebea3feb73e79f391beeb6826be5130063fc4d9953e4a6690becd328abe13b7803efa6fc23e9823993efea5b83d60e0f43eff276c3ed453ba3e374b63bdd41502bf17e36dbe5034c53ea2ac9dbfff04903e64879e3e4c9d35bede8b6cbe435ed2be4e73abbe3020bfbef8c6e83e8630343ff9de9e3e78d65c3f3659a63eee6f35bed729b4be3954623ef8778f3ef4758f
be75fd4ebed9e3ed3e770a23be6b403ebed7e596bd656a093d6e463cbddfa103be2213f73d8a4c973d0000000050dc0bbb9d64b03d00000080d5e9283d3e429c3d1d0b3ebd35e5c03d101cacbd51b0e83dd890d5bd1378483dcc6a903d431d54bda0cab63d6376a63df1ab0a3e1a586bbd6bcfdbbd5cfb8d3db01a903dced598bdc300823d43fc3a3db75ccebdfea14a3d8f27fe3cf98c533de7d924bef762fc3df248103dde32a53d8d067e3a17bee53d5b6573bd00000080285b8b3d5cd0283d3447d93dd249c3bd00000000317f8d3db47da63df06ffebd933ca7bd3da29e3d0f9496bd7f1c6dbe2feb8abdd761863d8d3f883dc33d9abdecee903dd3ed453dc93cb4bd785f933d08c8393d5eb3f4bdef249bbd4ffb943d98be8e3d7b5487bd365bbebd103397bd19bdd2bcbc53e4bd9eb77cbd7d6fb5bdcc15713c4f00f03d54fc7e3d1c98c0bd05dd853e0dea8fbd5a2d9cbd35aa4b3d879a713df19b783d214da73d821bb73dc52889bdb707d4bdfd3d9ebdb6e53cbe1ee9a2bdc740443d3594b23de3046bbd4acb8fbdbf54943d23de5e3dbd36ddbddae5363d5155473de961aa3ed3a432be5d8e583e1d9a173f318110bfbc47aabe00000000a572363c9755c8be00000000c5235abe14fbb1be3f59573e57f8dbbe0adcc13e40d705bf1517f43eb1d467be07b3a2be40216f3eb6d9cfbe84b1bbbeba571cbf5c30843e96700c3f700eb6beee52a3bedcd6ab3eb39b93be9fd159be7524eb3efdd668beee2a1abe9dd171be0acf3d3f227e12bfbb312dbe3966bbbee3f99bbcec2f01bf2790883e0000008047ae9dbe59a438be26a4f9bedaf7f93e000000004b62a0bea849bdbe00ac123fa905d63e3e96b2bed43da93eef558d3f0c73b23e65bf96be67359abe8cc9c53e372ba4be184265becf66cb3edd11a7bed6d249be506f0d3f7429af3e0ba1a8bed2aca1becacc973e154ad73e9df5a93e53dd003e4cbd023fedc88d3e01e5cc3eff3eb3bd194d0abfa27090beb4a9d93ed0bf9dbf7c95a13e7bb5af3eb12a6bbeace488beb5419fbe86c8bebedea8d1beb735b03e6889073fb9f1b13e34835e3f7d7bb73ef8e961be08c5cabe6ff4833e956fa13e33e0a7befd926ebe6009fe3e8bc653be843b65be6a18a0bdd534f93c01fc52bd6b6f09be77e3fd3dc1c59f3d00000000e97d92bbf664b83d00000000972f193da896a43d65e754bd5e81c83d269cb4bd1d84ef3d1f11ddbd55385e3d020b993d4fdf69bd12a5be3ddd95ae3d62cb0d3e7ff87fbd7418c7bdf8ca803d1ed5983d13dea1bd480c8b3df3be503d2202d6bd540d5e3dc93e133d530d673d1b012bbe8f7c013eb79d243d9072ad3d772a273acfa6ec3d0ce383bd00000000fd56943de9119e3c0f64e03dd0cdb0
bd00000080274f963deba6ae3d1c0a05be683297bd1f18a73d49b19fbdc64270be58657bbd263a8f3d374f913d63948bbda1b0993ddab55b3d7485bcbd00fc9b3df3edb03c892c00be9350a6bd25af9d3d968b973d79fe90bdeca4c9bdba46a0bddb1004bdbc84ebbde25788bd3ab0bdbda690963ccab6f63d60cc883d918ec8bd51ba873e9a3e99bdd01da5bde567613d50fd813d6981613d7163af3ddbdcbe3d473578bd8c07c0bd6924a7bd21c53fbe8ca6abbdaaf9573d158dba3d86c37fbd572399bd2a109d3d6994dc3c1e95e4bd4e1d4b3d7eca5a3d032771bdc8f5403d4b1a09bde53ceabdbfd4db3de573753d000000004909b9ba32b88f3d00000000c1c16c3d8b42803d994910bd8ba8a03dac6f8bbd482cca3db803b7bd3e900f3dbd44693d09b223bd2e39963db96c893d58fbf83dd2092ebd01b411be3149c43d891f633dd5b879bd93444b3d60e4033dfcc3aebd6c231b3de078bd3c57241d3dc16d17bed815e03d92dad63cc208853d82c1823a3acdc63d6d833ebd00000000ad06563de91bf63ca912bb3d854001be00000000941b5e3d5cd1873d744ce1bd91fce3bd57a17b3d804275bd64d465be11f8bdbdae6a583d0b73503dfc7bd1bd7155633d3e6a0c3db57194bd8eae6b3d6798093d58a9d7bdc3db71bdbd2b693d602c5f3d20c14fbd73e59cbdd53e77bdf28694bc1c71c5bd7f773ebd704a96bdd115383cf8ebd13df406403d27019fbde07f823eef7668bd4fcf7fbd6184123d9a113f3ddd37ad3dfe6a8a3dda459a3d3a30bbbdafb00cbe663c80bd0e3832be203886bdf262123d1972913df4a036bd978267bd99a5673d02f7283d6ae8bfbdd981033d3e82193df70172bdf1b33b3d2c8309bdf837ebbd4944dc3d875a763d000000001630b3baef2e903d000000008d6c663da8b4803d58ca10bd9221a13d74e08bbd7fa6ca3d338bb7bd700e103d71236a3dc13924bd00b2963d3de8893d2c6cf93d4ab12ebd894d11be9e58c13d81f0633dae8e7abd69054c3d5b51043d5d42afbdc1b21b3db199bd3cdab61d3d61aa17be7e84e03d2c57d73c907b853d18e27a3ac747c73d073e3fbd0000000013d0563d4ba4ec3cf98ebb3db3d400be0000000068ea5e3df445883d2b22e2bded78e2bd97817c3d232376bdb02f66becefebbbd053d593d6b38513d5d50cfbdab27643d1ce40c3d1ce994bdc0866c3dc167043d7161d8bd9abb72bd8c016a3ddcfa5f3d6e8650bdf66e9dbda51c78bd8b6494bc14dcc5bd07203fbd87b396bd8478363cab61d23dd8be403d137e9fbda19d833edb4969bdf35780bd9506133d4ac83f3d4157a93d2be18a3d44c19a3db553b9bd422c0cbe29ae80bd8b1f32be2ab086bd7fe6123d5ce9913d1b4c37bd253c68bdf37a68
3dda1d253d1866c0bd1fef033d630f1a3d55a78ebd9363e43c877813bdf06300be01e5f03d82788b3d00000000c3734fba035aa53d00000000e511873d7b68913d4e6e1cbd7718b73d813fa9bdcc68e13d5460cdbdbca5213daa5f843dade935bd383bac3d31919b3d2bf8073e69c848bd95ef2cbe5d65d83dba17833d49ee93bddd1f693de61c123dab7ac5bdd1dd2d3d5cf9643c6e50333d290923bebe38f63d2111e33cba73993d0a4b1c3aff26de3de00c5abd0000008092d2793d9b92fc3c1457d13de3451bbe00000080993e803dcd659b3d9082f7bd691808bef0f6913d51b390bdbab66cbe5acce9bda06e743dd62e733d250efbbd92ab833d46cc1d3d0c53b4bdea20873d8341113dbc72edbda26d8cbd369a873d164c813d395d75bdbe4ab3bd341392bd2d3f81bc05cadcbd939c5ebdc34eb6bd0175de3b5116e93d1b04603d00c2c2bda59a863e93b488bd09f997bda483253d75f4573dfd1ad83d23ed9c3d4bbfad3dc5f9e5bdf05c27be036399bd72e93bbe8204a0bd71d8243d7988a73d352350bdb95188bdcbd0863df73d3e3d0cbed5bd59aa133d40402b3d52f370bd85b63b3d0fd708bd723aeabdcdb1db3d87df743d000000806cedb3bad56b8f3d00000000d46d663d35e87f3d67e50fbd8859a03da22e8bbd43f6c93d37deb6bd811c0f3daeb1683d765223bdb9ef953d1c24893da7e7f83d15a22dbdd3ac11bee609c13d9f83623d703979bd21b74a3d7b7b033def92aebd70b81a3d730bbd3c5cbd1c3d0ecd17bef7f0df3d8140d63cd8b9843d1d757e3ac997c63d11023ebd00000000937d553d0e57ef3c0edaba3dc82f01be000000807c885d3ded82873d247ae1bd3436e3bd170e7b3d5ab774bd32eb68bef817bdbd6edc573ddbec4f3d4721d1bde3c1623d5ffa0b3df23994bd8a176b3d63e0053deec1d7bd7a5971bd5899683d57945e3d72434fbd92a59cbd4c9e76bde50b94bcc364c5bdab1a3ebd841a96bd88b5363cb8bbd13d548a3f3dd1e69ebd72ce833edbe867bd6f517fbda50d123dc88a3e3d8f2ba93dd51b8a3d83fc993d10a3babddc870cbe05e47fbdc04732be17f885bdfdfe113d7b27913dd82436bd100667bddb16673d808c263dbfbcbfbdfa2c033d6118193dc7b077bde7d4433ddcae0dbdc3a7efbd20dfe03dbb5f7c3d00000000603dbdbab68b933d00000080e852703d98cf833d651615bd4fbfa43dce3a8fbdd0edce3d2297bbbdfe4c143dbdf46f3de7d728bd892b9a3d092a8d3d635efe3dce9033bdcff113be186ac83d49a3693d124780bd8e56513d255a083d402eb3bde733203df262c43c6343223d04aa1abe2d29e53d0392de3cb1a9883d8ea7833ab384cb3d0e5744bd00000000d2555c3d8ed7f63caaa1bf3d462903
be000000801f8e643d88808b3d81a9e6bd24a3e8bd4854813dee347cbdeafb6bbe1474c1bd62c75e3d92a6563d5559d5bda6e2693dbb14113dd06d98bd8065723d3e040a3d071cddbdc1d478bd83d76f3d8d9e653d47f855bde307a1bdaf257ebdce119abce91fcabd9f3a44bdee319abd37ff3e3ccecad63d6be6453d101fa3bd177f853e88256fbdab7483bd6e52173d12e6443da4ecaf3d32268e3d324b9e3d0603bfbd44ea0ebe44cb83bdad6235be57ee89bd933e173da14e953dd84a3cbd7c026ebd384d6e3d9d5f2b3de293c4bd3104083dbe891e3d97b16fbd8d903a3dc1e207bd7219eabdc854db3df7f4733d00000000e850b1ba61f98e3d000000005023653d9b007f3dc20d0fbd7beb9f3dfab88abdd293c93d1e74b6bdb4490e3d3bc7673d755122bd3e7e953d89af883dc493f83d8dbc2cbdcd0711be5457c03d389b613d165078bd9bcf493d41ae023d0226aebd2ddc193d9366bb3c94df1b3d008d17be2995df3d3dbcd43c3446843d9367763ad233c63da91b3dbd000000805b93543d7659ea3c8b72ba3d3a4100be00000080009f5c3dc40f873d922ce1bd32cce0bdcc237a3df4cb73bd988d68bef1ddbabdc7f2563d12034f3d497ccebd01d8613dde280b3d17c693bdf72d6a3dd434033d60b6d7bdb06b70bddbae673d96ab5d3d24584ebdc2339cbdb0b675bd189c92bc81e0c4bd3e2b3dbdbd8e95bd1ac1343c405bd13dbca23e3d396f9ebda2b7833e68ff66bd3d687ebd9a39113d03a53d3df37fa83d74a9893df58c993d4e6db8bdf6030cbedcfa7ebd142532be818285bd7426113d02b5903d4d4035bd28f265bdd02b663da385233d2056bfbd115b023db03c183db67f6bbd2238373dd9f904bd22b7e6bd6a08d83dee556f3d00000000ade0acba75698c3d00000000f81e613d4b3c7a3de7e50bbd952b9d3da23788bdd76dc63d2d7cb3bd59220b3d7e4f633d7cf91ebde1db923d2333863dd90ef53dfb1b29bd309d0fbe4f9dbd3d9c365d3d57a273bd67be453d2576ff3c0b43abbde685163d9d25b73c9e81183dc2bb15be5940dc3dd0c4cf3cd1d5813da770743ad315c33df03939bd00000080845d503d6a53e83ccd6fb73da62cfebd00000080394c583db596843d6201debd1f8fdfbd2971753d652e6fbd538966be80adb9bdc0b3523dd9e04a3ddf6dcdbdbf735d3d780f083d5d2a91bd6ead653d6e14023d8e3fd4bd8fda6bbd9f37633dc054593d64394abd3a8099bd5d0f71bd21308fbccde4c1bdbb5139bd100693bdb242303c8a24ce3d22bc3a3dc7bb9bbdb39e823e728962bd89a779bd5f050e3d61c0393db4f7a53d2828873d21de963dd6f4b6bd38820abe1b387abd071b30be521083bd6bf50d3d5f208e3d217c31bd92a961bd07ba61
3dbd25223d3f48bcbd38d9fe3c41ed143dd523d63e2f30a6be5ce7723ed5b3503f886143bf5a97d9be00000000c1361f3cccf6febe00000000c6a7ccbe1e6ae3be53717f3e548d0ebf056cf73e199933bf669a223feb077ebe93bfcebea700913e864805bf36c6f3be43625dbfe4309a3e1cd6843f05862bbfa03fc9be5676dd3e6810b4be955869be71381b3f575a89be84a727be4f278bbe140b873fb12947bf300e3ebed7eaebbe6f18e1bb9c9930bf87c0a83e00000080a1a9bdbe755354be942526bf6d1c6c3f0000008047d1c4be18e0f0be20e6483f35e1493fbe1adfbe1a73d93e563bcf3fc2fb273f6dc3bfbe63b5b8bec4753a3f2078c9be5a7578bef4c2033f06e3d0beea966dbe5368403f7778d63e0baccebe57bfc5be121fb83eee420b3f4424db3e763c033e51892f3ffbd8a83e686e053fdedaa1bd5d843abfda1faabe45450d3f5488eabf600cce3e4ce3e23ed6a181be523aa9be643b16bf1a7ff5be76e208bf7cdd253f4b25803f6a67e33e789a9e3fe61fee3e8b9781bea90601bfbac0a13ead42cd3e5d54cdbe8fe093be127f2a3fccd368be1ee887be106a0c3eeee1893e82029f3d5725303e5642663f3d48ee3e04d172bdd485bc3d3547ff3ebf8421bebc6bdd3e023dcb3e23feb53e08b1363fd7ad833e92133b3f3091903e0fafc13f51adf03e4b2e0c3e6612e43e92e1dd3e0d091e3fb063833e89ea713e2698003ff850313fb951543ede510b3feed7bd3ff97d843ee29fd73e0161653eeff1ad3e7755773e09023e3ffd34bd3ebdd90e3f0fa2503eee46033feb2b833eef03febd8727d83e8ae5a83f665d0a3f17b35d3ed825d5bba70bf43e5126033f1900253e8a569a3e88b7ba3e7c70703eb6557e3ec9ba7d3e7c13ae3e5830d03ecc7b683e5adeee3ec9c7b83f2190693e9937bc3e9a92b03f24c51b3e101df53d7e77e33eff50233fd3666b3edf57163eee32983e5416253eee1e513e11fe1f3ecdef4e3ef340053e45e4273f99bccd3eb76e623e886d9e3f8fbd5a3eb44f543e27dbc33fd1e4d63eea1a123f46521a3f75a2083fdbf8533e77cf6c3e0bdb8a3eec4fc83e2d98653ec5310c3f5ec9ea3e3bf2513e3fb13f3e7277c13edbe3bf3ea6c69e3ea869d03eee4ba73eb83d76be0000803f
+\ No newline at end of file
+diff --git a/gcc/opts-global.cc b/gcc/opts-global.cc
+index a18c76940..e684bc5e3 100644
+--- a/gcc/opts-global.cc
++++ b/gcc/opts-global.cc
+@@ -39,6 +39,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "attribs.h"
+ #include "asan.h"
+ #include "file-prefix-map.h" /* add_*_prefix_map()  */
++#include "ai4c-infer.h"
+ 
+ typedef const char *const_char_p; /* For DEF_VEC_P.  */
+ 
+@@ -304,6 +305,15 @@ decode_options (struct gcc_options *opts, struct gcc_options *opts_set,
+ 		location_t loc, diagnostic_context *dc,
+ 		void (*target_option_override_hook) (void))
+ {
++  set_cache_info (global_options.x_param_simultaneous_prefetches,
++		  global_options.x_param_l1_cache_size,
++		  global_options.x_param_l1_cache_line_size,
++		  global_options.x_param_l2_cache_size,
++		  global_options.x_param_prefetch_latency,
++		  global_options.x_param_ipa_prefetch_distance_factor);
++  const char *tune_native = getenv ("GCC_AI4C_TUNE_INFO");
++  prepare_native_tune_str (tune_native);
++
+   struct cl_option_handlers handlers;
+ 
+   unsigned int lang_mask;
+-- 
+2.33.0
+
diff --git a/0317-Bugfix-set-default-value-when-tune_native-is-NULL.patch b/0317-Bugfix-set-default-value-when-tune_native-is-NULL.patch
new file mode 100644
index 0000000000000000000000000000000000000000..92f10c2bcd9ec686820fd45abe2bac6dda64bddc
--- /dev/null
+++ b/0317-Bugfix-set-default-value-when-tune_native-is-NULL.patch
@@ -0,0 +1,27 @@
+From bc468838ffa1991e50cb4b82b45154d44302417b Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Tue, 3 Dec 2024 22:39:36 +0800
+Subject: [PATCH 5/5] Bugfix: set default value when tune_native is NULL.
+
+---
+ gcc/gcc.cc | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index 0032735db..90f6dfe85 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -5800,6 +5800,10 @@ do_self_spec (const char *spec)
+   do_spec_1 (" ", 0, NULL);
+ 
+   const char* tune_native = eval_spec_function ("local_cpu_detect", "cpu", "");
++  if (tune_native == NULL)
++    {
++      tune_native = "native";
++    }
+   setenv ("GCC_AI4C_TUNE_INFO", tune_native, 1);
+ 
+   /* Mark %
+Date: Wed, 11 Sep 2024 17:27:19 +0800
+Subject: [PATCH 4/5] add flag -flto-try to enable LTO and automatically skip
+ it in inapplicable situations.
+
+---
+ gcc/collect2.cc    | 51 ++++++++++++++++++++++++++++++++++++++++++-
+ gcc/common.opt     |  8 +++++++
+ gcc/opts-common.cc | 54 +++++++++++++++++++++++++++++++++++++++++++++-
+ gcc/opts.cc        | 20 +++++++++++++++++
+ 4 files changed, 131 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/collect2.cc b/gcc/collect2.cc
+index 9715e8eee..690363880 100644
+--- a/gcc/collect2.cc
++++ b/gcc/collect2.cc
+@@ -200,6 +200,7 @@ static enum lto_mode_d lto_mode = LTO_MODE_WHOPR;
+ #else
+ static enum lto_mode_d lto_mode = LTO_MODE_NONE;
+ #endif
++static bool maybe_relink_without_lto = false;
+ 
+ bool helpflag;			/* true if --help */
+ 
+@@ -751,7 +752,53 @@ do_link (char **ld_argv, const char *atsuffix)
+ 			 PEX_LAST | PEX_SEARCH,
+ 			 HAVE_GNU_LD && at_file_supplied, atsuffix);
+   int ret = collect_wait (prog, pex);
+-  if (ret)
++  if (ret && maybe_relink_without_lto)
++    {
++      bool link_with_lto_plugin_before = false;
++      for (int i = 0, j = -1; ld_argv[i]; ++i)
++	{
++	  if (endswith (ld_argv[i], "liblto_plugin.so"))
++	    {
++	      link_with_lto_plugin_before = true;
++	      for (j = i + 1; ld_argv[j]; ++j)
++		{
++		  if (!startswith (ld_argv[j], "-plugin-opt="))
++		    break;
++		}
++	      for (i = i - 1; ; ++i, ++j)
++		{
++		  ld_argv[i] = ld_argv[j];
++		  if (ld_argv[j] == NULL)
++		    break;
++		}
++	      break;
++	    }
++	}
++      int ret2 = 0;
++      if (link_with_lto_plugin_before)
++	{
++	  fprintf (stderr, "lto link fail, relinking without lto");
++	  lto_mode = LTO_MODE_NONE;
++	  pex = collect_execute (prog, ld_argv, NULL, NULL,
++				 PEX_LAST | PEX_SEARCH,
++				 HAVE_GNU_LD && at_file_supplied, atsuffix);
++	  ret2 = collect_wait (prog, pex);
++	}
++      else
++	  ret2 = ret;
++      if (ret2)
++	{
++	  error ("ld returned %d exit status", ret);
++	  exit (ret);
++	}
++      else
++	{
++	  /* We have just successfully produced an output file, so assume that
++	   we may unlink it if need be for now on.  */
++	  may_unlink_output_file = true;
++	}
++    }
++  else if (ret)
+     {
+       error ("ld returned %d exit status", ret);
+       exit (ret);
+@@ -1009,6 +1056,8 @@ main (int argc, char **argv)
+ 	  num_c_args++;
+ 	if (startswith (q, "-flto-partition=none"))
+ 	  no_partition = true;
++	else if (startswith (q, "-flto-try"))
++	  maybe_relink_without_lto = true;
+ 	else if (startswith (q, "-fno-lto"))
+ 	  lto_mode = LTO_MODE_NONE;
+ 	else if (startswith (q, "-save-temps"))
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 96888cf1b..0895c6114 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -70,6 +70,10 @@ bool flag_warn_unused_result = false
+ Variable
+ int flag_generate_lto
+ 
+; Nonzero if we should try to relink without LTO after a failed LTO link.
++Variable
++int flag_relink_whthout_lto = 0
++
+ ; Nonzero if we should write GIMPLE bytecode for offload compilation.
+ Variable
+ int flag_generate_offload = 0
+@@ -2161,6 +2165,10 @@ flto
+ Common
+ Enable link-time optimization.
+ 
++flto-try
++Common Var(flag_lto_try) Init(0)
++Do link-time optimization as much as possible.
++
+ flto=
+ Common RejectNegative Joined Var(flag_lto)
+ Link-time optimization with number of parallel jobs or jobserver.
+diff --git a/gcc/opts-common.cc b/gcc/opts-common.cc
+index 33c696f3d..176041bfe 100644
+--- a/gcc/opts-common.cc
++++ b/gcc/opts-common.cc
+@@ -1162,7 +1162,50 @@ decode_cmdline_options_to_array (unsigned int argc, const char **argv,
+   struct cl_decoded_option *opt_array;
+   unsigned int num_decoded_options;
+ 
+-  int opt_array_len = argc;
++  enum LTO_SKIP_STAT
++    {
++      NO_NEED_TO_SKIP,
++      NEED_TO_SKIP,
++      ALREADY_SKIP,
++    };
++  LTO_SKIP_STAT lto_skip_stat = NO_NEED_TO_SKIP;
++  bool try_use_lto = false;
++  const char* lto_option_conflict = NULL;
++  const char* wrap_option = "-Wl,--wrap=";
++  const char* start_lib_option = "-Wl,--start-lib";
++  for (i = 1; i < argc; i += 1)
++    {
++      if (startswith (argv[i], "-flto-try"))
++	{
++	  try_use_lto = true;
++	}
++
++      if (startswith (argv[i], wrap_option)
++      	  && (lto_skip_stat == NO_NEED_TO_SKIP))
++	{
++	  lto_option_conflict = wrap_option;
++	  lto_skip_stat = NEED_TO_SKIP;
++	}
++      else if (startswith (argv[i], start_lib_option)
++	       && (lto_skip_stat == NO_NEED_TO_SKIP))
++	{
++	  lto_option_conflict = start_lib_option;
++	  lto_skip_stat = NEED_TO_SKIP;
++	}
++      else if (startswith (argv[i], "-fno-lto"))
++	{
++	  lto_option_conflict = NULL;
++	  lto_skip_stat = ALREADY_SKIP;
++	  break;
++	}
++    }
++  if (!try_use_lto)
++    {
++      lto_skip_stat = NO_NEED_TO_SKIP;
++      lto_option_conflict = NULL;
++    }
++
++  int opt_array_len = lto_skip_stat == NEED_TO_SKIP ? argc + 1 : argc;
+   opt_array = XNEWVEC (struct cl_decoded_option, opt_array_len);
+ 
+   opt_array[0].opt_index = OPT_SPECIAL_program_name;
+@@ -1244,6 +1287,15 @@ decode_cmdline_options_to_array (unsigned int argc, const char **argv,
+   num_decoded_options += handle_machine_option (lang_mask, num_decoded_options,
+ 						argc, argv, opt_array);
+ 
++  if (lto_skip_stat == NEED_TO_SKIP)
++    {
++      const char * nolto = "-fno-lto";
++      fprintf (stderr, "skip lto for %s\n", lto_option_conflict);
++      decode_cmdline_option (&nolto, lang_mask,
++      			     &opt_array[num_decoded_options]);
++      num_decoded_options++;
++    }
++
+   *decoded_options = opt_array;
+   *decoded_options_count = num_decoded_options;
+   prune_options (decoded_options, decoded_options_count, lang_mask);
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 84dd8925a..9ccc22510 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -1143,6 +1143,26 @@ finish_options (struct gcc_options *opts, struct gcc_options *opts_set,
+       SET_OPTION_IF_UNSET (opts, opts_set, param_stack_frame_growth, 40);
+     }
+ 
++  if (opts->x_flag_lto_try)
++    {
++#ifdef ENABLE_LTO
++      if (opts_set->x_flag_lto && opts->x_flag_lto)
++	{
++	  inform (loc, "%<-flto-try%> don't guarantee that lto "
++	  	  "will be enabled.");
++	}
++      opts->x_flag_lto = "";
++      if (opts_set->x_flag_fat_lto_objects && !opts->x_flag_fat_lto_objects)
++	{
++	  error_at (loc, "%<-flto-try%> are not supported with "
++	  	    "-fno-fat-lto-objects");
++	}
++      opts->x_flag_fat_lto_objects = 1;
++#else
++      error_at (loc, "LTO support has not been enabled in this configuration");
++#endif
++    }
++
+   if (opts->x_flag_lto)
+     {
+ #ifdef ENABLE_LTO
+-- 
+2.33.0
+
diff --git a/0319-CSPGO-fix-bugs-when-using-cspgo.patch b/0319-CSPGO-fix-bugs-when-using-cspgo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bd86bb8dfe9ab989bb776e6a0f9f6e813ffe2aaf
--- /dev/null
+++ b/0319-CSPGO-fix-bugs-when-using-cspgo.patch
@@ -0,0 +1,140 @@
+From 610470b1892213afd4ddcf83862667c758724872 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Wed, 4 Dec 2024 16:25:01 +0800
+Subject: [PATCH] [CSPGO] fix bugs when using cspgo
+
+---
+ gcc/opts.cc         | 36 ++++++++++++++++++++++++++----------
+ gcc/tree-profile.cc | 20 ++++++++++++++++++++
+ 2 files changed, 46 insertions(+), 10 deletions(-)
+
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 6ca9dde7e..2433ace06 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "diagnostic-color.h"
+ #include "version.h"
+ #include "selftest.h"
++#include "ai4c-infer.h"
+ 
+ /* In this file all option sets are explicit.  */
+ #undef OPTION_SET_P
+@@ -3086,17 +3087,28 @@ common_handle_option (struct gcc_options *opts,
+       break;
+ 
+     case OPT_fcfgo_profile_use_:
++      opts->x_profile_data_prefix = xstrdup (arg);
++      opts->x_flag_profile_use = true;
++      value = true;
+       /* No break here - do -fcfgo-profile-use processing.  */
+       /* FALLTHRU */
+     case OPT_fcfgo_profile_use:
+-      value = true;
+-      if (value)
++      if (get_optimize_decision_from_ai4c ())
+ 	{
++	  value = true;
+ 	  enable_cfgo_optimizations (opts, opts_set, value);
+ 	  SET_OPTION_IF_UNSET (opts, opts_set, flag_cfgo_profile_use, value);
++	  /* Enable orig fdo optimizations.  */
++	  enable_fdo_optimizations (opts, opts_set, value);
++	  SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_reorder_functions,
++			       value);
++	  /* Indirect call profiling should do all useful transformations
++	     speculative devirtualization does.  */
++	  if (opts->x_flag_value_profile_transformations)
++	    SET_OPTION_IF_UNSET (opts, opts_set, flag_devirtualize_speculatively,
++				 false);
+ 	}
+-      /* No break here - do -fprofile-use processing.  */
+-      /* FALLTHRU */
++      break;
+     case OPT_fprofile_use_:
+       opts->x_profile_data_prefix = xstrdup (arg);
+       opts->x_flag_profile_use = true;
+@@ -3116,10 +3128,10 @@ common_handle_option (struct gcc_options *opts,
+ 
+     case OPT_fcfgo_csprofile_use_:
+       opts->x_csprofile_data_prefix = xstrdup (arg);
+-      value = true;
+       /* No break here - do -fcfgo-csprofile-use processing.  */
+       /* FALLTHRU */
+     case OPT_fcfgo_csprofile_use:
++      value = get_optimize_decision_from_ai4c ();
+       SET_OPTION_IF_UNSET (opts, opts_set, flag_csprofile_use, value);
+       break;
+ 
+@@ -3155,18 +3167,22 @@ common_handle_option (struct gcc_options *opts,
+       break;
+ 
+     case OPT_fcfgo_profile_generate_:
++      opts->x_profile_data_prefix = xstrdup (arg);
++      value = true;
+       /* No break here - do -fcfgo-profile-generate processing.  */
+       /* FALLTHRU */
+     case OPT_fcfgo_profile_generate:
+-      value = true;
+-      if (value)
++      if (get_optimize_decision_from_ai4c ())
+ 	{
+ 	  enable_cfgo_optimizations (opts, opts_set, value);
+ 	  SET_OPTION_IF_UNSET (opts, opts_set, flag_cfgo_profile_generate,
+ 			       value);
+ 	}
+-      /* No break here - do -fprofile-generate processing.  */
+-      /* FALLTHRU */
++      SET_OPTION_IF_UNSET (opts, opts_set, profile_arc_flag, value);
++      SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_values, value);
++      SET_OPTION_IF_UNSET (opts, opts_set, flag_inline_functions, value);
++      SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_bit_cp, value);
++      break;
+     case OPT_fprofile_generate_:
+       opts->x_profile_data_prefix = xstrdup (arg);
+       value = true;
+@@ -3181,10 +3197,10 @@ common_handle_option (struct gcc_options *opts,
+ 
+     case OPT_fcfgo_csprofile_generate_:
+       opts->x_csprofile_data_prefix = xstrdup (arg);
+-      value = true;
+       /* No break here - do -fcfgo-csprofile-generate processing.  */
+       /* FALLTHRU */
+     case OPT_fcfgo_csprofile_generate:
++      value = get_optimize_decision_from_ai4c ();
+       SET_OPTION_IF_UNSET (opts, opts_set, flag_csprofile_generate, value);
+       break;
+ 
+diff --git a/gcc/tree-profile.cc b/gcc/tree-profile.cc
+index aa3a2b3a9..ace1fe31c 100644
+--- a/gcc/tree-profile.cc
++++ b/gcc/tree-profile.cc
+@@ -1114,6 +1114,26 @@ public:
+      to do anything.  */
+   virtual unsigned int execute (function *)
+     {
++      if (!profile_data_prefix)
++	error ("profile_data_prefix must set when using cspgo.");
++
++      if (!csprofile_data_prefix)
++	error ("csprofile_data_prefix must set when using cspgo.");
++
++      if (!flag_cfgo_profile_use)
++	error ("cspgo must used with cfgo-pgo.");
++
++      /* Just compare canonical pathnames.  */
++      char* cfgo_pgo_path = lrealpath (profile_data_prefix);
++      char* cfgo_cspgo_path = lrealpath (csprofile_data_prefix);
++      bool files_differ = filename_cmp (cfgo_pgo_path, cfgo_cspgo_path);
++      if (!files_differ)
++	{
++	  error ("pgo and cspgo path must different between %s and %s",
++		 cfgo_pgo_path, cfgo_cspgo_path);
++	}
++      free (cfgo_pgo_path);
++      free (cfgo_cspgo_path);
+       return 0;
+     }
+ 
+-- 
+2.25.1
+
diff --git a/0320-if-split-fix-bugs.patch b/0320-if-split-fix-bugs.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c8b97ebe5ceb0eded7073abb31d029dcfc1cffc4
--- /dev/null
+++ b/0320-if-split-fix-bugs.patch
@@ -0,0 +1,105 @@
+From 7636e8782aa0dac322c22631c4cd0b60c0eb1842 Mon Sep 17 00:00:00 2001
+From: Zinin Ivan WX1305386 
+Date: Tue, 3 Dec 2024 16:02:15 +0300
+Subject: [PATCH] Fix bugs
+
+Added a check that then_bb has a single successor in process_complex_cond().
+
+Handle cases where then_bb is a predecessor of EXIT and contains a
+return statement. Splitting the edge from the EXIT predecessor to
+EXIT deletes the return statement, and the duplicate of then_bb will
+not contain a return statement either. In such cases we must build
+the return statement in merge_bb ourselves.
+---
+ gcc/gimple-if-split.cc | 35 +++++++++++++++++++++++++++++++++--
+ 1 file changed, 33 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/gimple-if-split.cc b/gcc/gimple-if-split.cc
+index 3446204ea..351515435 100644
+--- a/gcc/gimple-if-split.cc
++++ b/gcc/gimple-if-split.cc
+@@ -38,6 +38,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tree-cfg.h"
+ #include "bitmap.h"
+ #include "cfganal.h"
++#include "cfgloop.h"
+ 
+ /* Perform splitting if-then-else patterns, whose complex OR condition in
+ cond-bb contains comparison of some variable with constant and then-bb got
+@@ -255,6 +256,7 @@ process_complex_cond (basic_block cond_bb, basic_block then_bb,
+   cond_parts_defs defs;
+ 
+   if (!can_duplicate_block_p (then_bb)
++      || !single_succ_p (then_bb)
+       || !necessary_complex_cond_p (cond, then_bb, &defs))
+     return;
+ 
+@@ -345,14 +347,39 @@ static basic_block
+ make_two_separate_calls (basic_block outer_cond_bb, basic_block inner_cond_bb,
+ 			 basic_block then_bb)
+ {
+-  if (!can_duplicate_block_p (then_bb) || EDGE_COUNT (then_bb->succs) != 1)
++  if (!can_duplicate_block_p (then_bb) || !single_succ_p (then_bb))
+     return NULL;
+ 
+   edge outer_then_e = find_edge (outer_cond_bb, then_bb);
+ 
+   /* Making duplication of then_bb.  */
+   basic_block then_bb_dom = get_immediate_dominator (CDI_DOMINATORS, then_bb);
++
++  /* Saving ret_value and then_bb succ edge flags, if then_bb is pred of
++   * EXIT_BLOCK and has return statement inside.  */
++  tree ret_val;
++  int then_bb_succ_edge_flags;
++  if (single_succ (then_bb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
++    {
++      gcc_assert (gimple_code (last_stmt (then_bb)) == GIMPLE_RETURN);
++      ret_val = gimple_return_retval (as_a(last_stmt (then_bb)));
++
++      then_bb_succ_edge_flags = single_succ_edge (then_bb)->flags;
++    }
++
+   basic_block merge_bb = split_edge (single_succ_edge (then_bb));
++
++  /* Building return statement in merge_bb and setting merge_bb succ edge flags,
++   * if now merge_bb is pred of EXIT_BLOCK.  */
++  if (single_succ (merge_bb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
++    {
++      gimple* ret = gimple_build_return (ret_val);
++      gimple_stmt_iterator gsi = gsi_last_bb (merge_bb);
++      gsi_insert_after (&gsi, ret, GSI_NEW_STMT);
++
++      single_succ_edge (merge_bb)->flags = then_bb_succ_edge_flags;
++    }
++
+   basic_block then_bb1 = duplicate_block (then_bb, outer_then_e, outer_cond_bb);
+   edge outer_then1_e = find_edge (outer_cond_bb, then_bb1);
+ 
+@@ -372,6 +399,9 @@ make_two_separate_calls (basic_block outer_cond_bb, basic_block inner_cond_bb,
+   set_immediate_dominator (CDI_POST_DOMINATORS, merge_bb,
+ 			   single_succ (merge_bb));
+ 
++  if (get_immediate_dominator (CDI_POST_DOMINATORS, outer_cond_bb) == then_bb)
++     set_immediate_dominator (CDI_POST_DOMINATORS, outer_cond_bb, merge_bb);
++
+   return then_bb1;
+ }
+ 
+@@ -548,6 +578,7 @@ pass_if_split::execute (function *fun)
+ 
+   checking_verify_ssa (true, true);
+   checking_verify_flow_info ();
++  checking_verify_loop_structure ();
+   checking_verify_dominators (CDI_DOMINATORS);
+   checking_verify_dominators (CDI_POST_DOMINATORS);
+ 
+@@ -564,4 +595,4 @@ gimple_opt_pass *
+ make_pass_if_split (gcc::context *ctxt)
+ {
+   return new pass_if_split (ctxt);
+-}
+\ No newline at end of file
++}
+-- 
+2.33.0
+
diff --git a/0321-Struct-reorg-Avoid-doing-struct-split-and-reorder_fi.patch b/0321-Struct-reorg-Avoid-doing-struct-split-and-reorder_fi.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8f94975d834742ea99ca6882f3bc20106dfec51c
--- /dev/null
+++ b/0321-Struct-reorg-Avoid-doing-struct-split-and-reorder_fi.patch
@@ -0,0 +1,192 @@
+From a96315832872aae9af8ff3f81100b21e82c94072 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Sat, 7 Dec 2024 16:27:28 +0800
+Subject: [PATCH 1/2] [Struct-reorg] Avoid doing struct split and
+ reorder_fields together
+
+Rewrite between struct_split and reorder_fields is
+incompatible, so avoid doing them together.
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      |   7 +-
+ .../struct/rf_rewrite_problem_with_split.c    | 134 ++++++++++++++++++
+ gcc/testsuite/gcc.dg/struct/struct-reorg.exp  |   4 +
+ 3 files changed, 143 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/rf_rewrite_problem_with_split.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index b93b8a5b5..af91f15c5 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -8646,8 +8646,11 @@ public:
+ 
+     if (level >= COMPLETE_STRUCT_RELAYOUT)
+       {
+-	/* Preserved for backward compatibility.  */
+-	ret_reorg = ipa_struct_reorg ().execute (STRUCT_SPLIT);
++	/* Preserved for backward compatibility.
++	   Rewrite between STRUCT_REORDER_FIELDS and STRUCT_SPLIT has unfixed
++	   problem, so avoid using them together.  */
++	if (!ret)
++	  ret_reorg = ipa_struct_reorg ().execute (STRUCT_SPLIT);
+ 	if (!ret_reorg)
+ 	  ret_reorg = ipa_struct_reorg ().execute (COMPLETE_STRUCT_RELAYOUT);
+       }
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_problem_with_split.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_problem_with_split.c
+new file mode 100644
+index 000000000..da357ec18
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_problem_with_split.c
+@@ -0,0 +1,134 @@
++/* { dg-do run } */
++#include "stdio.h"
++#include "stdlib.h"
++#include "time.h"
++#include "string.h"
++#include "limits.h"
++#include "float.h"
++#define JOTAI_NUM_RANDS_ 25
++const unsigned rand_primes[JOTAI_NUM_RANDS_] = {179, 103, 479, 647, 229, 37,
++271, 557, 263, 607, 18743, 50359, 21929, 48757, 98179, 12907, 52937, 64579,
++49957, 52567, 507163, 149939, 412157, 680861, 757751};
++int next_i() {
++  int counter = 0;
++  return rand_primes[(++counter)%JOTAI_NUM_RANDS_];
++}
++typedef unsigned long size_t;  // Customize by platform.
++typedef long intptr_t; typedef unsigned long uintptr_t;
++typedef long scalar_t__;  // Either arithmetic or pointer type.
++typedef int bool;
++#define false 0
++#define true 1
++typedef  struct TYPE_2__   TYPE_1__;
++struct pci_dev {int devfn; TYPE_1__* sriov; int /*<<< orphan*/  is_physfn; };
++struct TYPE_2__ {int offset; int stride; } ;
++int EINVAL ;
++int pci_iov_virtfn_devfn(struct pci_dev *dev, int vf_id)
++{
++  if (!dev->is_physfn)
++    return -EINVAL;
++  return (dev->devfn + dev->sriov->offset +
++    dev->sriov->stride * vf_id) & 0xff;
++}
++int main(int argc, char *argv[]) {
++  int opt = 1;
++  switch(opt) {
++  case 0:
++  {
++    int vf_id = 100;
++    int _len_dev0 = 1;
++    struct pci_dev * dev =
++      (struct pci_dev *) malloc(_len_dev0*sizeof(struct pci_dev));
++    for(int _i0 = 0; _i0 < _len_dev0; _i0++) {
++      dev[_i0].devfn = ((-2 * (next_i()%2)) + 1) * next_i();
++      int _len_dev__i0__sriov0 = 1;
++      dev[_i0].sriov =
++      (struct TYPE_2__ *) malloc(_len_dev__i0__sriov0*sizeof(struct TYPE_2__));
++      for(int _j0 = 0; _j0 < _len_dev__i0__sriov0; _j0++) {
++	dev[_i0].sriov->offset = ((-2 * (next_i()%2)) + 1) * next_i();
++	dev[_i0].sriov->stride = ((-2 * (next_i()%2)) + 1) * next_i();
++      }
++      dev[_i0].is_physfn = ((-2 * (next_i()%2)) + 1) * next_i();
++    }
++    int benchRet = pci_iov_virtfn_devfn(dev,vf_id);
++    printf("%d\n", benchRet); 
++    for(int _aux = 0; _aux < _len_dev0; _aux++) {
++      free(dev[_aux].sriov);
++    }
++    free(dev);
++    break;
++  }
++  case 1:
++  {
++    int vf_id = 255;
++    int _len_dev0 = 65025;
++    struct pci_dev * dev = (struct pci_dev *) malloc(_len_dev0*sizeof(struct pci_dev));
++    for(int _i0 = 0; _i0 < _len_dev0; _i0++) {
++      dev[_i0].devfn = ((-2 * (next_i()%2)) + 1) * next_i();
++      int _len_dev__i0__sriov0 = 1;
++      dev[_i0].sriov = (struct TYPE_2__ *) malloc(_len_dev__i0__sriov0*sizeof(struct TYPE_2__));
++      for(int _j0 = 0; _j0 < _len_dev__i0__sriov0; _j0++) {
++	dev[_i0].sriov->offset = ((-2 * (next_i()%2)) + 1) * next_i();
++      dev[_i0].sriov->stride = ((-2 * (next_i()%2)) + 1) * next_i();
++      }
++      dev[_i0].is_physfn = ((-2 * (next_i()%2)) + 1) * next_i();
++    }
++    int benchRet = pci_iov_virtfn_devfn(dev,vf_id);
++    printf("%d\n", benchRet); 
++    for(int _aux = 0; _aux < _len_dev0; _aux++) {
++      free(dev[_aux].sriov);
++    }
++    free(dev);
++    break;
++  }
++  case 2:
++  {
++    int vf_id = 10;
++    int _len_dev0 = 100;
++    struct pci_dev * dev = (struct pci_dev *) malloc(_len_dev0*sizeof(struct pci_dev));
++    for(int _i0 = 0; _i0 < _len_dev0; _i0++) {
++      dev[_i0].devfn = ((-2 * (next_i()%2)) + 1) * next_i();
++      int _len_dev__i0__sriov0 = 1;
++      dev[_i0].sriov = (struct TYPE_2__ *) malloc(_len_dev__i0__sriov0*sizeof(struct TYPE_2__));
++      for(int _j0 = 0; _j0 < _len_dev__i0__sriov0; _j0++) {
++	dev[_i0].sriov->offset = ((-2 * (next_i()%2)) + 1) * next_i();
++	dev[_i0].sriov->stride = ((-2 * (next_i()%2)) + 1) * next_i();
++      }
++      dev[_i0].is_physfn = ((-2 * (next_i()%2)) + 1) * next_i();
++    }
++    int benchRet = pci_iov_virtfn_devfn(dev,vf_id);
++    printf("%d\n", benchRet); 
++    for(int _aux = 0; _aux < _len_dev0; _aux++) {
++      free(dev[_aux].sriov);
++    }
++    free(dev);
++    break;
++  }
++  case 3:
++  {
++    int vf_id = ((-2 * (next_i()%2)) + 1) * next_i();
++    int _len_dev0 = 1;
++    struct pci_dev * dev = (struct pci_dev *) malloc(_len_dev0*sizeof(struct pci_dev));
++    for(int _i0 = 0; _i0 < _len_dev0; _i0++) {
++      dev[_i0].devfn = ((-2 * (next_i()%2)) + 1) * next_i();
++      int _len_dev__i0__sriov0 = 1;
++      dev[_i0].sriov = (struct TYPE_2__ *) malloc(_len_dev__i0__sriov0*sizeof(struct TYPE_2__));
++      for(int _j0 = 0; _j0 < _len_dev__i0__sriov0; _j0++) {
++	dev[_i0].sriov->offset = ((-2 * (next_i()%2)) + 1) * next_i();
++	dev[_i0].sriov->stride = ((-2 * (next_i()%2)) + 1) * next_i();
++      }
++      dev[_i0].is_physfn = ((-2 * (next_i()%2)) + 1) * next_i();
++    }
++    int benchRet = pci_iov_virtfn_devfn(dev,vf_id);
++    printf("%d\n", benchRet); 
++    for(int _aux = 0; _aux < _len_dev0; _aux++) {
++      free(dev[_aux].sriov);
++    }
++    free(dev);
++    break;
++  }
++  default:
++    break;
++  }
++  return 0;
++}
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+index 687f6609f..1ef26229a 100644
+--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+@@ -43,6 +43,10 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/csr_*.c]] \
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/rf_*.c]] \
+ 	"" "-fipa-reorder-fields -fdump-ipa-all -flto-partition=one -fwhole-program"
+ 
++# -fipa-struct-reorg=2
++gcc-dg-runtest $srcdir/$subdir/rf_rewrite_problem_with_split.c \
++	"" "-fipa-struct-reorg=2 -fdump-ipa-all -flto-partition=one -fwhole-program"
++
+ # -fipa-struct-reorg=3
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfe*.c]] \
+ 	"" "-fipa-struct-reorg=3 -fdump-ipa-all -flto-partition=one -fwhole-program"
+-- 
+2.25.1
+
diff --git a/0322-Bugfix-Create-POINTER_PLUS_EXPR-for-REFERENCE_TYPE.patch b/0322-Bugfix-Create-POINTER_PLUS_EXPR-for-REFERENCE_TYPE.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b832c901c8f8a6a96054b4024f5aacebff600c31
--- /dev/null
+++ b/0322-Bugfix-Create-POINTER_PLUS_EXPR-for-REFERENCE_TYPE.patch
@@ -0,0 +1,26 @@
+From 2c98e0ecaf06bd1ab4b77c85fea6098bfa0b201b Mon Sep 17 00:00:00 2001
+From: Generalov Vasilii WX1339879 
+Date: Wed, 4 Dec 2024 14:41:24 +0300
+Subject: [PATCH 2/2] [Bugfix] Create POINTER_PLUS_EXPR for REFERENCE_TYPE
+
+---
+ gcc/ipa-prefetch.cc | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/ipa-prefetch.cc b/gcc/ipa-prefetch.cc
+index 685f9c267..220287d66 100644
+--- a/gcc/ipa-prefetch.cc
++++ b/gcc/ipa-prefetch.cc
+@@ -2171,7 +2171,8 @@ optimize_function (cgraph_node *n, function *fn)
+     }
+   tree var_type = TREE_TYPE (inc_var);
+   enum tree_code inc_code;
+-  if (TREE_CODE (var_type) == POINTER_TYPE)
++  enum tree_code var_code = TREE_CODE (var_type);
++  if (var_code == POINTER_TYPE || var_code == REFERENCE_TYPE)
+     inc_code = POINTER_PLUS_EXPR;
+   else
+     inc_code = PLUS_EXPR;
+-- 
+2.25.1
+
diff --git a/0323-Bugfix-replace-tmp-pattern-split.patch b/0323-Bugfix-replace-tmp-pattern-split.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2a6d10eb94843f9b2c4f26b45cf4d6be4f976a28
--- /dev/null
+++ b/0323-Bugfix-replace-tmp-pattern-split.patch
@@ -0,0 +1,50 @@
+From d10807504a7f4e58a7dd1fa245d0ccf16227d222 Mon Sep 17 00:00:00 2001
+From: Chernonog Viacheslav 
+Date: Wed, 4 Dec 2024 20:07:23 +0800
+Subject: [PATCH] [Bugfix] replace tmp pattern split
+
+move split before reload
+change split tmp pattern to 3 instructions
+---
+ gcc/config/aarch64/aarch64-simd.md | 19 +++++++++++++------
+ 1 file changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
+index 04592fc90..fb5e355d0 100644
+--- a/gcc/config/aarch64/aarch64-simd.md
++++ b/gcc/config/aarch64/aarch64-simd.md
+@@ -6566,18 +6566,25 @@
+ 	    (match_operand:VDQHSD 1 "register_operand" "w")
+ 	    (match_operand:VDQHSD 2 "half_size_minus_one_operand"))
+ 	  (match_operand:VDQHSD 3 "cmlt_arith_mask_operand")))]
+-  "TARGET_SIMD && flag_cmlt_arith"
++  "TARGET_SIMD && !reload_completed && flag_cmlt_arith"
+   "#"
+-  "&& reload_completed"
+-  [(set (match_operand: 0 "register_operand")
++  "&& true"
++  [(set (match_operand: 0 "register_operand" "=w")
+ 	(lshiftrt:
+ 	  (match_operand:VDQHSD 1 "register_operand")
+ 	  (match_operand:VDQHSD 2 "half_size_minus_one_operand")))
++   (set (match_operand: 4 "register_operand" "w")
++	  (match_operand:VDQHSD 3 "cmlt_arith_mask_operand"))
+    (set (match_dup 0)
+ 	(and:
+-	  (match_dup 0)
+-	  (match_operand:VDQHSD 3 "cmlt_arith_mask_operand")))]
+-  ""
++	  (match_dup 4)
++	  (match_dup 0)))]
++  {
++    if (can_create_pseudo_p ())
++      operands[4] = gen_reg_rtx (mode);
++    else
++      FAIL;
++  }
+   [(set_attr "type" "neon_compare_zero")]
+ )
+ 
+-- 
+2.33.0
+
diff --git a/0324-bugfix-fix-vector-costs-for-hip09.patch b/0324-bugfix-fix-vector-costs-for-hip09.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9483a585d20c6cbd33b68dc5cf1e728352c05143
--- /dev/null
+++ b/0324-bugfix-fix-vector-costs-for-hip09.patch
@@ -0,0 +1,52 @@
+From 8f5c12954adb237685c837cb37c98b7594e9fa61 Mon Sep 17 00:00:00 2001
+From: Mingchuan Wu 
+Date: Tue, 10 Dec 2024 15:50:16 +0800
+Subject: [PATCH] [bugfix] fix vector costs for hip09.
+
+---
+ gcc/config/aarch64/aarch64-cost-tables.h | 6 +++++-
+ gcc/config/aarch64/aarch64.cc            | 4 +++-
+ 2 files changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h
+index dc51d9c2c..06da1b271 100644
+--- a/gcc/config/aarch64/aarch64-cost-tables.h
++++ b/gcc/config/aarch64/aarch64-cost-tables.h
+@@ -872,7 +872,11 @@ const struct cpu_cost_table hip09_extra_costs =
+   },
+   /* Vector */
+   {
+-    COSTS_N_INSNS (1)  /* alu.  */
++    COSTS_N_INSNS (1),  /* alu.  */
++    COSTS_N_INSNS (4),  /* mult.  */
++    COSTS_N_INSNS (1),  /* movi.  */
++    COSTS_N_INSNS (2),  /* dup.  */
++    COSTS_N_INSNS (2)   /* extract.  */
+   }
+ };
+ 
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 829e0da8f..f2444a039 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -499,6 +499,8 @@ static const struct cpu_addrcost_table hip09_addrcost_table =
+     },
+   0, /* pre_modify  */
+   0, /* post_modify  */
++  0, /* post_modify_ld3_st3  */
++  0, /* post_modify_ld4_st4  */
+   0, /* register_offset  */
+   1, /* register_sextend  */
+   1, /* register_zextend  */
+@@ -1910,7 +1912,7 @@ static const struct tune_params hip09_tunings =
+   &hip09_extra_costs,
+   &hip09_addrcost_table,
+   &hip09_regmove_cost,
+-  &hip09_vector_cost,
++  &generic_vector_cost,
+   &generic_branch_cost,
+   &generic_approx_modes,
+   SVE_256, /* sve_width  */
+-- 
+2.33.0
+
diff --git a/0325-gcc-opts-common.cc-Fix-build-with-clang.patch b/0325-gcc-opts-common.cc-Fix-build-with-clang.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b862bf6f8d180d1b5d9b269a08bcb7258ba42c03
--- /dev/null
+++ b/0325-gcc-opts-common.cc-Fix-build-with-clang.patch
@@ -0,0 +1,54 @@
+From 14457b169e1e4cb372d165de3bbdde391e8b817f Mon Sep 17 00:00:00 2001
+From: YunQiang Su 
+Date: Tue, 8 Oct 2024 18:04:01 +0800
+Subject: [PATCH] gcc/opts-common.cc: Fix build with clang
+
+1. For putenv ("AI_INFER_LEVEL=1"), clang complains that C++11 deprecates
+converting a string literal to char *, while putenv expects "char *".
+Let's use setenv, which expects "const char *".
+
+2. Ditto for char *lan in handle_lto_option.
+
+3. In `handle_machine_option`, there is a variable length array,
+     int64_t argv_hw[argc_hw]
+   clang complains about it, and in fact, argc_hw can be a const var.
+---
+ gcc/opts-common.cc | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/opts-common.cc b/gcc/opts-common.cc
+index 12c3f7299..33c696f3d 100644
+--- a/gcc/opts-common.cc
++++ b/gcc/opts-common.cc
+@@ -1053,7 +1053,7 @@ ai_infer_optimization (int argc, const char **argv,
+     dlclose (onnxruntime_lib_handle);
+ 
+   if (model_pred == 1)
+-    putenv ("AI_INFER_LEVEL=1");
++    setenv ("AI_INFER_LEVEL", "1", 1);
+   return model_pred;
+ }
+ 
+@@ -1065,9 +1065,8 @@ handle_lto_option (unsigned int lang_mask,
+ 		   struct cl_decoded_option *&opt_array)
+ {
+   int ret = 0;
+-  char *lan = "";
+   char *compiler = xstrdup (argv[0]);
+-  lan = strrchr (compiler, '/');
++  char *lan = strrchr (compiler, '/');
+   if (lan != NULL)
+     lan ++;
+   else
+@@ -1125,7 +1124,7 @@ handle_machine_option (unsigned int lang_mask,
+     {
+       return ret;
+     }
+-  int argc_hw = 6;
++  const int argc_hw = 6;
+   int64_t argv_hw[argc_hw] = {
+     global_options.x_param_simultaneous_prefetches,
+     global_options.x_param_l1_cache_size,
+-- 
+2.33.0
+
diff --git a/0326-BUGFIX-Fix-build-error-on-risv_64.patch b/0326-BUGFIX-Fix-build-error-on-risv_64.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f37493c9e9ec50686aebd03f9c85fb8ad321dc5e
--- /dev/null
+++ b/0326-BUGFIX-Fix-build-error-on-risv_64.patch
@@ -0,0 +1,1005 @@
+From 19a1074e87577f9b511f382569ac081871e84147 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Sat, 7 Dec 2024 16:05:59 +0800
+Subject: [PATCH] [BUGFIX] Fix build error on risv_64.
+
+---
+ gcc/Makefile.in                 |  10 +-
+ gcc/ai-optimizer.cc             | 395 ++++++++++++++++++++++++++++++++
+ gcc/ai4c-infer.cc               |  46 ++--
+ gcc/ai4c-infer.h                |  26 ++-
+ gcc/c-family/c-common.h         |   2 +
+ gcc/c-family/c-opts.cc          |  21 ++
+ gcc/config/aarch64/aarch64-c.cc |  16 ++
+ gcc/config/aarch64/aarch64.cc   |  15 +-
+ gcc/gcc.cc                      |  39 +---
+ gcc/gcc.h                       |   1 -
+ gcc/optimizer.fdata             |   1 +
+ gcc/opts-common.cc              |  74 +-----
+ gcc/opts-global.cc              |   5 +-
+ 13 files changed, 521 insertions(+), 130 deletions(-)
+ create mode 100644 gcc/ai-optimizer.cc
+ create mode 100644 gcc/optimizer.fdata
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index 5610854e6..65f683bbd 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1735,13 +1735,13 @@ OBJS-libcommon = diagnostic-spec.o diagnostic.o diagnostic-color.o \
+ 	pretty-print.o intl.o \
+ 	sbitmap.o \
+ 	vec.o input.o hash-table.o ggc-none.o memory-block.o \
+-	ai4c-infer.o selftest.o selftest-diagnostic.o sort.o
++	ai4c-infer.o ai-optimizer.o selftest.o selftest-diagnostic.o sort.o
+ 
+ # Objects in libcommon-target.a, used by drivers and by the core
+ # compiler and containing target-dependent code.
+ OBJS-libcommon-target = $(common_out_object_file) prefix.o \
+ 	opts.o opts-common.o options.o vec.o hooks.o common/common-targhooks.o \
+-	hash-table.o file-find.o spellcheck.o ai4c-infer.o selftest.o opt-suggestions.o
++	hash-table.o file-find.o spellcheck.o ai4c-infer.o ai-optimizer.o selftest.o opt-suggestions.o
+ 
+ # This lists all host objects for the front ends.
+ ALL_HOST_FRONTEND_OBJS = $(foreach v,$(CONFIG_LANGUAGES),$($(v)_OBJS))
+@@ -2257,7 +2257,7 @@ gcc-nm.cc: gcc-ar.cc
+ 	cp $^ $@
+ 
+ COLLECT2_OBJS = collect2.o collect2-aix.o vec.o ggc-none.o \
+-  collect-utils.o file-find.o hash-table.o ai4c-infer.o selftest.o
++  collect-utils.o file-find.o hash-table.o ai4c-infer.o ai-optimizer.o selftest.o
+ COLLECT2_LIBS = @COLLECT2_LIBS@
+ collect2$(exeext): $(COLLECT2_OBJS) $(LIBDEPS)
+ # Don't try modifying collect2 (aka ld) in place--it might be linking this.
+@@ -3721,8 +3721,8 @@ install-plugin: installdirs lang.install-plugin s-header-vars install-gengtype
+ 
+ # Install the compiler executables built during cross compilation.
+ install-common: native lang.install-common installdirs
+-	rm -f $(DESTDIR)$(libexecdir)/onnx.fdata
+-	cp $(srcdir)/onnx.fdata $(DESTDIR)$(libexecsubdir)/onnx.fdata
++	rm -f $(DESTDIR)$(libexecdir)/gcc/*.fdata
++	cp $(srcdir)/*.fdata $(DESTDIR)$(libexecdir)/gcc/
+ 	for file in $(COMPILERS); do \
+ 	  if [ -f $$file ] ; then \
+ 	    rm -f $(DESTDIR)$(libexecsubdir)/$$file; \
+diff --git a/gcc/ai-optimizer.cc b/gcc/ai-optimizer.cc
+new file mode 100644
+index 000000000..c3d99dd85
+--- /dev/null
++++ b/gcc/ai-optimizer.cc
+@@ -0,0 +1,395 @@
++/* Lightweight AI Inference Framework.
++   Copyright (C) 2024-2024 Free Software Foundation, Inc.
++This file is part of GCC.
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++#include "config.h"
++#include "system.h"
++#include "ai4c-infer.h"
++
++#define M_OPTION_SIZE  11
++#define M_MODE_SIZE  6
++#define NATIVE_TUNE_SIZE  128
++#define CATS_STRINGS_ROW  34
++#define CATS_STRINGS_COL  65
++#define CATS_STRINGS1_ROW  10
++#define CATS_STRINGS1_COL  65
++#define OFFSET_ROW  6
++#define SCALE_ROW  6
++#define UNITY_ROW  1
++#define COEFFICIENT_ROW  356
++#define COEFFICIENT_COL  10
++#define COEFFICIENT1_ROW  10
++#define COEFFICIENT1_COL  1
++#define INTERCEPTS_ROW  10
++#define INTERCEPTS1_ROW  1
++
++/* Intermediate computation results from the ONNX model.  */
++static char cats_strings[CATS_STRINGS_ROW][CATS_STRINGS_COL];
++static char cats_strings1[CATS_STRINGS1_ROW][CATS_STRINGS1_COL];
++static float offset[OFFSET_ROW];
++static float scale[SCALE_ROW];
++static float unity[UNITY_ROW];
++static float coefficient[COEFFICIENT_ROW][COEFFICIENT_COL];
++static float coefficient1[COEFFICIENT1_ROW][COEFFICIENT1_COL];
++static float intercepts[INTERCEPTS_ROW];
++static float intercepts1[INTERCEPTS1_ROW];
++
++/* Return an integer that represents the comparison result of the
++   two strings.  */
++
++static int
++compare_strings (const void *a, const void *b)
++{
++  const char *str_a = *(const char **)a;
++  const char *str_b = *(const char **)b;
++
++  int len = strlen (str_a) < strlen (str_b) ? strlen (str_a) : strlen (str_b);
++  for (int i = 0; i < len; i++)
++    {
++      char c1 = str_a[i];
++      char c2 = str_b[i];
++      if (ISUPPER (c1) && !ISUPPER (c2))
++	return 0;
++      else if (!ISUPPER (c1) && ISUPPER (c2))
++	return 1;
++      else if (c1 != c2)
++	return c1 < c2;
++    }
++  return strlen (str_a) > strlen (str_b);
++}
++
++/* Return the substring before the first underscore ('_') in the input
++   string.  */
++
++static void
++truncate_prefix (const char *str, char *result)
++{
++  const char *underscore_pos = strchr (str, '_');
++  if (underscore_pos == NULL)
++    {
++      strcpy (result, str);
++      return;
++    }
++
++  size_t len = underscore_pos - str;
++  strncpy (result, str, len + 1);
++  result[len + 1] = '\0';
++}
++
++
++static void
++preprocess (int argc1, const char **argv1, const char *mops,
++	    int argc2, int64_t *argv2, char (*in_options)[1024],
++	    int64_t *in_modes)
++{
++  strcpy (in_options[0], mops);
++
++  const char *output_option = "-o";
++  const char *marco_prefix = "-D";
++  const char *needle = "--param";
++  const char *flag_prefix = "-";
++  const char *default_option = "-default-option";
++  const int default_int_val = 0;
++  int m_size = 0;
++  for (int i = 0; i < argc1; i++)
++    {
++      if (strncmp (argv1[i], marco_prefix, 2) == 0)
++	m_size ++;
++    }
++
++  char *m_options[m_size];
++  char output_file[1024];
++  int m_index = 0;
++  for (int i = 0; i < argc1; i++)
++    {
++      if (strncmp (argv1[i], marco_prefix, 2) == 0)
++	{
++	  m_options[m_index] = (char *)argv1[i];
++	  m_index ++;
++	}
++      if (strcmp (argv1[i], output_option) == 0)
++	truncate_prefix (argv1[i + 1], output_file);
++    }
++
++  strcpy (in_options[1], output_file);
++  int in_options_size = 2;
++  qsort (m_options, m_size, sizeof (m_options[0]), compare_strings);
++  for (int i = 0; i < m_size && in_options_size < M_OPTION_SIZE; i++)
++    {
++      strcpy (in_options[in_options_size], m_options[i]);
++      in_options_size ++;
++    }
++
++  for (int i = 0; i < argc1 && in_options_size < M_OPTION_SIZE; i++)
++    {
++      if (strncmp (argv1[i], marco_prefix, 2) != 0
++	  && strcmp (argv1[i], output_option) != 0
++	  && strncmp (argv1[i], needle, 7) != 0
++	  && strncmp (argv1[i], flag_prefix, 1) == 0)
++	{
++	  strcpy (in_options[in_options_size], argv1[i]);
++	  in_options_size ++;
++	}
++    }
++
++  while (in_options_size < M_OPTION_SIZE)
++    {
++      strcpy (in_options[in_options_size], default_option);
++      in_options_size ++;
++    }
++
++  /* Use sha256 to encrypt the input.  */
++  char hash[65];
++  char input[64];
++  for (int i = 0; i < M_OPTION_SIZE; i++)
++    {
++      execute_sha256 (in_options[i], hash, sizeof (hash));
++      strcpy (in_options[i], hash);
++    }
++
++  for (int i = 0; i < argc2 && i < M_MODE_SIZE; i++)
++    {
++      if (i < argc2)
++	in_modes[i] = argv2[i];
++      else
++	in_modes[i] = default_int_val;
++    }
++}
++
++/* To read model parameter information from optimizer.fdata and store it into
++   the appropriate arrays.  */
++
++static void
++fill_node (const char *file_name)
++{
++  FILE *file = fopen (file_name, "rb");
++
++  if (!file)
++    {
++      perror ("Can not open file.");
++      return;
++    }
++
++   /* Read cats_strings from optimizer.fdata.  */
++  char hex_string[2];
++  for (int i = 0; i < CATS_STRINGS_ROW; i++)
++    {
++      for (int j = 0; j < CATS_STRINGS_COL - 1; j++)
++	{
++	  if (fscanf (file, "%2s", hex_string) != 1)
++	    {
++	      perror ("Can not read cats_strings from optimizer.fdata.");
++	      return;
++	    }
++	  cats_strings[i][j] = (unsigned char) strtol(hex_string, NULL, 16);
++	}
++      cats_strings[i][CATS_STRINGS_COL - 1] = '\0';
++    }
++
++  /* Read cats_strings1 from optimizer.fdata.  */
++  for (int i = 0; i < CATS_STRINGS1_ROW; i++)
++    {
++      for (int j = 0; j < CATS_STRINGS1_COL - 1; j++)
++	{
++	  if (fscanf (file, "%2s", hex_string) != 1)
++	    {
++	      perror ("Can not read cats_strings1 from optimizer.fdata.");
++	      return;
++	    }
++	  cats_strings1[i][j] = (unsigned char) strtol(hex_string, NULL, 16);
++	}
++      cats_strings1[i][CATS_STRINGS1_COL - 1] = '\0';
++    }
++
++  /* Read offset from optimizer.fdata.  */
++  for (int i = 0; i < OFFSET_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      offset[i] = result;
++    }
++
++  
++  /* Read scale from optimizer.fdata.  */
++  for (int i = 0; i < SCALE_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      scale[i] = result;
++    }
++
++  /* Read unity from optimizer.fdata.  */
++  for (int i = 0; i < UNITY_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      unity[i] = result;
++    }
++
++  /* Read coefficient from optimizer.fdata.  */
++  for (int i = 0; i < COEFFICIENT_ROW; i++)
++    for (int j = 0; j < COEFFICIENT_COL; j++)
++      {
++	float result = read_float_from_file (file);
++	coefficient[i][j] = result;
++      }
++
++  /* Read coefficient1 from optimizer.fdata.  */
++  for (int i = 0; i < COEFFICIENT1_ROW; i++)
++    for (int j = 0; j < COEFFICIENT1_COL; j++)
++      {
++	float result = read_float_from_file (file);
++	coefficient1[i][j] = result;
++      }
++
++  /* Read intercepts from optimizer.fdata.  */
++  for (int i = 0; i < INTERCEPTS_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      intercepts[i] = result;
++    }
++
++  /* Read intercepts1 from optimizer.fdata.  */
++  for (int i = 0; i < INTERCEPTS1_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      intercepts1[i] = result;
++    }
++
++  fclose (file);
++  return;
++}
++
++/* The process of model inference.  */
++
++static int
++graph_infer (int argc1, const char **argv1, const char *mops,
++             int argc2, int64_t *argv2)
++{
++  char *gcc_exec_prefix = getenv ("ONNX_FDATA_PATH");
++  if (gcc_exec_prefix == NULL)
++    return 0;
++  char native_file[512];
++
++  if (gcc_exec_prefix)
++    {
++      const char *onnx_fdata = "optimizer.fdata";
++      strncpy (native_file, gcc_exec_prefix, sizeof (native_file) - 1);
++      native_file[sizeof (native_file) - 1] = '\0';
++      char *last_slash = strrchr (native_file, '/');
++      if (last_slash)
++	strcpy (last_slash + 1, onnx_fdata);
++    }
++
++  if (access (native_file, F_OK) == 0)
++    fill_node (native_file);
++  else
++    return 0;
++
++  static int64_t in_modes[M_MODE_SIZE];
++  static char in_options[M_OPTION_SIZE][1024];
++
++  preprocess (argc1, argv1, mops, argc2, argv2, in_options, in_modes);
++
++  /* concat_result and encoder_out are intermediate computation results from
++     the ONNX model. concat_result is a 1 × 18 matrix, and encoder_out is a
++     1 × 12 matrix.  */
++
++  const int concat_out_size = 350;
++  float concat_result[concat_out_size];
++  const int encoder_out_size = 34;
++  const int encoder_last_size = 10;
++  int concat_size = 0;
++  const int size = encoder_out_size;
++
++  for (int i = 1; i < M_OPTION_SIZE; i++)
++    {
++      float encoder_out[size];
++      one_hot_encoder (in_options[i], cats_strings, encoder_out, size);
++      line_concat (encoder_out, size, concat_result, concat_size);
++      concat_size += size;
++    }
++
++  float encoder_out2[encoder_last_size];
++  one_hot_encoder (in_options[0], cats_strings1, encoder_out2,
++		   encoder_last_size);
++  line_concat (encoder_out2, encoder_last_size, concat_result, concat_size);
++  concat_size += encoder_last_size;
++
++  float variable[M_MODE_SIZE];
++  imputer (in_modes, M_MODE_SIZE, variable);
++  float variable1[M_MODE_SIZE];
++  scaler (variable, offset, scale, M_MODE_SIZE, variable1);
++
++  float transformed_column[concat_out_size + M_MODE_SIZE];
++  /* line_concat is used to store the inputs contiguously (TODO: confirm intent).  */
++  line_concat (variable1, M_MODE_SIZE, transformed_column, 0);
++  line_concat (concat_result, concat_out_size, transformed_column, 6);
++
++  /* This requires performing matrix multiplication between a 1 × 356 matrix
++     and an 356 × 10 matrix  */
++
++  const int m = 1, k = 356, n = 10;
++  float mul_result[n];
++  matmul (transformed_column, coefficient[0], m, k, n, mul_result);  
++
++  float add_result[n];
++  add (mul_result, intercepts, n, add_result);
++
++  float next_activations[n];
++  relu (add_result, n, next_activations);
++  
++  /* This requires performing matrix multiplication between a 1 × 10 matrix
++     and an 10 × 1 matrix  */
++
++  const int m2 = 1, k2 = 10, n2 = 1;
++  float mul_result1[n2];
++  matmul (next_activations, coefficient1[0], m2, k2, n2, mul_result1);
++
++  float add_result1[n2];
++  add (mul_result1, intercepts1, n2, add_result1);
++
++  float out_activations_result[n2];
++  sigmoid (add_result1, n2, out_activations_result);
++
++  float negative_class_proba[n2];
++  sub (unity, out_activations_result, n2, negative_class_proba);
++  const int prob_size = n2 + n2;
++  float probabilities[prob_size];
++  line_concat (negative_class_proba, n2, probabilities, 0);
++  line_concat (out_activations_result, n2, probabilities, n2);
++
++  int argmax_output = argmax (probabilities, prob_size);
++  return argmax_output;
++}
++
++int
++get_optimize_decision_from_optimizer (int argc, const char **argv,
++				      const char *mops, int argc2,
++				      int64_t *argv2)
++{
++  int model_pred = graph_infer (argc, argv, mops, argc2, argv2);
++  if (model_pred == 1)
++    {
++      putenv ("AI_INFER_LEVEL=1");
++    }
++  return model_pred;
++}
+diff --git a/gcc/ai4c-infer.cc b/gcc/ai4c-infer.cc
+index 99f7a6b45..42922e1ca 100644
+--- a/gcc/ai4c-infer.cc
++++ b/gcc/ai4c-infer.cc
+@@ -61,6 +61,12 @@ static int64_t optimize_result;
+ void
+ prepare_native_tune_str (const char *info)
+ {
++  if (info == NULL)
++    {
++      strcpy (native_tune, "=native+");
++      return;
++    }
++
+   gcc_assert (strlen (info) < NATIVE_TUNE_SIZE);
+   if (info)
+     strcpy (native_tune, info);
+@@ -83,7 +89,7 @@ set_cache_info (int prefetches, int l1_cache_size,
+ 
+ /* Read float from onnx.fdata.  */
+ 
+-float static
++float
+ read_float_from_file (FILE* file)
+ {
+   char hex_float[8];
+@@ -196,7 +202,7 @@ fill_node (const char *file_name)
+   return;
+ }
+ 
+-static void
++void
+ matmul (const float *lhs, const float *rhs, int m, int k, int n, float *out)
+ {
+   for (int i = 0; i < m; i++)
+@@ -212,7 +218,7 @@ matmul (const float *lhs, const float *rhs, int m, int k, int n, float *out)
+     }
+ }
+ 
+-static void
++void
+ add (const float *lhs, const float *rhs, int length, float *out)
+ {
+   for (int i = 0; i < length; i++)
+@@ -221,7 +227,7 @@ add (const float *lhs, const float *rhs, int length, float *out)
+     }
+ }
+ 
+-static void
++void
+ sub (const float *lhs, const float *rhs, int length, float *out)
+ {
+   for (int i = 0; i < length; i++)
+@@ -230,7 +236,7 @@ sub (const float *lhs, const float *rhs, int length, float *out)
+     }
+ }
+ 
+-static void
++void
+ sigmoid (const float *in, int length, float *out)
+ {
+   for (int i = 0; i < length; i++)
+@@ -239,7 +245,7 @@ sigmoid (const float *in, int length, float *out)
+     }
+ }
+ 
+-static void
++void
+ relu (const float *data, int length, float *out)
+ {
+   for (int i = 0; i < length; i++)
+@@ -255,14 +261,14 @@ relu (const float *data, int length, float *out)
+     }
+ }
+ 
+-static void
++void
+ line_concat (const float *in, int in_size, float *out, int out_size)
+ {
+   for (int i = 0; i < in_size; i++)
+     out[out_size + i] = in[i];
+ }
+ 
+-static void
++void
+ one_hot_encoder (const char *in, const char (*cats)[65], float *out,
+ 		 int out_size)
+ {
+@@ -279,14 +285,14 @@ one_hot_encoder (const char *in, const char (*cats)[65], float *out,
+     }
+ }
+ 
+-static void
++void
+ imputer (const int64_t *in, int size, float *out)
+ {
+   for (int i = 0; i < size; i++)
+     out[i] = in[i] * 1.0f;
+ }
+ 
+-static void
++void
+ scaler (const float *in, const float *offset, const float *scale, int size,
+ 	float *out)
+ {
+@@ -294,7 +300,7 @@ scaler (const float *in, const float *offset, const float *scale, int size,
+     out[i] = (in[i] - offset[i]) * scale[i];
+ }
+ 
+-static int
++int
+ argmax (const float *in, int in_size)
+ {
+   int out_idx = 0;
+@@ -327,7 +333,20 @@ preprocess (int argc, int64_t *argv, int64_t *in_modes)
+ static int
+ graph_infer (int argc, const char *argv, int argc2, int64_t *argv2)
+ {
+-  const char *file_name = getenv ("GCC_AI4C_ONNX_FDATA");
++  char *gcc_exec_prefix = getenv ("ONNX_FDATA_PATH");
++  if (gcc_exec_prefix == NULL)
++    return 0;
++  char file_name[512];
++
++  if (gcc_exec_prefix)
++    {
++      const char *onnx_fdata = "onnx.fdata";
++      strncpy (file_name, gcc_exec_prefix, sizeof (file_name) - 1);
++      file_name[sizeof (file_name) - 1] = '\0';
++      char *last_slash = strrchr (file_name, '/');
++      if (last_slash)
++	strcpy (last_slash + 1, onnx_fdata);
++    }
+ 
+   if (access (file_name, F_OK) == 0)
+     {
+@@ -401,7 +420,8 @@ graph_infer (int argc, const char *argv, int argc2, int64_t *argv2)
+   return argmax_output;
+ }
+ 
+-void execute_sha256 (const char *input, char *output, size_t output_size)
++void
++execute_sha256 (const char *input, char *output, size_t output_size)
+ {
+     char command[256];
+     snprintf (command, sizeof (command), "echo -n \"%s\" | sha256sum", input);
+diff --git a/gcc/ai4c-infer.h b/gcc/ai4c-infer.h
+index 7fb75900b..fa5156ab1 100644
+--- a/gcc/ai4c-infer.h
++++ b/gcc/ai4c-infer.h
+@@ -21,9 +21,25 @@
+ #ifndef AI4C_INFER_H
+ #define AI4C_INFER_H
+ 
++extern void matmul (const float *, const float *, int, int, int, float *);
++extern void add (const float *, const float *, int, float *);
++extern void sub (const float *, const float *, int, float *);
++extern void sigmoid (const float *, int, float *);
++extern void relu (const float *, int, float *);
++extern void line_concat (const float *, int, float *, int);
++extern void one_hot_encoder (const char *, const char (*)[65], float *, int);
++extern void imputer (const int64_t *, int, float *);		 
++extern void scaler (const float *, const float *, const float *, int, float *);
++extern int argmax (const float *, int);
++
++extern void
++execute_sha256 (const char *, char *, size_t);
++extern float read_float_from_file (FILE*);
++
+ extern int get_optimize_decision_from_ai4c ();
+-extern void set_cache_info (int prefetches, int l1_cache_size, 
+-			    int l1_cache_line_size, int l2_cache_size,
+-			    int prefetch_latency, int prefetch_distance_factor);
+-extern void prepare_native_tune_str (const char *info);
+-#endif /* AI4C_INFER_H */
+\ No newline at end of file
++extern int get_optimize_decision_from_optimizer (int, const char **,
++						 const char *, int ,
++						 int64_t *);
++extern void set_cache_info (int, int, int, int, int, int);
++extern void prepare_native_tune_str (const char *);
++#endif /* AI4C_INFER_H */
+diff --git a/gcc/c-family/c-common.h b/gcc/c-family/c-common.h
+index d1503c5a7..47c502e13 100644
+--- a/gcc/c-family/c-common.h
++++ b/gcc/c-family/c-common.h
+@@ -940,6 +940,8 @@ extern void set_compound_literal_name (tree decl);
+ 
+ extern tree build_va_arg (location_t, tree, tree);
+ 
++extern void deferred_opts_add_macro_front (const char *);
++
+ extern const unsigned int c_family_lang_mask;
+ extern unsigned int c_common_option_lang_mask (void);
+ extern void c_common_diagnostics_set_defaults (diagnostic_context *);
+diff --git a/gcc/c-family/c-opts.cc b/gcc/c-family/c-opts.cc
+index 744b54dc3..4cde773bf 100644
+--- a/gcc/c-family/c-opts.cc
++++ b/gcc/c-family/c-opts.cc
+@@ -94,6 +94,9 @@ static bool std_cxx_inc = true;
+ /* If the quote chain has been split by -I-.  */
+ static bool quote_chain_split;
+ 
++/* Size of deferred_opts.  */
++static size_t deferred_opts_size;
++
+ /* Number of deferred options.  */
+ static size_t deferred_count;
+ 
+@@ -145,6 +148,23 @@ static struct deferred_opt
+ extern const unsigned int 
+ c_family_lang_mask = (CL_C | CL_CXX | CL_ObjC | CL_ObjCXX);
+ 
++/* Add macro to the front of deferred_opts.  */
++void
++deferred_opts_add_macro_front (const char *arg)
++{
++  /* Allocate a new vec and move elements back.  */
++  auto *new_opts = XNEWVEC (struct deferred_opt, deferred_opts_size + 1);
++  memcpy (new_opts + 1, deferred_opts,
++	  sizeof (struct deferred_opt) * deferred_opts_size);
++  XDELETEVEC (deferred_opts);
++  deferred_opts = new_opts;
++  deferred_opts_size++;
++  deferred_count++;
++
++  deferred_opts[0].code = OPT_D;
++  deferred_opts[0].arg = arg;
++}
++
+ /* Defer option CODE with argument ARG.  */
+ static void
+ defer_opt (enum opt_code code, const char *arg)
+@@ -251,6 +271,7 @@ c_common_init_options (unsigned int decoded_options_count,
+   cpp_opts->warn_dollars = 0;
+ 
+   deferred_opts = XNEWVEC (struct deferred_opt, decoded_options_count);
++  deferred_opts_size = decoded_options_count;
+ 
+   if (c_language == clk_c)
+     {
+diff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc
+index 2d2ac42c4..dd739288c 100644
+--- a/gcc/config/aarch64/aarch64-c.cc
++++ b/gcc/config/aarch64/aarch64-c.cc
+@@ -47,6 +47,21 @@ aarch64_def_or_undef (bool def_p, const char *macro, cpp_reader *pfile)
+     cpp_undef (pfile, macro);
+ }
+ 
++/* Reset the optimize option.
++   After checking the model result, this function can
++   reset the more appropriate options.  */
++static void
++reset_machine_option (struct gcc_options *opts)
++{
++  const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
++  if (ai_infer_level)
++    {
++      auto *cpp_opts = cpp_get_options (parse_in);
++      cpp_opts->macro_use_commandline = 1;
++      deferred_opts_add_macro_front ("OBSTACK_CHUNK_SIZE=65536");
++    }
++}
++
+ /* Define the macros that we always expect to have on AArch64.  */
+ 
+ static void
+@@ -119,6 +134,7 @@ aarch64_define_unconditional_macros (cpp_reader *pfile)
+       cpp_opts->warn_variadic_macros = old_warn_variadic_macros;
+       cpp_opts->cpp_warn_c90_c99_compat = old_cpp_warn_c90_c99_compat;
+     }
++  reset_machine_option(&global_options);
+ }
+ 
+ /* Undefine/redefine macros that depend on the current backend state and may
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 829e0da8f..debb15522 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -18771,6 +18771,7 @@ override_C_optimize_options (struct gcc_options *opts)
+   opts->x_semi_relayout_level = 14;
+   opts->x_flag_ipa_prefetch = 1;
+   opts->x_flag_ipa_ic = 1;
++  opts->x_flag_cmlt_arith = 1;
+ }
+ 
+ /* Check whether in CPP language or LTO with only CPP language.  */
+@@ -18872,23 +18873,28 @@ reset_machine_option (struct gcc_options *opts)
+   const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
+   if (ai_infer_level)
+     {
++      char *collect_gcc = getenv("COLLECT_GCC");
++      const char* gcc_exec = basename(ASTRDUP(collect_gcc));
++      if (gcc_exec == NULL)
++	{
++	  return;
++	}
+       override_optimize_options_1 (opts);
+-      if (lang_c_p ())
++      if (strstr(gcc_exec, "gcc") != NULL)
+ 	{
+ 	  override_C_optimize_options (opts);
+ 	}
+-      else if (lang_cpp_p ())
++      else if (strstr(gcc_exec, "g++") != NULL)
+ 	{
+ 	  override_CPP_optimize_options (opts);
+ 	}
+-      else if (lang_GNU_Fortran ())
++      else if (strstr(gcc_exec, "gfortran") != NULL)
+ 	{
+ 	  override_Fortran_optimize_options (opts);
+ 	}
+     }
+ }
+ 
+-
+ /* STMT_COST is the cost calculated for STMT_INFO, which has cost kind KIND
+    and which when vectorized would operate on vector type VECTYPE.  Add the
+    cost of any embedded operations.  */
+@@ -20348,7 +20354,6 @@ aarch64_override_options_internal (struct gcc_options *opts)
+       && aarch64_tune_params.prefetch->default_opt_level >= 0
+       && opts->x_optimize >= aarch64_tune_params.prefetch->default_opt_level)
+     opts->x_flag_prefetch_loop_arrays = 1;
+-
+   reset_machine_option (opts);
+   aarch64_override_options_after_change_1 (opts);
+ }
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index 90f6dfe85..179d507f2 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -5799,10 +5799,13 @@ do_self_spec (const char *spec)
+   do_spec_2 (spec, NULL);
+   do_spec_1 (" ", 0, NULL);
+ 
+-  const char* tune_native = eval_spec_function ("local_cpu_detect", "cpu", "");
++  const char* tune_native = NULL;
++#if defined (__x86_64__) || defined (__aarch64__)
++  tune_native = eval_spec_function ("local_cpu_detect", "cpu", "");
++#endif
+   if (tune_native == NULL)
+     {
+-      tune_native = "native";
++      tune_native = "=native+";
+     }
+   setenv ("GCC_AI4C_TUNE_INFO", tune_native, 1);
+ 
+@@ -8129,7 +8132,6 @@ driver::main (int argc, char **argv)
+   putenv_COLLECT_AS_OPTIONS (assembler_options);
+   putenv_COLLECT_GCC (argv[0]);
+   maybe_putenv_COLLECT_LTO_WRAPPER ();
+-  putenv_ONNX_FDATA ();
+   maybe_putenv_OFFLOAD_TARGETS ();
+   handle_unrecognized_options ();
+ 
+@@ -8187,6 +8189,9 @@ driver::expand_at_files (int *argc, char ***argv) const
+ void
+ driver::decode_argv (int argc, const char **argv)
+ {
++  const char* libexec_path = standard_libexec_prefix;
++  if (libexec_path)
++    setenv ("ONNX_FDATA_PATH", libexec_path, 1);
+   init_opts_obstack ();
+   init_options_struct (&global_options, &global_options_set);
+ 
+@@ -8560,34 +8565,6 @@ driver::putenv_COLLECT_GCC (const char *argv0) const
+   xputenv (XOBFINISH (&collect_obstack, char *));
+ }
+ 
+-/* Set up to remember the pathname of the onnx.fdata.  */
+-
+-void
+-driver::putenv_ONNX_FDATA () const
+-{
+-  char *lto_wrapper_file;
+-  lto_wrapper_file = find_a_program ("lto-wrapper");
+-
+-  if (lto_wrapper_file)
+-    {
+-      lto_wrapper_file = convert_white_space (lto_wrapper_file);
+-      char native_file[512];
+-      const char *onnx_fdata = "onnx.fdata";
+-      strncpy (native_file, lto_wrapper_file, sizeof (native_file) - 1);
+-      native_file[sizeof (native_file) - 1] = '\0';
+-      char *last_slash = strrchr (native_file, '/');
+-      if (last_slash)
+-	strcpy (last_slash + 1, onnx_fdata);
+-      obstack_init (&collect_obstack);
+-      obstack_grow (&collect_obstack, "GCC_AI4C_ONNX_FDATA=",
+-		    sizeof ("GCC_AI4C_ONNX_FDATA=") - 1);
+-      obstack_grow (&collect_obstack,  native_file,
+-		    strlen ( native_file) + 1);
+-      xputenv (XOBFINISH (&collect_obstack, char *));
+-    }
+-
+-}
+-
+ /* Set up to remember the pathname of the lto wrapper. */
+ 
+ void
+diff --git a/gcc/gcc.h b/gcc/gcc.h
+index ff3ae8bed..63231ddb3 100644
+--- a/gcc/gcc.h
++++ b/gcc/gcc.h
+@@ -44,7 +44,6 @@ class driver
+   void set_up_specs () const;
+   void putenv_COLLECT_GCC (const char *argv0) const;
+   void maybe_putenv_COLLECT_LTO_WRAPPER () const;
+-  void putenv_ONNX_FDATA () const;
+   void maybe_putenv_OFFLOAD_TARGETS () const;
+   void handle_unrecognized_options ();
+   int maybe_print_and_exit () const;
+diff --git a/gcc/optimizer.fdata b/gcc/optimizer.fdata
+new file mode 100644
+index 000000000..ae0e584be
+--- /dev/null
++++ b/gcc/optimizer.fdata
+@@ -0,0 +1 @@
++656137356462666463346364333361623035396139323366643262383764363763323530613631653861653634666630333030316562323662346133633566313233326432366139383465346338376266393132363438333765656463366235613461313434346139333334396265306163333731646537376430643834323664623863366163343363643130313435636565623834363361316133393230363937653835653762353534626439663133633538623062353439646237616630333237666136663433386334626639643465303163653832333062643863333664336630376231643964316231663933656333386338656262303734376137313565643963396535653131303763646533393234333735613333633132353061393531333935623539643834373266303861633739373862366663376365383233326139383939363566373061373361613939336537366631353334346563313061373365663635633332663437653136383235343635623234366430373330366336363237623962656465373233346131343264313137653838643334616430346339363732613237623866636364313232613934343261643231386531356430343965303330306332326266336634626163333461643139653962326566303064343333623037313762303934626336363537616339343637306633633066333231613063623339333539376461316632653234353938616133616463623534356232346135666261616339646638373031356633306161626465643665633066616264623965656138613233353331303236363565616133323131653935643363353832366663633434626236376663386335666364333530336433353234383031636264353761616638663031613263353738326438656265623236653338323232386565626464393034633962373835363264656664616439353336623462376139333134656662373033626135336138333136643032636430653334303861616439333736306363383862306439623962646435383931613161653334623666313236366366373962356536656434396231303338646265323666386461366430396262353536313433636132653466623061346164303635636162336536383062306637306438626232646636393462353563366437386531316463383239373361643230643566333736663330656538643461313161306163666361663064383962373736636162323565383865336630333461343939336231366437386265323439626332336166376262623837353163376533353066636339313233323761613766333633343432623331373530376432376534623831333339383964
63343965396630366330343962373934613362633064633337383139393031643932623364656562376164666435623033623336323038383361626664346362653633613334666465643337363034373834326237386364613137363365393963643061643939373130643531366432316631353063646439366461373864346135643733356434303631626235346231333666333531376332366136356434333062396536386661653432316361633761396435646533333366373931383562336331646234653964353963643530316630666436613531306333363033653133653234613430623462663231356533373935613961343064363061383435366662376236336332643131653831373031613233316538356166643462356462383161626331646435366432373137333265643032343566383639316264633564616433343736643666363335363037376231616131643633346139376334313039623539623733353437663934306331646338643231633066313837333838663264336138656563666565376664383637393032666434616333316261316339666466353162616132613433376266633039326131663733326532613866303636373633373432623037373061366532366533326632626535653437613337333039346633663962343138386338343364356334626561356132613461633462626264663766616337636533396232303632353733373731353431306632336462393730353462366562323730656663636662376362343137366638323638313432636561313665323862323164633935393837303065663164623865396164343465376531383431623134623132356363663335613137653863666661386630363832336264343663663835366534616632363362386161663936386261383530623464396166666265633439336661306665306138333365396534363365383761326363653063386330386131336363636134613261343136396233653961346331336663393463376136376162303939636539326363363865613437616261663136383861623861313732303266386461303531336335333539643237353036323366323434313634633966343533383465663036653735333630613736643464626430346630363366363038636363633738306631663132383635313539313464613937303532363734616430396532353666633539386463636434643338356461643538386236646432613033653631393463613938643837346233316236623062316532353333626331343038643064366138626163306131363161363931323231373438313633646463623132313033623161313137333663623865663535
3635626666303535663331363332353338313363396334313631333034656133666365353561643830356565633137346638643739636136376432303761633436666465336232356236653164353163346461656165333038653266336161383966633961323462376262386430363536323932373263343731313562356139336265383765336139643837333966623265386131666561366161333138353261663139303338393733346335313934363761393137633266616431663436343236613231663865636236346133616662623761373633663830623231393063616534633032316538626436633731643261313866666339353836373133363939623966636239333637373764323863663964376134323134613133346335326162363334363334386666396666336534646565616264363966356566323465346538613762623864303766646338666264643466666537303263623162326539653435643130313061386235623631306232303139633962653661303038376162323666343263356465313035393535313531373665623537363237373934366266303434333463303562646535623762313565326631393061643033613838363963333933396162363834393636613136613036656435356339616665636134313034663335343465646335333738663063616330353634316466336438373835326638633037656234396239303730613338336332613839323837643561333235393366323531303032666436616435313137313166383433396531626137353932616538333330653164326438656166343339363262366264326632376564396434396333356565343733383137666333386462313164333630303936336137323863313634613031393931613164363237643262353162376133643935373036306336346161376563383862316365613139376535626535626234666331363163303835336362393535613530636234643633626364386566396132333232373733633230393865663664636334393131643133383736373833653261383661623632343237653937366164386564633433396130313166306430316134653864643263343835653438383687144d4395a52c41e3842b41b03f3743f207264565084e41e0fbdf3bd429b13da15eb33d7e47b93b309daa3890d2d93d0000803f04a59dba191d513e33226b3acb4c80be39e785bb0bb9f63c65cdfb38ac15eebd303d01bb4419f23a55cf06bc7ff1453cfd78433b0bf6febd9013433b3b6da33db673a03b2b9491bd7ac8e03b25dbe63b684e0dbcbb0a3ebdc44800bdbcc493bd93e601bd4b6484bd0a60f4bc60dceb3d70e9debcf709e2bcfbb3e83bbdf4023e0ae182
bcd48370bd6b8bb5bc431600be698b0abde7c93bbd7d1e83bcc37df5bc6bc82abc8cb32c3d068300bdd32812be02a169bc1040c7be90c1b2bb666f10becbb08fbcf74b17bc1b1602baf138b43d5224843abfb0d4bd60cdbe3a82a4993c8ae4a63a8c9fa8bdc8a86b3afcabb83a4f68c93e8af795bed2d6ff3e5dc599bec525003f902e14bf7e73e93eea321d3e3bf3d13eb012e03e90fde23e313b7bbeef3fe23ec1eb88be7e3eea3e4d0431bfff85d93eaf825c3e333dc33e4d1ad93e44b8cf3e02d697bec0bbf93e84ba9cbe99cbea3ebef11dbf493be73e1640303d18f7ce3e3e59e63e4f3c0a3f3f3e73be1d3cdb3eda1a7dbe56e3d83e8f333abf6d5fcb3e7268383e67b9b13e156ac73e78c507be519081be4deef33e857f81be2959073fb3003ebfd134eb3ee775bf3e5ce8d63e5cf3e63ead860fbef58193bec791043ffaf29cbe689e063fe7772ebf8659fc3ecac0a33e0be3df3ea97afa3ee85f82bdaf0398be529dfe3ef1a39cbee6cdfa3e616738bf1314f43e2425023e5e10d23efe12ed3e3069b8be5e9d7abe7bc2f23ee6b27fbe498d143f2bf545bf934f013f53442a3f23dfe13ec02fe13e48d3cbbd07bc62be8851013f064a90be5f340e3f2a8f42bfa2bcf63e60cdef3ec14dde3e41c9de3ebc90113f286f3cbe86e5db3ed02474bef976ec3e177231bfd8f5e43e9cd5d93ee0fdc23e3c93cf3e6035c03e176199bedfb0f43ee0f39bbe9c8fe73eded525bf8412e33e7485393d99acca3ec677e13e8a5bc83e5ac5a1be2528fb3ec1b9a2be4bd1f03ed92417bf2ae7ea3e7ebcc63c3b37d23edcfee73eb381c6bb8e48d73b5463ca3abdb1b13a9904c23b8f9948393f7ecb3aea64a0bde43c883bde8ce03afdab793cb6d945bc09d0a33c5bd255bc5c04a03c14a8243d153aaa3c3cd991bc6a70913cc9a38a3ca956d6b9ddfa903c944948bbbb53903ce48761bb6e0a0abca78a8dbb728827bd1d7904bc7ea513bc9f89cfbbdab6b23c30c117bc6dbbae3c19b1d5badb1d90bbb6d427bcd329ddbc3f8f1dbc879137bcb972d63a89bc4c3b9619283b9b4b133a8c65e13b3901103d9c7e643bbf128fbdef865d3b7f258b3b033a123be896e9baee8caa3c4efb683c7998c33c4aed8b3d50a2ad3c7aff60bd678b973c45cca63c73d7c6baf325473b210f1c3c0ec3173bba9d463cf2b3583df487003ced3709bdbb3eeb3b5210f63b41eb303b9665293c7953a73bf34d5c3cab738b3c1f0cf03cd77a813b1d6947bdbf322c3b25a7c6397325e93acb3c953a2266d33ba8ea28bbf4e2d53b3da9383d0bcfd63b2cfa20bd251aa03b2a8ab53b03529c3cbbbdc9bc0e1b053d7c27c9bc955f0c3dd4f3323e0edd023d9f9a0bbd506e003d347a073d4c951bbb7989343c8fedaf
3bf6a1233c00eeaf3b63dba43cfc7e533ac83606bdb09646bab2af50ba072e9dbb2548b93c7737debbb6e0a43c5b1e02bcff8d3fbd274eacbba96c3dbd3e8ceabbf3fb13bc5714203cc98936bc98db9d3c83d268bc0148a83c9736483dc820a43c9de221bdb90e983c2fe3943c3a5fd4ba81ef3b3c751c973b23fadd3b4f8b333b37b4b53cd044e53a88a81abd4a7aa7b99dc501bad8cda93aa7c572bcfba0d63cc1da86bc4d3bd73cf09e943c827ccd3c1b8956bd3883c23c8637bf3cab889fbbb5c2d13acb99fb3b4295273cc41dfc3b65be0e3d1dddd53becc235bdf19ca43bbbdec43bd1b8b23b4ac6afba5fd6943c218509bb2bc0763c48c8f73c62c3813c565f25bd1374443c26cb633c1fda053ddebcf13cd9f829bce620e43cf20079bbe584b7bb3ade37bc40043dbd140f3fbc183f64bcb6b97fbc4640e33cbeff89bc9ecce53c627f3bbc92a8a0bde11326bc9058073c296f68bc939699bc7bed253b46d30cba6da4803b995d9aba7292253ca565a43cb7b8d03bd50711bd21df8f3b9a02803b3f98f83933cbb1bb98a95c3ca09600bc14c1753c3184703d7760523cf2f390bdb6663a3c91c9433cc137633b6be27d3c997ecab9060da63c1600cd3bafce23bdae80dbba1ccb88bc058da2bb467e21bce8aab63edd089dbe4f78fa3e1fef9fbe16a1f23ee01a18bfc853ec3ed3a3433d9ce5d03e226ce63ed83484be140466be9106fa3eb0117dbeb6c3133f0fb33bbf9787053fe44c0c3fa01cdd3e379ce73ec6904b3e0ba483be3ef5f53eb7fd87be03c1053f8d4c2dbf6257f73e6866ba3efd92cd3e0945e23e59a1323e199391be37c4023f598595be7f81f63ebe6c2ebfc3e3f33e33fa323e77f7d43ed851f13e7b92053fde4a37be0ba8cb3e5e0d53be7047da3ef3a24bbfddabd73edf0dce3e3c6aac3e8810c63e1a6f9f3ef0b686be44eae93e0b2685bec4bfe73e164c3abf6067e33ec99f4f3ec58ebf3e6252d03ec3eff43ea926a7beb70f063fd659aabed093fd3e73c6cabe1c69fb3e3f861d3d4d3ce13e0a74fa3e4c35063f755379be2f12f43e7d848ebe2060043f9d9103bfaf5af83ef9b1043f8e7ac83ef721d53eaeff133fb67d2dbead28cc3efc3668befd33ea3ee1b941bfed13dd3ea8940e3fcfb9ab3e82fbc33e38e7e33ee37590be93efe83e41ac91be6fe2df3e66f21ebfe293d83e6b91573de8eabf3e3abcd63ea5db103f5b217cbe49c9d13ef7ac80beaa60d13eeaa93ebff204cd3efe974e3eac14b23ef978c33ec984cd3ec2d8a2be4c3dfc3e149ba5bec9f1f03e00f911bf3f4deb3ee781273b690cd33e1ccfe83e7ed9ce3bf7dd993be26ca43aa8abba3b4c8a1b3b2357313d7eb9d73a64e851bdaa101e3b6a432a3a1b5457bb665899bc7993fb
3c83c150bc13abec3c749d863df737e23c2f3b11bd8b92d03c1dacdb3c072077bb17e3d1ba1ad1323c254dd93a8bd12e3c5ea3213d4f9b263c158d73bd8c5d013c0cde153cbed5d0bb31ff0d3c59f8ff3a5b2df23b29da083bc4561b3d0491673be76542bc74fd963960539eba288b47ba9f2eca3aa558a73bd043abba8049c33bd32eb63ce799e73b4296ccbccda17c3bd5eec53bfddd8f3c66733bbc6a5e943c11bd53bc14dda13c7fae023dd800a23c034b55bdc354943c0c158b3c9c59a4bb82b70f3ccb7c783b7df2123c6d74be3b126bdf3c3ac9463aeeab5abd4df224bb11bdc1389b6322bb232aa8391f7c863cf42d3ebbbb0f803c049f493da7cf3f3c6c9924bd33876e3ca5165b3c972233ba2a880bbc8a30893cb66825bc1a729e3c6624933d492f883ccd7243bd00c5733cf019833c0308c93bc72d3b3b88a5c43bb947243bf435f23b3916afbbdb18df3bd7685ebde8eeba3bdf2ba03b223b07bcea544fbc0669fd3c81fe8bbc1275f73c5e02723df0b1cb3c51ae5cbd1d3fe33c8980de3c71bd48bba6c40b3dfa96ce3c08c5663d3ccf6e3cc800143d2a08933c0cd93f3b508e9d3ce864d33ccbf2adbbc124033c9a36233bf554ed3b5729123bc4b9a4bbcd83b53ac16774bd3cb33a3afa121c3a7ad068bb01c4603ca49e9b397efa6c3c0395df3b0cea40bb160a703be9dad9bcca8f593b66b085bb8d35a2bbae80203ccd53a5bb30d02a3c432e18bb9795e63c58c982bb874772bd99d64fbb118faabb5d1820bc388044baea1e3d3c7837df3b2164373c1f98673df94f183cfc0d45bdf80b223c2b146a3c8cd5d739622b99bb01a62f3ca437eabbc703323c1cc9e83c66002b3c451f67bd9e4f1e3c61a0233c797bf8bb9576c33ae97c8a3cc5ac933b991b553c41f85e3dedd8523ccc9946bd22335f3cbfec273c24fb0bbbaa53023b8c62623c3f8dd3baa4fe833cf033123d0131633c92ef79bd43a24d3c61be0c3c8f4de1b94e9ed1ba1e811f3cf46e85bb7e115a3cf3d5ae3ceca73e3c3c0c41bd57df253cdcd2213c263b3ebc345d733c81be6f3c8aadee3c8a5d393cdf08703b7361813c7d5222bdde9ba83b5465c138af778e3b2e20b83c9a0c69bcfef7be3c44b00bbc850599bcf7406cbc56d64fbd1cc44ebc73e483bc1f71883e25ab7ebe1807003fd1c286beef68043fe1ad19bfd38af93eb4a0c43ed240dc3efb15e63e487f4c3e640b7fbe4907fd3e871b81befe66173fe60e1ebf5886fd3e6022093f8c18e23ebb31003f63cfe83e27805bbe22b0e83e84c37cbeb6d1f03ee10634bfa8a4da3e3d3fbf3e372dc53e6920cf3ed1e1703e1f888fbebced023f8e4692be6bf0043ff0b121bf525ef13e6ed0763ecddfd23e197eeb3efe3d8abd61ab74be3ce802
3fb0d484bead74073fca504ebfad57e73ebd250f3fa009c83ef804dc3e00864abd678da3be815f083f71d7a9bea740083f6d2e1ebff19dff3e8968d23d832ee23eeba4fb3eb51bfb3d7b298fbeacc2fa3e6b4d91bef1b7f03e642645bfe625ea3e460e0a3e602ccf3e7c86e23e7754f7bd306785be01ad033fcc7297bec0ab093f2aa034bfa508033fe0570b3f4acdd73e4e7afb3efd2fb53e5b688ebe8e41f63e8fb791be0563f13e9af428bf8858e63e54962f3e22bec83eb4c5e03e184e94bd78de98be707c073fb7f19fbe0ecb043f9c5d33bf7316fa3efcba313eb5eddd3e5283f53e2e92ff3ec37d61be00ebe43e06528cbe7949fa3e66ab29bf108de43e9b95ef3e8eb3cb3e7a2cd03e26bb71bd334794be2725063f18a097bec754053ff6d52fbfa253ff3e6f3d8a3e9d30dd3e5391f23e376e12bbd12e833ce1a93d3b3c98593cf3155d3bdc9895bbd4d0b73a64e731bdd2869b3bbf23bdbb14175fbbeca5a03c6921c7bbdcd9db3ca82787bbae172cbd01edc4bb4409c2bc296076bbc0953cbc70712d3cfafa62bc047fb93c822e8cbcb852c33c62d2833da0b2bf3c085509bd7573ac3c61cdb13cbef44e3c54518b3c2d402abcfb4a8a3c4f8a1abcceead03c35e13bbce83d47bd35ec3cbca71f32bc3d365c3a71c946bc48e8cb3cae9072bc6483c53c7441ad3dc78ebc3c05a12ebd49a4b13cd1ebb93cb7e544bb8d0f963c7216a0bbba6ec13cffad77bba48636bd1dd90bbc591d06bd82f1d5ba9d3114bc55807b393f0345bbb84e4b3c4788cdbb1e8a423cd5c7873d8ff43b3c4e5a51bd1e82293c0147383ccfc8163bb605d53b54eb323c2549cc3bd62c3c3caf80223d556e1e3cb95c42bd3f9d413c7dd2243c6b08c03af82effb91db7853c0378a9ba0802793c2e6b783d346e5f3c093b80bdd148663c0d6e5e3cfc6844b9f2e1cf3a7d18613b3b8734b953bb413bbdbc943cf6a8323b88962dbda482133be6971d3b12ad30bbac4f143cd94ca43b87984c3cdbf16a3b9dc7b13ce8e6633a286b77bdf5c7373b162fa73bce6c1a3c33e88cbb4363093c1c049fbbc6b8373c95c3953ca57d343c76441ebdbfbb0f3c3f56013ccae5863a4ddc20bcfcc7c73cbafe29bc10149b3cee14873d03ae953cfa1e36bd2aa5863c7b79903c5c5125bceebc103c7d270f3b0b70023c42fc8b3ab278eb3c84d4fcb816012abdbfd66639ea10e4b8cc1e11bbed1c073b1c3b803bd21e9a3ac633b53b2f52083d9c50713becd382bd6714543bc395063b79e0ae3b7b9e4ebb2d0d253c44b83abb4d6a393c23814b3d6e861f3ce06cb6bcfe8e0a3c511f173ccfa56c3b873eec3a1c34e53b4381d0394e17d73b2c49ec3cd516cc3bb9c42bbdd3fa933bf49e793b20936a3b5043823961cf3e
3ccc562cbaed9e403cbf96063d9c28353ccb3f5bbdf558163cdfc9fb3b5bf28d3cd425acbc618d163de421c1bc0a86163d09ec2e3edc5e083d3d898cbdbec3113dc213043d3d38c4b9466a44ba1efb2f3cce0f9c3bfaf92a3c5388693d991d113c7ce746bd74ed1b3c08d21a3c382fb03c5592a0bbccf6563c24ee8bbb5908563cd779163d06b5503c0580fdbc91d9423cc241493c356d15ba05fd523ca5611cbbc379493c66e7f33b0c1f073dc62fe0bbd3d455bd42683fbb482ec2bbf0d0a83ee99383be0eda033fa74082be4c841c3fe391f7beb6a2063fb386103f5f2feb3e6bafec3e53dfbd3e983a56be1528eb3e1f3085beb3fdf33e12fe3dbfed47dc3ed362e53eb34fc33e0b0ad33ef34e0dbefeac85becdbdf43e75bf95be249c133f67db35bf21d8013faf1a133fd964da3e7c4afb3e8408823e7fdb8bbe77f7013f8f1595be6804053fa32217bf4881fe3e6341823e55beda3ed8fff53e0002db3e85dd6bbe93cedf3e11ee68be5e26d43eb9f24cbff431cf3eca59693ed2afaf3e60d4c43e81bc54bd13eaa5be9661053f5ab0aabefc59023ffba229bf847c003f92e1f53d31bae03e5082f73e9e01b63e759a8ebe7133e73ea36590be9342db3e680d37bf872fd63e7d461d3d8813be3e0816d43e5d82d53efaca83be81e5e23e01ad87beb5c6d53e2bd731bf007cd03eca1deb3dc447b93e4013cf3e154eff3ea8fc75be08b8e23e116189be36dafd3e4dde22bf2458d63e2faead3e25c6c73e0a54d83ed91bf23e02e993be25bfec3eaf6e96be0defec3e48a621bfc4c4e43ef7ad213e4974ca3ecda6df3ea114383fdaf953beb966d23ef24e4dbe291ce93edef430bf7100c63eb01ccb3e95b4b93eda29b93e8a49ca3ecc00a4bedfa8fc3e2bb1a5be2fe1f03ec66f16bfd2b2eb3ee1e29d3c966ed33e198de93e1904c63a3ae604bcfeff653c5d0bfbbb80629b3c76985c3d60918a3cd08057bdd35c933ca83e6b3cb9d066bbb36d47bb05eb803c4bb4a1bb48c3823c9bcc033d9e47763c32a143bd3cb3613c960d683c8189d4ba9301293cbd3f263bef5a323cbab8a63a3d59e83b56dc643b234648bd7674be38ef94443a9df5abbbc5be603c02d2df3accaa403cb260ca3b856f0e3d2191ceb9dacf39bd1bf821bb5766d73bb84111bccc86c93aacf4613c8371e5ba12d1533cf7936e3d66c8023cadd91abd5d2e313ce4e92f3c223b19bc5ac9183b1caf983b0cc0a73a64a1153cd31c0c3d1b5d923bfa1f24bdbff1a33b73bd7b3b8bd9693cd6ba7bbc2d09db3c758c83bc3085003dd3330b3eda22d93cf0ae09bdc729d63caf83d93c05610cb9a04e24bc8d2d9a3c82923cbcaadca33cb17a873d11cb9c3cf6b041bd449f913c7196923ce771feba39f6edba18b314
3c1d3807395c21113cd00b253dfff4303c962c50bde69c1e3c5710de3bdb3c8a3b096e2ebaec6b373cece9383b9de8433c3b4ce13cdad3143cce5928bd2b30253c4de61b3c3e5925bc0d53e5398ee9f83b50c71b3a51bde63bd55e813dfbd6bf3b938257bdb3e9943b152ab03bc25b1cbc98f59c3b75ed303b8ec63c3b464e713bbe07743dd60e2a3b004d1bbd36ec863bd63c1c3b6bdf53bb3c309539a8c8743b973cad39656aa33bbda3aa3cc2e08b3b98d705bdc680563bfab04a3bf73b01bcd76e913c604527bcaec1813c9c9ae3bbba0438bd285216bc94cc29bdfc310fbcaf4c42bc138f3cbc7c9d66ba60164a3cdf580bbbb5af403c7261863dcabb2f3cec056abdf8051e3c83f1283cb600b9ba0b9fb4baba50213c095a5039ac32513c8fb07e3cb9371f3c50a40cbdf843383cd969f33b1c34fbba0cff823cf9d281bb7aeb6c3ca95f09bbc72cc5bcb93379ba67f4d9bc132aa2bb94a6afbbc79f74baf9082b3c7c42053b4adac33b969a483b1dd779bce885103befd8c9bc60d7f13a8a93db3a7055c6bcf0cbd03cc67c75bcc196ce3cdfda89bc85e3a2bd8aaa95bc66897dbc339896bcc08298bcb27911bb4fe4b23b3c89513cd8bb8e3bc0df4a3c58980a3d549f0b3cc3ed54bd565c563b3c35843bef71a33ab4b026381dab4f3c8822eb3b9f5b2e3cee511d3d170a113ccff352bd2d41ee3b321aee3b6479c43cab76bb3c2a3315bc329aca3c2df32ebcfef9fdbb244a8cbcde3444bd365a57bc33fc79bcfe524f3eabe282bea97ffe3ee0ea87bed85a043f4be633bf908bf23eec11043fd4d8d53e307fe93e2094803e506d83bedcbde23e1b2f87bef844dc3ec77244bf0c1ad53e572e163e5088b83e047dce3e05f512bd894199bec01e053f30039bbed405013ff1cd35bf4e51fa3efd5b213ea77edc3e2ca4f33ef42bd93ef78591be6cf5f03e362e93be94a5e43ee31d24bf596be03e801dd13d9d6fc63ef969dd3ea28ec23e365070be2d14de3e2e1076bea661da3e850350bf606dcf3ef091723eaf36b53ee0aac93e73e2033f75f074bee398e83ed00681bedd7bfa3e5c7817bf1ec2ee3e2915ac3ecdbdc93eff25d93e57c8a13ed2bc91bec6adfa3e5af895be74e5fa3e9fe712bf1f7af83e8a3c543e818ed33e749dec3e1dfe373e30b09abe2467053f98de9bbee5e8f73e1c4120bf6ba9f43ecfc0c83dc7e6d73e8d40f43ecf2395bdb64287bec2c2003f7f378cbe3318063fc8053bbf5738f13e54fdb33ef711cf3e374fe33ee988ff3e3e2284be68f1e53e0abe89bee926e73e1ba927bf55aae23e50a6183e6633c33ee12bcf3e0df5713f4249f9bddd1fb53e513604be7ca4d73e8c1049bf475fd33eaac3353f74008d3e15f7913e0404a73e064991be01fa06
3fd0c293bedde4023f7f460abf9c81f93e9408353e1f18dc3e3858f73efcf3d8381165bb3a1ed4f13b66faadba6c05063c7346ec3cafeddc3b45fe41bdf10bd43bdc3fba3b9fbaa6badb92df3a26b8523cfb88553aaf90453c8a984e3d6898463c298d1fbd98062c3c9bfc163c1cba5ebb8ec0eb3b5b56e33ba93de83b953c6f3c42588d3b1f74293c44d641bdd26cf13b2f2f9c3b8eeacbb949b7b2bb1a118c3cad7ffdbb8701843c78c8823d01cd843cd9b152bd70f3683ce2ad813c7212ccbaec4bf7b9ca32633c53adf2ba37bf673ce368703dbf3ef93bc5774ebd734c0e3c2eafd43b1d1166ba9ac4243befb80c3cc95518380381fa3b0947f93c49f6f73b44c628bdd475d03b6e0df33b43ecf4bb5077083c5064b9bacd2e153c9732e3baf237133d999cdfba632885bd1b7666bb7f95a9baeec6babbe3e49e3b3735093c8ae9883b28a7ac3b56fd633b33ccbb3b570d0cbd10d3453b81ea883b829da43c0180e23c1dd790bc5b77dc3c925e67bcf0fbccbc8dd6b9bcfc2463bdf94980bce22cafbc902ba4bb28a314bb5766383ccac575bacba2123ce74f463ddd9c153cc43136bd77f4013c9d15143ca77fa6bb6cc6243b4b2b443b7c95ed3a29e8043c6fe59a3cf516b03bafeb2ebdae27933b25d6153b8d463aba796e2cbadb1b533c1c11c4bb5a7d803c19972a3d7808623c1fe637bd6fad573c4b5b2b3c6eb3733bdfd29cbb02e38c3c6401e0bb72e1973c02ab743da4fd933c87250dbd83eb5f3cb8d6773c7e4514bbe5a1113c1e4f013c77e4543cc590453b93870f3c78a337ba301f52bd5af99c3a853e65bb63e106bb8b42eb3bdbc01f3b0c81e83bfa8de0b91447893dd66b2b3a928a4dbd9da10cbbec6f0abb9e397abccf5c173c0f0c2f3be3d7f53b16b0293b53d0cb3c95db6f3ab36af1bca4769a3a01d4a0baad745a3b260265bcc3b99d3c7b6549bca87fa23c147e453df66d9f3cd7ba39bdd2049d3cfb239c3cc7bb673c45b3a1bc36a3313d580191bc70291a3d41f8493e28d1143dc25c08bd013f2a3d05af213d61f820bb113eaa3b27c4c03b56e3d53bbdbbe33bb0dc203bf4e3d03b491469bdfe07123bddd1063b179a0abb86a584bbb240a63c599fd5bb495f8b3c464f443db62c793cd51220bd4ccc823c5af24f3cbcef4fbb83b4cebb9c92663cb6c7fabbefac6c3ce308783d2d5c533c905d2cbd78ab3e3c3fbe4f3cfaea4dbb49ee5fbb25c5233cef6b833b531c3f3c66ab2f3d5edf4e3cbb9210bd9fbf0b3ccf49193c51fafe3e90c756be715adb3eed0d49be25c5ed3e64503abf318bd73e99b8e73e6b79af3e420fba3ee950be3e07008cbef683e93e99c88dbee548dc3eed4a3dbfacaad73e77d18a3df25bbf3e9445d63e5d96123eec65aabeec3907
3fe313acbed47c023fb3ef17bfa431fd3ed00cdf3cf490e03e6703f93e004bcb3ed5c6a1beb268fa3ef2aca3be1898ee3eb1f111bf6baee93e84cd133c8a79d13ecd3de73effab5bbd2daf95be81f8063f874ca1bebfdd083f8c9c25bf21f6023f4e4e653e042ae83e537ef43e40d452be2d4e56be66c0fa3e7ad486becfca143f22f03ebfcb79003f73ab523fb6d8d13eeacbf33ef57611bdec8a96be17ad033fc25a98be4623003f61133abff137f83e56b62b3e9665da3eab4cf13ea1339d3d765973be63fd003fa5225ebe73a4023f223e46bf07c0fa3e1742153f6e6ad33ea276e13e5393c33e41a99ebe5eeefb3e879fa0be8360ed3e320016bfef4ce93ece150c3d34b9d03e157ae83e7cc1313f72685dbec4c4d43e7bb05ebe9657f63e2b2d23bf46f4e73eea41023f2d3db33e3de6c33ec32ef93e79894cbec235dd3efa0c80beff1b023faf442fbf7df7da3ee041f53e6556ca3ece48d23ece5906be323b93bef7bd013fd4e195be642a053fc2c439bf42e8ff3e86ea6e3e9581d73ead4ced3ee3ffa53c7981c4bc2fe9fe3c2ca2d8bcd059093dc6c3193e81fa023df8b6b7bc09f2033d79b8ff3c49f32a39562805ba7df3053c165180bbbcd0123c40d1e33cb49be63b564903bda22ae43bf8dcd43b28edc73a4a1e723c347001bba98a5a3cc3bb9a3941c603bceecf38bbe8d652bd101354bbb677d0bb93ac88b9e093f03b2fe2a13ba98afa3b5485cc3b48564d3ce6af1c3b989844bd6033003b5990803bd374b63b69edd7bb278c9e3ce958ecbb98c2d53c827201bc0f529e3c174b88bdad15833c416b893c4daf89bbbb16193bab8f993b9dfc253a4927f73bcafa553d0de1d73b59cb48bd2bccdd3bf5f0803b81f4bbbb480843bb8bfe6b3c10542fbb2ca88f3cbf488b3de92c8d3ca05422bd97e1813c92924b3c9b449abba9f73e3cb3f69cb90173763c750248baaed0b7bc915b12bb4e7c54bd70120eba414ebbbb13b6993b035b9f3b9b30003cf3fd003cfe37133cd137a23cc222e73b4bdf9cbc3907bc3ba4779b3b15cbb83aebfbd0b92e3de83b42e793b910661a3cc1bdee3c18b10e3c708314bdc7bcb33b64f1c83be5300a3c7965a93b7086d53b11fb083b321bd33b4aa3073daa59923b88101abd06ea0e3b1fc62b3b2d4f58ba12d5ab3ba506cf3bd5bb263c3877c83bf6ba753d244e8f3bf1813abd39185d3bd2e57d3b8276f2ba6709933a02406b3b62d8003ccd922f3c6b14913cdfb9a23b101c0bbdc016893bdf0e533bc2afae3b475ceebb6daf9f3c1898fcbbcf1e963ca4eab23ced34983c43a94fbdf4b57e3c859b903cedb0c93b6632dcbb6a74473c1d207ebb69dc803cb0c53a3d2edb573cd72a26bd2792533c0be43b3c22f1a7bbcc598f3b458039
3ccc597fbab1ad323c0d642d3db418fe3bcdbe1abd144c0b3c251e043c6ff7c038b3fc19bcf1758f3c170f13bc55fa873c7cb63e3d4697913c162a2abdd9b3793c797f703c46f85f3b6ca3ccbb876b933cbd24f1bbca59923cb7e1be3db0a5993c757365bd3f1a723c19e48e3c98150d3d5ce97d3c696698bbb0af533c1d515cbbccc11e3ce72cc6bb2b555ebdadf9c3bb1574dfbb3211dcba1085913c4c0e38bc0aff843caaf59ebb773309bd361dc0bbd991abbcc9284fbce6b04ebcb78f543b472d26bbe403883c90229aba79c2893c44025e3d09d63f3c71a104bd8af2273cf4af483c94822bbc54b8cc3bcfe0e739245baa3b7380fd3aefd9803dee35a638e80d49bd70861939e25276b9b3746b3df0cf99be4a85013fbc149cbeb2c9fa3e395836bf5a68ef3e65b7d33d6180d63e70b0ea3e84046d3e665891bee293f63eab5895bedc01ff3efa2324bf050bf33e3af05c3e5277dc3efa8eee3ead753b3ff22e34be1f33ce3e5ddc5abe523ce83e9a032fbf8f88cd3e50ebef3efc00bb3ed07dbf3ef885133f7b5159beb055e23e1aa26fbeb22fed3e1a3e2ebf16a2cd3e2a13b53e0f88b73e619dc53e6681f63e14e591be1b05fd3e1c5b92bef153f33ed4570abf29cee43e011e063ed901cd3e4469e43e2b3cf03ea30a66be5616d33e6ed568bea2f0de3e4bda41bf3da6d03ef1108a3e726eb93e0556bf3e1cddcc3ea1669abe0ef1f63e7bf39cbe8e07e83e905422bfe4c0e43eb492a13d2010cd3ec405e43ef1d1ae3dfa3394be6873093f6e5598be8ed40a3f25161bbfb5ab063f2bb9db3d4cf1ea3ee06ff43ea95a20beb6ad9dbeab13043fdc539abeaafa0a3fe7272fbf9e84f83e6e168d3e7781e53eece7f73ec8ce2abebc067bbee90ef93ed53186bea243093fbbe73fbf7e21f53eb51dff3e3b3adf3e6ec8e13e8c3b82bdb42460beab37f43ebfad94bed1b10f3f0c213cbf2383ed3eb8671a3fd463ec3e0cacf63eae35cfbdfc817bbe1854043fb85f94be2348163f22ea30bf9e4a023faf214c3f3f85fe3e6c9ef73e355a28ba4c46e53b017e753c1c87c03bb5cd5a3c2838803d14e5343c6861c3bcbc67133cc47ed33b35432f3be20f213c9bff823cdd53483c42cb7d3cd7ecc13c147a113cdcac96bdf93e433c01fb103cf2ec67b8032b1fbc76d99f3cabc747bc2974993c1376ac3da7358f3cafe934bd3fdb803c6c62943cbb5343bb2b6496bb8ec43b3c6236cbbbd0c62a3c28b4833d90e7243c73e35fbd115c123c48e9283c1a7d903b70063bbc1933853c7a7d1dbce9ad8b3c4fd20f3dc7d9813ca69f20bdbf14803c57ab7f3c9e6d093b7a2d093b4302ea3b71b3afba6f23163c5642c93c1d8bd13b0f0004bdba12ce3be107a73b50cd62bb92904b3bda05f3
3bf1b6e53a94a5033c7894953c81b8943b58933dbd97f2a43b48ce4b3bdcd3593ce3e559bb69ffcf3c759780bb8354c63c51a8f93d48128c3cfc8107bd7246b63cab7c943c5f9ddaba8bfc18bc714da03c576349bcf85d9b3ce84e9c3d2d09933cea4c18bd8afe8e3c2cea923c57ef41390ba48bba674fba3cb1264ebbce30be3c3bbaec3c138bd53c6f5990bd9be4c93ceac7883c02898cbae2e441ba0e1abd3bb2f5d83ae905473c4056493c4e330f3c28c50cbdd2b7283c5bfaed3b1f638c3abd2cb53bed0ee33b4416b63bef75073c7a38753cbd91b53b954528bd3e33883b5456a53beb300bbb350e223be1e5cd3b23ddb53a281be93b6eac4b3d0f77cb3b2d1da4bc19348d3b71add33b25c33abaff77a9bbb1ab9c3c42fa05bc597d893cd1002d3d5a22853c90a4dfbc0d71853ca7f58a3c6eb77d39743fb93bbbda9f3b16e8943be58d9b3b6730fb3ceab25c3b229453bd9aca833b4aa1a43b96f507bb02dadabb1dd25b3c73ad04bcb1f24c3c9b72693dab604a3ce904cdbc8ab0333cd192473c502a5eb932c9143c9b6c013cdd07263c16cf513c3848c03c8d37f23b10fe15bdfc5e6d3c77369e3b7d18ce3a9601d2bc8c6e113d1ef3d5bc8be0183db487b03d05ba0e3d330441bd93390a3d05830a3d7df54abb3c1eec39ffe3823b19d4643b0ca3993b4201613c87817a3be0aa11bd68463c3bee21413b569fd43acb4351bb32323e3c1130b6bba088403c54cb133d71f2583c650a7dbd366e293c536a023c74d3b7bb9c6d163c51feceb9dfeac93bc622dc3a1c951a3d5e7681bad78f20bdaf9843bae03ad3ba6f648bbb7558213c8c35003bacf80f3c9d37633adab56a3b47c2e8ba429780bd8bf9b53a6d12a4b995b337be64908ebe376e053fc7a194be0502013f392b41bfc7fdf13ee94f873e230fd93ea502ed3e751cfa3e23fc84be96cbe43e558f87becf28d63e7a5d37bf8666d23e3e12cd3da892ba3e2ecad13ef3b3e0bd481a99be552a0a3f85999bbec033043f8f732ebfdc82fd3eb766413ec4cee13e152afa3ed5fbc9bdf7a87cbea07f023f212e98be70b60c3f99f531bfe83c023f65bbea3e76d5e33ea863f63e10c643bdcae24bbece820a3feed88dbeb8a60e3f20903bbf721ff53eaff42b3fe856d93e8b80ff3e4c3d6c3e2ae494be964a023f5b0ba1be9672043f47cc11bf8f93fa3ee0cc2a3e33dada3e301eea3e83ade5bdd7eb92be2233f93e43ec95be8e2af33e1b4445bf0ddaea3ebdac2d3ef7d0ce3e1f51e73edee566be687674bea12d0b3f26429abe4c2a0e3fd4a73abfd5daf23e32f0c83e924def3e2284fe3ea24716bedacc96bed509083fde4e96bea6430a3f6d9a28bf534fff3ea56e833e002fe03ef2c5013f8a6cee3eb0698fbeb11df1
3ecbaf90be5aa5e03e8b8824bfa06fdd3eb118fb3daec7c53ee528df3eeebf65bed1b58bbebacd063f7390a2be63ab0a3f5ca425bf4a3d013fd665c13ec5fbe23e1b66033f7013da3ec0e782be5132e63e12f686bef829de3e8b6d2fbf4b60d63ef2c00f3e659ebd3ef7e6d13ee10ed03bc67f0f3c45741d3bb0daf33b5e39df3b5ea9953be727893ba42d13bd2afdae3b0be0f93bb1a2223c6d45e43c17da55bcd75a0e3d3c3315bcc99f73bdf2b34dbcadb347bd863263bc4a6e89bc65b616bc4244fd3b32023f3b1d43d13bef37a53b98fb123d971b1a3bf6eadabc30f15c3b3ace153b58fa5e3b7a05a4bb52f3323cc3bd98bb9696313c5b19993dd413293c2e094bbd4e47113cdadf233cafd7603b4c1ed6b8dc521f3c63c984bb2103193c7aae9c3d50640e3ced2c4ebdb62fed3b1a9b0e3c219f31394773703bfbc3bf3b7ef7813bfaafed3b08ff4f3c1a058e3b8a7247bdf9f8a73bf284e93afd4eca3b7be13b3c7d27bfbbfcc3413c4cf90dbb59ecb0bce6f836bb338985bd3d3a69bbb96002bc4396583adeb88e3babe9f53a68c2a83b90d7433be703523c6c20dc3ad1f31fbd4769b63a3f9c4b39cd774bb8cae544bc5967cb3c06f107bcd6d0d53cd829333d41a6b93cf5f544bd1104bd3c12a1c53c72fd99ba591fc43a7149c53b53dd633bf1c42d3c6eaf003d6568a83bf17a49bdca33c53bdc83f73b32946e39579123bb74dd2b3c8bc165bb2f85a23cfb398f3d7e6c2a3c843b5dbde50f0b3c67941a3ca7533abc59089d3c36c3babbc0f7a53c898721bc100554bdb9474abc4f55c7ba098932bcc25b5fbc6748a13c343f8bbc081a2a3d8a2591bc6225273d8a724b3ec527363d916a51bd51a1213d32c6133ddd415638a4aafcbbd6b8933c8247e3bbe3c0a13cd7cc673dfbb0663c007a6bbd67177f3cf1ad763c427fd03b7768a6bbba29a73ca4c80dbc9809b43c10f9783da206b63cd5b072bd0066893cb03c933c7f80143b1ac112bba4cc573c177680bb84c6723c20c1ef3cb47e433c100043bd006b2f3cbc46403cc9ffbf3ab881703b80e7f13bb2c8863b27b70d3cf15c393d0375343b1fe28dbdcab2cf3b4103e63b15fbe6399b7a28bcc41a9c3ce82757bc40cc913c12ca8d3de1278f3cb9e22dbd6ac1833c5c308d3c2693b2b9146a553bc2da3f3ca5c9b4ba5377503c22ac393d17be303ce29c5cbdb02e213c9cd6fc3b6307d33b70b6a1bb10d6843cd567acbbdba95c3c5583423d8129633ceadfbcbc3d8c4c3c02ee513c71f21d3ba86b813b0ff95b3ca70c603b11131b3c4dbddc3bc8ecfc3bc7910bbd621dc43b5b669b3bdd3236ba1c6a22bcc7db8d3c52901ebccc0d933c6402343d20a3803cd9a321bdbf9e6d3cd0b8783c6ff7febdd72f8fbeb78f09
3f69ac9bbe0b810c3f90f323bf600c003fcd33bf3e9193e83e0d03003f72ace63eaa6c84be1f57e53eefd186bee796de3e844839bff336d23e99aa073e2f92ba3e47c2d03edf4bcb3e5cd19abe3ee8f53eada49cbeeeaae83e374019bf233ee43e4746553db2d1cb3e233fe33e8726e93eeee48dbe66b8f03ea2bc8fbee768e13e522b26bf6932dd3e52e3b83dc097c43e93acdb3ed827c43e6d8d99bef31df53eb44e9cbec848e93e8dd125bf457fe33e54d1693d7e8fcc3ebf9ce13e4dea28be3b2f83beeca6033f080f8fbe69fb133f473d39bfb70afd3ef5fa323fd174d63e7598ea3e46f87cbe51e663bee731013f1c40a3bee01a083f662637bf387f063fa480163f9417da3ede77fa3e4d7ee43ecca487be8528f23efe458abe3bb1e33e7e172abfe295e03e86b4053e7d58c33ee5a1dd3ea22de53e812d8fbe8a61f13e0ac292be3492e63ee25826bf714ae03ec7a1eb3d285ec83e21a0db3efdb0db3edb2980be5ee4db3e3b6f83becb59dc3e61d743bf9e39d03e0572563ed04fb53e7303cd3ed31b053f1a1461be3b24ed3edb938dbe66d4f53e67e320bfa5f7dc3ebea2963ecf62c13e3c6bd43ef01eb43eb41c5dbecd55e83e46ee89be0a1a013f087d27bfd2fafa3eaca30b3f476cd63ecab1d93e7e9725bc0aa6923b86da413c0203193b5e7e673c7d50253df6e9363ca01e4fbd3481453cef6ad83b800e913cd1685d3c107bbdbb18eb313c57317fbb8e55233debb5bdbb863567bd2850bcbbad13d4bb915a783b3cd9aebb70534f3c4e62d13bafe06f3cc4473d3dd4005d3caa5922bdde91503c94ce323c6c72c9ba0449b33b0e88c93b4cb2603b033fe73bb935dd3c42e8e03b53f621bd5561983b2efc753bf430caba702d4bbb00841f3cfb49713acf192e3cac562e3d260b0f3ceabc30bd7b25f53ba67a013c87a3e2bbaaf2263c37417a3c7484b63c0fd47d3c83f36f3df7c9d53bdb981dbdf112963b2295473ccdf5c1bb65a4423c3e59caba46581c3c9d6edebad1f5babc6cce85bba5329abddd9e61bb5af783bbf23a0d3b800ec9bbcf327c3cfb47c0bb7adc703c76c75f3d7a92623c127c1cbdc5e2503cebc76d3c65cde53bae1696bc308ee23c4855aebcb7a5dd3c1be8ab3d7244da3cc1c500bd7f44ce3c439bdb3cd47314bc56d26fbcc8d1c83c2ec16fbcd1b9d63c500b953da937be3ca49477bdfddbb53cfacab83ceca2813aa65c283b462b443b001b303b5219883b9b2c69bcd1fa3b3b6a4157bd25cff33a86d1733b7638723ca39df8bc31f0463d30040cbdb2063c3d17f85e3e7fff3f3dc244cdbc03b6353d22542e3d1d7dafba2e2490ba394d413c37a186ba2f70383c4d72613d5156d73ba180f4bcc1d2143c78010d3c4a3125bbe706003df3418d
bca9bd033d0efa2fbc06d36cbd8f558ebceb3734bd801486bc0cddb4bc406612bc4227823c76d9b2bb53197d3c3df981390c2a883cc7d35fbbce8969bd179ad8ba4928cbbbe034ec3ae66c83bba9e3313c9215cabbcacb4f3ceed13c3dda43483c768817bd7f9e373cde071f3c98165cbb49c9e23b0661753be990bd372c4fe93bdf4acb3c5385823b24b868bdfa2d5c3b1ed5863b0019fabcfbc00c3d48f179bcfe80fe3c86da8dbc39f5a3bdc936abbc0d3891bcd834b3bc837dc8bcd4ed0b39513a9cbbec06a13c93209cbb59daad3cf298bc3df1e5a73cecabb7bc324f923c1ba7a33c6b6d19393bc7093c3050a33a3b71bb3bbd4aaf3b9742ad3aeb0eea3a193e37bd3377a73b8e7ec53ae00107bbd15c83ba0c6ff63b3b5e7ebace75e43be425843d61f4d73b05e2e6bce263a93bc565dd3b5a00b0bb6f1083bcedb9ef3c5a57a1bc699eee3cdff4443d633de73c655e39bddcd2d83c8d39dc3cc398ca3e30f799be5f55f33e35739cbe2ad7e73ecbdf1ebf6f40e23ee2a67e3d5f78ca3e55f2df3e843761bece365dbefb42003f64bc77bef20d103f607440bff1b4f53eca680b3f8539df3e0234f43e937096bec00968be8efa083f20d277beea60163fddf130bf06b8fe3e82a3183fd38aee3e92c0f83e3bd408bef2924dbec2e4013f19ec5cbe02d80a3f419c52bff40afe3e84dc143fe2fdd83e29a5cf3ef803ca3e1fab9dbe42c6f83e51c09ebe1e74eb3ef50b12bf7624e73e8676123d7719cf3ecab8e53e5eb7c63e7f5d98be361cf83e2bd69bbe5d9fec3e885722bf52cfe53eda77783db381cc3e28c6e23e3374eb3e21d688bef36ff73e7ef38abe84aaf13e6be51fbfa438e73ecc10703e2cb9c53e5b45db3eac07f73ef8af6cbe561dea3eccd584bef612fd3e5af929bf0f33e13e1156af3ef905cd3e3e60d23e461ac53e17749bbe00ebf23efe389dbe19e5e73eeded1fbf2cebe23e24843d3d12e3ca3e9fa3e03ef894283e365371be2621e63e7c137cbeb5bb053ffb7c39bf5e23f13e8ffb1f3fba81c33e1a22d63e2c2152be8ad160bea9c9093fb7d78abe9693143f72bd2ebf65d9033fca360f3f29dee43e622edb3ed02b52be21f498bea13f063f359c9dbe4282013fedee39bf9a96f93ee155693e017ed83e3bc5f43e3c7cd23b157c01bc7e4d513c87f017bc8a814a3c50586f3d79f9433ca6121fbd1f83353c3356403c558cbc3a889492bbd9f87c3ceedacabb514c6c3c1c71883dfd31693cd94165bd84e1563c9c2c633c99e71d3c623cf63c0ff7fdbbdf43023d0695ebbbb0c997bdb63d0cbc5f589cbc72303abc9db459bc5bafa03bad5e10bb5660f73bdcc52abbeeef133c1acf313dcaa2f03b21c216bd7d90e73b8bf3fb3b22b61c3cc686603d2e8789
bc1d2a543defbdd5bc95240dbec970debc8b5c97bc527ad5bce64df7bc0242da3a826aa5bb7dcf4c3c1f06bbbbed98533c0837513dd80e463c641a39bd29263c3c2701403cce511d3a4960b53a0770df3b01385d3bbcc5d63bffc119b880c9b63bd29235bd6979a53b089fa23bcb94933b23be4abb0bed663c91f3873bd52a493cf181853d41b23d3ca3f032bd3bca323c79ae443c160176bbdc7390b90c90ca3b9ed10cbbd19afc3ba719f13cda02be3b8d495bbdd26fcb3b986ab93b1ee9fe3aafca30bb53910f3c6571cbb894f9443c1a9b583d53d2233cba7468bd2218353cb656043c33f7823b443281395f5f713ceb62153ad8ba4d3cd4e85f3d2a3b2b3c583b81bd26e3e33b301c1c3ce4f8be3b592629bb97fa283cef297dbb3aea443c0713373d7b733e3c7a7d06bd943d173c1d9d123ccacd4fbb259aef3cac93523c7170c73c8fff193c819dc93c9f3aef3bd2ef42bd71560d3c9fcb3cbaa084ea3cf5eb293d313ea83ab2a47e3d4ea4713c639f9ebc53d7383cc9f23abc912fa6b9c37fbfba2f5c9c3cf2435bbce756093d8dba75bc7472133d8371153e8f580e3df2a9efbc49b90a3dc2dcda3c89499cba2e1a35bb57957f3c8f919bbbd3a7803ce376303d2a46583c823910bdd59b4b3c41db153ca3245c3b5846893cd9bdd33aed3a8a3cea89583bfc1ba3bc04d0e3b9daf306bd3ad9fdba4bd896bb9dcd6a3bd53faabb5a18553c33e7adbbb04c4a3ce84a643dfe4c3b3cdceb0cbd9119273c58862f3c070a24bb0d62643c955fc1bb466b4e3c78d2a5bb6c98b53c5af18bbb2d2c4fbd14b3d8bb4af60abcd7999f3be7722bbb5628843c9ef3c2bbca4aa23ce11ba43d9de27e3c3ecc54bd07aa7f3c3b3c753c50ed473d1cc8b03ca4ae6fbca0e0ab3c6f8e68bc0b89c2bc709672bc8e96efbcad4884bc197e8cbc88ad0abb1e84a2bbfa37913c4fed96bb095b813c7cdd893d585a803c22fd07bdb11f593c44bf623c8126713fe675cabf69320240baa8d3bf09da0e407fa3a3bfffd00340ba7c15c0ed5ef73ffc72f73f7e93713f0e20cabff9a00540fd5fbfbfd2331040ad5f9dbf1a5f0540e7b31cc07341fa3fccadfc3fbf516bbe49806b3f7352b2bf8ed8843fb13fdabf85b205306eefc8bfd21f6c3f5901abbf05c0a6bf7c29adbe5b6f7c3f1ec0d3bfb1228b3f86f0dfbf7abe103ea357d4bf9bf1743fea1da1bf818ca8bf62c7b7bd4cde883fe0f3dcbf631c903fda1f03c05974bb2e3a86dabf7ef1793f5253abbfc846c1bf90be6cbea4208d3f01acdabf48ff983f7c3f06c0a786392e77e1d3bf4598853f2839adbf0c11c8bf800f5cbe6546673f99f4b3bf94f88a3f65a8e3bf60833a30ce94ccbf7344703fb92d8dbfa461a3bf7c3cb0be6d96353f448baa
bf220c4a3f9221b7bf1d15b2306e6f9cbf8e9b3d3f29fe84bf77d187bf0ca1c2be4005593faa5dcabf4de9683f0d7dd0bf0e34ae304cdcbebfe07d543f528799bfaa14b0bf8e2dcebefab07d3f7cafd3bffcd1833f5f70efbf1131902ed667bebfbdae763f5632a8bfcf1aa9bfc9989040e9b06dc0f01eb6401a0570c03bf6ad40c896d7c03634aa40b06a80c00ab49840326aa840b13f71bf87ec15401fb2f0bf0caf3040caf509c018e9f63f3cb8f8bf8e1ded3f0322ebbf6ba5e3bf28a371bf
+\ No newline at end of file
+diff --git a/gcc/opts-common.cc b/gcc/opts-common.cc
+index 176041bfe..35db76b84 100644
+--- a/gcc/opts-common.cc
++++ b/gcc/opts-common.cc
+@@ -27,6 +27,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "diagnostic.h"
+ #include "spellcheck.h"
+ #include "opts-jobserver.h"
++#include "ai4c-infer.h"
+ 
+ static void prune_options (struct cl_decoded_option **, unsigned int *,
+ 			   unsigned int);
+@@ -992,71 +993,6 @@ opts_concat (const char *first, ...)
+   return newstr;
+ }
+ 
+-typedef int64_t (*run_ai_model_func)(int, const char **,
+-				     const char *, int, int64_t *);
+-#define PTR_UNION_TYPE(TOTYPE) union { void *_q; TOTYPE _nq; }
+-#define PTR_UNION_AS_VOID_PTR(NAME) (NAME._q)
+-#define PTR_UNION_AS_CAST_PTR(NAME) (NAME._nq)
+-
+-static int64_t
+-ai_infer_optimization (int argc, const char **argv,
+-		       const char *mcpu_option,
+-		       int argc_hw, int64_t *argv_hw)
+-{
+-  /* Load dependent AI-framework libraries.  */
+-  void *onnxruntime_lib_handle = NULL;
+-  const char *onnxruntime_lib_path = "libonnxruntime.so";
+-
+-  onnxruntime_lib_handle = dlopen (onnxruntime_lib_path,
+-				   RTLD_LAZY | RTLD_GLOBAL);
+-  if (!onnxruntime_lib_handle)
+-    {
+-      return -1;
+-    }
+-
+-  void *ai4c_lib_handle = NULL;
+-  const char *ai4c_lib_path = "libONNXRunner.so";
+-
+-  ai4c_lib_handle = dlopen (ai4c_lib_path, RTLD_LAZY | RTLD_GLOBAL);
+-  if (!ai4c_lib_handle)
+-    {
+-      return -1;
+-    }
+-
+-  /* Clear any existing error.  */
+-  dlerror ();
+-
+-  /* Run AI4Compiler model.  */
+-  if (ai4c_lib_handle == NULL || onnxruntime_lib_handle == NULL)
+-    {
+-      return -1;
+-    }
+-
+-  run_ai_model_func run_ai_model;
+-  PTR_UNION_TYPE (run_ai_model_func) run_ai_model_func_union;
+-  PTR_UNION_AS_VOID_PTR (run_ai_model_func_union)
+-    = dlsym (ai4c_lib_handle, "runONNXModelOptimizer");
+-  run_ai_model = PTR_UNION_AS_CAST_PTR (run_ai_model_func_union);
+-  if (!run_ai_model)
+-    {
+-      dlclose (ai4c_lib_handle);
+-      dlclose (onnxruntime_lib_handle);
+-      return -1;
+-    }
+-  int64_t model_pred = (*run_ai_model) (argc, argv,
+-					mcpu_option, argc_hw, argv_hw);
+-
+-  if (ai4c_lib_handle)
+-    dlclose (ai4c_lib_handle);
+-
+-  if (onnxruntime_lib_handle)
+-    dlclose (onnxruntime_lib_handle);
+-
+-  if (model_pred == 1)
+-    setenv ("AI_INFER_LEVEL", "1", 1);
+-  return model_pred;
+-}
+-
+ static int
+ handle_lto_option (unsigned int lang_mask,
+ 		   unsigned int num_decoded_options,
+@@ -1132,12 +1068,12 @@ handle_machine_option (unsigned int lang_mask,
+     global_options.x_param_l2_cache_size,
+     global_options.x_param_prefetch_latency,
+     global_options.x_param_ipa_prefetch_distance_factor};
+-  int64_t output_pred = ai_infer_optimization (
++  int64_t output_pred = get_optimize_decision_from_optimizer (
+ 			  argc, argv, "hip09", argc_hw, argv_hw);
++  if (output_pred == 1)
++    return output_pred;
+   if (output_pred != 1)
+-    {
+-      return ret;
+-    }
++    return ret;
+ 
+   return handle_lto_option (lang_mask, num_decoded_options,
+ 			    argc, argv, opt_array);
+diff --git a/gcc/opts-global.cc b/gcc/opts-global.cc
+index e684bc5e3..843ace666 100644
+--- a/gcc/opts-global.cc
++++ b/gcc/opts-global.cc
+@@ -312,7 +312,10 @@ decode_options (struct gcc_options *opts, struct gcc_options *opts_set,
+ 		  global_options.x_param_prefetch_latency,
+ 		  global_options.x_param_ipa_prefetch_distance_factor);
+   const char *tune_native = getenv ("GCC_AI4C_TUNE_INFO");
+-  prepare_native_tune_str (tune_native);
++  if (tune_native != nullptr)
++    {
++      prepare_native_tune_str (tune_native);
++    }
+ 
+   struct cl_option_handlers handlers;
+ 
+-- 
+2.33.0
+
diff --git a/0327-Bugfix-Adjust-the-same-gate-to-use-struct-option.patch b/0327-Bugfix-Adjust-the-same-gate-to-use-struct-option.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6fd98eccfd36b6c27fda58bfcf2b09e198d56e53
--- /dev/null
+++ b/0327-Bugfix-Adjust-the-same-gate-to-use-struct-option.patch
@@ -0,0 +1,81 @@
+From 861ddfd90d86215a573a7614f49d572f1e03be6f Mon Sep 17 00:00:00 2001
+From: huang-xiaoquan 
+Date: Mon, 16 Dec 2024 11:34:06 +0800
+Subject: [PATCH] [Bugfix] Adjust the same gate to use struct option
+
+---
+ gcc/gimple-ssa-warn-access.cc | 7 ++++++-
+ gcc/ipa-free-lang-data.cc     | 5 +++--
+ gcc/symbol-summary.h          | 8 +++++++-
+ 3 files changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/gimple-ssa-warn-access.cc b/gcc/gimple-ssa-warn-access.cc
+index a24645783..3d80590ee 100644
+--- a/gcc/gimple-ssa-warn-access.cc
++++ b/gcc/gimple-ssa-warn-access.cc
+@@ -56,6 +56,9 @@
+ #include "attr-fnspec.h"
+ #include "pointer-query.h"
+ 
++/* Check whether in C language or LTO with only C language.  */
++extern bool lang_c_p (void);
++
+ /* Return true if tree node X has an associated location.  */
+ 
+ static inline location_t
+@@ -2198,7 +2201,9 @@ pass_waccess::gate (function *)
+      In pass waccess, it will traverse all SSA and cause ICE
+      when handling these unused SSA.  So temporarily disable
+      pass waccess when enable structure optimizations.  */
+-  if (flag_ipa_struct_reorg)
++  if (optimize >= 3 && flag_ipa_struct_reorg && !seen_error ()
++      && flag_lto_partition == LTO_PARTITION_ONE && lang_c_p ()
++      && (in_lto_p || flag_whole_program))
+     return false;
+ 
+   return (warn_free_nonheap_object
+diff --git a/gcc/ipa-free-lang-data.cc b/gcc/ipa-free-lang-data.cc
+index 801e95cea..17e3f43b3 100644
+--- a/gcc/ipa-free-lang-data.cc
++++ b/gcc/ipa-free-lang-data.cc
+@@ -108,8 +108,9 @@ fld_simplified_type_name (tree type)
+   /* Simplify type will cause that struct A and struct A within
+      struct B are different type pointers, so skip it in structure
+      optimizations.  */
+-  if (flag_ipa_struct_reorg && lang_c_p ()
+-      && flag_lto_partition == LTO_PARTITION_ONE)
++  if (optimize >= 3 && flag_ipa_struct_reorg && !seen_error ()
++      && flag_lto_partition == LTO_PARTITION_ONE && lang_c_p ()
++      && (in_lto_p || flag_whole_program))
+     return TYPE_NAME (type);
+ 
+   if (!TYPE_NAME (type) || TREE_CODE (TYPE_NAME (type)) != TYPE_DECL)
+diff --git a/gcc/symbol-summary.h b/gcc/symbol-summary.h
+index 4f896f4e4..06a1c7fff 100644
+--- a/gcc/symbol-summary.h
++++ b/gcc/symbol-summary.h
+@@ -21,6 +21,10 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef GCC_SYMBOL_SUMMARY_H
+ #define GCC_SYMBOL_SUMMARY_H
+ 
++#include "diagnostic.h"
++/* Check whether in C language or LTO with only C language.  */
++extern bool lang_c_p (void);
++
+ /* Base class for function_summary and fast_function_summary classes.  */
+ 
+ template 
+@@ -109,7 +113,9 @@ protected:
+ 			     : m_allocator.allocate ();
+     /* In structure optimizatons, we call memset to ensure that
+        the allocated memory is initialized to 0.  */
+-    if (flag_ipa_struct_reorg)
++    if (optimize >= 3 && flag_ipa_struct_reorg && !seen_error ()
++	&& flag_lto_partition == LTO_PARTITION_ONE && lang_c_p ()
++	&& (in_lto_p || flag_whole_program))
+       memset (allocated, 0, sizeof (T));
+     return allocated;
+   }
+-- 
+2.33.0
+
diff --git a/0328-Bugfix-if-split-Added-checking-for-ssa_name.patch b/0328-Bugfix-if-split-Added-checking-for-ssa_name.patch
new file mode 100644
index 0000000000000000000000000000000000000000..840d32e95b00eba1757fc33f51d39b4f39a3fb5d
--- /dev/null
+++ b/0328-Bugfix-if-split-Added-checking-for-ssa_name.patch
@@ -0,0 +1,248 @@
+From e2896c73513cfb2dfd7c4796effef6ba06396f35 Mon Sep 17 00:00:00 2001
+From: Zinin Ivan WX1305386 
+Date: Fri, 13 Dec 2024 10:52:08 +0300
+Subject: [PATCH] [Bugfix][if-split] Added checking for ssa_name
+
+Added a check that each operand of an OR complex condition is
+really an SSA name in necessary_complex_cond_p ().  Also introduced
+the var_n_const struct, replacing var_const and cond_parts_defs.
+---
+ gcc/gimple-if-split.cc | 123 +++++++++++++++++++----------------------
+ 1 file changed, 56 insertions(+), 67 deletions(-)
+
+diff --git a/gcc/gimple-if-split.cc b/gcc/gimple-if-split.cc
+index 351515435..914b65d47 100644
+--- a/gcc/gimple-if-split.cc
++++ b/gcc/gimple-if-split.cc
+@@ -61,14 +61,14 @@ Example:
+ //-------------------------------------------------------------------------
+ /* Check if arg list of call got n.  */
+ bool
+-got_in_args_p (gimple* call, tree n)
++got_in_args_p (gimple *call, tree n)
+ {
+   unsigned num_args = gimple_call_num_args (call);
+ 
+   for (int i = 0; i < num_args; i++)
+     {
+       if (n == gimple_call_arg (call, i))
+-  return true;
++	return true;
+     }
+ 
+   return false;
+@@ -142,19 +142,24 @@ bb_got_necessary_call_p (basic_block bb, tree n, unsigned nesting)
+ //-------------------------------------------------------------------------
+ // Complex conditions
+ //-------------------------------------------------------------------------
+-/* Auxiliary struct which contains var and its constant of comaprison
+- * of expr: n == cst.  */
+-struct var_const
++/* Auxiliary struct which contains tree nodes of statements such as:
++ * var = (n == cst).  In some cases the var field may not be an SSA
++ * name and/or its rhs may not be a "comparison with constant"
++ * expression.  However, we need to fill the var field of such
++ * structures in necessary_complex_cond_p () to build/change
++ * conditions in process_complex_cond ().  */
++struct var_n_const
+ {
++  tree var = NULL_TREE;
+   tree n = NULL_TREE;
+   tree cst = NULL_TREE;
+ };
+ 
+ /* Check if var_def stmt got this pattern:
+  *    var = (n == const);
+- * If it does, we need to set var_cst struct.  */
++ * If it does, we need to set n and cst in var_n_cst struct.  */
+ static bool
+-comp_with_const_p (gimple *var_def, var_const *var_cst)
++comp_with_const_p (gimple *var_def, var_n_const *var_n_cst)
+ {
+   if (gimple_expr_code (var_def) != EQ_EXPR)
+     return false;
+@@ -164,33 +169,24 @@ comp_with_const_p (gimple *var_def, var_const *var_cst)
+   if (TREE_CODE (var_def_rhs2) != INTEGER_CST)
+     return false;
+ 
+-  var_cst->n = gimple_assign_rhs1 (var_def);
+-  var_cst->cst = var_def_rhs2;
++  var_n_cst->n = gimple_assign_rhs1 (var_def);
++  var_n_cst->cst = var_def_rhs2;
+ 
+   return true;
+ }
+ 
+-/* Auxiliary struct which contains defenition of each part of
+- * complex condition, like:
+- *    a = ... <- a_def
+- *    b = ... <- b_def
+- *    c = a | b  <- complex_cond.  */
+-struct cond_parts_defs
+-{
+-  gimple *a_def = NULL;
+-  gimple *b_def = NULL;
+-};
+-
+ /* Check if cond got this pattern:
+  *    a = ...; <- a_def
+  *    b = ...; <- b_def
+- *    c = a | b;
++ *    c = a | b; <- c_def
+  *    if (c != 0)
+- * and a_def or b_def is comparison with constant.  If it does,
+- * we need to set a with a_def and b with b_def.  */
++ * and a_def or b_def is comparison with constant.
++ * If it does, we need to set a_var_n_cst and b_var_n_cst.
++ * Also set a_var_n_cst->var as gimple_assign_rhs1 (c_def),
++ * b_var_n_cst->var as gimple_assign_rhs2 (c_def).  */
+ static bool
+ necessary_complex_cond_p (const gimple *cond, basic_block then_bb,
+-			  cond_parts_defs *defs)
++			  var_n_const *a_var_n_cst, var_n_const *b_var_n_cst)
+ {
+   tree lhs = gimple_cond_lhs (cond);
+   tree rhs = gimple_cond_rhs (cond);
+@@ -207,27 +203,25 @@ necessary_complex_cond_p (const gimple *cond, basic_block then_bb,
+       || gimple_expr_code (c_def) != BIT_IOR_EXPR)
+     return false;
+ 
+-  tree a_var = gimple_assign_rhs1 (c_def);
+-  tree b_var = gimple_assign_rhs2 (c_def);
+-  gimple *a_def = SSA_NAME_DEF_STMT (a_var);
+-  gimple *b_def = SSA_NAME_DEF_STMT (b_var);
+-
+-  if (!a_def || !is_gimple_assign (a_def) || !b_def
+-      || !is_gimple_assign (b_def))
+-    return false;
+-
+-  var_const var_cst;
+-
+-  if (!(comp_with_const_p (a_def, &var_cst)
+-	&& bb_got_necessary_call_p (then_bb, var_cst.n, SCALAR_NESTING))
+-      && !(comp_with_const_p (b_def, &var_cst)
+-	   && bb_got_necessary_call_p (then_bb, var_cst.n, SCALAR_NESTING)))
+-    return false;
+-
+-  defs->a_def = a_def;
+-  defs->b_def = b_def;
+-
+-  return true;
++  bool result = false;
++
++  gimple *a_def;
++  a_var_n_cst->var = gimple_assign_rhs1 (c_def);
++  if (TREE_CODE (a_var_n_cst->var) == SSA_NAME
++      && (a_def = SSA_NAME_DEF_STMT (a_var_n_cst->var))
++      && is_gimple_assign (a_def) && comp_with_const_p (a_def, a_var_n_cst)
++      && bb_got_necessary_call_p (then_bb, a_var_n_cst->n, SCALAR_NESTING))
++    result = true;
++
++  gimple *b_def;
++  b_var_n_cst->var = gimple_assign_rhs2 (c_def);
++  if (TREE_CODE (b_var_n_cst->var) == SSA_NAME
++      && (b_def = SSA_NAME_DEF_STMT (b_var_n_cst->var))
++      && is_gimple_assign (b_def) && comp_with_const_p (b_def, b_var_n_cst)
++      && bb_got_necessary_call_p (then_bb, b_var_n_cst->n, SCALAR_NESTING))
++    result = true;
++
++  return result;
+ }
+ 
+ /* Check if our complex condition seems to be "necessary"
+@@ -253,11 +247,10 @@ process_complex_cond (basic_block cond_bb, basic_block then_bb,
+ 		      basic_block else_bb)
+ {
+   gimple *cond = last_stmt (cond_bb);
+-  cond_parts_defs defs;
++  var_n_const a_var_n_cst, b_var_n_cst;
+ 
+-  if (!can_duplicate_block_p (then_bb)
+-      || !single_succ_p (then_bb)
+-      || !necessary_complex_cond_p (cond, then_bb, &defs))
++  if (!can_duplicate_block_p (then_bb) || !single_succ_p (then_bb)
++      || !necessary_complex_cond_p (cond, then_bb, &a_var_n_cst, &b_var_n_cst))
+     return;
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+@@ -267,19 +260,16 @@ process_complex_cond (basic_block cond_bb, basic_block then_bb,
+       print_gimple_stmt (dump_file, cond, 0, TDF_NONE);
+     }
+ 
+-  var_const var_cst;
+-
+   /* Setting cond.  */
+-  if (comp_with_const_p (defs.a_def, &var_cst))
+-      /* Setting cond as: if (n == const).  */
+-      gimple_cond_set_condition (as_a (cond), EQ_EXPR, var_cst.n,
+-					var_cst.cst);
++  if (a_var_n_cst.n != NULL_TREE && a_var_n_cst.cst != NULL_TREE)
++    /* Setting cond as: if (n == const).  */
++    gimple_cond_set_condition (as_a (cond), EQ_EXPR, a_var_n_cst.n,
++			       a_var_n_cst.cst);
+   else
+     {
+       /* Setting cond as: if (a != 0).  */
+-      tree cond_lhs = gimple_assign_lhs (defs.a_def);
+-      gimple_cond_set_condition (as_a (cond), NE_EXPR, cond_lhs,
+-      					build_zero_cst (TREE_TYPE (cond_lhs)));
++      gimple_cond_set_condition (as_a (cond), NE_EXPR, a_var_n_cst.var,
++				 build_zero_cst (TREE_TYPE (a_var_n_cst.var)));
+     }
+   update_stmt (cond);
+ 
+@@ -290,19 +280,18 @@ process_complex_cond (basic_block cond_bb, basic_block then_bb,
+ 
+   /* Setting inner_cond.  */
+   gcond *inner_cond = NULL;
+-  if (comp_with_const_p (defs.b_def, &var_cst))
++  if (b_var_n_cst.n != NULL_TREE && b_var_n_cst.cst != NULL_TREE)
+     {
+-      /* Setting inner cond as: if (b == const).  */
+-      inner_cond = gimple_build_cond (EQ_EXPR, var_cst.n, var_cst.cst,
++      /* Setting inner cond as: if (n == const).  */
++      inner_cond = gimple_build_cond (EQ_EXPR, b_var_n_cst.n, b_var_n_cst.cst,
+ 				      NULL_TREE, NULL_TREE);
+     }
+   else
+     {
+       /* Setting inner cond as: if (b != 0).  */
+-      tree inner_cond_lhs = gimple_assign_lhs (defs.b_def);
+       inner_cond = gimple_build_cond (
+-	  NE_EXPR, inner_cond_lhs, build_zero_cst (TREE_TYPE (inner_cond_lhs)),
+-	  NULL_TREE, NULL_TREE);
++	  NE_EXPR, b_var_n_cst.var,
++	  build_zero_cst (TREE_TYPE (b_var_n_cst.var)), NULL_TREE, NULL_TREE);
+     }
+   gimple_stmt_iterator gsi = gsi_last_bb (inner_cond_bb);
+   gsi_insert_after (&gsi, inner_cond, GSI_NEW_STMT);
+@@ -362,7 +351,7 @@ make_two_separate_calls (basic_block outer_cond_bb, basic_block inner_cond_bb,
+   if (single_succ (then_bb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
+     {
+       gcc_assert (gimple_code (last_stmt (then_bb)) == GIMPLE_RETURN);
+-      ret_val = gimple_return_retval (as_a(last_stmt (then_bb)));
++      ret_val = gimple_return_retval (as_a (last_stmt (then_bb)));
+ 
+       then_bb_succ_edge_flags = single_succ_edge (then_bb)->flags;
+     }
+@@ -373,7 +362,7 @@ make_two_separate_calls (basic_block outer_cond_bb, basic_block inner_cond_bb,
+    * if now merge_bb is pred of EXIT_BLOCK.  */
+   if (single_succ (merge_bb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
+     {
+-      gimple* ret = gimple_build_return (ret_val);
++      gimple *ret = gimple_build_return (ret_val);
+       gimple_stmt_iterator gsi = gsi_last_bb (merge_bb);
+       gsi_insert_after (&gsi, ret, GSI_NEW_STMT);
+ 
+@@ -400,7 +389,7 @@ make_two_separate_calls (basic_block outer_cond_bb, basic_block inner_cond_bb,
+ 			   single_succ (merge_bb));
+ 
+   if (get_immediate_dominator (CDI_POST_DOMINATORS, outer_cond_bb) == then_bb)
+-     set_immediate_dominator (CDI_POST_DOMINATORS, outer_cond_bb, merge_bb);
++    set_immediate_dominator (CDI_POST_DOMINATORS, outer_cond_bb, merge_bb);
+ 
+   return then_bb1;
+ }
+-- 
+2.33.0
+
diff --git a/0329-Fixed-work-with-loops-in-process_complex_cond.patch b/0329-Fixed-work-with-loops-in-process_complex_cond.patch
new file mode 100644
index 0000000000000000000000000000000000000000..510544adbb3bf9b4f188096d469522545016e942
--- /dev/null
+++ b/0329-Fixed-work-with-loops-in-process_complex_cond.patch
@@ -0,0 +1,90 @@
+From 66e1c68b47a1fd889e206be5572a2ba5d62afb4d Mon Sep 17 00:00:00 2001
+From: Zinin Ivan WX1305386 
+Date: Tue, 17 Dec 2024 22:07:36 +0800
+Subject: [PATCH] [if-split][BugFix]Fixed work with loops in
+ process_complex_cond()
+
+Signed-off-by: zhenyu--zhao_admin 
+---
+ gcc/gimple-if-split.cc        | 17 +++++++++++++++--
+ gcc/tree-loop-distribution.cc |  6 ++++++
+ gcc/tree-vect-loop.cc         |  4 ----
+ 3 files changed, 21 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/gimple-if-split.cc b/gcc/gimple-if-split.cc
+index 914b65d4782..b535ffab1c3 100644
+--- a/gcc/gimple-if-split.cc
++++ b/gcc/gimple-if-split.cc
+@@ -262,9 +262,11 @@ process_complex_cond (basic_block cond_bb, basic_block then_bb,
+ 
+   /* Setting cond.  */
+   if (a_var_n_cst.n != NULL_TREE && a_var_n_cst.cst != NULL_TREE)
+-    /* Setting cond as: if (n == const).  */
+-    gimple_cond_set_condition (as_a (cond), EQ_EXPR, a_var_n_cst.n,
++    {
++      /* Setting cond as: if (n == const).  */
++      gimple_cond_set_condition (as_a (cond), EQ_EXPR, a_var_n_cst.n,
+ 			       a_var_n_cst.cst);
++    }
+   else
+     {
+       /* Setting cond as: if (a != 0).  */
+@@ -276,8 +278,19 @@ process_complex_cond (basic_block cond_bb, basic_block then_bb,
+   /* Creating inner_cond_bb.  */
+   edge then_e = find_edge (cond_bb, then_bb);
+   edge else_e = find_edge (cond_bb, else_bb);
++
++  bool inner_cond_bb_need_set_loop = false;
++  if (else_e->dest->loop_father != else_e->src->loop_father)
++	inner_cond_bb_need_set_loop = true;
++
+   basic_block inner_cond_bb = split_edge (else_e);
+ 
++  if (inner_cond_bb_need_set_loop)
++    {
++	remove_bb_from_loops (inner_cond_bb);
++	add_bb_to_loop (inner_cond_bb, cond_bb->loop_father);
++    }
++
+   /* Setting inner_cond.  */
+   gcond *inner_cond = NULL;
+   if (b_var_n_cst.n != NULL_TREE && b_var_n_cst.cst != NULL_TREE)
+diff --git a/gcc/tree-loop-distribution.cc b/gcc/tree-loop-distribution.cc
+index 8d118e98739..f7a4690246c 100644
+--- a/gcc/tree-loop-distribution.cc
++++ b/gcc/tree-loop-distribution.cc
+@@ -5265,10 +5265,16 @@ loop_distribution::execute (function *fun)
+ 
+ 	  bool destroy_p;
+ 	  int nb_generated_loops, nb_generated_calls;
++
++	  vect_slp_init ();
++
+ 	  nb_generated_loops
+ 	    = distribute_loop (loop, work_list, cd, &nb_generated_calls,
+ 			       &destroy_p, (!optimize_loop_for_speed_p (loop)
+ 					    || !flag_tree_loop_distribution));
++
++	  vect_slp_fini ();
++
+ 	  if (destroy_p)
+ 	    loops_to_be_destroyed.safe_push (loop);
+ 
+diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
+index f296e9415c4..7f75779519a 100644
+--- a/gcc/tree-vect-loop.cc
++++ b/gcc/tree-vect-loop.cc
+@@ -3016,10 +3016,6 @@ vect_analyze_loop (class loop *loop, vec_info_shared *shared,
+   opt_loop_vec_info first_loop_vinfo = opt_loop_vec_info::success (NULL);
+   /* Loop_vinfo for loop-distribution pass.  */
+   opt_loop_vec_info fail_loop_vinfo = opt_loop_vec_info::success (NULL);
+-  if (result_only_p)
+-  {
+-     vect_slp_init ();
+-  }
+   unsigned int mode_i = 0;
+   unsigned HOST_WIDE_INT simdlen = loop->simdlen;
+ 
+-- 
+Gitee
+
diff --git a/0330-bugfix-fix-typo-error.patch b/0330-bugfix-fix-typo-error.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4f12d799d4ce38de44955926b1a1900659087295
--- /dev/null
+++ b/0330-bugfix-fix-typo-error.patch
@@ -0,0 +1,25 @@
+From 843b7577b5b255806978f338f6f99863693509d6 Mon Sep 17 00:00:00 2001
+From: Mingchuan Wu 
+Date: Wed, 18 Dec 2024 10:10:30 +0800
+Subject: [PATCH] [bugfix] fix typo error.
+
+---
+ gcc/opts-common.cc | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/gcc/opts-common.cc b/gcc/opts-common.cc
+index 35db76b84..ee94723fc 100644
+--- a/gcc/opts-common.cc
++++ b/gcc/opts-common.cc
+@@ -1070,8 +1070,6 @@ handle_machine_option (unsigned int lang_mask,
+     global_options.x_param_ipa_prefetch_distance_factor};
+   int64_t output_pred = get_optimize_decision_from_optimizer (
+ 			  argc, argv, "hip09", argc_hw, argv_hw);
+-  if (output_pred == 1)
+-    return output_pred;
+   if (output_pred != 1)
+     return ret;
+ 
+-- 
+2.33.0
+
diff --git a/0331-fix-function-missing-return-value.patch b/0331-fix-function-missing-return-value.patch
new file mode 100644
index 0000000000000000000000000000000000000000..003760b722db473269c431d10be7be12c231f5ff
--- /dev/null
+++ b/0331-fix-function-missing-return-value.patch
@@ -0,0 +1,25 @@
+From 8d8dff2b18de8149b4f9f03968abd1b6f4b8cc69 Mon Sep 17 00:00:00 2001
+From: rfwang07 
+Date: Sat, 21 Dec 2024 18:29:10 +0800
+Subject: [PATCH] fix: function missing return value
+
+---
+ gcc/final.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/final.cc b/gcc/final.cc
+index e4bfceabc..0252250ba 100644
+--- a/gcc/final.cc
++++ b/gcc/final.cc
+@@ -4443,7 +4443,7 @@ dump_direct_callee_info_to_asm (basic_block bb, gcov_type call_count)
+ }
+ 
+ /* Dump the edge info into asm.    */
+-static int
++static void
+ dump_edge_jump_info_to_asm (basic_block bb, gcov_type bb_count)
+ {
+   edge e;
+-- 
+2.39.5 (Apple Git-154)
+
diff --git a/0332-Bugfix-Can-not-find-fdata-file.patch b/0332-Bugfix-Can-not-find-fdata-file.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e9d81a4add22e8bcdc7fc825872355aa3d6ac343
--- /dev/null
+++ b/0332-Bugfix-Can-not-find-fdata-file.patch
@@ -0,0 +1,195 @@
+From e0b6e2e9d8226881886c28826a2fe2e55fd29511 Mon Sep 17 00:00:00 2001
+From: zhenyu--zhao_admin 
+Date: Sun, 29 Dec 2024 14:11:14 +0800
+Subject: [PATCH] [Bugfix] Can not find fdata file.
+
+---
+ gcc/ai-optimizer.cc | 49 ++++++++++++++++++++++++++++++++++++---------
+ gcc/ai4c-infer.cc   | 28 ++++----------------------
+ gcc/gcc.cc          | 31 +++++++++++++++++++++++++---
+ gcc/gcc.h           |  1 +
+ 4 files changed, 73 insertions(+), 36 deletions(-)
+
+diff --git a/gcc/ai-optimizer.cc b/gcc/ai-optimizer.cc
+index c3d99dd85..70ff24077 100644
+--- a/gcc/ai-optimizer.cc
++++ b/gcc/ai-optimizer.cc
+@@ -284,19 +284,50 @@ static int
+ graph_infer (int argc1, const char **argv1, const char *mops,
+              int argc2, int64_t *argv2)
+ {
+-  char *gcc_exec_prefix = getenv ("ONNX_FDATA_PATH");
+-  if (gcc_exec_prefix == NULL)
++  char gcc_exec_prefix[512];
++  ssize_t len = readlink ("/proc/self/exe", gcc_exec_prefix,
++  			  sizeof (gcc_exec_prefix) - 1);
++  if (len == -1)
+     return 0;
+-  char native_file[512];
+ 
+-  if (gcc_exec_prefix)
++  char native_file[512];
++  strncpy (native_file, gcc_exec_prefix, sizeof (native_file) - 1);
++  const char *target = "bin/gcc";
++  const char *target_cc1 = "cc1";
++  const char *target_gpp = "bin/g++";
++  const char *target_cc1plus = "cc1plus";
++  const char *target_gfortran = "bin/gfortran";
++  const char *target_f951 = "f951";
++  const char *replacement = "../libexec/gcc/optimizer.fdata";
++  const char *replacement_front_end = "../../optimizer.fdata";
++
++  /* Replace the part of the current executable file path after the last slash
++     to locate the model file.  */
++  if (strstr (native_file, target) != NULL ||
++      strstr (native_file, target_gpp) != NULL ||
++      strstr (native_file, target_gfortran) != NULL)
+     {
+-      const char *onnx_fdata = "optimizer.fdata";
+-      strncpy (native_file, gcc_exec_prefix, sizeof (native_file) - 1);
+-      native_file[sizeof (native_file) - 1] = '\0';
+       char *last_slash = strrchr (native_file, '/');
+-      if (last_slash)
+-	strcpy (last_slash + 1, onnx_fdata);
++      if (last_slash != NULL)
++	{
++	  size_t prefix_len = last_slash - native_file + 1;
++	  native_file[prefix_len] = '\0';
++	  strncat (native_file, replacement, sizeof (native_file) -
++		   strlen (native_file) - 1);
++	}
++    }
++  else if (strstr (native_file, target_cc1) != NULL ||
++	   strstr (native_file, target_cc1plus) != NULL ||
++	   strstr (native_file, target_f951) != NULL)
++    {
++      char *last_slash = strrchr (native_file, '/');
++      if (last_slash != NULL)
++	{
++	  size_t prefix_len = last_slash - native_file + 1;
++	  native_file[prefix_len] = '\0';
++	  strncat (native_file, replacement_front_end, sizeof (native_file) -
++		   strlen (native_file) - 1);
++	}
+     }
+ 
+   if (access (native_file, F_OK) == 0)
+diff --git a/gcc/ai4c-infer.cc b/gcc/ai4c-infer.cc
+index 42922e1ca..4cd4bfb00 100644
+--- a/gcc/ai4c-infer.cc
++++ b/gcc/ai4c-infer.cc
+@@ -333,29 +333,11 @@ preprocess (int argc, int64_t *argv, int64_t *in_modes)
+ static int
+ graph_infer (int argc, const char *argv, int argc2, int64_t *argv2)
+ {
+-  char *gcc_exec_prefix = getenv ("ONNX_FDATA_PATH");
+-  if (gcc_exec_prefix == NULL)
+-    return 0;
+-  char file_name[512];
+-
+-  if (gcc_exec_prefix)
+-    {
+-      const char *onnx_fdata = "onnx.fdata";
+-      strncpy (file_name, gcc_exec_prefix, sizeof (file_name) - 1);
+-      file_name[sizeof (file_name) - 1] = '\0';
+-      char *last_slash = strrchr (file_name, '/');
+-      if (last_slash)
+-	strcpy (last_slash + 1, onnx_fdata);
+-    }
+-
++  const char *file_name = getenv ("GCC_AI4C_ONNX_FDATA");
+   if (access (file_name, F_OK) == 0)
+-    {
+-      fill_node (file_name);
+-    }
++    fill_node (file_name);
+   else
+-    {
+-      return 0;
+-    }
++    return 0;
+ 
+   int64_t in_modes[M_MODE_SIZE];
+ 
+@@ -441,9 +423,7 @@ int
+ get_optimize_decision_from_ai4c ()
+ {
+   if (initialized== 1)
+-    {
+-      return optimize_result;
+-    }
++    return optimize_result;
+   if (native_tune && (strchr (native_tune, '+') != NULL))
+     {
+       char hash[65];
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index 179d507f2..b4beb1957 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -8133,6 +8133,7 @@ driver::main (int argc, char **argv)
+   putenv_COLLECT_GCC (argv[0]);
+   maybe_putenv_COLLECT_LTO_WRAPPER ();
+   maybe_putenv_OFFLOAD_TARGETS ();
++  putenv_ONNX_FDATA ();
+   handle_unrecognized_options ();
+ 
+   if (completion)
+@@ -8189,9 +8190,6 @@ driver::expand_at_files (int *argc, char ***argv) const
+ void
+ driver::decode_argv (int argc, const char **argv)
+ {
+-  const char* libexec_path = standard_libexec_prefix;
+-  if (libexec_path)
+-    setenv ("ONNX_FDATA_PATH", libexec_path, 1);
+   init_opts_obstack ();
+   init_options_struct (&global_options, &global_options_set);
+ 
+@@ -8590,6 +8588,33 @@ driver::maybe_putenv_COLLECT_LTO_WRAPPER () const
+ 
+ }
+ 
++/* Set up to remember the pathname of the onnx.fdata.  */
++
++void
++driver::putenv_ONNX_FDATA () const
++{
++  char *lto_wrapper_file;
++  lto_wrapper_file = find_a_program ("lto-wrapper");
++
++  if (lto_wrapper_file)
++    {
++      lto_wrapper_file = convert_white_space (lto_wrapper_file);
++      char native_file[512];
++      const char *onnx_fdata = "../../onnx.fdata";
++      strncpy (native_file, lto_wrapper_file, sizeof (native_file) - 1);
++      native_file[sizeof (native_file) - 1] = '\0';
++      char *last_slash = strrchr (native_file, '/');
++      if (last_slash)
++	strcpy (last_slash + 1, onnx_fdata);
++      obstack_init (&collect_obstack);
++      obstack_grow (&collect_obstack, "GCC_AI4C_ONNX_FDATA=",
++		    sizeof ("GCC_AI4C_ONNX_FDATA=") - 1);
++      obstack_grow (&collect_obstack,  native_file,
++		    strlen ( native_file) + 1);
++      xputenv (XOBFINISH (&collect_obstack, char *));
++    }
++}
++
+ /* Set up to remember the names of offload targets.  */
+ 
+ void
+diff --git a/gcc/gcc.h b/gcc/gcc.h
+index 63231ddb3..ff3ae8bed 100644
+--- a/gcc/gcc.h
++++ b/gcc/gcc.h
+@@ -44,6 +44,7 @@ class driver
+   void set_up_specs () const;
+   void putenv_COLLECT_GCC (const char *argv0) const;
+   void maybe_putenv_COLLECT_LTO_WRAPPER () const;
++  void putenv_ONNX_FDATA () const;
+   void maybe_putenv_OFFLOAD_TARGETS () const;
+   void handle_unrecognized_options ();
+   int maybe_print_and_exit () const;
+-- 
+2.43.0
+
diff --git a/0333-CSPGO-Update-the-gate-of-cspgo.patch b/0333-CSPGO-Update-the-gate-of-cspgo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bd5307c3c0514e84c1cf8129f55d9af6dcf92d3a
--- /dev/null
+++ b/0333-CSPGO-Update-the-gate-of-cspgo.patch
@@ -0,0 +1,88 @@
+From 25f3b77d288e26b198c7836c3ed9b4fb0a85a48a Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Mon, 16 Dec 2024 15:52:22 +0800
+Subject: [PATCH] [CSPGO] Update the gate of cspgo
+
+Update gate to allow CSPGO to be enabled after PGO
+---
+ gcc/tree-profile.cc | 59 +++++++++++++++++++++++++++------------------
+ 1 file changed, 36 insertions(+), 23 deletions(-)
+
+diff --git a/gcc/tree-profile.cc b/gcc/tree-profile.cc
+index ace1fe31c..3c57a0a75 100644
+--- a/gcc/tree-profile.cc
++++ b/gcc/tree-profile.cc
+@@ -1108,34 +1108,47 @@ public:
+   /* opt_pass methods: */
+   virtual bool gate (function *)
+     {
+-      return (flag_csprofile_generate || flag_csprofile_use);
+-    }
+-  /* The main process of cspgo is in csprofile_transform, execute does not need
+-     to do anything.  */
+-  virtual unsigned int execute (function *)
+-    {
+-      if (!profile_data_prefix)
+-	error ("profile_data_prefix must set when using cspgo.");
++      if (flag_csprofile_generate || flag_csprofile_use)
++	{
++	  int ret = true;
++	  if (!profile_data_prefix)
++	    {
++	      error ("pgo profile path must set when using cspgo.");
++	      ret = false;
++	    }
+ 
+-      if (!csprofile_data_prefix)
+-	error ("csprofile_data_prefix must set when using cspgo.");
++	  if (!csprofile_data_prefix)
++	    {
++	      error ("cspgo profile path must set when using cspgo.");
++	      ret = false;
++	    }
+ 
+-      if (!flag_cfgo_profile_use)
+-	error ("cspgo must used with cfgo-pgo.");
++	  if (!(flag_cfgo_profile_use || flag_profile_use))
++	    {
++	      error ("cspgo must used with cfgo-pgo or pgo.");
++	      ret = false;
++	    }
+ 
+-      /* Just compare canonical pathnames.  */
+-      char* cfgo_pgo_path = lrealpath (profile_data_prefix);
+-      char* cfgo_cspgo_path = lrealpath (csprofile_data_prefix);
+-      bool files_differ = filename_cmp (cfgo_pgo_path, cfgo_cspgo_path);
+-      if (!files_differ)
+-	{
+-	  error ("pgo and cspgo path must different between %s and %s",
+-		 cfgo_pgo_path, cfgo_cspgo_path);
++	  /* pgo and cspgo path must different.  */
++	  char* cfgo_pgo_path = lrealpath (profile_data_prefix);
++	  char* cfgo_cspgo_path = lrealpath (csprofile_data_prefix);
++	  bool files_differ = filename_cmp (cfgo_pgo_path, cfgo_cspgo_path);
++	  if (!files_differ)
++	    {
++	      error ("pgo and cspgo path must different between %s and %s",
++		     cfgo_pgo_path, cfgo_cspgo_path);
++	      ret = false;
++	    }
++	  free (cfgo_pgo_path);
++	  free (cfgo_cspgo_path);
++
++	  return ret;
+ 	}
+-      free (cfgo_pgo_path);
+-      free (cfgo_cspgo_path);
+-      return 0;
++      return false;
+     }
++  /* The main process of cspgo is in csprofile_transform, execute does not need
++     to do anything.  */
++  virtual unsigned int execute (function *) { return 0; }
+ 
+ }; // class pass_ipa_csprofile
+ 
+-- 
+2.25.1
+
diff --git a/0334-Dont-use-local_detect_cpu-when-cross-build.patch b/0334-Dont-use-local_detect_cpu-when-cross-build.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d951bdc27620491508e9d3d9f6c45af361f2e0a2
--- /dev/null
+++ b/0334-Dont-use-local_detect_cpu-when-cross-build.patch
@@ -0,0 +1,27 @@
+From cd708367a6558eca37715f8068f044a55402edab Mon Sep 17 00:00:00 2001
+From: YunQiang Su 
+Date: Wed, 18 Dec 2024 14:22:03 +0800
+Subject: [PATCH] Don't use local_detect_cpu when cross build
+
+-march=native makes no sense for cross build.
+---
+ gcc/gcc.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index 179d507f255..f2387e0fae2 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -5800,7 +5800,7 @@ do_self_spec (const char *spec)
+   do_spec_1 (" ", 0, NULL);
+ 
+   const char* tune_native = NULL;
+-#if defined (__x86_64__) || defined (__aarch64__)
++#if !defined(CROSS_DIRECTORY_STRUCTURE) && (defined (__x86_64__) || defined (__aarch64__))
+   tune_native = eval_spec_function ("local_cpu_detect", "cpu", "");
+ #endif
+   if (tune_native == NULL)
+-- 
+Gitee
+
+
diff --git a/0335-fix-costs-for-hip09.patch b/0335-fix-costs-for-hip09.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8923faaa74466b9993d648e6355b0a4d7b5d8d38
--- /dev/null
+++ b/0335-fix-costs-for-hip09.patch
@@ -0,0 +1,41 @@
+From 00cd602772e471a18b3b36abfa3bde382d239b1f Mon Sep 17 00:00:00 2001
+From: Mingchuan Wu 
+Date: Thu, 26 Dec 2024 11:11:41 +0800
+Subject: [PATCH] [bugfix] fix costs for hip09.
+
+---
+ gcc/config/aarch64/aarch64.cc | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index e487ba12bad..65b684ef60f 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -749,8 +749,8 @@ static const struct cpu_regmove_cost hip09_regmove_cost =
+   1, /* GP2GP  */
+   /* Avoid the use of slow int<->fp moves for spilling by setting
+      their cost higher than memmov_cost.  */
+-  2, /* GP2FP  */
+-  3, /* FP2GP  */
++  5, /* GP2FP  */
++  5, /* FP2GP  */
+   2  /* FP2FP  */
+ };
+ 
+@@ -1923,10 +1923,10 @@ static const struct tune_params hip09_tunings =
+     4, /* load_pred.  */
+     4 /* store_pred.  */
+   }, /* memmov_cost.  */
+-  4,    /* issue_rate  */
++  2,    /* issue_rate  */
+   (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
+    | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
+-  "16", /* function_align.  */
++  "16:12", /* function_align.  */
+   "4",  /* jump_align.  */
+   "8",  /* loop_align.  */
+   2,    /* int_reassoc_width.  */
+-- 
+Gitee
+
+
diff --git a/0336-sfc-Add-struct-static-field-compression-optimization.patch b/0336-sfc-Add-struct-static-field-compression-optimization.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fa17d963e0886b44b0e8a150a9fb8b974d93403c
--- /dev/null
+++ b/0336-sfc-Add-struct-static-field-compression-optimization.patch
@@ -0,0 +1,2572 @@
+From 2ade84b2163ec1065148c0ef95dcc20b5b86e4a3 Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Wed, 5 Feb 2025 09:51:53 +0800
+Subject: [PATCH] [sfc] Add struct static field compression optimization
+
+---
+ gcc/common.opt                                |   12 +
+ gcc/config/aarch64/aarch64.cc                 |    7 +-
+ gcc/doc/invoke.texi                           |    1 +
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      | 1531 ++++++++++++++++-
+ gcc/ipa-struct-reorg/ipa-struct-reorg.h       |  123 +-
+ .../gcc.dg/struct/sfc-bitfield_compress.c     |   31 +
+ .../sfc-shadow_assign_before_unpair_stmt.c    |   29 +
+ .../gcc.dg/struct/sfc-shadow_malloc.c         |   28 +
+ .../struct/sfc-shadow_multiple_fields.c       |   28 +
+ .../struct/sfc-shadow_multiple_unpair_stmts.c |   32 +
+ .../sfc-shadow_read_between_pair_stmts.c      |   38 +
+ .../gcc.dg/struct/sfc-shadow_rhs_not_equal.c  |   25 +
+ .../gcc.dg/struct/sfc-shadow_two_fields.c     |   25 +
+ .../gcc.dg/struct/sfc-shadow_unpair_stmt.c    |   28 +
+ .../sfc-shadow_unpair_stmt_different_array.c  |   30 +
+ gcc/testsuite/gcc.dg/struct/sfc_big_value.c   |   28 +
+ gcc/testsuite/gcc.dg/struct/sfc_compress.c    |   31 +
+ .../gcc.dg/struct/sfc_no_hot_access.c         |   26 +
+ .../gcc.dg/struct/sfc_recursive_type.c        |   28 +
+ .../struct/sfc_rhs_non_single_constant.c      |   29 +
+ gcc/testsuite/gcc.dg/struct/struct-reorg.exp  |   12 +
+ 21 files changed, 2088 insertions(+), 34 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-bitfield_compress.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_assign_before_unpair_stmt.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_fields.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_unpair_stmts.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_read_between_pair_stmts.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_rhs_not_equal.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_two_fields.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt_different_array.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc_big_value.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc_compress.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc_no_hot_access.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc_recursive_type.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc_rhs_non_single_constant.c
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 00fadadb0..6ab7ba4cc 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2062,6 +2062,18 @@ Common RejectNegative Joined UInteger Var(struct_layout_optimize_level) Init(0)
+ -fipa-struct-reorg=[0,1,2,3,4,5,6] adding none, struct-reorg, reorder-fields,
+ dfe, safe-pointer-compression, unsafe-pointer-compression, semi-relayout optimizations.
+ 
++fipa-struct-sfc
++Common Var(flag_ipa_struct_sfc) Init(0) Optimization
++Perform static structure field compression.
++
++fipa-struct-sfc-bitfield
++Common Var(flag_ipa_struct_sfc_bitfield) Init(0) Optimization
++Enable compressing to bitfield in static struct field compression.
++
++fipa-struct-sfc-shadow
++Common Var(flag_ipa_struct_sfc_shadow) Init(0) Optimization
++Enable field shadowing optimization in static struct field compression.
++
+ fipa-vrp
+ Common Var(flag_ipa_vrp) Optimization
+ Perform IPA Value Range Propagation.
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 52ce7d905..e9c387b24 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -18875,8 +18875,11 @@ static void
+ override_C_optimize_options (struct gcc_options *opts)
+ {
+   opts->x_flag_ipa_reorder_fields = 1;
+-  opts->x_flag_ipa_struct_reorg = 6;
+-  opts->x_struct_layout_optimize_level = 6;
++  opts->x_flag_ipa_struct_reorg = 5;
++  opts->x_struct_layout_optimize_level = 5;
++  opts->x_flag_ipa_struct_sfc = 1;
++  opts->x_flag_ipa_struct_sfc_bitfield = 1;
++  opts->x_flag_ipa_struct_sfc_shadow = 1;
+   opts->x_flag_gnu89_inline = 1;
+   opts->x_flag_convert_minmax = 1;
+   opts->x_flag_tree_slp_transpose_vectorize = 1;
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 2d906ee56..109858f76 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -528,6 +528,7 @@ Objective-C and Objective-C++ Dialects}.
+ -fipa-bit-cp  -fipa-vrp  -fipa-pta  -fipa-profile  -fipa-pure-const @gol
+ -fipa-reorder-fields @gol
+ -fipa-struct-reorg @gol
++-fipa-struct-sfc  -fipa-struct-sfc-bitfield  -fipa-struct-sfc-shadow @gol
+ -fipa-reference  -fipa-reference-addressable @gol
+ -fipa-stack-alignment  -fipa-icf  -fira-algorithm=@var{algorithm} @gol
+ -flive-patching=@var{level} @gol
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index af91f15c5..d3beebc00 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -108,6 +108,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "gimplify-me.h"
+ #include "cfgloop.h"
+ #include "langhooks.h"
++#include "cfgexpand.h"
+ 
+ /* Check whether in C language or LTO with only C language.  */
+ 
+@@ -148,6 +149,17 @@ using namespace struct_relayout;
+ #define VOID_POINTER_P(type) \
+   (POINTER_TYPE_P (type) && VOID_TYPE_P (TREE_TYPE (type)))
+ 
++#define FC_DUMP_MSG(message) \
++  do \
++    { \
++      if (dump_file && (dump_flags & TDF_DETAILS)) \
++	fprintf (dump_file, "[field compress] %s", (message)); \
++    } while (0)
++
++/* Flags for operand_equal_p to treat decls with the same name equal.  */
++
++#define COMPARE_DECL_FLAGS (OEP_DECL_NAME | OEP_LEXICOGRAPHIC)
++
+ static void
+ set_var_attributes (tree var)
+ {
+@@ -250,6 +262,51 @@ gimplify_build1 (gimple_stmt_iterator *gsi, enum tree_code code, tree type,
+ 				   GSI_SAME_STMT);
+ }
+ 
++/* Create a conditional expression as COND ? VAL1 : VAL2.  */
++
++static inline tree
++build_cond_expr (tree cond, tree val1, tree val2)
++{
++  if (TREE_CODE (TREE_TYPE (cond)) != BOOLEAN_TYPE)
++    cond = fold_build2 (NE_EXPR, boolean_type_node, cond,
++			build_zero_cst (TREE_TYPE (cond)));
++
++  return fold_build3 (COND_EXPR, TREE_TYPE (val1), cond, val1, val2);
++}
++
++/* Given a struct/class pointer ADDR, and FIELD_DECL belonging to the
++   struct/class, create a field reference expression.  */
++
++static inline tree
++build_field_ref (tree addr, tree field_decl)
++{
++  enum tree_code code;
++
++  if (DECL_BIT_FIELD (field_decl))
++    code = BIT_FIELD_REF;
++  else
++    code = COMPONENT_REF;
++
++  tree base = TREE_CODE (addr) == MEM_REF ? addr : build_simple_mem_ref (addr);
++
++  return build3 (code, TREE_TYPE (field_decl), base, field_decl, NULL_TREE);
++}
++
++/* Build a convert gimple to cast RHS to LHS.  */
++
++tree
++build_convert_gimple (tree lhs, tree rhs, gimple_stmt_iterator *gsi)
++{
++  tree ltype = TREE_TYPE (lhs);
++  tree rtype = TREE_TYPE (rhs);
++  if (types_compatible_p (ltype, rtype))
++    return NULL_TREE;
++
++  rhs = fold_build1 (CONVERT_EXPR, ltype, rhs);
++  rhs = force_gimple_operand_gsi (gsi, rhs, true, NULL, true, GSI_SAME_STMT);
++  return rhs;
++}
++
+ /* Get the number of pointer layers.  */
+ 
+ int
+@@ -283,6 +340,15 @@ is_from_void_ptr_parm (tree ssa_name)
+ 	  && VOID_POINTER_P (TREE_TYPE (ssa_name)));
+ }
+ 
++/* Check if STMT is a gimple assign whose rhs code is CODE.  */
++
++static bool
++gimple_assign_rhs_code_p (gimple *stmt, enum tree_code code)
++{
++  return stmt && is_gimple_assign (stmt)
++	 && gimple_assign_rhs_code (stmt) == code;
++}
++
+ /* Enum the struct layout optimize level,
+    which should be the same as the option -fstruct-reorg=.  */
+ 
+@@ -298,6 +364,29 @@ enum struct_layout_opt_level
+   SEMI_RELAYOUT = 1 << 6
+ };
+ 
++srfunction *current_function;
++vec csrfun_stack;
++
++class csrfun_context
++{
++public:
++  csrfun_context (srfunction *srfun)
++  {
++    csrfun_stack.safe_push (current_function);
++    current_function = srfun;
++
++    push_cfun (DECL_STRUCT_FUNCTION (srfun->node->decl));
++  }
++
++  ~csrfun_context ()
++  {
++    pop_cfun ();
++    current_function = csrfun_stack.pop ();
++  }
++};
++
++#define SET_CFUN(srfn) csrfun_context csrfn_ctx(srfn);
++
+ /* Defines the target pointer size of compressed pointer, which should be 8,
+    16, 32.  */
+ 
+@@ -412,7 +501,9 @@ srfield::srfield (tree field, srtype *base)
+     base (base),
+     type (NULL),
+     clusternum (0),
+-    field_access (EMPTY_FIELD)
++    field_access (EMPTY_FIELD),
++    static_fc_field (NULL),
++    field_class (NULL)
+ {
+   for (int i = 0; i < max_split; i++)
+     newfield[i] = NULL_TREE;
+@@ -430,7 +521,8 @@ srtype::srtype (tree type)
+     has_legal_alloc_num (false),
+     has_alloc_array (0),
+     semi_relayout (false),
+-    bucket_parts (0)
++    bucket_parts (0),
++    fc_info (NULL)
+ {
+   for (int i = 0; i < max_split; i++)
+     newtype[i] = NULL_TREE;
+@@ -467,8 +559,8 @@ srtype::has_dead_field (void)
+   FOR_EACH_VEC_ELT (fields, i, this_field)
+     {
+       /* Function pointer members are not processed, because DFE
+-         does not currently support accurate analysis of function
+-         pointers, and we have not identified specific use cases. */
++	 does not currently support accurate analysis of function
++	 pointers, and we have not identified specific use cases.  */
+       if (!(this_field->field_access & READ_FIELD)
+ 	 && !FUNCTION_POINTER_TYPE_P (this_field->fieldtype))
+ 	{
+@@ -844,15 +936,11 @@ srfield::create_new_reorder_fields (tree newtype[max_split],
+   tree nt = NULL_TREE;
+   if (type == NULL)
+     /* Common var.  */
+-    nt = fieldtype;
++    nt = static_fc_field ? static_fc_field->new_type : fieldtype;
+   else
+-    {
+-      /* RECORD_TYPE var.  */
+-      if (type->has_escaped ())
+-	nt = type->type;
+-      else
+-	nt = type->newtype[0];
+-    }
++    /* RECORD_TYPE var.  */
++    nt = type->has_escaped () ? type->type : type->newtype[0];
++
+   tree field = make_node (FIELD_DECL);
+ 
+   /* Used for recursive types.
+@@ -898,12 +986,42 @@ srfield::create_new_reorder_fields (tree newtype[max_split],
+   TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (fielddecl);
+   DECL_CONTEXT (field) = newtype[clusternum];
+ 
++  if (flag_ipa_struct_sfc && base->fc_info && base->fc_info->static_fc_p)
++    {
++      DECL_PACKED (field) = 1;
++
++      if (static_fc_field)
++	{
++	  /* Always not align compressed fields.  */
++	  SET_DECL_ALIGN (field, 0);
++
++	  if (static_fc_field->bits)
++	    {
++	      DECL_BIT_FIELD (field) = 1;
++	      DECL_SIZE (field) = bitsize_int (static_fc_field->bits);
++	      DECL_NONADDRESSABLE_P (field) = 1;
++	      /* Build unsigned bitfield integer type.  */
++	      nt = build_nonstandard_integer_type (static_fc_field->bits, 1);
++	      TREE_TYPE (field) = nt;
++	      static_fc_field->new_type = nt;
++	    }
++	}
++    }
++
+   reorder_fields (newfields, newlast, field);
+ 
+   /* srfield member variable, which stores the new field decl.  */
+   newfield[0] = field;
+ }
+ 
++bool
++srfield::dead_field_p ()
++{
++  return current_layout_opt_level & DEAD_FIELD_ELIMINATION
++	       && !(field_access & READ_FIELD)
++	       && !FUNCTION_POINTER_TYPE_P (fieldtype);
++}
++
+ /* Given a struct s whose fields has already reordered by size, we try to
+    combine fields less than 8 bytes together to 8 bytes.  Example:
+    struct s {
+@@ -964,6 +1082,35 @@ srtype::calculate_bucket_size ()
+   return parts * relayout_part_size;
+ }
+ 
++bool
++srtype::has_recursive_field_type ()
++{
++  /* A dead field is ignored as it will be removed in transformation.  */
++  for (const auto &srf : fields)
++    if (srf->type == this && !srf->dead_field_p ())
++      return true;
++  return false;
++}
++
++void
++srtype::check_fc_fields ()
++{
++  if (!fc_info || !fc_info->static_fc_p)
++    return;
++
++  for (unsigned i = 0; i < fields.length (); i++)
++    {
++      fc_field *fc_f;
++      unsigned j;
++      FOR_EACH_VEC_ELT (fc_info->static_fc_fields, j, fc_f)
++      if (fields[i]->fielddecl == fc_f->field)
++	{
++	  fields[i]->static_fc_field = fc_f;
++	  break;
++	}
++    }
++}
++
+ /* Create the new TYPE corresponding to THIS type.  */
+ 
+ bool
+@@ -1037,12 +1184,11 @@ srtype::create_new_type (void)
+ 	}
+     }
+ 
++  check_fc_fields ();
+   for (unsigned i = 0; i < fields.length (); i++)
+     {
+       srfield *f = fields[i];
+-      if (current_layout_opt_level & DEAD_FIELD_ELIMINATION
+-	  && !(f->field_access & READ_FIELD)
+-	  && !FUNCTION_POINTER_TYPE_P (f->fieldtype))
++      if (f->dead_field_p ())
+ 	{
+ 	  /* Fields with escape risks should not be processed. */
+ 	  if (f->type == NULL || (f->type->escapes == does_not_escape))
+@@ -1268,10 +1414,32 @@ srfield::simple_dump (FILE *f)
+     fprintf (f, "field (%d)", DECL_UID (fielddecl));
+ }
+ 
++sraccess::sraccess (tree e, gimple *s, cgraph_node *n, srfunction *srfn,
++		    srtype *t, tree b, srfield *f)
++    : expr (e),
++      stmt (s),
++      node (n),
++      function (srfn),
++      type (t),
++      base (b),
++      field (f)
++{
++  for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
++    {
++      if (gimple_op (stmt, i) == expr)
++	{
++	  index = i;
++	  return;
++	}
++    }
++
++  gcc_unreachable ();
++}
++
+ /* Dump out the access structure to FILE.  */
+ 
+ void
+-sraccess::dump (FILE *f)
++sraccess::dump (FILE *f) const
+ {
+   fprintf (f, "access { ");
+   fprintf (f, "type = '(");
+@@ -1291,6 +1459,36 @@ sraccess::dump (FILE *f)
+   fprintf (f, "}\n");
+ }
+ 
++/* Check if it's an assignment to the given type.  */
++
++bool
++sraccess::write_type_p (tree type) const
++{
++  return this->type && this->type->type == type
++	 && is_gimple_assign (stmt)
++	 && index == 0;
++}
++
++/* Check if it's an assignment to the given field.  */
++
++bool
++sraccess::write_field_p (tree fielddecl) const
++{
++  return field && field->fielddecl == fielddecl
++	 && is_gimple_assign (stmt)
++	 && index == 0;
++}
++
++/* Check if it's an assignment that read the given field.  */
++
++bool
++sraccess::read_field_p (tree fielddecl) const
++{
++  return field && field->fielddecl == fielddecl
++	 && is_gimple_assign (stmt)
++	 && index > 0;
++}
++
+ /* Dump out the decl structure to FILE.  */
+ 
+ void
+@@ -1306,6 +1504,79 @@ srdecl::dump (FILE *file)
+   type->simple_dump (file);
+ }
+ 
++void
++fc_field_class::dump (FILE *file) const
++{
++  fprintf (file, "field type: ");
++  print_generic_expr (file, fieldtype);
++  fprintf (file, "\n");
++  fprintf (file, "fields: ");
++
++  unsigned i;
++  srfield *srf;
++  FOR_EACH_VEC_ELT (srfields, i, srf)
++    {
++      print_generic_expr (file, srf->fielddecl);
++      if (i == srfields.length () - 1)
++	fprintf (file, "\n");
++      else
++	fprintf (file, ", ");
++    }
++}
++
++unsigned
++fc_field_class::size () const
++{
++  return srfields.length ();
++}
++
++/* Search and return the index of the given srfield.
++   Return -1 if couldn't find the srfield.  */
++
++int
++fc_field_class::get_field_index (srfield *field) const
++{
++  unsigned i;
++  srfield *srf;
++  FOR_EACH_VEC_ELT (srfields, i, srf)
++    if (srf == field)
++      return i;
++
++  return -1;
++}
++
++fc_field_class *
++fc_type_info::find_field_class_by_type (tree type) const
++{
++  for (auto *field_class : field_classes)
++    {
++      if (field_class->fieldtype == type)
++	return field_class;
++    }
++
++  return NULL;
++}
++
++fc_field_class *
++fc_type_info::record_field_class (srfield *srf)
++{
++  if (srf->field_class)
++    return srf->field_class;
++
++  fc_field_class *field_class = find_field_class_by_type (srf->fieldtype);
++
++  if (!field_class)
++    {
++      field_class = new fc_field_class (srf->fieldtype);
++      field_classes.safe_push (field_class);
++    }
++
++  srf->field_class = field_class;
++  field_class->srfields.safe_push (srf);
++
++  return field_class;
++}
++
+ } // namespace struct_reorg
+ 
+ 
+@@ -1397,22 +1668,33 @@ csrtype::init_type_info (void)
+ 
+ namespace {
+ 
++struct const_map
++{
++  tree var;
++  HOST_WIDE_INT value;
++  const_map (tree var, HOST_WIDE_INT value)
++    : var (var), value (value)
++  {}
++};
++
+ struct ipa_struct_reorg
+ {
++private:
++  auto_vec_del global_consts;
++
+ public:
+   // Constructors
+   ipa_struct_reorg (void)
+-    : current_function (NULL),
+-      done_recording (false)
++    : done_recording (false)
+   {}
+ 
+   // Fields
+   auto_vec_del types;
+   auto_vec_del functions;
+   srglobal globals;
+-  srfunction *current_function;
+   hash_set  safe_functions;
+   auto_vec ext_func_types;
++  auto_vec_del fc_infos;
+ 
+   bool done_recording;
+ 
+@@ -1557,6 +1839,43 @@ public:
+   void relayout_field_copy (gimple_stmt_iterator *, gimple *, tree, tree,
+ 			    tree&, tree &);
+   bool do_semi_relayout (gimple_stmt_iterator *, gimple *, tree &, tree &);
++
++  // field-compress methods:
++  bool get_base_type (tree, tree &, srtype *&, srfield *&);
++  void check_and_prune_struct_for_field_compression ();
++  bool find_field_compression_candidate (srtype *);
++  void classify_fields (fc_type_info *);
++  bool find_static_fc_fields (fc_type_info *);
++  bool compress_fields (fc_type_info *);
++  bool find_shadow_fields (fc_type_info *);
++  bool find_shadow_fields (fc_type_info *, fc_field_class *);
++  bool find_pair_stmts (fc_field_class *, fc_shadow_info &);
++  void add_pair_stmts_group (fc_shadow_info &,
++			     const auto_vec &,
++			     const auto_vec &);
++  srfield *read_field_in_fc_class_p (gimple *, fc_field_class *);
++  srfield *write_field_in_fc_class_p (gimple *, fc_field_class *);
++  fc_field *fc_fields_contains (auto_vec &, tree);
++  bool fc_pair_stmts_rhs_equal_p (const auto_vec &);
++  bool fc_operand_equal_p (tree, tree);
++  bool fc_global_const_p (tree, HOST_WIDE_INT &);
++  bool fc_peephole_const_p (tree, HOST_WIDE_INT &);
++  const_map *find_global_const (tree);
++  bool check_unpair_stmt (fc_field_class *, gimple *,
++			  srfunction *, srfield *);
++  tree find_mem_base (tree);
++  gimple *find_alloc_stmt (tree);
++  bool static_compress_p (fc_type_info *, tree);
++  HOST_WIDE_INT find_max_value (srtype *, tree);
++  std::pair find_assign_max_value (gimple *);
++  bool struct_copy_p (gimple *, tree);
++  bool find_hot_access (fc_type_info *, auto_vec &);
++  void cleanup_shadow_write (fc_type_info *);
++  void rewrite_shadow_read (fc_type_info *);
++  void insert_shadow_stmt (gimple *, unsigned, fc_field *, tree);
++  bool compress_fields_static (fc_type_info *info);
++  void compress_to_bitfields (fc_type_info *info);
++  auto_vec collect_all_predecessor (gimple *);
+ };
+ 
+ struct ipa_struct_relayout
+@@ -2224,13 +2543,16 @@ ipa_struct_reorg::dump_newtypes (FILE *f)
+ 				       field; field = DECL_CHAIN (field))
+ 	  {
+ 	    fprintf (f, "field (%d) ", DECL_UID (field));
+-	    fprintf (f, "{");
++	    print_generic_expr (f, field);
++	    fprintf (f, " {");
+ 	    fprintf (f, "type = ");
+ 	    print_generic_expr (f, TREE_TYPE (field));
+ 	    fprintf (f, "}\n");
+ 	  }
+-	fprintf (f, "}\n ");
+-	fprintf (f, "\n");
++	fprintf (f, "}\n");
++	fprintf (f, "size : ");
++	print_generic_expr (f, TYPE_SIZE_UNIT (type->newtype[0]));
++	fprintf (f, "\n\n");
+     }
+ }
+ 
+@@ -3939,6 +4261,9 @@ ipa_struct_reorg::maybe_record_assign (cgraph_node *node, gassign *stmt)
+ 	mark_type_as_escape (TREE_TYPE (lhs), escape_array, stmt);
+       if (TREE_CODE (rhs) == ARRAY_REF)
+ 	mark_type_as_escape (TREE_TYPE (rhs), escape_array, stmt);
++
++      record_stmt_expr (lhs, node, stmt);
++      record_stmt_expr (rhs, node, stmt);
+     }
+ }
+ 
+@@ -4401,7 +4726,8 @@ ipa_struct_reorg::record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt)
+ 
+ 
+   /* Record it.  */
+-  type->add_access (new sraccess (stmt, node, type, field));
++  type->add_access (new sraccess (expr, stmt, node, find_function (node),
++				  type, base, field));
+ }
+ 
+ /* Find function corresponding to NODE.  */
+@@ -7469,8 +7795,8 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	      gsi_insert_before (gsi, g, GSI_SAME_STMT);
+ 	    }
+ 	  remove = true;
+-	} 
+-      else 
++	}
++      else
+ 	{
+ 	  for (unsigned i = 0; i < max_split && newrhs1[i] && newrhs2[i]; i++)
+ 	    {
+@@ -7516,6 +7842,11 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	    continue;
+ 	  tree lhs_expr = newlhs[i] ? newlhs[i] : lhs;
+ 	  tree rhs_expr = newrhs[i] ? newrhs[i] : rhs;
++
++	  tree conv_rhs = build_convert_gimple (lhs_expr, rhs_expr, gsi);
++	  if (conv_rhs)
++	    rhs_expr = conv_rhs;
++
+ 	  gimple *newstmt = gimple_build_assign (lhs_expr, rhs_expr);
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+ 	    {
+@@ -8106,6 +8437,18 @@ ipa_struct_reorg::rewrite_functions (void)
+ {
+   unsigned retval = 0;
+ 
++  if (flag_ipa_struct_sfc_shadow)
++    {
++      for (unsigned i = 0; i < fc_infos.length (); i++)
++	{
++	  fc_type_info *info = fc_infos[i];
++	  if (!info || !info->static_fc_p)
++	    continue;
++	  cleanup_shadow_write (info);
++	  rewrite_shadow_read (info);
++	}
++    }
++
+   /* Create new types, if we did not create any new types,
+      then don't rewrite any accesses.  */
+   if (!create_new_types ())
+@@ -8518,6 +8861,1142 @@ ipa_struct_reorg::check_and_prune_struct_for_semi_relayout (void)
+     }
+ }
+ 
++/* Get the BASE and field of the VAR.  */
++bool
++ipa_struct_reorg::get_base_type (tree var, tree &base,
++				 srtype *&type, srfield *&field)
++{
++  if (!var || TREE_CODE (var) != COMPONENT_REF)
++    return false;
++
++  /* Ignore data access that is canonical.  */
++  bool realpart, imagpart;
++  bool address;
++  bool indirect;
++  bool escape_from_base;
++  return get_type_field (var, base, indirect, type, field,
++			 realpart, imagpart, address, escape_from_base);
++}
++
++void
++ipa_struct_reorg::check_and_prune_struct_for_field_compression (void)
++{
++  for (auto *type : types)
++    {
++      if (dump_file)
++	{
++	  fprintf (dump_file, "[field compress] Analyzing type : ");
++	  print_generic_expr (dump_file, type->type);
++	  fprintf (dump_file, "\n");
++	}
++
++      /* Check if the type is escaped or not.  */
++      if (type->has_escaped ())
++	continue;
++
++      type->fc_info = new fc_type_info (type);
++      fc_infos.safe_push (type->fc_info);
++
++      if (!find_field_compression_candidate (type))
++	continue;
++
++      gcc_assert (type->fc_info->static_fc_p);
++      if (dump_file)
++	{
++	  fprintf (dump_file, "[field compress] Found candidate: ");
++	  print_generic_expr (dump_file, type->type);
++	  fprintf (dump_file, "\n");
++	}
++
++      /* Support only 1 type.  */
++      break;
++    }
++}
++
++/* Find a field compression candidate.  */
++
++bool
++ipa_struct_reorg::find_field_compression_candidate (srtype *type)
++{
++  if (type->has_recursive_field_type ())
++    {
++      FC_DUMP_MSG ("Recursive field type unsupported\n");
++      return false;
++    }
++
++  fc_type_info *info = type->fc_info;
++
++  /* Classify fields by field type firstly.  */
++  classify_fields (info);
++
++  if (flag_ipa_struct_sfc)
++    {
++      FC_DUMP_MSG ("Looking for static fc fields\n");
++      info->static_fc_p = find_static_fc_fields (info);
++    }
++
++  if (!info->static_fc_p)
++    {
++      FC_DUMP_MSG ("Fail finding field compression candidate\n");
++      return false;
++    }
++
++  if (!compress_fields (info))
++    {
++      FC_DUMP_MSG ("Fail compressing fields\n");
++      return false;
++    }
++
++  return true;
++}
++
++/* Classify all fields by data type.  */
++
++void
++ipa_struct_reorg::classify_fields (fc_type_info *info)
++{
++  for (auto *srf : info->type->fields)
++    info->record_field_class (srf);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      unsigned i;
++      fc_field_class *field_class;
++      FOR_EACH_VEC_ELT (info->field_classes, i, field_class)
++	{
++	  fprintf (dump_file, "[field compress] %dth field class:\n", i);
++	  field_class->dump (dump_file);
++	}
++    }
++}
++
++/* Scan all fields to check whether each can be statically
++   compressed or not.  */
++
++bool
++ipa_struct_reorg::find_static_fc_fields (fc_type_info *info)
++{
++  bool found_static_compress = false;
++
++  if (flag_ipa_struct_sfc_shadow)
++    found_static_compress |= find_shadow_fields (info);
++
++  for (auto *srf : info->type->fields)
++    {
++      /* We have marked these fields as shadow, so skip them.  */
++      if (fc_fields_contains (info->static_fc_fields, srf->fielddecl))
++	continue;
++
++      found_static_compress |= static_compress_p (info, srf->fielddecl);
++    }
++
++  if (!found_static_compress)
++    {
++      FC_DUMP_MSG ("Fail finding static fc fields\n");
++      return false;
++    }
++
++  gcc_assert (!info->static_fc_fields.is_empty ());
++
++  /* Avoid compressing fields without hot access.  */
++  if (!find_hot_access (info, info->static_fc_fields))
++    {
++      FC_DUMP_MSG ("Fail finding hot access for static\n");
++      return false;
++    }
++
++  return true;
++}
++
++/* Compress fields and create new field types.  */
++
++bool
++ipa_struct_reorg::compress_fields (fc_type_info *info)
++{
++  if (info->static_fc_p && !compress_fields_static (info))
++    info->static_fc_p = false;
++
++  if (!info->static_fc_p)
++    return false;
++
++  compress_to_bitfields (info);
++
++  return true;
++}
++
++/* Check if the type has any field that can be shadowed.  */
++
++bool
++ipa_struct_reorg::find_shadow_fields (fc_type_info *info)
++{
++  FC_DUMP_MSG ("Finding shadow fields\n");
++
++  bool found_shadow = false;
++  for (auto *field_class : info->field_classes)
++    {
++      /* Field shadowing requires two or more fields.  */
++      if (field_class->size () < 2)
++	continue;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Find shadow field for field class:\n");
++	  field_class->dump (dump_file);
++	}
++
++      if (find_shadow_fields (info, field_class))
++	{
++	  found_shadow = true;
++	  continue;
++	}
++
++      FC_DUMP_MSG ("Fail finding shadow field\n");
++    }
++
++  return found_shadow;
++}
++
++bool
++ipa_struct_reorg::find_shadow_fields (fc_type_info *info,
++				      fc_field_class *field_class)
++{
++  /* Find and record all pair assignments.  */
++  fc_shadow_info shadow_info;
++  if (!find_pair_stmts (field_class, shadow_info))
++    return false;
++
++  /* Unpair assignment checking.  */
++  auto &srfields = field_class->srfields;
++  unsigned original_index = 0;
++  if (shadow_info.unpair_stmt)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Find unpair assignment:\n");
++	  print_gimple_stmt (dump_file, shadow_info.unpair_stmt, 0);
++	}
++
++      original_index = shadow_info.unpair_stmt_index;
++      if (!check_unpair_stmt (field_class,
++			      shadow_info.unpair_stmt,
++			      shadow_info.unpair_stmt_func,
++			      srfields[original_index]))
++	return false;
++    }
++
++  /* Add a new static fc_field.  */
++  srfield *original_srf = srfields[original_index];
++
++  unsigned i;
++  srfield *shadow_srf;
++  FOR_EACH_VEC_ELT (srfields, i, shadow_srf)
++    {
++      if (i == original_index)
++	continue;
++
++      fc_field *fc_f = new fc_field (shadow_srf->fielddecl, 1, original_srf);
++      info->static_fc_fields.safe_push (fc_f);
++
++      /* Record all shadow stmts to fc_field.  */
++      unsigned j;
++      auto_vec *group;
++      FOR_EACH_VEC_ELT (shadow_info.pair_stmts_groups, j, group)
++	{
++	  fc_f->shadow_stmts.safe_push ((*group)[i]);
++	  fc_f->shadow_stmts_func.safe_push (shadow_info.pair_stmts_func[j]);
++	}
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Found shadow field: ");
++	  print_generic_expr (dump_file, shadow_srf->fielddecl);
++	  fprintf (dump_file, "\n");
++	}
++    }
++
++  return true;
++}
++
++bool
++ipa_struct_reorg::find_pair_stmts (fc_field_class *field_class,
++				   fc_shadow_info &info)
++{
++  for (auto *srfn : functions)
++    {
++      SET_CFUN (srfn);
++      basic_block bb = NULL;
++      FOR_EACH_BB_FN (bb, cfun)
++	{
++	  auto_vec group;
++	  auto_vec indexes;
++	  auto_bitmap visited_fields;
++	  bool read = false;
++
++	  for (auto si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
++	    {
++	      gimple *stmt = gsi_stmt (si);
++	      if (!is_gimple_assign (stmt))
++		continue;
++
++	      /* Check if any of the fields have been read.
++		 We need to make sure there is no stmt
++		 reading the fields within a pair stmt group.  */
++	      if (read_field_in_fc_class_p (stmt, field_class))
++		read = true;
++
++	      srfield *srf = write_field_in_fc_class_p (stmt, field_class);
++	      if (!srf)
++		continue;
++
++	      /* Multiple rhs is not considered conservatively.  */
++	      if (gimple_assign_rhs_class (stmt) != GIMPLE_SINGLE_RHS)
++		return false;
++
++	      /* Initialize the read flag when recording first stmt.
++		 The read flag must be false when recording
++		 the rest of the stmts.  */
++	      if (group.is_empty ())
++		read = false;
++	      else if (read)
++		return false;
++
++	      int index = field_class->get_field_index (srf);
++	      if (index == -1 || !bitmap_set_bit (visited_fields, index))
++		return false;
++
++	      indexes.safe_push (index);
++	      group.safe_push (stmt);
++	      if (group.length () == field_class->size ())
++		{
++		  if (!fc_pair_stmts_rhs_equal_p (group))
++		    return false;
++
++		  add_pair_stmts_group (info, group, indexes);
++		  info.pair_stmts_func.safe_push (srfn);
++		  group.truncate (0);
++		  indexes.truncate (0);
++		  bitmap_clear (visited_fields);
++		}
++
++	      if (dump_file && (dump_flags & TDF_DETAILS))
++		{
++		  fprintf (dump_file, "[BB #%d] Record stmt: ", bb->index);
++		  print_gimple_stmt (dump_file, stmt, 0);
++		}
++	    }
++
++	  /* Only support one unpair assignment now.  */
++	  if (!group.is_empty ())
++	    {
++	      if (info.unpair_stmt || group.length () > 1)
++		return false;
++	      info.unpair_stmt = group[0];
++	      info.unpair_stmt_func = srfn;
++	      info.unpair_stmt_index = indexes[0];
++	    }
++	}
++    }
++
++  return true;
++}
++
++/* Reorder a group of pair stmts and add it to fc_shadow_info.  */
++
++void
++ipa_struct_reorg::add_pair_stmts_group (fc_shadow_info &info,
++					const auto_vec &group,
++					const auto_vec &indexes)
++{
++  auto ordered_group = new auto_vec (group.length ());
++
++  unsigned i;
++  unsigned ordered_index;
++  FOR_EACH_VEC_ELT (indexes, i, ordered_index)
++    (*ordered_group)[ordered_index] = group[i];
++
++  info.pair_stmts_groups.safe_push (ordered_group);
++}
++
++/* Check if the stmt reads any field in the given field class.  */
++
++srfield *
++ipa_struct_reorg::read_field_in_fc_class_p (gimple *stmt,
++					    fc_field_class *fclass)
++{
++  for (unsigned i = 1; i < gimple_num_ops (stmt); i++)
++    {
++      tree base = NULL_TREE;
++      srtype *type = NULL;
++      srfield *field = NULL;
++      if (!get_base_type (gimple_op (stmt, i), base, type, field))
++	continue;
++
++      if (field && field->field_class == fclass)
++	return field;
++    }
++
++  return NULL;
++}
++
++/* Check if the stmt writes any field in the given field class.  */
++
++srfield *
++ipa_struct_reorg::write_field_in_fc_class_p (gimple *stmt,
++					     fc_field_class *fclass)
++{
++  tree base = NULL_TREE;
++  srtype *type = NULL;
++  srfield *field = NULL;
++
++  if (!get_base_type (gimple_assign_lhs (stmt), base, type, field)
++      || !field
++      || field->field_class != fclass)
++    return NULL;
++
++  return field;
++}
++
++fc_field *
++ipa_struct_reorg::fc_fields_contains (auto_vec &fc_fields,
++				      tree field)
++{
++  for (auto *fc_f : fc_fields)
++    if (fc_f->field == field)
++      return fc_f;
++
++  return NULL;
++}
++
++/* Check if the right operands of all assignments are equal.  */
++
++bool
++ipa_struct_reorg::fc_pair_stmts_rhs_equal_p (const auto_vec &stmts)
++{
++  if (stmts.length () < 2)
++    return false;
++
++  tree rhs = gimple_assign_rhs1 (stmts[0]);
++  for (unsigned i = 1; i < stmts.length (); i++)
++    if (!fc_operand_equal_p (rhs, gimple_assign_rhs1 (stmts[i])))
++      return false;
++
++  return true;
++}
++
++/* Check if VAR1 and VAR2 are equal for field compression.  */
++
++bool
++ipa_struct_reorg::fc_operand_equal_p (tree var1, tree var2)
++{
++  if (operand_equal_p (var1, var2))
++    return true;
++
++  /* Match code and operands.  */
++  tree_code code = TREE_CODE (var1);
++  if (code != TREE_CODE (var2))
++    return false;
++
++  if (code == SSA_NAME)
++    {
++      gimple *stmt1 = SSA_NAME_DEF_STMT (var1);
++      gimple *stmt2 = SSA_NAME_DEF_STMT (var2);
++      return is_gimple_assign (stmt1) && is_gimple_assign (stmt2)
++	     && fc_operand_equal_p (gimple_assign_rhs_to_tree (stmt1),
++				    gimple_assign_rhs_to_tree (stmt2));
++    }
++
++  /* Only part of the cases are covered now.  */
++  HOST_WIDE_INT value;
++  switch (get_gimple_rhs_class (code))
++    {
++      case GIMPLE_UNARY_RHS:
++	return ((code == COMPONENT_REF || code == MEM_REF)
++		&& fc_global_const_p (var1, value)
++		&& operand_equal_p (var1, var2, COMPARE_DECL_FLAGS));
++      case GIMPLE_BINARY_RHS:
++	return fc_operand_equal_p (TREE_OPERAND (var1, 0),
++				   TREE_OPERAND (var2, 0))
++	       && fc_operand_equal_p (TREE_OPERAND (var1, 1),
++				      TREE_OPERAND (var2, 1));
++      default:
++	return false;
++    }
++}
++
++/* Return true if VAR is a global variable, and it is assigned to be a constant
++   and it is never changed globally.  The assumption is this VAR doesn't have
++   address taken, and the type containing it doesn't escape.  */
++
++bool
++ipa_struct_reorg::fc_global_const_p (tree var, HOST_WIDE_INT &value)
++{
++  srtype *type;
++  srfield *field;
++  tree base;
++  if (!get_base_type (var, base, type, field) || type->has_escaped ())
++    return false;
++
++  const_map *cm = find_global_const (var);
++  if (cm)
++    {
++      value = cm->value;
++      return true;
++    }
++
++  bool is_const = false;
++  HOST_WIDE_INT const_value = 0;
++  for (auto *access : type->accesses)
++    {
++      SET_CFUN (access->function);
++
++      gimple *stmt = access->stmt;
++      if (!gimple_assign_single_p (stmt)
++	  || !operand_equal_p (gimple_assign_lhs (stmt), var))
++	continue;
++
++      if (!fc_peephole_const_p (gimple_assign_rhs1 (stmt), value))
++	return false;
++
++      /* Make sure the value is never changed.  */
++      if (is_const)
++	{
++	  if (value != const_value)
++	    return false;
++	  continue;
++	}
++
++      is_const = true;
++      const_value = value;
++
++      /* Record a global constant here.  */
++      global_consts.safe_push (new const_map (var, value));
++    }
++
++  return is_const;
++}
++
++/* Return true if VAR is a simple constant that can be identified by peephole,
++   and the HWI will be updated accordingly.  Otherwise, the HWI will not be
++   changed.  */
++
++bool
++ipa_struct_reorg::fc_peephole_const_p (tree var, HOST_WIDE_INT &value)
++{
++  if (TREE_CODE (var) == INTEGER_CST)
++    {
++      value = tree_to_shwi (var);
++      return true;
++    }
++
++  if (TREE_CODE (var) != SSA_NAME)
++    return false;
++
++  /* Var might be an argument.  */
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  if (!is_gimple_assign (stmt))
++    return false;
++
++  if (gimple_assign_load_p (stmt))
++    return fc_global_const_p (gimple_assign_rhs1 (stmt), value);
++
++  HOST_WIDE_INT value1, value2;
++  if (gimple_assign_rhs_class (stmt) != GIMPLE_BINARY_RHS
++      || !fc_peephole_const_p (gimple_assign_rhs1 (stmt), value1)
++      || !fc_peephole_const_p (gimple_assign_rhs2 (stmt), value2))
++    return false;
++
++  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
++  switch (rhs_code)
++    {
++      case PLUS_EXPR:
++	value = value1 + value2;
++	break;
++      case MULT_EXPR:
++	value = value1 * value2;
++	break;
++      case MAX_EXPR:
++	value = (value1 > value2) ? value1 : value2;
++	break;
++      case MIN_EXPR:
++	value = (value1 < value2) ? value1 : value2;
++	break;
++      default:
++	return false;
++    }
++
++  return true;
++}
++
++const_map *
++ipa_struct_reorg::find_global_const (tree var)
++{
++  for (auto *cm : global_consts)
++    if (operand_equal_p (cm->var, var))
++      return cm;
++
++  return NULL;
++}
++
++/* The unpair statement needs to meet the following requirements:
++   (1) The array being accessed(mem base) should be allocated by calloc()
++       so that we can make sure its values are initialized as zero
++   (2) There must not be any assignment to other fields in the same
++       field class in the same array before the unpair stmt
++
++   These requirements are to ensure we know the value of shadow fields
++   when an unpair stmt happens.  */
++
++bool
++ipa_struct_reorg::check_unpair_stmt (fc_field_class *field_class,
++				     gimple *unpair_stmt,
++				     srfunction *unpair_stmt_func,
++				     srfield *unpair_field)
++{
++  SET_CFUN (unpair_stmt_func);
++
++  srtype *type = NULL;
++  srfield *field = NULL;
++  tree base = NULL_TREE;
++  if (!get_base_type (gimple_assign_lhs (unpair_stmt), base, type, field))
++    return false;
++
++  /* The array being accessed.  */
++  tree mem_base = find_mem_base (base);
++  if (!mem_base)
++    return false;
++
++  auto blocks = collect_all_predecessor (unpair_stmt);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Found %d blocks that can reach bb %d in func ",
++	       blocks.length (), gimple_bb (unpair_stmt)->index);
++      print_generic_expr (dump_file, cfun->decl);
++      fprintf (dump_file, ":\n");
++
++      for (auto *bb : blocks)
++	fprintf (dump_file, "%d ", bb->index);
++
++      fprintf (dump_file, "\n\n");
++    }
++
++  /* Check requirement (2).  */
++  for (auto *bb : blocks)
++    {
++      for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++	  if (!is_gimple_assign (stmt))
++	    continue;
++
++	  if (!get_base_type (gimple_assign_lhs (stmt), base, type, field)
++	      || field->field_class != field_class
++	      || field == unpair_field)
++	    continue;
++
++	  /* Accessing to different array is allowed.  */
++	  tree cur_mem_base = find_mem_base (base);
++	  if (!cur_mem_base || operand_equal_p (mem_base, cur_mem_base))
++	    return false;
++	}
++    }
++
++  return true;
++}
++
++/* Search backward following def/use chain until finding a memory pointer.  */
++
++tree
++ipa_struct_reorg::find_mem_base (tree var)
++{
++  auto_bitmap visited;
++  auto_vec worklists;
++  worklists.safe_push (var);
++
++  tree mem_base = NULL_TREE;
++  while (!worklists.is_empty ())
++    {
++      tree t = worklists.pop ();
++      if (TREE_CODE (t) != SSA_NAME)
++	{
++	  gimple *alloc_stmt = find_alloc_stmt (t);
++	  if (!alloc_stmt || !is_gimple_call (alloc_stmt))
++	    return NULL;
++
++	  tree alloc_lhs = gimple_call_lhs (alloc_stmt);
++	  if (mem_base && !operand_equal_p (mem_base, alloc_lhs))
++	    return NULL;
++
++	  mem_base = alloc_lhs;
++	  continue;
++	}
++
++      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      gimple *stmt = SSA_NAME_DEF_STMT (t);
++      if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
++	{
++	  if (mem_base && !operand_equal_p (mem_base, t))
++	    return NULL;
++	  mem_base = t;
++	}
++      else if (gimple_assign_rhs_code_p (stmt, POINTER_PLUS_EXPR)
++	       || gimple_assign_single_p (stmt))
++	{
++	  worklists.safe_push (gimple_assign_rhs1 (stmt));
++	}
++      else if (gimple_code (stmt) == GIMPLE_PHI)
++	{
++	  for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
++	    worklists.safe_push (gimple_phi_arg_def (stmt, i));
++	}
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      if (mem_base)
++	{
++	  FC_DUMP_MSG ("Found memory base: ");
++	  print_generic_expr (dump_file, mem_base);
++	  fprintf (dump_file, "\n");
++	}
++      else
++	FC_DUMP_MSG ("Fail finding memory base\n");
++    }
++
++  return mem_base;
++}
++
++/* Return allocation stmt for a non-ssa var.
++
++   _1 = calloc(...);
++   var = _1;
++
++   We will try to find the above pattern and return the first stmt.  */
++
++gimple *
++ipa_struct_reorg::find_alloc_stmt (tree var)
++{
++  basic_block bb = NULL;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
++	   gsi_next (&si))
++	{
++	  gimple *stmt = gsi_stmt (si);
++	  if (!is_gimple_assign (stmt)
++	      || !operand_equal_p (gimple_assign_lhs (stmt), var))
++	    continue;
++
++	  if (!gimple_assign_single_p (stmt))
++	    return NULL;
++
++	  tree rhs1 = gimple_assign_rhs1 (stmt);
++	  if (integer_zerop (rhs1))
++	    continue;
++
++	  if (TREE_CODE (rhs1) != SSA_NAME)
++	    return NULL;
++
++	  gimple *def_stmt = SSA_NAME_DEF_STMT (rhs1);
++	  if (!gimple_call_builtin_p (def_stmt, BUILT_IN_CALLOC))
++	    return NULL;
++
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      FC_DUMP_MSG ("Found allocation stmt: ");
++	      print_gimple_stmt (dump_file, def_stmt, 0);
++	    }
++
++	  return def_stmt;
++	}
++    }
++
++  return NULL;
++}
++
++/* Scan field's assignments globally to determine whether it can be statically
++   compressed or not.  */
++
++bool
++ipa_struct_reorg::static_compress_p (fc_type_info *info, tree field)
++{
++  HOST_WIDE_INT max_value = find_max_value (info->type, field);
++
++  /* A max value of 0 means it could not be determined at compile time.  */
++  if (!max_value)
++    return false;
++
++  fc_field *fc_f = new fc_field (field, max_value, 0);
++  info->static_fc_fields.safe_push (fc_f);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Found a static compression field: ");
++      print_generic_expr (dump_file, field);
++      fprintf (dump_file, ", max_value = %ld\n", max_value);
++    }
++
++  return true;
++}
++
++/* Scan field's assignments globally to find the max value.  */
++
++HOST_WIDE_INT
++ipa_struct_reorg::find_max_value (srtype *type, tree field)
++{
++  auto_vec worklist;
++  auto_bitmap visited;
++  HOST_WIDE_INT max_value = 0;
++
++  for (auto *access : type->accesses)
++    {
++      if (!access->write_field_p (field)
++	  || !gimple_assign_single_p (access->stmt))
++	continue;
++
++      SET_CFUN (access->function);
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Check stmt: ");
++	  print_gimple_stmt (dump_file, access->stmt, 0);
++	  fprintf (dump_file, "\n");
++	}
++
++      auto [found, value] = find_assign_max_value (access->stmt);
++      if (!found)
++	return 0;
++
++      if (value > UINT_MAX)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      FC_DUMP_MSG ("Max value of ");
++	      print_generic_expr (dump_file, field);
++	      fprintf (dump_file, " is too big, max_value = %ld\n", value);
++	    }
++	  return 0;
++	}
++
++      if (value > max_value)
++	max_value = value;
++    }
++
++  return max_value;
++}
++
++/* Trace back to find the max value.
++   Return value contains two parts:
++     first:
++       true: Found a max value.
++       false: Otherwise.
++     second: The max value if found.  */
++
++std::pair
++ipa_struct_reorg::find_assign_max_value (gimple *stmt)
++{
++  auto_vec worklist;
++  auto_bitmap visited;
++  HOST_WIDE_INT max_value = 0;
++
++  worklist.safe_push (gimple_assign_rhs1 (stmt));
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++
++      if (TREE_CODE (t) == INTEGER_CST)
++	{
++	  HOST_WIDE_INT value = TREE_INT_CST_LOW (t);
++	  if (value < 0)
++	    return {false, 0};
++	  if (value > max_value)
++	    max_value = value;
++	  continue;
++	}
++
++      /* Trace back through ssa's def chain.  */
++      if (TREE_CODE (t) != SSA_NAME)
++	return {false, 0};
++
++      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      gimple *def_stmt = SSA_NAME_DEF_STMT (t);
++      if (gimple_code (def_stmt) == GIMPLE_PHI)
++	{
++	  for (unsigned i = 0; i < gimple_phi_num_args (def_stmt); i++)
++	    worklist.safe_push (gimple_phi_arg_def (def_stmt, i));
++	}
++      else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
++	{
++	  if (gimple_assign_rhs_class (def_stmt) != GIMPLE_SINGLE_RHS
++	      && !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
++	    return {false, 0};
++	  worklist.safe_push (gimple_assign_rhs1 (def_stmt));
++	}
++      else
++	{
++	  return {false, 0};
++	}
++    }
++
++  return {true, max_value};
++}
++
++/* Check if it is a struct copy.  */
++
++bool
++ipa_struct_reorg::struct_copy_p (gimple *stmt, tree type)
++{
++  if (!gimple_assign_single_p (stmt)
++      || TREE_TYPE (gimple_assign_lhs (stmt)) != type
++      || !types_compatible_p (TREE_TYPE (gimple_assign_rhs1 (stmt)), type))
++    return false;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Found struct copy: \n");
++      print_gimple_stmt (dump_file, stmt, 0);
++      fprintf (dump_file, "\n");
++    }
++
++  return true;
++}
++
++/* Return true if we can find a hot loop, in which
++   (1) there is a struct copy for the fc_type
++   (2) or, all fc_fields have been written.  */
++
++bool
++ipa_struct_reorg::find_hot_access (fc_type_info *info,
++				   auto_vec &fc_fields)
++{
++  /* Record which fields are written in a block.  */
++  hash_map> write_map;
++
++  srtype *type = info->type;
++  for (auto *access : type->accesses)
++    {
++      SET_CFUN (access->function);
++
++      basic_block bb = access->stmt->bb;
++      if (!bb->loop_father->num
++	  || !access->write_type_p (type->type))
++	continue;
++
++      /* Case (1).  */
++      if (struct_copy_p (access->stmt, type->type))
++	return true;
++
++      /* Case (2).  */
++      if (!access->field)
++	continue;
++
++      tree fielddecl = access->field->fielddecl;
++      if (!fielddecl || !fc_fields_contains (fc_fields, fielddecl))
++	continue;
++
++      auto &set = write_map.get_or_insert (bb);
++      set.add (fielddecl);
++
++      /* Now all fields have been written.  */
++      if (set.elements () == fc_fields.length ())
++	return true;
++    }
++
++  return false;
++}
++
++/* Clean up all write stmts to shadow fields by changing the RHS to true,
++   which means it is a shadow.  */
++
++void
++ipa_struct_reorg::cleanup_shadow_write (fc_type_info *info)
++{
++  for (auto *fc_f : info->static_fc_fields)
++    {
++      if (!fc_f->original)
++	continue;
++
++      unsigned i;
++      gimple *stmt;
++      FOR_EACH_VEC_ELT (fc_f->shadow_stmts, i, stmt)
++	{
++	  SET_CFUN (fc_f->shadow_stmts_func[i]);
++	  gcc_assert (gimple_assign_single_p (stmt));
++	  gimple_assign_set_rhs1 (
++	    stmt, build_int_cst (TREE_TYPE (fc_f->field), 1));
++	  update_stmt (stmt);
++	}
++    }
++}
++
++/* Rewrite all reads of shadow fields by using a conditional expression.  */
++
++void
++ipa_struct_reorg::rewrite_shadow_read (fc_type_info *info)
++{
++  for (auto *fc_f : info->static_fc_fields)
++    {
++      if (!fc_f->original)
++	continue;
++
++      for (auto *access : info->type->accesses)
++	{
++	  if (!access->read_field_p (fc_f->field))
++	    continue;
++
++	  SET_CFUN (access->function);
++	  insert_shadow_stmt (access->stmt, access->index,
++			      fc_f, access->base);
++	}
++    }
++}
++
++/* Insert the followings for shadow data read before STMT.
++   The IDX operand is the shadow data.
++
++   * For static: (shadow_field == true) ? original_field : 0 */
++
++void
++ipa_struct_reorg::insert_shadow_stmt (gimple *stmt, unsigned idx,
++				      fc_field *fc_field, tree base)
++{
++  tree shadow = gimple_op (stmt, idx);
++  tree original = build_field_ref (base, fc_field->original->fielddecl);
++
++  /* Insert new stmt immediately before stmt.  */
++  gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
++
++  /* original_ssa = original */
++  tree original_ssa = make_temp_ssa_name (TREE_TYPE (original), NULL, "");
++  gimple *original_stmt = gimple_build_assign (original_ssa, original);
++  gsi_insert_before (&gsi, original_stmt, GSI_SAME_STMT);
++  update_stmt (original_stmt);
++
++  /* shadow_ssa = shadow */
++  tree shadow_ssa = make_temp_ssa_name (TREE_TYPE (shadow), NULL, "");
++  gimple *shadow_stmt = gimple_build_assign (shadow_ssa, shadow);
++  gsi_insert_before (&gsi, shadow_stmt, GSI_SAME_STMT);
++  update_stmt (shadow_stmt);
++
++  /* new_shadow_ssa = (shadow_ssa == true ? original_ssa : 0) */
++  tree cond = fold_build2 (EQ_EXPR, boolean_type_node, shadow_ssa,
++			   build_int_cst (TREE_TYPE (shadow), 1));
++
++  tree new_shadow = build_cond_expr (cond, original_ssa,
++				     build_int_cst (TREE_TYPE (shadow), 0));
++  new_shadow = force_gimple_operand_gsi (&gsi, new_shadow, true, NULL, true,
++					 GSI_SAME_STMT);
++  tree new_shadow_ssa = make_temp_ssa_name (TREE_TYPE (shadow), NULL, "");
++  gimple *new_shadow_stmt = gimple_build_assign (new_shadow_ssa, new_shadow);
++  gsi_insert_before (&gsi, new_shadow_stmt, GSI_SAME_STMT);
++
++  gimple_set_op (stmt, idx, new_shadow_ssa);
++  update_stmt (new_shadow_stmt);
++  update_stmt (stmt);
++}
++
++/* Compress fields and create static new field types.  */
++
++bool
++ipa_struct_reorg::compress_fields_static (fc_type_info *info)
++{
++  /* For static compression fields, compress them according to max_value.  */
++  for (auto *fc_f : info->static_fc_fields)
++    {
++      tree old_type = TREE_TYPE (fc_f->field);
++      tree new_type = NULL_TREE;
++
++      HOST_WIDE_INT max_value = fc_f->max_value;
++      gcc_assert (max_value > 0 && max_value <= UINT_MAX);
++
++      /* Conservatively we only do static compression for unsigned type.  */
++      if (max_value <= 0xff)
++	new_type = unsigned_char_type_node;
++      else if (max_value <= 0xffff)
++	new_type = short_unsigned_type_node;
++      else
++	new_type = unsigned_type_node;
++
++      fc_f->new_type = new_type;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Change the type of ");
++	  print_generic_expr (dump_file, fc_f->field);
++	  fprintf (dump_file, " from (prec=%d) to ", TYPE_PRECISION (old_type));
++	  print_generic_expr (dump_file, new_type);
++	  fprintf (dump_file, "(prec=%d)\n", TYPE_PRECISION (new_type));
++	}
++    }
++
++  return true;
++}
++
++/* Compress fields to bitfield, for which bits will be the width.  */
++
++void
++ipa_struct_reorg::compress_to_bitfields (fc_type_info *info)
++{
++  /* For static compression.  Calculate bitsize for static field.  */
++  if (flag_ipa_struct_sfc_bitfield && info->static_fc_p)
++    {
++      for (auto *fc_f : info->static_fc_fields)
++	{
++	  HOST_WIDE_INT max_value = fc_f->max_value;
++	  gcc_assert (max_value > 0 && max_value <= UINT_MAX);
++
++	  /* Calculate bitsize.  */
++	  fc_f->bits = 0;
++	  while (max_value)
++	    {
++	      fc_f->bits++;
++	      max_value >>= 1;
++	    }
++
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      FC_DUMP_MSG ("Bitfield: ");
++	      print_generic_expr (dump_file, fc_f->field);
++	      fprintf (dump_file, ":%d", fc_f->bits);
++	      fprintf (dump_file, "\n");
++	    }
++	}
++    }
++}
++
++/* Collect all blocks that can reach stmt.  */
++
++auto_vec<basic_block>
++ipa_struct_reorg::collect_all_predecessor (gimple *stmt)
++{
++  auto_vec<basic_block> blocks;
++  basic_block start_bb = gimple_bb (stmt);
++
++  if (start_bb)
++    {
++      auto_bitmap visited;
++      auto_vec<basic_block> worklists;
++      worklists.safe_push (start_bb);
++
++      while (!worklists.is_empty ())
++	{
++	  basic_block bb = worklists.pop ();
++	  if (!bitmap_set_bit (visited, bb->index))
++	    continue;
++
++	  blocks.safe_push (bb);
++	  edge e;
++	  edge_iterator ei;
++	  FOR_EACH_EDGE (e, ei, bb->preds)
++	    worklists.safe_push (e->src);
++	}
++    }
++
++  return blocks;
++}
+ 
+ /* Init pointer size from parameter param_pointer_compression_size.  */
+ 
+@@ -8562,6 +10041,8 @@ ipa_struct_reorg::execute (unsigned int opt)
+ 	check_and_prune_struct_for_pointer_compression ();
+       if (opt >= SEMI_RELAYOUT)
+ 	check_and_prune_struct_for_semi_relayout ();
++      if (flag_ipa_struct_sfc)
++	check_and_prune_struct_for_field_compression ();
+       ret = rewrite_functions ();
+     }
+   else
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+index e3e6d7afb..2ab6444d6 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+@@ -64,6 +64,9 @@ struct srtype;
+ struct sraccess;
+ struct srdecl;
+ struct srfunction;
++class fc_type_info;
++class fc_field;
++class fc_field_class;
+ 
+ struct srfunction
+ {
+@@ -136,6 +139,8 @@ public:
+   unsigned bucket_parts;
+   unsigned bucket_size;
+ 
++  fc_type_info *fc_info;
++
+   // Constructors
+   srtype (tree type);
+ 
+@@ -157,6 +162,8 @@ public:
+   void mark_escape (escape_type, gimple *stmt);
+   void create_global_ptr_for_pc ();
+   unsigned calculate_bucket_size ();
++  bool has_recursive_field_type ();
++  void check_fc_fields ();
+   bool has_escaped (void)
+   {
+     return escapes != does_not_escape;
+@@ -196,6 +203,9 @@ struct srfield
+   tree newfield[max_split];
+   unsigned field_access; /* FIELD_DECL -> bitflag (use for dfe).  */
+ 
++  fc_field *static_fc_field;
++  fc_field_class *field_class;
++
+   // Constructors
+   srfield (tree field, srtype *base);
+ 
+@@ -211,27 +221,31 @@ struct srfield
+   void create_new_reorder_fields (tree newtype[max_split],
+ 				  tree newfields[max_split],
+ 				  tree newlast[max_split]);
++  bool dead_field_p ();
+ };
+ 
+ struct sraccess
+ {
++  unsigned index;
++  tree expr;
+   gimple *stmt;
+   cgraph_node *node;
+ 
++  srfunction *function;
+   srtype *type;
++  tree base;
+   // NULL field means the whole type is accessed
+   srfield *field;
+ 
+   // Constructors
+-  sraccess (gimple *s, cgraph_node *n, srtype *t, srfield *f = NULL)
+-    : stmt (s),
+-      node (n),
+-      type (t),
+-      field (f)
+-  {}
++  sraccess (tree, gimple *, cgraph_node *, srfunction *,
++	    srtype *, tree, srfield *);
+ 
+   // Methods
+-  void dump (FILE *file);
++  void dump (FILE *file) const;
++  bool write_type_p (tree) const;
++  bool write_field_p (tree) const;
++  bool read_field_p (tree) const;
+ };
+ 
+ struct srdecl
+@@ -262,6 +276,101 @@ struct srdecl
+   }
+ };
+ 
++/* All fields belong to this class should have the same type.  */
++
++class fc_field_class
++{
++public:
++  /* The same type for all of the fields in the class.  */
++  tree fieldtype;
++
++  /* The fields with the same type are in the same element of this vector.  */
++  auto_vec<srfield *> srfields;
++
++  fc_field_class (tree fieldtype)
++    : fieldtype (fieldtype)
++  {}
++
++  void dump (FILE *) const;
++  unsigned size () const;
++  int get_field_index (srfield *) const;
++};
++
++/* The field for field compression.  */
++
++class fc_field
++{
++public:
++  tree field;
++  tree new_type;
++
++  /* This field's max value we can know at compile time.  If it is 0, it means
++     the max value cannot be determined at compile time.  */
++  HOST_WIDE_INT max_value;
++
++  /* The bit width of the field if it is not zero.  */
++  unsigned bits;
++
++  /* The original field of a shadow field if it is not NULL.  */
++  srfield *original;
++
++  /* All assignments that need to be optimized as shadow.  */
++  auto_vec<gimple *> shadow_stmts;
++
++  /* The 1:1 map of shadow_stmts to indicate the current function of a shadow
++     stmt belongs to.  */
++  auto_vec<srfunction *> shadow_stmts_func;
++
++  /* For static field compression.  */
++  fc_field (tree field, HOST_WIDE_INT max_value, srfield *original)
++    : field (field), new_type (NULL_TREE), max_value (max_value),
++      bits (0), original (original)
++  {}
++
++  unsigned get_bits (void) const
++  {
++    return bits;
++  }
++};
++
++/* The class to hold field compression type information.
++   A single info object is only for one structure type.  */
++
++class fc_type_info
++{
++public:
++  srtype *type;
++
++  /* The flag to control whether the type can do static field compression.  */
++  bool static_fc_p = false;
++
++  /* Multiple fields of the data struct for static compression.  */
++  auto_delete_vec<fc_field> static_fc_fields;
++
++  /* The field classes classified by field type.  */
++  auto_delete_vec<fc_field_class> field_classes;
++
++  fc_type_info (srtype *type)
++    : type (type)
++  {}
++  fc_type_info ()
++    : type (NULL)
++  {}
++
++  fc_field_class *find_field_class_by_type (tree) const;
++  fc_field_class *record_field_class (srfield *);
++};
++
++/* The structure to hold necessary information for field shadow.  */
++
++struct fc_shadow_info
++{
++  auto_delete_vec<auto_vec<gimple *>> pair_stmts_groups;
++  auto_vec<srfunction *> pair_stmts_func;
++  gimple *unpair_stmt = NULL;
++  srfunction *unpair_stmt_func = NULL;
++  unsigned unpair_stmt_index = 0;
++};
+ 
+ } // namespace struct_reorg
+ 
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-bitfield_compress.c b/gcc/testsuite/gcc.dg/struct/sfc-bitfield_compress.c
+new file mode 100644
+index 000000000..1b13c017e
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-bitfield_compress.c
+@@ -0,0 +1,31 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = 4095;
++        arcs[i].b = 8;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].a != 4095 && arcs[i].b != 8)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "a {type = <unnamed-unsigned:12>}" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "b {type = <unnamed-unsigned:4>}" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "size : 2" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_assign_before_unpair_stmt.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_assign_before_unpair_stmt.c
+new file mode 100644
+index 000000000..386de992d
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_assign_before_unpair_stmt.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++    int c;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = 20;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding shadow field" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+new file mode 100644
+index 000000000..e709f7b25
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)malloc(MAX * sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = 0;
++    }
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding shadow field" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_fields.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_fields.c
+new file mode 100644
+index 000000000..dd99e6556
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_fields.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++    unsigned long c;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++        arcs[i].c = i;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b, arcs[10].c);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found shadow field: b" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found shadow field: c" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_unpair_stmts.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_unpair_stmts.c
+new file mode 100644
+index 000000000..5bb9d21f7
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_multiple_unpair_stmts.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++    unsigned long c;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b, arcs[10].c);
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i + 1;
++        arcs[i].b = i + 1;
++        arcs[i].c = i + 1;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b, arcs[10].c);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding shadow field" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_read_between_pair_stmts.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_read_between_pair_stmts.c
+new file mode 100644
+index 000000000..c369a3b4a
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_read_between_pair_stmts.c
+@@ -0,0 +1,38 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++    int c;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++__attribute__((__noinline__)) static void test_arc(arc_t* arcs, int i) {
++    arcs[10].a = 10;
++    arcs[10].b = 10;
++    printf("%d, %d\n", arcs[i].a, arcs[i].b);
++
++    /* sfc shadow check should fail here because of 
++       reading arcs->a and arcs->b between pair assignment.  */
++    arcs[10].a = 20;
++    printf("%d, %d\n", arcs[i].a, arcs[i].b);
++    arcs[10].b = 20;
++}
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++    test_arc(arcs, 10);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding shadow field" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_rhs_not_equal.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_rhs_not_equal.c
+new file mode 100644
+index 000000000..fd368a60b
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_rhs_not_equal.c
+@@ -0,0 +1,25 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i + 1;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding shadow field" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_two_fields.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_two_fields.c
+new file mode 100644
+index 000000000..2b8923f96
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_two_fields.c
+@@ -0,0 +1,25 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found shadow field: b" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt.c
+new file mode 100644
+index 000000000..01aba4e2a
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].b = 0;
++    }
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++    printf("%d, %d\n", arcs[10].a, arcs[10].b);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found shadow field: a" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt_different_array.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt_different_array.c
+new file mode 100644
+index 000000000..9ad32ce9a
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_unpair_stmt_different_array.c
+@@ -0,0 +1,30 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs1 = (arc_t*)calloc(MAX, sizeof(arc_t));
++    arc_t* arcs2 = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs1[i].a = i;
++        arcs1[i].b = i;
++    }
++    for (int i = 0; i < MAX; i++) {
++        arcs2[i].b = 0;
++    }
++    printf("%d, %d\n", arcs1[10].a, arcs1[10].b);
++    printf("%d, %d\n", arcs2[10].a, arcs2[10].b);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found shadow field: a" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc_big_value.c b/gcc/testsuite/gcc.dg/struct/sfc_big_value.c
+new file mode 100644
+index 000000000..01226afa7
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc_big_value.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].b = 10000000000L;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].b != 10000000000L)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Max value of b is too big, max_value = 10000000000" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc_compress.c b/gcc/testsuite/gcc.dg/struct/sfc_compress.c
+new file mode 100644
+index 000000000..994b77929
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc_compress.c
+@@ -0,0 +1,31 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = 10000;
++        arcs[i].b = 10;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].a != 10000 && arcs[i].b != 10)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a static compression field: a, max_value = 10000" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a static compression field: b, max_value = 10" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "size : 3" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc_no_hot_access.c b/gcc/testsuite/gcc.dg/struct/sfc_no_hot_access.c
+new file mode 100644
+index 000000000..60a552148
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc_no_hot_access.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    arcs[10].a = 10000;
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].a != 10000)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding hot access for static" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc_recursive_type.c b/gcc/testsuite/gcc.dg/struct/sfc_recursive_type.c
+new file mode 100644
+index 000000000..2d85b6ae1
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc_recursive_type.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    struct arc* arc_ptr;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = 10000;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].a != 10000)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Recursive field type unsupported" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc_rhs_non_single_constant.c b/gcc/testsuite/gcc.dg/struct/sfc_rhs_non_single_constant.c
+new file mode 100644
+index 000000000..2375fa88b
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc_rhs_non_single_constant.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++
++#include <stdio.h>
++#include <stdlib.h>
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i + 1;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].a != 10000 && arcs[i].b != 10)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding static fc fields" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+index 1ef26229a..34606d025 100644
+--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+@@ -65,6 +65,18 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/pc*.c]] \
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/semi_relayout*.c]] \
+ 	"" "-fipa-struct-reorg=6 -fdump-ipa-all -flto-partition=one -fwhole-program"
+ 
++# -fipa-struct-sfc
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/sfc_*.c]] \
++	"" "-fipa-reorder-fields -fipa-struct-sfc -fdump-ipa-struct_reorg-details -flto-partition=one -fwhole-program"
++
++# -fipa-struct-sfc -fipa-struct-sfc-bitfield
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/sfc-bitfield_*.c]] \
++	"" "-fipa-reorder-fields -fipa-struct-sfc -fipa-struct-sfc-bitfield -fdump-ipa-struct_reorg-details -flto-partition=one -fwhole-program"
++
++# -fipa-struct-sfc -fipa-struct-sfc-shadow
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/sfc-shadow_*.c]] \
++	"" "-fipa-reorder-fields -fipa-struct-sfc -fipa-struct-sfc-shadow -fdump-ipa-struct_reorg-details -flto-partition=one -fwhole-program"
++
+ # All done.
+ torture-finish
+ dg-finish
+-- 
+2.33.0
+
diff --git a/0337-Reduce-ipa-inline-warning-output.patch b/0337-Reduce-ipa-inline-warning-output.patch
new file mode 100644
index 0000000000000000000000000000000000000000..eac074faf36525eafb122cc17e94034fd94eaca3
--- /dev/null
+++ b/0337-Reduce-ipa-inline-warning-output.patch
@@ -0,0 +1,64 @@
+From 9a03e44a2c3b7733a23b4ae6b722b4fd48167a92 Mon Sep 17 00:00:00 2001
+From: huang-xiaoquan 
+Date: Wed, 12 Feb 2025 15:27:57 +0800
+Subject: [PATCH] Reduce ipa-inline warning output.
+
+---
+ gcc/ipa-inline.cc | 34 ----------------------------------
+ 1 file changed, 34 deletions(-)
+
+diff --git a/gcc/ipa-inline.cc b/gcc/ipa-inline.cc
+index 8d5cc9a84..fd47940cf 100644
+--- a/gcc/ipa-inline.cc
++++ b/gcc/ipa-inline.cc
+@@ -225,38 +225,6 @@ caller_growth_limits (struct cgraph_edge *e)
+   return true;
+ }
+ 
+-/* Warn and prompt the user, and output only once for the file pair where
+-   the function is located.  */
+-
+-static void
+-prompt_inline_failed_target_option_reason (struct cgraph_edge *e)
+-{
+-  static std::set<std::pair<lto_file_decl_data **, lto_file_decl_data **>> address_pair_set;
+-  if (e->inline_failed == CIF_TARGET_OPTION_MISMATCH
+-      && !cl_target_option_eq_major (target_opts_for_fn (e->caller->decl),
+-	   target_opts_for_fn (e->callee->ultimate_alias_target ()->decl))
+-      && e->caller->lto_file_data
+-      && e->callee->ultimate_alias_target ()->lto_file_data)
+-    {
+-      std::pair<lto_file_decl_data **, lto_file_decl_data **> addr_pair
+-	= std::make_pair (&e->caller->lto_file_data,
+-			  &e->callee->ultimate_alias_target ()->lto_file_data);
+-      if (address_pair_set.find (addr_pair) != address_pair_set.end ())
+-	return;
+-
+-      address_pair_set.insert (addr_pair);
+-      warning (0, "LTO objects caller in: %s, callee in: %s, not inlinable: %s."
+-	       " Try to use -finline-force=callee_object_or_lib_name to force "
+-	       "inline", e->caller->lto_file_data->file_name,
+-	       e->callee->ultimate_alias_target ()->lto_file_data->file_name,
+-	       cgraph_inline_failed_string (CIF_TARGET_OPTION_MISMATCH));
+-
+-      cl_target_option_print_diff
+-	(stderr, 2, target_opts_for_fn (e->caller->decl),
+-	target_opts_for_fn (e->callee->ultimate_alias_target ()->decl));
+-    }
+-}
+-
+ /* Dump info about why inlining has failed.  */
+ 
+ static void
+@@ -289,8 +257,6 @@ report_inline_failed_reason (struct cgraph_edge *e)
+ 	    (dump_file, 2, opts_for_fn (e->caller->decl),
+ 	     opts_for_fn (e->callee->ultimate_alias_target ()->decl));
+     }
+-
+-  prompt_inline_failed_target_option_reason (e);
+ }
+ 
+  /* Decide whether sanitizer-related attributes allow inlining. */
+-- 
+2.33.0
+
diff --git a/0338-i386-Fix-AVX512-intrin-macro-typo.patch b/0338-i386-Fix-AVX512-intrin-macro-typo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d2a70721c758337805cca3d41df40954f02d71d2
--- /dev/null
+++ b/0338-i386-Fix-AVX512-intrin-macro-typo.patch
@@ -0,0 +1,268 @@
+From c511b753a24ba48bbe4cdec5cf98e0f33cdb86ad Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Thu, 25 Jul 2024 16:12:20 +0800
+Subject: [PATCH 01/14] i386: Fix AVX512 intrin macro typo
+
+There are several typo in AVX512 intrins macro define. Correct them to solve
+errors when compiled with -O0.
+
+gcc/ChangeLog:
+
+	* config/i386/avx512dqintrin.h
+	(_mm_mask_fpclass_ss_mask): Correct operand order.
+	(_mm_mask_fpclass_sd_mask): Ditto.
+	(_mm256_maskz_reduce_round_ss): Use __builtin_ia32_reducess_mask_round
+	instead of __builtin_ia32_reducesd_mask_round.
+	(_mm_reduce_round_sd): Use -1 as mask since it is non-mask.
+	(_mm_reduce_round_ss): Ditto.
+	* config/i386/avx512vlbwintrin.h
+	(_mm256_mask_alignr_epi8): Correct operand usage.
+	(_mm_mask_alignr_epi8): Ditto.
+	* config/i386/avx512vlintrin.h (_mm_mask_alignr_epi64): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/avx512bw-vpalignr-1b.c: New test.
+	* gcc.target/i386/avx512dq-vfpclasssd-1b.c: Ditto.
+	* gcc.target/i386/avx512dq-vfpclassss-1b.c: Ditto.
+	* gcc.target/i386/avx512dq-vreducesd-1b.c: Ditto.
+	* gcc.target/i386/avx512dq-vreducess-1b.c: Ditto.
+	* gcc.target/i386/avx512vl-valignq-1b.c: Ditto.
+
+(cherry picked from commit 16daeb262af4566e665a941368cb15bc2cba3f07)
+---
+ gcc/config/i386/avx512dqintrin.h               | 16 +++++++++-------
+ gcc/config/i386/avx512vlbwintrin.h             |  4 ++--
+ gcc/config/i386/avx512vlintrin.h               |  2 +-
+ .../gcc.target/i386/avx512bw-vpalignr-1b.c     | 18 ++++++++++++++++++
+ .../gcc.target/i386/avx512dq-vfpclasssd-1b.c   | 14 ++++++++++++++
+ .../gcc.target/i386/avx512dq-vfpclassss-1b.c   | 14 ++++++++++++++
+ .../gcc.target/i386/avx512dq-vreducesd-1b.c    | 16 ++++++++++++++++
+ .../gcc.target/i386/avx512dq-vreducess-1b.c    | 16 ++++++++++++++++
+ .../gcc.target/i386/avx512vl-valignq-1b.c      | 15 +++++++++++++++
+ 9 files changed, 105 insertions(+), 10 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx512bw-vpalignr-1b.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx512dq-vfpclasssd-1b.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx512dq-vfpclassss-1b.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx512dq-vreducesd-1b.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx512dq-vreducess-1b.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx512vl-valignq-1b.c
+
+diff --git a/gcc/config/i386/avx512dqintrin.h b/gcc/config/i386/avx512dqintrin.h
+index e924250a4ad..4f9451e949b 100644
+--- a/gcc/config/i386/avx512dqintrin.h
++++ b/gcc/config/i386/avx512dqintrin.h
+@@ -2800,11 +2800,11 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
+   ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X),	\
+ 					     (int) (C), (__mmask8) (-1))) \
+ 
+-#define _mm_mask_fpclass_ss_mask(X, C, U)				\
++#define _mm_mask_fpclass_ss_mask(U, X, C)				\
+   ((__mmask8) __builtin_ia32_fpclassss_mask ((__v4sf) (__m128) (X),	\
+ 					     (int) (C), (__mmask8) (U)))
+ 
+-#define _mm_mask_fpclass_sd_mask(X, C, U)				\
++#define _mm_mask_fpclass_sd_mask(U, X, C)				\
+   ((__mmask8) __builtin_ia32_fpclasssd_mask ((__v2df) (__m128d) (X),	\
+ 					     (int) (C), (__mmask8) (U)))
+ 
+@@ -2839,8 +2839,9 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
+     (__mmask8)(U)))
+ 
+ #define _mm_reduce_round_sd(A, B, C, R)				       \
+-  ((__m128d) __builtin_ia32_reducesd_round ((__v2df)(__m128d)(A),      \
+-    (__v2df)(__m128d)(B), (int)(C), (__mmask8)(U), (int)(R)))
++  ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
++    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_avx512_setzero_pd (), \
++    (__mmask8)(-1), (int)(R)))
+ 
+ #define _mm_mask_reduce_round_sd(W, U, A, B, C, R)		       \
+   ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+@@ -2867,8 +2868,9 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
+     (__mmask8)(U)))
+ 
+ #define _mm_reduce_round_ss(A, B, C, R)				       \
+-  ((__m128) __builtin_ia32_reducess_round ((__v4sf)(__m128)(A),	       \
+-    (__v4sf)(__m128)(B), (int)(C), (__mmask8)(U), (int)(R)))
++  ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A),   \
++    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_avx512_setzero_ps (),  \
++    (__mmask8)(-1), (int)(R)))
+ 
+ #define _mm_mask_reduce_round_ss(W, U, A, B, C, R)		       \
+   ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A),   \
+@@ -2876,7 +2878,7 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
+     (__mmask8)(U), (int)(R)))
+ 
+ #define _mm_maskz_reduce_round_ss(U, A, B, C, R)		       \
+-  ((__m128) __builtin_ia32_reducesd_mask_round ((__v4sf)(__m128)(A),   \
++  ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A),   \
+     (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),	       \
+     (__mmask8)(U), (int)(R)))
+ 
+diff --git a/gcc/config/i386/avx512vlbwintrin.h b/gcc/config/i386/avx512vlbwintrin.h
+index 192d54e743f..c918ed520c5 100644
+--- a/gcc/config/i386/avx512vlbwintrin.h
++++ b/gcc/config/i386/avx512vlbwintrin.h
+@@ -1839,7 +1839,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
+ #define _mm256_mask_alignr_epi8(W, U, X, Y, N)					    \
+   ((__m256i) __builtin_ia32_palignr256_mask ((__v4di)(__m256i)(X),		    \
+ 					    (__v4di)(__m256i)(Y), (int)((N) * 8),   \
+-					    (__v4di)(__m256i)(X), (__mmask32)(U)))
++					    (__v4di)(__m256i)(W), (__mmask32)(U)))
+ 
+ #define _mm256_mask_srli_epi16(W, U, A, B)                              \
+   ((__m256i) __builtin_ia32_psrlwi256_mask ((__v16hi)(__m256i)(A),      \
+@@ -1922,7 +1922,7 @@ _mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, int __B)
+ #define _mm_mask_alignr_epi8(W, U, X, Y, N)					    \
+   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X),		    \
+ 					    (__v2di)(__m128i)(Y), (int)((N) * 8),   \
+-					    (__v2di)(__m128i)(X), (__mmask16)(U)))
++					    (__v2di)(__m128i)(W), (__mmask16)(U)))
+ 
+ #define _mm_maskz_alignr_epi8(U, X, Y, N)					    \
+   ((__m128i) __builtin_ia32_palignr128_mask ((__v2di)(__m128i)(X),		    \
+diff --git a/gcc/config/i386/avx512vlintrin.h b/gcc/config/i386/avx512vlintrin.h
+index 26b286eae6b..c6f3f35a009 100644
+--- a/gcc/config/i386/avx512vlintrin.h
++++ b/gcc/config/i386/avx512vlintrin.h
+@@ -13609,7 +13609,7 @@ _mm256_permutex_pd (__m256d __X, const int __M)
+ 
+ #define _mm_mask_alignr_epi64(W, U, X, Y, C)                                \
+     ((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X),          \
+-        (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(X), (__mmask8)-1))
++        (__v2di)(__m128i)(Y), (int)(C), (__v2di)(__m128i)(W), (__mmask8)(U)))
+ 
+ #define _mm_maskz_alignr_epi64(U, X, Y, C)                                  \
+     ((__m128i)__builtin_ia32_alignq128_mask ((__v2di)(__m128i)(X),          \
+diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-vpalignr-1b.c b/gcc/testsuite/gcc.target/i386/avx512bw-vpalignr-1b.c
+new file mode 100644
+index 00000000000..2b42aa90b91
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx512bw-vpalignr-1b.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++/* { dg-options "-O0 -mavx512bw -mavx512vl" } */
++/* { dg-final { scan-assembler-times "vpalignr\[ \\t\]+\[^\{\n\]*%ymm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
++/* { dg-final { scan-assembler-times "vpalignr\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
++
++#include 
++
++volatile __m256i y;
++volatile __m128i x;
++volatile __mmask32 m2;
++volatile __mmask16 m3;
++
++void extern
++avx512bw_test (void)
++{
++  y = _mm256_mask_alignr_epi8 (y, m2, y, y, 10);
++  x = _mm_mask_alignr_epi8 (x, m3, x, x, 10);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/avx512dq-vfpclasssd-1b.c b/gcc/testsuite/gcc.target/i386/avx512dq-vfpclasssd-1b.c
+new file mode 100644
+index 00000000000..8c7f96fb7a7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx512dq-vfpclasssd-1b.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx512dq -O0" } */
++/* { dg-final { scan-assembler-times "vfpclasssd\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[0-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
++
++#include 
++
++volatile __m128d x128;
++volatile __mmask8 m8;
++
++void extern
++avx512dq_test (void)
++{
++  m8 = _mm_mask_fpclass_sd_mask (m8, x128, 13);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/avx512dq-vfpclassss-1b.c b/gcc/testsuite/gcc.target/i386/avx512dq-vfpclassss-1b.c
+new file mode 100644
+index 00000000000..3196fd60d64
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx512dq-vfpclassss-1b.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx512dq -O0" } */
++/* { dg-final { scan-assembler-times "vfpclassss\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\[^\n^k\]*%k\[0-7\]\{%k\[0-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
++
++#include 
++
++volatile __m128 x128;
++volatile __mmask8 m8;
++
++void extern
++avx512dq_test (void)
++{
++  m8 = _mm_mask_fpclass_ss_mask (m8, x128, 13);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/avx512dq-vreducesd-1b.c b/gcc/testsuite/gcc.target/i386/avx512dq-vreducesd-1b.c
+new file mode 100644
+index 00000000000..9ae8259d373
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx512dq-vreducesd-1b.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx512dq -O0" } */
++/* { dg-final { scan-assembler-times "vreducesd\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
++
++#include 
++
++#define IMM 123
++
++volatile __m128d x1, x2, xx1, xx2;
++volatile __mmask8 m;
++
++void extern
++avx512dq_test (void)
++{
++  xx1 = _mm_reduce_round_sd (xx1, xx2, IMM, _MM_FROUND_NO_EXC);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/avx512dq-vreducess-1b.c b/gcc/testsuite/gcc.target/i386/avx512dq-vreducess-1b.c
+new file mode 100644
+index 00000000000..47bf48fb617
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx512dq-vreducess-1b.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx512dq -O0" } */
++/* { dg-final { scan-assembler-times "vreducess\[ \\t\]+\[^\{\n\]*\{sae\}\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\[^\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
++
++#include 
++
++#define IMM 123
++
++volatile __m128 x1, x2, xx1, xx2;
++volatile __mmask8 m;
++
++void extern
++avx512dq_test (void)
++{
++  xx1 = _mm_reduce_round_ss (xx1, xx2, IMM, _MM_FROUND_NO_EXC);
++}
+diff --git a/gcc/testsuite/gcc.target/i386/avx512vl-valignq-1b.c b/gcc/testsuite/gcc.target/i386/avx512vl-valignq-1b.c
+new file mode 100644
+index 00000000000..0ab16b27733
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx512vl-valignq-1b.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-options "-O0 -mavx512vl" } */
++/* { dg-final { scan-assembler-times "valignq\[ \\t\]+\[^\{\n\]*%xmm\[0-9\]+\{%k\[1-7\]\}(?:\n|\[ \\t\]+#)" 1 } } */
++
++#include 
++
++volatile __m256i y;
++volatile __m128i x;
++volatile __mmask8 m;
++
++void extern
++avx512vl_test (void)
++{
++  x = _mm_mask_alignr_epi64 (x, m, x, x, 1);
++}
+-- 
+2.31.1
+
diff --git a/0339-i386-Use-_mm_setzero_ps-d-instead-of-_mm_avx512_setz.patch b/0339-i386-Use-_mm_setzero_ps-d-instead-of-_mm_avx512_setz.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0863492cd0d7132b9d49132403a50d6da5778655
--- /dev/null
+++ b/0339-i386-Use-_mm_setzero_ps-d-instead-of-_mm_avx512_setz.patch
@@ -0,0 +1,46 @@
+From 22584572ff9a1c3256da20f5438cacc6102fa2ac Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Mon, 29 Jul 2024 14:10:49 +0800
+Subject: [PATCH 02/14] i386: Use _mm_setzero_ps/d instead of
+ _mm_avx512_setzero_ps/d for GCC13/12
+
+In GCC13/12, there is no _mm_avx512_setzero_ps/d since it is introduced
+in GCC14.
+
+gcc/ChangeLog:
+
+	* config/i386/avx512dqintrin.h (_mm_reduce_round_sd): Use
+	_mm_setzero_pd instead of _mm_avx512_setzero_pd.
+	(_mm_reduce_round_ss): Use _mm_setzero_ps instead of
+	_mm_avx512_setzero_ps.
+
+(cherry picked from commit 77ad22e4eaa97bb10068c6170f53caca77c99392) (gcc-12)
+---
+ gcc/config/i386/avx512dqintrin.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/i386/avx512dqintrin.h b/gcc/config/i386/avx512dqintrin.h
+index 4f9451e949b..e8f8efe3be8 100644
+--- a/gcc/config/i386/avx512dqintrin.h
++++ b/gcc/config/i386/avx512dqintrin.h
+@@ -2840,7 +2840,7 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
+ 
+ #define _mm_reduce_round_sd(A, B, C, R)				       \
+   ((__m128d) __builtin_ia32_reducesd_mask_round ((__v2df)(__m128d)(A), \
+-    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_avx512_setzero_pd (), \
++    (__v2df)(__m128d)(B), (int)(C), (__v2df) _mm_setzero_pd (),	       \
+     (__mmask8)(-1), (int)(R)))
+ 
+ #define _mm_mask_reduce_round_sd(W, U, A, B, C, R)		       \
+@@ -2869,7 +2869,7 @@ _mm512_fpclass_ps_mask (__m512 __A, const int __imm)
+ 
+ #define _mm_reduce_round_ss(A, B, C, R)				       \
+   ((__m128) __builtin_ia32_reducess_mask_round ((__v4sf)(__m128)(A),   \
+-    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_avx512_setzero_ps (),  \
++    (__v4sf)(__m128)(B), (int)(C), (__v4sf) _mm_setzero_ps (),	       \
+     (__mmask8)(-1), (int)(R)))
+ 
+ #define _mm_mask_reduce_round_ss(W, U, A, B, C, R)		       \
+-- 
+2.31.1
+
diff --git a/0340-Refine-constraint-Bk-to-define_special_memory_constr.patch b/0340-Refine-constraint-Bk-to-define_special_memory_constr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..024209eeb5dae81603c57a173cc2adc0c0e75f1d
--- /dev/null
+++ b/0340-Refine-constraint-Bk-to-define_special_memory_constr.patch
@@ -0,0 +1,107 @@
+From bdc11c30981f8954249aa534c9b5b2ea51efa042 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Wed, 24 Jul 2024 11:29:23 +0800
+Subject: [PATCH 03/14] Refine constraint "Bk" to
+ define_special_memory_constraint.
+
+For below pattern, RA may still allocate r162 as v/k register, try to
+reload for address with leaq __libc_tsd_CTYPE_B@gottpoff(%rip), %rsi
+which result a linker error.
+
+(set (reg:DI 162)
+     (mem/u/c:DI
+       (const:DI (unspec:DI
+		 [(symbol_ref:DI ("a") [flags 0x60]  )]
+		 UNSPEC_GOTNTPOFF))
+
+Quote from H.J for why linker issue an error.
+>What do these do:
+>
+>        leaq    __libc_tsd_CTYPE_B@gottpoff(%rip), %rax
+>        vmovq   (%rax), %xmm0
+>
+>From x86-64 TLS psABI:
+>
+>The assembler generates for the x@gottpoff(%rip) expressions a R X86
+>64 GOTTPOFF relocation for the symbol x which requests the linker to
+>generate a GOT entry with a R X86 64 TPOFF64 relocation. The offset of
+>the GOT entry relative to the end of the instruction is then used in
+>the instruction. The R X86 64 TPOFF64 relocation is pro- cessed at
+>program startup time by the dynamic linker by looking up the symbol x
+>in the modules loaded at that point. The offset is written in the GOT
+>entry and later loaded by the addq instruction.
+>
+>The above code sequence looks wrong to me.
+
+gcc/ChangeLog:
+
+	PR target/116043
+	* config/i386/constraints.md (Bk): Refine to
+	define_special_memory_constraint.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr116043.c: New test.
+
+(cherry picked from commit bc1fda00d5f20e2f3e77a50b2822562b6e0040b2)
+---
+ gcc/config/i386/constraints.md           |  2 +-
+ gcc/testsuite/gcc.target/i386/pr116043.c | 33 ++++++++++++++++++++++++
+ 2 files changed, 34 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr116043.c
+
+diff --git a/gcc/config/i386/constraints.md b/gcc/config/i386/constraints.md
+index 7361687632f..e4b66340589 100644
+--- a/gcc/config/i386/constraints.md
++++ b/gcc/config/i386/constraints.md
+@@ -187,7 +187,7 @@
+   (and (match_operand 0 "memory_operand")
+        (match_test "constant_address_p (XEXP (op, 0))")))
+ 
+-(define_memory_constraint "Bk"
++(define_special_memory_constraint "Bk"
+   "@internal TLS address that allows insn using non-integer registers."
+   (and (match_operand 0 "memory_operand")
+        (not (match_test "ix86_gpr_tls_address_pattern_p (op)"))))
+diff --git a/gcc/testsuite/gcc.target/i386/pr116043.c b/gcc/testsuite/gcc.target/i386/pr116043.c
+new file mode 100644
+index 00000000000..76553496c10
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr116043.c
+@@ -0,0 +1,33 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx512bf16 -O3" } */
++/* { dg-final { scan-assembler-not {(?n)lea.*@gottpoff} } } */
++
++extern __thread int a, c, i, j, k, l;
++int *b;
++struct d {
++  int e;
++} f, g;
++char *h;
++
++void m(struct d *n) {
++  b = &k;
++  for (; n->e; b++, n--) {
++    i = b && a;
++    if (i)
++      j = c;
++  }
++}
++
++char *o(struct d *n) {
++  for (; n->e;)
++    return h;
++}
++
++int q() {
++  if (l)
++    return 1;
++  int p = *o(&g);
++  m(&f);
++  m(&g);
++  l = p;
++}
+-- 
+2.31.1
+
diff --git a/0341-Align-ix86_-move_max-store_max-with-vectorizer.patch b/0341-Align-ix86_-move_max-store_max-with-vectorizer.patch
new file mode 100644
index 0000000000000000000000000000000000000000..36876a378a23f47aa6431e61f727e307ac5bcebf
--- /dev/null
+++ b/0341-Align-ix86_-move_max-store_max-with-vectorizer.patch
@@ -0,0 +1,232 @@
+From 002e45c7f46a0f8dd2b5381cd1ee1341f8987fca Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Thu, 15 Aug 2024 12:54:07 +0800
+Subject: [PATCH 04/14] Align ix86_{move_max,store_max} with vectorizer.
+
+When none of mprefer-vector-width, avx256_optimal/avx128_optimal,
+avx256_store_by_pieces/avx512_store_by_pieces is specified, GCC will
+set ix86_{move_max,store_max} as max available vector length except
+for AVX part.
+
+	      if (TARGET_AVX512F_P (opts->x_ix86_isa_flags)
+		  && TARGET_EVEX512_P (opts->x_ix86_isa_flags2))
+		opts->x_ix86_move_max = PVW_AVX512;
+	      else
+		opts->x_ix86_move_max = PVW_AVX128;
+
+So for -mavx2, vectorizer will choose 256-bit for vectorization, but
+128-bit is used for struct copy, there could be a potential STLF issue
+due to this "misalign".
+
+The patch fixes that.
+
+gcc/ChangeLog:
+
+	* config/i386/i386-options.cc (ix86_option_override_internal):
+	set ix86_{move_max,store_max} to PVW_AVX256 when TARGET_AVX
+	instead of PVW_AVX128.
+
+gcc/testsuite/ChangeLog:
+	* gcc.target/i386/pieces-memcpy-10.c: Add -mprefer-vector-width=128.
+	* gcc.target/i386/pieces-memcpy-6.c: Ditto.
+	* gcc.target/i386/pieces-memset-38.c: Ditto.
+	* gcc.target/i386/pieces-memset-40.c: Ditto.
+	* gcc.target/i386/pieces-memset-41.c: Ditto.
+	* gcc.target/i386/pieces-memset-42.c: Ditto.
+	* gcc.target/i386/pieces-memset-43.c: Ditto.
+	* gcc.target/i386/pieces-strcpy-2.c: Ditto.
+	* gcc.target/i386/pieces-memcpy-22.c: New test.
+	* gcc.target/i386/pieces-memset-51.c: New test.
+	* gcc.target/i386/pieces-strcpy-3.c: New test.
+
+(cherry picked from commit aea374238cec1a1e53fb79575d2f998e16926999)
+---
+ gcc/config/i386/i386-options.cc                  |  6 ++++++
+ gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memcpy-22.c | 12 ++++++++++++
+ gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c  |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-38.c |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-40.c |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-41.c |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-42.c |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-43.c |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-51.c | 12 ++++++++++++
+ gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c  |  2 +-
+ gcc/testsuite/gcc.target/i386/pieces-strcpy-3.c  | 15 +++++++++++++++
+ 12 files changed, 53 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pieces-memcpy-22.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/pieces-memset-51.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/pieces-strcpy-3.c
+
+diff --git a/gcc/config/i386/i386-options.cc b/gcc/config/i386/i386-options.cc
+index 991661fe4a2..061a1584318 100644
+--- a/gcc/config/i386/i386-options.cc
++++ b/gcc/config/i386/i386-options.cc
+@@ -2802,6 +2802,9 @@ ix86_option_override_internal (bool main_args_p,
+ 	    {
+ 	      if (TARGET_AVX512F_P (opts->x_ix86_isa_flags))
+ 		opts->x_ix86_move_max = PVW_AVX512;
++	      /* Align with vectorizer to avoid potential STLF issue.  */
++	      else if (TARGET_AVX_P (opts->x_ix86_isa_flags))
++		opts->x_ix86_move_max = PVW_AVX256;
+ 	      else
+ 		opts->x_ix86_move_max = PVW_AVX128;
+ 	    }
+@@ -2823,6 +2826,9 @@ ix86_option_override_internal (bool main_args_p,
+ 	    {
+ 	      if (TARGET_AVX512F_P (opts->x_ix86_isa_flags))
+ 		opts->x_ix86_store_max = PVW_AVX512;
++	      /* Align with vectorizer to avoid potential STLF issue.  */
++	      else if (TARGET_AVX_P (opts->x_ix86_isa_flags))
++		opts->x_ix86_store_max = PVW_AVX256;
+ 	      else
+ 		opts->x_ix86_store_max = PVW_AVX128;
+ 	    }
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c b/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c
+index 5faee21f9b9..53ad0b3be44 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
+ 
+ extern char *dst, *src;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memcpy-22.c b/gcc/testsuite/gcc.target/i386/pieces-memcpy-22.c
+new file mode 100644
+index 00000000000..605b3623ffc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pieces-memcpy-22.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mtune=generic" } */
++
++extern char *dst, *src;
++
++void
++foo (void)
++{
++  __builtin_memcpy (dst, src, 33);
++}
++
++/* { dg-final { scan-assembler-times "vmovdqu\[ \\t\]+\[^\n\]*%ymm" 2 } } */
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c b/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c
+index 5f99cc98c47..cfd2a86cf33 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target { ! ia32 } } } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
+ 
+ extern char *dst, *src;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-38.c b/gcc/testsuite/gcc.target/i386/pieces-memset-38.c
+index ed4a24a54fd..ddd194debd5 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-38.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-38.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx512f -mavx2 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx512f -mavx2 -mprefer-vector-width=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-40.c b/gcc/testsuite/gcc.target/i386/pieces-memset-40.c
+index 4eda73ead59..9c206465d46 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-40.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-40.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx512f -mavx2 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx512f -mavx2 -mprefer-vector-width=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-41.c b/gcc/testsuite/gcc.target/i386/pieces-memset-41.c
+index 93df8101e4d..b0756182e35 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-41.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-41.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mtune=sandybridge -mno-stackrealign" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge -mno-stackrealign" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-42.c b/gcc/testsuite/gcc.target/i386/pieces-memset-42.c
+index df0c122aae7..103da699ae5 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-42.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-42.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-43.c b/gcc/testsuite/gcc.target/i386/pieces-memset-43.c
+index 2f2179c2df9..f1494e17610 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-43.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-43.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-51.c b/gcc/testsuite/gcc.target/i386/pieces-memset-51.c
+new file mode 100644
+index 00000000000..192ec0d1647
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-51.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mtune=generic" } */
++
++extern char *dst;
++
++void
++foo (int x)
++{
++  __builtin_memset (dst, x, 64);
++}
++
++/* { dg-final { scan-assembler-times "vmovdqu\[ \\t\]+\[^\n\]*%ymm" 2 } } */
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c b/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c
+index 90446edb4f3..9bb94b7419b 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target { ! ia32 } } } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
+ 
+ extern char *strcpy (char *, const char *);
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-strcpy-3.c b/gcc/testsuite/gcc.target/i386/pieces-strcpy-3.c
+new file mode 100644
+index 00000000000..df7571b547f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pieces-strcpy-3.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mtune=generic" } */
++
++extern char *strcpy (char *, const char *);
++
++void
++foo (char *s)
++{
++  strcpy (s,
++	  "1234567890abcdef123456abcdef5678123456abcdef567abcdef678"
++	  "1234567");
++}
++
++/* { dg-final { scan-assembler-times "vmovdqa\[ \\t\]+\[^\n\]*%ymm" 2 } } */
++/* { dg-final { scan-assembler-times "vmovdqu\[ \\t\]+\[^\n\]*%ymm" 2 } } */
+-- 
+2.31.1
+
diff --git a/0342-Fix-testcase-failure.patch b/0342-Fix-testcase-failure.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a1d3ed6a5eba6ccf4f0a0a241c2e387f978a9197
--- /dev/null
+++ b/0342-Fix-testcase-failure.patch
@@ -0,0 +1,120 @@
+From c5c6183ab3132d40fb0f10c57c26c6ef4f69bfda Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Thu, 22 Aug 2024 14:31:40 +0800
+Subject: [PATCH 05/14] Fix testcase failure.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pieces-memcpy-10.c: Use -mmove-max=256 and
+	-mstore-max=256.
+	* gcc.target/i386/pieces-memcpy-6.c: Ditto.
+	* gcc.target/i386/pieces-memset-38.c: Ditto.
+	* gcc.target/i386/pieces-memset-40.c: Ditto.
+	* gcc.target/i386/pieces-memset-41.c: Ditto.
+	* gcc.target/i386/pieces-memset-42.c: Ditto.
+	* gcc.target/i386/pieces-memset-43.c: Ditto.
+	* gcc.target/i386/pieces-strcpy-2.c: Ditto.
+
+(cherry picked from commit ea9c508927ec032c6d67a24df59ffa429e4d3d95)
+---
+ gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c | 2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c  | 2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-38.c | 2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-40.c | 2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-41.c | 2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-42.c | 2 +-
+ gcc/testsuite/gcc.target/i386/pieces-memset-43.c | 2 +-
+ gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c  | 2 +-
+ 8 files changed, 8 insertions(+), 8 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c b/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c
+index 53ad0b3be44..78f92ac5197 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memcpy-10.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mmove-max=128 -mstore-max=128 -mtune=sandybridge" } */
+ 
+ extern char *dst, *src;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c b/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c
+index cfd2a86cf33..57b74ae4b23 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memcpy-6.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target { ! ia32 } } } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mmove-max=128 -mstore-max=128 -mtune=sandybridge" } */
+ 
+ extern char *dst, *src;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-38.c b/gcc/testsuite/gcc.target/i386/pieces-memset-38.c
+index ddd194debd5..d9443678735 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-38.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-38.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx512f -mavx2 -mprefer-vector-width=128 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx512f -mavx2 -mmove-max=128 -mstore-max=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-40.c b/gcc/testsuite/gcc.target/i386/pieces-memset-40.c
+index 9c206465d46..8ad6ad7e494 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-40.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-40.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx512f -mavx2 -mprefer-vector-width=128 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx512f -mavx2 -mmove-max=128 -mstore-max=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-41.c b/gcc/testsuite/gcc.target/i386/pieces-memset-41.c
+index b0756182e35..08fd6e9a927 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-41.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-41.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge -mno-stackrealign" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mmove-max=128 -mstore-max=128 -mtune=sandybridge -mno-stackrealign" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-42.c b/gcc/testsuite/gcc.target/i386/pieces-memset-42.c
+index 103da699ae5..6b73bb256af 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-42.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-42.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mmove-max=128 -mstore-max=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-memset-43.c b/gcc/testsuite/gcc.target/i386/pieces-memset-43.c
+index f1494e17610..c6c7ff234da 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-memset-43.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-memset-43.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mmove-max=128 -mstore-max=128 -mtune=sandybridge" } */
+ 
+ extern char *dst;
+ 
+diff --git a/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c b/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c
+index 9bb94b7419b..40ada119625 100644
+--- a/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c
++++ b/gcc/testsuite/gcc.target/i386/pieces-strcpy-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target { ! ia32 } } } */
+-/* { dg-options "-O2 -mno-avx2 -mavx -mprefer-vector-width=128 -mtune=sandybridge" } */
++/* { dg-options "-O2 -mno-avx2 -mavx -mmove-max=128 -mstore-max=128 -mtune=sandybridge" } */
+ 
+ extern char *strcpy (char *, const char *);
+ 
+-- 
+2.31.1
+
diff --git a/0343-Check-avx-upper-register-for-parallel.patch b/0343-Check-avx-upper-register-for-parallel.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6112e87122b09458944b47175d82882a0e20ebf5
--- /dev/null
+++ b/0343-Check-avx-upper-register-for-parallel.patch
@@ -0,0 +1,148 @@
+From 8d7562fe6bd0284dc15cae8f1cd1b59ee940064a Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Thu, 29 Aug 2024 11:39:20 +0800
+Subject: [PATCH 06/14] Check avx upper register for parallel.
+
+For function arguments/return, when it's BLK mode, it's put in a
+parallel with an expr_list, and the expr_list contains the real mode
+and registers.
+Current ix86_check_avx_upper_register only checked for SSE_REG_P, and
+failed to handle that. The patch extend the handle to each subrtx.
+
+gcc/ChangeLog:
+
+	PR target/116512
+	* config/i386/i386.cc (ix86_check_avx_upper_register): Iterate
+	subrtx to scan for avx upper register.
+	(ix86_check_avx_upper_stores): Inline old
+	ix86_check_avx_upper_register.
+	(ix86_avx_u128_mode_needed): Ditto, and replace
+	FOR_EACH_SUBRTX with call to new
+	ix86_check_avx_upper_register.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr116512.c: New test.
+
+(cherry picked from commit ab214ef734bfc3dcffcf79ff9e1dd651c2b40566)
+---
+ gcc/config/i386/i386.cc                  | 36 +++++++++++++++---------
+ gcc/testsuite/gcc.target/i386/pr116512.c | 26 +++++++++++++++++
+ 2 files changed, 49 insertions(+), 13 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr116512.c
+
+diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
+index ade965927ac..e2743e0bd5c 100644
+--- a/gcc/config/i386/i386.cc
++++ b/gcc/config/i386/i386.cc
+@@ -14359,9 +14359,19 @@ ix86_dirflag_mode_needed (rtx_insn *insn)
+ static bool
+ ix86_check_avx_upper_register (const_rtx exp)
+ {
+-  return (SSE_REG_P (exp)
+-	  && !EXT_REX_SSE_REG_P (exp)
+-	  && GET_MODE_BITSIZE (GET_MODE (exp)) > 128);
++  /* construct_container may return a parallel with expr_list
++     which contains the real reg and mode  */
++  subrtx_iterator::array_type array;
++  FOR_EACH_SUBRTX (iter, array, exp, NONCONST)
++    {
++      const_rtx x = *iter;
++      if (SSE_REG_P (x)
++	  && !EXT_REX_SSE_REG_P (x)
++	  && GET_MODE_BITSIZE (GET_MODE (x)) > 128)
++	return true;
++    }
++
++  return false;
+ }
+ 
+ /* Check if a 256bit or 512bit AVX register is referenced in stores.   */
+@@ -14369,7 +14379,9 @@ ix86_check_avx_upper_register (const_rtx exp)
+ static void
+ ix86_check_avx_upper_stores (rtx dest, const_rtx, void *data)
+ {
+-  if (ix86_check_avx_upper_register (dest))
++  if (SSE_REG_P (dest)
++      && !EXT_REX_SSE_REG_P (dest)
++      && GET_MODE_BITSIZE (GET_MODE (dest)) > 128)
+     {
+       bool *used = (bool *) data;
+       *used = true;
+@@ -14427,14 +14439,14 @@ ix86_avx_u128_mode_needed (rtx_insn *insn)
+       return AVX_U128_CLEAN;
+     }
+ 
+-  subrtx_iterator::array_type array;
+-
+   rtx set = single_set (insn);
+   if (set)
+     {
+       rtx dest = SET_DEST (set);
+       rtx src = SET_SRC (set);
+-      if (ix86_check_avx_upper_register (dest))
++      if (SSE_REG_P (dest)
++	  && !EXT_REX_SSE_REG_P (dest)
++	  && GET_MODE_BITSIZE (GET_MODE (dest)) > 128)
+ 	{
+ 	  /* This is an YMM/ZMM load.  Return AVX_U128_DIRTY if the
+ 	     source isn't zero.  */
+@@ -14445,9 +14457,8 @@ ix86_avx_u128_mode_needed (rtx_insn *insn)
+ 	}
+       else
+ 	{
+-	  FOR_EACH_SUBRTX (iter, array, src, NONCONST)
+-	    if (ix86_check_avx_upper_register (*iter))
+-	      return AVX_U128_DIRTY;
++	  if (ix86_check_avx_upper_register (src))
++	    return AVX_U128_DIRTY;
+ 	}
+ 
+       /* This isn't YMM/ZMM load/store.  */
+@@ -14458,9 +14469,8 @@ ix86_avx_u128_mode_needed (rtx_insn *insn)
+      Hardware changes state only when a 256bit register is written to,
+      but we need to prevent the compiler from moving optimal insertion
+      point above eventual read from 256bit or 512 bit register.  */
+-  FOR_EACH_SUBRTX (iter, array, PATTERN (insn), NONCONST)
+-    if (ix86_check_avx_upper_register (*iter))
+-      return AVX_U128_DIRTY;
++  if (ix86_check_avx_upper_register (PATTERN (insn)))
++    return AVX_U128_DIRTY;
+ 
+   return AVX_U128_ANY;
+ }
+diff --git a/gcc/testsuite/gcc.target/i386/pr116512.c b/gcc/testsuite/gcc.target/i386/pr116512.c
+new file mode 100644
+index 00000000000..c2bc6c91b64
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr116512.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++/* { dg-options "-march=x86-64-v4 -O2" } */
++/* { dg-final { scan-assembler-not "vzeroupper" { target { ! ia32 } } } } */
++
++#include 
++
++struct B {
++  union {
++    __m512 f;
++    __m512i s;
++  };
++};
++
++struct B foo(int n) {
++  struct B res;
++  res.s = _mm512_set1_epi32(n);
++
++  return res;
++}
++
++__m512i bar(int n) {
++  struct B res;
++  res.s = _mm512_set1_epi32(n);
++
++  return res.s;
++}
+-- 
+2.31.1
+
diff --git a/0344-i386-Fix-vfpclassph-non-optimizied-intrin.patch b/0344-i386-Fix-vfpclassph-non-optimizied-intrin.patch
new file mode 100644
index 0000000000000000000000000000000000000000..77a60bbfae6b18b4ed2b7da49370b78933c67f97
--- /dev/null
+++ b/0344-i386-Fix-vfpclassph-non-optimizied-intrin.patch
@@ -0,0 +1,134 @@
+From 9cb8d824a580c1ea79718300deed14b8ec5cc1e2 Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Mon, 2 Sep 2024 15:00:22 +0800
+Subject: [PATCH 07/14] i386: Fix vfpclassph non-optimized intrin
+
+The intrin for non-optimized got a typo in mask type, which will cause
+the high bits of __mmask32 to be unexpectedly zeroed.
+
+The test does not fail under O0 with current 1b since the testcase is
+wrong. We need to include avx512-mask-type.h after SIZE is defined, or
+it will always be __mmask8. That problem also happened in AVX10.2 testcases.
+I will write a separate patch to fix that.
+
+gcc/ChangeLog:
+
+	* config/i386/avx512fp16intrin.h
+	(_mm512_mask_fpclass_ph_mask): Correct mask type to __mmask32.
+	(_mm512_fpclass_ph_mask): Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/avx512fp16-vfpclassph-1c.c: New test.
+
+(cherry picked from commit 6e59b188c4a051d4f2de5220d30681e6963d96c0) (gcc-12)
+---
+ gcc/config/i386/avx512fp16intrin.h            |  4 +-
+ .../i386/avx512fp16-vfpclassph-1c.c           | 77 +++++++++++++++++++
+ 2 files changed, 79 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/avx512fp16-vfpclassph-1c.c
+
+diff --git a/gcc/config/i386/avx512fp16intrin.h b/gcc/config/i386/avx512fp16intrin.h
+index b16ccfcb7f1..6330e57ebb8 100644
+--- a/gcc/config/i386/avx512fp16intrin.h
++++ b/gcc/config/i386/avx512fp16intrin.h
+@@ -2321,11 +2321,11 @@ _mm512_fpclass_ph_mask (__m512h __A, const int __imm)
+ #else
+ #define _mm512_mask_fpclass_ph_mask(u, x, c)				\
+   ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \
+-						 (int) (c),(__mmask8)(u)))
++						 (int) (c),(__mmask32)(u)))
+ 
+ #define _mm512_fpclass_ph_mask(x, c)                                    \
+   ((__mmask32) __builtin_ia32_fpclassph512_mask ((__v32hf) (__m512h) (x), \
+-						 (int) (c),(__mmask8)-1))
++						 (int) (c),(__mmask32)-1))
+ #endif /* __OPIMTIZE__ */
+ 
+ /* Intrinsics vgetexpph, vgetexpsh.  */
+diff --git a/gcc/testsuite/gcc.target/i386/avx512fp16-vfpclassph-1c.c b/gcc/testsuite/gcc.target/i386/avx512fp16-vfpclassph-1c.c
+new file mode 100644
+index 00000000000..4739f1228e3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/avx512fp16-vfpclassph-1c.c
+@@ -0,0 +1,77 @@
++/* { dg-do run } */
++/* { dg-options "-O0 -mavx512fp16" } */
++/* { dg-require-effective-target avx512fp16 } */
++
++#define AVX512FP16
++#include "avx512f-helper.h"
++
++#include 
++#include 
++#include 
++#define SIZE (AVX512F_LEN / 16)
++#include "avx512f-mask-type.h"
++
++#ifndef __FPCLASSPH__
++#define __FPCLASSPH__
++int check_fp_class_hp (_Float16 src, int imm)
++{
++  int qNaN_res = isnan (src);
++  int sNaN_res = isnan (src);
++  int Pzero_res = (src == 0.0);
++  int Nzero_res = (src == -0.0);
++  int PInf_res = (isinf (src) == 1);
++  int NInf_res = (isinf (src) == -1);
++  int Denorm_res = (fpclassify (src) == FP_SUBNORMAL);
++  int FinNeg_res = __builtin_finite (src) && (src < 0);
++
++  int result = (((imm & 1) && qNaN_res)
++		|| (((imm >> 1) & 1) && Pzero_res)
++		|| (((imm >> 2) & 1) && Nzero_res)
++		|| (((imm >> 3) & 1) && PInf_res)
++		|| (((imm >> 4) & 1) && NInf_res)
++		|| (((imm >> 5) & 1) && Denorm_res)
++		|| (((imm >> 6) & 1) && FinNeg_res)
++		|| (((imm >> 7) & 1) && sNaN_res));
++  return result;
++}
++#endif
++
++MASK_TYPE
++CALC (_Float16 *s1, int imm)
++{
++  int i;
++  MASK_TYPE res = 0;
++
++  for (i = 0; i < SIZE; i++)
++    if (check_fp_class_hp(s1[i], imm))
++      res = res | (1 << i);
++
++  return res;
++}
++
++void
++TEST (void)
++{
++  int i;
++  UNION_TYPE (AVX512F_LEN, h) src;
++  MASK_TYPE res1, res2, res_ref = 0;
++  MASK_TYPE mask = MASK_VALUE;
++
++  src.a[SIZE - 1] = NAN;
++  src.a[SIZE - 2] = 1.0 / 0.0;
++  for (i = 0; i < SIZE - 2; i++)
++    {
++      src.a[i] = -24.43 + 0.6 * i;
++    }
++
++  res1 = INTRINSIC (_fpclass_ph_mask) (src.x, 0xFF);
++  res2 = INTRINSIC (_mask_fpclass_ph_mask) (mask, src.x, 0xFF);
++
++  res_ref = CALC (src.a, 0xFF);
++
++  if (res_ref != res1)
++    abort ();
++
++  if ((mask & res_ref) != res2)
++    abort ();
++}
+-- 
+2.31.1
+
diff --git a/0345-doc-Add-more-alias-option-and-reorder-Intel-CPU-marc.patch b/0345-doc-Add-more-alias-option-and-reorder-Intel-CPU-marc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b691700c37390ee52b53cb47b76508320e9a8595
--- /dev/null
+++ b/0345-doc-Add-more-alias-option-and-reorder-Intel-CPU-marc.patch
@@ -0,0 +1,278 @@
+From 3bdecde26f8edffbd0e981c280e3fb8519709ea4 Mon Sep 17 00:00:00 2001
+From: Haochen Jiang 
+Date: Wed, 18 Sep 2024 11:20:15 +0800
+Subject: [PATCH 08/14] doc: Add more alias option and reorder Intel CPU -march
+ documentation
+
+This patch is backported from GCC15 with some tweaks.
+
+Since r15-3539, there are requests coming in to add other alias option
+documentation. This patch will add all of them, including corei7, corei7-avx,
+core-avx-i, core-avx2, atom and slm.
+
+Also in the patch, I reordered that part of documentation, currently all
+the CPUs/products are just all over the place. I regrouped them by
+date-to-now products (since the very first CPU to latest Panther Lake), P-core
+(since the clients become hybrid cores, starting from Sapphire Rapids) and
+E-core (since Bonnell). In GCC14 and earlier GCC, Xeon Phi CPUs are still
+there, I put them after E-core CPUs.
+
+And in the patch, I refined the product names in documentation.
+
+gcc/ChangeLog:
+
+	* doc/invoke.texi: Add corei7, corei7-avx, core-avx-i,
+	core-avx2, atom, and slm. Reorder the -march documentation by
+	splitting them into date-to-now products, P-core, E-core and
+	Xeon Phi. Refine the product names in documentation.
+
+(cherry picked from commit 8483527158024d200b3a9e4edecbe188fa22fdaa)
+---
+ gcc/doc/invoke.texi | 162 +++++++++++++++++++++++---------------------
+ 1 file changed, 84 insertions(+), 78 deletions(-)
+
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 109858f7666..90073ac9832 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -31441,6 +31441,7 @@ Intel Core 2 CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3, CX16,
+ SAHF and FXSR instruction set support.
+ 
+ @item nehalem
++@itemx corei7
+ Intel Nehalem CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
+ SSE4.1, SSE4.2, POPCNT, CX16, SAHF and FXSR instruction set support.
+ 
+@@ -31449,17 +31450,20 @@ Intel Westmere CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
+ SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR and PCLMUL instruction set support.
+ 
+ @item sandybridge
++@itemx corei7-avx
+ Intel Sandy Bridge CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
+ SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE and PCLMUL instruction set
+ support.
+ 
+ @item ivybridge
++@itemx core-avx-i
+ Intel Ivy Bridge CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
+ SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND
+ and F16C instruction set support.
+ 
+ @item haswell
+-Intel Haswell CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
++@itemx core-avx2
++Intel Haswell CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
+ SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
+ F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE and HLE instruction set support.
+ 
+@@ -31475,47 +31479,6 @@ SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
+ F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
+ CLFLUSHOPT, XSAVEC, XSAVES and SGX instruction set support.
+ 
+-@item bonnell
+-Intel Bonnell CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3 and SSSE3
+-instruction set support.
+-
+-@item silvermont
+-Intel Silvermont CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
+-SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW and RDRND
+-instruction set support.
+-
+-@item goldmont
+-Intel Goldmont CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
+-SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW, RDRND, AES, SHA,
+-RDSEED, XSAVE, XSAVEC, XSAVES, XSAVEOPT, CLFLUSHOPT and FSGSBASE instruction
+-set support.
+-
+-@item goldmont-plus
+-Intel Goldmont Plus CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+-SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW, RDRND, AES,
+-SHA, RDSEED, XSAVE, XSAVEC, XSAVES, XSAVEOPT, CLFLUSHOPT, FSGSBASE, PTWRITE,
+-RDPID and SGX instruction set support.
+-
+-@item tremont
+-Intel Tremont CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
+-SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW, RDRND, AES, SHA,
+-RDSEED, XSAVE, XSAVEC, XSAVES, XSAVEOPT, CLFLUSHOPT, FSGSBASE, PTWRITE, RDPID,
+-SGX, CLWB, GFNI-SSE, MOVDIRI, MOVDIR64B, CLDEMOTE and WAITPKG instruction set
+-support.
+-
+-@item knl
+-Intel Knight's Landing CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+-SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
+-RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+-AVX512PF, AVX512ER, AVX512F, AVX512CD and PREFETCHWT1 instruction set support.
+-
+-@item knm
+-Intel Knights Mill CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+-SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
+-RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+-AVX512PF, AVX512ER, AVX512F, AVX512CD and PREFETCHWT1, AVX5124VNNIW,
+-AVX5124FMAPS and AVX512VPOPCNTDQ instruction set support.
+-
+ @item skylake-avx512
+ Intel Skylake Server CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+ SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
+@@ -31523,16 +31486,30 @@ RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+ AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, CLWB, AVX512VL, AVX512BW,
+ AVX512DQ and AVX512CD instruction set support.
+ 
++@item cascadelake
++Intel Cascade Lake CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
++SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
++F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
++CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, CLWB, AVX512VL, AVX512BW, AVX512DQ,
++AVX512CD and AVX512VNNI instruction set support.
++
+ @item cannonlake
+-Intel Cannonlake Server CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2,
++Intel Cannon Lake Server CPU with 64-bit extensions, MMX, SSE, SSE2,
+ SSE3, SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL,
+ FSGSBASE, RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX,
+ PREFETCHW, AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW,
+ AVX512DQ, AVX512CD, PKU, AVX512VBMI, AVX512IFMA and SHA instruction set
+ support.
+ 
++@item cooperlake
++Intel Cooper Lake CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
++SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
++F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
++CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, CLWB, AVX512VL, AVX512BW, AVX512DQ,
++AVX512CD, AVX512VNNI and AVX512BF16 instruction set support.
++
+ @item icelake-client
+-Intel Icelake Client CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
++Intel Ice Lake Client CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3,
+ SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
+ RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+ AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
+@@ -31540,7 +31517,7 @@ AVX512CD, PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2
+ , VPCLMULQDQ, AVX512BITALG, RDPID and AVX512VPOPCNTDQ instruction set support.
+ 
+ @item icelake-server
+-Intel Icelake Server CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
++Intel Ice Lake Server CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3,
+ SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
+ RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+ AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
+@@ -31548,55 +31525,84 @@ AVX512CD, PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2
+ , VPCLMULQDQ, AVX512BITALG, RDPID, AVX512VPOPCNTDQ, PCONFIG, WBNOINVD and CLWB
+ instruction set support.
+ 
+-@item cascadelake
+-Intel Cascadelake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
+-SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
+-F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
+-CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, CLWB, AVX512VL, AVX512BW, AVX512DQ,
+-AVX512CD and AVX512VNNI instruction set support.
+-
+-@item cooperlake
+-Intel cooperlake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
++@item tigerlake
++Intel Tiger Lake CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
+ SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
+ F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
+-CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, CLWB, AVX512VL, AVX512BW, AVX512DQ,
+-AVX512CD, AVX512VNNI and AVX512BF16 instruction set support.
++CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
++AVX512CD, PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2,
++VPCLMULQDQ, AVX512BITALG, RDPID, AVX512VPOPCNTDQ, MOVDIRI, MOVDIR64B, CLWB,
++AVX512VP2INTERSECT and KEYLOCKER instruction set support.
+ 
+-@item tigerlake
+-Intel Tigerlake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
++@item rocketlake
++Intel Rocket Lake CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
+ SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
+ F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
+-CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ, AVX512CD
++CLFLUSHOPT, XSAVEC, XSAVES, AVX512F, AVX512VL, AVX512BW, AVX512DQ, AVX512CD,
+ PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2,
+-VPCLMULQDQ, AVX512BITALG, RDPID, AVX512VPOPCNTDQ, MOVDIRI, MOVDIR64B, CLWB,
+-AVX512VP2INTERSECT and KEYLOCKER instruction set support.
++VPCLMULQDQ, AVX512BITALG, RDPID and AVX512VPOPCNTDQ instruction set support.
++
++@item alderlake
++Intel Alder Lake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
++SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC, XSAVES,
++XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI, MOVDIR64B,
++CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT, PCONFIG, PKU,
++VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL and AVX-VNNI instruction set
++support.
+ 
+ @item sapphirerapids
+-Intel sapphirerapids CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+-SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
+-RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
+-AES, CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
++Intel Sapphire Rapids CPU with 64-bit extensions, MMX, SSE, SSE2, SSE3, SSSE3,
++SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
++F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
++CLFLUSHOPT, XSAVEC, XSAVES, SGX, AVX512F, AVX512VL, AVX512BW, AVX512DQ,
+ AVX512CD, PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2,
+ VPCLMULQDQ, AVX512BITALG, RDPID, AVX512VPOPCNTDQ, PCONFIG, WBNOINVD, CLWB,
+ MOVDIRI, MOVDIR64B, ENQCMD, CLDEMOTE, PTWRITE, WAITPKG, SERIALIZE, TSXLDTRK,
+ UINTR, AMX-BF16, AMX-TILE, AMX-INT8, AVX-VNNI, AVX512-FP16 and AVX512BF16
+ instruction set support.
+ 
+-@item alderlake
+-Intel Alderlake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
+-SSE4.1, SSE4.2, POPCNT, AES, PREFETCHW, PCLMUL, RDRND, XSAVE, XSAVEC, XSAVES,
+-XSAVEOPT, FSGSBASE, PTWRITE, RDPID, SGX, GFNI-SSE, CLWB, MOVDIRI, MOVDIR64B,
+-CLDEMOTE, WAITPKG, ADCX, AVX, AVX2, BMI, BMI2, F16C, FMA, LZCNT, PCONFIG, PKU,
+-VAES, VPCLMULQDQ, SERIALIZE, HRESET, KL, WIDEKL and AVX-VNNI instruction set
++@item bonnell
++@itemx atom
++Intel Bonnell CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3 and SSSE3
++instruction set support.
++
++@item silvermont
++@itemx slm
++Intel Silvermont CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
++SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW and RDRND
++instruction set support.
++
++@item goldmont
++Intel Goldmont CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
++SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW, RDRND, AES, SHA,
++RDSEED, XSAVE, XSAVEC, XSAVES, XSAVEOPT, CLFLUSHOPT and FSGSBASE instruction
++set support.
++
++@item goldmont-plus
++Intel Goldmont Plus CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
++SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW, RDRND, AES,
++SHA, RDSEED, XSAVE, XSAVEC, XSAVES, XSAVEOPT, CLFLUSHOPT, FSGSBASE, PTWRITE,
++RDPID and SGX instruction set support.
++
++@item tremont
++Intel Tremont CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3,
++SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, PCLMUL, PREFETCHW, RDRND, AES, SHA,
++RDSEED, XSAVE, XSAVEC, XSAVES, XSAVEOPT, CLFLUSHOPT, FSGSBASE, PTWRITE, RDPID,
++SGX, CLWB, GFNI-SSE, MOVDIRI, MOVDIR64B, CLDEMOTE and WAITPKG instruction set
+ support.
+ 
+-@item rocketlake
+-Intel Rocketlake CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3, SSSE3
+-, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE, RDRND,
+-F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW, AES,
+-CLFLUSHOPT, XSAVEC, XSAVES, AVX512F, AVX512VL, AVX512BW, AVX512DQ, AVX512CD
+-PKU, AVX512VBMI, AVX512IFMA, SHA, AVX512VNNI, GFNI, VAES, AVX512VBMI2,
+-VPCLMULQDQ, AVX512BITALG, RDPID and AVX512VPOPCNTDQ instruction set support.
++@item knl
++Intel Knights Landing CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
++SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
++RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
++AVX512PF, AVX512ER, AVX512F, AVX512CD and PREFETCHWT1 instruction set support.
++
++@item knm
++Intel Knights Mill CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
++SSSE3, SSE4.1, SSE4.2, POPCNT, CX16, SAHF, FXSR, AVX, XSAVE, PCLMUL, FSGSBASE,
++RDRND, F16C, AVX2, BMI, BMI2, LZCNT, FMA, MOVBE, HLE, RDSEED, ADCX, PREFETCHW,
++AVX512PF, AVX512ER, AVX512F, AVX512CD and PREFETCHWT1, AVX5124VNNIW,
++AVX5124FMAPS and AVX512VPOPCNTDQ instruction set support.
+ 
+ @item graniterapids
+ Intel graniterapids CPU with 64-bit extensions, MOVBE, MMX, SSE, SSE2, SSE3,
+-- 
+2.31.1
+
diff --git a/0346-Refine-splitters-related-to-combine-vpcmpuw-zero_ext.patch b/0346-Refine-splitters-related-to-combine-vpcmpuw-zero_ext.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ede3e8f586f8a2dd39f751eac7ade7d439dec93b
--- /dev/null
+++ b/0346-Refine-splitters-related-to-combine-vpcmpuw-zero_ext.patch
@@ -0,0 +1,416 @@
+From 45b9ba8abc0379b5f83e9209325f9c9a31faec8e Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Wed, 16 Oct 2024 13:43:48 +0800
+Subject: [PATCH 09/14] Refine splitters related to "combine vpcmpuw +
+ zero_extend to vpcmpuw"
+
+r12-6103-g1a7ce8570997eb combines vpcmpuw + zero_extend to vpcmpuw
+with the pre_reload splitter, but the splitter transforms the
+zero_extend into a subreg which make reload think the upper part is
+garbage, it's not correct.
+
+The patch adjusts the zero_extend define_insn_and_split to
+define_insn to keep zero_extend.
+
+gcc/ChangeLog:
+
+	PR target/117159
+	* config/i386/sse.md
+	(*_cmp3_zero_extend):
+	Change from define_insn_and_split to define_insn.
+	(*_cmp3_zero_extend):
+	Ditto.
+	(*_ucmp3_zero_extend):
+	Ditto.
+	(*_ucmp3_zero_extend):
+	Ditto.
+	(*_cmp3_zero_extend_2):
+	Split to the zero_extend pattern.
+	(*_cmp3_zero_extend_2):
+	Ditto.
+	(*_ucmp3_zero_extend_2):
+	Ditto.
+	(*_ucmp3_zero_extend_2):
+	Ditto.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr117159.c: New test.
+	* gcc.target/i386/avx512bw-pr103750-1.c: Remove xfail.
+	* gcc.target/i386/avx512bw-pr103750-2.c: Remove xfail.
+
+(cherry picked from commit 5259d3927c1c8e3a15b4b844adef59b48c241233)
+---
+ gcc/config/i386/sse.md                        | 196 +++++++-----------
+ .../gcc.target/i386/avx512bw-pr103750-1.c     |   3 +-
+ .../gcc.target/i386/avx512bw-pr103750-2.c     |   3 +-
+ gcc/testsuite/gcc.target/i386/pr117159.c      |  42 ++++
+ 4 files changed, 124 insertions(+), 120 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr117159.c
+
+diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
+index 23b858ab21c..7d01c00a848 100644
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -3724,32 +3724,19 @@
+ 
+ ;; Since vpcmpd implicitly clear the upper bits of dest, transform
+ ;; vpcmpd + zero_extend to vpcmpd since the instruction
+-(define_insn_and_split "*_cmp3_zero_extend"
+-  [(set (match_operand:SWI248x 0 "register_operand")
++(define_insn "*_cmp3_zero_extend"
++  [(set (match_operand:SWI248x 0 "register_operand" "=k")
+ 	(zero_extend:SWI248x
+ 	  (unspec:
+-	    [(match_operand:V48H_AVX512VL 1 "nonimmediate_operand")
+-	     (match_operand:V48H_AVX512VL 2 "nonimmediate_operand")
++	    [(match_operand:V48H_AVX512VL 1 "nonimmediate_operand" "v")
++	     (match_operand:V48H_AVX512VL 2 "nonimmediate_operand" "vm")
+ 	     (match_operand:SI 3 "const_0_to_7_operand" "n")]
+ 	    UNSPEC_PCMP)))]
+   "TARGET_AVX512F
+    && (!VALID_MASK_AVX512BW_MODE (mode) || TARGET_AVX512BW)
+-   && ix86_pre_reload_split ()
+    && (GET_MODE_NUNITS (mode)
+       < GET_MODE_PRECISION (mode))"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_PCMP))]
+-{
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
+-				 operands[0], mode);
+-}
++  "vcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+   [(set_attr "type" "ssecmp")
+    (set_attr "length_immediate" "1")
+    (set_attr "prefix" "evex")
+@@ -3777,21 +3764,22 @@
+   "#"
+   "&& 1"
+   [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_PCMP))
+-   (set (match_dup 4) (match_dup 0))]
++    (zero_extend:SWI248x
++	  (unspec:
++	    [(match_dup 1)
++	     (match_dup 2)
++	     (match_dup 3)]
++	    UNSPEC_PCMP)))
++   (set (match_dup 4) (match_dup 5))]
+ {
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
++  operands[5] = lowpart_subreg (mode,
+ 				operands[0], mode);
+-}
+-  [(set_attr "type" "ssecmp")
+-   (set_attr "length_immediate" "1")
+-   (set_attr "prefix" "evex")
+-   (set_attr "mode" "")])
++  if (SUBREG_P (operands[5]))
++    {
++      SUBREG_PROMOTED_VAR_P (operands[5]) = 1;
++      SUBREG_PROMOTED_SET (operands[5], 1);
++    }
++})
+ 
+ (define_insn_and_split "*_cmp3"
+   [(set (match_operand: 0 "register_operand")
+@@ -3826,31 +3814,18 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "")])
+ 
+-(define_insn_and_split "*_cmp3_zero_extend"
+-  [(set (match_operand:SWI248x 0 "register_operand")
++(define_insn "*_cmp3_zero_extend"
++  [(set (match_operand:SWI248x 0 "register_operand" "=k")
+ 	(zero_extend:SWI248x
+ 	  (unspec:
+-	    [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand")
+-	     (match_operand:VI12_AVX512VL 2 "nonimmediate_operand")
+-	     (match_operand:SI 3 "const_0_to_7_operand")]
++	    [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "v")
++	     (match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm")
++	     (match_operand:SI 3 "const_0_to_7_operand" "n")]
+ 	    UNSPEC_PCMP)))]
+   "TARGET_AVX512BW
+-  && ix86_pre_reload_split ()
+-  && (GET_MODE_NUNITS (mode)
+-      < GET_MODE_PRECISION (mode))"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_PCMP))]
+-{
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
+-				operands[0], mode);
+-}
++   && (GET_MODE_NUNITS (mode)
++       < GET_MODE_PRECISION (mode))"
++  "vpcmp\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+   [(set_attr "type" "ssecmp")
+    (set_attr "length_immediate" "1")
+    (set_attr "prefix" "evex")
+@@ -3877,16 +3852,21 @@
+   "#"
+   "&& 1"
+   [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_PCMP))
+-   (set (match_dup 4) (match_dup 0))]
++	(zero_extend:SWI248x
++	  (unspec:
++	   [(match_dup 1)
++		(match_dup 2)
++		(match_dup 3)]
++	   UNSPEC_PCMP)))
++   (set (match_dup 4) (match_dup 5))]
+ {
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
++  operands[5] = lowpart_subreg (mode,
+ 				operands[0], mode);
++  if (SUBREG_P (operands[5]))
++    {
++      SUBREG_PROMOTED_VAR_P (operands[5]) = 1;
++      SUBREG_PROMOTED_SET (operands[5], 1);
++    }
+ }
+   [(set_attr "type" "ssecmp")
+    (set_attr "length_immediate" "1")
+@@ -3945,31 +3925,18 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "")])
+ 
+-(define_insn_and_split "*_ucmp3_zero_extend"
+-  [(set (match_operand:SWI248x 0 "register_operand")
++(define_insn "*_ucmp3_zero_extend"
++  [(set (match_operand:SWI248x 0 "register_operand" "=k")
+ 	(zero_extend:SWI248x
+ 	  (unspec:
+-	    [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand")
+-	     (match_operand:VI12_AVX512VL 2 "nonimmediate_operand")
+-	     (match_operand:SI 3 "const_0_to_7_operand")]
++	    [(match_operand:VI12_AVX512VL 1 "nonimmediate_operand" "v")
++	     (match_operand:VI12_AVX512VL 2 "nonimmediate_operand" "vm")
++	     (match_operand:SI 3 "const_0_to_7_operand" "n")]
+ 	    UNSPEC_UNSIGNED_PCMP)))]
+   "TARGET_AVX512BW
+-  && ix86_pre_reload_split ()
+   && (GET_MODE_NUNITS (mode)
+       < GET_MODE_PRECISION (mode))"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_UNSIGNED_PCMP))]
+-{
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
+-				operands[0], mode);
+-}
++  "vpcmpu\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+   [(set_attr "type" "ssecmp")
+    (set_attr "length_immediate" "1")
+    (set_attr "prefix" "evex")
+@@ -3997,16 +3964,21 @@
+   "#"
+   "&& 1"
+   [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_UNSIGNED_PCMP))
+-   (set (match_dup 4) (match_dup 0))]
+-{
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
++	(zero_extend:SWI248x
++	 (unspec:
++	   [(match_dup 1)
++		(match_dup 2)
++		(match_dup 3)]
++	   UNSPEC_UNSIGNED_PCMP)))
++   (set (match_dup 4) (match_dup 5))]
++{
++  operands[5] = lowpart_subreg (mode,
+ 				operands[0], mode);
++  if (SUBREG_P (operands[5]))
++    {
++      SUBREG_PROMOTED_VAR_P (operands[5]) = 1;
++      SUBREG_PROMOTED_SET (operands[5], 1);
++    }
+ }
+   [(set_attr "type" "ssecmp")
+    (set_attr "length_immediate" "1")
+@@ -4043,32 +4015,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "")])
+ 
+-(define_insn_and_split "*_ucmp3_zero_extend"
+-  [(set (match_operand:SWI248x 0 "register_operand")
++(define_insn "*_ucmp3_zero_extend"
++  [(set (match_operand:SWI248x 0 "register_operand" "=k")
+ 	(zero_extend:SWI248x
+ 	  (unspec:
+-	    [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand")
+-	     (match_operand:VI48_AVX512VL 2 "nonimmediate_operand")
+-	     (match_operand:SI 3 "const_0_to_7_operand")]
++	    [(match_operand:VI48_AVX512VL 1 "nonimmediate_operand" "v")
++	     (match_operand:VI48_AVX512VL 2 "nonimmediate_operand" "vm")
++	     (match_operand:SI 3 "const_0_to_7_operand" "n")]
+ 	    UNSPEC_UNSIGNED_PCMP)))]
+   "TARGET_AVX512F
+    && (!VALID_MASK_AVX512BW_MODE (mode) || TARGET_AVX512BW)
+-   && ix86_pre_reload_split ()
+    && (GET_MODE_NUNITS (mode)
+       < GET_MODE_PRECISION (mode))"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_UNSIGNED_PCMP))]
+-{
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
+-				operands[0], mode);
+-}
++  "vpcmpu\t{%3, %2, %1, %0|%0, %1, %2, %3}"
+   [(set_attr "type" "ssecmp")
+    (set_attr "length_immediate" "1")
+    (set_attr "prefix" "evex")
+@@ -4096,16 +4055,21 @@
+   "#"
+   "&& 1"
+   [(set (match_dup 0)
+-	(unspec:
+-	  [(match_dup 1)
+-	   (match_dup 2)
+-	   (match_dup 3)]
+-	  UNSPEC_UNSIGNED_PCMP))
+-   (set (match_dup 4) (match_dup 0))]
+-{
+-  operands[1] = force_reg (mode, operands[1]);
+-  operands[0] = lowpart_subreg (mode,
++	(zero_extend:SWI248x
++	 (unspec:
++	   [(match_dup 1)
++		(match_dup 2)
++		(match_dup 3)]
++	   UNSPEC_UNSIGNED_PCMP)))
++   (set (match_dup 4) (match_dup 5))]
++{
++  operands[5] = lowpart_subreg (mode,
+ 				operands[0], mode);
++  if (SUBREG_P (operands[5]))
++    {
++      SUBREG_PROMOTED_VAR_P (operands[5]) = 1;
++      SUBREG_PROMOTED_SET (operands[5], 1);
++    }
+ }
+   [(set_attr "type" "ssecmp")
+    (set_attr "length_immediate" "1")
+diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-1.c b/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-1.c
+index b1165f069bb..e7d6183232b 100644
+--- a/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-1.c
++++ b/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-1.c
+@@ -1,8 +1,7 @@
+ /* PR target/103750 */
+ /* { dg-do compile }  */
+ /* { dg-options "-O2 -mavx512bw -mavx512vl" } */
+-/* { dg-final { scan-assembler-not "kmov" { xfail ia32 } } } */
+-/* xfail need to be fixed.  */
++/* { dg-final { scan-assembler-not "kmov" } } */
+ 
+ #include 
+ extern __m128i* pi128;
+diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c b/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c
+index 7303f5403ba..3392e193222 100644
+--- a/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c
++++ b/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c
+@@ -1,8 +1,7 @@
+ /* PR target/103750 */
+ /* { dg-do compile }  */
+ /* { dg-options "-O2 -mavx512dq -mavx512bw -mavx512vl" } */
+-/* { dg-final { scan-assembler-not "kmov" { xfail ia32 } } } */
+-/* xfail need to be fixed.  */
++/* { dg-final { scan-assembler-not "kmov" } } */
+ 
+ #include 
+ extern __m128i* pi128;
+diff --git a/gcc/testsuite/gcc.target/i386/pr117159.c b/gcc/testsuite/gcc.target/i386/pr117159.c
+new file mode 100644
+index 00000000000..b67d682ecef
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr117159.c
+@@ -0,0 +1,42 @@
++/* { dg-do run } */
++/* { dg-options "-Os -mavx512bw" } */
++/* { dg-require-effective-target avx512bw } */
++
++typedef __attribute__((__vector_size__ (4))) unsigned char W;
++typedef __attribute__((__vector_size__ (64))) int V;
++typedef __attribute__((__vector_size__ (64))) long long Vq;
++
++W w;
++V v;
++Vq vq;
++
++static inline W
++foo (short m)
++{
++  unsigned k = __builtin_ia32_pcmpgtq512_mask ((Vq) { }, vq, m);
++  W r = (W) k + w;
++  return r;
++}
++
++static inline W
++foo1 (short m)
++{
++  unsigned k = __builtin_ia32_pcmpgtd512_mask ((V) {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, v, m);
++  W r = (W) k + w;
++  return r;
++}
++
++int
++main ()
++{
++  if (!__builtin_cpu_supports ("avx512bw"))
++    return 0;
++  W y = foo1 (65535);
++  if (!y[0] || !y[1] || y[2] || y[3])
++    __builtin_abort();
++  W x = foo (65535);
++  if (x[0] || x[1] || x[2] || x[3])
++    __builtin_abort();
++
++  return 0;
++}
+-- 
+2.31.1
+
diff --git a/0347-Fix-ICE-due-to-isa-mismatch-for-the-builtins.patch b/0347-Fix-ICE-due-to-isa-mismatch-for-the-builtins.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7488bb8d1d73ef82cef59e0c041b5f1445c213a6
--- /dev/null
+++ b/0347-Fix-ICE-due-to-isa-mismatch-for-the-builtins.patch
@@ -0,0 +1,95 @@
+From 6e8e4260a895298d27660783aaac45bb2e13941f Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Tue, 22 Oct 2024 01:54:40 -0700
+Subject: [PATCH 10/14] Fix ICE due to isa mismatch for the builtins.
+
+gcc/ChangeLog:
+
+	PR target/117240
+	* config/i386/i386-builtin.def: Add avx/avx512f to vaes
+	ymm/zmm builtins.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr117240_avx.c: New test.
+	* gcc.target/i386/pr117240_avx512f.c: New test.
+
+(cherry picked from commit 403e361d5aa620e77c9832578b2409a0fdd79d96)
+---
+ gcc/config/i386/i386-builtin.def              | 24 +++++++++----------
+ gcc/testsuite/gcc.target/i386/pr117240_avx.c  | 10 ++++++++
+ .../gcc.target/i386/pr117240_avx512f.c        | 10 ++++++++
+ 3 files changed, 32 insertions(+), 12 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr117240_avx.c
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr117240_avx512f.c
+
+diff --git a/gcc/config/i386/i386-builtin.def b/gcc/config/i386/i386-builtin.def
+index d3ab21eeac3..d1713b70e07 100644
+--- a/gcc/config/i386/i386-builtin.def
++++ b/gcc/config/i386/i386-builtin.def
+@@ -2751,18 +2751,18 @@ BDESC (0, OPTION_MASK_ISA2_AVX5124VNNIW, CODE_FOR_avx5124vnniw_vp4dpwssds_mask,
+ BDESC (0, OPTION_MASK_ISA2_RDPID, CODE_FOR_rdpid, "__builtin_ia32_rdpid", IX86_BUILTIN_RDPID, UNKNOWN, (int) UNSIGNED_FTYPE_VOID)
+ 
+ /* VAES.  */
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v16qi, "__builtin_ia32_vaesdec_v16qi", IX86_BUILTIN_VAESDEC16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v32qi, "__builtin_ia32_vaesdec_v32qi", IX86_BUILTIN_VAESDEC32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v64qi, "__builtin_ia32_vaesdec_v64qi", IX86_BUILTIN_VAESDEC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v16qi, "__builtin_ia32_vaesdeclast_v16qi", IX86_BUILTIN_VAESDECLAST16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v32qi, "__builtin_ia32_vaesdeclast_v32qi", IX86_BUILTIN_VAESDECLAST32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v64qi, "__builtin_ia32_vaesdeclast_v64qi", IX86_BUILTIN_VAESDECLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v16qi, "__builtin_ia32_vaesenc_v16qi", IX86_BUILTIN_VAESENC16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v32qi, "__builtin_ia32_vaesenc_v32qi", IX86_BUILTIN_VAESENC32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v64qi, "__builtin_ia32_vaesenc_v64qi", IX86_BUILTIN_VAESENC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v16qi, "__builtin_ia32_vaesenclast_v16qi", IX86_BUILTIN_VAESENCLAST16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v32qi, "__builtin_ia32_vaesenclast_v32qi", IX86_BUILTIN_VAESENCLAST32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
+-BDESC (0, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v64qi, "__builtin_ia32_vaesenclast_v64qi", IX86_BUILTIN_VAESENCLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
++BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v16qi, "__builtin_ia32_vaesdec_v16qi", IX86_BUILTIN_VAESDEC16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
++BDESC (OPTION_MASK_ISA_AVX, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v32qi, "__builtin_ia32_vaesdec_v32qi", IX86_BUILTIN_VAESDEC32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
++BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdec_v64qi, "__builtin_ia32_vaesdec_v64qi", IX86_BUILTIN_VAESDEC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
++BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v16qi, "__builtin_ia32_vaesdeclast_v16qi", IX86_BUILTIN_VAESDECLAST16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
++BDESC (OPTION_MASK_ISA_AVX, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v32qi, "__builtin_ia32_vaesdeclast_v32qi", IX86_BUILTIN_VAESDECLAST32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
++BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesdeclast_v64qi, "__builtin_ia32_vaesdeclast_v64qi", IX86_BUILTIN_VAESDECLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
++BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v16qi, "__builtin_ia32_vaesenc_v16qi", IX86_BUILTIN_VAESENC16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
++BDESC (OPTION_MASK_ISA_AVX, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v32qi, "__builtin_ia32_vaesenc_v32qi", IX86_BUILTIN_VAESENC32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
++BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenc_v64qi, "__builtin_ia32_vaesenc_v64qi", IX86_BUILTIN_VAESENC64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
++BDESC (OPTION_MASK_ISA_AVX512VL, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v16qi, "__builtin_ia32_vaesenclast_v16qi", IX86_BUILTIN_VAESENCLAST16, UNKNOWN, (int) V16QI_FTYPE_V16QI_V16QI)
++BDESC (OPTION_MASK_ISA_AVX, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v32qi, "__builtin_ia32_vaesenclast_v32qi", IX86_BUILTIN_VAESENCLAST32, UNKNOWN, (int) V32QI_FTYPE_V32QI_V32QI)
++BDESC (OPTION_MASK_ISA_AVX512F, OPTION_MASK_ISA2_VAES, CODE_FOR_vaesenclast_v64qi, "__builtin_ia32_vaesenclast_v64qi", IX86_BUILTIN_VAESENCLAST64, UNKNOWN, (int) V64QI_FTYPE_V64QI_V64QI)
+ 
+ /* BF16 */
+ BDESC (0, OPTION_MASK_ISA2_AVX512BF16, CODE_FOR_avx512f_cvtne2ps2bf16_v32hi, "__builtin_ia32_cvtne2ps2bf16_v32hi", IX86_BUILTIN_CVTNE2PS2HI16_V32HI, UNKNOWN, (int) V32HI_FTYPE_V16SF_V16SF)
+diff --git a/gcc/testsuite/gcc.target/i386/pr117240_avx.c b/gcc/testsuite/gcc.target/i386/pr117240_avx.c
+new file mode 100644
+index 00000000000..24a97a9f74c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr117240_avx.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mvaes -mno-xsave -Wno-psabi -Wno-implicit-function-declaration" } */
++
++typedef __attribute__((__vector_size__(32))) char V;
++
++V
++foo(V v)
++{
++  return __builtin_ia32_vaesenc_v32qi(v, v);/* { dg-error "incompatible types when returning" } */
++}
+diff --git a/gcc/testsuite/gcc.target/i386/pr117240_avx512f.c b/gcc/testsuite/gcc.target/i386/pr117240_avx512f.c
+new file mode 100644
+index 00000000000..1e7b5a88d7a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr117240_avx512f.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mvaes -mno-xsave -Wno-psabi  -Wno-implicit-function-declaration" } */
++
++typedef __attribute__((__vector_size__(64))) char V;
++
++V
++foo(V v)
++{
++  return __builtin_ia32_vaesenc_v64qi(v, v);/* { dg-error "incompatible types when returning" } */
++}
+-- 
+2.31.1
+
diff --git a/0348-Fix-ICE-due-to-subreg-us_truncate.patch b/0348-Fix-ICE-due-to-subreg-us_truncate.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ca4578d3aec91adf1c10552417e65e4c77feb013
--- /dev/null
+++ b/0348-Fix-ICE-due-to-subreg-us_truncate.patch
@@ -0,0 +1,444 @@
+From b817cad361eac1754101114b9beb7abc1aab3435 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Tue, 29 Oct 2024 02:09:39 -0700
+Subject: [PATCH 11/14] Fix ICE due to subreg:us_truncate.
+
+Force_operand issues an ICE when input
+is (subreg:DI (us_truncate:V8QI)), it's probably because it's an
+invalid rtx, So refine backend patterns for that.
+
+gcc/ChangeLog:
+
+	PR target/117318
+	* config/i386/sse.md (*avx512vl_v2div2qi2_mask_store_1):
+	Rename to ..
+	(avx512vl_v2div2qi2_mask_store_1): .. this.
+	(avx512vl_v2div2qi2_mask_store_2): Change to
+	define_expand.
+	(*avx512vl_v4qi2_mask_store_1): Rename to ..
+	(avx512vl_v4qi2_mask_store_1): .. this.
+	(avx512vl_v4qi2_mask_store_2): Change to
+	define_expand.
+	(*avx512vl_v8qi2_mask_store_1): Rename to ..
+	(avx512vl_v8qi2_mask_store_1): .. this.
+	(avx512vl_v8qi2_mask_store_2): Change to
+	define_expand.
+	(*avx512vl_v4hi2_mask_store_1): Rename to ..
+	(avx512vl_v4hi2_mask_store_1): .. this.
+	(avx512vl_v4hi2_mask_store_2): Change to
+	define_expand.
+	(*avx512vl_v2div2hi2_mask_store_1): Rename to ..
+	(avx512vl_v2div2hi2_mask_store_1): .. this.
+	(avx512vl_v2div2hi2_mask_store_2): Change to
+	define_expand.
+	(*avx512vl_v2div2si2_mask_store_1): Rename to ..
+	(avx512vl_v2div2si2_mask_store_1): .. this.
+	(avx512vl_v2div2si2_mask_store_2): Change to
+	define_expand.
+	(*avx512f_v8div16qi2_mask_store_1): Rename to ..
+	(avx512f_v8div16qi2_mask_store_1): .. this.
+	(avx512f_v8div16qi2_mask_store_2): Change to
+	define_expand.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/pr117318.c: New test.
+
+(cherry picked from commit bc0eeccf27a084461a2d5661e23468350acb43da)
+---
+ gcc/config/i386/sse.md                   | 268 +++++++++--------------
+ gcc/testsuite/gcc.target/i386/pr117318.c |  12 +
+ 2 files changed, 110 insertions(+), 170 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr117318.c
+
+diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
+index 7d01c00a848..a7d61bf0044 100644
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -13850,7 +13850,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn "*avx512vl_v2div2qi2_mask_store_1"
++(define_insn "avx512vl_v2div2qi2_mask_store_1"
+   [(set (match_operand:V2QI 0 "memory_operand" "=m")
+ 	  (vec_merge:V2QI
+ 	    (any_truncate:V2QI
+@@ -13864,28 +13864,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn_and_split "avx512vl_v2div2qi2_mask_store_2"
+-  [(set (match_operand:HI 0 "memory_operand")
+-	(subreg:HI
+-	  (vec_merge:V2QI
+-	    (any_truncate:V2QI
+-	      (match_operand:V2DI 1 "register_operand"))
+-	    (vec_select:V2QI
+-	      (subreg:V4QI
+-		(vec_concat:V2HI
+-		  (match_dup 0)
+-		  (const_int 0)) 0)
+-	      (parallel [(const_int 0) (const_int 1)]))
+-	    (match_operand:QI 2 "register_operand")) 0))]
+-  "TARGET_AVX512VL && ix86_pre_reload_split ()"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(vec_merge:V2QI
+-	  (any_truncate:V2QI (match_dup 1))
+-	  (match_dup 0)
+-	  (match_dup 2)))]
+-  "operands[0] = adjust_address_nv (operands[0], V2QImode, 0);")
++(define_expand "avx512vl_v2div2qi2_mask_store_2"
++  [(match_operand:HI 0 "memory_operand")
++   (any_truncate:V2QI
++     (match_operand:V2DI 1 "register_operand"))
++   (match_operand:QI 2 "register_operand")]
++  "TARGET_AVX512VL"
++{
++  operands[0] = adjust_address_nv (operands[0], V2QImode, 0);
++  emit_insn (gen_avx512vl_v2div2qi2_mask_store_1 (operands[0],
++							operands[1],
++							operands[2]));
++  DONE;
++})
+ 
+ (define_insn "*avx512vl_v4qi2_store_1"
+   [(set (match_operand:V4QI 0 "memory_operand" "=m")
+@@ -13954,7 +13945,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn "*avx512vl_v4qi2_mask_store_1"
++(define_insn "avx512vl_v4qi2_mask_store_1"
+   [(set (match_operand:V4QI 0 "memory_operand" "=m")
+ 	(vec_merge:V4QI
+ 	  (any_truncate:V4QI
+@@ -13968,29 +13959,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn_and_split "avx512vl_v4qi2_mask_store_2"
+-  [(set (match_operand:SI 0 "memory_operand")
+-	(subreg:SI
+-	  (vec_merge:V4QI
+-	    (any_truncate:V4QI
+-	      (match_operand:VI4_128_8_256 1 "register_operand"))
+-	    (vec_select:V4QI
+-	      (subreg:V8QI
+-		(vec_concat:V2SI
+-		  (match_dup 0)
+-		  (const_int 0)) 0)
+-	      (parallel [(const_int 0) (const_int 1)
+-			 (const_int 2) (const_int 3)]))
+-	    (match_operand:QI 2 "register_operand")) 0))]
+-  "TARGET_AVX512VL && ix86_pre_reload_split ()"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(vec_merge:V4QI
+-	  (any_truncate:V4QI (match_dup 1))
+-	  (match_dup 0)
+-	  (match_dup 2)))]
+-  "operands[0] = adjust_address_nv (operands[0], V4QImode, 0);")
++(define_expand "avx512vl_v4qi2_mask_store_2"
++  [(match_operand:SI 0 "memory_operand")
++   (any_truncate:V4QI
++     (match_operand:VI4_128_8_256 1 "register_operand"))
++   (match_operand:QI 2 "register_operand")]
++  "TARGET_AVX512VL"
++{
++  operands[0] = adjust_address_nv (operands[0], V4QImode, 0);
++  emit_insn (gen_avx512vl_v4qi2_mask_store_1 (operands[0],
++							  operands[1],
++							  operands[2]));
++  DONE;
++})
+ 
+ (define_mode_iterator VI2_128_BW_4_256
+   [(V8HI "TARGET_AVX512BW") V8SI])
+@@ -14062,7 +14043,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn "*avx512vl_v8qi2_mask_store_1"
++(define_insn "avx512vl_v8qi2_mask_store_1"
+   [(set (match_operand:V8QI 0 "memory_operand" "=m")
+ 	(vec_merge:V8QI
+ 	  (any_truncate:V8QI
+@@ -14076,31 +14057,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn_and_split "avx512vl_v8qi2_mask_store_2"
+-  [(set (match_operand:DI 0 "memory_operand")
+-	(subreg:DI
+-	  (vec_merge:V8QI
+-	    (any_truncate:V8QI
+-	      (match_operand:VI2_128_BW_4_256 1 "register_operand"))
+-	    (vec_select:V8QI
+-	      (subreg:V16QI
+-		(vec_concat:V2DI
+-		  (match_dup 0)
+-		  (const_int 0)) 0)
+-	      (parallel [(const_int 0) (const_int 1)
+-			 (const_int 2) (const_int 3)
+-			 (const_int 4) (const_int 5)
+-			 (const_int 6) (const_int 7)]))
+-	    (match_operand:QI 2 "register_operand")) 0))]
+-  "TARGET_AVX512VL && ix86_pre_reload_split ()"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(vec_merge:V8QI
+-	  (any_truncate:V8QI (match_dup 1))
+-	  (match_dup 0)
+-	  (match_dup 2)))]
+-  "operands[0] = adjust_address_nv (operands[0], V8QImode, 0);")
++(define_expand "avx512vl_v8qi2_mask_store_2"
++  [(match_operand:DI 0 "memory_operand")
++   (any_truncate:V8QI
++     (match_operand:VI2_128_BW_4_256 1 "register_operand"))
++   (match_operand:QI 2 "register_operand")]
++  "TARGET_AVX512VL"
++{
++  operands[0] = adjust_address_nv (operands[0], V8QImode, 0);
++  emit_insn (gen_avx512vl_v8qi2_mask_store_1 (operands[0],
++							  operands[1],
++							  operands[2]));
++  DONE;
++})
+ 
+ (define_mode_iterator PMOV_SRC_MODE_4 [V4DI V2DI V4SI])
+ (define_mode_attr pmov_dst_4
+@@ -14222,7 +14191,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn "*avx512vl_v4hi2_mask_store_1"
++(define_insn "avx512vl_v4hi2_mask_store_1"
+   [(set (match_operand:V4HI 0 "memory_operand" "=m")
+ 	(vec_merge:V4HI
+ 	  (any_truncate:V4HI
+@@ -14240,30 +14209,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn_and_split "avx512vl_v4hi2_mask_store_2"
+-  [(set (match_operand:DI 0 "memory_operand")
+-	(subreg:DI
+-	  (vec_merge:V4HI
+-	    (any_truncate:V4HI
+-	      (match_operand:VI4_128_8_256 1 "register_operand"))
+-	    (vec_select:V4HI
+-	      (subreg:V8HI
+-		(vec_concat:V2DI
+-		  (match_dup 0)
+-		  (const_int 0)) 0)
+-	      (parallel [(const_int 0) (const_int 1)
+-			 (const_int 2) (const_int 3)]))
+-	    (match_operand:QI 2 "register_operand")) 0))]
+-  "TARGET_AVX512VL && ix86_pre_reload_split ()"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(vec_merge:V4HI
+-	  (any_truncate:V4HI (match_dup 1))
+-	  (match_dup 0)
+-	  (match_dup 2)))]
+-  "operands[0] = adjust_address_nv (operands[0], V4HImode, 0);")
+-
++(define_expand "avx512vl_v4hi2_mask_store_2"
++  [(match_operand:DI 0 "memory_operand")
++   (any_truncate:V4HI
++     (match_operand:VI4_128_8_256 1 "register_operand"))
++   (match_operand:QI 2 "register_operand")]
++  "TARGET_AVX512VL"
++{
++  operands[0] = adjust_address_nv (operands[0], V4HImode, 0);
++  emit_insn (gen_avx512vl_v4hi2_mask_store_1 (operands[0],
++							  operands[1],
++							  operands[2]));
++  DONE;
++})
+ 
+ (define_insn "*avx512vl_v2div2hi2_store_1"
+   [(set (match_operand:V2HI 0 "memory_operand" "=m")
+@@ -14324,7 +14282,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn "*avx512vl_v2div2hi2_mask_store_1"
++(define_insn "avx512vl_v2div2hi2_mask_store_1"
+   [(set (match_operand:V2HI 0 "memory_operand" "=m")
+ 	(vec_merge:V2HI
+ 	  (any_truncate:V2HI
+@@ -14338,28 +14296,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn_and_split "avx512vl_v2div2hi2_mask_store_2"
+-  [(set (match_operand:SI 0 "memory_operand")
+-	(subreg:SI
+-	  (vec_merge:V2HI
+-	    (any_truncate:V2HI
+-	      (match_operand:V2DI 1 "register_operand"))
+-	    (vec_select:V2HI
+-	      (subreg:V4HI
+-		(vec_concat:V2SI
+-		  (match_dup 0)
+-		  (const_int 0)) 0)
+-	      (parallel [(const_int 0) (const_int 1)]))
+-	    (match_operand:QI 2 "register_operand")) 0))]
+-  "TARGET_AVX512VL && ix86_pre_reload_split ()"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(vec_merge:V2HI
+-	  (any_truncate:V2HI (match_dup 1))
+-	  (match_dup 0)
+-	  (match_dup 2)))]
+-  "operands[0] = adjust_address_nv (operands[0], V2HImode, 0);")
++(define_expand "avx512vl_v2div2hi2_mask_store_2"
++  [(match_operand:SI 0 "memory_operand")
++   (any_truncate:V2HI
++    (match_operand:V2DI 1 "register_operand"))
++   (match_operand:QI 2 "register_operand")]
++  "TARGET_AVX512VL"
++{
++  operands[0] = adjust_address_nv (operands[0], V2HImode, 0);
++  emit_insn (gen_avx512vl_v2div2hi2_mask_store_1 (operands[0],
++							operands[1],
++							operands[2]));
++  DONE;
++})
+ 
+ (define_expand "truncv2div2si2"
+   [(set (match_operand:V2SI 0 "register_operand")
+@@ -14467,7 +14416,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn "*avx512vl_v2div2si2_mask_store_1"
++(define_insn "avx512vl_v2div2si2_mask_store_1"
+   [(set (match_operand:V2SI 0 "memory_operand" "=m")
+ 	(vec_merge:V2SI
+ 	  (any_truncate:V2SI
+@@ -14481,28 +14430,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn_and_split "avx512vl_v2div2si2_mask_store_2"
+-  [(set (match_operand:DI 0 "memory_operand")
+-	(subreg:DI
+-	  (vec_merge:V2SI
+-	    (any_truncate:V2SI
+-	      (match_operand:V2DI 1 "register_operand"))
+-	    (vec_select:V2SI
+-	      (subreg:V4SI
+-		(vec_concat:V2DI
+-		  (match_dup 0)
+-		  (const_int 0)) 0)
+-	      (parallel [(const_int 0) (const_int 1)]))
+-	    (match_operand:QI 2 "register_operand")) 0))]
+-  "TARGET_AVX512VL && ix86_pre_reload_split ()"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-  	(vec_merge:V2SI
+-	  (any_truncate:V2SI (match_dup 1))
+-	  (match_dup 0)
+-	  (match_dup 2)))]
+-  "operands[0] = adjust_address_nv (operands[0], V2SImode, 0);")
++(define_expand "avx512vl_v2div2si2_mask_store_2"
++  [(match_operand:DI 0 "memory_operand")
++   (any_truncate:V2SI
++    (match_operand:V2DI 1 "register_operand"))
++   (match_operand:QI 2 "register_operand")]
++  "TARGET_AVX512VL"
++{
++  operands[0] = adjust_address_nv (operands[0], V2SImode, 0);
++  emit_insn (gen_avx512vl_v2div2si2_mask_store_1 (operands[0],
++							operands[1],
++							operands[2]));
++  DONE;
++})
+ 
+ (define_expand "truncv8div8qi2"
+   [(set (match_operand:V8QI 0 "register_operand")
+@@ -14601,7 +14541,7 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn "*avx512f_v8div16qi2_mask_store_1"
++(define_insn "avx512f_v8div16qi2_mask_store_1"
+   [(set (match_operand:V8QI 0 "memory_operand" "=m")
+ 	(vec_merge:V8QI
+ 	  (any_truncate:V8QI
+@@ -14615,31 +14555,19 @@
+    (set_attr "prefix" "evex")
+    (set_attr "mode" "TI")])
+ 
+-(define_insn_and_split "avx512f_v8div16qi2_mask_store_2"
+-  [(set (match_operand:DI 0 "memory_operand")
+-	(subreg:DI
+-	  (vec_merge:V8QI
+-	  (any_truncate:V8QI
+-	    (match_operand:V8DI 1 "register_operand"))
+-	  (vec_select:V8QI
+-	    (subreg:V16QI
+-	      (vec_concat:V2DI
+-		(match_dup 0)
+-		(const_int 0)) 0)
+-	    (parallel [(const_int 0) (const_int 1)
+-		       (const_int 2) (const_int 3)
+-		       (const_int 4) (const_int 5)
+-		       (const_int 6) (const_int 7)]))
+-	  (match_operand:QI 2 "register_operand")) 0))]
+-  "TARGET_AVX512F && ix86_pre_reload_split ()"
+-  "#"
+-  "&& 1"
+-  [(set (match_dup 0)
+-	(vec_merge:V8QI
+-	  (any_truncate:V8QI (match_dup 1))
+-	  (match_dup 0)
+-	  (match_dup 2)))]
+-  "operands[0] = adjust_address_nv (operands[0], V8QImode, 0);")
++(define_expand "avx512f_v8div16qi2_mask_store_2"
++  [(match_operand:DI 0 "memory_operand")
++   (any_truncate:V8QI
++    (match_operand:V8DI 1 "register_operand"))
++   (match_operand:QI 2 "register_operand")]
++  "TARGET_AVX512F"
++{
++  operands[0] = adjust_address_nv (operands[0], V8QImode, 0);
++  emit_insn (gen_avx512f_v8div16qi2_mask_store_1 (operands[0],
++							operands[1],
++							operands[2]));
++  DONE;
++})
+ 
+ ;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
+ ;;
+diff --git a/gcc/testsuite/gcc.target/i386/pr117318.c b/gcc/testsuite/gcc.target/i386/pr117318.c
+new file mode 100644
+index 00000000000..3d316ad04cf
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr117318.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-mavx512f -O" } */
++
++typedef __attribute__((__vector_size__ (64))) long long V;
++unsigned long long x;
++
++unsigned long long
++foo()
++{
++  __builtin_ia32_pmovusqb512mem_mask (&x, (V){8000000000000000}, 255);
++  return x;
++}
+-- 
+2.31.1
+
diff --git a/0349-i386-Zero-extend-32-bit-address-to-64-bit-with-optio.patch b/0349-i386-Zero-extend-32-bit-address-to-64-bit-with-optio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6fd40e34bd1f1b7793ea471f19fd00fe5453c753
--- /dev/null
+++ b/0349-i386-Zero-extend-32-bit-address-to-64-bit-with-optio.patch
@@ -0,0 +1,104 @@
+From a23da8527f517e439ab634d9995b44740cbbc05b Mon Sep 17 00:00:00 2001
+From: "Hu, Lin1" 
+Date: Wed, 6 Nov 2024 15:42:13 +0800
+Subject: [PATCH 12/14] i386: Zero extend 32-bit address to 64-bit with option
+ -mx32 -maddress-mode=long. [PR 117418]
+
+-maddress-mode=long let Pmode = DI_mode, so zero extend 32-bit address to
+64-bit and uses a 64-bit register as a pointer for avoid raise an ICE.
+
+gcc/ChangeLog:
+
+	PR target/117418
+	* config/i386/i386-expand.cc (ix86_expand_builtin): Convert
+	pointer's mode according to Pmode.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/117418
+	* gcc.target/i386/pr117418-1.c: New test.
+
+(cherry picked from commit 2272cd2508f1854c880082f792de15e76ec09a99)
+---
+ gcc/config/i386/i386-expand.cc             | 12 +++++++++++
+ gcc/testsuite/gcc.target/i386/pr117418-1.c | 24 ++++++++++++++++++++++
+ 2 files changed, 36 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr117418-1.c
+
+diff --git a/gcc/config/i386/i386-expand.cc b/gcc/config/i386/i386-expand.cc
+index bc2e6198007..52e32749928 100644
+--- a/gcc/config/i386/i386-expand.cc
++++ b/gcc/config/i386/i386-expand.cc
+@@ -12730,6 +12730,9 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
+       op1 = expand_normal (arg1);
+       op2 = expand_normal (arg2);
+ 
++      if (GET_MODE (op1) != Pmode)
++	op1 = convert_to_mode (Pmode, op1, 1);
++
+       if (!address_operand (op2, VOIDmode))
+ 	{
+ 	  op2 = convert_memory_address (Pmode, op2);
+@@ -12765,6 +12768,9 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
+       emit_label (ok_label);
+       emit_insn (gen_rtx_SET (target, pat));
+ 
++      if (GET_MODE (op0) != Pmode)
++	op0 = convert_to_mode (Pmode, op0, 1);
++
+       for (i = 0; i < 8; i++)
+ 	{
+ 	  op = gen_rtx_MEM (V2DImode,
+@@ -12789,6 +12795,9 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
+ 	if (!REG_P (op0))
+ 	  op0 = copy_to_mode_reg (SImode, op0);
+ 
++	if (GET_MODE (op2) != Pmode)
++	  op2 = convert_to_mode (Pmode, op2, 1);
++
+ 	op = gen_rtx_REG (V2DImode, GET_SSE_REGNO (0));
+ 	emit_move_insn (op, op1);
+ 
+@@ -12826,6 +12835,9 @@ ix86_expand_builtin (tree exp, rtx target, rtx subtarget,
+ 	if (!REG_P (op0))
+ 	  op0 = copy_to_mode_reg (SImode, op0);
+ 
++	if (GET_MODE (op3) != Pmode)
++	  op3 = convert_to_mode (Pmode, op3, 1);
++
+ 	/* Force to use xmm0, xmm1 for keylow, keyhi*/
+ 	op = gen_rtx_REG (V2DImode, GET_SSE_REGNO (0));
+ 	emit_move_insn (op, op1);
+diff --git a/gcc/testsuite/gcc.target/i386/pr117418-1.c b/gcc/testsuite/gcc.target/i386/pr117418-1.c
+new file mode 100644
+index 00000000000..4839b139b79
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr117418-1.c
+@@ -0,0 +1,24 @@
++/* PR target/117418 */
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-options "-maddress-mode=long -mwidekl -mx32" } */
++/* { dg-require-effective-target maybe_x32  } */
++/* { dg-final { scan-assembler-times "aesdec128kl" 1 } } */
++/* { dg-final { scan-assembler-times "aesdec256kl" 1 } } */
++/* { dg-final { scan-assembler-times "aesenc128kl" 1 } } */
++/* { dg-final { scan-assembler-times "aesenc256kl" 1 } } */
++/* { dg-final { scan-assembler-times "encodekey128" 1 } } */
++/* { dg-final { scan-assembler-times "encodekey256" 1 } } */
++
++typedef __attribute__((__vector_size__(16))) long long V;
++V a;
++
++void
++foo()
++{
++    __builtin_ia32_aesdec128kl_u8 (&a, a, &a);
++    __builtin_ia32_aesdec256kl_u8 (&a, a, &a);
++    __builtin_ia32_aesenc128kl_u8 (&a, a, &a);
++    __builtin_ia32_aesenc256kl_u8 (&a, a, &a);
++    __builtin_ia32_encodekey128_u32 (0, a, &a); 
++    __builtin_ia32_encodekey256_u32 (0, a, a, &a); 
++}
+-- 
+2.31.1
+
diff --git a/0350-Fix-uninitialized-operands-2-in-vec_unpacks_hi_v4sf.patch b/0350-Fix-uninitialized-operands-2-in-vec_unpacks_hi_v4sf.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b7e6f158f2e2aaefba459634f73b851273d41f88
--- /dev/null
+++ b/0350-Fix-uninitialized-operands-2-in-vec_unpacks_hi_v4sf.patch
@@ -0,0 +1,37 @@
+From 94ab46d9486464b3158a9fc9bc1c463dd4d62d72 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Thu, 21 Nov 2024 23:57:38 -0800
+Subject: [PATCH 13/14] Fix uninitialized operands[2] in vec_unpacks_hi_v4sf.
+
+It could cause weired spill in RA when register pressure is high.
+
+gcc/ChangeLog:
+
+	PR target/117562
+	* config/i386/sse.md (vec_unpacks_hi_v4sf): Initialize
+	operands[2] with CONST0_RTX.
+
+(cherry picked from commit ba4cf2e296d8d5950c3d356fa6b6efcad00d0189)
+---
+ gcc/config/i386/sse.md | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/i386/sse.md b/gcc/config/i386/sse.md
+index a7d61bf0044..c6a8e301145 100644
+--- a/gcc/config/i386/sse.md
++++ b/gcc/config/i386/sse.md
+@@ -9126,7 +9126,10 @@
+        (match_dup 2)
+        (parallel [(const_int 0) (const_int 1)]))))]
+   "TARGET_SSE2"
+-  "operands[2] = gen_reg_rtx (V4SFmode);")
++{
++  operands[2] = gen_reg_rtx (V4SFmode);
++  emit_move_insn (operands[2], CONST0_RTX (V4SFmode));
++})
+ 
+ (define_expand "vec_unpacks_hi_v8sf"
+   [(set (match_dup 2)
+-- 
+2.31.1
+
diff --git a/0351-GCC13-GCC12-Fix-testcase.patch b/0351-GCC13-GCC12-Fix-testcase.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4d746f2a26079e506749ee5b3527e35389272967
--- /dev/null
+++ b/0351-GCC13-GCC12-Fix-testcase.patch
@@ -0,0 +1,34 @@
+From 6494fd12311561551bcf8d8529108fba79c45fd7 Mon Sep 17 00:00:00 2001
+From: liuhongt 
+Date: Tue, 22 Oct 2024 11:24:23 +0800
+Subject: [PATCH 14/14] [GCC13/GCC12] Fix testcase.
+
+The optimization relies on other patterns which are only available at
+GCC14 and above, so restore the xfail for GCC13/12 branch.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/i386/avx512bw-pr103750-2.c: Add xfail for ia32.
+
+(cherry picked from commit 8b43518a01cbbbafe042b85a48fa09a32948380a)
+---
+ gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c b/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c
+index 3392e193222..7303f5403ba 100644
+--- a/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c
++++ b/gcc/testsuite/gcc.target/i386/avx512bw-pr103750-2.c
+@@ -1,7 +1,8 @@
+ /* PR target/103750 */
+ /* { dg-do compile }  */
+ /* { dg-options "-O2 -mavx512dq -mavx512bw -mavx512vl" } */
+-/* { dg-final { scan-assembler-not "kmov" } } */
++/* { dg-final { scan-assembler-not "kmov" { xfail ia32 } } } */
++/* xfail need to be fixed.  */
+ 
+ #include 
+ extern __m128i* pi128;
+-- 
+2.31.1
+
diff --git a/0352-Add-hip10c-machine-discription.patch b/0352-Add-hip10c-machine-discription.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7b2d1787954b95c22214fcfdd4b8af1b6898ac0e
--- /dev/null
+++ b/0352-Add-hip10c-machine-discription.patch
@@ -0,0 +1,899 @@
+From d3a8c59e7eaf99bff77447e08e15898530af8a9e Mon Sep 17 00:00:00 2001
+From: liyunfei 
+Date: Tue, 19 Nov 2024 11:10:29 +0800
+Subject: [PATCH] Add hip10c machine description
+
+Here is the patch introducing hip10c machine model
+for the scheduler.
+---
+ gcc/config/aarch64/aarch64-cores.def     |   1 +
+ gcc/config/aarch64/aarch64-cost-tables.h | 104 +++++
+ gcc/config/aarch64/aarch64-tune.md       |   2 +-
+ gcc/config/aarch64/aarch64.cc            | 108 +++++
+ gcc/config/aarch64/aarch64.md            |   1 +
+ gcc/config/aarch64/hip10c.md             | 562 +++++++++++++++++++++++
+ gcc/doc/invoke.texi                      |   2 +-
+ 7 files changed, 778 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/config/aarch64/hip10c.md
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 3337fd1a0..1e8de523c 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -131,6 +131,7 @@ AARCH64_CORE("a64fx", a64fx, a64fx, V8_2A,  (F16, SVE), a64fx, 0x46, 0x001, -1)
+ /* HiSilicon ('H') cores. */
+ AARCH64_CORE("tsv110",  tsv110, tsv110, V8_2A,  (CRYPTO, F16), tsv110,   0x48, 0xd01, -1)
+ AARCH64_CORE("hip09", hip09, hip09, V8_5A,  (SVE, I8MM, F32MM, F64MM, PROFILE, PREDRES), hip09, 0x48, 0xd02, 0x0)
++AARCH64_CORE("hip10c", hip10c, hip10c, V8_5A,  (SVE, I8MM, BF16, F32MM, F64MM, FLAGM, PAUTH, SSBS, SHA3, SM4, PROFILE, PREDRES), hip10c, 0x48, 0xddd, 0x0)
+ 
+ /* ARMv8.3-A Architecture Processors.  */
+ 
+diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h
+index 0ee427b61..dc51d9c2c 100644
+--- a/gcc/config/aarch64/aarch64-cost-tables.h
++++ b/gcc/config/aarch64/aarch64-cost-tables.h
+@@ -876,6 +876,110 @@ const struct cpu_cost_table hip09_extra_costs =
+   }
+ };
+ 
++const struct cpu_cost_table hip10c_extra_costs =
++{
++  /* ALU */
++  {
++    0,                 /* arith.  */
++    0,                 /* logical.  */
++    0,                 /* shift.  */
++    0,                 /* shift_reg.  */
++    COSTS_N_INSNS (1), /* arith_shift.  */
++    COSTS_N_INSNS (1), /* arith_shift_reg.  */
++    COSTS_N_INSNS (1), /* log_shift.  */
++    COSTS_N_INSNS (1), /* log_shift_reg.  */
++    0,                 /* extend.  */
++    COSTS_N_INSNS (1), /* extend_arith.  */
++    0,                 /* bfi.  */
++    0,                 /* bfx.  */
++    0,                 /* clz.  */
++    0,                 /* rev.  */
++    0,                 /* non_exec.  */
++    true               /* non_exec_costs_exec.  */
++  },
++
++  {
++    /* MULT SImode */
++    {
++      COSTS_N_INSNS (2),       /* simple.  */
++      COSTS_N_INSNS (2),       /* flag_setting.  */
++      COSTS_N_INSNS (2),       /* extend.  */
++      COSTS_N_INSNS (2),       /* add.  */
++      COSTS_N_INSNS (2),       /* extend_add.  */
++      COSTS_N_INSNS (11)       /* idiv.  */
++    },
++        /* MULT DImode */
++    {
++      COSTS_N_INSNS (3),       /* simple.  */
++      0,                       /* flag_setting (N/A).  */
++      COSTS_N_INSNS (3),       /* extend.  */
++      COSTS_N_INSNS (3),       /* add.  */
++      COSTS_N_INSNS (3),       /* extend_add.  */
++      COSTS_N_INSNS (19)       /* idiv.  */
++    }
++  },
++  /* LD/ST */
++  {
++    COSTS_N_INSNS (3),         /* load.  */
++    COSTS_N_INSNS (4),         /* load_sign_extend.  */
++    COSTS_N_INSNS (3),         /* ldrd.  */
++    COSTS_N_INSNS (3),         /* ldm_1st.  */
++    1,                         /* ldm_regs_per_insn_1st.  */
++    2,                         /* ldm_regs_per_insn_subsequent.  */
++    COSTS_N_INSNS (4),         /* loadf.  */
++    COSTS_N_INSNS (4),         /* loadd.  */
++    COSTS_N_INSNS (4),         /* load_unaligned.  */
++    0,                         /* store.  */
++    0,                         /* strd.  */
++    0,                         /* stm_1st.  */
++    1,                         /* stm_regs_per_insn_1st.  */
++    2,                         /* stm_regs_per_insn_subsequent.  */
++    0,                         /* storef.  */
++    0,                         /* stored.  */
++    COSTS_N_INSNS (1),         /* store_unaligned.  */
++    COSTS_N_INSNS (4),         /* loadv.  */
++    COSTS_N_INSNS (4)          /* storev.  */
++  },
++  {
++    /* FP SFmode */
++    {
++      COSTS_N_INSNS (10),      /* div.  */
++      COSTS_N_INSNS (4),       /* mult.  */
++      COSTS_N_INSNS (4),       /* mult_addsub.  */
++      COSTS_N_INSNS (4),       /* fma.  */
++      COSTS_N_INSNS (4),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      COSTS_N_INSNS (1),       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (1),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    },
++    /* FP DFmode */
++    {
++      COSTS_N_INSNS (17),      /* div.  */
++      COSTS_N_INSNS (4),       /* mult.  */
++      COSTS_N_INSNS (6),       /* mult_addsub.  */
++      COSTS_N_INSNS (6),       /* fma.  */
++      COSTS_N_INSNS (3),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      COSTS_N_INSNS (1),       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (1),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    }
++  },
++  /* Vector */
++  {
++    COSTS_N_INSNS (1)  /* alu.  */
++  }
++};
++
+ const struct cpu_cost_table ampere1_extra_costs =
+ {
+   /* ALU */
+diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
+index 511422081..e176a4d70 100644
+--- a/gcc/config/aarch64/aarch64-tune.md
++++ b/gcc/config/aarch64/aarch64-tune.md
+@@ -1,5 +1,5 @@
+ ;; -*- buffer-read-only: t -*-
+ ;; Generated automatically by gentune.sh from aarch64-cores.def
+ (define_attr "tune"
+-	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2"
++	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,hip10c,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2"
+ 	(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 025a3c478..e14d38e78 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -505,6 +505,22 @@ static const struct cpu_addrcost_table hip09_addrcost_table =
+   0, /* imm_offset  */
+ };
+ 
++static const struct cpu_addrcost_table hip10c_addrcost_table =
++{
++    {
++        1, /* hi  */
++        0, /* si  */
++        0, /* di  */
++        1, /* ti  */
++    },
++  0, /* pre_modify  */
++  0, /* post_modify  */
++  0, /* register_offset  */
++  1, /* register_sextend  */
++  1, /* register_zextend  */
++  0, /* imm_offset  */
++};
++
+ static const struct cpu_addrcost_table hip11_addrcost_table =
+ {
+     {
+@@ -736,6 +752,16 @@ static const struct cpu_regmove_cost hip09_regmove_cost =
+   2  /* FP2FP  */
+ };
+ 
++static const struct cpu_regmove_cost hip10c_regmove_cost =
++{
++  1, /* GP2GP  */
++  /* Avoid the use of slow int<->fp moves for spilling by setting
++     their cost higher than memmov_cost.  */
++  2, /* GP2FP  */
++  3, /* FP2GP  */
++  2  /* FP2FP  */
++};
++
+ static const struct cpu_regmove_cost neoversen2_regmove_cost =
+ {
+   1, /* GP2GP  */
+@@ -1060,6 +1086,43 @@ static const struct cpu_vector_cost hip09_vector_cost =
+   nullptr /* issue_info  */
+ };
+ 
++static const advsimd_vec_cost hip10c_advsimd_vector_cost =
++{
++  2, /* int_stmt_cost  */
++  2, /* fp_stmt_cost  */
++  0, /* ld2_st2_permute_cost  */
++  0, /* ld3_st3_permute_cost  */
++  0, /* ld4_st4_permute_cost  */
++  2, /* permute_cost  */
++  3, /* reduc_i8_cost  */
++  3, /* reduc_i16_cost  */
++  3, /* reduc_i32_cost  */
++  3, /* reduc_i64_cost  */
++  3, /* reduc_f16_cost  */
++  3, /* reduc_f32_cost  */
++  3, /* reduc_f64_cost  */
++  3, /* store_elt_extra_cost  */
++  3, /* vec_to_scalar_cost  */
++  2, /* scalar_to_vec_cost  */
++  5, /* align_load_cost  */
++  5, /* unalign_load_cost  */
++  1, /* unalign_store_cost  */
++  1  /* store_cost  */
++};
++
++static const struct cpu_vector_cost hip10c_vector_cost =
++{
++  1, /* scalar_int_stmt_cost  */
++  1, /* scalar_fp_stmt_cost  */
++  5, /* scalar_load_cost  */
++  1, /* scalar_store_cost  */
++  1, /* cond_taken_branch_cost  */
++  1, /* cond_not_taken_branch_cost  */
++  &hip10c_advsimd_vector_cost, /* advsimd  */
++  nullptr, /* sve  */
++  nullptr /* issue_info  */
++};
++
+ static const advsimd_vec_cost hip11_advsimd_vector_cost =
+ {
+   2, /* int_stmt_cost  */
+@@ -1455,6 +1518,17 @@ static const cpu_prefetch_tune hip09_prefetch_tune =
+   -1                    /* default_opt_level  */
+ };
+ 
++static const cpu_prefetch_tune hip10c_prefetch_tune =
++{
++  0,                    /* num_slots  */
++  64,                   /* l1_cache_size  */
++  64,                   /* l1_cache_line_size  */
++  512,                  /* l2_cache_size  */
++  true,                 /* prefetch_dynamic_strides */
++  -1,                   /* minimum_stride */
++  -1                    /* default_opt_level  */
++};
++
+ static const cpu_prefetch_tune hip11_prefetch_tune =
+ {
+   0,                    /* num_slots  */
+@@ -1865,6 +1939,40 @@ static const struct tune_params hip09_tunings =
+   &hip09_prefetch_tune
+ };
+ 
++static const struct tune_params hip10c_tunings =
++{
++  &hip10c_extra_costs,
++  &hip10c_addrcost_table,
++  &hip10c_regmove_cost,
++  &hip10c_vector_cost,
++  &generic_branch_cost,
++  &generic_approx_modes,
++  SVE_256, /* sve_width  */
++  { 4, /* load_int.  */
++    4, /* store_int.  */
++    4, /* load_fp.  */
++    4, /* store_fp.  */
++    4, /* load_pred.  */
++    4 /* store_pred.  */
++  }, /* memmov_cost.  */
++  4,    /* issue_rate  */
++  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
++   | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
++  "16", /* function_align.  */
++  "4",  /* jump_align.  */
++  "8",  /* loop_align.  */
++  2,    /* int_reassoc_width.  */
++  4,    /* fp_reassoc_width.  */
++  1,    /* vec_reassoc_width.  */
++  2,    /* min_div_recip_mul_sf.  */
++  2,    /* min_div_recip_mul_df.  */
++  0,    /* max_case_values.  */
++  tune_params::AUTOPREFETCHER_WEAK,     /* autoprefetcher_model.  */
++  (AARCH64_EXTRA_TUNE_USE_NEW_VECTOR_COSTS
++   | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT),     /* tune_flags.  */
++  &hip10c_prefetch_tune
++};
++
+ static const struct tune_params hip11_tunings =
+ {
+   &hip11_extra_costs,
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index 6b4341866..96b8ab471 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -549,6 +549,7 @@
+ (include "tsv110.md")
+ (include "thunderx3t110.md")
+ (include "hip09.md")
++(include "hip10c.md")
+ (include "hip11.md")
+ 
+ ;; -------------------------------------------------------------------
+diff --git a/gcc/config/aarch64/hip10c.md b/gcc/config/aarch64/hip10c.md
+new file mode 100644
+index 000000000..a4ab2a3e3
+--- /dev/null
++++ b/gcc/config/aarch64/hip10c.md
+@@ -0,0 +1,562 @@
++;; hip10c pipeline description
++;; Copyright (C) 2023 Free Software Foundation, Inc.
++;;
++;;Contributed by liyunfei
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; .
++
++(define_automaton "hip10c")
++(define_automaton "hip10c_ldst")
++(define_automaton "hip10c_fsu")
++
++(define_attr "hip10c_type"
++  "hip10c_neon_abs, hip10c_neon_fp_arith, hip10c_neon_mul, hip10c_neon_mla,
++   hip10c_neon_dot, hip10c_neon_fp_div, hip10c_neon_fp_sqrt,
++   hip10c_neon_ins, hip10c_neon_load1, hip10c_neon_load1_lanes,
++   hip10c_neon_load2and4, hip10c_neon_load3_3reg,
++   hip10c_neon_load4_4reg, hip10c_neon_store1and2,
++   hip10c_neon_store1_1reg, hip10c_neon_store1_2reg,
++   hip10c_neon_store1_3reg, hip10c_neon_store1_4reg,
++   hip10c_neon_store3and4_lane, hip10c_neon_store3_3reg,
++   hip10c_neon_store4_4reg, unknown"
++  (cond [
++         (eq_attr "type" "neon_abs,neon_abs_q,neon_add,neon_add_q,\
++                  neon_neg,neon_neg_q,neon_sub,neon_sub_q,\
++                  neon_qadd,neon_qadd_q,\
++                  neon_add_long,neon_sub_long,\
++                  neon_qabs,neon_qabs_q,neon_qneg,\
++                  neon_qneg_q,neon_qsub,neon_qsub_q,neon_compare,\
++                  neon_compare_q,neon_compare_zero,\
++                  neon_compare_zero_q,neon_logic,neon_logic_q,\
++                  neon_minmax,neon_minmax_q,neon_tst,\
++                  neon_tst_q,neon_bsl,neon_bsl_q,\
++                  neon_cls,neon_cls_q,neon_ext,\
++                  neon_ext_q,neon_rev,neon_rev_q,\
++                  neon_tbl1,neon_tbl1_q,neon_tbl2,neon_fp_abs_s,\
++                  neon_fp_abs_s_q,neon_fp_abs_d,\
++                  neon_fp_neg_s,neon_fp_neg_s_q,\
++                  neon_fp_neg_d,neon_fp_neg_d_q,\
++                  neon_shift_imm_narrow_q,neon_move,neon_move_q")
++           (const_string "hip10c_neon_abs")
++         (eq_attr "type" "neon_abd,neon_abd_q,\
++                  neon_add_widen,neon_sub_widen,\
++                  neon_arith_acc,neon_arith_acc_q,\
++                  neon_add_halve,neon_add_halve_q,\
++                  neon_sub_halve,neon_sub_halve_q,\
++                  neon_add_halve_narrow_q,\
++                  neon_sub_halve_narrow_q,neon_reduc_add,\
++                  neon_reduc_add_q,\
++                  neon_sat_mla_b_long,\
++                  neon_sat_shift_imm,\
++                  neon_sat_shift_imm_q,neon_shift_imm_long,\
++                  neon_shift_imm,neon_shift_imm_q,neon_cnt,\
++                  neon_cnt_q,neon_fp_recpe_s,\
++                  neon_fp_recpe_d,\
++                  neon_fp_rsqrte_s,neon_fp_rsqrte_s_q,\
++                  neon_fp_rsqrte_d,neon_fp_rsqrte_d_q,\
++                  neon_fp_recpx_s,\
++                  neon_fp_recpx_d,\
++                  neon_tbl3,neon_tbl2_q,neon_to_gp,\
++                  neon_to_gp_q,neon_fp_abd_s,neon_fp_abd_s_q,\
++                  neon_fp_abd_d,neon_fp_abd_d_q,\
++                  neon_fp_addsub_s,neon_fp_addsub_s_q,\
++                  neon_fp_addsub_d,neon_fp_addsub_d_q,\
++                  neon_fp_compare_s,neon_fp_compare_s_q,\
++                  neon_fp_compare_d,neon_fp_compare_d_q,\
++                  neon_fp_cvt_widen_s,neon_fp_to_int_s,\
++                  neon_fp_to_int_s_q,neon_fp_to_int_d,\
++                  neon_fp_to_int_d_q,neon_fp_minmax_s,\
++                  neon_fp_minmax_s_q,neon_fp_minmax_d,\
++                  neon_fp_minmax_d_q,neon_fp_round_s,\
++                  neon_fp_round_s_q,neon_fp_cvt_narrow_d_q,\
++                  neon_fp_round_d,neon_fp_round_d_q,\
++                  neon_fp_cvt_narrow_s_q")
++           (const_string "hip10c_neon_fp_arith")
++         (eq_attr "type" "neon_reduc_minmax_q,neon_reduc_minmax,\
++                  neon_sat_mul_h,neon_sat_mul_h_q,\
++                  neon_sat_mul_s,neon_sat_mul_s_q,\
++                  neon_sat_mul_h_scalar,neon_sat_mul_s_scalar,\
++                  neon_sat_mul_h_scalar_q,neon_sat_mul_h_long,\
++                  neon_sat_mul_s_long,neon_sat_mul_h_scalar_long,\
++                  neon_sat_mul_s_scalar_long,neon_mul_h,neon_mul_h_q,\
++                  neon_mul_s,neon_mul_s_q,neon_mul_h_long,\
++                  neon_mul_s_long,neon_mul_h_scalar_long,\
++                  neon_mul_s_scalar_long,\
++                  neon_sat_mul_b,neon_sat_mul_b_q,\
++                  neon_sat_mul_b_long,neon_mul_b,neon_mul_b_q,\
++                  neon_mul_b_long,\
++                  neon_mla_b,neon_mla_b_q,neon_mla_b_long,\
++                  neon_mla_h,neon_mla_h_q,\
++                  neon_mla_s,neon_mla_h_scalar,\
++                  neon_mla_h_scalar_q,neon_mla_s_scalar,\
++                  neon_mla_h_long,\
++                  neon_mla_s_long,neon_sat_mla_h_long,\
++                  neon_sat_mla_s_long,neon_sat_mla_h_scalar_long,\
++                  neon_sat_mla_s_scalar_long,neon_mla_s_scalar_long,\
++                  neon_mla_h_scalar_long,neon_mla_s_scalar_q,\
++                  neon_shift_acc,neon_shift_acc_q,\
++                  neon_sat_shift_imm_narrow_q,\
++                  neon_tbl4,neon_tbl3_q,neon_fp_reduc_add_s,\
++                  neon_fp_reduc_add_s_q,neon_fp_reduc_add_d,\
++                  neon_fp_reduc_add_d_q,neon_fp_reduc_minmax_s,\
++                  neon_fp_reduc_minmax_d,neon_fp_reduc_minmax_s_q,\
++                  neon_fp_reduc_minmax_d_q,\
++                  neon_fp_mul_s_q,\
++                  neon_fp_mul_d,neon_fp_mul_d_q,\
++                  neon_fp_mul_d_scalar_q,neon_fp_mul_s_scalar,\
++                  neon_fp_mul_s_scalar_q,\
++                  neon_fp_recpe_s_q,neon_fp_recpe_d_q,\
++                  neon_fp_recpx_s_q,neon_fp_recpx_d_q")
++           (const_string "hip10c_neon_mul")
++         (eq_attr "type" "neon_mla_s_q,\
++                  neon_fp_recps_s,\
++                  neon_fp_recps_s_q,neon_fp_recps_d,\
++                  neon_fp_recps_d_q,neon_tbl4_q,\
++                  neon_fp_mla_s,\
++                  neon_fp_mla_d,neon_fp_mla_d_q,\
++                  neon_fp_mla_s_scalar,neon_fp_mla_s_scalar_q,\
++                  neon_fp_mla_d_scalar_q,\
++                  neon_shift_reg,neon_shift_reg_q,\
++                  neon_sat_shift_reg,neon_sat_shift_reg_q")
++           (const_string "hip10c_neon_mla")
++         (eq_attr "type" "neon_dot,neon_dot_q")
++           (const_string "hip10c_neon_dot")
++         (eq_attr "type" "neon_fp_div_s,neon_fp_div_s_q,\
++                   neon_fp_div_d,neon_fp_div_d_q")
++           (const_string "hip10c_neon_fp_div")
++         (eq_attr "type" "neon_fp_sqrt_s,neon_fp_sqrt_s_q,\
++                   neon_fp_sqrt_d,neon_fp_sqrt_d_q")
++           (const_string "hip10c_neon_fp_sqrt")
++         (eq_attr "type" "neon_dup,neon_dup_q,\
++                   neon_ins,neon_ins_q")
++           (const_string "hip10c_neon_ins")
++         (eq_attr "type" "neon_load1_1reg,neon_load1_1reg_q,\
++                   neon_load1_2reg,neon_load1_2reg_q,\
++                   neon_load1_3reg,neon_load1_3reg_q,\
++                   neon_load1_4reg,neon_load1_4reg_q")
++           (const_string "hip10c_neon_load1")
++         (eq_attr "type" "neon_load1_one_lane,\
++                   neon_load1_one_lane_q,\
++                   neon_load1_all_lanes,neon_load1_all_lanes_q")
++           (const_string "hip10c_neon_load1_lanes")
++         (eq_attr "type" "neon_load2_all_lanes,\
++                   neon_load2_all_lanes_q,\
++                   neon_load2_one_lane,neon_load2_2reg,\
++                   neon_load2_2reg_q,neon_load3_one_lane,\
++                   neon_load3_all_lanes,neon_load3_all_lanes_q,\
++                   neon_load4_one_lane,neon_load4_all_lanes,\
++                   neon_load4_all_lanes_q")
++           (const_string "hip10c_neon_load2and4")
++         (eq_attr "type" "neon_load3_3reg,neon_load3_3reg_q")
++           (const_string "hip10c_neon_load3_3reg")
++         (eq_attr "type" "neon_load4_4reg,neon_load4_4reg_q")
++           (const_string "hip10c_neon_load4_4reg")
++         (eq_attr "type" "neon_store1_one_lane,\
++                   neon_store1_one_lane_q,neon_store2_one_lane,\
++                   neon_store2_one_lane_q,neon_store2_2reg,\
++                   neon_store2_2reg_q")
++           (const_string "hip10c_neon_store1and2")
++         (eq_attr "type" "neon_store1_1reg,neon_store1_1reg_q")
++           (const_string "hip10c_neon_store1_1reg")
++         (eq_attr "type" "neon_store1_2reg,neon_store1_2reg_q")
++           (const_string "hip10c_neon_store1_2reg")
++         (eq_attr "type" "neon_store1_3reg,neon_store1_3reg_q")
++           (const_string "hip10c_neon_store1_3reg")
++         (eq_attr "type" "neon_store1_4reg,neon_store1_4reg_q")
++           (const_string "hip10c_neon_store1_4reg")
++         (eq_attr "type" "neon_store3_one_lane,\
++                   neon_store3_one_lane_q,neon_store4_one_lane,\
++                   neon_store4_one_lane_q")
++           (const_string "hip10c_neon_store3and4_lane")
++         (eq_attr "type" "neon_store3_3reg,\
++                  neon_store3_3reg_q")
++           (const_string "hip10c_neon_store3_3reg")
++         (eq_attr "type" "neon_store4_4reg,\
++                   neon_store4_4reg_q")
++           (const_string "hip10c_neon_store4_4reg")]
++  (const_string "unknown")))
++
++; The hip10c core is modelled as issues pipeline that has
++; the following functional units.
++; 1.  Two pipelines for branch micro operations: BRU1, BRU2
++
++(define_cpu_unit "hip10c_bru0" "hip10c")
++(define_cpu_unit "hip10c_bru1" "hip10c")
++
++(define_reservation "hip10c_bru01" "hip10c_bru0|hip10c_bru1")
++
++; 2.  Four pipelines for single cycle integer micro operations: ALUs1, ALUs2, ALUs3, ALUs4
++
++(define_cpu_unit "hip10c_alus0" "hip10c")
++(define_cpu_unit "hip10c_alus1" "hip10c")
++(define_cpu_unit "hip10c_alus2" "hip10c")
++(define_cpu_unit "hip10c_alus3" "hip10c")
++
++(define_reservation "hip10c_alus0123" "hip10c_alus0|hip10c_alus1|hip10c_alus2|hip10c_alus3")
++(define_reservation "hip10c_alus01" "hip10c_alus0|hip10c_alus1")
++(define_reservation "hip10c_alus23" "hip10c_alus2|hip10c_alus3")
++
++; 3. Two pipelines for multi cycles integer micro operations: ALUm1, ALUm2
++
++(define_cpu_unit "hip10c_alum0" "hip10c")
++(define_cpu_unit "hip10c_alum1" "hip10c")
++
++(define_reservation "hip10c_alum01" "hip10c_alum0|hip10c_alum1")
++
++; 4. Two pipelines for load micro operations: Load1, Load2
++
++(define_cpu_unit "hip10c_load0" "hip10c_ldst")
++(define_cpu_unit "hip10c_load1" "hip10c_ldst")
++
++(define_reservation "hip10c_ld01" "hip10c_load0|hip10c_load1")
++
++; 5. Two pipelines for store micro operations: Store1, Store2
++
++(define_cpu_unit "hip10c_store0" "hip10c_ldst")
++(define_cpu_unit "hip10c_store1" "hip10c_ldst")
++
++(define_reservation "hip10c_st01" "hip10c_store0|hip10c_store1")
++
++; 6. Two pipelines for store data micro operations: STD0,STD1
++
++(define_cpu_unit "hip10c_store_data0" "hip10c_ldst")
++(define_cpu_unit "hip10c_store_data1" "hip10c_ldst")
++
++(define_reservation "hip10c_std01" "hip10c_store_data0|hip10c_store_data1")
++
++; 7.  Four asymmetric pipelines for Asimd and FP micro operations: FSU1, FSU2
++
++(define_cpu_unit "hip10c_fsu0" "hip10c_fsu")
++(define_cpu_unit "hip10c_fsu1" "hip10c_fsu")
++
++(define_reservation "hip10c_fsu01" "hip10c_fsu0|hip10c_fsu1")
++
++
++; 8. Two pipelines for sve operations but same with fsu0 and fsu1: SVE1, SVE2
++
++;; Simple Execution Unit:
++;
++;; Simple ALU without shift
++(define_insn_reservation "hip10c_alu" 1
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "alu_imm,logic_imm,\
++            adc_imm,adc_reg,\
++            alu_sreg,logic_reg,\
++            mov_imm,mov_reg,\
++            csel,rotate_imm,bfm,mov_imm,\
++            clz,rbit,rev"))
++  "hip10c_alus0123")
++
++(define_insn_reservation "hip10c_alus" 1
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "alus_sreg,alus_imm,\
++            adcs_reg,adcs_imm,\
++            logics_imm,logics_reg,adr"))
++  "hip10c_alus23")
++
++;; ALU ops with shift and extend
++(define_insn_reservation "hip10c_alu_ext_shift" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "alu_ext,alus_ext,\
++        logics_shift_imm,logics_shift_reg,\
++        logic_shift_reg,logic_shift_imm,\
++        "))
++  "hip10c_alum01")
++
++;; Multiplies instructions
++(define_insn_reservation "hip10c_mult" 3
++  (and (eq_attr "tune" "hip10c")
++       (ior (eq_attr "mul32" "yes")
++       (eq_attr "widen_mul64" "yes")))
++  "hip10c_alum01")
++
++;; Integer divide
++(define_insn_reservation "hip10c_div" 10
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "udiv,sdiv"))
++  "hip10c_alum0")
++
++;; Branch execution Unit
++;
++; Branches take two issue slot.
++; No latency as there is no result
++(define_insn_reservation "hip10c_branch" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "branch,call"))
++  "hip10c_bru01 + hip10c_alus23")
++
++;; Load execution Unit
++;
++; Loads of up to two words.
++(define_insn_reservation "hip10c_load1" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "load_4,load_8"))
++  "hip10c_ld01")
++
++; Stores of up to two words.
++(define_insn_reservation "hip10c_store1" 1
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "store_4,store_8"))
++  "hip10c_st01")
++
++;; FP data processing instructions.
++
++(define_insn_reservation "hip10c_fp_arith" 1
++   (and (eq_attr "tune" "hip10c")
++        (eq_attr "type" "ffariths,ffarithd,fmov,fconsts,fconstd,\
++         f_mrc"))
++   "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_cmp" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fcmps,fcmpd"))
++  "hip10c_fsu01+hip10c_alus23")
++
++(define_insn_reservation "hip10c_fp_ccmp" 7
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fccmps,fccmpd"))
++  "hip10c_alus01+hip10c_fsu01+hip10c_alus23")
++
++(define_insn_reservation "hip10c_fp_csel" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fcsel,f_mcr"))
++  "hip10c_alus01+hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_divs" 7
++  (and (eq_attr "tune" "hip10c")
++  (eq_attr "type" "fdivs"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_divd" 10
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fdivd"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_sqrts" 9
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fsqrts"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_sqrtd" 15
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fsqrtd"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_mul" 3
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fmuls,fmuld"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_add" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fadds,faddd,f_minmaxs,f_minmaxd,f_cvt,\
++       f_rints,f_rintd"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_fp_mac" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "fmacs,fmacd"))
++  "hip10c_fsu01")
++
++;; FP miscellaneous instructions.
++
++(define_insn_reservation "hip10c_fp_cvt" 5
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "f_cvtf2i"))
++  "hip10c_fsu01+hip10c_alus23")
++
++(define_insn_reservation "hip10c_fp_cvt2" 5
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "f_cvti2f"))
++  "hip10c_alus01+hip10c_fsu01")
++
++;; FP Load Instructions 
++
++(define_insn_reservation "hip10c_fp_load" 7
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "f_loads,f_loadd"))
++  "hip10c_ld01")
++
++(define_insn_reservation "hip10c_fp_load2" 6
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "neon_ldp_q,neon_ldp"))
++  "hip10c_ld01+hip10c_alus01")
++
++;; FP store instructions
++
++(define_insn_reservation "hip10c_fp_store" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "f_stores,f_stored"))
++  "hip10c_st01+hip10c_std01")
++
++;; ASIMD integer and fp instructions
++
++(define_insn_reservation "hip10c_asimd_base1" 1
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_abs"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_base2" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_fp_arith"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_base3" 3
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_mul"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_base4" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_mla"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_base5" 5
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "neon_fp_mul_s"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_dot" 3
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_dot"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_bfmmla" 9
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "neon_fp_mla_s_q"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_fdiv" 15
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_fp_div"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_fsqrt" 25
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_fp_sqrt"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_pmull" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "crypto_pmull"))
++  "hip10c_fsu1")
++
++(define_insn_reservation "hip10c_asimd_dup" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_ins"))
++  "hip10c_alus01+hip10c_fsu01")
++
++;; ASIMD load instructions
++
++(define_insn_reservation "hip10c_asimd_ld1_reg" 6
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_load1"))
++  "hip10c_ld01")
++
++(define_insn_reservation "hip10c_asimd_ld1_lane" 7
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_load1_lanes"))
++  "hip10c_ld01+hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_ld23" 8
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_load2and4"))
++  "hip10c_ld01+hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_ld3_mtp" 9
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_load3_3reg"))
++  "hip10c_ld01+hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_ld4_mtp" 14
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_load4_4reg"))
++  "hip10c_ld01+hip10c_fsu01")
++
++;; ASIMD store instructions
++
++(define_insn_reservation "hip10c_asimd_st12" 1
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store1and2"))
++  "hip10c_st01+hip10c_std01")
++
++(define_insn_reservation "hip10c_asimd_st1_1reg" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store1_1reg"))
++  "hip10c_st01+hip10c_std01")
++
++(define_insn_reservation "hip10c_asimd_st1_2reg" 3
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store1_2reg"))
++  "hip10c_st01+hip10c_std01")
++
++(define_insn_reservation "hip10c_asimd_st1_3reg" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store1_3reg"))
++  "hip10c_st01+hip10c_std01")
++
++(define_insn_reservation "hip10c_asimd_st1_4reg" 5
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store1_4reg"))
++  "hip10c_st01+hip10c_std01")
++
++(define_insn_reservation "hip10c_asimd_st34_lane" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store3and4_lane"))
++  "hip10c_fsu01+hip10c_st01+hip10c_std01")
++
++(define_insn_reservation "hip10c_asimd_st3_mtp" 7
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store3_3reg"))
++  "hip10c_fsu01+hip10c_st01+hip10c_std01")
++
++(define_insn_reservation "hip10c_asimd_st4_mtp" 10
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "hip10c_type" "hip10c_neon_store4_4reg"))
++  "hip10c_fsu01+hip10c_st01+hip10c_std01")
++
++;; Cryptography extensions
++
++(define_insn_reservation "hip10c_asimd_aes" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "crypto_aese,crypto_aesmc"))
++  "hip10c_fsu01")
++
++(define_insn_reservation "hip10c_asimd_sha3" 1
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "crypto_sha3"))
++  "hip10c_fsu1")
++
++(define_insn_reservation "hip10c_asimd_sha1" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "crypto_sha1_fast,crypto_sha1_xor,\
++       crypto_sha256_fast,crypto_sha512,\
++       crypto_sm3"))
++  "hip10c_fsu1")
++
++(define_insn_reservation "hip10c_asimd_sha1_and256" 4
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "crypto_sha1_slow,crypto_sha256_slow,\
++       crypto_sm4"))
++  "hip10c_fsu1")
++
++;; CRC extension.
++
++(define_insn_reservation "hip10c_crc" 2
++  (and (eq_attr "tune" "hip10c")
++       (eq_attr "type" "crc"))
++  "hip10c_alum01")
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 2ff7d860d..3eced16e3 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -19220,7 +19220,7 @@ performance of the code.  Permissible values for this option are:
+ @samp{octeontx2}, @samp{octeontx2t98}, @samp{octeontx2t96}
+ @samp{octeontx2t93}, @samp{octeontx2f95}, @samp{octeontx2f95n},
+ @samp{octeontx2f95mm},
+-@samp{a64fx},@samp{hip11}
++@samp{a64fx},@samp{hip09},@samp{hip10c},@samp{hip11}
+ @samp{thunderx}, @samp{thunderxt88},
+ @samp{thunderxt88p1}, @samp{thunderxt81}, @samp{tsv110},
+ @samp{thunderxt83}, @samp{thunderx2t99}, @samp{thunderx3t110}, @samp{zeus},
+-- 
+2.25.1
+
diff --git a/0353-Add-hip10a-machine-discription.patch b/0353-Add-hip10a-machine-discription.patch
new file mode 100644
index 0000000000000000000000000000000000000000..77e8c9701525297ad21f6d2518212f63a31a42ee
--- /dev/null
+++ b/0353-Add-hip10a-machine-discription.patch
@@ -0,0 +1,877 @@
+From 2eea7cfbd7128906034e3d3c5a0fe7d05860ba6b Mon Sep 17 00:00:00 2001
+From: liyunfei 
+Date: Fri, 17 Jan 2025 20:05:33 +0800
+Subject: [PATCH] Add hip10a machine description
+
+Here is the patch introducing hip10a machine model
+for the scheduler.
+---
+ gcc/config/aarch64/aarch64-cores.def     |   3 +-
+ gcc/config/aarch64/aarch64-cost-tables.h | 103 +++++
+ gcc/config/aarch64/aarch64-tune.md       |   2 +-
+ gcc/config/aarch64/aarch64.cc            | 109 +++++
+ gcc/config/aarch64/aarch64.md            |   1 +
+ gcc/config/aarch64/hip10a.md             | 538 +++++++++++++++++++++++
+ gcc/doc/invoke.texi                      |   2 +-
+ 7 files changed, 755 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/config/aarch64/hip10a.md
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 1e8de523c..8f6210397 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -131,7 +131,8 @@ AARCH64_CORE("a64fx", a64fx, a64fx, V8_2A,  (F16, SVE), a64fx, 0x46, 0x001, -1)
+ /* HiSilicon ('H') cores. */
+ AARCH64_CORE("tsv110",  tsv110, tsv110, V8_2A,  (CRYPTO, F16), tsv110,   0x48, 0xd01, -1)
+ AARCH64_CORE("hip09", hip09, hip09, V8_5A,  (SVE, I8MM, F32MM, F64MM, PROFILE, PREDRES), hip09, 0x48, 0xd02, 0x0)
+-AARCH64_CORE("hip10c", hip10c, hip10c, V8_5A,  (SVE, I8MM, BF16, F32MM, F64MM, FLAGM, PAUTH, SSBS, SHA3, SM4, PROFILE, PREDRES), hip10c, 0x48, 0xddd, 0x0)
++AARCH64_CORE("hip10a", hip10a, hip10a, V8_5A,  (SVE, I8MM, BF16, F32MM, F64MM, SSBS, SHA3, SM4, PREDRES, SVE2, SVE2_BITPERM, DOTPROD, F16FML), hip10a, 0x48, 0xd03, 0x0)
++AARCH64_CORE("hip10c", hip10c, hip10c, V8_5A,  (SVE, I8MM, BF16, F32MM, F64MM, FLAGM, PAUTH, SSBS, SHA3, SM4, PROFILE, PREDRES), hip10c, 0x48, 0xd45, 0x0)
+ 
+ /* ARMv8.3-A Architecture Processors.  */
+ 
+diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h
+index 06da1b271..a39ace9ba 100644
+--- a/gcc/config/aarch64/aarch64-cost-tables.h
++++ b/gcc/config/aarch64/aarch64-cost-tables.h
+@@ -880,6 +880,109 @@ const struct cpu_cost_table hip09_extra_costs =
+   }
+ };
+ 
++const struct cpu_cost_table hip10a_extra_costs =
++{
++  /* ALU */
++  {
++    0,                 /* arith.  */
++    0,                 /* logical.  */
++    0,                 /* shift.  */
++    0,                 /* shift_reg.  */
++    COSTS_N_INSNS (1), /* arith_shift.  */
++    COSTS_N_INSNS (1), /* arith_shift_reg.  */
++    COSTS_N_INSNS (1), /* log_shift.  */
++    COSTS_N_INSNS (1), /* log_shift_reg.  */
++    0,                 /* extend.  */
++    0,                 /* extend_arith.  */
++    0,                 /* bfi.  */
++    0,                 /* bfx.  */
++    0,                 /* clz.  */
++    0,                 /* rev.  */
++    0,                 /* non_exec.  */
++    true               /* non_exec_costs_exec.  */
++  },
++  {
++    /* MULT SImode */
++    {
++      COSTS_N_INSNS (2),       /* simple.  */
++      COSTS_N_INSNS (2),       /* flag_setting.  */
++      COSTS_N_INSNS (2),       /* extend.  */
++      COSTS_N_INSNS (2),       /* add.  */
++      COSTS_N_INSNS (2),       /* extend_add.  */
++      COSTS_N_INSNS (7)        /* idiv.  */
++    },
++    /* MULT DImode */
++    {
++      COSTS_N_INSNS (3),       /* simple.  */
++      0,                       /* flag_setting (N/A).  */
++      COSTS_N_INSNS (3),       /* extend.  */
++      COSTS_N_INSNS (3),       /* add.  */
++      COSTS_N_INSNS (3),       /* extend_add.  */
++      COSTS_N_INSNS (10)       /* idiv.  */
++    }
++  },
++  /* LD/ST */
++  {
++    COSTS_N_INSNS (3),         /* load.  */
++    COSTS_N_INSNS (6),         /* load_sign_extend.  */
++    COSTS_N_INSNS (3),         /* ldrd.  */
++    COSTS_N_INSNS (3),         /* ldm_1st.  */
++    1,                         /* ldm_regs_per_insn_1st.  */
++    2,                         /* ldm_regs_per_insn_subsequent.  */
++    COSTS_N_INSNS (5),         /* loadf.  */
++    COSTS_N_INSNS (5),         /* loadd.  */
++    COSTS_N_INSNS (3),         /* load_unaligned.  */
++    0,                         /* store.  */
++    0,                         /* strd.  */
++    0,                         /* stm_1st.  */
++    1,                         /* stm_regs_per_insn_1st.  */
++    2,                         /* stm_regs_per_insn_subsequent.  */
++    0,                         /* storef.  */
++    0,                         /* stored.  */
++    COSTS_N_INSNS (1),         /* store_unaligned.  */
++    COSTS_N_INSNS (4),         /* loadv.  */
++    COSTS_N_INSNS (4)          /* storev.  */
++  },
++  {
++    /* FP SFmode */
++    {
++      COSTS_N_INSNS (6),       /* div.  */
++      COSTS_N_INSNS (2),       /* mult.  */
++      COSTS_N_INSNS (4),       /* mult_addsub.  */
++      COSTS_N_INSNS (3),       /* fma.  */
++      COSTS_N_INSNS (1),       /* addsub.  */
++      0,                       /* fpconst.  */
++      0,                       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (4),       /* toint.  */
++      COSTS_N_INSNS (5),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    },
++    /* FP DFmode */
++    {
++      COSTS_N_INSNS (9),       /* div.  */
++      COSTS_N_INSNS (2),       /* mult.  */
++      COSTS_N_INSNS (4),       /* mult_addsub.  */
++      COSTS_N_INSNS (3),       /* fma.  */
++      COSTS_N_INSNS (1),       /* addsub.  */
++      0,                       /* fpconst.  */
++      0,                       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (4),       /* toint.  */
++      COSTS_N_INSNS (5),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    }
++  },
++  /* Vector */
++  {
++    COSTS_N_INSNS (1)  /* alu.  */
++  }
++};
++
+ const struct cpu_cost_table hip10c_extra_costs =
+ {
+   /* ALU */
+diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
+index e176a4d70..1cfa3559d 100644
+--- a/gcc/config/aarch64/aarch64-tune.md
++++ b/gcc/config/aarch64/aarch64-tune.md
+@@ -1,5 +1,5 @@
+ ;; -*- buffer-read-only: t -*-
+ ;; Generated automatically by gentune.sh from aarch64-cores.def
+ (define_attr "tune"
+-	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,hip10c,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2"
++	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,hip10a,hip10c,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2"
+ 	(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 65b684ef6..a6ef40a47 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -507,6 +507,24 @@ static const struct cpu_addrcost_table hip09_addrcost_table =
+   0, /* imm_offset  */
+ };
+ 
++static const struct cpu_addrcost_table hip10a_addrcost_table =
++{
++    {
++      1, /* hi  */
++      0, /* si  */
++      0, /* di  */
++      1, /* ti  */
++    },
++  0, /* pre_modify  */
++  0, /* post_modify  */
++  0, /* post_modify_ld3_st3  */
++  0, /* post_modify_ld4_st4  */
++  0, /* register_offset  */
++  1, /* register_sextend  */
++  1, /* register_zextend  */
++  0, /* imm_offset  */
++};
++
+ static const struct cpu_addrcost_table hip10c_addrcost_table =
+ {
+     {
+@@ -754,6 +772,16 @@ static const struct cpu_regmove_cost hip09_regmove_cost =
+   2  /* FP2FP  */
+ };
+ 
++static const struct cpu_regmove_cost hip10a_regmove_cost =
++{
++  1, /* GP2GP  */
++  /* Avoid the use of slow int<->fp moves for spilling by setting
++     their cost higher than memmov_cost.  */
++  5, /* GP2FP  */
++  5, /* FP2GP  */
++  2  /* FP2FP  */
++};
++
+ static const struct cpu_regmove_cost hip10c_regmove_cost =
+ {
+   1, /* GP2GP  */
+@@ -1088,6 +1116,43 @@ static const struct cpu_vector_cost hip09_vector_cost =
+   nullptr /* issue_info  */
+ };
+ 
++static const advsimd_vec_cost hip10a_advsimd_vector_cost =
++{
++  2, /* int_stmt_cost  */
++  2, /* fp_stmt_cost  */
++  0, /* ld2_st2_permute_cost  */
++  0, /* ld3_st3_permute_cost  */
++  0, /* ld4_st4_permute_cost  */
++  2, /* permute_cost  */
++  3, /* reduc_i8_cost  */
++  3, /* reduc_i16_cost  */
++  3, /* reduc_i32_cost  */
++  3, /* reduc_i64_cost  */
++  3, /* reduc_f16_cost  */
++  3, /* reduc_f32_cost  */
++  3, /* reduc_f64_cost  */
++  3, /* store_elt_extra_cost  */
++  3, /* vec_to_scalar_cost  */
++  2, /* scalar_to_vec_cost  */
++  5, /* align_load_cost  */
++  5, /* unalign_load_cost  */
++  1, /* unalign_store_cost  */
++  1  /* store_cost  */
++};
++
++static const struct cpu_vector_cost hip10a_vector_cost =
++{
++  1, /* scalar_int_stmt_cost  */
++  1, /* scalar_fp_stmt_cost  */
++  5, /* scalar_load_cost  */
++  1, /* scalar_store_cost  */
++  1, /* cond_taken_branch_cost  */
++  1, /* cond_not_taken_branch_cost  */
++  &hip10a_advsimd_vector_cost, /* advsimd  */
++  nullptr, /* sve  */
++  nullptr /* issue_info  */
++};
++
+ static const advsimd_vec_cost hip10c_advsimd_vector_cost =
+ {
+   2, /* int_stmt_cost  */
+@@ -1520,6 +1585,17 @@ static const cpu_prefetch_tune hip09_prefetch_tune =
+   -1                    /* default_opt_level  */
+ };
+ 
++static const cpu_prefetch_tune hip10a_prefetch_tune =
++{
++  0,                    /* num_slots  */
++  64,                   /* l1_cache_size  */
++  64,                   /* l1_cache_line_size  */
++  512,                  /* l2_cache_size  */
++  true,                 /* prefetch_dynamic_strides */
++  -1,                   /* minimum_stride */
++  -1                    /* default_opt_level  */
++};
++
+ static const cpu_prefetch_tune hip10c_prefetch_tune =
+ {
+   0,                    /* num_slots  */
+@@ -1940,6 +2016,39 @@ static const struct tune_params hip09_tunings =
+   &hip09_prefetch_tune
+ };
+ 
++static const struct tune_params hip10a_tunings =
++{
++  &hip10a_extra_costs,
++  &hip10a_addrcost_table,
++  &hip10a_regmove_cost,
++  &generic_vector_cost,
++  &generic_branch_cost,
++  &generic_approx_modes,
++  SVE_256, /* sve_width  */
++  { 4, /* load_int.  */
++    4, /* store_int.  */
++    4, /* load_fp.  */
++    4, /* store_fp.  */
++    4, /* load_pred.  */
++    4 /* store_pred.  */
++  }, /* memmov_cost.  */
++  8,    /* issue_rate  */
++  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
++   | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
++  "16", /* function_align.  */
++  "4",  /* jump_align.  */
++  "8",  /* loop_align.  */
++  2,    /* int_reassoc_width.  */
++  4,    /* fp_reassoc_width.  */
++  1,    /* vec_reassoc_width.  */
++  2,    /* min_div_recip_mul_sf.  */
++  2,    /* min_div_recip_mul_df.  */
++  0,    /* max_case_values.  */
++  tune_params::AUTOPREFETCHER_WEAK,     /* autoprefetcher_model.  */
++  (AARCH64_EXTRA_TUNE_PREFER_ADVSIMD_AUTOVEC),     /* tune_flags.  */
++  &hip10a_prefetch_tune
++};
++
+ static const struct tune_params hip10c_tunings =
+ {
+   &hip10c_extra_costs,
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index 96b8ab471..2f46bc793 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -549,6 +549,7 @@
+ (include "tsv110.md")
+ (include "thunderx3t110.md")
+ (include "hip09.md")
++(include "hip10a.md")
+ (include "hip10c.md")
+ (include "hip11.md")
+ 
+diff --git a/gcc/config/aarch64/hip10a.md b/gcc/config/aarch64/hip10a.md
+new file mode 100644
+index 000000000..3a687e8af
+--- /dev/null
++++ b/gcc/config/aarch64/hip10a.md
+@@ -0,0 +1,538 @@
++;; hip10a pipeline description
++;; Copyright (C) 2023 Free Software Foundation, Inc.
++;;
++;; Contributed by liyunfei
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; .
++
++(define_automaton "hip10a")
++(define_automaton "hip10a_ldst")
++(define_automaton "hip10a_fsu")
++
++(define_attr "hip10a_type"
++  "hip10a_neon_base1, hip10a_neon_base2, hip10a_neon_base3, hip10a_neon_base4,
++   hip10a_neon_load1_12, hip10a_neon_load1_34, hip10a_neon_load1_lanes, hip10a_neon_load2,
++   hip10a_neon_load34_all_lane, hip10a_neon_load34_one_lane, hip10a_neon_load34, hip10a_neon_load34_q,
++   hip10a_neon_store1, hip10a_neon_store2, hip10a_neon_store1_34reg_d, hip10a_neon_store1_12reg_d,
++   hip10a_neon_store34,
++   unknown"
++  (cond [
++        (eq_attr "type" "neon_abs,neon_abs_q,\
++                neon_neg,neon_neg_q,\
++                neon_add,neon_add_q,neon_add_widen,neon_add_long,\
++                neon_sub,neon_sub_q,neon_sub_widen,neon_sub_long,\
++                neon_qadd,neon_qadd_q,\
++                neon_qsub,neon_qsub_q,\
++                neon_qabs,neon_qabs_q,\
++                neon_qneg,neon_qneg_q,\
++                neon_compare,neon_compare_q,neon_compare_zero,neon_compare_zero_q,\
++                neon_logic,neon_logic_q,\
++                neon_minmax,neon_minmax_q,\
++                neon_tst,neon_tst_q,\
++                neon_bsl,neon_bsl_q,\
++                neon_cls,neon_cls_q,\
++                neon_ext,neon_ext_q,\
++                neon_rev,neon_rev_q,\
++                neon_fp_abs_s,neon_fp_abs_s_q,neon_fp_abs_d,\
++                neon_fp_neg_s,neon_fp_neg_s_q,neon_fp_neg_d,neon_fp_neg_d_q,\
++                neon_move,neon_move_q,\
++                neon_ins,neon_ins_q")
++          (const_string "hip10a_neon_base1")
++        (eq_attr "type" "neon_abd,neon_abd_q,\
++                neon_tbl1,neon_tbl1_q,\
++                neon_arith_acc,neon_arith_acc_q,\
++                neon_add_halve,neon_add_halve_q,neon_add_halve_narrow_q,\
++                neon_sub_halve,neon_sub_halve_q,neon_sub_halve_narrow_q,\
++                neon_sat_shift_imm,neon_sat_shift_imm_q,\
++                neon_shift_imm,neon_shift_imm_q,neon_shift_imm_long,\
++                neon_shift_imm_narrow_q,\
++                neon_cnt,neon_cnt_q,\
++                neon_tbl1,neon_tbl1_q,neon_tbl2,neon_tbl2_q,\
++                neon_to_gp,neon_to_gp_q,\
++                neon_fp_recpe_s,neon_fp_recpe_s_q,\
++                neon_fp_recpe_d,neon_fp_recpe_d_q,\
++                neon_fp_rsqrte_s,neon_fp_rsqrte_s_q,\
++                neon_fp_rsqrte_d,neon_fp_rsqrte_d_q,\
++                neon_fp_recpx_s,neon_fp_recpx_s_q,\
++                neon_fp_recpx_d,neon_fp_recpx_d_q,\
++                neon_fp_abd_s,neon_fp_abd_s_q,\
++                neon_fp_abd_d,neon_fp_abd_d_q,\
++                neon_fp_addsub_s,neon_fp_addsub_s_q,\
++                neon_fp_addsub_d,neon_fp_addsub_d_q,\
++                neon_fp_compare_s,neon_fp_compare_s_q,\
++                neon_fp_compare_d,neon_fp_compare_d_q,\
++                neon_fp_minmax_s,\
++                neon_fp_minmax_s_q,neon_fp_minmax_d,\
++                neon_fp_minmax_d_q,neon_fp_round_s,\
++                neon_fp_round_s_q,\
++                neon_fp_round_d,neon_fp_round_d_q")
++          (const_string "hip10a_neon_base2")
++        (eq_attr "type" "neon_dot,neon_dot_q,\
++                neon_reduc_add,neon_reduc_add_q,\
++                neon_sat_mul_b,neon_sat_mul_b_q,neon_sat_mul_b_long,\
++                neon_sat_mul_h,neon_sat_mul_h_q,\
++                neon_sat_mul_s,neon_sat_mul_s_q,\
++                neon_sat_mul_h_scalar,neon_sat_mul_s_scalar,\
++                neon_sat_mul_h_scalar_q,neon_sat_mul_h_long,\
++                neon_sat_mul_s_long,neon_sat_mul_h_scalar_long,\
++                neon_sat_mul_s_scalar_long,neon_mul_h,neon_mul_h_q,\
++                neon_mul_b,neon_mul_b_q,neon_mul_b_long,\
++                neon_mul_s,neon_mul_s_q,neon_mul_h_long,\
++                neon_mul_s_long,neon_mul_h_scalar_long,\
++                neon_mul_s_scalar_long,\
++                neon_mla_b,neon_mla_b_q,neon_mla_b_long,\
++                neon_mla_h,neon_mla_h_q,neon_mla_h_long,\
++                neon_mla_h_scalar,neon_mla_h_scalar_q,neon_mla_h_scalar_long,\
++                neon_mla_s,neon_mla_s_q,neon_mla_s_long,\
++                neon_mla_s_scalar,neon_mla_s_scalar_q,neon_mla_s_scalar_long,\
++                neon_sat_mla_b_long,\
++                neon_sat_mla_h_long,\
++                neon_sat_mla_h_scalar_long,\
++                neon_sat_mla_s_long,\
++                neon_sat_mla_s_scalar_long,\
++                neon_shift_acc,neon_shift_acc_q,neon_shift_reg,neon_shift_reg_q,\
++                neon_sat_shift_reg,neon_sat_shift_reg_q,neon_sat_shift_imm_narrow_q,\
++                neon_reduc_minmax,neon_reduc_minmax_q,\
++                neon_fp_reduc_add_s,neon_fp_reduc_add_s_q,\
++                neon_fp_reduc_add_d,neon_fp_reduc_add_d_q,\
++                neon_fp_reduc_minmax_s,neon_fp_reduc_minmax_s_q,\
++                neon_fp_reduc_minmax_d,neon_fp_reduc_minmax_d_q,\
++                neon_fp_mul_s,neon_fp_mul_s_q,neon_fp_mul_s_scalar,\
++                neon_fp_mul_d,neon_fp_mul_d_q,neon_fp_mul_d_scalar_q,\
++                neon_fp_mul_s_scalar_q,\
++                neon_fp_recpe_s,neon_fp_recpe_d,\
++                neon_fp_recpx_s,neon_fp_recpx_s_q,neon_fp_recpx_d,neon_fp_recpx_d_q,\
++                neon_fp_to_int_s,neon_fp_to_int_d")
++          (const_string "hip10a_neon_base3")
++        (eq_attr "type" "neon_tbl3,neon_tbl3_q,\
++                neon_fp_recpe_s_q,neon_fp_recpe_d_q,\
++                neon_fp_recps_s_q,neon_fp_recps_d,neon_fp_recps_s,neon_fp_recps_d_q,\
++                neon_fp_to_int_s_q,neon_fp_to_int_d_q,\
++                neon_fp_cvt_narrow_d_q,neon_fp_cvt_narrow_s_q,\
++                neon_fp_mla_s,neon_fp_mla_s_q,\
++                neon_fp_mla_d,neon_fp_mla_d_q,\
++                neon_fp_mla_s_scalar,neon_fp_mla_s_scalar_q,\
++                neon_fp_mla_d_scalar_q")
++          (const_string "hip10a_neon_base4")
++        (eq_attr "type" "neon_load1_1reg,neon_load1_1reg_q,\
++                neon_load1_2reg,neon_load1_2reg_q")
++          (const_string "hip10a_neon_load1_12")
++        (eq_attr "type" "neon_load1_3reg,neon_load1_3reg_q,\
++                neon_load1_4reg,neon_load1_4reg_q")
++          (const_string "hip10a_neon_load1_34")
++        (eq_attr "type" "neon_load1_one_lane,\
++                neon_load1_one_lane_q,\
++                neon_load1_all_lanes,neon_load1_all_lanes_q")
++          (const_string "hip10a_neon_load1_lanes")
++        (eq_attr "type" "neon_load2_all_lanes,\
++                neon_load2_all_lanes_q,\
++                neon_load2_one_lane,neon_load2_2reg,\
++                neon_load2_2reg_q,neon_load3_one_lane")
++          (const_string "hip10a_neon_load2")
++        (eq_attr "type" "neon_load4_one_lane,neon_load4_one_lane")
++          (const_string "hip10a_neon_load34_one_lane")
++        (eq_attr "type" "neon_load3_all_lanes,neon_load3_all_lanes_q,\
++                neon_load4_all_lanes,neon_load4_all_lanes_q")
++          (const_string "hip10a_neon_load34_all_lane")
++        (eq_attr "type" "neon_load3_3reg,neon_load4_4reg")
++          (const_string "hip10a_neon_load34")
++        (eq_attr "type" "neon_load3_3reg_q,neon_load4_4reg_q")
++          (const_string "hip10a_neon_load34_q")
++        (eq_attr "type" "neon_store1_1reg_q,neon_store1_2reg_q,\
++                neon_store1_3reg_q,neon_store1_4reg_q,\
++                neon_store1_one_lane,neon_store1_one_lane_q")
++          (const_string "hip10a_neon_store1")
++        (eq_attr "type" "neon_store2_one_lane,neon_store2_one_lane_q,\
++                neon_store2_2reg,neon_store2_2reg_q")
++          (const_string "hip10a_neon_store2")
++        (eq_attr "type" "neon_store1_1reg,neon_store1_2reg")
++          (const_string "hip10a_neon_store1_12reg_d")
++        (eq_attr "type" "neon_store1_3reg,neon_store1_4reg")
++          (const_string "hip10a_neon_store1_34reg_d")
++        (eq_attr "type" "neon_store3_one_lane,neon_store3_one_lane_q,\
++                neon_store4_one_lane,neon_store4_one_lane_q,\
++                neon_store3_3reg_q,neon_store3_3reg,\
++                neon_store4_4reg_q,neon_store4_4reg")
++          (const_string "hip10a_neon_store34")]
++  (const_string "unknown")))
++
++; The hip10a core is modelled as an issue pipeline that has
++; the following functional units.
++; 1.  Three pipelines for single cycle integer micro operations: ALUs0, ALUs1, ALUs2
++
++(define_cpu_unit "hip10a_alus0" "hip10a")
++(define_cpu_unit "hip10a_alus1" "hip10a")
++(define_cpu_unit "hip10a_alus2" "hip10a")
++
++(define_reservation "hip10a_alus012" "hip10a_alus0|hip10a_alus1|hip10a_alus2")
++;(define_reservation "hip10a_alus01" "hip10a_alus0|hip10a_alus1")
++;(define_reservation "hip10a_alus23" "hip10a_alus2|hip10a_alus3")
++
++; 2. Three pipelines for multi cycles integer micro operations: ALUm0, ALUm1, ALUm2
++
++(define_cpu_unit "hip10a_alum0" "hip10a")
++(define_cpu_unit "hip10a_alum1" "hip10a")
++(define_cpu_unit "hip10a_alum2" "hip10a")
++
++(define_reservation "hip10a_alum012" "hip10a_alum0|hip10a_alum1|hip10a_alum2")
++
++; 3. All ALU pipelines
++
++(define_reservation "hip10a_alu" "hip10a_alus0|hip10a_alus1|hip10a_alus2|hip10a_alum0|hip10a_alum1|hip10a_alum2")
++
++; 4. Three pipelines for load micro operations: Load0, Load1, Load2
++
++(define_cpu_unit "hip10a_load0" "hip10a_ldst")
++(define_cpu_unit "hip10a_load1" "hip10a_ldst")
++(define_cpu_unit "hip10a_load2" "hip10a_ldst")
++
++(define_reservation "hip10a_ld012" "hip10a_load0|hip10a_load1|hip10a_load2")
++
++; 5. Two pipelines for store micro operations: Store0, Store1
++
++(define_cpu_unit "hip10a_store0" "hip10a_ldst")
++(define_cpu_unit "hip10a_store1" "hip10a_ldst")
++
++(define_reservation "hip10a_st01" "hip10a_store0|hip10a_store1")
++
++; 6. Two pipelines for store data micro operations: STD0,STD1
++
++(define_cpu_unit "hip10a_store_data0" "hip10a_ldst")
++(define_cpu_unit "hip10a_store_data1" "hip10a_ldst")
++
++(define_reservation "hip10a_std01" "hip10a_store_data0|hip10a_store_data1")
++
++; 7.  Four asymmetric pipelines for Asimd and FP micro operations: FSU0, FSU1, FSU2, FSU3
++
++(define_cpu_unit "hip10a_fsu0" "hip10a_fsu")
++(define_cpu_unit "hip10a_fsu1" "hip10a_fsu")
++(define_cpu_unit "hip10a_fsu2" "hip10a_fsu")
++(define_cpu_unit "hip10a_fsu3" "hip10a_fsu")
++
++(define_reservation "hip10a_fsu0123" "hip10a_fsu0|hip10a_fsu1|hip10a_fsu2|hip10a_fsu3")
++(define_reservation "hip10a_fsu02" "hip10a_fsu0|hip10a_fsu2")
++
++
++; 8. Two pipelines for sve operations but same with fsu1 and fsu3: SVE1, SVE2
++
++;; Branch execution Unit
++;
++; Branches take two issue slots.
++; No latency as there is no result
++(define_insn_reservation "hip10a_branch" 0
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "branch,call"))
++  "hip10a_alus012")
++
++;; Simple Execution Unit:
++;
++;; Simple ALU without shift
++(define_insn_reservation "hip10a_alu_all" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "alu_imm,\
++            adc_imm,adc_reg,\
++            alu_sreg,\
++            mov_imm,mov_reg"))
++  "hip10a_alu")
++
++(define_insn_reservation "hip10a_alum" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "logic_imm,logic_reg,\
++            csel,rotate_imm,bfm,\
++            clz,rbit,rev"))
++  "hip10a_alum012")
++
++(define_insn_reservation "hip10a_alus" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "alus_sreg,alus_imm,\
++            adcs_reg,adcs_imm,\
++            logics_imm,logics_reg,adr"))
++  "hip10a_alus012")
++
++;; ALU ops with shift and extend
++(define_insn_reservation "hip10a_alu_ext_shift" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "alu_ext,alus_ext,\
++        logics_shift_imm,logics_shift_reg,\
++        logic_shift_reg,\
++        logic_shift_imm"))
++  "hip10a_alum012")
++
++;; Multiply and multiply accumulate and count leading zeros
++(define_insn_reservation "hip10a_mul" 3
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "mul,muls,clz,smull,umull"))
++  "hip10a_alum012")
++
++(define_insn_reservation "hip10a_mla" 4
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "mla,mlas,smlal,umlal"))
++  "hip10a_alum012|hip10a_alu")
++
++;; Integer divide
++(define_insn_reservation "hip10a_div" 11
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "udiv,sdiv"))
++  "hip10a_alum0")
++
++;; Load execution Unit
++;
++; Loads of up to two words.
++(define_insn_reservation "hip10a_load1" 4
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "load_4,load_8,load_16"))
++  "hip10a_ld012")
++
++; Stores of up to two words.
++(define_insn_reservation "hip10a_store1" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "store_4,store_8,store_16"))
++  "hip10a_st01")
++
++;; FP data processing instructions.
++
++(define_insn_reservation "hip10a_fp_arith" 1
++   (and (eq_attr "tune" "hip10a")
++        (eq_attr "type" "ffariths,ffarithd,fmov,fconsts,fconstd,\
++         f_mrc"))
++   "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_fp_cmp" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fcmps,fcmpd"))
++  "hip10a_fsu02+hip10a_alus012")
++
++(define_insn_reservation "hip10a_fp_ccmp" 6
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fccmps,fccmpd"))
++  "hip10a_fsu0123+hip10a_alus012")
++
++(define_insn_reservation "hip10a_fp_csel" 6
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fcsel,f_mcr"))
++  "hip10a_fsu0123+hip10a_alus012")
++
++(define_insn_reservation "hip10a_fp_divs" 7
++  (and (eq_attr "tune" "hip10a")
++  (eq_attr "type" "fdivs"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_fp_divd" 10
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fdivd"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_fp_sqrts" 9
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fsqrts"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_fp_sqrtd" 15
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fsqrtd"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_fp_mul" 3
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fmuls,fmuld"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_fp_add" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fadds,faddd,f_minmaxs,f_minmaxd,f_cvt,\
++       f_rints,f_rintd"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_fp_mac" 4
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "fmacs,fmacd"))
++  "hip10a_fsu0123")
++
++;; FP miscellaneous instructions.
++
++(define_insn_reservation "hip10a_fp_cvt" 5
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "f_cvtf2i"))
++  "hip10a_fsu0123+hip10a_alus012")
++
++(define_insn_reservation "hip10a_fp_cvt2" 6
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "f_cvti2f"))
++  "hip10a_alus012+hip10a_fsu0123")
++
++;; FP Load Instructions
++
++(define_insn_reservation "hip10a_fp_load" 8
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "f_loads,f_loadd"))
++  "hip10a_ld012")
++
++(define_insn_reservation "hip10a_fp_load2" 6
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_ldp_q,neon_ldp"))
++  "hip10a_ld012+hip10a_alu")
++
++;; FP store instructions
++
++(define_insn_reservation "hip10a_fp_store" 3
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "f_stores,f_stored"))
++  "hip10a_st01+hip10a_std01")
++
++(define_insn_reservation "hip10a_fp_store2" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_stp_q,neon_stp"))
++  "hip10a_st01+hip10a_std01+hip10a_alu")
++
++;; ASIMD integer instructions
++
++(define_insn_reservation "hip10a_asimd_base1" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_base1"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_base2" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_base2"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_base3" 3
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_base3"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_base4" 4
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_base4"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_base5" 5
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" ""))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_base6" 6
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_tbl4,neon_tbl4_q"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_base7" 7
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_fp_div_s,neon_fp_div_d"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_base9" 9
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_fp_div_s_q,neon_fp_sqrt_s,neon_fp_sqrt_d"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_fsqrt_q" 13
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_fp_sqrt_s_q"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_fdiv_f64_q" 15
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_fp_div_d_q"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_fsqrt_f64_q" 25
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_fp_sqrt_d_q"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_dup" 5
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "neon_dup,neon_dup_q"))
++  "hip10a_alus012+hip10a_fsu0123")
++
++;; ASIMD load instructions
++
++(define_insn_reservation "hip10a_asimd_ld1_12" 6
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_load1_12"))
++  "hip10a_ld012")
++
++(define_insn_reservation "hip10a_asimd_ld1_34" 7
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_load1_34"))
++  "hip10a_ld012")
++
++(define_insn_reservation "hip10a_asimd_ld7" 7
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_load1_lanes,hip10a_neon_load2,hip10a_neon_load34_all_lane,hip10a_neon_load34"))
++  "hip10a_ld012+hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_ld8" 8
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_load34_one_lane,hip10a_neon_load34_q"))
++"hip10a_ld012+hip10a_fsu0123")
++
++;; ASIMD store instructions
++
++(define_insn_reservation "hip10a_asimd_st1" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_store1,hip10a_neon_store2"))
++  "hip10a_st01+hip10a_std01")
++
++(define_insn_reservation "hip10a_asimd_st1_12" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_store1_12reg_d"))
++  "hip10a_st01+hip10a_std01+hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_st4" 4
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "hip10a_type" "hip10a_neon_store1_34reg_d,hip10a_neon_store34"))
++  "hip10a_fsu0123+hip10a_st01+hip10a_std01")
++
++;; Cryptography extensions
++
++
++(define_insn_reservation "hip10a_asimd_pmull" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "crypto_pmull"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_aes" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "crypto_aese,crypto_aesmc"))
++  "hip10a_fsu0+hip10a_fsu2")
++
++(define_insn_reservation "hip10a_asimd_sha3" 1
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "crypto_sha3"))
++  "hip10a_fsu0123")
++
++(define_insn_reservation "hip10a_asimd_sha1" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "crypto_sha1_fast,crypto_sha1_xor,\
++       crypto_sha256_fast,crypto_sha512,\
++       crypto_sm3"))
++  "hip10a_fsu0+hip10a_fsu2")
++
++(define_insn_reservation "hip10a_asimd_sha1_and256" 4
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "crypto_sha1_slow,crypto_sha256_slow,\
++       crypto_sm4"))
++  "hip10a_fsu0+hip10a_fsu2")
++
++;; CRC extension.
++
++(define_insn_reservation "hip10a_crc" 2
++  (and (eq_attr "tune" "hip10a")
++       (eq_attr "type" "crc"))
++  "hip10a_alum012")
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 3eced16e3..2d906ee56 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -19220,7 +19220,7 @@ performance of the code.  Permissible values for this option are:
+ @samp{octeontx2}, @samp{octeontx2t98}, @samp{octeontx2t96}
+ @samp{octeontx2t93}, @samp{octeontx2f95}, @samp{octeontx2f95n},
+ @samp{octeontx2f95mm},
+-@samp{a64fx},@samp{hip09},@samp{hip10c},@samp{hip11}
++@samp{a64fx},@samp{hip09},@samp{hip10a},@samp{hip10c},@samp{hip11}
+ @samp{thunderx}, @samp{thunderxt88},
+ @samp{thunderxt88p1}, @samp{thunderxt81}, @samp{tsv110},
+ @samp{thunderxt83}, @samp{thunderx2t99}, @samp{thunderx3t110}, @samp{zeus},
+-- 
+2.25.1
+
diff --git a/0354-Fix-for-hip11-and-hip10c-addrcost_table.patch b/0354-Fix-for-hip11-and-hip10c-addrcost_table.patch
new file mode 100644
index 0000000000000000000000000000000000000000..efdf0e9509bb581e1cd8dfa0cf01f56faf1a40f8
--- /dev/null
+++ b/0354-Fix-for-hip11-and-hip10c-addrcost_table.patch
@@ -0,0 +1,34 @@
+From 62bbc7f631a49712903281ad85b62205780d8af7 Mon Sep 17 00:00:00 2001
+From: liyunfei 
+Date: Tue, 21 Jan 2025 15:16:50 +0800
+Subject: [PATCH] Fix for hip11 and hip10c addrcost_table
+
+---
+ gcc/config/aarch64/aarch64.cc | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index a6ef40a47..52ce7d905 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -535,6 +535,8 @@ static const struct cpu_addrcost_table hip10c_addrcost_table =
+     },
+   0, /* pre_modify  */
+   0, /* post_modify  */
++  0, /* post_modify_ld3_st3  */
++  0, /* post_modify_ld4_st4  */
+   0, /* register_offset  */
+   1, /* register_sextend  */
+   1, /* register_zextend  */
+@@ -551,6 +553,8 @@ static const struct cpu_addrcost_table hip11_addrcost_table =
+     },
+   0, /* pre_modify  */
+   0, /* post_modify  */
++  0, /* post_modify_ld3_st3  */
++  0, /* post_modify_ld4_st4  */
+   0, /* register_offset  */
+   1, /* register_sextend  */
+   1, /* register_zextend  */
+-- 
+2.25.1
+
diff --git a/0355-Fix-errors-in-ipa-struct-sfc-IBMY84-IBN2JO-IBN42Q.patch b/0355-Fix-errors-in-ipa-struct-sfc-IBMY84-IBN2JO-IBN42Q.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8be2b1ddda399ee4074158fe0218f92970be766b
--- /dev/null
+++ b/0355-Fix-errors-in-ipa-struct-sfc-IBMY84-IBN2JO-IBN42Q.patch
@@ -0,0 +1,154 @@
+From c2444a1259ac0f082f8ce8919a053bd1de504781 Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Thu, 20 Feb 2025 14:52:19 +0800
+Subject: [PATCH] Fix errors in ipa-struct-sfc (IBMY84, IBN2JO, IBN42Q)
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      | 31 ++++++++++++++++---
+ .../gcc.dg/struct/sfc-shadow_non_integer.c    | 25 +++++++++++++++
+ gcc/testsuite/gcc.dg/struct/sfc_non_integer.c | 29 +++++++++++++++++
+ 3 files changed, 81 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_non_integer.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc_non_integer.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index d3beebc00..f2660c952 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -4367,6 +4367,20 @@ ipa_struct_reorg::wholeaccess (tree expr, tree base,
+   if (!handled_type (TREE_TYPE (expr)))
+     return false;
+ 
++  if (!t || !t->type)
++    return false;
++
++  tree type = TYPE_MAIN_VARIANT (t->type);
++  if (TREE_CODE (expr) == MEM_REF
++      && POINTER_TYPE_P (TREE_TYPE (expr))
++      && POINTER_TYPE_P (accesstype)
++      && POINTER_TYPE_P (TREE_TYPE (accesstype))
++      && POINTER_TYPE_P (TREE_TYPE (base))
++      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (base))) == type
++      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (expr))) == type
++      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (accesstype))) == type)
++    return false;
++
+   srtype *other_type = find_type (inner_type (TREE_TYPE (expr)));
+ 
+   if (t == other_type)
+@@ -5372,12 +5386,9 @@ ipa_struct_reorg::record_function (cgraph_node *node)
+   function *fn;
+   tree parm, var;
+   unsigned int i;
+-  srfunction *sfn;
++  srfunction *sfn = NULL;
+   escape_type escapes = does_not_escape;
+ 
+-  sfn = new srfunction (node);
+-  functions.safe_push (sfn);
+-
+   if (dump_file  && (dump_flags & TDF_DETAILS))
+     fprintf (dump_file,
+ 	     "\nRecording accesses and types from function: %s/%u\n",
+@@ -5395,6 +5406,9 @@ ipa_struct_reorg::record_function (cgraph_node *node)
+   if (!fn)
+     return sfn;
+ 
++  sfn = new srfunction (node);
++  functions.safe_push (sfn);
++
+   current_function = sfn;
+ 
+   if (DECL_PRESERVE_P (node->decl))
+@@ -8983,6 +8997,10 @@ ipa_struct_reorg::find_static_fc_fields (fc_type_info *info)
+ 
+   for (auto *srf : info->type->fields)
+     {
++      /* Avoid compressing non-integer type.  */
++      if (TREE_CODE (srf->fieldtype) != INTEGER_TYPE)
++	continue;
++
+       /* We have marked these fields as shadow, so skip them.  */
+       if (fc_fields_contains (info->static_fc_fields, srf->fielddecl))
+ 	continue;
+@@ -9034,6 +9052,11 @@ ipa_struct_reorg::find_shadow_fields (fc_type_info *info)
+   bool found_shadow = false;
+   for (auto *field_class : info->field_classes)
+     {
++      /* Avoid shadowing non-integer type, we can try to do this
++	 in the future.  */
++      if (TREE_CODE (field_class->fieldtype) != INTEGER_TYPE)
++	continue;
++
+       /* Field shadowing requires two or more fields.  */
+       if (field_class->size () < 2)
+ 	continue;
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_non_integer.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_non_integer.c
+new file mode 100644
+index 000000000..44769a2a1
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_non_integer.c
+@@ -0,0 +1,25 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++struct arc {
++    double a;
++    double b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++    printf("%f, %f\n", arcs[10].a, arcs[10].b);
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding static fc fields" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc_non_integer.c b/gcc/testsuite/gcc.dg/struct/sfc_non_integer.c
+new file mode 100644
+index 000000000..e76e30e70
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc_non_integer.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++struct arc {
++    double a;
++    float b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].b = 2;
++        arcs[i].b = 1.0;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].a < arcs[i].b)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding static fc fields" "struct_reorg" } } */
+-- 
+2.33.0
+
diff --git a/0356-Avoid-doing-sfc-with-struct_split-and-compressing-de.patch b/0356-Avoid-doing-sfc-with-struct_split-and-compressing-de.patch
new file mode 100644
index 0000000000000000000000000000000000000000..66438ca5ef582130b8863f93b59c43fd35a1002c
--- /dev/null
+++ b/0356-Avoid-doing-sfc-with-struct_split-and-compressing-de.patch
@@ -0,0 +1,136 @@
+From 63b3db6e1f3e4aba90d76780fb64f6875c5dd2ab Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Wed, 26 Feb 2025 01:24:45 +0800
+Subject: [PATCH] Avoid doing sfc with struct_split and compressing dead fields
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      | 13 ++++++--
+ .../gcc.dg/struct/sfc-shadow_dead_field.c     | 31 ++++++++++++++++++
+ gcc/testsuite/gcc.dg/struct/sfc_dead_field.c  | 32 +++++++++++++++++++
+ 3 files changed, 74 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_dead_field.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc_dead_field.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index f2660c952..fcc26d6a4 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -8970,7 +8970,8 @@ void
+ ipa_struct_reorg::classify_fields (fc_type_info *info)
+ {
+   for (auto *srf : info->type->fields)
+-    info->record_field_class (srf);
++    if (!srf->dead_field_p ())
++      info->record_field_class (srf);
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+@@ -8997,6 +8998,10 @@ ipa_struct_reorg::find_static_fc_fields (fc_type_info *info)
+ 
+   for (auto *srf : info->type->fields)
+     {
++      /* Skip dead field.  */
++      if (srf->dead_field_p ())
++	continue;
++
+       /* Avoid compressing non-integer type.  */
+       if (TREE_CODE (srf->fieldtype) != INTEGER_TYPE)
+ 	continue;
+@@ -10064,7 +10069,8 @@ ipa_struct_reorg::execute (unsigned int opt)
+ 	check_and_prune_struct_for_pointer_compression ();
+       if (opt >= SEMI_RELAYOUT)
+ 	check_and_prune_struct_for_semi_relayout ();
+-      if (flag_ipa_struct_sfc)
++      /* Avoid doing static field compression in STRUCT_SPLIT.  */
++      if (opt >= STRUCT_REORDER_FIELDS && flag_ipa_struct_sfc)
+ 	check_and_prune_struct_for_field_compression ();
+       ret = rewrite_functions ();
+     }
+@@ -10148,6 +10154,9 @@ public:
+     if (level >= STRUCT_REORDER_FIELDS)
+       ret = ipa_struct_reorg ().execute (level);
+ 
++    if (ret & TODO_remove_functions)
++      symtab->remove_unreachable_nodes (dump_file);
++
+     if (level >= COMPLETE_STRUCT_RELAYOUT)
+       {
+ 	/* Preserved for backward compatibility.
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_dead_field.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_dead_field.c
+new file mode 100644
+index 000000000..dbe8633cc
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_dead_field.c
+@@ -0,0 +1,31 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-fipa-struct-reorg=3" } */
++
++#include 
++#include 
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = i;
++        arcs[i].b = i;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].b != i)
++            abort ();
++    }
++
++    return 0;
++}
++
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding static fc fields" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc_dead_field.c b/gcc/testsuite/gcc.dg/struct/sfc_dead_field.c
+new file mode 100644
+index 000000000..ccd8339ab
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc_dead_field.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-fipa-struct-reorg=3" } */
++
++#include 
++#include 
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++    arc_t* arcs = (arc_t*)calloc(MAX, sizeof(arc_t));
++    for (int i = 0; i < MAX; i++) {
++        arcs[i].a = 10000;
++        arcs[i].b = 10;
++    }
++
++    for (int i = 0; i < MAX; i++) {
++        if (arcs[i].a != 10000)
++            abort ();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a static compression field: a, max_value = 10000" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a static compression field: b" "struct_reorg" { xfail *-*-* } } } */
++/* { dg-final { scan-ipa-dump "size : 2" "struct_reorg" } } */
+-- 
+2.33.0
+
diff --git a/0357-struct-reorg-disable-malloc-support-when-struct_layo.patch b/0357-struct-reorg-disable-malloc-support-when-struct_layo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2904a9a099a2522f5627485e3bdb24934ff79ae2
--- /dev/null
+++ b/0357-struct-reorg-disable-malloc-support-when-struct_layo.patch
@@ -0,0 +1,137 @@
+From b10a7d82b6a87048481a17b2f49e01b4fc0e35c3 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Sun, 9 Mar 2025 16:04:31 +0800
+Subject: [PATCH] [struct-reorg] disable malloc support when
+ struct_layout_optimize_level > 1
+
+The struct-reorg opt does not support malloc well,
+so disable it for stability when struct_layout_optimize_level
+greater than 1.
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      |  1 -
+ gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c     |  2 +-
+ .../gcc.dg/struct/rf_create_fields_bug.c      |  4 +--
+ gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c      |  4 +--
+ .../gcc.dg/struct/rf_rewrite_cond_more_cmp.c  |  6 ++--
+ .../gcc.dg/struct/sfc-shadow_malloc.c         | 28 -------------------
+ 6 files changed, 8 insertions(+), 37 deletions(-)
+ delete mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index fcc26d6a4..98ba8fb12 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -4041,7 +4041,6 @@ ipa_struct_reorg::handled_allocation_stmt (gimple *stmt)
+ {
+   if ((current_layout_opt_level & STRUCT_REORDER_FIELDS)
+       && (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)
+-	  || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC)
+ 	  || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)))
+     return true;
+   if ((current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT
+diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c
+index b91efe10f..3cb473663 100644
+--- a/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c
++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c
+@@ -47,7 +47,7 @@ arc_t **ap = NULL;
+ int
+ main ()
+ {
+-  ap = (arc_t**) malloc(MAX * sizeof(arc_t*));
++  ap = (arc_t**) calloc(MAX, sizeof(arc_t*));
+   (*ap)[0].id = 300;
+   return 0;
+ }
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c
+index 7d7641f01..91ba80891 100644
+--- a/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c
++++ b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c
+@@ -74,9 +74,9 @@ main()
+     {
+       abort ();
+     }
+-  ap = (arc_t**) malloc(MAX * sizeof(arc_t*));
++  ap = (arc_t**) calloc(MAX, sizeof(arc_t*));
+   (*ap)[0].id = 300;
+   return 0;
+ }
+ 
+-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */
+\ No newline at end of file
++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c
+index 4df79e4f0..9d396e39a 100644
+--- a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c
++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c
+@@ -47,9 +47,9 @@ arc_t **ap = NULL;
+ int
+ main ()
+ {
+-  ap = (arc_t**) malloc(MAX * sizeof(arc_t*));
++  ap = (arc_t**) calloc(MAX, sizeof(arc_t*));
+   (*ap)[0].id = 300;
+   return 0;
+ }
+ 
+-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */
+\ No newline at end of file
++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c
+index 5ad206433..ca8333601 100644
+--- a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c
++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c
+@@ -44,8 +44,8 @@ struct arc
+ int
+ main()
+ {
+-  arc_p **ap = (arc_p**) malloc(1 * sizeof(arc_p*));
+-  arc_p **arcs_pointer_sorted = (arc_p**) malloc(1 * sizeof(arc_p*));
++  arc_p **ap = (arc_p**) calloc(1, sizeof(arc_p*));
++  arc_p **arcs_pointer_sorted = (arc_p**) calloc(1, sizeof(arc_p*));
+   arcs_pointer_sorted[0] = (arc_p*) calloc (1, sizeof(arc_p));
+ 
+   if (arcs_pointer_sorted >= ap)
+@@ -55,4 +55,4 @@ main()
+   return 0;
+ }
+ 
+-/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */
+\ No newline at end of file
++/* { dg-final { scan-ipa-dump "Number of structures to transform is 2" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+deleted file mode 100644
+index e709f7b25..000000000
+--- a/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
++++ /dev/null
+@@ -1,28 +0,0 @@
+-/* { dg-do compile } */
+-
+-#include 
+-#include 
+-
+-struct arc {
+-    unsigned long a;
+-    unsigned long b;
+-};
+-typedef struct arc arc_t;
+-
+-#define MAX 16
+-
+-int main() {
+-    arc_t* arcs = (arc_t*)malloc(MAX * sizeof(arc_t));
+-    for (int i = 0; i < MAX; i++) {
+-        arcs[i].a = 0;
+-    }
+-    for (int i = 0; i < MAX; i++) {
+-        arcs[i].a = i;
+-        arcs[i].b = i;
+-    }
+-    printf("%d, %d\n", arcs[10].a, arcs[10].b);
+-
+-    return 0;
+-}
+-
+-/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding shadow field" "struct_reorg" } } */
+-- 
+2.33.0
+
diff --git a/0358-struct-reorg-fix-residual-ssa_name-issue.patch b/0358-struct-reorg-fix-residual-ssa_name-issue.patch
new file mode 100644
index 0000000000000000000000000000000000000000..47b3f5c7cdbd7df3f7337859183a8daed634595a
--- /dev/null
+++ b/0358-struct-reorg-fix-residual-ssa_name-issue.patch
@@ -0,0 +1,41 @@
+From 7af6cfafb2be395e52aa2acbfdb82f73c48ede75 Mon Sep 17 00:00:00 2001
+From: huang-xiaoquan 
+Date: Sun, 9 Mar 2025 17:48:25 +0800
+Subject: [PATCH] [struct-reorg] fix residual ssa_name issue
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index fcc26d6a4..323b5e8ae 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -1695,6 +1695,7 @@ public:
+   hash_set  safe_functions;
+   auto_vec ext_func_types;
+   auto_vec_del fc_infos;
++  auto_vec  release_ssa_names;
+ 
+   bool done_recording;
+ 
+@@ -8391,6 +8392,7 @@ ipa_struct_reorg::rewrite_phi (gphi *phi)
+ 
+   gsi = gsi_for_phi (phi);
+   remove_phi_node (&gsi, false);
++  release_ssa_names.safe_push (gimple_phi_result (phi));
+ 
+   return true;
+ }
+@@ -8621,6 +8623,8 @@ ipa_struct_reorg::rewrite_functions (void)
+ 		}
+ 	    }
+ 	}
++      for (unsigned i = 0; i < release_ssa_names.length (); i++)
++	release_ssa_name (release_ssa_names[i]);
+ 
+       update_ssa (TODO_update_ssa_only_virtuals);
+ 
+-- 
+2.33.0
+
diff --git a/0359-tracer-static-Fix-divide-by-zero-error.patch b/0359-tracer-static-Fix-divide-by-zero-error.patch
new file mode 100644
index 0000000000000000000000000000000000000000..42420985a0980319f9cfddd2b4954c86acbf8f18
--- /dev/null
+++ b/0359-tracer-static-Fix-divide-by-zero-error.patch
@@ -0,0 +1,31 @@
+From 20ec64e203861ed4f2e52193d1a4b8f19af2be27 Mon Sep 17 00:00:00 2001
+From: Mingchuan Wu 
+Date: Mon, 10 Mar 2025 11:00:12 +0800
+Subject: [PATCH] [tracer-static] Fix divide-by-zero error. Fix divide-by-zero
+ error when using 'dump-tree-tracer'.
+
+---
+ gcc/tracer.cc | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/tracer.cc b/gcc/tracer.cc
+index 9b1578cd4..bc2da83ec 100644
+--- a/gcc/tracer.cc
++++ b/gcc/tracer.cc
+@@ -394,11 +394,11 @@ tail_duplicate (void)
+ 	  if (ignore_bb_p (bb))
+ 	    break;
+ 	}
+-      if (dump_file)
++      if (dump_file && (!flag_tracer_static || weighted_insns))
+ 	fprintf (dump_file, " covered now %.1f\n\n",
+ 		 traced_insns * 100.0 / weighted_insns);
+     }
+-  if (dump_file)
++  if (dump_file && (!flag_tracer_static || ninsns))
+     fprintf (dump_file, "Duplicated %i insns (%i%%)\n", nduplicated,
+ 	     nduplicated * 100 / ninsns);
+ 
+-- 
+2.48.1
+
diff --git a/0360-Struct-reorg-Re-enable-malloc-support-below-ptr_comp.patch b/0360-Struct-reorg-Re-enable-malloc-support-below-ptr_comp.patch
new file mode 100644
index 0000000000000000000000000000000000000000..dee1eb291cce52bfc849fd76961f295c7ffea880
--- /dev/null
+++ b/0360-Struct-reorg-Re-enable-malloc-support-below-ptr_comp.patch
@@ -0,0 +1,139 @@
+From e8b6cb5692c56a64507a5593533437663c88147d Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Fri, 14 Mar 2025 15:06:14 +0800
+Subject: [PATCH] [Struct-reorg] Re-enable malloc support below ptr_compression
+
+Since we found that completely disabling malloc support casued
+some expected scenarious to not be optimized properly, we have
+reopend malloc support below the pointer compression opt level.
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      |  8 +++--
+ gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c     |  2 +-
+ .../gcc.dg/struct/rf_create_fields_bug.c      |  2 +-
+ gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c      |  2 +-
+ .../gcc.dg/struct/rf_rewrite_cond_more_cmp.c  |  4 +--
+ .../gcc.dg/struct/sfc-shadow_malloc.c         | 29 +++++++++++++++++++
+ 6 files changed, 39 insertions(+), 8 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index bf88c02fd..b8a5f029c 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -4042,6 +4042,7 @@ ipa_struct_reorg::handled_allocation_stmt (gimple *stmt)
+ {
+   if ((current_layout_opt_level & STRUCT_REORDER_FIELDS)
+       && (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)
++	  || gimple_call_builtin_p (stmt, BUILT_IN_MALLOC)
+ 	  || gimple_call_builtin_p (stmt, BUILT_IN_CALLOC)))
+     return true;
+   if ((current_layout_opt_level == COMPLETE_STRUCT_RELAYOUT
+@@ -4850,12 +4851,13 @@ ipa_struct_reorg::check_alloc_num (gimple *stmt, srtype *type, bool ptrptr)
+       tree arg0 = gimple_call_arg (stmt, 0);
+       basic_block bb = gimple_bb (stmt);
+       cgraph_node *node = current_function->node;
+-      if (!ptrptr && current_layout_opt_level >= SEMI_RELAYOUT
++      if (!ptrptr && current_layout_opt_level >= POINTER_COMPRESSION_SAFE
+ 	  && gimple_call_builtin_p (stmt, BUILT_IN_MALLOC))
+ 	{
+ 	  /* Malloc is commonly used for allocations of
+-	  a single struct and semi-relayout will waste
+-	  a mess of memory, so we skip it.  */
++	  a single struct, it is no meaning to do pointer
++	  compression, and semi-relayout will waste a mess
++	  of memory, so we skip it.  */
+ 	  type->has_alloc_array = -4;
+ 	  return;
+ 	}
+diff --git a/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c
+index 3cb473663..b91efe10f 100644
+--- a/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c
++++ b/gcc/testsuite/gcc.dg/struct/dfe_ptr_ptr.c
+@@ -47,7 +47,7 @@ arc_t **ap = NULL;
+ int
+ main ()
+ {
+-  ap = (arc_t**) calloc(MAX, sizeof(arc_t*));
++  ap = (arc_t**) malloc(MAX * sizeof(arc_t*));
+   (*ap)[0].id = 300;
+   return 0;
+ }
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c
+index 91ba80891..17ca1c7e1 100644
+--- a/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c
++++ b/gcc/testsuite/gcc.dg/struct/rf_create_fields_bug.c
+@@ -74,7 +74,7 @@ main()
+     {
+       abort ();
+     }
+-  ap = (arc_t**) calloc(MAX, sizeof(arc_t*));
++  ap = (arc_t**) malloc(MAX * sizeof(arc_t*));
+   (*ap)[0].id = 300;
+   return 0;
+ }
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c
+index 9d396e39a..f38d94861 100644
+--- a/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c
++++ b/gcc/testsuite/gcc.dg/struct/rf_ptr_ptr.c
+@@ -47,7 +47,7 @@ arc_t **ap = NULL;
+ int
+ main ()
+ {
+-  ap = (arc_t**) calloc(MAX, sizeof(arc_t*));
++  ap = (arc_t**) malloc(MAX * sizeof(arc_t*));
+   (*ap)[0].id = 300;
+   return 0;
+ }
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c
+index ca8333601..46dd55bbd 100644
+--- a/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c
++++ b/gcc/testsuite/gcc.dg/struct/rf_rewrite_cond_more_cmp.c
+@@ -44,8 +44,8 @@ struct arc
+ int
+ main()
+ {
+-  arc_p **ap = (arc_p**) calloc(1, sizeof(arc_p*));
+-  arc_p **arcs_pointer_sorted = (arc_p**) calloc(1, sizeof(arc_p*));
++  arc_p **ap = (arc_p**) malloc(1 * sizeof(arc_p*));
++  arc_p **arcs_pointer_sorted = (arc_p**) malloc(1 * sizeof(arc_p*));
+   arcs_pointer_sorted[0] = (arc_p*) calloc (1, sizeof(arc_p));
+ 
+   if (arcs_pointer_sorted >= ap)
+diff --git a/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c b/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+new file mode 100644
+index 000000000..92112df20
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sfc-shadow_malloc.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++#define MAX 16
++
++int main() {
++  arc_t* arcs = (arc_t*)malloc(MAX * sizeof(arc_t));
++  for (int i = 0; i < MAX; i++) {
++    arcs[i].a = 0;
++  }
++
++  for (int i = 0; i < MAX; i++) {
++    arcs[i].a = i;
++    arcs[i].b = i;
++  }
++  printf("%d, %d\n", arcs[10].a, arcs[10].b);
++
++ return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding shadow field" "struct_reorg" } } */
+-- 
+2.33.0
+
diff --git a/0361-Enhancing-BOLT-Optimization-with-AI.patch b/0361-Enhancing-BOLT-Optimization-with-AI.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8852f96dfeec6a2d51e500cad90010eaf4ba47d0
--- /dev/null
+++ b/0361-Enhancing-BOLT-Optimization-with-AI.patch
@@ -0,0 +1,72 @@
+From 3dd233c1a7b20de2182ae4e98909ddace6612a0a Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=99=88=E9=B8=BF?= 
+Date: Tue, 25 Feb 2025 16:32:39 +0800
+Subject: [PATCH] Enhancing BOLT Optimization with AI.
+
+---
+ gcc/ipa-hardware-detection.cc |  2 +-
+ gcc/onnx.fdata                |  2 +-
+ gcc/opts.cc                   | 13 ++++++++++++-
+ 3 files changed, 14 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/ipa-hardware-detection.cc b/gcc/ipa-hardware-detection.cc
+index 75b74aa03..6b36d685c 100644
+--- a/gcc/ipa-hardware-detection.cc
++++ b/gcc/ipa-hardware-detection.cc
+@@ -89,7 +89,7 @@ create_part_bb (basic_block last_bb, tree part_base)
+ 		     &gsi, PLUS_EXPR, unsigned_type_node, part_base,
+ 		     build_int_cst (unsigned_type_node, 4294963967));
+   gcond *cond = gimple_build_cond (LE_EXPR, part_cond,
+-				   build_int_cst (unsigned_type_node, 2),
++				   build_int_cst (unsigned_type_node, 128),
+ 				   NULL_TREE, NULL_TREE);
+   gimple_set_location (cond, input_location);
+   gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
+diff --git a/gcc/onnx.fdata b/gcc/onnx.fdata
+index 234b1a045..77f4d9b1d 100644
+--- a/gcc/onnx.fdata
++++ b/gcc/onnx.fdata
+@@ -1 +1 @@
+-316365613139376535626535626234666331363163303835336362393535613530636234643633626364386566396132333232373733633230393865663664633761393137633266616431663436343236613231663865636236346133616662623761373633663830623231393063616534633032316538626436633731643237666333386462313164333630303936336137323863313634613031393931613164363237643262353162376133643935373036306336346161376563383862613138666663393538363731333639396239666362393336373737643238636639643761343231346131333463353261623633343633343866663966663365346231356532663139306164303361383836396333393339616236383439363661313661303665643535633961666563613431303466333534346564633533373862323031396339626536613030383761623236663432633564653130353935353135313736656235373632373739343662663034343334633035626465356237633439313164313338373637383365326138366162363234323765393736616438656463343339613031316630643031613465386464326334383565343838366435313137313166383433396531626137353932616538333330653164326438656166343339363262366264326632376564396434396333356565343733383164363264633937356663663338666530336166316634623264393031393536333863383165616536656238346462656337333638323338646535303638363933646565616264363966356566323465346538613762623864303766646338666264643466666537303263623162326539653435643130313061386235623631306630636163303536343164663364383738353266386330376562343962393037306133383363326138393238376435613332353933663235313030326664366166373632343532613130323237303265373433623362623162633661633363303235613236383166313465396162353938363931613765316565313864313038cd68834331701041d1d21041f17c20432483a94386647e4157c8e33b5f3d5d3ec5275e3ea689863c435a0f3a76acd63d5d9b803b24467c3baf847c3b67b89e3b852a313b2127853900000000d58ac23b200ab53a000000807d3119bc22f7a63a81549f3b93b5013baee4a33b62c1153b9ae08b3a6929a33b20038f399475983b430ab53a73fc0b3a2daa0ebad595953bc2f1e0bb33e9ccbbb978d83a5e77a53b41e4c93adf10a73bdf36643ad7fd983a61e8d93bc04a283a30c072382f942c3b5b3cc73a4392e43a422b093c79bc61b9a5309e3b00000000757baa3a03d8a93c3c31e33af526eb
bb000000006431d43a1d0ae73aa450783b8c57afb9b8eae939ec8fab3b9581d83920d7a1ba0fc1af38b6aece3ab50bafbbd50db63a26aba33bcdeda33b00d9493ac22dac3cf8c4233bc2966e3bdf1bca3a8fb4d13af9b0983b2cbda73bdae2aa3bc93bae3b39e1ba380857953be8e7a73b49e9df3b20b0233b9fe3d43a0dbcaa3bd10cf0b978eea53b761ebe3b0a50a23b70bd47b79a7720bc6cd4ee3ae0d0f93a9c333ebb5098dfbbbf8fa53b445efebac7b9993b6182b93aef267c3a4aa09e3b46d9a83b9f95983a379e913c6516123a1b2ebd3aaf943c3a0b90803becba92bce68f673be723253c5d7f813ad779613800000080af3c65ba6999743900000080957a003d82f2fe39baab4d3b7f348c39b8d3323b3c1e253ace952dbbc9d364bc3aafaf373d0a633be8fdee3968b0fa39eb70a83a7cba4e3bdf2407bc40f50f3d94f4c3b9a828573b3f2bc3b99a5763bcccb838bb24f011bae3400dbdc3074fba30a829bb3dde6e3ad7c2caba2b2aa7b8d479a7bbebe2603a7025583b00000000017414ba680386bc9b365e3aaacb03bc000000006afd90b9a64e263980eb223c80a48ebcca9703392310573b1fd419bbf7368abc17a2083a3ceafab95eb11cbcf29995b9a64264bc8bae403bc1dc6139631c88bc12e3373c07cf0c3cdc93a6b97edbc0b917754d3b5cdc143c61ef393b40a809baf3861dbbafce623be550513b828382bc359d513afa4a25ba31394c3bb013da3a9835553bf3d9553bec2b65bcee09bab9f6343e3c03a59f39fb11053a078e7cbc5bd006bcfe23363b08d12cbb3cfb533bb98a8fbadcb99139cbd1573b24725e3b01014fb6dcbc45ba6ee024bb318db1baf39ce9b952d625bc41afddb91d7dffbbc0ba163b0387b93b2594623b00000000f60cf9ba483c983b0000008015e6c6bcbd45983b77d62ebcfbb69f3b7b5752bcc334ab3b4f9806bc9d89063cc0675a3b807426bca81a9f3b7ef56f3b6a96a13a045937bcd4a2f33cb92173bc40af783b26ac40bc5fef6b3beba6fe3b8c7207bc5e25443bfd99a33be7e7403b4c2508bc0c87bb3bb95dcd3abe228b3bac03deb91a2ab03add753bbc000000002e04703be98f1fbccef2af3b17ebe93c0000000020e37a3b46ba913b1fd7003b1f3f133df85d423bacc843bc5fada7bbc8680d3d8423503b2afc6c3b4e43033dcfcc7c3bcece053cdbb44ebc4151823ba14426bc6e942c3b3bdc4d3a34967f3b7687783bd0cd3ebcfc75053ade324ebcd10c32bc9ff9fbbb0b7430bcf60e4abcd6b6e03b295db43b25c75d3b88334fbc8d95883ac9c73ebcddf941bc2b18083c43044c3b405414bd7617963b9910a03bd5e70c3d9356f23c3a2750bc472107bce47d47bc0125243b3c41953b0f6134bc8c403bbc8fb387
3ba5e218bcae5d06bc2dfe103b758a493b43cef63cd7438d3c2bf1eb3b2d4a833cf13a43bc5d14c4bd000000002932a7bc3191e4bb00000080224e753dea87dfbb41e28a3cbeb44b3d731d8f3c1312d2bb54e44dbc232b84bc74f9d9bd033bcb3cdda410bbeeeb47bdd7e44e3b3c3e21bb435712bdb3e6413c82e770393f20a53cc6642dbc325484bc410c4e3dcb49823dc262bd3c204a563d032393bb0887753c0cad943d3946abbbcb77b3bc9151c6ba860dc0bd00000080e5880d3a2f960ebd1bba99bcce3910bd0000008037acde3be98a983bd60b7c3c66ee27bd2431aab98b2b95bded06813bc17429bdf5a9e9bc4ff297bafad924bdc14d53bc901784bcad96073cd34989bc84580fbd1e276b3ca48e513c189796bbe15f8cbb39fa473cce9c693cbdd0843a4f07443dbf40c03c38a1893c3790ab3cb48c58bcc5e9863b684448bcb5c32abc0726a6ba1def9ebb57ce273d772b84bc1925c63d2e26d8bc24460cbcb0f807bc8dd0a5bc9ba312bd6ed5393c32e1f43cf3c58bbc8a5334bc8e0c53bbf78cb13c7805793c8d5800bbc4a5c2bcfc2c85bba79d3c3df00f493db55cb73cce71c43dc030f03cc953823c79c1f13d614db73d0000000074e98f3ca415183c000000801104803d9afea83ddf9ff93d835f9bbce8d8623cd67e093c453d143d7d8c90bc1434e23d24580b3e00711d3c729b903d81a0253c82e9b53cba65123ca564a23d7a53003c2c82ec3de139f93c58f58ebce101813ba5782d3d4e198e3dbaa40fbb58e2bc3bbf92943c98421e3df32c0c3cbc235ebcc2fe443c2789033e00000080b94ca73be81815bd1758e53c5df7053b00000080f9f63f3cc7a9893cb846823c65d2143c9bb50e3cced60c3e92fb983b583593bbbbfe263e390bdf3b696887bbd13e823c207890bc1cf0c23cd688163dd14e16bdd3cb813c95a6593c70d7083cd6c6e43b6d4d9b3c9455683c876e1f3e599ff83c4b377f3c2afd953cbeedd43ccbdb163d2d78fd3bcc84363c5c7fa63c22fedf3c3318e83d0ecdba3d0ea690bc462e9a3d0b11013cf19f503da4f8813db249c0bba7300f3c2d6c223dd1d7663b56b4c43d56e5f93c4799e43b0702a73d4e15ae3de8040e3cdfad72bc0ab6593d1fb7c9bb6f90b43dbfcab83b4cd802bbbd3c993be1a91c3c8f677cbaa83420bb0000008084bf263b6336adba00000000373bd13b521cffba733ac83bee8c9bba1306f73bbf5471ba8651773bc863ac3a6ed119bb926fc43b9368e5ba34f319bba9c8ebbaa74acb3b39169ebc812d573b4764beba5815ea3b5211caba956ec23a9e107d3b64dbc4ba674ac73be88107bb5354493b688c5cbaaf4571bab3d6b3bae566603b11b0b0ba6bd1d03b000000005b6cc1ba0720833c5210c7ba85cd97
bc000000003c1fc4bab35dbbba30b6fe3b389ea2bc97eb8eba37bae43b697a293b87969abc5c9e04bb83acc2ba5f8fadbcf872c5bab03daf3ad509fe3b2f81f9ba4317863cd808bb3b0177f03b02dabdbab2efbdba3b03d83bc09f223c6030ec3b0137c13b29f5663bf195ce3bc10eff3b18cda93b35486cba7dd7c8ba0d51003c34dc93ba891be33bb785ea3bbb75a73ae04f2abba21d8f3b9065c3ba8892bbbac37d96bc6a0c9dbc596cef3bce5a063bd64cec3be62fb0baaa5cbbba1acbd03b5cdfe13b0f37e9ba48bb653cc513733bc352a0bacba0ffba469ababdf17dae3c939271bd7af718be283c113ed15fbb3d00000080920e90badfa4d63d00000080df70d63c18f5c03dbbb677bde981e83d5a78d2bd0985093e663cffbd9a12803d3fc0b33d65b388bd50a0dd3d011acc3d2df0203ed04095bd5c9a8abd7294323dd404b33dabc5bdbd8042a33d93cb6f3d3b81f7bd4e2e823dfdba273d83ea863d7d0f3cbeaff2133e565a3d3d0d66ca3d035bea398af8073ec3b79abd00000000d078ad3dbe475c3c9267013e1db874bd000000809e1db03d23f9cb3d902b14be16ea52bdc41ac33db52abbbdf07981bed9442ebd4d94a83de0f7a93d745d41bd4cebb33d01f57c3da16adbbd8ae0b63d2d6c763cc1f40ebe098cbebddc59b83d256bb13d359fa9bdf886e5bd0dfcbbbdd68d0dbd726807be83579fbd05c9dcbdd8e8983ccd620d3edcf69f3dc980e8bdcb6c923e1cdcb3bdcc6ec1bde600823d030d993d9b3a1d3d420bcd3d754dde3df8132cbdfabb85bdce89c3bdefd054beafc6c8bd931e7c3d30fed83dbef795bd7fb1b3bd6d9eb73de344993cceb603bebd216b3de45c803d70a1953e1dfb62bea91d2a3ed284113f544008bfc50498be00000000d460e13b0d05b2be0000000066f28ebe9adb9ebe4ee6323efd01c7be63c7ac3e888dfabe44e5e23ec6f631bed07890beb0fe4a3e6217babee93daabecf501abfd1b9573e7bd53a3f805fe2be10a48cbecac09a3e52c67bbef48823be9a9dd83eb74340be75deeabd3fbf42be9d1c3c3f74e10abf923b05beaec4a4be66229fbbb35ff6be8a146c3e00000080638d84be04cb14be4dcfe7be53ec253f00000000208c89be7039a8bef1170c3f70ac0d3fdcd59bbe68f4973e6b1f903f16a3eb3e6d0a86be361a81be4cba023f82c88cbe01132ebea3feb73e79f391beeb6826be5130063fc4d9953e4a6690becd328abe13b7803efa6fc23e9823993efea5b83d60e0f43eff276c3ed453ba3e374b63bdd41502bf17e36dbe5034c53ea2ac9dbfff04903e64879e3e4c9d35bede8b6cbe435ed2be4e73abbe3020bfbef8c6e83e8630343ff9de9e3e78d65c3f3659a63eee6f35bed729b4be3954623ef8778f3ef4758f
be75fd4ebed9e3ed3e770a23be6b403ebed7e596bd656a093d6e463cbddfa103be2213f73d8a4c973d0000000050dc0bbb9d64b03d00000080d5e9283d3e429c3d1d0b3ebd35e5c03d101cacbd51b0e83dd890d5bd1378483dcc6a903d431d54bda0cab63d6376a63df1ab0a3e1a586bbd6bcfdbbd5cfb8d3db01a903dced598bdc300823d43fc3a3db75ccebdfea14a3d8f27fe3cf98c533de7d924bef762fc3df248103dde32a53d8d067e3a17bee53d5b6573bd00000080285b8b3d5cd0283d3447d93dd249c3bd00000000317f8d3db47da63df06ffebd933ca7bd3da29e3d0f9496bd7f1c6dbe2feb8abdd761863d8d3f883dc33d9abdecee903dd3ed453dc93cb4bd785f933d08c8393d5eb3f4bdef249bbd4ffb943d98be8e3d7b5487bd365bbebd103397bd19bdd2bcbc53e4bd9eb77cbd7d6fb5bdcc15713c4f00f03d54fc7e3d1c98c0bd05dd853e0dea8fbd5a2d9cbd35aa4b3d879a713df19b783d214da73d821bb73dc52889bdb707d4bdfd3d9ebdb6e53cbe1ee9a2bdc740443d3594b23de3046bbd4acb8fbdbf54943d23de5e3dbd36ddbddae5363d5155473de961aa3ed3a432be5d8e583e1d9a173f318110bfbc47aabe00000000a572363c9755c8be00000000c5235abe14fbb1be3f59573e57f8dbbe0adcc13e40d705bf1517f43eb1d467be07b3a2be40216f3eb6d9cfbe84b1bbbeba571cbf5c30843e96700c3f700eb6beee52a3bedcd6ab3eb39b93be9fd159be7524eb3efdd668beee2a1abe9dd171be0acf3d3f227e12bfbb312dbe3966bbbee3f99bbcec2f01bf2790883e0000008047ae9dbe59a438be26a4f9bedaf7f93e000000004b62a0bea849bdbe00ac123fa905d63e3e96b2bed43da93eef558d3f0c73b23e65bf96be67359abe8cc9c53e372ba4be184265becf66cb3edd11a7bed6d249be506f0d3f7429af3e0ba1a8bed2aca1becacc973e154ad73e9df5a93e53dd003e4cbd023fedc88d3e01e5cc3eff3eb3bd194d0abfa27090beb4a9d93ed0bf9dbf7c95a13e7bb5af3eb12a6bbeace488beb5419fbe86c8bebedea8d1beb735b03e6889073fb9f1b13e34835e3f7d7bb73ef8e961be08c5cabe6ff4833e956fa13e33e0a7befd926ebe6009fe3e8bc653be843b65be6a18a0bdd534f93c01fc52bd6b6f09be77e3fd3dc1c59f3d00000000e97d92bbf664b83d00000000972f193da896a43d65e754bd5e81c83d269cb4bd1d84ef3d1f11ddbd55385e3d020b993d4fdf69bd12a5be3ddd95ae3d62cb0d3e7ff87fbd7418c7bdf8ca803d1ed5983d13dea1bd480c8b3df3be503d2202d6bd540d5e3dc93e133d530d673d1b012bbe8f7c013eb79d243d9072ad3d772a273acfa6ec3d0ce383bd00000000fd56943de9119e3c0f64e03dd0cdb0
bd00000080274f963deba6ae3d1c0a05be683297bd1f18a73d49b19fbdc64270be58657bbd263a8f3d374f913d63948bbda1b0993ddab55b3d7485bcbd00fc9b3df3edb03c892c00be9350a6bd25af9d3d968b973d79fe90bdeca4c9bdba46a0bddb1004bdbc84ebbde25788bd3ab0bdbda690963ccab6f63d60cc883d918ec8bd51ba873e9a3e99bdd01da5bde567613d50fd813d6981613d7163af3ddbdcbe3d473578bd8c07c0bd6924a7bd21c53fbe8ca6abbdaaf9573d158dba3d86c37fbd572399bd2a109d3d6994dc3c1e95e4bd4e1d4b3d7eca5a3d032771bdc8f5403d4b1a09bde53ceabdbfd4db3de573753d000000004909b9ba32b88f3d00000000c1c16c3d8b42803d994910bd8ba8a03dac6f8bbd482cca3db803b7bd3e900f3dbd44693d09b223bd2e39963db96c893d58fbf83dd2092ebd01b411be3149c43d891f633dd5b879bd93444b3d60e4033dfcc3aebd6c231b3de078bd3c57241d3dc16d17bed815e03d92dad63cc208853d82c1823a3acdc63d6d833ebd00000000ad06563de91bf63ca912bb3d854001be00000000941b5e3d5cd1873d744ce1bd91fce3bd57a17b3d804275bd64d465be11f8bdbdae6a583d0b73503dfc7bd1bd7155633d3e6a0c3db57194bd8eae6b3d6798093d58a9d7bdc3db71bdbd2b693d602c5f3d20c14fbd73e59cbdd53e77bdf28694bc1c71c5bd7f773ebd704a96bdd115383cf8ebd13df406403d27019fbde07f823eef7668bd4fcf7fbd6184123d9a113f3ddd37ad3dfe6a8a3dda459a3d3a30bbbdafb00cbe663c80bd0e3832be203886bdf262123d1972913df4a036bd978267bd99a5673d02f7283d6ae8bfbdd981033d3e82193df70172bdf1b33b3d2c8309bdf837ebbd4944dc3d875a763d000000001630b3baef2e903d000000008d6c663da8b4803d58ca10bd9221a13d74e08bbd7fa6ca3d338bb7bd700e103d71236a3dc13924bd00b2963d3de8893d2c6cf93d4ab12ebd894d11be9e58c13d81f0633dae8e7abd69054c3d5b51043d5d42afbdc1b21b3db199bd3cdab61d3d61aa17be7e84e03d2c57d73c907b853d18e27a3ac747c73d073e3fbd0000000013d0563d4ba4ec3cf98ebb3db3d400be0000000068ea5e3df445883d2b22e2bded78e2bd97817c3d232376bdb02f66becefebbbd053d593d6b38513d5d50cfbdab27643d1ce40c3d1ce994bdc0866c3dc167043d7161d8bd9abb72bd8c016a3ddcfa5f3d6e8650bdf66e9dbda51c78bd8b6494bc14dcc5bd07203fbd87b396bd8478363cab61d23dd8be403d137e9fbda19d833edb4969bdf35780bd9506133d4ac83f3d4157a93d2be18a3d44c19a3db553b9bd422c0cbe29ae80bd8b1f32be2ab086bd7fe6123d5ce9913d1b4c37bd253c68bdf37a68
3dda1d253d1866c0bd1fef033d630f1a3d55a78ebd9363e43c877813bdf06300be01e5f03d82788b3d00000000c3734fba035aa53d00000000e511873d7b68913d4e6e1cbd7718b73d813fa9bdcc68e13d5460cdbdbca5213daa5f843dade935bd383bac3d31919b3d2bf8073e69c848bd95ef2cbe5d65d83dba17833d49ee93bddd1f693de61c123dab7ac5bdd1dd2d3d5cf9643c6e50333d290923bebe38f63d2111e33cba73993d0a4b1c3aff26de3de00c5abd0000008092d2793d9b92fc3c1457d13de3451bbe00000080993e803dcd659b3d9082f7bd691808bef0f6913d51b390bdbab66cbe5acce9bda06e743dd62e733d250efbbd92ab833d46cc1d3d0c53b4bdea20873d8341113dbc72edbda26d8cbd369a873d164c813d395d75bdbe4ab3bd341392bd2d3f81bc05cadcbd939c5ebdc34eb6bd0175de3b5116e93d1b04603d00c2c2bda59a863e93b488bd09f997bda483253d75f4573dfd1ad83d23ed9c3d4bbfad3dc5f9e5bdf05c27be036399bd72e93bbe8204a0bd71d8243d7988a73d352350bdb95188bdcbd0863df73d3e3d0cbed5bd59aa133d40402b3d52f370bd85b63b3d0fd708bd723aeabdcdb1db3d87df743d000000806cedb3bad56b8f3d00000000d46d663d35e87f3d67e50fbd8859a03da22e8bbd43f6c93d37deb6bd811c0f3daeb1683d765223bdb9ef953d1c24893da7e7f83d15a22dbdd3ac11bee609c13d9f83623d703979bd21b74a3d7b7b033def92aebd70b81a3d730bbd3c5cbd1c3d0ecd17bef7f0df3d8140d63cd8b9843d1d757e3ac997c63d11023ebd00000000937d553d0e57ef3c0edaba3dc82f01be000000807c885d3ded82873d247ae1bd3436e3bd170e7b3d5ab774bd32eb68bef817bdbd6edc573ddbec4f3d4721d1bde3c1623d5ffa0b3df23994bd8a176b3d63e0053deec1d7bd7a5971bd5899683d57945e3d72434fbd92a59cbd4c9e76bde50b94bcc364c5bdab1a3ebd841a96bd88b5363cb8bbd13d548a3f3dd1e69ebd72ce833edbe867bd6f517fbda50d123dc88a3e3d8f2ba93dd51b8a3d83fc993d10a3babddc870cbe05e47fbdc04732be17f885bdfdfe113d7b27913dd82436bd100667bddb16673d808c263dbfbcbfbdfa2c033d6118193dc7b077bde7d4433ddcae0dbdc3a7efbd20dfe03dbb5f7c3d00000000603dbdbab68b933d00000080e852703d98cf833d651615bd4fbfa43dce3a8fbdd0edce3d2297bbbdfe4c143dbdf46f3de7d728bd892b9a3d092a8d3d635efe3dce9033bdcff113be186ac83d49a3693d124780bd8e56513d255a083d402eb3bde733203df262c43c6343223d04aa1abe2d29e53d0392de3cb1a9883d8ea7833ab384cb3d0e5744bd00000000d2555c3d8ed7f63caaa1bf3d462903
be000000801f8e643d88808b3d81a9e6bd24a3e8bd4854813dee347cbdeafb6bbe1474c1bd62c75e3d92a6563d5559d5bda6e2693dbb14113dd06d98bd8065723d3e040a3d071cddbdc1d478bd83d76f3d8d9e653d47f855bde307a1bdaf257ebdce119abce91fcabd9f3a44bdee319abd37ff3e3ccecad63d6be6453d101fa3bd177f853e88256fbdab7483bd6e52173d12e6443da4ecaf3d32268e3d324b9e3d0603bfbd44ea0ebe44cb83bdad6235be57ee89bd933e173da14e953dd84a3cbd7c026ebd384d6e3d9d5f2b3de293c4bd3104083dbe891e3d97b16fbd8d903a3dc1e207bd7219eabdc854db3df7f4733d00000000e850b1ba61f98e3d000000005023653d9b007f3dc20d0fbd7beb9f3dfab88abdd293c93d1e74b6bdb4490e3d3bc7673d755122bd3e7e953d89af883dc493f83d8dbc2cbdcd0711be5457c03d389b613d165078bd9bcf493d41ae023d0226aebd2ddc193d9366bb3c94df1b3d008d17be2995df3d3dbcd43c3446843d9367763ad233c63da91b3dbd000000805b93543d7659ea3c8b72ba3d3a4100be00000080009f5c3dc40f873d922ce1bd32cce0bdcc237a3df4cb73bd988d68bef1ddbabdc7f2563d12034f3d497ccebd01d8613dde280b3d17c693bdf72d6a3dd434033d60b6d7bdb06b70bddbae673d96ab5d3d24584ebdc2339cbdb0b675bd189c92bc81e0c4bd3e2b3dbdbd8e95bd1ac1343c405bd13dbca23e3d396f9ebda2b7833e68ff66bd3d687ebd9a39113d03a53d3df37fa83d74a9893df58c993d4e6db8bdf6030cbedcfa7ebd142532be818285bd7426113d02b5903d4d4035bd28f265bdd02b663da385233d2056bfbd115b023db03c183db67f6bbd2238373dd9f904bd22b7e6bd6a08d83dee556f3d00000000ade0acba75698c3d00000000f81e613d4b3c7a3de7e50bbd952b9d3da23788bdd76dc63d2d7cb3bd59220b3d7e4f633d7cf91ebde1db923d2333863dd90ef53dfb1b29bd309d0fbe4f9dbd3d9c365d3d57a273bd67be453d2576ff3c0b43abbde685163d9d25b73c9e81183dc2bb15be5940dc3dd0c4cf3cd1d5813da770743ad315c33df03939bd00000080845d503d6a53e83ccd6fb73da62cfebd00000080394c583db596843d6201debd1f8fdfbd2971753d652e6fbd538966be80adb9bdc0b3523dd9e04a3ddf6dcdbdbf735d3d780f083d5d2a91bd6ead653d6e14023d8e3fd4bd8fda6bbd9f37633dc054593d64394abd3a8099bd5d0f71bd21308fbccde4c1bdbb5139bd100693bdb242303c8a24ce3d22bc3a3dc7bb9bbdb39e823e728962bd89a779bd5f050e3d61c0393db4f7a53d2828873d21de963dd6f4b6bd38820abe1b387abd071b30be521083bd6bf50d3d5f208e3d217c31bd92a961bd07ba61
3dbd25223d3f48bcbd38d9fe3c41ed143dd523d63e2f30a6be5ce7723ed5b3503f886143bf5a97d9be00000000c1361f3cccf6febe00000000c6a7ccbe1e6ae3be53717f3e548d0ebf056cf73e199933bf669a223feb077ebe93bfcebea700913e864805bf36c6f3be43625dbfe4309a3e1cd6843f05862bbfa03fc9be5676dd3e6810b4be955869be71381b3f575a89be84a727be4f278bbe140b873fb12947bf300e3ebed7eaebbe6f18e1bb9c9930bf87c0a83e00000080a1a9bdbe755354be942526bf6d1c6c3f0000008047d1c4be18e0f0be20e6483f35e1493fbe1adfbe1a73d93e563bcf3fc2fb273f6dc3bfbe63b5b8bec4753a3f2078c9be5a7578bef4c2033f06e3d0beea966dbe5368403f7778d63e0baccebe57bfc5be121fb83eee420b3f4424db3e763c033e51892f3ffbd8a83e686e053fdedaa1bd5d843abfda1faabe45450d3f5488eabf600cce3e4ce3e23ed6a181be523aa9be643b16bf1a7ff5be76e208bf7cdd253f4b25803f6a67e33e789a9e3fe61fee3e8b9781bea90601bfbac0a13ead42cd3e5d54cdbe8fe093be127f2a3fccd368be1ee887be106a0c3eeee1893e82029f3d5725303e5642663f3d48ee3e04d172bdd485bc3d3547ff3ebf8421bebc6bdd3e023dcb3e23feb53e08b1363fd7ad833e92133b3f3091903e0fafc13f51adf03e4b2e0c3e6612e43e92e1dd3e0d091e3fb063833e89ea713e2698003ff850313fb951543ede510b3feed7bd3ff97d843ee29fd73e0161653eeff1ad3e7755773e09023e3ffd34bd3ebdd90e3f0fa2503eee46033feb2b833eef03febd8727d83e8ae5a83f665d0a3f17b35d3ed825d5bba70bf43e5126033f1900253e8a569a3e88b7ba3e7c70703eb6557e3ec9ba7d3e7c13ae3e5830d03ecc7b683e5adeee3ec9c7b83f2190693e9937bc3e9a92b03f24c51b3e101df53d7e77e33eff50233fd3666b3edf57163eee32983e5416253eee1e513e11fe1f3ecdef4e3ef340053e45e4273f99bccd3eb76e623e886d9e3f8fbd5a3eb44f543e27dbc33fd1e4d63eea1a123f46521a3f75a2083fdbf8533e77cf6c3e0bdb8a3eec4fc83e2d98653ec5310c3f5ec9ea3e3bf2513e3fb13f3e7277c13edbe3bf3ea6c69e3ea869d03eee4ba73eb83d76be0000803f
+\ No newline at end of file
++656137356462666463346364333361623035396139323366643262383764363763323530613631653861653634666630333030316562323662346133633566313233326432366139383465346338376266393132363438333765656463366235613461313434346139333334396265306163333731646537376430643834323664623863366163343363643130313435636565623834363361316133393230363937653835653762353534626439663133633538623062353439646237616630333237666136663433386334626639643465303163653832333062643863333664336630376231643964316231663933656333386338656262303734376137313565643963396535653131303763646533393234333735613333633132353061393531333935623539643834373266303861633739373862366663376365383233326139383939363566373061373361613939336537366631353334346563313061373365663635633332663437653136383235343635623234366430373330366336363237623962656465373233346131343264313137653838643334616430346339363732613237623866636364313232613934343261643231386531356430343965303330306332326266336634626163333461643139653962326566303064343333623037313762303934626336363537616339343637306633633066333231613063623339333539376461316632653234353938616133616463623534356232346135666261616339646638373031356633306161626465643665633066616264623965656138613233353331303236363565616133323131653935643363353832366663633434626236376663386335666364333530336433353234383031636264353761616638663031613263353738326438656265623236653338323232386565626464393034633962373835363264656664616439353336623462376139333134656662373033626135336138333136643032636430653334303861616439333736306363383862306439623962646435383931613161653334623666313236366366373962356536656434396231303338646265323666386461366430396262353536313433636132653466623061346164303635636162336536383062306637306438626232646636393462353563366437386531316463383239373361643230643566333736663330656538643461313161306163666361663064383962373736636162323565383865336630333461343939336231366437386265323439626332336166376262623837353163376533353066636339313233323761613766333633343432623331373530376432376534623831333339383964
63343965396630366330343962373934613362633064633337383139393031643932623364656562376164666435623033623336323038383361626664346362653633613334666465643337363034373834326237386364613137363365393963643061643939373130643531366432316631353063646439366461373864346135643733356434303631626235346231333666333531376332366136356434333062396536386661653432316361633761396435646533333366373931383562336331646234653964353963643530316630666436613531306333363033653133653234613430623462663231356533373935613961343064363061383435366662376236336332643131653831373031613233316538356166643462356462383161626331646435366432373137333265643032343566383639316264633564616433343736643666363335363037376231616131643633346139376334313039623539623733353437663934306331646338643231633066313837333838663264336138656563666565376664383637393032666434616333316261316339666466353162616132613433376266633039326131663733326532613866303636373633373432623037373061366532366533326632626535653437613337333039346633663962343138386338343364356334626561356132613461633462626264663766616337636533396232303632353733373731353431306632336462393730353462366562323730656663636662376362343137366638323638313432636561313665323862323164633935393837303065663164623865396164343465376531383431623134623132356363663335613137653863666661386630363832336264343663663835366534616632363362386161663936386261383530623464396166666265633439336661306665306138333365396534363365383761326363653063386330386131336363636134613261343136396233653961346331336663393463376136376162303939636539326363363865613437616261663136383861623861313732303266386461303531336335333539643237353036323366323434313634633966343533383465663036653735333630613736643464626430346630363366363038636363633738306631663132383635313539313464613937303532363734616430396532353666633539386463636434643338356461643538386236646432613033653631393463613938643837346233316236623062316532353333626331343038643064366138626163306131363161363931323231373438313633646463623132313033623161313137333663623865663535
3635626666303535663331363332353338313363396334313631333034656133666365353561643830356565633137346638643739636136376432303761633436666465336232356236653164353163346461656165333038653266336161383966633961323462376262386430363536323932373263343731313562356139336265383765336139643837333966623265386131666561366161333138353261663139303338393733346335313934363761393137633266616431663436343236613231663865636236346133616662623761373633663830623231393063616534633032316538626436633731643261313866666339353836373133363939623966636239333637373764323863663964376134323134613133346335326162363334363334386666396666336534646565616264363966356566323465346538613762623864303766646338666264643466666537303263623162326539653435643130313061386235623631306232303139633962653661303038376162323666343263356465313035393535313531373665623537363237373934366266303434333463303562646535623762313565326631393061643033613838363963333933396162363834393636613136613036656435356339616665636134313034663335343465646335333738663063616330353634316466336438373835326638633037656234396239303730613338336332613839323837643561333235393366323531303032666436616435313137313166383433396531626137353932616538333330653164326438656166343339363262366264326632376564396434396333356565343733383137666333386462313164333630303936336137323863313634613031393931613164363237643262353162376133643935373036306336346161376563383862316365613139376535626535626234666331363163303835336362393535613530636234643633626364386566396132333232373733633230393865663664636334393131643133383736373833653261383661623632343237653937366164386564633433396130313166306430316134653864643263343835653438383687144d4395a52c41e3842b41b03f3743f207264565084e41e0fbdf3bd429b13da15eb33d7e47b93b309daa3890d2d93d0000803f2bd5bbb95024b1399db4d33b2956c2392e99b93969b868ba90d4eb3bd611a9b92d66c739a2115b3b1257b6b9ed46b2b917deac3b5b0d2a3c14887e3ce74b3eba00000000386ea33945cf5abb9cbce9ba3c4ff0ba5cc05b3bb4bc9eb91e09b03a6113e9ba3e17b539267dab397fdea9b906eea5397713973bfc7a2c3b51ff733b50c313
bb000000003ea5d239842c8c3b6d275e3af5c58b394139b2391f56403bf47e56bb357a81bbc956923b57d2cc39909aaf3944807e3c4a78cbba9938a9b9d84ba7b99796bcb9d7bea5b925e37a39ff19a73ba25dc1397357b1b9c13cd6bb2d36423b6da35a3b90e500bbd402a0b95f930bbbbddfaeb9071fa9395281a6b934e3a23b0f238a3b93e1973964d9a9b98b2f273ceb4a9aba3ccab6b9e3ba033b48220ebb19fcaf3b6290a93b034407bb782fa1b89335943bb388a7b9655cc0b96a361d3c67b9b0b99e28cf39076f2dbaf05afdbafa6cb43962bc2e3bd590bebc30339f3b4476a23963e6663bd7ad183ca5c00cbb8a69c13956f574ba97dcf93b97098d3bd471ae3bcbb06dbbfddca9b9d7c803bc3eb703bc9bdaa8bb1c6718bc8e5bcebb224c013bcb7684bb4d85943b2ef403bcc090893bf736aa3beb62a13b3c1cb6bbb821a7bbaee808bbfe3dd33c0000008027b0f5bbd6e09d3ce2db183d576fb7bb3b9761bc4111813be54cbcbbab65043c93ce1cbce0b3c6bbca4e953bb347fabbc466ff3a88f532bc11bf4c3b72636dbc00000080850afebbaaba5e3b476f973c7ae755bbde44dbbbfac1633bab7793bbf3ae90bb010a84bb3a1c2cbc6597d6badcd277bbddbbfe3b5898933b6063903b0d8fb33b028d8e3b2adcdcba9f1585bb2e91b9bb582aa03b6334f8bb6e31333b5569253b1e71533b8e977e3bcaf945bba6b59c3b603ab2bb02c88e3b0ae4493bfc457ebb2ae3ddbbe8f9943b0c38a4bb160ed73b73adab3bb6100e3bf28fd83baa70603bad2683bb9d90033c52703a3c9546693b7d34903b63a6b83bf1af9cbb3b19a23bedd0f8bb58efb13b105b72bb17300abc025488bb0a5b543e33573a3b24eff2bb286f75bb5d55a9bbd8e4803beea116bca9cc2d3b1e1205bc1adf75bbfcaa413bf8b491bb3050903b6537a83c1736213cc6271abaf7b6293c98bae33be7b710bb07e333ba55389dbb41441f3c8bf89abbb9acb6bbdeb9acbb416e053be7a63fba124e36bc396d97bb000000809a5a083c15d242bca07d0abca36bba3c09ac8e3cf4ae85bb51f5cd3b8abb18bccc0f383c4a58dc3b335a9ebba65c0b3c2324d9bb35bbb83c748069bba62961bc000000003e13193c863f71bb752b1cbc578e6b3b5807f33b633e9abb9e8dac3c78dcb03c7b35193ac5df483cca37e83ad74a50bcbfd529bc33469cbb786498bbafc6c1bbe0f895bba7a7ed3a8ef7a339cb5ccd3be140abbbc2041a3ce9e950bb828f5ebbd63a6fbbdd1d85bb9d07a93cdc24a7bb765ac53b467b96bbe863b4bb083ea73a7869f83b4cf49dbb874003ba2440dfbb356db8bbf63d2abb79b4d5bb3b1bafbb27487139a45115bc45e7a8bcf34775bbfb9e98bb68e4c7bb1e930fb736e8acbb6c4b75
3cce27b5bb830ba83c91ce1b3cdc1def3ae198133ef5e688bb6b67073cdd1ac53a27b589ba81469fbbe150293c3c2884bba35bdd3b1583ba3ac3a194bb3debb13c495799bb078c2ebc384b7d3c97a8493cebb6853c73bb4d3c024dd83c696ca13b1cc702bc6847243ce3ac5dbcfa771abc96221fbc6147fabb3641023cb496023d28f266bb00000000bf65693ccfff0d3cbf68a8bb3b268bbc15de51bca098e1bb9c5980bdec61ee3c66cf983c1024423c51e0febb12336c3c25bd1c3dcebf50bc88eb4cbc0eaea73c00000000c56c163c597c5fbcaa7b593cb7d2e33b7b235a3c0c2447bc95c9edbba8320abc75b98b3cb7d19a3cf51d623bcd0a023d7e91ef3c725604bc91fafdbbdf1f21bc2f3bfcbb274a493b7c78393cdb0e213c01a719bc0a0368bd6ad23cbc210741bc0288df3c6303d1bb8c93bbbb97420fbcbe16353cc8bc18bcf9a61c3d62882a3c8a25683cff2e01bcb0580e3c5b11f33c47dd10bc5b522abc4230df3c6ca21c3d13c0103c8129e33c199e5c3c05752dbc9515f0bba0b724bcac0b383c1f2b12bcbe768d3c80baf23cd6fbc9bb1b04843c84da173cae2dbabc64e63dbc08d8613cbd872b3c029c2c3ce63be03c5e41853c608888bd90f692bd68bf2c3cd0f843bc4497bcbb3161e4bb4ede983c42e72bbc0283dd3b176925bcd6d71bbc7ba42cbb911a6a3dadca0b3c68582fbcc664f23c629c1b3c4f33133cd046413c7eb83c3ddbe0d1baf307ad3b00000080101215bcc0708e3ca925dd3b7899f33cfdb1493dc7bbf43bfb8b5dbb6ee9d2bb584924bcb3cf0abc0b6c0c3ce93e14bccd51943a3302dd3acfabb03ca9c22cbc000000005cb22abcaef7f33cdf6aae3ba949d6bb1ef214bc878db23c3863783b62c5363b67d0eb3c5e192dbc75b57cbb3d43ecbae55cd1bb0cd10a3c00d4083c8efd223cccb5063c1b2d70bb3abcbd3c883c14bc0f01133cee04963c6d7bad3c7480353cba707fbbc16cf63bcdd4a939ad1f123cde2f0cbc2c76053c59ff09bb009bba3cb24a0cbca7750b3c64f9493d982287bbbe111e3c7f48483cd59b5bbbea43e1baf906243d99f3e1bbbd01863c4f95d73c25a8073cfa81273c42548f3cef4b153c800841bc318aa9bb7c98873a55c218bc1338373dfa26bb3c22f2c53c6bbc0dbcc7900e3d0819da3ccec359bb9a671fbcfee714bcbb3f74bb8851ce3c1c76ac3cd7fef039437e0a3ccb42bcba11ea103a388af63b94700f3a4ef6293ad86816b900078b3bd1c62cba4ee1213a7a4b2a3bb98b30ba291c30ba4d97813b71c3033c9815a33b627549ba000000804646103a39a742bb747c0dbbe3ff41bb1186043b9aad27ba947610bad46e52bac8a9053acb9e223a86122dbafce80f3ae1144cba610bc73a34b4543be4f2bb
bb00000000dbcd253ac4727d3bcf8cf33a44811d3ac114213ab973fd3a78684ebbfd3384bb29a1ed3a4a70063a11fb103a3f3aa83b629d2aba74f52cba6d1d2cba7aad31bab36e2bba566e013a79481d3b5c663c3a09cf2fba731f29bcc28e1c3b7067313b8bbaa6b9454d28ba15bab6bad4e82eba107a263a51992bbabaa220babbdc023bfd62103a28172dba1db3003cd7ef11b9c15330bae79bbc3a25ae20ba21b1feb95d37183be0206fba67882bbb784a883bba292cba034132ba4df5f43b60222fbaf5b10a3a75e20a3996f8f1bad29f0e3adb20b8397d2257bc2faa893ba3d90f3ab059bf3abba4db3b7757cdb9cde80d3a934d85bb4b8be639fadd0e3b2bb99b3b6dc792bb95cf2dbae68660be8b1d7f3e2632363e6a588c3e0dc7453e8a4979bd1d81593e87b001beae177e3e4f7449be4d9119be437e10bed7162dbe5e43333e3d94acbe80726ebd000000008bb1683edfc6573ebafeb0bdda2e62be36b673bed771d6bd2c5f453e93cd52be9a55963e74b73f3e90e102be14af6c3e2db5afbe2920f4bec3a4fabdc533b53e00000080be5a753e34650dbe4d70473f2edcce3d45ca513ebb4615be2b554bbf9d8900bf1c142f3e7350a23efc363f3dce2ea5bed70f5dbe30da00be4a45fabd5c2c24bebf70f5bdec31433dac40363ef82f333e4d140fbe16dca83e1aabccbd39e8c2bd7f85c2bd7d4bd7bd62b629bf3e2d0bbe0a0a2d3edcbcf6bd11c8c3be4ca7123e92f7543ea87e02be0c1d2d3e5f362abebc491bbe7f4c95bd614426bee96ed8bed1ba3c3e666850bee6ddd53eda2819be1c5efbbd970e2abed2e41c3e043410be6a7d983ecb6910be90d301bf8248823e5c5f1f3e50043c3ed31cfbbdb4c6663e1b1d083e27e14b3eee2aecbd72008c3eb518e03e444e783e3fe10a3e59940dbe1c03e6be9f11fdbd845eda3dbbd702beb486b7bdce1210be4662cabd997aff3ca8acd8bd5d06843d914d02beb963c83d9ca89c3d954a933d7154ab3da863b4bd1ba5283ef949f03c000000004c7ceebddd4ad1bdb34f333d5226db3d8e4ee83d5ec3593d8c34cabd488ed83de8691abe0e1ec4bd1041853dd79cf2bd503b2f3e7b316f3e4cdf7b3d0f623abe000000807d95fbbd06008d3ddcd1c6be1f1152bd17cdd6bd0a1d983d8d68d03ea043833eba4ab0bdf7d026be8359c0bc3967213e041ce33d3e29833d14a97e3d139ca73da1ae793d8060c4bc8356b7bde92cb7bd2ed5913d1c2d2abe50a24f3ddcf4463d94d5473d83a45a3dfce4b13ed5ce8d3dd2d1b0bdde047b3d6462433e65aa94bdaa15dabdfbda843ddf6caebd139fae3db36f9e3d67e8183d6bf4aa3d48e8573e985cbdbdc40fd63dbacb50be5309983d43cb7f3ddaafad3d4ad59ebd6bfe923d7fa51c
be58b4933de72d873e13a905bede95a0bd7d6cbabdcc197e3d5e80ecbd63418abd023cccbd87c7723da8b70fbe2a2867be97f7febde8028dbd3f9c8e3d8e4a6b3e19c6803d64c364be492c7e3ea28b373e97c28b3e8750453edc6c7ebdc7ad5a3ebebd01bead267d3ef16349beeb7919beaf7510be7b8e2dbe0fc8333e5bb5a0bea8de6ebd00000080e2f0673e49ed5b3e0ff5b0bdadf362be411c79be6dc5d6bd38f5473e5cfe52be16a8953ea54c3f3efbec02bec1e56b3ea8e6a6beceeefcbe360af8bde438b13e00000000fe7f743e219a0bbe578b503f0c26cf3d7a3a513e365516be9dbc4bbfd3f500bf66902f3e5686a13efa6a403dd3e599be7b1e5dbe9ce800be3b6cfabddc0224be459ef5bd6a68443da0f7363ee9db323ee40d0fbe9c41a63e8c16cbbd8073c2bdc758c5bd759dd7bd9bd829bf9d2c0bbe6ac32c3e52e9f6bd1dcdb9be9ba9123eaa61543eaa8a02beaf7d2d3ebbb72abe432f1bbe509694bd083127be2a22cdbec25e3d3e8c9950be2984e23e170217be3d83fbbd95da29be504f1d3ebd2b10bec5ca973e92d110be045402bf55c9813e1ceb1f3ed27c403ef27ffabd3b0a663e5ed4073e34104d3ecfe2eebde06a8b3e4098ea3e7f7e7c3ea2ba0a3ead3e0dbe56e6e7be4d34fdbda585d93d565202be9adfb6bdd9830fbe879ac9bde754003dfad4d7bd69c8833d9cc801bea9a2c73def389c3ddaed923dddc2aa3d3cc1b3bdc90c283ebfadf13c00000000f887edbd1c7dd0bd6f8b333d324cda3da364e73d3ead593d736fc9bd35b7d73d87d519bedf5fc3bd5c00853d67a4f1bd58a12e3e929a6e3ecf6f7b3da3c539be000000006f93fabd0aa88c3d88b0c6bebaf651bd98f4d5bd99ae973df84dd03e3efc823e46afafbd813726bec6c4c1bc63d1203e0f38e23d56ed823d85417e3dfb17a73ddb51793d9acec5bc5aafb6bd5682b6bd407b913d899329be5d954f3dedfb463d3dd9473dd18b5a3d75b6b13ebc7c8d3d7832b0bdc9a57a3d02c6423e454294bde339d9bd419b843d8dd4adbdce06ae3d31fc9d3d5952193d8662aa3d264d573eafabbcbdf03bd53d532f50be229b973dad607f3dfe20ad3d86599ebd9fa1923d940f1cbe814e933d2fe8863eda2105be8016a0bd11c0b9bd30a57d3d8f8eebbd6eee89bd1e75cbbdf16a723d0f290fbe186166be26f5fdbd51aa8cbd14418e3da3b26a3ef78e803db37c55be8869803e551f343ee1688d3ef091463e67047abd32cf543e448a01beb9c97f3e5edf43be38bb19bef68410be0e5427be2b1c313eda8aa5be8b506bbd000000001f106a3ea4754d3e08b6afbd824756be411864be609dd5bd781b453ed7a654beea8f973e7373403ecebe02be091e6e3e4d0aacbe8d43eabed2b3f5bd1d09b7
3e00000080f3ed763e3d9d09be7ae8433fd0eccd3d6ccb523eb88314bec41f4cbfad7a00bfb3112d3eafc0a33e9b7c3a3d956b9ebe160a5fbeb9af00beb8d9f9bd597c24bebbf7f4bd1724403d29fd333e8996323ec9160fbee170a63ea862cabde5e5c1bd27dfc3bdf772d6bd58af2cbf06240bbe0b7d2d3eae46f6bdbadcbfbe0ae7113eb604563e525a02be213f2b3e9a632bbe177c1bbef8df94bd6ac827be0811d4be4cee393eff3152bee817cd3eac6e14be13f6fabd0d742abe89e31b3e3a3e10be16c5993ea6e910be3fe703bfac2f833ef7a21d3e7909373e8ee3f7bdd21e683eddac073e9689483e3d17eebdeb118d3edf18e23eadd2783e23610a3e06310bbef644e6be7ea1fcbdd5fc54be10f67e3ee44a323e40788c3e81c4443ef9a975bd46e1523e93c9ffbdb4e77d3ea3f442be80fe17be65d30ebeed6f26be7f4a2f3efb84a4be77da67bd00000000242e683e481d4c3e10a2adbdd0c855be48bc62bef58fd2bdbc95443e4eb652be269f963eb89e3e3ea51801bec03a6c3ea7feaabef522eabedd0ff4bdd458b63e000000001c0c753eadc408beb11d433f38f3ca3d58f1503e7ba413beaa0b4cbff38300bfe6452b3e46cfa23e204c383d95699dbe50115dbe5e19febd0d9cf6bd6fb722be14bef1bdc62d3c3d292d323ee5ef313e1d660dbe7c24a63e8ad5c8bdf659c0bd7e0dc1bd856cd3bd5f882cbf457609be5eb42b3e130df3bd07cabebe3a36103e2128543ed9b400be3f72293eb79729be3bbc19bea27493bd7b0026be49f4d2be4216383e044450be0965cc3e0f9213bee4b7f7bd54ab28be262a1a3ef8880ebeb615993e2f3b0fbe2ec103bf733d823e33e41b3e43a3353ea33af6bd793b663e5506063e46aa463e6cffeabd171e8c3e1e40e23e3653783e79b8083e09560abe8942e6be3d6ff9bdbd38d93d863e02bebf73b6bd4a890fbed80bc9bdf4adfb3c5db3d7bd6f6a823d34b501be4a36c73d35f69a3d6f9f913d6f17aa3dd251b3bd7c66283ede43ec3c00000080903fedbd9345d0bdc3a9303d1506da3d815ee73d29ce563d1dc8c7bdcc47d73ddbee19beeecac2bdf8a3833d3564f1bd010b2f3e814a6f3e1579793d78ea39be00000080c45dfabda5c78b3d4464c7be72ac4fbd4780d5bde0c0963d090bd13e3866833e4436afbd9f6326be3986bdbc511e213eb3dbe13d858e813d097d7b3dc9e3a53d9088763de970c1bcde44b6bd13e0b5bd472b903d739129bee15d4d3dd9b3443d1167453db9af573d3f56b23eca288c3d9c79afbdb1db773d2c51433e6f8f93bd58cfd8bd553e833d5e58adbdba47ad3d43bc9c3d72ed163d379ea93dedf6573e1a4fbcbd4fc8d43dae9a50be7ecf963d489e7c3d47f5ab3d02bc9dbd1253913d5c271c
bef35d923de557873e491805be1b819fbdf021b9bd76a77b3d5f44ebbdc82689bd8737cbbd642b703d59300fbe636d65beae3efcbd3fe88bbd665d8d3d2b616b3e6f5e7e3d33c4ef3d5a4ff3bd7221abbd56eb05be0ea3bcbdde3df03cb7dfc9bdeb15773d1eb0f2bd5dc1ba3dd366923d99b6893d81cd9f3d1c33a8bd0aed1c3e4f05e13c00000080dacfddbd5efae8bdeae7273d86fcec3d8ad5023e45194c3db68abbbdffc7c93d53820fbe25d1b6bddc5d793dd2a2e1bdba0d233ec440853ea3646b3d857249be000000007043eabd30ab833d07e5cfbe76a644bdd424c8bdbff98d3d7c0be53e2564933ec465a4bd3e031bbe2e56b5bcca32163e3192d33daa7b753da05e6e3def8e9c3d62bf693d4910b9bc23efaabdd2cfaabd035c883d22501ebef256423d6b493a3d002e3b3dc8e94c3de1f9c33ea59f843d07eda4bdcdfd6a3daec8353e0fc58abd45e9cabd99a0783d9faaa2bd18e4a23d8b0c943d8d7d0f3db27d9f3d48dd483e4686b0bdc977c73d3ebb61beade78d3d7a6b6f3da931a23d523394bd726f893d4b9311be6fee893da5ab933ee588f8bdb6d395bdc374c6bda6756d3d41f9dbbdec1c81bde952bebd800b633d339705bec81c70be3e29ecbd59ab83bdd329853df8b67e3eb00a713ddfe7da3d302f03bedef5b7bd617710be3adecabd9925003dac34d9bdff59843d94a402be1adfc83dc9099d3d93a6933da9bbab3dbad0b4bd7523293e061df13c000000803316efbdebccd1bd67cf333d53b0db3d8ee3e83d1c555a3dfab0cabd6816d93d61d91abe7895c4bd4295853d833af3bdefc02f3e6800703e197b7c3d20f43abe00000000ea3afcbd76558d3dc78ec7becc9652bde652d7bd9478983d8d2ed13eacb8833e0bb5b0bda74d27be3905c1bcc5dd213ed4ace33d817c833dbf4b7f3deb03a83de74e7a3d9a0fc5bc92c5b7bdcf9ab7bd5030923de4ac2abe712a503d8179473d7a5a483d66365b3d318db23ea6278e3d893bb1bd31a67b3db0fe433ee10395bd989edabd042f853df9d5aebd7b08af3dc5d19e3dd85a193d715bab3d3f9c583ee0cfbdbde795d63d457751bed464983d2137803d951bae3da5349fbd095a933dee161dbe810d943d77a7873e8b0306be42f6a0bd9eddbabd9fb67e3db718edbd44958abd3cbaccbd105f733df81b10bef0ee67be7fa0ffbd3c588dbd8ff28e3def146c3e0718813de067dc3d06c0edbd469cb0bdeadf02be9afab7bdf09be93cdb30d3bd137f703d05c5ecbd0b1cc03da3928e3dee15863dd94aa73dd501adbd8925193ee9f7db3c0000008069b6d8bd8bd8d8bd54ad233dc8f8de3d90f3ef3d7c8c463dcc09b7bd9ce5c43de6400cbe894cb2bd5ab9723da474dcbd0a1c1f3e7f24853e7081683d7f3629
be000000806996e4bd6029833d2114cebebb803fbda83cc3bd89748e3d60d6fe3ef831963e57f5a8bdeb7f17be8437b0bc0497123e6177ce3db5ee6e3d23fd673d627f983d127a633d12e6b3bc091ab0bd9d90a6bd6dc3843dcf6f1abe97e73d3d42b3353db840363d7958473d2e08d53e711d813d78cea0bde6b0643d7357313eab9c8cbda337c6bd9000723d57d4a6bd08de9e3db62e903d2adc0a3dc98a9b3da3e4433e9761b6bda3a2c23d88e95cbe44258e3dc703693d85019e3d260797bd4ed0853dad460ebec774863d7b2d9a3e4bddf2bdaf889cbd62bdbebdd3b36a3d1eead6bd600882bd10dac5bd982e5d3d5b8d02be3cc450be27a8e6bdc1da84bd8da8843d7dbe813ec89a6a3d5d4fd93d504a02bef881b6bdda840fbe6651c9bd08e5fc3c039fd7bde720833dfabf01be1a5ac73de7b49b3d255c923dbf54aa3d0961b3bd2b1b283e27d3ed3c00000000bf63edbd2f3ed0bd8cc5313d5e17da3d593de73d7e1b583daf29c9bd3481d73d4fdc19bef20ec3bdf15a843dc583f1bd42b22e3e17b56e3ef0137a3d86d939be00000080327bfabd7f108c3d1eafc6be445850bdf2b8d5bd3027973d7c49d03e1809833e8d49afbd494426beb813bebcb2dc203eb50ce23d8f44823d19e57c3d21a3a63dc8ed773d3b12c2bc7b52b6bd2322b6bd84e7903d63a229be96fa4d3d0855453df235463de0fc583de3bab13e86e38c3d66c9afbd4844793d62dc423e51b693bdd100d9bd36f5833da96cadbd4d9fad3d067b9d3de17a173d2ef6a93d9f66573ea456bcbd4703d53dde4750be7513973da7067e3d68b4ac3dbbdb9dbdc20f923d03181cbe9ac1923db0f4863e731b05bec99b9fbd8f67b9bdef4c7c3d4e68ebbd955389bd3531cbbdf603713db5290fbeafa966be4ce2fdbd4e138cbdb0ab8d3d20cd6a3ebec67f3d9ef957bec152813e51cb353e30558e3ebd52483e7b4a7fbd8d69563e912703beafcb803e766246beae671bbe1e3012bed9ca29be58b7323e5333a6be425070bd0000000002d56b3e9e104f3eb773b2bd96c058be00b365bea4b5d8bd05fb473e264856be637f983e1c29423e675d04be1de46f3e63e3acbeebabebbed539fabdc7e5b73e0000008056b7783e41f20bbe89cb433f31f4d03db489543e23e316be25e44cbf124b01bfc8ae2e3e2cb1a43e2ba9403df0169fbee4a860be1f4e02be7910fdbd392e26befc28f8bd68ad443d439d353ed46c353e7ac010be6a06a83ec8a1cebdf813c6bd4ed7c6bd5a93d9bd51652dbfe8c90cbe592c2f3e997af9bd79a1c0beb47e133eabc4573e0df903be26d92c3ea30c2dbe23271dbe1b9e98bde47029bea0c8d4bef08a3b3e45d453be30e9cd3e5fcf16beb92dfebdf1282cbe84801d3e71e411bedab19a
3e8e8e12be2aad04bfee17843efd391f3e6195383e086cfcbde1e1693e263f093eca2b4a3edc29f1bd7efb8d3e0590e33ef7c97b3ebbf50b3e55880dbe9ddbe7be46e8ffbd1e25dc3d43edebbdb496b2bde2cb01be45cbb6bd770cdf3c01c6d8bdc1256f3d43f5eabd8362c53d3cc28d3d4c54853dd29ba53d9770afbd10b4173ecd44da3c000000007929d7bda647d6bdc5a7223d5f05df3dcf97f03d8766453d51a8aebd044bb93dab090bbe092cb1bd0b5d713d14dcdabd85401f3ef69e863e5642673d834627be0000000027e2e2bd9f81833d66c6cfbe2b5e3ebd45efc1bd2f718f3db1c6033fe13b983e9b6eabbd711b16bea4adaebcd0d0103e190bc23db6976d3d9daf663d159c973d4532623dd556b2bc09a2b2bde48ba5bdf103843d55930ebecdbc3a3dfc6c323d04ba2c3d4a32463d0351da3ec763803dded69fbdad67633de850323e85e68cbd5ce2c4bd54a5703dbedaa8bd0bf1953d535b8f3da31c073d67eb923d5e27463e09a1b9bd6537b73d18d55dbe873e8f3d0bb5673df5129d3d348197bdfb0e853dd2060dbeb05e7d3daf629c3e4dfaf0bd170c9abdae6abcbde6c6693d0463d5bdf8c081bd8700cabdae54513df47a01be929b4abeac77dabd8ba984bd5f2e853dc5af833e124a693da068db3d435203beb583b8bd3e9310be5542cbbd5318013dc8b9d9bd6eef843d55bf02beff62c93d6e9d9d3d903b943d7c4bac3d1b60b5bdac48293e54fff23c000000800d6aefbdb152d2bd28e5343d122edc3df060e93d657b5b3d5716cbbda297d93decde1abedbfcc4bd882a863d5e8cf3bd52e42f3e28f76f3e889f7d3df5ae3abe000000000d91fcbd8ce78d3dd882c7be65b053bd17b2d7bd7b08993d5422d13e67b2833e2a45b1bdc15b27beeca6c2bc9806223ef729e43df711843d263b803d8595a83d58797b3d59b0c6bcfd53b8bd1a0fb8bd67c5923d5abe2abe1d48513d1a93483da975493de55b5c3de780b23ee0bc8e3dbaa9b1bda4d07c3d8618443e2c9895bd10fddabd50c4853da666afbd4696af3def649f3d875f1a3d24eaab3de3ac583eff5cbebd1d18d73de03451bef2f5983d2acc803dc3abae3dd1c79fbde8ee933da02a1dbe939f943df69f873ee62506be5d89a1bdf268bbbdc9d87f3de56dedbd112a8bbdd742cdbde882743d663810be419467be8de4ffbdfcec8dbd77838f3dfa106c3effac813dbb9bd83df8e801be3be5b5bd5c1f0fbe06acc8bdc9bcfc3cc8ecd6bd0ab4823d0c5f01be68b1c63dba2d9b3d33de913d7bc1a93d9ec6b2bd76ae273e0ec5ed3c00000000c2aaecbd1590cfbd9163313d7863d93de182e63df77c573d8680c8bddaced63d3f7419bedf6dc2bd69ec833df5c8f0bdb5442e3e68416e3e4a4b793dc16a39
be00000000a3bcf9bd9f9a8b3dbc70c6befed24fbd1c0cd5bdaea5963d030ad03ee3ce823e66b2aebdb3d925be5c59bebc4671203edb54e13d8bd8813d0d167c3dea11a63d5025773d6255c2bcd7b5b5bd8f8ab5bd1b6b903d653529bedd6c4d3df0d3443de5b2453dab5b583d587eb13e516b8c3dc436afbdba79783d666c423e6c3893bd4e52d8bd1787833dfcd6acbd3f09ad3d1ff29c3d1640173d2f63a93dfdf4563e91b5bbbd4c52d43d74d64fbe0592963dcf357d3de31dac3dd9539dbd6592913d44af1bbe5244923d61ba863e42b904be32129fbd88c8b8bd8a817b3d3fb0eabdcee088bd3986cabd3046703d5bc40ebe001b66be521ffdbd859d8bbd10348d3dc4596a3e52f37e3dd45312bfaa602f3fe11df63ebb01413fd9d0073f7a322cbe4829113ff6b1b0be71a42e3fb04c06bf5589d1be8e02c5be4ccee5be0ce2f13e93a161bf7fa221be0000008066e21f3fe5430c3f2b1371be2dd712bf8b9d1bbfebcd91be8376073f3e0611bfeac64e3f7c9e033f4456b2be03a1223f78686abf9a0ca0bf4028a9bed568793f0000000054a3283f4649bdbe97d3044098a78d3eb917103f8020ccbea7380bc00f93afbf6e6cec3e44555f3fdc9c023e1ef757bfe81018bf1187afbe5e6baabecc24e0bec01aa7bed44b053ef6d5f53e55fcf53e6312c3bed4af633f98978bbe09c985be245586bef96492be41baedbf6cb8bdbe2880ed3e13f7a7be76a582bfc688c73edc3f123fd4cbb1be33ebe93eab18eabe37eed3be14f64dbe0e34e5be785690bfe6ddfd3ea95b0fbf14af8b3f3207ccbef42fabbe9c3ee8be041ed53e8aa7c4bea8cd513faa28c6beffc3b4bf8917333f1f72d73eb7f2f93ebba5aabe9d8a1e3fcd9fb93ef1dd083f7006a3bef684403fd48d9a3fe9ab2a3f1f4fbd3e516fbfbe72649dbf5459acbe3ce2d63ea5fae63e06cfa93e0415b53e83fcd73e8a1c973ee8499d3e09299f3e10e6c63e0965983e4656b23e3343b03e9e43b23e57bd953ee7f3f33e658b873e941a7bbe04d2bb3efa54463f9b0d723ede9fb43e4aa5163f555c8c3e7521803ed201e23e9267873eb546b83e07bcb03e8701af3ef7e2da3e2b1a333fcc08b23e71089b3e6536c2bdf12ce43ec3c4a43ec9deee3e6c04af3e90f0c53e19aa783ed7025a3f4a48103f9794d23ef147973ed7634a3e9084ee3ef8e9e73e1837a83e1fe2ac3e7ea6bb3e24ec9b3e3dc32f3e27f7bd3e2315cd3e3a67af3e5dde993e6209bd3e991da23e0112953eeb4cc73e4ea7373f4389b63e53e5bd3e38aba33ea7beed3e2dded33eadb68e3ecaeeb23ec79a963ef6c5d53ef7deb93e4ef4a53e4b2ec33e7825013f4d50bf3efc34e03e747f973e4718a23eeadfc03ea089cb3eab6da43e8e25a33ebef7e4
3ea9b3ea3eebbb0f3fcbaa8f3ef829043fcc263e3f917b973e8b8aa43ea938eb3ef7ec9a3e9acda03e25379f3ef927b13e73b5b23eedf0d13e6f15993ef928033fdae3d63e81e4963d
+\ No newline at end of file
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 432b822e8..7508fc817 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -3486,12 +3486,19 @@ common_handle_option (struct gcc_options *opts,
+       break;
+     
+     case OPT_fauto_bolt_:
+-      opts->x_flag_auto_bolt = true;
++      if (get_optimize_decision_from_ai4c ())
++	{
++	  opts->x_flag_auto_bolt = true;
++	}
+       /* FALLTHRU */
+     case OPT_fauto_bolt:
+       if (opts->x_flag_bolt_use)
+         error_at (loc,
+ 		  "-fauto-bolt conflicts with -fbolt-use.");
++      if (get_optimize_decision_from_ai4c ())
++	{
++	  opts->x_flag_auto_bolt = true;
++	}
+       break;
+ 
+     case OPT_fbolt_use_:
+@@ -3499,6 +3506,10 @@ common_handle_option (struct gcc_options *opts,
+       if (opts->x_flag_auto_bolt)
+         error_at (loc,
+ 		  "-fauto-bolt conflicts with -fbolt-use.");
++      if (get_optimize_decision_from_ai4c ())
++	{
++	  opts->x_flag_bolt_use = true;
++	}
+     break;
+ 
+     case OPT_fbolt_target_:
+-- 
+2.25.1
+
diff --git a/0362-Modify-cache-size-for-hip10a-and-hip10c.patch b/0362-Modify-cache-size-for-hip10a-and-hip10c.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cf4a2cdb190a99c5d6c32f0b26ff55f1e6780e43
--- /dev/null
+++ b/0362-Modify-cache-size-for-hip10a-and-hip10c.patch
@@ -0,0 +1,43 @@
+From a7a7cc3f77c10daf759681b1a45b68319d45f1ab Mon Sep 17 00:00:00 2001
+From: liyunfei 
+Date: Wed, 9 Apr 2025 17:13:03 +0800
+Subject: [PATCH] Modify cache size for hip10a and hip10c
+
+Modify the L1cache size and L2cache size of the hip10a and hip10c.
+
+Signed-off-by: Tian Tao 
+---
+ gcc/config/aarch64/aarch64.cc | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index a06c2c515..389dcd646 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -1592,9 +1592,9 @@ static const cpu_prefetch_tune hip09_prefetch_tune =
+ static const cpu_prefetch_tune hip10a_prefetch_tune =
+ {
+   0,                    /* num_slots  */
+-  64,                   /* l1_cache_size  */
++  128,                  /* l1_cache_size  */
+   64,                   /* l1_cache_line_size  */
+-  512,                  /* l2_cache_size  */
++  1024,                 /* l2_cache_size  */
+   true,                 /* prefetch_dynamic_strides */
+   -1,                   /* minimum_stride */
+   -1                    /* default_opt_level  */
+@@ -1603,9 +1603,9 @@ static const cpu_prefetch_tune hip10a_prefetch_tune =
+ static const cpu_prefetch_tune hip10c_prefetch_tune =
+ {
+   0,                    /* num_slots  */
+-  64,                   /* l1_cache_size  */
++  96,                   /* l1_cache_size  */
+   64,                   /* l1_cache_line_size  */
+-  512,                  /* l2_cache_size  */
++  1024,                 /* l2_cache_size  */
+   true,                 /* prefetch_dynamic_strides */
+   -1,                   /* minimum_stride */
+   -1                    /* default_opt_level  */
+-- 
+2.25.1
+
diff --git a/0363-SVE-Add-std-find-with-sve.patch b/0363-SVE-Add-std-find-with-sve.patch
new file mode 100644
index 0000000000000000000000000000000000000000..03b5502e3365dbba8609ff026443627ba867960b
--- /dev/null
+++ b/0363-SVE-Add-std-find-with-sve.patch
@@ -0,0 +1,530 @@
+From ed2666ad49a52ff3950b5ad4e0127fe4a9d1bef7 Mon Sep 17 00:00:00 2001
+From: blunce 
+Date: Mon, 7 Apr 2025 16:47:16 +0800
+Subject: [PATCH] [SVE] Add std::find with sve
+
+---
+ gcc/Makefile.in                               |   1 +
+ gcc/common.opt                                |   8 +
+ gcc/gimple-ssa-expand-sve.cc                  | 256 ++++++++++++++++++
+ gcc/passes.def                                |   1 +
+ gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C | 137 ++++++++++
+ gcc/tree-pass.h                               |   1 +
+ libgcc/config/aarch64/sve_std_find.c          |  32 +++
+ libgcc/config/aarch64/t-aarch64               |   1 +
+ 8 files changed, 437 insertions(+)
+ create mode 100644 gcc/gimple-ssa-expand-sve.cc
+ create mode 100644 gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C
+ create mode 100644 libgcc/config/aarch64/sve_std_find.c
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index ef7733580..ab6ad8206 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1413,6 +1413,7 @@ OBJS = \
+ 	gimple-ssa-backprop.o \
+ 	gimple-ssa-evrp.o \
+ 	gimple-ssa-evrp-analyze.o \
++	gimple-ssa-expand-sve.o \
+ 	gimple-ssa-isolate-paths.o \
+ 	gimple-ssa-nonnull-compare.o \
+ 	gimple-ssa-split-paths.o \
+diff --git a/gcc/common.opt b/gcc/common.opt
+index e6ffa1c58..23544740d 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -3870,4 +3870,12 @@ fifcvt-allow-complicated-cmps
+ Common Var(flag_ifcvt_allow_complicated_cmps) Optimization
+ Allow RTL if-conversion pass to deal with complicated cmps (can increase compilation time).
+ 
++ffind-with-sve
++Common Var(flag_find_with_sve) Init(0) Optimization
++Enable replace std::find with sve
++
++fsve-expand-std-find-threshold
++Common Var(sve_expand_std_find_threshold) Init(8) Optimization
++Minimal length of the array to search
++
+ ; This comment is to ensure we retain the blank line above.
+diff --git a/gcc/gimple-ssa-expand-sve.cc b/gcc/gimple-ssa-expand-sve.cc
+new file mode 100644
+index 000000000..9bac95212
+--- /dev/null
++++ b/gcc/gimple-ssa-expand-sve.cc
+@@ -0,0 +1,256 @@
++/* replace the std::find with sve.
++   Copyright (C) 2005-2022 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tree.h"
++#include "function.h"
++#include "cfg.h"
++#include "basic-block.h"
++#include "gimple.h"
++#include "gimple-iterator.h"
++#include "tree-pass.h"
++#include "context.h"
++#include "target.h"
++#include "toplev.h"
++#include "cfghooks.h"
++#include "tree-cfg.h"
++#include "cfgloop.h"
++#include "gimple-ssa.h"
++#include "gimple-pretty-print.h"
++
++namespace {
++
++#define TRACE_FUNCTION(fun)					\
++	if (dump_file)						\
++	{							\
++		fprintf (dump_file, "\nprocess function: \n");	\
++		dump_function_to_file (fun, dump_file, TDF_NONE);\
++		fprintf (dump_file, "\n");			\
++	}
++
++#define TRACE_STMT(stmt)					\
++	if (dump_file)						\
++	{							\
++		fprintf (dump_file, "\nprocess stmt: \n");	\
++		print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);\
++		fprintf (dump_file, "\n");			\
++	}
++
++#define TRACE_REPLACE_STMT(stmt)				\
++	if (dump_file)						\
++	{							\
++		fprintf (dump_file, "\nprocess replace stmt: \n");\
++		print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);\
++		fprintf (dump_file, "\n");			\
++	}
++
++#define TRACE_ARG3_TYPE(type)					\
++	if (dump_file)						\
++	{							\
++		fprintf (dump_file, "\nprocess arg3 type: \n");	\
++		dump_node (type, TDF_NONE, dump_file);		\
++		fprintf (dump_file, "\n");			\
++	}
++
++const pass_data pass_data_find_with_sve = {
++	GIMPLE_PASS, /* type  */
++	"find_with_sve", /* name */
++	OPTGROUP_NONE, /* optinfo_flags */
++	TV_NONE, /* tv_id */
++	0, /* properties_required */
++	0, /* properties_provided */
++	0, /* properties_destroyed */
++	0, /* todo_flags_start */
++	TODO_cleanup_cfg | TODO_update_ssa | TODO_update_address_taken
++	| TODO_rebuild_cgraph_edges, /* todo_flags_finish */
++};
++
++class pass_find_with_sve : public gimple_opt_pass {
++public:
++    pass_find_with_sve (gcc::context *ctx) :
++    	gimple_opt_pass (pass_data_find_with_sve, ctx)
++    {}
++
++    virtual bool gate (function *fun) override
++    {
++	if (!flag_find_with_sve)
++	    return false;
++
++	if (!targetm.vector_mode_supported_p (V2DImode))
++	    return false;
++
++	return true;
++    }
++
++    virtual unsigned int execute (function *fun) override
++    {
++	TRACE_FUNCTION (fun->decl);
++	basic_block bb;
++	FOR_EACH_BB_FN (bb, fun)
++	{
++	    for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
++	    	!gsi_end_p (gsi); gsi_next (&gsi))
++	    {
++		gimple *stmt = gsi_stmt (gsi);
++		if (std_find_check (stmt))
++		    replace_std_find (gsi);
++	    }
++	}
++
++	return 0;
++    }
++
++private:
++    uint8_t bit_width;
++    const char *null_name = "";
++
++    bool std_find_check (gimple *stmt)
++    {
++	if (!is_gimple_call (stmt))
++	    return false;
++
++	tree fndecl = gimple_call_fndecl (stmt);
++	if (fndecl == nullptr || DECL_NAME (fndecl) == nullptr)
++	    return false;
++
++	const char *fn_name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
++	if (strcmp (fn_name, "find") != 0)
++	    return false;
++
++	if (DECL_CONTEXT (fndecl) == nullptr
++		|| TREE_CODE (DECL_CONTEXT (fndecl)) != NAMESPACE_DECL)
++	    return false;
++
++	const char *namespace_name
++		= IDENTIFIER_POINTER (DECL_NAME (DECL_CONTEXT (fndecl)));
++	if (strcmp (namespace_name, "std") != 0)
++	    return false;
++
++	if (gimple_call_num_args (stmt) != 3)
++	    return false;
++
++	tree arg1 = DECL_ARGUMENTS (fndecl);
++	tree arg2 = TREE_CHAIN (arg1);
++	tree arg3 = TREE_CHAIN (arg2);
++
++	tree arg3_type = TREE_TYPE (arg3);
++	if (TREE_CODE (arg3_type) != REFERENCE_TYPE)
++	    return false;
++
++	tree main_type = TREE_TYPE (arg3_type);
++	TRACE_ARG3_TYPE (main_type);
++	if (TREE_CODE (main_type) == INTEGER_TYPE)
++	{
++	    if (TYPE_PRECISION (main_type) != 64)
++		return false;
++
++	    const char *type_name = get_type_name_arg3 (main_type);
++	    if ((strcmp (type_name, "long unsigned int") != 0)
++	    	&& (strcmp (type_name, "long int") != 0))
++		return false;
++
++	    this->bit_width = 64;
++	} else if (TREE_CODE (main_type) == POINTER_TYPE)
++	    this->bit_width = 64;
++	else
++	    return false;
++
++	return true;
++    }
++
++    const char *get_type_name_arg3 (tree main_type)
++    {
++	enum tree_code code = TREE_CODE (main_type);
++	enum tree_code_class tclass = TREE_CODE_CLASS (code);
++
++	if (tclass == tcc_type)
++	{
++	    if (TYPE_NAME (main_type))
++	    {
++		if (TREE_CODE (TYPE_NAME (main_type)) == IDENTIFIER_NODE)
++		    return IDENTIFIER_POINTER (TYPE_NAME (main_type));
++		else if (TREE_CODE (TYPE_NAME (main_type)) == TYPE_DECL
++			 && DECL_NAME (TYPE_NAME (main_type)))
++		    return IDENTIFIER_POINTER (
++			DECL_NAME (TYPE_NAME (main_type)));
++	    }
++	}
++
++	return null_name;
++    }
++
++    void replace_std_find (gimple_stmt_iterator gsi)
++    {
++	switch (this->bit_width)
++	{
++	    case 64:
++		replace_std_find_u64 (gsi);
++		break;
++	    case 32:
++	    case 16:
++	    case 8:
++	    default:;
++	}
++    }
++
++    void replace_std_find_u64 (gimple_stmt_iterator gsi)
++    {
++	gimple *stmt = gsi_stmt (gsi);
++	tree old_fndecl = gimple_call_fndecl (stmt);
++	TRACE_STMT (stmt);
++
++	// arguments list process:
++	auto_vec args;
++	for (unsigned i = 0; i < gimple_call_num_args (stmt); ++i)
++	    args.safe_push (gimple_call_arg (stmt, i));
++	tree new_arg = build_int_cst (unsigned_char_type_node,
++			sve_expand_std_find_threshold);
++	args.safe_push (new_arg);
++
++	// functon declare process:
++	tree old_type = TREE_TYPE (old_fndecl);
++	tree ret_type = TREE_TYPE (old_type);
++	tree arg_types = NULL_TREE;
++	for (tree t = TYPE_ARG_TYPES (old_type); t; t = TREE_CHAIN (t))
++	    arg_types = tree_cons (NULL_TREE, TREE_VALUE (t), arg_types);
++	arg_types = tree_cons (NULL_TREE, unsigned_char_type_node, arg_types);
++	arg_types = nreverse (arg_types);
++	tree new_fndecl_type = build_function_type (ret_type, arg_types);
++	tree new_fndecl = build_fn_decl ("__sve_optimized_find_u64",
++		new_fndecl_type);
++	TREE_PUBLIC (new_fndecl) = 1;
++	DECL_EXTERNAL (new_fndecl) = 1;
++
++	// call function process:
++	gcall *new_call = gimple_build_call_vec (new_fndecl, args);
++	if (gimple_has_lhs (stmt))
++	    gimple_call_set_lhs (new_call, gimple_call_lhs (stmt));
++	gsi_replace (&gsi, new_call, true);
++	update_stmt (gsi_stmt (gsi));
++	TRACE_REPLACE_STMT (gsi_stmt (gsi));
++    }
++};
++}  // namespace
++
++gimple_opt_pass *make_pass_find_with_sve (gcc::context *ctx)
++{
++    return new pass_find_with_sve (ctx);
++}
+diff --git a/gcc/passes.def b/gcc/passes.def
+index 49001adde..08213f2bc 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -70,6 +70,7 @@ along with GCC; see the file COPYING3.  If not see
+   PUSH_INSERT_PASSES_WITHIN (pass_local_optimization_passes)
+       NEXT_PASS (pass_fixup_cfg);
+       NEXT_PASS (pass_rebuild_cgraph_edges);
++      NEXT_PASS (pass_find_with_sve);
+       NEXT_PASS (pass_local_fn_summary);
+       NEXT_PASS (pass_early_inline);
+       NEXT_PASS (pass_warn_recursion);
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C b/gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C
+new file mode 100644
+index 000000000..66d03e2cf
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C
+@@ -0,0 +1,137 @@
++/* { dg-do compile } */
++/* { dg-options "-std=c++11 -O3 -ffind-with-sve -march=armv8-a+sve -fdump-tree-optimized" } */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++void test_u64()
++{
++    std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9};
++
++    std::uint64_t x = 100;
++    std::cin >> x;
++
++    auto it = std::find(v.begin(), v.end(), x);  // matched : No.1
++
++    if (it != v.end())
++	std::cout << "ok!\n";
++    else
++	std::cout << "fail!\n";
++}
++
++void test_s64()
++{
++    std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9};
++
++    std::int64_t x = 100;
++    std::cin >> x;
++
++    auto it = std::find(v.begin(), v.end(), x);  // matched : No.2
++
++    if (it != v.end())
++	std::cout << "ok!\n";
++    else
++	std::cout << "fail!\n";
++}
++
++void test_array()
++{
++    const unsigned N = 1024 * 1024 * 16;
++    long *arr = new long[N];
++    long *p;
++    for (unsigned i = 0; i < N; ++i)
++	arr[i] = i;
++    for (unsigned i = N - 1000; i < N - 1; ++i) {
++	p = std::find(arr, arr + N, arr[i]);  // matched : No.3
++	assert(p == arr + i);
++	unsigned j = i - 10;
++	p = std::find(arr + j, arr + j + 1, arr[j]);  // matched : No.4
++	assert(p == arr + j);
++	p = std::find(arr + j + 1, arr + j + 1, arr[j + 2]);  // matched : No.5
++	assert(p == arr + j + 1);
++	p = std::find(arr + j + 2, arr + j + 1, arr[j + 2]);  // matched : No.6
++	assert(p == arr + j + 1);
++    }
++    p = std::find(arr, arr + N, (long)-1);  // matched : No.7
++    assert(p == arr + N);
++}
++
++void test_string()
++{
++    std::vector v;
++    for (int i = 0; i < 5; i++)
++	v.push_back(std::to_string(123 + i));
++
++    for (int i = 0; i < 5; i++) {
++	auto it = std::find(v.begin(), v.end(), std::to_string(124 + i));  // not matched
++
++	if (it != v.end())
++	    std::cout << "ok!\n";
++	else
++	    std::cout << "failed!\n";
++    }
++}
++
++void test_s32()
++{
++    std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9};
++
++    std::int32_t x = 100;
++
++    auto it = std::find(v.begin(), v.end(), x);  // not matched
++
++    if (it != v.end())
++	std::cout << "ok!\n";
++    else
++	std::cout << "failed!\n";
++}
++
++void test_u16()
++{
++    std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9};
++
++    std::uint16_t x = 100;
++
++    auto it = std::find(v.begin(), v.end(), x);  // not matched
++
++    if (it != v.end())
++	std::cout << "ok!\n";
++    else
++	std::cout << "failed!\n";
++}
++
++void test_u16_point()
++{
++    std::vector v = {1, 2, 3, 4, 5, 6, 7, 8, 9};
++    std::vector v_ptr;
++
++    for (auto &item : v)
++	v_ptr.push_back(&item);
++
++    std::uint16_t x = 100;
++
++    auto it = std::find(v_ptr.begin(), v_ptr.end(), &x);  // matched : No.8
++
++    if (it != v_ptr.end())
++	std::cout << "ok!\n";
++    else
++	std::cout << "fail!\n";
++}
++
++int main()
++{
++    test_u64();
++    test_s64();
++    test_array();
++    test_string();
++    test_s32();
++    test_u16();
++    test_u16_point();
++    return 0;
++}
++
++/* { dg-final { scan-tree-dump-times "__sve_optimized_find_u64" 8 "optimized" } } */
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index 468353d13..e01d86fb1 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -503,6 +503,7 @@ extern gimple_opt_pass *make_pass_modref (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_coroutine_lower_builtins (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_coroutine_early_expand_ifns (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_adjust_alignment (gcc::context *ctxt);
++extern gimple_opt_pass *make_pass_find_with_sve (gcc::context *ctx);
+ 
+ /* IPA Passes */
+ extern simple_ipa_opt_pass *make_pass_ipa_lower_emutls (gcc::context *ctxt);
+diff --git a/libgcc/config/aarch64/sve_std_find.c b/libgcc/config/aarch64/sve_std_find.c
+new file mode 100644
+index 000000000..0caf1f4f6
+--- /dev/null
++++ b/libgcc/config/aarch64/sve_std_find.c
+@@ -0,0 +1,32 @@
++#include 
++#include 
++
++#pragma GCC target ("+sve")
++
++uint64_t *__sve_optimized_find_u64 (uint64_t *first, uint64_t *last,
++	uint64_t const *value, uint8_t threshold)
++{
++    if (first + threshold > last)
++	goto Tail;
++
++    uint64_t m = svcntd ();
++    uint64_t n = (last - first) / m;
++    svbool_t flag_true = svptrue_b64 ();
++    for (; n-- > 0;)
++    {
++	svuint64_t v3 = svld1_u64 (flag_true, (uint64_t *)first);
++	svbool_t v4 = svcmpeq_n_u64 (flag_true, v3, (uint64_t *)value);
++	if (svptest_any (flag_true, v4))
++	    break;
++	first += m;
++    }
++
++Tail:
++    while (first < last)
++    {
++	if (*first == *value)
++	    return first;
++	++first;
++    }
++    return first;
++}
+diff --git a/libgcc/config/aarch64/t-aarch64 b/libgcc/config/aarch64/t-aarch64
+index 5a8feb184..cc2357431 100644
+--- a/libgcc/config/aarch64/t-aarch64
++++ b/libgcc/config/aarch64/t-aarch64
+@@ -19,6 +19,7 @@
+ # .
+ 
+ LIB2ADD += $(srcdir)/config/aarch64/sync-cache.c
++LIB2ADD += $(srcdir)/config/aarch64/sve_std_find.c
+ 
+ # Add sme runtime to shared libgcc
+ LIB2ADDEH += \
+-- 
+2.25.1
+
diff --git a/0364-CFGO-Enable-flag_profile_partial_training-for-CFGO-b.patch b/0364-CFGO-Enable-flag_profile_partial_training-for-CFGO-b.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0ee3bb4f74b51b30ae7c13feeaabada5134d56b1
--- /dev/null
+++ b/0364-CFGO-Enable-flag_profile_partial_training-for-CFGO-b.patch
@@ -0,0 +1,30 @@
+From c5a8fc1ec35310b5f030c05e11af1c32ccf79e4c Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E7=BC=96=E8=AF=91=E5=B0=8F=E4=BC=99?= <412998149@qq.com>
+Date: Tue, 15 Apr 2025 11:42:33 +0000
+Subject: [PATCH] [CFGO] Enable flag_profile_partial_training for CFGO by
+ default Enable flag_profile_partial_training for CFGO by default for better
+ performance.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+Signed-off-by: 编译小伙 <412998149@qq.com>
+---
+ gcc/opts.cc | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 7508fc817..162e14bc2 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -2125,6 +2125,7 @@ enable_cfgo_optimizations (struct gcc_options *opts,
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_modulo_sched, value);
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_selective_scheduling, value);
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_rename_registers, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_partial_training, value);
+ 
+   SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 185);
+   SET_OPTION_IF_UNSET (opts, opts_set, param_inline_unit_growth, 66);
+-- 
+2.25.1
+
diff --git a/0365-add-llc-allocate-feature.patch b/0365-add-llc-allocate-feature.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e7c8e126ae3f9ac4549925bbfdd56e90571815c6
--- /dev/null
+++ b/0365-add-llc-allocate-feature.patch
@@ -0,0 +1,8452 @@
+From 43e93c6df874a0bf78675fb4d3586d9ad1cb7dac Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=99=88=E9=B8=BF?= 
+Date: Tue, 25 Feb 2025 16:27:36 +0800
+Subject: [PATCH 1/2] add llc allocate feature
+
+---
+ gcc/Makefile.in                               |    1 +
+ gcc/auto-profile.cc                           |  491 +-
+ gcc/auto-profile.h                            |   30 +
+ gcc/builtins.cc                               |   82 +
+ gcc/builtins.def                              |    1 +
+ gcc/cfgloop.h                                 |    3 +
+ gcc/common.opt                                |   28 +
+ gcc/config/aarch64/aarch64-protos.h           |    6 +-
+ gcc/config/aarch64/aarch64-sve.md             |   48 +-
+ gcc/config/aarch64/aarch64.cc                 |   18 +
+ gcc/config/aarch64/aarch64.md                 |   39 +
+ gcc/dce.cc                                    |    1 +
+ gcc/doc/tm.texi                               |   21 +
+ gcc/doc/tm.texi.in                            |    6 +
+ gcc/internal-fn.cc                            |  115 +
+ gcc/internal-fn.def                           |    4 +
+ gcc/ipa-pure-const.cc                         |    1 +
+ gcc/optabs.def                                |    2 +
+ gcc/opts.cc                                   |   52 +-
+ gcc/params.opt                                |   62 +
+ gcc/passes.def                                |    2 +
+ gcc/print-rtl.cc                              |    6 +
+ gcc/rtl.def                                   |    9 +
+ gcc/rtl.h                                     |    4 +
+ gcc/rtlanal.cc                                |    2 +
+ gcc/sched-deps.cc                             |    4 +-
+ gcc/target-insns.def                          |    1 +
+ gcc/target.def                                |   31 +
+ .../g++.dg/llc-allocate/llc-allocate.exp      |   27 +
+ .../llc-allocate/llc-relion-expand-kernels.C  |   52 +
+ .../g++.dg/llc-allocate/multidim_array.h      |  186 +
+ gcc/testsuite/gcc.dg/llc-allocate/llc-1.c     |   61 +
+ gcc/testsuite/gcc.dg/llc-allocate/llc-2.c     |   54 +
+ .../gcc.dg/llc-allocate/llc-allocate.exp      |   27 +
+ .../llc-allocate/llc-cross-bb-indir-mem-acc.c |   36 +
+ .../llc-allocate/llc-extend-outer-loop.c      |   61 +
+ .../llc-feedback-branch-in-loop.c             |   39 +
+ .../llc-allocate/llc-feedback-break-in-loop.c |   41 +
+ .../llc-allocate/llc-feedback-goto-in-loop.c  |   50 +
+ .../llc-feedback-same-loop-cycle.c            |  129 +
+ .../gcc.dg/llc-allocate/llc-nonzero-offset.c  |   50 +
+ .../llc-prefetch-full-pldl1keep.c             |   14 +
+ .../llc-prefetch-full-pldl1strm.c             |   14 +
+ .../llc-prefetch-full-pldl2keep.c             |   14 +
+ .../llc-prefetch-full-pldl2strm.c             |   16 +
+ .../llc-prefetch-full-pldl3keep.c             |   14 +
+ .../llc-prefetch-full-pldl3strm.c             |   14 +
+ .../llc-prefetch-full-pldl4keep.c             |   14 +
+ .../llc-prefetch-full-pldl4strm.c             |   14 +
+ .../llc-prefetch-full-pstl1keep.c             |   14 +
+ .../llc-prefetch-full-pstl1strm.c             |   14 +
+ .../llc-prefetch-full-pstl2keep.c             |   14 +
+ .../llc-prefetch-full-pstl2strm.c             |   14 +
+ .../llc-prefetch-full-pstl3keep.c             |   14 +
+ .../llc-prefetch-full-pstl3strm.c             |   14 +
+ .../llc-prefetch-full-pstl4keep.c             |   14 +
+ .../llc-prefetch-full-pstl4strm.c             |   14 +
+ .../gcc.dg/llc-allocate/llc-ref-trace.c       |   62 +
+ .../gfortran.dg/llc-allocate/llc-3.f90        |  211 +
+ .../gfortran.dg/llc-allocate/llc-allocate.exp |   29 +
+ .../llc-trace-multiple-base-var.f90           |   62 +
+ .../llc-unknown-type-size-unit.f90            |   58 +
+ .../llc-allocate/llc-wrf-4-outer-loop-num.f90 |  320 ++
+ gcc/timevar.def                               |    2 +
+ gcc/toplev.cc                                 |    6 +
+ gcc/tree-cfg.cc                               |   11 +
+ gcc/tree-cfg.h                                |    1 +
+ gcc/tree-pass.h                               |    3 +
+ gcc/tree-scalar-evolution.cc                  |    8 +-
+ gcc/tree-scalar-evolution.h                   |    3 +-
+ gcc/tree-ssa-llc-allocate.cc                  | 4150 +++++++++++++++++
+ gcc/tree-ssa-loop-niter.cc                    |   38 +-
+ gcc/tree-ssa-loop-niter.h                     |    3 +-
+ gcc/tree-vect-loop-manip.cc                   |  266 ++
+ gcc/tree-vect-loop.cc                         |   10 +-
+ gcc/tree-vectorizer.h                         |    1 +
+ 76 files changed, 7308 insertions(+), 45 deletions(-)
+ create mode 100644 gcc/testsuite/g++.dg/llc-allocate/llc-allocate.exp
+ create mode 100644 gcc/testsuite/g++.dg/llc-allocate/llc-relion-expand-kernels.C
+ create mode 100644 gcc/testsuite/g++.dg/llc-allocate/multidim_array.h
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-2.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-allocate.exp
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-cross-bb-indir-mem-acc.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-extend-outer-loop.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-feedback-branch-in-loop.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-feedback-break-in-loop.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-feedback-goto-in-loop.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-feedback-same-loop-cycle.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4keep.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4strm.c
+ create mode 100644 gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c
+ create mode 100644 gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90
+ create mode 100644 gcc/testsuite/gfortran.dg/llc-allocate/llc-allocate.exp
+ create mode 100644 gcc/testsuite/gfortran.dg/llc-allocate/llc-trace-multiple-base-var.f90
+ create mode 100644 gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90
+ create mode 100644 gcc/testsuite/gfortran.dg/llc-allocate/llc-wrf-4-outer-loop-num.f90
+ create mode 100644 gcc/tree-ssa-llc-allocate.cc
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index 65f683bbd..ef7733580 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1659,6 +1659,7 @@ OBJS = \
+ 	tree-ssa-loop-niter.o \
+ 	tree-ssa-loop-array-widen-compare.o \
+ 	tree-ssa-loop-prefetch.o \
++	tree-ssa-llc-allocate.o \
+ 	tree-ssa-loop-split.o \
+ 	tree-ssa-loop-unswitch.o \
+ 	tree-ssa-loop.o \
+diff --git a/gcc/auto-profile.cc b/gcc/auto-profile.cc
+index 5e85381ce..97c3bafd5 100644
+--- a/gcc/auto-profile.cc
++++ b/gcc/auto-profile.cc
+@@ -49,6 +49,9 @@ along with GCC; see the file COPYING3.  If not see
+ #include "auto-profile.h"
+ #include "tree-pretty-print.h"
+ #include "gimple-pretty-print.h"
++#include <set>
++#include <vector>
++#include <algorithm>
+ 
+ /* The following routines implements AutoFDO optimization.
+ 
+@@ -95,6 +98,8 @@ along with GCC; see the file COPYING3.  If not see
+ */
+ 
+ #define DEFAULT_AUTO_PROFILE_FILE "fbdata.afdo"
++#define DEFAULT_CACHE_MISSES_PROFILE_FILE "cmsdata.gcov"
++#define DEFAULT_ADDITIONAL_PROFILE_FILE "addldata.gcov"
+ #define AUTO_PROFILE_VERSION 2
+ 
+ namespace autofdo
+@@ -117,6 +122,14 @@ private:
+   bool annotated_;
+ };
+ 
++/* pair <function_uid, count>.  */
++static bool
++event_count_cmp (std::pair<unsigned, gcov_type> &a,
++		 std::pair<unsigned, gcov_type> &b)
++{
++  return a.second > b.second;
++}
++
+ /* Represent a source location: (function_decl, lineno).  */
++typedef std::pair<tree, unsigned> decl_lineno;
+ 
+@@ -311,6 +324,9 @@ public:
+   /* Mark LOC as annotated.  */
+   void mark_annotated (location_t loc);
+ 
++  /* Compute total count threshold of top functions in sampled data.  */
++  gcov_type calc_topn_function_total_count_thres (unsigned topn) const;
++
+ private:
+   /* Map from function_instance name index (in string_table) to
+      function_instance.  */
+@@ -338,6 +354,244 @@ static autofdo_source_profile *afdo_source_profile;
+ /* gcov_summary structure to store the profile_info.  */
+ static gcov_summary *afdo_profile_info;
+ 
++/* Check opts->x_flags and put file name into EVENT_FILES.  */
++
++static bool
++get_all_profile_names (const char **event_files)
++{
++  if (!(flag_auto_profile
++        || (flag_cache_misses_profile || flag_additional_profile)))
++    {
++      return false;
++    }
++
++  event_files[INST_EXEC] = auto_profile_file;
++
++  if (flag_cache_misses_profile)
++    {
++      if (cache_misses_profile_file == NULL)
++        {
++          if (additional_profile_file == NULL)
++        {
++          additional_profile_file = DEFAULT_ADDITIONAL_PROFILE_FILE;
++        }
++      event_files[PMU_EVENT] = additional_profile_file;
++        }
++      event_files[CACHE_MISSES] = cache_misses_profile_file;
++    }
++  else if (flag_additional_profile)
++    {
++      if (additional_profile_file == NULL)
++        {
++          additional_profile_file = DEFAULT_ADDITIONAL_PROFILE_FILE;
++        }
++      event_files[PMU_EVENT] = additional_profile_file;
++    }
++
++  return true;
++}
++
++static void read_profile (void);
++
++/* Maintain multiple profile data of different events with event_loc_count_map
++   and event_func_count_map.  */
++
++class extend_auto_profile
++{
++public:
++  bool auto_profile_exist (enum event_type type);
++  gcov_type get_loc_count (location_t, event_type);
++  gcov_type get_func_count (unsigned, event_type);
++  gcov_type get_topn_function_total_count_thres () const;
++  struct rank_info get_func_rank (unsigned, enum event_type);
++  /* There should be only one instance of class EXTEND_AUTO_PROFILE.  */
++  static extend_auto_profile *create ()
++    {
++      extend_auto_profile *map = new extend_auto_profile ();
++      if (map->read ())
++	{
++	  return map;
++	}
++      delete map;
++      return NULL;
++    }
++private:
++  /* Basic maps of extend_auto_profile.  */
++  typedef std::map<location_t, gcov_type> loc_count_map;
++  typedef std::map<unsigned, gcov_type> func_count_map;
++
++  /* Map of function_uid to its descending order rank of counts.  */
++  typedef std::map<unsigned, unsigned> rank_map;
++
++  /* Mapping hardware events to corresponding basic maps.  */
++  typedef std::map<enum event_type, loc_count_map> event_loc_count_map;
++  typedef std::map<enum event_type, func_count_map> event_func_count_map;
++  typedef std::map<enum event_type, rank_map> event_rank_map;
++
++  extend_auto_profile () {}
++  bool read ();
++  void set_loc_count ();
++  void process_extend_source_profile ();
++  void read_extend_afdo_file (const char*, event_type);
++  void rank_all_func ();
++  void dump_event ();
++  event_loc_count_map event_loc_map;
++  event_func_count_map event_func_map;
++  event_rank_map func_rank;
++  event_type profile_type;
++  gcov_type topn_function_total_count_thres;
++};
++
++/* Member functions for extend_auto_profile.  */
++
++bool
++extend_auto_profile::auto_profile_exist (enum event_type type)
++{
++  switch (type)
++    {
++      case INST_EXEC:
++	return event_func_map.count (INST_EXEC) != 0
++	       || event_loc_map.count (INST_EXEC) != 0;
++      case CACHE_MISSES:
++	return event_func_map.count (CACHE_MISSES) != 0
++	       || event_loc_map.count (CACHE_MISSES) != 0;
++      case PMU_EVENT:
++	return event_func_map.count (PMU_EVENT) != 0
++	       || event_loc_map.count (PMU_EVENT) != 0;
++      default:
++	  return false;
++    }
++}
++
++void
++extend_auto_profile::dump_event ()
++{
++  if (dump_file)
++    {
++      switch (profile_type)
++	{
++	  case INST_EXEC:
++	    fprintf (dump_file, "Processing event instruction execution.\n");
++	    break;
++	  case CACHE_MISSES:
++	    fprintf (dump_file, "Processing event cache misses.\n");
++	    break;
++        case PMU_EVENT:
++	    fprintf (dump_file, "Processing other PMU events.\n");
++	    break;
++	  default:
++	    break;
++	}
++    }
++}
++
++/* Return true if any profile data was read.  */
++
++bool
++extend_auto_profile::read ()
++{
++  const char *event_files[EVENT_NUMBER] = {NULL};
++  if (!get_all_profile_names (event_files))
++    {
++      return false;
++    }
++
++  /* Backup AFDO_STRING_TABLE and AFDO_SOURCE_PROFILE since we will create
++     new ones for each event_type.  */
++  autofdo::string_table *string_table_afdo = afdo_string_table;
++  autofdo::autofdo_source_profile *source_profile_afdo = afdo_source_profile;
++
++  for (unsigned i = 0; i < EVENT_NUMBER; i++)
++    {
++      if (event_files[i] == NULL)
++	{
++	  continue;
++	}
++      profile_type = (enum event_type) i;
++      dump_event ();
++      gcov_close ();
++      auto_profile_file = event_files[i];
++      read_profile ();
++      gcov_close ();
++
++      topn_function_total_count_thres = param_llc_allocate_func_counts_threshold;
++      if (param_llc_allocate_func_topn > 0 && profile_type == PMU_EVENT)
++        {
++	  topn_function_total_count_thres
++	    = afdo_source_profile->calc_topn_function_total_count_thres (
++		param_llc_allocate_func_topn);
++        }
++
++      process_extend_source_profile ();
++
++      delete afdo_source_profile;
++      delete afdo_string_table;
++    }
++
++  /* Restore AFDO_STRING_TABLE and AFDO_SOURCE_PROFILE.  Function
++     END_AUTO_PROFILE will free them at the end of compilation.  */
++  afdo_string_table = string_table_afdo;
++  afdo_source_profile = source_profile_afdo;
++  return true;
++}
++
++/* Helper functions.  */
++
++gcov_type
++extend_auto_profile::get_loc_count (location_t loc, event_type type)
++{
++  event_loc_count_map::iterator event_iter = event_loc_map.find (type);
++  if (event_iter != event_loc_map.end ())
++    {
++      loc_count_map::iterator loc_iter = event_iter->second.find (loc);
++      if (loc_iter != event_iter->second.end ())
++	{
++	  return loc_iter->second;
++	}
++    }
++  return 0;
++}
++
++struct rank_info
++extend_auto_profile::get_func_rank (unsigned decl_uid, enum event_type type)
++{
++  struct rank_info info = {0, 0};
++  event_rank_map::iterator event_iter = func_rank.find (type);
++  if (event_iter != func_rank.end ())
++    {
++      rank_map::iterator func_iter = event_iter->second.find (decl_uid);
++      if (func_iter != event_iter->second.end ())
++	{
++	  info.rank = func_iter->second;
++	  info.total = event_iter->second.size ();
++	}
++    }
++  return info;
++}
++
++gcov_type
++extend_auto_profile::get_func_count (unsigned decl_uid, event_type type)
++{
++  event_func_count_map::iterator event_iter = event_func_map.find (type);
++  if (event_iter != event_func_map.end ())
++    {
++      func_count_map::iterator func_iter = event_iter->second.find (decl_uid);
++      if (func_iter != event_iter->second.end ())
++	{
++	  return func_iter->second;
++	}
++    }
++  return 0;
++}
++
++gcov_type
++extend_auto_profile::get_topn_function_total_count_thres () const
++{
++  return topn_function_total_count_thres;
++}
++
++static extend_auto_profile *extend_profile;
++
+ /* Helper functions.  */
+ 
+ /* Return the original name of NAME: strip the suffix that starts
+@@ -483,7 +737,7 @@ string_table::get_index (const char *name) const
+   return iter->second;
+ }
+ 
+-/* Return the index of a given function DECL. Return -1 if DECL is not 
++/* Return the index of a given function DECL. Return -1 if DECL is not
+    found in string table.  */
+ 
+ int
+@@ -917,6 +1171,31 @@ autofdo_source_profile::get_function_instance_by_inline_stack (
+   return s;
+ }
+ 
++/* Compute total count threshold of top functions in sampled data.  */
++
++gcov_type
++autofdo_source_profile::calc_topn_function_total_count_thres (
++    unsigned topn) const
++{
++  std::set<gcov_type> func_counts;
++  for (name_function_instance_map::const_iterator iter = map_.begin ();
++       iter != map_.end (); ++iter)
++    {
++      if (func_counts.size () < topn)
++        func_counts.insert (iter->second->total_count ());
++      else if (*func_counts.begin () < iter->second->total_count ())
++        {
++          func_counts.erase (func_counts.begin ());
++          func_counts.insert (iter->second->total_count ());
++        }
++    }
++ 
++  gcov_type func_counts_topn = *func_counts.begin ();
++  if (func_counts.size () == topn
++      && param_llc_allocate_func_counts_threshold < func_counts_topn)
++    return func_counts_topn;
++}
++
+ /* Module profile is only used by LIPO. Here we simply ignore it.  */
+ 
+ static void
+@@ -1842,6 +2121,132 @@ auto_profile (void)
+ 
+   return TODO_rebuild_cgraph_edges;
+ }
++
++
++void
++extend_auto_profile::rank_all_func ()
++{
++  std::vector<std::pair<unsigned, gcov_type> > func_sorted;
++  event_func_count_map::iterator event_iter
++				 = event_func_map.find (profile_type);
++  if (event_iter != event_func_map.end ())
++    {
++      func_count_map::iterator func_iter;
++      for (func_iter = event_iter->second.begin ();
++	   func_iter != event_iter->second.end (); func_iter++)
++	{
++	  func_sorted.push_back (std::make_pair (func_iter->first,
++						 func_iter->second));
++	}
++
++      std::sort (func_sorted.begin (), func_sorted.end (), event_count_cmp);
++
++      for (unsigned i = 0; i < func_sorted.size (); ++i)
++	{
++	  func_rank[profile_type][func_sorted[i].first] = i + 1;
++	}
++    }
++}
++
++/* Iterate stmts in cfun and maintain its count to EVENT_LOC_MAP.  */
++
++void
++extend_auto_profile::set_loc_count ()
++{
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      gimple_stmt_iterator gsi;
++      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  count_info info;
++	  gimple *stmt = gsi_stmt (gsi);
++	  if (gimple_clobber_p (stmt) || is_gimple_debug (stmt))
++	    {
++	      continue;
++	    }
++	  if (afdo_source_profile->get_count_info (stmt, &info))
++	    {
++	      location_t loc = gimple_location (stmt);
++	      event_loc_map[profile_type][loc] += info.count;
++	      if (dump_file && (dump_flags & TDF_DETAILS))
++		{
++		  fprintf (dump_file, "stmt ");
++		  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
++		  fprintf (dump_file, "counts %ld\n",
++			   event_loc_map[profile_type][loc]);
++		}
++	    }
++	}
++    }
++}
++
++/* Process data in extend_auto_source_profile, save them into two maps.
++   1. gimple_location to count.
++   2. function_index to count.  */
++void
++extend_auto_profile::process_extend_source_profile ()
++{
++  struct cgraph_node *node;
++  if (symtab->state == FINISHED)
++    {
++      return;
++    }
++  FOR_EACH_FUNCTION (node)
++    {
++      if (!gimple_has_body_p (node->decl) || node->inlined_to)
++	{
++	  continue;
++	}
++
++      /* Don't profile functions produced for builtin stuff.  */
++      if (DECL_SOURCE_LOCATION (node->decl) == BUILTINS_LOCATION)
++	{
++	  continue;
++	}
++
++      function *fn = DECL_STRUCT_FUNCTION (node->decl);
++      push_cfun (fn);
++
++      const function_instance *s
++      = afdo_source_profile->get_function_instance_by_decl (
++	  current_function_decl);
++
++      if (s == NULL)
++	{
++	  pop_cfun ();
++	  continue;
++	}
++      unsigned int decl_uid = DECL_UID (current_function_decl);
++      gcov_type count = s->total_count ();
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Extend auto-profile for function %s.\n",
++			       node->dump_name ());
++	}
++      event_func_map[profile_type][decl_uid] += count;
++      set_loc_count ();
++      pop_cfun ();
++    }
++  rank_all_func ();
++}
++
++/* Main entry of extend_auto_profile.  */
++
++static void
++extend_source_profile ()
++{
++  extend_profile = autofdo::extend_auto_profile::create ();
++  if (dump_file)
++    {
++      if (extend_profile == NULL)
++	{
++	  fprintf (dump_file, "No profile file is found.\n");
++	  return;
++	}
++      fprintf (dump_file, "Extend profile info generated.\n");
++    }
++}
+ } /* namespace autofdo.  */
+ 
+ /* Read the profile from the profile data file.  */
+@@ -1870,6 +2275,48 @@ end_auto_profile (void)
+   profile_info = NULL;
+ }
+ 
++/* Extern function to get profile info in other passes.  */
++
++bool
++profile_exist (enum event_type type)
++{
++  return autofdo::extend_profile != NULL
++	 && autofdo::extend_profile->auto_profile_exist (type);
++}
++
++gcov_type
++event_get_loc_count (location_t loc, event_type type)
++{
++  return autofdo::extend_profile->get_loc_count (loc, type);
++}
++
++gcov_type
++event_get_func_count (unsigned decl_uid, event_type type)
++{
++  return autofdo::extend_profile->get_func_count (decl_uid, type);
++}
++
++struct rank_info
++event_get_func_rank (unsigned decl_uid, enum event_type type)
++{
++  return autofdo::extend_profile->get_func_rank (decl_uid, type);
++}
++
++gcov_type
++event_get_topn_function_total_count_thres ()
++{
++  return autofdo::extend_profile->get_topn_function_total_count_thres ();
++}
++
++void
++free_extend_profile_info ()
++{
++  if (autofdo::extend_profile != NULL)
++    {
++      delete autofdo::extend_profile;
++    }
++}
++
+ /* Returns TRUE if EDGE is hot enough to be inlined early.  */
+ 
+ bool
+@@ -1931,8 +2378,50 @@ public:
+ 
+ } // anon namespace
+ 
++namespace
++{
++const pass_data pass_data_ipa_extend_auto_profile =
++{
++  SIMPLE_IPA_PASS, /* type */
++  "ex-afdo", /* name */
++  OPTGROUP_NONE, /* optinfo_flags */
++  TV_IPA_EXTEND_AUTO_PROFILE, /* tv_id */
++  0, /* properties_required */
++  0, /* properties_provided */
++  0, /* properties_destroyed */
++  0, /* todo_flags_start */
++  0, /* todo_flags_finish */
++};
++
++class pass_ipa_extend_auto_profile : public simple_ipa_opt_pass
++{
++public:
++  pass_ipa_extend_auto_profile (gcc::context *ctxt)
++    : simple_ipa_opt_pass (pass_data_ipa_extend_auto_profile, ctxt)
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *) {return (flag_ipa_extend_auto_profile > 0);}
++  virtual unsigned int execute (function *);
++
++};
++
++unsigned int
++pass_ipa_extend_auto_profile::execute (function *fun)
++{
++  autofdo::extend_source_profile ();
++  return 0;
++}
++} // anon namespace
++
+ simple_ipa_opt_pass *
+ make_pass_ipa_auto_profile (gcc::context *ctxt)
+ {
+   return new pass_ipa_auto_profile (ctxt);
+ }
++
++simple_ipa_opt_pass *
++make_pass_ipa_extend_auto_profile (gcc::context *ctxt)
++{
++  return new pass_ipa_extend_auto_profile (ctxt);
++}
+diff --git a/gcc/auto-profile.h b/gcc/auto-profile.h
+index bf3f90f2f..dea0b18e6 100644
+--- a/gcc/auto-profile.h
++++ b/gcc/auto-profile.h
+@@ -21,6 +21,14 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef AUTO_PROFILE_H
+ #define AUTO_PROFILE_H
+ 
++enum event_type
++{
++  INST_EXEC = 0,
++  CACHE_MISSES,
++  PMU_EVENT,
++  EVENT_NUMBER
++};
++
+ /* Read, process, finalize AutoFDO data structures.  */
+ extern void read_autofdo_file (void);
+ extern void end_auto_profile (void);
+@@ -28,4 +36,26 @@ extern void end_auto_profile (void);
+ /* Returns TRUE if EDGE is hot enough to be inlined early.  */
+ extern bool afdo_callsite_hot_enough_for_early_inline (struct cgraph_edge *);
+ 
++/* Check if profile exists before using this profile.  */
++extern bool profile_exist (enum event_type);
++
++/* Given func decl_uid or gimple location and event_type, return count.
++   Count is 0 if function or gimple is not sampled.  */
++extern gcov_type event_get_func_count (unsigned, enum event_type);
++extern gcov_type event_get_loc_count (location_t, enum event_type);
++extern gcov_type event_get_topn_function_total_count_thres ();
++
++struct rank_info
++{
++  unsigned total;
++  unsigned rank;
++};
++
++/* Given function decl_uid and event type, return rank_info.  Rank_info
++   is {0, 0} if function was not sampled.  */
++extern struct rank_info event_get_func_rank (unsigned, enum event_type);
++
++/* Free memory allocated by autofdo::extend_profile.  */
++extern void free_extend_profile_info ();
++
+ #endif /* AUTO_PROFILE_H */
+diff --git a/gcc/builtins.cc b/gcc/builtins.cc
+index 57929a42b..dc2e9c3f3 100644
+--- a/gcc/builtins.cc
++++ b/gcc/builtins.cc
+@@ -1352,6 +1352,85 @@ expand_builtin_prefetch (tree exp)
+     emit_insn (op0);
+ }
+ 
++/* Expand a call to __builtin_prefetch_full.  */
++
++static void
++expand_builtin_prefetch_full (tree exp)
++{
++  tree arg0, arg1, arg2;
++  int nargs;
++  rtx op0, op1, op2;
++
++  if (!validate_arglist (exp, POINTER_TYPE, 0))
++    return;
++
++  arg0 = CALL_EXPR_ARG (exp, 0);
++
++  /* Arguments 1 and 2 are optional; argument 1 (read/write) defaults to
++     zero (read) and argument 2 (locality) defaults to 3 (high degree of
++     locality).  */
++  nargs = call_expr_nargs (exp);
++  if (nargs > 1)
++    arg1 = CALL_EXPR_ARG (exp, 1);
++  else
++    arg1 = integer_zero_node;
++  if (nargs > 2)
++    arg2 = CALL_EXPR_ARG (exp, 2);
++  else
++    arg2 = integer_three_node;
++
++  /* Argument 0 is an address.  */
++  op0 = expand_expr (arg0, NULL_RTX, Pmode, EXPAND_NORMAL);
++
++  /* Argument 1 (read/write flag) must be a compile-time constant int.  */
++  if (TREE_CODE (arg1) != INTEGER_CST)
++    {
++      error ("second argument to %<__builtin_prefetch_full%> must be a "
++             "constant");
++      arg1 = integer_zero_node;
++    }
++  op1 = expand_normal (arg1);
++  /* Argument 1 must be either zero or one.  */
++  if (INTVAL (op1) != 0 && INTVAL (op1) != 1)
++    {
++      warning (0, "invalid second argument to %<__builtin_prefetch_full%>;"
++	       " using zero");
++      op1 = const0_rtx;
++    }
++
++  /* Argument 2 (locality) must be a compile-time constant int.  */
++  if (TREE_CODE (arg2) != INTEGER_CST)
++    {
++      error ("third argument to %<__builtin_prefetch_full%> must be a "
++             "constant");
++      arg2 = integer_zero_node;
++    }
++  op2 = expand_normal (arg2);
++  /* Argument 2 must be 0-7.  */
++  if (INTVAL (op2) < 0 || INTVAL (op2) > 7)
++    {
++      warning (0, "invalid third argument to %<__builtin_prefetch_full%>; "
++               "using zero");
++      op2 = const0_rtx;
++    }
++
++  if (targetm.have_prefetch_full ())
++    {
++      class expand_operand ops[3];
++
++      create_address_operand (&ops[0], op0);
++      create_integer_operand (&ops[1], INTVAL (op1));
++      create_integer_operand (&ops[2], INTVAL (op2));
++      if (maybe_expand_insn (targetm.code_for_prefetch_full, 3, ops))
++	return;
++    }
++
++  /* Don't do anything with direct references to volatile memory, but
++     generate code to handle other side effects.  */
++  if (!MEM_P (op0) && side_effects_p (op0))
++    emit_insn (op0);
++}
++
+ /* Get a MEM rtx for expression EXP which is the address of an operand
+    to be used in a string instruction (cmpstrsi, cpymemsi, ..).  LEN is
+    the maximum length of the block of memory that might be accessed or
+@@ -7598,6 +7677,9 @@ expand_builtin (tree exp, rtx target, rtx subtarget, machine_mode mode,
+     case BUILT_IN_PREFETCH:
+       expand_builtin_prefetch (exp);
+       return const0_rtx;
++    case BUILT_IN_PREFETCH_FULL:
++      expand_builtin_prefetch_full (exp);
++      return const0_rtx;
+ 
+     case BUILT_IN_INIT_TRAMPOLINE:
+       return expand_builtin_init_trampoline (exp, true);
+diff --git a/gcc/builtins.def b/gcc/builtins.def
+index 005976f34..f2e0c357d 100644
+--- a/gcc/builtins.def
++++ b/gcc/builtins.def
+@@ -924,6 +924,7 @@ DEF_GCC_BUILTIN        (BUILT_IN_POPCOUNTL, "popcountl", BT_FN_INT_ULONG, ATTR_C
+ DEF_GCC_BUILTIN        (BUILT_IN_POPCOUNTLL, "popcountll", BT_FN_INT_ULONGLONG, ATTR_CONST_NOTHROW_LEAF_LIST)
+ DEF_EXT_LIB_BUILTIN    (BUILT_IN_POSIX_MEMALIGN, "posix_memalign", BT_FN_INT_PTRPTR_SIZE_SIZE, ATTR_NOTHROW_NONNULL_LEAF)
+ DEF_GCC_BUILTIN        (BUILT_IN_PREFETCH, "prefetch", BT_FN_VOID_CONST_PTR_VAR, ATTR_NOVOPS_LEAF_LIST)
++DEF_GCC_BUILTIN        (BUILT_IN_PREFETCH_FULL, "prefetch_full", BT_FN_VOID_CONST_PTR_VAR, ATTR_NOVOPS_LEAF_LIST)
+ DEF_LIB_BUILTIN        (BUILT_IN_REALLOC, "realloc", BT_FN_PTR_PTR_SIZE, ATTR_ALLOC_WARN_UNUSED_RESULT_SIZE_2_NOTHROW_LEAF_LIST)
+ DEF_GCC_BUILTIN        (BUILT_IN_RETURN, "return", BT_FN_VOID_PTR, ATTR_NORETURN_NOTHROW_LEAF_LIST)
+ DEF_GCC_BUILTIN        (BUILT_IN_RETURN_ADDRESS, "return_address", BT_FN_PTR_UINT, ATTR_LEAF_LIST)
+diff --git a/gcc/cfgloop.h b/gcc/cfgloop.h
+index d2714e20c..794bc3ecc 100644
+--- a/gcc/cfgloop.h
++++ b/gcc/cfgloop.h
+@@ -272,6 +272,9 @@ public:
+      the basic-block from being collected but its index can still be
+      reused.  */
+   basic_block former_header;
++
++  /* Number of latch executions from vectorization.  */
++  tree vec_nb_iterations;
+ };
+ 
+ /* Set if the loop is known to be infinite.  */
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 6ab7ba4cc..e6ffa1c58 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -1148,6 +1148,26 @@ Common Joined RejectNegative Var(auto_profile_file)
+ Use sample profile information for call graph node weights. The profile
+ file is specified in the argument.
+ 
++fcache-misses-profile
++Common Var(flag_cache_misses_profile)
++Use sample profile information for source code cache miss count.  The default
++profile file is cmsdata.gcov in `pwd`.
++
++fcache-misses-profile=
++Common Joined RejectNegative Var(cache_misses_profile_file)
++Use sample profile information for source code cache miss count.  The profile
++file is specified in the argument.
++
++fadditional-profile
++Common Var(flag_additional_profile)
++Use additional PMU-event sample profile information for source code bb count.
++The default profile file is addldata.gcov in `pwd`.
++
++fadditional-profile=
++Common Joined RejectNegative Var(additional_profile_file)
++Use additional PMU-event sample profile information for source code bb count.
++The profile file is specified in the argument.
++
+ ; -fcheck-bounds causes gcc to generate array bounds checks.
+ ; For C, C++ and ObjC: defaults off.
+ ; For Java: defaults to on.
+@@ -2074,6 +2094,10 @@ fipa-struct-sfc-shadow
+ Common Var(flag_ipa_struct_sfc_shadow) Init(0) Optimization
+ Enable field shadowing optimization in static struct field compression.
+ 
++fipa-extend-auto-profile
++Common Var(flag_ipa_extend_auto_profile)
++Use sample profile information for source code.
++
+ fipa-vrp
+ Common Var(flag_ipa_vrp) Optimization
+ Perform IPA Value Range Propagation.
+@@ -2424,6 +2448,10 @@ fipa-prefetch
+ Common Var(flag_ipa_prefetch) Init(0) Optimization
+ Generate prefetch instructions, if available, using IPA info.
+ 
++fllc-allocate
++Common Var(flag_llc_allocate) Init(-1) Optimization
++Generate LLC hint instructions.
++
+ fprofile
+ Common Var(profile_flag)
+ Enable basic program profiling code.
+diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
+index cbb844fbc..af0881f7a 100644
+--- a/gcc/config/aarch64/aarch64-protos.h
++++ b/gcc/config/aarch64/aarch64-protos.h
+@@ -702,12 +702,16 @@ extern struct tune_params aarch64_tune_params;
+   T (PLDL2STRM, pldl2strm, 3) \
+   T (PLDL3KEEP, pldl3keep, 4) \
+   T (PLDL3STRM, pldl3strm, 5) \
++  T (PLDL4KEEP, pldl4keep, 6) \
++  T (PLDL4STRM, pldl4strm, 7) \
+   T (PSTL1KEEP, pstl1keep, 8) \
+   T (PSTL1STRM, pstl1strm, 9) \
+   T (PSTL2KEEP, pstl2keep, 10) \
+   T (PSTL2STRM, pstl2strm, 11) \
+   T (PSTL3KEEP, pstl3keep, 12) \
+-  T (PSTL3STRM, pstl3strm, 13)
++  T (PSTL3STRM, pstl3strm, 13) \
++  T (PSTL4KEEP, pstl4keep, 14) \
++  T (PSTL4STRM, pstl4strm, 15)
+ 
+ #define AARCH64_SVENUM(UPPER, LOWER, VALUE) AARCH64_SV_##UPPER = VALUE,
+ enum aarch64_svpattern {
+diff --git a/gcc/config/aarch64/aarch64-sve.md b/gcc/config/aarch64/aarch64-sve.md
+index a8a5dc3a2..7808abf70 100644
+--- a/gcc/config/aarch64/aarch64-sve.md
++++ b/gcc/config/aarch64/aarch64-sve.md
+@@ -1952,7 +1952,7 @@
+ (define_insn "@aarch64_sve_prefetch"
+   [(prefetch (unspec:DI
+ 	       [(match_operand: 0 "register_operand" "Upl")
+-		(match_operand:SVE_FULL_I 1 "aarch64_sve_prefetch_operand" "UP")
++		(match_operand:SVE_FULL 1 "aarch64_sve_prefetch_operand" "UP")
+ 		(match_operand:DI 2 "const_int_operand")]
+ 	       UNSPEC_SVE_PREFETCH)
+ 	     (match_operand:DI 3 "const_int_operand")
+@@ -1985,14 +1985,14 @@
+ ;; 6: the prefetch operator (an svprfop)
+ ;; 7: the normal RTL prefetch rw flag
+ ;; 8: the normal RTL prefetch locality value
+-(define_insn "@aarch64_sve_gather_prefetch"
++(define_insn "@aarch64_sve_gather_prefetch"
+   [(prefetch (unspec:DI
+ 	       [(match_operand:VNx4BI 0 "register_operand" "Upl, Upl, Upl, Upl, Upl, Upl")
+-		(match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk, rk, rk")
++		(match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk, rk, rk")
+ 		(match_operand:VNx4SI_ONLY 2 "register_operand" "w, w, w, w, w, w")
+ 		(match_operand:DI 3 "const_int_operand" "i, i, Z, Ui1, Z, Ui1")
+-		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, Ui1, i, i")
+-		(match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
++		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, Ui1, i, i")
++		(match_operand:SVE_FULL 5 "aarch64_simd_imm_zero")
+ 		(match_operand:DI 6 "const_int_operand")]
+ 	       UNSPEC_SVE_PREFETCH_GATHER)
+ 	     (match_operand:DI 7 "const_int_operand")
+@@ -2000,12 +2000,12 @@
+   "TARGET_SVE && TARGET_NON_STREAMING"
+   {
+     static const char *const insns[][2] = {
+-      "prf", "%0, [%2.s]",
+-      "prf", "%0, [%2.s, #%1]",
++      "prf", "%0, [%2.s]",
++      "prf", "%0, [%2.s, #%1]",
+       "prfb", "%0, [%1, %2.s, sxtw]",
+       "prfb", "%0, [%1, %2.s, uxtw]",
+-      "prf", "%0, [%1, %2.s, sxtw %p4]",
+-      "prf", "%0, [%1, %2.s, uxtw %p4]"
++      "prf", "%0, [%1, %2.s, sxtw %p4]",
++      "prf", "%0, [%1, %2.s, uxtw %p4]"
+     };
+     const char *const *parts = insns[which_alternative];
+     return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
+@@ -2014,14 +2014,14 @@
+ 
+ ;; Predicated gather prefetches for 64-bit elements.  The value of operand 3
+ ;; doesn't matter in this case.
+-(define_insn "@aarch64_sve_gather_prefetch"
++(define_insn "@aarch64_sve_gather_prefetch"
+   [(prefetch (unspec:DI
+ 	       [(match_operand:VNx2BI 0 "register_operand" "Upl, Upl, Upl, Upl")
+-		(match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk")
++		(match_operand:DI 1 "aarch64_sve_gather_offset_" "Z, vg, rk, rk")
+ 		(match_operand:VNx2DI_ONLY 2 "register_operand" "w, w, w, w")
+ 		(match_operand:DI 3 "const_int_operand")
+-		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, i")
+-		(match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
++		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, Ui1, Ui1, i")
++		(match_operand:SVE_FULL 5 "aarch64_simd_imm_zero")
+ 		(match_operand:DI 6 "const_int_operand")]
+ 	       UNSPEC_SVE_PREFETCH_GATHER)
+ 	     (match_operand:DI 7 "const_int_operand")
+@@ -2029,10 +2029,10 @@
+   "TARGET_SVE && TARGET_NON_STREAMING"
+   {
+     static const char *const insns[][2] = {
+-      "prf", "%0, [%2.d]",
+-      "prf", "%0, [%2.d, #%1]",
++      "prf", "%0, [%2.d]",
++      "prf", "%0, [%2.d, #%1]",
+       "prfb", "%0, [%1, %2.d]",
+-      "prf", "%0, [%1, %2.d, lsl %p4]"
++      "prf", "%0, [%1, %2.d, lsl %p4]"
+     };
+     const char *const *parts = insns[which_alternative];
+     return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
+@@ -2040,7 +2040,7 @@
+ )
+ 
+ ;; Likewise, but with the offset being sign-extended from 32 bits.
+-(define_insn_and_rewrite "*aarch64_sve_gather_prefetch_sxtw"
++(define_insn_and_rewrite "*aarch64_sve_gather_prefetch_sxtw"
+   [(prefetch (unspec:DI
+ 	       [(match_operand:VNx2BI 0 "register_operand" "Upl, Upl")
+ 		(match_operand:DI 1 "register_operand" "rk, rk")
+@@ -2051,8 +2051,8 @@
+ 		       (match_operand:VNx2DI 2 "register_operand" "w, w")))]
+ 		  UNSPEC_PRED_X)
+ 		(match_operand:DI 3 "const_int_operand")
+-		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i")
+-		(match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
++		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i")
++		(match_operand:SVE_FULL 5 "aarch64_simd_imm_zero")
+ 		(match_operand:DI 6 "const_int_operand")]
+ 	       UNSPEC_SVE_PREFETCH_GATHER)
+ 	     (match_operand:DI 7 "const_int_operand")
+@@ -2061,7 +2061,7 @@
+   {
+     static const char *const insns[][2] = {
+       "prfb", "%0, [%1, %2.d, sxtw]",
+-      "prf", "%0, [%1, %2.d, sxtw %p4]"
++      "prf", "%0, [%1, %2.d, sxtw %p4]"
+     };
+     const char *const *parts = insns[which_alternative];
+     return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
+@@ -2073,7 +2073,7 @@
+ )
+ 
+ ;; Likewise, but with the offset being zero-extended from 32 bits.
+-(define_insn "*aarch64_sve_gather_prefetch_uxtw"
++(define_insn "*aarch64_sve_gather_prefetch_uxtw"
+   [(prefetch (unspec:DI
+ 	       [(match_operand:VNx2BI 0 "register_operand" "Upl, Upl")
+ 		(match_operand:DI 1 "register_operand" "rk, rk")
+@@ -2081,8 +2081,8 @@
+ 		  (match_operand:VNx2DI 2 "register_operand" "w, w")
+ 		  (match_operand:VNx2DI 9 "aarch64_sve_uxtw_immediate"))
+ 		(match_operand:DI 3 "const_int_operand")
+-		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i")
+-		(match_operand:SVE_FULL_I 5 "aarch64_simd_imm_zero")
++		(match_operand:DI 4 "aarch64_gather_scale_operand_" "Ui1, i")
++		(match_operand:SVE_FULL 5 "aarch64_simd_imm_zero")
+ 		(match_operand:DI 6 "const_int_operand")]
+ 	       UNSPEC_SVE_PREFETCH_GATHER)
+ 	     (match_operand:DI 7 "const_int_operand")
+@@ -2091,7 +2091,7 @@
+   {
+     static const char *const insns[][2] = {
+       "prfb", "%0, [%1, %2.d, uxtw]",
+-      "prf", "%0, [%1, %2.d, uxtw %p4]"
++      "prf", "%0, [%1, %2.d, uxtw %p4]"
+     };
+     const char *const *parts = insns[which_alternative];
+     return aarch64_output_sve_prefetch (parts[0], operands[6], parts[1]);
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index e9c387b24..a06c2c515 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -4408,6 +4408,13 @@ aarch64_sve_data_mode_p (machine_mode mode)
+   return aarch64_classify_vector_mode (mode) & VEC_SVE_DATA;
+ }
+ 
++/* Return true if MODE is a full SVE data vector mode.  */
++static bool
++aarch64_full_sve_data_mode_p (machine_mode mode)
++{
++  return aarch64_classify_vector_mode (mode) == VEC_SVE_DATA;
++}
++
+ /* Return the number of defined bytes in one constituent vector of
+    SVE mode MODE, which has vector flags VEC_FLAGS.  */
+ static poly_int64
+@@ -31796,6 +31803,17 @@ aarch64_libgcc_floating_mode_supported_p
+ #undef TARGET_ASM_FUNCTION_EPILOGUE
+ #define TARGET_ASM_FUNCTION_EPILOGUE aarch64_sls_emit_blr_function_thunks
+ 
++#undef TARGET_VECTORIZE_CODE_FOR_PREFETCH
++#define TARGET_VECTORIZE_CODE_FOR_PREFETCH code_for_aarch64_sve_prefetch
++
++#undef TARGET_VECTORIZE_CODE_FOR_GATHER_PREFETCH
++#define TARGET_VECTORIZE_CODE_FOR_GATHER_PREFETCH \
++  code_for_aarch64_sve_gather_prefetch
++
++#undef TARGET_VECTORIZE_PREFETCH_HANDLEABLE_MODE_P
++#define TARGET_VECTORIZE_PREFETCH_HANDLEABLE_MODE_P \
++  aarch64_full_sve_data_mode_p
++
+ #undef TARGET_HAVE_SHADOW_CALL_STACK
+ #define TARGET_HAVE_SHADOW_CALL_STACK true
+ 
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index 2f46bc793..69d296556 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -925,6 +925,45 @@
+   [(set_attr "type" "load_4")]
+ )
+ 
++(define_insn "prefetch_full"
++  [(prefetch_full (match_operand:DI 0 "aarch64_prefetch_operand" "Dp")
++            (match_operand:QI 1 "const_int_operand" "")
++            (match_operand:QI 2 "const_int_operand" ""))]
++  ""
++  {
++    const char * pftype[2][8] =
++    {
++      {"prfm\\tPLDL1KEEP, %0",
++       "prfm\\tPLDL1STRM, %0",
++       "prfm\\tPLDL2KEEP, %0",
++       "prfm\\tPLDL2STRM, %0",
++       "prfm\\tPLDL3KEEP, %0",
++       "prfm\\tPLDL3STRM, %0",
++       "prfm\\tPLDL4KEEP, %0",
++       "prfm\\tPLDL4STRM, %0"},
++      {"prfm\\tPSTL1KEEP, %0",
++       "prfm\\tPSTL1STRM, %0",
++       "prfm\\tPSTL2KEEP, %0",
++       "prfm\\tPSTL2STRM, %0",
++       "prfm\\tPSTL3KEEP, %0",
++       "prfm\\tPSTL3STRM, %0",
++       "prfm\\tPSTL4KEEP, %0",
++       "prfm\\tPSTL4STRM, %0"},
++    };
++
++    int prfop = INTVAL (operands[2]);
++
++    gcc_assert (IN_RANGE (prfop, 0, 7));
++
++    /* PRFM accepts the same addresses as a 64-bit LDR so wrap
++       the address into a DImode MEM so that aarch64_print_operand knows
++       how to print it.  */
++    operands[0] = gen_rtx_MEM (DImode, operands[0]);
++    return pftype[INTVAL (operands[1])][prfop];
++  }
++  [(set_attr "type" "load_4")]
++)
++
+ (define_insn "trap"
+   [(trap_if (const_int 1) (const_int 8))]
+   ""
+diff --git a/gcc/dce.cc b/gcc/dce.cc
+index 6676cbcd4..964a0a6d0 100644
+--- a/gcc/dce.cc
++++ b/gcc/dce.cc
+@@ -72,6 +72,7 @@ deletable_insn_p_1 (rtx body)
+   switch (GET_CODE (body))
+     {
+     case PREFETCH:
++    case PREFETCH_FULL:
+     case TRAP_IF:
+       /* The UNSPEC case was added here because the ia-64 claims that
+ 	 USEs do not work after reload and generates UNSPECS rather
+diff --git a/gcc/doc/tm.texi b/gcc/doc/tm.texi
+index 50bbbbc42..16ada7aae 100644
+--- a/gcc/doc/tm.texi
++++ b/gcc/doc/tm.texi
+@@ -6278,6 +6278,27 @@ The default is @code{NULL_TREE} which means to not vectorize scatter
+ stores.
+ @end deftypefn
+ 
++@deftypefn {Target Hook} insn_code TARGET_VECTORIZE_CODE_FOR_PREFETCH (machine_mode @var{arg})
++This hook should return the decl of a function that implements the
++vectorized variant of the function with the @code{combined_fn} code
++@var{code} or @code{NULL_TREE} if such a function is not available.
++The return type of the vectorized function shall be of vector type
++@var{vec_type_out} and the argument types should be @var{vec_type_in}.
++@end deftypefn
++
++@deftypefn {Target Hook} insn_code TARGET_VECTORIZE_CODE_FOR_GATHER_PREFETCH (machine_mode @var{mode_to}, machine_mode @var{mode_form})
++This hook should return the decl of a function that implements the
++vectorized variant of the function with the @code{combined_fn} code
++@var{code} or @code{NULL_TREE} if such a function is not available.
++The return type of the vectorized function shall be of vector type
++@var{vec_type_out} and the argument types should be @var{vec_type_in}.
++@end deftypefn
++
++@deftypefn {Target Hook} bool TARGET_VECTORIZE_PREFETCH_HANDLEABLE_MODE_P (machine_mode @var{arg})
++This hook should return true if the target hardware architecture
++supports a full SVE data vector mode.
++@end deftypefn
++
+ @deftypefn {Target Hook} int TARGET_SIMD_CLONE_COMPUTE_VECSIZE_AND_SIMDLEN (struct cgraph_node *@var{}, struct cgraph_simd_clone *@var{}, @var{tree}, @var{int})
+ This hook should set @var{vecsize_mangle}, @var{vecsize_int}, @var{vecsize_float}
+ fields in @var{simd_clone} structure pointed by @var{clone_info} argument and also
+diff --git a/gcc/doc/tm.texi.in b/gcc/doc/tm.texi.in
+index cfda60304..88db8752e 100644
+--- a/gcc/doc/tm.texi.in
++++ b/gcc/doc/tm.texi.in
+@@ -4190,6 +4190,12 @@ address;  but often a machine-dependent strategy can generate better code.
+ 
+ @hook TARGET_VECTORIZE_BUILTIN_SCATTER
+ 
++@hook TARGET_VECTORIZE_CODE_FOR_PREFETCH
++
++@hook TARGET_VECTORIZE_CODE_FOR_GATHER_PREFETCH
++
++@hook TARGET_VECTORIZE_PREFETCH_HANDLEABLE_MODE_P
++
+ @hook TARGET_SIMD_CLONE_COMPUTE_VECSIZE_AND_SIMDLEN
+ 
+ @hook TARGET_SIMD_CLONE_ADJUST
+diff --git a/gcc/internal-fn.cc b/gcc/internal-fn.cc
+index 8b1733e20..19811106f 100644
+--- a/gcc/internal-fn.cc
++++ b/gcc/internal-fn.cc
+@@ -107,11 +107,13 @@ init_internal_fns ()
+    direct_internal_fn.  */
+ #define not_direct { -2, -2, false }
+ #define mask_load_direct { -1, 2, false }
++#define mask_prefetch_direct { -1, 2, false }
+ #define load_lanes_direct { -1, -1, false }
+ #define mask_load_lanes_direct { -1, -1, false }
+ #define gather_load_direct { 3, 1, false }
+ #define len_load_direct { -1, -1, false }
+ #define mask_store_direct { 3, 2, false }
++#define gather_prefetch_direct { 3, 1, false }
+ #define store_lanes_direct { 0, 0, false }
+ #define mask_store_lanes_direct { 0, 0, false }
+ #define vec_cond_mask_direct { 1, 0, false }
+@@ -2745,6 +2747,53 @@ expand_partial_load_optab_fn (internal_fn, gcall *stmt, convert_optab optab)
+ #define expand_mask_load_lanes_optab_fn expand_mask_load_optab_fn
+ #define expand_len_load_optab_fn expand_partial_load_optab_fn
+ 
++/* Expand MASK_PREFETCH call STMT using optab OPTAB.
++   .MASK_STORE (_5, 64B, loop_mask_98, vect__8.10_102);
++   .MASK_PREFETCH (_68, 64B, loop_mask_98, vect__8.10_102, 4);
++*/
++
++static void
++expand_mask_prefetch_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
++{
++  if (targetm.vectorize.code_for_prefetch == NULL
++      || targetm.vectorize.prefetch_handleable_mode_p == NULL)
++    return;
++
++  tree base = gimple_call_arg (stmt, 0);
++  if (base == NULL_TREE)
++    return;
++
++  tree maskt = gimple_call_arg (stmt, 2);
++  tree target = gimple_call_arg (stmt, 3);
++  tree prfop = gimple_call_arg (stmt, 4);
++  HOST_WIDE_INT prfop_int = tree_to_uhwi (prfop);
++  /* Bit 3 of the prfop selects stores over loads.  */
++  HOST_WIDE_INT access = prfop_int & 8;
++  /* Bits 1 and 2 specify the locality; 0-based for svprfop but
++     1-based for PREFETCH.  */
++  HOST_WIDE_INT locality = ((prfop_int >> 1) & 3) + 1;
++
++  machine_mode m_mode = TYPE_MODE (TREE_TYPE (target));
++  if (!targetm.vectorize.prefetch_handleable_mode_p (m_mode))
++    return;
++  insn_code icode = targetm.vectorize.code_for_prefetch (m_mode);
++
++  rtx mask = expand_normal (maskt);
++  rtx base_rtx = expand_normal (base);
++  /* Convert ptr_mode value X to Pmode.  */
++  if (ptr_mode == SImode)
++    base_rtx = simplify_gen_unary (ZERO_EXTEND, DImode, base_rtx, SImode);
++
++  unsigned i = 0;
++  class expand_operand ops[5];
++  create_input_operand (&ops[i++], mask, TYPE_MODE (TREE_TYPE (maskt)));
++  create_address_operand (&ops[i++], base_rtx);
++  create_integer_operand (&ops[i++], prfop_int);
++  create_integer_operand (&ops[i++], access);
++  create_integer_operand (&ops[i++], locality);
++  expand_insn (icode, i, ops);
++}
++
+ /* Expand MASK_STORE{,_LANES} or LEN_STORE call STMT using optab OPTAB.  */
+ 
+ static void
+@@ -3402,6 +3451,70 @@ contains_call_div_mod (rtx_insn *insn)
+   return false;
+  }
+ 
++/* Expand {MASK_,}GATHER_PREFETCH call CALL using optab OPTAB.
++  vect_patt_97.14_77 = .MASK_GATHER_LOAD (_78, vect__14.13_79, 8, { 0.0, ... }, loop_mask_87);
++  .MASK_GATHER_PREFETCH (_45, vect__14.13_79, 8, { 0.0, ... }, loop_mask_87, vect_patt_97.14_77, 4);
++*/
++
++static void
++expand_gather_prefetch_optab_fn (internal_fn, gcall *stmt, direct_optab optab)
++{
++  if (targetm.vectorize.code_for_gather_prefetch == NULL
++      || targetm.vectorize.prefetch_handleable_mode_p == NULL)
++    return;
++
++  /* Extracting tree nodes, only expand for scalar base and vector index.  */
++  tree base = gimple_call_arg (stmt, 0);
++  if (VECTOR_TYPE_P (TREE_TYPE (base)))
++    return;
++  tree offset = gimple_call_arg (stmt, 1);
++  if (VECTOR_TYPE_P (TREE_TYPE (offset)) == false)
++    return;
++
++  tree scale = gimple_call_arg (stmt, 2);
++  tree mask = gimple_call_arg (stmt, 4);
++  tree target = gimple_call_arg (stmt, 5);
++  tree prfop = gimple_call_arg (stmt, 6);
++
++  /* Convert to the rtx node.  */
++  rtx base_rtx = expand_normal (base);
++  /* Convert ptr_mode value X to Pmode.  */
++  if (ptr_mode == SImode)
++    base_rtx = simplify_gen_unary (ZERO_EXTEND, DImode, base_rtx, SImode);
++  rtx offset_rtx = expand_normal (offset);
++  rtx const_rtx = CONST0_RTX (TYPE_MODE (TREE_TYPE (target)));
++  rtx mask_rtx = expand_normal (mask);
++  HOST_WIDE_INT scale_int = tree_to_shwi (scale);
++  HOST_WIDE_INT prfop_int = tree_to_uhwi (prfop);
++  /* Bit 3 of the prfop selects stores over loads.  */
++  HOST_WIDE_INT access = prfop_int & 8;
++  /* Bits 1 and 2 specify the locality; 0-based for svprfop but
++     1-based for PREFETCH.  */
++  HOST_WIDE_INT locality = ((prfop_int >> 1) & 3) + 1;
++
++  /* Add the operands.  */
++  unsigned int i = 0;
++  class expand_operand ops[9];
++  create_input_operand (&ops[i++], mask_rtx, TYPE_MODE (TREE_TYPE (mask)));
++  create_address_operand (&ops[i++], base_rtx);
++  create_input_operand (&ops[i++], offset_rtx, TYPE_MODE (TREE_TYPE (offset)));
++  /* Check whether the index is unsigned.  */
++  create_integer_operand (&ops[i++], TYPE_UNSIGNED (TREE_TYPE (offset)));
++  create_integer_operand (&ops[i++], scale_int);
++  create_input_operand (&ops[i++], const_rtx, GET_MODE (const_rtx));
++  create_integer_operand (&ops[i++], prfop_int);
++  create_integer_operand (&ops[i++], access);
++  create_integer_operand (&ops[i++], locality);
++
++  machine_mode reg_mode = GET_MODE (offset_rtx);
++  machine_mode m_mode = TYPE_MODE (TREE_TYPE (target));
++  if (!targetm.vectorize.prefetch_handleable_mode_p (m_mode))
++    return;
++  insn_code icode = targetm.vectorize.code_for_gather_prefetch
++					       (m_mode, reg_mode);
++  expand_insn (icode, i, ops);
++}
++
+ /* Expand DIVMOD() using:
+  a) optab handler for udivmod/sdivmod if it is available.
+  b) If optab_handler doesn't exist, generate call to
+@@ -3767,10 +3880,12 @@ multi_vector_optab_supported_p (convert_optab optab, tree_pair types,
+ #define direct_cond_binary_optab_supported_p direct_optab_supported_p
+ #define direct_cond_ternary_optab_supported_p direct_optab_supported_p
+ #define direct_mask_load_optab_supported_p convert_optab_supported_p
++#define direct_mask_prefetch_optab_supported_p direct_optab_supported_p
+ #define direct_load_lanes_optab_supported_p multi_vector_optab_supported_p
+ #define direct_mask_load_lanes_optab_supported_p multi_vector_optab_supported_p
+ #define direct_gather_load_optab_supported_p convert_optab_supported_p
+ #define direct_len_load_optab_supported_p direct_optab_supported_p
++#define direct_gather_prefetch_optab_supported_p direct_optab_supported_p
+ #define direct_mask_store_optab_supported_p convert_optab_supported_p
+ #define direct_store_lanes_optab_supported_p multi_vector_optab_supported_p
+ #define direct_mask_store_lanes_optab_supported_p multi_vector_optab_supported_p
+diff --git a/gcc/internal-fn.def b/gcc/internal-fn.def
+index d2d550d35..05fc50328 100644
+--- a/gcc/internal-fn.def
++++ b/gcc/internal-fn.def
+@@ -121,6 +121,8 @@ along with GCC; see the file COPYING3.  If not see
+ #endif
+ 
+ DEF_INTERNAL_OPTAB_FN (MASK_LOAD, ECF_PURE, maskload, mask_load)
++DEF_INTERNAL_OPTAB_FN (MASK_PREFETCH, ECF_NOVOPS | ECF_LEAF,
++		       maskprefetch, mask_prefetch)
+ DEF_INTERNAL_OPTAB_FN (LOAD_LANES, ECF_CONST, vec_load_lanes, load_lanes)
+ DEF_INTERNAL_OPTAB_FN (MASK_LOAD_LANES, ECF_PURE,
+ 		       vec_mask_load_lanes, mask_load_lanes)
+@@ -128,6 +130,8 @@ DEF_INTERNAL_OPTAB_FN (MASK_LOAD_LANES, ECF_PURE,
+ DEF_INTERNAL_OPTAB_FN (GATHER_LOAD, ECF_PURE, gather_load, gather_load)
+ DEF_INTERNAL_OPTAB_FN (MASK_GATHER_LOAD, ECF_PURE,
+ 		       mask_gather_load, gather_load)
++DEF_INTERNAL_OPTAB_FN (MASK_GATHER_PREFETCH, ECF_NOVOPS | ECF_LEAF,
++		       mask_gather_prefetch, gather_prefetch)
+ 
+ DEF_INTERNAL_OPTAB_FN (LEN_LOAD, ECF_PURE, len_load, len_load)
+ 
+diff --git a/gcc/ipa-pure-const.cc b/gcc/ipa-pure-const.cc
+index 2642df91e..222fe6465 100644
+--- a/gcc/ipa-pure-const.cc
++++ b/gcc/ipa-pure-const.cc
+@@ -534,6 +534,7 @@ builtin_safe_for_const_function_p (bool *looping, tree callee)
+ 	*looping = false;
+ 	return true;
+       case BUILT_IN_PREFETCH:
++      case BUILT_IN_PREFETCH_FULL:
+ 	*looping = true;
+ 	return true;
+       default:
+diff --git a/gcc/optabs.def b/gcc/optabs.def
+index dbf529434..8ca25a5cc 100644
+--- a/gcc/optabs.def
++++ b/gcc/optabs.def
+@@ -90,9 +90,11 @@ OPTAB_CD(vec_cmp_optab, "vec_cmp$a$b")
+ OPTAB_CD(vec_cmpu_optab, "vec_cmpu$a$b")
+ OPTAB_CD(vec_cmpeq_optab, "vec_cmpeq$a$b")
+ OPTAB_CD(maskload_optab, "maskload$a$b")
++OPTAB_CD(maskprefetch_optab, "maskprefetch$a$b")
+ OPTAB_CD(maskstore_optab, "maskstore$a$b")
+ OPTAB_CD(gather_load_optab, "gather_load$a$b")
+ OPTAB_CD(mask_gather_load_optab, "mask_gather_load$a$b")
++OPTAB_CD(mask_gather_prefetch_optab, "mask_gather_prefetch$a$b")
+ OPTAB_CD(scatter_store_optab, "scatter_store$a$b")
+ OPTAB_CD(mask_scatter_store_optab, "mask_scatter_store$a$b")
+ OPTAB_CD(vec_extract_optab, "vec_extract$a$b")
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 2433ace06..432b822e8 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -2108,6 +2108,13 @@ enable_fdo_optimizations (struct gcc_options *opts,
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_tree_loop_distribution, value);
+ }
+ 
++static void
++set_cache_misses_profile_params (struct gcc_options *opts,
++				 struct gcc_options *opts_set)
++{
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_prefetch_loop_arrays, 1);
++}
++
+ /* Enable cfgo-related flags.  */
+ 
+ static void
+@@ -3143,10 +3150,20 @@ common_handle_option (struct gcc_options *opts,
+       /* FALLTHRU */
+     case OPT_fauto_profile:
+       enable_fdo_optimizations (opts, opts_set, value);
+-	  /* 2 is special and means flag_profile_correction trun on by
+-	     -fauto-profile.  */
++      /* 2 is special and means flag_profile_correction is turned on by
++	 -fauto-profile.  */
+       SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_correction,
+-			   (value ? 2 : 0));
++		      (value ? 2 : 0));
++      break;
++
++    case OPT_fadditional_profile_:
++      opts->x_additional_profile_file = xstrdup (arg);
++      opts->x_flag_additional_profile = true;
++      value = true;
++      /* No break here - do -fadditional-profile processing. */
++      /* FALLTHRU */
++    case OPT_fadditional_profile:
++      opts->x_flag_ipa_extend_auto_profile = value;
+       break;
+ 
+     case OPT_fipa_struct_reorg_:
+@@ -3155,17 +3172,36 @@ common_handle_option (struct gcc_options *opts,
+     case OPT_fipa_struct_reorg:
+       opts->x_flag_ipa_struct_reorg = value;
+       if (value && !opts->x_struct_layout_optimize_level)
+-	{
+-	  /* Using the -fipa-struct-reorg option is equivalent to using
+-	     -fipa-struct-reorg=1.  */
+-	  opts->x_struct_layout_optimize_level = 1;
+-	}
++      {
++	      /* Using the -fipa-struct-reorg option is equivalent to using
++		 -fipa-struct-reorg=1.  */
++	      opts->x_struct_layout_optimize_level = 1;
++      }
+       break;
+ 
+     case OPT_fipa_reorder_fields:
+       SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_struct_reorg, value);
+       break;
+ 
++    case OPT_fipa_extend_auto_profile:
++      opts->x_flag_ipa_extend_auto_profile = opts->x_flag_cache_misses_profile
++	      ? true : value;
++      break;
++
++    case OPT_fcache_misses_profile_:
++      opts->x_cache_misses_profile_file = xstrdup (arg);
++      opts->x_flag_cache_misses_profile = true;
++      value = true;
++      /* No break here - do -fcache-misses-profile processing. */
++      /* FALLTHRU */
++    case OPT_fcache_misses_profile:
++      opts->x_flag_ipa_extend_auto_profile = value;
++      if (value)
++      {
++	      set_cache_misses_profile_params (opts, opts_set);
++      }
++      break;
++
+     case OPT_fcfgo_profile_generate_:
+       opts->x_profile_data_prefix = xstrdup (arg);
+       value = true;
+diff --git a/gcc/params.opt b/gcc/params.opt
+index e5472dfc8..e06e50611 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1262,4 +1262,66 @@ Range for depended ldp search in split-ldp-stp path.
+ Common Joined UInteger Var(semi_relayout_level) Init(13) IntegerRange(11, 15) Param Optimization
+ Set capacity of each bucket to semi-relayout to (1 << semi-relayout-level) / 8 .
+ 
++-param=mem-access-ratio=
++Common Joined UInteger Var(param_mem_access_ratio) Init(20) IntegerRange(0, 100) Param Optimization
++Memory access ratio (in percent).
++
++-param=mem-access-num=
++Common Joined UInteger Var(param_mem_access_num) Init(3) Param Optimization
++Memory access num.
++
++-param=prefetch-offset=
++Common Joined UInteger Var(param_prefetch_offset) Init(1024)
++IntegerRange(1, 999999) Param Optimization
++Prefetch Offset, which is usually a power of two due to cache line size.
++
++-param=branch-prob-threshold=
++Common Joined UInteger Var(param_branch_prob_threshold) Init(80) IntegerRange(50, 100)
++Param Optimization
++High Execution Rate Branch Threshold.
++
++-param=issue-topn=
++Common Joined UInteger Var(param_issue_topn) Init(1) Param Optimization
++Issue topn LLC mem_ref hint.
++
++-param=force-issue=
++Common Joined UInteger Var(param_force_issue) Init(0) IntegerRange(0, 1) Param
++Force issue the topn LLC mem_ref hint, without generating dynamic multi-branches.
++
++-param=llc-capacity-per-core=
++Common Joined UInteger Var(param_llc_capacity_per_core) Init(107) IntegerRange(0, 999999) Param
++LLC capacity per core.
++
++-param=filter-kernels=
++Common Joined UInteger Var(param_filter_kernels) Init(1) IntegerRange(0, 1) Param
++Allow LLC allocate pass to greedily filter kernels by traversing the corresponding basic blocks
++through edges with branch probability no less than param_branch_prob_threshold.
++
++-param=outer-loop-nums=
++Common Joined UInteger Var(param_outer_loop_num) Init(1) IntegerRange(1, 10) Param
++Maximum number of outer loops allowed to extend outer loops for loops that
++cannot recognize inner loop boundaries.
++
++-param=llc-level=
++Common Joined UInteger Var(param_llc_level) Init(3) IntegerRange(3, 4)
++Param Optimization
++Specifies the HBM cache level.
++
++-param=filter-mode=
++Common Joined UInteger Var(param_filter_mode) Init(1) IntegerRange(0, 1) Param
++Set kernel filtering mode. Use basic block count by default; use branch probability mode when filter mode is turned off.
++
++-param=transfer-footprint=
++Common Joined UInteger Var(param_transfer_footprint) Init(1) IntegerRange(0, 1) Param
++Allow transferring the firstly calculated footprint expression to the target memory reference
++from which it is impossible to retrieve the footprint.
++
++-param=llc-allocate-func-topn=
++Common Joined UInteger Var(param_llc_allocate_func_topn) Init(0) Param Optimization
++TopN functions of pmu counts to be analyzed in LLC allocation.
++
++-param=llc-allocate-func-counts-threshold=
++Common Joined UInteger Var(param_llc_allocate_func_counts_threshold) Init(1) Param Optimization
++Threshold functions of pmu counts to be analyzed in LLC allocation.
++
+ ; This comment is to ensure we retain the blank line above.
+diff --git a/gcc/passes.def b/gcc/passes.def
+index 90643d533..49001adde 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -141,6 +141,7 @@ along with GCC; see the file COPYING3.  If not see
+ 
+   NEXT_PASS (pass_target_clone);
+   NEXT_PASS (pass_ipa_auto_profile);
++  NEXT_PASS (pass_ipa_extend_auto_profile);
+   NEXT_PASS (pass_ipa_tree_profile);
+   PUSH_INSERT_PASSES_WITHIN (pass_ipa_tree_profile)
+       NEXT_PASS (pass_feedback_split_functions);
+@@ -325,6 +326,7 @@ along with GCC; see the file COPYING3.  If not see
+ 	  /* Run IVOPTs after the last pass that uses data-reference analysis
+ 	     as that doesn't handle TARGET_MEM_REFs.  */
+ 	  NEXT_PASS (pass_iv_optimize);
++	  NEXT_PASS (pass_llc_allocate);
+ 	  NEXT_PASS (pass_lim);
+ 	  NEXT_PASS (pass_tree_loop_done);
+       POP_INSERT_PASSES ()
+diff --git a/gcc/print-rtl.cc b/gcc/print-rtl.cc
+index 636113d5b..b7506514a 100644
+--- a/gcc/print-rtl.cc
++++ b/gcc/print-rtl.cc
+@@ -1579,6 +1579,12 @@ print_exp (pretty_printer *pp, const_rtx x, int verbose)
+       op[1] = XEXP (x, 1);
+       op[2] = XEXP (x, 2);
+       break;
++    case PREFETCH_FULL:
++      fun = "prefetch_full";
++      op[0] = XEXP (x, 0);
++      op[1] = XEXP (x, 1);
++      op[2] = XEXP (x, 2);
++      break;
+     case UNSPEC:
+     case UNSPEC_VOLATILE:
+       {
+diff --git a/gcc/rtl.def b/gcc/rtl.def
+index 08e31fa35..78ec1a021 100644
+--- a/gcc/rtl.def
++++ b/gcc/rtl.def
+@@ -282,6 +282,15 @@ DEF_RTL_EXPR(ADDR_DIFF_VEC, "addr_diff_vec", "eEee0", RTX_EXTRA)
+    whose prefetch instructions do not support them.  */
+ DEF_RTL_EXPR(PREFETCH, "prefetch", "eee", RTX_EXTRA)
+ 
++/* Memory prefetch, with attributes supported on some targets.
++   Operand 1 is the address of the memory to fetch.
++   Operand 2 is 1 for a write access, 0 otherwise.
++   Operand 3 is the level of prfop.
++
++   The attributes specified by operands 2 and 3 are ignored for targets
++   whose prefetch instructions do not support them.  */
++DEF_RTL_EXPR(PREFETCH_FULL, "prefetch_full", "eee", RTX_EXTRA)
++
+ /* ----------------------------------------------------------------------
+    At the top level of an instruction (perhaps under PARALLEL).
+    ---------------------------------------------------------------------- */
+diff --git a/gcc/rtl.h b/gcc/rtl.h
+index a0db225cb..844e1a7c3 100644
+--- a/gcc/rtl.h
++++ b/gcc/rtl.h
+@@ -2814,6 +2814,10 @@ do {								        \
+ #define PREFETCH_SCHEDULE_BARRIER_P(RTX)					\
+   (RTL_FLAG_CHECK1 ("PREFETCH_SCHEDULE_BARRIER_P", (RTX), PREFETCH)->volatil)
+ 
++/* True if RTX is flagged to be a scheduling barrier.  */
++#define PREFETCH_FULL_SCHEDULE_BARRIER_P(RTX)				\
++  (RTL_FLAG_CHECK1 ("PREFETCH_FULL_SCHEDULE_BARRIER_P", (RTX), PREFETCH_FULL)->volatil)
++
+ /* Indicate whether the machine has any sort of auto increment addressing.
+    If not, we can avoid checking for REG_INC notes.  */
+ 
+diff --git a/gcc/rtlanal.cc b/gcc/rtlanal.cc
+index c436c640c..7f5646ce7 100644
+--- a/gcc/rtlanal.cc
++++ b/gcc/rtlanal.cc
+@@ -1198,6 +1198,7 @@ reg_referenced_p (const_rtx x, const_rtx body)
+       return reg_overlap_mentioned_p (x, TRAP_CONDITION (body));
+ 
+     case PREFETCH:
++    case PREFETCH_FULL:
+       return reg_overlap_mentioned_p (x, XEXP (body, 0));
+ 
+     case UNSPEC:
+@@ -2042,6 +2043,7 @@ note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
+       return;
+ 
+     case PREFETCH:
++    case PREFETCH_FULL:
+       (*fun) (&XEXP (body, 0), data);
+       return;
+ 
+diff --git a/gcc/sched-deps.cc b/gcc/sched-deps.cc
+index 948aa0c3b..db453fb9b 100644
+--- a/gcc/sched-deps.cc
++++ b/gcc/sched-deps.cc
+@@ -2705,7 +2705,9 @@ sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
+       break;
+ 
+     case PREFETCH:
+-      if (PREFETCH_SCHEDULE_BARRIER_P (x))
++    case PREFETCH_FULL:
++      if ((code == PREFETCH && PREFETCH_SCHEDULE_BARRIER_P (x))
++          || (code == PREFETCH_FULL && PREFETCH_FULL_SCHEDULE_BARRIER_P (x)))
+ 	reg_pending_barrier = TRUE_BARRIER;
+       /* Prefetch insn contains addresses only.  So if the prefetch
+ 	 address has no registers, there will be no dependencies on
+diff --git a/gcc/target-insns.def b/gcc/target-insns.def
+index de8c0092f..9cfa19475 100644
+--- a/gcc/target-insns.def
++++ b/gcc/target-insns.def
+@@ -77,6 +77,7 @@ DEF_TARGET_INSN (omp_simt_vote_any, (rtx x0, rtx x1))
+ DEF_TARGET_INSN (omp_simt_xchg_bfly, (rtx x0, rtx x1, rtx x2))
+ DEF_TARGET_INSN (omp_simt_xchg_idx, (rtx x0, rtx x1, rtx x2))
+ DEF_TARGET_INSN (prefetch, (rtx x0, rtx x1, rtx x2))
++DEF_TARGET_INSN (prefetch_full, (rtx x0, rtx x1, rtx x2))
+ DEF_TARGET_INSN (probe_stack, (rtx x0))
+ DEF_TARGET_INSN (probe_stack_address, (rtx x0))
+ DEF_TARGET_INSN (prologue, (void))
+diff --git a/gcc/target.def b/gcc/target.def
+index 142858fa3..646489540 100644
+--- a/gcc/target.def
++++ b/gcc/target.def
+@@ -2064,6 +2064,37 @@ it is for the vector version.",
+  (vec_info *vinfo, bool costing_for_scalar),
+  default_vectorize_create_costs)
+ 
++/* Function for vector prefetch operation.  */
++DEFHOOK
++(code_for_prefetch,
++ "This hook should return the decl of a function that implements the\n\
++vectorized variant of the function with the @code{combined_fn} code\n\
++@var{code} or @code{NULL_TREE} if such a function is not available.\n\
++The return type of the vectorized function shall be of vector type\n\
++@var{vec_type_out} and the argument types should be @var{vec_type_in}.",
++ insn_code, (machine_mode arg),
++ NULL)
++
++/* Function for vector gather prefetch operation.  */
++DEFHOOK
++(code_for_gather_prefetch,
++ "This hook should return the decl of a function that implements the\n\
++vectorized variant of the function with the @code{combined_fn} code\n\
++@var{code} or @code{NULL_TREE} if such a function is not available.\n\
++The return type of the vectorized function shall be of vector type\n\
++@var{vec_type_out} and the argument types should be @var{vec_type_in}.",
++ insn_code, (machine_mode mode_to, machine_mode mode_form),
++ NULL)
++
++/* Function to check whether the target hardware architecture supports
++   a full SVE data vector mode.  */
++DEFHOOK
++(prefetch_handleable_mode_p,
++ "This hook should return true if the target hardware architecture\n\
++supports a full SVE data vector mode.",
++ bool, (machine_mode arg),
++ NULL)
++
+ HOOK_VECTOR_END (vectorize)
+ 
+ #undef HOOK_PREFIX
+diff --git a/gcc/testsuite/g++.dg/llc-allocate/llc-allocate.exp b/gcc/testsuite/g++.dg/llc-allocate/llc-allocate.exp
+new file mode 100644
+index 000000000..1793ba9d1
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/llc-allocate/llc-allocate.exp
+@@ -0,0 +1,27 @@
++#   Copyright (C) 1997-2022 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# .
++
++load_lib g++-dg.exp
++load_lib target-supports.exp
++
++# Initialize `dg'.
++dg-init
++
++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.C]] \
++	"" "-fllc-allocate"
++
++# All done.
++dg-finish
+\ No newline at end of file
+diff --git a/gcc/testsuite/g++.dg/llc-allocate/llc-relion-expand-kernels.C b/gcc/testsuite/g++.dg/llc-allocate/llc-relion-expand-kernels.C
+new file mode 100644
+index 000000000..b5bf69510
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/llc-allocate/llc-relion-expand-kernels.C
+@@ -0,0 +1,52 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param branch-prob-threshold=50  --param filter-kernels=0 --param mem-access-num=2 --param issue-topn=1 --param force-issue=1" } */
++#include "multidim_array.h"
++
++class Input
++{
++  public:
++    int metadata_offset = 13;
++    int exp_nr_images = 1;
++    MultidimArray exp_Mweight;
++    void convertAllSquaredDifferencesToWeights();
++};
++
++int main()
++{
++  clock_t start = clock();
++  Input input;
++  int testIter = 2;
++
++  for (int i = 0; i < testIter; ++i)
++    {
++      input.convertAllSquaredDifferencesToWeights();
++    }
++  return 0;
++}
++
++void Input::convertAllSquaredDifferencesToWeights()
++{
++  for (int img_id = 0; img_id < exp_nr_images; img_id++)
++  {
++    int my_metadata_offset = metadata_offset + img_id;
++    MultidimArray sorted_weight;
++
++    exp_Mweight.getRow(img_id, sorted_weight);
++    long int np = 0;
++    FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(sorted_weight)
++    {
++      if (DIRECT_MULTIDIM_ELEM(sorted_weight, n) > 0.)
++        {
++          DIRECT_MULTIDIM_ELEM(sorted_weight, np) = DIRECT_MULTIDIM_ELEM( \
++            sorted_weight, n);
++          np++;
++        }
++    }
++  }
++}
++
++
++
++/* { dg-final { scan-tree-dump-times "dense memory access" 1 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "__builtin_prefetch" 1 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "static issue" 1 "llc_allocate" } } */
+diff --git a/gcc/testsuite/g++.dg/llc-allocate/multidim_array.h b/gcc/testsuite/g++.dg/llc-allocate/multidim_array.h
+new file mode 100644
+index 000000000..682f24703
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/llc-allocate/multidim_array.h
+@@ -0,0 +1,186 @@
++#ifndef MULTIDIM_ARRAY_H
++#define MULTIDIM_ARRAY_H
++
++#include 
++
++#define RELION_ALIGNED_MALLOC malloc
++#define RELION_ALIGNED_FREE free
++
++#define STARTINGX(v) ((v).xinit)
++#define STARTINGY(v) ((v).yinit)
++#define NZYXSIZE(v) ((v).nzyxdim)
++
++#define DIRECT_MULTIDIM_ELEM(v,n) ((v).data[(n)])
++#define FOR_ALL_DIRECT_ELEMENTS_IN_MULTIDIMARRAY(v) \
++  for (long int n=0; n
++class MultidimArray
++{
++public:
++  T* data;
++  bool destroyData;
++  long int ndim;
++  long int zdim;
++  long int ydim;
++  long int xdim;
++  long int yxdim;
++  long int zyxdim;
++  long int nzyxdim;
++  long int zinit;
++  long int yinit;
++  long int xinit;
++  long int nzyxdimAlloc;
++
++public:
++  void clear()
++  {
++    coreDeallocate();
++    coreInit();
++  }
++
++  void coreInit()
++  {
++    xdim=0;
++    yxdim=0;
++    zyxdim=0;
++    nzyxdim=0;
++    ydim=1;
++    zdim=1;
++    ndim=1;
++    zinit=0;
++    yinit=0;
++    xinit=0;
++    data=NULL;
++    nzyxdimAlloc = 0;
++    destroyData=true;
++  }
++
++  void coreAllocate(long int _ndim, long int _zdim, long int _ydim, long int _xdim)
++  {
++    if (_ndim <= 0 || _zdim <= 0 || _ydim<=0 || _xdim<=0)
++      {
++        clear();
++        return;
++      }
++
++    ndim=_ndim;
++    zdim=_zdim;
++    ydim=_ydim;
++    xdim=_xdim;
++    yxdim=ydim*xdim;
++    zyxdim=zdim*yxdim;
++    nzyxdim=ndim*zyxdim;
++
++    coreAllocate();
++  }
++
++  void coreAllocate()
++  {
++    data = (T*)RELION_ALIGNED_MALLOC(sizeof(T) * nzyxdim);
++    nzyxdimAlloc = nzyxdim;
++  }
++
++  void coreDeallocate()
++  {
++    if (data != NULL && destroyData)
++      {
++        RELION_ALIGNED_FREE(data);
++      }
++    data=NULL;
++    nzyxdimAlloc = 0;
++  }
++
++  void resize(long int Ndim, long int Zdim, long int Ydim, long int Xdim)
++  {
++    if (Ndim*Zdim*Ydim*Xdim == nzyxdimAlloc && data != NULL)
++      {
++        ndim = Ndim;
++        xdim = Xdim;
++        ydim = Ydim;
++        zdim = Zdim;
++        yxdim = Ydim * Xdim;
++        zyxdim = Zdim * yxdim;
++        nzyxdim = Ndim * zyxdim;
++        nzyxdimAlloc = nzyxdim;
++        return;
++      }
++
++    if (Xdim <= 0 || Ydim <= 0 || Zdim <= 0 || Ndim <= 0)
++      {
++        clear();
++        return;
++      }
++
++    if (NZYXSIZE(*this) > 0 && data == NULL)
++      {
++        coreAllocate();
++        return;
++      }
++
++    size_t YXdim=Ydim*Xdim;
++    size_t ZYXdim=Zdim*YXdim;
++    size_t NZYXdim=Ndim*ZYXdim;
++
++    T * new_data = (T*)RELION_ALIGNED_MALLOC(sizeof(T) * NZYXdim);
++    for (long int l = 0; l < Ndim; l++)
++        for (long int k = 0; k < Zdim; k++)
++            for (long int i = 0; i < Ydim; i++)
++                for (long int j = 0; j < Xdim; j++)
++                  {
++                    T val;
++                    new_data[l*ZYXdim + k*YXdim+i*Xdim+j] = val;
++                  }
++    coreDeallocate();
++
++    data = new_data;
++    ndim = Ndim;
++    xdim = Xdim;
++    ydim = Ydim;
++    zdim = Zdim;
++    yxdim = Ydim * Xdim;
++    zyxdim = Zdim * yxdim;
++    nzyxdim = Ndim * zyxdim;
++    nzyxdimAlloc = nzyxdim;
++  }
++
++  void resize(long int Xdim)
++  {
++    resize(1, 1, 1, Xdim);
++  }
++
++  inline T& operator()(long int i, long int j) const
++  {
++    return A2D_ELEM(*this, i, j);
++  }
++
++  inline T& operator()(long int i) const
++  {
++    return A1D_ELEM(*this, i);
++  }
++
++  void getRow(long int i, MultidimArray& v) const
++  {
++    if (xdim == 0 || ydim == 0)
++      {
++        v.clear();
++        return;
++      }
++
++    v.resize(xdim);
++    for (long int j = 0; j < xdim; j++)
++      v(j) = (*this)(i, j);
++  }
++};
++
++#endif /* MULTIDIM_ARRAY_H */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
+new file mode 100644
+index 000000000..091e654f9
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
+@@ -0,0 +1,61 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -funroll-loops -ffast-math -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param issue-topn=2 --param branch-prob-threshold=50 --param filter-mode=0" } */
++
++#include 
++
++#define N 131590
++#define F 384477
++
++double diagPtr[N];
++double psiPtr[N];
++double ApsiPtr[N];
++int lPtr[F];
++int uPtr[F];
++double lowerPtr[F];
++double upperPtr[F];
++
++void
++AMUL (double *diagPtr, double *psiPtr, double *ApsiPtr, int *lPtr,
++      int *uPtr, double *lowerPtr, double *upperPtr, int nCells, int nFaces)
++{
++  for (int cell=0; cell
++
++#define N 100000
++
++int A_i[N];
++int A_j[N];
++double A_data[N];
++double x_data[N];
++double y_data[N];
++int num_rows = N;
++
++void
++MatMult (int *A_i, int *A_j, double *A_data, double *x_data,
++         int num_rows, double *y_data)
++{
++  int i = 0;
++  int j = 0;
++  double temp = 0;
++  for (i = 0; i < num_rows; i++)
++    {
++      temp = y_data[i];
++      for (j = A_i[i]; j < A_i[i+1]; j++)
++        temp += A_data[j] * x_data[A_j[j]];
++      y_data[i] = temp;
++    }
++}
++
++int
++main (int argc, char *argv[])
++{
++  int testIter = 2;
++
++  for (int i = 0; i < testIter; i++)
++    MatMult (A_i, A_j, A_data, x_data, num_rows, y_data);
++
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump-times "ref_count = (?:\[3-9\]|\[1-9\]\\d{1,}), ninsns = \[1-9\]\\d*, mem_to_insn_ratio = 0.\[2-9\]\\d*" 4 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "Tracing succeeded" 14 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-not   "Tracing failed" "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-not   "static_data_size:" "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "\{ (?:\\d+\\(\\d+\\) ){1}\}" 2 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-not   ", size: (?!(0\.000000))" "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times ", size: 0\.000000" 6 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "\\d\\tx_data\\t\\(0.000000, 1, 1, 0\\)" 2 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "\\d\\tA_j\\t\\(0.000000, 1, 1, 0\\)" 2 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "\\d\\tA_data\\t\\(0.000000, 1, 1, 0\\)" 2 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-not   "runtime issue" "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "static issue" 2 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "insert svprfd_gather" 2 "llc_allocate" } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-allocate.exp b/gcc/testsuite/gcc.dg/llc-allocate/llc-allocate.exp
+new file mode 100644
+index 000000000..05a3bf842
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-allocate.exp
+@@ -0,0 +1,27 @@
++#   Copyright (C) 2022-2023 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# .
++
++load_lib gcc-dg.exp
++load_lib target-supports.exp
++
++# Initialize `dg'.
++dg-init
++
++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c]] \
++	"" "-fllc-allocate"
++
++# All done.
++dg-finish
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-cross-bb-indir-mem-acc.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-cross-bb-indir-mem-acc.c
+new file mode 100644
+index 000000000..113acbceb
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-cross-bb-indir-mem-acc.c
+@@ -0,0 +1,36 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -fllc-allocate -fdump-tree-llc_allocate-details-lineno -c --param=mem-access-ratio=1 --param=mem-access-num=0" } */
++
++/* In this deja test case, we test how Phase 2 & 3 of llc-allocate pass deals
++   with an indirect memory access in a nested loop where the use-block for the
++   induction variable of this memory access is a child/descendent of its
++   def-block (we make it by defining the induction variable in the outer loop).
++   Therefore, the reference can be successfully traced after outer-loop
++   analysis.  */
++#include 
++#include  
++
++void cross_bb_indir_mem_acc (int *arr1, int *arr2, int *arr3, int *arr4, int n) {
++    srand (time (NULL));
++
++    int j_s;
++    int j_e = arr1[0];
++    int k;
++
++    for (int i = 0; i < n; i++)
++    {
++        j_s = j_e;
++        j_e = arr1[i + 1];
++
++        k = arr3[i];
++
++        for (int j = j_s; j < j_e; j++)
++        {
++           arr4[j] -= arr2[k];
++        }
++
++    }
++}
++
++/* { dg-final { scan-tree-dump "Unhandled indirect memory access tracing." "llc_allocate" } } */
++/* { dg-final { scan-tree-dump "Retrace indirect memory access after outer loop analysis:" "llc_allocate" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-extend-outer-loop.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-extend-outer-loop.c
+new file mode 100644
+index 000000000..a2e7f66a4
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-extend-outer-loop.c
+@@ -0,0 +1,61 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++#include 
++#define N 131590
++#define F 384477
++
++int ownStartPtr[F];
++double bPrimePtr[N];
++double diagPtr[N];
++double psiPtr[N];
++double upperPtr[F];
++double lowerPtr[F];
++int uPtr[F];
++
++void SMOOTH(int *ownStartPtr, double *bPrimePtr, double *diagPtr, double *psiPtr, int *uPtr, double *lowerPtr, double *upperPtr, int nCells);
++
++int main(int argc, char *argv[])
++{
++  int nCells = N;
++  int nFaces = F;
++  int testIter = 2;
++  for (int i = 0; i < testIter; i++)
++    {
++      SMOOTH(ownStartPtr, bPrimePtr, diagPtr, psiPtr, uPtr, lowerPtr, upperPtr, nCells);
++    }
++  return  0;
++}
++
++
++void SMOOTH(int *ownStartPtr, double *bPrimePtr, double *diagPtr, double *psiPtr, int *uPtr, double *lowerPtr, double *upperPtr, int nCells)
++{
++  double psii;
++  int fStart;
++  int fEnd = ownStartPtr[0];
++
++  for (int celli = 0; celli < nCells; celli++)
++    {
++      fStart = fEnd;
++      fEnd = ownStartPtr[celli + 1];
++      psii = bPrimePtr[celli];
++
++      for (int facei = fStart; facei
++
++#define N 131590
++
++double diagPtr[N];
++double psiPtr[N];
++double ApsiPtr[N];
++
++void
++branch_in_loop (double *diagPtr, double *psiPtr, double *ApsiPtr, int nCells)
++{
++  for (int cell=0; cell 0)
++          ApsiPtr[cell] = 0;
++      else
++          ApsiPtr[cell] = diagPtr[cell]*psiPtr[cell];
++    }
++}
++
++int
++main (int argc, char *argv[])
++{
++  int nCells = N;
++  int testIter = 100;
++
++  for (int i=0; i
++
++#define N 131590
++
++double diagPtr[N];
++double psiPtr[N];
++double ApsiPtr[N];
++
++void
++break_in_loop (double *diagPtr, double *psiPtr, double *ApsiPtr, int nCells)
++{
++  for (int cell=0; cell 0)
++	break;
++      ApsiPtr[cell] = diagPtr[cell]*psiPtr[cell];
++    }
++}
++
++int
++main (int argc, char *argv[])
++{
++  int nCells = N;
++  int testIter = 2;
++
++  for (int i=0; i
++
++#define N 131
++
++double diagPtr[N];
++int psiPtr[N];
++double ApsiPtr[N];
++
++void
++goto_in_loop (double *diagPtr, int *psiPtr, double *ApsiPtr, int nCells)
++{
++  for (int cell=0; cellnodes;
++  while (v > 1)
++    {
++      basic_block bb = di->dfs_to_bb[v];
++      edge e;
++
++      par = di->dfs_parent[v];
++      k = v;
++
++      ei = (reverse) ? ei_start (bb->succs) : ei_start (bb->preds);
++
++      if (reverse)
++	{
++	  /* If this block has a fake edge to exit, process that first.  */
++	  if (bitmap_bit_p (di->fake_exit_edge, bb->index))
++	    {
++	      einext = ei;
++	      einext.index = 0;
++	      goto do_fake_exit_edge;
++	    }
++	}
++
++      /* Search all direct predecessors for the smallest node with a path
++	 to them.  That way we have the smallest node with also a path to
++	 us only over nodes behind us.  In effect we search for our
++	 semidominator.  */
++      while (!ei_end_p (ei))
++	{
++	  basic_block b;
++	  TBB k1;
++
++	  e = ei_edge (ei);
++	  b = (reverse) ? e->dest : e->src;
++	  einext = ei;
++	  ei_next (&einext);
++
++	  if (b == en_block)
++	    {
++	    do_fake_exit_edge:
++	      k1 = di->dfs_order[last_basic_block];
++	    }
++	  else
++	    k1 = di->dfs_order[b->index];
++
++	  /* Call eval() only if really needed.  If k1 is above V in DFS tree,
++	     then we know, that eval(k1) == k1 and key[k1] == k1.  */
++	  if (k1 > v)
++	    k1 = di->key[eval (di, k1)];
++	  if (k1 < k)
++	    k = k1;
++
++	  ei = einext;
++	}
++
++      di->key[v] = k;
++      link_roots (di, par, v);
++      di->next_bucket[v] = di->bucket[k];
++      di->bucket[k] = v;
++
++      /* Transform semidominators into dominators.  */
++      for (w = di->bucket[par]; w; w = di->next_bucket[w])
++	{
++	  k = eval (di, w);
++	  if (di->key[k] < di->key[w])
++	    di->dom[w] = k;
++	  else
++	    di->dom[w] = par;
++	}
++      /* We don't need to cleanup next_bucket[].  */
++      di->bucket[par] = 0;
++      v--;
++    }
++
++  /* Explicitly define the dominators.  */
++  di->dom[1] = 0;
++  for (v = 2; v <= di->nodes; v++)
++    if (di->dom[v] != di->key[v])
++      di->dom[v] = di->dom[di->dom[v]];
++}
++
++/* { dg-final { scan-tree-dump-times "Warning: Find cycle at bb index" 2 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump "static issue" "llc_allocate" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c
+new file mode 100644
+index 000000000..e18725f60
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c
+@@ -0,0 +1,50 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -c -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param filter-kernels=0" } */
++
++#include 
++
++typedef struct stack_def
++{
++  int top;                      /* index to top stack element */
++  unsigned long reg_set;        /* set of live registers */
++  unsigned char reg[128];       /* register - stack mapping */
++} *stack;
++
++typedef struct block_info_def
++{
++  struct stack_def stack_in;    /* Input stack configuration.  */
++  struct stack_def stack_out;   /* Output stack configuration.  */
++  unsigned long out_reg_set;    /* Stack regs live on output.  */
++  int done;                     /* True if block already converted.  */
++  int predecessors;             /* Number of predecessors that need
++                                   to be visited.  */
++} *block_info;
++
++typedef struct basic_block_def
++{
++  void *aux;
++} *basic_block;
++
++unsigned char
++convert_regs_exit (basic_block bb, int value_reg_low, int value_reg_high)
++{
++  stack output_stack;
++
++  output_stack = &(((block_info) bb->aux)->stack_in);
++  if (value_reg_low == -1)
++    output_stack->top = -1;
++  else
++    {
++      int reg;
++      output_stack->top = value_reg_high - value_reg_low;
++      for (reg = value_reg_low; reg <= value_reg_high; ++reg)
++        {
++          (output_stack->reg + 16)[value_reg_high - reg] = reg;
++          output_stack->reg_set |= (unsigned long) 1 << reg;
++        }
++    }
++  return output_stack->reg[0];
++}
++
++/* { dg-final { scan-tree-dump-times "runtime issue" 1 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "static issue" 1 "llc_allocate" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1keep.c
+new file mode 100644
+index 000000000..328dc57bc
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options " -S -O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],0,0);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PLDL1KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1strm.c
+new file mode 100644
+index 000000000..d9c919869
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl1strm.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],0,1);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PLDL1STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2keep.c
+new file mode 100644
+index 000000000..806366b5b
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],0,2);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PLDL2KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2strm.c
+new file mode 100644
+index 000000000..91567d1e9
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl2strm.c
+@@ -0,0 +1,16 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main()
++{
++  for(int i = 0; i < 100000; i++)
++    {
++      __builtin_prefetch_full(&val[i], 0, 3);
++      val[i] = i + 1;		
++    }
++}
++
++/* { dg-final { scan-assembler "PLDL2STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3keep.c
+new file mode 100644
+index 000000000..c28150654
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],0,4);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PLDL3KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3strm.c
+new file mode 100644
+index 000000000..e8d9c8693
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl3strm.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],0,5);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PLDL3STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4keep.c
+new file mode 100644
+index 000000000..b0281882f
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],0,6);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PLDL4KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4strm.c
+new file mode 100644
+index 000000000..26807556f
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pldl4strm.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],0,7);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PLDL4STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1keep.c
+new file mode 100644
+index 000000000..4f2def13d
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,0);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL1KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1strm.c
+new file mode 100644
+index 000000000..ecc501f1f
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl1strm.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,1);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL1STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2keep.c
+new file mode 100644
+index 000000000..d140f1ed1
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,2);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL2KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2strm.c
+new file mode 100644
+index 000000000..d6f170253
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl2strm.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,3);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL2STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3keep.c
+new file mode 100644
+index 000000000..8da092b36
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,4);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL3KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3strm.c
+new file mode 100644
+index 000000000..4cf65188a
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl3strm.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,5);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL3STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4keep.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4keep.c
+new file mode 100644
+index 000000000..36f4a3aa0
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4keep.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,6);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL4KEEP"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4strm.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4strm.c
+new file mode 100644
+index 000000000..43d2d41d5
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-prefetch-full-pstl4strm.c
+@@ -0,0 +1,14 @@
++
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param=outer-loop-nums=10 --param=issue-topn=4 --param=force-issue=1 --param=filter-kernels=0" } */
++
++
++int val[100000];
++int main(){
++	for(int i=0;i<100000;i++){
++		__builtin_prefetch_full(&val[i],1,7);
++		val[i]=i+1;		
++	}
++}
++
++/* { dg-final { scan-assembler "PSTL4STRM"  } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c
+new file mode 100644
+index 000000000..ba90e7ea4
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c
+@@ -0,0 +1,62 @@
++/* { dg-do compile { target { aarch64*-*-linux* } } } */
++/* { dg-options "-O3 -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param filter-kernels=0" } */
++
++#include 
++#include 
++
++#define N 1000
++
++long a[N] = {0};
++long b[N] = {0};
++long c[N] = {0};
++
++double
++referenceTrace (double *psiPtr, int *lPtr, int *uPtr, int nCells)
++{
++  double sum;
++  for (int cell = 0; cell < nCells; cell++)
++    {
++      // Multi-layer pointer
++      sum += psiPtr[lPtr[cell]];
++      psiPtr[uPtr[cell]] = sum;
++
++      // Outer pointer, inner array
++      sum += psiPtr[b[cell]];
++      psiPtr[a[cell]] = sum;
++
++      // Multi-layer array
++      sum += a[b[cell]];
++      c[a[cell]] = sum;
++
++      // Outer array, inner pointer
++      sum += a[lPtr[cell]];
++      c[lPtr[cell]] = sum;
++    }
++  return sum;
++}
++
++int
++main (int argc, char *argv[])
++{
++  int testIter = 2;
++
++  double *psiPtr = NULL;
++  int *lPtr = NULL;
++  int *uPtr = NULL;
++  psiPtr = (double *) calloc (N, sizeof(double));
++  lPtr = (int *) calloc (N, sizeof(int));
++  uPtr = (int *) calloc (N, sizeof(int));
++
++  for (int i = 0; i < testIter; i++)
++    referenceTrace (psiPtr, lPtr, uPtr, N);
++
++  free (psiPtr);
++  free (lPtr);
++  free (uPtr);
++
++  return 0;
++}
++
++/* { dg-final { scan-tree-dump-times "Tracing succeeded" 24 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-not "Tracing failed" "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "unhandled issue scene" 2 "llc_allocate" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90 b/gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90
+new file mode 100644
+index 000000000..b0f68ebe3
+--- /dev/null
++++ b/gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90
+@@ -0,0 +1,211 @@
++! { dg-do compile { target { aarch64*-*-linux* } } }
++! { dg-options "-O3 -march=armv8.2-a+sve -funroll-loops -ffast-math -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param branch-prob-threshold=50 --param filter-mode=0" }
++
++program main
++
++  IMPLICIT NONE
++  INTEGER :: ids,ide, jds,jde, kds,kde
++  INTEGER,parameter :: ims=-4,kms=1,jms=-4
++  INTEGER,parameter :: ime=210,kme=36,jme=192
++  INTEGER :: its,ite, jts,jte, kts,kte
++  INTEGER :: number_of_small_timesteps,rk_step, rk_order, step
++
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: t_1, t_2, c2a, p, ph, pm1, al, alt
++
++
++  REAL, DIMENSION(ims:ime, jms:jme) :: mu, muts
++
++  REAL, DIMENSION(kms:kme) :: dnw, rdnw, znu
++
++  REAL :: rdx,rdy
++  REAL :: dts, t0, smdiv
++  REAL :: random1,time_begin,time_end,total_time
++
++  INTEGER :: i, j, k
++  INTEGER :: i_start, i_end, j_start, j_end, k_start, k_end
++  INTEGER :: i_endu, j_endv
++  INTEGER :: interval=1
++  INTEGER :: epoch,iter
++
++  LOGICAL :: non_hydrostatic
++
++  data ids, jds, kds, its, jts, kts /6*1/
++  data ide, ite /2*205/
++  data jde, jte /2*187/
++  data kde, kte /2*36/
++
++  number_of_small_timesteps = 1
++  rk_step = 3
++  rk_order = 1
++  dts = 1.
++
++  rdx = 1.
++  rdy = 1.
++
++  t0 = 0.
++  smdiv = 1.
++  step = 1
++  non_hydrostatic = .true.
++
++  call random_number(random1)
++  interval = random1*100
++  interval=1
++
++  call random_seed(put=(/(i,i=1,10000,interval)/))
++
++  call random_number(alt)
++  call random_number(c2a)
++  call random_number(ph)
++  call random_number(pm1)
++  call random_number(mu)
++  call random_number(muts)
++  call random_number(dnw)
++  call random_number(rdnw)
++  call random_number(znu)
++
++  do iter=1,2
++  call calc_p_rho( al, p, ph,                        &
++                       alt, t_2, t_1, c2a, pm1,      &
++                       mu, muts, znu, t0,            &
++                       rdnw, dnw, smdiv,             &
++                       non_hydrostatic, step,        &
++                       ids, ide, jds, jde, kds, kde, &
++                       ims, ime, jms, jme, kms, kme, &
++                       its,ite, jts,jte, kts,kte    )
++
++  enddo
++
++end program
++
++
++SUBROUTINE calc_p_rho( al, p, ph,                    &
++                       alt, t_2, t_1, c2a, pm1,      &
++                       mu, muts, znu, t0,            &
++                       rdnw, dnw, smdiv,             &
++                       non_hydrostatic, step,        &
++                       ids, ide, jds, jde, kds, kde, &
++                       ims, ime, jms, jme, kms, kme, &
++                       its,ite, jts,jte, kts,kte    )
++
++  IMPLICIT NONE  ! religion first
++  !asb
++! declarations for the stuff coming in
++
++  INTEGER,      INTENT(IN   )    :: ids,ide, jds,jde, kds,kde
++  INTEGER,      INTENT(IN   )    :: ims,ime, jms,jme, kms,kme
++  INTEGER,      INTENT(IN   )    :: its,ite, jts,jte, kts,kte
++
++  INTEGER,      INTENT(IN   )    :: step
++
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme),INTENT(  OUT) :: al,   &
++                                                               p
++
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme),INTENT(IN   ) :: alt,   &
++                                                              t_2,   &
++                                                              t_1,   &
++                                                              c2a
++
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme),INTENT(INOUT) :: ph, pm1
++
++  REAL, DIMENSION(ims:ime, jms:jme)         , INTENT(IN   ) :: mu,   &
++                                                               muts
++
++  REAL, DIMENSION(kms:kme)         , INTENT(IN   ) :: dnw,  &
++                                                      rdnw, &
++                                                      znu
++
++  REAL,                                       INTENT(IN   ) :: t0, smdiv
++
++  LOGICAL, INTENT(IN   )  :: non_hydrostatic
++
++! local variables
++
++  INTEGER :: i, j, k
++  INTEGER :: i_start, i_end, j_start, j_end, k_start, k_end
++  REAL    :: ptmp
++
++   i_start = its
++   i_end   = min(ite,ide-1)
++   j_start = jts
++   j_end   = min(jte,jde-1)
++   k_start = kts
++   k_end = min(kte,kde-1)
++
++   IF (non_hydrostatic) THEN
++     DO j=j_start, j_end
++     DO k=k_start, k_end
++     DO i=i_start, i_end
++
++!  al computation is all dry, so ok with moisture
++
++      al(i,k,j)=-1./muts(i,j)*(alt(i,k,j)*mu(i,j)  &
++             +rdnw(k)*(ph(i,k+1,j)-ph(i,k,j)))
++
++!  this is temporally linearized p, no moisture correction needed
++
++      p(i,k,j)=c2a(i,k,j)*(alt(i,k,j)*(t_2(i,k,j)-mu(i,j)*t_1(i,k,j))  &
++                       /(muts(i,j)*(t0+t_1(i,k,j)))-al (i,k,j))
++
++     ENDDO
++     ENDDO
++     ENDDO
++
++   ELSE  ! hydrostatic calculation
++
++       DO j=j_start, j_end
++       DO k=k_start, k_end
++       DO i=i_start, i_end
++         p(i,k,j)=mu(i,j)*znu(k)
++         al(i,k,j)=alt(i,k,j)*(t_2(i,k,j)-mu(i,j)*t_1(i,k,j))            &
++                      /(muts(i,j)*(t0+t_1(i,k,j)))-p(i,k,j)/c2a(i,k,j)
++         ph(i,k+1,j)=ph(i,k,j)-dnw(k)*(muts(i,j)*al (i,k,j)              &
++                          +mu(i,j)*alt(i,k,j))
++       ENDDO
++       ENDDO
++       ENDDO
++
++   END IF
++
++!  divergence damping setup
++
++     IF (step == 0) then   ! we're initializing small timesteps
++       DO j=j_start, j_end
++       DO k=k_start, k_end
++       DO i=i_start, i_end
++         pm1(i,k,j)=p(i,k,j)
++       ENDDO
++       ENDDO
++       ENDDO
++     ELSE                     ! we're in the small timesteps
++       DO j=j_start, j_end    ! and adding div damping component
++       DO k=k_start, k_end
++       DO i=i_start, i_end
++         ptmp = p(i,k,j)
++         p(i,k,j) = p(i,k,j) + smdiv*(p(i,k,j)-pm1(i,k,j))
++         pm1(i,k,j) = ptmp
++       ENDDO
++       ENDDO
++       ENDDO
++     END IF
++
++END SUBROUTINE calc_p_rho
++
++! { dg-final { scan-tree-dump-times "ref_count = (?:\[3-9\]|\[1-9\]\\d{1,}), ninsns = \[1-9\]\\d*, mem_to_insn_ratio = 0.\[2-9\]\\d*" 6 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "Tracing succeeded" 46 "llc_allocate" } }
++! { dg-final { scan-tree-dump-not   "Tracing failed" "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\{ (?:\\d+\\(\\d+\\) ){1}\}" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\{ (?:\\d+\\(\\d+\\) ){2}\}" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\{ (?:\\d+\\(\\d+\\) ){4}\}" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tp\\t\\(0.000000, 3, 1, 0\\)" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tp\\t\\(0.000000, 3, 3, 0\\)" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tpm1\\t\\(0.000000, 3, 2, 0\\)" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tph\\t\\(0.000000, 3, 2, 0\\)" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tal\\t\\(0.000000, 3, 1, 0\\)" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\talt\\t\\(0.000000, 3, 1, 0\\)" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tt_1\\t\\(0.000000, 3, 1, 0\\)" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tt_2\\t\\(0.000000, 3, 1, 0\\)" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "\\d\\tc2a\\t\\(0.000000, 3, 1, 0\\)" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "runtime issue" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "static issue" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "insert svprfd" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "cumul_size.*150960\\)" 1 "llc_allocate" } }
+diff --git a/gcc/testsuite/gfortran.dg/llc-allocate/llc-allocate.exp b/gcc/testsuite/gfortran.dg/llc-allocate/llc-allocate.exp
+new file mode 100644
+index 000000000..13d225f35
+--- /dev/null
++++ b/gcc/testsuite/gfortran.dg/llc-allocate/llc-allocate.exp
+@@ -0,0 +1,29 @@
++#   Copyright (C) 2022-2023 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++#
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++#
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# .
++
++# GCC testsuite that uses the `dg.exp' driver.
++
++load_lib gfortran-dg.exp
++
++# Initialize `dg'.
++dg-init
++
++# Main loop.
++gfortran-dg-runtest [lsort \
++    [glob -nocomplain $srcdir/$subdir/*.\[fF\]{,90,95,03,08} ] ] "" ""
++
++# All done.
++dg-finish
+diff --git a/gcc/testsuite/gfortran.dg/llc-allocate/llc-trace-multiple-base-var.f90 b/gcc/testsuite/gfortran.dg/llc-allocate/llc-trace-multiple-base-var.f90
+new file mode 100644
+index 000000000..501e6e74c
+--- /dev/null
++++ b/gcc/testsuite/gfortran.dg/llc-allocate/llc-trace-multiple-base-var.f90
+@@ -0,0 +1,62 @@
++! { dg-do compile { target { aarch64*-*-linux* } } }
++! { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno" }
++
++MODULE INPUT
++    IMPLICIT NONE
++
++    INTEGER, PARAMETER :: wp = 8, jpi = 25, jpj = 39, jpk = 31, kjpt = 2
++
++    INTEGER :: kt = 1, jpkm1 = 30, jpjm1 = 38, fs_jpim1 = 24, fs_2 = 2
++    REAL(wp), DIMENSION(jpi, jpj) :: e12t
++    REAL(wp), DIMENSION(jpi, jpj, jpk) :: fse3t_n
++    REAL(wp), DIMENSION(jpi, jpj, jpk, kjpt) :: pta
++
++END MODULE INPUT
++
++PROGRAM MAIN
++    USE INPUT
++
++    IMPLICIT NONE
++
++    INTEGER :: EPOCH
++
++! Initialize arrays
++
++    e12t = 1
++    fse3t_n = 1
++    pta = 1
++!
++
++    DO EPOCH=1,2
++        CALL tra_ldf_iso
++    ENDDO
++
++END PROGRAM MAIN
++
++SUBROUTINE tra_ldf_iso
++    USE INPUT
++
++    IMPLICIT NONE
++    !
++    INTEGER :: ji, jj, jk, jn   ! dummy loop indices
++    REAL(wp) :: zbtr, ztra            !   -      -
++    REAL(wp), DIMENSION(jpi, jpj, jpk) :: ztfw
++
++    DO jn = 1, kjpt
++        ztfw(:, :, 1) = 0.e0; ztfw(:, :, jpk) = 0.e0
++
++        DO jk = 1, jpkm1
++            DO jj = 2, jpjm1
++                DO ji = fs_2, fs_jpim1   ! vector opt.
++                    zbtr = 1.0/(e12t(ji, jj)*fse3t_n(ji, jj, jk))
++                    ztra = (ztfw(ji, jj, jk) - ztfw(ji, jj, jk + 1))*zbtr
++                    pta(ji, jj, jk, jn) = pta(ji, jj, jk, jn) + ztra
++                END DO
++            END DO
++        END DO
++        !
++    END DO
++    !
++END SUBROUTINE tra_ldf_iso
++
++! { dg-final { scan-tree-dump-times "Traced variables at vectp_ztfw" 2 "llc_allocate" } }
+diff --git a/gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90 b/gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90
+new file mode 100644
+index 000000000..7345759db
+--- /dev/null
++++ b/gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90
+@@ -0,0 +1,58 @@
++! { dg-do compile { target { aarch64*-*-linux* } } }
++! { dg-options "-c -O3 -march=armv8.2-a+sve -fllc-allocate -fdump-tree-llc_allocate-details-lineno --param filter-kernels=0 --param issue-topn=1 --param mem-access-ratio=5 --param mem-access-num=1" }
++
++Module module_domain
++    IMPLICIT NONE
++
++    REAL, PARAMETER :: g = 9.8
++    TYPE :: grid_type
++        REAL, POINTER   :: phb(:,:,:), ph_2(:,:,:), p(:,:,:), pb(:,:,:)
++        REAL, POINTER   :: fnm(:), fnp(:)
++    END TYPE
++END Module
++
++SUBROUTINE calc_p8w(p8w, ix, iy, k_start, k_end)
++
++   USE module_domain
++   !USE module_model_constants
++
++   IMPLICIT NONE
++
++
++   !TYPE (domain), INTENT(IN) :: grid
++   INTEGER, INTENT(IN) :: k_start, k_end, ix, iy
++   REAL, DIMENSION(k_start:k_end), INTENT(OUT) :: p8w
++
++
++   INTEGER :: k
++   REAL    :: z0, z1, z2, w1, w2
++   REAL, DIMENSION(k_start:k_end)   :: z_at_w
++   REAL, DIMENSION(k_start:k_end-1) :: z
++   TYPE (grid_type), POINTER :: grid
++
++
++   DO k = k_start, k_end
++      z_at_w(k) = (grid%phb(ix,k,iy)+grid%ph_2(ix,k,iy))/g
++   END DO
++
++   DO k = k_start, k_end-1
++      z(k) = 0.5*(z_at_w(k) + z_at_w(k+1))
++   END DO
++
++   DO k = k_start+1, k_end-1
++      p8w(k) = grid%fnm(k)*(grid%p(ix,k,iy)+grid%pb(ix,k,iy)) + &
++               grid%fnp(k)*(grid%p(ix,k-1,iy)+grid%pb(ix,k-1,iy))
++   END DO
++
++   z0 = z_at_w(k_start)
++   z1 = z(k_start)
++   z2 = z(k_start+1)
++   w1 = (z0 - z2)/(z1 - z2)
++   w2 = 1. - w1
++   p8w(k_start) = w1*(grid%p(ix,k_start,iy)+grid%pb(ix,k_start,iy)) + &
++                  w2*(grid%p(ix,k_start+1,iy)+grid%pb(ix,k_start+1,iy))
++
++END SUBROUTINE calc_p8w
++
++! { dg-final { scan-tree-dump-times "runtime issue" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "static issue" 1 "llc_allocate" } }
+\ No newline at end of file
+diff --git a/gcc/testsuite/gfortran.dg/llc-allocate/llc-wrf-4-outer-loop-num.f90 b/gcc/testsuite/gfortran.dg/llc-allocate/llc-wrf-4-outer-loop-num.f90
+new file mode 100644
+index 000000000..f79df5d26
+--- /dev/null
++++ b/gcc/testsuite/gfortran.dg/llc-allocate/llc-wrf-4-outer-loop-num.f90
+@@ -0,0 +1,320 @@
++! { dg-do compile { target { aarch64*-*-linux* } } }
++! { dg-options "-O3 -march=armv8.2-a+sve -static -fllc-allocate -fdump-tree-llc_allocate-details-lineno  --param=branch-prob-threshold=50 --param=filter-kernels=0 --param=mem-access-num=2 --param=issue-topn=2  --param=force-issue=1 --param=outer-loop-nums=3" }
++!include "module_small_step_em.F90"
++
++Module add_type
++  IMPLICIT NONE
++
++  TYPE :: grid_config_rec_type
++      LOGICAL :: open_xs
++      LOGICAL :: open_ys
++      LOGICAL :: open_xe
++      LOGICAL :: open_ye
++      LOGICAL :: symmetric_xs
++      LOGICAL :: symmetric_xe
++      LOGICAL :: symmetric_ys
++      LOGICAL :: symmetric_ye
++      LOGICAL :: polar
++      LOGICAL :: nested
++      LOGICAL :: periodic_x
++      LOGICAL :: specified
++  END TYPE
++END Module
++
++program main
++
++
++!  include "module_small_step_em_modify.F90"
++
++!  use module_small_step_em
++!  use module_small_step_em_modify
++
++  use add_type
++
++  IMPLICIT NONE
++  INTEGER :: ids,ide, jds,jde, kds,kde
++  INTEGER,parameter :: ims=-4,kms=1,jms=-4
++  INTEGER,parameter :: ime=210,kme=36,jme=192
++  INTEGER :: its,ite, jts,jte, kts,kte
++  INTEGER :: number_of_small_timesteps,rk_step, rk_order, step, spec_zone
++
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme, 1:8) :: llcRefresh
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: u, v, u_1, v_1, t_1, ww_1, ft!u, v, u_1, v_1, w_1, t_1, ww1, ww_1,ph_1, ft
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: u_save, v_save, w_save, t_save, ph_save,h_diabatic
++  ! REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: u_2, v_2, w_2, t_2, ph_2
++  ! REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: c2a, ww_save, cqw, cqu, cqv, alpha, gamma, a
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: ww!pb, p, ph, php, pm1, al, alt, ww, random_array
++  ! REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: ru_tend, rv_tend
++  REAL, DIMENSION(ims:ime, kms:kme, jms:jme) :: t, t_ave, uam, vam, wwam
++
++  REAL, DIMENSION(ims:ime, jms:jme) :: mu_1,mu_2, mu
++  REAL, DIMENSION(ims:ime, jms:jme) :: mub, muu, muv, mut,        &
++                                       msfux, msfuy,              &
++                                       msfvx, msfvx_inv, msfvy,   &
++                                       msftx, msfty
++
++  REAL, DIMENSION(ims:ime, jms:jme) :: muus, muvs, muts, mudf, muave
++  REAL, DIMENSION(ims:ime, jms:jme) :: mu_save, mu_tend
++
++  REAL, DIMENSION(kms:kme) :: rdn, rdnw,dnw, fnm, fnp, znu
++
++  REAL :: rdx,rdy
++  REAL :: dts, cf1, cf2, cf3, t0, emdiv, smdiv, epssm, g
++  REAL :: random1,time_begin,time_end,total_time
++
++  INTEGER :: i, j, k
++  INTEGER :: i_start, i_end, j_start, j_end, k_start, k_end
++  INTEGER :: i_endu, j_endv
++  INTEGER :: interval=1
++  INTEGER :: epoch
++
++  LOGICAL :: non_hydrostatic, top_lid
++
++
++  TYPE (grid_config_rec_type) :: config_flags
++  config_flags%open_xs = .true.
++  config_flags%open_ys = .true.
++  config_flags%open_xe = .true.
++  config_flags%open_ye = .true.
++  config_flags%symmetric_xs = .true.
++  config_flags%symmetric_xe = .true.
++  config_flags%symmetric_ys = .true.
++  config_flags%symmetric_ye = .true.
++  config_flags%polar = .true.
++  config_flags%nested = .true.
++  config_flags%periodic_x = .true.
++  config_flags%specified = .true.
++
++  data ids, jds, kds, its, jts, kts /6*1/
++  data ide, ite /2*205/
++  data jde, jte /2*187/
++  data kde, kte /2*98/
++
++  number_of_small_timesteps = 1
++  rk_step = 1
++  rk_order = 1
++  dts = 1.
++  epssm = 1.
++  g = 1.
++
++  rdx = 1.
++  rdy = 1.
++  dts = 1.
++  cf1 = 1.
++  cf2 = 1.
++  cf3 = 1.
++
++  t0 = 0.
++  smdiv = 1.
++  emdiv = 1.
++  step = 1
++  spec_zone = 1
++
++  non_hydrostatic = .true.
++  top_lid = .true.
++
++  interval=1
++
++
++  total_time=0
++
++  call random_seed(put=(/(i,i=1,10000,interval)/))
++
++  call random_number(u)
++  call random_number(v)
++  call random_number(u_1)
++  call random_number(v_1)
++  call random_number(t_1)
++  call random_number(ft)
++
++  call random_number(ww)
++  call random_number(ww_1)
++  call random_number(t)
++  call random_number(t_ave)
++  call random_number(uam)
++  call random_number(vam)
++  call random_number(wwam)
++
++  call random_number(muu)
++  call random_number(muv)
++  call random_number(mut)
++  call random_number(msfux)
++  call random_number(msfuy)
++  call random_number(msfvx)
++  call random_number(msfvx_inv)
++  call random_number(msfvy)
++  call random_number(msftx)
++  call random_number(msfty)
++  call random_number(mu_tend)
++
++  call random_number(muave)
++  call random_number(muts)
++  call random_number(mudf)
++  call random_number(mu)
++
++  call random_number(fnm)
++  call random_number(fnp)
++  call random_number(dnw)
++  call random_number(rdnw)
++
++  DO j=jms, jme
++  DO k=kms, kme
++  DO i=ims, ime
++
++    llcRefresh(i,k,j,1)=i+k+j+7
++
++  ENDDO
++  ENDDO
++  ENDDO
++
++  do epoch = 1,2
++  call advance_mu_t_fortran_plu( ww, ww_1, u, u_1, v, v_1,            &
++                         mu, mut, muave, muts, muu, muv,      &
++                         mudf, uam, vam, wwam, t, t_1,        &
++                         t_ave, ft, mu_tend,                  &
++                         rdx, rdy, dts, epssm,                &
++                         dnw, fnm, fnp, rdnw,                 &
++                         msfux, msfuy, msfvx, msfvx_inv,      &
++                         msfvy, msftx, msfty,                 &
++                         step, config_flags,                  &
++                         ids, ide, jds, jde, kds, kde,        &
++                         ims, ime, jms, jme, kms, kme,        &
++                         its, ite, jts, jte, kts, kte        )
++  enddo
++end program
++
++
++
++SUBROUTINE advance_mu_t_fortran_plu( ww, ww_1, u, u_1, v, v_1,            &
++        mu, mut, muave, muts, muu, muv,      &
++        mudf, uam, vam, wwam, t, t_1,        &
++        t_ave, ft, mu_tend,                  &
++        rdx, rdy, dts, epssm,                &
++        dnw, fnm, fnp, rdnw,                 &
++        msfux, msfuy, msfvx, msfvx_inv,      &
++        msfvy, msftx, msfty,                 &
++        step, config_flags,                  &
++        ids, ide, jds, jde, kds, kde,        &
++        ims, ime, jms, jme, kms, kme,        &
++        its, ite, jts, jte, kts, kte        )
++  use add_type
++
++  IMPLICIT NONE  ! religion first
++
++  ! stuff coming in
++
++  TYPE(grid_config_rec_type), INTENT(IN   ) :: config_flags
++  INTEGER,      INTENT(IN   )    :: ids,ide, jds,jde, kds,kde
++  INTEGER,      INTENT(IN   )    :: ims,ime, jms,jme, kms,kme
++  INTEGER,      INTENT(IN   )    :: its,ite, jts,jte, kts,kte
++
++  INTEGER,      INTENT(IN   )    :: step
++
++  REAL, DIMENSION( ims:ime , kms:kme, jms:jme ),   &
++          INTENT(IN   ) ::                       &
++          u,   &
++          v,   &
++          u_1, &
++          v_1, &
++          t_1, &
++          ft
++
++  REAL, DIMENSION( ims:ime , kms:kme, jms:jme ),      &
++          INTENT(INOUT) ::                          &
++          ww,     &
++          ww_1,   &
++          t,      &
++          t_ave,  &
++          uam,    &
++          vam,    &
++          wwam
++
++  REAL, DIMENSION( ims:ime , jms:jme ),    INTENT(IN   ) :: muu,  &
++          muv,  &
++          mut,  &
++          msfux,&
++          msfuy,&
++          msfvx,&
++          msfvx_inv,&
++          msfvy,&
++          msftx,&
++          msfty,&
++          mu_tend
++
++  REAL, DIMENSION( ims:ime , jms:jme ),    INTENT( INOUT) :: muave, &
++          muts,  &
++          mudf
++
++  REAL, DIMENSION( ims:ime , jms:jme ),    INTENT(INOUT) :: mu
++
++  REAL, DIMENSION( kms:kme ),              INTENT(IN   ) :: fnm,    &
++          fnp,    &
++          dnw,    &
++          rdnw
++
++
++  REAL,                                    INTENT(IN   ) :: rdx,    &
++          rdy,    &
++          dts,    &
++          epssm
++
++  REAL, DIMENSION (its:ite, kts:kte) :: wdtn, dvdxi
++  REAL, DIMENSION (its:ite) :: dmdt
++
++  INTEGER :: i,j,k, i_start, i_end, j_start, j_end, k_start, k_end
++  INTEGER :: i_endu, j_endv
++  REAL    :: acc
++
++  INTEGER :: ubv, lbv, t1, t2, t3, t4, ceild, floord
++
++  ceild(t1, t2) = ceiling(REAL(t1)/REAL(t2))
++  floord(t1, t2) = floor(REAL(t1)/REAL(t2))
++  i_start = its
++  i_end   = min(ite,ide-1)
++  j_start = jts
++  j_end   = min(jte,jde-1)
++  k_start = kts
++  k_end   = kte-1
++  IF ( .NOT. config_flags%periodic_x )THEN
++    IF ( config_flags%specified .or. config_flags%nested ) then
++      i_start = max(its,ids+1)
++      i_end   = min(ite,ide-2)
++    ENDIF
++  ENDIF
++  IF ( config_flags%specified .or. config_flags%nested ) then
++    j_start = max(jts,jds+1)
++    j_end   = min(jte,jde-2)
++  ENDIF
++
++  i_endu = ite
++  j_endv = jte
++
++  DO j = j_start, j_end
++
++    DO i=i_start, i_end
++      dmdt(i) = 0.
++    ENDDO
++
++    DO k=k_start, k_end
++      DO i=i_start, i_end
++        dvdxi(i,k) = msftx(i,j)*msfty(i,j)*(      &
++                rdy*((v(i,k,j+1)+muv(i,j+1)*v_1(i,k,j+1)*msfvx_inv(i,j+1))  &
++                        -(v(i,k,j  )+muv(i,j  )*v_1(i,k,j)*msfvx_inv(i,j  ))) &
++                        +rdx*((u(i+1,k,j)+muu(i+1,j)*u_1(i+1,k,j)/msfuy(i+1,j))      &
++                        -(u(i,k,j  )+muu(i  ,j)*u_1(i,k,j  )/msfuy(i,j)) ))
++        dmdt(i)    = dmdt(i) + dnw(k)*dvdxi(i,k)
++      ENDDO
++    ENDDO
++    DO i=i_start, i_end
++      muave(i,j) = mu(i,j)
++      mu(i,j) = mu(i,j)+dts*(dmdt(i)+mu_tend(i,j))
++      mudf(i,j) = (dmdt(i)+mu_tend(i,j)) ! save tendency for div dampfilter
++      muts(i,j) = mut(i,j)+mu(i,j)
++      muave(i,j) =.5*((1.+epssm)*mu(i,j)+(1.-epssm)*muave(i,j))
++    ENDDO
++  ENDDO
++END SUBROUTINE advance_mu_t_fortran_plu
++
++! { dg-final { scan-tree-dump "issue_llc_hint" "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "analyze_nested_kernels" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump "Stop tracing the outer loop depth" "llc_allocate" } }
+\ No newline at end of file
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 36c3e7d5a..14129a500 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -84,6 +84,7 @@ DEFTIMEVAR (TV_IPA_COMDATS	     , "ipa comdats")
+ DEFTIMEVAR (TV_IPA_HARDWARE_DETECTION, "ipa detection")
+ DEFTIMEVAR (TV_IPA_PREFETCH	     , "ipa prefetch")
+ DEFTIMEVAR (TV_IPA_STRUCT_REORG      , "ipa struct reorg optimization")
++DEFTIMEVAR (TV_IPA_EXTEND_AUTO_PROFILE, "ipa extend auto profile")
+ DEFTIMEVAR (TV_IPA_OPT		     , "ipa various optimizations")
+ DEFTIMEVAR (TV_IPA_LTO_DECOMPRESS    , "lto stream decompression")
+ DEFTIMEVAR (TV_IPA_LTO_COMPRESS      , "lto stream compression")
+@@ -215,6 +216,7 @@ DEFTIMEVAR (TV_TREE_LOOP_DISTRIBUTION, "tree loop distribution")
+ DEFTIMEVAR (TV_CHECK_DATA_DEPS       , "tree check data dependences")
+ DEFTIMEVAR (TV_TREE_PREFETCH	     , "tree prefetching")
+ DEFTIMEVAR (TV_TREE_LOOP_IVOPTS	     , "tree iv optimization")
++DEFTIMEVAR (TV_TREE_LLC_ALLOCATE     , "tree llc allocation")
+ DEFTIMEVAR (TV_PREDCOM		     , "predictive commoning")
+ DEFTIMEVAR (TV_TREE_CH		     , "tree copy headers")
+ DEFTIMEVAR (TV_TREE_SSA_UNCPROP	     , "tree SSA uncprop")
+diff --git a/gcc/toplev.cc b/gcc/toplev.cc
+index f00a166df..bdbd4de63 100644
+--- a/gcc/toplev.cc
++++ b/gcc/toplev.cc
+@@ -567,6 +567,12 @@ compile_file (void)
+       targetm.asm_out.output_ident (ident_str);
+     }
+ 
++  /* Extend auto profile finalization.  */
++  if (flag_ipa_extend_auto_profile)
++    {
++      free_extend_profile_info ();
++    }
++
+   /* Auto profile finalization. */
+   if (flag_auto_profile)
+     end_auto_profile ();
+diff --git a/gcc/tree-cfg.cc b/gcc/tree-cfg.cc
+index d33aaec8c..40f67a8ed 100644
+--- a/gcc/tree-cfg.cc
++++ b/gcc/tree-cfg.cc
+@@ -8476,6 +8476,17 @@ print_loops (FILE *file, int verbosity)
+     print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
+ }
+ 
++/* Dump a loop to file.  */
++
++void
++loop_dump (FILE *file, class loop *loop)
++{
++  print_loop (file, loop, 0, 0);
++  fprintf (file, "vec_niter = ");
++  print_generic_expr (file, loop->vec_nb_iterations);
++  fprintf (file, "\n");
++}
++
+ /* Dump a loop.  */
+ 
+ DEBUG_FUNCTION void
+diff --git a/gcc/tree-cfg.h b/gcc/tree-cfg.h
+index bfe44c073..0982fa7cf 100644
+--- a/gcc/tree-cfg.h
++++ b/gcc/tree-cfg.h
+@@ -83,6 +83,7 @@ extern void dump_function_to_file (tree, FILE *, dump_flags_t);
+ extern void debug_function (tree, dump_flags_t);
+ extern void print_loops_bb (FILE *, basic_block, int, int);
+ extern void print_loops (FILE *, int);
++extern void loop_dump (FILE *file, class loop *loop);
+ extern void debug (class loop &ref);
+ extern void debug (class loop *ptr);
+ extern void debug_verbose (class loop &ref);
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index a98f84397..468353d13 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -395,6 +395,7 @@ extern gimple_opt_pass *make_pass_slp_vectorize_late (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_parallelize_loops (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_loop_prefetch (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_iv_optimize (gcc::context *ctxt);
++extern gimple_opt_pass *make_pass_llc_allocate (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_tree_loop_done (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_ch (gcc::context *ctxt);
+ extern gimple_opt_pass *make_pass_ch_vect (gcc::context *ctxt);
+@@ -536,6 +537,8 @@ extern simple_ipa_opt_pass *make_pass_ipa_hardware_detection (gcc::context *
+ 							      ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_prefetch (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_struct_reorg (gcc::context *ctxt);
++extern simple_ipa_opt_pass *make_pass_ipa_extend_auto_profile (gcc::context
++							       *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_pta (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_tm (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_target_clone (gcc::context *ctxt);
+diff --git a/gcc/tree-scalar-evolution.cc b/gcc/tree-scalar-evolution.cc
+index 44157265c..4c014fb23 100644
+--- a/gcc/tree-scalar-evolution.cc
++++ b/gcc/tree-scalar-evolution.cc
+@@ -2789,7 +2789,7 @@ resolve_mixers (class loop *loop, tree chrec, bool *folded_casts)
+    the loop body has been executed 6 times.  */
+ 
+ tree
+-number_of_latch_executions (class loop *loop)
++number_of_latch_executions (class loop *loop, bool guarantee)
+ {
+   edge exit;
+   class tree_niter_desc niter_desc;
+@@ -2810,7 +2810,8 @@ number_of_latch_executions (class loop *loop)
+   res = chrec_dont_know;
+   exit = single_exit (loop);
+ 
+-  if (exit && number_of_iterations_exit (loop, exit, &niter_desc, false))
++  if (exit && number_of_iterations_exit (loop, exit, &niter_desc, false,
++					 true, NULL, guarantee))
+     {
+       may_be_zero = niter_desc.may_be_zero;
+       res = niter_desc.niter;
+@@ -2836,7 +2837,8 @@ number_of_latch_executions (class loop *loop)
+       fprintf (dump_file, "))\n");
+     }
+ 
+-  loop->nb_iterations = res;
++  if (guarantee)
++    loop->nb_iterations = res;
+   return res;
+ }
+ 
+diff --git a/gcc/tree-scalar-evolution.h b/gcc/tree-scalar-evolution.h
+index 0f90207bc..dc27d9545 100644
+--- a/gcc/tree-scalar-evolution.h
++++ b/gcc/tree-scalar-evolution.h
+@@ -21,7 +21,8 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef GCC_TREE_SCALAR_EVOLUTION_H
+ #define GCC_TREE_SCALAR_EVOLUTION_H
+ 
+-extern tree number_of_latch_executions (class loop *);
++extern tree number_of_latch_executions (class loop *,
++					bool guarantee = true);
+ extern gcond *get_loop_exit_condition (const class loop *);
+ 
+ extern void scev_initialize (void);
+diff --git a/gcc/tree-ssa-llc-allocate.cc b/gcc/tree-ssa-llc-allocate.cc
+new file mode 100644
+index 000000000..da6d72b94
+--- /dev/null
++++ b/gcc/tree-ssa-llc-allocate.cc
+@@ -0,0 +1,4150 @@
++/* LLC allocate.
++   Copyright (C) 2022-2023 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it
++under the terms of the GNU General Public License as published by the
++Free Software Foundation; either version 3, or (at your option) any
++later version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT
++ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "config.h"
++#define INCLUDE_MAP
++#define INCLUDE_SET
++#define INCLUDE_VECTOR
++#define INCLUDE_LIST
++#define INCLUDE_ALGORITHM
++#include "system.h"
++#include "coretypes.h"
++#include "backend.h"
++#include "target.h"
++#include "rtl.h"
++#include "tree.h"
++#include "gimple.h"
++#include "predict.h"
++#include "tree-pass.h"
++#include "gimple-ssa.h"
++#include "optabs-query.h"
++#include "tree-pretty-print.h"
++#include "fold-const.h"
++#include "stor-layout.h"
++#include "gimplify.h"
++#include "gimple-iterator.h"
++#include "gimplify-me.h"
++#include "tree-ssa-loop-ivopts.h"
++#include "tree-ssa-loop-manip.h"
++#include "tree-ssa-loop-niter.h"
++#include "tree-ssa-loop.h"
++#include "ssa.h"
++#include "tree-into-ssa.h"
++#include "cfgloop.h"
++#include "tree-scalar-evolution.h"
++#include "langhooks.h"
++#include "tree-inline.h"
++#include "tree-data-ref.h"
++#include "diagnostic-core.h"
++#include "dbgcnt.h"
++#include "gimple-pretty-print.h"
++#include "internal-fn.h"
++#include "tree-cfg.h"
++#include "profile-count.h"
++#include "auto-profile.h"
++
++/* Number of parallel cores.  */
++const unsigned int PARALLEL_NUM = 304;
++
++/* Indirect access weight.  */
++const unsigned int INDIRECT_ACCESS_VALUE = 3;
++
++/* Write memory weight.  */
++const unsigned int WRITE_COST = 4;
++
++/* Maximum ratio of total prefetch data size to cache size.  */
++const double PREFETCH_CACHE_SIZE_RATIO = 0.8;
++
++/* Prefetch tool input max length.  */
++#ifndef PREFETCH_TOOL_INPUT_MAX_LEN
++#define PREFETCH_TOOL_INPUT_MAX_LEN 512
++#endif
++
++/* Prefetch tool number max length.  */
++#ifndef PREFETCH_TOOL_NUM_MAX_LEN
++#define PREFETCH_TOOL_NUM_MAX_LEN 9
++#endif
++
++#ifndef PREFETCH_FUNC_TOPN
++#define PREFETCH_FUNC_TOPN param_llc_allocate_func_topn
++#endif
++
++namespace {
++
++/* loop bound info of the memory reference located.  */
++struct loop_bound
++{
++  /* iv tree_node.  */
++  tree iv;
++
++  /* define stmt of iv.  */
++  gimple *def_stmt;
++
++  /* loop where stmt is located.  */
++  class loop *loop;
++
++  /* loop unroll factor.  */
++  unsigned int unroll;
++
++  /* Number of iterations of loop.  */
++  tree niters;
++
++  loop_bound (tree t, gimple *stmt)
++    {
++      iv = t;
++      def_stmt = stmt;
++      loop = loop_containing_stmt (stmt);
++      unroll = 1;
++      niters = chrec_dont_know;
++    }
++};
++
++/* method of calculating the data size.  */
++
++enum calc_type
++{
++  UNHANDLE_CALC = 0,
++  RUNTIME_CALC,
++  STATIC_CALC
++};
++
++/* Describes a info of a memory reference.  */
++
++struct data_ref
++{
++  /* The memory reference.  */
++  tree ref;
++
++  /* Statement where the ref is located.  */
++  gimple *stmt;
++
++  /* var_decl or param_decl, used for the ref_group.  */
++  tree var;
++
++  /* Base of the reference.  */
++  tree base;
++
++  /* Constant offset of the reference.  */
++  tree offset;
++
++  /* index of the reference.  */
++  tree index;
++
++  /* Constant step of the reference.  */
++  tree step;
++
++  /* loop boundary info of each dimension.  */
++  std::vector<loop_bound> loop_bounds;
++
++  /* memory data size, Unit: MB.  */
++  double data_size;
++
++  /* method of calculating the data size.  */
++  calc_type calc_by;
++
++  /* True if the info of ref is traced, and then record it.  */
++  unsigned int trace_status_p : 1;
++
++  /* True if the loop is vectorized.  */
++  unsigned int vectorize_p : 1;
++
++  /* True if the memory reference is shared.  */
++  unsigned int parallel_p : 1;
++
++  /* True if the memory reference is regular.  */
++  unsigned int regular_p : 1;
++
++  /* True if the memory reference is read.  */
++  unsigned int read_p : 1;
++
++  /* loop father depth.  */
++  unsigned int loop_depth;
++
++  /* bb index.  */
++  int bb_idx;
++
++  /* loop index.  */
++  int loop_idx;
++
++  data_ref ()
++    {
++      ref = NULL_TREE;
++      stmt = NULL;
++      var = NULL_TREE;
++      base = NULL_TREE;
++      offset = NULL_TREE;
++      index = NULL_TREE;
++      step = NULL_TREE;
++      data_size = 0;
++      calc_by = UNHANDLE_CALC;
++      trace_status_p = false;
++      vectorize_p = false;
++      parallel_p = false;
++      regular_p = true;
++      read_p = true;
++      loop_depth = 0;
++      bb_idx = 0;
++      loop_idx = 0;
++    }
++};
++
++/* ================ phase 1 get_dense_memory_kernels ================  */
++
++/* Add ref node and print.  */
++
++void
++add_ref (std::vector<data_ref> &references, tree op, gimple *stmt,
++	 bool vectorize_p, bool read_p)
++{
++  data_ref ref;
++  ref.ref = op;
++  ref.stmt = stmt;
++  ref.vectorize_p = vectorize_p;
++  ref.read_p = read_p;
++  ref.loop_depth = loop_depth (stmt->bb->loop_father);
++  ref.bb_idx = stmt->bb->index;
++  ref.loop_idx = stmt->bb->loop_father->num;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      print_generic_expr (dump_file, ref.ref, TDF_LINENO);
++      fprintf (dump_file, "\n");
++    }
++  references.push_back (ref);
++}
++
++/* Get the references from the simple call (vectorization type).  */
++
++void
++get_references_in_gimple_call (gimple *stmt, std::vector<data_ref> &references)
++{
++  if (gimple_code (stmt) != GIMPLE_CALL)
++    return;
++
++  if (gimple_call_internal_p (stmt))
++    {
++      bool read_p = false;
++      switch (gimple_call_internal_fn (stmt))
++	{
++	  case IFN_MASK_GATHER_LOAD:
++	  case IFN_MASK_LOAD:
++	    {
++	      if (gimple_call_lhs (stmt) == NULL_TREE)
++		return;
++	      read_p = true;
++	      // FALLTHRU
++	    }
++	  case IFN_MASK_STORE:
++	    {
++	      /* _1 = &MEM[base: a_2(D), index: ivtmp_3, step: 8, offset: 0B];
++		 vect__1.1 = .MASK_LOAD (_1, 64B, loop_mask_4);
++
++		 _1 = &MEM[base: a_2(D), index: ivtmp_3, step: 8, offset: 0B];
++		 .MASK_STORE (_1, 64B, loop_mask_4, vect__1.2);
++
++		_1 = (sizetype) a_2(D);
++		 vect_patt_3.3 = .MASK_GATHER_LOAD (_1, vect__4.4, 8,
++						    { 0.0, ... }, loop_mask_5);
++	      */
++	      tree op1 = gimple_call_arg (stmt, 0);
++	      if (TREE_CODE (op1) != SSA_NAME)
++		{
++		  if (dump_file && (dump_flags & TDF_DETAILS))
++		    {
++		      fprintf (dump_file, "get_references_in_gimple_call: ");
++		      fprintf (dump_file, "find base that not ssa_name: ");
++		      print_generic_expr (dump_file, op1, TDF_LINENO);
++		      fprintf (dump_file, "\n");
++		    }
++		  return;
++		}
++	      gimple *op1_def = SSA_NAME_DEF_STMT (op1);
++	      if (op1_def != NULL && gimple_code (op1_def) == GIMPLE_ASSIGN)
++		{
++		  /* &MEM[base: xx]  */
++		  tree rhs1 = gimple_assign_rhs1 (op1_def);
++		  /* If the definition stmt of the operation is memory
++		     reference type, read it directly.  */
++		  if (TREE_CODE (rhs1) == ADDR_EXPR
++		      && TREE_CODE (TREE_OPERAND (rhs1, 0)) == TARGET_MEM_REF)
++		    op1 = TREE_OPERAND (rhs1, 0); /* MEM[base: xx]  */
++		}
++
++	      add_ref (references, op1, stmt, true, read_p);
++	      return;
++	    }
++	  default:
++	    return;
++	}
++    }
++}
++
++/* Check whether memory reference is located exactly in main function.
++   There are some other unexpected scenarios where mem ref or function is
++   tracing failed without loc info (newly generated gimple/function).  */
++
++bool
++is_reference_in_main_p (gimple *stmt)
++{
++  expanded_location xloc = expand_location (stmt->location);
++  if (DECL_NAME (cfun->decl) && MAIN_NAME_P (DECL_NAME (cfun->decl)))
++    {
++      /* NEXT STEP: Check why some functions have no end_locus.  */
++      if (!(DECL_SOURCE_LOCATION (current_function_decl)
++	    && cfun->function_end_locus))
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "Cannot find function start-end location.\n");
++	  return true;
++	}
++      else if (!(xloc.file && xloc.line))
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "Cannot find gimple statement location.\n");
++	      print_gimple_stmt (dump_file, stmt, 0, TDF_LINENO);
++	    }
++	  return false;
++	}
++      int fn_start = expand_location (
++	DECL_SOURCE_LOCATION (current_function_decl)).line;
++      int fn_end = expand_location (cfun->function_end_locus).line;
++
++      if (xloc.line >= fn_start && xloc.line <= fn_end)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "Memory access in main function: ");
++	      print_gimple_stmt (dump_file, stmt, 0, TDF_LINENO);
++	    }
++	  return true;
++	}
++    }
++  return false;
++}
++
++/* Stores the locations of memory references in STMT to REFERENCES.  */
++
++void
++get_references_in_stmt (gimple *stmt, std::vector<data_ref> &references)
++{
++  if (!gimple_vuse (stmt))
++    return;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "gimple_vuse: ");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_LINENO);
++    }
++
++  /* Filter out memory references located in main function. This is an
++     experimental filtering scheme ONLY for HPC case verification as
++     some HPC cases assign values for variables (mem ref) in main function.  */
++  if (is_reference_in_main_p (stmt))
++    return;
++
++  if (gimple_code (stmt) == GIMPLE_ASSIGN)
++    {
++      tree op0 = gimple_assign_lhs (stmt);
++      tree op1 = gimple_assign_rhs1 (stmt);
++      tree base = NULL_TREE;
++
++      /* _1 = MEM[base: a, index: i, step: 8, offset: 0B];  */
++      if (REFERENCE_CLASS_P (op1)  && (base = get_base_address (op1))
++	  && TREE_CODE (base) != SSA_NAME && !is_gimple_min_invariant (base))
++	add_ref (references, op1, stmt, false, true);
++
++      if (REFERENCE_CLASS_P (op0) && get_base_address (op0))
++	add_ref (references, op0, stmt, false, false);
++    }
++  else if (gimple_code (stmt) == GIMPLE_CALL)
++    get_references_in_gimple_call (stmt, references);
++
++  return;
++}
++
++/* flag of loop filter out.  */
++
++struct loop_filter_out_flag
++{
++  /* Use external call.  */
++  bool use_ext_call;
++
++  /* Use external node.  */
++  bool use_ext_node;
++
++  /* Use loop defined in macros.  */
++  bool use_macro_loop;
++
++  /* Use external node.  */
++  bool use_cond_func;
++};
++
++/* Check whether an external node is used.  */
++
++bool use_ext_node_p (const std::vector<data_ref> &references,
++		     unsigned int &start)
++{
++  expanded_location cfun_xloc
++	= expand_location (DECL_SOURCE_LOCATION (current_function_decl));
++
++  unsigned i = start;
++  start = references.size ();
++  for (; i < references.size (); i++)
++    {
++      data_ref ref = references[i];
++      expanded_location xloc = expand_location (ref.stmt->location);
++      if (xloc.file && filename_cmp (cfun_xloc.file, xloc.file))
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "use_ext_node\n\n");
++	  return true;
++	}
++    }
++  return false;
++}
++
++/* Determine whether to filter out loops by stmt.  */
++
++bool
++filter_out_loop_by_stmt_p (loop_filter_out_flag &loop_filter, gimple *stmt,
++			   const std::vector<data_ref> &references,
++			   unsigned int &start)
++{
++  expanded_location xloc = expand_location (stmt->location);
++  /* check use_ext_call.  */
++  if (gimple_code (stmt) == GIMPLE_CALL && !gimple_call_internal_p (stmt))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "use_ext_call: ");
++	  print_gimple_stmt (dump_file, stmt, 0, TDF_LINENO);
++	}
++      loop_filter.use_ext_call = true;
++      return true;
++    }
++
++  /* check use_macro_loop.  */
++  if (xloc.file && xloc.column != 1)
++    loop_filter.use_macro_loop = false;
++
++  /* check use_cond_func, VEC_COND_EXPR/MIN_EXPR/MAX_EXPR.  */
++  if (gimple_code (stmt) == GIMPLE_ASSIGN)
++    {
++      enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
++      if (rhs_code == VEC_COND_EXPR || rhs_code == MIN_EXPR
++	  || rhs_code == MAX_EXPR)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "use_cond_func: ");
++	      print_gimple_stmt (dump_file, stmt, 0, TDF_LINENO);
++	    }
++	  loop_filter.use_cond_func = true;
++	  return true;
++	}
++    }
++
++  /* check use_ext_node.  */
++  if (use_ext_node_p (references, start))
++    {
++      loop_filter.use_ext_node = true;
++      return true;
++    }
++
++  return false;
++}
++
++/* Dump the flag type of the loop is filtered out.  */
++
++void
++dump_loop_filter_out_flag (loop_filter_out_flag &loop_filter)
++{
++  if (loop_filter.use_ext_call)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "non-dense mem access: use_ext_call\n");
++    }
++
++  if (loop_filter.use_ext_node)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "non-dense mem access: use_ext_node\n");
++    }
++
++  if (loop_filter.use_macro_loop)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "non-dense mem access: use_macro_loop\n");
++    }
++
++  if (loop_filter.use_cond_func)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "non-dense mem access: use_cond_func\n");
++    }
++}
++
++/* Get references in loop.  */
++
++bool
++get_references_in_loop (std::vector<data_ref> &references,
++			loop_filter_out_flag &loop_filter,
++			class loop *loop)
++{
++  unsigned int start = 0;
++  bool filter_out_loop = true;
++
++  /* Analyze each bb in the loop.  */
++  basic_block *body = get_loop_body_in_dom_order (loop);
++  for (unsigned i = 0; i < loop->num_nodes; i++)
++    {
++      basic_block bb = body[i];
++      if (bb->loop_father != loop)
++	continue;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "\n==== the %dth loop bb body ====\n", i);
++	  gimple_dump_bb (dump_file, bb, 0, dump_flags);
++	  fprintf (dump_file, "\n");
++	}
++
++      gimple_stmt_iterator bsi;
++      for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
++	{
++	  gimple *stmt = gsi_stmt (bsi);
++	  get_references_in_stmt (stmt, references);
++	  filter_out_loop = filter_out_loop_by_stmt_p (loop_filter, stmt,
++						       references, start);
++	  if (filter_out_loop)
++	    break;
++	}
++      if (filter_out_loop)
++	break;
++    }
++  free (body);
++  return !filter_out_loop;
++}
++
++/* Computes an estimated number of insns in LOOP, weighted by WEIGHTS.
++   Assume that the HPC data reading and calculation process does not involve
++   adding branches in loops.  Therefore, all bbs of loops are directly used for
++   calculation (excluding embedded loops) without considering branch weighting.
++*/
++
++unsigned
++estimate_loop_insns (class loop *loop, eni_weights *weights)
++{
++  basic_block *body = get_loop_body (loop);
++  gimple_stmt_iterator gsi;
++  unsigned size = 0, i;
++
++  for (i = 0; i < loop->num_nodes; i++)
++    {
++      basic_block bb = body[i];
++      if (bb->loop_father != loop)
++	{
++	  continue;
++	}
++      for (gsi = gsi_start_bb (body[i]); !gsi_end_p (gsi); gsi_next (&gsi))
++	size += estimate_num_insns (gsi_stmt (gsi), weights);
++    }
++  free (body);
++
++  return size;
++}
++
++/* Check whether the memory access is dense.  */
++
++bool
++dense_memory_p (const std::vector<data_ref> &references, class loop *loop)
++{
++  int ref_count = references.size ();
++  unsigned int ninsns = estimate_loop_insns (loop, &eni_size_weights);
++  float mem_to_insn_ratio = (float)ref_count / (float)ninsns;
++
++  /* The number of cores to be run and DDR bandwidth information can be
++  transferred to flexibly adjust the threshold.  */
++  bool dense_mem = (mem_to_insn_ratio >= (param_mem_access_ratio / 100.0)
++		    && ref_count >= param_mem_access_num);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      const char *fn_name = IDENTIFIER_POINTER (DECL_NAME (cfun->decl));
++
++      /* Dump dense memory source code location.  */
++      if (ref_count && references[0].stmt->location)
++	{
++	  expanded_location xloc = expand_location
++				     (references[0].stmt->location);
++	  int fn_start = 0;
++	  if (DECL_SOURCE_LOCATION (current_function_decl))
++	    fn_start = expand_location (
++			    DECL_SOURCE_LOCATION (current_function_decl)).line;
++	  int fn_end = fn_start;
++	  if (cfun->function_end_locus)
++	    fn_end = expand_location (cfun->function_end_locus).line;
++	  if (xloc.file)
++	    fprintf (dump_file, "[%s:%s(%d-%d):%d:%d] ",
++		      xloc.file, fn_name, fn_start, fn_end,
++		      xloc.line, xloc.column);
++	}
++
++      /* Dump memory dense information.  */
++      if (dense_mem)
++	fprintf (dump_file, "dense memory access: ");
++      else
++	fprintf (dump_file, "non-dense mem access: ");
++      fprintf (dump_file,
++	       "ref_count = %d, ninsns = %d, mem_to_insn_ratio = %f\n\n",
++	       ref_count, ninsns, mem_to_insn_ratio);
++    }
++
++  return dense_mem;
++}
++
++/* Analyze the inner loop and get the loop with dense memory access.  */
++
++void
++analyze_loop_dense_memory (std::vector<class loop *> &kernels,
++			  std::map<class loop *, std::vector<data_ref> > &kernels_refs,
++			  class loop *loop)
++{
++  std::vector references;
++  number_of_latch_executions (loop);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\n========== Processing loop %d: ==========\n",
++	       loop->num);
++      loop_dump (dump_file, loop);
++      flow_loop_dump (loop, dump_file, NULL, 1);
++      fprintf (dump_file, "loop unroll: %d\n", loop->unroll);
++    }
++
++  if (get_loop_exit_edges (loop).length () != 1)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "non-dense mem access: loop_multiple_exits\n");
++      return;
++    }
++
++  loop_filter_out_flag loop_filter = {false, false, true, false};
++
++  if (!get_references_in_loop (references, loop_filter, loop))
++    {
++      dump_loop_filter_out_flag (loop_filter);
++      return;
++    }
++
++  if (dense_memory_p (references, loop))
++    {
++      kernels_refs[loop] = references;
++      kernels.push_back (loop);
++    }
++}
++/* Analyze the inner loop and get the loop with dense memory access.  */
++
++bool
++get_dense_memory_kernels (std::vector<class loop *> &kernels,
++			  std::map<class loop *, std::vector<data_ref> > &kernels_refs)
++{
++  if (dump_file)
++    fprintf (dump_file, "\nPhase 1: get_dense_memory_kernels\n\n");
++  for (auto loop : loops_list (cfun, LI_ONLY_INNERMOST))
++    analyze_loop_dense_memory (kernels, kernels_refs, loop);
++  return kernels.size () > 0;
++}
++
++/* ================ phase 2 trace_data_refs_info ================  */
++
++/* Determine whether the declaration is a non-vectorized.  */
++
++bool
++generic_decl_p (tree expr)
++{
++  if (expr == NULL_TREE)
++    return false;
++  enum tree_code expr_code = TREE_CODE (expr);
++  if (expr_code != VAR_DECL && expr_code != PARM_DECL
++      && expr_code != COMPONENT_REF)
++    return false;
++  return true;
++}
++
++/* Initial worklist preparation for source variable tracing.
++   Add different initial node based on different gimple statements.  */
++
++void
++add_worklist (std::vector<tree> &worklist, std::set<tree> &walked,
++	      gimple *def_stmt)
++{
++  if (gimple_code (def_stmt) == GIMPLE_PHI)
++    {
++      for (unsigned i = 0; i < gimple_phi_num_args (def_stmt); i++)
++	{
++	  tree node = gimple_phi_arg_def (def_stmt, i);
++	  if (!walked.count (node))
++	    {
++	      worklist.push_back (node);
++	      walked.insert (node);
++	    }
++	}
++    }
++  else if (is_gimple_assign (def_stmt))
++    {
++      tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
++      if (rhs_code == POINTER_PLUS_EXPR || rhs_code == NEGATE_EXPR
++	  || rhs_code == NOP_EXPR || rhs_code == SSA_NAME
++	  || rhs_code == COMPONENT_REF)
++	{
++	  tree node = gimple_assign_rhs1 (def_stmt);
++	  if (!walked.count (node))
++	    {
++	      worklist.push_back (node);
++	      walked.insert (node);
++	    }
++	}
++      else if (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR)
++	{
++	  tree node = gimple_assign_rhs1 (def_stmt);
++	  if (!walked.count (node))
++	    {
++	      worklist.push_back (node);
++	      walked.insert (node);
++	    }
++	  node = gimple_assign_rhs2 (def_stmt);
++	  if (!walked.count (node))
++	    {
++	      worklist.push_back (node);
++	      walked.insert (node);
++	    }
++	}
++      else if (rhs_code == TARGET_MEM_REF || rhs_code == MEM_REF)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "possibly unnested indirect memory access: ");
++	      print_gimple_stmt (dump_file, def_stmt, 0, TDF_LINENO);
++	      fprintf (dump_file, "\n");
++	    }
++	}
++      else
++	{
++	  /* unhandled assign rhs_code: _219 = _17 * _70;
++	     _17 = *grid_56(D).sst.span;
++	     _70 = *grid_56(D).sst.dim[0].stride;
++	  */
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "unhandled assign rhs_code: ");
++	      print_gimple_stmt (dump_file, def_stmt, 0, TDF_LINENO);
++	      fprintf (dump_file, "\n");
++	    }
++	}
++    }
++  else
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "unsupported tracing stmt: ");
++	  print_gimple_stmt (dump_file, def_stmt, 0, TDF_LINENO);
++	  fprintf (dump_file, "\n");
++	}
++    }
++}
++
++
++/* Tracing source variables:
++   vectp.1 = a_2(D) + _3;
++   _4 = &MEM[base: vectp.1, index: ivtmp_5, step: 8, offset: 0B];
++   vect__1.6 = .MASK_LOAD (_4, 64B, loop_mask_7);
++
++   _1 = (sizetype) b_2(D);
++   vect_patt_3.3 = .MASK_GATHER_LOAD (_1, vect__4.4, 8, { 0.0, ... },
++				      loop_mask_5);
++  ...
++  Due to previous pass optimizations, the current tracing method can find
++  several source variable candidates.  We decide to record them in a map and
++  later filter out the true base variable by some criteria.
++*/
++
++void
++trace_base_var_helper (tree arg, std::set<tree> &walked,
++		       std::map<tree, int> &base_var_candid, bool is_vect_type)
++{
++  if (arg == NULL)
++    return;
++
++  /* Var_decl type: base address extracted from ARRAY_REF.  */
++  if (TREE_CODE (TREE_TYPE (arg)) == ARRAY_TYPE && TREE_CODE (arg) == VAR_DECL
++      && generic_decl_p (arg))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "var_decl type\n");
++      base_var_candid[arg] += 1;
++      return;
++    }
++
++  /* Array type.  */
++  tree op0 = NULL;
++  if (TREE_CODE (arg) == ADDR_EXPR
++      && (op0 = TREE_OPERAND (arg, 0)) && generic_decl_p (op0))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "array type\n");
++      base_var_candid[op0] += 1;
++      return;
++    }
++
++  /* Pointer type.  */
++  if (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE && generic_decl_p (arg))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "pointer type\n");
++      base_var_candid[arg] += 1;
++      return;
++    }
++
++  /* SSA_NAME type.  */
++  if (TREE_CODE (arg) != SSA_NAME)
++    return;
++
++  tree tmp_var = SSA_NAME_VAR (arg);
++  if (tmp_var && !is_vect_type && TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "ssa pointer type\n");
++      base_var_candid[tmp_var] += 1;
++      return;
++    }
++
++  gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
++  if (def_stmt == NULL)
++    return;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      print_generic_expr (dump_file, arg, TDF_SLIM);
++      fprintf (dump_file, "\t\t: ");
++      print_gimple_stmt (dump_file, def_stmt, 0, TDF_SLIM);
++    }
++
++  if (gimple_code (def_stmt) == GIMPLE_NOP)
++    {
++      if (!walked.count (tmp_var))
++	walked.insert (tmp_var);
++      trace_base_var_helper (tmp_var, walked, base_var_candid, is_vect_type);
++    }
++  else
++    {
++      std::vector<tree> worklist;
++      add_worklist (worklist, walked, def_stmt);
++      for (unsigned i = 0; i < worklist.size (); ++i)
++	trace_base_var_helper (worklist[i], walked, base_var_candid, is_vect_type);
++    }
++}
++
++/* Identify the base variable traced from base address of memory reference.
++   We recognize that current method could detect several base variable
++   candidates and the temporary criteria for base variable determination
++   is that either one of the following statement is true:
++    1) The number of base variable candidates is 1;
++    2) The number of detected gimple statements for some variable is 1.
++   We may use other criteria or relax the current criteria
++   (e.g., criterion 2: 1 -> any odd number).  */
++
++bool
++trace_base_var (data_ref &mem_ref, std::set<tree> &walked)
++{
++  tree &var = mem_ref.var;
++  tree arg = mem_ref.base;
++  std::map<tree, int> base_var_candid;
++  bool is_vect_type = TREE_CODE (TREE_TYPE (mem_ref.ref)) == VECTOR_TYPE;
++  trace_base_var_helper (arg, walked, base_var_candid, is_vect_type);
++  bool is_tracing_unusual = false;
++  if (base_var_candid.size () == 1)
++    var = base_var_candid.begin ()->first;
++  else
++    {
++      is_tracing_unusual = true;
++      for (std::map<tree, int>::iterator it = base_var_candid.begin ();
++	   it != base_var_candid.end (); ++it)
++	var = it->second == 1 ? it->first : var;
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Traced variables at ");
++      print_generic_expr (dump_file, arg, TDF_SLIM);
++      fprintf (dump_file, ":\n");
++      for (std::map<tree, int>::iterator it = base_var_candid.begin ();
++	   it != base_var_candid.end (); ++it)
++	fprintf (dump_file, "%s:%d, ", get_name (it->first), it->second);
++      fprintf (dump_file, "\n");
++
++      if (var == NULL_TREE)
++	fprintf (dump_file, "Unhandled scenario for tracing base variable.\n");
++      else if (is_tracing_unusual && var != NULL_TREE)
++	fprintf (dump_file, "Tracing unusual number or occurrences of base "
++			    "variables.  Choose %s.\n",
++		 get_name (var));
++    }
++  return var != NULL_TREE;
++}
++
++/* Recursively trace and check whether the definition stmt of the
++   index operand is a recorded stmt in direct access tracing.
++   Return 0 if ref is a direct access a[].
++   Return 1 if ref is a non-nested indirect access a[b[]].
++   Return 2 if ref is a complex indirect memory access, such as a[f(b[])].  */
++
++int
++trace_indirect_operand (tree arg, std::set<gimple *> &traced_ref_stmt)
++{
++  /* Return 0 if tree `arg` is not an SSA for further tracing.  */
++  if (TREE_CODE (arg) != SSA_NAME)
++    return 0;
++
++  gimple *def_stmt = SSA_NAME_DEF_STMT (arg);
++
++  /* Return 1 if `index` has been detected as a traced direct memory access
++     before.  */
++  if (traced_ref_stmt.count (def_stmt))
++    return 1;
++
++  /* Return 0 if def stmt of `arg` is not in gimple assign type. Stop tracing
++     index operand and currently no memory access operand is detected.  */
++  if (!def_stmt || !is_gimple_assign (def_stmt))
++    return 0;
++
++  tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
++  /* Collect a whitelist of gimple_assign_rhs_code for tracing pointer/array
++     type indirect memory access.  */
++  if (rhs_code != MULT_EXPR && rhs_code != NOP_EXPR
++      && rhs_code != CONVERT_EXPR && rhs_code != PLUS_EXPR)
++    {
++      /* Return 2 if tree code has any type representing references to storage,
++	 implying a complex indirect memory access scenario for future
++	 analysis.  */
++      if (rhs_code == MEM_REF || rhs_code == TARGET_MEM_REF
++	  || rhs_code == ARRAY_REF || rhs_code == ARRAY_RANGE_REF
++	  || rhs_code == COMPONENT_REF || rhs_code == ADDR_EXPR
++	  || rhs_code == INDIRECT_REF)
++	return 2;
++
++      /* Return 0 and stop tracing if tree code is not a common tracing
++	 operand, but still reflected as a non-reference type.
++	 Caveats: if we never deal with this tree code before, maybe it is
++	 more suitable to treat this scenario strictly.  */
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "unknown tracing tree code: %s\n",
++		   get_tree_code_name (rhs_code));
++	  print_gimple_stmt (dump_file, def_stmt, 0, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      return 0;
++    }
++
++  tree op = NULL_TREE;
++  ssa_op_iter iter;
++  FOR_EACH_SSA_TREE_OPERAND (op, def_stmt, iter, SSA_OP_USE)
++    {
++      int trace_indir_p = trace_indirect_operand (op, traced_ref_stmt);
++      if (trace_indir_p != 0)
++	return trace_indir_p;
++    }
++  return 0;
++}
++
++/* Trace the pointer of the direct/indirect memory access:
++   1) Obtain the base address of the memory access.
++   2) If index variable is formed by another memory access operation (i.e., an
++      indication of indirect memory access), ensure that the index has been
++      traced in an already discovered direct memory access.
++   3) Otherwise, the memory access is in a more complex scenario and we need to
++      postpone the analysis later. For example, the indirect memory access is
++      nested, a[b[c[...]]], or the index variable (formed in another memory
++      access) has not been recorded/traced yet.
++   e.g.,
++   _1 = MEM[base: a_2(D), index: ivtmp.3_3, step: 4, offset: 0B];
++   _4 = (long unsigned int) _1;
++   _5 = _4 * 8;
++   _6 = p(D) + _5; // get base
++   _7 = *_6;       // start tracing
++*/
++
++bool
++trace_ptr_mem_ref (data_ref &mem_ref, std::set<gimple *> &traced_ref_stmt,
++		   std::vector<data_ref> &unresolved_refs)
++{
++  /* Simple scenario:
++     _2208 = np.120_2207 * 8;
++     _1921 = sorted_weight$data_381 + _2208;
++     *_1921 = _2206;
++
++     Complex scenario:
++     MEM[base: _3235, index: ivtmp.2768_3189, step: 4, offset: 0B] = _105;
++     _3236 = (sizetype) _214;
++     _3237 = _3236 * 4;
++     _3238 = _857 + _3237;  // base + index * step
++     _3239 = _3238 + 4;     // offset
++     MEM[base: _3239, index: ivtmp.2768_3189, step: 4, offset: 0B] = 0.0;
++  */
++  tree pointer = TREE_OPERAND (mem_ref.ref, 0);
++  tree offset = TREE_OPERAND (mem_ref.ref, 1);
++  if (TREE_CODE (offset) != INTEGER_CST)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Unhandled scenario for non-constant offset.\n");
++
++      return false;
++    }
++  if (TREE_CODE (pointer) != SSA_NAME)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Unhandled scenario for non-ssa pointer.\n");
++
++      return false;
++    }
++
++  /* Tracing back base address from SSA.  */
++  gimple *ptr_def_stmt = SSA_NAME_DEF_STMT (pointer);
++  if (ptr_def_stmt == NULL || gimple_code (ptr_def_stmt) != GIMPLE_ASSIGN
++      || gimple_assign_rhs_code (ptr_def_stmt) != POINTER_PLUS_EXPR)
++    return false;
++  tree base = gimple_assign_rhs1 (ptr_def_stmt);
++  /* index_offset = index * step.  */
++  tree index_offset = gimple_assign_rhs2 (ptr_def_stmt);
++
++  /* Tracing back index from SSA.  */
++  if (TREE_CODE (index_offset) != SSA_NAME)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  if (TREE_CODE (index_offset) == INTEGER_CST)
++	    fprintf (dump_file, "Constant index for memory access.\n");
++	  else
++	    fprintf (dump_file, "Unhandled scenario for index tracing.\n");
++	}
++      return false;
++    }
++
++  gimple *idx_def_stmt = SSA_NAME_DEF_STMT (index_offset);
++  if (idx_def_stmt == NULL || gimple_code (idx_def_stmt) != GIMPLE_ASSIGN
++      || gimple_assign_rhs_code (idx_def_stmt) != MULT_EXPR)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Unhandled scenario for index tracing.\n");
++      return false;
++    }
++
++  /* Split array index from total offset of index, `index * step`.  */
++  mem_ref.base = base;
++  mem_ref.offset = offset;
++  mem_ref.index = gimple_assign_rhs1 (idx_def_stmt);
++  mem_ref.step = gimple_assign_rhs2 (idx_def_stmt);
++  if (TREE_CODE (gimple_assign_rhs1 (idx_def_stmt)) == INTEGER_CST)
++    {
++      mem_ref.index = gimple_assign_rhs2 (idx_def_stmt);
++      mem_ref.step = gimple_assign_rhs1 (idx_def_stmt);
++    }
++
++  int trace_index_indir_p = trace_indirect_operand (mem_ref.index,
++						    traced_ref_stmt);
++  if (trace_index_indir_p == 0)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Direct memory access tracing succeeded.\n");
++    }
++  else if (trace_index_indir_p == 1)
++    {
++      mem_ref.regular_p = false;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Indirect memory access tracing succeeded.\n");
++    }
++  else
++    {
++      /* Record indirect memory access with complex scenarios for future
++	 analysis.  */
++      unresolved_refs.push_back (mem_ref);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Unhandled indirect memory access tracing.\n");
++      return false;
++    }
++
++  return true;
++}
++
++/* Tracing direct memory reference information.  */
++
++bool
++trace_direct_mem_ref (data_ref &mem_ref)
++{
++  /* Direct memory access, regardless of whether it is in vectorized form,
++     can be determined through TARGET_MEM_REF:
++      address = base + index * step + offset.
++     MASK_LOAD example:
++      _43 = &MEM[base: _42, index: ivtmp_140, step: 8, offset: 0B];
++      vect__42.11_160 = .MASK_LOAD (_43, 64B, loop_mask_163);
++
++     In some cases (2D-array or complex-index 1D array), mem_ref's `base`
++     may actually represent `base + index * step` when `base` address updates
++     by a PHI operation, e.g.,
++      MEM[base: _51, offset: 0B]
++      _51 = (void *) ivtmp.18_11;
++      ivtmp.18_11 = PHI 
++      ivtmp.18_43 = ivtmp.18_11 + 16;
++      ivtmp.18_52 = (unsigned long) _10;
++      _10 = arr2D_29(D) + _9;
++  */
++  mem_ref.base = TREE_OPERAND (mem_ref.ref, 0);
++  mem_ref.offset = TREE_OPERAND (mem_ref.ref, 1);
++  mem_ref.index = TREE_OPERAND (mem_ref.ref, 2);
++  mem_ref.step = TREE_OPERAND (mem_ref.ref, 3);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Direct memory access tracing succeeded.\n");
++
++  return true;
++}
++
++/* Tracing vectorized indirect memory reference information.
++   MASK_GATHER_LOAD example:
++    vect__45.13_146 = .MASK_LOAD (_41, 32B, loop_mask_153);
++    vect__46.14_145 = (vector([2,2]) long unsigned int) vect__45.13_146;
++    vect_patt_163.15_143 = .MASK_GATHER_LOAD (_144, vect__46.14_145, 8,
++      { 0.0, ... }, loop_mask_153);  */
++
++bool
++trace_indirect_mem_ref_vectorized (data_ref &mem_ref,
++				   std::set &traced_ref_stmt)
++{
++  /* Processing of vectorization types.  */
++  if (mem_ref.vectorize_p)
++    {
++      tree op = gimple_call_arg (mem_ref.stmt, 1);
++      if (trace_indirect_operand (op, traced_ref_stmt))
++	{
++	  mem_ref.base = gimple_call_arg (mem_ref.stmt, 0);
++	  mem_ref.index = gimple_call_arg (mem_ref.stmt, 1);
++	  mem_ref.step = gimple_call_arg (mem_ref.stmt, 2);
++	  mem_ref.regular_p = false;
++
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "Indirect memory access tracing succeeded.\n");
++	  return true;
++	}
++    }
++  return false;
++}
++
++/* Trace the array of the indirect memory access:
++   1) Obtain the base address of the indirect memory access.
++   2) Ensure that the index has been traced in the direct memory access.
++   e.g.,
++   _1 = MEM[base: a_2(D), index: ivtmp.3_3, step: 4, offset: 0B];
++   _4 = (integer(kind=8)) _1;
++   _5 = _4 + 135;
++   _6 = p[_5];       // start tracing
++*/
++
++bool
++trace_indirect_array (data_ref &mem_ref, std::set &traced_ref_stmt)
++{
++  tree base = TREE_OPERAND (mem_ref.ref, 0);
++  tree index = TREE_OPERAND (mem_ref.ref, 1);
++  if (trace_indirect_operand (index, traced_ref_stmt))
++    {
++      /* ARRAY_REF, The first operand is the array;
++		    the second is the index.  */
++      mem_ref.base = base;
++      mem_ref.index = index;
++      mem_ref.regular_p = false;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Indirect memory access tracing succeeded.\n");
++
++      return true;
++    }
++
++  return false;
++}
++
++/* Trace memory references base info:
++   1) Memory access rule analysis and reference info tracing
++   2) Source variable tracing, along base address of memory reference
++   We will extend parallel analysis later.
++*/
++
++void
++trace_ref_info (data_ref &mem_ref, std::set &traced_ref_stmt,
++		std::vector &unresolved_refs)
++{
++  enum tree_code ref_code = TREE_CODE (mem_ref.ref);
++  /* 1) Direct and indirect access traces.  */
++  switch (ref_code)
++    {
++    case MEM_REF:
++      /* Non-vectorized direct/indirect access by pointer.  */
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "MEM_REF\n");
++      if (!trace_ptr_mem_ref (mem_ref, traced_ref_stmt, unresolved_refs))
++	return;
++      break;
++    case TARGET_MEM_REF:
++      /* Vectorized and non-vectorized direct access.  */
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "TARGET_MEM_REF\n");
++      if (!trace_direct_mem_ref (mem_ref))
++	return;
++      break;
++    case SSA_NAME:
++      /* Vectorized indirect memory access.  */
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "SSA_NAME\n");
++      if (!trace_indirect_mem_ref_vectorized (mem_ref, traced_ref_stmt))
++	return;
++      break;
++    case ARRAY_REF:
++      /* Non-vectorized indirect memory access.  */
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "ARRAY_REF\n");
++      if (!trace_indirect_array (mem_ref, traced_ref_stmt))
++	return;
++      break;
++    default:
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "ref is another tree-code: ");
++	  fprintf (dump_file, "stmt: ");
++	  print_gimple_stmt (dump_file, mem_ref.stmt, 0, TDF_LINENO);
++	  fprintf (dump_file, "ref: ");
++	  print_generic_expr (dump_file, mem_ref.ref, TDF_LINENO);
++	  fprintf (dump_file, "\n");
++	}
++      return;
++    }
++
++  /* 2) Source variable tracing.  */
++  std::set walked;
++  if (mem_ref.var == NULL_TREE
++      && !trace_base_var (mem_ref, walked))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Source variable tracing failed.\n\n");
++      return;
++    }
++
++  if (mem_ref.regular_p)
++    traced_ref_stmt.insert (mem_ref.stmt);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Tracing succeeded.\n\n");
++  mem_ref.trace_status_p = true;
++}
++
++/* Trace all references in the loop.  */
++
++void
++trace_loop_refs_info (std::vector &refs,
++		      std::set &traced_ref_stmt,
++		      std::vector &unresolved_refs)
++{
++  for (unsigned i = 0; i < refs.size (); ++i)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "trace_references_base_info %d:\n", i);
++	  print_generic_expr (dump_file, refs[i].ref, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      trace_ref_info (refs[i], traced_ref_stmt, unresolved_refs);
++    }
++}
++
++/* Tracing and sorting reference groups.  */
++
++void
++trace_data_refs_info (std::vector &kernels,
++		      std::map > &loop_refs,
++		      std::set &traced_ref_stmt,
++		      std::vector &unresolved_refs)
++{
++  if (dump_file)
++    fprintf (dump_file, "\nPhase 2: trace_all_references_info\n\n");
++
++  for (unsigned i = 0; i < kernels.size (); ++i)
++    {
++      class loop *loop = kernels[i];
++      if (loop_refs.count (loop) == 0)
++	continue;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "loop header %d:\n", loop->header->index);
++      trace_loop_refs_info (loop_refs[loop], traced_ref_stmt, unresolved_refs);
++    }
++}
++
++/* Retrace references base info for complex scenarios in indirect memory access
++   after Phase 3.  */
++
++void
++retrace_ref_info_unresolved (data_ref &mem_ref,
++			     std::set &traced_ref_stmt)
++{
++  /* 1) Indirect access traces.  */
++  int trace_index_indir_p = trace_indirect_operand (mem_ref.index,
++						    traced_ref_stmt);
++  if (trace_index_indir_p == 1)
++    {
++      mem_ref.regular_p = false;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Indirect memory access tracing succeeded.\n");
++    }
++
++  /* 2) Source variable tracing.  */
++  std::set walked;
++  if (mem_ref.var == NULL_TREE
++      && !trace_base_var (mem_ref, walked))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Source variable tracing failed.\n\n");
++      return;
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Tracing succeeded.\n\n");
++  mem_ref.trace_status_p = true;
++}
++
++/* Retrace all unresolved references.  */
++
++void
++retrace_loop_refs_info_unresolved (std::vector &unresolved_refs,
++				   std::set &traced_ref_stmt)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file,
++	     "\nRetrace indirect memory access after outer loop analysis:\n");
++  for (unsigned i = 0; i < unresolved_refs.size (); ++i)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "trace_references_base_info %d:\n", i);
++	  print_generic_expr (dump_file, unresolved_refs[i].ref, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      retrace_ref_info_unresolved (unresolved_refs[i], traced_ref_stmt);
++    }
++}
++
++/* ================ phase 3 analyze_nested_kernels ================  */
++
++/* Return the inner most type for arrays and pointers of TYPE.  */
++
++tree
++inner_type (tree type)
++{
++  while (POINTER_TYPE_P (type)
++	 || TREE_CODE (type) == ARRAY_TYPE)
++    type = TREE_TYPE (type);
++  return type;
++}
++
++/* Check whether the input iv is the loop dimension boundary.  */
++
++bool
++loop_bound_iv_p (tree t, tree &outer_loop_t)
++{
++  if (t == NULL || TREE_CODE (t) != SSA_NAME
++      || TREE_CODE (TREE_TYPE (t)) != INTEGER_TYPE)
++    return false;
++
++  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
++
++  /* NOP_EXPR conversion between PHI node and memory reference due to MACRO.
++    n_898 = PHI 
++    _757 = (sizetype) n_898;
++    _900 = MEM[base: _726, index: _757, step: 8, offset: 0B];
++  */
++  while (gimple_code (def_stmt) == GIMPLE_ASSIGN
++	 && gimple_assign_rhs_code (def_stmt) == NOP_EXPR)
++    def_stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (def_stmt));
++
++  if (gimple_code (def_stmt) != GIMPLE_PHI)
++    return false;
++
++  /* Filter scenarios with only two phi inputs.  */
++  if (gimple_phi_num_args (def_stmt) != 2)
++    return false;
++
++  gphi *phi_stmt = as_a  (def_stmt);
++  basic_block src0 = gimple_phi_arg_edge (phi_stmt, 0)->src;
++  basic_block src1 = gimple_phi_arg_edge (phi_stmt, 1)->src;
++
++  class loop *loop = loop_containing_stmt (def_stmt);
++  bool res = false;
++  /* Two phi inputs, one from the current loop and one from the outer loop.  */
++  if ((src0->loop_father == loop) && (src1->loop_father == loop_outer (loop)))
++    {
++      outer_loop_t = gimple_phi_arg_def (def_stmt, 1);
++      res = true;
++    }
++  else if ((src1->loop_father == loop)
++	   && (src0->loop_father == loop_outer (loop)))
++    {
++      outer_loop_t = gimple_phi_arg_def (def_stmt, 0);
++      res = true;
++    }
++
++  if (res)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "===> ");
++	  print_gimple_stmt (dump_file, def_stmt, 0, TDF_SLIM);
++	}
++      return true;
++    }
++  return false;
++}
++
++/* Add node to the worklist and the walked set.  */
++
++void
++add_worklist_walked (std::vector &worklist, std::set &walked,
++		     tree node)
++{
++  if (!walked.count (node))
++    {
++      worklist.push_back (node);
++      /* Avoid phi node cycle introduction, which makes the worklist unable
++	 to end.  */
++      walked.insert (node);
++    }
++}
++
++/* check bound iv and add worklist.  */
++
++void
++check_bound_iv_and_add_worklist (std::vector &worklist,
++				 std::set &walked,
++				 std::set &walked_loop,
++				 tree t, data_ref &mem_ref)
++{
++  if (t == NULL_TREE || TREE_CODE (t) != SSA_NAME)
++    return;
++
++  gimple *def_stmt = SSA_NAME_DEF_STMT (t);
++  if (def_stmt == NULL)
++    return;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      print_generic_expr (dump_file, t, TDF_SLIM);
++      fprintf (dump_file, "\t\t: ");
++      print_gimple_stmt (dump_file, def_stmt, 0, TDF_SLIM);
++    }
++
++  if (gimple_code (def_stmt) == GIMPLE_PHI)
++    {
++      tree out_loop_t = NULL_TREE;
++      if (loop_bound_iv_p (t, out_loop_t))
++	{
++	  basic_block bb = gimple_bb (def_stmt);
++	  if (!walked_loop.count (bb))
++	    {
++	      mem_ref.loop_bounds.push_back (loop_bound (t, def_stmt));
++	      walked_loop.insert (bb);
++	    }
++	  add_worklist_walked (worklist, walked, out_loop_t);
++	}
++    }
++  else if (is_gimple_assign (def_stmt))
++    {
++      tree_code rhs_code = gimple_assign_rhs_code (def_stmt);
++
++      /* unary.  */
++      if (rhs_code == SSA_NAME || rhs_code == NOP_EXPR)
++	add_worklist_walked (worklist, walked, gimple_assign_rhs1 (def_stmt));
++      else if (rhs_code == POINTER_PLUS_EXPR)
++	add_worklist_walked (worklist, walked, gimple_assign_rhs2 (def_stmt));
++
++      /* binary.  */
++      else if (rhs_code == PLUS_EXPR || rhs_code == MINUS_EXPR
++	       || rhs_code == MULT_EXPR)
++	{
++	  add_worklist_walked (worklist, walked, gimple_assign_rhs1 (def_stmt));
++	  add_worklist_walked (worklist, walked, gimple_assign_rhs2 (def_stmt));
++	}
++    }
++}
++
++/* DFS trace the loop bound of iv.  */
++
++bool
++trace_loop_bound_iv (data_ref &mem_ref)
++{
++  /* In indirect memory access, the size cannot be determined based on the
++     loop boundary. However, we can take advantage of loop bound as an upper
++     bound (unrepeated memory access) to predict the variable footprint
++     involved in the specific loop dimension.  */
++
++  /* Determine and record the boundary iv of the current index,
++     but do not trace it.  */
++  tree outer_loop_t = NULL_TREE;
++  /* indirect access example, mem_ref.index = _64
++    _62 = MEM[symbol: uPtr, index: ivtmp.22_96, step: 4, offset: 0B];
++    _63 = (long unsigned int) _62;
++    _64 = _63 * 8;
++    _65 = [openfoam_smooth.c:28:28] &bPrimePtr + _64;
++    _66 = *_65;  */
++  if (loop_bound_iv_p (mem_ref.index, outer_loop_t) || !mem_ref.regular_p)
++    {
++      mem_ref.loop_bounds.push_back (
++	    loop_bound (mem_ref.index, SSA_NAME_DEF_STMT (mem_ref.index)));
++      if (!mem_ref.regular_p)
++	return false;
++    }
++
++  std::vector worklist;
++  worklist.push_back (mem_ref.base);
++  std::set walked;
++  std::set walked_loop;
++
++  while (worklist.size ())
++    {
++      tree t = worklist.back ();
++      worklist.pop_back ();
++
++      /* add worklist.  */
++      check_bound_iv_and_add_worklist (worklist, walked, walked_loop, t, mem_ref);
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nmem_ref access dimension: %ld\n",
++	       mem_ref.loop_bounds.size ());
++      fprintf (dump_file, "Traced variables: ");
++      print_generic_expr (dump_file, mem_ref.base, TDF_SLIM);
++      fprintf (dump_file, "\n");
++    }
++
++  return mem_ref.loop_bounds.size () > 0;
++}
++
++/* dump loop bound.  */
++
++void
++loop_bound_dump (FILE *file, loop_bound &lb)
++{
++  class loop *loop = lb.loop;
++  fprintf (file, "loop_bound: loop_%d (", loop->num);
++  if (loop->header)
++    fprintf (file, "header = %d", loop->header->index);
++  else
++    {
++      fprintf (file, "deleted)\n");
++      return;
++    }
++  if (loop->latch)
++    fprintf (file, ", latch = %d", loop->latch->index);
++  fprintf (file, ", lb_niters = ");
++  print_generic_expr (file, lb.niters);
++  fprintf (file, ")\n\n");
++}
++
++/* static calculate data size.  */
++
++void
++static_calculate_data_size (data_ref &mem_ref)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nstatic_calculate_data_size\n");
++
++  tree size_unit = TYPE_SIZE_UNIT (inner_type (TREE_TYPE (mem_ref.var)));
++  unsigned HOST_WIDE_INT type_size = size_unit ? tree_to_uhwi (size_unit) : 0;
++  for (unsigned i = 0; i < mem_ref.loop_bounds.size (); ++i)
++    {
++      unsigned HOST_WIDE_INT est_niter = tree_to_uhwi
++					   (mem_ref.loop_bounds[i].niters);
++      unsigned int unroll = mem_ref.loop_bounds[i].unroll;
++      if (i == 0)
++	{
++	  /* The unit conversion between byte, kilobytes, and megabytes is
++	     1024.  */
++	  mem_ref.data_size = double (type_size
++				      * est_niter * unroll) / 1024 / 1024;
++	}
++      else
++	mem_ref.data_size *= est_niter * unroll;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "static_data_size: %lf\n", mem_ref.data_size);
++    }
++}
++
++/* Recursive tracing and creating of dominant nodes.  */
++
++tree
++trace_and_create_dominate_expr (tree expr, class loop *outermost)
++{
++  if (expr == NULL_TREE || is_gimple_constant (expr))
++    return expr;
++
++  if (TREE_CODE (expr) != SSA_NAME)
++    return NULL_TREE;
++
++  if (SSA_NAME_IS_DEFAULT_DEF (expr))
++    return expr;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (expr);
++  basic_block def_bb = gimple_bb (stmt);
++  if (def_bb == NULL || def_bb->loop_father == NULL)
++    return NULL_TREE;
++
++  if (dominated_by_p (CDI_DOMINATORS, outermost->header, def_bb))
++    return expr;
++
++  if (gimple_code (stmt) != GIMPLE_ASSIGN)
++    return NULL_TREE;
++
++  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
++  tree_code_class code_class = TREE_CODE_CLASS (rhs_code);
++  tree type = TREE_TYPE (gimple_assign_lhs (stmt));
++  tree rhs1 = trace_and_create_dominate_expr (gimple_assign_rhs1 (stmt),
++					      outermost);
++  if (rhs1 == NULL_TREE)
++    return NULL_TREE;
++
++  if (code_class == tcc_unary)
++    {
++      tree expr_new = build1 (rhs_code, type, rhs1);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "expr_new = ");
++	  print_generic_expr (dump_file, expr_new, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      return expr_new;
++    }
++  else if (code_class == tcc_binary)
++    {
++      tree rhs2 = trace_and_create_dominate_expr (gimple_assign_rhs2 (stmt),
++						  outermost);
++      if (rhs2 == NULL_TREE)
++	return NULL_TREE;
++
++      tree expr_new = fold_build2 (rhs_code, type, rhs1, rhs2);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "expr_new = ");
++	  print_generic_expr (dump_file, expr_new, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      return expr_new;
++    }
++
++  return NULL_TREE;
++}
++
++/* Recursive parsing and creating of nodes in expr expressions.  */
++
++tree
++parse_and_create_expr (tree expr, class loop *outermost)
++{
++  if (expr == NULL_TREE || expr == chrec_dont_know
++      || is_gimple_constant (expr) || TREE_CODE (expr) == ADDR_EXPR)
++    {
++      /* tcc_expression (e.g., &q) situation combined with tcc_unary.  */
++      if (TREE_CODE (expr) == ADDR_EXPR && dump_file
++	  && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "tcc_expression case in ADDR_EXPR: ");
++	  print_generic_expr (dump_file, expr, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      return expr;
++    }
++
++  if (TREE_CODE (expr) == SSA_NAME)
++    return trace_and_create_dominate_expr (expr, outermost);
++  else if (EXPR_P (expr))
++    {
++      enum tree_code tree_code = TREE_CODE (expr);
++      tree_code_class code_class = TREE_CODE_CLASS (tree_code);
++      tree type = TREE_TYPE (expr);
++      tree op1 = parse_and_create_expr (TREE_OPERAND (expr, 0), outermost);
++      if (op1 == NULL_TREE)
++	return NULL_TREE;
++
++      if (code_class == tcc_unary)
++	{
++	  tree expr_new = build1 (tree_code, type, op1);
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "expr_new = ");
++	      print_generic_expr (dump_file, expr_new, TDF_SLIM);
++	      fprintf (dump_file, "\n");
++	    }
++	  return expr_new;
++	}
++      else if (code_class == tcc_binary)
++	{
++	  tree op2 = parse_and_create_expr (TREE_OPERAND (expr, 1), outermost);
++	  if (op2 == NULL_TREE)
++	    return NULL_TREE;
++
++	  tree expr_new = fold_build2 (tree_code, type, op1, op2);
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "expr_new = ");
++	      print_generic_expr (dump_file, expr_new, TDF_SLIM);
++	      fprintf (dump_file, "\n");
++	    }
++	  return expr_new;
++	}
++    }
++  return NULL_TREE;
++}
++
++/* Trace and create dominant loop bounds.  */
++
++void
++trace_and_create_dominate_loop_bounds (data_ref &mem_ref)
++{
++  /* Check whether the niters is a loop dominant.
++     If not, trace and determine whether the result is dominant.  If yes,
++     create the expr of the dominant node.
++  */
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\ntrace_and_create_dominate_loop_bounds\n");
++
++  /* Determine the relationship between the boundary of the innermost loop and
++     the dominant of the outer loop and the processing.  */
++  loop_bound &outermost = mem_ref.loop_bounds.back ();
++  for (unsigned i = 0; i < mem_ref.loop_bounds.size (); ++i)
++    {
++      loop_bound ¤t = mem_ref.loop_bounds[i];
++      tree &niters = current.niters;
++      if (TREE_CODE (niters) == COND_EXPR)
++	niters = TREE_OPERAND (niters, 1);
++
++      niters = parse_and_create_expr (niters, outermost.loop);
++
++      if (niters == NULL_TREE)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      print_generic_expr (dump_file, mem_ref.ref, TDF_SLIM);
++	      fprintf (dump_file, "Tracing loop bound failed at dimension %d\n",
++		       i);
++	    }
++	  mem_ref.calc_by = UNHANDLE_CALC;
++	  break;
++	}
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	loop_bound_dump (dump_file, mem_ref.loop_bounds[i]);
++    }
++}
++
++/* trace the dimension and corresponding loop bounds of mem_ref.
++   This function is used to supplement the information of mem_ref.loop_bounds.
++*/
++
++void
++trace_ref_dimension_and_loop_bounds (data_ref &mem_ref)
++{
++  /* In the same loop, some memory access dimensions are different.  Remove
++     variables with fewer dimensions.
++     Previous cyclic filtering conditions and memory access node records and
++     tracing.
++     The false result is also processed.
++  */
++  if (dump_file)
++    fprintf (dump_file, "\ncalculate_data_size\n");
++
++  /* Trace the loop bound iv of ref to determine the dimension.  */
++  /* Record data from the loop perspective to avoid repeated tracing.  */
++  if (!trace_loop_bound_iv (mem_ref))
++    return;
++
++  /* The traced mem_ref may have multiple dimensions, which corresponds to
++     multiple loops.  */
++  /* And in the dimension-by-dimensional analysis, the computable way is
++     continuously reduced.  */
++  mem_ref.calc_by = STATIC_CALC;
++  for (unsigned i = 0; i < mem_ref.loop_bounds.size (); ++i)
++    {
++      class loop *loop = mem_ref.loop_bounds[i].loop;
++      tree &niters = mem_ref.loop_bounds[i].niters;
++
++      /* Set NULL_TREE to ensure that nb_iterations are retraced and
++	 vec_nb_iterations are also extracted.  */
++      loop->nb_iterations = NULL_TREE;
++      niters = number_of_latch_executions (loop, false);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	loop_dump (dump_file, loop);
++
++      if (loop->unroll)
++	{
++	  if (loop->unroll == USHRT_MAX && dump_file
++	      && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "loop->unroll = USHRT_MAX = %d", USHRT_MAX);
++	  mem_ref.loop_bounds[i].unroll = loop->unroll;
++	}
++
++      if ((niters == chrec_dont_know) && loop->vec_nb_iterations
++	   && (loop->vec_nb_iterations != chrec_dont_know))
++	niters = loop->vec_nb_iterations;
++
++      if (niters == chrec_dont_know)
++	{
++	  /* We derive est_loop_niters from function
++	     `estimated_loop_iterations_int`. Usually only the innermost loop is
++	     vectorized, so vec_nb_iterations can be 4 or 8 times as large as
++	     `est_loop_niters` due to vectorization. However, function
++	     `estimated_loop_iterations_int` only returns an integer instead of
++	     a tree node expression, so it cannot substitute
++	     function `number_of_latch_executions` in runtime computation.  */
++	  HOST_WIDE_INT est_loop_niters = estimated_loop_iterations_int (loop);
++	  if (est_loop_niters >= 0 && est_loop_niters < INT_MAX)
++	    /* e.g., loop iterations from `estimated_loop_iterations_int`: (-1)
++	       loop_144 (header = 519, latch = 625, niter = scev_not_known,
++	       upper_bound = 1073741823, likely_upper_bound = 1073741823,
++	       unroll = 1)  */
++	    /* variable `niters` from `loop->vec_nb_iterations`
++	        constant 34>  */
++	    niters = build_int_cst (integer_type_node, (int) est_loop_niters);
++	}
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	loop_bound_dump (dump_file, mem_ref.loop_bounds[i]);
++
++      if (niters == NULL_TREE || niters == chrec_dont_know)
++	mem_ref.calc_by = std::min (mem_ref.calc_by, UNHANDLE_CALC);
++      else if (TREE_CODE (niters) != INTEGER_CST)
++	mem_ref.calc_by = std::min (mem_ref.calc_by, RUNTIME_CALC);
++      else
++	mem_ref.calc_by = std::min (mem_ref.calc_by, STATIC_CALC);
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  if (mem_ref.calc_by == 2)
++	    {
++	      fprintf (dump_file, "\nniters: ");
++	      print_generic_expr (dump_file, niters, TDF_SLIM);
++	      fprintf (dump_file, "\nSTATIC_CALC.\n");
++	    }
++	  else if (mem_ref.calc_by == 1)
++	    {
++	      fprintf (dump_file, "\nniters: ");
++	      print_generic_expr (dump_file, niters, TDF_SLIM);
++	      fprintf (dump_file, "\nRUNTIME_CALC.\n");
++	    }
++	  else
++	    fprintf (dump_file, "\nUNHANDLE_CALC.\n");
++	}
++    }
++
++  if (mem_ref.calc_by == RUNTIME_CALC)
++    trace_and_create_dominate_loop_bounds (mem_ref);
++  else if (mem_ref.calc_by == STATIC_CALC)
++    static_calculate_data_size (mem_ref);
++}
++
++/* Get the loop's niters tree.
++   Return NULL_TREE if not found.  */
++
++tree
++get_cur_loop_niters (std::map > &loop_refs,
++		     class loop *loop)
++{
++  if (loop_refs.count (loop) == 0)
++    return NULL_TREE;
++  std::vector bounds = loop_refs[loop][0].loop_bounds;
++  return bounds.size () ? bounds[0].niters : NULL_TREE;
++}
++
++/* Trace the sources of the niters tree and return the
++   outermost depth of the loops containing them.
++   Return start_depth if not found.
++
++   example:
++   niters:(long) (((int) i_end_417 - (int) i_start_452) + 1)
++   operand_num: 1, subtree:(long) (((int) i_end_417 - (int) i_start_452) + 1)
++   operand_num: 2, subtree:((int) i_end_417 - (int) i_start_452) + 1
++   operand_num: 2, subtree:(int) i_end_417 - (int) i_start_452
++   operand_num: 1, subtree:(int) i_end_417
++   SSA_NAME of niters: i_end_417
++   gimple of SSA: i_end_417 = PHI 
++   return gimple depth;
++*/
++
++unsigned
++trace_outer_loop_depth (tree niters, unsigned start_depth)
++{
++  /* If niter does not exist or the type is INTEGER_CST,
++     the loop bound is determined and return start_depth.  */
++  if (niters == NULL_TREE || TREE_CODE (niters) == INTEGER_CST)
++    return start_depth;
++
++  gimple *def_stmt = NULL;
++  /* niters examples: i_start_452, fEnd_35, fEnd_100.  */
++  enum tree_code niter_code = TREE_CODE (niters);
++  if (niter_code == SSA_NAME)
++    {
++      /* Trace the SSA that define this niter.  */
++      def_stmt = SSA_NAME_DEF_STMT (niters);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "ssa_name of niters: ");
++	  print_generic_expr (dump_file, niters);
++	  fprintf (dump_file, "\ngimple of ssa: \n");
++	  print_gimple_stmt (dump_file, def_stmt, 0, TDF_LINENO);
++	  fprintf (dump_file, "\n");
++	}
++      /* Termination condition of dfs.  Return the depth of the bb block.  */
++      if (gimple_code (def_stmt) == GIMPLE_PHI
++	  || gimple_code (def_stmt) == GIMPLE_NOP)
++	{
++	  basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (niters));
++	  if (def_bb == NULL || def_bb->loop_father == NULL)
++	    return start_depth;
++	  unsigned ret_depth = loop_depth (def_bb->loop_father);
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "Stop tracing the outer loop depth, ");
++	      fprintf (dump_file, "current depth: %d, current bb: %d\n",
++		       ret_depth, def_bb->index);
++	    }
++	  return ret_depth;
++	}
++      /* 'ASSIGN': Use dfs to trace the rhs of the assignment statement.  */
++      else if (gimple_code (def_stmt) == GIMPLE_ASSIGN)
++	{
++	  tree rhs = gimple_assign_rhs1 (def_stmt);
++	  if (TREE_CODE (rhs) == TARGET_MEM_REF)
++	    /* fEnd_35 = MEM[base: _19, index: ivtmp.96, step: 4,
++			     offset: 0B]  */
++	    return trace_outer_loop_depth (TREE_OPERAND (rhs, 2), start_depth);
++	  else
++	    {
++	      /* M.218_658 = MIN_EXPR <_631, _657>  */
++	      unsigned min_depth = start_depth;
++	      unsigned operand_num = gimple_num_ops (def_stmt);
++	      /* 'ASSIGN': start from 1 because op[0] is the lhs.  */
++	      for (unsigned i = 1; i < operand_num; i++)
++		{
++		  tree subtree = dyn_cast(def_stmt)->op[i];
++		  if (subtree == NULL)
++		    continue;
++		  unsigned depth = trace_outer_loop_depth (subtree, \
++				   start_depth);
++		  min_depth = MIN (min_depth, depth);
++		  }
++		return min_depth;
++	    }
++	}
++      else
++	{
++	  /* Adding termination conditions:
++	   1)  Niters is MEM variable;
++	   2)  Niters is a runtime value (smooth_uPtr), and consider
++	       finding footprint in other mem_ref;
++	   3)  Niters is loop variable (i_start/i_end), and the boundary in
++	       the outer loop depends on the variable j_start/j_end.  */
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "The loop termination condition is "
++				  "extended.\n");
++	    }
++	  return start_depth;
++	}
++    }
++  /* The operand nums can be obtained when the tree code is as follows.  */
++  else if (niter_code == NOP_EXPR || niter_code == MEM_REF
++	   || niter_code == ARRAY_REF || niter_code == COND_EXPR
++	   || niter_code == PLUS_EXPR || niter_code == MINUS_EXPR
++	   || niter_code == TARGET_MEM_REF || niter_code == POINTER_PLUS_EXPR)
++    {
++      /* operand_num is the operand in the niters statement.
++	 example: In the following niter statement, operand_num = 3.
++	 (unsigned int) fEnd_35 - (unsigned int) fEnd_100 + 4294967295.  */
++      unsigned operand_num = TREE_OPERAND_LENGTH (niters);
++      unsigned min_depth = start_depth;
++      for (unsigned i = 0; i < operand_num; i++)
++	{
++	  tree subtree = TREE_OPERAND (niters, i);
++	  if (subtree == NULL)
++	    continue;
++	  unsigned depth = trace_outer_loop_depth (subtree, start_depth);
++	  min_depth = MIN (min_depth, depth);
++	}
++      return min_depth;
++    }
++  else
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "niters is another tree code: %s\n",
++		   get_tree_code_name (niter_code));
++	  print_generic_expr (dump_file, niters, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      return start_depth;
++    }
++}
++
++/* Traces the ref dimension information in each loop.  */
++
++void
++analyze_loop_refs_dimension (std::vector &refs)
++{
++  for (unsigned i = 0; i < refs.size (); ++i)
++    {
++      if (refs[i].trace_status_p == false)
++	continue;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "trace_reference_dimension %d:\n", i);
++	  print_generic_expr (dump_file, refs[i].ref, TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      trace_ref_dimension_and_loop_bounds (refs[i]);
++    }
++}
++
++/* analyze nested kernels
++   1) multidimension loop analyze
++   2) extended outer loop analyze
++*/
++
++bool
++analyze_nested_kernels (std::vector &kernels,
++			std::map > &loop_refs,
++			std::set &traced_ref_stmt,
++			std::vector &unresolved_refs)
++{
++  if (dump_file)
++    fprintf (dump_file, "\nPhase 3: analyze_nested_kernels\n\n");
++
++  /* `kernels` may be added in during outer loop extension phase,
++     thus using initial size to avoid repeatedly analyzing.  */
++  unsigned init_kernels_size = kernels.size ();
++  for (unsigned i = 0; i < init_kernels_size; ++i)
++    {
++      class loop *loop = kernels[i];
++      if (loop_refs.count (loop) == 0)
++	continue;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "loop header %d:\n", loop->header->index);
++      analyze_loop_refs_dimension (loop_refs[loop]);
++
++      unsigned depth = loop_depth (loop);
++      unsigned outer_depth = trace_outer_loop_depth (get_cur_loop_niters
++			     (loop_refs, loop), depth);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "cur_depth: %d, outer_depth: %d\n",
++		 depth, outer_depth);
++      /* param_outer_loop_num: number of loops of the extended outer loop.
++	 Outermost loop should not be extended when outer_depth = 0.
++	 `outer_depth == depth` means the current loop is the loop which
++	 boundary is known, so there is no need to extend the outer loop.  */
++      if (outer_depth == 0 || outer_depth == depth
++	  || depth > outer_depth + param_outer_loop_num)
++	continue;
++
++      /* Extend outer loop.  */
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "\nStart extending outer loop\n");
++      /* Superloops of the loop, start from the loop closest to the
++	  current loop in the outermost loop.  */
++      for (int j = 0; j < param_outer_loop_num && --depth; ++j)
++	{
++	  class loop *outer_loop = (*loop->superloops)[depth];
++	  /* The outer loop may be added when analyzing previous inner loops,
++	     i.e. the outer loop contains two or more inner loops.  */
++	  if (loop_refs.count (outer_loop))
++	    continue;
++	  /* phase1 ~ phase3 analysis on the extended outer loop.  */
++	  analyze_loop_dense_memory (kernels, loop_refs, outer_loop);
++	  if (loop_refs.count (outer_loop) == 0)
++	    continue;
++	  for (unsigned k = 0; k < loop_refs[outer_loop].size (); ++k)
++	    {
++	      if (dump_file && (dump_flags & TDF_DETAILS))
++		{
++		  fprintf (dump_file, "outer_analyze_nested_kernels %d: ", k);
++		  print_generic_expr (dump_file, loop_refs[outer_loop][k].ref,
++				      TDF_SLIM);
++		  fprintf (dump_file, "\n");
++		}
++	    }
++	  trace_loop_refs_info (loop_refs[outer_loop], traced_ref_stmt,
++				unresolved_refs);
++	  analyze_loop_refs_dimension (loop_refs[outer_loop]);
++	  outer_depth = trace_outer_loop_depth (get_cur_loop_niters
++						(loop_refs, outer_loop), depth);
++	  /* `outer_depth == depth` means the current loop is the loop which
++	   boundary is known, so there is no need to extend the outer loop.  */
++	  if (outer_depth == depth)
++	    break;
++	  else
++	    /* The outer loop cannot find the current loop boundary,
++	       Remove the record of outer_loop from the loop_refs.  */
++	    loop_refs.erase (outer_loop);
++	}
++    }
++  return true;
++}
++
++/* ================ phase 4 filter_and_sort_kernels ================  */
++
++/* Get the edge probability information of each basic block in the loop.  */
++
++float
++get_edge_prob (edge e, float minimum)
++{
++  float fvalue = 0;
++
++  profile_probability probability = e->probability;
++  if (probability.initialized_p ())
++    {
++      fvalue = probability.to_reg_br_prob_base () / float (REG_BR_PROB_BASE);
++      if (fvalue < minimum && probability.to_reg_br_prob_base ())
++	fvalue = minimum;
++    }
++  return fvalue;
++}
++
++/* Get the next bb with a high branch probability.  */
++
++basic_block
++next_high_probability_bb (basic_block bb)
++{
++  if (bb == NULL)
++    return NULL;
++
++  /* Limit the minimum probability value.  */
++  const float MINNUM_PROB = 0.00001f;
++  float minimum = MINNUM_PROB;
++
++  gimple *stmt = last_stmt (bb);
++  if (stmt && gimple_code (stmt) == GIMPLE_COND)
++    {
++      edge true_edge = NULL;
++      edge false_edge = NULL;
++      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
++
++      float true_edge_prob = get_edge_prob (true_edge, minimum);
++      float false_edge_prob = get_edge_prob (false_edge, minimum);
++      /* If the content of the branch does not include the candidate
++	 kernel, the branch probability may not be limited.  */
++      /* The edge_prob may have precision error during static prediction,
++	 so we need to relax the limit before comparison.  */
++      if ((true_edge_prob >= (param_branch_prob_threshold / 100.0) - minimum)
++	  && flow_bb_inside_loop_p (bb->loop_father, true_edge->dest))
++	return true_edge->dest;
++      else if ((false_edge_prob
++		>= (param_branch_prob_threshold / 100.0) - minimum)
++	       && flow_bb_inside_loop_p (bb->loop_father, false_edge->dest))
++	return false_edge->dest;
++      else
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "No high probability bb:");
++	      fprintf (dump_file, "current bb: %d, true: %f, false: %f\n",
++		       bb->index, true_edge_prob, false_edge_prob);
++	    }
++	  return NULL;
++	}
++    }
++  else
++    {
++      edge e = find_fallthru_edge (bb->succs);
++      if (e)
++	return e->dest;
++    }
++  return NULL;
++}
++
++
++/* Dump loop header bb.  */
++
++void
++dump_loop_headers (const char *name, std::vector &loops)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\n\n%s:\n", name);
++      fprintf (dump_file, "{ ");
++      for (unsigned int i = 0; i < loops.size (); i++)
++	fprintf (dump_file, "%d(%d) ", loops[i]->num, loops[i]->header->index);
++      fprintf (dump_file, "}\n\n");
++    }
++}
++
++/* Combine and sort candidate loops.  */
++
++bool
++filter_and_sort_kernels (std::vector &sorted_kernels,
++			 std::vector &kernels)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nPhase 4: filter_and_sort_kernels:\n\n");
++
++  std::set end_bb;
++  std::list walked_header_bb; /* Used to record nested loops.  */
++  std::set walked_non_header_bb_idx;
++
++  for (unsigned i = 0; i < kernels.size (); ++i)
++    {
++      if (kernels[i]->inner == NULL)
++	end_bb.insert (kernels[i]->header);
++    }
++
++  dump_loop_headers ("kernels", kernels);
++
++  if (!param_filter_kernels)
++    {
++      for (std::vector::iterator it = kernels.begin ();
++	   it != kernels.end (); ++it)
++	sorted_kernels.push_back (*it);
++    }
++  else
++    {
++      basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
++
++      while (bb)
++	{
++	  if (bb == NULL)
++	    return false;
++	  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
++	    break;
++
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "%d ", bb->index);
++
++	  /* bb is not the head of the loop, go to the next.  */
++	  if (bb != bb->loop_father->header)
++	    {
++	      if (walked_non_header_bb_idx.count (bb->index))
++		{
++		  if (dump_file && (dump_flags & TDF_DETAILS))
++		    fprintf (dump_file, "Find same-loop cycle.  "
++					"Abort filtering process.\n");
++		  return false;
++		}
++	      walked_non_header_bb_idx.insert (bb->index);
++      	      bb = next_high_probability_bb (bb);
++	      continue;
++	    }
++
++	  /* bb is the head of the loop.  */
++	  if (bb != walked_header_bb.back ())
++	    {
++	      if (end_bb.count (bb))
++		{
++		  sorted_kernels.push_back (bb->loop_father);
++		  bb = single_exit (bb->loop_father)->dest;
++		  continue;
++		}
++	      if (loop_outer (bb->loop_father) != NULL
++		  && get_loop_exit_edges (bb->loop_father).length () != 1)
++		return false;
++	      walked_header_bb.push_back (bb);
++	      bb = next_high_probability_bb (bb);
++	      continue;
++	    }
++	  else
++	    {
++	      walked_header_bb.pop_back ();
++	      bb = single_exit (bb->loop_father)->dest;
++	      continue;
++	    }
++	}
++    }
++
++  dump_loop_headers ("sorted_kernels", sorted_kernels);
++  return true;
++}
++
++/* Check whether the given bb is null.  */
++
++bool
++check_null_bb (basic_block bb)
++{
++  if (bb == NULL)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Unexpected error at null bb.\n");
++      return true;
++    }
++  return false;
++}
++
++/* Check whether the loop father of the given bb is null.  */
++
++bool
++check_null_loop_father (basic_block bb)
++{
++  if (check_null_bb (bb))
++    return true;
++
++  if (bb->loop_father == NULL)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "bb %d's loop father is null.\n", bb->index);
++      return true;
++    }
++  return false;
++}
++
++/* States for bb during path traversal.  */
++
++enum bb_traversal_state
++{
++  NOT_TRAVERSED = 0,
++  UNDER_TRAVERSAL,
++  FULLY_TRAVERSED
++};
++
++/* Detect abnormal revisit for bb during path traversal where bb is
++   1) fully traversed,
++   2) non-loop-header bb but currently under traversal.  */
++
++bool
++revisit_bb_abnormal_p (basic_block bb, std::vector &bb_visited,
++		       const std::set &header_bb_idx_set,
++		       std::set > &unused_edges,
++		       int src_bb_idx)
++{
++  /* If the header bb has been already fully traversed, early exit
++     the function.  */
++  if (bb_visited[bb->index] == FULLY_TRAVERSED)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Already visited bb index %d. Abort.\n",
++		 bb->index);
++      return true;
++    }
++
++  /* If we revisit a non-header bb during next-bb traversal, we detect
++     an inner-loop cycle and dump warning info. Record this abnormal edge
++     in `unused_edges` for special treatment in path weight update.  */
++  if (!header_bb_idx_set.count (bb->index)
++      && bb_visited[bb->index] == UNDER_TRAVERSAL)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Warning: Find cycle at bb index %d. Abort.\n",
++		 bb->index);
++      unused_edges.insert (std::make_pair (src_bb_idx, bb->index));
++      return true;
++    }
++
++  return false;
++}
++
++/* Check successor bb through edge e. Return true if successor bb is NULL or
++   out of loop.  */
++
++bool
++check_succ_bb_abnormal_p (basic_block bb, edge e)
++{
++  if (check_null_bb (e->dest))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Null bb connected to src bb %d.\n", bb->index);
++
++      return true;
++    }
++
++  /* If bb is within one loop and the edge is pointing to the
++     outer loop, skip edge processing until a backedge to header
++     bb. `loop->num = 0` represents function body.  */
++  if (bb->loop_father->num != 0
++      && !flow_bb_inside_loop_p (bb->loop_father, e->dest))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Find edges to the outer loop at bb "
++			    "index %d to bb index %d. Abort.\n",
++		 bb->index, e->dest->index);
++
++      return true;
++    }
++
++  return false;
++}
++
++/* Criteria for retrieving the next bb in modified control-flow graph, which
++   creates a topological order for the bb traversal.  */
++
++void
++get_next_toposort_bb (basic_block bb, std::vector &bb_visited,
++		      std::list &bb_topo_order,
++		      const std::set &header_bb_idx_set,
++		      std::set > &unused_edges,
++		      int src_bb_idx)
++{
++  /* 1) Before bb returns to the loop header, bb will not go to the outer loop.
++     2) After returning to the loop header, traverse all exit_bbs.
++     NEXT STEP:
++     1) If goto jumps out of 2 loops, goto has to traverse smaller jumps first.
++     2) If path length is the same => choose higher depth traversal path.  */
++  if (check_null_bb (bb) || check_null_loop_father (bb))
++    return;
++
++  /* Find last bb of function.  */
++  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
++    return;
++
++  if (revisit_bb_abnormal_p (bb, bb_visited, header_bb_idx_set, unused_edges,
++			     src_bb_idx))
++    return;
++
++  /* If we revisit the header bb of a loop, traverse all exit bbs.  */
++  if (header_bb_idx_set.count (bb->index)
++      && bb_visited[bb->index] == UNDER_TRAVERSAL)
++    {
++      unsigned i;
++      edge e;
++      auto_vec exits = get_loop_exit_edges (bb->loop_father);
++
++      if (exits.length () > 1 && dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Detect multiple exits at loop %d.\n",
++		 bb->loop_father->num);
++
++      FOR_EACH_VEC_ELT (exits, i, e)
++	{
++	  get_next_toposort_bb (e->dest, bb_visited, bb_topo_order,
++				header_bb_idx_set, unused_edges, src_bb_idx);
++	}
++      return;
++    }
++
++  /* Post-order traversal for normal bb.  */
++  bb_visited[bb->index] = UNDER_TRAVERSAL;
++  edge e;
++  edge_iterator ei;
++
++  FOR_EACH_EDGE (e, ei, bb->succs)
++    {
++      if (check_succ_bb_abnormal_p (bb, e))
++	continue;
++
++      get_next_toposort_bb (e->dest, bb_visited, bb_topo_order,
++			    header_bb_idx_set, unused_edges, bb->index);
++    }
++
++  /* bb is marked as fully traversed and all its descendents have been
++      fully traversed due to post-order traversal.  */
++  bb_visited[bb->index] = FULLY_TRAVERSED;
++  bb_topo_order.push_back (bb);
++}
++
++/* A struct that represents the longest path weight at each bb.  */
++
++struct weight
++{
++  /* Longest path weight at current bb.  */
++  gcov_type bb_count;
++
++  /* Prev bb from the current longest path.  */
++  int prev_bb_idx;
++};
++
++/* A helper function for checking whether overflow will occur when adding two
++   gcov_type weights.  */
++
++bool
++check_weight_overflow (gcov_type a, gcov_type b)
++{
++  if ((a > 0 && b > INT64_MAX - a) || (a < 0 && b < INT64_MIN - a))
++    return true;
++
++  return false;
++}
++
++/* A helper function that update the weight of the current longest path to
++   bb_idx_dst and a new path pointing from bb_idx_src to bb_idx_dst.  */
++
++void
++update_path_weight (std::vector &bb_weights, int bb_idx_src,
++		    int bb_idx_dst, gcov_type weight_dst)
++{
++  if (check_weight_overflow (bb_weights[bb_idx_src].bb_count, weight_dst)
++      && dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "WARNING: Path weight overflow at src bb %d "
++			  "and dest bb %d.\n",
++	       bb_idx_src, bb_idx_dst);
++    }
++  if (bb_weights[bb_idx_dst].bb_count
++      < bb_weights[bb_idx_src].bb_count + weight_dst)
++    {
++      bb_weights[bb_idx_dst].bb_count
++	= bb_weights[bb_idx_src].bb_count + weight_dst;
++      bb_weights[bb_idx_dst].prev_bb_idx = bb_idx_src;
++    }
++}
++
++/* Check whether the required bb/loop info for path update is null.  */
++
++bool
++check_null_info_in_path_update (basic_block bb, edge e)
++{
++  if (check_null_bb (e->dest))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Null bb detected for edge connected "
++			    "to src bb %d.\n",
++		 bb->index);
++      return true;
++    }
++
++  if (check_null_loop_father (bb) || check_null_loop_father (e->dest))
++    return true;
++
++  return false;
++}
++
++/* Update path weight to loop exit bbs where the current source bb is connected
++   to header bb using a backedge.  */
++
++void
++update_backedge_path_weight (std::vector &bb_weights, basic_block bb,
++			   const std::set > &unused_edges)
++{
++  unsigned i;
++  edge e_exit;
++  auto_vec exits = get_loop_exit_edges (bb->loop_father);
++  FOR_EACH_VEC_ELT (exits, i, e_exit)
++    {
++      if (check_null_bb (e_exit->dest))
++	{
++	  if (e_exit->src != NULL && dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "Null bb detected for exiting edge "
++				"connected to src bb %d.\n",
++		     e_exit->src->index);
++	  continue;
++	}
++
++      if (unused_edges.count (std::make_pair (bb->index, e_exit->dest->index)))
++	{
++	  /* Inner-loop-cycle backedge case.  */
++	  continue;
++	}
++      update_path_weight (bb_weights, bb->index, e_exit->dest->index,
++			  e_exit->dest->count.to_gcov_type ());
++    }
++}
++
++/* Update the longest length of the path through control flow graph.  */
++
++void
++update_max_length_of_path (std::vector &bb_weights,
++			   std::list &bb_topo_order,
++			   const std::set &header_bb_idx_set,
++			   const std::set > &unused_edges)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "Start update weight traversal:\n");
++
++  while (!bb_topo_order.empty ())
++    {
++      basic_block bb = bb_topo_order.back ();
++      bb_topo_order.pop_back ();
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "%d ", bb->index);
++
++      edge e;
++      edge_iterator ei;
++      FOR_EACH_EDGE (e, ei, bb->succs)
++	{
++	  if (check_null_info_in_path_update (bb, e))
++	    continue;
++
++	  if (unused_edges.count (std::make_pair (bb->index, e->dest->index)))
++	    {
++	      /* Inner-loop-cycle backedge case.  */
++	      continue;
++	    }
++	  else if (bb->loop_father->num != 0
++		   && !flow_bb_inside_loop_p (bb->loop_father, e->dest))
++	    {
++	      /* Outer-loop edge case.  */
++	      continue;
++	    }
++	  else if (header_bb_idx_set.count (e->dest->index)
++	      && bb->loop_father == e->dest->loop_father)
++	    {
++	      /* Backedge case.  */
++	      update_backedge_path_weight (bb_weights, bb, unused_edges);
++	    }
++	  else
++	    {
++	      /* Normal edge case.  */
++	      update_path_weight (bb_weights, bb->index, e->dest->index,
++				  e->dest->count.to_gcov_type ());
++	    }
++	}
++    }
++}
++
++/* Collect all header bb of loops in the function beforehand.  */
++
++void
++collect_header_bb_for_fn (std::set &header_bb_idx_set)
++{
++  for (auto loop : loops_list (cfun, LI_FROM_INNERMOST))
++    header_bb_idx_set.insert (loop->header->index);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nCheck header bbs:\n");
++      for (std::set::iterator it = header_bb_idx_set.begin ();
++	   it != header_bb_idx_set.end (); ++it)
++	fprintf (dump_file, "%d ", *it);
++      fprintf (dump_file, "\n");
++    }
++}
++
++/* Record loop executing order and bb high-executing path.  */
++
++void
++record_high_execution_path (std::vector &sorted_kernel,
++			    std::vector &bb_path, int bb_num_max)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nPATH FOR %s: ", get_name (cfun->decl));
++
++  std::set loop_set;
++  for (int i = bb_path.size() - 1; i >= 0; --i)
++    {
++      int bb_idx = bb_path[i];
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "%d ", bb_idx);
++      gcc_assert (bb_idx < bb_num_max);
++
++      class loop *loop = BASIC_BLOCK_FOR_FN (cfun, bb_idx)->loop_father;
++      if (!loop_set.count (loop->num))
++	{
++	  loop_set.insert (loop->num);
++	  sorted_kernel.push_back (loop);
++	}
++    }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\n");
++}
++
++/* Combine and sort candidate loops using feedback information.  */
++
++bool
++filter_and_sort_kernels_feedback (std::vector &sorted_kernel,
++				  std::set &bb_pathset)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nPhase 4: filter_and_sort_kernels:\n\n");
++
++  std::set header_bb_idx_set;
++  std::list bb_topo_order;
++
++  /* Quoted from GCC internal, Chapter 15.1, "the index for any block should
++     never be greater than `last_basic_block`." Therefore, we use this
++     variable for retrieving the max bb index of a function.  */
++  /* Since the pass does not add/remove/merge basic blocks until Phase 6
++     and previous passes will update ssa accordingly, we do not need to
++     `compact_blocks` to update bb indices currently.  */
++  int bb_num_max = last_basic_block_for_fn (cfun) + 1;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nMaximal number of possible bbs in the "
++			  "function: %d\n",
++	     bb_num_max);
++  std::vector bb_visited = std::vector(bb_num_max, 0);
++
++  collect_header_bb_for_fn (header_bb_idx_set);
++  basic_block bb_start = ENTRY_BLOCK_PTR_FOR_FN (cfun);
++
++  /* Step 1: Get topological order of bb during traversal.  */
++  std::set > unused_edges;
++  get_next_toposort_bb (bb_start, bb_visited, bb_topo_order, header_bb_idx_set,
++			unused_edges, -1);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nCheck bbs in topological order:\n");
++      for (std::list::iterator it = bb_topo_order.begin ();
++	   it != bb_topo_order.end (); ++it)
++	fprintf (dump_file, "%d ", (*it)->index);
++      fprintf (dump_file, "\n");
++    }
++
++  /* Step 2: Update weights of nodes and path.  */
++  weight weight_init = {-1, -1};
++  std::vector bb_weights = std::vector(bb_num_max, weight_init);
++  bb_weights[0].bb_count = 0;  /* ENTRY bb has count 0 and prev bb as -1.  */
++  update_max_length_of_path (bb_weights, bb_topo_order, header_bb_idx_set,
++			     unused_edges);
++
++  /* Step 3: Backtrack a path from EXIT bb to ENTRY bb.  */
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nCheck counts for each bb:\n");
++
++  std::vector bb_path;
++  int tmp_bb_idx = 1;
++  bb_pathset.insert (tmp_bb_idx);
++  bb_path.push_back (tmp_bb_idx);
++  tmp_bb_idx = bb_weights[tmp_bb_idx].prev_bb_idx;
++  while (tmp_bb_idx > 0 && tmp_bb_idx < bb_num_max)
++    {
++      if (bb_pathset.count (tmp_bb_idx))
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf(dump_file, "ERROR: already seen bb index %d\n",
++		    tmp_bb_idx);
++	  return false;
++	}
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "%d: %ld, ", tmp_bb_idx,
++		 bb_weights[tmp_bb_idx].bb_count);
++      bb_pathset.insert (tmp_bb_idx);
++      bb_path.push_back (tmp_bb_idx);
++      tmp_bb_idx = bb_weights[tmp_bb_idx].prev_bb_idx;
++    }
++  /* It is possible that the function exit code is wrapped around as an
++     variable, and thus, EXIT_BB in cfg is not connected to any bb.  */
++  if (tmp_bb_idx < 0 || tmp_bb_idx >= bb_num_max)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "unhandled scenario at backtracking highly "
++			      "executed path with tmp_bb_idx %d",
++		   tmp_bb_idx);
++	}
++      return false;
++    }
++
++  record_high_execution_path (sorted_kernel, bb_path, bb_num_max);
++
++  return true;
++}
++
++
++/* ================ phase 5 record_and_sort_ref_groups ================  */
++/* Memory reference score, different aspects of one memory reference.  */
++
++struct ref_score
++{
++  /* certain memory reference.  */
++  data_ref d_ref;
++
++  /* local count for bb where memory reference is located.  */
++  gcov_type bb_count;
++
++  /* line-location of memory reference.  */
++  int line;
++};
++
++/* Memory reference group, different reference of the same variable.  */
++
++struct ref_group
++{
++  /* source variables.  */
++  tree var;
++
++  /* variable size, Unit: MB.  */
++  double var_size;
++
++  /* first ref for insert hint.  */
++  data_ref first_use;
++
++  /* first ref with the highest-order CALC.  */
++  data_ref first_calc_use;
++
++  /* reuse scores of variables.  */
++  float reuse_level;
++
++  /* method of calculating the var size.  */
++  calc_type calc_by;
++
++  /* memory reference index for specific variable.  */
++  unsigned int mem_ref_index;
++
++  /* variable dimension.  */
++  unsigned int dim;
++
++  /* True if first_calc_use's footprint replaces that of first_use.  */
++  unsigned int transfer_ft;
++
++  /* Accessing Reference Records in Different Modes (key_index):
++    000: write, random, non-parallel
++    001: write, random, parallel
++    010: write, regular, non-parallel
++    011: write, regular, parallel
++    100: read, random, non-parallel
++    101: read, random, parallel
++    110: read, regular, non-parallel
++    111: read, regular, parallel
++  */
++  std::map > ref_use;
++
++  /* scores for different memory references.  */
++  std::vector ref_scores;
++
++  ref_group ()
++    {
++      var = NULL_TREE;
++      var_size = 0;
++      reuse_level = 0;
++      calc_by = UNHANDLE_CALC;
++      mem_ref_index = 0;
++      dim = 1;
++      transfer_ft = 0;
++    }
++};
++
++/* Get the integer part for log(x) with the given base.  */
++
++static unsigned int
++flog (float x, float base)
++{
++  unsigned int res = 0;
++  while (x >= base)
++    {
++      ++res;
++      x /= base;
++    }
++  return res;
++}
++
++/* Calculate reuse time for a memory reference in ref_group.  */
++
++float
++calculate_reuse_times (std::vector &mem_refs, std::set &loop_set,
++		       std::set &bb_set, unsigned int var_dim)
++{
++  const float SAME_BB_REUSE_WEIGHT = 0.1;
++  const float SAME_LOOP_REUSE_WEIGHT = 0.5;
++  const float NORMAL_REUSE_WEIGHT = 1.;
++
++  float reuse_time_sum = 0.;
++  for (std::vector::iterator it = mem_refs.begin ();
++       it != mem_refs.end (); ++it)
++    {
++      const data_ref &mem_ref = *it;
++      float reuse_time = 0.;
++      if (bb_set.count (mem_ref.bb_idx))
++	{
++	  /* If the two mem_ref belong to the same bb, the new reuse
++	     weight will not exceed 0.1 divided by the mem_ref mode group
++	     size.
++	     NEXT STEP: The following equation may hold and cause commutative
++	     property of read and write op not holding:
++	      write + (reused) read != read + (reused) write.
++	     However, it seems that write mem_ref is always before read mem_ref,
++	     so the above comparison does not show up in calculation due to
++	     intrinsic in-order property of tree map, but this condition is
++	     quite fragile anyway.  */
++	  reuse_time = SAME_BB_REUSE_WEIGHT / mem_refs.size ();
++	}
++      else
++	{
++	  bb_set.insert (mem_ref.bb_idx);
++	  if (loop_set.count (mem_ref.loop_idx))
++	    {
++	      /* If the mem_ref belongs to a loop where any other mem_ref is in,
++		 the new reuse weight will be 0.5.  */
++	      reuse_time = SAME_LOOP_REUSE_WEIGHT;
++	    }
++	  else
++	    {
++	      /* If the mem_ref is reused but not in the same group with any
++		 other mem_ref, the new reuse weight will be 1.  */
++	      loop_set.insert (mem_ref.loop_idx);
++	      reuse_time = NORMAL_REUSE_WEIGHT;
++	    }
++	}
++      unsigned int used_dim = std::min (mem_ref.loop_depth, var_dim);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "used_dim : %u, loop_depth : %u\n", used_dim,
++		 mem_ref.loop_depth);
++      unsigned int power = flog (std::max (0u, mem_ref.loop_depth - used_dim)
++				 + 2, 2.);
++      reuse_time_sum += reuse_time * (used_dim * used_dim / 2.) * (power);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "(%f * (%u * %u / 2) * (%u) = %f\n",
++		 reuse_time, used_dim, used_dim, power,
++		 reuse_time * (used_dim * used_dim / 2.) * (power));
++    }
++  return reuse_time_sum;
++}
++
++/* Calculate reuse level.  */
++
++float
++calculate_reuse_level (std::map > &var_use,
++		       unsigned int var_dim, double var_size)
++{
++  const float VAR_SIZE_CACHE_CAPACITY = 1 / 4.;
++  const int WITHIN_CACHE_SIZE_COST = 4;
++  const float BYTE_CONVERT_RATIO = 1024.;
++
++  float level = 0.;
++  std::set loop_set;
++  std::set bb_set;
++  bool has_write_op = false;
++  for (std::map >::iterator it = var_use.begin ();
++       it != var_use.end (); ++it)
++    {
++      unsigned int parallel = 1;
++      unsigned int regular = 1;
++
++      if ((*it).second[0].parallel_p)
++	parallel = PARALLEL_NUM;
++      if (!(*it).second[0].regular_p)
++	regular = INDIRECT_ACCESS_VALUE;
++      if (!(*it).second[0].read_p)
++	has_write_op = true;
++
++      /* In serial reuse, we will later check whether they are in the
++	 same cacheline.  If yes, delete the reuse.  For details, see the
++	 reuse analysis of prefetching and eliminate redundancy.  */
++      float reuse_times = calculate_reuse_times ((*it).second, loop_set,
++						 bb_set, var_dim);
++      float add = parallel * reuse_times * regular;
++      level += add;
++      if (add && dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "%d : %d * %f * %d = %f\n",
++		 (*it).first, parallel, reuse_times, regular, add);
++    }
++
++  bool within_llc_size = var_size > param_l2_cache_size / BYTE_CONVERT_RATIO
++			 && var_size < VAR_SIZE_CACHE_CAPACITY
++				       * param_llc_capacity_per_core;
++
++  float final_level = has_write_op ? (level * WRITE_COST) : level;
++  final_level = within_llc_size ? (final_level * WITHIN_CACHE_SIZE_COST)
++				: final_level;
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "final level : %d * %f * %d = %f\n",
++	     has_write_op ? WRITE_COST : 1, level,
++	     within_llc_size ? WITHIN_CACHE_SIZE_COST : 1, final_level);
++  return final_level;
++}
++
++/* Comparison of reference reuse level.  */
++
++bool
++ref_group_reuse_cmp (const ref_group &a, const ref_group &b)
++{
++  if (a.reuse_level != b.reuse_level)
++    return a.reuse_level > b.reuse_level;
++  else
++    return get_name (a.var) < get_name (b.var);
++}
++
++/* Dump key information of reference group and memory access for llc hint.  */
++
++void
++dump_key_info_for_llc_hint (std::vector &ref_groups)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nLLC hint info:\n");
++      fprintf (dump_file, "rank\tvar\t(lineno, direct, vectorized, write)\n");
++      for (unsigned int i = 0; i < ref_groups.size (); ++i)
++	{
++	  fprintf (dump_file, "%d\t", i);
++	  print_generic_expr (dump_file, ref_groups[i].var, TDF_SLIM);
++	  data_ref &mem_ref = ref_groups[i].first_use;
++	  fprintf (dump_file, "\t(%d, %u, %u, %u)",
++		   expand_location (mem_ref.stmt->location).line,
++		   mem_ref.regular_p, mem_ref.vectorize_p, 1 - mem_ref.read_p);
++	  fprintf (dump_file, "\n");
++	}
++      fprintf (dump_file, "\n");
++    }
++}
++
++/* Sort reference groups.  */
++
++void
++sort_ref_groups (std::vector &ref_groups,
++		 std::map &ref_groups_map)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "\nsort_ref_groups_by_reuse_level\n");
++
++  for (std::map::iterator it = ref_groups_map.begin ();
++       it != ref_groups_map.end (); ++it)
++    {
++      (*it).second.reuse_level = calculate_reuse_level ((*it).second.ref_use,
++							(*it).second.dim,
++							(*it).second.var_size);
++      ref_groups.push_back ((*it).second);
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  print_generic_expr (dump_file, (*it).second.var, TDF_SLIM);
++	  fprintf (dump_file, " : %f\n\n", (*it).second.reuse_level);
++	}
++    }
++
++  std::sort (ref_groups.begin (), ref_groups.end (), ref_group_reuse_cmp);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nsorted ref_groups:\n");
++      fprintf (dump_file, "rank\tvar\t(data_size, dim, num_of_mem_ref, "
++			  "need_tmp_name): reuse_level_score\n");
++      for (unsigned int i = 0; i < ref_groups.size (); ++i)
++	{
++	  fprintf (dump_file, "%d\t", i);
++	  print_generic_expr (dump_file, ref_groups[i].var, TDF_SLIM);
++	  int need_tmp_name = !get_name (ref_groups[i].var) ? 1 : 0;
++	  fprintf (dump_file, "\t(%lf, %u, %lu, %d)", ref_groups[i].var_size,
++		   ref_groups[i].dim, ref_groups[i].ref_scores.size (),
++		   need_tmp_name);
++	  fprintf (dump_file, " : %f\n", ref_groups[i].reuse_level);
++	}
++      fprintf (dump_file, "\n");
++
++      fprintf (dump_file, "first_use:\n");
++      for (unsigned int i = 0; i < ref_groups.size (); ++i)
++	{
++	  fprintf (dump_file, "%d ", i);
++	  print_generic_expr (dump_file, ref_groups[i].var, TDF_SLIM);
++	  fprintf (dump_file, " : ");
++	  if (!ref_groups[i].first_use.vectorize_p)
++	    print_generic_expr (dump_file, ref_groups[i].first_use.ref,
++				TDF_SLIM);
++	  else
++	    print_gimple_stmt (dump_file, ref_groups[i].first_use.stmt,
++				TDF_SLIM);
++	  fprintf (dump_file, "\n");
++	}
++      fprintf (dump_file, "\n");
++    }
++    dump_key_info_for_llc_hint (ref_groups);
++}
++
++/* Attributes of variable data.  */
++
++enum data_attribute
++{
++  DA_PARALLEL = 0,
++  DA_REGULAR,
++  DA_READ
++};
++
++/* Record memory reference by use mode.
++   If the reference group is not found, create a group.  */
++
++void
++record_mem_ref (std::map &ref_groups, data_ref &mem_ref)
++{
++  unsigned int index = (mem_ref.parallel_p << DA_PARALLEL)
++	      + (mem_ref.regular_p << DA_REGULAR) + (mem_ref.read_p << DA_READ);
++
++  if (!ref_groups.count (mem_ref.var))
++    {
++      ref_group ref_group;
++      ref_group.var = mem_ref.var;
++      ref_group.first_use = mem_ref;
++      ref_group.first_calc_use = mem_ref;
++      ref_groups[mem_ref.var] = ref_group;
++    }
++
++  /* Ref_groups' calc_by reflects the highest order of calc_by that can be
++     achieved by all mem_ref of ref_groups. The first mem_ref that achieves
++     this order is defined to be `first_calc_use`. Later after sorting
++     mem_refs, calc_by will be replaced by the calc_by of `first_use`, and
++     even by the calc_by of `first_calc_use`.  */
++  if (mem_ref.calc_by > ref_groups[mem_ref.var].calc_by)
++    {
++      ref_groups[mem_ref.var].calc_by = mem_ref.calc_by;
++      ref_groups[mem_ref.var].first_calc_use = mem_ref;
++    }
++  ref_groups[mem_ref.var].var_size = std::max (ref_groups[mem_ref.var].var_size,
++					       mem_ref.data_size);
++  ref_groups[mem_ref.var].dim = std::max (ref_groups[mem_ref.var].dim,
++				(unsigned int) mem_ref.loop_bounds.size ());
++  ref_groups[mem_ref.var].ref_use[index].push_back (mem_ref);
++
++  ref_score ref_level = { mem_ref, ((mem_ref.stmt)->bb->count).to_gcov_type (),
++			   expand_location (mem_ref.stmt->location).line };
++  ref_groups[mem_ref.var].ref_scores.push_back (ref_level);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "recorded in: ");
++      print_generic_expr (dump_file, mem_ref.var, TDF_SLIM);
++      fprintf (dump_file, ":%d:%ld\n", index,
++	       ref_groups[mem_ref.var].ref_use[index].size () - 1);
++
++      fprintf (dump_file, "base: ");
++      print_generic_expr (dump_file, mem_ref.base, TDF_SLIM);
++
++      fprintf (dump_file, ", index: ");
++      print_generic_expr (dump_file, mem_ref.index, TDF_SLIM);
++
++      fprintf (dump_file, ", step: ");
++      if (mem_ref.step && cst_and_fits_in_hwi (mem_ref.step))
++	fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
++		 int_cst_value (mem_ref.step));
++      else
++	print_generic_expr (dump_file, mem_ref.step, TDF_SLIM);
++
++      fprintf (dump_file, ", offset: ");
++      if (mem_ref.offset && cst_and_fits_in_hwi (mem_ref.offset))
++	fprintf (dump_file, HOST_WIDE_INT_PRINT_DEC,
++		 int_cst_value (mem_ref.offset));
++      else
++	print_generic_expr (dump_file, mem_ref.offset, TDF_SLIM);
++      fprintf (dump_file, ", %s", mem_ref.read_p ? "read" : "write");
++
++      fprintf (dump_file, ", size: %lf", mem_ref.data_size);
++      fprintf (dump_file, "\n\n");
++    }
++}
++
++/* Rank data reference index level.  */
++
++bool
++best_insert_cmp (const ref_score &a, const ref_score &b)
++{
++  /* NEXT STEP: We can also calculate gap using static/feedback info inferred
++     from historical maximum bb count:
++	gap = hist_max_bb_ct / (alpha * max (a.bb_ct, b.bb_ct)) + 1.
++     Also, bb count needs to be smoothed and scaled as divisor can be 0.
++     history maximum bb count can be obtained in Phase 4.  */
++  const float gap = 1;
++  if (a.d_ref.loop_depth != b.d_ref.loop_depth)
++    return a.d_ref.loop_depth > b.d_ref.loop_depth;
++  else if (a.d_ref.regular_p != b.d_ref.regular_p)
++    return a.d_ref.regular_p > b.d_ref.regular_p;
++  else if (abs (double (std::max (a.bb_count, b.bb_count) + 1)
++		/ double (std::min (a.bb_count, b.bb_count) + 1) - 1) > gap)
++    return a.bb_count > b.bb_count;
++  else if (a.line != b.line)
++    return a.line < b.line;
++  else if (a.d_ref.read_p != b.d_ref.read_p)
++    return a.d_ref.read_p < b.d_ref.read_p;
++  else
++    return a.d_ref.vectorize_p > b.d_ref.vectorize_p;
++}
++
++/* Sort data reference index level within one reference group in non-decreasing
++   order of the customized sorting scheme.  */
++
++void
++sort_mem_ref_in_ref_group (std::map &ref_groups_map)
++{
++  if (dump_file)
++    fprintf (dump_file, "\nsorted data_references:\n");
++  for (std::map::iterator it = ref_groups_map.begin ();
++       it != ref_groups_map.end (); ++it)
++    {
++      ref_group &curr_ref_group = (*it).second;
++      std::vector &ref_scores = curr_ref_group.ref_scores;
++      std::stable_sort (ref_scores.begin (), ref_scores.end (),
++			best_insert_cmp);
++      /* Update ref_group's first_use and calc_by with the first mem_ref after
++	 sorting.  */
++      curr_ref_group.first_use = curr_ref_group.ref_scores[0].d_ref;
++      curr_ref_group.calc_by = curr_ref_group.first_use.calc_by;
++
++      /* When transferring footprint is enabled, it is allowed to transfer
++	 the statically-calculated footprint of a mem_ref from the same
++	 ref_group to `first_use` mem_ref.  */
++      if (param_transfer_footprint
++	  && curr_ref_group.first_use.calc_by == UNHANDLE_CALC)
++	{
++	  if (curr_ref_group.first_calc_use.calc_by > RUNTIME_CALC)
++	    {
++	      if (dump_file && (dump_flags & TDF_DETAILS))
++		{
++		  print_generic_expr (dump_file, (*it).first, TDF_SLIM);
++		  fprintf (dump_file, "\nfirst_use: ");
++		  print_gimple_stmt (dump_file, curr_ref_group.first_use.stmt,
++				     0, TDF_LINENO);
++		  fprintf (dump_file, "first_calc_use: ");
++		  print_gimple_stmt (dump_file,
++				     curr_ref_group.first_calc_use.stmt,
++				     0, TDF_LINENO);
++		}
++
++	      curr_ref_group.calc_by = curr_ref_group.first_calc_use.calc_by;
++	      curr_ref_group.transfer_ft = 1;
++	    }
++	  else
++	    {
++	      if (dump_file && (dump_flags & TDF_DETAILS))
++		{
++		  print_generic_expr (dump_file, (*it).first, TDF_SLIM);
++		  fprintf (dump_file, ": cannot transfer footprint to "
++				      "first use mem_ref.\n");
++		}
++	    }
++	}
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  print_generic_expr (dump_file, (*it).first, TDF_SLIM);
++	  fprintf (dump_file, " : %lu\n", ref_scores.size ());
++	  for (unsigned int i = 0; i < ref_scores.size (); ++i)
++	    {
++	      fprintf (dump_file, "mem_ref_index %u: ", i);
++	      print_gimple_stmt (dump_file, ref_scores[i].d_ref.stmt, 0,
++				 TDF_LINENO);
++	      fprintf (dump_file, "bb-%d ",
++		       ref_scores[i].d_ref.stmt->bb->index);
++	      fprintf (dump_file, "count %ld\n", ref_scores[i].bb_count);
++	    }
++	  fprintf (dump_file, "\n\n");
++	}
++    }
++}
++
++/* Tracing and sorting reference groups.  */
++
++bool
++record_and_sort_ref_groups (std::vector &ref_groups,
++			    std::vector &kernels,
++			    std::map > &loop_refs,
++			    std::set bb_pathset)
++{
++  if (dump_file)
++    fprintf (dump_file, "\nPhase 5: trace_all_references_details\n\n");
++
++  std::map ref_groups_map;
++
++  for (unsigned i = 0; i < kernels.size (); ++i)
++    {
++      class loop *loop = kernels[i];
++      if (loop_refs.count (loop) == 0)
++	continue;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "loop header %d:\n", loop->header->index);
++      for (unsigned j = 0; j < loop_refs[loop].size (); ++j)
++	{
++	  data_ref &mem_ref = loop_refs[loop][j];
++	  if (mem_ref.trace_status_p)
++	    {
++	      if (!param_filter_mode || (param_filter_mode
++		  && bb_pathset.count (mem_ref.stmt->bb->index)))
++		record_mem_ref (ref_groups_map, mem_ref);
++	    }
++	}
++    }
++
++  /* Sort mem_ref within ref_group by local count and update first_use's
++     data_ref, stable sort.  */
++  sort_mem_ref_in_ref_group (ref_groups_map);
++  sort_ref_groups (ref_groups, ref_groups_map);
++
++  return ref_groups.size () > 0;
++}
++
++/* ================ phase 6 issue_llc_hint ================  */
++
++/* Issue vectorized mask prefetch gimple.  */
++
++void
++issue_mask_prefetch (gimple *stmt)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "insert svprfd.\n");
++
++  /* vect__1.1 = .MASK_LOAD (_2, 32B, loop_mask_3);
++     .MASK_STORE (_4, 32B, loop_mask_5, vect__6.6);
++  */
++  tree dataref_ptr = gimple_call_arg (stmt, 0);
++  tree scale = gimple_call_arg (stmt, 1);
++  tree final_mask = gimple_call_arg (stmt, 2);
++  tree target = NULL_TREE;
++  if (gimple_call_internal_fn (stmt) == IFN_MASK_STORE)
++    target = gimple_call_arg (stmt, 3);
++  else if (gimple_call_internal_fn (stmt) == IFN_MASK_LOAD)
++    target = gimple_call_lhs (stmt);
++  tree prfop = NULL_TREE;
++  if (param_llc_level == 3)
++    /* for simulation, 4: PLDL3KEEP.  */
++    prfop = build_int_cst (TREE_TYPE (integer_zero_node), 4);
++  else if (param_llc_level == 4)
++    /* 6: PLDL4KEEP.  */
++    prfop = build_int_cst (TREE_TYPE (integer_zero_node), 6);
++  else
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "LLC cache levels are illegal.\n");
++      return;
++    }
++
++  /* add offset.  */
++  gimple_stmt_iterator si = gsi_for_stmt (stmt);
++  /* target: vector_type - XXX_type.  */
++  if (target == NULL_TREE)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "unhandled scene: target vect is null");
++      return;
++    }
++  unsigned HOST_WIDE_INT distance = param_prefetch_offset * tree_to_uhwi
++		       (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (target))));
++  tree addr = fold_build_pointer_plus_hwi (dataref_ptr, distance);
++  addr = force_gimple_operand_gsi (&si, unshare_expr (addr), true,
++				   NULL, true, GSI_SAME_STMT);
++
++  gcall *call = gimple_build_call_internal (IFN_MASK_PREFETCH, 5, addr, scale,
++					    final_mask, target, prfop);
++  gsi_insert_after (&si, call, GSI_SAME_STMT);
++  update_ssa (TODO_update_ssa_only_virtuals);
++}
++
++/* Issue vectorized mask gather prefetch gimple.  */
++
++void
++issue_mask_gather_prefetch (gimple *stmt)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "insert svprfd_gather_uxindex.\n");
++
++  /* vect_patt_1.1 = .MASK_GATHER_LOAD (_2, vect__3.3, 8, { 0.0, ... },
++					loop_mask_4);  */
++  tree dataref_ptr = gimple_call_arg (stmt, 0);
++  tree vec_offset = gimple_call_arg (stmt, 1);
++  tree scale = gimple_call_arg (stmt, 2);
++  tree zero = gimple_call_arg (stmt, 3);
++  tree final_mask = gimple_call_arg (stmt, 4);
++  tree prfop = NULL_TREE;
++  if (param_llc_level == 3) // for simulation
++    prfop = build_int_cst (TREE_TYPE (integer_zero_node), 4); // 4: PLDL3KEEP
++  else if (param_llc_level == 4)
++    prfop = build_int_cst (TREE_TYPE (integer_zero_node), 6); // 6: PLDL4KEEP
++  else
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "LLC cache levels are illegal.\n");
++      return;
++    }
++
++  tree target = gimple_call_lhs (stmt);
++  /* add offset.  */
++  gimple_stmt_iterator si = gsi_for_stmt (stmt);
++  if (target == NULL_TREE)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "unhandled scene: target vect is null");
++      return;
++    }
++  unsigned HOST_WIDE_INT distance = param_prefetch_offset * tree_to_uhwi
++		       (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (target))));
++  tree addr = fold_build_pointer_plus_hwi (dataref_ptr, distance);
++  addr = force_gimple_operand_gsi (&si, unshare_expr (addr), true,
++				   NULL, true, GSI_SAME_STMT);
++
++  gcall *call = gimple_build_call_internal (IFN_MASK_GATHER_PREFETCH, 7, addr,
++					    vec_offset, scale, zero,
++					    final_mask, target, prfop);
++  gsi_insert_after (&si, call, GSI_SAME_STMT);
++  update_ssa (TODO_update_ssa_only_virtuals);
++}
++
++/* Issue builtin prefetch gimple.  */
++
++void
++issue_builtin_prefetch (data_ref &mem_ref)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "insert prfm.\n");
++  /* MEM[symbol: diagPtr, index: ivtmp_102, step: 8, offset: 0B] */
++  gimple *stmt = mem_ref.stmt;
++  tree ref = mem_ref.ref;
++
++  tree scale = mem_ref.step;
++  gimple_stmt_iterator si = gsi_for_stmt (stmt);
++  if (scale == NULL_TREE)
++    {
++      /* _190 = (void *) ivtmp.444_221;
++	 Cannot detect size unit at (void *).  */
++      scale = TYPE_SIZE_UNIT (inner_type (TREE_TYPE (mem_ref.var)));
++      if (scale == NULL_TREE)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "ERROR: Unknown size unit for the prefetching "
++				"variable.  Stop builtin_prefetch.\n\n");
++	  return;
++	}
++    }
++
++  tree addr = build_fold_addr_expr_with_type (ref, ptr_type_node);
++  addr = force_gimple_operand_gsi (&si, unshare_expr (addr),
++				   true, NULL, true, GSI_SAME_STMT);
++  unsigned HOST_WIDE_INT distance = param_prefetch_offset
++				      * tree_to_uhwi (scale);
++
++  addr = fold_build_pointer_plus_hwi (addr, distance);
++  addr = force_gimple_operand_gsi (&si, unshare_expr (addr), true,
++				   NULL, true, GSI_SAME_STMT);
++  /* __builtin_prefetch (_68, 0, 1);
++     1st param: *addr, 2nd param: write/read (1/0), 3rd param: temporal locality
++     (high means strong locality) */
++  gcall *call = NULL;
++  if (param_llc_level == 3)
++    {
++      /* for simulation.
++	 BUILT_IN_PREFETCH (addr, rw, locality).  */
++      call = gimple_build_call (builtin_decl_explicit (BUILT_IN_PREFETCH),
++				3, addr, integer_zero_node, integer_one_node);
++    }
++  else if (param_llc_level == 4)
++    {
++	tree prfop = build_int_cst (TREE_TYPE (integer_zero_node), 6);
++	call = gimple_build_call (
++				builtin_decl_explicit (BUILT_IN_PREFETCH_FULL),
++				3, addr, integer_zero_node, prfop);
++    }
++  else
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "LLC cache levels are illegal.\n");
++      return;
++    }
++
++  gsi_insert_after (&si, call, GSI_SAME_STMT);
++  update_ssa (TODO_update_ssa_only_virtuals);
++}
++
++/* Static form insertion and issue instruction.  We may check the
++   determination of the ARM SVE architecture before SVE hint insertion.  */
++
++void
++static_issue (std::vector &ref_groups, int num_issue_var)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "static issue\n");
++
++  for (int i = 0; i < num_issue_var; ++i)
++    {
++      data_ref mem_ref = ref_groups[i].first_use;
++      if (mem_ref.vectorize_p)
++	{
++	  enum internal_fn ifn_code = gimple_call_internal_fn (mem_ref.stmt);
++	  if (ifn_code == IFN_MASK_STORE || ifn_code == IFN_MASK_LOAD)
++	    issue_mask_prefetch (mem_ref.stmt);
++	  else if (ifn_code == IFN_MASK_GATHER_LOAD)
++	    issue_mask_gather_prefetch (mem_ref.stmt);
++	  else
++	    if (dump_file && (dump_flags & TDF_DETAILS))
++	      fprintf (dump_file, "other vectorized internal function\n");
++	}
++      else
++	issue_builtin_prefetch (mem_ref);
++    }
++}
++
++/* Check whether all loop bounds (niters) used for calculating the footprints
++   of previously-executed ref_groups are defined in a dominated bb to the
++   currentbranch bb, where the conditional expression requires the loop bound
++   info.  */
++
++bool
++check_def_use_chain (std::vector &ref_groups,
++		     basic_block &branch_header_bb,
++		     std::vector &ref_group_idx)
++{
++  for (std::vector::iterator it = ref_group_idx.begin ();
++       it != ref_group_idx.end (); ++it)
++    {
++      /* Transferring mem_ref only takes place during footprint calculation.  */
++      ref_group &ref_group_curr = ref_groups[*it];
++      data_ref mem_ref = ref_group_curr.transfer_ft
++			  ? ref_group_curr.first_calc_use
++			  : ref_group_curr.first_use;
++      for (unsigned j = 0; j < mem_ref.loop_bounds.size (); ++j)
++	{
++	  tree niters = mem_ref.loop_bounds[j].niters;
++	  gimple *def_stmt = SSA_NAME_DEF_STMT (niters);
++	  basic_block def_bb = gimple_bb (def_stmt);
++	  /* Check dominator relationship of def bb and branch bb.  */
++	  /* Case 1: Check whether the def bb is the single predecessor block
++	     of header bb.  */
++	  if (single_pred_p (branch_header_bb))
++	    {
++	      basic_block branch_bb_prev = single_pred (branch_header_bb);
++	      if (branch_bb_prev->index == def_bb->index)
++		continue;
++	    }
++	  /* Case 2: Check whether the branch bb is dominated by the def
++	     bb.  */
++	  if (!dominated_by_p (CDI_DOMINATORS, branch_header_bb, def_bb))
++	    return false;
++	}
++    }
++  return true;
++}
++
++/* Generate the stmts for calculating the size.  Later we will consider nested
++   multi-branches scenarios and check more information of niters when it is
++   a COND_EXPR.  */
++
++tree
++calc_stmts_gen (std::vector &ref_groups,
++		gimple_seq &cond_expr_stmt_list,
++		basic_block branch_header_bb,
++		std::vector &ref_group_idx_curr,
++		std::vector &ref_group_idx_prev, tree &cumul_size)
++{
++  /* Check whether the bbs of def stmt for footprint loop bounds dominates
++     the bb of new runtime branching conditional.  */
++  if (!check_def_use_chain (ref_groups, branch_header_bb, ref_group_idx_prev))
++    return NULL_TREE;
++
++  /* Accumulated allocation size.  */
++  for (std::vector::iterator it = ref_group_idx_curr.begin ();
++       it != ref_group_idx_curr.end (); ++it)
++    {
++      /* Transferring mem_ref only takes place during footprint calculation.  */
++      ref_group &ref_group_curr = ref_groups[*it];
++      data_ref mem_ref = ref_group_curr.transfer_ft
++			  ? ref_group_curr.first_calc_use
++			  : ref_group_curr.first_use;
++      tree var = mem_ref.var;
++      tree unit = TYPE_SIZE_UNIT (inner_type (TREE_TYPE (var)));
++      /* _190 = (void *) ivtmp.444_221;
++	 Cannot detect size unit at (void *).  */
++      if (unit == NULL_TREE)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "WARNING: Cannot detect size unit "
++				  "(use 1 byte) for variable %s: ",
++				  get_name (var));
++	      print_generic_expr (dump_file, mem_ref.ref, TDF_SLIM);
++	      fprintf (dump_file, "\n");
++	    }
++	  unit = size_one_node;
++	}
++      tree size = NULL_TREE;
++      for (unsigned j = 0; j < mem_ref.loop_bounds.size (); ++j)
++	{
++	  tree niters = mem_ref.loop_bounds[j].niters;
++
++	  /* COND_EXPR.  */
++	  if (TREE_CODE (niters) == COND_EXPR)
++	    niters = TREE_OPERAND (niters, 1);
++	  if (size == NULL_TREE) 
++	    {
++		    size = niters;
++	    } else {
++		    size = fold_build2 (MULT_EXPR, TREE_TYPE (niters), niters, 
++					size);
++	    }
++	}
++      unit = build1 (NOP_EXPR, TREE_TYPE (size), unit);
++      size = fold_build2 (MULT_EXPR, TREE_TYPE (size), size, unit);
++      size = build1 (FLOAT_EXPR, double_type_node, size);
++      cumul_size = fold_build2 (PLUS_EXPR, double_type_node, cumul_size,
++				size);
++      ref_group_idx_prev.push_back (*it);
++    }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "cumul_size = ");
++      print_generic_expr (dump_file, cumul_size, TDF_SLIM);
++      fprintf (dump_file, "\n");
++    }
++  /* Create a stmt list for size calculation.  */
++  tree div = build_int_cst (TREE_TYPE (integer_zero_node), 1024 * 1024);
++  div = build1 (NOP_EXPR, double_type_node, div);
++  tree total_size = fold_build2 (RDIV_EXPR, double_type_node, cumul_size, div);
++
++  tree threshold = build_int_cst (TREE_TYPE (integer_zero_node),
++				  param_llc_capacity_per_core / 2);
++  threshold = build_real_from_int_cst (double_type_node, threshold);
++  tree cond_expr = fold_build2 (LE_EXPR, boolean_type_node, total_size,
++				threshold);
++
++  /* Convert cond_expr to stmt list.  */
++  cond_expr = force_gimple_operand_1 (unshare_expr (cond_expr),
++				      &cond_expr_stmt_list, is_gimple_condexpr,
++				      NULL_TREE);
++  return cond_expr;
++}
++
++/* Retrieve the least number of loops that cover all target mem_refs.
++   Try to merge loops that the mem_refs reside to a common superloop and
++   maintain a worklist which relates NEED-TO-COPY loops with the target mem
++   refs inside using the following criteria:
++   1) If loop A is a superloop of loop B in the worklist, replace loop B with
++      loop A in the worklist, and attach all target mem_refs of loop B,
++      together with loop A's, to loop A.
++   2) If loop B in the worklist is a superloop of loop A, attach loop A's
++      target mem_ref to loop B.
++   3) If loop A is not a superloop/subloop of loop B in the worklist, replace
++      loop B with their lowest common superloop C in the worklist, and attach
++      all target mem_refs of loop A and loop B to loop C.
++   4) If loop A and loop B's lowest common superloop is function body
++      (loop 0), stop merging and maintain loop independence.  */
++
++void
++get_loop_worklist (std::vector &ref_groups, int num_issue_var,
++		   std::map > &loop_worklist)
++{
++  for (int i = 0; i < num_issue_var; ++i)
++    {
++      data_ref &mem_ref = ref_groups[i].first_use;
++      class loop *loop_new = mem_ref.loop_bounds.front ().loop;
++      class loop *common_superloop = loop_new;
++      bool add_loop_worklist = false;
++
++      /* Use greedy algorithm to merge loops to a common superloop that can
++	 contain the current mem_refs.  */
++      std::map >::iterator it_tmp;
++      std::vector ref_group_idx_tmp;
++      std::map >::iterator it;
++      for (it = loop_worklist.begin (); it != loop_worklist.end ();)
++	{
++	  class loop *loop_old = it->first;
++	  common_superloop = find_common_loop (loop_new, loop_old);
++	  if (common_superloop == NULL || common_superloop->num == 0)
++	    {
++	      /* Stop merging two loops if there is no common superloop for
++		 them except function body (loop 0).  */
++	      if (common_superloop != NULL
++		  && dump_file && (dump_flags & TDF_DETAILS))
++		{
++		  fprintf (dump_file, "ref_group %d's loop %d has no common "
++				      "superloop with existing loop %d\n",
++			   i, loop_new->num, loop_old->num);
++		}
++	      ++it;
++	      continue;
++	    }
++
++	  if (common_superloop->num == loop_old->num)
++	    {
++	      /* If loop_old is the superloop of loop_new, add current
++		 ref_group index to loop's worklist.  */
++	      loop_worklist[common_superloop].push_back (i);
++	      ++it;
++	    }
++	  else
++	    {
++	      /* If loop_old is not a superloop of loop_new, replace
++		 loop_old with the common superloop.  */
++	      it_tmp = it;
++	      ++it_tmp;
++	      ref_group_idx_tmp = it->second;
++	      loop_worklist.erase (it);
++	      it = it_tmp;
++	      add_loop_worklist = true;
++	    }
++	}
++
++      if (loop_worklist.empty () || add_loop_worklist)
++	{
++	  /* Update the new common superloop in loop_worklist.  */
++	  std::vector &ref_groups_tmp = loop_worklist[common_superloop];
++	  ref_groups_tmp.push_back (i);
++	  for (std::vector::iterator it = ref_group_idx_tmp.begin ();
++	       it != ref_group_idx_tmp.end (); ++it)
++	    ref_groups_tmp.push_back (*it);
++	  std::sort (ref_groups_tmp.begin (), ref_groups_tmp.end ());
++	}
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "runtime loop list:\n");
++      std::map >::iterator it;
++      for (it = loop_worklist.begin (); it != loop_worklist.end (); ++it)
++	{
++	  fprintf (dump_file, "loop %d:", it->first->num);
++	  for (std::vector::iterator idx_it = it->second.begin ();
++	       idx_it != it->second.end (); ++idx_it)
++	    {
++	      fprintf (dump_file, " %d", *idx_it);
++	    }
++	  fprintf (dump_file, "\n");
++	}
++    }
++}
++
++/* Runtime form insertion and issue instruction.  */
++
++void
++runtime_issue (std::vector &ref_groups, int num_issue_var,
++	       std::vector &sorted_kernels)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "runtime issue\n");
++
++  /* It is possible that the loop father of some mem_ref's bb may contain the
++     loop fathers of the others. Therefore, we intend to only copy loops
++     without inclusion relationship.  */
++  std::map > loop_worklist;
++  get_loop_worklist (ref_groups, num_issue_var, loop_worklist);
++  bool get_first_ref_group = false;
++  std::vector ref_group_idx_prev;
++
++  /* NEXT STEP: Multiple loop copies (possibly nested within one loop can cost
++     front-end bound due to branching within loop), we need to set up a
++     threshold such that we may compensate this time cost by space cost
++     in binary (copying outer loop).  */
++  tree cumul_size = build_real_from_int_cst (double_type_node,
++					     integer_zero_node);
++  for (std::vector::iterator it = sorted_kernels.begin ();
++       it != sorted_kernels.end (); ++it)
++    {
++      /* Start runtime branching until finding the first ref_group's loop.
++	 Skip any ref_groups if their `first_use` mem_refs are executed
++	 before the mem_ref of the first ref_group.  */
++      class loop *loop = *it;
++      if (!loop_worklist.count (loop)
++	  || (!get_first_ref_group && loop_worklist[loop][0] != 0))
++	continue;
++
++      std::vector ref_group_idx_curr = loop_worklist[loop];
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "copy loop num: %d\n", loop->num);
++	}
++      /* If the exit edge points to bb with multiple inputs, split the exit
++	 edge and create a new bb, make the exit edge point to bb with only
++	 single input.  */
++      edge e = single_exit (loop);
++      if (e == NULL)
++	return;
++      if (!single_pred_p (e->dest))
++	{
++	  split_loop_exit_edge (e, true);
++	  if (dump_enabled_p ())
++	    dump_printf (MSG_NOTE, "split exit edge\n");
++	}
++
++      /* After updating SSA, we are not sure whether the gimple_seq stmt list
++	 is initialized and unchanged during iterations. Therefore, we need to
++	 recreate this stmt list for every loop copy.  */
++      gimple_seq cond_expr_stmt_list = NULL;
++      tree cond_expr = calc_stmts_gen (ref_groups, cond_expr_stmt_list,
++				       loop->header, ref_group_idx_curr,
++				       ref_group_idx_prev, cumul_size);
++      if (cond_expr == NULL_TREE)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    fprintf (dump_file, "incalculable variables for conditional\n");
++	  return;
++	}
++
++      /* Use the previous cond and generate a new branch and copy loop.  */
++      basic_block condition_bb = NULL;
++      profile_probability prob = profile_probability::likely ();
++      initialize_original_copy_tables ();
++      class loop *nloop = loop_version (loop, cond_expr, &condition_bb,
++					prob, prob.invert (), prob,
++					prob.invert (), true);
++      free_original_copy_tables ();
++
++      /* Insert the generated stmt list before cond_expr.  */
++      gimple_stmt_iterator cond_exp_gsi;
++      if (cond_expr_stmt_list)
++	{
++	  /* Function `gsi_insert_seq_before` will insert `cond_expr` (1st
++	     stmt) of `condition_bb` to the end of `cond_expr_stmt_list`.  */
++	  cond_exp_gsi = gsi_last_bb (condition_bb);
++	  gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
++				 GSI_SAME_STMT);
++	}
++    }
++
++  update_ssa (TODO_update_ssa);
++
++  /* Perform hint issue for branches that meet conditions.  */
++  static_issue (ref_groups, num_issue_var);
++}
++
++/* Issue llc hints through prefetch instructions.  */
++
++void
++issue_llc_hint (std::vector &ref_groups,
++		std::vector &sorted_kernels)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "issue_llc_hint:\n");
++
++  /* 1) If the issue-topn and force-issue options are available, top N var is
++	forcibly allocated then no runtime branch is generated.
++     2) If the issue-topn option is available and the size of top N var is
++	statically known, top N is statically allocated and no runtime branch
++	is generated.
++     3) If the issue-topn option is available and the size of the top N var is
++	unknown, but them is dynamically known, the top N is dynamically
++	allocated and generate runtime branches. (also depends on the screening
++	of the innermost variable boundary type)
++     4) If the dynamic runtime cannot know the size, such as indirect access,
++	optimization is skipped.
++  */
++  int num_issue_var = std::min (param_issue_topn, (int) ref_groups.size ());
++  if (num_issue_var == 0)
++    return;
++
++  if (num_issue_var < param_issue_topn
++      && dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "WARNING: Only %u (less than param_issue_topn = %d) "
++			  "ref_group(s) is found for llc hint.\n",
++	       num_issue_var, param_issue_topn);
++    }
++  if (param_force_issue)
++    {
++      static_issue (ref_groups, num_issue_var);
++      return;
++    }
++  calc_type topn_calc_type = STATIC_CALC;
++  for (int i = 0; i < num_issue_var; ++i)
++    topn_calc_type = std::min (topn_calc_type, ref_groups[i].calc_by);
++
++  if (topn_calc_type == STATIC_CALC)
++    {
++      /* Before static issue, we still need to collect data size of all target
++	 variables and compare the summation with LLC cache size.  */
++      double prefetch_data_size = 0.;
++      for (int i = 0; i < num_issue_var; ++i)
++	prefetch_data_size += ref_groups[i].var_size;
++
++      if (prefetch_data_size <= (double) param_llc_capacity_per_core
++				* PREFETCH_CACHE_SIZE_RATIO)
++	static_issue (ref_groups, num_issue_var);
++      else
++	if (dump_file && (dump_flags & TDF_DETAILS))
++	  fprintf (dump_file, "static issue: Prefetch size exceeds LLC cache "
++			      "size: %lf > %lf.\n",
++		   prefetch_data_size,
++		   (double) param_llc_capacity_per_core
++		   * PREFETCH_CACHE_SIZE_RATIO);
++    }
++  else if (topn_calc_type == RUNTIME_CALC)
++    runtime_issue (ref_groups, num_issue_var, sorted_kernels);
++  else
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "unhandled issue scene\n");
++    }
++}
++
++/* ==================== phase entry ====================  */
++
++/* The LLC intelligent allocation consists of 6 steps.  */
++
++void
++llc_allocate (void)
++{
++  std::map > kernels_refs;
++  std::vector kernels;
++  if (!get_dense_memory_kernels (kernels, kernels_refs))
++    return;
++
++  std::set traced_ref_stmt;
++  std::vector unresolved_refs;
++  trace_data_refs_info (kernels, kernels_refs, traced_ref_stmt,
++			unresolved_refs);
++
++  if (!analyze_nested_kernels (kernels, kernels_refs, traced_ref_stmt,
++			       unresolved_refs))
++    return;
++
++  retrace_loop_refs_info_unresolved (unresolved_refs, traced_ref_stmt);
++
++  std::vector sorted_kernels;
++  std::vector ref_groups;
++  if (param_filter_mode)
++    {
++      /* AutoFDO mode: include ENTRY bb and EXIT bb indices.  */
++      std::set bb_pathset;
++      bb_pathset.insert (0);
++      bb_pathset.insert (1);
++      if (!filter_and_sort_kernels_feedback (sorted_kernels, bb_pathset))
++	return;
++
++      if (!record_and_sort_ref_groups (ref_groups, kernels, kernels_refs,
++				       bb_pathset))
++	return;
++    }
++  else
++    {
++      /* static mode.  */
++      std::set bb_pathset;
++      if (!filter_and_sort_kernels (sorted_kernels, kernels))
++	return;
++
++      if (!record_and_sort_ref_groups (ref_groups, sorted_kernels, kernels_refs,
++				       bb_pathset))
++	return;
++    }
++
++  issue_llc_hint (ref_groups, sorted_kernels);
++}
++
++/* Check whether the function is an operator reloading function.  */
++
++bool
++operator_func_p (function *fn)
++{
++  const char *fn_name = IDENTIFIER_POINTER (DECL_NAME (fn->decl));
++
++  if (fn_name && strncmp (fn_name, "operator", 8) == 0)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "operator_func: %s ", fn_name);
++
++      return true;
++    }
++  return false;
++}
++
++/* Check whether the function file location is known.  */
++
++bool
++func_location_p (function *fn)
++{
++  expanded_location fn_decl_xloc
++    = expand_location (DECL_SOURCE_LOCATION (current_function_decl));
++  expanded_location fn_xloc
++    = expand_location (fn->function_start_locus);
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "fn->function_start_locus = %d \n",
++	       fn->function_start_locus);
++      fprintf (dump_file, "fn_xloc.file = %s \n",
++	       fn_xloc.file ? fn_xloc.file : "NULL");
++      fprintf (dump_file, "fn_decl_xloc.file = %s \n",
++	       fn_decl_xloc.file ? fn_decl_xloc.file : "NULL");
++      fprintf (dump_file, "LOCATION_FILE (input_location) = %s \n",
++	LOCATION_FILE (input_location) ? LOCATION_FILE (input_location)
++				       : "NULL");
++    }
++  if (fn_decl_xloc.file == NULL)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Function location unknown, skip analysis \n");
++      return false;
++    }
++  /* Newly generated functions are filtered out, such as function constant
++     propagation func.constprop ().  */
++  if (LOCATION_FILE (input_location) != fn_decl_xloc.file)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	fprintf (dump_file, "Function location non-local, skip analysis \n");
++      return false;
++    }
++  return true;
++}
++
++/* Dump function information.  */
++
++void
++dump_function_info (function *fn)
++{
++  const char *fn_name = IDENTIFIER_POINTER (DECL_NAME (fn->decl));
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\nfn_name: %s\n", fn_name);
++      expanded_location cfun_xloc
++	= expand_location (DECL_SOURCE_LOCATION (current_function_decl));
++      if (cfun_xloc.line)
++	{
++	  if (cfun_xloc.file)
++	    fprintf (dump_file, "[%s:%d:%d]\n",
++		     cfun_xloc.file, cfun_xloc.line, cfun_xloc.column);
++	}
++      fprintf (dump_file, "\n");
++      flow_loops_dump (dump_file, NULL, 1);
++      fprintf (dump_file, "\n");
++    }
++}
++
++/* dump param.  */
++
++void
++dump_param (void)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++  {
++    fprintf (dump_file, "LLC allocate parameters:\n");
++    fprintf (dump_file, "    block size: %d\n", param_l1_cache_line_size);
++    fprintf (dump_file, "    L1 cache size: %d lines, %d kB\n",
++	param_l1_cache_size * 1024 / param_l1_cache_line_size,
++	param_l1_cache_size);
++    fprintf (dump_file, "    L1 cache line size: %d\n",
++	param_l1_cache_line_size);
++    fprintf (dump_file, "    L2 cache size: %d kB\n", param_l2_cache_size);
++    fprintf (dump_file, "    min mem_access_ratio: %d \n",
++	param_mem_access_ratio);
++    fprintf (dump_file, "    min mem_access_num: %d \n",
++	param_mem_access_num);
++    fprintf (dump_file, "\n");
++  }
++}
++
++/* Determine whether to analyze the function according to
++   the ordering of functions containing cycle counts.  */
++
++static bool
++should_analyze_func_p (void)
++{
++  gcov_type decl_uid = DECL_UID (current_function_decl);
++  gcov_type func_count = event_get_func_count (decl_uid, PMU_EVENT);
++  if (func_count == 0)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "function uid %ld cannot find profile data "
++			      "and skip prefetch analysis\n",
++		   decl_uid);
++	}
++      return false;
++    }
++  if (func_count < event_get_topn_function_total_count_thres ())
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "function uid %ld total counts is %lu: "
++			      "counts %lu < perf's top %d threshold %lu, "
++			      "skip prefetch analysis\n",
++		   decl_uid, func_count, func_count,
++		   PREFETCH_FUNC_TOPN, event_get_topn_function_total_count_thres ());
++	}
++      return false;
++    }
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "function uid %ld total counts is %lu: "
++			  "counts %lu >= perf's top %d threshold %lu, "
++			  "continue prefetch analysis\n",
++	       decl_uid, func_count, func_count,
++	       PREFETCH_FUNC_TOPN, event_get_topn_function_total_count_thres ());
++    }
++  return true;
++}
++
++const pass_data pass_data_llc_allocate =
++{
++  GIMPLE_PASS, /* type.  */
++  "llc_allocate", /* name.  */
++  OPTGROUP_LOOP, /* optinfo_flags.  */
++  TV_TREE_PREFETCH, /* tv_id.  */
++  (PROP_cfg | PROP_ssa), /* properties_required.  */
++  0, /* properties_provided.  */
++  0, /* properties_destroyed.  */
++  0, /* todo_flags_start.  */
++  0, /* todo_flags_finish.  */
++};
++
++class pass_llc_allocate : public gimple_opt_pass
++{
++public:
++  pass_llc_allocate (gcc::context *ctxt)
++    : gimple_opt_pass (pass_data_llc_allocate, ctxt)
++  {}
++
++  /* opt_pass methods.  */
++  virtual bool gate (function *)
++    {
++      return (optimize >= 2 && flag_llc_allocate > 0);
++    }
++  virtual unsigned int execute (function *);
++
++}; // class pass_llc_allocate
++
++unsigned int
++pass_llc_allocate::execute (function *fn)
++{
++  unsigned int ret = 0;
++
++  if (!targetm.have_prefetch ()
++      || targetm.vectorize.code_for_prefetch == NULL
++      || targetm.vectorize.prefetch_handleable_mode_p == NULL
++      || targetm.vectorize.code_for_gather_prefetch == NULL)
++    return 0;
++
++  if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH))
++    {
++      tree type = build_function_type_list (void_type_node,
++					    const_ptr_type_node, NULL_TREE);
++      tree decl = add_builtin_function ("__builtin_prefetch", type,
++					BUILT_IN_PREFETCH, BUILT_IN_NORMAL,
++					NULL, NULL_TREE);
++      DECL_IS_NOVOPS (decl) = true;
++      set_builtin_decl (BUILT_IN_PREFETCH, decl, false);
++    }
++  if (!builtin_decl_explicit_p (BUILT_IN_PREFETCH_FULL))
++    {
++      tree type = build_function_type_list (void_type_node,
++					    const_ptr_type_node, NULL_TREE);
++      tree decl = add_builtin_function ("__builtin_prefetch_full", type,
++					BUILT_IN_PREFETCH_FULL, BUILT_IN_NORMAL,
++					NULL, NULL_TREE);
++      DECL_IS_NOVOPS (decl) = true;
++      set_builtin_decl (BUILT_IN_PREFETCH_FULL, decl, false);
++    }
++
++  dump_param ();
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    fprintf (dump_file, "llc_allocate: %s\n",
++	     IDENTIFIER_POINTER (DECL_NAME (fn->decl)));
++
++  if (number_of_loops (fn) <= 1  || !func_location_p (fn)
++      || operator_func_p (fn))
++    return ret;
++
++  /* Filter only when combined with PMU event. When the should_analyze_func_p
++     analysis fails (for example, the function without PMU-event count),
++     in order to ensure the accuracy of the LLC allocation analysis, the
++     function does not perform native allocation processing.  */
++  if (flag_additional_profile && (!profile_exist (PMU_EVENT) || !should_analyze_func_p ()))
++    {
++      return 0;
++    }
++
++  dump_function_info (fn);
++
++  llc_allocate ();
++
++  return ret;
++}
++
++} // anon namespace
++
++gimple_opt_pass *
++make_pass_llc_allocate (gcc::context *ctxt)
++{
++  return new pass_llc_allocate (ctxt);
++}
+diff --git a/gcc/tree-ssa-loop-niter.cc b/gcc/tree-ssa-loop-niter.cc
+index 0353ffd30..0492dc6fd 100644
+--- a/gcc/tree-ssa-loop-niter.cc
++++ b/gcc/tree-ssa-loop-niter.cc
+@@ -2489,6 +2489,37 @@ loop_only_exit_p (const class loop *loop, basic_block *body, const_edge exit)
+   return true;
+ }
+ 
++/* Returns whether the number of vectorized iterations for the loop can be
++   estimated from the given IR and update the corresponding loop attribute,
++   e.g., next_mask_114 = .WHILE_ULT (_122, niters.5_75, { 0, ... });  */
++
++bool
++number_of_iterations_vect (class loop *loop, tree lhs, tree rhs)
++{
++  loop->vec_nb_iterations = chrec_dont_know;
++
++  if ((TREE_CODE (lhs) != SSA_NAME && TREE_CODE (rhs) != SSA_NAME)
++      || (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME))
++    return false;
++
++  tree ssa = TREE_CODE (lhs) == SSA_NAME ? lhs : rhs;
++  gimple *def_stmt = SSA_NAME_DEF_STMT (ssa);
++
++  if (gimple_code (def_stmt) != GIMPLE_CALL
++      || !gimple_call_internal_p (def_stmt))
++    return false;
++
++  internal_fn ifn = gimple_call_internal_fn (def_stmt);
++  if (ifn != IFN_WHILE_ULT)
++    return false;
++
++  gcall *call = dyn_cast (def_stmt);
++  tree niters = gimple_call_arg (call, 1);
++  loop->vec_nb_iterations = niters;
++
++  return true;
++}
++
+ /* Stores description of number of iterations of LOOP derived from
+    EXIT (an exit edge of the LOOP) in NITER.  Returns true if some useful
+    information could be derived (and fields of NITER have meaning described
+@@ -2559,6 +2590,9 @@ number_of_iterations_exit_assumptions (class loop *loop, edge exit,
+   op1 = gimple_cond_rhs (stmt);
+   type = TREE_TYPE (op0);
+ 
++  if (TREE_CODE (type) == VECTOR_TYPE)
++    number_of_iterations_vect (loop, op0, op1);
++
+   if (TREE_CODE (type) != INTEGER_TYPE
+       && !POINTER_TYPE_P (type))
+     return false;
+@@ -2852,14 +2886,14 @@ bool
+ number_of_iterations_exit (class loop *loop, edge exit,
+ 			   class tree_niter_desc *niter,
+ 			   bool warn, bool every_iteration,
+-			   basic_block *body)
++			   basic_block *body, bool guarantee)
+ {
+   gcond *stmt;
+   if (!number_of_iterations_exit_assumptions (loop, exit, niter,
+ 					      &stmt, every_iteration, body))
+     return false;
+ 
+-  if (integer_nonzerop (niter->assumptions))
++  if (integer_nonzerop (niter->assumptions) || guarantee == false)
+     return true;
+ 
+   if (warn && dump_enabled_p ())
+diff --git a/gcc/tree-ssa-loop-niter.h b/gcc/tree-ssa-loop-niter.h
+index ceaf65e07..8f03458f7 100644
+--- a/gcc/tree-ssa-loop-niter.h
++++ b/gcc/tree-ssa-loop-niter.h
+@@ -27,7 +27,8 @@ extern bool loop_only_exit_p (const class loop *, basic_block *body,
+ extern bool number_of_iterations_exit (class loop *, edge,
+ 				       class tree_niter_desc *niter, bool,
+ 				       bool every_iteration = true,
+-				       basic_block * = NULL);
++				       basic_block * = NULL,
++				       bool guarantee = true);
+ extern bool number_of_iterations_exit_assumptions (class loop *, edge,
+ 						   class tree_niter_desc *,
+ 						   gcond **, bool = true,
+diff --git a/gcc/tree-vect-loop-manip.cc b/gcc/tree-vect-loop-manip.cc
+index 9d21e6d03..6e61f7140 100644
+--- a/gcc/tree-vect-loop-manip.cc
++++ b/gcc/tree-vect-loop-manip.cc
+@@ -3738,3 +3738,269 @@ vect_loop_versioning (loop_vec_info loop_vinfo,
+ 
+   return nloop;
+ }
++
++class loop *
++vect_loop_versioning_2 (loop_vec_info loop_vinfo,
++		      gimple *loop_vectorized_call)
++{
++  class loop *loop = LOOP_VINFO_LOOP (loop_vinfo), *nloop;
++  class loop *scalar_loop = LOOP_VINFO_SCALAR_LOOP (loop_vinfo);
++  basic_block condition_bb;
++  gphi_iterator gsi;
++  gimple_stmt_iterator cond_exp_gsi;
++  basic_block merge_bb;
++  basic_block new_exit_bb;
++  edge new_exit_e, e;
++  gphi *orig_phi, *new_phi;
++  tree cond_expr = NULL_TREE;
++  gimple_seq cond_expr_stmt_list = NULL;
++  tree arg;
++  profile_probability prob = profile_probability::likely ();
++  gimple_seq gimplify_stmt_list = NULL;
++  tree scalar_loop_iters = LOOP_VINFO_NITERSM1 (loop_vinfo);
++  bool version_align = LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo);
++  bool version_alias = LOOP_REQUIRES_VERSIONING_FOR_ALIAS (loop_vinfo);
++  bool version_niter = LOOP_REQUIRES_VERSIONING_FOR_NITERS (loop_vinfo);
++  poly_uint64 versioning_threshold
++    = LOOP_VINFO_VERSIONING_THRESHOLD (loop_vinfo);
++  tree version_simd_if_cond
++    = LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (loop_vinfo);
++  unsigned th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
++
++  if (vect_apply_runtime_profitability_check_p (loop_vinfo)
++      && !ordered_p (th, versioning_threshold))
++    cond_expr = fold_build2 (GE_EXPR, boolean_type_node, scalar_loop_iters,
++			     build_int_cst (TREE_TYPE (scalar_loop_iters),
++					    th - 1));
++  if (maybe_ne (versioning_threshold, 0U))
++    {
++      tree expr = fold_build2 (GE_EXPR, boolean_type_node, scalar_loop_iters,
++			       build_int_cst (TREE_TYPE (scalar_loop_iters),
++					      versioning_threshold - 1));
++      if (cond_expr)
++	cond_expr = fold_build2 (BIT_AND_EXPR, boolean_type_node,
++				 expr, cond_expr);
++      else
++	cond_expr = expr;
++    }
++
++  if (version_niter)
++    vect_create_cond_for_niters_checks (loop_vinfo, &cond_expr);
++
++  if (cond_expr)
++    cond_expr = force_gimple_operand_1 (unshare_expr (cond_expr),
++					&cond_expr_stmt_list,
++					is_gimple_condexpr, NULL_TREE);
++
++  if (version_align)
++    vect_create_cond_for_align_checks (loop_vinfo, &cond_expr,
++				       &cond_expr_stmt_list);
++
++  if (version_alias)
++    {
++      vect_create_cond_for_unequal_addrs (loop_vinfo, &cond_expr);
++      vect_create_cond_for_lower_bounds (loop_vinfo, &cond_expr);
++      vect_create_cond_for_alias_checks (loop_vinfo, &cond_expr);
++    }
++
++  if (version_simd_if_cond)
++    {
++      gcc_assert (dom_info_available_p (CDI_DOMINATORS));
++      if (flag_checking)
++	if (basic_block bb
++	    = gimple_bb (SSA_NAME_DEF_STMT (version_simd_if_cond)))
++	  gcc_assert (bb != loop->header
++		      && dominated_by_p (CDI_DOMINATORS, loop->header, bb)
++		      && (scalar_loop == NULL
++			  || (bb != scalar_loop->header
++			      && dominated_by_p (CDI_DOMINATORS,
++						 scalar_loop->header, bb))));
++      tree zero = build_zero_cst (TREE_TYPE (version_simd_if_cond));
++      tree c = fold_build2 (NE_EXPR, boolean_type_node,
++			    version_simd_if_cond, zero);
++      if (cond_expr)
++	cond_expr = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
++				 c, cond_expr);
++      else
++	cond_expr = c;
++      if (dump_enabled_p ())
++	dump_printf_loc (MSG_NOTE, vect_location,
++			 "created versioning for simd if condition check.\n");
++    }
++
++  cond_expr = force_gimple_operand_1 (unshare_expr (cond_expr),
++				      &gimplify_stmt_list,
++				      is_gimple_condexpr, NULL_TREE);
++  gimple_seq_add_seq (&cond_expr_stmt_list, gimplify_stmt_list);
++
++  /* Compute the outermost loop cond_expr and cond_expr_stmt_list are
++     invariant in.  */
++  class loop *outermost = outermost_invariant_loop_for_expr (loop, cond_expr);
++  for (gimple_stmt_iterator gsi = gsi_start (cond_expr_stmt_list);
++       !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      gimple *stmt = gsi_stmt (gsi);
++      update_stmt (stmt);
++      ssa_op_iter iter;
++      use_operand_p use_p;
++      basic_block def_bb;
++      FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_USE)
++	if ((def_bb = gimple_bb (SSA_NAME_DEF_STMT (USE_FROM_PTR (use_p))))
++	    && flow_bb_inside_loop_p (outermost, def_bb))
++	  outermost = superloop_at_depth (loop, bb_loop_depth (def_bb) + 1);
++    }
++
++  /* Search for the outermost loop we can version.  Avoid versioning of
++     non-perfect nests but allow if-conversion versioned loops inside.  */
++  class loop *loop_to_version = loop;
++  if (flow_loop_nested_p (outermost, loop))
++    { 
++      if (dump_enabled_p ())
++	dump_printf_loc (MSG_NOTE, vect_location,
++			 "trying to apply versioning to outer loop %d\n",
++			 outermost->num);
++      if (outermost->num == 0)
++	outermost = superloop_at_depth (loop, 1);
++      /* And avoid applying versioning on non-perfect nests.  */
++      while (loop_to_version != outermost
++	     && single_exit (loop_outer (loop_to_version))
++	     && (!loop_outer (loop_to_version)->inner->next
++		 || vect_loop_vectorized_call (loop_to_version))
++	     && (!loop_outer (loop_to_version)->inner->next
++		 || !loop_outer (loop_to_version)->inner->next->next))
++	loop_to_version = loop_outer (loop_to_version);
++    }
++
++  /* Apply versioning.  If there is already a scalar version created by
++     if-conversion re-use that.  Note we cannot re-use the copy of
++     an if-converted outer-loop when vectorizing the inner loop only.  */
++  gcond *cond;
++  if ((!loop_to_version->inner || loop == loop_to_version)
++      && loop_vectorized_call)
++    {
++      gcc_assert (scalar_loop);
++      condition_bb = gimple_bb (loop_vectorized_call);
++      cond = as_a  (last_stmt (condition_bb));
++      gimple_cond_set_condition_from_tree (cond, cond_expr);
++      update_stmt (cond);
++
++      if (cond_expr_stmt_list)
++	{
++	  cond_exp_gsi = gsi_for_stmt (loop_vectorized_call);
++	  gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
++				 GSI_SAME_STMT);
++	}
++
++      /* if-conversion uses profile_probability::always () for both paths,
++	 reset the paths probabilities appropriately.  */
++      edge te, fe;
++      extract_true_false_edges_from_block (condition_bb, &te, &fe);
++      te->probability = prob;
++      fe->probability = prob.invert ();
++      /* We can scale loops counts immediately but have to postpone
++	 scaling the scalar loop because we re-use it during peeling.  */
++      scale_loop_frequencies (loop_to_version, te->probability);
++      LOOP_VINFO_SCALAR_LOOP_SCALING (loop_vinfo) = fe->probability;
++
++      nloop = scalar_loop;
++      if (dump_enabled_p ())
++	dump_printf_loc (MSG_NOTE, vect_location,
++			 "reusing %sloop version created by if conversion\n",
++			 loop_to_version != loop ? "outer " : "");
++    }
++  else
++    {
++      if (loop_to_version != loop
++	  && dump_enabled_p ())
++	dump_printf_loc (MSG_NOTE, vect_location,
++			 "applying loop versioning to outer loop %d\n",
++			 loop_to_version->num);
++
++      initialize_original_copy_tables ();
++      nloop = loop_version (loop_to_version, cond_expr, &condition_bb,
++			    prob, prob.invert (), prob, prob.invert (), true);
++      gcc_assert (nloop);
++      nloop = get_loop_copy (loop);
++
++      /* Kill off IFN_LOOP_VECTORIZED_CALL in the copy, nobody will
++	 reap those otherwise;  they also refer to the original
++	 loops.  */
++      class loop *l = loop;
++      while (gimple *call = vect_loop_vectorized_call (l))
++	{
++	  call = SSA_NAME_DEF_STMT (get_current_def (gimple_call_lhs (call)));
++	  fold_loop_internal_call (call, boolean_false_node);
++	  l = loop_outer (l);
++	}
++      free_original_copy_tables ();
++
++      if (cond_expr_stmt_list)
++	{
++	  cond_exp_gsi = gsi_last_bb (condition_bb);
++	  gsi_insert_seq_before (&cond_exp_gsi, cond_expr_stmt_list,
++				 GSI_SAME_STMT);
++	}
++
++      /* Loop versioning violates an assumption we try to maintain during
++	 vectorization - that the loop exit block has a single predecessor.
++	 After versioning, the exit block of both loop versions is the same
++	 basic block (i.e. it has two predecessors). Just in order to simplify
++	 following transformations in the vectorizer, we fix this situation
++	 here by adding a new (empty) block on the exit-edge of the loop,
++	 with the proper loop-exit phis to maintain loop-closed-form.
++	 If loop versioning wasn't done from loop, but scalar_loop instead,
++	 merge_bb will have already just a single successor.  */
++
++      merge_bb = single_exit (loop_to_version)->dest;
++      if (EDGE_COUNT (merge_bb->preds) >= 2)
++	{
++	  gcc_assert (EDGE_COUNT (merge_bb->preds) >= 2);
++	  new_exit_bb = split_edge (single_exit (loop_to_version));
++	  new_exit_e = single_exit (loop_to_version);
++	  e = EDGE_SUCC (new_exit_bb, 0);
++
++	  for (gsi = gsi_start_phis (merge_bb); !gsi_end_p (gsi);
++	       gsi_next (&gsi))
++	    {
++	      tree new_res;
++	      orig_phi = gsi.phi ();
++	      new_res = copy_ssa_name (PHI_RESULT (orig_phi));
++	      new_phi = create_phi_node (new_res, new_exit_bb);
++	      arg = PHI_ARG_DEF_FROM_EDGE (orig_phi, e);
++	      add_phi_arg (new_phi, arg, new_exit_e,
++			   gimple_phi_arg_location_from_edge (orig_phi, e));
++	      adjust_phi_and_debug_stmts (orig_phi, e, PHI_RESULT (new_phi));
++	    }
++	}
++
++      update_ssa (TODO_update_ssa);
++    }
++
++  if (version_niter)
++    {
++      /* The versioned loop could be infinite, we need to clear existing
++	 niter information which is copied from the original loop.  */
++      gcc_assert (loop_constraint_set_p (loop, LOOP_C_FINITE));
++      vect_free_loop_info_assumptions (nloop);
++      /* And set constraint LOOP_C_INFINITE for niter analyzer.  */
++      loop_constraint_set (loop, LOOP_C_INFINITE);
++    }
++
++  if (LOCATION_LOCUS (vect_location.get_location_t ()) != UNKNOWN_LOCATION
++      && dump_enabled_p ())
++    {
++      if (version_alias)
++	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | MSG_PRIORITY_USER_FACING,
++			 vect_location,
++			 "loop versioned for vectorization because of "
++			 "possible aliasing\n");
++      if (version_align)
++	dump_printf_loc (MSG_OPTIMIZED_LOCATIONS | MSG_PRIORITY_USER_FACING,
++			 vect_location,
++			 "loop versioned for vectorization to enhance "
++			 "alignment\n");
++
++    }
++
++  return nloop;
++}
+diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
+index 7f7577951..023a83c38 100644
+--- a/gcc/tree-vect-loop.cc
++++ b/gcc/tree-vect-loop.cc
+@@ -9735,8 +9735,11 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
+ 
+   if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
+     {
+-      class loop *sloop
+-	= vect_loop_versioning (loop_vinfo, loop_vectorized_call);
++      class loop *sloop;
++      if (!(optimize >= 2 && flag_llc_allocate > 0)) 
++	sloop = vect_loop_versioning (loop_vinfo, loop_vectorized_call);
++      else 
++	sloop = vect_loop_versioning_2 (loop_vinfo, loop_vectorized_call);
+       sloop->force_vectorize = false;
+       check_profitability = false;
+     }
+@@ -9989,7 +9992,8 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
+ 			   niters_vector_mult_vf, !niters_no_overflow);
+ 
+   unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
+-  scale_profile_for_vect_loop (loop, assumed_vf);
++  if (!(optimize >= 2 && flag_llc_allocate > 0))
++    scale_profile_for_vect_loop (loop, assumed_vf);
+ 
+   /* True if the final iteration might not handle a full vector's
+      worth of scalar iterations.  */
+diff --git a/gcc/tree-vectorizer.h b/gcc/tree-vectorizer.h
+index e13bc6c99..85018f250 100644
+--- a/gcc/tree-vectorizer.h
++++ b/gcc/tree-vectorizer.h
+@@ -2177,6 +2177,7 @@ extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge);
+ class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *,
+ 						     class loop *, edge);
+ class loop *vect_loop_versioning (loop_vec_info, gimple *);
++class loop *vect_loop_versioning_2 (loop_vec_info, gimple *);
+ extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
+ 				    tree *, tree *, tree *, int, bool, bool,
+ 				    tree *);
+-- 
+2.44.0.windows.1
+
diff --git a/0366-fix-prefetch-case-failed.patch b/0366-fix-prefetch-case-failed.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9c21445f26bf3767034be574c6891d5546cb2bd2
--- /dev/null
+++ b/0366-fix-prefetch-case-failed.patch
@@ -0,0 +1,144 @@
+From c7bdc03e48a0b6e213c5a4b8c821665d7ca897bb Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=99=88=E9=B8=BF?= 
+Date: Thu, 6 Mar 2025 14:58:57 +0800
+Subject: [PATCH] fix prefetch case failed
+
+---
+ gcc/params.opt                                              | 2 +-
+ .../gcc.target/aarch64/sve/acle/general-c/prefetch_1.c      | 6 +++---
+ .../aarch64/sve/acle/general-c/prefetch_gather_index_1.c    | 6 +++---
+ .../aarch64/sve/acle/general-c/prefetch_gather_index_2.c    | 6 +++---
+ .../aarch64/sve/acle/general-c/prefetch_gather_offset_1.c   | 6 +++---
+ .../aarch64/sve/acle/general-c/prefetch_gather_offset_2.c   | 6 +++---
+ .../aarch64/sve/acle/general-c/prefetch_gather_offset_3.c   | 6 +++---
+ .../aarch64/sve/acle/general-c/prefetch_gather_offset_4.c   | 6 +++---
+ 8 files changed, 22 insertions(+), 22 deletions(-)
+
+diff --git a/gcc/params.opt b/gcc/params.opt
+index e06e50611..a716f2cc4 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1305,7 +1305,7 @@ cannot recognize inner loop boundaries.
+ -param=llc-level=
+ Common Joined UInteger Var(param_llc_level) Init(3) IntegerRange(3, 4)
+ Param Optimization
+-Specifies the HBM cache level.
++Specifies the LLC cache level.
+ 
+ -param=filter-mode=
+ Common Joined UInteger Var(param_filter_mode) Init(1) IntegerRange(0, 1) Param
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_1.c
+index 316f77fc7..c8094ba2b 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_1.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_1.c
+@@ -10,8 +10,8 @@ f1 (svbool_t pg, int32_t *s32_ptr, enum svprfop op)
+   svprfb (pg, s32_ptr, (enum svprfop) -1); /* { dg-error {passing 4294967295 to argument 3 of 'svprfb', which expects a valid 'enum svprfop' value} } */
+   svprfb (pg, s32_ptr, (enum svprfop) 0);
+   svprfb (pg, s32_ptr, (enum svprfop) 5);
+-  svprfb (pg, s32_ptr, (enum svprfop) 6); /* { dg-error {passing 6 to argument 3 of 'svprfb', which expects a valid 'enum svprfop' value} } */
+-  svprfb (pg, s32_ptr, (enum svprfop) 7); /* { dg-error {passing 7 to argument 3 of 'svprfb', which expects a valid 'enum svprfop' value} } */
++  svprfb (pg, s32_ptr, (enum svprfop) 6);
++  svprfb (pg, s32_ptr, (enum svprfop) 7);
+   svprfb (pg, s32_ptr, (enum svprfop) 8);
+-  svprfb (pg, s32_ptr, (enum svprfop) 14); /* { dg-error {passing 14 to argument 3 of 'svprfb', which expects a valid 'enum svprfop' value} } */
++  svprfb (pg, s32_ptr, (enum svprfop) 14);
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_1.c
+index c33c95440..862ec082b 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_1.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_1.c
+@@ -46,8 +46,8 @@ f1 (svbool_t pg, int32_t *s32_ptr, void *void_ptr, void **ptr_ptr,
+   svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) -1); /* { dg-error {passing 4294967295 to argument 4 of 'svprfh_gather_index', which expects a valid 'enum svprfop' value} } */
+   svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 0);
+   svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 5);
+-  svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 6); /* { dg-error {passing 6 to argument 4 of 'svprfh_gather_index', which expects a valid 'enum svprfop' value} } */
+-  svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 7); /* { dg-error {passing 7 to argument 4 of 'svprfh_gather_index', which expects a valid 'enum svprfop' value} } */
++  svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 6);
++  svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 7);
+   svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 8);
+-  svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 14); /* { dg-error {passing 14 to argument 4 of 'svprfh_gather_index', which expects a valid 'enum svprfop' value} } */
++  svprfh_gather_index (pg, s32_ptr, s32, (enum svprfop) 14);
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_2.c
+index 3d7797305..f4873c631 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_2.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_index_2.c
+@@ -10,8 +10,8 @@ f1 (svbool_t pg, int32_t *s32_ptr, svint32_t s32, enum svprfop op)
+   svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) -1); /* { dg-error {passing 4294967295 to argument 4 of 'svprfh_gather_s32index', which expects a valid 'enum svprfop' value} } */
+   svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 0);
+   svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 5);
+-  svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 6); /* { dg-error {passing 6 to argument 4 of 'svprfh_gather_s32index', which expects a valid 'enum svprfop' value} } */
+-  svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 7); /* { dg-error {passing 7 to argument 4 of 'svprfh_gather_s32index', which expects a valid 'enum svprfop' value} } */
++  svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 6);
++  svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 7);
+   svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 8);
+-  svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 14); /* { dg-error {passing 14 to argument 4 of 'svprfh_gather_s32index', which expects a valid 'enum svprfop' value} } */
++  svprfh_gather_s32index (pg, s32_ptr, s32, (enum svprfop) 14);
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_1.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_1.c
+index cc61901cb..3b82b4777 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_1.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_1.c
+@@ -46,8 +46,8 @@ f1 (svbool_t pg, int32_t *s32_ptr, void *void_ptr, void **ptr_ptr,
+   svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) -1); /* { dg-error {passing 4294967295 to argument 4 of 'svprfb_gather_offset', which expects a valid 'enum svprfop' value} } */
+   svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 0);
+   svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 5);
+-  svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 6); /* { dg-error {passing 6 to argument 4 of 'svprfb_gather_offset', which expects a valid 'enum svprfop' value} } */
+-  svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 7); /* { dg-error {passing 7 to argument 4 of 'svprfb_gather_offset', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 6);
++  svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 7);
+   svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 8);
+-  svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 14); /* { dg-error {passing 14 to argument 4 of 'svprfb_gather_offset', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather_offset (pg, s32_ptr, s32, (enum svprfop) 14);
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c
+index 88e0c35e7..2be620de5 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_2.c
+@@ -30,8 +30,8 @@ f1 (svbool_t pg, svint8_t s8, svuint8_t u8,
+   svprfb_gather (pg, u32, (enum svprfop) -1); /* { dg-error {passing 4294967295 to argument 3 of 'svprfb_gather', which expects a valid 'enum svprfop' value} } */
+   svprfb_gather (pg, u32, (enum svprfop) 0);
+   svprfb_gather (pg, u32, (enum svprfop) 5);
+-  svprfb_gather (pg, u32, (enum svprfop) 6); /* { dg-error {passing 6 to argument 3 of 'svprfb_gather', which expects a valid 'enum svprfop' value} } */
+-  svprfb_gather (pg, u32, (enum svprfop) 7); /* { dg-error {passing 7 to argument 3 of 'svprfb_gather', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather (pg, u32, (enum svprfop) 6);
++  svprfb_gather (pg, u32, (enum svprfop) 7);
+   svprfb_gather (pg, u32, (enum svprfop) 8);
+-  svprfb_gather (pg, u32, (enum svprfop) 14); /* { dg-error {passing 14 to argument 3 of 'svprfb_gather', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather (pg, u32, (enum svprfop) 14);
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_3.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_3.c
+index 24b4aa190..9a1d931e9 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_3.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_3.c
+@@ -10,8 +10,8 @@ f1 (svbool_t pg, int32_t *s32_ptr, svint32_t s32, enum svprfop op)
+   svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) -1); /* { dg-error {passing 4294967295 to argument 4 of 'svprfb_gather_s32offset', which expects a valid 'enum svprfop' value} } */
+   svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 0);
+   svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 5);
+-  svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 6); /* { dg-error {passing 6 to argument 4 of 'svprfb_gather_s32offset', which expects a valid 'enum svprfop' value} } */
+-  svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 7); /* { dg-error {passing 7 to argument 4 of 'svprfb_gather_s32offset', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 6);
++  svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 7);
+   svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 8);
+-  svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 14); /* { dg-error {passing 14 to argument 4 of 'svprfb_gather_s32offset', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather_s32offset (pg, s32_ptr, s32, (enum svprfop) 14);
+ }
+diff --git a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_4.c b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_4.c
+index 63ccdc5a4..f7ca09507 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_4.c
++++ b/gcc/testsuite/gcc.target/aarch64/sve/acle/general-c/prefetch_gather_offset_4.c
+@@ -10,8 +10,8 @@ f1 (svbool_t pg, svuint32_t u32, enum svprfop op)
+   svprfb_gather_u32base (pg, u32, (enum svprfop) -1); /* { dg-error {passing 4294967295 to argument 3 of 'svprfb_gather_u32base', which expects a valid 'enum svprfop' value} } */
+   svprfb_gather_u32base (pg, u32, (enum svprfop) 0);
+   svprfb_gather_u32base (pg, u32, (enum svprfop) 5);
+-  svprfb_gather_u32base (pg, u32, (enum svprfop) 6); /* { dg-error {passing 6 to argument 3 of 'svprfb_gather_u32base', which expects a valid 'enum svprfop' value} } */
+-  svprfb_gather_u32base (pg, u32, (enum svprfop) 7); /* { dg-error {passing 7 to argument 3 of 'svprfb_gather_u32base', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather_u32base (pg, u32, (enum svprfop) 6);
++  svprfb_gather_u32base (pg, u32, (enum svprfop) 7);
+   svprfb_gather_u32base (pg, u32, (enum svprfop) 8);
+-  svprfb_gather_u32base (pg, u32, (enum svprfop) 14); /* { dg-error {passing 14 to argument 3 of 'svprfb_gather_u32base', which expects a valid 'enum svprfop' value} } */
++  svprfb_gather_u32base (pg, u32, (enum svprfop) 14);
+ }
+-- 
+2.44.0.windows.1
+
diff --git a/0367-llc-feature-bugfix.patch b/0367-llc-feature-bugfix.patch
new file mode 100644
index 0000000000000000000000000000000000000000..57abf68d46b774159d0bfb3b3c8c4a699f8649f0
--- /dev/null
+++ b/0367-llc-feature-bugfix.patch
@@ -0,0 +1,79 @@
+From 9bb4c61897abb16d77a0614d4465bf2b0d67b265 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=99=88=E9=B8=BF?= 
+Date: Mon, 10 Mar 2025 17:00:37 +0800
+Subject: [PATCH] llc feature bugfix
+
+---
+ gcc/params.opt                            |  2 +-
+ gcc/testsuite/gcc.dg/llc-allocate/llc-1.c |  2 +-
+ gcc/tree-ssa-llc-allocate.cc              |  2 +-
+ gcc/tree-vect-loop.cc                     | 10 +++-------
+ 4 files changed, 6 insertions(+), 10 deletions(-)
+
+diff --git a/gcc/params.opt b/gcc/params.opt
+index a716f2cc4..ed7559783 100644
+--- a/gcc/params.opt
++++ b/gcc/params.opt
+@@ -1285,7 +1285,7 @@ Common Joined UInteger Var(param_issue_topn) Init(1) Param Optimization
+ Issue topn LLC mem_ref hint.
+ 
+ -param=force-issue=
+-Common Joined UInteger Var(param_force_issue) Init(0) IntegerRange(0, 1) Param
++Common Joined UInteger Var(param_force_issue) Init(1) IntegerRange(0, 1) Param
+ Force issue the topn LLC mem_ref hint, without generating dynamic multi-branches.
+ 
+ -param=llc-capacity-per-core=
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
+index 091e654f9..0b81394ad 100644
+--- a/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
+@@ -58,4 +58,4 @@ main (int argc, char *argv[])
+ /* { dg-final { scan-tree-dump       "\\d\\tuPtr\\t\\(1.466660, 1, 1, 0\\)" "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump-times "runtime issue" 1 "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump-times "static issue" 2 "llc_allocate" } } */
+-/* { dg-final { scan-tree-dump-times "insert svprfd" 4 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "insert svprfd" 2 "llc_allocate" } } */
+diff --git a/gcc/tree-ssa-llc-allocate.cc b/gcc/tree-ssa-llc-allocate.cc
+index da6d72b94..d10d60459 100644
+--- a/gcc/tree-ssa-llc-allocate.cc
++++ b/gcc/tree-ssa-llc-allocate.cc
+@@ -3822,7 +3822,7 @@ issue_llc_hint (std::vector &ref_groups,
+ 			  "ref_group(s) is found for llc hint.\n",
+ 	       num_issue_var, param_issue_topn);
+     }
+-  if (param_force_issue)
++  if (param_force_issue == 1 || param_force_issue == 0)
+     {
+       static_issue (ref_groups, num_issue_var);
+       return;
+diff --git a/gcc/tree-vect-loop.cc b/gcc/tree-vect-loop.cc
+index 023a83c38..7f7577951 100644
+--- a/gcc/tree-vect-loop.cc
++++ b/gcc/tree-vect-loop.cc
+@@ -9735,11 +9735,8 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
+ 
+   if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
+     {
+-      class loop *sloop;
+-      if (!(optimize >= 2 && flag_llc_allocate > 0)) 
+-	sloop = vect_loop_versioning (loop_vinfo, loop_vectorized_call);
+-      else 
+-	sloop = vect_loop_versioning_2 (loop_vinfo, loop_vectorized_call);
++      class loop *sloop
++	= vect_loop_versioning (loop_vinfo, loop_vectorized_call);
+       sloop->force_vectorize = false;
+       check_profitability = false;
+     }
+@@ -9992,8 +9989,7 @@ vect_transform_loop (loop_vec_info loop_vinfo, gimple *loop_vectorized_call)
+ 			   niters_vector_mult_vf, !niters_no_overflow);
+ 
+   unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
+-  if (!(optimize >= 2 && flag_llc_allocate > 0))
+-    scale_profile_for_vect_loop (loop, assumed_vf);
++  scale_profile_for_vect_loop (loop, assumed_vf);
+ 
+   /* True if the final iteration might not handle a full vector's
+      worth of scalar iterations.  */
+-- 
+2.44.0.windows.1
+
diff --git a/0368-fix-llc-feature-case-failed.patch b/0368-fix-llc-feature-case-failed.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3bfcf72aa224f09c77652150932bc019c87d2078
--- /dev/null
+++ b/0368-fix-llc-feature-case-failed.patch
@@ -0,0 +1,78 @@
+From 889fed32e6e86a64974ec9edc69cd2c88c14e6f0 Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?=E9=99=88=E9=B8=BF?= 
+Date: Sat, 15 Mar 2025 14:58:11 +0800
+Subject: [PATCH] fix llc feature case failed
+
+---
+ gcc/testsuite/gcc.dg/llc-allocate/llc-1.c                     | 2 +-
+ gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c        | 2 +-
+ gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c             | 2 +-
+ gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90              | 4 ++--
+ .../gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90   | 2 +-
+ 5 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
+index 0b81394ad..55d1396d4 100644
+--- a/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-1.c
+@@ -56,6 +56,6 @@ main (int argc, char *argv[])
+ /* { dg-final { scan-tree-dump       "\\d\\tupperPtr\\t\\(2.933319, 1, 1, 0\\)" "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump       "\\d\\tlPtr\\t\\(1.466660, 1, 1, 0\\)" "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump       "\\d\\tuPtr\\t\\(1.466660, 1, 1, 0\\)" "llc_allocate" } } */
+-/* { dg-final { scan-tree-dump-times "runtime issue" 1 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "runtime issue" 0 "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump-times "static issue" 2 "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump-times "insert svprfd" 2 "llc_allocate" } } */
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c
+index e18725f60..5e908b380 100644
+--- a/gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-nonzero-offset.c
+@@ -46,5 +46,5 @@ convert_regs_exit (basic_block bb, int value_reg_low, int value_reg_high)
+   return output_stack->reg[0];
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "runtime issue" 1 "llc_allocate" } } */
++/* { dg-final { scan-tree-dump-times "runtime issue" 0 "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump-times "static issue" 1 "llc_allocate" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c b/gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c
+index ba90e7ea4..9196d1d95 100644
+--- a/gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c
++++ b/gcc/testsuite/gcc.dg/llc-allocate/llc-ref-trace.c
+@@ -59,4 +59,4 @@ main (int argc, char *argv[])
+ 
+ /* { dg-final { scan-tree-dump-times "Tracing succeeded" 24 "llc_allocate" } } */
+ /* { dg-final { scan-tree-dump-not "Tracing failed" "llc_allocate" } } */
+-/* { dg-final { scan-tree-dump-times "unhandled issue scene" 2 "llc_allocate" } } */
+\ No newline at end of file
++/* { dg-final { scan-tree-dump-times "unhandled issue scene" 0 "llc_allocate" } } */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90 b/gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90
+index b0f68ebe3..da9669639 100644
+--- a/gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90
++++ b/gcc/testsuite/gfortran.dg/llc-allocate/llc-3.f90
+@@ -205,7 +205,7 @@ END SUBROUTINE calc_p_rho
+ ! { dg-final { scan-tree-dump-times "\\d\\tt_1\\t\\(0.000000, 3, 1, 0\\)" 1 "llc_allocate" } }
+ ! { dg-final { scan-tree-dump-times "\\d\\tt_2\\t\\(0.000000, 3, 1, 0\\)" 1 "llc_allocate" } }
+ ! { dg-final { scan-tree-dump-times "\\d\\tc2a\\t\\(0.000000, 3, 1, 0\\)" 2 "llc_allocate" } }
+-! { dg-final { scan-tree-dump-times "runtime issue" 2 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "runtime issue" 0 "llc_allocate" } }
+ ! { dg-final { scan-tree-dump-times "static issue" 2 "llc_allocate" } }
+ ! { dg-final { scan-tree-dump-times "insert svprfd" 2 "llc_allocate" } }
+-! { dg-final { scan-tree-dump-times "cumul_size.*150960\\)" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "cumul_size.*150960\\)" 0 "llc_allocate" } }
+diff --git a/gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90 b/gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90
+index 7345759db..eb2cc8690 100644
+--- a/gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90
++++ b/gcc/testsuite/gfortran.dg/llc-allocate/llc-unknown-type-size-unit.f90
+@@ -54,5 +54,5 @@ SUBROUTINE calc_p8w(p8w, ix, iy, k_start, k_end)
+ 
+ END SUBROUTINE calc_p8w
+ 
+-! { dg-final { scan-tree-dump-times "runtime issue" 1 "llc_allocate" } }
++! { dg-final { scan-tree-dump-times "runtime issue" 0 "llc_allocate" } }
+ ! { dg-final { scan-tree-dump-times "static issue" 1 "llc_allocate" } }
+\ No newline at end of file
+-- 
+2.44.0.windows.1
+
diff --git a/0369-SME-start-za-before-write-address-to-tpidr2.patch b/0369-SME-start-za-before-write-address-to-tpidr2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8bf8eb51c59d1bea3eedbe7725319a9cc1e5f62b
--- /dev/null
+++ b/0369-SME-start-za-before-write-address-to-tpidr2.patch
@@ -0,0 +1,64 @@
+From cc67c76a448a49783ccb317ada1db6523bd637d7 Mon Sep 17 00:00:00 2001
+From: eastb233 
+Date: Mon, 24 Mar 2025 10:13:44 +0800
+Subject: [PATCH] [SME] start za before write address to tpidr2
+
+---
+ gcc/config/aarch64/aarch64.cc                     | 1 +
+ gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c | 3 ++-
+ gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c | 3 ++-
+ 3 files changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index a06c2c515..dea9447b4 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -30332,6 +30332,7 @@ aarch64_mode_emit_local_sme_state (aarch64_local_sme_state mode,
+ 	  || prev_mode == aarch64_local_sme_state::ACTIVE_DEAD
+ 	  || prev_mode == aarch64_local_sme_state::INACTIVE_CALLER)
+ 	{
++	  emit_insn (gen_aarch64_smstart_za ());
+ 	  /* A transition from ACTIVE_LIVE to INACTIVE_LOCAL is the usual
+ 	     case of setting up a lazy save buffer before a call.
+ 	     A transition from INACTIVE_CALLER is similar, except that
+diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c
+index a764a7c89..b3c3442fe 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c
++++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_4.c
+@@ -282,12 +282,13 @@ __arm_new("za") void test12(volatile int *ptr)
+ **	...
+ **	bl	inout_za
+ **	...
++**	smstart	za
++**	...
+ **	msr	tpidr2_el0, x[0-9]+
+ **	...
+ **	bl	private_za
+ **	...
+ **	cbnz	[^\n]+
+-**	smstart	za
+ **	msr	tpidr2_el0, xzr
+ **	bl	out_za
+ **	bl	in_za
+diff --git a/gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c b/gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c
+index d54840d3d..d8f758adc 100644
+--- a/gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c
++++ b/gcc/testsuite/gcc.target/aarch64/sme/za_state_5.c
+@@ -276,12 +276,13 @@ void test12(volatile int *ptr) __arm_inout("za")
+ **	msr	tpidr2_el0, xzr
+ **	bl	inout_za
+ **	...
++**	smstart	za
++**	...
+ **	msr	tpidr2_el0, x[0-9]+
+ **	...
+ **	bl	private_za
+ **	ldr	[^\n]+
+ **	cbnz	[^\n]+
+-**	smstart	za
+ **	msr	tpidr2_el0, xzr
+ **	bl	out_za
+ **	bl	in_za
+-- 
+2.34.1
+
diff --git a/0370-Add-hip12-core-definition-and-cost-model.patch b/0370-Add-hip12-core-definition-and-cost-model.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6b28a0bbf74e8d5f0dc4587bd991f5d80eb4fe23
--- /dev/null
+++ b/0370-Add-hip12-core-definition-and-cost-model.patch
@@ -0,0 +1,421 @@
+From c5970536c2caa3980bb1fded812ac0dc8ebf3681 Mon Sep 17 00:00:00 2001
+From: liyunfei 
+Date: Fri, 18 Apr 2025 14:44:24 +0800
+Subject: [PATCH] Add hip12 core definition and cost model
+
+This adds a cost model and core definition for hip12.
+
+Signed-off-by: liyunfei 
+---
+ gcc/config/aarch64/aarch64-cores.def     |   1 +
+ gcc/config/aarch64/aarch64-cost-tables.h | 108 ++++++++++++
+ gcc/config/aarch64/aarch64-tune.md       |   2 +-
+ gcc/config/aarch64/aarch64.cc            | 209 +++++++++++++++++++++++
+ gcc/doc/invoke.texi                      |   3 +-
+ 5 files changed, 321 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 8f6210397..97d3c5df9 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -179,4 +179,5 @@ AARCH64_CORE("hip11", hip11, hip11, V8_5A,  (SVE, SVE2, F16), hip11, 0x48, 0xd22
+ AARCH64_CORE("demeter", demeter, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+ AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+ 
++AARCH64_CORE("hip12", hip12, hip12, V9_2A, (SVE, SVE2, SVE2_BITPERM, SVE2_AES, SVE2_SM4, SVE2_SHA3, F16, RCPC, BF16, DOTPROD, LSE, SIMD, PAUTH, RDMA, LS64), hip12, 0x4e, 0xd06, -1)
+ #undef AARCH64_CORE
+diff --git a/gcc/config/aarch64/aarch64-cost-tables.h b/gcc/config/aarch64/aarch64-cost-tables.h
+index a39ace9ba..d59ef10be 100644
+--- a/gcc/config/aarch64/aarch64-cost-tables.h
++++ b/gcc/config/aarch64/aarch64-cost-tables.h
+@@ -665,6 +665,114 @@ const struct cpu_cost_table hip11_extra_costs =
+   }
+ };
+ 
++const struct cpu_cost_table hip12_extra_costs =
++{
++  /* ALU */
++  {
++    0,                 /* arith.  */
++    0,                 /* logical.  */
++    0,                 /* shift.  */
++    0,                 /* shift_reg.  */
++    0,                 /* arith_shift.  */
++    0,                 /* arith_shift_reg.  */
++    COSTS_N_INSNS (1), /* log_shift.  */
++    COSTS_N_INSNS (1), /* log_shift_reg.  */
++    COSTS_N_INSNS (1), /* extend.  */
++    0,                 /* extend_arith.  */
++    0,                 /* bfi.  */
++    0,                 /* bfx.  */
++    0,                 /* clz.  */
++    0,                 /* rev.  */
++    0,                 /* non_exec.  */
++    true               /* non_exec_costs_exec.  */
++  },
++
++  {
++    /* MULT SImode */
++    {
++      COSTS_N_INSNS (2),       /* simple.  */
++      0,                       /* flag_setting.  */
++      COSTS_N_INSNS (2),       /* extend.  */
++      COSTS_N_INSNS (2),       /* add.  */
++      COSTS_N_INSNS (2),       /* extend_add.  */
++      COSTS_N_INSNS (5)       /* idiv.  */
++    },
++    /* MULT DImode */
++    {
++      COSTS_N_INSNS (3),       /* simple.  */
++      0,                       /* flag_setting (N/A).  */
++      COSTS_N_INSNS (3),       /* extend.  */
++      COSTS_N_INSNS (3),       /* add.  */
++      COSTS_N_INSNS (3),       /* extend_add.  */
++      COSTS_N_INSNS (7)       /* idiv.  */
++    }
++  },
++  /* LD/ST */
++  {
++    COSTS_N_INSNS (3),         /* load.  */
++    COSTS_N_INSNS (4),         /* load_sign_extend.  */
++    COSTS_N_INSNS (3),         /* ldrd.  */
++    COSTS_N_INSNS (3),         /* ldm_1st.  */
++    1,                         /* ldm_regs_per_insn_1st.  */
++    2,                         /* ldm_regs_per_insn_subsequent.  */
++    COSTS_N_INSNS (5),         /* loadf.  */
++    COSTS_N_INSNS (5),         /* loadd.  */
++    COSTS_N_INSNS (4),         /* load_unaligned.  */
++    0,                         /* store.  */
++    0,                         /* strd.  */
++    0,                         /* stm_1st.  */
++    1,                         /* stm_regs_per_insn_1st.  */
++    2,                         /* stm_regs_per_insn_subsequent.  */
++    0,                         /* storef.  */
++    0,                         /* stored.  */
++    COSTS_N_INSNS (1),         /* store_unaligned.  */
++    COSTS_N_INSNS (5),         /* loadv.  */
++    COSTS_N_INSNS (2)          /* storev.  */
++  },
++  {
++    /* FP SFmode */
++    {
++      COSTS_N_INSNS (5),      /* div.  */
++      COSTS_N_INSNS (2),       /* mult.  */
++      COSTS_N_INSNS (4),       /* mult_addsub.  */
++      COSTS_N_INSNS (3),       /* fma.  */
++      COSTS_N_INSNS (1),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      0,                       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (3),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    },
++    /* FP DFmode */
++    {
++      COSTS_N_INSNS (7),      /* div.  */
++      COSTS_N_INSNS (2),       /* mult.  */
++      COSTS_N_INSNS (4),       /* mult_addsub.  */
++      COSTS_N_INSNS (3),       /* fma.  */
++      COSTS_N_INSNS (1),       /* addsub.  */
++      COSTS_N_INSNS (1),       /* fpconst.  */
++      0,                       /* neg.  */
++      COSTS_N_INSNS (1),       /* compare.  */
++      COSTS_N_INSNS (2),       /* widen.  */
++      COSTS_N_INSNS (2),       /* narrow.  */
++      COSTS_N_INSNS (2),       /* toint.  */
++      COSTS_N_INSNS (3),       /* fromint.  */
++      COSTS_N_INSNS (2)        /* roundint.  */
++    }
++  },
++  /* Vector */
++  {
++    COSTS_N_INSNS (1),  /* alu.  */
++    COSTS_N_INSNS (2),  /* mult.  */
++    COSTS_N_INSNS (1),  /* movi.  */
++    COSTS_N_INSNS (1),  /* dup.  */
++    COSTS_N_INSNS (1)   /* extract.  */
++  }
++};
++
+ const struct cpu_cost_table a64fx_extra_costs =
+ {
+   /* ALU */
+diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
+index 1cfa3559d..488e39b7c 100644
+--- a/gcc/config/aarch64/aarch64-tune.md
++++ b/gcc/config/aarch64/aarch64-tune.md
+@@ -1,5 +1,5 @@
+ ;; -*- buffer-read-only: t -*-
+ ;; Generated automatically by gentune.sh from aarch64-cores.def
+ (define_attr "tune"
+-	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,hip10a,hip10c,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2"
++	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,hip10a,hip10c,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2,hip12"
+ 	(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 9f1fbf970..c4e2eba01 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -561,6 +561,24 @@ static const struct cpu_addrcost_table hip11_addrcost_table =
+   0, /* imm_offset  */
+ };
+ 
++static const struct cpu_addrcost_table hip12_addrcost_table =
++{
++    {
++      1, /* hi  */
++      0, /* si  */
++      0, /* di  */
++      1, /* ti  */
++    },
++  0, /* pre_modify  */
++  0, /* post_modify  */
++  2, /* post_modify_ld3_st3  */
++  2, /* post_modify_ld4_st4  */
++  0, /* register_offset  */
++  0, /* register_sextend  */
++  0, /* register_zextend  */
++  0, /* imm_offset  */
++};
++
+ static const struct cpu_addrcost_table qdf24xx_addrcost_table =
+ {
+     {
+@@ -756,6 +774,16 @@ static const struct cpu_regmove_cost hip11_regmove_cost =
+   2  /* FP2FP  */
+ };
+ 
++static const struct cpu_regmove_cost hip12_regmove_cost =
++{
++  1, /* GP2GP  */
++  /* Avoid the use of slow int<->fp moves for spilling by setting
++     their cost higher than memmov_cost.  */
++  6, /* GP2FP  */
++  2, /* FP2GP  */
++  2  /* FP2FP  */
++};
++
+ static const struct cpu_regmove_cost a64fx_regmove_cost =
+ {
+   1, /* GP2GP  */
+@@ -1231,6 +1259,143 @@ static const struct cpu_vector_cost hip11_vector_cost =
+   nullptr /* issue_info  */
+ };
+ 
++static const advsimd_vec_cost hip12_advsimd_vector_cost =
++{
++  2, /* int_stmt_cost  */
++  2, /* fp_stmt_cost  */
++  2, /* ld2_st2_permute_cost  */
++  2, /* ld3_st3_permute_cost  */
++  3, /* ld4_st4_permute_cost  */
++  2, /* permute_cost  */
++  9, /* reduc_i8_cost  */
++  7, /* reduc_i16_cost  */
++  5, /* reduc_i32_cost  */
++  3, /* reduc_i64_cost  */
++  3, /* reduc_f16_cost  */
++  3, /* reduc_f32_cost  */
++  3, /* reduc_f64_cost  */
++  3, /* store_elt_extra_cost  */
++  2, /* vec_to_scalar_cost  */
++  5, /* scalar_to_vec_cost  */
++  8, /* align_load_cost  */
++  8, /* unalign_load_cost  */
++  1, /* unalign_store_cost  */
++  1  /* store_cost  */
++};
++
++static const sve_vec_cost hip12_sve_vector_cost =
++{
++  {
++    2, /* int_stmt_cost  */
++    2, /* fp_stmt_cost  */
++    2, /* ld2_st2_permute_cost  */
++    3, /* ld3_st3_permute_cost  */
++    3, /* ld4_st4_permute_cost  */
++    2, /* permute_cost  */
++    /* Theoretically, a reduction involving 31 scalar ADDs could
++       complete in ~6 cycles and would have a cost of 31.  [SU]ADDV
++       completes in 13 cycles, so give it a cost of 31 + 7.  */
++    38, /* reduc_i8_cost  */
++    /* Likewise for 15 scalar ADDs (~3 cycles) vs. 10: 15 + 7.  */
++    22, /* reduc_i16_cost  */
++    /* Likewise for 7 scalar ADDs (~2 cycles) vs. 7: 7 + 5.  */
++    12, /* reduc_i32_cost  */
++    /* Likewise for 3 scalar ADDs (~1 cycles) vs. 4: 3 + 3.  */
++    6, /* reduc_i64_cost  */
++    /* Theoretically, a reduction involving 15 scalar FADDs could
++       complete in ~8 cycles and would have a cost of 30.  FADDV
++       completes in 15 cycles, so give it a cost of 30 + 7.  */
++    37, /* reduc_f16_cost  */
++    /* Likewise for 7 scalar FADDs (~4 cycles) vs. 12: 14 + 8.  */
++    22, /* reduc_f32_cost  */
++    /* Likewise for 3 scalar FADDs (~2 cycles) vs. 9: 6 + 7.  */
++    13, /* reduc_f64_cost  */
++    2, /* store_elt_extra_cost  */
++    /* This value is just inherited from the Cortex-A57 table.  */
++    2, /* vec_to_scalar_cost  */
++    /* See the comment above the Advanced SIMD versions.  */
++    5, /* scalar_to_vec_cost  */
++    8, /* align_load_cost  */
++    8, /* unalign_load_cost  */
++    /* Although stores have a latency of 2 and compete for the
++       vector pipes, in practice it's better not to model that.  */
++    1, /* unalign_store_cost  */
++    1  /* store_cost  */
++  },
++  3, /* clast_cost  */
++  42, /* fadda_f16_cost  */
++  26, /* fadda_f32_cost  */
++  20, /* fadda_f64_cost  */
++  6, /* gather_load_x32_cost  */
++  6, /* gather_load_x64_cost  */
++  1 /* scatter_store_elt_cost  */
++};
++
++static const aarch64_scalar_vec_issue_info hip12_scalar_issue_info =
++{
++  5, /* loads_stores_per_cycle  */
++  2, /* stores_per_cycle  */
++  8, /* general_ops_per_cycle  */
++  0, /* fp_simd_load_general_ops  */
++  1 /* fp_simd_store_general_ops  */
++};
++
++static const aarch64_advsimd_vec_issue_info hip12_advsimd_issue_info =
++{
++  {
++    5, /* loads_stores_per_cycle  */
++    2, /* stores_per_cycle  */
++    4, /* general_ops_per_cycle  */
++    0, /* fp_simd_load_general_ops  */
++    1 /* fp_simd_store_general_ops  */
++  },
++  2, /* ld2_st2_general_ops  */
++  2, /* ld3_st3_general_ops  */
++  3 /* ld4_st4_general_ops  */
++};
++
++static const aarch64_sve_vec_issue_info hip12_sve_issue_info =
++{
++  {
++    {
++      5, /* loads_per_cycle  */
++      2, /* stores_per_cycle  */
++      4, /* general_ops_per_cycle  */
++      0, /* fp_simd_load_general_ops  */
++      1 /* fp_simd_store_general_ops  */
++    },
++    2, /* ld2_st2_general_ops  */
++    2, /* ld3_st3_general_ops  */
++    3 /* ld4_st4_general_ops  */
++  },
++  2, /* pred_ops_per_cycle  */
++  1, /* while_pred_ops  */
++  0, /* int_cmp_pred_ops  */
++  0, /* fp_cmp_pred_ops  */
++  1, /* gather_scatter_pair_general_ops  */
++  1 /* gather_scatter_pair_pred_ops  */
++};
++
++static const aarch64_vec_issue_info hip12_vec_issue_info =
++{
++  &hip12_scalar_issue_info,
++  &hip12_advsimd_issue_info,
++  &hip12_sve_issue_info
++};
++
++static const struct cpu_vector_cost hip12_vector_cost =
++{
++  1, /* scalar_int_stmt_cost  */
++  2, /* scalar_fp_stmt_cost  */
++  4, /* scalar_load_cost  */
++  1, /* scalar_store_cost  */
++  1, /* cond_taken_branch_cost  */
++  1, /* cond_not_taken_branch_cost  */
++  &hip12_advsimd_vector_cost, /* advsimd  */
++  &hip12_sve_vector_cost, /* sve  */
++  &hip12_vec_issue_info /* issue_info  */
++};
++
+ static const advsimd_vec_cost cortexa57_advsimd_vector_cost =
+ {
+   2, /* int_stmt_cost  */
+@@ -1622,6 +1787,17 @@ static const cpu_prefetch_tune hip11_prefetch_tune =
+   -1                    /* default_opt_level  */
+ };
+ 
++static const cpu_prefetch_tune hip12_prefetch_tune =
++{
++  0,                    /* num_slots  */
++  64,                   /* l1_cache_size  */
++  64,                   /* l1_cache_line_size  */
++  512,                  /* l2_cache_size  */
++  true,                 /* prefetch_dynamic_strides */
++  -1,                   /* minimum_stride */
++  -1                    /* default_opt_level  */
++};
++
+ static const cpu_prefetch_tune xgene1_prefetch_tune =
+ {
+   8,			/* num_slots  */
+@@ -2121,6 +2297,39 @@ static const struct tune_params hip11_tunings =
+   &hip11_prefetch_tune
+ };
+ 
++static const struct tune_params hip12_tunings =
++{
++  &hip12_extra_costs,
++  &hip12_addrcost_table,
++  &hip12_regmove_cost,
++  &hip12_vector_cost,
++  &generic_branch_cost,
++  &generic_approx_modes,
++  SVE_256, /* sve_width  */
++  { 4, /* load_int.  */
++    1, /* store_int.  */
++    6, /* load_fp.  */
++    1, /* store_fp.  */
++    6, /* load_pred.  */
++    1 /* store_pred.  */
++  }, /* memmov_cost.  */
++  16,    /* issue_rate  */
++  (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
++   | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
++  "16", /* function_align.  */
++  "4",  /* jump_align.  */
++  "8",  /* loop_align.  */
++  4,    /* int_reassoc_width.  */
++  4,    /* fp_reassoc_width.  */
++  4,    /* vec_reassoc_width.  */
++  2,    /* min_div_recip_mul_sf.  */
++  2,    /* min_div_recip_mul_df.  */
++  0,    /* max_case_values.  */
++  tune_params::AUTOPREFETCHER_WEAK,     /* autoprefetcher_model.  */
++  (AARCH64_EXTRA_TUNE_NONE),     /* tune_flags.  */
++  &generic_prefetch_tune
++};
++
+ static const struct tune_params xgene1_tunings =
+ {
+   &xgene1_extra_costs,
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 90073ac98..e985e6c2c 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -19221,7 +19221,8 @@ performance of the code.  Permissible values for this option are:
+ @samp{octeontx2}, @samp{octeontx2t98}, @samp{octeontx2t96}
+ @samp{octeontx2t93}, @samp{octeontx2f95}, @samp{octeontx2f95n},
+ @samp{octeontx2f95mm},
+-@samp{a64fx},@samp{hip09},@samp{hip10a},@samp{hip10c},@samp{hip11}
++@samp{a64fx},
++@samp{hip09},@samp{hip10a},@samp{hip10c},@samp{hip11},@samp{hip12},
+ @samp{thunderx}, @samp{thunderxt88},
+ @samp{thunderxt88p1}, @samp{thunderxt81}, @samp{tsv110},
+ @samp{thunderxt83}, @samp{thunderx2t99}, @samp{thunderx3t110}, @samp{zeus},
+-- 
+2.34.1
+
diff --git a/0371-SVE-Fix-std-find-with-sve.patch b/0371-SVE-Fix-std-find-with-sve.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a7af818227f0bc0f932c8463aab862b655fe2927
--- /dev/null
+++ b/0371-SVE-Fix-std-find-with-sve.patch
@@ -0,0 +1,70 @@
+From 707ec18abb4ec1a8af651197c8898021b999843a Mon Sep 17 00:00:00 2001
+From: blunce 
+Date: Mon, 21 Apr 2025 14:44:05 +0800
+Subject: [PATCH] [SVE] Fix std::find with sve
+
+---
+ libgcc/config/aarch64/sve_std_find.c | 46 ++++++++++++++++------------
+ 1 file changed, 26 insertions(+), 20 deletions(-)
+
+diff --git a/libgcc/config/aarch64/sve_std_find.c b/libgcc/config/aarch64/sve_std_find.c
+index 0caf1f4f6..86ff4cb5a 100644
+--- a/libgcc/config/aarch64/sve_std_find.c
++++ b/libgcc/config/aarch64/sve_std_find.c
+@@ -6,27 +6,33 @@
+ uint64_t *__sve_optimized_find_u64 (uint64_t *first, uint64_t *last,
+ 	uint64_t const *value, uint8_t threshold)
+ {
+-    if (first + threshold > last)
+-	goto Tail;
++	if (first + threshold > last)
++	{
++		goto Tail;
++	}
+ 
+-    uint64_t m = svcntd ();
+-    uint64_t n = (last - first) / m;
+-    svbool_t flag_true = svptrue_b64 ();
+-    for (; n-- > 0;)
+-    {
+-	svuint64_t v3 = svld1_u64 (flag_true, (uint64_t *)first);
+-	svbool_t v4 = svcmpeq_n_u64 (flag_true, v3, (uint64_t *)value);
+-	if (svptest_any (flag_true, v4))
+-	    break;
+-	first += m;
+-    }
++	uint64_t m = svcntd ();
++	uint64_t n = (last - first) / m;
++	svbool_t TRUE = svptrue_b64 ();
++	for (; n-- > 0;)
++	{
++		svuint64_t v3 = svld1_u64 (TRUE, (uint64_t *)first);
++		svbool_t v4 = svcmpeq_n_u64 (TRUE, v3, (uint64_t)*value);
++		if (svptest_any (TRUE, v4))
++		{
++			break;
++		}
++		first += m;
++	}
+ 
+ Tail:
+-    while (first < last)
+-    {
+-	if (*first == *value)
+-	    return first;
+-	++first;
+-    }
+-    return first;
++	while (first < last)
++	{
++		if (*first == *value)
++		{
++			return first;
++		}
++		++first;
++	}
++	return last;
+ }
+-- 
+2.34.1
+
diff --git a/0372-oeAware-Add-.GCC4OE_oeAware-section-for-optimization.patch b/0372-oeAware-Add-.GCC4OE_oeAware-section-for-optimization.patch
new file mode 100644
index 0000000000000000000000000000000000000000..564a058f7b59df4cbe65945702738d8959f2ff2b
--- /dev/null
+++ b/0372-oeAware-Add-.GCC4OE_oeAware-section-for-optimization.patch
@@ -0,0 +1,286 @@
+From 3963ed3b7993378d925084acd2adc83ee69e2b80 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Sat, 19 Apr 2025 17:06:14 +0800
+Subject: [PATCH] [oeAware] Add .GCC4OE_oeAware section for optimization policy
+ tracking
+
+This commit introduces a mechanism to embed optimization policy metadata
+into the compiled binary through a dedicated ELF section. The feature is
+controlled by the new -foeaware-policy=[n] command-line option.
+
+Key components:
+- Creates mergeable section .GCC4OE_oeAware with SECTION_STRINGS flag
+- Only triggers section creation in translation units containing main()
+- Encodes 32-bit oeaware_optimize_policy value in little-endian format
+- Uses safe context checks (cfun validation, MAIN_NAME_P predicate)
+
+Implementation details:
+1. Section creation is guarded by function context checks to prevent
+   redundant section generation in non-main compilation units
+2. The SECTION_STRINGS flag allows linker merging of identical policy
+   values across different translation units
+3. Architecture-neutral implementation through byte-wise value emission
+4. Includes gcc_assert to validate policy value range
+
+Usage example:
+gcc -foeaware-policy=2 -O2 source.c
+
+This feature will be used by the OpenEuler runtime environment to guide
+binary-level optimization decisions. The section content can be verified
+with: objdump -s -j .GCC4OE_oeAware a.out
+---
+ gcc/common.opt                         |  8 +++
+ gcc/doc/invoke.texi                    |  4 ++
+ gcc/final.cc                           |  3 ++
+ gcc/opts.cc                            |  8 +++
+ gcc/testsuite/gcc.dg/dg.exp            |  1 +
+ gcc/testsuite/gcc.dg/oeaware-main.c    |  7 +++
+ gcc/testsuite/gcc.dg/oeaware-no-main.c |  6 +++
+ gcc/testsuite/lib/oeaware.exp          | 75 ++++++++++++++++++++++++++
+ gcc/varasm.cc                          | 29 ++++++++++
+ gcc/varasm.h                           |  2 +
+ 10 files changed, 143 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.dg/oeaware-main.c
+ create mode 100644 gcc/testsuite/gcc.dg/oeaware-no-main.c
+ create mode 100644 gcc/testsuite/lib/oeaware.exp
+
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 23544740d..2578c7cd0 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -3130,6 +3130,14 @@ foptimize-strlen
+ Common Var(flag_optimize_strlen) Optimization
+ Enable string length optimizations on trees.
+ 
++foeaware-policy
++Common Var(flag_oeaware) Init(0) Optimization
++Perform oeAware-gcc co-optimization.
++
++foeaware-policy=
++Common RejectNegative Joined UInteger Var(oeaware_optimize_policy) Init(1) IntegerRange(1, 7)
++Select the optimization policy.
++
+ fisolate-erroneous-paths-dereference
+ Common Var(flag_isolate_erroneous_paths_dereference) Optimization
+ Detect paths that trigger erroneous or undefined behavior due to
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 90073ac98..9a8332b1b 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -22898,6 +22898,10 @@ output file being linked.  See man ld(1) for more information.
+ When passed this option, GCC produces a dynamic library instead of
+ an executable when linking, using the Darwin @file{libtool} command.
+ 
++@item -foeaware-policy=@var{n}
++@opindex foeaware-policy
++Emit optimization policy value @var{n} into .GCC4OE_oeAware section.
++
+ @item -force_cpusubtype_ALL
+ @opindex force_cpusubtype_ALL
+ This causes GCC's output file to have the @samp{ALL} subtype, instead of
+diff --git a/gcc/final.cc b/gcc/final.cc
+index 0252250ba..033d2fff7 100644
+--- a/gcc/final.cc
++++ b/gcc/final.cc
+@@ -4699,6 +4699,9 @@ rest_of_handle_final (void)
+         dump_profile_to_elf_sections ();
+       }
+ 
++  if (flag_oeaware)
++    create_oeaware_section ();
++
+   return 0;
+ }
+ 
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index 162e14bc2..b69c43724 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -3180,6 +3180,14 @@ common_handle_option (struct gcc_options *opts,
+       }
+       break;
+ 
++    case OPT_foeaware_policy_:
++      opts->x_oeaware_optimize_policy = value;
++      /* No break here - do -foeaware processing.  */
++      /* FALLTHRU.  */
++    case OPT_foeaware_policy:
++      opts->x_flag_oeaware = value;
++      break;
++
+     case OPT_fipa_reorder_fields:
+       SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_struct_reorg, value);
+       break;
+diff --git a/gcc/testsuite/gcc.dg/dg.exp b/gcc/testsuite/gcc.dg/dg.exp
+index 9c8b0eac3..0e93b2098 100644
+--- a/gcc/testsuite/gcc.dg/dg.exp
++++ b/gcc/testsuite/gcc.dg/dg.exp
+@@ -18,6 +18,7 @@
+ 
+ # Load support procs.
+ load_lib gcc-dg.exp
++load_lib oeaware.exp
+ 
+ # If a testcase doesn't have special options, use these.
+ global DEFAULT_CFLAGS
+diff --git a/gcc/testsuite/gcc.dg/oeaware-main.c b/gcc/testsuite/gcc.dg/oeaware-main.c
+new file mode 100644
+index 000000000..c3da4a3d3
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/oeaware-main.c
+@@ -0,0 +1,7 @@
++/* { dg-do run { target *-*-linux* *-*-gnu* } }  */
++/* { dg-options "-foeaware-policy=1" }  */
++
++int main(void) { return 0; }
++
++/* { dg-final { check-section-exists ".GCC4OE_oeAware" } }  */
++/* { dg-final { check-section-content ".GCC4OE_oeAware" "....01000000" } }  */
+\ No newline at end of file
+diff --git a/gcc/testsuite/gcc.dg/oeaware-no-main.c b/gcc/testsuite/gcc.dg/oeaware-no-main.c
+new file mode 100644
+index 000000000..79996f6f9
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/oeaware-no-main.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile { target *-*-linux* *-*-gnu* } }  */
++/* { dg-options "-foeaware-policy=1" }  */
++
++int test(void) { return 0; }
++
++/* { dg-final { scan-assembler-not "GCC4OE_oeAware" } }  */
+\ No newline at end of file
+diff --git a/gcc/testsuite/lib/oeaware.exp b/gcc/testsuite/lib/oeaware.exp
+new file mode 100644
+index 000000000..d8b9f0a9e
+--- /dev/null
++++ b/gcc/testsuite/lib/oeaware.exp
+@@ -0,0 +1,75 @@
++#   Copyright (C) 2025-2025 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++# 
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# <http://www.gnu.org/licenses/>.
++
++# gcc/testsuite/lib/oeaware.exp
++
++proc check-section-exists { section } {
++    for {set level 1} {$level <= 5} {incr level} {
++        if {[catch {upvar $level output_file output_file}]} {
++            continue;
++        }
++        if {[info exists output_file]} {
++            break
++        }
++    }
++    
++    if {![info exists output_file]} {
++        fail "Cannot find output_file in any parent scope"
++        return
++    }
++    
++    if {![file exists $output_file]} {
++        fail "Output file $output_file does not exist"
++        return
++    }
++    
++    set cmd "objdump -h $output_file | grep -q '$section'"
++    if {[catch {exec sh -c $cmd}]} {
++        fail "Section $section not found"
++    } else {
++        pass "Section $section exists"
++    }
++}
++
++proc check-section-content { section expected } {
++    for {set level 1} {$level <= 5} {incr level} {
++        if {[catch {upvar $level output_file output_file}]} { continue }
++        if {[info exists output_file]} { break }
++    }
++
++    if {![info exists output_file]} {
++        fail "Cannot find output_file in any parent scope"
++        return
++    }
++
++    if {![file exists $output_file]} {
++        fail "Output file $output_file does not exist"
++        return
++    }
++
++    set cmd "objdump -s -j $section $output_file | tail -n +5 | awk '{ printf \"%s%s%s%s\", \$5, \$4, \$3, \$2 }'"
++    if {[catch {set result [exec sh -c $cmd]} err]} {
++        fail "Failed to read section content: $err"
++        return
++    }
++
++    set result [string trim $result]
++    if {$result eq $expected} {
++        pass "Section $section content matches"
++    } else {
++        fail "Section $section content mismatch (got '$result', expected '$expected')"
++    }
++}
+\ No newline at end of file
+diff --git a/gcc/varasm.cc b/gcc/varasm.cc
+index d122730b5..bdf02edea 100644
+--- a/gcc/varasm.cc
++++ b/gcc/varasm.cc
+@@ -8564,4 +8564,33 @@ handle_vtv_comdat_section (section *sect, const_tree decl ATTRIBUTE_UNUSED)
+   switch_to_comdat_section(sect, DECL_NAME (decl));
+ }
+ 
++/* Create .GCC4OE_oeAware section with optimization policy value.
++   Only emitted for main function's translation unit.  The 4-byte
++   value is stored in target-endian format (little-endian here).
++   SECTION_STRINGS allows merging identical policy values.  */
++
++void
++create_oeaware_section ()
++{
++  /* To prevent inserting repeated segments and data,
++     we only perform the insertion in the file where the main
++     function is located.  */
++  if (!cfun || TREE_CODE (cfun->decl) != FUNCTION_DECL
++      || !DECL_NAME (cfun->decl) || !MAIN_NAME_P (DECL_NAME (cfun->decl)))
++    return;
++
++  int flags = SECTION_STRINGS;
++  section *oe_section = get_section (".GCC4OE_oeAware", flags, NULL, true);
++  switch_to_section (oe_section);
++
++  gcc_assert (oeaware_optimize_policy <= UINT8_MAX);
++  uint32_t value = oeaware_optimize_policy;
++  uint8_t *bytes = (uint8_t *)&value;
++
++  fprintf (asm_out_file, "\t.byte 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
++	  bytes[0], bytes[1], bytes[2], bytes[3]);
++
++  return;
++}
++
+ #include "gt-varasm.h"
+diff --git a/gcc/varasm.h b/gcc/varasm.h
+index 8ba8374e7..8dec57e0f 100644
+--- a/gcc/varasm.h
++++ b/gcc/varasm.h
+@@ -81,4 +81,6 @@ extern rtx assemble_trampoline_template (void);
+ 
+ extern void switch_to_comdat_section (section *, tree);
+ 
++extern void create_oeaware_section ();
++
+ #endif  // GCC_VARASM_H
+-- 
+2.34.1
+
diff --git a/0373-Include-insn-opinit.h-in-PLUGIN_H-PR110610.patch b/0373-Include-insn-opinit.h-in-PLUGIN_H-PR110610.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0f02406c26d8da420ab9887c54b358818dcc1a6c
--- /dev/null
+++ b/0373-Include-insn-opinit.h-in-PLUGIN_H-PR110610.patch
@@ -0,0 +1,33 @@
+From 284ed9c3f87c71ef98e24b048a9ce6d461e70aa5 Mon Sep 17 00:00:00 2001
+From: Andre Vieira 
+Date: Mon, 17 Jul 2023 17:00:54 +0100
+Subject: [PATCH] Include insn-opinit.h in PLUGIN_H [PR110610]
+
+This patch fixes PR110610 by including insn-opinit.h in the INTERNAL_FN_H list,
+as insn-opinit.h is now required by internal-fn.h. This will lead to
+insn-opinit.h being installed in the plugin directory.
+
+gcc/ChangeLog:
+
+	PR plugins/110610
+	* Makefile.in (INTERNAL_FN_H): Add insn-opinit.h.
+---
+ gcc/Makefile.in | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index ab6ad8206..c7a503235 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -942,7 +942,7 @@ READ_MD_H = $(OBSTACK_H) $(HASHTAB_H) read-md.h
+ BUILTINS_DEF = builtins.def sync-builtins.def omp-builtins.def \
+ 	gtm-builtins.def sanitizer.def
+ INTERNAL_FN_DEF = internal-fn.def
+-INTERNAL_FN_H = internal-fn.h $(INTERNAL_FN_DEF)
++INTERNAL_FN_H = internal-fn.h $(INTERNAL_FN_DEF) insn-opinit.h
+ TREE_CORE_H = tree-core.h $(CORETYPES_H) all-tree.def tree.def \
+ 	c-family/c-common.def $(lang_tree_files) \
+ 	$(BUILTINS_DEF) $(INPUT_H) statistics.h \
+-- 
+2.34.1
+
diff --git a/0374-Add-hip12-instructions-pipeline.patch b/0374-Add-hip12-instructions-pipeline.patch
new file mode 100644
index 0000000000000000000000000000000000000000..58cf8152698441e2b1d69eb8db6bcfbcf881b594
--- /dev/null
+++ b/0374-Add-hip12-instructions-pipeline.patch
@@ -0,0 +1,965 @@
+From d63119daeb54cd0c387c1b24981c47d795e5a672 Mon Sep 17 00:00:00 2001
+From: liyunfei 
+Date: Fri, 25 Apr 2025 16:04:53 +0800
+Subject: [PATCH] Add hip12 instructions pipeline
+
+This adds the instruction pipeline description for hip12.
+
+Signed-off-by: liyunfei 
+---
+ gcc/config/aarch64/aarch64-cores.def |   2 +-
+ gcc/config/aarch64/aarch64.cc        |   6 +-
+ gcc/config/aarch64/aarch64.md        |   1 +
+ gcc/config/aarch64/hip12.md          | 891 +++++++++++++++++++++++++++
+ 4 files changed, 896 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/config/aarch64/hip12.md
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 97d3c5df9..2cb752ca1 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -179,5 +179,5 @@ AARCH64_CORE("hip11", hip11, hip11, V8_5A,  (SVE, SVE2, F16), hip11, 0x48, 0xd22
+ AARCH64_CORE("demeter", demeter, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+ AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+ 
+-AARCH64_CORE("hip12", hip12, hip12, V9_2A, (SVE, SVE2, SVE2_BITPERM, SVE2_AES, SVE2_SM4, SVE2_SHA3, F16, RCPC, BF16, DOTPROD, LSE, SIMD, PAUTH, RDMA, LS64), hip12, 0x4e, 0xd06, -1)
++AARCH64_CORE("hip12", hip12, hip12, V9_2A, (SVE, SVE2, SVE2_BITPERM, SVE2_AES, SVE2_SM4, SVE2_SHA3, F16, RCPC, BF16, DOTPROD, LSE, SIMD, PAUTH, RDMA, LS64), hip12, 0x48, 0xd06, -1)
+ #undef AARCH64_CORE
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index c4e2eba01..b62ba344f 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -1790,9 +1790,9 @@ static const cpu_prefetch_tune hip11_prefetch_tune =
+ static const cpu_prefetch_tune hip12_prefetch_tune =
+ {
+   0,                    /* num_slots  */
+-  64,                   /* l1_cache_size  */
++  128,                  /* l1_cache_size  */
+   64,                   /* l1_cache_line_size  */
+-  512,                  /* l2_cache_size  */
++  1024,                 /* l2_cache_size  */
+   true,                 /* prefetch_dynamic_strides */
+   -1,                   /* minimum_stride */
+   -1                    /* default_opt_level  */
+@@ -2327,7 +2327,7 @@ static const struct tune_params hip12_tunings =
+   0,    /* max_case_values.  */
+   tune_params::AUTOPREFETCHER_WEAK,     /* autoprefetcher_model.  */
+   (AARCH64_EXTRA_TUNE_NONE),     /* tune_flags.  */
+-  &generic_prefetch_tune
++  &hip12_prefetch_tune
+ };
+ 
+ static const struct tune_params xgene1_tunings =
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index 69d296556..74a1a56bc 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -552,6 +552,7 @@
+ (include "hip10a.md")
+ (include "hip10c.md")
+ (include "hip11.md")
++(include "hip12.md")
+ 
+ ;; -------------------------------------------------------------------
+ ;; Jumps and other miscellaneous insns
+diff --git a/gcc/config/aarch64/hip12.md b/gcc/config/aarch64/hip12.md
+new file mode 100644
+index 000000000..031a4c39d
+--- /dev/null
++++ b/gcc/config/aarch64/hip12.md
+@@ -0,0 +1,891 @@
++;; hip12 pipeline description
++;; Copyright (C) 2023 Free Software Foundation, Inc.
++;;
++;; Contributed by liyunfei
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify it
++;; under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful, but
++;; WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
++;; General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; <http://www.gnu.org/licenses/>.
++
++(define_automaton "hip12")
++(define_automaton "hip12_ldst")
++(define_automaton "hip12_v")
++
++; The hip12 core is modelled as a multiple-issue pipeline that has
++; the following functional units.
++; 1. 4 pipelines for single cycle integer micro operations: ALU0, ALU1, ALU3, ALU4
++
++(define_cpu_unit "hip12_alu0" "hip12")
++(define_cpu_unit "hip12_alu1" "hip12")
++(define_cpu_unit "hip12_alu3" "hip12")
++(define_cpu_unit "hip12_alu4" "hip12")
++
++ (define_reservation "hip12_alu0134" "hip12_alu0|hip12_alu1|hip12_alu3|hip12_alu4")
++(define_reservation "hip12_alu14" "hip12_alu1|hip12_alu4")
++
++; 2. 2 pipelines for multi cycles integer micro operations: ALU2, ALU5
++
++(define_cpu_unit "hip12_alu2" "hip12")
++(define_cpu_unit "hip12_alu5" "hip12")
++
++(define_reservation "hip12_alu25" "hip12_alu2|hip12_alu5")
++(define_reservation "hip12_alu1425" "hip12_alu1|hip12_alu4|hip12_alu2|hip12_alu5")
++
++; 3. All ALU pipelines
++
++(define_reservation "hip12_alu" "hip12_alu0|hip12_alu1|hip12_alu2|hip12_alu3|hip12_alu4|hip12_alu5")
++
++; 4. 3 pipelines for load micro operations: Load0, Load1, Load2
++
++(define_cpu_unit "hip12_load0" "hip12_ldst")
++(define_cpu_unit "hip12_load1" "hip12_ldst")
++(define_cpu_unit "hip12_load2" "hip12_ldst")
++
++(define_reservation "hip12_ld" "hip12_load0|hip12_load1|hip12_load2")
++
++; 5. 2 pipelines for store micro operations: Store1, Store2
++
++(define_cpu_unit "hip12_store0" "hip12_ldst")
++(define_cpu_unit "hip12_store1" "hip12_ldst")
++
++(define_reservation "hip12_st" "hip12_store0|hip12_store1")
++
++; 6. 2 pipelines for store data micro operations: STD0, STD1
++
++(define_cpu_unit "hip12_store_data0" "hip12_ldst")
++(define_cpu_unit "hip12_store_data1" "hip12_ldst")
++
++(define_reservation "hip12_std" "hip12_store_data0|hip12_store_data1")
++
++; 7. 4 asymmetric pipelines for Asimd/FP/SVE micro operations: V0, V1, V2, V3
++
++(define_cpu_unit "hip12_v0" "hip12_v")
++(define_cpu_unit "hip12_v1" "hip12_v")
++(define_cpu_unit "hip12_v2" "hip12_v")
++(define_cpu_unit "hip12_v3" "hip12_v")
++
++(define_reservation "hip12_v0123" "hip12_v0|hip12_v1|hip12_v2|hip12_v3")
++(define_reservation "hip12_v02" "hip12_v0|hip12_v2")
++
++; 8. 2 pipelines for branch operations: Branch0, Branch1
++
++(define_cpu_unit "hip12_b0" "hip12")
++(define_cpu_unit "hip12_b1" "hip12")
++
++(define_reservation "hip12_b" "hip12_b0|hip12_b1")
++
++;; Block all issue queues.
++
++(define_reservation "hip12_block" "
++          hip12_alu0+hip12_alu1+hip12_alu2+hip12_alu3
++          +hip12_alu4+hip12_alu5+hip12_load0+hip12_load1+hip12_load2+hip12_store0+hip12_store1+hip12_store_data0+hip12_store_data1+hip12_v0+hip12_v1+hip12_v2+hip12_v3")
++
++;; Branch execution Unit
++
++(define_insn_reservation "hip12_branch" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "branch"))
++  "hip12_b")
++
++(define_insn_reservation "hip12_branch_and_link" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "call"))
++  "hip12_b+hip12_alu14")
++
++;; Integer arithmetic/logic instructions.
++
++(define_insn_reservation "hip12_alu_basic" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "alu_imm,alu_sreg,\
++			adc_reg,adc_imm"))
++  "hip12_alu")
++
++(define_insn_reservation "hip12_alu_basic_flagset" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "alus_imm,alus_sreg,\
++			adcs_reg,adcs_imm"))
++  "hip12_alu1425")
++
++(define_insn_reservation "hip12_alu_basic_extend" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "alu_ext,alus_ext,\
++            alu_shift_imm_lsl_1to4,alu_shift_imm_other,\
++            alu_shift_reg,alus_shift_imm,alus_shift_reg"))
++  "hip12_alu25")
++
++(define_insn_reservation "hip12_alu_logical" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "logic_reg,logic_imm"))
++  "hip12_alu")
++
++(define_insn_reservation "hip12_alu_logical_imm" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "logic_imm"))
++  "hip12_alu14")
++
++(define_insn_reservation "hip12_alu_logical_flagset" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "logics_reg"))
++  "hip12_alu1425")
++
++(define_insn_reservation "hip12_alu_logical_flagset_imm" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "logics_imm"))
++  "hip12_alu25")
++
++(define_insn_reservation "hip12_alu_conditional" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "csel"))
++  "hip12_alu14")
++
++;; Divide and Multiply instructions.
++
++(define_insn_reservation "hip12_divide" 8
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "sdiv,udiv"))
++  "hip12_alu25")
++
++(define_insn_reservation "hip12_multiply" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "mul,muls"))
++  "hip12_alu25")
++
++(define_insn_reservation "hip12_multiply_long" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "smull,umull,smulls,umulls"))
++  "hip12_alu25")
++
++(define_insn_reservation "hip12_multiply_accumulate" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "mla,mlas"))
++  "hip12_alu25+hip12_alu0134")
++
++(define_insn_reservation "hip12_multiply_accumulate_long" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "smlal,umlal"))
++  "hip12_alu25+hip12_alu0134")
++
++;; no Pointer Authentication instructions in backend types.
++
++;; Miscellaneous Data-Processing instructions.
++
++(define_insn_reservation "hip12_address" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "adr"))
++  "hip12_alu14")
++
++(define_insn_reservation "hip12_bitfield" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "bfm,bfx"))
++  "hip12_alu14")
++
++;; Todo: Does hip12 have reg move or mvn instructions?
++(define_insn_reservation "hip12_move" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "mov_imm,mov_shift_reg"))
++  "hip12_alu")
++
++(define_insn_reservation "hip12_count_leading" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "clz"))
++  "hip12_alu14")
++
++(define_insn_reservation "hip12_reverse_bits_bytes" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "rbit,rev"))
++  "hip12_alu14")
++
++; Todo: Does hip12 have imm shift instructions?
++(define_insn_reservation "hip12_variable_shift" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "shift_reg"))
++  "hip12_alu14")
++
++; Block all issue pipes for a cycle
++(define_insn_reservation "hip12_block" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "block"))
++  "hip12_block")
++
++;; Load and Store instructions.
++
++(define_insn_reservation "hip12_load_register" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "load_4,load_8"))
++  "hip12_ld")
++
++(define_insn_reservation "hip12_load_pair" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "load_16"))
++  "hip12_ld")
++
++(define_insn_reservation "hip12_store" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "store_4,store_8"))
++  "hip12_st+hip12_std")
++
++(define_insn_reservation "hip12_store_pair" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "store_16"))
++  "hip12_st+hip12_std")
++
++;; FP Data Processing instructions.
++; abs/neg/cpy
++(define_insn_reservation "hip12_fp_arith" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "ffariths,ffarithd"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_compare" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fcmpd,fcmps"))
++  "hip12_v02+hip12_alu0134")
++
++(define_insn_reservation "hip12_fp_conditional_compare" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fccmpd,fccmps"))
++  "hip12_alu14,hip12_v0123")
++
++(define_insn_reservation "hip12_fp_conditional_select" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fcsel"))
++  "hip12_alu14,hip12_v0123")
++
++(define_insn_reservation "hip12_fp_divide_single" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fdivs"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_divide_double" 8
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fdivd"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_square_single" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fsqrts"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_square_double" 8
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fsqrtd"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_fused_multiply_add" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "ffmad,ffmas"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_max_min" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_minmaxd,f_minmaxs"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_add" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fadds,faddd"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_multiply" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fmuld,fmuls"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_round_int" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_rintd,f_rints"))
++  "hip12_v0123")
++
++;; FP Miscellaneous instructions.
++
++(define_insn_reservation "hip12_fp_covert_i2f" 7
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_cvti2f"))
++  "hip12_alu14,hip12_v0123")
++
++(define_insn_reservation "hip12_fp_covert_f2i" 5
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_cvtf2i"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_covert_f2f" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_cvt"))
++  "hip12_v0123")
++  
++(define_insn_reservation "hip12_fp_move" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "fmov"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_fp_transfer_arm2vfp" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_mcr"))
++  "hip12_alu14")
++
++; transfer low half + high half
++(define_insn_reservation "hip12_fp_transfer_2arm2vfp" 10
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_mcrr"))
++  "hip12_alu14,nothing*3,hip12_alu14,hip12_v0123")
++
++(define_insn_reservation "hip12_fp_transfer_vfp2arm" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_mrc,f_mrrc"))
++  "hip12_v0123")
++
++;; FP Load instructions.
++; only basic double/single load
++(define_insn_reservation "hip12_fp_load" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_loadd,f_loads"))
++  "hip12_ld")
++
++(define_insn_reservation "hip12_fp_load_vector_pair" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_ldr,neon_ldp,neon_ldp_q"))
++  "hip12_alu+hip12_ld")
++
++;; FP Store instructions.
++; only basic double/single store
++(define_insn_reservation "hip12_fp_store" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "f_stored,f_stores"))
++  "hip12_st+hip12_std")
++
++(define_insn_reservation "hip12_fp_store_vector" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_ldr"))
++  "hip12_alu+hip12_st+hip12_std")
++
++(define_insn_reservation "hip12_fp_store_pair" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_ldp,neon_ldp_q"))
++  "hip12_st+hip12_std+hip12_alu")
++
++;; ASIMD Int instructions.
++
++(define_insn_reservation "hip12_neon_absolute_diff" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_abd,neon_abd_q,neon_abd_long"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_arith_basic" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_abs,neon_abs_q,\
++			neon_add,neon_add_q,\
++			neon_sub,neon_sub_q,\
++			neon_neg,neon_neg_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_arith_long" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_add_long,neon_sub_long"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_arith_wide" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_add_widen,neon_sub_widen"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_arith_complex" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_qadd,neon_qadd_q,\
++       neon_qsub,neon_qsub_q,\
++       neon_qneg,neon_qneg_q,\
++       neon_qabs,neon_qabs_q"))
++  "hip12_v0123")
++; arith pair not specified
++
++; neon_reduc_add is used for both addp and [su]adalp
++(define_insn_reservation "hip12_neon_arith_reduce" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_reduc_add,neon_reduc_add_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_arith_cmp" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_compare,neon_compare_q,neon_compare_zero,\
++			 neon_tst,neon_tst_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_arith_dot" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_dot,neon_dot_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_logical" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_logic,neon_logic_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_multiply_accumulate" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_mla_b,neon_mla_b_q,\
++       neon_mla_h,neon_mla_h_q,\
++       neon_mla_s,neon_mla_s_q,\
++       neon_mla_b_long,neon_mla_h_long,\
++       neon_mla_s_long,neon_mla_h_scalar,\
++       neon_mla_h_scalar_q,neon_mla_s_scalar,\
++       neon_mla_s_scalar_q,neon_mla_h_scalar_long,\
++       neon_mla_s_scalar_long,neon_sat_mla_b_long,\
++       neon_sat_mla_h_long,neon_sat_mla_s_long,\
++       neon_sat_mla_h_scalar_long,neon_sat_mla_s_scalar_long"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_minmax" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_minmax,neon_minmax_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_minmax_reduce" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_reduc_minmax,neon_reduc_minmax_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_multiply" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_mul_b,neon_mul_b_q,\
++        neon_mul_h,neon_mul_h_q,\
++        neon_mul_s,neon_mul_s_q,\
++        neon_mul_b_long,neon_mul_h_long,\
++        neon_mul_s_long,neon_mul_d_long,\
++        neon_mul_h_scalar,neon_mul_h_scalar_q,\
++        neon_mul_s_scalar,neon_mul_s_scalar_q,\
++        neon_mul_h_scalar_long,neon_mul_s_scalar_long,\
++        neon_sat_mul_b,neon_sat_mul_b_q,\
++        neon_sat_mul_h,neon_sat_mul_h_q,\
++        neon_sat_mul_s,neon_sat_mul_s_q,\
++        neon_sat_mul_b_long,neon_sat_mul_h_long,\
++        neon_sat_mul_s_long,neon_sat_mul_h_scalar,\
++        neon_sat_mul_h_scalar_q,neon_sat_mul_s_scalar,\
++        neon_sat_mul_s_scalar_q,neon_sat_mul_h_scalar_long,\
++        neon_sat_mul_s_scalar_long"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_shift" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_shift_imm,neon_shift_imm_q,\
++        neon_shift_imm_narrow_q,neon_shift_imm_long,\
++        neon_shift_reg,neon_shift_reg_q,\
++        neon_sat_shift_imm,neon_sat_shift_imm_q,\
++        neon_sat_shift_imm_narrow_q,neon_sat_shift_reg,\
++        neon_sat_shift_reg_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_shift_accumulate" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_shift_acc,neon_shift_acc_q"))
++  "hip12_v0123")
++
++;; ASIMD FP instructions.
++
++(define_insn_reservation "hip12_neon_fp_abs" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_abs_s,neon_fp_abs_s_q,\
++       neon_fp_abs_d,neon_fp_abs_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_neg" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_neg_s,neon_fp_neg_s_q,\
++       neon_fp_neg_d,neon_fp_neg_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_abd" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_abd_s,neon_fp_abd_s_q,\
++       neon_fp_abd_d,neon_fp_abd_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_arith" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_addsub_s,neon_fp_addsub_s_q,\
++       neon_fp_addsub_d,neon_fp_addsub_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_compare" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_compare_s,neon_fp_compare_s_q,\
++       neon_fp_compare_d,neon_fp_compare_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_convert_narrow" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_cvt_narrow_s_q,neon_fp_cvt_narrow_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_convert_2int" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_to_int_s,neon_fp_to_int_d"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_convert_2int_q" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_to_int_s_q,neon_fp_to_int_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_convert_from_int" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_int_to_fp_s,neon_int_to_fp_d"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_convert_from_int_q" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_int_to_fp_s_q,neon_int_to_fp_d_q"))
++  "hip12_v0123")
++
++; D/F32
++(define_insn_reservation "hip12_neon_fp_divide_s" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_div_s"))
++  "hip12_v0123")
++
++; Q/F32
++(define_insn_reservation "hip12_neon_fp_divide_s_q" 7
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_div_s_q"))
++  "hip12_v0123")
++
++; Q/F64
++(define_insn_reservation "hip12_neon_fp_divide_d" 9
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_div_d,neon_fp_div_d_q"))
++  "hip12_v0123")
++
++; D/F32
++(define_insn_reservation "hip12_neon_fp_sqrt_s" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_sqrt_s"))
++  "hip12_v0123")
++
++; Q/F32
++(define_insn_reservation "hip12_neon_fp_sqrt_s_q" 7
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_sqrt_s_q"))
++  "hip12_v0123")
++
++; Q/F64
++(define_insn_reservation "hip12_neon_fp_sqrt_d" 9
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_sqrt_d,neon_fp_sqrt_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_minmax" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_minmax_s,neon_fp_minmax_s_q,\
++        neon_fp_minmax_d,neon_fp_minmax_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_minmax_reduce" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_reduc_minmax_s,neon_fp_reduc_minmax_s_q,\
++        neon_fp_reduc_minmax_d,neon_fp_reduc_minmax_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_multiply" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_mul_s,neon_fp_mul_s_q,\
++        neon_fp_mul_s_scalar,neon_fp_mul_s_scalar_q,\
++        neon_fp_mul_d,neon_fp_mul_d_q,\
++        neon_fp_mul_d_scalar_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_multiply_add" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_mla_s,neon_fp_mla_s_q,\
++        neon_fp_mla_s_scalar,neon_fp_mla_s_scalar_q,\
++        neon_fp_mla_d,neon_fp_mla_d_q,\
++        neon_fp_mla_d_scalar_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_round" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_round_s,neon_fp_round_d"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_round_q" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_round_s_q,neon_fp_round_d_q"))
++  "hip12_v0123")
++
++;; ASIMD Miscellaneous instructions
++
++(define_insn_reservation "hip12_neon_bit_reverse" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_rbit,neon_rbit_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_bitwise_insert" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_bsl,neon_bsl_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_count" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_cls,neon_cls_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_count_ds" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_cnt_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_count_bh" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_cnt"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_duplicate" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_dup,neon_dup_q"))
++  "(hip12_v0123)+hip12_alu0134")
++
++(define_insn_reservation "hip12_neon_extract" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_ext,neon_ext_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_insert" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_ins,neon_ins_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_move" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_move,neon_move_q,neon_move_narrow_q"))
++  "hip12_v0123")
++
++; gcc only gen neon fp recp
++(define_insn_reservation "hip12_neon_fp_recp" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_recpe_s,neon_fp_recpe_d,\
++       neon_fp_rsqrte_s,neon_fp_rsqrte_d"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_recp_q" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_recpe_s_q,neon_fp_recpe_d_q,\
++       neon_fp_rsqrte_s_q,neon_fp_rsqrte_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_recpx" 3
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_recpx_s,neon_fp_recpx_s_q,\
++        neon_fp_recpx_d,neon_fp_recpx_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_fp_recps" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_fp_recps_s,neon_fp_recps_s_q,\
++        neon_fp_recps_d,neon_fp_recps_d_q,\
++        neon_fp_rsqrts_s,neon_fp_rsqrts_s_q,\
++        neon_fp_rsqrts_d,neon_fp_rsqrts_d_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_rev" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_rev,neon_rev_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_tbl_12" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_tbl1,neon_tbl1_q,\
++        neon_tbl2,neon_tbl2_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_tbl_3" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_tbl3,\
++        neon_tbl3_q"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_neon_tbl_4" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_tbl4,\
++        neon_tbl4_q"))
++  "hip12_v0123")
++; gcc only gen neon tbl, no tbx
++
++; no neon transfer specified
++
++(define_insn_reservation "hip12_neon_zip" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_zip,neon_zip_q"))
++  "hip12_v0123")
++
++;; ASIMD Load instructions.
++
++(define_insn_reservation "hip12_neon_ld1_12reg" 6
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_load1_1reg,neon_load1_1reg_q,\
++       neon_load1_2reg,neon_load1_2reg_q"))
++  "hip12_ld")
++
++(define_insn_reservation "hip12_neon_ld1_34reg" 7
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_load1_3reg,neon_load1_3reg_q,\
++       neon_load1_4reg,neon_load1_4reg_q"))
++  "hip12_ld")
++
++(define_insn_reservation "hip12_neon_ld1_lane" 8
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_load1_all_lanes,neon_load1_all_lanes_q,\
++       neon_load1_one_lane,neon_load1_one_lane_q"))
++  "hip12_ld+(hip12_v0123)")
++
++(define_insn_reservation "hip12_neon_ld2" 8
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_load2_2reg,neon_load2_2reg_q,\
++       neon_load2_4reg,neon_load2_4reg_q,\
++       neon_load2_all_lanes,neon_load2_all_lanes_q,\
++       neon_load2_one_lane,neon_load2_one_lane_q"))
++  "(hip12_ld)+(hip12_v0123)")
++
++(define_insn_reservation "hip12_neon_ld3" 8
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_load3_3reg,neon_load3_3reg_q,\
++       neon_load3_all_lanes,neon_load3_all_lanes_q,\
++       neon_load3_one_lane,neon_load3_one_lane_q"))
++  "hip12_ld+hip12_v0123")
++
++(define_insn_reservation "hip12_neon_ld4_reg" 10
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_load4_4reg,neon_load4_4reg_q"))
++  "hip12_ld+hip12_v0123")
++
++(define_insn_reservation "hip12_neon_ld4_lane" 8
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_load4_all_lanes,neon_load4_all_lanes_q,\
++       neon_load4_one_lane,neon_load4_one_lane_q"))
++  "hip12_ld+hip12_v0123")
++
++;; ASIMD Store instructions.
++
++(define_insn_reservation "hip12_neon_st1_12reg_4reg" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_store1_1reg,neon_store1_1reg_q,\
++       neon_store1_2reg,neon_store1_2reg_q,\
++       neon_store1_4reg"))
++  "hip12_st+hip12_std+hip12_v0123")
++
++(define_insn_reservation "hip12_neon_st1_3reg" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_store1_3reg,neon_store1_3reg_q"))
++  "hip12_st+hip12_std+hip12_v0123")
++
++(define_insn_reservation "hip12_neon_st1_4reg_q" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_store1_4reg_q"))
++  "hip12_st+hip12_std")
++
++(define_insn_reservation "hip12_neon_st1_lane_st2" 1
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_store1_one_lane,neon_store1_one_lane_q,\
++       neon_store2_2reg,neon_store2_2reg_q,\
++       neon_store2_4reg,neon_store2_4reg_q,\
++       neon_store2_one_lane,neon_store2_one_lane_q"))
++  "hip12_st+hip12_std")
++
++(define_insn_reservation "hip12_neon_st3_st4_q" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_store3_3reg,neon_store3_3reg_q,\
++       neon_store3_one_lane,neon_store3_one_lane_q,\
++       neon_store4_4reg_q"))
++  "hip12_v0123+hip12_st+hip12_std")
++
++(define_insn_reservation "hip12_neon_st4" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "neon_store4_4reg,\
++       neon_store4_one_lane,neon_store4_one_lane_q"))
++  "hip12_v0123+hip12_st+hip12_std")
++
++;; Cryptography Extensions
++
++(define_insn_reservation "hip12_crypto_aes" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_aese,crypto_aesmc"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_crypto_pmull" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_pmull"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_crypto_sha1_fast" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sha1_fast,crypto_sha1_xor"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_crypto_sha256_fast" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sha256_fast"))
++  "hip12_v02")
++
++(define_insn_reservation "hip12_crypto_complex_1" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sha1_slow"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_crypto_complex_256" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sha256_slow"))
++  "hip12_v02")
++
++(define_insn_reservation "hip12_crypto_sha512" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sha512"))
++  "hip12_v02")
++
++(define_insn_reservation "hip12_crypto_sha3" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sha3"))
++  "hip12_v0123")
++
++(define_insn_reservation "hip12_crypto_sm3" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sm3"))
++  "hip12_v02")
++
++(define_insn_reservation "hip12_crypto_sm4" 4
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crypto_sm4"))
++  "hip12_v0123")
++
++;; CRC instructions
++
++(define_insn_reservation "hip12_crc" 2
++  (and (eq_attr "tune" "hip12")
++       (eq_attr "type" "crc"))
++  "hip12_alu25")
++
++;; Simple execution unit bypasses
++(define_bypass 2 "hip12_fp_fused_multiply_add"
++	         "hip12_fp_fused_multiply_add")
++          
++(define_bypass 2 "hip12_neon_arith_dot"
++	         "hip12_neon_arith_dot")
++
++(define_bypass 2 "hip12_neon_multiply_accumulate"
++	         "hip12_neon_multiply_accumulate")
++
++(define_bypass 1 "hip12_neon_shift_accumulate"
++	         "hip12_neon_shift_accumulate")
++
++(define_bypass 2 "hip12_neon_fp_multiply_add"
++	         "hip12_neon_fp_multiply_add")
++
++(define_bypass 2 "hip12_neon_fp_recps"
++	         "hip12_neon_fp_recps")
+\ No newline at end of file
+-- 
+2.34.1
+
diff --git a/0375-SVE-Fix-gcc-cross-compile-error.patch b/0375-SVE-Fix-gcc-cross-compile-error.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6b9734c805515c5acdb6b3765f2ef5598ba25280
--- /dev/null
+++ b/0375-SVE-Fix-gcc-cross-compile-error.patch
@@ -0,0 +1,28 @@
+From 3d8943e2008a2ff0fdd49a967f201d45a2e33210 Mon Sep 17 00:00:00 2001
+From: blunce 
+Date: Thu, 8 May 2025 17:14:58 +0800
+Subject: [PATCH] [SVE] Fix gcc-cross compile error
+
+---
+ libgcc/config/aarch64/sve_std_find.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/libgcc/config/aarch64/sve_std_find.c b/libgcc/config/aarch64/sve_std_find.c
+index 86ff4cb5a..498c5d72d 100644
+--- a/libgcc/config/aarch64/sve_std_find.c
++++ b/libgcc/config/aarch64/sve_std_find.c
+@@ -1,3 +1,6 @@
++#include "auto-target.h"
++
++#if HAVE_STDINT_H
+ #include <stdint.h>
+ #include <arm_sve.h>
+ 
+@@ -36,3 +39,4 @@ Tail:
+ 	}
+ 	return last;
+ }
++#endif
+-- 
+2.28.0.windows.1
+
diff --git a/0376-Struct-dynamic-field-compression-optimization.patch b/0376-Struct-dynamic-field-compression-optimization.patch
new file mode 100644
index 0000000000000000000000000000000000000000..039903fdeb9c677cee0e0ad5f3b34ecd5e046ab4
--- /dev/null
+++ b/0376-Struct-dynamic-field-compression-optimization.patch
@@ -0,0 +1,6805 @@
+diff --git a/gcc/ai-optimizer.cc b/gcc/ai-optimizer.cc
+index 70ff24077..8908d1be3 100644
+--- a/gcc/ai-optimizer.cc
++++ b/gcc/ai-optimizer.cc
+@@ -30,14 +30,14 @@ along with GCC; see the file COPYING3.  If not see
+ #define M_OPTION_SIZE  11
+ #define M_MODE_SIZE  6
+ #define NATIVE_TUNE_SIZE  128
+-#define CATS_STRINGS_ROW  34
++#define CATS_STRINGS_ROW  35
+ #define CATS_STRINGS_COL  65
+ #define CATS_STRINGS1_ROW  10
+ #define CATS_STRINGS1_COL  65
+ #define OFFSET_ROW  6
+ #define SCALE_ROW  6
+ #define UNITY_ROW  1
+-#define COEFFICIENT_ROW  356
++#define COEFFICIENT_ROW  366
+ #define COEFFICIENT_COL  10
+ #define COEFFICIENT1_ROW  10
+ #define COEFFICIENT1_COL  1
+@@ -121,6 +121,7 @@ preprocess (int argc1, const char **argv1, const char *mops,
+   char *m_options[m_size];
+   char output_file[1024];
+   int m_index = 0;
++
+   for (int i = 0; i < argc1; i++)
+     {
+       if (strncmp (argv1[i], marco_prefix, 2) == 0)
+@@ -344,9 +345,9 @@ graph_infer (int argc1, const char **argv1, const char *mops,
+      the ONNX model. concat_result is a 1 × 18 matrix, and encoder_out is a
+      1 × 12 matrix.  */
+ 
+-  const int concat_out_size = 350;
++  const int concat_out_size = 360;
+   float concat_result[concat_out_size];
+-  const int encoder_out_size = 34;
++  const int encoder_out_size = 35;
+   const int encoder_last_size = 10;
+   int concat_size = 0;
+   const int size = encoder_out_size;
+@@ -378,7 +379,7 @@ graph_infer (int argc1, const char **argv1, const char *mops,
+   /* This requires performing matrix multiplication between a 1 × 356 matrix
+      and an 356 × 10 matrix  */
+ 
+-  const int m = 1, k = 356, n = 10;
++  const int m = 1, k = 366, n = 10;
+   float mul_result[n];
+   matmul (transformed_column, coefficient[0], m, k, n, mul_result);  
+ 
+@@ -412,7 +413,7 @@ graph_infer (int argc1, const char **argv1, const char *mops,
+   return argmax_output;
+ }
+ 
+-int
++void
+ get_optimize_decision_from_optimizer (int argc, const char **argv,
+ 				      const char *mops, int argc2,
+ 				      int64_t *argv2)
+@@ -422,5 +423,4 @@ get_optimize_decision_from_optimizer (int argc, const char **argv,
+     {
+       putenv ("AI_INFER_LEVEL=1");
+     }
+-  return model_pred;
+ }
+diff --git a/gcc/ai4c-infer.cc b/gcc/ai4c-infer.cc
+index 4cd4bfb00..4cf040be2 100644
+--- a/gcc/ai4c-infer.cc
++++ b/gcc/ai4c-infer.cc
+@@ -42,7 +42,7 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ /* Model info.  */
+ static int64_t argv_hw1[M_MODE_SIZE];
+-static char native_tune[NATIVE_TUNE_SIZE];
++char native_tune[NATIVE_TUNE_SIZE];
+ 
+ /* Intermediate computation results from the ONNX model.  */
+ static char cats_strings[CATS_STRINGS_ROW][CATS_STRINGS_COL];
+diff --git a/gcc/ai4c-infer.h b/gcc/ai4c-infer.h
+index fa5156ab1..41106a54e 100644
+--- a/gcc/ai4c-infer.h
++++ b/gcc/ai4c-infer.h
+@@ -37,7 +37,7 @@ execute_sha256 (const char *, char *, size_t);
+ extern float read_float_from_file (FILE*);
+ 
+ extern int get_optimize_decision_from_ai4c ();
+-extern int get_optimize_decision_from_optimizer (int, const char **,
++extern void get_optimize_decision_from_optimizer (int, const char **,
+ 						 const char *, int ,
+ 						 int64_t *);
+ extern void set_cache_info (int, int, int, int, int, int);
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 2578c7cd0..4cd2574e4 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2094,6 +2094,18 @@ fipa-struct-sfc-shadow
+ Common Var(flag_ipa_struct_sfc_shadow) Init(0) Optimization
+ Enable field shadowing optimization in static struct field compression.
+ 
++fipa-struct-dfc
++Common Var(flag_ipa_struct_dfc) Init(0) Optimization
++Perform dynamic structure field compression.
++
++fipa-struct-dfc-bitfield
++Common Var(flag_ipa_struct_dfc_bitfield) Init(0) Optimization
++Enable compressing to bitfield in dynamic struct field compression.
++
++fipa-struct-dfc-shadow
++Common Var(flag_ipa_struct_dfc_shadow) Init(0) Optimization
++Enable field shadowing optimization in dynamic struct field compression.
++
+ fipa-extend-auto-profile
+ Common Var(flag_ipa_extend_auto_profile)
+ Use sample profile information for source code.
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index b62ba344f..80242ddf8 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -19095,7 +19095,9 @@ override_C_optimize_options (struct gcc_options *opts)
+   opts->x_struct_layout_optimize_level = 5;
+   opts->x_flag_ipa_struct_sfc = 1;
+   opts->x_flag_ipa_struct_sfc_bitfield = 1;
+-  opts->x_flag_ipa_struct_sfc_shadow = 1;
++  opts->x_flag_ipa_struct_dfc = 1;
++  opts->x_flag_ipa_struct_dfc_bitfield = 1;
++  opts->x_flag_ipa_struct_dfc_shadow = 1;
+   opts->x_flag_gnu89_inline = 1;
+   opts->x_flag_convert_minmax = 1;
+   opts->x_flag_tree_slp_transpose_vectorize = 1;
+@@ -19151,13 +19153,13 @@ override_CPP_optimize_options (struct gcc_options *opts)
+   opts->x_param_max_inline_insns_auto = 128;
+   opts->x_param_inline_unit_growth = 256;
+   opts->x_flag_cmlt_arith = 1;
++  opts->x_flag_if_conversion_gimple = 1;
+ }
+ 
+ static void
+ override_optimize_options_1 (struct gcc_options *opts)
+ {
+   opts->x_flag_split_ldp_stp = 1;
+-  opts->x_flag_if_conversion_gimple = 1;
+   opts->x_flag_ifcvt_allow_complicated_cmps = 1;
+   opts->x_param_ifcvt_allow_register_renaming = 2;
+   opts->x_param_max_rtl_if_conversion_unpredictable_cost = 48;
+@@ -19179,8 +19181,6 @@ override_Fortran_optimize_options (struct gcc_options *opts)
+   opts->x_flag_inline_functions_called_once = 0;
+   opts->x_flag_ira_algorithm = IRA_ALGORITHM_PRIORITY;
+   opts->x_flag_delayed_branch = 1;
+-  opts->x_flag_gcse_las = 1;
+-  opts->x_flag_gcse_sm = 1;
+   opts->x_flag_ipa_pta = 1;
+   opts->x_flag_reorder_blocks_and_partition = 1;
+   opts->x_flag_reorder_blocks = 1;
+@@ -19188,6 +19188,7 @@ override_Fortran_optimize_options (struct gcc_options *opts)
+   opts->x_param_flexible_seg_len = 1;
+   opts->x_flag_alias_analysis_expand_ssa = 1;
+   opts->x_flag_chrec_mul_fold_strict_overflow = 1;
++  opts->x_flag_if_conversion_gimple = 1;
+ }
+ 
+ /* Reset the optimize option.
+@@ -19196,14 +19197,6 @@ override_Fortran_optimize_options (struct gcc_options *opts)
+ static void
+ reset_machine_option (struct gcc_options *opts)
+ {
+-  if (!(opts->x_optimize_maximum)
+-      || opts->x_aarch64_cpu_string == NULL
+-      || (strstr (opts->x_aarch64_cpu_string, "tsv110") == NULL
+-      && strstr (opts->x_aarch64_cpu_string, "hip09") == NULL))
+-    {
+-      return;
+-    }
+-
+   const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
+   if (ai_infer_level)
+     {
+diff --git a/gcc/gcc.cc b/gcc/gcc.cc
+index 5bc6b7bcf..e9851b28d 100644
+--- a/gcc/gcc.cc
++++ b/gcc/gcc.cc
+@@ -45,6 +45,7 @@ compilation is specified by a string called a "spec".  */
+ #include "filenames.h"
+ #include "spellcheck.h"
+ #include "opts-jobserver.h"
++#include "ai4c-infer.h"
+ 
+ 
+ 
+@@ -5809,6 +5810,7 @@ do_self_spec (const char *spec)
+     }
+   setenv ("GCC_AI4C_TUNE_INFO", tune_native, 1);
+ 
++
+   /* Mark %= sizeof (input))
++	    len = sizeof (input) - 1;
++	  strncpy (input, start, len);
++	  input[len] = '\0';
++	}
++    }
++  
++  bool flag_Om = false;
++  bool flag_O3 = false;
++  bool flag_mcpu = false;
++  bool flag_native = false;
++  char mcpu_name[64];
++
++  for (unsigned i = 1; i < argc; i ++)
++    {
++      if (strcmp (argv[i], "-Om") == 0)
++	flag_Om = true;
++      if (strstr (argv[i], "-O3") != NULL)
++	flag_O3 = true;
++      if (strstr (argv[i], "mcpu=native") != NULL)
++	flag_native = true;
++      if (strstr (argv[i], "mcpu=") != NULL)
++        {
++	  flag_mcpu = true;
++          const char* pos = strchr(argv[i], '=');
++	  int len = sizeof(mcpu_name) - 1;
++	  strncpy(mcpu_name, pos +1, len);
++	  mcpu_name[len] = '\0';
++        }
++    }
++
++  if ((!flag_native) && flag_mcpu)
++  {
++    strcpy(input, mcpu_name);
++  }
++
++  const int argc_hw = 6;
++  int64_t argv_hw[argc_hw] = {
++    global_options.x_param_simultaneous_prefetches,
++    global_options.x_param_l1_cache_size,
++    global_options.x_param_l1_cache_line_size,
++    global_options.x_param_l2_cache_size,
++    global_options.x_param_prefetch_latency,
++    global_options.x_param_ipa_prefetch_distance_factor};
++
++  const char *model_infer_level = secure_getenv ("AI_INFER_LEVEL");
++  
++  if ((flag_O3 || flag_Om) && (!model_infer_level) && (flag_mcpu || flag_native))
++  {
++    get_optimize_decision_from_optimizer (argc, argv, input, argc_hw, argv_hw);
++  }
++}
++
+ /* driver::main is implemented as a series of driver:: method calls.  */
+ 
+ int
+@@ -8135,6 +8220,7 @@ driver::main (int argc, char **argv)
+   maybe_putenv_OFFLOAD_TARGETS ();
+   putenv_ONNX_FDATA ();
+   handle_unrecognized_options ();
++  putenv_AI_INFER_LEVEL(argc, const_cast <const char **> (argv));
+ 
+   if (completion)
+     {
+diff --git a/gcc/gcc.h b/gcc/gcc.h
+index ff3ae8bed..2a7e28ce4 100644
+--- a/gcc/gcc.h
++++ b/gcc/gcc.h
+@@ -47,6 +47,7 @@ class driver
+   void putenv_ONNX_FDATA () const;
+   void maybe_putenv_OFFLOAD_TARGETS () const;
+   void handle_unrecognized_options ();
++  void putenv_AI_INFER_LEVEL(int argc, const char **argv);
+   int maybe_print_and_exit () const;
+   bool prepare_infiles ();
+   void do_spec_on_infiles () const;
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index b8a5f029c..18b41eb1b 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -75,7 +75,6 @@ along with GCC; see the file COPYING3.  If not see
+ #include "config.h"
+ #include "system.h"
+ #include "coretypes.h"
+-#include "tm.h"
+ #include "tree.h"
+ #include "tree-pass.h"
+ #include "cgraph.h"
+@@ -99,16 +98,17 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tree-cfg.h"
+ #include "alloc-pool.h"
+ #include "symbol-summary.h"
+-#include "ipa-prop.h"
++#include "bitmap.h"
+ #include "ipa-struct-reorg.h"
+ #include "tree-eh.h"
+-#include "bitmap.h"
+ #include "tree-ssa-live.h"  /* For remove_unused_locals.  */
+ #include "ipa-param-manipulation.h"
+ #include "gimplify-me.h"
+ #include "cfgloop.h"
+ #include "langhooks.h"
+ #include "cfgexpand.h"
++#include "gimplify.h"
++#include 
+ 
+ /* Check whether in C language or LTO with only C language.  */
+ 
+@@ -149,17 +149,31 @@ using namespace struct_relayout;
+ #define VOID_POINTER_P(type) \
+   (POINTER_TYPE_P (type) && VOID_TYPE_P (TREE_TYPE (type)))
+ 
+-#define FC_DUMP_MSG(message) \
++#define FC_DUMP_MSG(...) \
+   do \
+     { \
+       if (dump_file && (dump_flags & TDF_DETAILS)) \
+-	fprintf (dump_file, "[field compress] %s", (message)); \
++	{ \
++	  fprintf (dump_file, "[field compress] "); \
++	  fprintf (dump_file, __VA_ARGS__); \
++	} \
+     } while (0)
+ 
++#define STRING_STARTS_WITH(s, suffix) \
++  (strncmp (s, suffix, sizeof (suffix) - 1) == 0)
++
+ /* Flags for operand_equal_p to treat decls with the same name equal.  */
+ 
+ #define COMPARE_DECL_FLAGS (OEP_DECL_NAME | OEP_LEXICOGRAPHIC)
+ 
++#define APPEND_GASSIGN_1(gsi, lhs, op, rhs) \
++  gsi_insert_after (&gsi, gimple_build_assign (lhs, op, rhs), \
++		    GSI_NEW_STMT)
++
++#define APPEND_GASSIGN_2(gsi, lhs, op, rhs1, rhs2) \
++  gsi_insert_after (&gsi, gimple_build_assign (lhs, op, rhs1, rhs2), \
++		    GSI_NEW_STMT)
++
+ static void
+ set_var_attributes (tree var)
+ {
+@@ -349,6 +363,212 @@ gimple_assign_rhs_code_p (gimple *stmt, enum tree_code code)
+ 	 && gimple_assign_rhs_code (stmt) == code;
+ }
+ 
++static fc_field *
++find_fc_field (const auto_vec &fc_fields, tree field)
++{
++  for (auto *fc_f : fc_fields)
++    if (fc_f->field == field)
++      return fc_f;
++
++  return NULL;
++}
++
++/* Return true if the stmt is a copy/convert to integer.  */
++
++static bool
++is_copy_int (const gimple *stmt)
++{
++  if (!is_gimple_assign (stmt))
++    return false;
++
++  tree rhs = gimple_assign_rhs1 (stmt);
++
++  if (gimple_assign_single_p (stmt))
++    if (TREE_CODE (rhs) == SSA_NAME)
++      return true;
++
++  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)))
++    {
++      tree lhs = gimple_assign_lhs (stmt);
++
++      if (TREE_CODE (rhs) == SSA_NAME
++	  && TREE_CODE (TREE_TYPE (lhs)) == INTEGER_TYPE)
++	return true;
++    }
++
++  return false;
++}
++
++/* Strip the copy with typecasting of int or unsigned int.  */
++
++static gimple *
++strip_copy_stmts (gimple *stmt)
++{
++  while (is_copy_int (stmt))
++    stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
++
++  return stmt;
++}
++
++const char *
++get_func_name (tree decl)
++{
++  if (!decl || TREE_CODE (decl) != FUNCTION_DECL || !DECL_NAME (decl))
++    return NULL;
++
++  tree decl_name = DECL_NAME (decl);
++  if (TREE_CODE (decl_name) != IDENTIFIER_NODE)
++    return NULL;
++
++  return IDENTIFIER_POINTER (decl_name);
++}
++
++/* Compare the gimple order of input_ssa for fc fields.  */
++
++static int
++input_order_cmp (const void *p, const void *q)
++{
++  const fc_field *a = *static_cast (p);
++  const fc_field *b = *static_cast (q);
++
++  gimple *ga = SSA_NAME_DEF_STMT (a->input_ssa);
++  gimple *gb = SSA_NAME_DEF_STMT (b->input_ssa);
++
++  if (gimple_uid (ga) < gimple_uid (gb))
++    return -1;
++  else if (gimple_uid (ga) > gimple_uid (gb))
++    return 1;
++  else
++    return 0;
++}
++
++/* Called by walk_tree to check if ssa_name DATA exists in an expression.  */
++
++static tree
++check_for_ssa (tree *opnd_ptr, int *walk_subtrees ATTRIBUTE_UNUSED, void *data)
++{
++  tree ssa = (tree) data;
++  if (*opnd_ptr == ssa)
++    return ssa;
++
++  return NULL_TREE;
++}
++
++/* Helper to create a function declaration together with arguments and result
++   declarations.  */
++
++static tree
++create_new_fn_decl (char *fn_name, int n_args, tree *arg_types,
++		    tree return_type)
++{
++  tree fn_type = build_function_type_array (return_type, n_args, arg_types);
++  tree fndecl = build_fn_decl (fn_name, fn_type);
++  tree id = get_identifier (fn_name);
++  SET_DECL_ASSEMBLER_NAME (fndecl, id);
++  DECL_NAME (fndecl) = id;
++  DECL_ARTIFICIAL (fndecl) = 1;
++  DECL_EXTERNAL (fndecl) = 0;
++  DECL_CONTEXT (fndecl) = NULL_TREE;
++  DECL_INITIAL (fndecl) = make_node (BLOCK);
++  DECL_STATIC_CONSTRUCTOR (fndecl) = 0;
++
++  /* Function result declairation.  */
++  tree resdecl
++    = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, return_type);
++  DECL_RESULT (fndecl) = resdecl;
++
++  /* Function arguments.  */
++  tree prev_arg = NULL_TREE;
++  for (int i = 0; i < n_args; i++)
++    {
++      tree arg_decl
++	= build_decl (UNKNOWN_LOCATION, PARM_DECL, NULL, arg_types[i]);
++      DECL_ARTIFICIAL (arg_decl) = 1;
++      DECL_IGNORED_P (arg_decl) = 1;
++      TREE_USED (arg_decl) = 1;
++      DECL_CONTEXT (arg_decl) = fndecl;
++      DECL_ARG_TYPE (arg_decl) = arg_types[i];
++      TREE_READONLY (arg_decl) = 1;
++      if (prev_arg)
++	TREE_CHAIN (prev_arg) = arg_decl;
++      else
++	DECL_ARGUMENTS (fndecl) = arg_decl;
++      prev_arg = arg_decl;
++    }
++
++  return fndecl;
++}
++
++static void
++release_srdecl_ssa_name (srdecl *srd)
++{
++  if (!srd->has_new_decl ())
++    return;
++
++  tree ssa_name = NULL_TREE;
++  if (srd->argumentnum >= 0)
++    ssa_name = ssa_default_def (cfun, srd->decl);
++  else if (TREE_CODE (srd->decl) == SSA_NAME)
++    ssa_name = srd->decl;
++
++  if (ssa_name && num_imm_uses (ssa_name) == 0)
++    release_ssa_name (ssa_name);
++}
++
++static char *
++append_suffix (const char *s1, unsigned suffix)
++{
++  char s2[32];
++  sprintf (s2, "%u", suffix);
++  return concat (s1, s2, NULL);
++}
++
++static unsigned HOST_WIDE_INT
++get_bitsize (tree field)
++{
++  tree bitsize = DECL_BIT_FIELD (field) ? DECL_SIZE (field)
++					: TYPE_SIZE (TREE_TYPE (field));
++  return tree_to_uhwi (bitsize);
++}
++
++/* Generate SSA_NAME for the given var.  */
++
++static tree
++generate_ssa_name (tree var, gimple_stmt_iterator *gsi)
++{
++  if (TREE_CODE (var) == SSA_NAME)
++    return var;
++
++  tree name = make_ssa_name (TREE_TYPE (var));
++  gimple *stmt = gimple_build_assign (name, var);
++  gsi_insert_after (gsi, stmt, GSI_NEW_STMT);
++
++  return name;
++}
++
++/* Get type node by bit precision and sign.  */
++
++static tree
++get_integer_type_node (unsigned precision, bool is_unsigned)
++{
++  switch (precision)
++    {
++      case 64:
++	return is_unsigned ? long_long_unsigned_type_node
++			   : long_long_integer_type_node;
++      case 32:
++	return is_unsigned ? unsigned_type_node : integer_type_node;
++      case 16:
++	return is_unsigned ? short_unsigned_type_node
++			   : short_integer_type_node;
++      case 8:
++	return is_unsigned ? unsigned_char_type_node
++			   : signed_char_type_node;
++      default:
++	return NULL_TREE;
++    }
++}
++
+ /* Enum the struct layout optimize level,
+    which should be the same as the option -fstruct-reorg=.  */
+ 
+@@ -364,6 +584,15 @@ enum struct_layout_opt_level
+   SEMI_RELAYOUT = 1 << 6
+ };
+ 
++enum class fc_level
++{
++  NONE,
++  STATIC,
++  DYNAMIC
++};
++
++fc_level current_fc_level;
++
+ srfunction *current_function;
+ vec<srfunction *> csrfun_stack;
+ 
+@@ -387,6 +616,31 @@ public:
+ 
+ #define SET_CFUN(srfn) csrfun_context csrfn_ctx(srfn);
+ 
++/* RAII class to change current dump_file and dump_flags,
++   and restore when the object goes out of scope.  */
++
++class dump_file_saver
++{
++public:
++  dump_file_saver (FILE *file, dump_flags_t flags)
++  {
++    old_dump_file = dump_file;
++    old_dump_flags = dump_flags;
++    dump_file = file;
++    dump_flags = flags;
++  }
++  ~dump_file_saver ()
++  {
++    dump_file = old_dump_file;
++    dump_flags = old_dump_flags;
++  }
++private:
++  FILE *old_dump_file;
++  dump_flags_t old_dump_flags;
++};
++
++#define SET_DUMP_FILE(file, flags) dump_file_saver fd_saver(file, flags);
++
+ /* Defines the target pointer size of compressed pointer, which should be 8,
+    16, 32.  */
+ 
+@@ -502,7 +756,7 @@ srfield::srfield (tree field, srtype *base)
+     type (NULL),
+     clusternum (0),
+     field_access (EMPTY_FIELD),
+-    static_fc_field (NULL),
++    fc_f (NULL),
+     field_class (NULL)
+ {
+   for (int i = 0; i < max_split; i++)
+@@ -531,7 +785,8 @@ srtype::srtype (tree type)
+     {
+       if (TREE_CODE (field) == FIELD_DECL)
+ 	{
+-	  if (DECL_BIT_FIELD (field))
++	  if (current_fc_level != fc_level::DYNAMIC
++	      && DECL_BIT_FIELD (field))
+ 	    {
+ 	      escapes = escape_bitfields;
+ 	      continue;
+@@ -698,6 +953,20 @@ srfunction::record_decl (srtype *type, tree decl, int arg, tree orig_type)
+   return decl1;
+ }
+ 
++/* A function is either partially cloned or fully cloned (versioning).  */
++
++bool
++srfunction::partial_clone_p ()
++{
++  return fc_path.start_stmt != NULL;
++}
++
++bool
++srfunction::entry_function_p ()
++{
++  return strcmp (node->name (), "main") == 0 && !node->callers;
++}
++
+ /* Find the field at OFF offset.  */
+ 
+ srfield *
+@@ -716,6 +985,17 @@ srtype::find_field (unsigned HOST_WIDE_INT off)
+   return NULL;
+ }
+ 
++/* Find the field according to field decl.  */
++srfield *
++srtype::find_field_by_decl (tree fielddecl)
++{
++  for (auto *field : fields)
++    if (operand_equal_p (fielddecl, field->fielddecl, COMPARE_DECL_FLAGS))
++      return field;
++
++  return NULL;
++}
++
+ /* Add the function FN to the list of functions if it
+    is there not already.  */
+ 
+@@ -900,8 +1180,9 @@ srfield::reorder_fields (tree newfields[max_split], tree newlast[max_split],
+   else
+     {
+       tree tmp = newfields[clusternum];
+-      if (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)))
+-	  > tree_to_uhwi (TYPE_SIZE (TREE_TYPE (tmp))))
++      auto field_bitsize = get_bitsize (field);
++      auto tmp_bitsize = get_bitsize (tmp);
++      if (field_bitsize > tmp_bitsize)
+ 	{
+ 	  DECL_CHAIN (field) = tmp;
+ 	  newfields[clusternum] = field;
+@@ -909,9 +1190,7 @@ srfield::reorder_fields (tree newfields[max_split], tree newlast[max_split],
+       else
+ 	{
+ 	  while (DECL_CHAIN (tmp)
+-		 && (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (field)))
+-		     <= tree_to_uhwi (
+-				TYPE_SIZE (TREE_TYPE (DECL_CHAIN (tmp))))))
++		 && field_bitsize <= get_bitsize (DECL_CHAIN (tmp)))
+ 	    tmp = DECL_CHAIN (tmp);
+ 
+ 	  /* Now tmp size > field size
+@@ -932,11 +1211,18 @@ srfield::create_new_reorder_fields (tree newtype[max_split],
+ 				    tree newfields[max_split],
+ 				    tree newlast[max_split])
+ {
++  /* For dynamic shadow.  */
++  if (current_fc_level == fc_level::DYNAMIC && fc_f && fc_f->original)
++    {
++      newfield[0] = NULL_TREE;
++      return;
++    }
++
+   /* newtype, corresponding to newtype[max_split] in srtype.  */
+   tree nt = NULL_TREE;
+   if (type == NULL)
+     /* Common var.  */
+-    nt = static_fc_field ? static_fc_field->new_type : fieldtype;
++    nt = fc_f ? fc_f->new_type : fieldtype;
+   else
+     /* RECORD_TYPE var.  */
+     nt = type->has_escaped () ? type->type : type->newtype[0];
+@@ -986,24 +1272,28 @@ srfield::create_new_reorder_fields (tree newtype[max_split],
+   TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (fielddecl);
+   DECL_CONTEXT (field) = newtype[clusternum];
+ 
+-  if (flag_ipa_struct_sfc && base->fc_info && base->fc_info->static_fc_p)
++  fc_type_info *info = base->fc_info;
++  if (info && (info->static_fc_p || info->dynamic_fc_p))
+     {
+       DECL_PACKED (field) = 1;
++      DECL_BIT_FIELD (field) = DECL_BIT_FIELD (fielddecl);
++      if (DECL_BIT_FIELD (field))
++	DECL_SIZE (field) = DECL_SIZE (fielddecl);
+ 
+-      if (static_fc_field)
++      if (fc_f)
+ 	{
+ 	  /* Always not align compressed fields.  */
+ 	  SET_DECL_ALIGN (field, 0);
+ 
+-	  if (static_fc_field->bits)
++	  if (fc_f->bits)
+ 	    {
+ 	      DECL_BIT_FIELD (field) = 1;
+-	      DECL_SIZE (field) = bitsize_int (static_fc_field->bits);
++	      DECL_SIZE (field) = bitsize_int (fc_f->bits);
+ 	      DECL_NONADDRESSABLE_P (field) = 1;
+ 	      /* Build unsigned bitfield integer type.  */
+-	      nt = build_nonstandard_integer_type (static_fc_field->bits, 1);
++	      nt = build_nonstandard_integer_type (fc_f->bits, 1);
+ 	      TREE_TYPE (field) = nt;
+-	      static_fc_field->new_type = nt;
++	      fc_f->new_type = nt;
+ 	    }
+ 	}
+     }
+@@ -1022,6 +1312,19 @@ srfield::dead_field_p ()
+ 	       && !FUNCTION_POINTER_TYPE_P (fieldtype);
+ }
+ 
++bool
++srfield::dfc_type_change_p ()
++{
++  return fc_f && fc_f->cond
++	 && fc_f->cond->old_type != TREE_TYPE (newfield[0]);
++}
++
++fc_closure *
++srfield::get_closure ()
++{
++  return &(field_class->closure);
++}
++
+ /* Given a struct s whose fields has already reordered by size, we try to
+    combine fields less than 8 bytes together to 8 bytes.  Example:
+    struct s {
+@@ -1095,22 +1398,38 @@ srtype::has_recursive_field_type ()
+ void
+ srtype::check_fc_fields ()
+ {
+-  if (!fc_info || !fc_info->static_fc_p)
++  if (!fc_info || (!fc_info->static_fc_p && !fc_info->dynamic_fc_p))
+     return;
+ 
+-  for (unsigned i = 0; i < fields.length (); i++)
++  for (auto *srf : fields)
+     {
+-      fc_field *fc_f;
+-      unsigned j;
+-      FOR_EACH_VEC_ELT (fc_info->static_fc_fields, j, fc_f)
+-      if (fields[i]->fielddecl == fc_f->field)
++      tree field = srf->fielddecl;
++      if (fc_info->static_fc_p)
++	srf->fc_f = find_fc_field (fc_info->static_fc_fields, field);
++      else
+ 	{
+-	  fields[i]->static_fc_field = fc_f;
+-	  break;
++	  srf->fc_f = find_fc_field (fc_info->dynamic_shadow_fields, field);
++	  if (!srf->fc_f)
++	    srf->fc_f = find_fc_field (fc_info->dynamic_fc_fields, field);
+ 	}
+     }
+ }
+ 
++bool
++srtype::reorg_name_p ()
++{
++  const char *name = get_type_name (type);
++  return name && strstr (name, ".reorg");
++}
++
++bool
++srtype::has_escaped ()
++{
++  return escapes != does_not_escape
++	 && (current_fc_level != fc_level::DYNAMIC
++	     || !reorg_name_p ());
++}
++
+ /* Create the new TYPE corresponding to THIS type.  */
+ 
+ bool
+@@ -1124,7 +1443,7 @@ srtype::create_new_type (void)
+ 
+   visited = true;
+ 
+-  if (escapes != does_not_escape)
++  if (has_escaped ())
+     {
+       newtype[0] = type;
+       return false;
+@@ -1176,10 +1495,10 @@ srtype::create_new_type (void)
+       if (tname)
+ 	{
+ 	  name = concat (tname, ".reorg.", id, NULL);
+-	  TYPE_NAME (newtype[i]) = build_decl (UNKNOWN_LOCATION,
+-					       TYPE_DECL,
+-					       get_identifier (name),
+-					       newtype[i]);
++	  tree name_id = get_identifier (name);
++	  TYPE_STUB_DECL (newtype[i])
++	    = build_decl (UNKNOWN_LOCATION, TYPE_DECL, name_id, newtype[i]);
++	  TYPE_NAME (newtype[i]) = name_id;
+ 	  free (name);
+ 	}
+     }
+@@ -1206,8 +1525,12 @@ srtype::create_new_type (void)
+     {
+       TYPE_FIELDS (newtype[i]) = newfields[i];
+       layout_type (newtype[i]);
+-      if (TYPE_NAME (newtype[i]) != NULL)
+-	layout_decl (TYPE_NAME (newtype[i]), 0);
++    }
++
++  if (current_fc_level == fc_level::DYNAMIC)
++    {
++      gcc_assert (maxclusters == 1);
++      fc_info->variant->new_type = newtype[0];
+     }
+ 
+   warn_padded = save_warn_padded;
+@@ -1277,7 +1600,7 @@ srfunction::create_new_decls (void)
+     return;
+ 
+   if (node)
+-    set_cfun (DECL_STRUCT_FUNCTION (node->decl));
++    push_cfun (DECL_STRUCT_FUNCTION (node->decl));
+ 
+   for (unsigned i = 0; i < decls.length (); i++)
+     {
+@@ -1388,7 +1711,8 @@ srfunction::create_new_decls (void)
+ 	}
+     }
+ 
+-  set_cfun (NULL);
++  if (node)
++    pop_cfun ();
+ }
+ 
+ /* Dump out the field structure to FILE.  */
+@@ -1459,34 +1783,34 @@ sraccess::dump (FILE *f) const
+   fprintf (f, "}\n");
+ }
+ 
+-/* Check if it's an assignment to the given type.  */
++/* Check if it's an assignment to the given field(fielddecl != NULL_TREE)
++   or any field(fielddecl == NULL_TREE).  */
+ 
+ bool
+-sraccess::write_type_p (tree type) const
++sraccess::write_field_p (tree fielddecl) const
+ {
+-  return this->type && this->type->type == type
+-	 && is_gimple_assign (stmt)
+-	 && index == 0;
++  return write_p () && field && (!fielddecl || field->fielddecl == fielddecl);
+ }
+ 
+-/* Check if it's an assignment to the given field.  */
++/* Check if it's an assignment that read the given
++   field(fielddecl != NULL_TREE) or any field(fielddecl == NULL_TREE).  */
+ 
+ bool
+-sraccess::write_field_p (tree fielddecl) const
++sraccess::read_field_p (tree fielddecl) const
+ {
+-  return field && field->fielddecl == fielddecl
+-	 && is_gimple_assign (stmt)
+-	 && index == 0;
++  return read_p () && field && (!fielddecl || field->fielddecl == fielddecl);
+ }
+ 
+-/* Check if it's an assignment that read the given field.  */
++bool
++sraccess::write_p () const
++{
++  return is_gimple_assign (stmt) && index == 0;
++}
+ 
+ bool
+-sraccess::read_field_p (tree fielddecl) const
++sraccess::read_p () const
+ {
+-  return field && field->fielddecl == fielddecl
+-	 && is_gimple_assign (stmt)
+-	 && index > 0;
++  return is_gimple_assign (stmt) && index > 0;
+ }
+ 
+ /* Dump out the decl structure to FILE.  */
+@@ -1504,6 +1828,133 @@ srdecl::dump (FILE *file)
+   type->simple_dump (file);
+ }
+ 
++void
++fc_closure::add_read_change (gimple *stmt)
++{
++  if (!read_change_set.contains (stmt))
++    read_change_set.add (stmt);
++}
++
++bool
++fc_closure::read_change_p (gimple *stmt)
++{
++  return read_change_set.contains (stmt);
++}
++
++void
++fc_closure::add_read_unchange (gimple *stmt)
++{
++  if (!read_unchange_set.contains (stmt))
++    read_unchange_set.add (stmt);
++}
++
++bool
++fc_closure::read_unchange_p (gimple *stmt)
++{
++  return read_unchange_set.contains (stmt);
++}
++
++void
++fc_closure::add_write_change (gimple *stmt)
++{
++  if (!write_change_set.contains (stmt))
++    write_change_set.add (stmt);
++}
++
++bool
++fc_closure::write_change_p (gimple *stmt)
++{
++  return write_change_set.contains (stmt);
++}
++
++void
++fc_closure::add_write_unchange (gimple *stmt)
++{
++  if (!write_unchange_set.contains (stmt))
++    write_unchange_set.add (stmt);
++}
++
++bool
++fc_closure::write_unchange_p (gimple *stmt)
++{
++  return write_unchange_set.contains (stmt);
++}
++
++bool
++fc_closure::change_p (gimple *stmt)
++{
++  return write_change_p (stmt) || read_change_p (stmt);
++}
++
++bool
++fc_closure::unchange_p (gimple *stmt)
++{
++  return write_unchange_p (stmt) || read_unchange_p (stmt);
++}
++
++/* Call compress/decompress function for rhs.  */
++
++tree
++fc_closure::convert_rhs (tree rhs, tree fn)
++{
++  tree newrhs = build_call_expr (fn, 1, rhs);
++  cgraph_node *callee = cgraph_node::get (fn);
++  cgraph_node *node = cgraph_node::get (current_function_decl);
++  node->create_edge (callee, NULL, profile_count::zero ());
++
++  return newrhs;
++}
++
++void
++closure_helper::record_origin_closure (basic_block bb)
++{
++  for (auto si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
++    {
++      gimple *stmt = gsi_stmt (si);
++      if (!is_gimple_assign (stmt))
++	continue;
++
++      uid++;
++
++      if (cinfo->read_change_p (stmt))
++	bitmap_set_bit (read_change_map, uid);
++      else if (cinfo->write_change_p (stmt))
++	bitmap_set_bit (write_change_map, uid);
++      else if (cinfo->read_unchange_p (stmt))
++	bitmap_set_bit (read_unchange_map, uid);
++      else if (cinfo->write_unchange_p (stmt))
++	bitmap_set_bit (write_unchange_map, uid);
++    }
++}
++
++void
++closure_helper::add_cloned_closure (basic_block bb)
++{
++  for (auto si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
++    {
++      gimple *stmt = gsi_stmt (si);
++      if (!is_gimple_assign (stmt))
++	continue;
++
++      uid++;
++
++      if (bitmap_bit_p (read_change_map, uid))
++	cinfo->add_read_change (stmt);
++      else if (bitmap_bit_p (write_change_map, uid))
++	cinfo->add_write_change (stmt);
++      else if (bitmap_bit_p (read_unchange_map, uid))
++	cinfo->add_read_unchange (stmt);
++      else if (bitmap_bit_p (write_unchange_map, uid))
++	cinfo->add_write_unchange (stmt);
++    }
++}
++
++void
++closure_helper::reset_uid ()
++{
++  uid = 0;
++}
++
+ void
+ fc_field_class::dump (FILE *file) const
+ {
+@@ -1545,6 +1996,37 @@ fc_field_class::get_field_index (srfield *field) const
+   return -1;
+ }
+ 
++void
++fc_ref::dump (FILE *file) const
++{
++  fprintf (file, "var: ");
++  print_generic_expr (dump_file, var);
++  fprintf (dump_file, ", type: ");
++  print_generic_expr (dump_file, orig_type ? orig_type : TREE_TYPE (var));
++  fprintf (dump_file, ", array: ");
++  print_generic_expr (dump_file, source->var);
++  if (size)
++    {
++      fprintf (dump_file, ", array size: ");
++      print_generic_expr (dump_file, size);
++    }
++  if (field)
++    {
++      fprintf (dump_file, ", field: ");
++      print_generic_expr (dump_file, field);
++    }
++  fprintf (dump_file, "\n");
++}
++
++fc_type_info::~fc_type_info ()
++{
++  if (variant)
++    {
++      delete variant;
++      variant = NULL;
++    }
++}
++
+ fc_field_class *
+ fc_type_info::find_field_class_by_type (tree type) const
+ {
+@@ -1577,12 +2059,131 @@ fc_type_info::record_field_class (srfield *srf)
+   return field_class;
+ }
+ 
+-} // namespace struct_reorg
++fc_cond *
++fc_type_info::find_cond (tree type) const
++{
++  for (auto *cond : fc_conds)
++    {
++      if (cond->old_type == type)
++	return cond;
++    }
+ 
++  return NULL;
++}
+ 
+-namespace struct_relayout {
++fc_cond *
++fc_type_info::create_cond (tree type)
++{
++  fc_cond *cond = find_cond (type);
++  if (cond)
++    return cond;
+ 
+-/* Complete Structure Relayout Optimization.
++  /* New cond will be stored in an auto_delete_vec(fc_conds).  */
++  cond = new fc_cond (type);
++  fc_conds.safe_push (cond);
++
++  /* Record the fc_cond to corresponding fc_field_class.  */
++  fc_field_class *field_class = find_field_class_by_type (type);
++  gcc_assert (field_class);
++  field_class->cond = cond;
++  cond->field_class = field_class;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Create new fc_cond, type: ");
++      print_generic_expr (dump_file, type);
++      fprintf (dump_file, "\n");
++    }
++
++  return cond;
++}
++
++void
++fc_type_info::record_cond (fc_field *fc_f)
++{
++  if (fc_f->cond)
++    return;
++
++  fc_cond *cond = create_cond (TREE_TYPE (fc_f->field));
++  fc_f->cond = cond;
++  cond->fields.safe_push (fc_f);
++}
++
++fc_path_info::~fc_path_info ()
++{
++  if (cloned_func)
++    delete cloned_func;
++}
++
++/* Search and store basic_blocks that:
++   1) can reach STMT (when DIRECTION == PRED);
++   2) can be reached from STMT (when DIRECTION == SUCC).
++   Return false if field compression cannot be performed.  */
++
++bool
++fc_path_info::collect_blocks (gimple *stmt, direction dir)
++{
++  basic_block start_bb = gimple_bb (stmt);
++  if (!start_bb)
++    return false;
++
++  /* The start block should not be in a loop.  */
++  if (start_bb->loop_father != NULL
++      && loop_outer (start_bb->loop_father) != NULL)
++    return false;
++
++  bool prev = dir == direction::PRED;
++  basic_block stop_bb = prev ? ENTRY_BLOCK_PTR_FOR_FN (cfun)
++			     : EXIT_BLOCK_PTR_FOR_FN (cfun);
++  auto *store_list = prev ? &pre_bbs : &reach_bbs;
++
++  auto_bitmap visited;
++  auto_vec<basic_block> worklist;
++  worklist.safe_push (start_bb);
++  bool exit_p = false;
++
++  while (!worklist.is_empty ())
++    {
++      basic_block bb = worklist.pop ();
++      if (!bitmap_set_bit (visited, bb->index))
++	continue;
++
++      if (bb != stop_bb)
++	store_list->safe_push (bb);
++      else
++	exit_p = true;
++
++      if (prev)
++	for (auto *e : bb->preds)
++	  worklist.safe_push (e->src);
++      else
++	for (auto *e : bb->succs)
++	  worklist.safe_push (e->dest);
++    }
++
++  if (!exit_p)
++    return false;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "Found %d blocks in func ", store_list->length ());
++      print_generic_expr (dump_file, cfun->decl);
++      fprintf (dump_file, " %s:\n",
++	       prev ? "before start-point" : "to be cloned");
++      for (auto *bb : *store_list)
++	fprintf (dump_file, "%d ", bb->index);
++      fprintf (dump_file, "\n\n");
++    }
++
++  return true;
++}
++
++} // namespace struct_reorg
++
++
++namespace struct_relayout {
++
++/* Complete Structure Relayout Optimization.
+    It reorganizes all structure members, and puts same member together.
+    struct s {
+      long a;
+@@ -1680,7 +2281,11 @@ struct const_map
+ struct ipa_struct_reorg
+ {
+ private:
++  /* The current srfield set in rewrite_expr.  For dfc.  */
++  srfield *cur_srfd;
++
+   auto_vec_del<const_map> global_consts;
++  hash_set<tree> visited_vars;
+ 
+ public:
+   // Constructors
+@@ -1712,6 +2317,8 @@ public:
+   void detect_cycles (void);
+   bool walk_field_for_cycles (srtype *);
+   void prune_escaped_types (void);
++  void prune_function (srfunction *);
++  void prune_globals ();
+   void propagate_escape (void);
+   void propagate_escape_via_original (void);
+   void propagate_escape_via_empty_with_no_original (void);
+@@ -1725,11 +2332,12 @@ public:
+   void create_new_functions (void);
+   void create_new_args (cgraph_node *new_node);
+   unsigned rewrite_functions (void);
++  void rewrite_block (basic_block);
+   srdecl *record_var (tree decl,
+ 		      escape_type escapes = does_not_escape,
+ 		      int arg = -1);
+   void record_safe_func_with_void_ptr_parm (void);
+-  srfunction *record_function (cgraph_node *node);
++  srfunction *record_function (cgraph_node *node, srfunction *sfn = NULL);
+   srfunction *find_function (cgraph_node *node);
+   void record_field_type (tree field, srtype *base_srtype);
+   void record_struct_field_types (tree base_type, srtype *base_srtype);
+@@ -1856,8 +2464,8 @@ public:
+ 			     const auto_vec &);
+   srfield *read_field_in_fc_class_p (gimple *, fc_field_class *);
+   srfield *write_field_in_fc_class_p (gimple *, fc_field_class *);
+-  fc_field *fc_fields_contains (auto_vec &, tree);
+   bool fc_pair_stmts_rhs_equal_p (const auto_vec &);
++  bool unique_init_const_p (const fc_shadow_info &);
+   bool fc_operand_equal_p (tree, tree);
+   bool fc_global_const_p (tree, HOST_WIDE_INT &);
+   bool fc_peephole_const_p (tree, HOST_WIDE_INT &);
+@@ -1873,10 +2481,92 @@ public:
+   bool find_hot_access (fc_type_info *, auto_vec &);
+   void cleanup_shadow_write (fc_type_info *);
+   void rewrite_shadow_read (fc_type_info *);
+-  void insert_shadow_stmt (gimple *, unsigned, fc_field *, tree);
++  void modify_shadow_read (gimple *, unsigned, fc_field *, tree);
+   bool compress_fields_static (fc_type_info *info);
+-  void compress_to_bitfields (fc_type_info *info);
++  bool compress_to_bitfield_static (fc_type_info *info);
++  bool compress_to_bitfield_dynamic (fc_type_info *info);
+   auto_vec collect_all_predecessor (gimple *);
++  bool types_fc_equal_p (tree, tree);
++  bool types_fc_compatible_p (tree, tree);
++  bool find_dynamic_fc_fields (fc_type_info *);
++  bool find_fields_in_input_stmt (fc_type_info *);
++  bool find_input_stmt (gimple *, gimple *&, gimple *&);
++  tree find_file_handler (gimple *);
++  bool find_fopen_fclose (fc_type_info *);
++  bool check_dynamic_shadow_fields (fc_type_info *);
++  bool find_fc_paths (fc_type_info *);
++  bool find_fc_data (fc_type_info *);
++  bool find_fc_arrays (fc_type_info *);
++  bool find_fc_array (fc_type_info *, tree, varpool_node *);
++  bool duplicative_array_p (fc_type_info *, tree);
++  bool is_stmt_before_fclose (fc_type_info *, gimple *, symtab_node *);
++  bool reorg_ptr_p (tree);
++  bool get_allocate_size_iterate (tree, gimple *, tree &, tree * = NULL);
++  bool get_allocate_size_assign (tree, gassign *, tree &, tree *);
++  bool get_allocate_size_call (tree, gcall *, tree &, tree *);
++  bool get_allocate_size_reorg_ptr (gimple *, tree &);
++  tree get_allocate_size (tree, tree, tree, gimple *);
++  bool find_fc_refs (fc_type_info *);
++  bool find_fc_refs_iterate (fc_type_info *, fc_array *, tree, bool);
++  bool find_fc_refs_ssa_name (fc_type_info *, fc_array *, tree, bool);
++  bool find_fc_refs_mem_ref (fc_type_info *, fc_array *, tree);
++  bool find_fc_refs_component_ref (fc_type_info *, fc_array *, tree);
++  bool fc_type_pointer_p (fc_type_info *, tree);
++  bool add_fc_ref (fc_type_info *, fc_array *, tree, tree);
++  check_ref_result check_duplicative_ref (fc_type_info *, fc_array *, tree,
++  					  tree, tree &, tree &);
++  gimple *find_def_stmt_before_fclose (fc_type_info *, tree);
++  tree get_ptr_decl (tree);
++  bool check_fc_array_uses (fc_type_info *);
++  void calc_fc_ref_count (fc_type_info *);
++  bool compress_fields_dynamic (fc_type_info *);
++  bool calc_dynamic_boundary (fc_type_info *);
++  bool fc_cond_field_p (tree, const fc_cond *);
++  bool fc_input_ssa_p (tree, const fc_cond *);
++  bool fc_field_load_p (tree, const fc_cond *);
++  void update_high_bound (fc_cond *, HOST_WIDE_INT);
++  bool check_closure (fc_type_info *);
++  bool check_closure (fc_type_info *, fc_cond *);
++  bool write_field_class_only_p (fc_type_info *, fc_field_class *, tree);
++  void collect_closure_read_change (fc_type_info *, fc_field_class *);
++  unsigned execute_dynamic_field_compression ();
++  unsigned dynamic_fc_rewrite ();
++  bool create_dynamic_fc_newtypes ();
++  void create_dynamic_fc_variant (fc_type_info *);
++  void create_global_var_dfc_path (fc_type_info *);
++  void create_dynamic_fc_convert_fn (fc_type_info *);
++  tree create_convert_fn (fc_cond *, unsigned, bool);
++  edge create_normal_part (fc_cond *);
++  void create_conversion_part (fc_cond *, edge, bool);
++  void clone_dynamic_fc_path (fc_type_info *);
++  void clone_partial_func (fc_type_info *, srfunction *);
++  void clone_whole_func (srfunction *);
++  void rewrite_dynamic_shadow_fields (fc_type_info *);
++  void rewrite_dynamic_fc_path ();
++  void record_dfc_path_info (fc_type_info *);
++  void collect_closure_info_dynamic (fc_type_info *);
++  void collect_closure_info_partial (srfunction *, fc_closure *);
++  void collect_closure_info_whole (srfunction *, fc_closure *);
++  void rewrite_partial_func (srfunction *);
++  void rewrite_whole_func (srfunction *);
++  void clean_func_after_rewrite (srfunction *);
++  void dynamic_fc_rewrite_assign (gimple *, tree, tree &, tree &);
++  void add_dynamic_checking (fc_type_info *);
++  void insert_code_calc_dfc_path (fc_type_info *);
++  void insert_code_calc_max_min_val (fc_type_info *);
++  tree insert_code_calc_cond (fc_type_info *, gimple_stmt_iterator *);
++  void insert_code_check_init_const (fc_type_info *, gimple_stmt_iterator *,
++				     tree &);
++  void insert_code_compress_data (fc_type_info *, edge);
++  void insert_code_compress_variant (fc_type_info *, basic_block,
++				     const auto_vec &,
++				     const auto_vec &);
++  void insert_code_compress_array (fc_type_info *, edge &,
++				   const auto_vec &,
++				   const auto_vec &);
++  void insert_code_modify_refs (fc_type_info *, edge);
++  void create_compress_object_fn (fc_type_info *);
++  edge insert_code_modify_single_ref (edge, tree, fc_array *, tree, tree);
+ };
+ 
+ struct ipa_struct_relayout
+@@ -2248,6 +2938,8 @@ ipa_struct_relayout::rewrite_address (tree xhs, gimple_stmt_iterator *gsi)
+   /* Emit gimple _X4 = gptr[I].  */
+   tree gptr_field_ssa = create_ssa (gptr[field_num], gsi);
+   tree new_address = make_ssa_name (TREE_TYPE (gptr[field_num]));
++  tree new_address_type = TREE_TYPE (new_address);
++  tree new_type = TREE_TYPE (new_address_type);
+   gassign *new_stmt = gimple_build_assign (new_address, POINTER_PLUS_EXPR,
+ 					   gptr_field_ssa, step3);
+   gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
+@@ -2257,9 +2949,15 @@ ipa_struct_relayout::rewrite_address (tree xhs, gimple_stmt_iterator *gsi)
+      should be transformed to
+        MEM[gptr + sizeof (member)] = 0B
+   */
+-  HOST_WIDE_INT size
+-    = tree_to_shwi (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_address))));
+-  tree new_size = rewrite_offset (pointer_offset, size);
++  tree new_size = NULL_TREE;
++  if (integer_zerop (pointer_offset))
++    new_size = build_int_cst (TREE_TYPE (new_address), 0);
++  else
++    {
++      HOST_WIDE_INT size = tree_to_shwi (TYPE_SIZE_UNIT (new_type));
++      new_size = rewrite_offset (pointer_offset, size);
++    }
++
+   if (new_size)
+     TREE_OPERAND (mem_ref, 1) = new_size;
+ 
+@@ -2531,7 +3229,7 @@ ipa_struct_reorg::dump_newtypes (FILE *f)
+     srtype *type = NULL;
+     FOR_EACH_VEC_ELT (types, i, type)
+     {
+-	if (type->has_escaped ())
++	if (!type->has_new_type ())
+ 	  continue;
+ 	fprintf (f, "======= the %dth newtype: ======\n", i);
+ 	fprintf (f, "type : ");
+@@ -3134,7 +3832,7 @@ check_each_call (cgraph_node *node, cgraph_edge *caller)
+ 	return false;
+     }
+ 
+-  if (!check_node_def (ptr_layers))
++  if (current_fc_level != fc_level::DYNAMIC && !check_node_def (ptr_layers))
+     return false;
+   return true;
+ }
+@@ -4066,61 +4764,10 @@ ipa_struct_reorg::handled_allocation_stmt (gimple *stmt)
+ tree
+ ipa_struct_reorg::allocate_size (srtype *type, srdecl *decl, gimple *stmt)
+ {
+-  if (!stmt
+-      || gimple_code (stmt) != GIMPLE_CALL
+-      || !handled_allocation_stmt (stmt))
+-    {
+-      if (dump_file && (dump_flags & TDF_DETAILS))
+-	{
+-	  fprintf (dump_file, "\nNot a allocate statment:\n");
+-	  print_gimple_stmt (dump_file, stmt, 0);
+-	  fprintf (dump_file, "\n");
+-	}
+-      return NULL;
+-    }
+-
+   if (type->has_escaped ())
+     return NULL;
+ 
+-  tree struct_size = TYPE_SIZE_UNIT (type->type);
+-
+-  /* Specify the correct size to relax multi-layer pointer.  */
+-  if (TREE_CODE (decl->decl) == SSA_NAME && isptrptr (decl->orig_type))
+-    struct_size = TYPE_SIZE_UNIT (decl->orig_type);
+-
+-  tree size = gimple_call_arg (stmt, 0);
+-
+-  if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)
+-      || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC))
+-    size = gimple_call_arg (stmt, 1);
+-  else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
+-    {
+-      tree arg1;
+-      arg1 = gimple_call_arg (stmt, 1);
+-      /* Check that second argument is a constant equal to
+-	 the size of structure.  */
+-      if (operand_equal_p (arg1, struct_size, 0))
+-	return size;
+-      /* ??? Check that first argument is a constant
+-	 equal to the size of structure.  */
+-      /* If the allocated number is equal to the value of struct_size,
+-	 the value of arg1 is changed to the allocated number.  */
+-      if (operand_equal_p (size, struct_size, 0))
+-	return arg1;
+-      if (dump_file && (dump_flags & TDF_DETAILS))
+-	{
+-	  fprintf (dump_file, "\ncalloc the correct size:\n");
+-	  print_gimple_stmt (dump_file, stmt, 0);
+-	  fprintf (dump_file, "\n");
+-	}
+-      return NULL;
+-    }
+-
+-  tree num;
+-  if (!is_result_of_mult (size, &num, struct_size))
+-    return NULL;
+-
+-  return num;
++  return get_allocate_size (type->type, decl->decl, decl->orig_type, stmt);
+ }
+ 
+ void
+@@ -4423,18 +5070,6 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect,
+       base = TREE_OPERAND (base, 0);
+     }
+ 
+-  if (offset != 0 && accesstype)
+-    {
+-      if (dump_file && (dump_flags & TDF_DETAILS))
+-	{
+-	  fprintf (dump_file, "Non zero offset (%d) with MEM.\n", (int)offset);
+-	  print_generic_expr (dump_file, expr);
+-	  fprintf (dump_file, "\n");
+-	  print_generic_expr (dump_file, base);
+-	  fprintf (dump_file, "\n");
+-	}
+-    }
+-
+   srdecl *d = find_decl (base);
+   srtype *t;
+ 
+@@ -4546,7 +5181,14 @@ ipa_struct_reorg::get_type_field (tree expr, tree &base, bool &indirect,
+       return true;
+     }
+ 
+-  srfield *f = t->find_field (offset);
++  srfield *f = NULL;
++  if (TREE_CODE (expr) == COMPONENT_REF
++      && DECL_BIT_FIELD (TREE_OPERAND (expr, 1)))
++    /* Static field compression may create bitfield.  In this case,
++       byte position is not reliable.  */
++    f = t->find_field_by_decl (TREE_OPERAND (expr, 1));
++  else
++    f = t->find_field (offset);
+   if (!f)
+     {
+       if (dump_file && (dump_flags & TDF_DETAILS))
+@@ -4751,8 +5393,15 @@ srfunction *
+ ipa_struct_reorg::find_function (cgraph_node *node)
+ {
+   for (unsigned i = 0; i < functions.length (); i++)
+-    if (functions[i]->node == node)
+-      return functions[i];
++    {
++      if (functions[i]->node == node)
++	return functions[i];
++
++      srfunction *cloned_func = functions[i]->fc_path.cloned_func;
++      if (current_fc_level == fc_level::DYNAMIC
++	  && cloned_func && cloned_func->node == node)
++	return cloned_func;
++    }
+   return NULL;
+ }
+ 
+@@ -5383,12 +6032,11 @@ ipa_struct_reorg::check_uses (srdecl *decl, vec &worklist)
+ /* Record function corresponding to NODE.  */
+ 
+ srfunction *
+-ipa_struct_reorg::record_function (cgraph_node *node)
++ipa_struct_reorg::record_function (cgraph_node *node, srfunction *sfn)
+ {
+   function *fn;
+   tree parm, var;
+   unsigned int i;
+-  srfunction *sfn = NULL;
+   escape_type escapes = does_not_escape;
+ 
+   if (dump_file  && (dump_flags & TDF_DETAILS))
+@@ -5408,8 +6056,11 @@ ipa_struct_reorg::record_function (cgraph_node *node)
+   if (!fn)
+     return sfn;
+ 
+-  sfn = new srfunction (node);
+-  functions.safe_push (sfn);
++  if (!sfn)
++    {
++      sfn = new srfunction (node);
++      functions.safe_push (sfn);
++    }
+ 
+   current_function = sfn;
+ 
+@@ -5935,38 +6586,7 @@ ipa_struct_reorg::prune_escaped_types (void)
+   for (unsigned i = 0; i < functions.length ();)
+     {
+       srfunction *function = functions[i];
+-
+-      /* Prune function arguments of types that escape.  */
+-      for (unsigned j = 0; j < function->args.length ();)
+-	{
+-	  if (function->args[j]->type->has_escaped ())
+-	    function->args.ordered_remove (j);
+-	  else
+-	    j++;
+-	}
+-
+-      /* Prune global variables that the function uses of types
+-	 that escape.  */
+-      for (unsigned j = 0; j < function->globals.length ();)
+-	{
+-	  if (function->globals[j]->type->has_escaped ())
+-	    function->globals.ordered_remove (j);
+-	  else
+-	    j++;
+-	}
+-
+-      /* Prune variables that the function uses of types that escape.  */
+-      for (unsigned j = 0; j < function->decls.length ();)
+-	{
+-	  srdecl *decl = function->decls[j];
+-	  if (decl->type->has_escaped ())
+-	    {
+-	      function->decls.ordered_remove (j);
+-	      delete decl;
+-	    }
+-	  else
+-	    j++;
+-	}
++      prune_function (function);
+ 
+       /* Prune functions which don't refer to any variables any more.  */
+       if (function->args.is_empty ()
+@@ -5981,19 +6601,7 @@ ipa_struct_reorg::prune_escaped_types (void)
+ 	i++;
+     }
+ 
+-  /* Prune globals of types that escape, all references to those decls
+-     will have been removed in the first loop.  */
+-  for (unsigned j = 0; j < globals.decls.length ();)
+-    {
+-      srdecl *decl = globals.decls[j];
+-      if (decl->type->has_escaped ())
+-	{
+-	  globals.decls.ordered_remove (j);
+-	  delete decl;
+-	}
+-      else
+-	j++;
+-    }
++  prune_globals ();
+ 
+   /* Prune types that escape, all references to those types
+      will have been removed in the above loops.  */
+@@ -6026,6 +6634,62 @@ ipa_struct_reorg::prune_escaped_types (void)
+     }
+ }
+ 
++/* Prune the decls in function SRFN.  */
++
++void
++ipa_struct_reorg::prune_function (srfunction *srfn)
++{
++  /* Prune function arguments of types that escape.  */
++  for (unsigned i = 0; i < srfn->args.length ();)
++    {
++      if (srfn->args[i]->type->has_escaped ())
++	srfn->args.ordered_remove (i);
++      else
++	i++;
++    }
++
++  /* Prune global variables that the function uses of types that escape.  */
++  for (unsigned i = 0; i < srfn->globals.length ();)
++    {
++      if (srfn->globals[i]->type->has_escaped ())
++	srfn->globals.ordered_remove (i);
++      else
++	i++;
++    }
++
++  /* Prune variables that the function uses of types that escape.  */
++  for (unsigned i = 0; i < srfn->decls.length ();)
++    {
++      srdecl *decl = srfn->decls[i];
++      if (decl->type->has_escaped ())
++	{
++	  srfn->decls.ordered_remove (i);
++	  delete decl;
++	}
++      else
++	i++;
++    }
++}
++
++/* Prune globals of types that escape, all references to those decls
++   will have been removed in the first loop.  */
++
++void
++ipa_struct_reorg::prune_globals ()
++{
++  for (unsigned i = 0; i < globals.decls.length ();)
++    {
++      srdecl *decl = globals.decls[i];
++      if (decl->type->has_escaped ())
++	{
++	  globals.decls.ordered_remove (i);
++	  delete decl;
++	}
++      else
++	i++;
++    }
++}
++
+ /* Analyze all of the types.  */
+ 
+ void
+@@ -6242,7 +6906,6 @@ ipa_struct_reorg::create_new_functions (void)
+       bool anyargchanges = false;
+       cgraph_node *new_node;
+       cgraph_node *node = f->node;
+-      int newargs = 0;
+       if (f->old)
+ 	continue;
+ 
+@@ -6254,10 +6917,7 @@ ipa_struct_reorg::create_new_functions (void)
+ 	  srdecl *d = f->args[j];
+ 	  srtype *t = d->type;
+ 	  if (t->has_new_type ())
+-	    {
+-	      newargs += t->newtype[1] != NULL;
+-	      anyargchanges = true;
+-	    }
++	    anyargchanges = true;
+ 	}
+       if (!anyargchanges)
+ 	continue;
+@@ -6389,6 +7049,7 @@ ipa_struct_reorg::rewrite_expr (tree expr,
+ 	}
+       return true;
+     }
++  cur_srfd = f;
+ 
+   tree newdecl = newbase[f->clusternum];
+   for (unsigned i = 0; i < max_split && f->newfield[i]; i++)
+@@ -6959,8 +7620,6 @@ ipa_struct_reorg::decompress_candidate_without_check (gimple_stmt_iterator *gsi,
+ 						      tree &new_lhs,
+ 						      tree &new_rhs)
+ {
+-  imm_use_iterator imm_iter;
+-  use_operand_p use_p;
+   bool processed = false;
+ 
+   if (!gsi_one_before_end_p (*gsi))
+@@ -7695,10 +8354,13 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 
+       if (!rewrite_lhs_rhs (lhs, rhs1, newlhs, newrhs))
+ 	return false;
+-      tree size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (lhs)));
+-      tree num;
++      tree struct_type = TREE_TYPE (TREE_TYPE (lhs));
++      tree size = TYPE_SIZE_UNIT (struct_type);
++      tree num = NULL_TREE;
+       /* Check if rhs2 is a multiplication of the size of the type.  */
+-      if (!is_result_of_mult (rhs2, &num, size)
++      if ((current_fc_level != fc_level::DYNAMIC
++	   || !POINTER_TYPE_P (struct_type))
++	  && !is_result_of_mult (rhs2, &num, size)
+ 	  && !(current_layout_opt_level & SEMI_RELAYOUT))
+ 	internal_error (
+ 	  "The rhs of pointer is not a multiplicate and it slips through");
+@@ -7836,6 +8498,7 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	}
+       tree newlhs[max_split];
+       tree newrhs[max_split];
++      cur_srfd = NULL;
+       if (!rewrite_lhs_rhs (lhs, rhs, newlhs, newrhs))
+ 	{
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+@@ -7853,6 +8516,9 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	  if (current_layout_opt_level >= POINTER_COMPRESSION_SAFE)
+ 	    try_rewrite_with_pointer_compression (stmt, gsi, lhs, rhs,
+ 						  newlhs[i], newrhs[i]);
++	  if (current_fc_level == fc_level::DYNAMIC
++	      && cur_srfd && cur_srfd->dfc_type_change_p ())
++	    dynamic_fc_rewrite_assign (stmt, rhs, newlhs[i], newrhs[i]);
+ 	  remove = true;
+ 	  if (fields_copied)
+ 	    continue;
+@@ -7862,7 +8528,10 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+ 	  tree conv_rhs = build_convert_gimple (lhs_expr, rhs_expr, gsi);
+ 	  if (conv_rhs)
+ 	    rhs_expr = conv_rhs;
+-
++	  if (rhs_expr && get_gimple_rhs_class (TREE_CODE (rhs_expr))
++			  == GIMPLE_INVALID_RHS)
++	    rhs_expr = gimplify_build1 (gsi, NOP_EXPR, TREE_TYPE (rhs_expr),
++					rhs_expr);
+ 	  gimple *newstmt = gimple_build_assign (lhs_expr, rhs_expr);
+ 	  if (dump_file && (dump_flags & TDF_DETAILS))
+ 	    {
+@@ -7997,6 +8666,9 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+       if (!decl || !decl->type)
+ 	return false;
+       srtype *type = decl->type;
++      if (type->has_escaped () || !type->has_new_type ())
++	return false;
++
+       tree num = allocate_size (type, decl, stmt);
+       gcc_assert (num);
+       memset (newrhs1, 0, sizeof (newrhs1));
+@@ -8114,9 +8786,9 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+ 	{
+ 	  if (t && t->semi_relayout)
+ 	    newexpr[0] = get_real_allocated_ptr (newexpr[0], gsi);
+-	    gimple_call_set_arg (stmt, 0, newexpr[0]);
+-	    update_stmt (stmt);
+-	    return false;
++	  gimple_call_set_arg (stmt, 0, newexpr[0]);
++	  update_stmt (stmt);
++	  return false;
+ 	}
+ 
+       for (unsigned i = 0; i < max_split && newexpr[i]; i++)
+@@ -8142,6 +8814,7 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+ 
+   /* Add a safe func mechanism.  */
+   if (current_layout_opt_level >= STRUCT_REORDER_FIELDS
++      && current_fc_level != fc_level::DYNAMIC
+       && f && f->is_safe_func)
+     {
+       tree expr = gimple_call_arg (stmt, 0);
+@@ -8160,9 +8833,35 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+ 
+   /* Did not find the function or had not cloned it return saying don't
+      change the function call.  */
+-  if (!f || !f->newf)
++  if (!f)
+     return false;
+ 
++  if (current_fc_level == fc_level::DYNAMIC)
++    {
++      if (f->partial_clone_p ())
++	return false;
++      f = f->fc_path.cloned_func;
++    }
++  else
++    {
++      if (!f->newf)
++	return false;
++      /* Move over to the new function.  */
++      f = f->newf;
++    }
++
++  if (current_fc_level == fc_level::DYNAMIC && f->is_safe_func)
++    {
++      tree expr = gimple_call_arg (stmt, 0);
++      tree newexpr[max_split] = {NULL_TREE};
++      if (rewrite_expr (expr, newexpr) && newexpr[1] == NULL_TREE)
++	gimple_call_set_arg (stmt, 0, newexpr[0]);
++
++      gimple_call_set_fndecl (stmt, f->node->decl);
++      update_stmt (stmt);
++      return false;
++    }
++
+   if (dump_file && (dump_flags & TDF_DETAILS))
+     {
+       fprintf (dump_file, "Changing arguments for function call :\n");
+@@ -8170,9 +8869,6 @@ ipa_struct_reorg::rewrite_call (gcall *stmt, gimple_stmt_iterator *gsi)
+       fprintf (dump_file, "\n");
+     }
+ 
+-  /* Move over to the new function.  */
+-  f = f->newf;
+-
+   tree chain = gimple_call_chain (stmt);
+   unsigned nargs = gimple_call_num_args (stmt);
+   auto_vec vargs (nargs);
+@@ -8456,9 +9152,8 @@ ipa_struct_reorg::rewrite_functions (void)
+ 
+   if (flag_ipa_struct_sfc_shadow)
+     {
+-      for (unsigned i = 0; i < fc_infos.length (); i++)
++      for (auto *info : fc_infos)
+ 	{
+-	  fc_type_info *info = fc_infos[i];
+ 	  if (!info || !info->static_fc_p)
+ 	    continue;
+ 	  cleanup_shadow_write (info);
+@@ -8571,39 +9266,7 @@ ipa_struct_reorg::rewrite_functions (void)
+ 		   i, f->node->name ());
+ 	}
+       FOR_EACH_BB_FN (bb, cfun)
+-	{
+-	  for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);)
+-	    {
+-	      if (rewrite_phi (si.phi ()))
+-		si = gsi_start_phis (bb);
+-	      else
+-		gsi_next (&si);
+-	    }
+-
+-	  for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);)
+-	    {
+-	      gimple *stmt = gsi_stmt (si);
+-	      if (rewrite_stmt (stmt, &si))
+-		gsi_remove (&si, true);
+-	      else
+-		gsi_next (&si);
+-	    }
+-	}
+-
+-      /* Debug statements need to happen after all other statements
+-	 have changed.  */
+-      FOR_EACH_BB_FN (bb, cfun)
+-	{
+-	  for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);)
+-	    {
+-	      gimple *stmt = gsi_stmt (si);
+-	      if (gimple_code (stmt) == GIMPLE_DEBUG
+-		  && rewrite_debug (stmt, &si))
+-		gsi_remove (&si, true);
+-	      else
+-		gsi_next (&si);
+-	    }
+-	}
++	rewrite_block (bb);
+ 
+       /* Release the old SSA_NAMES for old arguments.  */
+       if (f->old)
+@@ -8659,6 +9322,39 @@ ipa_struct_reorg::rewrite_functions (void)
+   return retval | TODO_verify_all;
+ }
+ 
++void
++ipa_struct_reorg::rewrite_block (basic_block bb)
++{
++  for (gphi_iterator si = gsi_start_phis (bb); !gsi_end_p (si);)
++    {
++      if (rewrite_phi (si.phi ()))
++	si = gsi_start_phis (bb);
++      else
++	gsi_next (&si);
++    }
++
++  for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);)
++    {
++      gimple *stmt = gsi_stmt (si);
++      if (rewrite_stmt (stmt, &si))
++	gsi_remove (&si, true);
++      else
++	gsi_next (&si);
++    }
++
++  /* Debug statements need to happen after all other statements
++     have changed.  */
++  for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);)
++    {
++      gimple *stmt = gsi_stmt (si);
++      if (gimple_code (stmt) == GIMPLE_DEBUG
++	  && rewrite_debug (stmt, &si))
++	gsi_remove (&si, true);
++      else
++	gsi_next (&si);
++    }
++}
++
+ unsigned int
+ ipa_struct_reorg::execute_struct_relayout (void)
+ {
+@@ -8919,7 +9615,7 @@ ipa_struct_reorg::check_and_prune_struct_for_field_compression (void)
+       if (!find_field_compression_candidate (type))
+ 	continue;
+ 
+-      gcc_assert (type->fc_info->static_fc_p);
++      gcc_assert (type->fc_info->static_fc_p ^ type->fc_info->dynamic_fc_p);
+       if (dump_file)
+ 	{
+ 	  fprintf (dump_file, "[field compress] Found candidate: ");
+@@ -8948,13 +9644,19 @@ ipa_struct_reorg::find_field_compression_candidate (srtype *type)
+   /* Classify fields by field type firstly.  */
+   classify_fields (info);
+ 
+-  if (flag_ipa_struct_sfc)
++  if (current_fc_level == fc_level::STATIC)
+     {
+       FC_DUMP_MSG ("Looking for static fc fields\n");
+       info->static_fc_p = find_static_fc_fields (info);
+     }
+ 
+-  if (!info->static_fc_p)
++  if (current_fc_level == fc_level::DYNAMIC)
++    {
++      FC_DUMP_MSG ("Looking for dynamic fc fields\n");
++      info->dynamic_fc_p = find_dynamic_fc_fields (info);
++    }
++
++  if (!info->static_fc_p && !info->dynamic_fc_p)
+     {
+       FC_DUMP_MSG ("Fail finding field compression candidate\n");
+       return false;
+@@ -8963,6 +9665,8 @@ ipa_struct_reorg::find_field_compression_candidate (srtype *type)
+   if (!compress_fields (info))
+     {
+       FC_DUMP_MSG ("Fail compressing fields\n");
++      info->static_fc_p = false;
++      info->dynamic_fc_p = false;
+       return false;
+     }
+ 
+@@ -9012,7 +9716,7 @@ ipa_struct_reorg::find_static_fc_fields (fc_type_info *info)
+ 	continue;
+ 
+       /* We have marked these fields as shadow, so skip them.  */
+-      if (fc_fields_contains (info->static_fc_fields, srf->fielddecl))
++      if (find_fc_field (info->static_fc_fields, srf->fielddecl))
+ 	continue;
+ 
+       found_static_compress |= static_compress_p (info, srf->fielddecl);
+@@ -9041,16 +9745,21 @@ ipa_struct_reorg::find_static_fc_fields (fc_type_info *info)
+ bool
+ ipa_struct_reorg::compress_fields (fc_type_info *info)
+ {
+-  if (info->static_fc_p && !compress_fields_static (info))
+-    info->static_fc_p = false;
+-
+-  if (!info->static_fc_p)
+-    return false;
+-
+-  compress_to_bitfields (info);
++  gcc_assert (info->static_fc_p ^ info->dynamic_fc_p);
+ 
+-  return true;
+-}
++  if (info->static_fc_p)
++    {
++      return compress_fields_static (info)
++	     && compress_to_bitfield_static (info);
++    }
++  else
++    {
++      return compress_fields_dynamic (info)
++	     && compress_to_bitfield_dynamic (info)
++	     && calc_dynamic_boundary (info)
++	     && check_closure (info);
++    }
++}
+ 
+ /* Check if the type has any field that can be shadowed.  */
+ 
+@@ -9101,6 +9810,7 @@ ipa_struct_reorg::find_shadow_fields (fc_type_info *info,
+   /* Unpair assignment checking.  */
+   auto &srfields = field_class->srfields;
+   unsigned original_index = 0;
++  tree init_const = NULL_TREE;
+   if (shadow_info.unpair_stmt)
+     {
+       if (dump_file && (dump_flags & TDF_DETAILS))
+@@ -9117,6 +9827,19 @@ ipa_struct_reorg::find_shadow_fields (fc_type_info *info,
+ 	return false;
+     }
+ 
++  if (current_fc_level == fc_level::DYNAMIC)
++    {
++      if (!shadow_info.unpair_stmt)
++	return false;
++      /* We have proved that the unpair_stmt is single assign.  */
++      init_const = gimple_assign_rhs1 (shadow_info.unpair_stmt);
++      if (TREE_CODE (init_const) != INTEGER_CST)
++	return false;
++
++      if (!unique_init_const_p (shadow_info))
++	return false;
++    }
++
+   /* Add a new static fc_field.  */
+   srfield *original_srf = srfields[original_index];
+ 
+@@ -9128,7 +9851,10 @@ ipa_struct_reorg::find_shadow_fields (fc_type_info *info,
+ 	continue;
+ 
+       fc_field *fc_f = new fc_field (shadow_srf->fielddecl, 1, original_srf);
+-      info->static_fc_fields.safe_push (fc_f);
++      auto &fc_fields = current_fc_level == fc_level::STATIC
++		      ? info->static_fc_fields : info->dynamic_shadow_fields;
++      fc_fields.safe_push (fc_f);
++      fc_f->init_const = init_const; /* Not NULL only in dynamic.  */
+ 
+       /* Record all shadow stmts to fc_field.  */
+       unsigned j;
+@@ -9289,15 +10015,30 @@ ipa_struct_reorg::write_field_in_fc_class_p (gimple *stmt,
+   return field;
+ }
+ 
+-fc_field *
+-ipa_struct_reorg::fc_fields_contains (auto_vec &fc_fields,
+-				      tree field)
++/* Check if the init_const is a unique constant, which is different from all
++   constant rhs of pair statements.  */
++
++bool
++ipa_struct_reorg::unique_init_const_p (const fc_shadow_info &shadow_info)
+ {
+-  for (auto *fc_f : fc_fields)
+-    if (fc_f->field == field)
+-      return fc_f;
++  tree init_const = gimple_assign_rhs1 (shadow_info.unpair_stmt);
++  HOST_WIDE_INT value = tree_to_shwi (init_const);
++  for (auto *stmts : shadow_info.pair_stmts_groups)
++    {
++      /* We have prove all rhs in a group are equal, checking one of them
++	 is enough.  */
++      tree rhs = gimple_assign_rhs1 ((*stmts)[0]);
++      if (TREE_CODE (rhs) != INTEGER_CST)
++	continue;
+ 
+-  return NULL;
++      if (tree_to_shwi (rhs) == value)
++	{
++	  FC_DUMP_MSG ("Init const is not unique.\n");
++	  return false;
++	}
++    }
++
++  return true;
+ }
+ 
+ /* Check if the right operands of all assignments are equal.  */
+@@ -9363,12 +10104,6 @@ ipa_struct_reorg::fc_operand_equal_p (tree var1, tree var2)
+ bool
+ ipa_struct_reorg::fc_global_const_p (tree var, HOST_WIDE_INT &value)
+ {
+-  srtype *type;
+-  srfield *field;
+-  tree base;
+-  if (!get_base_type (var, base, type, field) || type->has_escaped ())
+-    return false;
+-
+   const_map *cm = find_global_const (var);
+   if (cm)
+     {
+@@ -9376,33 +10111,44 @@ ipa_struct_reorg::fc_global_const_p (tree var, HOST_WIDE_INT &value)
+       return true;
+     }
+ 
++  if (visited_vars.contains (var))
++    return false;
++  visited_vars.add (var);
++
+   bool is_const = false;
+   HOST_WIDE_INT const_value = 0;
+-  for (auto *access : type->accesses)
++  for (auto *srfn : functions)
+     {
+-      SET_CFUN (access->function);
+-
+-      gimple *stmt = access->stmt;
+-      if (!gimple_assign_single_p (stmt)
+-	  || !operand_equal_p (gimple_assign_lhs (stmt), var))
+-	continue;
++      SET_CFUN (srfn);
++      basic_block bb = NULL;
++      FOR_EACH_BB_FN (bb, cfun)
++	{
++	  for (gimple_stmt_iterator si = gsi_start_bb (bb); !gsi_end_p (si);
++	       gsi_next (&si))
++	    {
++	      gimple *stmt = gsi_stmt (si);
++	      if (!gimple_assign_single_p (stmt)
++		  || !operand_equal_p (gimple_assign_lhs (stmt), var))
++		continue;
+ 
+-      if (!fc_peephole_const_p (gimple_assign_rhs1 (stmt), value))
+-	return false;
++	      if (!fc_peephole_const_p (gimple_assign_rhs1 (stmt), value))
++		return false;
+ 
+-      /* Make sure the value is never changed.  */
+-      if (is_const)
+-	{
+-	  if (value != const_value)
+-	    return false;
+-	  continue;
+-	}
++	      /* Make sure the value is never changed.  */
++	      if (is_const)
++		{
++		  if (value != const_value)
++		    return false;
++		  continue;
++		}
+ 
+-      is_const = true;
+-      const_value = value;
++	      is_const = true;
++	      const_value = value;
+ 
+-      /* Record a global constant here.  */
+-      global_consts.safe_push (new const_map (var, value));
++	      /* Record a global constant here.  */
++	      global_consts.safe_push (new const_map (var, value));
++	    }
++	}
+     }
+ 
+   return is_const;
+@@ -9785,7 +10531,7 @@ ipa_struct_reorg::struct_copy_p (gimple *stmt, tree type)
+ {
+   if (!gimple_assign_single_p (stmt)
+       || TREE_TYPE (gimple_assign_lhs (stmt)) != type
+-      || !types_compatible_p (TREE_TYPE (gimple_assign_rhs1 (stmt)), type))
++      || !types_fc_compatible_p (TREE_TYPE (gimple_assign_rhs1 (stmt)), type))
+     return false;
+ 
+   if (dump_file && (dump_flags & TDF_DETAILS))
+@@ -9815,8 +10561,7 @@ ipa_struct_reorg::find_hot_access (fc_type_info *info,
+       SET_CFUN (access->function);
+ 
+       basic_block bb = access->stmt->bb;
+-      if (!bb->loop_father->num
+-	  || !access->write_type_p (type->type))
++      if (!bb->loop_father->num || !access->write_p ())
+ 	continue;
+ 
+       /* Case (1).  */
+@@ -9828,7 +10573,7 @@ ipa_struct_reorg::find_hot_access (fc_type_info *info,
+ 	continue;
+ 
+       tree fielddecl = access->field->fielddecl;
+-      if (!fielddecl || !fc_fields_contains (fc_fields, fielddecl))
++      if (!fielddecl || !find_fc_field (fc_fields, fielddecl))
+ 	continue;
+ 
+       auto &set = write_map.get_or_insert (bb);
+@@ -9859,8 +10604,8 @@ ipa_struct_reorg::cleanup_shadow_write (fc_type_info *info)
+ 	{
+ 	  SET_CFUN (fc_f->shadow_stmts_func[i]);
+ 	  gcc_assert (gimple_assign_single_p (stmt));
+-	  gimple_assign_set_rhs1 (
+-	    stmt, build_int_cst (TREE_TYPE (fc_f->field), 1));
++	  tree newrhs = build_int_cst (TREE_TYPE (fc_f->field), 1);
++	  gimple_assign_set_rhs1 (stmt, newrhs);
+ 	  update_stmt (stmt);
+ 	}
+     }
+@@ -9882,7 +10627,7 @@ ipa_struct_reorg::rewrite_shadow_read (fc_type_info *info)
+ 	    continue;
+ 
+ 	  SET_CFUN (access->function);
+-	  insert_shadow_stmt (access->stmt, access->index,
++	  modify_shadow_read (access->stmt, access->index,
+ 			      fc_f, access->base);
+ 	}
+     }
+@@ -9891,14 +10636,16 @@ ipa_struct_reorg::rewrite_shadow_read (fc_type_info *info)
+ /* Insert the followings for shadow data read before STMT.
+    The IDX operand is the shadow data.
+ 
+-   * For static: (shadow_field == true) ? original_field : 0 */
++   * For static:  (shadow_field == true) ? original_field : 0
++   * For dynamic: (original_field != init_const) ? original_field : 0
++ */
+ 
+ void
+-ipa_struct_reorg::insert_shadow_stmt (gimple *stmt, unsigned idx,
+-				      fc_field *fc_field, tree base)
++ipa_struct_reorg::modify_shadow_read (gimple *stmt, unsigned idx,
++				      fc_field *field, tree base)
+ {
+   tree shadow = gimple_op (stmt, idx);
+-  tree original = build_field_ref (base, fc_field->original->fielddecl);
++  tree original = build_field_ref (base, field->original->fielddecl);
+ 
+   /* Insert new stmt immediately before stmt.  */
+   gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
+@@ -9909,15 +10656,26 @@ ipa_struct_reorg::insert_shadow_stmt (gimple *stmt, unsigned idx,
+   gsi_insert_before (&gsi, original_stmt, GSI_SAME_STMT);
+   update_stmt (original_stmt);
+ 
+-  /* shadow_ssa = shadow */
+-  tree shadow_ssa = make_temp_ssa_name (TREE_TYPE (shadow), NULL, "");
+-  gimple *shadow_stmt = gimple_build_assign (shadow_ssa, shadow);
+-  gsi_insert_before (&gsi, shadow_stmt, GSI_SAME_STMT);
+-  update_stmt (shadow_stmt);
++  tree cond = NULL_TREE;
++  if (current_fc_level == fc_level::DYNAMIC)
++    {
++      field->original->get_closure ()->add_read_change (original_stmt);
++      /* new_shadow_ssa = (original_ssa != init_const ? original_ssa : 0) */
++      cond = fold_build2 (NE_EXPR, boolean_type_node, original_ssa,
++			  field->init_const);
++    }
++  else
++    {
++      /* shadow_ssa = shadow */
++      tree shadow_ssa = make_temp_ssa_name (TREE_TYPE (shadow), NULL, "");
++      gimple *shadow_stmt = gimple_build_assign (shadow_ssa, shadow);
++      gsi_insert_before (&gsi, shadow_stmt, GSI_SAME_STMT);
++      update_stmt (shadow_stmt);
+ 
+-  /* new_shadow_ssa = (shadow_ssa == true ? original_ssa : 0) */
+-  tree cond = fold_build2 (EQ_EXPR, boolean_type_node, shadow_ssa,
+-			   build_int_cst (TREE_TYPE (shadow), 1));
++      /* new_shadow_ssa = (shadow_ssa == true ? original_ssa : 0) */
++      cond = fold_build2 (EQ_EXPR, boolean_type_node, shadow_ssa,
++			  build_int_cst (TREE_TYPE (shadow), 1));
++    }
+ 
+   tree new_shadow = build_cond_expr (cond, original_ssa,
+ 				     build_int_cst (TREE_TYPE (shadow), 0));
+@@ -9970,34 +10728,97 @@ ipa_struct_reorg::compress_fields_static (fc_type_info *info)
+ 
+ /* Compress fields to bitfield, for which bits will be the width.  */
+ 
+-void
+-ipa_struct_reorg::compress_to_bitfields (fc_type_info *info)
++bool
++ipa_struct_reorg::compress_to_bitfield_static (fc_type_info *info)
+ {
+-  /* For static compression.  Calculate bitsize for static field.  */
+-  if (flag_ipa_struct_sfc_bitfield && info->static_fc_p)
++  if (!flag_ipa_struct_sfc_bitfield)
++    return true;
++
++  for (auto *fc_f : info->static_fc_fields)
+     {
+-      for (auto *fc_f : info->static_fc_fields)
++      HOST_WIDE_INT max_value = fc_f->max_value;
++      gcc_assert (max_value > 0 && max_value <= UINT_MAX);
++
++      /* Calculate bitsize.  */
++      fc_f->bits = 0;
++      while (max_value)
+ 	{
+-	  HOST_WIDE_INT max_value = fc_f->max_value;
+-	  gcc_assert (max_value > 0 && max_value <= UINT_MAX);
++	  fc_f->bits++;
++	  max_value >>= 1;
++	}
+ 
+-	  /* Calculate bitsize.  */
+-	  fc_f->bits = 0;
+-	  while (max_value)
+-	    {
+-	      fc_f->bits++;
+-	      max_value >>= 1;
+-	    }
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Static bitfield: ");
++	  print_generic_expr (dump_file, fc_f->field);
++	  fprintf (dump_file, ":%d", fc_f->bits);
++	  fprintf (dump_file, "\n");
++	}
++    }
+ 
+-	  if (dump_file && (dump_flags & TDF_DETAILS))
+-	    {
+-	      FC_DUMP_MSG ("Bitfield: ");
+-	      print_generic_expr (dump_file, fc_f->field);
+-	      fprintf (dump_file, ":%d", fc_f->bits);
+-	      fprintf (dump_file, "\n");
+-	    }
++  return true;
++}
++
++/* Compress fields to bitfield for dynamic field compression.  */
++
++bool
++ipa_struct_reorg::compress_to_bitfield_dynamic (fc_type_info *info)
++{
++  if (!flag_ipa_struct_dfc_bitfield)
++    return true;
++
++  calc_fc_ref_count (info);
++
++  /* Collect existing bitfields.  */
++  unsigned total_static_bits = 0;
++  for (auto *srf : info->type->fields)
++    {
++      tree field = srf->fielddecl;
++      if (DECL_BIT_FIELD (field))
++	total_static_bits += tree_to_uhwi (DECL_SIZE (field));
++    }
++
++  unsigned max_ref_cnt = 0;
++  fc_field *max_f = NULL;
++  fc_cond *max_cond = NULL;
++  for (auto *cond : info->fc_conds)
++    {
++      /* Heuristically, only try bit field for big data size.  */
++      if (TYPE_MAIN_VARIANT (cond->old_type) != long_integer_type_node)
++	continue;
++
++      /* Find the hottest field.  */
++      for (auto *fc_f : cond->fields)
++	{
++	  if (fc_f->ref_cnt <= max_ref_cnt)
++	    continue;
++
++	  max_ref_cnt = fc_f->ref_cnt;
++	  max_f = fc_f;
++	  max_cond = cond;
+ 	}
+     }
++
++  /* Choose the hottest candidate to try bitfield.  */
++  unsigned new_type_bits = TYPE_PRECISION (max_f->new_type);
++  if (new_type_bits <= total_static_bits)
++    return false;
++
++  /* The fc condition covering this field is marked as bitfield,
++     although not all of the fields for this condition are marked as
++     bitfield.  */
++  max_f->bits = new_type_bits - total_static_bits;
++  max_cond->bits = max_f->bits;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Dynamic bitfield: ");
++      print_generic_expr (dump_file, max_f->field);
++      fprintf (dump_file, ":%d", max_f->bits);
++      fprintf (dump_file, "\n");
++    }
++
++  return true;
+ }
+ 
+ /* Collect all blocks that can reach stmt.  */
+@@ -10031,127 +10852,2930 @@ ipa_struct_reorg::collect_all_predecessor (gimple *stmt)
+   return blocks;
+ }
+ 
+-/* Init pointer size from parameter param_pointer_compression_size.  */
+-
+-static void
+-init_pointer_size_for_pointer_compression (void)
++bool
++ipa_struct_reorg::types_fc_equal_p (tree type1, tree type2)
+ {
+-  switch (param_pointer_compression_size)
++  if (type1 == type2)
++    return true;
++  if (TREE_CODE (type1) != TREE_CODE (type2))
++    return false;
++
++  const char *tname1;
++  const char *tname2;
++  size_t len1;
++  size_t len2;
++  const char *p;
++
++  switch (TREE_CODE (type1))
+     {
+-      case 8:
+-      // FALLTHRU
+-      case 16:
+-      // FALLTHRU
+-      case 32: compressed_size = param_pointer_compression_size; break;
++      case POINTER_TYPE:
++	return types_fc_equal_p (inner_type (type1), inner_type (type2));
++
++      case RECORD_TYPE:
++	tname1 = get_type_name (type1);
++	tname2 = get_type_name (type2);
++	if (!tname1 || !tname2)
++	  return false;
++
++	len1 = strlen (tname1);
++	len2 = strlen (tname2);
++	if (len1 > len2)
++	  {
++	    std::swap (len1, len2);
++	    std::swap (tname1, tname2);
++	  }
++
++	p = strstr (tname2, tname1);
++	if (!p)
++	  return false;
++	p += len1;
++
++	/* As suffixes with '.' are generated by compiler, should be safe to
++	   skip the rest of p.  */
++	return STRING_STARTS_WITH (p, ".reorg");
++
+       default:
+-	error ("Invalid pointer compression size, using the following param: "
+-	       "\"--param compressed-pointer-size=[8,16,32]\"");
++	return false;
+     }
+ }
+ 
+-unsigned int
+-ipa_struct_reorg::execute (unsigned int opt)
++bool
++ipa_struct_reorg::types_fc_compatible_p (tree type1, tree type2)
+ {
+-  unsigned int ret = 0;
++  return types_compatible_p (type1, type2) || types_fc_equal_p (type1, type2);
++}
+ 
+-  if (dump_file)
+-    fprintf (dump_file, "\n\n====== ipa_struct_reorg level %d ======\n\n",
+-	     opt);
++/* Scan all of fields to check whether each can be dynamically
++   compressed or not.  */
+ 
+-  if (opt != COMPLETE_STRUCT_RELAYOUT)
++bool
++ipa_struct_reorg::find_dynamic_fc_fields (fc_type_info *info)
++{
++  if (flag_ipa_struct_dfc_shadow)
++    find_shadow_fields (info);
++
++  if (!find_fields_in_input_stmt (info))
+     {
+-      current_layout_opt_level = opt;
+-      /* If there is a top-level inline-asm,
+-	 the pass immediately returns.  */
+-      if (symtab->first_asm_symbol ())
+-	return 0;
+-      record_accesses ();
+-      prune_escaped_types ();
+-      if (current_layout_opt_level == STRUCT_SPLIT)
+-	analyze_types ();
++      FC_DUMP_MSG ("Fail finding fields in input stmt\n");
++      return false;
++    }
+ 
+-      if (opt >= POINTER_COMPRESSION_SAFE)
+-	check_and_prune_struct_for_pointer_compression ();
+-      if (opt >= SEMI_RELAYOUT)
+-	check_and_prune_struct_for_semi_relayout ();
+-      /* Avoid doing static field compression in STRUCT_SPLIT.  */
+-      if (opt >= STRUCT_REORDER_FIELDS && flag_ipa_struct_sfc)
+-	check_and_prune_struct_for_field_compression ();
+-      ret = rewrite_functions ();
++  if (!find_fopen_fclose (info))
++    {
++      FC_DUMP_MSG ("Fail finding fopen/fclose stmt\n");
++      return false;
+     }
+-  else
++
++  /* Avoid compressing fields without hot access.  */
++  if (!find_hot_access (info, info->dynamic_fc_fields))
+     {
+-      if (dump_file)
+-	fprintf (dump_file, "\n\nTry Complete Struct Relayout:\n");
+-      current_layout_opt_level = COMPLETE_STRUCT_RELAYOUT;
+-      if (symtab->first_asm_symbol ())
+-	return 0;
+-      record_accesses ();
+-      prune_escaped_types ();
++      FC_DUMP_MSG ("Fail finding hot access for dynamic\n");
++      return false;
++    }
+ 
+-      ret = execute_struct_relayout ();
++  if (!check_dynamic_shadow_fields (info))
++    {
++      FC_DUMP_MSG ("Fail checking dynamic shadow fields\n");
++      return false;
+     }
+ 
+-  return ret;
++  if (!find_fc_paths (info))
++    {
++      FC_DUMP_MSG ("Fail finding fc paths\n");
++      return false;
++    }
++
++  if (!find_fc_data (info))
++    {
++      FC_DUMP_MSG ("Fail finding fc data\n");
++      return false;
++    }
++
++  return true;
+ }
+ 
+-const pass_data pass_data_ipa_struct_reorg =
+-{
+-  SIMPLE_IPA_PASS, // type
+-  "struct_reorg",  // name
+-  OPTGROUP_NONE,   // optinfo_flags
+-  TV_IPA_STRUCT_REORG, // tv_id
+-  0, // properties_required
+-  0, // properties_provided
+-  0, // properties_destroyed
+-  0, // todo_flags_start
+-  0, // todo_flags_finish
+-};
++/* Find the stmt that read data from a file, the fields that can be affected
++   by the input data will be treated as the dynamic field compression
++   candidate fields.  */
+ 
+-class pass_ipa_struct_reorg : public simple_ipa_opt_pass
++bool
++ipa_struct_reorg::find_fields_in_input_stmt (fc_type_info *info)
+ {
+-public:
+-  pass_ipa_struct_reorg (gcc::context *ctxt)
+-    : simple_ipa_opt_pass (pass_data_ipa_struct_reorg, ctxt)
+-  {}
++  basic_block input_bb = NULL;
+ 
+-  /* opt_pass methods: */
+-  virtual bool gate (function *);
+-  virtual unsigned int execute (function *)
+-  {
+-    unsigned int ret = 0;
+-    unsigned int ret_reorg = 0;
+-    unsigned int level = 0;
+-    switch (struct_layout_optimize_level)
+-      {
+-	case 6: level |= SEMI_RELAYOUT;
+-	// FALLTHRU
+-	case 5: level |= POINTER_COMPRESSION_UNSAFE;
+-	// FALLTHRU
+-	case 4: level |= POINTER_COMPRESSION_SAFE;
+-	// FALLTHRU
+-	case 3: level |= DEAD_FIELD_ELIMINATION;
+-	// FALLTHRU
+-	case 2: level |= STRUCT_REORDER_FIELDS;
+-	// FALLTHRU
+-	case 1:
+-	  level |= COMPLETE_STRUCT_RELAYOUT;
+-	  level |= STRUCT_SPLIT;
+-	  break;
+-	case 0: break;
+-	default: gcc_unreachable ();
+-      }
++  for (auto *access : info->type->accesses)
++    {
++      if (!access->write_field_p ())
++	continue;
+ 
+-    if (level & POINTER_COMPRESSION_SAFE)
+-      init_pointer_size_for_pointer_compression ();
++      srfield *field = access->field;
++      if (find_fc_field (info->dynamic_shadow_fields, field->fielddecl)
++	  || find_fc_field (info->dynamic_fc_fields, field->fielddecl))
++	continue;
+ 
+-    if (level & SEMI_RELAYOUT)
+-      {
+-	semi_relayout_align = semi_relayout_level;
+-	relayout_part_size = 1 << semi_relayout_level;
+-      }
++      /* Skip dead field.  */
++      if (field->dead_field_p ())
++	continue;
+ 
+-    /* Preserved for backward compatibility, reorder fields needs run before
++      if (TREE_CODE (field->fieldtype) != INTEGER_TYPE)
++	continue;
++
++      SET_CFUN (access->function);
++      /* Guarantee this struct field is from a file.  */
++      gimple *input_stmt = NULL;
++      gimple *var_stmt = NULL;
++      if (!find_input_stmt (access->stmt, input_stmt, var_stmt)
++	  || gimple_bb (input_stmt)->loop_father->num == 0)
++	continue;
++
++      tree var = gimple_assign_rhs1 (var_stmt);
++
++      if (!info->input_stmt)
++	{
++	  info->input_stmt = input_stmt;
++	  info->input_var = access->base;
++	  info->input_file_handler = find_file_handler (input_stmt);
++	  if (!info->input_file_handler)
++	    return false;
++	}
++
++      /* Support only one input stmt now.  */
++      if (info->input_stmt != input_stmt
++	  || info->input_var != access->base)
++	return false;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Found a dynamic compression field: ");
++	  print_generic_expr (dump_file, field->fielddecl);
++	  fprintf (dump_file, ", input var: ");
++	  print_generic_expr (dump_file, var);
++	  fprintf (dump_file, "\n");
++	}
++
++      tree in_ssa = gimple_assign_rhs1 (access->stmt);
++      fc_field *fc_f = new fc_field (field->fielddecl, var, in_ssa);
++      info->dynamic_fc_fields.safe_push (fc_f);
++
++      /* All fc fields should define their ssas in the same block.  */
++      if (!input_bb)
++	input_bb = gimple_bb (SSA_NAME_DEF_STMT (in_ssa));
++      else if (input_bb != gimple_bb (SSA_NAME_DEF_STMT (in_ssa)))
++	return false;
++
++      info->start_srfn = access->function;
++    }
++
++  if (info->dynamic_fc_fields.is_empty ())
++    return false;
++
++  /* Sort all fields in the order of input ssa position.  This is required
++     to simplify the min_val and max_val calculation.  */
++  SET_CFUN (info->start_srfn);
++  renumber_gimple_stmt_uids_in_blocks (&input_bb, 1);
++  info->dynamic_fc_fields.qsort (input_order_cmp);
++
++  return true;
++}
++
++/* Find the input stmt for the rhs of the given stmt.
++   Now we only support sscanf.  */
++
++bool
++ipa_struct_reorg::find_input_stmt (gimple *stmt, gimple *&input_stmt,
++				   gimple *&var_stmt)
++{
++  /* Check pattern fc_type->field = _ssa_name.  */
++  if (!gimple_assign_single_p (stmt)
++      || TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
++    return false;
++
++  stmt = strip_copy_stmts (SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt)));
++  /* Check pattern _ssa_name = var.  */
++  if (!gimple_assign_single_p (stmt)
++      || !VAR_P (gimple_assign_rhs1 (stmt)))
++    return false;
++
++  var_stmt = stmt;
++  tree var = gimple_assign_rhs1 (stmt);
++
++  /* Search backward to find a sscanf stmt.  */
++  while (gimple_bb (stmt))
++    {
++      tree vuse = gimple_vuse (stmt);
++      if (!vuse)
++	break;
++
++      stmt = SSA_NAME_DEF_STMT (vuse);
++      if (!gimple_call_builtin_p (stmt, BUILT_IN_SSCANF))
++	continue;
++
++      /* Search '&var' starting from the 3rd argument.  */
++      for (unsigned i = 2; i < gimple_call_num_args (stmt); i++)
++	{
++	  tree arg = gimple_call_arg (stmt, i);
++	  if (TREE_CODE (arg) != ADDR_EXPR
++	      || TREE_OPERAND (arg, 0) != var)
++	    continue;
++
++	  input_stmt = stmt;
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      FC_DUMP_MSG ("Found input stmt: ");
++	      print_gimple_stmt (dump_file, stmt, 0);
++	    }
++	  return true;
++	}
++
++      /* The sscanf stmt doesn't contain 'var', so the check failed.  */
++      break;
++    }
++
++  return false;
++}
++
++/* Find the file handler, which holds the file from which the given stmt
++   read data.
++   Now only support sscanf.  */
++
++tree
++ipa_struct_reorg::find_file_handler (gimple *input_stmt)
++{
++  if (!gimple_call_builtin_p (input_stmt, BUILT_IN_SSCANF))
++    return NULL_TREE;
++
++  /* Find fgets stmt.  */
++  gimple *stmt = SSA_NAME_DEF_STMT (gimple_vuse (input_stmt));
++  if (gimple_code (stmt) != GIMPLE_CALL)
++    return NULL_TREE;
++
++  tree callee = gimple_call_fn (stmt);
++  if (callee && TREE_CODE (callee) == OBJ_TYPE_REF)
++    return NULL_TREE;
++
++  callee = gimple_call_fndecl (stmt);
++  const char *fn_name = get_func_name (callee);
++  if (!fn_name || strcmp (fn_name, "fgets") != 0)
++    return NULL_TREE;
++
++  /* Check fgets is using the string for sscanf.  */
++  tree fget_arg0 = gimple_call_arg (stmt, 0);
++  tree sscanf_arg0 = gimple_call_arg (input_stmt, 0);
++  if (TREE_OPERAND (fget_arg0, 0) != TREE_OPERAND (sscanf_arg0, 0))
++    return NULL_TREE;
++
++  return gimple_call_arg (stmt, 2);
++}
++
++/* Find fclose in start function.  */
++
++bool
++ipa_struct_reorg::find_fopen_fclose (fc_type_info *info)
++{
++  SET_CFUN (info->start_srfn);
++
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      for (auto si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
++	{
++	  gimple *stmt = gsi_stmt (si);
++	  if (!is_gimple_call (stmt))
++	    continue;
++
++	  tree decl = gimple_call_fndecl (stmt);
++	  const char *callee = get_func_name (decl);
++	  if (!callee || strcmp (callee, "fclose") != 0)
++	    continue;
++
++	  /* The fclose must use the same file handler as the fget,
++	     which reads data for sscanf.  */
++	  tree fh = gimple_call_arg (stmt, 0);
++	  if (fh != info->input_file_handler)
++	    continue;
++
++	  info->fclose_stmt = stmt;
++	  renumber_gimple_stmt_uids_in_blocks (&bb, 1);
++
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      fprintf (dump_file, "\nFound fclose in function %s:\n",
++		       get_func_name (info->start_srfn->node->decl));
++	      print_gimple_stmt (dump_file, stmt, 0);
++	    }
++
++	  return true;
++	}
++    }
++
++  return false;
++}
++
++/* Check whether the original of a dynamic shadow field is one of
++   dynamic_fc_fields.  */
++
++bool
++ipa_struct_reorg::check_dynamic_shadow_fields (fc_type_info *info)
++{
++  for (auto *shadow_field : info->dynamic_shadow_fields)
++    {
++      srfield *original = shadow_field->original;
++      fc_field *input_field = find_fc_field (info->dynamic_fc_fields,
++					     original->fielddecl);
++      if (!input_field)
++	return false;
++
++      shadow_field->input_field = input_field;
++      shadow_field->input_ssa = input_field->input_ssa;
++    }
++
++  return true;
++}
++
++bool
++ipa_struct_reorg::find_fc_paths (fc_type_info *info)
++{
++  /* Start point function.  */
++  srfunction *srfn = info->start_srfn;
++  gimple *start_stmt = info->fclose_stmt;
++
++  while (srfn)
++    {
++      /* Already seen.  */
++      if (srfn->fc_path.start_stmt)
++	return false;
++
++      SET_CFUN (srfn);
++
++      srfn->fc_path.start_stmt = start_stmt;
++      if (!srfn->fc_path.collect_blocks (start_stmt, fc_path_info::PRED)
++	  || !srfn->fc_path.collect_blocks (start_stmt, fc_path_info::SUCC))
++	return false;
++
++      /* Start at the entry function.  */
++      if (srfn->entry_function_p ())
++	return true;
++
++      /* The current function should only be called once.  */
++      cgraph_edge *edge = srfn->node->callers;
++      if (!edge || edge->next_caller || !edge->call_stmt)
++	return false;
++
++      srfn = find_function (edge->caller);
++      start_stmt = edge->call_stmt;
++    }
++
++  return false;
++}
++
++bool
++ipa_struct_reorg::find_fc_data (fc_type_info *info)
++{
++  if (!find_fc_arrays (info))
++    {
++      FC_DUMP_MSG ("Fail finding fc arrays\n");
++      return false;
++    }
++
++  if (!check_fc_array_uses (info))
++    {
++      FC_DUMP_MSG ("Fail checking fc array uses\n");
++      return false;
++    }
++
++  if (!find_fc_refs (info))
++    {
++      FC_DUMP_MSG ("Fail finding fc refs\n");
++      return false;
++    }
++
++  return true;
++}
++
++/* Find all arrays to be cached:
++   1. Defined by malloc/calloc (before start-point)
++   2. Will be used after fclose (e.g. global variables)  */
++
++bool
++ipa_struct_reorg::find_fc_arrays (fc_type_info *info)
++{
++  varpool_node *vnode;
++  tree type = info->type->type;
++
++  /* 1) Process all global vars, search for arrays of cached objects.  */
++  FOR_EACH_VARIABLE (vnode)
++    {
++      tree node = vnode->decl;
++      tree node_type = TREE_TYPE (node);
++      /* Global object is not supported.  */
++      if (types_fc_compatible_p (node_type, type))
++	return false;
++
++      switch (TREE_CODE (node_type))
++	{
++	  /* POINTER->RECORD(fc_type) */
++	  case POINTER_TYPE:
++	    if (types_fc_compatible_p (TREE_TYPE (node_type), type)
++		&& !find_fc_array (info, node, vnode))
++	      return false;
++	    break;
++	  /* RECORD->POINTER->RECORD(fc_type) */
++	  case RECORD_TYPE:
++	    for (tree t = TYPE_FIELDS (node_type); t; t = DECL_CHAIN (t))
++	      {
++		if (TREE_CODE (t) != FIELD_DECL)
++		  continue;
++
++		tree field_type = TREE_TYPE (t);
++		if (TREE_CODE (field_type) == RECORD_TYPE)
++		  {
++		    FC_DUMP_MSG ("RECORD->RECORD->... not supported\n");
++		    return false;
++		  }
++
++		if (POINTER_TYPE_P (field_type)
++		    && types_fc_compatible_p (TREE_TYPE (field_type), type))
++		  {
++		    tree field_node = build3 (COMPONENT_REF, field_type, node,
++					      t, NULL_TREE);
++		    if (!find_fc_array (info, field_node, vnode))
++		      return false;
++		  }
++		/* More safe: trace-back following VDEF/VUSE.  */
++	      }
++	    break;
++	  case ARRAY_TYPE:
++	  case UNION_TYPE:
++	  case QUAL_UNION_TYPE:
++	    if (dump_file && (dump_flags & TDF_DETAILS))
++	      {
++		FC_DUMP_MSG ("node_type not handled: ");
++		print_generic_expr (dump_file, node_type);
++		fprintf (dump_file, "\n");
++	      }
++	    return false;
++	  default:
++	    break;
++	}
++    }
++
++  return !info->fc_arrays.is_empty ();
++}
++
++/* Check if NODE is an array to be cached, and if so, record it.
++   VNODE contains references to NODE.  */
++
++bool
++ipa_struct_reorg::find_fc_array (fc_type_info *info, tree node,
++				 varpool_node *vnode)
++{
++  tree size_expr = NULL_TREE;
++  tree ssa_def = NULL_TREE;
++
++  ipa_ref *ref = NULL;
++  for (unsigned i = 0; vnode->iterate_referring (i, ref); i++)
++    {
++      /* Filter for writes to NODE.  */
++      if (ref->use != IPA_REF_STORE)
++	continue;
++      /* Ignore assignments after start-point.  */
++      if (!is_stmt_before_fclose (info, ref->stmt, ref->referring))
++	continue;
++      tree lhs = gimple_get_lhs (ref->stmt);
++      if (!operand_equal_p (lhs, node, COMPARE_DECL_FLAGS))
++	continue;
++
++      tree new_size_expr = NULL_TREE;
++      if (!get_allocate_size_iterate (info->type->type, ref->stmt,
++				      new_size_expr, &ssa_def))
++	return false;
++
++      if (new_size_expr)
++	{
++	  if (size_expr)
++	    {
++	      FC_DUMP_MSG ("fc_array allocated twice before start-point\n");
++	      return false;
++	    }
++	  size_expr = new_size_expr;
++
++	  /* Allocation must happen at start function.  */
++	  if (ref->referring != info->start_srfn->node)
++	    return false;
++
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      FC_DUMP_MSG ("Add array: ");
++	      print_generic_expr (dump_file, node);
++	      fprintf (dump_file, ", size: ");
++	      print_generic_expr (dump_file, size_expr);
++	      fprintf (dump_file, ", ssa_def: ");
++	      print_generic_expr (dump_file, ssa_def);
++	      fprintf (dump_file, "\n");
++	    }
++	}
++    }
++
++  if (size_expr)
++    {
++      if (duplicative_array_p (info, ssa_def))
++	return false;
++
++      fc_array *array = new fc_array (node, size_expr, ssa_def, vnode);
++      info->fc_arrays.safe_push (array);
++    }
++
++  return true;
++}
++
++/* Given the SSA_DEF of an array, check if it's duplicative.  */
++
++bool
++ipa_struct_reorg::duplicative_array_p (fc_type_info *info, tree ssa_def)
++{
++  for (auto *array : info->fc_arrays)
++    {
++      if (array->ssa_def != ssa_def)
++	continue;
++
++      FC_DUMP_MSG ("Array assigned to multiple variable\n");
++      return true;
++    }
++
++  return false;
++}
++
++bool
++ipa_struct_reorg::is_stmt_before_fclose (fc_type_info *info, gimple *stmt,
++					 symtab_node *node)
++{
++  gcc_assert (info->fclose_stmt);
++  srfunction *f = find_function (as_a (node));
++  gcc_assert (f);
++
++  if (gimple_bb (stmt) == gimple_bb (info->fclose_stmt))
++    return gimple_uid (stmt) < gimple_uid (info->fclose_stmt);
++
++  /* If array allocations are outside start-point's function, we may need to
++     create global vars to record the sizes.  */
++  return f->fc_path.pre_bbs.contains (gimple_bb (stmt));
++}
++
++/* Check if the VAR is a global pointer created by reorg.  */
++
++bool
++ipa_struct_reorg::reorg_ptr_p (tree var)
++{
++  if (TREE_CODE (var) != VAR_DECL)
++    return false;
++
++  const char *decl_name = IDENTIFIER_POINTER (DECL_NAME (var));
++  if (!decl_name)
++    return false;
++
++  const char *reorg_name = strstr (decl_name, ".reorg");
++  if (!reorg_name)
++    return false;
++
++  return strstr (reorg_name, "_gptr");
++}
++
++/* Return number of objects of TYPE following define chain from STMT.
++   If the number is not certain, set ERROR so we can abort field compression.
++   If SSA_DEF is not NULL, the ssa_name of allocated ptr will be assigned to it.
++ */
++
++bool
++ipa_struct_reorg::get_allocate_size_iterate (tree type, gimple *stmt,
++					     tree &size, tree *ssa_def)
++{
++  if (!stmt)
++    return false;
++
++  switch (gimple_code (stmt))
++    {
++      case GIMPLE_ASSIGN:
++	return get_allocate_size_assign (type, as_a (stmt),
++					 size, ssa_def);
++      case GIMPLE_CALL:
++	return get_allocate_size_call (type, as_a (stmt),
++				       size, ssa_def);
++      default:
++	return false;
++    }
++}
++
++bool
++ipa_struct_reorg::get_allocate_size_assign (tree type, gassign *stmt,
++					    tree &size, tree *ssa_def)
++{
++  tree rhs = gimple_assign_rhs1 (stmt);
++  if ((!gimple_assign_single_p (stmt) && !gimple_assign_cast_p (stmt))
++      || TREE_CODE (rhs) != SSA_NAME)
++    return true;
++
++  gimple *def_stmt = SSA_NAME_DEF_STMT (rhs);
++  /* Handle the global arrays split by struct_reorg.  */
++  if (reorg_ptr_p (gimple_assign_lhs (stmt)))
++    return get_allocate_size_reorg_ptr (def_stmt, size);
++
++  return get_allocate_size_iterate (type, def_stmt, size, ssa_def);
++}
++
++bool
++ipa_struct_reorg::get_allocate_size_call (tree type, gcall *stmt,
++					  tree &size, tree *ssa_def)
++{
++  tree lhs = gimple_call_lhs (stmt);
++  gcc_assert (TREE_CODE (lhs) == SSA_NAME);
++  if (ssa_def)
++    *ssa_def = lhs;
++
++  size = get_allocate_size (type, lhs, NULL_TREE, stmt);
++
++  return size != NULL_TREE;
++}
++
++/* Handle the global arrays split by struct_reorg:
++   1) The new array ptrs are marked with suffix "_gptr".
++   2) The array ptr are calculated with form:
++      _gptr0 = calloc (NUM, size_all);
++      _gptr1 = _gptr0 + NUM * sizeof (TREE_TYPE (_gptr0));
++      _gptr2 = _gptr1 + NUM * sizeof (TREE_TYPE (_gptr1));
++      ...
++ */
++
++bool
++ipa_struct_reorg::get_allocate_size_reorg_ptr (gimple *plus_stmt, tree &size)
++{
++  /* Check the POINTER_PLUS_EXPR.  */
++  if (!is_gimple_assign (plus_stmt)
++      || gimple_assign_rhs_code (plus_stmt) != POINTER_PLUS_EXPR)
++    return false;
++
++  tree rhs1 = gimple_assign_rhs1 (plus_stmt);
++  tree rhs2 = gimple_assign_rhs2 (plus_stmt);
++  tree prev_type = TREE_TYPE (rhs1);
++
++  /* Check the MULT_EXPR.  */
++  gcc_assert (TREE_CODE (rhs2) == SSA_NAME);
++  gimple *mul_stmt = SSA_NAME_DEF_STMT (rhs2);
++  if (!is_gimple_assign (mul_stmt)
++      || gimple_assign_rhs_code (mul_stmt) != MULT_EXPR)
++    return false;
++
++  tree num = gimple_assign_rhs1 (mul_stmt);
++  tree mul_by = gimple_assign_rhs2 (mul_stmt);
++  if (TREE_CODE (mul_by) == SSA_NAME)
++    std::swap (num, mul_by);
++
++  if (TREE_CODE (num) != SSA_NAME || TREE_CODE (mul_by) != INTEGER_CST
++      || !operand_equal_p (mul_by, TYPE_SIZE_UNIT (prev_type)))
++    return false;
++
++  /* We can trace to original calloc/malloc to make this safer.  */
++
++  size = num;
++
++  return true;
++}
++
++/* Returns the allocated size / T size for STMT.  That is the number of
++   elements in the array allocated.  */
++
++tree
++ipa_struct_reorg::get_allocate_size (tree type, tree decl, tree orig_type,
++				     gimple *stmt)
++{
++  if (!stmt
++      || gimple_code (stmt) != GIMPLE_CALL
++      || !handled_allocation_stmt (stmt))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "\nNot an allocate statement:\n");
++	  print_gimple_stmt (dump_file, stmt, 0);
++	  fprintf (dump_file, "\n");
++	}
++      return NULL_TREE;
++    }
++
++  tree struct_size = TYPE_SIZE_UNIT (type);
++
++  /* Specify the correct size to relax multi-layer pointer.  */
++  if (TREE_CODE (decl) == SSA_NAME && orig_type && isptrptr (orig_type))
++    struct_size = TYPE_SIZE_UNIT (orig_type);
++
++  tree size = gimple_call_arg (stmt, 0);
++
++  if (gimple_call_builtin_p (stmt, BUILT_IN_REALLOC)
++      || gimple_call_builtin_p (stmt, BUILT_IN_ALIGNED_ALLOC))
++    size = gimple_call_arg (stmt, 1);
++  else if (gimple_call_builtin_p (stmt, BUILT_IN_CALLOC))
++    {
++      tree arg1;
++      arg1 = gimple_call_arg (stmt, 1);
++      /* Check that second argument is a constant equal to
++	 the size of structure.  */
++      if (operand_equal_p (arg1, struct_size, 0))
++	return size;
++      /* ??? Check that first argument is a constant
++	 equal to the size of structure.  */
++      /* If the allocated number is equal to the value of struct_size,
++	 the value of arg1 is changed to the allocated number.  */
++      if (operand_equal_p (size, struct_size, 0))
++	return arg1;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "\ncalloc the correct size:\n");
++	  print_gimple_stmt (dump_file, stmt, 0);
++	  fprintf (dump_file, "\n");
++	}
++      return NULL_TREE;
++    }
++
++  tree num;
++  if (!is_result_of_mult (size, &num, struct_size))
++    return NULL_TREE;
++
++  return num;
++}
++
++/* Find all fc_refs (variables/arrays to be modified according to some
++   fc_array):
++   - Will be used after fclose (e.g. global variables).
++   - Before fclose, value or array content is assigned with references to some
++     recognized fc_array. (If there are multiple fc_array variables referenced
++     by one fc_ref, quit field compression.  Because we don't know how to
++     modify it then.)  */
++
++bool
++ipa_struct_reorg::find_fc_refs (fc_type_info *info)
++{
++  /* For each fc_array, follow the use chains and search for fc_refs.  */
++  for (auto *array : info->fc_arrays)
++    {
++      gcc_assert (array->ssa_def);
++      SET_CFUN (info->start_srfn);
++      if (!find_fc_refs_iterate (info, array, array->ssa_def, true))
++	return false;
++
++      ipa_ref *ref = NULL;
++      for (unsigned i = 0; array->vnode->iterate_referring (i, ref); i++)
++	{
++	  /* Filter for memory loads.  */
++	  if (ref->use != IPA_REF_LOAD)
++	    continue;
++	  /* Ignore assignments after start-point.  */
++	  if (!is_stmt_before_fclose (info, ref->stmt, ref->referring))
++	    continue;
++	  if (!gimple_assign_single_p (ref->stmt))
++	    return false;
++	  tree rhs = gimple_assign_rhs1 (ref->stmt);
++	  if (!operand_equal_p (rhs, array->var, COMPARE_DECL_FLAGS))
++	    continue;
++
++	  SET_CFUN (find_function (as_a (ref->referring)));
++	  tree lhs = gimple_assign_lhs (ref->stmt);
++	  if (!find_fc_refs_iterate (info, array, lhs, true))
++	    return false;
++	}
++    }
++
++  return true;
++}
++
++/* Given a fc_array ARRAY and a variable VAR referring to ARRAY,
++   find fc_refs iteratively follow the use chain of VAR.  */
++
++bool
++ipa_struct_reorg::find_fc_refs_iterate (fc_type_info *info, fc_array *array,
++					tree var, bool loop_back)
++{
++  switch (TREE_CODE (var))
++    {
++      /* 1) For SSA_NAME, iterate through use chain.  */
++      case SSA_NAME:
++	return find_fc_refs_ssa_name (info, array, var, loop_back);
++
++      /* 2) For VAR_DECL, submit a fc_ref.  */
++      case VAR_DECL:
++	return add_fc_ref (info, array, var, NULL_TREE);
++
++      /* 3) For MEM_REF, find fc_ref following base's def chain.  */
++      case MEM_REF:
++	return find_fc_refs_mem_ref (info, array, var);
++
++      case COMPONENT_REF:
++	return find_fc_refs_component_ref (info, array, var);
++
++      default:
++	if (dump_file && (dump_flags & TDF_DETAILS))
++	  {
++	    FC_DUMP_MSG ("Unknown use kind, code: %s, var: ",
++			 get_tree_code_name (TREE_CODE (var)));
++	    print_generic_expr (dump_file, var);
++	    fprintf (dump_file, "\n");
++	  }
++	return false;
++    }
++}
++
++/* Find all fc_refs of a SSA_NAME var through its use chain.  */
++
++bool
++ipa_struct_reorg::find_fc_refs_ssa_name (fc_type_info *info, fc_array *array,
++					 tree var, bool loop_back)
++{
++  use_operand_p use_p;
++  imm_use_iterator iter;
++  FOR_EACH_IMM_USE_FAST (use_p, iter, var)
++    {
++      gimple *stmt = USE_STMT (use_p);
++      cgraph_node *cnode = current_function->node;
++      if (!is_stmt_before_fclose (info, stmt, cnode))
++	{
++	  gimple *def_stmt = SSA_NAME_DEF_STMT (var);
++	  if (!is_stmt_before_fclose (info, def_stmt, cnode))
++	    continue;
++
++	  /* If a local ptr of compressed type is defined before start-point
++	     and used after start-point, quit field compression. (Otherwise we
++	     need to clone and version the ptr's define statement.)  */
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      FC_DUMP_MSG ("Local usage not handled: ");
++	      print_gimple_stmt (dump_file, stmt, 0);
++	      fprintf (dump_file, "  Defined at: ");
++	      print_gimple_stmt (dump_file, def_stmt, 0);
++	    }
++
++	  return false;
++	}
++
++      tree lhs = gimple_get_lhs (stmt);
++      switch (gimple_code (stmt))
++	{
++	  case GIMPLE_ASSIGN:
++	    /* Rule out: expr(var) = X.  */
++	    if (walk_tree (&lhs, check_for_ssa, var, NULL)
++		|| !fc_type_pointer_p (info, lhs))
++	      break;
++	    if (!find_fc_refs_iterate (info, array, lhs, loop_back))
++	      return false;
++	    break;
++	  case GIMPLE_PHI:
++	    {
++	      /* Check if VAR is from back_edge.  */
++	      bool loop_var = false;
++	      gphi *phi = as_a (stmt);
++	      for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
++		{
++		  if (gimple_phi_arg_def (phi, i) != var)
++		    continue;
++		  edge e = gimple_phi_arg_edge (phi, i);
++		  if (e->flags & EDGE_DFS_BACK)
++		    {
++		      loop_var = true;
++		      break;
++		    }
++		}
++
++	      if (!loop_var)
++		{
++		  if (!find_fc_refs_iterate (info, array, lhs, loop_back))
++		    return false;
++		}
++	      else if (loop_back)
++		{
++		  if (!find_fc_refs_iterate (info, array, lhs, false))
++		    return false;
++		}
++	      break;
++	    }
++	  case GIMPLE_DEBUG:
++	  case GIMPLE_COND:
++	  case GIMPLE_SWITCH:
++	  case GIMPLE_NOP:
++	    break;
++	  default:
++	    /* Cannot be sure how fc_array is used, like GIMPLE_CALL?  */
++	    if (dump_file && (dump_flags & TDF_DETAILS))
++	      {
++		FC_DUMP_MSG ("fc_array usage not handled: ");
++		print_gimple_stmt (dump_file, stmt, 0);
++	      }
++	    return false;
++	}
++    }
++  return true;
++}
++
++/* Find all fc_refs of a MEM_REF var through its base's def chain.  */
++
++bool
++ipa_struct_reorg::find_fc_refs_mem_ref (fc_type_info *info, fc_array *array,
++					tree var)
++{
++  if (!integer_zerop (TREE_OPERAND (var, 1)))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("MEM_REF offset not handled: ");
++	  print_generic_expr (dump_file, var);
++	  fprintf (dump_file, "\n");
++	}
++      return false;
++    }
++
++  if (!fc_type_pointer_p (info, var))
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Type not compatible: ");
++	  print_generic_expr (dump_file, TREE_TYPE (var));
++	  fprintf (dump_file, "\n");
++	}
++      return false;
++    }
++
++  tree base = TREE_OPERAND (var, 0);
++  tree ref = get_ptr_decl (base);
++  if (!ref)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Failed to get array decl from: ");
++	  print_generic_expr (dump_file, base);
++	  fprintf (dump_file, "\n");
++	}
++      return false;
++    }
++
++  return add_fc_ref (info, array, ref, NULL_TREE);
++}
++
++/* Find fc_refs of a COMPONENT_REF var.  */
++
++bool
++ipa_struct_reorg::find_fc_refs_component_ref (fc_type_info *info,
++					      fc_array *array, tree var)
++{
++  tree base = TREE_OPERAND (var, 0);
++
++  if (TREE_CODE (base) == VAR_DECL)
++    return add_fc_ref (info, array, var, NULL_TREE);
++  else if (TREE_CODE (base) == MEM_REF)
++    base = TREE_OPERAND (base, 0);
++
++  tree ref = get_ptr_decl (base);
++  if (!ref)
++    {
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Failed to get array decl from: ");
++	  print_generic_expr (dump_file, base);
++	  fprintf (dump_file, "\n");
++	}
++      return false;
++    }
++
++  tree field = TREE_OPERAND (var, 1);
++  return add_fc_ref (info, array, ref, field);
++}
++
++/* Return the top level fc_type pointer tree node.  */
++
++bool
++ipa_struct_reorg::fc_type_pointer_p (fc_type_info *info, tree t)
++{
++  tree type = TREE_TYPE (t);
++
++  return POINTER_TYPE_P (type)
++	 && types_fc_compatible_p (TREE_TYPE (type), info->type->type);
++}
++
++/* Add VAR as a fc_ref entry into INFO.
++   1) VAR is a single pointer: fc_ref::size = NULL, fc_ref::field = NULL
++   2) VAR is an array of pointers: fc_ref::size is the size of array,
++				   fc_ref::field = NULL
++   3) VAR is an array of records(e.g. struct {fc_type *p;}):
++	fc_ref::size is the size of array, fc_ref::field is p
++ */
++
++bool
++ipa_struct_reorg::add_fc_ref (fc_type_info *info, fc_array *array, tree var,
++			      tree field)
++{
++  /* The way we're searching for fc_refs, fc_array vars will also meet the
++     requirements.  Rule out them.  */
++  for (auto *d : info->fc_arrays)
++    if (operand_equal_p (var, d->var, COMPARE_DECL_FLAGS))
++      return true;
++
++  tree type = NULL_TREE;
++  tree size_expr = NULL_TREE;
++
++  /* Rule out duplicants.  */
++  switch (check_duplicative_ref (info, array, var, field, type, size_expr))
++    {
++      case check_ref_result::NEW: break;
++      case check_ref_result::DUPLICATIVE: return true;
++      case check_ref_result::ERROR: return false;
++    }
++
++  if (!type)
++    {
++      type = TREE_TYPE (var);
++      /* Use the "real" type for void*.  */
++      if (VOID_POINTER_P (type))
++	{
++	  srdecl *decl = find_decl (var);
++	  if (!decl || !decl->orig_type || !POINTER_TYPE_P (decl->orig_type))
++	    return false;
++	  type = decl->orig_type;
++	}
++    }
++
++  /* If REF is an array, get the size it is allocated with.  */
++  if ((!size_expr) && POINTER_TYPE_P (type)
++      && !types_fc_compatible_p (TREE_TYPE (type), info->type->type))
++    {
++      gimple *stmt = NULL;
++      if (TREE_CODE (var) == SSA_NAME)
++	stmt = SSA_NAME_DEF_STMT (var);
++      else
++	stmt = find_def_stmt_before_fclose (info, var);
++
++      if (!get_allocate_size_iterate (TREE_TYPE (type), stmt, size_expr))
++	return false;
++    }
++
++  fc_ref *ref = new fc_ref (var, type, array, size_expr, field);
++  info->fc_refs.safe_push (ref);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Add fc_ref: ");
++      ref->dump (dump_file);
++    }
++
++  return true;
++}
++
++/* Check if we have found another fc_ref with the same var.  */
++
++check_ref_result
++ipa_struct_reorg::check_duplicative_ref (fc_type_info *info, fc_array *array,
++					 tree var, tree field,
++					 tree &type, tree &size_expr)
++{
++  for (auto *ref : info->fc_refs)
++    {
++      if (!operand_equal_p (var, ref->var, COMPARE_DECL_FLAGS))
++	continue;
++
++      /* The var refers to multiple fc_array.  */
++      if (ref->source != array)
++	{
++	  if (dump_file && (dump_flags & TDF_DETAILS))
++	    {
++	      FC_DUMP_MSG ("Variable ");
++	      print_generic_expr (dump_file, var);
++	      fprintf (dump_file, " referring to multiple arrays: ");
++	      print_generic_expr (dump_file, ref->source->var);
++	      fprintf (dump_file, " and ");
++	      print_generic_expr (dump_file, array->var);
++	      fprintf (dump_file, "\n");
++	    }
++	  return check_ref_result::ERROR;
++	}
++
++      if (ref->field)
++	{
++	  gcc_assert (field);
++	  /* Different fields in an array of structures.  */
++	  if (!operand_equal_p (field, ref->field, COMPARE_DECL_FLAGS))
++	    {
++	      type = ref->orig_type ? ref->orig_type : TREE_TYPE (var);
++	      size_expr = ref->size;
++	      continue;
++	    }
++	}
++
++      return check_ref_result::DUPLICATIVE;
++    }
++
++  return check_ref_result::NEW;
++}
++
++/* Find the single definition stmt before start-point for a var.  */
++
++gimple *
++ipa_struct_reorg::find_def_stmt_before_fclose (fc_type_info *info, tree var)
++{
++  tree base = TREE_CODE (var) == COMPONENT_REF ? TREE_OPERAND (var, 0) : var;
++  if (TREE_CODE (base) != VAR_DECL)
++    return NULL;
++
++  varpool_node *vnode = varpool_node::get (base);
++  /* Local array is not handled yet.  */
++  if (!vnode)
++    return NULL;
++
++  gimple *def_stmt = NULL;
++  ipa_ref *ref = NULL;
++  for (unsigned i = 0; vnode->iterate_referring (i, ref); i++)
++    {
++      if (ref->use != IPA_REF_STORE)
++	continue;
++
++      gimple *stmt = ref->stmt;
++      tree lhs = gimple_get_lhs (stmt);
++      if (!operand_equal_p (lhs, var, COMPARE_DECL_FLAGS)
++	  || !is_stmt_before_fclose (info, stmt, ref->referring))
++	continue;
++
++      if (gimple_assign_single_p (stmt)
++	  && integer_zerop (gimple_assign_rhs1 (stmt)))
++	continue;
++
++      if (def_stmt)
++	{
++	  FC_DUMP_MSG ("Multiple definations before start-point?\n");
++	  return NULL;
++	}
++
++      def_stmt = stmt;
++    }
++
++  return def_stmt;
++}
++
++/* VAR is an ssa_name defined by some array + offset.
++   1) For global variables, returns declaration of the array.
++   2) For arrays locally allocated with recognized functions, returns the
++      ssa_name it is assigned with.
++   3) Return NULL_TREE if cannot decide.  */
++
++tree
++ipa_struct_reorg::get_ptr_decl (tree var)
++{
++  if (TREE_CODE (var) != SSA_NAME)
++    return NULL_TREE;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  tree var_type = TREE_TYPE (var);
++
++  if (gimple_code (stmt) == GIMPLE_ASSIGN)
++    {
++      gassign *assign = as_a (stmt);
++      switch (gimple_assign_rhs_class (assign))
++	{
++	  case GIMPLE_BINARY_RHS:
++	    {
++	      if (gimple_assign_rhs_code (assign) != POINTER_PLUS_EXPR)
++		return NULL_TREE;
++	      tree lhs = gimple_assign_rhs1 (assign);
++	      if (types_fc_compatible_p (TREE_TYPE (lhs), var_type)
++		  || VOID_POINTER_P (TREE_TYPE (lhs)))
++		return get_ptr_decl (lhs);
++	      return NULL_TREE;
++	    }
++
++	  case GIMPLE_UNARY_RHS:
++	  case GIMPLE_SINGLE_RHS:
++	    {
++	      tree rhs = gimple_assign_rhs1 (stmt);
++	      if (TREE_CODE (rhs) == SSA_NAME)
++		return get_ptr_decl (rhs);
++	      else if (TREE_CODE (rhs) == VAR_DECL)
++		return rhs;
++	      else if (TREE_CODE (rhs) == COMPONENT_REF)
++		{
++		  tree base = TREE_OPERAND (rhs, 0);
++		  return DECL_P (base) ? rhs : NULL_TREE;
++		}
++	      else
++		return NULL_TREE;
++	    }
++	  default:
++	    return NULL_TREE;
++	}
++    }
++  else if (gimple_code (stmt) == GIMPLE_CALL)
++    return handled_allocation_stmt (stmt) ? gimple_get_lhs (stmt) : NULL_TREE;
++  else
++    return NULL_TREE;
++
++  /* TODO: GIMPLE_PHI can be supported (not affecting correctness).  */
++}
++
++/* Search info->input_var backward using def/use chain until finding one of
++   the arrays we have found in find_fc_arrays.  */
++
++bool
++ipa_struct_reorg::check_fc_array_uses (fc_type_info *info)
++{
++  hash_set visited;
++  auto_vec worklist;
++
++  visited.add (info->input_var);
++  worklist.safe_push (info->input_var);
++
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++      tree_code code = TREE_CODE (t);
++      if (code != SSA_NAME && code != VAR_DECL)
++	continue;
++
++      for (auto *array : info->fc_arrays)
++	if (t == array->ssa_def || t == array->var)
++	  return true;
++
++      /* If we reach a global variable, it must match a fc_array.  */
++      if (code == VAR_DECL)
++	return false;
++
++      gimple *stmt = SSA_NAME_DEF_STMT (t);
++      if (gimple_code (stmt) == GIMPLE_PHI)
++	{
++	  for (unsigned i = 0; i < gimple_phi_num_args (stmt); ++i)
++	    {
++	      tree arg = gimple_phi_arg_def (stmt, i);
++	      if (!visited.add (arg))
++		worklist.safe_push (arg);
++	    }
++	}
++      else if (gimple_assign_single_p (stmt)
++	       || gimple_assign_rhs_code_p (stmt, POINTER_PLUS_EXPR))
++	{
++	  tree rhs = gimple_assign_rhs1 (stmt);
++	  if (!visited.add (rhs))
++	    worklist.safe_push (rhs);
++	}
++    }
++
++  return false;
++}
++
++/* Calculate the reference count of all fc fields.  */
++
++void
++ipa_struct_reorg::calc_fc_ref_count (fc_type_info *info)
++{
++  for (auto *access : info->type->accesses)
++    {
++      if (!access->field)
++	continue;
++
++      fc_field *fc_f = find_fc_field (info->dynamic_fc_fields,
++				      access->field->fielddecl);
++      if (fc_f)
++	fc_f->ref_cnt++;
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Reference count:\n");
++      for (auto *fc_f : info->dynamic_fc_fields)
++	{
++	  print_generic_expr (dump_file, fc_f->field);
++	  fprintf (dump_file, " : %d\n", fc_f->ref_cnt);
++	}
++    }
++}
++
++/* For dynamic fields, we heuristically change data type.  */
++
++bool
++ipa_struct_reorg::compress_fields_dynamic (fc_type_info *info)
++{
++  const std::map precision_map{
++    {64, 16}, {32, 16}, {16, 8}
++  };
++
++  for (auto *fc_f : info->dynamic_fc_fields)
++    {
++      tree old_type = TREE_TYPE (fc_f->field);
++      bool is_unsigned = TYPE_UNSIGNED (old_type);
++      gcc_assert (TREE_CODE (old_type) == INTEGER_TYPE);
++
++      auto iter = precision_map.find (TYPE_PRECISION (old_type));
++      if (iter == precision_map.cend ())
++	return false;
++
++      fc_f->new_type = get_integer_type_node (iter->second, is_unsigned);
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Change the type of ");
++	  print_generic_expr (dump_file, fc_f->field);
++	  fprintf (dump_file, " from (prec=%d) to ", TYPE_PRECISION (old_type));
++	  print_generic_expr (dump_file, fc_f->new_type);
++	  fprintf (dump_file, "(prec=%d)\n", TYPE_PRECISION (fc_f->new_type));
++	}
++
++      info->record_cond (fc_f);
++      fc_f->cond->new_type = fc_f->new_type;
++    }
++
++    return true;
++}
++
++/* Tune the upper boundary for dynamic fields.  The data field may be
++   assigned a constant that is larger than the maximum value.  For this
++   case, we can reserve a special value for it, and then we can
++   compress/decompress it by creating a map between this reserved value
++   and the real big value.  In the meantime, we will have to reduce the
++   upper boundary for this specific field.  */
++
++bool
++ipa_struct_reorg::calc_dynamic_boundary (fc_type_info *info)
++{
++  /* Initialize the low_bound and high_bound.  */
++  for (auto *cond : info->fc_conds)
++    {
++      tree ssa_type = TREE_TYPE (cond->fields[0]->input_ssa);
++
++      /* Low bound is always zero.  */
++      cond->low_bound = fold_convert (ssa_type, integer_zero_node);
++
++      /* High bound is the max value of the type.  */
++      unsigned bits = cond->bits ? cond->bits
++				 : TYPE_PRECISION (cond->new_type);
++      unsigned max_value = wi::max_value (bits, UNSIGNED).to_uhwi ();
++      cond->high_bound = build_int_cst (ssa_type, max_value);
++
++      auto_vec special_values;
++      /* Calculate upper bound.  */
++      for (auto *access : info->type->accesses)
++	{
++	  gimple *stmt = access->stmt;
++	  if (!gimple_assign_single_p (stmt))
++	    continue;
++
++	  /* Skip if it is not an assignment to fc field.  */
++	  if (!fc_cond_field_p (gimple_assign_lhs (stmt), cond))
++	    continue;
++
++	  /* Skip if it is loaded from a fc field.  */
++	  tree rhs = gimple_assign_rhs1 (stmt);
++	  if (fc_field_load_p (rhs, cond))
++	    continue;
++
++	  /* Skip if it is from input_ssa.  */
++	  if (fc_input_ssa_p (rhs, cond))
++	    continue;
++
++	  /* Make sure the assignment is a constant.  If possible, we
++	     try to find all possible constants by peephole.  */
++	  HOST_WIDE_INT value;
++	  if (fc_peephole_const_p (rhs, value))
++	    {
++	      special_values.safe_push (value);
++	      cond->field_class->closure.write_special_rhs.put (rhs, value);
++	      continue;
++	    }
++	}
++
++      /* Execute multiple rounds because we didn't sort the special_values.  */
++      while (true)
++	{
++	  unsigned size = cond->special_values.length ();
++	  for (auto value : special_values)
++	    update_high_bound (cond, value);
++	  if (size == cond->special_values.length ())
++	    break;
++	}
++    }
++
++  return true;
++}
++
++/* Return true if the VAR is a mem reference of fc_field in the fc_cond.  */
++
++bool
++ipa_struct_reorg::fc_cond_field_p (tree var, const fc_cond *cond)
++{
++  if (TREE_CODE (var) != COMPONENT_REF)
++    return false;
++
++  /* Find the stmt assigning to the fc field.  */
++  tree field = TREE_OPERAND (var, 1);
++  return find_fc_field (cond->fields, field);
++}
++
++/* Return true if var is one of cond's input_ssa.  */
++
++bool
++ipa_struct_reorg::fc_input_ssa_p (tree var, const fc_cond *cond)
++{
++  if (TREE_CODE (var) != SSA_NAME)
++    return false;
++
++  for (auto *fc_f : cond->fields)
++    if (fc_f->input_ssa == var)
++      return true;
++
++  return false;
++}
++
++/* Return true if the VAR is loaded from another fc field.  */
++
++bool
++ipa_struct_reorg::fc_field_load_p (tree var, const fc_cond *cond)
++{
++  if (TREE_CODE (var) != SSA_NAME)
++    return false;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  return gimple_assign_load_p (stmt)
++	 && fc_cond_field_p (gimple_assign_rhs1 (stmt), cond);
++}
++
++/* Reduce the high_bound of COND by 1, if the value is larger
++   than the high_bound.  */
++
++void
++ipa_struct_reorg::update_high_bound (fc_cond *cond, HOST_WIDE_INT value)
++{
++  HOST_WIDE_INT high_bound = tree_to_uhwi (cond->high_bound);
++  if (value >= 0 && value <= high_bound)
++    return;
++
++  if (cond->special_values.contains (value))
++    return;
++
++  high_bound--;
++  cond->high_bound = build_int_cst (TREE_TYPE (cond->high_bound), high_bound);
++  cond->special_values.safe_push (value);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    FC_DUMP_MSG ("Found special value %ld, and reduce high_bound to 0x%lx\n",
++		 value, high_bound);
++}
++
++/* Check all data in fc_cond refer to a closure.  */
++bool
++ipa_struct_reorg::check_closure (fc_type_info *info)
++{
++  for (auto *cond : info->fc_conds)
++    {
++      if (!check_closure (info, cond))
++	{
++	  FC_DUMP_MSG ("Fail checking closure\n");
++	  return false;
++	}
++    }
++
++  return true;
++}
++
++/* All write stmts could be
++   (1) unchange, i.e. optimize away compress/decompress
++   (2) change, i.e. use unoptimized compress
++
++   For case (2), we may have the scenario like below,
++
++     B = A->field;
++     ... = B;
++     C->field = B;
++
++   We still can prove C->field is from A->field, so they are
++   in a closure, but we must decompress A->field and compress
++   C->field, because B may be used outside the closure, which
++   we don't care about.  */
++
++bool
++ipa_struct_reorg::check_closure (fc_type_info *info, fc_cond *cond)
++{
++  fc_field_class *field_class = cond->field_class;
++
++  for (auto *access : info->type->accesses)
++    {
++      if (!access->write_field_p ()
++	  || access->field->field_class != field_class
++	  || !fc_cond_field_p (access->expr, cond))
++	continue;
++
++      SET_CFUN (access->function);
++
++      gimple *stmt = access->stmt;
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Check closure: ");
++	  print_gimple_stmt (dump_file, stmt, 0);
++	}
++
++      /* Skip if we have already analyzed this stmt.  */
++      if (field_class->closure.write_unchange_p (stmt)
++	  || field_class->closure.write_change_p (stmt))
++	continue;
++
++      tree rhs = gimple_assign_rhs1 (stmt);
++      HOST_WIDE_INT *value = field_class->closure.write_special_rhs.get (rhs);
++      if (fc_input_ssa_p (rhs, cond)
++	  || (value && cond->special_values.contains (*value)))
++	{
++	  /* Case (2) */
++	  field_class->closure.add_write_change (stmt);
++	  FC_DUMP_MSG ("Need to change.\n");
++	  continue;
++	}
++      if (value && !cond->special_values.contains (*value))
++	{
++	  /* Case (2) */
++	  field_class->closure.add_write_unchange (stmt);
++	  FC_DUMP_MSG ("No need to change.\n");
++	  continue;
++	}
++
++      if (!gimple_assign_single_p (stmt)
++	  || TREE_CODE (rhs) != SSA_NAME)
++	return false;
++
++      gimple *def_stmt = SSA_NAME_DEF_STMT (rhs);
++      if (gimple_assign_single_p (def_stmt))
++	{
++	  /* Check if RHS is from the same fc class.  */
++	  srtype *type = NULL;
++	  srfield *field = NULL;
++	  tree base = NULL_TREE;
++	  if (get_base_type (gimple_assign_rhs1 (def_stmt), base, type, field)
++	      && field->field_class == field_class)
++	    {
++	      if (write_field_class_only_p (info, field_class, rhs))
++		{
++		  /* Case (1).  */
++		  field_class->closure.add_write_unchange (stmt);
++		  field_class->closure.add_read_unchange (def_stmt);
++
++		  if (dump_file && (dump_flags & TDF_DETAILS))
++		    {
++		      FC_DUMP_MSG ("No need to change: ");
++		      print_gimple_stmt (dump_file, def_stmt, 0);
++		    }
++		}
++	      else
++		{
++		  /* Case (2).  */
++		  field_class->closure.add_write_change (stmt);
++		  FC_DUMP_MSG ("Need to change. \n");
++		}
++
++	      continue;
++	    }
++	}
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  FC_DUMP_MSG ("Check closure fail: ");
++	  print_gimple_stmt (dump_file, stmt, 0);
++	}
++
++      return false;
++    }
++
++  collect_closure_read_change (info, field_class);
++
++  return true;
++}
++
++/* Return true if all stmts using ssa_def are to write a fc field.  */
++
++bool
++ipa_struct_reorg::write_field_class_only_p (fc_type_info *info,
++					    fc_field_class *field_class,
++					    tree ssa_def)
++{
++  imm_use_iterator imm_iter;
++  gimple *stmt;
++  FOR_EACH_IMM_USE_STMT (stmt, imm_iter, ssa_def)
++    {
++      if (gimple_code (stmt) == GIMPLE_DEBUG)
++	continue;
++
++      /* We don't know if it is PHI.  */
++      if (!is_gimple_assign (stmt))
++	return false;
++
++      srtype *type = NULL;
++      srfield *field = NULL;
++      tree base = NULL_TREE;
++      if (!get_base_type (gimple_assign_lhs (stmt), base, type, field)
++	  || type != info->type
++	  || !field_class->srfields.contains (field))
++	return false;
++    }
++
++  return true;
++}
++
++/* Collect read_change.  */
++
++void
++ipa_struct_reorg::collect_closure_read_change (fc_type_info *info,
++					       fc_field_class *field_class)
++{
++  for (auto *access : info->type->accesses)
++    {
++      if (!access->read_field_p ()
++	  || access->field->field_class != field_class)
++	continue;
++
++      /* Skip statement that has been marked as unchanged.  */
++      if (field_class->closure.read_unchange_p (access->stmt))
++	continue;
++
++      field_class->closure.add_read_change (access->stmt);
++    }
++}
++
++unsigned
++ipa_struct_reorg::execute_dynamic_field_compression ()
++{
++  if (current_fc_level != fc_level::DYNAMIC)
++    return 0;
++
++  current_layout_opt_level = STRUCT_REORDER_FIELDS;
++  replace_type_map.empty ();
++  record_accesses ();
++  prune_escaped_types ();
++  check_and_prune_struct_for_field_compression ();
++
++  return dynamic_fc_rewrite ();
++}
++
++unsigned
++ipa_struct_reorg::dynamic_fc_rewrite ()
++{
++  if (!create_dynamic_fc_newtypes ())
++    {
++      FC_DUMP_MSG ("Failed to create newtypes for dfc\n");
++      return 0;
++    }
++
++  for (auto *info : fc_infos)
++    {
++      if (!info->dynamic_fc_p)
++	continue;
++      create_dynamic_fc_convert_fn (info);
++      clone_dynamic_fc_path (info);
++      record_dfc_path_info (info);
++      if (flag_ipa_struct_dfc_shadow)
++	rewrite_dynamic_shadow_fields (info);
++      rewrite_dynamic_fc_path ();
++      add_dynamic_checking (info);
++    }
++
++  return TODO_verify_all;
++}
++
++bool
++ipa_struct_reorg::create_dynamic_fc_newtypes ()
++{
++  bool created = false;
++  for (auto *info : fc_infos)
++    {
++      if (!info->dynamic_fc_p)
++	continue;
++
++      create_dynamic_fc_variant (info);
++      if (info->type->create_new_type ())
++	created = true;
++      else
++	info->dynamic_fc_p = false;
++    }
++
++  if (!created)
++    return false;
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "=========== all created newtypes ===========\n\n");
++      dump_newtypes (dump_file);
++    }
++
++  return true;
++}
++
++/* Create a new fc_variant for the given fc_type in terms of fc_conds.  */
++
++void
++ipa_struct_reorg::create_dynamic_fc_variant (fc_type_info *info)
++{
++  create_global_var_dfc_path (info);
++  info->variant = new fc_variant ();
++}
++
++/* Create a global variable to identify the current dynamic path.  */
++
++void
++ipa_struct_reorg::create_global_var_dfc_path (fc_type_info *info)
++{
++  tree name = get_identifier ("dfc.path");
++  tree var = build_decl (BUILTINS_LOCATION, VAR_DECL, name,
++			 boolean_type_node);
++
++  TREE_PUBLIC (var) = 1;
++  TREE_STATIC (var) = 1;
++  DECL_IGNORED_P (var) = 1;
++  DECL_ARTIFICIAL (var) = 1;
++  DECL_INITIAL (var) = boolean_false_node;
++  SET_DECL_ASSEMBLER_NAME (var, name);
++
++  varpool_node::finalize_decl (var);
++  info->dfc_path = var;
++}
++
++/* Insert compress/decompress functions.  */
++
++void
++ipa_struct_reorg::create_dynamic_fc_convert_fn (fc_type_info *info)
++{
++  for (unsigned i = 0; i < info->fc_conds.length (); i++)
++    {
++      fc_cond *cond = info->fc_conds[i];
++      cond->compress_fn = create_convert_fn (cond, i, false);
++      cond->decompress_fn = create_convert_fn (cond, i, true);
++    }
++}
++
++/* Create function to further compress fields with special_values.
++   - DECOMP == 0: create function to compress the field.
++   - DECOMP != 0: create function to decompress the field.
++   IDX is an unique number for function name.
++   Return declaration of created function.  */
++
++tree
++ipa_struct_reorg::create_convert_fn (fc_cond *fcond, unsigned idx,
++				     bool decompress)
++{
++  if (fcond->special_values.is_empty ())
++    return NULL_TREE;
++
++  push_cfun (NULL);
++
++  /* Init declarations.  */
++  char fn_name[64];
++  const char *name = decompress ? "dfc.decompress." : "dfc.compress.";
++  sprintf (fn_name, "%s%d", name, idx);
++
++  tree arg_type = decompress ? fcond->new_type : fcond->old_type;
++  tree return_type = decompress ? fcond->old_type : fcond->new_type;
++  tree fn_decl = create_new_fn_decl (fn_name, 1, &arg_type, return_type);
++
++  basic_block return_bb = init_lowered_empty_function (
++			    fn_decl, true, profile_count::uninitialized ());
++  calculate_dominance_info (CDI_DOMINATORS);
++
++  split_edge (single_pred_edge (return_bb));
++  tree result = make_ssa_name (return_type);
++  create_phi_node (result, return_bb);
++
++  /* Create compress/decompress function body.  */
++  edge exit_e = create_normal_part (fcond);
++  create_conversion_part (fcond, exit_e, decompress);
++
++  /* Return stmt.  */
++  update_stmt (gsi_start_phis (return_bb).phi ());
++  gimple *return_stmt = gimple_build_return (result);
++  gimple_stmt_iterator gsi = gsi_last_bb (return_bb);
++  gsi_insert_after (&gsi, return_stmt, GSI_NEW_STMT);
++
++  free_dominance_info (CDI_DOMINATORS);
++  update_ssa (TODO_update_ssa);
++
++  cgraph_node::create (fn_decl);
++  cgraph_node::add_new_function (fn_decl, true);
++  cgraph_edge::rebuild_edges ();
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Create %s field function:\n",
++		   decompress ? "decompress" : "compress");
++      dump_function_to_file (cfun->decl, dump_file, dump_flags);
++    }
++
++  pop_cfun ();
++
++  return fn_decl;
++}
++
++/* Insert code for values in the bound:
++    if (arg <= high_bound && arg >= low_bound)
++      return arg;
++
++   Return the exit_edge of the if region, whose dest is the return block.
++ */
++
++edge
++ipa_struct_reorg::create_normal_part (fc_cond *fcond)
++{
++  edge true_e = NULL;
++  edge false_e = NULL;
++  tree arg = DECL_ARGUMENTS (cfun->decl);
++
++  /* Create 'arg <= high_bound'.  */
++  basic_block bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
++  tree tmp_arg = fold_convert (TREE_TYPE (fcond->high_bound), arg);
++  tree cond = build2 (LE_EXPR, boolean_type_node, tmp_arg, fcond->high_bound);
++  edge exit_e = create_empty_if_region_on_edge (single_succ_edge (bb), cond);
++  extract_true_false_edges_from_block (single_succ (bb), &true_e, &false_e);
++
++  /* Create 'arg >= low_bound'.  */
++  bb = true_e->dest;
++  tmp_arg = fold_convert (TREE_TYPE (fcond->low_bound), arg);
++  cond = build2 (GE_EXPR, boolean_type_node, tmp_arg, fcond->low_bound);
++  create_empty_if_region_on_edge (single_succ_edge (bb), cond);
++  extract_true_false_edges_from_block (single_succ (bb), &true_e, &false_e);
++
++  /* Return the original value on true_edge.  */
++  bb = true_e->dest;
++  tree return_type = TREE_TYPE (TREE_TYPE (cfun->decl));
++  tree val = make_ssa_name (return_type);
++  gimple_stmt_iterator gsi = gsi_last_bb (bb);
++  APPEND_GASSIGN_1 (gsi, val, NOP_EXPR, arg);
++
++  basic_block return_bb = single_pred (EXIT_BLOCK_PTR_FOR_FN (cfun));
++  redirect_edge_succ (single_succ_edge (bb), return_bb);
++  gphi *phi = gsi_start_phis (return_bb).phi ();
++  add_phi_arg (phi, val, single_succ_edge (bb), UNKNOWN_LOCATION);
++
++  return exit_e;
++}
++
++/* Insert conversion code to compress/decompress special values.  */
++
++void
++ipa_struct_reorg::create_conversion_part (fc_cond *fcond, edge e, bool decomp)
++{
++  edge exit_e = e;
++  basic_block return_bb = single_pred (EXIT_BLOCK_PTR_FOR_FN (cfun));
++  HOST_WIDE_INT reserved_value = tree_to_uhwi (fcond->high_bound) + 1;
++  for (unsigned i = 0; i < fcond->special_values.length (); i++)
++    {
++      basic_block bb = exit_e->src;
++      tree special_cst = build_int_cst (signed_type_for (fcond->old_type),
++					fcond->special_values[i]);
++      tree compressed_cst = build_int_cst (fcond->new_type, reserved_value);
++
++      if (i == fcond->special_values.length () - 1)
++	{
++	  /* Omit condition check for the last special value.  */
++	  redirect_edge_and_branch (single_succ_edge (bb), return_bb);
++	  gphi *phi = gsi_start_phis (return_bb).phi ();
++	  add_phi_arg (phi, decomp ? special_cst : compressed_cst,
++		       single_succ_edge (bb), UNKNOWN_LOCATION);
++	}
++      else
++	{
++	  tree arg = DECL_ARGUMENTS (cfun->decl);
++	  tree cond = build2 (EQ_EXPR, boolean_type_node, arg,
++			      decomp ? compressed_cst : special_cst);
++	  exit_e = create_empty_if_region_on_edge (exit_e, cond);
++	  edge true_e = NULL;
++	  edge false_e = NULL;
++	  extract_true_false_edges_from_block (single_succ (bb), &true_e,
++					       &false_e);
++	  redirect_edge_and_branch (single_succ_edge (true_e->dest),
++				    return_bb);
++	  gphi *phi = gsi_start_phis (return_bb).phi ();
++	  add_phi_arg (phi, decomp ? special_cst : compressed_cst,
++		       single_succ_edge (true_e->dest), UNKNOWN_LOCATION);
++	  reserved_value++;
++	}
++    }
++}
++
++void
++ipa_struct_reorg::clone_dynamic_fc_path (fc_type_info *info)
++{
++  SET_DUMP_FILE (NULL, TDF_NONE);
++  for (auto *srfn : functions)
++    {
++      SET_CFUN (srfn);
++
++      if (srfn->partial_clone_p ())
++	clone_partial_func (info, srfn);
++      else
++	clone_whole_func (srfn);
++    }
++}
++
++/* start_bb:   if (dfc.path)
++		  /        \
++	       false       true
++		/            \
++	   origin-bbs     clone-bbs
++ */
++void
++ipa_struct_reorg::clone_partial_func (fc_type_info *info, srfunction *srfn)
++{
++  calculate_dominance_info (CDI_DOMINATORS);
++
++  fc_path_info &path = srfn->fc_path;
++  auto_vec &reach_bbs = path.reach_bbs;
++  gimple *start_stmt = path.start_stmt;
++  basic_block fclose_bb = reach_bbs[0];
++
++  gcc_assert (fclose_bb == gimple_bb (start_stmt));
++  edge e = split_block (fclose_bb, start_stmt);
++  reach_bbs[0] = e->dest;
++
++  unsigned n = reach_bbs.length ();
++  basic_block *origin_bbs = new basic_block[n];
++  for (unsigned i = 0; i < reach_bbs.length (); i++)
++    origin_bbs[i] = reach_bbs[i];
++
++  /* 1. Clone blocks reachable from start point.  */
++  initialize_original_copy_tables ();
++  basic_block *cloned_bbs = new basic_block[n];
++  copy_bbs (origin_bbs, n, cloned_bbs, NULL, 0, NULL, fclose_bb->loop_father,
++	    fclose_bb, true);
++  delete[] origin_bbs;
++
++  /* Add phis for edges from copied bbs.  */
++  add_phi_args_after_copy (cloned_bbs, n, NULL);
++  free_original_copy_tables ();
++
++  path.cloned_bbs.reserve (n);
++  for (unsigned i = 0; i < n; i++)
++    path.cloned_bbs.safe_push (cloned_bbs[i]);
++  delete[] cloned_bbs;
++
++  /* 2. Add if-else on dfc.path.  */
++  basic_block checking_bb = split_edge (e);
++  gimple_stmt_iterator gsi = gsi_last_bb (checking_bb);
++  tree dfc_path_ssa = make_ssa_name (info->dfc_path);
++  gassign *assign = gimple_build_assign (dfc_path_ssa, info->dfc_path);
++  gsi_insert_after (&gsi, assign, GSI_NEW_STMT);
++  gcond *dfc_path_cond = gimple_build_cond_from_tree (dfc_path_ssa,
++						      NULL_TREE, NULL_TREE);
++  gsi_insert_after (&gsi, dfc_path_cond, GSI_NEW_STMT);
++
++  e = single_succ_edge (checking_bb);
++  e->flags = (e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE;
++
++  make_edge (checking_bb, path.cloned_bbs[0], EDGE_TRUE_VALUE);
++
++  /* Necessary for visiting call stmts.  */
++  cgraph_edge::rebuild_edges ();
++  free_dominance_info (CDI_DOMINATORS);
++
++  if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
++    {
++      calculate_dominance_info (CDI_DOMINATORS);
++      fix_loop_structure (NULL);
++    }
++
++  update_ssa (TODO_update_ssa);
++}
++
++void
++ipa_struct_reorg::clone_whole_func (srfunction *srfn)
++{
++  cgraph_node *new_node;
++  cgraph_node *node = srfn->node;
++
++  statistics_counter_event (NULL, "Create new function", 1);
++  new_node = node->create_version_clone_with_body (vNULL, NULL, NULL, NULL,
++						   NULL, "dfc");
++  new_node->can_change_signature = node->can_change_signature;
++  new_node->make_local ();
++
++  srfunction *new_srfn = new srfunction (new_node);
++  if (srfn->is_safe_func)
++    {
++      safe_functions.add (new_srfn->node);
++      new_srfn->is_safe_func = true;
++    }
++
++  srfn->fc_path.cloned_func = new_srfn;
++}
++
++/* Rewrite dynamic shadow fields in cloned path.  */
++
++void
++ipa_struct_reorg::rewrite_dynamic_shadow_fields (fc_type_info *info)
++{
++  if (info->dynamic_shadow_fields.is_empty ())
++    return;
++
++  for (auto *access : info->type->accesses)
++    {
++      srfield *srf = access->field;
++      if (!srf || !srf->fc_f || !srf->fc_f->original)
++	continue;
++
++      /* Skip statements in original path.  */
++      srfunction *srfn = access->function;
++      gimple *stmt = access->stmt;
++      if (srfn->fc_path.cloned_func
++	  || (srfn->partial_clone_p ()
++	      && !srfn->fc_path.cloned_bbs.contains (gimple_bb (stmt))))
++	continue;
++
++      SET_CFUN (srfn);
++
++      if (access->write_p ())
++	{
++	  /* Remove stmt by replacing lhs by a dummy ssa.  */
++	  tree lhs = gimple_assign_lhs (stmt);
++	  tree dummy_ssa = make_ssa_name (TREE_TYPE (lhs));
++	  gimple_assign_set_lhs (stmt, dummy_ssa);
++	  update_stmt (stmt);
++	}
++      else if (access->read_p ())
++	modify_shadow_read (stmt, access->index, srf->fc_f, access->base);
++      else
++	gcc_unreachable ();
++    }
++}
++
++/* Rewrite functions either partially or wholly.  */
++
++void
++ipa_struct_reorg::rewrite_dynamic_fc_path ()
++{
++  for (auto *srfn : functions)
++    {
++      if (srfn->partial_clone_p ())
++	{
++	  SET_CFUN (srfn);
++
++	  /* 2.1 rewrite the original function for each path.  */
++	  rewrite_partial_func (srfn);
++	  clean_func_after_rewrite (srfn);
++	}
++      else
++	{
++	  /* 2.2 rewrite the cloned function for each path.  */
++	  srfunction *cloned_func = srfn->fc_path.cloned_func;
++	  SET_CFUN (cloned_func);
++
++	  rewrite_whole_func (cloned_func);
++	  clean_func_after_rewrite (cloned_func);
++	}
++    }
++}
++
++void
++ipa_struct_reorg::record_dfc_path_info (fc_type_info *info)
++{
++  /* 1. Record access info for cloned stmts.  */
++  for (auto *srfn : functions)
++    {
++      SET_DUMP_FILE (NULL, TDF_NONE);
++      if (srfn->partial_clone_p ())
++	{
++	  record_function (srfn->node, srfn);
++	}
++      else
++	{
++	  srfunction *cloned_srfn = srfn->fc_path.cloned_func;
++	  record_function (cloned_srfn->node, cloned_srfn);
++	  prune_function (cloned_srfn);
++	}
++    }
++
++  prune_globals ();
++  gcc_assert (!info->type->has_escaped ());
++
++  /* 2. collect closure info for cloned paths.  */
++  collect_closure_info_dynamic (info);
++}
++
++/* Collect the closure info for all dynamic-fc cloned paths.  */
++
++void
++ipa_struct_reorg::collect_closure_info_dynamic (fc_type_info *info)
++{
++  for (auto *cond : info->fc_conds)
++    {
++      fc_field_class *field_class = cond->field_class;
++      for (auto *srfn : functions)
++	{
++	  if (srfn->partial_clone_p ())
++	    collect_closure_info_partial (srfn, &field_class->closure);
++	  else
++	    collect_closure_info_whole (srfn, &field_class->closure);
++	}
++    }
++}
++
++/* Collect closure info for partially cloned function SRFN in dynamic fc.  */
++
++void
++ipa_struct_reorg::collect_closure_info_partial (srfunction *srfn,
++						fc_closure *cinfo)
++{
++  closure_helper helper (cinfo);
++
++  for (auto *bb : srfn->fc_path.reach_bbs)
++    helper.record_origin_closure (bb);
++
++  helper.reset_uid ();
++
++  for (unsigned i = 0; i < srfn->fc_path.reach_bbs.length (); i++)
++    helper.add_cloned_closure (srfn->fc_path.cloned_bbs[i]);
++}
++
++/* Collect closure info for wholly cloned function SRFN in dfc.  */
++
++void
++ipa_struct_reorg::collect_closure_info_whole (srfunction *srfn,
++					      fc_closure *cinfo)
++{
++  closure_helper helper (cinfo);
++
++  basic_block bb = NULL;
++  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (srfn->node->decl))
++    helper.record_origin_closure (bb);
++
++  helper.reset_uid ();
++
++  srfunction *cloned_srfn = srfn->fc_path.cloned_func;
++  FOR_EACH_BB_FN (bb, DECL_STRUCT_FUNCTION (cloned_srfn->node->decl))
++    helper.add_cloned_closure (bb);
++}
++
++void
++ipa_struct_reorg::rewrite_partial_func (srfunction *srfn)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Before rewrite: %s\n", srfn->node->name ());
++      dump_function_to_file (current_function_decl, dump_file,
++			     dump_flags | TDF_VOPS);
++      FC_DUMP_MSG ("Start to rewrite: %s\n", srfn->node->name ());
++      fprintf (dump_file, "\n\n");
++    }
++
++  srfn->create_new_decls ();
++
++  /* Rewrite each related stmts in the current path.  */
++  for (unsigned i = 0; i < srfn->fc_path.reach_bbs.length (); i++)
++    rewrite_block (srfn->fc_path.cloned_bbs[i]);
++}
++
++void
++ipa_struct_reorg::rewrite_whole_func (srfunction *srfn)
++{
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Before rewrite: %s\n", srfn->node->name ());
++      dump_function_to_file (current_function_decl, dump_file,
++			     dump_flags | TDF_VOPS);
++      FC_DUMP_MSG ("Start to rewrite: %s\n", srfn->node->name ());
++    }
++
++  create_new_args (srfn->node);
++  srfn->create_new_decls ();
++
++  basic_block bb = NULL;
++  FOR_EACH_BB_FN (bb, cfun)
++    rewrite_block (bb);
++}
++
++void
++ipa_struct_reorg::clean_func_after_rewrite (srfunction *srfn)
++{
++  if (!srfn->partial_clone_p ())
++    for (auto *srd : srfn->args)
++      release_srdecl_ssa_name (srd);
++
++  for (auto *srd : srfn->decls)
++    release_srdecl_ssa_name (srd);
++
++  {
++    SET_DUMP_FILE (NULL, TDF_NONE);
++    update_ssa (TODO_update_ssa_only_virtuals);
++
++    unsigned i;
++    tree ssa_name;
++    FOR_EACH_SSA_NAME (i, ssa_name, cfun)
++      {
++	if (SSA_NAME_IN_FREE_LIST (ssa_name))
++	  continue;
++
++	gimple *stmt = SSA_NAME_DEF_STMT (ssa_name);
++
++	if (!stmt || (!SSA_NAME_IS_DEFAULT_DEF (ssa_name)
++		      && !gimple_bb (stmt)))
++	  release_ssa_name (ssa_name);
++      }
++
++    if (flag_tree_pta)
++      compute_may_aliases ();
++
++    remove_unused_locals ();
++    cgraph_edge::rebuild_edges ();
++    free_dominance_info (CDI_DOMINATORS);
++  }
++
++  if (dump_file)
++    {
++      FC_DUMP_MSG ("After rewrite: %s\n", srfn->node->name ());
++      dump_function_to_file (current_function_decl, dump_file,
++			     dump_flags | TDF_VOPS);
++      fprintf (dump_file, "\n\n");
++    }
++}
++
++void
++ipa_struct_reorg::dynamic_fc_rewrite_assign (gimple *stmt, tree rhs,
++					     tree &newlhs, tree &newrhs)
++{
++  fc_closure *closure = cur_srfd->get_closure ();
++  if (closure->write_change_p (stmt))
++    {
++      /* For a write stmt _0->fld = rhs, should only rewrite lhs.  */
++      gcc_assert (newrhs == NULL_TREE);
++      tree compress_fn = cur_srfd->fc_f->cond->compress_fn;
++      if (compress_fn)
++	newrhs = closure->convert_rhs (rhs, compress_fn);
++    }
++  else if (closure->read_change_p (stmt))
++    {
++      /* For a read stmt lhs = _0->fld, should only rewrite rhs.  */
++      gcc_assert (newlhs == NULL_TREE);
++      tree decompress_fn = cur_srfd->fc_f->cond->decompress_fn;
++      if (decompress_fn)
++	newrhs = closure->convert_rhs (newrhs, decompress_fn);
++    }
++  else if (!closure->unchange_p (stmt))
++    gcc_unreachable ();
++}
++
++/* Add code for dynamic checking and data compressing.  */
++
++void
++ipa_struct_reorg::add_dynamic_checking (fc_type_info *info)
++{
++  basic_block bb = gimple_bb (info->fclose_stmt);
++  gcc_assert (single_succ_p (bb));
++
++  SET_CFUN (info->start_srfn);
++
++  insert_code_calc_dfc_path (info);
++  insert_code_compress_data (info, single_succ_edge (bb));
++
++  SET_DUMP_FILE (NULL, TDF_NONE);
++  cgraph_edge::rebuild_edges ();
++  update_ssa (TODO_update_ssa_only_virtuals);
++}
++
++/* Insert dynamic checking code to calculate info->dfc_path.  */
++
++void
++ipa_struct_reorg::insert_code_calc_dfc_path (fc_type_info *info)
++{
++  insert_code_calc_max_min_val (info);
++  gimple_stmt_iterator gsi = gsi_for_stmt (info->fclose_stmt);
++  tree dfc_path = insert_code_calc_cond (info, &gsi);
++  insert_code_check_init_const (info, &gsi, dfc_path);
++
++  /* Store dfc_path to global var.  */
++  gimple *dfc_path_stmt = gimple_build_assign (info->dfc_path, dfc_path);
++  gsi_insert_after (&gsi, dfc_path_stmt, GSI_NEW_STMT);
++
++  basic_block bb = gimple_bb (info->fclose_stmt);
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      fprintf (dump_file, "\n");
++      FC_DUMP_MSG ("Insert code to calculate dfc.path\n");
++      dump_bb (dump_file, bb, 0, TDF_DETAILS);
++    }
++}
++
++/* Insert code to calculate min and max after input_ssa inside the loop.  */
++
++void
++ipa_struct_reorg::insert_code_calc_max_min_val (fc_type_info *info)
++{
++  basic_block bb = gimple_bb (info->input_stmt);
++  class loop *loop = bb->loop_father;
++  edge latch_edge = loop_latch_edge (loop);
++  for (unsigned i = 0; i < info->fc_conds.length (); i++)
++    {
++      fc_cond *cond = info->fc_conds[i];
++      /* Use the old type for min and max value, as they will be used to
++	 compare with the input ssa, which is with old type.  */
++      tree ssa_type = TREE_TYPE (cond->fields[0]->input_ssa);
++      char *min_name = append_suffix ("dfc.min_cond.", i);
++      char *max_name = append_suffix ("dfc.max_cond.", i);
++      cond->min_val = make_temp_ssa_name (ssa_type, NULL, min_name);
++      cond->max_val = make_temp_ssa_name (ssa_type, NULL, max_name);
++
++      /* Insert phi for min and max in loop header.  */
++      gphi *min_phi = create_phi_node (cond->min_val, loop->header);
++      gphi *max_phi = create_phi_node (cond->max_val, loop->header);
++
++      /* For the input_ssa of each fc fields, we calculate min and max.
++	 Assume all of the fc_fields have been sorted in terms of the
++	 position of input_ssa.  We should always access an input_ssa in
++	 forward direction.  This way, all fields' input will be used to
++	 update min_val and max_val in order.  */
++      tree min_val = cond->min_val;
++      tree max_val = cond->max_val;
++      hash_set<tree> input_ssa;
++      for (auto *fc_f : cond->fields)
++	{
++	  /* We handle the same input_ssa only once.  */
++	  if (input_ssa.contains (fc_f->input_ssa))
++	    continue;
++
++	  input_ssa.add (fc_f->input_ssa);
++	  gcc_assert (TREE_TYPE (fc_f->input_ssa) == ssa_type);
++
++	  /* Insert new stmt immediately after input_ssa.  */
++	  gimple *def_stmt = SSA_NAME_DEF_STMT (fc_f->input_ssa);
++	  gimple_stmt_iterator input_gsi = gsi_for_stmt (def_stmt);
++	  bb = gimple_bb (def_stmt);
++
++	  /* min = (input < min) ? input : min */
++	  tree min_cmp = fold_build2 (LT_EXPR, boolean_type_node,
++				      fc_f->input_ssa, min_val);
++	  tree input_min_rhs = build_cond_expr (min_cmp, fc_f->input_ssa,
++						min_val);
++	  min_val = make_temp_ssa_name (ssa_type, NULL, min_name);
++	  gimple *min_stmt = gimple_build_assign (min_val, input_min_rhs);
++	  gsi_insert_after (&input_gsi, min_stmt, GSI_NEW_STMT);
++
++	  /* max = (input > max) ? input : max */
++	  tree max_cmp = fold_build2 (GT_EXPR, boolean_type_node,
++				      fc_f->input_ssa, max_val);
++	  tree input_max_rhs = build_cond_expr (max_cmp, fc_f->input_ssa,
++						max_val);
++	  max_val = make_temp_ssa_name (ssa_type, NULL, max_name);
++	  gimple *max_stmt = gimple_build_assign (max_val, input_max_rhs);
++	  gsi_insert_after (&input_gsi, max_stmt, GSI_NEW_STMT);
++	}
++      free (min_name);
++      free (max_name);
++
++      /* Add input_min_rhs and input_max_rhs phis.  */
++      add_phi_arg (min_phi, min_val, latch_edge, UNKNOWN_LOCATION);
++      add_phi_arg (max_phi, max_val, latch_edge, UNKNOWN_LOCATION);
++      edge entry_edge = NULL;
++      edge_iterator ei;
++      FOR_EACH_EDGE (entry_edge, ei, loop->header->preds)
++	{
++	  if (entry_edge == latch_edge)
++	    continue;
++	  add_phi_arg (min_phi, build_zero_cst (ssa_type), entry_edge,
++		       UNKNOWN_LOCATION);
++	  add_phi_arg (max_phi, build_zero_cst (ssa_type), entry_edge,
++		       UNKNOWN_LOCATION);
++	}
++    }
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Insert min/max calculation\n");
++      dump_bb (dump_file, loop->header, 0, TDF_DETAILS);
++      dump_bb (dump_file, bb, 0, TDF_DETAILS);
++    }
++}
++
++/* Insert code to calculate fc_cond after fclose.  */
++
++tree
++ipa_struct_reorg::insert_code_calc_cond (fc_type_info *info,
++					 gimple_stmt_iterator *gsi)
++{
++  tree dfc_path = boolean_true_node;
++  for (auto *cond : info->fc_conds)
++    {
++      /* min >= low_bound */
++      tree cmp_min = fold_build2 (GE_EXPR, boolean_type_node,
++				  cond->min_val, cond->low_bound);
++
++      /* max <= high_bound */
++      tree cmp_max = fold_build2 (LE_EXPR, boolean_type_node,
++				  cond->max_val, cond->high_bound);
++
++      /* ret = ((min >= low_bound) && (max <= high_bound)) */
++      tree cmp_ret = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
++				  cmp_min, cmp_max);
++
++      /* dfc.path.tmp = dfc.path.tmp && ret */
++      tree tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, dfc_path,
++			      cmp_ret);
++      dfc_path = force_gimple_operand_gsi (gsi, tmp, true, NULL, false,
++					   GSI_CONTINUE_LINKING);
++    }
++
++  return dfc_path;
++}
++
++/* Insert code to check init_const for dynamic shadow fields.  */
++
++void
++ipa_struct_reorg::insert_code_check_init_const (fc_type_info *info,
++						gimple_stmt_iterator *gsi,
++						tree &dfc_path)
++{
++  basic_block bb = gimple_bb (info->input_stmt);
++  class loop *loop = bb->loop_father;
++  edge latch_edge = loop_latch_edge (loop);
++
++  for (auto *fc_f : info->dynamic_shadow_fields)
++    {
++      gcc_assert (fc_f->init_const);
++
++      /* Skip an init_const that is in special_values, because the boundary
++	 check for fc_cond should have covered that.  */
++      tree init_const = fc_f->init_const;
++      if (fc_f->input_field->cond->special_values.contains (
++	    tree_to_uhwi (init_const)))
++	continue;
++
++      tree shadow_valid = make_temp_ssa_name (boolean_type_node, NULL,
++					      "dfc.shadow_valid");
++      gphi *shadow_valid_phi = create_phi_node (shadow_valid, loop->header);
++
++      auto input_gsi = gsi_for_stmt (SSA_NAME_DEF_STMT (fc_f->input_ssa));
++      /* input != init_const */
++      tree ne_ret = fold_build2 (NE_EXPR, boolean_type_node, fc_f->input_ssa,
++			      init_const);
++      tree ne_tmp = force_gimple_operand_gsi (&input_gsi, ne_ret, true, NULL,
++					      false, GSI_CONTINUE_LINKING);
++
++      /* shadow_valid = shadow_valid && (input != init_const) */
++      tree and_ret = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
++				  shadow_valid, ne_tmp);
++      tree and_tmp = force_gimple_operand_gsi (&input_gsi, and_ret, true,
++					       NULL, false,
++					       GSI_CONTINUE_LINKING);
++
++      /* Insert phi for shadow_valid in loop header.  */
++      add_phi_arg (shadow_valid_phi, and_tmp, latch_edge, UNKNOWN_LOCATION);
++      edge entry_edge = NULL;
++      edge_iterator ei;
++      FOR_EACH_EDGE (entry_edge, ei, loop->header->preds)
++	{
++	  if (entry_edge == latch_edge)
++	    continue;
++	  add_phi_arg (shadow_valid_phi, boolean_true_node, entry_edge,
++		       UNKNOWN_LOCATION);
++	}
++
++      /* dfc.path.tmp = dfc.path.tmp && shadow_valid */
++      tree tmp = fold_build2 (TRUTH_AND_EXPR, boolean_type_node, dfc_path,
++			      shadow_valid);
++      dfc_path = force_gimple_operand_gsi (gsi, tmp, true, NULL, false,
++					   GSI_CONTINUE_LINKING);
++    }
++}
++
++/* Split edge E and insert code to compress data.  */
++
++void
++ipa_struct_reorg::insert_code_compress_data (fc_type_info *info, edge e)
++{
++  if (!dom_info_available_p (CDI_DOMINATORS))
++    calculate_dominance_info (CDI_DOMINATORS);
++  if (!loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS))
++    loop_optimizer_init (LOOPS_HAVE_PREHEADERS);
++  record_loop_exits ();
++
++  auto_vec<tree> array_names;
++  auto_vec<tree> size_names;
++  basic_block bb = e->src;
++  gimple_stmt_iterator gsi = gsi_last_bb (bb);
++
++  /* Generate SSA_NAMEs for fc_array, if they are global addresses.  */
++  for (auto *array : info->fc_arrays)
++    {
++      array_names.safe_push (generate_ssa_name (array->var, &gsi));
++      size_names.safe_push (generate_ssa_name (array->size, &gsi));
++    }
++
++  insert_code_compress_variant (info, bb, array_names, size_names);
++}
++
++/* Insert code to compress hot data for a fc_variant.  */
++
++void
++ipa_struct_reorg::insert_code_compress_variant (
++  fc_type_info *info, basic_block bb, const auto_vec<tree> &array_names,
++  const auto_vec<tree> &size_names)
++{
++  edge true_e = NULL;
++  edge false_e = NULL;
++  edge entry_e = single_succ_edge (bb);
++  create_empty_if_region_on_edge (entry_e, info->dfc_path);
++  extract_true_false_edges_from_block (entry_e->dest, &true_e, &false_e);
++
++  /* Create function decl and node for compressed single object.  */
++  create_compress_object_fn (info);
++
++  edge current_e = true_e;
++  insert_code_compress_array (info, current_e, array_names, size_names);
++  insert_code_modify_refs (info, current_e);
++}
++
++/* For each variable(array) to compress, insert loop like:
++      <bb entry>:
++       _1 = ARRAY_BASE;
++       _2 = ARRAY_SIZE;
++       goto <bb header>;
++
++      <bb header>:
++       # i_1 = PHI <0, i_2(THEN)>
++       if (i_1 < _2)
++	 goto <bb then>;
++       else
++	 goto <bb exit>;
++
++      <bb then>:
++       dfc.compress_obj (_1, i_1);
++       i_2 = i_1 + 1;
++
++      <bb exit>:
++   */
++
++void
++ipa_struct_reorg::insert_code_compress_array (
++  fc_type_info *info, edge &e, const auto_vec<tree> &data_names,
++  const auto_vec<tree> &size_names)
++{
++  loop_p outer_loop = gimple_bb (info->fclose_stmt)->loop_father;
++  for (size_t i = 0; i < size_names.length (); ++i)
++    {
++      tree iv_before, iv_after;
++      tree size_type = TREE_TYPE (size_names[i]);
++      tree iv = create_tmp_reg (size_type, "dfc.compress_idx");
++      loop_p loop
++	= create_empty_loop_on_edge (e, build_zero_cst (size_type),
++				     build_int_cst (size_type, 1),
++				     size_names[i], iv, &iv_before,
++				     &iv_after, outer_loop);
++
++      /* Build call statement to compress a single object.  */
++      basic_block latch_bb = loop->latch;
++      auto gsi = gsi_last_bb (latch_bb);
++      tree fndecl = info->variant->compress_object_fn;
++      gcall *call = gimple_build_call (fndecl, 2, data_names[i], iv_before);
++      gsi_insert_after (&gsi, call, GSI_NEW_STMT);
++      cgraph_node *node = cgraph_node::get (current_function_decl);
++      cgraph_node *new_node = cgraph_node::get (fndecl);
++      node->create_edge (new_node, call, latch_bb->count);
++
++      e = single_exit (loop);
++    }
++}
++
++/* Insert code to modify all fc_refs.  */
++
++void
++ipa_struct_reorg::insert_code_modify_refs (fc_type_info *info, edge current_e)
++{
++  fc_variant *variant = info->variant;
++  loop_p outer_loop = gimple_bb (info->fclose_stmt)->loop_father;
++  for (auto *dr : info->fc_refs)
++    {
++      if (!dr->size)
++	{
++	  /* 1) fc_ref is a single ptr.  */
++	  current_e = insert_code_modify_single_ref (
++			current_e, dr->var, dr->source,
++			TYPE_SIZE_UNIT (info->type->type),
++			TYPE_SIZE_UNIT (variant->new_type));
++	  continue;
++	}
++
++      /* 2) fc_ref is an array, create a loop.  */
++      tree iv_before, iv_after;
++      tree ptr_type = dr->orig_type ? dr->orig_type : TREE_TYPE (dr->var);
++      tree size_type = TREE_TYPE (dr->size);
++      loop_p loop = create_empty_loop_on_edge (
++		      current_e, build_zero_cst (size_type),
++		      build_int_cst (size_type, 1), dr->size,
++		      create_tmp_reg (size_type, NULL), &iv_before,
++		      &iv_after, outer_loop);
++      /* Fetch array element.  */
++      auto gsi = gsi_last_bb (loop->latch);
++      tree var1 = make_ssa_name (ptr_type);
++      gsi_insert_after (&gsi, gimple_build_assign (var1, dr->var),
++			GSI_NEW_STMT);
++      tree var_mul = make_ssa_name (long_unsigned_type_node);
++      APPEND_GASSIGN_2 (gsi, var_mul, MULT_EXPR, iv_before,
++			TYPE_SIZE_UNIT (TREE_TYPE (ptr_type)));
++      tree var_plus = make_ssa_name (ptr_type);
++      APPEND_GASSIGN_2 (gsi, var_plus, POINTER_PLUS_EXPR, var1, var_mul);
++      tree ref_expr = build2 (MEM_REF, TREE_TYPE (ptr_type), var_plus,
++			      build_int_cst (ptr_type, 0));
++      if (dr->field)
++	ref_expr = build3 (COMPONENT_REF, TREE_TYPE (dr->field), ref_expr,
++			   dr->field, NULL_TREE);
++      /* Modify the ref's value.  */
++      insert_code_modify_single_ref (single_succ_edge (loop->latch),
++				     ref_expr, dr->source,
++				     TYPE_SIZE_UNIT (info->type->type),
++				     TYPE_SIZE_UNIT (variant->new_type));
++      current_e = single_exit (loop);
++    }
++}
++
++/* Create function to compress a single object and record its decl.  */
++
++void
++ipa_struct_reorg::create_compress_object_fn (fc_type_info *info)
++{
++  /* Function declaration.  */
++  tree orig_struct_type = info->type->type;
++  tree orig_struct_size = TYPE_SIZE_UNIT (orig_struct_type);
++  tree orig_ptr_type = build_pointer_type (orig_struct_type);
++  tree size_type = TREE_TYPE (orig_struct_size);
++  tree arg_types[2] = {orig_ptr_type, size_type};
++  char fn_name[32];
++  sprintf (fn_name, "%s", "dfc.compress_obj");
++  tree fndecl = create_new_fn_decl (fn_name, 2, arg_types, void_type_node);
++
++  /* Function arguments.  */
++  tree struct_array = DECL_ARGUMENTS (fndecl);
++  tree idx = TREE_CHAIN (struct_array);
++
++  /* Push NULL cfun.  */
++  push_cfun (NULL);
++  basic_block bb = init_lowered_empty_function (
++		     fndecl, true, profile_count::uninitialized ());
++
++  /* Function body.  */
++  /* Use a temporary struct to avoid overlapping.  */
++  tree tmp_obj = create_tmp_var (orig_struct_type, "tmp");
++  /* tmp = start[i];
++     =>
++     idx_1 = (long unsigned int) idx;
++     _2 = idx_1 * sizeof (orig_struct);
++     _3 = start + _2;
++     tmp = *_3;
++    */
++  gimple_stmt_iterator gsi = gsi_last_bb (bb);
++  tree offset = make_ssa_name (long_unsigned_type_node);
++  APPEND_GASSIGN_2 (gsi, offset, MULT_EXPR, idx, orig_struct_size);
++  tree address = make_ssa_name (orig_ptr_type);
++  APPEND_GASSIGN_2 (gsi, address, POINTER_PLUS_EXPR, struct_array, offset);
++  tree rhs = build2 (MEM_REF, orig_struct_type, address,
++		     build_int_cst (orig_ptr_type, 0));
++  APPEND_GASSIGN_1 (gsi, tmp_obj, MEM_REF, rhs);
++
++  /* Init: new_struct* ptr = start + idx_1 * sizeof (new_struct) */
++  fc_variant *variant = info->variant;
++  tree new_type = variant->new_type;
++  tree new_ptr_type = build_pointer_type (new_type);
++  tree new_ptr = create_tmp_var (new_ptr_type, "ptr");
++  offset = make_ssa_name (long_unsigned_type_node);
++  APPEND_GASSIGN_2 (gsi, offset, MULT_EXPR, idx, TYPE_SIZE_UNIT (new_type));
++  APPEND_GASSIGN_2 (gsi, new_ptr, POINTER_PLUS_EXPR, struct_array, offset);
++  tree ref = build2 (MEM_REF, new_type, new_ptr,
++		     build_int_cst (new_ptr_type, 0));
++
++  /* Compress and assign the fields.  */
++  for (auto *field : info->type->fields)
++    {
++      /* Skip shadow fields.  */
++      if (field->fc_f && field->fc_f->original)
++	continue;
++
++      tree old_field = field->fielddecl;
++      tree old_field_type = field->fieldtype;
++      tree new_field = field->newfield[0] ? field->newfield[0] : old_field;
++      tree new_field_type = TREE_TYPE (new_field);
++
++      tree var = make_ssa_name (old_field_type);
++      tree rhs = build3 (COMPONENT_REF, old_field_type, tmp_obj, old_field,
++			 NULL_TREE);
++      APPEND_GASSIGN_1 (gsi, var, COMPONENT_REF, rhs);
++      if (new_field_type != old_field_type)
++	{
++	  fc_cond *cond = field->fc_f->cond;
++	  if (cond && cond->compress_fn)
++	    {
++	      /* Need compressing.  */
++	      /* As we may have bitfield, so cond->new_type and new_type
++		 can be different.  */
++	      tree compressed_var = make_ssa_name (cond->new_type);
++	      gcall *stmt = gimple_build_call (cond->compress_fn, 1, var);
++	      gimple_call_set_lhs (stmt, compressed_var);
++	      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
++	      var = compressed_var;
++	    }
++	  tree converted_var = make_ssa_name (new_field_type);
++	  APPEND_GASSIGN_1 (gsi, converted_var, NOP_EXPR, var);
++	  var = converted_var;
++	}
++      tree lhs = build3 (COMPONENT_REF, new_field_type, ref, new_field,
++			 NULL_TREE);
++      APPEND_GASSIGN_1 (gsi, lhs, MEM_REF, var);
++    }
++
++  /* Clobber and return.  */
++  tree clobber = build_clobber (orig_struct_type);
++  gimple *stmt = gimple_build_assign (tmp_obj, clobber);
++  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
++  stmt = gimple_build_return (NULL);
++  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
++
++  {
++    SET_DUMP_FILE (NULL, TDF_NONE);
++    update_ssa (TODO_update_ssa);
++  }
++
++  cgraph_node::create (fndecl);
++  cgraph_node::add_new_function (fndecl, true);
++  cgraph_edge::rebuild_edges ();
++
++  if (dump_file && (dump_flags & TDF_DETAILS))
++    {
++      FC_DUMP_MSG ("Create compress object function:\n");
++      dump_function_to_file (cfun->decl, dump_file, dump_flags);
++    }
++  pop_cfun ();
++
++  info->variant->compress_object_fn = fndecl;
++}
++
++/* Split edge E and insert codes to modify a single fc_ref expression.
++   Return the exit edge of created codes.  */
++
++edge
++ipa_struct_reorg::insert_code_modify_single_ref (edge e, tree ref,
++						 fc_array *array,
++						 tree orig_size,
++						 tree new_size)
++{
++  /* For each fc_ref, create code like:
++     if (REF)
++	REF = (long) ARRAY + ((long) REF - (long) ARRAY)
++			     / sizeof(old_type) * sizeof(new_type);
++   */
++
++  /* 1) Create ssa_name for fc_ref.  */
++  tree ref_ssa_name = create_tmp_reg (TREE_TYPE (ref));
++  gimple *stmt = gimple_build_assign (ref_ssa_name, unshare_expr (ref));
++  basic_block bb = split_edge (e);
++  gimple_stmt_iterator gsi = gsi_last_bb (bb);
++  gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
++
++  /* 2) Create the if-else structure.  */
++  tree cmp = build2 (EQ_EXPR, boolean_type_node, ref_ssa_name,
++		     null_pointer_node);
++  edge exit_e = create_empty_if_region_on_edge (single_succ_edge (bb), cmp);
++  edge true_e = NULL;
++  edge false_e = NULL;
++  extract_true_false_edges_from_block (single_succ (bb), &true_e, &false_e);
++  gsi = gsi_last_bb (false_e->dest);
++
++  /* 3) Create conversion codes.  */
++  tree ssa_def = array->ssa_def;
++  tree sub_var = make_ssa_name (ptrdiff_type_node);
++  APPEND_GASSIGN_2 (gsi, sub_var, POINTER_DIFF_EXPR, ref_ssa_name, ssa_def);
++  tree div_var = make_ssa_name (ptrdiff_type_node);
++  APPEND_GASSIGN_2 (gsi, div_var, TRUNC_DIV_EXPR, sub_var,
++		    fold_convert (ptrdiff_type_node, orig_size));
++  tree mul_var = make_ssa_name (ptrdiff_type_node);
++  APPEND_GASSIGN_2 (gsi, mul_var, MULT_EXPR, div_var,
++		    fold_convert (ptrdiff_type_node, new_size));
++  tree mul_var2 = make_ssa_name (size_type_node);
++  APPEND_GASSIGN_1 (gsi, mul_var2, NOP_EXPR, mul_var);
++  tree add_var = make_ssa_name (TREE_TYPE (ssa_def));
++  APPEND_GASSIGN_2 (gsi, add_var, POINTER_PLUS_EXPR, ssa_def, mul_var2);
++
++  /* 4) Store.  */
++  gsi_insert_after (&gsi, gimple_build_assign (unshare_expr (ref), add_var),
++		    GSI_NEW_STMT);
++  return exit_e;
++}
++
++/* Init pointer size from parameter param_pointer_compression_size.  */
++
++static void
++init_pointer_size_for_pointer_compression (void)
++{
++  switch (param_pointer_compression_size)
++    {
++      case 8:
++      // FALLTHRU
++      case 16:
++      // FALLTHRU
++      case 32: compressed_size = param_pointer_compression_size; break;
++      default:
++	error ("Invalid pointer compression size, using the following param: "
++	       "\"--param compressed-pointer-size=[8,16,32]\"");
++    }
++}
++
++unsigned int
++ipa_struct_reorg::execute (unsigned int opt)
++{
++  unsigned int ret = 0;
++
++  if (dump_file)
++    fprintf (dump_file, "\n\n====== ipa_struct_reorg level %d ======\n\n",
++	     opt);
++
++  if (opt != COMPLETE_STRUCT_RELAYOUT)
++    {
++      current_layout_opt_level = opt;
++      /* If there is a top-level inline-asm,
++	 the pass immediately returns.  */
++      if (symtab->first_asm_symbol ())
++	return 0;
++      record_accesses ();
++      prune_escaped_types ();
++      if (current_layout_opt_level == STRUCT_SPLIT)
++	analyze_types ();
++
++      if (opt >= POINTER_COMPRESSION_SAFE)
++	check_and_prune_struct_for_pointer_compression ();
++      if (opt >= SEMI_RELAYOUT)
++	check_and_prune_struct_for_semi_relayout ();
++      /* Avoid doing static field compression in STRUCT_SPLIT.  */
++      if (opt >= STRUCT_REORDER_FIELDS
++	  && current_fc_level == fc_level::STATIC)
++	check_and_prune_struct_for_field_compression ();
++      ret = rewrite_functions ();
++    }
++  else
++    {
++      if (dump_file)
++	fprintf (dump_file, "\n\nTry Complete Struct Relayout:\n");
++      current_layout_opt_level = COMPLETE_STRUCT_RELAYOUT;
++      if (symtab->first_asm_symbol ())
++	return 0;
++      record_accesses ();
++      prune_escaped_types ();
++
++      ret = execute_struct_relayout ();
++    }
++
++  return ret;
++}
++
++const pass_data pass_data_ipa_struct_reorg =
++{
++  SIMPLE_IPA_PASS, // type
++  "struct_reorg",  // name
++  OPTGROUP_NONE,   // optinfo_flags
++  TV_IPA_STRUCT_REORG, // tv_id
++  0, // properties_required
++  0, // properties_provided
++  0, // properties_destroyed
++  0, // todo_flags_start
++  0, // todo_flags_finish
++};
++
++class pass_ipa_struct_reorg : public simple_ipa_opt_pass
++{
++public:
++  pass_ipa_struct_reorg (gcc::context *ctxt)
++    : simple_ipa_opt_pass (pass_data_ipa_struct_reorg, ctxt)
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *);
++  virtual unsigned int execute (function *)
++  {
++    unsigned int ret = 0;
++    unsigned int ret_reorg = 0;
++    unsigned int level = 0;
++    switch (struct_layout_optimize_level)
++      {
++	case 6: level |= SEMI_RELAYOUT;
++	// FALLTHRU
++	case 5: level |= POINTER_COMPRESSION_UNSAFE;
++	// FALLTHRU
++	case 4: level |= POINTER_COMPRESSION_SAFE;
++	// FALLTHRU
++	case 3: level |= DEAD_FIELD_ELIMINATION;
++	// FALLTHRU
++	case 2: level |= STRUCT_REORDER_FIELDS;
++	// FALLTHRU
++	case 1:
++	  level |= COMPLETE_STRUCT_RELAYOUT;
++	  level |= STRUCT_SPLIT;
++	  break;
++	case 0: break;
++	default: gcc_unreachable ();
++      }
++
++    if (level & POINTER_COMPRESSION_SAFE)
++      init_pointer_size_for_pointer_compression ();
++
++    if (level & SEMI_RELAYOUT)
++      {
++	semi_relayout_align = semi_relayout_level;
++	relayout_part_size = 1 << semi_relayout_level;
++      }
++
++    current_fc_level = fc_level::NONE;
++    if (flag_ipa_struct_sfc)
++      current_fc_level = fc_level::STATIC;
++
++    /* Preserved for backward compatibility, reorder fields needs run before
+        struct split and complete struct relayout.  */
+     if (flag_ipa_reorder_fields && level < STRUCT_REORDER_FIELDS)
+       ret = ipa_struct_reorg ().execute (STRUCT_REORDER_FIELDS);
+@@ -10159,6 +13783,9 @@ public:
+     if (level >= STRUCT_REORDER_FIELDS)
+       ret = ipa_struct_reorg ().execute (level);
+ 
++    /* Reset current_fc_level before struct_split and csr.  */
++    current_fc_level = fc_level::NONE;
++
+     if (ret & TODO_remove_functions)
+       symtab->remove_unreachable_nodes (dump_file);
+ 
+@@ -10172,6 +13799,13 @@ public:
+ 	if (!ret_reorg)
+ 	  ret_reorg = ipa_struct_reorg ().execute (COMPLETE_STRUCT_RELAYOUT);
+       }
++
++    if (ret && flag_ipa_struct_dfc)
++      {
++	current_fc_level = fc_level::DYNAMIC;
++	ret = ipa_struct_reorg ().execute_dynamic_field_compression ();
++      }
++
+     return ret | ret_reorg;
+   }
+ 
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.h b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+index 2ab6444d6..c7c6b7433 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.h
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.h
+@@ -59,14 +59,56 @@ const char *escape_type_string[escape_max_escape - 1] =
+ #include "escapes.def"
+ };
+ 
++enum class check_ref_result
++{
++  NEW,
++  DUPLICATIVE,
++  ERROR,
++};
++
+ struct srfield;
+ struct srtype;
+ struct sraccess;
+ struct srdecl;
+ struct srfunction;
++class fc_closure;
+ class fc_type_info;
+ class fc_field;
+ class fc_field_class;
++class fc_cond;
++
++class fc_path_info
++{
++public:
++  enum direction
++  {
++    PRED,
++    SUCC
++  };
++
++public:
++  /* The start stmt to clone blocks.  If it is NULL, the whole function is
++     cloned (i.e. versioning).  */
++  gimple *start_stmt = NULL;
++
++  /* Blocks reachable from the start_stmt.  */
++  auto_vec<basic_block> reach_bbs;
++
++  /* Blocks that can reach the start_stmt.  */
++  auto_vec<basic_block> pre_bbs;
++
++  /* Cloned basic blocks of reach_bbs.  */
++  auto_vec<basic_block> cloned_bbs;
++
++  /* Cloned whole function versions.  */
++  srfunction *cloned_func = NULL;
++
++  fc_path_info ()
++  {}
++  ~fc_path_info ();
++
++  bool collect_blocks (gimple *, direction);
++};
+ 
+ struct srfunction
+ {
+@@ -82,6 +124,8 @@ struct srfunction
+ 
+   bool is_safe_func;
+ 
++  fc_path_info fc_path;
++
+   // Constructors
+   srfunction (cgraph_node *n);
+ 
+@@ -93,6 +137,9 @@ struct srfunction
+   bool check_args (void);
+   void create_new_decls (void);
+   srdecl *find_decl (tree);
++
++  bool partial_clone_p ();
++  bool entry_function_p ();
+ };
+ 
+ struct srglobal : private srfunction
+@@ -155,6 +202,7 @@ public:
+   void add_field_site (srfield *);
+ 
+   srfield *find_field (unsigned HOST_WIDE_INT offset);
++  srfield *find_field_by_decl (tree);
+ 
+   bool create_new_type (void);
+   void analyze (void);
+@@ -164,10 +212,8 @@ public:
+   unsigned calculate_bucket_size ();
+   bool has_recursive_field_type ();
+   void check_fc_fields ();
+-  bool has_escaped (void)
+-  {
+-    return escapes != does_not_escape;
+-  }
++  bool reorg_name_p ();
++  bool has_escaped (void);
+   const char *escape_reason (void)
+   {
+     if (!has_escaped ())
+@@ -203,7 +249,7 @@ struct srfield
+   tree newfield[max_split];
+   unsigned field_access; /* FIELD_DECL -> bitflag (use for dfe).  */
+ 
+-  fc_field *static_fc_field;
++  fc_field *fc_f;
+   fc_field_class *field_class;
+ 
+   // Constructors
+@@ -222,6 +268,8 @@ struct srfield
+ 				  tree newfields[max_split],
+ 				  tree newlast[max_split]);
+   bool dead_field_p ();
++  bool dfc_type_change_p ();
++  fc_closure *get_closure ();
+ };
+ 
+ struct sraccess
+@@ -243,9 +291,10 @@ struct sraccess
+ 
+   // Methods
+   void dump (FILE *file) const;
+-  bool write_type_p (tree) const;
+-  bool write_field_p (tree) const;
+-  bool read_field_p (tree) const;
++  bool write_field_p (tree = NULL_TREE) const;
++  bool read_field_p (tree = NULL_TREE) const;
++  bool write_p () const;
++  bool read_p () const;
+ };
+ 
+ struct srdecl
+@@ -276,17 +325,76 @@ struct srdecl
+   }
+ };
+ 
++/* Describe stmt closure to help rewrite.  The closure could be either array
++   pointers for the same memory space, or normal data without calculation.  */
++
++class fc_closure
++{
++public:
++  /* The stmts for read/write of the fc field.  For read/write_change, we need
++     to add convert function for read and write respectively.  */
++  hash_set<gimple *> read_unchange_set;
++  hash_set<gimple *> read_change_set;
++  hash_set<gimple *> write_unchange_set;
++  hash_set<gimple *> write_change_set;
++
++  /* Record the known special rhs assigned to this fc field.  */
++  hash_map<gimple *, tree> write_special_rhs;
++
++  void add_read_change (gimple *);
++  bool read_change_p (gimple *);
++  void add_read_unchange (gimple *);
++  bool read_unchange_p (gimple *);
++  void add_write_change (gimple *);
++  bool write_change_p (gimple *);
++  void add_write_unchange (gimple *);
++  bool write_unchange_p (gimple *);
++  bool change_p (gimple *);
++  bool unchange_p (gimple *);
++
++  /* Call compress/decompress function FN for RHS.  */
++  tree convert_rhs (tree, tree);
++};
++
++class closure_helper
++{
++private:
++  /* The unique id for assign stmts used in collecting closure info.  */
++  int uid;
++  fc_closure *cinfo;
++
++  auto_bitmap read_change_map;
++  auto_bitmap write_change_map;
++  auto_bitmap read_unchange_map;
++  auto_bitmap write_unchange_map;
++
++public:
++  closure_helper (fc_closure *cinfo)
++    : uid (0), cinfo (cinfo)
++  {}
++
++  void record_origin_closure (basic_block);
++  void add_cloned_closure (basic_block);
++  void reset_uid ();
++};
++
+ /* All fields belong to this class should have the same type.  */
+ 
+ class fc_field_class
+ {
+ public:
+   /* The same type for all of the fields in the class.  */
+-  tree fieldtype;
++  tree fieldtype = NULL_TREE;
+ 
+   /* The fields with the same type are in the same element of this vector.  */
+   auto_vec srfields;
+ 
++  /* Back reference to corresponding fc_cond.  */
++  fc_cond *cond = NULL;
++
++  /* Record all info related if the class is an identified closure.  */
++  fc_closure closure;
++
+   fc_field_class (tree fieldtype)
+     : fieldtype (fieldtype)
+   {}
+@@ -296,23 +404,80 @@ public:
+   int get_field_index (srfield *) const;
+ };
+ 
++/* The fc condition for a specified data type.  Multiple vars with the same
++   data type can map to the same fc_cond object.  */
++
++class fc_cond
++{
++public:
++  /* The old field data type for this condition.  */
++  tree old_type = NULL_TREE;
++
++  /* The new field data type for this condition.  */
++  tree new_type = NULL_TREE;
++
++  /* The bit width of the new_type if it is a bit field.  */
++  unsigned bits = 0;
++
++  /* The type class to which all of fc_fields in this condition belongs.  */
++  fc_field_class *field_class = NULL;
++
++  /* May have multiple fields mapping to this condition, as they have the
++     same data type.  */
++  auto_vec<fc_field *> fields;
++
++  /* The condition variable we want to check.  */
++  tree cond_var = NULL_TREE;
++
++  /* The vars to hold the min and max input.  */
++  tree min_val = NULL_TREE;
++  tree max_val = NULL_TREE;
++
++  /* The constant value we need to check at run-time.  */
++  tree low_bound = NULL_TREE;
++  tree high_bound = NULL_TREE;
++
++  /* Hold all special constant values for this condition type.  */
++  auto_vec<unsigned HOST_WIDE_INT> special_values;
++
++  /* Compress and decompress function decls, if there're special values.  */
++  tree compress_fn = NULL_TREE;
++  tree decompress_fn = NULL_TREE;
++
++  fc_cond (tree old_type = NULL_TREE)
++    : old_type (old_type)
++  {}
++  ~fc_cond ()
++  {}
++};
++
+ /* The field for field compression.  */
+ 
+ class fc_field
+ {
+ public:
+-  tree field;
+-  tree new_type;
++  tree field = NULL_TREE;
++  tree new_type = NULL_TREE;
+ 
+   /* This field's max value we can know at compile time.  If it is 0, it means
+      the max value cannot be determined at compile time.  */
+-  HOST_WIDE_INT max_value;
++  HOST_WIDE_INT max_value = 0;
+ 
+   /* The bit width of the field if it is not zero.  */
+-  unsigned bits;
++  unsigned bits = 0;
++
++  /* The total number of static reference count.  The bigger, the smaller
++     size for dynamic field compression.  */
++  unsigned ref_cnt = 0;
+ 
+   /* The original field of a shadow field if it is not NULL.  */
+-  srfield *original;
++  srfield *original = NULL;
++
++  /* A dynamic shadow field must have an input fc_field counterpart.  */
++  fc_field *input_field = NULL;
++
++  /* Init constants of the original srfield.  */
++  tree init_const = NULL_TREE;
+ 
+   /* All assignments that need to be optimized as shadow.  */
+   auto_vec shadow_stmts;
+@@ -321,10 +486,23 @@ public:
+      stmt belongs to.  */
+   auto_vec shadow_stmts_func;
+ 
++  /* The input var that is read from a file, and assigned to this fc_field.  */
++  tree input_var = NULL_TREE;
++
++  /* The ssa for the input_var.  */
++  tree input_ssa = NULL_TREE;
++
++  /* The condition var descriptor for this field.  */
++  fc_cond *cond = NULL;
++
+   /* For static field compression.  */
+   fc_field (tree field, HOST_WIDE_INT max_value, srfield *original)
+-    : field (field), new_type (NULL_TREE), max_value (max_value),
+-      bits (0), original (original)
++    : field (field), max_value (max_value), original (original)
++  {}
++
++  /* For dynamic field compression.  */
++  fc_field (tree field, tree input_var, tree input_ssa)
++    : field (field), input_var (input_var), input_ssa (input_ssa)
+   {}
+ 
+   unsigned get_bits (void) const
+@@ -333,32 +511,138 @@ public:
+   }
+ };
+ 
++/* A hot array that needs to be cached.  */
++
++class fc_array
++{
++public:
++  /* The variable declaration that holds the data to be cached.  */
++  tree var = NULL_TREE;
++
++  /* The size expr to help data initialization.  */
++  tree size = NULL_TREE;
++
++  /* If fc_array is allocated in start-function, record the ssa_name of
++     allocated ptr, we may need this to create fc_refs.  */
++  tree ssa_def = NULL_TREE;
++
++  /* varpool_node for a global fc_array variable.  We may need this to search
++     for fc_refs.  */
++  varpool_node *vnode = NULL;
++
++  fc_array (tree var, tree size, tree ssa_def, varpool_node *vnode)
++    : var (var), size (size), ssa_def (ssa_def), vnode (vnode)
++  {}
++  ~fc_array ()
++  {}
++};
++
++/* A variable that needs to be modified according to the caching data.  */
++
++class fc_ref
++{
++public:
++  /* The variable's declaration.  */
++  tree var = NULL_TREE;
++
++  /* "real" type, for void*.  */
++  tree orig_type = NULL_TREE;
++
++  /* fc_array referred by this variable.  */
++  fc_array *source = NULL;
++
++  /* Number of elements, if this variable is an array.  */
++  tree size = NULL_TREE;
++
++  /* For array of records, this is the field to be modified.  */
++  tree field = NULL_TREE;
++
++  fc_ref (tree var, tree orig_type, fc_array *source,
++	   tree size, tree field)
++    : var (var), orig_type (orig_type), source (source),
++      size (size), field (field)
++  {}
++  ~fc_ref ()
++  {}
++
++  void dump (FILE *) const;
++};
++
++/* Variants for different dynamic checking condition combinations.  */
++class fc_variant
++{
++public:
++  /* New structure type.  */
++  tree new_type = NULL_TREE;
++
++  /* The function to compress a single object.  */
++  tree compress_object_fn = NULL_TREE;
++};
++
+ /* The class to hold field compression type information.
+    A single info object is only for one structure type.  */
+ 
+ class fc_type_info
+ {
+ public:
+-  srtype *type;
++  srtype *type = NULL;
+ 
+   /* The flag to control whether the type can do static field compression.  */
+   bool static_fc_p = false;
++  bool dynamic_fc_p = false;
+ 
+   /* Multiple fields of the data struct for static compression.  */
+   auto_delete_vec static_fc_fields;
+ 
++  /* Multiple fields of the data struct for dynamic compression.  */
++  auto_delete_vec dynamic_fc_fields;
++
++  /* Multiple fields of the data struct for dynamic shadow.  */
++  auto_delete_vec dynamic_shadow_fields;
++
++  /* The stmt that read data from file.  */
++  gimple *input_stmt = NULL;
++
++  /* The variable into which the data read from input stmt is assigned.  */
++  tree input_var = NULL_TREE;
++
++  /* The file handler of data file.  */
++  tree input_file_handler = NULL_TREE;
++
++  /* The fclose stmt of the data file.  */
++  gimple *fclose_stmt = NULL;
++
++  /* The function with start point.  */
++  srfunction *start_srfn = NULL;
++
++  /* All fc_array variables need to be compressed.  */
++  auto_delete_vec fc_arrays;
++
++  /* All variables to be modified according to compressed data.  */
++  auto_delete_vec fc_refs;
++
++  /* All indivisual fc conditions.  */
++  auto_delete_vec fc_conds;
++
++  /* The variant of data type after dfc.  Now we only support one variant.  */
++  fc_variant *variant = NULL;
++
++  /* The flag to indicate which path to run.  */
++  tree dfc_path = NULL_TREE;
++
+   /* The field classes classified by field type.  */
+   auto_delete_vec field_classes;
+ 
+   fc_type_info (srtype *type)
+     : type (type)
+   {}
+-  fc_type_info ()
+-    : type (NULL)
+-  {}
++  ~fc_type_info ();
+ 
+   fc_field_class *find_field_class_by_type (tree) const;
+   fc_field_class *record_field_class (srfield *);
++  fc_cond *find_cond (tree) const;
++  fc_cond *create_cond (tree);
++  void record_cond (fc_field *);
+ };
+ 
+ /* The structure to hold necessary information for field shadow.  */
+diff --git a/gcc/optimizer.fdata b/gcc/optimizer.fdata
+index ae0e584be..adc2f5399 100644
+--- a/gcc/optimizer.fdata
++++ b/gcc/optimizer.fdata
+@@ -1 +1 @@
+-656137356462666463346364333361623035396139323366643262383764363763323530613631653861653634666630333030316562323662346133633566313233326432366139383465346338376266393132363438333765656463366235613461313434346139333334396265306163333731646537376430643834323664623863366163343363643130313435636565623834363361316133393230363937653835653762353534626439663133633538623062353439646237616630333237666136663433386334626639643465303163653832333062643863333664336630376231643964316231663933656333386338656262303734376137313565643963396535653131303763646533393234333735613333633132353061393531333935623539643834373266303861633739373862366663376365383233326139383939363566373061373361613939336537366631353334346563313061373365663635633332663437653136383235343635623234366430373330366336363237623962656465373233346131343264313137653838643334616430346339363732613237623866636364313232613934343261643231386531356430343965303330306332326266336634626163333461643139653962326566303064343333623037313762303934626336363537616339343637306633633066333231613063623339333539376461316632653234353938616133616463623534356232346135666261616339646638373031356633306161626465643665633066616264623965656138613233353331303236363565616133323131653935643363353832366663633434626236376663386335666364333530336433353234383031636264353761616638663031613263353738326438656265623236653338323232386565626464393034633962373835363264656664616439353336623462376139333134656662373033626135336138333136643032636430653334303861616439333736306363383862306439623962646435383931613161653334623666313236366366373962356536656434396231303338646265323666386461366430396262353536313433636132653466623061346164303635636162336536383062306637306438626232646636393462353563366437386531316463383239373361643230643566333736663330656538643461313161306163666361663064383962373736636162323565383865336630333461343939336231366437386265323439626332336166376262623837353163376533353066636339313233323761613766333633343432623331373530376432376534623831333339383964
63343965396630366330343962373934613362633064633337383139393031643932623364656562376164666435623033623336323038383361626664346362653633613334666465643337363034373834326237386364613137363365393963643061643939373130643531366432316631353063646439366461373864346135643733356434303631626235346231333666333531376332366136356434333062396536386661653432316361633761396435646533333366373931383562336331646234653964353963643530316630666436613531306333363033653133653234613430623462663231356533373935613961343064363061383435366662376236336332643131653831373031613233316538356166643462356462383161626331646435366432373137333265643032343566383639316264633564616433343736643666363335363037376231616131643633346139376334313039623539623733353437663934306331646338643231633066313837333838663264336138656563666565376664383637393032666434616333316261316339666466353162616132613433376266633039326131663733326532613866303636373633373432623037373061366532366533326632626535653437613337333039346633663962343138386338343364356334626561356132613461633462626264663766616337636533396232303632353733373731353431306632336462393730353462366562323730656663636662376362343137366638323638313432636561313665323862323164633935393837303065663164623865396164343465376531383431623134623132356363663335613137653863666661386630363832336264343663663835366534616632363362386161663936386261383530623464396166666265633439336661306665306138333365396534363365383761326363653063386330386131336363636134613261343136396233653961346331336663393463376136376162303939636539326363363865613437616261663136383861623861313732303266386461303531336335333539643237353036323366323434313634633966343533383465663036653735333630613736643464626430346630363366363038636363633738306631663132383635313539313464613937303532363734616430396532353666633539386463636434643338356461643538386236646432613033653631393463613938643837346233316236623062316532353333626331343038643064366138626163306131363161363931323231373438313633646463623132313033623161313137333663623865663535
3635626666303535663331363332353338313363396334313631333034656133666365353561643830356565633137346638643739636136376432303761633436666465336232356236653164353163346461656165333038653266336161383966633961323462376262386430363536323932373263343731313562356139336265383765336139643837333966623265386131666561366161333138353261663139303338393733346335313934363761393137633266616431663436343236613231663865636236346133616662623761373633663830623231393063616534633032316538626436633731643261313866666339353836373133363939623966636239333637373764323863663964376134323134613133346335326162363334363334386666396666336534646565616264363966356566323465346538613762623864303766646338666264643466666537303263623162326539653435643130313061386235623631306232303139633962653661303038376162323666343263356465313035393535313531373665623537363237373934366266303434333463303562646535623762313565326631393061643033613838363963333933396162363834393636613136613036656435356339616665636134313034663335343465646335333738663063616330353634316466336438373835326638633037656234396239303730613338336332613839323837643561333235393366323531303032666436616435313137313166383433396531626137353932616538333330653164326438656166343339363262366264326632376564396434396333356565343733383137666333386462313164333630303936336137323863313634613031393931613164363237643262353162376133643935373036306336346161376563383862316365613139376535626535626234666331363163303835336362393535613530636234643633626364386566396132333232373733633230393865663664636334393131643133383736373833653261383661623632343237653937366164386564633433396130313166306430316134653864643263343835653438383687144d4395a52c41e3842b41b03f3743f207264565084e41e0fbdf3bd429b13da15eb33d7e47b93b309daa3890d2d93d0000803f04a59dba191d513e33226b3acb4c80be39e785bb0bb9f63c65cdfb38ac15eebd303d01bb4419f23a55cf06bc7ff1453cfd78433b0bf6febd9013433b3b6da33db673a03b2b9491bd7ac8e03b25dbe63b684e0dbcbb0a3ebdc44800bdbcc493bd93e601bd4b6484bd0a60f4bc60dceb3d70e9debcf709e2bcfbb3e83bbdf4023e0ae182
bcd48370bd6b8bb5bc431600be698b0abde7c93bbd7d1e83bcc37df5bc6bc82abc8cb32c3d068300bdd32812be02a169bc1040c7be90c1b2bb666f10becbb08fbcf74b17bc1b1602baf138b43d5224843abfb0d4bd60cdbe3a82a4993c8ae4a63a8c9fa8bdc8a86b3afcabb83a4f68c93e8af795bed2d6ff3e5dc599bec525003f902e14bf7e73e93eea321d3e3bf3d13eb012e03e90fde23e313b7bbeef3fe23ec1eb88be7e3eea3e4d0431bfff85d93eaf825c3e333dc33e4d1ad93e44b8cf3e02d697bec0bbf93e84ba9cbe99cbea3ebef11dbf493be73e1640303d18f7ce3e3e59e63e4f3c0a3f3f3e73be1d3cdb3eda1a7dbe56e3d83e8f333abf6d5fcb3e7268383e67b9b13e156ac73e78c507be519081be4deef33e857f81be2959073fb3003ebfd134eb3ee775bf3e5ce8d63e5cf3e63ead860fbef58193bec791043ffaf29cbe689e063fe7772ebf8659fc3ecac0a33e0be3df3ea97afa3ee85f82bdaf0398be529dfe3ef1a39cbee6cdfa3e616738bf1314f43e2425023e5e10d23efe12ed3e3069b8be5e9d7abe7bc2f23ee6b27fbe498d143f2bf545bf934f013f53442a3f23dfe13ec02fe13e48d3cbbd07bc62be8851013f064a90be5f340e3f2a8f42bfa2bcf63e60cdef3ec14dde3e41c9de3ebc90113f286f3cbe86e5db3ed02474bef976ec3e177231bfd8f5e43e9cd5d93ee0fdc23e3c93cf3e6035c03e176199bedfb0f43ee0f39bbe9c8fe73eded525bf8412e33e7485393d99acca3ec677e13e8a5bc83e5ac5a1be2528fb3ec1b9a2be4bd1f03ed92417bf2ae7ea3e7ebcc63c3b37d23edcfee73eb381c6bb8e48d73b5463ca3abdb1b13a9904c23b8f9948393f7ecb3aea64a0bde43c883bde8ce03afdab793cb6d945bc09d0a33c5bd255bc5c04a03c14a8243d153aaa3c3cd991bc6a70913cc9a38a3ca956d6b9ddfa903c944948bbbb53903ce48761bb6e0a0abca78a8dbb728827bd1d7904bc7ea513bc9f89cfbbdab6b23c30c117bc6dbbae3c19b1d5badb1d90bbb6d427bcd329ddbc3f8f1dbc879137bcb972d63a89bc4c3b9619283b9b4b133a8c65e13b3901103d9c7e643bbf128fbdef865d3b7f258b3b033a123be896e9baee8caa3c4efb683c7998c33c4aed8b3d50a2ad3c7aff60bd678b973c45cca63c73d7c6baf325473b210f1c3c0ec3173bba9d463cf2b3583df487003ced3709bdbb3eeb3b5210f63b41eb303b9665293c7953a73bf34d5c3cab738b3c1f0cf03cd77a813b1d6947bdbf322c3b25a7c6397325e93acb3c953a2266d33ba8ea28bbf4e2d53b3da9383d0bcfd63b2cfa20bd251aa03b2a8ab53b03529c3cbbbdc9bc0e1b053d7c27c9bc955f0c3dd4f3323e0edd023d9f9a0bbd506e003d347a073d4c951bbb7989343c8fedaf
3bf6a1233c00eeaf3b63dba43cfc7e533ac83606bdb09646bab2af50ba072e9dbb2548b93c7737debbb6e0a43c5b1e02bcff8d3fbd274eacbba96c3dbd3e8ceabbf3fb13bc5714203cc98936bc98db9d3c83d268bc0148a83c9736483dc820a43c9de221bdb90e983c2fe3943c3a5fd4ba81ef3b3c751c973b23fadd3b4f8b333b37b4b53cd044e53a88a81abd4a7aa7b99dc501bad8cda93aa7c572bcfba0d63cc1da86bc4d3bd73cf09e943c827ccd3c1b8956bd3883c23c8637bf3cab889fbbb5c2d13acb99fb3b4295273cc41dfc3b65be0e3d1dddd53becc235bdf19ca43bbbdec43bd1b8b23b4ac6afba5fd6943c218509bb2bc0763c48c8f73c62c3813c565f25bd1374443c26cb633c1fda053ddebcf13cd9f829bce620e43cf20079bbe584b7bb3ade37bc40043dbd140f3fbc183f64bcb6b97fbc4640e33cbeff89bc9ecce53c627f3bbc92a8a0bde11326bc9058073c296f68bc939699bc7bed253b46d30cba6da4803b995d9aba7292253ca565a43cb7b8d03bd50711bd21df8f3b9a02803b3f98f83933cbb1bb98a95c3ca09600bc14c1753c3184703d7760523cf2f390bdb6663a3c91c9433cc137633b6be27d3c997ecab9060da63c1600cd3bafce23bdae80dbba1ccb88bc058da2bb467e21bce8aab63edd089dbe4f78fa3e1fef9fbe16a1f23ee01a18bfc853ec3ed3a3433d9ce5d03e226ce63ed83484be140466be9106fa3eb0117dbeb6c3133f0fb33bbf9787053fe44c0c3fa01cdd3e379ce73ec6904b3e0ba483be3ef5f53eb7fd87be03c1053f8d4c2dbf6257f73e6866ba3efd92cd3e0945e23e59a1323e199391be37c4023f598595be7f81f63ebe6c2ebfc3e3f33e33fa323e77f7d43ed851f13e7b92053fde4a37be0ba8cb3e5e0d53be7047da3ef3a24bbfddabd73edf0dce3e3c6aac3e8810c63e1a6f9f3ef0b686be44eae93e0b2685bec4bfe73e164c3abf6067e33ec99f4f3ec58ebf3e6252d03ec3eff43ea926a7beb70f063fd659aabed093fd3e73c6cabe1c69fb3e3f861d3d4d3ce13e0a74fa3e4c35063f755379be2f12f43e7d848ebe2060043f9d9103bfaf5af83ef9b1043f8e7ac83ef721d53eaeff133fb67d2dbead28cc3efc3668befd33ea3ee1b941bfed13dd3ea8940e3fcfb9ab3e82fbc33e38e7e33ee37590be93efe83e41ac91be6fe2df3e66f21ebfe293d83e6b91573de8eabf3e3abcd63ea5db103f5b217cbe49c9d13ef7ac80beaa60d13eeaa93ebff204cd3efe974e3eac14b23ef978c33ec984cd3ec2d8a2be4c3dfc3e149ba5bec9f1f03e00f911bf3f4deb3ee781273b690cd33e1ccfe83e7ed9ce3bf7dd993be26ca43aa8abba3b4c8a1b3b2357313d7eb9d73a64e851bdaa101e3b6a432a3a1b5457bb665899bc7993fb
3c83c150bc13abec3c749d863df737e23c2f3b11bd8b92d03c1dacdb3c072077bb17e3d1ba1ad1323c254dd93a8bd12e3c5ea3213d4f9b263c158d73bd8c5d013c0cde153cbed5d0bb31ff0d3c59f8ff3a5b2df23b29da083bc4561b3d0491673be76542bc74fd963960539eba288b47ba9f2eca3aa558a73bd043abba8049c33bd32eb63ce799e73b4296ccbccda17c3bd5eec53bfddd8f3c66733bbc6a5e943c11bd53bc14dda13c7fae023dd800a23c034b55bdc354943c0c158b3c9c59a4bb82b70f3ccb7c783b7df2123c6d74be3b126bdf3c3ac9463aeeab5abd4df224bb11bdc1389b6322bb232aa8391f7c863cf42d3ebbbb0f803c049f493da7cf3f3c6c9924bd33876e3ca5165b3c972233ba2a880bbc8a30893cb66825bc1a729e3c6624933d492f883ccd7243bd00c5733cf019833c0308c93bc72d3b3b88a5c43bb947243bf435f23b3916afbbdb18df3bd7685ebde8eeba3bdf2ba03b223b07bcea544fbc0669fd3c81fe8bbc1275f73c5e02723df0b1cb3c51ae5cbd1d3fe33c8980de3c71bd48bba6c40b3dfa96ce3c08c5663d3ccf6e3cc800143d2a08933c0cd93f3b508e9d3ce864d33ccbf2adbbc124033c9a36233bf554ed3b5729123bc4b9a4bbcd83b53ac16774bd3cb33a3afa121c3a7ad068bb01c4603ca49e9b397efa6c3c0395df3b0cea40bb160a703be9dad9bcca8f593b66b085bb8d35a2bbae80203ccd53a5bb30d02a3c432e18bb9795e63c58c982bb874772bd99d64fbb118faabb5d1820bc388044baea1e3d3c7837df3b2164373c1f98673df94f183cfc0d45bdf80b223c2b146a3c8cd5d739622b99bb01a62f3ca437eabbc703323c1cc9e83c66002b3c451f67bd9e4f1e3c61a0233c797bf8bb9576c33ae97c8a3cc5ac933b991b553c41f85e3dedd8523ccc9946bd22335f3cbfec273c24fb0bbbaa53023b8c62623c3f8dd3baa4fe833cf033123d0131633c92ef79bd43a24d3c61be0c3c8f4de1b94e9ed1ba1e811f3cf46e85bb7e115a3cf3d5ae3ceca73e3c3c0c41bd57df253cdcd2213c263b3ebc345d733c81be6f3c8aadee3c8a5d393cdf08703b7361813c7d5222bdde9ba83b5465c138af778e3b2e20b83c9a0c69bcfef7be3c44b00bbc850599bcf7406cbc56d64fbd1cc44ebc73e483bc1f71883e25ab7ebe1807003fd1c286beef68043fe1ad19bfd38af93eb4a0c43ed240dc3efb15e63e487f4c3e640b7fbe4907fd3e871b81befe66173fe60e1ebf5886fd3e6022093f8c18e23ebb31003f63cfe83e27805bbe22b0e83e84c37cbeb6d1f03ee10634bfa8a4da3e3d3fbf3e372dc53e6920cf3ed1e1703e1f888fbebced023f8e4692be6bf0043ff0b121bf525ef13e6ed0763ecddfd23e197eeb3efe3d8abd61ab74be3ce802
3fb0d484bead74073fca504ebfad57e73ebd250f3fa009c83ef804dc3e00864abd678da3be815f083f71d7a9bea740083f6d2e1ebff19dff3e8968d23d832ee23eeba4fb3eb51bfb3d7b298fbeacc2fa3e6b4d91bef1b7f03e642645bfe625ea3e460e0a3e602ccf3e7c86e23e7754f7bd306785be01ad033fcc7297bec0ab093f2aa034bfa508033fe0570b3f4acdd73e4e7afb3efd2fb53e5b688ebe8e41f63e8fb791be0563f13e9af428bf8858e63e54962f3e22bec83eb4c5e03e184e94bd78de98be707c073fb7f19fbe0ecb043f9c5d33bf7316fa3efcba313eb5eddd3e5283f53e2e92ff3ec37d61be00ebe43e06528cbe7949fa3e66ab29bf108de43e9b95ef3e8eb3cb3e7a2cd03e26bb71bd334794be2725063f18a097bec754053ff6d52fbfa253ff3e6f3d8a3e9d30dd3e5391f23e376e12bbd12e833ce1a93d3b3c98593cf3155d3bdc9895bbd4d0b73a64e731bdd2869b3bbf23bdbb14175fbbeca5a03c6921c7bbdcd9db3ca82787bbae172cbd01edc4bb4409c2bc296076bbc0953cbc70712d3cfafa62bc047fb93c822e8cbcb852c33c62d2833da0b2bf3c085509bd7573ac3c61cdb13cbef44e3c54518b3c2d402abcfb4a8a3c4f8a1abcceead03c35e13bbce83d47bd35ec3cbca71f32bc3d365c3a71c946bc48e8cb3cae9072bc6483c53c7441ad3dc78ebc3c05a12ebd49a4b13cd1ebb93cb7e544bb8d0f963c7216a0bbba6ec13cffad77bba48636bd1dd90bbc591d06bd82f1d5ba9d3114bc55807b393f0345bbb84e4b3c4788cdbb1e8a423cd5c7873d8ff43b3c4e5a51bd1e82293c0147383ccfc8163bb605d53b54eb323c2549cc3bd62c3c3caf80223d556e1e3cb95c42bd3f9d413c7dd2243c6b08c03af82effb91db7853c0378a9ba0802793c2e6b783d346e5f3c093b80bdd148663c0d6e5e3cfc6844b9f2e1cf3a7d18613b3b8734b953bb413bbdbc943cf6a8323b88962dbda482133be6971d3b12ad30bbac4f143cd94ca43b87984c3cdbf16a3b9dc7b13ce8e6633a286b77bdf5c7373b162fa73bce6c1a3c33e88cbb4363093c1c049fbbc6b8373c95c3953ca57d343c76441ebdbfbb0f3c3f56013ccae5863a4ddc20bcfcc7c73cbafe29bc10149b3cee14873d03ae953cfa1e36bd2aa5863c7b79903c5c5125bceebc103c7d270f3b0b70023c42fc8b3ab278eb3c84d4fcb816012abdbfd66639ea10e4b8cc1e11bbed1c073b1c3b803bd21e9a3ac633b53b2f52083d9c50713becd382bd6714543bc395063b79e0ae3b7b9e4ebb2d0d253c44b83abb4d6a393c23814b3d6e861f3ce06cb6bcfe8e0a3c511f173ccfa56c3b873eec3a1c34e53b4381d0394e17d73b2c49ec3cd516cc3bb9c42bbdd3fa933bf49e793b20936a3b5043823961cf3e
3ccc562cbaed9e403cbf96063d9c28353ccb3f5bbdf558163cdfc9fb3b5bf28d3cd425acbc618d163de421c1bc0a86163d09ec2e3edc5e083d3d898cbdbec3113dc213043d3d38c4b9466a44ba1efb2f3cce0f9c3bfaf92a3c5388693d991d113c7ce746bd74ed1b3c08d21a3c382fb03c5592a0bbccf6563c24ee8bbb5908563cd779163d06b5503c0580fdbc91d9423cc241493c356d15ba05fd523ca5611cbbc379493c66e7f33b0c1f073dc62fe0bbd3d455bd42683fbb482ec2bbf0d0a83ee99383be0eda033fa74082be4c841c3fe391f7beb6a2063fb386103f5f2feb3e6bafec3e53dfbd3e983a56be1528eb3e1f3085beb3fdf33e12fe3dbfed47dc3ed362e53eb34fc33e0b0ad33ef34e0dbefeac85becdbdf43e75bf95be249c133f67db35bf21d8013faf1a133fd964da3e7c4afb3e8408823e7fdb8bbe77f7013f8f1595be6804053fa32217bf4881fe3e6341823e55beda3ed8fff53e0002db3e85dd6bbe93cedf3e11ee68be5e26d43eb9f24cbff431cf3eca59693ed2afaf3e60d4c43e81bc54bd13eaa5be9661053f5ab0aabefc59023ffba229bf847c003f92e1f53d31bae03e5082f73e9e01b63e759a8ebe7133e73ea36590be9342db3e680d37bf872fd63e7d461d3d8813be3e0816d43e5d82d53efaca83be81e5e23e01ad87beb5c6d53e2bd731bf007cd03eca1deb3dc447b93e4013cf3e154eff3ea8fc75be08b8e23e116189be36dafd3e4dde22bf2458d63e2faead3e25c6c73e0a54d83ed91bf23e02e993be25bfec3eaf6e96be0defec3e48a621bfc4c4e43ef7ad213e4974ca3ecda6df3ea114383fdaf953beb966d23ef24e4dbe291ce93edef430bf7100c63eb01ccb3e95b4b93eda29b93e8a49ca3ecc00a4bedfa8fc3e2bb1a5be2fe1f03ec66f16bfd2b2eb3ee1e29d3c966ed33e198de93e1904c63a3ae604bcfeff653c5d0bfbbb80629b3c76985c3d60918a3cd08057bdd35c933ca83e6b3cb9d066bbb36d47bb05eb803c4bb4a1bb48c3823c9bcc033d9e47763c32a143bd3cb3613c960d683c8189d4ba9301293cbd3f263bef5a323cbab8a63a3d59e83b56dc643b234648bd7674be38ef94443a9df5abbbc5be603c02d2df3accaa403cb260ca3b856f0e3d2191ceb9dacf39bd1bf821bb5766d73bb84111bccc86c93aacf4613c8371e5ba12d1533cf7936e3d66c8023cadd91abd5d2e313ce4e92f3c223b19bc5ac9183b1caf983b0cc0a73a64a1153cd31c0c3d1b5d923bfa1f24bdbff1a33b73bd7b3b8bd9693cd6ba7bbc2d09db3c758c83bc3085003dd3330b3eda22d93cf0ae09bdc729d63caf83d93c05610cb9a04e24bc8d2d9a3c82923cbcaadca33cb17a873d11cb9c3cf6b041bd449f913c7196923ce771feba39f6edba18b314
3c1d3807395c21113cd00b253dfff4303c962c50bde69c1e3c5710de3bdb3c8a3b096e2ebaec6b373cece9383b9de8433c3b4ce13cdad3143cce5928bd2b30253c4de61b3c3e5925bc0d53e5398ee9f83b50c71b3a51bde63bd55e813dfbd6bf3b938257bdb3e9943b152ab03bc25b1cbc98f59c3b75ed303b8ec63c3b464e713bbe07743dd60e2a3b004d1bbd36ec863bd63c1c3b6bdf53bb3c309539a8c8743b973cad39656aa33bbda3aa3cc2e08b3b98d705bdc680563bfab04a3bf73b01bcd76e913c604527bcaec1813c9c9ae3bbba0438bd285216bc94cc29bdfc310fbcaf4c42bc138f3cbc7c9d66ba60164a3cdf580bbbb5af403c7261863dcabb2f3cec056abdf8051e3c83f1283cb600b9ba0b9fb4baba50213c095a5039ac32513c8fb07e3cb9371f3c50a40cbdf843383cd969f33b1c34fbba0cff823cf9d281bb7aeb6c3ca95f09bbc72cc5bcb93379ba67f4d9bc132aa2bb94a6afbbc79f74baf9082b3c7c42053b4adac33b969a483b1dd779bce885103befd8c9bc60d7f13a8a93db3a7055c6bcf0cbd03cc67c75bcc196ce3cdfda89bc85e3a2bd8aaa95bc66897dbc339896bcc08298bcb27911bb4fe4b23b3c89513cd8bb8e3bc0df4a3c58980a3d549f0b3cc3ed54bd565c563b3c35843bef71a33ab4b026381dab4f3c8822eb3b9f5b2e3cee511d3d170a113ccff352bd2d41ee3b321aee3b6479c43cab76bb3c2a3315bc329aca3c2df32ebcfef9fdbb244a8cbcde3444bd365a57bc33fc79bcfe524f3eabe282bea97ffe3ee0ea87bed85a043f4be633bf908bf23eec11043fd4d8d53e307fe93e2094803e506d83bedcbde23e1b2f87bef844dc3ec77244bf0c1ad53e572e163e5088b83e047dce3e05f512bd894199bec01e053f30039bbed405013ff1cd35bf4e51fa3efd5b213ea77edc3e2ca4f33ef42bd93ef78591be6cf5f03e362e93be94a5e43ee31d24bf596be03e801dd13d9d6fc63ef969dd3ea28ec23e365070be2d14de3e2e1076bea661da3e850350bf606dcf3ef091723eaf36b53ee0aac93e73e2033f75f074bee398e83ed00681bedd7bfa3e5c7817bf1ec2ee3e2915ac3ecdbdc93eff25d93e57c8a13ed2bc91bec6adfa3e5af895be74e5fa3e9fe712bf1f7af83e8a3c543e818ed33e749dec3e1dfe373e30b09abe2467053f98de9bbee5e8f73e1c4120bf6ba9f43ecfc0c83dc7e6d73e8d40f43ecf2395bdb64287bec2c2003f7f378cbe3318063fc8053bbf5738f13e54fdb33ef711cf3e374fe33ee988ff3e3e2284be68f1e53e0abe89bee926e73e1ba927bf55aae23e50a6183e6633c33ee12bcf3e0df5713f4249f9bddd1fb53e513604be7ca4d73e8c1049bf475fd33eaac3353f74008d3e15f7913e0404a73e064991be01fa06
3fd0c293bedde4023f7f460abf9c81f93e9408353e1f18dc3e3858f73efcf3d8381165bb3a1ed4f13b66faadba6c05063c7346ec3cafeddc3b45fe41bdf10bd43bdc3fba3b9fbaa6badb92df3a26b8523cfb88553aaf90453c8a984e3d6898463c298d1fbd98062c3c9bfc163c1cba5ebb8ec0eb3b5b56e33ba93de83b953c6f3c42588d3b1f74293c44d641bdd26cf13b2f2f9c3b8eeacbb949b7b2bb1a118c3cad7ffdbb8701843c78c8823d01cd843cd9b152bd70f3683ce2ad813c7212ccbaec4bf7b9ca32633c53adf2ba37bf673ce368703dbf3ef93bc5774ebd734c0e3c2eafd43b1d1166ba9ac4243befb80c3cc95518380381fa3b0947f93c49f6f73b44c628bdd475d03b6e0df33b43ecf4bb5077083c5064b9bacd2e153c9732e3baf237133d999cdfba632885bd1b7666bb7f95a9baeec6babbe3e49e3b3735093c8ae9883b28a7ac3b56fd633b33ccbb3b570d0cbd10d3453b81ea883b829da43c0180e23c1dd790bc5b77dc3c925e67bcf0fbccbc8dd6b9bcfc2463bdf94980bce22cafbc902ba4bb28a314bb5766383ccac575bacba2123ce74f463ddd9c153cc43136bd77f4013c9d15143ca77fa6bb6cc6243b4b2b443b7c95ed3a29e8043c6fe59a3cf516b03bafeb2ebdae27933b25d6153b8d463aba796e2cbadb1b533c1c11c4bb5a7d803c19972a3d7808623c1fe637bd6fad573c4b5b2b3c6eb3733bdfd29cbb02e38c3c6401e0bb72e1973c02ab743da4fd933c87250dbd83eb5f3cb8d6773c7e4514bbe5a1113c1e4f013c77e4543cc590453b93870f3c78a337ba301f52bd5af99c3a853e65bb63e106bb8b42eb3bdbc01f3b0c81e83bfa8de0b91447893dd66b2b3a928a4dbd9da10cbbec6f0abb9e397abccf5c173c0f0c2f3be3d7f53b16b0293b53d0cb3c95db6f3ab36af1bca4769a3a01d4a0baad745a3b260265bcc3b99d3c7b6549bca87fa23c147e453df66d9f3cd7ba39bdd2049d3cfb239c3cc7bb673c45b3a1bc36a3313d580191bc70291a3d41f8493e28d1143dc25c08bd013f2a3d05af213d61f820bb113eaa3b27c4c03b56e3d53bbdbbe33bb0dc203bf4e3d03b491469bdfe07123bddd1063b179a0abb86a584bbb240a63c599fd5bb495f8b3c464f443db62c793cd51220bd4ccc823c5af24f3cbcef4fbb83b4cebb9c92663cb6c7fabbefac6c3ce308783d2d5c533c905d2cbd78ab3e3c3fbe4f3cfaea4dbb49ee5fbb25c5233cef6b833b531c3f3c66ab2f3d5edf4e3cbb9210bd9fbf0b3ccf49193c51fafe3e90c756be715adb3eed0d49be25c5ed3e64503abf318bd73e99b8e73e6b79af3e420fba3ee950be3e07008cbef683e93e99c88dbee548dc3eed4a3dbfacaad73e77d18a3df25bbf3e9445d63e5d96123eec65aabeec3907
3fe313acbed47c023fb3ef17bfa431fd3ed00cdf3cf490e03e6703f93e004bcb3ed5c6a1beb268fa3ef2aca3be1898ee3eb1f111bf6baee93e84cd133c8a79d13ecd3de73effab5bbd2daf95be81f8063f874ca1bebfdd083f8c9c25bf21f6023f4e4e653e042ae83e537ef43e40d452be2d4e56be66c0fa3e7ad486becfca143f22f03ebfcb79003f73ab523fb6d8d13eeacbf33ef57611bdec8a96be17ad033fc25a98be4623003f61133abff137f83e56b62b3e9665da3eab4cf13ea1339d3d765973be63fd003fa5225ebe73a4023f223e46bf07c0fa3e1742153f6e6ad33ea276e13e5393c33e41a99ebe5eeefb3e879fa0be8360ed3e320016bfef4ce93ece150c3d34b9d03e157ae83e7cc1313f72685dbec4c4d43e7bb05ebe9657f63e2b2d23bf46f4e73eea41023f2d3db33e3de6c33ec32ef93e79894cbec235dd3efa0c80beff1b023faf442fbf7df7da3ee041f53e6556ca3ece48d23ece5906be323b93bef7bd013fd4e195be642a053fc2c439bf42e8ff3e86ea6e3e9581d73ead4ced3ee3ffa53c7981c4bc2fe9fe3c2ca2d8bcd059093dc6c3193e81fa023df8b6b7bc09f2033d79b8ff3c49f32a39562805ba7df3053c165180bbbcd0123c40d1e33cb49be63b564903bda22ae43bf8dcd43b28edc73a4a1e723c347001bba98a5a3cc3bb9a3941c603bceecf38bbe8d652bd101354bbb677d0bb93ac88b9e093f03b2fe2a13ba98afa3b5485cc3b48564d3ce6af1c3b989844bd6033003b5990803bd374b63b69edd7bb278c9e3ce958ecbb98c2d53c827201bc0f529e3c174b88bdad15833c416b893c4daf89bbbb16193bab8f993b9dfc253a4927f73bcafa553d0de1d73b59cb48bd2bccdd3bf5f0803b81f4bbbb480843bb8bfe6b3c10542fbb2ca88f3cbf488b3de92c8d3ca05422bd97e1813c92924b3c9b449abba9f73e3cb3f69cb90173763c750248baaed0b7bc915b12bb4e7c54bd70120eba414ebbbb13b6993b035b9f3b9b30003cf3fd003cfe37133cd137a23cc222e73b4bdf9cbc3907bc3ba4779b3b15cbb83aebfbd0b92e3de83b42e793b910661a3cc1bdee3c18b10e3c708314bdc7bcb33b64f1c83be5300a3c7965a93b7086d53b11fb083b321bd33b4aa3073daa59923b88101abd06ea0e3b1fc62b3b2d4f58ba12d5ab3ba506cf3bd5bb263c3877c83bf6ba753d244e8f3bf1813abd39185d3bd2e57d3b8276f2ba6709933a02406b3b62d8003ccd922f3c6b14913cdfb9a23b101c0bbdc016893bdf0e533bc2afae3b475ceebb6daf9f3c1898fcbbcf1e963ca4eab23ced34983c43a94fbdf4b57e3c859b903cedb0c93b6632dcbb6a74473c1d207ebb69dc803cb0c53a3d2edb573cd72a26bd2792533c0be43b3c22f1a7bbcc598f3b458039
3ccc597fbab1ad323c0d642d3db418fe3bcdbe1abd144c0b3c251e043c6ff7c038b3fc19bcf1758f3c170f13bc55fa873c7cb63e3d4697913c162a2abdd9b3793c797f703c46f85f3b6ca3ccbb876b933cbd24f1bbca59923cb7e1be3db0a5993c757365bd3f1a723c19e48e3c98150d3d5ce97d3c696698bbb0af533c1d515cbbccc11e3ce72cc6bb2b555ebdadf9c3bb1574dfbb3211dcba1085913c4c0e38bc0aff843caaf59ebb773309bd361dc0bbd991abbcc9284fbce6b04ebcb78f543b472d26bbe403883c90229aba79c2893c44025e3d09d63f3c71a104bd8af2273cf4af483c94822bbc54b8cc3bcfe0e739245baa3b7380fd3aefd9803dee35a638e80d49bd70861939e25276b9b3746b3df0cf99be4a85013fbc149cbeb2c9fa3e395836bf5a68ef3e65b7d33d6180d63e70b0ea3e84046d3e665891bee293f63eab5895bedc01ff3efa2324bf050bf33e3af05c3e5277dc3efa8eee3ead753b3ff22e34be1f33ce3e5ddc5abe523ce83e9a032fbf8f88cd3e50ebef3efc00bb3ed07dbf3ef885133f7b5159beb055e23e1aa26fbeb22fed3e1a3e2ebf16a2cd3e2a13b53e0f88b73e619dc53e6681f63e14e591be1b05fd3e1c5b92bef153f33ed4570abf29cee43e011e063ed901cd3e4469e43e2b3cf03ea30a66be5616d33e6ed568bea2f0de3e4bda41bf3da6d03ef1108a3e726eb93e0556bf3e1cddcc3ea1669abe0ef1f63e7bf39cbe8e07e83e905422bfe4c0e43eb492a13d2010cd3ec405e43ef1d1ae3dfa3394be6873093f6e5598be8ed40a3f25161bbfb5ab063f2bb9db3d4cf1ea3ee06ff43ea95a20beb6ad9dbeab13043fdc539abeaafa0a3fe7272fbf9e84f83e6e168d3e7781e53eece7f73ec8ce2abebc067bbee90ef93ed53186bea243093fbbe73fbf7e21f53eb51dff3e3b3adf3e6ec8e13e8c3b82bdb42460beab37f43ebfad94bed1b10f3f0c213cbf2383ed3eb8671a3fd463ec3e0cacf63eae35cfbdfc817bbe1854043fb85f94be2348163f22ea30bf9e4a023faf214c3f3f85fe3e6c9ef73e355a28ba4c46e53b017e753c1c87c03bb5cd5a3c2838803d14e5343c6861c3bcbc67133cc47ed33b35432f3be20f213c9bff823cdd53483c42cb7d3cd7ecc13c147a113cdcac96bdf93e433c01fb103cf2ec67b8032b1fbc76d99f3cabc747bc2974993c1376ac3da7358f3cafe934bd3fdb803c6c62943cbb5343bb2b6496bb8ec43b3c6236cbbbd0c62a3c28b4833d90e7243c73e35fbd115c123c48e9283c1a7d903b70063bbc1933853c7a7d1dbce9ad8b3c4fd20f3dc7d9813ca69f20bdbf14803c57ab7f3c9e6d093b7a2d093b4302ea3b71b3afba6f23163c5642c93c1d8bd13b0f0004bdba12ce3be107a73b50cd62bb92904b3bda05f3
3bf1b6e53a94a5033c7894953c81b8943b58933dbd97f2a43b48ce4b3bdcd3593ce3e559bb69ffcf3c759780bb8354c63c51a8f93d48128c3cfc8107bd7246b63cab7c943c5f9ddaba8bfc18bc714da03c576349bcf85d9b3ce84e9c3d2d09933cea4c18bd8afe8e3c2cea923c57ef41390ba48bba674fba3cb1264ebbce30be3c3bbaec3c138bd53c6f5990bd9be4c93ceac7883c02898cbae2e441ba0e1abd3bb2f5d83ae905473c4056493c4e330f3c28c50cbdd2b7283c5bfaed3b1f638c3abd2cb53bed0ee33b4416b63bef75073c7a38753cbd91b53b954528bd3e33883b5456a53beb300bbb350e223be1e5cd3b23ddb53a281be93b6eac4b3d0f77cb3b2d1da4bc19348d3b71add33b25c33abaff77a9bbb1ab9c3c42fa05bc597d893cd1002d3d5a22853c90a4dfbc0d71853ca7f58a3c6eb77d39743fb93bbbda9f3b16e8943be58d9b3b6730fb3ceab25c3b229453bd9aca833b4aa1a43b96f507bb02dadabb1dd25b3c73ad04bcb1f24c3c9b72693dab604a3ce904cdbc8ab0333cd192473c502a5eb932c9143c9b6c013cdd07263c16cf513c3848c03c8d37f23b10fe15bdfc5e6d3c77369e3b7d18ce3a9601d2bc8c6e113d1ef3d5bc8be0183db487b03d05ba0e3d330441bd93390a3d05830a3d7df54abb3c1eec39ffe3823b19d4643b0ca3993b4201613c87817a3be0aa11bd68463c3bee21413b569fd43acb4351bb32323e3c1130b6bba088403c54cb133d71f2583c650a7dbd366e293c536a023c74d3b7bb9c6d163c51feceb9dfeac93bc622dc3a1c951a3d5e7681bad78f20bdaf9843bae03ad3ba6f648bbb7558213c8c35003bacf80f3c9d37633adab56a3b47c2e8ba429780bd8bf9b53a6d12a4b995b337be64908ebe376e053fc7a194be0502013f392b41bfc7fdf13ee94f873e230fd93ea502ed3e751cfa3e23fc84be96cbe43e558f87becf28d63e7a5d37bf8666d23e3e12cd3da892ba3e2ecad13ef3b3e0bd481a99be552a0a3f85999bbec033043f8f732ebfdc82fd3eb766413ec4cee13e152afa3ed5fbc9bdf7a87cbea07f023f212e98be70b60c3f99f531bfe83c023f65bbea3e76d5e33ea863f63e10c643bdcae24bbece820a3feed88dbeb8a60e3f20903bbf721ff53eaff42b3fe856d93e8b80ff3e4c3d6c3e2ae494be964a023f5b0ba1be9672043f47cc11bf8f93fa3ee0cc2a3e33dada3e301eea3e83ade5bdd7eb92be2233f93e43ec95be8e2af33e1b4445bf0ddaea3ebdac2d3ef7d0ce3e1f51e73edee566be687674bea12d0b3f26429abe4c2a0e3fd4a73abfd5daf23e32f0c83e924def3e2284fe3ea24716bedacc96bed509083fde4e96bea6430a3f6d9a28bf534fff3ea56e833e002fe03ef2c5013f8a6cee3eb0698fbeb11df1
3ecbaf90be5aa5e03e8b8824bfa06fdd3eb118fb3daec7c53ee528df3eeebf65bed1b58bbebacd063f7390a2be63ab0a3f5ca425bf4a3d013fd665c13ec5fbe23e1b66033f7013da3ec0e782be5132e63e12f686bef829de3e8b6d2fbf4b60d63ef2c00f3e659ebd3ef7e6d13ee10ed03bc67f0f3c45741d3bb0daf33b5e39df3b5ea9953be727893ba42d13bd2afdae3b0be0f93bb1a2223c6d45e43c17da55bcd75a0e3d3c3315bcc99f73bdf2b34dbcadb347bd863263bc4a6e89bc65b616bc4244fd3b32023f3b1d43d13bef37a53b98fb123d971b1a3bf6eadabc30f15c3b3ace153b58fa5e3b7a05a4bb52f3323cc3bd98bb9696313c5b19993dd413293c2e094bbd4e47113cdadf233cafd7603b4c1ed6b8dc521f3c63c984bb2103193c7aae9c3d50640e3ced2c4ebdb62fed3b1a9b0e3c219f31394773703bfbc3bf3b7ef7813bfaafed3b08ff4f3c1a058e3b8a7247bdf9f8a73bf284e93afd4eca3b7be13b3c7d27bfbbfcc3413c4cf90dbb59ecb0bce6f836bb338985bd3d3a69bbb96002bc4396583adeb88e3babe9f53a68c2a83b90d7433be703523c6c20dc3ad1f31fbd4769b63a3f9c4b39cd774bb8cae544bc5967cb3c06f107bcd6d0d53cd829333d41a6b93cf5f544bd1104bd3c12a1c53c72fd99ba591fc43a7149c53b53dd633bf1c42d3c6eaf003d6568a83bf17a49bdca33c53bdc83f73b32946e39579123bb74dd2b3c8bc165bb2f85a23cfb398f3d7e6c2a3c843b5dbde50f0b3c67941a3ca7533abc59089d3c36c3babbc0f7a53c898721bc100554bdb9474abc4f55c7ba098932bcc25b5fbc6748a13c343f8bbc081a2a3d8a2591bc6225273d8a724b3ec527363d916a51bd51a1213d32c6133ddd415638a4aafcbbd6b8933c8247e3bbe3c0a13cd7cc673dfbb0663c007a6bbd67177f3cf1ad763c427fd03b7768a6bbba29a73ca4c80dbc9809b43c10f9783da206b63cd5b072bd0066893cb03c933c7f80143b1ac112bba4cc573c177680bb84c6723c20c1ef3cb47e433c100043bd006b2f3cbc46403cc9ffbf3ab881703b80e7f13bb2c8863b27b70d3cf15c393d0375343b1fe28dbdcab2cf3b4103e63b15fbe6399b7a28bcc41a9c3ce82757bc40cc913c12ca8d3de1278f3cb9e22dbd6ac1833c5c308d3c2693b2b9146a553bc2da3f3ca5c9b4ba5377503c22ac393d17be303ce29c5cbdb02e213c9cd6fc3b6307d33b70b6a1bb10d6843cd567acbbdba95c3c5583423d8129633ceadfbcbc3d8c4c3c02ee513c71f21d3ba86b813b0ff95b3ca70c603b11131b3c4dbddc3bc8ecfc3bc7910bbd621dc43b5b669b3bdd3236ba1c6a22bcc7db8d3c52901ebccc0d933c6402343d20a3803cd9a321bdbf9e6d3cd0b8783c6ff7febdd72f8fbeb78f09
3f69ac9bbe0b810c3f90f323bf600c003fcd33bf3e9193e83e0d03003f72ace63eaa6c84be1f57e53eefd186bee796de3e844839bff336d23e99aa073e2f92ba3e47c2d03edf4bcb3e5cd19abe3ee8f53eada49cbeeeaae83e374019bf233ee43e4746553db2d1cb3e233fe33e8726e93eeee48dbe66b8f03ea2bc8fbee768e13e522b26bf6932dd3e52e3b83dc097c43e93acdb3ed827c43e6d8d99bef31df53eb44e9cbec848e93e8dd125bf457fe33e54d1693d7e8fcc3ebf9ce13e4dea28be3b2f83beeca6033f080f8fbe69fb133f473d39bfb70afd3ef5fa323fd174d63e7598ea3e46f87cbe51e663bee731013f1c40a3bee01a083f662637bf387f063fa480163f9417da3ede77fa3e4d7ee43ecca487be8528f23efe458abe3bb1e33e7e172abfe295e03e86b4053e7d58c33ee5a1dd3ea22de53e812d8fbe8a61f13e0ac292be3492e63ee25826bf714ae03ec7a1eb3d285ec83e21a0db3efdb0db3edb2980be5ee4db3e3b6f83becb59dc3e61d743bf9e39d03e0572563ed04fb53e7303cd3ed31b053f1a1461be3b24ed3edb938dbe66d4f53e67e320bfa5f7dc3ebea2963ecf62c13e3c6bd43ef01eb43eb41c5dbecd55e83e46ee89be0a1a013f087d27bfd2fafa3eaca30b3f476cd63ecab1d93e7e9725bc0aa6923b86da413c0203193b5e7e673c7d50253df6e9363ca01e4fbd3481453cef6ad83b800e913cd1685d3c107bbdbb18eb313c57317fbb8e55233debb5bdbb863567bd2850bcbbad13d4bb915a783b3cd9aebb70534f3c4e62d13bafe06f3cc4473d3dd4005d3caa5922bdde91503c94ce323c6c72c9ba0449b33b0e88c93b4cb2603b033fe73bb935dd3c42e8e03b53f621bd5561983b2efc753bf430caba702d4bbb00841f3cfb49713acf192e3cac562e3d260b0f3ceabc30bd7b25f53ba67a013c87a3e2bbaaf2263c37417a3c7484b63c0fd47d3c83f36f3df7c9d53bdb981dbdf112963b2295473ccdf5c1bb65a4423c3e59caba46581c3c9d6edebad1f5babc6cce85bba5329abddd9e61bb5af783bbf23a0d3b800ec9bbcf327c3cfb47c0bb7adc703c76c75f3d7a92623c127c1cbdc5e2503cebc76d3c65cde53bae1696bc308ee23c4855aebcb7a5dd3c1be8ab3d7244da3cc1c500bd7f44ce3c439bdb3cd47314bc56d26fbcc8d1c83c2ec16fbcd1b9d63c500b953da937be3ca49477bdfddbb53cfacab83ceca2813aa65c283b462b443b001b303b5219883b9b2c69bcd1fa3b3b6a4157bd25cff33a86d1733b7638723ca39df8bc31f0463d30040cbdb2063c3d17f85e3e7fff3f3dc244cdbc03b6353d22542e3d1d7dafba2e2490ba394d413c37a186ba2f70383c4d72613d5156d73ba180f4bcc1d2143c78010d3c4a3125bbe706003df3418d
bca9bd033d0efa2fbc06d36cbd8f558ebceb3734bd801486bc0cddb4bc406612bc4227823c76d9b2bb53197d3c3df981390c2a883cc7d35fbbce8969bd179ad8ba4928cbbbe034ec3ae66c83bba9e3313c9215cabbcacb4f3ceed13c3dda43483c768817bd7f9e373cde071f3c98165cbb49c9e23b0661753be990bd372c4fe93bdf4acb3c5385823b24b868bdfa2d5c3b1ed5863b0019fabcfbc00c3d48f179bcfe80fe3c86da8dbc39f5a3bdc936abbc0d3891bcd834b3bc837dc8bcd4ed0b39513a9cbbec06a13c93209cbb59daad3cf298bc3df1e5a73cecabb7bc324f923c1ba7a33c6b6d19393bc7093c3050a33a3b71bb3bbd4aaf3b9742ad3aeb0eea3a193e37bd3377a73b8e7ec53ae00107bbd15c83ba0c6ff63b3b5e7ebace75e43be425843d61f4d73b05e2e6bce263a93bc565dd3b5a00b0bb6f1083bcedb9ef3c5a57a1bc699eee3cdff4443d633de73c655e39bddcd2d83c8d39dc3cc398ca3e30f799be5f55f33e35739cbe2ad7e73ecbdf1ebf6f40e23ee2a67e3d5f78ca3e55f2df3e843761bece365dbefb42003f64bc77bef20d103f607440bff1b4f53eca680b3f8539df3e0234f43e937096bec00968be8efa083f20d277beea60163fddf130bf06b8fe3e82a3183fd38aee3e92c0f83e3bd408bef2924dbec2e4013f19ec5cbe02d80a3f419c52bff40afe3e84dc143fe2fdd83e29a5cf3ef803ca3e1fab9dbe42c6f83e51c09ebe1e74eb3ef50b12bf7624e73e8676123d7719cf3ecab8e53e5eb7c63e7f5d98be361cf83e2bd69bbe5d9fec3e885722bf52cfe53eda77783db381cc3e28c6e23e3374eb3e21d688bef36ff73e7ef38abe84aaf13e6be51fbfa438e73ecc10703e2cb9c53e5b45db3eac07f73ef8af6cbe561dea3eccd584bef612fd3e5af929bf0f33e13e1156af3ef905cd3e3e60d23e461ac53e17749bbe00ebf23efe389dbe19e5e73eeded1fbf2cebe23e24843d3d12e3ca3e9fa3e03ef894283e365371be2621e63e7c137cbeb5bb053ffb7c39bf5e23f13e8ffb1f3fba81c33e1a22d63e2c2152be8ad160bea9c9093fb7d78abe9693143f72bd2ebf65d9033fca360f3f29dee43e622edb3ed02b52be21f498bea13f063f359c9dbe4282013fedee39bf9a96f93ee155693e017ed83e3bc5f43e3c7cd23b157c01bc7e4d513c87f017bc8a814a3c50586f3d79f9433ca6121fbd1f83353c3356403c558cbc3a889492bbd9f87c3ceedacabb514c6c3c1c71883dfd31693cd94165bd84e1563c9c2c633c99e71d3c623cf63c0ff7fdbbdf43023d0695ebbbb0c997bdb63d0cbc5f589cbc72303abc9db459bc5bafa03bad5e10bb5660f73bdcc52abbeeef133c1acf313dcaa2f03b21c216bd7d90e73b8bf3fb3b22b61c3cc686603d2e8789
bc1d2a543defbdd5bc95240dbec970debc8b5c97bc527ad5bce64df7bc0242da3a826aa5bb7dcf4c3c1f06bbbbed98533c0837513dd80e463c641a39bd29263c3c2701403cce511d3a4960b53a0770df3b01385d3bbcc5d63bffc119b880c9b63bd29235bd6979a53b089fa23bcb94933b23be4abb0bed663c91f3873bd52a493cf181853d41b23d3ca3f032bd3bca323c79ae443c160176bbdc7390b90c90ca3b9ed10cbbd19afc3ba719f13cda02be3b8d495bbdd26fcb3b986ab93b1ee9fe3aafca30bb53910f3c6571cbb894f9443c1a9b583d53d2233cba7468bd2218353cb656043c33f7823b443281395f5f713ceb62153ad8ba4d3cd4e85f3d2a3b2b3c583b81bd26e3e33b301c1c3ce4f8be3b592629bb97fa283cef297dbb3aea443c0713373d7b733e3c7a7d06bd943d173c1d9d123ccacd4fbb259aef3cac93523c7170c73c8fff193c819dc93c9f3aef3bd2ef42bd71560d3c9fcb3cbaa084ea3cf5eb293d313ea83ab2a47e3d4ea4713c639f9ebc53d7383cc9f23abc912fa6b9c37fbfba2f5c9c3cf2435bbce756093d8dba75bc7472133d8371153e8f580e3df2a9efbc49b90a3dc2dcda3c89499cba2e1a35bb57957f3c8f919bbbd3a7803ce376303d2a46583c823910bdd59b4b3c41db153ca3245c3b5846893cd9bdd33aed3a8a3cea89583bfc1ba3bc04d0e3b9daf306bd3ad9fdba4bd896bb9dcd6a3bd53faabb5a18553c33e7adbbb04c4a3ce84a643dfe4c3b3cdceb0cbd9119273c58862f3c070a24bb0d62643c955fc1bb466b4e3c78d2a5bb6c98b53c5af18bbb2d2c4fbd14b3d8bb4af60abcd7999f3be7722bbb5628843c9ef3c2bbca4aa23ce11ba43d9de27e3c3ecc54bd07aa7f3c3b3c753c50ed473d1cc8b03ca4ae6fbca0e0ab3c6f8e68bc0b89c2bc709672bc8e96efbcad4884bc197e8cbc88ad0abb1e84a2bbfa37913c4fed96bb095b813c7cdd893d585a803c22fd07bdb11f593c44bf623c8126713fe675cabf69320240baa8d3bf09da0e407fa3a3bfffd00340ba7c15c0ed5ef73ffc72f73f7e93713f0e20cabff9a00540fd5fbfbfd2331040ad5f9dbf1a5f0540e7b31cc07341fa3fccadfc3fbf516bbe49806b3f7352b2bf8ed8843fb13fdabf85b205306eefc8bfd21f6c3f5901abbf05c0a6bf7c29adbe5b6f7c3f1ec0d3bfb1228b3f86f0dfbf7abe103ea357d4bf9bf1743fea1da1bf818ca8bf62c7b7bd4cde883fe0f3dcbf631c903fda1f03c05974bb2e3a86dabf7ef1793f5253abbfc846c1bf90be6cbea4208d3f01acdabf48ff983f7c3f06c0a786392e77e1d3bf4598853f2839adbf0c11c8bf800f5cbe6546673f99f4b3bf94f88a3f65a8e3bf60833a30ce94ccbf7344703fb92d8dbfa461a3bf7c3cb0be6d96353f448baa
bf220c4a3f9221b7bf1d15b2306e6f9cbf8e9b3d3f29fe84bf77d187bf0ca1c2be4005593faa5dcabf4de9683f0d7dd0bf0e34ae304cdcbebfe07d543f528799bfaa14b0bf8e2dcebefab07d3f7cafd3bffcd1833f5f70efbf1131902ed667bebfbdae763f5632a8bfcf1aa9bfc9989040e9b06dc0f01eb6401a0570c03bf6ad40c896d7c03634aa40b06a80c00ab49840326aa840b13f71bf87ec15401fb2f0bf0caf3040caf509c018e9f63f3cb8f8bf8e1ded3f0322ebbf6ba5e3bf28a371bf
+\ No newline at end of file
++363632656234393365653936383862396662623134303865623332333461363066633935643264653466326132626331353831633763636239616630353032653233326432366139383465346338376266393132363438333765656463366235613461313434346139333334396265306163333731646537376430643834323664623863366163343363643130313435636565623834363361316133393230363937653835653762353534626439663133633538623062353439646237616630333237666136663433386334626639643465303163653832333062643863333664336630376231643964316231663933656333386338656262303734376137313565643963396535653131303763646533393234333735613333633132353061393531333935623539643834373266303861633739373862366663376365383233326139383939363566373061373361613939336537366631353334346563313061373365663635633332663437653136383235343635623234366430373330366336363237623962656465373233346131343264313137653838643334616430346339363732613237623866636364313232613934343261643231386531356430343965303330306332326266336634626163333461643139653962326566303064343333623037313762303934626336363537616339343637306633633066333231613063623339333539376461316632653234353938616133616463623534356232346135666261616339646638373031356633306161626465643665633066616264623965656138613233353331303236363565616133323131653935643363353832366663633434626236376663386335666364333530336433353234383031636264353761616638663031613263353738326438656265623236653338323232386565626464393034633962373835363264656664616439353336623462376139333134656662373033626135336138333136643032636430653334303861616439333736306363383862306439623962646435383931613161656137356462666463346364333361623035396139323366643262383764363763323530613631653861653634666630333030316562323662346133633566316533346236663132363663663739623565366564343962313033386462653236663864613664303962623535363134336361326534666230613461643036356361623365363830623066373064386262326466363934623535633664373865313164633832393733616432306435663337366633306565386434613131613061636663616630643839623737366361623235653838653366303334613439393362313664373862
65323439626332336166376262623837353163376533353066636339313233323761613766333633343432623331373530376432376534623831333339383964633439653966303663303439623739346133626330646333373831393930316439326233646565623761646664356230336233363230383833616266643463626536336133346664656433373630343738343262373863646131373633653939636430616439393731306435313664323166313530636464393664613738643461356437333564343036316262353462313336663335313763323661363564343330623965363866616534323163616337613964356465333333663739313835623363316462346539643539636435303166306664366135313063333630336531336532346134306234626632313565333739356139613430643630613834353666623762363363326431316538313730316132333165383561666434623564623831616263316464353664323731373332656430323435663836393162646335646164333437366436663633353630373762316161316436333461393763343130396235396237333534376639343063316463386432316330663138373338386632643361386565636665653766643836373930326664346163333162613163396664663531626161326134333762666330393261316637333265326138663036363736333734326230373730613665323665333266326265356534376133373330393466336639623431383863383433643563346265613561326134616334626262646637666163376365333962323036323537333737313534313066323364623937303534623665623237306566636366623763623431373666383236383134326365613136653238623231646339353938373030656631646238653961643434653765313834316231346231323563636633356131376538636666613866303638323362643436636638353665346166323633623861616639363862613835306234643961666662656334393366613066653061383333653965343633653837613263636530633863303861313363636361346132613431363962336539613463313366633934633761363761623039396365393263633638656134376162616631363838616238613137323032663864613035313363353335396432373530363233663234343136346339663435333834656630366537353336306137366434646264303466303633663630386363636337383066316631323836353135393134646139373035323637346164303965323536666335393864636364346433383564616435383862366464326130336536313934636139386438373462333162366230
6231653235333362633134303864306436613862616330613136316136393132323137343831363364646362313231303362316131313733366362386566353536356266663035356633313633323533383133633963343136313330346561336663653535616438303565656331373466386437396361363764323037616334366664653362323562366531643531633464616561653330386532663361613839666339613234623762623864303635363239323732633437313135623561393362653837653361396438373339666232653861316665613661613331383532616631393033383937333463353139343637613931376332666164316634363432366132316638656362363461336166626237613736336638306232313930636165346330323165386264366337316432613138666663393538363731333639396239666362393336373737643238636639643761343231346131333463353261623633343633343866663966663365346237313164613933626232343636366137616430363063643632626237306335353536396233353566633932393964346162636636376563656530396161306664656561626436396635656632346534653861376262386430376664633866626464346666653730326362316232653965343564313031306138623562363130623230313963396265366130303837616232366634326335646531303539353531353137366562353736323737393436626630343433346330356264653562376231356532663139306164303361383836396333393339616236383439363661313661303665643535633961666563613431303466333534346564633533373866306361633035363431646633643837383532663863303765623439623930373061333833633261383932383764356133323539336632353130303266643661643531313731316638343339653162613735393261653833333065316432643865616634333936326236626432663237656439643439633335656534373338313766633338646231316433363030393633613732386331363461303139393161316436323764326235316237613364393537303630633634616137656338386231636561313937653562653562623466633136316330383533636239353561353063623464363362636438656639613233323237373363323039386566366463587754430153274178ab274197be33430e5f1545710254412d9ade3b12f1c13df163bf3d6fc4c73bf7cbb4385e4ed83d0000803f2cb90e3da388cd3cc0d6913df226b13b37ea123df90a1fbdd201113df1081dbcfb240f3d5f280f3dfeddc33cb6dbcc3ca32f563d8f1439bd8f63c43c80ad8abb5381bd
3ce8fd2bbc269fcd3cb97ec33c6ec41a3dc386f53c3d5d4d3d6cc298bb9f961a3db4aab0bc95381e3d5729653d44681d3dee941a3d90433fbc70e5923b3d7dbf3cff8520be91bf6dbc27c7c83b5d16f3bcca7df0bc45588fbccd02babc4734773bdea39e3b21d98cbc0efb3bbf915e7f3cbd8a693e81270f3df4800ebcfe88c53c1388723d8111eb3cc873ea3cd5229d3de385e33b7b1ff13c1f0716bde8d4ef3c7d7995bd3a59ef3c01ebed3ca01d9d3e56e4903efb8d87bf0ef454be0ec09f3e0fc3ba3d45b5853e4e5451be51c2933effca853e2089ba3e3c499f3eeca07abfd7e185bed66aba3e803e993c803c9f3e59ad82be5a3aad3e19029e3e40b19a3efa1c8f3e7f218ebf3d244abee2fa9b3e4c9ded3dafba803e967047be2b3c8e3e2442803ee242b13ec290993e4ed57dbf85b277beef3cb13e21ed283de6d4963eb12075be9ca4a43e7c86963e169ca43edf468d3e344288bf0a635fbe2d8da43e240a9a3dfe158b3e4e8862be131a983ede6d8a3e35359e3e6468843e6c3789bf41f853bea7ae9d3e4a86ec3d5b5a843e487c52be8037913e3569843e136ba13eb5b08c3e032d8cbfc8205abe124da13e52e7a33d2958873e76d758bed14e943eef50863e5b649a3e64bd9b3e616e88bfee8256be1afa993ed799e23de378833e478047be97698f3e063d833eefbea03e19e48e3ee8b784bf3e7294be1e05a03e55c5c83dbad5863e94de53bea379943e7209873ef5f9b23e1d759a3ea97e80bf32fb77becb04b33eb3e42b3d4049983e3eab73be7e12a63e9740973ecb00c43eb4cdd7be03fa8fbf8ad246be78a2b63ea3944e3e917d8d3e408b3bbeb9edad3e2c76933ee961ad3e6df49d3ec32481bf59c66ebe9b25ac3e42056c3df161933e17306ebe2feca03e2276923ea226ac3e6864953e869380bfca3a6fbed434ac3e93cf713dc559913e261366be5dcb9e3ecf8c8f3e8308303d1fa8f73c29b77a3d304de5bcc204303dd90991bd8200233dcf8b04bdfd2a2a3d3a5e223da196eb3cd8857e3cae62133d43e191bc3cc4eb3c88b65bbd0ec7d23c208ca8bc9c4ddf3c123fd23c388fd23c0543df3b72d31e3dd47550bc9099d23cb8078fbd9f0aba3c27607fbca930c73cbef0b83c4792f73c5ac3653ccce6d93cbd6898bcc4a1f73c17056dbd3133de3c99d0afbc2a31eb3cc8bfdc3cd64dc63c3223893c15f9053dcda1ceba5355c63cf64b8dbdd8bcac3c21517ebc3385ba3c287dab3cdf90923c831a0c3cc103253d785cd6bb6985923c6b0005bddf89743cc32410bce514873c708b813cd98bfcbbc6f28fbcc4494dbcd18fc53c7a33fcbb31fcc6bb639330bc1387963c4b7115bcbe7831bc34a0ca3cf3eaaa3b8c70263d7d688ebcd3baca3ced9ab2bd1af8bd
3ca95b8abcc5dfc03c4e19c33c3e57c33c6d7e153c88ce473dc2c661bd4149c33cec8e39bdbbb4a83c79665bbcb664b63c9306a73ca846bc3c5e51bf3a57a7143d803c5d3b311dbc3c632858bd48fc9d3cdbf91fbcdca2b03c3ff0a33c5ee8143d5b98953c26eba93ce07bc33c9536153d64a087bd8fe1083dffb8dfbc21610f3dd624083d3cca21bcc31ee3bc7851873df730ebbdd9c821bce66076bb12fd52bc0b40943c48e339bc50e855bc7f51a63cf3c6fd3b80c6103d93bb3fbc0340a63cf3e489bdc830913c236b5dbc1a3f9d3ca1b3903cf7b4b23c1cdf853bd0845c3dd0bd8abc58a0b23c3aab2abdf0a89c3c348e66bcdf4ea93cf25a9b3caae50b3d52dd443c2cffa93ce424c03c51d10b3d8acab3bdeb00013d3a51d0bc84f9063d065f003d4303a73c7df79d3bc4fc0b3d804fbd3b0a15a73c82906cbdfd808b3cb7842bbccd80993cbed8893ce798d43c2d2b863cd80e133d73b586bc5ca8d43c09f402bd32ecbc3c383f84bcf163ca3c89e1bc3cb1dad63cd182ed3bdcc55a3de3801ebc1072d63ccba849bd60c8b83cfb299bbcc59fc73caeaeb63c6acc833cc716c9bbef34493d4061b83a43ea833c0617b0bde43c503c90da2cbb06d46e3cb3b3513c783b063d3d5b38bbe97b9d3dc22094bceb19063d1c62c6bdad65f33c0a63bdbc4fe8ff3cacc4f13c930c5d3c8bfe113b84e0443d81407dbb78d25c3c206262bde4282d3c823e5bbbe042483ce69a2b3c3651013d9acd873c07ac0d3d5ba1323bbc47013db7acaebd5359ec3c7a63bbbc53a4f73c704aeb3ce846aa3e5bc5933e155c81bf15296abef633aa3ea748863d5e938f3e088a67be4e479d3e0e278e3e2bb19d3e9d73c43ecbae8cbfd56543be6633a03eb6180b3e27f68c3e98f639be73119c3e9841873e0105a03eae559e3e0be287bfb3ef5dbe5700a03ec1a5a33db862863ed8b558be0436933e5b2d853e25d0b83eb7c09d3e264674bf8ae981beabadb83e10c62b3cf36e9e3ea3a07ebe7779ac3e22e59e3e8849bd3e8fdda23e94197abf5a3b87be853abd3e4d05443c55cea13ec2b984be82e5af3e3a40a03ea7f0bb3eccadb1bde73988bf9c1b6ebe252eb93edc77e13d325e9f3ed03970beefb1ae3e9cf59e3e94a6bb3e6bcaa03eda1f76bf1a6d85bef1a1bb3e080c223c9007a03e8a9482be525dae3e36e99e3eae5f973e822790be10f892bf8f0233be833ca33e112c503e59c27f3e6d2933bea41b943e81d07f3e3a75ac3ef97c943e349b82bfaa346fbe7775ac3e049e3c3d55fe903ed9ba69be0e689f3e9451913e601fa03e984e9d3ecb5988bf948b6cbebd049a3e0f9bd53d10d1853ee89d4ebef969913eefae813ea184923e6f74dc3e613992bf1d5ef2bdb8d27f3e1098493e190a70
3e826ad7bdf025803ee45d7b3e4e7e9d3ed220883e5c208bbf56964ebe564a9d3e5c20903d36f6823e33b747be0584903e647a813ee3f0a43e3c67903ee40d84bf1f6796becae5a43e7c49963d06228b3e1fbc5fbe1ea6983e68ec893ea15acf3c2013803c957fc13cdb357cbcdb90cf3cce0829bd492fbd3c34ba92bc9e80c63cf106bc3c6b5df43c91ab813c30a6e03c1888a53c5246f43c8d535dbd6205dc3c530ca2bc0127e83c1688da3c2dcef23cc81c2e3c6e022d3dbccf30bc35e3f23c4535a2bd5c38da3ce452a3bcd8bbe73cdc2ed93c5461db3c592fa43c05e8fb3c297c91bca84fdb3c29090fbd0d14c63c39b0a4bc8274d13c64bec43c2556523ddde19c3c3ef5113dc426283dc24c523dd0d6d1bda03f453df5df2fbd2ce44b3dc474443de402d63c672c6e3a61ec333d449869bceef7d53cbde897bd36cbc23cbdc581bc735ecc3c6624c03cdecd76bb47bcac3b9691a33c510366bdc19e7abbd27a2ebc1e2c92bb7c2b45bd98b587bb2bd290bb5c3e963c45a9fd3b00fa2c3d438d63bbaa3d963c3adc24bd32a3783c9c1915bcd75d893cbc5d7b3c52f5a83c1af84a3c82710b3d91790fbc7ae7a83c338218bd88a28f3cef2737bc4d559c3c05508e3c3866d33c6f19a33c670c023d73a51bbb62b1d33c563756bd3d50bb3c8cdf3fbc7f15c83ca09bba3c22478f3ca39fb33b0baa2f3d68addbbb83538f3c1c751bbd3885703c7a6a0abc67ac843ca718723cb056acbce32bd5bc32f9023d116e57bd5083acbcf553963c867cbdbc79a0ff3cb7a2b3bc58bebebca86a783ca906db3bd6f40a3d025f813b8583793cab2f3cbd6e654e3cde6424bbaa50653c6e454c3c17dac43b6bded03a16284f3c34b671bcb01bd73b072529bdd84eae3b97b5f2ba0d11bd3bc523ac3bf743df3ca411a03c7685bb3cc426863c6e17df3cf8c583bdf4aac93c1c18a5bc4531d53c0d47c83c9deec73cd80c4a3cf40eff3c036e8bbc3ccdc73cb3e72fbd17acb13ce69587bc08b4bd3cdf4bb03c175ebd3cd36b493c7c316b3d30ad47bcb786bd3c3f509dbdf735a73cb05d77bc3fa3b23c1b0ca63cc85ce43c75c93e3cf0aa063d232389bcbc4ce43c529f0dbd1305ce3cb8c49ebc869cd93c6eaccc3c4338ae3c12f5883c8666ff3c6addb43c7b2fae3c74fd67bd75b4963cf9f24dbca51ca33ccfcf953cbac4c83cb966b93bcb04103db8775bbc0cb1c83c4ccd56bdf87fb33cb37785bc7b74be3cb042b23cdfd73f3cba0f253bbf531d3d9cb5bcbcc90e403c934dd2bc109c123c63d911bae344293cee1e103c2c318a3c83aed33b4d92203d132343bb04718a3c566e48bdf281683c41df13bc96017f3c14bf653c7dac8f3e97c0763e2cee8dbf6d172cbe18778f3e01e5153ed9746a
3e15a32bbe8f9e803e1879633e5496b23ebbcaa63e0f997fbf953778beb389b23ee506f73c824a973e89596fbe1648a53e0a41963ebf0ab63ec04c9b3ef9817abf6bdb7fbe53f6b53eaa16273d362e9c3ee6d079be157aa93e29f49a3ed4eb9c3e83ed9e3e529387bf3d0753be9d199e3e9e24d73d3e05843e214348bedeed8f3e69ef803e0ece983ec72a943e50528abf40573dbe1410983e779c1c3e0c19793e123b3bbe7cb28e3e5067743ecbd5a83e29af8f3e01b983bfa0ac68be68c3a83e1bf84c3d790e8e3efd7763be7adb9b3ea3a28c3ee9f0b93e46afa13eb4917cbfa22386be1cecb93ece69a93c6b029f3e3fb382beace9ac3e66ed9d3e40daae3e8b2ca03e5c6e80bf8af574be0f6eaf3e79c2903d73bd943ea09b70beba6ea13e3b3f933eb147a03e71c4893eadba89bf273f53be558aa03e3acc9f3d8cd3853e94344dbe1402943e1493853eb1f7a53e4927a13ed9e185bf65828ebe7b1fa73eb289bd3d6fb38b3e676860be2248983e7a998b3e7ac0a93e8f79963ed42987bfe8696bbe90e9a93e9c4a823d92e18f3e644868be26679c3e8bd68e3eea77903eeca0ba3e378590bf48673abe1194993eac2f1d3e1581713e01da3cbec9b68a3e15aa783ef257923e07d87c3ebc7e8dbf8e5d3ebef293923eb380cd3dfead6f3e96c53abe4c59853e01616d3e6c22c03ced16bc3b1a9b153dff7a20bdd42ac03c2b694cbd15c4a53cc91a4dbc1a32b33c21d8a33cbd319e3cb3918c3a7c041d3de002593c981a9e3c9854fdbd8432823c08e217bc5a3b913cbac1813cfdc58a3c94e459bb88d9143d612b2ebc45bb8a3c4ac952bd2ef5673c182717bcef037f3cd6356b3c2c2af83c3bd5053c20910d3da175abbc6a37f83c375461bd65abde3c962caebc6132ec3c9511dd3c9a7a8b3c93bcb13b6c3f2c3d4bf800bca5678b3c82b6febc872e683c97be20bccf39803c5689653cebc3cd3c39f40eba331c003dac9987bbc1a5cd3ca3d265bd6ec1b73ccd7b57bc7204c33c9f5bbc3c069fc1bbc6c4ebbc23a0793dd889f2bdbadfc1bbe438d8ba71950fbc4ef8943c1713efbb47f110bca31dc13cd28d1b3cee964f3dbfd759bb3c28c13c699ca0bda0dca43c6a6b3cbcf84fb33c701fa33ca7864c3ddb04fd3c777ef23bdae15a3d689a4c3d447f92bd9ff63f3dddbc2cbddad9463d26963f3dc42a9a3c19e7823b5f281e3d01d30dbc14ab9a3c23d31ebdf063843c9f9e49bc48938f3cdaeb823c5504b63c32e63bbca2e93a3d0c9b29bc4f9fb53cd414b1bde2869b3ca37b46bcf4c7a93c7ca69a3cb914d13cf389f23b3aac033d85e269bc2013d13c031c61bdd747ba3c36e690bc64dec53c3eefb83cac0b933c311910bcd170083dd00ec6ba9022933c069453bd95a175
3cafc30fbc3d8c873c0e24763c6f0a2b3c03f36d3b9cb5413daa6320bd12242d3c627eb7bc5996153c05fc58bd2ac5243cc50e133c6d8ea43c0a10ce3a172d2f3d30c81e3cbd80a43c0a997abd28dd8a3ce1fd44bc4e12983c287b893c47c1113d8e2c9f3ce295123d15bde43cbda6113dfec5c1bd9516023d5e06bebce9700a3dde4a013d9886c43c6e20273b3c5ff53cc30e60bc4fc1c43cdf7fe8bcf370ae3c391a7ebca88eba3c1daaad3c6b86903ce290163b03360e3da759fbbbb857903ce37414bd2d0d753c11921cbce514863cdbc7733c3523923cd22736bb60f70c3dbd9dbabb621c923cad34f2bce147703ce156f3bbce1c863cb9796e3ce7da993c4782053c7415213d99f8c9bb2ff6993cbab564bd718f813c0d3f24bc513d8e3cbfd4893c30e5033da286243ca09b733dc912b6bcdce5033d3e2cafbdd645ee3ca92abdbc2c6cfb3ce2d7ec3c22b7ef3c6a973b3ce0d1d63c558b6fbcecdeef3c880274bda28ad73c0d07adbc9fc1e33ccbfcd53cc9f9ae3ede4e893eeeb685bf989567be5e7aae3e0fcb2b3df540933e825563be0dfca13e0951923e36c2ac3e6958b63d81ee89bf51ce64be8d0fad3ed4cfc23da1fb8e3ecd7a5fbe26639e3eda248d3ea1b0a63ee3288f3e21bb82bf69f961bea282a63ec633753dc6108b3e31915cbe40f6983e69b9893e1b37a33e7df58a3e70fa88bfadc060bea237a33eab35993d2582883e808c5abec00c963ebdda873e7cca9f3ea7e08b3efb3e88bfe92457be91ac9f3e76048c3d8794843e740451bed5a3923e9eec833ec662933e9a4d983e07be8dbf040249befff5933e342efa3dbcb2783e430243be8a388a3ed773783e8ff49a3e00aca53e190c8fbfaea851be838c9a3ef21a093efa2d823ef71732be9e66863e835e7b3e0128b23edb7d0bbea3638ebf347061be70deb03ee88ce93d87d7933ec9fb61bec783a23e244c943e7515a73ea5728f3e2e8387bf187f68be690ca73e140d7d3d22ab8c3ed07462be70219a3ece628b3e65eeb03eaf5a923e1df27fbf7e716fbee9b1b03e4371593d10f4953eab676fbe8305a33e6ef8933e4062a13e12cfab3ef2a385bf43fc91bec2e6a03e049bd33de41c8d3e4a1456be3908963e6cc5863e64d2bd3ef2d7a33e33b17dbfd26c88be0fd5bd3ea6e5823c22f1a23e4fc485beb13bb13ea079a23e40e7bb3e08eea03e2d2176bfd80286be2bdabb3ebaf34f3c6e95a13e998e82be5f3eaf3e0456a03ef02fbe3cb7835f3a6190183d0f6801bc7275be3cc390afbd1caca53c2f1975bc5d59b23c7928a73c087b393d96d11b3dc91e893c54fe903dee79393d4d4a9bbd5a392d3df75c16bdd48a333d63962c3d2210b93c7fdf463c94bf383dfa52fcbb3c0ab93c98a223bd39509e
3c12295cbc7238ac3c5ab89c3c0a60a93cce1edb3b3cc6fe3cba1932bcd757a93cf7964abd6d40923cef4757bc78229e3cadd3903cf651de3cfee98e3c25dd503d8f2f9dbcc39cde3c76a96ebdeaaac63c995a9ebc5275d33c4531c73cb2150b3d7baf813c0c61903d2eb094bc541b0b3d006468bdd820fc3cabe0c7bc17b2043db0affa3c925b9e3cf9dd063c7c24593d9e9ae9bbb94c9e3c2fbf78bdbdad863cb47a26bcada1923c754c853c409eb63c83d7aa3b1a461b3d947342bc7ec7b63c7d5546bdee109e3cb2176cbc95eaaa3c08ca9c3c5892af3c78e1453c99010f3dc3a163bd22f9af3c00afd6bc4f1f9a3cbd0b50bcf6bba53cb989993ce353983c14be3e3cd90e2a3dff8d8ebdd685983cf087adbc208f833cb53383bdae108e3c97cf823c9607a43ca1a6933bad801b3d7c8894bb1f2aa43c4a9620bd2f8d8d3cbe9a38bc8bdf993c59f28b3c917fc63cbb0e173c7b66153d2a9bf13c2ceec63c2dfca8bd4532ae3c091f82bc2fadbb3cc07bad3c003bc83c5a9fa0bb9b0a183d73412ebcecacc63c6a48adbd0b78ac3c262250bc6f03bc3cf6e2a93ceb04d7bbf183bbbc4b88683dc410d1bdbe01d7bb6aabecbbe56619bcd335793cd9c801bc5b0d1cbc6e5c1a3ddb1ebf3c81c2863cb2ad113dac1b1a3d7d4ba3bde0e50e3dea50d5bc61b9143d43380e3d77d5cf3cdb7d3c3c41bf243d959285bc0ebbcf3ce85ed6bced6bb73c68d185bc4811c53c1d49b73cb3efd13c7733923c3594083d038a81bc98fbd13c502e40bdc842ba3c2b6194bceb3fc63cb3ebb83cc09bdc3c1ccc883c3e62043d22fb39bcc460dc3c6b2086bd90dbc03cd72655bc05e6ce3cfa2dbf3c2c2f8e3cea09d83b275b2f3d0211a8bbc4d88d3c110545bda4d16b3c3016f2bb1a25823c74a9683c341cbe3cd4740f3c67428a3c26b39f3a6c46c23c95a125bd282baf3c5f5b4fbbd274ba3cb679af3cc7829c3ce375df3bab4e313daad3d13a6a119c3c52c75fbd927b7d3c35b519bce6c88d3ca5c6793ca755983ce850843b5dd4043dd22b273c6592973c2fae55bd4b77853c3db180bb75c78e3cb3c4883cc96f9a3e96bc9c3ef0f887bf04d349be18239f3eb887be3d0219833eba374dbe2c38913e64c0843e4995aa3efc07923e30eb80bf49f16cbe5699aa3e489d823d1816903e748b67be8e729d3ee1ed8e3e07f4bb3e40b0a13e69fd7abfd56086be39d8bb3e2915803c10d9a03ee4d083be02ceae3e30e19f3e4797a73e87788f3edf1088bf87ae68bea18fa73e38c07c3dafec8c3eadd362beadfd9a3ec5838d3e38ceb23ea3b6993e14177ebfb09e7bbe3be2b23ed6bcee3cdb4c973e732075be889fa53e4d72963ef6dbb03eb8e29b3e2b527dbf121d8bbe1d24b13ebc1fc33c1b0496
3ec6b963be597ca33e8872953e8c37a53e42a5983e4e3988bfc6b669be1affa43e0fdfbc3d60978b3eebe361be9a42993e7ba18b3e5a289b3e2a96a33e4d3b87bffbb33ebe81ba973e08d6f63d38a5833e3e3137be80d98b3ea600773e60abab3e33f79a3e72ae86bf3b4b66be418bab3e82ab9b3dde9e8f3ec17767be7bb69c3ee3e68f3e20aaa93eb7a79a3ede4883bf95146bbe393da93e16324a3da3a18e3e003d65beb93c9c3ec4ec8c3ebc33973eee7aa23e73678abfeda138be796d973e070e1d3e66f77a3e644836be89ba8f3e75d6813ee98e9a3e95fa943ef28b87bfe66d4bbe38fc9e3e7f54fe3d5e8a823ea9e045be2fb3913e2676863e2e3c8c3e302aac3eb30b8fbf4df01ebece99833eb8b22c3e41e6593ed6b021be33d77c3eebe4653e64b6dd3c23c1213c08edd43cbf7d66bcddcbdd3c993b3ebdb445c43c3dc492bcf061d13cc981c33cb82ee93c1b7e5b3c7db3f83c503552bac311e93cce7b63bd788fd23ce40aa9bc9c33df3ceabdd03cdef0d43ca2f657b9c07b263d3f6a80bcaed9d43c201f63bd9787b83c338d74bc74d9c63c46b5b63c494aa03c443c7b3a3332fc3c2eecffbb0413a03c8e271ebd661b8b3c8bb044bc8610963c45d7893ce14ff33c1bbfa53a921bfd3c163974bcbafbf23c328f73bde18ada3c3e854abc3701e73cbf0cd93cbb66bd3c159717bb7d640e3d71bd79bbbb68bd3cfb1794bd1dd9a03c7eba3ebc6426af3cc50c9f3ced8b7d3ce15e453a9441f33cf626dcbb35747d3c65b892bda7a6533cbd8407bc7cdf693c950a523cbcc1be3c355c983a62b8443dcfc051bc8ba8be3cc388adbded13a53c791b67bcbf2fb23c7e82a33cc7e3083d3100063c956a093db14284bc4cc8083d18a252bd297cf43cd8a1bdbcbfaf013dc0bdf23c21e9cb3c1da5ea3b4b5b1b3d787a97ba6db7cb3c225c81bd42dbb33c232762bc26dcc23c75f3b13ccb47e5bba1c7c9bc7369843d6fb8d2bd3f8de5bbc2ac69bc6a9627bca1b5823cc3cc0cbc5db226bc39bb36b913854cbc64e94ebbf9b6963c26f937b9761305bce65e56bb1870903cea0de0baf6b063bb33920f3d7c73583c82d3733d0c4cc3bc548d0f3d195d90bdb191023d3f78d4bc301c093db6c5013d7f318a3c2fb774ba2c981e3d6f2a9e3cb4228a3c7c67b6bd21ff613cba810ebcda667b3c59435f3c3015483cab130c39dfff533d03441dbd5a5e473c6233b4bc348b1e3c24773abd800c353c9c1d1f3cf7a3673cc1903ab9f985323d79ba9d3a514a683c45a14abd3c8a393cfa88d3bb6bd5523c7193363c801d9f3c86f2fa3be68d1b3d29ec05bbf9759f3c595b65bd62ca853ccbeb19bcfd31933cf691843ce622953c589be53b8752243d0f6c32bc3536953c2d8242bce09f7c
3cac121abc19248b3c75057b3ca00fa83ce060d83a64c5223da97a8abbc459a83c43becfbca5838d3c7c3f07bc22ad9a3c16d88b3c9927b03c4339ab3bea03f83c5f2122bcd312b03c608389bd06f6963cb44f3dbc3bd5a33c263b983cb7e5ce3cc5bb25bcba13393d3ced51bc2a61ce3c3d2781bd3121b83c2d8483bca0f9c33c7fbab63ccec6123d9c6f913cd42fa43c0d5d4e3d43b0123dc4ab98bd2095073d8e66e8bc32490d3dee28073d820c9c3ee5e7883e21d58bbf115e41be6998a63e8851ed3d4fa3823eb1323bbe3b03973eb62b833e9a929a3e9a719b3ef6b091bfd34d19bed30c8d3e09a5383ea0ec683e812e21befbcf8e3ec5e07c3ed040a33e91ef8b3e5e9b84bf75778bbee639a33ee8b2ac3d1d35893e7d095bbe6b60963ececc873e97d3a53ead48913e532387bfd3e364beed95a53e7944933dc5ce8b3e9c415fbe34e1983ed2ad8a3e1927b03ee5cc983e371882bfc65b78be4824b03e1f154b3df0ce953e79de72bea0a8a33e2d9f943e3e56c03ef31ca43e1b6972bf83bf88be0e48c03edc8e833ba500a43ef16385be98c7b23e34f9a23e544ca63e32dc7e3eb13a85bfafc55fbe54e6a53e1712b23d84678b3e234a5dbe45a5983e97808a3e42a3a83ebc55a23e10d084bf46ce64be9d7aa83e1b14a43d60648f3e808565be19669c3e62108e3e56d6a13eb755893e75b487bf84315dbe1bc7a13e3cde703d574f873ea04b56be5aeb943e5df0853e223d933edf64b73e54898bbf43cc20beb317943e6760343e0e13753eb3c526beccd9853efe237c3e591bad3e081d643ecd7b86bf56b86dbeab68ad3e41b2583df740913eaace67be689e9f3e288a8f3e93cea73e83a9923e6b6483bf176e69be48aca73e617c983d43498e3e029265be25fb9a3e87ae8c3e32d8a23ebf44843ef16887bfb02450be4c5fa33e7fecc33d8c718b3e782054be3aed963efd5c893ed197083dcaba823c6113003d1cae0f3cacb5083dd46f9fbda316f83cdbe4ccbc0474023d09acf63cf8a6a83c5da90d3ac2861d3dfc1c47bad185a83cebcf9abd2455903c090e47bc1b9f9c3c726e8f3cad5dbe3ba90d22bb26f5cbbac2f77ebbf18ebe3b8fbe61bccbda5d3b5457a93aa94f9b3b58725c3b8563c53cea9e0f3cdd7cec3c546664bc054cc53caf5188bdbf3caf3cd0847abc8750bb3c382cae3c96589c3c4c5e353bd5f21e3d3ec868bc097d9c3cdd365fbd631c863cb8fa28bc6aca923c92dc863c38d4023d838a433c574ec93c22538e3c5bcc023de06d7dbd2e20e93c2f91b5bc52f4f73c71cce73c4f17753c9c991dbbbba1763d53f09fbd9d567a3c9d630cbc6c514a3cb4e034bdee6c563c1e974c3ccdfd873cb2e7b4bb7f3f483da5fca1bbf09c873c78507bbdd2295f
3c5f6b0ebc2687783c5f4c633c704d5c3c304cb33a9edd243d3909ac3add755c3cc30a80bdfd9e2b3c4af28bbb8f92453c8e9e293c339ba23c4f2696bbaa0e013d8d6e8fbbd490a23ce0cb93bd10dd893cd1033fbcebb0953cc9c6933cd4cfbe3c3d45ef3bd61b503d5fa76dbcd091be3cd92976bd2c77a73c6d477ebc56edb33c1d5aa63c3ae62a3d0a49a63c9efba73c13784c3d7a9f2a3d4e4ca7bdcad11e3db19005bd80fb243dde7b213db4ecc33c6c3e6f3bb1b8113d2bb68ebcd3b2c33cdf003fbd2ca4ac3ccd8e73bc81f4b83c7849ab3cd2a2c03cfc709339a2e0043d9f8a02bc674cc03c0e9944bd126bab3cb92c62bc6320b93c78eaaa3cca0b2cbc73e0e2bccd76913d2c9cedbd9d162cbc8d589dbbc6f962bc9c75a43c0cda46bc6fdb65bc15af763c68bd443b2e02223d90eb9c3cfa48763c99de89bd36dc4c3c1a0adfbb84da673ce7a74f3ceefdd93cfa98193cab141c3d4446613a71c4d93c1d159ebd08c2c03c465f82bc8be7ce3cd619bf3c282a963c5267223b7598253d5f769dbba94d963caddeeebc2d6b7d3c791924bc711f8b3c3272853ca4c5283b893cacbb26eeadbcb0603b3c27ad293ba5c885bcef5a083ae687fa3b27d5c93af2ad903a4cc7bc3c922b3d3c99ed353d0af795bb3addbc3c1dee89bd5326a13c16fa39bc0d1faf3cb1729f3cd0c2963cdb520439c743163daface8bb63d8963c72d358bdc0457b3cf4bdd0bb57548a3c9f707b3ce93eb03c516d2f3be5ea483d377824bcfc9ab03c24e597bd7440973cd88540bc90d1a43c7fce953c730ab03e7ed7a03e649b7fbfea7277bef776b23ee665563de5b2973ea9a975be64cea43e75dc963e1318963e01e98d3e0a908abf14b24abe0ada973e6d38cf3dfc3f7c3eab5145be6db48d3e8a187d3ea856aa3e3eeb943e098885bf636f6dbe9845aa3ec25c833d759c903e90d668be010d9e3ec46c8f3edf6ba13e098b973ee31087bf41665cbed9aba03e21aab53d048f863ed47554bed218943e8e2e863e7252a13e0b488c3e1ef985bf1e7a52bed3a0a13e0f0a923dc2ef863e4b774dbe0c93943e5c35863eb193a83e6f21973e641e85bff43869be96d4a83ecd648c3da6e68e3e92f464bec6209c3e80b58d3ea3f1ad3eb81e7e3ec19082bfa91d59be150bb53e0be5be3dc248943ee22866be46f7a23ee39b933e183ca03e868b873e1f0e89bfa7ef57becf28a03e59f58a3db348853eb2f651bed0f7923e290c843e66aaaa3ec9f78b3efe8886bf3ea95dbeadb1aa3e10eca03df3848e3eb8f563be7c409c3e33188e3e5086b83e91d987bedd0c8dbfdd9944be8ad8b13ee3c0353e5f5a953e190947be5457aa3ec113923e6b8d983ed309843e1c9a8cbfb46768be3855983e9869ec3db8227f
3e0c164cbed8718c3e141e7e3e6b12be3ed1c9a23e500277bfa39387befff0bd3e2217f63bee7aa23e6d3784be1cdcb03ef3e2a03efebe9f3ec75c873e660a88bf546053be14b69f3e2d29853dfa5e843e830a51beb669923ee522833ed5f47d3c76dc8f3bab3a1f3ded72fdbb3be77d3cf4e503bd9edc4f3c1cf6debbaa59683cf778523c00b3ea3cd168373c2a152d3dc6670e3c4babea3c54a297bd60ffcc3c7c7996bc8041dc3ccf46cd3cd515a23c5d27983b1db9543dd402bcbb583ea23c993a54bd696e873cb54f1bbb634e953cb9c1853c112cf43ca1daba3ce697123d55674dbda931f53c184700bd0ea9dc3c3ca4b5bc630be93c6b22db3c428f083d9b33ca3c588b9b3dc59ebebc94b4083d08b764bdf676f63c7756bdbc0530023dc1fbf43c90ffbb3cc097523b7b69ea3ca90736bc6981bc3c5ab0fdbcd2f0a33c8a5b76bc1caab03c2b6ca43cf022953c81d5a5bac9373e3d626923bb3645953cf4dd58bdfafe7b3c2e1d30bc0dd8893ced6f7d3c6a1bcd3c1ff5543ccbeec83c1834f73caf07cd3cf0065dbdeccab73c856485bc0957c33c806ab63c38359f3c60ba3a3b3042253d8eaa443cc0ca9f3c50463abd6ccf813c26430eba7da2913c09c38a3c6a92b03c1852ce3bbcbae73c768b84bcfcbbb03c7bab71bd17b19b3c5c4068bc4b6da63ce9859a3cdbb3eb3cab0b733b4cbb053d146391bcfe6ceb3ca8806cbd100dd33cfeb3b0bcbf29e03c48f9d33ce397a43cc819043c0102583db3ab2fbc04c4a43c963166bd0319933ce3085abc9d119f3c5e04953cfcd6183d0831663c9cca383d6a098bbc8830193dccdf8fbded6e083dca0eaabca265113d73d9073d33b1373dcc15fb3cc9e0363c1692963df9a7373d8b2993bde8a9293d8abf11bdf1c7303d11d4283df46f653c1aab2bbcdbc1573d40e0e4babece673ccdbc5cbdcca1353c5092b2bb5d8a503c55ce323c0049d43c3a96863c4b83cd3c55c24d3d70b5d43cf82395bd9359bb3c3cd575bc2748c83c06d6ba3cdd90a63cbc29fd3b9fca203d928e5ebcd6d0a63cfc7b61bd96478e3cde524fbca0e99a3c67b18c3cf720153df7f3c63c58cde33c91acc0bc6815153de88348bd93ec073de7c2d7bcede90e3dc719073dfc32dcba0cccd8ba7f520f3b705e9ebdad49f9ba6f3624bc9c576abb2e7a213c846338bbb17f67bb9c86ebbb2e80c1bc7528853dcc27f6bd9108ebbb0db074bcc59127bc06ea863cdc230ebc1aa31fbcaef2f03cbdb8433cf0f3fb3cbc398dbce23df13c294209bd57edd63cd970a3bc5f74e43c5f97d53cc6e0f43b263ca03b333a563df14285bde690f53bfa138abb2adfb13bcf476fbd80ccd43b9cb0b03bdc17a23ef5c88f3efff784bf69036abe752da23e15b0863d8f6e88
3e32d259be758b953e431d873edc94b13efa2e9a3e72c780bfeb4281be3f9cb13ecf97363d9abb973e3ae279be51f9a43ea3b4983e4b5dab3e1ef4a03e18c185bf09666fbe89a1ac3e4846a63de1ef933e533e6dbe0a45a23e67bb933e7dcfb33efb11623e2b7384bfeebd54be3dd7b73ed5da9a3d0995973e0d055abe6469a93ef0e9993ec156a03eb186a33e1d4586bfd30f4ebe68999f3e821cd73d6ec3883e844d56be4742953e4ecc873e0d9d9a3e034f9c3e8a148cbfae2543bef7979e3e80d30a3e191f7d3e130448bed044903e62cd843e9721ab3ed76b913e2a5b83bf4ea18ebe5221ab3e6ba97a3de55c903e5cf566be3e6b9d3e78ae8e3e118c933e9fe9af3eb02f89bfd9d23cbe0cd9923ee37a073ec27f7c3e629338be808b893ef4c9823eedd4883e646f8d3e256995bfa1c925be98e1883e7eb2193e4e7f693ef5a325be5d467e3e009e6e3e32b4963ea7d58b3e0b758cbfcaa23bbe4b09953e186ae43d63f7783ec1613abe51bf873eee75753e2df1b73e59b59c3e00ed77bf176881be2fcfb73e8aa5023d55fe9b3ec86c7cbe9c84aa3e77be9a3e11f1a53ef114703eace587bf6d174bbe8258ac3e6a80f73db853893ee53154be25599c3e9281913e1a16983ecfd8ae3ecd138cbf996a47beddbb983efc4ff73d81387f3e681e49be65048c3e69b67c3ea8cadf3c28886e3c1307163d12f153bc72f3df3c4a7411bd18b9c53c287593bca1fdd23c4829c73c4a6e863cfaea7fbbdd75153da83e8bbbe749863cebb61dbd4300593c517fddbb0f98733c7af6583c0643e93c9dc787bb7be7553d801e88bc6611e93c8091a3bd098cd13c5bbda5bc684cdf3cc0dbd23c5d2c2c3d69548d3c0781c53c500ba33cc20a2c3d0393bdbda3421c3d70a1e1bc4bb6243dc4751b3d064a9b3c78d86a3a64dc1a3d7659193dfc439b3c5ae2a4bd5235813c4e7c0bbc5bbd8e3cdb707f3c25bcf23c49910b3c7d24253dc8acb4bcf699f23c2d5da7bd31a7d83c2912a2bc5437e73c7852db3c77f0c13cd0691d3b8678fb3c6094a33b33ecc13c4ec984bd3eeba53c14ed72bc013cb53c53b6a53cb0e92abb330ba8bcfb3a653ddc610ebe12dc29bb930b8839d9c7b2bba3eb373ca3787bbb902cb8bb43efd93c352ee83be083123dee1f8fbc66d7d93c57c392bc1001c23c36c58ebc0c95cf3c3e72c03c63b0ae3c889ecd3b4c7f143ddd0d40bccd99ae3cc33668bd25c6943c407656bc44e0a13c7e39933c0b68f83ce7dc593c7010f23cab366dbc4059f83c74bd8dbda48bde3ce4708dbcac03ec3ca118dd3cbb37c43c7473503cda1ded3c38583cbcd168c43cb401d7bc0550ac3cd0107bbc347db83c6ed9b33cca52a7bafadb3fbc4fa884bc8ab8953ccd17a8ba5a4d8fbcdb5694
bb8895723c216b3dbb035c9abb1817ae3cd69cef3a4086193d7eae30ba6d33ae3c4dd42cbd0c80923c48ecbdbbf87ca03c7bb1903c1c9e813c68c040bc13403c3d74af21bda092813c584f23bd697d553ced45bbbbe61f6d3c0ed4523ce9d0c43c4944153c78c6103d209eedbb62e7c43c6ce52ebda354a83c510045bc745cb73c0685a73cd8a3823c1a4cb7bb733f333d30f0f13a44ee833c8cb91dbdf4885a3c54a6c0bba98f6f3c5a3a5a3cf714f13c10568f3c19b8393c8e53b83c249cf03ceeb8b8bded0cdc3c046facbceca5e63c59cada3c2dee9f3cb6e7ab3ad28c2d3dbd9f29bce43aa03c6d824dbdb36d883cd7f238bcf200973c8bec883c4dbee53c205f273b946af43cc5fe443b6ca3e53caa017dbdacbfc93ce4f38fbc0a3bd93c34d5c83ce4e46d3cc4784aba4a8e323d779a253b86e76d3c60f15bbd5dd0393c213e9dbbd1ba543c35dc373c6aaa293db865963c2eee063dc46f17bdfcce293df449c7bd8ab91e3d330905bd0985243dc79b1e3d6d82b33effd4973eafb089bf0c185bbed15cb43eab30603d6623993eb08774beee85a73e1581993e172e9d3eda51f53d0d938fbfcdda97bedad49b3eaedbec3d684d803e12de45bee7918e3ef8eb7d3ec828973ebd9f973ea62b8bbfa44749be47d3953e8b70073eefaf863ea4233dbeb66f953e8177843e6f06a83e0861a63ef8d587bff78551be175da93efb5dae3d14258d3efc625fbe2db69c3e688a8b3e2230a33e1a7b8f3e864289bfd23960be6f91a33e8f737c3d14c5893ef3245dbe3cca973e70d1883e5b4d9c3e2066953e4f0c8bbf4f3557be77639e3ea49cac3d9c05833e0d9351bea7b0913e31e1813e3edba43ebfd38d3e262284bf163b6bbe03daa43e878a5c3d3c8e8a3e61395abe780e983e6268893ec55bb33e540d9c3e3b8281bfcaba81beb853b33e40af323d37cc993eb6847dbe41b7a63e78c7983e2414993ef5f892bec46d94bf75fa1bbe10e59a3e7c9a7b3ea53a7f3e354107beed0fa63e1253933e638ba23ed9a78d3e7a8789bf98845fbe9a6ca23e0d587f3dd78e873e0eef53bead09953e26ee853eade2bb3ec607a13e6a437cbff23186bef9e9bb3e1204ed3cd250a03e4b9b82be7d7fae3ef81b9f3e3137b43e0c4d9c3ee8ce81bf636081be0f22b43ebb53263d0f4c9a3eabc37cbec4dda73e52e0993ee3d4b13e9ba9873eb3bd7cbf99a772be6eb7b13e20e7443d66d3953e3bdd6abe492ea43eb922943eb8170d3d72c6a73c9126893dbdc6bfbcdf0d0d3dee9b4fbda53c003d6c50d9bc9dc2063d54ecfe3c0aa2ce3c99dc5f3cd2dfa93c894494bcf151cf3c0b713bbd1753c03c8bc49ebcde53c83cf8bcbf3c827ecd3c8cb95e3caee7f53c00795c3d3d8acd3cb4ef8bbd5002b3
3c65d719bc3063c43c849cb83c55bdf83c4199863c7347633de43eb2bcfda6f83c79d4acbd9c5fe03c6c1ca1bc0008ed3c818ce03c2a63e23c0cc7a93c2588243d4809e43bf840e23c72d2cfbde883cc3c10eea3bcb72bd83c312ccb3c0449dc3c0f33f93b13cd243d419c45bc51f2db3c1c3d6fbd25b5c13cbc5e80bcf463cf3ce12cc03cd2199f3cb9a19d3b4482163d6c2bd0bbbc039f3cdf3306bd045b873c320a40bc024a933c5054863c9a5f9e3cca664d3bd7d3833d867369bd68de9e3c444a15bd4664863c67ad64bdb24a9b3cd9188d3ccae4a83cf4dbd13b06e4283d8a06fa3890cba83c949384bd2a1e8f3c05734cbc91689f3c96cb903cd1b4a93c94e4b33b7fd80f3d107f1bbcbb3baa3c68f10cbda64b923c592b56bca5849e3ca5de903c0334c13cce18b93b9f3f673dfa8631bca040c13c7daf86bd1ad4a63cf01572bcc302b53c6d42a53c2f36df3c9e22083cb016543d6d4c5cbcde08df3c42599dbdbc80c53c625f6ebc0591d23c4dd9c33cae59aa3cb2fc4e3b762c253d5761d03b7052aa3c3e618ebd4475913c859144bcc44a9e3c8df88f3c09aa863c47492b390b0fad3c17ce0b3d4656863cc1715ebde2766b3ce3cb05bc2905773c2235693c4e8ac63c0b59873c52cac93c5624b0bc8ab7c63cd514e2bc159fb23c646693bc63e9bc3c9409b33cb999833c0c33d93a95c2383df7b702bd367f833c061ddcbc0cca533c9ae4a5bb22fa6c3c695e513ccf92c83c9331823c65f1633dd4252fbc4f91c83cc1dd66bddd28ac3ce66a6abc93b2ba3ce865aa3c7050e33c31e34c3ce11c2c3d588d80bc4780e33c35f740bdd66bca3ca64099bcda44d73cae0ac93c0e3eb13cad4de33bebcc033d0a95b7bbb52fb13cbac576bd6831963ce05f09bca6f7a33c01ba943cc31f8a3cfc90e23bf2b0373dab405d39740b8a3cf67968bdb619603c647ee5bba66b7b3cc13d5d3ccd4ebabbd308b5bce3fe7d3d5385debd4859babb20e1cbbbbeb613bcbe229b3c4e8ae8bb26b811bc5d2fa13c032a2e3c0932063db9098ebcd252a13c6dbe5bbdd1e5903c48455bbc9e599b3cdbec8f3c88a39b3e65cf8a3eab9388bfa45656beb5739c3e26f6d43db006833e655e54be46568f3e0a13823e66d9a23e4a06943e1f0688bf09014bbecca1a23e89d19d3d34e3883e303b46befc1d963e7a31893e5835ad3eda26943e4e2884bfd92171be1235ad3e30d8133d1d47923e18446bbe2469a03ef82b913e26fdab3e23af963ecb9880bfaa8178bedaffab3eeb355f3d2a76913e28fc69be2c9b9e3e75a98f3eb27cb13ee4d1983e6f3581bf45917abe437eb13e3952293dcdbd963e6e1d74bed99ca43ee083953e3643a53e6b4e9f3e76e187bfc68e38be1fb9a23eda5abe3d558589
3e977658be86e0963e2ccc8b3e3f47a33e92508d3eba0d88bf421a3abed58ea43e1bc1cd3d1015863e9e263fbeadef973e0f9d863e0875a53e23963a3e15e289bf9e524dbe4068a73e9f73e33d41028a3eaa9b4dbe8f6e973e614c8b3e7aa18a3ec72da33e84148fbfdc6e26beefca8e3eff033a3ef00d6f3eef7d28be002f803ec78c753e9a969b3e7619933e8e1b88bffca361be72929c3e64c3bc3d9dae823e435a4fbe25168f3ef835833e196bb13e6b369b3e198e84bf3d9180bed558b13e1453423d598e963e699974be397aa43e57d2953e5776a63e1be09f3e20ef80bfcda98cbee320a73e78e49a3d3e348f3e8e2964be6b1d9f3e69fd903e5109b93e36f5a03e069f7dbf872683be0203b93ed9d3a33c853a9e3ec0d380be58abab3ecfa79d3eb1eccb3c7bf5c53badcf1b3dfdf334bc3ee4cb3ca96835bd490bb23cf3ed79bc995bbf3c5279b03c5e1cb33cf6fb1e3a396f6b3dea2909bd1332b33c0de8cfbc1db79a3c5c0913bd1a3ca73cd736993ceca63c3c0c594abb63f8113d3d3b67bb14cf3c3c544b1bbd0b6e173c05b17cbb1aa62a3c3daa163cfc91093d7771953c37be013d3301e53b86ae093daa78c6bdd65cfd3c157ed1bccb9d043d5850fc3cb72106bc838eedbcfe987e3dbf3905beab7206bc638e69bbdf963abc6a8b963c04811ebca9ba3cbceae4f33cc580443c506d253d2ff2c1bb67f8f33c21bcb6bdbe22de3c43d8bcbc003ceb3c55aee43c05598b3caf968ebae5445a3d54c920bc2a2c8b3c0db078bd4896663c06d315bcca697f3c6903653c81ba553cbed6e83b2a243c3c5c26fc3b6c514a3c6340f2bc85cd233c9ecfd83954ca433cc67e2a3ce5bbac3c679e803b0900fa3c1a5f3ebc6eb3ac3c6df84ebd5ff5963c71cd6ebc23f9a13c41a5953c27929e3cc454133c9ea5093d77c040bc56ad9e3ce8acebbc2f72883c290a48bc484c943c845a873c91fdc03c46d3513c5b383a3d84fb79bc240ec13cbcf554bd7d10a73ce28d55bca73ab43c7686a53cebbfdb3cfe20e53bd2ff573de6ac73bca6bddb3c21d78ebd3d06bf3c4bba7abcea96cd3c0b54bd3cd449013cb35c20bc98b8023c3963b13bd39f013c04c9dcbcb219a83b37f0823bd29dd53b8b80a23be52df53cb222773b5f5b1d3d6ed672bce134f53c668f85bd5393da3cc44aa9bc9f62e83c1135da3c687a433d84fdf93c422c9f3cf78cac3d6d7f433d6009bdbdf7b0363d29111fbd456e3d3d58a0363d9255dc3ca950523bb778dc3c95428fbc794adc3c68cd46bd5314c13c1e81c0bbd319cf3c808ac03c1c6a8b3c39817e3a474e2e3dfa0f173caa568b3c2f2aaabd9d6c693c61ef07bcbf43803c9fad663c3faabf3c4a3d163c74e3f73c3d8c3dbb84a9bf3c83c79dbdd605a7
3c4a5e7fbc0c06b43c0cf9a53c2dc4da3cab84673c7056263d788f7dbc85a6da3c275122bd36cdc03cdfab94bc7cdbcd3c7141bf3c6edcf83b0712a6ba17098c3c483a0c3cd2f7fc3b0a50c6bc64deb73b5549193c21b3d83b5f52b43b78dec53c1403173c8fe6053d619053bc4aa0c53cd2bb41bdc518ac3c25ac7bbca3abb93ced81ab3cfda5e63ca2a9483cace2063dc2aa83bc2094e63cbaa92dbd3360cf3c1c7fa4bc9f76db3c3a8dce3c760a00404305a83fd2f79ebe24efc6bf787c004029cbdbbfae13e33f0c8ab3bfcb70f33f5832e13f1ff80140400cac3f9ce15dbea00ec9bf13c10240de3eefbf6f9de93fdb33c1bfe38ff83fce98e83fae8c064079bfac3fa3fc31be2af4c1bfd2ef06400bf0f3bfb7f4eb3f2aeebcbfab92fd3f0814ea3fe37673bfee12e2bed21e0f33fbc90f3f452076bfd3531a3f75943cbf80b40b3f71ef63bf163540bfcb0c8dbf5bde10bfd63996335731d83e56598abfa866263fd2ed4abffbbf0c3f39c278bfea2b5ebf503473bfc52da5be4f8d0837740d063f679f63bfaf30103fb6743fbfe75bfd3eba575dbf9d933abf5ffe77bf4abd08bf68351734e60ed93eb95b6cbfb45b073f8c2940bfbb28f03e49a74fbf0b6b36bf38ed89bfc856a8be7bf03234db750b3f2b707dbfc4831c3f3c3949bff020043faad57cbf11ff5bbf2a1651bf483afcbe862ec633063ce63ec33f55bf22a7083fb47f28bfae94f53e10fe4dbf690c33bfb3aa65bfb58025bf72474e34b17be83e6f5f81bf514a183f88b341bf01df013f36055cbf275a3ebfdf8d8e40dc717140d84d39c1f13b4bc06d6a8e40910d63c0accc7040928e45c01ab483401b416e40865ed3bf71c0a8bf29b7a53f8bcdfd3f034cd8bf4fb7e23f674cc9bf8eb0ff3fa92cd5bfa593cebff54f50bf
+\ No newline at end of file
+diff --git a/gcc/opts-common.cc b/gcc/opts-common.cc
+index ee94723fc..e88038a16 100644
+--- a/gcc/opts-common.cc
++++ b/gcc/opts-common.cc
+@@ -993,90 +993,6 @@ opts_concat (const char *first, ...)
+   return newstr;
+ }
+ 
+-static int
+-handle_lto_option (unsigned int lang_mask,
+-		   unsigned int num_decoded_options,
+-		   unsigned int argc,
+-		   const char **argv,
+-		   struct cl_decoded_option *&opt_array)
+-{
+-  int ret = 0;
+-  char *compiler = xstrdup (argv[0]);
+-  char *lan = strrchr (compiler, '/');
+-  if (lan != NULL)
+-    lan ++;
+-  else
+-    lan = compiler;
+-  if (strstr (lan, "gcc") != NULL)
+-    {
+-      opt_array = XRESIZEVEC (struct cl_decoded_option, opt_array, argc + 2);
+-      const char* lto_flag = "-flto=8";
+-      decode_cmdline_option (<o_flag, lang_mask,
+-			     &opt_array[num_decoded_options]);
+-      ret++;
+-      const char* ltopartition_flag = "-flto-partition=one";
+-      decode_cmdline_option (<opartition_flag, lang_mask,
+-			     &opt_array[num_decoded_options + 1]);
+-      ret++;
+-    }
+-  else if (strstr (lan, "g++") != NULL
+-	   || strstr (lan, "gfortran") != NULL)
+-    {
+-      opt_array = XRESIZEVEC (struct cl_decoded_option, opt_array, argc + 1);
+-      const char* lto_flag = "-flto=8";
+-      decode_cmdline_option (<o_flag, lang_mask,
+-			     &opt_array[num_decoded_options]);
+-      ret++;
+-    }
+-  if (compiler)
+-    free (compiler);
+-  return ret;
+-}
+-
+-static int
+-handle_machine_option (unsigned int lang_mask,
+-		       unsigned int num_decoded_options,
+-		       unsigned int argc,
+-		       const char **argv,
+-		       struct cl_decoded_option *&opt_array)
+-{
+-  int ret = 0;
+-  bool flag_Om = false;
+-  bool flag_hip09 = false;
+-  for (unsigned i = 1; i < argc; i ++)
+-    {
+-      if (strcmp (argv[i], "-Om") == 0)
+-	flag_Om = true;
+-      if (strstr (argv[i], "mcpu=hip09") != NULL)
+-	flag_hip09 = true;
+-    }
+-  if (!flag_hip09 || !flag_Om)
+-    {
+-      return ret;
+-    }
+-
+-  const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
+-  if (ai_infer_level)
+-    {
+-      return ret;
+-    }
+-  const int argc_hw = 6;
+-  int64_t argv_hw[argc_hw] = {
+-    global_options.x_param_simultaneous_prefetches,
+-    global_options.x_param_l1_cache_size,
+-    global_options.x_param_l1_cache_line_size,
+-    global_options.x_param_l2_cache_size,
+-    global_options.x_param_prefetch_latency,
+-    global_options.x_param_ipa_prefetch_distance_factor};
+-  int64_t output_pred = get_optimize_decision_from_optimizer (
+-			  argc, argv, "hip09", argc_hw, argv_hw);
+-  if (output_pred != 1)
+-    return ret;
+-
+-  return handle_lto_option (lang_mask, num_decoded_options,
+-			    argc, argv, opt_array);
+-}
+-
+ /* Decode command-line options (ARGC and ARGV being the arguments of
+    main) into an array, setting *DECODED_OPTIONS to a pointer to that
+    array and *DECODED_OPTIONS_COUNT to the number of entries in the
+@@ -1218,9 +1134,6 @@ decode_cmdline_options_to_array (unsigned int argc, const char **argv,
+       num_decoded_options++;
+     }
+ 
+-  num_decoded_options += handle_machine_option (lang_mask, num_decoded_options,
+-						argc, argv, opt_array);
+-
+   if (lto_skip_stat == NEED_TO_SKIP)
+     {
+       const char * nolto = "-fno-lto";
+diff --git a/gcc/opts-global.cc b/gcc/opts-global.cc
+index 843ace666..79c8a963d 100644
+--- a/gcc/opts-global.cc
++++ b/gcc/opts-global.cc
+@@ -311,7 +311,9 @@ decode_options (struct gcc_options *opts, struct gcc_options *opts_set,
+ 		  global_options.x_param_l2_cache_size,
+ 		  global_options.x_param_prefetch_latency,
+ 		  global_options.x_param_ipa_prefetch_distance_factor);
++		  
+   const char *tune_native = getenv ("GCC_AI4C_TUNE_INFO");
++
+   if (tune_native != nullptr)
+     {
+       prepare_native_tune_str (tune_native);
+@@ -350,6 +352,31 @@ decode_options (struct gcc_options *opts, struct gcc_options *opts_set,
+     }
+ }
+ 
+/* Handle LTO options according to the model inference result.  */
++void handle_lto_options(struct gcc_options *opts, char* compiler)
++{
++  const char *model_infer_level = getenv ("AI_INFER_LEVEL");
++  if (model_infer_level)
++    {
++      char *lan = strrchr (compiler, '/');
++      if (lan != NULL)
++        lan ++;
++      else
++        lan = compiler;
++      if (strstr (lan, "cc1") != NULL || strstr (lan, "lto1") != NULL)
++        {
++          global_options.x_flag_generate_lto = 1;
++          global_options.x_flag_lto_partition = LTO_PARTITION_ONE;
++          global_options.x_flag_lto = "8";
++        }
++      else if (strstr (lan, "gfortran") || strstr (lan, "cc1plus") || strstr (lan, "f951"))
++        {
++          global_options.x_flag_generate_lto = 1;
++          global_options.x_flag_lto = "8";
++        }
++    }
++}
++
+ /* Hold command-line options associated with stack limitation.  */
+ const char *opt_fstack_limit_symbol_arg = NULL;
+ int opt_fstack_limit_register_no = -1;
+diff --git a/gcc/opts.h b/gcc/opts.h
+index a43ce66cf..04ee995da 100644
+--- a/gcc/opts.h
++++ b/gcc/opts.h
+@@ -386,6 +386,7 @@ extern void decode_options (struct gcc_options *opts,
+ 			    location_t loc,
+ 			    diagnostic_context *dc,
+ 			    void (*target_option_override_hook) (void));
++extern void handle_lto_options(struct gcc_options *opts, char* compiler);
+ extern int option_enabled (int opt_idx, unsigned lang_mask, void *opts);
+ 
+ extern bool get_option_state (struct gcc_options *, int,
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc-shadow_non_unique_init_const.c b/gcc/testsuite/gcc.dg/struct/dfc-shadow_non_unique_init_const.c
+new file mode 100644
+index 000000000..428c2720e
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc-shadow_non_unique_init_const.c
+@@ -0,0 +1,56 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    unsigned long a;
++    unsigned long b;
++
++    for (unsigned i = 0; i < MAX; i++) {
++        arcs[i].a = 100;
++    }
++
++    for (unsigned i = 0; i < MAX; i++) {
++        printf("a: %ld, b: %ld\n", arcs[i].a, arcs[i].b);
++    }
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = a;
++    }
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        printf("a: %ld, b: %ld\n", arcs[i].a, arcs[i].b);
++    }
++
++    // Should fail because of 100 is the init_const.
++    for (unsigned i = 0; i < MAX; i++) {
++        arcs[i].a = 100;
++        arcs[i].b = 100;
++    }
++
++    for (unsigned i = 0; i < MAX; i++) {
++        printf("a: %ld, b: %ld\n", arcs[i].a, arcs[i].b);
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Init const is not unique" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc-shadow_original_not_dfc_candidate.c b/gcc/testsuite/gcc.dg/struct/dfc-shadow_original_not_dfc_candidate.c
+new file mode 100644
+index 000000000..5fd15eb20
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc-shadow_original_not_dfc_candidate.c
+@@ -0,0 +1,43 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++    int c;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    int c;
++
++    for (unsigned i = 0; i < MAX; i++) {
++        arcs[i].a = 100;
++    }
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld", &c);
++        arcs[i].a = i;
++        arcs[i].b = i;
++        arcs[i].c = c;
++    }
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        printf("a: %ld, b: %ld\n", arcs[i].a, arcs[i].b);
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail checking dynamic shadow fields" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc-shadow_two_fields.c b/gcc/testsuite/gcc.dg/struct/dfc-shadow_two_fields.c
+new file mode 100644
+index 000000000..f5c081709
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc-shadow_two_fields.c
+@@ -0,0 +1,42 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    unsigned long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    unsigned long a;
++    unsigned long b;
++
++    for (unsigned i = 0; i < MAX; i++) {
++        arcs[i].a = 100;
++    }
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = a;
++    }
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        printf("a: %ld, b: %ld\n", arcs[i].a, arcs[i].b);
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found shadow field: b" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_calloc_not_in_start_func.c b/gcc/testsuite/gcc.dg/struct/dfc_calloc_not_in_start_func.c
+new file mode 100644
+index 000000000..c7abf1019
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_calloc_not_in_start_func.c
+@@ -0,0 +1,43 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++void  __attribute__((noinline)) start_func() {
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++}
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    start_func();
++
++    for (unsigned i = 0; i < MAX; i++) {
++        if (arcs[i].a != arcs[i].b)
++            abort();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding fc arrays" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_compress.c b/gcc/testsuite/gcc.dg/struct/dfc_compress.c
+new file mode 100644
+index 000000000..52fd343e0
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_compress.c
+@@ -0,0 +1,44 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++arc_t* stop_arc;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    stop_arc = arcs + MAX;
++
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++
++    arc_t* arc = arcs;
++    for (arc = arcs; arc != stop_arc; arc++) {
++        if (arc->a != arc->b)
++            return 1;
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a dynamic compression field: a, input var: a" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a dynamic compression field: b, input var: b" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_dead_field.c b/gcc/testsuite/gcc.dg/struct/dfc_dead_field.c
+new file mode 100644
+index 000000000..fd1a01841
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_dead_field.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-fipa-struct-reorg=3" } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        if (arcs[i].a != i)
++            abort();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a dynamic compression field: a, input var: a" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Found a dynamic compression field: b, input var: b" "struct_reorg" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_fclose_not_in_start_func.c b/gcc/testsuite/gcc.dg/struct/dfc_fclose_not_in_start_func.c
+new file mode 100644
+index 000000000..34e236701
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_fclose_not_in_start_func.c
+@@ -0,0 +1,44 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++FILE *file;
++
++void  __attribute__((noinline)) start_func() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    unsigned long a;
++    long b;
++
++    file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++}
++
++int main() {
++    start_func();
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        if (arcs[i].a != arcs[i].b)
++            abort();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding fopen/fclose stmt" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_ignore_ref_after_startpoint.c b/gcc/testsuite/gcc.dg/struct/dfc_ignore_ref_after_startpoint.c
+new file mode 100644
+index 000000000..01b325dcd
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_ignore_ref_after_startpoint.c
+@@ -0,0 +1,45 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++arc_t* stop_arc;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++
++    // stop_arc should not be recorded as fc_ref.
++    stop_arc = arcs + MAX;
++    printf("test\n");
++
++    arc_t* arc = arcs;
++    for (arc = arcs; arc != stop_arc; arc++) {
++        if (arc->a != arc->b)
++            return 1;
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Add fc_ref" "struct_reorg" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_local_ref_ptr.c b/gcc/testsuite/gcc.dg/struct/dfc_local_ref_ptr.c
+new file mode 100644
+index 000000000..6d51de778
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_local_ref_ptr.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    arc_t* stop_arc = arcs + MAX;
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++
++    arc_t* arc = arcs;
++    for (arc = arcs; arc != stop_arc; arc++) {
++        if (arc->a != arc->b)
++            return 1;
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Local usage not handled" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_multiple_call_path_to_startpoint.c b/gcc/testsuite/gcc.dg/struct/dfc_multiple_call_path_to_startpoint.c
+new file mode 100644
+index 000000000..86de30568
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_multiple_call_path_to_startpoint.c
+@@ -0,0 +1,47 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++void __attribute__((noinline)) read() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        if (arcs[i].a != arcs[i].b)
++            abort();
++    }
++}
++
++int main() {
++    int flag;
++    if (flag)
++        read();
++    else
++        read();
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail finding fc paths" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_multiple_variable_same_array.c b/gcc/testsuite/gcc.dg/struct/dfc_multiple_variable_same_array.c
+new file mode 100644
+index 000000000..705b43ac5
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_multiple_variable_same_array.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs1;
++arc_t* arcs2;
++
++int main() {
++    arcs1 = calloc(MAX, sizeof(arc_t));
++    arcs2 = arcs1;
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs1[i].a = a;
++        arcs1[i].b = b;
++    }
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        if (arcs1[i].a != arcs2[i].b)
++            return 1;
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Array assigned to multiple variable" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_not_direct_assign.c b/gcc/testsuite/gcc.dg/struct/dfc_not_direct_assign.c
+new file mode 100644
+index 000000000..4d4e994d1
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_not_direct_assign.c
+@@ -0,0 +1,49 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++arc_t* stop_arc;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    stop_arc = arcs + MAX;
++
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++
++    arc_t* arc = arcs;
++    for (arc = arcs; arc != stop_arc; arc++) {
++        // a = a + 1, Value of field a may be outside the closure, and we can't guarantee the validity of its boundary
++        arc->a++;
++        arc->b = arc->a;
++    }
++
++    for (arc = arcs; arc != stop_arc; arc++) {
++        if (arc->a != arc->b)
++            return 1;
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] Fail checking closure" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_variable_allocated_twice.c b/gcc/testsuite/gcc.dg/struct/dfc_variable_allocated_twice.c
+new file mode 100644
+index 000000000..fba127265
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_variable_allocated_twice.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++#define MAX 16
++
++struct arc {
++    unsigned long a;
++    long b;
++};
++typedef struct arc arc_t;
++
++arc_t* arcs;
++
++int main() {
++    arcs = calloc(MAX, sizeof(arc_t));
++    if (arcs[0].a == 0)
++        arcs = calloc(MAX, sizeof(arc_t));
++    char line[101];
++    unsigned long a;
++    long b;
++
++    FILE* file = fopen("data.txt", "r");
++    for (unsigned i = 0; i < MAX; i++) {
++        fgets(line, 100, file);
++        sscanf(line, "%ld %ld", &a, &b);
++        arcs[i].a = a;
++        arcs[i].b = b;
++    }
++    fclose(file);
++
++    for (unsigned i = 0; i < MAX; i++) {
++        if (arcs[i].a != arcs[i].b)
++            abort();
++    }
++
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "\\\[field compress\\\] fc_array allocated twice before start-point" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+index 34606d025..452a8c606 100644
+--- a/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
++++ b/gcc/testsuite/gcc.dg/struct/struct-reorg.exp
+@@ -77,6 +77,14 @@ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/sfc-bitfield_*.c]] \
+ gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/sfc-shadow_*.c]] \
+ 	"" "-fipa-reorder-fields -fipa-struct-sfc -fipa-struct-sfc-shadow -fdump-ipa-struct_reorg-details -flto-partition=one -fwhole-program"
+ 
++# -fipa-struct-dfc
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfc_*.c]] \
++	"" "-fipa-reorder-fields -fipa-struct-dfc -fdump-ipa-struct_reorg-details -flto-partition=one -fwhole-program"
++
++# -fipa-struct-dfc -fipa-struct-dfc-shadow
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/dfc-shadow_*.c]] \
++	"" "-fipa-reorder-fields -fipa-struct-dfc -fipa-struct-dfc-shadow -fdump-ipa-struct_reorg-details -flto-partition=one -fwhole-program"
++
+ # All done.
+ torture-finish
+ dg-finish
+diff --git a/gcc/toplev.cc b/gcc/toplev.cc
+index bdbd4de63..9cda18601 100644
+--- a/gcc/toplev.cc
++++ b/gcc/toplev.cc
+@@ -88,7 +88,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "ipa-modref.h"
+ #include "ipa-param-manipulation.h"
+ #include "dbgcnt.h"
+-
++#include "ai4c-infer.h"
+ #include "selftest.h"
+ 
+ #ifdef HAVE_isl
+@@ -2250,6 +2250,9 @@ toplev::main (int argc, char **argv)
+ 		  UNKNOWN_LOCATION, global_dc,
+ 		  targetm.target_option.override);
+ 
++  char *compiler = xstrdup (argv[0]);
++  handle_lto_options(&global_options, compiler);
++
+   handle_common_deferred_options ();
+ 
+   init_local_tick ();
diff --git a/0377-oeAware-Fix-.GCC4OE_oeAware-section-dup-in-namespace.patch b/0377-oeAware-Fix-.GCC4OE_oeAware-section-dup-in-namespace.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bc785181c2435cc1ff7bc6eb1df59c9c08095d3c
--- /dev/null
+++ b/0377-oeAware-Fix-.GCC4OE_oeAware-section-dup-in-namespace.patch
@@ -0,0 +1,55 @@
+From 81600a0743fd889363339bc1463a56bae84cda60 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Sun, 18 May 2025 11:12:12 +0800
+Subject: [PATCH 1/2] [oeAware] Fix .GCC4OE_oeAware section dup in namespace
+ main
+
+This resolves an ICE "section already exists" caused
+by incorrectly creating the .GCC4OE_oeAware section
+for non-global main functions.
+---
+ gcc/testsuite/gcc.dg/oeaware-main-in-namespace.cpp | 10 ++++++++++
+ gcc/varasm.cc                                      |  8 +++++---
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/oeaware-main-in-namespace.cpp
+
+diff --git a/gcc/testsuite/gcc.dg/oeaware-main-in-namespace.cpp b/gcc/testsuite/gcc.dg/oeaware-main-in-namespace.cpp
+new file mode 100644
+index 000000000..5e44f4a1c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/oeaware-main-in-namespace.cpp
+@@ -0,0 +1,10 @@
++/* { dg-do compile { target *-*-linux* *-*-gnu* } }  */
++/* { dg-options "-foeaware-policy=1" }  */
++
++namespace radar8446940 {
++int main () {
++ return 0;
++}
++}
++
++/* { dg-final { scan-assembler-not "GCC4OE_oeAware" } }  */
+diff --git a/gcc/varasm.cc b/gcc/varasm.cc
+index bdf02edea..5134c0c1f 100644
+--- a/gcc/varasm.cc
++++ b/gcc/varasm.cc
+@@ -8572,11 +8572,13 @@ handle_vtv_comdat_section (section *sect, const_tree decl ATTRIBUTE_UNUSED)
+ void
+ create_oeaware_section ()
+ {
+-  /* To prevent inserting repeated segments and data,
+-     we only perform the insertion in the file where the main
++  /* To prevent inserting repeated segments and data, we only perform
++     the insertion in the file where the GLOBAL main
+      function is located.  */
+   if (!cfun || TREE_CODE (cfun->decl) != FUNCTION_DECL
+-      || !DECL_NAME (cfun->decl) || !MAIN_NAME_P (DECL_NAME (cfun->decl)))
++      || !DECL_NAME (cfun->decl) || !MAIN_NAME_P (DECL_NAME (cfun->decl))
++      || (DECL_CONTEXT (cfun->decl) != NULL_TREE &&
++	  TREE_CODE (DECL_CONTEXT (cfun->decl)) != TRANSLATION_UNIT_DECL))
+     return;
+ 
+   int flags = SECTION_STRINGS;
+-- 
+2.33.0
+
diff --git a/0378-Add-alignment-propagation-localize-array-array-dse.patch b/0378-Add-alignment-propagation-localize-array-array-dse.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3b5a05628918ea3d88ed3a692568c614c79b7867
--- /dev/null
+++ b/0378-Add-alignment-propagation-localize-array-array-dse.patch
@@ -0,0 +1,4997 @@
+From 83045e6ab5f34873af07ce6a9defd4dd7c9a1f8f Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Tue, 20 May 2025 17:34:43 +0800
+Subject: [PATCH 2/2] Add alignment-propagation, localize-array, array-dse
+
+---
+ gcc/Makefile.in                          |    3 +
+ gcc/common.opt                           |   12 +
+ gcc/config/aarch64/aarch64.cc            |    4 +
+ gcc/ipa-alignment-propagation.cc         |  478 ++++
+ gcc/ipa-array-dse.cc                     | 3317 ++++++++++++++++++++++
+ gcc/ipa-array-dse.h                      |  263 ++
+ gcc/ipa-localize-array.cc                |  614 ++++
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc |   37 +-
+ gcc/ipa-utils.cc                         |   44 +
+ gcc/ipa-utils.h                          |   15 +
+ gcc/passes.def                           |   14 +
+ gcc/timevar.def                          |    3 +
+ gcc/tree-pass.h                          |    3 +
+ 13 files changed, 4806 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/ipa-alignment-propagation.cc
+ create mode 100644 gcc/ipa-array-dse.cc
+ create mode 100644 gcc/ipa-array-dse.h
+ create mode 100644 gcc/ipa-localize-array.cc
+
+diff --git a/gcc/Makefile.in b/gcc/Makefile.in
+index c7a503235..070e5e456 100644
+--- a/gcc/Makefile.in
++++ b/gcc/Makefile.in
+@@ -1451,6 +1451,9 @@ OBJS = \
+ 	inchash.o \
+ 	incpath.o \
+ 	init-regs.o \
++	ipa-alignment-propagation.o \
++	ipa-localize-array.o \
++	ipa-array-dse.o \
+ 	ipa-hardware-detection.o \
+ 	internal-fn.o \
+ 	ipa-struct-reorg/ipa-struct-reorg.o \
+diff --git a/gcc/common.opt b/gcc/common.opt
+index 4cd2574e4..ed4696b7a 100644
+--- a/gcc/common.opt
++++ b/gcc/common.opt
+@@ -2069,6 +2069,18 @@ fipa-matrix-reorg
+ Common Ignore
+ Does nothing. Preserved for backward compatibility.
+ 
++fipa-alignment-propagation
++Common Var(flag_ipa_alignment_propagation) Init(0) Optimization
++Propagate alignment of local variable's address
++
++fipa-localize-array
++Common Var(flag_ipa_localize_array) Init(0) Optimization
++Transform global calloced array to be specific function local.
++
++fipa-array-dse
++Common Var(flag_ipa_array_dse) Init(0) Optimization
++Array dead and redundant store elimination.
++
+ fipa-reorder-fields
+ Common Var(flag_ipa_reorder_fields) Init(0) Optimization
+ Perform structure fields reorder optimizations.
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index 80242ddf8..faa445e26 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -19090,6 +19090,9 @@ extern bool lang_c_p (void);
+ static void
+ override_C_optimize_options (struct gcc_options *opts)
+ {
++  opts->x_flag_ipa_alignment_propagation = 1;
++  opts->x_flag_ipa_localize_array = 1;
++  opts->x_flag_ipa_array_dse = 1;
+   opts->x_flag_ipa_reorder_fields = 1;
+   opts->x_flag_ipa_struct_reorg = 5;
+   opts->x_struct_layout_optimize_level = 5;
+@@ -19154,6 +19157,7 @@ override_CPP_optimize_options (struct gcc_options *opts)
+   opts->x_param_inline_unit_growth = 256;
+   opts->x_flag_cmlt_arith = 1;
+   opts->x_flag_if_conversion_gimple = 1;
++  opts->x_flag_find_with_sve = 1;
+ }
+ 
+ static void
+diff --git a/gcc/ipa-alignment-propagation.cc b/gcc/ipa-alignment-propagation.cc
+new file mode 100644
+index 000000000..3f14818ae
+--- /dev/null
++++ b/gcc/ipa-alignment-propagation.cc
+@@ -0,0 +1,478 @@
++/* Copyright (C) 2019-2022 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "tm.h"
++#include "tree.h"
++#include "tree-cfg.h"
++#include "tree-pass.h"
++#include "tm_p.h"
++#include "basic-block.h"
++#include "bitmap.h"
++#include "function.h"
++#include "cfg.h"
++#include "cgraph.h"
++#include "gimple.h"
++#include "gimple-iterator.h"
++#include "gimple-pretty-print.h"
++#include "gimple-ssa.h"
++#include "ipa-utils.h"
++
++class alignment_propagator
++{
++public:
++  alignment_propagator (cgraph_node *node);
++
++  void execute ();
++
++private:
++  void propagate_params_alignment ();
++  void transform ();
++  size_t get_param_alignment (unsigned param_index);
++  size_t get_var_alignment (tree var);
++  bool check_assign (gimple *stmt, auto_vec &worklist,
++		     size_t &alignment);
++  bool check_param (tree t, auto_vec &worklist, size_t &alignment);
++  int get_param_index_from_ssa (tree var);
++  size_t get_arg_alignment (cgraph_node *caller, tree arg);
++  size_t new_alignment (size_t orig, size_t new_value);
++  bool pow2_or_zerop (size_t value);
++  size_t abs_value (tree t);
++  bool candidate_stmt_p (gimple *stmt);
++
++private:
++  cgraph_node *node = nullptr;
++  hash_map alignment_map;
++};
++
++alignment_propagator::alignment_propagator (cgraph_node *node)
++  : node (node)
++{
++}
++
++void
++alignment_propagator::execute ()
++{
++  if (dump_file)
++    {
++      fprintf (dump_file, "Start to rewrite function: %s\n",
++	       node->dump_asm_name());
++      dump_function_to_file (node->decl, dump_file, dump_flags);
++    }
++
++  cfun_saver save (node);
++
++  propagate_params_alignment ();
++
++  /* If no alignment is propagated, there is no need to continue because
++     the remaining cases are covered by constant propagation.  */
++  if (!alignment_map.is_empty ())
++    transform ();
++}
++
++void
++alignment_propagator::propagate_params_alignment ()
++{
++  unsigned i = 0;
++  tree param = DECL_ARGUMENTS (node->decl);
++  while (param)
++    {
++      size_t alignment = get_param_alignment (i);
++      if (alignment)
++	alignment_map.put (param, alignment);
++
++      param = DECL_CHAIN (param);
++      i++;
++    }
++}
++
++void
++alignment_propagator::transform ()
++{
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++	  if (!candidate_stmt_p (stmt))
++	    continue;
++
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "Rewrite stmt:\n  ");
++	      print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
++	    }
++
++	  tree lhs = gimple_assign_lhs (stmt);
++	  tree new_rhs = build_int_cst (TREE_TYPE (lhs), 0);
++	  gimple_assign_set_rhs_from_tree (&gsi, new_rhs);
++	  update_stmt (stmt);
++
++	  if (dump_file)
++	    {
++	      fprintf (dump_file, "To:\n  ");
++	      print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);
++	      fprintf (dump_file, "\n");
++	    }
++	}
++    }
++}
++
++size_t
++alignment_propagator::get_param_alignment (unsigned param_index)
++{
++  size_t alignment = 0;
++  for (auto e = node->callers; e; e = e->next_caller)
++    {
++      if (e->caller == node)
++	continue;
++
++      if (gimple_call_num_args (e->call_stmt) <= param_index)
++	return 0;
++
++      tree arg = gimple_call_arg (e->call_stmt, param_index);
++      size_t arg_alignment = get_arg_alignment (e->caller, arg);
++      if (!arg_alignment)
++	return 0;
++
++      if (!alignment || arg_alignment < alignment)
++	alignment = arg_alignment;
++    }
++
++  return alignment;
++}
++
++size_t
++alignment_propagator::get_var_alignment (tree var)
++{
++  size_t alignment = 0;
++
++  auto_bitmap visited;
++  auto_vec worklist;
++  worklist.safe_push (var);
++
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++      if (TREE_CODE (t) == INTEGER_CST)
++	{
++	  size_t value = abs_value (t);
++	  if (!pow2_or_zerop (value))
++	    return 0;
++
++	  alignment = new_alignment (alignment, value);
++	  continue;
++	}
++
++      if (TREE_CODE (t) != SSA_NAME)
++	return 0;
++
++      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      gimple *stmt = SSA_NAME_DEF_STMT (t);
++      switch (gimple_code (stmt))
++	{
++	  case GIMPLE_PHI:
++	    for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
++	      worklist.safe_push (gimple_phi_arg_def (stmt, i));
++	    break;
++	  case GIMPLE_ASSIGN:
++	    if (!check_assign (stmt, worklist, alignment))
++	      return 0;
++	    break;
++	  case GIMPLE_NOP:
++	    /* If we reach a default def, try to get the argument's alignment
++	       from caller node.  */
++	    if (!check_param (t, worklist, alignment))
++	      return 0;
++	    break;
++	  default:
++	    return 0;
++	}
++    }
++
++  return alignment;
++}
++
++bool
++alignment_propagator::check_assign (gimple *stmt, auto_vec &worklist,
++				    size_t &alignment)
++{
++  if (gimple_assign_single_p (stmt) || gimple_assign_cast_p (stmt))
++    {
++      worklist.safe_push (gimple_assign_rhs1 (stmt));
++      return true;
++    }
++
++  switch (gimple_assign_rhs_code (stmt))
++    {
++      case NEGATE_EXPR:
++	worklist.safe_push (gimple_assign_rhs1 (stmt));
++	return true;
++      case MAX_EXPR:
++	[[fallthrough]];
++      case MIN_EXPR:
++	[[fallthrough]];
++      case POINTER_PLUS_EXPR:
++	[[fallthrough]];
++      case POINTER_DIFF_EXPR:
++	[[fallthrough]];
++      case PLUS_EXPR:
++	[[fallthrough]];
++      case MINUS_EXPR:
++	worklist.safe_push (gimple_assign_rhs1 (stmt));
++	worklist.safe_push (gimple_assign_rhs2 (stmt));
++	return true;
++      case MULT_EXPR:
++	break;
++      default:
++	return false;
++    }
++
++  /* For mult_expr, rhs2 must be an integer constant, so we can simply take
++     this constant as alignment.  Otherwise, return false.  */
++  tree rhs2 = gimple_assign_rhs2 (stmt);
++  if (TREE_CODE (rhs2) != INTEGER_CST)
++    return false;
++
++  alignment = new_alignment (alignment, abs_value (rhs2));
++  return true;
++}
++
++bool
++alignment_propagator::check_param (tree t, auto_vec &worklist,
++				   size_t &alignment)
++{
++  int index = get_param_index_from_ssa (t);
++  if (index == -1)
++    return false;
++
++  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
++    {
++      if (gimple_call_num_args (e->call_stmt) <= index)
++	return false;
++
++      tree arg = gimple_call_arg (e->call_stmt, index);
++      if (e->caller == node)
++	worklist.safe_push (arg);
++      else
++	{
++	  auto *align = alignment_map.get (SSA_NAME_VAR (t));
++	  if (!align)
++	    return false;
++
++	  alignment = new_alignment (alignment, *align);
++	}
++    }
++
++  return true;
++}
++
++/* Find param from VAR and return its index.  Return -1 on failure.  */
++
++int
++alignment_propagator::get_param_index_from_ssa (tree var)
++{
++  if (!SSA_NAME_IS_DEFAULT_DEF (var) || !SSA_NAME_VAR (var))
++    return -1;
++
++  tree param = DECL_ARGUMENTS (cfun->decl);
++  int index = 0;
++  while (param && param != SSA_NAME_VAR (var))
++    {
++      param = DECL_CHAIN (param);
++      index++;
++    }
++
++  return index;
++}
++
++/* Get alignment of an argument if it is calculated from the address of a
++   local variable.  */
++
++size_t
++alignment_propagator::get_arg_alignment (cgraph_node *caller, tree arg)
++{
++  if (!caller || !arg)
++    return 0;
++
++  cfun_saver save (caller);
++
++  tree base = nullptr;
++  tree offset = nullptr;
++
++  /* Extract base and offset.  */
++  if (TREE_CODE (arg) == ADDR_EXPR)
++    {
++      base = arg;
++      tree op0 = TREE_OPERAND (base, 0);
++      if (TREE_CODE (op0) == MEM_REF)
++	{
++	  base = TREE_OPERAND (op0, 0);
++	  offset = TREE_OPERAND (op0, 1);
++	}
++    }
++  else
++    {
++      if (TREE_CODE (arg) != SSA_NAME)
++	return 0;
++
++      gimple *stmt = SSA_NAME_DEF_STMT (arg);
++      if (!is_gimple_assign (stmt))
++	return 0;
++
++      if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
++	offset = gimple_assign_rhs2 (stmt);
++      else if (!gimple_assign_single_p (stmt))
++	return 0;
++
++      base = gimple_assign_rhs1 (stmt);
++    }
++
++  /* Check if ARG uses the address of a local variable.  */
++  if (TREE_CODE (base) != ADDR_EXPR)
++    return 0;
++
++  tree decl = TREE_OPERAND (base, 0);
++  if (!decl || !VAR_P (decl)
++      || decl_function_context (decl) != current_function_decl)
++    return 0;
++
++  size_t alignment = LOCAL_DECL_ALIGNMENT (decl) / 8;
++
++  /* Update alignment if there is an offset.  */
++  if (offset)
++    {
++      if (TREE_CODE (offset) != INTEGER_CST)
++	return 0;
++
++      auto value = abs_value (offset);
++      if (!pow2_or_zerop (value))
++	return 0;
++
++      alignment = new_alignment (alignment, value);
++    }
++
++  return alignment;
++}
++
++size_t
++alignment_propagator::new_alignment (size_t orig_value, size_t new_value)
++{
++  if (!new_value)
++    return orig_value;
++
++  if (!orig_value || new_value < orig_value)
++    return new_value;
++
++  return orig_value;
++}
++
++bool
++alignment_propagator::pow2_or_zerop (size_t value)
++{
++  return !(value & (value - 1));
++}
++
++size_t
++alignment_propagator::abs_value (tree t)
++{
++  gcc_assert (TREE_CODE (t) == INTEGER_CST);
++  auto value = TREE_INT_CST_LOW (t);
++
++  return std::abs (static_cast  (value));
++}
++
++bool
++alignment_propagator::candidate_stmt_p (gimple *stmt)
++{
++  if (!is_gimple_assign (stmt)
++      || gimple_assign_rhs_code (stmt) != BIT_AND_EXPR)
++    return false;
++
++  tree var = gimple_assign_rhs1 (stmt);
++  tree rhs2 = gimple_assign_rhs2 (stmt);
++
++  return rhs2 && TREE_CODE (rhs2) == INTEGER_CST
++	 && TREE_INT_CST_LOW (rhs2) < get_var_alignment (var);
++}
++
++
++static unsigned
++ipa_propagate_alignment (void)
++{
++  auto_vec candidate_nodes;
++  cgraph_node *cnode = NULL;
++  FOR_EACH_FUNCTION (cnode)
++    {
++      if (!cnode->real_symbol_p () || !cnode->definition
++	  || !cnode->has_gimple_body_p () || cnode->inlined_to)
++	continue;
++
++      cnode->get_body ();
++      candidate_nodes.safe_push (cnode);
++    }
++
++  for (auto *node : candidate_nodes)
++    alignment_propagator (node).execute ();
++
++  return 0;
++}
++
++namespace {
++const pass_data pass_data_ipa_alignment_propagation = {
++  SIMPLE_IPA_PASS,
++  "alignment-propagation",
++  OPTGROUP_NONE,
++  TV_IPA_ALIGNMENT_PROPAGATION,
++  (PROP_cfg | PROP_ssa),
++  0,
++  0,
++  (TODO_update_ssa),
++  (TODO_verify_all),
++};
++
++class pass_ipa_alignment_propagation
++  : public simple_ipa_opt_pass
++{
++public:
++  pass_ipa_alignment_propagation (gcc::context *ctxt)
++    : simple_ipa_opt_pass (pass_data_ipa_alignment_propagation, ctxt)
++  {}
++
++  virtual bool gate (function *)
++  {
++    return optimize >= 3 && flag_ipa_alignment_propagation;
++  }
++
++  virtual unsigned execute (function *)
++  {
++    return ipa_propagate_alignment ();
++  }
++};
++} /* namespace.  */
++
++simple_ipa_opt_pass *
++make_pass_ipa_alignment_propagation (gcc::context *ctxt)
++{
++  return new pass_ipa_alignment_propagation (ctxt);
++}
+diff --git a/gcc/ipa-array-dse.cc b/gcc/ipa-array-dse.cc
+new file mode 100644
+index 000000000..df973e849
+--- /dev/null
++++ b/gcc/ipa-array-dse.cc
+@@ -0,0 +1,3317 @@
++/* Array dead store elimination
++   Copyright (C) 2021-2022 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++<http://www.gnu.org/licenses/>.  */
++
++#include "ipa-array-dse.h"
++
++#include "basic-block.h"
++#include "bitmap.h"
++#include "cgraph.h"
++#include "cfghooks.h"
++#include "cfgloop.h"
++#include "cfg.h"
++#include "fold-const.h"
++#include "gimple.h"
++#include "gimple-builder.h"
++#include "gimple-iterator.h"
++#include "gimple-pretty-print.h"
++#include "gimple-ssa.h"
++#include "gimple-walk.h"
++#include "gimplify-me.h"
++#include "ipa-utils.h"
++#include "tree-phinodes.h"
++#include "ssa-iterators.h"
++#include "stringpool.h"
++#include "tree-cfg.h"
++#include "tree-dfa.h"
++#include "tree-inline.h"
++#include "tree-pass.h"
++#include "tree-pretty-print.h"
++#include "tree-ssanames.h"
++#include "tree-vrp.h"
++#include "tree.h"
++
++namespace array_dse {
++
++#define RANGE_TYPE long_long_integer_type_node
++#define RANGE_INF LONG_LONG_MAX
++#define RANGE_NINF LONG_LONG_MIN
++
++static inline bool
++integer_cst_p (tree t)
++{
++  return TREE_CODE (t) == INTEGER_CST && !TREE_OVERFLOW (t);
++}
++
++static tree
++strip_base (tree addr)
++{
++  tree base = get_base_address (addr);
++  return TREE_CODE (base) == MEM_REF ? TREE_OPERAND (base, 0) : nullptr;
++}
++
++static tree
++strip_ssa_copy (tree var)
++{
++  if (!var || TREE_CODE (var) != SSA_NAME)
++    return var;
++
++  while (true)
++    {
++      gimple *stmt = SSA_NAME_DEF_STMT (var);
++      if (!gimple_assign_single_p (stmt) && !gimple_assign_cast_p (stmt))
++	break;
++
++      tree rhs = gimple_assign_rhs1 (stmt);
++      if (!rhs || TREE_CODE (rhs) != SSA_NAME)
++	break;
++
++      var = rhs;
++    }
++
++  return var;
++}
++
++static inline unsigned
++greatest_common_divisor (unsigned a, unsigned b)
++{
++  return b == 0 ? a : greatest_common_divisor (b, a % b);
++}
++
++static compare_result
++opposite_compare_result (compare_result result)
++{
++  switch (result)
++    {
++      case COMPARE_ERROR: return COMPARE_ERROR;
++      case LT: return GT;
++      case EQ: return NE;
++      case GT: return LT;
++      case LE: return GE;
++      case GE: return LE;
++      case NE: return EQ;
++    }
++}
++
++static tree_code
++opposite_cond_code (tree_code code)
++{
++  switch (code)
++    {
++      case LT_EXPR: return GE_EXPR;
++      case LE_EXPR: return GT_EXPR;
++      case GT_EXPR: return LE_EXPR;
++      case GE_EXPR: return LT_EXPR;
++      case EQ_EXPR: return NE_EXPR;
++      case NE_EXPR: return EQ_EXPR;
++      default:
++	return ERROR_MARK;
++    }
++}
++
++/* Calculate the step of a loop variable, recording all stmts that add the step.  */
++
++static int
++calc_loop_var_step (tree loop_var, tree iterate_var,
++		    hash_set *iterate_stmts = nullptr)
++{
++  int step = 0;
++  auto_bitmap visited;
++  auto_vec worklist;
++  worklist.safe_push (iterate_var);
++
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++      if (TREE_CODE (t) != SSA_NAME)
++	return 0;
++
++      if (t == loop_var || !bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      gimple *stmt = SSA_NAME_DEF_STMT (t);
++      if (gimple_code (stmt) == GIMPLE_PHI)
++	{
++	  for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
++	    worklist.safe_push (gimple_phi_arg_def (stmt, i));
++	  continue;
++	}
++
++      /* Check iterate stmts' pattern: _2 = _1 + step.  */
++      if (!is_gimple_assign (stmt)
++	  || (gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR
++	      && gimple_assign_rhs_code (stmt) != PLUS_EXPR))
++	return 0;
++
++      tree ptr = gimple_assign_rhs1 (stmt);
++      tree offset = gimple_assign_rhs2 (stmt);
++      if (TREE_CODE (offset) != INTEGER_CST)
++	return 0;
++
++      worklist.safe_push (ptr);
++      HOST_WIDE_INT offset_val = TREE_INT_CST_LOW (offset);
++      if (step && offset_val != step)
++	return 0;
++      step = offset_val;
++
++      if (iterate_stmts)
++	iterate_stmts->add (stmt);
++    }
++
++  return step;
++}
++
++/* VAR is a loop var when:
++     1. VAR is defined by a phi in LOOP's header.
++     2. The defining phi should have two args, one comes from preheader
++	and the other comes from latch.  */
++
++static bool
++loop_var_p (loop_p loop, tree var)
++{
++  if (TREE_CODE (var) != SSA_NAME)
++    return false;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  if (gimple_code (stmt) != GIMPLE_PHI || gimple_bb (stmt) != loop->header)
++    return false;
++
++  edge preheader_edge = loop_preheader_edge (loop);
++  edge latch_edge = loop_latch_edge (loop);
++
++  return preheader_edge && latch_edge
++	 && PHI_ARG_DEF_FROM_EDGE (stmt, preheader_edge)
++	 && PHI_ARG_DEF_FROM_EDGE (stmt, latch_edge);
++}
++
++static inline tree
++build_value (HOST_WIDE_INT value)
++{
++  return build_int_cst (RANGE_TYPE, value);
++}
++
++static inline value_range
++make_range (HOST_WIDE_INT value)
++{
++  tree v = build_value (value);
++  return value_range{v, v};
++}
++
++static inline value_range
++make_range (HOST_WIDE_INT min, HOST_WIDE_INT max)
++{
++  return value_range{build_value (min), build_value (max)};
++}
++
++static infinite_kind
++infinite_p (tree value)
++{
++  tree type = TREE_TYPE (value);
++  if (TREE_CODE (value) != INTEGER_CST || TYPE_PRECISION (type) == 1)
++    return infinite_kind::NON_INF;
++
++  wide_int type_min = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
++  wide_int type_max = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
++
++  if (INTEGRAL_TYPE_P (type) && !TYPE_UNSIGNED (type)
++      && wi::to_wide (value) == type_min)
++    return infinite_kind::NINF;
++
++  if (wi::to_wide (value) == type_max)
++    return infinite_kind::INF;
++
++  return infinite_kind::NON_INF;
++}
++
++static inline HOST_WIDE_INT
++get_multiplier (tree t)
++{
++  if (TREE_CODE (t) != MULT_EXPR
++      || TREE_CODE (TREE_OPERAND (t, 1)) != INTEGER_CST)
++    return 0;
++
++  return TREE_INT_CST_LOW (TREE_OPERAND (t, 1));
++}
++
++static tree negate_tree (tree t);
++static tree minus_tree (tree t1, tree t2);
++
++/* Convert negative multiplier to positive.  */
++
++static void
++handle_negate_multiplier (tree t)
++{
++  HOST_WIDE_INT multiplier = get_multiplier (t);
++  if (multiplier >= 0)
++    return;
++
++  tree lhs = TREE_OPERAND (t, 0);
++  if (TREE_CODE (lhs) == PLUS_EXPR)
++    {
++      TREE_OPERAND (t, 0) = minus_tree (negate_tree (TREE_OPERAND (lhs, 0)),
++					TREE_OPERAND (lhs, 1));
++      TREE_OPERAND (t, 1) = build_int_cst (RANGE_TYPE, -multiplier);
++    }
++  else if (TREE_CODE (lhs) == MINUS_EXPR)
++    {
++      TREE_OPERAND (t, 0) = negate_tree (lhs);
++      TREE_OPERAND (t, 1) = build_int_cst (RANGE_TYPE, -multiplier);
++    }
++}
++
++static tree
++negate_tree (tree t)
++{
++  if (!t)
++    return nullptr;
++
++  if (infinite_p (t) == infinite_kind::INF)
++    return build_value (RANGE_NINF);
++  else if (infinite_p (t) == infinite_kind::NINF)
++    return build_value (RANGE_INF);
++  else
++    return fold_build1 (NEGATE_EXPR, RANGE_TYPE, t);
++}
++
++static tree
++plus_tree (tree t1, tree t2)
++{
++  if (!t1 || !t2)
++    return nullptr;
++
++  infinite_kind inf1 = infinite_p (t1);
++  infinite_kind inf2 = infinite_p (t2);
++
++  if ((inf1 == infinite_kind::INF && inf2 == infinite_kind::NINF)
++      || (inf1 == infinite_kind::NINF && inf2 == infinite_kind::INF))
++    return nullptr;
++
++  if (inf1 == infinite_kind::NINF || inf2 == infinite_kind::NINF)
++    return build_value (RANGE_NINF);
++
++  if (inf1 == infinite_kind::INF || inf2 == infinite_kind::INF)
++    return build_value (RANGE_INF);
++
++  tree ret = fold_build2 (PLUS_EXPR, RANGE_TYPE, t1, t2);
++  handle_negate_multiplier (ret);
++
++  return ret;
++}
++
++static tree
++minus_tree (tree t1, tree t2)
++{
++  if (!t1 || !t2)
++    return nullptr;
++
++  infinite_kind inf1 = infinite_p (t1);
++  infinite_kind inf2 = infinite_p (t2);
++
++  if ((inf1 == infinite_kind::INF && inf2 == infinite_kind::INF)
++      || (inf1 == infinite_kind::NINF && inf2 == infinite_kind::NINF))
++    return nullptr;
++
++  if (inf1 == infinite_kind::NINF || inf2 == infinite_kind::INF)
++    return build_value (RANGE_NINF);
++
++  if (inf1 == infinite_kind::INF || inf2 == infinite_kind::NINF)
++    return build_value (RANGE_INF);
++
++  tree ret = fold_build2 (MINUS_EXPR, RANGE_TYPE, t1, t2);
++  handle_negate_multiplier (ret);
++
++  return ret;
++}
++
++/* Callback for walk_tree, usage:
++     walk_tree (&A, sub_expr_p, B, nullptr)
++
++   Check if B is sub expr of A.
++ */
++
++static tree
++sub_expr_p (tree *opnd_ptr, int *walk_subtrees ATTRIBUTE_UNUSED, void *data)
++{
++  tree opnd = *opnd_ptr;
++  tree var = static_cast (data);
++
++  if (opnd == var)
++    return var;
++
++  return NULL_TREE;
++}
++
++static unsigned
++get_ptr_layers (tree expr)
++{
++  unsigned layers = 0;
++  while (POINTER_TYPE_P (expr) || TREE_CODE (expr) == ARRAY_TYPE)
++    {
++      layers++;
++      expr = TREE_TYPE (expr);
++    }
++
++  return layers;
++}
++
++static bool
++find_base (gimple *stmt ATTRIBUTE_UNUSED, tree base,
++	   tree var ATTRIBUTE_UNUSED, void *data)
++{
++  return (TREE_CODE (base) == MEM_REF
++	  && TREE_OPERAND (base, 0) == static_cast (data));
++}
++
++static bool
++gimple_phi_arg_p (gimple *stmt, tree var)
++{
++  for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
++    if (gimple_phi_arg_def (stmt, i) == var)
++      return true;
++
++  return false;
++}
++
++/* Returns the number of FIELD_DECLs in TYPE.  */
++
++static unsigned
++fields_length (const_tree type)
++{
++  tree t = TYPE_FIELDS (type);
++  return list_length (t);
++}
++
++/* Get unique base address of VAR.  */
++
++tree
++addr_analyzer::get_address (tree var)
++{
++  if (tree *it = address_map.get (var))
++    return *it;
++
++  tree addr = analyze_address (var);
++  address_map.put (var, addr);
++
++  return addr;
++}
++
++/* Try to find the unique base address that VAR accesses in the
++   current function.  */
++
++tree
++addr_analyzer::analyze_address (tree var)
++{
++  tree addr = nullptr;
++  auto_bitmap visited;
++  auto_vec worklist;
++  worklist.safe_push (var);
++
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++      if (TREE_CODE (t) != SSA_NAME || !POINTER_TYPE_P (TREE_TYPE (t)))
++	return nullptr;
++
++      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      if (SSA_NAME_IS_DEFAULT_DEF (t))
++	{
++	  tree new_addr = SSA_NAME_VAR (t);
++	  if (!new_addr || (addr && addr != new_addr))
++	    return nullptr;
++
++	  addr = new_addr;
++	  continue;
++	}
++
++      gimple *def_stmt = SSA_NAME_DEF_STMT (t);
++      if (is_gimple_assign (def_stmt))
++	{
++	  if (!gimple_assign_single_p (def_stmt)
++	      && !gimple_assign_cast_p (def_stmt)
++	      && gimple_assign_rhs_code (def_stmt) != POINTER_PLUS_EXPR)
++	    return nullptr;
++
++	  worklist.safe_push (gimple_assign_rhs1 (def_stmt));
++	}
++      else if (gimple_code (def_stmt) == GIMPLE_PHI)
++	{
++	  for (unsigned i = 0; i < gimple_phi_num_args (def_stmt); i++)
++	    worklist.safe_push (gimple_phi_arg_def (def_stmt, i));
++	}
++      else
++	return nullptr;
++    }
++
++  return addr;
++}
++
++array_dse_callee::array_dse_callee (cgraph_node *node)
++  : node (node)
++{
++}
++
++/* Check if the node could be a candidate callee for array dse.  */
++
++bool
++array_dse_callee::analyze ()
++{
++  cfun_saver save (node, LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
++
++  return filter_function () && find_candidate_array ()
++	 && find_length_param () && check_array_usage ();
++}
++
++unsigned HOST_WIDE_INT
++array_dse_callee::get_len_param_max () const
++{
++  return len_param_max;
++}
++
++tree
++array_dse_callee::mult_tree (basic_block bb, tree t1, tree t2)
++{
++  if (!bb || !t1 || !t2 || !integer_cst_p (t2)
++      || infinite_p (t2) != infinite_kind::NON_INF)
++    return nullptr;
++
++  if (integer_zerop (t1) || integer_zerop (t2))
++    return integer_zero_node;
++
++  auto range1 = calc_tree_range (bb, t1);
++  if (tree_to_shwi (range1.min ()) < 0)
++    return nullptr;
++
++  HOST_WIDE_INT multiplier = tree_to_shwi (t2);
++  if (infinite_p (t1) != infinite_kind::NON_INF)
++    return build_value (multiplier > 0 ? RANGE_INF : RANGE_NINF);
++
++  return fold_build2 (MULT_EXPR, RANGE_TYPE, t1, t2);
++}
++
++tree
++array_dse_callee::div_tree (basic_block bb, tree t1, tree t2)
++{
++  if (!bb || !t1 || !t2 || !integer_cst_p (t2)
++      || infinite_p (t2) != infinite_kind::NON_INF)
++    return nullptr;
++
++  if (integer_zerop (t2))
++    return nullptr;
++
++  if (integer_zerop (t1))
++    return integer_zero_node;
++
++  auto range1 = calc_tree_range (bb, t1);
++  if (tree_to_shwi (range1.min ()) < 0)
++    return nullptr;
++
++  HOST_WIDE_INT divisor = tree_to_shwi (t2);
++
++  if (infinite_p (t1) != infinite_kind::NON_INF)
++    return build_value (divisor > 0 ? RANGE_INF : RANGE_NINF);
++
++  return fold_build2 (TRUNC_DIV_EXPR, RANGE_TYPE, t1, t2);
++}
++
++tree
++array_dse_callee::lshift_tree (tree t1, tree t2)
++{
++  if (!t1 || !t2 || !integer_cst_p (t2))
++    return nullptr;
++
++  if (infinite_p (t1) != infinite_kind::NON_INF)
++    return t1;
++
++  return fold_build2 (LSHIFT_EXPR, RANGE_TYPE, t1, t2);
++}
++
++tree
++array_dse_callee::rshift_tree (tree t1, tree t2)
++{
++  if (!t1 || !t2 || !integer_cst_p (t2))
++    return nullptr;
++
++  if (infinite_p (t1) != infinite_kind::NON_INF)
++    return t1;
++
++  return fold_build2 (RSHIFT_EXPR, RANGE_TYPE, t1, t2);
++}
++
++tree
++array_dse_callee::max_tree (basic_block bb, tree t1, tree t2)
++{
++  if (!bb || !t1 || !t2)
++    return nullptr;
++
++  switch (compare_tree (bb, t1, t2))
++    {
++      case EQ: return t1;
++      case GT: return t1;
++      case GE: return t1;
++      case LT: return t2;
++      case LE: return t2;
++      default: return nullptr;
++    }
++}
++
++tree
++array_dse_callee::min_tree (basic_block bb, tree t1, tree t2)
++{
++  if (!bb || !t1 || !t2)
++    return nullptr;
++
++  switch (compare_tree (bb, t1, t2))
++    {
++      case EQ: return t2;
++      case GT: return t2;
++      case GE: return t2;
++      case LT: return t1;
++      case LE: return t1;
++      default: return nullptr;
++    }
++}
++
++/* Calculate the value of T, where T is an expression with len_main_var and
++   N_VALUE is len_main_var's value.  */
++
++HOST_WIDE_INT
++array_dse_callee::calc_tree_value (tree t, HOST_WIDE_INT n_value)
++{
++  if (TREE_CODE (t) == INTEGER_CST)
++    return tree_to_shwi (t);
++
++  if (t == len_main_var || t == signed_len_var)
++    return n_value;
++
++  HOST_WIDE_INT op_value[2];
++  for (int i = 0; i < std::min (2, tree_operand_length (t)); i++)
++    op_value[i] = calc_tree_value (TREE_OPERAND (t, i), n_value);
++
++  switch (TREE_CODE (t))
++    {
++      case NEGATE_EXPR:
++	return -op_value[0];
++      case PLUS_EXPR:
++	return op_value[0] + op_value[1];
++      case MINUS_EXPR:
++	return op_value[0] - op_value[1];
++      case MULT_EXPR:
++	return op_value[0] * op_value[1];
++      case TRUNC_DIV_EXPR:
++	return op_value[0] / op_value[1];
++      case LSHIFT_EXPR:
++	return op_value[0] * (1 << op_value[1]);
++      case RSHIFT_EXPR:
++	return op_value[0] / (1 << op_value[1]);
++      default:
++	return 0;
++    }
++}
++
++/* Calculate expression T's range.  */
++
++value_range
++array_dse_callee::calc_tree_range (basic_block bb, tree t)
++{
++  if (!t)
++    return value_range{RANGE_TYPE};
++
++  if (TREE_CODE (t) == INTEGER_CST)
++    return make_range (tree_to_shwi (t));
++
++  if (t == len_main_var || t == signed_len_var)
++    return len_range_map.get_or_insert (bb);
++
++  int len = tree_operand_length (t);
++  gcc_assert (len > 0);
++  value_range range1 = calc_tree_range (bb, TREE_OPERAND (t, 0));
++  value_range range2;
++  if (len == 2)
++    range2 = calc_tree_range (bb, TREE_OPERAND (t, 1));
++
++  switch (TREE_CODE (t))
++    {
++      /* Since the variable in both expressions is len_main_var and both
++	 expressions are monotonically increasing, we can just substitute
++	 the maximum and minimum values of len_main_var to calculate the
++	 expression T's range.  */
++      case PLUS_EXPR:
++      case MINUS_EXPR:
++	{
++	  tree op[2] = {TREE_OPERAND (t, 0), TREE_OPERAND (t, 1)};
++	  if (integer_cst_p (op[0]) || integer_cst_p (op[1]))
++	    break;
++
++	  auto len_range = len_range_map.get_or_insert (bb);
++	  auto len_min = tree_to_shwi (len_range.min ());
++	  auto len_max = tree_to_shwi (len_range.max ());
++	  auto min1 = calc_tree_value (op[0], len_min);
++	  auto max1 = calc_tree_value (op[0], len_max);
++	  auto min2 = calc_tree_value (op[1], len_min);
++	  auto max2 = calc_tree_value (op[1], len_max);
++
++	  auto min = TREE_CODE (t) == PLUS_EXPR ? min1 + min2 : min1 - min2;
++	  auto max = TREE_CODE (t) == PLUS_EXPR ? max1 + max2 : max1 - max2;
++
++	  if (min > max)
++	    std::swap (min, max);
++
++	  return make_range (min, max);
++	}
++      default:
++	break;
++    }
++
++  return build_range (bb, TREE_CODE (t), range1, range2);
++}
++
++value_range
++array_dse_callee::build_range (basic_block bb, tree_code op,
++			       const value_range &r1, const value_range &r2)
++{
++  tree min = nullptr;
++  tree max = nullptr;
++  switch (op)
++    {
++      case NEGATE_EXPR:
++	min = negate_tree (r1.max ());
++	max = negate_tree (r1.min ());
++	break;
++      case PLUS_EXPR:
++	[[fallthrough]];
++      case POINTER_PLUS_EXPR:
++	min = plus_tree (r1.min (), r2.min ());
++	max = plus_tree (r1.max (), r2.max ());
++	break;
++      case MINUS_EXPR:
++	[[fallthrough]];
++      case POINTER_DIFF_EXPR:
++	min = minus_tree (r1.min(), r2.max ());
++	max = minus_tree (r1.max(), r2.min ());
++	break;
++      case MULT_EXPR:
++	min = mult_tree (bb, r1.min (), r2.min ());
++	max = mult_tree (bb, r1.max (), r2.max ());
++	break;
++      case TRUNC_DIV_EXPR:
++	min = div_tree (bb, r1.min (), r2.max ());
++	max = div_tree (bb, r1.max (), r2.min ());
++	break;
++      case LSHIFT_EXPR:
++	min = lshift_tree (r1.min (), r2.min ());
++	max = lshift_tree (r1.max (), r2.max ());
++	break;
++      case RSHIFT_EXPR:
++	min = rshift_tree (r1.min (), r2.max ());
++	max = rshift_tree (r1.max (), r2.min ());
++	break;
++      case MAX_EXPR:
++	min = max_tree (bb, r1.min (), r2.min ());
++	max = max_tree (bb, r1.max (), r2.max ());
++	break;
++      case MIN_EXPR:
++	min = min_tree (bb, r1.min (), r2.min ());
++	max = min_tree (bb, r1.max (), r2.max ());
++	break;
++      default:
++	break;
++    }
++
++  return min && max ? value_range{min, max} : value_range{RANGE_TYPE};
++}
++
++/* Compare two pointer range value in BB.  */
++
++compare_result
++array_dse_callee::compare_tree (basic_block bb, tree t1, tree t2)
++{
++  if (!bb || !t1 || !t2)
++    return COMPARE_ERROR;
++
++  if (operand_equal_p (t1, t2))
++    return EQ;
++
++  auto ret = compare_tree_by_minus (bb, t1, t2);
++  if (!ret)
++    ret = opposite_compare_result (compare_tree_by_minus (bb, t2, t1));
++
++  return ret;
++}
++
++compare_result
++array_dse_callee::compare_tree_by_minus (basic_block bb, tree t1, tree t2)
++{
++  tree expr = minus_tree (t1, t2);
++  auto range = calc_tree_range (bb, expr);
++  HOST_WIDE_INT min = tree_to_shwi (range.min ());
++  HOST_WIDE_INT max = tree_to_shwi (range.max ());
++  if (min == 0)
++    return GE;
++  if (min > 0)
++    return GT;
++  if (max == 0)
++    return LE;
++  if (max < 0)
++    return LT;
++
++  return COMPARE_ERROR;
++}
++
++bool
++array_dse_callee::filter_function () const
++{
++  return leaf_recursive_node_p (node) && no_return_p ()
++	 /* There must be two params: array and length.  */
++	 && list_length (DECL_ARGUMENTS (node->decl)) == PARAM_NUM;
++}
++
++/* Candidate callee must return no value.  Each return block can't have any
++   stmt except a return stmt.  */
++
++bool
++array_dse_callee::no_return_p () const
++{
++  tree return_type = TREE_TYPE (TREE_TYPE (cfun->decl));
++  if (TREE_CODE (return_type) != VOID_TYPE)
++    return false;
++
++  for (auto return_edge : EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
++    {
++      basic_block return_bb = return_edge->src;
++      if (!single_succ_p (return_bb))
++	return false;
++
++      gimple *stmt = first_stmt (return_bb);
++      if (gimple_code (stmt) != GIMPLE_RETURN
++	  || gimple_return_retval (as_a (stmt)))
++	return false;
++    }
++
++  return true;
++}
++
++bool
++array_dse_callee::find_main_vars ()
++{
++  auto_bitmap visited;
++  tree default_def[PARAM_NUM] = {nullptr, nullptr};
++
++  /* Collect all params' default def.  */
++  unsigned i;
++  tree name;
++  FOR_EACH_SSA_NAME (i, name, cfun)
++    {
++      if (!SSA_NAME_IS_DEFAULT_DEF (name)
++	  || SSA_NAME_IS_VIRTUAL_OPERAND (name))
++	continue;
++
++      gimple *stmt = SSA_NAME_DEF_STMT (name);
++      if (gimple_code (stmt) != GIMPLE_NOP)
++	return false;
++
++      /* Each param should have an unique default ssa def.  */
++      int index = find_param_index (SSA_NAME_VAR (name));
++      if (index == -1 || !bitmap_set_bit (visited, index))
++	return false;
++
++      default_def[index] = name;
++    }
++
++  if (bitmap_count_bits (visited) != PARAM_NUM)
++    return false;
++
++  array_main_var = default_def[array_param_index];
++  len_main_var = default_def[len_param_index];
++
++  find_tail_recursive_loop (default_def);
++
++  signed_len_var = fold_convert (RANGE_TYPE, len_main_var);
++
++  return true;
++}
++
++/* Try to find a tail recursive loop.  */
++
++void
++array_dse_callee::find_tail_recursive_loop (tree *default_def)
++{
++  tree main_loop_var[PARAM_NUM] = {nullptr, nullptr};
++  loop_p unique_loop = nullptr;
++
++  for (unsigned i = 0; i < PARAM_NUM; i++)
++    {
++      tree name = default_def[i];
++
++      use_operand_p use_p;
++      gimple *stmt = nullptr;
++      if (!single_imm_use (name, &use_p, &stmt)
++	  || gimple_code (stmt) != GIMPLE_PHI)
++	return;
++
++      main_loop_var[i] = gimple_phi_result (stmt);
++
++      /* Check if all main vars are defined in the same loop header.  */
++      basic_block bb = gimple_bb (stmt);
++      loop_p loop = bb->loop_father;
++      if (!loop || loop->num == 0 || !bb_loop_header_p (bb)
++	  || (unique_loop && unique_loop != loop))
++	return;
++
++      unique_loop = loop;
++    }
++
++  /* Multiple latch is not allow.  */
++  if (!unique_loop || !loop_latch_edge (unique_loop))
++    return;
++
++  /* The loop header must be the "first" block.  There shouldn't be any
++     stmt before entering main loop.  */
++  basic_block entry_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
++  basic_block preheader = loop_preheader_edge (unique_loop)->src;
++  if (single_succ_p (preheader) && single_pred_p (preheader)
++      && single_pred (preheader) == entry_bb && single_succ_p (entry_bb)
++      && empty_block_p (preheader))
++    {
++      main_loop = unique_loop;
++      array_main_var = main_loop_var[array_param_index];
++      len_main_var = main_loop_var[len_param_index];
++    }
++}
++
++/* Check if the function only store to the array passed by its param.  */
++
++bool
++array_dse_callee::find_candidate_array ()
++{
++  tree unique_array = nullptr;
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++	  if (gimple_clobber_p (stmt))
++	    continue;
++
++	  /* There are 3 kind of stmts may have store ops: GIMPLE_ASSIGN,
++	     GIMPLE_CALL and GIMPLE_ASM.  */
++	  if (gimple_has_volatile_ops (stmt)
++	      || gimple_code (stmt) == GIMPLE_ASM)
++	    return false;
++
++	  /* We have check that current function only has recursive call,
++	     and it doesn't return a value, so we can skip call stmt.  */
++	  if (gimple_code (stmt) != GIMPLE_ASSIGN)
++	    continue;
++
++	  tree lhs = gimple_assign_lhs (stmt);
++	  if (TREE_CODE (lhs) == SSA_NAME)
++	    continue;
++
++	  tree base = strip_base (lhs);
++	  if (!base || TREE_CODE (base) != SSA_NAME)
++	    return false;
++
++	  tree array = analyzer.get_address (base);
++	  if (!array || (unique_array && unique_array != array))
++	    return false;
++
++	  unique_array = array;
++	}
++    }
++
++  if (!unique_array)
++    return false;
++
++  int index = find_param_index (unique_array);
++  if (index < 0)
++    return false;
++
++  array_param = unique_array;
++  array_param_index = index;
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "Found unique stored array: ");
++      print_generic_expr (dump_file, unique_array);
++      fprintf (dump_file, "\n");
++    }
++
++  return true;
++}
++
++/* Check if the function has length param.  */
++
++bool
++array_dse_callee::find_length_param ()
++{
++  collect_read_write_ptrs ();
++
++  tree len = nullptr;
++  unsigned size = 0;
++  for (auto ptr : all_ptrs)
++    if (!check_pointer (ptr, len, size))
++      return false;
++
++  if (!len || TREE_CODE (len) != SSA_NAME || !SSA_NAME_VAR (len))
++    return false;
++
++  int index = find_param_index (SSA_NAME_VAR (len));
++  if (index < 0)
++    return false;
++
++  len_param = SSA_NAME_VAR (len);
++  len_param_index = index;
++  elem_size = build_int_cst (RANGE_TYPE, size);
++  elem_size_cst = size;
++  calc_length_param_max ();
++
++  if (len && dump_file)
++    {
++      fprintf (dump_file, "Found unique array length: ");
++      print_generic_expr (dump_file, len_param);
++      fprintf (dump_file, "\n");
++    }
++
++  return true;
++}
++
++void
++array_dse_callee::collect_read_write_ptrs ()
++{
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++
++	  for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
++	    {
++	      tree op = gimple_op (stmt, i);
++	      if (!op)
++		continue;
++
++	      tree base = strip_base (op);
++	      if (!base || TREE_CODE (base) != SSA_NAME)
++		continue;
++
++	      tree array = analyzer.get_address (base);
++	      if (array != array_param)
++		continue;
++
++	      all_ptrs.add (base);
++	    }
++	}
++    }
++}
++
++/* We heuristically set upper bound of length param to the max value with
++   half bits of its data type.
++
++   TODO: Overflows may still occur, we need a better implement.
++ */
++
++void
++array_dse_callee::calc_length_param_max ()
++{
++  unsigned bits = TYPE_PRECISION (TREE_TYPE (len_param));
++  len_param_max = 1L << (bits / 2);
++}
++
++/* Check pointer pattern: ptr = ptr1 + offset1
++				 |
++			    ptr2 + offset2
++			      |
++			     ...
++			      |
++			ARRAY + offset3
++
++   All ptrs we visited must be calculated by adding offset to array_param.
++   All offset must be an expression with the only variable len_param.
++   LEN will be set to the unique variable we founded.
++   SIZE will be set to the minimum offset unit, which will be treated as the
++   array element size.
++ */
++
++bool
++array_dse_callee::check_pointer (tree ptr, tree &len, unsigned &size)
++{
++  visited_offset.empty ();
++  auto_bitmap visited;
++  auto_vec worklist;
++  worklist.safe_push (ptr);
++
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++      if (!POINTER_TYPE_P (TREE_TYPE (t)) || TREE_CODE (t) != SSA_NAME)
++	return false;
++
++      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      if (SSA_NAME_IS_DEFAULT_DEF (t))
++	{
++	  tree var = SSA_NAME_VAR (t);
++	  if (!var || var != array_param)
++	    return false;
++
++	  continue;
++	}
++
++      gimple *stmt = SSA_NAME_DEF_STMT (t);
++      if (is_gimple_assign (stmt))
++	{
++	  worklist.safe_push (gimple_assign_rhs1 (stmt));
++	  if (gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR
++	      && !check_offset (gimple_assign_rhs2 (stmt), len, size,
++				worklist))
++	    return false;
++	}
++      else if (gimple_code (stmt) == GIMPLE_PHI)
++	{
++	  for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
++	    worklist.safe_push (gimple_phi_arg_def (stmt, i));
++	}
++      else
++	return false;
++    }
++
++  return true;
++}
++
++/* Check offset part.  */
++
++bool
++array_dse_callee::check_offset (tree var, tree &len, unsigned &size,
++				auto_vec &worklist)
++{
++  if (visited_offset.contains (var))
++    return true;
++  visited_offset.add (var);
++
++  if (TREE_CODE (TREE_TYPE (var)) != INTEGER_TYPE)
++    return false;
++
++  tree offset = strip_ssa_copy (var);
++  if (TREE_CODE (offset) == INTEGER_CST)
++    {
++      HOST_WIDE_INT value = TREE_INT_CST_LOW (offset);
++      value = std::abs (value);
++      size = size ? greatest_common_divisor (size, value) : value;
++      return true;
++    }
++
++  if (TREE_CODE (offset) != SSA_NAME)
++    return false;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (offset);
++  if (gimple_code (stmt) == GIMPLE_PHI)
++    {
++      for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
++	if (!check_offset (gimple_phi_arg_def (stmt, i), len, size, worklist))
++	  return false;
++    }
++  else if (!is_gimple_assign (stmt))
++    return false;
++
++  switch (gimple_assign_rhs_code (stmt))
++    {
++      case MAX_EXPR:
++	[[fallthrough]];
++      case MIN_EXPR:
++	[[fallthrough]];
++      case PLUS_EXPR:
++	[[fallthrough]];
++      case MINUS_EXPR:
++	return check_offset (gimple_assign_rhs1 (stmt), len, size, worklist)
++	       && check_offset (gimple_assign_rhs2 (stmt), len, size,
++				worklist);
++      case POINTER_DIFF_EXPR:
++	worklist.safe_push(gimple_assign_rhs1 (stmt));
++	worklist.safe_push(gimple_assign_rhs2 (stmt));
++	return true;
++      case NEGATE_EXPR:
++	return check_offset (gimple_assign_rhs1 (stmt), len, size, worklist);
++      case MULT_EXPR:
++	return check_mult_expr (stmt, len, size);
++      default:
++	return false;
++    }
++}
++
++/* Handle MULT_EXPR.  */
++
++bool
++array_dse_callee::check_mult_expr (gimple *stmt, tree &len, unsigned &size)
++{
++  tree rhs1 = gimple_assign_rhs1 (stmt);
++  tree rhs2 = gimple_assign_rhs2 (stmt);
++
++  /* Handle size.  */
++  if (TREE_CODE (rhs2) != INTEGER_CST)
++    return false;
++
++  HOST_WIDE_INT value = TREE_INT_CST_LOW (rhs2);
++  size = greatest_common_divisor (size, std::abs (value));
++
++  /* Handle index.  */
++  rhs1 = strip_ssa_copy (rhs1);
++  if (TREE_CODE (rhs1) != SSA_NAME)
++    return false;
++
++  gimple *index_stmt = SSA_NAME_DEF_STMT (rhs1);
++  if (is_gimple_assign (index_stmt) && gimple_num_ops (index_stmt) > 2)
++    {
++      if (TREE_CODE (gimple_assign_rhs2 (index_stmt)) != INTEGER_CST)
++	return false;
++      rhs1 = gimple_assign_rhs1 (index_stmt);
++    }
++
++  if (len && len != rhs1)
++    return false;
++  len = rhs1;
++
++  return true;
++}
++
++/* Find the param index of VAR in current function.
++   Return -1 if not found.  */
++
++int
++array_dse_callee::find_param_index (tree var)
++{
++  if (TREE_CODE (var) != PARM_DECL)
++    return -1;
++
++  tree param = DECL_ARGUMENTS (node->decl);
++  int index = 0;
++  while (param)
++    {
++      if (param == var)
++	return index;
++
++      param = DECL_CHAIN (param);
++      index++;
++    }
++
++  return -1;
++}
++
++bool
++array_dse_callee::check_array_usage ()
++{
++  find_main_vars ();
++
++  return calc_ptr_range () && check_ptr_range ()
++	 && check_recursive_call_arg ();
++}
++
++/* Calculate len_param's value range in each block.
++   We assume its initial range is [1, len_param_max], we will validate this
++   range at each call to this callee.  */
++
++void
++array_dse_callee::calc_len_range ()
++{
++  /* Init all blocks' len_range.  */
++  auto full_len_range = make_range (len_param_min, len_param_max);
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    len_range_map.put (bb, full_len_range);
++
++  /* Calculate new range according to condition.  */
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      gimple *stmt = gsi_stmt (gsi_last_bb (bb));
++      auto cond_range = get_range_from_cond (stmt);
++      if (cond_range.undefined_p ())
++	continue;
++
++      edge true_edge = nullptr;
++      edge false_edge = nullptr;
++      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
++      update_len_range (true_edge->dest, cond_range);
++      update_len_range (false_edge->dest, invert_range (cond_range));
++    }
++}
++
++/* Update len_param's range in all block dominated by START.  */
++
++void
++array_dse_callee::update_len_range (basic_block start,
++				    const value_range &new_range)
++{
++  for (auto bb : get_all_dominated_blocks (CDI_DOMINATORS, start))
++    (*len_range_map.get (bb)).intersect (new_range);
++}
++
++value_range
++array_dse_callee::invert_range (const value_range &range) const
++{
++  auto new_range = range;
++  new_range.invert ();
++
++  return new_range;
++}
++
++/* Get range of len_param from a condition.  */
++
++value_range
++array_dse_callee::get_range_from_cond (gimple *stmt)
++{
++  if (!stmt || gimple_code (stmt) != GIMPLE_COND)
++    return value_range{};
++
++  gcond *cond = as_a (stmt);
++  tree_code code = gimple_cond_code (cond);
++  tree lhs = gimple_cond_lhs (cond);
++  tree rhs = gimple_cond_rhs (cond);
++
++  if (lhs != len_main_var || TREE_CODE (rhs) != INTEGER_CST)
++    return value_range{};
++
++  HOST_WIDE_INT value = TREE_INT_CST_LOW (rhs);
++
++  switch (code)
++    {
++      case LT_EXPR:
++	return make_range (RANGE_NINF, value - 1);
++      case LE_EXPR:
++	return make_range (RANGE_NINF, value);
++      case GT_EXPR:
++	return make_range (value + 1, RANGE_INF);
++      case GE_EXPR:
++	return make_range (value, RANGE_INF);
++      case EQ_EXPR:
++	return make_range (value);
++      case NE_EXPR:
++	return invert_range (make_range (value));
++      default:
++	return value_range{};
++    }
++}
++
++/* Get range of a variable, represented by len_param. If variable is a
++   pointer, return the range of its offset from array_param.  */
++
++value_range
++array_dse_callee::get_var_range (basic_block bb, tree var)
++{
++  if (var == array_main_var)
++    return make_range (0, 0);
++
++  if (var == len_main_var)
++    return value_range{signed_len_var, signed_len_var};
++
++  if (find_var_range (var, bb))
++    return var_range[var][bb];
++
++  /* If we can't calculate its range, keep it varying.  */
++  auto &range = var_range[var][bb];
++  range.set_varying (RANGE_TYPE);
++
++  if (TREE_CODE (var) == INTEGER_CST)
++    {
++      HOST_WIDE_INT value = TREE_INT_CST_LOW (var);
++      range = make_range (value);
++      return range;
++    }
++
++  if (TREE_CODE (var) != SSA_NAME)
++    return range;
++
++  /* Build range expression recursively.  */
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  if (gimple_code (stmt) == GIMPLE_PHI)
++    {
++      range = get_var_range (bb, gimple_phi_arg_def (stmt, 0));
++      for (unsigned i = 1; i < gimple_phi_num_args (stmt); i++)
++	{
++	  tree arg = gimple_phi_arg_def (stmt, i);
++	  auto arg_range = get_var_range (bb, arg);
++	  tree min = min_tree (bb, range.min (), arg_range.min ());
++	  tree max = max_tree (bb, range.max (), arg_range.max ());
++	  if (!min || !max)
++	    {
++	      range.set_varying (RANGE_TYPE);
++	      break;
++	    }
++
++	  range = value_range{min, max};
++	}
++      return range;
++    }
++
++  if (!is_gimple_assign (stmt))
++    return range;
++
++  tree rhs1 = gimple_assign_rhs1 (stmt);
++  tree rhs2 = gimple_num_ops (stmt) > 2 ? gimple_assign_rhs2 (stmt) : nullptr;
++  value_range range1 = get_var_range (bb, rhs1);
++  value_range range2 = value_range{RANGE_TYPE};
++  if (rhs2)
++    range2 = get_var_range (bb, rhs2);
++
++  if (gimple_assign_single_p (stmt) || gimple_assign_cast_p (stmt))
++    range = range1;
++  else
++    range = build_range (bb, gimple_assign_rhs_code (stmt), range1, range2);
++
++  return range;
++}
++
++/* Calculate pointer's offset range by checking loop condition.  */
++
++bool
++array_dse_callee::calc_ptr_range ()
++{
++  calc_len_range ();
++
++  auto_bitmap visited;
++  auto_vec worklist;
++  loop_p l = main_loop ? main_loop : current_loops->tree_root;
++  worklist.safe_push (l->header);
++
++  while (!worklist.is_empty ())
++    {
++      basic_block bb = worklist.pop ();
++      if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
++	  || (main_loop && bb == main_loop->latch)
++	  || unreachable_blocks.contains (bb))
++	continue;
++
++      if (bb->flags & BB_IRREDUCIBLE_LOOP)
++	return false;
++
++      if (!bitmap_set_bit (visited, bb->index))
++	continue;
++
++      if (loop_header_p (bb) && !calc_loop_var_range (bb->loop_father))
++	  return false;
++
++      for (auto succ : bb->succs)
++	worklist.safe_push (succ->dest);
++    }
++
++  return true;
++}
++
++/* Check if offset range of all pointers calculated by array_param are
++   within [0, (len_param -1) * elem_size].  */
++
++bool
++array_dse_callee::check_ptr_range ()
++{
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      if (unreachable_blocks.contains (bb))
++	continue;
++
++      for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++
++	  for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
++	    {
++	      tree op = gimple_op (stmt, i);
++	      if (!op)
++		continue;
++
++	      tree base = strip_base (op);
++	      if (!base || TREE_CODE (base) != SSA_NAME
++		  || !all_ptrs.contains (base))
++		continue;
++
++	      auto range = get_var_range (bb, base);
++	      /* offset >= 0.  */
++	      auto ret = compare_tree (bb, range.min (), integer_zero_node);
++	      if (!ret || ret & LT)
++		return false;
++
++	      /* offset <= (n - 1) * elem_size.  */
++	      tree tmp = minus_tree (signed_len_var, integer_one_node);
++	      tmp = mult_tree (bb, tmp, elem_size);
++	      ret = compare_tree (bb, range.max (), tmp);
++	      if (!ret || ret & GT)
++		return false;
++	    }
++	}
++    }
++
++  return true;
++}
++
++/* Check range of recursive call arguments:
++     void func(a, n) {
++       ...
++       func(a1, n1);
++     }
++
++     1. (1) a1 >= a (tail recursive call)
++	(2) a1 == a (normal recursive call)
++     2. n1 >= 1
++     3. a1 + n1 * elem_size <= a + n * elem_size
++ */
++
++bool
++array_dse_callee::check_recursive_call_arg ()
++{
++  auto_vec array_args;
++  auto_vec len_args;
++  auto_vec blocks;
++  auto_vec is_tail_recursive_call;
++
++  collect_recursive_call_args (array_args, len_args, blocks,
++			       is_tail_recursive_call);
++
++  for (unsigned i = 0; i < array_args.length (); i++)
++    {
++      basic_block bb = blocks[i];
++      update_branch_range (bb);
++      auto array_range = get_var_range (bb, array_args[i]);
++      auto len_range = get_var_range (bb, len_args[i]);
++
++      /* Check requirement 2.  */
++      auto ret = compare_tree (bb, len_range.min (), integer_one_node);
++      if (!ret || ret & ~GE)
++	return false;
++
++      if (is_tail_recursive_call[i])
++	{
++	  /* Check requirement 1.1.  */
++	  ret = compare_tree (bb, array_range.min (), integer_zero_node);
++	  if (!ret || ret & ~GE)
++	    return false;
++	}
++      else
++	{
++	  /* Check requirement 1.2.  */
++	  if (!integer_zerop (array_range.min ())
++	      || !integer_zerop (array_range.max ()))
++	    return false;
++	}
++
++      /* Check requirement 3.  */
++      tree offset = build_recursive_offset (len_args[i]);
++      if (!offset)
++	return false;
++
++      tree recursive_ptr_max
++	= build_recursive_ptr_range_max (bb, array_args[i], offset);
++      if (!recursive_ptr_max)
++	return false;
++
++      tree upper_bound = mult_tree (bb, signed_len_var, elem_size);
++      ret = compare_tree (bb, recursive_ptr_max, upper_bound);
++      if (!ret || ret & ~LE)
++	return false;
++    }
++
++  return true;
++}
++
++void
++array_dse_callee::collect_recursive_call_args (
++  auto_vec &array_args, auto_vec &len_args,
++  auto_vec &blocks, auto_vec &is_tail_recursive_call)
++{
++  for (cgraph_edge *edge = node->callees; edge; edge = edge->next_callee)
++    {
++      if (node != edge->callee)
++	continue;
++
++      gcall *call = edge->call_stmt;
++      tree array_arg = gimple_call_arg (call, array_param_index);
++      tree len_arg = gimple_call_arg (call, len_param_index);
++
++      array_args.safe_push (array_arg);
++      len_args.safe_push (len_arg);
++      blocks.safe_push (gimple_bb (call));
++      is_tail_recursive_call.safe_push (tail_recursive_call_p (call));
++    }
++
++  if (main_loop)
++    {
++      gimple *array_def_stmt = SSA_NAME_DEF_STMT (array_main_var);
++      gimple *len_def_stmt = SSA_NAME_DEF_STMT (len_main_var);
++      edge latch_edge = loop_latch_edge (main_loop);
++      tree array_arg = PHI_ARG_DEF_FROM_EDGE (array_def_stmt, latch_edge);
++      tree len_arg = PHI_ARG_DEF_FROM_EDGE (len_def_stmt, latch_edge);
++      array_args.safe_push (array_arg);
++      len_args.safe_push (len_arg);
++      blocks.safe_push (latch_edge->src);
++      is_tail_recursive_call.safe_push (true);
++    }
++}
++
++/* If BB is first block after a condition jump, try to update range according
++   to the condition.  */
++
++void
++array_dse_callee::update_branch_range (basic_block bb)
++{
++  if (!single_pred_p (bb))
++    return;
++
++  basic_block pred = single_pred (bb);
++  gimple *stmt = gsi_stmt (gsi_last_bb (pred));
++  if (!stmt || gimple_code (stmt) != GIMPLE_COND)
++    return;
++
++  tree lhs = gimple_cond_lhs (stmt);
++  tree rhs = gimple_cond_rhs (stmt);
++  if (!integer_cst_p (rhs))
++    return;
++
++  tree_code code = gimple_cond_code (stmt);
++  if (single_pred_edge (bb)->flags & EDGE_FALSE_VALUE)
++    code = opposite_cond_code (code);
++
++  auto range = get_var_range (bb, lhs);
++  tree min = range.min ();
++  tree max = range.max ();
++  HOST_WIDE_INT value = TREE_INT_CST_LOW (rhs);
++
++  switch (code)
++    {
++      case LT_EXPR:
++	value--;
++	[[fallthrough]];
++      case LE_EXPR:
++	max = min_tree (bb, max, build_value (value));
++	break;
++      case GT_EXPR:
++	value++;
++	[[fallthrough]];
++      case GE_EXPR:
++	min = max_tree (bb, min, build_value (value));
++	break;
++      case EQ_EXPR:
++	var_range[lhs][bb] = make_range (value);
++	return;
++      default:
++	return;
++    }
++
++  var_range[lhs][bb] = value_range{min, max};
++}
++
++/* If LEN = (ptr1 - ptr2) / elem_size,
++   then recursive_offset = LEN * elem_size = (ptr1 - ptr2).
++
++   We can do this only when ptr1 and ptr2 comes from array_param,
++   so (ptr1 - ptr2) is an integer multiple of elem_size.
++ */
++
++tree
++array_dse_callee::build_recursive_offset (tree len_arg)
++{
++  if (TREE_CODE (len_arg) != SSA_NAME)
++    return nullptr;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (len_arg);
++  if (!is_gimple_assign (stmt))
++    return nullptr;
++
++  /* Check pattern: (ptr1 - ptr2) / elem_size.  */
++  tree_code code = gimple_assign_rhs_code (stmt);
++  if (code != TRUNC_DIV_EXPR && code != RSHIFT_EXPR)
++    return nullptr;
++
++  tree rhs1 = gimple_assign_rhs1 (stmt);
++  if (TREE_CODE (rhs1) != SSA_NAME)
++    return nullptr;
++
++  gimple *def = SSA_NAME_DEF_STMT (strip_ssa_copy (rhs1));
++  if (!is_gimple_assign (def)
++      || gimple_assign_rhs_code (def) != POINTER_DIFF_EXPR)
++    return nullptr;
++
++  /* Check ptr1 and ptr2.  */
++  tree len = nullptr;
++  unsigned size = 0;
++  if (!check_pointer (gimple_assign_rhs1 (def), len, size)
++      || !check_pointer (gimple_assign_rhs2 (def), len, size)
++      || len != len_main_var || size != elem_size_cst)
++    return nullptr;
++
++  tree rhs2 = gimple_assign_rhs2 (stmt);
++  if (!integer_cst_p (rhs2))
++    return nullptr;
++
++  HOST_WIDE_INT value = TREE_INT_CST_LOW (rhs2);
++  if (code == RSHIFT_EXPR)
++    value = 1 << value;
++
++  if (value != elem_size_cst)
++    return nullptr;
++
++  return rhs1;
++}
++
++/* Build expression of recursive pointer range max.  */
++
++tree
++array_dse_callee::build_recursive_ptr_range_max (basic_block bb,
++						 tree array_arg,
++						 tree offset)
++{
++  if (TREE_CODE (array_arg) != SSA_NAME)
++    return nullptr;
++
++  tree recursive_ptr_max = nullptr;
++  gimple *stmt = SSA_NAME_DEF_STMT (array_arg);
++
++  /* If ARRAY_ARG = rhs1 - offset, return rhs1's range max directly.  */
++  if (is_gimple_assign (stmt)
++      && gimple_assign_rhs_code (stmt) == POINTER_PLUS_EXPR)
++    {
++      tree rhs1 = gimple_assign_rhs1 (stmt);
++      tree rhs2 = gimple_assign_rhs2 (stmt);
++      if (TREE_CODE (rhs2) == SSA_NAME)
++	{
++	  stmt = SSA_NAME_DEF_STMT (rhs2);
++	  if (is_gimple_assign (stmt)
++	      && gimple_assign_rhs_code (stmt) == NEGATE_EXPR
++	      && gimple_assign_rhs1 (stmt) == offset)
++	    recursive_ptr_max = get_var_range (bb, rhs1).max ();
++	}
++    }
++
++  if (!recursive_ptr_max)
++    {
++      auto range1 = get_var_range (bb, array_arg);
++      auto range2 = get_var_range (bb, offset);
++      recursive_ptr_max = plus_tree (range1.max (), range2.max ());
++    }
++
++  return recursive_ptr_max;
++}
++
++bool
++array_dse_callee::tail_recursive_call_p (gimple *stmt)
++{
++  if (stmt->next)
++    return false;
++
++  basic_block bb = gimple_bb (stmt);
++  return single_succ_p (bb) && return_bb_p (single_succ (bb));
++}
++
++bool
++array_dse_callee::return_bb_p (basic_block bb) const
++{
++  return bb && single_succ_p (bb) &&
++	 single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun);
++}
++
++/* Calculate the range of a loop variable according to initial value and
++   loop exit condition.  */
++
++bool
++array_dse_callee::calc_loop_var_range (loop_p loop)
++{
++  if (!loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
++      && loops_state_satisfies_p (LOOPS_MAY_HAVE_MULTIPLE_LATCHES))
++    return false;
++
++  basic_block header = loop->header;
++  for (auto gsi = gsi_start_phis (header); !gsi_end_p (gsi); gsi_next (&gsi))
++    {
++      gphi *phi = as_a (gsi_stmt (gsi));
++      tree result = gimple_phi_result (phi);
++      if (!loop_var_p (loop, result))
++	continue;
++
++      tree iterate_var = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
++      int step = calc_loop_var_step (result, iterate_var);
++      unsigned abs_step = static_cast (std::abs (step));
++      if (!step)
++	continue;
++
++      if (POINTER_TYPE_P (TREE_TYPE (result)))
++	{
++	  if (abs_step != elem_size_cst)
++	    return false;
++	  loop_ptrs[loop].add (result);
++	}
++      else if (TREE_CODE (TREE_TYPE (result)) != INTEGER_TYPE
++	       || abs_step != 1)
++	return false;
++
++      tree init_var = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
++      auto init_range = get_var_range (header, init_var);
++      tree min = step > 0 ? init_range.min () : build_value (RANGE_NINF);
++      tree max = step > 0 ? build_value (RANGE_INF) : init_range.max ();
++      auto new_range = value_range{min, max};
++      for (auto bb : get_all_dominated_blocks (CDI_DOMINATORS, header))
++	{
++	  if (!find_var_range (result, bb))
++	    {
++	      var_range[result][bb] = new_range;
++	      continue;
++	    }
++
++	  auto &range = var_range[result][bb];
++	  min = max_tree (bb, min, range.min ());
++	  max = min_tree (bb, max, range.max ());
++	  if (!min || !max)
++	    return false;
++	  range = value_range {min, max};
++	}
++    }
++
++  if (!check_loop_exits (loop))
++    return false;
++
++  return true;
++}
++
++bool
++array_dse_callee::check_loop_exits (loop_p loop)
++{
++  for (auto edge : get_loop_exit_edges (loop))
++    {
++      gimple *stmt = gsi_stmt (gsi_last_bb (edge->src));
++      if (gimple_code (stmt) != GIMPLE_COND)
++	continue;
++
++      gcond *cond = as_a (stmt);
++      tree lhs = gimple_cond_lhs (cond);
++      tree rhs = gimple_cond_rhs (cond);
++
++      bool lhs_cand_p = loop_var_p (loop, lhs) || iterate_var_p (loop, lhs);
++      bool rhs_cand_p = loop_var_p (loop, rhs) || iterate_var_p (loop, rhs);
++      if (!lhs_cand_p && !rhs_cand_p)
++	continue;
++
++      tree step = nullptr;
++      if (POINTER_TYPE_P (TREE_TYPE (lhs))
++	  && POINTER_TYPE_P (TREE_TYPE (rhs)))
++	{
++	  if (TREE_CODE (lhs) != SSA_NAME || TREE_CODE (rhs) != SSA_NAME)
++	    return false;
++	  step = elem_size;
++	}
++      else if (TREE_CODE (TREE_TYPE (lhs)) == INTEGER_TYPE
++	       && TREE_CODE (TREE_TYPE (rhs)) == INTEGER_TYPE)
++	{
++	  if (!lhs_cand_p && !integer_cst_p (rhs))
++	    return false;
++	  step = integer_one_node;
++	}
++      else
++	return false;
++
++      tree_code code = gimple_cond_code (cond);
++      if (edge->flags & EDGE_TRUE_VALUE)
++	code = opposite_cond_code (code);
++
++      if (!fill_loop_var_range (loop, code, lhs, rhs, step))
++	return false;
++
++      if (iterate_var_p (loop, lhs))
++	lhs = get_loop_var (loop, lhs);
++
++      if (!fill_loop_var_range (loop, code, lhs, rhs, step))
++	return false;
++    }
++
++  return true;
++}
++
++/* fill loop variable range according to loop exit's condition and step.  */
++
++bool
++array_dse_callee::fill_loop_var_range (loop_p loop, tree_code code,
++				       tree lhs, tree rhs, tree step)
++{
++  for (auto bb : get_all_dominated_blocks (CDI_DOMINATORS, loop->header))
++    {
++      auto lhs_range = get_var_range (bb, lhs);
++      auto rhs_range = get_var_range (bb, rhs);
++      tree lhs_min = lhs_range.min ();
++      tree lhs_max = lhs_range.max ();
++      tree rhs_min = rhs_range.min ();
++      tree rhs_max = rhs_range.max ();
++      bool in_loop = flow_bb_inside_loop_p (loop, bb);
++
++      switch (code)
++	{
++	  case LT_EXPR:
++	    lhs_max = in_loop ? minus_tree (rhs_max, step) : rhs_max;
++	    rhs_min = in_loop ? plus_tree (lhs_min, step) : lhs_min;
++	    break;
++	  case LE_EXPR:
++	    lhs_max = in_loop ? rhs_max : plus_tree (rhs_max, step);
++	    rhs_min = in_loop ? lhs_min : minus_tree (lhs_min, step);
++	    break;
++	  case GT_EXPR:
++	    lhs_min = in_loop ? plus_tree (rhs_min, step) : rhs_min;
++	    rhs_max = in_loop ? minus_tree (lhs_max, step) : lhs_max;
++	    break;
++	  case GE_EXPR:
++	    lhs_min = in_loop ? rhs_min : minus_tree (rhs_min, step);
++	    rhs_max = in_loop ? lhs_max : plus_tree (lhs_max, step);
++	    break;
++	  default:
++	    return false;
++	}
++
++      if (loop_var_p (loop, lhs) || iterate_var_p (loop, lhs))
++	var_range[lhs][bb] = value_range{lhs_min, lhs_max};
++      if (loop_var_p (loop, rhs) || iterate_var_p (loop, rhs))
++	var_range[rhs][bb] = value_range{rhs_min, rhs_max};
++
++      if (integer_onep (step) && loop_var_p (loop, lhs)
++	  && !fill_loop_ptr_range (loop, bb, minus_tree (lhs_max, lhs_min)))
++	return false;
++    }
++
++  return true;
++}
++
++/* If the variable in loop exit's condition is a integer, like
++     for (i = 0; i < n; i++)
++
++   fill other pointers' range in the same loop.
++ */
++
++bool
++array_dse_callee::fill_loop_ptr_range (loop_p loop, basic_block bb,
++				       tree loop_length)
++{
++  if (!loop_length)
++    return false;
++
++  auto ret = compare_tree (bb, loop_length, integer_zero_node);
++  if (!ret)
++    return false;
++
++  if (ret == LT)
++    {
++      unreachable_blocks.add (bb);
++      return true;
++    }
++
++  tree length = mult_tree (bb, loop_length, elem_size);
++  if (!length)
++    return false;
++
++  for (auto ptr : loop_ptrs[loop])
++    {
++      auto &range = var_range[ptr][bb];
++      tree min = range.min ();
++      tree max = range.max ();
++      if (infinite_p (min) != infinite_kind::NON_INF)
++	{
++	  if (infinite_p (max) != infinite_kind::NON_INF)
++	    return false;
++	  min = minus_tree (max, length);
++	}
++      else if (infinite_p (max) != infinite_kind::NON_INF)
++	{
++	  if (infinite_p (min) != infinite_kind::NON_INF)
++	    return false;
++	  max = plus_tree (min, length);
++	}
++      else
++	return false;
++
++      range = value_range{min, max};
++    }
++
++  return true;
++}
++
++bool
++array_dse_callee::loop_header_p (basic_block bb)
++{
++  return bb_loop_header_p (bb)
++	 && (!main_loop || bb->loop_father != main_loop);
++}
++
++bool
++array_dse_callee::iterate_var_p (loop_p loop, tree var)
++{
++  if (!var)
++    return false;
++
++  tree loop_var = get_loop_var (loop, var);
++  return loop_var && loop_var != var;
++}
++
++/* Find the loop variable from the GIVEN var throught its def chain.  */
++
++tree
++array_dse_callee::get_loop_var (loop_p loop, tree var)
++{
++  if (TREE_CODE (var) != SSA_NAME || !SSA_NAME_VAR (var))
++    return nullptr;
++
++  tree result = nullptr;
++
++  auto_bitmap visited;
++  auto_vec<tree> worklist;
++  worklist.safe_push (var);
++
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++      if (TREE_CODE (var) != SSA_NAME)
++	return nullptr;
++
++      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      if (loop_var_p (loop, t) && SSA_NAME_VAR (t) == SSA_NAME_VAR (var))
++	{
++	  if (result && result != t)
++	    return nullptr;
++	  result = t;
++	  continue;
++	}
++
++      gimple *stmt = SSA_NAME_DEF_STMT (t);
++      basic_block bb = gimple_bb (stmt);
++      if (!bb || !flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
++	return nullptr;
++
++      if (gimple_code (stmt) == GIMPLE_PHI)
++	{
++	  for (unsigned i = 0; i < gimple_phi_num_args (stmt); i++)
++	    worklist.safe_push (gimple_phi_arg_def (stmt, i));
++	  continue;
++	}
++
++      if (!is_gimple_assign (stmt)
++	  || (gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR
++	      && gimple_assign_rhs_code (stmt) != PLUS_EXPR))
++	return nullptr;
++
++      worklist.safe_push (gimple_assign_rhs1 (stmt));
++    }
++
++  return result;
++}
++
++bool
++array_dse_callee::find_var_range (tree var, basic_block bb)
++{
++  auto iter1 = var_range.find (var);
++  if (iter1 == var_range.end ())
++    return false;
++
++  auto iter2 = iter1->second.find (bb);
++  return iter2 != iter1->second.end ();
++}
++
++array_dse_edge::array_dse_edge (cgraph_edge *edge, array_dse_callee *callee)
++  : call_edge (edge),
++    callee (callee)
++{
++}
++
++bool
++array_dse_edge::analyze ()
++{
++  cfun_saver save (call_edge->caller, LOOPS_NORMAL);
++
++  if (gimple_call_num_args (call_edge->call_stmt) != callee->PARAM_NUM)
++    return false;
++
++  return find_local_array_from_arg () && check_array_usage ()
++	 && check_len_arg_range ();
++}
++
++bool
++array_dse_edge::fully_redundant ()
++{
++  return array_arg_start > read_upper_bound;
++}
++
++tree
++array_dse_edge::get_bound_addr ()
++{
++  unsigned HOST_WIDE_INT bound_size = (read_upper_bound + 1) * elem_size;
++  tree bound_size_expr = build_int_cst (size_type_node, bound_size);
++
++  tree addr_type = build_pointer_type (TREE_TYPE (array));
++  tree array_addr = build1 (ADDR_EXPR, addr_type, array);
++
++  return build2 (POINTER_PLUS_EXPR, addr_type, array_addr, bound_size_expr);
++}
++
++/* Find the local array used by call argument.  */
++
++bool
++array_dse_edge::find_local_array_from_arg ()
++{
++  tree arg = gimple_call_arg (call_edge->call_stmt, callee->array_param_index);
++
++  while (TREE_CODE (arg) == ADDR_EXPR || TREE_CODE (arg) == MEM_REF)
++    arg = TREE_OPERAND (arg, 0);
++
++  if (!arg || !VAR_P (arg) || TREE_CODE (TREE_TYPE (arg)) != ARRAY_TYPE
++      || decl_function_context (arg) != current_function_decl)
++    return false;
++
++  array = arg;
++  elem_size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (array))));
++  array_size = tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (array))) / elem_size;
++
++  return true;
++}
++
++bool
++array_dse_edge::check_array_usage ()
++{
++  if (!collect_array_accesses ())
++    return false;
++
++  if (!find_inner_array ())
++    return false;
++
++  for (auto [var, stmt] : array_accesses)
++    if (!check_access_kind (stmt, var))
++      return false;
++
++  collect_call_block_succs ();
++  if (!calc_read_bound () || !calc_array_arg_start ())
++    return false;
++
++  if (call_block_succs.contains (gimple_bb (call_edge->call_stmt))
++      && !check_optimized_area_rewrite ())
++    return false;
++
++  return true;
++}
++
++bool
++array_dse_edge::collect_array_accesses ()
++{
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      if (bb->flags & BB_IRREDUCIBLE_LOOP)
++	return false;
++
++      for (auto gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gphi *phi = as_a <gphi *> (gsi_stmt (gsi));
++	  tree result = gimple_phi_result (phi);
++	  if (walk_tree (&result, sub_expr_p, array, nullptr))
++	    if (!check_array_access (phi, result))
++	      return false;
++
++	  for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
++	    {
++	      tree arg = gimple_phi_arg_def (phi, i);
++	      if (!walk_tree (&arg, sub_expr_p, array, nullptr))
++		continue;
++
++	      if (!check_array_access (phi, arg))
++		return false;
++	    }
++	}
++
++      for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
++	{
++	  gimple *stmt = gsi_stmt (gsi);
++	  if (gimple_clobber_p (stmt) || call_stmt_p (stmt))
++	    continue;
++
++	  for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
++	    {
++	      tree var = gimple_op (stmt, i);
++	      if (!var)
++		continue;
++
++	      if (!walk_tree (&var, sub_expr_p, array, nullptr))
++		continue;
++
++	      if (!is_gimple_assign (stmt))
++		return false;
++
++	      if (!check_array_access (stmt, var))
++		return false;
++	    }
++	}
++
++    }
++
++  renumber_gimple_stmt_uids (cfun);
++
++  return !array_accesses.is_empty ();
++}
++
++bool
++array_dse_edge::check_array_access (gimple *stmt, tree var)
++{
++  if (array_ref_p (var))
++    return gimple_assign_single_p (stmt) && !array_accesses.put (var, stmt);
++
++  if (array_addr_p (var))
++    return check_array_address (stmt, var);
++
++  return false;
++}
++
++bool
++array_dse_edge::check_array_address (gimple *stmt, tree addr)
++{
++  if (gimple_code (stmt) == GIMPLE_PHI)
++    return check_array_address (as_a <gphi *> (stmt), addr);
++
++  if (is_gimple_assign (stmt))
++    return check_array_address (as_a <gassign *> (stmt), addr);
++
++  return false;
++}
++
++bool
++array_dse_edge::check_array_address (gphi *phi, tree addr)
++{
++  tree result = gimple_phi_result (phi);
++  if (TREE_CODE (result) != SSA_NAME)
++    return false;
++
++  if (array_address_vars.contains (result))
++    return true;
++
++  for (unsigned i = 0; i < gimple_phi_num_args (phi); i++)
++    {
++      tree arg = gimple_phi_arg_def (phi, i);
++      if (arg == addr)
++	continue;
++
++      if (TREE_CODE (arg) != SSA_NAME)
++	return false;
++
++      /* Only support simple loop variable: VAR is the initial address of
++	 phi RESULT and other ARG must be defined by RESULT + offset.  */
++      gimple *stmt = SSA_NAME_DEF_STMT (arg);
++      if (!is_gimple_assign (stmt)
++	  || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR
++	  || gimple_assign_rhs1 (stmt) != result)
++	return false;
++    }
++
++  array_address_vars.add (result);
++
++  return check_access_from_address (result);
++}
++
++bool
++array_dse_edge::check_array_address (gassign *assign, tree addr)
++{
++  if (!gimple_assign_single_p (assign)
++      && gimple_assign_rhs_code (assign) != POINTER_PLUS_EXPR
++      && gimple_assign_rhs1 (assign) != addr)
++    return false;
++
++  tree lhs = gimple_assign_lhs (assign);
++  if (TREE_CODE (lhs) != SSA_NAME)
++    return false;
++
++  array_address_vars.add (lhs);
++
++  return check_access_from_address (lhs);
++}
++
++bool
++array_dse_edge::check_access_from_address (tree addr)
++{
++  gimple *stmt;
++  imm_use_iterator iter;
++  FOR_EACH_IMM_USE_STMT (stmt, iter, addr)
++    {
++      for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
++	{
++	  tree op = gimple_op (stmt, i);
++	  if (walk_tree (&op, sub_expr_p, addr, nullptr)
++	      && !check_array_access (stmt, op))
++	    return false;
++	}
++    }
++
++  return true;
++}
++
++bool
++array_dse_edge::check_access_kind (gimple *stmt, tree var)
++{
++  gcc_assert (gimple_assign_single_p (stmt));
++
++  auto &kind = access_kinds.get_or_insert (var);
++
++  tree lhs = gimple_assign_lhs (stmt);
++  if (var == lhs)
++    {
++      kind = WRITE;
++      return true;
++    }
++
++  gcc_assert (var == gimple_assign_rhs1 (stmt));
++
++  if (!inner_array)
++    {
++      kind = READ;
++      return true;
++    }
++
++  auto_bitmap visited;
++  kind = check_access_kind_iterate (lhs, visited);
++  return kind != ACCESS_ERROR;
++}
++
++access_kind
++array_dse_edge::check_access_kind_iterate (tree var, auto_bitmap &visited)
++{
++  if (!var || TREE_CODE (var) != SSA_NAME)
++    return ACCESS_ERROR;
++
++  if (!bitmap_set_bit (visited, SSA_NAME_VERSION (var)))
++    return NONE;
++
++  int kind = NONE;
++
++  imm_use_iterator iter;
++  gimple *stmt = nullptr;
++  FOR_EACH_IMM_USE_STMT (stmt, iter, var)
++    {
++      if (walk_stmt_load_store_ops (stmt, var, find_base, nullptr))
++	kind |= READ;
++
++      if (walk_stmt_load_store_ops (stmt, var, nullptr, find_base))
++	kind |= WRITE;
++
++      if (kind)
++	continue;
++
++      tree next_var = nullptr;
++      if (is_gimple_assign (stmt))
++	{
++	  if ((!gimple_assign_single_p (stmt) && !gimple_assign_cast_p (stmt))
++	      || gimple_assign_rhs1 (stmt) != var)
++	    return ACCESS_ERROR;
++
++	  tree lhs = gimple_assign_lhs (stmt);
++	  if (array_ref_p (lhs))
++	    {
++	      kind |= READ;
++	      continue;
++	    }
++
++	  next_var = lhs;
++	}
++      else if (gimple_code (stmt) == GIMPLE_PHI)
++	{
++	  if (gimple_phi_arg_p (stmt, var))
++	    next_var = gimple_phi_result (stmt);
++	}
++      else if (gimple_code (stmt) == GIMPLE_COND)
++	{
++	  if (gimple_cond_lhs (stmt) == var || gimple_cond_rhs (stmt) == var)
++	    {
++	      kind |= READ;
++	      continue;
++	    }
++	}
++
++      access_kind next_kind = check_access_kind_iterate (next_var, visited);
++      if (next_kind == ACCESS_ERROR)
++	return ACCESS_ERROR;
++	
++      kind |= next_kind;
++    }
++
++  return static_cast <access_kind> (kind);
++}
++
++bool
++array_dse_edge::find_inner_array ()
++{
++  tree type = TREE_TYPE (array);
++  unsigned ptr_layers = get_ptr_layers (type);
++  gcc_assert (ptr_layers);
++
++  /* No inner source array.  */
++  if (ptr_layers == 1)
++    {
++      inner_elem_type = TREE_TYPE (array);
++      return true;
++    }
++
++  /* It's hard to trace all source of array.  */
++  if (ptr_layers > 2
++      || TREE_CODE (TREE_TYPE (TREE_TYPE (type))) != RECORD_TYPE)
++    return false;
++
++  inner_elem_type = TREE_TYPE (TREE_TYPE (type));
++
++  for (auto [var, stmt] : array_accesses)
++    {
++      tree lhs = gimple_get_lhs (stmt);
++      if (lhs != var)
++	continue;
++
++      if (!array_ref_p (lhs) || !is_gimple_assign (stmt))
++	return false;
++
++      tree rhs = gimple_assign_rhs1 (stmt);
++      if (array_ref_p (rhs))
++	continue;
++
++      if (TREE_CODE (rhs) != SSA_NAME)
++	return false;
++
++      tree base = rhs;
++      gimple *def_stmt = SSA_NAME_DEF_STMT (base);
++      while (is_gimple_assign (def_stmt)
++	     && gimple_assign_rhs_code (def_stmt) == POINTER_PLUS_EXPR)
++	{
++	  base = gimple_assign_rhs1 (def_stmt);
++	  if (TREE_CODE (base) != SSA_NAME)
++	    return false;
++	  def_stmt = SSA_NAME_DEF_STMT (base);
++	}
++
++      if (!gimple_call_builtin_p (def_stmt, BUILT_IN_CALLOC))
++	return false;
++
++      /* Only support unique source. The inner_array must be used only once,
++	 assigned its address to the candidate array.  */
++      if (inner_array)
++	return false;
++
++      /* array: T *[], base: T *.  */
++      if (TREE_TYPE (TREE_TYPE (array)) != TREE_TYPE (base))
++	return false;
++
++      if (!unique_use_p (base, stmt) || !initialize_assign_p (stmt))
++	return false;
++
++      inner_array = base;
++    }
++
++  return true;
++}
++
++bool
++array_dse_edge::unique_use_p (tree var, gimple *unique_assign) const
++{
++  auto_vec<tree> worklist;
++  auto_bitmap visited;
++  worklist.safe_push (var);
++
++  while (!worklist.is_empty ())
++    {
++      tree t = worklist.pop ();
++      if (TREE_CODE (t) != SSA_NAME)
++	return false;
++
++      if (!bitmap_set_bit (visited, SSA_NAME_VERSION (t)))
++	continue;
++
++      imm_use_iterator iter;
++      gimple *stmt = nullptr;
++      FOR_EACH_IMM_USE_STMT (stmt, iter, t)
++	{
++	  if (gimple_call_builtin_p (stmt, BUILT_IN_FREE))
++	    continue;
++
++	  if (!is_gimple_assign (stmt))
++	    return false;
++
++	  if (stmt == unique_assign)
++	    continue;
++
++	  worklist.safe_push (gimple_assign_lhs (stmt));
++	}
++    }
++
++  return true;
++}
++
++bool
++array_dse_edge::initialize_assign_p (gimple *stmt) const
++{
++  if (!stmt || !gimple_bb (stmt))
++    return false;
++
++  hash_set<basic_block> preds;
++  auto_vec<basic_block> worklist;
++  worklist.safe_push (gimple_bb (stmt));
++
++  while (!worklist.is_empty ())
++    {
++      basic_block bb = worklist.pop ();
++      if (preds.add (bb))
++	continue;
++
++      for (auto e : bb->preds)
++	worklist.safe_push (e->src);
++    }
++
++  for (auto [var, access_stmt] : array_accesses)
++    {
++      if (access_stmt == stmt)
++	continue;
++
++      if (preds.contains (gimple_bb (access_stmt)))
++	return false;
++    }
++
++  return true;
++}
++
++bool
++array_dse_edge::calc_read_bound ()
++{
++  for (auto [var, stmt] : array_accesses)
++    {
++      if (!after_call_stmt_p (stmt) || !read_array_p (var))
++	continue;
++
++      auto range = calc_ref_range (var);
++      if (!integer_cst_p (range.max ()))
++	return false;
++
++      auto max = tree_to_shwi (range.max ());
++      if (max % elem_size)
++	return false;
++
++      if (max / elem_size > read_upper_bound)
++	read_upper_bound = max / elem_size;
++    }
++
++  return true;
++}
++
++value_range
++array_dse_edge::calc_ref_range (tree var)
++{
++  tree_code code = TREE_CODE (var);
++  /* Array_ref's second op is an index.  Convert it to address offset.  */
++  if (code == ARRAY_REF)
++    {
++      auto r = calc_offset_range (TREE_OPERAND (var, 1));
++      if (r.varying_p ())
++	return r;
++
++      gcc_assert (integer_cst_p (r.min ()) && integer_cst_p (r.max ()));
++      return make_range (tree_to_shwi (r.min ()) * elem_size,
++			 tree_to_shwi (r.max ()) * elem_size);
++    }
++
++  gcc_assert (code == MEM_REF);
++  auto r1 = calc_addr_range (TREE_OPERAND (var, 0));
++  auto r2 = calc_offset_range (TREE_OPERAND (var, 1));
++
++  return value_range{plus_tree (r1.min (), r2.min ()),
++		     plus_tree (r1.max (), r2.max ())};
++}
++
++value_range
++array_dse_edge::calc_addr_range (tree var)
++{
++  if (array_address_vars.contains (var))
++    {
++      gcc_assert (TREE_CODE (var) == SSA_NAME);
++      gimple *stmt = SSA_NAME_DEF_STMT (var);
++      if (is_gimple_assign (stmt))
++	{
++	  auto r1 = calc_addr_range (gimple_assign_rhs1 (stmt));
++	  auto r2 = calc_offset_range (gimple_assign_rhs2 (stmt));
++	  return value_range{plus_tree (r1.min (), r2.min ()),
++			     plus_tree (r1.max (), r2.max ())};
++	}
++
++      return calc_simple_loop_range (var);
++    }
++
++  if (TREE_CODE (var) != ADDR_EXPR)
++    return value_range{RANGE_TYPE};
++
++  tree op = TREE_OPERAND (var, 0);
++  if (op == array)
++    return make_range (0);
++
++  if (!array_ref_p (op))
++    return value_range{RANGE_TYPE};
++
++  return calc_ref_range (op);
++}
++
++value_range
++array_dse_edge::calc_offset_range (tree offset)
++{
++  tree var = strip_ssa_copy (offset);
++  if (integer_cst_p (var))
++    return make_range (tree_to_shwi (var));
++
++  if (TREE_CODE (var) != SSA_NAME)
++    return value_range{RANGE_TYPE};
++
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  if (gimple_code (stmt) == GIMPLE_PHI)
++    return calc_simple_loop_range (var);
++
++  if (!is_gimple_assign (stmt) || gimple_assign_rhs_code (stmt) != MULT_EXPR
++      || !integer_cst_p (gimple_assign_rhs2 (stmt))
++      || TREE_INT_CST_LOW (gimple_assign_rhs2 (stmt)) != elem_size)
++    return value_range{RANGE_TYPE};
++
++  auto range = calc_offset_range (gimple_assign_rhs1 (stmt));
++  if (!integer_cst_p (range.min ()) || !integer_cst_p (range.max ()))
++    return value_range{RANGE_TYPE};
++
++  return make_range (tree_to_shwi (range.min ()) * elem_size,
++		     tree_to_shwi (range.max ()) * elem_size);
++}
++
++value_range
++array_dse_edge::calc_simple_loop_range (tree var)
++{
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  basic_block bb = gimple_bb (stmt);
++  loop_p loop = bb->loop_father;
++
++  if (!loop || loop->header != bb || !loop->any_upper_bound)
++    return value_range{RANGE_TYPE};
++
++  tree init_var = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
++  tree iterate_var = PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop));
++
++  value_range init_range;
++  if (array_addr_p (init_var))
++    init_range = calc_addr_range (init_var);
++  else
++    init_range = calc_offset_range (init_var);
++
++  if (!init_range.singleton_p () || !integer_cst_p (init_range.min ()))
++    return value_range{RANGE_TYPE};
++
++  HOST_WIDE_INT init_value = tree_to_shwi (init_range.min ());
++  int step = calc_loop_var_step (var, iterate_var);
++  int upper_bound = loop->nb_iterations_upper_bound.to_shwi ();
++
++  return make_range (init_value, init_value + step * upper_bound);
++}
++
++void
++array_dse_edge::collect_call_block_succs ()
++{
++  basic_block call_block = gimple_bb (call_edge->call_stmt);
++  auto_vec<basic_block> worklist;
++  for (auto e : call_block->succs)
++    worklist.safe_push (e->dest);
++
++  while (!worklist.is_empty ())
++    {
++      basic_block bb = worklist.pop ();
++      if (call_block_succs.add (bb))
++	continue;
++
++      for (auto e : bb->succs)
++	worklist.safe_push (e->dest);
++    }
++}
++
++bool
++array_dse_edge::calc_array_arg_start ()
++{
++  tree array_arg = gimple_call_arg (call_edge->call_stmt,
++				    callee->array_param_index);
++  if (!array_addr_p (array_arg))
++    return false;
++
++  auto range = calc_addr_range (array_arg);
++  if (!range.singleton_p () || !integer_cst_p (range.min ()))
++    return false;
++
++  auto value = tree_to_shwi (range.min ());
++  if (value % elem_size)
++    return false;
++
++  array_arg_start = value / elem_size;
++  return true;
++}
++
++bool
++array_dse_edge::check_optimized_area_rewrite ()
++{
++  tree arg = gimple_call_arg (call_edge->call_stmt, callee->len_param_index);
++  if (!arg)
++    return false;
++
++  tree var = strip_ssa_copy (arg);
++  if (!var || TREE_CODE (var) != SSA_NAME)
++    return false;
++
++  gimple *stmt = SSA_NAME_DEF_STMT (var);
++  loop_p loop = gimple_bb (stmt)->loop_father;
++  if (!loop || !loop_var_p (loop, var))
++    return false;
++
++  /* To make sure the optimized area of array is fully rewritten, the loop
++     step must be 1.  We only support one iterate stmt now.  */
++  tree iterate_var = PHI_ARG_DEF_FROM_EDGE (stmt, loop_latch_edge (loop));
++  hash_set<gimple *> iterate_stmts;
++  if (calc_loop_var_step (var, iterate_var, &iterate_stmts) != 1
++      || iterate_stmts.elements () != 1)
++    return false;
++
++  gimple *iter_stmt = *iterate_stmts.begin ();
++  if (gimple_assign_rhs1 (iter_stmt) != var)
++    return false;
++
++  /* Check if the element has been fully written.  */
++  basic_block bb = gimple_bb (iter_stmt);
++  if (!check_full_write_elem (bb, gimple_assign_lhs (iter_stmt)))
++    return false;
++
++  /* Check if the start address is less equal than the read_upper_bound.  */
++  tree init_var = PHI_ARG_DEF_FROM_EDGE (stmt, loop_preheader_edge (loop));
++  init_var = strip_init_var (init_var, var);
++  if (!init_var)
++    return false;
++
++  auto range = calc_offset_range (init_var);
++  if (!integer_cst_p (range.min ()) || !integer_cst_p (range.max ()))
++    return false;
++
++  len_arg_min = tree_to_shwi (range.min ());
++
++  return tree_to_shwi (range.max ()) + array_arg_start <= read_upper_bound;
++}
++
++bool
++array_dse_edge::check_full_write_elem (basic_block bb, tree index)
++{
++  hash_set<tree> visited_fields;
++
++  for (auto [var, stmt] : array_accesses)
++    {
++      /* Must in the same block.  */
++      if (gimple_bb (stmt) != bb)
++	continue;
++
++      if (!write_array_p (var))
++	continue;
++
++      if (TREE_CODE (var) != MEM_REF
++	  || !array_index_of_addr_p (index, TREE_OPERAND (var, 0))
++	  || !integer_zerop (TREE_OPERAND (var, 1)))
++	continue;
++
++      /* Directly write to array.  */
++      tree lhs = gimple_assign_lhs (stmt);
++      if (var == lhs)
++	return true;
++      else if (!inner_array)
++	continue;
++
++      imm_use_iterator iter;
++      gimple *use_stmt = nullptr;
++      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
++	{
++	  tree ref = gimple_get_lhs (use_stmt);
++	  if (!ref || TREE_CODE (ref) != COMPONENT_REF)
++	    continue;
++
++	  visited_fields.add (TREE_OPERAND (ref, 1));
++	}
++    }
++
++  return inner_array
++	 && visited_fields.elements () == fields_length (inner_elem_type);
++}
++
++bool
++array_dse_edge::array_index_of_addr_p (tree index, tree addr)
++{
++  if (TREE_CODE (index) != SSA_NAME || TREE_CODE (addr) != SSA_NAME)
++    return false;
++
++  /* Check pattern: addr = &array + offset.  */
++  gimple *stmt = SSA_NAME_DEF_STMT (addr);
++  if (!is_gimple_assign (stmt)
++      || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
++    return false;
++
++  tree rhs1 = gimple_assign_rhs1 (stmt);
++  if (TREE_CODE (rhs1) != ADDR_EXPR || TREE_OPERAND (rhs1, 0) != array)
++    return false;
++
++  tree rhs2 = gimple_assign_rhs2 (stmt);
++  if (TREE_CODE (rhs2) != SSA_NAME)
++    return false;
++
++  /* Check pattern: offset = index * elem_size.  */
++  stmt = SSA_NAME_DEF_STMT (rhs2);
++  if (!is_gimple_assign (stmt) || gimple_assign_rhs_code (stmt) != MULT_EXPR)
++    return false;
++
++  rhs1 = gimple_assign_rhs1 (stmt);
++  rhs2 = gimple_assign_rhs2 (stmt);
++
++  return strip_ssa_copy (rhs1) == index && integer_cst_p (rhs2)
++	 && TREE_INT_CST_LOW (rhs2) == elem_size;
++}
++
++tree
++array_dse_edge::strip_init_var (tree init_var, tree var)
++{
++  tree last = var;
++  while (true)
++    {
++      if (TREE_CODE (init_var) != SSA_NAME)
++	break;
++
++      gimple *stmt = SSA_NAME_DEF_STMT (init_var);
++      loop_p loop = gimple_bb (stmt)->loop_father;
++      if (!loop || !loop_var_p (loop, init_var))
++	break;
++
++      auto latch_edge = loop_latch_edge (loop);
++      auto preheader_edge = loop_preheader_edge (loop);
++      if (!latch_edge || !preheader_edge
++	  || PHI_ARG_DEF_FROM_EDGE (stmt, latch_edge) != last)
++	break;
++
++      last = init_var;
++      init_var = PHI_ARG_DEF_FROM_EDGE (stmt, preheader_edge);
++    }
++
++  return strip_ssa_copy (init_var);
++}
++
++bool
++array_dse_edge::check_len_arg_range ()
++{
++  return check_len_arg_lower_bound ()
++	 && check_len_arg_upper_bound ();
++}
++
++/* Check: ARG >= 1 (assumption in callee analysis).  */
++
++bool
++array_dse_edge::check_len_arg_lower_bound ()
++{
++  if (len_arg_min >= 1)
++    return true;
++
++  /* If the len_arg_min recorded previous doesn't meet the condition, try to
++     update it by checking condition jump.  */
++  tree arg = gimple_call_arg (call_edge->call_stmt, callee->len_param_index);
++  if (!arg)
++    return false;
++
++  tree var = strip_ssa_copy (arg);
++  if (!var || TREE_CODE (var) != SSA_NAME)
++    return false;
++
++  basic_block call_block = gimple_bb (call_edge->call_stmt);
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, cfun)
++    {
++      gimple *stmt = last_stmt (bb);
++      if (!stmt || gimple_code (stmt) != GIMPLE_COND)
++	continue;
++
++      tree_code code = gimple_cond_code (stmt);
++      tree lhs = gimple_cond_lhs (stmt);
++      tree rhs = gimple_cond_rhs (stmt);
++
++      if (lhs != var || !integer_cst_p (rhs))
++	continue;
++
++      edge true_edge;
++      edge false_edge;
++      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
++      if (!true_edge || !false_edge)
++	continue;
++
++      if (dominated_by_p (CDI_DOMINATORS, call_block, false_edge->dest))
++	code = opposite_cond_code (code);
++      else if (!dominated_by_p (CDI_DOMINATORS, call_block, true_edge->dest))
++	continue;
++
++      HOST_WIDE_INT rvalue = TREE_INT_CST_LOW (rhs);
++      switch (code)
++	{
++	  case GT_EXPR:
++	    len_arg_min = std::max (len_arg_min, rvalue + 1);
++	    break;
++	  case GE_EXPR:
++	    [[fallthrough]];
++	  case EQ_EXPR:
++	    len_arg_min = std::max (len_arg_min, rvalue);
++	    break;
++	  case NE_EXPR:
++	    if (len_arg_min == rvalue)
++	      len_arg_min++;
++	    break;
++	  default:
++	    break;
++	}
++    }
++
++  return len_arg_min >= 1;
++}
++
++/* We can assume that the array will not be accessed out of bounds.
++   So we use array_size as the upper bound of len arg.  */
++
++bool
++array_dse_edge::check_len_arg_upper_bound ()
++{
++  return array_size <= callee->get_len_param_max ();
++}
++
++bool
++array_dse_edge::after_call_stmt_p (gimple *stmt)
++{
++  if (call_stmt_p (stmt))
++    return false;
++
++  basic_block bb = gimple_bb (stmt);
++  if (bb == gimple_bb (call_edge->call_stmt)
++      && gimple_uid (stmt) > gimple_uid (call_edge->call_stmt))
++    return true;
++
++  return call_block_succs.contains (bb);
++}
++
++bool
++array_dse_edge::write_array_p (tree var)
++{
++  auto *kind = access_kinds.get (var);
++  if (!kind)
++    return false;
++
++  return *kind & WRITE;
++}
++
++bool
++array_dse_edge::read_array_p (tree var)
++{
++  auto *kind = access_kinds.get (var);
++  if (!kind)
++    return false;
++
++  return *kind & READ;
++}
++
++bool
++array_dse_edge::call_stmt_p (gimple *stmt) const
++{
++  return stmt == call_edge->call_stmt;
++}
++
++bool
++array_dse_edge::array_ref_p (tree var)
++{
++  if (!var)
++    return false;
++
++  if (TREE_CODE (var) == ARRAY_REF)
++    return TREE_OPERAND (var, 0) == array;
++
++  return (TREE_CODE (var) == MEM_REF && array_addr_p (TREE_OPERAND (var, 0)));
++}
++
++bool
++array_dse_edge::array_addr_p (tree var)
++{
++  if (array_address_vars.contains (var))
++    return true;
++
++  if (TREE_CODE (var) != ADDR_EXPR)
++    return false;
++
++  tree op = TREE_OPERAND (var, 0);
++  return op == array || array_ref_p (op);
++}
++
++unsigned
++ipa_array_dse::execute ()
++{
++  cgraph_node *node;
++  FOR_EACH_FUNCTION (node)
++    {
++      if (!node->real_symbol_p () || !node->definition
++	  || !node->has_gimple_body_p () || node->inlined_to)
++	continue;
++      node->get_body ();
++
++      if (!DECL_STRUCT_FUNCTION (node->decl))
++	continue;
++
++      nodes.safe_push (node);
++    }
++
++  if (!find_array_dse_candidate_callees ())
++    {
++      if (dump_file)
++	fprintf (dump_file, "Fail finding array dse candidate callees\n");
++      return 0;
++    }
++
++  if (!find_array_dse_candidate_edges ())
++    {
++      if (dump_file)
++	fprintf (dump_file, "Fail finding array dse candidate edges\n");
++      return 0;
++    }
++
++  for (auto edge : candidate_edges)
++    apply_array_dse (edge);
++
++  symtab->remove_unreachable_nodes (dump_file);
++
++  return TODO_update_ssa;
++}
++
++bool
++ipa_array_dse::find_array_dse_candidate_callees ()
++{
++  if (dump_file)
++    fprintf (dump_file, "Finding array dse candidate callees\n\n");
++
++  for (auto node : nodes)
++    {
++      if (!tree_versionable_function_p (node->decl)
++	  || !opt_for_fn (node->decl, optimize))
++	continue;
++
++      const char *fn_name = node->dump_asm_name ();
++      if (dump_file)
++	fprintf (dump_file, "Analyzing callee: %s\n", fn_name);
++
++      auto *callee = new array_dse_callee (node);
++      if (!callee->analyze ())
++	{
++	  delete callee;
++	  continue;
++	}
++
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Found candidate callee: %s\n", fn_name);
++	  if (dump_flags & TDF_DETAILS)
++	    dump_function_to_file (node->decl, dump_file, dump_flags);
++	  fprintf (dump_file, "\n");
++	}
++
++      candidate_callees.safe_push (callee);
++    }
++
++  return !candidate_callees.is_empty ();
++}
++
++bool
++ipa_array_dse::find_array_dse_candidate_edges ()
++{
++  if (dump_file)
++    fprintf (dump_file, "Finding array dse candidate call edges\n\n");
++
++  for (auto *callee : candidate_callees)
++    {
++      cgraph_edge *e = callee->node->callers;
++      while (e && e->caller == callee->node)
++	e = e->next_caller;
++
++      for (auto *c : candidate_callees)
++	if (e->caller == c->node)
++	  return false;
++
++      auto *edge = new array_dse_edge (e, callee);
++      if (!edge->analyze ())
++	{
++	  delete edge;
++	  continue;
++	}
++
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Found candidate call edge: ");
++	  print_gimple_stmt (dump_file, e->call_stmt, 0, TDF_NONE);
++	  if (dump_flags & TDF_DETAILS)
++	    dump_function_to_file (e->caller->decl, dump_file, dump_flags);
++	  fprintf (dump_file, "\n");
++	}
++
++      candidate_edges.safe_push (edge);
++    }
++
++  return !candidate_edges.is_empty ();
++}
++
++bool
++ipa_array_dse::apply_array_dse (array_dse_edge *ad_edge)
++{
++  /* Remove call stmt if it's fully redundant.  */
++  if (ad_edge->fully_redundant ())
++    {
++      cgraph_node *caller = ad_edge->call_edge->caller;
++      gimple *call_stmt = ad_edge->call_edge->call_stmt;
++
++      cfun_saver save (caller);
++
++      auto gsi = gsi_for_stmt (call_stmt);
++      basic_block call_bb = gimple_bb (call_stmt);
++      tree fndecl = gimple_call_fndecl (call_stmt);
++      caller->remove_stmt_references (call_stmt);
++      unlink_stmt_vdef (call_stmt);
++      if (gsi_remove (&gsi, true))
++	gimple_purge_dead_eh_edges (call_bb);
++      cgraph_update_edges_for_call_stmt (call_stmt, fndecl, nullptr);
++
++      return true;
++    }
++
++  /* Insert array redundant bound check to callee.  */
++  array_dse_callee *callee = ad_edge->callee;
++  cgraph_node *orig_callee = ad_edge->callee->node;
++  cgraph_node *new_callee
++    = orig_callee->create_version_clone_with_body (vNULL, NULL, NULL, NULL,
++						   NULL, "array_dse", NULL);
++
++  if (!transform_new_callee (callee, new_callee))
++    return false;
++
++  tree bound_addr = ad_edge->get_bound_addr ();
++  rewrite_call_edge (ad_edge->call_edge, new_callee, bound_addr);
++
++  return true;
++}
++
++tree
++ipa_array_dse::add_bound_param (tree param)
++{
++  vec *new_params = NULL;
++  auto_vec arg_decls;
++
++  push_function_arg_decls (&arg_decls, cfun->decl);
++  gcc_checking_assert (!arg_decls.is_empty ());
++  vec_safe_reserve (new_params, arg_decls.length () + 1);
++
++  for (unsigned i = 0; i < arg_decls.length (); ++i)
++    {
++      ipa_adjusted_param adj;
++
++      memset (&adj, 0, sizeof (adj));
++
++      adj.type = TREE_TYPE (arg_decls[i]);
++      adj.base_index = i;
++      adj.prev_clone_index = i;
++      adj.op = IPA_PARAM_OP_COPY;
++      new_params->quick_push (adj);
++    }
++
++  tree param_name = DECL_NAME (param);
++  const char *name = concat (IDENTIFIER_POINTER (param_name), ".bound", NULL);
++  ipa_adjusted_param adj;
++  adj.type = TREE_TYPE (param);
++  adj.base_index = arg_decls.length ();
++  adj.prev_clone_index = arg_decls.length ();
++  adj.op = IPA_PARAM_OP_NEW;
++  new_params->quick_push (adj);
++
++  auto adjustments = new ipa_param_body_adjustments (new_params, cfun->decl);
++  adjustments->modify_formal_parameters ();
++  delete adjustments;
++
++  arg_decls.truncate (0);
++  push_function_arg_decls (&arg_decls, cfun->decl);
++
++  tree new_param = arg_decls.last ();
++  DECL_NAME (new_param) = get_identifier (name);
++
++  return get_or_create_ssa_default_def (cfun, new_param);
++}
++
++tree
++ipa_array_dse::find_array_main_var (array_dse_callee *callee)
++{
++  int i = 0;
++  tree param = DECL_ARGUMENTS (cfun->decl);
++  while (i++ < callee->array_param_index)
++    param = DECL_CHAIN (param);
++
++  tree name;
++  FOR_EACH_SSA_NAME (i, name, cfun)
++    {
++      if (!SSA_NAME_IS_DEFAULT_DEF (name)
++	  || SSA_NAME_VAR (name) != param)
++	continue;
++
++      if (!callee->main_loop)
++	return name;
++
++      use_operand_p use_p;
++      gimple *stmt;
++      if (!single_imm_use (name, &use_p, &stmt)
++	  || gimple_code (stmt) != GIMPLE_PHI)
++	return nullptr;
++
++      return gimple_phi_result (stmt);
++    }
++
++  return nullptr;
++}
++
++bool
++ipa_array_dse::transform_new_callee (array_dse_callee *callee,
++				     cgraph_node *new_node)
++{
++  cfun_saver save (new_node);
++
++  tree bound_ssa = add_bound_param (callee->array_param);
++  tree array = find_array_main_var (callee);
++  if (!array)
++    return false;
++
++  edge e;
++  if (callee->main_loop)
++    {
++      gimple *array_def_stmt = SSA_NAME_DEF_STMT (array);
++      basic_block array_def_bb = gimple_bb (array_def_stmt);
++      gcc_assert (gimple_code (array_def_stmt) == GIMPLE_PHI);
++      e = split_block_after_labels (array_def_bb);
++    }
++  else
++    {
++      basic_block entry_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
++      gcc_assert (single_succ_p (entry_bb));
++      basic_block bb = split_edge (single_succ_edge (entry_bb));
++      e = single_succ_edge (bb);
++    }
++
++  auto gsi = gsi_last_bb (e->src);
++  gimple *cond = gimple_build_cond (GE_EXPR, array, bound_ssa, nullptr,
++				    nullptr);
++  gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
++
++  edge return_edge = make_edge (e->src, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
++  basic_block return_bb = split_edge (return_edge);
++  auto return_gsi = gsi_last_bb (return_bb);
++  gsi_insert_after (&return_gsi, gimple_build_return (nullptr), GSI_NEW_STMT);
++
++  e->flags &= ~EDGE_FALLTHRU;
++  e->flags |= EDGE_FALSE_VALUE;
++  single_pred_edge (return_bb)->flags |= EDGE_TRUE_VALUE;
++  single_succ_edge (return_bb)->flags = 0;
++
++  for (auto call_edge = new_node->callees; call_edge;
++       call_edge = call_edge->next_callee)
++    rewrite_call_edge (call_edge, new_node, bound_ssa);
++
++  return true;
++}
++
++void
++ipa_array_dse::rewrite_call_edge (cgraph_edge *edge, cgraph_node *new_node,
++				  tree bound_addr)
++{
++  auto_vec args;
++  gcall *call_stmt = edge->call_stmt;
++  gimple_stmt_iterator gsi = gsi_for_stmt (call_stmt);
++  cgraph_node *caller = edge->caller;
++  cfun_saver save (caller);
++
++  for (unsigned i = 0; i < gimple_call_num_args (call_stmt); i++)
++    args.safe_push (gimple_call_arg (call_stmt, i));
++
++  bound_addr = force_gimple_operand_gsi (&gsi, bound_addr, true,
++					 NULL_TREE, true,
++					 GSI_SAME_STMT);
++  args.safe_push (bound_addr);
++
++  gcall *new_call = gimple_build_call_vec (new_node->decl, args);
++
++  if (tree vdef = gimple_vdef (call_stmt))
++    {
++      gimple_set_vdef (new_call, vdef);
++      SSA_NAME_DEF_STMT (vdef) = new_call;
++    }
++
++  gimple_set_vuse (new_call, gimple_vuse (call_stmt));
++  gimple_call_copy_flags (new_call, call_stmt);
++  gimple_call_set_chain (new_call, gimple_call_chain (call_stmt));
++  gsi_replace (&gsi, new_call, false);
++
++  cgraph_update_edges_for_call_stmt (call_stmt,
++				     gimple_call_fndecl (call_stmt), new_call);
++
++  caller->remove_stmt_references (call_stmt);
++  caller->record_stmt_references (new_call);
++}
++
++} // namespace array_dse
++
++namespace {
++
++const pass_data pass_data_ipa_array_dse =
++{
++  SIMPLE_IPA_PASS, /* type */
++  "array-dse", /* name */
++  OPTGROUP_NONE, /* optinfo_flags */
++  TV_IPA_ARRAY_DSE, /* tv_id */
++  PROP_cfg | PROP_ssa, /* properties_required */
++  0, /* properties_provided */
++  0, /* properties_destroyed */
++  0, /* todo_flags_start */
++  0, /* todo_flags_finish */
++};
++
++class pass_ipa_array_dse : public simple_ipa_opt_pass
++{
++public:
++  pass_ipa_array_dse (gcc::context *ctxt)
++    : simple_ipa_opt_pass (pass_data_ipa_array_dse, ctxt)
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *)
++    {
++      return optimize >= 3 && flag_ipa_array_dse;
++    }
++
++  virtual unsigned int execute (function *)
++    {
++      return array_dse::ipa_array_dse ().execute ();
++    }
++
++}; // class pass_ipa_array_dse
++
++} // anon namespace
++
++simple_ipa_opt_pass *
++make_pass_ipa_array_dse (gcc::context *ctxt)
++{
++  return new pass_ipa_array_dse (ctxt);
++}
+diff --git a/gcc/ipa-array-dse.h b/gcc/ipa-array-dse.h
+new file mode 100644
+index 000000000..b1f5ee611
+--- /dev/null
++++ b/gcc/ipa-array-dse.h
+@@ -0,0 +1,263 @@
++/* Array dead store elimination
++   Copyright (C) 2021-2022 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify
++it under the terms of the GNU General Public License as published by
++the Free Software Foundation; either version 3, or (at your option)
++any later version.
++
++GCC is distributed in the hope that it will be useful,
++but WITHOUT ANY WARRANTY; without even the implied warranty of
++MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++GNU General Public License for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#ifndef IPA_ARRAY_DSE_H
++#define IPA_ARRAY_DSE_H
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "options.h"
++#include "function.h"
++#include "cfgloop.h"
++#include "hash-map.h"
++#include "tree-core.h"
++#include "tree.h"
++#include "bitmap.h"
++#include "value-range.h"
++#include 
++
++namespace array_dse {
++
++enum compare_result
++{
++  COMPARE_ERROR = 0,
++  LT = 1,
++  EQ = 2,
++  GT = 4,
++  LE = LT | EQ,
++  GE = GT | EQ,
++  NE = LT | GT
++};
++
++enum access_kind
++{
++  NONE = 0,
++  READ = 1,
++  WRITE = 2,
++  ACCESS_ERROR = 4
++};
++
++enum class infinite_kind
++{
++  NON_INF,
++  INF,
++  NINF
++};
++
++/* Address analyzer.  */
++class addr_analyzer
++{
++public:
++  tree get_address (tree var);
++
++private:
++  tree analyze_address (tree var);
++
++private:
++  hash_map address_map;
++};
++
++class array_dse_callee
++{
++public:
++  array_dse_callee (cgraph_node *node);
++
++  bool analyze ();
++  unsigned HOST_WIDE_INT get_len_param_max () const;
++
++private:
++  tree mult_tree (basic_block bb, tree t1, tree t2);
++  tree div_tree (basic_block bb, tree t1, tree t2);
++  tree lshift_tree (tree t1, tree t2);
++  tree rshift_tree (tree t1, tree t2);
++  tree max_tree (basic_block bb, tree t1, tree t2);
++  tree min_tree (basic_block bb, tree t1, tree t2);
++  HOST_WIDE_INT calc_tree_value (tree t, HOST_WIDE_INT n_value);
++  value_range calc_tree_range (basic_block bb, tree t);
++  bool get_factor (tree t, double &factor);
++  bool positive_factor_p (tree t);
++  value_range build_range (basic_block bb, tree_code op,
++			   const value_range &r1, const value_range &r2);
++  compare_result compare_tree (basic_block bb, tree t1, tree t2);
++  compare_result compare_tree_by_minus (basic_block bb, tree t1, tree t2);
++  bool filter_function () const;
++  bool no_return_p () const;
++  bool find_main_vars ();
++  void find_tail_recursive_loop (tree *default_def);
++  bool find_candidate_array ();
++  bool find_length_param ();
++  void collect_read_write_ptrs ();
++  void calc_length_param_max ();
++  bool check_pointer (tree var, tree &len, unsigned &size);
++  bool check_offset (tree var, tree &len, unsigned &size,
++		     auto_vec &worklist);
++  bool check_mult_expr (gimple *stmt, tree &len, unsigned &size);
++  int find_param_index (tree base);
++  bool check_array_usage ();
++  void calc_len_range ();
++  void update_len_range (basic_block start, const value_range &new_range);
++  value_range invert_range (const value_range &range) const;
++  value_range get_range_from_cond (gimple *stmt);
++  value_range get_var_range (basic_block bb, tree offset);
++  bool calc_ptr_range ();
++  bool check_ptr_range ();
++  bool check_recursive_call_arg ();
++  void collect_recursive_call_args (auto_vec &array_args,
++				    auto_vec &len_args,
++				    auto_vec &blocks,
++				    auto_vec &is_tail_recursive_call);
++  void update_branch_range (basic_block bb);
++  tree build_recursive_offset (tree len);
++  tree build_recursive_ptr_range_max (basic_block bb, tree array,
++				      tree offset);
++  bool tail_recursive_call_p (gimple *stmt);
++  bool return_bb_p (basic_block bb) const;
++  bool calc_loop_var_range (loop_p loop);
++  bool check_loop_exits (loop_p loop);
++  bool fill_loop_var_range (loop_p loop, tree_code code, tree lhs,
++			    tree rhs, tree step);
++  bool fill_loop_ptr_range (loop_p loop, basic_block bb,
++			    tree count_length);
++  bool loop_header_p (basic_block bb);
++  bool iterate_var_p (loop_p loop, tree var);
++  tree get_loop_var (loop_p loop, tree iterate_var);
++  bool find_var_range (tree var, basic_block bb);
++
++public:
++  cgraph_node *node = nullptr;
++  tree array_param = nullptr;
++  tree len_param = nullptr;
++  int array_param_index = -1;
++  int len_param_index = -1;
++  tree array_main_var = nullptr;
++  tree len_main_var = nullptr;
++  tree signed_len_var = nullptr;
++  tree elem_size = nullptr;
++  unsigned elem_size_cst = 0;
++
++  loop_p main_loop = nullptr;
++
++  static constexpr unsigned PARAM_NUM = 2;
++
++private:
++  addr_analyzer analyzer;
++
++  hash_set all_ptrs;
++  hash_set visited_offset;
++  hash_map branch_start_map;
++  hash_map len_range_map;
++  std::map> var_range;
++  std::map> loop_ptrs;
++  hash_set unreachable_blocks;
++
++  static constexpr unsigned HOST_WIDE_INT len_param_min = 1;
++  unsigned HOST_WIDE_INT len_param_max = 0;
++};
++
++class array_dse_edge
++{
++public:
++  array_dse_edge (cgraph_edge *edge, array_dse_callee *callee);
++
++  bool analyze ();
++  bool fully_redundant ();
++  tree get_bound_addr ();
++
++private:
++  bool find_local_array_from_arg ();
++  bool check_array_usage ();
++  bool collect_array_accesses ();
++  bool check_array_access (gimple *stmt, tree var);
++  bool check_array_address (gimple *stmt, tree addr);
++  bool check_array_address (gphi *phi, tree addr);
++  bool check_array_address (gassign *assign, tree addr);
++  bool check_access_from_address (tree addr);
++  bool check_access_kind (gimple *stmt, tree var);
++  access_kind check_access_kind_iterate (tree var, auto_bitmap &visited);
++  bool find_inner_array ();
++  bool unique_use_p (tree source, gimple *unique_assign) const;
++  bool initialize_assign_p (gimple *stmt) const;
++  bool calc_read_bound ();
++  value_range calc_ref_range (tree var);
++  value_range calc_addr_range (tree var);
++  value_range calc_offset_range (tree var);
++  value_range calc_simple_loop_range (tree var);
++  void collect_call_block_succs ();
++  bool calc_array_arg_start ();
++  bool check_optimized_area_rewrite ();
++  bool check_full_write_elem (basic_block bb, tree var);
++  bool array_index_of_addr_p (tree index, tree addr);
++  tree strip_init_var (tree var, tree last_var);
++  bool check_len_arg_range ();
++  bool check_len_arg_lower_bound ();
++  bool check_len_arg_upper_bound ();
++  bool after_call_stmt_p (gimple *stmt);
++  bool write_array_p (tree var);
++  bool read_array_p (tree var);
++  bool call_stmt_p (gimple *stmt) const;
++  bool array_ref_p (tree var);
++  bool array_addr_p (tree var);
++
++public:
++  cgraph_edge *call_edge = nullptr;
++  array_dse_callee *callee = nullptr;
++
++  tree array = nullptr;
++
++private:
++  unsigned array_size = 0;
++  unsigned elem_size = 0;
++  tree inner_array = nullptr;
++  tree inner_elem_type = nullptr;
++
++  hash_map array_accesses;
++  hash_map access_kinds;
++  hash_set array_address_vars;
++  hash_set call_block_succs;
++
++  HOST_WIDE_INT read_upper_bound = 0;
++  HOST_WIDE_INT array_arg_start = 0;
++  HOST_WIDE_INT len_arg_min = 0;
++};
++
++class ipa_array_dse
++{
++public:
++  unsigned execute ();
++
++private:
++  bool find_array_dse_candidate_callees ();
++  bool find_array_dse_candidate_edges ();
++  bool apply_array_dse (array_dse_edge *edge);
++  tree add_bound_param (tree param);
++  tree find_array_main_var (array_dse_callee *callee);
++  bool transform_new_callee (array_dse_callee *callee, cgraph_node *new_node);
++  void rewrite_call_edge (cgraph_edge *edge, cgraph_node *new_node,
++			  tree bound_ssa);
++
++private:
++  auto_vec nodes;
++  auto_delete_vec candidate_callees;
++  auto_delete_vec candidate_edges;
++};
++
++}
++
++#endif
+diff --git a/gcc/ipa-localize-array.cc b/gcc/ipa-localize-array.cc
+new file mode 100644
+index 000000000..4678756b2
+--- /dev/null
++++ b/gcc/ipa-localize-array.cc
+@@ -0,0 +1,614 @@
++/* IPA optimization to transform global calloced array to be
++   specific function local.
++   Copyright (C) 2021-2022 Free Software Foundation, Inc.
++
++This file is part of GCC.
++
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include "config.h"
++#include "system.h"
++#include "coretypes.h"
++#include "backend.h"
++#include "tree.h"
++#include "gimple.h"
++#include "gimple-iterator.h"
++#include "ssa.h"
++#include "tree-pass.h"
++#include "tree-cfg.h"
++#include "gimplify.h"
++#include "gimple-pretty-print.h"
++#include "tree-into-ssa.h"
++#include "ipa-utils.h"
++#include "fold-const.h"
++#include "tree-dfa.h"
++#include "cfgloop.h"
++
++class array_localizer
++{
++public:
++  array_localizer (varpool_node *var);
++  void localize ();
++
++private:
++  bool scalar_type_p (tree type);
++  bool scalar_memop_p (tree ref_val, gimple *use_stmt);
++  bool stmt_dominated_by_p (enum cdi_direction dir, gimple *stmt0,
++			    gimple *stmt1);
++  gimple *find_calloc_stmt (gimple *stmt);
++  gimple *find_free_stmt (tree var);
++  bool check_var_store ();
++  bool check_var_load ();
++  bool find_call_edge ();
++  void remove_referring_stmt (gimple *stmt);
++  void replace_store_with_ssa (gimple *stmt, tree var_ssa);
++  void replace_load_with_ssa (gimple *stmt, tree var_ssa);
++  gimple *copy_call_without_location (gimple *stmt);
++  void rewrite_array ();
++  void insert_new_init (tree var_ssa);
++  void insert_new_alloc_free (tree var_ssa);
++  void rewrite_access_in_callee (tree var_ssa);
++  void remove_orig_alloc_free ();
++
++private:
++  varpool_node *var = nullptr;
++  tree var_type = nullptr;
++  ipa_ref *alloc_ref = nullptr;
++  ipa_ref *free_ref = nullptr;
++  gimple *alloc_stmt = nullptr;
++  gimple *free_stmt = nullptr;
++  cgraph_node *caller = nullptr;
++  cgraph_node *callee = nullptr;
++  cgraph_edge *call_edge = nullptr;
++  gimple *call_stmt = nullptr;
++
++  bool scalar_alloc_p = false;
++
++  auto_vec removed_stmts;
++};
++
++array_localizer::array_localizer (varpool_node *var)
++  : var (var)
++{
++}
++
++void
++array_localizer::localize ()
++{
++  if (DECL_EXTERNAL (var->decl) || var->in_other_partition
++      || !var->can_remove_if_no_refs_p ())
++    return;
++
++  var_type = TREE_TYPE (var->decl);
++
++  /* Only care about pointer variable.  */
++  if (!POINTER_TYPE_P (var_type))
++    return;
++
++  if (!check_var_store () || !check_var_load ())
++    return;
++
++  if (callee->used_from_other_partition
++      || callee->cannot_return_p ()
++      || callee->get_availability () != AVAIL_LOCAL
++      || callee->has_aliases_p ())
++    return;
++
++  if (!find_call_edge ())
++    return;
++
++  {
++    cfun_saver save (caller);
++    if (!stmt_dominated_by_p (CDI_DOMINATORS, free_stmt, alloc_stmt)
++	|| !stmt_dominated_by_p (CDI_POST_DOMINATORS, alloc_stmt, free_stmt)
++	|| !stmt_dominated_by_p (CDI_POST_DOMINATORS, alloc_stmt, call_stmt)
++	|| !stmt_dominated_by_p (CDI_DOMINATORS, free_stmt, call_stmt))
++      return;
++  }
++
++  rewrite_array ();
++}
++
++bool
++array_localizer::scalar_type_p (tree type)
++{
++  if (INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
++      || SCALAR_FLOAT_TYPE_P (type))
++    return true;
++  return false;
++}
++
++bool
++array_localizer::scalar_memop_p (tree ref_val, gimple *use_stmt)
++{
++  if (gimple_has_volatile_ops (use_stmt))
++    return false;
++
++  if (!gimple_assign_load_p (use_stmt) && !gimple_store_p (use_stmt))
++    return false;
++
++  tree type = TREE_TYPE (ref_val);
++  if (!POINTER_TYPE_P (type))
++    return false;
++
++  tree lhs = gimple_get_lhs (use_stmt);
++  tree rhs1 = gimple_assign_rhs1 (use_stmt);
++  tree memref = gimple_store_p (use_stmt) ? lhs : rhs1;
++
++  HOST_WIDE_INT offset, size;
++  bool reverse;
++  memref = get_ref_base_and_extent_hwi (memref, &offset, &size, &reverse);
++
++  if (!memref || offset || TREE_CODE (memref) != MEM_REF
++      || !operand_equal_p (TREE_OPERAND (memref, 0), ref_val)
++      || !integer_zerop (TREE_OPERAND (memref, 1))
++      || !types_compatible_p (TREE_TYPE (lhs), TREE_TYPE (type)))
++    return false;
++
++  /* Exclude address-escape case like *var = var  */
++  ssa_op_iter iter;
++  tree use = nullptr;
++  int use_count = 0;
++  FOR_EACH_SSA_TREE_OPERAND (use, use_stmt, iter, SSA_OP_USE)
++    if (operand_equal_p (use, ref_val) && use_count++)
++      return false;
++
++  return true;
++}
++
++bool
++array_localizer::stmt_dominated_by_p (enum cdi_direction dir, gimple *stmt0,
++				      gimple *stmt1)
++{
++  basic_block bb0 = gimple_bb (stmt0);
++  basic_block bb1 = gimple_bb (stmt1);
++
++  if (bb0 == bb1)
++    {
++      renumber_gimple_stmt_uids_in_blocks (&bb0, 1);
++
++      if (dir == CDI_DOMINATORS)
++	return stmt0->uid > stmt1->uid;
++      else
++	return stmt0->uid < stmt1->uid;
++    }
++
++  return dominated_by_p (dir, bb0, bb1);
++}
++
++gimple *
++array_localizer::find_calloc_stmt (gimple *stmt)
++{
++  if (!gimple_assign_single_p (stmt))
++    return nullptr;
++
++  tree rhs = gimple_assign_rhs1 (stmt);
++  if (TREE_CODE (rhs) != SSA_NAME || !has_single_use (rhs))
++    return nullptr;
++
++  gimple *def_stmt = SSA_NAME_DEF_STMT (rhs);
++  if (!gimple_call_builtin_p (def_stmt, BUILT_IN_CALLOC))
++    return nullptr;
++
++  return def_stmt;
++}
++
++gimple *
++array_localizer::find_free_stmt (tree var)
++{
++  use_operand_p use_p = nullptr;
++  gimple *use_stmt = nullptr;
++  if (TREE_CODE (var) != SSA_NAME || !single_imm_use (var, &use_p, &use_stmt))
++    return nullptr;
++
++  if (!gimple_call_builtin_p (use_stmt, BUILT_IN_FREE))
++    return nullptr;
++
++  return use_stmt;
++}
++
++bool
++array_localizer::check_var_store ()
++{
++  ipa_ref *ref = nullptr;
++  for (unsigned i = 0; var->iterate_referring (i, ref); i++)
++    {
++      cgraph_node *node = dyn_cast (ref->referring);
++      if (!node)
++	return false;
++
++      if (!ref->stmt || gimple_has_volatile_ops (ref->stmt))
++	return false;
++
++      /* Only allow calloc.  */
++      if (ref->use == IPA_REF_STORE)
++	{
++	  /* Multiple alloc is not supported yet.  */
++	  if (alloc_ref)
++	    return false;
++
++	  if (!gimple_store_p (ref->stmt)
++	      || !operand_equal_p (var->decl, gimple_get_lhs (ref->stmt)))
++	    return false;
++
++	  alloc_stmt = find_calloc_stmt (ref->stmt);
++	  if (!alloc_stmt)
++	    return false;
++
++	  tree arg0 = gimple_call_arg (alloc_stmt, 0);
++	  tree arg1 = gimple_call_arg (alloc_stmt, 1);
++	  if (TREE_CODE (arg0) != INTEGER_CST
++	      || TREE_CODE (arg1) != INTEGER_CST)
++	    return false;
++
++	  tree elem_size = TYPE_SIZE_UNIT (TREE_TYPE (var_type));
++	  if (scalar_type_p (TREE_TYPE (var_type))
++	      && integer_onep (arg0)
++	      && tree_int_cst_equal (arg1, elem_size))
++	    scalar_alloc_p = true;
++
++	  alloc_ref = ref;
++	  caller = node;
++	}
++    }
++
++  return alloc_ref != nullptr;
++}
++
++bool
++array_localizer::check_var_load ()
++{
++  ipa_ref *ref = nullptr;
++  for (unsigned i = 0; var->iterate_referring (i, ref); i++)
++    {
++      if (ref->use == IPA_REF_STORE)
++	continue;
++
++      if (ref->use != IPA_REF_LOAD)
++	return false;
++
++      if (!gimple_assign_load_p (ref->stmt)
++	  || !operand_equal_p (var->decl, gimple_assign_rhs1 (ref->stmt)))
++	return false;
++
++      tree lhs = gimple_assign_lhs (ref->stmt);
++      if (TREE_CODE (lhs) != SSA_NAME)
++	return false;
++
++      if (!free_ref)
++	{
++	  gimple *stmt = find_free_stmt (lhs);
++	  if (stmt)
++	    {
++	      if (!operand_equal_p (gimple_call_arg (stmt, 0), lhs)
++		  || ref->referring != caller)
++		return false;
++
++	      free_ref = ref;
++	      free_stmt = stmt;
++	      continue;
++	    }
++	}
++
++      gimple *use_stmt = nullptr;
++      imm_use_iterator iter;
++      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
++	{
++	  if (is_gimple_debug (use_stmt))
++	    continue;
++
++	  if (!is_gimple_assign (use_stmt))
++	    return false;
++
++	  if (scalar_alloc_p
++	      && !scalar_memop_p (lhs, use_stmt))
++	    scalar_alloc_p = false;
++
++	  /* All other reference must be in the same callee.  */
++	  cgraph_node *node = dyn_cast (ref->referring);
++	  if (!node || (callee && callee != node))
++	    return false;
++
++	  callee = node;
++	}
++    }
++
++  return callee && callee != caller;
++}
++
++/* Now we only allow function that is called only once by other
++   function (non-recursive call).  */
++
++bool
++array_localizer::find_call_edge ()
++{
++  cgraph_edge *e = callee->callers;
++  if (!e || e->next_caller || e->caller != caller)
++    return false;
++
++  call_edge = e;
++  call_stmt = e->call_stmt;
++  return true;
++}
++
++void
++array_localizer::remove_referring_stmt (gimple *stmt)
++{
++  gimple_stmt_iterator gsi;
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "Remove statement:\n");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
++      fprintf (dump_file, "\n");
++    }
++
++  gsi = gsi_for_stmt (stmt);
++  unlink_stmt_vdef (stmt);
++  gsi_remove (&gsi, true);
++  release_defs (stmt);
++}
++
++void
++array_localizer::replace_store_with_ssa (gimple *stmt, tree var_ssa)
++{
++  if (dump_file)
++    {
++      fprintf (dump_file, "Update store statement:\n");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
++    }
++
++  create_new_def_for (var_ssa, stmt, NULL);
++  update_stmt (stmt);
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "->\n");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
++      fprintf (dump_file, "\n");
++    }
++}
++
++void
++array_localizer::replace_load_with_ssa (gimple *stmt, tree var_ssa)
++{
++  if (dump_file)
++    {
++      fprintf (dump_file, "Update load statement:\n");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
++    }
++
++  gimple_assign_set_rhs1 (stmt, var_ssa);
++  update_stmt (stmt);
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "->\n");
++      print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
++      fprintf (dump_file, "\n");
++    }
++}
++
++gimple *
++array_localizer::copy_call_without_location (gimple *stmt)
++{
++  tree callee = unshare_expr_without_location (gimple_call_fndecl (stmt));
++  auto_vec args;
++
++  for (unsigned i = 0; i < gimple_call_num_args (stmt); i++)
++    {
++      tree arg = gimple_call_arg (stmt, i);
++      args.safe_push (unshare_expr_without_location (arg));
++    }
++
++  return gimple_build_call_vec (callee, args);
++}
++
++void
++array_localizer::rewrite_array ()
++{
++  if (dump_file)
++    {
++      fprintf (dump_file, "Localize global array: ");
++      print_generic_expr (dump_file, var->decl);
++      fprintf (dump_file, "\n\n");
++    }
++
++  cfun_saver save (callee);
++
++  tree type = TREE_TYPE (scalar_alloc_p ? var_type : var->decl);
++  const char *name = get_name (var->decl);
++  tree var_ssa = make_temp_ssa_name (type, NULL, name ? name : "");
++
++  if (scalar_alloc_p)
++    insert_new_init (var_ssa);
++  else
++    insert_new_alloc_free (var_ssa);
++
++  rewrite_access_in_callee (var_ssa);
++  remove_orig_alloc_free ();
++
++  for (auto stmt : removed_stmts)
++    caller->remove_stmt_references (stmt);
++}
++
++void
++array_localizer::insert_new_init (tree var_ssa)
++{
++  tree init_value = build_zero_cst (TREE_TYPE (var_ssa));
++  gimple *init = gimple_build_assign (var_ssa, init_value);
++
++  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
++  gimple_stmt_iterator gsi = gsi_start_nondebug_after_labels_bb (entry_bb);
++  gsi_insert_before (&gsi, init, GSI_SAME_STMT);
++}
++
++void
++array_localizer::insert_new_alloc_free (tree var_ssa)
++{
++  gimple *new_alloc = copy_call_without_location (alloc_stmt);
++  gimple *new_free = copy_call_without_location (free_stmt);
++  basic_block entry_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
++  gimple_stmt_iterator gsi = gsi_start_nondebug_after_labels_bb (entry_bb);
++
++  gimple_set_lhs (new_alloc, var_ssa);
++  gsi_insert_before (&gsi, new_alloc, GSI_SAME_STMT);
++
++  if (dump_file)
++    {
++      fprintf (dump_file, "Insert calloc statement:\n");
++      print_gimple_stmt (dump_file, new_alloc, 0, TDF_VOPS);
++      fprintf (dump_file, "\n");
++    }
++
++  bool free_used = false;
++  for (auto e : EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
++    {
++      gimple *stmt = last_stmt (e->src);
++      if (gimple_code (stmt) != GIMPLE_RETURN)
++	continue;
++
++      if (free_used)
++	new_free = gimple_copy (new_free);
++      else
++	free_used = true;
++
++      auto gsi = gsi_for_stmt (stmt);
++      gimple_call_set_arg (new_free, 0, var_ssa);
++      gsi_insert_before (&gsi, new_free, GSI_SAME_STMT);
++
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Insert free statement:\n");
++	  print_gimple_stmt (dump_file, new_free, 0, TDF_VOPS);
++	  fprintf (dump_file, "\n");
++	}
++    }
++}
++
++void
++array_localizer::rewrite_access_in_callee (tree var_ssa)
++{
++  ipa_ref *ref = nullptr;
++  for (unsigned i = 0; var->iterate_referring (i, ref); i++)
++    {
++      if (ref == alloc_ref || ref == free_ref)
++	continue;
++
++      gcc_assert (ref->referring == callee && ref->use == IPA_REF_LOAD);
++
++      if (scalar_alloc_p)
++	{
++	  tree lhs = gimple_assign_lhs (ref->stmt);
++	  gimple *use_stmt = nullptr;
++	  imm_use_iterator iter;
++	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
++	    {
++	      if (is_gimple_debug (use_stmt))
++		remove_referring_stmt (use_stmt);
++	      else if (gimple_store_p (use_stmt))
++		replace_store_with_ssa (use_stmt, var_ssa);
++	      else if (gimple_assign_load_p (use_stmt))
++		replace_load_with_ssa (use_stmt, var_ssa);
++	      else
++		gcc_unreachable ();
++	    }
++	  remove_referring_stmt (ref->stmt);
++	}
++      else
++	replace_load_with_ssa (ref->stmt, var_ssa);
++
++      removed_stmts.safe_push (ref->stmt);
++    }
++
++  update_ssa (TODO_update_ssa);
++}
++
++void
++array_localizer::remove_orig_alloc_free ()
++{
++  cfun_saver save (caller);
++
++  /* Remove calloc() and free().  */
++  remove_referring_stmt (alloc_stmt);
++  remove_referring_stmt (alloc_ref->stmt);
++  remove_referring_stmt (free_stmt);
++  remove_referring_stmt (free_ref->stmt);
++  removed_stmts.safe_push (alloc_ref->stmt);
++  removed_stmts.safe_push (free_ref->stmt);
++
++  update_ssa (TODO_update_ssa);
++}
++
++/* Execute the driver for IPA variable localization.  */
++
++static unsigned int
++ipa_localize_array (void)
++{
++  cgraph_node *node = nullptr;
++  FOR_EACH_FUNCTION (node)
++    {
++      if (!node->real_symbol_p () || !node->definition
++	  || !node->has_gimple_body_p () || node->inlined_to)
++	continue;
++      node->get_body ();
++    }
++
++  varpool_node *var = nullptr;
++  FOR_EACH_VARIABLE (var)
++    array_localizer (var).localize ();
++
++  return 0;
++}
++
++namespace {
++
++const pass_data pass_data_ipa_localize_array =
++{
++  SIMPLE_IPA_PASS, /* type */
++  "localize-array",  /* name */
++  OPTGROUP_NONE,   /* optinfo_flags */
++  TV_IPA_LOCALIZE_ARRAY, /* tv_id */
++  0, /* properties_required */
++  0, /* properties_provided */
++  0, /* properties_destroyed */
++  0, /* todo_flags_start */
++  0, /* todo_flags_finish */
++};
++
++class pass_ipa_localize_array : public simple_ipa_opt_pass
++{
++public:
++  pass_ipa_localize_array (gcc::context *ctxt)
++    : simple_ipa_opt_pass (pass_data_ipa_localize_array, ctxt)
++  {}
++
++  /* opt_pass methods: */
++  virtual bool gate (function *)
++    {
++      return optimize >= 3 && flag_ipa_localize_array;
++    }
++
++  virtual unsigned int execute (function *) { return ipa_localize_array (); }
++
++}; // class pass_ipa_localize_array
++
++} // anon namespace
++
++simple_ipa_opt_pass *
++make_pass_ipa_localize_array (gcc::context *ctxt)
++{
++  return new pass_ipa_localize_array (ctxt);
++}
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 18b41eb1b..851bda65c 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -4356,6 +4356,14 @@ ipa_struct_reorg::find_vars (gimple *stmt)
+ 	  find_var (gimple_assign_rhs1 (stmt), stmt);
+ 	  find_var (gimple_assign_rhs2 (stmt), stmt);
+ 	}
++      else if (gimple_assign_rhs_code (stmt) == EQ_EXPR
++	       && types_compatible_p (
++		  TYPE_MAIN_VARIANT (TREE_TYPE (gimple_assign_rhs1 (stmt))),
++		  TYPE_MAIN_VARIANT (TREE_TYPE (gimple_assign_rhs2 (stmt)))))
++	{
++	  find_var (gimple_assign_rhs1 (stmt), stmt);
++	  find_var (gimple_assign_rhs2 (stmt), stmt);
++	}
+       else
+ 	{
+ 	  /* Because we won't handle these stmts in rewrite phase,
+@@ -8543,6 +8551,33 @@ ipa_struct_reorg::rewrite_assign (gassign *stmt, gimple_stmt_iterator *gsi)
+       return remove;
+     }
+ 
++  if (gimple_assign_cast_p (stmt))
++    {
++      tree rhs = gimple_assign_rhs1 (stmt);
++      tree newrhs[max_split];
++      if (!rewrite_expr (rhs, newrhs))
++	return false;
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "\nrewriting cast statement:\n");
++	  print_gimple_stmt (dump_file, stmt, 0);
++	}
++
++      tree lhs = gimple_assign_lhs (stmt);
++      tree conv_rhs = fold_convert (TREE_TYPE (lhs), newrhs[0]);
++      gimple *newstmt = gimple_build_assign (lhs, conv_rhs);
++      gsi_insert_before (gsi, newstmt, GSI_SAME_STMT);
++
++      if (dump_file && (dump_flags & TDF_DETAILS))
++	{
++	  fprintf (dump_file, "replaced with:\n");
++	  print_gimple_stmt (dump_file, newstmt, 0);
++	  fprintf (dump_file, "\n");
++	}
++      return true;
++    }
++
+   return remove;
+ }
+ 
+@@ -11203,7 +11238,7 @@ ipa_struct_reorg::find_fc_paths (fc_type_info *info)
+     {
+       /* Already seen.  */
+       if (srfn->fc_path.start_stmt)
+-	return false;
++	return srfn->fc_path.start_stmt == start_stmt;
+ 
+       SET_CFUN (srfn);
+ 
+diff --git a/gcc/ipa-utils.cc b/gcc/ipa-utils.cc
+index 67dd42f4f..5d9981c04 100644
+--- a/gcc/ipa-utils.cc
++++ b/gcc/ipa-utils.cc
+@@ -35,6 +35,30 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tree-vrp.h"
+ #include "ipa-prop.h"
+ #include "ipa-fnsummary.h"
++#include "cfgloop.h"
++
++cfun_saver::cfun_saver (cgraph_node *node)
++{
++  push_cfun (DECL_STRUCT_FUNCTION (node->decl));
++  calculate_dominance_info (CDI_DOMINATORS);
++  calculate_dominance_info (CDI_POST_DOMINATORS);
++}
++
++cfun_saver::cfun_saver (cgraph_node *node, unsigned loop_flags)
++  : cfun_saver (node)
++{
++  loop_optimizer_init (loop_flags);
++  need_finalize_loop_optimizer = true;
++}
++
++cfun_saver::~cfun_saver ()
++{
++  if (need_finalize_loop_optimizer)
++    loop_optimizer_finalize ();
++  free_dominance_info (CDI_POST_DOMINATORS);
++  free_dominance_info (CDI_DOMINATORS);
++  pop_cfun ();
++}
+ 
+ /* Debugging function for postorder and inorder code. NOTE is a string
+    that is printed before the nodes are printed.  ORDER is an array of
+@@ -781,3 +805,23 @@ recursive_call_p (tree func, tree dest)
+       return false;
+   return true;
+ }
++
++/* Return true if NODE has only one non-recursive caller and no non-recursive
++   callee.  */
++bool
++leaf_recursive_node_p (cgraph_node *node)
++{
++  if (node->inlined_to || !node->has_gimple_body_p () || node->indirect_calls)
++    return false;
++
++  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
++    if (node != e->callee)
++      return false;
++
++  unsigned non_recursive_caller_count = 0;
++  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
++    if (node != e->caller)
++      non_recursive_caller_count++;
++
++  return non_recursive_caller_count == 1;
++}
+diff --git a/gcc/ipa-utils.h b/gcc/ipa-utils.h
+index dc6ba0d52..15c63e905 100644
+--- a/gcc/ipa-utils.h
++++ b/gcc/ipa-utils.h
+@@ -21,6 +21,9 @@ along with GCC; see the file COPYING3.  If not see
+ #ifndef GCC_IPA_UTILS_H
+ #define GCC_IPA_UTILS_H
+ 
++#include "cgraph.h"
++#include "function.h"
++
+ struct ipa_dfs_info {
+   int dfn_number;
+   int low_link;
+@@ -33,6 +36,17 @@ struct ipa_dfs_info {
+   PTR aux;
+ };
+ 
++/* Use RAII to help save cfun.  */
++class cfun_saver
++{
++public:
++  cfun_saver (cgraph_node *node);
++  cfun_saver (cgraph_node *node, unsigned loop_flags);
++  ~cfun_saver ();
++
++private:
++  bool need_finalize_loop_optimizer = false;
++};
+ 
+ /* In ipa-utils.cc  */
+ void ipa_print_order (FILE*, const char *, struct cgraph_node**, int);
+@@ -46,6 +60,7 @@ tree get_base_var (tree);
+ void ipa_merge_profiles (struct cgraph_node *dst,
+ 			 struct cgraph_node *src, bool preserve_body = false);
+ bool recursive_call_p (tree, tree);
++bool leaf_recursive_node_p (cgraph_node *node);
+ 
+ /* In ipa-pure-const.cc  */
+ bool finite_function_p ();
+diff --git a/gcc/passes.def b/gcc/passes.def
+index 08213f2bc..431f9c7c9 100644
+--- a/gcc/passes.def
++++ b/gcc/passes.def
+@@ -184,6 +184,20 @@ along with GCC; see the file COPYING3.  If not see
+      passes are executed after partitioning and thus see just parts of the
+      compiled unit.  */
+   INSERT_PASSES_AFTER (all_late_ipa_passes)
++  NEXT_PASS (pass_ipa_alignment_propagation);
++  PUSH_INSERT_PASSES_WITHIN (pass_ipa_alignment_propagation)
++      NEXT_PASS (pass_ccp, true /* nonzero_p */);
++      NEXT_PASS (pass_early_vrp);
++      NEXT_PASS (pass_cd_dce);
++      NEXT_PASS (pass_forwprop);
++      NEXT_PASS (pass_rebuild_cgraph_edges);
++  POP_INSERT_PASSES ()
++  NEXT_PASS (pass_ipa_localize_array);
++  PUSH_INSERT_PASSES_WITHIN (pass_ipa_localize_array)
++      NEXT_PASS (pass_forwprop);
++      NEXT_PASS (pass_rebuild_cgraph_edges);
++  POP_INSERT_PASSES ()
++  NEXT_PASS (pass_ipa_array_dse);
+   NEXT_PASS (pass_ipa_hardware_detection);
+   NEXT_PASS (pass_ipa_pta);
+   /* FIXME: this should be a normal IP pass.  */
+diff --git a/gcc/timevar.def b/gcc/timevar.def
+index 14129a500..4d3e54f75 100644
+--- a/gcc/timevar.def
++++ b/gcc/timevar.def
+@@ -81,6 +81,9 @@ DEFTIMEVAR (TV_IPA_CONSTANT_PROP     , "ipa cp")
+ DEFTIMEVAR (TV_IPA_INLINING          , "ipa inlining heuristics")
+ DEFTIMEVAR (TV_IPA_FNSPLIT           , "ipa function splitting")
+ DEFTIMEVAR (TV_IPA_COMDATS	     , "ipa comdats")
++DEFTIMEVAR (TV_IPA_ALIGNMENT_PROPAGATION, "ipa alignment propagation")
++DEFTIMEVAR (TV_IPA_LOCALIZE_ARRAY    , "ipa localize array")
++DEFTIMEVAR (TV_IPA_ARRAY_DSE	     , "ipa array dse")
+ DEFTIMEVAR (TV_IPA_HARDWARE_DETECTION, "ipa detection")
+ DEFTIMEVAR (TV_IPA_PREFETCH	     , "ipa prefetch")
+ DEFTIMEVAR (TV_IPA_STRUCT_REORG      , "ipa struct reorg optimization")
+diff --git a/gcc/tree-pass.h b/gcc/tree-pass.h
+index e01d86fb1..e89220303 100644
+--- a/gcc/tree-pass.h
++++ b/gcc/tree-pass.h
+@@ -534,6 +534,9 @@ extern ipa_opt_pass_d *make_pass_ipa_icp (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_odr (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_reference (gcc::context *ctxt);
+ extern ipa_opt_pass_d *make_pass_ipa_pure_const (gcc::context *ctxt);
++extern simple_ipa_opt_pass *make_pass_ipa_alignment_propagation (gcc::context *ctxt);
++extern simple_ipa_opt_pass *make_pass_ipa_localize_array (gcc::context *ctxt);
++extern simple_ipa_opt_pass *make_pass_ipa_array_dse (gcc::context *ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_hardware_detection (gcc::context *
+ 							      ctxt);
+ extern simple_ipa_opt_pass *make_pass_ipa_prefetch (gcc::context *ctxt);
+-- 
+2.33.0
+
diff --git a/0379-SVE-Add-container-restriction-for-std-find-with-sve.patch b/0379-SVE-Add-container-restriction-for-std-find-with-sve.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4ce8d966917206b47cc61a285c0d1a812b8b5367
--- /dev/null
+++ b/0379-SVE-Add-container-restriction-for-std-find-with-sve.patch
@@ -0,0 +1,508 @@
+From 6066b1a502c523625695a84bd65995eb6fd3c106 Mon Sep 17 00:00:00 2001
+From: blunce 
+Date: Mon, 19 May 2025 15:31:46 +0800
+Subject: [PATCH] [SVE] Add container restriction for std find with sve
+
+---
+ gcc/gimple-ssa-expand-sve.cc                  | 391 ++++++++++--------
+ gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C |  38 ++
+ 2 files changed, 246 insertions(+), 183 deletions(-)
+
+diff --git a/gcc/gimple-ssa-expand-sve.cc b/gcc/gimple-ssa-expand-sve.cc
+index 9bac95212..e8c9e9abf 100644
+--- a/gcc/gimple-ssa-expand-sve.cc
++++ b/gcc/gimple-ssa-expand-sve.cc
+@@ -38,219 +38,244 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ namespace {
+ 
+-#define TRACE_FUNCTION(fun)					\
+-	if (dump_file)						\
+-	{							\
+-		fprintf (dump_file, "\nprocess function: \n");	\
+-		dump_function_to_file (fun, dump_file, TDF_NONE);\
+-		fprintf (dump_file, "\n");			\
+-	}
+-
+-#define TRACE_STMT(stmt)					\
+-	if (dump_file)						\
+-	{							\
+-		fprintf (dump_file, "\nprocess stmt: \n");	\
+-		print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);\
+-		fprintf (dump_file, "\n");			\
+-	}
+-
+-#define TRACE_REPLACE_STMT(stmt)				\
+-	if (dump_file)						\
+-	{							\
+-		fprintf (dump_file, "\nprocess replace stmt: \n");\
+-		print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);\
+-		fprintf (dump_file, "\n");			\
+-	}
+-
+-#define TRACE_ARG3_TYPE(type)					\
+-	if (dump_file)						\
+-	{							\
+-		fprintf (dump_file, "\nprocess arg3 type: \n");	\
+-		dump_node (type, TDF_NONE, dump_file);		\
+-		fprintf (dump_file, "\n");			\
+-	}
++#define TRACE_FUNCTION(fun)\
++  if (dump_file)\
++  {\
++    fprintf (dump_file, "\nprocess function: \n");\
++    dump_function_to_file (fun, dump_file, TDF_NONE);\
++    fprintf (dump_file, "\n");\
++  }
++
++#define TRACE_STMT(stmt)\
++  if (dump_file)\
++  {\
++    fprintf (dump_file, "\nprocess stmt: \n");\
++    print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);\
++    fprintf (dump_file, "\n");\
++  }
++
++#define TRACE_REPLACE_STMT(stmt)\
++  if (dump_file)\
++  {\
++    fprintf (dump_file, "\nprocess replace stmt: \n");\
++    print_gimple_stmt (dump_file, stmt, 0, TDF_NONE);\
++    fprintf (dump_file, "\n");\
++  }
++
++#define TRACE_ARG3_TYPE(type)\
++  if (dump_file)\
++  {\
++    fprintf (dump_file, "\nprocess arg3 type: \n");\
++    dump_node (type, TDF_NONE, dump_file);\
++    fprintf (dump_file, "\n");\
++  }
+ 
+ const pass_data pass_data_find_with_sve = {
+-	GIMPLE_PASS, /* type  */
+-	"find_with_sve", /* name */
+-	OPTGROUP_NONE, /* optinfo_flags */
+-	TV_NONE, /* tv_id */
+-	0, /* properties_required */
+-	0, /* properties_provided */
+-	0, /* properties_destroyed */
+-	0, /* todo_flags_start */
+-	TODO_cleanup_cfg | TODO_update_ssa | TODO_update_address_taken
+-	| TODO_rebuild_cgraph_edges, /* todo_flags_finish */
++  GIMPLE_PASS, /* type.  */
++  "find_with_sve", /* name.  */
++  OPTGROUP_NONE, /* optinfo_flags.  */
++  TV_NONE, /* tv_id.  */
++  0, /* properties_required.  */
++  0, /* properties_provided.  */
++  0, /* properties_destroyed.  */
++  0, /* todo_flags_start.  */
++  TODO_cleanup_cfg | TODO_update_ssa | TODO_update_address_taken
++  | TODO_rebuild_cgraph_edges, /* todo_flags_finish.  */
+ };
+ 
+ class pass_find_with_sve : public gimple_opt_pass {
+ public:
+-    pass_find_with_sve (gcc::context *ctx) :
+-    	gimple_opt_pass (pass_data_find_with_sve, ctx)
+-    {}
++  pass_find_with_sve (gcc::context *ctx) :
++    gimple_opt_pass (pass_data_find_with_sve, ctx)
++  {}
+ 
+-    virtual bool gate (function *fun) override
+-    {
+-	if (!flag_find_with_sve)
+-	    return false;
++  virtual bool gate (function *fun) override
++  {
++    if (!flag_find_with_sve)
++      return false;
+ 
+-	if (!targetm.vector_mode_supported_p (V2DImode))
+-	    return false;
++    if (!targetm.vector_mode_supported_p (V2DImode))
++      return false;
+ 
+-	return true;
+-    }
++    return true;
++  }
+ 
+-    virtual unsigned int execute (function *fun) override
++virtual unsigned int execute (function *fun) override
++{
++  TRACE_FUNCTION (fun->decl);
++  basic_block bb;
++  FOR_EACH_BB_FN (bb, fun)
++  {
++    for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
++      !gsi_end_p (gsi); gsi_next (&gsi))
+     {
+-	TRACE_FUNCTION (fun->decl);
+-	basic_block bb;
+-	FOR_EACH_BB_FN (bb, fun)
+-	{
+-	    for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
+-	    	!gsi_end_p (gsi); gsi_next (&gsi))
+-	    {
+-		gimple *stmt = gsi_stmt (gsi);
+-		if (std_find_check (stmt))
+-		    replace_std_find (gsi);
+-	    }
+-	}
+-
+-	return 0;
++      gimple *stmt = gsi_stmt (gsi);
++      if (std_find_check (stmt))
++	replace_std_find (gsi);
+     }
++  }
+ 
+-private:
+-    uint8_t bit_width;
+-    const char *null_name = "";
++  return 0;
++}
+ 
+-    bool std_find_check (gimple *stmt)
++private:
++  uint8_t bit_width;
++  const char *null_name = "";
++
++  bool std_find_check (gimple *stmt)
++  {
++    if (!is_gimple_call (stmt))
++      return false;
++
++    tree fndecl = gimple_call_fndecl (stmt);
++    if (fndecl == nullptr || DECL_NAME (fndecl) == nullptr)
++      return false;
++
++    const char *fn_name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
++    if (fn_name == nullptr || strcmp (fn_name, "find") != 0)
++      return false;
++
++    if (DECL_CONTEXT (fndecl) == nullptr
++      || TREE_CODE (DECL_CONTEXT (fndecl)) != NAMESPACE_DECL)
++      return false;
++
++    const char *namespace_name
++      = IDENTIFIER_POINTER (DECL_NAME (DECL_CONTEXT (fndecl)));
++    if (namespace_name == nullptr || strcmp (namespace_name, "std") != 0)
++      return false;
++
++    /* Exclude nested-namespace scenarios such as xxx::std::find.  */
++    if (DECL_CONTEXT (DECL_CONTEXT (fndecl))
++	&& TREE_CODE (DECL_CONTEXT (DECL_CONTEXT (fndecl)))
++	== NAMESPACE_DECL)
++      return false;
++
++    if (gimple_call_num_args (stmt) != 3)
++      return false;
++
++    tree arg1 = DECL_ARGUMENTS (fndecl);
++    tree arg2 = TREE_CHAIN (arg1);
++    tree arg3 = TREE_CHAIN (arg2);
++
++    tree arg3_type = TREE_TYPE (arg3);
++    if (TREE_CODE (arg3_type) != REFERENCE_TYPE)
++      return false;
++
++    tree main_type = TREE_TYPE (arg3_type);
++    TRACE_ARG3_TYPE (main_type);
++    if (TREE_CODE (main_type) == INTEGER_TYPE)
+     {
+-	if (!is_gimple_call (stmt))
+-	    return false;
+-
+-	tree fndecl = gimple_call_fndecl (stmt);
+-	if (fndecl == nullptr || DECL_NAME (fndecl) == nullptr)
+-	    return false;
+-
+-	const char *fn_name = IDENTIFIER_POINTER (DECL_NAME (fndecl));
+-	if (strcmp (fn_name, "find") != 0)
+-	    return false;
+-
+-	if (DECL_CONTEXT (fndecl) == nullptr
+-		|| TREE_CODE (DECL_CONTEXT (fndecl)) != NAMESPACE_DECL)
+-	    return false;
+-
+-	const char *namespace_name
+-		= IDENTIFIER_POINTER (DECL_NAME (DECL_CONTEXT (fndecl)));
+-	if (strcmp (namespace_name, "std") != 0)
+-	    return false;
+-
+-	if (gimple_call_num_args (stmt) != 3)
+-	    return false;
+-
+-	tree arg1 = DECL_ARGUMENTS (fndecl);
+-	tree arg2 = TREE_CHAIN (arg1);
+-	tree arg3 = TREE_CHAIN (arg2);
+-
+-	tree arg3_type = TREE_TYPE (arg3);
+-	if (TREE_CODE (arg3_type) != REFERENCE_TYPE)
+-	    return false;
+-
+-	tree main_type = TREE_TYPE (arg3_type);
+-	TRACE_ARG3_TYPE (main_type);
+-	if (TREE_CODE (main_type) == INTEGER_TYPE)
+-	{
+-	    if (TYPE_PRECISION (main_type) != 64)
+-		return false;
+-
+-	    const char *type_name = get_type_name_arg3 (main_type);
+-	    if ((strcmp (type_name, "long unsigned int") != 0)
+-	    	&& (strcmp (type_name, "long int") != 0))
+-		return false;
+-
+-	    this->bit_width = 64;
+-	} else if (TREE_CODE (main_type) == POINTER_TYPE)
+-	    this->bit_width = 64;
+-	else
+-	    return false;
+-
++      if (TYPE_PRECISION (main_type) != 64)
++	return false;
++
++      const char *type_name = get_type_name_arg (main_type);
++      if ((strcmp (type_name, "long unsigned int") != 0)
++	&& (strcmp (type_name, "long int") != 0))
++	return false;
++
++      this->bit_width = 64;
++    } else if (TREE_CODE (main_type) == POINTER_TYPE)
++      this->bit_width = 64;
++    else
++      return false;
++
++    tree arg1_type = TREE_TYPE (arg1);
++    if (TREE_CODE (arg1_type) == POINTER_TYPE)
++      return true;
++    else if (TREE_CODE (arg1_type) == RECORD_TYPE)
++    {
++      const char *type_name = get_type_name_arg (arg1_type);
++      if (strcmp (type_name, "__normal_iterator") == 0)
+ 	return true;
+     }
+ 
+-    const char *get_type_name_arg3 (tree main_type)
+-    {
+-	enum tree_code code = TREE_CODE (main_type);
+-	enum tree_code_class tclass = TREE_CODE_CLASS (code);
++    return false;
++  }
+ 
+-	if (tclass == tcc_type)
+-	{
+-	    if (TYPE_NAME (main_type))
+-	    {
+-		if (TREE_CODE (TYPE_NAME (main_type)) == IDENTIFIER_NODE)
+-		    return IDENTIFIER_POINTER (TYPE_NAME (main_type));
+-		else if (TREE_CODE (TYPE_NAME (main_type)) == TYPE_DECL
+-			 && DECL_NAME (TYPE_NAME (main_type)))
+-		    return IDENTIFIER_POINTER (
+-			DECL_NAME (TYPE_NAME (main_type)));
+-	    }
+-	}
++  const char *get_type_name_arg (tree main_type)
++  {
++    enum tree_code code = TREE_CODE (main_type);
++    enum tree_code_class tclass = TREE_CODE_CLASS (code);
+ 
+-	return null_name;
+-    }
+-
+-    void replace_std_find (gimple_stmt_iterator gsi)
++    if (tclass == tcc_type)
+     {
+-	switch (this->bit_width)
++      if (TYPE_NAME (main_type))
++      {
++	if (TREE_CODE (TYPE_NAME (main_type)) == IDENTIFIER_NODE)
++	{
++	  const char *type_name = IDENTIFIER_POINTER (
++	    TYPE_NAME (main_type));
++	  if (type_name)
++	    return type_name;
++	}
++	else if (TREE_CODE (TYPE_NAME (main_type)) == TYPE_DECL
++	    && DECL_NAME (TYPE_NAME (main_type)))
+ 	{
+-	    case 64:
+-		replace_std_find_u64 (gsi);
+-		break;
+-	    case 32:
+-	    case 16:
+-	    case 8:
+-	    default:;
++	  const char *type_name = IDENTIFIER_POINTER (
++	    DECL_NAME (TYPE_NAME (main_type)));
++	  if (type_name)
++	    return type_name;
+ 	}
++      }
+     }
+ 
+-    void replace_std_find_u64 (gimple_stmt_iterator gsi)
++    return null_name;
++  }
++
++  void replace_std_find (gimple_stmt_iterator gsi)
++  {
++    switch (this->bit_width)
+     {
+-	gimple *stmt = gsi_stmt (gsi);
+-	tree old_fndecl = gimple_call_fndecl (stmt);
+-	TRACE_STMT (stmt);
+-
+-	// arguments list process:
+-	auto_vec args;
+-	for (unsigned i = 0; i < gimple_call_num_args (stmt); ++i)
+-	    args.safe_push (gimple_call_arg (stmt, i));
+-	tree new_arg = build_int_cst (unsigned_char_type_node,
+-			sve_expand_std_find_threshold);
+-	args.safe_push (new_arg);
+-
+-	// functon declare process:
+-	tree old_type = TREE_TYPE (old_fndecl);
+-	tree ret_type = TREE_TYPE (old_type);
+-	tree arg_types = NULL_TREE;
+-	for (tree t = TYPE_ARG_TYPES (old_type); t; t = TREE_CHAIN (t))
+-	    arg_types = tree_cons (NULL_TREE, TREE_VALUE (t), arg_types);
+-	arg_types = tree_cons (NULL_TREE, unsigned_char_type_node, arg_types);
+-	arg_types = nreverse (arg_types);
+-	tree new_fndecl_type = build_function_type (ret_type, arg_types);
+-	tree new_fndecl = build_fn_decl ("__sve_optimized_find_u64",
+-		new_fndecl_type);
+-	TREE_PUBLIC (new_fndecl) = 1;
+-	DECL_EXTERNAL (new_fndecl) = 1;
+-
+-	// call function process:
+-	gcall *new_call = gimple_build_call_vec (new_fndecl, args);
+-	if (gimple_has_lhs (stmt))
+-	    gimple_call_set_lhs (new_call, gimple_call_lhs (stmt));
+-	gsi_replace (&gsi, new_call, true);
+-	update_stmt (gsi_stmt (gsi));
+-	TRACE_REPLACE_STMT (gsi_stmt (gsi));
++      case 64:
++	replace_std_find_u64 (gsi);
++	break;
++      case 32:
++      case 16:
++      case 8:
++      default:;
+     }
++  }
++
++  void replace_std_find_u64 (gimple_stmt_iterator gsi)
++  {
++    gimple *stmt = gsi_stmt (gsi);
++    tree old_fndecl = gimple_call_fndecl (stmt);
++    TRACE_STMT (stmt);
++
++    // arguments list process:
++    auto_vec args;
++    for (unsigned i = 0; i < gimple_call_num_args (stmt); ++i)
++      args.safe_push (gimple_call_arg (stmt, i));
++    tree new_arg = build_int_cst (unsigned_char_type_node,
++      sve_expand_std_find_threshold);
++    args.safe_push (new_arg);
++
++    // function declaration process:
++    tree old_type = TREE_TYPE (old_fndecl);
++    tree ret_type = TREE_TYPE (old_type);
++    tree arg_types = NULL_TREE;
++    for (tree t = TYPE_ARG_TYPES (old_type); t; t = TREE_CHAIN (t))
++      arg_types = tree_cons (NULL_TREE, TREE_VALUE (t), arg_types);
++    arg_types = tree_cons (NULL_TREE, unsigned_char_type_node, arg_types);
++    arg_types = nreverse (arg_types);
++    tree new_fndecl_type = build_function_type (ret_type, arg_types);
++    tree new_fndecl = build_fn_decl ("__sve_optimized_find_u64",
++      new_fndecl_type);
++    TREE_PUBLIC (new_fndecl) = 1;
++    DECL_EXTERNAL (new_fndecl) = 1;
++
++    // call function process:
++    gcall *new_call = gimple_build_call_vec (new_fndecl, args);
++    if (gimple_has_lhs (stmt))
++      gimple_call_set_lhs (new_call, gimple_call_lhs (stmt));
++    gsi_replace (&gsi, new_call, true);
++    update_stmt (gsi_stmt (gsi));
++    TRACE_REPLACE_STMT (gsi_stmt (gsi));
++  }
+ };
+ }  // namespace
+ 
+ gimple_opt_pass *make_pass_find_with_sve (gcc::context *ctx)
+ {
+-    return new pass_find_with_sve (ctx);
++  return new pass_find_with_sve (ctx);
+ }
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C b/gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C
+index 66d03e2cf..e80fc9178 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/find-with-sve.C
+@@ -7,6 +7,7 @@
+ #include 
+ #include 
+ #include 
++#include 
+ 
+ void test_u64()
+ {
+@@ -122,6 +123,41 @@ void test_u16_point()
+ 	std::cout << "fail!\n";
+ }
+ 
++void test_set()
++{
++	std::set s = {1, 3, 5, 7, 9};
++	std::uint64_t ask = 4;
++
++	if (auto it = std::find (s.begin(), s.end(), ask); it != s.end()) // not matched
++		std::cout << "ok!\n";
++	else
++		std::cout << "fail!\n";
++}
++
++namespace myspace
++{
++    namespace std
++    {
++        struct Basic
++        {
++            ::std::uint64_t id;
++        };
++
++        ::std::uint64_t find(Basic *, Basic *, ::std::uint64_t &x)
++        {
++            return x;
++        }
++    }
++}
++
++void test_namespace()
++{
++    myspace::std::Basic b {1};
++    std::uint64_t y = 1;
++    std::uint64_t x = find(nullptr, &b, y);
++    printf("x = %d\n", x);
++}
++
+ int main()
+ {
+     test_u64();
+@@ -131,6 +167,8 @@ int main()
+     test_s32();
+     test_u16();
+     test_u16_point();
++    test_set();
++    test_namespace();
+     return 0;
+ }
+ 
+-- 
+2.33.0
+
diff --git a/0380-Fix-tune-params-error-in-hip09-and-hip11.patch b/0380-Fix-tune-params-error-in-hip09-and-hip11.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b708b3e987b14189a8a77f26febdaa85ca0a8324
--- /dev/null
+++ b/0380-Fix-tune-params-error-in-hip09-and-hip11.patch
@@ -0,0 +1,42 @@
+From d4f79bc035e1176fe8a37c8d324ab35384dfabc5 Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Wed, 21 May 2025 18:06:09 +0800
+Subject: [PATCH] Fix tune params error in hip09 and hip11
+
+---
+ gcc/config/aarch64/aarch64.cc | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index f33e4611a..7bb343db7 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -2179,10 +2179,10 @@ static const struct tune_params hip09_tunings =
+     4, /* load_pred.  */
+     4 /* store_pred.  */
+   }, /* memmov_cost.  */
+-  4,    /* issue_rate  */
++  2,    /* issue_rate  */
+   (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
+    | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
+-  "16", /* function_align.  */
++  "16:12", /* function_align.  */
+   "4",  /* jump_align.  */
+   "8",  /* loop_align.  */
+   2,    /* int_reassoc_width.  */
+@@ -2279,10 +2279,10 @@ static const struct tune_params hip11_tunings =
+     4, /* load_pred.  */
+     4 /* store_pred.  */
+   }, /* memmov_cost.  */
+-  2,    /* issue_rate  */
++  4,    /* issue_rate  */
+   (AARCH64_FUSE_AES_AESMC | AARCH64_FUSE_ALU_BRANCH
+    | AARCH64_FUSE_ALU_CBZ), /* fusible_ops  */
+-  "16:12", /* function_align.  */
++  "16", /* function_align.  */
+   "4",  /* jump_align.  */
+   "8",  /* loop_align.  */
+   2,    /* int_reassoc_width.  */
+-- 
+2.33.0
+
diff --git a/0381-dfc-Fix-error-in-function-wholeaccess.patch b/0381-dfc-Fix-error-in-function-wholeaccess.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9adfa569e26d2d7c69c0185e8bb32a0f16fd0830
--- /dev/null
+++ b/0381-dfc-Fix-error-in-function-wholeaccess.patch
@@ -0,0 +1,87 @@
+From 6de9d3011cda914ab1fecd73f1d517cc3b82f0cb Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Fri, 23 May 2025 14:48:13 +0800
+Subject: [PATCH] [dfc] Fix error in function wholeaccess
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      | 32 ++++++-------------
+ gcc/testsuite/gcc.dg/struct/dfc_wholeaccess.c | 18 +++++++++++
+ 2 files changed, 27 insertions(+), 23 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/dfc_wholeaccess.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 851bda65c..e8a84abbd 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -5014,35 +5014,21 @@ ipa_struct_reorg::wholeaccess (tree expr, tree base,
+   if (TREE_CODE (expr) == ADDR_EXPR && TREE_OPERAND (expr, 0) == base)
+     return true;
+ 
+-  if (!accesstype)
++  if (!accesstype || !handled_type (TREE_TYPE (expr)) || !t || !t->type)
+     return false;
+ 
+-  if (!types_compatible_p (TREE_TYPE (expr), TREE_TYPE (accesstype)))
+-    return false;
+-
+-  if (!handled_type (TREE_TYPE (expr)))
+-    return false;
+-
+-  if (!t || !t->type)
+-    return false;
+-
+-  tree type = TYPE_MAIN_VARIANT (t->type);
++  /* T *_1; _2 = MEM[(T *)_1].  */
+   if (TREE_CODE (expr) == MEM_REF
+-      && POINTER_TYPE_P (TREE_TYPE (expr))
+-      && POINTER_TYPE_P (accesstype)
+-      && POINTER_TYPE_P (TREE_TYPE (accesstype))
++      && integer_zerop (TREE_OPERAND (expr, 1))
+       && POINTER_TYPE_P (TREE_TYPE (base))
+-      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (base))) == type
+-      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (expr))) == type
+-      && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (accesstype))) == type)
+-    return false;
+-
+-  srtype *other_type = find_type (inner_type (TREE_TYPE (expr)));
++      && types_compatible_p (TREE_TYPE (TREE_TYPE (base)), t->type))
++    return POINTER_TYPE_P (accesstype)
++	   && types_compatible_p (TREE_TYPE (accesstype), t->type);
+ 
+-  if (t == other_type)
+-    return true;
++  if (!types_compatible_p (TREE_TYPE (expr), TREE_TYPE (accesstype)))
++    return false;
+ 
+-  return false;
++  return t == find_type (inner_type (TREE_TYPE (expr)));
+ }
+ 
+ bool
+diff --git a/gcc/testsuite/gcc.dg/struct/dfc_wholeaccess.c b/gcc/testsuite/gcc.dg/struct/dfc_wholeaccess.c
+new file mode 100644
+index 000000000..60d73f0f0
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/dfc_wholeaccess.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++
++struct Type_A;
++
++// Optimized type.
++struct Type_B {
++    int b;
++};
++
++__attribute__((used)) static void test() {
++    struct Type_A* a;
++    struct Type_B* b;
++
++    // MEM[(Type_B*)a] = *b;
++    *((struct Type_B*)a) = *b;
++}
++
++// This testcase should compile successfully without an ICE.
+-- 
+2.43.0
+
diff --git a/0382-CFGO-Add-more-opts-to-cfgo.patch b/0382-CFGO-Add-more-opts-to-cfgo.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b94cc1cd839e77769d8d694ac333f8d359d76723
--- /dev/null
+++ b/0382-CFGO-Add-more-opts-to-cfgo.patch
@@ -0,0 +1,62 @@
+From 9f8d7de855feb149e2ea58b72167b22908afacdc Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Thu, 22 May 2025 11:15:39 +0800
+Subject: [PATCH] [CFGO] Add more opts to cfgo
+
+Add more opts for better performance
+---
+ gcc/opts.cc | 36 ++++++++++++++++++++++++++++++------
+ 1 file changed, 30 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/opts.cc b/gcc/opts.cc
+index b69c43724..82efdc802 100644
+--- a/gcc/opts.cc
++++ b/gcc/opts.cc
+@@ -2126,14 +2126,38 @@ enable_cfgo_optimizations (struct gcc_options *opts,
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_selective_scheduling, value);
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_rename_registers, value);
+   SET_OPTION_IF_UNSET (opts, opts_set, flag_profile_partial_training, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_alignment_propagation, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_localize_array, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_array_dse, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_gnu89_inline, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_convert_minmax, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_tree_slp_transpose_vectorize,
++		       value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_prefetch, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_ipa_ic, value);
++
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_find_with_sve, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_finite_loops, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_omit_frame_pointer, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_sized_deallocation, 0);
++
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_loop_elim, value);
++  SET_OPTION_IF_UNSET (opts, opts_set, flag_if_conversion_gimple, value);
+ 
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 185);
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_inline_unit_growth, 66);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_auto, 331);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_inline_unit_growth, 60);
+   SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_recursive_depth_auto,
+-		       31);
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_large_function_insns, 7286);
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_large_function_growth, 89);
+-  SET_OPTION_IF_UNSET (opts, opts_set, param_large_unit_insns, 11783);
++		       7);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_recursive, 3227);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_recursive_auto,
++		       2571);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_early_inlining_insns, 256);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_early_inliner_max_iterations, 1);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_max_inline_insns_single, 2742);
++
++  SET_OPTION_IF_UNSET (opts, opts_set, param_large_function_insns, 9055);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_large_function_growth, 701);
++  SET_OPTION_IF_UNSET (opts, opts_set, param_large_unit_insns, 94216);
+   SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_eval_threshold, 864);
+   SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_loop_hint_bonus, 440);
+   SET_OPTION_IF_UNSET (opts, opts_set, param_ipa_cp_max_recursive_depth, 29);
+-- 
+2.34.1
+
diff --git a/0383-SVE-Add-aarch64-constraint.patch b/0383-SVE-Add-aarch64-constraint.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7657832d8b5fac73c7e1687efe7519dc5e575b6a
--- /dev/null
+++ b/0383-SVE-Add-aarch64-constraint.patch
@@ -0,0 +1,61 @@
+From ffd6171babb4378383d19163e6828e67a4934f20 Mon Sep 17 00:00:00 2001
+From: blunce 
+Date: Thu, 29 May 2025 12:58:09 +0800
+Subject: [PATCH] [SVE] Add aarch64 constraint.
+
+---
+ gcc/gimple-ssa-expand-sve.cc | 9 ++++++++-
+ 1 file changed, 8 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/gimple-ssa-expand-sve.cc b/gcc/gimple-ssa-expand-sve.cc
+index e8c9e9abf..bd7f88cfa 100644
+--- a/gcc/gimple-ssa-expand-sve.cc
++++ b/gcc/gimple-ssa-expand-sve.cc
+@@ -91,6 +91,7 @@ public:
+ 
+   virtual bool gate (function *fun) override
+   {
++#ifdef __aarch64__
+     if (!flag_find_with_sve)
+       return false;
+ 
+@@ -98,10 +99,14 @@ public:
+       return false;
+ 
+     return true;
++#else
++    return false;
++#endif
+   }
+ 
+ virtual unsigned int execute (function *fun) override
+ {
++#ifdef __aarch64__
+   TRACE_FUNCTION (fun->decl);
+   basic_block bb;
+   FOR_EACH_BB_FN (bb, fun)
+@@ -114,11 +119,12 @@ virtual unsigned int execute (function *fun) override
+ 	replace_std_find (gsi);
+     }
+   }
+-
++#endif
+   return 0;
+ }
+ 
+ private:
++#ifdef __aarch64__
+   uint8_t bit_width;
+   const char *null_name = "";
+ 
+@@ -272,6 +278,7 @@ private:
+     update_stmt (gsi_stmt (gsi));
+     TRACE_REPLACE_STMT (gsi_stmt (gsi));
+   }
++#endif
+ };
+ }  // namespace
+ 
+-- 
+2.28.0.windows.1
+
diff --git a/0384-x86-64-Don-t-use-temp-for-argument-in-a-TImode-regis.patch b/0384-x86-64-Don-t-use-temp-for-argument-in-a-TImode-regis.patch
new file mode 100644
index 0000000000000000000000000000000000000000..075dbbbd70151733ce2ce69fb41af7222d87aa1a
--- /dev/null
+++ b/0384-x86-64-Don-t-use-temp-for-argument-in-a-TImode-regis.patch
@@ -0,0 +1,119 @@
+From 3466859f47306e3e3006e810cc5a7fb64be2266a Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" 
+Date: Fri, 6 Sep 2024 05:24:07 -0700
+Subject: [PATCH 1/4] x86-64: Don't use temp for argument in a TImode register
+
+Don't use temp for a PARALLEL BLKmode argument of an EXPR_LIST expression
+in a TImode register.  Otherwise, the TImode variable will be put in
+the GPR save area which guarantees only 8-byte alignment.
+
+gcc/
+
+	PR target/116621
+	* config/i386/i386.cc (ix86_gimplify_va_arg): Don't use temp for
+	a PARALLEL BLKmode container of an EXPR_LIST expression in a
+	TImode register.
+
+gcc/testsuite/
+
+	PR target/116621
+	* gcc.target/i386/pr116621.c: New test.
+
+Signed-off-by: H.J. Lu 
+(cherry picked from commit fa7bbb065c63aa802e0bbb04d605407dad58cf94)
+---
+ gcc/config/i386/i386.cc                  | 22 ++++++++++--
+ gcc/testsuite/gcc.target/i386/pr116621.c | 43 ++++++++++++++++++++++++
+ 2 files changed, 63 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr116621.c
+
+diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
+index e2743e0bd..8a5730a6b 100644
+--- a/gcc/config/i386/i386.cc
++++ b/gcc/config/i386/i386.cc
+@@ -4780,13 +4780,31 @@ ix86_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
+ 
+       examine_argument (nat_mode, type, 0, &needed_intregs, &needed_sseregs);
+ 
+-      need_temp = (!REG_P (container)
++      bool container_in_reg = false;
++      if (REG_P (container))
++	container_in_reg = true;
++      else if (GET_CODE (container) == PARALLEL
++	       && GET_MODE (container) == BLKmode
++	       && XVECLEN (container, 0) == 1)
++	{
++	  /* Check if it is a PARALLEL BLKmode container of an EXPR_LIST
++	     expression in a TImode register.  In this case, temp isn't
++	     needed.  Otherwise, the TImode variable will be put in the
++	     GPR save area which guarantees only 8-byte alignment.   */
++	  rtx x = XVECEXP (container, 0, 0);
++	  if (GET_CODE (x) == EXPR_LIST
++	      && REG_P (XEXP (x, 0))
++	      && XEXP (x, 1) == const0_rtx)
++	    container_in_reg = true;
++	}
++
++      need_temp = (!container_in_reg
+ 		   && ((needed_intregs && TYPE_ALIGN (type) > 64)
+ 		       || TYPE_ALIGN (type) > 128));
+ 
+       /* In case we are passing structure, verify that it is consecutive block
+          on the register save area.  If not we need to do moves.  */
+-      if (!need_temp && !REG_P (container))
++      if (!need_temp && !container_in_reg)
+ 	{
+ 	  /* Verify that all registers are strictly consecutive  */
+ 	  if (SSE_REGNO_P (REGNO (XEXP (XVECEXP (container, 0, 0), 0))))
+diff --git a/gcc/testsuite/gcc.target/i386/pr116621.c b/gcc/testsuite/gcc.target/i386/pr116621.c
+new file mode 100644
+index 000000000..704266458
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr116621.c
+@@ -0,0 +1,43 @@
++/* { dg-do run } */
++/* { dg-options "-O2" } */
++
++#include 
++#include 
++
++union S8302
++{
++  union
++  {
++    double b;
++    int c;
++  } a;
++  long double d;
++  unsigned short int f[5];
++};
++
++union S8302 s8302;
++extern void check8302va (int i, ...);
++
++int
++main (void)
++{
++  memset (&s8302, '\0', sizeof (s8302));
++  s8302.a.b = -221438.250000;
++  check8302va (1, s8302);
++  return 0;
++}
++
++__attribute__((noinline, noclone))
++void
++check8302va (int z, ...)
++{
++  union S8302 arg, *p;
++  va_list ap;
++
++  __builtin_va_start (ap, z);
++  p = &s8302;
++  arg = __builtin_va_arg (ap, union S8302);
++  if (p->a.b != arg.a.b)
++    __builtin_abort ();
++  __builtin_va_end (ap);
++}
+-- 
+2.31.1
+
diff --git a/0385-x86-Don-t-use-address-override-with-segment-regsiter.patch b/0385-x86-Don-t-use-address-override-with-segment-regsiter.patch
new file mode 100644
index 0000000000000000000000000000000000000000..88301aa517daeb34458d960bfbd703bdc655be42
--- /dev/null
+++ b/0385-x86-Don-t-use-address-override-with-segment-regsiter.patch
@@ -0,0 +1,126 @@
+From 9b75fdadfe3541634accd204d3ec5d8573978acf Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" 
+Date: Wed, 25 Sep 2024 16:39:04 +0800
+Subject: [PATCH 2/4] x86: Don't use address override with segment regsiter
+
+Address override only applies to the (reg32) part in the thread address
+fs:(reg32).  Don't rewrite thread address like
+
+(set (reg:CCZ 17 flags)
+    (compare:CCZ (reg:SI 98 [ __gmpfr_emax.0_1 ])
+        (mem/c:SI (plus:SI (plus:SI (unspec:SI [
+                            (const_int 0 [0])
+                        ] UNSPEC_TP)
+                    (reg:SI 107))
+                (const:SI (unspec:SI [
+                            (symbol_ref:SI ("previous_emax") [flags 0x1a] )
+                        ] UNSPEC_DTPOFF))) [1 previous_emax+0 S4 A32])))
+
+if address override is used to avoid the invalid memory operand like
+
+	cmpl	%fs:previous_emax@dtpoff(%eax), %r12d
+
+gcc/
+
+	PR target/116839
+	* config/i386/i386.cc (ix86_rewrite_tls_address_1): Make it
+	static.  Return if TLS address is thread register plus an integer
+	register.
+
+gcc/testsuite/
+
+	PR target/116839
+	* gcc.target/i386/pr116839.c: New file.
+
+Signed-off-by: H.J. Lu 
+(cherry picked from commit c79cc30862d7255ca15884aa956d1ccfa279d86a)
+---
+ gcc/config/i386/i386.cc                  |  9 ++++-
+ gcc/testsuite/gcc.target/i386/pr116839.c | 48 ++++++++++++++++++++++++
+ 2 files changed, 56 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr116839.c
+
+diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
+index 8a5730a6b..0b9556f8e 100644
+--- a/gcc/config/i386/i386.cc
++++ b/gcc/config/i386/i386.cc
+@@ -11786,7 +11786,7 @@ ix86_tls_address_pattern_p (rtx op)
+ }
+ 
+ /* Rewrite *LOC so that it refers to a default TLS address space.  */
+-void
++static void
+ ix86_rewrite_tls_address_1 (rtx *loc)
+ {
+   subrtx_ptr_iterator::array_type array;
+@@ -11808,6 +11808,13 @@ ix86_rewrite_tls_address_1 (rtx *loc)
+ 		  if (GET_CODE (u) == UNSPEC
+ 		      && XINT (u, 1) == UNSPEC_TP)
+ 		    {
++		      /* NB: Since address override only applies to the
++			 (reg32) part in fs:(reg32), return if address
++			 override is used.  */
++		      if (Pmode != word_mode
++			  && REG_P (XEXP (*x, 1 - i)))
++			return;
++
+ 		      addr_space_t as = DEFAULT_TLS_SEG_REG;
+ 
+ 		      *x = XEXP (*x, 1 - i);
+diff --git a/gcc/testsuite/gcc.target/i386/pr116839.c b/gcc/testsuite/gcc.target/i386/pr116839.c
+new file mode 100644
+index 000000000..e5df82562
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr116839.c
+@@ -0,0 +1,48 @@
++/* { dg-do compile { target { ! ia32 } } } */
++/* { dg-require-effective-target maybe_x32 } */
++/* { dg-options "-mx32 -O2 -fPIC -mtls-dialect=gnu2" } */
++/* { dg-final { scan-assembler-not "cmpl\[ \t\]+%fs:previous_emax@dtpoff\\(%eax\\)" } } */
++
++typedef long mpfr_prec_t;
++typedef long mpfr_exp_t;
++typedef struct {
++  mpfr_prec_t _mpfr_prec;
++} __mpfr_struct;
++typedef __mpfr_struct mpfr_t[1];
++extern _Thread_local mpfr_exp_t __gmpfr_emax;
++static _Thread_local mpfr_exp_t previous_emax;
++static _Thread_local mpfr_t bound_emax;
++extern const mpfr_t __gmpfr_const_log2_RNDD;
++extern const mpfr_t __gmpfr_const_log2_RNDU;
++
++typedef enum {
++  MPFR_RNDN=0,
++  MPFR_RNDZ,
++  MPFR_RNDU,
++  MPFR_RNDD,
++  MPFR_RNDA,
++  MPFR_RNDF,
++  MPFR_RNDNA=-1
++} mpfr_rnd_t;
++typedef __mpfr_struct *mpfr_ptr;
++typedef const __mpfr_struct *mpfr_srcptr;
++void mpfr_mul (mpfr_ptr, mpfr_srcptr, mpfr_rnd_t);
++
++void
++foo (void)
++{
++  mpfr_exp_t saved_emax;
++
++  if (__gmpfr_emax != previous_emax)
++    {
++      saved_emax = __gmpfr_emax;
++
++      bound_emax->_mpfr_prec = 32;
++
++      mpfr_mul (bound_emax, saved_emax < 0 ?
++                __gmpfr_const_log2_RNDD : __gmpfr_const_log2_RNDU,
++                MPFR_RNDU);
++      previous_emax = saved_emax;
++      __gmpfr_emax = saved_emax;
++    }
++}
+-- 
+2.31.1
+
diff --git a/0386-x86-Disable-stack-protector-for-naked-functions.patch b/0386-x86-Disable-stack-protector-for-naked-functions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..dd50ce1d53ab145fce7678f05b3de084b8be47de
--- /dev/null
+++ b/0386-x86-Disable-stack-protector-for-naked-functions.patch
@@ -0,0 +1,77 @@
+From 8ff948ada01311776617730bd62434ac2c9d6ef5 Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" 
+Date: Fri, 4 Oct 2024 16:21:15 +0800
+Subject: [PATCH 3/4] x86: Disable stack protector for naked functions
+
+Since naked functions should not enable stack protector, define
+TARGET_STACK_PROTECT_RUNTIME_ENABLED_P to disable stack protector
+for naked functions.
+
+gcc/
+
+	PR target/116962
+	* config/i386/i386.cc (ix86_stack_protect_runtime_enabled_p): New
+	function.
+	(TARGET_STACK_PROTECT_RUNTIME_ENABLED_P): New.
+
+gcc/testsuite/
+
+	PR target/116962
+	* gcc.target/i386/pr116962.c: New file.
+
+Signed-off-by: H.J. Lu 
+(cherry picked from commit 7d2845da112214f064e7b24531cc67e256b5177e)
+---
+ gcc/config/i386/i386.cc                  | 11 +++++++++++
+ gcc/testsuite/gcc.target/i386/pr116962.c | 10 ++++++++++
+ 2 files changed, 21 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/i386/pr116962.c
+
+diff --git a/gcc/config/i386/i386.cc b/gcc/config/i386/i386.cc
+index 0b9556f8e..3e7e1b138 100644
+--- a/gcc/config/i386/i386.cc
++++ b/gcc/config/i386/i386.cc
+@@ -22606,6 +22606,13 @@ ix86_stack_protect_guard (void)
+   return default_stack_protect_guard ();
+ }
+ 
++static bool
++ix86_stack_protect_runtime_enabled_p (void)
++{
++  /* Naked functions should not enable stack protector.  */
++  return !ix86_function_naked (current_function_decl);
++}
++
+ /* For 32-bit code we can save PIC register setup by using
+    __stack_chk_fail_local hidden function instead of calling
+    __stack_chk_fail directly.  64-bit code doesn't need to setup any PIC
+@@ -24626,6 +24633,10 @@ ix86_libgcc_floating_mode_supported_p
+ #undef TARGET_STACK_PROTECT_GUARD
+ #define TARGET_STACK_PROTECT_GUARD ix86_stack_protect_guard
+ 
++#undef TARGET_STACK_PROTECT_RUNTIME_ENABLED_P
++#define TARGET_STACK_PROTECT_RUNTIME_ENABLED_P \
++  ix86_stack_protect_runtime_enabled_p
++
+ #if !TARGET_MACHO
+ #undef TARGET_STACK_PROTECT_FAIL
+ #define TARGET_STACK_PROTECT_FAIL ix86_stack_protect_fail
+diff --git a/gcc/testsuite/gcc.target/i386/pr116962.c b/gcc/testsuite/gcc.target/i386/pr116962.c
+new file mode 100644
+index 000000000..ced16eee7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/i386/pr116962.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile { target fstack_protector } } */
++/* { dg-options "-O2 -fstack-protector-all" } */
++/* { dg-final { scan-assembler-not "__stack_chk_fail" } } */
++
++__attribute__ ((naked))
++void
++foo (void)
++{
++  asm ("ret");
++}
+-- 
+2.31.1
+
diff --git a/0387-x86-Correct-ASM_OUTPUT_SYMBOL_REF.patch b/0387-x86-Correct-ASM_OUTPUT_SYMBOL_REF.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e48656b5342368f96a0751a53138d5cc064758f9
--- /dev/null
+++ b/0387-x86-Correct-ASM_OUTPUT_SYMBOL_REF.patch
@@ -0,0 +1,36 @@
+From 501d9c6c6af740c4b4327e720bd9957317e9e355 Mon Sep 17 00:00:00 2001
+From: "H.J. Lu" 
+Date: Tue, 11 Feb 2025 13:47:54 +0800
+Subject: [PATCH 4/4] x86: Correct ASM_OUTPUT_SYMBOL_REF
+
+x is not a macro argument.  It just happens to work as final.cc passes
+x for 2nd argument:
+
+final.cc:      ASM_OUTPUT_SYMBOL_REF (file, x);
+
+	PR target/118825
+	* config/i386/i386.h (ASM_OUTPUT_SYMBOL_REF): Replace x with
+	SYM.
+
+Signed-off-by: H.J. Lu 
+(cherry picked from commit 7317fc0b03380a83ad03a5fc4fabef5f38c44c9d)
+---
+ gcc/config/i386/i386.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/i386/i386.h b/gcc/config/i386/i386.h
+index 5052f878d..a198e36a1 100644
+--- a/gcc/config/i386/i386.h
++++ b/gcc/config/i386/i386.h
+@@ -2177,7 +2177,7 @@ extern int const svr4_dbx_register_map[FIRST_PSEUDO_REGISTER];
+ #define ASM_OUTPUT_SYMBOL_REF(FILE, SYM) \
+   do {							\
+     const char *name					\
+-      = assemble_name_resolve (XSTR (x, 0));		\
++      = assemble_name_resolve (XSTR (SYM, 0));		\
+     /* In -masm=att wrap identifiers that start with $	\
+        into parens.  */					\
+     if (ASSEMBLER_DIALECT == ASM_ATT			\
+-- 
+2.31.1
+
diff --git a/0388-struct-reorg-Escapes-some-void-pointers.patch b/0388-struct-reorg-Escapes-some-void-pointers.patch
new file mode 100644
index 0000000000000000000000000000000000000000..eb45ee941f4ae69001857d2bc3c54ea2321f9c25
--- /dev/null
+++ b/0388-struct-reorg-Escapes-some-void-pointers.patch
@@ -0,0 +1,110 @@
+From e73b06172ff00cf7004d2d50098bcbb74e92d519 Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Fri, 30 May 2025 11:29:27 +0800
+Subject: [PATCH 1/2] [struct-reorg] Escapes some void pointers
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc      |  6 ++--
+ .../gcc.dg/struct/sr_early_void_ptr.c         | 28 +++++++++++++++++++
+ .../struct/sr_no_recorded_local_void_ptr.c    | 23 +++++++++++++++
+ 3 files changed, 54 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c
+ create mode 100644 gcc/testsuite/gcc.dg/struct/sr_no_recorded_local_void_ptr.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index e8a84abbd..1140edd1d 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -5365,8 +5365,8 @@ ipa_struct_reorg::record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt)
+   srfield *field;
+   bool realpart, imagpart, address;
+   bool escape_from_base = false;
+-  if (!get_type_field (expr, base, indirect, type, field,
+-		       realpart, imagpart, address, escape_from_base))
++  if (!get_type_field (expr, base, indirect, type, field, realpart,
++		       imagpart, address, escape_from_base, false, true))
+     return;
+ 
+   if (current_layout_opt_level > NONE)
+@@ -5375,7 +5375,6 @@ ipa_struct_reorg::record_stmt_expr (tree expr, cgraph_node *node, gimple *stmt)
+ 	type->mark_escape (escape_non_optimize, stmt);
+     }
+ 
+-
+   /* Record it.  */
+   type->add_access (new sraccess (expr, stmt, node, find_function (node),
+ 				  type, base, field));
+@@ -6554,6 +6553,7 @@ ipa_struct_reorg::prune_escaped_types (void)
+       /* If contains or is contained by the escape type,
+ 	 mark them as escaping.  */
+       propagate_escape ();
++      propagate_escape_via_no_record_var ();
+     }
+   if (current_layout_opt_level >= STRUCT_REORDER_FIELDS)
+     {
+diff --git a/gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c b/gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c
+new file mode 100644
+index 000000000..5ff166f08
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-fgimple" } */
++
++#include 
++
++struct S {
++    int a;
++    int b;
++    int c;
++};
++
++__attribute__((noinline)) void __GIMPLE(ssa,startwith("struct_reorg")) test() {
++  void* _1;
++
++  __BB(2):
++  _1 = calloc(1UL, 12UL);
++  __MEM  ((int*)_1 + 4UL) = 0;
++  __MEM  ((struct S*)_1).a = 0;
++
++  return;
++}
++
++int main() {
++    test();
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "struct S(\[0-9\]*) has escaped" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sr_no_recorded_local_void_ptr.c b/gcc/testsuite/gcc.dg/struct/sr_no_recorded_local_void_ptr.c
+new file mode 100644
+index 000000000..d917b66f8
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/sr_no_recorded_local_void_ptr.c
+@@ -0,0 +1,23 @@
++/* { dg-do compile } */
++/* { dg-additional-options "-fgimple" } */
++
++#include 
++
++struct S {
++    int a;
++    int b;
++};
++
++__attribute__((noinline)) void __GIMPLE(startwith("struct_reorg")) test() {
++    void* ptr;
++
++    ptr = calloc(1UL, 8UL);
++    __MEM  ((struct S*)ptr).a = 0;
++}
++
++int main() {
++    test();
++    return 0;
++}
++
++/* { dg-final { scan-ipa-dump "struct S has escaped: \"Type escapes via no record var\"" "struct_reorg" } } */
+-- 
+2.33.0
+
diff --git a/0389-deja-Correct-pass-number-in-options.patch b/0389-deja-Correct-pass-number-in-options.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d452d805d654c512ff3062952ff534b7854df51f
--- /dev/null
+++ b/0389-deja-Correct-pass-number-in-options.patch
@@ -0,0 +1,2336 @@
+From e4bdf701ece96fd98730c02700b6d1d1d123b71d Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Fri, 30 May 2025 11:30:53 +0800
+Subject: [PATCH 2/2] [deja] Correct pass number in options
+
+---
+ gcc/testsuite/g++.dg/pr83541.C                 |  6 +++---
+ gcc/testsuite/g++.dg/pr96707.C                 |  4 ++--
+ gcc/testsuite/g++.dg/tree-ssa/empty-loop.C     |  4 ++--
+ gcc/testsuite/g++.dg/tree-ssa/fwprop-align.C   |  4 ++--
+ gcc/testsuite/g++.dg/tree-ssa/mull64.C         |  4 ++--
+ gcc/testsuite/g++.dg/tree-ssa/pr106922.C       |  4 ++--
+ gcc/testsuite/g++.dg/tree-ssa/pr14814.C        |  6 +++---
+ gcc/testsuite/g++.dg/tree-ssa/pr19476-6.C      |  6 +++---
+ gcc/testsuite/g++.dg/tree-ssa/pr19476-7.C      |  4 ++--
+ gcc/testsuite/gcc.dg/graphite/fuse-1.c         |  4 ++--
+ gcc/testsuite/gcc.dg/lto/tbaa-1.c              |  4 ++--
+ gcc/testsuite/gcc.dg/pr102738.c                |  8 ++++----
+ gcc/testsuite/gcc.dg/pr102983.c                |  4 ++--
+ gcc/testsuite/gcc.dg/pr103359.c                |  4 ++--
+ gcc/testsuite/gcc.dg/pr104288.c                |  6 +++---
+ gcc/testsuite/gcc.dg/pr104526.c                |  4 ++--
+ gcc/testsuite/gcc.dg/pr106063.c                |  2 +-
+ gcc/testsuite/gcc.dg/pr43513.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr68217.c                 |  2 +-
+ gcc/testsuite/gcc.dg/pr69047.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr78888.c                 |  6 +++---
+ gcc/testsuite/gcc.dg/pr81192.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr83072-2.c               |  4 ++--
+ gcc/testsuite/gcc.dg/pr83072.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr83073.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr90838.c                 | 10 +++++-----
+ gcc/testsuite/gcc.dg/pr91029.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr93231.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr96542.c                 |  6 +++---
+ gcc/testsuite/gcc.dg/pr97505.c                 |  4 ++--
+ gcc/testsuite/gcc.dg/pr97515.c                 |  6 +++---
+ gcc/testsuite/gcc.dg/pr97567-2.c               |  4 ++--
+ gcc/testsuite/gcc.dg/predict-1.c               |  2 +-
+ gcc/testsuite/gcc.dg/predict-9.c               |  2 +-
+ gcc/testsuite/gcc.dg/torture/pr97812.c         |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/20030808-1.c     |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/20040211-1.c     |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/alias-17.c       |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/andnot-2.c       |  4 ++--
+ .../gcc.dg/tree-ssa/builtin-sprintf-warn-22.c  |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/cunroll-9.c      |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/evrp-ignore.c    |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp-trans.c     |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/evrp-trans2.c    |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/evrp1.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp11.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp12.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp13.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp2.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp20.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp21.c         |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/evrp22.c         |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/evrp3.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp30.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp4.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp6.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp7.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp8.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/evrp9.c          | 10 +++++-----
+ gcc/testsuite/gcc.dg/tree-ssa/foldconst-4.c    |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/forwprop-33.c    | 18 +++++++++---------
+ gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr14814.c        |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr20318.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr20657.c        |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr20702.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr21001.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr21086.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr21090.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr21294.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr21458-2.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr21458.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr21559.c        |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr21563.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr23744.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr25382.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr45397.c        |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr49039.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr58480.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr61839_1.c      |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr61839_2.c      | 12 ++++++------
+ gcc/testsuite/gcc.dg/tree-ssa/pr61839_4.c      |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr64130.c        |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/pr68431.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr77445-2.c      |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/pr78153-1.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr78153-2.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr78154.c        |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr78655.c        |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr91029-1.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr91029-2.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/pr95906.c        |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/pr98513.c        |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-29.c     |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-3.c      |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-30.c     |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-54.c     |  6 +++---
+ gcc/testsuite/gcc.dg/tree-ssa/ssa-sink-19.c    |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp02.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp03.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp06.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp07.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp08.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp09.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp111.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp113.c         |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp114.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp115.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp117.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp120.c         |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp16.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp17.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp18.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp19.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp20.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp23.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp24.c          |  4 ++--
+ gcc/testsuite/gcc.dg/tree-ssa/vrp33.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp35.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp36.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp92.c          |  2 +-
+ gcc/testsuite/gcc.dg/tree-ssa/vrp98-1.c        |  8 ++++----
+ gcc/testsuite/gcc.dg/tree-ssa/vrp98.c          |  2 +-
+ gcc/testsuite/gcc.dg/vrp-min-max-1.c           |  2 +-
+ gcc/testsuite/gcc.dg/vrp-min-max-3.c           |  6 +++---
+ gcc/testsuite/gcc.target/i386/vect-gather-1.c  |  4 ++--
+ .../gcc.target/powerpc/vect-gather-1.c         |  4 ++--
+ gcc/testsuite/gfortran.dg/pr45636.f90          |  4 ++--
+ 132 files changed, 262 insertions(+), 262 deletions(-)
+
+diff --git a/gcc/testsuite/g++.dg/pr83541.C b/gcc/testsuite/g++.dg/pr83541.C
+index f5b181e06..a55147a2b 100644
+--- a/gcc/testsuite/g++.dg/pr83541.C
++++ b/gcc/testsuite/g++.dg/pr83541.C
+@@ -1,6 +1,6 @@
+ // PR tree-optimization/83541
+ // { dg-do compile }
+-// { dg-options "-O3 -std=c++17 -ffast-math -fdump-tree-evrp"  }
++// { dg-options "-O3 -std=c++17 -ffast-math -fdump-tree-evrp1"  }
+ 
+ #include 
+ 
+@@ -13,5 +13,5 @@ int test(int x)
+     return 42;
+ }
+ 
+-// { dg-final { scan-tree-dump "return 42"  evrp } }
+-// { dg-final { scan-tree-dump-not "return _"  evrp } }
++// { dg-final { scan-tree-dump "return 42"  evrp1 } }
++// { dg-final { scan-tree-dump-not "return _"  evrp1 } }
+diff --git a/gcc/testsuite/g++.dg/pr96707.C b/gcc/testsuite/g++.dg/pr96707.C
+index 2653fe3d0..add3ae0bb 100644
+--- a/gcc/testsuite/g++.dg/pr96707.C
++++ b/gcc/testsuite/g++.dg/pr96707.C
+@@ -1,10 +1,10 @@
+ /* { dg-do compile} */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ bool f(unsigned x, unsigned y)
+ {
+     return (x >> y) <= x;
+ }
+ 
+-/* { dg-final { scan-tree-dump "return 1" "evrp" } }  */
++/* { dg-final { scan-tree-dump "return 1" "evrp1" } }  */
+ 
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/empty-loop.C b/gcc/testsuite/g++.dg/tree-ssa/empty-loop.C
+index 6b1e879e6..a65724c4b 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/empty-loop.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/empty-loop.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-cddce2 -ffinite-loops" } */
++/* { dg-options "-O2 -fdump-tree-cddce3 -ffinite-loops" } */
+ 
+ #include 
+ #include 
+@@ -29,5 +29,5 @@ int foo (vector &v, list &l, set &s, map &m
+ 
+   return 0;
+ }
+-/* { dg-final { scan-tree-dump-not "if" "cddce2"} } */
++/* { dg-final { scan-tree-dump-not "if" "cddce3"} } */
+ 
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/fwprop-align.C b/gcc/testsuite/g++.dg/tree-ssa/fwprop-align.C
+index 5cc5f0d7d..2f8b6c04b 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/fwprop-align.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/fwprop-align.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-forwprop2" } */
++/* { dg-options "-O2 -fdump-tree-forwprop4" } */
+ 
+ struct A
+ {
+@@ -16,4 +16,4 @@ int main()
+ }
+ 
+ /* We should eliminate the check if p points to a virtual function. */
+-/* { dg-final { scan-tree-dump-times "& 1" 0 "forwprop2" } } */
++/* { dg-final { scan-tree-dump-times "& 1" 0 "forwprop4" } } */
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/mull64.C b/gcc/testsuite/g++.dg/tree-ssa/mull64.C
+index cad891e62..3505fb513 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/mull64.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/mull64.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fmerge-mull -Wno-psabi -fdump-tree-forwprop1-details -fdump-tree-forwprop4-details" } */
++/* { dg-options "-O2 -fmerge-mull -Wno-psabi -fdump-tree-forwprop1-details -fdump-tree-forwprop6-details" } */
+ 
+ #  define BN_BITS4        32
+ #  define BN_MASK2        (0xffffffffffffffffL)
+@@ -32,4 +32,4 @@ void mul64(unsigned long in0, unsigned long in1,
+ }
+ 
+ /* { dg-final { scan-tree-dump "gimple_simplified to" "forwprop1" } } */
+-/* { dg-final { scan-tree-dump-times "gimple_simplified to" 1 "forwprop4" } } */
++/* { dg-final { scan-tree-dump-times "gimple_simplified to" 1 "forwprop6" } } */
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr106922.C b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
+index 2aec4975a..11fcb4eb4 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/pr106922.C
+@@ -1,5 +1,5 @@
+ // { dg-require-effective-target c++20 }
+-// { dg-options "-O2 -fdump-tree-cddce3" }
++// { dg-options "-O2 -fdump-tree-cddce4" }
+ 
+ template  struct __new_allocator {
+   void deallocate(int *, int) { operator delete(0); }
+@@ -87,4 +87,4 @@ void testfunctionfoo() {
+   }
+ }
+ 
+-// { dg-final { scan-tree-dump-not "m_initialized" "cddce3" } }
++// { dg-final { scan-tree-dump-not "m_initialized" "cddce4" } }
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr14814.C b/gcc/testsuite/g++.dg/tree-ssa/pr14814.C
+index f2177d257..9372de88d 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/pr14814.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/pr14814.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-forwprop2" } */
++/* { dg-options "-O2 -fdump-tree-forwprop4" } */
+ 
+ class YY { public:
+   YY(const YY &v) { e[0] = v.e[0]; e[1] = v.e[1]; e[2] = v.e[2]; }
+@@ -14,6 +14,6 @@ int foo(XX& r) {
+   if (r.direction().y() < 0.000001) return 0;
+   return 1; }
+ 
+-/* { dg-final { scan-tree-dump-times "&this" 0 "forwprop2" } } */
+-/* { dg-final { scan-tree-dump-times "&r" 0 "forwprop2" } } */
++/* { dg-final { scan-tree-dump-times "&this" 0 "forwprop4" } } */
++/* { dg-final { scan-tree-dump-times "&r" 0 "forwprop4" } } */
+ 
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr19476-6.C b/gcc/testsuite/g++.dg/tree-ssa/pr19476-6.C
+index f6b06c93c..30918bcfd 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/pr19476-6.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/pr19476-6.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fdelete-null-pointer-checks" } */
+ /* { dg-skip-if "" keeps_null_pointer_checks } */
+ 
+ // See pr19476-7.C for a version without including .
+@@ -12,5 +12,5 @@ int g(){
+   return 42 + (0 == new int[50]);
+ }
+ 
+-/* { dg-final { scan-tree-dump     "return 42" "evrp" } } */
+-/* { dg-final { scan-tree-dump-not "return 33" "evrp" } } */
++/* { dg-final { scan-tree-dump     "return 42" "evrp1" } } */
++/* { dg-final { scan-tree-dump-not "return 33" "evrp1" } } */
+diff --git a/gcc/testsuite/g++.dg/tree-ssa/pr19476-7.C b/gcc/testsuite/g++.dg/tree-ssa/pr19476-7.C
+index 38624f2ba..fb22528cc 100644
+--- a/gcc/testsuite/g++.dg/tree-ssa/pr19476-7.C
++++ b/gcc/testsuite/g++.dg/tree-ssa/pr19476-7.C
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fdelete-null-pointer-checks" } */
+ /* { dg-skip-if "" keeps_null_pointer_checks } */
+ 
+ // See pr19476-6.C for a version that includes .
+@@ -8,4 +8,4 @@ int g(){
+   return 42 + (0 == new int[50]);
+ }
+ 
+-/* { dg-final { scan-tree-dump     "return 42" "evrp" } } */
++/* { dg-final { scan-tree-dump     "return 42" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/graphite/fuse-1.c b/gcc/testsuite/gcc.dg/graphite/fuse-1.c
+index 61289d312..0b36b37a1 100644
+--- a/gcc/testsuite/gcc.dg/graphite/fuse-1.c
++++ b/gcc/testsuite/gcc.dg/graphite/fuse-1.c
+@@ -1,6 +1,6 @@
+ /* Check that the two loops are fused and that we manage to fold the two xor
+    operations.  */
+-/* { dg-options "-O2 -fno-tree-vectorize -floop-nest-optimize -fdump-tree-forwprop4 -fdump-tree-graphite-all" } */
++/* { dg-options "-O2 -fno-tree-vectorize -floop-nest-optimize -fdump-tree-forwprop6 -fdump-tree-graphite-all" } */
+ 
+ /* Make sure we fuse the loops like this:
+ AST generated by isl:
+@@ -12,7 +12,7 @@ for (int c0 = 0; c0 <= 99; c0 += 1) {
+ /* { dg-final { scan-tree-dump-times "AST generated by isl:.*for \\(int c0 = 0; c0 <= 99; c0 \\+= 1\\) \\{.*S_.*\\(c0\\);.*S_.*\\(c0\\);.*S_.*\\(c0\\);.*\\}" 1 "graphite" } } */
+ 
+ /* Check that after fusing the loops, the scalar computation is also fused.  */
+-/* { dg-final { scan-tree-dump-times " \\^ 12;" 2 "forwprop4" } } */
++/* { dg-final { scan-tree-dump-times " \\^ 12;" 2 "forwprop6" } } */
+ 
+ #define MAX 100
+ int A[MAX];
+diff --git a/gcc/testsuite/gcc.dg/lto/tbaa-1.c b/gcc/testsuite/gcc.dg/lto/tbaa-1.c
+index 74c049671..2047c70c8 100644
+--- a/gcc/testsuite/gcc.dg/lto/tbaa-1.c
++++ b/gcc/testsuite/gcc.dg/lto/tbaa-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -flto -fdump-tree-evrp" } */
++/* { dg-options "-O2 -flto -fdump-tree-evrp1" } */
+ typedef struct rtx_def *rtx;
+ typedef struct cselib_val_struct
+ {
+@@ -38,4 +38,4 @@ discard_useless_locs (x, info)
+       n_useless_values++;
+     }
+ }
+-/* { dg-final { scan-tree-dump-times "n_useless_values" 2 "evrp" } } */                 
++/* { dg-final { scan-tree-dump-times "n_useless_values" 2 "evrp1" } } */                 
+diff --git a/gcc/testsuite/gcc.dg/pr102738.c b/gcc/testsuite/gcc.dg/pr102738.c
+index cd58c2589..a5439e8cb 100644
+--- a/gcc/testsuite/gcc.dg/pr102738.c
++++ b/gcc/testsuite/gcc.dg/pr102738.c
+@@ -1,5 +1,5 @@
+ /* PR tree-optimization/102738 */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ /* { dg-do compile { target int128 } } */
+ 
+ /* Remove arithmetic shift right when the LHS is known to be 0 or -1.  */
+@@ -44,6 +44,6 @@ int a6(int f, int g)
+     __builtin_unreachable();
+ }
+ 
+-/* { dg-final { scan-tree-dump-times " >> 127" 1 "evrp" } } */
+-/* { dg-final { scan-tree-dump-times " >> 31" 1 "evrp" } } */
+-/* { dg-final { scan-tree-dump-times " >> " 2 "evrp" } } */
++/* { dg-final { scan-tree-dump-times " >> 127" 1 "evrp1" } } */
++/* { dg-final { scan-tree-dump-times " >> 31" 1 "evrp1" } } */
++/* { dg-final { scan-tree-dump-times " >> " 2 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr102983.c b/gcc/testsuite/gcc.dg/pr102983.c
+index ef58af6de..c4a3dfad6 100644
+--- a/gcc/testsuite/gcc.dg/pr102983.c
++++ b/gcc/testsuite/gcc.dg/pr102983.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ void foo(void);
+ 
+ static int a = 1;
+@@ -18,4 +18,4 @@ int main() {
+   }
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "Folding predicate c_.* to 1" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Folding predicate c_.* to 1" 1 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr103359.c b/gcc/testsuite/gcc.dg/pr103359.c
+index 13406f90d..187856702 100644
+--- a/gcc/testsuite/gcc.dg/pr103359.c
++++ b/gcc/testsuite/gcc.dg/pr103359.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O3 -fdump-tree-evrp" } */
++/* { dg-options "-O3 -fdump-tree-evrp1" } */
+ 
+ void foo();
+ static char a, c;
+@@ -18,4 +18,4 @@ int main() {
+     foo();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "c = 0" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "c = 0" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr104288.c b/gcc/testsuite/gcc.dg/pr104288.c
+index 95eb196f9..020beb044 100644
+--- a/gcc/testsuite/gcc.dg/pr104288.c
++++ b/gcc/testsuite/gcc.dg/pr104288.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fdelete-null-pointer-checks" } */
+ /* { dg-skip-if "" { keeps_null_pointer_checks } } */
+ 
+ void keep(int result) __attribute__((noipa));
+@@ -19,5 +19,5 @@ void bar (void *p)
+     __builtin_abort ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "abort" "evrp" } } */
+-/* { dg-final { scan-tree-dump-times  "== 0B;" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-not "abort" "evrp1" } } */
++/* { dg-final { scan-tree-dump-times  "== 0B;" 1 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr104526.c b/gcc/testsuite/gcc.dg/pr104526.c
+index a29530829..574d414bc 100644
+--- a/gcc/testsuite/gcc.dg/pr104526.c
++++ b/gcc/testsuite/gcc.dg/pr104526.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void foo(void);
+ 
+@@ -12,4 +12,4 @@ int main() {
+   }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "foo" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "foo" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr106063.c b/gcc/testsuite/gcc.dg/pr106063.c
+index 467b31dea..f715c295e 100644
+--- a/gcc/testsuite/gcc.dg/pr106063.c
++++ b/gcc/testsuite/gcc.dg/pr106063.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target int128 } } */
+-/* { dg-options "-O2 -fno-tree-forwprop --disable-tree-evrp" } */
++/* { dg-options "-O2 -fno-tree-forwprop --disable-tree-evrp1" } */
+ typedef __int128 __attribute__((__vector_size__ (16))) V;
+ 
+ V
+diff --git a/gcc/testsuite/gcc.dg/pr43513.c b/gcc/testsuite/gcc.dg/pr43513.c
+index 9383a802c..fb2673f08 100644
+--- a/gcc/testsuite/gcc.dg/pr43513.c
++++ b/gcc/testsuite/gcc.dg/pr43513.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-ccp2" } */
++/* { dg-options "-O2 -fdump-tree-ccp3" } */
+ 
+ void bar (int *);
+ void foo (char *, int);
+@@ -15,4 +15,4 @@ foo3 ()
+     foo ("%d ", results[i]);
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "alloca" 0 "ccp2"} } */
++/* { dg-final { scan-tree-dump-times "alloca" 0 "ccp3"} } */
+diff --git a/gcc/testsuite/gcc.dg/pr68217.c b/gcc/testsuite/gcc.dg/pr68217.c
+index eb4f15e04..178cb5008 100644
+--- a/gcc/testsuite/gcc.dg/pr68217.c
++++ b/gcc/testsuite/gcc.dg/pr68217.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1 -fno-tree-ccp" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1 -fno-tree-ccp" } */
+ 
+ int foo (void)
+ {
+diff --git a/gcc/testsuite/gcc.dg/pr69047.c b/gcc/testsuite/gcc.dg/pr69047.c
+index d562663d8..d32e449e9 100644
+--- a/gcc/testsuite/gcc.dg/pr69047.c
++++ b/gcc/testsuite/gcc.dg/pr69047.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O -fdump-tree-forwprop4" } */
++/* { dg-options "-O -fdump-tree-forwprop6" } */
+ 
+ __UINT8_TYPE__
+ f(__UINT16_TYPE__ b)
+@@ -15,4 +15,4 @@ f(__UINT16_TYPE__ b)
+   return a;
+ }
+ 
+-/* { dg-final { scan-tree-dump "_\[0-9\]+ = \\(\[^)\]+\\) b" "forwprop4" } } */
++/* { dg-final { scan-tree-dump "_\[0-9\]+ = \\(\[^)\]+\\) b" "forwprop6" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr78888.c b/gcc/testsuite/gcc.dg/pr78888.c
+index 77a130cf1..2ab0995e7 100644
+--- a/gcc/testsuite/gcc.dg/pr78888.c
++++ b/gcc/testsuite/gcc.dg/pr78888.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/78888 */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void kill (void);
+ void keep (void);
+@@ -25,5 +25,5 @@ void g (int x)
+   if (__builtin_tolower ((unsigned char)x) == 'z')
+     keep ();
+ }
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
+-/* { dg-final { scan-tree-dump-times "keep" 4 "evrp"} } */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
++/* { dg-final { scan-tree-dump-times "keep" 4 "evrp1"} } */
+diff --git a/gcc/testsuite/gcc.dg/pr81192.c b/gcc/testsuite/gcc.dg/pr81192.c
+index 6cab60565..3e808d67c 100644
+--- a/gcc/testsuite/gcc.dg/pr81192.c
++++ b/gcc/testsuite/gcc.dg/pr81192.c
+@@ -1,6 +1,6 @@
+-/* { dg-options "-Os -fdump-tree-pre-details -fdisable-tree-evrp -fno-tree-dse" } */
++/* { dg-options "-Os -fdump-tree-pre-details -fdisable-tree-evrp1 -fno-tree-dse" } */
+ 
+-/* Disable tree-evrp because the new version of evrp sees
++/* Disable tree-evrp1 because the new version of evrp1 sees
+  :
+   if (j_8(D) != 2147483647)
+     goto ; [50.00%]
+diff --git a/gcc/testsuite/gcc.dg/pr83072-2.c b/gcc/testsuite/gcc.dg/pr83072-2.c
+index f495f2582..c197a1a88 100644
+--- a/gcc/testsuite/gcc.dg/pr83072-2.c
++++ b/gcc/testsuite/gcc.dg/pr83072-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile} */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ int f1(int a, int b, int c){
+   if(c==0)__builtin_unreachable();
+@@ -15,4 +15,4 @@ int f2(int a, int b, int c){
+   return a == b;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "gimple_simplified to" 2 "evrp" } }  */
++/* { dg-final { scan-tree-dump-times "gimple_simplified to" 2 "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/pr83072.c b/gcc/testsuite/gcc.dg/pr83072.c
+index 3bed8d890..71116bd0e 100644
+--- a/gcc/testsuite/gcc.dg/pr83072.c
++++ b/gcc/testsuite/gcc.dg/pr83072.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fno-tree-ccp -fno-tree-forwprop -fno-tree-fre" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fno-tree-ccp -fno-tree-forwprop -fno-tree-fre" } */
+ 
+ void kill (void);
+ 
+@@ -11,4 +11,4 @@ int f(int c){
+   return c;
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/pr83073.c b/gcc/testsuite/gcc.dg/pr83073.c
+index 1168ae822..899a53caa 100644
+--- a/gcc/testsuite/gcc.dg/pr83073.c
++++ b/gcc/testsuite/gcc.dg/pr83073.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile} */
+-/* { dg-options "-O2 -fdump-tree-evrp-details -fno-tree-fre -fno-tree-ccp -fno-tree-forwprop" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details -fno-tree-fre -fno-tree-ccp -fno-tree-forwprop" } */
+ 
+ int f(int x)
+ {
+@@ -7,4 +7,4 @@ int f(int x)
+     return x & 1;
+ }
+ 
+-/* { dg-final { scan-tree-dump "gimple_simplified to.* = 1" "evrp" } }  */
++/* { dg-final { scan-tree-dump "gimple_simplified to.* = 1" "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/pr90838.c b/gcc/testsuite/gcc.dg/pr90838.c
+index 7502b8463..8e8bdeca7 100644
+--- a/gcc/testsuite/gcc.dg/pr90838.c
++++ b/gcc/testsuite/gcc.dg/pr90838.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-forwprop2-details" } */
++/* { dg-options "-O2 -fdump-tree-forwprop4-details" } */
+ /* { dg-additional-options "-mbmi" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } */
+ /* { dg-additional-options "-march=rv64gc_zbb" { target { rv64 } } } */
+ /* { dg-additional-options "-march=rv32gc_zbb" { target { rv32 } } } */
+@@ -59,7 +59,7 @@ int ctz4 (unsigned long x)
+   return table[(lsb * magic) >> 58];
+ }
+ 
+-/* { dg-final { scan-tree-dump-times {= \.CTZ} 4 "forwprop2" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */
++/* { dg-final { scan-tree-dump-times {= \.CTZ} 4 "forwprop4" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */
+ /* { dg-final { scan-assembler-times "tzcntq\t" 1 { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */
+ /* { dg-final { scan-assembler-times "tzcntl\t" 3 { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */
+ /* { dg-final { scan-assembler-times "andl\t" 2 { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */
+@@ -67,18 +67,18 @@ int ctz4 (unsigned long x)
+ /* { dg-final { scan-assembler-not "imulq" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */
+ /* { dg-final { scan-assembler-not "shrq" { target { { i?86-*-* x86_64-*-* } && { ! { ia32 } } } } } } */
+ 
+-/* { dg-final { scan-tree-dump-times {= \.CTZ} 4 "forwprop2" { target aarch64*-*-* } } } */
++/* { dg-final { scan-tree-dump-times {= \.CTZ} 4 "forwprop4" { target aarch64*-*-* } } } */
+ /* { dg-final { scan-assembler-times "clz\t" 4 { target aarch64*-*-* } } } */
+ /* { dg-final { scan-assembler-times "and\t" 2 { target aarch64*-*-* } } } */
+ /* { dg-final { scan-assembler-not "cmp\t.*0" { target aarch64*-*-* } } } */
+ 
+-/* { dg-final { scan-tree-dump-times {= \.CTZ} 4 "forwprop2" { target { rv64 } } } } */
++/* { dg-final { scan-tree-dump-times {= \.CTZ} 4 "forwprop4" { target { rv64 } } } } */
+ /* { dg-final { scan-assembler-times "ctz\t"  1 { target { rv64 } } } } */
+ /* { dg-final { scan-assembler-times "ctzw\t" 3 { target { rv64 } } } } */
+ /* { dg-final { scan-assembler-times "andi\t" 2 { target { rv64 } } } } */
+ /* { dg-final { scan-assembler-not "mul" { target { rv64 } } } } */
+ 
+-/* { dg-final { scan-tree-dump-times {= \.CTZ} 3 "forwprop2" { target { rv32 } } } } */
++/* { dg-final { scan-tree-dump-times {= \.CTZ} 3 "forwprop4" { target { rv32 } } } } */
+ /* { dg-final { scan-assembler-times "ctz\t" 3 { target { rv32 } } } } */
+ /* { dg-final { scan-assembler-times "andi\t" 1 { target { rv32 } } } } */
+ /* { dg-final { scan-assembler-times "mul\t" 1 { target { rv32 } } } } */
+diff --git a/gcc/testsuite/gcc.dg/pr91029.c b/gcc/testsuite/gcc.dg/pr91029.c
+index 4904764e1..f29212c01 100644
+--- a/gcc/testsuite/gcc.dg/pr91029.c
++++ b/gcc/testsuite/gcc.dg/pr91029.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/91029 */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void kill (void);
+ int xx;
+@@ -45,4 +45,4 @@ void f4 (int i)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/pr93231.c b/gcc/testsuite/gcc.dg/pr93231.c
+index cd0b3f320..1898935a9 100644
+--- a/gcc/testsuite/gcc.dg/pr93231.c
++++ b/gcc/testsuite/gcc.dg/pr93231.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-forwprop2-details -Wno-shift-count-negative" } */
++/* { dg-options "-O2 -fdump-tree-forwprop4-details -Wno-shift-count-negative" } */
+ 
+ int ctz_ice1 (int x)
+ {
+@@ -32,4 +32,4 @@ int ctz_fail (unsigned x)
+   return table[((x & -x) * 0x077CB531) >> 27];
+ }
+ 
+-/* { dg-final { scan-tree-dump-not {= \.CTZ} "forwprop2" } } */
++/* { dg-final { scan-tree-dump-not {= \.CTZ} "forwprop4" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr96542.c b/gcc/testsuite/gcc.dg/pr96542.c
+index 5014f2aca..5b924d568 100644
+--- a/gcc/testsuite/gcc.dg/pr96542.c
++++ b/gcc/testsuite/gcc.dg/pr96542.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile} */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ 
+ unsigned char
+@@ -22,6 +22,6 @@ baz (unsigned int x)
+   return (-1U >> x) * 16;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times  "254" 2 "evrp" } }  */
+-/* { dg-final { scan-tree-dump "= PHI <32.*, 4294967280" "evrp" } }  */
++/* { dg-final { scan-tree-dump-times  "254" 2 "evrp1" } }  */
++/* { dg-final { scan-tree-dump "= PHI <32.*, 4294967280" "evrp1" } }  */
+ 
+diff --git a/gcc/testsuite/gcc.dg/pr97505.c b/gcc/testsuite/gcc.dg/pr97505.c
+index f01d91206..efd011059 100644
+--- a/gcc/testsuite/gcc.dg/pr97505.c
++++ b/gcc/testsuite/gcc.dg/pr97505.c
+@@ -1,5 +1,5 @@
+ // { dg-do compile }
+-// { dg-options "-Os -fsanitize=signed-integer-overflow -fdump-tree-evrp" }
++// { dg-options "-Os -fsanitize=signed-integer-overflow -fdump-tree-evrp1" }
+ 
+ // Test that .UBSAN_CHECK_SUB(y, x) is treated as y-x for range
+ // purposes, where X and Y are related to each other.
+@@ -20,4 +20,4 @@ int foobar(int x, int y)
+   return 5;
+ }
+ 
+-// { dg-final { scan-tree-dump-not "unreachable" "evrp" } }
++// { dg-final { scan-tree-dump-not "unreachable" "evrp1" } }
+diff --git a/gcc/testsuite/gcc.dg/pr97515.c b/gcc/testsuite/gcc.dg/pr97515.c
+index b4f2481cb..7b860cc7b 100644
+--- a/gcc/testsuite/gcc.dg/pr97515.c
++++ b/gcc/testsuite/gcc.dg/pr97515.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-ccp2" } */
++/* { dg-options "-O2 -fdump-tree-ccp3" } */
+ 
+ int
+ e7 (int gg)
+@@ -22,6 +22,6 @@ e7 (int gg)
+ 
+ /* EVRP should be able to reduce this to a single goto when we can
+  * revisit statements to try folding again based on changed inputs.
+- * Until then, make sure its gone by ccp2.  */
++ * Until then, make sure its gone by ccp3.  */
+  
+-/* { dg-final { scan-tree-dump-times "goto" 1 "ccp2" } } */
++/* { dg-final { scan-tree-dump-times "goto" 1 "ccp3" } } */
+diff --git a/gcc/testsuite/gcc.dg/pr97567-2.c b/gcc/testsuite/gcc.dg/pr97567-2.c
+index c3ead54ea..69447736b 100644
+--- a/gcc/testsuite/gcc.dg/pr97567-2.c
++++ b/gcc/testsuite/gcc.dg/pr97567-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile} */
+-/* { dg-options "-O2 -fdump-tree-evrp -fdisable-tree-ethread" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fdisable-tree-ethread" } */
+ 
+ char a[2];
+ 
+@@ -21,4 +21,4 @@ void gg (void)
+      foo ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "foo" "evrp" } }  */
++/* { dg-final { scan-tree-dump-not "foo" "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/predict-1.c b/gcc/testsuite/gcc.dg/predict-1.c
+index d2e753e62..96e7474fd 100644
+--- a/gcc/testsuite/gcc.dg/predict-1.c
++++ b/gcc/testsuite/gcc.dg/predict-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-profile_estimate --disable-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-profile_estimate --disable-tree-evrp1" } */
+ 
+ extern int global;
+ 
+diff --git a/gcc/testsuite/gcc.dg/predict-9.c b/gcc/testsuite/gcc.dg/predict-9.c
+index cb68a218a..01a8361ca 100644
+--- a/gcc/testsuite/gcc.dg/predict-9.c
++++ b/gcc/testsuite/gcc.dg/predict-9.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-profile_estimate -fno-finite-loops -fdisable-tree-ethread" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-profile_estimate -fno-finite-loops -fdisable-tree-ethread" } */
+ 
+ /* Note: Threader causes removal of for loop.  */
+ 
+diff --git a/gcc/testsuite/gcc.dg/torture/pr97812.c b/gcc/testsuite/gcc.dg/torture/pr97812.c
+index 4d468adf8..7df29a69e 100644
+--- a/gcc/testsuite/gcc.dg/torture/pr97812.c
++++ b/gcc/testsuite/gcc.dg/torture/pr97812.c
+@@ -1,5 +1,5 @@
+ /* { dg-do run } */
+-/* { dg-additional-options "-fdisable-tree-evrp" } */
++/* { dg-additional-options "-fdisable-tree-evrp1" } */
+ 
+ unsigned char c;
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/20030808-1.c b/gcc/testsuite/gcc.dg/tree-ssa/20030808-1.c
+index 456f6f271..40b8e4cac 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/20030808-1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/20030808-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O1 -fdump-tree-cddce3" } */
++/* { dg-options "-O1 -fdump-tree-cddce4" } */
+       
+ extern void abort (void);
+ 
+@@ -33,8 +33,8 @@ delete_dead_jumptables ()
+ /* There should be no loads of ->code.  If any exist, then we failed to
+    optimize away all the IF statements and the statements feeding
+    their conditions.  */
+-/* { dg-final { scan-tree-dump-times "->code" 0 "cddce3"} } */
++/* { dg-final { scan-tree-dump-times "->code" 0 "cddce4"} } */
+    
+ /* There should be no IF statements.  */
+-/* { dg-final { scan-tree-dump-times "if " 0 "cddce3"} } */
++/* { dg-final { scan-tree-dump-times "if " 0 "cddce4"} } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/20040211-1.c b/gcc/testsuite/gcc.dg/tree-ssa/20040211-1.c
+index a9bdf2693..625426d20 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/20040211-1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/20040211-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-cddce2 -fno-finite-loops" } */
++/* { dg-options "-O2 -fdump-tree-cddce3 -fno-finite-loops" } */
+ 
+ struct rtx_def;
+ typedef struct rtx_def *rtx;
+@@ -35,4 +35,4 @@ com (rtx insn, int blah)
+ 
+ /* Cddce cannot remove possibly infinite loops and there is no way how to
+    determine whether the loop in can_move_up ends.  */
+-/* { dg-final { scan-tree-dump "if " "cddce2"} } */
++/* { dg-final { scan-tree-dump "if " "cddce3"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/alias-17.c b/gcc/testsuite/gcc.dg/tree-ssa/alias-17.c
+index 62ef77622..0c05519df 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/alias-17.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/alias-17.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O -fno-early-inlining -fdump-tree-ccp2" } */
++/* { dg-options "-O -fno-early-inlining -fdump-tree-ccp3" } */
+ 
+ int *p;
+ int inline bar(void) { return 0; }
+@@ -14,4 +14,4 @@ int foo(int x)
+   return *q + *p;
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "NOTE: no flow-sensitive alias info for" "ccp2" } } */
++/* { dg-final { scan-tree-dump-not "NOTE: no flow-sensitive alias info for" "ccp3" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/andnot-2.c b/gcc/testsuite/gcc.dg/tree-ssa/andnot-2.c
+index e0955ce3f..6e1563d19 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/andnot-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/andnot-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-forwprop3-raw -w -Wno-psabi" } */
++/* { dg-options "-O2 -fdump-tree-forwprop5-raw -w -Wno-psabi" } */
+ 
+ typedef long vec __attribute__((vector_size(16)));
+ vec f(vec x){
+@@ -7,4 +7,4 @@ vec f(vec x){
+   return y & (y == 0);
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "_expr" "forwprop3" } } */
++/* { dg-final { scan-tree-dump-not "_expr" "forwprop5" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/builtin-sprintf-warn-22.c b/gcc/testsuite/gcc.dg/tree-ssa/builtin-sprintf-warn-22.c
+index 82eb5851c..e7fdd7390 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/builtin-sprintf-warn-22.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/builtin-sprintf-warn-22.c
+@@ -22,7 +22,7 @@ void g (char *s1, char *s2)
+      [1] n_6: size_t [0, 1023]
+      [2] d_8: size_t [0, 1023]
+ 
+-     Whereas evrp can't really:
++     Whereas evrp1 can't really:
+      [1] n_6: size_t [0, 9223372036854775805]
+      [2] d_8: size_t [0, 9223372036854775805]
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/cunroll-9.c b/gcc/testsuite/gcc.dg/tree-ssa/cunroll-9.c
+index 886dc147a..787e41a8a 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/cunroll-9.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/cunroll-9.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-cunrolli-details -fdisable-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-cunrolli-details -fdisable-tree-evrp1" } */
+ void abort (void);
+ int q (void);
+ int a[10];
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp-ignore.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp-ignore.c
+index 9bfaed6a5..b4e687dfe 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp-ignore.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp-ignore.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fno-tree-fre -fdisable-tree-ethread" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fno-tree-fre -fdisable-tree-ethread" } */
+ 
+ void kill(void);
+ 
+@@ -25,4 +25,4 @@ void foo (int x, int y, int z)
+     kill();
+ 
+ }
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans.c
+index 8ee8e3c3f..f07eee75d 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ /* Simple tests to make sure transitives are working. */
+ void keep();
+@@ -140,5 +140,5 @@ f9 (int x, int y, int z)
+       }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
+-/* { dg-final { scan-tree-dump-times "keep" 13 "evrp"} } */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
++/* { dg-final { scan-tree-dump-times "keep" 13 "evrp1"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans2.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans2.c
+index d6fe42714..7a39f126d 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp-trans2.c
+@@ -1,8 +1,8 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ #define int unsigned
+ #include "evrp-trans.c"
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
+-/* { dg-final { scan-tree-dump-times "keep" 13 "evrp"} } */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
++/* { dg-final { scan-tree-dump-times "keep" 13 "evrp1"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp1.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp1.c
+index f5f38c4ce..d2561f6c6 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ int foo (int i);
+ int bar (int j)
+@@ -10,4 +10,4 @@ int bar (int j)
+     return j;
+ }
+ 
+-/* { dg-final { scan-tree-dump "\\\[5, \\+INF" "evrp" } } */
++/* { dg-final { scan-tree-dump "\\\[5, \\+INF" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp11.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp11.c
+index d791305d4..457e15552 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp11.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp11.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fdelete-null-pointer-checks" } */
+ 
+ extern void link_error ();
+ 
+@@ -20,4 +20,4 @@ void bar (char *x, int a)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "link_error" "evrp" } }  */
++/* { dg-final { scan-tree-dump-not "link_error" "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp12.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp12.c
+index b3906c234..e26fbf73d 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp12.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp12.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ extern void link_error ();
+ 
+@@ -18,4 +18,4 @@ f3 (unsigned int s)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "link_error" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "link_error" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp13.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp13.c
+index cfa4e8256..4f3ec19c3 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp13.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp13.c
+@@ -1,7 +1,7 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ #define ADD_NW(A,B) (__extension__({ __typeof(A+B) R; if(__builtin_add_overflow(A,B,&R)) __builtin_unreachable(); R ;}))
+ _Bool a_b2(unsigned A,  unsigned B) { return ADD_NW(A,B) >= B; }
+ 
+-/* { dg-final { scan-tree-dump "return 1;" "evrp" } } */
++/* { dg-final { scan-tree-dump "return 1;" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp2.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp2.c
+index fc92cdfbc..099ca4a0c 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ int foo (int i);
+ int bar2 (int j)
+@@ -15,4 +15,4 @@ int bar2 (int j)
+ }
+ 
+ 
+-/* { dg-final { scan-tree-dump "\\\[4, 7\\\]" "evrp" } } */
++/* { dg-final { scan-tree-dump "\\\[4, 7\\\]" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp20.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp20.c
+index 7d4d55f76..5f847daef 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp20.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp20.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void call (void);
+ 
+@@ -16,4 +16,4 @@ void foo (int base)
+       }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "call" "evrp"} } */
++/* { dg-final { scan-tree-dump-not "call" "evrp1"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp21.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp21.c
+index dae788cc2..ea03aa6ed 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp21.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp21.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ extern void vrp_keep (void);
+ extern void vrp_kill (void);
+@@ -24,5 +24,5 @@ f2 (int s, int b)
+     vrp_kill ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "vrp_keep \\(" 1 "evrp"} } */
+-/* { dg-final { scan-tree-dump-times "vrp_kill \\(" 0 "evrp"} } */
++/* { dg-final { scan-tree-dump-times "vrp_keep \\(" 1 "evrp1"} } */
++/* { dg-final { scan-tree-dump-times "vrp_kill \\(" 0 "evrp1"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp22.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp22.c
+index 3dd47e55d..777087810 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp22.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp22.c
+@@ -1,6 +1,6 @@
+ /* See backwards thru casts if the range fits the LHS type. */
+ /* { dg-do compile } */
+-/* { dg-options "-O2  -fdump-tree-evrp" } */
++/* { dg-options "-O2  -fdump-tree-evrp1" } */
+ 
+ extern void kill(int i);
+ extern void keep(int i);
+@@ -38,6 +38,6 @@ foo (int i)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "kill \\(" 0 "evrp"} } */
+-/* { dg-final { scan-tree-dump-times "keep \\(" 1 "evrp"} } */
++/* { dg-final { scan-tree-dump-times "kill \\(" 0 "evrp1"} } */
++/* { dg-final { scan-tree-dump-times "keep \\(" 1 "evrp1"} } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp3.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp3.c
+index 805652b95..e4ef79b38 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp3.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ int foo (int i);
+ void bar (int j)
+@@ -11,5 +11,5 @@ void bar (int j)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump "\\\[1, 10\\\]" "evrp" } } */
++/* { dg-final { scan-tree-dump "\\\[1, 10\\\]" "evrp1" } } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp30.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp30.c
+index 2c5ff41ec..a80ca9bfa 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp30.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp30.c
+@@ -1,6 +1,6 @@
+ /* Confirm the ranger is picking up a relationship with equivalences.  */
+ /* { dg-do compile } */
+-/* { dg-options "-O2  -fdump-tree-evrp" } */
++/* { dg-options "-O2  -fdump-tree-evrp1" } */
+ 
+ extern void foo ();
+ 
+@@ -12,5 +12,5 @@ void f (unsigned int a, unsigned int b)
+ 	foo (); /* Unreachable */
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "foo\\(" 0 "evrp"} } */
++/* { dg-final { scan-tree-dump-times "foo\\(" 0 "evrp1"} } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp4.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp4.c
+index e3f4531b1..500a6828a 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp4.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp4.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ int foo (int *p);
+ 
+@@ -17,4 +17,4 @@ int bar (struct st *s)
+   foo (&s->a);
+ }
+ 
+-/* { dg-final { scan-tree-dump "\\\[1B, \\+INF\\\]" "evrp" } } */
++/* { dg-final { scan-tree-dump "\\\[1B, \\+INF\\\]" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp6.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp6.c
+index aaeec6886..27d569090 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp6.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp6.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ extern void abort (void);
+ 
+@@ -18,4 +18,4 @@ foo (int k, int j)
+ 
+   return j;
+ }
+-/* { dg-final { scan-tree-dump "\\\[12, \\+INF" "evrp" } } */
++/* { dg-final { scan-tree-dump "\\\[12, \\+INF" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp7.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp7.c
+index 16fbe65e4..3c193d0cc 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp7.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp7.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ int test1(int i, int k)
+ {
+@@ -11,4 +11,4 @@ int test1(int i, int k)
+   return 1;
+ }
+ 
+-/* { dg-final { scan-tree-dump "Removing dead stmt \[^\r\n\]* = j_.* == 10" "evrp" } } */
++/* { dg-final { scan-tree-dump "Removing dead stmt \[^\r\n\]* = j_.* == 10" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp8.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp8.c
+index b7e5c7aa2..a0690e1e2 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp8.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp8.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ int foo(int i)
+ {
+@@ -8,4 +8,4 @@ int foo(int i)
+   return 1;
+ }
+ 
+-/* { dg-final { scan-tree-dump "Removing dead stmt \[^\r\n\]* = i_.* == 1" "evrp" } } */
++/* { dg-final { scan-tree-dump "Removing dead stmt \[^\r\n\]* = i_.* == 1" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/evrp9.c b/gcc/testsuite/gcc.dg/tree-ssa/evrp9.c
+index fb7c319fc..853fd9b72 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/evrp9.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/evrp9.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/49039 */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ extern void bar (void);
+ 
+@@ -24,7 +24,7 @@ foo (unsigned int x, unsigned int y)
+     bar ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "Folding predicate minv_.* == 5 to 0" 1 "evrp" } } */
+-/* { dg-final { scan-tree-dump-times "Folding predicate minv_.* == 6 to 0" 1 "evrp" } } */
+-/* { dg-final { scan-tree-dump-times "Folding predicate maxv_.* == 5 to 0" 1 "evrp" } } */
+-/* { dg-final { scan-tree-dump-times "Folding predicate maxv_.* == 6 to 0" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Folding predicate minv_.* == 5 to 0" 1 "evrp1" } } */
++/* { dg-final { scan-tree-dump-times "Folding predicate minv_.* == 6 to 0" 1 "evrp1" } } */
++/* { dg-final { scan-tree-dump-times "Folding predicate maxv_.* == 5 to 0" 1 "evrp1" } } */
++/* { dg-final { scan-tree-dump-times "Folding predicate maxv_.* == 6 to 0" 1 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/foldconst-4.c b/gcc/testsuite/gcc.dg/tree-ssa/foldconst-4.c
+index 0e9b676f6..f58bf667f 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/foldconst-4.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/foldconst-4.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O -fdump-tree-ccp2" } */
++/* { dg-options "-O -fdump-tree-ccp3" } */
+ 
+ struct a {int a,b;};
+ const static struct a a;
+@@ -10,4 +10,4 @@ test()
+ {
+   return a.a+b[c];
+ }
+-/* { dg-final { scan-tree-dump "return 0;" "ccp2" } } */
++/* { dg-final { scan-tree-dump "return 0;" "ccp3" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/forwprop-33.c b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-33.c
+index c7124deee..ab8b5ba3b 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/forwprop-33.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/forwprop-33.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-forwprop3" } */
++/* { dg-options "-O2 -fdump-tree-forwprop5" } */
+ 
+ unsigned short
+ test1 (unsigned short a)
+@@ -9,7 +9,7 @@ test1 (unsigned short a)
+   a |= 0x8000; /* Simplify to ((a >> 1) ^ 0xa001).  */
+   return a;
+ }
+-/* { dg-final { scan-tree-dump "\\^ 40961" "forwprop3" } } */
++/* { dg-final { scan-tree-dump "\\^ 40961" "forwprop5" } } */
+ 
+ unsigned short
+ test2 (unsigned short a)
+@@ -19,7 +19,7 @@ test2 (unsigned short a)
+   a ^= 0x0001; /* Simplify to ((a << 1) | 0x8005).  */
+   return a;
+ }
+-/* { dg-final { scan-tree-dump "\\| 32773" "forwprop3" } } */
++/* { dg-final { scan-tree-dump "\\| 32773" "forwprop5" } } */
+ 
+ unsigned short
+ test3 (unsigned short a)
+@@ -29,7 +29,7 @@ test3 (unsigned short a)
+   a |= 0xc031; /* Simplify to ((a & 0xd123) | 0xe071).  */
+   return a;
+ }
+-/* { dg-final { scan-tree-dump "\\| 57457" "forwprop3" } } */
++/* { dg-final { scan-tree-dump "\\| 57457" "forwprop5" } } */
+ 
+ unsigned short
+ test4 (unsigned short a)
+@@ -39,7 +39,7 @@ test4 (unsigned short a)
+   a |= 0x8000;
+   return a;
+ }
+-/* { dg-final { scan-tree-dump "\\^ 49153" "forwprop3" } } */
++/* { dg-final { scan-tree-dump "\\^ 49153" "forwprop5" } } */
+ 
+ unsigned short
+ test5 (unsigned short a)
+@@ -49,8 +49,8 @@ test5 (unsigned short a)
+   a |= 0x8001; /* Only move shift inward: (((a >> 1) ^ 0x4001) | 0x8001).  */
+   return a;
+ }
+-/* { dg-final { scan-tree-dump "\\^ 16385" "forwprop3" } } */
+-/* { dg-final { scan-tree-dump "\\| 32769" "forwprop3" } } */
++/* { dg-final { scan-tree-dump "\\^ 16385" "forwprop5" } } */
++/* { dg-final { scan-tree-dump "\\| 32769" "forwprop5" } } */
+ 
+ short
+ test6 (short a)
+@@ -59,7 +59,7 @@ test6 (short a)
+   a >>= 2;
+   return a;
+ }
+-/* { dg-final { scan-tree-dump "\\& 8191" "forwprop3" } } */
++/* { dg-final { scan-tree-dump "\\& 8191" "forwprop5" } } */
+ 
+ short
+ test7 (short a)
+@@ -68,4 +68,4 @@ test7 (short a)
+   a >>= 2;
+   return a;
+ }
+-/* { dg-final { scan-tree-dump "\\& -7169" "forwprop3" } } */
++/* { dg-final { scan-tree-dump "\\& -7169" "forwprop5" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c
+index f5cd23ab2..37dcb2a5f 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */ 
+-/* { dg-options "-O2 -fdelete-null-pointer-checks -fisolate-erroneous-paths-attribute -fdump-tree-isolate-paths -fdump-tree-forwprop3" } */
++/* { dg-options "-O2 -fdelete-null-pointer-checks -fisolate-erroneous-paths-attribute -fdump-tree-isolate-paths -fdump-tree-forwprop5" } */
+ /* { dg-skip-if "" keeps_null_pointer_checks } */
+ 
+ 
+@@ -37,6 +37,6 @@ bar (void)
+    We also verify that after isolation cprop simplifies the
+    return statement so that it returns &z directly. */
+ /* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "isolate-paths"} } */
+-/* { dg-final { scan-tree-dump-times "return &z;" 1 "forwprop3"} } */
++/* { dg-final { scan-tree-dump-times "return &z;" 1 "forwprop5"} } */
+ 
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c b/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c
+index f357e16d3..978d2936f 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/isolate-4.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */ 
+-/* { dg-options "-O2 -fdelete-null-pointer-checks -fisolate-erroneous-paths-attribute -fdump-tree-isolate-paths -fdump-tree-ccp3" } */
++/* { dg-options "-O2 -fdelete-null-pointer-checks -fisolate-erroneous-paths-attribute -fdump-tree-isolate-paths -fdump-tree-ccp4" } */
+ /* { dg-skip-if "" keeps_null_pointer_checks } */
+ 
+ 
+@@ -26,6 +26,6 @@ bar (void)
+    We also verify that after isolation phi-cprop simplifies the
+    return statement so that it returns &z directly. */
+ /* { dg-final { scan-tree-dump-times "__builtin_trap" 2 "isolate-paths"} } */
+-/* { dg-final { scan-tree-dump-times "foo .&z.;" 1 "ccp3"} } */
++/* { dg-final { scan-tree-dump-times "foo .&z.;" 1 "ccp4"} } */
+ 
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr14814.c b/gcc/testsuite/gcc.dg/tree-ssa/pr14814.c
+index 60d9649a1..5df4e49c2 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr14814.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr14814.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-sra -fdump-tree-forwprop2" } */
++/* { dg-options "-O2 -fno-tree-sra -fdump-tree-forwprop4" } */
+ 
+ struct YY {
+   double e[3];  };
+@@ -18,4 +18,4 @@ int foo(const struct XX* r) {
+   return 1;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "= &" 0 "forwprop2" } } */
++/* { dg-final { scan-tree-dump-times "= &" 0 "forwprop4" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr20318.c b/gcc/testsuite/gcc.dg/tree-ssa/pr20318.c
+index 80fd72668..2537855f6 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr20318.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr20318.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target { ! keeps_null_pointer_checks } } } */
+-/* { dg-options "-O2 -fdump-tree-original -fdump-tree-vrp1 -fdelete-null-pointer-checks -fdisable-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-original -fdump-tree-vrp1 -fdelete-null-pointer-checks -fdisable-tree-evrp1" } */
+ 
+ extern int* f(int) __attribute__((returns_nonnull));
+ extern void eliminate ();
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr20657.c b/gcc/testsuite/gcc.dg/tree-ssa/pr20657.c
+index e67823150..f7cab6fe2 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr20657.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr20657.c
+@@ -3,7 +3,7 @@
+    statement, which was needed to eliminate the second "if" statement.  */
+ 
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-dominator-opts -fno-tree-fre -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fno-tree-dominator-opts -fno-tree-fre -fdump-tree-evrp1" } */
+ 
+ int
+ foo (int a)
+@@ -14,4 +14,4 @@ foo (int a)
+   return 0;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "if" 1 "evrp"} } */
++/* { dg-final { scan-tree-dump-times "if" 1 "evrp1"} } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr20702.c b/gcc/testsuite/gcc.dg/tree-ssa/pr20702.c
+index 81129674d..9374bcdc3 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr20702.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr20702.c
+@@ -4,7 +4,7 @@
+    immediate successors of the basic block.  */
+ 
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp -fdump-tree-vrp1-details -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp1 -fdump-tree-vrp1-details -fdelete-null-pointer-checks" } */
+ 
+ extern void bar (int);
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21001.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21001.c
+index f9216a44a..d2c8ecf64 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21001.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21001.c
+@@ -5,7 +5,7 @@
+    range information out of the conditional.  */
+ 
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp -fno-thread-jumps -fdump-tree-vrp1-details" } */
++/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp1 -fno-thread-jumps -fdump-tree-vrp1-details" } */
+ /* { dg-additional-options "-fdisable-tree-ethread -fdisable-tree-thread1" } */
+ 
+ int
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21086.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21086.c
+index 9b93d39d4..19f15d084 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21086.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21086.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp -fdump-tree-vrp1 -fdump-tree-dce2 -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp1 -fdump-tree-vrp1 -fdump-tree-dce2 -fdelete-null-pointer-checks" } */
+ 
+ int
+ foo (int *p)
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21090.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21090.c
+index 92a876886..eaef51dc9 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21090.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21090.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp -fdump-tree-vrp1 -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp1 -fdump-tree-vrp1 -fdelete-null-pointer-checks" } */
+ 
+ int g, h;
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21294.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21294.c
+index 8c8f4479a..6a5343fd7 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21294.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21294.c
+@@ -4,7 +4,7 @@
+    allows us to eliminate the second "if" statement.  */
+ 
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-dominator-opts -fdisable-tree-evrp -fdisable-tree-ethread -fdisable-tree-threadfull1 -fdump-tree-vrp1-details" } */
++/* { dg-options "-O2 -fno-tree-dominator-opts -fdisable-tree-evrp1 -fdisable-tree-ethread -fdisable-tree-threadfull1 -fdump-tree-vrp1-details" } */
+ 
+ struct f {
+   int i;
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21458-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21458-2.c
+index 9610570e2..4bdf1b161 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21458-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21458-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details -fdisable-tree-ethread" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details -fdisable-tree-ethread" } */
+ 
+ extern void g (void);
+ extern void bar (int);
+@@ -16,4 +16,4 @@ foo (int a)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "Folding predicate.* to 1" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Folding predicate.* to 1" 1 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21458.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21458.c
+index 97d17f21c..c6a0c51ff 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21458.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21458.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1" } */
+ 
+ extern void g (void);
+ extern void bar (int);
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21559.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21559.c
+index 83b7c802e..c477fdf75 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21559.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21559.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ static int blocksize = 4096;
+ 
+@@ -33,4 +33,4 @@ void foo (void)
+ 
+ 
+ /* First, we should simplify the bits < 0 test within the loop.  */
+-/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr21563.c b/gcc/testsuite/gcc.dg/tree-ssa/pr21563.c
+index 504b3cce6..2863d80e5 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr21563.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr21563.c
+@@ -2,7 +2,7 @@
+    Make sure VRP folds the second "if" statement.  */
+ 
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp -fdump-tree-vrp1-details" } */
++/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp1 -fdump-tree-vrp1-details" } */
+ 
+ int
+ foo (int a)
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c b/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c
+index f70f2546b..0a339f2f7 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr23744.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-ccp -fdisable-tree-evrp -fdump-tree-vrp1" } */
++/* { dg-options "-O2 -fno-tree-ccp -fdisable-tree-evrp1 -fdump-tree-vrp1" } */
+ 
+ void h (void);
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr25382.c b/gcc/testsuite/gcc.dg/tree-ssa/pr25382.c
+index 8634c0a78..a302ec63a 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr25382.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr25382.c
+@@ -3,7 +3,7 @@
+    Check that VRP now gets ranges from BIT_AND_EXPRs.  */
+ 
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-thread-jumps -fno-tree-ccp -fdisable-tree-evrp -fdump-tree-vrp1" } */
++/* { dg-options "-O2 -fno-thread-jumps -fno-tree-ccp -fdisable-tree-evrp1 -fdump-tree-vrp1" } */
+ 
+ int
+ foo (int a)
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr45397.c b/gcc/testsuite/gcc.dg/tree-ssa/pr45397.c
+index 8eacb5187..79cfc9d00 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr45397.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr45397.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-phiopt -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-phiopt -fdump-tree-evrp1" } */
+ 
+ int foo_add (const unsigned char *tmp, int i, int val)
+ {
+@@ -20,7 +20,7 @@ int foo_mul (const unsigned char *tmp, int i, int val)
+    have no control flow.  */
+ /* EVRP leaves copies in the IL which confuses phiopt1 so we have
+    to rely on phiopt2 instead.  */
+-/* { dg-final { scan-tree-dump-not " & 255;" "evrp" } } */
++/* { dg-final { scan-tree-dump-not " & 255;" "evrp1" } } */
+ /* { dg-final { scan-tree-dump-times "MAX_EXPR" 3 "phiopt1" { xfail *-*-* } } } */
+ /* { dg-final { scan-tree-dump-times "MIN_EXPR" 3 "phiopt1" { xfail *-*-* } } } */
+ /* { dg-final { scan-tree-dump-not "if " "phiopt1" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr49039.c b/gcc/testsuite/gcc.dg/tree-ssa/pr49039.c
+index 102b07346..797a7cd44 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr49039.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr49039.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/49039 */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1 -fno-thread-jumps" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1 -fno-thread-jumps" } */
+ 
+ extern void bar (void);
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr58480.c b/gcc/testsuite/gcc.dg/tree-ssa/pr58480.c
+index f11623b7c..2767e7ed5 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr58480.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr58480.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile { target { ! keeps_null_pointer_checks } } } */
+-/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp -fdump-tree-vrp1 -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fno-thread-jumps -fdisable-tree-evrp1 -fdump-tree-vrp1 -fdelete-null-pointer-checks" } */
+ 
+ extern void eliminate (void);
+ extern void* f1 (void *a, void *b) __attribute__((nonnull));
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr61839_1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr61839_1.c
+index f5af7a1d6..61c2c4591 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr61839_1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr61839_1.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/61839.  */
+ /* { dg-do run } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdisable-tree-ethread -fdisable-tree-threadfull1 -fdump-tree-vrp1 -fdump-tree-optimized" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdisable-tree-ethread -fdisable-tree-threadfull1 -fdump-tree-vrp1 -fdump-tree-optimized" } */
+ /* { dg-require-effective-target int32plus } */
+ 
+ __attribute__ ((noinline))
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr61839_2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr61839_2.c
+index 0e0f4c021..ba1e9ba5e 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr61839_2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr61839_2.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/61839.  */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ /* { dg-require-effective-target int32plus } */
+ 
+ __attribute__ ((noinline))
+@@ -72,13 +72,13 @@ int mod (int a, int b)
+ 
+ /* EVRP now makes transformations in all functions, leaving a single
+  * builtin_abort call in bar2. */
+-/* { dg-final { scan-tree-dump-times "__builtin_abort" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "__builtin_abort" 1 "evrp1" } } */
+ 
+ /* Make sure to optimize 972195717 / 0 in function foo.  */
+-/* { dg-final { scan-tree-dump-times "972195717 / " 0  "evrp" } } */
++/* { dg-final { scan-tree-dump-times "972195717 / " 0  "evrp1" } } */
+ /* Make sure  to optimize 972195717 % 0 in function bar.  */
+-/* { dg-final { scan-tree-dump-times "972195717 % " 0 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "972195717 % " 0 "evrp1" } } */
+ /* Make sure to optimize 972195717 % [1,2] function bar2.  */
+-/* { dg-final { scan-tree-dump-times "972195715 % " 0 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "972195715 % " 0 "evrp1" } } */
+ /* [12,12][24,24][48,48] % [0,0][3,3][6,6][12,12] == [0,0] */
+-/* { dg-final { scan-tree-dump-times "%" 0 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "%" 0 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr61839_4.c b/gcc/testsuite/gcc.dg/tree-ssa/pr61839_4.c
+index a346912d1..bc0290dc1 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr61839_4.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr61839_4.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/61839.  */
+ /* { dg-do run } */
+-/* { dg-options "-O2 -fdump-tree-vrp1 -fdisable-tree-evrp -fdump-tree-optimized" } */
++/* { dg-options "-O2 -fdump-tree-vrp1 -fdisable-tree-evrp1 -fdump-tree-optimized" } */
+ /* { dg-require-effective-target int32plus } */
+ 
+ __attribute__ ((noinline))
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr64130.c b/gcc/testsuite/gcc.dg/tree-ssa/pr64130.c
+index b694ec171..17d71dcd9 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr64130.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr64130.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-details" } */
+ 
+ __extension__ typedef __UINT32_TYPE__ uint32_t;
+ 
+@@ -14,6 +14,6 @@ int funsigned2 (uint32_t a)
+   return (-1 * 0x1ffffffffL) / a == 0;
+ }
+ 
+-/* { dg-final { scan-tree-dump "int \\\[2, 8589934591\\\]" "evrp" } } */
+-/* { dg-final { scan-tree-dump "int \\\[-8589934591, -2\\\]" "evrp" } } */
++/* { dg-final { scan-tree-dump "int \\\[2, 8589934591\\\]" "evrp1" } } */
++/* { dg-final { scan-tree-dump "int \\\[-8589934591, -2\\\]" "evrp1" } } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr68431.c b/gcc/testsuite/gcc.dg/tree-ssa/pr68431.c
+index 9c42563b6..1de9aa540 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr68431.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr68431.c
+@@ -1,5 +1,5 @@
+ /* PR tree-optimization/68431 */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1-details" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1-details" } */
+ 
+ unsigned int x = 1;
+ int
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr77445-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr77445-2.c
+index b3db1bca6..de49f7470 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr77445-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr77445-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-thread-details-blocks-stats -fdump-tree-threadfull1-blocks-stats -fdump-tree-threadfull2-blocks-stats" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-thread-details-blocks-stats -fdump-tree-threadfull1-blocks-stats -fdump-tree-threadfull2-blocks-stats" } */
+ typedef enum STATES {
+ 	START=0,
+ 	INVALID,
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr78153-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr78153-1.c
+index 2530ba08e..174adca33 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr78153-1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr78153-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-slim" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-slim" } */
+ 
+ void f(const char *s)
+ {
+@@ -7,4 +7,4 @@ void f(const char *s)
+     __builtin_abort ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "__builtin_abort" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "__builtin_abort" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr78153-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr78153-2.c
+index de70450f1..11ace86d9 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr78153-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr78153-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-slim" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-slim" } */
+ 
+ void f(const char *s)
+ {
+@@ -8,4 +8,4 @@ void f(const char *s)
+     __builtin_abort ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "__builtin_abort" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "__builtin_abort" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr78154.c b/gcc/testsuite/gcc.dg/tree-ssa/pr78154.c
+index 3ba8f64e2..6dff0977a 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr78154.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr78154.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp-slim -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fdump-tree-evrp1-slim -fdelete-null-pointer-checks" } */
+ /* { dg-skip-if "" { keeps_null_pointer_checks } } */
+ 
+ void f(void *d, const void *s, __SIZE_TYPE__ n)
+@@ -41,4 +41,4 @@ void f(void *d, const void *s, __SIZE_TYPE__ n)
+     __builtin_abort ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "__builtin_abort" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "__builtin_abort" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr78655.c b/gcc/testsuite/gcc.dg/tree-ssa/pr78655.c
+index e9158e072..e406f0855 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr78655.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr78655.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-ccp -fno-tree-forwprop -fno-tree-fre -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fno-tree-ccp -fno-tree-forwprop -fno-tree-fre -fdump-tree-evrp1" } */
+ 
+ struct A{int a,b;};
+ inline int*f1(struct A*p){return&p->a;}   /* offset of 0.  */
+@@ -33,5 +33,5 @@ main()
+   
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "bad" "evrp"} } */
++/* { dg-final { scan-tree-dump-not "bad" "evrp1"} } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr91029-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr91029-1.c
+index d52734b20..3e079712b 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr91029-1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr91029-1.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/91029 */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void kill (void);
+ int xx;
+@@ -65,4 +65,4 @@ void f6 (int i, int j)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr91029-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr91029-2.c
+index ad9213a41..8b10645f3 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr91029-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr91029-2.c
+@@ -1,6 +1,6 @@
+ /* PR tree-optimization/91029 */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void kill (void);
+ int xx;
+@@ -95,4 +95,4 @@ void f9 (unsigned int i, unsigned int j)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c
+index b2505f395..9f0bb21a7 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void kill (void);
+ 
+@@ -16,5 +16,5 @@ void foo (unsigned int arg)
+     kill ();;
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } }  */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } }  */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c
+index c9b28783c..fc4f29248 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void kill (void);
+ 
+@@ -14,4 +14,4 @@ void foo (unsigned int arg)
+     kill ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c
+index e1d2be0ea..0f9298bd6 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr93781-3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void kill (void);
+ 
+@@ -18,4 +18,4 @@ void foo (unsigned int arg)
+    kill ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "kill" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "kill" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr95906.c b/gcc/testsuite/gcc.dg/tree-ssa/pr95906.c
+index 3d820a58e..c9543d04e 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr95906.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr95906.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-forwprop3-raw -w -Wno-psabi" } */
++/* { dg-options "-O2 -fdump-tree-forwprop5-raw -w -Wno-psabi" } */
+ 
+ // FIXME: this should further optimize to a MAX_EXPR
+ typedef signed char v16i8 __attribute__((vector_size(16)));
+@@ -9,5 +9,5 @@ v16i8 f(v16i8 a, v16i8 b)
+     return (cmp & a) | (~cmp & b);
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "bit_(and|ior)_expr" "forwprop3" } } */
+-/* { dg-final { scan-tree-dump-times "vec_cond_expr" 1 "forwprop3" } } */
++/* { dg-final { scan-tree-dump-not "bit_(and|ior)_expr" "forwprop5" } } */
++/* { dg-final { scan-tree-dump-times "vec_cond_expr" 1 "forwprop5" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/pr98513.c b/gcc/testsuite/gcc.dg/tree-ssa/pr98513.c
+index c15d6bd70..8a32f2c0e 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/pr98513.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/pr98513.c
+@@ -2,7 +2,7 @@
+ /* { dg-options "-O2 -fgimple" } */
+ 
+ __attribute__((noipa))
+-void __GIMPLE (ssa,startwith("evrp"))
++void __GIMPLE (ssa,startwith("evrp1"))
+ foo (int x, int minus_1)
+ {
+   int tem;
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-29.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-29.c
+index 3c1a848f5..c93a8d0d2 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-29.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-29.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O -fdump-tree-ccp2" } */
++/* { dg-options "-O -fdump-tree-ccp3" } */
+ 
+ static double num;
+ int foo (void)
+@@ -7,4 +7,4 @@ int foo (void)
+   return *(unsigned *)#
+ }
+ 
+-/* { dg-final { scan-tree-dump "return 0;" "ccp2" } } */
++/* { dg-final { scan-tree-dump "return 0;" "ccp3" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-3.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-3.c
+index 1c8c318ce..48427de64 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-3.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-ccp-3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O -fdump-tree-ccp2" } */
++/* { dg-options "-O -fdump-tree-ccp3" } */
+ 
+ extern void link_error (void);
+ 
+@@ -133,4 +133,4 @@ int* test666 (int * __restrict__ rp1, int * __restrict__ rp2, int *p1)
+    optimization has failed */
+ /* ??? While we indeed don't handle some of these, a couple of the
+    restrict tests are incorrect.  */
+-/* { dg-final { scan-tree-dump-times "link_error" 0 "ccp2" { xfail *-*-* } } } */
++/* { dg-final { scan-tree-dump-times "link_error" 0 "ccp3" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-30.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-30.c
+index 9f56b392c..1401d23e1 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-30.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-dse-30.c
+@@ -28,8 +28,8 @@ void test_bzero (void)
+ 
+ /* { dg-final { scan-tree-dump-times "builtin_memset" 1 "dse1" } } */
+ 
+-/* Merging the evrp folder into substitute_and_fold_engine shuffled
+-   the order of gimple_fold a bit, so evrp is no longer folding the
++/* Merging the evrp1 folder into substitute_and_fold_engine shuffled
++   the order of gimple_fold a bit, so evrp1 is no longer folding the
+    memmove inline.  This folding is instead done by forwprop.  Thus, I
+    have remmoved the |memmove in the test below as this is not done
+    until after dse.
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-54.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-54.c
+index 02ebf068a..f0a1fab4c 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-54.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-fre-54.c
+@@ -1,6 +1,6 @@
+ /* { dg-do run } */
+ /* { dg-require-effective-target int32plus } */
+-/* { dg-options "-O -fdump-tree-forwprop4 -fdump-tree-dse1" } */
++/* { dg-options "-O -fdump-tree-forwprop6 -fdump-tree-dse1" } */
+ 
+ extern void abort (void);
+ 
+@@ -51,6 +51,6 @@ int main()
+   return 0;
+ }
+ 
+-/* { dg-final { scan-tree-dump "\\(char\\) i_" "forwprop4" } } */
+-/* { dg-final { scan-tree-dump "\\(short int\\) i_" "forwprop4" } } */
++/* { dg-final { scan-tree-dump "\\(char\\) i_" "forwprop6" } } */
++/* { dg-final { scan-tree-dump "\\(short int\\) i_" "forwprop6" } } */
+ /* { dg-final { scan-tree-dump-not "u.i =" "dse1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/ssa-sink-19.c b/gcc/testsuite/gcc.dg/tree-ssa/ssa-sink-19.c
+index e98d13fe8..ad6eb6270 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/ssa-sink-19.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/ssa-sink-19.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-sink1-details -fdump-tree-cddce2-details" } */
++/* { dg-options "-O2 -fdump-tree-sink1-details -fdump-tree-cddce3-details" } */
+ 
+ static int b=4;
+ int c;
+@@ -18,4 +18,4 @@ main()
+    applying store motion to c and b.  */
+ /* { dg-final { scan-tree-dump "Sinking # VUSE" "sink1" } } */
+ /* And remove the loop after final value replacement.  */
+-/* { dg-final { scan-tree-dump "fix_loop_structure: removing loop" "cddce2" } } */
++/* { dg-final { scan-tree-dump "fix_loop_structure: removing loop" "cddce3" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp02.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp02.c
+index 6e9c8df2f..df83d6a45 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp02.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp02.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-vrp1 -fdelete-null-pointer-checks -fdisable-tree-evrp -fno-thread-jumps" } */
++/* { dg-options "-O2 -fdump-tree-vrp1 -fdelete-null-pointer-checks -fdisable-tree-evrp1 -fno-thread-jumps" } */
+ 
+ struct A
+ {
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp03.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp03.c
+index 4cbaca413..8897d85bd 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp03.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp03.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1 -fno-thread-jumps" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1 -fno-thread-jumps" } */
+ 
+ struct A
+ {
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp06.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp06.c
+index 898477e42..59c431911 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp06.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp06.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1 -fno-thread-jumps" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1 -fno-thread-jumps" } */
+ 
+ int baz (void);
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp07.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp07.c
+index ec5f6cce4..f53ea54cc 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp07.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp07.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp -fdump-tree-vrp1-details -fdelete-null-pointer-checks -fno-thread-jumps" } */
++/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp1 -fdump-tree-vrp1-details -fdelete-null-pointer-checks -fno-thread-jumps" } */
+ 
+ int
+ foo (int i, int *p)
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp08.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp08.c
+index c9d902367..a9bc85686 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp08.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp08.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp -fdump-tree-vrp1-details -fno-thread-jumps -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp1 -fdump-tree-vrp1-details -fno-thread-jumps -fdelete-null-pointer-checks" } */
+ 
+ /* Compile with -fno-tree-fre -O2 to prevent CSEing *p.  */
+ int
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp09.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp09.c
+index 354169692..48a79e428 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp09.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp09.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp -fdump-tree-vrp1 -std=gnu89 -fno-thread-jumps" } */
++/* { dg-options "-O2 -fno-tree-fre -fdisable-tree-evrp1 -fdump-tree-vrp1 -std=gnu89 -fno-thread-jumps" } */
+ 
+ foo (int *p)
+ {
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp111.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp111.c
+index cae2bc75a..a4e35833a 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp111.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp111.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fdelete-null-pointer-checks" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fdelete-null-pointer-checks" } */
+ /* { dg-skip-if "" { keeps_null_pointer_checks } } */
+ 
+ void foo (void *p) __attribute__((nonnull(1)));
+@@ -11,4 +11,4 @@ void bar (void *p)
+     __builtin_abort ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "abort" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "abort" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp113.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp113.c
+index dfe4989d3..dc7e5e583 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp113.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp113.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-vrp1 -fdisable-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-vrp1 -fdisable-tree-evrp1" } */
+ 
+ int f(int a) {
+     switch (a & 1) {
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp114.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp114.c
+index 5c2c1a0b5..33adac61a 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp114.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp114.c
+@@ -1,5 +1,5 @@
+ /* { dg-do link { target int32plus } } */
+-/* { dg-options "-O2 -fdump-tree-fre1 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-fre1 -fdump-tree-evrp1" } */
+ 
+ extern void link_error ();
+ void foo (int a)
+@@ -21,4 +21,4 @@ int main()
+ }
+ 
+ /* { dg-final { scan-tree-dump-times "link_error" 1 "fre1" } } */
+-/* { dg-final { scan-tree-dump-times "link_error" 0 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "link_error" 0 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp115.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp115.c
+index 6d1c9c50b..47fecebe1 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp115.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp115.c
+@@ -1,7 +1,7 @@
+ /* PR tree-optimization/80558 */
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
+-/* { dg-final { scan-tree-dump-not "link_error" "evrp" } } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
++/* { dg-final { scan-tree-dump-not "link_error" "evrp1" } } */
+ 
+ void link_error (void);
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp117.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp117.c
+index d07a6722c..47194eec0 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp117.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp117.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ void link_error (void);
+ 
+@@ -13,4 +13,4 @@ void foo (int i)
+     }
+ }
+ 
+-/* { dg-final { scan-tree-dump-not "link_error" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "link_error" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp120.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp120.c
+index 4dcee2341..b4ea5b1c3 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp120.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp120.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ #include "vrp113.c"
+ 
+-/* { dg-final { scan-tree-dump "return 3;" "evrp" } } */
++/* { dg-final { scan-tree-dump "return 3;" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp16.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp16.c
+index d09f3aea4..2243a87c3 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp16.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp16.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-fre -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fno-tree-fre -fdump-tree-evrp1" } */
+ 
+ 
+ extern void abort (void) __attribute__ ((__noreturn__));
+@@ -19,5 +19,5 @@ nonlocal_mentioned_p (rtx x)
+ 	abort ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "if" 0 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "if" 0 "evrp1" } } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp17.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp17.c
+index b8470e7a3..3c0ec8c11 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp17.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp17.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ extern void abort (void) __attribute__ ((__noreturn__));
+ union tree_node;
+@@ -27,5 +27,5 @@ gimplify_for_stmt (tree stmt)
+     abort ();
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp1" } } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp18.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp18.c
+index d7ab3f69f..393e7d402 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp18.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp18.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp" } */
++/* { dg-options "-O2 -fdump-tree-evrp1" } */
+ 
+ static int blocksize = 4096;
+ 
+@@ -30,4 +30,4 @@ void foo (void)
+     eof_reached = 1;
+ }
+ 
+-/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp19.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp19.c
+index 98a8da6d0..ea95ecb91 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp19.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp19.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-fwrapv -O1 -ftree-vrp -fdisable-tree-evrp -fdump-tree-vrp1 -fdisable-tree-ethread -fdisable-tree-thread1" } */
++/* { dg-options "-fwrapv -O1 -ftree-vrp -fdisable-tree-evrp1 -fdump-tree-vrp1 -fdisable-tree-ethread -fdisable-tree-thread1" } */
+ 
+ #include 
+ extern void abort ();
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp20.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp20.c
+index e5822992f..52e489f9f 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp20.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp20.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-fwrapv -O1 -fno-tree-fre -fdisable-tree-evrp -ftree-vrp -fdump-tree-vrp1 -fdisable-tree-ethread -fdisable-tree-thread1" } */
++/* { dg-options "-fwrapv -O1 -fno-tree-fre -fdisable-tree-evrp1 -ftree-vrp -fdump-tree-vrp1 -fdisable-tree-ethread -fdisable-tree-thread1" } */
+ 
+ extern void abort ();
+ extern void exit (int);
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp23.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp23.c
+index 6ac8d5560..fa78a4c7f 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp23.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp23.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-forwprop -fdump-tree-evrp-details" } */
++/* { dg-options "-O2 -fno-tree-forwprop -fdump-tree-evrp1-details" } */
+ 
+ void aa (void);
+ void aos (void);
+@@ -45,5 +45,5 @@ L8:
+ /* The n_sets > 0 test can be simplified into n_sets == 1 since the
+    only way to reach the test is when n_sets <= 1, and the only value
+    which satisfies both conditions is n_sets == 1.  */
+-/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Simplified relational" 1 "evrp1" } } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp24.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp24.c
+index 91015da86..b4a0ecc5c 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp24.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp24.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-tree-forwprop -fdump-tree-evrp-details -fdump-tree-optimized -fno-tree-ccp" } */
++/* { dg-options "-O2 -fno-tree-forwprop -fdump-tree-evrp1-details -fdump-tree-optimized -fno-tree-ccp" } */
+ 
+ 
+ struct rtx_def;
+@@ -88,6 +88,6 @@ L7:
+    n_sets can only have the values [0, 1] as it's the result of a
+    boolean operation.  */
+ 
+-/* { dg-final { scan-tree-dump-times "Simplified relational" 2 "evrp" } } */
++/* { dg-final { scan-tree-dump-times "Simplified relational" 2 "evrp1" } } */
+ /* { dg-final { scan-tree-dump-times "if " 4 "optimized" } } */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp33.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp33.c
+index 470675ef5..82c63f556 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp33.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp33.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-vrp1 -fno-tree-fre -fdisable-tree-evrp -fno-thread-jumps" } */
++/* { dg-options "-O2 -fdump-tree-vrp1 -fno-tree-fre -fdisable-tree-evrp1 -fno-thread-jumps" } */
+ 
+ /* This is from PR14052.  */
+ 
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp35.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp35.c
+index a372a18cc..502070634 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp35.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp35.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1-details" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1-details" } */
+ 
+ int test1(int i, int k)
+ {
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp36.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp36.c
+index 1f77b539d..bff236e2c 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp36.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp36.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fdump-tree-vrp1-details" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fdump-tree-vrp1-details" } */
+ 
+ int foo(int i)
+ {
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c
+index 9e53547da..7c3988ce0 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp92.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdisable-tree-evrp -fno-tree-fre -fdump-tree-vrp1 -fdisable-tree-ethread" } */
++/* { dg-options "-O2 -fdisable-tree-evrp1 -fno-tree-fre -fdump-tree-vrp1 -fdisable-tree-ethread" } */
+ 
+ void bar (void);
+ int foo (int i, int j)
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp98-1.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp98-1.c
+index daa3f073b..290a83a8f 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp98-1.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp98-1.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-require-effective-target int128 } */
+-/* { dg-options "-Os -fdump-tree-evrp-details" } */
++/* { dg-options "-Os -fdump-tree-evrp1-details" } */
+ 
+ #include 
+ #include 
+@@ -36,6 +36,6 @@ foo (bigger_than_word a, word b, uint8_t c)
+   return ret;
+ }
+ 
+-/* { dg-final { scan-tree-dump "Folded into: if \\(_\[0-9\]+ == 1\\)" "evrp" } } */
+-/* { dg-final { scan-tree-dump-not "Folded into: if \\(_\[0-9\]+ == 2\\)" "evrp" } } */
+-/* { dg-final { scan-tree-dump "Folded into: if \\(_\[0-9\]+ == 3\\)" "evrp" } } */
++/* { dg-final { scan-tree-dump "Folded into: if \\(_\[0-9\]+ == 1\\)" "evrp1" } } */
++/* { dg-final { scan-tree-dump-not "Folded into: if \\(_\[0-9\]+ == 2\\)" "evrp1" } } */
++/* { dg-final { scan-tree-dump "Folded into: if \\(_\[0-9\]+ == 3\\)" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.dg/tree-ssa/vrp98.c b/gcc/testsuite/gcc.dg/tree-ssa/vrp98.c
+index 78d3bbaf4..b57c160d5 100644
+--- a/gcc/testsuite/gcc.dg/tree-ssa/vrp98.c
++++ b/gcc/testsuite/gcc.dg/tree-ssa/vrp98.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* { dg-require-effective-target int128 } */
+-/* { dg-options "-Os -fdisable-tree-evrp -fdump-tree-vrp1-details" } */
++/* { dg-options "-Os -fdisable-tree-evrp1 -fdump-tree-vrp1-details" } */
+ 
+ #include 
+ #include 
+diff --git a/gcc/testsuite/gcc.dg/vrp-min-max-1.c b/gcc/testsuite/gcc.dg/vrp-min-max-1.c
+index b9c8379c8..93ff9f8d7 100644
+--- a/gcc/testsuite/gcc.dg/vrp-min-max-1.c
++++ b/gcc/testsuite/gcc.dg/vrp-min-max-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-vrp1 -fdisable-tree-evrp -fdump-tree-mergephi2" } */
++/* { dg-options "-O2 -fdump-tree-vrp1 -fdisable-tree-evrp1 -fdump-tree-mergephi2" } */
+ 
+ int bar (void);
+ 
+diff --git a/gcc/testsuite/gcc.dg/vrp-min-max-3.c b/gcc/testsuite/gcc.dg/vrp-min-max-3.c
+index 1fffee7bb..ab1fc1de0 100644
+--- a/gcc/testsuite/gcc.dg/vrp-min-max-3.c
++++ b/gcc/testsuite/gcc.dg/vrp-min-max-3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fdump-tree-evrp -fdump-tree-fre1" } */
++/* { dg-options "-O2 -fdump-tree-evrp1 -fdump-tree-fre1" } */
+ 
+ int bar (void);
+ 
+@@ -23,5 +23,5 @@ int foo2 (int x, int y)
+ 
+ /* { dg-final { scan-tree-dump-times "MIN_EXPR" 1 "fre1" } } */
+ /* { dg-final { scan-tree-dump-times "MAX_EXPR" 1 "fre1" } } */
+-/* { dg-final { scan-tree-dump-not "MIN_EXPR" "evrp" } } */
+-/* { dg-final { scan-tree-dump-not "MAX_EXPR" "evrp" } } */
++/* { dg-final { scan-tree-dump-not "MIN_EXPR" "evrp1" } } */
++/* { dg-final { scan-tree-dump-not "MAX_EXPR" "evrp1" } } */
+diff --git a/gcc/testsuite/gcc.target/i386/vect-gather-1.c b/gcc/testsuite/gcc.target/i386/vect-gather-1.c
+index 261b66be0..76c9322a4 100644
+--- a/gcc/testsuite/gcc.target/i386/vect-gather-1.c
++++ b/gcc/testsuite/gcc.target/i386/vect-gather-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-Ofast -msse2 -fdump-tree-vect-details -fdump-tree-forwprop4" } */
++/* { dg-options "-Ofast -msse2 -fdump-tree-vect-details -fdump-tree-forwprop6" } */
+ 
+ #ifndef INDEXTYPE
+ #define INDEXTYPE int
+@@ -17,4 +17,4 @@ double vmul(INDEXTYPE *rowstart, INDEXTYPE *rowend,
+    even with plain SSE2.  */
+ /* { dg-final { scan-tree-dump "loop vectorized" "vect" } } */
+ /* The index vector loads and promotions should be scalar after forwprop.  */
+-/* { dg-final { scan-tree-dump-not "vec_unpack" "forwprop4" } } */
++/* { dg-final { scan-tree-dump-not "vec_unpack" "forwprop6" } } */
+diff --git a/gcc/testsuite/gcc.target/powerpc/vect-gather-1.c b/gcc/testsuite/gcc.target/powerpc/vect-gather-1.c
+index bf98045ab..bf00093c3 100644
+--- a/gcc/testsuite/gcc.target/powerpc/vect-gather-1.c
++++ b/gcc/testsuite/gcc.target/powerpc/vect-gather-1.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+ /* Profitable from Power8 since it supports efficient unaligned load.  */
+-/* { dg-options "-Ofast -mdejagnu-cpu=power8 -fdump-tree-vect-details -fdump-tree-forwprop4" } */
++/* { dg-options "-Ofast -mdejagnu-cpu=power8 -fdump-tree-vect-details -fdump-tree-forwprop6" } */
+ 
+ #ifndef INDEXTYPE
+ #define INDEXTYPE unsigned int
+@@ -17,4 +17,4 @@ double vmul(INDEXTYPE *rowstart, INDEXTYPE *rowend,
+ /* With gather emulation this should be profitable to vectorize from Power8.  */
+ /* { dg-final { scan-tree-dump "loop vectorized" "vect" } } */
+ /* The index vector loads and promotions should be scalar after forwprop.  */
+-/* { dg-final { scan-tree-dump-not "vec_unpack" "forwprop4" } } */
++/* { dg-final { scan-tree-dump-not "vec_unpack" "forwprop6" } } */
+diff --git a/gcc/testsuite/gfortran.dg/pr45636.f90 b/gcc/testsuite/gfortran.dg/pr45636.f90
+index 958833c35..304efa7c6 100644
+--- a/gcc/testsuite/gfortran.dg/pr45636.f90
++++ b/gcc/testsuite/gfortran.dg/pr45636.f90
+@@ -1,6 +1,6 @@
+ ! PR fortran/45636
+ ! { dg-do compile }
+-! { dg-options "-O2 -fdump-tree-forwprop2" }
++! { dg-options "-O2 -fdump-tree-forwprop4" }
+ ! PR 45636 - make sure no memset is needed for a short right-hand side.
+ program main
+   character(len=2), parameter :: x='a '
+@@ -12,4 +12,4 @@ program main
+ end program main
+ ! This test will fail on targets which prefer memcpy/memset over
+ ! move_by_pieces/store_by_pieces.
+-! { dg-final { scan-tree-dump-times "memset" 0 "forwprop2" { xfail { { hppa*-*-* && { ! lp64 } } || { mips*-*-* && { ! nomips16 } } } } } }
++! { dg-final { scan-tree-dump-times "memset" 0 "forwprop4" { xfail { { hppa*-*-* && { ! lp64 } } || { mips*-*-* && { ! nomips16 } } } } } }
+-- 
+2.33.0
+
diff --git a/0390-struct-reorg-Check-ADDR_EXPR-in-pointer-plus-assign.patch b/0390-struct-reorg-Check-ADDR_EXPR-in-pointer-plus-assign.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7adca8fbf99b9b2220ddc817be8668808d0a069c
--- /dev/null
+++ b/0390-struct-reorg-Check-ADDR_EXPR-in-pointer-plus-assign.patch
@@ -0,0 +1,60 @@
+From d8fe597607a6df62180d95d27105de746bf27842 Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Tue, 3 Jun 2025 12:31:07 +0800
+Subject: [PATCH] [struct-reorg] Check ADDR_EXPR in pointer plus assign
+
+---
+ gcc/ipa-struct-reorg/ipa-struct-reorg.cc         |  2 +-
+ .../gcc.dg/struct/rf_defined_by_addr_expr.c      | 16 ++++++++++++++++
+ gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c  |  2 +-
+ 3 files changed, 18 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/struct/rf_defined_by_addr_expr.c
+
+diff --git a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+index 1140edd1d..f5be8de76 100644
+--- a/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
++++ b/gcc/ipa-struct-reorg/ipa-struct-reorg.cc
+@@ -5566,7 +5566,7 @@ ipa_struct_reorg::check_definition_assign (srdecl *decl,
+ 					  : TYPE_SIZE_UNIT (type->type)))
+ 	type->mark_escape (escape_non_multiply_size, stmt);
+ 
+-      if (TREE_CODE (rhs) == SSA_NAME)
++      if (TREE_CODE (rhs) == SSA_NAME || TREE_CODE (rhs) == ADDR_EXPR)
+ 	check_type_and_push (rhs, decl, worklist, stmt);
+       return;
+     }
+diff --git a/gcc/testsuite/gcc.dg/struct/rf_defined_by_addr_expr.c b/gcc/testsuite/gcc.dg/struct/rf_defined_by_addr_expr.c
+new file mode 100644
+index 000000000..65922be8e
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/struct/rf_defined_by_addr_expr.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++
++struct A {
++    int a;
++};
++
++extern unsigned long long offset();
++
++int main() {
++    unsigned char num[16];
++    ((struct A*)(num + offset() * 4))->a = 10;
++
++    return num[0];
++}
++
++/* { dg-final { scan-ipa-dump "struct A(\\\(\[0-9\]*\\\))? has escaped: \"Type escapes a cast to a different pointer\"" "struct_reorg" } } */
+diff --git a/gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c b/gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c
+index 5ff166f08..c52636925 100644
+--- a/gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c
++++ b/gcc/testsuite/gcc.dg/struct/sr_early_void_ptr.c
+@@ -25,4 +25,4 @@ int main() {
+     return 0;
+ }
+ 
+-/* { dg-final { scan-ipa-dump "struct S(\[0-9\]*) has escaped" "struct_reorg" } } */
++/* { dg-final { scan-ipa-dump "struct S(\\\(\[0-9\]*\\\))? has escaped" "struct_reorg" } } */
+-- 
+2.33.0
+
diff --git a/0391-oeAware-Fix-.GCC4OE_oeAware-section-dup-in-LTO.patch b/0391-oeAware-Fix-.GCC4OE_oeAware-section-dup-in-LTO.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0c181fc5609d3315a34843d9e0959b2b30f36f3f
--- /dev/null
+++ b/0391-oeAware-Fix-.GCC4OE_oeAware-section-dup-in-LTO.patch
@@ -0,0 +1,79 @@
+From 19282a59163914ad9c6ac732497d5d235aeba408 Mon Sep 17 00:00:00 2001
+From: liyancheng <412998149@qq.com>
+Date: Thu, 5 Jun 2025 22:06:48 +0800
+Subject: [PATCH] [oeAware] Fix .GCC4OE_oeAware section dup in LTO
+
+This resolves an ICE "section already exists" caused
+by incorrectly creating the .GCC4OE_oeAware section
+for non-global main functions with fat lto objects.
+---
+ gcc/testsuite/g++.dg/dg.exp                         |  1 +
+ .../oeaware-main-in-namespace.C}                    |  0
+ gcc/testsuite/g++.dg/oeaware-with-lto.C             | 12 ++++++++++++
+ gcc/varasm.cc                                       | 13 +++++++++++--
+ 4 files changed, 24 insertions(+), 2 deletions(-)
+ rename gcc/testsuite/{gcc.dg/oeaware-main-in-namespace.cpp => g++.dg/oeaware-main-in-namespace.C} (100%)
+ create mode 100644 gcc/testsuite/g++.dg/oeaware-with-lto.C
+
+diff --git a/gcc/testsuite/g++.dg/dg.exp b/gcc/testsuite/g++.dg/dg.exp
+index 23542ace8..e2cf62478 100644
+--- a/gcc/testsuite/g++.dg/dg.exp
++++ b/gcc/testsuite/g++.dg/dg.exp
+@@ -18,6 +18,7 @@
+ 
+ # Load support procs.
+ load_lib g++-dg.exp
++load_lib oeaware.exp
+ 
+ # If a testcase doesn't have special options, use these.
+ global DEFAULT_CXXFLAGS
+diff --git a/gcc/testsuite/gcc.dg/oeaware-main-in-namespace.cpp b/gcc/testsuite/g++.dg/oeaware-main-in-namespace.C
+similarity index 100%
+rename from gcc/testsuite/gcc.dg/oeaware-main-in-namespace.cpp
+rename to gcc/testsuite/g++.dg/oeaware-main-in-namespace.C
+diff --git a/gcc/testsuite/g++.dg/oeaware-with-lto.C b/gcc/testsuite/g++.dg/oeaware-with-lto.C
+new file mode 100644
+index 000000000..fe77daede
+--- /dev/null
++++ b/gcc/testsuite/g++.dg/oeaware-with-lto.C
+@@ -0,0 +1,12 @@
++/* { dg-do compile { target *-*-linux* *-*-gnu* } }  */
++/* { dg-options "-foeaware-policy=1 -flto -ffat-lto-objects" }  */
++
++namespace radar8446940 {
++int main () {
++ return 0;
++}
++}
++
++int main () {
++  return 0;
++}
+\ No newline at end of file
+diff --git a/gcc/varasm.cc b/gcc/varasm.cc
+index 5134c0c1f..c771ede71 100644
+--- a/gcc/varasm.cc
++++ b/gcc/varasm.cc
+@@ -8581,8 +8581,17 @@ create_oeaware_section ()
+ 	  TREE_CODE (DECL_CONTEXT (cfun->decl)) != TRANSLATION_UNIT_DECL))
+     return;
+ 
+-  int flags = SECTION_STRINGS;
+-  section *oe_section = get_section (".GCC4OE_oeAware", flags, NULL, true);
++  const char *sect_name = ".GCC4OE_oeAware";
++
++  /* If section already exists, just skip.  */
++  section **slot
++    = section_htab->find_slot_with_hash (sect_name,
++					 htab_hash_string (sect_name),
++					 INSERT);
++  if (!slot || *slot != NULL)
++    return;
++
++  section *oe_section = get_section (sect_name, SECTION_STRINGS, NULL, false);
+   switch_to_section (oe_section);
+ 
+   gcc_assert (oeaware_optimize_policy <= UINT8_MAX);
+-- 
+2.34.1
+
diff --git a/0392-Modify-cores-definition-for-hip-cores.patch b/0392-Modify-cores-definition-for-hip-cores.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a189635cecce2736aea7afb739622910f4aa059a
--- /dev/null
+++ b/0392-Modify-cores-definition-for-hip-cores.patch
@@ -0,0 +1,55 @@
+From 003adcdf4b918a346c5e2826e726430fe9af13a1 Mon Sep 17 00:00:00 2001
+From: liyunfei 
+Date: Tue, 10 Jun 2025 16:06:41 +0800
+Subject: [PATCH] Modify cores definition for hip cores
+
+Remove F32MM and F64MM, which is no longer supported after hip09
+Add RNG and SPE support for hip10a and hip12
+---
+ gcc/config/aarch64/aarch64-cores.def | 10 +++++-----
+ gcc/config/aarch64/aarch64-tune.md   |  2 +-
+ 2 files changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-cores.def b/gcc/config/aarch64/aarch64-cores.def
+index 2cb752ca1dc..e69e13c0275 100644
+--- a/gcc/config/aarch64/aarch64-cores.def
++++ b/gcc/config/aarch64/aarch64-cores.def
+@@ -130,9 +130,9 @@ AARCH64_CORE("a64fx", a64fx, a64fx, V8_2A,  (F16, SVE), a64fx, 0x46, 0x001, -1)
+ 
+ /* HiSilicon ('H') cores. */
+ AARCH64_CORE("tsv110",  tsv110, tsv110, V8_2A,  (CRYPTO, F16), tsv110,   0x48, 0xd01, -1)
+-AARCH64_CORE("hip09", hip09, hip09, V8_5A,  (SVE, I8MM, F32MM, F64MM, PROFILE, PREDRES), hip09, 0x48, 0xd02, 0x0)
+-AARCH64_CORE("hip10a", hip10a, hip10a, V8_5A,  (SVE, I8MM, BF16, F32MM, F64MM, SSBS, SHA3, SM4, PREDRES, SVE2, SVE2_BITPERM, DOTPROD, F16FML), hip10a, 0x48, 0xd03, 0x0)
+-AARCH64_CORE("hip10c", hip10c, hip10c, V8_5A,  (SVE, I8MM, BF16, F32MM, F64MM, FLAGM, PAUTH, SSBS, SHA3, SM4, PROFILE, PREDRES), hip10c, 0x48, 0xd45, 0x0)
++AARCH64_CORE("hip09", hip09, hip09, V8_5A,  (SVE, I8MM, F32MM, F64MM, PROFILE, PREDRES, RNG), hip09, 0x48, 0xd02, -1)
++AARCH64_CORE("hip10c", hip10c, hip10c, V8_5A,  (SVE, I8MM, BF16, FLAGM, PAUTH, SSBS, SHA3, SM4, PROFILE, PREDRES, RNG), hip10c, 0x48, 0xd45, -1)
++AARCH64_CORE("hip11", hip11, hip11, V8_5A,  (SVE, SVE2, F16), hip11, 0x48, 0xd22, -1)
+ 
+ /* ARMv8.3-A Architecture Processors.  */
+ 
+@@ -174,10 +174,10 @@ AARCH64_CORE("cortex-a710",  cortexa710, cortexa57, V9A,  (SVE2_BITPERM, MEMTAG,
+ AARCH64_CORE("cortex-x2",  cortexx2, cortexa57, V9A,  (SVE2_BITPERM, MEMTAG, I8MM, BF16), neoversen2, 0x41, 0xd48, -1)
+ 
+ AARCH64_CORE("neoverse-n2", neoversen2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversen2, 0x41, 0xd49, -1)
+-AARCH64_CORE("hip11", hip11, hip11, V8_5A,  (SVE, SVE2, F16), hip11, 0x48, 0xd22, -1)
+ 
+ AARCH64_CORE("demeter", demeter, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+ AARCH64_CORE("neoverse-v2", neoversev2, cortexa57, V9A, (I8MM, BF16, SVE2_BITPERM, RNG, MEMTAG, PROFILE), neoversev2, 0x41, 0xd4f, -1)
+ 
+-AARCH64_CORE("hip12", hip12, hip12, V9_2A, (SVE, SVE2, SVE2_BITPERM, SVE2_AES, SVE2_SM4, SVE2_SHA3, F16, RCPC, BF16, DOTPROD, LSE, SIMD, PAUTH, RDMA, LS64), hip12, 0x48, 0xd06, -1)
++AARCH64_CORE("hip10a", hip10a, hip10a, V9A,  (SVE, I8MM, BF16, SSBS, SHA3, SM4, PREDRES, SVE2, SVE2_BITPERM, DOTPROD, F16FML, RNG, PROFILE), hip10a, 0x48, 0xd03, -1)
++AARCH64_CORE("hip12", hip12, hip12, V9_2A, (SVE, SVE2, SVE2_BITPERM, SVE2_AES, SVE2_SM4, SVE2_SHA3, F16, RCPC, BF16, DOTPROD, LSE, SIMD, PAUTH, RDMA, LS64, RNG, PROFILE), hip12, 0x48, 0xd06, -1)
+ #undef AARCH64_CORE
+diff --git a/gcc/config/aarch64/aarch64-tune.md b/gcc/config/aarch64/aarch64-tune.md
+index 488e39b7cbb..c40faa2f26e 100644
+--- a/gcc/config/aarch64/aarch64-tune.md
++++ b/gcc/config/aarch64/aarch64-tune.md
+@@ -1,5 +1,5 @@
+ ;; -*- buffer-read-only: t -*-
+ ;; Generated automatically by gentune.sh from aarch64-cores.def
+ (define_attr "tune"
+-	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,hip10a,hip10c,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,hip11,demeter,neoversev2,hip12"
++	"cortexa34,cortexa35,cortexa53,cortexa57,cortexa72,cortexa73,thunderx,thunderxt88p1,thunderxt88,octeontx,octeontxt81,octeontxt83,thunderxt81,thunderxt83,ampere1,ampere1a,emag,xgene1,falkor,qdf24xx,exynosm1,phecda,thunderx2t99p1,vulcan,thunderx2t99,cortexa55,cortexa75,cortexa76,cortexa76ae,cortexa77,cortexa78,cortexa78ae,cortexa78c,cortexa65,cortexa65ae,cortexx1,ares,neoversen1,neoversee1,octeontx2,octeontx2t98,octeontx2t96,octeontx2t93,octeontx2f95,octeontx2f95n,octeontx2f95mm,a64fx,tsv110,hip09,hip10c,hip11,thunderx3t110,zeus,neoversev1,neoverse512tvb,saphira,cortexa57cortexa53,cortexa72cortexa53,cortexa73cortexa35,cortexa73cortexa53,cortexa75cortexa55,cortexa76cortexa55,cortexr82,cortexa510,cortexa710,cortexx2,neoversen2,demeter,neoversev2,hip10a,hip12"
+ 	(const (symbol_ref "((enum attr_tune) aarch64_tune)")))
+-- 
+Gitee
diff --git a/0393-array-dse-Ignore-debug-stmt-add-testsuites.patch b/0393-array-dse-Ignore-debug-stmt-add-testsuites.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f8f3c4c0ce85c418997572bc30d981767303877e
--- /dev/null
+++ b/0393-array-dse-Ignore-debug-stmt-add-testsuites.patch
@@ -0,0 +1,795 @@
+From 12b5da19994eedaeaec404859444b7704d204114 Mon Sep 17 00:00:00 2001
+From: huzife <634763349@qq.com>
+Date: Thu, 5 Jun 2025 14:59:49 +0800
+Subject: [PATCH] [array-dse] Ignore debug stmt, add testsuites
+
+---
+ gcc/ipa-array-dse.cc                          | 20 +++++++++--
+ .../alignment-propagation.exp                 | 35 +++++++++++++++++++
+ .../alignment_less_than_rhs.c                 | 18 ++++++++++
+ .../non_aligned_offset.c                      | 18 ++++++++++
+ .../not_address_of_local_var.c                | 18 ++++++++++
+ .../rewrite_aligned_bit_and.c                 | 26 ++++++++++++++
+ gcc/testsuite/gcc.dg/array-dse/array-dse.exp  | 35 +++++++++++++++++++
+ .../array-dse_callee_ptr_exceed_range.c       | 28 +++++++++++++++
+ .../array-dse/array-dse_fully_redundant.c     | 28 +++++++++++++++
+ .../array-dse_no_unique_length_param.c        | 33 +++++++++++++++++
+ .../array-dse_non_local_array_in_caller.c     | 29 +++++++++++++++
+ .../array-dse_non_unique_written_base.c       | 31 ++++++++++++++++
+ .../array-dse/array-dse_partial_redundant.c   | 28 +++++++++++++++
+ .../array-dse_read_bound_non_const.c          | 27 ++++++++++++++
+ .../gcc.dg/localize-array/bad-dominance.c     | 33 +++++++++++++++++
+ .../localize-array/escape-by-taking-address.c | 27 ++++++++++++++
+ .../localize-array-with-multiple-elem.c       | 28 +++++++++++++++
+ .../localize-array-with-single-elem.c         | 26 ++++++++++++++
+ .../gcc.dg/localize-array/localize-array.exp  | 35 +++++++++++++++++++
+ .../localize-array/not-allocated-by-calloc.c  | 27 ++++++++++++++
+ .../used-by-multiple-functions.c              | 34 ++++++++++++++++++
+ 21 files changed, 581 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.dg/alignment-propagation/alignment-propagation.exp
+ create mode 100644 gcc/testsuite/gcc.dg/alignment-propagation/alignment_less_than_rhs.c
+ create mode 100644 gcc/testsuite/gcc.dg/alignment-propagation/non_aligned_offset.c
+ create mode 100644 gcc/testsuite/gcc.dg/alignment-propagation/not_address_of_local_var.c
+ create mode 100644 gcc/testsuite/gcc.dg/alignment-propagation/rewrite_aligned_bit_and.c
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse.exp
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse_callee_ptr_exceed_range.c
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse_fully_redundant.c
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse_no_unique_length_param.c
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse_non_local_array_in_caller.c
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse_non_unique_written_base.c
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse_partial_redundant.c
+ create mode 100644 gcc/testsuite/gcc.dg/array-dse/array-dse_read_bound_non_const.c
+ create mode 100644 gcc/testsuite/gcc.dg/localize-array/bad-dominance.c
+ create mode 100644 gcc/testsuite/gcc.dg/localize-array/escape-by-taking-address.c
+ create mode 100644 gcc/testsuite/gcc.dg/localize-array/localize-array-with-multiple-elem.c
+ create mode 100644 gcc/testsuite/gcc.dg/localize-array/localize-array-with-single-elem.c
+ create mode 100644 gcc/testsuite/gcc.dg/localize-array/localize-array.exp
+ create mode 100644 gcc/testsuite/gcc.dg/localize-array/not-allocated-by-calloc.c
+ create mode 100644 gcc/testsuite/gcc.dg/localize-array/used-by-multiple-functions.c
+
+diff --git a/gcc/ipa-array-dse.cc b/gcc/ipa-array-dse.cc
+index df973e849..7d8bb9f9a 100644
+--- a/gcc/ipa-array-dse.cc
++++ b/gcc/ipa-array-dse.cc
+@@ -907,7 +907,7 @@ array_dse_callee::find_candidate_array ()
+       for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ 	{
+ 	  gimple *stmt = gsi_stmt (gsi);
+-	  if (gimple_clobber_p (stmt))
++	  if (gimple_clobber_p (stmt) || is_gimple_debug (stmt))
+ 	    continue;
+ 
+ 	  /* There are 3 kind of stmts may have store ops: GIMPLE_ASSIGN,
+@@ -2138,7 +2138,8 @@ array_dse_edge::collect_array_accesses ()
+       for (auto gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
+ 	{
+ 	  gimple *stmt = gsi_stmt (gsi);
+-	  if (gimple_clobber_p (stmt) || call_stmt_p (stmt))
++	  if (gimple_clobber_p (stmt) || call_stmt_p (stmt)
++	      || is_gimple_debug (stmt))
+ 	    continue;
+ 
+ 	  for (unsigned i = 0; i < gimple_num_ops (stmt); i++)
+@@ -2495,7 +2496,8 @@ array_dse_edge::calc_read_bound ()
+ 	continue;
+ 
+       auto range = calc_ref_range (var);
+-      if (!integer_cst_p (range.max ()))
++      if (range.undefined_p() || range.varying_p()
++	  || !integer_cst_p (range.max ()))
+ 	return false;
+ 
+       auto max = tree_to_shwi (range.max ());
+@@ -3081,6 +3083,12 @@ ipa_array_dse::apply_array_dse (array_dse_edge *ad_edge)
+ 
+       cfun_saver save (caller);
+ 
++      if (dump_file)
++	{
++	  fprintf (dump_file, "Remove fully redundant call:\n");
++	  print_gimple_stmt (dump_file, call_stmt, 0);
++	}
++
+       auto gsi = gsi_for_stmt (call_stmt);
+       basic_block call_bb = gimple_bb (call_stmt);
+       tree fndecl = gimple_call_fndecl (call_stmt);
+@@ -3103,6 +3111,12 @@ ipa_array_dse::apply_array_dse (array_dse_edge *ad_edge)
+   if (!transform_new_callee (callee, new_callee))
+     return false;
+ 
++  if (dump_file)
++    {
++      fprintf (dump_file, "Rewrite partial redundant call:\n");
++      print_gimple_stmt (dump_file, ad_edge->call_edge->call_stmt, 0);
++    }
++
+   tree bound_addr = ad_edge->get_bound_addr ();
+   rewrite_call_edge (ad_edge->call_edge, new_callee, bound_addr);
+ 
+diff --git a/gcc/testsuite/gcc.dg/alignment-propagation/alignment-propagation.exp b/gcc/testsuite/gcc.dg/alignment-propagation/alignment-propagation.exp
+new file mode 100644
+index 000000000..941849499
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/alignment-propagation/alignment-propagation.exp
+@@ -0,0 +1,35 @@
++#   Copyright (C) 1997-2023 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++# 
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# .
++
++load_lib gcc-dg.exp
++load_lib torture-options.exp
++
++# Initialize `dg'.
++dg-init
++torture-init
++
++set ALIGNMENT_PROPAGATION_TORTURE_OPTIONS [list \
++	{ -O3 } \
++	{ -Ofast } ]
++
++set-torture-options $ALIGNMENT_PROPAGATION_TORTURE_OPTIONS {{}}
++
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c]] \
++	"" "-fipa-alignment-propagation -fdump-ipa-alignment-propagation-details"
++
++# All done.
++torture-finish
++dg-finish
+diff --git a/gcc/testsuite/gcc.dg/alignment-propagation/alignment_less_than_rhs.c b/gcc/testsuite/gcc.dg/alignment-propagation/alignment_less_than_rhs.c
+new file mode 100644
+index 000000000..4695968e2
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/alignment-propagation/alignment_less_than_rhs.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++
++#include 
++
++void __attribute__((__noinline__)) and_alignment_128(void *p) {
++    if ((unsigned long)p & 127)
++        abort();
++}
++
++int main() {
++    int num[16];
++    and_alignment_128(num);
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Rewrite" "alignment-propagation" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/alignment-propagation/non_aligned_offset.c b/gcc/testsuite/gcc.dg/alignment-propagation/non_aligned_offset.c
+new file mode 100644
+index 000000000..f60f4dc18
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/alignment-propagation/non_aligned_offset.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++
++#include 
++
++void __attribute__((__noinline__)) and_alignment_4(void *p) {
++    if (((unsigned long)p + 3) & 3)
++        abort();
++}
++
++int main() {
++    int num;
++    and_alignment_4(&num);
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Rewrite" "alignment-propagation" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/alignment-propagation/not_address_of_local_var.c b/gcc/testsuite/gcc.dg/alignment-propagation/not_address_of_local_var.c
+new file mode 100644
+index 000000000..81d490c92
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/alignment-propagation/not_address_of_local_var.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++
++#include 
++
++void __attribute__((__noinline__)) and_alignment_4(void *p) {
++    if ((unsigned long)p & 3)
++        abort();
++}
++
++int main() {
++    int *p = NULL;
++    and_alignment_4(p);
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Rewrite" "alignment-propagation" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/alignment-propagation/rewrite_aligned_bit_and.c b/gcc/testsuite/gcc.dg/alignment-propagation/rewrite_aligned_bit_and.c
+new file mode 100644
+index 000000000..bf3815964
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/alignment-propagation/rewrite_aligned_bit_and.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++
++#include 
++
++void __attribute__((__noinline__)) and_alignment_4(void *p) {
++    if ((unsigned long)p & 3)
++        abort();
++}
++
++void __attribute__((__noinline__)) and_alignment_8(void *p) {
++    if ((unsigned long)p & 7)
++        abort();
++}
++
++int main() {
++    int num = 0;
++    int nums[16] = {0};
++    and_alignment_4(&num);
++    and_alignment_4(nums);
++    and_alignment_8(nums);
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump-times "Rewrite" 2 "alignment-propagation" } } */
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse.exp b/gcc/testsuite/gcc.dg/array-dse/array-dse.exp
+new file mode 100644
+index 000000000..c04d54f00
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse.exp
+@@ -0,0 +1,35 @@
++#   Copyright (C) 1997-2023 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++# 
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# .
++
++load_lib gcc-dg.exp
++load_lib torture-options.exp
++
++# Initialize `dg'.
++dg-init
++torture-init
++
++set ARRAY_DSE_TORTURE_OPTIONS [list \
++	{ -O3 } \
++	{ -Ofast } ]
++
++set-torture-options $ARRAY_DSE_TORTURE_OPTIONS {{}}
++
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c]] \
++	"" "-fipa-array-dse -fdump-ipa-array-dse-details"
++
++# All done.
++torture-finish
++dg-finish
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse_callee_ptr_exceed_range.c b/gcc/testsuite/gcc.dg/array-dse/array-dse_callee_ptr_exceed_range.c
+new file mode 100644
+index 000000000..91db9ded7
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse_callee_ptr_exceed_range.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++void __attribute__((__noinline__)) test(int* a, unsigned long n) {
++    for (int* p = a + n; p >= a; p--) {
++        *p = p - a;
++    }
++}
++
++int main() {
++    int num[16];
++    int n = 0;
++    scanf("%d", &n);
++    if (n)
++        test(num, n);
++
++    for (unsigned i = 0; i < 8; i++) {
++        if (num[i] != i)
++            abort ();
++    }
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Fail finding array dse candidate callees" "array-dse" } } */
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse_fully_redundant.c b/gcc/testsuite/gcc.dg/array-dse/array-dse_fully_redundant.c
+new file mode 100644
+index 000000000..c5d2dce58
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse_fully_redundant.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++void __attribute__((__noinline__)) test(int* a, unsigned long n) {
++    for (int* p = a + (n - 1); p >= a; p--) {
++        *p = p - a;
++    }
++}
++
++int main() {
++    int num[16];
++    int n = 0;
++    scanf("%d", &n);
++    if (n)
++        test(num + 9, n);
++
++    for (unsigned i = 0; i < 8; i++) {
++        if (num[i] != i)
++            abort ();
++    }
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Remove fully redundant call" "array-dse" } } */
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse_no_unique_length_param.c b/gcc/testsuite/gcc.dg/array-dse/array-dse_no_unique_length_param.c
+new file mode 100644
+index 000000000..cd20d1b52
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse_no_unique_length_param.c
+@@ -0,0 +1,33 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++void __attribute__((__noinline__)) test(int* a, unsigned long n) {
++    for (int* p = a + (n - 1); p >= a; p--) {
++        *p = p - a;
++    }
++
++    int n1;
++    for (int* p = a + (n1 - 1); p >= a; p--) {
++        *p = p - a;
++    }
++}
++
++int main() {
++    int num[16];
++    int n = 0;
++    scanf("%d", &n);
++    if (n)
++        test(num, n);
++
++    for (unsigned i = 0; i < 8; i++) {
++        if (num[i] != i)
++            abort ();
++    }
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Fail finding array dse candidate callees" "array-dse" } } */
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse_non_local_array_in_caller.c b/gcc/testsuite/gcc.dg/array-dse/array-dse_non_local_array_in_caller.c
+new file mode 100644
+index 000000000..99af6299c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse_non_local_array_in_caller.c
+@@ -0,0 +1,29 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++void __attribute__((__noinline__)) test(int* a, unsigned long n) {
++    for (int* p = a + (n - 1); p >= a; p--) {
++        *p = p - a;
++    }
++}
++
++int num[16];
++
++int main() {
++    int n = 0;
++    scanf("%d", &n);
++    if (n)
++        test(num, n);
++
++    for (unsigned i = 0; i < 8; i++) {
++        if (num[i] != i)
++            abort ();
++    }
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Fail finding array dse candidate edges" "array-dse" } } */
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse_non_unique_written_base.c b/gcc/testsuite/gcc.dg/array-dse/array-dse_non_unique_written_base.c
+new file mode 100644
+index 000000000..8011371d9
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse_non_unique_written_base.c
+@@ -0,0 +1,31 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++void __attribute__((__noinline__)) test(int* a, unsigned long n) {
++    int *a1;
++    *a1 = 0;
++
++    for (int* p = a + (n - 1); p >= a; p--) {
++        *p = p - a;
++    }
++}
++
++int main() {
++    int num[16];
++    int n = 0;
++    scanf("%d", &n);
++    if (n)
++        test(num, n);
++
++    for (unsigned i = 0; i < 8; i++) {
++        if (num[i] != i)
++            abort ();
++    }
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Fail finding array dse candidate callees" "array-dse" } } */
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse_partial_redundant.c b/gcc/testsuite/gcc.dg/array-dse/array-dse_partial_redundant.c
+new file mode 100644
+index 000000000..7eb4af33c
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse_partial_redundant.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++void __attribute__((__noinline__)) test(int* a, unsigned long n) {
++    for (int* p = a + (n - 1); p >= a; p--) {
++        *p = p - a;
++    }
++}
++
++int main() {
++    int num[16];
++    int n = 0;
++    scanf("%d", &n);
++    if (n)
++        test(num, n);
++
++    for (unsigned i = 0; i < 8; i++) {
++        if (num[i] != i)
++            abort ();
++    }
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Rewrite partial redundant call" "array-dse" } } */
+diff --git a/gcc/testsuite/gcc.dg/array-dse/array-dse_read_bound_non_const.c b/gcc/testsuite/gcc.dg/array-dse/array-dse_read_bound_non_const.c
+new file mode 100644
+index 000000000..79cf118e9
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/array-dse/array-dse_read_bound_non_const.c
+@@ -0,0 +1,27 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++void __attribute__((__noinline__)) test(int* a, unsigned long n) {
++    for (int* p = a + (n - 1); p >= a; p--) {
++        *p = p - a;
++    }
++}
++
++int main() {
++    int num[16];
++    int n = 0;
++    scanf("%d", &n);
++    if (n)
++        test(num, n);
++
++    int n1;
++    if (num[n1])
++        abort();
++
++    return 0;
++}
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Fail finding array dse candidate edges" "array-dse" } } */
+diff --git a/gcc/testsuite/gcc.dg/localize-array/bad-dominance.c b/gcc/testsuite/gcc.dg/localize-array/bad-dominance.c
+new file mode 100644
+index 000000000..7f9ec730b
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/localize-array/bad-dominance.c
+@@ -0,0 +1,33 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++static int* p;
++
++void __attribute__((noinline)) test() {
++    for (unsigned i = 0; i < 10; i++) {
++        p[i] = i;
++    }
++}
++
++/* Set -O0 so that the ssa define by calloc and used by free
++   are not the same one.  */
++#pragma GCC push_options
++#pragma GCC optimize("O0")
++int main() {
++    int n;
++    scanf("%d", &n);
++
++    p = calloc(10, sizeof(int));
++    for (unsigned i = 0; i < n; i++) {
++        test();
++    }
++    free(p);
++
++    return 0;
++}
++#pragma GCC pop_options
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Localize global array: p" "localize-array" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/localize-array/escape-by-taking-address.c b/gcc/testsuite/gcc.dg/localize-array/escape-by-taking-address.c
+new file mode 100644
+index 000000000..17756c74d
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/localize-array/escape-by-taking-address.c
+@@ -0,0 +1,27 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++static int* p;
++int** a;
++
++void __attribute__((noinline)) test() {
++    a = &p;
++}
++
++/* Set -O0 so that the ssa define by calloc and used by free
++   are not the same one.  */
++#pragma GCC push_options
++#pragma GCC optimize("O0")
++int main() {
++    p = calloc(10, sizeof(int));
++    test();
++    int ret = **a;
++    free(p);
++    return ret;
++}
++#pragma GCC pop_options
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Localize global array" "localize-array" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/localize-array/localize-array-with-multiple-elem.c b/gcc/testsuite/gcc.dg/localize-array/localize-array-with-multiple-elem.c
+new file mode 100644
+index 000000000..6917fd451
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/localize-array/localize-array-with-multiple-elem.c
+@@ -0,0 +1,28 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++static int* p;
++
++void __attribute__((noinline)) test() {
++    for (unsigned i = 0; i < 10; i++) {
++        p[i] = i;
++    }
++}
++
++/* Set -O0 so that the ssa define by calloc and used by free
++   are not the same one.  */
++#pragma GCC push_options
++#pragma GCC optimize("O0")
++int main() {
++    p = calloc(10, sizeof(int));
++    test();
++    free(p);
++    return 0;
++}
++#pragma GCC pop_options
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Localize global array: p" "localize-array" } } */
++/* { dg-final { scan-ipa-dump "Insert calloc statement" "localize-array" } } */
+diff --git a/gcc/testsuite/gcc.dg/localize-array/localize-array-with-single-elem.c b/gcc/testsuite/gcc.dg/localize-array/localize-array-with-single-elem.c
+new file mode 100644
+index 000000000..b79c12d24
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/localize-array/localize-array-with-single-elem.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++static int* p;
++
++void __attribute__((noinline)) test() {
++    p[0] = 0;
++}
++
++/* Set -O0 so that the ssa define by calloc and used by free
++   are not the same one.  */
++#pragma GCC push_options
++#pragma GCC optimize("O0")
++int main() {
++    p = calloc(1, sizeof(int));
++    test();
++    free(p);
++    return 0;
++}
++#pragma GCC pop_options
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Localize global array: p" "localize-array" } } */
++/* { dg-final { scan-ipa-dump "Insert calloc statement" "localize-array" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/localize-array/localize-array.exp b/gcc/testsuite/gcc.dg/localize-array/localize-array.exp
+new file mode 100644
+index 000000000..0bf6cc2cd
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/localize-array/localize-array.exp
+@@ -0,0 +1,35 @@
++#   Copyright (C) 1997-2023 Free Software Foundation, Inc.
++
++# This program is free software; you can redistribute it and/or modify
++# it under the terms of the GNU General Public License as published by
++# the Free Software Foundation; either version 3 of the License, or
++# (at your option) any later version.
++# 
++# This program is distributed in the hope that it will be useful,
++# but WITHOUT ANY WARRANTY; without even the implied warranty of
++# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++# GNU General Public License for more details.
++# 
++# You should have received a copy of the GNU General Public License
++# along with GCC; see the file COPYING3.  If not see
++# .
++
++load_lib gcc-dg.exp
++load_lib torture-options.exp
++
++# Initialize `dg'.
++dg-init
++torture-init
++
++set ALIGNMENT_PROPAGATION_TORTURE_OPTIONS [list \
++	{ -O3 } \
++	{ -Ofast } ]
++
++set-torture-options $ALIGNMENT_PROPAGATION_TORTURE_OPTIONS {{}}
++
++gcc-dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/*.c]] \
++	"" "-fipa-localize-array -fdump-ipa-localize-array-details -fwhole-program"
++
++# All done.
++torture-finish
++dg-finish
+diff --git a/gcc/testsuite/gcc.dg/localize-array/not-allocated-by-calloc.c b/gcc/testsuite/gcc.dg/localize-array/not-allocated-by-calloc.c
+new file mode 100644
+index 000000000..e519a62d8
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/localize-array/not-allocated-by-calloc.c
+@@ -0,0 +1,27 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++static int* p;
++
++void __attribute__((noinline)) test() {
++    for (unsigned i = 0; i < 10; i++) {
++        p[i] = i;
++    }
++}
++
++/* Set -O0 so that the ssa define by calloc and used by free
++   are not the same one.  */
++#pragma GCC push_options
++#pragma GCC optimize("O0")
++int main() {
++    p = malloc(10 * sizeof(int));
++    test();
++    free(p);
++    return 0;
++}
++#pragma GCC pop_options
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Localize global array" "localize-array" { xfail *-*-* } } } */
+diff --git a/gcc/testsuite/gcc.dg/localize-array/used-by-multiple-functions.c b/gcc/testsuite/gcc.dg/localize-array/used-by-multiple-functions.c
+new file mode 100644
+index 000000000..1ac7969b6
+--- /dev/null
++++ b/gcc/testsuite/gcc.dg/localize-array/used-by-multiple-functions.c
+@@ -0,0 +1,34 @@
++/* { dg-do compile } */
++
++#include 
++#include 
++
++static int* p;
++
++void __attribute__((noinline)) test() {
++    for (unsigned i = 0; i < 10; i++) {
++        p[i] = i;
++    }
++}
++
++void __attribute__((noinline)) test2() {
++    for (unsigned i = 0; i < 10; i++) {
++        p[i] = i;
++    }
++}
++
++/* Set -O0 so that the ssa define by calloc and used by free
++   are not the same one.  */
++#pragma GCC push_options
++#pragma GCC optimize("O0")
++int main() {
++    p = calloc(10, sizeof(int));
++    test();
++    test2();
++    free(p);
++    return 0;
++}
++#pragma GCC pop_options
++
++/*--------------------------------------------------------------------------*/
++/* { dg-final { scan-ipa-dump "Localize global array: p" "localize-array" { xfail *-*-* } } } */
+-- 
+2.43.0
+
diff --git a/0394-SVE-Add-SVE-constraint.patch b/0394-SVE-Add-SVE-constraint.patch
new file mode 100644
index 0000000000000000000000000000000000000000..74bf50c8d7a2275d626bd5f2e30b9bade18c3906
--- /dev/null
+++ b/0394-SVE-Add-SVE-constraint.patch
@@ -0,0 +1,36 @@
+From 499bec4f75a9f8846e7a7f2bf7b537debb12d384 Mon Sep 17 00:00:00 2001
+From: blunce 
+Date: Tue, 17 Jun 2025 18:10:02 +0800
+Subject: [PATCH] [SVE] Add SVE constraint.
+
+---
+ gcc/gimple-ssa-expand-sve.cc | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/gcc/gimple-ssa-expand-sve.cc b/gcc/gimple-ssa-expand-sve.cc
+index bd7f88cfa..12f5fe3ab 100644
+--- a/gcc/gimple-ssa-expand-sve.cc
++++ b/gcc/gimple-ssa-expand-sve.cc
+@@ -35,6 +35,9 @@ along with GCC; see the file COPYING3.  If not see
+ #include "cfgloop.h"
+ #include "gimple-ssa.h"
+ #include "gimple-pretty-print.h"
++#ifdef __aarch64__
++#include "config/aarch64/aarch64.h"
++#endif
+ 
+ namespace {
+ 
+@@ -95,6 +98,9 @@ public:
+     if (!flag_find_with_sve)
+       return false;
+ 
++    if (!TARGET_SVE)
++      return false;
++
+     if (!targetm.vector_mode_supported_p (V2DImode))
+       return false;
+ 
+-- 
+2.28.0.windows.1
+
diff --git a/0395-update-ai-model.patch b/0395-update-ai-model.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b3d1f67ae816fe2ed2d85eca6b22bc137c024f80
--- /dev/null
+++ b/0395-update-ai-model.patch
@@ -0,0 +1,678 @@
+From d3fb4531d50499713bc47add60dd45f8e5932d21 Mon Sep 17 00:00:00 2001
+From: yinchuang 
+Date: Wed, 18 Jun 2025 14:33:46 +0800
+Subject: [PATCH] Update ai model
+
+Modify the logic of model parameter reading
+Remove optimize_maximum in pass_ipa_hardware_detection
+
+Signed-off-by: yinchuang 
+---
+ gcc/ai4c-common.h             | 181 ++++++++++++++++++
+ gcc/ai4c-infer.cc             | 335 ++++++++++------------------------
+ gcc/ipa-hardware-detection.cc |   1 -
+ gcc/onnx.fdata                |   2 +-
+ 4 files changed, 279 insertions(+), 240 deletions(-)
+ create mode 100644 gcc/ai4c-common.h
+
+diff --git a/gcc/ai4c-common.h b/gcc/ai4c-common.h
+new file mode 100644
+index 000000000..a4042546a
+--- /dev/null
++++ b/gcc/ai4c-common.h
+@@ -0,0 +1,181 @@
++/* Lightweight AI Inference Framework.
++   Copyright (C) 2024-2024 Free Software Foundation, Inc.
++This file is part of GCC.
++GCC is free software; you can redistribute it and/or modify it under
++the terms of the GNU General Public License as published by the Free
++Software Foundation; either version 3, or (at your option) any later
++version.
++GCC is distributed in the hope that it will be useful, but WITHOUT ANY
++WARRANTY; without even the implied warranty of MERCHANTABILITY or
++FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
++for more details.
++
++You should have received a copy of the GNU General Public License
++along with GCC; see the file COPYING3.  If not see
++.  */
++
++#include 
++#include 
++#include 
++#include 
++#include 
++#include 
++
++/* Implementation of model operators.  */
++
++void
++matmul (const float *lhs, const float *rhs, int m, int k, int n, float *out)
++{
++  for (int i = 0; i < m; i++)
++    {
++      for (int j = 0; j < n; j++)
++	{
++	  out[i * n + j] = 0.0f;
++	  for (int p = 0; p < k; p++)
++	    {
++	      out[i * n + j] += lhs[i * k + p] * rhs[p * n + j];
++	    }
++	}
++    }
++}
++
++void
++add (const float *lhs, const float *rhs, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      out[i] = lhs[i] + rhs[i];
++    }
++}
++
++void
++sub (const float *lhs, const float *rhs, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      out[i] = lhs[i] - rhs[i];
++    }
++}
++
++void
++sigmoid (const float *in, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      out[i] = 1.0f / (1.0f + expf (-in[i]));
++    }
++}
++
++void
++relu (const float *data, int length, float *out)
++{
++  for (int i = 0; i < length; i++)
++    {
++      if (data[i] < 0)
++	{
++	  out[i] = 0;
++	}
++      else
++	{
++	  out[i] = data[i];
++	}
++    }
++}
++
++void
++line_concat (const float *in, int in_size, float *out, int out_size)
++{
++  for (int i = 0; i < in_size; i++)
++    out[out_size + i] = in[i];
++}
++
++void
++one_hot_encoder (const char *in, const char (*cats)[65], float *out,
++		 int out_size)
++{
++  for (int i = 0; i < out_size; i++)
++    {
++      if (i < out_size && strcmp (cats[i], in) == 0)
++	{
++	  out[i] = 1.0f;
++	}
++      else
++	{
++	  out[i] = 0.0f;
++	}
++    }
++}
++
++void
++imputer (const int64_t *in, int size, float *out)
++{
++  for (int i = 0; i < size; i++)
++    out[i] = in[i] * 1.0f;
++}
++
++void
++scaler (const float *in, const float *offset, const float *scale, int size,
++	float *out)
++{
++  for (int i = 0; i < size; i++)
++    out[i] = (in[i] - offset[i]) * scale[i];
++}
++
++int
++argmax (const float *in, int in_size)
++{
++  int out_idx = 0;
++  for (int i = 0; i < in_size; i++)
++    {
++      if (in[i] > in[out_idx])
++	out_idx = i;
++    }
++  return out_idx;
++}
++
++void
++execute_sha256 (const char *input, char *output, size_t output_size)
++{
++    char command[256];
++    snprintf (command, sizeof (command), "echo -n \"%s\" | sha256sum", input);
++
++    FILE *pipe = popen (command, "r");
++    if (pipe == NULL)
++      {
++	perror ("Failed to run sha256 command.");
++	return;
++      }
++
++    fgets (output, output_size, pipe);
++    pclose (pipe);
++}
++
++/* Read float from onnx.fdata.  */
++
++float
++read_float_from_file (FILE* file)
++{
++  char hex_float[8];
++  float result;
++
++  if (!file)
++    {
++      perror ("Can not open model file.");
++      return result;
++    }
++
++  if (fscanf (file, "%8s", hex_float) != 1)
++    {
++      perror ("Can't read hex from model file.");
++      return result;
++    }
++
++  unsigned char bytes[4];
++  for (int i = 0; i < 4; i++)
++    {
++      sscanf (hex_float + 2 * i, "%2hhx", &bytes[i]);
++    }
++
++  memcpy (&result, bytes, sizeof (float));
++  return result;
++}
+\ No newline at end of file
+diff --git a/gcc/ai4c-infer.cc b/gcc/ai4c-infer.cc
+index 4cf040be2..cb9a791a1 100644
+--- a/gcc/ai4c-infer.cc
++++ b/gcc/ai4c-infer.cc
+@@ -26,26 +26,31 @@ along with GCC; see the file COPYING3.  If not see
+ #include "config.h"
+ #include "system.h"
+ 
++#include "ai4c-common.h"
++
++/* Input info.  */
++#define M_OPTION_SIZE  11
+ #define M_MODE_SIZE  6
++
+ #define NATIVE_TUNE_SIZE 128
+-#define CATS_STRINGS_ROW  12
+-#define CATS_STRINGS_COL  65
++
++/* Dimension of model operator parameters.  */
++#define SHA256SUM_OUTPUT_LENGTH 65
++#define CATS_STRINGS_ROW  35
++#define CATS_STRINGS1_ROW  1000
+ #define OFFSET_ROW  6
+ #define SCALE_ROW  6
+-#define UNITY_ROW 1
+-#define COEFFICIENT_ROW  18
+-#define COEFFICIENT_COL  100
+-#define COEFFICIENT1_ROW  100
++#define UNITY_ROW  1
++#define COEFFICIENT_ROW  1356
++#define COEFFICIENT_COL  10
++#define COEFFICIENT1_ROW  10
+ #define COEFFICIENT1_COL  1
+-#define INTERCEPTS_ROW  100
++#define INTERCEPTS_ROW  10
+ #define INTERCEPTS1_ROW  1
+ 
+-/* Model info.  */
+-static int64_t argv_hw1[M_MODE_SIZE];
+-char native_tune[NATIVE_TUNE_SIZE];
+-
+-/* Intermediate computation results from the ONNX model.  */
+-static char cats_strings[CATS_STRINGS_ROW][CATS_STRINGS_COL];
++/* Intermediate results of the model, read in a fixed order.  */
++static char cats_strings[CATS_STRINGS_ROW][SHA256SUM_OUTPUT_LENGTH];
++static char cats_strings1[CATS_STRINGS1_ROW][SHA256SUM_OUTPUT_LENGTH];
+ static float offset[OFFSET_ROW];
+ static float scale[SCALE_ROW];
+ static float unity[UNITY_ROW];
+@@ -54,6 +59,10 @@ static float coefficient1[COEFFICIENT1_ROW][COEFFICIENT1_COL];
+ static float intercepts[INTERCEPTS_ROW];
+ static float intercepts1[INTERCEPTS1_ROW];
+ 
++/* Model info.  */
++static int64_t argv_hw1[M_MODE_SIZE];
++char native_tune[NATIVE_TUNE_SIZE];
++
+ /* Model result.  */
+ static int64_t initialized;
+ static int64_t optimize_result;
+@@ -87,38 +96,7 @@ set_cache_info (int prefetches, int l1_cache_size,
+   argv_hw1[5] = prefetch_distance_factor;
+ }
+ 
+-/* Read float from onnx.fdata.  */
+-
+-float
+-read_float_from_file (FILE* file)
+-{
+-  char hex_float[8];
+-  float result;
+-
+-  if (!file)
+-    {
+-      perror ("Can not open file.");
+-      return result;
+-    }
+-    
+-  if (fscanf (file, "%8s", hex_float) != 1)
+-    {
+-      perror ("Can not read hex from onnx.fdata.");
+-      return result;
+-    }
+-
+-  unsigned char bytes[4];
+-  for (int i = 0; i < 4; i++)
+-    {
+-      sscanf(hex_float + 2 * i, "%2hhx", &bytes[i]);
+-    }
+-
+-  memcpy(&result, bytes, sizeof(float));
+-  return result;
+-}
+-
+-/* To read model parameter information from onnx.fdata and store it into the
+-   appropriate arrays.  */
++/* Read model parameter and store it into the appropriate arrays.  */
+ 
+ static void
+ fill_node (const char *file_name)
+@@ -131,37 +109,53 @@ fill_node (const char *file_name)
+       return;
+     }
+ 
+-   /* Read cats_strings from onnx.fdata.  */
+   char hex_string[2];
+   for (int i = 0; i < CATS_STRINGS_ROW; i++)
+     {
+-      for (int j = 0; j < CATS_STRINGS_COL - 1; j++)
++      for (int j = 0; j < SHA256SUM_OUTPUT_LENGTH - 1; j++)
+ 	{
+-	  if (fscanf(file, "%2s", hex_string) != 1)
++	  if (fscanf (file, "%2s", hex_string) != 1)
+ 	    {
+ 	      perror ("Can not read cats_strings from onnx.fdata.");
+ 	      return;
+ 	    }
+-	  cats_strings[i][j] = (unsigned char)strtol(hex_string, NULL, 16);
++	  cats_strings[i][j] = (unsigned char) strtol (hex_string, NULL, 16);
+ 	}
+-	cats_strings[i][CATS_STRINGS_COL - 1] = '\0';
++      cats_strings[i][SHA256SUM_OUTPUT_LENGTH - 1] = '\0';
+     }
+-  
+-  /* Read offset from onnx.fdata.  */
++
++  for (int i = 0; i < CATS_STRINGS1_ROW; i++)
++    {
++      for (int j = 0; j < SHA256SUM_OUTPUT_LENGTH - 1; j++)
++	{
++	  if (fscanf (file, "%2s", hex_string) != 1)
++	    {
++	      perror ("Can not read cats_strings1 from onnx.fdata.");
++	      return;
++	    }
++	  cats_strings1[i][j] = (unsigned char) strtol (hex_string, NULL, 16);
++	}
++      cats_strings1[i][SHA256SUM_OUTPUT_LENGTH - 1] = '\0';
++    }
++
+   for (int i = 0; i < OFFSET_ROW; i++)
+     {
+       float result = read_float_from_file (file);
+       offset[i] = result;
+     }
+-  
+-  /* Read scale from onnx.fdata.  */
++
+   for (int i = 0; i < SCALE_ROW; i++)
+     {
+       float result = read_float_from_file (file);
+       scale[i] = result;
+     }
+ 
+-  /* Read coefficient from onnx.fdata.  */
++  for (int i = 0; i < UNITY_ROW; i++)
++    {
++      float result = read_float_from_file (file);
++      unity[i] = result;
++    }
++
+   for (int i = 0; i < COEFFICIENT_ROW; i++)
+     for (int j = 0; j < COEFFICIENT_COL; j++)
+       {
+@@ -169,7 +163,6 @@ fill_node (const char *file_name)
+ 	coefficient[i][j] = result;
+       }
+ 
+-  /* Read coefficient1 from onnx.fdata.  */
+   for (int i = 0; i < COEFFICIENT1_ROW; i++)
+     for (int j = 0; j < COEFFICIENT1_COL; j++)
+       {
+@@ -177,141 +170,22 @@ fill_node (const char *file_name)
+ 	coefficient1[i][j] = result;
+       }
+ 
+-  /* Read intercepts from onnx.fdata.  */
+   for (int i = 0; i < INTERCEPTS_ROW; i++)
+     {
+       float result = read_float_from_file (file);
+       intercepts[i] = result;
+     }
+ 
+-  /* Read intercepts1 from onnx.fdata.  */
+   for (int i = 0; i < INTERCEPTS1_ROW; i++)
+     {
+       float result = read_float_from_file (file);
+       intercepts1[i] = result;
+     }
+ 
+-  /* Read unity from onnx.fdata.  */
+-  for (int i = 0; i < UNITY_ROW; i++)
+-    {
+-      float result = read_float_from_file (file);
+-      unity[i] = result;
+-    }
+-
+   fclose (file);
+   return;
+ }
+ 
+-void
+-matmul (const float *lhs, const float *rhs, int m, int k, int n, float *out)
+-{
+-  for (int i = 0; i < m; i++)
+-    {
+-      for (int j = 0; j < n; j++)
+-	{
+-	  out[i * n + j] = 0.0f;
+-	  for (int p = 0; p < k; p++)
+-	    {
+-	      out[i * n + j] += lhs[i * k + p] * rhs[p * n + j];
+-	    }
+-	}
+-    }
+-}
+-
+-void
+-add (const float *lhs, const float *rhs, int length, float *out)
+-{
+-  for (int i = 0; i < length; i++)
+-    {
+-      out[i] = lhs[i] + rhs[i];
+-    }
+-}
+-
+-void
+-sub (const float *lhs, const float *rhs, int length, float *out)
+-{
+-  for (int i = 0; i < length; i++)
+-    {
+-      out[i] = lhs[i] - rhs[i];
+-    }
+-}
+-
+-void
+-sigmoid (const float *in, int length, float *out)
+-{
+-  for (int i = 0; i < length; i++)
+-    {
+-      out[i] = 1.0f / (1.0f + expf (-in[i]));
+-    }
+-}
+-
+-void
+-relu (const float *data, int length, float *out)
+-{
+-  for (int i = 0; i < length; i++)
+-    {
+-      if (data[i] < 0)
+-	{
+-	  out[i] = 0;
+-	}
+-      else
+-	{
+-	  out[i] = data[i];
+-	}
+-    }
+-}
+-
+-void
+-line_concat (const float *in, int in_size, float *out, int out_size)
+-{
+-  for (int i = 0; i < in_size; i++)
+-    out[out_size + i] = in[i];
+-}
+-
+-void
+-one_hot_encoder (const char *in, const char (*cats)[65], float *out,
+-		 int out_size)
+-{
+-  for (int i = 0; i < out_size; i++)
+-    {
+-      if (i < out_size && strcmp (cats[i], in) == 0)
+-	{
+-	  out[i] = 1.0f;
+-	}
+-      else
+-	{
+-	  out[i] = 0.0f;
+-	}
+-    }
+-}
+-
+-void
+-imputer (const int64_t *in, int size, float *out)
+-{
+-  for (int i = 0; i < size; i++)
+-    out[i] = in[i] * 1.0f;
+-}
+-
+-void
+-scaler (const float *in, const float *offset, const float *scale, int size,
+-	float *out)
+-{
+-  for (int i = 0; i < size; i++)
+-    out[i] = (in[i] - offset[i]) * scale[i];
+-}
+-
+-int
+-argmax (const float *in, int in_size)
+-{
+-  int out_idx = 0;
+-  for (int i = 0; i < in_size; i++)
+-    {
+-      if (in[i] > in[out_idx])
+-	out_idx = i;
+-    }
+-  return out_idx;
+-}
+-
+ static void
+ preprocess (int argc, int64_t *argv, int64_t *in_modes)
+ {
+@@ -339,86 +213,71 @@ graph_infer (int argc, const char *argv, int argc2, int64_t *argv2)
+   else
+     return 0;
+ 
+-  int64_t in_modes[M_MODE_SIZE];
+-
+-  preprocess (argc2, argv2, in_modes);
+-  
+-  /* concat_result and encoder_out are intermediate computation results from
+-     the ONNX model. concat_result is a 1 × 18 matrix, and encoder_out is a
+-     1 × 12 matrix.  */
++  static int64_t in_modes[M_MODE_SIZE];
++  static char in_options[M_OPTION_SIZE][1024];
++  strcpy (in_options[0], argv);
+ 
+-  const int concat_out_size = 18;
++  const int concat_out_size = COEFFICIENT_ROW;
+   float concat_result[concat_out_size];
+-  const int encoder_out_size = 12;
+-  float encoder_out[encoder_out_size];
++  const int encoder_out_size = CATS_STRINGS_ROW;
++  const int encoder_last_size = CATS_STRINGS1_ROW;
++  int concat_size = 0;
++  const int size = encoder_out_size;
+ 
+-  one_hot_encoder (argv, cats_strings, encoder_out, encoder_out_size);
+-
+-  line_concat (encoder_out, encoder_out_size, concat_result, 0);
+-
+-  float variable[M_MODE_SIZE];
+-  imputer (in_modes, M_MODE_SIZE, variable);
++  for (int i = 1; i < M_OPTION_SIZE; i++)
++    {
++      float encoder_out[size];
++      one_hot_encoder (in_options[i], cats_strings, encoder_out, size);
++      line_concat (encoder_out, size, concat_result, concat_size);
++      concat_size += size;
++    }
+ 
+-  float variable1[M_MODE_SIZE];
+-  scaler (variable, offset, scale, M_MODE_SIZE, variable1);
+-  float transformed_column[concat_out_size + M_MODE_SIZE];
+-  line_concat (variable1, M_MODE_SIZE, transformed_column, 0);
+-  line_concat (concat_result, concat_out_size, transformed_column, 6);
++    float encoder_out2[encoder_last_size];
++    one_hot_encoder (in_options[0], cats_strings1, encoder_out2,
++      encoder_last_size);
++    line_concat (encoder_out2, encoder_last_size, concat_result, concat_size);
++    concat_size += encoder_last_size;
+ 
+-  /* This requires performing matrix multiplication between a 1 × 18 matrix
+-     and an 18 × 100 matrix  */
++    float variable[M_MODE_SIZE];
++    imputer (in_modes, M_MODE_SIZE, variable);
++    float variable1[M_MODE_SIZE];
++    scaler (variable, offset, scale, M_MODE_SIZE, variable1);
+ 
+-  const int m = 1, k = 18, n = 100;
+-  float mul_result[n];
+-  matmul (transformed_column, coefficient[0], m, k, n, mul_result);
++    float transformed_column[concat_out_size + M_MODE_SIZE];
++    line_concat (variable1, M_MODE_SIZE, transformed_column, 0);
++    line_concat (concat_result, concat_out_size, transformed_column, 6);
+ 
+-  float add_result[n];
+-  add (mul_result, intercepts, n, add_result);
++    const int m = 1, k = COEFFICIENT_ROW, n = COEFFICIENT_COL;
++    float mul_result[n];
++    matmul (transformed_column, coefficient[0], m, k, n, mul_result);
+ 
+-  float next_activations[n];
+-  relu (add_result, n, next_activations);
++    float add_result[n];
++    add (mul_result, intercepts, n, add_result);
+ 
+-  /* This requires performing matrix multiplication between a 1 × 100 matrix
+-     and an 100 × 1 matrix  */
++    float next_activations[n];
++    relu (add_result, n, next_activations);
+ 
+-  const int m2 = 1, k2 = 100, n2 = 1;
+-  float mul_result1[n2];
+-  matmul (next_activations, coefficient1[0], m2, k2, n2, mul_result1);
++    const int m2 = 1, k2 = 10, n2 = 1;
++    float mul_result1[n2];
++    matmul (next_activations, coefficient1[0], m2, k2, n2, mul_result1);
+ 
+-  float add_result1[n2];
+-  add (mul_result1, intercepts1, n2, add_result1);
++    float add_result1[n2];
++    add (mul_result1, intercepts1, n2, add_result1);
+ 
+-  float out_activations_result[n2];
+-  sigmoid (add_result1, n2, out_activations_result);
++    float out_activations_result[n2];
++    sigmoid (add_result1, n2, out_activations_result);
+ 
+-  float negative_class_proba[n2];
+-  sub (unity, out_activations_result, n2, negative_class_proba);
+-  const int prob_size = n2 + n2;
+-  float probabilities[prob_size];
+-  line_concat (negative_class_proba, n2, probabilities, 0);
+-  line_concat (out_activations_result, n2, probabilities, n2);
++    float negative_class_proba[n2];
++    sub (unity, out_activations_result, n2, negative_class_proba);
++    const int prob_size = n2 + n2;
++    float probabilities[prob_size];
++    line_concat (negative_class_proba, n2, probabilities, 0);
++    line_concat (out_activations_result, n2, probabilities, n2);
+ 
+-  int argmax_output = argmax (probabilities, prob_size);
++    int argmax_output = argmax (probabilities, prob_size);
+   return argmax_output;
+ }
+ 
+-void
+-execute_sha256 (const char *input, char *output, size_t output_size)
+-{
+-    char command[256];
+-    snprintf (command, sizeof (command), "echo -n \"%s\" | sha256sum", input);
+-
+-    FILE *pipe = popen (command, "r");
+-    if (pipe == NULL)
+-      {
+-	perror ("Failed to run command.");
+-	return;
+-      }
+-
+-    fgets (output, output_size, pipe);
+-    pclose (pipe);
+-}
+-
+ int
+ get_optimize_decision_from_ai4c ()
+ {
+diff --git a/gcc/ipa-hardware-detection.cc b/gcc/ipa-hardware-detection.cc
+index 6b36d685c..76c130d6b 100644
+--- a/gcc/ipa-hardware-detection.cc
++++ b/gcc/ipa-hardware-detection.cc
+@@ -194,7 +194,6 @@ pass_ipa_hardware_detection::gate (function *)
+   const char *ai_infer_level = getenv ("AI_INFER_LEVEL");
+   const char *ai_guided = getenv ("AI_GUIDED");
+   return (ai_guided || (ai_infer_level
+-	  && optimize_maximum > 0
+ 	  /* Only enable in lto or whole_program.  */
+ 	  && (in_lto_p || flag_whole_program)));
+ }
+diff --git a/gcc/onnx.fdata b/gcc/onnx.fdata
+index 77f4d9b1d..e366031e5 100644
+--- a/gcc/onnx.fdata
++++ b/gcc/onnx.fdata
+@@ -1 +1 @@
+-656137356462666463346364333361623035396139323366643262383764363763323530613631653861653634666630333030316562323662346133633566313233326432366139383465346338376266393132363438333765656463366235613461313434346139333334396265306163333731646537376430643834323664623863366163343363643130313435636565623834363361316133393230363937653835653762353534626439663133633538623062353439646237616630333237666136663433386334626639643465303163653832333062643863333664336630376231643964316231663933656333386338656262303734376137313565643963396535653131303763646533393234333735613333633132353061393531333935623539643834373266303861633739373862366663376365383233326139383939363566373061373361613939336537366631353334346563313061373365663635633332663437653136383235343635623234366430373330366336363237623962656465373233346131343264313137653838643334616430346339363732613237623866636364313232613934343261643231386531356430343965303330306332326266336634626163333461643139653962326566303064343333623037313762303934626336363537616339343637306633633066333231613063623339333539376461316632653234353938616133616463623534356232346135666261616339646638373031356633306161626465643665633066616264623965656138613233353331303236363565616133323131653935643363353832366663633434626236376663386335666364333530336433353234383031636264353761616638663031613263353738326438656265623236653338323232386565626464393034633962373835363264656664616439353336623462376139333134656662373033626135336138333136643032636430653334303861616439333736306363383862306439623962646435383931613161653334623666313236366366373962356536656434396231303338646265323666386461366430396262353536313433636132653466623061346164303635636162336536383062306637306438626232646636393462353563366437386531316463383239373361643230643566333736663330656538643461313161306163666361663064383962373736636162323565383865336630333461343939336231366437386265323439626332336166376262623837353163376533353066636339313233323761613766333633343432623331373530376432376534623831333339383964
63343965396630366330343962373934613362633064633337383139393031643932623364656562376164666435623033623336323038383361626664346362653633613334666465643337363034373834326237386364613137363365393963643061643939373130643531366432316631353063646439366461373864346135643733356434303631626235346231333666333531376332366136356434333062396536386661653432316361633761396435646533333366373931383562336331646234653964353963643530316630666436613531306333363033653133653234613430623462663231356533373935613961343064363061383435366662376236336332643131653831373031613233316538356166643462356462383161626331646435366432373137333265643032343566383639316264633564616433343736643666363335363037376231616131643633346139376334313039623539623733353437663934306331646338643231633066313837333838663264336138656563666565376664383637393032666434616333316261316339666466353162616132613433376266633039326131663733326532613866303636373633373432623037373061366532366533326632626535653437613337333039346633663962343138386338343364356334626561356132613461633462626264663766616337636533396232303632353733373731353431306632336462393730353462366562323730656663636662376362343137366638323638313432636561313665323862323164633935393837303065663164623865396164343465376531383431623134623132356363663335613137653863666661386630363832336264343663663835366534616632363362386161663936386261383530623464396166666265633439336661306665306138333365396534363365383761326363653063386330386131336363636134613261343136396233653961346331336663393463376136376162303939636539326363363865613437616261663136383861623861313732303266386461303531336335333539643237353036323366323434313634633966343533383465663036653735333630613736643464626430346630363366363038636363633738306631663132383635313539313464613937303532363734616430396532353666633539386463636434643338356461643538386236646432613033653631393463613938643837346233316236623062316532353333626331343038643064366138626163306131363161363931323231373438313633646463623132313033623161313137333663623865663535
3635626666303535663331363332353338313363396334313631333034656133666365353561643830356565633137346638643739636136376432303761633436666465336232356236653164353163346461656165333038653266336161383966633961323462376262386430363536323932373263343731313562356139336265383765336139643837333966623265386131666561366161333138353261663139303338393733346335313934363761393137633266616431663436343236613231663865636236346133616662623761373633663830623231393063616534633032316538626436633731643261313866666339353836373133363939623966636239333637373764323863663964376134323134613133346335326162363334363334386666396666336534646565616264363966356566323465346538613762623864303766646338666264643466666537303263623162326539653435643130313061386235623631306232303139633962653661303038376162323666343263356465313035393535313531373665623537363237373934366266303434333463303562646535623762313565326631393061643033613838363963333933396162363834393636613136613036656435356339616665636134313034663335343465646335333738663063616330353634316466336438373835326638633037656234396239303730613338336332613839323837643561333235393366323531303032666436616435313137313166383433396531626137353932616538333330653164326438656166343339363262366264326632376564396434396333356565343733383137666333386462313164333630303936336137323863313634613031393931613164363237643262353162376133643935373036306336346161376563383862316365613139376535626535626234666331363163303835336362393535613530636234643633626364386566396132333232373733633230393865663664636334393131643133383736373833653261383661623632343237653937366164386564633433396130313166306430316134653864643263343835653438383687144d4395a52c41e3842b41b03f3743f207264565084e41e0fbdf3bd429b13da15eb33d7e47b93b309daa3890d2d93d0000803f2bd5bbb95024b1399db4d33b2956c2392e99b93969b868ba90d4eb3bd611a9b92d66c739a2115b3b1257b6b9ed46b2b917deac3b5b0d2a3c14887e3ce74b3eba00000000386ea33945cf5abb9cbce9ba3c4ff0ba5cc05b3bb4bc9eb91e09b03a6113e9ba3e17b539267dab397fdea9b906eea5397713973bfc7a2c3b51ff733b50c313
bb000000003ea5d239842c8c3b6d275e3af5c58b394139b2391f56403bf47e56bb357a81bbc956923b57d2cc39909aaf3944807e3c4a78cbba9938a9b9d84ba7b99796bcb9d7bea5b925e37a39ff19a73ba25dc1397357b1b9c13cd6bb2d36423b6da35a3b90e500bbd402a0b95f930bbbbddfaeb9071fa9395281a6b934e3a23b0f238a3b93e1973964d9a9b98b2f273ceb4a9aba3ccab6b9e3ba033b48220ebb19fcaf3b6290a93b034407bb782fa1b89335943bb388a7b9655cc0b96a361d3c67b9b0b99e28cf39076f2dbaf05afdbafa6cb43962bc2e3bd590bebc30339f3b4476a23963e6663bd7ad183ca5c00cbb8a69c13956f574ba97dcf93b97098d3bd471ae3bcbb06dbbfddca9b9d7c803bc3eb703bc9bdaa8bb1c6718bc8e5bcebb224c013bcb7684bb4d85943b2ef403bcc090893bf736aa3beb62a13b3c1cb6bbb821a7bbaee808bbfe3dd33c0000008027b0f5bbd6e09d3ce2db183d576fb7bb3b9761bc4111813be54cbcbbab65043c93ce1cbce0b3c6bbca4e953bb347fabbc466ff3a88f532bc11bf4c3b72636dbc00000080850afebbaaba5e3b476f973c7ae755bbde44dbbbfac1633bab7793bbf3ae90bb010a84bb3a1c2cbc6597d6badcd277bbddbbfe3b5898933b6063903b0d8fb33b028d8e3b2adcdcba9f1585bb2e91b9bb582aa03b6334f8bb6e31333b5569253b1e71533b8e977e3bcaf945bba6b59c3b603ab2bb02c88e3b0ae4493bfc457ebb2ae3ddbbe8f9943b0c38a4bb160ed73b73adab3bb6100e3bf28fd83baa70603bad2683bb9d90033c52703a3c9546693b7d34903b63a6b83bf1af9cbb3b19a23bedd0f8bb58efb13b105b72bb17300abc025488bb0a5b543e33573a3b24eff2bb286f75bb5d55a9bbd8e4803beea116bca9cc2d3b1e1205bc1adf75bbfcaa413bf8b491bb3050903b6537a83c1736213cc6271abaf7b6293c98bae33be7b710bb07e333ba55389dbb41441f3c8bf89abbb9acb6bbdeb9acbb416e053be7a63fba124e36bc396d97bb000000809a5a083c15d242bca07d0abca36bba3c09ac8e3cf4ae85bb51f5cd3b8abb18bccc0f383c4a58dc3b335a9ebba65c0b3c2324d9bb35bbb83c748069bba62961bc000000003e13193c863f71bb752b1cbc578e6b3b5807f33b633e9abb9e8dac3c78dcb03c7b35193ac5df483cca37e83ad74a50bcbfd529bc33469cbb786498bbafc6c1bbe0f895bba7a7ed3a8ef7a339cb5ccd3be140abbbc2041a3ce9e950bb828f5ebbd63a6fbbdd1d85bb9d07a93cdc24a7bb765ac53b467b96bbe863b4bb083ea73a7869f83b4cf49dbb874003ba2440dfbb356db8bbf63d2abb79b4d5bb3b1bafbb27487139a45115bc45e7a8bcf34775bbfb9e98bb68e4c7bb1e930fb736e8acbb6c4b75
3cce27b5bb830ba83c91ce1b3cdc1def3ae198133ef5e688bb6b67073cdd1ac53a27b589ba81469fbbe150293c3c2884bba35bdd3b1583ba3ac3a194bb3debb13c495799bb078c2ebc384b7d3c97a8493cebb6853c73bb4d3c024dd83c696ca13b1cc702bc6847243ce3ac5dbcfa771abc96221fbc6147fabb3641023cb496023d28f266bb00000000bf65693ccfff0d3cbf68a8bb3b268bbc15de51bca098e1bb9c5980bdec61ee3c66cf983c1024423c51e0febb12336c3c25bd1c3dcebf50bc88eb4cbc0eaea73c00000000c56c163c597c5fbcaa7b593cb7d2e33b7b235a3c0c2447bc95c9edbba8320abc75b98b3cb7d19a3cf51d623bcd0a023d7e91ef3c725604bc91fafdbbdf1f21bc2f3bfcbb274a493b7c78393cdb0e213c01a719bc0a0368bd6ad23cbc210741bc0288df3c6303d1bb8c93bbbb97420fbcbe16353cc8bc18bcf9a61c3d62882a3c8a25683cff2e01bcb0580e3c5b11f33c47dd10bc5b522abc4230df3c6ca21c3d13c0103c8129e33c199e5c3c05752dbc9515f0bba0b724bcac0b383c1f2b12bcbe768d3c80baf23cd6fbc9bb1b04843c84da173cae2dbabc64e63dbc08d8613cbd872b3c029c2c3ce63be03c5e41853c608888bd90f692bd68bf2c3cd0f843bc4497bcbb3161e4bb4ede983c42e72bbc0283dd3b176925bcd6d71bbc7ba42cbb911a6a3dadca0b3c68582fbcc664f23c629c1b3c4f33133cd046413c7eb83c3ddbe0d1baf307ad3b00000080101215bcc0708e3ca925dd3b7899f33cfdb1493dc7bbf43bfb8b5dbb6ee9d2bb584924bcb3cf0abc0b6c0c3ce93e14bccd51943a3302dd3acfabb03ca9c22cbc000000005cb22abcaef7f33cdf6aae3ba949d6bb1ef214bc878db23c3863783b62c5363b67d0eb3c5e192dbc75b57cbb3d43ecbae55cd1bb0cd10a3c00d4083c8efd223cccb5063c1b2d70bb3abcbd3c883c14bc0f01133cee04963c6d7bad3c7480353cba707fbbc16cf63bcdd4a939ad1f123cde2f0cbc2c76053c59ff09bb009bba3cb24a0cbca7750b3c64f9493d982287bbbe111e3c7f48483cd59b5bbbea43e1baf906243d99f3e1bbbd01863c4f95d73c25a8073cfa81273c42548f3cef4b153c800841bc318aa9bb7c98873a55c218bc1338373dfa26bb3c22f2c53c6bbc0dbcc7900e3d0819da3ccec359bb9a671fbcfee714bcbb3f74bb8851ce3c1c76ac3cd7fef039437e0a3ccb42bcba11ea103a388af63b94700f3a4ef6293ad86816b900078b3bd1c62cba4ee1213a7a4b2a3bb98b30ba291c30ba4d97813b71c3033c9815a33b627549ba000000804646103a39a742bb747c0dbbe3ff41bb1186043b9aad27ba947610bad46e52bac8a9053acb9e223a86122dbafce80f3ae1144cba610bc73a34b4543be4f2bb
bb00000000dbcd253ac4727d3bcf8cf33a44811d3ac114213ab973fd3a78684ebbfd3384bb29a1ed3a4a70063a11fb103a3f3aa83b629d2aba74f52cba6d1d2cba7aad31bab36e2bba566e013a79481d3b5c663c3a09cf2fba731f29bcc28e1c3b7067313b8bbaa6b9454d28ba15bab6bad4e82eba107a263a51992bbabaa220babbdc023bfd62103a28172dba1db3003cd7ef11b9c15330bae79bbc3a25ae20ba21b1feb95d37183be0206fba67882bbb784a883bba292cba034132ba4df5f43b60222fbaf5b10a3a75e20a3996f8f1bad29f0e3adb20b8397d2257bc2faa893ba3d90f3ab059bf3abba4db3b7757cdb9cde80d3a934d85bb4b8be639fadd0e3b2bb99b3b6dc792bb95cf2dbae68660be8b1d7f3e2632363e6a588c3e0dc7453e8a4979bd1d81593e87b001beae177e3e4f7449be4d9119be437e10bed7162dbe5e43333e3d94acbe80726ebd000000008bb1683edfc6573ebafeb0bdda2e62be36b673bed771d6bd2c5f453e93cd52be9a55963e74b73f3e90e102be14af6c3e2db5afbe2920f4bec3a4fabdc533b53e00000080be5a753e34650dbe4d70473f2edcce3d45ca513ebb4615be2b554bbf9d8900bf1c142f3e7350a23efc363f3dce2ea5bed70f5dbe30da00be4a45fabd5c2c24bebf70f5bdec31433dac40363ef82f333e4d140fbe16dca83e1aabccbd39e8c2bd7f85c2bd7d4bd7bd62b629bf3e2d0bbe0a0a2d3edcbcf6bd11c8c3be4ca7123e92f7543ea87e02be0c1d2d3e5f362abebc491bbe7f4c95bd614426bee96ed8bed1ba3c3e666850bee6ddd53eda2819be1c5efbbd970e2abed2e41c3e043410be6a7d983ecb6910be90d301bf8248823e5c5f1f3e50043c3ed31cfbbdb4c6663e1b1d083e27e14b3eee2aecbd72008c3eb518e03e444e783e3fe10a3e59940dbe1c03e6be9f11fdbd845eda3dbbd702beb486b7bdce1210be4662cabd997aff3ca8acd8bd5d06843d914d02beb963c83d9ca89c3d954a933d7154ab3da863b4bd1ba5283ef949f03c000000004c7ceebddd4ad1bdb34f333d5226db3d8e4ee83d5ec3593d8c34cabd488ed83de8691abe0e1ec4bd1041853dd79cf2bd503b2f3e7b316f3e4cdf7b3d0f623abe000000807d95fbbd06008d3ddcd1c6be1f1152bd17cdd6bd0a1d983d8d68d03ea043833eba4ab0bdf7d026be8359c0bc3967213e041ce33d3e29833d14a97e3d139ca73da1ae793d8060c4bc8356b7bde92cb7bd2ed5913d1c2d2abe50a24f3ddcf4463d94d5473d83a45a3dfce4b13ed5ce8d3dd2d1b0bdde047b3d6462433e65aa94bdaa15dabdfbda843ddf6caebd139fae3db36f9e3d67e8183d6bf4aa3d48e8573e985cbdbdc40fd63dbacb50be5309983d43cb7f3ddaafad3d4ad59ebd6bfe923d7fa51c
be58b4933de72d873e13a905bede95a0bd7d6cbabdcc197e3d5e80ecbd63418abd023cccbd87c7723da8b70fbe2a2867be97f7febde8028dbd3f9c8e3d8e4a6b3e19c6803d64c364be492c7e3ea28b373e97c28b3e8750453edc6c7ebdc7ad5a3ebebd01bead267d3ef16349beeb7919beaf7510be7b8e2dbe0fc8333e5bb5a0bea8de6ebd00000080e2f0673e49ed5b3e0ff5b0bdadf362be411c79be6dc5d6bd38f5473e5cfe52be16a8953ea54c3f3efbec02bec1e56b3ea8e6a6beceeefcbe360af8bde438b13e00000000fe7f743e219a0bbe578b503f0c26cf3d7a3a513e365516be9dbc4bbfd3f500bf66902f3e5686a13efa6a403dd3e599be7b1e5dbe9ce800be3b6cfabddc0224be459ef5bd6a68443da0f7363ee9db323ee40d0fbe9c41a63e8c16cbbd8073c2bdc758c5bd759dd7bd9bd829bf9d2c0bbe6ac32c3e52e9f6bd1dcdb9be9ba9123eaa61543eaa8a02beaf7d2d3ebbb72abe432f1bbe509694bd083127be2a22cdbec25e3d3e8c9950be2984e23e170217be3d83fbbd95da29be504f1d3ebd2b10bec5ca973e92d110be045402bf55c9813e1ceb1f3ed27c403ef27ffabd3b0a663e5ed4073e34104d3ecfe2eebde06a8b3e4098ea3e7f7e7c3ea2ba0a3ead3e0dbe56e6e7be4d34fdbda585d93d565202be9adfb6bdd9830fbe879ac9bde754003dfad4d7bd69c8833d9cc801bea9a2c73def389c3ddaed923dddc2aa3d3cc1b3bdc90c283ebfadf13c00000000f887edbd1c7dd0bd6f8b333d324cda3da364e73d3ead593d736fc9bd35b7d73d87d519bedf5fc3bd5c00853d67a4f1bd58a12e3e929a6e3ecf6f7b3da3c539be000000006f93fabd0aa88c3d88b0c6bebaf651bd98f4d5bd99ae973df84dd03e3efc823e46afafbd813726bec6c4c1bc63d1203e0f38e23d56ed823d85417e3dfb17a73ddb51793d9acec5bc5aafb6bd5682b6bd407b913d899329be5d954f3dedfb463d3dd9473dd18b5a3d75b6b13ebc7c8d3d7832b0bdc9a57a3d02c6423e454294bde339d9bd419b843d8dd4adbdce06ae3d31fc9d3d5952193d8662aa3d264d573eafabbcbdf03bd53d532f50be229b973dad607f3dfe20ad3d86599ebd9fa1923d940f1cbe814e933d2fe8863eda2105be8016a0bd11c0b9bd30a57d3d8f8eebbd6eee89bd1e75cbbdf16a723d0f290fbe186166be26f5fdbd51aa8cbd14418e3da3b26a3ef78e803db37c55be8869803e551f343ee1688d3ef091463e67047abd32cf543e448a01beb9c97f3e5edf43be38bb19bef68410be0e5427be2b1c313eda8aa5be8b506bbd000000001f106a3ea4754d3e08b6afbd824756be411864be609dd5bd781b453ed7a654beea8f973e7373403ecebe02be091e6e3e4d0aacbe8d43eabed2b3f5bd1d09b7
3e00000080f3ed763e3d9d09be7ae8433fd0eccd3d6ccb523eb88314bec41f4cbfad7a00bfb3112d3eafc0a33e9b7c3a3d956b9ebe160a5fbeb9af00beb8d9f9bd597c24bebbf7f4bd1724403d29fd333e8996323ec9160fbee170a63ea862cabde5e5c1bd27dfc3bdf772d6bd58af2cbf06240bbe0b7d2d3eae46f6bdbadcbfbe0ae7113eb604563e525a02be213f2b3e9a632bbe177c1bbef8df94bd6ac827be0811d4be4cee393eff3152bee817cd3eac6e14be13f6fabd0d742abe89e31b3e3a3e10be16c5993ea6e910be3fe703bfac2f833ef7a21d3e7909373e8ee3f7bdd21e683eddac073e9689483e3d17eebdeb118d3edf18e23eadd2783e23610a3e06310bbef644e6be7ea1fcbdd5fc54be10f67e3ee44a323e40788c3e81c4443ef9a975bd46e1523e93c9ffbdb4e77d3ea3f442be80fe17be65d30ebeed6f26be7f4a2f3efb84a4be77da67bd00000000242e683e481d4c3e10a2adbdd0c855be48bc62bef58fd2bdbc95443e4eb652be269f963eb89e3e3ea51801bec03a6c3ea7feaabef522eabedd0ff4bdd458b63e000000001c0c753eadc408beb11d433f38f3ca3d58f1503e7ba413beaa0b4cbff38300bfe6452b3e46cfa23e204c383d95699dbe50115dbe5e19febd0d9cf6bd6fb722be14bef1bdc62d3c3d292d323ee5ef313e1d660dbe7c24a63e8ad5c8bdf659c0bd7e0dc1bd856cd3bd5f882cbf457609be5eb42b3e130df3bd07cabebe3a36103e2128543ed9b400be3f72293eb79729be3bbc19bea27493bd7b0026be49f4d2be4216383e044450be0965cc3e0f9213bee4b7f7bd54ab28be262a1a3ef8880ebeb615993e2f3b0fbe2ec103bf733d823e33e41b3e43a3353ea33af6bd793b663e5506063e46aa463e6cffeabd171e8c3e1e40e23e3653783e79b8083e09560abe8942e6be3d6ff9bdbd38d93d863e02bebf73b6bd4a890fbed80bc9bdf4adfb3c5db3d7bd6f6a823d34b501be4a36c73d35f69a3d6f9f913d6f17aa3dd251b3bd7c66283ede43ec3c00000080903fedbd9345d0bdc3a9303d1506da3d815ee73d29ce563d1dc8c7bdcc47d73ddbee19beeecac2bdf8a3833d3564f1bd010b2f3e814a6f3e1579793d78ea39be00000080c45dfabda5c78b3d4464c7be72ac4fbd4780d5bde0c0963d090bd13e3866833e4436afbd9f6326be3986bdbc511e213eb3dbe13d858e813d097d7b3dc9e3a53d9088763de970c1bcde44b6bd13e0b5bd472b903d739129bee15d4d3dd9b3443d1167453db9af573d3f56b23eca288c3d9c79afbdb1db773d2c51433e6f8f93bd58cfd8bd553e833d5e58adbdba47ad3d43bc9c3d72ed163d379ea93dedf6573e1a4fbcbd4fc8d43dae9a50be7ecf963d489e7c3d47f5ab3d02bc9dbd1253913d5c271c
bef35d923de557873e491805be1b819fbdf021b9bd76a77b3d5f44ebbdc82689bd8737cbbd642b703d59300fbe636d65beae3efcbd3fe88bbd665d8d3d2b616b3e6f5e7e3d33c4ef3d5a4ff3bd7221abbd56eb05be0ea3bcbdde3df03cb7dfc9bdeb15773d1eb0f2bd5dc1ba3dd366923d99b6893d81cd9f3d1c33a8bd0aed1c3e4f05e13c00000080dacfddbd5efae8bdeae7273d86fcec3d8ad5023e45194c3db68abbbdffc7c93d53820fbe25d1b6bddc5d793dd2a2e1bdba0d233ec440853ea3646b3d857249be000000007043eabd30ab833d07e5cfbe76a644bdd424c8bdbff98d3d7c0be53e2564933ec465a4bd3e031bbe2e56b5bcca32163e3192d33daa7b753da05e6e3def8e9c3d62bf693d4910b9bc23efaabdd2cfaabd035c883d22501ebef256423d6b493a3d002e3b3dc8e94c3de1f9c33ea59f843d07eda4bdcdfd6a3daec8353e0fc58abd45e9cabd99a0783d9faaa2bd18e4a23d8b0c943d8d7d0f3db27d9f3d48dd483e4686b0bdc977c73d3ebb61beade78d3d7a6b6f3da931a23d523394bd726f893d4b9311be6fee893da5ab933ee588f8bdb6d395bdc374c6bda6756d3d41f9dbbdec1c81bde952bebd800b633d339705bec81c70be3e29ecbd59ab83bdd329853df8b67e3eb00a713ddfe7da3d302f03bedef5b7bd617710be3adecabd9925003dac34d9bdff59843d94a402be1adfc83dc9099d3d93a6933da9bbab3dbad0b4bd7523293e061df13c000000803316efbdebccd1bd67cf333d53b0db3d8ee3e83d1c555a3dfab0cabd6816d93d61d91abe7895c4bd4295853d833af3bdefc02f3e6800703e197b7c3d20f43abe00000000ea3afcbd76558d3dc78ec7becc9652bde652d7bd9478983d8d2ed13eacb8833e0bb5b0bda74d27be3905c1bcc5dd213ed4ace33d817c833dbf4b7f3deb03a83de74e7a3d9a0fc5bc92c5b7bdcf9ab7bd5030923de4ac2abe712a503d8179473d7a5a483d66365b3d318db23ea6278e3d893bb1bd31a67b3db0fe433ee10395bd989edabd042f853df9d5aebd7b08af3dc5d19e3dd85a193d715bab3d3f9c583ee0cfbdbde795d63d457751bed464983d2137803d951bae3da5349fbd095a933dee161dbe810d943d77a7873e8b0306be42f6a0bd9eddbabd9fb67e3db718edbd44958abd3cbaccbd105f733df81b10bef0ee67be7fa0ffbd3c588dbd8ff28e3def146c3e0718813de067dc3d06c0edbd469cb0bdeadf02be9afab7bdf09be93cdb30d3bd137f703d05c5ecbd0b1cc03da3928e3dee15863dd94aa73dd501adbd8925193ee9f7db3c0000008069b6d8bd8bd8d8bd54ad233dc8f8de3d90f3ef3d7c8c463dcc09b7bd9ce5c43de6400cbe894cb2bd5ab9723da474dcbd0a1c1f3e7f24853e7081683d7f3629
be000000806996e4bd6029833d2114cebebb803fbda83cc3bd89748e3d60d6fe3ef831963e57f5a8bdeb7f17be8437b0bc0497123e6177ce3db5ee6e3d23fd673d627f983d127a633d12e6b3bc091ab0bd9d90a6bd6dc3843dcf6f1abe97e73d3d42b3353db840363d7958473d2e08d53e711d813d78cea0bde6b0643d7357313eab9c8cbda337c6bd9000723d57d4a6bd08de9e3db62e903d2adc0a3dc98a9b3da3e4433e9761b6bda3a2c23d88e95cbe44258e3dc703693d85019e3d260797bd4ed0853dad460ebec774863d7b2d9a3e4bddf2bdaf889cbd62bdbebdd3b36a3d1eead6bd600882bd10dac5bd982e5d3d5b8d02be3cc450be27a8e6bdc1da84bd8da8843d7dbe813ec89a6a3d5d4fd93d504a02bef881b6bdda840fbe6651c9bd08e5fc3c039fd7bde720833dfabf01be1a5ac73de7b49b3d255c923dbf54aa3d0961b3bd2b1b283e27d3ed3c00000000bf63edbd2f3ed0bd8cc5313d5e17da3d593de73d7e1b583daf29c9bd3481d73d4fdc19bef20ec3bdf15a843dc583f1bd42b22e3e17b56e3ef0137a3d86d939be00000080327bfabd7f108c3d1eafc6be445850bdf2b8d5bd3027973d7c49d03e1809833e8d49afbd494426beb813bebcb2dc203eb50ce23d8f44823d19e57c3d21a3a63dc8ed773d3b12c2bc7b52b6bd2322b6bd84e7903d63a229be96fa4d3d0855453df235463de0fc583de3bab13e86e38c3d66c9afbd4844793d62dc423e51b693bdd100d9bd36f5833da96cadbd4d9fad3d067b9d3de17a173d2ef6a93d9f66573ea456bcbd4703d53dde4750be7513973da7067e3d68b4ac3dbbdb9dbdc20f923d03181cbe9ac1923db0f4863e731b05bec99b9fbd8f67b9bdef4c7c3d4e68ebbd955389bd3531cbbdf603713db5290fbeafa966be4ce2fdbd4e138cbdb0ab8d3d20cd6a3ebec67f3d9ef957bec152813e51cb353e30558e3ebd52483e7b4a7fbd8d69563e912703beafcb803e766246beae671bbe1e3012bed9ca29be58b7323e5333a6be425070bd0000000002d56b3e9e104f3eb773b2bd96c058be00b365bea4b5d8bd05fb473e264856be637f983e1c29423e675d04be1de46f3e63e3acbeebabebbed539fabdc7e5b73e0000008056b7783e41f20bbe89cb433f31f4d03db489543e23e316be25e44cbf124b01bfc8ae2e3e2cb1a43e2ba9403df0169fbee4a860be1f4e02be7910fdbd392e26befc28f8bd68ad443d439d353ed46c353e7ac010be6a06a83ec8a1cebdf813c6bd4ed7c6bd5a93d9bd51652dbfe8c90cbe592c2f3e997af9bd79a1c0beb47e133eabc4573e0df903be26d92c3ea30c2dbe23271dbe1b9e98bde47029bea0c8d4bef08a3b3e45d453be30e9cd3e5fcf16beb92dfebdf1282cbe84801d3e71e411bedab19a
3e8e8e12be2aad04bfee17843efd391f3e6195383e086cfcbde1e1693e263f093eca2b4a3edc29f1bd7efb8d3e0590e33ef7c97b3ebbf50b3e55880dbe9ddbe7be46e8ffbd1e25dc3d43edebbdb496b2bde2cb01be45cbb6bd770cdf3c01c6d8bdc1256f3d43f5eabd8362c53d3cc28d3d4c54853dd29ba53d9770afbd10b4173ecd44da3c000000007929d7bda647d6bdc5a7223d5f05df3dcf97f03d8766453d51a8aebd044bb93dab090bbe092cb1bd0b5d713d14dcdabd85401f3ef69e863e5642673d834627be0000000027e2e2bd9f81833d66c6cfbe2b5e3ebd45efc1bd2f718f3db1c6033fe13b983e9b6eabbd711b16bea4adaebcd0d0103e190bc23db6976d3d9daf663d159c973d4532623dd556b2bc09a2b2bde48ba5bdf103843d55930ebecdbc3a3dfc6c323d04ba2c3d4a32463d0351da3ec763803dded69fbdad67633de850323e85e68cbd5ce2c4bd54a5703dbedaa8bd0bf1953d535b8f3da31c073d67eb923d5e27463e09a1b9bd6537b73d18d55dbe873e8f3d0bb5673df5129d3d348197bdfb0e853dd2060dbeb05e7d3daf629c3e4dfaf0bd170c9abdae6abcbde6c6693d0463d5bdf8c081bd8700cabdae54513df47a01be929b4abeac77dabd8ba984bd5f2e853dc5af833e124a693da068db3d435203beb583b8bd3e9310be5542cbbd5318013dc8b9d9bd6eef843d55bf02beff62c93d6e9d9d3d903b943d7c4bac3d1b60b5bdac48293e54fff23c000000800d6aefbdb152d2bd28e5343d122edc3df060e93d657b5b3d5716cbbda297d93decde1abedbfcc4bd882a863d5e8cf3bd52e42f3e28f76f3e889f7d3df5ae3abe000000000d91fcbd8ce78d3dd882c7be65b053bd17b2d7bd7b08993d5422d13e67b2833e2a45b1bdc15b27beeca6c2bc9806223ef729e43df711843d263b803d8595a83d58797b3d59b0c6bcfd53b8bd1a0fb8bd67c5923d5abe2abe1d48513d1a93483da975493de55b5c3de780b23ee0bc8e3dbaa9b1bda4d07c3d8618443e2c9895bd10fddabd50c4853da666afbd4696af3def649f3d875f1a3d24eaab3de3ac583eff5cbebd1d18d73de03451bef2f5983d2acc803dc3abae3dd1c79fbde8ee933da02a1dbe939f943df69f873ee62506be5d89a1bdf268bbbdc9d87f3de56dedbd112a8bbdd742cdbde882743d663810be419467be8de4ffbdfcec8dbd77838f3dfa106c3effac813dbb9bd83df8e801be3be5b5bd5c1f0fbe06acc8bdc9bcfc3cc8ecd6bd0ab4823d0c5f01be68b1c63dba2d9b3d33de913d7bc1a93d9ec6b2bd76ae273e0ec5ed3c00000000c2aaecbd1590cfbd9163313d7863d93de182e63df77c573d8680c8bddaced63d3f7419bedf6dc2bd69ec833df5c8f0bdb5442e3e68416e3e4a4b793dc16a39
be00000000a3bcf9bd9f9a8b3dbc70c6befed24fbd1c0cd5bdaea5963d030ad03ee3ce823e66b2aebdb3d925be5c59bebc4671203edb54e13d8bd8813d0d167c3dea11a63d5025773d6255c2bcd7b5b5bd8f8ab5bd1b6b903d653529bedd6c4d3df0d3443de5b2453dab5b583d587eb13e516b8c3dc436afbdba79783d666c423e6c3893bd4e52d8bd1787833dfcd6acbd3f09ad3d1ff29c3d1640173d2f63a93dfdf4563e91b5bbbd4c52d43d74d64fbe0592963dcf357d3de31dac3dd9539dbd6592913d44af1bbe5244923d61ba863e42b904be32129fbd88c8b8bd8a817b3d3fb0eabdcee088bd3986cabd3046703d5bc40ebe001b66be521ffdbd859d8bbd10348d3dc4596a3e52f37e3dd45312bfaa602f3fe11df63ebb01413fd9d0073f7a322cbe4829113ff6b1b0be71a42e3fb04c06bf5589d1be8e02c5be4ccee5be0ce2f13e93a161bf7fa221be0000008066e21f3fe5430c3f2b1371be2dd712bf8b9d1bbfebcd91be8376073f3e0611bfeac64e3f7c9e033f4456b2be03a1223f78686abf9a0ca0bf4028a9bed568793f0000000054a3283f4649bdbe97d3044098a78d3eb917103f8020ccbea7380bc00f93afbf6e6cec3e44555f3fdc9c023e1ef757bfe81018bf1187afbe5e6baabecc24e0bec01aa7bed44b053ef6d5f53e55fcf53e6312c3bed4af633f98978bbe09c985be245586bef96492be41baedbf6cb8bdbe2880ed3e13f7a7be76a582bfc688c73edc3f123fd4cbb1be33ebe93eab18eabe37eed3be14f64dbe0e34e5be785690bfe6ddfd3ea95b0fbf14af8b3f3207ccbef42fabbe9c3ee8be041ed53e8aa7c4bea8cd513faa28c6beffc3b4bf8917333f1f72d73eb7f2f93ebba5aabe9d8a1e3fcd9fb93ef1dd083f7006a3bef684403fd48d9a3fe9ab2a3f1f4fbd3e516fbfbe72649dbf5459acbe3ce2d63ea5fae63e06cfa93e0415b53e83fcd73e8a1c973ee8499d3e09299f3e10e6c63e0965983e4656b23e3343b03e9e43b23e57bd953ee7f3f33e658b873e941a7bbe04d2bb3efa54463f9b0d723ede9fb43e4aa5163f555c8c3e7521803ed201e23e9267873eb546b83e07bcb03e8701af3ef7e2da3e2b1a333fcc08b23e71089b3e6536c2bdf12ce43ec3c4a43ec9deee3e6c04af3e90f0c53e19aa783ed7025a3f4a48103f9794d23ef147973ed7634a3e9084ee3ef8e9e73e1837a83e1fe2ac3e7ea6bb3e24ec9b3e3dc32f3e27f7bd3e2315cd3e3a67af3e5dde993e6209bd3e991da23e0112953eeb4cc73e4ea7373f4389b63e53e5bd3e38aba33ea7beed3e2dded33eadb68e3ecaeeb23ec79a963ef6c5d53ef7deb93e4ef4a53e4b2ec33e7825013f4d50bf3efc34e03e747f973e4718a23eeadfc03ea089cb3eab6da43e8e25a33ebef7e4
3ea9b3ea3eebbb0f3fcbaa8f3ef829043fcc263e3f917b973e8b8aa43ea938eb3ef7ec9a3e9acda03e25379f3ef927b13e73b5b23eedf0d13e6f15993ef928033fdae3d63e81e4963d
+\ No newline at end of file
++363632656234393365653936383862396662623134303865623332333461363066633935643264653466326132626331353831633763636239616630353032653233326432366139383465346338376266393132363438333765656463366235613461313434346139333334396265306163333731646537376430643834323664623863366163343363643130313435636565623834363361316133393230363937653835653762353534626439663133633538623062353439646237616630333237666136663433386334626639643465303163653832333062643863333664336630376231643964316231663933656333386338656262303734376137313565643963396535653131303763646533393234333735613333633132353061393531333935623539643834373266303861633739373862366663376365383233326139383939363566373061373361613939336537366631353334346563313061373365663635633332663437653136383235343635623234366430373330366336363237623962656465373233346131343264313137653838643334616430346339363732613237623866636364313232613934343261643231386531356430343965303330306332326266336634626163333461643139653962326566303064343333623037313762303934626336363537616339343637306633633066333231613063623339333539376461316632653234353938616133616463623534356232346135666261616339646638373031356633306161626465643665633066616264623965656138613233353331303236363565616133323131653935643363353832366663633434626236376663386335666364333530336433353234383031636264353761616638663031613263353738326438656265623236653338323232386565626464393034633962373835363264656664616439353336623462376139333134656662373033626135336138333136643032636430653334303861616439333736306363383862306439623962646435383931613161656137356462666463346364333361623035396139323366643262383764363763323530613631653861653634666630333030316562323662346133633566316533346236663132363663663739623565366564343962313033386462653236663864613664303962623535363134336361326534666230613461643036356361623365363830623066373064386262326466363934623535633664373865313164633832393733616432306435663337366633306565386434613131613061636663616630643839623737366361623235653838653366303334613439393362313664373862
65323439626332336166376262623837353163376533353066636339313233323761613766333633343432623331373530376432376534623831333339383964633439653966303663303439623739346133626330646333373831393930316439326233646565623761646664356230336233363230383833616266643463626536336133346664656433373630343738343262373863646131373633653939636430616439393731306435313664323166313530636464393664613738643461356437333564343036316262353462313336663335313763323661363564343330623965363866616534323163616337613964356465333333663739313835623363316462346539643539636435303166306664366135313063333630336531336532346134306234626632313565333739356139613430643630613834353666623762363363326431316538313730316132333165383561666434623564623831616263316464353664323731373332656430323435663836393162646335646164333437366436663633353630373762316161316436333461393763343130396235396237333534376639343063316463386432316330663138373338386632643361386565636665653766643836373930326664346163333162613163396664663531626161326134333762666330393261316637333265326138663036363736333734326230373730613665323665333266326265356534376133373330393466336639623431383863383433643563346265613561326134616334626262646637666163376365333962323036323537333737313534313066323364623937303534623665623237306566636366623763623431373666383236383134326365613136653238623231646339353938373030656631646238653961643434653765313834316231346231323563636633356131376538636666613866303638323362643436636638353665346166323633623861616639363862613835306234643961666662656334393366613066653061383333653965343633653837613263636530633863303861313363636361346132613431363962336539613463313366633934633761363761623039396365393263633638656134376162616631363838616238613137323032663864613035313363353335396432373530363233663234343136346339663435333834656630366537353336306137366434646264303466303633663630386363636337383066316631323836353135393134646139373035323637346164303965323536666335393864636364346433383564616435383862366464326130336536313934636139386438373462333162366230
62316532353333626331343038643064366138626163306131363161363931323231373438313633646463623132313033623161313137333663623865663535363562666630353566333136333235333831336339633431363133303465613366636535356164383035656563313734663864373963613637643230376163343666646533623235623665316435316334646165616533303865326633616138396663396132346237626238643036353632393237326334373131356235613933626538376533613964383733396662326538613166656136616133313835326166313930333839373334633531393436376139313763326661643166343634323661323166386563623634613361666262376137363366383062323139306361653463303231653862643663373164326131386666633935383637313336393962396663623933363737376432386366396437613432313461313334633532616236333436333438666639666633653430383432636332303030653635353139336138646132393162303630376234313133653632633264666234386335663438393838366435363133643461323036386534353462333939633434303534393535323137643030656138323565613436336234623735303036336131346562623664383563306566373661323264666237313164613933626232343636366137616430363063643632626237306335353536396233353566633932393964346162636636376563656530396161306664656561626436396635656632346534653861376262386430376664633866626464346666653730326362316232653965343564313031306138623562363130623230313963396265366130303837616232366634326335646531303539353531353137366562353736323737393436626630343433346330356264653562376231356532663139306164303361383836396333393339616236383439363661313661303665643535633961666563613431303466333534346564633533373866306361633035363431646633643837383532663863303765623439623930373061333833633261383932383764356133323539336632353130303266643661643531313731316638343339653162613735393261653833333065316432643865616634333936326236626432663237656439643439633335656534373338313766633338646231316433363030393633613732386331363461303139393161316436323764326235316237613364393537303630633634616137656338386231636561313937653562653562623466633136316330383533636239353561353063623464363362636438656639613233323237373363
32303938656636646363343931316431333837363738336532613836616236323432376539373661643865646334333961303131663064303161346538646432633438356534383836643632646339373566636633386665303361663166346232643930313935363338633831656165366562383464626563373336383233386465353036383639336637363234353261313032323730326537343362336262316263366163336330323561323638316631346539616235393836393161376531656531386431303839323862356638643666396266336563663938386431313530616139653235353465333732356164633834636530356530656361356138333534386566663534663232343764323932613735306636393037663063356666343237643266306261383234653239386538363865336166376564616361396565353135306339636462646661303037616164363734626132623431303535633433643866306536346461336638636330623630376663623335373337623162373161393761666361636662353537306164313637303965393361626232666235303230653735346665643937353733323830326665656364623438346362623566396337336664303532383839353562313636313338353262613935343461633133346366356462323232353539316435613266303531616164613132636637656666633366396461643465306535363532393339386634383233303861323531363935316537633538306338303162336665346438643533333836376461303438373833616131663065323664653033383164383630373632303939663961346661376539613132313865393233353135653739393938306638386461646637623336653230333236383735633931663030366365373934356361356665643039303635326666663862383230346234623330303166613239633065316538386137303930636365643538373431393661316339326533663532373063346235343665336131613230373231393735336431616336636637633137333261383934623266616339333435373065643539323832396232343535343463343833393161643837333034363437306562666137613937306665336133363466353437653035383064646536373335313164616438333639333164643836396439633364643738666633396331363431353732323335636431623632316532623261333064303264653033366630373833613838356431353264333433636130373864646666373961613638346333336433306431633863613463636139633933383562306333303637376666383266646265383931633830336436323130313763323731366637
37323664396239343862636566636139613236333930643837323636393663356663646238626664636132663761643738623464366265303838373138333234623635353331343439383565376563663262333836633334386664626264313036316262626331336231613661656337376266363162306164656436613831396564313965343737356637383533333065383636303337646231323239363762363633363862626135373936333132326339623965383636306433626633313836643731623031616332613066663635616635323565646563623131373163646238656434656261333434653261656436646134656134636261346637653166646237623765336264363564636630356236383664323766623833653739616538313435343262303532306138643734663831623262306638393566326430336539396361363562343938356366633530626563613862316231333863373435653564313466663565643131333237373639323765666435643633333439303838303535376535336361306234303764353831656437313132373334646364316339313936306265303334353966313434636537353366633665633333363839336133363062653065633339396239353137396538356135393330343430346366316239333634383439323031396630656138303739373965636433633031653831323236393861336630386439303838613333383534333839353434343530353738316531393762643239336662333762363733396538316631636336643261636430646461393138306635643433353430633761613433326361383464356436393732366135373037303831323865313039383630303866306333646339666336333563633833306439613738643832373466346635646632316630616637336334613034373761336433393631366235356331653466323166333433643065613439636135663832653737666466366562653137333832363431373339313266323031643865626362343362373836303437653865373534393665393862336562653565363334313332373431323662313133646439643633336665303362373938353335376262636237373239343230356435353139383331656433653036653430323233356461336164386363633439336363343963316634373738373863376637333762353463346265623435316261656635373864376530613630343337343630323631323865306664373164356266303339323033666232343636393234343534363736623033656239376565633731393132396139653365666434656335653037633730656531393962353432633137386231303033336164313563353033
36316130323034633462393865663564613636336634323436386532343336396338623831313865633366396137393039383436386238626264643236343762613663396434383338343130343537366437326538633165326636383065356539336564346164333233306335353263373830373562666161393662366339393537303064333031303339663739636431353363643839616665303439313263346337303865326136353565656663373633343135346262663363333230393339306465633836623563323438316233623839353862393030653164623466393035376536616663393866323137393338626138306534383937396130363932356563333132303730383637353838663233333633643637663232333963623633623431646162383366313739316130343539383138366466306131356536336366663835636531336266616138613362613935396633333530626231356336626663383262366465363035653263383765323663303836313065326532376364616333303164663233633533633063656630326636306533393835616532666532666437326639363137653236316136306339373763656432616364616261353237353663633236636332663633663433626230363232616263646532323564613833643134333930376136393735326135366639303462613338656632396264323236336666656332393433613931376363383664383939316562646531373565643230386238323463396532313330353438643234373066303361316139653464383237396633306530653239373064316139306439383138323865613561353433336432623034316632363436633263366565376564343339626163303265396163313336356237303332316263386633346565346561383630356162383666626336336532346362663466613231393134306233353133323365333532356338303433356332666239386430393263353131383461346338366431343232313131343430373838386565303034333935343537353032623664376466666131313931383031373530623566386664393035353937656638643837303535646134373832373839353639643662306136656630353730326363663431316331613263663265356538383539653964383030333665383833656135316334633737393931336633346161613463353439383364373661303965623431363433616630376130386361653565316135353538643532613462633863623561643838616134636130323038656462633365663037653131653037623630373732643936396338613331396334316231613632356135626635383736386639353433393364613538
36346539613361616637653832376136343463303966343034626665333262626163363163633066333666643136333962313161613963623234343361353866373738323565303430623135313634383539346565363161326665343263373432343866346338313231313266313262393434386439313334663936373936663636396237383062653238613730323438356137646335376362633732386364613331373961623233666266383564636638323034353735383533626163343431313366323362653837653434656235356632636537393032383463656134353038333162383632346238383237643833386137343762646433633261623131623534363062623435353237336361353531643135623332636236303436323062333263653432323537666365633030353235306566366262353466386337663565313533356364626334386361333561383230626531366638393464366633616564653939343965613931643063383238333033656631626564326430643763643131306463303563623365643732363761663731663239393738363865316333393336623533303832663865336562346538663264663266383664646566653964343662386537373531646230336530313631636435373835336161316236393433303863643663383663313236326635643834376133646634393832353763346230353630633464326338353962396337376661636363626234373038356333636265336631663539653132316562623534656535643462336131623230613235626564626434303636633034616361643338333761326337643031373764613538393538393664393765316362336665633432373366313730363663613666643036316238333038663736663332653264396639633166373064636436386339343861663731663563323661343962316535386662646232666438306136613639386130616531663735333661616437373265376164393064643861636637333834653831333634303133346362376236363730646636636666643665316535326232613634643164633534373264663362336631373264373064313132373935393230643331646433383235616331663062343866616662343863373037306536633435636462623665313132386131363465396637343330663538623062353235383533666533376332366332353264353530666565333663396131373937663432623735363564636338616334303962303635623537383163666663646534386237373734333036656539393033333439323732323361633430363630633937383534356532303936323166363065633762393833343338663962633836323966
30306264383666333237393435386264646331623035663836336132326238376230316261643665613637633332343834626336656631613934663937366535346539376235386461626332366662626461383334663132363233343565633439353232646534373334336163323961356665323432346636323463363837383536616430363061613465643831396330636231363635323639313163323539623730643766633261393930656333303435336666633936616634633266643039356537396431333032333037353339353036356534373933316537363733643036623231313535306131333461383630626237666363613733646265343438623439643931656639636338383662323661393234613063623838373863666333636138613165306338616461646462343633643536383264303362366139346230306465333439323231663339626332333437643138363962393838386661396639313666323432623766353164343566613463623166316134383838363866366437623633323832613335643430666537376138373032346661366633376563363436626433626431376665616639393461383030643866336432616332626635393963316533383764386464623439323663316135356536653263383237346235326564373839366565333830356464373239636131333930376665373934623363623336613136356366373435373764366361326632386337333538663136663763336530306162353866373435363933366564613866626338323639343266623766383263383137356239653833366464333336623138623835393662626634653364353266306362643436396337336635303939376539666361636336643537313433363438383733353033366232383134343139656665656133613030333339386536363736666265373134373734313736383530353833336339316235636533306336353966396633376533633230336230623130663834363732616133623365326432623863356133663166303837373766646662333932306233353538626662326463636235656636653661396564316237663964363333383336373736643339643038303864356138323035626130653836376333633962623339343134376136333238303536306131313337303530386361383532633862376637653166376236616136666265306233393631373732336436626433373463373131326637393361326466663563663230303661363732383062306539393232306566396134643665313864336436376362363536333934323436353536653031333835316431383661663337663465323639396165363132326265373936313763
34363561373530316432333533663134343030633337663439653163326232636231313033333835343133373063623638613762646461643163313732646235616338633963323430393738646162306366306437663739653164333164333038643763393633386365373936666639346635306630316535366163316165626662303766303339613533313830653030616364663236623539303838656438333235386239343035393436633862616238353737626232303731333031393534346262666438306165336161313763623866623761626237303934336161343034343361303836336232653236353034656538623339383462326465393330643531306236663138656139386466356264613062623261306632386638303866396534303863386637316230356265346438616161366563386632383933663034336230633062663837646430333437616635616532626333646432643965623738373532326537666431386539313565396236633031353331373339633433333066633838373839336331636532653839643861366333666465383166666231633364636461313130623965303534366661333961386262613933366235323965613930353337346137333130303432646566666634646263666436336463363937303333323337613735346231653961386533353231363265333937376663383630336265343032653832323966366265383965343465396338316630366164656132626638616232653437636139386461316636666434303632363132356332643138633032366439393132643334633862396237646330333762396262303763343137323138313734633638643431646366393831366235363334663233353465643236663736613065336531393233323164613966316462343532396363616433376236353030633761383337366231366138653033653634376661326636653464303639383336613665633631643664353762646632646364376364366539363131396236663063653637366163633862376463306165363066313865306238656230663636343632353839346336303739316133656239386233333038393334643334646137633264616266626464313432613061363337386639343564613337333565346332393033626232353264306666303966313163343238636434633565356135663833393035656533386438633737336661363431326239343639396637376265323066356339393736663162323062353134653866373565663332343133346337623730616332613862343136333735653336373739396262363864636266346532353464623666333935313131646563373933353265623662
35643561376562623934316631643866316162373133646465653063316231646231343630616337623962383337353464363963623636633539633335386462376336353034306666313730313639326164306661313165643530323734613037653565333235323735626333356633323638366539313139316339633232306235613436386135383663393965616366363861373036306336643537373534373062663834326166616134313163373362303265316333656132636261353165323263323331393037353963353064303031663062616438663633376530383834313334346263306565316632363130363761656562343866343265326361663264363230336232336238663562313231376165656638306262393364623865393130383632623132303031393731343838373331313632636461646164353166363432643531316231343063313032613364393632303532373931653463653064353231313862393065353630356366626633313432333066353430316362393931353737343763346438323233336432386332336431613038643862626362666330613138356233353861633363373835643935346639666663633234323033353838616465613131666535663531313030316632306531303064626639303561633036386261363863636333373935353666616135663765623631303464636266316334333966336661626361333266633361633264303737313063303533663730333364356466336130333634653662323635353063383966316236373536333034346163346131346136653536353764666564663766663630333864343830383139373863373761656431663636653266383361316633663165386332633337626437356262376331376464633837633436353234346362616564633934303733313161633032663633316232373565663064306631386162366338646265613435343637643633643463363639633235306434646637313465663865663939393265306564383536343533303435376432343065323330313066666131393830333236383163663136356334383632653831666262386333613437656564313934316164363565356233626463376139353736343863396563373963326162333765346630396435333933626661343338656436383535656236306562633835633239313239646239643530613932366364373565643435323063653635633932636661303330633761626133373965613634656531373365363866323161386134646234623665653936383735326432626166336131336465323230313931643734323835343536373864303638363738383832383931643133333539643934
61356534656166653538333963336661643534393661613763386536646365353966643634623261323235663633643064343336376537656163386465336135383933373031333834343433303864653238656363613632663433343833383839383438666239643133643835633636313438633662396262313064316630656661646665343932323838653061633237616236353439323639383832333265343032343134306438396639613366646131313531383439616664633030326439626434636364653032643064626432616239343737663636633732366265353633356233326262336231613031666538376565346263613065353161316239396235633831393637653866663461653838633637396532663438643232396432613239316631623666666230356561383934656635353837636432373233393134313238396238353433313261353332343761353131663639666436303435303439653963663364336238623163613862643731343930346561333334626334333137643332623733633765303631343431623736306366656331383464393835306539613935363736643933336131656633366339333534666537363933663065323634306330366537633233313034636530313438626235346461303737326463636265316363323538396539663832383636393335626165613632636132623135393231373764356561653662656530613166316337383565663735383561393766396430333630633765666462343765653666663466313730373339613437656137303135653431396631663439316237613739383635346637643436366164376365613738633232303136626363313063616562336561333061343236306461633531356330383838393666613032373733653561333630346164666633646261346433303864623161353538356461346665333562356566663565393739643936323632633230356338326238333465366265666163313363353636613530386666616233303735333331643037623231333563653339396539373237386464303731663330633036313963393261323438663365343539313139383035343635316533303632616662366562626363323138656434636462306439346633393332333462373337323831383936663763393066393338353532643061316139343563336331636131366162313235646432613536376338613463356661333866316536333863666465356439333034356261336262613733326139346265393766316662393063633034386161376336376537313461303230663630376231383063343938373038346238303665326665396132646632376561393661306432
32326336643133306163663335643139646332643033666331376562393630323839623164333464303366313836396462666331386562353630373465656535323138396532346532643932663032643064616361336637376166333261376566663134636637626237346463306664613033626632613834616161343339343866393365323933353335663038326434623563393139613137326230363733373731653565393561333133366262613234316636396539326132316330393031373738336462363066646132333562616538323462623464643137383166613833626535303564663366656537343666643362626263326462346664626530373734346262616361643735663338653938363333633461616133336462383936316335636335663439366530353231353138373239316230643738656133323534376432656231623763663236323562646166313130636435383038373830663466323230343164343934623233643931623333613637636364636238393438336439393930653737396330353431656332313837396137636665653561353839643163386361653739663433363232306666623337366339613130643061343262653935393834356338393432396530343166366665633939366638396536346166393066326538353133356362356161316232633965323132313465663733656166343635323636333133303339613865333566346335353563343437366233656566623366386336376139313634666464663033613162653338613530653761313936636364393666613035386261353634363039386165343130663337646338353638646332613232646164396561346166313363386638393864336538626462616133306230333465303365633366386362343531353064643161666266313134646636396338373032313439376636316463303130336439356366653636356131373065303764663832373964613661373163613838643232386165383536633034636366613864646561353732643333303265323036333533326635353834306632636535633662636430636132653661636265343535303239316662356439373631636239346666613465643433626364313134333434333561333965383231613032663637326338393839643438663437353430373066356635643262346330633363386161623231643732393630623161626262633761613834306534626563663164663533386664393564626261333530356531343931393633343332313765316532323261383032356231633363623063333164373031373364636265393535346565326330363236336665336532656337663831666566663363
33666230376531633835313937333865616330623962393964666336633061653136633336376536313365383931373965316366646536336565663134356165316536353635313764353935363866643530366261663336636563633132623930356433343665353034303936636332303538346238663037633131396635623863353131353535376265363632333736373239646632626630366135653930633837613162396263353232666133306165616535386239366537623536653866616661323033326165373865633437323932643261363334313662636230316434623938653565643739316562356638326438313837613836326639653239353933366366623933336336326565633537323932663132353530613036336536656664333635656665663132356266656631643763353034376363646233343233356535343631646266323465356238363737373337616630353634393635616666613130646634336462666662616362346139363332383636383364653239613137393534353837316239643261663839366538366636646338326339633130626461323132646130303562363331616438313337666234373261306235396164356235333337383666346536373934633034383039336263306566663638616361313065663864653534306634303734343638363362323833336530653430633534386530366264623761376434653830393937336166616134653834313235313631313739666639653833356439383033396536313031303334346239646465376338303465303065376566643165353663376536643234623338363239666433333634646464316464363961306162636661623961386335343262633666326437346661313865653762313335316533656334356166656433316535346162643561373736613138336430303135333035653234353239386637633130633535373835343763333232303536316262303639333336353538363435363766633736653366636430613864653565326333303330373235396134313734636339353338623463383665623064363136393336313634626466666666386364353133636231386433343738653861646331643632303736313864306662666238333435396336663933663662376432653432393661383762303335666363326237636234616566313732636665333432346333623137613939363134336561303634623337386632623663396361663537373130366366373961346661353763316261323938623965353236653734396661346363666161393335643862623437633034373933656437346162666162303933653236386139323839336539666534336632
34653833323334663536613130333530646562306633333234366562633962306164386134323961396563373639326537636534306465343035653737643937396630353530646636653461663633353432396233383631653363623335313434306663316630666334373732623232336531636166626662383932623335653130623966623430643234366537366263363330623631313731656561376239643366343966373863613139346665393430626539353862666630656263356662613863616638663337303961383830343338356535313865383662393732303835313730316631326431666539323333353266616261623537363264323662333330613033306130386362303461656533653732643665396436653839356663643564633164303234633461383037356462316461333438363861626438313130616165313966346665623137366238323465366661383135326535353061623165373430313061666231383632336237306231323164653030623836326164663065343638303065333165636433636330653133343264313130663237316633303265666463646432363337663662353331313737353463383264313764386664313939336563383931656234656132386135666262396364653763396135353139333866353635303737623133616233346334343861663835653231653963363066323538636533643565643831663963353335343935316564383131313737663032336532306664306133633434373266356566306264613039323130646437343634653039336438396665303763346364653831373663656665666536393566326533393066363332336335613163323135656437363262653835653362623734613637643664633566326436626630313566366239336464376362366266353961663631323634333631393565623638613961393265323563646465656334323533653231383061353564356433623063376430326330393938386435336637383732663965633263653239353339376334656163383162366538626464663866663537653965313462613930643064356636636430383164333334653364383138623838656630323465343763636530376366613966313362663263363634376162333038636162656631616630346465376162316234626462363461343430646264346237656533313764303033306231326131636339303466653930316530336534613634346233343135393135336434623264656230643533653138376631326133306335656132313631646533323666656230636437663264646231353934373466356635383636316637643734623535303966636563383435383132
38376532633033623665313963376463626638353963396537636539633635623535393839326232616339396264613665386264383563636538313266663934613834333532633237633765643837376462323165313066323766643332646638396433393763316133373635626636613334363962663635663437336138393563396637613235313035393666613531363035313739346131623964343436653962356161343264656432383264653963393935663731343634653032653263363733666666663739663661333334356166326530333532663439326432353461383030636365333537343939303235323034646136356161396136666461633734323737356664313730326636353464663062613438656535366435333733323763653663363732383463346533633839313563363464663733666662356338363866633434353962363735333863643261356530393462643461386231646363363531643039393461336563653964306236373364313733383064633865343663323262386632633332383066326438326635316231396138386437353933613831326263613162373766323963383430666663663537353339323538633238613865386139633563666130663230633064653930343635343537613161373962383965386334656264393133653737313866633931626563303339383161386365343835356639363534656463333666353865323166333031393562396531373133353634616337666431326137636536333062653438626636376662306239323033626166616634646134386334633636336334323938346334666338333330306463353135336361363662613165316630376239316237636133666231623432616430323564376461636538303165313539376362373332633431643061643063393464643366306464626664623966653131633938336234666362653237356232656266373831666263376262346263363962376430363631643330613463396364346537653231353738326130363237313138386434386634356635303337383864393837316132633837643234636362383934346464623137376338323831333634643164633232323739323263383665363736326266376464613336313334316165393037383562363461366632663665316639323631353330656131623034373866656637653137616639333662333365323262376636363461356234333966333439356634366234353061346537656464313263393433663763313431633265666461613761356633313538303065333434656331306136323036343633656530373863323738353332333136363734343934343561393565636562
62356134386638663465626666313932346139336532393966353833653764326534306538653035666137666661306436346564366338323034396434323865303837653439336565326161356162653338373038353561336163623932663262366463306363646636363632343537303732353262343964363166316235373135336462653535643163626662616530313565333039626563316461643830636438613361666139636635653438653339386634363735626161646161383135323161303239613532323037313630313830623932396234376131383263356539666232333736333863323739313839646530643131376164353866616431646439356366343333353163613765313036386338353463626366313633316531663765313964393565346165646561333665363364353133633165313536653865663961616264323138386337386433313666313233316439613165326333313532383165323133343061383136636563633265613362353730633838383030336635336438373839633331633339376139346664373663656564643363393861303261376264613837613865303365356264666232396262323739626465353462656136626462653337316164663834323262346537363938333434366535643463643634386232663565663939363665356232653633313930666630303839366235373265326437663464646138653630386562646630303338346566393732353832323138646165396136346139636634303139343466323564353866653632643866636436663761346532363739386262653530643130633761356430373932333731636165636233373134383064376434663564626461376237656331363163343962313330636537363132616139346562386435396564303430623732313133376132323363643465333431643632643833383937646633316165313737616237653166396138353534346563393833373139343437643766323331366435356635653438336361343934336230333662366232363162356466333464363833376266303666633737666332636636663662656438306666356563386162663664343866323730303465326665326538643931643062316436303836303663373461343532326437356534663734373034323336356461313764323930653465353339333866393164303164313662616161386562366635343939643639313861353564653735343139356436393237343238363261356235643963653334653336363964666231346634393632353830326436643733333662613231393632303339326139336364353337393562313964626436383434366564663061663966
61366165353436333634333433613763323633366566663634326631646231386334323162363136366363323864333434373839653235613164613762396538316538653435383666383639613439316338356164363139386264663564386230393266663361303462636263386535366637373961373564363036353537646637316135323236383331663962393835396361346530666537643133663833393030383439383536393037633365623464316237653466313862336465376332303732626235396162373333383738333365666565316435636166613631313537653531663464306636656465633662313938643038303639303564613331373834623338316234663938353063353530363835393730643035363334663733333033353737333033663562356164386135643932343238616663363637333662613565646161356632663362666331646437623666373530346664653333373537656633373835383863666561323963366430353066303836303437626630646563633862646365613534353066313539366233626536366363666131323662383662366231383933356634653164383536336232623933363231656365356531373665653234356231333535373836616661336439363963373266333134383434303934663036346637313866663734333664363930636261396230613332636337303833393834646366663662393461626132653739366566333230663434313065656461613437353938313236636266653231386661306534333237383966306331343332303332663936643532316137633862363833643939663836393765626338646439363735313364633862376335353366373139353934393736376539326262363762646533343633666338363132343336636630613965623431353363316639616333646635336637306661666131616362623865643065616132306430633133613336306162636165663363303432323363646564656132643039376561316437333233326162626132646331623136303135346262393031396462653264353635376131326335616462626531656631326163356535343236336335383364323031653865303363316633373030303761373038653131663133336532646533333133613364393933306139623866613330616530343431303065623964623264666633616232626136363832313233303432623130646134663764633633313738616130633164363735623039333963633563623132396161333730313364636431323464366237393634326539366231333366663030656465613837643636643235393437636466333432653765373061633938373936353662
35303035613334643333653062623764333838363138333331313038303930326536396261343837656530386236653334313233306362303939613465663964336533303732626463636266303438663064346366356537383934633835313663663264326434616337373630303733373937653238616665636632356264386338626434643132323932663630336331393638303266396336353436633162326266333131306438303438646134313366656165376261656166303762316632356138666636373039383430613837323331373131633462383130363061663438373731363535633839366330366366313234653562343636343730323531336132303562323435623639336665373830333030303262303736383235336337636232393861393332396137323832336264303766316535303331313237313863646566656236363962363737306433383761353266616561613538613637633833343335623535363364663936346439616665366534386363346638666638386335326666313961613730316131663330363066363166626430363336396337616331336137343538303464323838613864666637383334313863393231623762303263333366313330336538383165343233626330616634616462623863333436656362393637386530616136343165343864333837623937376663396533336666383034336561646330376665613334383361386631343864633034616339616362376435613166333863323765636165616535306137616337343134366662353665313039636336613165336235616231663364313536396138383565663633303337643966636437393265393764346536373230346462353334306137663338636234303534386236663761666336623566646531303437623536363839396231386437366632336337353832313630333563643239313334356430363134653030396436303834633261656361366565356133356562656136616632393534316237323136616537613064653033303232356339653162633838316633373061613530313738316135336565363137336136633439386262373862333532353134613166303331623732653430326438326236663133613266386165646265626239356462373633646263313935326130653035623062636430343666643830613361636564666336373665333637366334616131613534316639383761373435333362633366363961353534366531663730633233633766326131323562326631316136353937363338343761393765623761376235663137353532613230336431313738393637306331363134633563646165643133313065386636656633
39363637323039653134363564393766396630623365623039333563333261303033306635386438343835386464393238666439376639323963353961366437376430666132373437643462633130306632336538326461383238316464646262323165326235653261343831666332613036633739633861646362303737616564336132313463356364356366316336646665333833653339663637613761613332383963306666613931346434396664363534343331323030396163343065333637373033326136663438313836393265333565346235356530353662343837323732363832313261333662373863393236623839376661633934636536313061623337623434366664366334353564353734366239373462626334326134636638353166383862303466626263323035333262396565383235613534393963333630323166393839643432376538383163623632363032653865643031373962333564613831653639653661613462363164666136376464323131613663356632343236646230396638303936393265306464386461336132333833336463363433663936626235396639343930396332653039366633653230363637623862353737333661646437343537663832636132386635306365386538336139616439303436393066643963623832613037353835373337646636653138303535373934393264646533393338653863616137393439393531393738663335646333643934613163303437646365626562323765616334643065633164323435396162663261333639366639396637303961333539333462396535346564653233303161386336613666636531613366663638343663663762353434623435633965393939393962353265333534343139613130653362343838383766366439623166336461383065336535396132633031353266346666393134666462656430663864386633326339653435326136383832313663356661316337653439343534323435316537313065656561303132356361616238613066653631343634623165333764373963663561613036663339366135326138656134343264643931333835376565363563376663393164363233386564323031326231303865366161313666306237363739333731613531376631336265386263623935303666643330666166623262323364656439616436363139623665363833666436353737393266313431316336663134316338643537333836313833363030663232316636353233333466366339626166653637366630373832323433643864303936333037333161386462613630376531356363646463313033393735346164643461636561393436
61336265343735393539663662353961346334343232333034343138646630313539626663353436643061383330306334313337643133336662663835346666633833343330643864386164666331653538623934363238366634616263303836613562636435313666663861366539363235393534313233613437636331376130333631393831653762353263623130613539343231636632306637376366353564396464363264646661396663623739393035333238623031383766656637393637643565613464363434636531613161363366646132633432306139373161616431633438626535306566323866316435653163653634656566336565316436343330613462643933366163353431303631346366323038626138343137623734333236663635646538376538666130363866316462363130666135623735393565636365393937386563373433623430386639323266316137373631386433646632616662666662386139663537313333353833313261653261313636316164636630313463623039626665393130346661313037323438396363393836663062356162373665326538663563363131616263616263623062326236306534313436636533333635663637373035363433666234343962383037353539333330313037633934373531363339656235326462653830356335646433323666643138343862363462616639343463306432346433626636643066313166356461356462643139303662353430653533303963363533386136313839353338356234356239623361663231383036363633306531343537343432623732376237653133396563653366346536376463316665373730313734653339323063323936373662343666616432333337353638656633386137363232313935326530383562313861333461383330613763343063306265633934336463646437636662633937393536613237616330313662343734323238313834306637396165313563646339646165353361633964343563626662643534633865326362383637323434313731376232343865616239303564653730663363386433343662383763366466333064633032656363313632666266323934313061313338646561386563323337346435663431386131666437633737613230306634643636643735366535333439643538653434646233623837636464613239613637653761663663356239386165326439343365616433626436373566613534613236373237376563383435313233393339663266393739353935383934663935633835363764383431643364646339303432656437386539393164663761333639393164663037666138343336
31643938326436613561306165383866363437383566356463323566376465303631323139316436656232653233343636343566663635393937353638633435383665396564646664643233383263623232373664626534373537343062323733323935326263303739333566623065393237636636396264386130353361373639326230346136373163623761303230306364393463396638326634383861613262613064363838343431353037366432633136356438363933353562393539326362656532633066663765303865386431343731623135356564346435643234393038326561353431613664323964313237653663636539313531623531623362326330343635356438386263376461313230363534393734306161373731616333346137346230343963366632613638646135303130313962383837363935396665366334326133613436343530356433393331343833396435366135663437326366663338653232643766326236613461333439656239323165353665396335353037323031346130363131306533376362656534626133356463666439316238333865636662636636323837383137336361323637373161393438376136303530373931316363383033663666303563653033626532316465396165393631623836636235396261336263393265376135366462633930313961313938396134653935313437613939323333353038316232616339616636316262343635386566626531623335353834633034326462326166663139386666633361333333366331336232636434636137346534393766656230663632326164326539313266353532346166306539366266336566613731646237356264346232616537343735653036623731393835633235633039643166383064623130623565633238636332343233393464383636313666303164313666346630373731346634656264396334336233623633356537363137626534363632616334613330613139653431303434313163346333323738616563626638343732663830313737383237373662333533663533323965613931306561363432646263663234303330643935316565653433643936316631313061636265303438343638356265353531396131333733613233613861326330343537363663366337303031626565343662373437393735666237633766323434306332333465643830353437346238663830323665336333633266643230323131306663356136633938343035306266383238363437313738623230623537313737616464653464356364663336663565336339396564303463303462363239613531646534663836373436366338343166666666
66336165353636363132626230363362623934303264303761346332336162323233393561636566383933353164343164646263323535346565616366663965393133653530366537333335366332306562366264373233373834306134313930323234393735363661383633356565376339393930626263393464376566376564393263336636303032306430313761373033643163613235656566353965383532613262353535376238383137373465323036336662386433663531653661663263653738303434316130663662313036303663336437653861373637373161636539653265653836386362623863366363323439633436646632396534623862643661613764616365613137656331386437336665633838383365653530366430633639373066326333376530323730666161366334313736363265333030316365653563373837303766623830636464393132616230383537333735343630666431353662333630626638333737623331633366323231323966386364623366383264656233636534306634306436346539323063663961313832646138616263393435613137336330343161393261373438643638343961623361386163316630636339353466643333323661616635373962323337653938653033356539353933313662363536653833316563663037653763646261373331313631613737336531383165376134646337633838363637633237393538626463646365336437323134306232636665303033386262636139343363323962373436343932343961313237343732636238353063386435333833313264656565623336343631623539326430303536323933386336303661656161336466363362346138636438346466356234663137633337373637376439356566323637303830653133376366636430646664326330383964356162363338303939373038303861353133336439636630396632353734343432356236613236393938623931363938333032343063366565616231333766636466653637643463383632383637393634336634643030376135353632336136643566663164333466626664623838343065316438633135373061393731353732643237383230386562393965343035356461666131633336393464343563333465306665303031613665336161613731663632616238336132383335306631303464313165373938336261663131333438663862366635313363386161616638356533633332373730616637636135613838333332396138383965376233356662356464316436636665373864623133636131313164643163626465346530633161396135663430616363383231613137393635
38626562653161333962646132306433303036653434346164366136353535386232353234633730393638346438313839386135636361373466666231333365663436333533393235663862373166323264356238646334373930326664623665336232306666353735666165356463383631663136333963383937323938316564306261383030383831633462633538376132623766383364343439626137306262396661663833383536316362306539356438663161326639393338376530646631363236346533396434363739643233626231646237366237316565623331303061326363343233633036656430623432323363336132333139623330616631626262363133636364636661386566376234393864663333316637316663646539303339323161346162626130353632613737663138333533376439316666386362613436323264316535623866333161636463633765316237623666316163643233623730636633353561613033613739303865383161336664653732616164313139343266366531323564653562663833363664633539653536336264653539366264343530366131323930393633613564366265306535643132646466343464316231633363376665643134343636356239656532336464373237653033343162346265643333346166353865623230373263643165623137623535303066346333383138363431373632646439343730306533363134373035396662633933633632623432366638316163626130303364376139386334373839326133303662343537633730326366356231353634336336363165303863623434653434656437666564373738303461626162363330613937616666323438643931636432353461653666373866346265336337646263663436303534353835333535306333666130656463613030303262386139386231633033383337396665363634333163396130393933376330333035663032353036336565373034383034636465373439386135666663356233666635373535316666326333383332313462316638623130333032323234346263386236356265366632623337663131376465623433376532303336623264616233653035373531643138373932356662386462333464633366613231636534616234653439346635313361313037626330376663363835326364333730653333326533333931663565663437396134653164366134303864353761313365613834366465396465396130326161393736623438326237643937373434303232366663393463636432666465343766396439306566313736383461303565626636383361346263646338316363373962313963626533
37643565333637663439383031306166303939323464326562343138336163303061616532616433646262376264646462396633643131373561313866363139623065386634353965346465316333353063356334326235353965643933333136663035353830646330326531616164336465353366303733313231303965343132373864653034383065363166663034663033643166616462353865636461373862316130386539656537616331346334356465396164393361613138623633323835343639306630346135343435663439376635363132333730346533646232663332343936336665323137383563633532616336353239326130636633643162646335303431666437643734626531626539353661316333643066343562656330616434396235663435616538633564356234333561366332613462383538333962303436323030373462393666356635393562616162356663623337353731653166653436386462376637363862356132333565643535656531383130333866373461373965623830626130643635333761323061623132366333353236663761303036636166653431353636323964353837366133373761366638316636366137393136656564666437316136636566333763333662393765623736316138353037373739343237616430656463353635613265343930623462363432643033646262653739393364393138653261656437363031326362343031373763333763333634373238366261363633333464666363643737633130333062396634323334396139393930316639363266336533343536653230393335306235306663366137646362636235346436363037313764396139323638663735373331636533313937643561613934306266613465323361613339653733333663613365313931316635616131363030313562336132663531646664643665396366656566356537636233356434633138366536626162346133353330376536373864326236323731656462653834303134643634353936353635346333626337306165353465363938643830623264343465613461313063386534373135333833343737333562363663656533396433313463303564626233326362316333396266343930613231373038643335393033373463323663393238646539386439316165323039396263656630666662626138393731636137393536653132326639663466396633353061666266393234383236366634616536313838643133373134383863303030383035653563363464333762376538643033396561653361323030343436613930306532346462633734356630393964356262386266313233653063356230
64653731663862323439323730623637343038306536326365323465396632356564383739353934306439623163343630633334336539363564323635633365316465653133333435626637303137326534346664626131626661653930393061376462373165356232393332346430383566656135626562383735623263653963316565636536366339373365613338393635353034303765316562656133656136616634373535613765373064366634396237313465343965643765666562656461303839333133386661656136356531333733636235636436363335333931653461636463663434626432663962343765343231663234376566613932356433626632643635383633343433356236623339363434613133396630316463616236373831303939383133643638326631396136393261373532353434323161336363636536633763373433363533383730376638643931653233656537626564383239363131663934396233393031303866326463396436383262353139366136643939643562386631313534636638656236313862643135346535376335313961646136616662663234373030383062643736626166313437653133373332643236626465383838303266643564613864346134633037323934383931366365626333633631383362336263323732616430376665663933323064343062613465356461346132653231626434326261376564316233326366663063663661636635323833653665323966636562303235353864663266656133643534316634376233613937303230396235356462393061386437663064326463303762626530323736623064356338373165383863333364366466303261313239666131306138663261316166383833623362636530386662653064653832316536323339633665383363633437653664663935633561336363643837613132326639393363623365653436383231626339333131663437636338393835333761313937316262383930396631313534366632643066663337353432333536396538646164336133313232626562313530633464363130363034663365383564386165616533643030376336343738376538376364616133653066646437653339376530366639313234626532313434666638643534383566313963373332633836626336386565663633306236303736653034333936333062623465373839623962666161663833323364393630633465383566383430303633396135666230383334646665623062373066336463633734343162323462363231323435323163393732663034373737343033626536613532626135623939393766613030623365353962313861
31623964636132653166333735336330333138396132396361363339373939386334393762633363343664646463356435613534353236316236333631346662336466363033396339346562623737346431623137313832613137356565356532313437636362616363316633613737623434356166343964366539643663633330323761356338333135356439666230346139346262643439333234393735643637653961376239306336383035646538323166656238636132383961346435393164333632663431313934613638613035646436303537613433303338613133396632623730313633326166626536313434643636303338323439323735316637346536633832616436653431336238613732393631636432396230636166636637623263653736643966383262356135643131326338376535613665626231306464643865623130663461356631643137356634356632316635623033326463636262656664306563346331643263633765366331663931363335303764363832636361336139646131326266396531623661386339306330393531653330643366336539636433643939616238666433386462633063313865336530366531333061376464373065366235333337653937666239663461363237386139343739306432353535623839623036633136386361646361373064313838323362373166366164396339643432363437353434323232303662616233643632326135373136313030653964313864346337323661373365393636303762643035666564363731363535386539333566346433636165653061396630653563653939386535383031363535613361306464363234336263656131383836333565643534616538323665653036323732363163333331363665623966346131623739633930636636656637376335353233366438383434343833653461666533646534613536623337613730613430646465316333303938376663613263356635363433313166336261643633313266343337356533653134626631643562316135323030623034336231663032616338373762316661386636313737633237636238666265303163383534656262396638626330633931313236633066326431653366303464646136316236343538383035346562363738323534383961333634373862613162393266663930393661353233333634343562643935366339313239303234666536613332326263316636383461303038306234363865663638623332643636393731656263333138313738636638326466323439343263393037356439616566353637633765376635363338373434386537323265613739633830363265313363
39306163653031393431663732633732383639393531653536343963623662396638366464346335373137326230636631663839306530383262373734336437616432666231353432346164626461363438633361663336336333366636386432386662396362306163633435373136363835323063383138393463653339306234336234366364366239303630306665643131393163623334383962616432323934393630643736303935363535323538356631633664313732376263646434623834333136623935646661323962313231373864356538356331623731646432306561383337666336303032393035333739336163623765356564393335373163353639366235633564653932656436396466386339366334636537663632653331393037376563363764633363643761646539386236313635363562383930623162353637343034666337366263353261666534366634343530353933373862653932333465356436393736643164613339306135353635653431313832643262316465636533643132353035306330303232663832613763323861373735373362366632363730663132636161616636366263656365313431653833306438653838636439623565356536343436393932646163656162653962393561326538306238376266343138316261643836663165316436383834663966383734356461303837613432363264346264336439333132306439646635383434333861383765363739613939356538353362623066363961623966633031666365663963386137353535376338316339353465653934663334323364306362303863323233323461623034326263616535336339643935613461366364373038366464323135366565343261346638333637303933653565623166653733663564643166313231396366613234643461303438383563633761626434333832623135373335326133653961333730613265653139313837393630636161356437623039663937666362363436363134663134366464363435366564396562613966633338346630363435316563643731623463626538313234323137646165343438656564373431643432646338323735613232636435653532356435616536383533646430653334393134343538353331343831623939356662663564633237336264353664656131313462333432383235306638303737653030333264336330636537613761383166373337366563666465333734616635633965326339633964623339316166386137333933663665343164633839623166393565393831656264633937326364363436626336353065386234356632326262613565306435613534613234
64356134616636326464363164383864646634666134636435336333343335626231393834653438363735396564313237353461353263336262353033313939363632633763636431313161373039373765353366653030613430653662623461636434646333666661633432316430386565366332323361313465373434646166303037323762313435373838626637663035666637343863383664633265393164313534353863393463626236663332366333656638386664373537343033353466326632616435373532326631363661363831653937316563653631353836373064663832343663376239616539633534343739613930343330323763346665313161356335626164623265366532353963363634393162333862313639313764353464333535326166396665336666663734346266373637343636623132623033643637313636393839383535393561316130313465333735646139333336643632376636356265396238363337346162643865396430633030303038393963653534393064653532646439303366303065383838393334316533346132386565346134326663363333313065613435306262386263373237353339663063636337333133353663333339303665313864353666633061373663363732643965636264663434363334663431356264376263393936393532383837666165633638316438656532306164623838376664656334356138363563316437396632616234663932376632613734623664373364643561646165376661396332303434376566346537613630383237636438326264343064353137303162353364643864363661353666623763666264636134303334623737646666633762393332663332643866313966623831353439613433386566393039656238653661386331386131326537383433333264383364653337343135356230356133323430633939323330643965306263323836643761616664653866643034396166386434613466336438623366656236376566646632303637656330396438663562313534653235336633323038396164633832346230323236653662393863663235616238663638633037353664636336363161353064303336653366336639326533666363636332666161613439623763636433646563656339663865393636313235663666633231333161383337633937366564636162343933646639303763643734633565336665336332303234646233333236333531346537313239633435663833313438376637373035613063336666666536343232643364393466623064363736623432323236343661646336333639346361383062653332623238366566353262
61663663633465333232373139613637396366346366656130303633353238363462383535636336363331306530626238386230353166373535373432313231616232636638363665613936336239303831366532386663333831316335663435646130313165316637316266363763313137616430366261383938653234323934613534623235333336393633343738613062353234663166393735636362363730623632666162623139323265326666633235383833303166623336373466303331373832346564386136656666326364376463643565613232386333356430383730303363623739356239656165326461313664626162643137653133306535363938313039396265636132393264653235323035363232666263346537353635363031363166376334616331316364633931313136626136316164623063633965616333646564303766326164303030363635376231306130343166363239303961393336646363383033653435386133323061396539326161336137613761306461323937343764636337353062343737353737396664326466336665363535663430633435333432373532316265363732653063666463643138333339353934653832373735303031323366363762306139326163373838313734356339633738666536356665656233346464356630313330323036396135343139313438656434646531653766323731323562363338636530623638356334646632663431303535326230653036626130333163356232333965343831633463653730323662643034613536326165643835623066383264653632366236333438373465646331376133323738653236653333313635663763393034346637616461623730346330303563646164653833333237656336396665653539366365303964653832346264333266613836646365633835383064383134303965383233623136353230336232353233363165623930663161313930306564373863333536646235303531393635303639623364363364306437656639633934376362656135633831323765616239353738613238636461383663323865383565383530303233633263393661633638343563313431613030353236613665373831656664396335623836623664373466656630613836666566326130653636373739336564303837323938366665356235613336343830386466373437313631303131333339373138633565643664333433346564616433636564386235306133303363333261613963646539613236343761356439666562633130316635323638373165373631643365363063316531653434393831393561633164653564363962663762323439
30613434613931653534646466316535393137663362313066326234316262386464363932653035323132366434313339333236643830656431326662336561366630393634653666316562363736326437313639343337353162613638393062633934613838333434623035633832643635663862373661653861363763326434653934353732336636393562623131393766656538323536336662356466363163323631663863313365393034396430306164633836323663656138373838613663343461346337633633626435626332353663333566616231313364343632326534653963343165653836333838336234613637336661316232356537623237306138616535656266373330386264333335346139633130656366646436303862623533346362666635613037353439346231306236643636666333626130343562323439323666323363313231626666636464363634396137383666333662633931363534316136623661386635363134376131646630626366316265383839646164373262663832646362643534373635313536383939643931623938333465643536623431333062643564633066306463313364316231323233323431353035653066306236316536623965306662376662353461336666613630323730393466316337313634616530363934666334303530666362613362626630643438633333343766633165366161373261313861626461333732643538663539393330383537393336643465326139313165353832396138366335623166393737623737646364633265663963343062613133613731656535616439393031333538623039306461343366346237626434373436316234303661633839303637613733623839306264333233333339643335306661306138623862323263373764303866666436613564333035386635626237643031663338303532643631393832613935626138333665306334303762386463363131643364343238656638393539666535353764613436343230666534393431333539366566336233616361313839343437333663646464663363653338643366346137366639613039353338653139376634353162653831666361663861636134633761313035303434636461393832396631653135633562393038646639623531336466323063376330663938343730363533663566373763393961336530383839353036356231343133323834646461613632643532366338373737633166313339626631336164326430633761653937653537313661393836636637366137373937303436656162343961663638333035626463373836373664303231363636623561396165646337613337
63363339393236346136303139626137363831653162393434373466666534623366656364613530663732646362313838643564613463303037633065373235646238373162306237653635623639653338343364643939663934663362646131663132363330323734623031393738623362623561646464396161326537376433343231616332333934663733616330646366373365613433323331336437626232643337643332376137663334376363356336656566653739656134363635303233316566373262363132326633626634666364383130613765323734623834306132336331383434336262643764343464326166663162646438346463663636386136633130356138633262363833643161626239313764366434343364656363363730616164353735316164626136333563343931613366386233366365333063336534346130363466346338323836663731363837636538353332393034623932316365643839623661376265373065663932643161663530343866336166623332613734356265653961643866363038373961663661333437633938346631386162376235626335346237646534393662336436653932366132633037633131316166376534373531613862323064623232326431346432643764353737326234326538373834383362356561306637383561653862393063613436356662343537333862323930666336656565393934663434393830313832666663356164373236323836316664333063306634633232386335616537303530616432316264623462613266316238306561346432346532313935666461323962643664353237313938626538353935386433633335653436623439623134646465663261343865393234633762363438623566643038613239646666353033323133616463313062643062336336623162343966653737643035646264343764663566386637336138663231343633653938633935393961663238626432633935326531653365653039373836316563393264346662366234356135663063383439353663316665343964383632356336373034613936386162353038623139653333393135323039343232643339336634313566666665376234383431383061373362326666353937383439376532313832616135366364646236366533343764386364303635313231333937373836386265386335313562333938303134653838393862363363626532633965306339393163306638643836633661613337633364303166363836656363366364306362636361633934343438363965373165366234376138376564643462346665336263373834363736656136666562653733323037
65393736336665393435666630376138373263336465356234326161306661346562626137376339363265376430353731616236373931366165333031653361356436333763383239643436356537343864343930303538303239366233316536373539393934653863303734303239653166353033323935353039636664333864316336616134663634366161616562663266373262373262346433336264613831346636616233643738353032636232383266363534363338303837386263333866313565376431656165323531343063336561353431363065326632346234663765343763643437353062336362343362393239303234626661353634613634663265343238303939303837313364643238663164363365633438346332626531313364656632313538323838323266616132646436323634363734356136313566366462633561663336303133356165633731616664633030643034383665663463336636646362356332623332636564313933373836383764393739343134666432653864306461633761663363346466373637393061646430646665663131346435373965653062363365383364656262303464643932626238363735356233386336626666336631343037396134653631393439366466663034373537343130386561346238663339373138663566626339663164363466653435393432383765303132363964396336393137393666396162363137336432313766643231623264323934653063363733333932343439396537323735356463323537663663333339613138323463326236356536306135646563303366343235363762343337326431633761353038616162363634343065633839373965383537316538396665373562353032386237393662393865306132643832306230653432623230616263396630363330383932386166353564306564316133656139353866306262663164356161666230653561613663646437656663616662623737323436633764373062346134646463313866633763386262623635386235353138346138326138653232333537306663383634656663316638393131623538396163353533303238326138386462373061616363633133393130393565343730626165303362396531393733373562623433393830346266613062663162393438343161313238393033363431396237393635326463613733333739303336346465666664636566663933396230613039613138303638623235613233386234303936366636373239306464333738623433636331643636316139303637343634376235333333363164363763633061383337623439353437393565333736313862373831
66346238376162346230313734346264393663323866356138333531333231393636333663386661626332306436623064643966356534316666373362646232366434626236643463373232326161643838663837643236303538646561373832363230346138356635363835616562333538616461393037323633653539383561306137373363333733306463613261343761383439316266336164666432393539356562653836663166646131313737623835656130616538323338653666626338343861636539386162616237383764353836323139353230666462306633386465373134323764616261616165323062306137313566363535636664346262613365623866316334343236363631313835336236363034643034373834613137373666306362353361643037376462326232386664366535383561376530393761623233343239376538633235306162396466376530643833656631326238336465663938303736366561396238346663313735633566343834643036366633623661343166363834643963303539336465646366633564333333383465376663303931383030316636393166346634633636383261623365366163386263366434396235363837323134363564656538636663666630326162393161356166313231646463643136373733303665333138306431666533623334663131663266356563326336306364316539366635303031646538376139646339393535386239313961663231633534343738643234343463643333353635313337363831383764663034616333666264383562366163646430633535343464313935626132353837343334333566643436313030353539663436636162623233653436306565373462346163633638373037663463363034376439333737326565336431393439323465343931636239353439346332636334356632663937616366393131353135333764363462643631343632353236353265376234386530323138613431303162333832633134393335333531366538396161353166343531343537623734313437303437353534303564323035376163336164396439623565353666396665633739323265376532396430363066623563666566366266373265393733643633356662646233303939393531356165326438326438306637356237356364663639316635643461653232613539336636366632303866323136656363336361376538653765636665366236653939333362333031366237343261363864383231656539663934643061396435383531663462303334646632316335363663326466386230333231343933666338356565663436313637363363386131313162
36386537333534616162313131663237616635373766373635616161633539353539393030383239313631666339386330633030393532646266316436326631653831316530363836373061656662393564393235343534663661316637343837303333663630303739343233666662353138616464666338366564393338303063363865356431316135373338306532393737313230643038373362316564653730376539396362633961363663363032663539666665646535316362626438653836306334613464393431353337623036366562323166306133643534343939363137323330643937316135633833646662383337336437386537663765633339306635393039373834363638663562316634326532313165326137636662633030613431663736333662336237366430616430303164636532313365626339653164633766333531396265323366326661623334643162303163653866316238373838323263376263323036376565343661313430636565323433393933616436653836616235646539376363386261383431613131653166316166333763663133633634353237386563373862366166386465383234663630323739346462303339326231326535626462666536366364313566393465326666333535653664393133396166613461313337306462356363613530303766303861323163626634636434386432663931363236646531636536333135366439653934376138386531353536643063303162663831313166613463616634373439333763353135313339326633353562633062623230663763663062363432636339313832333031303337383338626534346530616537313138616233346530383336623465653838313534663234356239643262346232396662613031383064396130353737356636386664373865386461366164313830336130343961326236303434383931616537353637323337353133346365366635386561626465353433333065626439623837353265373961323530316230383663326335353532376536613535386163363638396161316535626562396635316230313766383966383234623438653264393264663664353839626665653635366135386337356237653735313738363836643566656263643762656534396263636336383933376537356334373963373339356530613861653233303466643931656138313833343036356466363566656366316230656636336430383436363531333461303166626462643964366562643433353030616439386665303635393561666533336333313465366662383735336336313933363261346366356631626137383238306531616664373764
36326365336432666333613837643061326236316139353434383636323739396538346632643037383935353839333134646536376233666462323136653631663335343562343636616239313665653036386563366437363335613034393434623239653764353539343666303930316336653637386563616564373866636565666635313362336663626239323731386137653832663239393535333136393831363665313163333066303937373963383230613031323666633637373039303230383239626435336637646530313436653136636164646264313236396431326666316137663464386139653537623165333264616132363136376538613366386635366338376564333137313533343362376464356238386632666362323764303466393263363461373733326239303133656466626530346164393438613632323163663738386436393136373336663030623430396131356366666130346561643832656236616339373432343735313065333630376538396562376337313738353565343162376337653330343131326562333738363132353763366364623831333563323466353632323731623164326635623238393762363839336635343063623538333435326337613333326361663134386464306565656266663362656465653134613633303735353563363032323939333936636339636139313461616262393263376439396631353639613531353361333833623838343965366163366536366638646536633731633166626664393832353831366262326565316564343832376161633332626530323464656532376539386430376137333035316663393863653633333162363263383830333532626530386431643164303631363938653866356364363665306263393935646666373836653835383535363732323933366462356130336138393630313762663831616462613531616633306133383239663737356230356535373838636261386538366631636462336531633630386439333066353533333861323935313339313333363333366461333738333963313264626465626133656533393735346263643738306164386231396439393939323031623062346135336466653461616337663365356561616335653536386336356464363531393636653866303265653433383561353932666237343534333964336164643432386363353538653538336333323934376435613432643161623136306566323362656566633230313830623062333361353863353637666264366630353139666236333934316663646461643763626533313730633038396130653862353936386431626331363437323532663939613337
39666538623632363631393938636432643766643137313134396464356266353764313863656333353762366161396331636366623530303039373434643264646564323237373738363363656237393630306634653063353062363936613165363232336138623863313163643238303865386462663732393435323030623561393639623431383435356464643432356535356134383633373538373935303761663531656466306661386236393332343531363036306331306131393539626562343431643231663231336366363939313835313431326130393365663433636162636530383632333432653162363038326266366239383831336435323765626465383130376430306562353036616362663865333530396234363061626263323061373433323430393363353734373031393138333462396631653832316438373336323662326366313637643136373062393633306361303034616364653461636465633965383539313835343663386439643834383566623066376464303531633664633131376534643633333363306265373262326262373262653434623634393065393836343730396662313532643737366262393534323334303536663239363731376165393464383366336430623330646137613934396332616535626566303435613734343436646663633837323064633830653564646136626139633263336565393732383030636631393134366634336438656466626436643533656535346336373633663531313936303365646364393464626633306332363965323437343036383634366163366232353938653236333464386334613364623435346431313533636130346339643531363239366636323130633634333764366430613234363862663735626338373135313364313335633564396562616637313165306335346265383366646166633633663963326131616661623063626537363632336464346430323434363934643466626432336464343833386132313734366466343664363461373232626533356235343966646336663162313132303737396531656565376662373636376165343563346133663235393264633534363562363831363032383262663330313737626139646562646437336532626233313633666532343163323333373536643735343238353761623662333134386261663337343836373532373462316133653935376666643331363764363335383664306234616261306434333561646239636238323430313266616539656664623064343830343232373863363462613864373363316533613163613131623263383438613032666433326661326539326536653939616434396437
61323766386565333262393130303831643662666464363538383566323665386661653538396366343265613339313332616661636133366364323161396432353635323266366261646336323937313762313865333632653234386533353235306561663035323038346337646634666261336633353935343233613032616161653035333532613166353063626133353639373938313031326232383332383733613662313736376561396462383962653836386166656137623264636538303338306231633531623738313230666638643937666664363562346536373834626361336664636561613631363030323663343066656533333433323533373836393062363233323138326331303037323864313736316235333433643663316461633563316165313531356538663234633661383965616563616462326265613832313432326330663861363030353238366464646234633436363063326237373732396161383363323231663363353130343861313837383436343033653535323565363130353237646531626132383732386664393638376136346532663164383663616562353065316631346337646536313938626531383031623435643031636136646662363861386430313063643836636563363961626339393330313632613235376234396634303662623136653062306334663062393738383134323263653431323361303962326666633464323566336238633934323234656166313437316431363234353964313536326430633833353964343935633931323038366166346638633138333434636664666331303035386361366661623063363064393435343636303733333563303962343233323865346231646136333136373736366232343233376235363931313666643862396234376635656330303534306138316531323431623336613930656233333633343439613134653566356137303665386536363736303232303339366235313235366337613537626532343839373163303763346635306562646234643064326631323638373466373235303863666436663031623333343961326563383034663834626631636632396461313932623461343263356331396438356233616435656564326362636532396432373930323762396132363034623135643239326230336632336536643436366631303762303332393832663837386662333062613662633766323335306431366434323064656165633930616134303835353531383835353136383436303533323561396363633637383963343235366264353462306239313330633961666634313637323832646266616139613435323535383962306331643962343161
39333037356466333136646336373762373031343938303365323163333639613430623033663338663364363333656363376330373938376261626430643433633063373332643931633133656162633663393833366332633535343336316565303939353432303036373932653061333265366362636136336136333739363339393133373666343537636461333661326664366633396136346639376538356434663133336634306464346562373031393938313437333861313463623438366534353766643836306465313765386533333039633365363334656432346239646466303535383661663965363737626662653061613533386339336564613233303533393136396562373161633735653338396264383033336265303832323064386431343835353761663035663732656631313034646133363039393735633664326133656235323961343634306531393062363861356134643465303737353335636464396434313031376537376334303235306339346663666164343035663330366539353161353566363935643936363736386439343038333535346235373338356161636139386564656431633134306135663436323535653032653534353063303636336262636636336230663335306132306465633261663166653930656238313864376233306335343835303262313561383233313432393438316630353634646466663138386139626261366363626630396632353763623231396137373834383131646238363637323763383735633237376161626261643565336638306133653036663763623363306335303765636265386637653266633061303733653265313562346337643265373635376166663965653637366631343263323364323938386630353464656630373137346536383061326636333533373362343061666131333736376662313331636538393833653037656638376238313738333362336164656637373138663434663561383465383733303737663139343835646239373636656563623639363739643838653236613837326130346436323330643464343465663936393164323866623138363664663437623532373534636434303534396133616365343333343438646335316265323838353530643434316330656262333364336534373931666664353862303439636461306663333165333766383762386334343964643134313133363663626664373939396239386438336535336431356564396666646663303937343364613739636433346535636135303731656436306465346130303666373361653734326234393336346635653136353461653031326334653730633661336638363930616162
34383965363639623462623430376663616539373865366636396161663361663566326235636232636234356239353566386664653162316263633164656339346630313635656438323966306234353166646233333636346638613163316431393661626430623563346430306634376365373365396562646637346162666233356265396638343264313433623733636633303262303939653832393634656337366431346534633632383364396132633263663336316130633030346631373265353231643562643461656534646334373563363661323030333937343639396333653538333061376238643137303261623439343631643231313936616533323030326163356163356363663037656539366364646230656335346439633763326639616462366666343134353161396261373731383635326265656431323832353463633234386265343832303236656666336535643739623131363366356439623132303764353439353937353863313633616631316331353731616132303866376239623833306133356534613830393361633138643064653039366638363333353130333561333362393832303532376463323564306131373865363966613239306136616337393731643937313237626333653966306132636536663134336331643662646133613164633464316161663730393038663635326333333763343561393238343031303365666337386661346134373139393037373632326433306462323234383266336635313765663935636330633032306433363139386136386138633335313331646465373130643234653734363938373036323363633535356635336462626132343531303264663563616335626464393139646234313239366331366432313232343330626339303339383736306330316163656162323762333463333330383638613734353134353366346633383034313731636439313036636339376339346330313838393662303766613233356631323532373737633065366239316638366466626536343762656239616237666665643930306238366261616264663764396263663031663238353533666133613938383535666438356234616133613432366565366636366561316636353837333232356162366461323737303336366466373038663065313765623034366132663464613565613364396264356137623363356661633864383639373832316362613831306135303832373031633131633464313139343734663465343835313839646462656435663032623966353162373536373931616461306164666465393332666565346336316630313333353736333964323765623665653031623864
36353430353036313862306232613563316465376137316235636166383936643332316232646363646136633763333032623563303464613931653135323063623234356361633135303635636266626632383365353663343066663666663637663733323034313865636233383630663533626234326362373264653363316133376239613434396131373665313636373034633733343566333463643130643362383232326233653763643565646630636231366333343533326434653837353432396331376235393139396430616533383132643132393032303534303465323037343761393661636161613533613537306631333765663530353738326166653738623832613266386137353863343362353733303730346133663461386264643564373762663930323134633764623039656565356465396339643834373331376532373933633536316538316663333633633664323234313939656239633635353535316535376266363635376235633363326562326438346633306666626162343563333137653231656561316330323038646631386637383562616536346239653462343164613830653731356263393734326534303135623637316565653534653238623430383462313030366632343366386166663861616531616530386566376566336565636639633466366331626232653238363438326366623738363932383139613462373964313930346239643734303366393333663235646436663865343136326663613735376130646239613461316337303264653139626362386137326538343139383637633630353064663264653363643066396232646231633630323131316465646237343838336339626233366431366237366165316539373031663265333462373461653133663261643964313937336132316633613931643434303035363136316437373038313266393035393738663431653862373537623063336336663539323332396530663962383739396336623839653564363065313131646630393733313038393961376639616566393565643462393836393263616636323036343966306536663762633335636363353935323630353265386139393030316136376238313966386539333631343036656438666232353663393664343162656335326339396436646336623532346531386134353731656264373733333461646132343132376265326237663630646335366331393030306432663361633536313639363531666232626462643935653563636231316534393034366361653465306161613931623666326331663464613361396534623336393763313764663938636161386234663433313735623266
35396634376237623366623163396430643531663561613430623335333862333463363964623031323764656237326366303936623864616632316435636663343663383832386436663962633264353233383764646537613364656238333965373838333131343238336533393864633664306661343335333031646337326335373735353762613736626663626230353538366235393736633933643439613832363365633533303735383739653464613536346331346462633736623936363439353338653934653133303566376338616363653438343934333539343932326633663431636238383462313963653337343666323830353764386466633231653137316630383365393839353838643862643864383865353536656261316232393531663866363538316239623435346137333831303032333866633361323362636631633739393135323731623636376131393438633537373334636438656661643634323633373162383061383866353465616238393932393862373136646538393863623130663161303638626434376366643032353061623030323532396462313733303039393465366335333262306530326632386232353731353231656263383532623133323238343630643361323938326461353464363563376361633061643165363630333139363364323338663565633063656532336361356663353230343334666262346338353234393364356666383133313563383830663937316331613935393965616166363565623563363066393564393538663364333264633032663730643538333738636661656562333331376533376337393138653762633535613862346438616636386136363761653466646238616231326364323962653639316637633834623131623736366161646163613962393031343166663232623863333733653161653830316530633533623532646262613231356435386139366564393963613533353161396664636535656332356564393566633835333961623534313139393737363930396163666439316436663333306266616164393561373861633432363662396536636231366530623964343837336265643963613266303131333966326237643637636366643666663164643764663130383562393132343730313561326663376234366536643030323931633030376335356330646366653339316238663930356630616232663931666333393037366434633532396333396662613631373266306365333136343262383433383666346565623763333965346135396263393038636333343666363865323932633936633534373661396162343730663737323633333632663962313162
64633433626334623530316337393765666263646433373335316130616230316132393832366665346239653233623864393862316631323337366163313738336433643832343539383132393637646432383534333836393835313164376334373039316431333435393539363061333462613664663731303561656364656335366266373831663838346665653636303966653364623235346634313964303236363362383036633462663330613939643834633866633836306332353035643831333762356161623564363233313331646537613635343762313364623132323930343362326363616437616335353539376361386631343435373963323034623164323839366433663939303630633465363862393930613630643932653536353662353764393536346162653065376635346665323564643266333837323263356535653134636661633433643436353332313136303630346463356264386537366362326632303563646266636638356262323934363035373239363639373431343031353033366331376264313935666663366662363536346263346234663832306332373731376666356265396265633966613062313261643338326364343535383333343537636434393361323465656137623231653930636362393432326263643237623333313434313537666364383037363030343161626137626561383730386234366434663033623935663134333331336237356262376430323937376139623538616465623139623862643634643261336431623464613234616364653239346663646665633963363737663730616661363264383033323035663234383832383362353339393033303934336537656136656337303433373939333739393933616337343938363164346432373836623365653536306262346165663535393461353862336661643235336361346235646661643631646632636364353438323032343065383935613730623330303237383933306139383435343930373839623037386132353966363236366535373163623563383861336461313236366161653530383237646563656666303562396530613763376438666230373063633734643732626364616534636465303962313837666234633530613366663261643163316138323965346137376330343961616165643964376564316630393136316638653166313932666666663461343164393562363866363733386631313665663036373834333239393037663566626537613339356166323432653165636233326638613361663236386562313166613532303930663437363230353037346132363062363864343330396565313732323434656563
34623265383565343339383166643364393733613036353137633036333336333535386231666530626534343934633663323437333235653531356232373861313231386632633339326439646563666336303732383665663436313761346537343238363166653532366163343833346561613234653036373466313334303832633938666333623062366333623030653666316237613238616534373663376338353530346638646461356439333763663932316635373265356536323637646538323138333664326335336265346131303761653737626335323534633436616362616230616663663134393231373239626663363461373466383831346361656365626164363562323537376364346431636433346439393866666131633934636236303139636337313863386561623765626266393062396239376366633936313661646665373330626664396163376232313061323134356363336430643861343434363930376231626530623232306437326166636133376663393534366364653230653536323639646437323463393236653732386338343163646338376665346265323538313738643333353239356539346231346235306235343865626261326165333633326136653030343262633231333031346663323538353834626364356661633333373630373062303438666530626338303239653232653637356232383737623437616264653062306133313662643437323364356161616537363065383430323162336263613262316662613665633933656334346135333766376663653936616165626663353866656133316336333435373437383264356136383235303765653934383833356361623039616632363363393961376332323532653330653563653365313262623632346165656365386364313034623765613461666534343266356538306238306562313236656462666361363162303332366431303331323335613635636261643461396466633462333762363432303434346662376162323831613866386636366533323433383966313936396431373865333961313563346439376331316565323964643761363930336662626463323364636561653033376263383166313966643861333638613738313864323438396163623338336436303461653361376639303433313566346331393430343562383139653461393239646633636339326537326138373731313832363665323038303565373762373932666361643335383936353964393466666434376563366230316164363231623435636633643533373738653663633736636530663036636363346363373437363436653264323636346561643334383463
34306232393435376631386532343063326262363264363338373430363730666139633966396462333363353466383365303666383762336231383834356139363035623761616637303430663566313265656635636635623563396530336462636662366566643433306261393962393133663831373133306466393261623731333562643130356266633366623933316661353836666530383263323064366535613463303962373764333334653337623362653636333338373336326532633262343530306566366339633263653362343439323064386438363536306266666634336434396233613865303832396663393637316233353639666339653833376231323661626436366666656434386361626330666539336633353838306631323634373164636563326239383365626263343234636561346633306261356466333362373139306337396331316235353333373138613861353263346332626536633735303266383531626561373733623564656132356637396530326462313430386432373335313461366139346135313839633630313463663931663362626462363139663139346535383134326461303765356432393730356530376161343763313335343538326561376161323934373932373831633133333837326439343963363731373235363935643937623536653865353866343632363837616439663739643032643432386437653935623131356363326162343965613232666238303762373066333432663833383065306435616334336638363532343935326435353131623066633865653663393433343536363331633365363263306331356364366131343665306361636531653365333961646364646331316138333261376632386135636562643230666161353165636666333639303236343839393532373737663739366562356263663038636131653538343461663430333035326235396233643935633337636131363536386634363537653635653230393365653730363762396461383031346235653833666332383138376332366637386161346266666365383565633062616432663830333537333861646335383536653133333561346338636130613331333530326363666536373430313834663430376562313131306361373862623662303439376530353738643061396534623532383061303437363836356134633739326131653135626365613937393230373663353830383330663531346163386637303066346666666434653930326239333563626235666438373239316338333332303666343237333664336439633164353837373236616436393637653132346533343432643966613731663237
39653032626437633336353134643536376530383963663231626233366163636130333235313064656330383637376461346334386536366466333136343561613730613937393131383835353465383332383133633035326164623561653965343132333037666162623537373634613637623036346330363963643830323237313234643339386335326330373537366630313039313062323062646630643934336236653365643334393161666131616662303836353730616632366261663961313030636563626330663631626236386465336437333636363938383033633231396264653938356637666565353964323634383566323533343338363736396535613132623935366337353164653030373334336333333734643130396236663237376237643934386461633933643230353331653033353537356237343739306564306161626564643562396537626630323139646437653137383930663230346636346232343966616436643265663934333731666137653833393362353262323532663337346534393034306137313464353033386532393362666435393765393762623739653133366335363665333466316239366535366665633961306130353135623135616261613366363266313361386433633562656463326262356434333634653831393230663666623165353139323932313531653230323363613939303731313166343566343463646537326465373033323933623935653633376435376638626336336236333666663830303730633932343430313730633735343334323538646566626366366632353463393032373535653765336361333562363062306131666661653364633638393732363365313765636164663162666139653463343564323333646161343762663934613939666362386430323066393833623132613766636530396336316366373363306665373964393632623036613836643162646265326638333535643334633833356135663161396533373035316336613262323766643330316236336635663637313962313536613439393434363136323838646266353135313563666536303832316439313439306465623738643231663437313461333962373266376637323035386365333035323336316631306535306263396665323866396266383136373163653936633966363230323366626336633335613730363731333134363961333838326663336361313363373339616465366230633332613639323532643161613230386339306339373936303265313661373266343465633538363238306134346335313138386434633733656136313336666464646535623465616430646566613663
31646261386238623163376261643663666262396462333939383861333064646239643233623134336138633933303966366663616261323162626436646539373437346435313938653939353532616164343863626463336464663735343062643835366561336436316135346364346639636235386336366133623462353634363532383333333434336239613131356132393933383663613337363633633730663666623062333137323731323335633431663239386132663461313462316364366163623630343937616630303234313063343366643938333431306464663666373464643463326133643138643039323532626339646563363038616664383761366265336330353439383138383236656364653663656162653634643961323761383635636233623661333437343138383932396263393236666163613564613231336361386331393266636636383162343232353036616136656261376466636539393531626235353062383238633165373230633239643636636430626537303938323639316462626330346332306363616562393866646134326632663636623237616265616162306533316364616333303332623366613236623161353666373366303964376165633735336531666332616530663539623439386635663337643731353138643964613161613336343433616266316239396431386133363766306462336535616165646231636131313632643331353938303364663966373432663437346333353762393266326262626635346531313634343938356231633062656531383230303837346332343431646263363337353464336365653432646131663966346464333164346537353765623636656632373638333631366566303265323237643261653238623538623330653331626166343536373935386638383363386466373931646636623430666539663330346463613164353238303038633262336266333265396530663039313539333639643537636230396464333064373966306365383131306232306637373336646239333639313431386334326533373332666432383463303236613764646135613734643637643336353236316338323564396234326434343838326138313264643938636563643165353637616434326365393961376135343764613034346335326238336665306630316432373336326463386633636139663964316534646432343130376139656636373737306332666239343762393965333633303038316230663431366532643035666466376232313964366337333664363939353932633365326236616664663031346538643162653738353664623332363939363636666261
62376133316663313235393130363930626566326464653865626334393361326433616461386362633861323531323234616136323831613539326665313838656665646562363634393030356435323932623937643439663235373561313235666464613333653862623332323764343861666638303361336666343036376137633938353566303830323836333736363338396636346562303232353531646634343161633239633630306663383864356262636566303833663864633233666636653334613863626531326239303436633734626139376132663832326136323661643365363965316534353831623065346236333364333139376161663461353265653562386135356263333430303730383461353665653562623336373264383564653733353035623166366166663530636432386631373135333530323633343132656564613030373835313835633263373738333231326330396662336461383032346563363366616561303733353630646335613635386335666338303831363665363337613834376435353132323261393333333130326230613362376434623837343438643632393934383963306535333032343138303664633938393139613639356463363636336163353963646265316335626331613164656231373661333638393635353737636464336362333739636632303039376631313866646236613332313432373037306436656463626362383666633765363033383965303837303666663936623464363839616136663562323633306366643663356637373235336665636233613662613966336531623436376661623663653766356161646433633332356536396536366634366161393661643564626538646364636161306161363235376264303636656638653862633964626535653864333130656336326436373638653366663134613662343263626232363637303030656433653365633539656330613432636465313837303961663837633036353235623131653838623335376365636264663838633130653065616262363031623737373865653130376364646364336563656138663066313962633530333264323632323735666563336337393464393437663763663862613835626639643532333138656562626161383563313862383739336166653663666136323335396234353864623965666636323165323861623134623732346461343436653939306461386134306433613965643066333535303866613361636336323135303131366433613834366265373366333166396330323433663564303565373163616665383762336634303265393365653835373232383537653365326130626363
31666638383731306534353133663832343234393431383139383839613061313934383036383564626337323632356532373564303465663035666437616663666239326365666461353535653038656366303935623062323766353132643238613037646237613965663462623164653636353532303036666133613963383566653539313231663562636365656665633461623530636339346435343833346434383665623339356561643230333633393235333531376631636435363532643632653265356633323964626665363265653831316339626339646331663166343935656664376362626636373732343661623539633935663538646464666166313165303830626336363465643734663762383731373938663932343032366665663965343434313432316166353666346564653338386237316238356132656561386665373561326634393339636165333035306230316137336633613737666238396530303830363665313062663339316336653832373336333631666166393962306564353233656361393265313863626431656535373439636161376662626137396532363632376464343237353863363739666230643262613464666232353733356135323137616361626239613464366261303439623739656531626262613465663334633637373334346539346262653139303731343561323664666335383834386564653834396332616666626139396165646438306632383062393639363734633230383639366131623631333738646137663264396564396637363363363965626139373436323636313330303733653930623238653739333964613932616462666438333937306166363463666630663035623762316665393537363137636638353631333632623164303166666661646339303963306434653636333566333339383939396439623262306432656432653765313032373832346361356133376666373762313039353532313934656165353430656466316632366634623230626235663831653362313632653332383066363936366632383935323838326163366132666461613136626335353534636333313833343062306430383630303637616164353832656638616661626439396333313339663966353739313264633036623665613638643132333763326431626461333535643435366230303635313436396366663931346637346233613736356532653332613137616338343730646235313234353161313432663631373832623134303563326464366435663637663539313233356230393735613634623365656166623032646462393165633135383530653130616231306332363463383639373230
38616466383531373936376331376136313434343762366536646135636433396337656430656137633832653836336361316464623830336236633965336662376465333164366161663139613166303536636464386435663664393136376462663966303731633935643436633566313736326639303766663831336165316461663830343866323533303930356332383765656436353636353762373336633862396532653531303731646461633631383964386462306265633765333262636563326134313532336565626365346531616263346464356261326463353866363537653463393734616632393363343636626563353132616531663137353437316666633066326634383662313763333735633863396631316466323130363061303531663136313935313830666330336664613462306539623961643333323231303966346163616134336131306435396633653233663364393562646265303838373232306635333331643936363234366430396637643862623239636334626431613639343730326536373237303164363636323963656266353939363230303931653463646161353134376133613438636533336463666638303337303339306437356161333737623833363635333666613339616230326139656138646261613231656233326532333634626531666266646364303335316531633164346263386233646464343635376336336337303834313239343034633039376363366563336264646638353333303062313438653134333433646264353464343364373463643266623364383337343765396363383537326339623334653162336130353136616430363339643634386639613330323662303566633631613537633465343434333235326138366166376264343538666232393762373037396130623131306466353931663530666139623332366266393962383064623136343166356233316262356365366566373537643538646366636261363734666463326663363033353634356234313366393265383466613862316231323262313561613433353635333539616633633330393338396563306439366139383831666336643936373064366136623637313238333432356666303430623833313261393761636433663930626135653366313833616135626433396665386666356164336334633738353238313135373566383236396534333963376465383364363935373863346135326336363532303736393733376263323334366239373631313830386430633561636663663164653539313662636633633539613465336163396532663930303332633136326562383630323038323738656261626331363832
65633566326266616338366162326636373139373332313762653464346162376131626539303531646237613564383439396666346661316432386462303464326163353539653931346433306166616163633334663235366430343735303732623564383730616365613162353935313961386562636334656166343161393161326135633434383533353431646137386436613966663630663861613762626635316336643166303265336539303434373539313166333964656634383862346633386266376431313762393535633738666463363438303961316539326164303338323337396333333635393636326162373837353938376237656632646435343761326238386334653865633663656331653663393035386462393934623939323339313862323532663464326162633234376636306265333461376366363866636639326134396434373036633166373330323133306530363732393032633931643136643166356531653666323939336462313966313135373631356364636539626639613439623436333733326263646337396534363238313837656465383737373132616362333234326131383537343530336362343637333034326539353036643662613762666637363430393664303964666338376435313662643635396538616639653265636138613539396532656365393732346237373232386164633965333332653830386266373034653561343863303265366563333230333034626461313439623433383331346130326334363331383662313833353535366531393764333030633335643432303535616338653237396262643865646263383536646661316663346536313134646536333962306135333661353366313164396262616666336162303439353736646139326533366434613331346632663861363565383664383237653931386635333263363432356436656331393935613964636366343731653232383833333132623961306130653937396563303866383736656539333433386532343865326636323963353739313039323738306431396631386237336264616261326530633532303132383632303164353637356461663065376537653831613765386530366664653266623233343662656139396632653465663361653833313439343032396464653933353234333835336466313064636530353061633231393161656631396534373339366630363439653038643930613164343234623930666331656465663933336663656137623336383465666266356461383465383761376334653763393331336235383465393936376166636131396138313435633335313433653232363462373530656137
34646631626136363636326462666266623537303437303665343536663662326232363038326266333535343131306661323732396562613230383233643630613062336432353465323138376238343465353963643737333838313032366331616434646331656262373533363635633431383966336338623665313535623233636430626634373465613130613966313933306265363231636535616365373261643838636335306233313666303162333135663632343335393063643633613939656536396465656236303064613335336665313366343061643332336138613630636362623535343465306166336537353037613937333237653039396231323063613331303432316331626136333938656562313731643666626661666234313666663639376361643939613663373366663466373032653638316237316633343231663939633336333631313461643338373864303631306265353830313362323862633061326238363164386365616537653961333230326330303964343630613633623966306664386562353136363734326566656631623835386165373832633762316137343838613362303031626235326464356439656461666635613962616265663235323563666239353033616630376132663831616433303537643837373364386630303364663037306430346236383765366162363763356237666635393034616662346632356235636562623431316136613761306365393630613661383165393838376564346235626634346263636335643038633231396666326464313038353735666332323233623436656265336163356161313962343163616664646138346235643033373033396336303639313538353136343139636637633163373830333861396261336133373762326432373461353162643961363839323939613532643637316534346535376537383437353735323561306361326435613035663465373939653066313237373237303439393431333461613037326165336532373032633435376462616435383332306333353262303862353930626630626665363063353338633766386138393831363337616436623435383664303064356261323936303465343136633563353562653230303636393931356638343035616164333633366264663931353832613563383330313135366566323230653865366336666432313039383834333839666134613931353233613062306132356664636432346364363364663863656166363961653263643632613766393733346566326330386332306535663231633237303331326565336462353661636363376532613864383863353564643336643464326265
32393961366362613530353034643634646337363230386139656132353630373965323333663466356530376630393036346561376336333562666639653833623465353037313261383839343063323163393462646436326433626162623063373937343434396163613762353230376234306561663662656230616664346464663237346433366362363832363938393963326464363932633438303230313865613836643362663939376634313434623035643538346230326163386138666331326366343165323430353935623336323831636462646137393666626538333131386339353563363130323737363366333838313233383065363265353539616132353432383639336162616339633165633737613637646634666464303633653365643035303666636636613931356466633339306232616236343934636438656436333462376337333532353063363438373233303138356131616435623137303638666166643362316638353934326462666633323063643164326237613465363763373761656130396562323062653231333335396530326132643464386132616136626264363733626332306635666561626336623362653365373738363138353339336138353034643764306364643534356363386331636161613762333937363365663836373761616537626536336532343635333761363630343863396161633832663164303634633563383764643632643637303439313537333732306164383265396338353634313939643963316361636637623538393632356133613031396239393561356433313166313536316438666636323162636634373038346132363838376466613637396464306232353336346162393265396334303434663738323132386538383730646665363934636430633265303864393239636662333032656166633437383462393131376365323535326361343265383864353636663232646537303533653437613765323535623566336131323964303461666136633135633562633434343761613135353636653537336536356234616263613832306230383737396133353232613965383261323135643637363939323035393537313664353765643162666334373633303734346166316238613733643033646336613733313638636461343064633539613762633462363737666662623330303766303337333264623630613936333534666631666433393831643232396365643961646164356431643731363062393463653565373638633266343938396337333331323063623336383939346537336139396335363261366138633739343631633138383364326365363361613964386532663138
34646231613632613236313237346436373833663762353833646564373635323965623463363039353265393136623037323634333230643036316130383865396434663632386136336632363462376466316464663662666334316662323531616333366332363435386632316462393138356161306536393639323461623137656637373639613931666538376463666366326536623065353534333861656339613131363934613461616137613064306265623234356432303936333461336565373066363135346331393565386165343631333033646565303165333634313464663136343435326536396261366132653331303163666534343438376666313538353835333561333261666666373061386462393965326366366533636566343361373762366236323734326333336437326130633734343937316630356565383834626437633735316165383232653663643231646362303261393361373265376431633738653632636530653566333862663832366463333532643138383936383831303637636438313439353563626534376264623063316436646630656133313765336139356335353935633536313338393463656636633539346362323234646634393266313730663165303265343131343238646463313862663864366462353537363738386161393936336430636165656132393736613832353563636135353831623064653434356130343539653738626633313861333737373031613430363261643634356634383838636636383463356661356533356134363933356438626461666537326261643134366631613466656531373861633536386563626563323636383834353465373264333033333936646135373937366539393930646665333036636363663765366663396630396133313134643634386239616231366661336332633134316238623438636636383238633465336262643732393038333065396636326634356435306461626335663166336430373135616562306337393866393534653265306137316232656338396663643431313661386231363438376161353333646530613338396532613132666261626430323031666632323663383264356637356135383838383065356231643764313834353164323734346537303266333463323938353239656437643338653336373563333963666561646537326239343763626236393037656264653665623065343334336634636330333665636462643831326637346433326136373938386133336464616438633862616466623861303338333266336363336237316362653765343465616662353639396266646138333866383631353539363061636536
31373031383264623233343163613932343065623464376636313937613462346165643764396265343763616338383538343233613036636434363137303265396565313362363038313135653432316664373836333031626262353561613663366330663066373664633737653630303034613362326330626239353639336563393465636362326661663065616130323533343163323366383963633063626164633334356562386630666330613833633936366537663433383333636565333866366563306231636439363730336461343261356437336439613838373137396332343438653531663335376134356338613166353837303735323866393664323361346331623737653936613365623138336561396638386265656331343232623865633230656237393333366266656262623433666361343733396631333139626263326235383833353534636430383437653662346635653132346234633331363566386664616664336130663439313561343931613638666463303130346235343363666632313263633730383038616234303032623036376336613337316561383234656365636363306634313534656331323637363862656435333939343831343630306430666136366462343138326362346131326263643934373334373133636638333235333030656134393666343561366235663165363366636564366234663465343436323337386463393237373734633434666534643537666264613166383831366639356231663830623066343537386236333432323964376666613961636136363736333164636134333261363236373832353234303365353165643038336331303466623630336539666331353062663964383863333564633235646664383132333964386133623932333539353637643666323138636230623939623536663764643638383666653164343663643731623534316630386130383865626232303231306361353633653537323934356334326238303531663633373063613534366235323636326336643063336463303732646561353864386331373737656466306230386631356666396339333264346530323135363431306261313133363837613733313633353861306265643166653266613235613766346537396531333865653563616138376439356235303039386361626266633036633832363731316664653638313131383565316333306235363866383832383234346662306262316665343963386633376365323432306531616231393236393830656534623837306439653665656436326163643061323436626161386365646366386366623937366262386235356434623634356539663635
30393561386433636262633730393865383833333965646133373264383866653262303466303465633965383966326432616466613330363036313661653830363231333834383563633033316434353062366161613934316166343839313133306465363036633832633462656435303161346263643061636565393931346562663331336335623437353264643862663036333130333037336531306561623930643939316139346530333362373030353335333735343630636432356534633732333931623237353130366661643033346138356562363032336339396339306536333335306162373863613236623137313437363237613765656130323031653264363961333062646330363763666338373435336165353065323038613365343062306165303865663031383738373563303466303731333064313664623439363438313030323738356338353439653065326461653230373363613939646331383463346635393966333965636264373331633761383034393435313938653036376137636238623535343161616134326263366230336261313566623265313666373338333832623836653963333664383961376233326232396363343534323830336165326434343566656433663034663035363466303963643539623136383532326434626165333939376432333335663235636231353731353439353636666533313363656531306466363036393739346236356135623836653435653066623065396363353638353031323964303764303339656431363430636365623937343263653431386164373533386165393330346661343263306632396238653864613562623437313834313637366666383064303830363364623431663230303534333535663336333932626561303433656565633361343430366261353865663739656564613962383664316166653966643131343339336565383463383666623930323831663637363336653430346432643166636364333263396162303936373563663739336564646332333439346333633435316264663130386533613435663437636162333236363437306332303031353464643861383361666530323332313161366236643432303961646662366332353138393433313363626466373866323030633662636461653331303264656262343864376436613935313733633539306437653731383437333366616362373537383465373236653538313638366234306438373536343337366366383330663764366362306465343233623538303539306464313236386533633736646562626665373735613835636138623762313534333630623264656332616531633832656434386535
35396336383333383562343961646430386539333333393538653064636339663932313734323132643732356334663863366337333634386663383037656435633532326631313865323765643934363134623462623733663933323565343333333834393232613662326664353730343034626131303034396633383164323330333032373965663031613730346235646339386463316539636365383132306661656230363432613332653638663365343335363236613761343964663862323639316333613031613535626664636432626536653139326238613938626566636435383536373262623936613738666261393763623330333233653733626637316163336436663566636331363462656366636632663363323265353334663730626539303433356335346536313238356534643262643034623333383230396134613062313866373763383163323566393239306661663738613363626661653332313363333937613031613939653263333338386162633462613630373065386463626262363232653362656137626364633465313636666261373264386437366362313961356239323364323934356262333137666362636661326663313462346563363163393732386665633333326266363865396664633933316166313066373330643331616661616263353930393565303765323932616433616337656366366363626532333930613637386432656137306265313861393834303961363664366635373634393464633366306261623265666635366362313361396131373230643832326664396132373233356438643235633635336164323438636233373964376536336430323066613335356561316132323865376633623363656161343435343836306534383034353236373435613265356366643531303764306237666237663331393832346561613464356265356338346430353437326661626138396438356163313866303837383962666136336132623639616663333864613031643039656434353233653534346334623838303866303862363339316339303163633864353364363634363036663463356533643164396230623836356334613861313433323064353662306132386630336163643537383535363563366662353432313639346166376530656536376166383765323239623439643536333733636261373237616235373166666163363630623461336638323565663635653232313461623631646230313865363835366261666366333538656331396164333337346432333263306361656435306639393964613330303539393133653035303631366432363965353466636137323538316332663466346532
34633434623039333237343263363631303137633134363466623064643966323739353532613964653562646332343063393330653266653237393736323032326238643461663030633137323232663864633032616437626362663738366266356137323164343637373736373934623231646332303531363764323037653635656432633239663261666531643236616332323063663439323533333264306661613839396133626635323861376334383666633836613034373038303633666466343363613436393334346339356166326463313165323866323437373335376361353334303566363331393463363462323835363765613534343537306566373864663037386162386565366531653331326130383333333031383433363433333330356234333262396163323465383364376162363965386261336430323837656461383130326231376535373635386665363731663131343032616565393637393763373432346564613937616630343835663965376461333437646438326433303339316639383333336164393236396434356533333035323639313934613537326635303938646239383431623262336430643962663561343664306336653633633635613261633166326634363632303766383933333266313461326266326230633861366661363666643463333362633563333731383132653733616365306132623365313130383632663339383130353563303433383038346130373038323130353636313837353562623661336133303661336330303564623730333263373061353332366333666338353562303038353537646335383531623463383131393532656236633763363564633662376632626633656561303536306166363735616431643835393234343239343135616430636535383339303439376432323663653134643862626633623166386639613564333364616562353835303930383764313038613933356261626666373564633438646532633664643362366262633033666638386531373334633833376462653062353664373138363832343039653730623361633239636632373733346632623931333762306538333863306263623338313436363962643636653733373062646535653530346663633830373838656238643933613430633539303761663965626164663139366131363130663039616430373435393437613637353461663066353863333565343332633530626662663237636238306266626665326462353537323334636632396536626461393432653337613432616265393063343264306233376331366563626538633232326337336431333933306330343438306564623339363530
61353536326536303939643565636335633961653964323734323064316533323739323939393765333731653439343139306431366639383735636662646638656464323139303937316161353238326631653838393832633535336561636162353966366337386637336132316362656233616565316166656538636263663037303065323831363431316630653764373266613636316433386331386532313364336339366366366536623530666162383438633664356639316562303537626530373866393333663837636264326531323764343436623430613064333433326164316263313266306337386262313631373763353061613833316638326461626138323861633464376630303934626337383930626466333566313566303165383634353133666235656137373039353239356466666533623133393738353031393536306563346631613865376366393362636330313061653661356130323661363861646430373261366366633763623237643764323166663833343562646238643033333363636337393432643765333965613566383566303964636136626132356239623737323939386631386263353337663764306339363865343733656162333135303161363165653764643365346332323661613131623865326231613135316264333361313432396137326163663333316262623037396531316438306237356631303136363036656439613566363962313735316636373131303639386536396461303338643230333339313438666662323637363364383966636562663138613963376166646436356336636230383162303138643565303363356634303566323461323132333837373339643335303833343138656338396632316637613430333261666633633334356632336437633462616564366530353662356465386565663036663838303061636364306335343838343630306531643231363134326462383131396237383366643662643339363231646438396635666232386635633938623130623262616366343330373062316266316264303461396434313732326330336535616430333830363762613730373337633265623961636363323763363763633334376263306130666462326438303231663533383734616437363237333932613261636130663839643132653633656135633537623764353261396466316237356637373136643531383939303539313732656462646234393639656231306138656535343639633131616432363031633465643231303136306666343634366663376636333666616132646166313031373834643639643137613936366264656330383064306165653666616238323833
36303464643864626561653066663735313637343533303934643535356164306534653732643265393661616531336365383038313132393066386464666261663932323064303766636639613337373262653633353730343465653236353837633637643761373263383564346462306632313630653266303337626632343536306463666334333035383961313632653537366332353330663035373135333535653965336462366365653866356662306334653132626362626631396235313634656164323732393331656533653230363766353931653066633566366335306464383535663364663534653033343832393735353634343162353264666237613935623539393131346130323238383365323962616562656335383034376662323633343339323635356363636632663232653566343233343236656533616531386661616261303835336561303237333631656262376165323466323835386434303466383664383437373833633661373735666139626264663836643532343866643965376533316533316434323930636662363530326538613462666532383865643761623965353565623834363236653130643864373332303538316233366264643937353933613261666162316236323164636137663538326230623036316130376163626634393864373033346335303838376462613263643763613665313166323766393339623664386533333466323062373065653333643062316538393038346466383064306539623364383830633738316264396366373562336532343433396633323332653534326631643030653735363733383362396561653830653866653130616337353133333732316339323661373066623066643164386533316335626164666661323535666666666361636430346166616131386638363834356366383136363566323533396263656133333531646366363636313765666233393061376363313530313766653566366666343133303538306163326535313262376266366466636365626331633836313436383234326666373233643433663265323436366163633036613637373234633734643833376239666535346661306439386565373339343431393133656365303063306235626465346665383032353365393464336438653637313530353937353863393539643335313965353432626437333231316165353736613632353831313639613566393964343738386136623562343262373362363566353065343336646336316565663665663433656665353864336161356135343732373330316261326164656434396431363265346366666633663537653265383865393031396430353261
65633730373963326563643930616435623366393438613539653730343661316639616361326335666162373239366465616463356461313530343939303034343066323264663931363266313161663863646266373632313431343137366662326434653235653337383536643432313265396463306433636332633234333364656466313935323539333537613164663934393061356364373037336461303638313962383333353138346166303934343933376334313536323639643864356431316531313731343162626661306235343765633764613664666431653463363463373162393736656663343262633234656230646235636234623634316464393337386233646336613033383533656337323931616130313830356133613464346439393331373966386130623837613330643437373966346566666131353331313963626531393038373933373630643361653530313164383265333366656536653964633165666263313530326438383362663866393630663537373461353562663564313239643137656336373134643363643666343439376165383933393164363534623332333135343965326661626661633834653665353962636563303537656566333566663535363663343631376235613331323562316234383430613635613633383136306537353333616132643032396638333439333038643935633461373865623237366435326261343930336639356136373865376262313232356564613731323838363262353339636230393432346234353232656338353530306638373331316566373239363462633131356532656362346361303134356134666132366538383833653336616436656335656433396138376165653365643238393532346238643264356266363833373365383133303165383766633465663639633439633134393532383335313833343036663331313530303665613862366262616633343039663438306464646264353831303339366464376638373732633362666562386662636630626232373231656336303464326561316263613038306532393637636362373536616136643165366665666364373562376564323638313633323038363537393638633434333930616531303063396465383435333130343865383562613665353135323762316430363137323736393831643365626134393562636436313262376631633961666138643334363535333030376437323131613663633362336163376166343739316563363961643838313464663335646664313638643037386330623932646333636463336238393037623032386662353535613534646533306364313339313562323036393934
33336235343539386532343438316436666261633239323834303136363537653838326665653164373933643439346364396630626539626637363830333266643063653637626234353138373633306436386631666332356466376466663733663465643730636232616138326662393765396539643534336535383733646532666262343632613934383738313234653232353535363565343638623531663335363231643963353164303331633765633338313534643164373266633138316664306137353364653562356566653261636235613730346337656432393938353938613035663233393835363538386338613466653734666138306663663538616235333463373034393361366635643838366365653937376161623234623863346431393062653138366238663463333564666636343732653666653865613963633233326464623637306634373064373861646132303566323730633639343832326466376563343331613131303336376630396637376466303262646566643762326239373162656236326138396566666136386466653338613638333166396338393262646633633461626535636466633063613063336633643935643538653032663035323338636636653235393266633462373730373863363461333231373536393433396231616364393662633961373662653066326135633633326438393666616632306634353864353038613037343338373061396161346533393034646461303134656661613535346162316431333534353832383962353235663630366666323430383937333361333434356233383434613533386230326466633635333761643862353931306266653663643264326237343739663232316434623861366434333837646464653636333333303231666232663933346536366265636537343638343330643666633866396634653764376361613237306431363066363563656465363361653037626337366632313963643963376236363464653930386132653233616263353034383539663932373533316462303137373066343362346262626462663034363434656564666465363463633166393261663164316334356331346466666136656137366562376362313466323832343963623065666531613731353366303963306662666134396631633230373731613462326163393061373931366330313939616265626636646137343861303766636436323439393061323332356236666438346630626532646131396439333430396534343137313366376266316565346238373462656530316437626330646435623161393631326164666536633634646337656230336437343163613232
34343533313339316339343062646561623865353133313436383939306332343834353430313437363437646539316162376135663736393738303765363261323964373936646265373563666134663733376434323066633533643464343634643731343065653338643065666434386433626365656538373335306538643365363633666238343866363330353830336435663836623830613835383433396161313461633238623662336262633137383736653631333165383862323765633739343239323332306265336536373961323961613064323765323538313565363965373339383639343365343533663730623237616265623533626432663130343137343739303261336163356137356338383630333432323863323938626664616465326564333563653866313666653562643861633332633633353464373461356130666335303234633666313565633439363435663761663632373762393735653163663361346638313632366366616134633336393562633731333635346231363331306436313331323966343432326538643236613432646535386662643963366433303939646531633431306466336130363062313766623530313530343130656337393530333164653430323131363966333936343938636666306134663131626335343537373864373032333437393830663037323430313835666664656438616266323533626465626530616630663663636138626464633137343634303435373332666339313436366131386561306130323934613263313239666136366461313337633732613437643431656238633230383261616330616465643436386334303039316435306132613933366435396436363132356665663665323962373431313063666231306462666637353161353266373766646133336533633663623438343763306530613466623664366530343033343536353261306639363730626161356234393633303939323835376133396265613637663534323738326266653562396362633036316538636364626433396665333933363965386463623435646464653430323631623964343261356637363463353065643435353535353434636463643935656234313266346435663632326639386236346466663735303335346639306566376230656333353565613766316632333637643936373063653666346165363238383736313234623034313537636365396238633033353765643765356563666433343930386564383234386232303832306666363131623564393138343738653066396431646663356331653764303566646537666435343330376131313338336666383230323233333638376565
31663236363136376330353432303736313633333432363138323434633038346337666139386334663663643338623139633235303335643761343861623733633332326231313737333665383733303832383461613362363365333063386264356433633961383634356461653166646231656664633237303134363461623136383131633266303131663963646333346432333138636135393134383862316164383435666566306432643938623539623432613239666336306334636563633136636364363236306535343339316562386137616366353136656133323962343532613935666266653031356261376335663664333631386664396132383532326131323132636164386137643032653762366163656432663064316264313764333839626230326333363530316135626165356436333261653130393333653339653366346135633661323434363738636263613735646234666235353231363838373565663933663438313332313763366363316232613339636132386437613938626463333934656630333536373531616261393430363731396631393631623563643130626161636539623237663932393263336133643166663535376237306631346164313435363637663333653062373931316661386666343931636631303834373836316439383331666663383539656232313934623864343637623036383333353336373265376139343161373534386436353931626665323536636636343733316564396363376637313330326130343063646230383166383431356133643034363531653135386135333233336665336330613038313362386362613164323365333838343237393237636433633661303338333361376466653836633133613764623030393331396561666662326531376338633831313034636232383530646131356430633233363236333338623335303135353962346164633434333534303664396531393164333536326365396666613338303536653861633233623939333435663466373438646262353039396466666236363133353564636532343063623430306337323234396532393761306639373435303234326132663233346463313935333036643966666533643839623761373433353736333564396637376139343838636266353666666132626236363835346438633235366539386635393531363234653635636135666465393066663934313366323136363338613462666238343162323566653435646530636531653264303739633138323837616435386631346334346133306434623737356331336237633739306137353563626539623562396465653033336561613561366439636338
63356163663966666136393535366562383162343339326238313438343766313535636663353330306332643130346466643731623064323865363066313338646164396561643765626335386132306133656566326238386232626532333033636633396262633834333530613434626533373862663439313466343631343338356435316465326236306364663032393564636635353964333765303132343462646162356535386237636563643039663066333764613838663739393065386534626234303233646534313437653862313432383738323636386235326266383535356433383761373033346632383737383964336438343733313732343462646637616132396539333262393434663765616663346266386636656532653436373836366432656431383262613837303063613763323135626563303732326362323431343130366264653635623136643466313336656634363065363238323338346433376362616237306461666361323836306537346463613932613130326464623864666239383963613837373034343663386332373165393465313730366538346430643963623036366437363834393537666534336564623333636163356266323362333963653231346533336563356233643330643730326466633164366137353537366231393630303765643363653835323264313962633063656564326662386564643466623465656137396264343739633530393132333161613362306264316562306233656431656431326163353161633835663962376563316166323566376163616537653061373863386361333434383232643537396233663262343064313339383565303137626162633766626266373836643338633034323932333931333938396566353430376231626237313436353230646438643233326162663438383433666361623031316535343163623866326630393039323136306565636630353164343438393238383264613136376535623662306263656537653061326362393063366261663230396537343534376131616361643665336366353062333833656535316264653231393334383139613661653564326561666333653439626335393537653661303337303865643031636635396430306562333465343030613963343333363463346366383733396561636462333938633365346135666631373433333465623866346333376139386264363566303631623736353636306166663333346465323462323038386266356364636637323961663035303063336638343738333261323536363862353431396565343434656462356236313863336465323566336133376337313237633261633533
37633834383930393733313938633666613336303736393061386530623864306336363530383163623338383338313432393761663864333938633732646531366536306432653039306233623835333065316535626439353834363565376338633565376632303230303932323866663138626131633931636365393639613030303938623666363064633633633965666533396637333162303631353836383263336235356166313838306266306637373238303735343865633034356636313530393065636130336234346164366630373731633238666134303735366339623661396437613738313264313165313939613831323134303261383833313038323433306663363363306335326665313833363065663935323738656630323661313866313139633066636631363765356638333466663933343166316337373035383332353264333563666437326364376636633066333766316333326239333834643735663534343464643931373332356262393339336332383936376264643036363535346265383836356534393230663335636532623630373739316662393764633561643763623032333263353866316266646435643462653031646335326465316439343333376536303535386132636239343631626265326333363337646363353134393538616565303063343361623038363239353532326638613465356531353831363238376463663937303231376463386536316339373031313261353630326466643030363863336539313437363537343638313230383237303164376666646435623432343164383836373730323333613139383539356134333966336464626332636134653866353762343439323338643064323837636534626665623237613336366435333536663938363061373064663038653435396335663338613863616232393434353832323135613733666666386564356233663237303566336466376334363337373432353938336166303738613036363232316161623932313431313138366233636637643761346363363562386535353632656438663736666565663732646537373065353634636434626163326165326437643233346261656461666465653734633363363563643966323864643735316530656237303533376162373238376532373261366131616238653335326338383336313938643639633665616566313830356530393466373332643135626263643933326466653761656238303030626438363866323838386266303162653731383966313230383065326435343861313139333835646361646434393133386365616239333539646430333637396666376239613231666237346362
37343165313239393630346639646663666237643530656132343738316231663035666536343764656338306461633937373763613464323366633735623333623666313838353764386362663633633761613030353461363136623438666263383530303765313434623863363863363831366332623736383037346164663361396562303035313634343633623461666563663335633662306466353032626463363034656436343864313264666637656464373763303638646233626231636138376333623631303666373937343532623763376434343664323765313464633632656463373732653634386231393530636239393863393834323261353438393639336464666533356262343737323939613530616665383162386236653065343161663539363432626134323739653638313163663931303839393032333466656662373435346166303538326237383639383730336333373937396562383237356334373331313033306663616166636431373765323936633561643766323930313330356332653635323331373732646435363131303632646436343238396334356432636634336337363330343938646430323265393430613363326563643335393536353663336164356461633664663064386337646663373561343365633664376463363937373439333534383164323232356134396536313933316439376564646262653835316639303566343335383332313832393039313633306232356233356231393734393237666333336632313261646462333663303761636633636666653133346339653931646264353162313331353264316231626639353762333931376435303964666535376335326630343163363731373531343563346133376639636534383064383434303931303532643138643666333966313437613861343137633761623166643037366365393939383161386238303237346139383061613161626338383462653431626161386238393265356164313261383564326565323339656661386135393830313661666536633538626636623033666332653462616234353666343533336262333666633133313433356665316635613734396235613632306565393331623735316366356134393136653061663063626566306539646333393536656132336664633536326437346561343264643963333235636332383532303132343031333162333539353234376336393338303866326331616534663639363266633236666435346335633936626261376336373330316131376461346238663166646662306463623937313838346665373033656361626161303463666266303637616161646439396562336533
653539633232356430636533376431306532373039373662643230353537366133653635396562303839666532666163373834653334373132643832383837353635643535663930376631623837366539316232613439343334613366636131346135373837373464326434306432623464623064653731653764626431333034333561646336326132313162663238376333356134633535303963653261303837353437646563646239653930336466356130666666303536353539326262306130316630653238636166373336313338663434336365373335383636313534343863373661626330303664623062348b9c464323022f41e1182e41dbf339436312354588d44841031fe23b24d3ab3dea0aab3d288cb13bc2dda2385040db3d0000803f3a54b9bc4907b0bcdf1ab4bcfdacf13cae39abbc1a78f33c74fcf33c4a76b9bc71f3fb3ccf8bb2bc4c82073bd4dd063b8586013ba9c88fbbcf940e3bda2c8ebb0cdba6bb5490f13aa061aebbc3e2eb3aa241cebb7d0ab7bb7b66e0bb79ae1c3b5c4aa6bbc971f43a97c9b63aaef1e5bb2c47a63a90cbaebb08622bbc249601bcc9a23cbc061924bbef2bd6bbb8b963bb0188dabb53fc2ebc05a802bce6df24bc0e8f653c82de373c7a6c483c545424bc907a383cd4f024bc054446bcf32c5f3c6cdb89bcebc7713c1f013fbca7da32bcdc2038bc323f8a3ce8b02cbcb67a8b3cb3cf8b3c2d033fbc8966913c212b36bc13ab3b3d550d293d705d363d37c11abdbe041c3d82b31bbd570c1cbdd5563f3d785020bddb142b3d1109113dd3a2003d01570c3d242ee8bca7d9ea3c7bc4e9bcac4deabcb84e143de38cf1bc9261023d65660f3d7f03f83c9ae4093deae9d9bcd8bedc3c08e5dbbc5a9cdcbc8b35133ddf8be5bcfc38fc3c87a73c3d32022a3d3359373dc3d91bbd8fe21c3d34cd1cbdde261dbd8d4b403d806e21bd2d072c3d4f4f373dd058243dfaf0313d047415bd8ee6163df66e16bde7c816bd96003b3dbc321bbd8667263d1c9d133d2a83013db96c0e3d11ebe7bc5929ea3cd4b2e9bc0a58eabc9b33173d5e6bf2bc4e73033db1b8353d6040223d3935303d4d5713bdbe8e143d395614bdceb314bd8d83393d482f19bd155e243d0f04fe3ca559db3c462bf43cc290c0bc8a06c33ccb51c2bcaeefc2bc0e6a023dcadfcabc1b21df3cd1d0203dabfa0d3d2e701b3d5031ffbc57e5003dcf8800bd10dd00bd9c89243d421e05bd8b01103d4085183d717a053d081d133dbe41edbc9417f03c1f34efbc7fe3efbc173c1c3dbfaff8bc7f8f073d998f073d8367e83c180d023d1e8ecabc7f3acd3cfb86ccbc5e3ecdbc045d0b3db824d6bcc799ec3c27b7363d0f5c233d403e313d8e4914bd2595153d3f4b15bd4ca915
bdf1743a3d0a2f1abdcb77253d0b78263d9c3d133d3cfc203dc17b04bd38d1053d617405bd6cce05bdd6402a3d7a310abd6651153dd4152a3da4bf153de352243daa0906bd6f5e073d841507bd227507bd1f092e3d2a2d0cbd0ef6173d562ccc3ca888ab3cf7d0c23cbcc192bcebed943c316294bc8ffa94bc459fd23c02579cbc700baf3c1d40b03cf28d903c722fa73ca76171bc966d753cbf8574bceea775bc9a87b63ce9f281bca3ef933ce2392d3d4f5d1a3d6ddf273d70e90bbdb0140d3db7e00cbd14390dbd28e9303d099411bd416b1c3dcb0a0a3d1304f13cc707053de7ebd6bc75ced83c1aabd8bc6651d9bc707f0d3dbd36e1bc77caf43cb35d083d3912ec3cea26033ddd19d0bc2a50d23c7bf9d1bc24a8d2bc58f10b3d3a1cdbbc380ff03c7b21123d9f25013db34c0d3d3cb4e7bca066ea3c246fe9bce50beabc5674153dfedbf1bca0fc023d6d842d3d60c31b3d5277283dba410ebdf8560f3d4b280fbd177c0fbd7002313dfa8b13bdb5b01d3dd070033d9814e63c3485fd3c44ffccbc9629cf3c58a8cebca545cfbc3aaa063d59c4d6bcdf9fe93c8dda233d2c85123d72ed1e3d422505bd9a4c063d930806bd825a06bda53d273ddd5a0abd4e66143d4dcef93c2c35da3c90bdf03c0c30c2bc2e74c43c22bdc3bcd448c4bcc209003ded56cbbc1e98dd3cb5db103d2695fc3cf5910b3d7728e0bcb38ce23cea0be2bcb1bce2bcc87e143df240ebbcce4e003d2bed2b3d6b6e1a3daaf5263d03000dbda4100e3d89e60dbdc53a0ebdbb592f3d264812bd88551c3dea1aec3c5303c83ca8d3e13c4261acbc8cbaae3cda35aebcb7dcaebcd12ef33c2525b7bc4cebcb3c52048e3cd0e2573ce058843cbc6e23bc972b283cd8dc26bc260628bc2fbc943c9a9537bc79425f3ca0ee343dd33d223dec9e2f3dd89b13bd71fc143d539214bd5bea14bd7592383d8c4119bd9247243d0c18243d0a73123d63171f3d0cdf04bd8cfb053d26c705bde41a06bd9689273dd3300abd845d143d7745f13c9a93d13c333ae83c7e83b9bc1e72bb3c281cbbbc07b1bbbcfb82f73cf2e6c2bcb0ffd43ca24e2a3dbe08183dc218253d82f909bda7270b3d6ee90abdb0400bbdc5e12d3df8780fbd75021a3d4eb70c3dfbfcfa3ce759083dfd4fe4bc63fce53c5bcfe5bc005ae6bc20bc0f3d4623edbc623ffe3cef953c3dbf6b2a3d786e373db9601cbd25841d3d2c521dbdfaaa1dbdf91c403ddfe721bdbc662c3d2808103dee53fe3cca390b3dd159e4bc53b6e63cf70be6bc21a0e6bc7156133df150eebc7efc003de35d3b3dc242283dafee353de2c819bd72ea1a3de7bf1abdf3191bbd801b3f3d74741fbd29562a3dd8181c3dfcf30a3dae2c173d545afcbcda60fe3cad07febca9a2fe
bc53801f3dba1e03bd0cc90c3dbb01013d54c3dd3c2db1f73c67e2c1bcd18ac43c60b4c3bc625bc4bc3f92043deb96ccbc3face13cd80ce43c2df1c03c8d0cda3cf0f5a5bcd88ea83c13b6a7bc8d53a8bc76f4ea3c1544b0bcd9b5c43c1e201a3d67b2093d7473153d892afabc194bfc3c3cd6fbbc1674fcbcf6541d3d03fd01bd937a0b3deb23263d025d133d79cc203dcab504bd0c15063d20aa05bd870006bdd3d3293d4e500abdb965153d69a6113d9870fd3caa420c3d758ee0bc3511e33c9575e2bcfe26e3bc665d153d59c0ebbcd3bd003d5c61193d4064073d5d44143da7e2f2bca665f53c22baf4bcd262f5bc94e01c3d94b0fdbccd56093d707c263da49f133d791f213d0de804bd1150063d4bdf05bdb33706bd0c2d2a3ddb920abdf5ac153d62b8243ded37133d51bb1f3dfdb105bd5ff6063d009306bdacdf06bd732b283d6bdb0abdf218153d5dcbfc3c26a0d43c2564f13c658db5bc7a3eb83ce29cb7bcdf5db8bce553023d65a8c1bc4dfdd83c315e2d3d2fc01a3d8b1a283d0d390cbd92780d3d5a300dbd098b0dbd7fff303d66e211bd1dc71c3de615143d9be4013d51e20e3d4f36e8bc488bea3ce708eabc99b5eabc77ad173deaeff2bc12da033d118b113d1c06003dce880c3d68a1e5bc31bbe73c3863e7bccb08e8bc87fb143dc7f7efbc52e9013d73e2263d45cc143d6dba213dcecf06bded27083decb807bd8a0908bd486c2a3d93290cbdfcc0163d823e333db869213d3f2b2e3dd3d413bd82ed143d0abd14bd961115bda7bd363d2f2819bdfd58233d0023f93c1d73da3cbe59f03c3840c3bc5d57c53c26c3c4bc0a4ec5bc5d33ff3c8528ccbc9bbfdd3cbc83133da0a7023d85b70e3da4a0ebbceeb1ed3c2c57edbca8f7edbc14d3163da7b1f5bc4b7c043d3341133d7aa1013dd0390e3db935e8bcd39fea3c73fde9bc0f9feabc24b3163dffacf2bc918a033d33801c3d53560a3d2459173da6a1f8bc85f0fa3c9581fabc852efbbc0507203d67d001bd884f0c3d0ed70e3d1c07fa3cb9be093d2e10dfbc9866e13c1dd6e0bcf978e1bc355f123d9581e9bccddbfd3cd1be073d99b9ec3c32cb023dfbfad1bc3047d43cc7bfd3bcda5fd4bc6a280b3d725edcbc9a7ef03c3699043da9cce43c64e4fe3c2975c8bc23fbca3c1957cabc3d03cbbc1827083dd480d3bc51c3e83c28461e3d10ab0c3d4a3f193d4977febc0f4f003d0a2100bdee7300bd69bd213dda7e04bdba930e3d65f9253dafe1133d8cd4203dd8d505bd9f14073d9ac406bda91a07bdc581293d244e0bbd08d7153df61e143d06ee033dec7e0f3d6717efbc527ff13ccdaff0bc253cf1bc4054173d177ef8bce8aa053dfdbc093df525f23c43f6043db27cd8bc5920db3c1e21dabccab1da
bcaf0e0d3d7229e2bcaeb1f53c96f0043d6685e73c5b0a003dd735cdbcf067cf3c6bf3cebcf495cfbc734d083d3671d7bca53ceb3c509a163d7f63053db9b8113dc86ef0bcb25cf23cfa35f2bc35dbf2bc90f3193d43ddfabcad43073d4170043d4350ea3c2b13003da04dd3bc951ed53c33d3d4bcf25cd5bcf078073d3e41dcbc289fed3c38d50b3d5d94f63c6b1e073d28f5dcbc146ddf3c30a0debcab35dfbcbf140f3dd2c2e6bc5025fa3c1abf843c4092483c01f1763c5bde17bcf0c01b3c86141bbcda331cbc362f8b3c1cc82abc828d4f3c84371f3d665e0d3de12a1a3d900bffbc37a1003d017500bd12cd00bdebae223d8fff04bda6510f3d9ac3293d1d67173d0b8c243dd32b09bd29700a3d261d0abd2a740abd70562d3ddcb30ebd0067193d75ea2b3d782d1a3d3be1263dad670cbd71980d3df0520dbd51a90dbd445d2f3dbbca11bd141c1c3d8b69193df929083da782143d5bc0f5bcfe3cf83cf479f7bc4f11f8bcf0cc1c3d43e4ffbcd2040a3d4ce71a3d84aa093d5ffb153d320ef9bcc780fb3c86c3fabc955dfbbca44f1e3d518e01bde9840b3d829a3a3d496c283d7b6f353d2fb41abd75b41b3dbba11bbdd2fc1bbd7e2d3e3d7c2720bda9632a3d6e7f103db742003d09dd0b3d83b4e7bc46f4e93c2e53e9bcd3e7e9bcd1b3133d893cf1bc7700023ddb0c3a3d69ec263d05a1343ddc1f18bdd46e193d591a19bd737319bd05ca3d3d2cdd1dbd4802293db987243d850e133df28f1f3d927f05bd6eb4063d656506bde9b806bd5ef0273dbec30abdf2f4143dc6c6983ca826653c1be38d3cc4dd2abce7ba2f3c94b72ebcf01730bc3d41a03c658141bc39636d3ce7c1e23cbe38bd3c6f0dd83cd592a0bca031a33c4170a2bc1e18a3bc5525ea3c558fabbc9b41c13c39de073dd5e8eb3c13c2023dfd1cd0bc4a0cd33c46e6d1bc0184d2bc896a0b3de5a0dabc78c5ef3ca7cd1b3d59be093d71aa163d2cd6f7bc7348fa3ccfaaf9bc8d57fabcfc5d1f3d5b4c01bd6db10b3d5fed203d29690d3dbc631b3dbd69fcbc7d31ff3c0069febc1d21ffbcb2bc243d791104bd73870f3df8701b3ddadb093d4b6d163d7e9df8bcb738fb3c1860fabcddfbfabcb4e31e3d487b01bd86c30b3dfbcfd03cd49bae3c650fc73c665a94bc52e2963c4d0d96bc63a596bc8091d73cb25b9ebcf449b23cfe641f3d6d370e3def791a3d8a4701bd075b023d712102bd307102bd27cd223d954b06bd0710103d51d7203d0a180d3dfe361b3d03c9fbbcb86dfe3c4fccfdbcad87febc93b2243d26ce03bdb73c0f3d283a113d926ffd3cf0f30b3d76b7e0bce679e33ca397e2bc2243e3bc23d8143d68bdebbcb7b8003d311d0d3d999bf63cb610083d1af4dabc6375dd3cc3c9dcbca772dd
bc5d94103d92b6e5bcad79fa3cf2fc093d5478ed3c7b83043de7accfbc2d4fd23c11a6d1bcab5bd2bcc9c00d3d8343dbbc69a7f13c2568153d939b033d2051103d210becbccc68ee3cb1d5edbcc37deebc77eb183d7b94f6bc9c84053d806e2e3dbe741b3de307293d43020dbd2f1f0e3d44fa0dbdcc550ebd8f25323dd4b212bd74831d3dc1e0193dbd63093d482d153d6bccf9bc10b0fb3c2978fbbc7212fcbc9a201d3da5ce01bdd42b0b3d5756133d8e8c013da5450e3dd710e8bc5333ea3cbadee9bc9186eabcead4163dd0abf2bc6079033d6a55223d7249103d13341d3d3cb202bd869c033dbd9f03bdeaf903bd84da253df32408bd3e41123d1e76293d9088173dc158243dd5120abdab110b3d39f90abd764d0bbd6ff82c3d435d0fbdc876193d5b1edf3cf543bf3cfdeed53c2ef5a7bc7596a93c8e80a9bccd12aabcac7ae53c0417b1bc7fa8c23cfc57263dd165143dc140213d109906bd9eb1073da38507bd2bda07bdf3d4293df6030cbd1757163d32b0003db05bdf3c11a5f73cf566c5bc3da0c73caa1dc7bc0fbcc7bc100b043dc17bcfbcd908e33ce385093de0d2f43c1430053d34fcddbcdefcdf3c5177dfbcb7f9dfbc7f8b0c3db8b6e6bc750ff83c5418ef3c7632ca3c0495e43ca61daebc5380b03c9ef8afbc7fa4b0bc5a4ff63c8208b9bca426ce3c04a50f3d93a0fc3c02ae0a3d102ce2bcad9ee43cb9e6e3bca684e4bc0413133d0a5aecbc1e2e003d134c1b3de6b4093d3f48163d699af8bcc9ddfa3c5062fabc4807fbbc34c21e3db48801bdb59b0b3dfcc6183d2a35083de90b143da464f7bcaa69f93c8a0ef9bc54abf9bc36091c3dae9600bdc3fd093da5dace3c07c5ac3cbb2bc53cf66992bc44d9943c0f2494bc61be94bc608fd53c0d909cbcbd77b03c7a97393d13da253d5df9333d04cd16bd8fec173d21d117bda03018bda6723d3de3c21cbd1f00283d32641f3d394f0f3dd5d31a3df1d802bd76ee033ddbab03bda0f603bd6c8c223de6ad07bd290f113d1c2a213dc1800e3d48da1b3d904d00bdc276013d3d3f01bd979701bd38d4243dbad905bdb487103d3e78f43cc534d03ca12cea3c2851b4bcfbd9b63c1327b6bca4cfb6bc468cfb3c2f1abfbc9821d43cc0ce203dc65b103da51e1c3d5ef203bd4de2043dd4c604bda81505bdf80a243df9d208bde420123d1d05183d4cb9053d84ce123d91c1efbc8cd5f13c9e9bf1bcc049f2bcb49d1b3de9a3fabc58b4073da0ea473d642f343ddb4d423da9ee24bdcc3b263dfff225bdda5226bd7fc04b3de5e42abd9954363d6a3b2c3dceb8193db4fa263d935d0bbd9ba10c3da1500cbd2ea70cbd83da2f3d75ef10bd58bb1b3d1d453d3d2e4f283d214f373de90518bd517e193dda1919bd597e19
bd295a413de7581ebdbc972a3da8bd243dc7be113dc35b1f3d0cfc02bd3958043d52f503bd8f4e04bd0772283dc0b208bdabcd133de4aa2d3dcb721b3d8c7f283d13550dbdffab0e3dcc410ebd8f960ebd193c313decc212bd316b1d3dd2ba393d361b273df672343d15a618bd8bf9193d7f9a19bd2bf219bde65f3d3d953f1ebde720293d8ef4173d79d5073d455a133d1136f7bc8418f93caed7f8bc9b72f9bcc3291b3d876600bdfd92093dae1b143d5a19023dc8fa0e3dcb81e8bc201eeb3c5c4eeabc38f1eabc4fab173ddf16f3bc510b043df591143df57b023d516a0f3dff51e9bc96baeb3c1b23ebbcfecbebbc191e183d4302f4bc866f043d1f31263dc747123df485203db4fa02bd783e043dba0004bdbc6004bd36142a3dcefc08bddd72143d9535e53c2fc5c63c1079dc3c63eaafbc7cfdb13c5063b1bc69e5b1bcac43eb3c369db8bca105ca3c2c25043d12aae33c9ad2fd3c25d1c7bcc117ca3c3fa8c9bcff4ecabcd5be073dafa3d2bc109fe73c7bf40f3df923fe3ca9280b3d0130e4bcec4ce63cf0eae5bc6a8ae6bc7e42133d1058eebcabe6003dbc4c173d35be043d7c06123d5dc7ecbc6259ef3cfbaceebcd958efbce0ea1a3d80e9f7bccdc1063d8db1593cd0fa153c5d60463c8be8c5bbf66acd3b03b9ccbbc12ccfbbcbfd663ce901eebb01471d3c4185023d6e96df3c6859fa3c770cc3bcd191c53cd9ecc4bcc799c5bc9033063d6418cebcf4a0e33cb9691e3d726d0e3df3d8193dbf3d02bd2845033d710b03bde95503bd0391213d5af706bd6128103dce362f3df9761d3da2282a3dffd20fbd9a09113d26b910bdee0a11bdbbb2323dc01a15bd64641f3da8511c3d0b100b3d5e68173d5b21fcbce412fe3cb0defdbcc880febc68b21f3ded2d03bdf5ee0c3d3489023d865be33c2970fb3c77b6c9bcc5d1cb3c3169cbbc8709ccbc95d9053d6fb4d3bc23ffe63ce46b003daa3fe13c00c7f73c5771c9bc8e9ccb3c6cfdcabcde8acbbc968e033dc490d2bc1c9ee43cb34a2e3d741f1c3d721e293de03a0ebd6c480f3dbf290fbdba810fbda9d7313dedb413bd41191e3d7bbc293d15e6173d3ca9243dea1b0abd6d490b3d24070bbd1a5d0bbd1b372d3d04800fbd02d7193d85c1d93c6207b63c63a2cf3c4fde99bc9ec99c3c58b29bbc54549cbc3cb3e03c169ba4bc73edb93cbec3a73c1e21863c9c2d9e3c9d4958bcd8815d3c4ba35bbc4dc95cbc6d62ae3c04036cbcc7c0893c4ecf1e3db1150e3d33081a3d9f5501bdae6d023de42c02bdc47902bd0719223d294706bde0e30f3d0c49fc3cf58ed93c2d57f23c5244bfbcec9cc13c70fbc0bc9795c1bc2893013d9a5fc9bcfb47dd3ca16d373d1c79253dba55323d577517bd4ab3183dcc6318bd3bb918
bdd8ea3a3d3cea1cbd396f273dd8f5083d3435f23c1c6c043dfc4cdabce360dc3c40dcdbbcda68dcbcc01c0c3d0d7ee3bc569af53c4ce2183da320083d701f143d7d94f6bc279af83c134bf8bc33e7f8bcae2d1c3d565100bd39f2093dc9f6013d806ae23c295bfa3c9e03c9bcc016cb3c6db4cabc4e53cbbc7045053d01f6d2bcc207e63c3c73153dc609033db939103d5a73e9bc9b16ec3cee54ebbc2bffebbc7411193dc67ef4bc2809053d62a5233d688f113dfe7d1e3deeb403bd55f5043d669d04bdd7f004bdc936273d220c09bd2383133d3926223d689e0f3dcadf1c3d256301bd789d023d0a5302bd73a902bd9acb253de3e406bd259d113d78c32d3d12911b3d5b8f283d09900dbdedc90e3d3f7d0ebd68d40ebdaf57313db20013bd96881d3d7f24223d6551103de60e1d3d45af02bdd6d5033d309703bd39ec03bdcca5253d360208bd713e123d95be183df965073d6ecb133d8021f4bcbb9cf63c76dff5bc4981f6bc2b291c3da760febcb346093ddc8d2d3dac331c3d2f97283d34f80ebdce2a103d27d50fbd6d2610bdf6fc303d810c14bdf00c1e3dd6cb863c7b82433cc369783c194c0bbcfdf20f3c52f90ebcec4310bc4d2a8e3c59f220bcd6784b3c56f4323da641203dbf9c2d3d382812bdeb46133dda1813bd8f7213bdb7a2363d98af17bd8946223d9439273d7504143d61c8213d895305bdb8aa063dbb4906bd62a006bd90fc2a3decfa0abd4716163dceab0b3ded29f53c17c9063db0fedabcfd4ddd3c8cb6dcbca556ddbc4e0c0f3d6519e5bcdedaf83c5ca03a3dc6cc283d8f91353d72e61abdf8361c3dfad01bbdc0251cbdc51e3e3d764520bdcbbd2a3dab20193dfd8a053d6b94133d9d98ecbc325eef3c7f9ceebcbe53efbc27f61c3dfd6af8bc34ad073daba3223d8d6f113d93bd1d3db23604bd175d053d001505bd426205bd4a06263d975009bdce46133d9897253d2c95133dad7d203d6ac705bd3bed063d60b306bd930907bd9617293d942f0bbd4e8a153d4e491d3dd88e0a3d85f5173d2d81f8bcd2bffa3c6d6afabc511bfbbca9f3203d91dd01bdff960c3d2a08f13ccbb5cf3caf86e73cb56db6bcb494b83c6917b8bcd1b3b8bc17a1f73c1c39c0bca747d33c1222163d57af043d1126113d2dfdeebcfb26f13cecbbf0bc855df1bc3190193dfe41f9bc8f90063db39b003dd7e0de3c4f6ff73c3ab0c4bc97f8c63c036cc6bca70dc7bc79f9033df6e2cebc1097e23c40c2193d5143083d17cc143dd781f5bcebccf73c244df7bc11eef7bc1a2d1d3dbe0300bd65280a3d2af5183d7507073d2bd6133ddf19f3bc982cf53c2fe7f4bc3b91f5bcb67d1c3d96b4fdbc9df5083d1097fc3ce5d9dc3c1b85f33c5e59c4bc5ce0c63c59eac5bc4c6ec6
bc4e70013dd091cdbc943fe03c3745d83c925eb73c37d4ce3c06709ebcffb3a03cc50ea0bce1a0a0bc56d7de3c4bfda7bc10e5ba3cff801d3df0540c3d78a5183d53cefdbc5119003d8996ffbc7e1b00bd7fdb203d4b1f04bd98320e3d0494303dad171f3d9d982b3da4c711bd6fd2123d30ad12bd6c0313bd4cff333deb0a17bd89fc203df8ad013d2bf2e43c95aafa3c0484cebccfe6cf3c4308d0bc3c9ad0bcbeaf043d2572d7bc6d36e83c3507243daaa4133d61581f3d692e07bd2029083d200308bdda4f08bd463f273deb0f0cbd056a153df751fc3c44b2d73c22e3f13c3c7cbbbc4019be3ceb55bdbc6300bebc3ac1013deb5dc6bc88a6db3cc144063daaecef3c7c2b023d1861dabc7c32dc3cbdcbdbbcc251dcbcbc1c093d8cb9e2bc76fef23ccf42293d26c8173d2d48243d3f8a0abdc4970b3d466b0bbd00bd0bbd2fbb2c3da9b50fbda7ab193d7e15e73c2ddcc23c1ec4dc3c7809a7bcab7fa93cdddea8bc4f83a9bc422fee3c92d1b1bcdac6c63c2df2143d0216013d7e4d0f3d2668e3bc1332e63cec70e5bc082ae6bcd8d2183d2259efbc1f3f033dd53c143d0791013d6df00e3d62b5e6bc6be9e83cc19ce8bc864ee9bca1df173d86e3f1bcbb96033d5e7e0c3df61ef73c34ad073d2d13ddbc0974df3c00c9debc2464dfbcb5d10f3dbc21e7bc77cefa3c2f810a3d22edf33c80cb053d4707dabc2ec7dc3c98b1dbbc4345dcbcf3c00d3decd0e3bc5580f73c54c1103d2c2bfe3c02b90b3dc62be3bc0d9de53ca3ece4bc3c88e5bcb73c143daf7dedbc96fa003de32f243d1900123de2ff1e3d970a04bda647053d49f604bdfa4b05bd46c7273dd77309bd46f6133d50972a3d10ca183d0585253db7fe0abd65580c3d71e40bbd2c350cbdd51d2e3d6a4410bd13b61a3d1bd14f3cfd43063c6bf33a3c394e9abb1e44a53b91c1a1bbe455a4bb06325e3caa14c6bbd5390e3c9d4a003d6c97e23cafecf73c4c75ccbc6d6fce3ce4dbcdbc5b58cebcf24f033d10c3d4bc04bce53c4d83303dce5b1d3dda0d2b3d80ad0ebdcafb0f3ddaa50fbd8f0110bd0148343df06014bd676e1f3d4d7a0a3d09d0f03c2555053ddeefd4bcaeb5d73c2fbdd6bcef5fd7bc3d0c0e3dff89dfbc9ab2f43c6f771c3de9c70a3ddb6e173d4cc0fabc60c8fc3c3b8afcbc5930fdbcdff11f3dc8a102bd5db30c3df093ea3c153cca3c4454e13c9bbeb1bc0cf6b33cc556b3bcceeab3bc8b01f13cc424bbbc5eabcd3cb752af3ce0c08c3cd164a53c88bc64bc8a536a3c6e1768bca14869bc9832b63c498f78bc3875903cd078173de5f5043dda33123de86bedbca300f03c1a4aefbc93f3efbcd41e1b3dd866f8bcc7f3063d89ce203d1d610d3de5431b3dbce7fcbc07a9ff3c3adbfebc558aff
bc469f243d8d3004bd4a7a0f3dde2ee03c07e4b83cf600d53c128f9abc3c669d3c928a9cbc6d3e9dbcccdce73c4438a6bca420bd3ce3611b3dd2160a3d0f77163d61c3f9bc9ed4fb3cb586fbbc612bfcbc4ec61e3de70e02bdc0f60b3dcf390d3d2e2ffa3c2f9a083da000e2bcd6ede33cb199e3bc622fe4bcfc6b103d7b6aebbcf7a2fd3cdf711e3dd2970d3d2ca6193dceb600bd2bc6013d009301bd31e301bd2ac1213d1ac305bd5c680f3d39102a3d0c11183d1df5243dab310abd49640b3d651c0bbd57700bbd70972d3d58930fbd04071a3dc048093dc067ef3cdd45043ded49d4bcf0d8d63cfc0ed6bcacaed6bca8bf0c3d77b2debc0e35f33c752a253d43d8123d61f51f3d6fa704bdc7e2053d969805bd86ee05bd97c0283db92d0abdc0d6143d143c203d1bf10e3d144f1b3ddebb01bd20d2023d5f9d02bdcbef02bd599e233d5fe806bd4ed1103d331d213d3a3c0f3d7e0b1c3d6f6101bd0e9b023dec4c02bd4fa102bdaa99243d5ac606bdb52d113d2577073d9329ed3cf8a5023d1f8dd3bce181d53c8a46d5bc0aecd5bcddc50a3d46aeddbc24dcf03ca756153ddad6023dff12103db75de9bc91afeb3c023febbc18eaebbc77f7183d3268f4bcf7d5043d58e2253d2be6133d8fc9203d04eb05bd8f14073d97da06bd5e3107bd0c5f293d0e670bbd2cdd153dd830143d60e1033dec890f3d0209efbcc215f13c60acf0bc3747f1bc7768173de4abf8bc3da8053d4193133d344c033dd5f20e3d02a8edbcd2bbef3c0c50efbcade8efbcfdc0163dfd61f7bcff0f053daf271d3d9ca00b3d2a2d183da950fcbcea71fe3c021efebca5c5febc9495203ddc7103bdbc8a0d3d5b872f3d71891c3d53252a3dc5b90dbd02100f3dc6b40ebd990e0fbd3f3c333d7a7913bdd49a1e3d4aab1a3d014f083d996d153d6489f4bc4cdff63ce064f6bc630ff7bc90471e3da373ffbcf44b0a3df6541e3de4f60c3d3d61193d7f82ffbc12c9003d62a200bdf9f400bd6bbe213d79ec04bd16d80e3de5dc1a3d8d05083de97d153d4850f3bc75b2f53cee37f5bcf5e7f5bcbf8e1e3d3683febcd30f0a3d5e51123d9ad4003d51540d3d9d10e7bc395ae93c00d3e8bca975e9bca0c1153d8469f1bc7eb6023d5bd8233db42e113d0b901e3d53d102bd28f7033d65c803bde72304bd0f84273d817a08bd1b36133dd58e193de6dd073d3284143ddd7ef4bcacc9f63c974ff6bc7df9f6bcd6061d3d2226ffbc34ca093d4fef193d7d890a3d8192153dcd5cfdbcc037ff3c96f5febc818affbc3ef11c3d005c03bda6360c3d0217013def0dde3cbfcff73c0a1cc3bc273dc53ca9e2c4bc8d89c5bcf8ab043dcf96cdbc5dede13cdc1e043d5c4ce23c617bfd3cf0dac4bcaf92c73c08cbc6bc227dc7
bc03d4073d2f3dd0bcfb69e63c2899193d5fdf093d4c1d153df10cfcbc9be6fd3c5d9ffdbca233febc6fb51c3d52a602bd86910b3dcf29c63ca0a5a33c3c4ebc3c9d0789bc5aae8b3c19bf8abcd3588bbc90f2cc3cd52393bc5a58a73c69e9223d86c60f3d017d1d3d40d600bd3237023d27d301bd752d02bd6da7263d2fa106bd17dd113d398c0f3dcfcefd3c0acf0a3d23fce3bc26bfe63c57a0e5bcf929e6bcafd5123d8fa4edbc17b3003d8b63173de6f8053d026a123dca99f1bc39cff33c8254f3bc28f6f3bc6fd31a3df6c8fbbc42d5073d2008263d7760133da4bc203d23e604bd442d063d52dc05bd843506bd85b4293d3f8a0abdeb66153dde5e383d7380253d2b02333d50f816bd2235183d21ee17bdcd4518bd14123c3d429b1cbdff8a273d7398363d2021223dc6c9303d796612bda1bc133d6d7213bd54d413bd2d993a3d218b18bda958243d09013b3dd2d3273d1791353da2e918bd6a3f1a3dc8e519bd9c401abdb7bb3e3da1b01ebd53eb293d8ae8233d12260f3d04081e3d1046febc9882003dbf3300bd3c9600bdc0ef273d746305bd9368113dfcd5ff3c85afdf3cf69ef63c3c38c7bce34cc93c28d3c8bc0367c9bcb217033d19abd0bce727e33c9a681b3d0367093d8446163d090cf7bc0ca4f93cf1dcf8bce680f9bc01f11e3d42dc00bd375a0b3db4b31a3d45be093da1e0153d2483f9bc9cb4fb3c3e3dfbbc0cdffbbcea071e3dd8d301bda1940b3d1b40193d21a4083ddf7e143da928f8bc095cfa3c1accf9bc1b5efabc1a871c3dd9e700bdb06c0a3d3be52a3d28f0163d5d37253dc7ae07bd86e9083d92b308bd3e1109bd43ca2e3d1eaa0dbd7f1a193d9441f83c8feed93cf194ef3cf651c3bca3f7c43cbcd5c4bc1866c5bcb345fe3cc93dccbc5737dd3c03222f3d57111d3d75012a3d95140fbd8a48103dad0210bd645910bd92a7323dec8814bd31081f3d7e93033d79a2e53c2c8efd3c9fc3ccbc8195ce3cfb6dcebc470fcfbc0ae7063d8c93d6bc703be93c6cb31b3de92c0a3d44b5163d4459f9bcb8bcfb3cef1ffbbc44c0fbbce3241f3d10e401bde9120c3d9a8d1b3da7cd073d18ec153dbb19f1bc7ae3f33c751bf3bceed1f3bc0a691f3d4ae3fcbcd8f0093d26c9fc3c87fbdc3c34b2f33c8bafc4bc36ebc63cef43c6bc93d0c6bcd187013d69fccdbc3765e03cfe6fd73cecb4b73c8e52ce3c8b00a0bc8ee8a13cc18da1bce01da2bc7bbbdd3c2f29a9bc6d16bb3c9474d23c7a19b23c352dc93cd94d99bc96de9b3ca2e29abcbc6c9bbcbae9d83cd5a0a2bcb890b53ccf3d2d3ddf401b3d4f1f283d8d420dbd18900e3d122e0ebd42810ebd14c0303d85a812bd0e341d3d90d5243dfd27133d97cd1f3ddbd105bd3dc0063d26b806bdc30c07
bdbb4c283d131a0bbdd712153d26cd083da635ee3c56c6033deca2d2bcc728d53cdf75d4bce41fd5bc47420c3da457ddbcab0ff23ce8bd123d499e023d2c210e3d952dedbc11f3ee3c91c9eebc5765efbc99ed153d97a7f6bc8659043db4ae103df10efd3cb8880b3d9db8e0bc0756e33ce598e2bc6248e3bcbd35143d0dbaebbc5b81003d3442343d8a16213d45d12e3d774a12bd3a7d133d514813bd62a413bd3604383d9b1a18bde62d233d729c1e3db54a0c3deb66193dcf8afcbcc8d8fe3c3d69febca416ffbc9e31223d29c203bdba490e3ddc12153da45c023df5bf0f3d54ece7bc8b56ea3ce2d6e9bc4289eabcdac5183d2a2df3bc7965043d6bad353d9792233d8b8d303d0f8315bdd7ac163d227416bddacb16bd932f393d4a071bbd978d253d68b39e3ce5677b3cc343953c54444abcbed44d3c25864dbc91ac4ebc3743a53c80765dbc6840813c3c0a113dc07afa3cf5700b3dd46bdbbc7d5ade3cf677ddbc4231debc5ee0143d136ee7bcc0cbfe3ce92b3b3df0b9273df4a4353d74ae18bd07051a3d52ad19bda4081abd62fb3e3dd2851ebda1d5293d68cb003d4cd0e33c9afaf83c25cfcdbc63e0cf3c0334cfbcfeaccfbcbaca033dd310d6bcd5f4e63cb4fa0a3df303f23ca6d5053d7a9ad6bc1631d93c4a61d8bc2300d9bc8f890e3d3e11e1bceae0f53c38e31b3d37f50a3d7f11173d10b1fbbc1324fe3ca066fdbc1300febcef331f3d3ddd02bd93c90c3db4da263d1646153d6fd6213d19d907bd89ff083da6bc08bd310f09bd954b2a3dcc110dbd7b2d173db0c7233d7bd4113d24a51e3d045504bd5363053d0e3a05bdcb8c05bd0e53273d269909bd4dc3133df768143de7f6003dbcdf0e3dc09fe3bc0299e63c659be5bc7f52e6bc7c3d183d3d45efbcd10f033df71c2e3d7add1b3d03eb283dcbf10dbd30030f3d70e00ebdaa380fbdb1b0313d206b13bd6cd71d3dadf91a3d4f54093d7ef8153d433df7bcddc8f93ce80af9bc40aef9bc526e1e3d57e900bd9c3e0b3d857b0a3d3584f03cc047053d0a91d4bc9036d73c0064d6bc0f0bd7bc98130e3d934cdfbc4271f43c11e3173d3a46053dff99123d85dbedbc725bf03ceac0efbccb6ef0bc92841b3d2cfef8bc2f49073d9bab333da1f5213df9a42e3d476f14bd1e85153d455615bdf0a915bd0024373dd5ba19bd87e2233da7cc263dc4b3143dfbac213d06b106bd5deb073d18a007bdd4f707bd445b2a3d332b0cbdd7ab163df189083d68e5eb3cbb39033d94a7cfbc4e1cd23c1083d1bcac2dd2bcd1300c3d8997dabc63e7ef3cf813253da8c8143def69203da66d08bde473093df33d09bdd68709bd1c50283d53370dbdbb88163daad81c3dd3ef0a3d6fbe173d3b56fabcdcccfc3c7827fcbc41ccfc
bc945c203d008102bd11e00c3d9947133d7255013d262a0e3d9d3fe7bcdba1e93c400fe9bc13b7e9bcc0d0163dd3e5f1bcac47033d6ea81a3d22760a3d2307163d8d8ffcbca957fe3c982dfebcf9c2febc3bdd1d3d040a03bd46350c3d7c01273df01e163d0037223d163f09bd46480a3d5a1a0abdc0670abd12542a3d19460ebd13f4173dbbd82e3d1a931b3dfe60293dcad40cbde6f50d3db5d20dbde92f0ebd4898323d79a612bde9ab1d3d7dda213d4f25103d8cd51c3dc74e02bd5d91033d573903bd2d8d03bdac4e253d13ae07bd6313123dd7c92a3c0189cd3b846a173c85414bbb51455d3b89eb58bbe5cc5dbbfc1f383ce5d18dbbe244dc3b66ec153dbeaa033d2bbc103d6a1bebbce88ced3c4dfcecbce6a8edbcb67a193d1a21f6bc05a7053d0a23043dce35e73c20d7fe3c01c0cdbcf62fd03c9a64cfbc13f5cfbc4d63073d6d6cd7bc29c2ea3cf75df93c4019d53c4c06ef3c9d6bb9bcbdd1bb3ce23fbbbc38e8bbbcae3f003d112dc4bca800d93c12bf1e3dc3c80d3d82e6193daced00bd64fa013d65c901bd861b02bda916223d88f905bdfe9c0f3d2fae1c3dd9bc0a3d088e173d3f36fabc4464fc3ca308fcbcc4b5fcbc3732203dd47502bd23ae0c3da4c1263d5b14153dcdba213d239907bdb9ab083d3c8008bde3d508bdfd3b2a3de8e50cbd0a02173de1a6193dad16073d6a5c143d92c2f1bcc51ef43c42a6f3bc7954f4bc1b4a1d3d08dcfcbc2619093dacfc293dba9d183dc00d253d4b3b0bbd05520c3d96200cbd00740cbde05f2d3df37b10bd1c831a3d7da0093d6986ef3c8f84043d5696d4bc3fabd63ca861d6bc630dd7bc97230d3d3c26dfbc7060f33ca5a8073d9c5aeb3c1585023d0842d0bce94dd23c0113d2bca5bfd2bca3340b3ddaf0dabcb944ef3cf5fec23c8a56a03c7820b93cff9385bcd718883ce05587bc7af787bc39c8c93c1de98fbc7f16a43c815dd53cc2f8b43ceb20cc3cac509cbcec889e3c11ee9dbc44829ebc08c4db3ca5d3a5bccb78b83c47bc353de62f213deceb2f3def2711bd6297123d4e3812bd299b12bde6bb393da66417bdaa6d233da210323dc2f41f3d35ec2c3df62112bd513f133d250e13bd446513bde99a353d628c17bdb1eb213d0c30183df608063d3307133d8fecefbc2d8ff23c88c3f1bc4b69f2bc28ba1b3df7b9fabc03ff073db671ff3c827cdd3cd2c9f53c1d41c3bc47a7c53c64fac4bcfa97c5bc3d0c033de161cdbcf726e13cee70193d0714043d8161133dcdbde6bc69fae93c81efe8bc45b6e9bc30991d3dc39ef3bc8b66063d5a6c293ddd03173d4f2d243de7d008bd73240a3d3bbd09bdbb100abd35112d3d983e0ebd21fa183db381373d0755243d8817323d295a15bd21aa163da55916bd48b516
bd963a3b3db5321bbd006b263dfb36173df97a063dc570123d6c90f3bc67b5f53c573bf5bc45d6f5bc3d861a3de660fdbcb748083d791c113d4d42ff3c611d0c3d19fce4bc9615e73c3ebae6bc7f5ee7bca78e143d633eefbc8584013d33ef0d3d38ecf83c99f2083d6154debcf5b1e03c3e0de0bc47a4e0bc3e5f113db978e8bc2baffc3cf79c0d3dee5bfa3cceea083db6afe1bcad94e33cd350e3bc83e7e3bca0df103d0a49ebbc40e5fd3c4301183d7d65073d9344133dc750f5bcd5a3f73c65fbf6bcfb92f7bc5e481b3dd81cffbc7230093db2a22b3d8687183d2e36263dceab09bddd110b3d47a50abd35ff0abd635d2f3d8b640fbd62991a3d69a7ec3c54b1c73cff1fe23cac62abbca6bdad3c0c46adbc5cf7adbc84e6f33cea7bb6bc58b3cb3cc1020e3d9bf3f83c0006093d8113debc7c61e03c42dcdfbcb681e0bca96e113dfb8fe8bcc6befc3c97800c3de178f53c4f6f073dc50cdabc6c98dc3c5cdadbbc637edcbc9efb0f3d68a4e4bca659f93c073c053d5e87e83cd565003dcc27cebc51d7d03ca7decfbc6d7ad0bc0991083d4b3cd8bc7232ec3c9e44233dfcb2123df08e1e3d8dd205bd18f5063de5ac06bdb6fb06bd2887263d91d20abdfa7d143d335f213d20d60f3d13611c3daf6002bdfd8a033de44403bd669603bdd7cf243d559c07bd8bbb113d0fdc223d58a9103db2b01d3de8b502bd55c7033d9ca603bdd9fe03bd1c66263d1f3a08bd12a4123dad50da3c1e82b93c32eed03ca590a0bc55c8a23ce831a2bcbdc5a2bcc0cbe03c802aaabca80abd3c24872c3d420f1b3dcb89273decb60dbdbfd30e3d9b990ebdb4ec0ebdc8f72f3dd5ea12bdd1f11c3dee4a1f3d53100c3dded1193d18b9fabc5e21fd3c11affcbc1367fdbca50e233d191f03bdad250e3d31b9063dc87ee93c819d013d53c7cdbce712d03c6aa0cfbc5e4ad0bc843a0a3de19fd8bcf262ed3c3a88163d79ee053decc6113d8ac6f2bc2012f53c2c67f4bc30f8f4bc85cf193d745efcbc20b5073d09551e3df1760b3d76f5183d84d7f9bc195cfc3c7fc4fbbc4d74fcbc100f223d169302bd29830d3d49fb353c9b09f23b287d243ca9e198bba4cb9e3b0ecc9ebb7deea0bb2c11423c22cfbbbbbe30ff3bcdbb1a3d9408083db66c153da003f3bcfe8cf53cc4f1f4bc8ea2f5bce9611e3dec54febc98120a3da8680d3d5b28f73ca953083d655fdcbca651de3c502ddebc0dd9debc13ec103d55fce6bc1001fb3ca6551f3d43230f3de9b91a3d5b9802bd1ac0033db26a03bd17b403bdd182223de96a07bd45e8103d8cf3293de6fa183d8121253d16020cbdeffe0c3d8de10cbddd330dbd30452d3d0d2111bda9d31a3d321d1a3dd1cf083d642f153d0203f7bcad49f93c21c9f8bc2770f9
bc25801d3d1ab600bd40b10a3d7f81ff3cdeb8de3c1a1ef63cbf27c6bc3c2ac83c44c6c7bcc85dc8bcfffe023d0eb4cfbcf83ae23c98201f3d74960c3d41db193dba9afcbca208ff3c7180febc892effbc0fc3223d35df03bd959a0e3d9faff63c5c20d23cdd41ec3c0d3eb6bca794b83c3e16b8bcc3bfb8bcccdefd3c6916c1bc9414d63ce0e3203df7880e3dfca91b3d562e00bd87b3013d8b1801bd5f6501bd4b83243d178f05bdfc82103dbfd3003d691ddf3c43c3f73c21edc4bc8b24c73c27a5c6bc1444c7bc723e043d5c0ccfbcbbd3e23cd1901e3de11a0d3d8d9e193d3224ffbc8cc5003d897700bdf7ca00bd69fb213d24d404bd7e000f3d4fb2043dc691e53c4019ff3ced87cbbc7e1dcd3cf548cdbc8cf2cdbcf642083dd9e2d5bc6d65e93c97d2383debcb243d4a1e333d5a7915bd31b0163d058016bde1e016bdb1b93c3d6d7f1bbd30f9263dfd520c3d95a1f33c860c073d4a8bd7bc8601da3c7263d9bca10cdabc61f70f3dbf66e2bce29ef73cf6a9ef3c1cb5c73c624de43cf9efa8bc72bdab3ce5f5aabc23aeabbc0076f73cd2d3b4bc1b0ccc3c995d0a3db25ff23cda72053d3b48d8bce088da3c71fed9bcc89bdabc07c30d3ddb5ce2bcd013f63c6eaf3b3d8347283d0e2e363d224c19bd7b8b1a3d074c1abdf5a91abdfe763f3d0c291fbd47642a3d8e32393dcd02263d75c3333d9e0a17bd7b70183d7a0718bdb96218bd04f03c3de8d41cbd6d18283da38a223dee70103d1e611d3dbb8902bd67dc033d477103bd60c303bd261b263d52dc07bd2e64123dfd53253d96b6123df00c203dfe1c04bd0571053dda1305bdec6c05bd9ff8283d70c409bd63bd143d21ac2c3d9c071b3daea7273da5730dbd58b50e3d99560ebd9ca60ebd0b23303d31a912bd4eef1c3d3ef22d3d64211b3dbf98283da0850cbdead60d3dc87d0dbd2bd70dbd509a313d9c3412bdcd2c1d3dc3d92c3d2264193d9a52273d86570abd0a990b3d51580bbda0b40bbdfea0303dd63910bd57811b3d3afd163d5b34063d3234123d3848f3bcf601f53ce2faf4bca39cf5bcde4a1a3da543fdbc7405083d8fe10b3d274bf53c19f6063daa33dbbc504cdd3c7feedcbc3b8dddbc13470f3d2161e5bc0804f93c3d0f1d3d993c0c3dc546183d4206ffbc9073003d1a5e00bd5aae00bd1459203d338904bd1d100e3dfb88e33c56f8c43cebc1da3cc03aaebc8b0cb03c7eb8afbccd44b0bccf9ae93c9b08b7bcd13bc83c63c3073de92cec3c39b7023d7405d1bcc99cd33c33c8d2bcb765d3bcae460b3db862dbbc7cfcef3ce45e243da9b9123dc45a1f3db62405bd2a57063d0c0a06bd1e5b06bd90d6273d6c670abd4fa1143d3f0ddf3b26cf3b3bb8d3b93b8d5dff371c975039473f25b9227268
b932f9f83b9af48dba1a48573b7148303d29761f3d8b802b3dbe8c12bd2cab133d086713bd9db313bde392333d318e17bd4447213d82c11a3de782083d8695153d6a69f4bcf80ff73c8d4ff6bccefff6bcae4a1e3d588affbca4800a3d6ea6283d803e163d626d233d97fa07bd8c36093d7bed08bd5f4409bd8f3b2c3dcf8a0dbd4e3e183d64431e3d80cc0c3da94a193d70d7febc568f003dd14e00bdfba100bd4dae213d04a204bd31af0e3dedce2c3dd5551b3dccd9273d9cea0dbd1cf90e3d05d20ebd38270fbd5e34303d2b3713bda63b1d3d06581a3d90b8093de09e153de92ffabc6d48fc3c62dbfbbcfd74fcbcb49e1d3d890002bd06830b3dddbbd03c45d3ad3ca0d1c63c30a192bc1359953c5f6594bc500595bcfba5d73c09039dbcdf96b13c8ac6fa3c966cda3cb386f13c061fc2bc2f06c43c6ebbc3bc1852c4bc7b96003d229ccbbcb9e7dd3c8f9d033d2b07e33ca5e3fc3ce430c7bcfcb1c93c5904c9bcb7a7c9bcb729073de0ebd1bc3ef2e63ca100183d63e8043deb95123d701fecbc1bb7ee3cdd19eebcd1d2eebc86b61b3ddeb9f7bcf4fa063da0680e3d8753fc3c82c0093ddff4e3bc7af9e53cea8ee5bc9123e6bce9a8113d3266edbcc9cbff3c1874123d8f9d013dfda80d3dbf34e9bc5caceb3c83e8eabcf983ebbc32c0153d8735f3bc7f6f033d5d18f13cdbf6ca3c0434e63c3decadbc502db03c2eddafbc5b93b0bc77a6f83c3f55b9bc181acf3c8a5f9cbfe0d589bff11497bfe452773fc3b479bf2337793f5ce4793ff4fd9fbf0738813fc8d98bbf124c92bff00e81bf1b608dbf86c4673fbbf269bfb087693fed286a3f2baa95bf1b20723fb6ee82bfb4008dbf7ccd78bf024488bf3d625f3f538661bfa514613ff2af613fc93f90bfd05b693f97687cbf3e278abfc29a73bf8d7c85bf918d5a3f2cad5cbf203a5c3f37d35c3f2b598dbf2766643f5e2877bf0e7789bf67f172bfc6e484bfdca95a3fec855cbfb04e5c3f97e45c3f7c958cbf3055643f506d76bfce98bd3b6312a33b68f4b53b78a18fbb0af7903b1fef90bb496b91bbc6dec23bde5197bb06eaa53bfc6cc53b0f0ca63b595ebc3bfef28ebb1fd2903b3b7690bb510391bb70aecb3b56e597bb8465a93ba24ded3a5a2ccf3a25a9e43ada21b9bab952ba3ae5a6baba7f3bbbbae73ff33ae612c2ba696cd23a62482c3c07a61a3c294f273c6e200dbcc8ed0d3cd0110ebc116d0ebc1faa2f3c8fa312bcc6971c3cfd827a3b1d3b573b5b56703b65413dbbe8313f3b22fc3ebbbca13fbbf7c3803bf47647bb9e025b3b9cd6833bf28a673b37857e3be7814fbb1af9503b862851bbd6c751bbfff8863bf83359bbc4096b3bedfa8d3c07e0773c30e4883ce7a75bbc95eb5d3cc4905dbc9e425e
bc1371913c94d566bc4cd97b3c4a18923b9c7d7f3b53d58c3bb65264bb5118663baf2a66bbacdb66bbd3b6953bf82b6fbb93b9813b3afa0c3cb906013c2f9e093cd979efbb219bf03b37c6f0bb1642f1bbf6410f3cae0bf7bbe959023c33e9783b08c25c3bd0e4703b078047bb3cef483babf348bb017f49bb71667e3b970450bb76d25f3b27ea0e3be8dff03a1369083b834bd0baec94d23ae474d2ba1941d3ba266a133bf314ddba75a9f53a25c5bb3bcaf1a33b23fab43bc22b92bbed42933be76393bb12da93bbe16cc03bb95599bbe787a63b1e1f053cf157f03b4f70013cc7c2dcbba302de3bf11adebb149bdebb0ba4073c5aa3e4bb5d2bf33b2fe2d53bb145be3b582ccf3bf085acbb4a90ad3be5bfadbb0836aebb4d78da3bfab6b3bb8ed9c03b1b8f973b8338813b7a1e913b2c1862bb1bff633be34a64bbe82065bbf2009c3b3a116fbbdf9c833b70cbb53bade79c3b94abae3bfb3d8abb87a08b3b907e8bbbd8f48bbb88b0ba3b729c91bb68989f3bf3089d3b204d8d3b8097983b555681bb38fb813bfd2c82bb347e82bb240fa03b703d86bb89088f3b957c493c68d6363ce642443c5d6928bc4327293c007029bce7d429bc3d074d3c8c632ebcd6e8383ccd89903b3069823b8f898c3bdb6c6fbb9b99703babeb70bbd27c71bb5244933b352c78bb25f6833bc57ae43b7245ca3b2301dd3bda9cb6bbcfddb73bf4f5b7bb5378b8bb539ae93b5386bebbb91ecd3b4781843b766e6a3b5226803b3a6453bb70e5543b40f854bb5e9055bb997c873bf6a65cbbcec26d3b0430213c49bb113c6cbc1c3c596e06bc731f073cf33207bcd37c07bc9542243c0df30abcdc63133cb48d4b3bba382d3bb4c1423b7a5c17bb2ddd183b65d018bb7b5a19bbcfa6513b60f41fbb5c71303b8f75603bd3f4443b7fa3583b267030bbcd89313be6dd31bb216a32bb01cf653be9d138bb67f4473b02703a3cf714223c5d89333c85fe0ebc65c8103cda4210bc95ba10bc4e243f3ce76d16bcd9bf243c61a4ff3bbc1fdf3bae59f63b9306c7bbfc74c83bbeadc8bbef4dc9bb4102033c86bed0bb75a5e23b119cf23b5352d83b3127eb3b6d46c4bb63a2c53b19a6c5bbbf29c6bb25b3f73b2153ccbba832db3b8929173b0d09083b43dd123b2b45f9ba8e91fa3a78dafaba6f72fbbaaf191a3b304501bb77b0093b5583873c3a96773cb93f843cf4fc64bce427663c9d4c66bcadcb66bcf2b6893c4b9d6cbc0e367a3c1ade2a3c7f99163cf921253c5afa06bc3f23083c810a08bc137008bc4fc62e3c57350dbcb5d2183c68bdc43b54c1ad3b8a36be3bd2819cbba2799d3b40b49dbb95289ebbae32c93b0f87a3bb2b44b03b7e6ece3be7b1b43b6d13c73bf9a9a1bba2c0a23b6ef9a2bb0b79a3
bb317bd33b925da9bb587bb73b1791373bc7931d3b8916303bc9990abb8fcc0b3bf9e10bbbf65c0cbb5fb93c3b492912bb725d203b4d8e003c4857df3bfa78f73b554ac6bba0c2c73bf902c8bb3aaac8bb29dc033cf968d0bbca00e33bfcc0e73a8022d13a6f5ae13a93f4bfbaede8c03ad127c1ba719bc1ba2a1eec3a02f9c6baa29ed33a92da8a3bbad7613b555c833b03a93bbbcddb3e3bb8253ebbaf0b3fbb4c08903b0d624abb355f673b78a2fa3b6780de3b05abf23bccfac8bb9371ca3b0f75cabb6503cbbbdc08003c5ba2d1bb7495e13ba6934e3b66d9353bdd90473be1de22bbb344243b742a24bbfda724bb735c533b84762abb438e383b1d40563ccf1e433c67d7503cea8834bcee5d353c148e35bc57f135bc58f0593c457e3abcfe38453c76bf273cf4b3133c2d10223c130b04bc8472053c791705bcab7b05bc3aa02b3c77300abcffe5153c8ce5b13b1be9993b1306ab3b8c0b88bb1351893b8d3f89bbc9b289bbe09eb63b21218fbbca7f9c3b6200623b239e493bc60e5b3b406c37bba26e383be2ae38bb982839bb9dc1663b8ad03ebbc3454c3b45b5003b0c74db3a5867f63a6953c0ba8af2c13afb26c2bab6d6c2bab686043bd11fcbba667cdf3a59d4123b1a73f13a57280b3b84ceccbada9ccf3ae22ccfbab70bd0bae231183baee6daba5ae5f63a5744713ba391563ba39f693b3ad042bbedf9433b662b44bb00ae44bb2185763ba2c74abbfa74593b76fba03bbb88913b81a19c3b68a785bb1d58863b3c7c86bb95cc86bb69f0a33b76828abb8d3d933bff3d2d3c82381e3c46fd283c64ec12bcce76133cc2b713bc1c0514bc4224303ca69117bccdde1f3c355c7c3b924f563b3141713b2d5c3bbb01363d3bdd213dbbcbc83dbb3a0b823becdf45bb53515a3b15cfc83b2e16b13b2801c23ba6489fbb4caca03bcd78a0bba6e8a0bb3a7dcd3b7a46a6bb68a5b33b59633e3ab004123aa91f313a1f0ae8b9010eed396bccebb9592cedb9e4c4473a3395feb9c38a163a62318a3bd350723bc758853bac0a59bb354d5a3bbace5abb9a7c5bbbf0818d3b166863bb2f07763be6a9093c81f1f03be1c2043cf7cad6bba5d2d83bd58cd8bbaa31d9bb30060d3ce21ce1bbc2adf43b087f193c3f56073c074e143c3c50f3bbb15ef53ba726f5bb9ad4f5bb0d0f1d3c211efebbba4d093ce99d623be428453b510d5a3b21eb2fbbb870313b8e5331bb61d931bb2590683b413e38bb6248483bbfbe003c4222e73bbceaf93b309dd3bb56edd43b58efd4bb736dd5bb995a033c5361dbbbfef8e93be0cc0a3b1beee53ac3db033b3d73c3ba2656c63a1aaec5ba4e7dc6bab19f0f3b3faed0bac7f6ea3a4233913badb57e3bbe128c3b0a5d64bb2e16663b2a2666bb2dd166
bbaabb943bdbde6ebba546813bfc1c7c3b0af7613bd1b1743bd4074ebb8e5b4f3bf8654fbb5ce84fbb0897803b610a56bb8dd3643b7411f53b98e8d83b1302ed3b8eeac3bb5e57c53b6056c5bbc5ddc5bbfa9afa3bbf45ccbbf4f3db3bd4636f3b0fd7543bb5d8673bcb9c40bb7809423bd2fd41bb748142bbd38b743b86b248bb33bd573b34718c3b694d783b38d9873bd69d5fbb40db603b445b61bbcd0662bb3c918f3bb8cb69bbbde37b3b90d5233b9af80e3bd5e11d3b5beefeba315e003b478800bb81ef00bbd9e9273b15bc05bb7d3c113b1bf24f3c8f2d353c2a51483c632b21bcff65223c028b22bc6f1023bcb22b553ca63c29bc7816383c97bd173ce9b0073c102a133cf761f7bba7cef83ba809f9bb96a9f9bb94e01a3c7e8b00bca16f093c78425e3c1831493c805e583c59c438bccaaf393c96ed39bcb55e3abc293e623c91883fbcd0894b3ce18fc93b2244b23b12f0c23b57b0a0bb86d0a13b43e5a1bbcf59a2bb3917ce3bd8c4a7bba4ceb43b6c46303c4dc2163c5bf3283c968403bc492d053c19c904bc0e3f05bc4b51353cbffc0abcab81193cd3f9183ca3820a3c14ea143c789efebbb9eaff3b9d1800bcb06400bc10bb1b3c14e603bc121d0c3c29aba53b8bd7923b8958a03b0cb584bbb15e853b75b485bbcb1686bb544ba93b9c8c8abbd2e8943be657e03b2c4ac93b37d4d93b9ca7b7bbc8bdb83b82e0b8bbfd55b9bbbbc7e43b01cebebb0dd3cb3b1814773b8d8f583b80596e3b0fbb41bb5351433bac4643bb81da43bb64127d3b24d14abb18dd5b3bd4cd833be837683bc5967e3bf23b51bb8093523b53ce52bb596653bb57e9863b917b5abb9b986b3b3409f83b1e86dc3bb73bf03bc9b9c7bbf5f5c83b4e2bc9bb1db8c9bb0e5efd3bf52dd0bb2688df3bc646db39d1f39a393f03c8393d845fb955136739cfd464b988ba66b92be8e83949817fb98e7ba139063d293cf934173cb724243c179909bc0d440a3c958e0abc95ec0abc4db52c3c51350fbc8030193c16c77f3bfb6c603b80d3763b28ec48bb8a834a3bab854abbc71f4bbb97f5823b225252bb21d3633b903b453c1ce82d3c349e3e3c56041cbc76551d3cec3b1dbc2ab01dbcaabf493cc92623bc4b75303cb8ab9a3b39e0873ba84b953b3ac073bba693753b02a875bbe85e76bba15c9e3bc3f37ebbf9e8893b0141133c46bb033cfddd0e3c13b3efbb5f31f13b6158f1bbfff6f1bbf53d163c0754f9bb0370053c8d22083c997af33b9a0f043c3d76ddbb85e6de3b0efadebb8b8adfbb90e90a3cb854e6bb4aa3f63b512aa33b5e19903b01c19d3be09181bb9b95823b139082bb95ef82bb10dda63b226487bb5c2e923b4703833bd183653b5db17c3b1d424dbb0e124f3bdde14ebbc77a4f
bbec39863b04d156bb3e04693b5606713beca1513b720c683b2af939bb5fcd3b3b188f3bbb38263cbbad2f773b294d43bb6407553be6932439e2b1eb38666f16393e2eaeb83fbab338d9ecb1b84146b3b8f9a92e39c5c6c4b8b30ef538ca3eb33beacca13b3955ae3bb25d94bbf026953bab4e95bb1caa95bbae95b63b78dc99bb2abaa33b3dfe0f3c793f033c2f6a0c3c7e71f2bb6edbf33b2ed6f3bbe15cf4bb1e6c123cb98ffabb21a9043c0d16913bebd9823b60108d3bcbff6fbb6e3a713bd28371bbed1572bb4fd3933b13dc78bbe36a843bb868d83b1a92c23bdb45d23bf096b1bb3eb3b23befc4b2bba336b3bb9492dc3ba37ab8bbbafcc43bb69c0c3ad775e53925e2043a5adec2b9ee5ec539880ec5b90bdac5b9f412123ae4fdcfb96abdea394c5a033b72b6d93a178af93a94b3b9ba0236bc3aa3c7bbbacf8bbcba0bf1073b300cc6baa76fde3a815ad03b4741b63b2fe9c83b0bcea2bbb1fba33bac23a4bbb7a4a4bb1c74d53b70a5aabba716b93b1b7bcd3a76ccb33a931ec63a88aea0baf902a23aaff9a1ba1076a2ba828bd23a8d4aa8babe91b63a2c9aa23b99ff923baa399e3bc5e986bbab99873b51c387bb841588bb4791a53b1ddf8bbbe8ba943b8a43c13af9c7933a63b0b33a997f69bad2286f3a89546dbab8b16eba81d9ca3a323f80ba576c983a761e043ac321d2394848f8395d44acb9746faf399babaeb96388afb99cb5093aab9abab996bfd739bbd6cc3b8551ae3b9d09c43b6cd397bb358c993b8a5099bb34dc99bb8eead23b8a9da0bb7b95b13b183e7e3cb88d683c1730783c11b057bc7b7f583ce5e458bcd35a59bc6d2a813cc0b55ebc16fa6a3c90f4343cd465253c1d9a303cef5219bc6df2193c582e1abc75811abc6ee6373c15511ebc6d21273c18a7073c5a9ff13b3270033c2917dbbbd190dc3b21a3dcbb8237ddbb55880a3ce127e4bbb7ddf43b3977b53a94b1963a8f99ac3a85b17fba08c1813a485881ba54e481ba0495bb3a15b388baf0fc993a2785f83a2697d73a8a09ef3a0e63bfba190ec13ad102c1ba0f9dc1ba4910ff3af0f5c8ba311fdb3a16833a3be50a1b3bae6b313b420c04bb57c1053b819205bbd82306bb63cb403be60f0dbbbb661e3b9f62be3b20b3a53bf24fb73b328493bbc197943bd2c294bb793b95bb8a3fc33bc8d69abb695da83b80c03a3b2bd0283bd4b1353b8e1c1bbbd7d71b3bba121cbb486f1cbb6e303e3bf4ba20bb1ecb2a3b2c77853b2a6b5f3b6f5a7e3b0c533fbb6803423b506b41bb7e2c42bbc3d0893ba7b54bbbc80e643b83af1c3b5a8a0b3b82d1173b9aedfcba70c0fe3afdb5febacb5effba8f02203b74b003bb1e6a0d3b1d6c673ba1ee483b30a05e3bfca532bb3424343b7e2634bb57b734
bb94806d3bb3803bbba2314c3be417a93b7bcd993b8dcea43b21f28dbbfd978e3b81c88ebb86198fbbdefeab3bd5d392bbac809b3b0335213b74f9093b37891a3ba4d1f1ba38f6f33a6a24f4bae707f5ba6bcc253ba482ffbac0790c3badbfc83b2e56b13b9513c23b8fad9fbb2fe5a03babe0a0bbd752a1bb5d51cd3bf2b8a6bbf4e1b33bec48453b9dd4303be0803f3b261421bbb826223bd82922bbef9122bb2139493b9c6c27bb2d14333b7479243ccb70113c58111f3c331a03bc19f7033c941704bc577704bc312b283c37e808bced84133c772ac13b07a6a83bc724ba3b8f0396bbf69f973b6e3f97bb8db297bbb4fec53bfb449dbbf64bab3b6d1abe3b569ba73bdbaeb73b12ca96bb14da973b63f097bb905e98bb3081c23bcf8a9dbb9b0caa3beda9393b7958203b5c5a323b43d60dbbcb210f3b00130fbb7c880fbb95b73e3b3b2315bb390d233b36bb3b3b2729253b4c4e353b244814bbc644153b7c7215bb90e315bb0921403b4c1f1bbb459e273b2cf6c63a8514b03aab6ec03aa2019fbaf806a03a702ea0badc9fa0ba0c70cb3a77e7a5baf290b23a73234f3b21d8323b480c473bddbb1dbb59301f3bc9291fbb78b31fbb4db1543b392526bb73e7353b00123e3cde05293c2e1b383c89ab18bcf8011a3c67c619bc4a2f1abced23423ce8231fbc5d542b3c74ff3f3b8049233b76a9373beda10ebb6c13103b320010bb158210bb47c7453b34bb16bb2555263b8d8d323bc6c4183ba61a2b3b16d905bb4c4a073b0e1a07bb059107bb44b4373b7c400dbb2d851b3bed42023a14b0e1394d82fa3965eec7b954c3c939b6a9c9b92b50cab9bcb6053a1722d2b9de6de539a26e113ca0a6013cbff70c3cc150ebbbebb8ec3be0fbecbbe99dedbb8f79143c0a15f5bb1462033c6670bb3b9381ab3b0bfeb63b951a9fbba6b49f3b27fd9fbb9252a0bb1a71be3ba441a4bb6449ad3bf946fe3b1699e23b9376f63b934ccdbbbcb7ce3bd7c4cebba952cfbb0fcc013c78e6d5bbb0a4e53bc380093b161aed3ab706043bff50d1ba5168d33aab28d3ba76d6d3ba814a0d3bd031dcba8125f13aae50c53a03deab3a93f9bd3a370199ba247a9a3a94419aba25b79aba9e62ca3abd61a0ba0f98ae3a0307fe3adcccda3a2ec4f33a6cadc1ba4150c33a305bc3ba70fcc3ba1c95023b1c9bcbba3b86de3ac0bcd73a3508be3a2a54d03a3007abba286bac3a5e4dacba9dc6acba40dbdc3a5587b2ba6dc9c03a6f33bd3b2729ac3bcb69b83bb5e69ebb77bc9f3bd1d39fbb542da0bbd572c03bb34da4bb8e0cae3bd9cdb03b207a9f3be8ecab3b293092bb02ee923baa1e93bbbe7893bb571eb43b56a197bb0b64a13b8befc53bfeb6ac3bbbbbbe3b61b799bbbf1a9b3bceff9abb8b7a9b
bb2ee0ca3bed41a1bbf872af3b761ea23b4dd98f3b99f29c3b41f981bb3fce823be9ef82bbaa4d83bb68a5a53baf9d87bbfed9913bac52693a1e54413ab89d5d3a115425bac513273a0e3027ba5de327babf7d713ae65c30ba9c86453a168c483b9c9a303b00bf413b20c21ebb1fa41f3bd40220bbe47e20bb9f324d3bed1926bbbc37333bde3a273b96ec143b56f4213b816807bbf05b083b3d5108bb3ba708bbd0df2a3be5c10cbb40e3163b160a253c02de123caadf1f3cd33405bca304063ced2506bc388006bc3892283cd1ba0abca0d9143c07aea33ab7c5883a54df9b3a636d6abaa96b6d3ad4016dbaaaf66dba3318a93a85b079ba44a18b3a681b913b42a8793b2d478b3b2dbd5bbb5ad25d3b11c05dbbc6805ebb611f953b989867bb0d047e3b9c7d393c6e71213c28ab323ccaf90ebcb562103ce53910bc92b010bc01263e3c634f16bc9112243c50e8803be3b65e3b58c0773be6a744bbb87a463bc46846bb8a0f47bb615e843b81fb4ebb577f623b9bf1213c0dab0f3c24c81c3cc7c401bcd28f023c2cbd02bc091c03bc4075253cba7207bc5bad113cedf2983b2d06853b5a36933bb37e6cbbd1bb6e3b82776ebb85336fbbaae79c3bce1d78bbde29873b2de9633b7ee24a3bfdc75c3b8adc37bbec41393b432639bbe99f39bb28cb683bd16a3fbbd29b4d3b0795843b220e643b6e707e3b06f248bb14ee4a3b78be4abb95684bbb5d4a883be69153bb9a03683b3c7c0d3a7650d239a38a023af91da2b97441a6394a17a5b9a02aa6b93144153ac105b4b9baa3d939b7fa803b7a2b633bd625793b4c034cbb12c14d3b10924dbb6c264ebbbd01843b3f2b55bb9d7f663be7e4a73bfd26903bcb0ba13be3f27dbb01f47f3b6a2480bb2b9680bb569fac3b46dd85bba1b1923babfb923bbd6d7e3b9a438d3b33a061bb6bb2633bb18963bbbe3d64bbb0f0963b84eb6cbb5d52813b90061a3cb497053cfb2e143cdcceecbb18c5ee3b1ddfeebb5aa4efbb52091e3c8ef2f8bba1cd073c6979e33a877ac13adbb5d93adeb8a8baf71daa3a936aaaba2710abba4b32ea3acdb5b2baa122c53a15d2753bf1335c3bcc8f6e3ba0b248bb9dec493b7d0b4abbb68c4abb55c67a3b6e9550bb7d025f3b2e5ec23b3676ac3bc826bc3b5ae39bbb6fec9c3bb2079dbbbf759dbb5e9dc63b1495a2bbefdaae3be4ed9a3b5a8c893bb8fe953b46b078bbad6d7a3bbb807abb862f7bbb174d9e3b19a981bb92728b3b2441db3b391ebf3bc320d33b4f82aabbf9feab3bcfe1abbbe263acbb42dfe03b3f9cb2bbed1fc23b65d9b83b89a0a13bdc38b23ba06890bb3f62913ba59891bb220c92bbe564bd3bc36297bb6a25a43bb83fb73bea259d3b2ec7af3ba87589bba00f8b3b92c48abbfb3f8b
bbe460bc3b2b2a91bb09f89f3b5bd2193c1685093cbb36153c5228fabbbfa0fb3bc9e4fbbb128ffcbbeaf51c3ce12702bc02500b3c3b7c3a3bf50a2b3b7e34363ba3d81ebb3a6e1f3b1ab91fbbf50d20bb075c3d3bccf023bba1c82c3b17a2fa3acc77d53a48e1ef3a765cbaba3550bc3a3e2bbcba7dd8bcbaae09013b8b09c5ba6a6ed93abec7763a0ae4523a67686c3a547438bab4913a3acc323abac4d73aba06f57d3aa0c042ba1db8563ab1fc493b0c16343b39a8433bc04d24bb372d253b816025bbc3c625bb7b5d4e3bfea02abb6e6d363b05991c3c8d36083c28d2163c80e5f0bb9572f33b3805f3bbafcdf3bbe289203c915afdbbdc710a3c7390083c242bf73b23e5043c3a79e3bb5e84e43b7cdae4bbce5fe5bb22100b3cd58aebbb3806fa3befe4c83a447aaf3adca7c13a325d9cba3b9a9d3a54ac9dba272b9eba23d9cd3a690da4ba963eb23a3485503af16a293a04f7443abc580eba386e103a011410ba52b410ba24a1583aeeac18bae1772d3a6ef9f23a0776d83a5b76eb3a5f7dc4ba528ac53a61e3c5ba176cc6ba5419f83ac6adccba285edb3a4a4c913bfaa5793b216b8b3b7cb95bbbaf715d3be9c55dbbef8d5ebb7958953bb7ca67bb970e7e3bb099f53b2cded13b925ceb3b5217b7bb1b29b93b7be0b8bb9a88b9bbc3a3fc3bfa9bc1bb14b9d53bd6a4133b373df83a69c50c3b8058d6bad931d93a8d8ad8ba7d58d9ba036f183b535de3ba6f30fd3a2b637d3b4c375e3bf58b743b38ca46bb7711483bdf6c48bb600e49bbecb5813b916050bb91a0613b13fe0c3cf8d4fc3bafd8083ce0b9e6bb1037e83bdf3ce8bb68cde8bb0ed40f3c6a96efbb9301003cd48f0c3c7bddfd3bedb8083cf1d2e8bbc220ea3b2e4aeabbfad7eabb8b2b0f3c0c63f1bb3470003cf2b22a3cb474183c0183253c61730abcfc8b0b3c32650bbc15be0bbc3e3f2e3c0efd0fbc54721a3c9547903bd4e77f3beeaa8b3b838f66bb5781683b824968bb0bef68bba46b933befaa70bb10bf813bc1ee9a3b666d833b5e1b943b499f64bb4859673b8cdd66bb4faf67bb3fab9f3b40e471bbd3eb853b3ddf0e3cb54c003cecc30a3c3b1feabb058feb3b19aeebbbde43ecbbc8a9113cbc3af3bb30e8013cbee1af3b01e59d3b25ceaa3bf81190bb04f0903ba70791bb456491bbe755b33b8fae95bb4be09f3bc787cd3bab13b53b0f92c63b67b6a2bb2cd9a33baef9a3bb0474a4bbed4ad23bbf1faabb93bdb73b9e71e23ac849c43aa7cfd93aa3e8adba175daf3a286fafbab403b0bae25fe83a80e2b6ba058cc73ad173053cdf47ee3bca64013ca770d8bb44e9d93bf2efd9bbf27edabb3439083ce835e1bbe269f13bd9f5aa397bab6a39e5be9a3941c424b969892a39fa0a29b9e3922a
b92682b639ce9a3eb9786b75396371e93baacacd3b2678e13b838bb9bb8decba3bede6babbc767bbbb46f2ee3b3c8cc1bbfcc0d03bc308a33bb6ca8f3b14889d3b6c9281bbbb5f823b468d82bb57ec82bb2bd0a63b405487bb17e0913b29e0323b9adf193b00c22b3bbc6d06bbf54e083bc9b407bb8a2b08bb48bf373be5ef0dbbf5971c3b1e37013c40c0e83b1d3afb3bdecfd4bb4a02d63bb635d6bb5ebdd6bb1aa8033c21f8dcbb4099eb3b183e233bbf860c3bc0b31c3b37cff7ba97faf93a840dfaba33e5faba0fc1273be78402bb62f60e3b6b71013c3858e93be2b0fb3b73b2d5bbd4ddd63b7e11d7bbd095d7bb45e3033c73b5ddbb132aec3be6888a3b78437a3b29c1863bfaf765bbeae5663bbe6867bbe1f567bb391b8d3b0f626ebb3b387d3b893bf13ba2e2c73ba865e53bdda0a8bba74eab3bdcaeaabb516babbbbc5cf93b24beb4bb1659cc3b19a3fe3b00dce43b3c55f73b8c16d1bb317ad23b6070d2bb8ff0d2bb74cf013c5bffd8bb09afe73b2664f23bbdf2d93b0685eb3b21e1c6bb3535c83b3c32c8bb58b0c8bb8f0ef73b1c92cebb7aa6dc3b52296d3b5be5533baafe653bcbd240bb3aec413b852542bb13a542bb010e723bfa9248bb26a9563b7dcda93b2f5e973b1299a43be20889bb38188a3bb8048abb1e648abbc057ad3bf8c98ebb4866993b1bb0883b3c54703b07f7833bd1e857bb9146593be39759bb763b5abbe3ed8b3b32ce61bbd8e8733b2151c53beaf2ab3b300bbe3b704199bbb66a9a3b0c879abb7a019bbbd251ca3b39bea0bb1fafae3b09bd923b87d9823b593e8e3bb2886dbbf0f96e3bd6356fbb77d76fbb58cd953b285977bbe197843b77991c3cfe7b0c3c5302183c373d00bcf21d013c481201bc5d6101bc5ebe1f3c2b1e05bc463d0e3c8bb5b03b89a99a3b886baa3b451e8abb8a378b3b1d3f8bbba8ab8bbb8205b53baabe90bb2f0e9d3b3d3c043c51a3ea3b08ffff3bdb2ad4bb8e84d53bd1b7d5bb5f4dd6bb8d21073ca342ddbb27e5ed3bc4fb9e3b9d15893b94a7983b03f871bb217c743bf71774bb82df74bbe25ba33b9b847ebb196c8b3bc83c1f3c49190d3c461f1a3c510efebb5d06003ce0fdffbb225c00bc40b7223c5eb004bc22190f3c6f17213b2b10073b528d193bc8bfe8ba8921eb3ab240ebba8c31ecba0950263ba48ff7baafd4093b0324343c5fdb1f3cbd602e3c6a8110bcd07d113c728f11bc13f511bc2514383cb3b016bc8312223cb8ef803b6d93623bcbee783b353a4bbb4cb74c3b6ad24cbbdf6b4dbb7f00843b4e9954bb4ef8653b0dfb243c598f123cf7bc1f3c79a104bceb90053c069505bc10f005bc5991283c40360abc4291143ce545443b97e22a3bd4f93c3b2a4e18bb706e193b819119bbd10a1a
bb644c493b4fbf1fbba69d2d3b35f61c3c4a00083ca7f5163c47cff0bb77f7f23bcce9f2bb26b2f3bbcc15213cfb31fdbbdd430a3c7d3fb93b9a47a53b2898b33b4e0896bb0e05973b971597bbfe7a97bbaf1abd3bd6309cbb1078a73b3caac93bc013b23bb5f2c23b6059a0bb957aa13b7290a1bb6805a2bb5543ce3b3d7ba7bbf9a4b43bfe28543b674e2e3b5ffa483bd22914bba51e163bbed715bb947516bbb0005c3b622f1ebb953a323b4645bd3be12ea63bf6aab63bd7f694bb6720963b842196bbfe9096bba7cdc13ba6d39bbb6caea83b18cc033c1ba1ea3bab59ff3bafd3d4bb4637d63b2152d6bb26e1d6bb719e063c2a97ddbb84c8ed3ba1a3583bd3163e3b6802513b02af2abb8ac82b3ba7032cbbf5842cbb99e55d3b8d8332bb55f2403b6e5b9d3b539c8e3b612c993b0f6d83bb3814843bfc3384bb907e84bbd936a03ba2f887bb183a903ba744b13b3c8c963b969ca93b2c4182bbcc14843b199683bb0c1284bbe287b63bdb198abb416e993b7895873b1f78603bb0d4803b65583ebb1301413b979640bbc06941bb18418c3b479e4bbb7771653b18dd6b3bd359503bc101643bb2b63bbbbb133d3bd91e3dbbe4a43dbbf341713ba4f843bbe555533b6aef753ba530533b51f16b3b3c4a39bb89603b3b33013bbb6ba33bbbe6d17c3b476743bb13ec563bd327703b4ec1523be6b0673b821d3dbb8f993e3b6c913ebb981a3fbb9efe753bc3ac45bb2fe9553bc10cce3a4361b53a9df6c63ab434a3ba095ca43aa770a4baa3e6a4babaeed23abd77aaba0e09b83abf93d13b0e23b73bee0cca3b32e0a2bb737fa43be73ca4bb4fbda4bb55bbd63b27deaabb1704ba3b4054863ad920573a9ff37c3aca9e31baf96c343a460b34ba26ef34baf7cf8b3a9b0e40ba11b85c3ab560ac3a2e198e3a4c8ea33aae5270ba8a13743a912873baa33174bae782b23a089280ba174b913a8eb21f3cb67d0b3c2ef3193c0069f8bb807ffa3bf47efabbdc47fbbbf9a0233cc25302bc5db10d3c629c103c2dc6fe3b68b90b3c8490e4bb125ae63bdf5ae6bbdc05e7bba0f2133cbf10efbbfb44013c669bbc3b9642a23b0600b53b84f98ebb6c5f903bf64190bbc3bb90bb88dcc13b2c8d96bb8513a53bbdc6c53af4eaac3aad88be3a1f479bbadf519c3a06779cbad5e89cbac4d0ca3a1e4ba2baf88baf3afaf4433c7f86323c8c0e3f3ce71225bc01d0253cb90526bcef6126bc1648473c089b2abc3475343c98a7133a4245e339428a093aee50b4b90080b8397142b7b9a255b8b991c71a3a6aeec5b9f942ea39e03f593b24433f3b55cb513bf6272cbbd94a2d3bd8752dbb13f32dbb3e615e3b6ed433bb7710423b7828493b23862f3b7bbf413b4eb71cbb501b1e3bf3f71dbb0f6f1e
bb97484e3bd71a24bb3343323b62f5c63b5916ac3bd745bf3b853c98bb9468993b189999bbb01d9abb6f3ccc3bf53ea0bb68feae3b75ef943b9707873b0d0c913bf74c78bb5796793b8ed579bb7a6a7abba190973b4a9e80bb0e94883bed49bf3bb562af3b73e1ba3b57c7a2bb156fa33b17aea3bbb704a4bb2c40c23b0404a8bb592db13b8cb2953bb5f37e3b23568f3bdf755dbb6a31603b99af5fbb148360bbe1109a3b57926abb34e0813b55d2963bb20b863b2209923bdbf372bbea8e743ba3ab74bb954f75bb111a9a3bbd097dbb3fde873b4cff383c18c32a3c080c353c5e8a1fbce707203c145c20bc58af20bc76a73b3c144a24bce75d2c3c44a4893b1fef753b9b78853b609e5fbb6727613b302461bbb9b461bb407e8c3b408b68bb3023793b24ad333c3239233cc3072f3c59a716bce35e173c498817bc72dd17bcdbd6363c93c91bbcfd08253c21a3b43b108e983bfaa0ac3bc10683bbcff0843b057284bbf1f484bb961fba3b7c608bbb3a9a9b3bf77bd33bde48be3b1174cd3b684caebbc446af3b4066afbb53cfafbb3c9cd73bcbbfb4bb1d99c03be5a8343ae529143a991b2b3ab0bdfab9be1cfe39bfb5fdb9f6d0feb9fd573b3a353206ba2e8e173a9cb1283bdbf10c3b33a9203b9988f1ba318bf43a8432f4ba112ff5ba7c402e3b15a600bbf5e50f3b6688653b75b1453b41485c3b16e72ebbee60303b116d30bb12ff30bb61f46b3b95e937bb8911493b8eab983bcf5c853bb928933ba0d46dbbadb46f3b5bce6fbb6e8e70bba3729c3b687079bb6175873b9e2df33b7589dc3bffccec3be917cbbb8e20cc3b3350ccbbddc5ccbbd183f73b3237d2bb6d09df3bbb35c73bdcc7aa3bde0fbf3bb19295bb571f973b72ff96bb998797bb0fd0cc3bbff69dbb01d9ad3b139d343b9bb2193b1ee02c3b690006bb263d073b5b5607bb10d807bbf5f1393bc4e00dbbde961c3b730aa53c4d79953c32b0a03c1bf488bc1b008a3ca2cf89bcbe208abc47fca73ceef28dbc4a35973cb404cb3bafd3b33b745fc43be087a2bb18bba33b07b3a3bb0a22a4bb3f96cf3bcd66a9bb1355b63bfc83c03bc2a2ad3b9936bb3b613b9fbb16d79f3b9044a0bb06aca0bbb61dc43bfd44a5bb65baaf3b583ad43b3847bf3b074dce3bdb5cafbb3943b03bf878b0bb4de4b0bb0a44d83be4dab5bb9d94c13b6748ab3b532b973bb492a53ba6dd87bbf8ea883b86e988bb204e89bb412faf3b4f008ebb9d5d993b334c743bb509553b164a6b3bed1e3ebbfbaa3f3b78a93fbb783c40bbd7837a3bd53447bb8363583bee089f3b533c8d3b8bf6993bcbe47fbb83b6803b6edd80bb703681bb6482a23b4f5585bb002c8f3b89bb7e3b18025f3b1998753b14af47bb9e49493b6a4049bb03d649
bbc384823b2dec50bb5b69623ba302413ce02a293cea3f3a3c69c916bc2a3f183cd80618bc8e7c18bcc49f453c620f1ebc7cc62b3cb753083bb9cce63a9738023b971dc9ba2e11cb3a3017cbba82d5cbbafe930c3b6ed0d4ba6d36eb3a1f04183c2c7f0c3c2bcc143c137803bc32f8033ced1b04bc015a04bca8301a3c2c3107bcaec80d3c3552573c9b58453c9b48523c775937bce527383c705538bc01b438bc33bb5a3ce4153dbc7658473cf420dd3b38b2bd3be022d43b091ba6bb58e0a73b69afa7bb0644a8bb294ee33b5f67afbb2018c13b3424913bf3c6823be31a8d3b3b326fbb1cab703ba9bf70bbcb5471bb74e1933b7f4278bbd55d843b917c023c2819ec3ba9f3fd3bb4f0d8bbc52bda3bb844dabb87c4dabb85e0043c2bb3e0bb52d6ee3ba2d4e63a2169bf3a855edb3a2227a3bac926a53a2305a5ba2bb7a5ba2ac9ee3ada37aeba4c95c33a03250b3bd7ceef3aa28e053b661fd4bae009d63a60f5d5badda2d6ba72050f3b90fbdeba9ee3f33a1612ed3af856be3abb70df3a03099dba408d9f3aad349fbae201a0ba878cf63a71eea9baf342c33adeb75b3b44c6383b19a3513bcdca1ebb0502213b667e20bb871c21bbc5ae623bffd628bbfd823c3bbf38963ae0d07c3a80388f3a573d5bba9cdf5d3a4e695dbae6345ebaef1e9b3afe2468ba9ae4803a42900f3b84bde93a3fcc073b39d6c2baab49c63af857c5baed42c6baebf3143bfcb5d1bae362ef3acadd093b90e9e43ac809033b9b73c3baf4e2c53a92a8c5bad47cc6ba059e0e3b2688d0ba14dae93a450f2e3cde2a183c82d1273c89ad07bcf9d2083c1ecc08bc6a3709bca955323c61410ebcd18b1a3c7f51d53b53a9c13bfccdcf3b9370b2bb5048b33b1f83b3bb61eab3bb2a0ed93b91b1b8bbead7c33b854b9c3ac4c8823aeddb943a69bc61ba37c7633add2364ba070d65babc77a13a79fc6fba8779853ae889723b49ae583b96276b3b658b45bb298c463b87df46bb2c6047bb229b773bf0564dbbc17c5b3bcc5d6f3b73fe533b3c90673b46683fbbd1c3403b63d040bb565741bb9ab6743be5a947bb22f8563bd3c1ae3bd2189a3b0cd2a83b05ed8abbd3da8b3b9cf58bbb37598cbbf0d8b23b1e0291bb01529c3b616e7b3bbbc75b3b0053723bef7b44bbe90d463be40e46bb5ba646bb70db803bc6c04dbb732d5f3b9fe4bc3bdbbfa93bdd75b73b4f279bbb8c1c9c3b6a289cbb86889cbb8a9ac03b6608a1bb67d8ab3be446793b644b5b3bbdbd703b4fd044bb8d35463b065b46bbc6ef46bb961e7f3bc8dd4dbbab8e5e3befa4b03b80489a3b4242aa3b9e8e89bbc2ad8a3be2b18abb601f8bbb8806b53b9b3e90bbb0b49c3b98be903be1447d3b32888b3b34b062bb106c643be07b64bb6d2765
bbe256943b31446dbb3c94803b8bcb963b6dfe833be86d913b7d066cbb14ab6d3ba8f36dbb9daf6ebb64799a3b935777bbd008863ba5afc33b089bad3ba063bd3b21e29cbb301a9e3b6b039ebb2e6e9ebb2600c83b7884a3bb9201b03b8380033ce83fe53be948fd3bcb4fccbbf027ce3bc8f9cdbb4f97cebbcbda063c191fd6bb83dee83b585f8e3b7684773bc2fd883b6a495cbba4525e3b53175ebbd5c15ebb6b18923b0cf166bba17c7b3b053a893b780d713b9d7b843b232558bb006e593b30e259bbba8c5abb2a788c3bbc5762bb82b2743b0d53ca3bb31ba73b1001c03b50ef8dbb6321903b958a8fbbdf1d90bb7f87d13b867c97bb88cbaa3b5d8c7a3a4fc5503af9626e3a6cf432bad8fd343aa7ee34ba75ac35ba1f80813abdab3ebaad2f553a4eb2ef3bb32ed13b3709e73bcfc7b9bb628fbb3be05dbbbb87f5bbbbe39cf53ba115c3bb1e84d43b95af963b795f823b6de3903bccac66bb3962683baebf68bb378b69bb17a99a3b5dde72bb5093843b142f813c76286f3c19057d3c4ade5fbc27cd603cb5f360bc7e5c61bcf3fc823c832866bcdb4f713c7fe3de3b0cb1c73bfd55d83ba6e0b5bb7406b73b261cb7bbda92b7bba259e33b2a16bdbb4e3eca3bc700bc3bcfffa43b5b6bb53bcbc093bb9302953bebe994bbbc5795bb3387c03bbe939abb507ca73b4d65a83b9654903b8983a13b43e87cbb2b3d7f3bae567fbb2d2180bb5d1ead3b169c85bbfeee923b88ff143b81d4f73a82bb0d3bae0dd3ba741ed63aaf74d5ba4e57d6ba70071a3bdc45e1ba1b2afd3ab663423cee9e2a3c54a13b3cc59418bc0cdb193cbdce19bcf9431abcf302473ca7c71fbc3c362d3c9e48fb3b0f69db3b4441f23bfb36c2bb9da5c43b66e2c3bb8c7bc4bb05b9003c1cfecbbbace7de3b35c36e3aac304f3a7aad653a3e4938ba9689393a88db39ba40753aba2a08753afd8a41bade93523a6aabb73a03f4a23ad3bdb13ac09493baa680943a7fa294ba9a0795bad1bdbb3a42c599ba6732a53a2be9343be9d5193b58ec2c3b140907bbd262083b014208bbe3b508bb1e843a3bc14d0ebbdaa51c3b2944013c1328e73b1ecdfa3bf83ed2bb217dd33bb5b3d3bb2641d4bbede5033c05c3dabbf02aea3be1e97c3c3f2e663c758b763ce73654bc4c82553c517555bcfdeb55bc289d803cbd765bbceeb4683c564d9c3be3b38a3b634b973bfbef7abb368b7c3b77c37cbb00747dbb2dbb9f3b3fd282bbbe9e8c3bba7d763be820583b0acc6d3b9ca141bbbe08433bd72a43bbefbe43bb5e777c3bd3a94abb15695b3bbd2aea3b43e1ce3bad6ee23b8e40babb2d79bb3b0eafbbbb013bbcbb6472ef3b29a4c2bb99ddd13b78138c3befed773b2a73873b62fc5fbb929c613b4f9c61bb733862
bb6c438f3b688869bb4b677b3b8e34943bfcc57c3b21ee8d3b9dfc5cbbf4e35e3ba5255fbb53f75fbb9d87983b5bba69bba3ba803b3ee9a53bd5ff913b0e3ca03b160c83bb3f07843ba41184bb387484bbedcca93b030c89bbbf29943be5049a3cd7518e3c32c6963cb3e584bcdb84853c1d9085bc7dd085bc8e339c3cd1c288bc26a38f3c3b68933be918823ba2738e3bbd4e6abb171a6c3b1b106cbbbcb86cbbd6cf963bc39f74bb49f8833b9e55943ba0ee7d3b9f338e3bd5b85ebbd57c603b2cdc60bbe4ac61bb148e983b79516bbb6544813b80916b3bc3534d3bc6e9623b5c1f37bb2754383befa638bba13b39bb2583713b3d1f40bbd198503b52de8b3bf3c67b3bcfec873bb67666bb6096673b61f767bb578b68bb4a8d8e3b863d6fbbecdb7e3b7cd53b3c64ea283ca17e363c28691abc12481b3c026c1bbc20ce1bbc91773f3c1b5220bcedff2a3c4f8ddc3b79e5c23b7c46d53b6f63afbb87abb03bb8bab0bb373cb1bb4d85e13b6d3fb7bbcdb3c53bb2b0953bfc12853b53fa903b1b9870bb8e84723b015572bb48fb72bb79e8983b9cc67abb49e4863b8bdae63bffbfcb3bf028df3b9540b7bb5e86b83becaab8bb8034b9bb8e1cec3b628cbfbb0ab6ce3bdf70f43bbab7ca3b4d7ce83b8274abbb72f8ad3b1586adbbdd46aebbe5a7fc3b10a9b7bb4237cf3bc0957a3bd23d593b92e8703ba83441bb96d4423b9fcf42bb416943bb00a6803bf4b04abb6dc85c3b2d8b3b3b86dd1c3b77af323b6f8706bb5d15083b620508bb099508bbb0a9413b33590fbb5124203be0ed4e3adf2a2b3a6094443a32e810baee13133afba012ba2a4513ba3116563a00191bba41fa2e3af14ba03abc117f3a0ab5963ab6ee4fbac52c543ad9f052bac90554badffba63a51d161baaef8823a08947e3bdf045f3b388a753b1ca447bbf318493b7c3d49bbf8d849bb6965823be80a51bbd66d623b71e7d73b035dbf3b85ecd03bc7e6acbb050aae3bfd2baebb84a6aebbb6addc3b985ab4bbe609c23b4fa14e3bdc75373bc209483bb4ca25bb831c273b61fd26bbd56f27bbac24533b1ad12cbbcefb393bab6e093cfd58f03bdc82043c3980d6bbe228d83ba544d8bb48efd8bb8ecd0c3c8be2e0bb1f19f43bfee9f83b454cd83b9895ef3bd5b9bfbb899fc13b4b5fc1bba8fbc1bbf851ff3b3e69c9bb81d3db3b2198823bc9355e3b5bdb793b41d341bb762d443b70ad43bb425c44bbed86863b52ca4cbbf956623bb76b333bfcbd183b09a22b3bd9ed05bbcc17073b802e07bbfaa507bbb1d7383bf9590dbbc48d1b3bc7101d3bf6a1043b8004163bc097e5ba0903e83a13fde7ba9be4e8bab7ee213bbdbcf3ba3f3f073beb6d073cfaf4e83becff013cd7b9ccbbeabfce3b999fcebb5c54cf
bb47290b3ccde6d7bbf10bed3b6cb4163ca03f033ce822113c6ab1e9bbba78eb3bdda8ebbb2767ecbb68871a3c3343f5bb3d5a053c7eef5c3c51613c3c9290533c1c9824bcef2e263c943026bc3cc926bc3a64633cea042ebc16df3f3ca473ba3b3733a53be564b43ba24f95bbb343963b5a6796bb7fd096bbc499be3ba1b99bbbff82a73bcf3a9e3b7d1f863be53c973bd55169bbd8eb6b3b359f6bbb32796cbb0d13a33b46f276bbf9ae883bce42ed3a007dc73a1d38e23ae3a4acbab79bae3a4967aebaf20dafba74f4f43af014b7ba1a76cb3af74a35385331b03767511838e89300b77dfb123724ac0db7b36c12b74d514a383bf350b7181dc237c6abd03b0d0db83b90a6c93b81a3a5bbfdcaa63b0ce6a6bbea5ea7bb417bd53be409adbbaab9ba3bae3ac43c59c4b13c98febe3c3dbba3bc3d9da43cd2b2a4bc650fa5bc30cdc73cee64a9bc13cab33cef7e063c9ff7ef3b745e023c0317dabb4972db3bb498dbbbd629dcbbbb51093cf6ebe2bbe121f33b2b6f3a3b594e1f3b649b323bf5870bbb0ad90c3b1cdc0cbb935b0dbb64d73f3b9c5f13bb2235223b8d33023c3719e23ba084fa3be4cfc8bb71abca3b4f7fcabbcf1ecbbb779c053cadc0d2bb8fc6e53b03be883b25b5723b1c53843b7add5bbb096c5d3bb1685dbb76fb5dbb79c88b3bf1f164bba806763bc8549f3beb9c8c3b7efa993b563e7dbb95207f3b7a257fbbe3db7fbb2602a33b153684bb7ba38e3b4adaae3b58e8963bfadba73bf7ce85bbe711873bcbee86bbbb5987bba8b9b33bb9778cbbe86d993bf571ea3be856cb3bac94e13b2849b4bb698ab53b8be1b5bb967db6bb2684f03bcea8bdbb0fb8ce3b3a72793b1659593b8946703bfc9641bb90f8423b273943bbffd743bb90bd7f3b3f314bbbecd25c3b5fc1f43bc313d93b3ccbec3bdabec4bb9c08c63b2120c6bbd5a4c6bb7b3efa3b02deccbb880ddc3b9c9c4e3cb5bf363c8dd3473c5ab224bc23e9253c9fed25bc396326bc2e40533ca0ec2bbcd959393c6584ea3bc3ffce3b75c3e23bb6fab9bbef1fbb3be375bbbb7609bcbbe3c8ef3beba3c2bb5c09d23b23c1013c542ae53b3ed7fa3b937ecebba0f5cf3b050ad0bb2f9dd0bb58ba043cd991d7bbb674e83bae40563b7849353b5dad4c3bc57f1dbb67491f3b990f1fbb69a21fbb0ae65c3bc6bf26bb1ac7383b2c312a3b0016133bac8b233bab1e02bb433e033b444303bb60b003bb83c52e3bf3db08bb7b91153bb972b13b7d4a9f3b854bac3b8c9191bbac62923bfc8392bb32de92bba2f7b43beb1d97bb0c47a13b4d05cb3add779d3a9497bd3aa81b7bbaf44e803ab1247fba374c80bafd6dd43a219a89ba8c31a23a0fc9413c69a2323c9b8e3d3c7eb526bc5361273ca68e27bc50e127
bcc7a4443c82a32bbce353343c3ebfb63b0416a43bd365b13b243996bbb815973b7f2c97bb528897bbcb6dba3b16cd9bbb641aa63be0c2c83b0299a73bd72dbf3b103b8fbb3639913b6dd290bbf36591bb7e62cf3bbca598bbe121ab3b0b3e783cf3d95f3c3b55713c5b774dbc016c4e3c84c14ebc0d404fbc47f57c3c8c0255bc6286623cf1c1af3abc4f933aab7da73a50207dba4151803ac3d37fba386980baa97eb53a728f86ba3853963a05b5863bda63633b7eaa803bdace43bb7e95463b1be145bbf6a146bb7fde8a3b890750bbb7e7673b3176263b56ce0e3be99f1f3b7eecfaba9f93fd3a3f37fdba770efebab2302b3b3b3b04bb5d54113ba285003b3384dd3a79c4f63acf99c3bac779c53aac53c5ba3cf8c5ba7b14043b59cacdba504de13a9141cc3bbae7b63bb93bc63bcc7ba6bb1287a73b3c9fa7bb090da8bb6e5ad03b0b24adbbc241b93b1437a73b6eb6913bf313a13bd7a381bb8faf823b72bc82bb262683bb166cab3b571588bb9e0b943bfe3d253c183f153c15c2203cb9b608bc4e89093c8b9609bc2beb09bc8747283cedcf0dbcf805173ce9f7ad3b37f69e3bfbc2a93b9e4893bbe8f3933b331b94bb6e6a94bb71d1b03b761398bb3ba1a03b82b00a3b4924ed3a07cc043bf1c2d0bad0b6d23a5aa1d2bac052d3baf8cc0e3be2d8dbbac25df13a180e9f3bf3088c3b69a8993bda727bbb39197d3be16e7dbb2c307ebb86bda23b228b83bb2f1d8e3bc25db43bdd379a3bffcfac3bb42687bb1588883b4e6a88bb86e188bb5196b93ba79f8ebb12029d3bb8ce023bd608d73aa6eef73ae63fb6ba3ccfb83a905cb8ba5420b9bae596073bb8d0c2bad1e5db3a89ef083c8366ee3b0de5033cbaacd3bb7160d53b8382d5bba634d6bbbd610c3c8f72debb8b44f23bf24e703b93464f3b74b6663b6b7237bb402a393bd70439bb0c9839bb99f7763b55bf40bb4fc7523b12d0b43a28949c3a9adead3abe588aba1ec78b3a54918bba2e068cba2c96b93ae58791ba1a329f3ac32d453bc8d32b3b1eee3d3b28d018bba82f1a3bc5181abba2931abbe5274a3bb45c20bbf5912e3bb0d4e63b3553cd3b4497df3be30ebabbb23abb3b6a63bbbbabe3bbbba7c6eb3b02dbc1bbe91cd03b2f58bd3b71d8a43b4451b63b2c9292bbb3ec933b6fcb93bb5d3f94bb822dc23bedc699bbb77ca73b9154293cdadc173cac62243c1b7f0abc915f0b3cd66a0bbc3ec30bbc62b32c3c10e30fbcafc7193cd5ed8e3b529b7a3bd6e1893b101160bba01c623bc3d761bb707e62bbd765923bf4826abbcb6a7e3bcaa4a73a508b823a2fa89c3a8b3752ba821d563a477455ba12a156ba425daf3a9a9265bae85e863a0867bd3b99a9ab3b3663b83ba5269ebbe6e89e3b43189fbb0c739f
bb8bd1c03bfbaaa3bb2e9dad3baf57bb3b65509f3bf54ab33b37788abb4f058c3be8dc8bbbf3608cbb95e2c03b23ae92bbba54a23b34841a3c7e9d0a3c4205163ce4c9fcbb316efe3b4d78febb8c18ffbb5d941d3c1f5003bcc35c0c3cfac1893b78d4733b3c41853b25f95bbba26f5d3bf89d5dbbb93c5ebb98d68c3bf99c65bbd048773b5bf8043cb480e53b228bff3be663cabb3312cc3bdf3dccbb1df0ccbb7888083cae47d5bb4974e93b8bc89d3b2a298a3bda1b983b81a077bb17c3793b028779bbe7397abb3cb5a13bf56c81bb0c418c3b3c6c9e3b3ff38c3b7a77993b0b697fbb2e88803b529e80bb3bf680bb70cea13bab0e85bb99dc8e3bd7a6183c9862083cb209143cb400f8bb8aa7f93bceb3f9bbeb56fabbc2cd1b3cc4fa00bcce290a3cf4f3c63b0a99af3b3b4bc03b150d9ebb58369f3b953f9fbb66b29fbb2f84cb3bb714a5bb9f22b23be0530e3c454cf53b96ae083c81d3d7bbad2bda3bf6c8d9bb3f82dabbd535123cb45ee3bbf989f93b60cd4e3b214a353b997d473bfe5522bb699d233b539e23bbd51824bb56d4533bcce129bb2a0b383b2da7c73b9af4b33b7913c23b8501a5bb06dda53bdd0ba6bbf36fa6bb1575cb3bd417abbbd21db63b10b5323ba1cb1f3b404c2d3bb69f11bb2192123bbd9612bb89f212bb196c363bc04917bb77d8213b94082a391013073970901f398c71deb80eb6e238966fe1b8968ce2b82c733139075af0b8aaa30a3929abbf3b7feba43b7efdb73bbe1291bb6a7f923bda6792bb33e792bbb2f3c43bceec98bbeacca73bb2e96d3b1c98523ba627663bd8d63dbbe63c3f3bf2423fbb55cc3fbb3236733b132e46bb4394553b2bcb613ba3d6453b69bf593b182231bb6ba4323b888532bb690a33bb4457673baa4f39bba3d7483b75562d3c8c351f3c9568293c12bf13bcaaa7143c2a8a14bc98d514bcddfb2f3cf55b18bca3cb203c4629f43baa84d73b7300ec3b3ee9c1bb5d62c33b2262c3bb5cefc3bbb5bff93bb28dcabb33a2da3be49a2c3b084f173bfe7e263bc79407bb3694083b94a608bb260d09bb5fcf303b8adf0dbbbf9a193b066f183c5560053c900c133c6ac1edbb6777ef3ba2c5efbbed8af0bb6f191c3c1391f9bb6f78073c3265cc3bbb89b23b0efec43bcf5e9fbb6d8fa03b88aea0bb982da1bb3e7cd13b1114a7bb2555b53b196f023c5a85e23b2119fb3bf1fcc7bb5c53ca3be6bfc9bbb663cabb4ec6053c9d55d2bb6e43e63b691f253c85d4133cdc3a203c4ba106bc9b73073c978b07bc5ae407bc7c75283c28fc0bbc5eba153c37dfb83bde02a33b7b99b23b5af092bb19e7933b900894bbf67094bbf42fbd3b706199bb4f5ea53b077f843bb26b683b98a87f3b3b3550bba9d3513b8fda51bb757952
bbe9b4873b81e259bbd2ef6b3ba84d663b44f04b3b3acc5e3ba40838bb055e393ba56439bbe2e639bb19716b3b8d0240bb00cf4e3b8d50fd3b1424e03b9f0af53b2313cabba16acb3b9299cbbb652dccbb4a7b013c9703d3bbe554e33ba85ed83b3e22ba3bf7aecf3bb589a3bb2b5ca53bad08a5bb2d95a5bbbc59de3b935bacbbad62bd3b4405d83b9d70c13ba9a5d13b7012b0bbf632b13b7445b1bb1eb8b1bb2b5bdc3b8716b7bb1aedc33b1a63a83b0ba5913b7edba13b9ba080bb91fa813b5cc281bb702d82bbe2e1ac3b8d4b87bbc817943b00d2963b8386843bc091913bacff6dbbfec86f3b6ed46fbb4e8370bb5f6f9a3b63c678bb1b7f863b674fc73bb7d6ae3b4449c03b39c59cbbf3ee9d3ba0ff9dbb29769ebb3725cc3b67ffa3bbd579b13b30c6ad3b8bd79a3bf066a83b7a6a8cbbe1608d3beb678dbbe0c68dbb6571b13b323792bbe5e99c3b997e903b57f87e3bf5a48b3bc28065bbd213673bb24067bbd4e967bb16d2933b1ec56fbb3355813bcee9923bac607e3b97458d3b4ba460bbd4eb623b9da362bb8d6263bb49c9963be2636cbbf852813b8a83363b4d151a3b0f4f2e3b8ffb04bb60c9063bc15c06bb0ede06bb6d2d3c3b04230dbba21f1d3bc8de1e3c0b010f3c2f5e1a3c23d202bcc7b7033cffa503bca3f303bc0bf2213cbcaa07bc79bd103c3af1413bcec3273b33683a3b757f14bb82d9153b15ca15bb434516bb2123473b421d1cbbf6922a3bbf503d3ca795243c7740363c2c3a12bcf956133c457c13bcaaf613bc3829423cf79f19bc9343273c19593e3bbc07273b96b1373b436515bb55b6163b119616bbd50717bbf0e9423b67631cbb128f293b9553133c03ee043c02450f3c69bef3bb9c0af53ba149f5bb43def5bb7015163ca6c3fcbbcb84063ced8f943b3c0b7e3be26b8e3b0e775dbbefde5f3b46aa5fbb737c60bbdac4983b32666abbad5c813b020aa93b503e963bd9baa33bbbea87bba5c3883bf8e988bbe64a89bb2ca7ac3bc7c08dbb554f983bb984923b8ddf823bf60c8e3b7f376ebb4fd36f3b50d16fbb056a70bb8d95953b699c77bb2392843b904afe3b2adadf3ba49ff53b18cfc8bbd270ca3ba95fcabb05f5cabbca1c023ca0fed1bb1c2ae33b8e92a2392e49753925b79639a3d93db931064339dc4b41b90d8842b9beebaa39627a52b9597f7d39d8db9b3baf928c3bdb91973bc4b380bbad6a813bb28881bb51d981bbaac39e3b888e85bbe0448e3ba7b5e13bca3ac63b6fe0d93bf2aab1bb8df3b23bc813b3bbe39bb3bb5213e73b2cf2b9bb4937c93b4dd3863ba837633babad803bb06544bbe0c7463b4b6a46bbe42647bb36158b3b045a50bb89b9673b536aaa3b0f4a923b136aa33b44b280bb7017823b3cd981bbaa4382
bb6d45af3bab8287bbf0d9943b9421913b669b823bfe008d3beb476fbbc99b703bfecb70bb0f5e71bb37f4933b7f2878bb6d31843b6d00483a44f41f3ad14e3c3a8bee02baec91053abac904ba077505bad924503a88f40dba532b243a43faef3a6292d13a2a40e73a10e9baba6198bc3a636ebcbab9ffbcba3ffdf53a74dcc3ba17d8d43afbc38e3ba50b813b53e78a3bc8f66cbba32f6e3b71716ebbd3ff6ebba963913bb09975bbcf8f823beb6ecd3bf2beb23b82c5c53be4df9ebb7e5da03b7633a0bb75b0a0bb13b5d23b54b0a6bb619eb53b31fc1f3cd87d0a3c8bd8193cc683f4bb6700f73ba9b1f6bb9e80f7bbfd31243c71aa00bc52d20c3c6de1a83bde20943b0af6a23bf97f84bbb296853bb38f85bb78f585bb19f0ac3b27bc8abbc561963b5267333c5135213cbf422e3c3c2513bc8826143c691b14bcf77514bc1de8363ca1c518bc6c35233cabed013c2572e73b05c4fb3bf40bd2bb256bd33bd883d3bb0611d4bb2cb3043c11a8dabbc589ea3b9a91b93b8cb4a63bff3bb43b7f5398bb2c2c993b965399bb02b499bb2234bd3b532d9ebb46c7a83baab3103a43a4023a7e9f0c3a302cf1b96617f2398591f2b97918f3b9eb88133a8c5ff9b97624043a9fb3b53b7fa5a43b28e7b03b148097bba542983bcc6b98bb67c498bbf4f5b83bf9df9cbb5088a63bcd6f2d3c337c1c3c92a9283c697a0fbcca37103c556310bc0dbb10bc0bae303c75cb14bc805b1e3c0ba99d3b79c6853bc3c3963b766268bbbe246b3b44b56abb7d916bbbc76ca23b431c76bbab53883b16f72d3aaba10b3a9adb233abd3ce7b9d7f4ea39b156eab9547eebb9090a353a3cb6f9b9da340f3af210683bd3424d3b0b66603b365b39bb42913a3bdab73abb173a3bbbc6546d3bcb5b41bb122a503b3a3eb33b0244a03b37ddad3b08d391bbafbc923bb7d292bb703393bbceeab63b98ab97bbc057a23bfff9703b55a04f3bce5a673b635137bbb1eb383b0af438bbd89039bbb89f773b5df740bb8631533b0bc3ae3b487f973b250ea83b1b5086bba29c873b8f7487bb4ae087bb3b64b33bf30c8dbb7cfd993bd408a83bd22e933b2d13a23bd89383bb68a6843ba7a284bb1f0885bb791fac3b38cc89bb2571953b9ba8193aa41fed39703e0f3a0554bcb9c1c4c039b862bfb9987dc0b900fd203af29bceb94d5af439d3d9183cbd6a053cab51133c235eedbbae7bef3b7d5defbb701bf0bb73a31c3c2218f9bb2888073caf0ac33b19c8aa3bf81cbc3be6b498bbe8e0993b28ef99bbef649abbf8cac73b30ed9fbbf968ad3b5c4e453c140e2e3c6fba3e3c5f4b1cbc256f1d3c8e851dbcaffc1dbc47ca493c2a7b23bc3d9b303cdfe6023c4c94e53be38bfc3b0e67cdbb5863cf3bba00cfbb7796cf
bbed16063cfed3d6bb2e0ce93ba6fa8d3baa14813b65618a3b7ce86dbb5b136f3b9c566fbb70e06fbb7968903ba83c76bb9585823b5f97183b9cba033b4497123b6b6de8ba3cd6ea3a287eeabaaf42ebbacfb91c3b779af4ba65f8053b40a3733b9b8e523bac1a6a3b69503abb91f93b3b7af03bbbfa8a3cbb3e377a3b04e743bb281a563bf4fa8f3b211f6f3b07f2883bba654abb05db4d3b12c74cbbd3a44dbba8d4943b187658bbe25a743b36dad23ba760be3bb317cd3bd473aebb8182af3b718eafbb00f8afbb75c3d63b00e6b4bb50a4c03b05a8683ba0c6503b49db613b36e13ebb21e83f3b9b1e40bb919640bb14506d3b0e2546bbc060533b3d45183b6fba043b30ab123ba2c1ecba8667ee3aa5b9eeba4e79efba7e1f1c3bb457f8ba39d6063ba627123c3a5b013c71630d3cf0e9e8bb3ee0ea3b60a9eabb3c4febbb1669153ce928f3bb3c31033c3e20213bffeb053b9a27193bb1c0e5bab53de83a3145e8ba8938e9ba75b0263b04b4f4baf8c5083b2529793a5fa2403a6b41683a2cd51aba8baa1d3a193a1dba9e1d1ebae68e823ad73129baed64463a41245d3bfebb403b0aff543ba0702bbbb3152d3ba4dc2cbbda622dbb98be623b44cf33bb72cc433b8aa9ee3b8450cd3b211de53b429ab4bbee34b63b0a48b6bb4ceab6bba138f53b797cbebbd2ead03bafcd4e3c0867383ce066483c43bd27bc15c0283cf2e128bc975029bc3d33533c68732ebc3ad43a3cc58a013c05e0db3be3c5f73b4621bfbbc338c13b620ac1bb29bec1bbf172053c8f69cabb3c11e03b5837cd3ae867b23ad388c53a00589ebaa7d49f3a4db19fba0633a0ba017ed23af148a6ba964db53ad2ebfc3a8583da3af7f6f23a4593c1ba1e41c33ac63dc3bab1dbc3ba07e8013b576acbba3f2ede3ab3147b3a0860553a2edd6f3adbda3bbae1923d3ac7803dbacd1b3ebace7e813a92b045ba3640593afd655d3c30f2463cec0e573ce3bc35bc2fcb363c44ef36bc5a6337bc1bb7613c24bc3cbcb56a493c0bc4923b52fd803bcbac8d3b4e9767bbdc4c693b446269bb930d6abbbe43963b822272bb07e9823bb52d3a3cacfb213cfe4d333cb0530fbcd7e9103cd09110bcb80511bcd3df3e3c4fa016bc739f243c3e24233c7a1d123c01521e3c412a05bc6bf6053c040f06bc276406bc026e263cea650abc62fb133ceeaac83b32e1b13b5d29c23be7f1a0bb8feba13b211da2bbfb8ea2bb3121cd3babcea7bb635ab43b7cc41d3cbcc00c3c28f0183c608effbba0ac003cfba900bc4cfe00bca910213c87f804bc1c9d0e3c15befa3b7ab9d63b8d58f03b234cbcbb4f45be3b6b0dbebbb4b4bebbeef5003ce8a9c6bb5693da3b395c333c9f451b3c187a2c3c4a3c09bc19860a3c6e730abc74e70a
bc4e15383c6a6310bcf7e01d3c7447073c59e9f23bca5d033cc9ebddbbf224df3b0361dfbb57eddfbb3ff2093c9874e6bb0cf2f53b4f04cc3b036cb13b6a62c43b42b19dbbf0109f3b7a059fbb08849fbb1b45d13b6984a5bb9049b43bfb1d043c0c92ed3b565e003c50f8d8bb3837da3b8d68dabbd7f3dabb0ca9063c6b5fe1bb4485f03b48c6873b87b46b3bd8ae823b4c2950bb3062523bac0252bbf8b052bbcb408b3bb4045bbb169e6f3bdfd8c73bb8acb23b2fcec13b8e06a3bb08c1a33bb220a4bbae8ea4bb27fecb3be37aa9bb1efab43b7d0c2e3c24ff1b3cb7f5283cfd110ebc8af60e3ca7090fbc7d670fbc5882313c79b913bcc0fc1d3c6343253cd922143c7875203cb57706bc7f9e073c706507bcd0bb07bcda84283c27e30bbc8a09163c5ed47c3b40c65c3bff7b733b99e445bb4977473ba86847bbf4f847bbefa9813be3dd4ebb5929603ba83aac3bcd8c963b7706a63b297c86bb8c78873b919587bbf5ff87bbdd7db03b9df28cbb05e5983bdbc7f63bea6ada3b4dc3ee3bdcd1c4bb901ec63bf251c6bb4ee4c6bb963dfc3b379bcdbb7f88dd3bc0fa8d3b2b64763b7d8f883b95115bbba7c75c3ba7ea5cbbe39c5dbb0fb9913b5ef765bb83697a3bde25573c3a8a423cb353513c29d032bc1fb5333c78e933bca95334bc7f1c5b3c723c39bceece443c99c9493c6bfb343cf4e3433cfb1c25bcfb24263c083526bc779e26bc52d04d3c04842bbcbf42373c76e7963b0a34813bf2ae903be75861bbc259643b348663bb1f5364bb742e9b3b41296ebbe28b833b5ad51b3bb82f073b7ae2153b7f8df0bae321f23a0c99f2ba6e60f3baf0f01f3be59afcba1b66093baef8053c1899e83b4af7003cd16dcdbb1e72cf3b3d46cfbb62f5cfbb8d62093c2540d8bb4178ec3b17d60b3b02ade43acd33043b3d09c3ba761bc53ac335c5ba1809c6ba4d3d113b7b0ed0bab6dee93a6214883ba4635e3b69e8803b381a39bbdf8c3c3be8833bbb58603cbb4a078d3b4d5d47bb07b7633b4a130b3cecc2ec3b071e053c551fcebbf685d03b8f25d0bb86e3d0bbb62e0f3c3413dabbe532f13bb3f6cb3bd9c4ae3b0c98c33b5ab898bb079a9a3b6f2d9abb35b79abb51b7d13b8d4ea1bb50ebb13be7e6303cfe471a3c4a682a3c6aec08bc4ca40a3cea0e0abc387a0abce35e353cbb990fbcaeb81c3cd3f9203bc9300c3b4aff1a3b544ff9ba61d7fb3a305dfbbabd1cfcba2519253bedb402bb5d6c0e3b2e51493cf8a8343c917d433c41e924bcf5c4253c140426bcc96f26bccb474d3ca55e2bbcb2ef363c420e0e3a8467db39036c043acadbafb9a0eab23985a6b2b9c7b1b3b97cd9143a0894c0b93809e239b051a73bf9ca9a3bfadaa33bccc090bb374d913bdc7891bbdabd91
bba0a4a93b26ec94bb87359c3bd5d5d73b2105bd3b823ad03b4122a9bbd7fda93b168aaabb2e16abbb5107dd3b0e60b1bbcbf3bf3bab5fb43b20679f3b4674ae3bdf4b8fbb3854903b9e6990bb1ad690bbd666b83bfad395bb0cb6a13b420f473bac7f2e3b3cf33f3b019a1cbb4ae71d3bfac91dbb7a3a1ebb50fb4b3be79c23bb851d313b2980593c30a3413cbdb8523c668b2fbcbbbd303c98c830bc4d3f31bcc9215e3c93cf36bc803e443cee52cb3b774ba93bea7dc13b6a1290bbc93e923b4cb791bb484e92bbd31fd23b5ec999bb26edac3b70f51e3cea1e103cecce1a3c34b104bc6633053cfe8205bce5d305bcbbc3213cde7709bc34c6113c0f46053c96fbe13bcddffe3b70d5c3bb2b40c63b6ed3c5bb1d8ec6bb034c093c4497cfbbc655e63b01b69e3b0cee8d3b3dff993b0cf880bb79bd813b58e081bbbf3882bbabe8a13b054586bb5cc98f3bc48b303bc530193be0ce293bed0908bbf548093bee2e09bb109c09bb2833353b7fca0ebb69b01b3b1daf463bf882313bb5a1403bb7bf21bb95b2223bc2d422bb943c23bbe0d64a3bc91928bbb9ce333b756f6a3b3935533b83c9633ba0ee41bb90fb423bd31d43bb058f43bb1b016f3b87e148bb44b9553b48c05d3ac7a52f3a042d503aa29e0fba50f7113aa6b011ba197412ba3441673a1ff61bba6f71343ae9d39d3ad2616b3accc6913a268735ba1b523a3a7ed638ba19033aba4560a63acd6e49ba8582733ae819803c3d88653cabaa783c241d51bca261523cda8752bc021153bcc9aa823cb36659bc0977683cac171a3c150f093c6243153c0437f8bb06e2f93b8efff9bb64abfabbaf631d3cdb5401bc41ec0a3c30d6803bb04a613bd073783bd46b49bba3864a3b30184bbbb7bd4bbb85fe833b6e3e53bb45d0643b51658e3c010c853ca0c88b3c899c7bbcd8357c3cf3a77cbc0e0e7dbcea28903cecd680bcd017863c36b5a83bdf83963b288ea33bb1b388bb9382893b1aaa89bb69088abb2239ac3b67558ebbb282983b31db3c3bd8be233b76a5353bc75511bb9b6f123b709712bb3c1113bb23d2413bdabb18bbec72263b478f503bffdc333b874e483b7f9b1ebbbb2a203b7a0720bb7e8e20bb833f563ba7fc26bb40f2363bb266813a5edb5d3a9b31783a4d1943ba0c67443aeef344ba1eab45baa10d853aef044ebaa5d5613abf8dbb3a6a56a53a0124b53a9f2495ba7526963aa83d96bad6a696ba75fcbf3af39b9bbaa7b7a73aedc6113c444a043ce6fa0d3c75c4f3bbabfff43b2f39f5bb15c6f5bb0c5b143c6a44fcbb24c8053c5194863b866a5a3be63c7e3b6fb036bb3caa393be5f638bb8bc739bb16cc8b3b113f44bb9ab35f3b3bfd923bf0007f3b5c5b8d3bf66c62bbbd8e643bfa5264bb410765
bb65e2963b75a16dbb2795813bfa31803b9067653b1dc0783b0eeb50bb3020523bd45652bb74df52bbb9cc823be43c59bb5f5d683b4726b43a9e62983a070cac3a056a84bae2ff853ab4b785ba5c3286baf1cab93adf228cba9b4f9b3a121b703b8d79523ba7a0673bb5503cbb80e03d3b51cf3dbbe55c3ebb61ed753b531b45bb22ae553b0191733c78d65a3cf8916c3cc1e747bc8920493cf63649bc88b549bcba56783c44904fbc228d5d3c7a2a8d3b85ef693bdd17863b0e8f47bb42244a3b1ecc49bb0e9e4abbe017923b2cda54bb61056f3bad7b653b77f1453b69635c3be8b72ebbdc6e303b324430bb60d730bb18c36b3baad937bb1a52493bfb47fc3b55dbe63b644af63b5e2ad6bb5b0cd73b9059d7bb0bccd7bb752a003c0a10ddbbfd3ee93b13cd593bc6583a3b5fc2503b145023bb59c8243bf3de24bb4c7525bb9d07603b63802cbbbfb93d3b3c55c73bff04aa3b74cebe3bfb0795bb5f88963b4f6996bbaceb96bbcc40cd3b4e369dbb7e1ead3b372f503ca688393c29c8493cc03328bc0050293c4e6629bcc3d929bcd98b543c24362fbc71053c3c0390fb3b4d46dc3ba3adf23bcbb5c4bbff21c63bee55c6bb56f3c6bb36d1003cad3ecebb9bb0df3bb626c93b5cd2b03b5031c23b877c9ebbb5e19f3bd7b79fbb422da0bb96edcd3b61baa5bb9d74b33ba7d3c43bc1cbad3b733cbe3bc5979cbbc3c39d3b01c29dbb96309ebb595ac93b0c71a3bbd649b03b9d56c53b0d0eb03bbc4abf3bc503a0bb2cfaa03b0a1fa1bb1389a1bb9d78c93b3e80a6bbfe60b23bef210b3cf843ff3bd6ea073c333bedbbe24bee3b8380eebb07fbeebb764e0d3cd2a0f4bb3cea003cd31dcd3b3d38b13b320ac53bdec39cbb965f9e3b641c9ebb82999ebbe7b5d23bc5b6a4bbf230b43b2f00733b35ff4f3bfad7683b4b8e36bb0b85383b4f3a38bb24d738bb400b7a3b317240bb6db6533b406be53bf59dc73b0bfcdc3bbdb6b0bb0d63b23b0b45b2bb8fd7b2bb572beb3b1bd6b9bb0be2ca3b25aff53a205fcd3a81ece93a3eb2b0ba95abb23a2398b2ba3c4eb3ba02defd3a11f2bbbabd9fd13afbadca3bc42faf3bffc3c23b82ee9abb9a609c3b1c499cbb70c99cbbe422d03bb1eaa2bbe422b23b107a0a3c0061fa3b35c3063c06b6e5bb35cee63b032ee7bb78bee7bb5cfd0c3c5344eebbb256fd3b8700903b0ef87d3b46228b3b698264bb2747663ba73a66bb46dd66bb3658933bc79e6ebb85d3803b8701043b47d3e23a2542fd3a4e6cc7ba92a7c93a563ac9ba47e5c9ba1bba073bc114d2ba37cae63a67baec3b9453d43b9cd0e53bafc4c1bb27e5c23b270ec3bb7f8ac3bb3272f13b934cc9bb6600d73b4f7ab83a53bc993a227baf3a9cf083ba6892853aa95c85bab2e285
bab3c1be3aad618cbaa6f59c3ad254b43bebd89d3b7ff4ad3b48078dbb32e58d3b54348ebb44a88ebb7eaeb83b39ed93bb954ea03b6faf2c3c6efa163c937c263cfbc306bc85d3073c6bde07bc034808bceaef303c41400dbcc554193c0f83393c47d9243c12ad333c0ffc14bc6206163cd41316bc2b7c16bc327c3d3c9b611bbce61e273c2d10aa3bf562953b092fa43b1ce585bb39c8863b55f886bbd16187bb8c15ae3ba1338cbb33a4973b193f4a3cb0ca373c5105453c5cbd29bc40942a3ce9b62abc2a142bbca7cf4d3cc5702fbc2ed1393cbc69943bb3a1823bc4508f3b612b6bbbc99a6c3bc9f76cbb91a76dbb79eb973b32be75bb0e8d843ba8965f3bddc3403b26be563b2de629bb78862b3b236f2bbb15012cbbd8ac653b98f132bbba15443b715a503bf7c53a3b32434a3b832a2abb663e2b3bf1502bbbfbbf2bbbea7f543b07e430bbee253d3bbe3f3d3cef79243cc92a363c4daf11bcda3c133cd5ee12bceb6213bc641c423c100719bc4e28273cd6e6ce3baf68b63bedf0c73b0be8a3bbb818a53be02ca5bbcda6a5bbb5a8d33bea59abbbf414b93b3f60b13b65419b3ba006ab3b30c98abb32f88b3bffe48bbbc54e8cbb89bfb53b1d4f91bbaca39d3bb278c53a9bb3a33ad2a5bb3a87248bba69388d3aa7bc8cba2b528dba674bcc3ae59694ba1e46a73a5199603b4e3f433bfc3c583b11592dbbc9a92e3b5dda2ebb686c2fbb5c53663bc43036bba96f463b0f502e3b6988153bf12d273bf6e602bbbc7f043baa2204bbe89704bb163c333be6290abb1132183b61aabe3bb409a43b7d0ab73b492b90bba197913b0b8291bbd30292bb16e7c33b8d0d98bb15eaa63b81b64f3c2a613f3c331b4b3c5ff732bc9f8a333c80d933bce43034bc55d8523c411e38bcf42e413c295fc13babfea43b0c3db93bc8ea8fbba267913b385691bb3fdf91bb13f6c63b694898bb300ea83b81ab433b6b3b253b92d53a3b7f2c0fbb25d9103b2da010bb892911bb3fcc493b43c417bb3076283b34d7d13a7163b03a0a22c83a5d1a98ba28f2993a20b499bae24a9aba9c93d83a1992a1ba3cf0b33af6af123b0d15003bb1520d3bdfede4ba89bce63af8c2e6ba4871e7bae963163b65bbefba9313023b7de23a3b108b1a3bed66313b28ba03bb676e053b103705bb31c505bb5581413b9d910cbb09ee1d3b09c8a63b38478f3b1601a03b04417cbb898a7e3bdb8d7ebb48687fbba975ab3b4bea84bb48cb913b4598143cdac3043c571f103c72b2f1bb5ac9f23b8262f3bbf508f4bb20a4173c3591fbbb8681063c7e3fe93b1a62ca3bb35fe03b415db3bb6d2ab53ba1e4b4bbef72b5bb4a5def3bb45ebcbb89b3cd3b08ec433c8a912d3cec9a3d3c87a91cbc7e8c1d3c42d91dbc144e1e
bc7639483c439b23bc5006303c5ef4ec3bb95dc93ba1c5e23b888faebb76aeb03bdc58b0bb7000b1bbb3f2f33be014b9bb9736cd3b69f2763b2ce05f3b0273703be4254ebb1a264f3bbc634fbbecda4fbb905c7b3bad6555bbe06c623bf572fc3a97d3da3a92c5f23ae94bc2ba7dcac33a60f6c3baf896c4ba1590013b0c1dccba046fde3aac9b483b135a283bbc1c3f3b8eb911bbff7a133bd42f13bb22b913bb5a424f3b436b1abb1cb62b3bb9ffe13b40edce3bbaaddc3b4ef4bfbbaec7c03bf903c1bb186ac1bb9e96e53b2721c6bb810fd13b0095743b2e98563b640d6c3b195d40bb8476413b64ea41bb7a8242bba66a7a3b3a7949bb82dc593b0a0f2d3a9bd01e3abd04293add1e14ba1691143a24e214ba862c15ba97d12f3a039218ba6161203a90a9bd3a29bc923a9802b13a23cb68bab56a6e3aab9a6cba4df86dbace85c63ab7897fba6a31973a0e39883b42bf633bf0ab813bcfbc43bb2986463b2dca45bb5e8746bb49cc8c3bc3ec4fbbb96d683b443b553b539d3a3b437a4d3b31c027bba3d7283ba40529bb3a8029bbe39f5a3bb5432fbb386e3d3bc3d4253bbed6143b5ff9203b372d08bb2eec083bb20c09bbaa6009bbb92a293ba34b0dbbb1ae163b5f5e983bd991833b7c5b923b4f9068bbf2eb6a3b21966abbbe546bbb9b869c3b218174bb5bca853b6f68923b4846823b82d48d3b8f226cbb529f6d3b37d26dbb84746ebbe288953bdc0376bbf909843b2657653bf5953a3b16ea583b4da31bbbbf311e3b45a41dbb2f5e1ebb84f66d3b1d8827bb591b3f3b0faf2b3c3f37193c207a263cefe90abcdfe80b3c05e60bbcb4440cbc7e392f3c44ac10bc20401b3c3b3ddc3a0b9cbc3a9e18d33adda7a5ba0f31a73aa230a7bae0c1a7ba2790e23aecb7aebaebfbbf3acd738f3a3bab513aefbc833a2d7e1fbacdd4233a6e8b22ba1ba623ba39d0973a17f231ba6860593a258f383c67ad213cf5f3313c0af410bc9e20123c851112bcd17a12bc501f3d3c498917bcce1f243ca649d63b50bdbd3bf754cf3b820fabbb9743ac3be958acbb33d5acbb5308db3bda99b2bb946dc03bf7a4a43b6147893b9dbb9c3b9d4b6abb349c6d3b16ef6cbbc2e76dbbb81caa3be5e779bb8a328c3b87d5b43acb3b8d3ac32da93a5de962ba6f78673a9c7566ba26c167babbfcbc3ac31178ba035b913a4e132b3c656b153c96eb243c640705bcd021063c5f2606bcea9206bc3c482f3ce39b0bbc27c8173c67207d3b73625e3bc357743bce2947bbd1ef483bcab848bb0a4c49bb8593813b345350bb48b7613bb110ca3b647bb83ba21bc53bfb16abbbbcbdab3b6b09acbb0765acbb976fcd3b2d9eb0bb906cba3bb491c63bb494b23be9f0c03b2a04a3bbe41ea43b9816a4bb177da4
bb1c65ca3b8547a9bb4bc9b43be983123cc468033ca8400e3cda95efbbb411f13bc731f1bb63ccf1bb7e6a153ca9fef8bb8e12053cf1f4973bcf6b843b2669923b713d6bbb49316d3ba7456dbb490b6ebbd8be9b3be02977bbea8d863b291a463bdedd2a3bfb503e3b47a516bb69ee173b5d0618bb398c18bb2d724b3be5c21ebb43d02d3b8646f43bd09ede3bb61cee3b6277cebb5544cf3bb998cfbb3106d0bb867ef83bff14d5bbf6fbe03b309e833b0afc633ba8117d3be9084abb38f34b3b93c34bbb6a674cbbf520873bd93b54bb00c2673b58361e3cf6a20d3c8486193c591501bc02c2013c5ef601bcfa4b02bc0868213ce43906bc05750f3c7f0bc53bbfc7a73b509ebc3b630c92bba9cf933ba57a93bb99ff93bb00dbca3bea7e9abbd2eaaa3badaefb3af795dc3a24b5f23a83bec5baf559c73aaf46c7ba6cd8c7ba94f1003bf0c4cebaf7e9df3a1600d43b42c5ba3b2cc9cc3b593ca8bb422ea93b6d85a9bb0a03aabbfef3d83b33caafbbca80bd3b1e16753a31c44f3a4a316a3a70f034ba7d16373a37b036ba545537badaac7c3a114e3fba2eb2533a2bd3c13bcec4a63b5916ba3b912992bbc5f6933baa8693bb180894bbe721c73ba2309abbb7b2a93b1d44b03b86cc973b2b3ba93ba0a985bb1e05873b16df86bb455187bb501db53b2bca8cbb326d9a3bffd5c83bf020b03b22d8c13b5c639dbba27f9e3b65b09ebbfa2e9fbb3a9acd3b9802a5bb77d6b23be5921f3bdc4e073bb27c183b7716ecba428dee3a9b5ceeba3534efbad582243baf90f9bacddc093b933a2c3bc4fb153b86ca253bdfd705bbedee063b49ec06bb055307bb82af303bbf370cbbc65a183b2aa4673aac8d523a86b1613a577642ba854a433a499843baa70444baeeaf6b3a371349ba7ee1543acf9e623b8da9453bf04b5a3bb83b30bb5ec0313bcaac31bbc33632bb485c683bc1b838bbeac5483b85c61a3c25f2043c5481143c3142e9bbd9e2eb3bf76aebbb7434ecbb83171f3c62fdf5bb5c4b073ca3a9823b2a755f3b155d7a3b6bfc43bb1bea453b49d045bb0b7e46bbf174863b92c94ebb5c7d633baf94c63aba84b33a8a2dc13a5110a5babee1a53a2e12a6bae072a6bac944ca3a36f4aabaf69bb53a1958bc3beae1a83badc5b63be1559abb034e9b3b78539bbbc8b19bbb0c2dc03b1626a0bbecfcaa3b5bb7d33b91ceb73b06adcb3b0683a3bb31b9a43b45e4a4bb3d6aa5bb4244d93b02a5abbb55ccba3bf4fd223c56e00c3cabb01c3c1af8f7bb04c2fa3b5739fabbd20dfbbba14e273cad9c02bc98470f3c83ffdf3840ee7638d69dc0386b1200b8f79b0938b5f406b8506b09b89bcff638c06f2ab80e13853861a4003c837de63b5eb6f93bb017d2bb5444d33b7883d3bbda0cd4
bb8038033c0c68dabb7a6fe93bc378b03bb2139a3b021baa3b977a89bb01498a3bb5a38abb10168bbb45d3b43b984a90bb77849c3b35a69b3b7ccf873bc7fc953bc69671bbe0e4733b949a73bb695974bb4d879f3bdb6f7dbbb9f6893bc8c3103ccceefd3b80af0b3cb162e3bbf209e53b0f34e5bbb8e3e5bb603f143cf310eebbade5003cb9931f3c4d2a0c3c12151a3ce520fbbbf8a5fc3b7c2cfdbb02f6fdbba452233c878e03bc164b0e3c7ab2503bafb7383b92e9493b9eb426bbaa99273b98f827bbea7428bb8c52553b5f1e2ebbce583b3b9435cd3aa043ae3aa53dc43ae69c97ba9e73993a5d1899ba12a399ba886bd33a9e61a0ba4d8eb13ad8f32b3b6431133b09d4243b3bc500bbb53c023bf2fd01bbdc7102bb09dd303b9cf907bbd9d9153bae07ac3bafbd9a3b191da73bd7aa8dbb697f8e3ba4908ebb0be68ebb5b64af3b5ced92bbd8a09c3b5437533b651d423bdd754e3bc1a334bbda53353b599a35bb07f735bb216b563bc43c3abb1e09443b59c97639c04c313939d261391a5f03b90d5e0739c52f06b9ad3107b98fda8239a45614b9004a3839a9f63e3cd58b283c968c383cede117bc94f4183c1a0419bca47019bc4c5e433c298c1ebcc5f82a3c151d053c7e8de63ba203003cc1c5cbbbd1bbcd3bd993cdbb0541cebb059d083c2f64d6bbf469ea3b37b07c3c7104663c2f4a763c498154bcbea8553cc5b755bc462c56bca585803c99965bbcd683683c0768a03bcb308f3bfa8d9b3b74ff81bbd7c7823bdeea82bbf84383bb71b4a33b336087bb7b16913b5db24c3bfc4c2f3b0e36443bdcb119bbfe551b3b3a211bbb74aa1bbb9e8f523bbe2a22bb0b71323b6020333c1755213c111f2e3cc67b13bc026a143c777114bcf7cd14bce085363cc41619bc1c4d233c82a9833c6e66693c1bdb7e3c708d52bccb07543cb82054bc89b854bcbe8c863cfdc65bbc5bae6c3ca9f73a3bb772203bc14d333b3e120dbb77770e3beb5c0ebb79d80ebb0744403bfab214bba347233b53bfc03b8bbcad3b0063bb3bfc419fbb6502a03bc946a0bbbfa9a0bb6f64c43b1a36a5bb56d5af3b32dfbb3b26809f3b2fb7b33bb0408abb2e0f8c3bfea58bbb45278cbbf87dc13bba7c92bb278ca23be340313bc613183b1a0c2a3b788a05bb74a2063bcccf06bb914c07bb9f33363b37060dbb2bcc1a3b6e04373b4a06223b30f4303b07bd12bb8bb6133bfac413bbed2614bbbd353b3b7dd018bb2745243b44e8683c23e8593c59bc643c1c024ebc88a74e3c84db4ebc0b2d4fbc6eb86b3ce1ef52bc8d975b3c1abdf43b594cda3b1b3ced3bb25bc6bbe081c73b24bec7bb9644c8bb3fddf93b2078cebbac30dd3bf1ded63ba697be3bb2fecf3b3e18acbbd949ad3bc35eadbb0edaad
bbf28fdb3bca90b3bb6440c13b5e15c83bbac4a93b2b54bf3b1ca193bb8c3c953b0c1995bb92a395bb4122ce3b034f9cbb4001ad3befec523c4156403c8ea34d3cf78232bce621333c4d7d33bce7de33bc5f8b563c893a38bcda5d423c7bcc403c6511273ce877393c7f4213bcf000153cd19214bcb50c15bc73d1453c8bfa1abcbedd293cdbf72737cefe7935e9a0ea3691436136ef7f4db6b9ab5236d3754d36b34e4e3742cd02360e2be6357d53cc3b5168b83b4aacc63ba62aa9bb2c39aa3bff35aabb6199aabbdb30d03b0147afbbf095ba3bc3cdf53ba115d43b38fceb3b86c0bbbb13b9bd3bf453bdbba6e5bdbb2da1fc3b0918c5bbefa4d73bbe48133c7429063c569c0f3cf9b9f7bbea07f93b712bf9bbb4b6f9bb5bc5153c431100bc339f073cdb0e323c2b791e3c6b812c3c8a870fbc0f88103c428e10bcf9f010bc02d9353c9d8a15bc1a9e203c0c7f853b93716e3b2e75813bf68b58bb00085a3b110d5abb039d5abbe33f883b465a61bb5a93713b03ab553c201a403cbe9a4f3c38742fbc9d73303c059e30bcc40e31bca3c8593cbb3f36bca37c423cd9fb673c8f4f503c3d47613c76013ebc016a3f3ca23f3fbcf5b53fbc2a8e6c3cc54845bc53e8523c041b623bdda3403bc769583b7c6228bba01d2a3bb8fd29bbf1942abb29d1683bf0e231bb5a33443b6a5b723babe6483b0a5e663b4c9a2abb5ff02c3b02992cbba9542dbb56a67a3b516736bb95514d3b23de193b4bfc023b774e133b329de3ba1a65e63aa4e5e5badebee6bacf601e3b0612f1baf273053b4e8e033c3e02ea3b9ee4fe3bcb8bd3bbb11fd53bbd16d5bb27abd5bb675a063c4493dcbb8135ed3b98b6113c5b99023cbe6f0d3c2001eebba291ef3bc099efbbef32f0bb41a1143c3258f7bbde41043c861fe33b2b9bd03b1cf4dd3ba918c2bb1ae3c23b0020c3bbf382c3bbed9ce63b0e15c8bb23add23b1002493a3c2b303a42e2413a67891dbae8f91e3abcc81ebafb3e1fbac4e94d3a90df24baacd8323adbc5283c98a4153c8c5d233c251707bc45f1073c9b1a08bc7d7d08bc0a752c3cdf050dbc6abe173c8468e13b816ac63ba69ad93b53ceb2bb4513b43b5c20b4bb009fb4bb8bcee63bb997babbac4cc93b03d4383b11af1c3b10a9303b085408bbdfce093ba5ac09bb1a2c0abbe37d3e3bdb4a10bb0bac1f3b33c5903b758c783b4edb8a3b297d5abb99a55c3b877e5cbbed3c5dbbd1da943bd75266bb1aef7c3be43e933b98e6833ba4de8e3b60cc70bbe033723b976172bb16fa72bba03e963bb4147abbf391853b0dbf503b775b2f3b8812473b482417bbadea183b66be18bbbe5619bb0c72573bcc9e20bb53e8323b9804413b79de1d3b1ac9363b5f7204bbe897063b631606bbb7ae06
bb6220483b872f0ebb4794213b940e473bd069223b4f613c3bcf0308bbf12e0a3b29b909bbaf570abb207b4e3b4c2812bbbf46263bb13e003cbe56ed3b231cfb3b83cedebb0b47df3b1bdddfbb1d46e0bb1212023cecf3e4bb6c77ef3b1f4ace3b2060ba3bf3a7c83ba325abbbe814ac3b2d34acbbcf99acbbd820d23b9553b1bb8b90bc3b7e06143c1187003cd1670e3c873be4bbf26de63be925e6bbefdae6bb87e7173c7487efbbe29d023cceab073bd47de33a0b4d013bb8bac3ba993cc63a6dcdc5ba358ec6ba7d170c3bdc02d0baa822e83a922c3a3b7c34203ba1af323bcef70cbb8e820e3b1e3d0ebbb5b40ebb47583f3b567714bbeffb223bb4108b3bc1b1763b6996863b910c5fbbe891603bf3aa60bb054661bbc1218e3b168c68bb7a1d7a3b57c9653cdf6b513cc614603c019541bce679423cf7b242bc1a1f43bc56a6693ccd1748bc94af533cd27a9439c7225c3996ea8839e0c528b9e7682d39f6ed2bb9c6112db9ebb29c399fc33bb945e463396108ff3b7992e93b75eff83b8956d9bb5b47da3b5577dabbb9e3dabbe699013c96efdfbb86ebeb3b7f57863be40f6f3b851f823bc68758bb31305a3bf60e5abbc2a05abb113b893b597e61bb8249723b62c9b03b2278993bf50caa3be76688bb4ea4893b7b8989bbd4f489bb5771b53b211a8fbbc4f59b3b25b9483bb8ec2c3be2ac403b4ad918bbcc221a3b4b331abbdeb51abba84c4e3bd7d320bb75e22f3be664903c6fd67d3c616a8b3cc73263bcb94e653cbcfc64bc28a665bc21cd933cc4b56dbcf2d1803ce8c0273cae43163c0cd3223cfddd08bc6aa4093c96cd09bc54280abcf31a2b3c8e560ebc4831183c9fe3f03ba689d93b7555ea3bb270c7bb8a81c83bafb5c8bbb930c9bb2c55f53b42d9cebb8320dc3bff4b0f3b8ebcf33a331b093b62e7d3ba9ca0d63a4ffdd5baa5bfd6baf493133ba637e0bab350f83a6a0c773b828e593b4fa96e3b223943bb19be443b0fc044bb685345bb12ca7c3b252e4cbbf8c45c3b7cd2863b1e8b653bd503813ba16448bb10954a3b8a4f4abba8024bbbb4d78a3b8cbd53bb0dcf693b98d9ca3b4110b43bee51c43b9129a3bb6040a43b314fa4bb00bca4bbe256cf3b1fe8a9bb6f86b63b526e833bb4e1693b9d997e3b17dc53bb517f553b805955bb20e755bbe542863bd6995cbbad086d3b5371a13b48ca8d3b8fd89b3b9a287ebbcff07f3b9a1780bb047a80bb3046a53baa0685bbb3ed8f3b6f38dd3b1416bc3b669ad33b3503a4bb76e9a53b8996a5bb3f2aa6bb1fe3e33b4357adbb269abf3be90c673b4b0b493b5d685e3b69ea32bbe987343b1f6434bb0def34bb62036d3ba29d3bbbd9424c3bae4b2cb68a3bf5b50f1a1eb67668ac355348b2b55319b13565c9b2
35fcfb35b6425cc835eee2ffb54d16893c2387773cad58853c60a862bccc14643c651b64bc19a764bcab9f8b3cd51c6bbc927b7a3c4769753b25f35d3b34d16e3bbcdc4bbb39e04c3b28224dbb009d4dbb06e3793b154853bb0e8c603ba5f4bf3b46bda83bed4eb93bc64197bb1a9d983b666d98bb5fdc98bb0d86c43bc7239ebbb03fab3b554c843b05dc663b4bf07e3b94bf4dbb8e6d4f3bae734fbbf91650bb939d873bdfc457bbb9816a3b8916023cb940e93b808dfc3bab96d4bb5603d63bf501d6bb1f8ad6bb3fb0043caae4dcbb6834ec3ba292fc3bb1dbd43b1618f13bfb25b8bbad0dba3ba710babbfcc7babbf240023c2d7cc3bbad17d93b67a7fd3ba189de3b93b5f43b7499c7bbf820c93b6026c9bb0fbbc9bb96e8013ca6bad0bb4de2e13b1b845a3b01c63b3b578f513b3c9a25bbcc53273b290e27bb789627bbd9be603bd8332ebb37053f3be469b93b5fc0a33b0944b33b7b6d93bb4764943bb08e94bb55fc94bb9d9cbd3b620d9abb801ea63ba1f3a93be9e8933b89a7a33bd99b83bbbd87843bc1bb84bb052a85bb8a45ae3b2e388abb7a4c963ba1c9163b5dd7ec3a8b400d3b52b5bfba0d50c33a6f97c2ba45a4c3ba7a761d3bb0ead0ba0f91f33aa468403c696f2a3cb03b3a3c96cf18bc5a761a3c9afe19bce66d1abcff98443c1dba1fbc73de2c3c05ece93a05d0bd3a9914dd3a49bc9dba3495a03a26cd9fba5a8fa0ba9dd6f23a51ffa9bafa78c23a37815b3c0578483ca731563ccb9339bc71553a3cefa43abcad0e3bbc2d175f3c2ac83fbc2d994a3c3ec6593b67af3c3b1868513bba0127bb61bb283bca7128bb31f928bbec8a5f3b077b2fbb21cf3f3b9360ca3b335cb23b2089c33bbc2aa0bbe477a13b2d67a1bbb7dda1bb4410cf3be66ba7bbf2f8b43b1d8e6c3b8c834f3bf349643b097e39bb6f113b3b60fc3abb808a3bbb2436723bce4242bb22ad523bbec74a3909da0c39921a3839fde5c7b88805cf38c8e5ccb8eeadceb8711158399b0ce6b8a3141339aad8d53baf49bc3be48cce3b053ea9bb8363aa3b4c8daabb640cabbb2adada3b5ceeb0bb9b0fbf3be178ab3bfa8d9a3b77aba63bbbc68dbb80848e3b8ca98ebb80fe8ebbd8c0ae3b9ff692bbc7679c3baf81003cc85fe23b9539f83b569dcbbb3231cd3b5a25cdbb80b5cdbb8487033cb1a0d4bbb3ade53b1d82063c80bdec3b2ded013ce944d4bb98dfd53b2bf3d5bbec95d6bb91a3093cef1fdebbf343f03b34201e3c1c250d3c1c52193c10ffffbb34fe003c34e500bc803b01bc6e66213ce33f05bc72020f3c4cc2de3ba47cbc3bba01d53bee4da2bbf96fa43b680da4bb66b1a4bb0b6ee53b6f95acbb4837c03b565d2c3b301e023b0ae61f3bb16dc9ba1762ce3a3224cdba4579ce
baaf19353befa0dfbab57e063b4acc0a3cda8cf63b595e063cef28dfbbfacce03b64bfe0bbef56e1bb14d60d3c0b7be8bb3debf93b1864fa3a2c66c93af01aec3a6770a6ba311aa93a96b7a8ba668fa9ba0829023bfcf8b3bad78fce3a2078483b9db62a3b01e73f3beff814bbe461163b797016bb2cfe16bbcb5f4e3b2ea11dbbd4e72d3b4252943baa7f873bf5a8903b1f727bbbe6df7c3b20c87cbb3a487dbb21d6963bbda081bbaae4883b09ee0f3c8bc6033c7d8b0c3c3967f4bb9975f53b32c3f5bb1046f6bb3636123c814ffcbb3823053c2df57f3c044a653cb970783c5b8350bc640c523c69ef51bc287752bc6888823c6ad458bc453b683cbea28c3b6f4c713b34ea863b8b7b53bb2d93553b357f55bb8d4156bb7b90903b2f575fbb9c9e753bc84eff3a0da6dc3a654cf53abe93c3ba6017c53a8146c5ba75eac5ba791d033b8598cdbac159e03aae85b53bac1ba13bcfbcaf3b537c91bbbe90923b738e92bbb1f592bbd077b93b6ec297bb3e58a33b87501f3cb6d40e3c6eac1a3c883102bc9ee8023c2e1403bc2d6a03bcc277223c125e07bc85a6103cd5d0083bc9e0e03ab399013ba624bfbafed3c13aa94bc1ba4115c2babce20d3b80fbcbbae5eae53afb5ba43be597913b5e089f3bbf9383bb1049843bb68e84bb90ee84bb3cffa73b2e5389bbfda4933b179aa33a42f6883af9d29b3a770d6cbab2c86e3a40886eba06716fba0204a93aa9c47aba50c48b3aa36a8f3bb587813bae828b3b4fa66dbb24e36e3b5e266fbb98b76fbb1b11923bf86876bb2e11833b96ee2f3c94091d3c449c2a3c51650ebc19610f3c82680fbce5c90fbca08c333c125014bc4a1f1f3c5ca60b3bfadaf23aca5b063b34bbd8ba7060da3aab7bdaba3e23dbba8f520f3bf714e3ba5eb9f63a2a2f903b0d7a783be7738a3ba43f5bbbb6075d3b373d5dbb5cfe5dbb0322943b5ff966bb85c47c3b990a913bcd16773b30c98a3b735b58bbcb6e5a3b48665abbdc295bbb8263953b1a7264bbf3a27b3bb9ebdc3b69fac43bd71bd63b6d06b3bb0f10b43b4344b4bb2fbcb4bbd994e13b284ebabbc896c73b937b8e3be5cb7d3b7e0f8a3bc40e66bb61a7673b78af67bb814b68bbb880913b8d986fbb3e9a803bbd82933b85e67f3bb4e08d3b1c5b63bbc122653b634a65bb860566bb0565973b74c36ebb960c823bf9af663cda15573c3c51623cc3d64abc74974b3c68b24bbcaa034cbc5fa5693c25d54fbcbfd2583cf5dda83b58bc943b8e1ba33b2d9485bb95b6863bf99886bb5af986bb6ad2ac3b28918bbb45ea963baeeac43b67b9af3bb7ddbe3b0ef19fbbc7dea03b2c07a1bbcc6fa1bb4d11c93b3851a6bb4306b23bb2ad693c4c23503c2068623c0bc53cbc14263e3c54153ebc07923e
bc9ba76e3c817a44bc7beb523c0f1a6b3b1fbd503bb5a3633b40ed3cbbc6ed3d3be0503ebb8dd83ebb7330703b7f1045bb12a1533b9682aa3b6b31913b8d42a33bcc4c7cbb2c567f3bffd67ebbb0c97fbb557daf3b76a185bb80ed933b0d7c983b1114863b1f3e933ba97670bb1047723bfa5a72bb9c1073bbb0129c3b6d927bbbba14883bdb484b3c428b383c5d01463c970b2abcccf72a3c9e0d2bbca16e2bbc42df4e3c43ee2fbc309d3a3c7cd1313cce1d223c74592d3c105f16bc3dfc163c1e3117bcd78017bcbfe0343c512c1bbc68d4233cddbc333b078d183b8aec2b3bfa4b04bb46eb053b9fa305bb412306bb131e393b89370cbbed781b3bdad40b3b0847f73a8d2a073bfd6ddfba0114e13a8d08e1bad0a0e1ba650d0f3b58dfe8ba39c0fa3ae3e9db3b3c0ec33b4ee4d43b0903b0bb603cb13bf053b1bb30d3b1bb4cb1e03b99b7b7bbdec9c53b54aad23b24f9b23b0090c93bf63b9bbb9b369d3bc4cc9cbbb35f9dbb86eed83b0778a4bb9361b63b2152853aecfa6e3a2868813a35ca59baf02c5b3a05405bba6acb5bbacdfc873a365662bac004723aa9313e3ba828263b1b4e373b281314bb9b85153be24715bb05ba15bb1def423b852b1bbb21c0283bf8c3f83aa162c73a3c44ea3aa44fa4baef5aa73ad28ca6ba715ea7ba6d72013b56a4b1baeb8acc3ac878233b5b850a3beb371c3b86e4f0ba6aa9f33a8846f3baad29f4ba9d82283ba8fcfeba172a0d3b8187bd3be5b8aa3ba939b83bcb609cbb4e259d3bea629dbb00c59dbb3623c13b1b45a2bba9cbac3b8881d23be64bba3b0ca5cb3b30cea7bba012a93bbb12a9bbe88ca9bb6930d73bf33bafbb28f2bc3b6899723bfa6b573bccde6a3b85fc42bb0037443b796544bb6ced44bbafe2773b44414bbbca625a3be591193beabc013bbb9e123b0baee1bab7dee33a19eee3ba19c7e4ba7a671e3b1c03efbad440043bb55b1d3c687a0b3c2549183c05dcfbbb868ffd3b46bafdbbda6efebb4ad2203ca16703bc096f0d3c31a1223c3a30113c96a71d3c780904bcdeef043cfcee04bcea4405bc6d0a263cec4b09bc5c15133c18f9233cfae2133c2b701f3c6a7107bcfa43083ccb4d08bc4fa008bc4e0e273c39790cbc4aa9153c0d4d083b2fe5ed3aa856033bda20d4ba42bcd53ad2e3d5ba2c8ed6ba39b50b3bb27cdebacba6f13aaa93fd3b6a09da3ba172f33b3206bfbbac23c13b1cd6c0bbef81c1bb6241023cdcadc9bb51e6dd3b8974203cb4ca0e3c97731b3c516d01bcc330023ca55b02bcafb602bcbbde233c9be006bc23ba103c9f16883c91fc6c3c300d833c6f7052bc089b543c773354bcc2d854bc508c8b3ccecf5cbcb6c9703c01fa873b3652723b48c5833b01f55bbb75475d3b24815dbb1d165e
bb87d98a3b590665bba58f753b80849e3b8128913bb9cb9a3bc8ae86bbf738873be36d87bbb8b587bb2e07a13b6a068bbb7ba7923bcf19c93a06faae3ad993c13af0cb9bbabe179d3a17169dba8a909dbabc4ace3a0566a3ba57c7b13a6b60eb3b4ebdd03b1fd4e33bbd61bcbb94c5bd3b2cc7bdbb5a4dbebb4e87f03ba58fc4bbd2a7d33bd271983afba16e3a68938e3ab76141ba7338453af03744ba863e45baa2689f3a515f52baf66a753ad176a93ca6459a3c3731a53cc75b8ebc603a8f3ce22d8fbc397c8fbc885dac3cb52593bccef39b3c00b3203c1836123c61a01c3c30f306bcd1a7073ca8bc07bcd60808bcb876233c318a0bbcd6d0133c20f9ec3b3756c43b7f3de13b636fa6bb34dda83b6564a8bb7e19a9bb3414f53bc403b2bb57acc83b8102813b465a643b8d90793b281d4ebb30814f3b5fa34fbb813550bbe0e7833bab0f57bb2394673bdaf8543c9761423cc5be4f3ccc3034bcccd6343c503435bc3e9935bca485583ca5193abc5570443ce20d143b7245013b97a50e3b7dd1e6ba17cde83a63ace8ba985be9baabc8173b36c0f1ba7e49033b7fb3c73b4e0eaa3b3830bf3bacf693bb34b3953b8b6d95bbe5f695bb548fcd3b079a9cbba73ead3b3207983b05188a3b1916943b6fe77ebb1817803b123180bb157880bbbfb69a3b87c683bb29a08b3b1cab003b6a1cd53a1c67f43a46dbb5ba9011b83ac0e8b7ba27adb8bae02c053bff0ac2baa4c1d93addaf9c3b8c63843b92a6953b743665bbbbfc673b548f67bb686d68bb578fa13b5b1573bbfef9863b964d2e3c17601b3c79f4283cf1d40cbcb4ca0d3cbfd50dbcfe350ebc36f2313c65b312bcb5741d3c4bb3aa3b5306953b3f7aa43b701c85bb1914863bf13186bb729a86bbf3fbae3b5d7e8bbbe25b973b4760e13bff39cd3b98bbdb3b895dbdbb0d6ebe3b9878bebba7e2bebb9f32e53be2cec3bb8377cf3b02dc023b5624da3a6302f93ac7b5baba0a50bd3a23bfbcbad97cbdba2048073b70c9c6ba38bcde3a9c223f3c472f243cea6d373c06f20fbc8b79113cec4c11bcf9cc11bc696c443cafec17bca119273c6b62793a2a3b3f3a7d20683a61b416ba458d1a3a4f3919ba33211aba74c3823aa8c625bac437453a3c12f83aee5dd23a2e24ed3a22b7b6baf603b93a9286b8ba1e32b9ba10a4ff3a736cc1ba7f60d63a5831ea3b6afcd13be65de33b5d6abfbbad87c03bceb5c0bb5c33c1bbf7d5ee3bf2fbc6bb4ea7d43be8e10c3b3ba2fb3a4e92083b2f3de5bab49ce63a01c6e6ba7e59e7ba5cd80f3b5b3feeba1ee5fe3a21776d3bbca24d3bbf61643b42ca35bb0760373bc86a37bbd50838bb46b1733b715a3fbb0017513bdfe8463b3e822f3b8a3f403b8afe1dbb85071f3b6e331fbbaea71f
bbb3774b3bbe1325bbb20e323bb213513b8c603a3be4a74a3b0f1729bb051c2a3b984b2abb35c12abb3274553b3e2430bbccde3c3b4323ff3bff2ee03b975af63bee0ec9bbc03cca3b30adcabbcf4ccbbbb391023cc88bd2bbc690e33b853a913b1cf4823b67248d3b9abc70bbdff7713bbf3172bb54be72bba809943b824979bbbd7f843b312b3c3c9ee6273cb76c363ca36d18bcf173193ced7d19bcdbe319bca516403c3ea81ebc0b1f2a3cd0de4a3b196e373b755d453baa9d28bb6082293b12a529bbfa072abb7fa04e3be1a12ebb1a90393bc9b46d3b4ba74c3b9d28643b799b34bbf139363b5d3736bb55d036bbda4d743bb41b3ebb492f503b86f9c93a644db03a608bc23afe8b9dba2fe99e3ad5cb9eba44429fba891ecf3a36eba4ba2a0ab33a1dd3223cc21d113c51cc1d3c66a403bc8d8b043cbb9004bcdee804bc8242263c2f0d09bc310d133c2a5e7e3b96aa693b4683783b07c659bb1fc15a3b88e05abb31495bbb102e813b7d3760bb91f16b3b3d041d3b97da063b4286163bb43eeebaf7a1f03a0a4df0bab20ff1ba698c213b9770faba8a2d093bb0869b3cea58873c37d2953ce0186fbccad4713c5d3671bc17f971bce0699f3c3a7d7bbc0a90893c9ab5c73be591b03bda22c13b76e89ebb2733a03ba11ba0bb458ea0bbf233cc3bedf1a5bb4a18b33bb3ba3b3c20bf233c85f3343c811f11bc2c98123ce46212bcf9db12bcbf57403cb38618bcb061263c220d593b92803a3b5b48503b95e123bbe870253b426825bbd4fa25bb7c165f3bbbde2cbbbfca3d3bdb90493946e21239ce3f3939f839dbb82302e23823d5dfb82b7fe1b8011655393be4f6b8f8761839916e7a3b38a74b3ba9a56c3b84252bbb6db02d3b8e382dbbb7f82dbb840a823b368737bb9982503bcafa1c3b18990b3baefd173b768dfdba9d23ff3a014dffba82f4ffba346a203b4fea03bbbf780d3b06c5f83b4d95dd3b681cf13b6ed2c8bbdff6c93b8147cabb41d6cabb0dfafd3bdc57d1bb9094e03bce21c73815fd9638ec97b83839166eb817cb73383c0572b82c7773b8777dd13843e282b84bd49b389126993b45c4863b6ee6933b7ceb71bb7fd5733bacca73bb1f7d74bb68c09c3b4deb7cbbcac2883b3736a23a1933933ad4fb9d3a10be87bab24d883a6b8d88ba48db88bad515a53a6b788cbaefdb943abead1b3c473f0c3cb84b173c738e00bc024c013c295c01bccba801bc42ac1e3cd54305bc22ef0d3c6821a43bc3b98f3b3d4f9e3b1e5b80bbb36f813b4d6681bbd7ca81bba31ea83be17c86bbfaf0913bca21f53bd59fde3be3d0ee3b180ecdbbeb2bce3b0e48cebbccbdcebbcb69f93b2035d4bb351fe13bae9f803b0785643b1105793bb93d4fbb298c503bc9b150bbe53c51
bb9473833b34c857bbfc9f673bbb6be63ab39cb43a1dccd73a3d7891ba9b5a943ae0b793ba608894ba51a3f03a56de9eba7acfb93a1c2b823b111f673be7087c3b7a0551bb0972523b108a52bbf41b53bbcc01853ba2eb59bb214f6a3bfb8ea33ba9e18b3be3a99c3b27df75bbb25a783b321878bb3fea78bbfd59a83bcc8581bb81618e3b32d10a3c143bf73b9a84063c93e6dfbb6c7ce13b9480e1bbab19e2bb13bf0d3c5747e9bb7591fa3bc2aa333ce235233c41092f3cbd7316bc1e41173c565717bc1bad17bca2cf363c32a41bbcff07253c4b02843b5cb3683bdd0e7f3b013c51bb12e5523baad152bbe76753bbdb15873bb08c5abb19176c3b35603b3c1c58283cd3f2353ce1141abcadf31a3c3b101bbc3b6f1bbc66183f3ca9d61fbc146a2a3c4f87ca3b7b5bb73b3220c53bd29aa8bbe37ba93b37a2a9bb0605aabb6433ce3b449daebb5a79b93b53f57b3b7a695e3baf84733b5c2548bb83c1493b6da649bb32354abb9fdf803bc8fb50bb029e613be7cf913b120c723b189f8a3b643a4ebb8adf503bfd9550bbf77351bba0cb963bb1315cbbd94e773bb779a83b01788c3b1746a03b5a1871bbbb51743b68aa73bb299a74bbe031ae3b683080bbbf678f3bab8c923bfdd9803b89818d3be41167bb3fd7683ba2e368bb159369bb2201963b5dc071bb94c6823bea72283cb138143c27b1223c46e104bce6fa053cefeb05bca14f06bc7f632c3c91fe0abc756c163ca138eb3b5a58ce3b5d02e33b3348b8bb5bf5b93b87c6b9bba255babbfad5f03bce0bc1bba27ed13bc460e73ba9dbca3b9a48df3b7d4bb5bb76a8b63b66c7b6bb2c57b7bbeee7ec3b1400bebb42f9cd3bce2c043c58f0e53bfc7dfe3b525accbbf61ece3b4e14cebb3cb9cebb0990073c5084d6bb44a7e93bae2be23b8bc2ca3bcc8fdb3b0ed3b8bb66ebb93b6312babb398bbabb3babe63b861ec0bb2b56cd3bfde7d03b3f72b93b963bca3b77d6a7bbcae0a83bbd0da9bb3d83a9bb9378d53b1df8aebb1c01bc3b5e4de83bc959cc3b9351e03b393cb7bb54d2b83b1aa8b8bb1e2fb9bbf7c6ed3bbc96bfbbe961cf3bdaa42d3b08b91d3b9133293b4e3511bb6af3113b2c1712bb516c12bbc0a4303bc55816bb2d801f3b2eb74c3b73c6323bfe47453b9faf1fbba5cf203bc4fd20bb9b7b21bbffd3513bcc5d27bb2193353bb50e953b00d8823bccd98f3bfd926abbf04f6c3b726b6cbbe21b6dbb56a1983b356d75bbf8d0843b0a86f73a390ed43a024bed3a8e1fbabaa5ddbb3a40debbbace85bcba9197fe3a126ac4ba19dad73a6e1aab3b7ae2913b49cea33bfb4a7fbba7e2803bffdc80bb8a5181bb7326b03b14d886bb4e92943b726fa53bc990903b737c9f3b101f81bb79f1813bd23082bb239982
bbbe82a93b816787bb22d5923b4b5e8c3b592e7a3b6807883b223563bb6783643b2bcd64bb836765bb5d558f3be18c6cbb1a857d3b0f3a163cbc13033cc4bd103c5cafe9bb15a2eb3b2d9cebbb1155ecbbeffe193c6a04f5bb2325053c1b70003c173ce53b130bf93bc170d0bb2c85d13bd7e5d1bbaf74d2bb071c033c8ef9d8bbab43e83bf20c9b3b682c8b3b1193963b72317ebb1c6a7f3be2e57fbb614680bb39189e3b061484bbabec8c3bb79a9a3bcf7c8a3b3b0b963b21667cbb2de87d3be4197ebb8fbd7ebb88b69d3b1d2e83bbbc418c3bbfc0183ccdbe063c579d133c146cf2bb6143f43b7f43f4bb19f4f4bb53461c3c143dfdbb67b3083cf6c29c3b1c7e883bbcf7963b78f272bb07bc743beb0275bb99cc75bbf8bca03b51157fbb0eb08a3b9265b23b3b1b983beecfaa3bf8b584bb483f863b54fd85bb957586bb84a2b73b75438cbbc4ea9a3bd710183cc721083c648e133c430bf8bbfe79f93bbab9f9bbe15bfabbdf231b3c83f100bc63e1093c91ab103b50a6003b721f0c3bb217e9ba2f90ea3aa4c2eabae961ebba6dc6133baddef2bacd66023bb27b8a3b749a793bd19a863b96f064bbf20e663bd86166bbdfec66bb67218d3b1b616dbb639a7c3b1b3f113cf200ff3b5e340c3cfd6fe4bbf1e5e53b1c49e6bb8efee6bb62b1143cf946efbb5070013c6d65053c39eedd3bedeafd3b173bbcbb0727bf3b0a6fbebb9338bfbbced3093c513cc9bbd2c0e23beb42d43a5a3db63a159ccb3a3631a0ba29bfa13abdaaa1ba0137a2ba3e3bda3ab6e4a8ba0275b93af97e2a3cd9151b3c6832263ce3f90ebc29ab0f3cbbd50fbcec2910bc9c652d3c94f913bcf1ce1c3c96d2503b4552293b6e3f453b07490dbbe8930f3b1c160fbb61bd0fbbf3e5583b1b0118bb0c752d3bf7de323b391b1a3be8be2b3b0bca07bb8821093b810309bb387809bbe9c7373b6a020fbb41c41c3b87cf783b7a15563b40cf6e3be76b3cbb9d503e3b66233ebbc3c73ebbbeb57f3bdf8a46bb3ccf593bfa98343cfea9213c4d3f2f3c9a4013bc3515143cb54114bc0ca314bcee3d383cd32019bcadbe233c70d0783b80e0593ba3cd6f3badbb43bb8d39453b973445bb0ec045bb2a157f3b9e6f4cbb2a265d3b3526bd3bf469a43be70ab63b580c92bb6a5e933b174893bb29bd93bb960bc23b724f99bb7c13a73beb88023ca7d5e73b13cffc3bbc94d1bbcbe4d23b2721d3bbcbb7d3bb4e59053c22a5dabb820beb3b9a31cd3b707fbb3bdd2bc83b201eaebb86d7ae3bdd0dafbb4968afbb4ba0d03beb96b3bbe86fbd3b994d763bdb81523ba2f56b3be17638bb943d3a3bb6343abb80da3abbcb747d3b97bf42bb0054563be3eb883b04a06d3b86bb833bea7f52bbfb8d543b1a5154bb4afe54
bbdc7c8c3b78325dbbea88713bfeedf23b2f9fd53b3b93ea3b6c5bbfbb9a10c13b1adbc0bbc368c1bbaaa7f83ba828c8bb34ced83b7b7cfd3bf225e43b504ef63bacfcd0bb1c0ad23b1354d2bb79d7d2bbde31013c40d5d8bbcbece63b8f2c343c4407223c950a2f3cd50214bc5609153c46f814bcd55315bc76ac373ca49e19bc0105243c6d11a83b18eb953baff5a23bfb2588bb76ca883be11f89bbd87f89bb8989ab3bdada8dbb2dec973b449eba3b2c13a63b09c0b43b8ace96bbedb4973b3ddb97bb4f4098bbf3a4be3b57f89cbb804da83b9f7b103c7e0bf73b1e6e0a3c865bd8bb9598da3b9e65dabb7526dbbb95aa143c6966e4bbd085fb3b57e24b3c6002383c2045463c7c9e28bc21bd293c04ac29bc1c102abc1db54f3cf1c62ebcbf313a3c72287b3bf9d3643b37e7743b708153bb616b543bc4bb54bba63255bb47647f3bf4a85abb1a50673b5c6e953b4409823bc0d78f3b397467bb2d8a693bd95d69bb93136abba749993b7aba72bbaf1d843bbe019e3bba9a8e3b84aa993b09d982bb3477833ba3ac83bb64fc83bb10f5a03b62ad87bbf54d903bc989f73a6428cc3a34dbea3a8d1badba7ca8af3a221eafbad3dcafba152f003b310cb9ba29b8d03a35160b3b12aef53a875e063be64bdeba14ccdf3a35dddfbafc71e0ba1f5c0e3bea8ce7ba3d22f93a3d4a6e3b2820563b2277673bc8cd43bb5bc1443b1c1745bbea9345bb96ef723bdc534bbbcac8583b983a2137efb19c3650c805377dcf15b6cdac213682831eb6c0bb21b638f935372f234db6d533ab366d03593b4be3433be90c533b65bd33bb75aa343b73dd34bba94835bb69125d3b20513abbf536463bd7d0f23bf65edb3b7e35ec3b5864c9bb6f68ca3b08a7cabb4421cbbb724ef73b52c1d0bb89f5dd3b83dc083cd9aef43b6abd043c36d8debba30fe03b9f5ce0bb3defe0bb69ad0b3ce5bbe7bb84dbf73bf902a53a59c38b3a99a59d3aed8d73ba8313763ae2ed75badecf76ba6023aa3a2ed080bad86c8e3ab88a1c3c3e010e3c936d183c2bcc02bc7d96033cf29003bc34da03bcd0581f3cc64a07bcd7990f3c333f233cffe0113ce5641e3c786b04bc8519053cf06005bcffbf05bc5b86263c19030abc9fd0133c71e98f3b87187f3b92368b3b97cc66bb3c7b683bcb6f68bbc40a69bba225933bf56e70bbc54f813b5100b83a7cdaa13a74a6b13ae07a91baee83923a769792ba0e0193ba915ebc3a300598ba733ea43a2c0a983c6ab8883cc0b5933cb3c079bcc7757b3c125f7bbc8ef77bbc1dfd9a3cb29c81bc8e678a3c2f24bb3b8871a63b5349b53b60ad96bb79a2973b4fc597bb152f98bbbf21bf3b1c149dbb2db7a83bd399693b24ae4c3b8061613bb39036bb1d44383bd90e38bb749d38
bb55376f3b94543fbb11d64f3b78d02d3c9dad193cfa14283c97520abcf77d0b3cba5b0bbca1bd0bbc8dbc313cbd6610bc30df1b3c1130953ab5a1743a5a4c8d3a141d4fba97b5513a509151ba527b52ba56b39a3aa4b65dba4e3f7a3a1815d53be660bd3b4c59ce3bc17dabbbe797ac3b89b9acbbb630adbba3afd93bceb8b2bb36f7bf3b0774763b60a2573bee9b6d3b179c40bb2a5a423b5d2642bb2ab842bb7e8a7c3be5ac49bb87f45a3bcb3e003ce8d8e33bd067f83bb40ecebb7c48cf3b7f94cfbb2729d0bb0aff023c20f8d6bb79ffe63bd523483b8e7b213bd4ce3c3b234a06bb2165083b760a08bb8fae08bb050c503ba5b910bbbe85253bab943f3c39fe273cd5e3383cccf415bc3f40173c8b2e17bccca117bc7126443c8e241dbc70922a3cf90d343cfd3c203c75742e3c985911bc2b05123ca16712bc3ed012bca6de373c338517bc856a223c681e903b85777e3bc84f8b3b2b1365bb15ae663b63d166bba07967bbb568933bce4d6fbb7412813b0336903b39fe7d3b624d8b3b16eb63bb3bd7653bf7ad65bbe75466bb5693933b90426ebbd4dd803bdeddf13aafa1d43a6579e93a42d7beba946ec03a6b4dc0ba8fd7c0ba75a4f73a5f71c7ba88c8d73a184e273b0b7f133b459b213bf10105bb4fe0053b67fe05bbf75c06bb283c2b3b13d00abbfa9f153be8a2c43b5309a73be91fbc3bf5e190bb8bbf923b515792bb8bdf92bb4180ca3b6a7c99bb5337aa3bce7a8d3b3410763bcd34883b4b475abb92605c3bab245cbbabd55cbb0419913bb33f65bb100f7a3bc391053c7201f33b9e30023c0061e0bb3e49e13b76b4e1bbb535e2bb54db073c6e1be8bbc1b0f53bc4f1023c2400e93becbcfd3bd8c9d2bb5b38d43bb953d4bbf1e8d4bb73b7053c43cadbbb712eec3baf5a4d3c69d6383cda91473c34f928bc74082a3cdc112abc3c7b2abce049513c50612fbc66193b3cf3d78b3b3caf703b8d2b863bc4a254bb9eab563ba27c56bb002c57bbf5c88f3b12975fbbd5cf743bb7a2e53ae107ce3a13d5de3a8fb6bcba63c1bd3a0fe6bdba1458beba0957ea3acfacc3bab290d03a0579a73b1458943ba403a23b5a0586bb88ef863b430087bb4d5e87bb7437ab3b43c68bbb9d6b963b3238f73b3f88dd3b12f4ef3bfad5c9bb4f24cb3b4731cbbb4bb3cbbb1f2cfc3b5dc6d1bb255ae03b5e37bc3b87e5a43b3e8fb53bff7093bb399b943b87a094bb831195bb2bc8c03bf4689abbc86ca73b7786873bea096c3b616f823b6db152bb319b543b495d54bb0dfb54bb760f8b3be9925cbbaebf6f3b8167933bc79c843bd8338f3b07d472bb9c32743bb46074bbd4f574bb3347963b08e67bbb8b3a863b735f753b7e10503b75946a3b4ff434bb50dc363bfec236bb1b7037
bbedd67c3b16a33fbb9c0a543b9f3a123c0027053c47930e3c77b4f5bb63fff63b5c27f7bb00b4f7bbf0b2143c0c24febb4e9c063c4613203ce9d30b3c985e1a3c5808f8bbeb74fa3be031fabbe902fbbb9af3233cc95702bcc70f0e3cd640d63b651fbf3b70b4cf3b0040adbb089dae3b2477aebbe4eaaebb7bb8da3bb55db4bb3da8c13bb61bfa3b7006db3b7c49f13b1983c3bba602c53b2421c5bb14bec5bbf910003c1400cdbbb26bde3b1430b83bcf0aa33bf42fb23be91d93bb7e0a943ba93894bb07a494bb6449bc3b609799bb5d5aa53b25fdc33a84e3a33a769aba3a8e2c8dbaa7ce8e3af0a78ebae6318fba7d87ca3a82f995babf42a73a705d113ceed2023c034a0d3c77dbeebb1967f03b0871f0bb080af1bbcb20143cbc19f8bbb56f043cb22aef3b2087d33b1f4ce73b48acbebb8115c03bb217c0bbe09ec0bb658df43beb01c7bb5389d63b28f7e13b8b9fce3b128adc3b8888bfbbb875c03ba796c0bbfbfac0bbd8a4e53b6fafc5bb5cc5d03b3621203c41fd0a3c2c1a1a3c0103f6bb6e59f83b0f2cf8bb28faf8bb6942243c575a01bc2c490d3c3d7af33a23c7ce3a50a1e83a8336b5ba513ab73af2dbb6ba6077b7ba5a18fb3a6802bfbaeb94d23ac09feb3b5884ce3bb95ee33b7109b8bb3dddb93b028db9bbf51cbabb1342f13b94eac0bb17b3d13b5bc85d3b9b0f423b2ccd553b6d7c2dbb56062f3b4ddd2ebbd7602fbbf64a633b9f9935bb4b09453bdd8ed73acc5eaa3ada41ca3a9faa8ababc408d3ab9b18cbac06e8dbad5dde03ad6c096bac612af3acfc2cb3a3a65a23ae593bf3ac7b285ba40ce873a908b87bae33688baeb49d43a29b690ba3db2a63a4312343b70871c3b8c4b2d3b33440bbba4650c3beb6e0cbb1bdf0cbb85bf383b172412bbd10d1f3b65459a3b5918883b681b953bd7b774bb1974763bbc9b76bbee5177bb1bcd9d3b3dcd7fbb50148a3bccc0863b0a4f6d3bed2e823b770d55bb2c79563be6bb56bbc75f57bbf4df893b18e95ebbe0d3703bbdfdc2388f1b6a384d01ab38c78909b8d8631138604c0fb8575c11b85a48d43895a62cb8b65579381e24243c03a70f3c334e1e3cda4300bcec45013c2c5001bc16b401bca022283c7b6c06bc7be1113c4f01823bf4e5673bb2f27b3b873353bb0e60543b02a054bb602955bba5c7843b93905bbbefee6a3b4a8fb23bdf45973b3ebbaa3bf9bf82bb017e843b351a84bb319984bb6ef1b73be0b98abb45369a3ba6348c3bc29d7a3b30fc873bd51164bbd256653ba4a465bb293e66bb09168f3b46476dbbc6e17d3b310e963b001b843b8fe6903b50b36dbb1c716f3b487e6fbbc02870bbd69a993bb94078bbeb09863bc086b63bf0ada23b3be5b03bec8693bb1d8f943bf19094bb03f594
bbe45eba3b3c9d99bb23daa43be021803c9946663cabec783cb4f651bc6396533cfe5753bc1bdb53bc50a1823ce7085abc0223693ca0270c3c4470ec3b40d8053ca5adcbbb3c7ace3b54d2cdbb2098cebbfe80103cc655d8bbbf24f13b68d3a33b3b4d923b8fe09e3b7cdc84bbc5b6853bf5ca85bb1c2586bb2032a73b4a4f8abb843a943b1179f53a26e9d53af261ec3af6a8beba6f4bc03aed37c0badacbc0ba50befb3a9dd8c7ba4c4cd93ac649fa3b9f54dd3bcefff13baae1c7bb2235c93b4558c9bb92e4c9bba3fbff3b217cd0bb1376e03bf02bbf3b8238a83b53a5b83bf9c796bb8b07983b4af797bb6d6898bb85a2c33b01bc9dbbd5b8aa3baf640a3c33b6f33b78a2053c7b6fdbbb6bffdc3bfd13ddbb0ab1ddbbb8ac0d3cce1be5bbc243f73b43f3813b1e9e693b49757c3bd78b55bb47cd563b34f056bb907657bb757d843b04b05dbb1e806c3b40ec4c3baeed2d3bacf2433bff7917bbe709193bf8f818bb0a8819bbfa22533bff5120bb4b3a313b91b9403c75282d3c942f3b3cb0211ebc192c1f3c7c291fbc478c1fbcfe7f443c072a24bc354e2f3cdf27193b1c4a033bf3d6123b5ec9e6bad5e7e83a31ebe8ba1eb7e9baa4851d3b0a60f3baeb9f053bb615f83bc0cedb3b8712f03b4e64c6bbbbabc73b28e0c7bbef6fc8bba38dfd3b9a15cfbb55e7de3b9d0af63a6d1bd43a4040ec3a6241bbba9601bd3a7deabcba0988bdba1bcefc3aef0dc5baa2bcd73a515a123ce166013cee7a0d3ce3cde9bb697ceb3bc37cebbbfd1becbbcbb5153c80b7f3bb9339033cd77fb13bba6b9e3b071bac3b4aac8fbb50ba903b99af90bb531191bb712cb53b4f9995bbb984a03bb76cc63a0d20a43a285fbc3a4cef8bba17b08d3a86848dba231b8ebae070cd3a3f5695ba6fb8a73a1ec8a13b91c48c3b39c79b3b26497abb153b7c3b9b6c7cbb203c7dbb08e7a53ba46d83bb210b8f3b2934e03bdcbbbd3bdc45d63b9836a4bb993aa63b33e6a5bb3784a6bb620ee73b6a2aaebb2b6dc13b408d6c3bf7604e3b74d7633b134938bbb4c3393b7dc739bb9b583abb0593723b5a1541bbe59b513beab68f3b870a793b512a8a3b14105dbb03085f3b19ed5ebb179d5fbb638d933ba51068bb5b237d3be7edc33bf532a43b8dc4ba3bb9e88cbbbcba8e3b96718ebb37028fbbe541ca3be3fc95bb4b96a73b49e1c03be543a33bae47b83bf7f48dbbf3708f3b415f8fbb87e68fbbb0d8c63bb75496bb0668a63bd9574c3bb252343bb670453b0e7522bb4dc0233b7aa823bbbd1a24bb5519513bf18529bbc0e8363bb29f9e3b0cf27a3b12f5943b2f0f4bbbc54e4f3b701d4ebb47344fbb5a58a53bc8395dbb9ff6803b34c47c3bfda2583b5452723b833f3ebb1530403b60ff3fbb5ca440
bbdafe813b7d9548bb807d5c3bbcc7113b5d35fb3ac9fc0b3b682cddbaed6edf3ab32edfba79eedfbaf5c4153b7002e9bab28cff3ae284863b52bc6f3b165d823ba1a959bb86e15a3b1f335bbb3ac85bbb155b893bbfaa62bb97f1723bf3f5573bd1b6393b440f4f3bbda724bb1424263b210626bb468726bb9a305e3b8fcb2cbbd8dd3c3bc9aebb3b22f3a83b3457b63b5fd99abbc3bf9b3b7dd19bbb2f2f9cbb1458bf3b7d87a0bb09fdaa3b6883d83aa485c13aaef9d13a146cb0ba1b3db13a409fb1ba4614b2baacfcdc3ab473b7bae607c43abc67153cb871043c7f97103cbc2befbb77d3f03b52f0f0bba69af1bb52b1183c1189f9bb4c4c063caaa7a73850105a380a749538f7b013b89bcb183851e917b8f07419b8f0dbb438967e2db82d6065389e5f2e3b68b3183b552c283bb4a008bb6c99093b5ebb09bb12270abb8ca1323b281e0fbb2a0c1b3be7541c3c61f80a3ce86a173ca080fbbb4223fd3b1455fdbb3606febb69ae1f3c081c03bcfcdf0c3c69a5413b7a8c223b3199383bd1630cbb60dc0d3b85dc0dbb36690ebb00ef473b571b15bbdcd5253b219e7e3b36fb553b1acd723b162638bb82dc3a3bc5123abbabc53abbae68833b958b43bb6f495a3b4379d23a3b1dbc3a9c28cc3a7eceaabaea0eac3a09feabba1f6facba82c6d63a1bbeb1ba6092be3a18c6af3b21dc973b01eba83b022586bbbf52873b7b5887bb6acc87bb257db43b2e378dbb83709a3b0ba925c078e211c03a0420c0af140340900804c0fe160440fb7504404a8729c022020940230814c06aa9873f0430713f8e21833fb5d81f3fe78b5c3faf43323f756a7a3fb27c8a3fa5e29f3fc5c4743fb4eaee3f
+\ No newline at end of file
+-- 
+2.25.1
+
diff --git a/Fix-indentation-and-numbering-errors.diff b/Fix-indentation-and-numbering-errors.diff
new file mode 100644
index 0000000000000000000000000000000000000000..261d622858a9384c795b890efd09e03ae06df8ce
--- /dev/null
+++ b/Fix-indentation-and-numbering-errors.diff
@@ -0,0 +1,205 @@
+diff --git a/libphobos/libdruntime/Makefile.in b/libphobos/libdruntime/Makefile.in
+index 91cd653623b..b686f5eb492 100644
+--- a/libphobos/libdruntime/Makefile.in
++++ b/libphobos/libdruntime/Makefile.in
+@@ -124,13 +124,13 @@ target_triplet = @target@
+ # CPU specific sources
+ @DRUNTIME_CPU_AARCH64_TRUE@am__append_11 = config/aarch64/switchcontext.S
+ @DRUNTIME_CPU_ARM_TRUE@am__append_12 = config/arm/switchcontext.S
+-@DRUNTIME_CPU_LOONGARCH_TRUE@am__append_13 = config/loongarch/switchcontext.S
+-@DRUNTIME_CPU_MIPS_TRUE@am__append_14 = config/mips/switchcontext.S
+-@DRUNTIME_CPU_POWERPC_TRUE@am__append_15 = config/powerpc/switchcontext.S
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__append_16 = config/mingw/switchcontext.S
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__append_17 = config/x86/switchcontext.S
+-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__append_18 = config/systemz/get_tls_offset.S
+-@DRUNTIME_CPU_S390_TRUE@am__append_19 = config/s390/get_tls_offset.S
++@DRUNTIME_CPU_MIPS_TRUE@am__append_13 = config/mips/switchcontext.S
++@DRUNTIME_CPU_POWERPC_TRUE@am__append_14 = config/powerpc/switchcontext.S
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__append_15 = config/mingw/switchcontext.S
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__append_16 = config/x86/switchcontext.S
++@DRUNTIME_CPU_SYSTEMZ_TRUE@am__append_17 = config/systemz/get_tls_offset.S
++@DRUNTIME_CPU_S390_TRUE@am__append_18 = config/s390/get_tls_offset.S
++@DRUNTIME_CPU_LOONGARCH_TRUE@am__append_19 = config/loongarch/switchcontext.S
+ subdir = libdruntime
+ ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
+ am__aclocal_m4_deps = $(top_srcdir)/../config/acx.m4 \
+@@ -475,14 +475,14 @@ am__objects_22 = core/sys/solaris/dlfcn.lo core/sys/solaris/elf.lo \
+ @DRUNTIME_OS_SOLARIS_TRUE@am__objects_23 = $(am__objects_22)
+ @DRUNTIME_CPU_AARCH64_TRUE@am__objects_24 = config/aarch64/libgdruntime_la-switchcontext.lo
+ @DRUNTIME_CPU_ARM_TRUE@am__objects_25 = config/arm/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_LOONGARCH_TRUE@am__objects_26 = config/loongarch/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_MIPS_TRUE@am__objects_27 = config/mips/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_POWERPC_TRUE@am__objects_28 = config/powerpc/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_29 = config/mingw/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_30 = config/x86/libgdruntime_la-switchcontext.lo
+-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_31 = config/systemz/libgdruntime_la-get_tls_offset.lo
+-@DRUNTIME_CPU_S390_TRUE@am__objects_32 = config/s390/libgdruntime_la-get_tls_offset.lo
+-am__objects_33 = $(am__objects_6) $(am__objects_8) $(am__objects_10) \
++@DRUNTIME_CPU_MIPS_TRUE@am__objects_26 = config/mips/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_POWERPC_TRUE@am__objects_27 = config/powerpc/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_28 = config/mingw/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_29 = config/x86/libgdruntime_la-switchcontext.lo
++@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_30 = config/systemz/libgdruntime_la-get_tls_offset.lo
++@DRUNTIME_CPU_S390_TRUE@am__objects_31 = config/s390/libgdruntime_la-get_tls_offset.lo
++@DRUNTIME_CPU_LOONGARCH_TRUE@am__objects_32 = config/loongarch/libgdruntime_la-switchcontext.lo
++am__objects_33 = $(am__objects_5) $(am__objects_7) $(am__objects_9) \
+ 	$(am__objects_11) $(am__objects_13) $(am__objects_15) \
+ 	$(am__objects_17) $(am__objects_19) $(am__objects_21) \
+ 	$(am__objects_23) $(am__objects_24) $(am__objects_25) \
+@@ -500,22 +500,22 @@ am__objects_36 = core/stdc/libgdruntime_convenience_la-errno_.lo
+ @DRUNTIME_OS_MINGW_TRUE@	config/mingw/libgdruntime_convenience_la-msvc.lo
+ @DRUNTIME_CPU_AARCH64_TRUE@am__objects_38 = config/aarch64/libgdruntime_convenience_la-switchcontext.lo
+ @DRUNTIME_CPU_ARM_TRUE@am__objects_39 = config/arm/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_LOONGARCH_TRUE@am__objects_40 = config/loongarch/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_MIPS_TRUE@am__objects_41 = config/mips/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_POWERPC_TRUE@am__objects_42 = config/powerpc/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_43 = config/mingw/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_44 = config/x86/libgdruntime_convenience_la-switchcontext.lo
+-@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_45 = config/systemz/libgdruntime_convenience_la-get_tls_offset.lo
+-@DRUNTIME_CPU_S390_TRUE@am__objects_46 = config/s390/libgdruntime_convenience_la-get_tls_offset.lo
++@DRUNTIME_CPU_MIPS_TRUE@am__objects_40 = config/mips/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_POWERPC_TRUE@am__objects_41 = config/powerpc/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_TRUE@am__objects_42 = config/mingw/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_X86_TRUE@@DRUNTIME_OS_MINGW_FALSE@am__objects_43 = config/x86/libgdruntime_convenience_la-switchcontext.lo
++@DRUNTIME_CPU_SYSTEMZ_TRUE@am__objects_44 = config/systemz/libgdruntime_convenience_la-get_tls_offset.lo
++@DRUNTIME_CPU_S390_TRUE@am__objects_45 = config/s390/libgdruntime_convenience_la-get_tls_offset.lo
++@DRUNTIME_CPU_LOONGARCH_TRUE@am__objects_46 = config/loongarch/libgdruntime_convenience_la-switchcontext.lo
+ am__objects_47 = $(am__objects_5) $(am__objects_7) $(am__objects_9) \
+ 	$(am__objects_11) $(am__objects_13) $(am__objects_15) \
+-	$(am__objects_17) $(am__objects_19) $(am__objects_36) \
+-	$(am__objects_23) $(am__objects_37) $(am__objects_38) \
+-	$(am__objects_39) $(am__objects_40) $(am__objects_41) \
+-	$(am__objects_42) $(am__objects_43) $(am__objects_44) \
+-	$(am__objects_45) $(am__objects_46)
+-am__objects_48 = $(am__objects_1) $(am__objects_35) $(am__objects_3) \
+-	$(am__objects_47) $(am__objects_33)
++	$(am__objects_17) $(am__objects_19) $(am__objects_37) \
++	$(am__objects_23) $(am__objects_38) $(am__objects_39) \
++	$(am__objects_40) $(am__objects_41) $(am__objects_42) \
++	$(am__objects_43) $(am__objects_44) $(am__objects_45) \
++	$(am__objects_46)
++am__objects_48 = $(am__objects_1) $(am__objects_36) $(am__objects_3) \
++	$(am__objects_47) $(am__objects_34)
+ am__objects_49 = $(am__objects_48)
+ am_libgdruntime_convenience_la_OBJECTS = $(am__objects_49)
+ libgdruntime_convenience_la_OBJECTS =  \
+@@ -1905,11 +1905,6 @@ config/arm/$(am__dirstamp):
+ 	@: > config/arm/$(am__dirstamp)
+ config/arm/libgdruntime_la-switchcontext.lo:  \
+ 	config/arm/$(am__dirstamp)
+-config/loongarch/$(am__dirstamp):
+-	@$(MKDIR_P) config/loongarch
+-	@: > config/loongarch/$(am__dirstamp)
+-config/loongarch/libgdruntime_la-switchcontext.lo:  \
+-	config/loongarch/$(am__dirstamp)
+ config/mips/$(am__dirstamp):
+ 	@$(MKDIR_P) config/mips
+ 	@: > config/mips/$(am__dirstamp)
+@@ -1937,6 +1932,11 @@ config/s390/$(am__dirstamp):
+ 	@: > config/s390/$(am__dirstamp)
+ config/s390/libgdruntime_la-get_tls_offset.lo:  \
+ 	config/s390/$(am__dirstamp)
++config/loongarch/$(am__dirstamp):
++	@$(MKDIR_P) config/loongarch
++	@: > config/loongarch/$(am__dirstamp)
++config/loongarch/libgdruntime_la-switchcontext.lo:  \
++	config/loongarch/$(am__dirstamp)
+ gcc/config.lo: gcc/$(am__dirstamp)
+ gcc/libbacktrace.lo: gcc/$(am__dirstamp)
+ 
+@@ -1950,8 +1950,6 @@ config/aarch64/libgdruntime_convenience_la-switchcontext.lo:  \
+ 	config/aarch64/$(am__dirstamp)
+ config/arm/libgdruntime_convenience_la-switchcontext.lo:  \
+ 	config/arm/$(am__dirstamp)
+-config/loongarch/libgdruntime_convenience_la-switchcontext.lo:  \
+- config/loongarch/$(am__dirstamp)
+ config/mips/libgdruntime_convenience_la-switchcontext.lo:  \
+ 	config/mips/$(am__dirstamp)
+ config/powerpc/libgdruntime_convenience_la-switchcontext.lo:  \
+@@ -1964,6 +1962,8 @@ config/systemz/libgdruntime_convenience_la-get_tls_offset.lo:  \
+ 	config/systemz/$(am__dirstamp)
+ config/s390/libgdruntime_convenience_la-get_tls_offset.lo:  \
+ 	config/s390/$(am__dirstamp)
++config/loongarch/libgdruntime_convenience_la-switchcontext.lo:  \
++	config/loongarch/$(am__dirstamp)
+ 
+ libgdruntime_convenience.la: $(libgdruntime_convenience_la_OBJECTS) $(libgdruntime_convenience_la_DEPENDENCIES) $(EXTRA_libgdruntime_convenience_la_DEPENDENCIES) 
+ 	$(AM_V_GEN)$(libgdruntime_convenience_la_LINK)  $(libgdruntime_convenience_la_OBJECTS) $(libgdruntime_convenience_la_LIBADD) $(LIBS)
+@@ -1976,14 +1976,14 @@ mostlyclean-compile:
+ 	-rm -f config/arm/*.lo
+ 	-rm -f config/mingw/*.$(OBJEXT)
+ 	-rm -f config/mingw/*.lo
+-	-rm -f config/loongarch/*.$(OBJEXT)
+-	-rm -f config/loongarch/*.lo
+ 	-rm -f config/mips/*.$(OBJEXT)
+ 	-rm -f config/mips/*.lo
+ 	-rm -f config/powerpc/*.$(OBJEXT)
+ 	-rm -f config/powerpc/*.lo
+ 	-rm -f config/s390/*.$(OBJEXT)
+ 	-rm -f config/s390/*.lo
++	-rm -f config/loongarch/*.$(OBJEXT)
++	-rm -f config/loongarch/*.lo
+ 	-rm -f config/systemz/*.$(OBJEXT)
+ 	-rm -f config/systemz/*.lo
+ 	-rm -f config/x86/*.$(OBJEXT)
+@@ -2101,10 +2101,7 @@ config/aarch64/libgdruntime_la-switchcontext.lo: config/aarch64/switchcontext.S
+ config/arm/libgdruntime_la-switchcontext.lo: config/arm/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/arm/libgdruntime_la-switchcontext.lo `test -f 'config/arm/switchcontext.S' || echo '$(srcdir)/'`config/arm/switchcontext.S
+ 
+-config/loongarch/libgdruntime_la-switchcontext.lo: config/loongarch/switchcontext.S
+- $(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS)
+-
+-onfig/mips/libgdruntime_la-switchcontext.lo: config/mips/switchcontext.S
++config/mips/libgdruntime_la-switchcontext.lo: config/mips/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/mips/libgdruntime_la-switchcontext.lo `test -f 'config/mips/switchcontext.S' || echo '$(srcdir)/'`config/mips/switchcontext.S
+ 
+ config/powerpc/libgdruntime_la-switchcontext.lo: config/powerpc/switchcontext.S
+@@ -2122,18 +2119,21 @@ config/systemz/libgdruntime_la-get_tls_offset.lo: config/systemz/get_tls_offset.
+ config/s390/libgdruntime_la-get_tls_offset.lo: config/s390/get_tls_offset.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/s390/libgdruntime_la-get_tls_offset.lo `test -f 'config/s390/get_tls_offset.S' || echo '$(srcdir)/'`config/s390/get_tls_offset.S
+ 
++config/loongarch/libgdruntime_la-switchcontext.lo: config/loongarch/switchcontext.S
++	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/loongarch/libgdruntime_la-switchcontext.lo `test -f 'config/loongarch/switchcontext.S' || echo '$(srcdir)/'`config/loongarch/switchcontext.S
++
+ config/aarch64/libgdruntime_convenience_la-switchcontext.lo: config/aarch64/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/aarch64/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/aarch64/switchcontext.S' || echo '$(srcdir)/'`config/aarch64/switchcontext.S
+ 
+ config/arm/libgdruntime_convenience_la-switchcontext.lo: config/arm/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/arm/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/arm/switchcontext.S' || echo '$(srcdir)/'`config/arm/switchcontext.S
+ 
+-config/loongarch/libgdruntime_convenience_la-switchcontext.lo: config/loongarch/switchcontext.S
+- $(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM
+-
+ config/mips/libgdruntime_convenience_la-switchcontext.lo: config/mips/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/mips/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/mips/switchcontext.S' || echo '$(srcdir)/'`config/mips/switchcontext.S
+ 
++config/loongarch/libgdruntime_convenience_la-switchcontext.lo: config/loongarch/switchcontext.S
++	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/loongarch/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/loongarch/switchcontext.S' || echo '$(srcdir)/'`config/loongarch/switchcontext.S
++
+ config/powerpc/libgdruntime_convenience_la-switchcontext.lo: config/powerpc/switchcontext.S
+ 	$(AM_V_CPPAS)$(LIBTOOL) $(AM_V_lt) $(libgdruntime_convenience_la_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=compile $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CCASFLAGS) $(CCASFLAGS) -c -o config/powerpc/libgdruntime_convenience_la-switchcontext.lo `test -f 'config/powerpc/switchcontext.S' || echo '$(srcdir)/'`config/powerpc/switchcontext.S
+ 
+@@ -2178,10 +2178,10 @@ clean-libtool:
+ 	-rm -rf config/aarch64/.libs config/aarch64/_libs
+ 	-rm -rf config/arm/.libs config/arm/_libs
+ 	-rm -rf config/mingw/.libs config/mingw/_libs
+-	-rm -rf config/loongarch/.libs config/loongarch/_libs
+ 	-rm -rf config/mips/.libs config/mips/_libs
+ 	-rm -rf config/powerpc/.libs config/powerpc/_libs
+ 	-rm -rf config/s390/.libs config/s390/_libs
++	-rm -rf config/loongarch/.libs config/loongarch/_libs
+ 	-rm -rf config/systemz/.libs config/systemz/_libs
+ 	-rm -rf config/x86/.libs config/x86/_libs
+ 	-rm -rf core/.libs core/_libs
+@@ -2340,10 +2340,10 @@ distclean-generic:
+ 	-rm -f config/aarch64/$(am__dirstamp)
+ 	-rm -f config/arm/$(am__dirstamp)
+ 	-rm -f config/mingw/$(am__dirstamp)
+-	-rm -f config/loongarch/$(am__dirstamp)
+ 	-rm -f config/mips/$(am__dirstamp)
+ 	-rm -f config/powerpc/$(am__dirstamp)
+ 	-rm -f config/s390/$(am__dirstamp)
++	-rm -f config/loongarch/$(am__dirstamp)
+ 	-rm -f config/systemz/$(am__dirstamp)
+ 	-rm -f config/x86/$(am__dirstamp)
+ 	-rm -f core/$(am__dirstamp)
diff --git a/Libvtv-Add-loongarch-support.patch b/Libvtv-Add-loongarch-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0350e5a56a900d5ac059e1b01dd9eed0636e81e4
--- /dev/null
+++ b/Libvtv-Add-loongarch-support.patch
@@ -0,0 +1,59 @@
+From 62ea18c632200edbbf46b4e957bc4d997f1c66f0 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 27 Sep 2022 15:28:43 +0800
+Subject: [PATCH 024/124] Libvtv: Add loongarch support.
+
+The loongarch64 specification permits page sizes of 4KiB, 16KiB and 64KiB,
+but only 16KiB pages are supported for now.
+
+Co-Authored-By: qijingwen 
+
+include/ChangeLog:
+
+	* vtv-change-permission.h (defined): Determines whether the macro
+	__loongarch_lp64 is defined
+	(VTV_PAGE_SIZE): Set VTV_PAGE_SIZE to 16KiB for loongarch64.
+
+libvtv/ChangeLog:
+
+	* configure.tgt: Add loongarch support.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ include/vtv-change-permission.h | 4 ++++
+ libvtv/configure.tgt            | 3 +++
+ 2 files changed, 7 insertions(+)
+
+diff --git a/include/vtv-change-permission.h b/include/vtv-change-permission.h
+index 70bdad92b..e7b9294a0 100644
+--- a/include/vtv-change-permission.h
++++ b/include/vtv-change-permission.h
+@@ -48,6 +48,10 @@ extern void __VLTChangePermission (int);
+ #else 
+ #if defined(__sun__) && defined(__svr4__) && defined(__sparc__)
+ #define VTV_PAGE_SIZE 8192
++#elif defined(__loongarch_lp64)
++/* The page size is configurable by the kernel to be 4, 16 or 64 KiB.
++   For now, only the default page size of 16KiB is supported.  */
++#define VTV_PAGE_SIZE 16384
+ #else
+ #define VTV_PAGE_SIZE 4096
+ #endif
+diff --git a/libvtv/configure.tgt b/libvtv/configure.tgt
+index aa2a3f675..6cdd1e97a 100644
+--- a/libvtv/configure.tgt
++++ b/libvtv/configure.tgt
+@@ -50,6 +50,9 @@ case "${target}" in
+ 	;;
+   x86_64-*-darwin[1]* | i?86-*-darwin[1]*)
+ 	;;
++  loongarch*-*-linux*)
++	VTV_SUPPORTED=yes
++	;;
+   *)
+ 	;;
+ esac
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-LA664-support.patch b/LoongArch-Add-LA664-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8e2674d422ce31ea32c4bc5ffdb4098dd43e8c12
--- /dev/null
+++ b/LoongArch-Add-LA664-support.patch
@@ -0,0 +1,332 @@
+From c68463abbab98aa7f5a9b91e71ed6f6834c723df Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 16 Nov 2023 20:43:53 +0800
+Subject: [PATCH] LoongArch: Add LA664 support.
+
+Define ISA_BASE_LA64V110, which represents the base instruction set defined in LoongArch 1.1.
+Support the configure setting --with-arch=la664, and support -march=la664 and -mtune=la664.
+
+gcc/ChangeLog:
+
+	* config.gcc: Support LA664.
+	* config/loongarch/genopts/loongarch-strings: Likewise.
+	* config/loongarch/genopts/loongarch.opt.in: Likewise.
+	* config/loongarch/loongarch-cpu.cc (fill_native_cpu_config): Likewise.
+	* config/loongarch/loongarch-def.c: Likewise.
+	* config/loongarch/loongarch-def.h (N_ISA_BASE_TYPES): Likewise.
+	(ISA_BASE_LA64V110): Define macro.
+	(N_ARCH_TYPES): Update value.
+	(N_TUNE_TYPES): Update value.
+	(CPU_LA664): New macro.
+	* config/loongarch/loongarch-opts.cc (isa_default_abi): Likewise.
+	(isa_base_compat_p): Likewise.
+	* config/loongarch/loongarch-opts.h (TARGET_64BIT): This parameter is enabled
+	when la_target.isa.base is equal to ISA_BASE_LA64V100 or ISA_BASE_LA64V110.
+	(TARGET_uARCH_LA664): Define macro.
+	* config/loongarch/loongarch-str.h (STR_CPU_LA664): Likewise.
+	* config/loongarch/loongarch.cc (loongarch_cpu_sched_reassociation_width):
+	Add LA664 support.
+	* config/loongarch/loongarch.opt: Regenerate.
+
+Signed-off-by: ticat_fp 
+---
+ gcc/config.gcc                                | 10 ++++-----
+ .../loongarch/genopts/loongarch-strings       |  1 +
+ gcc/config/loongarch/genopts/loongarch.opt.in |  3 +++
+ gcc/config/loongarch/loongarch-cpu.cc         |  4 ++++
+ gcc/config/loongarch/loongarch-def.c          | 21 +++++++++++++++++++
+ gcc/config/loongarch/loongarch-def.h          |  8 ++++---
+ gcc/config/loongarch/loongarch-opts.cc        |  8 +++----
+ gcc/config/loongarch/loongarch-opts.h         |  4 +++-
+ gcc/config/loongarch/loongarch-str.h          |  1 +
+ gcc/config/loongarch/loongarch.cc             |  1 +
+ gcc/config/loongarch/loongarch.opt            |  3 +++
+ 11 files changed, 51 insertions(+), 13 deletions(-)
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 6d51bd93f3f..b88591b6fd8 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -5039,7 +5039,7 @@ case "${target}" in
+ 
+ 		# Perform initial sanity checks on --with-* options.
+ 		case ${with_arch} in
+-		"" | abi-default | loongarch64 | la464) ;; # OK, append here.
++		"" | abi-default | loongarch64 | la[46]64) ;; # OK, append here.
+ 		native)
+ 			if test x${host} != x${target}; then
+ 				echo "--with-arch=native is illegal for cross-compiler." 1>&2
+@@ -5088,7 +5088,7 @@ case "${target}" in
+ 		case ${abi_base}/${abi_ext} in
+ 		lp64*/base)
+ 			# architectures that support lp64* ABI
+-			arch_pattern="native|abi-default|loongarch64|la464"
++			arch_pattern="native|abi-default|loongarch64|la[46]64"
+ 			# default architecture for lp64* ABI
+ 			arch_default="abi-default"
+ 			;;
+@@ -5163,7 +5163,7 @@ case "${target}" in
+ 		# Check default with_tune configuration using with_arch.
+ 		case ${with_arch} in
+ 		loongarch64)
+-			tune_pattern="native|abi-default|loongarch64|la464"
++			tune_pattern="native|abi-default|loongarch64|la[46]64"
+ 			;;
+ 		*)
+ 			# By default, $with_tune == $with_arch
+@@ -5219,7 +5219,7 @@ case "${target}" in
+ 					# Fixed: use the default gcc configuration for all multilib
+ 					# builds by default.
+ 					with_multilib_default="" ;;
+-				arch,native|arch,loongarch64|arch,la464) # OK, append here.
++				arch,native|arch,loongarch64|arch,la[46]64) # OK, append here.
+ 					with_multilib_default="/march=${component}" ;;
+ 				arch,*)
+ 					with_multilib_default="/march=abi-default"
+@@ -5307,7 +5307,7 @@ case "${target}" in
+ 				if test x${parse_state} = x"arch"; then
+ 					# -march option
+ 					case ${component} in
+-					native | abi-default | loongarch64 | la464) # OK, append here.
++					native | abi-default | loongarch64 | la[46]64) # OK, append here.
+ 						# Append -march spec for each multilib variant.
+ 						loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}"
+ 						parse_state="opts"
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index 8e412f7536e..7bc4824007e 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -26,6 +26,7 @@ STR_CPU_NATIVE	      native
+ STR_CPU_ABI_DEFAULT   abi-default
+ STR_CPU_LOONGARCH64   loongarch64
+ STR_CPU_LA464	      la464
++STR_CPU_LA664	      la664
+ 
+ # Base architecture
+ STR_ISA_BASE_LA64V100 la64
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 158701d327a..00b4733d75b 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -107,6 +107,9 @@ Enum(cpu_type) String(@@STR_CPU_LOONGARCH64@@) Value(CPU_LOONGARCH64)
+ EnumValue
+ Enum(cpu_type) String(@@STR_CPU_LA464@@) Value(CPU_LA464)
+ 
++EnumValue
++Enum(cpu_type) String(@@STR_CPU_LA664@@) Value(CPU_LA664)
++
+ m@@OPTSTR_ARCH@@=
+ Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET)
+ -m@@OPTSTR_ARCH@@=PROCESSOR	Generate code for the given PROCESSOR ISA.
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index 7a2866f60f9..f3a13414143 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -106,6 +106,10 @@ fill_native_cpu_config (struct loongarch_target *tgt)
+       native_cpu_type = CPU_LA464;
+       break;
+ 
++    case 0x0014d000:   /* LA664 */
++      native_cpu_type = CPU_LA664;
++      break;
++
+     default:
+       /* Unknown PRID.  */
+       if (tune_native_p)
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+index 430ef8b2d95..067629141b6 100644
+--- a/gcc/config/loongarch/loongarch-def.c
++++ b/gcc/config/loongarch/loongarch-def.c
+@@ -28,6 +28,7 @@ loongarch_cpu_strings[N_TUNE_TYPES] = {
+   [CPU_ABI_DEFAULT]	  = STR_CPU_ABI_DEFAULT,
+   [CPU_LOONGARCH64]	  = STR_CPU_LOONGARCH64,
+   [CPU_LA464]		  = STR_CPU_LA464,
++  [CPU_LA664]		  = STR_CPU_LA664,
+ };
+ 
+ struct loongarch_isa
+@@ -42,6 +43,11 @@ loongarch_cpu_default_isa[N_ARCH_TYPES] = {
+       .fpu = ISA_EXT_FPU64,
+       .simd = ISA_EXT_SIMD_LASX,
+   },
++  [CPU_LA664] = {
++      .base = ISA_BASE_LA64V110,
++      .fpu = ISA_EXT_FPU64,
++      .simd = ISA_EXT_SIMD_LASX,
++  },
+ };
+ 
+ struct loongarch_cache
+@@ -58,6 +64,12 @@ loongarch_cpu_cache[N_TUNE_TYPES] = {
+       .l2d_size = 256,
+       .simultaneous_prefetches = 4,
+   },
++  [CPU_LA664] = {
++      .l1d_line_size = 64,
++      .l1d_size = 64,
++      .l2d_size = 256,
++      .simultaneous_prefetches = 4,
++  },
+ };
+ 
+ struct loongarch_align
+@@ -70,6 +82,10 @@ loongarch_cpu_align[N_TUNE_TYPES] = {
+     .function = "32",
+     .label = "16",
+   },
++  [CPU_LA664] = {
++    .function = "32",
++    .label = "16",
++  },
+ };
+ 
+ 
+@@ -104,6 +120,9 @@ loongarch_cpu_rtx_cost_data[N_TUNE_TYPES] = {
+   [CPU_LA464] = {
+       DEFAULT_COSTS
+   },
++  [CPU_LA664] = {
++      DEFAULT_COSTS
++  },
+ };
+ 
+ /* RTX costs to use when optimizing for size.  */
+@@ -127,6 +146,7 @@ loongarch_cpu_issue_rate[N_TUNE_TYPES] = {
+   [CPU_NATIVE]	      = 4,
+   [CPU_LOONGARCH64]   = 4,
+   [CPU_LA464]	      = 4,
++  [CPU_LA664]	      = 6,
+ };
+ 
+ int
+@@ -134,6 +154,7 @@ loongarch_cpu_multipass_dfa_lookahead[N_TUNE_TYPES] = {
+   [CPU_NATIVE]	      = 4,
+   [CPU_LOONGARCH64]   = 4,
+   [CPU_LA464]	      = 4,
++  [CPU_LA664]	      = 6,
+ };
+ 
+ /* Wiring string definitions from loongarch-str.h to global arrays
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index 6e2a6987910..db497f3ffe2 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -55,7 +55,8 @@ extern "C" {
+ /* enum isa_base */
+ extern const char* loongarch_isa_base_strings[];
+ #define ISA_BASE_LA64V100     0
+-#define N_ISA_BASE_TYPES      1
++#define ISA_BASE_LA64V110     1
++#define N_ISA_BASE_TYPES      2
+ 
+ /* enum isa_ext_* */
+ extern const char* loongarch_isa_ext_strings[];
+@@ -141,8 +142,9 @@ struct loongarch_target
+ #define CPU_ABI_DEFAULT   1
+ #define CPU_LOONGARCH64	  2
+ #define CPU_LA464	  3
+-#define N_ARCH_TYPES	  4
+-#define N_TUNE_TYPES	  4
++#define CPU_LA664	  4
++#define N_ARCH_TYPES	  5
++#define N_TUNE_TYPES	  5
+ 
+ /* parallel tables.  */
+ extern const char* loongarch_cpu_strings[];
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index e5921189a06..67a59152a01 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -552,17 +552,17 @@ isa_default_abi (const struct loongarch_isa *isa)
+   switch (isa->fpu)
+     {
+       case ISA_EXT_FPU64:
+-	if (isa->base == ISA_BASE_LA64V100)
++	if (isa->base >= ISA_BASE_LA64V100)
+ 	  abi.base = ABI_BASE_LP64D;
+ 	break;
+ 
+       case ISA_EXT_FPU32:
+-	if (isa->base == ISA_BASE_LA64V100)
++	if (isa->base >= ISA_BASE_LA64V100)
+ 	  abi.base = ABI_BASE_LP64F;
+ 	break;
+ 
+       case ISA_EXT_NONE:
+-	if (isa->base == ISA_BASE_LA64V100)
++	if (isa->base >= ISA_BASE_LA64V100)
+ 	  abi.base = ABI_BASE_LP64S;
+ 	break;
+ 
+@@ -582,7 +582,7 @@ isa_base_compat_p (const struct loongarch_isa *set1,
+   switch (set2->base)
+     {
+       case ISA_BASE_LA64V100:
+-	return (set1->base == ISA_BASE_LA64V100);
++	return (set1->base >= ISA_BASE_LA64V100);
+ 
+       default:
+ 	gcc_unreachable ();
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index 6dd309aad96..0e1b3e528a1 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -76,7 +76,8 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ #define TARGET_DOUBLE_FLOAT	  (la_target.isa.fpu == ISA_EXT_FPU64)
+ #define TARGET_DOUBLE_FLOAT_ABI	  (la_target.abi.base == ABI_BASE_LP64D)
+ 
+-#define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64V100)
++#define TARGET_64BIT		  (la_target.isa.base == ISA_BASE_LA64V100 \
++				   || la_target.isa.base == ISA_BASE_LA64V110)
+ #define TARGET_ABI_LP64		  (la_target.abi.base == ABI_BASE_LP64D	\
+ 				   || la_target.abi.base == ABI_BASE_LP64F \
+ 				   || la_target.abi.base == ABI_BASE_LP64S)
+@@ -88,6 +89,7 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+ 
+ /* TARGET_ macros for use in *.md template conditionals */
+ #define TARGET_uARCH_LA464	  (la_target.cpu_tune == CPU_LA464)
++#define TARGET_uARCH_LA664	  (la_target.cpu_tune == CPU_LA664)
+ 
+ /* Note: optimize_size may vary across functions,
+    while -m[no]-memcpy imposes a global constraint.  */
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index 072558c28f1..fc4f41bfc1e 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -30,6 +30,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_CPU_ABI_DEFAULT "abi-default"
+ #define STR_CPU_LOONGARCH64 "loongarch64"
+ #define STR_CPU_LA464 "la464"
++#define STR_CPU_LA664 "la664"
+ 
+ #define STR_ISA_BASE_LA64V100 "la64"
+ 
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 22ca24a1878..4cd509f11c6 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -10177,6 +10177,7 @@ loongarch_cpu_sched_reassociation_width (struct loongarch_target *target,
+     {
+     case CPU_LOONGARCH64:
+     case CPU_LA464:
++    case CPU_LA664:
+       /* Vector part.  */
+       if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))
+ 	{
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index a5988411fbb..7f129e53ba5 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -114,6 +114,9 @@ Enum(cpu_type) String(loongarch64) Value(CPU_LOONGARCH64)
+ EnumValue
+ Enum(cpu_type) String(la464) Value(CPU_LA464)
+ 
++EnumValue
++Enum(cpu_type) String(la664) Value(CPU_LA664)
++
+ march=
+ Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET)
+ -march=PROCESSOR	Generate code for the given PROCESSOR ISA.
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-Loongson-ASX-base-instruction-support.patch b/LoongArch-Add-Loongson-ASX-base-instruction-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2cc4dda131b28c6ddb8010d4a933d08e406516fb
--- /dev/null
+++ b/LoongArch-Add-Loongson-ASX-base-instruction-support.patch
@@ -0,0 +1,8376 @@
+From 2f0874e6e6f5a866e71826983dc18295c408748b Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 16 Mar 2023 16:34:08 +0800
+Subject: [PATCH 065/124] LoongArch: Add Loongson ASX base instruction support.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-modes.def
+	(VECTOR_MODES): Add Loongson ASX instruction support.
+	* config/loongarch/loongarch-protos.h (loongarch_split_256bit_move): Ditto.
+	(loongarch_split_256bit_move_p): Ditto.
+	(loongarch_expand_vector_group_init): Ditto.
+	(loongarch_expand_vec_perm_1): Ditto.
+	* config/loongarch/loongarch.cc (loongarch_symbol_insns): Ditto.
+	(loongarch_valid_offset_p): Ditto.
+	(loongarch_address_insns): Ditto.
+	(loongarch_const_insns): Ditto.
+	(loongarch_legitimize_move): Ditto.
+	(loongarch_builtin_vectorization_cost): Ditto.
+	(loongarch_split_move_p): Ditto.
+	(loongarch_split_move): Ditto.
+	(loongarch_output_move_index_float): Ditto.
+	(loongarch_split_256bit_move_p): Ditto.
+	(loongarch_split_256bit_move): Ditto.
+	(loongarch_output_move): Ditto.
+	(loongarch_print_operand_reloc): Ditto.
+	(loongarch_print_operand): Ditto.
+	(loongarch_hard_regno_mode_ok_uncached): Ditto.
+	(loongarch_hard_regno_nregs): Ditto.
+	(loongarch_class_max_nregs): Ditto.
+	(loongarch_can_change_mode_class): Ditto.
+	(loongarch_mode_ok_for_mov_fmt_p): Ditto.
+	(loongarch_vector_mode_supported_p): Ditto.
+	(loongarch_preferred_simd_mode): Ditto.
+	(loongarch_autovectorize_vector_modes): Ditto.
+	(loongarch_lsx_output_division): Ditto.
+	(loongarch_expand_lsx_shuffle): Ditto.
+	(loongarch_expand_vec_perm): Ditto.
+	(loongarch_expand_vec_perm_interleave): Ditto.
+	(loongarch_try_expand_lsx_vshuf_const): Ditto.
+	(loongarch_expand_vec_perm_even_odd_1): Ditto.
+	(loongarch_expand_vec_perm_even_odd): Ditto.
+	(loongarch_expand_vec_perm_1): Ditto.
+	(loongarch_expand_vec_perm_const_2): Ditto.
+	(loongarch_is_quad_duplicate): Ditto.
+	(loongarch_is_double_duplicate): Ditto.
+	(loongarch_is_odd_extraction): Ditto.
+	(loongarch_is_even_extraction): Ditto.
+	(loongarch_is_extraction_permutation): Ditto.
+	(loongarch_is_center_extraction): Ditto.
+	(loongarch_is_reversing_permutation): Ditto.
+	(loongarch_is_di_misalign_extract): Ditto.
+	(loongarch_is_si_misalign_extract): Ditto.
+	(loongarch_is_lasx_lowpart_interleave): Ditto.
+	(loongarch_is_lasx_lowpart_interleave_2): Ditto.
+	(COMPARE_SELECTOR): Ditto.
+	(loongarch_is_lasx_lowpart_extract): Ditto.
+	(loongarch_is_lasx_highpart_interleave): Ditto.
+	(loongarch_is_lasx_highpart_interleave_2): Ditto.
+	(loongarch_is_elem_duplicate): Ditto.
+	(loongarch_is_op_reverse_perm): Ditto.
+	(loongarch_is_single_op_perm): Ditto.
+	(loongarch_is_divisible_perm): Ditto.
+	(loongarch_is_triple_stride_extract): Ditto.
+	(loongarch_vectorize_vec_perm_const): Ditto.
+	(loongarch_cpu_sched_reassociation_width): Ditto.
+	(loongarch_expand_vector_extract): Ditto.
+	(emit_reduc_half): Ditto.
+	(loongarch_expand_vec_unpack): Ditto.
+	(loongarch_expand_vector_group_init): Ditto.
+	(loongarch_expand_vector_init): Ditto.
+	(loongarch_expand_lsx_cmp): Ditto.
+	(loongarch_builtin_support_vector_misalignment): Ditto.
+	* config/loongarch/loongarch.h (UNITS_PER_LASX_REG): Ditto.
+	(BITS_PER_LASX_REG): Ditto.
+	(STRUCTURE_SIZE_BOUNDARY): Ditto.
+	(LASX_REG_FIRST): Ditto.
+	(LASX_REG_LAST): Ditto.
+	(LASX_REG_NUM): Ditto.
+	(LASX_REG_P): Ditto.
+	(LASX_REG_RTX_P): Ditto.
+	(LASX_SUPPORTED_MODE_P): Ditto.
+	* config/loongarch/loongarch.md: Ditto.
+	* config/loongarch/lasx.md: New file.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/lasx.md             | 5104 ++++++++++++++++++++++
+ gcc/config/loongarch/loongarch-modes.def |    1 +
+ gcc/config/loongarch/loongarch-protos.h  |    4 +
+ gcc/config/loongarch/loongarch.cc        | 2567 ++++++++++-
+ gcc/config/loongarch/loongarch.h         |   60 +-
+ gcc/config/loongarch/loongarch.md        |   20 +-
+ 6 files changed, 7637 insertions(+), 119 deletions(-)
+ create mode 100644 gcc/config/loongarch/lasx.md
+
+diff --git a/gcc/config/loongarch/lasx.md b/gcc/config/loongarch/lasx.md
+new file mode 100644
+index 000000000..8111c8bb7
+--- /dev/null
++++ b/gcc/config/loongarch/lasx.md
+@@ -0,0 +1,5104 @@
++;; Machine Description for LARCH Loongson ASX ASE
++;;
++;; Copyright (C) 2018 Free Software Foundation, Inc.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; .
++;;
++
++(define_c_enum "unspec" [
++  UNSPEC_LASX_XVABSD_S
++  UNSPEC_LASX_XVABSD_U
++  UNSPEC_LASX_XVAVG_S
++  UNSPEC_LASX_XVAVG_U
++  UNSPEC_LASX_XVAVGR_S
++  UNSPEC_LASX_XVAVGR_U
++  UNSPEC_LASX_XVBITCLR
++  UNSPEC_LASX_XVBITCLRI
++  UNSPEC_LASX_XVBITREV
++  UNSPEC_LASX_XVBITREVI
++  UNSPEC_LASX_XVBITSET
++  UNSPEC_LASX_XVBITSETI
++  UNSPEC_LASX_XVFCMP_CAF
++  UNSPEC_LASX_XVFCLASS
++  UNSPEC_LASX_XVFCMP_CUNE
++  UNSPEC_LASX_XVFCVT
++  UNSPEC_LASX_XVFCVTH
++  UNSPEC_LASX_XVFCVTL
++  UNSPEC_LASX_XVFLOGB
++  UNSPEC_LASX_XVFRECIP
++  UNSPEC_LASX_XVFRINT
++  UNSPEC_LASX_XVFRSQRT
++  UNSPEC_LASX_XVFCMP_SAF
++  UNSPEC_LASX_XVFCMP_SEQ
++  UNSPEC_LASX_XVFCMP_SLE
++  UNSPEC_LASX_XVFCMP_SLT
++  UNSPEC_LASX_XVFCMP_SNE
++  UNSPEC_LASX_XVFCMP_SOR
++  UNSPEC_LASX_XVFCMP_SUEQ
++  UNSPEC_LASX_XVFCMP_SULE
++  UNSPEC_LASX_XVFCMP_SULT
++  UNSPEC_LASX_XVFCMP_SUN
++  UNSPEC_LASX_XVFCMP_SUNE
++  UNSPEC_LASX_XVFTINT_S
++  UNSPEC_LASX_XVFTINT_U
++  UNSPEC_LASX_XVCLO
++  UNSPEC_LASX_XVSAT_S
++  UNSPEC_LASX_XVSAT_U
++  UNSPEC_LASX_XVREPLVE0
++  UNSPEC_LASX_XVREPL128VEI
++  UNSPEC_LASX_XVSRAR
++  UNSPEC_LASX_XVSRARI
++  UNSPEC_LASX_XVSRLR
++  UNSPEC_LASX_XVSRLRI
++  UNSPEC_LASX_XVSHUF
++  UNSPEC_LASX_XVSHUF_B
++  UNSPEC_LASX_BRANCH
++  UNSPEC_LASX_BRANCH_V
++
++  UNSPEC_LASX_XVMUH_S
++  UNSPEC_LASX_XVMUH_U
++  UNSPEC_LASX_MXVEXTW_U
++  UNSPEC_LASX_XVSLLWIL_S
++  UNSPEC_LASX_XVSLLWIL_U
++  UNSPEC_LASX_XVSRAN
++  UNSPEC_LASX_XVSSRAN_S
++  UNSPEC_LASX_XVSSRAN_U
++  UNSPEC_LASX_XVSRARN
++  UNSPEC_LASX_XVSSRARN_S
++  UNSPEC_LASX_XVSSRARN_U
++  UNSPEC_LASX_XVSRLN
++  UNSPEC_LASX_XVSSRLN_U
++  UNSPEC_LASX_XVSRLRN
++  UNSPEC_LASX_XVSSRLRN_U
++  UNSPEC_LASX_XVFRSTPI
++  UNSPEC_LASX_XVFRSTP
++  UNSPEC_LASX_XVSHUF4I
++  UNSPEC_LASX_XVBSRL_V
++  UNSPEC_LASX_XVBSLL_V
++  UNSPEC_LASX_XVEXTRINS
++  UNSPEC_LASX_XVMSKLTZ
++  UNSPEC_LASX_XVSIGNCOV
++  UNSPEC_LASX_XVFTINTRNE_W_S
++  UNSPEC_LASX_XVFTINTRNE_L_D
++  UNSPEC_LASX_XVFTINTRP_W_S
++  UNSPEC_LASX_XVFTINTRP_L_D
++  UNSPEC_LASX_XVFTINTRM_W_S
++  UNSPEC_LASX_XVFTINTRM_L_D
++  UNSPEC_LASX_XVFTINT_W_D
++  UNSPEC_LASX_XVFFINT_S_L
++  UNSPEC_LASX_XVFTINTRZ_W_D
++  UNSPEC_LASX_XVFTINTRP_W_D
++  UNSPEC_LASX_XVFTINTRM_W_D
++  UNSPEC_LASX_XVFTINTRNE_W_D
++  UNSPEC_LASX_XVFTINTH_L_S
++  UNSPEC_LASX_XVFTINTL_L_S
++  UNSPEC_LASX_XVFFINTH_D_W
++  UNSPEC_LASX_XVFFINTL_D_W
++  UNSPEC_LASX_XVFTINTRZH_L_S
++  UNSPEC_LASX_XVFTINTRZL_L_S
++  UNSPEC_LASX_XVFTINTRPH_L_S
++  UNSPEC_LASX_XVFTINTRPL_L_S
++  UNSPEC_LASX_XVFTINTRMH_L_S
++  UNSPEC_LASX_XVFTINTRML_L_S
++  UNSPEC_LASX_XVFTINTRNEL_L_S
++  UNSPEC_LASX_XVFTINTRNEH_L_S
++  UNSPEC_LASX_XVFRINTRNE_S
++  UNSPEC_LASX_XVFRINTRNE_D
++  UNSPEC_LASX_XVFRINTRZ_S
++  UNSPEC_LASX_XVFRINTRZ_D
++  UNSPEC_LASX_XVFRINTRP_S
++  UNSPEC_LASX_XVFRINTRP_D
++  UNSPEC_LASX_XVFRINTRM_S
++  UNSPEC_LASX_XVFRINTRM_D
++  UNSPEC_LASX_XVREPLVE0_Q
++  UNSPEC_LASX_XVPERM_W
++  UNSPEC_LASX_XVPERMI_Q
++  UNSPEC_LASX_XVPERMI_D
++
++  UNSPEC_LASX_XVADDWEV
++  UNSPEC_LASX_XVADDWEV2
++  UNSPEC_LASX_XVADDWEV3
++  UNSPEC_LASX_XVSUBWEV
++  UNSPEC_LASX_XVSUBWEV2
++  UNSPEC_LASX_XVMULWEV
++  UNSPEC_LASX_XVMULWEV2
++  UNSPEC_LASX_XVMULWEV3
++  UNSPEC_LASX_XVADDWOD
++  UNSPEC_LASX_XVADDWOD2
++  UNSPEC_LASX_XVADDWOD3
++  UNSPEC_LASX_XVSUBWOD
++  UNSPEC_LASX_XVSUBWOD2
++  UNSPEC_LASX_XVMULWOD
++  UNSPEC_LASX_XVMULWOD2
++  UNSPEC_LASX_XVMULWOD3
++  UNSPEC_LASX_XVMADDWEV
++  UNSPEC_LASX_XVMADDWEV2
++  UNSPEC_LASX_XVMADDWEV3
++  UNSPEC_LASX_XVMADDWOD
++  UNSPEC_LASX_XVMADDWOD2
++  UNSPEC_LASX_XVMADDWOD3
++  UNSPEC_LASX_XVHADDW_Q_D
++  UNSPEC_LASX_XVHSUBW_Q_D
++  UNSPEC_LASX_XVHADDW_QU_DU
++  UNSPEC_LASX_XVHSUBW_QU_DU
++  UNSPEC_LASX_XVROTR
++  UNSPEC_LASX_XVADD_Q
++  UNSPEC_LASX_XVSUB_Q
++  UNSPEC_LASX_XVREPLVE
++  UNSPEC_LASX_XVSHUF4
++  UNSPEC_LASX_XVMSKGEZ
++  UNSPEC_LASX_XVMSKNZ
++  UNSPEC_LASX_XVEXTH_Q_D
++  UNSPEC_LASX_XVEXTH_QU_DU
++  UNSPEC_LASX_XVEXTL_Q_D
++  UNSPEC_LASX_XVSRLNI
++  UNSPEC_LASX_XVSRLRNI
++  UNSPEC_LASX_XVSSRLNI
++  UNSPEC_LASX_XVSSRLNI2
++  UNSPEC_LASX_XVSSRLRNI
++  UNSPEC_LASX_XVSSRLRNI2
++  UNSPEC_LASX_XVSRANI
++  UNSPEC_LASX_XVSRARNI
++  UNSPEC_LASX_XVSSRANI
++  UNSPEC_LASX_XVSSRANI2
++  UNSPEC_LASX_XVSSRARNI
++  UNSPEC_LASX_XVSSRARNI2
++  UNSPEC_LASX_XVPERMI
++  UNSPEC_LASX_XVINSVE0
++  UNSPEC_LASX_XVPICKVE
++  UNSPEC_LASX_XVSSRLN
++  UNSPEC_LASX_XVSSRLRN
++  UNSPEC_LASX_XVEXTL_QU_DU
++  UNSPEC_LASX_XVLDI
++  UNSPEC_LASX_XVLDX
++  UNSPEC_LASX_XVSTX
++])
++
++;; All vector modes with 256 bits.
++(define_mode_iterator LASX [V4DF V8SF V4DI V8SI V16HI V32QI])
++
++;; Same as LASX.  Used by vcond to iterate two modes.
++(define_mode_iterator LASX_2 [V4DF V8SF V4DI V8SI V16HI V32QI])
++
++;; Only used for splitting insert_d and copy_{u,s}.d.
++(define_mode_iterator LASX_D [V4DI V4DF])
++
++;; Only used for splitting insert_d and copy_{u,s}.d.
++(define_mode_iterator LASX_WD [V4DI V4DF V8SI V8SF])
++
++;; Only used for copy256_{u,s}.w.
++(define_mode_iterator LASX_W    [V8SI V8SF])
++
++;; Only integer modes in LASX.
++(define_mode_iterator ILASX [V4DI V8SI V16HI V32QI])
++
++;; As ILASX but excludes V32QI.
++(define_mode_iterator ILASX_DWH [V4DI V8SI V16HI])
++
++;; As LASX but excludes V32QI.
++(define_mode_iterator LASX_DWH [V4DF V8SF V4DI V8SI V16HI])
++
++;; As ILASX but excludes V4DI.
++(define_mode_iterator ILASX_WHB [V8SI V16HI V32QI])
++
++;; Only integer modes equal or larger than a word.
++(define_mode_iterator ILASX_DW  [V4DI V8SI])
++
++;; Only integer modes smaller than a word.
++(define_mode_iterator ILASX_HB  [V16HI V32QI])
++
++;; Only floating-point modes in LASX.
++(define_mode_iterator FLASX  [V4DF V8SF])
++
++;; Only used for immediate set shuffle elements instruction.
++(define_mode_iterator LASX_WHB_W [V8SI V16HI V32QI V8SF])
++
++;; The attribute gives the integer vector mode with same size in Loongson ASX.
++(define_mode_attr VIMODE256
++  [(V4DF "V4DI")
++   (V8SF "V8SI")
++   (V4DI "V4DI")
++   (V8SI "V8SI")
++   (V16HI "V16HI")
++   (V32QI "V32QI")])
++
++;;attribute gives half modes for vector modes.
++;;attribute gives half modes (Same Size) for vector modes.
++(define_mode_attr VHSMODE256
++  [(V16HI "V32QI")
++   (V8SI "V16HI")
++   (V4DI "V8SI")])
++
++;;attribute gives half modes  for vector modes.
++(define_mode_attr VHMODE256
++  [(V32QI "V16QI")
++   (V16HI "V8HI")
++   (V8SI "V4SI")
++   (V4DI "V2DI")])
++
++;;attribute gives half float modes for vector modes.
++(define_mode_attr VFHMODE256
++   [(V8SF "V4SF")
++   (V4DF "V2DF")])
++
++;; The attribute gives double modes for vector modes in LASX.
++(define_mode_attr VDMODE256
++  [(V8SI "V4DI")
++   (V16HI "V8SI")
++   (V32QI "V16HI")])
++
++;; extended from VDMODE256
++(define_mode_attr VDMODEEXD256
++  [(V4DI "V4DI")
++   (V8SI "V4DI")
++   (V16HI "V8SI")
++   (V32QI "V16HI")])
++
++;; The attribute gives half modes with same number of elements for vector modes.
++(define_mode_attr VTRUNCMODE256
++  [(V16HI "V16QI")
++   (V8SI "V8HI")
++   (V4DI "V4SI")])
++
++;; Double-sized Vector MODE with same elemet type. "Vector, Enlarged-MODE"
++(define_mode_attr VEMODE256
++  [(V8SF "V16SF")
++   (V8SI "V16SI")
++   (V4DF "V8DF")
++   (V4DI "V8DI")])
++
++;; This attribute gives the mode of the result for "copy_s_b, copy_u_b" etc.
++(define_mode_attr VRES256
++  [(V4DF "DF")
++   (V8SF "SF")
++   (V4DI "DI")
++   (V8SI "SI")
++   (V16HI "SI")
++   (V32QI "SI")])
++
++;; Only used with LASX_D iterator.
++(define_mode_attr lasx_d
++  [(V4DI "reg_or_0")
++   (V4DF "register")])
++
++;; This attribute gives the 256 bit integer vector mode with same size.
++(define_mode_attr mode256_i
++  [(V4DF "v4di")
++   (V8SF "v8si")
++   (V4DI "v4di")
++   (V8SI "v8si")
++   (V16HI "v16hi")
++   (V32QI "v32qi")])
++
++
++;; This attribute gives the 256 bit float vector mode with same size.
++(define_mode_attr mode256_f
++  [(V4DF "v4df")
++   (V8SF "v8sf")
++   (V4DI "v4df")
++   (V8SI "v8sf")])
++
++ ;; This attribute gives suffix for LASX instructions.  HOW?
++(define_mode_attr lasxfmt
++  [(V4DF "d")
++   (V8SF "w")
++   (V4DI "d")
++   (V8SI "w")
++   (V16HI "h")
++   (V32QI "b")])
++
++(define_mode_attr flasxfmt
++  [(V4DF "d")
++   (V8SF "s")])
++
++(define_mode_attr lasxfmt_u
++  [(V4DF "du")
++   (V8SF "wu")
++   (V4DI "du")
++   (V8SI "wu")
++   (V16HI "hu")
++   (V32QI "bu")])
++
++(define_mode_attr ilasxfmt
++  [(V4DF "l")
++   (V8SF "w")])
++
++(define_mode_attr ilasxfmt_u
++  [(V4DF "lu")
++   (V8SF "wu")])
++
++;; This attribute gives suffix for integers in VHMODE256.
++(define_mode_attr hlasxfmt
++  [(V4DI "w")
++   (V8SI "h")
++   (V16HI "b")])
++
++(define_mode_attr hlasxfmt_u
++  [(V4DI "wu")
++   (V8SI "hu")
++   (V16HI "bu")])
++
++;; This attribute gives suffix for integers in VHSMODE256.
++(define_mode_attr hslasxfmt
++  [(V4DI "w")
++   (V8SI "h")
++   (V16HI "b")])
++
++;; This attribute gives define_insn suffix for LASX instructions that need
++;; distinction between integer and floating point.
++(define_mode_attr lasxfmt_f
++  [(V4DF "d_f")
++   (V8SF "w_f")
++   (V4DI "d")
++   (V8SI "w")
++   (V16HI "h")
++   (V32QI "b")])
++
++(define_mode_attr flasxfmt_f
++  [(V4DF "d_f")
++   (V8SF "s_f")
++   (V4DI "d")
++   (V8SI "w")
++   (V16HI "h")
++   (V32QI "b")])
++
++;; This attribute gives define_insn suffix for LASX instructions that need
++;; distinction between integer and floating point.
++(define_mode_attr lasxfmt_f_wd
++  [(V4DF "d_f")
++   (V8SF "w_f")
++   (V4DI "d")
++   (V8SI "w")])
++
++;; This attribute gives suffix for integers in VHMODE256.
++(define_mode_attr dlasxfmt
++  [(V8SI "d")
++   (V16HI "w")
++   (V32QI "h")])
++
++(define_mode_attr dlasxfmt_u
++  [(V8SI "du")
++   (V16HI "wu")
++   (V32QI "hu")])
++
++;; for VDMODEEXD256
++(define_mode_attr dlasxqfmt
++  [(V4DI "q")
++   (V8SI "d")
++   (V16HI "w")
++   (V32QI "h")])
++
++;; This is used to form an immediate operand constraint using
++;; "const__operand".
++(define_mode_attr indeximm256
++  [(V4DF "0_to_3")
++   (V8SF "0_to_7")
++   (V4DI "0_to_3")
++   (V8SI "0_to_7")
++   (V16HI "uimm4")
++   (V32QI "uimm5")])
++
++;; This is used to form an immediate operand constraint using to ref high half
++;; "const__operand".
++(define_mode_attr indeximm_hi
++  [(V4DF "2_or_3")
++   (V8SF "4_to_7")
++   (V4DI "2_or_3")
++   (V8SI "4_to_7")
++   (V16HI "8_to_15")
++   (V32QI "16_to_31")])
++
++;; This is used to form an immediate operand constraint using to ref low half
++;; "const__operand".
++(define_mode_attr indeximm_lo
++  [(V4DF "0_or_1")
++   (V8SF "0_to_3")
++   (V4DI "0_or_1")
++   (V8SI "0_to_3")
++   (V16HI "uimm3")
++   (V32QI "uimm4")])
++
++;; This attribute represents bitmask needed for vec_merge using in lasx
++;; "const__operand".
++(define_mode_attr bitmask256
++  [(V4DF "exp_4")
++   (V8SF "exp_8")
++   (V4DI "exp_4")
++   (V8SI "exp_8")
++   (V16HI "exp_16")
++   (V32QI "exp_32")])
++
++;; This attribute represents bitmask needed for vec_merge using to ref low half
++;; "const__operand".
++(define_mode_attr bitmask_lo
++  [(V4DF "exp_2")
++   (V8SF "exp_4")
++   (V4DI "exp_2")
++   (V8SI "exp_4")
++   (V16HI "exp_8")
++   (V32QI "exp_16")])
++
++
++;; This attribute is used to form an immediate operand constraint using
++;; "const__operand".
++(define_mode_attr bitimm256
++  [(V32QI "uimm3")
++   (V16HI  "uimm4")
++   (V8SI  "uimm5")
++   (V4DI  "uimm6")])
++
++
++(define_mode_attr d2lasxfmt
++  [(V8SI "q")
++   (V16HI "d")
++   (V32QI "w")])
++
++(define_mode_attr d2lasxfmt_u
++  [(V8SI "qu")
++   (V16HI "du")
++   (V32QI "wu")])
++
++(define_mode_attr VD2MODE256
++  [(V8SI "V4DI")
++   (V16HI "V4DI")
++   (V32QI "V8SI")])
++
++(define_mode_attr lasxfmt_wd
++  [(V4DI "d")
++   (V8SI "w")
++   (V16HI "w")
++   (V32QI "w")])
++
++(define_int_iterator FRINT256_S [UNSPEC_LASX_XVFRINTRP_S
++			       UNSPEC_LASX_XVFRINTRZ_S
++			       UNSPEC_LASX_XVFRINT
++			       UNSPEC_LASX_XVFRINTRM_S])
++
++(define_int_iterator FRINT256_D [UNSPEC_LASX_XVFRINTRP_D
++			       UNSPEC_LASX_XVFRINTRZ_D
++			       UNSPEC_LASX_XVFRINT
++			       UNSPEC_LASX_XVFRINTRM_D])
++
++(define_int_attr frint256_pattern_s
++  [(UNSPEC_LASX_XVFRINTRP_S  "ceil")
++   (UNSPEC_LASX_XVFRINTRZ_S  "btrunc")
++   (UNSPEC_LASX_XVFRINT	     "rint")
++   (UNSPEC_LASX_XVFRINTRM_S  "floor")])
++
++(define_int_attr frint256_pattern_d
++  [(UNSPEC_LASX_XVFRINTRP_D  "ceil")
++   (UNSPEC_LASX_XVFRINTRZ_D  "btrunc")
++   (UNSPEC_LASX_XVFRINT	     "rint")
++   (UNSPEC_LASX_XVFRINTRM_D  "floor")])
++
++(define_int_attr frint256_suffix
++  [(UNSPEC_LASX_XVFRINTRP_S  "rp")
++   (UNSPEC_LASX_XVFRINTRP_D  "rp")
++   (UNSPEC_LASX_XVFRINTRZ_S  "rz")
++   (UNSPEC_LASX_XVFRINTRZ_D  "rz")
++   (UNSPEC_LASX_XVFRINT	     "")
++   (UNSPEC_LASX_XVFRINTRM_S  "rm")
++   (UNSPEC_LASX_XVFRINTRM_D  "rm")])
++
++(define_expand "vec_init"
++  [(match_operand:LASX 0 "register_operand")
++   (match_operand:LASX 1 "")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vector_init (operands[0], operands[1]);
++  DONE;
++})
++
++(define_expand "vec_initv32qiv16qi"
++ [(match_operand:V32QI 0 "register_operand")
++  (match_operand:V16QI 1 "")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vector_group_init (operands[0], operands[1]);
++  DONE;
++})
++
++;; FIXME: Delete.
++(define_insn "vec_pack_trunc_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(vec_concat:
++	  (truncate:
++	    (match_operand:ILASX_DWH 1 "register_operand" "f"))
++	  (truncate:
++	    (match_operand:ILASX_DWH 2 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvpickev.\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "")
++   (set_attr "length" "8")])
++
++(define_expand "vec_unpacks_hi_v8sf"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(float_extend:V4DF
++	  (vec_select:V4SF
++	    (match_operand:V8SF 1 "register_operand" "f")
++	    (match_dup 2))))]
++  "ISA_HAS_LASX"
++{
++  operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode,
++						       true/*high_p*/);
++})
++
++(define_expand "vec_unpacks_lo_v8sf"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(float_extend:V4DF
++	  (vec_select:V4SF
++	    (match_operand:V8SF 1 "register_operand" "f")
++	    (match_dup 2))))]
++  "ISA_HAS_LASX"
++{
++  operands[2] = loongarch_lsx_vec_parallel_const_half (V8SFmode,
++						       false/*high_p*/);
++})
++
++(define_expand "vec_unpacks_hi_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILASX_WHB 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vec_unpack (operands, false/*unsigned_p*/,
++			       true/*high_p*/);
++  DONE;
++})
++
++(define_expand "vec_unpacks_lo_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILASX_WHB 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/);
++  DONE;
++})
++
++(define_expand "vec_unpacku_hi_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILASX_WHB 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/);
++  DONE;
++})
++
++(define_expand "vec_unpacku_lo_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILASX_WHB 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/);
++  DONE;
++})
++
++(define_insn "lasx_xvinsgr2vr_"
++  [(set (match_operand:ILASX_DW 0 "register_operand" "=f")
++	(vec_merge:ILASX_DW
++	  (vec_duplicate:ILASX_DW
++	    (match_operand: 1 "reg_or_0_operand" "rJ"))
++	  (match_operand:ILASX_DW 2 "register_operand" "0")
++	  (match_operand 3 "const__operand" "")))]
++  "ISA_HAS_LASX"
++{
++#if 0
++  if (!TARGET_64BIT && (mode == V4DImode || mode == V4DFmode))
++    return "#";
++  else
++#endif
++    return "xvinsgr2vr.\t%u0,%z1,%y3";
++}
++  [(set_attr "type" "simd_insert")
++   (set_attr "mode" "")])
++
++(define_insn "vec_concatv4di"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(vec_concat:V4DI
++	  (match_operand:V2DI 1 "register_operand" "0")
++	  (match_operand:V2DI 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return "xvpermi.q\t%u0,%u2,0x20";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "vec_concatv8si"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(vec_concat:V8SI
++	  (match_operand:V4SI 1 "register_operand" "0")
++	  (match_operand:V4SI 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return "xvpermi.q\t%u0,%u2,0x20";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "vec_concatv16hi"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_concat:V16HI
++	  (match_operand:V8HI 1 "register_operand" "0")
++	  (match_operand:V8HI 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return "xvpermi.q\t%u0,%u2,0x20";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "vec_concatv32qi"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_concat:V32QI
++	  (match_operand:V16QI 1 "register_operand" "0")
++	  (match_operand:V16QI 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return "xvpermi.q\t%u0,%u2,0x20";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "vec_concatv4df"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(vec_concat:V4DF
++	  (match_operand:V2DF 1 "register_operand" "0")
++	  (match_operand:V2DF 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return "xvpermi.q\t%u0,%u2,0x20";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "vec_concatv8sf"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(vec_concat:V8SF
++	  (match_operand:V4SF 1 "register_operand" "0")
++	  (match_operand:V4SF 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return "xvpermi.q\t%u0,%u2,0x20";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V4DI")])
++
++;; xshuf.w
++(define_insn "lasx_xvperm_"
++  [(set (match_operand:LASX_W 0 "register_operand" "=f")
++	(unspec:LASX_W
++	  [(match_operand:LASX_W 1 "nonimmediate_operand" "f")
++	   (match_operand:V8SI 2 "register_operand" "f")]
++	  UNSPEC_LASX_XVPERM_W))]
++  "ISA_HAS_LASX"
++  "xvperm.w\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++;; xvpermi.d
++(define_insn "lasx_xvpermi_d_"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	  (unspec:LASX
++	    [(match_operand:LASX 1 "register_operand" "f")
++	     (match_operand:SI     2 "const_uimm8_operand")]
++	    UNSPEC_LASX_XVPERMI_D))]
++  "ISA_HAS_LASX"
++  "xvpermi.d\t%u0,%u1,%2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpermi_d__1"
++  [(set (match_operand:LASX_D 0 "register_operand" "=f")
++	(vec_select:LASX_D
++	 (match_operand:LASX_D 1 "register_operand" "f")
++	 (parallel [(match_operand 2 "const_0_to_3_operand")
++		    (match_operand 3 "const_0_to_3_operand")
++		    (match_operand 4 "const_0_to_3_operand")
++		    (match_operand 5 "const_0_to_3_operand")])))]
++  "ISA_HAS_LASX"
++{
++  int mask = 0;
++  mask |= INTVAL (operands[2]) << 0;
++  mask |= INTVAL (operands[3]) << 2;
++  mask |= INTVAL (operands[4]) << 4;
++  mask |= INTVAL (operands[5]) << 6;
++  operands[2] = GEN_INT (mask);
++  return "xvpermi.d\t%u0,%u1,%2";
++}
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++;; xvpermi.q
++(define_insn "lasx_xvpermi_q_"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(unspec:LASX
++	  [(match_operand:LASX 1 "register_operand" "0")
++	   (match_operand:LASX 2 "register_operand" "f")
++	   (match_operand     3 "const_uimm8_operand")]
++	  UNSPEC_LASX_XVPERMI_Q))]
++  "ISA_HAS_LASX"
++  "xvpermi.q\t%u0,%u2,%3"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpickve2gr_d"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(any_extend:DI
++	  (vec_select:DI
++	    (match_operand:V4DI 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const_0_to_3_operand" "")]))))]
++  "ISA_HAS_LASX"
++  "xvpickve2gr.d\t%0,%u1,%2"
++  [(set_attr "type" "simd_copy")
++   (set_attr "mode" "V4DI")])
++
++(define_expand "vec_set"
++  [(match_operand:ILASX_DW 0 "register_operand")
++   (match_operand: 1 "reg_or_0_operand")
++   (match_operand 2 "const__operand")]
++  "ISA_HAS_LASX"
++{
++  rtx index = GEN_INT (1 << INTVAL (operands[2]));
++  emit_insn (gen_lasx_xvinsgr2vr_ (operands[0], operands[1],
++                      operands[0], index));
++  DONE;
++})
++
++(define_expand "vec_set"
++  [(match_operand:FLASX 0 "register_operand")
++   (match_operand: 1 "reg_or_0_operand")
++   (match_operand 2 "const__operand")]
++  "ISA_HAS_LASX"
++{
++  rtx index = GEN_INT (1 << INTVAL (operands[2]));
++  emit_insn (gen_lasx_xvinsve0__scalar (operands[0], operands[1],
++                      operands[0], index));
++  DONE;
++})
++
++(define_expand "vec_extract"
++  [(match_operand: 0 "register_operand")
++   (match_operand:LASX 1 "register_operand")
++   (match_operand 2 "const__operand")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vector_extract (operands[0], operands[1],
++      INTVAL (operands[2]));
++  DONE;
++})
++
++(define_expand "vec_perm"
++ [(match_operand:LASX 0 "register_operand")
++  (match_operand:LASX 1 "register_operand")
++  (match_operand:LASX 2 "register_operand")
++  (match_operand: 3 "register_operand")]
++  "ISA_HAS_LASX"
++{
++   loongarch_expand_vec_perm_1 (operands);
++   DONE;
++})
++
++;; FIXME: 256??
++(define_expand "vcondu"
++  [(match_operand:LASX 0 "register_operand")
++   (match_operand:LASX 1 "reg_or_m1_operand")
++   (match_operand:LASX 2 "reg_or_0_operand")
++   (match_operator 3 ""
++    [(match_operand:ILASX 4 "register_operand")
++     (match_operand:ILASX 5 "register_operand")])]
++  "ISA_HAS_LASX
++   && (GET_MODE_NUNITS (mode)
++       == GET_MODE_NUNITS (mode))"
++{
++  loongarch_expand_vec_cond_expr (mode, mode,
++				  operands);
++  DONE;
++})
++
++;; FIXME: 256??
++(define_expand "vcond"
++  [(match_operand:LASX 0 "register_operand")
++   (match_operand:LASX 1 "reg_or_m1_operand")
++   (match_operand:LASX 2 "reg_or_0_operand")
++   (match_operator 3 ""
++     [(match_operand:LASX_2 4 "register_operand")
++      (match_operand:LASX_2 5 "register_operand")])]
++  "ISA_HAS_LASX
++   && (GET_MODE_NUNITS (mode)
++       == GET_MODE_NUNITS (mode))"
++{
++  loongarch_expand_vec_cond_expr (mode, mode,
++				  operands);
++  DONE;
++})
++
++;; Same as vcond_
++(define_expand "vcond_mask_"
++  [(match_operand:ILASX 0 "register_operand")
++   (match_operand:ILASX 1 "reg_or_m1_operand")
++   (match_operand:ILASX 2 "reg_or_0_operand")
++   (match_operand:ILASX 3 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  loongarch_expand_vec_cond_mask_expr (mode,
++				      mode, operands);
++  DONE;
++})
++
++(define_expand "lasx_xvrepli"
++  [(match_operand:ILASX 0 "register_operand")
++   (match_operand 1 "const_imm10_operand")]
++  "ISA_HAS_LASX"
++{
++  if (mode == V32QImode)
++    operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]),
++					       mode));
++  emit_move_insn (operands[0],
++  loongarch_gen_const_int_vector (mode, INTVAL (operands[1])));
++  DONE;
++})
++
++(define_expand "mov"
++  [(set (match_operand:LASX 0)
++	(match_operand:LASX 1))]
++  "ISA_HAS_LASX"
++{
++  if (loongarch_legitimize_move (mode, operands[0], operands[1]))
++    DONE;
++})
++
++
++(define_expand "movmisalign"
++  [(set (match_operand:LASX 0)
++	(match_operand:LASX 1))]
++  "ISA_HAS_LASX"
++{
++  if (loongarch_legitimize_move (mode, operands[0], operands[1]))
++    DONE;
++})
++
++;; 256-bit LASX modes can only exist in LASX registers or memory.
++(define_insn "mov_lasx"
++  [(set (match_operand:LASX 0 "nonimmediate_operand" "=f,f,R,*r,*f")
++	(match_operand:LASX 1 "move_operand" "fYGYI,R,f,*f,*r"))]
++  "ISA_HAS_LASX"
++  { return loongarch_output_move (operands[0], operands[1]); }
++  [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert")
++   (set_attr "mode" "")
++   (set_attr "length" "8,4,4,4,4")])
++
++
++(define_split
++  [(set (match_operand:LASX 0 "nonimmediate_operand")
++	(match_operand:LASX 1 "move_operand"))]
++  "reload_completed && ISA_HAS_LASX
++   && loongarch_split_move_insn_p (operands[0], operands[1])"
++  [(const_int 0)]
++{
++  loongarch_split_move_insn (operands[0], operands[1], curr_insn);
++  DONE;
++})
++
++;; Offset load
++(define_expand "lasx_mxld_"
++  [(match_operand:LASX 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq10_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++				      INTVAL (operands[2]));
++  loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr));
++  DONE;
++})
++
++;; Offset store
++(define_expand "lasx_mxst_"
++  [(match_operand:LASX 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq10_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]);
++  DONE;
++})
++
++;; LASX
++(define_insn "add3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f,f")
++	(plus:ILASX
++	  (match_operand:ILASX 1 "register_operand" "f,f,f")
++	  (match_operand:ILASX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))]
++  "ISA_HAS_LASX"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "xvadd.\t%u0,%u1,%u2";
++    case 1:
++      {
++	HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
++
++	operands[2] = GEN_INT (-val);
++	return "xvsubi.\t%u0,%u1,%d2";
++      }
++    case 2:
++      return "xvaddi.\t%u0,%u1,%E2";
++    default:
++      gcc_unreachable ();
++    }
++}
++  [(set_attr "alu_type" "simd_add")
++   (set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "sub3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(minus:ILASX
++	  (match_operand:ILASX 1 "register_operand" "f,f")
++	  (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))]
++  "ISA_HAS_LASX"
++  "@
++   xvsub.\t%u0,%u1,%u2
++   xvsubi.\t%u0,%u1,%E2"
++  [(set_attr "alu_type" "simd_add")
++   (set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "mul3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(mult:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		    (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvmul.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_mul")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvmadd_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(plus:ILASX (mult:ILASX (match_operand:ILASX 2 "register_operand" "f")
++				(match_operand:ILASX 3 "register_operand" "f"))
++		    (match_operand:ILASX 1 "register_operand" "0")))]
++  "ISA_HAS_LASX"
++  "xvmadd.\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_mul")
++   (set_attr "mode" "")])
++
++
++
++(define_insn "lasx_xvmsub_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(minus:ILASX (match_operand:ILASX 1 "register_operand" "0")
++		     (mult:ILASX (match_operand:ILASX 2 "register_operand" "f")
++				 (match_operand:ILASX 3 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvmsub.\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_mul")
++   (set_attr "mode" "")])
++
++(define_insn "div3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(div:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		   (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2",
++					operands);
++}
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "udiv3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(udiv:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		    (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return loongarch_lsx_output_division ("xvdiv.\t%u0,%u1,%u2",
++					operands);
++}
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "mod3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(mod:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		   (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2",
++					operands);
++}
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "umod3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(umod:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		    (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++{
++  return loongarch_lsx_output_division ("xvmod.\t%u0,%u1,%u2",
++					operands);
++}
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "xor3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f,f")
++	(xor:ILASX
++	  (match_operand:ILASX 1 "register_operand" "f,f,f")
++	  (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
++  "ISA_HAS_LASX"
++  "@
++   xvxor.v\t%u0,%u1,%u2
++   xvbitrevi.%v0\t%u0,%u1,%V2
++   xvxori.b\t%u0,%u1,%B2"
++  [(set_attr "type" "simd_logic,simd_bit,simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "ior3"
++  [(set (match_operand:LASX 0 "register_operand" "=f,f,f")
++	(ior:LASX
++	  (match_operand:LASX 1 "register_operand" "f,f,f")
++	  (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
++  "ISA_HAS_LASX"
++  "@
++   xvor.v\t%u0,%u1,%u2
++   xvbitseti.%v0\t%u0,%u1,%V2
++   xvori.b\t%u0,%u1,%B2"
++  [(set_attr "type" "simd_logic,simd_bit,simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "and3"
++  [(set (match_operand:LASX 0 "register_operand" "=f,f,f")
++	(and:LASX
++	  (match_operand:LASX 1 "register_operand" "f,f,f")
++	  (match_operand:LASX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))]
++  "ISA_HAS_LASX"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "xvand.v\t%u0,%u1,%u2";
++    case 1:
++      {
++	rtx elt0 = CONST_VECTOR_ELT (operands[2], 0);
++	unsigned HOST_WIDE_INT val = ~UINTVAL (elt0);
++	operands[2] = loongarch_gen_const_int_vector (mode, val & (-val));
++	return "xvbitclri.%v0\t%u0,%u1,%V2";
++      }
++    case 2:
++      return "xvandi.b\t%u0,%u1,%B2";
++    default:
++      gcc_unreachable ();
++    }
++}
++  [(set_attr "type" "simd_logic,simd_bit,simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "one_cmpl2"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(not:ILASX (match_operand:ILASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvnor.v\t%u0,%u1,%u1"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "V32QI")])
++
++;; LASX
++(define_insn "vlshr3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(lshiftrt:ILASX
++	  (match_operand:ILASX 1 "register_operand" "f,f")
++	  (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
++  "ISA_HAS_LASX"
++  "@
++   xvsrl.\t%u0,%u1,%u2
++   xvsrli.\t%u0,%u1,%E2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++;; LASX ">>"
++(define_insn "vashr3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(ashiftrt:ILASX
++	  (match_operand:ILASX 1 "register_operand" "f,f")
++	  (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
++  "ISA_HAS_LASX"
++  "@
++   xvsra.\t%u0,%u1,%u2
++   xvsrai.\t%u0,%u1,%E2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++;; LASX "<<"
++(define_insn "vashl3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(ashift:ILASX
++	  (match_operand:ILASX 1 "register_operand" "f,f")
++	  (match_operand:ILASX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
++  "ISA_HAS_LASX"
++  "@
++   xvsll.\t%u0,%u1,%u2
++   xvslli.\t%u0,%u1,%E2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++
++(define_insn "add3"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(plus:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		    (match_operand:FLASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfadd.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fadd")
++   (set_attr "mode" "")])
++
++(define_insn "sub3"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(minus:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		     (match_operand:FLASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfsub.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fadd")
++   (set_attr "mode" "")])
++
++(define_insn "mul3"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(mult:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		    (match_operand:FLASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfmul.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fmul")
++   (set_attr "mode" "")])
++
++(define_insn "div3"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(div:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		   (match_operand:FLASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfdiv.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++(define_insn "fma4"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(fma:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		   (match_operand:FLASX 2 "register_operand" "f")
++		   (match_operand:FLASX 3 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfmadd.\t%u0,%u1,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "fnma4"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(fma:FLASX (neg:FLASX (match_operand:FLASX 1 "register_operand" "f"))
++		   (match_operand:FLASX 2 "register_operand" "f")
++		   (match_operand:FLASX 3 "register_operand" "0")))]
++  "ISA_HAS_LASX"
++  "xvfnmsub.\t%u0,%u1,%u2,%u0"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "sqrt2"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(sqrt:FLASX (match_operand:FLASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfsqrt.\t%u0,%u1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvadda_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(plus:ILASX (abs:ILASX (match_operand:ILASX 1 "register_operand" "f"))
++		    (abs:ILASX (match_operand:ILASX 2 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvadda.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "ssadd3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(ss_plus:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvsadd.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "usadd3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(us_plus:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvsadd.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvabsd_s_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVABSD_S))]
++  "ISA_HAS_LASX"
++  "xvabsd.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvabsd_u_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVABSD_U))]
++  "ISA_HAS_LASX"
++  "xvabsd.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvavg_s_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVAVG_S))]
++  "ISA_HAS_LASX"
++  "xvavg.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvavg_u_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVAVG_U))]
++  "ISA_HAS_LASX"
++  "xvavg.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvavgr_s_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVAVGR_S))]
++  "ISA_HAS_LASX"
++  "xvavgr.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvavgr_u_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVAVGR_U))]
++  "ISA_HAS_LASX"
++  "xvavgr.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbitclr_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVBITCLR))]
++  "ISA_HAS_LASX"
++  "xvbitclr.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbitclri_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const__operand" "")]
++		      UNSPEC_LASX_XVBITCLRI))]
++  "ISA_HAS_LASX"
++  "xvbitclri.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbitrev_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVBITREV))]
++  "ISA_HAS_LASX"
++  "xvbitrev.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbitrevi_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const__operand" "")]
++		     UNSPEC_LASX_XVBITREVI))]
++  "ISA_HAS_LASX"
++  "xvbitrevi.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbitsel_"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(ior:LASX (and:LASX (not:LASX
++			      (match_operand:LASX 3 "register_operand" "f"))
++			      (match_operand:LASX 1 "register_operand" "f"))
++		  (and:LASX (match_dup 3)
++			    (match_operand:LASX 2 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvbitsel.v\t%u0,%u1,%u2,%u3"
++  [(set_attr "type" "simd_bitmov")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbitseli_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(ior:V32QI (and:V32QI (not:V32QI
++				(match_operand:V32QI 1 "register_operand" "0"))
++			      (match_operand:V32QI 2 "register_operand" "f"))
++		   (and:V32QI (match_dup 1)
++			      (match_operand:V32QI 3 "const_vector_same_val_operand" "Urv8"))))]
++  "ISA_HAS_LASX"
++  "xvbitseli.b\t%u0,%u2,%B3"
++  [(set_attr "type" "simd_bitmov")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvbitset_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVBITSET))]
++  "ISA_HAS_LASX"
++  "xvbitset.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbitseti_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const__operand" "")]
++		      UNSPEC_LASX_XVBITSETI))]
++  "ISA_HAS_LASX"
++  "xvbitseti.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvs_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(ICC:ILASX
++	  (match_operand:ILASX 1 "register_operand" "f,f")
++	  (match_operand:ILASX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))]
++  "ISA_HAS_LASX"
++  "@
++   xvs.\t%u0,%u1,%u2
++   xvs.\t%u0,%u1,%E2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_expand "vec_cmp"
++  [(set (match_operand: 0 "register_operand")
++	(match_operator 1 ""
++	  [(match_operand:LASX 2 "register_operand")
++	   (match_operand:LASX 3 "register_operand")]))]
++  "ISA_HAS_LASX"
++{
++  bool ok = loongarch_expand_vec_cmp (operands);
++  gcc_assert (ok);
++  DONE;
++})
++
++(define_expand "vec_cmpu"
++  [(set (match_operand: 0 "register_operand")
++	(match_operator 1 ""
++	  [(match_operand:ILASX 2 "register_operand")
++	   (match_operand:ILASX 3 "register_operand")]))]
++  "ISA_HAS_LASX"
++{
++  bool ok = loongarch_expand_vec_cmp (operands);
++  gcc_assert (ok);
++  DONE;
++})
++
++(define_insn "lasx_xvfclass_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLASX 1 "register_operand" "f")]
++			    UNSPEC_LASX_XVFCLASS))]
++  "ISA_HAS_LASX"
++  "xvfclass.\t%u0,%u1"
++  [(set_attr "type" "simd_fclass")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfcmp_caf_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLASX 1 "register_operand" "f")
++			     (match_operand:FLASX 2 "register_operand" "f")]
++			    UNSPEC_LASX_XVFCMP_CAF))]
++  "ISA_HAS_LASX"
++  "xvfcmp.caf.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfcmp_cune_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLASX 1 "register_operand" "f")
++			     (match_operand:FLASX 2 "register_operand" "f")]
++			    UNSPEC_LASX_XVFCMP_CUNE))]
++  "ISA_HAS_LASX"
++  "xvfcmp.cune.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++
++
++(define_int_iterator FSC256_UNS [UNSPEC_LASX_XVFCMP_SAF UNSPEC_LASX_XVFCMP_SUN
++				 UNSPEC_LASX_XVFCMP_SOR UNSPEC_LASX_XVFCMP_SEQ
++				 UNSPEC_LASX_XVFCMP_SNE UNSPEC_LASX_XVFCMP_SUEQ
++				 UNSPEC_LASX_XVFCMP_SUNE UNSPEC_LASX_XVFCMP_SULE
++				 UNSPEC_LASX_XVFCMP_SULT UNSPEC_LASX_XVFCMP_SLE
++				 UNSPEC_LASX_XVFCMP_SLT])
++
++(define_int_attr fsc256
++  [(UNSPEC_LASX_XVFCMP_SAF  "saf")
++   (UNSPEC_LASX_XVFCMP_SUN  "sun")
++   (UNSPEC_LASX_XVFCMP_SOR  "sor")
++   (UNSPEC_LASX_XVFCMP_SEQ  "seq")
++   (UNSPEC_LASX_XVFCMP_SNE  "sne")
++   (UNSPEC_LASX_XVFCMP_SUEQ "sueq")
++   (UNSPEC_LASX_XVFCMP_SUNE "sune")
++   (UNSPEC_LASX_XVFCMP_SULE "sule")
++   (UNSPEC_LASX_XVFCMP_SULT "sult")
++   (UNSPEC_LASX_XVFCMP_SLE  "sle")
++   (UNSPEC_LASX_XVFCMP_SLT  "slt")])
++
++(define_insn "lasx_xvfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(vfcond: (match_operand:FLASX 1 "register_operand" "f")
++			    (match_operand:FLASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfcmp..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++
++(define_insn "lasx_xvfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLASX 1 "register_operand" "f")
++			     (match_operand:FLASX 2 "register_operand" "f")]
++			    FSC256_UNS))]
++  "ISA_HAS_LASX"
++  "xvfcmp..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++
++(define_mode_attr fint256
++  [(V8SF "v8si")
++   (V4DF "v4di")])
++
++(define_mode_attr FINTCNV256
++  [(V8SF "I2S")
++   (V4DF "I2D")])
++
++(define_mode_attr FINTCNV256_2
++  [(V8SF "S2I")
++   (V4DF "D2I")])
++
++(define_insn "float2"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(float:FLASX (match_operand: 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvffint..\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "floatuns2"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(unsigned_float:FLASX
++	  (match_operand: 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvffint..\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_mode_attr FFQ256
++  [(V4SF "V16HI")
++   (V2DF "V8SI")])
++
++(define_insn "lasx_xvreplgr2vr_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(vec_duplicate:ILASX
++	  (match_operand: 1 "reg_or_0_operand" "r,J")))]
++  "ISA_HAS_LASX"
++{
++  if (which_alternative == 1)
++    return "xvldi.b\t%u0,0" ;
++
++  if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode))
++    return "#";
++  else
++    return "xvreplgr2vr.\t%u0,%z1";
++}
++  [(set_attr "type" "simd_fill")
++   (set_attr "mode" "")
++   (set_attr "length" "8")])
++
++(define_insn "logb2"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVFLOGB))]
++  "ISA_HAS_LASX"
++  "xvflogb.\t%u0,%u1"
++  [(set_attr "type" "simd_flog2")
++   (set_attr "mode" "")])
++
++
++(define_insn "smax3"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(smax:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		    (match_operand:FLASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfmax.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfmaxa_"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(if_then_else:FLASX
++	   (gt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f"))
++	       (abs:FLASX (match_operand:FLASX 2 "register_operand" "f")))
++	   (match_dup 1)
++	   (match_dup 2)))]
++  "ISA_HAS_LASX"
++  "xvfmaxa.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "smin3"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(smin:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		    (match_operand:FLASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfmin.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfmina_"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(if_then_else:FLASX
++	   (lt (abs:FLASX (match_operand:FLASX 1 "register_operand" "f"))
++	       (abs:FLASX (match_operand:FLASX 2 "register_operand" "f")))
++	   (match_dup 1)
++	   (match_dup 2)))]
++  "ISA_HAS_LASX"
++  "xvfmina.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfrecip_"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVFRECIP))]
++  "ISA_HAS_LASX"
++  "xvfrecip.\t%u0,%u1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfrint_"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVFRINT))]
++  "ISA_HAS_LASX"
++  "xvfrint.\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfrsqrt_"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVFRSQRT))]
++  "ISA_HAS_LASX"
++  "xvfrsqrt.\t%u0,%u1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvftint_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLASX 1 "register_operand" "f")]
++			    UNSPEC_LASX_XVFTINT_S))]
++  "ISA_HAS_LASX"
++  "xvftint..\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvftint_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLASX 1 "register_operand" "f")]
++			    UNSPEC_LASX_XVFTINT_U))]
++  "ISA_HAS_LASX"
++  "xvftint..\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++
++
++(define_insn "fix_trunc2"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(fix: (match_operand:FLASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvftintrz..\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++
++(define_insn "fixuns_trunc2"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unsigned_fix: (match_operand:FLASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvftintrz..\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvhw_h_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(addsub:V16HI
++	  (any_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 1 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)
++			 (const_int 17) (const_int 19)
++			 (const_int 21) (const_int 23)
++			 (const_int 25) (const_int 27)
++			 (const_int 29) (const_int 31)])))
++	  (any_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)
++			 (const_int 16) (const_int 18)
++			 (const_int 20) (const_int 22)
++			 (const_int 24) (const_int 26)
++			 (const_int 28) (const_int 30)])))))]
++  "ISA_HAS_LASX"
++  "xvhw.h.b\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvhw_w_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(addsub:V8SI
++	  (any_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 1 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))
++	  (any_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))))]
++  "ISA_HAS_LASX"
++  "xvhw.w.h\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvhw_d_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(addsub:V4DI
++	  (any_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 1 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))
++	  (any_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))))]
++  "ISA_HAS_LASX"
++  "xvhw.d.w\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvpackev_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_select:V32QI
++	  (vec_concat:V64QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (match_operand:V32QI 2 "register_operand" "f"))
++	  (parallel [(const_int 0)  (const_int 32)
++		     (const_int 2)  (const_int 34)
++		     (const_int 4)  (const_int 36)
++		     (const_int 6)  (const_int 38)
++		     (const_int 8)  (const_int 40)
++		     (const_int 10)  (const_int 42)
++		     (const_int 12)  (const_int 44)
++		     (const_int 14)  (const_int 46)
++		     (const_int 16)  (const_int 48)
++		     (const_int 18)  (const_int 50)
++		     (const_int 20)  (const_int 52)
++		     (const_int 22)  (const_int 54)
++		     (const_int 24)  (const_int 56)
++		     (const_int 26)  (const_int 58)
++		     (const_int 28)  (const_int 60)
++		     (const_int 30)  (const_int 62)])))]
++  "ISA_HAS_LASX"
++  "xvpackev.b\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V32QI")])
++
++
++(define_insn "lasx_xvpackev_h"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_select:V16HI
++	  (vec_concat:V32HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (match_operand:V16HI 2 "register_operand" "f"))
++	  (parallel [(const_int 0)  (const_int 16)
++		     (const_int 2)  (const_int 18)
++		     (const_int 4)  (const_int 20)
++		     (const_int 6)  (const_int 22)
++		     (const_int 8)  (const_int 24)
++		     (const_int 10) (const_int 26)
++		     (const_int 12) (const_int 28)
++		     (const_int 14) (const_int 30)])))]
++  "ISA_HAS_LASX"
++  "xvpackev.h\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvpackev_w"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(vec_select:V8SI
++	  (vec_concat:V16SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (match_operand:V8SI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 8)
++		     (const_int 2) (const_int 10)
++		     (const_int 4) (const_int 12)
++		     (const_int 6) (const_int 14)])))]
++  "ISA_HAS_LASX"
++  "xvpackev.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvpackev_w_f"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(vec_select:V8SF
++	  (vec_concat:V16SF
++	    (match_operand:V8SF 1 "register_operand" "f")
++	    (match_operand:V8SF 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 8)
++		     (const_int 2) (const_int 10)
++		     (const_int 4) (const_int 12)
++		     (const_int 6) (const_int 14)])))]
++  "ISA_HAS_LASX"
++  "xvpackev.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvilvh_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_select:V32QI
++	  (vec_concat:V64QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (match_operand:V32QI 2 "register_operand" "f"))
++	  (parallel [(const_int 8) (const_int 40)
++		     (const_int 9) (const_int 41)
++		     (const_int 10) (const_int 42)
++		     (const_int 11) (const_int 43)
++		     (const_int 12) (const_int 44)
++		     (const_int 13) (const_int 45)
++		     (const_int 14) (const_int 46)
++		     (const_int 15) (const_int 47)
++		     (const_int 24) (const_int 56)
++		     (const_int 25) (const_int 57)
++		     (const_int 26) (const_int 58)
++		     (const_int 27) (const_int 59)
++		     (const_int 28) (const_int 60)
++		     (const_int 29) (const_int 61)
++		     (const_int 30) (const_int 62)
++		     (const_int 31) (const_int 63)])))]
++  "ISA_HAS_LASX"
++  "xvilvh.b\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvilvh_h"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_select:V16HI
++	  (vec_concat:V32HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (match_operand:V16HI 2 "register_operand" "f"))
++	  (parallel [(const_int 4) (const_int 20)
++		     (const_int 5) (const_int 21)
++		     (const_int 6) (const_int 22)
++		     (const_int 7) (const_int 23)
++		     (const_int 12) (const_int 28)
++		     (const_int 13) (const_int 29)
++		     (const_int 14) (const_int 30)
++		     (const_int 15) (const_int 31)])))]
++  "ISA_HAS_LASX"
++  "xvilvh.h\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16HI")])
++
++(define_mode_attr xvilvh_suffix
++  [(V8SI "") (V8SF "_f")
++   (V4DI "") (V4DF "_f")])
++
++(define_insn "lasx_xvilvh_w"
++  [(set (match_operand:LASX_W 0 "register_operand" "=f")
++	(vec_select:LASX_W
++	  (vec_concat:
++	    (match_operand:LASX_W 1 "register_operand" "f")
++	    (match_operand:LASX_W 2 "register_operand" "f"))
++	  (parallel [(const_int 2) (const_int 10)
++		     (const_int 3) (const_int 11)
++		     (const_int 6) (const_int 14)
++		     (const_int 7) (const_int 15)])))]
++  "ISA_HAS_LASX"
++  "xvilvh.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvilvh_d"
++  [(set (match_operand:LASX_D 0 "register_operand" "=f")
++	(vec_select:LASX_D
++	  (vec_concat:
++	    (match_operand:LASX_D 1 "register_operand" "f")
++	    (match_operand:LASX_D 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 5)
++		     (const_int 3) (const_int 7)])))]
++  "ISA_HAS_LASX"
++  "xvilvh.d\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpackod_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_select:V32QI
++	  (vec_concat:V64QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (match_operand:V32QI 2 "register_operand" "f"))
++	  (parallel [(const_int 1)  (const_int 33)
++		     (const_int 3)  (const_int 35)
++		     (const_int 5)  (const_int 37)
++		     (const_int 7)  (const_int 39)
++		     (const_int 9)  (const_int 41)
++		     (const_int 11)  (const_int 43)
++		     (const_int 13)  (const_int 45)
++		     (const_int 15)  (const_int 47)
++		     (const_int 17)  (const_int 49)
++		     (const_int 19)  (const_int 51)
++		     (const_int 21)  (const_int 53)
++		     (const_int 23)  (const_int 55)
++		     (const_int 25)  (const_int 57)
++		     (const_int 27)  (const_int 59)
++		     (const_int 29)  (const_int 61)
++		     (const_int 31)  (const_int 63)])))]
++  "ISA_HAS_LASX"
++  "xvpackod.b\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V32QI")])
++
++
++(define_insn "lasx_xvpackod_h"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_select:V16HI
++	  (vec_concat:V32HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (match_operand:V16HI 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 17)
++		     (const_int 3) (const_int 19)
++		     (const_int 5) (const_int 21)
++		     (const_int 7) (const_int 23)
++		     (const_int 9) (const_int 25)
++		     (const_int 11) (const_int 27)
++		     (const_int 13) (const_int 29)
++		     (const_int 15) (const_int 31)])))]
++  "ISA_HAS_LASX"
++  "xvpackod.h\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16HI")])
++
++
++(define_insn "lasx_xvpackod_w"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(vec_select:V8SI
++	  (vec_concat:V16SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (match_operand:V8SI 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 9)
++		     (const_int 3) (const_int 11)
++		     (const_int 5) (const_int 13)
++		     (const_int 7) (const_int 15)])))]
++  "ISA_HAS_LASX"
++  "xvpackod.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SI")])
++
++
++(define_insn "lasx_xvpackod_w_f"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(vec_select:V8SF
++	  (vec_concat:V16SF
++	    (match_operand:V8SF 1 "register_operand" "f")
++	    (match_operand:V8SF 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 9)
++		     (const_int 3) (const_int 11)
++		     (const_int 5) (const_int 13)
++		     (const_int 7) (const_int 15)])))]
++  "ISA_HAS_LASX"
++  "xvpackod.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvilvl_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_select:V32QI
++	  (vec_concat:V64QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (match_operand:V32QI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 32)
++		     (const_int 1) (const_int 33)
++		     (const_int 2) (const_int 34)
++		     (const_int 3) (const_int 35)
++		     (const_int 4) (const_int 36)
++		     (const_int 5) (const_int 37)
++		     (const_int 6) (const_int 38)
++		     (const_int 7) (const_int 39)
++		     (const_int 16) (const_int 48)
++		     (const_int 17) (const_int 49)
++		     (const_int 18) (const_int 50)
++		     (const_int 19) (const_int 51)
++		     (const_int 20) (const_int 52)
++		     (const_int 21) (const_int 53)
++		     (const_int 22) (const_int 54)
++		     (const_int 23) (const_int 55)])))]
++  "ISA_HAS_LASX"
++  "xvilvl.b\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvilvl_h"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_select:V16HI
++	  (vec_concat:V32HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (match_operand:V16HI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 16)
++		     (const_int 1) (const_int 17)
++		     (const_int 2) (const_int 18)
++		     (const_int 3) (const_int 19)
++		     (const_int 8) (const_int 24)
++		     (const_int 9) (const_int 25)
++		     (const_int 10) (const_int 26)
++		     (const_int 11) (const_int 27)])))]
++  "ISA_HAS_LASX"
++  "xvilvl.h\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvilvl_w"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(vec_select:V8SI
++	  (vec_concat:V16SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (match_operand:V8SI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 8)
++		     (const_int 1) (const_int 9)
++		     (const_int 4) (const_int 12)
++		     (const_int 5) (const_int 13)])))]
++  "ISA_HAS_LASX"
++  "xvilvl.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvilvl_w_f"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(vec_select:V8SF
++	  (vec_concat:V16SF
++	    (match_operand:V8SF 1 "register_operand" "f")
++	    (match_operand:V8SF 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 8)
++		     (const_int 1) (const_int 9)
++		     (const_int 4) (const_int 12)
++		     (const_int 5) (const_int 13)])))]
++  "ISA_HAS_LASX"
++  "xvilvl.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvilvl_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(vec_select:V4DI
++	  (vec_concat:V8DI
++	    (match_operand:V4DI 1 "register_operand" "f")
++	    (match_operand:V4DI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 4)
++		     (const_int 2) (const_int 6)])))]
++  "ISA_HAS_LASX"
++  "xvilvl.d\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvilvl_d_f"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(vec_select:V4DF
++	  (vec_concat:V8DF
++	    (match_operand:V4DF 1 "register_operand" "f")
++	    (match_operand:V4DF 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 4)
++		     (const_int 2) (const_int 6)])))]
++  "ISA_HAS_LASX"
++  "xvilvl.d\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "smax3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(smax:ILASX (match_operand:ILASX 1 "register_operand" "f,f")
++		    (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))]
++  "ISA_HAS_LASX"
++  "@
++   xvmax.\t%u0,%u1,%u2
++   xvmaxi.\t%u0,%u1,%E2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "umax3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(umax:ILASX (match_operand:ILASX 1 "register_operand" "f,f")
++		    (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))]
++  "ISA_HAS_LASX"
++  "@
++   xvmax.\t%u0,%u1,%u2
++   xvmaxi.\t%u0,%u1,%B2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "smin3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(smin:ILASX (match_operand:ILASX 1 "register_operand" "f,f")
++		    (match_operand:ILASX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))]
++  "ISA_HAS_LASX"
++  "@
++   xvmin.\t%u0,%u1,%u2
++   xvmini.\t%u0,%u1,%E2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "umin3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(umin:ILASX (match_operand:ILASX 1 "register_operand" "f,f")
++		    (match_operand:ILASX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))]
++  "ISA_HAS_LASX"
++  "@
++   xvmin.\t%u0,%u1,%u2
++   xvmini.\t%u0,%u1,%B2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvclo_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(clz:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvclo.\t%u0,%u1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "clz2"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(clz:ILASX (match_operand:ILASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvclz.\t%u0,%u1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvnor_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f,f")
++	(and:ILASX (not:ILASX (match_operand:ILASX 1 "register_operand" "f,f"))
++		   (not:ILASX (match_operand:ILASX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))]
++  "ISA_HAS_LASX"
++  "@
++   xvnor.v\t%u0,%u1,%u2
++   xvnori.b\t%u0,%u1,%B2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpickev_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_select:V32QI
++	  (vec_concat:V64QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (match_operand:V32QI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 2)
++		     (const_int 4) (const_int 6)
++		     (const_int 8) (const_int 10)
++		     (const_int 12) (const_int 14)
++		     (const_int 32) (const_int 34)
++		     (const_int 36) (const_int 38)
++		     (const_int 40) (const_int 42)
++		     (const_int 44) (const_int 46)
++		     (const_int 16) (const_int 18)
++		     (const_int 20) (const_int 22)
++		     (const_int 24) (const_int 26)
++		     (const_int 28) (const_int 30)
++		     (const_int 48) (const_int 50)
++		     (const_int 52) (const_int 54)
++		     (const_int 56) (const_int 58)
++		     (const_int 60) (const_int 62)])))]
++  "ISA_HAS_LASX"
++  "xvpickev.b\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvpickev_h"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_select:V16HI
++	  (vec_concat:V32HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (match_operand:V16HI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 2)
++		     (const_int 4) (const_int 6)
++		     (const_int 16) (const_int 18)
++		     (const_int 20) (const_int 22)
++		     (const_int 8) (const_int 10)
++		     (const_int 12) (const_int 14)
++		     (const_int 24) (const_int 26)
++		     (const_int 28) (const_int 30)])))]
++  "ISA_HAS_LASX"
++  "xvpickev.h\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvpickev_w"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(vec_select:V8SI
++	  (vec_concat:V16SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (match_operand:V8SI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 2)
++		     (const_int 8) (const_int 10)
++		     (const_int 4) (const_int 6)
++		     (const_int 12) (const_int 14)])))]
++  "ISA_HAS_LASX"
++  "xvpickev.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvpickev_w_f"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(vec_select:V8SF
++	  (vec_concat:V16SF
++	    (match_operand:V8SF 1 "register_operand" "f")
++	    (match_operand:V8SF 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 2)
++		     (const_int 8) (const_int 10)
++		     (const_int 4) (const_int 6)
++		     (const_int 12) (const_int 14)])))]
++  "ISA_HAS_LASX"
++  "xvpickev.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvpickod_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_select:V32QI
++	  (vec_concat:V64QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (match_operand:V32QI 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 3)
++		     (const_int 5) (const_int 7)
++		     (const_int 9) (const_int 11)
++		     (const_int 13) (const_int 15)
++		     (const_int 33) (const_int 35)
++		     (const_int 37) (const_int 39)
++		     (const_int 41) (const_int 43)
++		     (const_int 45) (const_int 47)
++		     (const_int 17) (const_int 19)
++		     (const_int 21) (const_int 23)
++		     (const_int 25) (const_int 27)
++		     (const_int 29) (const_int 31)
++		     (const_int 49) (const_int 51)
++		     (const_int 53) (const_int 55)
++		     (const_int 57) (const_int 59)
++		     (const_int 61) (const_int 63)])))]
++  "ISA_HAS_LASX"
++  "xvpickod.b\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvpickod_h"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_select:V16HI
++	  (vec_concat:V32HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (match_operand:V16HI 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 3)
++		     (const_int 5) (const_int 7)
++		     (const_int 17) (const_int 19)
++		     (const_int 21) (const_int 23)
++		     (const_int 9) (const_int 11)
++		     (const_int 13) (const_int 15)
++		     (const_int 25) (const_int 27)
++		     (const_int 29) (const_int 31)])))]
++  "ISA_HAS_LASX"
++  "xvpickod.h\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvpickod_w"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(vec_select:V8SI
++	  (vec_concat:V16SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (match_operand:V8SI 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 3)
++		     (const_int 9) (const_int 11)
++		     (const_int 5) (const_int 7)
++		     (const_int 13) (const_int 15)])))]
++  "ISA_HAS_LASX"
++  "xvpickod.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvpickod_w_f"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(vec_select:V8SF
++	  (vec_concat:V16SF
++	    (match_operand:V8SF 1 "register_operand" "f")
++	    (match_operand:V8SF 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 3)
++		     (const_int 9) (const_int 11)
++		     (const_int 5) (const_int 7)
++		     (const_int 13) (const_int 15)])))]
++  "ISA_HAS_LASX"
++  "xvpickod.w\t%u0,%u2,%u1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "popcount2"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(popcount:ILASX (match_operand:ILASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvpcnt.\t%u0,%u1"
++  [(set_attr "type" "simd_pcnt")
++   (set_attr "mode" "")])
++
++
++(define_insn "lasx_xvsat_s_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LASX_XVSAT_S))]
++  "ISA_HAS_LASX"
++  "xvsat.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_sat")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsat_u_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const__operand" "")]
++		      UNSPEC_LASX_XVSAT_U))]
++  "ISA_HAS_LASX"
++  "xvsat.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_sat")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvshuf4i_"
++  [(set (match_operand:LASX_WHB_W 0 "register_operand" "=f")
++	(unspec:LASX_WHB_W [(match_operand:LASX_WHB_W 1 "register_operand" "f")
++			    (match_operand 2 "const_uimm8_operand")]
++			   UNSPEC_LASX_XVSHUF4I))]
++  "ISA_HAS_LASX"
++  "xvshuf4i.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvshuf4i__1"
++  [(set (match_operand:LASX_W 0 "register_operand" "=f")
++    (vec_select:LASX_W
++      (match_operand:LASX_W 1 "nonimmediate_operand" "f")
++      (parallel [(match_operand 2 "const_0_to_3_operand")
++             (match_operand 3 "const_0_to_3_operand")
++             (match_operand 4 "const_0_to_3_operand")
++             (match_operand 5 "const_0_to_3_operand")
++             (match_operand 6 "const_4_to_7_operand")
++             (match_operand 7 "const_4_to_7_operand")
++             (match_operand 8 "const_4_to_7_operand")
++             (match_operand 9 "const_4_to_7_operand")])))]
++  "ISA_HAS_LASX
++   && INTVAL (operands[2]) + 4 == INTVAL (operands[6])
++   && INTVAL (operands[3]) + 4 == INTVAL (operands[7])
++   && INTVAL (operands[4]) + 4 == INTVAL (operands[8])
++   && INTVAL (operands[5]) + 4 == INTVAL (operands[9])"
++{
++  int mask = 0;
++  mask |= INTVAL (operands[2]) << 0;
++  mask |= INTVAL (operands[3]) << 2;
++  mask |= INTVAL (operands[4]) << 4;
++  mask |= INTVAL (operands[5]) << 6;
++  operands[2] = GEN_INT (mask);
++
++  return "xvshuf4i.w\t%u0,%u1,%2";
++}
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrar_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVSRAR))]
++  "ISA_HAS_LASX"
++  "xvsrar.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrari_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const__operand" "")]
++		      UNSPEC_LASX_XVSRARI))]
++  "ISA_HAS_LASX"
++  "xvsrari.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrlr_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVSRLR))]
++  "ISA_HAS_LASX"
++  "xvsrlr.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrlri_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const__operand" "")]
++		      UNSPEC_LASX_XVSRLRI))]
++  "ISA_HAS_LASX"
++  "xvsrlri.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssub_s_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(ss_minus:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvssub.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssub_u_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(us_minus:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvssub.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvshuf_"
++  [(set (match_operand:LASX_DWH 0 "register_operand" "=f")
++	(unspec:LASX_DWH [(match_operand:LASX_DWH 1 "register_operand" "0")
++			  (match_operand:LASX_DWH 2 "register_operand" "f")
++			  (match_operand:LASX_DWH 3 "register_operand" "f")]
++			UNSPEC_LASX_XVSHUF))]
++  "ISA_HAS_LASX"
++  "xvshuf.\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_sld")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvshuf_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")
++		       (match_operand:V32QI 2 "register_operand" "f")
++		       (match_operand:V32QI 3 "register_operand" "f")]
++		      UNSPEC_LASX_XVSHUF_B))]
++  "ISA_HAS_LASX"
++  "xvshuf.b\t%u0,%u1,%u2,%u3"
++  [(set_attr "type" "simd_sld")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvreplve0_"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(vec_duplicate:LASX
++	  (vec_select:
++	    (match_operand:LASX 1 "register_operand" "f")
++	    (parallel [(const_int 0)]))))]
++  "ISA_HAS_LASX"
++  "xvreplve0.\t%u0,%u1"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvrepl128vei_b_internal"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(vec_duplicate:V32QI
++	  (vec_select:V32QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const_uimm4_operand" "")
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_operand 3 "const_16_to_31_operand" "")
++		       (match_dup 3) (match_dup 3) (match_dup 3)
++		       (match_dup 3) (match_dup 3) (match_dup 3)
++		       (match_dup 3) (match_dup 3) (match_dup 3)
++		       (match_dup 3) (match_dup 3) (match_dup 3)
++		       (match_dup 3) (match_dup 3) (match_dup 3)]))))]
++  "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 16)"
++  "xvrepl128vei.b\t%u0,%u1,%2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvrepl128vei_h_internal"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(vec_duplicate:V16HI
++	  (vec_select:V16HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const_uimm3_operand" "")
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_dup 2)
++		       (match_operand 3 "const_8_to_15_operand" "")
++		       (match_dup 3) (match_dup 3) (match_dup 3)
++		       (match_dup 3) (match_dup 3) (match_dup 3)
++		       (match_dup 3)]))))]
++  "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 8)"
++  "xvrepl128vei.h\t%u0,%u1,%2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvrepl128vei_w_internal"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(vec_duplicate:V8SI
++	  (vec_select:V8SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const_0_to_3_operand" "")
++		       (match_dup 2) (match_dup 2) (match_dup 2)
++		       (match_operand 3 "const_4_to_7_operand" "")
++		       (match_dup 3) (match_dup 3) (match_dup 3)]))))]
++  "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 4)"
++  "xvrepl128vei.w\t%u0,%u1,%2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvrepl128vei_d_internal"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(vec_duplicate:V4DI
++	  (vec_select:V4DI
++	    (match_operand:V4DI 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const_0_or_1_operand" "")
++		       (match_dup 2)
++		       (match_operand 3 "const_2_or_3_operand" "")
++		       (match_dup 3)]))))]
++  "ISA_HAS_LASX && ((INTVAL (operands[3]) - INTVAL (operands[2])) == 2)"
++  "xvrepl128vei.d\t%u0,%u1,%2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvrepl128vei_"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(unspec:LASX [(match_operand:LASX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LASX_XVREPL128VEI))]
++  "ISA_HAS_LASX"
++  "xvrepl128vei.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvreplve0__scalar"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++    (vec_duplicate:FLASX
++      (match_operand: 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvreplve0.\t%u0,%u1"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvreplve0_q"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVREPLVE0_Q))]
++  "ISA_HAS_LASX"
++  "xvreplve0.q\t%u0,%u1"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvfcvt_h_s"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(unspec:V16HI [(match_operand:V8SF 1 "register_operand" "f")
++		       (match_operand:V8SF 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVFCVT))]
++  "ISA_HAS_LASX"
++  "xvfcvt.h.s\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvfcvt_s_d"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V4DF 1 "register_operand" "f")
++		      (match_operand:V4DF 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVFCVT))]
++  "ISA_HAS_LASX"
++  "xvfcvt.s.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "vec_pack_trunc_v4df"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(vec_concat:V8SF
++	  (float_truncate:V4SF (match_operand:V4DF 1 "register_operand" "f"))
++	  (float_truncate:V4SF (match_operand:V4DF 2 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvfcvt.s.d\t%u0,%u2,%u1\n\txvpermi.d\t%u0,%u0,0xd8"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V8SF")
++   (set_attr "length" "8")])
++
++;; Define for builtin function.
++(define_insn "lasx_xvfcvth_s_h"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFCVTH))]
++  "ISA_HAS_LASX"
++  "xvfcvth.s.h\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V8SF")])
++
++;; Define for builtin function.
++(define_insn "lasx_xvfcvth_d_s"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(float_extend:V4DF
++	 (vec_select:V4SF
++	  (match_operand:V8SF 1 "register_operand" "f")
++	  (parallel [(const_int 2) (const_int 3)
++		      (const_int 6) (const_int 7)]))))]
++  "ISA_HAS_LASX"
++  "xvfcvth.d.s\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DF")
++   (set_attr "length" "12")])
++
++;; Define for gen insn.
++(define_insn "lasx_xvfcvth_d_insn"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(float_extend:V4DF
++	(vec_select:V4SF
++	  (match_operand:V8SF 1 "register_operand" "f")
++	  (parallel [(const_int 4) (const_int 5)
++		     (const_int 6) (const_int 7)]))))]
++  "ISA_HAS_LASX"
++  "xvpermi.d\t%u0,%u1,0xfa\n\txvfcvtl.d.s\t%u0,%u0"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DF")
++   (set_attr "length" "12")])
++
++;; Define for builtin function.
++(define_insn "lasx_xvfcvtl_s_h"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V16HI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFCVTL))]
++  "ISA_HAS_LASX"
++  "xvfcvtl.s.h\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V8SF")])
++
++;; Define for builtin function.
++(define_insn "lasx_xvfcvtl_d_s"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(float_extend:V4DF
++	(vec_select:V4SF
++	  (match_operand:V8SF 1 "register_operand" "f")
++	  (parallel [(const_int 0) (const_int 1)
++		     (const_int 4) (const_int 5)]))))]
++  "ISA_HAS_LASX"
++  "xvfcvtl.d.s\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DF")
++   (set_attr "length" "8")])
++
++;; Define for gen insn.
++(define_insn "lasx_xvfcvtl_d_insn"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(float_extend:V4DF
++	(vec_select:V4SF
++	  (match_operand:V8SF 1 "register_operand" "f")
++	  (parallel [(const_int 0) (const_int 1)
++		     (const_int 2) (const_int 3)]))))]
++  "ISA_HAS_LASX"
++  "xvpermi.d\t%u0,%u1,0x50\n\txvfcvtl.d.s\t%u0,%u0"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DF")
++   (set_attr "length" "8")])
++
++(define_code_attr lasxbr
++  [(eq "xbz")
++   (ne "xbnz")])
++
++(define_code_attr lasxeq_v
++  [(eq "eqz")
++   (ne "nez")])
++
++(define_code_attr lasxne_v
++  [(eq "nez")
++   (ne "eqz")])
++
++(define_code_attr lasxeq
++  [(eq "anyeqz")
++   (ne "allnez")])
++
++(define_code_attr lasxne
++  [(eq "allnez")
++   (ne "anyeqz")])
++
++(define_insn "lasx__"
++  [(set (pc)
++	(if_then_else
++	  (equality_op
++	    (unspec:SI [(match_operand:LASX 1 "register_operand" "f")]
++		       UNSPEC_LASX_BRANCH)
++	    (match_operand:SI 2 "const_0_operand"))
++	  (label_ref (match_operand 0))
++	  (pc)))
++   (clobber (match_scratch:FCC 3 "=z"))]
++  "ISA_HAS_LASX"
++{
++  return loongarch_output_conditional_branch (insn, operands,
++					 "xvset.\t%Z3%u1\n\tbcnez\t%Z3%0",
++					 "xvset.\t%z3%u1\n\tbcnez\t%Z3%0");
++}
++  [(set_attr "type" "simd_branch")
++   (set_attr "mode" "")])
++
++(define_insn "lasx__v_"
++  [(set (pc)
++	(if_then_else
++	  (equality_op
++	    (unspec:SI [(match_operand:LASX 1 "register_operand" "f")]
++		       UNSPEC_LASX_BRANCH_V)
++	    (match_operand:SI 2 "const_0_operand"))
++	  (label_ref (match_operand 0))
++	  (pc)))
++   (clobber (match_scratch:FCC 3 "=z"))]
++  "ISA_HAS_LASX"
++{
++  return loongarch_output_conditional_branch (insn, operands,
++					 "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0",
++					 "xvset.v\t%Z3%u1\n\tbcnez\t%Z3%0");
++}
++  [(set_attr "type" "simd_branch")
++   (set_attr "mode" "")])
++
++;; loongson-asx.
++(define_insn "lasx_vext2xv_h_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(any_extend:V16HI
++	  (vec_select:V16QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (parallel [(const_int 0) (const_int 1)
++		       (const_int 2) (const_int 3)
++		       (const_int 4) (const_int 5)
++		       (const_int 6) (const_int 7)
++		       (const_int 8) (const_int 9)
++		       (const_int 10) (const_int 11)
++		       (const_int 12) (const_int 13)
++		       (const_int 14) (const_int 15)]))))]
++  "ISA_HAS_LASX"
++  "vext2xv.h.b\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_vext2xv_w_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(any_extend:V8SI
++	  (vec_select:V8HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (parallel [(const_int 0) (const_int 1)
++		       (const_int 2) (const_int 3)
++		       (const_int 4) (const_int 5)
++		       (const_int 6) (const_int 7)]))))]
++  "ISA_HAS_LASX"
++  "vext2xv.w.h\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_vext2xv_d_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(any_extend:V4DI
++	  (vec_select:V4SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (parallel [(const_int 0) (const_int 1)
++		       (const_int 2) (const_int 3)]))))]
++  "ISA_HAS_LASX"
++  "vext2xv.d.w\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_vext2xv_w_b"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(any_extend:V8SI
++	  (vec_select:V8QI
++	   (match_operand:V32QI 1 "register_operand" "f")
++	    (parallel [(const_int 0) (const_int 1)
++		       (const_int 2) (const_int 3)
++		       (const_int 4) (const_int 5)
++		       (const_int 6) (const_int 7)]))))]
++  "ISA_HAS_LASX"
++  "vext2xv.w.b\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_vext2xv_d_h"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(any_extend:V4DI
++	  (vec_select:V4HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (parallel [(const_int 0) (const_int 1)
++		       (const_int 2) (const_int 3)]))))]
++  "ISA_HAS_LASX"
++  "vext2xv.d.h\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_vext2xv_d_b"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(any_extend:V4DI
++	  (vec_select:V4QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	    (parallel [(const_int 0) (const_int 1)
++		       (const_int 2) (const_int 3)]))))]
++  "ISA_HAS_LASX"
++  "vext2xv.d.b\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DI")])
++
++;; Extend loongson-sx to loongson-asx.
++(define_insn "xvandn3"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(and:LASX (not:LASX (match_operand:LASX 1 "register_operand" "f"))
++			    (match_operand:LASX 2 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvandn.v\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "abs2"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(abs:ILASX (match_operand:ILASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvsigncov.\t%u0,%u1,%u1"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "neg2"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(neg:ILASX (match_operand:ILASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvneg.\t%u0,%u1"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvmuh_s_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVMUH_S))]
++  "ISA_HAS_LASX"
++  "xvmuh.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvmuh_u_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVMUH_U))]
++  "ISA_HAS_LASX"
++  "xvmuh.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsllwil_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f")
++			     (match_operand 2 "const__operand" "")]
++			    UNSPEC_LASX_XVSLLWIL_S))]
++  "ISA_HAS_LASX"
++  "xvsllwil..\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsllwil_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_WHB 1 "register_operand" "f")
++			     (match_operand 2 "const__operand" "")]
++			    UNSPEC_LASX_XVSLLWIL_U))]
++  "ISA_HAS_LASX"
++  "xvsllwil..\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsran__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSRAN))]
++  "ISA_HAS_LASX"
++  "xvsran..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssran_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRAN_S))]
++  "ISA_HAS_LASX"
++  "xvssran..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssran_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRAN_U))]
++  "ISA_HAS_LASX"
++  "xvssran..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrarn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSRARN))]
++  "ISA_HAS_LASX"
++  "xvsrarn..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrarn_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRARN_S))]
++  "ISA_HAS_LASX"
++  "xvssrarn..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrarn_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRARN_U))]
++  "ISA_HAS_LASX"
++  "xvssrarn..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrln__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSRLN))]
++  "ISA_HAS_LASX"
++  "xvsrln..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrln_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRLN_U))]
++  "ISA_HAS_LASX"
++  "xvssrln..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrlrn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSRLRN))]
++  "ISA_HAS_LASX"
++  "xvsrlrn..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrlrn_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRLRN_U))]
++  "ISA_HAS_LASX"
++  "xvssrlrn..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfrstpi_"
++  [(set (match_operand:ILASX_HB 0 "register_operand" "=f")
++	(unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0")
++			  (match_operand:ILASX_HB 2 "register_operand" "f")
++			  (match_operand 3 "const_uimm5_operand" "")]
++			 UNSPEC_LASX_XVFRSTPI))]
++  "ISA_HAS_LASX"
++  "xvfrstpi.\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvfrstp_"
++  [(set (match_operand:ILASX_HB 0 "register_operand" "=f")
++	(unspec:ILASX_HB [(match_operand:ILASX_HB 1 "register_operand" "0")
++			  (match_operand:ILASX_HB 2 "register_operand" "f")
++			  (match_operand:ILASX_HB 3 "register_operand" "f")]
++			 UNSPEC_LASX_XVFRSTP))]
++  "ISA_HAS_LASX"
++  "xvfrstp.\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvshuf4i_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0")
++		      (match_operand:V4DI 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand")]
++		     UNSPEC_LASX_XVSHUF4I))]
++  "ISA_HAS_LASX"
++  "xvshuf4i.d\t%u0,%u2,%3"
++  [(set_attr "type" "simd_sld")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvbsrl_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const_uimm5_operand" "")]
++		      UNSPEC_LASX_XVBSRL_V))]
++  "ISA_HAS_LASX"
++  "xvbsrl.v\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvbsll_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const_uimm5_operand" "")]
++		      UNSPEC_LASX_XVBSLL_V))]
++  "ISA_HAS_LASX"
++  "xvbsll.v\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvextrins_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVEXTRINS))]
++  "ISA_HAS_LASX"
++  "xvextrins.\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvmskltz_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVMSKLTZ))]
++  "ISA_HAS_LASX"
++  "xvmskltz.\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsigncov_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVSIGNCOV))]
++  "ISA_HAS_LASX"
++  "xvsigncov.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_expand "copysign3"
++  [(set (match_dup 4)
++	(and:FLASX
++	  (not:FLASX (match_dup 3))
++	  (match_operand:FLASX 1 "register_operand")))
++   (set (match_dup 5)
++	(and:FLASX (match_dup 3)
++		   (match_operand:FLASX 2 "register_operand")))
++   (set (match_operand:FLASX 0 "register_operand")
++	(ior:FLASX (match_dup 4) (match_dup 5)))]
++  "ISA_HAS_LASX"
++{
++  operands[3] = loongarch_build_signbit_mask (mode, 1, 0);
++
++  operands[4] = gen_reg_rtx (mode);
++  operands[5] = gen_reg_rtx (mode);
++})
++
++
++(define_insn "absv4df2"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(abs:V4DF (match_operand:V4DF 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvbitclri.d\t%u0,%u1,63"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "absv8sf2"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(abs:V8SF (match_operand:V8SF 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvbitclri.w\t%u0,%u1,31"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "negv4df2"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(neg:V4DF (match_operand:V4DF 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvbitrevi.d\t%u0,%u1,63"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "negv8sf2"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(neg:V8SF (match_operand:V8SF 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvbitrevi.w\t%u0,%u1,31"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "xvfmadd4"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(fma:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		   (match_operand:FLASX 2 "register_operand" "f")
++		   (match_operand:FLASX 3 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvfmadd.\t%u0,%u1,$u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "fms4"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(fma:FLASX (match_operand:FLASX 1 "register_operand" "f")
++		   (match_operand:FLASX 2 "register_operand" "f")
++		   (neg:FLASX (match_operand:FLASX 3 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvfmsub.\t%u0,%u1,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "xvfnmsub4_nmsub4"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(neg:FLASX
++	  (fma:FLASX
++	    (match_operand:FLASX 1 "register_operand" "f")
++	    (match_operand:FLASX 2 "register_operand" "f")
++	    (neg:FLASX (match_operand:FLASX 3 "register_operand" "f")))))]
++  "ISA_HAS_LASX"
++  "xvfnmsub.\t%u0,%u1,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++
++(define_insn "xvfnmadd4_nmadd4"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(neg:FLASX
++	  (fma:FLASX
++	    (match_operand:FLASX 1 "register_operand" "f")
++	    (match_operand:FLASX 2 "register_operand" "f")
++	    (match_operand:FLASX 3 "register_operand" "f"))))]
++  "ISA_HAS_LASX"
++  "xvfnmadd.\t%u0,%u1,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvftintrne_w_s"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRNE_W_S))]
++  "ISA_HAS_LASX"
++  "xvftintrne.w.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrne_l_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRNE_L_D))]
++  "ISA_HAS_LASX"
++  "xvftintrne.l.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvftintrp_w_s"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRP_W_S))]
++  "ISA_HAS_LASX"
++  "xvftintrp.w.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrp_l_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRP_L_D))]
++  "ISA_HAS_LASX"
++  "xvftintrp.l.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvftintrm_w_s"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRM_W_S))]
++  "ISA_HAS_LASX"
++  "xvftintrm.w.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrm_l_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRM_L_D))]
++  "ISA_HAS_LASX"
++  "xvftintrm.l.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvftint_w_d"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f")
++		      (match_operand:V4DF 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINT_W_D))]
++  "ISA_HAS_LASX"
++  "xvftint.w.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvffint_s_l"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVFFINT_S_L))]
++  "ISA_HAS_LASX"
++  "xvffint.s.l\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvftintrz_w_d"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f")
++		      (match_operand:V4DF 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRZ_W_D))]
++  "ISA_HAS_LASX"
++  "xvftintrz.w.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvftintrp_w_d"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f")
++		      (match_operand:V4DF 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRP_W_D))]
++  "ISA_HAS_LASX"
++  "xvftintrp.w.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvftintrm_w_d"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f")
++		      (match_operand:V4DF 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRM_W_D))]
++  "ISA_HAS_LASX"
++  "xvftintrm.w.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvftintrne_w_d"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(unspec:V8SI [(match_operand:V4DF 1 "register_operand" "f")
++		      (match_operand:V4DF 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRNE_W_D))]
++  "ISA_HAS_LASX"
++  "xvftintrne.w.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvftinth_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTH_L_S))]
++  "ISA_HAS_LASX"
++  "xvftinth.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintl_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTL_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintl.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvffinth_d_w"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFFINTH_D_W))]
++  "ISA_HAS_LASX"
++  "xvffinth.d.w\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvffintl_d_w"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(unspec:V4DF [(match_operand:V8SI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFFINTL_D_W))]
++  "ISA_HAS_LASX"
++  "xvffintl.d.w\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvftintrzh_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRZH_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrzh.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrzl_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRZL_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrzl.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lasx_xvftintrph_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRPH_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrph.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lasx_xvftintrpl_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRPL_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrpl.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrmh_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRMH_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrmh.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrml_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRML_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrml.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrneh_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRNEH_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrneh.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvftintrnel_l_s"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFTINTRNEL_L_S))]
++  "ISA_HAS_LASX"
++  "xvftintrnel.l.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvfrintrne_s"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRNE_S))]
++  "ISA_HAS_LASX"
++  "xvfrintrne.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvfrintrne_d"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRNE_D))]
++  "ISA_HAS_LASX"
++  "xvfrintrne.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvfrintrz_s"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRZ_S))]
++  "ISA_HAS_LASX"
++  "xvfrintrz.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvfrintrz_d"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRZ_D))]
++  "ISA_HAS_LASX"
++  "xvfrintrz.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvfrintrp_s"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRP_S))]
++  "ISA_HAS_LASX"
++  "xvfrintrp.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvfrintrp_d"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRP_D))]
++  "ISA_HAS_LASX"
++  "xvfrintrp.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++(define_insn "lasx_xvfrintrm_s"
++  [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRM_S))]
++  "ISA_HAS_LASX"
++  "xvfrintrm.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "lasx_xvfrintrm_d"
++  [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVFRINTRM_D))]
++  "ISA_HAS_LASX"
++  "xvfrintrm.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++;; Vector versions of the floating-point frint patterns.
++;; Expands to btrunc, ceil, floor, rint.
++(define_insn "v8sf2"
++ [(set (match_operand:V8SF 0 "register_operand" "=f")
++	(unspec:V8SF [(match_operand:V8SF 1 "register_operand" "f")]
++			 FRINT256_S))]
++  "ISA_HAS_LASX"
++  "xvfrint.s\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V8SF")])
++
++(define_insn "v4df2"
++ [(set (match_operand:V4DF 0 "register_operand" "=f")
++	(unspec:V4DF [(match_operand:V4DF 1 "register_operand" "f")]
++			 FRINT256_D))]
++  "ISA_HAS_LASX"
++  "xvfrint.d\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4DF")])
++
++;; Expands to round.
++(define_insn "round2"
++ [(set (match_operand:FLASX 0 "register_operand" "=f")
++	(unspec:FLASX [(match_operand:FLASX 1 "register_operand" "f")]
++			 UNSPEC_LASX_XVFRINT))]
++  "ISA_HAS_LASX"
++  "xvfrint.\t%u0,%u1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++;; Offset load and broadcast
++(define_expand "lasx_xvldrepl_"
++  [(match_operand:LASX 0 "register_operand")
++   (match_operand 2 "aq12_operand")
++   (match_operand 1 "pmode_register_operand")]
++  "ISA_HAS_LASX"
++{
++  emit_insn (gen_lasx_xvldrepl__insn
++	     (operands[0], operands[1], operands[2]));
++  DONE;
++})
++
++(define_insn "lasx_xvldrepl__insn"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(vec_duplicate:LASX
++	  (mem: (plus:DI (match_operand:DI 1 "register_operand" "r")
++				   (match_operand 2 "aq12_operand")))))]
++  "ISA_HAS_LASX"
++{
++  return "xvldrepl.\t%u0,%1,%2";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++;; Offset is "0"
++(define_insn "lasx_xvldrepl__insn_0"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++    (vec_duplicate:LASX
++      (mem: (match_operand:DI 1 "register_operand" "r"))))]
++  "ISA_HAS_LASX"
++{
++    return "xvldrepl.\t%u0,%1,0";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++;;XVADDWEV.H.B   XVSUBWEV.H.B   XVMULWEV.H.B
++;;XVADDWEV.H.BU  XVSUBWEV.H.BU  XVMULWEV.H.BU
++(define_insn "lasx_xvwev_h_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(addsubmul:V16HI
++	  (any_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)
++			 (const_int 16) (const_int 18)
++			 (const_int 20) (const_int 22)
++			 (const_int 24) (const_int 26)
++			 (const_int 28) (const_int 30)])))
++	  (any_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)
++			 (const_int 16) (const_int 18)
++			 (const_int 20) (const_int 22)
++			 (const_int 24) (const_int 26)
++			 (const_int 28) (const_int 30)])))))]
++  "ISA_HAS_LASX"
++  "xvwev.h.b\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V16HI")])
++
++;;XVADDWEV.W.H   XVSUBWEV.W.H   XVMULWEV.W.H
++;;XVADDWEV.W.HU  XVSUBWEV.W.HU  XVMULWEV.W.HU
++(define_insn "lasx_xvwev_w_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(addsubmul:V8SI
++	  (any_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))
++	  (any_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))))]
++  "ISA_HAS_LASX"
++  "xvwev.w.h\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8SI")])
++
++;;XVADDWEV.D.W   XVSUBWEV.D.W   XVMULWEV.D.W
++;;XVADDWEV.D.WU  XVSUBWEV.D.WU  XVMULWEV.D.WU
++(define_insn "lasx_xvwev_d_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(addsubmul:V4DI
++	  (any_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))
++	  (any_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))))]
++  "ISA_HAS_LASX"
++  "xvwev.d.w\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVADDWEV.Q.D
++;;TODO2
++(define_insn "lasx_xvaddwev_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVADDWEV))]
++  "ISA_HAS_LASX"
++  "xvaddwev.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVSUBWEV.Q.D
++;;TODO2
++(define_insn "lasx_xvsubwev_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVSUBWEV))]
++  "ISA_HAS_LASX"
++  "xvsubwev.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMULWEV.Q.D
++;;TODO2
++(define_insn "lasx_xvmulwev_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVMULWEV))]
++  "ISA_HAS_LASX"
++  "xvmulwev.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++
++;;XVADDWOD.H.B   XVSUBWOD.H.B   XVMULWOD.H.B
++;;XVADDWOD.H.BU  XVSUBWOD.H.BU  XVMULWOD.H.BU
++(define_insn "lasx_xvwod_h_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(addsubmul:V16HI
++	  (any_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)
++			 (const_int 17) (const_int 19)
++			 (const_int 21) (const_int 23)
++			 (const_int 25) (const_int 27)
++			 (const_int 29) (const_int 31)])))
++	  (any_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)
++			 (const_int 17) (const_int 19)
++			 (const_int 21) (const_int 23)
++			 (const_int 25) (const_int 27)
++			 (const_int 29) (const_int 31)])))))]
++  "ISA_HAS_LASX"
++  "xvwod.h.b\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V16HI")])
++
++;;XVADDWOD.W.H   XVSUBWOD.W.H   XVMULWOD.W.H
++;;XVADDWOD.W.HU  XVSUBWOD.W.HU  XVMULWOD.W.HU
++(define_insn "lasx_xvwod_w_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(addsubmul:V8SI
++	  (any_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))
++	  (any_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))))]
++  "ISA_HAS_LASX"
++  "xvwod.w.h\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8SI")])
++
++
++;;XVADDWOD.D.W   XVSUBWOD.D.W   XVMULWOD.D.W
++;;XVADDWOD.D.WU  XVSUBWOD.D.WU  XVMULWOD.D.WU
++(define_insn "lasx_xvwod_d_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(addsubmul:V4DI
++	  (any_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))
++	  (any_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))))]
++  "ISA_HAS_LASX"
++  "xvwod.d.w\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVADDWOD.Q.D
++;;TODO2
++(define_insn "lasx_xvaddwod_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVADDWOD))]
++  "ISA_HAS_LASX"
++  "xvaddwod.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVSUBWOD.Q.D
++;;TODO2
++(define_insn "lasx_xvsubwod_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVSUBWOD))]
++  "ISA_HAS_LASX"
++  "xvsubwod.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMULWOD.Q.D
++;;TODO2
++(define_insn "lasx_xvmulwod_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVMULWOD))]
++  "ISA_HAS_LASX"
++  "xvmulwod.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVADDWEV.Q.DU
++;;TODO2
++(define_insn "lasx_xvaddwev_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVADDWEV2))]
++  "ISA_HAS_LASX"
++  "xvaddwev.q.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVSUBWEV.Q.DU
++;;TODO2
++(define_insn "lasx_xvsubwev_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVSUBWEV2))]
++  "ISA_HAS_LASX"
++  "xvsubwev.q.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMULWEV.Q.DU
++;;TODO2
++(define_insn "lasx_xvmulwev_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVMULWEV2))]
++  "ISA_HAS_LASX"
++  "xvmulwev.q.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVADDWOD.Q.DU
++;;TODO2
++(define_insn "lasx_xvaddwod_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVADDWOD2))]
++  "ISA_HAS_LASX"
++  "xvaddwod.q.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVSUBWOD.Q.DU
++;;TODO2
++(define_insn "lasx_xvsubwod_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVSUBWOD2))]
++  "ISA_HAS_LASX"
++  "xvsubwod.q.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMULWOD.Q.DU
++;;TODO2
++(define_insn "lasx_xvmulwod_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVMULWOD2))]
++  "ISA_HAS_LASX"
++  "xvmulwod.q.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVADDWEV.H.BU.B   XVMULWEV.H.BU.B
++(define_insn "lasx_xvwev_h_bu_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(addmul:V16HI
++	  (zero_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)
++			 (const_int 16) (const_int 18)
++			 (const_int 20) (const_int 22)
++			 (const_int 24) (const_int 26)
++			 (const_int 28) (const_int 30)])))
++	  (sign_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)
++			 (const_int 16) (const_int 18)
++			 (const_int 20) (const_int 22)
++			 (const_int 24) (const_int 26)
++			 (const_int 28) (const_int 30)])))))]
++  "ISA_HAS_LASX"
++  "xvwev.h.bu.b\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V16HI")])
++
++;;XVADDWEV.W.HU.H   XVMULWEV.W.HU.H
++(define_insn "lasx_xvwev_w_hu_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(addmul:V8SI
++	  (zero_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))
++	  (sign_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))))]
++  "ISA_HAS_LASX"
++  "xvwev.w.hu.h\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8SI")])
++
++;;XVADDWEV.D.WU.W   XVMULWEV.D.WU.W
++(define_insn "lasx_xvwev_d_wu_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(addmul:V4DI
++	  (zero_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))
++	  (sign_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))))]
++  "ISA_HAS_LASX"
++  "xvwev.d.wu.w\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVADDWOD.H.BU.B   XVMULWOD.H.BU.B
++(define_insn "lasx_xvwod_h_bu_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(addmul:V16HI
++	  (zero_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)
++			 (const_int 17) (const_int 19)
++			 (const_int 21) (const_int 23)
++			 (const_int 25) (const_int 27)
++			 (const_int 29) (const_int 31)])))
++	  (sign_extend:V16HI
++	    (vec_select:V16QI
++	      (match_operand:V32QI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)
++			 (const_int 17) (const_int 19)
++			 (const_int 21) (const_int 23)
++			 (const_int 25) (const_int 27)
++			 (const_int 29) (const_int 31)])))))]
++  "ISA_HAS_LASX"
++  "xvwod.h.bu.b\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V16HI")])
++
++;;XVADDWOD.W.HU.H   XVMULWOD.W.HU.H
++(define_insn "lasx_xvwod_w_hu_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(addmul:V8SI
++	  (zero_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))
++	  (sign_extend:V8SI
++	    (vec_select:V8HI
++	      (match_operand:V16HI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))))]
++  "ISA_HAS_LASX"
++  "xvwod.w.hu.h\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8SI")])
++
++;;XVADDWOD.D.WU.W   XVMULWOD.D.WU.W
++(define_insn "lasx_xvwod_d_wu_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(addmul:V4DI
++	  (zero_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))
++	  (sign_extend:V4DI
++	    (vec_select:V4SI
++	      (match_operand:V8SI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))))]
++  "ISA_HAS_LASX"
++  "xvwod.d.wu.w\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWEV.H.B   XVMADDWEV.H.BU
++(define_insn "lasx_xvmaddwev_h_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(plus:V16HI
++	  (match_operand:V16HI 1 "register_operand" "0")
++	  (mult:V16HI
++	    (any_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)
++			   (const_int 16) (const_int 18)
++			   (const_int 20) (const_int 22)
++			   (const_int 24) (const_int 26)
++			   (const_int 28) (const_int 30)])))
++	    (any_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)
++			   (const_int 16) (const_int 18)
++			   (const_int 20) (const_int 22)
++			   (const_int 24) (const_int 26)
++			   (const_int 28) (const_int 30)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.h.b\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V16HI")])
++
++;;XVMADDWEV.W.H   XVMADDWEV.W.HU
++(define_insn "lasx_xvmaddwev_w_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(plus:V8SI
++	  (match_operand:V8SI 1 "register_operand" "0")
++	  (mult:V8SI
++	    (any_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)])))
++	    (any_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.w.h\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8SI")])
++
++;;XVMADDWEV.D.W   XVMADDWEV.D.WU
++(define_insn "lasx_xvmaddwev_d_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(plus:V4DI
++	  (match_operand:V4DI 1 "register_operand" "0")
++	  (mult:V4DI
++	    (any_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)])))
++	    (any_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.d.w\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWEV.Q.D
++;;TODO2
++(define_insn "lasx_xvmaddwev_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0")
++		      (match_operand:V4DI 2 "register_operand" "f")
++		      (match_operand:V4DI 3 "register_operand" "f")]
++		     UNSPEC_LASX_XVMADDWEV))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.q.d\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWOD.H.B   XVMADDWOD.H.BU
++(define_insn "lasx_xvmaddwod_h_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(plus:V16HI
++	  (match_operand:V16HI 1 "register_operand" "0")
++	  (mult:V16HI
++	    (any_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)
++			   (const_int 17) (const_int 19)
++			   (const_int 21) (const_int 23)
++			   (const_int 25) (const_int 27)
++			   (const_int 29) (const_int 31)])))
++	    (any_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)
++			   (const_int 17) (const_int 19)
++			   (const_int 21) (const_int 23)
++			   (const_int 25) (const_int 27)
++			   (const_int 29) (const_int 31)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.h.b\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V16HI")])
++
++;;XVMADDWOD.W.H   XVMADDWOD.W.HU
++(define_insn "lasx_xvmaddwod_w_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(plus:V8SI
++	  (match_operand:V8SI 1 "register_operand" "0")
++	  (mult:V8SI
++	    (any_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)])))
++	    (any_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.w.h\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8SI")])
++
++;;XVMADDWOD.D.W   XVMADDWOD.D.WU
++(define_insn "lasx_xvmaddwod_d_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(plus:V4DI
++	  (match_operand:V4DI 1 "register_operand" "0")
++	  (mult:V4DI
++	    (any_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)])))
++	    (any_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.d.w\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWOD.Q.D
++;;TODO2
++(define_insn "lasx_xvmaddwod_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0")
++		      (match_operand:V4DI 2 "register_operand" "f")
++		      (match_operand:V4DI 3 "register_operand" "f")]
++		     UNSPEC_LASX_XVMADDWOD))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.q.d\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWEV.Q.DU
++;;TODO2
++(define_insn "lasx_xvmaddwev_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0")
++		      (match_operand:V4DI 2 "register_operand" "f")
++		      (match_operand:V4DI 3 "register_operand" "f")]
++		     UNSPEC_LASX_XVMADDWEV2))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.q.du\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWOD.Q.DU
++;;TODO2
++(define_insn "lasx_xvmaddwod_q_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0")
++		      (match_operand:V4DI 2 "register_operand" "f")
++		      (match_operand:V4DI 3 "register_operand" "f")]
++		     UNSPEC_LASX_XVMADDWOD2))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.q.du\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWEV.H.BU.B
++(define_insn "lasx_xvmaddwev_h_bu_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(plus:V16HI
++	  (match_operand:V16HI 1 "register_operand" "0")
++	  (mult:V16HI
++	    (zero_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)
++			   (const_int 16) (const_int 18)
++			   (const_int 20) (const_int 22)
++			   (const_int 24) (const_int 26)
++			   (const_int 28) (const_int 30)])))
++	    (sign_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)
++			   (const_int 16) (const_int 18)
++			   (const_int 20) (const_int 22)
++			   (const_int 24) (const_int 26)
++			   (const_int 28) (const_int 30)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.h.bu.b\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V16HI")])
++
++;;XVMADDWEV.W.HU.H
++(define_insn "lasx_xvmaddwev_w_hu_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(plus:V8SI
++	  (match_operand:V8SI 1 "register_operand" "0")
++	  (mult:V8SI
++	    (zero_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)])))
++	    (sign_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.w.hu.h\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8SI")])
++
++;;XVMADDWEV.D.WU.W
++(define_insn "lasx_xvmaddwev_d_wu_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(plus:V4DI
++	  (match_operand:V4DI 1 "register_operand" "0")
++	  (mult:V4DI
++	    (zero_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)])))
++	    (sign_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.d.wu.w\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWEV.Q.DU.D
++;;TODO2
++(define_insn "lasx_xvmaddwev_q_du_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0")
++		      (match_operand:V4DI 2 "register_operand" "f")
++		      (match_operand:V4DI 3 "register_operand" "f")]
++		     UNSPEC_LASX_XVMADDWEV3))]
++  "ISA_HAS_LASX"
++  "xvmaddwev.q.du.d\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWOD.H.BU.B
++(define_insn "lasx_xvmaddwod_h_bu_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(plus:V16HI
++	  (match_operand:V16HI 1 "register_operand" "0")
++	  (mult:V16HI
++	    (zero_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)
++			   (const_int 17) (const_int 19)
++			   (const_int 21) (const_int 23)
++			   (const_int 25) (const_int 27)
++			   (const_int 29) (const_int 31)])))
++	    (sign_extend:V16HI
++	      (vec_select:V16QI
++		(match_operand:V32QI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)
++			   (const_int 17) (const_int 19)
++			   (const_int 21) (const_int 23)
++			   (const_int 25) (const_int 27)
++			   (const_int 29) (const_int 31)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.h.bu.b\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V16HI")])
++
++;;XVMADDWOD.W.HU.H
++(define_insn "lasx_xvmaddwod_w_hu_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(plus:V8SI
++	  (match_operand:V8SI 1 "register_operand" "0")
++	  (mult:V8SI
++	    (zero_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)])))
++	    (sign_extend:V8SI
++	      (vec_select:V8HI
++		(match_operand:V16HI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.w.hu.h\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8SI")])
++
++;;XVMADDWOD.D.WU.W
++(define_insn "lasx_xvmaddwod_d_wu_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(plus:V4DI
++	  (match_operand:V4DI 1 "register_operand" "0")
++	  (mult:V4DI
++	    (zero_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)])))
++	    (sign_extend:V4DI
++	      (vec_select:V4SI
++		(match_operand:V8SI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)]))))))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.d.wu.w\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4DI")])
++
++;;XVMADDWOD.Q.DU.D
++;;TODO2
++(define_insn "lasx_xvmaddwod_q_du_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "0")
++		      (match_operand:V4DI 2 "register_operand" "f")
++		      (match_operand:V4DI 3 "register_operand" "f")]
++		     UNSPEC_LASX_XVMADDWOD3))]
++  "ISA_HAS_LASX"
++  "xvmaddwod.q.du.d\t%u0,%u2,%u3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVHADDW.Q.D
++;;TODO2
++(define_insn "lasx_xvhaddw_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVHADDW_Q_D))]
++  "ISA_HAS_LASX"
++  "xvhaddw.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVHSUBW.Q.D
++;;TODO2
++(define_insn "lasx_xvhsubw_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVHSUBW_Q_D))]
++  "ISA_HAS_LASX"
++  "xvhsubw.q.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVHADDW.QU.DU
++;;TODO2
++(define_insn "lasx_xvhaddw_qu_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVHADDW_QU_DU))]
++  "ISA_HAS_LASX"
++  "xvhaddw.qu.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVHSUBW.QU.DU
++;;TODO2
++(define_insn "lasx_xvhsubw_qu_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVHSUBW_QU_DU))]
++  "ISA_HAS_LASX"
++  "xvhsubw.qu.du\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVROTR.B   XVROTR.H   XVROTR.W   XVROTR.D
++;;TODO-478
++(define_insn "lasx_xvrotr_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand:ILASX 2 "register_operand" "f")]
++		      UNSPEC_LASX_XVROTR))]
++  "ISA_HAS_LASX"
++  "xvrotr.\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++;;XVADD.Q
++;;TODO2
++(define_insn "lasx_xvadd_q"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVADD_Q))]
++  "ISA_HAS_LASX"
++  "xvadd.q\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVSUB.Q
++;;TODO2
++(define_insn "lasx_xvsub_q"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVSUB_Q))]
++  "ISA_HAS_LASX"
++  "xvsub.q\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVSSRLN.B.H   XVSSRLN.H.W   XVSSRLN.W.D
++(define_insn "lasx_xvssrln__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRLN))]
++  "ISA_HAS_LASX"
++  "xvssrln..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++;;XVREPLVE.B   XVREPLVE.H   XVREPLVE.W   XVREPLVE.D
++(define_insn "lasx_xvreplve_"
++  [(set (match_operand:LASX 0 "register_operand" "=f")
++	(unspec:LASX [(match_operand:LASX 1 "register_operand" "f")
++		      (match_operand:SI 2 "register_operand" "r")]
++		     UNSPEC_LASX_XVREPLVE))]
++  "ISA_HAS_LASX"
++  "xvreplve.\t%u0,%u1,%z2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++;;XVADDWEV.Q.DU.D
++(define_insn "lasx_xvaddwev_q_du_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVADDWEV3))]
++  "ISA_HAS_LASX"
++  "xvaddwev.q.du.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVADDWOD.Q.DU.D
++(define_insn "lasx_xvaddwod_q_du_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVADDWOD3))]
++  "ISA_HAS_LASX"
++  "xvaddwod.q.du.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMULWEV.Q.DU.D
++(define_insn "lasx_xvmulwev_q_du_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVMULWEV3))]
++  "ISA_HAS_LASX"
++  "xvmulwev.q.du.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;;XVMULWOD.Q.DU.D
++(define_insn "lasx_xvmulwod_q_du_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")
++		      (match_operand:V4DI 2 "register_operand" "f")]
++		     UNSPEC_LASX_XVMULWOD3))]
++  "ISA_HAS_LASX"
++  "xvmulwod.q.du.d\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvpickve2gr_w"
++  [(set (match_operand:SI 0 "register_operand" "=r")
++	(any_extend:SI
++	  (vec_select:SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const_0_to_7_operand" "")]))))]
++  "ISA_HAS_LASX"
++  "xvpickve2gr.w\t%0,%u1,%2"
++  [(set_attr "type" "simd_copy")
++   (set_attr "mode" "V8SI")])
++
++
++(define_insn "lasx_xvmskgez_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVMSKGEZ))]
++  "ISA_HAS_LASX"
++  "xvmskgez.b\t%u0,%u1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvmsknz_b"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(unspec:V32QI [(match_operand:V32QI 1 "register_operand" "f")]
++		      UNSPEC_LASX_XVMSKNZ))]
++  "ISA_HAS_LASX"
++  "xvmsknz.b\t%u0,%u1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvexth_h_b"
++  [(set (match_operand:V16HI 0 "register_operand" "=f")
++	(any_extend:V16HI
++	  (vec_select:V16QI
++	    (match_operand:V32QI 1 "register_operand" "f")
++	      (parallel [(const_int 16) (const_int 17)
++			 (const_int 18) (const_int 19)
++			 (const_int 20) (const_int 21)
++			 (const_int 22) (const_int 23)
++			 (const_int 24) (const_int 25)
++			 (const_int 26) (const_int 27)
++			 (const_int 28) (const_int 29)
++			 (const_int 30) (const_int 31)]))))]
++  "ISA_HAS_LASX"
++  "xvexth.h.b\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V16HI")])
++
++(define_insn "lasx_xvexth_w_h"
++  [(set (match_operand:V8SI 0 "register_operand" "=f")
++	(any_extend:V8SI
++	  (vec_select:V8HI
++	    (match_operand:V16HI 1 "register_operand" "f")
++	    (parallel [(const_int 8) (const_int 9)
++		       (const_int 10) (const_int 11)
++		       (const_int 12) (const_int 13)
++		       (const_int 14) (const_int 15)]))))]
++  "ISA_HAS_LASX"
++  "xvexth.w.h\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V8SI")])
++
++(define_insn "lasx_xvexth_d_w"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(any_extend:V4DI
++	  (vec_select:V4SI
++	    (match_operand:V8SI 1 "register_operand" "f")
++	    (parallel [(const_int 4) (const_int 5)
++		       (const_int 6) (const_int 7)]))))]
++  "ISA_HAS_LASX"
++  "xvexth.d.w\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvexth_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVEXTH_Q_D))]
++  "ISA_HAS_LASX"
++  "xvexth.q.d\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvexth_qu_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVEXTH_QU_DU))]
++  "ISA_HAS_LASX"
++  "xvexth.qu.du\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvrotri_"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(rotatert:ILASX (match_operand:ILASX 1 "register_operand" "f")
++		       (match_operand 2 "const__operand" "")))]
++  "ISA_HAS_LASX"
++  "xvrotri.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvextl_q_d"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVEXTL_Q_D))]
++  "ISA_HAS_LASX"
++  "xvextl.q.d\t%u0,%u1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvsrlni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSRLNI))]
++  "ISA_HAS_LASX"
++  "xvsrlni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrlrni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSRLRNI))]
++  "ISA_HAS_LASX"
++  "xvsrlrni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrlni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRLNI))]
++  "ISA_HAS_LASX"
++  "xvssrlni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrlni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRLNI2))]
++  "ISA_HAS_LASX"
++  "xvssrlni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrlrni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRLRNI))]
++  "ISA_HAS_LASX"
++  "xvssrlrni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrlrni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRLRNI2))]
++  "ISA_HAS_LASX"
++  "xvssrlrni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrani__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSRANI))]
++  "ISA_HAS_LASX"
++  "xvsrani..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvsrarni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSRARNI))]
++  "ISA_HAS_LASX"
++  "xvsrarni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrani__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRANI))]
++  "ISA_HAS_LASX"
++  "xvssrani..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrani__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRANI2))]
++  "ISA_HAS_LASX"
++  "xvssrani..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrarni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRARNI))]
++  "ISA_HAS_LASX"
++  "xvssrarni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrarni__"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(unspec:ILASX [(match_operand:ILASX 1 "register_operand" "0")
++		       (match_operand:ILASX 2 "register_operand" "f")
++		       (match_operand 3 "const_uimm8_operand" "")]
++		      UNSPEC_LASX_XVSSRARNI2))]
++  "ISA_HAS_LASX"
++  "xvssrarni..\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_mode_attr VDOUBLEMODEW256
++  [(V8SI "V16SI")
++   (V8SF "V16SF")])
++
++(define_insn "lasx_xvpermi_"
++  [(set (match_operand:LASX_W 0 "register_operand" "=f")
++    (unspec:LASX_W [(match_operand:LASX_W 1 "register_operand" "0")
++               (match_operand:LASX_W 2 "register_operand" "f")
++                   (match_operand 3 "const_uimm8_operand" "")]
++             UNSPEC_LASX_XVPERMI))]
++  "ISA_HAS_LASX"
++  "xvpermi.w\t%u0,%u2,%3"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpermi__1"
++  [(set (match_operand:LASX_W 0 "register_operand" "=f")
++     (vec_select:LASX_W
++       (vec_concat:
++         (match_operand:LASX_W 1 "register_operand" "f")
++         (match_operand:LASX_W 2 "register_operand" "0"))
++       (parallel [(match_operand 3  "const_0_to_3_operand")
++              (match_operand 4  "const_0_to_3_operand"  )
++              (match_operand 5  "const_8_to_11_operand" )
++              (match_operand 6  "const_8_to_11_operand" )
++              (match_operand 7  "const_4_to_7_operand"  )
++              (match_operand 8  "const_4_to_7_operand"  )
++              (match_operand 9  "const_12_to_15_operand")
++              (match_operand 10 "const_12_to_15_operand")])))]
++  "ISA_HAS_LASX
++  && INTVAL (operands[3]) + 4 == INTVAL (operands[7])
++  && INTVAL (operands[4]) + 4 == INTVAL (operands[8])
++  && INTVAL (operands[5]) + 4 == INTVAL (operands[9])
++  && INTVAL (operands[6]) + 4 == INTVAL (operands[10])"
++{
++  int mask = 0;
++  mask |= INTVAL (operands[3]) << 0;
++  mask |= INTVAL (operands[4]) << 2;
++  mask |= (INTVAL (operands[5]) - 8) << 4;
++  mask |= (INTVAL (operands[6]) - 8) << 6;
++  operands[3] = GEN_INT (mask);
++
++  return "xvpermi.w\t%u0,%u1,%3";
++}
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_expand "lasx_xvld"
++  [(match_operand:V32QI 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq12b_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (operands[0], gen_rtx_MEM (V32QImode, addr));
++  DONE;
++})
++
++(define_expand "lasx_xvst"
++  [(match_operand:V32QI 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq12b_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (gen_rtx_MEM (V32QImode, addr), operands[0]);
++  DONE;
++})
++
++(define_expand "lasx_xvstelm_"
++  [(match_operand:LASX 0 "register_operand")
++   (match_operand 3 "const__operand")
++   (match_operand 2 "aq8_operand")
++   (match_operand 1 "pmode_register_operand")]
++  "ISA_HAS_LASX"
++{
++  emit_insn (gen_lasx_xvstelm__insn
++	     (operands[1], operands[2], operands[0], operands[3]));
++  DONE;
++})
++
++(define_insn "lasx_xvstelm__insn"
++  [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r")
++				 (match_operand 1 "aq8_operand")))
++	(vec_select:
++	  (match_operand:LASX 2 "register_operand" "f")
++	  (parallel [(match_operand 3 "const__operand" "")])))]
++  "ISA_HAS_LASX"
++{
++  return "xvstelm.\t%u2,%0,%1,%3";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++;; Offset is "0"
++(define_insn "lasx_xvstelm__insn_0"
++  [(set (mem: (match_operand:DI 0 "register_operand" "r"))
++    (vec_select:
++      (match_operand:LASX_WD 1 "register_operand" "f")
++      (parallel [(match_operand:SI 2 "const__operand")])))]
++  "ISA_HAS_LASX"
++{
++    return "xvstelm.\t%u1,%0,0,%2";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++(define_insn "lasx_xvinsve0_"
++  [(set (match_operand:LASX_WD 0 "register_operand" "=f")
++	(unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "0")
++			 (match_operand:LASX_WD 2 "register_operand" "f")
++			 (match_operand 3 "const__operand" "")]
++			UNSPEC_LASX_XVINSVE0))]
++  "ISA_HAS_LASX"
++  "xvinsve0.\t%u0,%u2,%3"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvinsve0__scalar"
++  [(set (match_operand:FLASX 0 "register_operand" "=f")
++    (vec_merge:FLASX
++      (vec_duplicate:FLASX
++        (match_operand: 1 "register_operand" "f"))
++      (match_operand:FLASX 2 "register_operand" "0")
++      (match_operand 3 "const__operand" "")))]
++  "ISA_HAS_LASX"
++  "xvinsve0.\t%u0,%u1,%y3"
++  [(set_attr "type" "simd_insert")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpickve_"
++  [(set (match_operand:LASX_WD 0 "register_operand" "=f")
++	(unspec:LASX_WD [(match_operand:LASX_WD 1 "register_operand" "f")
++			 (match_operand 2 "const__operand" "")]
++			UNSPEC_LASX_XVPICKVE))]
++  "ISA_HAS_LASX"
++  "xvpickve.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvpickve__scalar"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(vec_select:
++	 (match_operand:FLASX 1 "register_operand" "f")
++	 (parallel [(match_operand 2 "const__operand" "")])))]
++  "ISA_HAS_LASX"
++  "xvpickve.\t%u0,%u1,%2"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvssrlrn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILASX_DWH 1 "register_operand" "f")
++			      (match_operand:ILASX_DWH 2 "register_operand" "f")]
++			     UNSPEC_LASX_XVSSRLRN))]
++  "ISA_HAS_LASX"
++  "xvssrlrn..\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "xvorn3"
++  [(set (match_operand:ILASX 0 "register_operand" "=f")
++	(ior:ILASX (not:ILASX (match_operand:ILASX 2 "register_operand" "f"))
++		   (match_operand:ILASX 1 "register_operand" "f")))]
++  "ISA_HAS_LASX"
++  "xvorn.v\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lasx_xvextl_qu_du"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI [(match_operand:V4DI 1 "register_operand" "f")]
++		     UNSPEC_LASX_XVEXTL_QU_DU))]
++  "ISA_HAS_LASX"
++  "xvextl.qu.du\t%u0,%u1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvldi"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++	(unspec:V4DI[(match_operand 1 "const_imm13_operand")]
++		    UNSPEC_LASX_XVLDI))]
++  "ISA_HAS_LASX"
++{
++  HOST_WIDE_INT val = INTVAL (operands[1]);
++  if (val < 0)
++    {
++      HOST_WIDE_INT modeVal = (val & 0xf00) >> 8;
++      if (modeVal < 13)
++	return  "xvldi\t%u0,%1";
++      else
++	{
++	  sorry ("imm13 only support 0000 ~ 1100 in bits '12 ~ 9' when bit '13' is 1");
++	  return "#";
++	}
++    }
++  else
++    return "xvldi\t%u0,%1";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V4DI")])
++
++(define_insn "lasx_xvldx"
++  [(set (match_operand:V32QI 0 "register_operand" "=f")
++	(unspec:V32QI [(match_operand:DI 1 "register_operand" "r")
++		       (match_operand:DI 2 "reg_or_0_operand" "rJ")]
++		      UNSPEC_LASX_XVLDX))]
++  "ISA_HAS_LASX"
++{
++  return "xvldx\t%u0,%1,%z2";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V32QI")])
++
++(define_insn "lasx_xvstx"
++  [(set (mem:V32QI (plus:DI (match_operand:DI 1 "register_operand" "r")
++			    (match_operand:DI 2 "reg_or_0_operand" "rJ")))
++	(unspec: V32QI[(match_operand:V32QI 0 "register_operand" "f")]
++		      UNSPEC_LASX_XVSTX))]
++
++  "ISA_HAS_LASX"
++{
++  return "xvstx\t%u0,%1,%z2";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "DI")])
++
++(define_insn "vec_widen_mult_even_v8si"
++  [(set (match_operand:V4DI 0 "register_operand" "=f")
++    (mult:V4DI
++      (any_extend:V4DI
++        (vec_select:V4SI
++          (match_operand:V8SI 1 "register_operand" "%f")
++          (parallel [(const_int 0) (const_int 2)
++                         (const_int 4) (const_int 6)])))
++      (any_extend:V4DI
++        (vec_select:V4SI
++          (match_operand:V8SI 2 "register_operand" "f")
++          (parallel [(const_int 0) (const_int 2)
++             (const_int 4) (const_int 6)])))))]
++  "ISA_HAS_LASX"
++  "xvmulwev.d.w\t%u0,%u1,%u2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4DI")])
++
++;; Vector reduction operation
++(define_expand "reduc_plus_scal_v4di"
++  [(match_operand:DI 0 "register_operand")
++   (match_operand:V4DI 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (V4DImode);
++  rtx tmp1 = gen_reg_rtx (V4DImode);
++  rtx vec_res = gen_reg_rtx (V4DImode);
++  emit_insn (gen_lasx_xvhaddw_q_d (tmp, operands[1], operands[1]));
++  emit_insn (gen_lasx_xvpermi_d_v4di (tmp1, tmp, GEN_INT (2)));
++  emit_insn (gen_addv4di3 (vec_res, tmp, tmp1));
++  emit_insn (gen_vec_extractv4didi (operands[0], vec_res, const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_plus_scal_v8si"
++  [(match_operand:SI 0 "register_operand")
++   (match_operand:V8SI 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (V4DImode);
++  rtx tmp1 = gen_reg_rtx (V4DImode);
++  rtx vec_res = gen_reg_rtx (V4DImode);
++  emit_insn (gen_lasx_xvhaddw_d_w (tmp, operands[1], operands[1]));
++  emit_insn (gen_lasx_xvhaddw_q_d (tmp1, tmp, tmp));
++  emit_insn (gen_lasx_xvpermi_d_v4di (tmp, tmp1, GEN_INT (2)));
++  emit_insn (gen_addv4di3 (vec_res, tmp, tmp1));
++  emit_insn (gen_vec_extractv8sisi (operands[0], gen_lowpart (V8SImode,vec_res),
++				    const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_plus_scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:FLASX 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc__scal_"
++  [(any_bitwise:
++     (match_operand: 0 "register_operand")
++     (match_operand:ILASX 1 "register_operand"))]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_smax_scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:LASX 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_smin_scal_<mode>"
++  [(match_operand:<UNITMODE> 0 "register_operand")
++   (match_operand:LASX 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (<MODE>mode);
++  loongarch_expand_vector_reduc (gen_smin<mode>3, tmp, operands[1]);
++  emit_insn (gen_vec_extract<mode><unitmode> (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_umax_scal_<mode>"
++  [(match_operand:<UNITMODE> 0 "register_operand")
++   (match_operand:ILASX 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (<MODE>mode);
++  loongarch_expand_vector_reduc (gen_umax<mode>3, tmp, operands[1]);
++  emit_insn (gen_vec_extract<mode><unitmode> (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_umin_scal_<mode>"
++  [(match_operand:<UNITMODE> 0 "register_operand")
++   (match_operand:ILASX 1 "register_operand")]
++  "ISA_HAS_LASX"
++{
++  rtx tmp = gen_reg_rtx (<MODE>mode);
++  loongarch_expand_vector_reduc (gen_umin<mode>3, tmp, operands[1]);
++  emit_insn (gen_vec_extract<mode><unitmode> (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
+diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def
+index b69ad3d83..ac9ea3142 100644
+--- a/gcc/config/loongarch/loongarch-modes.def
++++ b/gcc/config/loongarch/loongarch-modes.def
+@@ -33,6 +33,7 @@ VECTOR_MODES (FLOAT, 8);      /*       V4HF V2SF */
+ VECTOR_MODES (INT, 16);	      /* V16QI V8HI V4SI V2DI */
+ VECTOR_MODES (FLOAT, 16);     /*	    V4SF V2DF */
+ 
++/* For LARCH LASX 256 bits.  */
+ VECTOR_MODES (INT, 32);	      /* V32QI V16HI V8SI V4DI */
+ VECTOR_MODES (FLOAT, 32);     /*	     V8SF V4DF */
+ 
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 24e42fa99..133ec9fa8 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -89,6 +89,8 @@ extern bool loongarch_split_move_insn_p (rtx, rtx);
+ extern void loongarch_split_move_insn (rtx, rtx, rtx);
+ extern void loongarch_split_128bit_move (rtx, rtx);
+ extern bool loongarch_split_128bit_move_p (rtx, rtx);
++extern void loongarch_split_256bit_move (rtx, rtx);
++extern bool loongarch_split_256bit_move_p (rtx, rtx);
+ extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx));
+ extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx);
+ extern void loongarch_split_lsx_fill_d (rtx, rtx);
+@@ -174,9 +176,11 @@ union loongarch_gen_fn_ptrs
+ extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs,
+ 					  rtx, rtx, rtx, rtx, rtx);
+ 
++extern void loongarch_expand_vector_group_init (rtx, rtx);
+ extern void loongarch_expand_vector_init (rtx, rtx);
+ extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool);
+ extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx);
++extern void loongarch_expand_vec_perm_1 (rtx[]);
+ extern void loongarch_expand_vector_extract (rtx, rtx, int);
+ extern void loongarch_expand_vector_reduc (rtx (*)(rtx, rtx, rtx), rtx, rtx);
+ 
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 40b83d72b..dae35a479 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1927,7 +1927,7 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
+ {
+   /* LSX LD.* and ST.* cannot support loading symbols via an immediate
+      operand.  */
+-  if (LSX_SUPPORTED_MODE_P (mode))
++  if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))
+     return 0;
+ 
+   switch (type)
+@@ -2060,6 +2060,11 @@ loongarch_valid_offset_p (rtx x, machine_mode mode)
+ 					loongarch_ldst_scaled_shift (mode)))
+     return false;
+ 
++  /* LASX XVLD.B and XVST.B supports 10-bit signed offsets without shift.  */
++  if (LASX_SUPPORTED_MODE_P (mode)
++      && !loongarch_signed_immediate_p (INTVAL (x), 10, 0))
++    return false;
++
+   return true;
+ }
+ 
+@@ -2272,7 +2277,9 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p)
+ {
+   struct loongarch_address_info addr;
+   int factor;
+-  bool lsx_p = !might_split_p && LSX_SUPPORTED_MODE_P (mode);
++  bool lsx_p = (!might_split_p
++		&& (LSX_SUPPORTED_MODE_P (mode)
++		    || LASX_SUPPORTED_MODE_P (mode)));
+ 
+   if (!loongarch_classify_address (&addr, x, mode, false))
+     return 0;
+@@ -2418,7 +2425,8 @@ loongarch_const_insns (rtx x)
+       return loongarch_integer_cost (INTVAL (x));
+ 
+     case CONST_VECTOR:
+-      if (LSX_SUPPORTED_MODE_P (GET_MODE (x))
++      if ((LSX_SUPPORTED_MODE_P (GET_MODE (x))
++	   || LASX_SUPPORTED_MODE_P (GET_MODE (x)))
+ 	  && loongarch_const_vector_same_int_p (x, GET_MODE (x), -512, 511))
+ 	return 1;
+       /* Fall through.  */
+@@ -3257,10 +3265,11 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src)
+ 
+   /* Both src and dest are non-registers;  one special case is supported where
+      the source is (const_int 0) and the store can source the zero register.
+-     LSX is never able to source the zero register directly in
++     LSX and LASX are never able to source the zero register directly in
+      memory operations.  */
+   if (!register_operand (dest, mode) && !register_operand (src, mode)
+-      && (!const_0_operand (src, mode) || LSX_SUPPORTED_MODE_P (mode)))
++      && (!const_0_operand (src, mode)
++	  || LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode)))
+     {
+       loongarch_emit_move (dest, force_reg (mode, src));
+       return true;
+@@ -3842,6 +3851,7 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
+ 				      int misalign ATTRIBUTE_UNUSED)
+ {
+   unsigned elements;
++  machine_mode mode = vectype != NULL ? TYPE_MODE (vectype) : DImode;
+ 
+   switch (type_of_cost)
+     {
+@@ -3858,7 +3868,8 @@ loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
+ 	return 1;
+ 
+       case vec_perm:
+-	return 1;
++	return LASX_SUPPORTED_MODE_P (mode)
++	  && !LSX_SUPPORTED_MODE_P (mode) ? 2 : 1;
+ 
+       case unaligned_load:
+       case vector_gather_load:
+@@ -3939,6 +3950,10 @@ loongarch_split_move_p (rtx dest, rtx src)
+   if (LSX_SUPPORTED_MODE_P (GET_MODE (dest)))
+     return loongarch_split_128bit_move_p (dest, src);
+ 
++  /* Check if LASX moves need splitting.  */
++  if (LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
++    return loongarch_split_256bit_move_p (dest, src);
++
+   /* Otherwise split all multiword moves.  */
+   return size > UNITS_PER_WORD;
+ }
+@@ -3954,6 +3969,8 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_)
+   gcc_checking_assert (loongarch_split_move_p (dest, src));
+   if (LSX_SUPPORTED_MODE_P (GET_MODE (dest)))
+     loongarch_split_128bit_move (dest, src);
++  else if (LASX_SUPPORTED_MODE_P (GET_MODE (dest)))
++    loongarch_split_256bit_move (dest, src);
+   else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
+     {
+       if (!TARGET_64BIT && GET_MODE (dest) == DImode)
+@@ -4119,7 +4136,7 @@ const char *
+ loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr)
+ {
+   int index = exact_log2 (GET_MODE_SIZE (mode));
+-  if (!IN_RANGE (index, 2, 4))
++  if (!IN_RANGE (index, 2, 5))
+     return NULL;
+ 
+   struct loongarch_address_info info;
+@@ -4128,17 +4145,19 @@ loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr)
+       || !loongarch_legitimate_address_p (mode, x, false))
+     return NULL;
+ 
+-  const char *const insn[][3] =
++  const char *const insn[][4] =
+     {
+ 	{
+ 	  "fstx.s\t%1,%0",
+ 	  "fstx.d\t%1,%0",
+-	  "vstx\t%w1,%0"
++	  "vstx\t%w1,%0",
++	  "xvstx\t%u1,%0"
+ 	},
+ 	{
+ 	  "fldx.s\t%0,%1",
+ 	  "fldx.d\t%0,%1",
+-	  "vldx\t%w0,%1"
++	  "vldx\t%w0,%1",
++	  "xvldx\t%u0,%1"
+ 	}
+     };
+ 
+@@ -4172,6 +4191,34 @@ loongarch_split_128bit_move_p (rtx dest, rtx src)
+   return true;
+ }
+ 
++/* Return true if a 256-bit move from SRC to DEST should be split.  */
++
++bool
++loongarch_split_256bit_move_p (rtx dest, rtx src)
++{
++  /* LSX-to-LSX moves can be done in a single instruction.  */
++  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
++    return false;
++
++  /* Check for LSX loads and stores.  */
++  if (FP_REG_RTX_P (dest) && MEM_P (src))
++    return false;
++  if (FP_REG_RTX_P (src) && MEM_P (dest))
++    return false;
++
++  /* Check for LSX set to an immediate const vector with valid replicated
++     element.  */
++  if (FP_REG_RTX_P (dest)
++      && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
++    return false;
++
++  /* Check for LSX load zero immediate.  */
++  if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))
++    return false;
++
++  return true;
++}
++
+ /* Split a 128-bit move from SRC to DEST.  */
+ 
+ void
+@@ -4263,6 +4310,97 @@ loongarch_split_128bit_move (rtx dest, rtx src)
+     }
+ }
+ 
++/* Split a 256-bit move from SRC to DEST.  */
++
++void
++loongarch_split_256bit_move (rtx dest, rtx src)
++{
++  int byte, index;
++  rtx low_dest, low_src, d, s;
++
++  if (FP_REG_RTX_P (dest))
++    {
++      gcc_assert (!MEM_P (src));
++
++      rtx new_dest = dest;
++      if (!TARGET_64BIT)
++	{
++	  if (GET_MODE (dest) != V8SImode)
++	    new_dest = simplify_gen_subreg (V8SImode, dest, GET_MODE (dest), 0);
++	}
++      else
++	{
++	  if (GET_MODE (dest) != V4DImode)
++	    new_dest = simplify_gen_subreg (V4DImode, dest, GET_MODE (dest), 0);
++	}
++
++      for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (dest));
++	   byte += UNITS_PER_WORD, index++)
++	{
++	  s = loongarch_subword_at_byte (src, byte);
++	  if (!TARGET_64BIT)
++	    emit_insn (gen_lasx_xvinsgr2vr_w (new_dest, s, new_dest,
++					      GEN_INT (1 << index)));
++	  else
++	    emit_insn (gen_lasx_xvinsgr2vr_d (new_dest, s, new_dest,
++					      GEN_INT (1 << index)));
++	}
++    }
++  else if (FP_REG_RTX_P (src))
++    {
++      gcc_assert (!MEM_P (dest));
++
++      rtx new_src = src;
++      if (!TARGET_64BIT)
++	{
++	  if (GET_MODE (src) != V8SImode)
++	    new_src = simplify_gen_subreg (V8SImode, src, GET_MODE (src), 0);
++	}
++      else
++	{
++	  if (GET_MODE (src) != V4DImode)
++	    new_src = simplify_gen_subreg (V4DImode, src, GET_MODE (src), 0);
++	}
++
++      for (byte = 0, index = 0; byte < GET_MODE_SIZE (GET_MODE (src));
++	   byte += UNITS_PER_WORD, index++)
++	{
++	  d = loongarch_subword_at_byte (dest, byte);
++	  if (!TARGET_64BIT)
++	    emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index)));
++	  else
++	    emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index)));
++	}
++    }
++  else
++    {
++      low_dest = loongarch_subword_at_byte (dest, 0);
++      low_src = loongarch_subword_at_byte (src, 0);
++      gcc_assert (REG_P (low_dest) && REG_P (low_src));
++      /* Make sure the source register is not written before reading.  */
++      if (REGNO (low_dest) <= REGNO (low_src))
++	{
++	  for (byte = 0; byte < GET_MODE_SIZE (TImode);
++	       byte += UNITS_PER_WORD)
++	    {
++	      d = loongarch_subword_at_byte (dest, byte);
++	      s = loongarch_subword_at_byte (src, byte);
++	      loongarch_emit_move (d, s);
++	    }
++	}
++      else
++	{
++	  for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0;
++	       byte -= UNITS_PER_WORD)
++	    {
++	      d = loongarch_subword_at_byte (dest, byte);
++	      s = loongarch_subword_at_byte (src, byte);
++	      loongarch_emit_move (d, s);
++	    }
++	}
++    }
++}
++
+ 
+ /* Split a COPY_S.D with operands DEST, SRC and INDEX.  GEN is a function
+    used to generate subregs.  */
+@@ -4350,11 +4488,12 @@ loongarch_output_move (rtx dest, rtx src)
+   machine_mode mode = GET_MODE (dest);
+   bool dbl_p = (GET_MODE_SIZE (mode) == 8);
+   bool lsx_p = LSX_SUPPORTED_MODE_P (mode);
++  bool lasx_p = LASX_SUPPORTED_MODE_P (mode);
+ 
+   if (loongarch_split_move_p (dest, src))
+     return "#";
+ 
+-  if ((lsx_p)
++  if ((lsx_p || lasx_p)
+       && dest_code == REG && FP_REG_P (REGNO (dest))
+       && src_code == CONST_VECTOR
+       && CONST_INT_P (CONST_VECTOR_ELT (src, 0)))
+@@ -4364,6 +4503,8 @@ loongarch_output_move (rtx dest, rtx src)
+ 	{
+ 	case 16:
+ 	  return "vrepli.%v0\t%w0,%E1";
++	case 32:
++	  return "xvrepli.%v0\t%u0,%E1";
+ 	default: gcc_unreachable ();
+ 	}
+     }
+@@ -4378,13 +4519,15 @@ loongarch_output_move (rtx dest, rtx src)
+ 
+ 	  if (FP_REG_P (REGNO (dest)))
+ 	    {
+-	      if (lsx_p)
++	      if (lsx_p || lasx_p)
+ 		{
+ 		  gcc_assert (src == CONST0_RTX (GET_MODE (src)));
+ 		  switch (GET_MODE_SIZE (mode))
+ 		    {
+ 		    case 16:
+ 		      return "vrepli.b\t%w0,0";
++		    case 32:
++		      return "xvrepli.b\t%u0,0";
+ 		    default:
+ 		      gcc_unreachable ();
+ 		    }
+@@ -4517,12 +4660,14 @@ loongarch_output_move (rtx dest, rtx src)
+     {
+       if (dest_code == REG && FP_REG_P (REGNO (dest)))
+ 	{
+-	  if (lsx_p)
++	  if (lsx_p || lasx_p)
+ 	    {
+ 	      switch (GET_MODE_SIZE (mode))
+ 		{
+ 		case 16:
+ 		  return "vori.b\t%w0,%w1,0";
++		case 32:
++		  return "xvori.b\t%u0,%u1,0";
+ 		default:
+ 		  gcc_unreachable ();
+ 		}
+@@ -4540,12 +4685,14 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  if (insn)
+ 	    return insn;
+ 
+-	  if (lsx_p)
++	  if (lsx_p || lasx_p)
+ 	    {
+ 	      switch (GET_MODE_SIZE (mode))
+ 		{
+ 		case 16:
+ 		  return "vst\t%w1,%0";
++		case 32:
++		  return "xvst\t%u1,%0";
+ 		default:
+ 		  gcc_unreachable ();
+ 		}
+@@ -4566,12 +4713,14 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  if (insn)
+ 	    return insn;
+ 
+-	  if (lsx_p)
++	  if (lsx_p || lasx_p)
+ 	    {
+ 	      switch (GET_MODE_SIZE (mode))
+ 		{
+ 		case 16:
+ 		  return "vld\t%w0,%1";
++		case 32:
++		  return "xvld\t%u0,%1";
+ 		default:
+ 		  gcc_unreachable ();
+ 		}
+@@ -5599,18 +5748,27 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+    'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+ 	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
+    't'	Like 'T', but with the EQ/NE cases reversed
+-   'V'	Print exact log2 of CONST_INT OP element 0 of a replicated
+-	  CONST_VECTOR in decimal.
++   'F'	Print the FPU branch condition for comparison OP.
++   'W'	Print the inverse of the FPU branch condition for comparison OP.
++   'w'	Print a LSX register.
++   'u'	Print a LASX register.
++   'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
++	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
++   't'	Like 'T', but with the EQ/NE cases reversed
++   'Y'	Print loongarch_fp_conditions[INTVAL (OP)]
++   'Z'	Print OP and a comma for 8CC, otherwise print nothing.
++   'z'	Print $0 if OP is zero, otherwise print OP normally.
+    'v'	Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI,
+ 	  V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively.
++   'V'	Print exact log2 of CONST_INT OP element 0 of a replicated
++	  CONST_VECTOR in decimal.
+    'W'	Print the inverse of the FPU branch condition for comparison OP.
+-   'w'	Print a LSX register.
+    'X'	Print CONST_INT OP in hexadecimal format.
+    'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format.
+    'Y'	Print loongarch_fp_conditions[INTVAL (OP)]
+    'y'	Print exact log2 of CONST_INT OP in decimal.
+    'Z'	Print OP and a comma for 8CC, otherwise print nothing.
+-   'z'	Print $r0 if OP is zero, otherwise print OP normally.  */
++   'z'	Print $0 if OP is zero, otherwise print OP normally.  */
+ 
+ static void
+ loongarch_print_operand (FILE *file, rtx op, int letter)
+@@ -5752,46 +5910,11 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
+-    case 'v':
+-      switch (GET_MODE (op))
+-	{
+-	case E_V16QImode:
+-	case E_V32QImode:
+-	  fprintf (file, "b");
+-	  break;
+-	case E_V8HImode:
+-	case E_V16HImode:
+-	  fprintf (file, "h");
+-	  break;
+-	case E_V4SImode:
+-	case E_V4SFmode:
+-	case E_V8SImode:
+-	case E_V8SFmode:
+-	  fprintf (file, "w");
+-	  break;
+-	case E_V2DImode:
+-	case E_V2DFmode:
+-	case E_V4DImode:
+-	case E_V4DFmode:
+-	  fprintf (file, "d");
+-	  break;
+-	default:
+-	  output_operand_lossage ("invalid use of '%%%c'", letter);
+-	}
+-      break;
+-
+     case 'W':
+       loongarch_print_float_branch_condition (file, reverse_condition (code),
+ 					      letter);
+       break;
+ 
+-    case 'w':
+-      if (code == REG && LSX_REG_P (REGNO (op)))
+-	fprintf (file, "$vr%s", &reg_names[REGNO (op)][2]);
+-      else
+-	output_operand_lossage ("invalid use of '%%%c'", letter);
+-      break;
+-
+     case 'x':
+       if (CONST_INT_P (op))
+ 	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
+@@ -5833,6 +5956,48 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+       fputc (',', file);
+       break;
+ 
++    case 'w':
++      if (code == REG && LSX_REG_P (REGNO (op)))
++	fprintf (file, "$vr%s", &reg_names[REGNO (op)][2]);
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
++
++    case 'u':
++      if (code == REG && LASX_REG_P (REGNO (op)))
++	fprintf (file, "$xr%s", &reg_names[REGNO (op)][2]);
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
++
++    case 'v':
++      switch (GET_MODE (op))
++	{
++	case E_V16QImode:
++	case E_V32QImode:
++	  fprintf (file, "b");
++	  break;
++	case E_V8HImode:
++	case E_V16HImode:
++	  fprintf (file, "h");
++	  break;
++	case E_V4SImode:
++	case E_V4SFmode:
++	case E_V8SImode:
++	case E_V8SFmode:
++	  fprintf (file, "w");
++	  break;
++	case E_V2DImode:
++	case E_V2DFmode:
++	case E_V4DImode:
++	case E_V4DFmode:
++	  fprintf (file, "d");
++	  break;
++	default:
++	  output_operand_lossage ("invalid use of '%%%c'", letter);
++	}
++      break;
++
+     default:
+       switch (code)
+ 	{
+@@ -6163,13 +6328,18 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
+   size = GET_MODE_SIZE (mode);
+   mclass = GET_MODE_CLASS (mode);
+ 
+-  if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode))
++  if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode)
++      && !LASX_SUPPORTED_MODE_P (mode))
+     return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
+ 
+   /* For LSX, allow TImode and 128-bit vector modes in all FPR.  */
+   if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode))
+     return true;
+ 
++  /* FIXED ME: For LASX, allow TImode and 256-bit vector modes in all FPR.  */
++  if (FP_REG_P (regno) && LASX_SUPPORTED_MODE_P (mode))
++    return true;
++
+   if (FP_REG_P (regno))
+     {
+       if (mclass == MODE_FLOAT
+@@ -6222,6 +6392,9 @@ loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode)
+       if (LSX_SUPPORTED_MODE_P (mode))
+ 	return 1;
+ 
++      if (LASX_SUPPORTED_MODE_P (mode))
++	return 1;
++
+       return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
+     }
+ 
+@@ -6251,7 +6424,10 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode)
+     {
+       if (loongarch_hard_regno_mode_ok (FP_REG_FIRST, mode))
+ 	{
+-	  if (LSX_SUPPORTED_MODE_P (mode))
++	  /* Fixed me.  */
++	  if (LASX_SUPPORTED_MODE_P (mode))
++	    size = MIN (size, UNITS_PER_LASX_REG);
++	  else if (LSX_SUPPORTED_MODE_P (mode))
+ 	    size = MIN (size, UNITS_PER_LSX_REG);
+ 	  else
+ 	    size = MIN (size, UNITS_PER_FPREG);
+@@ -6269,6 +6445,10 @@ static bool
+ loongarch_can_change_mode_class (machine_mode from, machine_mode to,
+ 				 reg_class_t rclass)
+ {
++  /* Allow conversions between different LSX/LASX vector modes.  */
++  if (LASX_SUPPORTED_MODE_P (from) && LASX_SUPPORTED_MODE_P (to))
++    return true;
++
+   /* Allow conversions between different LSX vector modes.  */
+   if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to))
+     return true;
+@@ -6292,7 +6472,8 @@ loongarch_mode_ok_for_mov_fmt_p (machine_mode mode)
+       return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
+ 
+     default:
+-      return LSX_SUPPORTED_MODE_P (mode);
++      return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode)
++	: LSX_SUPPORTED_MODE_P (mode);
+     }
+ }
+ 
+@@ -6494,7 +6675,8 @@ loongarch_valid_pointer_mode (scalar_int_mode mode)
+ static bool
+ loongarch_vector_mode_supported_p (machine_mode mode)
+ {
+-  return LSX_SUPPORTED_MODE_P (mode);
++  return ISA_HAS_LASX ? LASX_SUPPORTED_MODE_P (mode)
++    : LSX_SUPPORTED_MODE_P (mode);
+ }
+ 
+ /* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */
+@@ -6520,19 +6702,19 @@ loongarch_preferred_simd_mode (scalar_mode mode)
+   switch (mode)
+     {
+     case E_QImode:
+-      return E_V16QImode;
++      return ISA_HAS_LASX ? E_V32QImode : E_V16QImode;
+     case E_HImode:
+-      return E_V8HImode;
++      return ISA_HAS_LASX ? E_V16HImode : E_V8HImode;
+     case E_SImode:
+-      return E_V4SImode;
++      return ISA_HAS_LASX ? E_V8SImode : E_V4SImode;
+     case E_DImode:
+-      return E_V2DImode;
++      return ISA_HAS_LASX ? E_V4DImode : E_V2DImode;
+ 
+     case E_SFmode:
+-      return E_V4SFmode;
++      return ISA_HAS_LASX ? E_V8SFmode : E_V4SFmode;
+ 
+     case E_DFmode:
+-      return E_V2DFmode;
++      return ISA_HAS_LASX ? E_V4DFmode : E_V2DFmode;
+ 
+     default:
+       break;
+@@ -6543,7 +6725,12 @@ loongarch_preferred_simd_mode (scalar_mode mode)
+ static unsigned int
+ loongarch_autovectorize_vector_modes (vector_modes *modes, bool)
+ {
+-  if (ISA_HAS_LSX)
++  if (ISA_HAS_LASX)
++    {
++      modes->safe_push (V32QImode);
++      modes->safe_push (V16QImode);
++    }
++  else if (ISA_HAS_LSX)
+     {
+       modes->safe_push (V16QImode);
+     }
+@@ -6723,11 +6910,18 @@ const char *
+ loongarch_lsx_output_division (const char *division, rtx *operands)
+ {
+   const char *s;
++  machine_mode mode = GET_MODE (*operands);
+ 
+   s = division;
+   if (TARGET_CHECK_ZERO_DIV)
+     {
+-      if (ISA_HAS_LSX)
++      if (ISA_HAS_LASX && GET_MODE_SIZE (mode) == 32)
++	{
++	  output_asm_insn ("xvsetallnez.%v0\t$fcc7,%u2",operands);
++	  output_asm_insn (s, operands);
++	  output_asm_insn ("bcnez\t$fcc7,1f", operands);
++	}
++      else if (ISA_HAS_LSX)
+ 	{
+ 	  output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands);
+ 	  output_asm_insn (s, operands);
+@@ -7566,7 +7760,7 @@ loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d)
+   rtx_insn *insn;
+   unsigned i;
+ 
+-  if (!ISA_HAS_LSX)
++  if (!ISA_HAS_LSX && !ISA_HAS_LASX)
+     return false;
+ 
+   for (i = 0; i < d->nelt; i++)
+@@ -7590,40 +7784,484 @@ loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d)
+   return true;
+ }
+ 
+-void
+-loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
++/* Try to simplify a two vector permutation using 2 intra-lane interleave
++   insns and cross-lane shuffle for 32-byte vectors.  */
++
++static bool
++loongarch_expand_vec_perm_interleave (struct expand_vec_perm_d *d)
+ {
+-  machine_mode vmode = GET_MODE (target);
++  unsigned i, nelt;
++  rtx t1,t2,t3;
++  rtx (*gen_high) (rtx, rtx, rtx);
++  rtx (*gen_low) (rtx, rtx, rtx);
++  machine_mode mode = GET_MODE (d->target);
+ 
+-  switch (vmode)
++  if (d->one_vector_p)
++    return false;
++  if (ISA_HAS_LASX && GET_MODE_SIZE (d->vmode) == 32)
++    ;
++  else
++    return false;
++
++  nelt = d->nelt;
++  if (d->perm[0] != 0 && d->perm[0] != nelt / 2)
++    return false;
++  for (i = 0; i < nelt; i += 2)
++    if (d->perm[i] != d->perm[0] + i / 2
++	|| d->perm[i + 1] != d->perm[0] + i / 2 + nelt)
++      return false;
++
++  if (d->testing_p)
++    return true;
++
++  switch (d->vmode)
+     {
+-    case E_V16QImode:
+-      emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel));
++    case E_V32QImode:
++      gen_high = gen_lasx_xvilvh_b;
++      gen_low = gen_lasx_xvilvl_b;
+       break;
+-    case E_V2DFmode:
+-      emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0));
++    case E_V16HImode:
++      gen_high = gen_lasx_xvilvh_h;
++      gen_low = gen_lasx_xvilvl_h;
+       break;
+-    case E_V2DImode:
+-      emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0));
++    case E_V8SImode:
++      gen_high = gen_lasx_xvilvh_w;
++      gen_low = gen_lasx_xvilvl_w;
+       break;
+-    case E_V4SFmode:
+-      emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0));
++    case E_V4DImode:
++      gen_high = gen_lasx_xvilvh_d;
++      gen_low = gen_lasx_xvilvl_d;
+       break;
+-    case E_V4SImode:
+-      emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0));
++    case E_V8SFmode:
++      gen_high = gen_lasx_xvilvh_w_f;
++      gen_low = gen_lasx_xvilvl_w_f;
+       break;
+-    case E_V8HImode:
+-      emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0));
++    case E_V4DFmode:
++      gen_high = gen_lasx_xvilvh_d_f;
++      gen_low = gen_lasx_xvilvl_d_f;
+       break;
+     default:
+-      break;
++      gcc_unreachable ();
++    }
++
++  t1 = gen_reg_rtx (mode);
++  t2 = gen_reg_rtx (mode);
++  emit_insn (gen_high (t1, d->op0, d->op1));
++  emit_insn (gen_low (t2, d->op0, d->op1));
++  if (mode == V4DFmode || mode == V8SFmode)
++    {
++      t3 = gen_reg_rtx (V4DFmode);
++      if (d->perm[0])
++	emit_insn (gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1),
++					    gen_lowpart (V4DFmode, t2),
++					    GEN_INT (0x31)));
++      else
++	emit_insn (gen_lasx_xvpermi_q_v4df (t3, gen_lowpart (V4DFmode, t1),
++					    gen_lowpart (V4DFmode, t2),
++					    GEN_INT (0x20)));
+     }
++  else
++    {
++      t3 = gen_reg_rtx (V4DImode);
++      if (d->perm[0])
++	emit_insn (gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1),
++					    gen_lowpart (V4DImode, t2),
++					    GEN_INT (0x31)));
++      else
++	emit_insn (gen_lasx_xvpermi_q_v4di (t3, gen_lowpart (V4DImode, t1),
++					    gen_lowpart (V4DImode, t2),
++					    GEN_INT (0x20)));
++    }
++  emit_move_insn (d->target, gen_lowpart (mode, t3));
++  return true;
+ }
+ 
++/* Implement extract-even and extract-odd permutations.  */
++
+ static bool
+-loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
++loongarch_expand_vec_perm_even_odd_1 (struct expand_vec_perm_d *d, unsigned odd)
+ {
+-  int i;
++  rtx t1;
++  machine_mode mode = GET_MODE (d->target);
++
++  if (d->testing_p)
++    return true;
++
++  t1 = gen_reg_rtx (mode);
++
++  switch (d->vmode)
++    {
++    case E_V4DFmode:
++      /* Shuffle the lanes around into { 0 4 2 6 } and { 1 5 3 7 }.  */
++      if (odd)
++	emit_insn (gen_lasx_xvilvh_d_f (t1, d->op0, d->op1));
++      else
++	emit_insn (gen_lasx_xvilvl_d_f (t1, d->op0, d->op1));
++
++      /* Shuffle within the 256-bit lanes to produce the result required.
++	 { 0 2 4 6 } | { 1 3 5 7 }.  */
++      emit_insn (gen_lasx_xvpermi_d_v4df (d->target, t1, GEN_INT (0xd8)));
++      break;
++
++    case E_V4DImode:
++      if (odd)
++	emit_insn (gen_lasx_xvilvh_d (t1, d->op0, d->op1));
++      else
++	emit_insn (gen_lasx_xvilvl_d (t1, d->op0, d->op1));
++
++      emit_insn (gen_lasx_xvpermi_d_v4di (d->target, t1, GEN_INT (0xd8)));
++      break;
++
++    case E_V8SFmode:
++      /* Shuffle the lanes around into:
++	 { 0 2 8 a 4 6 c e } | { 1 3 9 b 5 7 d f }.  */
++      if (odd)
++	emit_insn (gen_lasx_xvpickod_w_f (t1, d->op0, d->op1));
++      else
++	emit_insn (gen_lasx_xvpickev_w_f (t1, d->op0, d->op1));
++
++      /* Shuffle within the 256-bit lanes to produce the result required.
++	 { 0 2 4 6 8 a c e } | { 1 3 5 7 9 b d f }.  */
++      emit_insn (gen_lasx_xvpermi_d_v8sf (d->target, t1, GEN_INT (0xd8)));
++      break;
++
++    case E_V8SImode:
++      if (odd)
++	emit_insn (gen_lasx_xvpickod_w (t1, d->op0, d->op1));
++      else
++	emit_insn (gen_lasx_xvpickev_w (t1, d->op0, d->op1));
++
++      emit_insn (gen_lasx_xvpermi_d_v8si (d->target, t1, GEN_INT (0xd8)));
++      break;
++
++    case E_V16HImode:
++      if (odd)
++	emit_insn (gen_lasx_xvpickod_h (t1, d->op0, d->op1));
++      else
++	emit_insn (gen_lasx_xvpickev_h (t1, d->op0, d->op1));
++
++      emit_insn (gen_lasx_xvpermi_d_v16hi (d->target, t1, GEN_INT (0xd8)));
++      break;
++
++    case E_V32QImode:
++      if (odd)
++	emit_insn (gen_lasx_xvpickod_b (t1, d->op0, d->op1));
++      else
++	emit_insn (gen_lasx_xvpickev_b (t1, d->op0, d->op1));
++
++      emit_insn (gen_lasx_xvpermi_d_v32qi (d->target, t1, GEN_INT (0xd8)));
++      break;
++
++    default:
++      gcc_unreachable ();
++    }
++
++  return true;
++}
++
++/* Pattern match extract-even and extract-odd permutations.  */
++
++static bool
++loongarch_expand_vec_perm_even_odd (struct expand_vec_perm_d *d)
++{
++  unsigned i, odd, nelt = d->nelt;
++  if (!ISA_HAS_LASX)
++    return false;
++
++  odd = d->perm[0];
++  if (odd != 0 && odd != 1)
++    return false;
++
++  for (i = 1; i < nelt; ++i)
++    if (d->perm[i] != 2 * i + odd)
++      return false;
++
++  return loongarch_expand_vec_perm_even_odd_1 (d, odd);
++}
++
++/* Expand a variable vector permutation for LASX.  */
++
++void
++loongarch_expand_vec_perm_1 (rtx operands[])
++{
++  rtx target = operands[0];
++  rtx op0 = operands[1];
++  rtx op1 = operands[2];
++  rtx mask = operands[3];
++
++  bool one_operand_shuffle = rtx_equal_p (op0, op1);
++  rtx t1 = NULL;
++  rtx t2 = NULL;
++  rtx t3, t4, t5, t6, vt = NULL;
++  rtx vec[32] = {NULL};
++  machine_mode mode = GET_MODE (op0);
++  machine_mode maskmode = GET_MODE (mask);
++  int w, i;
++
++  /* Number of elements in the vector.  */
++  w = GET_MODE_NUNITS (mode);
++
++  rtx round_data[MAX_VECT_LEN];
++  rtx round_reg, round_data_rtx;
++
++  if (mode != E_V32QImode)
++    {
++      for (int i = 0; i < w; i += 1)
++	{
++	  round_data[i] = GEN_INT (0x1f);
++	}
++
++      if (mode == E_V4DFmode)
++	{
++	  round_data_rtx = gen_rtx_CONST_VECTOR (E_V4DImode,
++						 gen_rtvec_v (w, round_data));
++	  round_reg = gen_reg_rtx (E_V4DImode);
++	}
++      else if (mode == E_V8SFmode)
++	{
++
++	  round_data_rtx = gen_rtx_CONST_VECTOR (E_V8SImode,
++						 gen_rtvec_v (w, round_data));
++	  round_reg = gen_reg_rtx (E_V8SImode);
++	}
++      else
++	{
++	  round_data_rtx = gen_rtx_CONST_VECTOR (mode,
++						 gen_rtvec_v (w, round_data));
++	  round_reg = gen_reg_rtx (mode);
++	}
++
++      emit_move_insn (round_reg, round_data_rtx);
++      switch (mode)
++	{
++	case E_V32QImode:
++	  emit_insn (gen_andv32qi3 (mask, mask, round_reg));
++	  break;
++	case E_V16HImode:
++	  emit_insn (gen_andv16hi3 (mask, mask, round_reg));
++	  break;
++	case E_V8SImode:
++	case E_V8SFmode:
++	  emit_insn (gen_andv8si3 (mask, mask, round_reg));
++	  break;
++	case E_V4DImode:
++	case E_V4DFmode:
++	  emit_insn (gen_andv4di3 (mask, mask, round_reg));
++	  break;
++	default:
++	  gcc_unreachable ();
++	  break;
++	}
++    }
++
++  if (mode == V4DImode || mode == V4DFmode)
++    {
++      maskmode = mode = V8SImode;
++      w = 8;
++      t1 = gen_reg_rtx (maskmode);
++
++      /* Replicate the low bits of the V4DImode mask into V8SImode:
++	 mask = { A B C D }
++	 t1 = { A A B B C C D D }.  */
++      for (i = 0; i < w / 2; ++i)
++	vec[i*2 + 1] = vec[i*2] = GEN_INT (i * 2);
++      vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
++      vt = force_reg (maskmode, vt);
++      mask = gen_lowpart (maskmode, mask);
++      emit_insn (gen_lasx_xvperm_w (t1, mask, vt));
++
++      /* Multiply the shuffle indicies by two.  */
++      t1 = expand_simple_binop (maskmode, PLUS, t1, t1, t1, 1,
++				OPTAB_DIRECT);
++
++      /* Add one to the odd shuffle indicies:
++	 t1 = { A*2, A*2+1, B*2, B*2+1, ... }.  */
++      for (i = 0; i < w / 2; ++i)
++	{
++	  vec[i * 2] = const0_rtx;
++	  vec[i * 2 + 1] = const1_rtx;
++	}
++      vt = gen_rtx_CONST_VECTOR (maskmode, gen_rtvec_v (w, vec));
++      vt = validize_mem (force_const_mem (maskmode, vt));
++      t1 = expand_simple_binop (maskmode, PLUS, t1, vt, t1, 1,
++				OPTAB_DIRECT);
++
++      /* Continue as if V8SImode (resp.  V32QImode) was used initially.  */
++      operands[3] = mask = t1;
++      target = gen_reg_rtx (mode);
++      op0 = gen_lowpart (mode, op0);
++      op1 = gen_lowpart (mode, op1);
++    }
++
++  switch (mode)
++    {
++    case E_V8SImode:
++      if (one_operand_shuffle)
++	{
++	  emit_insn (gen_lasx_xvperm_w (target, op0, mask));
++	  if (target != operands[0])
++	    emit_move_insn (operands[0],
++			    gen_lowpart (GET_MODE (operands[0]), target));
++	}
++      else
++	{
++	  t1 = gen_reg_rtx (V8SImode);
++	  t2 = gen_reg_rtx (V8SImode);
++	  emit_insn (gen_lasx_xvperm_w (t1, op0, mask));
++	  emit_insn (gen_lasx_xvperm_w (t2, op1, mask));
++	  goto merge_two;
++	}
++      return;
++
++    case E_V8SFmode:
++      mask = gen_lowpart (V8SImode, mask);
++      if (one_operand_shuffle)
++	emit_insn (gen_lasx_xvperm_w_f (target, op0, mask));
++      else
++	{
++	  t1 = gen_reg_rtx (V8SFmode);
++	  t2 = gen_reg_rtx (V8SFmode);
++	  emit_insn (gen_lasx_xvperm_w_f (t1, op0, mask));
++	  emit_insn (gen_lasx_xvperm_w_f (t2, op1, mask));
++	  goto merge_two;
++	}
++      return;
++
++    case E_V16HImode:
++      if (one_operand_shuffle)
++	{
++	  t1 = gen_reg_rtx (V16HImode);
++	  t2 = gen_reg_rtx (V16HImode);
++	  emit_insn (gen_lasx_xvpermi_d_v16hi (t1, op0, GEN_INT (0x44)));
++	  emit_insn (gen_lasx_xvpermi_d_v16hi (t2, op0, GEN_INT (0xee)));
++	  emit_insn (gen_lasx_xvshuf_h (target, mask, t2, t1));
++	}
++      else
++	{
++	  t1 = gen_reg_rtx (V16HImode);
++	  t2 = gen_reg_rtx (V16HImode);
++	  t3 = gen_reg_rtx (V16HImode);
++	  t4 = gen_reg_rtx (V16HImode);
++	  t5 = gen_reg_rtx (V16HImode);
++	  t6 = gen_reg_rtx (V16HImode);
++	  emit_insn (gen_lasx_xvpermi_d_v16hi (t3, op0, GEN_INT (0x44)));
++	  emit_insn (gen_lasx_xvpermi_d_v16hi (t4, op0, GEN_INT (0xee)));
++	  emit_insn (gen_lasx_xvshuf_h (t1, mask, t4, t3));
++	  emit_insn (gen_lasx_xvpermi_d_v16hi (t5, op1, GEN_INT (0x44)));
++	  emit_insn (gen_lasx_xvpermi_d_v16hi (t6, op1, GEN_INT (0xee)));
++	  emit_insn (gen_lasx_xvshuf_h (t2, mask, t6, t5));
++	  goto merge_two;
++	}
++      return;
++
++    case E_V32QImode:
++      if (one_operand_shuffle)
++	{
++	  t1 = gen_reg_rtx (V32QImode);
++	  t2 = gen_reg_rtx (V32QImode);
++	  emit_insn (gen_lasx_xvpermi_d_v32qi (t1, op0, GEN_INT (0x44)));
++	  emit_insn (gen_lasx_xvpermi_d_v32qi (t2, op0, GEN_INT (0xee)));
++	  emit_insn (gen_lasx_xvshuf_b (target, t2, t1, mask));
++	}
++      else
++	{
++	  t1 = gen_reg_rtx (V32QImode);
++	  t2 = gen_reg_rtx (V32QImode);
++	  t3 = gen_reg_rtx (V32QImode);
++	  t4 = gen_reg_rtx (V32QImode);
++	  t5 = gen_reg_rtx (V32QImode);
++	  t6 = gen_reg_rtx (V32QImode);
++	  emit_insn (gen_lasx_xvpermi_d_v32qi (t3, op0, GEN_INT (0x44)));
++	  emit_insn (gen_lasx_xvpermi_d_v32qi (t4, op0, GEN_INT (0xee)));
++	  emit_insn (gen_lasx_xvshuf_b (t1, t4, t3, mask));
++	  emit_insn (gen_lasx_xvpermi_d_v32qi (t5, op1, GEN_INT (0x44)));
++	  emit_insn (gen_lasx_xvpermi_d_v32qi (t6, op1, GEN_INT (0xee)));
++	  emit_insn (gen_lasx_xvshuf_b (t2, t6, t5, mask));
++	  goto merge_two;
++	}
++      return;
++
++    default:
++      gcc_assert (GET_MODE_SIZE (mode) == 32);
++      break;
++    }
++
++merge_two:
++  /* Then merge them together.  The key is whether any given control
++     element contained a bit set that indicates the second word.  */
++  rtx xops[6];
++  mask = operands[3];
++  vt = GEN_INT (w);
++  vt = gen_const_vec_duplicate (maskmode, vt);
++  vt = force_reg (maskmode, vt);
++  mask = expand_simple_binop (maskmode, AND, mask, vt,
++			      NULL_RTX, 0, OPTAB_DIRECT);
++  if (GET_MODE (target) != mode)
++    target = gen_reg_rtx (mode);
++  xops[0] = target;
++  xops[1] = gen_lowpart (mode, t2);
++  xops[2] = gen_lowpart (mode, t1);
++  xops[3] = gen_rtx_EQ (maskmode, mask, vt);
++  xops[4] = mask;
++  xops[5] = vt;
++
++  loongarch_expand_vec_cond_expr (mode, maskmode, xops);
++  if (target != operands[0])
++    emit_move_insn (operands[0],
++		    gen_lowpart (GET_MODE (operands[0]), target));
++}
++
++void
++loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
++{
++  machine_mode vmode = GET_MODE (target);
++  auto nelt = GET_MODE_NUNITS (vmode);
++  auto round_reg = gen_reg_rtx (vmode);
++  rtx round_data[MAX_VECT_LEN];
++
++  for (int i = 0; i < nelt; i += 1)
++    {
++      round_data[i] = GEN_INT (0x1f);
++    }
++
++  rtx round_data_rtx = gen_rtx_CONST_VECTOR (vmode, gen_rtvec_v (nelt, round_data));
++  emit_move_insn (round_reg, round_data_rtx);
++
++  switch (vmode)
++    {
++    case E_V16QImode:
++      emit_insn (gen_andv16qi3 (sel, sel, round_reg));
++      emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel));
++      break;
++    case E_V2DFmode:
++      emit_insn (gen_andv2di3 (sel, sel, round_reg));
++      emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0));
++      break;
++    case E_V2DImode:
++      emit_insn (gen_andv2di3 (sel, sel, round_reg));
++      emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0));
++      break;
++    case E_V4SFmode:
++      emit_insn (gen_andv4si3 (sel, sel, round_reg));
++      emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0));
++      break;
++    case E_V4SImode:
++      emit_insn (gen_andv4si3 (sel, sel, round_reg));
++      emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0));
++      break;
++    case E_V8HImode:
++      emit_insn (gen_andv8hi3 (sel, sel, round_reg));
++      emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0));
++      break;
++    default:
++      break;
++    }
++}
++
++static bool
++loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
++{
++  int i;
+   rtx target, op0, op1, sel, tmp;
+   rtx rperm[MAX_VECT_LEN];
+ 
+@@ -7724,25 +8362,1302 @@ loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
+ 	return true;
+     }
+ 
+-  if (loongarch_expand_lsx_shuffle (d))
+-    return true;
+-  return false;
+-}
+-
+-/* Implementation of constant vector permuatation.  This function identifies
+- * recognized pattern of permuation selector argument, and use one or more
+- * instruction(s) to finish the permutation job correctly.  For unsupported
+- * patterns, it will return false.  */
+-
+-static bool
+-loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
+-{
+-  /* Although we have the LSX vec_perm template, there's still some
+-     128bit vector permuatation operations send to vectorize_vec_perm_const.
+-     In this case, we just simpliy wrap them by single vshuf.* instruction,
+-     because LSX vshuf.* instruction just have the same behavior that GCC
+-     expects.  */
+-  return loongarch_try_expand_lsx_vshuf_const (d);
++  if (loongarch_expand_lsx_shuffle (d))
++    return true;
++  if (loongarch_expand_vec_perm_even_odd (d))
++    return true;
++  if (loongarch_expand_vec_perm_interleave (d))
++    return true;
++  return false;
++}
++
++/* Following are the assist function for const vector permutation support.  */
++static bool
++loongarch_is_quad_duplicate (struct expand_vec_perm_d *d)
++{
++  if (d->perm[0] >= d->nelt / 2)
++    return false;
++
++  bool result = true;
++  unsigned char lhs = d->perm[0];
++  unsigned char rhs = d->perm[d->nelt / 2];
++
++  if ((rhs - lhs) != d->nelt / 2)
++    return false;
++
++  for (int i = 1; i < d->nelt; i += 1)
++    {
++      if ((i < d->nelt / 2) && (d->perm[i] != lhs))
++	{
++	  result = false;
++	  break;
++	}
++      if ((i > d->nelt / 2) && (d->perm[i] != rhs))
++	{
++	  result = false;
++	  break;
++	}
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_double_duplicate (struct expand_vec_perm_d *d)
++{
++  if (!d->one_vector_p)
++    return false;
++
++  if (d->nelt < 8)
++    return false;
++
++  bool result = true;
++  unsigned char buf = d->perm[0];
++
++  for (int i = 1; i < d->nelt; i += 2)
++    {
++      if (d->perm[i] != buf)
++	{
++	  result = false;
++	  break;
++	}
++      if (d->perm[i - 1] != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += d->nelt / 4;
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_odd_extraction (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = 1;
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 2;
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_even_extraction (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = 0;
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 1;
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_extraction_permutation (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = d->perm[0];
++
++  if (buf != 0 || buf != d->nelt)
++    return false;
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 2;
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_center_extraction (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned buf = d->nelt / 2;
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 1;
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_reversing_permutation (struct expand_vec_perm_d *d)
++{
++  if (!d->one_vector_p)
++    return false;
++
++  bool result = true;
++  unsigned char buf = d->nelt - 1;
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (d->perm[i] != buf)
++	{
++	  result = false;
++	  break;
++	}
++
++      buf -= 1;
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_di_misalign_extract (struct expand_vec_perm_d *d)
++{
++  if (d->nelt != 4 && d->nelt != 8)
++    return false;
++
++  bool result = true;
++  unsigned char buf;
++
++  if (d->nelt == 4)
++    {
++      buf = 1;
++      for (int i = 0; i < d->nelt; i += 1)
++	{
++	  if (buf != d->perm[i])
++	    {
++	      result = false;
++	      break;
++	    }
++
++	  buf += 1;
++	}
++    }
++  else if (d->nelt == 8)
++    {
++      buf = 2;
++      for (int i = 0; i < d->nelt; i += 1)
++	{
++	  if (buf != d->perm[i])
++	    {
++	      result = false;
++	      break;
++	    }
++
++	  buf += 1;
++	}
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_si_misalign_extract (struct expand_vec_perm_d *d)
++{
++  if (d->vmode != E_V8SImode && d->vmode != E_V8SFmode)
++    return false;
++  bool result = true;
++  unsigned char buf = 1;
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 1;
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_lasx_lowpart_interleave (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = 0;
++
++  for (int i = 0;i < d->nelt; i += 2)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 1;
++    }
++
++  if (result)
++    {
++      buf = d->nelt;
++      for (int i = 1; i < d->nelt; i += 2)
++	{
++	  if (buf != d->perm[i])
++	    {
++	      result = false;
++	      break;
++	    }
++	  buf += 1;
++	}
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_lasx_lowpart_interleave_2 (struct expand_vec_perm_d *d)
++{
++  if (d->vmode != E_V32QImode)
++    return false;
++  bool result = true;
++  unsigned char buf = 0;
++
++#define COMPARE_SELECTOR(INIT, BEGIN, END) \
++  buf = INIT; \
++  for (int i = BEGIN; i < END && result; i += 1) \
++    { \
++      if (buf != d->perm[i]) \
++	{ \
++	  result = false; \
++	  break; \
++	} \
++      buf += 1; \
++    }
++
++  COMPARE_SELECTOR (0, 0, 8);
++  COMPARE_SELECTOR (32, 8, 16);
++  COMPARE_SELECTOR (8, 16, 24);
++  COMPARE_SELECTOR (40, 24, 32);
++
++#undef COMPARE_SELECTOR
++  return result;
++}
++
++static bool
++loongarch_is_lasx_lowpart_extract (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = 0;
++
++  for (int i = 0; i < d->nelt / 2; i += 1)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 1;
++    }
++
++  if (result)
++    {
++      buf = d->nelt;
++      for (int i = d->nelt / 2; i < d->nelt; i += 1)
++	{
++	  if (buf != d->perm[i])
++	    {
++	      result = false;
++	      break;
++	    }
++	  buf += 1;
++	}
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_lasx_highpart_interleave (expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = d->nelt / 2;
++
++  for (int i = 0; i < d->nelt; i += 2)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++      buf += 1;
++    }
++
++  if (result)
++    {
++      buf = d->nelt + d->nelt / 2;
++      for (int i = 1; i < d->nelt;i += 2)
++	{
++	  if (buf != d->perm[i])
++	    {
++	      result = false;
++	      break;
++	    }
++	  buf += 1;
++	}
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_lasx_highpart_interleave_2 (struct expand_vec_perm_d *d)
++{
++  if (d->vmode != E_V32QImode)
++    return false;
++
++  bool result = true;
++  unsigned char buf = 0;
++
++#define COMPARE_SELECTOR(INIT, BEGIN, END) \
++  buf = INIT; \
++  for (int i = BEGIN; i < END && result; i += 1) \
++    { \
++      if (buf != d->perm[i]) \
++	{ \
++	  result = false; \
++	  break; \
++	} \
++      buf += 1; \
++    }
++
++  COMPARE_SELECTOR (16, 0, 8);
++  COMPARE_SELECTOR (48, 8, 16);
++  COMPARE_SELECTOR (24, 16, 24);
++  COMPARE_SELECTOR (56, 24, 32);
++
++#undef COMPARE_SELECTOR
++  return result;
++}
++
++static bool
++loongarch_is_elem_duplicate (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++  unsigned char buf = d->perm[0];
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (buf != d->perm[i])
++	{
++	  result = false;
++	  break;
++	}
++    }
++
++  return result;
++}
++
++inline bool
++loongarch_is_op_reverse_perm (struct expand_vec_perm_d *d)
++{
++  return (d->vmode == E_V4DFmode)
++    && d->perm[0] == 2 && d->perm[1] == 3
++    && d->perm[2] == 0 && d->perm[3] == 1;
++}
++
++static bool
++loongarch_is_single_op_perm (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++
++  for (int i = 0; i < d->nelt; i += 1)
++    {
++      if (d->perm[i] >= d->nelt)
++	{
++	  result = false;
++	  break;
++	}
++    }
++
++  return result;
++}
++
++static bool
++loongarch_is_divisible_perm (struct expand_vec_perm_d *d)
++{
++  bool result = true;
++
++  for (int i = 0; i < d->nelt / 2; i += 1)
++    {
++      if (d->perm[i] >= d->nelt)
++	{
++	  result = false;
++	  break;
++	}
++    }
++
++  if (result)
++    {
++      for (int i = d->nelt / 2; i < d->nelt; i += 1)
++	{
++	  if (d->perm[i] < d->nelt)
++	    {
++	      result = false;
++	      break;
++	    }
++	}
++    }
++
++  return result;
++}
++
++inline bool
++loongarch_is_triple_stride_extract (struct expand_vec_perm_d *d)
++{
++  return (d->vmode == E_V4DImode || d->vmode == E_V4DFmode)
++    && d->perm[0] == 1 && d->perm[1] == 4
++    && d->perm[2] == 7 && d->perm[3] == 0;
++}
++
++/* In LASX, some permutation insn does not have the behavior that gcc expects
++ * when compiler wants to emit a vector permutation.
++ *
++ * 1. What GCC provides via vectorize_vec_perm_const ()'s parameter:
++ * When GCC wants to perform a vector permutation, it provides two op
++ * registers, one target register, and a selector.
++ * In const vector permutation case, GCC provides selector as a char array
++ * that contains original value; in variable vector permutation
++ * (performed via vec_perm insn template), it provides a vector register.
++ * We assume that nelt is the elements numbers inside single vector in current
++ * 256bit vector mode.
++ *
++ * 2. What GCC expects to perform:
++ * Two op registers (op0, op1) will "combine" into a 512bit temp vector storage
++ * that has 2*nelt elements inside it; the low 256bit is op0, and high 256bit
++ * is op1, then the elements are indexed as below:
++ *		  0 ~ nelt - 1		nelt ~ 2 * nelt - 1
++ *	  |-------------------------|-------------------------|
++ *		Low 256bit (op0)	High 256bit (op1)
++ * For example, the second element in op1 (V8SImode) will be indexed with 9.
++ * Selector is a vector that has the same mode and number of elements as
++ * op0, op1 and target; it looks like this:
++ *	      0 ~ nelt - 1
++ *	  |-------------------------|
++ *	      256bit (selector)
++ * It describes which element from 512bit temp vector storage will fit into
++ * target's every element slot.
++ * GCC expects that every element in selector can be ANY indices of 512bit
++ * vector storage (Selector can pick literally any element from op0 and op1, and
++ * then fits into any place of target register). This is also what LSX 128bit
++ * vshuf.* instruction do similarly, so we can handle 128bit vector permutation
++ * by single instruction easily.
++ *
++ * 3. What LASX permutation instruction does:
++ * In short, it just executes two independent 128bit vector permutations, and
++ * that is the reason we need to do the jobs below.  We will explain it.
++ * op0, op1, target, and selector will be separated into high 128bit and low
++ * 128bit, and do permutation as the description below:
++ *
++ *  a) op0's low 128bit and op1's low 128bit "combines" into a 256bit temp
++ * vector storage (TVS1), elements are indexed as below:
++ *	    0 ~ nelt / 2 - 1	  nelt / 2 ~ nelt - 1
++ *	|---------------------|---------------------| TVS1
++ *	    op0's low 128bit      op1's low 128bit
++ *    op0's high 128bit and op1's high 128bit are "combined" into TVS2 in the
++ *    same way.
++ *	    0 ~ nelt / 2 - 1	  nelt / 2 ~ nelt - 1
++ *	|---------------------|---------------------| TVS2
++ *	    op0's high 128bit	op1's high 128bit
++ *  b) Selector's low 128bit describes which elements from TVS1 will fit into
++ *  target vector's low 128bit.  No TVS2 elements are allowed.
++ *  c) Selector's high 128bit describes which elements from TVS2 will fit into
++ *  target vector's high 128bit.  No TVS1 elements are allowed.
++ *
++ * As we can see, if we want to handle vector permutation correctly, we can
++ * achieve it in three ways:
++ *  a) Modify selector's elements, to make sure that every elements can inform
++ *  correct value that will put into target vector.
++ *  b) Generate extra instruction before/after permutation instruction, for
++ *  adjusting op vector or target vector, to make sure target vector's value is
++ *  what GCC expects.
++ *  c) Use other instructions to process op and put correct result into target.
++ */
++
++/* Implementation of constant vector permutation.  This function identifies
++ * recognized pattern of permutation selector argument, and uses one or more
++ * instruction(s) to finish the permutation job correctly.  For unsupported
++ * patterns, it will return false.  */
++
++static bool
++loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
++{
++  /* Although we have the LSX vec_perm template, there's still some
++     128bit vector permutation operations sent to vectorize_vec_perm_const.
++     In this case, we just simply wrap them by single vshuf.* instruction,
++     because LSX vshuf.* instruction just have the same behavior that GCC
++     expects.  */
++  if (GET_MODE_SIZE (d->vmode) == 16)
++    return loongarch_try_expand_lsx_vshuf_const (d);
++  else
++    return false;
++
++  bool ok = false, reverse_hi_lo = false, extract_ev_od = false,
++       use_alt_op = false;
++  unsigned char idx;
++  int i;
++  rtx target, op0, op1, sel, tmp;
++  rtx op0_alt = NULL_RTX, op1_alt = NULL_RTX;
++  rtx rperm[MAX_VECT_LEN];
++  unsigned int remapped[MAX_VECT_LEN];
++
++  /* Try to figure out whether is a recognized permutation selector pattern, if
++     yes, we will reassign some elements with new value in selector argument,
++     and in some cases we will generate some assist insn to complete the
++     permutation. (Even in some cases, we use other insn to impl permutation
++     instead of xvshuf!)
++
++     Make sure to check d->testing_p is false every time if you want to emit new
++     insn, unless you want to crash into ICE directly.  */
++  if (loongarch_is_quad_duplicate (d))
++    {
++      /* Selector example: E_V8SImode, { 0, 0, 0, 0, 4, 4, 4, 4 }
++	 copy first elem from original selector to all elem in new selector.  */
++      idx = d->perm[0];
++      for (i = 0; i < d->nelt; i += 1)
++	{
++	  remapped[i] = idx;
++	}
++      /* Selector after: { 0, 0, 0, 0, 0, 0, 0, 0 }.  */
++    }
++  else if (loongarch_is_double_duplicate (d))
++    {
++      /* Selector example: E_V8SImode, { 1, 1, 3, 3, 5, 5, 7, 7 }
++	 one_vector_p == true.  */
++      for (i = 0; i < d->nelt / 2; i += 1)
++	{
++	  idx = d->perm[i];
++	  remapped[i] = idx;
++	  remapped[i + d->nelt / 2] = idx;
++	}
++      /* Selector after: { 1, 1, 3, 3, 1, 1, 3, 3 }.  */
++    }
++  else if (loongarch_is_odd_extraction (d)
++	   || loongarch_is_even_extraction (d))
++    {
++      /* Odd extraction selector sample: E_V4DImode, { 1, 3, 5, 7 }
++	 Selector after: { 1, 3, 1, 3 }.
++	 Even extraction selector sample: E_V4DImode, { 0, 2, 4, 6 }
++	 Selector after: { 0, 2, 0, 2 }.  */
++      for (i = 0; i < d->nelt / 2; i += 1)
++	{
++	  idx = d->perm[i];
++	  remapped[i] = idx;
++	  remapped[i + d->nelt / 2] = idx;
++	}
++      /* Additional insn is required for correct result.  See codes below.  */
++      extract_ev_od = true;
++    }
++  else if (loongarch_is_extraction_permutation (d))
++    {
++      /* Selector sample: E_V8SImode, { 0, 1, 2, 3, 4, 5, 6, 7 }.  */
++      if (d->perm[0] == 0)
++	{
++	  for (i = 0; i < d->nelt / 2; i += 1)
++	    {
++	      remapped[i] = i;
++	      remapped[i + d->nelt / 2] = i;
++	    }
++	}
++      else
++	{
++	  /* { 8, 9, 10, 11, 12, 13, 14, 15 }.  */
++	  for (i = 0; i < d->nelt / 2; i += 1)
++	    {
++	      idx = i + d->nelt / 2;
++	      remapped[i] = idx;
++	      remapped[i + d->nelt / 2] = idx;
++	    }
++	}
++      /* Selector after: { 0, 1, 2, 3, 0, 1, 2, 3 }
++	 { 8, 9, 10, 11, 8, 9, 10, 11 }  */
++    }
++  else if (loongarch_is_center_extraction (d))
++    {
++      /* sample: E_V4DImode, { 2, 3, 4, 5 }
++	 In this condition, we can just copy high 128bit of op0 and low 128bit
++	 of op1 to the target register by using xvpermi.q insn.  */
++      if (!d->testing_p)
++	{
++	  emit_move_insn (d->target, d->op1);
++	  switch (d->vmode)
++	    {
++	      case E_V4DImode:
++		emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target,
++						    d->op0, GEN_INT (0x21)));
++		break;
++	      case E_V4DFmode:
++		emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target,
++						    d->op0, GEN_INT (0x21)));
++		break;
++	      case E_V8SImode:
++		emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target,
++						    d->op0, GEN_INT (0x21)));
++		break;
++	      case E_V8SFmode:
++		emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target,
++						    d->op0, GEN_INT (0x21)));
++		break;
++	      case E_V16HImode:
++		emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target,
++						     d->op0, GEN_INT (0x21)));
++		break;
++	      case E_V32QImode:
++		emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target,
++						     d->op0, GEN_INT (0x21)));
++		break;
++	      default:
++		break;
++	    }
++	}
++      ok = true;
++      /* Finish the function directly.  */
++      goto expand_perm_const_2_end;
++    }
++  else if (loongarch_is_reversing_permutation (d))
++    {
++      /* Selector sample: E_V8SImode, { 7, 6, 5, 4, 3, 2, 1, 0 }
++	 one_vector_p == true  */
++      idx = d->nelt / 2 - 1;
++      for (i = 0; i < d->nelt / 2; i += 1)
++	{
++	  remapped[i] = idx;
++	  remapped[i + d->nelt / 2] = idx;
++	  idx -= 1;
++	}
++      /* Selector after: { 3, 2, 1, 0, 3, 2, 1, 0 }
++	 Additional insn will be generated to swap hi and lo 128bit of target
++	 register.  */
++      reverse_hi_lo = true;
++    }
++  else if (loongarch_is_di_misalign_extract (d)
++	   || loongarch_is_si_misalign_extract (d))
++    {
++      /* Selector Sample:
++	 DI misalign: E_V4DImode, { 1, 2, 3, 4 }
++	 SI misalign: E_V8SImode, { 1, 2, 3, 4, 5, 6, 7, 8 }  */
++      if (!d->testing_p)
++	{
++	  /* Copy original op0/op1 value to new temp register.
++	     In some cases, operand register may be used in multiple places, so
++	     we need a new register instead of modifying it, to avoid runtime
++	     crashing or wrong value after execution.  */
++	  use_alt_op = true;
++	  op1_alt = gen_reg_rtx (d->vmode);
++	  emit_move_insn (op1_alt, d->op1);
++
++	  /* Adjust op1 for selecting correct value in high 128bit of target
++	     register.
++	     op1: E_V4DImode, { 4, 5, 6, 7 } -> { 2, 3, 4, 5 }.  */
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
++					      conv_op0, GEN_INT (0x21)));
++
++	  for (i = 0; i < d->nelt / 2; i += 1)
++	    {
++	      remapped[i] = d->perm[i];
++	      remapped[i + d->nelt / 2] = d->perm[i];
++	    }
++	  /* Selector after:
++	     DI misalign: { 1, 2, 1, 2 }
++	     SI misalign: { 1, 2, 3, 4, 1, 2, 3, 4 }  */
++	}
++    }
++  else if (loongarch_is_lasx_lowpart_interleave (d))
++    {
++      /* Elements from op0's low 128bit and op1's low 128bit are inserted into
++	 target register alternately.
++	 sample: E_V4DImode, { 0, 4, 1, 5 }  */
++      if (!d->testing_p)
++	{
++	  /* Prepare temp register instead of modify original op.  */
++	  use_alt_op = true;
++	  op1_alt = gen_reg_rtx (d->vmode);
++	  op0_alt = gen_reg_rtx (d->vmode);
++	  emit_move_insn (op1_alt, d->op1);
++	  emit_move_insn (op0_alt, d->op0);
++
++	  /* Generate subreg for fitting into insn gen function.  */
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++
++	  /* Adjust op value in temp register.
++	     op0 = {0,1,2,3}, op1 = {4,5,0,1}  */
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
++					      conv_op0, GEN_INT (0x02)));
++	  /* op0 = {0,1,4,5}, op1 = {4,5,0,1}  */
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
++					      conv_op1, GEN_INT (0x01)));
++
++	  /* Remap indices in selector based on the location of index inside
++	     selector, and vector element numbers in current vector mode.  */
++
++	  /* Filling low 128bit of new selector.  */
++	  for (i = 0; i < d->nelt / 2; i += 1)
++	    {
++	      /* value in odd-indexed slot of low 128bit part of selector
++		 vector.  */
++	      remapped[i] = i % 2 != 0 ? d->perm[i] - d->nelt / 2 : d->perm[i];
++	    }
++	  /* Then filling the high 128bit.  */
++	  for (i = d->nelt / 2; i < d->nelt; i += 1)
++	    {
++	      /* value in even-indexed slot of high 128bit part of
++		 selector vector.  */
++	      remapped[i] = i % 2 == 0
++		? d->perm[i] + (d->nelt / 2) * 3 : d->perm[i];
++	    }
++	}
++    }
++  else if (loongarch_is_lasx_lowpart_interleave_2 (d))
++    {
++      /* Special lowpart interleave case in V32QI vector mode.  It does the same
++	 thing as we can see in if branch that above this line.
++	 Selector sample: E_V32QImode,
++	 {0, 1, 2, 3, 4, 5, 6, 7, 32, 33, 34, 35, 36, 37, 38, 39, 8,
++	 9, 10, 11, 12, 13, 14, 15, 40, 41, 42, 43, 44, 45, 46, 47}  */
++      if (!d->testing_p)
++	{
++	  /* Solution for this case is very simple - convert op into V4DI mode,
++	     and do same thing as previous if branch.  */
++	  op1_alt = gen_reg_rtx (d->vmode);
++	  op0_alt = gen_reg_rtx (d->vmode);
++	  emit_move_insn (op1_alt, d->op1);
++	  emit_move_insn (op0_alt, d->op0);
++
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++	  rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
++					      conv_op0, GEN_INT (0x02)));
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
++					      conv_op1, GEN_INT (0x01)));
++	  remapped[0] = 0;
++	  remapped[1] = 4;
++	  remapped[2] = 1;
++	  remapped[3] = 5;
++
++	  for (i = 0; i < d->nelt; i += 1)
++	    {
++	      rperm[i] = GEN_INT (remapped[i]);
++	    }
++
++	  sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (4, rperm));
++	  sel = force_reg (E_V4DImode, sel);
++	  emit_insn (gen_lasx_xvshuf_d (conv_target, sel,
++					conv_op1, conv_op0));
++	}
++
++      ok = true;
++      goto expand_perm_const_2_end;
++    }
++  else if (loongarch_is_lasx_lowpart_extract (d))
++    {
++      /* Copy op0's low 128bit to target's low 128bit, and copy op1's low
++	 128bit to target's high 128bit.
++	 Selector sample: E_V4DImode, { 0, 1, 4 ,5 }  */
++      if (!d->testing_p)
++	{
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
++	  rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++
++	  /* We can achieve the expectation by using simple xvpermi.q insn.  */
++	  emit_move_insn (conv_target, conv_op1);
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_target, conv_target,
++					      conv_op0, GEN_INT (0x20)));
++	}
++
++      ok = true;
++      goto expand_perm_const_2_end;
++    }
++  else if (loongarch_is_lasx_highpart_interleave (d))
++    {
++      /* Similar to lowpart interleave, elements from op0's high 128bit and
++	 op1's high 128bit are inserted into target register alternately.
++	 Selector sample: E_V8SImode, { 4, 12, 5, 13, 6, 14, 7, 15 }  */
++      if (!d->testing_p)
++	{
++	  /* Prepare temp op register.  */
++	  use_alt_op = true;
++	  op1_alt = gen_reg_rtx (d->vmode);
++	  op0_alt = gen_reg_rtx (d->vmode);
++	  emit_move_insn (op1_alt, d->op1);
++	  emit_move_insn (op0_alt, d->op0);
++
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++	  /* Adjust op value in temp register.
++	     op0 = { 0, 1, 2, 3 }, op1 = { 6, 7, 2, 3 }  */
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
++					      conv_op0, GEN_INT (0x13)));
++	  /* op0 = { 2, 3, 6, 7 }, op1 = { 6, 7, 2, 3 }  */
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
++					      conv_op1, GEN_INT (0x01)));
++	  /* Remap indices in selector based on the location of index inside
++	     selector, and vector element numbers in current vector mode.  */
++
++	  /* Filling low 128bit of new selector.  */
++	 for (i = 0; i < d->nelt / 2; i += 1)
++	   {
++	     /* value in even-indexed slot of low 128bit part of selector
++		vector.  */
++	     remapped[i] = i % 2 == 0 ? d->perm[i] - d->nelt / 2 : d->perm[i];
++	   }
++	  /* Then filling the high 128bit.  */
++	 for (i = d->nelt / 2; i < d->nelt; i += 1)
++	   {
++	     /* value in odd-indexed slot of high 128bit part of selector
++		vector.  */
++	      remapped[i] = i % 2 != 0
++		? d->perm[i] - (d->nelt / 2) * 3 : d->perm[i];
++	   }
++	}
++    }
++  else if (loongarch_is_lasx_highpart_interleave_2 (d))
++    {
++      /* Special highpart interleave case in V32QI vector mode.  It does the
++	 same thing as the normal version above.
++	 Selector sample: E_V32QImode,
++	 {16, 17, 18, 19, 20, 21, 22, 23, 48, 49, 50, 51, 52, 53, 54, 55,
++	 24, 25, 26, 27, 28, 29, 30, 31, 56, 57, 58, 59, 60, 61, 62, 63}
++      */
++      if (!d->testing_p)
++	{
++	  /* Convert op into V4DImode and do the things.  */
++	  op1_alt = gen_reg_rtx (d->vmode);
++	  op0_alt = gen_reg_rtx (d->vmode);
++	  emit_move_insn (op1_alt, d->op1);
++	  emit_move_insn (op0_alt, d->op0);
++
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++	  rtx conv_target = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1, conv_op1,
++					      conv_op0, GEN_INT (0x13)));
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0, conv_op0,
++					      conv_op1, GEN_INT (0x01)));
++	  remapped[0] = 2;
++	  remapped[1] = 6;
++	  remapped[2] = 3;
++	  remapped[3] = 7;
++
++	  for (i = 0; i < d->nelt; i += 1)
++	    {
++	      rperm[i] = GEN_INT (remapped[i]);
++	    }
++
++	  sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (4, rperm));
++	  sel = force_reg (E_V4DImode, sel);
++	  emit_insn (gen_lasx_xvshuf_d (conv_target, sel,
++					conv_op1, conv_op0));
++	}
++
++	ok = true;
++	goto expand_perm_const_2_end;
++    }
++  else if (loongarch_is_elem_duplicate (d))
++    {
++      /* Broadcast single element (from op0 or op1) to all slots of target
++	 register.
++	 Selector sample:E_V8SImode, { 2, 2, 2, 2, 2, 2, 2, 2 }  */
++      if (!d->testing_p)
++	{
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
++	  rtx temp_reg = gen_reg_rtx (d->vmode);
++	  rtx conv_temp = gen_rtx_SUBREG (E_V4DImode, temp_reg, 0);
++
++	  emit_move_insn (temp_reg, d->op0);
++
++	  idx = d->perm[0];
++	  /* We will use xvrepl128vei.* insn to achieve the result, but we need
++	     to make the high/low 128bit has the same contents that contain the
++	     value that we need to broardcast, because xvrepl128vei does the
++	     broardcast job from every 128bit of source register to
++	     corresponded part of target register! (A deep sigh.)  */
++	  if (/*idx >= 0 &&*/ idx < d->nelt / 2)
++	    {
++	      emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp,
++						  conv_op0, GEN_INT (0x0)));
++	    }
++	  else if (idx >= d->nelt / 2 && idx < d->nelt)
++	    {
++	      emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp,
++						  conv_op0, GEN_INT (0x11)));
++	      idx -= d->nelt / 2;
++	    }
++	  else if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2))
++	    {
++	      emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp,
++						  conv_op1, GEN_INT (0x0)));
++	    }
++	  else if (idx >= (d->nelt + d->nelt / 2) && idx < d->nelt * 2)
++	    {
++	      emit_insn (gen_lasx_xvpermi_q_v4di (conv_temp, conv_temp,
++						  conv_op1, GEN_INT (0x11)));
++	      idx -= d->nelt / 2;
++	    }
++
++	  /* Then we can finally generate this insn.  */
++	  switch (d->vmode)
++	    {
++	    case E_V4DImode:
++	      emit_insn (gen_lasx_xvrepl128vei_d (d->target, temp_reg,
++						  GEN_INT (idx)));
++	      break;
++	    case E_V4DFmode:
++	      emit_insn (gen_lasx_xvrepl128vei_d_f (d->target, temp_reg,
++						    GEN_INT (idx)));
++	      break;
++	    case E_V8SImode:
++	      emit_insn (gen_lasx_xvrepl128vei_w (d->target, temp_reg,
++						  GEN_INT (idx)));
++	      break;
++	    case E_V8SFmode:
++	      emit_insn (gen_lasx_xvrepl128vei_w_f (d->target, temp_reg,
++						    GEN_INT (idx)));
++	      break;
++	    case E_V16HImode:
++	      emit_insn (gen_lasx_xvrepl128vei_h (d->target, temp_reg,
++						  GEN_INT (idx)));
++	      break;
++	    case E_V32QImode:
++	      emit_insn (gen_lasx_xvrepl128vei_b (d->target, temp_reg,
++						  GEN_INT (idx)));
++	      break;
++	    default:
++	      gcc_unreachable ();
++	      break;
++	    }
++
++	  /* finish func directly.  */
++	  ok = true;
++	  goto expand_perm_const_2_end;
++	}
++    }
++  else if (loongarch_is_op_reverse_perm (d))
++    {
++      /* reverse high 128bit and low 128bit in op0.
++	 Selector sample: E_V4DFmode, { 2, 3, 0, 1 }
++	 Use xvpermi.q for doing this job.  */
++      if (!d->testing_p)
++	{
++	  if (d->vmode == E_V4DImode)
++	    {
++	      emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target, d->op0,
++						  GEN_INT (0x01)));
++	    }
++	  else if (d->vmode == E_V4DFmode)
++	    {
++	      emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target, d->op0,
++						  GEN_INT (0x01)));
++	    }
++	  else
++	    {
++	      gcc_unreachable ();
++	    }
++	}
++
++      ok = true;
++      goto expand_perm_const_2_end;
++    }
++  else if (loongarch_is_single_op_perm (d))
++    {
++      /* Permutation that only select elements from op0.  */
++      if (!d->testing_p)
++	{
++	  /* Prepare temp register instead of modify original op.  */
++	  use_alt_op = true;
++	  op0_alt = gen_reg_rtx (d->vmode);
++	  op1_alt = gen_reg_rtx (d->vmode);
++
++	  emit_move_insn (op0_alt, d->op0);
++	  emit_move_insn (op1_alt, d->op1);
++
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
++	  rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++	  rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++
++	  /* Duplicate op0's low 128bit in op0, then duplicate high 128bit
++	     in op1.  After this, xvshuf.* insn's selector argument can
++	     access all elements we need for correct permutation result.  */
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op0,
++					      GEN_INT (0x00)));
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0,
++					      GEN_INT (0x11)));
++
++	  /* In this case, there's no need to remap selector's indices.  */
++	  for (i = 0; i < d->nelt; i += 1)
++	    {
++	      remapped[i] = d->perm[i];
++	    }
++	}
++    }
++  else if (loongarch_is_divisible_perm (d))
++    {
++      /* Divisible perm:
++	 Low 128bit of selector only selects elements of op0,
++	 and high 128bit of selector only selects elements of op1.  */
++
++      if (!d->testing_p)
++	{
++	  /* Prepare temp register instead of modify original op.  */
++	  use_alt_op = true;
++	  op0_alt = gen_reg_rtx (d->vmode);
++	  op1_alt = gen_reg_rtx (d->vmode);
++
++	  emit_move_insn (op0_alt, d->op0);
++	  emit_move_insn (op1_alt, d->op1);
++
++	  rtx conv_op0a = gen_rtx_SUBREG (E_V4DImode, op0_alt, 0);
++	  rtx conv_op1a = gen_rtx_SUBREG (E_V4DImode, op1_alt, 0);
++	  rtx conv_op0 = gen_rtx_SUBREG (E_V4DImode, d->op0, 0);
++	  rtx conv_op1 = gen_rtx_SUBREG (E_V4DImode, d->op1, 0);
++
++	  /* Reorganize op0's hi/lo 128bit and op1's hi/lo 128bit, to make sure
++	     that selector's low 128bit can access all op0's elements, and
++	     selector's high 128bit can access all op1's elements.  */
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op0a, conv_op0a, conv_op1,
++					      GEN_INT (0x02)));
++	  emit_insn (gen_lasx_xvpermi_q_v4di (conv_op1a, conv_op1a, conv_op0,
++					      GEN_INT (0x31)));
++
++	  /* No need to modify indices.  */
++	  for (i = 0; i < d->nelt;i += 1)
++	    {
++	      remapped[i] = d->perm[i];
++	    }
++	}
++    }
++  else if (loongarch_is_triple_stride_extract (d))
++    {
++      /* Selector sample: E_V4DFmode, { 1, 4, 7, 0 }.  */
++      if (!d->testing_p)
++	{
++	  /* Resolve it with brute force modification.  */
++	  remapped[0] = 1;
++	  remapped[1] = 2;
++	  remapped[2] = 3;
++	  remapped[3] = 0;
++	}
++    }
++  else
++    {
++      /* When all of the detections above are failed, we will try last
++	 strategy.
++	 The for loop tries to detect following rules based on indices' value,
++	 its position inside of selector vector ,and strange behavior of
++	 xvshuf.* insn; Then we take corresponding action. (Replace with new
++	 value, or give up whole permutation expansion.)  */
++      for (i = 0; i < d->nelt; i += 1)
++	{
++	  /* % (2 * d->nelt)  */
++	  idx = d->perm[i];
++
++	  /* if index is located in low 128bit of selector vector.  */
++	  if (i < d->nelt / 2)
++	    {
++	      /* Fail case 1: index tries to reach element that located in op0's
++		 high 128bit.  */
++	      if (idx >= d->nelt / 2 && idx < d->nelt)
++		{
++		  goto expand_perm_const_2_end;
++		}
++	      /* Fail case 2: index tries to reach element that located in
++		 op1's high 128bit.  */
++	      if (idx >= (d->nelt + d->nelt / 2))
++		{
++		  goto expand_perm_const_2_end;
++		}
++
++	      /* Success case: index tries to reach elements that located in
++		 op1's low 128bit.  Apply - (nelt / 2) offset to original
++		 value.  */
++	      if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2))
++		{
++		  idx -= d->nelt / 2;
++		}
++	    }
++	  /* if index is located in high 128bit of selector vector.  */
++	  else
++	    {
++	      /* Fail case 1: index tries to reach element that located in
++		 op1's low 128bit.  */
++	      if (idx >= d->nelt && idx < (d->nelt + d->nelt / 2))
++		{
++		  goto expand_perm_const_2_end;
++		}
++	      /* Fail case 2: index tries to reach element that located in
++		 op0's low 128bit.  */
++	      if (idx < (d->nelt / 2))
++		{
++		  goto expand_perm_const_2_end;
++		}
++	      /* Success case: index tries to reach element that located in
++		 op0's high 128bit.  */
++	      if (idx >= d->nelt / 2 && idx < d->nelt)
++		{
++		  idx -= d->nelt / 2;
++		}
++	    }
++	  /* No need to process other case that we did not mentioned.  */
++
++	  /* Assign with original or processed value.  */
++	  remapped[i] = idx;
++	}
++    }
++
++  ok = true;
++  /* If testing_p is true, compiler is trying to figure out that backend can
++     handle this permutation, but doesn't want to generate actual insn.  So
++     if true, exit directly.  */
++  if (d->testing_p)
++    {
++      goto expand_perm_const_2_end;
++    }
++
++  /* Convert remapped selector array to RTL array.  */
++  for (i = 0; i < d->nelt; i += 1)
++    {
++      rperm[i] = GEN_INT (remapped[i]);
++    }
++
++  /* Copy selector vector from memory to vector regiter for later insn gen
++     function.
++     If vector's element in floating point value, we cannot fit selector
++     argument into insn gen function directly, because of the insn template
++     definition.  As a solution, generate a integral mode subreg of target,
++     then copy selector vector (that is in integral mode) to this subreg.  */
++  switch (d->vmode)
++    {
++    case E_V4DFmode:
++      sel = gen_rtx_CONST_VECTOR (E_V4DImode, gen_rtvec_v (d->nelt, rperm));
++      tmp = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++      emit_move_insn (tmp, sel);
++      break;
++    case E_V8SFmode:
++      sel = gen_rtx_CONST_VECTOR (E_V8SImode, gen_rtvec_v (d->nelt, rperm));
++      tmp = gen_rtx_SUBREG (E_V8SImode, d->target, 0);
++      emit_move_insn (tmp, sel);
++      break;
++    default:
++      sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm));
++      emit_move_insn (d->target, sel);
++      break;
++    }
++
++  target = d->target;
++  /* If temp op registers are requested in previous if branch, then use temp
++     register intead of original one.  */
++  if (use_alt_op)
++    {
++      op0 = op0_alt != NULL_RTX ? op0_alt : d->op0;
++      op1 = op1_alt != NULL_RTX ? op1_alt : d->op1;
++    }
++  else
++    {
++      op0 = d->op0;
++      op1 = d->one_vector_p ? d->op0 : d->op1;
++    }
++
++  /* We FINALLY can generate xvshuf.* insn.  */
++  switch (d->vmode)
++    {
++    case E_V4DFmode:
++      emit_insn (gen_lasx_xvshuf_d_f (target, target, op1, op0));
++      break;
++    case E_V4DImode:
++      emit_insn (gen_lasx_xvshuf_d (target, target, op1, op0));
++      break;
++    case E_V8SFmode:
++      emit_insn (gen_lasx_xvshuf_w_f (target, target, op1, op0));
++      break;
++    case E_V8SImode:
++      emit_insn (gen_lasx_xvshuf_w (target, target, op1, op0));
++      break;
++    case E_V16HImode:
++      emit_insn (gen_lasx_xvshuf_h (target, target, op1, op0));
++      break;
++    case E_V32QImode:
++      emit_insn (gen_lasx_xvshuf_b (target, op1, op0, target));
++      break;
++    default:
++      gcc_unreachable ();
++      break;
++    }
++
++  /* Extra insn for swapping the hi/lo 128bit of target vector register.  */
++  if (reverse_hi_lo)
++    {
++      switch (d->vmode)
++	{
++	case E_V4DFmode:
++	  emit_insn (gen_lasx_xvpermi_q_v4df (d->target, d->target,
++					      d->target, GEN_INT (0x1)));
++	  break;
++	case E_V4DImode:
++	  emit_insn (gen_lasx_xvpermi_q_v4di (d->target, d->target,
++					      d->target, GEN_INT (0x1)));
++	  break;
++	case E_V8SFmode:
++	  emit_insn (gen_lasx_xvpermi_q_v8sf (d->target, d->target,
++					      d->target, GEN_INT (0x1)));
++	  break;
++	case E_V8SImode:
++	  emit_insn (gen_lasx_xvpermi_q_v8si (d->target, d->target,
++					      d->target, GEN_INT (0x1)));
++	  break;
++	case E_V16HImode:
++	  emit_insn (gen_lasx_xvpermi_q_v16hi (d->target, d->target,
++					       d->target, GEN_INT (0x1)));
++	  break;
++	case E_V32QImode:
++	  emit_insn (gen_lasx_xvpermi_q_v32qi (d->target, d->target,
++					       d->target, GEN_INT (0x1)));
++	  break;
++	default:
++	  break;
++	}
++    }
++  /* Extra insn required by odd/even extraction.  Swapping the second and third
++     64bit in target vector register.  */
++  else if (extract_ev_od)
++    {
++      rtx converted = gen_rtx_SUBREG (E_V4DImode, d->target, 0);
++      emit_insn (gen_lasx_xvpermi_d_v4di (converted, converted,
++					  GEN_INT (0xD8)));
++    }
++
++expand_perm_const_2_end:
++  return ok;
+ }
+ 
+ /* Implement TARGET_VECTORIZE_VEC_PERM_CONST.  */
+@@ -7813,6 +9728,12 @@ loongarch_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
+       break;
+     }
+ 
++  // Do rounding for selector to avoid vshuf undefined behavior.
++  for (i = 0; i < d.nelt; i += 1)
++    {
++      d.perm[i] %= (d.nelt * 2);
++    }
++
+   if (d.testing_p)
+     {
+       d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
+@@ -7865,7 +9786,7 @@ loongarch_cpu_sched_reassociation_width (struct loongarch_target *target,
+     case CPU_LOONGARCH64:
+     case CPU_LA464:
+       /* Vector part.  */
+-      if (LSX_SUPPORTED_MODE_P (mode))
++      if (LSX_SUPPORTED_MODE_P (mode) || LASX_SUPPORTED_MODE_P (mode))
+ 	{
+ 	  /* Integer vector instructions execute in FP unit.
+ 	     The width of integer/float-point vector instructions is 3.  */
+@@ -7915,6 +9836,44 @@ loongarch_expand_vector_extract (rtx target, rtx vec, int elt)
+     case E_V16QImode:
+       break;
+ 
++    case E_V32QImode:
++      if (ISA_HAS_LASX)
++	{
++	  if (elt >= 16)
++	    {
++	      tmp = gen_reg_rtx (V32QImode);
++	      emit_insn (gen_lasx_xvpermi_d_v32qi (tmp, vec, GEN_INT (0xe)));
++	      loongarch_expand_vector_extract (target,
++					       gen_lowpart (V16QImode, tmp),
++					       elt & 15);
++	    }
++	  else
++	    loongarch_expand_vector_extract (target,
++					     gen_lowpart (V16QImode, vec),
++					     elt & 15);
++	  return;
++	}
++      break;
++
++    case E_V16HImode:
++      if (ISA_HAS_LASX)
++	{
++	  if (elt >= 8)
++	    {
++	      tmp = gen_reg_rtx (V16HImode);
++	      emit_insn (gen_lasx_xvpermi_d_v16hi (tmp, vec, GEN_INT (0xe)));
++	      loongarch_expand_vector_extract (target,
++					       gen_lowpart (V8HImode, tmp),
++					       elt & 7);
++	    }
++	  else
++	    loongarch_expand_vector_extract (target,
++					     gen_lowpart (V8HImode, vec),
++					     elt & 7);
++	  return;
++	}
++      break;
++
+     default:
+       break;
+     }
+@@ -7953,6 +9912,31 @@ emit_reduc_half (rtx dest, rtx src, int i)
+     case E_V2DFmode:
+       tem = gen_lsx_vbsrl_d_f (dest, src, GEN_INT (8));
+       break;
++    case E_V8SFmode:
++      if (i == 256)
++	tem = gen_lasx_xvpermi_d_v8sf (dest, src, GEN_INT (0xe));
++      else
++	tem = gen_lasx_xvshuf4i_w_f (dest, src,
++				     GEN_INT (i == 128 ? 2 + (3 << 2) : 1));
++      break;
++    case E_V4DFmode:
++      if (i == 256)
++	tem = gen_lasx_xvpermi_d_v4df (dest, src, GEN_INT (0xe));
++      else
++	tem = gen_lasx_xvpermi_d_v4df (dest, src, const1_rtx);
++      break;
++    case E_V32QImode:
++    case E_V16HImode:
++    case E_V8SImode:
++    case E_V4DImode:
++      d = gen_reg_rtx (V4DImode);
++      if (i == 256)
++	tem = gen_lasx_xvpermi_d_v4di (d, gen_lowpart (V4DImode, src),
++				       GEN_INT (0xe));
++      else
++	tem = gen_lasx_xvbsrl_d (d, gen_lowpart (V4DImode, src),
++				 GEN_INT (i/16));
++      break;
+     case E_V16QImode:
+     case E_V8HImode:
+     case E_V4SImode:
+@@ -8000,10 +9984,57 @@ loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p)
+ {
+   machine_mode imode = GET_MODE (operands[1]);
+   rtx (*unpack) (rtx, rtx, rtx);
++  rtx (*extend) (rtx, rtx);
+   rtx (*cmpFunc) (rtx, rtx, rtx);
++  rtx (*swap_hi_lo) (rtx, rtx, rtx, rtx);
+   rtx tmp, dest;
+ 
+-  if (ISA_HAS_LSX)
++  if (ISA_HAS_LASX && GET_MODE_SIZE (imode) == 32)
++    {
++      switch (imode)
++	{
++	case E_V8SImode:
++	  if (unsigned_p)
++	    extend = gen_lasx_vext2xv_du_wu;
++	  else
++	    extend = gen_lasx_vext2xv_d_w;
++	  swap_hi_lo = gen_lasx_xvpermi_q_v8si;
++	  break;
++
++	case E_V16HImode:
++	  if (unsigned_p)
++	    extend = gen_lasx_vext2xv_wu_hu;
++	  else
++	    extend = gen_lasx_vext2xv_w_h;
++	  swap_hi_lo = gen_lasx_xvpermi_q_v16hi;
++	  break;
++
++	case E_V32QImode:
++	  if (unsigned_p)
++	    extend = gen_lasx_vext2xv_hu_bu;
++	  else
++	    extend = gen_lasx_vext2xv_h_b;
++	  swap_hi_lo = gen_lasx_xvpermi_q_v32qi;
++	  break;
++
++	default:
++	  gcc_unreachable ();
++	  break;
++	}
++
++      if (high_p)
++	{
++	  tmp = gen_reg_rtx (imode);
++	  emit_insn (swap_hi_lo (tmp, tmp, operands[1], const1_rtx));
++	  emit_insn (extend (operands[0], tmp));
++	  return;
++	}
++
++      emit_insn (extend (operands[0], operands[1]));
++      return;
++
++    }
++  else if (ISA_HAS_LSX)
+     {
+       switch (imode)
+ 	{
+@@ -8104,8 +10135,17 @@ loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val)
+   return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nunits, elts));
+ }
+ 
++
+ /* Expand a vector initialization.  */
+ 
++void
++loongarch_expand_vector_group_init (rtx target, rtx vals)
++{
++  rtx ops[2] = { XVECEXP (vals, 0, 0), XVECEXP (vals, 0, 1) };
++  emit_insn (gen_rtx_SET (target, gen_rtx_VEC_CONCAT (E_V32QImode, ops[0],
++						      ops[1])));
++}
++
+ void
+ loongarch_expand_vector_init (rtx target, rtx vals)
+ {
+@@ -8125,6 +10165,285 @@ loongarch_expand_vector_init (rtx target, rtx vals)
+ 	all_same = false;
+     }
+ 
++  if (ISA_HAS_LASX && GET_MODE_SIZE (vmode) == 32)
++    {
++      if (all_same)
++	{
++	  rtx same = XVECEXP (vals, 0, 0);
++	  rtx temp, temp2;
++
++	  if (CONST_INT_P (same) && nvar == 0
++	      && loongarch_signed_immediate_p (INTVAL (same), 10, 0))
++	    {
++	      switch (vmode)
++		{
++		case E_V32QImode:
++		case E_V16HImode:
++		case E_V8SImode:
++		case E_V4DImode:
++		  temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0));
++		  emit_move_insn (target, temp);
++		  return;
++
++		default:
++		  gcc_unreachable ();
++		}
++	    }
++
++	  temp = gen_reg_rtx (imode);
++	  if (imode == GET_MODE (same))
++	    temp2 = same;
++	  else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD)
++	    {
++	      if (GET_CODE (same) == MEM)
++		{
++		  rtx reg_tmp = gen_reg_rtx (GET_MODE (same));
++		  loongarch_emit_move (reg_tmp, same);
++		  temp2 = simplify_gen_subreg (imode, reg_tmp,
++					       GET_MODE (reg_tmp), 0);
++		}
++	      else
++		temp2 = simplify_gen_subreg (imode, same,
++					     GET_MODE (same), 0);
++	    }
++	  else
++	    {
++	      if (GET_CODE (same) == MEM)
++		{
++		  rtx reg_tmp = gen_reg_rtx (GET_MODE (same));
++		  loongarch_emit_move (reg_tmp, same);
++		  temp2 = lowpart_subreg (imode, reg_tmp,
++					  GET_MODE (reg_tmp));
++		}
++	      else
++		temp2 = lowpart_subreg (imode, same, GET_MODE (same));
++	    }
++	  emit_move_insn (temp, temp2);
++
++	  switch (vmode)
++	    {
++	    case E_V32QImode:
++	    case E_V16HImode:
++	    case E_V8SImode:
++	    case E_V4DImode:
++	      loongarch_emit_move (target,
++				   gen_rtx_VEC_DUPLICATE (vmode, temp));
++	      break;
++
++	    case E_V8SFmode:
++	      emit_insn (gen_lasx_xvreplve0_w_f_scalar (target, temp));
++	      break;
++
++	    case E_V4DFmode:
++	      emit_insn (gen_lasx_xvreplve0_d_f_scalar (target, temp));
++	      break;
++
++	    default:
++	      gcc_unreachable ();
++	    }
++	}
++      else
++	{
++	  rtvec vec = shallow_copy_rtvec (XVEC (vals, 0));
++
++	  for (i = 0; i < nelt; ++i)
++	    RTVEC_ELT (vec, i) = CONST0_RTX (imode);
++
++	  emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, vec));
++
++	  machine_mode half_mode = VOIDmode;
++	  rtx target_hi, target_lo;
++
++	  switch (vmode)
++	    {
++	    case E_V32QImode:
++	      half_mode=E_V16QImode;
++	      target_hi = gen_reg_rtx (half_mode);
++	      target_lo = gen_reg_rtx (half_mode);
++	      for (i = 0; i < nelt/2; ++i)
++		{
++		  rtx temp_hi = gen_reg_rtx (imode);
++		  rtx temp_lo = gen_reg_rtx (imode);
++		  emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2));
++		  emit_move_insn (temp_lo, XVECEXP (vals, 0, i));
++		  if (i == 0)
++		    {
++		      emit_insn (gen_lsx_vreplvei_b_scalar (target_hi,
++							    temp_hi));
++		      emit_insn (gen_lsx_vreplvei_b_scalar (target_lo,
++							    temp_lo));
++		    }
++		  else
++		    {
++		      emit_insn (gen_vec_setv16qi (target_hi, temp_hi,
++						   GEN_INT (i)));
++		      emit_insn (gen_vec_setv16qi (target_lo, temp_lo,
++						   GEN_INT (i)));
++		    }
++		}
++	      emit_insn (gen_rtx_SET (target,
++				      gen_rtx_VEC_CONCAT (vmode, target_hi,
++							  target_lo)));
++	      break;
++
++	    case E_V16HImode:
++	      half_mode=E_V8HImode;
++	      target_hi = gen_reg_rtx (half_mode);
++	      target_lo = gen_reg_rtx (half_mode);
++	      for (i = 0; i < nelt/2; ++i)
++		{
++		  rtx temp_hi = gen_reg_rtx (imode);
++		  rtx temp_lo = gen_reg_rtx (imode);
++		  emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2));
++		  emit_move_insn (temp_lo, XVECEXP (vals, 0, i));
++		  if (i == 0)
++		    {
++		      emit_insn (gen_lsx_vreplvei_h_scalar (target_hi,
++							    temp_hi));
++		      emit_insn (gen_lsx_vreplvei_h_scalar (target_lo,
++							    temp_lo));
++		    }
++		  else
++		    {
++		      emit_insn (gen_vec_setv8hi (target_hi, temp_hi,
++						  GEN_INT (i)));
++		      emit_insn (gen_vec_setv8hi (target_lo, temp_lo,
++						  GEN_INT (i)));
++		    }
++		}
++	      emit_insn (gen_rtx_SET (target,
++				      gen_rtx_VEC_CONCAT (vmode, target_hi,
++							  target_lo)));
++	      break;
++
++	    case E_V8SImode:
++	      half_mode=V4SImode;
++	      target_hi = gen_reg_rtx (half_mode);
++	      target_lo = gen_reg_rtx (half_mode);
++	      for (i = 0; i < nelt/2; ++i)
++		{
++		  rtx temp_hi = gen_reg_rtx (imode);
++		  rtx temp_lo = gen_reg_rtx (imode);
++		  emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2));
++		  emit_move_insn (temp_lo, XVECEXP (vals, 0, i));
++		  if (i == 0)
++		    {
++		      emit_insn (gen_lsx_vreplvei_w_scalar (target_hi,
++							    temp_hi));
++		      emit_insn (gen_lsx_vreplvei_w_scalar (target_lo,
++							    temp_lo));
++		    }
++		  else
++		    {
++		      emit_insn (gen_vec_setv4si (target_hi, temp_hi,
++						  GEN_INT (i)));
++		      emit_insn (gen_vec_setv4si (target_lo, temp_lo,
++						  GEN_INT (i)));
++		    }
++		}
++	      emit_insn (gen_rtx_SET (target,
++				      gen_rtx_VEC_CONCAT (vmode, target_hi,
++							  target_lo)));
++	      break;
++
++	    case E_V4DImode:
++	      half_mode=E_V2DImode;
++	      target_hi = gen_reg_rtx (half_mode);
++	      target_lo = gen_reg_rtx (half_mode);
++	      for (i = 0; i < nelt/2; ++i)
++		{
++		  rtx temp_hi = gen_reg_rtx (imode);
++		  rtx temp_lo = gen_reg_rtx (imode);
++		  emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2));
++		  emit_move_insn (temp_lo, XVECEXP (vals, 0, i));
++		  if (i == 0)
++		    {
++		      emit_insn (gen_lsx_vreplvei_d_scalar (target_hi,
++							    temp_hi));
++		      emit_insn (gen_lsx_vreplvei_d_scalar (target_lo,
++							    temp_lo));
++		    }
++		  else
++		    {
++		      emit_insn (gen_vec_setv2di (target_hi, temp_hi,
++						  GEN_INT (i)));
++		      emit_insn (gen_vec_setv2di (target_lo, temp_lo,
++						  GEN_INT (i)));
++		    }
++		}
++	      emit_insn (gen_rtx_SET (target,
++				      gen_rtx_VEC_CONCAT (vmode, target_hi,
++							  target_lo)));
++	      break;
++
++	    case E_V8SFmode:
++	      half_mode=E_V4SFmode;
++	      target_hi = gen_reg_rtx (half_mode);
++	      target_lo = gen_reg_rtx (half_mode);
++	      for (i = 0; i < nelt/2; ++i)
++		{
++		  rtx temp_hi = gen_reg_rtx (imode);
++		  rtx temp_lo = gen_reg_rtx (imode);
++		  emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2));
++		  emit_move_insn (temp_lo, XVECEXP (vals, 0, i));
++		  if (i == 0)
++		    {
++		      emit_insn (gen_lsx_vreplvei_w_f_scalar (target_hi,
++							      temp_hi));
++		      emit_insn (gen_lsx_vreplvei_w_f_scalar (target_lo,
++							      temp_lo));
++		    }
++		  else
++		    {
++		      emit_insn (gen_vec_setv4sf (target_hi, temp_hi,
++						  GEN_INT (i)));
++		      emit_insn (gen_vec_setv4sf (target_lo, temp_lo,
++						  GEN_INT (i)));
++		    }
++		}
++	      emit_insn (gen_rtx_SET (target,
++				      gen_rtx_VEC_CONCAT (vmode, target_hi,
++							  target_lo)));
++	      break;
++
++	    case E_V4DFmode:
++	      half_mode=E_V2DFmode;
++	      target_hi = gen_reg_rtx (half_mode);
++	      target_lo = gen_reg_rtx (half_mode);
++	      for (i = 0; i < nelt/2; ++i)
++		{
++		  rtx temp_hi = gen_reg_rtx (imode);
++		  rtx temp_lo = gen_reg_rtx (imode);
++		  emit_move_insn (temp_hi, XVECEXP (vals, 0, i+nelt/2));
++		  emit_move_insn (temp_lo, XVECEXP (vals, 0, i));
++		  if (i == 0)
++		    {
++		      emit_insn (gen_lsx_vreplvei_d_f_scalar (target_hi,
++							      temp_hi));
++		      emit_insn (gen_lsx_vreplvei_d_f_scalar (target_lo,
++							      temp_lo));
++		    }
++		  else
++		    {
++		      emit_insn (gen_vec_setv2df (target_hi, temp_hi,
++						  GEN_INT (i)));
++		      emit_insn (gen_vec_setv2df (target_lo, temp_lo,
++						  GEN_INT (i)));
++		    }
++		}
++	      emit_insn (gen_rtx_SET (target,
++				      gen_rtx_VEC_CONCAT (vmode, target_hi,
++							  target_lo)));
++	      break;
++
++	    default:
++	      gcc_unreachable ();
++	    }
++
++	}
++      return;
++    }
++
+   if (ISA_HAS_LSX)
+     {
+       if (all_same)
+@@ -8372,6 +10691,38 @@ loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1)
+ 	}
+       break;
+ 
++    case E_V8SFmode:
++    case E_V4DFmode:
++      switch (cond)
++	{
++	case UNORDERED:
++	case ORDERED:
++	case EQ:
++	case NE:
++	case UNEQ:
++	case UNLE:
++	case UNLT:
++	  break;
++	case LTGT: cond = NE; break;
++	case UNGE: cond = UNLE; std::swap (op0, op1); break;
++	case UNGT: cond = UNLT; std::swap (op0, op1); break;
++	case LE: unspec = UNSPEC_LASX_XVFCMP_SLE; break;
++	case LT: unspec = UNSPEC_LASX_XVFCMP_SLT; break;
++	case GE: unspec = UNSPEC_LASX_XVFCMP_SLE; std::swap (op0, op1); break;
++	case GT: unspec = UNSPEC_LASX_XVFCMP_SLT; std::swap (op0, op1); break;
++	default:
++		 gcc_unreachable ();
++	}
++      if (unspec < 0)
++	loongarch_emit_binary (cond, dest, op0, op1);
++      else
++	{
++	  rtx x = gen_rtx_UNSPEC (GET_MODE (dest),
++				  gen_rtvec (2, op0, op1), unspec);
++	  emit_insn (gen_rtx_SET (dest, x));
++	}
++      break;
++
+     default:
+       gcc_unreachable ();
+       break;
+@@ -8709,7 +11060,7 @@ loongarch_builtin_support_vector_misalignment (machine_mode mode,
+ 					       int misalignment,
+ 					       bool is_packed)
+ {
+-  if (ISA_HAS_LSX && STRICT_ALIGNMENT)
++  if ((ISA_HAS_LSX || ISA_HAS_LASX) && STRICT_ALIGNMENT)
+     {
+       if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
+ 	return false;
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index c3ebea2f2..b2295c589 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -186,6 +186,11 @@ along with GCC; see the file COPYING3.  If not see
+ /* Width of a LSX vector register in bits.  */
+ #define BITS_PER_LSX_REG (UNITS_PER_LSX_REG * BITS_PER_UNIT)
+ 
++/* Width of a LASX vector register in bytes.  */
++#define UNITS_PER_LASX_REG 32
++/* Width of a LASX vector register in bits.  */
++#define BITS_PER_LASX_REG (UNITS_PER_LASX_REG * BITS_PER_UNIT)
++
+ /* For LARCH, width of a floating point register.  */
+ #define UNITS_PER_FPREG (TARGET_DOUBLE_FLOAT ? 8 : 4)
+ 
+@@ -248,10 +253,11 @@ along with GCC; see the file COPYING3.  If not see
+ #define STRUCTURE_SIZE_BOUNDARY 8
+ 
+ /* There is no point aligning anything to a rounder boundary than
+-   LONG_DOUBLE_TYPE_SIZE, unless under LSX the bigggest alignment is
+-   BITS_PER_LSX_REG/..  */
++   LONG_DOUBLE_TYPE_SIZE, unless under LSX/LASX the bigggest alignment is
++   BITS_PER_LSX_REG/BITS_PER_LASX_REG/..  */
+ #define BIGGEST_ALIGNMENT \
+-  (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE)
++  (ISA_HAS_LASX? BITS_PER_LASX_REG \
++   : (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE))
+ 
+ /* All accesses must be aligned.  */
+ #define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN)
+@@ -391,6 +397,10 @@ along with GCC; see the file COPYING3.  If not see
+ #define LSX_REG_LAST  FP_REG_LAST
+ #define LSX_REG_NUM   FP_REG_NUM
+ 
++#define LASX_REG_FIRST FP_REG_FIRST
++#define LASX_REG_LAST  FP_REG_LAST
++#define LASX_REG_NUM   FP_REG_NUM
++
+ /* The DWARF 2 CFA column which tracks the return address from a
+    signal handler context.  This means that to maintain backwards
+    compatibility, no hard register can be assigned this column if it
+@@ -409,9 +419,12 @@ along with GCC; see the file COPYING3.  If not see
+   ((unsigned int) ((int) (REGNO) - FCC_REG_FIRST) < FCC_REG_NUM)
+ #define LSX_REG_P(REGNO) \
+   ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM)
++#define LASX_REG_P(REGNO) \
++  ((unsigned int) ((int) (REGNO) - LASX_REG_FIRST) < LASX_REG_NUM)
+ 
+ #define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
+ #define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X)))
++#define LASX_REG_RTX_P(X) (REG_P (X) && LASX_REG_P (REGNO (X)))
+ 
+ /* Select a register mode required for caller save of hard regno REGNO.  */
+ #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
+@@ -733,6 +746,13 @@ enum reg_class
+    && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT		\
+        || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT))
+ 
++#define LASX_SUPPORTED_MODE_P(MODE)			\
++  (ISA_HAS_LASX						\
++   && (GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG	\
++       ||GET_MODE_SIZE (MODE) == UNITS_PER_LASX_REG)	\
++   && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT		\
++       || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT))
++
+ /* 1 if N is a possible register number for function argument passing.
+    We have no FP argument registers when soft-float.  */
+ 
+@@ -985,7 +1005,39 @@ typedef struct {
+   { "vr28",	28 + FP_REG_FIRST },					\
+   { "vr29",	29 + FP_REG_FIRST },					\
+   { "vr30",	30 + FP_REG_FIRST },					\
+-  { "vr31",	31 + FP_REG_FIRST }					\
++  { "vr31",	31 + FP_REG_FIRST },					\
++  { "xr0",	 0 + FP_REG_FIRST },					\
++  { "xr1",	 1 + FP_REG_FIRST },					\
++  { "xr2",	 2 + FP_REG_FIRST },					\
++  { "xr3",	 3 + FP_REG_FIRST },					\
++  { "xr4",	 4 + FP_REG_FIRST },					\
++  { "xr5",	 5 + FP_REG_FIRST },					\
++  { "xr6",	 6 + FP_REG_FIRST },					\
++  { "xr7",	 7 + FP_REG_FIRST },					\
++  { "xr8",	 8 + FP_REG_FIRST },					\
++  { "xr9",	 9 + FP_REG_FIRST },					\
++  { "xr10",	10 + FP_REG_FIRST },					\
++  { "xr11",	11 + FP_REG_FIRST },					\
++  { "xr12",	12 + FP_REG_FIRST },					\
++  { "xr13",	13 + FP_REG_FIRST },					\
++  { "xr14",	14 + FP_REG_FIRST },					\
++  { "xr15",	15 + FP_REG_FIRST },					\
++  { "xr16",	16 + FP_REG_FIRST },					\
++  { "xr17",	17 + FP_REG_FIRST },					\
++  { "xr18",	18 + FP_REG_FIRST },					\
++  { "xr19",	19 + FP_REG_FIRST },					\
++  { "xr20",	20 + FP_REG_FIRST },					\
++  { "xr21",	21 + FP_REG_FIRST },					\
++  { "xr22",	22 + FP_REG_FIRST },					\
++  { "xr23",	23 + FP_REG_FIRST },					\
++  { "xr24",	24 + FP_REG_FIRST },					\
++  { "xr25",	25 + FP_REG_FIRST },					\
++  { "xr26",	26 + FP_REG_FIRST },					\
++  { "xr27",	27 + FP_REG_FIRST },					\
++  { "xr28",	28 + FP_REG_FIRST },					\
++  { "xr29",	29 + FP_REG_FIRST },					\
++  { "xr30",	30 + FP_REG_FIRST },					\
++  { "xr31",	31 + FP_REG_FIRST }					\
+ }
+ 
+ /* Globalizing directive for a label.  */
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index fb3828262..3dde0ceb1 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -163,7 +163,7 @@
+ 
+ ;; Main data type used by the insn
+ (define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC,
+-  V2DI,V4SI,V8HI,V16QI,V2DF,V4SF"
++  V2DI,V4SI,V8HI,V16QI,V2DF,V4SF,V4DI,V8SI,V16HI,V32QI,V4DF,V8SF"
+   (const_string "unknown"))
+ 
+ ;; True if the main data type is twice the size of a word.
+@@ -422,12 +422,14 @@
+ ;; floating-point mode or vector mode.
+ (define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF")
+ 			    (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI")
+-			    (V2DF "DF")])
++			    (V2DF "DF")(V8SF "SF")(V32QI "QI")(V16HI "HI")(V8SI "SI")(V4DI "DI")(V4DF "DF")])
+ 
+ ;; As above, but in lower case.
+ (define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf")
+ 			    (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi")
+-			    (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df")])
++			    (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df")
++			    (V8SI "si") (V4DI "di") (V32QI "qi") (V16HI "hi")
++			    (V8SF "sf") (V4DF "df")])
+ 
+ ;; This attribute gives the integer mode that has half the size of
+ ;; the controlling mode.
+@@ -711,16 +713,17 @@
+   [(set_attr "alu_type" "sub")
+    (set_attr "mode" "")])
+ 
++
+ (define_insn "*subsi3_extended"
+-  [(set (match_operand:DI 0 "register_operand" "= r")
++  [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(sign_extend:DI
+-	    (minus:SI (match_operand:SI 1 "reg_or_0_operand" " rJ")
+-		      (match_operand:SI 2 "register_operand" "  r"))))]
++	    (minus:SI (match_operand:SI 1 "reg_or_0_operand" "rJ")
++		      (match_operand:SI 2 "register_operand" "r"))))]
+   "TARGET_64BIT"
+   "sub.w\t%0,%z1,%2"
+   [(set_attr "type" "arith")
+    (set_attr "mode" "SI")])
+-
++
+ ;;
+ ;;  ....................
+ ;;
+@@ -3638,6 +3641,9 @@
+ ; The LoongArch SX Instructions.
+ (include "lsx.md")
+ 
++; The LoongArch ASX Instructions.
++(include "lasx.md")
++
+ (define_c_enum "unspec" [
+   UNSPEC_ADDRESS_FIRST
+ ])
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-Loongson-ASX-directive-builtin-functio.patch b/LoongArch-Add-Loongson-ASX-directive-builtin-functio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2cc656da753ba5c5b40259bb93ad91b3a6785bbc
--- /dev/null
+++ b/LoongArch-Add-Loongson-ASX-directive-builtin-functio.patch
@@ -0,0 +1,7458 @@
+From 6871a6a4ef5f10bc75a9dd76fff37302057cf528 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Fri, 25 Nov 2022 11:09:49 +0800
+Subject: [PATCH 066/124] LoongArch: Add Loongson ASX directive builtin
+ function support.
+
+gcc/ChangeLog:
+
+	* config.gcc: Export the header file lasxintrin.h.
+	* config/loongarch/loongarch-builtins.cc (enum loongarch_builtin_type):
+	Add Loongson ASX builtin functions support.
+	(AVAIL_ALL): Ditto.
+	(LASX_BUILTIN): Ditto.
+	(LASX_NO_TARGET_BUILTIN): Ditto.
+	(LASX_BUILTIN_TEST_BRANCH): Ditto.
+	(CODE_FOR_lasx_xvsadd_b): Ditto.
+	(CODE_FOR_lasx_xvsadd_h): Ditto.
+	(CODE_FOR_lasx_xvsadd_w): Ditto.
+	(CODE_FOR_lasx_xvsadd_d): Ditto.
+	(CODE_FOR_lasx_xvsadd_bu): Ditto.
+	(CODE_FOR_lasx_xvsadd_hu): Ditto.
+	(CODE_FOR_lasx_xvsadd_wu): Ditto.
+	(CODE_FOR_lasx_xvsadd_du): Ditto.
+	(CODE_FOR_lasx_xvadd_b): Ditto.
+	(CODE_FOR_lasx_xvadd_h): Ditto.
+	(CODE_FOR_lasx_xvadd_w): Ditto.
+	(CODE_FOR_lasx_xvadd_d): Ditto.
+	(CODE_FOR_lasx_xvaddi_bu): Ditto.
+	(CODE_FOR_lasx_xvaddi_hu): Ditto.
+	(CODE_FOR_lasx_xvaddi_wu): Ditto.
+	(CODE_FOR_lasx_xvaddi_du): Ditto.
+	(CODE_FOR_lasx_xvand_v): Ditto.
+	(CODE_FOR_lasx_xvandi_b): Ditto.
+	(CODE_FOR_lasx_xvbitsel_v): Ditto.
+	(CODE_FOR_lasx_xvseqi_b): Ditto.
+	(CODE_FOR_lasx_xvseqi_h): Ditto.
+	(CODE_FOR_lasx_xvseqi_w): Ditto.
+	(CODE_FOR_lasx_xvseqi_d): Ditto.
+	(CODE_FOR_lasx_xvslti_b): Ditto.
+	(CODE_FOR_lasx_xvslti_h): Ditto.
+	(CODE_FOR_lasx_xvslti_w): Ditto.
+	(CODE_FOR_lasx_xvslti_d): Ditto.
+	(CODE_FOR_lasx_xvslti_bu): Ditto.
+	(CODE_FOR_lasx_xvslti_hu): Ditto.
+	(CODE_FOR_lasx_xvslti_wu): Ditto.
+	(CODE_FOR_lasx_xvslti_du): Ditto.
+	(CODE_FOR_lasx_xvslei_b): Ditto.
+	(CODE_FOR_lasx_xvslei_h): Ditto.
+	(CODE_FOR_lasx_xvslei_w): Ditto.
+	(CODE_FOR_lasx_xvslei_d): Ditto.
+	(CODE_FOR_lasx_xvslei_bu): Ditto.
+	(CODE_FOR_lasx_xvslei_hu): Ditto.
+	(CODE_FOR_lasx_xvslei_wu): Ditto.
+	(CODE_FOR_lasx_xvslei_du): Ditto.
+	(CODE_FOR_lasx_xvdiv_b): Ditto.
+	(CODE_FOR_lasx_xvdiv_h): Ditto.
+	(CODE_FOR_lasx_xvdiv_w): Ditto.
+	(CODE_FOR_lasx_xvdiv_d): Ditto.
+	(CODE_FOR_lasx_xvdiv_bu): Ditto.
+	(CODE_FOR_lasx_xvdiv_hu): Ditto.
+	(CODE_FOR_lasx_xvdiv_wu): Ditto.
+	(CODE_FOR_lasx_xvdiv_du): Ditto.
+	(CODE_FOR_lasx_xvfadd_s): Ditto.
+	(CODE_FOR_lasx_xvfadd_d): Ditto.
+	(CODE_FOR_lasx_xvftintrz_w_s): Ditto.
+	(CODE_FOR_lasx_xvftintrz_l_d): Ditto.
+	(CODE_FOR_lasx_xvftintrz_wu_s): Ditto.
+	(CODE_FOR_lasx_xvftintrz_lu_d): Ditto.
+	(CODE_FOR_lasx_xvffint_s_w): Ditto.
+	(CODE_FOR_lasx_xvffint_d_l): Ditto.
+	(CODE_FOR_lasx_xvffint_s_wu): Ditto.
+	(CODE_FOR_lasx_xvffint_d_lu): Ditto.
+	(CODE_FOR_lasx_xvfsub_s): Ditto.
+	(CODE_FOR_lasx_xvfsub_d): Ditto.
+	(CODE_FOR_lasx_xvfmul_s): Ditto.
+	(CODE_FOR_lasx_xvfmul_d): Ditto.
+	(CODE_FOR_lasx_xvfdiv_s): Ditto.
+	(CODE_FOR_lasx_xvfdiv_d): Ditto.
+	(CODE_FOR_lasx_xvfmax_s): Ditto.
+	(CODE_FOR_lasx_xvfmax_d): Ditto.
+	(CODE_FOR_lasx_xvfmin_s): Ditto.
+	(CODE_FOR_lasx_xvfmin_d): Ditto.
+	(CODE_FOR_lasx_xvfsqrt_s): Ditto.
+	(CODE_FOR_lasx_xvfsqrt_d): Ditto.
+	(CODE_FOR_lasx_xvflogb_s): Ditto.
+	(CODE_FOR_lasx_xvflogb_d): Ditto.
+	(CODE_FOR_lasx_xvmax_b): Ditto.
+	(CODE_FOR_lasx_xvmax_h): Ditto.
+	(CODE_FOR_lasx_xvmax_w): Ditto.
+	(CODE_FOR_lasx_xvmax_d): Ditto.
+	(CODE_FOR_lasx_xvmaxi_b): Ditto.
+	(CODE_FOR_lasx_xvmaxi_h): Ditto.
+	(CODE_FOR_lasx_xvmaxi_w): Ditto.
+	(CODE_FOR_lasx_xvmaxi_d): Ditto.
+	(CODE_FOR_lasx_xvmax_bu): Ditto.
+	(CODE_FOR_lasx_xvmax_hu): Ditto.
+	(CODE_FOR_lasx_xvmax_wu): Ditto.
+	(CODE_FOR_lasx_xvmax_du): Ditto.
+	(CODE_FOR_lasx_xvmaxi_bu): Ditto.
+	(CODE_FOR_lasx_xvmaxi_hu): Ditto.
+	(CODE_FOR_lasx_xvmaxi_wu): Ditto.
+	(CODE_FOR_lasx_xvmaxi_du): Ditto.
+	(CODE_FOR_lasx_xvmin_b): Ditto.
+	(CODE_FOR_lasx_xvmin_h): Ditto.
+	(CODE_FOR_lasx_xvmin_w): Ditto.
+	(CODE_FOR_lasx_xvmin_d): Ditto.
+	(CODE_FOR_lasx_xvmini_b): Ditto.
+	(CODE_FOR_lasx_xvmini_h): Ditto.
+	(CODE_FOR_lasx_xvmini_w): Ditto.
+	(CODE_FOR_lasx_xvmini_d): Ditto.
+	(CODE_FOR_lasx_xvmin_bu): Ditto.
+	(CODE_FOR_lasx_xvmin_hu): Ditto.
+	(CODE_FOR_lasx_xvmin_wu): Ditto.
+	(CODE_FOR_lasx_xvmin_du): Ditto.
+	(CODE_FOR_lasx_xvmini_bu): Ditto.
+	(CODE_FOR_lasx_xvmini_hu): Ditto.
+	(CODE_FOR_lasx_xvmini_wu): Ditto.
+	(CODE_FOR_lasx_xvmini_du): Ditto.
+	(CODE_FOR_lasx_xvmod_b): Ditto.
+	(CODE_FOR_lasx_xvmod_h): Ditto.
+	(CODE_FOR_lasx_xvmod_w): Ditto.
+	(CODE_FOR_lasx_xvmod_d): Ditto.
+	(CODE_FOR_lasx_xvmod_bu): Ditto.
+	(CODE_FOR_lasx_xvmod_hu): Ditto.
+	(CODE_FOR_lasx_xvmod_wu): Ditto.
+	(CODE_FOR_lasx_xvmod_du): Ditto.
+	(CODE_FOR_lasx_xvmul_b): Ditto.
+	(CODE_FOR_lasx_xvmul_h): Ditto.
+	(CODE_FOR_lasx_xvmul_w): Ditto.
+	(CODE_FOR_lasx_xvmul_d): Ditto.
+	(CODE_FOR_lasx_xvclz_b): Ditto.
+	(CODE_FOR_lasx_xvclz_h): Ditto.
+	(CODE_FOR_lasx_xvclz_w): Ditto.
+	(CODE_FOR_lasx_xvclz_d): Ditto.
+	(CODE_FOR_lasx_xvnor_v): Ditto.
+	(CODE_FOR_lasx_xvor_v): Ditto.
+	(CODE_FOR_lasx_xvori_b): Ditto.
+	(CODE_FOR_lasx_xvnori_b): Ditto.
+	(CODE_FOR_lasx_xvpcnt_b): Ditto.
+	(CODE_FOR_lasx_xvpcnt_h): Ditto.
+	(CODE_FOR_lasx_xvpcnt_w): Ditto.
+	(CODE_FOR_lasx_xvpcnt_d): Ditto.
+	(CODE_FOR_lasx_xvxor_v): Ditto.
+	(CODE_FOR_lasx_xvxori_b): Ditto.
+	(CODE_FOR_lasx_xvsll_b): Ditto.
+	(CODE_FOR_lasx_xvsll_h): Ditto.
+	(CODE_FOR_lasx_xvsll_w): Ditto.
+	(CODE_FOR_lasx_xvsll_d): Ditto.
+	(CODE_FOR_lasx_xvslli_b): Ditto.
+	(CODE_FOR_lasx_xvslli_h): Ditto.
+	(CODE_FOR_lasx_xvslli_w): Ditto.
+	(CODE_FOR_lasx_xvslli_d): Ditto.
+	(CODE_FOR_lasx_xvsra_b): Ditto.
+	(CODE_FOR_lasx_xvsra_h): Ditto.
+	(CODE_FOR_lasx_xvsra_w): Ditto.
+	(CODE_FOR_lasx_xvsra_d): Ditto.
+	(CODE_FOR_lasx_xvsrai_b): Ditto.
+	(CODE_FOR_lasx_xvsrai_h): Ditto.
+	(CODE_FOR_lasx_xvsrai_w): Ditto.
+	(CODE_FOR_lasx_xvsrai_d): Ditto.
+	(CODE_FOR_lasx_xvsrl_b): Ditto.
+	(CODE_FOR_lasx_xvsrl_h): Ditto.
+	(CODE_FOR_lasx_xvsrl_w): Ditto.
+	(CODE_FOR_lasx_xvsrl_d): Ditto.
+	(CODE_FOR_lasx_xvsrli_b): Ditto.
+	(CODE_FOR_lasx_xvsrli_h): Ditto.
+	(CODE_FOR_lasx_xvsrli_w): Ditto.
+	(CODE_FOR_lasx_xvsrli_d): Ditto.
+	(CODE_FOR_lasx_xvsub_b): Ditto.
+	(CODE_FOR_lasx_xvsub_h): Ditto.
+	(CODE_FOR_lasx_xvsub_w): Ditto.
+	(CODE_FOR_lasx_xvsub_d): Ditto.
+	(CODE_FOR_lasx_xvsubi_bu): Ditto.
+	(CODE_FOR_lasx_xvsubi_hu): Ditto.
+	(CODE_FOR_lasx_xvsubi_wu): Ditto.
+	(CODE_FOR_lasx_xvsubi_du): Ditto.
+	(CODE_FOR_lasx_xvpackod_d): Ditto.
+	(CODE_FOR_lasx_xvpackev_d): Ditto.
+	(CODE_FOR_lasx_xvpickod_d): Ditto.
+	(CODE_FOR_lasx_xvpickev_d): Ditto.
+	(CODE_FOR_lasx_xvrepli_b): Ditto.
+	(CODE_FOR_lasx_xvrepli_h): Ditto.
+	(CODE_FOR_lasx_xvrepli_w): Ditto.
+	(CODE_FOR_lasx_xvrepli_d): Ditto.
+	(CODE_FOR_lasx_xvandn_v): Ditto.
+	(CODE_FOR_lasx_xvorn_v): Ditto.
+	(CODE_FOR_lasx_xvneg_b): Ditto.
+	(CODE_FOR_lasx_xvneg_h): Ditto.
+	(CODE_FOR_lasx_xvneg_w): Ditto.
+	(CODE_FOR_lasx_xvneg_d): Ditto.
+	(CODE_FOR_lasx_xvbsrl_v): Ditto.
+	(CODE_FOR_lasx_xvbsll_v): Ditto.
+	(CODE_FOR_lasx_xvfmadd_s): Ditto.
+	(CODE_FOR_lasx_xvfmadd_d): Ditto.
+	(CODE_FOR_lasx_xvfmsub_s): Ditto.
+	(CODE_FOR_lasx_xvfmsub_d): Ditto.
+	(CODE_FOR_lasx_xvfnmadd_s): Ditto.
+	(CODE_FOR_lasx_xvfnmadd_d): Ditto.
+	(CODE_FOR_lasx_xvfnmsub_s): Ditto.
+	(CODE_FOR_lasx_xvfnmsub_d): Ditto.
+	(CODE_FOR_lasx_xvpermi_q): Ditto.
+	(CODE_FOR_lasx_xvpermi_d): Ditto.
+	(CODE_FOR_lasx_xbnz_v): Ditto.
+	(CODE_FOR_lasx_xbz_v): Ditto.
+	(CODE_FOR_lasx_xvssub_b): Ditto.
+	(CODE_FOR_lasx_xvssub_h): Ditto.
+	(CODE_FOR_lasx_xvssub_w): Ditto.
+	(CODE_FOR_lasx_xvssub_d): Ditto.
+	(CODE_FOR_lasx_xvssub_bu): Ditto.
+	(CODE_FOR_lasx_xvssub_hu): Ditto.
+	(CODE_FOR_lasx_xvssub_wu): Ditto.
+	(CODE_FOR_lasx_xvssub_du): Ditto.
+	(CODE_FOR_lasx_xvabsd_b): Ditto.
+	(CODE_FOR_lasx_xvabsd_h): Ditto.
+	(CODE_FOR_lasx_xvabsd_w): Ditto.
+	(CODE_FOR_lasx_xvabsd_d): Ditto.
+	(CODE_FOR_lasx_xvabsd_bu): Ditto.
+	(CODE_FOR_lasx_xvabsd_hu): Ditto.
+	(CODE_FOR_lasx_xvabsd_wu): Ditto.
+	(CODE_FOR_lasx_xvabsd_du): Ditto.
+	(CODE_FOR_lasx_xvavg_b): Ditto.
+	(CODE_FOR_lasx_xvavg_h): Ditto.
+	(CODE_FOR_lasx_xvavg_w): Ditto.
+	(CODE_FOR_lasx_xvavg_d): Ditto.
+	(CODE_FOR_lasx_xvavg_bu): Ditto.
+	(CODE_FOR_lasx_xvavg_hu): Ditto.
+	(CODE_FOR_lasx_xvavg_wu): Ditto.
+	(CODE_FOR_lasx_xvavg_du): Ditto.
+	(CODE_FOR_lasx_xvavgr_b): Ditto.
+	(CODE_FOR_lasx_xvavgr_h): Ditto.
+	(CODE_FOR_lasx_xvavgr_w): Ditto.
+	(CODE_FOR_lasx_xvavgr_d): Ditto.
+	(CODE_FOR_lasx_xvavgr_bu): Ditto.
+	(CODE_FOR_lasx_xvavgr_hu): Ditto.
+	(CODE_FOR_lasx_xvavgr_wu): Ditto.
+	(CODE_FOR_lasx_xvavgr_du): Ditto.
+	(CODE_FOR_lasx_xvmuh_b): Ditto.
+	(CODE_FOR_lasx_xvmuh_h): Ditto.
+	(CODE_FOR_lasx_xvmuh_w): Ditto.
+	(CODE_FOR_lasx_xvmuh_d): Ditto.
+	(CODE_FOR_lasx_xvmuh_bu): Ditto.
+	(CODE_FOR_lasx_xvmuh_hu): Ditto.
+	(CODE_FOR_lasx_xvmuh_wu): Ditto.
+	(CODE_FOR_lasx_xvmuh_du): Ditto.
+	(CODE_FOR_lasx_xvssran_b_h): Ditto.
+	(CODE_FOR_lasx_xvssran_h_w): Ditto.
+	(CODE_FOR_lasx_xvssran_w_d): Ditto.
+	(CODE_FOR_lasx_xvssran_bu_h): Ditto.
+	(CODE_FOR_lasx_xvssran_hu_w): Ditto.
+	(CODE_FOR_lasx_xvssran_wu_d): Ditto.
+	(CODE_FOR_lasx_xvssrarn_b_h): Ditto.
+	(CODE_FOR_lasx_xvssrarn_h_w): Ditto.
+	(CODE_FOR_lasx_xvssrarn_w_d): Ditto.
+	(CODE_FOR_lasx_xvssrarn_bu_h): Ditto.
+	(CODE_FOR_lasx_xvssrarn_hu_w): Ditto.
+	(CODE_FOR_lasx_xvssrarn_wu_d): Ditto.
+	(CODE_FOR_lasx_xvssrln_bu_h): Ditto.
+	(CODE_FOR_lasx_xvssrln_hu_w): Ditto.
+	(CODE_FOR_lasx_xvssrln_wu_d): Ditto.
+	(CODE_FOR_lasx_xvssrlrn_bu_h): Ditto.
+	(CODE_FOR_lasx_xvssrlrn_hu_w): Ditto.
+	(CODE_FOR_lasx_xvssrlrn_wu_d): Ditto.
+	(CODE_FOR_lasx_xvftint_w_s): Ditto.
+	(CODE_FOR_lasx_xvftint_l_d): Ditto.
+	(CODE_FOR_lasx_xvftint_wu_s): Ditto.
+	(CODE_FOR_lasx_xvftint_lu_d): Ditto.
+	(CODE_FOR_lasx_xvsllwil_h_b): Ditto.
+	(CODE_FOR_lasx_xvsllwil_w_h): Ditto.
+	(CODE_FOR_lasx_xvsllwil_d_w): Ditto.
+	(CODE_FOR_lasx_xvsllwil_hu_bu): Ditto.
+	(CODE_FOR_lasx_xvsllwil_wu_hu): Ditto.
+	(CODE_FOR_lasx_xvsllwil_du_wu): Ditto.
+	(CODE_FOR_lasx_xvsat_b): Ditto.
+	(CODE_FOR_lasx_xvsat_h): Ditto.
+	(CODE_FOR_lasx_xvsat_w): Ditto.
+	(CODE_FOR_lasx_xvsat_d): Ditto.
+	(CODE_FOR_lasx_xvsat_bu): Ditto.
+	(CODE_FOR_lasx_xvsat_hu): Ditto.
+	(CODE_FOR_lasx_xvsat_wu): Ditto.
+	(CODE_FOR_lasx_xvsat_du): Ditto.
+	(loongarch_builtin_vectorized_function): Ditto.
+	(loongarch_expand_builtin_insn): Ditto.
+	(loongarch_expand_builtin): Ditto.
+	* config/loongarch/loongarch-ftypes.def (1): Ditto.
+	(2): Ditto.
+	(3): Ditto.
+	(4): Ditto.
+	* config/loongarch/lasxintrin.h: New file.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config.gcc                             |    2 +-
+ gcc/config/loongarch/lasxintrin.h          | 5338 ++++++++++++++++++++
+ gcc/config/loongarch/loongarch-builtins.cc | 1180 ++++-
+ gcc/config/loongarch/loongarch-ftypes.def  |  271 +-
+ 4 files changed, 6788 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/config/loongarch/lasxintrin.h
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 4e149e0ef..19f584344 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -456,7 +456,7 @@ mips*-*-*)
+ 	;;
+ loongarch*-*-*)
+ 	cpu_type=loongarch
+-	extra_headers="larchintrin.h lsxintrin.h"
++	extra_headers="larchintrin.h lsxintrin.h lasxintrin.h"
+ 	extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
+ 	extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
+ 	extra_options="${extra_options} g.opt fused-madd.opt"
+diff --git a/gcc/config/loongarch/lasxintrin.h b/gcc/config/loongarch/lasxintrin.h
+new file mode 100644
+index 000000000..d39379927
+--- /dev/null
++++ b/gcc/config/loongarch/lasxintrin.h
+@@ -0,0 +1,5338 @@
++/* LARCH Loongson ASX intrinsics include file.
++
++   Copyright (C) 2018 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify it
++   under the terms of the GNU General Public License as published
++   by the Free Software Foundation; either version 3, or (at your
++   option) any later version.
++
++   GCC is distributed in the hope that it will be useful, but WITHOUT
++   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++   License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   .  */
++
++#ifndef _GCC_LOONGSON_ASXINTRIN_H
++#define _GCC_LOONGSON_ASXINTRIN_H 1
++
++#if defined(__loongarch_asx)
++
++typedef signed char v32i8 __attribute__ ((vector_size(32), aligned(32)));
++typedef signed char v32i8_b __attribute__ ((vector_size(32), aligned(1)));
++typedef unsigned char v32u8 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned char v32u8_b __attribute__ ((vector_size(32), aligned(1)));
++typedef short v16i16 __attribute__ ((vector_size(32), aligned(32)));
++typedef short v16i16_h __attribute__ ((vector_size(32), aligned(2)));
++typedef unsigned short v16u16 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned short v16u16_h __attribute__ ((vector_size(32), aligned(2)));
++typedef int v8i32 __attribute__ ((vector_size(32), aligned(32)));
++typedef int v8i32_w __attribute__ ((vector_size(32), aligned(4)));
++typedef unsigned int v8u32 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned int v8u32_w __attribute__ ((vector_size(32), aligned(4)));
++typedef long long v4i64 __attribute__ ((vector_size(32), aligned(32)));
++typedef long long v4i64_d __attribute__ ((vector_size(32), aligned(8)));
++typedef unsigned long long v4u64 __attribute__ ((vector_size(32), aligned(32)));
++typedef unsigned long long v4u64_d __attribute__ ((vector_size(32), aligned(8)));
++typedef float v8f32 __attribute__ ((vector_size(32), aligned(32)));
++typedef float v8f32_w __attribute__ ((vector_size(32), aligned(4)));
++typedef double v4f64 __attribute__ ((vector_size(32), aligned(32)));
++typedef double v4f64_d __attribute__ ((vector_size(32), aligned(8)));
++typedef float __m256 __attribute__ ((__vector_size__ (32),
++				     __may_alias__));
++typedef long long __m256i __attribute__ ((__vector_size__ (32),
++					  __may_alias__));
++typedef double __m256d __attribute__ ((__vector_size__ (32),
++				       __may_alias__));
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsll_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsll_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsll_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsll_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsll_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsll_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsll_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsll_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvslli_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvslli_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvslli_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvslli_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvslli_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslli_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvslli_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvslli_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsra_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsra_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsra_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsra_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsra_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsra_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsra_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsra_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvsrai_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrai_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvsrai_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrai_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvsrai_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrai_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvsrai_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrai_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrar_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrar_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrar_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrar_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrar_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrar_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrar_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrar_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvsrari_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrari_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvsrari_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrari_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvsrari_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrari_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvsrari_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrari_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrl_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrl_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrl_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrl_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrl_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrl_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrl_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrl_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvsrli_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrli_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvsrli_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrli_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvsrli_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrli_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvsrli_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrli_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrlr_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrlr_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrlr_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrlr_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrlr_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrlr_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrlr_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrlr_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvsrlri_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrlri_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvsrlri_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrlri_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvsrlri_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrlri_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvsrlri_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvsrlri_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitclr_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitclr_b ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitclr_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitclr_h ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitclr_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitclr_w ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitclr_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitclr_d ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvbitclri_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitclri_b ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UQI.  */
++#define __lasx_xvbitclri_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitclri_h ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UQI.  */
++#define __lasx_xvbitclri_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitclri_w ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UQI.  */
++#define __lasx_xvbitclri_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitclri_d ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitset_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitset_b ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitset_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitset_h ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitset_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitset_w ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitset_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitset_d ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvbitseti_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitseti_b ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UQI.  */
++#define __lasx_xvbitseti_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitseti_h ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UQI.  */
++#define __lasx_xvbitseti_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitseti_w ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UQI.  */
++#define __lasx_xvbitseti_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitseti_d ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitrev_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitrev_b ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitrev_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitrev_h ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitrev_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitrev_w ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitrev_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvbitrev_d ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvbitrevi_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitrevi_b ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UQI.  */
++#define __lasx_xvbitrevi_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitrevi_h ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UQI.  */
++#define __lasx_xvbitrevi_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitrevi_w ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UQI.  */
++#define __lasx_xvbitrevi_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvbitrevi_d ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadd_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadd_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadd_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadd_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadd_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadd_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadd_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadd_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvaddi_bu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvaddi_bu ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvaddi_hu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvaddi_hu ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvaddi_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvaddi_wu ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvaddi_du(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvaddi_du ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsub_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsub_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsub_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsub_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsub_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsub_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsub_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsub_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvsubi_bu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsubi_bu ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvsubi_hu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsubi_hu ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvsubi_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsubi_wu ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvsubi_du(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsubi_du ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V32QI, V32QI, QI.  */
++#define __lasx_xvmaxi_b(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V16HI, V16HI, QI.  */
++#define __lasx_xvmaxi_h(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V8SI, V8SI, QI.  */
++#define __lasx_xvmaxi_w(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V4DI, V4DI, QI.  */
++#define __lasx_xvmaxi_d(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmax_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmax_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvmaxi_bu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_bu ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UQI.  */
++#define __lasx_xvmaxi_hu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_hu ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UQI.  */
++#define __lasx_xvmaxi_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_wu ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UQI.  */
++#define __lasx_xvmaxi_du(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmaxi_du ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V32QI, V32QI, QI.  */
++#define __lasx_xvmini_b(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V16HI, V16HI, QI.  */
++#define __lasx_xvmini_h(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V8SI, V8SI, QI.  */
++#define __lasx_xvmini_w(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V4DI, V4DI, QI.  */
++#define __lasx_xvmini_d(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmin_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmin_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvmini_bu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_bu ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UQI.  */
++#define __lasx_xvmini_hu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_hu ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UQI.  */
++#define __lasx_xvmini_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_wu ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UQI.  */
++#define __lasx_xvmini_du(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvmini_du ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvseq_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvseq_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvseq_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvseq_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvseq_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvseq_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvseq_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvseq_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V32QI, V32QI, QI.  */
++#define __lasx_xvseqi_b(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvseqi_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V16HI, V16HI, QI.  */
++#define __lasx_xvseqi_h(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvseqi_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V8SI, V8SI, QI.  */
++#define __lasx_xvseqi_w(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvseqi_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V4DI, V4DI, QI.  */
++#define __lasx_xvseqi_d(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvseqi_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V32QI, V32QI, QI.  */
++#define __lasx_xvslti_b(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V16HI, V16HI, QI.  */
++#define __lasx_xvslti_h(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V8SI, V8SI, QI.  */
++#define __lasx_xvslti_w(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V4DI, V4DI, QI.  */
++#define __lasx_xvslti_d(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvslt_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvslt_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V32QI, UV32QI, UQI.  */
++#define __lasx_xvslti_bu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_bu ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, UV16HI, UQI.  */
++#define __lasx_xvslti_hu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_hu ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, UV8SI, UQI.  */
++#define __lasx_xvslti_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_wu ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UQI.  */
++#define __lasx_xvslti_du(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslti_du ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V32QI, V32QI, QI.  */
++#define __lasx_xvslei_b(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V16HI, V16HI, QI.  */
++#define __lasx_xvslei_h(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V8SI, V8SI, QI.  */
++#define __lasx_xvslei_w(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, si5.  */
++/* Data types in instruction templates:  V4DI, V4DI, QI.  */
++#define __lasx_xvslei_d(/*__m256i*/ _1, /*si5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsle_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsle_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V32QI, UV32QI, UQI.  */
++#define __lasx_xvslei_bu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_bu ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, UV16HI, UQI.  */
++#define __lasx_xvslei_hu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_hu ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, UV8SI, UQI.  */
++#define __lasx_xvslei_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_wu ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UQI.  */
++#define __lasx_xvslei_du(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvslei_du ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvsat_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvsat_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvsat_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvsat_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvsat_bu(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_bu ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UQI.  */
++#define __lasx_xvsat_hu(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_hu ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UQI.  */
++#define __lasx_xvsat_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_wu ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UQI.  */
++#define __lasx_xvsat_du(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvsat_du ((v4u64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadda_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadda_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadda_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadda_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadda_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadda_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadda_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadda_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsadd_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsadd_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* xvavg.b xd, xj, xk: element-wise signed average (truncating), 8-bit.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvavg.h xd, xj, xk: element-wise signed average (truncating), 16-bit.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvavg.w xd, xj, xk: element-wise signed average (truncating), 32-bit.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvavg.d xd, xj, xk: element-wise signed average (truncating), 64-bit.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvavg.bu xd, xj, xk: element-wise unsigned average (truncating), 8-bit.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvavg.hu xd, xj, xk: element-wise unsigned average (truncating), 16-bit.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvavg.wu xd, xj, xk: element-wise unsigned average (truncating), 32-bit.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvavg.du xd, xj, xk: element-wise unsigned average (truncating), 64-bit.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavg_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavg_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* xvavgr.b xd, xj, xk: element-wise signed rounding average, 8-bit.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvavgr.h xd, xj, xk: element-wise signed rounding average, 16-bit.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvavgr.w xd, xj, xk: element-wise signed rounding average, 32-bit.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvavgr.d xd, xj, xk: element-wise signed rounding average, 64-bit.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvavgr.bu xd, xj, xk: element-wise unsigned rounding average, 8-bit.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvavgr.hu xd, xj, xk: element-wise unsigned rounding average, 16-bit.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvavgr.wu xd, xj, xk: element-wise unsigned rounding average, 32-bit.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvavgr.du xd, xj, xk: element-wise unsigned rounding average, 64-bit.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvavgr_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvavgr_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* xvssub.b xd, xj, xk: element-wise signed saturating subtract, 8-bit.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvssub.h xd, xj, xk: element-wise signed saturating subtract, 16-bit.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvssub.w xd, xj, xk: element-wise signed saturating subtract, 32-bit.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvssub.d xd, xj, xk: element-wise signed saturating subtract, 64-bit.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvssub.bu xd, xj, xk: unsigned saturating subtract (floors at 0), 8-bit.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvssub.hu xd, xj, xk: unsigned saturating subtract (floors at 0), 16-bit.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvssub.wu xd, xj, xk: unsigned saturating subtract (floors at 0), 32-bit.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvssub.du xd, xj, xk: unsigned saturating subtract (floors at 0), 64-bit.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssub_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssub_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* xvabsd.b xd, xj, xk: element-wise signed absolute difference, 8-bit.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvabsd.h xd, xj, xk: element-wise signed absolute difference, 16-bit.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvabsd.w xd, xj, xk: element-wise signed absolute difference, 32-bit.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvabsd.d xd, xj, xk: element-wise signed absolute difference, 64-bit.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvabsd.bu xd, xj, xk: element-wise unsigned absolute difference, 8-bit.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvabsd.hu xd, xj, xk: element-wise unsigned absolute difference, 16-bit.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvabsd.wu xd, xj, xk: element-wise unsigned absolute difference, 32-bit.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvabsd.du xd, xj, xk: element-wise unsigned absolute difference, 64-bit.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvabsd_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvabsd_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* xvmul.b xd, xj, xk: element-wise multiply, low 8 bits of product kept.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmul_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmul_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvmul.h xd, xj, xk: element-wise multiply, low 16 bits of product kept.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmul_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmul_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvmul.w xd, xj, xk: element-wise multiply, low 32 bits of product kept.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmul_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmul_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvmul.d xd, xj, xk: element-wise multiply, low 64 bits of product kept.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmul_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmul_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvmadd.b xd, xj, xk: multiply-accumulate, xd += xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmadd_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmadd_b ((v32i8)_1, (v32i8)_2, (v32i8)_3);
++}
++
++/* xvmadd.h xd, xj, xk: multiply-accumulate, xd += xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmadd_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmadd_h ((v16i16)_1, (v16i16)_2, (v16i16)_3);
++}
++
++/* xvmadd.w xd, xj, xk: multiply-accumulate, xd += xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmadd_w (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmadd_w ((v8i32)_1, (v8i32)_2, (v8i32)_3);
++}
++
++/* xvmadd.d xd, xj, xk: multiply-accumulate, xd += xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmadd_d (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmadd_d ((v4i64)_1, (v4i64)_2, (v4i64)_3);
++}
++
++/* xvmsub.b xd, xj, xk: multiply-subtract, xd -= xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmsub_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmsub_b ((v32i8)_1, (v32i8)_2, (v32i8)_3);
++}
++
++/* xvmsub.h xd, xj, xk: multiply-subtract, xd -= xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmsub_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmsub_h ((v16i16)_1, (v16i16)_2, (v16i16)_3);
++}
++
++/* xvmsub.w xd, xj, xk: multiply-subtract, xd -= xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmsub_w (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmsub_w ((v8i32)_1, (v8i32)_2, (v8i32)_3);
++}
++
++/* xvmsub.d xd, xj, xk: multiply-subtract, xd -= xj * xk; _1 is the accumulator.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmsub_d (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmsub_d ((v4i64)_1, (v4i64)_2, (v4i64)_3);
++}
++
++/* xvdiv.b xd, xj, xk: element-wise signed division xj / xk, 8-bit.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvdiv.h xd, xj, xk: element-wise signed division xj / xk, 16-bit.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvdiv.w xd, xj, xk: element-wise signed division xj / xk, 32-bit.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvdiv.d xd, xj, xk: element-wise signed division xj / xk, 64-bit.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvdiv.bu xd, xj, xk: element-wise unsigned division xj / xk, 8-bit.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvdiv.hu xd, xj, xk: element-wise unsigned division xj / xk, 16-bit.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvdiv.wu xd, xj, xk: element-wise unsigned division xj / xk, 32-bit.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvdiv.du xd, xj, xk: element-wise unsigned division xj / xk, 64-bit.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvdiv_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvdiv_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* xvhaddw.h.b xd, xj, xk: widening horizontal add, 8-bit -> 16-bit (odd/even operand pairing per LASX ISA).  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvhaddw.w.h xd, xj, xk: widening horizontal add, 16-bit -> 32-bit (odd/even operand pairing per LASX ISA).  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvhaddw.d.w xd, xj, xk: widening horizontal add, 32-bit -> 64-bit (odd/even operand pairing per LASX ISA).  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvhaddw.hu.bu xd, xj, xk: unsigned widening horizontal add, 8-bit -> 16-bit.  */
++/* Data types in instruction templates:  UV16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_hu_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_hu_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvhaddw.wu.hu xd, xj, xk: unsigned widening horizontal add, 16-bit -> 32-bit.  */
++/* Data types in instruction templates:  UV8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_wu_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_wu_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvhaddw.du.wu xd, xj, xk: unsigned widening horizontal add, 32-bit -> 64-bit.  */
++/* Data types in instruction templates:  UV4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_du_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_du_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvhsubw.h.b xd, xj, xk: widening horizontal subtract, 8-bit -> 16-bit (odd/even operand pairing per LASX ISA).  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvhsubw.w.h xd, xj, xk: widening horizontal subtract, 16-bit -> 32-bit (odd/even operand pairing per LASX ISA).  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvhsubw.d.w xd, xj, xk: widening horizontal subtract, 32-bit -> 64-bit (odd/even operand pairing per LASX ISA).  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvhsubw.hu.bu xd, xj, xk: widening subtract of unsigned 8-bit elements; result is signed 16-bit (note V16HI below).  */
++/* Data types in instruction templates:  V16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_hu_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_hu_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvhsubw.wu.hu xd, xj, xk: widening subtract of unsigned 16-bit elements; result is signed 32-bit (note V8SI below).  */
++/* Data types in instruction templates:  V8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_wu_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_wu_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvhsubw.du.wu xd, xj, xk: widening subtract of unsigned 32-bit elements; result is signed 64-bit (note V4DI below).  */
++/* Data types in instruction templates:  V4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_du_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_du_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvmod.b xd, xj, xk: element-wise signed remainder xj % xk, 8-bit.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvmod.h xd, xj, xk: element-wise signed remainder xj % xk, 16-bit.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvmod.w xd, xj, xk: element-wise signed remainder xj % xk, 32-bit.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvmod.d xd, xj, xk: element-wise signed remainder xj % xk, 64-bit.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvmod.bu xd, xj, xk: element-wise unsigned remainder xj % xk, 8-bit.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* xvmod.hu xd, xj, xk: element-wise unsigned remainder xj % xk, 16-bit.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* xvmod.wu xd, xj, xk: element-wise unsigned remainder xj % xk, 32-bit.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* xvmod.du xd, xj, xk: element-wise unsigned remainder xj % xk, 64-bit.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmod_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmod_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* xvrepl128vei.b xd, xj, ui4: broadcast byte element ui4 within each 128-bit lane (macro: immediate must be a constant).  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvrepl128vei_b(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvrepl128vei_b ((v32i8)(_1), (_2)))
++
++/* xvrepl128vei.h xd, xj, ui3: broadcast halfword element ui3 within each 128-bit lane.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvrepl128vei_h(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvrepl128vei_h ((v16i16)(_1), (_2)))
++
++/* xvrepl128vei.w xd, xj, ui2: broadcast word element ui2 within each 128-bit lane.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvrepl128vei_w(/*__m256i*/ _1, /*ui2*/ _2) \
++  ((__m256i)__builtin_lasx_xvrepl128vei_w ((v8i32)(_1), (_2)))
++
++/* xvrepl128vei.d xd, xj, ui1: broadcast doubleword element ui1 within each 128-bit lane.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvrepl128vei_d(/*__m256i*/ _1, /*ui1*/ _2) \
++  ((__m256i)__builtin_lasx_xvrepl128vei_d ((v4i64)(_1), (_2)))
++
++/* xvpickev.b xd, xj, xk: pick even-indexed 8-bit elements from the two sources.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickev_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickev_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvpickev.h xd, xj, xk: pick even-indexed 16-bit elements from the two sources.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickev_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickev_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvpickev.w xd, xj, xk: pick even-indexed 32-bit elements from the two sources.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickev_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickev_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvpickev.d xd, xj, xk: pick even-indexed 64-bit elements from the two sources.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickev_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickev_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvpickod.b xd, xj, xk: pick odd-indexed 8-bit elements from the two sources.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickod_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickod_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvpickod.h xd, xj, xk: pick odd-indexed 16-bit elements from the two sources.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickod_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickod_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvpickod.w xd, xj, xk: pick odd-indexed 32-bit elements from the two sources.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickod_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickod_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvpickod.d xd, xj, xk: pick odd-indexed 64-bit elements from the two sources.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpickod_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpickod_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvilvh.b xd, xj, xk: interleave 8-bit elements from the high halves of the sources.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvh_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvh_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvilvh.h xd, xj, xk: interleave 16-bit elements from the high halves of the sources.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvh_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvh_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvilvh.w xd, xj, xk: interleave 32-bit elements from the high halves of the sources.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvh_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvh_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvilvh.d xd, xj, xk: interleave 64-bit elements from the high halves of the sources.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvh_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvh_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvilvl.b xd, xj, xk: interleave 8-bit elements from the low halves of the sources.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvl_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvl_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvilvl.h xd, xj, xk: interleave 16-bit elements from the low halves of the sources.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvl_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvl_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvilvl.w xd, xj, xk: interleave 32-bit elements from the low halves of the sources.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvl_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvl_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvilvl.d xd, xj, xk: interleave 64-bit elements from the low halves of the sources.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvilvl_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvilvl_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvpackev.b xd, xj, xk: interleave the even-indexed 8-bit elements of the two sources.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackev_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackev_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvpackev.h xd, xj, xk: interleave the even-indexed 16-bit elements of the two sources.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackev_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackev_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvpackev.w xd, xj, xk: interleave the even-indexed 32-bit elements of the two sources.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackev_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackev_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvpackev.d xd, xj, xk: interleave the even-indexed 64-bit elements of the two sources.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackev_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackev_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* xvpackod.b xd, xj, xk: interleave the odd-indexed 8-bit elements of the two sources.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackod_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackod_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* xvpackod.h xd, xj, xk: interleave the odd-indexed 16-bit elements of the two sources.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackod_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackod_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* xvpackod.w xd, xj, xk: interleave the odd-indexed 32-bit elements of the two sources.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackod_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackod_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* xvpackod.d xd, xj, xk: interleave the odd-indexed 64-bit elements of the two sources.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpackod_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvpackod_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvshuf_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvshuf_b ((v32i8)_1, (v32i8)_2, (v32i8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvshuf_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvshuf_h ((v16i16)_1, (v16i16)_2, (v16i16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvshuf_w (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvshuf_w ((v8i32)_1, (v8i32)_2, (v8i32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvshuf_d (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvshuf_d ((v4i64)_1, (v4i64)_2, (v4i64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvand_v (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvand_v ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvandi_b(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvandi_b ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvor_v (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvor_v ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvori_b(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvori_b ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvnor_v (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvnor_v ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvnori_b(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvnori_b ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvxor_v (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvxor_v ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UQI.  */
++#define __lasx_xvxori_b(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvxori_b ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvbitsel_v (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvbitsel_v ((v32u8)_1, (v32u8)_2, (v32u8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI, USI.  */
++#define __lasx_xvbitseli_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvbitseli_b ((v32u8)(_1), (v32u8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V32QI, V32QI, USI.  */
++#define __lasx_xvshuf4i_b(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvshuf4i_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V16HI, V16HI, USI.  */
++#define __lasx_xvshuf4i_h(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvshuf4i_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V8SI, V8SI, USI.  */
++#define __lasx_xvshuf4i_w(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvshuf4i_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, rj.  */
++/* Data types in instruction templates:  V32QI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplgr2vr_b (int _1)
++{
++  return (__m256i)__builtin_lasx_xvreplgr2vr_b ((int)_1);
++}
++
++/* Assembly instruction format:	xd, rj.  */
++/* Data types in instruction templates:  V16HI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplgr2vr_h (int _1)
++{
++  return (__m256i)__builtin_lasx_xvreplgr2vr_h ((int)_1);
++}
++
++/* Assembly instruction format:	xd, rj.  */
++/* Data types in instruction templates:  V8SI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplgr2vr_w (int _1)
++{
++  return (__m256i)__builtin_lasx_xvreplgr2vr_w ((int)_1);
++}
++
++/* Assembly instruction format:	xd, rj.  */
++/* Data types in instruction templates:  V4DI, DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplgr2vr_d (long int _1)
++{
++  return (__m256i)__builtin_lasx_xvreplgr2vr_d ((long int)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpcnt_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvpcnt_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpcnt_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvpcnt_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpcnt_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvpcnt_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvpcnt_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvpcnt_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclo_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclo_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclo_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclo_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclo_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclo_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclo_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclo_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclz_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclz_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclz_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclz_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclz_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclz_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvclz_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvclz_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfadd_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfadd_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfadd_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfadd_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfsub_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfsub_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfsub_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfsub_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfmul_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfmul_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfmul_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfmul_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfdiv_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfdiv_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfdiv_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfdiv_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcvt_h_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcvt_h_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfcvt_s_d (__m256d _1, __m256d _2)
++{
++  return (__m256)__builtin_lasx_xvfcvt_s_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfmin_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfmin_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfmin_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfmin_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfmina_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfmina_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfmina_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfmina_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfmax_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfmax_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfmax_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfmax_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfmaxa_s (__m256 _1, __m256 _2)
++{
++  return (__m256)__builtin_lasx_xvfmaxa_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfmaxa_d (__m256d _1, __m256d _2)
++{
++  return (__m256d)__builtin_lasx_xvfmaxa_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfclass_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvfclass_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfclass_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvfclass_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfsqrt_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfsqrt_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfsqrt_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfsqrt_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrecip_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrecip_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrecip_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrecip_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrint_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrint_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrint_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrint_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrsqrt_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrsqrt_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrsqrt_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrsqrt_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvflogb_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvflogb_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvflogb_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvflogb_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfcvth_s_h (__m256i _1)
++{
++  return (__m256)__builtin_lasx_xvfcvth_s_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfcvth_d_s (__m256 _1)
++{
++  return (__m256d)__builtin_lasx_xvfcvth_d_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfcvtl_s_h (__m256i _1)
++{
++  return (__m256)__builtin_lasx_xvfcvtl_s_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfcvtl_d_s (__m256 _1)
++{
++  return (__m256d)__builtin_lasx_xvfcvtl_d_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftint_w_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftint_w_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftint_l_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvftint_l_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftint_wu_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftint_wu_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftint_lu_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvftint_lu_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrz_w_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrz_w_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrz_l_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrz_l_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrz_wu_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrz_wu_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrz_lu_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrz_lu_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvffint_s_w (__m256i _1)
++{
++  return (__m256)__builtin_lasx_xvffint_s_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvffint_d_l (__m256i _1)
++{
++  return (__m256d)__builtin_lasx_xvffint_d_l ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SF, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvffint_s_wu (__m256i _1)
++{
++  return (__m256)__builtin_lasx_xvffint_s_wu ((v8u32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvffint_d_lu (__m256i _1)
++{
++  return (__m256d)__builtin_lasx_xvffint_d_lu ((v4u64)_1);
++}
++
++/* Assembly instruction format:	xd, xj, rk.  */
++/* Data types in instruction templates:  V32QI, V32QI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve_b (__m256i _1, int _2)
++{
++  return (__m256i)__builtin_lasx_xvreplve_b ((v32i8)_1, (int)_2);
++}
++
++/* Assembly instruction format:	xd, xj, rk.  */
++/* Data types in instruction templates:  V16HI, V16HI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve_h (__m256i _1, int _2)
++{
++  return (__m256i)__builtin_lasx_xvreplve_h ((v16i16)_1, (int)_2);
++}
++
++/* Assembly instruction format:	xd, xj, rk.  */
++/* Data types in instruction templates:  V8SI, V8SI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve_w (__m256i _1, int _2)
++{
++  return (__m256i)__builtin_lasx_xvreplve_w ((v8i32)_1, (int)_2);
++}
++
++/* Assembly instruction format:	xd, xj, rk.  */
++/* Data types in instruction templates:  V4DI, V4DI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve_d (__m256i _1, int _2)
++{
++  return (__m256i)__builtin_lasx_xvreplve_d ((v4i64)_1, (int)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvpermi_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvpermi_w ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvandn_v (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvandn_v ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvneg_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvneg_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvneg_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvneg_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvneg_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvneg_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvneg_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvneg_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmuh_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmuh_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V16HI, V32QI, UQI.  */
++#define __lasx_xvsllwil_h_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsllwil_h_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V8SI, V16HI, UQI.  */
++#define __lasx_xvsllwil_w_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsllwil_w_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V4DI, V8SI, UQI.  */
++#define __lasx_xvsllwil_d_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsllwil_d_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  UV16HI, UV32QI, UQI.  */
++#define __lasx_xvsllwil_hu_bu(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvsllwil_hu_bu ((v32u8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV8SI, UV16HI, UQI.  */
++#define __lasx_xvsllwil_wu_hu(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvsllwil_wu_hu ((v16u16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV4DI, UV8SI, UQI.  */
++#define __lasx_xvsllwil_du_wu(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvsllwil_du_wu ((v8u32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsran_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsran_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsran_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsran_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsran_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsran_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssran_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssran_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssran_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssran_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssran_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssran_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssran_bu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssran_bu_h ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssran_hu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssran_hu_w ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssran_wu_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssran_wu_d ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrarn_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrarn_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrarn_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrarn_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrarn_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrarn_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrarn_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrarn_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrarn_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrarn_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrarn_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrarn_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrarn_bu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrarn_bu_h ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrarn_hu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrarn_hu_w ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrarn_wu_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrarn_wu_d ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrln_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrln_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrln_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrln_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrln_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrln_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrln_bu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrln_bu_h ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrln_hu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrln_hu_w ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrln_wu_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrln_wu_d ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrlrn_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrlrn_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrlrn_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrlrn_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsrlrn_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsrlrn_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV32QI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrlrn_bu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrlrn_bu_h ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrlrn_hu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrlrn_hu_w ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrlrn_wu_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrlrn_wu_d ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, UQI.  */
++#define __lasx_xvfrstpi_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvfrstpi_b ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, UQI.  */
++#define __lasx_xvfrstpi_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvfrstpi_h ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfrstp_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvfrstp_b ((v32i8)_1, (v32i8)_2, (v32i8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfrstp_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvfrstp_h ((v16i16)_1, (v16i16)_2, (v16i16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvshuf4i_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvshuf4i_d ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvbsrl_v(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvbsrl_v ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvbsll_v(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvbsll_v ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvextrins_b(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvextrins_b ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvextrins_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvextrins_h ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvextrins_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvextrins_w ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvextrins_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvextrins_d ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmskltz_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvmskltz_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmskltz_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvmskltz_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmskltz_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvmskltz_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmskltz_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvmskltz_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsigncov_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsigncov_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsigncov_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsigncov_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsigncov_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsigncov_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsigncov_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsigncov_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfmadd_s (__m256 _1, __m256 _2, __m256 _3)
++{
++  return (__m256)__builtin_lasx_xvfmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfmadd_d (__m256d _1, __m256d _2, __m256d _3)
++{
++  return (__m256d)__builtin_lasx_xvfmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfmsub_s (__m256 _1, __m256 _2, __m256 _3)
++{
++  return (__m256)__builtin_lasx_xvfmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfmsub_d (__m256d _1, __m256d _2, __m256d _3)
++{
++  return (__m256d)__builtin_lasx_xvfmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfnmadd_s (__m256 _1, __m256 _2, __m256 _3)
++{
++  return (__m256)__builtin_lasx_xvfnmadd_s ((v8f32)_1, (v8f32)_2, (v8f32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfnmadd_d (__m256d _1, __m256d _2, __m256d _3)
++{
++  return (__m256d)__builtin_lasx_xvfnmadd_d ((v4f64)_1, (v4f64)_2, (v4f64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V8SF, V8SF, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfnmsub_s (__m256 _1, __m256 _2, __m256 _3)
++{
++  return (__m256)__builtin_lasx_xvfnmsub_s ((v8f32)_1, (v8f32)_2, (v8f32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk, xa.  */
++/* Data types in instruction templates:  V4DF, V4DF, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfnmsub_d (__m256d _1, __m256d _2, __m256d _3)
++{
++  return (__m256d)__builtin_lasx_xvfnmsub_d ((v4f64)_1, (v4f64)_2, (v4f64)_3);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrne_w_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrne_w_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrne_l_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrne_l_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrp_w_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrp_w_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrp_l_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrp_l_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrm_w_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrm_w_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrm_l_d (__m256d _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrm_l_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftint_w_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvftint_w_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SF, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvffint_s_l (__m256i _1, __m256i _2)
++{
++  return (__m256)__builtin_lasx_xvffint_s_l ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrz_w_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvftintrz_w_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrp_w_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvftintrp_w_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrm_w_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvftintrm_w_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrne_w_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvftintrne_w_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftinth_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftinth_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintl_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintl_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvffinth_d_w (__m256i _1)
++{
++  return (__m256d)__builtin_lasx_xvffinth_d_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DF, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvffintl_d_w (__m256i _1)
++{
++  return (__m256d)__builtin_lasx_xvffintl_d_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrzh_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrzh_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrzl_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrzl_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrph_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrph_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrpl_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrpl_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrmh_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrmh_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrml_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrml_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrneh_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrneh_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvftintrnel_l_s (__m256 _1)
++{
++  return (__m256i)__builtin_lasx_xvftintrnel_l_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrintrne_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrintrne_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrintrne_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrintrne_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrintrz_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrintrz_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrintrz_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrintrz_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrintrp_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrintrp_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrintrp_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrintrp_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256 __lasx_xvfrintrm_s (__m256 _1)
++{
++  return (__m256)__builtin_lasx_xvfrintrm_s ((v8f32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256d __lasx_xvfrintrm_d (__m256d _1)
++{
++  return (__m256d)__builtin_lasx_xvfrintrm_d ((v4f64)_1);
++}
++
++/* Assembly instruction format:	xd, rj, si12.  */
++/* Data types in instruction templates:  V32QI, CVPOINTER, SI.  */
++#define __lasx_xvld(/*void **/ _1, /*si12*/ _2) \
++  ((__m256i)__builtin_lasx_xvld ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	xd, rj, si12.  */
++/* Data types in instruction templates:  VOID, V32QI, CVPOINTER, SI.  */
++#define __lasx_xvst(/*__m256i*/ _1, /*void **/ _2, /*si12*/ _3) \
++  ((void)__builtin_lasx_xvst ((v32i8)(_1), (void *)(_2), (_3)))
++
++/* Assembly instruction format:	xd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V32QI, CVPOINTER, SI, UQI.  */
++#define __lasx_xvstelm_b(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lasx_xvstelm_b ((v32i8)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	xd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V16HI, CVPOINTER, SI, UQI.  */
++#define __lasx_xvstelm_h(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lasx_xvstelm_h ((v16i16)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	xd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V8SI, CVPOINTER, SI, UQI.  */
++#define __lasx_xvstelm_w(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lasx_xvstelm_w ((v8i32)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	xd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V4DI, CVPOINTER, SI, UQI.  */
++#define __lasx_xvstelm_d(/*__m256i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lasx_xvstelm_d ((v4i64)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, UQI.  */
++#define __lasx_xvinsve0_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui3*/ _3) \
++  ((__m256i)__builtin_lasx_xvinsve0_w ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui2.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, UQI.  */
++#define __lasx_xvinsve0_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui2*/ _3) \
++  ((__m256i)__builtin_lasx_xvinsve0_d ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvpickve_w(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvpickve_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui2.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvpickve_d(/*__m256i*/ _1, /*ui2*/ _2) \
++  ((__m256i)__builtin_lasx_xvpickve_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrlrn_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrlrn_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrlrn_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrlrn_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrlrn_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrlrn_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrln_b_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrln_b_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrln_h_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrln_h_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvssrln_w_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvssrln_w_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvorn_v (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvorn_v ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, i13.  */
++/* Data types in instruction templates:  V4DI, HI.  */
++#define __lasx_xvldi(/*i13*/ _1) \
++  ((__m256i)__builtin_lasx_xvldi ((_1)))
++
++/* Assembly instruction format:	xd, rj, rk.  */
++/* Data types in instruction templates:  V32QI, CVPOINTER, DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvldx (void * _1, long int _2)
++{
++  return (__m256i)__builtin_lasx_xvldx ((void *)_1, (long int)_2);
++}
++
++/* Assembly instruction format:	xd, rj, rk.  */
++/* Data types in instruction templates:  VOID, V32QI, CVPOINTER, DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++void __lasx_xvstx (__m256i _1, void * _2, long int _3)
++{
++  return (void)__builtin_lasx_xvstx ((v32i8)_1, (void *)_2, (long int)_3);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvextl_qu_du (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvextl_qu_du ((v4u64)_1);
++}
++
++/* Assembly instruction format:	xd, rj, ui3.  */
++/* Data types in instruction templates:  V8SI, V8SI, SI, UQI.  */
++#define __lasx_xvinsgr2vr_w(/*__m256i*/ _1, /*int*/ _2, /*ui3*/ _3) \
++  ((__m256i)__builtin_lasx_xvinsgr2vr_w ((v8i32)(_1), (int)(_2), (_3)))
++
++/* Assembly instruction format:	xd, rj, ui2.  */
++/* Data types in instruction templates:  V4DI, V4DI, DI, UQI.  */
++#define __lasx_xvinsgr2vr_d(/*__m256i*/ _1, /*long int*/ _2, /*ui2*/ _3) \
++  ((__m256i)__builtin_lasx_xvinsgr2vr_d ((v4i64)(_1), (long int)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve0_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvreplve0_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve0_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvreplve0_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve0_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvreplve0_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve0_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvreplve0_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvreplve0_q (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvreplve0_q ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_h_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_h_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_w_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_w_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_d_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_d_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_w_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_w_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_d_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_d_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_d_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_d_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_hu_bu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_hu_bu ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_wu_hu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_wu_hu ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_du_wu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_du_wu ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_wu_bu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_wu_bu ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_du_hu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_du_hu ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_vext2xv_du_bu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_vext2xv_du_bu ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvpermi_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui8*/ _3) \
++  ((__m256i)__builtin_lasx_xvpermi_q ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui8.  */
++/* Data types in instruction templates:  V4DI, V4DI, USI.  */
++#define __lasx_xvpermi_d(/*__m256i*/ _1, /*ui8*/ _2) \
++  ((__m256i)__builtin_lasx_xvpermi_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvperm_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvperm_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, rj, si12.  */
++/* Data types in instruction templates:  V32QI, CVPOINTER, SI.  */
++#define __lasx_xvldrepl_b(/*void **/ _1, /*si12*/ _2) \
++  ((__m256i)__builtin_lasx_xvldrepl_b ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	xd, rj, si11.  */
++/* Data types in instruction templates:  V16HI, CVPOINTER, SI.  */
++#define __lasx_xvldrepl_h(/*void **/ _1, /*si11*/ _2) \
++  ((__m256i)__builtin_lasx_xvldrepl_h ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	xd, rj, si10.  */
++/* Data types in instruction templates:  V8SI, CVPOINTER, SI.  */
++#define __lasx_xvldrepl_w(/*void **/ _1, /*si10*/ _2) \
++  ((__m256i)__builtin_lasx_xvldrepl_w ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	xd, rj, si9.  */
++/* Data types in instruction templates:  V4DI, CVPOINTER, SI.  */
++#define __lasx_xvldrepl_d(/*void **/ _1, /*si9*/ _2) \
++  ((__m256i)__builtin_lasx_xvldrepl_d ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	rd, xj, ui3.  */
++/* Data types in instruction templates:  SI, V8SI, UQI.  */
++#define __lasx_xvpickve2gr_w(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((int)__builtin_lasx_xvpickve2gr_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	rd, xj, ui3.  */
++/* Data types in instruction templates:  USI, V8SI, UQI.  */
++#define __lasx_xvpickve2gr_wu(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((unsigned int)__builtin_lasx_xvpickve2gr_wu ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	rd, xj, ui2.  */
++/* Data types in instruction templates:  DI, V4DI, UQI.  */
++#define __lasx_xvpickve2gr_d(/*__m256i*/ _1, /*ui2*/ _2) \
++  ((long int)__builtin_lasx_xvpickve2gr_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	rd, xj, ui2.  */
++/* Data types in instruction templates:  UDI, V4DI, UQI.  */
++#define __lasx_xvpickve2gr_du(/*__m256i*/ _1, /*ui2*/ _2) \
++  ((unsigned long int)__builtin_lasx_xvpickve2gr_du ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_q_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_q_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_d_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_d_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_w_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_w_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_h_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_h_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_q_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_q_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_d_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_d_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_w_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_w_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwev_h_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwev_h_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_q_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_q_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_d_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_d_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_w_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_w_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_h_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_h_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_q_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_q_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_d_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_d_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_w_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_w_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_h_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_h_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_q_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_q_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_d_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_d_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_w_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_w_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsubwod_h_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsubwod_h_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_d_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_d_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_w_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_w_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_h_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_h_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_q_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_q_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_d_wu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_d_wu ((v8u32)_1, (v8u32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_w_hu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_w_hu ((v16u16)_1, (v16u16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_h_bu (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_h_bu ((v32u8)_1, (v32u8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_d_wu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_d_wu_w ((v8u32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_w_hu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_w_hu_h ((v16u16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_h_bu_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_h_bu_b ((v32u8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_d_wu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_d_wu_w ((v8u32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_w_hu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_w_hu_h ((v16u16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_h_bu_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_h_bu_b ((v32u8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_d_wu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_d_wu_w ((v8u32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_w_hu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_w_hu_h ((v16u16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_h_bu_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_h_bu_b ((v32u8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_d_wu_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_d_wu_w ((v8u32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, UV16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_w_hu_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_w_hu_h ((v16u16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, UV32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_h_bu_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_h_bu_b ((v32u8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhaddw_qu_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhaddw_qu_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_q_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_q_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvhsubw_qu_du (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvhsubw_qu_du ((v4u64)_1, (v4u64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_q_d (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_d_w (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_w_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_h_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_q_du (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_d_wu (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_w_hu (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_h_bu (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_q_d (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_q_d ((v4i64)_1, (v4i64)_2, (v4i64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_d_w (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_d_w ((v4i64)_1, (v8i32)_2, (v8i32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_w_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_w_h ((v8i32)_1, (v16i16)_2, (v16i16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_h_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_h_b ((v16i16)_1, (v32i8)_2, (v32i8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_q_du (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_q_du ((v4u64)_1, (v4u64)_2, (v4u64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, UV8SI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_d_wu (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_d_wu ((v4u64)_1, (v8u32)_2, (v8u32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, UV16HI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_w_hu (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_w_hu ((v8u32)_1, (v16u16)_2, (v16u16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, UV32QI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_h_bu (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_h_bu ((v16u16)_1, (v32u8)_2, (v32u8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, UV4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_q_du_d (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, UV8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_d_wu_w (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, UV16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_w_hu_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, UV32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwev_h_bu_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwev_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, UV4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_q_du_d (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_q_du_d ((v4i64)_1, (v4u64)_2, (v4i64)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, UV8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_d_wu_w (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_d_wu_w ((v4i64)_1, (v8u32)_2, (v8i32)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, UV16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_w_hu_h (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_w_hu_h ((v8i32)_1, (v16u16)_2, (v16i16)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, UV32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmaddwod_h_bu_b (__m256i _1, __m256i _2, __m256i _3)
++{
++  return (__m256i)__builtin_lasx_xvmaddwod_h_bu_b ((v16i16)_1, (v32u8)_2, (v32i8)_3);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvrotr_b (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvrotr_b ((v32i8)_1, (v32i8)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvrotr_h (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvrotr_h ((v16i16)_1, (v16i16)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvrotr_w (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvrotr_w ((v8i32)_1, (v8i32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvrotr_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvrotr_d ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvadd_q (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvadd_q ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvsub_q (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvsub_q ((v4i64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwev_q_du_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwev_q_du_d ((v4u64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvaddwod_q_du_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvaddwod_q_du_d ((v4u64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwev_q_du_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwev_q_du_d ((v4u64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, UV4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmulwod_q_du_d (__m256i _1, __m256i _2)
++{
++  return (__m256i)__builtin_lasx_xvmulwod_q_du_d ((v4u64)_1, (v4i64)_2);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmskgez_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvmskgez_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V32QI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvmsknz_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvmsknz_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V16HI, V32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_h_b (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_h_b ((v32i8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V8SI, V16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_w_h (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_w_h ((v16i16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_d_w (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_d_w ((v8i32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_q_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_q_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV16HI, UV32QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_hu_bu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_hu_bu ((v32u8)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV8SI, UV16HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_wu_hu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_wu_hu ((v16u16)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV4DI, UV8SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_du_wu (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_du_wu ((v8u32)_1);
++}
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  UV4DI, UV4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvexth_qu_du (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvexth_qu_du ((v4u64)_1);
++}
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V32QI, V32QI, UQI.  */
++#define __lasx_xvrotri_b(/*__m256i*/ _1, /*ui3*/ _2) \
++  ((__m256i)__builtin_lasx_xvrotri_b ((v32i8)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V16HI, V16HI, UQI.  */
++#define __lasx_xvrotri_h(/*__m256i*/ _1, /*ui4*/ _2) \
++  ((__m256i)__builtin_lasx_xvrotri_h ((v16i16)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V8SI, V8SI, UQI.  */
++#define __lasx_xvrotri_w(/*__m256i*/ _1, /*ui5*/ _2) \
++  ((__m256i)__builtin_lasx_xvrotri_w ((v8i32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V4DI, V4DI, UQI.  */
++#define __lasx_xvrotri_d(/*__m256i*/ _1, /*ui6*/ _2) \
++  ((__m256i)__builtin_lasx_xvrotri_d ((v4i64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj.  */
++/* Data types in instruction templates:  V4DI, V4DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvextl_q_d (__m256i _1)
++{
++  return (__m256i)__builtin_lasx_xvextl_q_d ((v4i64)_1);
++}
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvsrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvsrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvsrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvsrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvsrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvsrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvsrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvsrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvssrlni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvssrlni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvssrlni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvssrlni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, V32QI, USI.  */
++#define __lasx_xvssrlni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, V16HI, USI.  */
++#define __lasx_xvssrlni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, V8SI, USI.  */
++#define __lasx_xvssrlni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, V4DI, USI.  */
++#define __lasx_xvssrlni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlni_du_q ((v4u64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvssrlrni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvssrlrni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvssrlrni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvssrlrni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, V32QI, USI.  */
++#define __lasx_xvssrlrni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, V16HI, USI.  */
++#define __lasx_xvssrlrni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, V8SI, USI.  */
++#define __lasx_xvssrlrni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, V4DI, USI.  */
++#define __lasx_xvssrlrni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrlrni_du_q ((v4u64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvsrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvsrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvsrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvsrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvsrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvsrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvsrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvsrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvsrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvssrani_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvssrani_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvssrani_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvssrani_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, V32QI, USI.  */
++#define __lasx_xvssrani_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_bu_h ((v32u8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, V16HI, USI.  */
++#define __lasx_xvssrani_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_hu_w ((v16u16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, V8SI, USI.  */
++#define __lasx_xvssrani_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_wu_d ((v8u32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, V4DI, USI.  */
++#define __lasx_xvssrani_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrani_du_q ((v4u64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  V32QI, V32QI, V32QI, USI.  */
++#define __lasx_xvssrarni_b_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_b_h ((v32i8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  V16HI, V16HI, V16HI, USI.  */
++#define __lasx_xvssrarni_h_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_h_w ((v16i16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  V8SI, V8SI, V8SI, USI.  */
++#define __lasx_xvssrarni_w_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_w_d ((v8i32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  V4DI, V4DI, V4DI, USI.  */
++#define __lasx_xvssrarni_d_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_d_q ((v4i64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui4.  */
++/* Data types in instruction templates:  UV32QI, UV32QI, V32QI, USI.  */
++#define __lasx_xvssrarni_bu_h(/*__m256i*/ _1, /*__m256i*/ _2, /*ui4*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_bu_h ((v32u8)(_1), (v32i8)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui5.  */
++/* Data types in instruction templates:  UV16HI, UV16HI, V16HI, USI.  */
++#define __lasx_xvssrarni_hu_w(/*__m256i*/ _1, /*__m256i*/ _2, /*ui5*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_hu_w ((v16u16)(_1), (v16i16)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui6.  */
++/* Data types in instruction templates:  UV8SI, UV8SI, V8SI, USI.  */
++#define __lasx_xvssrarni_wu_d(/*__m256i*/ _1, /*__m256i*/ _2, /*ui6*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_wu_d ((v8u32)(_1), (v8i32)(_2), (_3)))
++
++/* Assembly instruction format:	xd, xj, ui7.  */
++/* Data types in instruction templates:  UV4DI, UV4DI, V4DI, USI.  */
++#define __lasx_xvssrarni_du_q(/*__m256i*/ _1, /*__m256i*/ _2, /*ui7*/ _3) \
++  ((__m256i)__builtin_lasx_xvssrarni_du_q ((v4u64)(_1), (v4i64)(_2), (_3)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV32QI.  */
++#define __lasx_xbnz_b(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbnz_b ((v32u8)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV4DI.  */
++#define __lasx_xbnz_d(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbnz_d ((v4u64)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV16HI.  */
++#define __lasx_xbnz_h(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbnz_h ((v16u16)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV32QI.  */
++#define __lasx_xbnz_v(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbnz_v ((v32u8)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV8SI.  */
++#define __lasx_xbnz_w(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbnz_w ((v8u32)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV32QI.  */
++#define __lasx_xbz_b(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbz_b ((v32u8)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV4DI.  */
++#define __lasx_xbz_d(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbz_d ((v4u64)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV16HI.  */
++#define __lasx_xbz_h(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbz_h ((v16u16)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV32QI.  */
++#define __lasx_xbz_v(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbz_v ((v32u8)(_1)))
++
++/* Assembly instruction format:	cd, xj.  */
++/* Data types in instruction templates:  SI, UV8SI.  */
++#define __lasx_xbz_w(/*__m256i*/ _1) \
++  ((int)__builtin_lasx_xbz_w ((v8u32)(_1)))
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_caf_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_caf_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_caf_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_caf_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_ceq_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_ceq_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_ceq_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_ceq_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cle_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cle_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cle_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cle_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_clt_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_clt_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_clt_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_clt_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cne_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cne_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cne_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cne_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cor_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cor_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cor_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cor_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cueq_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cueq_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cueq_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cueq_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cule_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cule_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cule_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cule_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cult_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cult_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cult_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cult_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cun_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cun_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cune_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cune_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cune_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cune_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_cun_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_cun_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_saf_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_saf_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_saf_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_saf_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_seq_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_seq_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_seq_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_seq_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sle_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sle_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sle_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sle_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_slt_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_slt_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_slt_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_slt_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sne_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sne_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sne_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sne_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sor_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sor_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sor_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sor_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sueq_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sueq_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sueq_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sueq_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sule_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sule_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sule_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sule_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sult_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sult_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sult_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sult_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sun_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sun_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V4DI, V4DF, V4DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sune_d (__m256d _1, __m256d _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sune_d ((v4f64)_1, (v4f64)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sune_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sune_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, xk.  */
++/* Data types in instruction templates:  V8SI, V8SF, V8SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m256i __lasx_xvfcmp_sun_s (__m256 _1, __m256 _2)
++{
++  return (__m256i)__builtin_lasx_xvfcmp_sun_s ((v8f32)_1, (v8f32)_2);
++}
++
++/* Assembly instruction format:	xd, xj, ui2.  */
++/* Data types in instruction templates:  V4DF, V4DF, UQI.  */
++#define __lasx_xvpickve_d_f(/*__m256d*/ _1, /*ui2*/ _2) \
++  ((__m256d)__builtin_lasx_xvpickve_d_f ((v4f64)(_1), (_2)))
++
++/* Assembly instruction format:	xd, xj, ui3.  */
++/* Data types in instruction templates:  V8SF, V8SF, UQI.  */
++#define __lasx_xvpickve_w_f(/*__m256*/ _1, /*ui3*/ _2) \
++  ((__m256)__builtin_lasx_xvpickve_w_f ((v8f32)(_1), (_2)))
++
++/* Assembly instruction format:	xd, si10.  */
++/* Data types in instruction templates:  V32QI, HI.  */
++#define __lasx_xvrepli_b(/*si10*/ _1) \
++  ((__m256i)__builtin_lasx_xvrepli_b ((_1)))
++
++/* Assembly instruction format:	xd, si10.  */
++/* Data types in instruction templates:  V4DI, HI.  */
++#define __lasx_xvrepli_d(/*si10*/ _1) \
++  ((__m256i)__builtin_lasx_xvrepli_d ((_1)))
++
++/* Assembly instruction format:	xd, si10.  */
++/* Data types in instruction templates:  V16HI, HI.  */
++#define __lasx_xvrepli_h(/*si10*/ _1) \
++  ((__m256i)__builtin_lasx_xvrepli_h ((_1)))
++
++/* Assembly instruction format:	xd, si10.  */
++/* Data types in instruction templates:  V8SI, HI.  */
++#define __lasx_xvrepli_w(/*si10*/ _1) \
++  ((__m256i)__builtin_lasx_xvrepli_w ((_1)))
++
++#endif /* defined(__loongarch_asx).  */
++#endif /* _GCC_LOONGSON_ASXINTRIN_H.  */
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index de6428ac6..a4a7dbec9 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -74,6 +74,13 @@ enum loongarch_builtin_type
+   /* The function corresponds to an LSX conditional branch instruction
+      combined with a compare instruction.  */
+   LARCH_BUILTIN_LSX_TEST_BRANCH,
++
++  /* For generating LoongArch LASX.  */
++  LARCH_BUILTIN_LASX,
++
++  /* The function corresponds to an LASX conditional branch instruction
++     combined with a compare instruction.  */
++  LARCH_BUILTIN_LASX_TEST_BRANCH,
+ };
+ 
+ /* Declare an availability predicate for built-in functions that require
+@@ -112,6 +119,7 @@ struct loongarch_builtin_description
+ 
+ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
+ AVAIL_ALL (lsx, ISA_HAS_LSX)
++AVAIL_ALL (lasx, ISA_HAS_LASX)
+ 
+ /* Construct a loongarch_builtin_description from the given arguments.
+ 
+@@ -173,6 +181,30 @@ AVAIL_ALL (lsx, ISA_HAS_LSX)
+     "__builtin_lsx_" #INSN,  LARCH_BUILTIN_DIRECT_NO_TARGET,		\
+     FUNCTION_TYPE, loongarch_builtin_avail_lsx }
+ 
++/* Define an LASX LARCH_BUILTIN_DIRECT function __builtin_lasx_
++   for instruction CODE_FOR_lasx_.  FUNCTION_TYPE is a builtin_description
++   field.  */
++#define LASX_BUILTIN(INSN, FUNCTION_TYPE)				\
++  { CODE_FOR_lasx_ ## INSN,						\
++    "__builtin_lasx_" #INSN,  LARCH_BUILTIN_LASX,			\
++    FUNCTION_TYPE, loongarch_builtin_avail_lasx }
++
++/* Define an LASX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lasx_
++   for instruction CODE_FOR_lasx_.  FUNCTION_TYPE is a builtin_description
++   field.  */
++#define LASX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE)			\
++  { CODE_FOR_lasx_ ## INSN,						\
++    "__builtin_lasx_" #INSN,  LARCH_BUILTIN_DIRECT_NO_TARGET,		\
++    FUNCTION_TYPE, loongarch_builtin_avail_lasx }
++
++/* Define an LASX LARCH_BUILTIN_LASX_TEST_BRANCH function __builtin_lasx_
++   for instruction CODE_FOR_lasx_.  FUNCTION_TYPE is a builtin_description
++   field.  */
++#define LASX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE)			\
++  { CODE_FOR_lasx_ ## INSN,						\
++    "__builtin_lasx_" #INSN, LARCH_BUILTIN_LASX_TEST_BRANCH,		\
++    FUNCTION_TYPE, loongarch_builtin_avail_lasx }
++
+ /* LoongArch SX define CODE_FOR_lsx_xxx */
+ #define CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3
+ #define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3
+@@ -442,6 +474,276 @@ AVAIL_ALL (lsx, ISA_HAS_LSX)
+ #define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w
+ #define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d
+ 
++/* LoongArch ASX define CODE_FOR_lasx_mxxx */
++#define CODE_FOR_lasx_xvsadd_b CODE_FOR_ssaddv32qi3
++#define CODE_FOR_lasx_xvsadd_h CODE_FOR_ssaddv16hi3
++#define CODE_FOR_lasx_xvsadd_w CODE_FOR_ssaddv8si3
++#define CODE_FOR_lasx_xvsadd_d CODE_FOR_ssaddv4di3
++#define CODE_FOR_lasx_xvsadd_bu CODE_FOR_usaddv32qi3
++#define CODE_FOR_lasx_xvsadd_hu CODE_FOR_usaddv16hi3
++#define CODE_FOR_lasx_xvsadd_wu CODE_FOR_usaddv8si3
++#define CODE_FOR_lasx_xvsadd_du CODE_FOR_usaddv4di3
++#define CODE_FOR_lasx_xvadd_b CODE_FOR_addv32qi3
++#define CODE_FOR_lasx_xvadd_h CODE_FOR_addv16hi3
++#define CODE_FOR_lasx_xvadd_w CODE_FOR_addv8si3
++#define CODE_FOR_lasx_xvadd_d CODE_FOR_addv4di3
++#define CODE_FOR_lasx_xvaddi_bu CODE_FOR_addv32qi3
++#define CODE_FOR_lasx_xvaddi_hu CODE_FOR_addv16hi3
++#define CODE_FOR_lasx_xvaddi_wu CODE_FOR_addv8si3
++#define CODE_FOR_lasx_xvaddi_du CODE_FOR_addv4di3
++#define CODE_FOR_lasx_xvand_v CODE_FOR_andv32qi3
++#define CODE_FOR_lasx_xvandi_b CODE_FOR_andv32qi3
++#define CODE_FOR_lasx_xvbitsel_v CODE_FOR_lasx_xvbitsel_b
++#define CODE_FOR_lasx_xvseqi_b CODE_FOR_lasx_xvseq_b
++#define CODE_FOR_lasx_xvseqi_h CODE_FOR_lasx_xvseq_h
++#define CODE_FOR_lasx_xvseqi_w CODE_FOR_lasx_xvseq_w
++#define CODE_FOR_lasx_xvseqi_d CODE_FOR_lasx_xvseq_d
++#define CODE_FOR_lasx_xvslti_b CODE_FOR_lasx_xvslt_b
++#define CODE_FOR_lasx_xvslti_h CODE_FOR_lasx_xvslt_h
++#define CODE_FOR_lasx_xvslti_w CODE_FOR_lasx_xvslt_w
++#define CODE_FOR_lasx_xvslti_d CODE_FOR_lasx_xvslt_d
++#define CODE_FOR_lasx_xvslti_bu CODE_FOR_lasx_xvslt_bu
++#define CODE_FOR_lasx_xvslti_hu CODE_FOR_lasx_xvslt_hu
++#define CODE_FOR_lasx_xvslti_wu CODE_FOR_lasx_xvslt_wu
++#define CODE_FOR_lasx_xvslti_du CODE_FOR_lasx_xvslt_du
++#define CODE_FOR_lasx_xvslei_b CODE_FOR_lasx_xvsle_b
++#define CODE_FOR_lasx_xvslei_h CODE_FOR_lasx_xvsle_h
++#define CODE_FOR_lasx_xvslei_w CODE_FOR_lasx_xvsle_w
++#define CODE_FOR_lasx_xvslei_d CODE_FOR_lasx_xvsle_d
++#define CODE_FOR_lasx_xvslei_bu CODE_FOR_lasx_xvsle_bu
++#define CODE_FOR_lasx_xvslei_hu CODE_FOR_lasx_xvsle_hu
++#define CODE_FOR_lasx_xvslei_wu CODE_FOR_lasx_xvsle_wu
++#define CODE_FOR_lasx_xvslei_du CODE_FOR_lasx_xvsle_du
++#define CODE_FOR_lasx_xvdiv_b CODE_FOR_divv32qi3
++#define CODE_FOR_lasx_xvdiv_h CODE_FOR_divv16hi3
++#define CODE_FOR_lasx_xvdiv_w CODE_FOR_divv8si3
++#define CODE_FOR_lasx_xvdiv_d CODE_FOR_divv4di3
++#define CODE_FOR_lasx_xvdiv_bu CODE_FOR_udivv32qi3
++#define CODE_FOR_lasx_xvdiv_hu CODE_FOR_udivv16hi3
++#define CODE_FOR_lasx_xvdiv_wu CODE_FOR_udivv8si3
++#define CODE_FOR_lasx_xvdiv_du CODE_FOR_udivv4di3
++#define CODE_FOR_lasx_xvfadd_s CODE_FOR_addv8sf3
++#define CODE_FOR_lasx_xvfadd_d CODE_FOR_addv4df3
++#define CODE_FOR_lasx_xvftintrz_w_s CODE_FOR_fix_truncv8sfv8si2
++#define CODE_FOR_lasx_xvftintrz_l_d CODE_FOR_fix_truncv4dfv4di2
++#define CODE_FOR_lasx_xvftintrz_wu_s CODE_FOR_fixuns_truncv8sfv8si2
++#define CODE_FOR_lasx_xvftintrz_lu_d CODE_FOR_fixuns_truncv4dfv4di2
++#define CODE_FOR_lasx_xvffint_s_w CODE_FOR_floatv8siv8sf2
++#define CODE_FOR_lasx_xvffint_d_l CODE_FOR_floatv4div4df2
++#define CODE_FOR_lasx_xvffint_s_wu CODE_FOR_floatunsv8siv8sf2
++#define CODE_FOR_lasx_xvffint_d_lu CODE_FOR_floatunsv4div4df2
++#define CODE_FOR_lasx_xvfsub_s CODE_FOR_subv8sf3
++#define CODE_FOR_lasx_xvfsub_d CODE_FOR_subv4df3
++#define CODE_FOR_lasx_xvfmul_s CODE_FOR_mulv8sf3
++#define CODE_FOR_lasx_xvfmul_d CODE_FOR_mulv4df3
++#define CODE_FOR_lasx_xvfdiv_s CODE_FOR_divv8sf3
++#define CODE_FOR_lasx_xvfdiv_d CODE_FOR_divv4df3
++#define CODE_FOR_lasx_xvfmax_s CODE_FOR_smaxv8sf3
++#define CODE_FOR_lasx_xvfmax_d CODE_FOR_smaxv4df3
++#define CODE_FOR_lasx_xvfmin_s CODE_FOR_sminv8sf3
++#define CODE_FOR_lasx_xvfmin_d CODE_FOR_sminv4df3
++#define CODE_FOR_lasx_xvfsqrt_s CODE_FOR_sqrtv8sf2
++#define CODE_FOR_lasx_xvfsqrt_d CODE_FOR_sqrtv4df2
++#define CODE_FOR_lasx_xvflogb_s CODE_FOR_logbv8sf2
++#define CODE_FOR_lasx_xvflogb_d CODE_FOR_logbv4df2
++#define CODE_FOR_lasx_xvmax_b CODE_FOR_smaxv32qi3
++#define CODE_FOR_lasx_xvmax_h CODE_FOR_smaxv16hi3
++#define CODE_FOR_lasx_xvmax_w CODE_FOR_smaxv8si3
++#define CODE_FOR_lasx_xvmax_d CODE_FOR_smaxv4di3
++#define CODE_FOR_lasx_xvmaxi_b CODE_FOR_smaxv32qi3
++#define CODE_FOR_lasx_xvmaxi_h CODE_FOR_smaxv16hi3
++#define CODE_FOR_lasx_xvmaxi_w CODE_FOR_smaxv8si3
++#define CODE_FOR_lasx_xvmaxi_d CODE_FOR_smaxv4di3
++#define CODE_FOR_lasx_xvmax_bu CODE_FOR_umaxv32qi3
++#define CODE_FOR_lasx_xvmax_hu CODE_FOR_umaxv16hi3
++#define CODE_FOR_lasx_xvmax_wu CODE_FOR_umaxv8si3
++#define CODE_FOR_lasx_xvmax_du CODE_FOR_umaxv4di3
++#define CODE_FOR_lasx_xvmaxi_bu CODE_FOR_umaxv32qi3
++#define CODE_FOR_lasx_xvmaxi_hu CODE_FOR_umaxv16hi3
++#define CODE_FOR_lasx_xvmaxi_wu CODE_FOR_umaxv8si3
++#define CODE_FOR_lasx_xvmaxi_du CODE_FOR_umaxv4di3
++#define CODE_FOR_lasx_xvmin_b CODE_FOR_sminv32qi3
++#define CODE_FOR_lasx_xvmin_h CODE_FOR_sminv16hi3
++#define CODE_FOR_lasx_xvmin_w CODE_FOR_sminv8si3
++#define CODE_FOR_lasx_xvmin_d CODE_FOR_sminv4di3
++#define CODE_FOR_lasx_xvmini_b CODE_FOR_sminv32qi3
++#define CODE_FOR_lasx_xvmini_h CODE_FOR_sminv16hi3
++#define CODE_FOR_lasx_xvmini_w CODE_FOR_sminv8si3
++#define CODE_FOR_lasx_xvmini_d CODE_FOR_sminv4di3
++#define CODE_FOR_lasx_xvmin_bu CODE_FOR_uminv32qi3
++#define CODE_FOR_lasx_xvmin_hu CODE_FOR_uminv16hi3
++#define CODE_FOR_lasx_xvmin_wu CODE_FOR_uminv8si3
++#define CODE_FOR_lasx_xvmin_du CODE_FOR_uminv4di3
++#define CODE_FOR_lasx_xvmini_bu CODE_FOR_uminv32qi3
++#define CODE_FOR_lasx_xvmini_hu CODE_FOR_uminv16hi3
++#define CODE_FOR_lasx_xvmini_wu CODE_FOR_uminv8si3
++#define CODE_FOR_lasx_xvmini_du CODE_FOR_uminv4di3
++#define CODE_FOR_lasx_xvmod_b CODE_FOR_modv32qi3
++#define CODE_FOR_lasx_xvmod_h CODE_FOR_modv16hi3
++#define CODE_FOR_lasx_xvmod_w CODE_FOR_modv8si3
++#define CODE_FOR_lasx_xvmod_d CODE_FOR_modv4di3
++#define CODE_FOR_lasx_xvmod_bu CODE_FOR_umodv32qi3
++#define CODE_FOR_lasx_xvmod_hu CODE_FOR_umodv16hi3
++#define CODE_FOR_lasx_xvmod_wu CODE_FOR_umodv8si3
++#define CODE_FOR_lasx_xvmod_du CODE_FOR_umodv4di3
++#define CODE_FOR_lasx_xvmul_b CODE_FOR_mulv32qi3
++#define CODE_FOR_lasx_xvmul_h CODE_FOR_mulv16hi3
++#define CODE_FOR_lasx_xvmul_w CODE_FOR_mulv8si3
++#define CODE_FOR_lasx_xvmul_d CODE_FOR_mulv4di3
++#define CODE_FOR_lasx_xvclz_b CODE_FOR_clzv32qi2
++#define CODE_FOR_lasx_xvclz_h CODE_FOR_clzv16hi2
++#define CODE_FOR_lasx_xvclz_w CODE_FOR_clzv8si2
++#define CODE_FOR_lasx_xvclz_d CODE_FOR_clzv4di2
++#define CODE_FOR_lasx_xvnor_v CODE_FOR_lasx_xvnor_b
++#define CODE_FOR_lasx_xvor_v CODE_FOR_iorv32qi3
++#define CODE_FOR_lasx_xvori_b CODE_FOR_iorv32qi3
++#define CODE_FOR_lasx_xvnori_b CODE_FOR_lasx_xvnor_b
++#define CODE_FOR_lasx_xvpcnt_b CODE_FOR_popcountv32qi2
++#define CODE_FOR_lasx_xvpcnt_h CODE_FOR_popcountv16hi2
++#define CODE_FOR_lasx_xvpcnt_w CODE_FOR_popcountv8si2
++#define CODE_FOR_lasx_xvpcnt_d CODE_FOR_popcountv4di2
++#define CODE_FOR_lasx_xvxor_v CODE_FOR_xorv32qi3
++#define CODE_FOR_lasx_xvxori_b CODE_FOR_xorv32qi3
++#define CODE_FOR_lasx_xvsll_b CODE_FOR_vashlv32qi3
++#define CODE_FOR_lasx_xvsll_h CODE_FOR_vashlv16hi3
++#define CODE_FOR_lasx_xvsll_w CODE_FOR_vashlv8si3
++#define CODE_FOR_lasx_xvsll_d CODE_FOR_vashlv4di3
++#define CODE_FOR_lasx_xvslli_b CODE_FOR_vashlv32qi3
++#define CODE_FOR_lasx_xvslli_h CODE_FOR_vashlv16hi3
++#define CODE_FOR_lasx_xvslli_w CODE_FOR_vashlv8si3
++#define CODE_FOR_lasx_xvslli_d CODE_FOR_vashlv4di3
++#define CODE_FOR_lasx_xvsra_b CODE_FOR_vashrv32qi3
++#define CODE_FOR_lasx_xvsra_h CODE_FOR_vashrv16hi3
++#define CODE_FOR_lasx_xvsra_w CODE_FOR_vashrv8si3
++#define CODE_FOR_lasx_xvsra_d CODE_FOR_vashrv4di3
++#define CODE_FOR_lasx_xvsrai_b CODE_FOR_vashrv32qi3
++#define CODE_FOR_lasx_xvsrai_h CODE_FOR_vashrv16hi3
++#define CODE_FOR_lasx_xvsrai_w CODE_FOR_vashrv8si3
++#define CODE_FOR_lasx_xvsrai_d CODE_FOR_vashrv4di3
++#define CODE_FOR_lasx_xvsrl_b CODE_FOR_vlshrv32qi3
++#define CODE_FOR_lasx_xvsrl_h CODE_FOR_vlshrv16hi3
++#define CODE_FOR_lasx_xvsrl_w CODE_FOR_vlshrv8si3
++#define CODE_FOR_lasx_xvsrl_d CODE_FOR_vlshrv4di3
++#define CODE_FOR_lasx_xvsrli_b CODE_FOR_vlshrv32qi3
++#define CODE_FOR_lasx_xvsrli_h CODE_FOR_vlshrv16hi3
++#define CODE_FOR_lasx_xvsrli_w CODE_FOR_vlshrv8si3
++#define CODE_FOR_lasx_xvsrli_d CODE_FOR_vlshrv4di3
++#define CODE_FOR_lasx_xvsub_b CODE_FOR_subv32qi3
++#define CODE_FOR_lasx_xvsub_h CODE_FOR_subv16hi3
++#define CODE_FOR_lasx_xvsub_w CODE_FOR_subv8si3
++#define CODE_FOR_lasx_xvsub_d CODE_FOR_subv4di3
++#define CODE_FOR_lasx_xvsubi_bu CODE_FOR_subv32qi3
++#define CODE_FOR_lasx_xvsubi_hu CODE_FOR_subv16hi3
++#define CODE_FOR_lasx_xvsubi_wu CODE_FOR_subv8si3
++#define CODE_FOR_lasx_xvsubi_du CODE_FOR_subv4di3
++#define CODE_FOR_lasx_xvpackod_d CODE_FOR_lasx_xvilvh_d
++#define CODE_FOR_lasx_xvpackev_d CODE_FOR_lasx_xvilvl_d
++#define CODE_FOR_lasx_xvpickod_d CODE_FOR_lasx_xvilvh_d
++#define CODE_FOR_lasx_xvpickev_d CODE_FOR_lasx_xvilvl_d
++#define CODE_FOR_lasx_xvrepli_b CODE_FOR_lasx_xvrepliv32qi
++#define CODE_FOR_lasx_xvrepli_h CODE_FOR_lasx_xvrepliv16hi
++#define CODE_FOR_lasx_xvrepli_w CODE_FOR_lasx_xvrepliv8si
++#define CODE_FOR_lasx_xvrepli_d CODE_FOR_lasx_xvrepliv4di
++
++#define CODE_FOR_lasx_xvandn_v CODE_FOR_xvandnv32qi3
++#define CODE_FOR_lasx_xvorn_v CODE_FOR_xvornv32qi3
++#define CODE_FOR_lasx_xvneg_b CODE_FOR_negv32qi2
++#define CODE_FOR_lasx_xvneg_h CODE_FOR_negv16hi2
++#define CODE_FOR_lasx_xvneg_w CODE_FOR_negv8si2
++#define CODE_FOR_lasx_xvneg_d CODE_FOR_negv4di2
++#define CODE_FOR_lasx_xvbsrl_v CODE_FOR_lasx_xvbsrl_b
++#define CODE_FOR_lasx_xvbsll_v CODE_FOR_lasx_xvbsll_b
++#define CODE_FOR_lasx_xvfmadd_s CODE_FOR_fmav8sf4
++#define CODE_FOR_lasx_xvfmadd_d CODE_FOR_fmav4df4
++#define CODE_FOR_lasx_xvfmsub_s CODE_FOR_fmsv8sf4
++#define CODE_FOR_lasx_xvfmsub_d CODE_FOR_fmsv4df4
++#define CODE_FOR_lasx_xvfnmadd_s CODE_FOR_xvfnmaddv8sf4_nmadd4
++#define CODE_FOR_lasx_xvfnmadd_d CODE_FOR_xvfnmaddv4df4_nmadd4
++#define CODE_FOR_lasx_xvfnmsub_s CODE_FOR_xvfnmsubv8sf4_nmsub4
++#define CODE_FOR_lasx_xvfnmsub_d CODE_FOR_xvfnmsubv4df4_nmsub4
++
++#define CODE_FOR_lasx_xvpermi_q CODE_FOR_lasx_xvpermi_q_v32qi
++#define CODE_FOR_lasx_xvpermi_d CODE_FOR_lasx_xvpermi_d_v4di
++#define CODE_FOR_lasx_xbnz_v CODE_FOR_lasx_xbnz_v_b
++#define CODE_FOR_lasx_xbz_v CODE_FOR_lasx_xbz_v_b
++
++#define CODE_FOR_lasx_xvssub_b CODE_FOR_lasx_xvssub_s_b
++#define CODE_FOR_lasx_xvssub_h CODE_FOR_lasx_xvssub_s_h
++#define CODE_FOR_lasx_xvssub_w CODE_FOR_lasx_xvssub_s_w
++#define CODE_FOR_lasx_xvssub_d CODE_FOR_lasx_xvssub_s_d
++#define CODE_FOR_lasx_xvssub_bu CODE_FOR_lasx_xvssub_u_bu
++#define CODE_FOR_lasx_xvssub_hu CODE_FOR_lasx_xvssub_u_hu
++#define CODE_FOR_lasx_xvssub_wu CODE_FOR_lasx_xvssub_u_wu
++#define CODE_FOR_lasx_xvssub_du CODE_FOR_lasx_xvssub_u_du
++#define CODE_FOR_lasx_xvabsd_b CODE_FOR_lasx_xvabsd_s_b
++#define CODE_FOR_lasx_xvabsd_h CODE_FOR_lasx_xvabsd_s_h
++#define CODE_FOR_lasx_xvabsd_w CODE_FOR_lasx_xvabsd_s_w
++#define CODE_FOR_lasx_xvabsd_d CODE_FOR_lasx_xvabsd_s_d
++#define CODE_FOR_lasx_xvabsd_bu CODE_FOR_lasx_xvabsd_u_bu
++#define CODE_FOR_lasx_xvabsd_hu CODE_FOR_lasx_xvabsd_u_hu
++#define CODE_FOR_lasx_xvabsd_wu CODE_FOR_lasx_xvabsd_u_wu
++#define CODE_FOR_lasx_xvabsd_du CODE_FOR_lasx_xvabsd_u_du
++#define CODE_FOR_lasx_xvavg_b CODE_FOR_lasx_xvavg_s_b
++#define CODE_FOR_lasx_xvavg_h CODE_FOR_lasx_xvavg_s_h
++#define CODE_FOR_lasx_xvavg_w CODE_FOR_lasx_xvavg_s_w
++#define CODE_FOR_lasx_xvavg_d CODE_FOR_lasx_xvavg_s_d
++#define CODE_FOR_lasx_xvavg_bu CODE_FOR_lasx_xvavg_u_bu
++#define CODE_FOR_lasx_xvavg_hu CODE_FOR_lasx_xvavg_u_hu
++#define CODE_FOR_lasx_xvavg_wu CODE_FOR_lasx_xvavg_u_wu
++#define CODE_FOR_lasx_xvavg_du CODE_FOR_lasx_xvavg_u_du
++#define CODE_FOR_lasx_xvavgr_b CODE_FOR_lasx_xvavgr_s_b
++#define CODE_FOR_lasx_xvavgr_h CODE_FOR_lasx_xvavgr_s_h
++#define CODE_FOR_lasx_xvavgr_w CODE_FOR_lasx_xvavgr_s_w
++#define CODE_FOR_lasx_xvavgr_d CODE_FOR_lasx_xvavgr_s_d
++#define CODE_FOR_lasx_xvavgr_bu CODE_FOR_lasx_xvavgr_u_bu
++#define CODE_FOR_lasx_xvavgr_hu CODE_FOR_lasx_xvavgr_u_hu
++#define CODE_FOR_lasx_xvavgr_wu CODE_FOR_lasx_xvavgr_u_wu
++#define CODE_FOR_lasx_xvavgr_du CODE_FOR_lasx_xvavgr_u_du
++#define CODE_FOR_lasx_xvmuh_b CODE_FOR_lasx_xvmuh_s_b
++#define CODE_FOR_lasx_xvmuh_h CODE_FOR_lasx_xvmuh_s_h
++#define CODE_FOR_lasx_xvmuh_w CODE_FOR_lasx_xvmuh_s_w
++#define CODE_FOR_lasx_xvmuh_d CODE_FOR_lasx_xvmuh_s_d
++#define CODE_FOR_lasx_xvmuh_bu CODE_FOR_lasx_xvmuh_u_bu
++#define CODE_FOR_lasx_xvmuh_hu CODE_FOR_lasx_xvmuh_u_hu
++#define CODE_FOR_lasx_xvmuh_wu CODE_FOR_lasx_xvmuh_u_wu
++#define CODE_FOR_lasx_xvmuh_du CODE_FOR_lasx_xvmuh_u_du
++#define CODE_FOR_lasx_xvssran_b_h CODE_FOR_lasx_xvssran_s_b_h
++#define CODE_FOR_lasx_xvssran_h_w CODE_FOR_lasx_xvssran_s_h_w
++#define CODE_FOR_lasx_xvssran_w_d CODE_FOR_lasx_xvssran_s_w_d
++#define CODE_FOR_lasx_xvssran_bu_h CODE_FOR_lasx_xvssran_u_bu_h
++#define CODE_FOR_lasx_xvssran_hu_w CODE_FOR_lasx_xvssran_u_hu_w
++#define CODE_FOR_lasx_xvssran_wu_d CODE_FOR_lasx_xvssran_u_wu_d
++#define CODE_FOR_lasx_xvssrarn_b_h CODE_FOR_lasx_xvssrarn_s_b_h
++#define CODE_FOR_lasx_xvssrarn_h_w CODE_FOR_lasx_xvssrarn_s_h_w
++#define CODE_FOR_lasx_xvssrarn_w_d CODE_FOR_lasx_xvssrarn_s_w_d
++#define CODE_FOR_lasx_xvssrarn_bu_h CODE_FOR_lasx_xvssrarn_u_bu_h
++#define CODE_FOR_lasx_xvssrarn_hu_w CODE_FOR_lasx_xvssrarn_u_hu_w
++#define CODE_FOR_lasx_xvssrarn_wu_d CODE_FOR_lasx_xvssrarn_u_wu_d
++#define CODE_FOR_lasx_xvssrln_bu_h CODE_FOR_lasx_xvssrln_u_bu_h
++#define CODE_FOR_lasx_xvssrln_hu_w CODE_FOR_lasx_xvssrln_u_hu_w
++#define CODE_FOR_lasx_xvssrln_wu_d CODE_FOR_lasx_xvssrln_u_wu_d
++#define CODE_FOR_lasx_xvssrlrn_bu_h CODE_FOR_lasx_xvssrlrn_u_bu_h
++#define CODE_FOR_lasx_xvssrlrn_hu_w CODE_FOR_lasx_xvssrlrn_u_hu_w
++#define CODE_FOR_lasx_xvssrlrn_wu_d CODE_FOR_lasx_xvssrlrn_u_wu_d
++#define CODE_FOR_lasx_xvftint_w_s CODE_FOR_lasx_xvftint_s_w_s
++#define CODE_FOR_lasx_xvftint_l_d CODE_FOR_lasx_xvftint_s_l_d
++#define CODE_FOR_lasx_xvftint_wu_s CODE_FOR_lasx_xvftint_u_wu_s
++#define CODE_FOR_lasx_xvftint_lu_d CODE_FOR_lasx_xvftint_u_lu_d
++#define CODE_FOR_lasx_xvsllwil_h_b CODE_FOR_lasx_xvsllwil_s_h_b
++#define CODE_FOR_lasx_xvsllwil_w_h CODE_FOR_lasx_xvsllwil_s_w_h
++#define CODE_FOR_lasx_xvsllwil_d_w CODE_FOR_lasx_xvsllwil_s_d_w
++#define CODE_FOR_lasx_xvsllwil_hu_bu CODE_FOR_lasx_xvsllwil_u_hu_bu
++#define CODE_FOR_lasx_xvsllwil_wu_hu CODE_FOR_lasx_xvsllwil_u_wu_hu
++#define CODE_FOR_lasx_xvsllwil_du_wu CODE_FOR_lasx_xvsllwil_u_du_wu
++#define CODE_FOR_lasx_xvsat_b CODE_FOR_lasx_xvsat_s_b
++#define CODE_FOR_lasx_xvsat_h CODE_FOR_lasx_xvsat_s_h
++#define CODE_FOR_lasx_xvsat_w CODE_FOR_lasx_xvsat_s_w
++#define CODE_FOR_lasx_xvsat_d CODE_FOR_lasx_xvsat_s_d
++#define CODE_FOR_lasx_xvsat_bu CODE_FOR_lasx_xvsat_u_bu
++#define CODE_FOR_lasx_xvsat_hu CODE_FOR_lasx_xvsat_u_hu
++#define CODE_FOR_lasx_xvsat_wu CODE_FOR_lasx_xvsat_u_wu
++#define CODE_FOR_lasx_xvsat_du CODE_FOR_lasx_xvsat_u_du
++
+ static const struct loongarch_builtin_description loongarch_builtins[] = {
+ #define LARCH_MOVFCSR2GR 0
+   DIRECT_BUILTIN (movfcsr2gr, LARCH_USI_FTYPE_UQI, hard_float),
+@@ -1209,7 +1511,761 @@ static const struct loongarch_builtin_description loongarch_builtins[] = {
+   LSX_BUILTIN (vshuf_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI),
+   LSX_BUILTIN (vldx, LARCH_V16QI_FTYPE_CVPOINTER_DI),
+   LSX_NO_TARGET_BUILTIN (vstx, LARCH_VOID_FTYPE_V16QI_CVPOINTER_DI),
+-  LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI)
++  LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI),
++
++  /* Built-in functions for LASX */
++  LASX_BUILTIN (xvsll_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsll_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsll_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsll_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvslli_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvslli_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvslli_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvslli_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvsra_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsra_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsra_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsra_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsrai_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvsrai_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvsrai_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvsrai_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvsrar_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsrar_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsrar_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsrar_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsrari_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvsrari_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvsrari_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvsrari_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvsrl_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsrl_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsrl_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsrl_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsrli_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvsrli_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvsrli_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvsrli_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvsrlr_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsrlr_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsrlr_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsrlr_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsrlri_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvsrlri_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvsrlri_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvsrlri_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvbitclr_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvbitclr_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvbitclr_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvbitclr_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvbitclri_b, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvbitclri_h, LARCH_UV16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvbitclri_w, LARCH_UV8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvbitclri_d, LARCH_UV4DI_FTYPE_UV4DI_UQI),
++  LASX_BUILTIN (xvbitset_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvbitset_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvbitset_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvbitset_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvbitseti_b, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvbitseti_h, LARCH_UV16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvbitseti_w, LARCH_UV8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvbitseti_d, LARCH_UV4DI_FTYPE_UV4DI_UQI),
++  LASX_BUILTIN (xvbitrev_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvbitrev_h, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvbitrev_w, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvbitrev_d, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvbitrevi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvbitrevi_h, LARCH_UV16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvbitrevi_w, LARCH_UV8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvbitrevi_d, LARCH_UV4DI_FTYPE_UV4DI_UQI),
++  LASX_BUILTIN (xvadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvaddi_bu, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvaddi_hu, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvaddi_wu, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvaddi_du, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsubi_bu, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvsubi_hu, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvsubi_wu, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvsubi_du, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvmax_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvmax_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvmax_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvmax_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvmaxi_b, LARCH_V32QI_FTYPE_V32QI_QI),
++  LASX_BUILTIN (xvmaxi_h, LARCH_V16HI_FTYPE_V16HI_QI),
++  LASX_BUILTIN (xvmaxi_w, LARCH_V8SI_FTYPE_V8SI_QI),
++  LASX_BUILTIN (xvmaxi_d, LARCH_V4DI_FTYPE_V4DI_QI),
++  LASX_BUILTIN (xvmax_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmax_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmax_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmax_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvmaxi_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvmaxi_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvmaxi_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvmaxi_du, LARCH_UV4DI_FTYPE_UV4DI_UQI),
++  LASX_BUILTIN (xvmin_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvmin_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvmin_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvmin_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvmini_b, LARCH_V32QI_FTYPE_V32QI_QI),
++  LASX_BUILTIN (xvmini_h, LARCH_V16HI_FTYPE_V16HI_QI),
++  LASX_BUILTIN (xvmini_w, LARCH_V8SI_FTYPE_V8SI_QI),
++  LASX_BUILTIN (xvmini_d, LARCH_V4DI_FTYPE_V4DI_QI),
++  LASX_BUILTIN (xvmin_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmin_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmin_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmin_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvmini_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvmini_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvmini_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvmini_du, LARCH_UV4DI_FTYPE_UV4DI_UQI),
++  LASX_BUILTIN (xvseq_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvseq_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvseq_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvseq_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvseqi_b, LARCH_V32QI_FTYPE_V32QI_QI),
++  LASX_BUILTIN (xvseqi_h, LARCH_V16HI_FTYPE_V16HI_QI),
++  LASX_BUILTIN (xvseqi_w, LARCH_V8SI_FTYPE_V8SI_QI),
++  LASX_BUILTIN (xvseqi_d, LARCH_V4DI_FTYPE_V4DI_QI),
++  LASX_BUILTIN (xvslt_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvslt_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvslt_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvslt_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvslti_b, LARCH_V32QI_FTYPE_V32QI_QI),
++  LASX_BUILTIN (xvslti_h, LARCH_V16HI_FTYPE_V16HI_QI),
++  LASX_BUILTIN (xvslti_w, LARCH_V8SI_FTYPE_V8SI_QI),
++  LASX_BUILTIN (xvslti_d, LARCH_V4DI_FTYPE_V4DI_QI),
++  LASX_BUILTIN (xvslt_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvslt_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvslt_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvslt_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvslti_bu, LARCH_V32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvslti_hu, LARCH_V16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvslti_wu, LARCH_V8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvslti_du, LARCH_V4DI_FTYPE_UV4DI_UQI),
++  LASX_BUILTIN (xvsle_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsle_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsle_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsle_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvslei_b, LARCH_V32QI_FTYPE_V32QI_QI),
++  LASX_BUILTIN (xvslei_h, LARCH_V16HI_FTYPE_V16HI_QI),
++  LASX_BUILTIN (xvslei_w, LARCH_V8SI_FTYPE_V8SI_QI),
++  LASX_BUILTIN (xvslei_d, LARCH_V4DI_FTYPE_V4DI_QI),
++  LASX_BUILTIN (xvsle_bu, LARCH_V32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvsle_hu, LARCH_V16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvsle_wu, LARCH_V8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvsle_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvslei_bu, LARCH_V32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvslei_hu, LARCH_V16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvslei_wu, LARCH_V8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvslei_du, LARCH_V4DI_FTYPE_UV4DI_UQI),
++
++  LASX_BUILTIN (xvsat_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvsat_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvsat_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvsat_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvsat_bu, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvsat_hu, LARCH_UV16HI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvsat_wu, LARCH_UV8SI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvsat_du, LARCH_UV4DI_FTYPE_UV4DI_UQI),
++
++  LASX_BUILTIN (xvadda_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvadda_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvadda_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvadda_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsadd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvsadd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvsadd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvsadd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++
++  LASX_BUILTIN (xvavg_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvavg_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvavg_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvavg_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvavg_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvavg_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvavg_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvavg_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++
++  LASX_BUILTIN (xvavgr_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvavgr_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvavgr_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvavgr_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvavgr_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvavgr_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvavgr_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvavgr_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++
++  LASX_BUILTIN (xvssub_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvssub_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvssub_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvssub_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssub_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvssub_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvssub_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvssub_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvabsd_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvabsd_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvabsd_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvabsd_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvabsd_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvabsd_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvabsd_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvabsd_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++
++  LASX_BUILTIN (xvmul_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvmul_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvmul_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvmul_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvmadd_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI),
++  LASX_BUILTIN (xvmadd_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI),
++  LASX_BUILTIN (xvmadd_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI),
++  LASX_BUILTIN (xvmadd_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI),
++  LASX_BUILTIN (xvmsub_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI),
++  LASX_BUILTIN (xvmsub_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI),
++  LASX_BUILTIN (xvmsub_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI),
++  LASX_BUILTIN (xvmsub_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI),
++  LASX_BUILTIN (xvdiv_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvdiv_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvdiv_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvdiv_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvdiv_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvdiv_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvdiv_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvdiv_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvhaddw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvhaddw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvhaddw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvhaddw_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvhaddw_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvhaddw_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvhsubw_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvhsubw_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvhsubw_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvhsubw_hu_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvhsubw_wu_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvhsubw_du_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmod_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvmod_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvmod_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvmod_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvmod_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmod_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmod_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmod_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++
++  LASX_BUILTIN (xvrepl128vei_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvrepl128vei_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvrepl128vei_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvrepl128vei_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvpickev_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvpickev_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvpickev_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvpickev_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvpickod_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvpickod_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvpickod_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvpickod_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvilvh_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvilvh_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvilvh_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvilvh_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvilvl_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvilvl_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvilvl_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvilvl_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvpackev_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvpackev_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvpackev_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvpackev_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvpackod_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvpackod_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvpackod_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvpackod_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvshuf_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI),
++  LASX_BUILTIN (xvshuf_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI),
++  LASX_BUILTIN (xvshuf_w, LARCH_V8SI_FTYPE_V8SI_V8SI_V8SI),
++  LASX_BUILTIN (xvshuf_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI),
++  LASX_BUILTIN (xvand_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvandi_b, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvnor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvnori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvxor_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvxori_b, LARCH_UV32QI_FTYPE_UV32QI_UQI),
++  LASX_BUILTIN (xvbitsel_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_UV32QI),
++  LASX_BUILTIN (xvbitseli_b, LARCH_UV32QI_FTYPE_UV32QI_UV32QI_USI),
++
++  LASX_BUILTIN (xvshuf4i_b, LARCH_V32QI_FTYPE_V32QI_USI),
++  LASX_BUILTIN (xvshuf4i_h, LARCH_V16HI_FTYPE_V16HI_USI),
++  LASX_BUILTIN (xvshuf4i_w, LARCH_V8SI_FTYPE_V8SI_USI),
++
++  LASX_BUILTIN (xvreplgr2vr_b, LARCH_V32QI_FTYPE_SI),
++  LASX_BUILTIN (xvreplgr2vr_h, LARCH_V16HI_FTYPE_SI),
++  LASX_BUILTIN (xvreplgr2vr_w, LARCH_V8SI_FTYPE_SI),
++  LASX_BUILTIN (xvreplgr2vr_d, LARCH_V4DI_FTYPE_DI),
++  LASX_BUILTIN (xvpcnt_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvpcnt_h, LARCH_V16HI_FTYPE_V16HI),
++  LASX_BUILTIN (xvpcnt_w, LARCH_V8SI_FTYPE_V8SI),
++  LASX_BUILTIN (xvpcnt_d, LARCH_V4DI_FTYPE_V4DI),
++  LASX_BUILTIN (xvclo_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvclo_h, LARCH_V16HI_FTYPE_V16HI),
++  LASX_BUILTIN (xvclo_w, LARCH_V8SI_FTYPE_V8SI),
++  LASX_BUILTIN (xvclo_d, LARCH_V4DI_FTYPE_V4DI),
++  LASX_BUILTIN (xvclz_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvclz_h, LARCH_V16HI_FTYPE_V16HI),
++  LASX_BUILTIN (xvclz_w, LARCH_V8SI_FTYPE_V8SI),
++  LASX_BUILTIN (xvclz_d, LARCH_V4DI_FTYPE_V4DI),
++
++  LASX_BUILTIN (xvrepli_b, LARCH_V32QI_FTYPE_HI),
++  LASX_BUILTIN (xvrepli_h, LARCH_V16HI_FTYPE_HI),
++  LASX_BUILTIN (xvrepli_w, LARCH_V8SI_FTYPE_HI),
++  LASX_BUILTIN (xvrepli_d, LARCH_V4DI_FTYPE_HI),
++  LASX_BUILTIN (xvfcmp_caf_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_caf_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cor_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cor_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cun_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cun_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cune_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cune_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_ceq_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_ceq_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cne_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cne_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_clt_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_clt_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cult_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cult_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cle_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cle_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_cule_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_cule_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_saf_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_saf_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sor_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sor_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sun_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sun_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sune_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sune_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sueq_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sueq_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_seq_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_seq_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sne_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sne_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_slt_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_slt_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sult_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sult_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sle_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sle_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcmp_sule_s, LARCH_V8SI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcmp_sule_d, LARCH_V4DI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfmul_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfmul_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfdiv_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfdiv_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfcvt_h_s, LARCH_V16HI_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfcvt_s_d, LARCH_V8SF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfmin_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfmin_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfmina_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfmina_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfmax_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfmax_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfmaxa_s, LARCH_V8SF_FTYPE_V8SF_V8SF),
++  LASX_BUILTIN (xvfmaxa_d, LARCH_V4DF_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvfclass_s, LARCH_V8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvfclass_d, LARCH_V4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvfsqrt_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfsqrt_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvfrecip_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrecip_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvfrint_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrint_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvfrsqrt_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrsqrt_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvflogb_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvflogb_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvfcvth_s_h, LARCH_V8SF_FTYPE_V16HI),
++  LASX_BUILTIN (xvfcvth_d_s, LARCH_V4DF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfcvtl_s_h, LARCH_V8SF_FTYPE_V16HI),
++  LASX_BUILTIN (xvfcvtl_d_s, LARCH_V4DF_FTYPE_V8SF),
++  LASX_BUILTIN (xvftint_w_s, LARCH_V8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftint_l_d, LARCH_V4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvftint_wu_s, LARCH_UV8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftint_lu_d, LARCH_UV4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvftintrz_w_s, LARCH_V8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrz_l_d, LARCH_V4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvftintrz_wu_s, LARCH_UV8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrz_lu_d, LARCH_UV4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvffint_s_w, LARCH_V8SF_FTYPE_V8SI),
++  LASX_BUILTIN (xvffint_d_l, LARCH_V4DF_FTYPE_V4DI),
++  LASX_BUILTIN (xvffint_s_wu, LARCH_V8SF_FTYPE_UV8SI),
++  LASX_BUILTIN (xvffint_d_lu, LARCH_V4DF_FTYPE_UV4DI),
++
++  LASX_BUILTIN (xvreplve_b, LARCH_V32QI_FTYPE_V32QI_SI),
++  LASX_BUILTIN (xvreplve_h, LARCH_V16HI_FTYPE_V16HI_SI),
++  LASX_BUILTIN (xvreplve_w, LARCH_V8SI_FTYPE_V8SI_SI),
++  LASX_BUILTIN (xvreplve_d, LARCH_V4DI_FTYPE_V4DI_SI),
++  LASX_BUILTIN (xvpermi_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++
++  LASX_BUILTIN (xvandn_v, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvneg_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvneg_h, LARCH_V16HI_FTYPE_V16HI),
++  LASX_BUILTIN (xvneg_w, LARCH_V8SI_FTYPE_V8SI),
++  LASX_BUILTIN (xvneg_d, LARCH_V4DI_FTYPE_V4DI),
++  LASX_BUILTIN (xvmuh_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvmuh_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvmuh_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvmuh_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvmuh_bu, LARCH_UV32QI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmuh_hu, LARCH_UV16HI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmuh_wu, LARCH_UV8SI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmuh_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvsllwil_h_b, LARCH_V16HI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvsllwil_w_h, LARCH_V8SI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvsllwil_d_w, LARCH_V4DI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvsllwil_hu_bu, LARCH_UV16HI_FTYPE_UV32QI_UQI), /* FIXME: U? */
++  LASX_BUILTIN (xvsllwil_wu_hu, LARCH_UV8SI_FTYPE_UV16HI_UQI),
++  LASX_BUILTIN (xvsllwil_du_wu, LARCH_UV4DI_FTYPE_UV8SI_UQI),
++  LASX_BUILTIN (xvsran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssran_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvssran_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvssran_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssran_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvssran_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvssran_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvsrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssrarn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvssrarn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvssrarn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssrarn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvssrarn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvssrarn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvsrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssrln_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvssrln_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvssrln_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvsrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssrlrn_bu_h, LARCH_UV32QI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvssrlrn_hu_w, LARCH_UV16HI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvssrlrn_wu_d, LARCH_UV8SI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvfrstpi_b, LARCH_V32QI_FTYPE_V32QI_V32QI_UQI),
++  LASX_BUILTIN (xvfrstpi_h, LARCH_V16HI_FTYPE_V16HI_V16HI_UQI),
++  LASX_BUILTIN (xvfrstp_b, LARCH_V32QI_FTYPE_V32QI_V32QI_V32QI),
++  LASX_BUILTIN (xvfrstp_h, LARCH_V16HI_FTYPE_V16HI_V16HI_V16HI),
++  LASX_BUILTIN (xvshuf4i_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvbsrl_v, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvbsll_v, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvextrins_b, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvextrins_h, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvextrins_w, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvextrins_d, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvmskltz_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvmskltz_h, LARCH_V16HI_FTYPE_V16HI),
++  LASX_BUILTIN (xvmskltz_w, LARCH_V8SI_FTYPE_V8SI),
++  LASX_BUILTIN (xvmskltz_d, LARCH_V4DI_FTYPE_V4DI),
++  LASX_BUILTIN (xvsigncov_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsigncov_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsigncov_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsigncov_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvfmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF),
++  LASX_BUILTIN (xvfmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF),
++  LASX_BUILTIN (xvfmsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF),
++  LASX_BUILTIN (xvfmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF),
++  LASX_BUILTIN (xvfnmadd_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF),
++  LASX_BUILTIN (xvfnmadd_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF),
++  LASX_BUILTIN (xvfnmsub_s, LARCH_V8SF_FTYPE_V8SF_V8SF_V8SF),
++  LASX_BUILTIN (xvfnmsub_d, LARCH_V4DF_FTYPE_V4DF_V4DF_V4DF),
++  LASX_BUILTIN (xvftintrne_w_s, LARCH_V8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrne_l_d, LARCH_V4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvftintrp_w_s, LARCH_V8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrp_l_d, LARCH_V4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvftintrm_w_s, LARCH_V8SI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrm_l_d, LARCH_V4DI_FTYPE_V4DF),
++  LASX_BUILTIN (xvftint_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvffint_s_l, LARCH_V8SF_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvftintrz_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvftintrp_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvftintrm_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvftintrne_w_d, LARCH_V8SI_FTYPE_V4DF_V4DF),
++  LASX_BUILTIN (xvftinth_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintl_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvffinth_d_w, LARCH_V4DF_FTYPE_V8SI),
++  LASX_BUILTIN (xvffintl_d_w, LARCH_V4DF_FTYPE_V8SI),
++  LASX_BUILTIN (xvftintrzh_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrzl_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrph_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrpl_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrmh_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrml_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrneh_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvftintrnel_l_s, LARCH_V4DI_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrintrne_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrintrne_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvfrintrz_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrintrz_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvfrintrp_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrintrp_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvfrintrm_s, LARCH_V8SF_FTYPE_V8SF),
++  LASX_BUILTIN (xvfrintrm_d, LARCH_V4DF_FTYPE_V4DF),
++  LASX_BUILTIN (xvld, LARCH_V32QI_FTYPE_CVPOINTER_SI),
++  LASX_NO_TARGET_BUILTIN (xvst, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI),
++  LASX_NO_TARGET_BUILTIN (xvstelm_b, LARCH_VOID_FTYPE_V32QI_CVPOINTER_SI_UQI),
++  LASX_NO_TARGET_BUILTIN (xvstelm_h, LARCH_VOID_FTYPE_V16HI_CVPOINTER_SI_UQI),
++  LASX_NO_TARGET_BUILTIN (xvstelm_w, LARCH_VOID_FTYPE_V8SI_CVPOINTER_SI_UQI),
++  LASX_NO_TARGET_BUILTIN (xvstelm_d, LARCH_VOID_FTYPE_V4DI_CVPOINTER_SI_UQI),
++  LASX_BUILTIN (xvinsve0_w, LARCH_V8SI_FTYPE_V8SI_V8SI_UQI),
++  LASX_BUILTIN (xvinsve0_d, LARCH_V4DI_FTYPE_V4DI_V4DI_UQI),
++  LASX_BUILTIN (xvpickve_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvpickve_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvpickve_w_f, LARCH_V8SF_FTYPE_V8SF_UQI),
++  LASX_BUILTIN (xvpickve_d_f, LARCH_V4DF_FTYPE_V4DF_UQI),
++  LASX_BUILTIN (xvssrlrn_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvssrlrn_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvssrlrn_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvssrln_b_h, LARCH_V32QI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvssrln_h_w, LARCH_V16HI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvssrln_w_d, LARCH_V8SI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvorn_v, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvldi, LARCH_V4DI_FTYPE_HI),
++  LASX_BUILTIN (xvldx, LARCH_V32QI_FTYPE_CVPOINTER_DI),
++  LASX_NO_TARGET_BUILTIN (xvstx, LARCH_VOID_FTYPE_V32QI_CVPOINTER_DI),
++  LASX_BUILTIN (xvextl_qu_du, LARCH_UV4DI_FTYPE_UV4DI),
++
++  /* LASX */
++  LASX_BUILTIN (xvinsgr2vr_w, LARCH_V8SI_FTYPE_V8SI_SI_UQI),
++  LASX_BUILTIN (xvinsgr2vr_d, LARCH_V4DI_FTYPE_V4DI_DI_UQI),
++
++  LASX_BUILTIN (xvreplve0_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvreplve0_h, LARCH_V16HI_FTYPE_V16HI),
++  LASX_BUILTIN (xvreplve0_w, LARCH_V8SI_FTYPE_V8SI),
++  LASX_BUILTIN (xvreplve0_d, LARCH_V4DI_FTYPE_V4DI),
++  LASX_BUILTIN (xvreplve0_q, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (vext2xv_h_b, LARCH_V16HI_FTYPE_V32QI),
++  LASX_BUILTIN (vext2xv_w_h, LARCH_V8SI_FTYPE_V16HI),
++  LASX_BUILTIN (vext2xv_d_w, LARCH_V4DI_FTYPE_V8SI),
++  LASX_BUILTIN (vext2xv_w_b, LARCH_V8SI_FTYPE_V32QI),
++  LASX_BUILTIN (vext2xv_d_h, LARCH_V4DI_FTYPE_V16HI),
++  LASX_BUILTIN (vext2xv_d_b, LARCH_V4DI_FTYPE_V32QI),
++  LASX_BUILTIN (vext2xv_hu_bu, LARCH_V16HI_FTYPE_V32QI),
++  LASX_BUILTIN (vext2xv_wu_hu, LARCH_V8SI_FTYPE_V16HI),
++  LASX_BUILTIN (vext2xv_du_wu, LARCH_V4DI_FTYPE_V8SI),
++  LASX_BUILTIN (vext2xv_wu_bu, LARCH_V8SI_FTYPE_V32QI),
++  LASX_BUILTIN (vext2xv_du_hu, LARCH_V4DI_FTYPE_V16HI),
++  LASX_BUILTIN (vext2xv_du_bu, LARCH_V4DI_FTYPE_V32QI),
++  LASX_BUILTIN (xvpermi_q, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvpermi_d, LARCH_V4DI_FTYPE_V4DI_USI),
++  LASX_BUILTIN (xvperm_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN_TEST_BRANCH (xbz_b, LARCH_SI_FTYPE_UV32QI),
++  LASX_BUILTIN_TEST_BRANCH (xbz_h, LARCH_SI_FTYPE_UV16HI),
++  LASX_BUILTIN_TEST_BRANCH (xbz_w, LARCH_SI_FTYPE_UV8SI),
++  LASX_BUILTIN_TEST_BRANCH (xbz_d, LARCH_SI_FTYPE_UV4DI),
++  LASX_BUILTIN_TEST_BRANCH (xbnz_b, LARCH_SI_FTYPE_UV32QI),
++  LASX_BUILTIN_TEST_BRANCH (xbnz_h, LARCH_SI_FTYPE_UV16HI),
++  LASX_BUILTIN_TEST_BRANCH (xbnz_w, LARCH_SI_FTYPE_UV8SI),
++  LASX_BUILTIN_TEST_BRANCH (xbnz_d, LARCH_SI_FTYPE_UV4DI),
++  LASX_BUILTIN_TEST_BRANCH (xbz_v, LARCH_SI_FTYPE_UV32QI),
++  LASX_BUILTIN_TEST_BRANCH (xbnz_v, LARCH_SI_FTYPE_UV32QI),
++  LASX_BUILTIN (xvldrepl_b, LARCH_V32QI_FTYPE_CVPOINTER_SI),
++  LASX_BUILTIN (xvldrepl_h, LARCH_V16HI_FTYPE_CVPOINTER_SI),
++  LASX_BUILTIN (xvldrepl_w, LARCH_V8SI_FTYPE_CVPOINTER_SI),
++  LASX_BUILTIN (xvldrepl_d, LARCH_V4DI_FTYPE_CVPOINTER_SI),
++  LASX_BUILTIN (xvpickve2gr_w, LARCH_SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvpickve2gr_wu, LARCH_USI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvpickve2gr_d, LARCH_DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvpickve2gr_du, LARCH_UDI_FTYPE_V4DI_UQI),
++
++  LASX_BUILTIN (xvaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvaddwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvaddwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvaddwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvaddwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvaddwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvaddwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvaddwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvsubwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsubwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsubwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsubwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsubwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvsubwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvsubwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvsubwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmulwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvmulwev_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvmulwev_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvmulwev_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvmulwev_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvmulwev_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmulwev_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmulwev_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvaddwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvaddwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvaddwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvaddwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvaddwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvaddwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvaddwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvsubwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsubwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvsubwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvsubwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvsubwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvsubwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvsubwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvsubwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmulwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvmulwod_d_w, LARCH_V4DI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvmulwod_w_h, LARCH_V8SI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvmulwod_h_b, LARCH_V16HI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvmulwod_q_du, LARCH_V4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvmulwod_d_wu, LARCH_V4DI_FTYPE_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmulwod_w_hu, LARCH_V8SI_FTYPE_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmulwod_h_bu, LARCH_V16HI_FTYPE_UV32QI_UV32QI),
++  LASX_BUILTIN (xvaddwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI),
++  LASX_BUILTIN (xvaddwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI),
++  LASX_BUILTIN (xvaddwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI),
++  LASX_BUILTIN (xvmulwev_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI),
++  LASX_BUILTIN (xvmulwev_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI),
++  LASX_BUILTIN (xvmulwev_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI),
++  LASX_BUILTIN (xvaddwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI),
++  LASX_BUILTIN (xvaddwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI),
++  LASX_BUILTIN (xvaddwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI),
++  LASX_BUILTIN (xvmulwod_d_wu_w, LARCH_V4DI_FTYPE_UV8SI_V8SI),
++  LASX_BUILTIN (xvmulwod_w_hu_h, LARCH_V8SI_FTYPE_UV16HI_V16HI),
++  LASX_BUILTIN (xvmulwod_h_bu_b, LARCH_V16HI_FTYPE_UV32QI_V32QI),
++  LASX_BUILTIN (xvhaddw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvhaddw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvhsubw_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvhsubw_qu_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI),
++  LASX_BUILTIN (xvmaddwev_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI),
++  LASX_BUILTIN (xvmaddwev_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI),
++  LASX_BUILTIN (xvmaddwev_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI),
++  LASX_BUILTIN (xvmaddwev_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI),
++  LASX_BUILTIN (xvmaddwev_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI),
++  LASX_BUILTIN (xvmaddwev_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmaddwev_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmaddwev_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmaddwod_q_d, LARCH_V4DI_FTYPE_V4DI_V4DI_V4DI),
++  LASX_BUILTIN (xvmaddwod_d_w, LARCH_V4DI_FTYPE_V4DI_V8SI_V8SI),
++  LASX_BUILTIN (xvmaddwod_w_h, LARCH_V8SI_FTYPE_V8SI_V16HI_V16HI),
++  LASX_BUILTIN (xvmaddwod_h_b, LARCH_V16HI_FTYPE_V16HI_V32QI_V32QI),
++  LASX_BUILTIN (xvmaddwod_q_du, LARCH_UV4DI_FTYPE_UV4DI_UV4DI_UV4DI),
++  LASX_BUILTIN (xvmaddwod_d_wu, LARCH_UV4DI_FTYPE_UV4DI_UV8SI_UV8SI),
++  LASX_BUILTIN (xvmaddwod_w_hu, LARCH_UV8SI_FTYPE_UV8SI_UV16HI_UV16HI),
++  LASX_BUILTIN (xvmaddwod_h_bu, LARCH_UV16HI_FTYPE_UV16HI_UV32QI_UV32QI),
++  LASX_BUILTIN (xvmaddwev_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI),
++  LASX_BUILTIN (xvmaddwev_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI),
++  LASX_BUILTIN (xvmaddwev_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI),
++  LASX_BUILTIN (xvmaddwev_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI),
++  LASX_BUILTIN (xvmaddwod_q_du_d, LARCH_V4DI_FTYPE_V4DI_UV4DI_V4DI),
++  LASX_BUILTIN (xvmaddwod_d_wu_w, LARCH_V4DI_FTYPE_V4DI_UV8SI_V8SI),
++  LASX_BUILTIN (xvmaddwod_w_hu_h, LARCH_V8SI_FTYPE_V8SI_UV16HI_V16HI),
++  LASX_BUILTIN (xvmaddwod_h_bu_b, LARCH_V16HI_FTYPE_V16HI_UV32QI_V32QI),
++  LASX_BUILTIN (xvrotr_b, LARCH_V32QI_FTYPE_V32QI_V32QI),
++  LASX_BUILTIN (xvrotr_h, LARCH_V16HI_FTYPE_V16HI_V16HI),
++  LASX_BUILTIN (xvrotr_w, LARCH_V8SI_FTYPE_V8SI_V8SI),
++  LASX_BUILTIN (xvrotr_d, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvadd_q, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvsub_q, LARCH_V4DI_FTYPE_V4DI_V4DI),
++  LASX_BUILTIN (xvaddwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI),
++  LASX_BUILTIN (xvaddwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI),
++  LASX_BUILTIN (xvmulwev_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI),
++  LASX_BUILTIN (xvmulwod_q_du_d, LARCH_V4DI_FTYPE_UV4DI_V4DI),
++  LASX_BUILTIN (xvmskgez_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvmsknz_b, LARCH_V32QI_FTYPE_V32QI),
++  LASX_BUILTIN (xvexth_h_b, LARCH_V16HI_FTYPE_V32QI),
++  LASX_BUILTIN (xvexth_w_h, LARCH_V8SI_FTYPE_V16HI),
++  LASX_BUILTIN (xvexth_d_w, LARCH_V4DI_FTYPE_V8SI),
++  LASX_BUILTIN (xvexth_q_d, LARCH_V4DI_FTYPE_V4DI),
++  LASX_BUILTIN (xvexth_hu_bu, LARCH_UV16HI_FTYPE_UV32QI),
++  LASX_BUILTIN (xvexth_wu_hu, LARCH_UV8SI_FTYPE_UV16HI),
++  LASX_BUILTIN (xvexth_du_wu, LARCH_UV4DI_FTYPE_UV8SI),
++  LASX_BUILTIN (xvexth_qu_du, LARCH_UV4DI_FTYPE_UV4DI),
++  LASX_BUILTIN (xvrotri_b, LARCH_V32QI_FTYPE_V32QI_UQI),
++  LASX_BUILTIN (xvrotri_h, LARCH_V16HI_FTYPE_V16HI_UQI),
++  LASX_BUILTIN (xvrotri_w, LARCH_V8SI_FTYPE_V8SI_UQI),
++  LASX_BUILTIN (xvrotri_d, LARCH_V4DI_FTYPE_V4DI_UQI),
++  LASX_BUILTIN (xvextl_q_d, LARCH_V4DI_FTYPE_V4DI),
++  LASX_BUILTIN (xvsrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvsrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvsrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvsrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvsrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvsrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvsrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvsrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrlni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrlni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrlni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrlni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrlni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrlni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrlni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrlni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrlrni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrlrni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrlrni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrlrni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrlrni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrlrni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrlrni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrlrni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI),
++  LASX_BUILTIN (xvsrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvsrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvsrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvsrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvsrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvsrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvsrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvsrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrani_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrani_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrani_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrani_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrani_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrani_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrani_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrani_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrarni_b_h, LARCH_V32QI_FTYPE_V32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrarni_h_w, LARCH_V16HI_FTYPE_V16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrarni_w_d, LARCH_V8SI_FTYPE_V8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrarni_d_q, LARCH_V4DI_FTYPE_V4DI_V4DI_USI),
++  LASX_BUILTIN (xvssrarni_bu_h, LARCH_UV32QI_FTYPE_UV32QI_V32QI_USI),
++  LASX_BUILTIN (xvssrarni_hu_w, LARCH_UV16HI_FTYPE_UV16HI_V16HI_USI),
++  LASX_BUILTIN (xvssrarni_wu_d, LARCH_UV8SI_FTYPE_UV8SI_V8SI_USI),
++  LASX_BUILTIN (xvssrarni_du_q, LARCH_UV4DI_FTYPE_UV4DI_V4DI_USI)
+ };
+ 
+ /* Index I is the function declaration for loongarch_builtins[I], or null if
+@@ -1446,11 +2502,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out,
+     {
+       if (out_n == 2 && in_n == 2)
+ 	return LARCH_GET_BUILTIN (lsx_vfrintrp_d);
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lasx_xvfrintrp_d);
+     }
+       if (out_mode == SFmode && in_mode == SFmode)
+     {
+       if (out_n == 4 && in_n == 4)
+ 	return LARCH_GET_BUILTIN (lsx_vfrintrp_s);
++      if (out_n == 8 && in_n == 8)
++	return LARCH_GET_BUILTIN (lasx_xvfrintrp_s);
+     }
+       break;
+ 
+@@ -1459,11 +2519,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out,
+     {
+       if (out_n == 2 && in_n == 2)
+ 	return LARCH_GET_BUILTIN (lsx_vfrintrz_d);
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lasx_xvfrintrz_d);
+     }
+       if (out_mode == SFmode && in_mode == SFmode)
+     {
+       if (out_n == 4 && in_n == 4)
+ 	return LARCH_GET_BUILTIN (lsx_vfrintrz_s);
++      if (out_n == 8 && in_n == 8)
++	return LARCH_GET_BUILTIN (lasx_xvfrintrz_s);
+     }
+       break;
+ 
+@@ -1473,11 +2537,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out,
+     {
+       if (out_n == 2 && in_n == 2)
+ 	return LARCH_GET_BUILTIN (lsx_vfrint_d);
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lasx_xvfrint_d);
+     }
+       if (out_mode == SFmode && in_mode == SFmode)
+     {
+       if (out_n == 4 && in_n == 4)
+ 	return LARCH_GET_BUILTIN (lsx_vfrint_s);
++      if (out_n == 8 && in_n == 8)
++	return LARCH_GET_BUILTIN (lasx_xvfrint_s);
+     }
+       break;
+ 
+@@ -1486,11 +2554,15 @@ loongarch_builtin_vectorized_function (unsigned int fn, tree type_out,
+     {
+       if (out_n == 2 && in_n == 2)
+ 	return LARCH_GET_BUILTIN (lsx_vfrintrm_d);
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lasx_xvfrintrm_d);
+     }
+       if (out_mode == SFmode && in_mode == SFmode)
+     {
+       if (out_n == 4 && in_n == 4)
+ 	return LARCH_GET_BUILTIN (lsx_vfrintrm_s);
++      if (out_n == 8 && in_n == 8)
++	return LARCH_GET_BUILTIN (lasx_xvfrintrm_s);
+     }
+       break;
+ 
+@@ -1565,6 +2637,30 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lsx_vsubi_hu:
+     case CODE_FOR_lsx_vsubi_wu:
+     case CODE_FOR_lsx_vsubi_du:
++    case CODE_FOR_lasx_xvaddi_bu:
++    case CODE_FOR_lasx_xvaddi_hu:
++    case CODE_FOR_lasx_xvaddi_wu:
++    case CODE_FOR_lasx_xvaddi_du:
++    case CODE_FOR_lasx_xvslti_bu:
++    case CODE_FOR_lasx_xvslti_hu:
++    case CODE_FOR_lasx_xvslti_wu:
++    case CODE_FOR_lasx_xvslti_du:
++    case CODE_FOR_lasx_xvslei_bu:
++    case CODE_FOR_lasx_xvslei_hu:
++    case CODE_FOR_lasx_xvslei_wu:
++    case CODE_FOR_lasx_xvslei_du:
++    case CODE_FOR_lasx_xvmaxi_bu:
++    case CODE_FOR_lasx_xvmaxi_hu:
++    case CODE_FOR_lasx_xvmaxi_wu:
++    case CODE_FOR_lasx_xvmaxi_du:
++    case CODE_FOR_lasx_xvmini_bu:
++    case CODE_FOR_lasx_xvmini_hu:
++    case CODE_FOR_lasx_xvmini_wu:
++    case CODE_FOR_lasx_xvmini_du:
++    case CODE_FOR_lasx_xvsubi_bu:
++    case CODE_FOR_lasx_xvsubi_hu:
++    case CODE_FOR_lasx_xvsubi_wu:
++    case CODE_FOR_lasx_xvsubi_du:
+       gcc_assert (has_target_p && nops == 3);
+       /* We only generate a vector of constants iff the second argument
+ 	 is an immediate.  We also validate the range of the immediate.  */
+@@ -1603,6 +2699,26 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lsx_vmini_h:
+     case CODE_FOR_lsx_vmini_w:
+     case CODE_FOR_lsx_vmini_d:
++    case CODE_FOR_lasx_xvseqi_b:
++    case CODE_FOR_lasx_xvseqi_h:
++    case CODE_FOR_lasx_xvseqi_w:
++    case CODE_FOR_lasx_xvseqi_d:
++    case CODE_FOR_lasx_xvslti_b:
++    case CODE_FOR_lasx_xvslti_h:
++    case CODE_FOR_lasx_xvslti_w:
++    case CODE_FOR_lasx_xvslti_d:
++    case CODE_FOR_lasx_xvslei_b:
++    case CODE_FOR_lasx_xvslei_h:
++    case CODE_FOR_lasx_xvslei_w:
++    case CODE_FOR_lasx_xvslei_d:
++    case CODE_FOR_lasx_xvmaxi_b:
++    case CODE_FOR_lasx_xvmaxi_h:
++    case CODE_FOR_lasx_xvmaxi_w:
++    case CODE_FOR_lasx_xvmaxi_d:
++    case CODE_FOR_lasx_xvmini_b:
++    case CODE_FOR_lasx_xvmini_h:
++    case CODE_FOR_lasx_xvmini_w:
++    case CODE_FOR_lasx_xvmini_d:
+       gcc_assert (has_target_p && nops == 3);
+       /* We only generate a vector of constants iff the second argument
+ 	 is an immediate.  We also validate the range of the immediate.  */
+@@ -1625,6 +2741,10 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lsx_vori_b:
+     case CODE_FOR_lsx_vnori_b:
+     case CODE_FOR_lsx_vxori_b:
++    case CODE_FOR_lasx_xvandi_b:
++    case CODE_FOR_lasx_xvori_b:
++    case CODE_FOR_lasx_xvnori_b:
++    case CODE_FOR_lasx_xvxori_b:
+       gcc_assert (has_target_p && nops == 3);
+       if (!CONST_INT_P (ops[2].value))
+ 	break;
+@@ -1634,6 +2754,7 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+       break;
+ 
+     case CODE_FOR_lsx_vbitseli_b:
++    case CODE_FOR_lasx_xvbitseli_b:
+       gcc_assert (has_target_p && nops == 4);
+       if (!CONST_INT_P (ops[3].value))
+ 	break;
+@@ -1646,6 +2767,10 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lsx_vreplgr2vr_h:
+     case CODE_FOR_lsx_vreplgr2vr_w:
+     case CODE_FOR_lsx_vreplgr2vr_d:
++    case CODE_FOR_lasx_xvreplgr2vr_b:
++    case CODE_FOR_lasx_xvreplgr2vr_h:
++    case CODE_FOR_lasx_xvreplgr2vr_w:
++    case CODE_FOR_lasx_xvreplgr2vr_d:
+       /* Map the built-ins to vector fill operations.  We need fix up the mode
+ 	 for the element being inserted.  */
+       gcc_assert (has_target_p && nops == 2);
+@@ -1674,6 +2799,26 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lsx_vpickod_b:
+     case CODE_FOR_lsx_vpickod_h:
+     case CODE_FOR_lsx_vpickod_w:
++    case CODE_FOR_lasx_xvilvh_b:
++    case CODE_FOR_lasx_xvilvh_h:
++    case CODE_FOR_lasx_xvilvh_w:
++    case CODE_FOR_lasx_xvilvh_d:
++    case CODE_FOR_lasx_xvilvl_b:
++    case CODE_FOR_lasx_xvilvl_h:
++    case CODE_FOR_lasx_xvilvl_w:
++    case CODE_FOR_lasx_xvilvl_d:
++    case CODE_FOR_lasx_xvpackev_b:
++    case CODE_FOR_lasx_xvpackev_h:
++    case CODE_FOR_lasx_xvpackev_w:
++    case CODE_FOR_lasx_xvpackod_b:
++    case CODE_FOR_lasx_xvpackod_h:
++    case CODE_FOR_lasx_xvpackod_w:
++    case CODE_FOR_lasx_xvpickev_b:
++    case CODE_FOR_lasx_xvpickev_h:
++    case CODE_FOR_lasx_xvpickev_w:
++    case CODE_FOR_lasx_xvpickod_b:
++    case CODE_FOR_lasx_xvpickod_h:
++    case CODE_FOR_lasx_xvpickod_w:
+       /* Swap the operands 1 and 2 for interleave operations.  Built-ins follow
+ 	 convention of ISA, which have op1 as higher component and op2 as lower
+ 	 component.  However, the VEC_PERM op in tree and vec_concat in RTL
+@@ -1695,6 +2840,18 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+     case CODE_FOR_lsx_vsrli_h:
+     case CODE_FOR_lsx_vsrli_w:
+     case CODE_FOR_lsx_vsrli_d:
++    case CODE_FOR_lasx_xvslli_b:
++    case CODE_FOR_lasx_xvslli_h:
++    case CODE_FOR_lasx_xvslli_w:
++    case CODE_FOR_lasx_xvslli_d:
++    case CODE_FOR_lasx_xvsrai_b:
++    case CODE_FOR_lasx_xvsrai_h:
++    case CODE_FOR_lasx_xvsrai_w:
++    case CODE_FOR_lasx_xvsrai_d:
++    case CODE_FOR_lasx_xvsrli_b:
++    case CODE_FOR_lasx_xvsrli_h:
++    case CODE_FOR_lasx_xvsrli_w:
++    case CODE_FOR_lasx_xvsrli_d:
+       gcc_assert (has_target_p && nops == 3);
+       if (CONST_INT_P (ops[2].value))
+ 	{
+@@ -1755,6 +2912,25 @@ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+ 							     INTVAL (ops[2].value));
+       break;
+ 
++    case CODE_FOR_lasx_xvinsgr2vr_w:
++    case CODE_FOR_lasx_xvinsgr2vr_d:
++      /* Map the built-ins to insert operations.  We need to swap operands,
++	 fix up the mode for the element being inserted, and generate
++	 a bit mask for vec_merge.  */
++      gcc_assert (has_target_p && nops == 4);
++      std::swap (ops[1], ops[2]);
++      imode = GET_MODE_INNER (ops[0].mode);
++      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
++      ops[1].mode = imode;
++      rangelo = 0;
++      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
++      if (CONST_INT_P (ops[3].value)
++	  && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
++	ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
++      else
++	error_opno = 2;
++      break;
++
+     default:
+       break;
+   }
+@@ -1864,12 +3040,14 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+     {
+     case LARCH_BUILTIN_DIRECT:
+     case LARCH_BUILTIN_LSX:
++    case LARCH_BUILTIN_LASX:
+       return loongarch_expand_builtin_direct (d->icode, target, exp, true);
+ 
+     case LARCH_BUILTIN_DIRECT_NO_TARGET:
+       return loongarch_expand_builtin_direct (d->icode, target, exp, false);
+ 
+     case LARCH_BUILTIN_LSX_TEST_BRANCH:
++    case LARCH_BUILTIN_LASX_TEST_BRANCH:
+       return loongarch_expand_builtin_lsx_test_branch (d->icode, exp);
+     }
+   gcc_unreachable ();
+diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def
+index 2b0d50892..c7f849e88 100644
+--- a/gcc/config/loongarch/loongarch-ftypes.def
++++ b/gcc/config/loongarch/loongarch-ftypes.def
+@@ -67,6 +67,7 @@ DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI))
+ DEF_LARCH_FTYPE (1, (DF, DF))
+ DEF_LARCH_FTYPE (2, (DF, DF, DF))
+ DEF_LARCH_FTYPE (1, (DF, V2DF))
++DEF_LARCH_FTYPE (1, (DF, V4DF))
+ 
+ DEF_LARCH_FTYPE (1, (DI, DI))
+ DEF_LARCH_FTYPE (1, (DI, SI))
+@@ -83,6 +84,7 @@ DEF_LARCH_FTYPE (2, (DI, SI, SI))
+ DEF_LARCH_FTYPE (2, (DI, USI, USI))
+ 
+ DEF_LARCH_FTYPE (2, (DI, V2DI, UQI))
++DEF_LARCH_FTYPE (2, (DI, V4DI, UQI))
+ 
+ DEF_LARCH_FTYPE (2, (INT, DF, DF))
+ DEF_LARCH_FTYPE (2, (INT, SF, SF))
+@@ -104,21 +106,31 @@ DEF_LARCH_FTYPE (3, (SI, SI, SI, SI))
+ DEF_LARCH_FTYPE (3, (SI, SI, SI, QI))
+ DEF_LARCH_FTYPE (1, (SI, UQI))
+ DEF_LARCH_FTYPE (1, (SI, UV16QI))
++DEF_LARCH_FTYPE (1, (SI, UV32QI))
+ DEF_LARCH_FTYPE (1, (SI, UV2DI))
++DEF_LARCH_FTYPE (1, (SI, UV4DI))
+ DEF_LARCH_FTYPE (1, (SI, UV4SI))
++DEF_LARCH_FTYPE (1, (SI, UV8SI))
+ DEF_LARCH_FTYPE (1, (SI, UV8HI))
++DEF_LARCH_FTYPE (1, (SI, UV16HI))
+ DEF_LARCH_FTYPE (2, (SI, V16QI, UQI))
++DEF_LARCH_FTYPE (2, (SI, V32QI, UQI))
+ DEF_LARCH_FTYPE (1, (SI, V2HI))
+ DEF_LARCH_FTYPE (2, (SI, V2HI, V2HI))
+ DEF_LARCH_FTYPE (1, (SI, V4QI))
+ DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI))
+ DEF_LARCH_FTYPE (2, (SI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (SI, V8SI, UQI))
+ DEF_LARCH_FTYPE (2, (SI, V8HI, UQI))
+ DEF_LARCH_FTYPE (1, (SI, VOID))
+ 
+ DEF_LARCH_FTYPE (2, (UDI, UDI, UDI))
++DEF_LARCH_FTYPE (2, (USI, V32QI, UQI))
+ DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI))
++DEF_LARCH_FTYPE (2, (USI, V8SI, UQI))
+ DEF_LARCH_FTYPE (2, (UDI, V2DI, UQI))
++DEF_LARCH_FTYPE (2, (USI, V16HI, UQI))
++DEF_LARCH_FTYPE (2, (UDI, V4DI, UQI))
+ 
+ DEF_LARCH_FTYPE (2, (USI, V16QI, UQI))
+ DEF_LARCH_FTYPE (2, (USI, V4SI, UQI))
+@@ -142,6 +154,23 @@ DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, V2DI))
+ DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UV4SI))
+ DEF_LARCH_FTYPE (1, (UV2DI, V2DF))
+ 
++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UQI))
++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, USI))
++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, UV32QI))
++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UQI))
++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, USI))
++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, UV32QI, UV32QI))
++DEF_LARCH_FTYPE (2, (UV32QI, UV32QI, V32QI))
++
++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UQI))
++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, UV4DI))
++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UQI))
++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV4DI, UV4DI))
++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (2, (UV4DI, UV4DI, V4DI))
++DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (1, (UV4DI, V4DF))
++
+ DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UQI))
+ DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UV2SI))
+ 
+@@ -170,7 +199,22 @@ DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UQI))
+ DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UV8HI))
+ DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, V8HI))
+ 
+-
++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UQI))
++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UQI))
++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (2, (UV8SI, UV8SI, V8SI))
++DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (1, (UV8SI, V8SF))
++
++DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UV32QI))
++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UQI))
++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV32QI, UV32QI))
++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UQI))
++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (2, (UV16HI, UV16HI, V16HI))
+ 
+ DEF_LARCH_FTYPE (2, (UV8QI, UV4HI, UV4HI))
+ DEF_LARCH_FTYPE (1, (UV8QI, UV8QI))
+@@ -196,6 +240,25 @@ DEF_LARCH_FTYPE (4, (V16QI, V16QI, V16QI, UQI, UQI))
+ DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, USI))
+ DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, V16QI))
+ 
++DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (2, (V32QI, CVPOINTER, DI))
++DEF_LARCH_FTYPE (1, (V32QI, HI))
++DEF_LARCH_FTYPE (1, (V32QI, SI))
++DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UQI))
++DEF_LARCH_FTYPE (2, (V32QI, UV32QI, UV32QI))
++DEF_LARCH_FTYPE (1, (V32QI, V32QI))
++DEF_LARCH_FTYPE (2, (V32QI, V32QI, QI))
++DEF_LARCH_FTYPE (2, (V32QI, V32QI, SI))
++DEF_LARCH_FTYPE (2, (V32QI, V32QI, UQI))
++DEF_LARCH_FTYPE (2, (V32QI, V32QI, USI))
++DEF_LARCH_FTYPE (3, (V32QI, V32QI, SI, UQI))
++DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, V32QI))
++DEF_LARCH_FTYPE (2, (V32QI, V32QI, V32QI))
++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, SI))
++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, UQI))
++DEF_LARCH_FTYPE (4, (V32QI, V32QI, V32QI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, USI))
++DEF_LARCH_FTYPE (3, (V32QI, V32QI, V32QI, V32QI))
+ 
+ DEF_LARCH_FTYPE (1, (V2DF, DF))
+ DEF_LARCH_FTYPE (1, (V2DF, UV2DI))
+@@ -207,6 +270,16 @@ DEF_LARCH_FTYPE (1, (V2DF, V2DI))
+ DEF_LARCH_FTYPE (1, (V2DF, V4SF))
+ DEF_LARCH_FTYPE (1, (V2DF, V4SI))
+ 
++DEF_LARCH_FTYPE (1, (V4DF, DF))
++DEF_LARCH_FTYPE (1, (V4DF, UV4DI))
++DEF_LARCH_FTYPE (1, (V4DF, V4DF))
++DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DF))
++DEF_LARCH_FTYPE (3, (V4DF, V4DF, V4DF, V4DF))
++DEF_LARCH_FTYPE (2, (V4DF, V4DF, V4DI))
++DEF_LARCH_FTYPE (1, (V4DF, V4DI))
++DEF_LARCH_FTYPE (1, (V4DF, V8SF))
++DEF_LARCH_FTYPE (1, (V4DF, V8SI))
++
+ DEF_LARCH_FTYPE (2, (V2DI, CVPOINTER, SI))
+ DEF_LARCH_FTYPE (1, (V2DI, DI))
+ DEF_LARCH_FTYPE (1, (V2DI, HI))
+@@ -233,6 +306,32 @@ DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, V2DI))
+ DEF_LARCH_FTYPE (3, (V2DI, V2DI, V4SI, V4SI))
+ DEF_LARCH_FTYPE (2, (V2DI, V4SI, V4SI))
+ 
++DEF_LARCH_FTYPE (2, (V4DI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (1, (V4DI, DI))
++DEF_LARCH_FTYPE (1, (V4DI, HI))
++DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UQI))
++DEF_LARCH_FTYPE (2, (V4DI, UV4DI, UV4DI))
++DEF_LARCH_FTYPE (2, (V4DI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (1, (V4DI, V4DF))
++DEF_LARCH_FTYPE (2, (V4DI, V4DF, V4DF))
++DEF_LARCH_FTYPE (1, (V4DI, V4DI))
++DEF_LARCH_FTYPE (1, (UV4DI, UV4DI))
++DEF_LARCH_FTYPE (2, (V4DI, V4DI, QI))
++DEF_LARCH_FTYPE (2, (V4DI, V4DI, SI))
++DEF_LARCH_FTYPE (2, (V4DI, V4DI, UQI))
++DEF_LARCH_FTYPE (2, (V4DI, V4DI, USI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, DI, UQI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, V4DI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (2, (V4DI, V4DI, V4DI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, SI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, USI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, UQI))
++DEF_LARCH_FTYPE (4, (V4DI, V4DI, V4DI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V4DI, V4DI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, V8SI, V8SI))
++DEF_LARCH_FTYPE (2, (V4DI, V8SI, V8SI))
++
+ DEF_LARCH_FTYPE (1, (V2HI, SI))
+ DEF_LARCH_FTYPE (2, (V2HI, SI, SI))
+ DEF_LARCH_FTYPE (3, (V2HI, SI, SI, SI))
+@@ -274,6 +373,17 @@ DEF_LARCH_FTYPE (3, (V4SF, V4SF, V4SF, V4SF))
+ DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SI))
+ DEF_LARCH_FTYPE (1, (V4SF, V4SI))
+ DEF_LARCH_FTYPE (1, (V4SF, V8HI))
++DEF_LARCH_FTYPE (1, (V8SF, V16HI))
++
++DEF_LARCH_FTYPE (1, (V8SF, SF))
++DEF_LARCH_FTYPE (1, (V8SF, UV8SI))
++DEF_LARCH_FTYPE (2, (V8SF, V4DF, V4DF))
++DEF_LARCH_FTYPE (1, (V8SF, V8SF))
++DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SF))
++DEF_LARCH_FTYPE (3, (V8SF, V8SF, V8SF, V8SF))
++DEF_LARCH_FTYPE (2, (V8SF, V8SF, V8SI))
++DEF_LARCH_FTYPE (1, (V8SF, V8SI))
++DEF_LARCH_FTYPE (1, (V8SF, V8HI))
+ 
+ DEF_LARCH_FTYPE (2, (V4SI, CVPOINTER, SI))
+ DEF_LARCH_FTYPE (1, (V4SI, HI))
+@@ -282,6 +392,7 @@ DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UQI))
+ DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UV4SI))
+ DEF_LARCH_FTYPE (2, (V4SI, UV8HI, UV8HI))
+ DEF_LARCH_FTYPE (2, (V4SI, V2DF, V2DF))
++DEF_LARCH_FTYPE (2, (V8SI, V4DF, V4DF))
+ DEF_LARCH_FTYPE (1, (V4SI, V4SF))
+ DEF_LARCH_FTYPE (2, (V4SI, V4SF, V4SF))
+ DEF_LARCH_FTYPE (1, (V4SI, V4SI))
+@@ -301,6 +412,32 @@ DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, V4SI))
+ DEF_LARCH_FTYPE (3, (V4SI, V4SI, V8HI, V8HI))
+ DEF_LARCH_FTYPE (2, (V4SI, V8HI, V8HI))
+ 
++DEF_LARCH_FTYPE (2, (V8SI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (1, (V8SI, HI))
++DEF_LARCH_FTYPE (1, (V8SI, SI))
++DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UQI))
++DEF_LARCH_FTYPE (2, (V8SI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (2, (V8SI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (2, (V8SI, V2DF, V2DF))
++DEF_LARCH_FTYPE (1, (V8SI, V8SF))
++DEF_LARCH_FTYPE (2, (V8SI, V8SF, V8SF))
++DEF_LARCH_FTYPE (1, (V8SI, V8SI))
++DEF_LARCH_FTYPE (2, (V8SI, V8SI, QI))
++DEF_LARCH_FTYPE (2, (V8SI, V8SI, SI))
++DEF_LARCH_FTYPE (2, (V8SI, V8SI, UQI))
++DEF_LARCH_FTYPE (2, (V8SI, V8SI, USI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, SI, UQI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, V8SI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (2, (V8SI, V8SI, V8SI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, SI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, UQI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, USI))
++DEF_LARCH_FTYPE (4, (V8SI, V8SI, V8SI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V8SI, V8SI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, V16HI, V16HI))
++DEF_LARCH_FTYPE (2, (V8SI, V16HI, V16HI))
++
+ DEF_LARCH_FTYPE (2, (V8HI, CVPOINTER, SI))
+ DEF_LARCH_FTYPE (1, (V8HI, HI))
+ DEF_LARCH_FTYPE (1, (V8HI, SI))
+@@ -326,6 +463,31 @@ DEF_LARCH_FTYPE (4, (V8HI, V8HI, V8HI, UQI, UQI))
+ DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, USI))
+ DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, V8HI))
+ 
++DEF_LARCH_FTYPE (2, (V16HI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (1, (V16HI, HI))
++DEF_LARCH_FTYPE (1, (V16HI, SI))
++DEF_LARCH_FTYPE (2, (V16HI, UV32QI, UV32QI))
++DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UQI))
++DEF_LARCH_FTYPE (2, (V16HI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (2, (V16HI, V32QI, V32QI))
++DEF_LARCH_FTYPE (2, (V16HI, V8SF, V8SF))
++DEF_LARCH_FTYPE (1, (V16HI, V16HI))
++DEF_LARCH_FTYPE (2, (V16HI, V16HI, QI))
++DEF_LARCH_FTYPE (2, (V16HI, V16HI, SI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, SI, UQI))
++DEF_LARCH_FTYPE (2, (V16HI, V16HI, UQI))
++DEF_LARCH_FTYPE (2, (V16HI, V16HI, USI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, SI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, V16HI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UV32QI, UV32QI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V32QI, V32QI))
++DEF_LARCH_FTYPE (2, (V16HI, V16HI, V16HI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, SI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, UQI))
++DEF_LARCH_FTYPE (4, (V16HI, V16HI, V16HI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, USI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, V16HI, V16HI))
++
+ DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI))
+ DEF_LARCH_FTYPE (1, (V8QI, V8QI))
+ DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI))
+@@ -337,62 +499,113 @@ DEF_LARCH_FTYPE (2, (VOID, USI, UQI))
+ DEF_LARCH_FTYPE (1, (VOID, UHI))
+ DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI))
+ DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI))
++DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V32QI, CVPOINTER, DI))
++DEF_LARCH_FTYPE (3, (VOID, V4DF, POINTER, SI))
+ DEF_LARCH_FTYPE (3, (VOID, V2DF, POINTER, SI))
+ DEF_LARCH_FTYPE (3, (VOID, V2DI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V4DI, CVPOINTER, SI))
+ DEF_LARCH_FTYPE (2, (VOID, V2HI, V2HI))
+ DEF_LARCH_FTYPE (2, (VOID, V4QI, V4QI))
+ DEF_LARCH_FTYPE (3, (VOID, V4SF, POINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V8SF, POINTER, SI))
+ DEF_LARCH_FTYPE (3, (VOID, V4SI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V8SI, CVPOINTER, SI))
+ DEF_LARCH_FTYPE (3, (VOID, V8HI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V16HI, CVPOINTER, SI))
+ 
++DEF_LARCH_FTYPE (1, (V16HI, V32QI))
++DEF_LARCH_FTYPE (1, (UV16HI, UV32QI))
++DEF_LARCH_FTYPE (1, (V8SI, V32QI))
++DEF_LARCH_FTYPE (1, (V4DI, V32QI))
+ DEF_LARCH_FTYPE (1, (V8HI, V16QI))
+ DEF_LARCH_FTYPE (1, (V4SI, V16QI))
+ DEF_LARCH_FTYPE (1, (V2DI, V16QI))
++DEF_LARCH_FTYPE (1, (UV8SI, UV16HI))
++DEF_LARCH_FTYPE (1, (V8SI, V16HI))
++DEF_LARCH_FTYPE (1, (V4DI, V16HI))
+ DEF_LARCH_FTYPE (1, (V4SI, V8HI))
+ DEF_LARCH_FTYPE (1, (V2DI, V8HI))
+ DEF_LARCH_FTYPE (1, (V2DI, V4SI))
++DEF_LARCH_FTYPE (1, (V4DI, V8SI))
++DEF_LARCH_FTYPE (1, (UV4DI, UV8SI))
++DEF_LARCH_FTYPE (1, (UV16HI, V32QI))
++DEF_LARCH_FTYPE (1, (UV8SI, V32QI))
++DEF_LARCH_FTYPE (1, (UV4DI, V32QI))
+ DEF_LARCH_FTYPE (1, (UV8HI, V16QI))
+ DEF_LARCH_FTYPE (1, (UV4SI, V16QI))
+ DEF_LARCH_FTYPE (1, (UV2DI, V16QI))
++DEF_LARCH_FTYPE (1, (UV8SI, V16HI))
++DEF_LARCH_FTYPE (1, (UV4DI, V16HI))
+ DEF_LARCH_FTYPE (1, (UV4SI, V8HI))
+ DEF_LARCH_FTYPE (1, (UV2DI, V8HI))
+ DEF_LARCH_FTYPE (1, (UV2DI, V4SI))
++DEF_LARCH_FTYPE (1, (UV4DI, V8SI))
+ DEF_LARCH_FTYPE (1, (UV8HI, UV16QI))
+ DEF_LARCH_FTYPE (1, (UV4SI, UV16QI))
+ DEF_LARCH_FTYPE (1, (UV2DI, UV16QI))
++DEF_LARCH_FTYPE (1, (UV4DI, UV32QI))
+ DEF_LARCH_FTYPE (1, (UV4SI, UV8HI))
+ DEF_LARCH_FTYPE (1, (UV2DI, UV8HI))
+ DEF_LARCH_FTYPE (1, (UV2DI, UV4SI))
+ DEF_LARCH_FTYPE (2, (UV8HI, V16QI, V16QI))
+ DEF_LARCH_FTYPE (2, (UV4SI, V8HI, V8HI))
+ DEF_LARCH_FTYPE (2, (UV2DI, V4SI, V4SI))
++DEF_LARCH_FTYPE (2, (V16HI, V32QI, UQI))
++DEF_LARCH_FTYPE (2, (V8SI, V16HI, UQI))
++DEF_LARCH_FTYPE (2, (V4DI, V8SI, UQI))
+ DEF_LARCH_FTYPE (2, (V8HI, V16QI, UQI))
+ DEF_LARCH_FTYPE (2, (V4SI, V8HI, UQI))
+ DEF_LARCH_FTYPE (2, (V2DI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (UV16HI, UV32QI, UQI))
++DEF_LARCH_FTYPE (2, (UV8SI, UV16HI, UQI))
++DEF_LARCH_FTYPE (2, (UV4DI, UV8SI, UQI))
+ DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UQI))
+ DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UQI))
+ DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UQI))
++DEF_LARCH_FTYPE (2, (V32QI, V16HI, V16HI))
++DEF_LARCH_FTYPE (2, (V16HI, V8SI, V8SI))
++DEF_LARCH_FTYPE (2, (V8SI, V4DI, V4DI))
+ DEF_LARCH_FTYPE (2, (V16QI, V8HI, V8HI))
+ DEF_LARCH_FTYPE (2, (V8HI, V4SI, V4SI))
+ DEF_LARCH_FTYPE (2, (V4SI, V2DI, V2DI))
++DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UV16HI))
++DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UV8SI))
++DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UV4DI))
+ DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UV8HI))
+ DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UV4SI))
+ DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UV2DI))
++DEF_LARCH_FTYPE (2, (V32QI, V16HI, UQI))
++DEF_LARCH_FTYPE (2, (V16HI, V8SI, UQI))
++DEF_LARCH_FTYPE (2, (V8SI, V4DI, UQI))
+ DEF_LARCH_FTYPE (2, (V16QI, V8HI, UQI))
+ DEF_LARCH_FTYPE (2, (V8HI, V4SI, UQI))
+ DEF_LARCH_FTYPE (2, (V4SI, V2DI, UQI))
++DEF_LARCH_FTYPE (2, (UV32QI, UV16HI, UQI))
++DEF_LARCH_FTYPE (2, (UV16HI, UV8SI, UQI))
++DEF_LARCH_FTYPE (2, (UV8SI, UV4DI, UQI))
+ DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UQI))
+ DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UQI))
+ DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UQI))
++DEF_LARCH_FTYPE (2, (V32QI, V32QI, DI))
+ DEF_LARCH_FTYPE (2, (V16QI, V16QI, DI))
++DEF_LARCH_FTYPE (2, (V32QI, UQI, UQI))
+ DEF_LARCH_FTYPE (2, (V16QI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V32QI, V32QI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V16HI, V16HI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V8SI, V8SI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V4DI, V4DI, UQI, UQI))
+ DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, UQI))
+ DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, UQI))
+ DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, UQI))
+ DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, UQI))
++DEF_LARCH_FTYPE (2, (V8SF, V4DI, V4DI))
+ DEF_LARCH_FTYPE (2, (V4SF, V2DI, V2DI))
++DEF_LARCH_FTYPE (1, (V4DI, V8SF))
+ DEF_LARCH_FTYPE (1, (V2DI, V4SF))
++DEF_LARCH_FTYPE (2, (V4DI, UQI, USI))
+ DEF_LARCH_FTYPE (2, (V2DI, UQI, USI))
++DEF_LARCH_FTYPE (2, (V4DI, UQI, UQI))
+ DEF_LARCH_FTYPE (2, (V2DI, UQI, UQI))
+ DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V16QI, CVPOINTER))
+ DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V8HI, CVPOINTER))
+@@ -402,6 +615,17 @@ DEF_LARCH_FTYPE (2, (V16QI, SI, CVPOINTER))
+ DEF_LARCH_FTYPE (2, (V8HI, SI, CVPOINTER))
+ DEF_LARCH_FTYPE (2, (V4SI, SI, CVPOINTER))
+ DEF_LARCH_FTYPE (2, (V2DI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (4, (VOID, V32QI, UQI, SI,  CVPOINTER))
++DEF_LARCH_FTYPE (4, (VOID, V16HI, UQI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (4, (VOID, V8SI, UQI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (4, (VOID, V4DI, UQI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (3, (VOID, V32QI, SI,  CVPOINTER))
++DEF_LARCH_FTYPE (2, (V32QI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V16HI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V8SI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V4DI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (1, (V32QI, POINTER))
++DEF_LARCH_FTYPE (2, (VOID, V32QI, POINTER))
+ DEF_LARCH_FTYPE (2, (V8HI, UV16QI, V16QI))
+ DEF_LARCH_FTYPE (2, (V16QI, V16QI, UV16QI))
+ DEF_LARCH_FTYPE (2, (UV16QI, V16QI, UV16QI))
+@@ -431,6 +655,33 @@ DEF_LARCH_FTYPE (3, (V4SI, V4SI, V16QI, V16QI))
+ DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI))
+ DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI))
+ 
++
++DEF_LARCH_FTYPE(2,(V4DI,V16HI,V16HI))
++DEF_LARCH_FTYPE(2,(V4DI,UV4SI,V4SI))
++DEF_LARCH_FTYPE(2,(V8SI,UV16HI,V16HI))
++DEF_LARCH_FTYPE(2,(V16HI,UV32QI,V32QI))
++DEF_LARCH_FTYPE(2,(V4DI,UV8SI,V8SI))
++DEF_LARCH_FTYPE(3,(V4DI,V4DI,V16HI,V16HI))
++DEF_LARCH_FTYPE(2,(UV32QI,V32QI,UV32QI))
++DEF_LARCH_FTYPE(2,(UV16HI,V16HI,UV16HI))
++DEF_LARCH_FTYPE(2,(UV8SI,V8SI,UV8SI))
++DEF_LARCH_FTYPE(2,(UV4DI,V4DI,UV4DI))
++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV4DI,V4DI))
++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV8SI,V8SI))
++DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV16HI,V16HI))
++DEF_LARCH_FTYPE(3,(V16HI,V16HI,UV32QI,V32QI))
++DEF_LARCH_FTYPE(2,(V4DI,UV4DI,V4DI))
++DEF_LARCH_FTYPE(2,(V8SI,V32QI,V32QI))
++DEF_LARCH_FTYPE(2,(UV4DI,UV16HI,UV16HI))
++DEF_LARCH_FTYPE(2,(V4DI,UV16HI,V16HI))
++DEF_LARCH_FTYPE(3,(V8SI,V8SI,V32QI,V32QI))
++DEF_LARCH_FTYPE(3,(UV8SI,UV8SI,UV32QI,UV32QI))
++DEF_LARCH_FTYPE(3,(UV4DI,UV4DI,UV16HI,UV16HI))
++DEF_LARCH_FTYPE(3,(V8SI,V8SI,UV32QI,V32QI))
++DEF_LARCH_FTYPE(3,(V4DI,V4DI,UV16HI,V16HI))
++DEF_LARCH_FTYPE(2,(UV8SI,UV32QI,UV32QI))
++DEF_LARCH_FTYPE(2,(V8SI,UV32QI,V32QI))
++
+ DEF_LARCH_FTYPE(4,(VOID,V16QI,CVPOINTER,SI,UQI))
+ DEF_LARCH_FTYPE(4,(VOID,V8HI,CVPOINTER,SI,UQI))
+ DEF_LARCH_FTYPE(4,(VOID,V4SI,CVPOINTER,SI,UQI))
+@@ -448,11 +699,29 @@ DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, V8HI, USI))
+ DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, V4SI, USI))
+ DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, V2DI, USI))
+ 
++DEF_LARCH_FTYPE (2, (DI, V8SI, UQI))
++DEF_LARCH_FTYPE (2, (UDI, V8SI, UQI))
++
++DEF_LARCH_FTYPE (3, (UV32QI, UV32QI, V32QI, USI))
++DEF_LARCH_FTYPE (3, (UV16HI, UV16HI, V16HI, USI))
++DEF_LARCH_FTYPE (3, (UV8SI, UV8SI, V8SI, USI))
++DEF_LARCH_FTYPE (3, (UV4DI, UV4DI, V4DI, USI))
++
++DEF_LARCH_FTYPE(4,(VOID,V32QI,CVPOINTER,SI,UQI))
++DEF_LARCH_FTYPE(4,(VOID,V16HI,CVPOINTER,SI,UQI))
++DEF_LARCH_FTYPE(4,(VOID,V8SI,CVPOINTER,SI,UQI))
++DEF_LARCH_FTYPE(4,(VOID,V4DI,CVPOINTER,SI,UQI))
++
+ DEF_LARCH_FTYPE (1, (BOOLEAN,V16QI))
+ DEF_LARCH_FTYPE(2,(V16QI,CVPOINTER,CVPOINTER))
+ DEF_LARCH_FTYPE(3,(VOID,V16QI,CVPOINTER,CVPOINTER))
++DEF_LARCH_FTYPE(2,(V32QI,CVPOINTER,CVPOINTER))
++DEF_LARCH_FTYPE(3,(VOID,V32QI,CVPOINTER,CVPOINTER))
+ 
+ DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI))
+ DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI))
+ DEF_LARCH_FTYPE (3, (V2DI, V2DI, DI, UQI))
+ DEF_LARCH_FTYPE (3, (V4SI, V4SI, SI, UQI))
++
++DEF_LARCH_FTYPE (2, (V8SF, V8SF, UQI))
++DEF_LARCH_FTYPE (2, (V4DF, V4DF, UQI))
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-Loongson-SX-base-instruction-support.patch b/LoongArch-Add-Loongson-SX-base-instruction-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1f1fc8c8bd95999e956348fb2c92cb070a9c82b8
--- /dev/null
+++ b/LoongArch-Add-Loongson-SX-base-instruction-support.patch
@@ -0,0 +1,8433 @@
+From 0b4626bb55886081e90922cf6d6869d551847a47 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 16 Mar 2023 16:29:42 +0800
+Subject: [PATCH 063/124] LoongArch: Add Loongson SX base instruction support.
+
+gcc/ChangeLog:
+
+	* config/loongarch/constraints.md (M): Add Loongson LSX base instruction support.
+	(N): Ditto.
+	(O): Ditto.
+	(P): Ditto.
+	(R): Ditto.
+	(S): Ditto.
+	(YG): Ditto.
+	(YA): Ditto.
+	(YB): Ditto.
+	(Yb): Ditto.
+	(Yh): Ditto.
+	(Yw): Ditto.
+	(YI): Ditto.
+	(YC): Ditto.
+	(YZ): Ditto.
+	(Unv5): Ditto.
+	(Uuv5): Ditto.
+	(Usv5): Ditto.
+	(Uuv6): Ditto.
+	(Urv8): Ditto.
+	* config/loongarch/genopts/loongarch.opt.in: Ditto.
+	* config/loongarch/loongarch-builtins.cc (loongarch_gen_const_int_vector): Ditto.
+	* config/loongarch/loongarch-modes.def (VECTOR_MODES): Ditto.
+	(VECTOR_MODE): Ditto.
+	(INT_MODE): Ditto.
+	* config/loongarch/loongarch-protos.h (loongarch_split_move_insn_p): Ditto.
+	(loongarch_split_move_insn): Ditto.
+	(loongarch_split_128bit_move): Ditto.
+	(loongarch_split_128bit_move_p): Ditto.
+	(loongarch_split_lsx_copy_d): Ditto.
+	(loongarch_split_lsx_insert_d): Ditto.
+	(loongarch_split_lsx_fill_d): Ditto.
+	(loongarch_expand_vec_cmp): Ditto.
+	(loongarch_const_vector_same_val_p): Ditto.
+	(loongarch_const_vector_same_bytes_p): Ditto.
+	(loongarch_const_vector_same_int_p): Ditto.
+	(loongarch_const_vector_shuffle_set_p): Ditto.
+	(loongarch_const_vector_bitimm_set_p): Ditto.
+	(loongarch_const_vector_bitimm_clr_p): Ditto.
+	(loongarch_lsx_vec_parallel_const_half): Ditto.
+	(loongarch_gen_const_int_vector): Ditto.
+	(loongarch_lsx_output_division): Ditto.
+	(loongarch_expand_vector_init): Ditto.
+	(loongarch_expand_vec_unpack): Ditto.
+	(loongarch_expand_vec_perm): Ditto.
+	(loongarch_expand_vector_extract): Ditto.
+	(loongarch_expand_vector_reduc): Ditto.
+	(loongarch_ldst_scaled_shift): Ditto.
+	(loongarch_expand_vec_cond_expr): Ditto.
+	(loongarch_expand_vec_cond_mask_expr): Ditto.
+	(loongarch_builtin_vectorized_function): Ditto.
+	(loongarch_gen_const_int_vector_shuffle): Ditto.
+	(loongarch_build_signbit_mask): Ditto.
+	* config/loongarch/loongarch.cc (loongarch_pass_aggregate_num_fpr): Ditto.
+	(loongarch_setup_incoming_varargs): Ditto.
+	(loongarch_emit_move): Ditto.
+	(loongarch_const_vector_bitimm_set_p): Ditto.
+	(loongarch_const_vector_bitimm_clr_p): Ditto.
+	(loongarch_const_vector_same_val_p): Ditto.
+	(loongarch_const_vector_same_bytes_p): Ditto.
+	(loongarch_const_vector_same_int_p): Ditto.
+	(loongarch_const_vector_shuffle_set_p): Ditto.
+	(loongarch_symbol_insns): Ditto.
+	(loongarch_cannot_force_const_mem): Ditto.
+	(loongarch_valid_offset_p): Ditto.
+	(loongarch_valid_index_p): Ditto.
+	(loongarch_classify_address): Ditto.
+	(loongarch_address_insns): Ditto.
+	(loongarch_ldst_scaled_shift): Ditto.
+	(loongarch_const_insns): Ditto.
+	(loongarch_split_move_insn_p): Ditto.
+	(loongarch_subword_at_byte): Ditto.
+	(loongarch_legitimize_move): Ditto.
+	(loongarch_builtin_vectorization_cost): Ditto.
+	(loongarch_split_move_p): Ditto.
+	(loongarch_split_move): Ditto.
+	(loongarch_split_move_insn): Ditto.
+	(loongarch_output_move_index_float): Ditto.
+	(loongarch_split_128bit_move_p): Ditto.
+	(loongarch_split_128bit_move): Ditto.
+	(loongarch_split_lsx_copy_d): Ditto.
+	(loongarch_split_lsx_insert_d): Ditto.
+	(loongarch_split_lsx_fill_d): Ditto.
+	(loongarch_output_move): Ditto.
+	(loongarch_extend_comparands): Ditto.
+	(loongarch_print_operand_reloc): Ditto.
+	(loongarch_print_operand): Ditto.
+	(loongarch_hard_regno_mode_ok_uncached): Ditto.
+	(loongarch_hard_regno_call_part_clobbered): Ditto.
+	(loongarch_hard_regno_nregs): Ditto.
+	(loongarch_class_max_nregs): Ditto.
+	(loongarch_can_change_mode_class): Ditto.
+	(loongarch_mode_ok_for_mov_fmt_p): Ditto.
+	(loongarch_secondary_reload): Ditto.
+	(loongarch_vector_mode_supported_p): Ditto.
+	(loongarch_preferred_simd_mode): Ditto.
+	(loongarch_autovectorize_vector_modes): Ditto.
+	(loongarch_lsx_output_division): Ditto.
+	(loongarch_option_override_internal): Ditto.
+	(loongarch_hard_regno_caller_save_mode): Ditto.
+	(MAX_VECT_LEN): Ditto.
+	(loongarch_spill_class): Ditto.
+	(struct expand_vec_perm_d): Ditto.
+	(loongarch_promote_function_mode): Ditto.
+	(loongarch_expand_vselect): Ditto.
+	(loongarch_starting_frame_offset): Ditto.
+	(loongarch_expand_vselect_vconcat): Ditto.
+	(TARGET_ASM_ALIGNED_DI_OP): Ditto.
+	(TARGET_OPTION_OVERRIDE): Ditto.
+	(TARGET_LEGITIMIZE_ADDRESS): Ditto.
+	(TARGET_ASM_SELECT_RTX_SECTION): Ditto.
+	(TARGET_ASM_FUNCTION_RODATA_SECTION): Ditto.
+	(loongarch_expand_lsx_shuffle): Ditto.
+	(TARGET_SCHED_INIT): Ditto.
+	(TARGET_SCHED_REORDER): Ditto.
+	(TARGET_SCHED_REORDER2): Ditto.
+	(TARGET_SCHED_VARIABLE_ISSUE): Ditto.
+	(TARGET_SCHED_ADJUST_COST): Ditto.
+	(TARGET_SCHED_ISSUE_RATE): Ditto.
+	(TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD): Ditto.
+	(TARGET_FUNCTION_OK_FOR_SIBCALL): Ditto.
+	(TARGET_VALID_POINTER_MODE): Ditto.
+	(TARGET_REGISTER_MOVE_COST): Ditto.
+	(TARGET_MEMORY_MOVE_COST): Ditto.
+	(TARGET_RTX_COSTS): Ditto.
+	(TARGET_ADDRESS_COST): Ditto.
+	(TARGET_IN_SMALL_DATA_P): Ditto.
+	(TARGET_PREFERRED_RELOAD_CLASS): Ditto.
+	(TARGET_ASM_FILE_START_FILE_DIRECTIVE): Ditto.
+	(TARGET_EXPAND_BUILTIN_VA_START): Ditto.
+	(loongarch_expand_vec_perm): Ditto.
+	(TARGET_PROMOTE_FUNCTION_MODE): Ditto.
+	(TARGET_RETURN_IN_MEMORY): Ditto.
+	(TARGET_FUNCTION_VALUE): Ditto.
+	(TARGET_LIBCALL_VALUE): Ditto.
+	(loongarch_try_expand_lsx_vshuf_const): Ditto.
+	(TARGET_ASM_OUTPUT_MI_THUNK): Ditto.
+	(TARGET_ASM_CAN_OUTPUT_MI_THUNK): Ditto.
+	(TARGET_PRINT_OPERAND): Ditto.
+	(TARGET_PRINT_OPERAND_ADDRESS): Ditto.
+	(TARGET_PRINT_OPERAND_PUNCT_VALID_P): Ditto.
+	(TARGET_SETUP_INCOMING_VARARGS): Ditto.
+	(TARGET_STRICT_ARGUMENT_NAMING): Ditto.
+	(TARGET_MUST_PASS_IN_STACK): Ditto.
+	(TARGET_PASS_BY_REFERENCE): Ditto.
+	(TARGET_ARG_PARTIAL_BYTES): Ditto.
+	(TARGET_FUNCTION_ARG): Ditto.
+	(TARGET_FUNCTION_ARG_ADVANCE): Ditto.
+	(TARGET_FUNCTION_ARG_BOUNDARY): Ditto.
+	(TARGET_SCALAR_MODE_SUPPORTED_P): Ditto.
+	(TARGET_INIT_BUILTINS): Ditto.
+	(loongarch_expand_vec_perm_const_1): Ditto.
+	(loongarch_expand_vec_perm_const_2): Ditto.
+	(loongarch_vectorize_vec_perm_const): Ditto.
+	(loongarch_cpu_sched_reassociation_width): Ditto.
+	(loongarch_sched_reassociation_width): Ditto.
+	(loongarch_expand_vector_extract): Ditto.
+	(emit_reduc_half): Ditto.
+	(loongarch_expand_vector_reduc): Ditto.
+	(loongarch_expand_vec_unpack): Ditto.
+	(loongarch_lsx_vec_parallel_const_half): Ditto.
+	(loongarch_constant_elt_p): Ditto.
+	(loongarch_gen_const_int_vector_shuffle): Ditto.
+	(loongarch_expand_vector_init): Ditto.
+	(loongarch_expand_lsx_cmp): Ditto.
+	(loongarch_expand_vec_cond_expr): Ditto.
+	(loongarch_expand_vec_cond_mask_expr): Ditto.
+	(loongarch_expand_vec_cmp): Ditto.
+	(loongarch_case_values_threshold): Ditto.
+	(loongarch_build_const_vector): Ditto.
+	(loongarch_build_signbit_mask): Ditto.
+	(loongarch_builtin_support_vector_misalignment): Ditto.
+	(TARGET_ASM_ALIGNED_HI_OP): Ditto.
+	(TARGET_ASM_ALIGNED_SI_OP): Ditto.
+	(TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST): Ditto.
+	(TARGET_VECTOR_MODE_SUPPORTED_P): Ditto.
+	(TARGET_VECTORIZE_PREFERRED_SIMD_MODE): Ditto.
+	(TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES): Ditto.
+	(TARGET_VECTORIZE_VEC_PERM_CONST): Ditto.
+	(TARGET_SCHED_REASSOCIATION_WIDTH): Ditto.
+	(TARGET_CASE_VALUES_THRESHOLD): Ditto.
+	(TARGET_HARD_REGNO_CALL_PART_CLOBBERED): Ditto.
+	(TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT): Ditto.
+	* config/loongarch/loongarch.h (TARGET_SUPPORTS_WIDE_INT): Ditto.
+	(UNITS_PER_LSX_REG): Ditto.
+	(BITS_PER_LSX_REG): Ditto.
+	(BIGGEST_ALIGNMENT): Ditto.
+	(LSX_REG_FIRST): Ditto.
+	(LSX_REG_LAST): Ditto.
+	(LSX_REG_NUM): Ditto.
+	(LSX_REG_P): Ditto.
+	(LSX_REG_RTX_P): Ditto.
+	(IMM13_OPERAND): Ditto.
+	(LSX_SUPPORTED_MODE_P): Ditto.
+	* config/loongarch/loongarch.md (unknown,add,sub,not,nor,and,or,xor): Ditto.
+	(unknown,add,sub,not,nor,and,or,xor,simd_add): Ditto.
+	(unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC): Ditto.
+	(mode" ): Ditto.
+	(DF): Ditto.
+	(SF): Ditto.
+	(sf): Ditto.
+	(DI): Ditto.
+	(SI): Ditto.
+	* config/loongarch/loongarch.opt: Ditto.
+	* config/loongarch/predicates.md (const_lsx_branch_operand): Ditto.
+	(const_uimm3_operand): Ditto.
+	(const_8_to_11_operand): Ditto.
+	(const_12_to_15_operand): Ditto.
+	(const_uimm4_operand): Ditto.
+	(const_uimm6_operand): Ditto.
+	(const_uimm7_operand): Ditto.
+	(const_uimm8_operand): Ditto.
+	(const_imm5_operand): Ditto.
+	(const_imm10_operand): Ditto.
+	(const_imm13_operand): Ditto.
+	(reg_imm10_operand): Ditto.
+	(aq8b_operand): Ditto.
+	(aq8h_operand): Ditto.
+	(aq8w_operand): Ditto.
+	(aq8d_operand): Ditto.
+	(aq10b_operand): Ditto.
+	(aq10h_operand): Ditto.
+	(aq10w_operand): Ditto.
+	(aq10d_operand): Ditto.
+	(aq12b_operand): Ditto.
+	(aq12h_operand): Ditto.
+	(aq12w_operand): Ditto.
+	(aq12d_operand): Ditto.
+	(const_m1_operand): Ditto.
+	(reg_or_m1_operand): Ditto.
+	(const_exp_2_operand): Ditto.
+	(const_exp_4_operand): Ditto.
+	(const_exp_8_operand): Ditto.
+	(const_exp_16_operand): Ditto.
+	(const_exp_32_operand): Ditto.
+	(const_0_or_1_operand): Ditto.
+	(const_0_to_3_operand): Ditto.
+	(const_0_to_7_operand): Ditto.
+	(const_2_or_3_operand): Ditto.
+	(const_4_to_7_operand): Ditto.
+	(const_8_to_15_operand): Ditto.
+	(const_16_to_31_operand): Ditto.
+	(qi_mask_operand): Ditto.
+	(hi_mask_operand): Ditto.
+	(si_mask_operand): Ditto.
+	(d_operand): Ditto.
+	(db4_operand): Ditto.
+	(db7_operand): Ditto.
+	(db8_operand): Ditto.
+	(ib3_operand): Ditto.
+	(sb4_operand): Ditto.
+	(sb5_operand): Ditto.
+	(sb8_operand): Ditto.
+	(sd8_operand): Ditto.
+	(ub4_operand): Ditto.
+	(ub8_operand): Ditto.
+	(uh4_operand): Ditto.
+	(uw4_operand): Ditto.
+	(uw5_operand): Ditto.
+	(uw6_operand): Ditto.
+	(uw8_operand): Ditto.
+	(addiur2_operand): Ditto.
+	(addiusp_operand): Ditto.
+	(andi16_operand): Ditto.
+	(movep_src_register): Ditto.
+	(movep_src_operand): Ditto.
+	(fcc_reload_operand): Ditto.
+	(muldiv_target_operand): Ditto.
+	(const_vector_same_val_operand): Ditto.
+	(const_vector_same_simm5_operand): Ditto.
+	(const_vector_same_uimm5_operand): Ditto.
+	(const_vector_same_ximm5_operand): Ditto.
+	(const_vector_same_uimm6_operand): Ditto.
+	(par_const_vector_shf_set_operand): Ditto.
+	(reg_or_vector_same_val_operand): Ditto.
+	(reg_or_vector_same_simm5_operand): Ditto.
+	(reg_or_vector_same_uimm5_operand): Ditto.
+	(reg_or_vector_same_ximm5_operand): Ditto.
+	(reg_or_vector_same_uimm6_operand): Ditto.
+	* doc/md.texi: Ditto.
+	* config/loongarch/lsx.md: New file.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/constraints.md           |  131 +-
+ gcc/config/loongarch/genopts/loongarch.opt.in |    4 +
+ gcc/config/loongarch/loongarch-builtins.cc    |   10 +
+ gcc/config/loongarch/loongarch-modes.def      |   38 +
+ gcc/config/loongarch/loongarch-protos.h       |   31 +
+ gcc/config/loongarch/loongarch.cc             | 2226 +++++++-
+ gcc/config/loongarch/loongarch.h              |   65 +-
+ gcc/config/loongarch/loongarch.md             |   44 +-
+ gcc/config/loongarch/loongarch.opt            |    4 +
+ gcc/config/loongarch/lsx.md                   | 4467 +++++++++++++++++
+ gcc/config/loongarch/predicates.md            |  333 +-
+ gcc/doc/md.texi                               |   11 +
+ 12 files changed, 7181 insertions(+), 183 deletions(-)
+ create mode 100644 gcc/config/loongarch/lsx.md
+
+diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md
+index 25f3cda35..cec5d8857 100644
+--- a/gcc/config/loongarch/constraints.md
++++ b/gcc/config/loongarch/constraints.md
+@@ -76,12 +76,13 @@
+ ;;     "Le"
+ ;;	 "A signed 32-bit constant can be expressed as Lb + I, but not a
+ ;;	  single Lb or I."
+-;; "M" <-----unused
+-;; "N" <-----unused
+-;; "O" <-----unused
+-;; "P" <-----unused
++;; "M" "A constant that cannot be loaded using @code{lui}, @code{addiu}
++;;	or @code{ori}."
++;; "N" "A constant in the range -65535 to -1 (inclusive)."
++;; "O" "A signed 15-bit constant."
++;; "P" "A constant in the range 1 to 65535 (inclusive)."
+ ;; "Q" <-----unused
+-;; "R" <-----unused
++;; "R" "An address that can be used in a non-macro load or store."
+ ;; "S" <-----unused
+ ;; "T" <-----unused
+ ;; "U" <-----unused
+@@ -214,6 +215,63 @@
+   (and (match_code "const_int")
+        (match_test "loongarch_addu16i_imm12_operand_p (ival, SImode)")))
+ 
++(define_constraint "M"
++  "A constant that cannot be loaded using @code{lui}, @code{addiu}
++   or @code{ori}."
++  (and (match_code "const_int")
++       (not (match_test "IMM12_OPERAND (ival)"))
++       (not (match_test "IMM12_OPERAND_UNSIGNED (ival)"))
++       (not (match_test "LU12I_OPERAND (ival)"))))
++
++(define_constraint "N"
++  "A constant in the range -65535 to -1 (inclusive)."
++  (and (match_code "const_int")
++       (match_test "ival >= -0xffff && ival < 0")))
++
++(define_constraint "O"
++  "A signed 15-bit constant."
++  (and (match_code "const_int")
++       (match_test "ival >= -0x4000 && ival < 0x4000")))
++
++(define_constraint "P"
++  "A constant in the range 1 to 65535 (inclusive)."
++  (and (match_code "const_int")
++       (match_test "ival > 0 && ival < 0x10000")))
++
++;; General constraints
++
++(define_memory_constraint "R"
++  "An address that can be used in a non-macro load or store."
++  (and (match_code "mem")
++       (match_test "loongarch_address_insns (XEXP (op, 0), mode, false) == 1")))
++(define_constraint "S"
++  "@internal
++   A constant call address."
++  (and (match_operand 0 "call_insn_operand")
++       (match_test "CONSTANT_P (op)")))
++
++(define_constraint "YG"
++  "@internal
++   A vector zero."
++  (and (match_code "const_vector")
++       (match_test "op == CONST0_RTX (mode)")))
++
++(define_constraint "YA"
++  "@internal
++   An unsigned 6-bit constant."
++  (and (match_code "const_int")
++       (match_test "UIMM6_OPERAND (ival)")))
++
++(define_constraint "YB"
++  "@internal
++   A signed 10-bit constant."
++  (and (match_code "const_int")
++       (match_test "IMM10_OPERAND (ival)")))
++
++(define_constraint "Yb"
++   "@internal"
++   (match_operand 0 "qi_mask_operand"))
++
+ (define_constraint "Yd"
+   "@internal
+    A constant @code{move_operand} that can be safely loaded using
+@@ -221,10 +279,73 @@
+   (and (match_operand 0 "move_operand")
+        (match_test "CONSTANT_P (op)")))
+ 
++(define_constraint "Yh"
++   "@internal"
++    (match_operand 0 "hi_mask_operand"))
++
++(define_constraint "Yw"
++   "@internal"
++    (match_operand 0 "si_mask_operand"))
++
+ (define_constraint "Yx"
+    "@internal"
+    (match_operand 0 "low_bitmask_operand"))
+ 
++(define_constraint "YI"
++  "@internal
++   A replicated vector const in which the replicated value is in the range
++   [-512,511]."
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_same_int_p (op, mode, -512, 511)")))
++
++(define_constraint "YC"
++  "@internal
++   A replicated vector const in which the replicated value has a single
++   bit set."
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_bitimm_set_p (op, mode)")))
++
++(define_constraint "YZ"
++  "@internal
++   A replicated vector const in which the replicated value has a single
++   bit clear."
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_bitimm_clr_p (op, mode)")))
++
++(define_constraint "Unv5"
++  "@internal
++   A replicated vector const in which the replicated value is in the range
++   [-31,0]."
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_same_int_p (op, mode, -31, 0)")))
++
++(define_constraint "Uuv5"
++  "@internal
++   A replicated vector const in which the replicated value is in the range
++   [0,31]."
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 31)")))
++
++(define_constraint "Usv5"
++  "@internal
++   A replicated vector const in which the replicated value is in the range
++   [-16,15]."
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_same_int_p (op, mode, -16, 15)")))
++
++(define_constraint "Uuv6"
++  "@internal
++   A replicated vector const in which the replicated value is in the range
++   [0,63]."
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_same_int_p (op, mode, 0, 63)")))
++
++(define_constraint "Urv8"
++  "@internal
++   A replicated vector const with replicated byte values as well as elements"
++  (and (match_code "const_vector")
++       (match_test "loongarch_const_vector_same_bytes_p (op, mode)")))
++
+ (define_memory_constraint "ZC"
+   "A memory operand whose address is formed by a base register and offset
+    that is suitable for use in instructions with the same addressing mode
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index c6e337d05..c53785a37 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -146,6 +146,10 @@ mbranch-cost=
+ Target RejectNegative Joined UInteger Var(loongarch_branch_cost)
+ -mbranch-cost=COST	Set the cost of branches to roughly COST instructions.
+ 
++mmemvec-cost=
++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5)
++mmemvec-cost=COST      Set the cost of vector memory access instructions.
++
+ mcheck-zero-division
+ Target Mask(CHECK_ZERO_DIV)
+ Trap on integer divide by zero.
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index cb0ea1664..c8548a07f 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -36,6 +36,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "fold-const.h"
+ #include "expr.h"
+ #include "langhooks.h"
++#include "emit-rtl.h"
+ 
+ /* Macros to create an enumeration identifier for a function prototype.  */
+ #define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B
+@@ -302,6 +303,15 @@ loongarch_prepare_builtin_arg (struct expand_operand *op, tree exp,
+   create_input_operand (op, value, TYPE_MODE (TREE_TYPE (arg)));
+ }
+ 
++/* Return a const_int vector of VAL with mode MODE.  */
++
++rtx
++loongarch_gen_const_int_vector (machine_mode mode, HOST_WIDE_INT val)
++{
++  rtx c = gen_int_mode (val, GET_MODE_INNER (mode));
++  return gen_const_vec_duplicate (mode, c);
++}
++
+ /* Expand instruction ICODE as part of a built-in function sequence.
+    Use the first NOPS elements of OPS as the instruction's operands.
+    HAS_TARGET_P is true if operand 0 is a target; it is false if the
+diff --git a/gcc/config/loongarch/loongarch-modes.def b/gcc/config/loongarch/loongarch-modes.def
+index 7f06e2d65..b69ad3d83 100644
+--- a/gcc/config/loongarch/loongarch-modes.def
++++ b/gcc/config/loongarch/loongarch-modes.def
+@@ -23,3 +23,41 @@ FLOAT_MODE (TF, 16, ieee_quad_format);
+ 
+ /* For floating point conditions in FCC registers.  */
+ CC_MODE (FCC);
++
++/* Vector modes.  */
++VECTOR_MODES (INT, 4);	      /* V4QI  V2HI      */
++VECTOR_MODES (INT, 8);	      /* V8QI  V4HI V2SI */
++VECTOR_MODES (FLOAT, 8);      /*       V4HF V2SF */
++
++/* For LARCH LSX 128 bits.  */
++VECTOR_MODES (INT, 16);	      /* V16QI V8HI V4SI V2DI */
++VECTOR_MODES (FLOAT, 16);     /*	    V4SF V2DF */
++
++VECTOR_MODES (INT, 32);	      /* V32QI V16HI V8SI V4DI */
++VECTOR_MODES (FLOAT, 32);     /*	     V8SF V4DF */
++
++/* Double-sized vector modes for vec_concat.  */
++/* VECTOR_MODE (INT, QI, 32);	  V32QI	*/
++/* VECTOR_MODE (INT, HI, 16);	  V16HI	*/
++/* VECTOR_MODE (INT, SI, 8);	  V8SI	*/
++/* VECTOR_MODE (INT, DI, 4);	  V4DI	*/
++/* VECTOR_MODE (FLOAT, SF, 8);	  V8SF	*/
++/* VECTOR_MODE (FLOAT, DF, 4);	  V4DF	*/
++
++VECTOR_MODE (INT, QI, 64);    /* V64QI	*/
++VECTOR_MODE (INT, HI, 32);    /* V32HI	*/
++VECTOR_MODE (INT, SI, 16);    /* V16SI	*/
++VECTOR_MODE (INT, DI, 8);     /* V8DI */
++VECTOR_MODE (FLOAT, SF, 16);  /* V16SF	*/
++VECTOR_MODE (FLOAT, DF, 8);   /* V8DF */
++
++VECTOR_MODES (FRACT, 4);	/* V4QQ  V2HQ */
++VECTOR_MODES (UFRACT, 4);	/* V4UQQ V2UHQ */
++VECTOR_MODES (ACCUM, 4);	/*       V2HA */
++VECTOR_MODES (UACCUM, 4);	/*       V2UHA */
++
++INT_MODE (OI, 32);
++
++/* Keep the OI modes from confusing the compiler into thinking
++   that these modes could actually be used for computation.  They are
++   only holders for vectors during data movement.  */
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 3ac3b5e19..24e42fa99 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -85,10 +85,18 @@ extern bool loongarch_split_move_p (rtx, rtx);
+ extern void loongarch_split_move (rtx, rtx, rtx);
+ extern bool loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT, machine_mode);
+ extern void loongarch_split_plus_constant (rtx *, machine_mode);
++extern bool loongarch_split_move_insn_p (rtx, rtx);
++extern void loongarch_split_move_insn (rtx, rtx, rtx);
++extern void loongarch_split_128bit_move (rtx, rtx);
++extern bool loongarch_split_128bit_move_p (rtx, rtx);
++extern void loongarch_split_lsx_copy_d (rtx, rtx, rtx, rtx (*)(rtx, rtx, rtx));
++extern void loongarch_split_lsx_insert_d (rtx, rtx, rtx, rtx);
++extern void loongarch_split_lsx_fill_d (rtx, rtx);
+ extern const char *loongarch_output_move (rtx, rtx);
+ extern bool loongarch_cfun_has_cprestore_slot_p (void);
+ #ifdef RTX_CODE
+ extern void loongarch_expand_scc (rtx *);
++extern bool loongarch_expand_vec_cmp (rtx *);
+ extern void loongarch_expand_conditional_branch (rtx *);
+ extern void loongarch_expand_conditional_move (rtx *);
+ extern void loongarch_expand_conditional_trap (rtx);
+@@ -110,6 +118,15 @@ extern bool loongarch_small_data_pattern_p (rtx);
+ extern rtx loongarch_rewrite_small_data (rtx);
+ extern rtx loongarch_return_addr (int, rtx);
+ 
++extern bool loongarch_const_vector_same_val_p (rtx, machine_mode);
++extern bool loongarch_const_vector_same_bytes_p (rtx, machine_mode);
++extern bool loongarch_const_vector_same_int_p (rtx, machine_mode, HOST_WIDE_INT,
++					  HOST_WIDE_INT);
++extern bool loongarch_const_vector_shuffle_set_p (rtx, machine_mode);
++extern bool loongarch_const_vector_bitimm_set_p (rtx, machine_mode);
++extern bool loongarch_const_vector_bitimm_clr_p (rtx, machine_mode);
++extern rtx loongarch_lsx_vec_parallel_const_half (machine_mode, bool);
++extern rtx loongarch_gen_const_int_vector (machine_mode, HOST_WIDE_INT);
+ extern enum reg_class loongarch_secondary_reload_class (enum reg_class,
+ 							machine_mode,
+ 							rtx, bool);
+@@ -129,6 +146,7 @@ extern const char *loongarch_output_equal_conditional_branch (rtx_insn *,
+ 							      rtx *,
+ 							      bool);
+ extern const char *loongarch_output_division (const char *, rtx *);
++extern const char *loongarch_lsx_output_division (const char *, rtx *);
+ extern const char *loongarch_output_probe_stack_range (rtx, rtx, rtx);
+ extern bool loongarch_hard_regno_rename_ok (unsigned int, unsigned int);
+ extern int loongarch_dspalu_bypass_p (rtx, rtx);
+@@ -156,6 +174,13 @@ union loongarch_gen_fn_ptrs
+ extern void loongarch_expand_atomic_qihi (union loongarch_gen_fn_ptrs,
+ 					  rtx, rtx, rtx, rtx, rtx);
+ 
++extern void loongarch_expand_vector_init (rtx, rtx);
++extern void loongarch_expand_vec_unpack (rtx op[2], bool, bool);
++extern void loongarch_expand_vec_perm (rtx, rtx, rtx, rtx);
++extern void loongarch_expand_vector_extract (rtx, rtx, int);
++extern void loongarch_expand_vector_reduc (rtx (*)(rtx, rtx, rtx), rtx, rtx);
++
++extern int loongarch_ldst_scaled_shift (machine_mode);
+ extern bool loongarch_signed_immediate_p (unsigned HOST_WIDE_INT, int, int);
+ extern bool loongarch_unsigned_immediate_p (unsigned HOST_WIDE_INT, int, int);
+ extern bool loongarch_12bit_offset_address_p (rtx, machine_mode);
+@@ -171,6 +196,9 @@ extern bool loongarch_split_symbol_type (enum loongarch_symbol_type);
+ typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx);
+ 
+ extern void loongarch_register_frame_header_opt (void);
++extern void loongarch_expand_vec_cond_expr (machine_mode, machine_mode, rtx *);
++extern void loongarch_expand_vec_cond_mask_expr (machine_mode, machine_mode,
++						 rtx *);
+ 
+ /* Routines implemented in loongarch-c.c.  */
+ void loongarch_cpu_cpp_builtins (cpp_reader *);
+@@ -180,6 +208,9 @@ extern void loongarch_atomic_assign_expand_fenv (tree *, tree *, tree *);
+ extern tree loongarch_builtin_decl (unsigned int, bool);
+ extern rtx loongarch_expand_builtin (tree, rtx, rtx subtarget ATTRIBUTE_UNUSED,
+ 				     machine_mode, int);
++extern tree loongarch_builtin_vectorized_function (unsigned int, tree, tree);
++extern rtx loongarch_gen_const_int_vector_shuffle (machine_mode, int);
+ extern tree loongarch_build_builtin_va_list (void);
+ 
++extern rtx loongarch_build_signbit_mask (machine_mode, bool, bool);
+ #endif /* ! GCC_LOONGARCH_PROTOS_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index bd774d4a9..40b83d72b 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -432,7 +432,7 @@ loongarch_flatten_aggregate_argument (const_tree type,
+ 
+ static unsigned
+ loongarch_pass_aggregate_num_fpr (const_tree type,
+-					loongarch_aggregate_field fields[2])
++				  loongarch_aggregate_field fields[2])
+ {
+   int n = loongarch_flatten_aggregate_argument (type, fields);
+ 
+@@ -770,7 +770,7 @@ loongarch_setup_incoming_varargs (cumulative_args_t cum,
+     {
+       rtx ptr = plus_constant (Pmode, virtual_incoming_args_rtx,
+ 			       REG_PARM_STACK_SPACE (cfun->decl)
+-				 - gp_saved * UNITS_PER_WORD);
++			       - gp_saved * UNITS_PER_WORD);
+       rtx mem = gen_frame_mem (BLKmode, ptr);
+       set_mem_alias_set (mem, get_varargs_alias_set ());
+ 
+@@ -1046,7 +1046,7 @@ rtx
+ loongarch_emit_move (rtx dest, rtx src)
+ {
+   return (can_create_pseudo_p () ? emit_move_insn (dest, src)
+-				 : emit_move_insn_1 (dest, src));
++	  : emit_move_insn_1 (dest, src));
+ }
+ 
+ /* Save register REG to MEM.  Make the instruction frame-related.  */
+@@ -1674,6 +1674,140 @@ loongarch_symbol_binds_local_p (const_rtx x)
+     return false;
+ }
+ 
++/* Return true if OP is a constant vector with the number of units in MODE,
++   and each unit has the same bit set.  */
++
++bool
++loongarch_const_vector_bitimm_set_p (rtx op, machine_mode mode)
++{
++  if (GET_CODE (op) == CONST_VECTOR && op != CONST0_RTX (mode))
++    {
++      unsigned HOST_WIDE_INT val = UINTVAL (CONST_VECTOR_ELT (op, 0));
++      int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode)));
++
++      if (vlog2 != -1)
++	{
++	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
++	  gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1);
++	  return loongarch_const_vector_same_val_p (op, mode);
++	}
++    }
++
++  return false;
++}
++
++/* Return true if OP is a constant vector with the number of units in MODE,
++   and each unit has the same bit clear.  */
++
++bool
++loongarch_const_vector_bitimm_clr_p (rtx op, machine_mode mode)
++{
++  if (GET_CODE (op) == CONST_VECTOR && op != CONSTM1_RTX (mode))
++    {
++      unsigned HOST_WIDE_INT val = ~UINTVAL (CONST_VECTOR_ELT (op, 0));
++      int vlog2 = exact_log2 (val & GET_MODE_MASK (GET_MODE_INNER (mode)));
++
++      if (vlog2 != -1)
++	{
++	  gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
++	  gcc_assert (vlog2 >= 0 && vlog2 <= GET_MODE_UNIT_BITSIZE (mode) - 1);
++	  return loongarch_const_vector_same_val_p (op, mode);
++	}
++    }
++
++  return false;
++}
++
++/* Return true if OP is a constant vector with the number of units in MODE,
++   and each unit has the same value.  */
++
++bool
++loongarch_const_vector_same_val_p (rtx op, machine_mode mode)
++{
++  int i, nunits = GET_MODE_NUNITS (mode);
++  rtx first;
++
++  if (GET_CODE (op) != CONST_VECTOR || GET_MODE (op) != mode)
++    return false;
++
++  first = CONST_VECTOR_ELT (op, 0);
++  for (i = 1; i < nunits; i++)
++    if (!rtx_equal_p (first, CONST_VECTOR_ELT (op, i)))
++      return false;
++
++  return true;
++}
++
++/* Return true if OP is a constant vector with the number of units in MODE,
++   and each unit has the same value as well as replicated bytes in the value.
++*/
++
++bool
++loongarch_const_vector_same_bytes_p (rtx op, machine_mode mode)
++{
++  int i, bytes;
++  HOST_WIDE_INT val, first_byte;
++  rtx first;
++
++  if (!loongarch_const_vector_same_val_p (op, mode))
++    return false;
++
++  first = CONST_VECTOR_ELT (op, 0);
++  bytes = GET_MODE_UNIT_SIZE (mode);
++  val = INTVAL (first);
++  first_byte = val & 0xff;
++  for (i = 1; i < bytes; i++)
++    {
++      val >>= 8;
++      if ((val & 0xff) != first_byte)
++	return false;
++    }
++
++  return true;
++}
++
++/* Return true if OP is a constant vector with the number of units in MODE,
++   and each unit has the same integer value in the range [LOW, HIGH].  */
++
++bool
++loongarch_const_vector_same_int_p (rtx op, machine_mode mode, HOST_WIDE_INT low,
++				   HOST_WIDE_INT high)
++{
++  HOST_WIDE_INT value;
++  rtx elem0;
++
++  if (!loongarch_const_vector_same_val_p (op, mode))
++    return false;
++
++  elem0 = CONST_VECTOR_ELT (op, 0);
++  if (!CONST_INT_P (elem0))
++    return false;
++
++  value = INTVAL (elem0);
++  return (value >= low && value <= high);
++}
++
++/* Return true if OP is a constant vector with repeated 4-element sets
++   in mode MODE.  */
++
++bool
++loongarch_const_vector_shuffle_set_p (rtx op, machine_mode mode)
++{
++  int nunits = GET_MODE_NUNITS (mode);
++  int nsets = nunits / 4;
++  int set = 0;
++  int i, j;
++
++  /* Check if we have the same 4-element sets.  */
++  for (j = 0; j < nsets; j++, set = 4 * j)
++    for (i = 0; i < 4; i++)
++      if ((INTVAL (XVECEXP (op, 0, i))
++	   != (INTVAL (XVECEXP (op, 0, set + i)) - set))
++	  || !IN_RANGE (INTVAL (XVECEXP (op, 0, set + i)), 0, set + 3))
++	return false;
++  return true;
++}
++
+ /* Return true if rtx constants of mode MODE should be put into a small
+    data section.  */
+ 
+@@ -1791,6 +1925,11 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type)
+ static int
+ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
+ {
++  /* LSX LD.* and ST.* cannot support loading symbols via an immediate
++     operand.  */
++  if (LSX_SUPPORTED_MODE_P (mode))
++    return 0;
++
+   switch (type)
+     {
+     case SYMBOL_GOT_DISP:
+@@ -1837,7 +1976,8 @@ loongarch_cannot_force_const_mem (machine_mode mode, rtx x)
+      references, reload will consider forcing C into memory and using
+      one of the instruction's memory alternatives.  Returning false
+      here will force it to use an input reload instead.  */
+-  if (CONST_INT_P (x) && loongarch_legitimate_constant_p (mode, x))
++  if ((CONST_INT_P (x) || GET_CODE (x) == CONST_VECTOR)
++      && loongarch_legitimate_constant_p (mode, x))
+     return true;
+ 
+   split_const (x, &base, &offset);
+@@ -1914,6 +2054,12 @@ loongarch_valid_offset_p (rtx x, machine_mode mode)
+       && !IMM12_OPERAND (INTVAL (x) + GET_MODE_SIZE (mode) - UNITS_PER_WORD))
+     return false;
+ 
++  /* LSX LD.* and ST.* supports 10-bit signed offsets.  */
++  if (LSX_SUPPORTED_MODE_P (mode)
++      && !loongarch_signed_immediate_p (INTVAL (x), 10,
++					loongarch_ldst_scaled_shift (mode)))
++    return false;
++
+   return true;
+ }
+ 
+@@ -1998,7 +2144,7 @@ loongarch_valid_lo_sum_p (enum loongarch_symbol_type symbol_type,
+ 
+ static bool
+ loongarch_valid_index_p (struct loongarch_address_info *info, rtx x,
+-			  machine_mode mode, bool strict_p)
++			 machine_mode mode, bool strict_p)
+ {
+   rtx index;
+ 
+@@ -2051,7 +2197,7 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x,
+ 	}
+ 
+       if (loongarch_valid_base_register_p (XEXP (x, 1), mode, strict_p)
+-	 && loongarch_valid_index_p (info, XEXP (x, 0), mode, strict_p))
++	  && loongarch_valid_index_p (info, XEXP (x, 0), mode, strict_p))
+ 	{
+ 	  info->reg = XEXP (x, 1);
+ 	  return true;
+@@ -2126,6 +2272,7 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p)
+ {
+   struct loongarch_address_info addr;
+   int factor;
++  bool lsx_p = !might_split_p && LSX_SUPPORTED_MODE_P (mode);
+ 
+   if (!loongarch_classify_address (&addr, x, mode, false))
+     return 0;
+@@ -2143,15 +2290,29 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p)
+     switch (addr.type)
+       {
+       case ADDRESS_REG:
++	if (lsx_p)
++	  {
++	    /* LSX LD.* and ST.* supports 10-bit signed offsets.  */
++	    if (loongarch_signed_immediate_p (INTVAL (addr.offset), 10,
++					      loongarch_ldst_scaled_shift (mode)))
++	      return 1;
++	    else
++	      return 0;
++	  }
++	return factor;
++
+       case ADDRESS_REG_REG:
+-      case ADDRESS_CONST_INT:
+ 	return factor;
+ 
++      case ADDRESS_CONST_INT:
++	return lsx_p ? 0 : factor;
++
+       case ADDRESS_LO_SUM:
+ 	return factor + 1;
+ 
+       case ADDRESS_SYMBOLIC:
+-	return factor * loongarch_symbol_insns (addr.symbol_type, mode);
++	return lsx_p ? 0
++	  : factor * loongarch_symbol_insns (addr.symbol_type, mode);
+       }
+   return 0;
+ }
+@@ -2177,6 +2338,19 @@ loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits,
+   return loongarch_unsigned_immediate_p (x, bits, shift);
+ }
+ 
++/* Return the scale shift that applied to LSX LD/ST address offset.  */
++
++int
++loongarch_ldst_scaled_shift (machine_mode mode)
++{
++  int shift = exact_log2 (GET_MODE_UNIT_SIZE (mode));
++
++  if (shift < 0 || shift > 8)
++    gcc_unreachable ();
++
++  return shift;
++}
++
+ /* Return true if X is a legitimate address with a 12-bit offset
+    or addr.type is ADDRESS_LO_SUM.
+    MODE is the mode of the value being accessed.  */
+@@ -2244,6 +2418,9 @@ loongarch_const_insns (rtx x)
+       return loongarch_integer_cost (INTVAL (x));
+ 
+     case CONST_VECTOR:
++      if (LSX_SUPPORTED_MODE_P (GET_MODE (x))
++	  && loongarch_const_vector_same_int_p (x, GET_MODE (x), -512, 511))
++	return 1;
+       /* Fall through.  */
+     case CONST_DOUBLE:
+       return x == CONST0_RTX (GET_MODE (x)) ? 1 : 0;
+@@ -2278,7 +2455,7 @@ loongarch_const_insns (rtx x)
+     case SYMBOL_REF:
+     case LABEL_REF:
+       return loongarch_symbol_insns (
+-	loongarch_classify_symbol (x), MAX_MACHINE_MODE);
++		loongarch_classify_symbol (x), MAX_MACHINE_MODE);
+ 
+     default:
+       return 0;
+@@ -2300,7 +2477,26 @@ loongarch_split_const_insns (rtx x)
+   return low + high;
+ }
+ 
+-static bool loongarch_split_move_insn_p (rtx dest, rtx src);
++bool loongarch_split_move_insn_p (rtx dest, rtx src);
++/* Return one word of 128-bit value OP, taking into account the fixed
++   endianness of certain registers.  BYTE selects from the byte address.  */
++
++rtx
++loongarch_subword_at_byte (rtx op, unsigned int byte)
++{
++  machine_mode mode;
++
++  mode = GET_MODE (op);
++  if (mode == VOIDmode)
++    mode = TImode;
++
++  gcc_assert (!FP_REG_RTX_P (op));
++
++  if (MEM_P (op))
++    return loongarch_rewrite_small_data (adjust_address (op, word_mode, byte));
++
++  return simplify_gen_subreg (word_mode, op, mode, byte);
++}
+ 
+ /* Return the number of instructions needed to implement INSN,
+    given that it loads from or stores to MEM.  */
+@@ -3061,9 +3257,10 @@ loongarch_legitimize_move (machine_mode mode, rtx dest, rtx src)
+ 
+   /* Both src and dest are non-registers;  one special case is supported where
+      the source is (const_int 0) and the store can source the zero register.
+-     */
++     LSX is never able to source the zero register directly in
++     memory operations.  */
+   if (!register_operand (dest, mode) && !register_operand (src, mode)
+-      && !const_0_operand (src, mode))
++      && (!const_0_operand (src, mode) || LSX_SUPPORTED_MODE_P (mode)))
+     {
+       loongarch_emit_move (dest, force_reg (mode, src));
+       return true;
+@@ -3635,6 +3832,54 @@ loongarch_rtx_costs (rtx x, machine_mode mode, int outer_code,
+     }
+ }
+ 
++/* Vectorizer cost model implementation.  */
++
++/* Implement targetm.vectorize.builtin_vectorization_cost.  */
++
++static int
++loongarch_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
++				      tree vectype,
++				      int misalign ATTRIBUTE_UNUSED)
++{
++  unsigned elements;
++
++  switch (type_of_cost)
++    {
++      case scalar_stmt:
++      case scalar_load:
++      case vector_stmt:
++      case vector_load:
++      case vec_to_scalar:
++      case scalar_to_vec:
++      case cond_branch_not_taken:
++      case vec_promote_demote:
++      case scalar_store:
++      case vector_store:
++	return 1;
++
++      case vec_perm:
++	return 1;
++
++      case unaligned_load:
++      case vector_gather_load:
++	return 2;
++
++      case unaligned_store:
++      case vector_scatter_store:
++	return 10;
++
++      case cond_branch_taken:
++	return 3;
++
++      case vec_construct:
++	elements = TYPE_VECTOR_SUBPARTS (vectype);
++	return elements / 2 + 1;
++
++      default:
++	gcc_unreachable ();
++    }
++}
++
+ /* Implement TARGET_ADDRESS_COST.  */
+ 
+ static int
+@@ -3689,6 +3934,11 @@ loongarch_split_move_p (rtx dest, rtx src)
+       if (FP_REG_RTX_P (src) && MEM_P (dest))
+ 	return false;
+     }
++
++  /* Check if LSX moves need splitting.  */
++  if (LSX_SUPPORTED_MODE_P (GET_MODE (dest)))
++    return loongarch_split_128bit_move_p (dest, src);
++
+   /* Otherwise split all multiword moves.  */
+   return size > UNITS_PER_WORD;
+ }
+@@ -3702,7 +3952,9 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_)
+   rtx low_dest;
+ 
+   gcc_checking_assert (loongarch_split_move_p (dest, src));
+-  if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
++  if (LSX_SUPPORTED_MODE_P (GET_MODE (dest)))
++    loongarch_split_128bit_move (dest, src);
++  else if (FP_REG_RTX_P (dest) || FP_REG_RTX_P (src))
+     {
+       if (!TARGET_64BIT && GET_MODE (dest) == DImode)
+ 	emit_insn (gen_move_doubleword_fprdi (dest, src));
+@@ -3806,12 +4058,21 @@ loongarch_split_plus_constant (rtx *op, machine_mode mode)
+ 
+ /* Return true if a move from SRC to DEST in INSN should be split.  */
+ 
+-static bool
++bool
+ loongarch_split_move_insn_p (rtx dest, rtx src)
+ {
+   return loongarch_split_move_p (dest, src);
+ }
+ 
++/* Split a move from SRC to DEST in INSN, given that
++   loongarch_split_move_insn_p holds.  */
++
++void
++loongarch_split_move_insn (rtx dest, rtx src, rtx insn)
++{
++  loongarch_split_move (dest, src, insn);
++}
++
+ /* Implement TARGET_CONSTANT_ALIGNMENT.  */
+ 
+ static HOST_WIDE_INT
+@@ -3858,7 +4119,7 @@ const char *
+ loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr)
+ {
+   int index = exact_log2 (GET_MODE_SIZE (mode));
+-  if (!IN_RANGE (index, 2, 3))
++  if (!IN_RANGE (index, 2, 4))
+     return NULL;
+ 
+   struct loongarch_address_info info;
+@@ -3867,20 +4128,216 @@ loongarch_output_move_index_float (rtx x, machine_mode mode, bool ldr)
+       || !loongarch_legitimate_address_p (mode, x, false))
+     return NULL;
+ 
+-  const char *const insn[][2] =
++  const char *const insn[][3] =
+     {
+ 	{
+ 	  "fstx.s\t%1,%0",
+-	  "fstx.d\t%1,%0"
++	  "fstx.d\t%1,%0",
++	  "vstx\t%w1,%0"
+ 	},
+ 	{
+ 	  "fldx.s\t%0,%1",
+-	  "fldx.d\t%0,%1"
+-	},
++	  "fldx.d\t%0,%1",
++	  "vldx\t%w0,%1"
++	}
+     };
+ 
+   return insn[ldr][index-2];
+ }
++/* Return true if a 128-bit move from SRC to DEST should be split.  */
++
++bool
++loongarch_split_128bit_move_p (rtx dest, rtx src)
++{
++  /* LSX-to-LSX moves can be done in a single instruction.  */
++  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
++    return false;
++
++  /* Check for LSX loads and stores.  */
++  if (FP_REG_RTX_P (dest) && MEM_P (src))
++    return false;
++  if (FP_REG_RTX_P (src) && MEM_P (dest))
++    return false;
++
++  /* Check for LSX set to an immediate const vector with valid replicated
++     element.  */
++  if (FP_REG_RTX_P (dest)
++      && loongarch_const_vector_same_int_p (src, GET_MODE (src), -512, 511))
++    return false;
++
++  /* Check for LSX load zero immediate.  */
++  if (FP_REG_RTX_P (dest) && src == CONST0_RTX (GET_MODE (src)))
++    return false;
++
++  return true;
++}
++
++/* Split a 128-bit move from SRC to DEST.  */
++
++void
++loongarch_split_128bit_move (rtx dest, rtx src)
++{
++  int byte, index;
++  rtx low_dest, low_src, d, s;
++
++  if (FP_REG_RTX_P (dest))
++    {
++      gcc_assert (!MEM_P (src));
++
++      rtx new_dest = dest;
++      if (!TARGET_64BIT)
++	{
++	  if (GET_MODE (dest) != V4SImode)
++	    new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
++	}
++      else
++	{
++	  if (GET_MODE (dest) != V2DImode)
++	    new_dest = simplify_gen_subreg (V2DImode, dest, GET_MODE (dest), 0);
++	}
++
++      for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
++	   byte += UNITS_PER_WORD, index++)
++	{
++	  s = loongarch_subword_at_byte (src, byte);
++	  if (!TARGET_64BIT)
++	    emit_insn (gen_lsx_vinsgr2vr_w (new_dest, s, new_dest,
++					    GEN_INT (1 << index)));
++	  else
++	    emit_insn (gen_lsx_vinsgr2vr_d (new_dest, s, new_dest,
++					    GEN_INT (1 << index)));
++	}
++    }
++  else if (FP_REG_RTX_P (src))
++    {
++      gcc_assert (!MEM_P (dest));
++
++      rtx new_src = src;
++      if (!TARGET_64BIT)
++	{
++	  if (GET_MODE (src) != V4SImode)
++	    new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
++	}
++      else
++	{
++	  if (GET_MODE (src) != V2DImode)
++	    new_src = simplify_gen_subreg (V2DImode, src, GET_MODE (src), 0);
++	}
++
++      for (byte = 0, index = 0; byte < GET_MODE_SIZE (TImode);
++	   byte += UNITS_PER_WORD, index++)
++	{
++	  d = loongarch_subword_at_byte (dest, byte);
++	  if (!TARGET_64BIT)
++	    emit_insn (gen_lsx_vpickve2gr_w (d, new_src, GEN_INT (index)));
++	  else
++	    emit_insn (gen_lsx_vpickve2gr_d (d, new_src, GEN_INT (index)));
++	}
++    }
++  else
++    {
++      low_dest = loongarch_subword_at_byte (dest, 0);
++      low_src = loongarch_subword_at_byte (src, 0);
++      gcc_assert (REG_P (low_dest) && REG_P (low_src));
++      /* Make sure the source register is not written before reading.  */
++      if (REGNO (low_dest) <= REGNO (low_src))
++	{
++	  for (byte = 0; byte < GET_MODE_SIZE (TImode);
++	       byte += UNITS_PER_WORD)
++	    {
++	      d = loongarch_subword_at_byte (dest, byte);
++	      s = loongarch_subword_at_byte (src, byte);
++	      loongarch_emit_move (d, s);
++	    }
++	}
++      else
++	{
++	  for (byte = GET_MODE_SIZE (TImode) - UNITS_PER_WORD; byte >= 0;
++	       byte -= UNITS_PER_WORD)
++	    {
++	      d = loongarch_subword_at_byte (dest, byte);
++	      s = loongarch_subword_at_byte (src, byte);
++	      loongarch_emit_move (d, s);
++	    }
++	}
++    }
++}
++
++
++/* Split a COPY_S.D with operands DEST, SRC and INDEX.  GEN is a function
++   used to generate subregs.  */
++
++void
++loongarch_split_lsx_copy_d (rtx dest, rtx src, rtx index,
++			    rtx (*gen_fn)(rtx, rtx, rtx))
++{
++  gcc_assert ((GET_MODE (src) == V2DImode && GET_MODE (dest) == DImode)
++	      || (GET_MODE (src) == V2DFmode && GET_MODE (dest) == DFmode));
++
++  /* Note that low is always from the lower index, and high is always
++     from the higher index.  */
++  rtx low = loongarch_subword (dest, false);
++  rtx high = loongarch_subword (dest, true);
++  rtx new_src = simplify_gen_subreg (V4SImode, src, GET_MODE (src), 0);
++
++  emit_insn (gen_fn (low, new_src, GEN_INT (INTVAL (index) * 2)));
++  emit_insn (gen_fn (high, new_src, GEN_INT (INTVAL (index) * 2 + 1)));
++}
++
++/* Split a INSERT.D with operand DEST, SRC1.INDEX and SRC2.  */
++
++void
++loongarch_split_lsx_insert_d (rtx dest, rtx src1, rtx index, rtx src2)
++{
++  int i;
++  gcc_assert (GET_MODE (dest) == GET_MODE (src1));
++  gcc_assert ((GET_MODE (dest) == V2DImode
++	       && (GET_MODE (src2) == DImode || src2 == const0_rtx))
++	      || (GET_MODE (dest) == V2DFmode && GET_MODE (src2) == DFmode));
++
++  /* Note that low is always from the lower index, and high is always
++     from the higher index.  */
++  rtx low = loongarch_subword (src2, false);
++  rtx high = loongarch_subword (src2, true);
++  rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
++  rtx new_src1 = simplify_gen_subreg (V4SImode, src1, GET_MODE (src1), 0);
++  i = exact_log2 (INTVAL (index));
++  gcc_assert (i != -1);
++
++  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, low, new_src1,
++				  GEN_INT (1 << (i * 2))));
++  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest,
++				  GEN_INT (1 << (i * 2 + 1))));
++}
++
++/* Split FILL.D.  */
++
++void
++loongarch_split_lsx_fill_d (rtx dest, rtx src)
++{
++  gcc_assert ((GET_MODE (dest) == V2DImode
++	       && (GET_MODE (src) == DImode || src == const0_rtx))
++	      || (GET_MODE (dest) == V2DFmode && GET_MODE (src) == DFmode));
++
++  /* Note that low is always from the lower index, and high is always
++     from the higher index.  */
++  rtx low, high;
++  if (src == const0_rtx)
++    {
++      low = src;
++      high = src;
++    }
++  else
++    {
++      low = loongarch_subword (src, false);
++      high = loongarch_subword (src, true);
++    }
++  rtx new_dest = simplify_gen_subreg (V4SImode, dest, GET_MODE (dest), 0);
++  emit_insn (gen_lsx_vreplgr2vr_w (new_dest, low));
++  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 1)));
++  emit_insn (gen_lsx_vinsgr2vr_w (new_dest, high, new_dest, GEN_INT (1 << 3)));
++}
++
+ 
+ /* Return the appropriate instructions to move SRC into DEST.  Assume
+    that SRC is operand 1 and DEST is operand 0.  */
+@@ -3892,10 +4349,25 @@ loongarch_output_move (rtx dest, rtx src)
+   enum rtx_code src_code = GET_CODE (src);
+   machine_mode mode = GET_MODE (dest);
+   bool dbl_p = (GET_MODE_SIZE (mode) == 8);
++  bool lsx_p = LSX_SUPPORTED_MODE_P (mode);
+ 
+   if (loongarch_split_move_p (dest, src))
+     return "#";
+ 
++  if ((lsx_p)
++      && dest_code == REG && FP_REG_P (REGNO (dest))
++      && src_code == CONST_VECTOR
++      && CONST_INT_P (CONST_VECTOR_ELT (src, 0)))
++    {
++      gcc_assert (loongarch_const_vector_same_int_p (src, mode, -512, 511));
++      switch (GET_MODE_SIZE (mode))
++	{
++	case 16:
++	  return "vrepli.%v0\t%w0,%E1";
++	default: gcc_unreachable ();
++	}
++    }
++
+   if ((src_code == REG && GP_REG_P (REGNO (src)))
+       || (src == CONST0_RTX (mode)))
+     {
+@@ -3905,7 +4377,21 @@ loongarch_output_move (rtx dest, rtx src)
+ 	    return "or\t%0,%z1,$r0";
+ 
+ 	  if (FP_REG_P (REGNO (dest)))
+-	    return dbl_p ? "movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1";
++	    {
++	      if (lsx_p)
++		{
++		  gcc_assert (src == CONST0_RTX (GET_MODE (src)));
++		  switch (GET_MODE_SIZE (mode))
++		    {
++		    case 16:
++		      return "vrepli.b\t%w0,0";
++		    default:
++		      gcc_unreachable ();
++		    }
++		}
++
++	      return dbl_p ? "movgr2fr.d\t%0,%z1" : "movgr2fr.w\t%0,%z1";
++	    }
+ 	}
+       if (dest_code == MEM)
+ 	{
+@@ -3947,7 +4433,10 @@ loongarch_output_move (rtx dest, rtx src)
+     {
+       if (src_code == REG)
+ 	if (FP_REG_P (REGNO (src)))
+-	  return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1";
++	  {
++	    gcc_assert (!lsx_p);
++	    return dbl_p ? "movfr2gr.d\t%0,%1" : "movfr2gr.s\t%0,%1";
++	  }
+ 
+       if (src_code == MEM)
+ 	{
+@@ -3992,7 +4481,7 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  enum loongarch_symbol_type type = SYMBOL_PCREL;
+ 
+ 	  if (UNSPEC_ADDRESS_P (x))
+-	     type = UNSPEC_ADDRESS_TYPE (x);
++	    type = UNSPEC_ADDRESS_TYPE (x);
+ 
+ 	  if (type == SYMBOL_TLS_LE)
+ 	    return "lu12i.w\t%0,%h1";
+@@ -4027,7 +4516,20 @@ loongarch_output_move (rtx dest, rtx src)
+   if (src_code == REG && FP_REG_P (REGNO (src)))
+     {
+       if (dest_code == REG && FP_REG_P (REGNO (dest)))
+-	return dbl_p ? "fmov.d\t%0,%1" : "fmov.s\t%0,%1";
++	{
++	  if (lsx_p)
++	    {
++	      switch (GET_MODE_SIZE (mode))
++		{
++		case 16:
++		  return "vori.b\t%w0,%w1,0";
++		default:
++		  gcc_unreachable ();
++		}
++	    }
++
++	  return dbl_p ? "fmov.d\t%0,%1" : "fmov.s\t%0,%1";
++	}
+ 
+       if (dest_code == MEM)
+ 	{
+@@ -4038,6 +4540,17 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  if (insn)
+ 	    return insn;
+ 
++	  if (lsx_p)
++	    {
++	      switch (GET_MODE_SIZE (mode))
++		{
++		case 16:
++		  return "vst\t%w1,%0";
++		default:
++		  gcc_unreachable ();
++		}
++	    }
++
+ 	  return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0";
+ 	}
+     }
+@@ -4053,6 +4566,16 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  if (insn)
+ 	    return insn;
+ 
++	  if (lsx_p)
++	    {
++	      switch (GET_MODE_SIZE (mode))
++		{
++		case 16:
++		  return "vld\t%w0,%1";
++		default:
++		  gcc_unreachable ();
++		}
++	    }
+ 	  return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1";
+ 	}
+     }
+@@ -4252,6 +4775,7 @@ loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
+     }
+ }
+ 
++
+ /* Convert a comparison into something that can be used in a branch.  On
+    entry, *OP0 and *OP1 are the values being compared and *CODE is the code
+    used to compare them.  Update them to describe the final comparison.  */
+@@ -5056,9 +5580,12 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+ 
+    'A'	Print a _DB suffix if the memory model requires a release.
+    'b'	Print the address of a memory operand, without offset.
++   'B'	Print CONST_INT OP element 0 of a replicated CONST_VECTOR
++	  as an unsigned byte [0..255].
+    'c'  Print an integer.
+    'C'	Print the integer branch condition for comparison OP.
+    'd'	Print CONST_INT OP in decimal.
++   'E'	Print CONST_INT OP element 0 of a replicated CONST_VECTOR in decimal.
+    'F'	Print the FPU branch condition for comparison OP.
+    'G'	Print a DBAR insn if the memory model requires a release.
+    'H'  Print address 52-61bit relocation associated with OP.
+@@ -5074,13 +5601,16 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+    't'	Like 'T', but with the EQ/NE cases reversed
+    'V'	Print exact log2 of CONST_INT OP element 0 of a replicated
+ 	  CONST_VECTOR in decimal.
++   'v'	Print the insn size suffix b, h, w or d for vector modes V16QI, V8HI,
++	  V4SI, V2SI, and w, d for vector modes V4SF, V2DF respectively.
+    'W'	Print the inverse of the FPU branch condition for comparison OP.
++   'w'	Print a LSX register.
+    'X'	Print CONST_INT OP in hexadecimal format.
+    'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format.
+    'Y'	Print loongarch_fp_conditions[INTVAL (OP)]
+    'y'	Print exact log2 of CONST_INT OP in decimal.
+    'Z'	Print OP and a comma for 8CC, otherwise print nothing.
+-   'z'	Print $0 if OP is zero, otherwise print OP normally.  */
++   'z'	Print $r0 if OP is zero, otherwise print OP normally.  */
+ 
+ static void
+ loongarch_print_operand (FILE *file, rtx op, int letter)
+@@ -5102,6 +5632,18 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+       if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op)))
+        fputs ("_db", file);
+       break;
++    case 'E':
++      if (GET_CODE (op) == CONST_VECTOR)
++	{
++	  gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op)));
++	  op = CONST_VECTOR_ELT (op, 0);
++	  gcc_assert (CONST_INT_P (op));
++	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
++	}
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
++
+ 
+     case 'c':
+       if (CONST_INT_P (op))
+@@ -5152,6 +5694,18 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+       loongarch_print_operand_reloc (file, op, false /* hi64_part*/,
+ 				     false /* lo_reloc */);
+       break;
++    case 'B':
++      if (GET_CODE (op) == CONST_VECTOR)
++	{
++	  gcc_assert (loongarch_const_vector_same_val_p (op, GET_MODE (op)));
++	  op = CONST_VECTOR_ELT (op, 0);
++	  gcc_assert (CONST_INT_P (op));
++	  unsigned HOST_WIDE_INT val8 = UINTVAL (op) & GET_MODE_MASK (QImode);
++	  fprintf (file, HOST_WIDE_INT_PRINT_UNSIGNED, val8);
++	}
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
+ 
+     case 'm':
+       if (CONST_INT_P (op))
+@@ -5198,10 +5752,45 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
+-    case 'W':
+-      loongarch_print_float_branch_condition (file, reverse_condition (code),
+-					      letter);
+-      break;
++    case 'v':
++      switch (GET_MODE (op))
++	{
++	case E_V16QImode:
++	case E_V32QImode:
++	  fprintf (file, "b");
++	  break;
++	case E_V8HImode:
++	case E_V16HImode:
++	  fprintf (file, "h");
++	  break;
++	case E_V4SImode:
++	case E_V4SFmode:
++	case E_V8SImode:
++	case E_V8SFmode:
++	  fprintf (file, "w");
++	  break;
++	case E_V2DImode:
++	case E_V2DFmode:
++	case E_V4DImode:
++	case E_V4DFmode:
++	  fprintf (file, "d");
++	  break;
++	default:
++	  output_operand_lossage ("invalid use of '%%%c'", letter);
++	}
++      break;
++
++    case 'W':
++      loongarch_print_float_branch_condition (file, reverse_condition (code),
++					      letter);
++      break;
++
++    case 'w':
++      if (code == REG && LSX_REG_P (REGNO (op)))
++	fprintf (file, "$vr%s", &reg_names[REGNO (op)][2]);
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
++      break;
+ 
+     case 'x':
+       if (CONST_INT_P (op))
+@@ -5574,9 +6163,13 @@ loongarch_hard_regno_mode_ok_uncached (unsigned int regno, machine_mode mode)
+   size = GET_MODE_SIZE (mode);
+   mclass = GET_MODE_CLASS (mode);
+ 
+-  if (GP_REG_P (regno))
++  if (GP_REG_P (regno) && !LSX_SUPPORTED_MODE_P (mode))
+     return ((regno - GP_REG_FIRST) & 1) == 0 || size <= UNITS_PER_WORD;
+ 
++  /* For LSX, allow TImode and 128-bit vector modes in all FPR.  */
++  if (FP_REG_P (regno) && LSX_SUPPORTED_MODE_P (mode))
++    return true;
++
+   if (FP_REG_P (regno))
+     {
+       if (mclass == MODE_FLOAT
+@@ -5603,6 +6196,17 @@ loongarch_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
+   return loongarch_hard_regno_mode_ok_p[mode][regno];
+ }
+ 
++
++static bool
++loongarch_hard_regno_call_part_clobbered (unsigned int,
++					  unsigned int regno, machine_mode mode)
++{
++  if (ISA_HAS_LSX && FP_REG_P (regno) && GET_MODE_SIZE (mode) > 8)
++    return true;
++
++  return false;
++}
++
+ /* Implement TARGET_HARD_REGNO_NREGS.  */
+ 
+ static unsigned int
+@@ -5614,7 +6218,12 @@ loongarch_hard_regno_nregs (unsigned int regno, machine_mode mode)
+     return (GET_MODE_SIZE (mode) + 3) / 4;
+ 
+   if (FP_REG_P (regno))
+-    return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
++    {
++      if (LSX_SUPPORTED_MODE_P (mode))
++	return 1;
++
++      return (GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG;
++    }
+ 
+   /* All other registers are word-sized.  */
+   return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
+@@ -5641,8 +6250,12 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode)
+   if (hard_reg_set_intersect_p (left, reg_class_contents[(int) FP_REGS]))
+     {
+       if (loongarch_hard_regno_mode_ok (FP_REG_FIRST, mode))
+-	size = MIN (size, UNITS_PER_FPREG);
+-
++	{
++	  if (LSX_SUPPORTED_MODE_P (mode))
++	    size = MIN (size, UNITS_PER_LSX_REG);
++	  else
++	    size = MIN (size, UNITS_PER_FPREG);
++	}
+       left &= ~reg_class_contents[FP_REGS];
+     }
+   if (!hard_reg_set_empty_p (left))
+@@ -5653,9 +6266,13 @@ loongarch_class_max_nregs (enum reg_class rclass, machine_mode mode)
+ /* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */
+ 
+ static bool
+-loongarch_can_change_mode_class (machine_mode, machine_mode,
++loongarch_can_change_mode_class (machine_mode from, machine_mode to,
+ 				 reg_class_t rclass)
+ {
++  /* Allow conversions between different LSX vector modes.  */
++  if (LSX_SUPPORTED_MODE_P (from) && LSX_SUPPORTED_MODE_P (to))
++    return true;
++
+   return !reg_classes_intersect_p (FP_REGS, rclass);
+ }
+ 
+@@ -5675,7 +6292,7 @@ loongarch_mode_ok_for_mov_fmt_p (machine_mode mode)
+       return TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT;
+ 
+     default:
+-      return 0;
++      return LSX_SUPPORTED_MODE_P (mode);
+     }
+ }
+ 
+@@ -5832,7 +6449,12 @@ loongarch_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x,
+       if (regno < 0
+ 	  || (MEM_P (x)
+ 	      && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)))
+-	/* In this case we can use fld.s, fst.s, fld.d or fst.d.  */
++	/* In this case we can use lwc1, swc1, ldc1 or sdc1.  We'll use
++	   pairs of lwc1s and swc1s if ldc1 and sdc1 are not supported.  */
++	return NO_REGS;
++
++      if (MEM_P (x) && LSX_SUPPORTED_MODE_P (mode))
++	/* In this case we can use LSX LD.* and ST.*.  */
+ 	return NO_REGS;
+ 
+       if (GP_REG_P (regno) || x == CONST0_RTX (mode))
+@@ -5867,6 +6489,14 @@ loongarch_valid_pointer_mode (scalar_int_mode mode)
+   return mode == SImode || (TARGET_64BIT && mode == DImode);
+ }
+ 
++/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */
++
++static bool
++loongarch_vector_mode_supported_p (machine_mode mode)
++{
++  return LSX_SUPPORTED_MODE_P (mode);
++}
++
+ /* Implement TARGET_SCALAR_MODE_SUPPORTED_P.  */
+ 
+ static bool
+@@ -5879,6 +6509,48 @@ loongarch_scalar_mode_supported_p (scalar_mode mode)
+   return default_scalar_mode_supported_p (mode);
+ }
+ 
++/* Implement TARGET_VECTORIZE_PREFERRED_SIMD_MODE.  */
++
++static machine_mode
++loongarch_preferred_simd_mode (scalar_mode mode)
++{
++  if (!ISA_HAS_LSX)
++    return word_mode;
++
++  switch (mode)
++    {
++    case E_QImode:
++      return E_V16QImode;
++    case E_HImode:
++      return E_V8HImode;
++    case E_SImode:
++      return E_V4SImode;
++    case E_DImode:
++      return E_V2DImode;
++
++    case E_SFmode:
++      return E_V4SFmode;
++
++    case E_DFmode:
++      return E_V2DFmode;
++
++    default:
++      break;
++    }
++  return word_mode;
++}
++
++static unsigned int
++loongarch_autovectorize_vector_modes (vector_modes *modes, bool)
++{
++  if (ISA_HAS_LSX)
++    {
++      modes->safe_push (V16QImode);
++    }
++
++  return 0;
++}
++
+ /* Return the assembly code for INSN, which has the operands given by
+    OPERANDS, and which branches to OPERANDS[0] if some condition is true.
+    BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[0]
+@@ -6043,6 +6715,29 @@ loongarch_output_division (const char *division, rtx *operands)
+   return s;
+ }
+ 
++/* Return the assembly code for LSX DIV_{S,U}.DF or MOD_{S,U}.DF instructions,
++   which has the operands given by OPERANDS.  Add in a divide-by-zero check
++   if needed.  */
++
++const char *
++loongarch_lsx_output_division (const char *division, rtx *operands)
++{
++  const char *s;
++
++  s = division;
++  if (TARGET_CHECK_ZERO_DIV)
++    {
++      if (ISA_HAS_LSX)
++	{
++	  output_asm_insn ("vsetallnez.%v0\t$fcc7,%w2",operands);
++	  output_asm_insn (s, operands);
++	  output_asm_insn ("bcnez\t$fcc7,1f", operands);
++	}
++      s = "break\t7\n1:";
++    }
++  return s;
++}
++
+ /* Implement TARGET_SCHED_ADJUST_COST.  We assume that anti and output
+    dependencies have no cost.  */
+ 
+@@ -6323,6 +7018,9 @@ loongarch_option_override_internal (struct gcc_options *opts,
+   if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib)
+     error ("%qs cannot be used for compiling a shared library",
+ 	   "-mdirect-extern-access");
++  if (loongarch_vector_access_cost == 0)
++    loongarch_vector_access_cost = 5;
++
+ 
+   switch (la_target.cmodel)
+     {
+@@ -6541,64 +7239,60 @@ loongarch_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
+   emit_insn (gen_clear_cache (addr, end_addr));
+ }
+ 
+-/* Implement HARD_REGNO_CALLER_SAVE_MODE.  */
+-
+-machine_mode
+-loongarch_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs,
+-				       machine_mode mode)
+-{
+-  /* For performance, avoid saving/restoring upper parts of a register
+-     by returning MODE as save mode when the mode is known.  */
+-  if (mode == VOIDmode)
+-    return choose_hard_reg_mode (regno, nregs, NULL);
+-  else
+-    return mode;
+-}
++/* Generate or test for an insn that supports a constant permutation.  */
+ 
+-/* Implement TARGET_SPILL_CLASS.  */
++#define MAX_VECT_LEN 32
+ 
+-static reg_class_t
+-loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED,
+-		       machine_mode mode ATTRIBUTE_UNUSED)
++struct expand_vec_perm_d
+ {
+-  return NO_REGS;
+-}
+-
+-/* Implement TARGET_PROMOTE_FUNCTION_MODE.  */
++  rtx target, op0, op1;
++  unsigned char perm[MAX_VECT_LEN];
++  machine_mode vmode;
++  unsigned char nelt;
++  bool one_vector_p;
++  bool testing_p;
++};
+ 
+-/* This function is equivalent to default_promote_function_mode_always_promote
+-   except that it returns a promoted mode even if type is NULL_TREE.  This is
+-   needed by libcalls which have no type (only a mode) such as fixed conversion
+-   routines that take a signed or unsigned char/short argument and convert it
+-   to a fixed type.  */
++/* Construct (set target (vec_select op0 (parallel perm))) and
++   return true if that's a valid instruction in the active ISA.  */
+ 
+-static machine_mode
+-loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
+-				 machine_mode mode,
+-				 int *punsignedp ATTRIBUTE_UNUSED,
+-				 const_tree fntype ATTRIBUTE_UNUSED,
+-				 int for_return ATTRIBUTE_UNUSED)
++static bool
++loongarch_expand_vselect (rtx target, rtx op0,
++			  const unsigned char *perm, unsigned nelt)
+ {
+-  int unsignedp;
++  rtx rperm[MAX_VECT_LEN], x;
++  rtx_insn *insn;
++  unsigned i;
+ 
+-  if (type != NULL_TREE)
+-    return promote_mode (type, mode, punsignedp);
++  for (i = 0; i < nelt; ++i)
++    rperm[i] = GEN_INT (perm[i]);
+ 
+-  unsignedp = *punsignedp;
+-  PROMOTE_MODE (mode, unsignedp, type);
+-  *punsignedp = unsignedp;
+-  return mode;
++  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nelt, rperm));
++  x = gen_rtx_VEC_SELECT (GET_MODE (target), op0, x);
++  x = gen_rtx_SET (target, x);
++
++  insn = emit_insn (x);
++  if (recog_memoized (insn) < 0)
++    {
++      remove_insn (insn);
++      return false;
++    }
++  return true;
+ }
+ 
+-/* Implement TARGET_STARTING_FRAME_OFFSET.  See loongarch_compute_frame_info
+-   for details about the frame layout.  */
++/* Similar, but generate a vec_concat from op0 and op1 as well.  */
+ 
+-static HOST_WIDE_INT
+-loongarch_starting_frame_offset (void)
++static bool
++loongarch_expand_vselect_vconcat (rtx target, rtx op0, rtx op1,
++				  const unsigned char *perm, unsigned nelt)
+ {
+-  if (FRAME_GROWS_DOWNWARD)
+-    return 0;
+-  return crtl->outgoing_args_size;
++  machine_mode v2mode;
++  rtx x;
++
++  if (!GET_MODE_2XWIDER_MODE (GET_MODE (op0)).exists (&v2mode))
++    return false;
++  x = gen_rtx_VEC_CONCAT (v2mode, op0, op1);
++  return loongarch_expand_vselect (target, x, perm, nelt);
+ }
+ 
+ static tree
+@@ -6861,105 +7555,1291 @@ loongarch_set_handled_components (sbitmap components)
+ #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
+ #undef TARGET_ASM_ALIGNED_DI_OP
+ #define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
++/* Construct (set target (vec_select op0 (parallel selector))) and
++   return true if that's a valid instruction in the active ISA.  */
+ 
+-#undef TARGET_OPTION_OVERRIDE
+-#define TARGET_OPTION_OVERRIDE loongarch_option_override
+-
+-#undef TARGET_LEGITIMIZE_ADDRESS
+-#define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address
+-
+-#undef TARGET_ASM_SELECT_RTX_SECTION
+-#define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section
+-#undef TARGET_ASM_FUNCTION_RODATA_SECTION
+-#define TARGET_ASM_FUNCTION_RODATA_SECTION loongarch_function_rodata_section
++static bool
++loongarch_expand_lsx_shuffle (struct expand_vec_perm_d *d)
++{
++  rtx x, elts[MAX_VECT_LEN];
++  rtvec v;
++  rtx_insn *insn;
++  unsigned i;
+ 
+-#undef TARGET_SCHED_INIT
+-#define TARGET_SCHED_INIT loongarch_sched_init
+-#undef TARGET_SCHED_REORDER
+-#define TARGET_SCHED_REORDER loongarch_sched_reorder
+-#undef TARGET_SCHED_REORDER2
+-#define TARGET_SCHED_REORDER2 loongarch_sched_reorder2
+-#undef TARGET_SCHED_VARIABLE_ISSUE
+-#define TARGET_SCHED_VARIABLE_ISSUE loongarch_variable_issue
+-#undef TARGET_SCHED_ADJUST_COST
+-#define TARGET_SCHED_ADJUST_COST loongarch_adjust_cost
+-#undef TARGET_SCHED_ISSUE_RATE
+-#define TARGET_SCHED_ISSUE_RATE loongarch_issue_rate
+-#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
+-#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
+-  loongarch_multipass_dfa_lookahead
++  if (!ISA_HAS_LSX)
++    return false;
+ 
+-#undef TARGET_FUNCTION_OK_FOR_SIBCALL
+-#define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall
++  for (i = 0; i < d->nelt; i++)
++    elts[i] = GEN_INT (d->perm[i]);
+ 
+-#undef TARGET_VALID_POINTER_MODE
+-#define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode
+-#undef TARGET_REGISTER_MOVE_COST
+-#define TARGET_REGISTER_MOVE_COST loongarch_register_move_cost
+-#undef TARGET_MEMORY_MOVE_COST
+-#define TARGET_MEMORY_MOVE_COST loongarch_memory_move_cost
+-#undef TARGET_RTX_COSTS
+-#define TARGET_RTX_COSTS loongarch_rtx_costs
+-#undef TARGET_ADDRESS_COST
+-#define TARGET_ADDRESS_COST loongarch_address_cost
++  v = gen_rtvec_v (d->nelt, elts);
++  x = gen_rtx_PARALLEL (VOIDmode, v);
+ 
+-#undef TARGET_IN_SMALL_DATA_P
+-#define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p
++  if (!loongarch_const_vector_shuffle_set_p (x, d->vmode))
++    return false;
+ 
+-#undef TARGET_PREFERRED_RELOAD_CLASS
+-#define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class
++  x = gen_rtx_VEC_SELECT (d->vmode, d->op0, x);
++  x = gen_rtx_SET (d->target, x);
+ 
+-#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
+-#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
++  insn = emit_insn (x);
++  if (recog_memoized (insn) < 0)
++    {
++      remove_insn (insn);
++      return false;
++    }
++  return true;
++}
+ 
+-#undef TARGET_EXPAND_BUILTIN_VA_START
+-#define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start
++void
++loongarch_expand_vec_perm (rtx target, rtx op0, rtx op1, rtx sel)
++{
++  machine_mode vmode = GET_MODE (target);
+ 
+-#undef TARGET_PROMOTE_FUNCTION_MODE
+-#define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode
+-#undef TARGET_RETURN_IN_MEMORY
+-#define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory
++  switch (vmode)
++    {
++    case E_V16QImode:
++      emit_insn (gen_lsx_vshuf_b (target, op1, op0, sel));
++      break;
++    case E_V2DFmode:
++      emit_insn (gen_lsx_vshuf_d_f (target, sel, op1, op0));
++      break;
++    case E_V2DImode:
++      emit_insn (gen_lsx_vshuf_d (target, sel, op1, op0));
++      break;
++    case E_V4SFmode:
++      emit_insn (gen_lsx_vshuf_w_f (target, sel, op1, op0));
++      break;
++    case E_V4SImode:
++      emit_insn (gen_lsx_vshuf_w (target, sel, op1, op0));
++      break;
++    case E_V8HImode:
++      emit_insn (gen_lsx_vshuf_h (target, sel, op1, op0));
++      break;
++    default:
++      break;
++    }
++}
+ 
+-#undef TARGET_FUNCTION_VALUE
+-#define TARGET_FUNCTION_VALUE loongarch_function_value
+-#undef TARGET_LIBCALL_VALUE
+-#define TARGET_LIBCALL_VALUE loongarch_libcall_value
++static bool
++loongarch_try_expand_lsx_vshuf_const (struct expand_vec_perm_d *d)
++{
++  int i;
++  rtx target, op0, op1, sel, tmp;
++  rtx rperm[MAX_VECT_LEN];
+ 
+-#undef TARGET_ASM_OUTPUT_MI_THUNK
+-#define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk
+-#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
+-#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
+-  hook_bool_const_tree_hwi_hwi_const_tree_true
++  if (d->vmode == E_V2DImode || d->vmode == E_V2DFmode
++	|| d->vmode == E_V4SImode || d->vmode == E_V4SFmode
++	|| d->vmode == E_V8HImode || d->vmode == E_V16QImode)
++    {
++      target = d->target;
++      op0 = d->op0;
++      op1 = d->one_vector_p ? d->op0 : d->op1;
+ 
+-#undef TARGET_PRINT_OPERAND
+-#define TARGET_PRINT_OPERAND loongarch_print_operand
+-#undef TARGET_PRINT_OPERAND_ADDRESS
+-#define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address
+-#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
+-#define TARGET_PRINT_OPERAND_PUNCT_VALID_P \
+-  loongarch_print_operand_punct_valid_p
++      if (GET_MODE (op0) != GET_MODE (op1)
++	  || GET_MODE (op0) != GET_MODE (target))
++	return false;
+ 
+-#undef TARGET_SETUP_INCOMING_VARARGS
+-#define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs
+-#undef TARGET_STRICT_ARGUMENT_NAMING
+-#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
+-#undef TARGET_MUST_PASS_IN_STACK
+-#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
+-#undef TARGET_PASS_BY_REFERENCE
+-#define TARGET_PASS_BY_REFERENCE loongarch_pass_by_reference
+-#undef TARGET_ARG_PARTIAL_BYTES
+-#define TARGET_ARG_PARTIAL_BYTES loongarch_arg_partial_bytes
+-#undef TARGET_FUNCTION_ARG
+-#define TARGET_FUNCTION_ARG loongarch_function_arg
+-#undef TARGET_FUNCTION_ARG_ADVANCE
+-#define TARGET_FUNCTION_ARG_ADVANCE loongarch_function_arg_advance
+-#undef TARGET_FUNCTION_ARG_BOUNDARY
+-#define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary
++      if (d->testing_p)
++	return true;
+ 
+-#undef TARGET_SCALAR_MODE_SUPPORTED_P
+-#define TARGET_SCALAR_MODE_SUPPORTED_P loongarch_scalar_mode_supported_p
++      for (i = 0; i < d->nelt; i += 1)
++	{
++	  rperm[i] = GEN_INT (d->perm[i]);
++	}
+ 
+-#undef TARGET_INIT_BUILTINS
++      if (d->vmode == E_V2DFmode)
++	{
++	  sel = gen_rtx_CONST_VECTOR (E_V2DImode, gen_rtvec_v (d->nelt, rperm));
++	  tmp = gen_rtx_SUBREG (E_V2DImode, d->target, 0);
++	  emit_move_insn (tmp, sel);
++	}
++      else if (d->vmode == E_V4SFmode)
++	{
++	  sel = gen_rtx_CONST_VECTOR (E_V4SImode, gen_rtvec_v (d->nelt, rperm));
++	  tmp = gen_rtx_SUBREG (E_V4SImode, d->target, 0);
++	  emit_move_insn (tmp, sel);
++	}
++      else
++	{
++	  sel = gen_rtx_CONST_VECTOR (d->vmode, gen_rtvec_v (d->nelt, rperm));
++	  emit_move_insn (d->target, sel);
++	}
++
++      switch (d->vmode)
++	{
++	case E_V2DFmode:
++	  emit_insn (gen_lsx_vshuf_d_f (target, target, op1, op0));
++	  break;
++	case E_V2DImode:
++	  emit_insn (gen_lsx_vshuf_d (target, target, op1, op0));
++	  break;
++	case E_V4SFmode:
++	  emit_insn (gen_lsx_vshuf_w_f (target, target, op1, op0));
++	  break;
++	case E_V4SImode:
++	  emit_insn (gen_lsx_vshuf_w (target, target, op1, op0));
++	  break;
++	case E_V8HImode:
++	  emit_insn (gen_lsx_vshuf_h (target, target, op1, op0));
++	  break;
++	case E_V16QImode:
++	  emit_insn (gen_lsx_vshuf_b (target, op1, op0, target));
++	  break;
++	default:
++	  break;
++	}
++
++      return true;
++    }
++  return false;
++}
++
++static bool
++loongarch_expand_vec_perm_const_1 (struct expand_vec_perm_d *d)
++{
++  unsigned int i, nelt = d->nelt;
++  unsigned char perm2[MAX_VECT_LEN];
++
++  if (d->one_vector_p)
++    {
++      /* Try interleave with alternating operands.  */
++      memcpy (perm2, d->perm, sizeof (perm2));
++      for (i = 1; i < nelt; i += 2)
++	perm2[i] += nelt;
++      if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1, perm2,
++					    nelt))
++	return true;
++    }
++  else
++    {
++      if (loongarch_expand_vselect_vconcat (d->target, d->op0, d->op1,
++					    d->perm, nelt))
++	return true;
++
++      /* Try again with swapped operands.  */
++      for (i = 0; i < nelt; ++i)
++	perm2[i] = (d->perm[i] + nelt) & (2 * nelt - 1);
++      if (loongarch_expand_vselect_vconcat (d->target, d->op1, d->op0, perm2,
++					    nelt))
++	return true;
++    }
++
++  if (loongarch_expand_lsx_shuffle (d))
++    return true;
++  return false;
++}
++
++/* Implementation of constant vector permuatation.  This function identifies
++ * recognized pattern of permuation selector argument, and use one or more
++ * instruction(s) to finish the permutation job correctly.  For unsupported
++ * patterns, it will return false.  */
++
++static bool
++loongarch_expand_vec_perm_const_2 (struct expand_vec_perm_d *d)
++{
++  /* Although we have the LSX vec_perm template, there's still some
++     128bit vector permuatation operations send to vectorize_vec_perm_const.
++     In this case, we just simpliy wrap them by single vshuf.* instruction,
++     because LSX vshuf.* instruction just have the same behavior that GCC
++     expects.  */
++  return loongarch_try_expand_lsx_vshuf_const (d);
++}
++
++/* Implement TARGET_VECTORIZE_VEC_PERM_CONST.  */
++
++static bool
++loongarch_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
++				    rtx target, rtx op0, rtx op1,
++				    const vec_perm_indices &sel)
++{
++  if (vmode != op_mode)
++    return false;
++
++  struct expand_vec_perm_d d;
++  int i, nelt, which;
++  unsigned char orig_perm[MAX_VECT_LEN];
++  bool ok;
++
++  d.target = target;
++  if (op0)
++    {
++      rtx nop0 = force_reg (vmode, op0);
++      if (op0 == op1)
++	op1 = nop0;
++      op0 = nop0;
++    }
++  if (op1)
++    op1 = force_reg (vmode, op1);
++  d.op0 = op0;
++  d.op1 = op1;
++
++  d.vmode = vmode;
++  gcc_assert (VECTOR_MODE_P (vmode));
++  d.nelt = nelt = GET_MODE_NUNITS (vmode);
++  d.testing_p = !target;
++
++  /* This is overly conservative, but ensures we don't get an
++     uninitialized warning on ORIG_PERM.  */
++  memset (orig_perm, 0, MAX_VECT_LEN);
++  for (i = which = 0; i < nelt; ++i)
++    {
++      int ei = sel[i] & (2 * nelt - 1);
++      which |= (ei < nelt ? 1 : 2);
++      orig_perm[i] = ei;
++    }
++  memcpy (d.perm, orig_perm, MAX_VECT_LEN);
++
++  switch (which)
++    {
++    default:
++      gcc_unreachable ();
++
++    case 3:
++      d.one_vector_p = false;
++      if (d.testing_p || !rtx_equal_p (d.op0, d.op1))
++	break;
++      /* FALLTHRU */
++
++    case 2:
++      for (i = 0; i < nelt; ++i)
++	d.perm[i] &= nelt - 1;
++      d.op0 = d.op1;
++      d.one_vector_p = true;
++      break;
++
++    case 1:
++      d.op1 = d.op0;
++      d.one_vector_p = true;
++      break;
++    }
++
++  if (d.testing_p)
++    {
++      d.target = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 1);
++      d.op1 = d.op0 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 2);
++      if (!d.one_vector_p)
++	d.op1 = gen_raw_REG (d.vmode, LAST_VIRTUAL_REGISTER + 3);
++
++      ok = loongarch_expand_vec_perm_const_2 (&d);
++      if (ok)
++	return ok;
++
++      start_sequence ();
++      ok = loongarch_expand_vec_perm_const_1 (&d);
++      end_sequence ();
++      return ok;
++    }
++
++  ok = loongarch_expand_vec_perm_const_2 (&d);
++  if (!ok)
++    ok = loongarch_expand_vec_perm_const_1 (&d);
++
++  /* If we were given a two-vector permutation which just happened to
++     have both input vectors equal, we folded this into a one-vector
++     permutation.  There are several loongson patterns that are matched
++     via direct vec_select+vec_concat expansion, but we do not have
++     support in loongarch_expand_vec_perm_const_1 to guess the adjustment
++     that should be made for a single operand.  Just try again with
++     the original permutation.  */
++  if (!ok && which == 3)
++    {
++      d.op0 = op0;
++      d.op1 = op1;
++      d.one_vector_p = false;
++      memcpy (d.perm, orig_perm, MAX_VECT_LEN);
++      ok = loongarch_expand_vec_perm_const_1 (&d);
++    }
++
++  return ok;
++}
++
++static int
++loongarch_cpu_sched_reassociation_width (struct loongarch_target *target,
++					 unsigned int opc, machine_mode mode)
++{
++  /* unreferenced argument */
++  (void) opc;
++
++  switch (target->cpu_tune)
++    {
++    case CPU_LOONGARCH64:
++    case CPU_LA464:
++      /* Vector part.  */
++      if (LSX_SUPPORTED_MODE_P (mode))
++	{
++	  /* Integer vector instructions execute in FP unit.
++	     The width of integer/float-point vector instructions is 3.  */
++	  return 3;
++	}
++
++      /* Scalar part.  */
++      else if (INTEGRAL_MODE_P (mode))
++	return 1;
++      else if (FLOAT_MODE_P (mode))
++	{
++	  if (opc == PLUS_EXPR)
++	    {
++	      return 2;
++	    }
++	  return 4;
++	}
++      break;
++    default:
++      break;
++    }
++
++  /* default is 1 */
++  return 1;
++}
++
++/* Implement TARGET_SCHED_REASSOCIATION_WIDTH.  */
++
++static int
++loongarch_sched_reassociation_width (unsigned int opc, machine_mode mode)
++{
++  return loongarch_cpu_sched_reassociation_width (&la_target, opc, mode);
++}
++
++/* Implement extract a scalar element from vecotr register */
++
++void
++loongarch_expand_vector_extract (rtx target, rtx vec, int elt)
++{
++  machine_mode mode = GET_MODE (vec);
++  machine_mode inner_mode = GET_MODE_INNER (mode);
++  rtx tmp;
++
++  switch (mode)
++    {
++    case E_V8HImode:
++    case E_V16QImode:
++      break;
++
++    default:
++      break;
++    }
++
++  tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (elt)));
++  tmp = gen_rtx_VEC_SELECT (inner_mode, vec, tmp);
++
++  /* Let the rtl optimizers know about the zero extension performed.  */
++  if (inner_mode == QImode || inner_mode == HImode)
++    {
++      tmp = gen_rtx_ZERO_EXTEND (SImode, tmp);
++      target = gen_lowpart (SImode, target);
++    }
++  if (inner_mode == SImode || inner_mode == DImode)
++    {
++      tmp = gen_rtx_SIGN_EXTEND (inner_mode, tmp);
++    }
++
++  emit_insn (gen_rtx_SET (target, tmp));
++}
++
++/* Generate code to copy vector bits i / 2 ... i - 1 from vector SRC
++   to bits 0 ... i / 2 - 1 of vector DEST, which has the same mode.
++   The upper bits of DEST are undefined, though they shouldn't cause
++   exceptions (some bits from src or all zeros are ok).  */
++
++static void
++emit_reduc_half (rtx dest, rtx src, int i)
++{
++  rtx tem, d = dest;
++  switch (GET_MODE (src))
++    {
++    case E_V4SFmode:
++      tem = gen_lsx_vbsrl_w_f (dest, src, GEN_INT (i == 128 ? 8 : 4));
++      break;
++    case E_V2DFmode:
++      tem = gen_lsx_vbsrl_d_f (dest, src, GEN_INT (8));
++      break;
++    case E_V16QImode:
++    case E_V8HImode:
++    case E_V4SImode:
++    case E_V2DImode:
++      d = gen_reg_rtx (V2DImode);
++      tem = gen_lsx_vbsrl_d (d, gen_lowpart (V2DImode, src), GEN_INT (i/16));
++      break;
++    default:
++      gcc_unreachable ();
++    }
++  emit_insn (tem);
++  if (d != dest)
++    emit_move_insn (dest, gen_lowpart (GET_MODE (dest), d));
++}
++
++/* Expand a vector reduction.  FN is the binary pattern to reduce;
++   DEST is the destination; IN is the input vector.  */
++
++void
++loongarch_expand_vector_reduc (rtx (*fn) (rtx, rtx, rtx), rtx dest, rtx in)
++{
++  rtx half, dst, vec = in;
++  machine_mode mode = GET_MODE (in);
++  int i;
++
++  for (i = GET_MODE_BITSIZE (mode);
++       i > GET_MODE_UNIT_BITSIZE (mode);
++       i >>= 1)
++    {
++      half = gen_reg_rtx (mode);
++      emit_reduc_half (half, vec, i);
++      if (i == GET_MODE_UNIT_BITSIZE (mode) * 2)
++	dst = dest;
++      else
++	dst = gen_reg_rtx (mode);
++      emit_insn (fn (dst, half, vec));
++      vec = dst;
++    }
++}
++
++/* Expand an integral vector unpack operation.  */
++
++void
++loongarch_expand_vec_unpack (rtx operands[2], bool unsigned_p, bool high_p)
++{
++  machine_mode imode = GET_MODE (operands[1]);
++  rtx (*unpack) (rtx, rtx, rtx);
++  rtx (*cmpFunc) (rtx, rtx, rtx);
++  rtx tmp, dest;
++
++  if (ISA_HAS_LSX)
++    {
++      switch (imode)
++	{
++	case E_V4SImode:
++	  if (high_p != 0)
++	    unpack = gen_lsx_vilvh_w;
++	  else
++	    unpack = gen_lsx_vilvl_w;
++
++	  cmpFunc = gen_lsx_vslt_w;
++	  break;
++
++	case E_V8HImode:
++	  if (high_p != 0)
++	    unpack = gen_lsx_vilvh_h;
++	  else
++	    unpack = gen_lsx_vilvl_h;
++
++	  cmpFunc = gen_lsx_vslt_h;
++	  break;
++
++	case E_V16QImode:
++	  if (high_p != 0)
++	    unpack = gen_lsx_vilvh_b;
++	  else
++	    unpack = gen_lsx_vilvl_b;
++
++	  cmpFunc = gen_lsx_vslt_b;
++	  break;
++
++	default:
++	  gcc_unreachable ();
++	  break;
++	}
++
++      if (!unsigned_p)
++	{
++	  /* Extract sign extention for each element comparing each element
++	     with immediate zero.  */
++	  tmp = gen_reg_rtx (imode);
++	  emit_insn (cmpFunc (tmp, operands[1], CONST0_RTX (imode)));
++	}
++      else
++	tmp = force_reg (imode, CONST0_RTX (imode));
++
++      dest = gen_reg_rtx (imode);
++
++      emit_insn (unpack (dest, operands[1], tmp));
++      emit_move_insn (operands[0], gen_lowpart (GET_MODE (operands[0]), dest));
++      return;
++    }
++  gcc_unreachable ();
++}
++
++/* Construct and return PARALLEL RTX with CONST_INTs for HIGH (high_p == TRUE)
++   or LOW (high_p == FALSE) half of a vector for mode MODE.  */
++
++rtx
++loongarch_lsx_vec_parallel_const_half (machine_mode mode, bool high_p)
++{
++  int nunits = GET_MODE_NUNITS (mode);
++  rtvec v = rtvec_alloc (nunits / 2);
++  int base;
++  int i;
++
++  base = high_p ? nunits / 2 : 0;
++
++  for (i = 0; i < nunits / 2; i++)
++    RTVEC_ELT (v, i) = GEN_INT (base + i);
++
++  return gen_rtx_PARALLEL (VOIDmode, v);
++}
++
++/* A subroutine of loongarch_expand_vec_init, match constant vector
++   elements.  */
++
++static inline bool
++loongarch_constant_elt_p (rtx x)
++{
++  return CONST_INT_P (x) || GET_CODE (x) == CONST_DOUBLE;
++}
++
++rtx
++loongarch_gen_const_int_vector_shuffle (machine_mode mode, int val)
++{
++  int nunits = GET_MODE_NUNITS (mode);
++  int nsets = nunits / 4;
++  rtx elts[MAX_VECT_LEN];
++  int set = 0;
++  int i, j;
++
++  /* Generate a const_int vector replicating the same 4-element set
++     from an immediate.  */
++  for (j = 0; j < nsets; j++, set = 4 * j)
++    for (i = 0; i < 4; i++)
++      elts[set + i] = GEN_INT (set + ((val >> (2 * i)) & 0x3));
++
++  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (nunits, elts));
++}
++
++/* Expand a vector initialization.  */
++
++void
++loongarch_expand_vector_init (rtx target, rtx vals)
++{
++  machine_mode vmode = GET_MODE (target);
++  machine_mode imode = GET_MODE_INNER (vmode);
++  unsigned i, nelt = GET_MODE_NUNITS (vmode);
++  unsigned nvar = 0;
++  bool all_same = true;
++  rtx x;
++
++  for (i = 0; i < nelt; ++i)
++    {
++      x = XVECEXP (vals, 0, i);
++      if (!loongarch_constant_elt_p (x))
++	nvar++;
++      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
++	all_same = false;
++    }
++
++  if (ISA_HAS_LSX)
++    {
++      if (all_same)
++	{
++	  rtx same = XVECEXP (vals, 0, 0);
++	  rtx temp, temp2;
++
++	  if (CONST_INT_P (same) && nvar == 0
++	      && loongarch_signed_immediate_p (INTVAL (same), 10, 0))
++	    {
++	      switch (vmode)
++		{
++		case E_V16QImode:
++		case E_V8HImode:
++		case E_V4SImode:
++		case E_V2DImode:
++		  temp = gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0));
++		  emit_move_insn (target, temp);
++		  return;
++
++		default:
++		  gcc_unreachable ();
++		}
++	    }
++	  temp = gen_reg_rtx (imode);
++	  if (imode == GET_MODE (same))
++	    temp2 = same;
++	  else if (GET_MODE_SIZE (imode) >= UNITS_PER_WORD)
++	    {
++	      if (GET_CODE (same) == MEM)
++		{
++		  rtx reg_tmp = gen_reg_rtx (GET_MODE (same));
++		  loongarch_emit_move (reg_tmp, same);
++		  temp2 = simplify_gen_subreg (imode, reg_tmp,
++					       GET_MODE (reg_tmp), 0);
++		}
++	      else
++		temp2 = simplify_gen_subreg (imode, same, GET_MODE (same), 0);
++	    }
++	  else
++	    {
++	      if (GET_CODE (same) == MEM)
++		{
++		  rtx reg_tmp = gen_reg_rtx (GET_MODE (same));
++		  loongarch_emit_move (reg_tmp, same);
++		  temp2 = lowpart_subreg (imode, reg_tmp, GET_MODE (reg_tmp));
++		}
++	      else
++		temp2 = lowpart_subreg (imode, same, GET_MODE (same));
++	    }
++	  emit_move_insn (temp, temp2);
++
++	  switch (vmode)
++	    {
++	    case E_V16QImode:
++	    case E_V8HImode:
++	    case E_V4SImode:
++	    case E_V2DImode:
++	      loongarch_emit_move (target, gen_rtx_VEC_DUPLICATE (vmode, temp));
++	      break;
++
++	    case E_V4SFmode:
++	      emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp));
++	      break;
++
++	    case E_V2DFmode:
++	      emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp));
++	      break;
++
++	    default:
++	      gcc_unreachable ();
++	    }
++	}
++      else
++	{
++	  emit_move_insn (target, CONST0_RTX (vmode));
++
++	  for (i = 0; i < nelt; ++i)
++	    {
++	      rtx temp = gen_reg_rtx (imode);
++	      emit_move_insn (temp, XVECEXP (vals, 0, i));
++	      switch (vmode)
++		{
++		case E_V16QImode:
++		  if (i == 0)
++		    emit_insn (gen_lsx_vreplvei_b_scalar (target, temp));
++		  else
++		    emit_insn (gen_vec_setv16qi (target, temp, GEN_INT (i)));
++		  break;
++
++		case E_V8HImode:
++		  if (i == 0)
++		    emit_insn (gen_lsx_vreplvei_h_scalar (target, temp));
++		  else
++		    emit_insn (gen_vec_setv8hi (target, temp, GEN_INT (i)));
++		  break;
++
++		case E_V4SImode:
++		  if (i == 0)
++		    emit_insn (gen_lsx_vreplvei_w_scalar (target, temp));
++		  else
++		    emit_insn (gen_vec_setv4si (target, temp, GEN_INT (i)));
++		  break;
++
++		case E_V2DImode:
++		  if (i == 0)
++		    emit_insn (gen_lsx_vreplvei_d_scalar (target, temp));
++		  else
++		    emit_insn (gen_vec_setv2di (target, temp, GEN_INT (i)));
++		  break;
++
++		case E_V4SFmode:
++		  if (i == 0)
++		    emit_insn (gen_lsx_vreplvei_w_f_scalar (target, temp));
++		  else
++		    emit_insn (gen_vec_setv4sf (target, temp, GEN_INT (i)));
++		  break;
++
++		case E_V2DFmode:
++		  if (i == 0)
++		    emit_insn (gen_lsx_vreplvei_d_f_scalar (target, temp));
++		  else
++		    emit_insn (gen_vec_setv2df (target, temp, GEN_INT (i)));
++		  break;
++
++		default:
++		  gcc_unreachable ();
++		}
++	    }
++	}
++      return;
++    }
++
++  /* Load constants from the pool, or whatever's handy.  */
++  if (nvar == 0)
++    {
++      emit_move_insn (target, gen_rtx_CONST_VECTOR (vmode, XVEC (vals, 0)));
++      return;
++    }
++
++  /* For two-part initialization, always use CONCAT.  */
++  if (nelt == 2)
++    {
++      rtx op0 = force_reg (imode, XVECEXP (vals, 0, 0));
++      rtx op1 = force_reg (imode, XVECEXP (vals, 0, 1));
++      x = gen_rtx_VEC_CONCAT (vmode, op0, op1);
++      emit_insn (gen_rtx_SET (target, x));
++      return;
++    }
++
++  /* Loongson is the only cpu with vectors with more elements.  */
++  gcc_assert (0);
++}
++
++/* Implement HARD_REGNO_CALLER_SAVE_MODE.  */
++
++machine_mode
++loongarch_hard_regno_caller_save_mode (unsigned int regno, unsigned int nregs,
++				       machine_mode mode)
++{
++  /* For performance, avoid saving/restoring upper parts of a register
++     by returning MODE as save mode when the mode is known.  */
++  if (mode == VOIDmode)
++    return choose_hard_reg_mode (regno, nregs, NULL);
++  else
++    return mode;
++}
++
++/* Generate RTL for comparing CMP_OP0 and CMP_OP1 using condition COND and
++   store the result -1 or 0 in DEST.  */
++
++static void
++loongarch_expand_lsx_cmp (rtx dest, enum rtx_code cond, rtx op0, rtx op1)
++{
++  machine_mode cmp_mode = GET_MODE (op0);
++  int unspec = -1;
++  bool negate = false;
++
++  switch (cmp_mode)
++    {
++    case E_V16QImode:
++    case E_V32QImode:
++    case E_V8HImode:
++    case E_V16HImode:
++    case E_V4SImode:
++    case E_V8SImode:
++    case E_V2DImode:
++    case E_V4DImode:
++      switch (cond)
++	{
++	case NE:
++	  cond = reverse_condition (cond);
++	  negate = true;
++	  break;
++	case EQ:
++	case LT:
++	case LE:
++	case LTU:
++	case LEU:
++	  break;
++	case GE:
++	case GT:
++	case GEU:
++	case GTU:
++	  std::swap (op0, op1);
++	  cond = swap_condition (cond);
++	  break;
++	default:
++	  gcc_unreachable ();
++	}
++      loongarch_emit_binary (cond, dest, op0, op1);
++      if (negate)
++	emit_move_insn (dest, gen_rtx_NOT (GET_MODE (dest), dest));
++      break;
++
++    case E_V4SFmode:
++    case E_V2DFmode:
++      switch (cond)
++	{
++	case UNORDERED:
++	case ORDERED:
++	case EQ:
++	case NE:
++	case UNEQ:
++	case UNLE:
++	case UNLT:
++	  break;
++	case LTGT: cond = NE; break;
++	case UNGE: cond = UNLE; std::swap (op0, op1); break;
++	case UNGT: cond = UNLT; std::swap (op0, op1); break;
++	case LE: unspec = UNSPEC_LSX_VFCMP_SLE; break;
++	case LT: unspec = UNSPEC_LSX_VFCMP_SLT; break;
++	case GE: unspec = UNSPEC_LSX_VFCMP_SLE; std::swap (op0, op1); break;
++	case GT: unspec = UNSPEC_LSX_VFCMP_SLT; std::swap (op0, op1); break;
++	default:
++		 gcc_unreachable ();
++	}
++      if (unspec < 0)
++	loongarch_emit_binary (cond, dest, op0, op1);
++      else
++	{
++	  rtx x = gen_rtx_UNSPEC (GET_MODE (dest),
++				  gen_rtvec (2, op0, op1), unspec);
++	  emit_insn (gen_rtx_SET (dest, x));
++	}
++      break;
++
++    default:
++      gcc_unreachable ();
++      break;
++    }
++}
++
++/* Expand VEC_COND_EXPR, where:
++   MODE is mode of the result
++   VIMODE equivalent integer mode
++   OPERANDS operands of VEC_COND_EXPR.  */
++
++void
++loongarch_expand_vec_cond_expr (machine_mode mode, machine_mode vimode,
++				rtx *operands)
++{
++  rtx cond = operands[3];
++  rtx cmp_op0 = operands[4];
++  rtx cmp_op1 = operands[5];
++  rtx cmp_res = gen_reg_rtx (vimode);
++
++  loongarch_expand_lsx_cmp (cmp_res, GET_CODE (cond), cmp_op0, cmp_op1);
++
++  /* We handle the following cases:
++     1) r = a CMP b ? -1 : 0
++     2) r = a CMP b ? -1 : v
++     3) r = a CMP b ?  v : 0
++     4) r = a CMP b ? v1 : v2  */
++
++  /* Case (1) above.  We only move the results.  */
++  if (operands[1] == CONSTM1_RTX (vimode)
++      && operands[2] == CONST0_RTX (vimode))
++    emit_move_insn (operands[0], cmp_res);
++  else
++    {
++      rtx src1 = gen_reg_rtx (vimode);
++      rtx src2 = gen_reg_rtx (vimode);
++      rtx mask = gen_reg_rtx (vimode);
++      rtx bsel;
++
++      /* Move the vector result to use it as a mask.  */
++      emit_move_insn (mask, cmp_res);
++
++      if (register_operand (operands[1], mode))
++	{
++	  rtx xop1 = operands[1];
++	  if (mode != vimode)
++	    {
++	      xop1 = gen_reg_rtx (vimode);
++	      emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0));
++	    }
++	  emit_move_insn (src1, xop1);
++	}
++      else
++	{
++	  gcc_assert (operands[1] == CONSTM1_RTX (vimode));
++	  /* Case (2) if the below doesn't move the mask to src2.  */
++	  emit_move_insn (src1, mask);
++	}
++
++      if (register_operand (operands[2], mode))
++	{
++	  rtx xop2 = operands[2];
++	  if (mode != vimode)
++	    {
++	      xop2 = gen_reg_rtx (vimode);
++	      emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0));
++	    }
++	  emit_move_insn (src2, xop2);
++	}
++      else
++	{
++	  gcc_assert (operands[2] == CONST0_RTX (mode));
++	  /* Case (3) if the above didn't move the mask to src1.  */
++	  emit_move_insn (src2, mask);
++	}
++
++      /* We deal with case (4) if the mask wasn't moved to either src1 or src2.
++	 In any case, we eventually do vector mask-based copy.  */
++      bsel = gen_rtx_IOR (vimode,
++			  gen_rtx_AND (vimode,
++				       gen_rtx_NOT (vimode, mask), src2),
++			  gen_rtx_AND (vimode, mask, src1));
++      /* The result is placed back to a register with the mask.  */
++      emit_insn (gen_rtx_SET (mask, bsel));
++      emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0));
++    }
++}
++
++void
++loongarch_expand_vec_cond_mask_expr (machine_mode mode, machine_mode vimode,
++				    rtx *operands)
++{
++  rtx cmp_res = operands[3];
++
++  /* We handle the following cases:
++     1) r = a CMP b ? -1 : 0
++     2) r = a CMP b ? -1 : v
++     3) r = a CMP b ?  v : 0
++     4) r = a CMP b ? v1 : v2  */
++
++  /* Case (1) above.  We only move the results.  */
++  if (operands[1] == CONSTM1_RTX (vimode)
++      && operands[2] == CONST0_RTX (vimode))
++    emit_move_insn (operands[0], cmp_res);
++  else
++    {
++      rtx src1 = gen_reg_rtx (vimode);
++      rtx src2 = gen_reg_rtx (vimode);
++      rtx mask = gen_reg_rtx (vimode);
++      rtx bsel;
++
++      /* Move the vector result to use it as a mask.  */
++      emit_move_insn (mask, cmp_res);
++
++      if (register_operand (operands[1], mode))
++	{
++	  rtx xop1 = operands[1];
++	  if (mode != vimode)
++	    {
++	      xop1 = gen_reg_rtx (vimode);
++	      emit_move_insn (xop1, gen_rtx_SUBREG (vimode, operands[1], 0));
++	    }
++	  emit_move_insn (src1, xop1);
++	}
++      else
++	{
++	  gcc_assert (operands[1] == CONSTM1_RTX (vimode));
++	  /* Case (2) if the below doesn't move the mask to src2.  */
++	  emit_move_insn (src1, mask);
++	}
++
++      if (register_operand (operands[2], mode))
++	{
++	  rtx xop2 = operands[2];
++	  if (mode != vimode)
++	    {
++	      xop2 = gen_reg_rtx (vimode);
++	      emit_move_insn (xop2, gen_rtx_SUBREG (vimode, operands[2], 0));
++	    }
++	  emit_move_insn (src2, xop2);
++	}
++      else
++	{
++	  gcc_assert (operands[2] == CONST0_RTX (mode));
++	  /* Case (3) if the above didn't move the mask to src1.  */
++	  emit_move_insn (src2, mask);
++	}
++
++      /* We deal with case (4) if the mask wasn't moved to either src1 or src2.
++	 In any case, we eventually do vector mask-based copy.  */
++      bsel = gen_rtx_IOR (vimode,
++			  gen_rtx_AND (vimode,
++				       gen_rtx_NOT (vimode, mask), src2),
++			  gen_rtx_AND (vimode, mask, src1));
++      /* The result is placed back to a register with the mask.  */
++      emit_insn (gen_rtx_SET (mask, bsel));
++      emit_move_insn (operands[0], gen_rtx_SUBREG (mode, mask, 0));
++    }
++}
++
++/* Expand integer vector comparison */
++bool
++loongarch_expand_vec_cmp (rtx operands[])
++{
++
++  rtx_code code = GET_CODE (operands[1]);
++  loongarch_expand_lsx_cmp (operands[0], code, operands[2], operands[3]);
++  return true;
++}
++
++/* Implement TARGET_CASE_VALUES_THRESHOLD.  */
++
++unsigned int
++loongarch_case_values_threshold (void)
++{
++  return default_case_values_threshold ();
++}
++
++/* Implement TARGET_SPILL_CLASS.  */
++
++static reg_class_t
++loongarch_spill_class (reg_class_t rclass ATTRIBUTE_UNUSED,
++		       machine_mode mode ATTRIBUTE_UNUSED)
++{
++  return NO_REGS;
++}
++
++/* Implement TARGET_PROMOTE_FUNCTION_MODE.  */
++
++/* This function is equivalent to default_promote_function_mode_always_promote
++   except that it returns a promoted mode even if type is NULL_TREE.  This is
++   needed by libcalls which have no type (only a mode) such as fixed conversion
++   routines that take a signed or unsigned char/short argument and convert it
++   to a fixed type.  */
++
++static machine_mode
++loongarch_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
++				 machine_mode mode,
++				 int *punsignedp ATTRIBUTE_UNUSED,
++				 const_tree fntype ATTRIBUTE_UNUSED,
++				 int for_return ATTRIBUTE_UNUSED)
++{
++  int unsignedp;
++
++  if (type != NULL_TREE)
++    return promote_mode (type, mode, punsignedp);
++
++  unsignedp = *punsignedp;
++  PROMOTE_MODE (mode, unsignedp, type);
++  *punsignedp = unsignedp;
++  return mode;
++}
++
++/* Implement TARGET_STARTING_FRAME_OFFSET.  See loongarch_compute_frame_info
++   for details about the frame layout.  */
++
++static HOST_WIDE_INT
++loongarch_starting_frame_offset (void)
++{
++  if (FRAME_GROWS_DOWNWARD)
++    return 0;
++  return crtl->outgoing_args_size;
++}
++
++/* A subroutine of loongarch_build_signbit_mask.  If VECT is true,
++   then replicate the value for all elements of the vector
++   register.  */
++
++rtx
++loongarch_build_const_vector (machine_mode mode, bool vect, rtx value)
++{
++  int i, n_elt;
++  rtvec v;
++  machine_mode scalar_mode;
++
++  switch (mode)
++    {
++    case E_V32QImode:
++    case E_V16QImode:
++    case E_V32HImode:
++    case E_V16HImode:
++    case E_V8HImode:
++    case E_V8SImode:
++    case E_V4SImode:
++    case E_V8DImode:
++    case E_V4DImode:
++    case E_V2DImode:
++      gcc_assert (vect);
++      /* FALLTHRU */
++    case E_V8SFmode:
++    case E_V4SFmode:
++    case E_V8DFmode:
++    case E_V4DFmode:
++    case E_V2DFmode:
++      n_elt = GET_MODE_NUNITS (mode);
++      v = rtvec_alloc (n_elt);
++      scalar_mode = GET_MODE_INNER (mode);
++
++      RTVEC_ELT (v, 0) = value;
++
++      for (i = 1; i < n_elt; ++i)
++	RTVEC_ELT (v, i) = vect ? value : CONST0_RTX (scalar_mode);
++
++      return gen_rtx_CONST_VECTOR (mode, v);
++
++    default:
++      gcc_unreachable ();
++    }
++}
++
++/* Create a mask for the sign bit in MODE
++   for a register.  If VECT is true, then replicate the mask for
++   all elements of the vector register.  If INVERT is true, then create
++   a mask excluding the sign bit.  */
++
++rtx
++loongarch_build_signbit_mask (machine_mode mode, bool vect, bool invert)
++{
++  machine_mode vec_mode, imode;
++  wide_int w;
++  rtx mask, v;
++
++  switch (mode)
++    {
++    case E_V16SImode:
++    case E_V16SFmode:
++    case E_V8SImode:
++    case E_V4SImode:
++    case E_V8SFmode:
++    case E_V4SFmode:
++      vec_mode = mode;
++      imode = SImode;
++      break;
++
++    case E_V8DImode:
++    case E_V4DImode:
++    case E_V2DImode:
++    case E_V8DFmode:
++    case E_V4DFmode:
++    case E_V2DFmode:
++      vec_mode = mode;
++      imode = DImode;
++      break;
++
++    case E_TImode:
++    case E_TFmode:
++      vec_mode = VOIDmode;
++      imode = TImode;
++      break;
++
++    default:
++      gcc_unreachable ();
++    }
++
++  machine_mode inner_mode = GET_MODE_INNER (mode);
++  w = wi::set_bit_in_zero (GET_MODE_BITSIZE (inner_mode) - 1,
++			   GET_MODE_BITSIZE (inner_mode));
++  if (invert)
++    w = wi::bit_not (w);
++
++  /* Force this value into the low part of a fp vector constant.  */
++  mask = immed_wide_int_const (w, imode);
++  mask = gen_lowpart (inner_mode, mask);
++
++  if (vec_mode == VOIDmode)
++    return force_reg (inner_mode, mask);
++
++  v = loongarch_build_const_vector (vec_mode, vect, mask);
++  return force_reg (vec_mode, v);
++}
++
++static bool
++loongarch_builtin_support_vector_misalignment (machine_mode mode,
++					       const_tree type,
++					       int misalignment,
++					       bool is_packed)
++{
++  if (ISA_HAS_LSX && STRICT_ALIGNMENT)
++    {
++      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
++	return false;
++      if (misalignment == -1)
++	return false;
++    }
++  return default_builtin_support_vector_misalignment (mode, type, misalignment,
++						      is_packed);
++}
++
++/* Initialize the GCC target structure.  */
++#undef TARGET_ASM_ALIGNED_HI_OP
++#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
++#undef TARGET_ASM_ALIGNED_SI_OP
++#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
++#undef TARGET_ASM_ALIGNED_DI_OP
++#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
++
++#undef TARGET_OPTION_OVERRIDE
++#define TARGET_OPTION_OVERRIDE loongarch_option_override
++
++#undef TARGET_LEGITIMIZE_ADDRESS
++#define TARGET_LEGITIMIZE_ADDRESS loongarch_legitimize_address
++
++#undef TARGET_ASM_SELECT_RTX_SECTION
++#define TARGET_ASM_SELECT_RTX_SECTION loongarch_select_rtx_section
++#undef TARGET_ASM_FUNCTION_RODATA_SECTION
++#define TARGET_ASM_FUNCTION_RODATA_SECTION loongarch_function_rodata_section
++
++#undef TARGET_SCHED_INIT
++#define TARGET_SCHED_INIT loongarch_sched_init
++#undef TARGET_SCHED_REORDER
++#define TARGET_SCHED_REORDER loongarch_sched_reorder
++#undef TARGET_SCHED_REORDER2
++#define TARGET_SCHED_REORDER2 loongarch_sched_reorder2
++#undef TARGET_SCHED_VARIABLE_ISSUE
++#define TARGET_SCHED_VARIABLE_ISSUE loongarch_variable_issue
++#undef TARGET_SCHED_ADJUST_COST
++#define TARGET_SCHED_ADJUST_COST loongarch_adjust_cost
++#undef TARGET_SCHED_ISSUE_RATE
++#define TARGET_SCHED_ISSUE_RATE loongarch_issue_rate
++#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
++#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
++  loongarch_multipass_dfa_lookahead
++
++#undef TARGET_FUNCTION_OK_FOR_SIBCALL
++#define TARGET_FUNCTION_OK_FOR_SIBCALL loongarch_function_ok_for_sibcall
++
++#undef TARGET_VALID_POINTER_MODE
++#define TARGET_VALID_POINTER_MODE loongarch_valid_pointer_mode
++#undef TARGET_REGISTER_MOVE_COST
++#define TARGET_REGISTER_MOVE_COST loongarch_register_move_cost
++#undef TARGET_MEMORY_MOVE_COST
++#define TARGET_MEMORY_MOVE_COST loongarch_memory_move_cost
++#undef TARGET_RTX_COSTS
++#define TARGET_RTX_COSTS loongarch_rtx_costs
++#undef TARGET_ADDRESS_COST
++#define TARGET_ADDRESS_COST loongarch_address_cost
++#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
++#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
++  loongarch_builtin_vectorization_cost
++
++
++#undef TARGET_IN_SMALL_DATA_P
++#define TARGET_IN_SMALL_DATA_P loongarch_in_small_data_p
++
++#undef TARGET_PREFERRED_RELOAD_CLASS
++#define TARGET_PREFERRED_RELOAD_CLASS loongarch_preferred_reload_class
++
++#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
++#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
++
++#undef TARGET_EXPAND_BUILTIN_VA_START
++#define TARGET_EXPAND_BUILTIN_VA_START loongarch_va_start
++
++#undef TARGET_PROMOTE_FUNCTION_MODE
++#define TARGET_PROMOTE_FUNCTION_MODE loongarch_promote_function_mode
++#undef TARGET_RETURN_IN_MEMORY
++#define TARGET_RETURN_IN_MEMORY loongarch_return_in_memory
++
++#undef TARGET_FUNCTION_VALUE
++#define TARGET_FUNCTION_VALUE loongarch_function_value
++#undef TARGET_LIBCALL_VALUE
++#define TARGET_LIBCALL_VALUE loongarch_libcall_value
++
++#undef TARGET_ASM_OUTPUT_MI_THUNK
++#define TARGET_ASM_OUTPUT_MI_THUNK loongarch_output_mi_thunk
++#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
++#define TARGET_ASM_CAN_OUTPUT_MI_THUNK \
++  hook_bool_const_tree_hwi_hwi_const_tree_true
++
++#undef TARGET_PRINT_OPERAND
++#define TARGET_PRINT_OPERAND loongarch_print_operand
++#undef TARGET_PRINT_OPERAND_ADDRESS
++#define TARGET_PRINT_OPERAND_ADDRESS loongarch_print_operand_address
++#undef TARGET_PRINT_OPERAND_PUNCT_VALID_P
++#define TARGET_PRINT_OPERAND_PUNCT_VALID_P \
++  loongarch_print_operand_punct_valid_p
++
++#undef TARGET_SETUP_INCOMING_VARARGS
++#define TARGET_SETUP_INCOMING_VARARGS loongarch_setup_incoming_varargs
++#undef TARGET_STRICT_ARGUMENT_NAMING
++#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
++#undef TARGET_MUST_PASS_IN_STACK
++#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
++#undef TARGET_PASS_BY_REFERENCE
++#define TARGET_PASS_BY_REFERENCE loongarch_pass_by_reference
++#undef TARGET_ARG_PARTIAL_BYTES
++#define TARGET_ARG_PARTIAL_BYTES loongarch_arg_partial_bytes
++#undef TARGET_FUNCTION_ARG
++#define TARGET_FUNCTION_ARG loongarch_function_arg
++#undef TARGET_FUNCTION_ARG_ADVANCE
++#define TARGET_FUNCTION_ARG_ADVANCE loongarch_function_arg_advance
++#undef TARGET_FUNCTION_ARG_BOUNDARY
++#define TARGET_FUNCTION_ARG_BOUNDARY loongarch_function_arg_boundary
++
++#undef TARGET_VECTOR_MODE_SUPPORTED_P
++#define TARGET_VECTOR_MODE_SUPPORTED_P loongarch_vector_mode_supported_p
++
++#undef TARGET_SCALAR_MODE_SUPPORTED_P
++#define TARGET_SCALAR_MODE_SUPPORTED_P loongarch_scalar_mode_supported_p
++
++#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
++#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE loongarch_preferred_simd_mode
++
++#undef TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES
++#define TARGET_VECTORIZE_AUTOVECTORIZE_VECTOR_MODES \
++  loongarch_autovectorize_vector_modes
++
++#undef TARGET_INIT_BUILTINS
+ #define TARGET_INIT_BUILTINS loongarch_init_builtins
+ #undef TARGET_BUILTIN_DECL
+ #define TARGET_BUILTIN_DECL loongarch_builtin_decl
+@@ -7006,6 +8886,14 @@ loongarch_set_handled_components (sbitmap components)
+ 
+ #undef TARGET_MAX_ANCHOR_OFFSET
+ #define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
++#undef TARGET_VECTORIZE_VEC_PERM_CONST
++#define TARGET_VECTORIZE_VEC_PERM_CONST loongarch_vectorize_vec_perm_const
++
++#undef TARGET_SCHED_REASSOCIATION_WIDTH
++#define TARGET_SCHED_REASSOCIATION_WIDTH loongarch_sched_reassociation_width
++
++#undef TARGET_CASE_VALUES_THRESHOLD
++#define TARGET_CASE_VALUES_THRESHOLD loongarch_case_values_threshold
+ 
+ #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+ #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv
+@@ -7024,6 +8912,10 @@ loongarch_set_handled_components (sbitmap components)
+ #undef TARGET_MODES_TIEABLE_P
+ #define TARGET_MODES_TIEABLE_P loongarch_modes_tieable_p
+ 
++#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
++#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
++  loongarch_hard_regno_call_part_clobbered
++
+ #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
+ #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 2
+ 
+@@ -7074,6 +8966,10 @@ loongarch_set_handled_components (sbitmap components)
+ #define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS \
+   loongarch_set_handled_components
+ 
++#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
++#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
++  loongarch_builtin_support_vector_misalignment
++
+ struct gcc_target targetm = TARGET_INITIALIZER;
+ 
+ #include "gt-loongarch.h"
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index e0c9c9439..c3ebea2f2 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -23,6 +23,8 @@ along with GCC; see the file COPYING3.  If not see
+ 
+ #include "config/loongarch/loongarch-opts.h"
+ 
++#define TARGET_SUPPORTS_WIDE_INT 1
++
+ /* Macros to silence warnings about numbers being signed in traditional
+    C and unsigned in ISO C when compiled on 32-bit hosts.  */
+ 
+@@ -179,6 +181,11 @@ along with GCC; see the file COPYING3.  If not see
+ #define MIN_UNITS_PER_WORD 4
+ #endif
+ 
++/* Width of a LSX vector register in bytes.  */
++#define UNITS_PER_LSX_REG 16
++/* Width of a LSX vector register in bits.  */
++#define BITS_PER_LSX_REG (UNITS_PER_LSX_REG * BITS_PER_UNIT)
++
+ /* For LARCH, width of a floating point register.  */
+ #define UNITS_PER_FPREG (TARGET_DOUBLE_FLOAT ? 8 : 4)
+ 
+@@ -241,8 +248,10 @@ along with GCC; see the file COPYING3.  If not see
+ #define STRUCTURE_SIZE_BOUNDARY 8
+ 
+ /* There is no point aligning anything to a rounder boundary than
+-   LONG_DOUBLE_TYPE_SIZE.  */
+-#define BIGGEST_ALIGNMENT (LONG_DOUBLE_TYPE_SIZE)
++   LONG_DOUBLE_TYPE_SIZE, unless LSX is enabled, in which case the
++   biggest alignment is BITS_PER_LSX_REG.  */
++#define BIGGEST_ALIGNMENT \
++  (ISA_HAS_LSX ? BITS_PER_LSX_REG : LONG_DOUBLE_TYPE_SIZE)
+ 
+ /* All accesses must be aligned.  */
+ #define STRICT_ALIGNMENT (TARGET_STRICT_ALIGN)
+@@ -378,6 +387,9 @@ along with GCC; see the file COPYING3.  If not see
+ #define FP_REG_FIRST 32
+ #define FP_REG_LAST 63
+ #define FP_REG_NUM (FP_REG_LAST - FP_REG_FIRST + 1)
++#define LSX_REG_FIRST FP_REG_FIRST
++#define LSX_REG_LAST  FP_REG_LAST
++#define LSX_REG_NUM   FP_REG_NUM
+ 
+ /* The DWARF 2 CFA column which tracks the return address from a
+    signal handler context.  This means that to maintain backwards
+@@ -395,8 +407,11 @@ along with GCC; see the file COPYING3.  If not see
+   ((unsigned int) ((int) (REGNO) - FP_REG_FIRST) < FP_REG_NUM)
+ #define FCC_REG_P(REGNO) \
+   ((unsigned int) ((int) (REGNO) - FCC_REG_FIRST) < FCC_REG_NUM)
++#define LSX_REG_P(REGNO) \
++  ((unsigned int) ((int) (REGNO) - LSX_REG_FIRST) < LSX_REG_NUM)
+ 
+ #define FP_REG_RTX_P(X) (REG_P (X) && FP_REG_P (REGNO (X)))
++#define LSX_REG_RTX_P(X) (REG_P (X) && LSX_REG_P (REGNO (X)))
+ 
+ /* Select a register mode required for caller save of hard regno REGNO.  */
+ #define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
+@@ -577,6 +592,11 @@ enum reg_class
+ #define IMM12_OPERAND(VALUE) \
+   ((unsigned HOST_WIDE_INT) (VALUE) + IMM_REACH / 2 < IMM_REACH)
+ 
++/* True if VALUE is a signed 13-bit number.  */
++
++#define IMM13_OPERAND(VALUE) \
++  ((unsigned HOST_WIDE_INT) (VALUE) + 0x1000 < 0x2000)
++
+ /* True if VALUE is a signed 16-bit number.  */
+ 
+ #define IMM16_OPERAND(VALUE) \
+@@ -706,6 +726,13 @@ enum reg_class
+ #define FP_ARG_FIRST (FP_REG_FIRST + 0)
+ #define FP_ARG_LAST (FP_ARG_FIRST + MAX_ARGS_IN_REGISTERS - 1)
+ 
++/* True if MODE is vector and supported in a LSX vector register.  */
++#define LSX_SUPPORTED_MODE_P(MODE)			\
++  (ISA_HAS_LSX						\
++   && GET_MODE_SIZE (MODE) == UNITS_PER_LSX_REG		\
++   && (GET_MODE_CLASS (MODE) == MODE_VECTOR_INT		\
++       || GET_MODE_CLASS (MODE) == MODE_VECTOR_FLOAT))
++
+ /* 1 if N is a possible register number for function argument passing.
+    We have no FP argument registers when soft-float.  */
+ 
+@@ -926,7 +953,39 @@ typedef struct {
+   { "s7",	30 + GP_REG_FIRST },					\
+   { "s8",	31 + GP_REG_FIRST },					\
+   { "v0",	 4 + GP_REG_FIRST },					\
+-  { "v1",	 5 + GP_REG_FIRST }					\
++  { "v1",	 5 + GP_REG_FIRST },					\
++  { "vr0",	 0 + FP_REG_FIRST },					\
++  { "vr1",	 1 + FP_REG_FIRST },					\
++  { "vr2",	 2 + FP_REG_FIRST },					\
++  { "vr3",	 3 + FP_REG_FIRST },					\
++  { "vr4",	 4 + FP_REG_FIRST },					\
++  { "vr5",	 5 + FP_REG_FIRST },					\
++  { "vr6",	 6 + FP_REG_FIRST },					\
++  { "vr7",	 7 + FP_REG_FIRST },					\
++  { "vr8",	 8 + FP_REG_FIRST },					\
++  { "vr9",	 9 + FP_REG_FIRST },					\
++  { "vr10",	10 + FP_REG_FIRST },					\
++  { "vr11",	11 + FP_REG_FIRST },					\
++  { "vr12",	12 + FP_REG_FIRST },					\
++  { "vr13",	13 + FP_REG_FIRST },					\
++  { "vr14",	14 + FP_REG_FIRST },					\
++  { "vr15",	15 + FP_REG_FIRST },					\
++  { "vr16",	16 + FP_REG_FIRST },					\
++  { "vr17",	17 + FP_REG_FIRST },					\
++  { "vr18",	18 + FP_REG_FIRST },					\
++  { "vr19",	19 + FP_REG_FIRST },					\
++  { "vr20",	20 + FP_REG_FIRST },					\
++  { "vr21",	21 + FP_REG_FIRST },					\
++  { "vr22",	22 + FP_REG_FIRST },					\
++  { "vr23",	23 + FP_REG_FIRST },					\
++  { "vr24",	24 + FP_REG_FIRST },					\
++  { "vr25",	25 + FP_REG_FIRST },					\
++  { "vr26",	26 + FP_REG_FIRST },					\
++  { "vr27",	27 + FP_REG_FIRST },					\
++  { "vr28",	28 + FP_REG_FIRST },					\
++  { "vr29",	29 + FP_REG_FIRST },					\
++  { "vr30",	30 + FP_REG_FIRST },					\
++  { "vr31",	31 + FP_REG_FIRST }					\
+ }
+ 
+ /* Globalizing directive for a label.  */
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 2d269794f..fb3828262 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -158,11 +158,12 @@
+    const,signext,pick_ins,logical,arith,sll0,andi,shift_shift"
+   (const_string "unknown"))
+ 
+-(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor"
++(define_attr "alu_type" "unknown,add,sub,not,nor,and,or,xor,simd_add"
+   (const_string "unknown"))
+ 
+ ;; Main data type used by the insn
+-(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC"
++(define_attr "mode" "unknown,none,QI,HI,SI,DI,TI,SF,DF,TF,FCC,
++  V2DI,V4SI,V8HI,V16QI,V2DF,V4SF"
+   (const_string "unknown"))
+ 
+ ;; True if the main data type is twice the size of a word.
+@@ -234,7 +235,12 @@
+    prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical,
+    shift,slt,signext,clz,trap,imul,idiv,move,
+    fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,flogb,fneg,fcmp,fcopysign,fcvt,
+-   fscaleb,fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost"
++   fscaleb,fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost,
++   simd_div,simd_fclass,simd_flog2,simd_fadd,simd_fcvt,simd_fmul,simd_fmadd,
++   simd_fdiv,simd_bitins,simd_bitmov,simd_insert,simd_sld,simd_mul,simd_fcmp,
++   simd_fexp2,simd_int_arith,simd_bit,simd_shift,simd_splat,simd_fill,
++   simd_permute,simd_shf,simd_sat,simd_pcnt,simd_copy,simd_branch,simd_clsx,
++   simd_fminmax,simd_logic,simd_move,simd_load,simd_store"
+   (cond [(eq_attr "jirl" "!unset") (const_string "call")
+ 	 (eq_attr "got" "load") (const_string "load")
+ 
+@@ -414,11 +420,20 @@
+ 
+ ;; This attribute gives the upper-case mode name for one unit of a
+ ;; floating-point mode or vector mode.
+-(define_mode_attr UNITMODE [(SF "SF") (DF "DF")])
++(define_mode_attr UNITMODE [(SF "SF") (DF "DF") (V2SF "SF") (V4SF "SF")
++			    (V16QI "QI") (V8HI "HI") (V4SI "SI") (V2DI "DI")
++			    (V2DF "DF")])
++
++;; As above, but in lower case.
++(define_mode_attr unitmode [(SF "sf") (DF "df") (V2SF "sf") (V4SF "sf")
++			    (V16QI "qi") (V8QI "qi") (V8HI "hi") (V4HI "hi")
++			    (V4SI "si") (V2SI "si") (V2DI "di") (V2DF "df")])
+ 
+ ;; This attribute gives the integer mode that has half the size of
+ ;; the controlling mode.
+-(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
++(define_mode_attr HALFMODE [(DF "SI") (DI "SI") (V2SF "SI")
++			    (V2SI "SI") (V4HI "SI") (V8QI "SI")
++			    (TF "DI")])
+ 
+ ;; This attribute gives the integer mode that has the same size of a
+ ;; floating-point mode.
+@@ -445,6 +460,18 @@
+ ;; from the same template.
+ (define_code_iterator any_div [div udiv mod umod])
+ 
++;; This code iterator allows addition and subtraction to be generated
++;; from the same template.
++(define_code_iterator addsub [plus minus])
++
++;; This code iterator allows addition and multiplication to be generated
++;; from the same template.
++(define_code_iterator addmul [plus mult])
++
++;; This code iterator allows addition subtraction and multiplication to be
++;; generated from the same template
++(define_code_iterator addsubmul [plus minus mult])
++
+ ;; This code iterator allows all native floating-point comparisons to be
+ ;; generated from the same template.
+ (define_code_iterator fcond [unordered uneq unlt unle eq lt le
+@@ -684,7 +711,6 @@
+   [(set_attr "alu_type" "sub")
+    (set_attr "mode" "")])
+ 
+-
+ (define_insn "*subsi3_extended"
+   [(set (match_operand:DI 0 "register_operand" "= r")
+ 	(sign_extend:DI
+@@ -1228,7 +1254,7 @@
+   "fmina.\t%0,%1,%2"
+   [(set_attr "type" "fmove")
+    (set_attr "mode" "")])
+-
++
+ ;;
+ ;;  ....................
+ ;;
+@@ -2541,7 +2567,6 @@
+   [(set_attr "type" "shift,shift")
+    (set_attr "mode" "")])
+ 
+-
+ ;; The following templates were added to generate "bstrpick.d + alsl.d"
+ ;; instruction pairs.
+ ;; It is required that the values of const_immalsl_operand and
+@@ -3610,6 +3635,9 @@
+ (include "generic.md")
+ (include "la464.md")
+ 
++; The LoongArch SX Instructions.
++(include "lsx.md")
++
+ (define_c_enum "unspec" [
+   UNSPEC_ADDRESS_FIRST
+ ])
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index f96d32769..8cc0c1d0b 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -153,6 +153,10 @@ mbranch-cost=
+ Target RejectNegative Joined UInteger Var(loongarch_branch_cost)
+ -mbranch-cost=COST	Set the cost of branches to roughly COST instructions.
+ 
++mmemvec-cost=
++Target RejectNegative Joined UInteger Var(loongarch_vector_access_cost) IntegerRange(1, 5)
++-mmemvec-cost=COST	Set the cost of vector memory access instructions.
++
+ mcheck-zero-division
+ Target Mask(CHECK_ZERO_DIV)
+ Trap on integer divide by zero.
+diff --git a/gcc/config/loongarch/lsx.md b/gcc/config/loongarch/lsx.md
+new file mode 100644
+index 000000000..fb4d228ba
+--- /dev/null
++++ b/gcc/config/loongarch/lsx.md
+@@ -0,0 +1,4467 @@
++;; Machine Description for LARCH Loongson SX ASE
++;;
++;; Copyright (C) 2018 Free Software Foundation, Inc.
++;;
++;; This file is part of GCC.
++;;
++;; GCC is free software; you can redistribute it and/or modify
++;; it under the terms of the GNU General Public License as published by
++;; the Free Software Foundation; either version 3, or (at your option)
++;; any later version.
++;;
++;; GCC is distributed in the hope that it will be useful,
++;; but WITHOUT ANY WARRANTY; without even the implied warranty of
++;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
++;; GNU General Public License for more details.
++;;
++;; You should have received a copy of the GNU General Public License
++;; along with GCC; see the file COPYING3.  If not see
++;; .
++;;
++
++(define_c_enum "unspec" [
++  UNSPEC_LSX_ABSD_S
++  UNSPEC_LSX_VABSD_U
++  UNSPEC_LSX_VAVG_S
++  UNSPEC_LSX_VAVG_U
++  UNSPEC_LSX_VAVGR_S
++  UNSPEC_LSX_VAVGR_U
++  UNSPEC_LSX_VBITCLR
++  UNSPEC_LSX_VBITCLRI
++  UNSPEC_LSX_VBITREV
++  UNSPEC_LSX_VBITREVI
++  UNSPEC_LSX_VBITSET
++  UNSPEC_LSX_VBITSETI
++  UNSPEC_LSX_BRANCH_V
++  UNSPEC_LSX_BRANCH
++  UNSPEC_LSX_VFCMP_CAF
++  UNSPEC_LSX_VFCLASS
++  UNSPEC_LSX_VFCMP_CUNE
++  UNSPEC_LSX_VFCVT
++  UNSPEC_LSX_VFCVTH
++  UNSPEC_LSX_VFCVTL
++  UNSPEC_LSX_VFLOGB
++  UNSPEC_LSX_VFRECIP
++  UNSPEC_LSX_VFRINT
++  UNSPEC_LSX_VFRSQRT
++  UNSPEC_LSX_VFCMP_SAF
++  UNSPEC_LSX_VFCMP_SEQ
++  UNSPEC_LSX_VFCMP_SLE
++  UNSPEC_LSX_VFCMP_SLT
++  UNSPEC_LSX_VFCMP_SNE
++  UNSPEC_LSX_VFCMP_SOR
++  UNSPEC_LSX_VFCMP_SUEQ
++  UNSPEC_LSX_VFCMP_SULE
++  UNSPEC_LSX_VFCMP_SULT
++  UNSPEC_LSX_VFCMP_SUN
++  UNSPEC_LSX_VFCMP_SUNE
++  UNSPEC_LSX_VFTINT_S
++  UNSPEC_LSX_VFTINT_U
++  UNSPEC_LSX_VSAT_S
++  UNSPEC_LSX_VSAT_U
++  UNSPEC_LSX_VREPLVEI
++  UNSPEC_LSX_VSRAR
++  UNSPEC_LSX_VSRARI
++  UNSPEC_LSX_VSRLR
++  UNSPEC_LSX_VSRLRI
++  UNSPEC_LSX_VSHUF
++  UNSPEC_LSX_VMUH_S
++  UNSPEC_LSX_VMUH_U
++  UNSPEC_LSX_VEXTW_S
++  UNSPEC_LSX_VEXTW_U
++  UNSPEC_LSX_VSLLWIL_S
++  UNSPEC_LSX_VSLLWIL_U
++  UNSPEC_LSX_VSRAN
++  UNSPEC_LSX_VSSRAN_S
++  UNSPEC_LSX_VSSRAN_U
++  UNSPEC_LSX_VSRAIN
++  UNSPEC_LSX_VSRAINS_S
++  UNSPEC_LSX_VSRAINS_U
++  UNSPEC_LSX_VSRARN
++  UNSPEC_LSX_VSRLN
++  UNSPEC_LSX_VSRLRN
++  UNSPEC_LSX_VSSRLRN_U
++  UNSPEC_LSX_VFRSTPI
++  UNSPEC_LSX_VFRSTP
++  UNSPEC_LSX_VSHUF4I
++  UNSPEC_LSX_VBSRL_V
++  UNSPEC_LSX_VBSLL_V
++  UNSPEC_LSX_VEXTRINS
++  UNSPEC_LSX_VMSKLTZ
++  UNSPEC_LSX_VSIGNCOV
++  UNSPEC_LSX_VFTINTRNE
++  UNSPEC_LSX_VFTINTRP
++  UNSPEC_LSX_VFTINTRM
++  UNSPEC_LSX_VFTINT_W_D
++  UNSPEC_LSX_VFFINT_S_L
++  UNSPEC_LSX_VFTINTRZ_W_D
++  UNSPEC_LSX_VFTINTRP_W_D
++  UNSPEC_LSX_VFTINTRM_W_D
++  UNSPEC_LSX_VFTINTRNE_W_D
++  UNSPEC_LSX_VFTINTL_L_S
++  UNSPEC_LSX_VFFINTH_D_W
++  UNSPEC_LSX_VFFINTL_D_W
++  UNSPEC_LSX_VFTINTRZL_L_S
++  UNSPEC_LSX_VFTINTRZH_L_S
++  UNSPEC_LSX_VFTINTRPL_L_S
++  UNSPEC_LSX_VFTINTRPH_L_S
++  UNSPEC_LSX_VFTINTRMH_L_S
++  UNSPEC_LSX_VFTINTRML_L_S
++  UNSPEC_LSX_VFTINTRNEL_L_S
++  UNSPEC_LSX_VFTINTRNEH_L_S
++  UNSPEC_LSX_VFTINTH_L_H
++  UNSPEC_LSX_VFRINTRNE_S
++  UNSPEC_LSX_VFRINTRNE_D
++  UNSPEC_LSX_VFRINTRZ_S
++  UNSPEC_LSX_VFRINTRZ_D
++  UNSPEC_LSX_VFRINTRP_S
++  UNSPEC_LSX_VFRINTRP_D
++  UNSPEC_LSX_VFRINTRM_S
++  UNSPEC_LSX_VFRINTRM_D
++  UNSPEC_LSX_VSSRARN_S
++  UNSPEC_LSX_VSSRARN_U
++  UNSPEC_LSX_VSSRLN_U
++  UNSPEC_LSX_VSSRLN
++  UNSPEC_LSX_VSSRLRN
++  UNSPEC_LSX_VLDI
++  UNSPEC_LSX_VSHUF_B
++  UNSPEC_LSX_VLDX
++  UNSPEC_LSX_VSTX
++  UNSPEC_LSX_VEXTL_QU_DU
++  UNSPEC_LSX_VSETEQZ_V
++  UNSPEC_LSX_VADDWEV
++  UNSPEC_LSX_VADDWEV2
++  UNSPEC_LSX_VADDWEV3
++  UNSPEC_LSX_VADDWOD
++  UNSPEC_LSX_VADDWOD2
++  UNSPEC_LSX_VADDWOD3
++  UNSPEC_LSX_VSUBWEV
++  UNSPEC_LSX_VSUBWEV2
++  UNSPEC_LSX_VSUBWOD
++  UNSPEC_LSX_VSUBWOD2
++  UNSPEC_LSX_VMULWEV
++  UNSPEC_LSX_VMULWEV2
++  UNSPEC_LSX_VMULWEV3
++  UNSPEC_LSX_VMULWOD
++  UNSPEC_LSX_VMULWOD2
++  UNSPEC_LSX_VMULWOD3
++  UNSPEC_LSX_VHADDW_Q_D
++  UNSPEC_LSX_VHADDW_QU_DU
++  UNSPEC_LSX_VHSUBW_Q_D
++  UNSPEC_LSX_VHSUBW_QU_DU
++  UNSPEC_LSX_VMADDWEV
++  UNSPEC_LSX_VMADDWEV2
++  UNSPEC_LSX_VMADDWEV3
++  UNSPEC_LSX_VMADDWOD
++  UNSPEC_LSX_VMADDWOD2
++  UNSPEC_LSX_VMADDWOD3
++  UNSPEC_LSX_VROTR
++  UNSPEC_LSX_VADD_Q
++  UNSPEC_LSX_VSUB_Q
++  UNSPEC_LSX_VEXTH_Q_D
++  UNSPEC_LSX_VEXTH_QU_DU
++  UNSPEC_LSX_VMSKGEZ
++  UNSPEC_LSX_VMSKNZ
++  UNSPEC_LSX_VEXTL_Q_D
++  UNSPEC_LSX_VSRLNI
++  UNSPEC_LSX_VSRLRNI
++  UNSPEC_LSX_VSSRLNI
++  UNSPEC_LSX_VSSRLNI2
++  UNSPEC_LSX_VSSRLRNI
++  UNSPEC_LSX_VSSRLRNI2
++  UNSPEC_LSX_VSRANI
++  UNSPEC_LSX_VSRARNI
++  UNSPEC_LSX_VSSRANI
++  UNSPEC_LSX_VSSRANI2
++  UNSPEC_LSX_VSSRARNI
++  UNSPEC_LSX_VSSRARNI2
++  UNSPEC_LSX_VPERMI
++])
++
++;; This attribute gives suffix for integers in VHMODE.
++(define_mode_attr dlsxfmt
++  [(V2DI "q")
++   (V4SI "d")
++   (V8HI "w")
++   (V16QI "h")])
++
++(define_mode_attr dlsxfmt_u
++  [(V2DI "qu")
++   (V4SI "du")
++   (V8HI "wu")
++   (V16QI "hu")])
++
++(define_mode_attr d2lsxfmt
++  [(V4SI "q")
++   (V8HI "d")
++   (V16QI "w")])
++
++(define_mode_attr d2lsxfmt_u
++  [(V4SI "qu")
++   (V8HI "du")
++   (V16QI "wu")])
++
++;; The attribute gives two double modes for vector modes.
++(define_mode_attr VD2MODE
++  [(V4SI "V2DI")
++   (V8HI "V2DI")
++   (V16QI "V4SI")])
++
++;; All vector modes with 128 bits.
++(define_mode_iterator LSX      [V2DF V4SF V2DI V4SI V8HI V16QI])
++
++;; Same as LSX.  Used by vcond to iterate two modes.
++(define_mode_iterator LSX_2    [V2DF V4SF V2DI V4SI V8HI V16QI])
++
++;; Only used for vilvh and splitting insert_d and copy_{u,s}.d.
++(define_mode_iterator LSX_D    [V2DI V2DF])
++
++;; Only used for copy_{u,s}.w and vilvh.
++(define_mode_iterator LSX_W    [V4SI V4SF])
++
++;; Only integer modes.
++(define_mode_iterator ILSX     [V2DI V4SI V8HI V16QI])
++
++;; As ILSX but excludes V16QI.
++(define_mode_iterator ILSX_DWH [V2DI V4SI V8HI])
++
++;; As LSX but excludes V16QI.
++(define_mode_iterator LSX_DWH  [V2DF V4SF V2DI V4SI V8HI])
++
++;; As ILSX but excludes V2DI.
++(define_mode_iterator ILSX_WHB [V4SI V8HI V16QI])
++
++;; Only integer modes equal or larger than a word.
++(define_mode_iterator ILSX_DW  [V2DI V4SI])
++
++;; Only integer modes smaller than a word.
++(define_mode_iterator ILSX_HB  [V8HI V16QI])
++
++;;;; Only integer modes for fixed-point madd_q/maddr_q.
++;;(define_mode_iterator ILSX_WH  [V4SI V8HI])
++
++;; Only floating-point modes.
++(define_mode_iterator FLSX     [V2DF V4SF])
++
++;; Only used for immediate set shuffle elements instruction.
++(define_mode_iterator LSX_WHB_W [V4SI V8HI V16QI V4SF])
++
++;; The attribute gives the integer vector mode with same size.
++(define_mode_attr VIMODE
++  [(V2DF "V2DI")
++   (V4SF "V4SI")
++   (V2DI "V2DI")
++   (V4SI "V4SI")
++   (V8HI "V8HI")
++   (V16QI "V16QI")])
++
++;; The attribute gives half modes for vector modes.
++(define_mode_attr VHMODE
++  [(V8HI "V16QI")
++   (V4SI "V8HI")
++   (V2DI "V4SI")])
++
++;; The attribute gives double modes for vector modes.
++(define_mode_attr VDMODE
++  [(V2DI "V2DI")
++   (V4SI "V2DI")
++   (V8HI "V4SI")
++   (V16QI "V8HI")])
++
++;; The attribute gives half modes with same number of elements for vector modes.
++(define_mode_attr VTRUNCMODE
++  [(V8HI "V8QI")
++   (V4SI "V4HI")
++   (V2DI "V2SI")])
++
++;; Double-sized Vector MODE with same elemet type. "Vector, Enlarged-MODE"
++(define_mode_attr VEMODE
++  [(V4SF "V8SF")
++   (V4SI "V8SI")
++   (V2DI "V4DI")
++   (V2DF "V4DF")])
++
++;; This attribute gives the mode of the result for "vpickve2gr_b, copy_u_b" etc.
++(define_mode_attr VRES
++  [(V2DF "DF")
++   (V4SF "SF")
++   (V2DI "DI")
++   (V4SI "SI")
++   (V8HI "SI")
++   (V16QI "SI")])
++
++;; Only used with LSX_D iterator.
++(define_mode_attr lsx_d
++  [(V2DI "reg_or_0")
++   (V2DF "register")])
++
++;; This attribute gives the integer vector mode with same size.
++(define_mode_attr mode_i
++  [(V2DF "v2di")
++   (V4SF "v4si")
++   (V2DI "v2di")
++   (V4SI "v4si")
++   (V8HI "v8hi")
++   (V16QI "v16qi")])
++
++;; This attribute gives suffix for LSX instructions.
++(define_mode_attr lsxfmt
++  [(V2DF "d")
++   (V4SF "w")
++   (V2DI "d")
++   (V4SI "w")
++   (V8HI "h")
++   (V16QI "b")])
++
++;; This attribute gives suffix for LSX instructions.
++(define_mode_attr lsxfmt_u
++  [(V2DF "du")
++   (V4SF "wu")
++   (V2DI "du")
++   (V4SI "wu")
++   (V8HI "hu")
++   (V16QI "bu")])
++
++;; This attribute gives suffix for integers in VHMODE.
++(define_mode_attr hlsxfmt
++  [(V2DI "w")
++   (V4SI "h")
++   (V8HI "b")])
++
++;; This attribute gives suffix for integers in VHMODE.
++(define_mode_attr hlsxfmt_u
++  [(V2DI "wu")
++   (V4SI "hu")
++   (V8HI "bu")])
++
++;; This attribute gives define_insn suffix for LSX instructions that need
++;; distinction between integer and floating point.
++(define_mode_attr lsxfmt_f
++  [(V2DF "d_f")
++   (V4SF "w_f")
++   (V2DI "d")
++   (V4SI "w")
++   (V8HI "h")
++   (V16QI "b")])
++
++(define_mode_attr flsxfmt_f
++  [(V2DF "d_f")
++   (V4SF "s_f")
++   (V2DI "d")
++   (V4SI "w")
++   (V8HI "h")
++   (V16QI "b")])
++
++(define_mode_attr flsxfmt
++  [(V2DF "d")
++   (V4SF "s")
++   (V2DI "d")
++   (V4SI "s")])
++
++(define_mode_attr flsxfrint
++  [(V2DF "d")
++   (V4SF "s")])
++
++(define_mode_attr ilsxfmt
++  [(V2DF "l")
++   (V4SF "w")])
++
++(define_mode_attr ilsxfmt_u
++  [(V2DF "lu")
++   (V4SF "wu")])
++
++;; This is used to form an immediate operand constraint using
++;; "const__operand".
++(define_mode_attr indeximm
++  [(V2DF "0_or_1")
++   (V4SF "0_to_3")
++   (V2DI "0_or_1")
++   (V4SI "0_to_3")
++   (V8HI "uimm3")
++   (V16QI "uimm4")])
++
++;; This attribute represents bitmask needed for vec_merge using
++;; "const__operand".
++(define_mode_attr bitmask
++  [(V2DF "exp_2")
++   (V4SF "exp_4")
++   (V2DI "exp_2")
++   (V4SI "exp_4")
++   (V8HI "exp_8")
++   (V16QI "exp_16")])
++
++;; This attribute is used to form an immediate operand constraint using
++;; "const__operand".
++(define_mode_attr bitimm
++  [(V16QI "uimm3")
++   (V8HI  "uimm4")
++   (V4SI  "uimm5")
++   (V2DI  "uimm6")])
++
++
++(define_int_iterator FRINT_S [UNSPEC_LSX_VFRINTRP_S
++			    UNSPEC_LSX_VFRINTRZ_S
++			    UNSPEC_LSX_VFRINT
++			    UNSPEC_LSX_VFRINTRM_S])
++
++(define_int_iterator FRINT_D [UNSPEC_LSX_VFRINTRP_D
++			    UNSPEC_LSX_VFRINTRZ_D
++			    UNSPEC_LSX_VFRINT
++			    UNSPEC_LSX_VFRINTRM_D])
++
++(define_int_attr frint_pattern_s
++  [(UNSPEC_LSX_VFRINTRP_S  "ceil")
++   (UNSPEC_LSX_VFRINTRZ_S  "btrunc")
++   (UNSPEC_LSX_VFRINT	   "rint")
++   (UNSPEC_LSX_VFRINTRM_S  "floor")])
++
++(define_int_attr frint_pattern_d
++  [(UNSPEC_LSX_VFRINTRP_D  "ceil")
++   (UNSPEC_LSX_VFRINTRZ_D  "btrunc")
++   (UNSPEC_LSX_VFRINT	   "rint")
++   (UNSPEC_LSX_VFRINTRM_D  "floor")])
++
++(define_int_attr frint_suffix
++  [(UNSPEC_LSX_VFRINTRP_S  "rp")
++   (UNSPEC_LSX_VFRINTRP_D  "rp")
++   (UNSPEC_LSX_VFRINTRZ_S  "rz")
++   (UNSPEC_LSX_VFRINTRZ_D  "rz")
++   (UNSPEC_LSX_VFRINT	   "")
++   (UNSPEC_LSX_VFRINTRM_S  "rm")
++   (UNSPEC_LSX_VFRINTRM_D  "rm")])
++
++(define_expand "vec_init"
++  [(match_operand:LSX 0 "register_operand")
++   (match_operand:LSX 1 "")]
++  "ISA_HAS_LSX"
++{
++  loongarch_expand_vector_init (operands[0], operands[1]);
++  DONE;
++})
++
++;; vpickev pattern with implicit type conversion.
++(define_insn "vec_pack_trunc_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(vec_concat:
++	  (truncate:
++	    (match_operand:ILSX_DWH 1 "register_operand" "f"))
++	  (truncate:
++	    (match_operand:ILSX_DWH 2 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vpickev.\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "")])
++
++(define_expand "vec_unpacks_hi_v4sf"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(float_extend:V2DF
++	  (vec_select:V2SF
++	    (match_operand:V4SF 1 "register_operand" "f")
++	    (match_dup 2))))]
++  "ISA_HAS_LSX"
++{
++  operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode,
++      true/*high_p*/);
++})
++
++(define_expand "vec_unpacks_lo_v4sf"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(float_extend:V2DF
++	  (vec_select:V2SF
++	    (match_operand:V4SF 1 "register_operand" "f")
++	    (match_dup 2))))]
++  "ISA_HAS_LSX"
++{
++  operands[2] = loongarch_lsx_vec_parallel_const_half (V4SFmode,
++      false/*high_p*/);
++})
++
++(define_expand "vec_unpacks_hi_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILSX_WHB 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, true/*high_p*/);
++  DONE;
++})
++
++(define_expand "vec_unpacks_lo_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILSX_WHB 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  loongarch_expand_vec_unpack (operands, false/*unsigned_p*/, false/*high_p*/);
++  DONE;
++})
++
++(define_expand "vec_unpacku_hi_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILSX_WHB 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, true/*high_p*/);
++  DONE;
++})
++
++(define_expand "vec_unpacku_lo_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILSX_WHB 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  loongarch_expand_vec_unpack (operands, true/*unsigned_p*/, false/*high_p*/);
++  DONE;
++})
++
++(define_expand "vec_extract"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILSX 1 "register_operand")
++   (match_operand 2 "const__operand")]
++  "ISA_HAS_LSX"
++{
++  if (mode == QImode || mode == HImode)
++    {
++      rtx dest1 = gen_reg_rtx (SImode);
++      emit_insn (gen_lsx_vpickve2gr_ (dest1, operands[1], operands[2]));
++      emit_move_insn (operands[0],
++		      gen_lowpart (mode, dest1));
++    }
++  else
++    emit_insn (gen_lsx_vpickve2gr_ (operands[0], operands[1], operands[2]));
++  DONE;
++})
++
++(define_expand "vec_extract"
++  [(match_operand: 0 "register_operand")
++   (match_operand:FLSX 1 "register_operand")
++   (match_operand 2 "const__operand")]
++  "ISA_HAS_LSX"
++{
++  rtx temp;
++  HOST_WIDE_INT val = INTVAL (operands[2]);
++
++  if (val == 0)
++    temp = operands[1];
++  else
++    {
++      rtx n = GEN_INT (val * GET_MODE_SIZE (mode));
++      temp = gen_reg_rtx (mode);
++      emit_insn (gen_lsx_vbsrl_ (temp, operands[1], n));
++    }
++  emit_insn (gen_lsx_vec_extract_ (operands[0], temp));
++  DONE;
++})
++
++(define_insn_and_split "lsx_vec_extract_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(vec_select:
++	  (match_operand:FLSX 1 "register_operand" "f")
++	  (parallel [(const_int 0)])))]
++  "ISA_HAS_LSX"
++  "#"
++  "&& reload_completed"
++  [(set (match_dup 0) (match_dup 1))]
++{
++  operands[1] = gen_rtx_REG (mode, REGNO (operands[1]));
++}
++  [(set_attr "move_type" "fmove")
++   (set_attr "mode" "")])
++
++(define_expand "vec_set"
++  [(match_operand:ILSX 0 "register_operand")
++   (match_operand: 1 "reg_or_0_operand")
++   (match_operand 2 "const__operand")]
++  "ISA_HAS_LSX"
++{
++  rtx index = GEN_INT (1 << INTVAL (operands[2]));
++  emit_insn (gen_lsx_vinsgr2vr_ (operands[0], operands[1],
++					 operands[0], index));
++  DONE;
++})
++
++(define_expand "vec_set"
++  [(match_operand:FLSX 0 "register_operand")
++   (match_operand: 1 "register_operand")
++   (match_operand 2 "const__operand")]
++  "ISA_HAS_LSX"
++{
++  rtx index = GEN_INT (1 << INTVAL (operands[2]));
++  emit_insn (gen_lsx_vextrins__scalar (operands[0], operands[1],
++						 operands[0], index));
++  DONE;
++})
++
++(define_expand "vec_cmp"
++  [(set (match_operand: 0 "register_operand")
++	(match_operator 1 ""
++	  [(match_operand:LSX 2 "register_operand")
++	   (match_operand:LSX 3 "register_operand")]))]
++  "ISA_HAS_LSX"
++{
++  bool ok = loongarch_expand_vec_cmp (operands);
++  gcc_assert (ok);
++  DONE;
++})
++
++(define_expand "vec_cmpu"
++  [(set (match_operand: 0 "register_operand")
++	(match_operator 1 ""
++	  [(match_operand:ILSX 2 "register_operand")
++	   (match_operand:ILSX 3 "register_operand")]))]
++  "ISA_HAS_LSX"
++{
++  bool ok = loongarch_expand_vec_cmp (operands);
++  gcc_assert (ok);
++  DONE;
++})
++
++(define_expand "vcondu"
++  [(match_operand:LSX 0 "register_operand")
++   (match_operand:LSX 1 "reg_or_m1_operand")
++   (match_operand:LSX 2 "reg_or_0_operand")
++   (match_operator 3 ""
++     [(match_operand:ILSX 4 "register_operand")
++      (match_operand:ILSX 5 "register_operand")])]
++  "ISA_HAS_LSX
++   && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))"
++{
++  loongarch_expand_vec_cond_expr (mode, mode, operands);
++  DONE;
++})
++
++(define_expand "vcond"
++  [(match_operand:LSX 0 "register_operand")
++   (match_operand:LSX 1 "reg_or_m1_operand")
++   (match_operand:LSX 2 "reg_or_0_operand")
++   (match_operator 3 ""
++     [(match_operand:LSX_2 4 "register_operand")
++      (match_operand:LSX_2 5 "register_operand")])]
++  "ISA_HAS_LSX
++   && (GET_MODE_NUNITS (mode) == GET_MODE_NUNITS (mode))"
++{
++  loongarch_expand_vec_cond_expr (mode, mode, operands);
++  DONE;
++})
++
++(define_expand "vcond_mask_"
++  [(match_operand:ILSX 0 "register_operand")
++   (match_operand:ILSX 1 "reg_or_m1_operand")
++   (match_operand:ILSX 2 "reg_or_0_operand")
++   (match_operand:ILSX 3 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  loongarch_expand_vec_cond_mask_expr (mode,
++				      mode, operands);
++  DONE;
++})
++
++(define_insn "lsx_vinsgr2vr_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(vec_merge:ILSX
++	  (vec_duplicate:ILSX
++	    (match_operand: 1 "reg_or_0_operand" "rJ"))
++	  (match_operand:ILSX 2 "register_operand" "0")
++	  (match_operand 3 "const__operand" "")))]
++  "ISA_HAS_LSX"
++{
++  if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode))
++    return "#";
++  else
++    return "vinsgr2vr.\t%w0,%z1,%y3";
++}
++  [(set_attr "type" "simd_insert")
++   (set_attr "mode" "")])
++
++(define_split
++  [(set (match_operand:LSX_D 0 "register_operand")
++	(vec_merge:LSX_D
++	  (vec_duplicate:LSX_D
++	    (match_operand: 1 "_operand"))
++	  (match_operand:LSX_D 2 "register_operand")
++	  (match_operand 3 "const__operand")))]
++  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
++  [(const_int 0)]
++{
++  loongarch_split_lsx_insert_d (operands[0], operands[2], operands[3], operands[1]);
++  DONE;
++})
++
++(define_insn "lsx_vextrins__internal"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++	(vec_merge:LSX
++	  (vec_duplicate:LSX
++	    (vec_select:
++	      (match_operand:LSX 1 "register_operand" "f")
++	      (parallel [(const_int 0)])))
++	  (match_operand:LSX 2 "register_operand" "0")
++	  (match_operand 3 "const__operand" "")))]
++  "ISA_HAS_LSX"
++  "vextrins.\t%w0,%w1,%y3<<4"
++  [(set_attr "type" "simd_insert")
++   (set_attr "mode" "")])
++
++;; Operand 3 is a scalar.
++(define_insn "lsx_vextrins__scalar"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(vec_merge:FLSX
++	  (vec_duplicate:FLSX
++	    (match_operand: 1 "register_operand" "f"))
++	  (match_operand:FLSX 2 "register_operand" "0")
++	  (match_operand 3 "const__operand" "")))]
++  "ISA_HAS_LSX"
++  "vextrins.\t%w0,%w1,%y3<<4"
++  [(set_attr "type" "simd_insert")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vpickve2gr_"
++  [(set (match_operand: 0 "register_operand" "=r")
++	(any_extend:
++	  (vec_select:
++	    (match_operand:ILSX_HB 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const__operand" "")]))))]
++  "ISA_HAS_LSX"
++  "vpickve2gr.\t%0,%w1,%2"
++  [(set_attr "type" "simd_copy")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vpickve2gr_"
++  [(set (match_operand: 0 "register_operand" "=r")
++	(any_extend:
++	  (vec_select:
++	    (match_operand:LSX_W 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const__operand" "")]))))]
++  "ISA_HAS_LSX"
++  "vpickve2gr.\t%0,%w1,%2"
++  [(set_attr "type" "simd_copy")
++   (set_attr "mode" "")])
++
++(define_insn_and_split "lsx_vpickve2gr_du"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(vec_select:DI
++	  (match_operand:V2DI 1 "register_operand" "f")
++	  (parallel [(match_operand 2 "const_0_or_1_operand" "")])))]
++  "ISA_HAS_LSX"
++{
++  if (TARGET_64BIT)
++    return "vpickve2gr.du\t%0,%w1,%2";
++  else
++    return "#";
++}
++  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
++  [(const_int 0)]
++{
++  loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2],
++			      gen_lsx_vpickve2gr_wu);
++  DONE;
++}
++  [(set_attr "type" "simd_copy")
++   (set_attr "mode" "V2DI")])
++
++(define_insn_and_split "lsx_vpickve2gr_"
++  [(set (match_operand: 0 "register_operand" "=r")
++	(vec_select:
++	  (match_operand:LSX_D 1 "register_operand" "f")
++	  (parallel [(match_operand 2 "const__operand" "")])))]
++  "ISA_HAS_LSX"
++{
++  if (TARGET_64BIT)
++    return "vpickve2gr.\t%0,%w1,%2";
++  else
++    return "#";
++}
++  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
++  [(const_int 0)]
++{
++  loongarch_split_lsx_copy_d (operands[0], operands[1], operands[2],
++			      gen_lsx_vpickve2gr_w);
++  DONE;
++}
++  [(set_attr "type" "simd_copy")
++   (set_attr "mode" "")])
++
++
++(define_expand "abs2"
++  [(match_operand:ILSX 0 "register_operand" "=f")
++   (abs:ILSX (match_operand:ILSX 1 "register_operand" "f"))]
++  "ISA_HAS_LSX"
++{
++  if (ISA_HAS_LSX)
++  {
++    emit_insn (gen_vabs2 (operands[0], operands[1]));
++    DONE;
++  }
++  else
++  {
++    rtx reg = gen_reg_rtx (mode);
++    emit_move_insn (reg, CONST0_RTX (mode));
++    emit_insn (gen_lsx_vadda_ (operands[0], operands[1], reg));
++    DONE;
++  }
++})
++
++(define_expand "neg2"
++  [(set (match_operand:ILSX 0 "register_operand")
++	(neg:ILSX (match_operand:ILSX 1 "register_operand")))]
++  "ISA_HAS_LSX"
++{
++  emit_insn (gen_vneg2 (operands[0], operands[1]));
++  DONE;
++})
++
++(define_expand "neg2"
++  [(set (match_operand:FLSX 0 "register_operand")
++	(neg:FLSX (match_operand:FLSX 1 "register_operand")))]
++  "ISA_HAS_LSX"
++{
++  rtx reg = gen_reg_rtx (mode);
++  emit_move_insn (reg, CONST0_RTX (mode));
++  emit_insn (gen_sub3 (operands[0], reg, operands[1]));
++  DONE;
++})
++
++(define_expand "lsx_vrepli"
++  [(match_operand:ILSX 0 "register_operand")
++   (match_operand 1 "const_imm10_operand")]
++  "ISA_HAS_LSX"
++{
++  if (mode == V16QImode)
++    operands[1] = GEN_INT (trunc_int_for_mode (INTVAL (operands[1]),
++					       mode));
++  emit_move_insn (operands[0],
++		  loongarch_gen_const_int_vector (mode, INTVAL (operands[1])));
++  DONE;
++})
++
++(define_expand "vec_perm"
++ [(match_operand:LSX 0 "register_operand")
++  (match_operand:LSX 1 "register_operand")
++  (match_operand:LSX 2 "register_operand")
++  (match_operand:LSX 3 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  loongarch_expand_vec_perm (operands[0], operands[1],
++			     operands[2], operands[3]);
++  DONE;
++})
++
++(define_insn "lsx_vshuf_"
++  [(set (match_operand:LSX_DWH 0 "register_operand" "=f")
++	(unspec:LSX_DWH [(match_operand:LSX_DWH 1 "register_operand" "0")
++			 (match_operand:LSX_DWH 2 "register_operand" "f")
++			 (match_operand:LSX_DWH 3 "register_operand" "f")]
++			UNSPEC_LSX_VSHUF))]
++  "ISA_HAS_LSX"
++  "vshuf.\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_sld")
++   (set_attr "mode" "")])
++
++(define_expand "mov"
++  [(set (match_operand:LSX 0)
++	(match_operand:LSX 1))]
++  "ISA_HAS_LSX"
++{
++  if (loongarch_legitimize_move (mode, operands[0], operands[1]))
++    DONE;
++})
++
++(define_expand "movmisalign"
++  [(set (match_operand:LSX 0)
++	(match_operand:LSX 1))]
++  "ISA_HAS_LSX"
++{
++  if (loongarch_legitimize_move (mode, operands[0], operands[1]))
++    DONE;
++})
++
++(define_insn "mov_lsx"
++  [(set (match_operand:LSX 0 "nonimmediate_operand" "=f,f,R,*r,*f")
++	(match_operand:LSX 1 "move_operand" "fYGYI,R,f,*f,*r"))]
++  "ISA_HAS_LSX"
++{ return loongarch_output_move (operands[0], operands[1]); }
++  [(set_attr "type" "simd_move,simd_load,simd_store,simd_copy,simd_insert")
++   (set_attr "mode" "")])
++
++(define_split
++  [(set (match_operand:LSX 0 "nonimmediate_operand")
++	(match_operand:LSX 1 "move_operand"))]
++  "reload_completed && ISA_HAS_LSX
++   && loongarch_split_move_insn_p (operands[0], operands[1])"
++  [(const_int 0)]
++{
++  loongarch_split_move_insn (operands[0], operands[1], curr_insn);
++  DONE;
++})
++
++;; Offset load
++(define_expand "lsx_ld_"
++  [(match_operand:LSX 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq10_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (operands[0], gen_rtx_MEM (mode, addr));
++  DONE;
++})
++
++;; Offset store
++(define_expand "lsx_st_"
++  [(match_operand:LSX 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq10_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (gen_rtx_MEM (mode, addr), operands[0]);
++  DONE;
++})
++
++;; Integer operations
++(define_insn "add3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f,f")
++	(plus:ILSX
++	  (match_operand:ILSX 1 "register_operand" "f,f,f")
++	  (match_operand:ILSX 2 "reg_or_vector_same_ximm5_operand" "f,Unv5,Uuv5")))]
++  "ISA_HAS_LSX"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "vadd.\t%w0,%w1,%w2";
++    case 1:
++      {
++	HOST_WIDE_INT val = INTVAL (CONST_VECTOR_ELT (operands[2], 0));
++
++	operands[2] = GEN_INT (-val);
++	return "vsubi.\t%w0,%w1,%d2";
++      }
++    case 2:
++      return "vaddi.\t%w0,%w1,%E2";
++    default:
++      gcc_unreachable ();
++    }
++}
++  [(set_attr "alu_type" "simd_add")
++   (set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "sub3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(minus:ILSX
++	  (match_operand:ILSX 1 "register_operand" "f,f")
++	  (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))]
++  "ISA_HAS_LSX"
++  "@
++   vsub.\t%w0,%w1,%w2
++   vsubi.\t%w0,%w1,%E2"
++  [(set_attr "alu_type" "simd_add")
++   (set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "mul3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(mult:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		   (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vmul.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_mul")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vmadd_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(plus:ILSX (mult:ILSX (match_operand:ILSX 2 "register_operand" "f")
++			      (match_operand:ILSX 3 "register_operand" "f"))
++		   (match_operand:ILSX 1 "register_operand" "0")))]
++  "ISA_HAS_LSX"
++  "vmadd.\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_mul")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vmsub_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(minus:ILSX (match_operand:ILSX 1 "register_operand" "0")
++		    (mult:ILSX (match_operand:ILSX 2 "register_operand" "f")
++			       (match_operand:ILSX 3 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vmsub.\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_mul")
++   (set_attr "mode" "")])
++
++(define_insn "div3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(div:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		  (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++{ return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); }
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "udiv3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(udiv:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		   (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++{ return loongarch_lsx_output_division ("vdiv.\t%w0,%w1,%w2", operands); }
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "mod3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(mod:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		  (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++{ return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); }
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "umod3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(umod:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		   (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++{ return loongarch_lsx_output_division ("vmod.\t%w0,%w1,%w2", operands); }
++  [(set_attr "type" "simd_div")
++   (set_attr "mode" "")])
++
++(define_insn "xor3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f,f")
++	(xor:ILSX
++	  (match_operand:ILSX 1 "register_operand" "f,f,f")
++	  (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
++  "ISA_HAS_LSX"
++  "@
++   vxor.v\t%w0,%w1,%w2
++   vbitrevi.%v0\t%w0,%w1,%V2
++   vxori.b\t%w0,%w1,%B2"
++  [(set_attr "type" "simd_logic,simd_bit,simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "ior3"
++  [(set (match_operand:LSX 0 "register_operand" "=f,f,f")
++	(ior:LSX
++	  (match_operand:LSX 1 "register_operand" "f,f,f")
++	  (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YC,Urv8")))]
++  "ISA_HAS_LSX"
++  "@
++   vor.v\t%w0,%w1,%w2
++   vbitseti.%v0\t%w0,%w1,%V2
++   vori.b\t%w0,%w1,%B2"
++  [(set_attr "type" "simd_logic,simd_bit,simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "and3"
++  [(set (match_operand:LSX 0 "register_operand" "=f,f,f")
++	(and:LSX
++	  (match_operand:LSX 1 "register_operand" "f,f,f")
++	  (match_operand:LSX 2 "reg_or_vector_same_val_operand" "f,YZ,Urv8")))]
++  "ISA_HAS_LSX"
++{
++  switch (which_alternative)
++    {
++    case 0:
++      return "vand.v\t%w0,%w1,%w2";
++    case 1:
++      {
++	rtx elt0 = CONST_VECTOR_ELT (operands[2], 0);
++	unsigned HOST_WIDE_INT val = ~UINTVAL (elt0);
++	operands[2] = loongarch_gen_const_int_vector (mode, val & (-val));
++	return "vbitclri.%v0\t%w0,%w1,%V2";
++      }
++    case 2:
++      return "vandi.b\t%w0,%w1,%B2";
++    default:
++      gcc_unreachable ();
++    }
++}
++  [(set_attr "type" "simd_logic,simd_bit,simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "one_cmpl2"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(not:ILSX (match_operand:ILSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vnor.v\t%w0,%w1,%w1"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "TI")])
++
++(define_insn "vlshr3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(lshiftrt:ILSX
++	  (match_operand:ILSX 1 "register_operand" "f,f")
++	  (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
++  "ISA_HAS_LSX"
++  "@
++   vsrl.\t%w0,%w1,%w2
++   vsrli.\t%w0,%w1,%E2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "vashr3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(ashiftrt:ILSX
++	  (match_operand:ILSX 1 "register_operand" "f,f")
++	  (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
++  "ISA_HAS_LSX"
++  "@
++   vsra.\t%w0,%w1,%w2
++   vsrai.\t%w0,%w1,%E2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "vashl3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(ashift:ILSX
++	  (match_operand:ILSX 1 "register_operand" "f,f")
++	  (match_operand:ILSX 2 "reg_or_vector_same_uimm6_operand" "f,Uuv6")))]
++  "ISA_HAS_LSX"
++  "@
++   vsll.\t%w0,%w1,%w2
++   vslli.\t%w0,%w1,%E2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++;; Floating-point operations
++(define_insn "add3"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(plus:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		   (match_operand:FLSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfadd.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fadd")
++   (set_attr "mode" "")])
++
++(define_insn "sub3"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(minus:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		    (match_operand:FLSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfsub.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fadd")
++   (set_attr "mode" "")])
++
++(define_insn "mul3"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(mult:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		   (match_operand:FLSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfmul.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fmul")
++   (set_attr "mode" "")])
++
++(define_insn "div3"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(div:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		  (match_operand:FLSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfdiv.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++(define_insn "fma4"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(fma:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		  (match_operand:FLSX 2 "register_operand" "f")
++		  (match_operand:FLSX 3 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfmadd.\t%w0,%w1,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "fnma4"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(fma:FLSX (neg:FLSX (match_operand:FLSX 1 "register_operand" "f"))
++		  (match_operand:FLSX 2 "register_operand" "f")
++		  (match_operand:FLSX 3 "register_operand" "0")))]
++  "ISA_HAS_LSX"
++  "vfnmsub.\t%w0,%w1,%w2,%w0"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "sqrt2"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(sqrt:FLSX (match_operand:FLSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfsqrt.\t%w0,%w1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++;; Built-in functions
++(define_insn "lsx_vadda_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(plus:ILSX (abs:ILSX (match_operand:ILSX 1 "register_operand" "f"))
++		   (abs:ILSX (match_operand:ILSX 2 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vadda.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "ssadd3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(ss_plus:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vsadd.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "usadd3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(us_plus:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vsadd.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vabsd_s_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_ABSD_S))]
++  "ISA_HAS_LSX"
++  "vabsd.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vabsd_u_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VABSD_U))]
++  "ISA_HAS_LSX"
++  "vabsd.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vavg_s_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VAVG_S))]
++  "ISA_HAS_LSX"
++  "vavg.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vavg_u_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VAVG_U))]
++  "ISA_HAS_LSX"
++  "vavg.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vavgr_s_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VAVGR_S))]
++  "ISA_HAS_LSX"
++  "vavgr.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vavgr_u_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VAVGR_U))]
++  "ISA_HAS_LSX"
++  "vavgr.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbitclr_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VBITCLR))]
++  "ISA_HAS_LSX"
++  "vbitclr.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbitclri_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LSX_VBITCLRI))]
++  "ISA_HAS_LSX"
++  "vbitclri.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbitrev_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VBITREV))]
++  "ISA_HAS_LSX"
++  "vbitrev.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbitrevi_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		       (match_operand 2 "const_lsx_branch_operand" "")]
++		     UNSPEC_LSX_VBITREVI))]
++  "ISA_HAS_LSX"
++  "vbitrevi.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbitsel_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(ior:ILSX (and:ILSX (not:ILSX
++			      (match_operand:ILSX 3 "register_operand" "f"))
++			    (match_operand:ILSX 1 "register_operand" "f"))
++		  (and:ILSX (match_dup 3)
++			    (match_operand:ILSX 2 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vbitsel.v\t%w0,%w1,%w2,%w3"
++  [(set_attr "type" "simd_bitmov")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbitseli_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(ior:V16QI (and:V16QI (not:V16QI
++				(match_operand:V16QI 1 "register_operand" "0"))
++			      (match_operand:V16QI 2 "register_operand" "f"))
++		   (and:V16QI (match_dup 1)
++			      (match_operand:V16QI 3 "const_vector_same_val_operand" "Urv8"))))]
++  "ISA_HAS_LSX"
++  "vbitseli.b\t%w0,%w2,%B3"
++  [(set_attr "type" "simd_bitmov")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vbitset_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VBITSET))]
++  "ISA_HAS_LSX"
++  "vbitset.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbitseti_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LSX_VBITSETI))]
++  "ISA_HAS_LSX"
++  "vbitseti.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_code_iterator ICC [eq le leu lt ltu])
++
++(define_code_attr icc
++  [(eq  "eq")
++   (le  "le")
++   (leu "le")
++   (lt  "lt")
++   (ltu "lt")])
++
++(define_code_attr icci
++  [(eq  "eqi")
++   (le  "lei")
++   (leu "lei")
++   (lt  "lti")
++   (ltu "lti")])
++
++(define_code_attr cmpi
++  [(eq   "s")
++   (le   "s")
++   (leu  "u")
++   (lt   "s")
++   (ltu  "u")])
++
++(define_code_attr cmpi_1
++  [(eq   "")
++   (le   "")
++   (leu  "u")
++   (lt   "")
++   (ltu  "u")])
++
++(define_insn "lsx_vs_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(ICC:ILSX
++	  (match_operand:ILSX 1 "register_operand" "f,f")
++	  (match_operand:ILSX 2 "reg_or_vector_same_imm5_operand" "f,Uv5")))]
++  "ISA_HAS_LSX"
++  "@
++   vs.\t%w0,%w1,%w2
++   vs.\t%w0,%w1,%E2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfclass_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLSX 1 "register_operand" "f")]
++			 UNSPEC_LSX_VFCLASS))]
++  "ISA_HAS_LSX"
++  "vfclass.\t%w0,%w1"
++  [(set_attr "type" "simd_fclass")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfcmp_caf_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLSX 1 "register_operand" "f")
++			  (match_operand:FLSX 2 "register_operand" "f")]
++			 UNSPEC_LSX_VFCMP_CAF))]
++  "ISA_HAS_LSX"
++  "vfcmp.caf.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfcmp_cune_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLSX 1 "register_operand" "f")
++			  (match_operand:FLSX 2 "register_operand" "f")]
++			 UNSPEC_LSX_VFCMP_CUNE))]
++  "ISA_HAS_LSX"
++  "vfcmp.cune.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++(define_code_iterator vfcond [unordered ordered eq ne le lt uneq unle unlt])
++
++(define_code_attr fcc
++  [(unordered "cun")
++   (ordered   "cor")
++   (eq	      "ceq")
++   (ne	      "cne")
++   (uneq      "cueq")
++   (unle      "cule")
++   (unlt      "cult")
++   (le	      "cle")
++   (lt	      "clt")])
++
++(define_int_iterator FSC_UNS [UNSPEC_LSX_VFCMP_SAF UNSPEC_LSX_VFCMP_SUN UNSPEC_LSX_VFCMP_SOR
++			      UNSPEC_LSX_VFCMP_SEQ UNSPEC_LSX_VFCMP_SNE UNSPEC_LSX_VFCMP_SUEQ
++			      UNSPEC_LSX_VFCMP_SUNE UNSPEC_LSX_VFCMP_SULE UNSPEC_LSX_VFCMP_SULT
++			      UNSPEC_LSX_VFCMP_SLE UNSPEC_LSX_VFCMP_SLT])
++
++(define_int_attr fsc
++  [(UNSPEC_LSX_VFCMP_SAF  "saf")
++   (UNSPEC_LSX_VFCMP_SUN  "sun")
++   (UNSPEC_LSX_VFCMP_SOR  "sor")
++   (UNSPEC_LSX_VFCMP_SEQ  "seq")
++   (UNSPEC_LSX_VFCMP_SNE  "sne")
++   (UNSPEC_LSX_VFCMP_SUEQ "sueq")
++   (UNSPEC_LSX_VFCMP_SUNE "sune")
++   (UNSPEC_LSX_VFCMP_SULE "sule")
++   (UNSPEC_LSX_VFCMP_SULT "sult")
++   (UNSPEC_LSX_VFCMP_SLE  "sle")
++   (UNSPEC_LSX_VFCMP_SLT  "slt")])
++
++(define_insn "lsx_vfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(vfcond: (match_operand:FLSX 1 "register_operand" "f")
++			 (match_operand:FLSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfcmp..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfcmp__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLSX 1 "register_operand" "f")
++			  (match_operand:FLSX 2 "register_operand" "f")]
++			 FSC_UNS))]
++  "ISA_HAS_LSX"
++  "vfcmp..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "")])
++
++(define_mode_attr fint
++  [(V4SF "v4si")
++   (V2DF "v2di")])
++
++(define_mode_attr FINTCNV
++  [(V4SF "I2S")
++   (V2DF "I2D")])
++
++(define_mode_attr FINTCNV_2
++  [(V4SF "S2I")
++   (V2DF "D2I")])
++
++(define_insn "float2"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(float:FLSX (match_operand: 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vffint..\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "floatuns2"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(unsigned_float:FLSX
++	  (match_operand: 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vffint..\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_mode_attr FFQ
++  [(V4SF "V8HI")
++   (V2DF "V4SI")])
++
++(define_insn "lsx_vreplgr2vr_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(vec_duplicate:ILSX
++	  (match_operand: 1 "reg_or_0_operand" "r,J")))]
++  "ISA_HAS_LSX"
++{
++  if (which_alternative == 1)
++    return "ldi.\t%w0,0";
++
++  if (!TARGET_64BIT && (mode == V2DImode || mode == V2DFmode))
++    return "#";
++  else
++    return "vreplgr2vr.\t%w0,%z1";
++}
++  [(set_attr "type" "simd_fill")
++   (set_attr "mode" "")])
++
++(define_split
++  [(set (match_operand:LSX_D 0 "register_operand")
++	(vec_duplicate:LSX_D
++	  (match_operand: 1 "register_operand")))]
++  "reload_completed && ISA_HAS_LSX && !TARGET_64BIT"
++  [(const_int 0)]
++{
++  loongarch_split_lsx_fill_d (operands[0], operands[1]);
++  DONE;
++})
++
++(define_insn "logb2"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFLOGB))]
++  "ISA_HAS_LSX"
++  "vflogb.\t%w0,%w1"
++  [(set_attr "type" "simd_flog2")
++   (set_attr "mode" "")])
++
++(define_insn "smax3"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(smax:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		   (match_operand:FLSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfmax.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfmaxa_"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(if_then_else:FLSX
++	   (gt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f"))
++	       (abs:FLSX (match_operand:FLSX 2 "register_operand" "f")))
++	   (match_dup 1)
++	   (match_dup 2)))]
++  "ISA_HAS_LSX"
++  "vfmaxa.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "smin3"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(smin:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		   (match_operand:FLSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfmin.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfmina_"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(if_then_else:FLSX
++	   (lt (abs:FLSX (match_operand:FLSX 1 "register_operand" "f"))
++	       (abs:FLSX (match_operand:FLSX 2 "register_operand" "f")))
++	   (match_dup 1)
++	   (match_dup 2)))]
++  "ISA_HAS_LSX"
++  "vfmina.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fminmax")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfrecip_"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRECIP))]
++  "ISA_HAS_LSX"
++  "vfrecip.\t%w0,%w1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfrint_"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINT))]
++  "ISA_HAS_LSX"
++  "vfrint.\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfrsqrt_"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRSQRT))]
++  "ISA_HAS_LSX"
++  "vfrsqrt.\t%w0,%w1"
++  [(set_attr "type" "simd_fdiv")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vftint_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLSX 1 "register_operand" "f")]
++			 UNSPEC_LSX_VFTINT_S))]
++  "ISA_HAS_LSX"
++  "vftint..\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vftint_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:FLSX 1 "register_operand" "f")]
++			 UNSPEC_LSX_VFTINT_U))]
++  "ISA_HAS_LSX"
++  "vftint..\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "fix_trunc2"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(fix: (match_operand:FLSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vftintrz..\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "fixuns_trunc2"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unsigned_fix: (match_operand:FLSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vftintrz..\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "cnv_mode" "")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vhw_h_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(addsub:V8HI
++	  (any_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 1 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))
++	  (any_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))))]
++  "ISA_HAS_LSX"
++  "vhw.h.b\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vhw_w_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(addsub:V4SI
++	  (any_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 1 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))
++	  (any_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))))]
++  "ISA_HAS_LSX"
++  "vhw.w.h\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vhw_d_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(addsub:V2DI
++	  (any_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 1 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)])))
++	  (any_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)])))))]
++  "ISA_HAS_LSX"
++  "vhw.d.w\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vpackev_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(vec_select:V16QI
++	  (vec_concat:V32QI
++	    (match_operand:V16QI 1 "register_operand" "f")
++	    (match_operand:V16QI 2 "register_operand" "f"))
++	  (parallel [(const_int 0)  (const_int 16)
++		     (const_int 2)  (const_int 18)
++		     (const_int 4)  (const_int 20)
++		     (const_int 6)  (const_int 22)
++		     (const_int 8)  (const_int 24)
++		     (const_int 10) (const_int 26)
++		     (const_int 12) (const_int 28)
++		     (const_int 14) (const_int 30)])))]
++  "ISA_HAS_LSX"
++  "vpackev.b\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vpackev_h"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(vec_select:V8HI
++	  (vec_concat:V16HI
++	    (match_operand:V8HI 1 "register_operand" "f")
++	    (match_operand:V8HI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 8)
++		     (const_int 2) (const_int 10)
++		     (const_int 4) (const_int 12)
++		     (const_int 6) (const_int 14)])))]
++  "ISA_HAS_LSX"
++  "vpackev.h\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vpackev_w"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(vec_select:V4SI
++	  (vec_concat:V8SI
++	    (match_operand:V4SI 1 "register_operand" "f")
++	    (match_operand:V4SI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 4)
++		     (const_int 2) (const_int 6)])))]
++  "ISA_HAS_LSX"
++  "vpackev.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vpackev_w_f"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(vec_select:V4SF
++	  (vec_concat:V8SF
++	    (match_operand:V4SF 1 "register_operand" "f")
++	    (match_operand:V4SF 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 4)
++		     (const_int 2) (const_int 6)])))]
++  "ISA_HAS_LSX"
++  "vpackev.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vilvh_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(vec_select:V16QI
++	  (vec_concat:V32QI
++	    (match_operand:V16QI 1 "register_operand" "f")
++	    (match_operand:V16QI 2 "register_operand" "f"))
++	  (parallel [(const_int 8)  (const_int 24)
++		     (const_int 9)  (const_int 25)
++		     (const_int 10) (const_int 26)
++		     (const_int 11) (const_int 27)
++		     (const_int 12) (const_int 28)
++		     (const_int 13) (const_int 29)
++		     (const_int 14) (const_int 30)
++		     (const_int 15) (const_int 31)])))]
++  "ISA_HAS_LSX"
++  "vilvh.b\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vilvh_h"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(vec_select:V8HI
++	  (vec_concat:V16HI
++	    (match_operand:V8HI 1 "register_operand" "f")
++	    (match_operand:V8HI 2 "register_operand" "f"))
++	  (parallel [(const_int 4) (const_int 12)
++		     (const_int 5) (const_int 13)
++		     (const_int 6) (const_int 14)
++		     (const_int 7) (const_int 15)])))]
++  "ISA_HAS_LSX"
++  "vilvh.h\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8HI")])
++
++(define_mode_attr vilvh_suffix
++  [(V4SI "") (V4SF "_f")
++   (V2DI "") (V2DF "_f")])
++
++(define_insn "lsx_vilvh_w"
++  [(set (match_operand:LSX_W 0 "register_operand" "=f")
++	(vec_select:LSX_W
++	  (vec_concat:
++	    (match_operand:LSX_W 1 "register_operand" "f")
++	    (match_operand:LSX_W 2 "register_operand" "f"))
++	  (parallel [(const_int 2) (const_int 6)
++		     (const_int 3) (const_int 7)])))]
++  "ISA_HAS_LSX"
++  "vilvh.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vilvh_d"
++  [(set (match_operand:LSX_D 0 "register_operand" "=f")
++	(vec_select:LSX_D
++	  (vec_concat:
++	    (match_operand:LSX_D 1 "register_operand" "f")
++	    (match_operand:LSX_D 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 3)])))]
++  "ISA_HAS_LSX"
++  "vilvh.d\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vpackod_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(vec_select:V16QI
++	  (vec_concat:V32QI
++	    (match_operand:V16QI 1 "register_operand" "f")
++	    (match_operand:V16QI 2 "register_operand" "f"))
++	  (parallel [(const_int 1)  (const_int 17)
++		     (const_int 3)  (const_int 19)
++		     (const_int 5)  (const_int 21)
++		     (const_int 7)  (const_int 23)
++		     (const_int 9)  (const_int 25)
++		     (const_int 11) (const_int 27)
++		     (const_int 13) (const_int 29)
++		     (const_int 15) (const_int 31)])))]
++  "ISA_HAS_LSX"
++  "vpackod.b\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vpackod_h"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(vec_select:V8HI
++	  (vec_concat:V16HI
++	    (match_operand:V8HI 1 "register_operand" "f")
++	    (match_operand:V8HI 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 9)
++		     (const_int 3) (const_int 11)
++		     (const_int 5) (const_int 13)
++		     (const_int 7) (const_int 15)])))]
++  "ISA_HAS_LSX"
++  "vpackod.h\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vpackod_w"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(vec_select:V4SI
++	  (vec_concat:V8SI
++	    (match_operand:V4SI 1 "register_operand" "f")
++	    (match_operand:V4SI 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 5)
++		     (const_int 3) (const_int 7)])))]
++  "ISA_HAS_LSX"
++  "vpackod.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vpackod_w_f"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(vec_select:V4SF
++	  (vec_concat:V8SF
++	    (match_operand:V4SF 1 "register_operand" "f")
++	    (match_operand:V4SF 2 "register_operand" "f"))
++	  (parallel [(const_int 1) (const_int 5)
++		     (const_int 3) (const_int 7)])))]
++  "ISA_HAS_LSX"
++  "vpackod.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vilvl_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(vec_select:V16QI
++	  (vec_concat:V32QI
++	    (match_operand:V16QI 1 "register_operand" "f")
++	    (match_operand:V16QI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 16)
++		     (const_int 1) (const_int 17)
++		     (const_int 2) (const_int 18)
++		     (const_int 3) (const_int 19)
++		     (const_int 4) (const_int 20)
++		     (const_int 5) (const_int 21)
++		     (const_int 6) (const_int 22)
++		     (const_int 7) (const_int 23)])))]
++  "ISA_HAS_LSX"
++  "vilvl.b\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vilvl_h"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(vec_select:V8HI
++	  (vec_concat:V16HI
++	    (match_operand:V8HI 1 "register_operand" "f")
++	    (match_operand:V8HI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 8)
++		     (const_int 1) (const_int 9)
++		     (const_int 2) (const_int 10)
++		     (const_int 3) (const_int 11)])))]
++  "ISA_HAS_LSX"
++  "vilvl.h\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vilvl_w"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(vec_select:V4SI
++	  (vec_concat:V8SI
++	    (match_operand:V4SI 1 "register_operand" "f")
++	    (match_operand:V4SI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 4)
++		     (const_int 1) (const_int 5)])))]
++  "ISA_HAS_LSX"
++  "vilvl.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vilvl_w_f"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(vec_select:V4SF
++	  (vec_concat:V8SF
++	    (match_operand:V4SF 1 "register_operand" "f")
++	    (match_operand:V4SF 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 4)
++		     (const_int 1) (const_int 5)])))]
++  "ISA_HAS_LSX"
++  "vilvl.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vilvl_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(vec_select:V2DI
++	  (vec_concat:V4DI
++	    (match_operand:V2DI 1 "register_operand" "f")
++	    (match_operand:V2DI 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 2)])))]
++  "ISA_HAS_LSX"
++  "vilvl.d\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vilvl_d_f"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(vec_select:V2DF
++	  (vec_concat:V4DF
++	    (match_operand:V2DF 1 "register_operand" "f")
++	    (match_operand:V2DF 2 "register_operand" "f"))
++	  (parallel [(const_int 0) (const_int 2)])))]
++  "ISA_HAS_LSX"
++  "vilvl.d\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "smax3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(smax:ILSX (match_operand:ILSX 1 "register_operand" "f,f")
++		   (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))]
++  "ISA_HAS_LSX"
++  "@
++   vmax.\t%w0,%w1,%w2
++   vmaxi.\t%w0,%w1,%E2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "umax3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(umax:ILSX (match_operand:ILSX 1 "register_operand" "f,f")
++		   (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))]
++  "ISA_HAS_LSX"
++  "@
++   vmax.\t%w0,%w1,%w2
++   vmaxi.\t%w0,%w1,%B2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "smin3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(smin:ILSX (match_operand:ILSX 1 "register_operand" "f,f")
++		   (match_operand:ILSX 2 "reg_or_vector_same_simm5_operand" "f,Usv5")))]
++  "ISA_HAS_LSX"
++  "@
++   vmin.\t%w0,%w1,%w2
++   vmini.\t%w0,%w1,%E2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "umin3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(umin:ILSX (match_operand:ILSX 1 "register_operand" "f,f")
++		   (match_operand:ILSX 2 "reg_or_vector_same_uimm5_operand" "f,Uuv5")))]
++  "ISA_HAS_LSX"
++  "@
++   vmin.\t%w0,%w1,%w2
++   vmini.\t%w0,%w1,%B2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vclo_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(clz:ILSX (not:ILSX (match_operand:ILSX 1 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vclo.\t%w0,%w1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "clz2"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(clz:ILSX (match_operand:ILSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vclz.\t%w0,%w1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_nor_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f,f")
++	(and:ILSX (not:ILSX (match_operand:ILSX 1 "register_operand" "f,f"))
++		  (not:ILSX (match_operand:ILSX 2 "reg_or_vector_same_val_operand" "f,Urv8"))))]
++  "ISA_HAS_LSX"
++  "@
++   vnor.v\t%w0,%w1,%w2
++   vnori.b\t%w0,%w1,%B2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vpickev_b"
++[(set (match_operand:V16QI 0 "register_operand" "=f")
++      (vec_select:V16QI
++	(vec_concat:V32QI
++	  (match_operand:V16QI 1 "register_operand" "f")
++	  (match_operand:V16QI 2 "register_operand" "f"))
++	(parallel [(const_int 0) (const_int 2)
++		   (const_int 4) (const_int 6)
++		   (const_int 8) (const_int 10)
++		   (const_int 12) (const_int 14)
++		   (const_int 16) (const_int 18)
++		   (const_int 20) (const_int 22)
++		   (const_int 24) (const_int 26)
++		   (const_int 28) (const_int 30)])))]
++  "ISA_HAS_LSX"
++  "vpickev.b\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vpickev_h"
++[(set (match_operand:V8HI 0 "register_operand" "=f")
++      (vec_select:V8HI
++	(vec_concat:V16HI
++	  (match_operand:V8HI 1 "register_operand" "f")
++	  (match_operand:V8HI 2 "register_operand" "f"))
++	(parallel [(const_int 0) (const_int 2)
++		   (const_int 4) (const_int 6)
++		   (const_int 8) (const_int 10)
++		   (const_int 12) (const_int 14)])))]
++  "ISA_HAS_LSX"
++  "vpickev.h\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vpickev_w"
++[(set (match_operand:V4SI 0 "register_operand" "=f")
++      (vec_select:V4SI
++	(vec_concat:V8SI
++	  (match_operand:V4SI 1 "register_operand" "f")
++	  (match_operand:V4SI 2 "register_operand" "f"))
++	(parallel [(const_int 0) (const_int 2)
++		   (const_int 4) (const_int 6)])))]
++  "ISA_HAS_LSX"
++  "vpickev.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vpickev_w_f"
++[(set (match_operand:V4SF 0 "register_operand" "=f")
++      (vec_select:V4SF
++	(vec_concat:V8SF
++	  (match_operand:V4SF 1 "register_operand" "f")
++	  (match_operand:V4SF 2 "register_operand" "f"))
++	(parallel [(const_int 0) (const_int 2)
++		   (const_int 4) (const_int 6)])))]
++  "ISA_HAS_LSX"
++  "vpickev.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vpickod_b"
++[(set (match_operand:V16QI 0 "register_operand" "=f")
++      (vec_select:V16QI
++	(vec_concat:V32QI
++	  (match_operand:V16QI 1 "register_operand" "f")
++	  (match_operand:V16QI 2 "register_operand" "f"))
++	(parallel [(const_int 1) (const_int 3)
++		   (const_int 5) (const_int 7)
++		   (const_int 9) (const_int 11)
++		   (const_int 13) (const_int 15)
++		   (const_int 17) (const_int 19)
++		   (const_int 21) (const_int 23)
++		   (const_int 25) (const_int 27)
++		   (const_int 29) (const_int 31)])))]
++  "ISA_HAS_LSX"
++  "vpickod.b\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vpickod_h"
++[(set (match_operand:V8HI 0 "register_operand" "=f")
++      (vec_select:V8HI
++	(vec_concat:V16HI
++	  (match_operand:V8HI 1 "register_operand" "f")
++	  (match_operand:V8HI 2 "register_operand" "f"))
++	(parallel [(const_int 1) (const_int 3)
++		   (const_int 5) (const_int 7)
++		   (const_int 9) (const_int 11)
++		   (const_int 13) (const_int 15)])))]
++  "ISA_HAS_LSX"
++  "vpickod.h\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vpickod_w"
++[(set (match_operand:V4SI 0 "register_operand" "=f")
++      (vec_select:V4SI
++	(vec_concat:V8SI
++	  (match_operand:V4SI 1 "register_operand" "f")
++	  (match_operand:V4SI 2 "register_operand" "f"))
++	(parallel [(const_int 1) (const_int 3)
++		   (const_int 5) (const_int 7)])))]
++  "ISA_HAS_LSX"
++  "vpickod.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vpickod_w_f"
++[(set (match_operand:V4SF 0 "register_operand" "=f")
++      (vec_select:V4SF
++	(vec_concat:V8SF
++	  (match_operand:V4SF 1 "register_operand" "f")
++	  (match_operand:V4SF 2 "register_operand" "f"))
++	(parallel [(const_int 1) (const_int 3)
++		   (const_int 5) (const_int 7)])))]
++  "ISA_HAS_LSX"
++  "vpickod.w\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_permute")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "popcount2"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(popcount:ILSX (match_operand:ILSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vpcnt.\t%w0,%w1"
++  [(set_attr "type" "simd_pcnt")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsat_s_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LSX_VSAT_S))]
++  "ISA_HAS_LSX"
++  "vsat.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_sat")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsat_u_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LSX_VSAT_U))]
++  "ISA_HAS_LSX"
++  "vsat.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_sat")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vshuf4i_"
++  [(set (match_operand:LSX_WHB_W 0 "register_operand" "=f")
++	(vec_select:LSX_WHB_W
++	  (match_operand:LSX_WHB_W 1 "register_operand" "f")
++	  (match_operand 2 "par_const_vector_shf_set_operand" "")))]
++  "ISA_HAS_LSX"
++{
++  HOST_WIDE_INT val = 0;
++  unsigned int i;
++
++  /* We convert the selection to an immediate.  */
++  for (i = 0; i < 4; i++)
++    val |= INTVAL (XVECEXP (operands[2], 0, i)) << (2 * i);
++
++  operands[2] = GEN_INT (val);
++  return "vshuf4i.\t%w0,%w1,%X2";
++}
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrar_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSRAR))]
++  "ISA_HAS_LSX"
++  "vsrar.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrari_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LSX_VSRARI))]
++  "ISA_HAS_LSX"
++  "vsrari.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrlr_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSRLR))]
++  "ISA_HAS_LSX"
++  "vsrlr.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrlri_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")]
++		     UNSPEC_LSX_VSRLRI))]
++  "ISA_HAS_LSX"
++  "vsrlri.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssub_s_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(ss_minus:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vssub.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssub_u_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(us_minus:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vssub.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vreplve_"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++	(vec_duplicate:LSX
++	  (vec_select:
++	    (match_operand:LSX 1 "register_operand" "f")
++	    (parallel [(match_operand:SI 2 "register_operand" "r")]))))]
++  "ISA_HAS_LSX"
++  "vreplve.\t%w0,%w1,%z2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vreplvei_"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++	(vec_duplicate:LSX
++	  (vec_select:
++	    (match_operand:LSX 1 "register_operand" "f")
++	    (parallel [(match_operand 2 "const__operand" "")]))))]
++  "ISA_HAS_LSX"
++  "vreplvei.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vreplvei__scalar"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++      (vec_duplicate:LSX
++	(match_operand: 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vreplvei.\t%w0,%w1,0"
++  [(set_attr "type" "simd_splat")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfcvt_h_s"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(unspec:V8HI [(match_operand:V4SF 1 "register_operand" "f")
++		      (match_operand:V4SF 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFCVT))]
++  "ISA_HAS_LSX"
++  "vfcvt.h.s\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vfcvt_s_d"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V2DF 1 "register_operand" "f")
++		      (match_operand:V2DF 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFCVT))]
++  "ISA_HAS_LSX"
++  "vfcvt.s.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "vec_pack_trunc_v2df"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(vec_concat:V4SF
++	  (float_truncate:V2SF (match_operand:V2DF 1 "register_operand" "f"))
++	  (float_truncate:V2SF (match_operand:V2DF 2 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vfcvt.s.d\t%w0,%w2,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfcvth_s_h"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFCVTH))]
++  "ISA_HAS_LSX"
++  "vfcvth.s.h\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfcvth_d_s"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(float_extend:V2DF
++	(vec_select:V2SF
++	  (match_operand:V4SF 1 "register_operand" "f")
++	  (parallel [(const_int 2) (const_int 3)]))))]
++  "ISA_HAS_LSX"
++  "vfcvth.d.s\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vfcvtl_s_h"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V8HI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFCVTL))]
++  "ISA_HAS_LSX"
++  "vfcvtl.s.h\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfcvtl_d_s"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(float_extend:V2DF
++	(vec_select:V2SF
++	  (match_operand:V4SF 1 "register_operand" "f")
++	  (parallel [(const_int 0) (const_int 1)]))))]
++  "ISA_HAS_LSX"
++  "vfcvtl.d.s\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V2DF")])
++
++(define_code_attr lsxbr
++  [(eq "bz")
++   (ne "bnz")])
++
++(define_code_attr lsxeq_v
++  [(eq "eqz")
++   (ne "nez")])
++
++(define_code_attr lsxne_v
++  [(eq "nez")
++   (ne "eqz")])
++
++(define_code_attr lsxeq
++  [(eq "anyeqz")
++   (ne "allnez")])
++
++(define_code_attr lsxne
++  [(eq "allnez")
++   (ne "anyeqz")])
++
++(define_insn "lsx__"
++ [(set (pc) (if_then_else
++	      (equality_op
++		(unspec:SI [(match_operand:LSX 1 "register_operand" "f")]
++			    UNSPEC_LSX_BRANCH)
++		  (match_operand:SI 2 "const_0_operand"))
++		  (label_ref (match_operand 0))
++		  (pc)))
++      (clobber (match_scratch:FCC 3 "=z"))]
++ "ISA_HAS_LSX"
++{
++  return loongarch_output_conditional_branch (insn, operands,
++					 "vset.\t%Z3%w1\n\tbcnez\t%Z3%0",
++					 "vset.\t%Z3%w1\n\tbcnez\t%Z3%0");
++}
++ [(set_attr "type" "simd_branch")
++  (set_attr "mode" "")])
++
++(define_insn "lsx__v_"
++ [(set (pc) (if_then_else
++	      (equality_op
++		(unspec:SI [(match_operand:LSX 1 "register_operand" "f")]
++			    UNSPEC_LSX_BRANCH_V)
++		  (match_operand:SI 2 "const_0_operand"))
++		  (label_ref (match_operand 0))
++		  (pc)))
++      (clobber (match_scratch:FCC 3 "=z"))]
++ "ISA_HAS_LSX"
++{
++  return loongarch_output_conditional_branch (insn, operands,
++					 "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0",
++					 "vset.v\t%Z3%w1\n\tbcnez\t%Z3%0");
++}
++ [(set_attr "type" "simd_branch")
++  (set_attr "mode" "TI")])
++
++;; vec_concate
++(define_expand "vec_concatv2di"
++  [(set (match_operand:V2DI 0 "register_operand")
++	(vec_concat:V2DI
++	  (match_operand:DI 1 "register_operand")
++	  (match_operand:DI 2 "register_operand")))]
++  "ISA_HAS_LSX"
++{
++  emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[1],
++				  operands[0], GEN_INT (0)));
++  emit_insn (gen_lsx_vinsgr2vr_d (operands[0], operands[2],
++				  operands[0], GEN_INT (1)));
++  DONE;
++})
++
++
++(define_insn "vandn3"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++	(and:LSX (not:LSX (match_operand:LSX 1 "register_operand" "f"))
++		 (match_operand:LSX 2 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vandn.v\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "vabs2"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(abs:ILSX (match_operand:ILSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vsigncov.\t%w0,%w1,%w1"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "vneg2"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(neg:ILSX (match_operand:ILSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vneg.\t%w0,%w1"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vmuh_s_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMUH_S))]
++  "ISA_HAS_LSX"
++  "vmuh.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vmuh_u_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMUH_U))]
++  "ISA_HAS_LSX"
++  "vmuh.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vextw_s_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VEXTW_S))]
++  "ISA_HAS_LSX"
++  "vextw_s.d\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vextw_u_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VEXTW_U))]
++  "ISA_HAS_LSX"
++  "vextw_u.d\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vsllwil_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f")
++			  (match_operand 2 "const__operand" "")]
++			 UNSPEC_LSX_VSLLWIL_S))]
++  "ISA_HAS_LSX"
++  "vsllwil..\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsllwil_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_WHB 1 "register_operand" "f")
++			  (match_operand 2 "const__operand" "")]
++			 UNSPEC_LSX_VSLLWIL_U))]
++  "ISA_HAS_LSX"
++  "vsllwil..\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsran__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSRAN))]
++  "ISA_HAS_LSX"
++  "vsran..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssran_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRAN_S))]
++  "ISA_HAS_LSX"
++  "vssran..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssran_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRAN_U))]
++  "ISA_HAS_LSX"
++  "vssran..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrain_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand 2 "const__operand" "")]
++			 UNSPEC_LSX_VSRAIN))]
++  "ISA_HAS_LSX"
++  "vsrain.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++;; FIXME: bitimm
++(define_insn "lsx_vsrains_s_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand 2 "const__operand" "")]
++			 UNSPEC_LSX_VSRAINS_S))]
++  "ISA_HAS_LSX"
++  "vsrains_s.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++;; FIXME: bitimm
++(define_insn "lsx_vsrains_u_"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand 2 "const__operand" "")]
++			 UNSPEC_LSX_VSRAINS_U))]
++  "ISA_HAS_LSX"
++  "vsrains_u.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrarn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSRARN))]
++  "ISA_HAS_LSX"
++  "vsrarn..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrarn_s__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRARN_S))]
++  "ISA_HAS_LSX"
++  "vssrarn..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrarn_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRARN_U))]
++  "ISA_HAS_LSX"
++  "vssrarn..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrln__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSRLN))]
++  "ISA_HAS_LSX"
++  "vsrln..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrln_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRLN_U))]
++  "ISA_HAS_LSX"
++  "vssrln..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrlrn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSRLRN))]
++  "ISA_HAS_LSX"
++  "vsrlrn..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrlrn_u__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRLRN_U))]
++  "ISA_HAS_LSX"
++  "vssrlrn..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfrstpi_"
++  [(set (match_operand:ILSX_HB 0 "register_operand" "=f")
++	(unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0")
++			 (match_operand:ILSX_HB 2 "register_operand" "f")
++			 (match_operand 3 "const_uimm5_operand" "")]
++			UNSPEC_LSX_VFRSTPI))]
++  "ISA_HAS_LSX"
++  "vfrstpi.\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vfrstp_"
++  [(set (match_operand:ILSX_HB 0 "register_operand" "=f")
++	(unspec:ILSX_HB [(match_operand:ILSX_HB 1 "register_operand" "0")
++			 (match_operand:ILSX_HB 2 "register_operand" "f")
++			 (match_operand:ILSX_HB 3 "register_operand" "f")]
++			UNSPEC_LSX_VFRSTP))]
++  "ISA_HAS_LSX"
++  "vfrstp.\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vshuf4i_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
++		      (match_operand:V2DI 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand")]
++		     UNSPEC_LSX_VSHUF4I))]
++  "ISA_HAS_LSX"
++  "vshuf4i.d\t%w0,%w2,%3"
++  [(set_attr "type" "simd_sld")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vbsrl_"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++	(unspec:LSX [(match_operand:LSX 1 "register_operand" "f")
++		     (match_operand 2 "const_uimm5_operand" "")]
++		    UNSPEC_LSX_VBSRL_V))]
++  "ISA_HAS_LSX"
++  "vbsrl.v\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vbsll_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const_uimm5_operand" "")]
++		     UNSPEC_LSX_VBSLL_V))]
++  "ISA_HAS_LSX"
++  "vbsll.v\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vextrins_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VEXTRINS))]
++  "ISA_HAS_LSX"
++  "vextrins.\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vmskltz_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")]
++		     UNSPEC_LSX_VMSKLTZ))]
++  "ISA_HAS_LSX"
++  "vmskltz.\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsigncov_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSIGNCOV))]
++  "ISA_HAS_LSX"
++  "vsigncov.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_expand "copysign3"
++  [(set (match_dup 4)
++	(and:FLSX
++	  (not:FLSX (match_dup 3))
++	  (match_operand:FLSX 1 "register_operand")))
++   (set (match_dup 5)
++	(and:FLSX (match_dup 3)
++		  (match_operand:FLSX 2 "register_operand")))
++   (set (match_operand:FLSX 0 "register_operand")
++	(ior:FLSX (match_dup 4) (match_dup 5)))]
++  "ISA_HAS_LSX"
++{
++  operands[3] = loongarch_build_signbit_mask (mode, 1, 0);
++
++  operands[4] = gen_reg_rtx (mode);
++  operands[5] = gen_reg_rtx (mode);
++})
++
++(define_insn "absv2df2"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(abs:V2DF (match_operand:V2DF 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vbitclri.d\t%w0,%w1,63"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "absv4sf2"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(abs:V4SF (match_operand:V4SF 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vbitclri.w\t%w0,%w1,31"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "vfmadd4"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(fma:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		  (match_operand:FLSX 2 "register_operand" "f")
++		  (match_operand:FLSX 3 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vfmadd.\t%w0,%w1,$w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "fms4"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(fma:FLSX (match_operand:FLSX 1 "register_operand" "f")
++		  (match_operand:FLSX 2 "register_operand" "f")
++		  (neg:FLSX (match_operand:FLSX 3 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vfmsub.\t%w0,%w1,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "vfnmsub4_nmsub4"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(neg:FLSX
++	  (fma:FLSX
++	    (match_operand:FLSX 1 "register_operand" "f")
++	    (match_operand:FLSX 2 "register_operand" "f")
++	    (neg:FLSX (match_operand:FLSX 3 "register_operand" "f")))))]
++  "ISA_HAS_LSX"
++  "vfnmsub.\t%w0,%w1,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++
++(define_insn "vfnmadd4_nmadd4"
++  [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(neg:FLSX
++	  (fma:FLSX
++	    (match_operand:FLSX 1 "register_operand" "f")
++	    (match_operand:FLSX 2 "register_operand" "f")
++	    (match_operand:FLSX 3 "register_operand" "f"))))]
++  "ISA_HAS_LSX"
++  "vfnmadd.\t%w0,%w1,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vftintrne_w_s"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRNE))]
++  "ISA_HAS_LSX"
++  "vftintrne.w.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrne_l_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRNE))]
++  "ISA_HAS_LSX"
++  "vftintrne.l.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vftintrp_w_s"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRP))]
++  "ISA_HAS_LSX"
++  "vftintrp.w.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrp_l_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRP))]
++  "ISA_HAS_LSX"
++  "vftintrp.l.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vftintrm_w_s"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRM))]
++  "ISA_HAS_LSX"
++  "vftintrm.w.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrm_l_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRM))]
++  "ISA_HAS_LSX"
++  "vftintrm.l.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vftint_w_d"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
++		      (match_operand:V2DF 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINT_W_D))]
++  "ISA_HAS_LSX"
++  "vftint.w.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vffint_s_l"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFFINT_S_L))]
++  "ISA_HAS_LSX"
++  "vffint.s.l\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vftintrz_w_d"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
++		      (match_operand:V2DF 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRZ_W_D))]
++  "ISA_HAS_LSX"
++  "vftintrz.w.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vftintrp_w_d"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
++		      (match_operand:V2DF 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRP_W_D))]
++  "ISA_HAS_LSX"
++  "vftintrp.w.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vftintrm_w_d"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
++		      (match_operand:V2DF 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRM_W_D))]
++  "ISA_HAS_LSX"
++  "vftintrm.w.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vftintrne_w_d"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V2DF 1 "register_operand" "f")
++		      (match_operand:V2DF 2 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRNE_W_D))]
++  "ISA_HAS_LSX"
++  "vftintrne.w.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vftinth_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTH_L_H))]
++  "ISA_HAS_LSX"
++  "vftinth.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintl_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTL_L_S))]
++  "ISA_HAS_LSX"
++  "vftintl.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vffinth_d_w"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFFINTH_D_W))]
++  "ISA_HAS_LSX"
++  "vffinth.d.w\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vffintl_d_w"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(unspec:V2DF [(match_operand:V4SI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFFINTL_D_W))]
++  "ISA_HAS_LSX"
++  "vffintl.d.w\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vftintrzh_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRZH_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrzh.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrzl_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRZL_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrzl.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrph_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRPH_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrph.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrpl_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRPL_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrpl.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrmh_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRMH_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrmh.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrml_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRML_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrml.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrneh_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRNEH_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrneh.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vftintrnel_l_s"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFTINTRNEL_L_S))]
++  "ISA_HAS_LSX"
++  "vftintrnel.l.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfrintrne_s"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRNE_S))]
++  "ISA_HAS_LSX"
++  "vfrintrne.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfrintrne_d"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRNE_D))]
++  "ISA_HAS_LSX"
++  "vfrintrne.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vfrintrz_s"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRZ_S))]
++  "ISA_HAS_LSX"
++  "vfrintrz.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfrintrz_d"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRZ_D))]
++  "ISA_HAS_LSX"
++  "vfrintrz.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vfrintrp_s"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRP_S))]
++  "ISA_HAS_LSX"
++  "vfrintrp.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfrintrp_d"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRP_D))]
++  "ISA_HAS_LSX"
++  "vfrintrp.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++(define_insn "lsx_vfrintrm_s"
++  [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRM_S))]
++  "ISA_HAS_LSX"
++  "vfrintrm.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "lsx_vfrintrm_d"
++  [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
++		     UNSPEC_LSX_VFRINTRM_D))]
++  "ISA_HAS_LSX"
++  "vfrintrm.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++;; Vector versions of the floating-point frint patterns.
++;; Expands to btrunc, ceil, floor, rint.
++(define_insn "v4sf2"
++ [(set (match_operand:V4SF 0 "register_operand" "=f")
++	(unspec:V4SF [(match_operand:V4SF 1 "register_operand" "f")]
++			 FRINT_S))]
++  "ISA_HAS_LSX"
++  "vfrint.s\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V4SF")])
++
++(define_insn "v2df2"
++ [(set (match_operand:V2DF 0 "register_operand" "=f")
++	(unspec:V2DF [(match_operand:V2DF 1 "register_operand" "f")]
++			 FRINT_D))]
++  "ISA_HAS_LSX"
++  "vfrint.d\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "V2DF")])
++
++;; Expands to round.
++(define_insn "round2"
++ [(set (match_operand:FLSX 0 "register_operand" "=f")
++	(unspec:FLSX [(match_operand:FLSX 1 "register_operand" "f")]
++			 UNSPEC_LSX_VFRINT))]
++  "ISA_HAS_LSX"
++  "vfrint.\t%w0,%w1"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++;; Offset load and broadcast
++(define_expand "lsx_vldrepl_"
++  [(match_operand:LSX 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq12_operand")]
++  "ISA_HAS_LSX"
++{
++  emit_insn (gen_lsx_vldrepl__insn
++	     (operands[0], operands[1], operands[2]));
++  DONE;
++})
++
++(define_insn "lsx_vldrepl__insn"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++	(vec_duplicate:LSX
++	  (mem: (plus:DI (match_operand:DI 1 "register_operand" "r")
++				   (match_operand 2 "aq12_operand")))))]
++  "ISA_HAS_LSX"
++{
++    return "vldrepl.\t%w0,%1,%2";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++(define_insn "lsx_vldrepl__insn_0"
++  [(set (match_operand:LSX 0 "register_operand" "=f")
++    (vec_duplicate:LSX
++      (mem: (match_operand:DI 1 "register_operand" "r"))))]
++  "ISA_HAS_LSX"
++{
++    return "vldrepl.\t%w0,%1,0";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++;; Offset store by sel
++(define_expand "lsx_vstelm_"
++  [(match_operand:LSX 0 "register_operand")
++   (match_operand 3 "const__operand")
++   (match_operand 2 "aq8_operand")
++   (match_operand 1 "pmode_register_operand")]
++  "ISA_HAS_LSX"
++{
++  emit_insn (gen_lsx_vstelm__insn
++	     (operands[1], operands[2], operands[0], operands[3]));
++  DONE;
++})
++
++(define_insn "lsx_vstelm__insn"
++  [(set (mem: (plus:DI (match_operand:DI 0 "register_operand" "r")
++				 (match_operand 1 "aq8_operand")))
++	(vec_select:
++	  (match_operand:LSX 2 "register_operand" "f")
++	  (parallel [(match_operand 3 "const__operand" "")])))]
++
++  "ISA_HAS_LSX"
++{
++  return "vstelm.\t%w2,%0,%1,%3";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++;; Offset is "0"
++(define_insn "lsx_vstelm__insn_0"
++  [(set (mem: (match_operand:DI 0 "register_operand" "r"))
++    (vec_select:
++      (match_operand:LSX 1 "register_operand" "f")
++      (parallel [(match_operand:SI 2 "const__operand")])))]
++  "ISA_HAS_LSX"
++{
++    return "vstelm.\t%w1,%0,0,%2";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "")
++   (set_attr "length" "4")])
++
++(define_expand "lsx_vld"
++  [(match_operand:V16QI 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq12b_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (operands[0], gen_rtx_MEM (V16QImode, addr));
++  DONE;
++})
++
++(define_expand "lsx_vst"
++  [(match_operand:V16QI 0 "register_operand")
++   (match_operand 1 "pmode_register_operand")
++   (match_operand 2 "aq12b_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx addr = plus_constant (GET_MODE (operands[1]), operands[1],
++			    INTVAL (operands[2]));
++  loongarch_emit_move (gen_rtx_MEM (V16QImode, addr), operands[0]);
++  DONE;
++})
++
++(define_insn "lsx_vssrln__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRLN))]
++  "ISA_HAS_LSX"
++  "vssrln..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++
++(define_insn "lsx_vssrlrn__"
++  [(set (match_operand: 0 "register_operand" "=f")
++	(unspec: [(match_operand:ILSX_DWH 1 "register_operand" "f")
++			  (match_operand:ILSX_DWH 2 "register_operand" "f")]
++			 UNSPEC_LSX_VSSRLRN))]
++  "ISA_HAS_LSX"
++  "vssrlrn..\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "vorn3"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(ior:ILSX (not:ILSX (match_operand:ILSX 2 "register_operand" "f"))
++		  (match_operand:ILSX 1 "register_operand" "f")))]
++  "ISA_HAS_LSX"
++  "vorn.v\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_logic")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vldi"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand 1 "const_imm13_operand")]
++		    UNSPEC_LSX_VLDI))]
++  "ISA_HAS_LSX"
++{
++  HOST_WIDE_INT val = INTVAL (operands[1]);
++  if (val < 0)
++  {
++    HOST_WIDE_INT modeVal = (val & 0xf00) >> 8;
++    if (modeVal < 13)
++      return  "vldi\t%w0,%1";
++    else
++      sorry ("imm13 only support 0000 ~ 1100 in bits 9 ~ 12 when bit '13' is 1");
++    return "#";
++  }
++  else
++    return "vldi\t%w0,%1";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vshuf_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")
++		       (match_operand:V16QI 2 "register_operand" "f")
++		       (match_operand:V16QI 3 "register_operand" "f")]
++		      UNSPEC_LSX_VSHUF_B))]
++  "ISA_HAS_LSX"
++  "vshuf.b\t%w0,%w1,%w2,%w3"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vldx"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(unspec:V16QI [(match_operand:DI 1 "register_operand" "r")
++		       (match_operand:DI 2 "reg_or_0_operand" "rJ")]
++		      UNSPEC_LSX_VLDX))]
++  "ISA_HAS_LSX"
++{
++  return "vldx\t%w0,%1,%z2";
++}
++  [(set_attr "type" "simd_load")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vstx"
++  [(set (mem:V16QI (plus:DI (match_operand:DI 1 "register_operand" "r")
++			    (match_operand:DI 2 "reg_or_0_operand" "rJ")))
++	(unspec: V16QI [(match_operand:V16QI 0 "register_operand" "f")]
++		      UNSPEC_LSX_VSTX))]
++
++  "ISA_HAS_LSX"
++{
++  return "vstx\t%w0,%1,%z2";
++}
++  [(set_attr "type" "simd_store")
++   (set_attr "mode" "DI")])
++
++(define_insn "lsx_vextl_qu_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VEXTL_QU_DU))]
++  "ISA_HAS_LSX"
++  "vextl.qu.du\t%w0,%w1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vseteqz_v"
++  [(set (match_operand:FCC 0 "register_operand" "=z")
++	(eq:FCC
++	  (unspec:SI [(match_operand:V16QI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VSETEQZ_V)
++	  (match_operand:SI 2 "const_0_operand")))]
++  "ISA_HAS_LSX"
++{
++  return "vseteqz.v\t%0,%1";
++}
++  [(set_attr "type" "simd_fcmp")
++   (set_attr "mode" "FCC")])
++
++;; Vector reduction operation
++(define_expand "reduc_plus_scal_v2di"
++  [(match_operand:DI 0 "register_operand")
++   (match_operand:V2DI 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (V2DImode);
++  emit_insn (gen_lsx_vhaddw_q_d (tmp, operands[1], operands[1]));
++  emit_insn (gen_vec_extractv2didi (operands[0], tmp, const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_plus_scal_v4si"
++  [(match_operand:SI 0 "register_operand")
++   (match_operand:V4SI 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (V2DImode);
++  rtx tmp1 = gen_reg_rtx (V2DImode);
++  emit_insn (gen_lsx_vhaddw_d_w (tmp, operands[1], operands[1]));
++  emit_insn (gen_lsx_vhaddw_q_d (tmp1, tmp, tmp));
++  emit_insn (gen_vec_extractv4sisi (operands[0], gen_lowpart (V4SImode,tmp1),
++				    const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_plus_scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:FLSX 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_add3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc__scal_"
++  [(any_bitwise:
++      (match_operand: 0 "register_operand")
++      (match_operand:ILSX 1 "register_operand"))]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_smax_scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:LSX 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_smax3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_smin_scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:LSX 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_smin3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_umax_scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILSX 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_umax3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_expand "reduc_umin_scal_"
++  [(match_operand: 0 "register_operand")
++   (match_operand:ILSX 1 "register_operand")]
++  "ISA_HAS_LSX"
++{
++  rtx tmp = gen_reg_rtx (mode);
++  loongarch_expand_vector_reduc (gen_umin3, tmp, operands[1]);
++  emit_insn (gen_vec_extract (operands[0], tmp,
++					      const0_rtx));
++  DONE;
++})
++
++(define_insn "lsx_vwev_d_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(addsubmul:V2DI
++	  (any_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)])))
++	  (any_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)])))))]
++  "ISA_HAS_LSX"
++  "vwev.d.w\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vwev_w_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(addsubmul:V4SI
++	  (any_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))
++	  (any_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))))]
++  "ISA_HAS_LSX"
++  "vwev.w.h\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vwev_h_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(addsubmul:V8HI
++	  (any_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))
++	  (any_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))))]
++  "ISA_HAS_LSX"
++  "vwev.h.b\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vwod_d_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(addsubmul:V2DI
++	  (any_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)])))
++	  (any_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)])))))]
++  "ISA_HAS_LSX"
++  "vwod.d.w\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vwod_w_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(addsubmul:V4SI
++	  (any_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))
++	  (any_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))))]
++  "ISA_HAS_LSX"
++  "vwod.w.h\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vwod_h_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(addsubmul:V8HI
++	  (any_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))
++	  (any_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))))]
++  "ISA_HAS_LSX"
++  "vwod.h.b\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vwev_d_wu_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(addmul:V2DI
++	  (zero_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)])))
++	  (sign_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)])))))]
++  "ISA_HAS_LSX"
++  "vwev.d.wu.w\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vwev_w_hu_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(addmul:V4SI
++	  (zero_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))
++	  (sign_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)])))))]
++  "ISA_HAS_LSX"
++  "vwev.w.hu.h\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vwev_h_bu_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(addmul:V8HI
++	  (zero_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 1 "register_operand" "%f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))
++	  (sign_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 2 "register_operand" "f")
++	      (parallel [(const_int 0) (const_int 2)
++			 (const_int 4) (const_int 6)
++			 (const_int 8) (const_int 10)
++			 (const_int 12) (const_int 14)])))))]
++  "ISA_HAS_LSX"
++  "vwev.h.bu.b\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vwod_d_wu_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(addmul:V2DI
++	  (zero_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)])))
++	  (sign_extend:V2DI
++	    (vec_select:V2SI
++	      (match_operand:V4SI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)])))))]
++  "ISA_HAS_LSX"
++  "vwod.d.wu.w\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vwod_w_hu_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(addmul:V4SI
++	  (zero_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))
++	  (sign_extend:V4SI
++	    (vec_select:V4HI
++	      (match_operand:V8HI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)])))))]
++  "ISA_HAS_LSX"
++  "vwod.w.hu.h\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vwod_h_bu_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(addmul:V8HI
++	  (zero_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 1 "register_operand" "%f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))
++	  (sign_extend:V8HI
++	    (vec_select:V8QI
++	      (match_operand:V16QI 2 "register_operand" "f")
++	      (parallel [(const_int 1) (const_int 3)
++			 (const_int 5) (const_int 7)
++			 (const_int 9) (const_int 11)
++			 (const_int 13) (const_int 15)])))))]
++  "ISA_HAS_LSX"
++  "vwod.h.bu.b\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vaddwev_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VADDWEV))]
++  "ISA_HAS_LSX"
++  "vaddwev.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vaddwev_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VADDWEV2))]
++  "ISA_HAS_LSX"
++  "vaddwev.q.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vaddwod_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VADDWOD))]
++  "ISA_HAS_LSX"
++  "vaddwod.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vaddwod_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VADDWOD2))]
++  "ISA_HAS_LSX"
++  "vaddwod.q.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vsubwev_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSUBWEV))]
++  "ISA_HAS_LSX"
++  "vsubwev.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vsubwev_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSUBWEV2))]
++  "ISA_HAS_LSX"
++  "vsubwev.q.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vsubwod_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSUBWOD))]
++  "ISA_HAS_LSX"
++  "vsubwod.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vsubwod_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSUBWOD2))]
++  "ISA_HAS_LSX"
++  "vsubwod.q.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vaddwev_q_du_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VADDWEV3))]
++  "ISA_HAS_LSX"
++  "vaddwev.q.du.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vaddwod_q_du_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VADDWOD3))]
++  "ISA_HAS_LSX"
++  "vaddwod.q.du.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmulwev_q_du_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMULWEV3))]
++  "ISA_HAS_LSX"
++  "vmulwev.q.du.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmulwod_q_du_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMULWOD3))]
++  "ISA_HAS_LSX"
++  "vmulwod.q.du.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmulwev_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMULWEV))]
++  "ISA_HAS_LSX"
++  "vmulwev.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmulwev_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMULWEV2))]
++  "ISA_HAS_LSX"
++  "vmulwev.q.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmulwod_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMULWOD))]
++  "ISA_HAS_LSX"
++  "vmulwod.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmulwod_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VMULWOD2))]
++  "ISA_HAS_LSX"
++  "vmulwod.q.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vhaddw_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VHADDW_Q_D))]
++  "ISA_HAS_LSX"
++  "vhaddw.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vhaddw_qu_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VHADDW_QU_DU))]
++  "ISA_HAS_LSX"
++  "vhaddw.qu.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vhsubw_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VHSUBW_Q_D))]
++  "ISA_HAS_LSX"
++  "vhsubw.q.d\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vhsubw_qu_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VHSUBW_QU_DU))]
++  "ISA_HAS_LSX"
++  "vhsubw.qu.du\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwev_d_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(plus:V2DI
++	  (match_operand:V2DI 1 "register_operand" "0")
++	  (mult:V2DI
++	    (any_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)])))
++	    (any_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwev.d.w\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwev_w_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(plus:V4SI
++	  (match_operand:V4SI 1 "register_operand" "0")
++	  (mult:V4SI
++	    (any_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)])))
++	    (any_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwev.w.h\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vmaddwev_h_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(plus:V8HI
++	  (match_operand:V8HI 1 "register_operand" "0")
++	  (mult:V8HI
++	    (any_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)])))
++	    (any_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwev.h.b\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vmaddwod_d_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(plus:V2DI
++	  (match_operand:V2DI 1 "register_operand" "0")
++	  (mult:V2DI
++	    (any_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)])))
++	    (any_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwod.d.w\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwod_w_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(plus:V4SI
++	  (match_operand:V4SI 1 "register_operand" "0")
++	  (mult:V4SI
++	    (any_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)])))
++	    (any_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwod.w.h\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vmaddwod_h_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(plus:V8HI
++	  (match_operand:V8HI 1 "register_operand" "0")
++	  (mult:V8HI
++	    (any_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)])))
++	    (any_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwod.h.b\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vmaddwev_d_wu_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(plus:V2DI
++	  (match_operand:V2DI 1 "register_operand" "0")
++	  (mult:V2DI
++	    (zero_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)])))
++	    (sign_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwev.d.wu.w\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwev_w_hu_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(plus:V4SI
++	  (match_operand:V4SI 1 "register_operand" "0")
++	  (mult:V4SI
++	    (zero_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)])))
++	    (sign_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwev.w.hu.h\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vmaddwev_h_bu_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(plus:V8HI
++	  (match_operand:V8HI 1 "register_operand" "0")
++	  (mult:V8HI
++	    (zero_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 2 "register_operand" "%f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)])))
++	    (sign_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 3 "register_operand" "f")
++		(parallel [(const_int 0) (const_int 2)
++			   (const_int 4) (const_int 6)
++			   (const_int 8) (const_int 10)
++			   (const_int 12) (const_int 14)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwev.h.bu.b\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vmaddwod_d_wu_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(plus:V2DI
++	  (match_operand:V2DI 1 "register_operand" "0")
++	  (mult:V2DI
++	    (zero_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)])))
++	    (sign_extend:V2DI
++	      (vec_select:V2SI
++		(match_operand:V4SI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwod.d.wu.w\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwod_w_hu_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(plus:V4SI
++	  (match_operand:V4SI 1 "register_operand" "0")
++	  (mult:V4SI
++	    (zero_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)])))
++	    (sign_extend:V4SI
++	      (vec_select:V4HI
++		(match_operand:V8HI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwod.w.hu.h\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vmaddwod_h_bu_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(plus:V8HI
++	  (match_operand:V8HI 1 "register_operand" "0")
++	  (mult:V8HI
++	    (zero_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 2 "register_operand" "%f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)])))
++	    (sign_extend:V8HI
++	      (vec_select:V8QI
++		(match_operand:V16QI 3 "register_operand" "f")
++		(parallel [(const_int 1) (const_int 3)
++			   (const_int 5) (const_int 7)
++			   (const_int 9) (const_int 11)
++			   (const_int 13) (const_int 15)]))))))]
++  "ISA_HAS_LSX"
++  "vmaddwod.h.bu.b\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_fmadd")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vmaddwev_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
++		      (match_operand:V2DI 2 "register_operand" "f")
++		      (match_operand:V2DI 3 "register_operand" "f")]
++		     UNSPEC_LSX_VMADDWEV))]
++  "ISA_HAS_LSX"
++  "vmaddwev.q.d\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwod_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
++		      (match_operand:V2DI 2 "register_operand" "f")
++		      (match_operand:V2DI 3 "register_operand" "f")]
++		     UNSPEC_LSX_VMADDWOD))]
++  "ISA_HAS_LSX"
++  "vmaddwod.q.d\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwev_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
++		      (match_operand:V2DI 2 "register_operand" "f")
++		      (match_operand:V2DI 3 "register_operand" "f")]
++		     UNSPEC_LSX_VMADDWEV2))]
++  "ISA_HAS_LSX"
++  "vmaddwev.q.du\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwod_q_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
++		      (match_operand:V2DI 2 "register_operand" "f")
++		      (match_operand:V2DI 3 "register_operand" "f")]
++		     UNSPEC_LSX_VMADDWOD2))]
++  "ISA_HAS_LSX"
++  "vmaddwod.q.du\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwev_q_du_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
++		      (match_operand:V2DI 2 "register_operand" "f")
++		      (match_operand:V2DI 3 "register_operand" "f")]
++		     UNSPEC_LSX_VMADDWEV3))]
++  "ISA_HAS_LSX"
++  "vmaddwev.q.du.d\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmaddwod_q_du_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
++		      (match_operand:V2DI 2 "register_operand" "f")
++		      (match_operand:V2DI 3 "register_operand" "f")]
++		     UNSPEC_LSX_VMADDWOD3))]
++  "ISA_HAS_LSX"
++  "vmaddwod.q.du.d\t%w0,%w2,%w3"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vrotr_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand:ILSX 2 "register_operand" "f")]
++		     UNSPEC_LSX_VROTR))]
++  "ISA_HAS_LSX"
++  "vrotr.\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vadd_q"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VADD_Q))]
++  "ISA_HAS_LSX"
++  "vadd.q\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vsub_q"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")
++		      (match_operand:V2DI 2 "register_operand" "f")]
++		     UNSPEC_LSX_VSUB_Q))]
++  "ISA_HAS_LSX"
++  "vsub.q\t%w0,%w1,%w2"
++  [(set_attr "type" "simd_int_arith")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vmskgez_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")]
++		      UNSPEC_LSX_VMSKGEZ))]
++  "ISA_HAS_LSX"
++  "vmskgez.b\t%w0,%w1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vmsknz_b"
++  [(set (match_operand:V16QI 0 "register_operand" "=f")
++	(unspec:V16QI [(match_operand:V16QI 1 "register_operand" "f")]
++		      UNSPEC_LSX_VMSKNZ))]
++  "ISA_HAS_LSX"
++  "vmsknz.b\t%w0,%w1"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V16QI")])
++
++(define_insn "lsx_vexth_h_b"
++  [(set (match_operand:V8HI 0 "register_operand" "=f")
++	(any_extend:V8HI
++	  (vec_select:V8QI
++	    (match_operand:V16QI 1 "register_operand" "f")
++	    (parallel [(const_int 8) (const_int 9)
++		       (const_int 10) (const_int 11)
++		       (const_int 12) (const_int 13)
++		       (const_int 14) (const_int 15)]))))]
++  "ISA_HAS_LSX"
++  "vexth.h.b\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V8HI")])
++
++(define_insn "lsx_vexth_w_h"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(any_extend:V4SI
++	  (vec_select:V4HI
++	    (match_operand:V8HI 1 "register_operand" "f")
++	    (parallel [(const_int 4) (const_int 5)
++		       (const_int 6) (const_int 7)]))))]
++  "ISA_HAS_LSX"
++  "vexth.w.h\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V4SI")])
++
++(define_insn "lsx_vexth_d_w"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(any_extend:V2DI
++	  (vec_select:V2SI
++	    (match_operand:V4SI 1 "register_operand" "f")
++	    (parallel [(const_int 2) (const_int 3)]))))]
++  "ISA_HAS_LSX"
++  "vexth.d.w\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vexth_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VEXTH_Q_D))]
++  "ISA_HAS_LSX"
++  "vexth.q.d\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vexth_qu_du"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VEXTH_QU_DU))]
++  "ISA_HAS_LSX"
++  "vexth.qu.du\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vrotri_"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(rotatert:ILSX (match_operand:ILSX 1 "register_operand" "f")
++		      (match_operand 2 "const__operand" "")))]
++  "ISA_HAS_LSX"
++  "vrotri.\t%w0,%w1,%2"
++  [(set_attr "type" "simd_shf")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vextl_q_d"
++  [(set (match_operand:V2DI 0 "register_operand" "=f")
++	(unspec:V2DI [(match_operand:V2DI 1 "register_operand" "f")]
++		     UNSPEC_LSX_VEXTL_Q_D))]
++  "ISA_HAS_LSX"
++  "vextl.q.d\t%w0,%w1"
++  [(set_attr "type" "simd_fcvt")
++   (set_attr "mode" "V2DI")])
++
++(define_insn "lsx_vsrlni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSRLNI))]
++  "ISA_HAS_LSX"
++  "vsrlni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrlrni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSRLRNI))]
++  "ISA_HAS_LSX"
++  "vsrlrni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrlni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSSRLNI))]
++  "ISA_HAS_LSX"
++  "vssrlni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrlni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSSRLNI2))]
++  "ISA_HAS_LSX"
++  "vssrlni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrlrni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSSRLRNI))]
++  "ISA_HAS_LSX"
++  "vssrlrni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrlrni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSSRLRNI2))]
++  "ISA_HAS_LSX"
++  "vssrlrni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrani__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSRANI))]
++  "ISA_HAS_LSX"
++  "vsrani..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vsrarni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSRARNI))]
++  "ISA_HAS_LSX"
++  "vsrarni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrani__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		    UNSPEC_LSX_VSSRANI))]
++  "ISA_HAS_LSX"
++  "vssrani..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrani__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSSRANI2))]
++  "ISA_HAS_LSX"
++  "vssrani..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrarni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSSRARNI))]
++  "ISA_HAS_LSX"
++  "vssrarni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vssrarni__"
++  [(set (match_operand:ILSX 0 "register_operand" "=f")
++	(unspec:ILSX [(match_operand:ILSX 1 "register_operand" "0")
++		      (match_operand:ILSX 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VSSRARNI2))]
++  "ISA_HAS_LSX"
++  "vssrarni..\t%w0,%w2,%3"
++  [(set_attr "type" "simd_shift")
++   (set_attr "mode" "")])
++
++(define_insn "lsx_vpermi_w"
++  [(set (match_operand:V4SI 0 "register_operand" "=f")
++	(unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
++		      (match_operand:V4SI 2 "register_operand" "f")
++		      (match_operand 3 "const_uimm8_operand" "")]
++		     UNSPEC_LSX_VPERMI))]
++  "ISA_HAS_LSX"
++  "vpermi.w\t%w0,%w2,%3"
++  [(set_attr "type" "simd_bit")
++   (set_attr "mode" "V4SI")])
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 4966d5569..cf9361b73 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -87,10 +87,42 @@
+   (and (match_code "const_int")
+        (match_test "IN_RANGE (INTVAL (op), 1, 4)")))
+ 
++(define_predicate "const_lsx_branch_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), -1024, 1023)")))
++
++(define_predicate "const_uimm3_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
++
++(define_predicate "const_8_to_11_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))
++
++(define_predicate "const_12_to_15_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))
++
++(define_predicate "const_uimm4_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))
++
+ (define_predicate "const_uimm5_operand"
+   (and (match_code "const_int")
+        (match_test "IN_RANGE (INTVAL (op), 0, 31)")))
+ 
++(define_predicate "const_uimm6_operand"
++  (and (match_code "const_int")
++       (match_test "UIMM6_OPERAND (INTVAL (op))")))
++
++(define_predicate "const_uimm7_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 127)")))
++
++(define_predicate "const_uimm8_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))
++
+ (define_predicate "const_uimm14_operand"
+   (and (match_code "const_int")
+        (match_test "IN_RANGE (INTVAL (op), 0, 16383)")))
+@@ -99,10 +131,74 @@
+   (and (match_code "const_int")
+        (match_test "IN_RANGE (INTVAL (op), 0, 32767)")))
+ 
++(define_predicate "const_imm5_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), -16, 15)")))
++
++(define_predicate "const_imm10_operand"
++  (and (match_code "const_int")
++       (match_test "IMM10_OPERAND (INTVAL (op))")))
++
+ (define_predicate "const_imm12_operand"
+   (and (match_code "const_int")
+        (match_test "IMM12_OPERAND (INTVAL (op))")))
+ 
++(define_predicate "const_imm13_operand"
++  (and (match_code "const_int")
++       (match_test "IMM13_OPERAND (INTVAL (op))")))
++
++(define_predicate "reg_imm10_operand"
++  (ior (match_operand 0 "const_imm10_operand")
++       (match_operand 0 "register_operand")))
++
++(define_predicate "aq8b_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)")))
++
++(define_predicate "aq8h_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 1)")))
++
++(define_predicate "aq8w_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 2)")))
++
++(define_predicate "aq8d_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)")))
++
++(define_predicate "aq10b_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 0)")))
++
++(define_predicate "aq10h_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 1)")))
++
++(define_predicate "aq10w_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)")))
++
++(define_predicate "aq10d_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 3)")))
++
++(define_predicate "aq12b_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 12, 0)")))
++
++(define_predicate "aq12h_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 11, 1)")))
++
++(define_predicate "aq12w_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 10, 2)")))
++
++(define_predicate "aq12d_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 9, 3)")))
++
+ (define_predicate "sle_operand"
+   (and (match_code "const_int")
+        (match_test "IMM12_OPERAND (INTVAL (op) + 1)")))
+@@ -112,29 +208,206 @@
+        (match_test "INTVAL (op) + 1 != 0")))
+ 
+ (define_predicate "const_0_operand"
+-  (and (match_code "const_int,const_double,const_vector")
++  (and (match_code "const_int,const_wide_int,const_double,const_vector")
+        (match_test "op == CONST0_RTX (GET_MODE (op))")))
+ 
++(define_predicate "const_m1_operand"
++  (and (match_code "const_int,const_wide_int,const_double,const_vector")
++       (match_test "op == CONSTM1_RTX (GET_MODE (op))")))
++
++(define_predicate "reg_or_m1_operand"
++  (ior (match_operand 0 "const_m1_operand")
++       (match_operand 0 "register_operand")))
++
+ (define_predicate "reg_or_0_operand"
+   (ior (match_operand 0 "const_0_operand")
+        (match_operand 0 "register_operand")))
+ 
+ (define_predicate "const_1_operand"
+-  (and (match_code "const_int,const_double,const_vector")
++  (and (match_code "const_int,const_wide_int,const_double,const_vector")
+        (match_test "op == CONST1_RTX (GET_MODE (op))")))
+ 
+ (define_predicate "reg_or_1_operand"
+   (ior (match_operand 0 "const_1_operand")
+        (match_operand 0 "register_operand")))
+ 
++;; These are used in vec_merge, hence accept bitmask as const_int.
++(define_predicate "const_exp_2_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 1)")))
++
++(define_predicate "const_exp_4_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 3)")))
++
++(define_predicate "const_exp_8_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 7)")))
++
++(define_predicate "const_exp_16_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 15)")))
++
++(define_predicate "const_exp_32_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (exact_log2 (INTVAL (op)), 0, 31)")))
++
++;; This is used for indexing into vectors, and hence only accepts const_int.
++(define_predicate "const_0_or_1_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 1)")))
++
++(define_predicate "const_0_to_3_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))
++
++(define_predicate "const_0_to_7_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
++
++(define_predicate "const_2_or_3_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))
++
++(define_predicate "const_4_to_7_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))
++
++(define_predicate "const_8_to_15_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
++
++(define_predicate "const_16_to_31_operand"
++  (and (match_code "const_int")
++       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
++
++(define_predicate "qi_mask_operand"
++  (and (match_code "const_int")
++       (match_test "UINTVAL (op) == 0xff")))
++
++(define_predicate "hi_mask_operand"
++  (and (match_code "const_int")
++       (match_test "UINTVAL (op) == 0xffff")))
++
+ (define_predicate "lu52i_mask_operand"
+   (and (match_code "const_int")
+        (match_test "UINTVAL (op) == 0xfffffffffffff")))
+ 
++(define_predicate "si_mask_operand"
++  (and (match_code "const_int")
++       (match_test "UINTVAL (op) == 0xffffffff")))
++
+ (define_predicate "low_bitmask_operand"
+   (and (match_code "const_int")
+        (match_test "low_bitmask_len (mode, INTVAL (op)) > 12")))
+ 
++(define_predicate "d_operand"
++  (and (match_code "reg")
++       (match_test "GP_REG_P (REGNO (op))")))
++
++(define_predicate "db4_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 4, 0)")))
++
++(define_predicate "db7_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 7, 0)")))
++
++(define_predicate "db8_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op) + 1, 8, 0)")))
++
++(define_predicate "ib3_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op) - 1, 3, 0)")))
++
++(define_predicate "sb4_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 4, 0)")))
++
++(define_predicate "sb5_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 5, 0)")))
++
++(define_predicate "sb8_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 0)")))
++
++(define_predicate "sd8_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_signed_immediate_p (INTVAL (op), 8, 3)")))
++
++(define_predicate "ub4_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 0)")))
++
++(define_predicate "ub8_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 0)")))
++
++(define_predicate "uh4_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 1)")))
++
++(define_predicate "uw4_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 4, 2)")))
++
++(define_predicate "uw5_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 5, 2)")))
++
++(define_predicate "uw6_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 6, 2)")))
++
++(define_predicate "uw8_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_unsigned_immediate_p (INTVAL (op), 8, 2)")))
++
++(define_predicate "addiur2_operand"
++  (and (match_code "const_int")
++	(ior (match_test "INTVAL (op) == -1")
++	     (match_test "INTVAL (op) == 1")
++	     (match_test "INTVAL (op) == 4")
++	     (match_test "INTVAL (op) == 8")
++	     (match_test "INTVAL (op) == 12")
++	     (match_test "INTVAL (op) == 16")
++	     (match_test "INTVAL (op) == 20")
++	     (match_test "INTVAL (op) == 24"))))
++
++(define_predicate "addiusp_operand"
++  (and (match_code "const_int")
++       (ior (match_test "(IN_RANGE (INTVAL (op), 2, 257))")
++	    (match_test "(IN_RANGE (INTVAL (op), -258, -3))"))))
++
++(define_predicate "andi16_operand"
++  (and (match_code "const_int")
++	(ior (match_test "IN_RANGE (INTVAL (op), 1, 4)")
++	     (match_test "IN_RANGE (INTVAL (op), 7, 8)")
++	     (match_test "IN_RANGE (INTVAL (op), 15, 16)")
++	     (match_test "IN_RANGE (INTVAL (op), 31, 32)")
++	     (match_test "IN_RANGE (INTVAL (op), 63, 64)")
++	     (match_test "INTVAL (op) == 255")
++	     (match_test "INTVAL (op) == 32768")
++	     (match_test "INTVAL (op) == 65535"))))
++
++(define_predicate "movep_src_register"
++  (and (match_code "reg")
++       (ior (match_test ("IN_RANGE (REGNO (op), 2, 3)"))
++	    (match_test ("IN_RANGE (REGNO (op), 16, 20)")))))
++
++(define_predicate "movep_src_operand"
++  (ior (match_operand 0 "const_0_operand")
++       (match_operand 0 "movep_src_register")))
++
++(define_predicate "fcc_reload_operand"
++  (and (match_code "reg,subreg")
++       (match_test "FCC_REG_P (true_regnum (op))")))
++
++(define_predicate "muldiv_target_operand"
++		(match_operand 0 "register_operand"))
++
+ (define_predicate "const_call_insn_operand"
+   (match_code "const,symbol_ref,label_ref")
+ {
+@@ -303,3 +576,59 @@
+ (define_predicate "non_volatile_mem_operand"
+   (and (match_operand 0 "memory_operand")
+        (not (match_test "MEM_VOLATILE_P (op)"))))
++
++(define_predicate "const_vector_same_val_operand"
++  (match_code "const_vector")
++{
++  return loongarch_const_vector_same_val_p (op, mode);
++})
++
++(define_predicate "const_vector_same_simm5_operand"
++  (match_code "const_vector")
++{
++  return loongarch_const_vector_same_int_p (op, mode, -16, 15);
++})
++
++(define_predicate "const_vector_same_uimm5_operand"
++  (match_code "const_vector")
++{
++  return loongarch_const_vector_same_int_p (op, mode, 0, 31);
++})
++
++(define_predicate "const_vector_same_ximm5_operand"
++  (match_code "const_vector")
++{
++  return loongarch_const_vector_same_int_p (op, mode, -31, 31);
++})
++
++(define_predicate "const_vector_same_uimm6_operand"
++  (match_code "const_vector")
++{
++  return loongarch_const_vector_same_int_p (op, mode, 0, 63);
++})
++
++(define_predicate "par_const_vector_shf_set_operand"
++  (match_code "parallel")
++{
++  return loongarch_const_vector_shuffle_set_p (op, mode);
++})
++
++(define_predicate "reg_or_vector_same_val_operand"
++  (ior (match_operand 0 "register_operand")
++       (match_operand 0 "const_vector_same_val_operand")))
++
++(define_predicate "reg_or_vector_same_simm5_operand"
++  (ior (match_operand 0 "register_operand")
++       (match_operand 0 "const_vector_same_simm5_operand")))
++
++(define_predicate "reg_or_vector_same_uimm5_operand"
++  (ior (match_operand 0 "register_operand")
++       (match_operand 0 "const_vector_same_uimm5_operand")))
++
++(define_predicate "reg_or_vector_same_ximm5_operand"
++  (ior (match_operand 0 "register_operand")
++       (match_operand 0 "const_vector_same_ximm5_operand")))
++
++(define_predicate "reg_or_vector_same_uimm6_operand"
++  (ior (match_operand 0 "register_operand")
++       (match_operand 0 "const_vector_same_uimm6_operand")))
+diff --git a/gcc/doc/md.texi b/gcc/doc/md.texi
+index 3b544358b..b58da0787 100644
+--- a/gcc/doc/md.texi
++++ b/gcc/doc/md.texi
+@@ -2764,6 +2764,17 @@ as @code{st.w} and @code{ld.w}.
+ A signed 12-bit constant (for arithmetic instructions).
+ @item K
+ An unsigned 12-bit constant (for logic instructions).
++@item M
++A constant that cannot be loaded using @code{lui}, @code{addiu}
++or @code{ori}.
++@item N
++A constant in the range -65535 to -1 (inclusive).
++@item O
++A signed 15-bit constant.
++@item P
++A constant in the range 1 to 65535 (inclusive).
++@item R
++An address that can be used in a non-macro load or store.
+ @item ZB
+ An address that is held in a general-purpose register.
+ The offset is zero.
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-Loongson-SX-directive-builtin-function.patch b/LoongArch-Add-Loongson-SX-directive-builtin-function.patch
new file mode 100644
index 0000000000000000000000000000000000000000..43b5ad219d8b88a2c739c92f52eb5b2119533afe
--- /dev/null
+++ b/LoongArch-Add-Loongson-SX-directive-builtin-function.patch
@@ -0,0 +1,7549 @@
+From aafa5ab8c53dd2919d417b2f47e0c0e63ca7e10d Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 16 Mar 2023 16:31:04 +0800
+Subject: [PATCH 064/124] LoongArch: Add Loongson SX directive builtin function
+ support.
+
+gcc/ChangeLog:
+
+	* config.gcc: Export the header file lsxintrin.h.
+	* config/loongarch/loongarch-builtins.cc (LARCH_FTYPE_NAME4): Add builtin function support.
+	(enum loongarch_builtin_type): Ditto.
+	(AVAIL_ALL): Ditto.
+	(LARCH_BUILTIN): Ditto.
+	(LSX_BUILTIN): Ditto.
+	(LSX_BUILTIN_TEST_BRANCH): Ditto.
+	(LSX_NO_TARGET_BUILTIN): Ditto.
+	(CODE_FOR_lsx_vsadd_b): Ditto.
+	(CODE_FOR_lsx_vsadd_h): Ditto.
+	(CODE_FOR_lsx_vsadd_w): Ditto.
+	(CODE_FOR_lsx_vsadd_d): Ditto.
+	(CODE_FOR_lsx_vsadd_bu): Ditto.
+	(CODE_FOR_lsx_vsadd_hu): Ditto.
+	(CODE_FOR_lsx_vsadd_wu): Ditto.
+	(CODE_FOR_lsx_vsadd_du): Ditto.
+	(CODE_FOR_lsx_vadd_b): Ditto.
+	(CODE_FOR_lsx_vadd_h): Ditto.
+	(CODE_FOR_lsx_vadd_w): Ditto.
+	(CODE_FOR_lsx_vadd_d): Ditto.
+	(CODE_FOR_lsx_vaddi_bu): Ditto.
+	(CODE_FOR_lsx_vaddi_hu): Ditto.
+	(CODE_FOR_lsx_vaddi_wu): Ditto.
+	(CODE_FOR_lsx_vaddi_du): Ditto.
+	(CODE_FOR_lsx_vand_v): Ditto.
+	(CODE_FOR_lsx_vandi_b): Ditto.
+	(CODE_FOR_lsx_bnz_v): Ditto.
+	(CODE_FOR_lsx_bz_v): Ditto.
+	(CODE_FOR_lsx_vbitsel_v): Ditto.
+	(CODE_FOR_lsx_vseqi_b): Ditto.
+	(CODE_FOR_lsx_vseqi_h): Ditto.
+	(CODE_FOR_lsx_vseqi_w): Ditto.
+	(CODE_FOR_lsx_vseqi_d): Ditto.
+	(CODE_FOR_lsx_vslti_b): Ditto.
+	(CODE_FOR_lsx_vslti_h): Ditto.
+	(CODE_FOR_lsx_vslti_w): Ditto.
+	(CODE_FOR_lsx_vslti_d): Ditto.
+	(CODE_FOR_lsx_vslti_bu): Ditto.
+	(CODE_FOR_lsx_vslti_hu): Ditto.
+	(CODE_FOR_lsx_vslti_wu): Ditto.
+	(CODE_FOR_lsx_vslti_du): Ditto.
+	(CODE_FOR_lsx_vslei_b): Ditto.
+	(CODE_FOR_lsx_vslei_h): Ditto.
+	(CODE_FOR_lsx_vslei_w): Ditto.
+	(CODE_FOR_lsx_vslei_d): Ditto.
+	(CODE_FOR_lsx_vslei_bu): Ditto.
+	(CODE_FOR_lsx_vslei_hu): Ditto.
+	(CODE_FOR_lsx_vslei_wu): Ditto.
+	(CODE_FOR_lsx_vslei_du): Ditto.
+	(CODE_FOR_lsx_vdiv_b): Ditto.
+	(CODE_FOR_lsx_vdiv_h): Ditto.
+	(CODE_FOR_lsx_vdiv_w): Ditto.
+	(CODE_FOR_lsx_vdiv_d): Ditto.
+	(CODE_FOR_lsx_vdiv_bu): Ditto.
+	(CODE_FOR_lsx_vdiv_hu): Ditto.
+	(CODE_FOR_lsx_vdiv_wu): Ditto.
+	(CODE_FOR_lsx_vdiv_du): Ditto.
+	(CODE_FOR_lsx_vfadd_s): Ditto.
+	(CODE_FOR_lsx_vfadd_d): Ditto.
+	(CODE_FOR_lsx_vftintrz_w_s): Ditto.
+	(CODE_FOR_lsx_vftintrz_l_d): Ditto.
+	(CODE_FOR_lsx_vftintrz_wu_s): Ditto.
+	(CODE_FOR_lsx_vftintrz_lu_d): Ditto.
+	(CODE_FOR_lsx_vffint_s_w): Ditto.
+	(CODE_FOR_lsx_vffint_d_l): Ditto.
+	(CODE_FOR_lsx_vffint_s_wu): Ditto.
+	(CODE_FOR_lsx_vffint_d_lu): Ditto.
+	(CODE_FOR_lsx_vfsub_s): Ditto.
+	(CODE_FOR_lsx_vfsub_d): Ditto.
+	(CODE_FOR_lsx_vfmul_s): Ditto.
+	(CODE_FOR_lsx_vfmul_d): Ditto.
+	(CODE_FOR_lsx_vfdiv_s): Ditto.
+	(CODE_FOR_lsx_vfdiv_d): Ditto.
+	(CODE_FOR_lsx_vfmax_s): Ditto.
+	(CODE_FOR_lsx_vfmax_d): Ditto.
+	(CODE_FOR_lsx_vfmin_s): Ditto.
+	(CODE_FOR_lsx_vfmin_d): Ditto.
+	(CODE_FOR_lsx_vfsqrt_s): Ditto.
+	(CODE_FOR_lsx_vfsqrt_d): Ditto.
+	(CODE_FOR_lsx_vflogb_s): Ditto.
+	(CODE_FOR_lsx_vflogb_d): Ditto.
+	(CODE_FOR_lsx_vmax_b): Ditto.
+	(CODE_FOR_lsx_vmax_h): Ditto.
+	(CODE_FOR_lsx_vmax_w): Ditto.
+	(CODE_FOR_lsx_vmax_d): Ditto.
+	(CODE_FOR_lsx_vmaxi_b): Ditto.
+	(CODE_FOR_lsx_vmaxi_h): Ditto.
+	(CODE_FOR_lsx_vmaxi_w): Ditto.
+	(CODE_FOR_lsx_vmaxi_d): Ditto.
+	(CODE_FOR_lsx_vmax_bu): Ditto.
+	(CODE_FOR_lsx_vmax_hu): Ditto.
+	(CODE_FOR_lsx_vmax_wu): Ditto.
+	(CODE_FOR_lsx_vmax_du): Ditto.
+	(CODE_FOR_lsx_vmaxi_bu): Ditto.
+	(CODE_FOR_lsx_vmaxi_hu): Ditto.
+	(CODE_FOR_lsx_vmaxi_wu): Ditto.
+	(CODE_FOR_lsx_vmaxi_du): Ditto.
+	(CODE_FOR_lsx_vmin_b): Ditto.
+	(CODE_FOR_lsx_vmin_h): Ditto.
+	(CODE_FOR_lsx_vmin_w): Ditto.
+	(CODE_FOR_lsx_vmin_d): Ditto.
+	(CODE_FOR_lsx_vmini_b): Ditto.
+	(CODE_FOR_lsx_vmini_h): Ditto.
+	(CODE_FOR_lsx_vmini_w): Ditto.
+	(CODE_FOR_lsx_vmini_d): Ditto.
+	(CODE_FOR_lsx_vmin_bu): Ditto.
+	(CODE_FOR_lsx_vmin_hu): Ditto.
+	(CODE_FOR_lsx_vmin_wu): Ditto.
+	(CODE_FOR_lsx_vmin_du): Ditto.
+	(CODE_FOR_lsx_vmini_bu): Ditto.
+	(CODE_FOR_lsx_vmini_hu): Ditto.
+	(CODE_FOR_lsx_vmini_wu): Ditto.
+	(CODE_FOR_lsx_vmini_du): Ditto.
+	(CODE_FOR_lsx_vmod_b): Ditto.
+	(CODE_FOR_lsx_vmod_h): Ditto.
+	(CODE_FOR_lsx_vmod_w): Ditto.
+	(CODE_FOR_lsx_vmod_d): Ditto.
+	(CODE_FOR_lsx_vmod_bu): Ditto.
+	(CODE_FOR_lsx_vmod_hu): Ditto.
+	(CODE_FOR_lsx_vmod_wu): Ditto.
+	(CODE_FOR_lsx_vmod_du): Ditto.
+	(CODE_FOR_lsx_vmul_b): Ditto.
+	(CODE_FOR_lsx_vmul_h): Ditto.
+	(CODE_FOR_lsx_vmul_w): Ditto.
+	(CODE_FOR_lsx_vmul_d): Ditto.
+	(CODE_FOR_lsx_vclz_b): Ditto.
+	(CODE_FOR_lsx_vclz_h): Ditto.
+	(CODE_FOR_lsx_vclz_w): Ditto.
+	(CODE_FOR_lsx_vclz_d): Ditto.
+	(CODE_FOR_lsx_vnor_v): Ditto.
+	(CODE_FOR_lsx_vor_v): Ditto.
+	(CODE_FOR_lsx_vori_b): Ditto.
+	(CODE_FOR_lsx_vnori_b): Ditto.
+	(CODE_FOR_lsx_vpcnt_b): Ditto.
+	(CODE_FOR_lsx_vpcnt_h): Ditto.
+	(CODE_FOR_lsx_vpcnt_w): Ditto.
+	(CODE_FOR_lsx_vpcnt_d): Ditto.
+	(CODE_FOR_lsx_vxor_v): Ditto.
+	(CODE_FOR_lsx_vxori_b): Ditto.
+	(CODE_FOR_lsx_vsll_b): Ditto.
+	(CODE_FOR_lsx_vsll_h): Ditto.
+	(CODE_FOR_lsx_vsll_w): Ditto.
+	(CODE_FOR_lsx_vsll_d): Ditto.
+	(CODE_FOR_lsx_vslli_b): Ditto.
+	(CODE_FOR_lsx_vslli_h): Ditto.
+	(CODE_FOR_lsx_vslli_w): Ditto.
+	(CODE_FOR_lsx_vslli_d): Ditto.
+	(CODE_FOR_lsx_vsra_b): Ditto.
+	(CODE_FOR_lsx_vsra_h): Ditto.
+	(CODE_FOR_lsx_vsra_w): Ditto.
+	(CODE_FOR_lsx_vsra_d): Ditto.
+	(CODE_FOR_lsx_vsrai_b): Ditto.
+	(CODE_FOR_lsx_vsrai_h): Ditto.
+	(CODE_FOR_lsx_vsrai_w): Ditto.
+	(CODE_FOR_lsx_vsrai_d): Ditto.
+	(CODE_FOR_lsx_vsrl_b): Ditto.
+	(CODE_FOR_lsx_vsrl_h): Ditto.
+	(CODE_FOR_lsx_vsrl_w): Ditto.
+	(CODE_FOR_lsx_vsrl_d): Ditto.
+	(CODE_FOR_lsx_vsrli_b): Ditto.
+	(CODE_FOR_lsx_vsrli_h): Ditto.
+	(CODE_FOR_lsx_vsrli_w): Ditto.
+	(CODE_FOR_lsx_vsrli_d): Ditto.
+	(CODE_FOR_lsx_vsub_b): Ditto.
+	(CODE_FOR_lsx_vsub_h): Ditto.
+	(CODE_FOR_lsx_vsub_w): Ditto.
+	(CODE_FOR_lsx_vsub_d): Ditto.
+	(CODE_FOR_lsx_vsubi_bu): Ditto.
+	(CODE_FOR_lsx_vsubi_hu): Ditto.
+	(CODE_FOR_lsx_vsubi_wu): Ditto.
+	(CODE_FOR_lsx_vsubi_du): Ditto.
+	(CODE_FOR_lsx_vpackod_d): Ditto.
+	(CODE_FOR_lsx_vpackev_d): Ditto.
+	(CODE_FOR_lsx_vpickod_d): Ditto.
+	(CODE_FOR_lsx_vpickev_d): Ditto.
+	(CODE_FOR_lsx_vrepli_b): Ditto.
+	(CODE_FOR_lsx_vrepli_h): Ditto.
+	(CODE_FOR_lsx_vrepli_w): Ditto.
+	(CODE_FOR_lsx_vrepli_d): Ditto.
+	(CODE_FOR_lsx_vsat_b): Ditto.
+	(CODE_FOR_lsx_vsat_h): Ditto.
+	(CODE_FOR_lsx_vsat_w): Ditto.
+	(CODE_FOR_lsx_vsat_d): Ditto.
+	(CODE_FOR_lsx_vsat_bu): Ditto.
+	(CODE_FOR_lsx_vsat_hu): Ditto.
+	(CODE_FOR_lsx_vsat_wu): Ditto.
+	(CODE_FOR_lsx_vsat_du): Ditto.
+	(CODE_FOR_lsx_vavg_b): Ditto.
+	(CODE_FOR_lsx_vavg_h): Ditto.
+	(CODE_FOR_lsx_vavg_w): Ditto.
+	(CODE_FOR_lsx_vavg_d): Ditto.
+	(CODE_FOR_lsx_vavg_bu): Ditto.
+	(CODE_FOR_lsx_vavg_hu): Ditto.
+	(CODE_FOR_lsx_vavg_wu): Ditto.
+	(CODE_FOR_lsx_vavg_du): Ditto.
+	(CODE_FOR_lsx_vavgr_b): Ditto.
+	(CODE_FOR_lsx_vavgr_h): Ditto.
+	(CODE_FOR_lsx_vavgr_w): Ditto.
+	(CODE_FOR_lsx_vavgr_d): Ditto.
+	(CODE_FOR_lsx_vavgr_bu): Ditto.
+	(CODE_FOR_lsx_vavgr_hu): Ditto.
+	(CODE_FOR_lsx_vavgr_wu): Ditto.
+	(CODE_FOR_lsx_vavgr_du): Ditto.
+	(CODE_FOR_lsx_vssub_b): Ditto.
+	(CODE_FOR_lsx_vssub_h): Ditto.
+	(CODE_FOR_lsx_vssub_w): Ditto.
+	(CODE_FOR_lsx_vssub_d): Ditto.
+	(CODE_FOR_lsx_vssub_bu): Ditto.
+	(CODE_FOR_lsx_vssub_hu): Ditto.
+	(CODE_FOR_lsx_vssub_wu): Ditto.
+	(CODE_FOR_lsx_vssub_du): Ditto.
+	(CODE_FOR_lsx_vabsd_b): Ditto.
+	(CODE_FOR_lsx_vabsd_h): Ditto.
+	(CODE_FOR_lsx_vabsd_w): Ditto.
+	(CODE_FOR_lsx_vabsd_d): Ditto.
+	(CODE_FOR_lsx_vabsd_bu): Ditto.
+	(CODE_FOR_lsx_vabsd_hu): Ditto.
+	(CODE_FOR_lsx_vabsd_wu): Ditto.
+	(CODE_FOR_lsx_vabsd_du): Ditto.
+	(CODE_FOR_lsx_vftint_w_s): Ditto.
+	(CODE_FOR_lsx_vftint_l_d): Ditto.
+	(CODE_FOR_lsx_vftint_wu_s): Ditto.
+	(CODE_FOR_lsx_vftint_lu_d): Ditto.
+	(CODE_FOR_lsx_vandn_v): Ditto.
+	(CODE_FOR_lsx_vorn_v): Ditto.
+	(CODE_FOR_lsx_vneg_b): Ditto.
+	(CODE_FOR_lsx_vneg_h): Ditto.
+	(CODE_FOR_lsx_vneg_w): Ditto.
+	(CODE_FOR_lsx_vneg_d): Ditto.
+	(CODE_FOR_lsx_vshuf4i_d): Ditto.
+	(CODE_FOR_lsx_vbsrl_v): Ditto.
+	(CODE_FOR_lsx_vbsll_v): Ditto.
+	(CODE_FOR_lsx_vfmadd_s): Ditto.
+	(CODE_FOR_lsx_vfmadd_d): Ditto.
+	(CODE_FOR_lsx_vfmsub_s): Ditto.
+	(CODE_FOR_lsx_vfmsub_d): Ditto.
+	(CODE_FOR_lsx_vfnmadd_s): Ditto.
+	(CODE_FOR_lsx_vfnmadd_d): Ditto.
+	(CODE_FOR_lsx_vfnmsub_s): Ditto.
+	(CODE_FOR_lsx_vfnmsub_d): Ditto.
+	(CODE_FOR_lsx_vmuh_b): Ditto.
+	(CODE_FOR_lsx_vmuh_h): Ditto.
+	(CODE_FOR_lsx_vmuh_w): Ditto.
+	(CODE_FOR_lsx_vmuh_d): Ditto.
+	(CODE_FOR_lsx_vmuh_bu): Ditto.
+	(CODE_FOR_lsx_vmuh_hu): Ditto.
+	(CODE_FOR_lsx_vmuh_wu): Ditto.
+	(CODE_FOR_lsx_vmuh_du): Ditto.
+	(CODE_FOR_lsx_vsllwil_h_b): Ditto.
+	(CODE_FOR_lsx_vsllwil_w_h): Ditto.
+	(CODE_FOR_lsx_vsllwil_d_w): Ditto.
+	(CODE_FOR_lsx_vsllwil_hu_bu): Ditto.
+	(CODE_FOR_lsx_vsllwil_wu_hu): Ditto.
+	(CODE_FOR_lsx_vsllwil_du_wu): Ditto.
+	(CODE_FOR_lsx_vssran_b_h): Ditto.
+	(CODE_FOR_lsx_vssran_h_w): Ditto.
+	(CODE_FOR_lsx_vssran_w_d): Ditto.
+	(CODE_FOR_lsx_vssran_bu_h): Ditto.
+	(CODE_FOR_lsx_vssran_hu_w): Ditto.
+	(CODE_FOR_lsx_vssran_wu_d): Ditto.
+	(CODE_FOR_lsx_vssrarn_b_h): Ditto.
+	(CODE_FOR_lsx_vssrarn_h_w): Ditto.
+	(CODE_FOR_lsx_vssrarn_w_d): Ditto.
+	(CODE_FOR_lsx_vssrarn_bu_h): Ditto.
+	(CODE_FOR_lsx_vssrarn_hu_w): Ditto.
+	(CODE_FOR_lsx_vssrarn_wu_d): Ditto.
+	(CODE_FOR_lsx_vssrln_bu_h): Ditto.
+	(CODE_FOR_lsx_vssrln_hu_w): Ditto.
+	(CODE_FOR_lsx_vssrln_wu_d): Ditto.
+	(CODE_FOR_lsx_vssrlrn_bu_h): Ditto.
+	(CODE_FOR_lsx_vssrlrn_hu_w): Ditto.
+	(CODE_FOR_lsx_vssrlrn_wu_d): Ditto.
+	(loongarch_builtin_vector_type): Ditto.
+	(loongarch_build_cvpointer_type): Ditto.
+	(LARCH_ATYPE_CVPOINTER): Ditto.
+	(LARCH_ATYPE_BOOLEAN): Ditto.
+	(LARCH_ATYPE_V2SF): Ditto.
+	(LARCH_ATYPE_V2HI): Ditto.
+	(LARCH_ATYPE_V2SI): Ditto.
+	(LARCH_ATYPE_V4QI): Ditto.
+	(LARCH_ATYPE_V4HI): Ditto.
+	(LARCH_ATYPE_V8QI): Ditto.
+	(LARCH_ATYPE_V2DI): Ditto.
+	(LARCH_ATYPE_V4SI): Ditto.
+	(LARCH_ATYPE_V8HI): Ditto.
+	(LARCH_ATYPE_V16QI): Ditto.
+	(LARCH_ATYPE_V2DF): Ditto.
+	(LARCH_ATYPE_V4SF): Ditto.
+	(LARCH_ATYPE_V4DI): Ditto.
+	(LARCH_ATYPE_V8SI): Ditto.
+	(LARCH_ATYPE_V16HI): Ditto.
+	(LARCH_ATYPE_V32QI): Ditto.
+	(LARCH_ATYPE_V4DF): Ditto.
+	(LARCH_ATYPE_V8SF): Ditto.
+	(LARCH_ATYPE_UV2DI): Ditto.
+	(LARCH_ATYPE_UV4SI): Ditto.
+	(LARCH_ATYPE_UV8HI): Ditto.
+	(LARCH_ATYPE_UV16QI): Ditto.
+	(LARCH_ATYPE_UV4DI): Ditto.
+	(LARCH_ATYPE_UV8SI): Ditto.
+	(LARCH_ATYPE_UV16HI): Ditto.
+	(LARCH_ATYPE_UV32QI): Ditto.
+	(LARCH_ATYPE_UV2SI): Ditto.
+	(LARCH_ATYPE_UV4HI): Ditto.
+	(LARCH_ATYPE_UV8QI): Ditto.
+	(loongarch_builtin_vectorized_function): Ditto.
+	(LARCH_GET_BUILTIN): Ditto.
+	(loongarch_expand_builtin_insn): Ditto.
+	(loongarch_expand_builtin_lsx_test_branch): Ditto.
+	(loongarch_expand_builtin): Ditto.
+	* config/loongarch/loongarch-ftypes.def (1): Ditto.
+	(2): Ditto.
+	(3): Ditto.
+	(4): Ditto.
+	* config/loongarch/lsxintrin.h: New file.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config.gcc                             |    2 +-
+ gcc/config/loongarch/loongarch-builtins.cc | 1498 +++++-
+ gcc/config/loongarch/loongarch-ftypes.def  |  395 +-
+ gcc/config/loongarch/lsxintrin.h           | 5181 ++++++++++++++++++++
+ 4 files changed, 7070 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/config/loongarch/lsxintrin.h
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 61d81d8d8..4e149e0ef 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -456,7 +456,7 @@ mips*-*-*)
+ 	;;
+ loongarch*-*-*)
+ 	cpu_type=loongarch
+-	extra_headers="larchintrin.h"
++	extra_headers="larchintrin.h lsxintrin.h"
+ 	extra_objs="loongarch-c.o loongarch-builtins.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
+ 	extra_gcc_objs="loongarch-driver.o loongarch-cpu.o loongarch-opts.o loongarch-def.o"
+ 	extra_options="${extra_options} g.opt fused-madd.opt"
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index c8548a07f..de6428ac6 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -34,14 +34,18 @@ along with GCC; see the file COPYING3.  If not see
+ #include "recog.h"
+ #include "diagnostic.h"
+ #include "fold-const.h"
++#include "explow.h"
+ #include "expr.h"
+ #include "langhooks.h"
+ #include "emit-rtl.h"
++#include "case-cfn-macros.h"
+ 
+ /* Macros to create an enumeration identifier for a function prototype.  */
+ #define LARCH_FTYPE_NAME1(A, B) LARCH_##A##_FTYPE_##B
+ #define LARCH_FTYPE_NAME2(A, B, C) LARCH_##A##_FTYPE_##B##_##C
+ #define LARCH_FTYPE_NAME3(A, B, C, D) LARCH_##A##_FTYPE_##B##_##C##_##D
++#define LARCH_FTYPE_NAME4(A, B, C, D, E) \
++  LARCH_##A##_FTYPE_##B##_##C##_##D##_##E
+ 
+ /* Classifies the prototype of a built-in function.  */
+ enum loongarch_function_type
+@@ -64,6 +68,12 @@ enum loongarch_builtin_type
+      value and the arguments are mapped to operands 0 and above.  */
+   LARCH_BUILTIN_DIRECT_NO_TARGET,
+ 
++  /* For generating LoongArch LSX.  */
++  LARCH_BUILTIN_LSX,
++
++  /* The function corresponds to an LSX conditional branch instruction
++     combined with a compare instruction.  */
++  LARCH_BUILTIN_LSX_TEST_BRANCH,
+ };
+ 
+ /* Declare an availability predicate for built-in functions that require
+@@ -101,6 +111,7 @@ struct loongarch_builtin_description
+ };
+ 
+ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
++AVAIL_ALL (lsx, ISA_HAS_LSX)
+ 
+ /* Construct a loongarch_builtin_description from the given arguments.
+ 
+@@ -120,8 +131,8 @@ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
+ #define LARCH_BUILTIN(INSN, NAME, BUILTIN_TYPE, FUNCTION_TYPE, AVAIL) \
+   { \
+     CODE_FOR_loongarch_##INSN, "__builtin_loongarch_" NAME, \
+-      BUILTIN_TYPE, FUNCTION_TYPE, \
+-      loongarch_builtin_avail_##AVAIL \
++    BUILTIN_TYPE, FUNCTION_TYPE, \
++    loongarch_builtin_avail_##AVAIL \
+   }
+ 
+ /* Define __builtin_loongarch_, which is a LARCH_BUILTIN_DIRECT function
+@@ -137,6 +148,300 @@ AVAIL_ALL (hard_float, TARGET_HARD_FLOAT_ABI)
+   LARCH_BUILTIN (INSN, #INSN, LARCH_BUILTIN_DIRECT_NO_TARGET, \
+ 		 FUNCTION_TYPE, AVAIL)
+ 
++/* Define an LSX LARCH_BUILTIN_DIRECT function __builtin_lsx_
++   for instruction CODE_FOR_lsx_.  FUNCTION_TYPE is a builtin_description
++   field.  */
++#define LSX_BUILTIN(INSN, FUNCTION_TYPE)				\
++  { CODE_FOR_lsx_ ## INSN,						\
++    "__builtin_lsx_" #INSN,  LARCH_BUILTIN_DIRECT,			\
++    FUNCTION_TYPE, loongarch_builtin_avail_lsx }
++
++
++/* Define an LSX LARCH_BUILTIN_LSX_TEST_BRANCH function __builtin_lsx_
++   for instruction CODE_FOR_lsx_.  FUNCTION_TYPE is a builtin_description
++   field.  */
++#define LSX_BUILTIN_TEST_BRANCH(INSN, FUNCTION_TYPE)			\
++  { CODE_FOR_lsx_ ## INSN,						\
++    "__builtin_lsx_" #INSN, LARCH_BUILTIN_LSX_TEST_BRANCH,		\
++    FUNCTION_TYPE, loongarch_builtin_avail_lsx }
++
++/* Define an LSX LARCH_BUILTIN_DIRECT_NO_TARGET function __builtin_lsx_
++   for instruction CODE_FOR_lsx_.  FUNCTION_TYPE is a builtin_description
++   field.  */
++#define LSX_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE)			\
++  { CODE_FOR_lsx_ ## INSN,						\
++    "__builtin_lsx_" #INSN,  LARCH_BUILTIN_DIRECT_NO_TARGET,		\
++    FUNCTION_TYPE, loongarch_builtin_avail_lsx }
++
++/* LoongArch SX define CODE_FOR_lsx_xxx */
++#define CODE_FOR_lsx_vsadd_b CODE_FOR_ssaddv16qi3
++#define CODE_FOR_lsx_vsadd_h CODE_FOR_ssaddv8hi3
++#define CODE_FOR_lsx_vsadd_w CODE_FOR_ssaddv4si3
++#define CODE_FOR_lsx_vsadd_d CODE_FOR_ssaddv2di3
++#define CODE_FOR_lsx_vsadd_bu CODE_FOR_usaddv16qi3
++#define CODE_FOR_lsx_vsadd_hu CODE_FOR_usaddv8hi3
++#define CODE_FOR_lsx_vsadd_wu CODE_FOR_usaddv4si3
++#define CODE_FOR_lsx_vsadd_du CODE_FOR_usaddv2di3
++#define CODE_FOR_lsx_vadd_b CODE_FOR_addv16qi3
++#define CODE_FOR_lsx_vadd_h CODE_FOR_addv8hi3
++#define CODE_FOR_lsx_vadd_w CODE_FOR_addv4si3
++#define CODE_FOR_lsx_vadd_d CODE_FOR_addv2di3
++#define CODE_FOR_lsx_vaddi_bu CODE_FOR_addv16qi3
++#define CODE_FOR_lsx_vaddi_hu CODE_FOR_addv8hi3
++#define CODE_FOR_lsx_vaddi_wu CODE_FOR_addv4si3
++#define CODE_FOR_lsx_vaddi_du CODE_FOR_addv2di3
++#define CODE_FOR_lsx_vand_v CODE_FOR_andv16qi3
++#define CODE_FOR_lsx_vandi_b CODE_FOR_andv16qi3
++#define CODE_FOR_lsx_bnz_v CODE_FOR_lsx_bnz_v_b
++#define CODE_FOR_lsx_bz_v CODE_FOR_lsx_bz_v_b
++#define CODE_FOR_lsx_vbitsel_v CODE_FOR_lsx_vbitsel_b
++#define CODE_FOR_lsx_vseqi_b CODE_FOR_lsx_vseq_b
++#define CODE_FOR_lsx_vseqi_h CODE_FOR_lsx_vseq_h
++#define CODE_FOR_lsx_vseqi_w CODE_FOR_lsx_vseq_w
++#define CODE_FOR_lsx_vseqi_d CODE_FOR_lsx_vseq_d
++#define CODE_FOR_lsx_vslti_b CODE_FOR_lsx_vslt_b
++#define CODE_FOR_lsx_vslti_h CODE_FOR_lsx_vslt_h
++#define CODE_FOR_lsx_vslti_w CODE_FOR_lsx_vslt_w
++#define CODE_FOR_lsx_vslti_d CODE_FOR_lsx_vslt_d
++#define CODE_FOR_lsx_vslti_bu CODE_FOR_lsx_vslt_bu
++#define CODE_FOR_lsx_vslti_hu CODE_FOR_lsx_vslt_hu
++#define CODE_FOR_lsx_vslti_wu CODE_FOR_lsx_vslt_wu
++#define CODE_FOR_lsx_vslti_du CODE_FOR_lsx_vslt_du
++#define CODE_FOR_lsx_vslei_b CODE_FOR_lsx_vsle_b
++#define CODE_FOR_lsx_vslei_h CODE_FOR_lsx_vsle_h
++#define CODE_FOR_lsx_vslei_w CODE_FOR_lsx_vsle_w
++#define CODE_FOR_lsx_vslei_d CODE_FOR_lsx_vsle_d
++#define CODE_FOR_lsx_vslei_bu CODE_FOR_lsx_vsle_bu
++#define CODE_FOR_lsx_vslei_hu CODE_FOR_lsx_vsle_hu
++#define CODE_FOR_lsx_vslei_wu CODE_FOR_lsx_vsle_wu
++#define CODE_FOR_lsx_vslei_du CODE_FOR_lsx_vsle_du
++#define CODE_FOR_lsx_vdiv_b CODE_FOR_divv16qi3
++#define CODE_FOR_lsx_vdiv_h CODE_FOR_divv8hi3
++#define CODE_FOR_lsx_vdiv_w CODE_FOR_divv4si3
++#define CODE_FOR_lsx_vdiv_d CODE_FOR_divv2di3
++#define CODE_FOR_lsx_vdiv_bu CODE_FOR_udivv16qi3
++#define CODE_FOR_lsx_vdiv_hu CODE_FOR_udivv8hi3
++#define CODE_FOR_lsx_vdiv_wu CODE_FOR_udivv4si3
++#define CODE_FOR_lsx_vdiv_du CODE_FOR_udivv2di3
++#define CODE_FOR_lsx_vfadd_s CODE_FOR_addv4sf3
++#define CODE_FOR_lsx_vfadd_d CODE_FOR_addv2df3
++#define CODE_FOR_lsx_vftintrz_w_s CODE_FOR_fix_truncv4sfv4si2
++#define CODE_FOR_lsx_vftintrz_l_d CODE_FOR_fix_truncv2dfv2di2
++#define CODE_FOR_lsx_vftintrz_wu_s CODE_FOR_fixuns_truncv4sfv4si2
++#define CODE_FOR_lsx_vftintrz_lu_d CODE_FOR_fixuns_truncv2dfv2di2
++#define CODE_FOR_lsx_vffint_s_w CODE_FOR_floatv4siv4sf2
++#define CODE_FOR_lsx_vffint_d_l CODE_FOR_floatv2div2df2
++#define CODE_FOR_lsx_vffint_s_wu CODE_FOR_floatunsv4siv4sf2
++#define CODE_FOR_lsx_vffint_d_lu CODE_FOR_floatunsv2div2df2
++#define CODE_FOR_lsx_vfsub_s CODE_FOR_subv4sf3
++#define CODE_FOR_lsx_vfsub_d CODE_FOR_subv2df3
++#define CODE_FOR_lsx_vfmul_s CODE_FOR_mulv4sf3
++#define CODE_FOR_lsx_vfmul_d CODE_FOR_mulv2df3
++#define CODE_FOR_lsx_vfdiv_s CODE_FOR_divv4sf3
++#define CODE_FOR_lsx_vfdiv_d CODE_FOR_divv2df3
++#define CODE_FOR_lsx_vfmax_s CODE_FOR_smaxv4sf3
++#define CODE_FOR_lsx_vfmax_d CODE_FOR_smaxv2df3
++#define CODE_FOR_lsx_vfmin_s CODE_FOR_sminv4sf3
++#define CODE_FOR_lsx_vfmin_d CODE_FOR_sminv2df3
++#define CODE_FOR_lsx_vfsqrt_s CODE_FOR_sqrtv4sf2
++#define CODE_FOR_lsx_vfsqrt_d CODE_FOR_sqrtv2df2
++#define CODE_FOR_lsx_vflogb_s CODE_FOR_logbv4sf2
++#define CODE_FOR_lsx_vflogb_d CODE_FOR_logbv2df2
++#define CODE_FOR_lsx_vmax_b CODE_FOR_smaxv16qi3
++#define CODE_FOR_lsx_vmax_h CODE_FOR_smaxv8hi3
++#define CODE_FOR_lsx_vmax_w CODE_FOR_smaxv4si3
++#define CODE_FOR_lsx_vmax_d CODE_FOR_smaxv2di3
++#define CODE_FOR_lsx_vmaxi_b CODE_FOR_smaxv16qi3
++#define CODE_FOR_lsx_vmaxi_h CODE_FOR_smaxv8hi3
++#define CODE_FOR_lsx_vmaxi_w CODE_FOR_smaxv4si3
++#define CODE_FOR_lsx_vmaxi_d CODE_FOR_smaxv2di3
++#define CODE_FOR_lsx_vmax_bu CODE_FOR_umaxv16qi3
++#define CODE_FOR_lsx_vmax_hu CODE_FOR_umaxv8hi3
++#define CODE_FOR_lsx_vmax_wu CODE_FOR_umaxv4si3
++#define CODE_FOR_lsx_vmax_du CODE_FOR_umaxv2di3
++#define CODE_FOR_lsx_vmaxi_bu CODE_FOR_umaxv16qi3
++#define CODE_FOR_lsx_vmaxi_hu CODE_FOR_umaxv8hi3
++#define CODE_FOR_lsx_vmaxi_wu CODE_FOR_umaxv4si3
++#define CODE_FOR_lsx_vmaxi_du CODE_FOR_umaxv2di3
++#define CODE_FOR_lsx_vmin_b CODE_FOR_sminv16qi3
++#define CODE_FOR_lsx_vmin_h CODE_FOR_sminv8hi3
++#define CODE_FOR_lsx_vmin_w CODE_FOR_sminv4si3
++#define CODE_FOR_lsx_vmin_d CODE_FOR_sminv2di3
++#define CODE_FOR_lsx_vmini_b CODE_FOR_sminv16qi3
++#define CODE_FOR_lsx_vmini_h CODE_FOR_sminv8hi3
++#define CODE_FOR_lsx_vmini_w CODE_FOR_sminv4si3
++#define CODE_FOR_lsx_vmini_d CODE_FOR_sminv2di3
++#define CODE_FOR_lsx_vmin_bu CODE_FOR_uminv16qi3
++#define CODE_FOR_lsx_vmin_hu CODE_FOR_uminv8hi3
++#define CODE_FOR_lsx_vmin_wu CODE_FOR_uminv4si3
++#define CODE_FOR_lsx_vmin_du CODE_FOR_uminv2di3
++#define CODE_FOR_lsx_vmini_bu CODE_FOR_uminv16qi3
++#define CODE_FOR_lsx_vmini_hu CODE_FOR_uminv8hi3
++#define CODE_FOR_lsx_vmini_wu CODE_FOR_uminv4si3
++#define CODE_FOR_lsx_vmini_du CODE_FOR_uminv2di3
++#define CODE_FOR_lsx_vmod_b CODE_FOR_modv16qi3
++#define CODE_FOR_lsx_vmod_h CODE_FOR_modv8hi3
++#define CODE_FOR_lsx_vmod_w CODE_FOR_modv4si3
++#define CODE_FOR_lsx_vmod_d CODE_FOR_modv2di3
++#define CODE_FOR_lsx_vmod_bu CODE_FOR_umodv16qi3
++#define CODE_FOR_lsx_vmod_hu CODE_FOR_umodv8hi3
++#define CODE_FOR_lsx_vmod_wu CODE_FOR_umodv4si3
++#define CODE_FOR_lsx_vmod_du CODE_FOR_umodv2di3
++#define CODE_FOR_lsx_vmul_b CODE_FOR_mulv16qi3
++#define CODE_FOR_lsx_vmul_h CODE_FOR_mulv8hi3
++#define CODE_FOR_lsx_vmul_w CODE_FOR_mulv4si3
++#define CODE_FOR_lsx_vmul_d CODE_FOR_mulv2di3
++#define CODE_FOR_lsx_vclz_b CODE_FOR_clzv16qi2
++#define CODE_FOR_lsx_vclz_h CODE_FOR_clzv8hi2
++#define CODE_FOR_lsx_vclz_w CODE_FOR_clzv4si2
++#define CODE_FOR_lsx_vclz_d CODE_FOR_clzv2di2
++#define CODE_FOR_lsx_vnor_v CODE_FOR_lsx_nor_b
++#define CODE_FOR_lsx_vor_v CODE_FOR_iorv16qi3
++#define CODE_FOR_lsx_vori_b CODE_FOR_iorv16qi3
++#define CODE_FOR_lsx_vnori_b CODE_FOR_lsx_nor_b
++#define CODE_FOR_lsx_vpcnt_b CODE_FOR_popcountv16qi2
++#define CODE_FOR_lsx_vpcnt_h CODE_FOR_popcountv8hi2
++#define CODE_FOR_lsx_vpcnt_w CODE_FOR_popcountv4si2
++#define CODE_FOR_lsx_vpcnt_d CODE_FOR_popcountv2di2
++#define CODE_FOR_lsx_vxor_v CODE_FOR_xorv16qi3
++#define CODE_FOR_lsx_vxori_b CODE_FOR_xorv16qi3
++#define CODE_FOR_lsx_vsll_b CODE_FOR_vashlv16qi3
++#define CODE_FOR_lsx_vsll_h CODE_FOR_vashlv8hi3
++#define CODE_FOR_lsx_vsll_w CODE_FOR_vashlv4si3
++#define CODE_FOR_lsx_vsll_d CODE_FOR_vashlv2di3
++#define CODE_FOR_lsx_vslli_b CODE_FOR_vashlv16qi3
++#define CODE_FOR_lsx_vslli_h CODE_FOR_vashlv8hi3
++#define CODE_FOR_lsx_vslli_w CODE_FOR_vashlv4si3
++#define CODE_FOR_lsx_vslli_d CODE_FOR_vashlv2di3
++#define CODE_FOR_lsx_vsra_b CODE_FOR_vashrv16qi3
++#define CODE_FOR_lsx_vsra_h CODE_FOR_vashrv8hi3
++#define CODE_FOR_lsx_vsra_w CODE_FOR_vashrv4si3
++#define CODE_FOR_lsx_vsra_d CODE_FOR_vashrv2di3
++#define CODE_FOR_lsx_vsrai_b CODE_FOR_vashrv16qi3
++#define CODE_FOR_lsx_vsrai_h CODE_FOR_vashrv8hi3
++#define CODE_FOR_lsx_vsrai_w CODE_FOR_vashrv4si3
++#define CODE_FOR_lsx_vsrai_d CODE_FOR_vashrv2di3
++#define CODE_FOR_lsx_vsrl_b CODE_FOR_vlshrv16qi3
++#define CODE_FOR_lsx_vsrl_h CODE_FOR_vlshrv8hi3
++#define CODE_FOR_lsx_vsrl_w CODE_FOR_vlshrv4si3
++#define CODE_FOR_lsx_vsrl_d CODE_FOR_vlshrv2di3
++#define CODE_FOR_lsx_vsrli_b CODE_FOR_vlshrv16qi3
++#define CODE_FOR_lsx_vsrli_h CODE_FOR_vlshrv8hi3
++#define CODE_FOR_lsx_vsrli_w CODE_FOR_vlshrv4si3
++#define CODE_FOR_lsx_vsrli_d CODE_FOR_vlshrv2di3
++#define CODE_FOR_lsx_vsub_b CODE_FOR_subv16qi3
++#define CODE_FOR_lsx_vsub_h CODE_FOR_subv8hi3
++#define CODE_FOR_lsx_vsub_w CODE_FOR_subv4si3
++#define CODE_FOR_lsx_vsub_d CODE_FOR_subv2di3
++#define CODE_FOR_lsx_vsubi_bu CODE_FOR_subv16qi3
++#define CODE_FOR_lsx_vsubi_hu CODE_FOR_subv8hi3
++#define CODE_FOR_lsx_vsubi_wu CODE_FOR_subv4si3
++#define CODE_FOR_lsx_vsubi_du CODE_FOR_subv2di3
++
++#define CODE_FOR_lsx_vpackod_d CODE_FOR_lsx_vilvh_d
++#define CODE_FOR_lsx_vpackev_d CODE_FOR_lsx_vilvl_d
++#define CODE_FOR_lsx_vpickod_d CODE_FOR_lsx_vilvh_d
++#define CODE_FOR_lsx_vpickev_d CODE_FOR_lsx_vilvl_d
++
++#define CODE_FOR_lsx_vrepli_b CODE_FOR_lsx_vrepliv16qi
++#define CODE_FOR_lsx_vrepli_h CODE_FOR_lsx_vrepliv8hi
++#define CODE_FOR_lsx_vrepli_w CODE_FOR_lsx_vrepliv4si
++#define CODE_FOR_lsx_vrepli_d CODE_FOR_lsx_vrepliv2di
++#define CODE_FOR_lsx_vsat_b CODE_FOR_lsx_vsat_s_b
++#define CODE_FOR_lsx_vsat_h CODE_FOR_lsx_vsat_s_h
++#define CODE_FOR_lsx_vsat_w CODE_FOR_lsx_vsat_s_w
++#define CODE_FOR_lsx_vsat_d CODE_FOR_lsx_vsat_s_d
++#define CODE_FOR_lsx_vsat_bu CODE_FOR_lsx_vsat_u_bu
++#define CODE_FOR_lsx_vsat_hu CODE_FOR_lsx_vsat_u_hu
++#define CODE_FOR_lsx_vsat_wu CODE_FOR_lsx_vsat_u_wu
++#define CODE_FOR_lsx_vsat_du CODE_FOR_lsx_vsat_u_du
++#define CODE_FOR_lsx_vavg_b CODE_FOR_lsx_vavg_s_b
++#define CODE_FOR_lsx_vavg_h CODE_FOR_lsx_vavg_s_h
++#define CODE_FOR_lsx_vavg_w CODE_FOR_lsx_vavg_s_w
++#define CODE_FOR_lsx_vavg_d CODE_FOR_lsx_vavg_s_d
++#define CODE_FOR_lsx_vavg_bu CODE_FOR_lsx_vavg_u_bu
++#define CODE_FOR_lsx_vavg_hu CODE_FOR_lsx_vavg_u_hu
++#define CODE_FOR_lsx_vavg_wu CODE_FOR_lsx_vavg_u_wu
++#define CODE_FOR_lsx_vavg_du CODE_FOR_lsx_vavg_u_du
++#define CODE_FOR_lsx_vavgr_b CODE_FOR_lsx_vavgr_s_b
++#define CODE_FOR_lsx_vavgr_h CODE_FOR_lsx_vavgr_s_h
++#define CODE_FOR_lsx_vavgr_w CODE_FOR_lsx_vavgr_s_w
++#define CODE_FOR_lsx_vavgr_d CODE_FOR_lsx_vavgr_s_d
++#define CODE_FOR_lsx_vavgr_bu CODE_FOR_lsx_vavgr_u_bu
++#define CODE_FOR_lsx_vavgr_hu CODE_FOR_lsx_vavgr_u_hu
++#define CODE_FOR_lsx_vavgr_wu CODE_FOR_lsx_vavgr_u_wu
++#define CODE_FOR_lsx_vavgr_du CODE_FOR_lsx_vavgr_u_du
++#define CODE_FOR_lsx_vssub_b CODE_FOR_lsx_vssub_s_b
++#define CODE_FOR_lsx_vssub_h CODE_FOR_lsx_vssub_s_h
++#define CODE_FOR_lsx_vssub_w CODE_FOR_lsx_vssub_s_w
++#define CODE_FOR_lsx_vssub_d CODE_FOR_lsx_vssub_s_d
++#define CODE_FOR_lsx_vssub_bu CODE_FOR_lsx_vssub_u_bu
++#define CODE_FOR_lsx_vssub_hu CODE_FOR_lsx_vssub_u_hu
++#define CODE_FOR_lsx_vssub_wu CODE_FOR_lsx_vssub_u_wu
++#define CODE_FOR_lsx_vssub_du CODE_FOR_lsx_vssub_u_du
++#define CODE_FOR_lsx_vabsd_b CODE_FOR_lsx_vabsd_s_b
++#define CODE_FOR_lsx_vabsd_h CODE_FOR_lsx_vabsd_s_h
++#define CODE_FOR_lsx_vabsd_w CODE_FOR_lsx_vabsd_s_w
++#define CODE_FOR_lsx_vabsd_d CODE_FOR_lsx_vabsd_s_d
++#define CODE_FOR_lsx_vabsd_bu CODE_FOR_lsx_vabsd_u_bu
++#define CODE_FOR_lsx_vabsd_hu CODE_FOR_lsx_vabsd_u_hu
++#define CODE_FOR_lsx_vabsd_wu CODE_FOR_lsx_vabsd_u_wu
++#define CODE_FOR_lsx_vabsd_du CODE_FOR_lsx_vabsd_u_du
++#define CODE_FOR_lsx_vftint_w_s CODE_FOR_lsx_vftint_s_w_s
++#define CODE_FOR_lsx_vftint_l_d CODE_FOR_lsx_vftint_s_l_d
++#define CODE_FOR_lsx_vftint_wu_s CODE_FOR_lsx_vftint_u_wu_s
++#define CODE_FOR_lsx_vftint_lu_d CODE_FOR_lsx_vftint_u_lu_d
++#define CODE_FOR_lsx_vandn_v CODE_FOR_vandnv16qi3
++#define CODE_FOR_lsx_vorn_v CODE_FOR_vornv16qi3
++#define CODE_FOR_lsx_vneg_b CODE_FOR_vnegv16qi2
++#define CODE_FOR_lsx_vneg_h CODE_FOR_vnegv8hi2
++#define CODE_FOR_lsx_vneg_w CODE_FOR_vnegv4si2
++#define CODE_FOR_lsx_vneg_d CODE_FOR_vnegv2di2
++#define CODE_FOR_lsx_vshuf4i_d CODE_FOR_lsx_vshuf4i_d
++#define CODE_FOR_lsx_vbsrl_v CODE_FOR_lsx_vbsrl_b
++#define CODE_FOR_lsx_vbsll_v CODE_FOR_lsx_vbsll_b
++#define CODE_FOR_lsx_vfmadd_s CODE_FOR_fmav4sf4
++#define CODE_FOR_lsx_vfmadd_d CODE_FOR_fmav2df4
++#define CODE_FOR_lsx_vfmsub_s CODE_FOR_fmsv4sf4
++#define CODE_FOR_lsx_vfmsub_d CODE_FOR_fmsv2df4
++#define CODE_FOR_lsx_vfnmadd_s CODE_FOR_vfnmaddv4sf4_nmadd4
++#define CODE_FOR_lsx_vfnmadd_d CODE_FOR_vfnmaddv2df4_nmadd4
++#define CODE_FOR_lsx_vfnmsub_s CODE_FOR_vfnmsubv4sf4_nmsub4
++#define CODE_FOR_lsx_vfnmsub_d CODE_FOR_vfnmsubv2df4_nmsub4
++
++#define CODE_FOR_lsx_vmuh_b CODE_FOR_lsx_vmuh_s_b
++#define CODE_FOR_lsx_vmuh_h CODE_FOR_lsx_vmuh_s_h
++#define CODE_FOR_lsx_vmuh_w CODE_FOR_lsx_vmuh_s_w
++#define CODE_FOR_lsx_vmuh_d CODE_FOR_lsx_vmuh_s_d
++#define CODE_FOR_lsx_vmuh_bu CODE_FOR_lsx_vmuh_u_bu
++#define CODE_FOR_lsx_vmuh_hu CODE_FOR_lsx_vmuh_u_hu
++#define CODE_FOR_lsx_vmuh_wu CODE_FOR_lsx_vmuh_u_wu
++#define CODE_FOR_lsx_vmuh_du CODE_FOR_lsx_vmuh_u_du
++#define CODE_FOR_lsx_vsllwil_h_b CODE_FOR_lsx_vsllwil_s_h_b
++#define CODE_FOR_lsx_vsllwil_w_h CODE_FOR_lsx_vsllwil_s_w_h
++#define CODE_FOR_lsx_vsllwil_d_w CODE_FOR_lsx_vsllwil_s_d_w
++#define CODE_FOR_lsx_vsllwil_hu_bu CODE_FOR_lsx_vsllwil_u_hu_bu
++#define CODE_FOR_lsx_vsllwil_wu_hu CODE_FOR_lsx_vsllwil_u_wu_hu
++#define CODE_FOR_lsx_vsllwil_du_wu CODE_FOR_lsx_vsllwil_u_du_wu
++#define CODE_FOR_lsx_vssran_b_h CODE_FOR_lsx_vssran_s_b_h
++#define CODE_FOR_lsx_vssran_h_w CODE_FOR_lsx_vssran_s_h_w
++#define CODE_FOR_lsx_vssran_w_d CODE_FOR_lsx_vssran_s_w_d
++#define CODE_FOR_lsx_vssran_bu_h CODE_FOR_lsx_vssran_u_bu_h
++#define CODE_FOR_lsx_vssran_hu_w CODE_FOR_lsx_vssran_u_hu_w
++#define CODE_FOR_lsx_vssran_wu_d CODE_FOR_lsx_vssran_u_wu_d
++#define CODE_FOR_lsx_vssrarn_b_h CODE_FOR_lsx_vssrarn_s_b_h
++#define CODE_FOR_lsx_vssrarn_h_w CODE_FOR_lsx_vssrarn_s_h_w
++#define CODE_FOR_lsx_vssrarn_w_d CODE_FOR_lsx_vssrarn_s_w_d
++#define CODE_FOR_lsx_vssrarn_bu_h CODE_FOR_lsx_vssrarn_u_bu_h
++#define CODE_FOR_lsx_vssrarn_hu_w CODE_FOR_lsx_vssrarn_u_hu_w
++#define CODE_FOR_lsx_vssrarn_wu_d CODE_FOR_lsx_vssrarn_u_wu_d
++#define CODE_FOR_lsx_vssrln_bu_h CODE_FOR_lsx_vssrln_u_bu_h
++#define CODE_FOR_lsx_vssrln_hu_w CODE_FOR_lsx_vssrln_u_hu_w
++#define CODE_FOR_lsx_vssrln_wu_d CODE_FOR_lsx_vssrln_u_wu_d
++#define CODE_FOR_lsx_vssrlrn_bu_h CODE_FOR_lsx_vssrlrn_u_bu_h
++#define CODE_FOR_lsx_vssrlrn_hu_w CODE_FOR_lsx_vssrlrn_u_hu_w
++#define CODE_FOR_lsx_vssrlrn_wu_d CODE_FOR_lsx_vssrlrn_u_wu_d
++
+ static const struct loongarch_builtin_description loongarch_builtins[] = {
+ #define LARCH_MOVFCSR2GR 0
+   DIRECT_BUILTIN (movfcsr2gr, LARCH_USI_FTYPE_UQI, hard_float),
+@@ -184,6 +489,727 @@ static const struct loongarch_builtin_description loongarch_builtins[] = {
+   DIRECT_NO_TARGET_BUILTIN (asrtgt_d, LARCH_VOID_FTYPE_DI_DI, default),
+   DIRECT_NO_TARGET_BUILTIN (syscall, LARCH_VOID_FTYPE_USI, default),
+   DIRECT_NO_TARGET_BUILTIN (break, LARCH_VOID_FTYPE_USI, default),
++
++  /* Built-in functions for LSX.  */
++  LSX_BUILTIN (vsll_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsll_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsll_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsll_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vslli_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vslli_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vslli_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vslli_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vsra_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsra_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsra_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsra_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsrai_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vsrai_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vsrai_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vsrai_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vsrar_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsrar_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsrar_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsrar_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsrari_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vsrari_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vsrari_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vsrari_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vsrl_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsrl_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsrl_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsrl_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsrli_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vsrli_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vsrli_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vsrli_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vsrlr_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsrlr_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsrlr_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsrlr_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsrlri_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vsrlri_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vsrlri_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vsrlri_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vbitclr_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vbitclr_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vbitclr_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vbitclr_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vbitclri_b, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vbitclri_h, LARCH_UV8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vbitclri_w, LARCH_UV4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vbitclri_d, LARCH_UV2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vbitset_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vbitset_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vbitset_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vbitset_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vbitseti_b, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vbitseti_h, LARCH_UV8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vbitseti_w, LARCH_UV4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vbitseti_d, LARCH_UV2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vbitrev_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vbitrev_h, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vbitrev_w, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vbitrev_d, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vbitrevi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vbitrevi_h, LARCH_UV8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vbitrevi_w, LARCH_UV4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vbitrevi_d, LARCH_UV2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vaddi_bu, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vaddi_hu, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vaddi_wu, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vaddi_du, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsubi_bu, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vsubi_hu, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vsubi_wu, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vsubi_du, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vmax_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vmax_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vmax_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vmax_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vmaxi_b, LARCH_V16QI_FTYPE_V16QI_QI),
++  LSX_BUILTIN (vmaxi_h, LARCH_V8HI_FTYPE_V8HI_QI),
++  LSX_BUILTIN (vmaxi_w, LARCH_V4SI_FTYPE_V4SI_QI),
++  LSX_BUILTIN (vmaxi_d, LARCH_V2DI_FTYPE_V2DI_QI),
++  LSX_BUILTIN (vmax_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vmax_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vmax_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vmax_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vmaxi_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vmaxi_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vmaxi_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vmaxi_du, LARCH_UV2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vmin_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vmin_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vmin_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vmin_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vmini_b, LARCH_V16QI_FTYPE_V16QI_QI),
++  LSX_BUILTIN (vmini_h, LARCH_V8HI_FTYPE_V8HI_QI),
++  LSX_BUILTIN (vmini_w, LARCH_V4SI_FTYPE_V4SI_QI),
++  LSX_BUILTIN (vmini_d, LARCH_V2DI_FTYPE_V2DI_QI),
++  LSX_BUILTIN (vmin_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vmin_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vmin_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vmin_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vmini_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vmini_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vmini_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vmini_du, LARCH_UV2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vseq_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vseq_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vseq_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vseq_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vseqi_b, LARCH_V16QI_FTYPE_V16QI_QI),
++  LSX_BUILTIN (vseqi_h, LARCH_V8HI_FTYPE_V8HI_QI),
++  LSX_BUILTIN (vseqi_w, LARCH_V4SI_FTYPE_V4SI_QI),
++  LSX_BUILTIN (vseqi_d, LARCH_V2DI_FTYPE_V2DI_QI),
++  LSX_BUILTIN (vslti_b, LARCH_V16QI_FTYPE_V16QI_QI),
++  LSX_BUILTIN (vslt_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vslt_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vslt_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vslt_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vslti_h, LARCH_V8HI_FTYPE_V8HI_QI),
++  LSX_BUILTIN (vslti_w, LARCH_V4SI_FTYPE_V4SI_QI),
++  LSX_BUILTIN (vslti_d, LARCH_V2DI_FTYPE_V2DI_QI),
++  LSX_BUILTIN (vslt_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vslt_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vslt_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vslt_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vslti_bu, LARCH_V16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vslti_hu, LARCH_V8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vslti_wu, LARCH_V4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vslti_du, LARCH_V2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vsle_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsle_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsle_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsle_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vslei_b, LARCH_V16QI_FTYPE_V16QI_QI),
++  LSX_BUILTIN (vslei_h, LARCH_V8HI_FTYPE_V8HI_QI),
++  LSX_BUILTIN (vslei_w, LARCH_V4SI_FTYPE_V4SI_QI),
++  LSX_BUILTIN (vslei_d, LARCH_V2DI_FTYPE_V2DI_QI),
++  LSX_BUILTIN (vsle_bu, LARCH_V16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vsle_hu, LARCH_V8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vsle_wu, LARCH_V4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vsle_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vslei_bu, LARCH_V16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vslei_hu, LARCH_V8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vslei_wu, LARCH_V4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vslei_du, LARCH_V2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vsat_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vsat_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vsat_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vsat_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vsat_bu, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vsat_hu, LARCH_UV8HI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vsat_wu, LARCH_UV4SI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vsat_du, LARCH_UV2DI_FTYPE_UV2DI_UQI),
++  LSX_BUILTIN (vadda_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vadda_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vadda_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vadda_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsadd_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vsadd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vsadd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vsadd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vavg_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vavg_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vavg_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vavg_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vavg_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vavg_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vavg_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vavg_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vavgr_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vavgr_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vavgr_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vavgr_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vavgr_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vavgr_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vavgr_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vavgr_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vssub_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vssub_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vssub_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vssub_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssub_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vssub_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vssub_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vssub_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vabsd_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vabsd_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vabsd_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vabsd_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vabsd_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vabsd_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vabsd_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vabsd_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vmul_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vmul_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vmul_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vmul_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vmadd_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI),
++  LSX_BUILTIN (vmadd_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI),
++  LSX_BUILTIN (vmadd_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI),
++  LSX_BUILTIN (vmadd_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI),
++  LSX_BUILTIN (vmsub_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI),
++  LSX_BUILTIN (vmsub_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI),
++  LSX_BUILTIN (vmsub_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI),
++  LSX_BUILTIN (vmsub_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI),
++  LSX_BUILTIN (vdiv_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vdiv_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vdiv_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vdiv_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vdiv_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vdiv_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vdiv_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vdiv_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vhaddw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vhaddw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vhaddw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vhaddw_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vhaddw_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vhaddw_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vhsubw_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vhsubw_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vhsubw_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vhsubw_hu_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vhsubw_wu_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vhsubw_du_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vmod_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vmod_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vmod_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vmod_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vmod_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vmod_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vmod_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vmod_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vreplve_b, LARCH_V16QI_FTYPE_V16QI_SI),
++  LSX_BUILTIN (vreplve_h, LARCH_V8HI_FTYPE_V8HI_SI),
++  LSX_BUILTIN (vreplve_w, LARCH_V4SI_FTYPE_V4SI_SI),
++  LSX_BUILTIN (vreplve_d, LARCH_V2DI_FTYPE_V2DI_SI),
++  LSX_BUILTIN (vreplvei_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vreplvei_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vreplvei_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vreplvei_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vpickev_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vpickev_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vpickev_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vpickev_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vpickod_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vpickod_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vpickod_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vpickod_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vilvh_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vilvh_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vilvh_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vilvh_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vilvl_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vilvl_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vilvl_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vilvl_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vpackev_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vpackev_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vpackev_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vpackev_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vpackod_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vpackod_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vpackod_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vpackod_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vshuf_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI),
++  LSX_BUILTIN (vshuf_w, LARCH_V4SI_FTYPE_V4SI_V4SI_V4SI),
++  LSX_BUILTIN (vshuf_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI),
++  LSX_BUILTIN (vand_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vandi_b, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vnor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vnori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vxor_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vxori_b, LARCH_UV16QI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vbitsel_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_UV16QI),
++  LSX_BUILTIN (vbitseli_b, LARCH_UV16QI_FTYPE_UV16QI_UV16QI_USI),
++  LSX_BUILTIN (vshuf4i_b, LARCH_V16QI_FTYPE_V16QI_USI),
++  LSX_BUILTIN (vshuf4i_h, LARCH_V8HI_FTYPE_V8HI_USI),
++  LSX_BUILTIN (vshuf4i_w, LARCH_V4SI_FTYPE_V4SI_USI),
++  LSX_BUILTIN (vreplgr2vr_b, LARCH_V16QI_FTYPE_SI),
++  LSX_BUILTIN (vreplgr2vr_h, LARCH_V8HI_FTYPE_SI),
++  LSX_BUILTIN (vreplgr2vr_w, LARCH_V4SI_FTYPE_SI),
++  LSX_BUILTIN (vreplgr2vr_d, LARCH_V2DI_FTYPE_DI),
++  LSX_BUILTIN (vpcnt_b, LARCH_V16QI_FTYPE_V16QI),
++  LSX_BUILTIN (vpcnt_h, LARCH_V8HI_FTYPE_V8HI),
++  LSX_BUILTIN (vpcnt_w, LARCH_V4SI_FTYPE_V4SI),
++  LSX_BUILTIN (vpcnt_d, LARCH_V2DI_FTYPE_V2DI),
++  LSX_BUILTIN (vclo_b, LARCH_V16QI_FTYPE_V16QI),
++  LSX_BUILTIN (vclo_h, LARCH_V8HI_FTYPE_V8HI),
++  LSX_BUILTIN (vclo_w, LARCH_V4SI_FTYPE_V4SI),
++  LSX_BUILTIN (vclo_d, LARCH_V2DI_FTYPE_V2DI),
++  LSX_BUILTIN (vclz_b, LARCH_V16QI_FTYPE_V16QI),
++  LSX_BUILTIN (vclz_h, LARCH_V8HI_FTYPE_V8HI),
++  LSX_BUILTIN (vclz_w, LARCH_V4SI_FTYPE_V4SI),
++  LSX_BUILTIN (vclz_d, LARCH_V2DI_FTYPE_V2DI),
++  LSX_BUILTIN (vpickve2gr_b, LARCH_SI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vpickve2gr_h, LARCH_SI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vpickve2gr_w, LARCH_SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vpickve2gr_d, LARCH_DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vpickve2gr_bu, LARCH_USI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vpickve2gr_hu, LARCH_USI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vpickve2gr_wu, LARCH_USI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vpickve2gr_du, LARCH_UDI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vinsgr2vr_b, LARCH_V16QI_FTYPE_V16QI_SI_UQI),
++  LSX_BUILTIN (vinsgr2vr_h, LARCH_V8HI_FTYPE_V8HI_SI_UQI),
++  LSX_BUILTIN (vinsgr2vr_w, LARCH_V4SI_FTYPE_V4SI_SI_UQI),
++  LSX_BUILTIN (vinsgr2vr_d, LARCH_V2DI_FTYPE_V2DI_DI_UQI),
++  LSX_BUILTIN_TEST_BRANCH (bnz_b, LARCH_SI_FTYPE_UV16QI),
++  LSX_BUILTIN_TEST_BRANCH (bnz_h, LARCH_SI_FTYPE_UV8HI),
++  LSX_BUILTIN_TEST_BRANCH (bnz_w, LARCH_SI_FTYPE_UV4SI),
++  LSX_BUILTIN_TEST_BRANCH (bnz_d, LARCH_SI_FTYPE_UV2DI),
++  LSX_BUILTIN_TEST_BRANCH (bz_b, LARCH_SI_FTYPE_UV16QI),
++  LSX_BUILTIN_TEST_BRANCH (bz_h, LARCH_SI_FTYPE_UV8HI),
++  LSX_BUILTIN_TEST_BRANCH (bz_w, LARCH_SI_FTYPE_UV4SI),
++  LSX_BUILTIN_TEST_BRANCH (bz_d, LARCH_SI_FTYPE_UV2DI),
++  LSX_BUILTIN_TEST_BRANCH (bz_v, LARCH_SI_FTYPE_UV16QI),
++  LSX_BUILTIN_TEST_BRANCH (bnz_v,	LARCH_SI_FTYPE_UV16QI),
++  LSX_BUILTIN (vrepli_b, LARCH_V16QI_FTYPE_HI),
++  LSX_BUILTIN (vrepli_h, LARCH_V8HI_FTYPE_HI),
++  LSX_BUILTIN (vrepli_w, LARCH_V4SI_FTYPE_HI),
++  LSX_BUILTIN (vrepli_d, LARCH_V2DI_FTYPE_HI),
++  LSX_BUILTIN (vfcmp_caf_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_caf_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cor_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cor_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cun_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cun_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cune_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cune_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_ceq_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_ceq_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cne_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cne_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_clt_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_clt_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cult_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cult_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cle_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cle_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_cule_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_cule_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_saf_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_saf_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sor_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sor_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sun_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sun_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sune_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sune_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sueq_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sueq_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_seq_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_seq_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sne_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sne_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_slt_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_slt_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sult_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sult_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sle_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sle_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcmp_sule_s, LARCH_V4SI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcmp_sule_d, LARCH_V2DI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfmul_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfmul_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfdiv_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfdiv_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfcvt_h_s, LARCH_V8HI_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfcvt_s_d, LARCH_V4SF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfmin_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfmin_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfmina_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfmina_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfmax_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfmax_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfmaxa_s, LARCH_V4SF_FTYPE_V4SF_V4SF),
++  LSX_BUILTIN (vfmaxa_d, LARCH_V2DF_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vfclass_s, LARCH_V4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vfclass_d, LARCH_V2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vfsqrt_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfsqrt_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vfrecip_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfrecip_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vfrint_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfrint_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vfrsqrt_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfrsqrt_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vflogb_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vflogb_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vfcvth_s_h, LARCH_V4SF_FTYPE_V8HI),
++  LSX_BUILTIN (vfcvth_d_s, LARCH_V2DF_FTYPE_V4SF),
++  LSX_BUILTIN (vfcvtl_s_h, LARCH_V4SF_FTYPE_V8HI),
++  LSX_BUILTIN (vfcvtl_d_s, LARCH_V2DF_FTYPE_V4SF),
++  LSX_BUILTIN (vftint_w_s, LARCH_V4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vftint_l_d, LARCH_V2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vftint_wu_s, LARCH_UV4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vftint_lu_d, LARCH_UV2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vftintrz_w_s, LARCH_V4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrz_l_d, LARCH_V2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vftintrz_wu_s, LARCH_UV4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrz_lu_d, LARCH_UV2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vffint_s_w, LARCH_V4SF_FTYPE_V4SI),
++  LSX_BUILTIN (vffint_d_l, LARCH_V2DF_FTYPE_V2DI),
++  LSX_BUILTIN (vffint_s_wu, LARCH_V4SF_FTYPE_UV4SI),
++  LSX_BUILTIN (vffint_d_lu, LARCH_V2DF_FTYPE_UV2DI),
++
++  LSX_BUILTIN (vandn_v, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vneg_b, LARCH_V16QI_FTYPE_V16QI),
++  LSX_BUILTIN (vneg_h, LARCH_V8HI_FTYPE_V8HI),
++  LSX_BUILTIN (vneg_w, LARCH_V4SI_FTYPE_V4SI),
++  LSX_BUILTIN (vneg_d, LARCH_V2DI_FTYPE_V2DI),
++  LSX_BUILTIN (vmuh_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vmuh_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vmuh_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vmuh_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vmuh_bu, LARCH_UV16QI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vmuh_hu, LARCH_UV8HI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vmuh_wu, LARCH_UV4SI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vmuh_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vsllwil_h_b, LARCH_V8HI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vsllwil_w_h, LARCH_V4SI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vsllwil_d_w, LARCH_V2DI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vsllwil_hu_bu, LARCH_UV8HI_FTYPE_UV16QI_UQI),
++  LSX_BUILTIN (vsllwil_wu_hu, LARCH_UV4SI_FTYPE_UV8HI_UQI),
++  LSX_BUILTIN (vsllwil_du_wu, LARCH_UV2DI_FTYPE_UV4SI_UQI),
++  LSX_BUILTIN (vsran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssran_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vssran_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vssran_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssran_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vssran_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vssran_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vsrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssrarn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vssrarn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vssrarn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssrarn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vssrarn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vssrarn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vsrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssrln_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vssrln_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vssrln_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vsrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssrlrn_bu_h, LARCH_UV16QI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vssrlrn_hu_w, LARCH_UV8HI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vssrlrn_wu_d, LARCH_UV4SI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vfrstpi_b, LARCH_V16QI_FTYPE_V16QI_V16QI_UQI),
++  LSX_BUILTIN (vfrstpi_h, LARCH_V8HI_FTYPE_V8HI_V8HI_UQI),
++  LSX_BUILTIN (vfrstp_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI),
++  LSX_BUILTIN (vfrstp_h, LARCH_V8HI_FTYPE_V8HI_V8HI_V8HI),
++  LSX_BUILTIN (vshuf4i_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vbsrl_v, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vbsll_v, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vextrins_b, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vextrins_h, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vextrins_w, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vextrins_d, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vmskltz_b, LARCH_V16QI_FTYPE_V16QI),
++  LSX_BUILTIN (vmskltz_h, LARCH_V8HI_FTYPE_V8HI),
++  LSX_BUILTIN (vmskltz_w, LARCH_V4SI_FTYPE_V4SI),
++  LSX_BUILTIN (vmskltz_d, LARCH_V2DI_FTYPE_V2DI),
++  LSX_BUILTIN (vsigncov_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsigncov_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsigncov_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsigncov_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vfmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF),
++  LSX_BUILTIN (vfmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF),
++  LSX_BUILTIN (vfmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF),
++  LSX_BUILTIN (vfmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF),
++  LSX_BUILTIN (vfnmadd_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF),
++  LSX_BUILTIN (vfnmadd_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF),
++  LSX_BUILTIN (vfnmsub_s, LARCH_V4SF_FTYPE_V4SF_V4SF_V4SF),
++  LSX_BUILTIN (vfnmsub_d, LARCH_V2DF_FTYPE_V2DF_V2DF_V2DF),
++  LSX_BUILTIN (vftintrne_w_s, LARCH_V4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrne_l_d, LARCH_V2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vftintrp_w_s, LARCH_V4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrp_l_d, LARCH_V2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vftintrm_w_s, LARCH_V4SI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrm_l_d, LARCH_V2DI_FTYPE_V2DF),
++  LSX_BUILTIN (vftint_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vffint_s_l, LARCH_V4SF_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vftintrz_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vftintrp_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vftintrm_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vftintrne_w_d, LARCH_V4SI_FTYPE_V2DF_V2DF),
++  LSX_BUILTIN (vftintl_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftinth_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vffinth_d_w, LARCH_V2DF_FTYPE_V4SI),
++  LSX_BUILTIN (vffintl_d_w, LARCH_V2DF_FTYPE_V4SI),
++  LSX_BUILTIN (vftintrzl_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrzh_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrpl_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrph_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrml_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrmh_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrnel_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vftintrneh_l_s, LARCH_V2DI_FTYPE_V4SF),
++  LSX_BUILTIN (vfrintrne_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfrintrne_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vfrintrz_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfrintrz_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vfrintrp_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfrintrp_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_BUILTIN (vfrintrm_s, LARCH_V4SF_FTYPE_V4SF),
++  LSX_BUILTIN (vfrintrm_d, LARCH_V2DF_FTYPE_V2DF),
++  LSX_NO_TARGET_BUILTIN (vstelm_b, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI_UQI),
++  LSX_NO_TARGET_BUILTIN (vstelm_h, LARCH_VOID_FTYPE_V8HI_CVPOINTER_SI_UQI),
++  LSX_NO_TARGET_BUILTIN (vstelm_w, LARCH_VOID_FTYPE_V4SI_CVPOINTER_SI_UQI),
++  LSX_NO_TARGET_BUILTIN (vstelm_d, LARCH_VOID_FTYPE_V2DI_CVPOINTER_SI_UQI),
++  LSX_BUILTIN (vaddwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vaddwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vaddwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vaddwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vaddwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vaddwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vaddwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vaddwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vaddwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vaddwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vaddwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vaddwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vaddwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI),
++  LSX_BUILTIN (vaddwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI),
++  LSX_BUILTIN (vaddwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI),
++  LSX_BUILTIN (vaddwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI),
++  LSX_BUILTIN (vaddwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI),
++  LSX_BUILTIN (vaddwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI),
++  LSX_BUILTIN (vsubwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsubwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsubwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsubwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vsubwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vsubwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vsubwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vsubwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vsubwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vsubwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vsubwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vsubwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vaddwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vaddwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vsubwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsubwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsubwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vsubwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vaddwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI),
++  LSX_BUILTIN (vaddwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI),
++
++  LSX_BUILTIN (vmulwev_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vmulwev_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vmulwev_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vmulwod_d_w, LARCH_V2DI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vmulwod_w_h, LARCH_V4SI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vmulwod_h_b, LARCH_V8HI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vmulwev_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vmulwev_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vmulwev_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vmulwod_d_wu, LARCH_V2DI_FTYPE_UV4SI_UV4SI),
++  LSX_BUILTIN (vmulwod_w_hu, LARCH_V4SI_FTYPE_UV8HI_UV8HI),
++  LSX_BUILTIN (vmulwod_h_bu, LARCH_V8HI_FTYPE_UV16QI_UV16QI),
++  LSX_BUILTIN (vmulwev_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI),
++  LSX_BUILTIN (vmulwev_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI),
++  LSX_BUILTIN (vmulwev_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI),
++  LSX_BUILTIN (vmulwod_d_wu_w, LARCH_V2DI_FTYPE_UV4SI_V4SI),
++  LSX_BUILTIN (vmulwod_w_hu_h, LARCH_V4SI_FTYPE_UV8HI_V8HI),
++  LSX_BUILTIN (vmulwod_h_bu_b, LARCH_V8HI_FTYPE_UV16QI_V16QI),
++  LSX_BUILTIN (vmulwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vmulwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vmulwev_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vmulwod_q_du, LARCH_V2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vmulwev_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI),
++  LSX_BUILTIN (vmulwod_q_du_d, LARCH_V2DI_FTYPE_UV2DI_V2DI),
++  LSX_BUILTIN (vhaddw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vhaddw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vhsubw_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vhsubw_qu_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI),
++  LSX_BUILTIN (vmaddwev_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI),
++  LSX_BUILTIN (vmaddwev_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI),
++  LSX_BUILTIN (vmaddwev_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI),
++  LSX_BUILTIN (vmaddwev_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI),
++  LSX_BUILTIN (vmaddwev_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI),
++  LSX_BUILTIN (vmaddwev_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI),
++  LSX_BUILTIN (vmaddwod_d_w, LARCH_V2DI_FTYPE_V2DI_V4SI_V4SI),
++  LSX_BUILTIN (vmaddwod_w_h, LARCH_V4SI_FTYPE_V4SI_V8HI_V8HI),
++  LSX_BUILTIN (vmaddwod_h_b, LARCH_V8HI_FTYPE_V8HI_V16QI_V16QI),
++  LSX_BUILTIN (vmaddwod_d_wu, LARCH_UV2DI_FTYPE_UV2DI_UV4SI_UV4SI),
++  LSX_BUILTIN (vmaddwod_w_hu, LARCH_UV4SI_FTYPE_UV4SI_UV8HI_UV8HI),
++  LSX_BUILTIN (vmaddwod_h_bu, LARCH_UV8HI_FTYPE_UV8HI_UV16QI_UV16QI),
++  LSX_BUILTIN (vmaddwev_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI),
++  LSX_BUILTIN (vmaddwev_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI),
++  LSX_BUILTIN (vmaddwev_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI),
++  LSX_BUILTIN (vmaddwod_d_wu_w, LARCH_V2DI_FTYPE_V2DI_UV4SI_V4SI),
++  LSX_BUILTIN (vmaddwod_w_hu_h, LARCH_V4SI_FTYPE_V4SI_UV8HI_V8HI),
++  LSX_BUILTIN (vmaddwod_h_bu_b, LARCH_V8HI_FTYPE_V8HI_UV16QI_V16QI),
++  LSX_BUILTIN (vmaddwev_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI),
++  LSX_BUILTIN (vmaddwod_q_d, LARCH_V2DI_FTYPE_V2DI_V2DI_V2DI),
++  LSX_BUILTIN (vmaddwev_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI),
++  LSX_BUILTIN (vmaddwod_q_du, LARCH_UV2DI_FTYPE_UV2DI_UV2DI_UV2DI),
++  LSX_BUILTIN (vmaddwev_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI),
++  LSX_BUILTIN (vmaddwod_q_du_d, LARCH_V2DI_FTYPE_V2DI_UV2DI_V2DI),
++  LSX_BUILTIN (vrotr_b, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vrotr_h, LARCH_V8HI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vrotr_w, LARCH_V4SI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vrotr_d, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vadd_q, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vsub_q, LARCH_V2DI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vldrepl_b, LARCH_V16QI_FTYPE_CVPOINTER_SI),
++  LSX_BUILTIN (vldrepl_h, LARCH_V8HI_FTYPE_CVPOINTER_SI),
++  LSX_BUILTIN (vldrepl_w, LARCH_V4SI_FTYPE_CVPOINTER_SI),
++  LSX_BUILTIN (vldrepl_d, LARCH_V2DI_FTYPE_CVPOINTER_SI),
++
++  LSX_BUILTIN (vmskgez_b, LARCH_V16QI_FTYPE_V16QI),
++  LSX_BUILTIN (vmsknz_b, LARCH_V16QI_FTYPE_V16QI),
++  LSX_BUILTIN (vexth_h_b, LARCH_V8HI_FTYPE_V16QI),
++  LSX_BUILTIN (vexth_w_h, LARCH_V4SI_FTYPE_V8HI),
++  LSX_BUILTIN (vexth_d_w, LARCH_V2DI_FTYPE_V4SI),
++  LSX_BUILTIN (vexth_q_d, LARCH_V2DI_FTYPE_V2DI),
++  LSX_BUILTIN (vexth_hu_bu, LARCH_UV8HI_FTYPE_UV16QI),
++  LSX_BUILTIN (vexth_wu_hu, LARCH_UV4SI_FTYPE_UV8HI),
++  LSX_BUILTIN (vexth_du_wu, LARCH_UV2DI_FTYPE_UV4SI),
++  LSX_BUILTIN (vexth_qu_du, LARCH_UV2DI_FTYPE_UV2DI),
++  LSX_BUILTIN (vrotri_b, LARCH_V16QI_FTYPE_V16QI_UQI),
++  LSX_BUILTIN (vrotri_h, LARCH_V8HI_FTYPE_V8HI_UQI),
++  LSX_BUILTIN (vrotri_w, LARCH_V4SI_FTYPE_V4SI_UQI),
++  LSX_BUILTIN (vrotri_d, LARCH_V2DI_FTYPE_V2DI_UQI),
++  LSX_BUILTIN (vextl_q_d, LARCH_V2DI_FTYPE_V2DI),
++  LSX_BUILTIN (vsrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vsrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vsrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vsrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vsrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vsrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vsrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vsrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vssrlni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vssrlni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vssrlni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vssrlni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vssrlni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI),
++  LSX_BUILTIN (vssrlni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI),
++  LSX_BUILTIN (vssrlni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI),
++  LSX_BUILTIN (vssrlni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI),
++  LSX_BUILTIN (vssrlrni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vssrlrni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vssrlrni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vssrlrni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vssrlrni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI),
++  LSX_BUILTIN (vssrlrni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI),
++  LSX_BUILTIN (vssrlrni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI),
++  LSX_BUILTIN (vssrlrni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI),
++  LSX_BUILTIN (vsrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vsrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vsrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vsrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vsrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vsrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vsrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vsrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vssrani_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vssrani_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vssrani_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vssrani_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vssrani_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI),
++  LSX_BUILTIN (vssrani_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI),
++  LSX_BUILTIN (vssrani_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI),
++  LSX_BUILTIN (vssrani_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI),
++  LSX_BUILTIN (vssrarni_b_h, LARCH_V16QI_FTYPE_V16QI_V16QI_USI),
++  LSX_BUILTIN (vssrarni_h_w, LARCH_V8HI_FTYPE_V8HI_V8HI_USI),
++  LSX_BUILTIN (vssrarni_w_d, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vssrarni_d_q, LARCH_V2DI_FTYPE_V2DI_V2DI_USI),
++  LSX_BUILTIN (vssrarni_bu_h, LARCH_UV16QI_FTYPE_UV16QI_V16QI_USI),
++  LSX_BUILTIN (vssrarni_hu_w, LARCH_UV8HI_FTYPE_UV8HI_V8HI_USI),
++  LSX_BUILTIN (vssrarni_wu_d, LARCH_UV4SI_FTYPE_UV4SI_V4SI_USI),
++  LSX_BUILTIN (vssrarni_du_q, LARCH_UV2DI_FTYPE_UV2DI_V2DI_USI),
++  LSX_BUILTIN (vpermi_w, LARCH_V4SI_FTYPE_V4SI_V4SI_USI),
++  LSX_BUILTIN (vld, LARCH_V16QI_FTYPE_CVPOINTER_SI),
++  LSX_NO_TARGET_BUILTIN (vst, LARCH_VOID_FTYPE_V16QI_CVPOINTER_SI),
++  LSX_BUILTIN (vssrlrn_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vssrlrn_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vssrlrn_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vssrln_b_h, LARCH_V16QI_FTYPE_V8HI_V8HI),
++  LSX_BUILTIN (vssrln_h_w, LARCH_V8HI_FTYPE_V4SI_V4SI),
++  LSX_BUILTIN (vssrln_w_d, LARCH_V4SI_FTYPE_V2DI_V2DI),
++  LSX_BUILTIN (vorn_v, LARCH_V16QI_FTYPE_V16QI_V16QI),
++  LSX_BUILTIN (vldi, LARCH_V2DI_FTYPE_HI),
++  LSX_BUILTIN (vshuf_b, LARCH_V16QI_FTYPE_V16QI_V16QI_V16QI),
++  LSX_BUILTIN (vldx, LARCH_V16QI_FTYPE_CVPOINTER_DI),
++  LSX_NO_TARGET_BUILTIN (vstx, LARCH_VOID_FTYPE_V16QI_CVPOINTER_DI),
++  LSX_BUILTIN (vextl_qu_du, LARCH_UV2DI_FTYPE_UV2DI)
+ };
+ 
+ /* Index I is the function declaration for loongarch_builtins[I], or null if
+@@ -193,11 +1219,46 @@ static GTY (()) tree loongarch_builtin_decls[ARRAY_SIZE (loongarch_builtins)];
+    using the instruction code or return null if not defined for the target.  */
+ static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES];
+ 
++
++/* MODE is a vector mode whose elements have type TYPE.  Return the type
++   of the vector itself.  */
++
++static tree
++loongarch_builtin_vector_type (tree type, machine_mode mode)
++{
++  static tree types[2 * (int) MAX_MACHINE_MODE];
++  int mode_index;
++
++  mode_index = (int) mode;
++
++  if (TREE_CODE (type) == INTEGER_TYPE && TYPE_UNSIGNED (type))
++    mode_index += MAX_MACHINE_MODE;
++
++  if (types[mode_index] == NULL_TREE)
++    types[mode_index] = build_vector_type_for_mode (type, mode);
++  return types[mode_index];
++}
++
++/* Return a type for 'const volatile void *'.  */
++
++static tree
++loongarch_build_cvpointer_type (void)
++{
++  static tree cache;
++
++  if (cache == NULL_TREE)
++    cache = build_pointer_type (build_qualified_type (void_type_node,
++						      TYPE_QUAL_CONST
++						      | TYPE_QUAL_VOLATILE));
++  return cache;
++}
++
+ /* Source-level argument types.  */
+ #define LARCH_ATYPE_VOID void_type_node
+ #define LARCH_ATYPE_INT integer_type_node
+ #define LARCH_ATYPE_POINTER ptr_type_node
+-
++#define LARCH_ATYPE_CVPOINTER loongarch_build_cvpointer_type ()
++#define LARCH_ATYPE_BOOLEAN boolean_type_node
+ /* Standard mode-based argument types.  */
+ #define LARCH_ATYPE_QI intQI_type_node
+ #define LARCH_ATYPE_UQI unsigned_intQI_type_node
+@@ -210,6 +1271,72 @@ static GTY (()) int loongarch_get_builtin_decl_index[NUM_INSN_CODES];
+ #define LARCH_ATYPE_SF float_type_node
+ #define LARCH_ATYPE_DF double_type_node
+ 
++/* Vector argument types.  */
++#define LARCH_ATYPE_V2SF						\
++  loongarch_builtin_vector_type (float_type_node, V2SFmode)
++#define LARCH_ATYPE_V2HI						\
++  loongarch_builtin_vector_type (intHI_type_node, V2HImode)
++#define LARCH_ATYPE_V2SI						\
++  loongarch_builtin_vector_type (intSI_type_node, V2SImode)
++#define LARCH_ATYPE_V4QI						\
++  loongarch_builtin_vector_type (intQI_type_node, V4QImode)
++#define LARCH_ATYPE_V4HI						\
++  loongarch_builtin_vector_type (intHI_type_node, V4HImode)
++#define LARCH_ATYPE_V8QI						\
++  loongarch_builtin_vector_type (intQI_type_node, V8QImode)
++
++#define LARCH_ATYPE_V2DI						\
++  loongarch_builtin_vector_type (long_long_integer_type_node, V2DImode)
++#define LARCH_ATYPE_V4SI						\
++  loongarch_builtin_vector_type (intSI_type_node, V4SImode)
++#define LARCH_ATYPE_V8HI						\
++  loongarch_builtin_vector_type (intHI_type_node, V8HImode)
++#define LARCH_ATYPE_V16QI						\
++  loongarch_builtin_vector_type (intQI_type_node, V16QImode)
++#define LARCH_ATYPE_V2DF						\
++  loongarch_builtin_vector_type (double_type_node, V2DFmode)
++#define LARCH_ATYPE_V4SF						\
++  loongarch_builtin_vector_type (float_type_node, V4SFmode)
++
++/* LoongArch ASX.  */
++#define LARCH_ATYPE_V4DI						\
++  loongarch_builtin_vector_type (long_long_integer_type_node, V4DImode)
++#define LARCH_ATYPE_V8SI						\
++  loongarch_builtin_vector_type (intSI_type_node, V8SImode)
++#define LARCH_ATYPE_V16HI						\
++  loongarch_builtin_vector_type (intHI_type_node, V16HImode)
++#define LARCH_ATYPE_V32QI						\
++  loongarch_builtin_vector_type (intQI_type_node, V32QImode)
++#define LARCH_ATYPE_V4DF						\
++  loongarch_builtin_vector_type (double_type_node, V4DFmode)
++#define LARCH_ATYPE_V8SF						\
++  loongarch_builtin_vector_type (float_type_node, V8SFmode)
++
++#define LARCH_ATYPE_UV2DI					\
++  loongarch_builtin_vector_type (long_long_unsigned_type_node, V2DImode)
++#define LARCH_ATYPE_UV4SI					\
++  loongarch_builtin_vector_type (unsigned_intSI_type_node, V4SImode)
++#define LARCH_ATYPE_UV8HI					\
++  loongarch_builtin_vector_type (unsigned_intHI_type_node, V8HImode)
++#define LARCH_ATYPE_UV16QI					\
++  loongarch_builtin_vector_type (unsigned_intQI_type_node, V16QImode)
++
++#define LARCH_ATYPE_UV4DI					\
++  loongarch_builtin_vector_type (long_long_unsigned_type_node, V4DImode)
++#define LARCH_ATYPE_UV8SI					\
++  loongarch_builtin_vector_type (unsigned_intSI_type_node, V8SImode)
++#define LARCH_ATYPE_UV16HI					\
++  loongarch_builtin_vector_type (unsigned_intHI_type_node, V16HImode)
++#define LARCH_ATYPE_UV32QI					\
++  loongarch_builtin_vector_type (unsigned_intQI_type_node, V32QImode)
++
++#define LARCH_ATYPE_UV2SI					\
++  loongarch_builtin_vector_type (unsigned_intSI_type_node, V2SImode)
++#define LARCH_ATYPE_UV4HI					\
++  loongarch_builtin_vector_type (unsigned_intHI_type_node, V4HImode)
++#define LARCH_ATYPE_UV8QI					\
++  loongarch_builtin_vector_type (unsigned_intQI_type_node, V8QImode)
++
+ /* LARCH_FTYPE_ATYPESN takes N LARCH_FTYPES-like type codes and lists
+    their associated LARCH_ATYPEs.  */
+ #define LARCH_FTYPE_ATYPES1(A, B) LARCH_ATYPE_##A, LARCH_ATYPE_##B
+@@ -288,6 +1415,92 @@ loongarch_builtin_decl (unsigned int code, bool initialize_p ATTRIBUTE_UNUSED)
+   return loongarch_builtin_decls[code];
+ }
+ 
++/* Implement TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION.  */
++
++tree
++loongarch_builtin_vectorized_function (unsigned int fn, tree type_out,
++				       tree type_in)
++{
++  machine_mode in_mode, out_mode;
++  int in_n, out_n;
++
++  if (TREE_CODE (type_out) != VECTOR_TYPE
++      || TREE_CODE (type_in) != VECTOR_TYPE
++      || !ISA_HAS_LSX)
++    return NULL_TREE;
++
++  out_mode = TYPE_MODE (TREE_TYPE (type_out));
++  out_n = TYPE_VECTOR_SUBPARTS (type_out);
++  in_mode = TYPE_MODE (TREE_TYPE (type_in));
++  in_n = TYPE_VECTOR_SUBPARTS (type_in);
++
++  /* INSN is the name of the associated instruction pattern, without
++     the leading CODE_FOR_.  */
++#define LARCH_GET_BUILTIN(INSN) \
++  loongarch_builtin_decls[loongarch_get_builtin_decl_index[CODE_FOR_##INSN]]
++
++  switch (fn)
++    {
++    CASE_CFN_CEIL:
++      if (out_mode == DFmode && in_mode == DFmode)
++    {
++      if (out_n == 2 && in_n == 2)
++	return LARCH_GET_BUILTIN (lsx_vfrintrp_d);
++    }
++      if (out_mode == SFmode && in_mode == SFmode)
++    {
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lsx_vfrintrp_s);
++    }
++      break;
++
++    CASE_CFN_TRUNC:
++      if (out_mode == DFmode && in_mode == DFmode)
++    {
++      if (out_n == 2 && in_n == 2)
++	return LARCH_GET_BUILTIN (lsx_vfrintrz_d);
++    }
++      if (out_mode == SFmode && in_mode == SFmode)
++    {
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lsx_vfrintrz_s);
++    }
++      break;
++
++    CASE_CFN_RINT:
++    CASE_CFN_ROUND:
++      if (out_mode == DFmode && in_mode == DFmode)
++    {
++      if (out_n == 2 && in_n == 2)
++	return LARCH_GET_BUILTIN (lsx_vfrint_d);
++    }
++      if (out_mode == SFmode && in_mode == SFmode)
++    {
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lsx_vfrint_s);
++    }
++      break;
++
++    CASE_CFN_FLOOR:
++      if (out_mode == DFmode && in_mode == DFmode)
++    {
++      if (out_n == 2 && in_n == 2)
++	return LARCH_GET_BUILTIN (lsx_vfrintrm_d);
++    }
++      if (out_mode == SFmode && in_mode == SFmode)
++    {
++      if (out_n == 4 && in_n == 4)
++	return LARCH_GET_BUILTIN (lsx_vfrintrm_s);
++    }
++      break;
++
++    default:
++      break;
++    }
++
++  return NULL_TREE;
++}
++
+ /* Take argument ARGNO from EXP's argument list and convert it into
+    an expand operand.  Store the operand in *OP.  */
+ 
+@@ -323,7 +1536,236 @@ static rtx
+ loongarch_expand_builtin_insn (enum insn_code icode, unsigned int nops,
+ 			       struct expand_operand *ops, bool has_target_p)
+ {
+-  if (!maybe_expand_insn (icode, nops, ops))
++  machine_mode imode;
++  int rangelo = 0, rangehi = 0, error_opno = 0;
++
++  switch (icode)
++    {
++    case CODE_FOR_lsx_vaddi_bu:
++    case CODE_FOR_lsx_vaddi_hu:
++    case CODE_FOR_lsx_vaddi_wu:
++    case CODE_FOR_lsx_vaddi_du:
++    case CODE_FOR_lsx_vslti_bu:
++    case CODE_FOR_lsx_vslti_hu:
++    case CODE_FOR_lsx_vslti_wu:
++    case CODE_FOR_lsx_vslti_du:
++    case CODE_FOR_lsx_vslei_bu:
++    case CODE_FOR_lsx_vslei_hu:
++    case CODE_FOR_lsx_vslei_wu:
++    case CODE_FOR_lsx_vslei_du:
++    case CODE_FOR_lsx_vmaxi_bu:
++    case CODE_FOR_lsx_vmaxi_hu:
++    case CODE_FOR_lsx_vmaxi_wu:
++    case CODE_FOR_lsx_vmaxi_du:
++    case CODE_FOR_lsx_vmini_bu:
++    case CODE_FOR_lsx_vmini_hu:
++    case CODE_FOR_lsx_vmini_wu:
++    case CODE_FOR_lsx_vmini_du:
++    case CODE_FOR_lsx_vsubi_bu:
++    case CODE_FOR_lsx_vsubi_hu:
++    case CODE_FOR_lsx_vsubi_wu:
++    case CODE_FOR_lsx_vsubi_du:
++      gcc_assert (has_target_p && nops == 3);
++      /* We only generate a vector of constants iff the second argument
++	 is an immediate.  We also validate the range of the immediate.  */
++      if (CONST_INT_P (ops[2].value))
++	{
++	  rangelo = 0;
++	  rangehi = 31;
++	  if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
++	    {
++	      ops[2].mode = ops[0].mode;
++	      ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
++							     INTVAL (ops[2].value));
++	    }
++	  else
++	    error_opno = 2;
++	}
++      break;
++
++    case CODE_FOR_lsx_vseqi_b:
++    case CODE_FOR_lsx_vseqi_h:
++    case CODE_FOR_lsx_vseqi_w:
++    case CODE_FOR_lsx_vseqi_d:
++    case CODE_FOR_lsx_vslti_b:
++    case CODE_FOR_lsx_vslti_h:
++    case CODE_FOR_lsx_vslti_w:
++    case CODE_FOR_lsx_vslti_d:
++    case CODE_FOR_lsx_vslei_b:
++    case CODE_FOR_lsx_vslei_h:
++    case CODE_FOR_lsx_vslei_w:
++    case CODE_FOR_lsx_vslei_d:
++    case CODE_FOR_lsx_vmaxi_b:
++    case CODE_FOR_lsx_vmaxi_h:
++    case CODE_FOR_lsx_vmaxi_w:
++    case CODE_FOR_lsx_vmaxi_d:
++    case CODE_FOR_lsx_vmini_b:
++    case CODE_FOR_lsx_vmini_h:
++    case CODE_FOR_lsx_vmini_w:
++    case CODE_FOR_lsx_vmini_d:
++      gcc_assert (has_target_p && nops == 3);
++      /* We only generate a vector of constants iff the second argument
++	 is an immediate.  We also validate the range of the immediate.  */
++      if (CONST_INT_P (ops[2].value))
++	{
++	  rangelo = -16;
++	  rangehi = 15;
++	  if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
++	    {
++	      ops[2].mode = ops[0].mode;
++	      ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
++							     INTVAL (ops[2].value));
++	    }
++	  else
++	    error_opno = 2;
++	}
++      break;
++
++    case CODE_FOR_lsx_vandi_b:
++    case CODE_FOR_lsx_vori_b:
++    case CODE_FOR_lsx_vnori_b:
++    case CODE_FOR_lsx_vxori_b:
++      gcc_assert (has_target_p && nops == 3);
++      if (!CONST_INT_P (ops[2].value))
++	break;
++      ops[2].mode = ops[0].mode;
++      ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
++						     INTVAL (ops[2].value));
++      break;
++
++    case CODE_FOR_lsx_vbitseli_b:
++      gcc_assert (has_target_p && nops == 4);
++      if (!CONST_INT_P (ops[3].value))
++	break;
++      ops[3].mode = ops[0].mode;
++      ops[3].value = loongarch_gen_const_int_vector (ops[3].mode,
++						     INTVAL (ops[3].value));
++      break;
++
++    case CODE_FOR_lsx_vreplgr2vr_b:
++    case CODE_FOR_lsx_vreplgr2vr_h:
++    case CODE_FOR_lsx_vreplgr2vr_w:
++    case CODE_FOR_lsx_vreplgr2vr_d:
++      /* Map the built-ins to vector fill operations.  We need fix up the mode
++	 for the element being inserted.  */
++      gcc_assert (has_target_p && nops == 2);
++      imode = GET_MODE_INNER (ops[0].mode);
++      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
++      ops[1].mode = imode;
++      break;
++
++    case CODE_FOR_lsx_vilvh_b:
++    case CODE_FOR_lsx_vilvh_h:
++    case CODE_FOR_lsx_vilvh_w:
++    case CODE_FOR_lsx_vilvh_d:
++    case CODE_FOR_lsx_vilvl_b:
++    case CODE_FOR_lsx_vilvl_h:
++    case CODE_FOR_lsx_vilvl_w:
++    case CODE_FOR_lsx_vilvl_d:
++    case CODE_FOR_lsx_vpackev_b:
++    case CODE_FOR_lsx_vpackev_h:
++    case CODE_FOR_lsx_vpackev_w:
++    case CODE_FOR_lsx_vpackod_b:
++    case CODE_FOR_lsx_vpackod_h:
++    case CODE_FOR_lsx_vpackod_w:
++    case CODE_FOR_lsx_vpickev_b:
++    case CODE_FOR_lsx_vpickev_h:
++    case CODE_FOR_lsx_vpickev_w:
++    case CODE_FOR_lsx_vpickod_b:
++    case CODE_FOR_lsx_vpickod_h:
++    case CODE_FOR_lsx_vpickod_w:
++      /* Swap the operands 1 and 2 for interleave operations.  Built-ins follow
++	 convention of ISA, which have op1 as higher component and op2 as lower
++	 component.  However, the VEC_PERM op in tree and vec_concat in RTL
++	 expects first operand to be lower component, because of which this
++	 swap is needed for builtins.  */
++      gcc_assert (has_target_p && nops == 3);
++      std::swap (ops[1], ops[2]);
++      break;
++
++    case CODE_FOR_lsx_vslli_b:
++    case CODE_FOR_lsx_vslli_h:
++    case CODE_FOR_lsx_vslli_w:
++    case CODE_FOR_lsx_vslli_d:
++    case CODE_FOR_lsx_vsrai_b:
++    case CODE_FOR_lsx_vsrai_h:
++    case CODE_FOR_lsx_vsrai_w:
++    case CODE_FOR_lsx_vsrai_d:
++    case CODE_FOR_lsx_vsrli_b:
++    case CODE_FOR_lsx_vsrli_h:
++    case CODE_FOR_lsx_vsrli_w:
++    case CODE_FOR_lsx_vsrli_d:
++      gcc_assert (has_target_p && nops == 3);
++      if (CONST_INT_P (ops[2].value))
++	{
++	  rangelo = 0;
++	  rangehi = GET_MODE_UNIT_BITSIZE (ops[0].mode) - 1;
++	  if (IN_RANGE (INTVAL (ops[2].value), rangelo, rangehi))
++	    {
++	      ops[2].mode = ops[0].mode;
++	      ops[2].value = loongarch_gen_const_int_vector (ops[2].mode,
++							     INTVAL (ops[2].value));
++	    }
++	  else
++	    error_opno = 2;
++	}
++      break;
++
++    case CODE_FOR_lsx_vinsgr2vr_b:
++    case CODE_FOR_lsx_vinsgr2vr_h:
++    case CODE_FOR_lsx_vinsgr2vr_w:
++    case CODE_FOR_lsx_vinsgr2vr_d:
++      /* Map the built-ins to insert operations.  We need to swap operands,
++	 fix up the mode for the element being inserted, and generate
++	 a bit mask for vec_merge.  */
++      gcc_assert (has_target_p && nops == 4);
++      std::swap (ops[1], ops[2]);
++      imode = GET_MODE_INNER (ops[0].mode);
++      ops[1].value = lowpart_subreg (imode, ops[1].value, ops[1].mode);
++      ops[1].mode = imode;
++      rangelo = 0;
++      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
++      if (CONST_INT_P (ops[3].value)
++	  && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
++	ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
++      else
++	error_opno = 2;
++      break;
++
++      /* Map the built-ins to element insert operations.  We need to swap
++	 operands and generate a bit mask.  */
++      gcc_assert (has_target_p && nops == 4);
++      std::swap (ops[1], ops[2]);
++      std::swap (ops[1], ops[3]);
++      rangelo = 0;
++      rangehi = GET_MODE_NUNITS (ops[0].mode) - 1;
++      if (CONST_INT_P (ops[3].value)
++	  && IN_RANGE (INTVAL (ops[3].value), rangelo, rangehi))
++	ops[3].value = GEN_INT (1 << INTVAL (ops[3].value));
++      else
++	error_opno = 2;
++      break;
++
++    case CODE_FOR_lsx_vshuf4i_b:
++    case CODE_FOR_lsx_vshuf4i_h:
++    case CODE_FOR_lsx_vshuf4i_w:
++    case CODE_FOR_lsx_vshuf4i_w_f:
++      gcc_assert (has_target_p && nops == 3);
++      ops[2].value = loongarch_gen_const_int_vector_shuffle (ops[0].mode,
++							     INTVAL (ops[2].value));
++      break;
++
++    default:
++      break;
++  }
++
++  if (error_opno != 0)
++    {
++      error ("argument %d to the built-in must be a constant"
++	     " in range %d to %d", error_opno, rangelo, rangehi);
++      return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
++    }
++  else if (!maybe_expand_insn (icode, nops, ops))
+     {
+       error ("invalid argument to built-in function");
+       return has_target_p ? gen_reg_rtx (ops[0].mode) : const0_rtx;
+@@ -357,6 +1799,50 @@ loongarch_expand_builtin_direct (enum insn_code icode, rtx target, tree exp,
+   return loongarch_expand_builtin_insn (icode, opno, ops, has_target_p);
+ }
+ 
++/* Expand an LSX built-in for a compare and branch instruction specified by
++   ICODE, set a general-purpose register to 1 if the branch was taken,
++   0 otherwise.  */
++
++static rtx
++loongarch_expand_builtin_lsx_test_branch (enum insn_code icode, tree exp)
++{
++  struct expand_operand ops[3];
++  rtx_insn *cbranch;
++  rtx_code_label *true_label, *done_label;
++  rtx cmp_result;
++
++  true_label = gen_label_rtx ();
++  done_label = gen_label_rtx ();
++
++  create_input_operand (&ops[0], true_label, TYPE_MODE (TREE_TYPE (exp)));
++  loongarch_prepare_builtin_arg (&ops[1], exp, 0);
++  create_fixed_operand (&ops[2], const0_rtx);
++
++  /* Make sure that the operand 1 is a REG.  */
++  if (GET_CODE (ops[1].value) != REG)
++    ops[1].value = force_reg (ops[1].mode, ops[1].value);
++
++  if ((cbranch = maybe_gen_insn (icode, 3, ops)) == NULL_RTX)
++    error ("failed to expand built-in function");
++
++  cmp_result = gen_reg_rtx (SImode);
++
++  /* First assume that CMP_RESULT is false.  */
++  loongarch_emit_move (cmp_result, const0_rtx);
++
++  /* Branch to TRUE_LABEL if CBRANCH is taken and DONE_LABEL otherwise.  */
++  emit_jump_insn (cbranch);
++  emit_jump_insn (gen_jump (done_label));
++  emit_barrier ();
++
++  /* Set CMP_RESULT to true if the branch was taken.  */
++  emit_label (true_label);
++  loongarch_emit_move (cmp_result, const1_rtx);
++
++  emit_label (done_label);
++  return cmp_result;
++}
++
+ /* Implement TARGET_EXPAND_BUILTIN.  */
+ 
+ rtx
+@@ -377,10 +1863,14 @@ loongarch_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
+   switch (d->builtin_type)
+     {
+     case LARCH_BUILTIN_DIRECT:
++    case LARCH_BUILTIN_LSX:
+       return loongarch_expand_builtin_direct (d->icode, target, exp, true);
+ 
+     case LARCH_BUILTIN_DIRECT_NO_TARGET:
+       return loongarch_expand_builtin_direct (d->icode, target, exp, false);
++
++    case LARCH_BUILTIN_LSX_TEST_BRANCH:
++      return loongarch_expand_builtin_lsx_test_branch (d->icode, exp);
+     }
+   gcc_unreachable ();
+ }
+diff --git a/gcc/config/loongarch/loongarch-ftypes.def b/gcc/config/loongarch/loongarch-ftypes.def
+index 2babff414..2b0d50892 100644
+--- a/gcc/config/loongarch/loongarch-ftypes.def
++++ b/gcc/config/loongarch/loongarch-ftypes.def
+@@ -32,7 +32,7 @@ along with GCC; see the file COPYING3.  If not see
+       INT for integer_type_node
+       POINTER for ptr_type_node
+ 
+-   (we don't use PTR because that's a ANSI-compatibillity macro).
++   (we don't use PTR because that's an ANSI-compatibility macro).
+ 
+    Please keep this list lexicographically sorted by the LIST argument.  */
+ 
+@@ -63,3 +63,396 @@ DEF_LARCH_FTYPE (3, (VOID, USI, USI, SI))
+ DEF_LARCH_FTYPE (3, (VOID, USI, UDI, SI))
+ DEF_LARCH_FTYPE (3, (USI, USI, USI, USI))
+ DEF_LARCH_FTYPE (3, (UDI, UDI, UDI, USI))
++
++DEF_LARCH_FTYPE (1, (DF, DF))
++DEF_LARCH_FTYPE (2, (DF, DF, DF))
++DEF_LARCH_FTYPE (1, (DF, V2DF))
++
++DEF_LARCH_FTYPE (1, (DI, DI))
++DEF_LARCH_FTYPE (1, (DI, SI))
++DEF_LARCH_FTYPE (1, (DI, UQI))
++DEF_LARCH_FTYPE (2, (DI, DI, DI))
++DEF_LARCH_FTYPE (2, (DI, DI, SI))
++DEF_LARCH_FTYPE (3, (DI, DI, SI, SI))
++DEF_LARCH_FTYPE (3, (DI, DI, USI, USI))
++DEF_LARCH_FTYPE (3, (DI, DI, DI, QI))
++DEF_LARCH_FTYPE (3, (DI, DI, V2HI, V2HI))
++DEF_LARCH_FTYPE (3, (DI, DI, V4QI, V4QI))
++DEF_LARCH_FTYPE (2, (DI, POINTER, SI))
++DEF_LARCH_FTYPE (2, (DI, SI, SI))
++DEF_LARCH_FTYPE (2, (DI, USI, USI))
++
++DEF_LARCH_FTYPE (2, (DI, V2DI, UQI))
++
++DEF_LARCH_FTYPE (2, (INT, DF, DF))
++DEF_LARCH_FTYPE (2, (INT, SF, SF))
++
++DEF_LARCH_FTYPE (2, (INT, V2SF, V2SF))
++DEF_LARCH_FTYPE (4, (INT, V2SF, V2SF, V2SF, V2SF))
++
++DEF_LARCH_FTYPE (1, (SF, SF))
++DEF_LARCH_FTYPE (2, (SF, SF, SF))
++DEF_LARCH_FTYPE (1, (SF, V2SF))
++DEF_LARCH_FTYPE (1, (SF, V4SF))
++
++DEF_LARCH_FTYPE (2, (SI, POINTER, SI))
++DEF_LARCH_FTYPE (1, (SI, SI))
++DEF_LARCH_FTYPE (1, (SI, UDI))
++DEF_LARCH_FTYPE (2, (QI, QI, QI))
++DEF_LARCH_FTYPE (2, (HI, HI, HI))
++DEF_LARCH_FTYPE (3, (SI, SI, SI, SI))
++DEF_LARCH_FTYPE (3, (SI, SI, SI, QI))
++DEF_LARCH_FTYPE (1, (SI, UQI))
++DEF_LARCH_FTYPE (1, (SI, UV16QI))
++DEF_LARCH_FTYPE (1, (SI, UV2DI))
++DEF_LARCH_FTYPE (1, (SI, UV4SI))
++DEF_LARCH_FTYPE (1, (SI, UV8HI))
++DEF_LARCH_FTYPE (2, (SI, V16QI, UQI))
++DEF_LARCH_FTYPE (1, (SI, V2HI))
++DEF_LARCH_FTYPE (2, (SI, V2HI, V2HI))
++DEF_LARCH_FTYPE (1, (SI, V4QI))
++DEF_LARCH_FTYPE (2, (SI, V4QI, V4QI))
++DEF_LARCH_FTYPE (2, (SI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (SI, V8HI, UQI))
++DEF_LARCH_FTYPE (1, (SI, VOID))
++
++DEF_LARCH_FTYPE (2, (UDI, UDI, UDI))
++DEF_LARCH_FTYPE (2, (UDI, UV2SI, UV2SI))
++DEF_LARCH_FTYPE (2, (UDI, V2DI, UQI))
++
++DEF_LARCH_FTYPE (2, (USI, V16QI, UQI))
++DEF_LARCH_FTYPE (2, (USI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (USI, V8HI, UQI))
++DEF_LARCH_FTYPE (1, (USI, VOID))
++
++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UQI))
++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, USI))
++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UQI))
++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, USI))
++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (2, (UV16QI, UV16QI, V16QI))
++
++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UQI))
++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, UV2DI))
++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UQI))
++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV2DI, UV2DI))
++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (2, (UV2DI, UV2DI, V2DI))
++DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (1, (UV2DI, V2DF))
++
++DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UQI))
++DEF_LARCH_FTYPE (2, (UV2SI, UV2SI, UV2SI))
++
++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UQI))
++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, USI))
++DEF_LARCH_FTYPE (2, (UV4HI, UV4HI, UV4HI))
++DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, UQI))
++DEF_LARCH_FTYPE (3, (UV4HI, UV4HI, UV4HI, USI))
++DEF_LARCH_FTYPE (1, (UV4HI, UV8QI))
++DEF_LARCH_FTYPE (2, (UV4HI, UV8QI, UV8QI))
++
++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UQI))
++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UQI))
++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (UV4SI, UV4SI, V4SI))
++DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (1, (UV4SI, V4SF))
++
++DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UQI))
++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UQI))
++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (UV8HI, UV8HI, V8HI))
++
++
++
++DEF_LARCH_FTYPE (2, (UV8QI, UV4HI, UV4HI))
++DEF_LARCH_FTYPE (1, (UV8QI, UV8QI))
++DEF_LARCH_FTYPE (2, (UV8QI, UV8QI, UV8QI))
++
++DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (2, (V16QI, CVPOINTER, DI))
++DEF_LARCH_FTYPE (1, (V16QI, HI))
++DEF_LARCH_FTYPE (1, (V16QI, SI))
++DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UQI))
++DEF_LARCH_FTYPE (2, (V16QI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (1, (V16QI, V16QI))
++DEF_LARCH_FTYPE (2, (V16QI, V16QI, QI))
++DEF_LARCH_FTYPE (2, (V16QI, V16QI, SI))
++DEF_LARCH_FTYPE (2, (V16QI, V16QI, USI))
++DEF_LARCH_FTYPE (2, (V16QI, V16QI, UQI))
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, SI))
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, V16QI))
++DEF_LARCH_FTYPE (2, (V16QI, V16QI, V16QI))
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, SI))
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, UQI))
++DEF_LARCH_FTYPE (4, (V16QI, V16QI, V16QI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, USI))
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, V16QI, V16QI))
++
++
++DEF_LARCH_FTYPE (1, (V2DF, DF))
++DEF_LARCH_FTYPE (1, (V2DF, UV2DI))
++DEF_LARCH_FTYPE (1, (V2DF, V2DF))
++DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DF))
++DEF_LARCH_FTYPE (3, (V2DF, V2DF, V2DF, V2DF))
++DEF_LARCH_FTYPE (2, (V2DF, V2DF, V2DI))
++DEF_LARCH_FTYPE (1, (V2DF, V2DI))
++DEF_LARCH_FTYPE (1, (V2DF, V4SF))
++DEF_LARCH_FTYPE (1, (V2DF, V4SI))
++
++DEF_LARCH_FTYPE (2, (V2DI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (1, (V2DI, DI))
++DEF_LARCH_FTYPE (1, (V2DI, HI))
++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UQI))
++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, UV2DI))
++DEF_LARCH_FTYPE (2, (V2DI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (1, (V2DI, V2DF))
++DEF_LARCH_FTYPE (2, (V2DI, V2DF, V2DF))
++DEF_LARCH_FTYPE (1, (V2DI, V2DI))
++DEF_LARCH_FTYPE (1, (UV2DI, UV2DI))
++DEF_LARCH_FTYPE (2, (V2DI, V2DI, QI))
++DEF_LARCH_FTYPE (2, (V2DI, V2DI, SI))
++DEF_LARCH_FTYPE (2, (V2DI, V2DI, UQI))
++DEF_LARCH_FTYPE (2, (V2DI, V2DI, USI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, DI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, V2DI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (2, (V2DI, V2DI, V2DI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, SI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, UQI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, USI))
++DEF_LARCH_FTYPE (4, (V2DI, V2DI, V2DI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V2DI, V2DI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V4SI, V4SI))
++DEF_LARCH_FTYPE (2, (V2DI, V4SI, V4SI))
++
++DEF_LARCH_FTYPE (1, (V2HI, SI))
++DEF_LARCH_FTYPE (2, (V2HI, SI, SI))
++DEF_LARCH_FTYPE (3, (V2HI, SI, SI, SI))
++DEF_LARCH_FTYPE (1, (V2HI, V2HI))
++DEF_LARCH_FTYPE (2, (V2HI, V2HI, SI))
++DEF_LARCH_FTYPE (2, (V2HI, V2HI, V2HI))
++DEF_LARCH_FTYPE (1, (V2HI, V4QI))
++DEF_LARCH_FTYPE (2, (V2HI, V4QI, V2HI))
++
++DEF_LARCH_FTYPE (2, (V2SF, SF, SF))
++DEF_LARCH_FTYPE (1, (V2SF, V2SF))
++DEF_LARCH_FTYPE (2, (V2SF, V2SF, V2SF))
++DEF_LARCH_FTYPE (3, (V2SF, V2SF, V2SF, INT))
++DEF_LARCH_FTYPE (4, (V2SF, V2SF, V2SF, V2SF, V2SF))
++
++DEF_LARCH_FTYPE (2, (V2SI, V2SI, UQI))
++DEF_LARCH_FTYPE (2, (V2SI, V2SI, V2SI))
++DEF_LARCH_FTYPE (2, (V2SI, V4HI, V4HI))
++
++DEF_LARCH_FTYPE (2, (V4HI, V2SI, V2SI))
++DEF_LARCH_FTYPE (2, (V4HI, V4HI, UQI))
++DEF_LARCH_FTYPE (2, (V4HI, V4HI, USI))
++DEF_LARCH_FTYPE (2, (V4HI, V4HI, V4HI))
++DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, UQI))
++DEF_LARCH_FTYPE (3, (V4HI, V4HI, V4HI, USI))
++
++DEF_LARCH_FTYPE (1, (V4QI, SI))
++DEF_LARCH_FTYPE (2, (V4QI, V2HI, V2HI))
++DEF_LARCH_FTYPE (1, (V4QI, V4QI))
++DEF_LARCH_FTYPE (2, (V4QI, V4QI, SI))
++DEF_LARCH_FTYPE (2, (V4QI, V4QI, V4QI))
++
++DEF_LARCH_FTYPE (1, (V4SF, SF))
++DEF_LARCH_FTYPE (1, (V4SF, UV4SI))
++DEF_LARCH_FTYPE (2, (V4SF, V2DF, V2DF))
++DEF_LARCH_FTYPE (1, (V4SF, V4SF))
++DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SF))
++DEF_LARCH_FTYPE (3, (V4SF, V4SF, V4SF, V4SF))
++DEF_LARCH_FTYPE (2, (V4SF, V4SF, V4SI))
++DEF_LARCH_FTYPE (1, (V4SF, V4SI))
++DEF_LARCH_FTYPE (1, (V4SF, V8HI))
++
++DEF_LARCH_FTYPE (2, (V4SI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (1, (V4SI, HI))
++DEF_LARCH_FTYPE (1, (V4SI, SI))
++DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UQI))
++DEF_LARCH_FTYPE (2, (V4SI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (2, (V4SI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (V4SI, V2DF, V2DF))
++DEF_LARCH_FTYPE (1, (V4SI, V4SF))
++DEF_LARCH_FTYPE (2, (V4SI, V4SF, V4SF))
++DEF_LARCH_FTYPE (1, (V4SI, V4SI))
++DEF_LARCH_FTYPE (2, (V4SI, V4SI, QI))
++DEF_LARCH_FTYPE (2, (V4SI, V4SI, SI))
++DEF_LARCH_FTYPE (2, (V4SI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (V4SI, V4SI, USI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, SI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, V4SI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (V4SI, V4SI, V4SI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, SI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, UQI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, USI))
++DEF_LARCH_FTYPE (4, (V4SI, V4SI, V4SI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V4SI, V4SI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V8HI, V8HI))
++DEF_LARCH_FTYPE (2, (V4SI, V8HI, V8HI))
++
++DEF_LARCH_FTYPE (2, (V8HI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (1, (V8HI, HI))
++DEF_LARCH_FTYPE (1, (V8HI, SI))
++DEF_LARCH_FTYPE (2, (V8HI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UQI))
++DEF_LARCH_FTYPE (2, (V8HI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (V8HI, V16QI, V16QI))
++DEF_LARCH_FTYPE (2, (V8HI, V4SF, V4SF))
++DEF_LARCH_FTYPE (1, (V8HI, V8HI))
++DEF_LARCH_FTYPE (2, (V8HI, V8HI, QI))
++DEF_LARCH_FTYPE (2, (V8HI, V8HI, SI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, SI, UQI))
++DEF_LARCH_FTYPE (2, (V8HI, V8HI, UQI))
++DEF_LARCH_FTYPE (2, (V8HI, V8HI, USI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, SI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, V8HI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V16QI, V16QI))
++DEF_LARCH_FTYPE (2, (V8HI, V8HI, V8HI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, SI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, UQI))
++DEF_LARCH_FTYPE (4, (V8HI, V8HI, V8HI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, USI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, V8HI, V8HI))
++
++DEF_LARCH_FTYPE (2, (V8QI, V4HI, V4HI))
++DEF_LARCH_FTYPE (1, (V8QI, V8QI))
++DEF_LARCH_FTYPE (2, (V8QI, V8QI, V8QI))
++
++DEF_LARCH_FTYPE (2, (VOID, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (VOID, SI, SI))
++DEF_LARCH_FTYPE (2, (VOID, UQI, SI))
++DEF_LARCH_FTYPE (2, (VOID, USI, UQI))
++DEF_LARCH_FTYPE (1, (VOID, UHI))
++DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V16QI, CVPOINTER, DI))
++DEF_LARCH_FTYPE (3, (VOID, V2DF, POINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V2DI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (2, (VOID, V2HI, V2HI))
++DEF_LARCH_FTYPE (2, (VOID, V4QI, V4QI))
++DEF_LARCH_FTYPE (3, (VOID, V4SF, POINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V4SI, CVPOINTER, SI))
++DEF_LARCH_FTYPE (3, (VOID, V8HI, CVPOINTER, SI))
++
++DEF_LARCH_FTYPE (1, (V8HI, V16QI))
++DEF_LARCH_FTYPE (1, (V4SI, V16QI))
++DEF_LARCH_FTYPE (1, (V2DI, V16QI))
++DEF_LARCH_FTYPE (1, (V4SI, V8HI))
++DEF_LARCH_FTYPE (1, (V2DI, V8HI))
++DEF_LARCH_FTYPE (1, (V2DI, V4SI))
++DEF_LARCH_FTYPE (1, (UV8HI, V16QI))
++DEF_LARCH_FTYPE (1, (UV4SI, V16QI))
++DEF_LARCH_FTYPE (1, (UV2DI, V16QI))
++DEF_LARCH_FTYPE (1, (UV4SI, V8HI))
++DEF_LARCH_FTYPE (1, (UV2DI, V8HI))
++DEF_LARCH_FTYPE (1, (UV2DI, V4SI))
++DEF_LARCH_FTYPE (1, (UV8HI, UV16QI))
++DEF_LARCH_FTYPE (1, (UV4SI, UV16QI))
++DEF_LARCH_FTYPE (1, (UV2DI, UV16QI))
++DEF_LARCH_FTYPE (1, (UV4SI, UV8HI))
++DEF_LARCH_FTYPE (1, (UV2DI, UV8HI))
++DEF_LARCH_FTYPE (1, (UV2DI, UV4SI))
++DEF_LARCH_FTYPE (2, (UV8HI, V16QI, V16QI))
++DEF_LARCH_FTYPE (2, (UV4SI, V8HI, V8HI))
++DEF_LARCH_FTYPE (2, (UV2DI, V4SI, V4SI))
++DEF_LARCH_FTYPE (2, (V8HI, V16QI, UQI))
++DEF_LARCH_FTYPE (2, (V4SI, V8HI, UQI))
++DEF_LARCH_FTYPE (2, (V2DI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (UV8HI, UV16QI, UQI))
++DEF_LARCH_FTYPE (2, (UV4SI, UV8HI, UQI))
++DEF_LARCH_FTYPE (2, (UV2DI, UV4SI, UQI))
++DEF_LARCH_FTYPE (2, (V16QI, V8HI, V8HI))
++DEF_LARCH_FTYPE (2, (V8HI, V4SI, V4SI))
++DEF_LARCH_FTYPE (2, (V4SI, V2DI, V2DI))
++DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UV4SI))
++DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UV2DI))
++DEF_LARCH_FTYPE (2, (V16QI, V8HI, UQI))
++DEF_LARCH_FTYPE (2, (V8HI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (V4SI, V2DI, UQI))
++DEF_LARCH_FTYPE (2, (UV16QI, UV8HI, UQI))
++DEF_LARCH_FTYPE (2, (UV8HI, UV4SI, UQI))
++DEF_LARCH_FTYPE (2, (UV4SI, UV2DI, UQI))
++DEF_LARCH_FTYPE (2, (V16QI, V16QI, DI))
++DEF_LARCH_FTYPE (2, (V16QI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UQI, UQI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UQI, UQI))
++DEF_LARCH_FTYPE (2, (V4SF, V2DI, V2DI))
++DEF_LARCH_FTYPE (1, (V2DI, V4SF))
++DEF_LARCH_FTYPE (2, (V2DI, UQI, USI))
++DEF_LARCH_FTYPE (2, (V2DI, UQI, UQI))
++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V16QI, CVPOINTER))
++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V8HI, CVPOINTER))
++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V4SI, CVPOINTER))
++DEF_LARCH_FTYPE (4, (VOID, SI, UQI, V2DI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V16QI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V8HI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V4SI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V2DI, SI, CVPOINTER))
++DEF_LARCH_FTYPE (2, (V8HI, UV16QI, V16QI))
++DEF_LARCH_FTYPE (2, (V16QI, V16QI, UV16QI))
++DEF_LARCH_FTYPE (2, (UV16QI, V16QI, UV16QI))
++DEF_LARCH_FTYPE (2, (V8HI, V8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (UV8HI, V8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (V4SI, V4SI, UV4SI))
++DEF_LARCH_FTYPE (2, (UV4SI, V4SI, UV4SI))
++DEF_LARCH_FTYPE (2, (V4SI, V16QI, V16QI))
++DEF_LARCH_FTYPE (2, (V4SI, UV16QI, V16QI))
++DEF_LARCH_FTYPE (2, (UV4SI, UV16QI, UV16QI))
++DEF_LARCH_FTYPE (2, (V2DI, V2DI, UV2DI))
++DEF_LARCH_FTYPE (2, (UV2DI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (2, (V4SI, UV8HI, V8HI))
++DEF_LARCH_FTYPE (2, (V2DI, UV4SI, V4SI))
++DEF_LARCH_FTYPE (2, (V2DI, UV2DI, V2DI))
++DEF_LARCH_FTYPE (2, (V2DI, V8HI, V8HI))
++DEF_LARCH_FTYPE (2, (V2DI, UV8HI, V8HI))
++DEF_LARCH_FTYPE (2, (UV2DI, V2DI, UV2DI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV8HI, V8HI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV2DI, V2DI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV4SI, V4SI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, V8HI, V8HI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, UV8HI, V8HI))
++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, UV8HI, UV8HI))
++DEF_LARCH_FTYPE (3, (V8HI, V8HI, UV16QI, V16QI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, V16QI, V16QI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, UV16QI, V16QI))
++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, UV16QI, UV16QI))
++
++DEF_LARCH_FTYPE(4,(VOID,V16QI,CVPOINTER,SI,UQI))
++DEF_LARCH_FTYPE(4,(VOID,V8HI,CVPOINTER,SI,UQI))
++DEF_LARCH_FTYPE(4,(VOID,V4SI,CVPOINTER,SI,UQI))
++DEF_LARCH_FTYPE(4,(VOID,V2DI,CVPOINTER,SI,UQI))
++
++DEF_LARCH_FTYPE (2, (DI, V16QI, UQI))
++DEF_LARCH_FTYPE (2, (DI, V8HI, UQI))
++DEF_LARCH_FTYPE (2, (DI, V4SI, UQI))
++DEF_LARCH_FTYPE (2, (UDI, V16QI, UQI))
++DEF_LARCH_FTYPE (2, (UDI, V8HI, UQI))
++DEF_LARCH_FTYPE (2, (UDI, V4SI, UQI))
++
++DEF_LARCH_FTYPE (3, (UV16QI, UV16QI, V16QI, USI))
++DEF_LARCH_FTYPE (3, (UV8HI, UV8HI, V8HI, USI))
++DEF_LARCH_FTYPE (3, (UV4SI, UV4SI, V4SI, USI))
++DEF_LARCH_FTYPE (3, (UV2DI, UV2DI, V2DI, USI))
++
++DEF_LARCH_FTYPE (1, (BOOLEAN,V16QI))
++DEF_LARCH_FTYPE(2,(V16QI,CVPOINTER,CVPOINTER))
++DEF_LARCH_FTYPE(3,(VOID,V16QI,CVPOINTER,CVPOINTER))
++
++DEF_LARCH_FTYPE (3, (V16QI, V16QI, SI, UQI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, SI, UQI))
++DEF_LARCH_FTYPE (3, (V2DI, V2DI, DI, UQI))
++DEF_LARCH_FTYPE (3, (V4SI, V4SI, SI, UQI))
+diff --git a/gcc/config/loongarch/lsxintrin.h b/gcc/config/loongarch/lsxintrin.h
+new file mode 100644
+index 000000000..ec4206990
+--- /dev/null
++++ b/gcc/config/loongarch/lsxintrin.h
+@@ -0,0 +1,5181 @@
++/* LARCH Loongson SX intrinsics include file.
++
++   Copyright (C) 2018 Free Software Foundation, Inc.
++
++   This file is part of GCC.
++
++   GCC is free software; you can redistribute it and/or modify it
++   under the terms of the GNU General Public License as published
++   by the Free Software Foundation; either version 3, or (at your
++   option) any later version.
++
++   GCC is distributed in the hope that it will be useful, but WITHOUT
++   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
++   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
++   License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   .  */
++
++#ifndef _GCC_LOONGSON_SXINTRIN_H
++#define _GCC_LOONGSON_SXINTRIN_H 1
++
++#if defined(__loongarch_sx)
++typedef signed char v16i8 __attribute__ ((vector_size(16), aligned(16)));
++typedef signed char v16i8_b __attribute__ ((vector_size(16), aligned(1)));
++typedef unsigned char v16u8 __attribute__ ((vector_size(16), aligned(16)));
++typedef unsigned char v16u8_b __attribute__ ((vector_size(16), aligned(1)));
++typedef short v8i16 __attribute__ ((vector_size(16), aligned(16)));
++typedef short v8i16_h __attribute__ ((vector_size(16), aligned(2)));
++typedef unsigned short v8u16 __attribute__ ((vector_size(16), aligned(16)));
++typedef unsigned short v8u16_h __attribute__ ((vector_size(16), aligned(2)));
++typedef int v4i32 __attribute__ ((vector_size(16), aligned(16)));
++typedef int v4i32_w __attribute__ ((vector_size(16), aligned(4)));
++typedef unsigned int v4u32 __attribute__ ((vector_size(16), aligned(16)));
++typedef unsigned int v4u32_w __attribute__ ((vector_size(16), aligned(4)));
++typedef long long v2i64 __attribute__ ((vector_size(16), aligned(16)));
++typedef long long v2i64_d __attribute__ ((vector_size(16), aligned(8)));
++typedef unsigned long long v2u64 __attribute__ ((vector_size(16), aligned(16)));
++typedef unsigned long long v2u64_d __attribute__ ((vector_size(16), aligned(8)));
++typedef float v4f32 __attribute__ ((vector_size(16), aligned(16)));
++typedef float v4f32_w __attribute__ ((vector_size(16), aligned(4)));
++typedef double v2f64 __attribute__ ((vector_size(16), aligned(16)));
++typedef double v2f64_d __attribute__ ((vector_size(16), aligned(8)));
++
++typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
++typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
++typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsll_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsll_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsll_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsll_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsll_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsll_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsll_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsll_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vslli_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vslli_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vslli_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vslli_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vslli_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslli_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vslli_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vslli_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsra_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsra_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsra_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsra_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsra_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsra_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsra_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsra_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vsrai_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsrai_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vsrai_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsrai_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vsrai_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsrai_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vsrai_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vsrai_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrar_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrar_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrar_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrar_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrar_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrar_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrar_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrar_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vsrari_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsrari_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vsrari_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsrari_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vsrari_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsrari_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vsrari_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vsrari_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrl_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrl_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrl_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrl_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrl_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrl_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrl_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrl_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vsrli_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsrli_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vsrli_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsrli_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vsrli_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsrli_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vsrli_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vsrli_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrlr_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrlr_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrlr_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrlr_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrlr_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrlr_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrlr_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrlr_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vsrlri_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsrlri_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vsrlri_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsrlri_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vsrlri_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsrlri_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vsrlri_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vsrlri_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitclr_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitclr_b ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitclr_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitclr_h ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitclr_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitclr_w ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitclr_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitclr_d ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vbitclri_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vbitclri_b ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UQI.  */
++#define __lsx_vbitclri_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vbitclri_h ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UQI.  */
++#define __lsx_vbitclri_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vbitclri_w ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UQI.  */
++#define __lsx_vbitclri_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vbitclri_d ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitset_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitset_b ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitset_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitset_h ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitset_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitset_w ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitset_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitset_d ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vbitseti_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vbitseti_b ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UQI.  */
++#define __lsx_vbitseti_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vbitseti_h ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UQI.  */
++#define __lsx_vbitseti_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vbitseti_w ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UQI.  */
++#define __lsx_vbitseti_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vbitseti_d ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitrev_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitrev_b ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitrev_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitrev_h ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitrev_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitrev_w ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitrev_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vbitrev_d ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vbitrevi_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vbitrevi_b ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UQI.  */
++#define __lsx_vbitrevi_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vbitrevi_h ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UQI.  */
++#define __lsx_vbitrevi_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vbitrevi_w ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UQI.  */
++#define __lsx_vbitrevi_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vbitrevi_d ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadd_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadd_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadd_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadd_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadd_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadd_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadd_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadd_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vaddi_bu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vaddi_bu ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vaddi_hu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vaddi_hu ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vaddi_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vaddi_wu ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vaddi_du(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vaddi_du ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsub_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsub_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsub_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsub_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsub_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsub_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsub_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsub_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vsubi_bu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsubi_bu ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vsubi_hu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsubi_hu ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vsubi_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsubi_wu ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vsubi_du(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsubi_du ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V16QI, V16QI, QI.  */
++#define __lsx_vmaxi_b(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V8HI, V8HI, QI.  */
++#define __lsx_vmaxi_h(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V4SI, V4SI, QI.  */
++#define __lsx_vmaxi_w(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V2DI, V2DI, QI.  */
++#define __lsx_vmaxi_d(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmax_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmax_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vmaxi_bu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_bu ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UQI.  */
++#define __lsx_vmaxi_hu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_hu ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UQI.  */
++#define __lsx_vmaxi_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_wu ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UQI.  */
++#define __lsx_vmaxi_du(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmaxi_du ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V16QI, V16QI, QI.  */
++#define __lsx_vmini_b(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V8HI, V8HI, QI.  */
++#define __lsx_vmini_h(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V4SI, V4SI, QI.  */
++#define __lsx_vmini_w(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V2DI, V2DI, QI.  */
++#define __lsx_vmini_d(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmin_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmin_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vmini_bu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_bu ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UQI.  */
++#define __lsx_vmini_hu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_hu ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UQI.  */
++#define __lsx_vmini_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_wu ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UQI.  */
++#define __lsx_vmini_du(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vmini_du ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vseq_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vseq_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vseq_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vseq_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vseq_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vseq_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vseq_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vseq_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V16QI, V16QI, QI.  */
++#define __lsx_vseqi_b(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vseqi_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V8HI, V8HI, QI.  */
++#define __lsx_vseqi_h(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vseqi_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V4SI, V4SI, QI.  */
++#define __lsx_vseqi_w(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vseqi_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V2DI, V2DI, QI.  */
++#define __lsx_vseqi_d(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vseqi_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V16QI, V16QI, QI.  */
++#define __lsx_vslti_b(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V8HI, V8HI, QI.  */
++#define __lsx_vslti_h(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V4SI, V4SI, QI.  */
++#define __lsx_vslti_w(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V2DI, V2DI, QI.  */
++#define __lsx_vslti_d(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vslt_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vslt_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V16QI, UV16QI, UQI.  */
++#define __lsx_vslti_bu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_bu ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, UV8HI, UQI.  */
++#define __lsx_vslti_hu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_hu ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, UV4SI, UQI.  */
++#define __lsx_vslti_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_wu ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UQI.  */
++#define __lsx_vslti_du(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslti_du ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V16QI, V16QI, QI.  */
++#define __lsx_vslei_b(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V8HI, V8HI, QI.  */
++#define __lsx_vslei_h(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V4SI, V4SI, QI.  */
++#define __lsx_vslei_w(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, si5.  */
++/* Data types in instruction templates:  V2DI, V2DI, QI.  */
++#define __lsx_vslei_d(/*__m128i*/ _1, /*si5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsle_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsle_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V16QI, UV16QI, UQI.  */
++#define __lsx_vslei_bu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_bu ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, UV8HI, UQI.  */
++#define __lsx_vslei_hu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_hu ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, UV4SI, UQI.  */
++#define __lsx_vslei_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_wu ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UQI.  */
++#define __lsx_vslei_du(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vslei_du ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vsat_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vsat_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vsat_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vsat_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vsat_bu(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_bu ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UQI.  */
++#define __lsx_vsat_hu(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_hu ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UQI.  */
++#define __lsx_vsat_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_wu ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UQI.  */
++#define __lsx_vsat_du(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vsat_du ((v2u64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadda_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadda_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadda_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadda_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadda_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadda_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadda_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadda_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsadd_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsadd_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavg_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavg_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vavgr_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vavgr_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssub_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssub_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vabsd_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vabsd_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmul_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmul_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmul_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmul_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmul_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmul_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmul_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmul_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmadd_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmadd_b ((v16i8)_1, (v16i8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmadd_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmadd_h ((v8i16)_1, (v8i16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmadd_w (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmadd_w ((v4i32)_1, (v4i32)_2, (v4i32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmadd_d (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmadd_d ((v2i64)_1, (v2i64)_2, (v2i64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmsub_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmsub_b ((v16i8)_1, (v16i8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmsub_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmsub_h ((v8i16)_1, (v8i16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmsub_w (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmsub_w ((v4i32)_1, (v4i32)_2, (v4i32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmsub_d (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmsub_d ((v2i64)_1, (v2i64)_2, (v2i64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vdiv_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vdiv_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_hu_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_hu_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_wu_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_wu_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_du_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_du_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_hu_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_hu_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_wu_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_wu_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_du_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_du_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmod_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmod_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, rk.  */
++/* Data types in instruction templates:  V16QI, V16QI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplve_b (__m128i _1, int _2)
++{
++  return (__m128i)__builtin_lsx_vreplve_b ((v16i8)_1, (int)_2);
++}
++
++/* Assembly instruction format:	vd, vj, rk.  */
++/* Data types in instruction templates:  V8HI, V8HI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplve_h (__m128i _1, int _2)
++{
++  return (__m128i)__builtin_lsx_vreplve_h ((v8i16)_1, (int)_2);
++}
++
++/* Assembly instruction format:	vd, vj, rk.  */
++/* Data types in instruction templates:  V4SI, V4SI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplve_w (__m128i _1, int _2)
++{
++  return (__m128i)__builtin_lsx_vreplve_w ((v4i32)_1, (int)_2);
++}
++
++/* Assembly instruction format:	vd, vj, rk.  */
++/* Data types in instruction templates:  V2DI, V2DI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplve_d (__m128i _1, int _2)
++{
++  return (__m128i)__builtin_lsx_vreplve_d ((v2i64)_1, (int)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vreplvei_b(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vreplvei_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vreplvei_h(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vreplvei_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui2.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vreplvei_w(/*__m128i*/ _1, /*ui2*/ _2) \
++  ((__m128i)__builtin_lsx_vreplvei_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui1.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vreplvei_d(/*__m128i*/ _1, /*ui1*/ _2) \
++  ((__m128i)__builtin_lsx_vreplvei_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickev_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickev_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickev_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickev_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickev_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickev_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickev_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickev_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickod_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickod_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickod_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickod_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickod_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickod_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpickod_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpickod_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvh_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvh_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvh_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvh_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvh_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvh_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvh_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvh_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvl_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvl_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvl_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvl_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvl_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvl_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vilvl_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vilvl_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackev_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackev_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackev_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackev_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackev_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackev_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackev_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackev_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackod_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackod_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackod_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackod_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackod_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackod_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpackod_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vpackod_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vshuf_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vshuf_h ((v8i16)_1, (v8i16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vshuf_w (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vshuf_w ((v4i32)_1, (v4i32)_2, (v4i32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vshuf_d (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vshuf_d ((v2i64)_1, (v2i64)_2, (v2i64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vand_v (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vand_v ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vandi_b(/*__m128i*/ _1, /*ui8*/ _2) \
++  ((__m128i)__builtin_lsx_vandi_b ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vor_v (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vor_v ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vori_b(/*__m128i*/ _1, /*ui8*/ _2) \
++  ((__m128i)__builtin_lsx_vori_b ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vnor_v (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vnor_v ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vnori_b(/*__m128i*/ _1, /*ui8*/ _2) \
++  ((__m128i)__builtin_lsx_vnori_b ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vxor_v (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vxor_v ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UQI.  */
++#define __lsx_vxori_b(/*__m128i*/ _1, /*ui8*/ _2) \
++  ((__m128i)__builtin_lsx_vxori_b ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vbitsel_v (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vbitsel_v ((v16u8)_1, (v16u8)_2, (v16u8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI, USI.  */
++#define __lsx_vbitseli_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
++  ((__m128i)__builtin_lsx_vbitseli_b ((v16u8)(_1), (v16u8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V16QI, V16QI, USI.  */
++#define __lsx_vshuf4i_b(/*__m128i*/ _1, /*ui8*/ _2) \
++  ((__m128i)__builtin_lsx_vshuf4i_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V8HI, V8HI, USI.  */
++#define __lsx_vshuf4i_h(/*__m128i*/ _1, /*ui8*/ _2) \
++  ((__m128i)__builtin_lsx_vshuf4i_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V4SI, V4SI, USI.  */
++#define __lsx_vshuf4i_w(/*__m128i*/ _1, /*ui8*/ _2) \
++  ((__m128i)__builtin_lsx_vshuf4i_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, rj.  */
++/* Data types in instruction templates:  V16QI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplgr2vr_b (int _1)
++{
++  return (__m128i)__builtin_lsx_vreplgr2vr_b ((int)_1);
++}
++
++/* Assembly instruction format:	vd, rj.  */
++/* Data types in instruction templates:  V8HI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplgr2vr_h (int _1)
++{
++  return (__m128i)__builtin_lsx_vreplgr2vr_h ((int)_1);
++}
++
++/* Assembly instruction format:	vd, rj.  */
++/* Data types in instruction templates:  V4SI, SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplgr2vr_w (int _1)
++{
++  return (__m128i)__builtin_lsx_vreplgr2vr_w ((int)_1);
++}
++
++/* Assembly instruction format:	vd, rj.  */
++/* Data types in instruction templates:  V2DI, DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vreplgr2vr_d (long int _1)
++{
++  return (__m128i)__builtin_lsx_vreplgr2vr_d ((long int)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpcnt_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vpcnt_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpcnt_h (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vpcnt_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpcnt_w (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vpcnt_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vpcnt_d (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vpcnt_d ((v2i64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclo_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclo_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclo_h (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclo_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclo_w (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclo_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclo_d (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclo_d ((v2i64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclz_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclz_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclz_h (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclz_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclz_w (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclz_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vclz_d (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vclz_d ((v2i64)_1);
++}
++
++/* Assembly instruction format:	rd, vj, ui4.  */
++/* Data types in instruction templates:  SI, V16QI, UQI.  */
++#define __lsx_vpickve2gr_b(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((int)__builtin_lsx_vpickve2gr_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	rd, vj, ui3.  */
++/* Data types in instruction templates:  SI, V8HI, UQI.  */
++#define __lsx_vpickve2gr_h(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((int)__builtin_lsx_vpickve2gr_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	rd, vj, ui2.  */
++/* Data types in instruction templates:  SI, V4SI, UQI.  */
++#define __lsx_vpickve2gr_w(/*__m128i*/ _1, /*ui2*/ _2) \
++  ((int)__builtin_lsx_vpickve2gr_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	rd, vj, ui1.  */
++/* Data types in instruction templates:  DI, V2DI, UQI.  */
++#define __lsx_vpickve2gr_d(/*__m128i*/ _1, /*ui1*/ _2) \
++  ((long int)__builtin_lsx_vpickve2gr_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	rd, vj, ui4.  */
++/* Data types in instruction templates:  USI, V16QI, UQI.  */
++#define __lsx_vpickve2gr_bu(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((unsigned int)__builtin_lsx_vpickve2gr_bu ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	rd, vj, ui3.  */
++/* Data types in instruction templates:  USI, V8HI, UQI.  */
++#define __lsx_vpickve2gr_hu(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((unsigned int)__builtin_lsx_vpickve2gr_hu ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	rd, vj, ui2.  */
++/* Data types in instruction templates:  USI, V4SI, UQI.  */
++#define __lsx_vpickve2gr_wu(/*__m128i*/ _1, /*ui2*/ _2) \
++  ((unsigned int)__builtin_lsx_vpickve2gr_wu ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	rd, vj, ui1.  */
++/* Data types in instruction templates:  UDI, V2DI, UQI.  */
++#define __lsx_vpickve2gr_du(/*__m128i*/ _1, /*ui1*/ _2) \
++  ((unsigned long int)__builtin_lsx_vpickve2gr_du ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, rj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, SI, UQI.  */
++#define __lsx_vinsgr2vr_b(/*__m128i*/ _1, /*int*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vinsgr2vr_b ((v16i8)(_1), (int)(_2), (_3)))
++
++/* Assembly instruction format:	vd, rj, ui3.  */
++/* Data types in instruction templates:  V8HI, V8HI, SI, UQI.  */
++#define __lsx_vinsgr2vr_h(/*__m128i*/ _1, /*int*/ _2, /*ui3*/ _3) \
++  ((__m128i)__builtin_lsx_vinsgr2vr_h ((v8i16)(_1), (int)(_2), (_3)))
++
++/* Assembly instruction format:	vd, rj, ui2.  */
++/* Data types in instruction templates:  V4SI, V4SI, SI, UQI.  */
++#define __lsx_vinsgr2vr_w(/*__m128i*/ _1, /*int*/ _2, /*ui2*/ _3) \
++  ((__m128i)__builtin_lsx_vinsgr2vr_w ((v4i32)(_1), (int)(_2), (_3)))
++
++/* Assembly instruction format:	vd, rj, ui1.  */
++/* Data types in instruction templates:  V2DI, V2DI, DI, UQI.  */
++#define __lsx_vinsgr2vr_d(/*__m128i*/ _1, /*long int*/ _2, /*ui1*/ _3) \
++  ((__m128i)__builtin_lsx_vinsgr2vr_d ((v2i64)(_1), (long int)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfadd_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfadd_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfadd_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfadd_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfsub_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfsub_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfsub_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfsub_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfmul_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfmul_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfmul_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfmul_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfdiv_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfdiv_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfdiv_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfdiv_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcvt_h_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcvt_h_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfcvt_s_d (__m128d _1, __m128d _2)
++{
++  return (__m128)__builtin_lsx_vfcvt_s_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfmin_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfmin_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfmin_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfmin_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfmina_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfmina_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfmina_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfmina_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfmax_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfmax_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfmax_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfmax_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfmaxa_s (__m128 _1, __m128 _2)
++{
++  return (__m128)__builtin_lsx_vfmaxa_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfmaxa_d (__m128d _1, __m128d _2)
++{
++  return (__m128d)__builtin_lsx_vfmaxa_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfclass_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vfclass_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfclass_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vfclass_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfsqrt_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfsqrt_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfsqrt_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfsqrt_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrecip_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrecip_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrecip_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrecip_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrint_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrint_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrint_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrint_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrsqrt_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrsqrt_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrsqrt_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrsqrt_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vflogb_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vflogb_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vflogb_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vflogb_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfcvth_s_h (__m128i _1)
++{
++  return (__m128)__builtin_lsx_vfcvth_s_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfcvth_d_s (__m128 _1)
++{
++  return (__m128d)__builtin_lsx_vfcvth_d_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfcvtl_s_h (__m128i _1)
++{
++  return (__m128)__builtin_lsx_vfcvtl_s_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfcvtl_d_s (__m128 _1)
++{
++  return (__m128d)__builtin_lsx_vfcvtl_d_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftint_w_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftint_w_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftint_l_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vftint_l_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftint_wu_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftint_wu_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftint_lu_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vftint_lu_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrz_w_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrz_w_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrz_l_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vftintrz_l_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrz_wu_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrz_wu_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrz_lu_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vftintrz_lu_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vffint_s_w (__m128i _1)
++{
++  return (__m128)__builtin_lsx_vffint_s_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vffint_d_l (__m128i _1)
++{
++  return (__m128d)__builtin_lsx_vffint_d_l ((v2i64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SF, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vffint_s_wu (__m128i _1)
++{
++  return (__m128)__builtin_lsx_vffint_s_wu ((v4u32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vffint_d_lu (__m128i _1)
++{
++  return (__m128d)__builtin_lsx_vffint_d_lu ((v2u64)_1);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vandn_v (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vandn_v ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vneg_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vneg_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vneg_h (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vneg_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vneg_w (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vneg_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vneg_d (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vneg_d ((v2i64)_1);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmuh_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmuh_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V8HI, V16QI, UQI.  */
++#define __lsx_vsllwil_h_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsllwil_h_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V4SI, V8HI, UQI.  */
++#define __lsx_vsllwil_w_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsllwil_w_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V2DI, V4SI, UQI.  */
++#define __lsx_vsllwil_d_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsllwil_d_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  UV8HI, UV16QI, UQI.  */
++#define __lsx_vsllwil_hu_bu(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vsllwil_hu_bu ((v16u8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV4SI, UV8HI, UQI.  */
++#define __lsx_vsllwil_wu_hu(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vsllwil_wu_hu ((v8u16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV2DI, UV4SI, UQI.  */
++#define __lsx_vsllwil_du_wu(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vsllwil_du_wu ((v4u32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsran_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsran_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsran_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsran_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsran_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsran_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssran_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssran_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssran_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssran_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssran_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssran_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssran_bu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssran_bu_h ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssran_hu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssran_hu_w ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssran_wu_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssran_wu_d ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrarn_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrarn_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrarn_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrarn_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrarn_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrarn_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrarn_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrarn_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrarn_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrarn_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrarn_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrarn_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrarn_bu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrarn_bu_h ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrarn_hu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrarn_hu_w ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrarn_wu_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrarn_wu_d ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrln_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrln_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrln_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrln_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrln_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrln_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrln_bu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrln_bu_h ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrln_hu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrln_hu_w ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrln_wu_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrln_wu_d ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrlrn_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrlrn_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrlrn_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrlrn_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsrlrn_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsrlrn_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV16QI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrlrn_bu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrlrn_bu_h ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrlrn_hu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrlrn_hu_w ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrlrn_wu_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrlrn_wu_d ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, UQI.  */
++#define __lsx_vfrstpi_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vfrstpi_b ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, UQI.  */
++#define __lsx_vfrstpi_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vfrstpi_h ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfrstp_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vfrstp_b ((v16i8)_1, (v16i8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfrstp_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vfrstp_h ((v8i16)_1, (v8i16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vshuf4i_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
++  ((__m128i)__builtin_lsx_vshuf4i_d ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vbsrl_v(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vbsrl_v ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vbsll_v(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vbsll_v ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vextrins_b(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
++  ((__m128i)__builtin_lsx_vextrins_b ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vextrins_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
++  ((__m128i)__builtin_lsx_vextrins_h ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vextrins_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
++  ((__m128i)__builtin_lsx_vextrins_w ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vextrins_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
++  ((__m128i)__builtin_lsx_vextrins_d ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmskltz_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vmskltz_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmskltz_h (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vmskltz_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmskltz_w (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vmskltz_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmskltz_d (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vmskltz_d ((v2i64)_1);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsigncov_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsigncov_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsigncov_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsigncov_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsigncov_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsigncov_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsigncov_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsigncov_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfmadd_s (__m128 _1, __m128 _2, __m128 _3)
++{
++  return (__m128)__builtin_lsx_vfmadd_s ((v4f32)_1, (v4f32)_2, (v4f32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfmadd_d (__m128d _1, __m128d _2, __m128d _3)
++{
++  return (__m128d)__builtin_lsx_vfmadd_d ((v2f64)_1, (v2f64)_2, (v2f64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfmsub_s (__m128 _1, __m128 _2, __m128 _3)
++{
++  return (__m128)__builtin_lsx_vfmsub_s ((v4f32)_1, (v4f32)_2, (v4f32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfmsub_d (__m128d _1, __m128d _2, __m128d _3)
++{
++  return (__m128d)__builtin_lsx_vfmsub_d ((v2f64)_1, (v2f64)_2, (v2f64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfnmadd_s (__m128 _1, __m128 _2, __m128 _3)
++{
++  return (__m128)__builtin_lsx_vfnmadd_s ((v4f32)_1, (v4f32)_2, (v4f32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfnmadd_d (__m128d _1, __m128d _2, __m128d _3)
++{
++  return (__m128d)__builtin_lsx_vfnmadd_d ((v2f64)_1, (v2f64)_2, (v2f64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V4SF, V4SF, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfnmsub_s (__m128 _1, __m128 _2, __m128 _3)
++{
++  return (__m128)__builtin_lsx_vfnmsub_s ((v4f32)_1, (v4f32)_2, (v4f32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V2DF, V2DF, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfnmsub_d (__m128d _1, __m128d _2, __m128d _3)
++{
++  return (__m128d)__builtin_lsx_vfnmsub_d ((v2f64)_1, (v2f64)_2, (v2f64)_3);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrne_w_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrne_w_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrne_l_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vftintrne_l_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrp_w_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrp_w_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrp_l_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vftintrp_l_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrm_w_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrm_w_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrm_l_d (__m128d _1)
++{
++  return (__m128i)__builtin_lsx_vftintrm_l_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftint_w_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vftint_w_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SF, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vffint_s_l (__m128i _1, __m128i _2)
++{
++  return (__m128)__builtin_lsx_vffint_s_l ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrz_w_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vftintrz_w_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrp_w_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vftintrp_w_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrm_w_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vftintrm_w_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrne_w_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vftintrne_w_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintl_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintl_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftinth_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftinth_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vffinth_d_w (__m128i _1)
++{
++  return (__m128d)__builtin_lsx_vffinth_d_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DF, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vffintl_d_w (__m128i _1)
++{
++  return (__m128d)__builtin_lsx_vffintl_d_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrzl_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrzl_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrzh_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrzh_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrpl_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrpl_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrph_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrph_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrml_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrml_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrmh_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrmh_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrnel_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrnel_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vftintrneh_l_s (__m128 _1)
++{
++  return (__m128i)__builtin_lsx_vftintrneh_l_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrintrne_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrintrne_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrintrne_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrintrne_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrintrz_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrintrz_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrintrz_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrintrz_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrintrp_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrintrp_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrintrp_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrintrp_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128 __lsx_vfrintrm_s (__m128 _1)
++{
++  return (__m128)__builtin_lsx_vfrintrm_s ((v4f32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128d __lsx_vfrintrm_d (__m128d _1)
++{
++  return (__m128d)__builtin_lsx_vfrintrm_d ((v2f64)_1);
++}
++
++/* Assembly instruction format:	vd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V16QI, CVPOINTER, SI, UQI.  */
++#define __lsx_vstelm_b(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lsx_vstelm_b ((v16i8)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	vd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V8HI, CVPOINTER, SI, UQI.  */
++#define __lsx_vstelm_h(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lsx_vstelm_h ((v8i16)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	vd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V4SI, CVPOINTER, SI, UQI.  */
++#define __lsx_vstelm_w(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lsx_vstelm_w ((v4i32)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	vd, rj, si8, idx.  */
++/* Data types in instruction templates:  VOID, V2DI, CVPOINTER, SI, UQI.  */
++#define __lsx_vstelm_d(/*__m128i*/ _1, /*void **/ _2, /*si8*/ _3, /*idx*/ _4) \
++  ((void)__builtin_lsx_vstelm_d ((v2i64)(_1), (void *)(_2), (_3), (_4)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_d_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_d_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_w_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_w_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_h_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_h_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_d_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_d_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_w_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_w_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_h_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_h_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_d_wu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_d_wu_w ((v4u32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_w_hu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_w_hu_h ((v8u16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_h_bu_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_h_bu_b ((v16u8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_d_wu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_d_wu_w ((v4u32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_w_hu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_w_hu_h ((v8u16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_h_bu_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_h_bu_b ((v16u8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_d_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_d_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_w_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_w_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_h_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_h_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_d_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_d_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_w_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_w_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_h_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_h_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_q_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_q_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_q_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_q_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwev_q_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwev_q_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsubwod_q_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsubwod_q_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwev_q_du_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwev_q_du_d ((v2u64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vaddwod_q_du_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vaddwod_q_du_d ((v2u64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_d_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_d_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_w_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_w_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_h_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_h_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_d_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_d_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_w_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_w_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_h_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_h_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_d_wu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_d_wu ((v4u32)_1, (v4u32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_w_hu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_w_hu ((v8u16)_1, (v8u16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_h_bu (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_h_bu ((v16u8)_1, (v16u8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_d_wu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_d_wu_w ((v4u32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_w_hu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_w_hu_h ((v8u16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_h_bu_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_h_bu_b ((v16u8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_d_wu_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_d_wu_w ((v4u32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, UV8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_w_hu_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_w_hu_h ((v8u16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, UV16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_h_bu_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_h_bu_b ((v16u8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_q_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_q_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_q_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_q_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwev_q_du_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwev_q_du_d ((v2u64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, UV2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmulwod_q_du_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vmulwod_q_du_d ((v2u64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhaddw_qu_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhaddw_qu_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_q_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_q_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vhsubw_qu_du (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vhsubw_qu_du ((v2u64)_1, (v2u64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_d_w (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_d_w ((v2i64)_1, (v4i32)_2, (v4i32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_w_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_w_h ((v4i32)_1, (v8i16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_h_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_h_b ((v8i16)_1, (v16i8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_d_wu (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_d_wu ((v2u64)_1, (v4u32)_2, (v4u32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_w_hu (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_w_hu ((v4u32)_1, (v8u16)_2, (v8u16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_h_bu (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_h_bu ((v8u16)_1, (v16u8)_2, (v16u8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_d_w (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_d_w ((v2i64)_1, (v4i32)_2, (v4i32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_w_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_w_h ((v4i32)_1, (v8i16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_h_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_h_b ((v8i16)_1, (v16i8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV4SI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_d_wu (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_d_wu ((v2u64)_1, (v4u32)_2, (v4u32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, UV8HI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_w_hu (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_w_hu ((v4u32)_1, (v8u16)_2, (v8u16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, UV16QI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_h_bu (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_h_bu ((v8u16)_1, (v16u8)_2, (v16u8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, UV4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_d_wu_w (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_d_wu_w ((v2i64)_1, (v4u32)_2, (v4i32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, UV8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_w_hu_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_w_hu_h ((v4i32)_1, (v8u16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, UV16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_h_bu_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_h_bu_b ((v8i16)_1, (v16u8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, UV4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_d_wu_w (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_d_wu_w ((v2i64)_1, (v4u32)_2, (v4i32)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, UV8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_w_hu_h (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_w_hu_h ((v4i32)_1, (v8u16)_2, (v8i16)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, UV16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_h_bu_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_h_bu_b ((v8i16)_1, (v16u8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_q_d (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_q_d ((v2i64)_1, (v2i64)_2, (v2i64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_q_d (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_q_d ((v2i64)_1, (v2i64)_2, (v2i64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_q_du (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_q_du ((v2u64)_1, (v2u64)_2, (v2u64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_q_du (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_q_du ((v2u64)_1, (v2u64)_2, (v2u64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, UV2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwev_q_du_d (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwev_q_du_d ((v2i64)_1, (v2u64)_2, (v2i64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, UV2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmaddwod_q_du_d (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vmaddwod_q_du_d ((v2i64)_1, (v2u64)_2, (v2i64)_3);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vrotr_b (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vrotr_b ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vrotr_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vrotr_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vrotr_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vrotr_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vrotr_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vrotr_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vadd_q (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vadd_q ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vsub_q (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vsub_q ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, rj, si12.  */
++/* Data types in instruction templates:  V16QI, CVPOINTER, SI.  */
++#define __lsx_vldrepl_b(/*void **/ _1, /*si12*/ _2) \
++  ((__m128i)__builtin_lsx_vldrepl_b ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	vd, rj, si11.  */
++/* Data types in instruction templates:  V8HI, CVPOINTER, SI.  */
++#define __lsx_vldrepl_h(/*void **/ _1, /*si11*/ _2) \
++  ((__m128i)__builtin_lsx_vldrepl_h ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	vd, rj, si10.  */
++/* Data types in instruction templates:  V4SI, CVPOINTER, SI.  */
++#define __lsx_vldrepl_w(/*void **/ _1, /*si10*/ _2) \
++  ((__m128i)__builtin_lsx_vldrepl_w ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	vd, rj, si9.  */
++/* Data types in instruction templates:  V2DI, CVPOINTER, SI.  */
++#define __lsx_vldrepl_d(/*void **/ _1, /*si9*/ _2) \
++  ((__m128i)__builtin_lsx_vldrepl_d ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmskgez_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vmskgez_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vmsknz_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vmsknz_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V8HI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_h_b (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_h_b ((v16i8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V4SI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_w_h (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_w_h ((v8i16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_d_w (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_d_w ((v4i32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_q_d (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_q_d ((v2i64)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV8HI, UV16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_hu_bu (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_hu_bu ((v16u8)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV4SI, UV8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_wu_hu (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_wu_hu ((v8u16)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV2DI, UV4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_du_wu (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_du_wu ((v4u32)_1);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vexth_qu_du (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vexth_qu_du ((v2u64)_1);
++}
++
++/* Assembly instruction format:	vd, vj, ui3.  */
++/* Data types in instruction templates:  V16QI, V16QI, UQI.  */
++#define __lsx_vrotri_b(/*__m128i*/ _1, /*ui3*/ _2) \
++  ((__m128i)__builtin_lsx_vrotri_b ((v16i8)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V8HI, V8HI, UQI.  */
++#define __lsx_vrotri_h(/*__m128i*/ _1, /*ui4*/ _2) \
++  ((__m128i)__builtin_lsx_vrotri_h ((v8i16)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V4SI, V4SI, UQI.  */
++#define __lsx_vrotri_w(/*__m128i*/ _1, /*ui5*/ _2) \
++  ((__m128i)__builtin_lsx_vrotri_w ((v4i32)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V2DI, V2DI, UQI.  */
++#define __lsx_vrotri_d(/*__m128i*/ _1, /*ui6*/ _2) \
++  ((__m128i)__builtin_lsx_vrotri_d ((v2i64)(_1), (_2)))
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vextl_q_d (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vextl_q_d ((v2i64)_1);
++}
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vsrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlni_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vsrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlni_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vsrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlni_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vsrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlni_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vsrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlrni_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vsrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlrni_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vsrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlrni_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vsrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vsrlrni_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vssrlni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vssrlni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vssrlni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vssrlni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, V16QI, USI.  */
++#define __lsx_vssrlni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, V8HI, USI.  */
++#define __lsx_vssrlni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, V4SI, USI.  */
++#define __lsx_vssrlni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, V2DI, USI.  */
++#define __lsx_vssrlni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlni_du_q ((v2u64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vssrlrni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vssrlrni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vssrlrni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vssrlrni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, V16QI, USI.  */
++#define __lsx_vssrlrni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, V8HI, USI.  */
++#define __lsx_vssrlrni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, V4SI, USI.  */
++#define __lsx_vssrlrni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, V2DI, USI.  */
++#define __lsx_vssrlrni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrlrni_du_q ((v2u64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vsrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vsrani_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vsrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vsrani_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vsrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vsrani_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vsrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vsrani_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vsrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vsrarni_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vsrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vsrarni_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vsrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vsrarni_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vsrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vsrarni_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vssrani_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vssrani_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vssrani_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vssrani_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, V16QI, USI.  */
++#define __lsx_vssrani_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_bu_h ((v16u8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, V8HI, USI.  */
++#define __lsx_vssrani_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_hu_w ((v8u16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, V4SI, USI.  */
++#define __lsx_vssrani_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_wu_d ((v4u32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, V2DI, USI.  */
++#define __lsx_vssrani_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrani_du_q ((v2u64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, USI.  */
++#define __lsx_vssrarni_b_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_b_h ((v16i8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  V8HI, V8HI, V8HI, USI.  */
++#define __lsx_vssrarni_h_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_h_w ((v8i16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vssrarni_w_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_w_d ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  V2DI, V2DI, V2DI, USI.  */
++#define __lsx_vssrarni_d_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_d_q ((v2i64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui4.  */
++/* Data types in instruction templates:  UV16QI, UV16QI, V16QI, USI.  */
++#define __lsx_vssrarni_bu_h(/*__m128i*/ _1, /*__m128i*/ _2, /*ui4*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_bu_h ((v16u8)(_1), (v16i8)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui5.  */
++/* Data types in instruction templates:  UV8HI, UV8HI, V8HI, USI.  */
++#define __lsx_vssrarni_hu_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui5*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_hu_w ((v8u16)(_1), (v8i16)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui6.  */
++/* Data types in instruction templates:  UV4SI, UV4SI, V4SI, USI.  */
++#define __lsx_vssrarni_wu_d(/*__m128i*/ _1, /*__m128i*/ _2, /*ui6*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_wu_d ((v4u32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui7.  */
++/* Data types in instruction templates:  UV2DI, UV2DI, V2DI, USI.  */
++#define __lsx_vssrarni_du_q(/*__m128i*/ _1, /*__m128i*/ _2, /*ui7*/ _3) \
++  ((__m128i)__builtin_lsx_vssrarni_du_q ((v2u64)(_1), (v2i64)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, ui8.  */
++/* Data types in instruction templates:  V4SI, V4SI, V4SI, USI.  */
++#define __lsx_vpermi_w(/*__m128i*/ _1, /*__m128i*/ _2, /*ui8*/ _3) \
++  ((__m128i)__builtin_lsx_vpermi_w ((v4i32)(_1), (v4i32)(_2), (_3)))
++
++/* Assembly instruction format:	vd, rj, si12.  */
++/* Data types in instruction templates:  V16QI, CVPOINTER, SI.  */
++#define __lsx_vld(/*void **/ _1, /*si12*/ _2) \
++  ((__m128i)__builtin_lsx_vld ((void *)(_1), (_2)))
++
++/* Assembly instruction format:	vd, rj, si12.  */
++/* Data types in instruction templates:  VOID, V16QI, CVPOINTER, SI.  */
++#define __lsx_vst(/*__m128i*/ _1, /*void **/ _2, /*si12*/ _3) \
++  ((void)__builtin_lsx_vst ((v16i8)(_1), (void *)(_2), (_3)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrlrn_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrlrn_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrlrn_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrlrn_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrlrn_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrlrn_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V8HI, V8HI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrln_b_h (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrln_b_h ((v8i16)_1, (v8i16)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V8HI, V4SI, V4SI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrln_h_w (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrln_h_w ((v4i32)_1, (v4i32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V2DI, V2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vssrln_w_d (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vssrln_w_d ((v2i64)_1, (v2i64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vorn_v (__m128i _1, __m128i _2)
++{
++  return (__m128i)__builtin_lsx_vorn_v ((v16i8)_1, (v16i8)_2);
++}
++
++/* Assembly instruction format:	vd, i13.  */
++/* Data types in instruction templates:  V2DI, HI.  */
++#define __lsx_vldi(/*i13*/ _1) \
++  ((__m128i)__builtin_lsx_vldi ((_1)))
++
++/* Assembly instruction format:	vd, vj, vk, va.  */
++/* Data types in instruction templates:  V16QI, V16QI, V16QI, V16QI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vshuf_b (__m128i _1, __m128i _2, __m128i _3)
++{
++  return (__m128i)__builtin_lsx_vshuf_b ((v16i8)_1, (v16i8)_2, (v16i8)_3);
++}
++
++/* Assembly instruction format:	vd, rj, rk.  */
++/* Data types in instruction templates:  V16QI, CVPOINTER, DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vldx (void * _1, long int _2)
++{
++  return (__m128i)__builtin_lsx_vldx ((void *)_1, (long int)_2);
++}
++
++/* Assembly instruction format:	vd, rj, rk.  */
++/* Data types in instruction templates:  VOID, V16QI, CVPOINTER, DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++void __lsx_vstx (__m128i _1, void * _2, long int _3)
++{
++  return (void)__builtin_lsx_vstx ((v16i8)_1, (void *)_2, (long int)_3);
++}
++
++/* Assembly instruction format:	vd, vj.  */
++/* Data types in instruction templates:  UV2DI, UV2DI.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vextl_qu_du (__m128i _1)
++{
++  return (__m128i)__builtin_lsx_vextl_qu_du ((v2u64)_1);
++}
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV16QI.  */
++#define __lsx_bnz_b(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bnz_b ((v16u8)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV2DI.  */
++#define __lsx_bnz_d(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bnz_d ((v2u64)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV8HI.  */
++#define __lsx_bnz_h(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bnz_h ((v8u16)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV16QI.  */
++#define __lsx_bnz_v(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bnz_v ((v16u8)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV4SI.  */
++#define __lsx_bnz_w(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bnz_w ((v4u32)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV16QI.  */
++#define __lsx_bz_b(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bz_b ((v16u8)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV2DI.  */
++#define __lsx_bz_d(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bz_d ((v2u64)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV8HI.  */
++#define __lsx_bz_h(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bz_h ((v8u16)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV16QI.  */
++#define __lsx_bz_v(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bz_v ((v16u8)(_1)))
++
++/* Assembly instruction format:	cd, vj.  */
++/* Data types in instruction templates:  SI, UV4SI.  */
++#define __lsx_bz_w(/*__m128i*/ _1) \
++  ((int)__builtin_lsx_bz_w ((v4u32)(_1)))
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_caf_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_caf_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_caf_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_caf_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_ceq_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_ceq_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_ceq_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_ceq_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cle_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cle_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cle_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cle_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_clt_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_clt_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_clt_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_clt_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cne_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cne_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cne_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cne_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cor_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cor_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cor_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cor_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cueq_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cueq_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cueq_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cueq_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cule_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cule_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cule_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cule_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cult_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cult_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cult_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cult_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cun_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cun_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cune_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cune_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cune_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cune_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_cun_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_cun_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_saf_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_saf_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_saf_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_saf_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_seq_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_seq_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_seq_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_seq_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sle_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sle_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sle_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sle_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_slt_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_slt_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_slt_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_slt_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sne_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sne_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sne_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sne_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sor_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sor_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sor_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sor_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sueq_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sueq_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sueq_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sueq_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sule_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sule_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sule_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sule_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sult_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sult_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sult_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sult_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sun_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sun_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V2DI, V2DF, V2DF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sune_d (__m128d _1, __m128d _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sune_d ((v2f64)_1, (v2f64)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sune_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sune_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, vj, vk.  */
++/* Data types in instruction templates:  V4SI, V4SF, V4SF.  */
++extern __inline __attribute__((__gnu_inline__, __always_inline__, __artificial__))
++__m128i __lsx_vfcmp_sun_s (__m128 _1, __m128 _2)
++{
++  return (__m128i)__builtin_lsx_vfcmp_sun_s ((v4f32)_1, (v4f32)_2);
++}
++
++/* Assembly instruction format:	vd, si10.  */
++/* Data types in instruction templates:  V16QI, HI.  */
++#define __lsx_vrepli_b(/*si10*/ _1) \
++  ((__m128i)__builtin_lsx_vrepli_b ((_1)))
++
++/* Assembly instruction format:	vd, si10.  */
++/* Data types in instruction templates:  V2DI, HI.  */
++#define __lsx_vrepli_d(/*si10*/ _1) \
++  ((__m128i)__builtin_lsx_vrepli_d ((_1)))
++
++/* Assembly instruction format:	vd, si10.  */
++/* Data types in instruction templates:  V8HI, HI.  */
++#define __lsx_vrepli_h(/*si10*/ _1) \
++  ((__m128i)__builtin_lsx_vrepli_h ((_1)))
++
++/* Assembly instruction format:	vd, si10.  */
++/* Data types in instruction templates:  V4SI, HI.  */
++#define __lsx_vrepli_w(/*si10*/ _1) \
++  ((__m128i)__builtin_lsx_vrepli_w ((_1)))
++
++#endif /* defined(__loongarch_sx) */
++#endif /* _GCC_LOONGSON_SXINTRIN_H */
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-built-in-functions-description-of-Loon.patch b/LoongArch-Add-built-in-functions-description-of-Loon.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b1817e2b7899209edfc08ea2862a88ca4d709acc
--- /dev/null
+++ b/LoongArch-Add-built-in-functions-description-of-Loon.patch
@@ -0,0 +1,166 @@
+From 7cfe6e057045ac794afbe9097b1b211c0e1ea723 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 6 Apr 2023 16:02:07 +0800
+Subject: [PATCH 039/124] LoongArch: Add built-in functions description of
+ LoongArch Base instruction set instructions.
+
+gcc/ChangeLog:
+
+	* doc/extend.texi: Add section for LoongArch Base Built-in functions.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/doc/extend.texi | 129 ++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 129 insertions(+)
+
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index 3c101ca89..1d1bac255 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -14678,6 +14678,7 @@ instructions, but allow the compiler to schedule those calls.
+ * Blackfin Built-in Functions::
+ * BPF Built-in Functions::
+ * FR-V Built-in Functions::
++* LoongArch Base Built-in Functions::
+ * MIPS DSP Built-in Functions::
+ * MIPS Paired-Single Support::
+ * MIPS Loongson Built-in Functions::
+@@ -16128,6 +16129,134 @@ Use the @code{nldub} instruction to load the contents of address @var{x}
+ into the data cache.  The instruction is issued in slot I1@.
+ @end table
+ 
++@node LoongArch Base Built-in Functions
++@subsection LoongArch Base Built-in Functions
++
++These built-in functions are available for LoongArch.
++
++Data Type Description:
++@itemize
++@item @code{imm0_31}, a compile-time constant in range 0 to 31;
++@item @code{imm0_16383}, a compile-time constant in range 0 to 16383;
++@item @code{imm0_32767}, a compile-time constant in range 0 to 32767;
++@item @code{imm_n2048_2047}, a compile-time constant in range -2048 to 2047;
++@end itemize
++
++The intrinsics provided are listed below:
++@smallexample
++    unsigned int __builtin_loongarch_movfcsr2gr (imm0_31)
++    void __builtin_loongarch_movgr2fcsr (imm0_31, unsigned int)
++    void __builtin_loongarch_cacop_d (imm0_31, unsigned long int, imm_n2048_2047)
++    unsigned int __builtin_loongarch_cpucfg (unsigned int)
++    void __builtin_loongarch_asrtle_d (long int, long int)
++    void __builtin_loongarch_asrtgt_d (long int, long int)
++    long int __builtin_loongarch_lddir_d (long int, imm0_31)
++    void __builtin_loongarch_ldpte_d (long int, imm0_31)
++
++    int __builtin_loongarch_crc_w_b_w (char, int)
++    int __builtin_loongarch_crc_w_h_w (short, int)
++    int __builtin_loongarch_crc_w_w_w (int, int)
++    int __builtin_loongarch_crc_w_d_w (long int, int)
++    int __builtin_loongarch_crcc_w_b_w (char, int)
++    int __builtin_loongarch_crcc_w_h_w (short, int)
++    int __builtin_loongarch_crcc_w_w_w (int, int)
++    int __builtin_loongarch_crcc_w_d_w (long int, int)
++
++    unsigned int __builtin_loongarch_csrrd_w (imm0_16383)
++    unsigned int __builtin_loongarch_csrwr_w (unsigned int, imm0_16383)
++    unsigned int __builtin_loongarch_csrxchg_w (unsigned int, unsigned int, imm0_16383)
++    unsigned long int __builtin_loongarch_csrrd_d (imm0_16383)
++    unsigned long int __builtin_loongarch_csrwr_d (unsigned long int, imm0_16383)
++    unsigned long int __builtin_loongarch_csrxchg_d (unsigned long int, unsigned long int, imm0_16383)
++
++    unsigned char __builtin_loongarch_iocsrrd_b (unsigned int)
++    unsigned short __builtin_loongarch_iocsrrd_h (unsigned int)
++    unsigned int __builtin_loongarch_iocsrrd_w (unsigned int)
++    unsigned long int __builtin_loongarch_iocsrrd_d (unsigned int)
++    void __builtin_loongarch_iocsrwr_b (unsigned char, unsigned int)
++    void __builtin_loongarch_iocsrwr_h (unsigned short, unsigned int)
++    void __builtin_loongarch_iocsrwr_w (unsigned int, unsigned int)
++    void __builtin_loongarch_iocsrwr_d (unsigned long int, unsigned int)
++
++    void __builtin_loongarch_dbar (imm0_32767)
++    void __builtin_loongarch_ibar (imm0_32767)
++
++    void __builtin_loongarch_syscall (imm0_32767)
++    void __builtin_loongarch_break (imm0_32767)
++@end smallexample
++
++@emph{Note:}Since the control register is divided into 32-bit and 64-bit,
++but the access instruction is not distinguished. So GCC renames the control
++instructions when implementing intrinsics.
++
++Take the csrrd instruction as an example, built-in functions are implemented as follows:
++@smallexample
++  __builtin_loongarch_csrrd_w  // When reading the 32-bit control register use.
++  __builtin_loongarch_csrrd_d  // When reading the 64-bit control register use.
++@end smallexample
++
++For the convenience of use, the built-in functions are encapsulated,
++the encapsulated functions and @code{__drdtime_t, __rdtime_t} are
++defined in the @code{larchintrin.h}. So if you call the following
++function you need to include @code{larchintrin.h}.
++
++@smallexample
++     typedef struct drdtime@{
++            unsigned long dvalue;
++            unsigned long dtimeid;
++     @} __drdtime_t;
++
++     typedef struct rdtime@{
++            unsigned int value;
++            unsigned int timeid;
++     @} __rdtime_t;
++@end smallexample
++
++@smallexample
++    __drdtime_t __rdtime_d (void)
++    __rdtime_t  __rdtimel_w (void)
++    __rdtime_t  __rdtimeh_w (void)
++    unsigned int  __movfcsr2gr (imm0_31)
++    void __movgr2fcsr (imm0_31, unsigned int)
++    void __cacop_d (imm0_31, unsigned long, imm_n2048_2047)
++    unsigned int  __cpucfg (unsigned int)
++    void __asrtle_d (long int, long int)
++    void __asrtgt_d (long int, long int)
++    long int  __lddir_d (long int, imm0_31)
++    void __ldpte_d (long int, imm0_31)
++
++    int  __crc_w_b_w (char, int)
++    int  __crc_w_h_w (short, int)
++    int  __crc_w_w_w (int, int)
++    int  __crc_w_d_w (long int, int)
++    int  __crcc_w_b_w (char, int)
++    int  __crcc_w_h_w (short, int)
++    int  __crcc_w_w_w (int, int)
++    int  __crcc_w_d_w (long int, int)
++
++    unsigned int  __csrrd_w (imm0_16383)
++    unsigned int  __csrwr_w (unsigned int, imm0_16383)
++    unsigned int  __csrxchg_w (unsigned int, unsigned int, imm0_16383)
++    unsigned long  __csrrd_d (imm0_16383)
++    unsigned long  __csrwr_d (unsigned long, imm0_16383)
++    unsigned long  __csrxchg_d (unsigned long, unsigned long, imm0_16383)
++
++    unsigned char   __iocsrrd_b (unsigned int)
++    unsigned short  __iocsrrd_h (unsigned int)
++    unsigned int  __iocsrrd_w (unsigned int)
++    unsigned long  __iocsrrd_d (unsigned int)
++    void __iocsrwr_b (unsigned char, unsigned int)
++    void __iocsrwr_h (unsigned short, unsigned int)
++    void __iocsrwr_w (unsigned int, unsigned int)
++    void __iocsrwr_d (unsigned long, unsigned int)
++
++    void __dbar (imm0_32767)
++    void __ibar (imm0_32767)
++
++    void __syscall (imm0_32767)
++    void __break (imm0_32767)
++@end smallexample
++
+ @node MIPS DSP Built-in Functions
+ @subsection MIPS DSP Built-in Functions
+ 
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-fcopysign-instructions.patch b/LoongArch-Add-fcopysign-instructions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..af82e3fd3eab33b24b5337cb31beb064b7111049
--- /dev/null
+++ b/LoongArch-Add-fcopysign-instructions.patch
@@ -0,0 +1,107 @@
+From 41a4945886631a1b2898ae957389d5db18a07141 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 4 Nov 2022 15:12:22 +0800
+Subject: [PATCH 025/124] LoongArch: Add fcopysign instructions
+
+Add fcopysign.{s,d} with the names copysign{sf,df}3 so GCC will expand
+__builtin_copysign{f,} to a single instruction.
+
+Link: https://sourceware.org/pipermail/libc-alpha/2022-November/143177.html
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (UNSPEC_FCOPYSIGN): New unspec.
+	(type): Add fcopysign.
+	(copysign3): New instruction template.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/fcopysign.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md             | 22 ++++++++++++++++++-
+ .../gcc.target/loongarch/fcopysign.c          | 16 ++++++++++++++
+ 2 files changed, 37 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/fcopysign.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 214b14bdd..bda34d0f3 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -37,6 +37,7 @@
+   UNSPEC_FCLASS
+   UNSPEC_FMAX
+   UNSPEC_FMIN
++  UNSPEC_FCOPYSIGN
+ 
+   ;; Override return address for exception handling.
+   UNSPEC_EH_RETURN
+@@ -214,6 +215,7 @@
+ ;; fabs		floating point absolute value
+ ;; fneg		floating point negation
+ ;; fcmp		floating point compare
++;; fcopysign	floating point copysign
+ ;; fcvt		floating point convert
+ ;; fsqrt	floating point square root
+ ;; frsqrt       floating point reciprocal square root
+@@ -226,7 +228,7 @@
+   "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
+    prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical,
+    shift,slt,signext,clz,trap,imul,idiv,move,
+-   fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcvt,fsqrt,
++   fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fsqrt,
+    frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost"
+   (cond [(eq_attr "jirl" "!unset") (const_string "call")
+ 	 (eq_attr "got" "load") (const_string "load")
+@@ -976,6 +978,24 @@
+    (set_attr "mode" "")])
+ 
+ ;;
++;;  ....................
++;;
++;;	FLOATING POINT COPYSIGN
++;;
++;;  ....................
++
++(define_insn "copysign3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")
++		      (match_operand:ANYF 2 "register_operand" "f")]
++		     UNSPEC_FCOPYSIGN))]
++  "TARGET_HARD_FLOAT"
++  "fcopysign.\t%0,%1,%2"
++  [(set_attr "type" "fcopysign")
++   (set_attr "mode" "")])
++
++
++;;
+ ;;  ...................
+ ;;
+ ;;  Count leading zeroes.
+diff --git a/gcc/testsuite/gcc.target/loongarch/fcopysign.c b/gcc/testsuite/gcc.target/loongarch/fcopysign.c
+new file mode 100644
+index 000000000..058ba2cf5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/fcopysign.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-mdouble-float" } */
++/* { dg-final { scan-assembler "fcopysign\\.s" } } */
++/* { dg-final { scan-assembler "fcopysign\\.d" } } */
++
++double
++my_copysign (double a, double b)
++{
++  return __builtin_copysign (a, b);
++}
++
++float
++my_copysignf (float a, float b)
++{
++  return __builtin_copysignf (a, b);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-flogb.-s-d-instructions-and-expand-log.patch b/LoongArch-Add-flogb.-s-d-instructions-and-expand-log.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0a3ca0fbd7e3e1f45dfdb7db502e055fdeba8a09
--- /dev/null
+++ b/LoongArch-Add-flogb.-s-d-instructions-and-expand-log.patch
@@ -0,0 +1,123 @@
+From 2ae587a86bba31b91a127e353c31c9f861ff5326 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 8 Nov 2022 13:42:20 +0800
+Subject: [PATCH 030/124] LoongArch: Add flogb.{s,d} instructions and expand
+ logb{sf,df}2
+
+On LoongArch, flogb instructions extract the exponent of a non-negative
+floating point value, but produces NaN for negative values.  So we need
+to add a fabs instruction when we expand logb.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (UNSPEC_FLOGB): New unspec.
+	(type): Add flogb.
+	(logb_non_negative2): New instruction template.
+	(logb2): New define_expand.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/flogb.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md          | 35 ++++++++++++++++++++--
+ gcc/testsuite/gcc.target/loongarch/flogb.c | 18 +++++++++++
+ 2 files changed, 51 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/flogb.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index c141c9add..682ab9617 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -42,6 +42,7 @@
+   UNSPEC_FTINTRM
+   UNSPEC_FTINTRP
+   UNSPEC_FSCALEB
++  UNSPEC_FLOGB
+ 
+   ;; Override return address for exception handling.
+   UNSPEC_EH_RETURN
+@@ -217,6 +218,7 @@
+ ;; fdiv		floating point divide
+ ;; frdiv	floating point reciprocal divide
+ ;; fabs		floating point absolute value
++;; flogb	floating point exponent extract
+ ;; fneg		floating point negation
+ ;; fcmp		floating point compare
+ ;; fcopysign	floating point copysign
+@@ -233,8 +235,8 @@
+   "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
+    prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical,
+    shift,slt,signext,clz,trap,imul,idiv,move,
+-   fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fscaleb,
+-   fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost"
++   fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,flogb,fneg,fcmp,fcopysign,fcvt,
++   fscaleb,fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost"
+   (cond [(eq_attr "jirl" "!unset") (const_string "call")
+ 	 (eq_attr "got" "load") (const_string "load")
+ 
+@@ -1039,6 +1041,35 @@
+    (set_attr "mode" "")])
+ 
+ ;;
++;;  ....................
++;;
++;;	FLOATING POINT EXPONENT EXTRACT
++;;
++;;  ....................
++
++(define_insn "logb_non_negative2"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
++		     UNSPEC_FLOGB))]
++  "TARGET_HARD_FLOAT"
++  "flogb.\t%0,%1"
++  [(set_attr "type" "flogb")
++   (set_attr "mode" "")])
++
++(define_expand "logb2"
++  [(set (match_operand:ANYF 0 "register_operand")
++	(unspec:ANYF [(abs:ANYF (match_operand:ANYF 1 "register_operand"))]
++		     UNSPEC_FLOGB))]
++  "TARGET_HARD_FLOAT"
++{
++  rtx tmp = gen_reg_rtx (mode);
++
++  emit_insn (gen_abs2 (tmp, operands[1]));
++  emit_insn (gen_logb_non_negative2 (operands[0], tmp));
++  DONE;
++})
++
++;;
+ ;;  ...................
+ ;;
+ ;;  Count leading zeroes.
+diff --git a/gcc/testsuite/gcc.target/loongarch/flogb.c b/gcc/testsuite/gcc.target/loongarch/flogb.c
+new file mode 100644
+index 000000000..1daefe54e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/flogb.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++/* { dg-options "-mdouble-float -fno-math-errno" } */
++/* { dg-final { scan-assembler "fabs\\.s" } } */
++/* { dg-final { scan-assembler "fabs\\.d" } } */
++/* { dg-final { scan-assembler "flogb\\.s" } } */
++/* { dg-final { scan-assembler "flogb\\.d" } } */
++
++double
++my_logb (double a)
++{
++  return __builtin_logb (a);
++}
++
++float
++my_logbf (float a)
++{
++  return __builtin_logbf (a);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-fscaleb.-s-d-instructions-as-ldexp-sf-.patch b/LoongArch-Add-fscaleb.-s-d-instructions-as-ldexp-sf-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..736333d9b80eee42ceb1bdc42e1c6851cf9c6642
--- /dev/null
+++ b/LoongArch-Add-fscaleb.-s-d-instructions-as-ldexp-sf-.patch
@@ -0,0 +1,155 @@
+From e3d69a3b7a4e00e8bba88b8b4abaa1c17bc083d5 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 8 Nov 2022 12:14:35 +0800
+Subject: [PATCH 029/124] LoongArch: Add fscaleb.{s,d} instructions as
+ ldexp{sf,df}3
+
+This allows optimizing __builtin_ldexp{,f} and __builtin_scalbn{,f} with
+-fno-math-errno.
+
+IMODE is added because we can't hard code SI for operand 2: fscaleb.d
+instruction always take the high half of both source registers into
+account.  See my_ldexp_long in the test case.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (UNSPEC_FSCALEB): New unspec.
+	(type): Add fscaleb.
+	(IMODE): New mode attr.
+	(ldexp3): New instruction template.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/fscaleb.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md            | 26 ++++++++++-
+ gcc/testsuite/gcc.target/loongarch/fscaleb.c | 48 ++++++++++++++++++++
+ 2 files changed, 72 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/fscaleb.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index eb127c346..c141c9add 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -41,6 +41,7 @@
+   UNSPEC_FTINT
+   UNSPEC_FTINTRM
+   UNSPEC_FTINTRP
++  UNSPEC_FSCALEB
+ 
+   ;; Override return address for exception handling.
+   UNSPEC_EH_RETURN
+@@ -220,6 +221,7 @@
+ ;; fcmp		floating point compare
+ ;; fcopysign	floating point copysign
+ ;; fcvt		floating point convert
++;; fscaleb	floating point scale
+ ;; fsqrt	floating point square root
+ ;; frsqrt       floating point reciprocal square root
+ ;; multi	multiword sequence (or user asm statements)
+@@ -231,8 +233,8 @@
+   "unknown,branch,jump,call,load,fpload,fpidxload,store,fpstore,fpidxstore,
+    prefetch,prefetchx,condmove,mgtf,mftg,const,arith,logical,
+    shift,slt,signext,clz,trap,imul,idiv,move,
+-   fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fsqrt,
+-   frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost"
++   fmove,fadd,fmul,fmadd,fdiv,frdiv,fabs,fneg,fcmp,fcopysign,fcvt,fscaleb,
++   fsqrt,frsqrt,accext,accmod,multi,atomic,syncloop,nop,ghost"
+   (cond [(eq_attr "jirl" "!unset") (const_string "call")
+ 	 (eq_attr "got" "load") (const_string "load")
+ 
+@@ -418,6 +420,10 @@
+ ;; the controlling mode.
+ (define_mode_attr HALFMODE [(DF "SI") (DI "SI") (TF "DI")])
+ 
++;; This attribute gives the integer mode that has the same size of a
++;; floating-point mode.
++(define_mode_attr IMODE [(SF "SI") (DF "DI")])
++
+ ;; This code iterator allows signed and unsigned widening multiplications
+ ;; to use the same template.
+ (define_code_iterator any_extend [sign_extend zero_extend])
+@@ -1014,7 +1020,23 @@
+   "fcopysign.\t%0,%1,%2"
+   [(set_attr "type" "fcopysign")
+    (set_attr "mode" "")])
++
++;;
++;;  ....................
++;;
++;;	FLOATING POINT SCALE
++;;
++;;  ....................
+ 
++(define_insn "ldexp3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(unspec:ANYF [(match_operand:ANYF    1 "register_operand" "f")
++		      (match_operand: 2 "register_operand" "f")]
++		     UNSPEC_FSCALEB))]
++  "TARGET_HARD_FLOAT"
++  "fscaleb.\t%0,%1,%2"
++  [(set_attr "type" "fscaleb")
++   (set_attr "mode" "")])
+ 
+ ;;
+ ;;  ...................
+diff --git a/gcc/testsuite/gcc.target/loongarch/fscaleb.c b/gcc/testsuite/gcc.target/loongarch/fscaleb.c
+new file mode 100644
+index 000000000..f18470fbb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/fscaleb.c
+@@ -0,0 +1,48 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mdouble-float -fno-math-errno" } */
++/* { dg-final { scan-assembler-times "fscaleb\\.s" 3 } } */
++/* { dg-final { scan-assembler-times "fscaleb\\.d" 4 } } */
++/* { dg-final { scan-assembler-times "slli\\.w" 1 } } */
++
++double
++my_scalbln (double a, long b)
++{
++  return __builtin_scalbln (a, b);
++}
++
++double
++my_scalbn (double a, int b)
++{
++  return __builtin_scalbn (a, b);
++}
++
++double
++my_ldexp (double a, int b)
++{
++  return __builtin_ldexp (a, b);
++}
++
++float
++my_scalblnf (float a, long b)
++{
++  return __builtin_scalblnf (a, b);
++}
++
++float
++my_scalbnf (float a, int b)
++{
++  return __builtin_scalbnf (a, b);
++}
++
++float
++my_ldexpf (float a, int b)
++{
++  return __builtin_ldexpf (a, b);
++}
++
++/* b must be sign-extended */
++double
++my_ldexp_long (double a, long b)
++{
++  return __builtin_ldexp (a, b);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-ftint-rm-rp-.-w-l-.-s-d-instructions.patch b/LoongArch-Add-ftint-rm-rp-.-w-l-.-s-d-instructions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..47dacdf510336d0d18e73555cf58272bf99db093
--- /dev/null
+++ b/LoongArch-Add-ftint-rm-rp-.-w-l-.-s-d-instructions.patch
@@ -0,0 +1,220 @@
+From 76d599c6d8f9cf78b51cd76a7ca8fbe11e2cda2b Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 6 Nov 2022 23:16:49 +0800
+Subject: [PATCH 028/124] LoongArch: Add ftint{,rm,rp}.{w,l}.{s,d} instructions
+
+This allows to optimize the following builtins if -fno-math-errno:
+
+- __builtin_lrint{,f}
+- __builtin_lfloor{,f}
+- __builtin_lceil{,f}
+
+Inspired by
+https://gcc.gnu.org/pipermail/gcc-patches/2022-November/605287.html.
+
+ANYFI is added so the compiler won't try ftint.l.s if -mfpu=32.  If we
+simply used GPR here an ICE would be triggered with __builtin_lrintf
+and -mfpu=32.
+
+ftint{rm,rp} instructions may raise inexact exception, so they can't be
+used if -fno-trapping-math -fno-fp-int-builtin-inexact.
+
+Note that the .w.{s,d} variants are not tested because we don't support
+ILP32 for now.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (UNSPEC_FTINT): New unspec.
+	(UNSPEC_FTINTRM): Likewise.
+	(UNSPEC_FTINTRP): Likewise.
+	(LRINT): New define_int_iterator.
+	(lrint_pattern): New define_int_attr.
+	(lrint_submenmonic): Likewise.
+	(lrint_allow_inexact): Likewise.
+	(ANYFI): New define_mode_iterator.
+	(lrint): New instruction template.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/ftint.c: New test.
+	* gcc.target/loongarch/ftint-no-inexact.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md             | 34 ++++++++++++++
+ .../gcc.target/loongarch/ftint-no-inexact.c   | 44 +++++++++++++++++++
+ gcc/testsuite/gcc.target/loongarch/ftint.c    | 44 +++++++++++++++++++
+ 3 files changed, 122 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/ftint.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index a14ab14ac..eb127c346 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -38,6 +38,9 @@
+   UNSPEC_FMAX
+   UNSPEC_FMIN
+   UNSPEC_FCOPYSIGN
++  UNSPEC_FTINT
++  UNSPEC_FTINTRM
++  UNSPEC_FTINTRP
+ 
+   ;; Override return address for exception handling.
+   UNSPEC_EH_RETURN
+@@ -374,6 +377,11 @@
+ (define_mode_iterator ANYF [(SF "TARGET_HARD_FLOAT")
+ 			    (DF "TARGET_DOUBLE_FLOAT")])
+ 
++;; Iterator for fixed-point modes which can be hold by a hardware
++;; floating-point register.
++(define_mode_iterator ANYFI [(SI "TARGET_HARD_FLOAT")
++			     (DI "TARGET_DOUBLE_FLOAT")])
++
+ ;; A mode for which moves involving FPRs may need to be split.
+ (define_mode_iterator SPLITF
+   [(DF "!TARGET_64BIT && TARGET_DOUBLE_FLOAT")
+@@ -515,6 +523,19 @@
+ (define_code_attr sel [(eq "masknez") (ne "maskeqz")])
+ (define_code_attr selinv [(eq "maskeqz") (ne "masknez")])
+ 
++;; Iterator and attributes for floating-point to fixed-point conversion
++;; instructions.
++(define_int_iterator LRINT [UNSPEC_FTINT UNSPEC_FTINTRM UNSPEC_FTINTRP])
++(define_int_attr lrint_pattern [(UNSPEC_FTINT "lrint")
++				(UNSPEC_FTINTRM "lfloor")
++				(UNSPEC_FTINTRP "lceil")])
++(define_int_attr lrint_submenmonic [(UNSPEC_FTINT "")
++				    (UNSPEC_FTINTRM "rm")
++				    (UNSPEC_FTINTRP "rp")])
++(define_int_attr lrint_allow_inexact [(UNSPEC_FTINT "1")
++				      (UNSPEC_FTINTRM "0")
++				      (UNSPEC_FTINTRP "0")])
++
+ ;;
+ ;;  ....................
+ ;;
+@@ -2022,6 +2043,19 @@
+   [(set_attr "type" "fcvt")
+    (set_attr "mode" "")])
+ 
++;; Convert floating-point numbers to integers
++(define_insn "2"
++  [(set (match_operand:ANYFI 0 "register_operand" "=f")
++	(unspec:ANYFI [(match_operand:ANYF 1 "register_operand" "f")]
++		      LRINT))]
++  "TARGET_HARD_FLOAT &&
++   (
++    || flag_fp_int_builtin_inexact
++    || !flag_trapping_math)"
++  "ftint.. %0,%1"
++  [(set_attr "type" "fcvt")
++   (set_attr "mode" "")])
++
+ ;; Load the low word of operand 0 with operand 1.
+ (define_insn "load_low"
+   [(set (match_operand:SPLITF 0 "register_operand" "=f,f")
+diff --git a/gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c b/gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c
+new file mode 100644
+index 000000000..88b83a9c0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/ftint-no-inexact.c
+@@ -0,0 +1,44 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -mdouble-float -fno-math-errno -fno-fp-int-builtin-inexact" } */
++/* { dg-final { scan-assembler "ftint\\.l\\.s" } } */
++/* { dg-final { scan-assembler "ftint\\.l\\.d" } } */
++/* { dg-final { scan-assembler-not "ftintrm\\.l\\.s" } } */
++/* { dg-final { scan-assembler-not "ftintrm\\.l\\.d" } } */
++/* { dg-final { scan-assembler-not "ftintrp\\.l\\.s" } } */
++/* { dg-final { scan-assembler-not "ftintrp\\.l\\.d" } } */
++
++long
++my_lrint (double a)
++{
++  return __builtin_lrint (a);
++}
++
++long
++my_lrintf (float a)
++{
++  return __builtin_lrintf (a);
++}
++
++long
++my_lfloor (double a)
++{
++  return __builtin_lfloor (a);
++}
++
++long
++my_lfloorf (float a)
++{
++  return __builtin_lfloorf (a);
++}
++
++long
++my_lceil (double a)
++{
++  return __builtin_lceil (a);
++}
++
++long
++my_lceilf (float a)
++{
++  return __builtin_lceilf (a);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/ftint.c b/gcc/testsuite/gcc.target/loongarch/ftint.c
+new file mode 100644
+index 000000000..7a326a454
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/ftint.c
+@@ -0,0 +1,44 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -mdouble-float -fno-math-errno -ffp-int-builtin-inexact" } */
++/* { dg-final { scan-assembler "ftint\\.l\\.s" } } */
++/* { dg-final { scan-assembler "ftint\\.l\\.d" } } */
++/* { dg-final { scan-assembler "ftintrm\\.l\\.s" } } */
++/* { dg-final { scan-assembler "ftintrm\\.l\\.d" } } */
++/* { dg-final { scan-assembler "ftintrp\\.l\\.s" } } */
++/* { dg-final { scan-assembler "ftintrp\\.l\\.d" } } */
++
++long
++my_lrint (double a)
++{
++  return __builtin_lrint (a);
++}
++
++long
++my_lrintf (float a)
++{
++  return __builtin_lrintf (a);
++}
++
++long
++my_lfloor (double a)
++{
++  return __builtin_lfloor (a);
++}
++
++long
++my_lfloorf (float a)
++{
++  return __builtin_lfloorf (a);
++}
++
++long
++my_lceil (double a)
++{
++  return __builtin_lceil (a);
++}
++
++long
++my_lceilf (float a)
++{
++  return __builtin_lceilf (a);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-new-code-model-medium.patch b/LoongArch-Add-new-code-model-medium.patch
new file mode 100644
index 0000000000000000000000000000000000000000..71974e06182e1f41429e706614e81d9fc65e0f6d
--- /dev/null
+++ b/LoongArch-Add-new-code-model-medium.patch
@@ -0,0 +1,1051 @@
+From 893322f214fbb916dc8eb6be5acbf7bdb7785e77 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Sat, 20 Aug 2022 15:19:51 +0800
+Subject: [PATCH 012/124] LoongArch: Add new code model 'medium'.
+
+The function jump instruction in normal mode is 'bl',
+so the scope of the function jump is +-128MB.
+
+Now we've added support for 'medium' mode, this mode is
+to complete the function jump through two instructions:
+	pcalau12i + jirl
+So in this mode the function jump range is increased to +-2GB.
+
+Compared with 'normal' mode, 'medium' mode only affects the
+jump range of functions.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch-strings: Support code model medium.
+	* config/loongarch/genopts/loongarch.opt.in: Likewise.
+	* config/loongarch/loongarch-def.c: Likewise.
+	* config/loongarch/loongarch-def.h (CMODEL_LARGE): Likewise.
+	(CMODEL_EXTREME): Likewise.
+	(N_CMODEL_TYPES): Likewise.
+	(CMODEL_MEDIUM): Likewise.
+	* config/loongarch/loongarch-opts.cc: Likewise.
+	* config/loongarch/loongarch-opts.h (TARGET_CMODEL_MEDIUM): Likewise.
+	* config/loongarch/loongarch-str.h (STR_CMODEL_MEDIUM): Likewise.
+	* config/loongarch/loongarch.cc (loongarch_call_tls_get_addr):
+	Tls symbol Loading support medium mode.
+	(loongarch_legitimize_call_address): When medium mode, make a symbolic
+	jump with two instructions.
+	(loongarch_option_override_internal): Support medium.
+	* config/loongarch/loongarch.md (@pcalau12i): New template.
+	(@sibcall_internal_1): New function call templates added to support
+	medium mode.
+	(@sibcall_value_internal_1): Likewise.
+	(@sibcall_value_multiple_internal_1): Likewise.
+	(@call_internal_1): Likewise.
+	(@call_value_internal_1): Likewise.
+	(@call_value_multiple_internal_1): Likewise.
+	* config/loongarch/loongarch.opt: Support medium.
+	* config/loongarch/predicates.md: Add processing about medium mode.
+	* doc/invoke.texi: Document for '-mcmodel=medium'.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/func-call-medium-1.c: New test.
+	* gcc.target/loongarch/func-call-medium-2.c: New test.
+	* gcc.target/loongarch/func-call-medium-3.c: New test.
+	* gcc.target/loongarch/func-call-medium-4.c: New test.
+	* gcc.target/loongarch/func-call-medium-5.c: New test.
+	* gcc.target/loongarch/func-call-medium-6.c: New test.
+	* gcc.target/loongarch/func-call-medium-7.c: New test.
+	* gcc.target/loongarch/func-call-medium-8.c: New test.
+	* gcc.target/loongarch/tls-gd-noplt.c: Add compile parameter '-mexplicit-relocs'.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/genopts/loongarch-strings       |   1 +
+ gcc/config/loongarch/genopts/loongarch.opt.in |   3 +
+ gcc/config/loongarch/loongarch-def.c          |   1 +
+ gcc/config/loongarch/loongarch-def.h          |   7 +-
+ gcc/config/loongarch/loongarch-opts.cc        |  15 ++-
+ gcc/config/loongarch/loongarch-opts.h         |   1 +
+ gcc/config/loongarch/loongarch-str.h          |   1 +
+ gcc/config/loongarch/loongarch.cc             | 123 +++++++++++++----
+ gcc/config/loongarch/loongarch.md             | 125 +++++++++++++++++-
+ gcc/config/loongarch/loongarch.opt            |   3 +
+ gcc/config/loongarch/predicates.md            |  15 ++-
+ gcc/doc/invoke.texi                           |   3 +
+ .../gcc.target/loongarch/func-call-medium-1.c |  41 ++++++
+ .../gcc.target/loongarch/func-call-medium-2.c |  41 ++++++
+ .../gcc.target/loongarch/func-call-medium-3.c |  41 ++++++
+ .../gcc.target/loongarch/func-call-medium-4.c |  41 ++++++
+ .../gcc.target/loongarch/func-call-medium-5.c |  42 ++++++
+ .../gcc.target/loongarch/func-call-medium-6.c |  42 ++++++
+ .../gcc.target/loongarch/func-call-medium-7.c |  43 ++++++
+ .../gcc.target/loongarch/func-call-medium-8.c |  42 ++++++
+ .../gcc.target/loongarch/tls-gd-noplt.c       |   4 +-
+ 21 files changed, 595 insertions(+), 40 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c
+
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index cb88ed56b..44ebb7ab1 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -54,5 +54,6 @@ OPTSTR_CMODEL	      cmodel
+ STR_CMODEL_NORMAL     normal
+ STR_CMODEL_TINY	      tiny
+ STR_CMODEL_TS	      tiny-static
++STR_CMODEL_MEDIUM     medium
+ STR_CMODEL_LARGE      large
+ STR_CMODEL_EXTREME    extreme
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index a571b6b75..ebdd9538d 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -172,6 +172,9 @@ Enum(cmodel) String(@@STR_CMODEL_TINY@@) Value(CMODEL_TINY)
+ EnumValue
+ Enum(cmodel) String(@@STR_CMODEL_TS@@) Value(CMODEL_TINY_STATIC)
+ 
++EnumValue
++Enum(cmodel) String(@@STR_CMODEL_MEDIUM@@) Value(CMODEL_MEDIUM)
++
+ EnumValue
+ Enum(cmodel) String(@@STR_CMODEL_LARGE@@) Value(CMODEL_LARGE)
+ 
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+index c8769b7d6..cbf995d81 100644
+--- a/gcc/config/loongarch/loongarch-def.c
++++ b/gcc/config/loongarch/loongarch-def.c
+@@ -152,6 +152,7 @@ loongarch_cmodel_strings[] = {
+   [CMODEL_NORMAL]	  = STR_CMODEL_NORMAL,
+   [CMODEL_TINY]		  = STR_CMODEL_TINY,
+   [CMODEL_TINY_STATIC]	  = STR_CMODEL_TS,
++  [CMODEL_MEDIUM]	  = STR_CMODEL_MEDIUM,
+   [CMODEL_LARGE]	  = STR_CMODEL_LARGE,
+   [CMODEL_EXTREME]	  = STR_CMODEL_EXTREME,
+ };
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index c2c35b6ba..b5985f070 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -82,9 +82,10 @@ extern const char* loongarch_cmodel_strings[];
+ #define CMODEL_NORMAL	      0
+ #define CMODEL_TINY	      1
+ #define CMODEL_TINY_STATIC    2
+-#define CMODEL_LARGE	      3
+-#define CMODEL_EXTREME	      4
+-#define N_CMODEL_TYPES	      5
++#define CMODEL_MEDIUM	      3
++#define CMODEL_LARGE	      4
++#define CMODEL_EXTREME	      5
++#define N_CMODEL_TYPES	      6
+ 
+ /* enum switches */
+ /* The "SW_" codes represent command-line switches (options that
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 2ae89f234..e13eafb58 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -376,11 +376,24 @@ fallback:
+ 
+   /* 5.  Target code model */
+   t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL;
+-  if (t.cmodel != CMODEL_NORMAL && t.cmodel != CMODEL_EXTREME)
++
++  switch (t.cmodel)
+     {
++    case CMODEL_TINY:
++    case CMODEL_TINY_STATIC:
++    case CMODEL_LARGE:
+       warning (0, "%qs is not supported, now cmodel is set to %qs",
+ 	       loongarch_cmodel_strings[t.cmodel], "normal");
+       t.cmodel = CMODEL_NORMAL;
++      break;
++
++    case CMODEL_NORMAL:
++    case CMODEL_MEDIUM:
++    case CMODEL_EXTREME:
++      break;
++
++    default:
++      gcc_unreachable ();
+     }
+ 
+   /* Cleanup and return.  */
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index da24ecd2b..3523a4cf7 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -46,6 +46,7 @@ loongarch_config_target (struct loongarch_target *target,
+ #define TARGET_CMODEL_NORMAL	    (la_target.cmodel == CMODEL_NORMAL)
+ #define TARGET_CMODEL_TINY	    (la_target.cmodel == CMODEL_TINY)
+ #define TARGET_CMODEL_TINY_STATIC   (la_target.cmodel == CMODEL_TINY_STATIC)
++#define TARGET_CMODEL_MEDIUM	    (la_target.cmodel == CMODEL_MEDIUM)
+ #define TARGET_CMODEL_LARGE	    (la_target.cmodel == CMODEL_LARGE)
+ #define TARGET_CMODEL_EXTREME	    (la_target.cmodel == CMODEL_EXTREME)
+ 
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index 0e8889b8c..9f1b0989c 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -53,6 +53,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_CMODEL_NORMAL "normal"
+ #define STR_CMODEL_TINY "tiny"
+ #define STR_CMODEL_TS "tiny-static"
++#define STR_CMODEL_MEDIUM "medium"
+ #define STR_CMODEL_LARGE "large"
+ #define STR_CMODEL_EXTREME "extreme"
+ 
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 1a33f668f..04c4ddaed 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2461,44 +2461,96 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+     }
+ 
+   if (flag_plt)
+-    insn = emit_call_insn (gen_call_value_internal (v0,
+-						    loongarch_tls_symbol,
+-						    const0_rtx));
+-  else
+     {
+-      rtx dest = gen_reg_rtx (Pmode);
+-
+-      if (TARGET_CMODEL_EXTREME)
++      switch (la_opt_cmodel)
+ 	{
+-	  gcc_assert (TARGET_EXPLICIT_RELOCS);
++	case CMODEL_NORMAL:
++	  insn = emit_call_insn (gen_call_value_internal (v0,
++							  loongarch_tls_symbol,
++							  const0_rtx));
++	  break;
+ 
+-	  rtx tmp1 = gen_reg_rtx (Pmode);
+-	  rtx high = gen_reg_rtx (Pmode);
++	case CMODEL_MEDIUM:
++	    {
++	      rtx reg = gen_reg_rtx (Pmode);
++	      if (TARGET_EXPLICIT_RELOCS)
++		{
++		  emit_insn (gen_pcalau12i (Pmode, reg, loongarch_tls_symbol));
++		  rtx call = gen_call_value_internal_1 (Pmode, v0, reg,
++							loongarch_tls_symbol,
++							const0_rtx);
++		  insn = emit_call_insn (call);
++		}
++	      else
++		{
++		  emit_move_insn (reg, loongarch_tls_symbol);
++		  insn = emit_call_insn (gen_call_value_internal (v0,
++								  reg,
++								  const0_rtx));
++		}
++	      break;
++	    }
+ 
+-	  loongarch_emit_move (high,
+-			       gen_rtx_HIGH (Pmode, loongarch_tls_symbol));
+-	  loongarch_emit_move (tmp1, gen_rtx_LO_SUM (Pmode,
+-						     gen_rtx_REG (Pmode, 0),
+-						     loongarch_tls_symbol));
+-	  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol));
+-	  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol));
+-	  loongarch_emit_move (dest,
+-			       gen_rtx_MEM (Pmode,
+-					    gen_rtx_PLUS (Pmode, high, tmp1)));
++	/* code model extreme not support plt.  */
++	case CMODEL_EXTREME:
++	case CMODEL_LARGE:
++	case CMODEL_TINY:
++	case CMODEL_TINY_STATIC:
++	default:
++	  gcc_unreachable ();
+ 	}
+-      else
++    }
++  else
++    {
++      rtx dest = gen_reg_rtx (Pmode);
++
++      switch (la_opt_cmodel)
+ 	{
+-	  if (TARGET_EXPLICIT_RELOCS)
++	case CMODEL_NORMAL:
++	case CMODEL_MEDIUM:
++	    {
++	      if (TARGET_EXPLICIT_RELOCS)
++		{
++		  rtx high = gen_reg_rtx (Pmode);
++		  loongarch_emit_move (high,
++				       gen_rtx_HIGH (Pmode,
++						     loongarch_tls_symbol));
++		  emit_insn (gen_ld_from_got (Pmode, dest, high,
++					      loongarch_tls_symbol));
++		}
++	      else
++		loongarch_emit_move (dest, loongarch_tls_symbol);
++	      break;
++	    }
++
++	case CMODEL_EXTREME:
+ 	    {
++	      gcc_assert (TARGET_EXPLICIT_RELOCS);
++
++	      rtx tmp1 = gen_reg_rtx (Pmode);
+ 	      rtx high = gen_reg_rtx (Pmode);
++
+ 	      loongarch_emit_move (high,
+ 				   gen_rtx_HIGH (Pmode, loongarch_tls_symbol));
+-	      emit_insn (gen_ld_from_got (Pmode, dest, high,
+-					  loongarch_tls_symbol));
++	      loongarch_emit_move (tmp1, gen_rtx_LO_SUM (Pmode,
++							 gen_rtx_REG (Pmode, 0),
++							 loongarch_tls_symbol));
++	      emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol));
++	      emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol));
++	      loongarch_emit_move (dest,
++				   gen_rtx_MEM (Pmode,
++						gen_rtx_PLUS (Pmode,
++							      high, tmp1)));
+ 	    }
+-	  else
+-	    loongarch_emit_move (dest, loongarch_tls_symbol);
++	  break;
++
++	case CMODEL_LARGE:
++	case CMODEL_TINY:
++	case CMODEL_TINY_STATIC:
++	default:
++	  gcc_unreachable ();
+ 	}
++
+       insn = emit_call_insn (gen_call_value_internal (v0, dest, const0_rtx));
+     }
+ 
+@@ -2618,6 +2670,24 @@ loongarch_legitimize_call_address (rtx addr)
+       loongarch_emit_move (reg, addr);
+       return reg;
+     }
++
++  enum loongarch_symbol_type symbol_type = loongarch_classify_symbol (addr);
++
++  /* Split function call insn 'bl sym' or 'bl %plt(sym)' to :
++     pcalau12i $rd, %pc_hi20(sym)
++     jr $rd, %pc_lo12(sym).  */
++
++  if (TARGET_CMODEL_MEDIUM
++      && TARGET_EXPLICIT_RELOCS
++      && (SYMBOL_REF_P (addr) || LABEL_REF_P (addr))
++      && (symbol_type == SYMBOL_PCREL
++	  || (symbol_type == SYMBOL_GOT_DISP && flag_plt)))
++    {
++      rtx reg = gen_reg_rtx (Pmode);
++      emit_insn (gen_pcalau12i (Pmode, reg, addr));
++      return gen_rtx_LO_SUM (Pmode, reg, addr);
++    }
++
+   return addr;
+ }
+ 
+@@ -5996,6 +6066,7 @@ loongarch_option_override_internal (struct gcc_options *opts)
+ 	break;
+ 
+       case CMODEL_TINY_STATIC:
++      case CMODEL_MEDIUM:
+       case CMODEL_NORMAL:
+       case CMODEL_TINY:
+       case CMODEL_LARGE:
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 8fc10444c..3787fd823 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -59,11 +59,15 @@
+   UNSPEC_CRCC
+ 
+   UNSPEC_LOAD_FROM_GOT
++  UNSPEC_PCALAU12I
+   UNSPEC_ORI_L_LO12
+   UNSPEC_LUI_L_HI20
+   UNSPEC_LUI_H_LO20
+   UNSPEC_LUI_H_HI12
+   UNSPEC_TLS_LOW
++
++  UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1
++  UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1
+ ])
+ 
+ (define_c_enum "unspecv" [
+@@ -1946,6 +1950,14 @@
+   [(set_attr "type" "move")]
+ )
+ 
++(define_insn "@pcalau12i"
++  [(set (match_operand:P 0 "register_operand" "=j")
++	(unspec:P [(match_operand:P 1 "symbolic_operand" "")]
++	UNSPEC_PCALAU12I))]
++  ""
++  "pcalau12i\t%0,%%pc_hi20(%1)"
++  [(set_attr "type" "move")])
++
+ (define_insn "@ori_l_lo12"
+   [(set (match_operand:P 0 "register_operand" "=r")
+ 	(unspec:P [(match_operand:P 1 "register_operand" "r")
+@@ -2877,7 +2889,12 @@
+ {
+   rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0));
+ 
+-  emit_call_insn (gen_sibcall_internal (target, operands[1]));
++  if (GET_CODE (target) == LO_SUM)
++    emit_call_insn (gen_sibcall_internal_1 (Pmode, XEXP (target, 0),
++					    XEXP (target, 1),
++					    operands[1]));
++  else
++    emit_call_insn (gen_sibcall_internal (target, operands[1]));
+   DONE;
+ })
+ 
+@@ -2891,6 +2908,14 @@
+    b\t%%plt(%0)"
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
++(define_insn "@sibcall_internal_1"
++  [(call (mem:P (lo_sum:P (match_operand:P 0 "register_operand" "j")
++			  (match_operand:P 1 "symbolic_operand" "")))
++	 (match_operand 2 "" ""))]
++  "SIBLING_CALL_P (insn) && TARGET_CMODEL_MEDIUM"
++  "jirl\t$r0,%0,%%pc_lo12(%1)"
++  [(set_attr "jirl" "indirect")])
++
+ (define_expand "sibcall_value"
+   [(parallel [(set (match_operand 0 "")
+ 		   (call (match_operand 1 "")
+@@ -2906,7 +2931,14 @@
+       rtx arg1 = XEXP (XVECEXP (operands[0],0, 0), 0);
+       rtx arg2 = XEXP (XVECEXP (operands[0],0, 1), 0);
+ 
+-      emit_call_insn (gen_sibcall_value_multiple_internal (arg1, target,
++      if (GET_CODE (target) == LO_SUM)
++	emit_call_insn (gen_sibcall_value_multiple_internal_1 (Pmode, arg1,
++							   XEXP (target, 0),
++							   XEXP (target, 1),
++							   operands[2],
++							   arg2));
++      else
++	emit_call_insn (gen_sibcall_value_multiple_internal (arg1, target,
+ 							   operands[2],
+ 							   arg2));
+     }
+@@ -2916,7 +2948,13 @@
+       if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1)
+ 	operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0);
+ 
+-      emit_call_insn (gen_sibcall_value_internal (operands[0], target,
++      if (GET_CODE (target) == LO_SUM)
++	emit_call_insn (gen_sibcall_value_internal_1 (Pmode, operands[0],
++						  XEXP (target, 0),
++						  XEXP (target, 1),
++						  operands[2]));
++      else
++	emit_call_insn (gen_sibcall_value_internal (operands[0], target,
+ 						  operands[2]));
+     }
+   DONE;
+@@ -2933,6 +2971,15 @@
+    b\t%%plt(%1)"
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
++(define_insn "@sibcall_value_internal_1"
++  [(set (match_operand 0 "register_operand" "")
++	(call (mem:P (lo_sum:P (match_operand:P 1 "register_operand" "j")
++			       (match_operand:P 2 "symbolic_operand" "")))
++	      (match_operand 3 "" "")))]
++  "SIBLING_CALL_P (insn) && TARGET_CMODEL_MEDIUM"
++  "jirl\t$r0,%1,%%pc_lo12(%2)"
++  [(set_attr "jirl" "indirect")])
++
+ (define_insn "sibcall_value_multiple_internal"
+   [(set (match_operand 0 "register_operand" "")
+ 	(call (mem:SI (match_operand 1 "call_insn_operand" "j,c,b"))
+@@ -2947,6 +2994,21 @@
+    b\t%%plt(%1)"
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
++(define_insn "@sibcall_value_multiple_internal_1"
++  [(set (match_operand 0 "register_operand" "")
++	(call (mem:P (unspec:P [(match_operand:P 1 "register_operand" "j")
++			        (match_operand:P 2 "symbolic_operand" "")]
++		      UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1))
++	      (match_operand 3 "" "")))
++   (set (match_operand 4 "register_operand" "")
++	(call (mem:P (unspec:P [(match_dup 1)
++			        (match_dup 2)]
++		      UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1))
++	      (match_dup 3)))]
++  "SIBLING_CALL_P (insn) && TARGET_CMODEL_MEDIUM"
++  "jirl\t$r0,%1,%%pc_lo12(%2)"
++  [(set_attr "jirl" "indirect")])
++
+ (define_expand "call"
+   [(parallel [(call (match_operand 0 "")
+ 		    (match_operand 1 ""))
+@@ -2956,7 +3018,11 @@
+ {
+   rtx target = loongarch_legitimize_call_address (XEXP (operands[0], 0));
+ 
+-  emit_call_insn (gen_call_internal (target, operands[1]));
++  if (GET_CODE (target) == LO_SUM)
++    emit_call_insn (gen_call_internal_1 (Pmode, XEXP (target, 0),
++					 XEXP (target, 1), operands[1]));
++  else
++    emit_call_insn (gen_call_internal (target, operands[1]));
+   DONE;
+ })
+ 
+@@ -2971,6 +3037,15 @@
+    bl\t%%plt(%0)"
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
++(define_insn "@call_internal_1"
++  [(call (mem:P (lo_sum:P (match_operand:P 0 "register_operand" "j")
++			  (match_operand:P 1 "symbolic_operand" "")))
++	 (match_operand 2 "" ""))
++   (clobber (reg:SI RETURN_ADDR_REGNUM))]
++  "TARGET_CMODEL_MEDIUM"
++  "jirl\t$r1,%0,%%pc_lo12(%1)"
++  [(set_attr "jirl" "indirect")])
++
+ (define_expand "call_value"
+   [(parallel [(set (match_operand 0 "")
+ 		   (call (match_operand 1 "")
+@@ -2985,7 +3060,13 @@
+       rtx arg1 = XEXP (XVECEXP (operands[0], 0, 0), 0);
+       rtx arg2 = XEXP (XVECEXP (operands[0], 0, 1), 0);
+ 
+-      emit_call_insn (gen_call_value_multiple_internal (arg1, target,
++      if (GET_CODE (target) == LO_SUM)
++	emit_call_insn (gen_call_value_multiple_internal_1 (Pmode, arg1,
++							    XEXP (target, 0),
++							    XEXP (target, 1),
++							    operands[2], arg2));
++      else
++	emit_call_insn (gen_call_value_multiple_internal (arg1, target,
+ 							operands[2], arg2));
+     }
+    else
+@@ -2994,7 +3075,13 @@
+       if (GET_CODE (operands[0]) == PARALLEL && XVECLEN (operands[0], 0) == 1)
+ 	    operands[0] = XEXP (XVECEXP (operands[0], 0, 0), 0);
+ 
+-      emit_call_insn (gen_call_value_internal (operands[0], target,
++      if (GET_CODE (target) == LO_SUM)
++	emit_call_insn (gen_call_value_internal_1 (Pmode, operands[0],
++						   XEXP (target, 0),
++						   XEXP (target, 1),
++						   operands[2]));
++      else
++	emit_call_insn (gen_call_value_internal (operands[0], target,
+ 					       operands[2]));
+     }
+   DONE;
+@@ -3012,6 +3099,16 @@
+    bl\t%%plt(%1)"
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
++(define_insn "@call_value_internal_1"
++  [(set (match_operand 0 "register_operand" "")
++	(call (mem:P (lo_sum:P (match_operand:P 1 "register_operand" "j")
++			       (match_operand:P 2 "symbolic_operand" "")))
++	      (match_operand 3 "" "")))
++   (clobber (reg:SI RETURN_ADDR_REGNUM))]
++  "TARGET_CMODEL_MEDIUM"
++  "jirl\t$r1,%1,%%pc_lo12(%2)"
++  [(set_attr "jirl" "indirect")])
++
+ (define_insn "call_value_multiple_internal"
+   [(set (match_operand 0 "register_operand" "")
+ 	(call (mem:SI (match_operand 1 "call_insn_operand" "e,c,b"))
+@@ -3027,6 +3124,22 @@
+    bl\t%%plt(%1)"
+   [(set_attr "jirl" "indirect,direct,direct")])
+ 
++(define_insn "@call_value_multiple_internal_1"
++  [(set (match_operand 0 "register_operand" "")
++	(call (mem:P (unspec:P [(match_operand:P 1 "register_operand" "j")
++			        (match_operand:P 2 "symbolic_operand" "")]
++		      UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1))
++	      (match_operand 3 "" "")))
++   (set (match_operand 4 "register_operand" "")
++	(call (mem:P (unspec:P [(match_dup 1)
++			        (match_dup 2)]
++		      UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1))
++	      (match_dup 3)))
++   (clobber (reg:SI RETURN_ADDR_REGNUM))]
++  "TARGET_CMODEL_MEDIUM"
++  "jirl\t$r1,%1,%%pc_lo12(%2)"
++  [(set_attr "jirl" "indirect")])
++
+ 
+ ;; Call subroutine returning any type.
+ (define_expand "untyped_call"
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 9df7e1872..639523421 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -179,6 +179,9 @@ Enum(cmodel) String(tiny) Value(CMODEL_TINY)
+ EnumValue
+ Enum(cmodel) String(tiny-static) Value(CMODEL_TINY_STATIC)
+ 
++EnumValue
++Enum(cmodel) String(medium) Value(CMODEL_MEDIUM)
++
+ EnumValue
+ Enum(cmodel) String(large) Value(CMODEL_LARGE)
+ 
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index e38c6fbdd..8bd0c1376 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -123,16 +123,27 @@
+   if (offset != const0_rtx)
+     return false;
+ 
++  /* When compiling with '-mcmodel=medium -mexplicit-relocs'
++     symbols are splited in loongarch_legitimize_call_address.
++
++     When compiling with '-mcmodel=medium -mno-explicit-relocs',
++     first obtain the symbolic address or the address of the
++     plt entry, and then perform an indirect jump, so return false.  */
++
+   switch (symbol_type)
+     {
+     case SYMBOL_PCREL:
+-      if (TARGET_CMODEL_EXTREME)
++      if (TARGET_CMODEL_EXTREME
++	  || (TARGET_CMODEL_MEDIUM && !TARGET_EXPLICIT_RELOCS))
+ 	return false;
+       else
+ 	return 1;
+ 
+     case SYMBOL_GOT_DISP:
+-      if (TARGET_CMODEL_EXTREME || !flag_plt)
++      if (TARGET_CMODEL_EXTREME
++	  || !flag_plt
++	  || (flag_plt && TARGET_CMODEL_MEDIUM
++	      && !TARGET_EXPLICIT_RELOCS))
+ 	return false;
+       else
+ 	return 1;
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index c4f83e62a..2a5592516 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -24625,6 +24625,9 @@ Set the code model to one of:
+ The text segment must be within 128MB addressing space.  The data segment must
+ be within 2GB addressing space.
+ 
++@item medium
++The text segment and data segment must be within 2GB addressing space.
++
+ @item large (Not implemented yet)
+ 
+ @item extreme
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c
+new file mode 100644
+index 000000000..276d73e5e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-1.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */
++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c
+new file mode 100644
+index 000000000..237821c06
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-2.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*la\.local\t.*f\n\tjirl" } } */
++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c
+new file mode 100644
+index 000000000..9a6e16103
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-3.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */
++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c
+new file mode 100644
+index 000000000..2577e3452
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-4.c
+@@ -0,0 +1,41 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*la\.local\t.*f\n\tjirl" } } */
++/* { dg-final { scan-assembler "test2:.*la\.local\t.*l\n\tjirl" } } */
++/* { dg-final { scan-assembler "test3:.*la\.global\t.*\_\_tls\_get\_addr" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c
+new file mode 100644
+index 000000000..d70b6ea46
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-5.c
+@@ -0,0 +1,42 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i.*%pc_hi20\\(g\\)\n\tjirl.*pc_lo12\\(g\\)" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */
++/* { dg-final { scan-assembler "test3:.*pcalau12i.*%pc_hi20\\(__tls_get_addr\\)\n\t.*\n\tjirl.*%pc_lo12\\(__tls_get_addr\\)" } } */
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c
+new file mode 100644
+index 000000000..f963a9944
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-6.c
+@@ -0,0 +1,42 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i.*%pc_hi20\\(g\\)\n\tjirl.*pc_lo12\\(g\\)" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */
++/* { dg-final { scan-assembler "test3:.*pcalau12i.*%pc_hi20\\(__tls_get_addr\\)\n\t.*\n\tjirl.*%pc_lo12\\(__tls_get_addr\\)" } } */
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c
+new file mode 100644
+index 000000000..f2818b2da
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-7.c
+@@ -0,0 +1,43 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%got_pc_hi20\\(f\\)\n\tld\.d\t.*%got_pc_lo12\\(f\\)\n\tjirl" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i\t.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */
++/* { dg-final { scan-assembler "test3:.*pcalau12i.*%got_pc_hi20\\(__tls_get_addr\\)\n\tld\.d.*%got_pc_lo12\\(__tls_get_addr\\)" } } */
++
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c b/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c
+new file mode 100644
+index 000000000..7fa873d84
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-medium-8.c
+@@ -0,0 +1,42 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=medium" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%pc_hi20\\(f\\)\n\tjirl.*%pc_lo12\\(f\\)" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i\t.*%pc_hi20\\(l\\)\n\tjirl.*%pc_lo12\\(l\\)" } } */
++/* { dg-final { scan-assembler "test3:.*pcalau12i.*%got_pc_hi20\\(__tls_get_addr\\)\n\tld\.d.*%got_pc_lo12\\(__tls_get_addr\\)" } } */
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test3 (void)
++{
++  a = 10;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
+index 32a0acf9b..375663286 100644
+--- a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
++++ b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
+@@ -1,6 +1,6 @@
+ /* { dg-do compile } */
+-/* { dg-options "-O2 -fno-plt -mcmodel=normal" } */
+-/* { dg-final { scan-assembler "pcalau12i\t.*%got_pc_hi20\\(__tls_get_addr\\)" } } */
++/* { dg-options "-O0 -fno-plt -mcmodel=normal -mexplicit-relocs" } */
++/* { dg-final { scan-assembler "pcalau12i\t.*%got_pc_hi20\\(__tls_get_addr\\)\n\tld\.d.*%got_pc_lo12\\(__tls_get_addr\\)" } } */
+ 
+ __attribute__ ((tls_model ("global-dynamic"))) __thread int a;
+ 
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-prefetch-instructions.patch b/LoongArch-Add-prefetch-instructions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a66d87cec5921ed05047d79ce5b4776b010fc473
--- /dev/null
+++ b/LoongArch-Add-prefetch-instructions.patch
@@ -0,0 +1,158 @@
+From 52a41006c2e8141a42de93ffcc2c040e034244b2 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Wed, 16 Nov 2022 09:25:14 +0800
+Subject: [PATCH 031/124] LoongArch: Add prefetch instructions.
+
+Enable sw prefetching at -O3 and higher.
+
+Co-Authored-By: xujiahao 
+
+gcc/ChangeLog:
+
+	* config/loongarch/constraints.md (ZD): New constraint.
+	* config/loongarch/loongarch-def.c: Initial number of parallel prefetch.
+	* config/loongarch/loongarch-tune.h (struct loongarch_cache):
+	Define number of parallel prefetch.
+	* config/loongarch/loongarch.cc (loongarch_option_override_internal):
+	Set up parameters to be used in prefetching algorithm.
+	* config/loongarch/loongarch.md (prefetch): New template.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/constraints.md   | 10 ++++++++++
+ gcc/config/loongarch/loongarch-def.c  |  2 ++
+ gcc/config/loongarch/loongarch-tune.h |  1 +
+ gcc/config/loongarch/loongarch.cc     | 28 +++++++++++++++++++++++++++
+ gcc/config/loongarch/loongarch.md     | 14 ++++++++++++++
+ 5 files changed, 55 insertions(+)
+
+diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md
+index 43cb7b5f0..46f7f63ae 100644
+--- a/gcc/config/loongarch/constraints.md
++++ b/gcc/config/loongarch/constraints.md
+@@ -86,6 +86,10 @@
+ ;;    "ZB"
+ ;;      "An address that is held in a general-purpose register.
+ ;;      The offset is zero"
++;;    "ZD"
++;;	"An address operand whose address is formed by a base register
++;;	 and offset that is suitable for use in instructions with the same
++;;	 addressing mode as @code{preld}."
+ ;; "<" "Matches a pre-dec or post-dec operand." (Global non-architectural)
+ ;; ">" "Matches a pre-inc or post-inc operand." (Global non-architectural)
+ 
+@@ -190,3 +194,9 @@
+   The offset is zero"
+   (and (match_code "mem")
+        (match_test "REG_P (XEXP (op, 0))")))
++
++(define_address_constraint "ZD"
++  "An address operand whose address is formed by a base register
++   and offset that is suitable for use in instructions with the same
++   addressing mode as @code{preld}."
++   (match_test "loongarch_12bit_offset_address_p (op, mode)"))
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+index cbf995d81..80ab10a52 100644
+--- a/gcc/config/loongarch/loongarch-def.c
++++ b/gcc/config/loongarch/loongarch-def.c
+@@ -62,11 +62,13 @@ loongarch_cpu_cache[N_TUNE_TYPES] = {
+       .l1d_line_size = 64,
+       .l1d_size = 64,
+       .l2d_size = 256,
++      .simultaneous_prefetches = 4,
+   },
+   [CPU_LA464] = {
+       .l1d_line_size = 64,
+       .l1d_size = 64,
+       .l2d_size = 256,
++      .simultaneous_prefetches = 4,
+   },
+ };
+ 
+diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h
+index 6f3530f5c..8e3eb2947 100644
+--- a/gcc/config/loongarch/loongarch-tune.h
++++ b/gcc/config/loongarch/loongarch-tune.h
+@@ -45,6 +45,7 @@ struct loongarch_cache {
+     int l1d_line_size;  /* bytes */
+     int l1d_size;       /* KiB */
+     int l2d_size;       /* kiB */
++    int simultaneous_prefetches; /* number of parallel prefetch */
+ };
+ 
+ #endif /* LOONGARCH_TUNE_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index d552b162a..622c9435b 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -63,6 +63,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "context.h"
+ #include "builtins.h"
+ #include "rtl-iter.h"
++#include "opts.h"
+ 
+ /* This file should be included last.  */
+ #include "target-def.h"
+@@ -6099,6 +6100,33 @@ loongarch_option_override_internal (struct gcc_options *opts)
+   if (loongarch_branch_cost == 0)
+     loongarch_branch_cost = loongarch_cost->branch_cost;
+ 
++  /* Set up parameters to be used in prefetching algorithm.  */
++  int simultaneous_prefetches
++    = loongarch_cpu_cache[LARCH_ACTUAL_TUNE].simultaneous_prefetches;
++
++  SET_OPTION_IF_UNSET (opts, &global_options_set,
++		       param_simultaneous_prefetches,
++		       simultaneous_prefetches);
++
++  SET_OPTION_IF_UNSET (opts, &global_options_set,
++		       param_l1_cache_line_size,
++		       loongarch_cpu_cache[LARCH_ACTUAL_TUNE].l1d_line_size);
++
++  SET_OPTION_IF_UNSET (opts, &global_options_set,
++		       param_l1_cache_size,
++		       loongarch_cpu_cache[LARCH_ACTUAL_TUNE].l1d_size);
++
++  SET_OPTION_IF_UNSET (opts, &global_options_set,
++		       param_l2_cache_size,
++		       loongarch_cpu_cache[LARCH_ACTUAL_TUNE].l2d_size);
++
++
++  /* Enable sw prefetching at -O3 and higher.  */
++  if (opts->x_flag_prefetch_loop_arrays < 0
++      && (opts->x_optimize >= 3 || opts->x_flag_profile_use)
++      && !opts->x_optimize_size)
++    opts->x_flag_prefetch_loop_arrays = 1;
++
+   if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib)
+     error ("%qs cannot be used for compiling a shared library",
+ 	   "-mdirect-extern-access");
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 682ab9617..2fda53819 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -3282,6 +3282,20 @@
+ ;;  ....................
+ ;;
+ 
++(define_insn "prefetch"
++  [(prefetch (match_operand 0 "address_operand" "ZD")
++	     (match_operand 1 "const_int_operand" "n")
++	     (match_operand 2 "const_int_operand" "n"))]
++  ""
++{
++  switch (INTVAL (operands[1]))
++  {
++    case 0: return "preld\t0,%a0";
++    case 1: return "preld\t8,%a0";
++    default: gcc_unreachable ();
++  }
++})
++
+ (define_insn "nop"
+   [(const_int 0)]
+   ""
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-support-code-model-extreme.patch b/LoongArch-Add-support-code-model-extreme.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c444055581defdc1a2a2318980b3ea8bf1ef7ab6
--- /dev/null
+++ b/LoongArch-Add-support-code-model-extreme.patch
@@ -0,0 +1,794 @@
+From b1c92fb9dab678e4c9c23fa77185011494d145b9 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 18 Aug 2022 17:26:13 +0800
+Subject: [PATCH 011/124] LoongArch: Add support code model extreme.
+
+Use five instructions to calculate a signed 64-bit offset relative to the pc.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-opts.cc: Allow cmodel to be extreme.
+	* config/loongarch/loongarch.cc (loongarch_call_tls_get_addr):
+	Add extreme support for TLS GD and LD types.
+	(loongarch_legitimize_tls_address): Add extreme support for TLS LE
+	and IE.
+	(loongarch_split_symbol): When compiling with -mcmodel=extreme,
+	the symbol address will be obtained through five instructions.
+	(loongarch_print_operand_reloc): Add support.
+	(loongarch_print_operand): Add support.
+	(loongarch_print_operand_address): Add support.
+	(loongarch_option_override_internal): Set '-mcmodel=extreme' option
+	incompatible with '-mno-explicit-relocs'.
+	* config/loongarch/loongarch.md (@lui_l_hi20):
+	Loads bits 12-31 of data into registers.
+	(lui_h_lo20): Load bits 32-51 of the data and spell bits 0-31 of
+	the source register.
+	(lui_h_hi12): Load bits 52-63 of the data and spell bits 0-51 of
+	the source register.
+	* config/loongarch/predicates.md: Symbols need to be decomposed
+	when defining the macro TARGET_CMODEL_EXTREME
+	* doc/invoke.texi: Modify the description information of cmodel in the document.
+	Document -W[no-]extreme-plt.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/func-call-1.c: Add option '-mcmodel=normal'.
+	* gcc.target/loongarch/func-call-2.c: Likewise.
+	* gcc.target/loongarch/func-call-3.c: Likewise.
+	* gcc.target/loongarch/func-call-4.c: Likewise.
+	* gcc.target/loongarch/func-call-5.c: Likewise.
+	* gcc.target/loongarch/func-call-6.c: Likewise.
+	* gcc.target/loongarch/func-call-7.c: Likewise.
+	* gcc.target/loongarch/func-call-8.c: Likewise.
+	* gcc.target/loongarch/relocs-symbol-noaddend.c: Likewise.
+	* gcc.target/loongarch/func-call-extreme-1.c: New test.
+	* gcc.target/loongarch/func-call-extreme-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-opts.cc        |   3 +-
+ gcc/config/loongarch/loongarch.cc             | 222 +++++++++++++++---
+ gcc/config/loongarch/loongarch.md             |  34 ++-
+ gcc/config/loongarch/predicates.md            |   9 +-
+ gcc/doc/invoke.texi                           |  50 +---
+ .../gcc.target/loongarch/func-call-1.c        |   2 +-
+ .../gcc.target/loongarch/func-call-2.c        |   2 +-
+ .../gcc.target/loongarch/func-call-3.c        |   2 +-
+ .../gcc.target/loongarch/func-call-4.c        |   2 +-
+ .../gcc.target/loongarch/func-call-5.c        |   2 +-
+ .../gcc.target/loongarch/func-call-6.c        |   2 +-
+ .../gcc.target/loongarch/func-call-7.c        |   2 +-
+ .../gcc.target/loongarch/func-call-8.c        |   2 +-
+ .../loongarch/func-call-extreme-1.c           |  32 +++
+ .../loongarch/func-call-extreme-2.c           |  32 +++
+ .../loongarch/relocs-symbol-noaddend.c        |   2 +-
+ 16 files changed, 318 insertions(+), 82 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c
+
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index 3f70943de..2ae89f234 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -376,14 +376,13 @@ fallback:
+ 
+   /* 5.  Target code model */
+   t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL;
+-  if (t.cmodel != CMODEL_NORMAL)
++  if (t.cmodel != CMODEL_NORMAL && t.cmodel != CMODEL_EXTREME)
+     {
+       warning (0, "%qs is not supported, now cmodel is set to %qs",
+ 	       loongarch_cmodel_strings[t.cmodel], "normal");
+       t.cmodel = CMODEL_NORMAL;
+     }
+ 
+-
+   /* Cleanup and return.  */
+   obstack_free (&msg_obstack, NULL);
+   *target = t;
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 76bf55ea4..1a33f668f 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2436,7 +2436,19 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+       /* Split tls symbol to high and low.  */
+       rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc));
+       high = loongarch_force_temporary (tmp, high);
+-      emit_insn (gen_tls_low (Pmode, a0, high, loc));
++
++      if (TARGET_CMODEL_EXTREME)
++	{
++	  gcc_assert (TARGET_EXPLICIT_RELOCS);
++
++	  rtx tmp1 = gen_reg_rtx (Pmode);
++	  emit_insn (gen_tls_low (Pmode, tmp1, gen_rtx_REG (Pmode, 0), loc));
++	  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loc));
++	  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loc));
++	  emit_move_insn (a0, gen_rtx_PLUS (Pmode, high, tmp1));
++	}
++      else
++	emit_insn (gen_tls_low (Pmode, a0, high, loc));
+     }
+   else
+     {
+@@ -2449,14 +2461,44 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+     }
+ 
+   if (flag_plt)
+-    insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol,
++    insn = emit_call_insn (gen_call_value_internal (v0,
++						    loongarch_tls_symbol,
+ 						    const0_rtx));
+   else
+     {
+       rtx dest = gen_reg_rtx (Pmode);
+-      rtx high = gen_reg_rtx (Pmode);
+-      loongarch_emit_move (high, gen_rtx_HIGH (Pmode, loongarch_tls_symbol));
+-      emit_insn (gen_ld_from_got (Pmode, dest, high, loongarch_tls_symbol));
++
++      if (TARGET_CMODEL_EXTREME)
++	{
++	  gcc_assert (TARGET_EXPLICIT_RELOCS);
++
++	  rtx tmp1 = gen_reg_rtx (Pmode);
++	  rtx high = gen_reg_rtx (Pmode);
++
++	  loongarch_emit_move (high,
++			       gen_rtx_HIGH (Pmode, loongarch_tls_symbol));
++	  loongarch_emit_move (tmp1, gen_rtx_LO_SUM (Pmode,
++						     gen_rtx_REG (Pmode, 0),
++						     loongarch_tls_symbol));
++	  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, loongarch_tls_symbol));
++	  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, loongarch_tls_symbol));
++	  loongarch_emit_move (dest,
++			       gen_rtx_MEM (Pmode,
++					    gen_rtx_PLUS (Pmode, high, tmp1)));
++	}
++      else
++	{
++	  if (TARGET_EXPLICIT_RELOCS)
++	    {
++	      rtx high = gen_reg_rtx (Pmode);
++	      loongarch_emit_move (high,
++				   gen_rtx_HIGH (Pmode, loongarch_tls_symbol));
++	      emit_insn (gen_ld_from_got (Pmode, dest, high,
++					  loongarch_tls_symbol));
++	    }
++	  else
++	    loongarch_emit_move (dest, loongarch_tls_symbol);
++	}
+       insn = emit_call_insn (gen_call_value_internal (v0, dest, const0_rtx));
+     }
+ 
+@@ -2508,7 +2550,23 @@ loongarch_legitimize_tls_address (rtx loc)
+ 	      tmp3 = gen_reg_rtx (Pmode);
+ 	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+ 	      high = loongarch_force_temporary (tmp3, high);
+-	      emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2));
++
++	      if (TARGET_CMODEL_EXTREME)
++		{
++		  gcc_assert (TARGET_EXPLICIT_RELOCS);
++
++		  rtx tmp3 = gen_reg_rtx (Pmode);
++		  emit_insn (gen_tls_low (Pmode, tmp3,
++					  gen_rtx_REG (Pmode, 0), tmp2));
++		  emit_insn (gen_lui_h_lo20 (tmp3, tmp3, tmp2));
++		  emit_insn (gen_lui_h_hi12 (tmp3, tmp3, tmp2));
++		  emit_move_insn (tmp1,
++				  gen_rtx_MEM (Pmode,
++					       gen_rtx_PLUS (Pmode,
++							     high, tmp3)));
++		}
++	      else
++		emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2));
+ 	    }
+ 	  else
+ 	    emit_insn (loongarch_got_load_tls_ie (tmp1, loc));
+@@ -2530,11 +2588,18 @@ loongarch_legitimize_tls_address (rtx loc)
+ 	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
+ 	      high = loongarch_force_temporary (tmp3, high);
+ 	      emit_insn (gen_ori_l_lo12 (Pmode, tmp1, high, tmp2));
++
++	      if (TARGET_CMODEL_EXTREME)
++		{
++		  gcc_assert (TARGET_EXPLICIT_RELOCS);
++
++		  emit_insn (gen_lui_h_lo20 (tmp1, tmp1, tmp2));
++		  emit_insn (gen_lui_h_hi12 (tmp1, tmp1, tmp2));
++		}
+ 	    }
+ 	  else
+ 	    emit_insn (loongarch_got_load_tls_le (tmp1, loc));
+ 	  emit_insn (gen_add3_insn (dest, tmp1, tp));
+-
+ 	}
+       break;
+ 
+@@ -2603,7 +2668,6 @@ bool
+ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+ {
+   enum loongarch_symbol_type symbol_type;
+-  rtx high;
+ 
+   /* If build with '-mno-explicit-relocs', don't split symbol.  */
+   if (!TARGET_EXPLICIT_RELOCS)
+@@ -2615,6 +2679,8 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+       || !loongarch_split_symbol_type (symbol_type))
+     return false;
+ 
++  rtx high, temp1 = NULL;
++
+   if (temp == NULL)
+     temp = gen_reg_rtx (Pmode);
+ 
+@@ -2622,20 +2688,42 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+   high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
+   high = loongarch_force_temporary (temp, high);
+ 
++  if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ())
++    {
++      gcc_assert (TARGET_EXPLICIT_RELOCS);
++
++      temp1 = gen_reg_rtx (Pmode);
++      emit_move_insn (temp1, gen_rtx_LO_SUM (Pmode, gen_rtx_REG (Pmode, 0),
++					     addr));
++      emit_insn (gen_lui_h_lo20 (temp1, temp1, addr));
++      emit_insn (gen_lui_h_hi12 (temp1, temp1, addr));
++    }
++
+   if (low_out)
+     switch (symbol_type)
+       {
+       case SYMBOL_PCREL:
+-	*low_out = gen_rtx_LO_SUM (Pmode, high, addr);
+-	break;
++	{
++	  if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ())
++	    *low_out = gen_rtx_PLUS (Pmode, high, temp1);
++	  else
++	    *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
++	  break;
++	}
+ 
+       case SYMBOL_GOT_DISP:
+ 	/* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
+ 	{
+-	  rtx low = gen_rtx_LO_SUM (Pmode, high, addr);
+-	  rtx mem = gen_rtx_MEM (Pmode, low);
+-	  *low_out = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, mem),
+-				     UNSPEC_LOAD_FROM_GOT);
++	  if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ())
++	    *low_out = gen_rtx_MEM (Pmode, gen_rtx_PLUS (Pmode, high, temp1));
++	  else
++	    {
++	      rtx low = gen_rtx_LO_SUM (Pmode, high, addr);
++	      rtx mem = gen_rtx_MEM (Pmode, low);
++	      *low_out = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, mem),
++					 UNSPEC_LOAD_FROM_GOT);
++	    }
++
+ 	  break;
+ 	}
+ 
+@@ -4584,34 +4672,86 @@ loongarch_memmodel_needs_release_fence (enum memmodel model)
+    in context CONTEXT.  HI_RELOC indicates a high-part reloc.  */
+ 
+ static void
+-loongarch_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
++loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
++			       bool hi_reloc)
+ {
+   const char *reloc;
+ 
++  if (TARGET_CMODEL_EXTREME)
++    gcc_assert (TARGET_EXPLICIT_RELOCS);
++
+   switch (loongarch_classify_symbolic_expression (op))
+     {
+     case SYMBOL_PCREL:
+-      reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12";
++      if (hi64_part)
++	{
++	  if (TARGET_CMODEL_EXTREME)
++	    reloc = hi_reloc ? "%pc64_hi12" : "%pc64_lo20";
++	  else
++	    gcc_unreachable ();
++	}
++      else
++	reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12";
+       break;
+ 
+     case SYMBOL_GOT_DISP:
+-      reloc = hi_reloc ? "%got_pc_hi20" : "%got_pc_lo12";
++      if (hi64_part)
++	{
++	  if (TARGET_CMODEL_EXTREME)
++	    reloc = hi_reloc ? "%got64_pc_hi12" : "%got64_pc_lo20";
++	  else
++	    gcc_unreachable ();
++	}
++      else
++	reloc = hi_reloc ? "%got_pc_hi20" : "%got_pc_lo12";
+       break;
+ 
+     case SYMBOL_TLS_IE:
+-      reloc = hi_reloc ? "%ie_pc_hi20" : "%ie_pc_lo12";
++      if (hi64_part)
++	{
++	  if (TARGET_CMODEL_EXTREME)
++	    reloc = hi_reloc ? "%ie64_pc_hi12" : "%ie64_pc_lo20";
++	  else
++	    gcc_unreachable ();
++	}
++      else
++	reloc = hi_reloc ? "%ie_pc_hi20" : "%ie_pc_lo12";
+       break;
+ 
+     case SYMBOL_TLS_LE:
+-      reloc = hi_reloc ? "%le_hi20" : "%le_lo12";
++      if (hi64_part)
++	{
++	  if (TARGET_CMODEL_EXTREME)
++	    reloc = hi_reloc ? "%le64_hi12" : "%le64_lo20";
++	  else
++	    gcc_unreachable ();
++	}
++      else
++	reloc = hi_reloc ? "%le_hi20" : "%le_lo12";
+       break;
+ 
+     case SYMBOL_TLSGD:
+-      reloc = hi_reloc ? "%gd_pc_hi20" : "%got_pc_lo12";
++      if (hi64_part)
++	{
++	  if (TARGET_CMODEL_EXTREME)
++	    reloc = hi_reloc ? "%got64_pc_hi12" : "%got64_pc_lo20";
++	  else
++	    gcc_unreachable ();
++	}
++      else
++	reloc = hi_reloc ? "%gd_pc_hi20" : "%got_pc_lo12";
+       break;
+ 
+     case SYMBOL_TLSLDM:
+-      reloc = hi_reloc ? "%ld_pc_hi20" : "%got_pc_lo12";
++      if (hi64_part)
++	{
++	  if (TARGET_CMODEL_EXTREME)
++	    reloc = hi_reloc ? "%got64_pc_hi12" : "%got64_pc_lo20";
++	  else
++	    gcc_unreachable ();
++	}
++      else
++	reloc = hi_reloc ? "%ld_pc_hi20" : "%got_pc_lo12";
+       break;
+ 
+     default:
+@@ -4637,6 +4777,8 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
+    'L'  Print the low-part relocation associated with OP.
+    'm'	Print one less than CONST_INT OP in decimal.
+    'N'	Print the inverse of the integer branch condition for comparison OP.
++   'r'  Print address 12-31bit relocation associated with OP.
++   'R'  Print address 32-51bit relocation associated with OP.
+    'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+ 	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
+    't'	Like 'T', but with the EQ/NE cases reversed
+@@ -4694,7 +4836,13 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+     case 'h':
+       if (code == HIGH)
+ 	op = XEXP (op, 0);
+-      loongarch_print_operand_reloc (file, op, true /* hi_reloc */);
++      loongarch_print_operand_reloc (file, op, false /* hi64_part */,
++				     true /* hi_reloc */);
++      break;
++
++    case 'H':
++      loongarch_print_operand_reloc (file, op, true /* hi64_part */,
++				     true /* hi_reloc */);
+       break;
+ 
+     case 'i':
+@@ -4703,7 +4851,8 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+       break;
+ 
+     case 'L':
+-      loongarch_print_operand_reloc (file, op, false /* lo_reloc */);
++      loongarch_print_operand_reloc (file, op, false /* hi64_part*/,
++				     false /* lo_reloc */);
+       break;
+ 
+     case 'm':
+@@ -4718,6 +4867,16 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 					    letter);
+       break;
+ 
++    case 'r':
++      loongarch_print_operand_reloc (file, op, false /* hi64_part */,
++				     true /* lo_reloc */);
++      break;
++
++    case 'R':
++      loongarch_print_operand_reloc (file, op, true /* hi64_part */,
++				     false /* lo_reloc */);
++      break;
++
+     case 't':
+     case 'T':
+       {
+@@ -4848,7 +5007,8 @@ loongarch_print_operand_address (FILE *file, machine_mode /* mode  */, rtx x)
+ 
+       case ADDRESS_LO_SUM:
+ 	fprintf (file, "%s,", reg_names[REGNO (addr.reg)]);
+-	loongarch_print_operand_reloc (file, addr.offset, false /* hi_reloc */);
++	loongarch_print_operand_reloc (file, addr.offset, false /* hi64_part */,
++				       false /* hi_reloc */);
+ 	return;
+ 
+       case ADDRESS_CONST_INT:
+@@ -5821,13 +5981,21 @@ loongarch_option_override_internal (struct gcc_options *opts)
+ 
+   switch (la_target.cmodel)
+     {
+-      case CMODEL_TINY_STATIC:
+       case CMODEL_EXTREME:
++	if (!TARGET_EXPLICIT_RELOCS)
++	  error ("code model %qs needs %s",
++		 "extreme", "-mexplicit-relocs");
++
+ 	if (opts->x_flag_plt)
+-	  error ("code model %qs and %qs not support %s mode",
+-		 "tiny-static", "extreme", "plt");
++	  {
++	    if (global_options_set.x_flag_plt)
++	      error ("code model %qs is not compatible with %s",
++		     "extreme", "-fplt");
++	    opts->x_flag_plt = 0;
++	  }
+ 	break;
+ 
++      case CMODEL_TINY_STATIC:
+       case CMODEL_NORMAL:
+       case CMODEL_TINY:
+       case CMODEL_LARGE:
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 8e8868de9..8fc10444c 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -60,6 +60,9 @@
+ 
+   UNSPEC_LOAD_FROM_GOT
+   UNSPEC_ORI_L_LO12
++  UNSPEC_LUI_L_HI20
++  UNSPEC_LUI_H_LO20
++  UNSPEC_LUI_H_HI12
+   UNSPEC_TLS_LOW
+ ])
+ 
+@@ -1934,16 +1937,45 @@
+   [(set_attr "type" "move")]
+ )
+ 
++(define_insn "@lui_l_hi20"
++  [(set (match_operand:P 0 "register_operand" "=r")
++	(unspec:P [(match_operand:P 1 "symbolic_operand")]
++	UNSPEC_LUI_L_HI20))]
++  ""
++  "lu12i.w\t%0,%r1"
++  [(set_attr "type" "move")]
++)
++
+ (define_insn "@ori_l_lo12"
+   [(set (match_operand:P 0 "register_operand" "=r")
+ 	(unspec:P [(match_operand:P 1 "register_operand" "r")
+-		    (match_operand:P 2 "symbolic_operand")]
++		   (match_operand:P 2 "symbolic_operand")]
+ 	UNSPEC_ORI_L_LO12))]
+   ""
+   "ori\t%0,%1,%L2"
+   [(set_attr "type" "move")]
+ )
+ 
++(define_insn "lui_h_lo20"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(unspec:DI [(match_operand:DI 1 "register_operand" "0")
++		    (match_operand:DI 2 "symbolic_operand")]
++	UNSPEC_LUI_H_LO20))]
++  "TARGET_64BIT"
++  "lu32i.d\t%0,%R2"
++  [(set_attr "type" "move")]
++)
++
++(define_insn "lui_h_hi12"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(unspec:DI [(match_operand:DI 1 "register_operand" "r")
++		    (match_operand:DI 2 "symbolic_operand")]
++	UNSPEC_LUI_H_HI12))]
++  "TARGET_64BIT"
++  "lu52i.d\t%0,%1,%H2"
++  [(set_attr "type" "move")]
++)
++
+ ;; Convert floating-point numbers to integers
+ (define_insn "frint_"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index cd3528c7c..e38c6fbdd 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -111,7 +111,7 @@
+   (match_code "const,symbol_ref,label_ref")
+ {
+   /* Split symbol to high and low if return false.
+-     If defined TARGET_CMODEL_LARGE, all symbol would be splited,
++     If defined TARGET_CMODEL_EXTREME, all symbol would be splited,
+      else if offset is not zero, the symbol would be splited.  */
+ 
+   enum loongarch_symbol_type symbol_type;
+@@ -126,10 +126,13 @@
+   switch (symbol_type)
+     {
+     case SYMBOL_PCREL:
+-      return 1;
++      if (TARGET_CMODEL_EXTREME)
++	return false;
++      else
++	return 1;
+ 
+     case SYMBOL_GOT_DISP:
+-      if (TARGET_CMODEL_LARGE || !flag_plt)
++      if (TARGET_CMODEL_EXTREME || !flag_plt)
+ 	return false;
+       else
+ 	return 1;
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 1de2b2bd4..c4f83e62a 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1006,6 +1006,7 @@ Objective-C and Objective-C++ Dialects}.
+ -mcond-move-float  -mno-cond-move-float @gol
+ -memcpy  -mno-memcpy -mstrict-align -mno-strict-align @gol
+ -mmax-inline-memcpy-size=@var{n} @gol
++-mexplicit-relocs -mno-explicit-relocs @gol
+ -mcmodel=@var{code-model}}
+ 
+ @emph{M32R/D Options}
+@@ -24617,50 +24618,19 @@ less than or equal to @var{n} bytes.  The default value of @var{n} is 1024.
+ @item -mcmodel=@var{code-model}
+ Set the code model to one of:
+ @table @samp
+-@item tiny-static
+-@itemize @bullet
+-@item
+-local symbol and global strong symbol: The data section must be within +/-2MiB addressing space.
+-The text section must be within +/-128MiB addressing space.
+-@item
+-global weak symbol: The got table must be within +/-2GiB addressing space.
+-@end itemize
+-
+-@item tiny
+-@itemize @bullet
+-@item
+-local symbol: The data section must be within +/-2MiB addressing space.
+-The text section must be within +/-128MiB
+-addressing space.
+-@item
+-global symbol: The got table must be within +/-2GiB addressing space.
+-@end itemize
++@item tiny-static (Not implemented yet)
++@item tiny (Not implemented yet)
+ 
+ @item normal
+-@itemize @bullet
+-@item
+-local symbol: The data section must be within +/-2GiB addressing space.
+-The text section must be within +/-128MiB addressing space.
+-@item
+-global symbol: The got table must be within +/-2GiB addressing space.
+-@end itemize
++The text segment must be within 128MB addressing space.  The data segment must
++be within 2GB addressing space.
+ 
+-@item large
+-@itemize @bullet
+-@item
+-local symbol: The data section must be within +/-2GiB addressing space.
+-The text section must be within +/-128GiB addressing space.
+-@item
+-global symbol: The got table must be within +/-2GiB addressing space.
+-@end itemize
++@item large (Not implemented yet)
+ 
+-@item extreme(Not implemented yet)
+-@itemize @bullet
+-@item
+-local symbol: The data and text section must be within +/-8EiB addressing space.
+-@item
+-global symbol: The data got table must be within +/-8EiB addressing space.
+-@end itemize
++@item extreme
++This mode does not limit the size of the code segment and data segment.
++The @option{-mcmodel=extreme} option is incompatible with @option{-fplt} and
++@option{-mno-explicit-relocs}.
+ @end table
+ The default code model is @code{normal}.
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-1.c
+index 01b8ea23f..76bf11b0c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-2.c
+index 4565baaec..4b468fef8 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-3.c
+index 4f669a029..dd3a4882d 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-4.c
+index 943adb640..f8158ec34 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-4.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-4.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-5.c
+index 2c2a1c8a1..37994af43 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-5.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-5.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-6.c
+index 4b0e4266e..8e366e376 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-6.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-6.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-7.c b/gcc/testsuite/gcc.target/loongarch/func-call-7.c
+index 51792711f..4177c3d96 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-7.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-7.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%got_pc_hi20\\(f\\)\n\tld\.d\t.*%got_pc_lo12\\(f\\)\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-8.c b/gcc/testsuite/gcc.target/loongarch/func-call-8.c
+index 330140d88..4254eaa16 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-8.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-8.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c
+new file mode 100644
+index 000000000..db1e0f853
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-1.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c
+new file mode 100644
+index 000000000..21bf81ae8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-extreme-2.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs -mcmodel=extreme" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i.*%got_pc_hi20.*\n\taddi\.d.*%got_pc_lo12.*\n\tlu32i\.d.*%got64_pc_lo20.*\n\tlu52i\.d.*%got64_pc_hi12.*\n\tldx\.d" } } */
++/* { dg-final { scan-assembler "test2:.*pcalau12i.*%pc_hi20.*\n\taddi\.d.*%pc_lo12.*\n\tlu32i\.d.*%pc64_lo20.*\n\tlu52i\.d.*pc64_hi12.*\n\tadd\.d" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c
+index bfcc9bc33..3ec8bd229 100644
+--- a/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c
++++ b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -mexplicit-relocs -fno-pic -O2" } */
++/* { dg-options "-mabi=lp64d -mexplicit-relocs -fno-pic -O2 -mcmodel=normal" } */
+ /* { dg-final { scan-assembler "pcalau12i.*%pc_hi20\\(\.LANCHOR0\\)\n" } } */
+ /* { dg-final { scan-assembler "addi\.d.*%pc_lo12\\(\.LANCHOR0\\)\n" } } */
+ /* { dg-final { scan-assembler "ldptr.d\t\\\$r4,.*,0\n" } } */
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-builtin-functions.patch b/LoongArch-Add-tests-for-ASX-builtin-functions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..339904a0ca26b914b3e005cc3ced1503aa2ea15a
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-builtin-functions.patch
@@ -0,0 +1,4485 @@
+From fcf63744c4ceaa60cd57ab3c431ec63f690189d4 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:59:47 +0800
+Subject: [PATCH 109/124] LoongArch: Add tests for ASX builtin functions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-builtin.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-builtin.c      | 4460 +++++++++++++++++
+ 1 file changed, 4460 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c
+new file mode 100644
+index 000000000..b1a903b4a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-builtin.c
+@@ -0,0 +1,4460 @@
++/* Test builtins for LOONGARCH LASX ASE instructions */
++/* { dg-do compile } */
++/* { dg-options "-mlasx" } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_b:.*xvsll\\.b.*lasx_xvsll_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_h:.*xvsll\\.h.*lasx_xvsll_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_w:.*xvsll\\.w.*lasx_xvsll_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsll_d:.*xvsll\\.d.*lasx_xvsll_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_b:.*xvslli\\.b.*lasx_xvslli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_h:.*xvslli\\.h.*lasx_xvslli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_w:.*xvslli\\.w.*lasx_xvslli_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslli_d:.*xvslli\\.d.*lasx_xvslli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_b:.*xvsra\\.b.*lasx_xvsra_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_h:.*xvsra\\.h.*lasx_xvsra_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_w:.*xvsra\\.w.*lasx_xvsra_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsra_d:.*xvsra\\.d.*lasx_xvsra_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_b:.*xvsrai\\.b.*lasx_xvsrai_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_h:.*xvsrai\\.h.*lasx_xvsrai_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_w:.*xvsrai\\.w.*lasx_xvsrai_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrai_d:.*xvsrai\\.d.*lasx_xvsrai_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_b:.*xvsrar\\.b.*lasx_xvsrar_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_h:.*xvsrar\\.h.*lasx_xvsrar_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_w:.*xvsrar\\.w.*lasx_xvsrar_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrar_d:.*xvsrar\\.d.*lasx_xvsrar_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_b:.*xvsrari\\.b.*lasx_xvsrari_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_h:.*xvsrari\\.h.*lasx_xvsrari_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_w:.*xvsrari\\.w.*lasx_xvsrari_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrari_d:.*xvsrari\\.d.*lasx_xvsrari_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_b:.*xvsrl\\.b.*lasx_xvsrl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_h:.*xvsrl\\.h.*lasx_xvsrl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_w:.*xvsrl\\.w.*lasx_xvsrl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrl_d:.*xvsrl\\.d.*lasx_xvsrl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_b:.*xvsrli\\.b.*lasx_xvsrli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_h:.*xvsrli\\.h.*lasx_xvsrli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_w:.*xvsrli\\.w.*lasx_xvsrli_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrli_d:.*xvsrli\\.d.*lasx_xvsrli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_b:.*xvsrlr\\.b.*lasx_xvsrlr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_h:.*xvsrlr\\.h.*lasx_xvsrlr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_w:.*xvsrlr\\.w.*lasx_xvsrlr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlr_d:.*xvsrlr\\.d.*lasx_xvsrlr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_b:.*xvsrlri\\.b.*lasx_xvsrlri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_h:.*xvsrlri\\.h.*lasx_xvsrlri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_w:.*xvsrlri\\.w.*lasx_xvsrlri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlri_d:.*xvsrlri\\.d.*lasx_xvsrlri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_b:.*xvbitclr\\.b.*lasx_xvbitclr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_h:.*xvbitclr\\.h.*lasx_xvbitclr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_w:.*xvbitclr\\.w.*lasx_xvbitclr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclr_d:.*xvbitclr\\.d.*lasx_xvbitclr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_b:.*xvbitclri\\.b.*lasx_xvbitclri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_h:.*xvbitclri\\.h.*lasx_xvbitclri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_w:.*xvbitclri\\.w.*lasx_xvbitclri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitclri_d:.*xvbitclri\\.d.*lasx_xvbitclri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_b:.*xvbitset\\.b.*lasx_xvbitset_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_h:.*xvbitset\\.h.*lasx_xvbitset_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_w:.*xvbitset\\.w.*lasx_xvbitset_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitset_d:.*xvbitset\\.d.*lasx_xvbitset_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_b:.*xvbitseti\\.b.*lasx_xvbitseti_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_h:.*xvbitseti\\.h.*lasx_xvbitseti_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_w:.*xvbitseti\\.w.*lasx_xvbitseti_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseti_d:.*xvbitseti\\.d.*lasx_xvbitseti_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_b:.*xvbitrev\\.b.*lasx_xvbitrev_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_h:.*xvbitrev\\.h.*lasx_xvbitrev_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_w:.*xvbitrev\\.w.*lasx_xvbitrev_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrev_d:.*xvbitrev\\.d.*lasx_xvbitrev_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_b:.*xvbitrevi\\.b.*lasx_xvbitrevi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_h:.*xvbitrevi\\.h.*lasx_xvbitrevi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_w:.*xvbitrevi\\.w.*lasx_xvbitrevi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitrevi_d:.*xvbitrevi\\.d.*lasx_xvbitrevi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_b:.*xvadd\\.b.*lasx_xvadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_h:.*xvadd\\.h.*lasx_xvadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_w:.*xvadd\\.w.*lasx_xvadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_d:.*xvadd\\.d.*lasx_xvadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_bu:.*xvaddi\\.bu.*lasx_xvaddi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_hu:.*xvaddi\\.hu.*lasx_xvaddi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_wu:.*xvaddi\\.wu.*lasx_xvaddi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddi_du:.*xvaddi\\.du.*lasx_xvaddi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_b:.*xvsub\\.b.*lasx_xvsub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_h:.*xvsub\\.h.*lasx_xvsub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_w:.*xvsub\\.w.*lasx_xvsub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_d:.*xvsub\\.d.*lasx_xvsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_bu:.*xvsubi\\.bu.*lasx_xvsubi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_hu:.*xvsubi\\.hu.*lasx_xvsubi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_wu:.*xvsubi\\.wu.*lasx_xvsubi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubi_du:.*xvsubi\\.du.*lasx_xvsubi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_b:.*xvmax\\.b.*lasx_xvmax_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_h:.*xvmax\\.h.*lasx_xvmax_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_w:.*xvmax\\.w.*lasx_xvmax_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_d:.*xvmax\\.d.*lasx_xvmax_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_b:.*xvmaxi\\.b.*lasx_xvmaxi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_h:.*xvmaxi\\.h.*lasx_xvmaxi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_w:.*xvmaxi\\.w.*lasx_xvmaxi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_d:.*xvmaxi\\.d.*lasx_xvmaxi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_bu:.*xvmax\\.bu.*lasx_xvmax_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_hu:.*xvmax\\.hu.*lasx_xvmax_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_wu:.*xvmax\\.wu.*lasx_xvmax_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmax_du:.*xvmax\\.du.*lasx_xvmax_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_bu:.*xvmaxi\\.bu.*lasx_xvmaxi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_hu:.*xvmaxi\\.hu.*lasx_xvmaxi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_wu:.*xvmaxi\\.wu.*lasx_xvmaxi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaxi_du:.*xvmaxi\\.du.*lasx_xvmaxi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_b:.*xvmin\\.b.*lasx_xvmin_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_h:.*xvmin\\.h.*lasx_xvmin_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_w:.*xvmin\\.w.*lasx_xvmin_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_d:.*xvmin\\.d.*lasx_xvmin_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_b:.*xvmini\\.b.*lasx_xvmini_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_h:.*xvmini\\.h.*lasx_xvmini_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_w:.*xvmini\\.w.*lasx_xvmini_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_d:.*xvmini\\.d.*lasx_xvmini_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_bu:.*xvmin\\.bu.*lasx_xvmin_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_hu:.*xvmin\\.hu.*lasx_xvmin_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_wu:.*xvmin\\.wu.*lasx_xvmin_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmin_du:.*xvmin\\.du.*lasx_xvmin_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_bu:.*xvmini\\.bu.*lasx_xvmini_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_hu:.*xvmini\\.hu.*lasx_xvmini_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_wu:.*xvmini\\.wu.*lasx_xvmini_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmini_du:.*xvmini\\.du.*lasx_xvmini_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_b:.*xvseq\\.b.*lasx_xvseq_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_h:.*xvseq\\.h.*lasx_xvseq_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_w:.*xvseq\\.w.*lasx_xvseq_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseq_d:.*xvseq\\.d.*lasx_xvseq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_b:.*xvseqi\\.b.*lasx_xvseqi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_h:.*xvseqi\\.h.*lasx_xvseqi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_w:.*xvseqi\\.w.*lasx_xvseqi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvseqi_d:.*xvseqi\\.d.*lasx_xvseqi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_b:.*xvslt\\.b.*lasx_xvslt_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_h:.*xvslt\\.h.*lasx_xvslt_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_w:.*xvslt\\.w.*lasx_xvslt_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_d:.*xvslt\\.d.*lasx_xvslt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_b:.*xvslti\\.b.*lasx_xvslti_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_h:.*xvslti\\.h.*lasx_xvslti_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_w:.*xvslti\\.w.*lasx_xvslti_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_d:.*xvslti\\.d.*lasx_xvslti_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_bu:.*xvslt\\.bu.*lasx_xvslt_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_hu:.*xvslt\\.hu.*lasx_xvslt_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_wu:.*xvslt\\.wu.*lasx_xvslt_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslt_du:.*xvslt\\.du.*lasx_xvslt_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_bu:.*xvslti\\.bu.*lasx_xvslti_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_hu:.*xvslti\\.hu.*lasx_xvslti_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_wu:.*xvslti\\.wu.*lasx_xvslti_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslti_du:.*xvslti\\.du.*lasx_xvslti_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_b:.*xvsle\\.b.*lasx_xvsle_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_h:.*xvsle\\.h.*lasx_xvsle_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_w:.*xvsle\\.w.*lasx_xvsle_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_d:.*xvsle\\.d.*lasx_xvsle_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_b:.*xvslei\\.b.*lasx_xvslei_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_h:.*xvslei\\.h.*lasx_xvslei_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_w:.*xvslei\\.w.*lasx_xvslei_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_d:.*xvslei\\.d.*lasx_xvslei_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_bu:.*xvsle\\.bu.*lasx_xvsle_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_hu:.*xvsle\\.hu.*lasx_xvsle_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_wu:.*xvsle\\.wu.*lasx_xvsle_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsle_du:.*xvsle\\.du.*lasx_xvsle_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_bu:.*xvslei\\.bu.*lasx_xvslei_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_hu:.*xvslei\\.hu.*lasx_xvslei_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_wu:.*xvslei\\.wu.*lasx_xvslei_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvslei_du:.*xvslei\\.du.*lasx_xvslei_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_b:.*xvsat\\.b.*lasx_xvsat_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_h:.*xvsat\\.h.*lasx_xvsat_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_w:.*xvsat\\.w.*lasx_xvsat_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_d:.*xvsat\\.d.*lasx_xvsat_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_bu:.*xvsat\\.bu.*lasx_xvsat_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_hu:.*xvsat\\.hu.*lasx_xvsat_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_wu:.*xvsat\\.wu.*lasx_xvsat_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsat_du:.*xvsat\\.du.*lasx_xvsat_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_b:.*xvadda\\.b.*lasx_xvadda_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_h:.*xvadda\\.h.*lasx_xvadda_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_w:.*xvadda\\.w.*lasx_xvadda_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadda_d:.*xvadda\\.d.*lasx_xvadda_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_b:.*xvsadd\\.b.*lasx_xvsadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_h:.*xvsadd\\.h.*lasx_xvsadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_w:.*xvsadd\\.w.*lasx_xvsadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_d:.*xvsadd\\.d.*lasx_xvsadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_bu:.*xvsadd\\.bu.*lasx_xvsadd_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_hu:.*xvsadd\\.hu.*lasx_xvsadd_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_wu:.*xvsadd\\.wu.*lasx_xvsadd_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsadd_du:.*xvsadd\\.du.*lasx_xvsadd_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_b:.*xvavg\\.b.*lasx_xvavg_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_h:.*xvavg\\.h.*lasx_xvavg_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_w:.*xvavg\\.w.*lasx_xvavg_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_d:.*xvavg\\.d.*lasx_xvavg_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_bu:.*xvavg\\.bu.*lasx_xvavg_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_hu:.*xvavg\\.hu.*lasx_xvavg_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_wu:.*xvavg\\.wu.*lasx_xvavg_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavg_du:.*xvavg\\.du.*lasx_xvavg_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_b:.*xvavgr\\.b.*lasx_xvavgr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_h:.*xvavgr\\.h.*lasx_xvavgr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_w:.*xvavgr\\.w.*lasx_xvavgr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_d:.*xvavgr\\.d.*lasx_xvavgr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_bu:.*xvavgr\\.bu.*lasx_xvavgr_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_hu:.*xvavgr\\.hu.*lasx_xvavgr_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_wu:.*xvavgr\\.wu.*lasx_xvavgr_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvavgr_du:.*xvavgr\\.du.*lasx_xvavgr_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_b:.*xvssub\\.b.*lasx_xvssub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_h:.*xvssub\\.h.*lasx_xvssub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_w:.*xvssub\\.w.*lasx_xvssub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_d:.*xvssub\\.d.*lasx_xvssub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_bu:.*xvssub\\.bu.*lasx_xvssub_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_hu:.*xvssub\\.hu.*lasx_xvssub_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_wu:.*xvssub\\.wu.*lasx_xvssub_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssub_du:.*xvssub\\.du.*lasx_xvssub_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_b:.*xvabsd\\.b.*lasx_xvabsd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_h:.*xvabsd\\.h.*lasx_xvabsd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_w:.*xvabsd\\.w.*lasx_xvabsd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_d:.*xvabsd\\.d.*lasx_xvabsd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_bu:.*xvabsd\\.bu.*lasx_xvabsd_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_hu:.*xvabsd\\.hu.*lasx_xvabsd_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_wu:.*xvabsd\\.wu.*lasx_xvabsd_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvabsd_du:.*xvabsd\\.du.*lasx_xvabsd_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_b:.*xvmul\\.b.*lasx_xvmul_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_h:.*xvmul\\.h.*lasx_xvmul_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_w:.*xvmul\\.w.*lasx_xvmul_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmul_d:.*xvmul\\.d.*lasx_xvmul_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_b:.*xvmadd\\.b.*lasx_xvmadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_h:.*xvmadd\\.h.*lasx_xvmadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_w:.*xvmadd\\.w.*lasx_xvmadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmadd_d:.*xvmadd\\.d.*lasx_xvmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_b:.*xvmsub\\.b.*lasx_xvmsub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_h:.*xvmsub\\.h.*lasx_xvmsub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_w:.*xvmsub\\.w.*lasx_xvmsub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsub_d:.*xvmsub\\.d.*lasx_xvmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_b:.*xvdiv\\.b.*lasx_xvdiv_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_h:.*xvdiv\\.h.*lasx_xvdiv_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_w:.*xvdiv\\.w.*lasx_xvdiv_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_d:.*xvdiv\\.d.*lasx_xvdiv_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_bu:.*xvdiv\\.bu.*lasx_xvdiv_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_hu:.*xvdiv\\.hu.*lasx_xvdiv_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_wu:.*xvdiv\\.wu.*lasx_xvdiv_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvdiv_du:.*xvdiv\\.du.*lasx_xvdiv_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_h_b:.*xvhaddw\\.h\\.b.*lasx_xvhaddw_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_w_h:.*xvhaddw\\.w\\.h.*lasx_xvhaddw_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_d_w:.*xvhaddw\\.d\\.w.*lasx_xvhaddw_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_hu_bu:.*xvhaddw\\.hu\\.bu.*lasx_xvhaddw_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_wu_hu:.*xvhaddw\\.wu\\.hu.*lasx_xvhaddw_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_du_wu:.*xvhaddw\\.du\\.wu.*lasx_xvhaddw_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_h_b:.*xvhsubw\\.h\\.b.*lasx_xvhsubw_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_w_h:.*xvhsubw\\.w\\.h.*lasx_xvhsubw_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_d_w:.*xvhsubw\\.d\\.w.*lasx_xvhsubw_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_hu_bu:.*xvhsubw\\.hu\\.bu.*lasx_xvhsubw_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_wu_hu:.*xvhsubw\\.wu\\.hu.*lasx_xvhsubw_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_du_wu:.*xvhsubw\\.du\\.wu.*lasx_xvhsubw_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_b:.*xvmod\\.b.*lasx_xvmod_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_h:.*xvmod\\.h.*lasx_xvmod_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_w:.*xvmod\\.w.*lasx_xvmod_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_d:.*xvmod\\.d.*lasx_xvmod_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_bu:.*xvmod\\.bu.*lasx_xvmod_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_hu:.*xvmod\\.hu.*lasx_xvmod_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_wu:.*xvmod\\.wu.*lasx_xvmod_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmod_du:.*xvmod\\.du.*lasx_xvmod_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_b:.*xvrepl128vei\\.b.*lasx_xvrepl128vei_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_h:.*xvrepl128vei\\.h.*lasx_xvrepl128vei_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_w:.*xvrepl128vei\\.w.*lasx_xvrepl128vei_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepl128vei_d:.*xvrepl128vei\\.d.*lasx_xvrepl128vei_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickev_b:.*xvpickev\\.b.*lasx_xvpickev_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickev_h:.*xvpickev\\.h.*lasx_xvpickev_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickev_w:.*xvpickev\\.w.*lasx_xvpickev_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickev_d:.*xvilvl\\.d.*lasx_xvpickev_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickod_b:.*xvpickod\\.b.*lasx_xvpickod_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickod_h:.*xvpickod\\.h.*lasx_xvpickod_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickod_w:.*xvpickod\\.w.*lasx_xvpickod_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickod_d:.*xvilvh\\.d.*lasx_xvpickod_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvh_b:.*xvilvh\\.b.*lasx_xvilvh_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvh_h:.*xvilvh\\.h.*lasx_xvilvh_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvh_w:.*xvilvh\\.w.*lasx_xvilvh_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvh_d:.*xvilvh\\.d.*lasx_xvilvh_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvl_b:.*xvilvl\\.b.*lasx_xvilvl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvl_h:.*xvilvl\\.h.*lasx_xvilvl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvl_w:.*xvilvl\\.w.*lasx_xvilvl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvilvl_d:.*xvilvl\\.d.*lasx_xvilvl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackev_b:.*xvpackev\\.b.*lasx_xvpackev_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackev_h:.*xvpackev\\.h.*lasx_xvpackev_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackev_w:.*xvpackev\\.w.*lasx_xvpackev_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackev_d:.*xvilvl\\.d.*lasx_xvpackev_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackod_b:.*xvpackod\\.b.*lasx_xvpackod_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackod_h:.*xvpackod\\.h.*lasx_xvpackod_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackod_w:.*xvpackod\\.w.*lasx_xvpackod_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpackod_d:.*xvilvh\\.d.*lasx_xvpackod_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf_b:.*xvshuf\\.b.*lasx_xvshuf_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf_h:.*xvshuf\\.h.*lasx_xvshuf_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf_w:.*xvshuf\\.w.*lasx_xvshuf_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf_d:.*xvshuf\\.d.*lasx_xvshuf_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvand_v:.*xvand\\.v.*lasx_xvand_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvandi_b:.*xvandi\\.b.*lasx_xvandi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvor_v:.*xvor\\.v.*lasx_xvor_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvori_b:.*xvbitseti\\.b.*lasx_xvori_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvnor_v:.*xvnor\\.v.*lasx_xvnor_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvnori_b:.*xvnori\\.b.*lasx_xvnori_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvxor_v:.*xvxor\\.v.*lasx_xvxor_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvxori_b:.*xvbitrevi\\.b.*lasx_xvxori_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitsel_v:.*xvbitsel\\.v.*lasx_xvbitsel_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbitseli_b:.*xvbitseli\\.b.*lasx_xvbitseli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_b:.*xvshuf4i\\.b.*lasx_xvshuf4i_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_h:.*xvshuf4i\\.h.*lasx_xvshuf4i_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_w:.*xvshuf4i\\.w.*lasx_xvshuf4i_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_b:.*xvreplgr2vr\\.b.*lasx_xvreplgr2vr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_h:.*xvreplgr2vr\\.h.*lasx_xvreplgr2vr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_w:.*xvreplgr2vr\\.w.*lasx_xvreplgr2vr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplgr2vr_d:.*xvreplgr2vr\\.d.*lasx_xvreplgr2vr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpcnt_b:.*xvpcnt\\.b.*lasx_xvpcnt_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpcnt_h:.*xvpcnt\\.h.*lasx_xvpcnt_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpcnt_w:.*xvpcnt\\.w.*lasx_xvpcnt_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpcnt_d:.*xvpcnt\\.d.*lasx_xvpcnt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclo_b:.*xvclo\\.b.*lasx_xvclo_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclo_h:.*xvclo\\.h.*lasx_xvclo_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclo_w:.*xvclo\\.w.*lasx_xvclo_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclo_d:.*xvclo\\.d.*lasx_xvclo_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclz_b:.*xvclz\\.b.*lasx_xvclz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclz_h:.*xvclz\\.h.*lasx_xvclz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclz_w:.*xvclz\\.w.*lasx_xvclz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvclz_d:.*xvclz\\.d.*lasx_xvclz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfadd_s:.*xvfadd\\.s.*lasx_xvfadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfadd_d:.*xvfadd\\.d.*lasx_xvfadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfsub_s:.*xvfsub\\.s.*lasx_xvfsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfsub_d:.*xvfsub\\.d.*lasx_xvfsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmul_s:.*xvfmul\\.s.*lasx_xvfmul_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmul_d:.*xvfmul\\.d.*lasx_xvfmul_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfdiv_s:.*xvfdiv\\.s.*lasx_xvfdiv_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfdiv_d:.*xvfdiv\\.d.*lasx_xvfdiv_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcvt_h_s:.*xvfcvt\\.h\\.s.*lasx_xvfcvt_h_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcvt_s_d:.*xvfcvt\\.s\\.d.*lasx_xvfcvt_s_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmin_s:.*xvfmin\\.s.*lasx_xvfmin_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmin_d:.*xvfmin\\.d.*lasx_xvfmin_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmina_s:.*xvfmina\\.s.*lasx_xvfmina_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmina_d:.*xvfmina\\.d.*lasx_xvfmina_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmax_s:.*xvfmax\\.s.*lasx_xvfmax_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmax_d:.*xvfmax\\.d.*lasx_xvfmax_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmaxa_s:.*xvfmaxa\\.s.*lasx_xvfmaxa_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmaxa_d:.*xvfmaxa\\.d.*lasx_xvfmaxa_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfclass_s:.*xvfclass\\.s.*lasx_xvfclass_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfclass_d:.*xvfclass\\.d.*lasx_xvfclass_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfsqrt_s:.*xvfsqrt\\.s.*lasx_xvfsqrt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfsqrt_d:.*xvfsqrt\\.d.*lasx_xvfsqrt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrecip_s:.*xvfrecip\\.s.*lasx_xvfrecip_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrecip_d:.*xvfrecip\\.d.*lasx_xvfrecip_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrint_s:.*xvfrint\\.s.*lasx_xvfrint_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrint_d:.*xvfrint\\.d.*lasx_xvfrint_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrsqrt_s:.*xvfrsqrt\\.s.*lasx_xvfrsqrt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrsqrt_d:.*xvfrsqrt\\.d.*lasx_xvfrsqrt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvflogb_s:.*xvflogb\\.s.*lasx_xvflogb_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvflogb_d:.*xvflogb\\.d.*lasx_xvflogb_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcvth_s_h:.*xvfcvth\\.s\\.h.*lasx_xvfcvth_s_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcvth_d_s:.*xvfcvth\\.d\\.s.*lasx_xvfcvth_d_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcvtl_s_h:.*xvfcvtl\\.s\\.h.*lasx_xvfcvtl_s_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcvtl_d_s:.*xvfcvtl\\.d\\.s.*lasx_xvfcvtl_d_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftint_w_s:.*xvftint\\.w\\.s.*lasx_xvftint_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftint_l_d:.*xvftint\\.l\\.d.*lasx_xvftint_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftint_wu_s:.*xvftint\\.wu\\.s.*lasx_xvftint_wu_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftint_lu_d:.*xvftint\\.lu\\.d.*lasx_xvftint_lu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_w_s:.*xvftintrz\\.w\\.s.*lasx_xvftintrz_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_l_d:.*xvftintrz\\.l\\.d.*lasx_xvftintrz_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_wu_s:.*xvftintrz\\.wu\\.s.*lasx_xvftintrz_wu_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_lu_d:.*xvftintrz\\.lu\\.d.*lasx_xvftintrz_lu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_s_w:.*xvffint\\.s\\.w.*lasx_xvffint_s_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_d_l:.*xvffint\\.d\\.l.*lasx_xvffint_d_l" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_s_wu:.*xvffint\\.s\\.wu.*lasx_xvffint_s_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_d_lu:.*xvffint\\.d\\.lu.*lasx_xvffint_d_lu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_b:.*xvreplve\\.b.*lasx_xvreplve_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_h:.*xvreplve\\.h.*lasx_xvreplve_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_w:.*xvreplve\\.w.*lasx_xvreplve_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve_d:.*xvreplve\\.d.*lasx_xvreplve_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpermi_w:.*xvpermi\\.w.*lasx_xvpermi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvandn_v:.*xvandn\\.v.*lasx_xvandn_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_b:.*xvneg\\.b.*lasx_xvneg_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_h:.*xvneg\\.h.*lasx_xvneg_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_w:.*xvneg\\.w.*lasx_xvneg_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvneg_d:.*xvneg\\.d.*lasx_xvneg_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_b:.*xvmuh\\.b.*lasx_xvmuh_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_h:.*xvmuh\\.h.*lasx_xvmuh_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_w:.*xvmuh\\.w.*lasx_xvmuh_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_d:.*xvmuh\\.d.*lasx_xvmuh_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_bu:.*xvmuh\\.bu.*lasx_xvmuh_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_hu:.*xvmuh\\.hu.*lasx_xvmuh_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_wu:.*xvmuh\\.wu.*lasx_xvmuh_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmuh_du:.*xvmuh\\.du.*lasx_xvmuh_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_h_b:.*xvsllwil\\.h\\.b.*lasx_xvsllwil_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_w_h:.*xvsllwil\\.w\\.h.*lasx_xvsllwil_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_d_w:.*xvsllwil\\.d\\.w.*lasx_xvsllwil_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_hu_bu:.*xvsllwil\\.hu\\.bu.*lasx_xvsllwil_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_wu_hu:.*xvsllwil\\.wu\\.hu.*lasx_xvsllwil_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsllwil_du_wu:.*xvsllwil\\.du\\.wu.*lasx_xvsllwil_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsran_b_h:.*xvsran\\.b\\.h.*lasx_xvsran_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsran_h_w:.*xvsran\\.h\\.w.*lasx_xvsran_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsran_w_d:.*xvsran\\.w\\.d.*lasx_xvsran_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_b_h:.*xvssran\\.b\\.h.*lasx_xvssran_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_h_w:.*xvssran\\.h\\.w.*lasx_xvssran_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_w_d:.*xvssran\\.w\\.d.*lasx_xvssran_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_bu_h:.*xvssran\\.bu\\.h.*lasx_xvssran_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_hu_w:.*xvssran\\.hu\\.w.*lasx_xvssran_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssran_wu_d:.*xvssran\\.wu\\.d.*lasx_xvssran_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarn_b_h:.*xvsrarn\\.b\\.h.*lasx_xvsrarn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarn_h_w:.*xvsrarn\\.h\\.w.*lasx_xvsrarn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarn_w_d:.*xvsrarn\\.w\\.d.*lasx_xvsrarn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_b_h:.*xvssrarn\\.b\\.h.*lasx_xvssrarn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_h_w:.*xvssrarn\\.h\\.w.*lasx_xvssrarn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_w_d:.*xvssrarn\\.w\\.d.*lasx_xvssrarn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_bu_h:.*xvssrarn\\.bu\\.h.*lasx_xvssrarn_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_hu_w:.*xvssrarn\\.hu\\.w.*lasx_xvssrarn_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarn_wu_d:.*xvssrarn\\.wu\\.d.*lasx_xvssrarn_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrln_b_h:.*xvsrln\\.b\\.h.*lasx_xvsrln_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrln_h_w:.*xvsrln\\.h\\.w.*lasx_xvsrln_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrln_w_d:.*xvsrln\\.w\\.d.*lasx_xvsrln_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_bu_h:.*xvssrln\\.bu\\.h.*lasx_xvssrln_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_hu_w:.*xvssrln\\.hu\\.w.*lasx_xvssrln_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_wu_d:.*xvssrln\\.wu\\.d.*lasx_xvssrln_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_b_h:.*xvsrlrn\\.b\\.h.*lasx_xvsrlrn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_h_w:.*xvsrlrn\\.h\\.w.*lasx_xvsrlrn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrn_w_d:.*xvsrlrn\\.w\\.d.*lasx_xvsrlrn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_bu_h:.*xvssrlrn\\.bu\\.h.*lasx_xvssrlrn_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_hu_w:.*xvssrlrn\\.hu\\.w.*lasx_xvssrlrn_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_wu_d:.*xvssrlrn\\.wu\\.d.*lasx_xvssrlrn_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstpi_b:.*xvfrstpi\\.b.*lasx_xvfrstpi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstpi_h:.*xvfrstpi\\.h.*lasx_xvfrstpi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstp_b:.*xvfrstp\\.b.*lasx_xvfrstp_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrstp_h:.*xvfrstp\\.h.*lasx_xvfrstp_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvshuf4i_d:.*xvshuf4i\\.d.*lasx_xvshuf4i_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbsrl_v:.*xvbsrl\\.v.*lasx_xvbsrl_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvbsll_v:.*xvbsll\\.v.*lasx_xvbsll_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_b:.*xvextrins\\.b.*lasx_xvextrins_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_h:.*xvextrins\\.h.*lasx_xvextrins_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_w:.*xvextrins\\.w.*lasx_xvextrins_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextrins_d:.*xvextrins\\.d.*lasx_xvextrins_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_b:.*xvmskltz\\.b.*lasx_xvmskltz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_h:.*xvmskltz\\.h.*lasx_xvmskltz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_w:.*xvmskltz\\.w.*lasx_xvmskltz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskltz_d:.*xvmskltz\\.d.*lasx_xvmskltz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_b:.*xvsigncov\\.b.*lasx_xvsigncov_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_h:.*xvsigncov\\.h.*lasx_xvsigncov_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_w:.*xvsigncov\\.w.*lasx_xvsigncov_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsigncov_d:.*xvsigncov\\.d.*lasx_xvsigncov_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmadd_s:.*xvfmadd\\.s.*lasx_xvfmadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmadd_d:.*xvfmadd\\.d.*lasx_xvfmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmsub_s:.*xvfmsub\\.s.*lasx_xvfmsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfmsub_d:.*xvfmsub\\.d.*lasx_xvfmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmadd_s:.*xvfnmadd\\.s.*lasx_xvfnmadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmadd_d:.*xvfnmadd\\.d.*lasx_xvfnmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmsub_s:.*xvfnmsub\\.s.*lasx_xvfnmsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfnmsub_d:.*xvfnmsub\\.d.*lasx_xvfnmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_s:.*xvftintrne\\.w\\.s.*lasx_xvftintrne_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrne_l_d:.*xvftintrne\\.l\\.d.*lasx_xvftintrne_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_s:.*xvftintrp\\.w\\.s.*lasx_xvftintrp_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrp_l_d:.*xvftintrp\\.l\\.d.*lasx_xvftintrp_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_s:.*xvftintrm\\.w\\.s.*lasx_xvftintrm_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrm_l_d:.*xvftintrm\\.l\\.d.*lasx_xvftintrm_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftint_w_d:.*xvftint\\.w\\.d.*lasx_xvftint_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffint_s_l:.*xvffint\\.s\\.l.*lasx_xvffint_s_l" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrz_w_d:.*xvftintrz\\.w\\.d.*lasx_xvftintrz_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrp_w_d:.*xvftintrp\\.w\\.d.*lasx_xvftintrp_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrm_w_d:.*xvftintrm\\.w\\.d.*lasx_xvftintrm_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrne_w_d:.*xvftintrne\\.w\\.d.*lasx_xvftintrne_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftinth_l_s:.*xvftinth\\.l\\.s.*lasx_xvftinth_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintl_l_s:.*xvftintl\\.l\\.s.*lasx_xvftintl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffinth_d_w:.*xvffinth\\.d\\.w.*lasx_xvffinth_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvffintl_d_w:.*xvffintl\\.d\\.w.*lasx_xvffintl_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrzh_l_s:.*xvftintrzh\\.l\\.s.*lasx_xvftintrzh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrzl_l_s:.*xvftintrzl\\.l\\.s.*lasx_xvftintrzl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrph_l_s:.*xvftintrph\\.l\\.s.*lasx_xvftintrph_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrpl_l_s:.*xvftintrpl\\.l\\.s.*lasx_xvftintrpl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrmh_l_s:.*xvftintrmh\\.l\\.s.*lasx_xvftintrmh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrml_l_s:.*xvftintrml\\.l\\.s.*lasx_xvftintrml_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrneh_l_s:.*xvftintrneh\\.l\\.s.*lasx_xvftintrneh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvftintrnel_l_s:.*xvftintrnel\\.l\\.s.*lasx_xvftintrnel_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrne_s:.*xvfrintrne\\.s.*lasx_xvfrintrne_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrne_d:.*xvfrintrne\\.d.*lasx_xvfrintrne_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrz_s:.*xvfrintrz\\.s.*lasx_xvfrintrz_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrz_d:.*xvfrintrz\\.d.*lasx_xvfrintrz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrp_s:.*xvfrintrp\\.s.*lasx_xvfrintrp_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrp_d:.*xvfrintrp\\.d.*lasx_xvfrintrp_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrm_s:.*xvfrintrm\\.s.*lasx_xvfrintrm_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfrintrm_d:.*xvfrintrm\\.d.*lasx_xvfrintrm_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvld:.*xvld.*lasx_xvld" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvst:.*xvst.*lasx_xvst" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_b:.*xvstelm\\.b.*lasx_xvstelm_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_h:.*xvstelm\\.h.*lasx_xvstelm_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_w:.*xvstelm\\.w.*lasx_xvstelm_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstelm_d:.*xvstelm\\.d.*lasx_xvstelm_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsve0_w:.*xvinsve0\\.w.*lasx_xvinsve0_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsve0_d:.*xvinsve0\\.d.*lasx_xvinsve0_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve_w:.*xvpickve\\.w.*lasx_xvpickve_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve_d:.*xvpickve\\.d.*lasx_xvpickve_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_b_h:.*xvssrlrn\\.b\\.h.*lasx_xvssrlrn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_h_w:.*xvssrlrn\\.h\\.w.*lasx_xvssrlrn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrn_w_d:.*xvssrlrn\\.w\\.d.*lasx_xvssrlrn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_b_h:.*xvssrln\\.b\\.h.*lasx_xvssrln_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_h_w:.*xvssrln\\.h\\.w.*lasx_xvssrln_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrln_w_d:.*xvssrln\\.w\\.d.*lasx_xvssrln_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvorn_v:.*xvorn\\.v.*lasx_xvorn_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldi:.*xvldi.*lasx_xvldi" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldx:.*xvldx.*lasx_xvldx" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvstx:.*xvstx.*lasx_xvstx" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextl_qu_du:.*xvextl\\.qu\\.du.*lasx_xvextl_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_w:.*xvinsgr2vr\\.w.*lasx_xvinsgr2vr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvinsgr2vr_d:.*xvinsgr2vr\\.d.*lasx_xvinsgr2vr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_b:.*xvreplve0\\.b.*lasx_xvreplve0_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_h:.*xvreplve0\\.h.*lasx_xvreplve0_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_w:.*xvreplve0\\.w.*lasx_xvreplve0_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_d:.*xvreplve0\\.d.*lasx_xvreplve0_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvreplve0_q:.*xvreplve0\\.q.*lasx_xvreplve0_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_h_b:.*vext2xv\\.h\\.b.*lasx_vext2xv_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_w_h:.*vext2xv\\.w\\.h.*lasx_vext2xv_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_w:.*vext2xv\\.d\\.w.*lasx_vext2xv_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_w_b:.*vext2xv\\.w\\.b.*lasx_vext2xv_w_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_h:.*vext2xv\\.d\\.h.*lasx_vext2xv_d_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_d_b:.*vext2xv\\.d\\.b.*lasx_vext2xv_d_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_hu_bu:.*vext2xv\\.hu\\.bu.*lasx_vext2xv_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_hu:.*vext2xv\\.wu\\.hu.*lasx_vext2xv_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_wu:.*vext2xv\\.du\\.wu.*lasx_vext2xv_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_wu_bu:.*vext2xv\\.wu\\.bu.*lasx_vext2xv_wu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_hu:.*vext2xv\\.du\\.hu.*lasx_vext2xv_du_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_vext2xv_du_bu:.*vext2xv\\.du\\.bu.*lasx_vext2xv_du_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpermi_q:.*xvpermi\\.q.*lasx_xvpermi_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpermi_d:.*xvpermi\\.d.*lasx_xvpermi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvperm_w:.*xvperm\\.w.*lasx_xvperm_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_b:.*xvldrepl\\.b.*lasx_xvldrepl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_h:.*xvldrepl\\.h.*lasx_xvldrepl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_w:.*xvldrepl\\.w.*lasx_xvldrepl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvldrepl_d:.*xvldrepl\\.d.*lasx_xvldrepl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_w:.*xvpickve2gr\\.w.*lasx_xvpickve2gr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_wu:.*xvpickve2gr\\.wu.*lasx_xvpickve2gr_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_d:.*xvpickve2gr\\.d.*lasx_xvpickve2gr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve2gr_du:.*xvpickve2gr\\.du.*lasx_xvpickve2gr_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_d:.*xvaddwev\\.q\\.d.*lasx_xvaddwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_w:.*xvaddwev\\.d\\.w.*lasx_xvaddwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_h:.*xvaddwev\\.w\\.h.*lasx_xvaddwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_b:.*xvaddwev\\.h\\.b.*lasx_xvaddwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du:.*xvaddwev\\.q\\.du.*lasx_xvaddwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_wu:.*xvaddwev\\.d\\.wu.*lasx_xvaddwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu:.*xvaddwev\\.w\\.hu.*lasx_xvaddwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu:.*xvaddwev\\.h\\.bu.*lasx_xvaddwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_q_d:.*xvsubwev\\.q\\.d.*lasx_xvsubwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_w:.*xvsubwev\\.d\\.w.*lasx_xvsubwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_h:.*xvsubwev\\.w\\.h.*lasx_xvsubwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_b:.*xvsubwev\\.h\\.b.*lasx_xvsubwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_q_du:.*xvsubwev\\.q\\.du.*lasx_xvsubwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_d_wu:.*xvsubwev\\.d\\.wu.*lasx_xvsubwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_w_hu:.*xvsubwev\\.w\\.hu.*lasx_xvsubwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwev_h_bu:.*xvsubwev\\.h\\.bu.*lasx_xvsubwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_d:.*xvmulwev\\.q\\.d.*lasx_xvmulwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_w:.*xvmulwev\\.d\\.w.*lasx_xvmulwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_h:.*xvmulwev\\.w\\.h.*lasx_xvmulwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_b:.*xvmulwev\\.h\\.b.*lasx_xvmulwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du:.*xvmulwev\\.q\\.du.*lasx_xvmulwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu:.*xvmulwev\\.d\\.wu.*lasx_xvmulwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu:.*xvmulwev\\.w\\.hu.*lasx_xvmulwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu:.*xvmulwev\\.h\\.bu.*lasx_xvmulwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_d:.*xvaddwod\\.q\\.d.*lasx_xvaddwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_w:.*xvaddwod\\.d\\.w.*lasx_xvaddwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_h:.*xvaddwod\\.w\\.h.*lasx_xvaddwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_b:.*xvaddwod\\.h\\.b.*lasx_xvaddwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du:.*xvaddwod\\.q\\.du.*lasx_xvaddwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu:.*xvaddwod\\.d\\.wu.*lasx_xvaddwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_hu:.*xvaddwod\\.w\\.hu.*lasx_xvaddwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu:.*xvaddwod\\.h\\.bu.*lasx_xvaddwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_d:.*xvsubwod\\.q\\.d.*lasx_xvsubwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_w:.*xvsubwod\\.d\\.w.*lasx_xvsubwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_w_h:.*xvsubwod\\.w\\.h.*lasx_xvsubwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_b:.*xvsubwod\\.h\\.b.*lasx_xvsubwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_q_du:.*xvsubwod\\.q\\.du.*lasx_xvsubwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_d_wu:.*xvsubwod\\.d\\.wu.*lasx_xvsubwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_w_hu:.*xvsubwod\\.w\\.hu.*lasx_xvsubwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsubwod_h_bu:.*xvsubwod\\.h\\.bu.*lasx_xvsubwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_d:.*xvmulwod\\.q\\.d.*lasx_xvmulwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_w:.*xvmulwod\\.d\\.w.*lasx_xvmulwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_h:.*xvmulwod\\.w\\.h.*lasx_xvmulwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_b:.*xvmulwod\\.h\\.b.*lasx_xvmulwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du:.*xvmulwod\\.q\\.du.*lasx_xvmulwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu:.*xvmulwod\\.d\\.wu.*lasx_xvmulwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu:.*xvmulwod\\.w\\.hu.*lasx_xvmulwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu:.*xvmulwod\\.h\\.bu.*lasx_xvmulwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_d_wu_w:.*xvaddwev\\.d\\.wu\\.w.*lasx_xvaddwev_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_w_hu_h:.*xvaddwev\\.w\\.hu\\.h.*lasx_xvaddwev_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_h_bu_b:.*xvaddwev\\.h\\.bu\\.b.*lasx_xvaddwev_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_d_wu_w:.*xvmulwev\\.d\\.wu\\.w.*lasx_xvmulwev_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_w_hu_h:.*xvmulwev\\.w\\.hu\\.h.*lasx_xvmulwev_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_h_bu_b:.*xvmulwev\\.h\\.bu\\.b.*lasx_xvmulwev_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_d_wu_w:.*xvaddwod\\.d\\.wu\\.w.*lasx_xvaddwod_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_w_hu_h:.*xvaddwod\\.w\\.hu\\.h.*lasx_xvaddwod_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_h_bu_b:.*xvaddwod\\.h\\.bu\\.b.*lasx_xvaddwod_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_d_wu_w:.*xvmulwod\\.d\\.wu\\.w.*lasx_xvmulwod_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_w_hu_h:.*xvmulwod\\.w\\.hu\\.h.*lasx_xvmulwod_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_h_bu_b:.*xvmulwod\\.h\\.bu\\.b.*lasx_xvmulwod_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_q_d:.*xvhaddw\\.q\\.d.*lasx_xvhaddw_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhaddw_qu_du:.*xvhaddw\\.qu\\.du.*lasx_xvhaddw_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_q_d:.*xvhsubw\\.q\\.d.*lasx_xvhsubw_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvhsubw_qu_du:.*xvhsubw\\.qu\\.du.*lasx_xvhsubw_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_d:.*xvmaddwev\\.q\\.d.*lasx_xvmaddwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_w:.*xvmaddwev\\.d\\.w.*lasx_xvmaddwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_h:.*xvmaddwev\\.w\\.h.*lasx_xvmaddwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_b:.*xvmaddwev\\.h\\.b.*lasx_xvmaddwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du:.*xvmaddwev\\.q\\.du.*lasx_xvmaddwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu:.*xvmaddwev\\.d\\.wu.*lasx_xvmaddwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu:.*xvmaddwev\\.w\\.hu.*lasx_xvmaddwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_bu:.*xvmaddwev\\.h\\.bu.*lasx_xvmaddwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_d:.*xvmaddwod\\.q\\.d.*lasx_xvmaddwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_w:.*xvmaddwod\\.d\\.w.*lasx_xvmaddwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_h:.*xvmaddwod\\.w\\.h.*lasx_xvmaddwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_b:.*xvmaddwod\\.h\\.b.*lasx_xvmaddwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du:.*xvmaddwod\\.q\\.du.*lasx_xvmaddwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu:.*xvmaddwod\\.d\\.wu.*lasx_xvmaddwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu:.*xvmaddwod\\.w\\.hu.*lasx_xvmaddwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu:.*xvmaddwod\\.h\\.bu.*lasx_xvmaddwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_q_du_d:.*xvmaddwev\\.q\\.du\\.d.*lasx_xvmaddwev_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_d_wu_w:.*xvmaddwev\\.d\\.wu\\.w.*lasx_xvmaddwev_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_w_hu_h:.*xvmaddwev\\.w\\.hu\\.h.*lasx_xvmaddwev_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwev_h_bu_b:.*xvmaddwev\\.h\\.bu\\.b.*lasx_xvmaddwev_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_q_du_d:.*xvmaddwod\\.q\\.du\\.d.*lasx_xvmaddwod_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_d_wu_w:.*xvmaddwod\\.d\\.wu\\.w.*lasx_xvmaddwod_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_w_hu_h:.*xvmaddwod\\.w\\.hu\\.h.*lasx_xvmaddwod_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmaddwod_h_bu_b:.*xvmaddwod\\.h\\.bu\\.b.*lasx_xvmaddwod_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotr_b:.*xvrotr\\.b.*lasx_xvrotr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotr_h:.*xvrotr\\.h.*lasx_xvrotr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotr_w:.*xvrotr\\.w.*lasx_xvrotr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotr_d:.*xvrotr\\.d.*lasx_xvrotr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvadd_q:.*xvadd\\.q.*lasx_xvadd_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsub_q:.*xvsub\\.q.*lasx_xvsub_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwev_q_du_d:.*xvaddwev\\.q\\.du\\.d.*lasx_xvaddwev_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvaddwod_q_du_d:.*xvaddwod\\.q\\.du\\.d.*lasx_xvaddwod_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwev_q_du_d:.*xvmulwev\\.q\\.du\\.d.*lasx_xvmulwev_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmulwod_q_du_d:.*xvmulwod\\.q\\.du\\.d.*lasx_xvmulwod_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmskgez_b:.*xvmskgez\\.b.*lasx_xvmskgez_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvmsknz_b:.*xvmsknz\\.b.*lasx_xvmsknz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_h_b:.*xvexth\\.h\\.b.*lasx_xvexth_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_w_h:.*xvexth\\.w\\.h.*lasx_xvexth_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_d_w:.*xvexth\\.d\\.w.*lasx_xvexth_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_q_d:.*xvexth\\.q\\.d.*lasx_xvexth_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_hu_bu:.*xvexth\\.hu\\.bu.*lasx_xvexth_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_wu_hu:.*xvexth\\.wu\\.hu.*lasx_xvexth_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_du_wu:.*xvexth\\.du\\.wu.*lasx_xvexth_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvexth_qu_du:.*xvexth\\.qu\\.du.*lasx_xvexth_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotri_b:.*xvrotri\\.b.*lasx_xvrotri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotri_h:.*xvrotri\\.h.*lasx_xvrotri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotri_w:.*xvrotri\\.w.*lasx_xvrotri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrotri_d:.*xvrotri\\.d.*lasx_xvrotri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvextl_q_d:.*xvextl\\.q\\.d.*lasx_xvextl_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlni_b_h:.*xvsrlni\\.b\\.h.*lasx_xvsrlni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlni_h_w:.*xvsrlni\\.h\\.w.*lasx_xvsrlni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlni_w_d:.*xvsrlni\\.w\\.d.*lasx_xvsrlni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlni_d_q:.*xvsrlni\\.d\\.q.*lasx_xvsrlni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_b_h:.*xvsrlrni\\.b\\.h.*lasx_xvsrlrni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_h_w:.*xvsrlrni\\.h\\.w.*lasx_xvsrlrni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_w_d:.*xvsrlrni\\.w\\.d.*lasx_xvsrlrni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrlrni_d_q:.*xvsrlrni\\.d\\.q.*lasx_xvsrlrni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_b_h:.*xvssrlni\\.b\\.h.*lasx_xvssrlni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_h_w:.*xvssrlni\\.h\\.w.*lasx_xvssrlni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_w_d:.*xvssrlni\\.w\\.d.*lasx_xvssrlni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_d_q:.*xvssrlni\\.d\\.q.*lasx_xvssrlni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_bu_h:.*xvssrlni\\.bu\\.h.*lasx_xvssrlni_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_hu_w:.*xvssrlni\\.hu\\.w.*lasx_xvssrlni_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_wu_d:.*xvssrlni\\.wu\\.d.*lasx_xvssrlni_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlni_du_q:.*xvssrlni\\.du\\.q.*lasx_xvssrlni_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_b_h:.*xvssrlrni\\.b\\.h.*lasx_xvssrlrni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_h_w:.*xvssrlrni\\.h\\.w.*lasx_xvssrlrni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_w_d:.*xvssrlrni\\.w\\.d.*lasx_xvssrlrni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_d_q:.*xvssrlrni\\.d\\.q.*lasx_xvssrlrni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_bu_h:.*xvssrlrni\\.bu\\.h.*lasx_xvssrlrni_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_hu_w:.*xvssrlrni\\.hu\\.w.*lasx_xvssrlrni_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_wu_d:.*xvssrlrni\\.wu\\.d.*lasx_xvssrlrni_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrlrni_du_q:.*xvssrlrni\\.du\\.q.*lasx_xvssrlrni_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrani_b_h:.*xvsrani\\.b\\.h.*lasx_xvsrani_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrani_h_w:.*xvsrani\\.h\\.w.*lasx_xvsrani_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrani_w_d:.*xvsrani\\.w\\.d.*lasx_xvsrani_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrani_d_q:.*xvsrani\\.d\\.q.*lasx_xvsrani_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarni_b_h:.*xvsrarni\\.b\\.h.*lasx_xvsrarni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarni_h_w:.*xvsrarni\\.h\\.w.*lasx_xvsrarni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarni_w_d:.*xvsrarni\\.w\\.d.*lasx_xvsrarni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvsrarni_d_q:.*xvsrarni\\.d\\.q.*lasx_xvsrarni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_b_h:.*xvssrani\\.b\\.h.*lasx_xvssrani_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_h_w:.*xvssrani\\.h\\.w.*lasx_xvssrani_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_w_d:.*xvssrani\\.w\\.d.*lasx_xvssrani_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_d_q:.*xvssrani\\.d\\.q.*lasx_xvssrani_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_bu_h:.*xvssrani\\.bu\\.h.*lasx_xvssrani_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_hu_w:.*xvssrani\\.hu\\.w.*lasx_xvssrani_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_wu_d:.*xvssrani\\.wu\\.d.*lasx_xvssrani_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrani_du_q:.*xvssrani\\.du\\.q.*lasx_xvssrani_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_b_h:.*xvssrarni\\.b\\.h.*lasx_xvssrarni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_h_w:.*xvssrarni\\.h\\.w.*lasx_xvssrarni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_w_d:.*xvssrarni\\.w\\.d.*lasx_xvssrarni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_d_q:.*xvssrarni\\.d\\.q.*lasx_xvssrarni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_bu_h:.*xvssrarni\\.bu\\.h.*lasx_xvssrarni_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_hu_w:.*xvssrarni\\.hu\\.w.*lasx_xvssrarni_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_wu_d:.*xvssrarni\\.wu\\.d.*lasx_xvssrarni_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvssrarni_du_q:.*xvssrarni\\.du\\.q.*lasx_xvssrarni_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbnz_b:.*xvsetanyeqz\\.b.*lasx_xbnz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbnz_d:.*xvsetanyeqz\\.d.*lasx_xbnz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbnz_h:.*xvsetanyeqz\\.h.*lasx_xbnz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbnz_v:.*xvseteqz\\.v.*lasx_xbnz_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbnz_w:.*xvsetanyeqz\\.w.*lasx_xbnz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbz_b:.*xvsetallnez\\.b.*lasx_xbz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbz_d:.*xvsetallnez\\.d.*lasx_xbz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbz_h:.*xvsetallnez\\.h.*lasx_xbz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbz_v:.*xvsetnez\\.v.*lasx_xbz_v" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xbz_w:.*xvsetallnez\\.w.*lasx_xbz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_d:.*xvfcmp\\.caf\\.d.*lasx_xvfcmp_caf_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_caf_s:.*xvfcmp\\.caf\\.s.*lasx_xvfcmp_caf_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_d:.*xvfcmp\\.ceq\\.d.*lasx_xvfcmp_ceq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_ceq_s:.*xvfcmp\\.ceq\\.s.*lasx_xvfcmp_ceq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_d:.*xvfcmp\\.cle\\.d.*lasx_xvfcmp_cle_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cle_s:.*xvfcmp\\.cle\\.s.*lasx_xvfcmp_cle_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_clt_d:.*xvfcmp\\.clt\\.d.*lasx_xvfcmp_clt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_clt_s:.*xvfcmp\\.clt\\.s.*lasx_xvfcmp_clt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_d:.*xvfcmp\\.cne\\.d.*lasx_xvfcmp_cne_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cne_s:.*xvfcmp\\.cne\\.s.*lasx_xvfcmp_cne_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_d:.*xvfcmp\\.cor\\.d.*lasx_xvfcmp_cor_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cor_s:.*xvfcmp\\.cor\\.s.*lasx_xvfcmp_cor_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_d:.*xvfcmp\\.cueq\\.d.*lasx_xvfcmp_cueq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cueq_s:.*xvfcmp\\.cueq\\.s.*lasx_xvfcmp_cueq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_d:.*xvfcmp\\.cule\\.d.*lasx_xvfcmp_cule_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cule_s:.*xvfcmp\\.cule\\.s.*lasx_xvfcmp_cule_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_d:.*xvfcmp\\.cult\\.d.*lasx_xvfcmp_cult_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cult_s:.*xvfcmp\\.cult\\.s.*lasx_xvfcmp_cult_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_d:.*xvfcmp\\.cun\\.d.*lasx_xvfcmp_cun_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_d:.*xvfcmp\\.cune\\.d.*lasx_xvfcmp_cune_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cune_s:.*xvfcmp\\.cune\\.s.*lasx_xvfcmp_cune_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_cun_s:.*xvfcmp\\.cun\\.s.*lasx_xvfcmp_cun_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_d:.*xvfcmp\\.saf\\.d.*lasx_xvfcmp_saf_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_saf_s:.*xvfcmp\\.saf\\.s.*lasx_xvfcmp_saf_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_seq_d:.*xvfcmp\\.seq\\.d.*lasx_xvfcmp_seq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_seq_s:.*xvfcmp\\.seq\\.s.*lasx_xvfcmp_seq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_d:.*xvfcmp\\.sle\\.d.*lasx_xvfcmp_sle_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sle_s:.*xvfcmp\\.sle\\.s.*lasx_xvfcmp_sle_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_d:.*xvfcmp\\.slt\\.d.*lasx_xvfcmp_slt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_slt_s:.*xvfcmp\\.slt\\.s.*lasx_xvfcmp_slt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_d:.*xvfcmp\\.sne\\.d.*lasx_xvfcmp_sne_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sne_s:.*xvfcmp\\.sne\\.s.*lasx_xvfcmp_sne_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_d:.*xvfcmp\\.sor\\.d.*lasx_xvfcmp_sor_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sor_s:.*xvfcmp\\.sor\\.s.*lasx_xvfcmp_sor_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_d:.*xvfcmp\\.sueq\\.d.*lasx_xvfcmp_sueq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sueq_s:.*xvfcmp\\.sueq\\.s.*lasx_xvfcmp_sueq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_d:.*xvfcmp\\.sule\\.d.*lasx_xvfcmp_sule_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sule_s:.*xvfcmp\\.sule\\.s.*lasx_xvfcmp_sule_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_d:.*xvfcmp\\.sult\\.d.*lasx_xvfcmp_sult_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sult_s:.*xvfcmp\\.sult\\.s.*lasx_xvfcmp_sult_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_d:.*xvfcmp\\.sun\\.d.*lasx_xvfcmp_sun_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_d:.*xvfcmp\\.sune\\.d.*lasx_xvfcmp_sune_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sune_s:.*xvfcmp\\.sune\\.s.*lasx_xvfcmp_sune_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvfcmp_sun_s:.*xvfcmp\\.sun\\.s.*lasx_xvfcmp_sun_s" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve_d_f:.*xvpickve\\.d.*lasx_xvpickve_d_f" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvpickve_w_f:.*xvpickve\\.w.*lasx_xvpickve_w_f" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepli_b:.*xvrepli\\.b.*lasx_xvrepli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepli_d:.*xvrepli\\.d.*lasx_xvrepli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepli_h:.*xvrepli\\.h.*lasx_xvrepli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lasx_xvrepli_w:.*xvrepli\\.w.*lasx_xvrepli_w" 1 } } */
++
++typedef signed char v32i8 __attribute__ ((vector_size (32), aligned (32)));
++typedef signed char v32i8_b __attribute__ ((vector_size (32), aligned (1)));
++typedef unsigned char v32u8 __attribute__ ((vector_size (32), aligned (32)));
++typedef unsigned char v32u8_b __attribute__ ((vector_size (32), aligned (1)));
++typedef short v16i16 __attribute__ ((vector_size (32), aligned (32)));
++typedef short v16i16_h __attribute__ ((vector_size (32), aligned (2)));
++typedef unsigned short v16u16 __attribute__ ((vector_size (32), aligned (32)));
++typedef unsigned short v16u16_h
++    __attribute__ ((vector_size (32), aligned (2)));
++typedef int v8i32 __attribute__ ((vector_size (32), aligned (32)));
++typedef int v8i32_w __attribute__ ((vector_size (32), aligned (4)));
++typedef unsigned int v8u32 __attribute__ ((vector_size (32), aligned (32)));
++typedef unsigned int v8u32_w __attribute__ ((vector_size (32), aligned (4)));
++typedef long long v4i64 __attribute__ ((vector_size (32), aligned (32)));
++typedef long long v4i64_d __attribute__ ((vector_size (32), aligned (8)));
++typedef unsigned long long v4u64
++    __attribute__ ((vector_size (32), aligned (32)));
++typedef unsigned long long v4u64_d
++    __attribute__ ((vector_size (32), aligned (8)));
++typedef float v8f32 __attribute__ ((vector_size (32), aligned (32)));
++typedef float v8f32_w __attribute__ ((vector_size (32), aligned (4)));
++typedef double v4f64 __attribute__ ((vector_size (32), aligned (32)));
++typedef double v4f64_d __attribute__ ((vector_size (32), aligned (8)));
++
++typedef double v4f64 __attribute__ ((vector_size (32), aligned (32)));
++typedef double v4f64_d __attribute__ ((vector_size (32), aligned (8)));
++
++typedef float __m256 __attribute__ ((__vector_size__ (32), __may_alias__));
++typedef long long __m256i
++    __attribute__ ((__vector_size__ (32), __may_alias__));
++typedef double __m256d __attribute__ ((__vector_size__ (32), __may_alias__));
++
++/* Unaligned version of the same types.  */
++typedef float __m256_u
++    __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1)));
++typedef long long __m256i_u
++    __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1)));
++typedef double __m256d_u
++    __attribute__ ((__vector_size__ (32), __may_alias__, __aligned__ (1)));
++
++v32i8
++__lasx_xvsll_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsll_b (_1, _2);
++}
++v16i16
++__lasx_xvsll_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsll_h (_1, _2);
++}
++v8i32
++__lasx_xvsll_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsll_w (_1, _2);
++}
++v4i64
++__lasx_xvsll_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsll_d (_1, _2);
++}
++v32i8
++__lasx_xvslli_b (v32i8 _1)
++{
++  return __builtin_lasx_xvslli_b (_1, 1);
++}
++v16i16
++__lasx_xvslli_h (v16i16 _1)
++{
++  return __builtin_lasx_xvslli_h (_1, 1);
++}
++v8i32
++__lasx_xvslli_w (v8i32 _1)
++{
++  return __builtin_lasx_xvslli_w (_1, 1);
++}
++v4i64
++__lasx_xvslli_d (v4i64 _1)
++{
++  return __builtin_lasx_xvslli_d (_1, 1);
++}
++v32i8
++__lasx_xvsra_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsra_b (_1, _2);
++}
++v16i16
++__lasx_xvsra_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsra_h (_1, _2);
++}
++v8i32
++__lasx_xvsra_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsra_w (_1, _2);
++}
++v4i64
++__lasx_xvsra_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsra_d (_1, _2);
++}
++v32i8
++__lasx_xvsrai_b (v32i8 _1)
++{
++  return __builtin_lasx_xvsrai_b (_1, 1);
++}
++v16i16
++__lasx_xvsrai_h (v16i16 _1)
++{
++  return __builtin_lasx_xvsrai_h (_1, 1);
++}
++v8i32
++__lasx_xvsrai_w (v8i32 _1)
++{
++  return __builtin_lasx_xvsrai_w (_1, 1);
++}
++v4i64
++__lasx_xvsrai_d (v4i64 _1)
++{
++  return __builtin_lasx_xvsrai_d (_1, 1);
++}
++v32i8
++__lasx_xvsrar_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsrar_b (_1, _2);
++}
++v16i16
++__lasx_xvsrar_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrar_h (_1, _2);
++}
++v8i32
++__lasx_xvsrar_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrar_w (_1, _2);
++}
++v4i64
++__lasx_xvsrar_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrar_d (_1, _2);
++}
++v32i8
++__lasx_xvsrari_b (v32i8 _1)
++{
++  return __builtin_lasx_xvsrari_b (_1, 1);
++}
++v16i16
++__lasx_xvsrari_h (v16i16 _1)
++{
++  return __builtin_lasx_xvsrari_h (_1, 1);
++}
++v8i32
++__lasx_xvsrari_w (v8i32 _1)
++{
++  return __builtin_lasx_xvsrari_w (_1, 1);
++}
++v4i64
++__lasx_xvsrari_d (v4i64 _1)
++{
++  return __builtin_lasx_xvsrari_d (_1, 1);
++}
++v32i8
++__lasx_xvsrl_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsrl_b (_1, _2);
++}
++v16i16
++__lasx_xvsrl_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrl_h (_1, _2);
++}
++v8i32
++__lasx_xvsrl_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrl_w (_1, _2);
++}
++v4i64
++__lasx_xvsrl_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrl_d (_1, _2);
++}
++v32i8
++__lasx_xvsrli_b (v32i8 _1)
++{
++  return __builtin_lasx_xvsrli_b (_1, 1);
++}
++v16i16
++__lasx_xvsrli_h (v16i16 _1)
++{
++  return __builtin_lasx_xvsrli_h (_1, 1);
++}
++v8i32
++__lasx_xvsrli_w (v8i32 _1)
++{
++  return __builtin_lasx_xvsrli_w (_1, 1);
++}
++v4i64
++__lasx_xvsrli_d (v4i64 _1)
++{
++  return __builtin_lasx_xvsrli_d (_1, 1);
++}
++v32i8
++__lasx_xvsrlr_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsrlr_b (_1, _2);
++}
++v16i16
++__lasx_xvsrlr_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrlr_h (_1, _2);
++}
++v8i32
++__lasx_xvsrlr_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrlr_w (_1, _2);
++}
++v4i64
++__lasx_xvsrlr_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrlr_d (_1, _2);
++}
++v32i8
++__lasx_xvsrlri_b (v32i8 _1)
++{
++  return __builtin_lasx_xvsrlri_b (_1, 1);
++}
++v16i16
++__lasx_xvsrlri_h (v16i16 _1)
++{
++  return __builtin_lasx_xvsrlri_h (_1, 1);
++}
++v8i32
++__lasx_xvsrlri_w (v8i32 _1)
++{
++  return __builtin_lasx_xvsrlri_w (_1, 1);
++}
++v4i64
++__lasx_xvsrlri_d (v4i64 _1)
++{
++  return __builtin_lasx_xvsrlri_d (_1, 1);
++}
++v32u8
++__lasx_xvbitclr_b (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvbitclr_b (_1, _2);
++}
++v16u16
++__lasx_xvbitclr_h (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvbitclr_h (_1, _2);
++}
++v8u32
++__lasx_xvbitclr_w (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvbitclr_w (_1, _2);
++}
++v4u64
++__lasx_xvbitclr_d (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvbitclr_d (_1, _2);
++}
++v32u8
++__lasx_xvbitclri_b (v32u8 _1)
++{
++  return __builtin_lasx_xvbitclri_b (_1, 1);
++}
++v16u16
++__lasx_xvbitclri_h (v16u16 _1)
++{
++  return __builtin_lasx_xvbitclri_h (_1, 1);
++}
++v8u32
++__lasx_xvbitclri_w (v8u32 _1)
++{
++  return __builtin_lasx_xvbitclri_w (_1, 1);
++}
++v4u64
++__lasx_xvbitclri_d (v4u64 _1)
++{
++  return __builtin_lasx_xvbitclri_d (_1, 1);
++}
++v32u8
++__lasx_xvbitset_b (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvbitset_b (_1, _2);
++}
++v16u16
++__lasx_xvbitset_h (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvbitset_h (_1, _2);
++}
++v8u32
++__lasx_xvbitset_w (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvbitset_w (_1, _2);
++}
++v4u64
++__lasx_xvbitset_d (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvbitset_d (_1, _2);
++}
++v32u8
++__lasx_xvbitseti_b (v32u8 _1)
++{
++  return __builtin_lasx_xvbitseti_b (_1, 1);
++}
++v16u16
++__lasx_xvbitseti_h (v16u16 _1)
++{
++  return __builtin_lasx_xvbitseti_h (_1, 1);
++}
++v8u32
++__lasx_xvbitseti_w (v8u32 _1)
++{
++  return __builtin_lasx_xvbitseti_w (_1, 1);
++}
++v4u64
++__lasx_xvbitseti_d (v4u64 _1)
++{
++  return __builtin_lasx_xvbitseti_d (_1, 1);
++}
++v32u8
++__lasx_xvbitrev_b (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvbitrev_b (_1, _2);
++}
++v16u16
++__lasx_xvbitrev_h (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvbitrev_h (_1, _2);
++}
++v8u32
++__lasx_xvbitrev_w (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvbitrev_w (_1, _2);
++}
++v4u64
++__lasx_xvbitrev_d (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvbitrev_d (_1, _2);
++}
++v32u8
++__lasx_xvbitrevi_b (v32u8 _1)
++{
++  return __builtin_lasx_xvbitrevi_b (_1, 1);
++}
++v16u16
++__lasx_xvbitrevi_h (v16u16 _1)
++{
++  return __builtin_lasx_xvbitrevi_h (_1, 1);
++}
++v8u32
++__lasx_xvbitrevi_w (v8u32 _1)
++{
++  return __builtin_lasx_xvbitrevi_w (_1, 1);
++}
++v4u64
++__lasx_xvbitrevi_d (v4u64 _1)
++{
++  return __builtin_lasx_xvbitrevi_d (_1, 1);
++}
++v32i8
++__lasx_xvadd_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvadd_b (_1, _2);
++}
++v16i16
++__lasx_xvadd_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvadd_h (_1, _2);
++}
++v8i32
++__lasx_xvadd_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvadd_w (_1, _2);
++}
++v4i64
++__lasx_xvadd_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvadd_d (_1, _2);
++}
++v32i8
++__lasx_xvaddi_bu (v32i8 _1)
++{
++  return __builtin_lasx_xvaddi_bu (_1, 1);
++}
++v16i16
++__lasx_xvaddi_hu (v16i16 _1)
++{
++  return __builtin_lasx_xvaddi_hu (_1, 1);
++}
++v8i32
++__lasx_xvaddi_wu (v8i32 _1)
++{
++  return __builtin_lasx_xvaddi_wu (_1, 1);
++}
++v4i64
++__lasx_xvaddi_du (v4i64 _1)
++{
++  return __builtin_lasx_xvaddi_du (_1, 1);
++}
++v32i8
++__lasx_xvsub_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsub_b (_1, _2);
++}
++v16i16
++__lasx_xvsub_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsub_h (_1, _2);
++}
++v8i32
++__lasx_xvsub_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsub_w (_1, _2);
++}
++v4i64
++__lasx_xvsub_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsub_d (_1, _2);
++}
++v32i8
++__lasx_xvsubi_bu (v32i8 _1)
++{
++  return __builtin_lasx_xvsubi_bu (_1, 1);
++}
++v16i16
++__lasx_xvsubi_hu (v16i16 _1)
++{
++  return __builtin_lasx_xvsubi_hu (_1, 1);
++}
++v8i32
++__lasx_xvsubi_wu (v8i32 _1)
++{
++  return __builtin_lasx_xvsubi_wu (_1, 1);
++}
++v4i64
++__lasx_xvsubi_du (v4i64 _1)
++{
++  return __builtin_lasx_xvsubi_du (_1, 1);
++}
++v32i8
++__lasx_xvmax_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmax_b (_1, _2);
++}
++v16i16
++__lasx_xvmax_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmax_h (_1, _2);
++}
++v8i32
++__lasx_xvmax_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmax_w (_1, _2);
++}
++v4i64
++__lasx_xvmax_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmax_d (_1, _2);
++}
++v32i8
++__lasx_xvmaxi_b (v32i8 _1)
++{
++  return __builtin_lasx_xvmaxi_b (_1, 1);
++}
++v16i16
++__lasx_xvmaxi_h (v16i16 _1)
++{
++  return __builtin_lasx_xvmaxi_h (_1, 1);
++}
++v8i32
++__lasx_xvmaxi_w (v8i32 _1)
++{
++  return __builtin_lasx_xvmaxi_w (_1, 1);
++}
++v4i64
++__lasx_xvmaxi_d (v4i64 _1)
++{
++  return __builtin_lasx_xvmaxi_d (_1, 1);
++}
++v32u8
++__lasx_xvmax_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvmax_bu (_1, _2);
++}
++v16u16
++__lasx_xvmax_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvmax_hu (_1, _2);
++}
++v8u32
++__lasx_xvmax_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvmax_wu (_1, _2);
++}
++v4u64
++__lasx_xvmax_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvmax_du (_1, _2);
++}
++v32u8
++__lasx_xvmaxi_bu (v32u8 _1)
++{
++  return __builtin_lasx_xvmaxi_bu (_1, 1);
++}
++v16u16
++__lasx_xvmaxi_hu (v16u16 _1)
++{
++  return __builtin_lasx_xvmaxi_hu (_1, 1);
++}
++v8u32
++__lasx_xvmaxi_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvmaxi_wu (_1, 1);
++}
++v4u64
++__lasx_xvmaxi_du (v4u64 _1)
++{
++  return __builtin_lasx_xvmaxi_du (_1, 1);
++}
++v32i8
++__lasx_xvmin_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmin_b (_1, _2);
++}
++v16i16
++__lasx_xvmin_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmin_h (_1, _2);
++}
++v8i32
++__lasx_xvmin_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmin_w (_1, _2);
++}
++v4i64
++__lasx_xvmin_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmin_d (_1, _2);
++}
++v32i8
++__lasx_xvmini_b (v32i8 _1)
++{
++  return __builtin_lasx_xvmini_b (_1, 1);
++}
++v16i16
++__lasx_xvmini_h (v16i16 _1)
++{
++  return __builtin_lasx_xvmini_h (_1, 1);
++}
++v8i32
++__lasx_xvmini_w (v8i32 _1)
++{
++  return __builtin_lasx_xvmini_w (_1, 1);
++}
++v4i64
++__lasx_xvmini_d (v4i64 _1)
++{
++  return __builtin_lasx_xvmini_d (_1, 1);
++}
++v32u8
++__lasx_xvmin_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvmin_bu (_1, _2);
++}
++v16u16
++__lasx_xvmin_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvmin_hu (_1, _2);
++}
++v8u32
++__lasx_xvmin_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvmin_wu (_1, _2);
++}
++v4u64
++__lasx_xvmin_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvmin_du (_1, _2);
++}
++v32u8
++__lasx_xvmini_bu (v32u8 _1)
++{
++  return __builtin_lasx_xvmini_bu (_1, 1);
++}
++v16u16
++__lasx_xvmini_hu (v16u16 _1)
++{
++  return __builtin_lasx_xvmini_hu (_1, 1);
++}
++v8u32
++__lasx_xvmini_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvmini_wu (_1, 1);
++}
++v4u64
++__lasx_xvmini_du (v4u64 _1)
++{
++  return __builtin_lasx_xvmini_du (_1, 1);
++}
++v32i8
++__lasx_xvseq_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvseq_b (_1, _2);
++}
++v16i16
++__lasx_xvseq_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvseq_h (_1, _2);
++}
++v8i32
++__lasx_xvseq_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvseq_w (_1, _2);
++}
++v4i64
++__lasx_xvseq_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvseq_d (_1, _2);
++}
++v32i8
++__lasx_xvseqi_b (v32i8 _1)
++{
++  return __builtin_lasx_xvseqi_b (_1, 1);
++}
++v16i16
++__lasx_xvseqi_h (v16i16 _1)
++{
++  return __builtin_lasx_xvseqi_h (_1, 1);
++}
++v8i32
++__lasx_xvseqi_w (v8i32 _1)
++{
++  return __builtin_lasx_xvseqi_w (_1, 1);
++}
++v4i64
++__lasx_xvseqi_d (v4i64 _1)
++{
++  return __builtin_lasx_xvseqi_d (_1, 1);
++}
++v32i8
++__lasx_xvslt_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvslt_b (_1, _2);
++}
++v16i16
++__lasx_xvslt_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvslt_h (_1, _2);
++}
++v8i32
++__lasx_xvslt_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvslt_w (_1, _2);
++}
++v4i64
++__lasx_xvslt_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvslt_d (_1, _2);
++}
++v32i8
++__lasx_xvslti_b (v32i8 _1)
++{
++  return __builtin_lasx_xvslti_b (_1, 1);
++}
++v16i16
++__lasx_xvslti_h (v16i16 _1)
++{
++  return __builtin_lasx_xvslti_h (_1, 1);
++}
++v8i32
++__lasx_xvslti_w (v8i32 _1)
++{
++  return __builtin_lasx_xvslti_w (_1, 1);
++}
++v4i64
++__lasx_xvslti_d (v4i64 _1)
++{
++  return __builtin_lasx_xvslti_d (_1, 1);
++}
++v32i8
++__lasx_xvslt_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvslt_bu (_1, _2);
++}
++v16i16
++__lasx_xvslt_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvslt_hu (_1, _2);
++}
++v8i32
++__lasx_xvslt_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvslt_wu (_1, _2);
++}
++v4i64
++__lasx_xvslt_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvslt_du (_1, _2);
++}
++v32i8
++__lasx_xvslti_bu (v32u8 _1)
++{
++  return __builtin_lasx_xvslti_bu (_1, 1);
++}
++v16i16
++__lasx_xvslti_hu (v16u16 _1)
++{
++  return __builtin_lasx_xvslti_hu (_1, 1);
++}
++v8i32
++__lasx_xvslti_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvslti_wu (_1, 1);
++}
++v4i64
++__lasx_xvslti_du (v4u64 _1)
++{
++  return __builtin_lasx_xvslti_du (_1, 1);
++}
++v32i8
++__lasx_xvsle_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsle_b (_1, _2);
++}
++v16i16
++__lasx_xvsle_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsle_h (_1, _2);
++}
++v8i32
++__lasx_xvsle_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsle_w (_1, _2);
++}
++v4i64
++__lasx_xvsle_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsle_d (_1, _2);
++}
++v32i8
++__lasx_xvslei_b (v32i8 _1)
++{
++  return __builtin_lasx_xvslei_b (_1, 1);
++}
++v16i16
++__lasx_xvslei_h (v16i16 _1)
++{
++  return __builtin_lasx_xvslei_h (_1, 1);
++}
++v8i32
++__lasx_xvslei_w (v8i32 _1)
++{
++  return __builtin_lasx_xvslei_w (_1, 1);
++}
++v4i64
++__lasx_xvslei_d (v4i64 _1)
++{
++  return __builtin_lasx_xvslei_d (_1, 1);
++}
++v32i8
++__lasx_xvsle_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvsle_bu (_1, _2);
++}
++v16i16
++__lasx_xvsle_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvsle_hu (_1, _2);
++}
++v8i32
++__lasx_xvsle_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvsle_wu (_1, _2);
++}
++v4i64
++__lasx_xvsle_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvsle_du (_1, _2);
++}
++v32i8
++__lasx_xvslei_bu (v32u8 _1)
++{
++  return __builtin_lasx_xvslei_bu (_1, 1);
++}
++v16i16
++__lasx_xvslei_hu (v16u16 _1)
++{
++  return __builtin_lasx_xvslei_hu (_1, 1);
++}
++v8i32
++__lasx_xvslei_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvslei_wu (_1, 1);
++}
++v4i64
++__lasx_xvslei_du (v4u64 _1)
++{
++  return __builtin_lasx_xvslei_du (_1, 1);
++}
++v32i8
++__lasx_xvsat_b (v32i8 _1)
++{
++  return __builtin_lasx_xvsat_b (_1, 1);
++}
++v16i16
++__lasx_xvsat_h (v16i16 _1)
++{
++  return __builtin_lasx_xvsat_h (_1, 1);
++}
++v8i32
++__lasx_xvsat_w (v8i32 _1)
++{
++  return __builtin_lasx_xvsat_w (_1, 1);
++}
++v4i64
++__lasx_xvsat_d (v4i64 _1)
++{
++  return __builtin_lasx_xvsat_d (_1, 1);
++}
++v32u8
++__lasx_xvsat_bu (v32u8 _1)
++{
++  return __builtin_lasx_xvsat_bu (_1, 1);
++}
++v16u16
++__lasx_xvsat_hu (v16u16 _1)
++{
++  return __builtin_lasx_xvsat_hu (_1, 1);
++}
++v8u32
++__lasx_xvsat_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvsat_wu (_1, 1);
++}
++v4u64
++__lasx_xvsat_du (v4u64 _1)
++{
++  return __builtin_lasx_xvsat_du (_1, 1);
++}
++v32i8
++__lasx_xvadda_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvadda_b (_1, _2);
++}
++v16i16
++__lasx_xvadda_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvadda_h (_1, _2);
++}
++v8i32
++__lasx_xvadda_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvadda_w (_1, _2);
++}
++v4i64
++__lasx_xvadda_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvadda_d (_1, _2);
++}
++v32i8
++__lasx_xvsadd_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsadd_b (_1, _2);
++}
++v16i16
++__lasx_xvsadd_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsadd_h (_1, _2);
++}
++v8i32
++__lasx_xvsadd_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsadd_w (_1, _2);
++}
++v4i64
++__lasx_xvsadd_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsadd_d (_1, _2);
++}
++v32u8
++__lasx_xvsadd_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvsadd_bu (_1, _2);
++}
++v16u16
++__lasx_xvsadd_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvsadd_hu (_1, _2);
++}
++v8u32
++__lasx_xvsadd_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvsadd_wu (_1, _2);
++}
++v4u64
++__lasx_xvsadd_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvsadd_du (_1, _2);
++}
++v32i8
++__lasx_xvavg_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvavg_b (_1, _2);
++}
++v16i16
++__lasx_xvavg_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvavg_h (_1, _2);
++}
++v8i32
++__lasx_xvavg_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvavg_w (_1, _2);
++}
++v4i64
++__lasx_xvavg_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvavg_d (_1, _2);
++}
++v32u8
++__lasx_xvavg_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvavg_bu (_1, _2);
++}
++v16u16
++__lasx_xvavg_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvavg_hu (_1, _2);
++}
++v8u32
++__lasx_xvavg_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvavg_wu (_1, _2);
++}
++v4u64
++__lasx_xvavg_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvavg_du (_1, _2);
++}
++v32i8
++__lasx_xvavgr_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvavgr_b (_1, _2);
++}
++v16i16
++__lasx_xvavgr_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvavgr_h (_1, _2);
++}
++v8i32
++__lasx_xvavgr_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvavgr_w (_1, _2);
++}
++v4i64
++__lasx_xvavgr_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvavgr_d (_1, _2);
++}
++v32u8
++__lasx_xvavgr_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvavgr_bu (_1, _2);
++}
++v16u16
++__lasx_xvavgr_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvavgr_hu (_1, _2);
++}
++v8u32
++__lasx_xvavgr_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvavgr_wu (_1, _2);
++}
++v4u64
++__lasx_xvavgr_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvavgr_du (_1, _2);
++}
++v32i8
++__lasx_xvssub_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssub_b (_1, _2);
++}
++v16i16
++__lasx_xvssub_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssub_h (_1, _2);
++}
++v8i32
++__lasx_xvssub_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssub_w (_1, _2);
++}
++v4i64
++__lasx_xvssub_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssub_d (_1, _2);
++}
++v32u8
++__lasx_xvssub_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvssub_bu (_1, _2);
++}
++v16u16
++__lasx_xvssub_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvssub_hu (_1, _2);
++}
++v8u32
++__lasx_xvssub_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvssub_wu (_1, _2);
++}
++v4u64
++__lasx_xvssub_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvssub_du (_1, _2);
++}
++v32i8
++__lasx_xvabsd_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvabsd_b (_1, _2);
++}
++v16i16
++__lasx_xvabsd_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvabsd_h (_1, _2);
++}
++v8i32
++__lasx_xvabsd_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvabsd_w (_1, _2);
++}
++v4i64
++__lasx_xvabsd_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvabsd_d (_1, _2);
++}
++v32u8
++__lasx_xvabsd_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvabsd_bu (_1, _2);
++}
++v16u16
++__lasx_xvabsd_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvabsd_hu (_1, _2);
++}
++v8u32
++__lasx_xvabsd_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvabsd_wu (_1, _2);
++}
++v4u64
++__lasx_xvabsd_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvabsd_du (_1, _2);
++}
++v32i8
++__lasx_xvmul_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmul_b (_1, _2);
++}
++v16i16
++__lasx_xvmul_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmul_h (_1, _2);
++}
++v8i32
++__lasx_xvmul_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmul_w (_1, _2);
++}
++v4i64
++__lasx_xvmul_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmul_d (_1, _2);
++}
++v32i8
++__lasx_xvmadd_b (v32i8 _1, v32i8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvmadd_b (_1, _2, _3);
++}
++v16i16
++__lasx_xvmadd_h (v16i16 _1, v16i16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvmadd_h (_1, _2, _3);
++}
++v8i32
++__lasx_xvmadd_w (v8i32 _1, v8i32 _2, v8i32 _3)
++{
++  return __builtin_lasx_xvmadd_w (_1, _2, _3);
++}
++v4i64
++__lasx_xvmadd_d (v4i64 _1, v4i64 _2, v4i64 _3)
++{
++  return __builtin_lasx_xvmadd_d (_1, _2, _3);
++}
++v32i8
++__lasx_xvmsub_b (v32i8 _1, v32i8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvmsub_b (_1, _2, _3);
++}
++v16i16
++__lasx_xvmsub_h (v16i16 _1, v16i16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvmsub_h (_1, _2, _3);
++}
++v8i32
++__lasx_xvmsub_w (v8i32 _1, v8i32 _2, v8i32 _3)
++{
++  return __builtin_lasx_xvmsub_w (_1, _2, _3);
++}
++v4i64
++__lasx_xvmsub_d (v4i64 _1, v4i64 _2, v4i64 _3)
++{
++  return __builtin_lasx_xvmsub_d (_1, _2, _3);
++}
++v32i8
++__lasx_xvdiv_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvdiv_b (_1, _2);
++}
++v16i16
++__lasx_xvdiv_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvdiv_h (_1, _2);
++}
++v8i32
++__lasx_xvdiv_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvdiv_w (_1, _2);
++}
++v4i64
++__lasx_xvdiv_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvdiv_d (_1, _2);
++}
++v32u8
++__lasx_xvdiv_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvdiv_bu (_1, _2);
++}
++v16u16
++__lasx_xvdiv_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvdiv_hu (_1, _2);
++}
++v8u32
++__lasx_xvdiv_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvdiv_wu (_1, _2);
++}
++v4u64
++__lasx_xvdiv_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvdiv_du (_1, _2);
++}
++v16i16
++__lasx_xvhaddw_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvhaddw_h_b (_1, _2);
++}
++v8i32
++__lasx_xvhaddw_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvhaddw_w_h (_1, _2);
++}
++v4i64
++__lasx_xvhaddw_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvhaddw_d_w (_1, _2);
++}
++v16u16
++__lasx_xvhaddw_hu_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvhaddw_hu_bu (_1, _2);
++}
++v8u32
++__lasx_xvhaddw_wu_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvhaddw_wu_hu (_1, _2);
++}
++v4u64
++__lasx_xvhaddw_du_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvhaddw_du_wu (_1, _2);
++}
++v16i16
++__lasx_xvhsubw_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvhsubw_h_b (_1, _2);
++}
++v8i32
++__lasx_xvhsubw_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvhsubw_w_h (_1, _2);
++}
++v4i64
++__lasx_xvhsubw_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvhsubw_d_w (_1, _2);
++}
++v16i16
++__lasx_xvhsubw_hu_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvhsubw_hu_bu (_1, _2);
++}
++v8i32
++__lasx_xvhsubw_wu_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvhsubw_wu_hu (_1, _2);
++}
++v4i64
++__lasx_xvhsubw_du_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvhsubw_du_wu (_1, _2);
++}
++v32i8
++__lasx_xvmod_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmod_b (_1, _2);
++}
++v16i16
++__lasx_xvmod_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmod_h (_1, _2);
++}
++v8i32
++__lasx_xvmod_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmod_w (_1, _2);
++}
++v4i64
++__lasx_xvmod_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmod_d (_1, _2);
++}
++v32u8
++__lasx_xvmod_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvmod_bu (_1, _2);
++}
++v16u16
++__lasx_xvmod_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvmod_hu (_1, _2);
++}
++v8u32
++__lasx_xvmod_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvmod_wu (_1, _2);
++}
++v4u64
++__lasx_xvmod_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvmod_du (_1, _2);
++}
++v32i8
++__lasx_xvrepl128vei_b (v32i8 _1)
++{
++  return __builtin_lasx_xvrepl128vei_b (_1, 1);
++}
++v16i16
++__lasx_xvrepl128vei_h (v16i16 _1)
++{
++  return __builtin_lasx_xvrepl128vei_h (_1, 1);
++}
++v8i32
++__lasx_xvrepl128vei_w (v8i32 _1)
++{
++  return __builtin_lasx_xvrepl128vei_w (_1, 1);
++}
++v4i64
++__lasx_xvrepl128vei_d (v4i64 _1)
++{
++  return __builtin_lasx_xvrepl128vei_d (_1, 1);
++}
++v32i8
++__lasx_xvpickev_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvpickev_b (_1, _2);
++}
++v16i16
++__lasx_xvpickev_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvpickev_h (_1, _2);
++}
++v8i32
++__lasx_xvpickev_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvpickev_w (_1, _2);
++}
++v4i64
++__lasx_xvpickev_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvpickev_d (_1, _2);
++}
++v32i8
++__lasx_xvpickod_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvpickod_b (_1, _2);
++}
++v16i16
++__lasx_xvpickod_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvpickod_h (_1, _2);
++}
++v8i32
++__lasx_xvpickod_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvpickod_w (_1, _2);
++}
++v4i64
++__lasx_xvpickod_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvpickod_d (_1, _2);
++}
++v32i8
++__lasx_xvilvh_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvilvh_b (_1, _2);
++}
++v16i16
++__lasx_xvilvh_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvilvh_h (_1, _2);
++}
++v8i32
++__lasx_xvilvh_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvilvh_w (_1, _2);
++}
++v4i64
++__lasx_xvilvh_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvilvh_d (_1, _2);
++}
++v32i8
++__lasx_xvilvl_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvilvl_b (_1, _2);
++}
++v16i16
++__lasx_xvilvl_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvilvl_h (_1, _2);
++}
++v8i32
++__lasx_xvilvl_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvilvl_w (_1, _2);
++}
++v4i64
++__lasx_xvilvl_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvilvl_d (_1, _2);
++}
++v32i8
++__lasx_xvpackev_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvpackev_b (_1, _2);
++}
++v16i16
++__lasx_xvpackev_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvpackev_h (_1, _2);
++}
++v8i32
++__lasx_xvpackev_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvpackev_w (_1, _2);
++}
++v4i64
++__lasx_xvpackev_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvpackev_d (_1, _2);
++}
++v32i8
++__lasx_xvpackod_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvpackod_b (_1, _2);
++}
++v16i16
++__lasx_xvpackod_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvpackod_h (_1, _2);
++}
++v8i32
++__lasx_xvpackod_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvpackod_w (_1, _2);
++}
++v4i64
++__lasx_xvpackod_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvpackod_d (_1, _2);
++}
++v32i8
++__lasx_xvshuf_b (v32i8 _1, v32i8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvshuf_b (_1, _2, _3);
++}
++v16i16
++__lasx_xvshuf_h (v16i16 _1, v16i16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvshuf_h (_1, _2, _3);
++}
++v8i32
++__lasx_xvshuf_w (v8i32 _1, v8i32 _2, v8i32 _3)
++{
++  return __builtin_lasx_xvshuf_w (_1, _2, _3);
++}
++v4i64
++__lasx_xvshuf_d (v4i64 _1, v4i64 _2, v4i64 _3)
++{
++  return __builtin_lasx_xvshuf_d (_1, _2, _3);
++}
++v32u8
++__lasx_xvand_v (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvand_v (_1, _2);
++}
++v32u8
++__lasx_xvandi_b (v32u8 _1)
++{
++  return __builtin_lasx_xvandi_b (_1, 1);
++}
++v32u8
++__lasx_xvor_v (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvor_v (_1, _2);
++}
++v32u8
++__lasx_xvori_b (v32u8 _1)
++{
++  return __builtin_lasx_xvori_b (_1, 1);
++}
++v32u8
++__lasx_xvnor_v (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvnor_v (_1, _2);
++}
++v32u8
++__lasx_xvnori_b (v32u8 _1)
++{
++  return __builtin_lasx_xvnori_b (_1, 1);
++}
++v32u8
++__lasx_xvxor_v (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvxor_v (_1, _2);
++}
++v32u8
++__lasx_xvxori_b (v32u8 _1)
++{
++  return __builtin_lasx_xvxori_b (_1, 1);
++}
++v32u8
++__lasx_xvbitsel_v (v32u8 _1, v32u8 _2, v32u8 _3)
++{
++  return __builtin_lasx_xvbitsel_v (_1, _2, _3);
++}
++v32u8
++__lasx_xvbitseli_b (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvbitseli_b (_1, _2, 1);
++}
++v32i8
++__lasx_xvshuf4i_b (v32i8 _1)
++{
++  return __builtin_lasx_xvshuf4i_b (_1, 1);
++}
++v16i16
++__lasx_xvshuf4i_h (v16i16 _1)
++{
++  return __builtin_lasx_xvshuf4i_h (_1, 1);
++}
++v8i32
++__lasx_xvshuf4i_w (v8i32 _1)
++{
++  return __builtin_lasx_xvshuf4i_w (_1, 1);
++}
++v32i8
++__lasx_xvreplgr2vr_b (int _1)
++{
++  return __builtin_lasx_xvreplgr2vr_b (_1);
++}
++v16i16
++__lasx_xvreplgr2vr_h (int _1)
++{
++  return __builtin_lasx_xvreplgr2vr_h (_1);
++}
++v8i32
++__lasx_xvreplgr2vr_w (int _1)
++{
++  return __builtin_lasx_xvreplgr2vr_w (_1);
++}
++v4i64
++__lasx_xvreplgr2vr_d (int _1)
++{
++  return __builtin_lasx_xvreplgr2vr_d (_1);
++}
++v32i8
++__lasx_xvpcnt_b (v32i8 _1)
++{
++  return __builtin_lasx_xvpcnt_b (_1);
++}
++v16i16
++__lasx_xvpcnt_h (v16i16 _1)
++{
++  return __builtin_lasx_xvpcnt_h (_1);
++}
++v8i32
++__lasx_xvpcnt_w (v8i32 _1)
++{
++  return __builtin_lasx_xvpcnt_w (_1);
++}
++v4i64
++__lasx_xvpcnt_d (v4i64 _1)
++{
++  return __builtin_lasx_xvpcnt_d (_1);
++}
++v32i8
++__lasx_xvclo_b (v32i8 _1)
++{
++  return __builtin_lasx_xvclo_b (_1);
++}
++v16i16
++__lasx_xvclo_h (v16i16 _1)
++{
++  return __builtin_lasx_xvclo_h (_1);
++}
++v8i32
++__lasx_xvclo_w (v8i32 _1)
++{
++  return __builtin_lasx_xvclo_w (_1);
++}
++v4i64
++__lasx_xvclo_d (v4i64 _1)
++{
++  return __builtin_lasx_xvclo_d (_1);
++}
++v32i8
++__lasx_xvclz_b (v32i8 _1)
++{
++  return __builtin_lasx_xvclz_b (_1);
++}
++v16i16
++__lasx_xvclz_h (v16i16 _1)
++{
++  return __builtin_lasx_xvclz_h (_1);
++}
++v8i32
++__lasx_xvclz_w (v8i32 _1)
++{
++  return __builtin_lasx_xvclz_w (_1);
++}
++v4i64
++__lasx_xvclz_d (v4i64 _1)
++{
++  return __builtin_lasx_xvclz_d (_1);
++}
++v8f32
++__lasx_xvfadd_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfadd_s (_1, _2);
++}
++v4f64
++__lasx_xvfadd_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfadd_d (_1, _2);
++}
++v8f32
++__lasx_xvfsub_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfsub_s (_1, _2);
++}
++v4f64
++__lasx_xvfsub_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfsub_d (_1, _2);
++}
++v8f32
++__lasx_xvfmul_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfmul_s (_1, _2);
++}
++v4f64
++__lasx_xvfmul_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfmul_d (_1, _2);
++}
++v8f32
++__lasx_xvfdiv_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfdiv_s (_1, _2);
++}
++v4f64
++__lasx_xvfdiv_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfdiv_d (_1, _2);
++}
++v16i16
++__lasx_xvfcvt_h_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcvt_h_s (_1, _2);
++}
++v8f32
++__lasx_xvfcvt_s_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcvt_s_d (_1, _2);
++}
++v8f32
++__lasx_xvfmin_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfmin_s (_1, _2);
++}
++v4f64
++__lasx_xvfmin_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfmin_d (_1, _2);
++}
++v8f32
++__lasx_xvfmina_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfmina_s (_1, _2);
++}
++v4f64
++__lasx_xvfmina_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfmina_d (_1, _2);
++}
++v8f32
++__lasx_xvfmax_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfmax_s (_1, _2);
++}
++v4f64
++__lasx_xvfmax_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfmax_d (_1, _2);
++}
++v8f32
++__lasx_xvfmaxa_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfmaxa_s (_1, _2);
++}
++v4f64
++__lasx_xvfmaxa_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfmaxa_d (_1, _2);
++}
++v8i32
++__lasx_xvfclass_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfclass_s (_1);
++}
++v4i64
++__lasx_xvfclass_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfclass_d (_1);
++}
++v8f32
++__lasx_xvfsqrt_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfsqrt_s (_1);
++}
++v4f64
++__lasx_xvfsqrt_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfsqrt_d (_1);
++}
++v8f32
++__lasx_xvfrecip_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrecip_s (_1);
++}
++v4f64
++__lasx_xvfrecip_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrecip_d (_1);
++}
++v8f32
++__lasx_xvfrint_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrint_s (_1);
++}
++v4f64
++__lasx_xvfrint_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrint_d (_1);
++}
++v8f32
++__lasx_xvfrsqrt_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrsqrt_s (_1);
++}
++v4f64
++__lasx_xvfrsqrt_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrsqrt_d (_1);
++}
++v8f32
++__lasx_xvflogb_s (v8f32 _1)
++{
++  return __builtin_lasx_xvflogb_s (_1);
++}
++v4f64
++__lasx_xvflogb_d (v4f64 _1)
++{
++  return __builtin_lasx_xvflogb_d (_1);
++}
++v8f32
++__lasx_xvfcvth_s_h (v16i16 _1)
++{
++  return __builtin_lasx_xvfcvth_s_h (_1);
++}
++v4f64
++__lasx_xvfcvth_d_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfcvth_d_s (_1);
++}
++v8f32
++__lasx_xvfcvtl_s_h (v16i16 _1)
++{
++  return __builtin_lasx_xvfcvtl_s_h (_1);
++}
++v4f64
++__lasx_xvfcvtl_d_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfcvtl_d_s (_1);
++}
++v8i32
++__lasx_xvftint_w_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftint_w_s (_1);
++}
++v4i64
++__lasx_xvftint_l_d (v4f64 _1)
++{
++  return __builtin_lasx_xvftint_l_d (_1);
++}
++v8u32
++__lasx_xvftint_wu_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftint_wu_s (_1);
++}
++v4u64
++__lasx_xvftint_lu_d (v4f64 _1)
++{
++  return __builtin_lasx_xvftint_lu_d (_1);
++}
++v8i32
++__lasx_xvftintrz_w_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrz_w_s (_1);
++}
++v4i64
++__lasx_xvftintrz_l_d (v4f64 _1)
++{
++  return __builtin_lasx_xvftintrz_l_d (_1);
++}
++v8u32
++__lasx_xvftintrz_wu_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrz_wu_s (_1);
++}
++v4u64
++__lasx_xvftintrz_lu_d (v4f64 _1)
++{
++  return __builtin_lasx_xvftintrz_lu_d (_1);
++}
++v8f32
++__lasx_xvffint_s_w (v8i32 _1)
++{
++  return __builtin_lasx_xvffint_s_w (_1);
++}
++v4f64
++__lasx_xvffint_d_l (v4i64 _1)
++{
++  return __builtin_lasx_xvffint_d_l (_1);
++}
++v8f32
++__lasx_xvffint_s_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvffint_s_wu (_1);
++}
++v4f64
++__lasx_xvffint_d_lu (v4u64 _1)
++{
++  return __builtin_lasx_xvffint_d_lu (_1);
++}
++v32i8
++__lasx_xvreplve_b (v32i8 _1, int _2)
++{
++  return __builtin_lasx_xvreplve_b (_1, _2);
++}
++v16i16
++__lasx_xvreplve_h (v16i16 _1, int _2)
++{
++  return __builtin_lasx_xvreplve_h (_1, _2);
++}
++v8i32
++__lasx_xvreplve_w (v8i32 _1, int _2)
++{
++  return __builtin_lasx_xvreplve_w (_1, _2);
++}
++v4i64
++__lasx_xvreplve_d (v4i64 _1, int _2)
++{
++  return __builtin_lasx_xvreplve_d (_1, _2);
++}
++v8i32
++__lasx_xvpermi_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvpermi_w (_1, _2, 1);
++}
++v32u8
++__lasx_xvandn_v (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvandn_v (_1, _2);
++}
++v32i8
++__lasx_xvneg_b (v32i8 _1)
++{
++  return __builtin_lasx_xvneg_b (_1);
++}
++v16i16
++__lasx_xvneg_h (v16i16 _1)
++{
++  return __builtin_lasx_xvneg_h (_1);
++}
++v8i32
++__lasx_xvneg_w (v8i32 _1)
++{
++  return __builtin_lasx_xvneg_w (_1);
++}
++v4i64
++__lasx_xvneg_d (v4i64 _1)
++{
++  return __builtin_lasx_xvneg_d (_1);
++}
++v32i8
++__lasx_xvmuh_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmuh_b (_1, _2);
++}
++v16i16
++__lasx_xvmuh_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmuh_h (_1, _2);
++}
++v8i32
++__lasx_xvmuh_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmuh_w (_1, _2);
++}
++v4i64
++__lasx_xvmuh_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmuh_d (_1, _2);
++}
++v32u8
++__lasx_xvmuh_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvmuh_bu (_1, _2);
++}
++v16u16
++__lasx_xvmuh_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvmuh_hu (_1, _2);
++}
++v8u32
++__lasx_xvmuh_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvmuh_wu (_1, _2);
++}
++v4u64
++__lasx_xvmuh_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvmuh_du (_1, _2);
++}
++v16i16
++__lasx_xvsllwil_h_b (v32i8 _1)
++{
++  return __builtin_lasx_xvsllwil_h_b (_1, 1);
++}
++v8i32
++__lasx_xvsllwil_w_h (v16i16 _1)
++{
++  return __builtin_lasx_xvsllwil_w_h (_1, 1);
++}
++v4i64
++__lasx_xvsllwil_d_w (v8i32 _1)
++{
++  return __builtin_lasx_xvsllwil_d_w (_1, 1);
++}
++v16u16
++__lasx_xvsllwil_hu_bu (v32u8 _1)
++{
++  return __builtin_lasx_xvsllwil_hu_bu (_1, 1);
++}
++v8u32
++__lasx_xvsllwil_wu_hu (v16u16 _1)
++{
++  return __builtin_lasx_xvsllwil_wu_hu (_1, 1);
++}
++v4u64
++__lasx_xvsllwil_du_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvsllwil_du_wu (_1, 1);
++}
++v32i8
++__lasx_xvsran_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsran_b_h (_1, _2);
++}
++v16i16
++__lasx_xvsran_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsran_h_w (_1, _2);
++}
++v8i32
++__lasx_xvsran_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsran_w_d (_1, _2);
++}
++v32i8
++__lasx_xvssran_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssran_b_h (_1, _2);
++}
++v16i16
++__lasx_xvssran_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssran_h_w (_1, _2);
++}
++v8i32
++__lasx_xvssran_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssran_w_d (_1, _2);
++}
++v32u8
++__lasx_xvssran_bu_h (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvssran_bu_h (_1, _2);
++}
++v16u16
++__lasx_xvssran_hu_w (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvssran_hu_w (_1, _2);
++}
++v8u32
++__lasx_xvssran_wu_d (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvssran_wu_d (_1, _2);
++}
++v32i8
++__lasx_xvsrarn_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrarn_b_h (_1, _2);
++}
++v16i16
++__lasx_xvsrarn_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrarn_h_w (_1, _2);
++}
++v8i32
++__lasx_xvsrarn_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrarn_w_d (_1, _2);
++}
++v32i8
++__lasx_xvssrarn_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrarn_b_h (_1, _2);
++}
++v16i16
++__lasx_xvssrarn_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrarn_h_w (_1, _2);
++}
++v8i32
++__lasx_xvssrarn_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrarn_w_d (_1, _2);
++}
++v32u8
++__lasx_xvssrarn_bu_h (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvssrarn_bu_h (_1, _2);
++}
++v16u16
++__lasx_xvssrarn_hu_w (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvssrarn_hu_w (_1, _2);
++}
++v8u32
++__lasx_xvssrarn_wu_d (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvssrarn_wu_d (_1, _2);
++}
++v32i8
++__lasx_xvsrln_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrln_b_h (_1, _2);
++}
++v16i16
++__lasx_xvsrln_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrln_h_w (_1, _2);
++}
++v8i32
++__lasx_xvsrln_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrln_w_d (_1, _2);
++}
++v32u8
++__lasx_xvssrln_bu_h (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvssrln_bu_h (_1, _2);
++}
++v16u16
++__lasx_xvssrln_hu_w (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvssrln_hu_w (_1, _2);
++}
++v8u32
++__lasx_xvssrln_wu_d (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvssrln_wu_d (_1, _2);
++}
++v32i8
++__lasx_xvsrlrn_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrlrn_b_h (_1, _2);
++}
++v16i16
++__lasx_xvsrlrn_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrlrn_h_w (_1, _2);
++}
++v8i32
++__lasx_xvsrlrn_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrlrn_w_d (_1, _2);
++}
++v32u8
++__lasx_xvssrlrn_bu_h (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvssrlrn_bu_h (_1, _2);
++}
++v16u16
++__lasx_xvssrlrn_hu_w (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvssrlrn_hu_w (_1, _2);
++}
++v8u32
++__lasx_xvssrlrn_wu_d (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvssrlrn_wu_d (_1, _2);
++}
++v32i8
++__lasx_xvfrstpi_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvfrstpi_b (_1, _2, 1);
++}
++v16i16
++__lasx_xvfrstpi_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvfrstpi_h (_1, _2, 1);
++}
++v32i8
++__lasx_xvfrstp_b (v32i8 _1, v32i8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvfrstp_b (_1, _2, _3);
++}
++v16i16
++__lasx_xvfrstp_h (v16i16 _1, v16i16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvfrstp_h (_1, _2, _3);
++}
++v4i64
++__lasx_xvshuf4i_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvshuf4i_d (_1, _2, 1);
++}
++v32i8
++__lasx_xvbsrl_v (v32i8 _1)
++{
++  return __builtin_lasx_xvbsrl_v (_1, 1);
++}
++v32i8
++__lasx_xvbsll_v (v32i8 _1)
++{
++  return __builtin_lasx_xvbsll_v (_1, 1);
++}
++v32i8
++__lasx_xvextrins_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvextrins_b (_1, _2, 1);
++}
++v16i16
++__lasx_xvextrins_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvextrins_h (_1, _2, 1);
++}
++v8i32
++__lasx_xvextrins_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvextrins_w (_1, _2, 1);
++}
++v4i64
++__lasx_xvextrins_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvextrins_d (_1, _2, 1);
++}
++v32i8
++__lasx_xvmskltz_b (v32i8 _1)
++{
++  return __builtin_lasx_xvmskltz_b (_1);
++}
++v16i16
++__lasx_xvmskltz_h (v16i16 _1)
++{
++  return __builtin_lasx_xvmskltz_h (_1);
++}
++v8i32
++__lasx_xvmskltz_w (v8i32 _1)
++{
++  return __builtin_lasx_xvmskltz_w (_1);
++}
++v4i64
++__lasx_xvmskltz_d (v4i64 _1)
++{
++  return __builtin_lasx_xvmskltz_d (_1);
++}
++v32i8
++__lasx_xvsigncov_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsigncov_b (_1, _2);
++}
++v16i16
++__lasx_xvsigncov_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsigncov_h (_1, _2);
++}
++v8i32
++__lasx_xvsigncov_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsigncov_w (_1, _2);
++}
++v4i64
++__lasx_xvsigncov_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsigncov_d (_1, _2);
++}
++v8f32
++__lasx_xvfmadd_s (v8f32 _1, v8f32 _2, v8f32 _3)
++{
++  return __builtin_lasx_xvfmadd_s (_1, _2, _3);
++}
++v4f64
++__lasx_xvfmadd_d (v4f64 _1, v4f64 _2, v4f64 _3)
++{
++  return __builtin_lasx_xvfmadd_d (_1, _2, _3);
++}
++v8f32
++__lasx_xvfmsub_s (v8f32 _1, v8f32 _2, v8f32 _3)
++{
++  return __builtin_lasx_xvfmsub_s (_1, _2, _3);
++}
++v4f64
++__lasx_xvfmsub_d (v4f64 _1, v4f64 _2, v4f64 _3)
++{
++  return __builtin_lasx_xvfmsub_d (_1, _2, _3);
++}
++v8f32
++__lasx_xvfnmadd_s (v8f32 _1, v8f32 _2, v8f32 _3)
++{
++  return __builtin_lasx_xvfnmadd_s (_1, _2, _3);
++}
++v4f64
++__lasx_xvfnmadd_d (v4f64 _1, v4f64 _2, v4f64 _3)
++{
++  return __builtin_lasx_xvfnmadd_d (_1, _2, _3);
++}
++v8f32
++__lasx_xvfnmsub_s (v8f32 _1, v8f32 _2, v8f32 _3)
++{
++  return __builtin_lasx_xvfnmsub_s (_1, _2, _3);
++}
++v4f64
++__lasx_xvfnmsub_d (v4f64 _1, v4f64 _2, v4f64 _3)
++{
++  return __builtin_lasx_xvfnmsub_d (_1, _2, _3);
++}
++v8i32
++__lasx_xvftintrne_w_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrne_w_s (_1);
++}
++v4i64
++__lasx_xvftintrne_l_d (v4f64 _1)
++{
++  return __builtin_lasx_xvftintrne_l_d (_1);
++}
++v8i32
++__lasx_xvftintrp_w_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrp_w_s (_1);
++}
++v4i64
++__lasx_xvftintrp_l_d (v4f64 _1)
++{
++  return __builtin_lasx_xvftintrp_l_d (_1);
++}
++v8i32
++__lasx_xvftintrm_w_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrm_w_s (_1);
++}
++v4i64
++__lasx_xvftintrm_l_d (v4f64 _1)
++{
++  return __builtin_lasx_xvftintrm_l_d (_1);
++}
++v8i32
++__lasx_xvftint_w_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvftint_w_d (_1, _2);
++}
++v8f32
++__lasx_xvffint_s_l (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvffint_s_l (_1, _2);
++}
++v8i32
++__lasx_xvftintrz_w_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvftintrz_w_d (_1, _2);
++}
++v8i32
++__lasx_xvftintrp_w_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvftintrp_w_d (_1, _2);
++}
++v8i32
++__lasx_xvftintrm_w_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvftintrm_w_d (_1, _2);
++}
++v8i32
++__lasx_xvftintrne_w_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvftintrne_w_d (_1, _2);
++}
++v4i64
++__lasx_xvftinth_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftinth_l_s (_1);
++}
++v4i64
++__lasx_xvftintl_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintl_l_s (_1);
++}
++v4f64
++__lasx_xvffinth_d_w (v8i32 _1)
++{
++  return __builtin_lasx_xvffinth_d_w (_1);
++}
++v4f64
++__lasx_xvffintl_d_w (v8i32 _1)
++{
++  return __builtin_lasx_xvffintl_d_w (_1);
++}
++v4i64
++__lasx_xvftintrzh_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrzh_l_s (_1);
++}
++v4i64
++__lasx_xvftintrzl_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrzl_l_s (_1);
++}
++v4i64
++__lasx_xvftintrph_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrph_l_s (_1);
++}
++v4i64
++__lasx_xvftintrpl_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrpl_l_s (_1);
++}
++v4i64
++__lasx_xvftintrmh_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrmh_l_s (_1);
++}
++v4i64
++__lasx_xvftintrml_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrml_l_s (_1);
++}
++v4i64
++__lasx_xvftintrneh_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrneh_l_s (_1);
++}
++v4i64
++__lasx_xvftintrnel_l_s (v8f32 _1)
++{
++  return __builtin_lasx_xvftintrnel_l_s (_1);
++}
++v8f32
++__lasx_xvfrintrne_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrintrne_s (_1);
++}
++v4f64
++__lasx_xvfrintrne_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrintrne_d (_1);
++}
++v8f32
++__lasx_xvfrintrz_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrintrz_s (_1);
++}
++v4f64
++__lasx_xvfrintrz_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrintrz_d (_1);
++}
++v8f32
++__lasx_xvfrintrp_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrintrp_s (_1);
++}
++v4f64
++__lasx_xvfrintrp_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrintrp_d (_1);
++}
++v8f32
++__lasx_xvfrintrm_s (v8f32 _1)
++{
++  return __builtin_lasx_xvfrintrm_s (_1);
++}
++v4f64
++__lasx_xvfrintrm_d (v4f64 _1)
++{
++  return __builtin_lasx_xvfrintrm_d (_1);
++}
++v32i8
++__lasx_xvld (void *_1)
++{
++  return __builtin_lasx_xvld (_1, 1);
++}
++void
++__lasx_xvst (v32i8 _1, void *_2)
++{
++  return __builtin_lasx_xvst (_1, _2, 1);
++}
++void
++__lasx_xvstelm_b (v32i8 _1, void *_2)
++{
++  return __builtin_lasx_xvstelm_b (_1, _2, 1, 1);
++}
++void
++__lasx_xvstelm_h (v16i16 _1, void *_2)
++{
++  return __builtin_lasx_xvstelm_h (_1, _2, 2, 1);
++}
++void
++__lasx_xvstelm_w (v8i32 _1, void *_2)
++{
++  return __builtin_lasx_xvstelm_w (_1, _2, 4, 1);
++}
++void
++__lasx_xvstelm_d (v4i64 _1, void *_2)
++{
++  return __builtin_lasx_xvstelm_d (_1, _2, 8, 1);
++}
++v8i32
++__lasx_xvinsve0_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvinsve0_w (_1, _2, 1);
++}
++v4i64
++__lasx_xvinsve0_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvinsve0_d (_1, _2, 1);
++}
++v8i32
++__lasx_xvpickve_w (v8i32 _1)
++{
++  return __builtin_lasx_xvpickve_w (_1, 1);
++}
++v4i64
++__lasx_xvpickve_d (v4i64 _1)
++{
++  return __builtin_lasx_xvpickve_d (_1, 1);
++}
++v32i8
++__lasx_xvssrlrn_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrlrn_b_h (_1, _2);
++}
++v16i16
++__lasx_xvssrlrn_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrlrn_h_w (_1, _2);
++}
++v8i32
++__lasx_xvssrlrn_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrlrn_w_d (_1, _2);
++}
++v32i8
++__lasx_xvssrln_b_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrln_b_h (_1, _2);
++}
++v16i16
++__lasx_xvssrln_h_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrln_h_w (_1, _2);
++}
++v8i32
++__lasx_xvssrln_w_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrln_w_d (_1, _2);
++}
++v32i8
++__lasx_xvorn_v (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvorn_v (_1, _2);
++}
++v4i64
++__lasx_xvldi ()
++{
++  return __builtin_lasx_xvldi (1);
++}
++v32i8
++__lasx_xvldx (void *_1)
++{
++  return __builtin_lasx_xvldx (_1, 1);
++}
++void
++__lasx_xvstx (v32i8 _1, void *_2)
++{
++  return __builtin_lasx_xvstx (_1, _2, 1);
++}
++v4u64
++__lasx_xvextl_qu_du (v4u64 _1)
++{
++  return __builtin_lasx_xvextl_qu_du (_1);
++}
++v8i32
++__lasx_xvinsgr2vr_w (v8i32 _1)
++{
++  return __builtin_lasx_xvinsgr2vr_w (_1, 1, 1);
++}
++v4i64
++__lasx_xvinsgr2vr_d (v4i64 _1)
++{
++  return __builtin_lasx_xvinsgr2vr_d (_1, 1, 1);
++}
++v32i8
++__lasx_xvreplve0_b (v32i8 _1)
++{
++  return __builtin_lasx_xvreplve0_b (_1);
++}
++v16i16
++__lasx_xvreplve0_h (v16i16 _1)
++{
++  return __builtin_lasx_xvreplve0_h (_1);
++}
++v8i32
++__lasx_xvreplve0_w (v8i32 _1)
++{
++  return __builtin_lasx_xvreplve0_w (_1);
++}
++v4i64
++__lasx_xvreplve0_d (v4i64 _1)
++{
++  return __builtin_lasx_xvreplve0_d (_1);
++}
++v32i8
++__lasx_xvreplve0_q (v32i8 _1)
++{
++  return __builtin_lasx_xvreplve0_q (_1);
++}
++v16i16
++__lasx_vext2xv_h_b (v32i8 _1)
++{
++  return __builtin_lasx_vext2xv_h_b (_1);
++}
++v8i32
++__lasx_vext2xv_w_h (v16i16 _1)
++{
++  return __builtin_lasx_vext2xv_w_h (_1);
++}
++v4i64
++__lasx_vext2xv_d_w (v8i32 _1)
++{
++  return __builtin_lasx_vext2xv_d_w (_1);
++}
++v8i32
++__lasx_vext2xv_w_b (v32i8 _1)
++{
++  return __builtin_lasx_vext2xv_w_b (_1);
++}
++v4i64
++__lasx_vext2xv_d_h (v16i16 _1)
++{
++  return __builtin_lasx_vext2xv_d_h (_1);
++}
++v4i64
++__lasx_vext2xv_d_b (v32i8 _1)
++{
++  return __builtin_lasx_vext2xv_d_b (_1);
++}
++v16i16
++__lasx_vext2xv_hu_bu (v32i8 _1)
++{
++  return __builtin_lasx_vext2xv_hu_bu (_1);
++}
++v8i32
++__lasx_vext2xv_wu_hu (v16i16 _1)
++{
++  return __builtin_lasx_vext2xv_wu_hu (_1);
++}
++v4i64
++__lasx_vext2xv_du_wu (v8i32 _1)
++{
++  return __builtin_lasx_vext2xv_du_wu (_1);
++}
++v8i32
++__lasx_vext2xv_wu_bu (v32i8 _1)
++{
++  return __builtin_lasx_vext2xv_wu_bu (_1);
++}
++v4i64
++__lasx_vext2xv_du_hu (v16i16 _1)
++{
++  return __builtin_lasx_vext2xv_du_hu (_1);
++}
++v4i64
++__lasx_vext2xv_du_bu (v32i8 _1)
++{
++  return __builtin_lasx_vext2xv_du_bu (_1);
++}
++v32i8
++__lasx_xvpermi_q (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvpermi_q (_1, _2, 1);
++}
++v4i64
++__lasx_xvpermi_d (v4i64 _1)
++{
++  return __builtin_lasx_xvpermi_d (_1, 1);
++}
++v8i32
++__lasx_xvperm_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvperm_w (_1, _2);
++}
++v32i8
++__lasx_xvldrepl_b (void *_1)
++{
++  return __builtin_lasx_xvldrepl_b (_1, 1);
++}
++v16i16
++__lasx_xvldrepl_h (void *_1)
++{
++  return __builtin_lasx_xvldrepl_h (_1, 2);
++}
++v8i32
++__lasx_xvldrepl_w (void *_1)
++{
++  return __builtin_lasx_xvldrepl_w (_1, 4);
++}
++v4i64
++__lasx_xvldrepl_d (void *_1)
++{
++  return __builtin_lasx_xvldrepl_d (_1, 8);
++}
++int
++__lasx_xvpickve2gr_w (v8i32 _1)
++{
++  return __builtin_lasx_xvpickve2gr_w (_1, 1);
++}
++unsigned int
++__lasx_xvpickve2gr_wu (v8i32 _1)
++{
++  return __builtin_lasx_xvpickve2gr_wu (_1, 1);
++}
++long
++__lasx_xvpickve2gr_d (v4i64 _1)
++{
++  return __builtin_lasx_xvpickve2gr_d (_1, 1);
++}
++unsigned long int
++__lasx_xvpickve2gr_du (v4i64 _1)
++{
++  return __builtin_lasx_xvpickve2gr_du (_1, 1);
++}
++v4i64
++__lasx_xvaddwev_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvaddwev_q_d (_1, _2);
++}
++v4i64
++__lasx_xvaddwev_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvaddwev_d_w (_1, _2);
++}
++v8i32
++__lasx_xvaddwev_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvaddwev_w_h (_1, _2);
++}
++v16i16
++__lasx_xvaddwev_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvaddwev_h_b (_1, _2);
++}
++v4i64
++__lasx_xvaddwev_q_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvaddwev_q_du (_1, _2);
++}
++v4i64
++__lasx_xvaddwev_d_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvaddwev_d_wu (_1, _2);
++}
++v8i32
++__lasx_xvaddwev_w_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvaddwev_w_hu (_1, _2);
++}
++v16i16
++__lasx_xvaddwev_h_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvaddwev_h_bu (_1, _2);
++}
++v4i64
++__lasx_xvsubwev_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsubwev_q_d (_1, _2);
++}
++v4i64
++__lasx_xvsubwev_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsubwev_d_w (_1, _2);
++}
++v8i32
++__lasx_xvsubwev_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsubwev_w_h (_1, _2);
++}
++v16i16
++__lasx_xvsubwev_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsubwev_h_b (_1, _2);
++}
++v4i64
++__lasx_xvsubwev_q_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvsubwev_q_du (_1, _2);
++}
++v4i64
++__lasx_xvsubwev_d_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvsubwev_d_wu (_1, _2);
++}
++v8i32
++__lasx_xvsubwev_w_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvsubwev_w_hu (_1, _2);
++}
++v16i16
++__lasx_xvsubwev_h_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvsubwev_h_bu (_1, _2);
++}
++v4i64
++__lasx_xvmulwev_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmulwev_q_d (_1, _2);
++}
++v4i64
++__lasx_xvmulwev_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmulwev_d_w (_1, _2);
++}
++v8i32
++__lasx_xvmulwev_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmulwev_w_h (_1, _2);
++}
++v16i16
++__lasx_xvmulwev_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmulwev_h_b (_1, _2);
++}
++v4i64
++__lasx_xvmulwev_q_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvmulwev_q_du (_1, _2);
++}
++v4i64
++__lasx_xvmulwev_d_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvmulwev_d_wu (_1, _2);
++}
++v8i32
++__lasx_xvmulwev_w_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvmulwev_w_hu (_1, _2);
++}
++v16i16
++__lasx_xvmulwev_h_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvmulwev_h_bu (_1, _2);
++}
++v4i64
++__lasx_xvaddwod_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvaddwod_q_d (_1, _2);
++}
++v4i64
++__lasx_xvaddwod_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvaddwod_d_w (_1, _2);
++}
++v8i32
++__lasx_xvaddwod_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvaddwod_w_h (_1, _2);
++}
++v16i16
++__lasx_xvaddwod_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvaddwod_h_b (_1, _2);
++}
++v4i64
++__lasx_xvaddwod_q_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvaddwod_q_du (_1, _2);
++}
++v4i64
++__lasx_xvaddwod_d_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvaddwod_d_wu (_1, _2);
++}
++v8i32
++__lasx_xvaddwod_w_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvaddwod_w_hu (_1, _2);
++}
++v16i16
++__lasx_xvaddwod_h_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvaddwod_h_bu (_1, _2);
++}
++v4i64
++__lasx_xvsubwod_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsubwod_q_d (_1, _2);
++}
++v4i64
++__lasx_xvsubwod_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsubwod_d_w (_1, _2);
++}
++v8i32
++__lasx_xvsubwod_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsubwod_w_h (_1, _2);
++}
++v16i16
++__lasx_xvsubwod_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsubwod_h_b (_1, _2);
++}
++v4i64
++__lasx_xvsubwod_q_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvsubwod_q_du (_1, _2);
++}
++v4i64
++__lasx_xvsubwod_d_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvsubwod_d_wu (_1, _2);
++}
++v8i32
++__lasx_xvsubwod_w_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvsubwod_w_hu (_1, _2);
++}
++v16i16
++__lasx_xvsubwod_h_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvsubwod_h_bu (_1, _2);
++}
++v4i64
++__lasx_xvmulwod_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmulwod_q_d (_1, _2);
++}
++v4i64
++__lasx_xvmulwod_d_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmulwod_d_w (_1, _2);
++}
++v8i32
++__lasx_xvmulwod_w_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmulwod_w_h (_1, _2);
++}
++v16i16
++__lasx_xvmulwod_h_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmulwod_h_b (_1, _2);
++}
++v4i64
++__lasx_xvmulwod_q_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvmulwod_q_du (_1, _2);
++}
++v4i64
++__lasx_xvmulwod_d_wu (v8u32 _1, v8u32 _2)
++{
++  return __builtin_lasx_xvmulwod_d_wu (_1, _2);
++}
++v8i32
++__lasx_xvmulwod_w_hu (v16u16 _1, v16u16 _2)
++{
++  return __builtin_lasx_xvmulwod_w_hu (_1, _2);
++}
++v16i16
++__lasx_xvmulwod_h_bu (v32u8 _1, v32u8 _2)
++{
++  return __builtin_lasx_xvmulwod_h_bu (_1, _2);
++}
++v4i64
++__lasx_xvaddwev_d_wu_w (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvaddwev_d_wu_w (_1, _2);
++}
++v8i32
++__lasx_xvaddwev_w_hu_h (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvaddwev_w_hu_h (_1, _2);
++}
++v16i16
++__lasx_xvaddwev_h_bu_b (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvaddwev_h_bu_b (_1, _2);
++}
++v4i64
++__lasx_xvmulwev_d_wu_w (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmulwev_d_wu_w (_1, _2);
++}
++v8i32
++__lasx_xvmulwev_w_hu_h (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmulwev_w_hu_h (_1, _2);
++}
++v16i16
++__lasx_xvmulwev_h_bu_b (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmulwev_h_bu_b (_1, _2);
++}
++v4i64
++__lasx_xvaddwod_d_wu_w (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvaddwod_d_wu_w (_1, _2);
++}
++v8i32
++__lasx_xvaddwod_w_hu_h (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvaddwod_w_hu_h (_1, _2);
++}
++v16i16
++__lasx_xvaddwod_h_bu_b (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvaddwod_h_bu_b (_1, _2);
++}
++v4i64
++__lasx_xvmulwod_d_wu_w (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvmulwod_d_wu_w (_1, _2);
++}
++v8i32
++__lasx_xvmulwod_w_hu_h (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvmulwod_w_hu_h (_1, _2);
++}
++v16i16
++__lasx_xvmulwod_h_bu_b (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvmulwod_h_bu_b (_1, _2);
++}
++v4i64
++__lasx_xvhaddw_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvhaddw_q_d (_1, _2);
++}
++v4u64
++__lasx_xvhaddw_qu_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvhaddw_qu_du (_1, _2);
++}
++v4i64
++__lasx_xvhsubw_q_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvhsubw_q_d (_1, _2);
++}
++v4u64
++__lasx_xvhsubw_qu_du (v4u64 _1, v4u64 _2)
++{
++  return __builtin_lasx_xvhsubw_qu_du (_1, _2);
++}
++v4i64
++__lasx_xvmaddwev_q_d (v4i64 _1, v4i64 _2, v4i64 _3)
++{
++  return __builtin_lasx_xvmaddwev_q_d (_1, _2, _3);
++}
++v4i64
++__lasx_xvmaddwev_d_w (v4i64 _1, v8i32 _2, v8i32 _3)
++{
++  return __builtin_lasx_xvmaddwev_d_w (_1, _2, _3);
++}
++v8i32
++__lasx_xvmaddwev_w_h (v8i32 _1, v16i16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvmaddwev_w_h (_1, _2, _3);
++}
++v16i16
++__lasx_xvmaddwev_h_b (v16i16 _1, v32i8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvmaddwev_h_b (_1, _2, _3);
++}
++v4u64
++__lasx_xvmaddwev_q_du (v4u64 _1, v4u64 _2, v4u64 _3)
++{
++  return __builtin_lasx_xvmaddwev_q_du (_1, _2, _3);
++}
++v4u64
++__lasx_xvmaddwev_d_wu (v4u64 _1, v8u32 _2, v8u32 _3)
++{
++  return __builtin_lasx_xvmaddwev_d_wu (_1, _2, _3);
++}
++v8u32
++__lasx_xvmaddwev_w_hu (v8u32 _1, v16u16 _2, v16u16 _3)
++{
++  return __builtin_lasx_xvmaddwev_w_hu (_1, _2, _3);
++}
++v16u16
++__lasx_xvmaddwev_h_bu (v16u16 _1, v32u8 _2, v32u8 _3)
++{
++  return __builtin_lasx_xvmaddwev_h_bu (_1, _2, _3);
++}
++v4i64
++__lasx_xvmaddwod_q_d (v4i64 _1, v4i64 _2, v4i64 _3)
++{
++  return __builtin_lasx_xvmaddwod_q_d (_1, _2, _3);
++}
++v4i64
++__lasx_xvmaddwod_d_w (v4i64 _1, v8i32 _2, v8i32 _3)
++{
++  return __builtin_lasx_xvmaddwod_d_w (_1, _2, _3);
++}
++v8i32
++__lasx_xvmaddwod_w_h (v8i32 _1, v16i16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvmaddwod_w_h (_1, _2, _3);
++}
++v16i16
++__lasx_xvmaddwod_h_b (v16i16 _1, v32i8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvmaddwod_h_b (_1, _2, _3);
++}
++v4u64
++__lasx_xvmaddwod_q_du (v4u64 _1, v4u64 _2, v4u64 _3)
++{
++  return __builtin_lasx_xvmaddwod_q_du (_1, _2, _3);
++}
++v4u64
++__lasx_xvmaddwod_d_wu (v4u64 _1, v8u32 _2, v8u32 _3)
++{
++  return __builtin_lasx_xvmaddwod_d_wu (_1, _2, _3);
++}
++v8u32
++__lasx_xvmaddwod_w_hu (v8u32 _1, v16u16 _2, v16u16 _3)
++{
++  return __builtin_lasx_xvmaddwod_w_hu (_1, _2, _3);
++}
++v16u16
++__lasx_xvmaddwod_h_bu (v16u16 _1, v32u8 _2, v32u8 _3)
++{
++  return __builtin_lasx_xvmaddwod_h_bu (_1, _2, _3);
++}
++v4i64
++__lasx_xvmaddwev_q_du_d (v4i64 _1, v4u64 _2, v4i64 _3)
++{
++  return __builtin_lasx_xvmaddwev_q_du_d (_1, _2, _3);
++}
++v4i64
++__lasx_xvmaddwev_d_wu_w (v4i64 _1, v8u32 _2, v8i32 _3)
++{
++  return __builtin_lasx_xvmaddwev_d_wu_w (_1, _2, _3);
++}
++v8i32
++__lasx_xvmaddwev_w_hu_h (v8i32 _1, v16u16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvmaddwev_w_hu_h (_1, _2, _3);
++}
++v16i16
++__lasx_xvmaddwev_h_bu_b (v16i16 _1, v32u8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvmaddwev_h_bu_b (_1, _2, _3);
++}
++v4i64
++__lasx_xvmaddwod_q_du_d (v4i64 _1, v4u64 _2, v4i64 _3)
++{
++  return __builtin_lasx_xvmaddwod_q_du_d (_1, _2, _3);
++}
++v4i64
++__lasx_xvmaddwod_d_wu_w (v4i64 _1, v8u32 _2, v8i32 _3)
++{
++  return __builtin_lasx_xvmaddwod_d_wu_w (_1, _2, _3);
++}
++v8i32
++__lasx_xvmaddwod_w_hu_h (v8i32 _1, v16u16 _2, v16i16 _3)
++{
++  return __builtin_lasx_xvmaddwod_w_hu_h (_1, _2, _3);
++}
++v16i16
++__lasx_xvmaddwod_h_bu_b (v16i16 _1, v32u8 _2, v32i8 _3)
++{
++  return __builtin_lasx_xvmaddwod_h_bu_b (_1, _2, _3);
++}
++v32i8
++__lasx_xvrotr_b (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvrotr_b (_1, _2);
++}
++v16i16
++__lasx_xvrotr_h (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvrotr_h (_1, _2);
++}
++v8i32
++__lasx_xvrotr_w (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvrotr_w (_1, _2);
++}
++v4i64
++__lasx_xvrotr_d (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvrotr_d (_1, _2);
++}
++v4i64
++__lasx_xvadd_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvadd_q (_1, _2);
++}
++v4i64
++__lasx_xvsub_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsub_q (_1, _2);
++}
++v4i64
++__lasx_xvaddwev_q_du_d (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvaddwev_q_du_d (_1, _2);
++}
++v4i64
++__lasx_xvaddwod_q_du_d (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvaddwod_q_du_d (_1, _2);
++}
++v4i64
++__lasx_xvmulwev_q_du_d (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmulwev_q_du_d (_1, _2);
++}
++v4i64
++__lasx_xvmulwod_q_du_d (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvmulwod_q_du_d (_1, _2);
++}
++v32i8
++__lasx_xvmskgez_b (v32i8 _1)
++{
++  return __builtin_lasx_xvmskgez_b (_1);
++}
++v32i8
++__lasx_xvmsknz_b (v32i8 _1)
++{
++  return __builtin_lasx_xvmsknz_b (_1);
++}
++v16i16
++__lasx_xvexth_h_b (v32i8 _1)
++{
++  return __builtin_lasx_xvexth_h_b (_1);
++}
++v8i32
++__lasx_xvexth_w_h (v16i16 _1)
++{
++  return __builtin_lasx_xvexth_w_h (_1);
++}
++v4i64
++__lasx_xvexth_d_w (v8i32 _1)
++{
++  return __builtin_lasx_xvexth_d_w (_1);
++}
++v4i64
++__lasx_xvexth_q_d (v4i64 _1)
++{
++  return __builtin_lasx_xvexth_q_d (_1);
++}
++v16u16
++__lasx_xvexth_hu_bu (v32u8 _1)
++{
++  return __builtin_lasx_xvexth_hu_bu (_1);
++}
++v8u32
++__lasx_xvexth_wu_hu (v16u16 _1)
++{
++  return __builtin_lasx_xvexth_wu_hu (_1);
++}
++v4u64
++__lasx_xvexth_du_wu (v8u32 _1)
++{
++  return __builtin_lasx_xvexth_du_wu (_1);
++}
++v4u64
++__lasx_xvexth_qu_du (v4u64 _1)
++{
++  return __builtin_lasx_xvexth_qu_du (_1);
++}
++v32i8
++__lasx_xvrotri_b (v32i8 _1)
++{
++  return __builtin_lasx_xvrotri_b (_1, 1);
++}
++v16i16
++__lasx_xvrotri_h (v16i16 _1)
++{
++  return __builtin_lasx_xvrotri_h (_1, 1);
++}
++v8i32
++__lasx_xvrotri_w (v8i32 _1)
++{
++  return __builtin_lasx_xvrotri_w (_1, 1);
++}
++v4i64
++__lasx_xvrotri_d (v4i64 _1)
++{
++  return __builtin_lasx_xvrotri_d (_1, 1);
++}
++v4i64
++__lasx_xvextl_q_d (v4i64 _1)
++{
++  return __builtin_lasx_xvextl_q_d (_1);
++}
++v32i8
++__lasx_xvsrlni_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsrlni_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvsrlni_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrlni_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvsrlni_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrlni_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvsrlni_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrlni_d_q (_1, _2, 1);
++}
++v32i8
++__lasx_xvsrlrni_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsrlrni_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvsrlrni_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrlrni_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvsrlrni_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrlrni_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvsrlrni_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrlrni_d_q (_1, _2, 1);
++}
++v32i8
++__lasx_xvssrlni_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrlni_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvssrlni_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrlni_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvssrlni_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrlni_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvssrlni_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrlni_d_q (_1, _2, 1);
++}
++v32u8
++__lasx_xvssrlni_bu_h (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrlni_bu_h (_1, _2, 1);
++}
++v16u16
++__lasx_xvssrlni_hu_w (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrlni_hu_w (_1, _2, 1);
++}
++v8u32
++__lasx_xvssrlni_wu_d (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrlni_wu_d (_1, _2, 1);
++}
++v4u64
++__lasx_xvssrlni_du_q (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrlni_du_q (_1, _2, 1);
++}
++v32i8
++__lasx_xvssrlrni_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrlrni_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvssrlrni_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrlrni_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvssrlrni_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrlrni_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvssrlrni_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrlrni_d_q (_1, _2, 1);
++}
++v32u8
++__lasx_xvssrlrni_bu_h (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrlrni_bu_h (_1, _2, 1);
++}
++v16u16
++__lasx_xvssrlrni_hu_w (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrlrni_hu_w (_1, _2, 1);
++}
++v8u32
++__lasx_xvssrlrni_wu_d (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrlrni_wu_d (_1, _2, 1);
++}
++v4u64
++__lasx_xvssrlrni_du_q (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrlrni_du_q (_1, _2, 1);
++}
++v32i8
++__lasx_xvsrani_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsrani_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvsrani_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrani_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvsrani_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrani_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvsrani_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrani_d_q (_1, _2, 1);
++}
++v32i8
++__lasx_xvsrarni_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvsrarni_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvsrarni_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvsrarni_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvsrarni_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvsrarni_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvsrarni_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvsrarni_d_q (_1, _2, 1);
++}
++v32i8
++__lasx_xvssrani_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrani_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvssrani_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrani_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvssrani_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrani_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvssrani_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrani_d_q (_1, _2, 1);
++}
++v32u8
++__lasx_xvssrani_bu_h (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrani_bu_h (_1, _2, 1);
++}
++v16u16
++__lasx_xvssrani_hu_w (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrani_hu_w (_1, _2, 1);
++}
++v8u32
++__lasx_xvssrani_wu_d (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrani_wu_d (_1, _2, 1);
++}
++v4u64
++__lasx_xvssrani_du_q (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrani_du_q (_1, _2, 1);
++}
++v32i8
++__lasx_xvssrarni_b_h (v32i8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrarni_b_h (_1, _2, 1);
++}
++v16i16
++__lasx_xvssrarni_h_w (v16i16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrarni_h_w (_1, _2, 1);
++}
++v8i32
++__lasx_xvssrarni_w_d (v8i32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrarni_w_d (_1, _2, 1);
++}
++v4i64
++__lasx_xvssrarni_d_q (v4i64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrarni_d_q (_1, _2, 1);
++}
++v32u8
++__lasx_xvssrarni_bu_h (v32u8 _1, v32i8 _2)
++{
++  return __builtin_lasx_xvssrarni_bu_h (_1, _2, 1);
++}
++v16u16
++__lasx_xvssrarni_hu_w (v16u16 _1, v16i16 _2)
++{
++  return __builtin_lasx_xvssrarni_hu_w (_1, _2, 1);
++}
++v8u32
++__lasx_xvssrarni_wu_d (v8u32 _1, v8i32 _2)
++{
++  return __builtin_lasx_xvssrarni_wu_d (_1, _2, 1);
++}
++v4u64
++__lasx_xvssrarni_du_q (v4u64 _1, v4i64 _2)
++{
++  return __builtin_lasx_xvssrarni_du_q (_1, _2, 1);
++}
++int
++__lasx_xbnz_b (v32u8 _1)
++{
++  return __builtin_lasx_xbnz_b (_1);
++}
++int
++__lasx_xbnz_d (v4u64 _1)
++{
++  return __builtin_lasx_xbnz_d (_1);
++}
++int
++__lasx_xbnz_h (v16u16 _1)
++{
++  return __builtin_lasx_xbnz_h (_1);
++}
++int
++__lasx_xbnz_v (v32u8 _1)
++{
++  return __builtin_lasx_xbnz_v (_1);
++}
++int
++__lasx_xbnz_w (v8u32 _1)
++{
++  return __builtin_lasx_xbnz_w (_1);
++}
++int
++__lasx_xbz_b (v32u8 _1)
++{
++  return __builtin_lasx_xbz_b (_1);
++}
++int
++__lasx_xbz_d (v4u64 _1)
++{
++  return __builtin_lasx_xbz_d (_1);
++}
++int
++__lasx_xbz_h (v16u16 _1)
++{
++  return __builtin_lasx_xbz_h (_1);
++}
++int
++__lasx_xbz_v (v32u8 _1)
++{
++  return __builtin_lasx_xbz_v (_1);
++}
++int
++__lasx_xbz_w (v8u32 _1)
++{
++  return __builtin_lasx_xbz_w (_1);
++}
++v4i64
++__lasx_xvfcmp_caf_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_caf_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_caf_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_caf_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_ceq_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_ceq_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_ceq_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_ceq_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cle_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cle_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cle_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cle_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_clt_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_clt_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_clt_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_clt_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cne_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cne_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cne_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cne_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cor_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cor_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cor_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cor_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cueq_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cueq_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cueq_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cueq_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cule_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cule_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cule_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cule_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cult_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cult_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cult_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cult_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cun_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cun_d (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_cune_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_cune_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cune_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cune_s (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_cun_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_cun_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_saf_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_saf_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_saf_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_saf_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_seq_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_seq_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_seq_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_seq_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sle_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sle_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sle_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sle_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_slt_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_slt_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_slt_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_slt_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sne_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sne_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sne_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sne_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sor_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sor_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sor_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sor_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sueq_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sueq_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sueq_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sueq_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sule_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sule_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sule_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sule_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sult_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sult_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sult_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sult_s (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sun_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sun_d (_1, _2);
++}
++v4i64
++__lasx_xvfcmp_sune_d (v4f64 _1, v4f64 _2)
++{
++  return __builtin_lasx_xvfcmp_sune_d (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sune_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sune_s (_1, _2);
++}
++v8i32
++__lasx_xvfcmp_sun_s (v8f32 _1, v8f32 _2)
++{
++  return __builtin_lasx_xvfcmp_sun_s (_1, _2);
++}
++v4f64
++__lasx_xvpickve_d_f (v4f64 _1)
++{
++  return __builtin_lasx_xvpickve_d_f (_1, 1);
++}
++v8f32
++__lasx_xvpickve_w_f (v8f32 _1)
++{
++  return __builtin_lasx_xvpickve_w_f (_1, 1);
++}
++v32i8
++__lasx_xvrepli_b ()
++{
++  return __builtin_lasx_xvrepli_b (1);
++}
++v4i64
++__lasx_xvrepli_d ()
++{
++  return __builtin_lasx_xvrepli_d (1);
++}
++v16i16
++__lasx_xvrepli_h ()
++{
++  return __builtin_lasx_xvrepli_h (1);
++}
++v8i32
++__lasx_xvrepli_w ()
++{
++  return __builtin_lasx_xvrepli_w (1);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-comparison-and-se.patch b/LoongArch-Add-tests-for-ASX-vector-comparison-and-se.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6c5dc4452e88b2bbe6d5d0f95c0c1f4bc7f9d8e7
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-comparison-and-se.patch
@@ -0,0 +1,5363 @@
+From 9ccb5fcabdf69160eb360da7eab06a207f59334c Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:11:04 +0800
+Subject: [PATCH 113/124] LoongArch: Add tests for ASX vector comparison and
+ selection instruction.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvseq.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvseqi.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvseq.c        | 650 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvseqi.c       | 449 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsle-1.c      | 575 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsle-2.c      | 590 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvslei-1.c     | 515 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvslei-2.c     | 438 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvslt-1.c      | 455 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvslt-2.c      | 620 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvslti-1.c     | 548 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvslti-2.c     | 416 +++++++++++
+ 10 files changed, 5256 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
+new file mode 100644
+index 000000000..2a42386ce
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseq.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100020001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff000000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000095120000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc9da000063f50000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc7387fff6bbfffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f0000007f000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f0000007f000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1555156a1555156a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1555156a1555156a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1555156a1555156a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1555156a1555156a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ffff00ff000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffefd;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00fffffff0;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000005be55bd2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007dfdff4b;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ff00ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ffffff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ff00ff00;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4ffc3f783fc040c0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3fc03f803fc040c0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4ffc3f783fc040c0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3fc03f803fc040c0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffee0000004c0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff050000ff3c0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00f9000000780000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffa80000ff310000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8011ffee804c004c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00faff0500c3ff3c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x80f900f980780078;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0057ffa800ceff31;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff000000ff000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff000000ff000000;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000005500000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001005500020000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000005500000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001005500020000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffefff7f00100080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffefff7f00100080;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff01fb0408;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff01fb0408;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffffffff;
++  __m256i_out = __lasx_xvseq_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000501ffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000701ffffce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000501ffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000701ffffce;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000260a378;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000d02317;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000260a378;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000d02317;
++  *((unsigned long *)&__m256i_op1[3]) = 0x003f020001400200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x003f00ff003f00c4;
++  *((unsigned long *)&__m256i_op1[1]) = 0x003f020001400200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x003f00ff003f00c4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseq_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
+new file mode 100644
+index 000000000..5478d19c1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvseqi.c
+@@ -0,0 +1,449 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffdfe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffdfe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_d (__m256i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00b200b300800080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00b200b300800080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_d (__m256i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_d (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00197d3200197d56;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00197d3200197d56;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_h (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000bdfef907bc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000bdfef907bc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_d (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_b (__m256i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_d (__m256i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_d (__m256i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x800fffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800fffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x800fffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800fffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvseqi_w (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
+new file mode 100644
+index 000000000..ed752df00
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-1.c
+@@ -0,0 +1,575 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000460086;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f0079;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f30028;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000df00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffff00ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff00ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffffff00ff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff00ff00ffff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff000000ff00ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffff00ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000ff00ff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ffffff00ffff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffbfffa0ffffff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffbfffa0ffffff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbfffa004fffd8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbfffa004fffd8000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ffff0000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffff0000ff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffee;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffee;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffdfff80ffdfff80;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffdfff80ffdfff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffdfff80ffdfff80;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffdfff80ffdfff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff;
++  __m256i_out = __lasx_xvsle_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fef7fef7fef7fef;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fef7fef7fef7fef;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fef7fef7fef7fef;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fef7fef7fef7fef;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2aaaaa85aaaaaa85;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2aaa48f4aaaa48f4;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2aaaaa85aaaaaa85;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2aaa48f4aaaa48f4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvsle_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007f00f8ff7fff80;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff6a9d8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007f00f8ff7fff80;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff6a9d8;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fe0100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fe0100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00197d3200197d56;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00197d3200197d56;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ffe0001fffe0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ffe0001fffeffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fdfdfe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00f7000000f70007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00f7000000f70007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff01fffe00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff01fffe00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000002d;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc02dc02dc02dc02d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000002d;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc02dc02dc02dc02d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
+new file mode 100644
+index 000000000..bc98b41af
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsle-2.c
+@@ -0,0 +1,590 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001fe020001fe020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001fe020001fe020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvsle_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8011ffae800c000c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00baff050083ff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80b900b980380038;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0017ffa8008eff31;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000003ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000003ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x05ea05ea05ea05ec;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x05ea05ea05ea05ec;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000feb60000b7d0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000feb60000c7eb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000feb60000b7d0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000feb60000c7eb;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff010ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff010ff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb683007ffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c0df5b41cf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb683007ffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c0df5b41cf;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001497c98ea4fca;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001497c98ea4fca;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010201010204;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010102;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00020421d7d41124;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00020421d7d41124;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x94d7fb5200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsle_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
+new file mode 100644
+index 000000000..06717802c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-1.c
+@@ -0,0 +1,515 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000101ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00010013000100fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00010013000100fb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_b (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x014200c200c200ae;
++  *((unsigned long *)&__m256i_op0[2]) = 0x014200c200c200ae;
++  *((unsigned long *)&__m256i_op0[1]) = 0x014200c200c200ae;
++  *((unsigned long *)&__m256i_op0[0]) = 0x014200c200c200ae;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffff8900000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff8900000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000;
++  __m256i_out = __lasx_xvslei_h (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000460086;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f0079;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f30028;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000df00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfc2f3183ef7ffff7;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, 8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249;
++  *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec;
++  *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc6c6c6c68787878a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8787878a00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_d (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
+new file mode 100644
+index 000000000..093d5640e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslei-2.c
+@@ -0,0 +1,438 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff00;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f3c611818;
++  *((unsigned long *)&__m256i_op0[2]) = 0x032eafee29010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f3c611818;
++  *((unsigned long *)&__m256i_op0[0]) = 0x032eafee29010000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000ffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000ffffff;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000f788f788;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000f788f788;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslei_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00217f19ffde80e6;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00037f94fffc806b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00217f19ffde80e6;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00037f94fffc806b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_hu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op0[0]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x03802fc000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x03802fc000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_wu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001ffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslei_du (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
+new file mode 100644
+index 000000000..ca1f5e94f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-1.c
+@@ -0,0 +1,455 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ff000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ff000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff0000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff0000ffffffff;
++  __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000860601934;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000860601934;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffffffff;
++  __m256i_out = __lasx_xvslt_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000022222221;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3dddddddfbbb3bbc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000022222221;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3dddddddfbbb3bbc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf7f8f7f8f7f8f7f8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf7f8f7f8f7f8f7f8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0df9f8e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0df9f8e;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe0df9f8e;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffe0df9f8e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
+new file mode 100644
+index 000000000..6864f5eb8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslt-2.c
+@@ -0,0 +1,620 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249;
++  *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec;
++  *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0d41c9a7bdd239a7;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0b025d0ef8fdf987;
++  *((unsigned long *)&__m256i_op1[1]) = 0x002944f92da5a708;
++  *((unsigned long *)&__m256i_op1[0]) = 0x038cf4ea999922ef;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff0000ffff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xff000000ffffff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffff00ff;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000017000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000017000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdb801b6d0962003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0024;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9a7f997fff01ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbe632a4f1c3c5653;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000500000005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202031;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202031;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x5252525252525252;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5252525252525252;
++  *((unsigned long *)&__m256i_op1[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000040b200002fd4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007fff0000739c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000040b200002fd4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fff0000739c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbc74c3d108e05422;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbc1e3e6a5cace67c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbc74c3d108e0544a;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbc18e696a86565f4;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbc74c3d108e05422;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbc1e3e6a5cace67c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbc74c3d108e0544a;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbc18e696a86565f4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef87878000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef87878000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000017f00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f03030000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100008000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100007fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100007fff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000003fbfc04;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001fdfe02;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000003fbfc04;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001fdfe02;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010511c54440437;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010511c54440437;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslt_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
+new file mode 100644
+index 000000000..7dd2778a5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-1.c
+@@ -0,0 +1,548 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_b (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe05fc47b400;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffe06003fc000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe05fc47b400;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffe06003fc000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff02000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff02000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_h (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc58a0a0a07070706;
++  *((unsigned long *)&__m256i_op0[2]) = 0x006b60e4180b0023;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1b39153f334b966a;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf1d75d79efcac002;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff90ff81;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff90ff81;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000045000d0005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000045000d0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000004efffe00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000047000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000004efffe00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000047000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_w (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x80000000001529c1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80007073cadc3779;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80000000001529c1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80007073cadc3779;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_d (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
+new file mode 100644
+index 000000000..d93e4314e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslti-2.c
+@@ -0,0 +1,416 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ff00;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_bu (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff;
++  __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffb3b4;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff5ffff4738;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffb3b4;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff5ffff4738;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvslti_hu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000e0e0e0e0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvslti_wu (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007773;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003373;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvslti_du (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-floating-point-co.patch b/LoongArch-Add-tests-for-ASX-vector-floating-point-co.patch
new file mode 100644
index 0000000000000000000000000000000000000000..88069edee17ec906e02430bc2a5c11a58d9372ce
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-floating-point-co.patch
@@ -0,0 +1,7291 @@
+From 5a014f35ac194402adc08945480da44e2c0a772a Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:06:04 +0800
+Subject: [PATCH 112/124] LoongArch: Add tests for ASX vector floating-point
+ conversion instruction.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvffinth.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvftintl.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvfcvt.c       |  528 ++++++
+ .../loongarch/vector/lasx/lasx-xvfcvth.c      |  485 +++++
+ .../loongarch/vector/lasx/lasx-xvffint-1.c    |  375 ++++
+ .../loongarch/vector/lasx/lasx-xvffint-2.c    |  246 +++
+ .../loongarch/vector/lasx/lasx-xvffinth.c     |  262 +++
+ .../loongarch/vector/lasx/lasx-xvfrint_d.c    |  429 +++++
+ .../loongarch/vector/lasx/lasx-xvfrint_s.c    |  723 ++++++++
+ .../loongarch/vector/lasx/lasx-xvftint-1.c    |  471 +++++
+ .../loongarch/vector/lasx/lasx-xvftint-2.c    | 1565 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvftint-3.c    |  511 ++++++
+ .../loongarch/vector/lasx/lasx-xvftintl.c     | 1580 +++++++++++++++++
+ 11 files changed, 7175 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
+new file mode 100644
+index 000000000..116399a7c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvt.c
+@@ -0,0 +1,528 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000003;
++  *((int *)&__m256_op1[6]) = 0x0000000c;
++  *((int *)&__m256_op1[5]) = 0x00000011;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000005;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000008;
++  *((int *)&__m256_op1[0]) = 0x00000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[6]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[5]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[4]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[3]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[2]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[1]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[0]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[7]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[6]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[5]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[4]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[3]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[2]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[1]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[0]) = 0x6d6d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_result[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_result[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_result[0]) = 0x7c007c007c007c00;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00020000;
++  *((int *)&__m256_op1[6]) = 0x00020000;
++  *((int *)&__m256_op1[5]) = 0x00020000;
++  *((int *)&__m256_op1[4]) = 0x00010000;
++  *((int *)&__m256_op1[3]) = 0x00020000;
++  *((int *)&__m256_op1[2]) = 0x00020000;
++  *((int *)&__m256_op1[1]) = 0x00020000;
++  *((int *)&__m256_op1[0]) = 0x00010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x71717171;
++  *((int *)&__m256_op1[6]) = 0x71010101;
++  *((int *)&__m256_op1[5]) = 0x8e8e8e8e;
++  *((int *)&__m256_op1[4]) = 0x8f00ffff;
++  *((int *)&__m256_op1[3]) = 0x71717171;
++  *((int *)&__m256_op1[2]) = 0x71010101;
++  *((int *)&__m256_op1[1]) = 0x8e8e8e8e;
++  *((int *)&__m256_op1[0]) = 0x8f00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7c007c0080008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7c007c0080008000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xfff10000;
++  *((int *)&__m256_op0[4]) = 0xfff10000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xfff10000;
++  *((int *)&__m256_op0[0]) = 0xfff10000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xfff10000;
++  *((int *)&__m256_op1[4]) = 0xfff10000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xfff10000;
++  *((int *)&__m256_op1[0]) = 0xfff10000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff88ff88;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00040000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00040000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xff00ff00;
++  *((int *)&__m256_op0[6]) = 0x3f003f00;
++  *((int *)&__m256_op0[5]) = 0xff0101fd;
++  *((int *)&__m256_op0[4]) = 0x00010100;
++  *((int *)&__m256_op0[3]) = 0xff00ff00;
++  *((int *)&__m256_op0[2]) = 0x3f003f00;
++  *((int *)&__m256_op0[1]) = 0xff0101fd;
++  *((int *)&__m256_op0[0]) = 0x00010100;
++  *((int *)&__m256_op1[7]) = 0x01ffff43;
++  *((int *)&__m256_op1[6]) = 0x00fffeff;
++  *((int *)&__m256_op1[5]) = 0xfe0000bc;
++  *((int *)&__m256_op1[4]) = 0xff000100;
++  *((int *)&__m256_op1[3]) = 0x01ffff43;
++  *((int *)&__m256_op1[2]) = 0x00fffeff;
++  *((int *)&__m256_op1[1]) = 0xfe0000bc;
++  *((int *)&__m256_op1[0]) = 0xff000100;
++  *((unsigned long *)&__m256i_result[3]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fc00fc00;
++  *((unsigned long *)&__m256i_result[1]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fc00fc00;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcvt_h_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0cc08723ff900001;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xcc9b89f2f6cef440;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xfffffff8;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xff800000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xfffffff8;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0xff800000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xff800000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xff800000ff800000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xff800000;
++  *((int *)&__m256_result[4]) = 0xff800000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xff800000;
++  *((int *)&__m256_result[0]) = 0xff800000;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xf7f8f7f8f800f800;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00003f784000ff80;
++  *((unsigned long *)&__m256d_op1[1]) = 0xf7f8f7f84000fff9;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00003f784000ff80;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xff800000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xff800000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000555500005555;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000555500005555;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000555500005555;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000555500005555;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffb6804cb9;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffb7bbdec0;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffb680489b;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffb7bc02a0;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xfffffffd;
++  *((int *)&__m256_result[4]) = 0xfffffffd;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xfffffffd;
++  *((int *)&__m256_result[0]) = 0xfffffffd;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010202020203;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0101010201010102;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101010202020203;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101010201010102;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x3fff3fff3fff3fc4;
++  *((unsigned long *)&__m256d_op1[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x3fff3fff3fff3fc4;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x3ff9fffa;
++  *((int *)&__m256_result[4]) = 0x3ff9fffa;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x3ff9fffa;
++  *((int *)&__m256_result[0]) = 0x3ff9fffa;
++  __m256_out = __lasx_xvfcvt_s_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
+new file mode 100644
+index 000000000..001ce1c69
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcvth.c
+@@ -0,0 +1,485 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000aaaa;
++  *((int *)&__m256_op0[6]) = 0x00008bfe;
++  *((int *)&__m256_op0[5]) = 0x0000aaaa;
++  *((int *)&__m256_op0[4]) = 0x0000aaaa;
++  *((int *)&__m256_op0[3]) = 0x0000aaaa;
++  *((int *)&__m256_op0[2]) = 0x00008bfe;
++  *((int *)&__m256_op0[1]) = 0x0000aaaa;
++  *((int *)&__m256_op0[0]) = 0x0000aaaa;
++  *((unsigned long *)&__m256d_result[3]) = 0x3795554000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x37917fc000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x3795554000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x37917fc000000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0404010008080808;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0408010008080808;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0404010008080808;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0408010008080808;
++  *((int *)&__m256_result[7]) = 0x38808000;
++  *((int *)&__m256_result[6]) = 0x37800000;
++  *((int *)&__m256_result[5]) = 0x39010000;
++  *((int *)&__m256_result[4]) = 0x39010000;
++  *((int *)&__m256_result[3]) = 0x38808000;
++  *((int *)&__m256_result[2]) = 0x37800000;
++  *((int *)&__m256_result[1]) = 0x39010000;
++  *((int *)&__m256_result[0]) = 0x39010000;
++  __m256_out = __lasx_xvfcvth_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvth_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvth_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000100010001fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000100010001fffe;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfcvth_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvth_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00020006;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00020006;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00020006;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00020006;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x37b0003000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x37b0003000000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffffff0;
++  *((int *)&__m256_op0[6]) = 0xfffffff0;
++  *((int *)&__m256_op0[5]) = 0xfffffff0;
++  *((int *)&__m256_op0[4]) = 0xfffffff0;
++  *((int *)&__m256_op0[3]) = 0xfffffff0;
++  *((int *)&__m256_op0[2]) = 0xfffffff0;
++  *((int *)&__m256_op0[1]) = 0xfffffff0;
++  *((int *)&__m256_op0[0]) = 0xfffffff0;
++  *((unsigned long *)&__m256d_result[3]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfffffffe00000000;
++  __m256d_out = __lasx_xvfcvth_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000;
++  *((int *)&__m256_result[7]) = 0xc6000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xc6000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00010000002fff9e;
++  *((int *)&__m256_result[7]) = 0x34000000;
++  *((int *)&__m256_result[6]) = 0xfff00000;
++  *((int *)&__m256_result[5]) = 0xfff6e000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x33800000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x363c0000;
++  *((int *)&__m256_result[0]) = 0xfff3c000;
++  __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x80000000;
++  *((int *)&__m256_op0[6]) = 0x80000000;
++  *((int *)&__m256_op0[5]) = 0x80000000;
++  *((int *)&__m256_op0[4]) = 0xff800000;
++  *((int *)&__m256_op0[3]) = 0x80000000;
++  *((int *)&__m256_op0[2]) = 0x80000000;
++  *((int *)&__m256_op0[1]) = 0x80000000;
++  *((int *)&__m256_op0[0]) = 0xff800000;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc1d75053f0000000;
++  *((int *)&__m256_result[7]) = 0xc03ae000;
++  *((int *)&__m256_result[6]) = 0x420a6000;
++  *((int *)&__m256_result[5]) = 0xc6000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xc03ae000;
++  *((int *)&__m256_result[2]) = 0x420a6000;
++  *((int *)&__m256_result[1]) = 0xc6000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x03802fc000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x03802fc000000000;
++  *((int *)&__m256_result[7]) = 0x38600000;
++  *((int *)&__m256_result[6]) = 0x3df80000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x38600000;
++  *((int *)&__m256_result[2]) = 0x3df80000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfcvtl_s_h (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfcvtl_d_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
+new file mode 100644
+index 000000000..dd04fd788
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-1.c
+@@ -0,0 +1,375 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xbff0000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001700080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001700080;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x4177000800000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x4177000800000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xc1f0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xc1f0000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256d_result[3]) = 0x437fe01fe01fe020;
++  *((unsigned long *)&__m256d_result[2]) = 0x437fe01fe01fe020;
++  *((unsigned long *)&__m256d_result[1]) = 0x437fe01fe01fe020;
++  *((unsigned long *)&__m256d_result[0]) = 0x437fe01fe01fe020;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x4393a0a5bc606060;
++  *((unsigned long *)&__m256d_result[2]) = 0x43b32feea9000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x4393a0a5bc606060;
++  *((unsigned long *)&__m256d_result[0]) = 0x43b32feea9000000;
++  __m256d_out = __lasx_xvffint_d_l (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba;
++  *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c;
++  *((int *)&__m256_result[7]) = 0x4e5cba76;
++  *((int *)&__m256_result[6]) = 0xcdbaaa78;
++  *((int *)&__m256_result[5]) = 0xce68fdeb;
++  *((int *)&__m256_result[4]) = 0x4e33eaff;
++  *((int *)&__m256_result[3]) = 0x4e45cc2d;
++  *((int *)&__m256_result[2]) = 0xcda41b30;
++  *((int *)&__m256_result[1]) = 0x4ccb1e5c;
++  *((int *)&__m256_result[0]) = 0x4d6b21e4;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x4efffe00;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x47000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x4efffe00;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x47000000;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff00;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x477f0000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x477f0000;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010001000030000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010001000030000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010001000030000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010001000030000;
++  *((int *)&__m256_result[7]) = 0x49800080;
++  *((int *)&__m256_result[6]) = 0x48400000;
++  *((int *)&__m256_result[5]) = 0x49800080;
++  *((int *)&__m256_result[4]) = 0x48400000;
++  *((int *)&__m256_result[3]) = 0x49800080;
++  *((int *)&__m256_result[2]) = 0x48400000;
++  *((int *)&__m256_result[1]) = 0x49800080;
++  *((int *)&__m256_result[0]) = 0x48400000;
++  __m256_out = __lasx_xvffint_s_w (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x4f800000;
++  __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc74180000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff884580000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0xbf800000;
++  *((int *)&__m256_result[6]) = 0xbf800000;
++  *((int *)&__m256_result[5]) = 0xd662fa00;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xbf800000;
++  *((int *)&__m256_result[2]) = 0xbf800000;
++  *((int *)&__m256_result[1]) = 0xd6ef7500;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000005000000020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000005000000020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000;
++  *((int *)&__m256_result[7]) = 0xdf000000;
++  *((int *)&__m256_result[6]) = 0x52a00000;
++  *((int *)&__m256_result[5]) = 0x5b7f00ff;
++  *((int *)&__m256_result[4]) = 0x5b7f00ff;
++  *((int *)&__m256_result[3]) = 0xdf000000;
++  *((int *)&__m256_result[2]) = 0x52a00000;
++  *((int *)&__m256_result[1]) = 0x5b7f00ff;
++  *((int *)&__m256_result[0]) = 0x5b7f00ff;
++  __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x5d20a0a1;
++  *((int *)&__m256_result[6]) = 0x5d20a0a1;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x5d20a0a1;
++  *((int *)&__m256_result[2]) = 0x5d20a0a1;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_l (__m256i_op0, __m256i_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
+new file mode 100644
+index 000000000..3e2b15507
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffint-2.c
+@@ -0,0 +1,246 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x4370100000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x4370100000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256d_result[3]) = 0x43c0101010101010;
++  *((unsigned long *)&__m256d_result[2]) = 0x43c0101010101032;
++  *((unsigned long *)&__m256d_result[1]) = 0x43c0101010101010;
++  *((unsigned long *)&__m256d_result[0]) = 0x43c0101010101032;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x40efffe09fa88260;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6b07ca8e013fbf01;
++  *((unsigned long *)&__m256i_op0[1]) = 0x40efffe09fa7e358;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80ce32be3e827f00;
++  *((unsigned long *)&__m256d_result[3]) = 0x43d03bfff827ea21;
++  *((unsigned long *)&__m256d_result[2]) = 0x43dac1f2a3804ff0;
++  *((unsigned long *)&__m256d_result[1]) = 0x43d03bfff827e9f9;
++  *((unsigned long *)&__m256d_result[0]) = 0x43e019c657c7d050;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x43f0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x43f0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x43f0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x43f0000000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x41f0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x41f0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x41f0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x41f0000000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256d_result[3]) = 0x4380100810101008;
++  *((unsigned long *)&__m256d_result[2]) = 0x4380100810101008;
++  *((unsigned long *)&__m256d_result[1]) = 0x4380100810101008;
++  *((unsigned long *)&__m256d_result[0]) = 0x4380100810101008;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x41f0000000000000;
++  __m256d_out = __lasx_xvffint_d_lu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffbf7f00007fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffe651ffffbfff;
++  *((int *)&__m256_result[7]) = 0x4f800000;
++  *((int *)&__m256_result[6]) = 0x4f800000;
++  *((int *)&__m256_result[5]) = 0x4f7fffbf;
++  *((int *)&__m256_result[4]) = 0x46fffe00;
++  *((int *)&__m256_result[3]) = 0x4f800000;
++  *((int *)&__m256_result[2]) = 0x4f800000;
++  *((int *)&__m256_result[1]) = 0x4f7fffe6;
++  *((int *)&__m256_result[0]) = 0x4f7fffc0;
++  __m256_out = __lasx_xvffint_s_wu (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((int *)&__m256_result[7]) = 0x4b808080;
++  *((int *)&__m256_result[6]) = 0x4b808080;
++  *((int *)&__m256_result[5]) = 0x4f800000;
++  *((int *)&__m256_result[4]) = 0x4f7fffff;
++  *((int *)&__m256_result[3]) = 0x4b808080;
++  *((int *)&__m256_result[2]) = 0x4b808080;
++  *((int *)&__m256_result[1]) = 0x4f800000;
++  *((int *)&__m256_result[0]) = 0x4f800000;
++  __m256_out = __lasx_xvffint_s_wu (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_wu (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_wu (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x41000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x41000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x41000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x41000000;
++  __m256_out = __lasx_xvffint_s_wu (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000020;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x42800000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x42000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x42800000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x42000000;
++  __m256_out = __lasx_xvffint_s_wu (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvffint_s_wu (__m256i_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
+new file mode 100644
+index 000000000..e310ff5ee
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvffinth.c
+@@ -0,0 +1,262 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0e2d5626ff75cdbc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5db4b156e2002a78;
++  *((unsigned long *)&__m256i_op0[1]) = 0xeeffbeb03ba3e6b0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0c16e25eb28d27ea;
++  *((unsigned long *)&__m256d_result[3]) = 0x41ac5aac4c000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xc161464880000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xc1b1004150000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x41cdd1f358000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000006f0000007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000006f0000007f;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256d_result[3]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x41d8585858400000;
++  *((unsigned long *)&__m256d_result[1]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x41d8585858400000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x41dfffc000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x41dfffdfffc00000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f3a40;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffb79fb74;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffb79fb74;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xc192181230000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xc192181230000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xbff0000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ffffffff00;
++  *((unsigned long *)&__m256d_result[3]) = 0x40efffe000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x40efffe000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x41dffc0000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x41dffc0000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256d_result[3]) = 0xc039000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xc039000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xc039000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xc039000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffinth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x41d6600000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x41d6600000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256d_result[3]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256d_result[2]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256d_result[0]) = 0xc1d75053f0000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000001f;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x403f000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x403f000000000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00f7000000f70006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00f7000000f70006;
++  *((unsigned long *)&__m256d_result[3]) = 0x416ee00000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x416ee000c0000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x416ee00000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x416ee000c0000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff000000000080;
++  *((unsigned long *)&__m256d_result[3]) = 0x416fe00000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x4060000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x416fe00000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x4060000000000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x41cfe01dde000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x41cfe01dde000000;
++  __m256d_out = __lasx_xvffintl_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
+new file mode 100644
+index 000000000..4babf1638
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_d.c
+@@ -0,0 +1,429 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++/* { dg-timeout 500 } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrint_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x0);
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrint_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000008050501;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000008050501;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrint_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfrint_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrint_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_result[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_result[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_result[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_result[0]) = 0xfffffffffffffff8;
++  __m256d_out = __lasx_xvfrint_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000080008001;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7c00000880008000;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256d_op0[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256d_op0[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256d_result[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256d_result[0]) = 0x6040190d00000000;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256d_op0[2]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256d_op0[0]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x4084800000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x4084800000000000;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m256d_result[3]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m256d_result[2]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m256d_result[1]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m256d_result[0]) = 0xffff0001ffff0001;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x3fffbfff80000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00004000007f8000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x3fffbfff80000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00004000007f8000;
++  *((unsigned long *)&__m256d_result[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrne_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xfffffefe00000000;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000100da000100fd;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0001ffe20001fefd;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0001009a000100fd;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0001ff640001fefd;
++  *((unsigned long *)&__m256d_result[3]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256d_op0[2]) = 0x01fc03fc01fc03fc;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256d_op0[0]) = 0x01fc03fc01fc03fc;
++  *((unsigned long *)&__m256d_result[3]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256d_result[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256d_result[0]) = 0x3ff0000000000000;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0218ff78fc38fc38;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0218ff78fc38fc38;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256d_result[3]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256d_result[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfc00000000000048;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256d_result[3]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256d_result[2]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256d_result[1]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256d_result[0]) = 0xfffffff0fffffff0;
++  __m256d_out = __lasx_xvfrintrp_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrm_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x017e017e01dd61de;
++  *((unsigned long *)&__m256d_op0[2]) = 0x5d637d043bc4fc43;
++  *((unsigned long *)&__m256d_op0[1]) = 0x01dcc2dce31bc35d;
++  *((unsigned long *)&__m256d_op0[0]) = 0x5e041d245b85fc43;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x5d637d043bc4fc43;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x5e041d245b85fc43;
++  __m256d_out = __lasx_xvfrintrm_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256d_op0[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256d_result[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256d_result[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256d_result[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256d_result[0]) = 0x7c007c007c007c00;
++  __m256d_out = __lasx_xvfrintrm_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfrintrm_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrm_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrm_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5);
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrm_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfrintrz_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000800000098;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000040000ffca;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000800000098;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000040000ff79;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrz_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrz_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrintrz_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000781;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrz_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x001fe020001fe020;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x001fe020001fe020;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrz_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfrintrz_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+new file mode 100644
+index 000000000..9f2fa6747
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrint_s.c
+@@ -0,0 +1,723 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++/* { dg-timeout 500 } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{  /* Runtime self-test for the LASX single-precision xvfrint* (round-to-integral) intrinsics: store raw bit patterns into a vector, round, compare against golden bit patterns.  */
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;  /* float vectors are written and checked as raw 32-bit lanes */
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffff5f5c;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffff605a;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffff5f5c;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffff605a;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffff5f5c;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffff605a;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffff5f5c;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffff605a;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);  /* round to integral, current rounding mode; NaN lanes (0xffffffff) are expected back unchanged here */
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xc5c5c5c4;
++  *((int *)&__m256_op0[6]) = 0xc5c5c5c4;
++  *((int *)&__m256_op0[5]) = 0x45c5c5c5;
++  *((int *)&__m256_op0[4]) = 0x45c5c5c5;
++  *((int *)&__m256_op0[3]) = 0xc5c5c5c4;
++  *((int *)&__m256_op0[2]) = 0xc5c5c5c4;
++  *((int *)&__m256_op0[1]) = 0x45c5c5c5;
++  *((int *)&__m256_op0[0]) = 0x45c5c5c5;
++  *((int *)&__m256_result[7]) = 0xc5c5c800;
++  *((int *)&__m256_result[6]) = 0xc5c5c800;
++  *((int *)&__m256_result[5]) = 0x45c5c800;
++  *((int *)&__m256_result[4]) = 0x45c5c800;
++  *((int *)&__m256_result[3]) = 0xc5c5c800;
++  *((int *)&__m256_result[2]) = 0xc5c5c800;
++  *((int *)&__m256_result[1]) = 0x45c5c800;
++  *((int *)&__m256_result[0]) = 0x45c5c800;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffff6f20;
++  *((int *)&__m256_op0[5]) = 0x0000781e;
++  *((int *)&__m256_op0[4]) = 0x0000f221;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffff6f20;
++  *((int *)&__m256_op0[1]) = 0x0000781e;
++  *((int *)&__m256_op0[0]) = 0x0000f221;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0xffff6f20;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0xffff6f20;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffffb3b4;
++  *((int *)&__m256_op0[5]) = 0xfffffff5;
++  *((int *)&__m256_op0[4]) = 0xffff4738;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffffb3b4;
++  *((int *)&__m256_op0[1]) = 0xfffffff5;
++  *((int *)&__m256_op0[0]) = 0xffff4738;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0xffffb3b4;
++  *((int *)&__m256_result[5]) = 0xfffffff5;
++  *((int *)&__m256_result[4]) = 0xffff4738;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0xffffb3b4;
++  *((int *)&__m256_result[1]) = 0xfffffff5;
++  *((int *)&__m256_result[0]) = 0xffff4738;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00ff0000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00ff0000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00ff0000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00ff0000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00003fea;
++  *((int *)&__m256_op0[6]) = 0x00013feb;
++  *((int *)&__m256_op0[5]) = 0x00003fe9;
++  *((int *)&__m256_op0[4]) = 0x00014022;
++  *((int *)&__m256_op0[3]) = 0x00003fea;
++  *((int *)&__m256_op0[2]) = 0x00013feb;
++  *((int *)&__m256_op0[1]) = 0x00003fe9;
++  *((int *)&__m256_op0[0]) = 0x00014022;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrint_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);  /* rne: round ties to nearest even */
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);  /* NOTE(review): float vectors compared with ASSERTEQ_64 from here on, vs ASSERTEQ_32 above -- both cover all 256 bits, but confirm the switch is intended */
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x01010101;
++  *((int *)&__m256_op0[6]) = 0x01010101;
++  *((int *)&__m256_op0[5]) = 0x01010101;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x01010101;
++  *((int *)&__m256_op0[2]) = 0x01010101;
++  *((int *)&__m256_op0[1]) = 0x01010101;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x01010101;
++  *((int *)&__m256_op0[6]) = 0x01010101;
++  *((int *)&__m256_op0[5]) = 0x01010101;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x01010101;
++  *((int *)&__m256_op0[2]) = 0x01010101;
++  *((int *)&__m256_op0[1]) = 0x01010101;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrne_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x55555555;
++  *((int *)&__m256_op0[6]) = 0x36aaaaac;
++  *((int *)&__m256_op0[5]) = 0x55555555;
++  *((int *)&__m256_op0[4]) = 0xaaaaaaac;
++  *((int *)&__m256_op0[3]) = 0x55555555;
++  *((int *)&__m256_op0[2]) = 0x36aaaaac;
++  *((int *)&__m256_op0[1]) = 0x55555555;
++  *((int *)&__m256_op0[0]) = 0xaaaaaaac;
++  *((int *)&__m256_result[7]) = 0x55555555;
++  *((int *)&__m256_result[6]) = 0x3f800000;
++  *((int *)&__m256_result[5]) = 0x55555555;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x55555555;
++  *((int *)&__m256_result[2]) = 0x3f800000;
++  *((int *)&__m256_result[1]) = 0x55555555;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfrintrp_s (__m256_op0);  /* rp: round toward +Inf (tiny positive 0x36aaaaac -> 1.0f; tiny negative -> -0.0f) */
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrp_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffc741;
++  *((int *)&__m256_op0[6]) = 0x8a023680;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffff8845;
++  *((int *)&__m256_op0[2]) = 0xbb954b00;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffc741;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xffff8845;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrp_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrintrp_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00200101;
++  *((int *)&__m256_op0[6]) = 0x01610000;
++  *((int *)&__m256_op0[5]) = 0x00612000;
++  *((int *)&__m256_op0[4]) = 0x00610000;
++  *((int *)&__m256_op0[3]) = 0x00200101;
++  *((int *)&__m256_op0[2]) = 0x01610000;
++  *((int *)&__m256_op0[1]) = 0x00612000;
++  *((int *)&__m256_op0[0]) = 0x00610000;
++  *((int *)&__m256_result[7]) = 0x3f800000;
++  *((int *)&__m256_result[6]) = 0x3f800000;
++  *((int *)&__m256_result[5]) = 0x3f800000;
++  *((int *)&__m256_result[4]) = 0x3f800000;
++  *((int *)&__m256_result[3]) = 0x3f800000;
++  *((int *)&__m256_result[2]) = 0x3f800000;
++  *((int *)&__m256_result[1]) = 0x3f800000;
++  *((int *)&__m256_result[0]) = 0x3f800000;
++  __m256_out = __lasx_xvfrintrp_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xfefefefe;
++  *((int *)&__m256_op0[4]) = 0x01010101;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xfefefefe;
++  *((int *)&__m256_op0[0]) = 0x01010101;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xfefefefe;
++  *((int *)&__m256_result[4]) = 0x3f800000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xfefefefe;
++  *((int *)&__m256_result[0]) = 0x3f800000;
++  __m256_out = __lasx_xvfrintrp_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x1c1c1c1c;
++  *((int *)&__m256_op0[6]) = 0x1c1c1c1c;
++  *((int *)&__m256_op0[5]) = 0xfffffffe;
++  *((int *)&__m256_op0[4]) = 0xffffff00;
++  *((int *)&__m256_op0[3]) = 0x1c1c1c1c;
++  *((int *)&__m256_op0[2]) = 0x1c1c1c1c;
++  *((int *)&__m256_op0[1]) = 0xfffffffe;
++  *((int *)&__m256_op0[0]) = 0xffffff00;
++  *((int *)&__m256_result[7]) = 0x3f800000;
++  *((int *)&__m256_result[6]) = 0x3f800000;
++  *((int *)&__m256_result[5]) = 0xfffffffe;
++  *((int *)&__m256_result[4]) = 0xffffff00;
++  *((int *)&__m256_result[3]) = 0x3f800000;
++  *((int *)&__m256_result[2]) = 0x3f800000;
++  *((int *)&__m256_result[1]) = 0xfffffffe;
++  *((int *)&__m256_result[0]) = 0xffffff00;
++  __m256_out = __lasx_xvfrintrp_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000008;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00080000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrm_s (__m256_op0);  /* rm: round toward -Inf (tiny positives -> +0.0f) */
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrm_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrintrm_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x5d20a0a1;
++  *((int *)&__m256_op0[6]) = 0x5d20a0a1;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x5d20a0a1;
++  *((int *)&__m256_op0[2]) = 0x5d20a0a1;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x5d20a0a1;
++  *((int *)&__m256_result[6]) = 0x5d20a0a1;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x5d20a0a1;
++  *((int *)&__m256_result[2]) = 0x5d20a0a1;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrm_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x001d001d;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x001d001d;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrm_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000033;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000033;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrm_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrm_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000300;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000303;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrz_s (__m256_op0);  /* rz: round toward zero (truncate) */
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xfffffffe;
++  *((int *)&__m256_op0[5]) = 0xfffffffe;
++  *((int *)&__m256_op0[4]) = 0xfffffefc;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xfffffffe;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xfffffffe;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xfffffffe;
++  *((int *)&__m256_result[5]) = 0xfffffffe;
++  *((int *)&__m256_result[4]) = 0xfffffefc;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xfffffffe;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xfffffffe;
++  __m256_out = __lasx_xvfrintrz_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfrintrz_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x0001c4e8;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x0001c4e8;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrintrz_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x80000000;
++  *((int *)&__m256_op0[6]) = 0x80000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x80000000;
++  *((int *)&__m256_op0[2]) = 0x80000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrintrz_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrintrz_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xf5fffc00;
++  *((int *)&__m256_op0[6]) = 0xfc000000;
++  *((int *)&__m256_op0[5]) = 0xf5fffc00;
++  *((int *)&__m256_op0[4]) = 0xfc000000;
++  *((int *)&__m256_op0[3]) = 0xf5fffc00;
++  *((int *)&__m256_op0[2]) = 0xfc000000;
++  *((int *)&__m256_op0[1]) = 0xf5fffc00;
++  *((int *)&__m256_op0[0]) = 0xfc000000;
++  *((int *)&__m256_result[7]) = 0xf5fffc00;
++  *((int *)&__m256_result[6]) = 0xfc000000;
++  *((int *)&__m256_result[5]) = 0xf5fffc00;
++  *((int *)&__m256_result[4]) = 0xfc000000;
++  *((int *)&__m256_result[3]) = 0xf5fffc00;
++  *((int *)&__m256_result[2]) = 0xfc000000;
++  *((int *)&__m256_result[1]) = 0xf5fffc00;
++  *((int *)&__m256_result[0]) = 0xfc000000;
++  __m256_out = __lasx_xvfrintrz_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
+new file mode 100644
+index 000000000..c75468d42
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-1.c
+@@ -0,0 +1,471 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffcb423a587053;
++  *((unsigned long *)&__m256d_op0[2]) = 0x6d46f43e71141b81;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffcb423a584528;
++  *((unsigned long *)&__m256d_op0[0]) = 0x9bdf36c8d78158a1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x386000003df80000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x386000003df80000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x43f0000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x43f0000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x43f0000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x43f0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffc03b1fc5e050;
++  *((unsigned long *)&__m256d_op0[2]) = 0x6a9e3fa2603a2000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffc03b1fc5e050;
++  *((unsigned long *)&__m256d_op0[0]) = 0x6a9e3fa2603a2000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000001c9880;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000001c9880;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x1828f0e09bad7249;
++  *((unsigned long *)&__m256d_op0[2]) = 0x07ffc1b723953cec;
++  *((unsigned long *)&__m256d_op0[1]) = 0x61f2e9b333aab104;
++  *((unsigned long *)&__m256d_op0[0]) = 0x6bf742aa0d7856a0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00ffffff1e9e9e9e;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff9e9eb09e;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00ffffff1e9e9e9e;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff9e9eb09e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_l_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
+new file mode 100644
+index 000000000..ad72f7596
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-2.c
+@@ -0,0 +1,1565 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x0000ffff;
++  *((int *)&__m256_op0[6]) = 0xc0008001;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0xc0008001;
++  *((int *)&__m256_op0[3]) = 0x0000ffff;
++  *((int *)&__m256_op0[2]) = 0xc0008001;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0xc0008001;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffe;
++  __m256i_out = __lasx_xvftint_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x3f3f3f3c;
++  *((int *)&__m256_op0[5]) = 0xc6c6c6c6;
++  *((int *)&__m256_op0[4]) = 0x8787878a;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x3f3f3f3c;
++  *((int *)&__m256_op0[1]) = 0x8787878a;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff9c9d00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x1f0fdf7f;
++  *((int *)&__m256_op0[6]) = 0x3e3b31d4;
++  *((int *)&__m256_op0[5]) = 0x7ff80000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x1f0fdf7f;
++  *((int *)&__m256_op0[2]) = 0x3e3b31d4;
++  *((int *)&__m256_op0[1]) = 0x7ff80000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000200000003;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0080000200000003;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x55555555;
++  *((int *)&__m256_op0[5]) = 0x00000001;
++  *((int *)&__m256_op0[4]) = 0x00000004;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x55555555;
++  *((int *)&__m256_op0[1]) = 0x00000001;
++  *((int *)&__m256_op0[0]) = 0x00000004;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00ff00ffff0000ff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00ff00ffff0000ff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7fe363637fe36364;
++  *((unsigned long *)&__m256d_op0[1]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7fe363637fe36364;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff5;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff5;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff5;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff5;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000007;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000020000000b;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000020000000a;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000000000000000a;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000000000000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000505;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x40000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x40000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256d_op1[2]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256d_op1[1]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256d_op1[0]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000004040104;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffd1108199;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000714910f9;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000000030000000c;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000001100000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000500000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000800000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffff7e;
++  *((int *)&__m256_op0[4]) = 0xffffff46;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffff7e;
++  *((int *)&__m256_op0[0]) = 0xffffff46;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000001fe01fe;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000ff0100;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000001fe01fe;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000ff0100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0fffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x0fffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x0fffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x0fffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000000010000685e;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000000010000685e;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfd12fd12;
++  *((int *)&__m256_op0[6]) = 0xfd12fd12;
++  *((int *)&__m256_op0[5]) = 0xfd12fd12;
++  *((int *)&__m256_op0[4]) = 0xfd12fd12;
++  *((int *)&__m256_op0[3]) = 0xfd12fd12;
++  *((int *)&__m256_op0[2]) = 0xfd12fd12;
++  *((int *)&__m256_op0[1]) = 0xfd12fd12;
++  *((int *)&__m256_op0[0]) = 0xfd12fd12;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvftintrne_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrne_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffe4ffe6;
++  *((int *)&__m256_op0[6]) = 0xffe5ffe6;
++  *((int *)&__m256_op0[5]) = 0xffe4ffe6;
++  *((int *)&__m256_op0[4]) = 0xffe5ffe6;
++  *((int *)&__m256_op0[3]) = 0xffe4ffe6;
++  *((int *)&__m256_op0[2]) = 0xffe5ffe6;
++  *((int *)&__m256_op0[1]) = 0xffe4ffe6;
++  *((int *)&__m256_op0[0]) = 0xffe5ffe6;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000001;
++  *((int *)&__m256_op0[4]) = 0x00010102;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x80008000;
++  *((int *)&__m256_op0[6]) = 0x80008000;
++  *((int *)&__m256_op0[5]) = 0x80008000;
++  *((int *)&__m256_op0[4]) = 0x80008000;
++  *((int *)&__m256_op0[3]) = 0x80008000;
++  *((int *)&__m256_op0[2]) = 0x80008000;
++  *((int *)&__m256_op0[1]) = 0x80008000;
++  *((int *)&__m256_op0[0]) = 0x80008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x10000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x10000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00ff00ff;
++  *((int *)&__m256_op0[6]) = 0x00ff00ff;
++  *((int *)&__m256_op0[5]) = 0x00ff00ff;
++  *((int *)&__m256_op0[4]) = 0x00ff00ff;
++  *((int *)&__m256_op0[3]) = 0x00ff00ff;
++  *((int *)&__m256_op0[2]) = 0x00ff00ff;
++  *((int *)&__m256_op0[1]) = 0x00ff00ff;
++  *((int *)&__m256_op0[0]) = 0x00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvftintrp_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0010001000107878;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0010001000107878;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0040000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0040000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0040000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0040000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00003fea00013fec;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00003fe50001c013;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00003fea00013fec;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00003fe50001c013;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000180000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000180000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvftintrp_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffff000000010000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000095120000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xc9da000063f50000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xc7387fff6bbfffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x4001000100020000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffefffe;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xfffefffe;
++  *((int *)&__m256_op0[2]) = 0xfffefffd;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0707feb6;
++  *((int *)&__m256_op0[6]) = 0x0707b7d0;
++  *((int *)&__m256_op0[5]) = 0x45baa7ef;
++  *((int *)&__m256_op0[4]) = 0x6a95a985;
++  *((int *)&__m256_op0[3]) = 0x0707feb6;
++  *((int *)&__m256_op0[2]) = 0x0707b7d0;
++  *((int *)&__m256_op0[1]) = 0x45baa7ef;
++  *((int *)&__m256_op0[0]) = 0x6a95a985;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000017547fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000017547fffffff;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[6]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[5]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[4]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[3]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[2]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[1]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[0]) = 0x6d6d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256d_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256d_op1[2]) = 0x21f8c3c4c0000005;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256d_op1[0]) = 0x21f8c3c4c0000005;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256d_op1[2]) = 0x8000000100000001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256d_op1[0]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xfff10000;
++  *((int *)&__m256_op0[4]) = 0xfff10000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xfff10000;
++  *((int *)&__m256_op0[0]) = 0xfff10000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xfdfcfda8;
++  *((int *)&__m256_op0[5]) = 0x0000e282;
++  *((int *)&__m256_op0[4]) = 0x1d20ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xfdfcfda8;
++  *((int *)&__m256_op0[1]) = 0x0000e282;
++  *((int *)&__m256_op0[0]) = 0x1d20ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0080000000800000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0080000000800000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0080000000800000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256d_op0[2]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256d_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256d_op0[0]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256d_op1[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256d_op1[2]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256d_op1[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256d_op1[0]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0002fffc;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffff0000fffd0003;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0002fffc;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffff0000fffd0003;
++  *((unsigned long *)&__m256d_op1[3]) = 0x003f020001400200;
++  *((unsigned long *)&__m256d_op1[2]) = 0x003f00ff003f00c4;
++  *((unsigned long *)&__m256d_op1[1]) = 0x003f020001400200;
++  *((unsigned long *)&__m256d_op1[0]) = 0x003f00ff003f00c4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrm_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffff0ffff0000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffff0ffff0000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[2]) = 0x3ff1808001020101;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[0]) = 0x3ff1808001020101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x002e2100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x34000000fff00000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfff6e00000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x3380000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x363c0000fff3c000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000000030000000c;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000001100000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000500000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000800000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5;
++  *((unsigned long *)&__m256d_op1[2]) = 0xa5a5a5a5a5a5a5ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5;
++  *((unsigned long *)&__m256d_op1[0]) = 0xa5a5a5a5a5a5a5ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x55555555;
++  *((int *)&__m256_op0[6]) = 0x55555555;
++  *((int *)&__m256_op0[5]) = 0x5d5d5d5d;
++  *((int *)&__m256_op0[4]) = 0x5d555d55;
++  *((int *)&__m256_op0[3]) = 0x55555555;
++  *((int *)&__m256_op0[2]) = 0x55555555;
++  *((int *)&__m256_op0[1]) = 0x5d5ca2a3;
++  *((int *)&__m256_op0[0]) = 0x5d54aaab;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0b085bfc00000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0b004bc000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0b085bfc00000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0b004bc000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffeeffaf;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000011;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffeeffaf;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000011;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00ff00ff;
++  *((int *)&__m256_op0[6]) = 0x00ff00ff;
++  *((int *)&__m256_op0[5]) = 0x00ff00ff;
++  *((int *)&__m256_op0[4]) = 0x00ff00ff;
++  *((int *)&__m256_op0[3]) = 0x00ff00ff;
++  *((int *)&__m256_op0[2]) = 0x00ff00ff;
++  *((int *)&__m256_op0[1]) = 0x00ff00ff;
++  *((int *)&__m256_op0[0]) = 0x00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x001d001d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256d_op0[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x81fa28e4;
++  *((int *)&__m256_op0[6]) = 0x81fa28e4;
++  *((int *)&__m256_op0[5]) = 0x81fa28e4;
++  *((int *)&__m256_op0[4]) = 0x81fa28e4;
++  *((int *)&__m256_op0[3]) = 0x81fa28e4;
++  *((int *)&__m256_op0[2]) = 0x81fa28e4;
++  *((int *)&__m256_op0[1]) = 0x81fa28e4;
++  *((int *)&__m256_op0[0]) = 0x81fa28e4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_w_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
+new file mode 100644
+index 000000000..19db4e192
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftint-3.c
+@@ -0,0 +1,511 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffefffe;
++  *((int *)&__m256_op0[6]) = 0xfffefffe;
++  *((int *)&__m256_op0[5]) = 0xfffefffe;
++  *((int *)&__m256_op0[4]) = 0xfffefffe;
++  *((int *)&__m256_op0[3]) = 0xfffefffe;
++  *((int *)&__m256_op0[2]) = 0xfffefffe;
++  *((int *)&__m256_op0[1]) = 0xfffefffe;
++  *((int *)&__m256_op0[0]) = 0xfffefffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000200;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000200;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000200;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000200;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffffff1;
++  *((int *)&__m256_op0[6]) = 0xfffffff1;
++  *((int *)&__m256_op0[5]) = 0xfffffff1;
++  *((int *)&__m256_op0[4]) = 0xfffffff1;
++  *((int *)&__m256_op0[3]) = 0xfffffff1;
++  *((int *)&__m256_op0[2]) = 0xfffffff1;
++  *((int *)&__m256_op0[1]) = 0xfffffff1;
++  *((int *)&__m256_op0[0]) = 0xfffffff1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x43ef8787;
++  *((int *)&__m256_op0[4]) = 0x8000ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x43ef8787;
++  *((int *)&__m256_op0[0]) = 0x8000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000001df00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000001df00000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0x00030005;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0x00030005;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x7ff80000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x7ff80000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x7ff80000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x7ff80000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000002;
++  *((int *)&__m256_op0[6]) = 0x00000002;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000002;
++  *((int *)&__m256_op0[2]) = 0x00000002;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x7ff00000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x7ff00000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x7ff00000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x7ff00000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00016e00;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00016e00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_wu_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftint_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x38a966b301f41ffd;
++  *((unsigned long *)&__m256d_op0[2]) = 0x5f6108ee13ff0000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xf41a56e8d10201f6;
++  *((unsigned long *)&__m256d_op0[0]) = 0x683b8b34f1020001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256d_op0[2]) = 0xc2c2c2c2c2c29cc0;
++  *((unsigned long *)&__m256d_op0[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256d_op0[0]) = 0xc2c2c2c2c2c29cc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000007a00f8;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00ff00ff01640092;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000007a00f8;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00ff00ff01640092;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000007fff80fe;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000007fff80fe;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff80007ffe;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000ff007fff80fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000781;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000408080c111414;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000408080c111414;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000408080c111414;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000008e8c000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000000fffc000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000008e8c000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000000fffc000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrz_lu_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
+new file mode 100644
+index 000000000..b0fdf7e0b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvftintl.c
+@@ -0,0 +1,1580 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0xc58a0a0a;
++  *((int *)&__m256_op0[6]) = 0x07070706;
++  *((int *)&__m256_op0[5]) = 0x006b60e4;
++  *((int *)&__m256_op0[4]) = 0x180b0023;
++  *((int *)&__m256_op0[3]) = 0x1b39153f;
++  *((int *)&__m256_op0[2]) = 0x334b966a;
++  *((int *)&__m256_op0[1]) = 0xf1d75d79;
++  *((int *)&__m256_op0[0]) = 0xefcac002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x40404040;
++  *((int *)&__m256_op0[6]) = 0x40404040;
++  *((int *)&__m256_op0[5]) = 0x40404040;
++  *((int *)&__m256_op0[4]) = 0x40404040;
++  *((int *)&__m256_op0[3]) = 0x40404040;
++  *((int *)&__m256_op0[2]) = 0x40404040;
++  *((int *)&__m256_op0[1]) = 0x40404040;
++  *((int *)&__m256_op0[0]) = 0x40404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00080000;
++  *((int *)&__m256_op0[4]) = 0x00000010;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00080000;
++  *((int *)&__m256_op0[0]) = 0x00000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x40f69fe6;
++  *((int *)&__m256_op0[6]) = 0x3c26f4f5;
++  *((int *)&__m256_op0[5]) = 0x7ff7ffff;
++  *((int *)&__m256_op0[4]) = 0x00000007;
++  *((int *)&__m256_op0[3]) = 0x40f69fe6;
++  *((int *)&__m256_op0[2]) = 0x3c26f4f5;
++  *((int *)&__m256_op0[1]) = 0x7ff7ffff;
++  *((int *)&__m256_op0[0]) = 0x00000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00060000;
++  *((int *)&__m256_op0[6]) = 0x00040000;
++  *((int *)&__m256_op0[5]) = 0x00020000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00060000;
++  *((int *)&__m256_op0[2]) = 0x00040000;
++  *((int *)&__m256_op0[1]) = 0x00020000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffff0000;
++  *((int *)&__m256_op0[4]) = 0xffff0000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffff0000;
++  *((int *)&__m256_op0[0]) = 0xffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x55550000;
++  *((int *)&__m256_op0[6]) = 0x55550000;
++  *((int *)&__m256_op0[5]) = 0x55550000;
++  *((int *)&__m256_op0[4]) = 0x55550000;
++  *((int *)&__m256_op0[3]) = 0x55550000;
++  *((int *)&__m256_op0[2]) = 0x55550000;
++  *((int *)&__m256_op0[1]) = 0x55550000;
++  *((int *)&__m256_op0[0]) = 0x55550000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000d5000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000d5000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000d5000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000d5000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x007f8080;
++  *((int *)&__m256_op0[6]) = 0x007f007f;
++  *((int *)&__m256_op0[5]) = 0x007f8080;
++  *((int *)&__m256_op0[4]) = 0x007f007f;
++  *((int *)&__m256_op0[3]) = 0x007f8080;
++  *((int *)&__m256_op0[2]) = 0x007f007f;
++  *((int *)&__m256_op0[1]) = 0x007f8080;
++  *((int *)&__m256_op0[0]) = 0x007f007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x08e8c000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0fffc000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x08e8c000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0fffc000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftinth_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000002;
++  *((int *)&__m256_op0[4]) = 0x00000008;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000002;
++  *((int *)&__m256_op0[0]) = 0x00000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x7f1d7f7f;
++  *((int *)&__m256_op0[6]) = 0x7f1d7f3b;
++  *((int *)&__m256_op0[5]) = 0x02020102;
++  *((int *)&__m256_op0[4]) = 0x02020102;
++  *((int *)&__m256_op0[3]) = 0x7f1d7f7f;
++  *((int *)&__m256_op0[2]) = 0x7f1d7f3b;
++  *((int *)&__m256_op0[1]) = 0x02020102;
++  *((int *)&__m256_op0[0]) = 0x02020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrnel_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000102;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x39ffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x39ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x80000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x80000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x80000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x80000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x000055ff;
++  *((int *)&__m256_op0[6]) = 0x01f90ab5;
++  *((int *)&__m256_op0[5]) = 0xaa95eaff;
++  *((int *)&__m256_op0[4]) = 0xfec6e01f;
++  *((int *)&__m256_op0[3]) = 0x000055ff;
++  *((int *)&__m256_op0[2]) = 0x01f90ab5;
++  *((int *)&__m256_op0[1]) = 0xaa95eaff;
++  *((int *)&__m256_op0[0]) = 0xfec6e01f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffeb683;
++  *((int *)&__m256_op0[6]) = 0x9ffffd80;
++  *((int *)&__m256_op0[5]) = 0xfffe97c0;
++  *((int *)&__m256_op0[4]) = 0x20010001;
++  *((int *)&__m256_op0[3]) = 0xfffeb683;
++  *((int *)&__m256_op0[2]) = 0x9ffffd80;
++  *((int *)&__m256_op0[1]) = 0xfffe97c0;
++  *((int *)&__m256_op0[0]) = 0x20010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvftintrpl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x000000ff;
++  *((int *)&__m256_op0[6]) = 0x000000f8;
++  *((int *)&__m256_op0[5]) = 0xbc8ff0ff;
++  *((int *)&__m256_op0[4]) = 0xffffcff8;
++  *((int *)&__m256_op0[3]) = 0x000000ff;
++  *((int *)&__m256_op0[2]) = 0x000000f8;
++  *((int *)&__m256_op0[1]) = 0xbc8ff0ff;
++  *((int *)&__m256_op0[0]) = 0xffffcff8;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000001;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x00000001;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000001;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x00000001;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x7fe37fe3;
++  *((int *)&__m256_op0[6]) = 0x001d001d;
++  *((int *)&__m256_op0[5]) = 0x7fff7fff;
++  *((int *)&__m256_op0[4]) = 0x7fff0000;
++  *((int *)&__m256_op0[3]) = 0x7fe37fe3;
++  *((int *)&__m256_op0[2]) = 0x001d001d;
++  *((int *)&__m256_op0[1]) = 0x7fff7fff;
++  *((int *)&__m256_op0[0]) = 0x7fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000010;
++  *((int *)&__m256_op0[6]) = 0x00000010;
++  *((int *)&__m256_op0[5]) = 0x00000010;
++  *((int *)&__m256_op0[4]) = 0x00000010;
++  *((int *)&__m256_op0[3]) = 0x00000010;
++  *((int *)&__m256_op0[2]) = 0x00000010;
++  *((int *)&__m256_op0[1]) = 0x00000010;
++  *((int *)&__m256_op0[0]) = 0x00000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrml_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x8b141414;
++  *((int *)&__m256_op0[4]) = 0x0e0e0e0e;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x36722a7e;
++  *((int *)&__m256_op0[0]) = 0x66972cd6;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x6a9e3f9a;
++  *((int *)&__m256_op0[4]) = 0x603a2001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x6a9e3f9a;
++  *((int *)&__m256_op0[0]) = 0x603a2001;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000fafe;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000fafe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzl_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00fffefe;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xfffffffc;
++  *((int *)&__m256_op0[4]) = 0x5556aaa8;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xfffffffc;
++  *((int *)&__m256_op0[0]) = 0x5556aaa8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffcc80;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x7dfdff4b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x002a5429;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x002a5429;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x77777777;
++  *((int *)&__m256_op0[6]) = 0xf7777777;
++  *((int *)&__m256_op0[5]) = 0xf7777777;
++  *((int *)&__m256_op0[4]) = 0x77777777;
++  *((int *)&__m256_op0[3]) = 0x77777777;
++  *((int *)&__m256_op0[2]) = 0xf7777777;
++  *((int *)&__m256_op0[1]) = 0xf7777777;
++  *((int *)&__m256_op0[0]) = 0x77777777;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000009;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000009;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000009;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x010c7fbc;
++  *((int *)&__m256_op0[6]) = 0x7e1c7e1c;
++  *((int *)&__m256_op0[5]) = 0xfe000000;
++  *((int *)&__m256_op0[4]) = 0x00000024;
++  *((int *)&__m256_op0[3]) = 0x010c7fbc;
++  *((int *)&__m256_op0[2]) = 0x7e1c7e1c;
++  *((int *)&__m256_op0[1]) = 0xfe000000;
++  *((int *)&__m256_op0[0]) = 0x00000024;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffffe20;
++  *((int *)&__m256_op0[6]) = 0x001dfe1f;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xfffffe20;
++  *((int *)&__m256_op0[2]) = 0x001dfe1f;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffe1;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffe1;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffe1;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffe1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000040;
++  *((int *)&__m256_op0[6]) = 0x00000020;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000040;
++  *((int *)&__m256_op0[2]) = 0x00000020;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrneh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xfefefeff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xff295329;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xfefefeff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xff295329;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xff00ffff;
++  *((int *)&__m256_op0[6]) = 0xff00ffff;
++  *((int *)&__m256_op0[5]) = 0xff00ffff;
++  *((int *)&__m256_op0[4]) = 0xff00ffff;
++  *((int *)&__m256_op0[3]) = 0xff00ffff;
++  *((int *)&__m256_op0[2]) = 0xff00ffff;
++  *((int *)&__m256_op0[1]) = 0xff00ffff;
++  *((int *)&__m256_op0[0]) = 0xff00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x7fefffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x7fefffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x02020102;
++  *((int *)&__m256_op0[6]) = 0x02020102;
++  *((int *)&__m256_op0[5]) = 0x02020102;
++  *((int *)&__m256_op0[4]) = 0x02020102;
++  *((int *)&__m256_op0[3]) = 0x02020102;
++  *((int *)&__m256_op0[2]) = 0x02020102;
++  *((int *)&__m256_op0[1]) = 0x02020102;
++  *((int *)&__m256_op0[0]) = 0x02020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvftintrph_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x515f93f0;
++  *((int *)&__m256_op0[6]) = 0x23600fb9;
++  *((int *)&__m256_op0[5]) = 0x948b39e0;
++  *((int *)&__m256_op0[4]) = 0xb7405f6f;
++  *((int *)&__m256_op0[3]) = 0x48ef0878;
++  *((int *)&__m256_op0[2]) = 0x00007c83;
++  *((int *)&__m256_op0[1]) = 0x78af877c;
++  *((int *)&__m256_op0[0]) = 0x7d7f86f9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000df93f0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000077843;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x17171717;
++  *((int *)&__m256_op0[6]) = 0x17171717;
++  *((int *)&__m256_op0[5]) = 0x000607f7;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x17171717;
++  *((int *)&__m256_op0[2]) = 0x17171717;
++  *((int *)&__m256_op0[1]) = 0x000607f7;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00ff00ff;
++  *((int *)&__m256_op0[6]) = 0x00ff00ff;
++  *((int *)&__m256_op0[5]) = 0x00ff00ff;
++  *((int *)&__m256_op0[4]) = 0x017e01fe;
++  *((int *)&__m256_op0[3]) = 0x017e00ff;
++  *((int *)&__m256_op0[2]) = 0x017e00ff;
++  *((int *)&__m256_op0[1]) = 0x00ff00ff;
++  *((int *)&__m256_op0[0]) = 0x017e01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfefefefe;
++  *((int *)&__m256_op0[6]) = 0xfefefefe;
++  *((int *)&__m256_op0[5]) = 0xfe8bfe0e;
++  *((int *)&__m256_op0[4]) = 0xfe8bfe12;
++  *((int *)&__m256_op0[3]) = 0xfefefefe;
++  *((int *)&__m256_op0[2]) = 0xfefefefe;
++  *((int *)&__m256_op0[1]) = 0xfe8bfe0e;
++  *((int *)&__m256_op0[0]) = 0xfe8bfe12;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x71717171;
++  *((int *)&__m256_op0[6]) = 0x71010101;
++  *((int *)&__m256_op0[5]) = 0x8e8e8e8e;
++  *((int *)&__m256_op0[4]) = 0x8f00ffff;
++  *((int *)&__m256_op0[3]) = 0x71717171;
++  *((int *)&__m256_op0[2]) = 0x71010101;
++  *((int *)&__m256_op0[1]) = 0x8e8e8e8e;
++  *((int *)&__m256_op0[0]) = 0x8f00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00060000;
++  *((int *)&__m256_op0[6]) = 0x00040000;
++  *((int *)&__m256_op0[5]) = 0x00020000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00060000;
++  *((int *)&__m256_op0[2]) = 0x00040000;
++  *((int *)&__m256_op0[1]) = 0x00020000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xff1cff1c;
++  *((int *)&__m256_op0[6]) = 0xff1cff1c;
++  *((int *)&__m256_op0[5]) = 0xff1cff1c;
++  *((int *)&__m256_op0[4]) = 0xff1cff1c;
++  *((int *)&__m256_op0[3]) = 0xff1cff1c;
++  *((int *)&__m256_op0[2]) = 0xff1cff1c;
++  *((int *)&__m256_op0[1]) = 0xff1cff1c;
++  *((int *)&__m256_op0[0]) = 0xff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x000fffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x000fffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrmh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00002262;
++  *((int *)&__m256_op0[6]) = 0x00005111;
++  *((int *)&__m256_op0[5]) = 0x0000165e;
++  *((int *)&__m256_op0[4]) = 0x0000480d;
++  *((int *)&__m256_op0[3]) = 0x00002262;
++  *((int *)&__m256_op0[2]) = 0x00005111;
++  *((int *)&__m256_op0[1]) = 0x0000165e;
++  *((int *)&__m256_op0[0]) = 0x0000480d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00040004;
++  *((int *)&__m256_op0[6]) = 0x00040004;
++  *((int *)&__m256_op0[5]) = 0x00040005;
++  *((int *)&__m256_op0[4]) = 0x00040005;
++  *((int *)&__m256_op0[3]) = 0x00040004;
++  *((int *)&__m256_op0[2]) = 0x00040004;
++  *((int *)&__m256_op0[1]) = 0x00040005;
++  *((int *)&__m256_op0[0]) = 0x00040005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvftintrzh_l_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-floating-point-op.patch b/LoongArch-Add-tests-for-ASX-vector-floating-point-op.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9232f2370c318b315e366719f6b795c820ba9ba2
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-floating-point-op.patch
@@ -0,0 +1,5614 @@
+From 9a9935e736a9289e0a1c0a77f4110c206ce36bd2 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:03:17 +0800
+Subject: [PATCH 111/124] LoongArch: Add tests for ASX vector floating-point
+ operation instruction.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvfadd_d.c     | 545 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvfadd_s.c     | 911 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfclass_d.c   | 152 +++
+ .../loongarch/vector/lasx/lasx-xvfclass_s.c   |  95 ++
+ .../loongarch/vector/lasx/lasx-xvflogb_d.c    |  86 ++
+ .../loongarch/vector/lasx/lasx-xvflogb_s.c    | 115 +++
+ .../loongarch/vector/lasx/lasx-xvfmadd_d.c    | 382 ++++++++
+ .../loongarch/vector/lasx/lasx-xvfmadd_s.c    | 720 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfmax_d.c     | 230 +++++
+ .../loongarch/vector/lasx/lasx-xvfmax_s.c     | 560 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvfmaxa_d.c    | 230 +++++
+ .../loongarch/vector/lasx/lasx-xvfmaxa_s.c    | 506 ++++++++++
+ .../loongarch/vector/lasx/lasx-xvfsqrt_d.c    | 482 +++++++++
+ .../loongarch/vector/lasx/lasx-xvfsqrt_s.c    | 457 +++++++++
+ 14 files changed, 5471 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
+new file mode 100644
+index 000000000..657a19e58
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_d.c
+@@ -0,0 +1,545 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffe06df0d7;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffbe8b470f;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffbe8b470f;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x41d6600000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x41d6600000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x41d6600000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x41d6600000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7fffffffffffffff;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256d_result[2]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256d_result[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256d_result[0]) = 0x00007fff00007fff;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000f000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000f000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256d_result[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256d_result[0]) = 0x7fffffffa2beb040;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000001c000000134;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000001c000000134;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000001c000000134;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000001c000000134;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000038000000268;
++  *((unsigned long *)&__m256d_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000038000000268;
++  *((unsigned long *)&__m256d_result[0]) = 0x7fff7fff7fff7fff;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000001010100;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000405;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000001010100;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000405;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000001010100;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000405;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000001010100;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000405;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000040;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256d_result[3]) = 0x00000000ff890000;
++  *((unsigned long *)&__m256d_result[2]) = 0x00000000ff790000;
++  *((unsigned long *)&__m256d_result[1]) = 0x00000000ff890000;
++  *((unsigned long *)&__m256d_result[0]) = 0x00000000ff790000;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000000000006d;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000000010006d;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000000000006d;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000000010006d;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000080040;
++  *((unsigned long *)&__m256d_result[3]) = 0x00000000000000ad;
++  *((unsigned long *)&__m256d_result[2]) = 0x00000000001800ad;
++  *((unsigned long *)&__m256d_result[1]) = 0x00000000000000ad;
++  *((unsigned long *)&__m256d_result[0]) = 0x00000000001800ad;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x2020000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x2020000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7fffffffffffffff;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff8000;
++  __m256d_out = __lasx_xvfadd_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000400000001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000400000001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000010100000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000010100000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00008000003f0000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00390015003529c1;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00008000003f0000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00390015003529c1;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000;
++  __m256d_out = __lasx_xvfmul_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00007ffe81fdfe03;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x80007ffe81fdfe03;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x41d8585858400000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x41d8585858400000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xff00d5007f00ffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xff00d5007f00ffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x7f00d5007f00ffff;
++  *((unsigned long *)&__m256d_result[2]) = 0x7f00ffffff00ffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x7f00d5007f00ffff;
++  *((unsigned long *)&__m256d_result[0]) = 0x7f00ffffff00ffff;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000002;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000002;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffff00000002;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffff00000002;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_result[2]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_result[0]) = 0x00ff00fe00ff00fe;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsub_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000005536aaaaac;
++  *((unsigned long *)&__m256d_op0[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000005536aaaaac;
++  *((unsigned long *)&__m256d_op0[0]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0002555400000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0002555400000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffe367cc82f8989a;
++  *((unsigned long *)&__m256d_op0[2]) = 0x4f90000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffc3aaa8d58f43c8;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000;
++  __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0010000000100000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x41cc5bb8a95fd1eb;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x41cc5bb8a95fd1eb;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000;
++  __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000;
++  __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000;
++  __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000;
++  __m256d_out = __lasx_xvfdiv_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
+new file mode 100644
+index 000000000..4002c4074
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfadd_s.c
+@@ -0,0 +1,911 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0x00000001;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000002;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0x00000001;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000002;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0x00000001;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000002;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0x00000001;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000002;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x40b2bf4d;
++  *((int *)&__m256_op0[6]) = 0x30313031;
++  *((int *)&__m256_op0[5]) = 0x50005000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x40b2bf4d;
++  *((int *)&__m256_op0[2]) = 0x30313031;
++  *((int *)&__m256_op0[1]) = 0x50005000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x22be22be;
++  *((int *)&__m256_op1[5]) = 0x7fff7fff;
++  *((int *)&__m256_op1[4]) = 0xa2bea2be;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x22be22be;
++  *((int *)&__m256_op1[1]) = 0x7fff7fff;
++  *((int *)&__m256_op1[0]) = 0xa2bea2be;
++  *((int *)&__m256_result[7]) = 0x40b2bf4d;
++  *((int *)&__m256_result[6]) = 0x30313031;
++  *((int *)&__m256_result[5]) = 0x7fff7fff;
++  *((int *)&__m256_result[4]) = 0xa2bea2be;
++  *((int *)&__m256_result[3]) = 0x40b2bf4d;
++  *((int *)&__m256_result[2]) = 0x30313031;
++  *((int *)&__m256_result[1]) = 0x7fff7fff;
++  *((int *)&__m256_result[0]) = 0xa2bea2be;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00ff0000;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00ff0000;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00ff0000;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00ff0000;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000008c;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000008c;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x0000008c;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x0000008c;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000118;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000118;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffff8000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffff8000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffff8000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffff8000;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffff0101;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffff0101;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffff0101;
++  *((int *)&__m256_result[4]) = 0x00000001;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xffff0101;
++  *((int *)&__m256_result[0]) = 0x00000001;
++  __m256_out = __lasx_xvfadd_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x10101011;
++  *((int *)&__m256_op1[4]) = 0x10101011;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x11111112;
++  *((int *)&__m256_op1[0]) = 0x11111112;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00060000;
++  *((int *)&__m256_op0[6]) = 0x00040000;
++  *((int *)&__m256_op0[5]) = 0x00020000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00060000;
++  *((int *)&__m256_op0[2]) = 0x00040000;
++  *((int *)&__m256_op0[1]) = 0x00020000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00060000;
++  *((int *)&__m256_op1[6]) = 0x00040000;
++  *((int *)&__m256_op1[5]) = 0x00020000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00060000;
++  *((int *)&__m256_op1[2]) = 0x00040000;
++  *((int *)&__m256_op1[1]) = 0x00020000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x000000ff;
++  *((int *)&__m256_op0[4]) = 0x000000ff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x000000ff;
++  *((int *)&__m256_op0[0]) = 0x000000ff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000101;
++  *((int *)&__m256_op1[4]) = 0x00000101;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000101;
++  *((int *)&__m256_op1[0]) = 0x00000101;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmul_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffff001f;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x007fe268;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffff001f;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x007fe268;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0xffff001f;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x007fe268;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0xffff001f;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x007fe268;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0xffff001f;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0xffff001f;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x7f800000;
++  *((int *)&__m256_op1[6]) = 0x7f800000;
++  *((int *)&__m256_op1[5]) = 0x7f800000;
++  *((int *)&__m256_op1[4]) = 0x7f800000;
++  *((int *)&__m256_op1[3]) = 0x7f800000;
++  *((int *)&__m256_op1[2]) = 0x7f800000;
++  *((int *)&__m256_op1[1]) = 0x7f800000;
++  *((int *)&__m256_op1[0]) = 0x7f800000;
++  *((int *)&__m256_result[7]) = 0xff800000;
++  *((int *)&__m256_result[6]) = 0xff800000;
++  *((int *)&__m256_result[5]) = 0xff800000;
++  *((int *)&__m256_result[4]) = 0xff800000;
++  *((int *)&__m256_result[3]) = 0xff800000;
++  *((int *)&__m256_result[2]) = 0xff800000;
++  *((int *)&__m256_result[1]) = 0xff800000;
++  *((int *)&__m256_result[0]) = 0xff800000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x02a54290;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x02a54290;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x02a54290;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x0154dc84;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x02a54290;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000089;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x82a54290;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x028aa700;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x82a54290;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x02a54287;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00004200;
++  *((int *)&__m256_op0[6]) = 0x80000000;
++  *((int *)&__m256_op0[5]) = 0x5fff5fff;
++  *((int *)&__m256_op0[4]) = 0x607f0000;
++  *((int *)&__m256_op0[3]) = 0x00004200;
++  *((int *)&__m256_op0[2]) = 0x80000000;
++  *((int *)&__m256_op0[1]) = 0x5fff5fff;
++  *((int *)&__m256_op0[0]) = 0x607f0000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00004200;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x5fff5fff;
++  *((int *)&__m256_result[4]) = 0x607f0000;
++  *((int *)&__m256_result[3]) = 0x00004200;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x5fff5fff;
++  *((int *)&__m256_result[0]) = 0x607f0000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00800080;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000202;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00800080;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000202;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00800080;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000202;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00800080;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000202;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffefffe;
++  *((int *)&__m256_op0[6]) = 0xfffefffe;
++  *((int *)&__m256_op0[5]) = 0xfffefffe;
++  *((int *)&__m256_op0[4]) = 0xfffefffe;
++  *((int *)&__m256_op0[3]) = 0xfffefffe;
++  *((int *)&__m256_op0[2]) = 0xfffefffe;
++  *((int *)&__m256_op0[1]) = 0xfffefffe;
++  *((int *)&__m256_op0[0]) = 0xfffefffe;
++  *((int *)&__m256_op1[7]) = 0x000023a3;
++  *((int *)&__m256_op1[6]) = 0x00003fff;
++  *((int *)&__m256_op1[5]) = 0x000023a3;
++  *((int *)&__m256_op1[4]) = 0x00003fef;
++  *((int *)&__m256_op1[3]) = 0x000023a3;
++  *((int *)&__m256_op1[2]) = 0x00003fff;
++  *((int *)&__m256_op1[1]) = 0x000023a3;
++  *((int *)&__m256_op1[0]) = 0x00003fef;
++  *((int *)&__m256_result[7]) = 0xfffefffe;
++  *((int *)&__m256_result[6]) = 0xfffefffe;
++  *((int *)&__m256_result[5]) = 0xfffefffe;
++  *((int *)&__m256_result[4]) = 0xfffefffe;
++  *((int *)&__m256_result[3]) = 0xfffefffe;
++  *((int *)&__m256_result[2]) = 0xfffefffe;
++  *((int *)&__m256_result[1]) = 0xfffefffe;
++  *((int *)&__m256_result[0]) = 0xfffefffe;
++  __m256_out = __lasx_xvfsub_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x002a542a;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x002a542a;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000001;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000001;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7fc00000;
++  *((int *)&__m256_result[4]) = 0x7fc00000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7fc00000;
++  *((int *)&__m256_result[0]) = 0x7fc00000;
++  __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00fe00fe;
++  *((int *)&__m256_op0[6]) = 0x00fe00fe;
++  *((int *)&__m256_op0[5]) = 0x00fe00fe;
++  *((int *)&__m256_op0[4]) = 0x00fe00fe;
++  *((int *)&__m256_op0[3]) = 0x00fe00fe;
++  *((int *)&__m256_op0[2]) = 0x00fe00fe;
++  *((int *)&__m256_op0[1]) = 0x00fe00fe;
++  *((int *)&__m256_op0[0]) = 0x00fe00fe;
++  *((int *)&__m256_op1[7]) = 0x00fe00fe;
++  *((int *)&__m256_op1[6]) = 0x00fe00fe;
++  *((int *)&__m256_op1[5]) = 0x00fe00fe;
++  *((int *)&__m256_op1[4]) = 0x00fe00fe;
++  *((int *)&__m256_op1[3]) = 0x00fe00fe;
++  *((int *)&__m256_op1[2]) = 0x00fe00fe;
++  *((int *)&__m256_op1[1]) = 0x00fe00fe;
++  *((int *)&__m256_op1[0]) = 0x00fe00fe;
++  *((int *)&__m256_result[7]) = 0x3f800000;
++  *((int *)&__m256_result[6]) = 0x3f800000;
++  *((int *)&__m256_result[5]) = 0x3f800000;
++  *((int *)&__m256_result[4]) = 0x3f800000;
++  *((int *)&__m256_result[3]) = 0x3f800000;
++  *((int *)&__m256_result[2]) = 0x3f800000;
++  *((int *)&__m256_result[1]) = 0x3f800000;
++  *((int *)&__m256_result[0]) = 0x3f800000;
++  __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7fc00000;
++  *((int *)&__m256_result[6]) = 0x7fc00000;
++  *((int *)&__m256_result[5]) = 0x7fc00000;
++  *((int *)&__m256_result[4]) = 0x7fc00000;
++  *((int *)&__m256_result[3]) = 0x7fc00000;
++  *((int *)&__m256_result[2]) = 0x7fc00000;
++  *((int *)&__m256_result[1]) = 0x7fc00000;
++  *((int *)&__m256_result[0]) = 0x7fc00000;
++  __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x803f6004;
++  *((int *)&__m256_op0[4]) = 0x1f636003;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x803f6004;
++  *((int *)&__m256_op0[0]) = 0x1f636003;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x007f0107;
++  *((int *)&__m256_op1[4]) = 0x00c70106;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x007f0107;
++  *((int *)&__m256_op1[0]) = 0x00c70106;
++  *((int *)&__m256_result[7]) = 0x7fc00000;
++  *((int *)&__m256_result[6]) = 0x7fc00000;
++  *((int *)&__m256_result[5]) = 0xbeff7cfd;
++  *((int *)&__m256_result[4]) = 0x5e123f94;
++  *((int *)&__m256_result[3]) = 0x7fc00000;
++  *((int *)&__m256_result[2]) = 0x7fc00000;
++  *((int *)&__m256_result[1]) = 0xbeff7cfd;
++  *((int *)&__m256_result[0]) = 0x5e123f94;
++  __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000008;
++  *((int *)&__m256_op0[6]) = 0x60601934;
++  *((int *)&__m256_op0[5]) = 0x00000008;
++  *((int *)&__m256_op0[4]) = 0x00200028;
++  *((int *)&__m256_op0[3]) = 0x00000008;
++  *((int *)&__m256_op0[2]) = 0x60601934;
++  *((int *)&__m256_op0[1]) = 0x00000008;
++  *((int *)&__m256_op0[0]) = 0x00200028;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfdiv_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
+new file mode 100644
+index 000000000..5d5b4c43c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_d.c
+@@ -0,0 +1,152 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++/* { dg-timeout 500 } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000017f0000017d;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000017f0000017f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256d_op0[2]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256d_op0[1]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256d_op0[0]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256d_op0[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256d_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000400000004000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000400000004000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvfclass_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
+new file mode 100644
+index 000000000..888e85b6e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfclass_s.c
+@@ -0,0 +1,95 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++/* { dg-timeout 500 } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++
++  *((int *)&__m256_op0[7]) = 0xfffffff8;
++  *((int *)&__m256_op0[6]) = 0xffffff08;
++  *((int *)&__m256_op0[5]) = 0x00ff00f8;
++  *((int *)&__m256_op0[4]) = 0x00ffcff8;
++  *((int *)&__m256_op0[3]) = 0xfffffff8;
++  *((int *)&__m256_op0[2]) = 0xffffff08;
++  *((int *)&__m256_op0[1]) = 0x00ff00f8;
++  *((int *)&__m256_op0[0]) = 0x00ffcff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080;
++  __m256i_out = __lasx_xvfclass_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvfclass_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000020000000200;
++  __m256i_out = __lasx_xvfclass_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x000000ff;
++  *((int *)&__m256_op0[4]) = 0x000000ff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x000000ff;
++  *((int *)&__m256_op0[0]) = 0x000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100;
++  __m256i_out = __lasx_xvfclass_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffffffb;
++  *((int *)&__m256_op0[6]) = 0xfffffffb;
++  *((int *)&__m256_op0[5]) = 0xfffffffb;
++  *((int *)&__m256_op0[4]) = 0xfffffffb;
++  *((int *)&__m256_op0[3]) = 0xfffffffb;
++  *((int *)&__m256_op0[2]) = 0xfffffffb;
++  *((int *)&__m256_op0[1]) = 0xfffffffb;
++  *((int *)&__m256_op0[0]) = 0xfffffffb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvfclass_s (__m256_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
+new file mode 100644
+index 000000000..bba1a06f3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_d.c
+@@ -0,0 +1,86 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xc08f780000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256d_result[1]) = 0xc08f780000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvflogb_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvflogb_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvflogb_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvflogb_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvflogb_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvflogb_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
+new file mode 100644
+index 000000000..b641c733f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvflogb_s.c
+@@ -0,0 +1,115 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvflogb_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x10101010;
++  *((int *)&__m256_op0[6]) = 0x10101012;
++  *((int *)&__m256_op0[5]) = 0x10101010;
++  *((int *)&__m256_op0[4]) = 0x10101012;
++  *((int *)&__m256_op0[3]) = 0x10101010;
++  *((int *)&__m256_op0[2]) = 0x10101093;
++  *((int *)&__m256_op0[1]) = 0x11111111;
++  *((int *)&__m256_op0[0]) = 0x11111113;
++  *((int *)&__m256_result[7]) = 0xc2be0000;
++  *((int *)&__m256_result[6]) = 0xc2be0000;
++  *((int *)&__m256_result[5]) = 0xc2be0000;
++  *((int *)&__m256_result[4]) = 0xc2be0000;
++  *((int *)&__m256_result[3]) = 0xc2be0000;
++  *((int *)&__m256_result[2]) = 0xc2be0000;
++  *((int *)&__m256_result[1]) = 0xc2ba0000;
++  *((int *)&__m256_result[0]) = 0xc2ba0000;
++  __m256_out = __lasx_xvflogb_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xff800000;
++  *((int *)&__m256_result[6]) = 0xff800000;
++  *((int *)&__m256_result[5]) = 0xff800000;
++  *((int *)&__m256_result[4]) = 0xff800000;
++  *((int *)&__m256_result[3]) = 0xff800000;
++  *((int *)&__m256_result[2]) = 0xff800000;
++  *((int *)&__m256_result[1]) = 0xff800000;
++  *((int *)&__m256_result[0]) = 0xff800000;
++  __m256_out = __lasx_xvflogb_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xff800000;
++  *((int *)&__m256_result[6]) = 0xff800000;
++  *((int *)&__m256_result[5]) = 0xff800000;
++  *((int *)&__m256_result[4]) = 0xff800000;
++  *((int *)&__m256_result[3]) = 0xff800000;
++  *((int *)&__m256_result[2]) = 0xff800000;
++  *((int *)&__m256_result[1]) = 0xff800000;
++  *((int *)&__m256_result[0]) = 0xff800000;
++  __m256_out = __lasx_xvflogb_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000087;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000087;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xff800000;
++  *((int *)&__m256_result[6]) = 0xff800000;
++  *((int *)&__m256_result[5]) = 0xc30e0000;
++  *((int *)&__m256_result[4]) = 0xff800000;
++  *((int *)&__m256_result[3]) = 0xff800000;
++  *((int *)&__m256_result[2]) = 0xff800000;
++  *((int *)&__m256_result[1]) = 0xc30e0000;
++  *((int *)&__m256_result[0]) = 0xff800000;
++  __m256_out = __lasx_xvflogb_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
+new file mode 100644
+index 000000000..c85c94bf6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_d.c
+@@ -0,0 +1,382 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xe37affb42fc05f69;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x65fb66c81da8e5ba;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256d_op2[2]) = 0x00d6c1c830160048;
++  *((unsigned long *)&__m256d_op2[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256d_op2[0]) = 0xe3aebaf4df958004;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0x00d6c1c830160048;
++  *((unsigned long *)&__m256d_result[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256d_result[0]) = 0xe3aebaf4df958004;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000f3280000dfff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfe02fe02fee5fe22;
++  *((unsigned long *)&__m256d_op1[0]) = 0xff49fe4200000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x00020001ffb6ffe0;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0049004200000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xbf28b0686066be60;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0xc5c5c5c5c5c5c5c5;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2);
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00007f7f00000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00007f7f00007fff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000f1a40;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000aaaa0000aaaa;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000aaaa0000aaaa;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000aaaa0000aaaa;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000aaaa0000aaaa;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000aaaa0000aaaa;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000aaaa0000aaaa;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0202020202020202;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0202810102020202;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0202810102020202;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x00007fff00000000;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x000000000000ffff;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256d_op1[2]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256d_op1[0]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256d_result[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256d_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000100010001;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffff000000;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[3]) = 0xd3d3d3d3d3d3d3d3;
++  *((unsigned long *)&__m256d_op2[2]) = 0xd3d3d3d3d3d3d3d3;
++  *((unsigned long *)&__m256d_op2[1]) = 0xd3d3d3d3d3d3d3d3;
++  *((unsigned long *)&__m256d_op2[0]) = 0xd3d3d3d3d3d3d3d3;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256d_op2[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256d_op2[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256d_op2[0]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff5f5c;
++  __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000fff0e400;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000007380;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000f1c00;
++  *((unsigned long *)&__m256d_op2[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op2[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256d_op2[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op2[0]) = 0x00000000fff0e400;
++  *((unsigned long *)&__m256d_result[3]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0x80000000ffff8c80;
++  *((unsigned long *)&__m256d_result[1]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0x80000000fff0e400;
++  __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x80000000000001dc;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x80000000000001dc;
++  __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0404000004040000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0404000004040000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[3]) = 0x8011ffee804c004c;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00faff0500c3ff3c;
++  *((unsigned long *)&__m256d_op1[1]) = 0x80f900f980780078;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0057ffa800ceff31;
++  *((unsigned long *)&__m256d_op2[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256d_op2[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256d_op2[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256d_op2[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256d_result[2]) = 0x80003fc00000428a;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256d_result[0]) = 0x80003fc00000428a;
++  __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op2[2]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op2[0]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000100000001;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000100000001;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffb2f600006f48;
++  __m256d_out = __lasx_xvfmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
+new file mode 100644
+index 000000000..bde41dd5c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmadd_s.c
+@@ -0,0 +1,720 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xf328dfff;
++  *((int *)&__m256_op1[1]) = 0x6651bfff;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x0000ffff;
++  *((int *)&__m256_op2[6]) = 0x0000ff80;
++  *((int *)&__m256_op2[5]) = 0x00004686;
++  *((int *)&__m256_op2[4]) = 0x00007f79;
++  *((int *)&__m256_op2[3]) = 0x0000ffff;
++  *((int *)&__m256_op2[2]) = 0x0000ffff;
++  *((int *)&__m256_op2[1]) = 0x0000f328;
++  *((int *)&__m256_op2[0]) = 0x0000dfff;
++  *((int *)&__m256_result[7]) = 0x0000ffff;
++  *((int *)&__m256_result[6]) = 0x0000ff80;
++  *((int *)&__m256_result[5]) = 0x00004686;
++  *((int *)&__m256_result[4]) = 0x00007f79;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0x0000ffff;
++  *((int *)&__m256_result[1]) = 0x0000f328;
++  *((int *)&__m256_result[0]) = 0x0000dfff;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xfff10000;
++  *((int *)&__m256_op0[4]) = 0xfff10000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xfff10000;
++  *((int *)&__m256_op0[0]) = 0xfff10000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xfff10000;
++  *((int *)&__m256_result[4]) = 0xfff10000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xfff10000;
++  *((int *)&__m256_result[0]) = 0xfff10000;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x803f6004;
++  *((int *)&__m256_op2[4]) = 0x1f636003;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x803f6004;
++  *((int *)&__m256_op2[0]) = 0x1f636003;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x803f6004;
++  *((int *)&__m256_result[4]) = 0x1f636003;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x803f6004;
++  *((int *)&__m256_result[0]) = 0x1f636003;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffb3430a;
++  *((int *)&__m256_op0[4]) = 0x006ed8b8;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffb3430a;
++  *((int *)&__m256_op0[0]) = 0x006ed8b8;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x000001ff;
++  *((int *)&__m256_op1[4]) = 0x000003fe;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x000001ff;
++  *((int *)&__m256_op1[0]) = 0x000003fe;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x000000ff;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x000000ff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xfff3430a;
++  *((int *)&__m256_result[4]) = 0x000000ff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xfff3430a;
++  *((int *)&__m256_result[0]) = 0x000000ff;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffeb683;
++  *((int *)&__m256_op0[6]) = 0x9ffffd80;
++  *((int *)&__m256_op0[5]) = 0xfffe97c0;
++  *((int *)&__m256_op0[4]) = 0x20010001;
++  *((int *)&__m256_op0[3]) = 0xfffeb683;
++  *((int *)&__m256_op0[2]) = 0x9ffffd80;
++  *((int *)&__m256_op0[1]) = 0xfffe97c0;
++  *((int *)&__m256_op0[0]) = 0x20010001;
++  *((int *)&__m256_op1[7]) = 0x00009fff;
++  *((int *)&__m256_op1[6]) = 0x9ffffd80;
++  *((int *)&__m256_op1[5]) = 0x0000ffff;
++  *((int *)&__m256_op1[4]) = 0x20010001;
++  *((int *)&__m256_op1[3]) = 0x00009fff;
++  *((int *)&__m256_op1[2]) = 0x9ffffd80;
++  *((int *)&__m256_op1[1]) = 0x0000ffff;
++  *((int *)&__m256_op1[0]) = 0x20010001;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00002080;
++  *((int *)&__m256_op2[4]) = 0xdf5b41cf;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00002080;
++  *((int *)&__m256_op2[0]) = 0xdf5b41cf;
++  *((int *)&__m256_result[7]) = 0xfffeb683;
++  *((int *)&__m256_result[6]) = 0x007ffd80;
++  *((int *)&__m256_result[5]) = 0xfffe97c0;
++  *((int *)&__m256_result[4]) = 0xdf5b41cf;
++  *((int *)&__m256_result[3]) = 0xfffeb683;
++  *((int *)&__m256_result[2]) = 0x007ffd80;
++  *((int *)&__m256_result[1]) = 0xfffe97c0;
++  *((int *)&__m256_result[0]) = 0xdf5b41cf;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xfffeb664;
++  *((int *)&__m256_op1[6]) = 0x007ffd61;
++  *((int *)&__m256_op1[5]) = 0xfffe97a1;
++  *((int *)&__m256_op1[4]) = 0xdf5b41b0;
++  *((int *)&__m256_op1[3]) = 0xfffeb664;
++  *((int *)&__m256_op1[2]) = 0x007ffd61;
++  *((int *)&__m256_op1[1]) = 0xfffe97a1;
++  *((int *)&__m256_op1[0]) = 0xdf5b41b0;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x94d7fb52;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xfffeb664;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xfffe97a1;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xfffeb664;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xfffe97a1;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xb70036db;
++  *((int *)&__m256_op1[6]) = 0x12c4007e;
++  *((int *)&__m256_op1[5]) = 0xb7146213;
++  *((int *)&__m256_op1[4]) = 0xfc1e0049;
++  *((int *)&__m256_op1[3]) = 0x000000fe;
++  *((int *)&__m256_op1[2]) = 0xfe02fffe;
++  *((int *)&__m256_op1[1]) = 0xb71c413b;
++  *((int *)&__m256_op1[0]) = 0x199d04b5;
++  *((int *)&__m256_op2[7]) = 0xb70036db;
++  *((int *)&__m256_op2[6]) = 0x12c4007e;
++  *((int *)&__m256_op2[5]) = 0xb7146213;
++  *((int *)&__m256_op2[4]) = 0xfc1e0049;
++  *((int *)&__m256_op2[3]) = 0x000000fe;
++  *((int *)&__m256_op2[2]) = 0xfe02fffe;
++  *((int *)&__m256_op2[1]) = 0xb71c413b;
++  *((int *)&__m256_op2[0]) = 0x199d04b5;
++  *((int *)&__m256_result[7]) = 0x370036db;
++  *((int *)&__m256_result[6]) = 0x92c4007e;
++  *((int *)&__m256_result[5]) = 0x37146213;
++  *((int *)&__m256_result[4]) = 0x7c1e0049;
++  *((int *)&__m256_result[3]) = 0x800000fe;
++  *((int *)&__m256_result[2]) = 0x7e02fffe;
++  *((int *)&__m256_result[1]) = 0x371c413b;
++  *((int *)&__m256_result[0]) = 0x999d04b5;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x3f7f7f7e;
++  *((int *)&__m256_op1[4]) = 0xff800000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x3f7f7f7e;
++  *((int *)&__m256_op1[0]) = 0xff800000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x7fffffff;
++  *((int *)&__m256_op2[4]) = 0xff7fffff;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x7fffffff;
++  *((int *)&__m256_op2[0]) = 0xff7fffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x7fffffff;
++  *((int *)&__m256_result[4]) = 0x7fc00000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x7fffffff;
++  *((int *)&__m256_result[0]) = 0x7fc00000;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffafaf;
++  *((int *)&__m256_op0[4]) = 0xb3b3dc9d;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffafaf;
++  *((int *)&__m256_op0[0]) = 0xb3b3dc9d;
++  *((int *)&__m256_op1[7]) = 0x00020000;
++  *((int *)&__m256_op1[6]) = 0x00020000;
++  *((int *)&__m256_op1[5]) = 0x00220021;
++  *((int *)&__m256_op1[4]) = 0x004a007e;
++  *((int *)&__m256_op1[3]) = 0x00020000;
++  *((int *)&__m256_op1[2]) = 0x00020000;
++  *((int *)&__m256_op1[1]) = 0x00220021;
++  *((int *)&__m256_op1[0]) = 0x004a007e;
++  *((int *)&__m256_op2[7]) = 0x00000001;
++  *((int *)&__m256_op2[6]) = 0x00007f7f;
++  *((int *)&__m256_op2[5]) = 0x00000001;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000001;
++  *((int *)&__m256_op2[2]) = 0x00007f7f;
++  *((int *)&__m256_op2[1]) = 0x00000001;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000001;
++  *((int *)&__m256_result[6]) = 0x80007f7f;
++  *((int *)&__m256_result[5]) = 0xffffafaf;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000001;
++  *((int *)&__m256_result[2]) = 0x80007f7f;
++  *((int *)&__m256_result[1]) = 0xffffafaf;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0xffffffe5;
++  *((int *)&__m256_op2[6]) = 0xffffffe5;
++  *((int *)&__m256_op2[5]) = 0xffffffe5;
++  *((int *)&__m256_op2[4]) = 0xffffffe5;
++  *((int *)&__m256_op2[3]) = 0xffffffe5;
++  *((int *)&__m256_op2[2]) = 0xffffffe5;
++  *((int *)&__m256_op2[1]) = 0xffffffe5;
++  *((int *)&__m256_op2[0]) = 0xffffffe5;
++  *((int *)&__m256_result[7]) = 0xffffffe5;
++  *((int *)&__m256_result[6]) = 0xffffffe5;
++  *((int *)&__m256_result[5]) = 0xffffffe5;
++  *((int *)&__m256_result[4]) = 0xffffffe5;
++  *((int *)&__m256_result[3]) = 0xffffffe5;
++  *((int *)&__m256_result[2]) = 0xffffffe5;
++  *((int *)&__m256_result[1]) = 0xffffffe5;
++  *((int *)&__m256_result[0]) = 0xffffffe5;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xbfffffff;
++  *((int *)&__m256_op0[6]) = 0xffff8000;
++  *((int *)&__m256_op0[5]) = 0xbfff8000;
++  *((int *)&__m256_op0[4]) = 0x80000000;
++  *((int *)&__m256_op0[3]) = 0xbfffffff;
++  *((int *)&__m256_op0[2]) = 0xffff8000;
++  *((int *)&__m256_op0[1]) = 0xbfff8000;
++  *((int *)&__m256_op0[0]) = 0x80000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0xffff8000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0xffff8000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x02020102;
++  *((int *)&__m256_op1[6]) = 0x02020102;
++  *((int *)&__m256_op1[5]) = 0x02020102;
++  *((int *)&__m256_op1[4]) = 0x02020102;
++  *((int *)&__m256_op1[3]) = 0x02020102;
++  *((int *)&__m256_op1[2]) = 0x02020102;
++  *((int *)&__m256_op1[1]) = 0x02020102;
++  *((int *)&__m256_op1[0]) = 0x02020102;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000008;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000008;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000008;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000008;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000008;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000008;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000008;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000008;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000001;
++  *((int *)&__m256_op2[4]) = 0x00000001;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000001;
++  *((int *)&__m256_op2[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x80000001;
++  *((int *)&__m256_result[4]) = 0x80000001;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x80000001;
++  *((int *)&__m256_result[0]) = 0x80000001;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000040;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000040;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x40404040;
++  *((int *)&__m256_op2[6]) = 0x40404040;
++  *((int *)&__m256_op2[5]) = 0x40404040;
++  *((int *)&__m256_op2[4]) = 0x40404040;
++  *((int *)&__m256_op2[3]) = 0x40404040;
++  *((int *)&__m256_op2[2]) = 0x40404040;
++  *((int *)&__m256_op2[1]) = 0x40404040;
++  *((int *)&__m256_op2[0]) = 0x40404040;
++  *((int *)&__m256_result[7]) = 0xc0404040;
++  *((int *)&__m256_result[6]) = 0xc0404040;
++  *((int *)&__m256_result[5]) = 0xc0404040;
++  *((int *)&__m256_result[4]) = 0xc0404040;
++  *((int *)&__m256_result[3]) = 0xc0404040;
++  *((int *)&__m256_result[2]) = 0xc0404040;
++  *((int *)&__m256_result[1]) = 0xc0404040;
++  *((int *)&__m256_result[0]) = 0xc0404040;
++  __m256_out = __lasx_xvfmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
+new file mode 100644
+index 000000000..207ba167f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_d.c
+@@ -0,0 +1,230 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000008000000080;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m256d_op1[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256d_op1[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256d_op1[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256d_op1[0]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256d_result[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256d_result[0]) = 0x45c5c5c545c5c5c5;
++  __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000004290;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000002a96ba;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000004290;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000002a96ba;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000083f95466;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101010100005400;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000004290;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000083f95466;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000004290;
++  *((unsigned long *)&__m256d_result[0]) = 0x0101010100005400;
++  __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0101000101010001;
++  __m256d_out = __lasx_xvfmax_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0200000202000002;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0200000202000002;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0101000101010001;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmin_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
+new file mode 100644
+index 000000000..9b7703231
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmax_s.c
+@@ -0,0 +1,560 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00010101;
++  *((int *)&__m256_op1[6]) = 0x01010101;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00010100;
++  *((int *)&__m256_op1[1]) = 0x00010000;
++  *((int *)&__m256_op1[0]) = 0x01000100;
++  *((int *)&__m256_result[7]) = 0x00010101;
++  *((int *)&__m256_result[6]) = 0x01010101;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00010100;
++  *((int *)&__m256_result[1]) = 0x00010000;
++  *((int *)&__m256_result[0]) = 0x01000100;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x59800000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x59800000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x59800000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x59800000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00010001;
++  *((int *)&__m256_op1[6]) = 0x00010001;
++  *((int *)&__m256_op1[5]) = 0x00010001;
++  *((int *)&__m256_op1[4]) = 0x00010001;
++  *((int *)&__m256_op1[3]) = 0x00010001;
++  *((int *)&__m256_op1[2]) = 0x00010001;
++  *((int *)&__m256_op1[1]) = 0x00010001;
++  *((int *)&__m256_op1[0]) = 0x00010001;
++  *((int *)&__m256_result[7]) = 0x00010001;
++  *((int *)&__m256_result[6]) = 0x00010001;
++  *((int *)&__m256_result[5]) = 0x00010001;
++  *((int *)&__m256_result[4]) = 0x00010001;
++  *((int *)&__m256_result[3]) = 0x00010001;
++  *((int *)&__m256_result[2]) = 0x00010001;
++  *((int *)&__m256_result[1]) = 0x00010001;
++  *((int *)&__m256_result[0]) = 0x00010001;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x7fefffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x7fefffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x000000ff;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x000000ff;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00003fe0;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00003fe0;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00003fe0;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00003fe0;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x0000000e;
++  *((int *)&__m256_op1[6]) = 0x0000000e;
++  *((int *)&__m256_op1[5]) = 0x0000000e;
++  *((int *)&__m256_op1[4]) = 0x0000000e;
++  *((int *)&__m256_op1[3]) = 0x0000000e;
++  *((int *)&__m256_op1[2]) = 0x0000000e;
++  *((int *)&__m256_op1[1]) = 0x0000000e;
++  *((int *)&__m256_op1[0]) = 0x0000000e;
++  *((int *)&__m256_result[7]) = 0x0000000e;
++  *((int *)&__m256_result[6]) = 0x0000000e;
++  *((int *)&__m256_result[5]) = 0x0000000e;
++  *((int *)&__m256_result[4]) = 0x0000000e;
++  *((int *)&__m256_result[3]) = 0x0000000e;
++  *((int *)&__m256_result[2]) = 0x0000000e;
++  *((int *)&__m256_result[1]) = 0x0000000e;
++  *((int *)&__m256_result[0]) = 0x0000000e;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffdbbbcf;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffb8579f;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffdbbbcf;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffb8579f;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0xfff8579f;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0xfff8579f;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x40404040;
++  *((int *)&__m256_op1[6]) = 0x40404040;
++  *((int *)&__m256_op1[5]) = 0x40404040;
++  *((int *)&__m256_op1[4]) = 0x40404040;
++  *((int *)&__m256_op1[3]) = 0x40404040;
++  *((int *)&__m256_op1[2]) = 0x40404040;
++  *((int *)&__m256_op1[1]) = 0x40404040;
++  *((int *)&__m256_op1[0]) = 0x40404040;
++  *((int *)&__m256_result[7]) = 0x40404040;
++  *((int *)&__m256_result[6]) = 0x40404040;
++  *((int *)&__m256_result[5]) = 0x40404040;
++  *((int *)&__m256_result[4]) = 0x40404040;
++  *((int *)&__m256_result[3]) = 0x40404040;
++  *((int *)&__m256_result[2]) = 0x40404040;
++  *((int *)&__m256_result[1]) = 0x40404040;
++  *((int *)&__m256_result[0]) = 0x40404040;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x0000006d;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0010006d;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x0000006d;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0010006d;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00080040;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00080040;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00080040;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00080040;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00080040;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x0010006d;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00080040;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x0010006d;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x000002ff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x000002ff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x000002ff;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x000002ff;
++  __m256_out = __lasx_xvfmax_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x7ff90000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x1ff60000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xfffffffe;
++  *((int *)&__m256_op1[4]) = 0x00000001;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xfffffffe;
++  *((int *)&__m256_op1[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000001;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000001;
++  __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x0218ff78;
++  *((int *)&__m256_op1[6]) = 0xfc38fc38;
++  *((int *)&__m256_op1[5]) = 0xfc000000;
++  *((int *)&__m256_op1[4]) = 0x00000048;
++  *((int *)&__m256_op1[3]) = 0x0218ff78;
++  *((int *)&__m256_op1[2]) = 0xfc38fc38;
++  *((int *)&__m256_op1[1]) = 0xfc000000;
++  *((int *)&__m256_op1[0]) = 0x00000048;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0xfc38fc38;
++  *((int *)&__m256_result[5]) = 0xfc000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0xfc38fc38;
++  *((int *)&__m256_result[1]) = 0xfc000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x000000f0;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x000000f0;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x000000f0;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x000000f0;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffe7;
++  *((int *)&__m256_op0[6]) = 0xffffffe7;
++  *((int *)&__m256_op0[5]) = 0xffffffe7;
++  *((int *)&__m256_op0[4]) = 0xffffffe7;
++  *((int *)&__m256_op0[3]) = 0xffffffe7;
++  *((int *)&__m256_op0[2]) = 0xffffffe7;
++  *((int *)&__m256_op0[1]) = 0xffffffe7;
++  *((int *)&__m256_op0[0]) = 0xffffffe7;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmin_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
+new file mode 100644
+index 000000000..96bbb942d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_d.c
+@@ -0,0 +1,230 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff8001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000018;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000018;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000018;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000018;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0002000000010000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0002000000010000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000001;
++  __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmaxa_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000008000000080;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000008000000080;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x00000000000000ff;
++  __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7efefefe80ffffff;
++  __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0087ff87f807ff87;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0087ff87f807ff87;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfmina_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
+new file mode 100644
+index 000000000..c73a8a74a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfmaxa_s.c
+@@ -0,0 +1,506 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00005555;
++  *((int *)&__m256_op1[6]) = 0x00005555;
++  *((int *)&__m256_op1[5]) = 0x000307ff;
++  *((int *)&__m256_op1[4]) = 0xfe72e815;
++  *((int *)&__m256_op1[3]) = 0x00005555;
++  *((int *)&__m256_op1[2]) = 0x00005555;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000015;
++  *((int *)&__m256_result[7]) = 0x00005555;
++  *((int *)&__m256_result[6]) = 0x00005555;
++  *((int *)&__m256_result[5]) = 0x000307ff;
++  *((int *)&__m256_result[4]) = 0xfe72e815;
++  *((int *)&__m256_result[3]) = 0x00005555;
++  *((int *)&__m256_result[2]) = 0x00005555;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000015;
++  __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00ff00ff;
++  *((int *)&__m256_op0[6]) = 0x00ff00ff;
++  *((int *)&__m256_op0[5]) = 0x00ff00ff;
++  *((int *)&__m256_op0[4]) = 0x000c0000;
++  *((int *)&__m256_op0[3]) = 0x00ff00ff;
++  *((int *)&__m256_op0[2]) = 0x00ff00ff;
++  *((int *)&__m256_op0[1]) = 0x00ff00ff;
++  *((int *)&__m256_op0[0]) = 0x00040000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00ff00ff;
++  *((int *)&__m256_result[6]) = 0x00ff00ff;
++  *((int *)&__m256_result[5]) = 0x00ff00ff;
++  *((int *)&__m256_result[4]) = 0x000c0000;
++  *((int *)&__m256_result[3]) = 0x00ff00ff;
++  *((int *)&__m256_result[2]) = 0x00ff00ff;
++  *((int *)&__m256_result[1]) = 0x00ff00ff;
++  *((int *)&__m256_result[0]) = 0x00040000;
++  __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x000007ff;
++  *((int *)&__m256_op0[6]) = 0x000007ff;
++  *((int *)&__m256_op0[5]) = 0x000007ff;
++  *((int *)&__m256_op0[4]) = 0xfffff800;
++  *((int *)&__m256_op0[3]) = 0x000007ff;
++  *((int *)&__m256_op0[2]) = 0x000007ff;
++  *((int *)&__m256_op0[1]) = 0x000007ff;
++  *((int *)&__m256_op0[0]) = 0xfffff800;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x000007ff;
++  *((int *)&__m256_result[6]) = 0x000007ff;
++  *((int *)&__m256_result[5]) = 0x000007ff;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x000007ff;
++  *((int *)&__m256_result[2]) = 0x000007ff;
++  *((int *)&__m256_result[1]) = 0x000007ff;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x001f00e0;
++  *((int *)&__m256_op0[4]) = 0x1f1f1fff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x001f00e0;
++  *((int *)&__m256_op0[0]) = 0x1f1f1fff;
++  *((int *)&__m256_op1[7]) = 0x80000000;
++  *((int *)&__m256_op1[6]) = 0x80000000;
++  *((int *)&__m256_op1[5]) = 0x80000000;
++  *((int *)&__m256_op1[4]) = 0xff800000;
++  *((int *)&__m256_op1[3]) = 0x80000000;
++  *((int *)&__m256_op1[2]) = 0x80000000;
++  *((int *)&__m256_op1[1]) = 0x80000000;
++  *((int *)&__m256_op1[0]) = 0xff800000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000001;
++  *((int *)&__m256_result[5]) = 0x001f00e0;
++  *((int *)&__m256_result[4]) = 0xff800000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000001;
++  *((int *)&__m256_result[1]) = 0x001f00e0;
++  *((int *)&__m256_result[0]) = 0xff800000;
++  __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000001;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000001;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00009fff;
++  *((int *)&__m256_op0[6]) = 0x00002001;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x00009fff;
++  *((int *)&__m256_op0[2]) = 0x00002001;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0xfffeb683;
++  *((int *)&__m256_op1[6]) = 0x9ffffd80;
++  *((int *)&__m256_op1[5]) = 0xfffe97c0;
++  *((int *)&__m256_op1[4]) = 0x20010001;
++  *((int *)&__m256_op1[3]) = 0xfffeb683;
++  *((int *)&__m256_op1[2]) = 0x9ffffd80;
++  *((int *)&__m256_op1[1]) = 0xfffe97c0;
++  *((int *)&__m256_op1[0]) = 0x20010001;
++  *((int *)&__m256_result[7]) = 0x00009fff;
++  *((int *)&__m256_result[6]) = 0x9ffffd80;
++  *((int *)&__m256_result[5]) = 0x0000ffff;
++  *((int *)&__m256_result[4]) = 0x20010001;
++  *((int *)&__m256_result[3]) = 0x00009fff;
++  *((int *)&__m256_result[2]) = 0x9ffffd80;
++  *((int *)&__m256_result[1]) = 0x0000ffff;
++  *((int *)&__m256_result[0]) = 0x20010001;
++  __m256_out = __lasx_xvfmaxa_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000170;
++  *((int *)&__m256_op0[6]) = 0x00000080;
++  *((int *)&__m256_op0[5]) = 0xc0650055;
++  *((int *)&__m256_op0[4]) = 0x0055ffab;
++  *((int *)&__m256_op0[3]) = 0x00000170;
++  *((int *)&__m256_op0[2]) = 0x00000080;
++  *((int *)&__m256_op0[1]) = 0xc0650055;
++  *((int *)&__m256_op0[0]) = 0x0055ffab;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffff0000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffff0000;
++  *((int *)&__m256_op1[7]) = 0xfffefffe;
++  *((int *)&__m256_op1[6]) = 0xfffefffe;
++  *((int *)&__m256_op1[5]) = 0xfffefffe;
++  *((int *)&__m256_op1[4]) = 0xfffefffe;
++  *((int *)&__m256_op1[3]) = 0xfffefffe;
++  *((int *)&__m256_op1[2]) = 0xfffefffe;
++  *((int *)&__m256_op1[1]) = 0xfffefffe;
++  *((int *)&__m256_op1[0]) = 0xfffefffe;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0xffff0000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0xffff0000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00fe01f0;
++  *((int *)&__m256_op0[6]) = 0x00010000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00c40086;
++  *((int *)&__m256_op0[3]) = 0x00fe01f0;
++  *((int *)&__m256_op0[2]) = 0x00010000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00c40086;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x82a54290;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x028aa700;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x82a54290;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x02a54287;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00010000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00c40086;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00010000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00c40086;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x02a54290;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0154dc84;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x02a54290;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000089;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x02a54290;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x0154dc84;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x02a54290;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000089;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x02a54290;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x0154dc84;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x02a54290;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000089;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x04000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x04000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00100000;
++  *((int *)&__m256_op0[6]) = 0x00100000;
++  *((int *)&__m256_op0[5]) = 0x00100000;
++  *((int *)&__m256_op0[4]) = 0x00100000;
++  *((int *)&__m256_op0[3]) = 0x00100000;
++  *((int *)&__m256_op0[2]) = 0x00100000;
++  *((int *)&__m256_op0[1]) = 0x00100000;
++  *((int *)&__m256_op0[0]) = 0x00100000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000010;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000010;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000080;
++  *((int *)&__m256_op0[6]) = 0x00000080;
++  *((int *)&__m256_op0[5]) = 0x00000080;
++  *((int *)&__m256_op0[4]) = 0x00000080;
++  *((int *)&__m256_op0[3]) = 0x00000080;
++  *((int *)&__m256_op0[2]) = 0x00000080;
++  *((int *)&__m256_op0[1]) = 0x00000080;
++  *((int *)&__m256_op0[0]) = 0x00000080;
++  *((int *)&__m256_op1[7]) = 0x00000001;
++  *((int *)&__m256_op1[6]) = 0x00000001;
++  *((int *)&__m256_op1[5]) = 0x00000001;
++  *((int *)&__m256_op1[4]) = 0x00000001;
++  *((int *)&__m256_op1[3]) = 0x00000001;
++  *((int *)&__m256_op1[2]) = 0x00000001;
++  *((int *)&__m256_op1[1]) = 0x00000001;
++  *((int *)&__m256_op1[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0x00000001;
++  *((int *)&__m256_result[6]) = 0x00000001;
++  *((int *)&__m256_result[5]) = 0x00000001;
++  *((int *)&__m256_result[4]) = 0x00000001;
++  *((int *)&__m256_result[3]) = 0x00000001;
++  *((int *)&__m256_result[2]) = 0x00000001;
++  *((int *)&__m256_result[1]) = 0x00000001;
++  *((int *)&__m256_result[0]) = 0x00000001;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfmina_s (__m256_op0, __m256_op1);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
+new file mode 100644
+index 000000000..18d5c51de
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_d.c
+@@ -0,0 +1,482 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x2f03988e2052463e;
++  *((unsigned long *)&__m256d_result[2]) = 0x2f03988e1409212e;
++  *((unsigned long *)&__m256d_result[1]) = 0x2f03988e2052463e;
++  *((unsigned long *)&__m256d_result[0]) = 0x2f03988e1409212e;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256d_op0[0]) = 0xff874dc687870000;
++  *((unsigned long *)&__m256d_result[3]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000100000018;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000100000018;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x1f60000000c00000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x1f60000000c00000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0003030300000300;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0003030300000300;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0003030300000100;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0003030300000100;
++  *((unsigned long *)&__m256d_result[3]) = 0x1febc46085090ea0;
++  *((unsigned long *)&__m256d_result[2]) = 0x1febc46085090ea0;
++  *((unsigned long *)&__m256d_result[1]) = 0x1febc46085090567;
++  *((unsigned long *)&__m256d_result[0]) = 0x1febc46085090567;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x1f9689fdb16cabbd;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x1f9689fdb16cabbd;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff0000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000000000000000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000010000000100;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000010000000100;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x1fa0000000080000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffff8000;
++  __m256d_out = __lasx_xvfsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffff00000000;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0209fefb08140000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256d_result[3]) = 0x6100000800060005;
++  *((unsigned long *)&__m256d_result[2]) = 0x5ee1c073b800c916;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x5ff00007fff9fff3;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x555555553f800000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x555555553f800000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x353bb67af686ad9b;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x353bb67af686ad9b;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000001f0000ffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x60000007fffe0001;
++  *((unsigned long *)&__m256d_result[2]) = 0x60000007fffe0001;
++  *((unsigned long *)&__m256d_result[1]) = 0x6056fd4e7926d5c0;
++  *((unsigned long *)&__m256d_result[0]) = 0x6056fd4e1a4616c4;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00001bfa000000f9;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000f900004040;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00001bfa000000f9;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000f900004040;
++  *((unsigned long *)&__m256d_result[3]) = 0x60183329ceb52cf0;
++  *((unsigned long *)&__m256d_result[2]) = 0x6040392cdaf9b3ff;
++  *((unsigned long *)&__m256d_result[1]) = 0x60183329ceb52cf0;
++  *((unsigned long *)&__m256d_result[0]) = 0x6040392cdaf9b3ff;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x3de00103153ff5fb;
++  *((unsigned long *)&__m256d_op0[2]) = 0xbffffffe80000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x3de00103153ff5fb;
++  *((unsigned long *)&__m256d_op0[0]) = 0xbffffffe80000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff8000000000000;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256d_result[3]) = 0x606a20bd700e59a3;
++  *((unsigned long *)&__m256d_result[2]) = 0x6066a09e66c5f1bb;
++  *((unsigned long *)&__m256d_result[1]) = 0x606a20bd700e59a3;
++  *((unsigned long *)&__m256d_result[0]) = 0x6066a09e66c5f1bb;
++  __m256d_out = __lasx_xvfrsqrt_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x03fc03fc03f803f8;
++  *((unsigned long *)&__m256d_op0[2]) = 0x03fc03fc03f803f8;
++  *((unsigned long *)&__m256d_op0[1]) = 0x03fc03fc03f803f8;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256d_result[2]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256d_result[1]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xaf0489001bd4c0c3;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xaf0489001bd4c0c3;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000fffff614;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000fffff614;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0000001e;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0000001e;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0000001e;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0000001e;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xff80000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8060000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8060000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7ff0000000000000;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfrecip_d (__m256d_op0);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
+new file mode 100644
+index 000000000..27df4a27d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfsqrt_s.c
+@@ -0,0 +1,457 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x00000000;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0x00000000;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0x00000000;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0x00000000;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000ff80;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x60b53246;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x60b5054d;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x0060005a;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x0060005a;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0x5f13ccf5;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0x5f13ccf5;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000002;
++  *((int *)&__m256_op0[4]) = 0x00000008;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000002;
++  *((int *)&__m256_op0[0]) = 0x00000008;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x64800000;
++  *((int *)&__m256_result[4]) = 0x64000000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x64800000;
++  *((int *)&__m256_result[0]) = 0x64000000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x000000bd;
++  *((int *)&__m256_op0[4]) = 0xfef907bc;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x000000bd;
++  *((int *)&__m256_op0[0]) = 0xfef907bc;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x62d2acee;
++  *((int *)&__m256_result[4]) = 0x7fc00000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x62d2acee;
++  *((int *)&__m256_result[0]) = 0x7fc00000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x04e8296f;
++  *((int *)&__m256_op0[6]) = 0x18181818;
++  *((int *)&__m256_op0[5]) = 0x132feea9;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x04e8296f;
++  *((int *)&__m256_op0[2]) = 0x18181818;
++  *((int *)&__m256_op0[1]) = 0x132feea9;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x5cbe15f2;
++  *((int *)&__m256_result[6]) = 0x53261036;
++  *((int *)&__m256_result[5]) = 0x559a674d;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x5cbe15f2;
++  *((int *)&__m256_result[2]) = 0x53261036;
++  *((int *)&__m256_result[1]) = 0x559a674d;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrsqrt_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x000000ff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000ff00;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xfc003802;
++  *((int *)&__m256_op0[6]) = 0xfc000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xfc00fc00;
++  *((int *)&__m256_op0[3]) = 0xfc003802;
++  *((int *)&__m256_op0[2]) = 0xfc000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xfc00fc00;
++  *((int *)&__m256_result[7]) = 0x82ff902d;
++  *((int *)&__m256_result[6]) = 0x83000000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x82fe0bd9;
++  *((int *)&__m256_result[3]) = 0x82ff902d;
++  *((int *)&__m256_result[2]) = 0x83000000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x82fe0bd9;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7f800000;
++  *((int *)&__m256_result[4]) = 0x7f800000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7f800000;
++  *((int *)&__m256_result[0]) = 0x7f800000;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xfd02fd02;
++  *((int *)&__m256_op0[6]) = 0xfd02fd02;
++  *((int *)&__m256_op0[5]) = 0xfd02fd02;
++  *((int *)&__m256_op0[4]) = 0xfd02fd02;
++  *((int *)&__m256_op0[3]) = 0xfd02fd02;
++  *((int *)&__m256_op0[2]) = 0xfd02fd02;
++  *((int *)&__m256_op0[1]) = 0xfd02fd02;
++  *((int *)&__m256_op0[0]) = 0xfd02fd02;
++  *((int *)&__m256_result[7]) = 0x81fa28e4;
++  *((int *)&__m256_result[6]) = 0x81fa28e4;
++  *((int *)&__m256_result[5]) = 0x81fa28e4;
++  *((int *)&__m256_result[4]) = 0x81fa28e4;
++  *((int *)&__m256_result[3]) = 0x81fa28e4;
++  *((int *)&__m256_result[2]) = 0x81fa28e4;
++  *((int *)&__m256_result[1]) = 0x81fa28e4;
++  *((int *)&__m256_result[0]) = 0x81fa28e4;
++  __m256_out = __lasx_xvfrecip_s (__m256_op0);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-subtraction-instr.patch b/LoongArch-Add-tests-for-ASX-vector-subtraction-instr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..eef0890dbc8a9e0b1819badcf093372a2b94df1d
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-subtraction-instr.patch
@@ -0,0 +1,4566 @@
+From dcd9959504b5e8a0d9346d9ffb45542c1250c538 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:21:25 +0800
+Subject: [PATCH 101/124] LoongArch: Add tests for ASX vector subtraction
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsub.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubi.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvssub-1.c     | 425 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvssub-2.c     | 695 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsub.c        | 590 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsubi.c       | 482 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsubwev-1.c   | 530 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsubwev-2.c   | 440 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsubwod-1.c   | 695 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsubwod-2.c   | 620 ++++++++++++++++
+ 8 files changed, 4477 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
+new file mode 100644
+index 000000000..ada72a16a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-1.c
+@@ -0,0 +1,425 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff24;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff24;
++  __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000bdfef907bc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000bdfef907bc;
++  __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffc0;
++  __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2b2b2b2b1bd68080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2a2ad4d4f2d8807e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2b2b2b2b1bd68080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2a2ad4d4f2d8807e;
++  *((unsigned long *)&__m256i_result[3]) = 0xd4d5d4d5e42a7f80;
++  *((unsigned long *)&__m256i_result[2]) = 0xd5d62b2c0d287f82;
++  *((unsigned long *)&__m256i_result[1]) = 0xd4d5d4d5e42a7f80;
++  *((unsigned long *)&__m256i_result[0]) = 0xd5d62b2c0d287f82;
++  __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff07b4ffff0707;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000b8070000a787;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff07b4ffff0707;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000b8070000a787;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffb7650000d496;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001800000018000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffb7650000d496;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001800000018000;
++  __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff000003c0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff000003c0;
++  __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_result[2]) = 0xff81001dff9d003b;
++  *((unsigned long *)&__m256i_result[1]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_result[0]) = 0xff81001dff9d003b;
++  __m256i_out = __lasx_xvssub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00fd0101;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00fd0101;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00fd0101;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00fd0101;
++  __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[3]) = 0x40f69fe63c26f4f5;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ff7ffff00000007;
++  *((unsigned long *)&__m256i_result[1]) = 0x40f69fe63c26f4f5;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ff7ffff00000007;
++  __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff00007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff00007fff;
++  __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010800;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffefef800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffefef800;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f0060;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4393a0a5bc606060;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43b32feea9000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4393a0a5bc606060;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43b32feea9000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256i_op1[2]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256i_op1[0]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x04e8296f3c611818;
++  *((unsigned long *)&__m256i_result[2]) = 0x032eafee29010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x04e8296f3c611818;
++  *((unsigned long *)&__m256i_result[0]) = 0x032eafee29010000;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001ff91ff100000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001ff91ff100000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffffff7fff80;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001ff91ff0ffdfe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffff7fff80;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001ff91ff0ffdfe;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvssub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
+new file mode 100644
+index 000000000..f42523850
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssub-2.c
+@@ -0,0 +1,695 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7fdd5ffebe1c9e3;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7fdd5ffebe1c9e3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000002467db99;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000003e143852;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000002467db99;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000003e143852;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffdb982466;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7fdd5ffadcd9191;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffdb982466;
++  *((unsigned long *)&__m256i_result[0]) = 0xf7fdd5ffadcd9191;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fef0000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fef0000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000f880f87e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000f880f87e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000;
++  __m256i_out = __lasx_xvssub_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010511c54440438;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010511c54440438;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000030b8;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100002000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000808000008080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000808000008081;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffec;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffec;
++  *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_result[3]) = 0x38f7414938f7882f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x38f7414938f78830;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0ef;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0ef;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0ef;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0ef;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000070f07170;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000070f0f0ef;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000070f07170;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000070f0f0ef;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000032;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000003c000000032;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000004e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff;
++  __m256i_out = __lasx_xvssub_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0feff00000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0feff00000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2475cef801f0ffdd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x419cd5b11c3c5654;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1010100fefefeff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0f8f0e8df676f778;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffdfffffffdfffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffdfffffffdfffff;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffe8ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffe8ffffffe8;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffe8ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffe8ffffffe8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x21f8c3c4c0000005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256i_op1[0]) = 0x21f8c3c4c0000005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssub_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
+new file mode 100644
+index 000000000..c1de1e8d3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsub.c
+@@ -0,0 +1,590 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010100000000;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffbe20fc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000001cc7ee87;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000010bb83239;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000c409ed87;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0100020001bf1efd;
++  *((unsigned long *)&__m256i_result[2]) = 0x010002001ec8ec88;
++  *((unsigned long *)&__m256i_result[1]) = 0x010002010db9303a;
++  *((unsigned long *)&__m256i_result[0]) = 0x01000200c60aeb88;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010200000000;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7e00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7e00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff003f003f00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff0101fd00010100;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff003f003f00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff0101fd00010100;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff010000fff9;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff19;
++  *((unsigned long *)&__m256i_result[1]) = 0xff02ff020001fffa;
++  *((unsigned long *)&__m256i_result[0]) = 0x000100010001fffa;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21;
++  *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050;
++  *((unsigned long *)&__m256i_result[3]) = 0xbc30c40107d915df;
++  *((unsigned long *)&__m256i_result[2]) = 0xbc263e0e5c80b010;
++  *((unsigned long *)&__m256i_result[1]) = 0xbc30c40107d91607;
++  *((unsigned long *)&__m256i_result[0]) = 0xbc20e63aa8392fb0;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[3]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_result[2]) = 0xff21c241ff21c238;
++  *((unsigned long *)&__m256i_result[1]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256i_result[0]) = 0xff21c241ff21c238;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f8f7f8f7f8f7f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f8f7f8f7f8f7f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000e000e000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000e000e000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000e000e000e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000e000e000e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x207f207f207f2000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000207f2000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[3]) = 0xdf80df80df80dfff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffdf80dfff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080;
++  __m256i_out = __lasx_xvsub_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c5c5c5c5c5;
++  *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c645c5c5c6;
++  *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c5c5c5c5c5;
++  *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c645c5c5c6;
++  __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe0f02081c1c4ce2c;
++  *((unsigned long *)&__m256i_result[2]) = 0x8008000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xe0f02081c1c4ce2c;
++  *((unsigned long *)&__m256i_result[0]) = 0x8008000000000000;
++  __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000001c9880;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000001c9880;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000100000001;
++  __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00e9a80014ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00e9a80014ff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00b213181dff0607;
++  *((unsigned long *)&__m256i_result[2]) = 0x00e9a80114ff0001;
++  *((unsigned long *)&__m256i_result[1]) = 0x00b213181dff0607;
++  *((unsigned long *)&__m256i_result[0]) = 0x00e9a80114ff0001;
++  __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fdfdfe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ffe0001fffe0001;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ffe0001fffeffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000fdfdfe;
++  __m256i_out = __lasx_xvsub_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000006f0000007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000006f0000007f;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8080808080808081;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808080808081;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x017e00ff017e00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op1[1]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_result[3]) = 0x1f9d9f9d1f9db29f;
++  *((unsigned long *)&__m256i_result[2]) = 0x1f9d9f9d201cb39e;
++  *((unsigned long *)&__m256i_result[1]) = 0x201c9f9d201cb29f;
++  *((unsigned long *)&__m256i_result[0]) = 0x1f9d9f9d201cb39e;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc192181230000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc192181230000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4010000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3e6ce7d9cb7afb62;
++  *((unsigned long *)&__m256i_result[1]) = 0x4010000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3e6ce7d9cb7afb62;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffed;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x90007fff90008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0ffffffe90008000;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff80000000;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x05ea05ea05ea05ec;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x05ea05ea05ea05ec;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfa15fa15fa15fa14;
++  __m256i_out = __lasx_xvsub_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0505070804040404;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0504070804040404;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0505070804040404;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0504070804040404;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ff000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ff000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0504080804030405;
++  *((unsigned long *)&__m256i_result[2]) = 0x0504060904040305;
++  *((unsigned long *)&__m256i_result[1]) = 0x0504080804030405;
++  *((unsigned long *)&__m256i_result[0]) = 0x0504060904040305;
++  __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000fff8ff40;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ff0100090040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000fff8ff40;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ff0100090040;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffefff80;
++  __m256i_out = __lasx_xvsub_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
+new file mode 100644
+index 000000000..a3c0de6d3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubi.c
+@@ -0,0 +1,482 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe9e9e9e9e9e9e9e9;
++  *((unsigned long *)&__m256i_result[2]) = 0xe9e9e9e9e9e9e9e9;
++  *((unsigned long *)&__m256i_result[1]) = 0xe9e9e9e9e9e9e9e9;
++  *((unsigned long *)&__m256i_result[0]) = 0xe9e9e9e9e9e9e9e9;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0xf9f8f9f8f9f9f900;
++  *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9e0;
++  *((unsigned long *)&__m256i_result[1]) = 0xf9f8f9f8f9f9f900;
++  *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f900;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xefefefefefefefef;
++  *((unsigned long *)&__m256i_result[2]) = 0xefefefefefefefef;
++  *((unsigned long *)&__m256i_result[1]) = 0xefefefefefefef6e;
++  *((unsigned long *)&__m256i_result[0]) = 0xeeeeeeeeeeeeeeee;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[2]) = 0x6aeaeaeaeaeaeaea;
++  *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[0]) = 0x6aeaeaeaeaeaeaea;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf6f6f6f6f6f6f6f6;
++  *((unsigned long *)&__m256i_result[2]) = 0xf6f6f6f6f6f6f6f6;
++  *((unsigned long *)&__m256i_result[1]) = 0xf6f6f6f6f6f6f6f6;
++  *((unsigned long *)&__m256i_result[0]) = 0xf6f6f6f6f6f6f6f6;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000002a54290;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[2]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[1]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[0]) = 0xe7e7e7e7e7e7e7e7;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_result[2]) = 0xdbcbdbcbdbcbdbcb;
++  *((unsigned long *)&__m256i_result[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_result[0]) = 0xdbcbdbcbdbcbdbcb;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0e0d0c0b0e0d0c0b;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0e0d0c0b0e0d0c0b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0e0d0c0b0e0d0c0b;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0e0d0c0b0e0d0c0b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0a0908070a090807;
++  *((unsigned long *)&__m256i_result[2]) = 0x0a0908070a090807;
++  *((unsigned long *)&__m256i_result[1]) = 0x0a0908070a090807;
++  *((unsigned long *)&__m256i_result[0]) = 0x0a0908070a090807;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_result[2]) = 0xf2f2f2f2f2f2f2f2;
++  *((unsigned long *)&__m256i_result[1]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_result[0]) = 0xf2f2f2f2f2f2f2f2;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefefdfdfdfd;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefefdfdfdfd;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe4e4e4e4e4e4e4e4;
++  *((unsigned long *)&__m256i_result[2]) = 0xe4e4e4e4e4e4e4e4;
++  *((unsigned long *)&__m256i_result[1]) = 0xe4e4e4e4e4e4e4e4;
++  *((unsigned long *)&__m256i_result[0]) = 0xe4e4e4e4e4e4e4e4;
++  __m256i_out = __lasx_xvsubi_bu (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff7fff7fff7fff7;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff7fff7fff7fff7;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff7fff7fff7fff7;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff7fff7fff7fff7;
++  __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000022be22be;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fffa2bea2be;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000022be22be;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fffa2bea2be;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe1ffe1229f229f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fe07fe0a29fa29f;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe1ffe1229f229f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fe07fe0a29fa29f;
++  __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe5ffe5ffe5ffe5;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe5ffe5ffe5ffe5;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe5ffe5ffe5ffe5;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe5ffe5ffe5ffe5;
++  __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff1fff1fff1fff1;
++  __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffcfffcfffcfffc;
++  __m256i_out = __lasx_xvsubi_hu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffef000004ea;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffefffffffef;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000018;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000018;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff30000000b;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff30000000b;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff3fffffff3;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff5fffffff5;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff5fffffff5;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff5fffffff5;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff5fffffff5;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffe5ffffffe5;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffe5ffffffe5;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffeaffffffea;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffeaffffffea;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffeaffffffea;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffeaffffffea;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5d20a0a15d20a0a1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5d20a0a15d20a0a1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x5d20a0895d20a089;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffe8ffffffe8;
++  *((unsigned long *)&__m256i_result[1]) = 0x5d20a0895d20a089;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffe8ffffffe8;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffe8ffffffe8;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffe8ffffffe8;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffe8ffffffe8;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffe8ffffffe8;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffcfffffffc;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffcfffffffc;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffcfffffffc;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffcfffffffc;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb683007ffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c0df5b41cf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb683007ffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c0df5b41cf;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe97a1df5b41b0;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe97a1df5b41b0;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffe7ffffffe7;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000400100004001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000400100004001;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003ff000003ff0;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffe4ffffffe4;
++  __m256i_out = __lasx_xvsubi_wu (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffefb;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffefb;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffc0008001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffffc0008001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffc0008001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffc0008001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffffc0007fe9;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffffc0007fe9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffc0007fe9;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffffc0007fe9;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff6;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffee;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe6;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffe6;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe6;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffe6;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffe1;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000006d;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000006d;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000010006d;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffee;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff4;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffed;
++  __m256i_out = __lasx_xvsubi_du (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
+new file mode 100644
+index 000000000..caa72ca61
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-1.c
+@@ -0,0 +1,530 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffb10001ff8f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001004c0001ff87;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffb10001ff8f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001004c0001ff87;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff7;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ff02ff80fede;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ff02ff80fede;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fff0ffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0ffc0;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001d0000001c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001d0000001c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001d0000001c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001c;
++  __m256i_out = __lasx_xvsubwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffeff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffeff00000000;
++  __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010203;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffcfa;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe;
++  __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006;
++  __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000fffffffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000008080809;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000008080809;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000008080809;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000008080809;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffd;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffd;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffd;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffd;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff1cff18;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff1cff18;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000001400;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffec00;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffc3fe007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffc3fe007;
++  __m256i_out = __lasx_xvsubwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010000;
++  __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010100000102;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010100000102;
++  __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007fffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007fffff007fffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007fffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007fffff007fffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffbdff3cffbdff44;
++  __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
+new file mode 100644
+index 000000000..57d883c04
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwev-2.c
+@@ -0,0 +1,440 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000007f0000007f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000007f0000007f;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff80ff01ff80;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff800000007e;
++  __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0043030300400300;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0043030300400300;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0043030300400100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0043030300400100;
++  *((unsigned long *)&__m256i_result[3]) = 0xffdd001dffe00020;
++  *((unsigned long *)&__m256i_result[2]) = 0xffdd001dffe00031;
++  *((unsigned long *)&__m256i_result[1]) = 0xffdd001dffe00020;
++  *((unsigned long *)&__m256i_result[0]) = 0xffdd001dffe00031;
++  __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x001fe020001fe020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x001fe020001fe020;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe1ffe0ffe1ffe0;
++  __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffee00ba;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffee00ba;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00fffff500ba;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00fffff500ba;
++  __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000004efffe00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000047000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000004efffe00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000047000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01;
++  __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01;
++  __m256i_out = __lasx_xvsubwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fffc0000fffc;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffc0000fffc;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001504f4c4b2361;
++  *((unsigned long *)&__m256i_op0[2]) = 0x303338a48f374969;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001504f4c4b2361;
++  *((unsigned long *)&__m256i_op0[0]) = 0x303338a48f374969;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff47b4ffff5879;
++  __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvsubwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffbf4;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000308;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010100000102;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010100000102;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffefd;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffefd;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80000000ffff8c80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80000000fff0e400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000f1a40;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000003effe1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000003effe1;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000003effe1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000003effe1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffff7;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffff7;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0002;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0002;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0002;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0002;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000;
++  __m256i_out = __lasx_xvsubwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5d20a0a15d20a0a1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5d20a0a15d20a0a1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000000001;
++  __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffeffffff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffeffffff00;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000100;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000100;
++  __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsubwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
+new file mode 100644
+index 000000000..1687729d3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-1.c
+@@ -0,0 +1,695 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe4ffe6ffe5ffe6;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe4ffe6ffe5ffe6;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe4ffe6ffe5ffe6;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe4ffe6ffe5ffe6;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x017e01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0586060601fe0202;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017e01fe01fe0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0586060601fe0004;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffbfffafffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffbfffaffff0000;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100007fff;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000043efffff8000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000043efffff8000;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000003f00001f63;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000003f00001f63;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff80ff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff80ff;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc3030000ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc3030000ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003cfc0000006f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003cfc0000006f;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffb2f600006f48;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000001fffe;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000060000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000060000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000020202020;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ff40;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ff0100090040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ff40;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ff0100090040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff02;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff02;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000700000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000700000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000700000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe00;
++  __m256i_out = __lasx_xvsubwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1e17ffffd0fc6772;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1e17ffffebf6ded2;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1e17ffffd0fc6772;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1e17ffffebf6ded2;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xe1e800002f03988d;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xe1e800002f03988d;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x6300000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x6300000000000001;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000808;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000e000e000e000e;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0a0a000000000a0a;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
+new file mode 100644
+index 000000000..8d6ed92a1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsubwod-2.c
+@@ -0,0 +1,620 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007dfdff4b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff3400000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff83ff01;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ff010000ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff010000ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ff010000ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff010000ff01;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff0fff0;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdfc2df80df80df87;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdfc2df80df80df87;
++  *((unsigned long *)&__m256i_result[3]) = 0xff21ff21ff21ff21;
++  *((unsigned long *)&__m256i_result[2]) = 0xff21ff21ff21ff21;
++  *((unsigned long *)&__m256i_result[1]) = 0xff21ff21ff21ff21;
++  *((unsigned long *)&__m256i_result[0]) = 0xff21ff21ff21ff21;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4079808280057efe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007ffcfcfd020202;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x004000800080007e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000fc00fd0002;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff0100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff0100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff0100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff0100000000;
++  __m256i_out = __lasx_xvsubwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff7fff00007f00;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000100007fff;
++  __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000b8f81b8c840e4;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000b8f81b8c840e4;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffb3b4;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff5ffff4738;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffb3b4;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff5ffff4738;
++  __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00009fff9ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff20010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00009fff9ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff20010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00002080df5b41cf;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00002080df5b41cf;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000009fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff40a6;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000009fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff40a6;
++  __m256i_out = __lasx_xvsubwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00007fffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00007fffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff8001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x020afefb08140000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff0001ff02;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff020afefc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000003fefd;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1514151415141514;
++  *((unsigned long *)&__m256i_op1[2]) = 0x151415141514e335;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1514151415141514;
++  *((unsigned long *)&__m256i_op1[0]) = 0x151415141514e335;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000e9ece9ec;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000e9ece9ec;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000e9ece9ec;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000e9ece9ec;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0008000800080008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000c005e000c0029;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0004005600040020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000300000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000300000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000060008;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000c005b;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffe0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000040053;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f784000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f84000fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f784000ff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000f7f8f7f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000003f78;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000f7f8f7f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000003f78;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000070007000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff8fff9000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff8fff9000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff8fff9000;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff37b737b8;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff77b737b8;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff37b737b8;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff77b737b8;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff457db03f;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsubwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000b2673a90896a4;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000b2673a90896a4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffafafb3b3dc9d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffafafb3b3dc9d;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000008050501;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000029170;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000029170;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001fff000;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000090b0906;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsubwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvabsd-xvavg-xvav.patch b/LoongArch-Add-tests-for-ASX-vector-xvabsd-xvavg-xvav.patch
new file mode 100644
index 0000000000000000000000000000000000000000..bef950c0efa2d615f5cb79658d380b80e5daa892
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvabsd-xvavg-xvav.patch
@@ -0,0 +1,5595 @@
+From 02a3c7b1dc6b66bad2d7eca396176cb9fd731a79 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:42:49 +0800
+Subject: [PATCH 115/124] LoongArch: Add tests for ASX vector
+ xvabsd/xvavg/xvavgr/xvbsll/xvbsrl/xvneg/ xvsat instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvneg.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvabsd-1.c     | 485 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvabsd-2.c     | 650 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvavg-1.c      | 680 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvavg-2.c      | 560 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvavgr-1.c     | 770 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvavgr-2.c     | 650 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvbsll_v.c     | 130 +++
+ .../loongarch/vector/lasx/lasx-xvbsrl_v.c     |  64 ++
+ .../loongarch/vector/lasx/lasx-xvneg.c        | 526 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsat-1.c      | 537 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsat-2.c      | 427 ++++++++++
+ 11 files changed, 5479 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
+new file mode 100644
+index 000000000..41fae32df
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-1.c
+@@ -0,0 +1,485 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x34598d0fd19314cb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1820939b2280fa86;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4a1c269b8e892a3a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x063f2bb758abc664;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffc0fcffffcf83;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000288a00003c1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x3459730f2f6d1435;
++  *((unsigned long *)&__m256i_result[2]) = 0x19212d61237f2b03;
++  *((unsigned long *)&__m256i_result[1]) = 0x4a1c266572772a3a;
++  *((unsigned long *)&__m256i_result[0]) = 0x063f032d58557648;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007f017f01;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007f017f01;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000b2673a90896a4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000b2673a90896a4;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001504f4c4b2361;
++  *((unsigned long *)&__m256i_result[2]) = 0x303338a48f374969;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001504f4c4b2361;
++  *((unsigned long *)&__m256i_result[0]) = 0x303338a48f374969;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x807c7fffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80817fff00810000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x807c7fffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80817fff00810000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x80767f0101050101;
++  *((unsigned long *)&__m256i_result[2]) = 0x80817f01007f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x80767f0101050101;
++  *((unsigned long *)&__m256i_result[0]) = 0x80817f01007f0000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x437fe01fe01fe020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x437fe01fe01fe020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x037fe01f001fe020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x037fe01f001fe020;
++  *((unsigned long *)&__m256i_result[3]) = 0x437f201f201f2020;
++  *((unsigned long *)&__m256i_result[2]) = 0x037f201f001f2020;
++  *((unsigned long *)&__m256i_result[1]) = 0x437f201f201f2020;
++  *((unsigned long *)&__m256i_result[0]) = 0x037f201f001f2020;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1f60010000080100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1f60010000080100;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000010100020103;
++  *((unsigned long *)&__m256i_result[2]) = 0x040f040f040b236d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000010100020103;
++  *((unsigned long *)&__m256i_result[0]) = 0x040f040f040b236d;
++  __m256i_out = __lasx_xvabsd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100010080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100010080;
++  __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000073333333;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000073333333;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000073333333;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000073333333;
++  __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000050fd00000101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000040c100000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000050fd00000101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000040c100000101;
++  *((unsigned long *)&__m256i_result[3]) = 0x000050fd00000101;
++  *((unsigned long *)&__m256i_result[2]) = 0x000040c100000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x000050fd00000101;
++  *((unsigned long *)&__m256i_result[0]) = 0x000040c100000101;
++  __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01ffff4300fffeff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfe0000bcff000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01ffff4300fffeff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfe0000bcff000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x81ff00bd80ff0101;
++  *((unsigned long *)&__m256i_result[2]) = 0x01ff00bd00ff0101;
++  *((unsigned long *)&__m256i_result[1]) = 0x81ff00bd80ff0101;
++  *((unsigned long *)&__m256i_result[0]) = 0x01ff00bd00ff0101;
++  __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003fea00013feb;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fe900014022;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003fea00013feb;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fe900014022;
++  __m256i_out = __lasx_xvabsd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0cc08723ff900001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xcc9b89f2f6cef440;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0cc08723006fffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x3364760e09310bc0;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000017f0000017d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000017f0000017f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000017f0000017d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000017f0000017f;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbf800000bf800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xd662fa0000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbf800000bf800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xd6ef750000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x417e01f040800000;
++  *((unsigned long *)&__m256i_result[2]) = 0x299d060000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x417e01f040800000;
++  *((unsigned long *)&__m256i_result[0]) = 0x29108b0000000000;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001700170017;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffb79fb74;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffb79fb74;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x000100010485048a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0005ff870005ff86;
++  *((unsigned long *)&__m256i_result[1]) = 0x000100010485048a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0005ff870005ff86;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000400100004001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000400100004001;
++  __m256i_out = __lasx_xvabsd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000;
++  __m256i_out = __lasx_xvabsd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvabsd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+new file mode 100644
+index 000000000..bd7a9069d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvabsd-2.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0008000001010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101000001010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000;
++  __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000100010485048a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0005ff870005ff86;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000100010485048a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0005ff870005ff86;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffa0065fffa0066;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffa0065fffa0066;
++  __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvabsd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x400040003abf4000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x400040003abf4000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0408040800008003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff80800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0408040800008003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x04080408fff87803;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0606060606060606;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0606060606060606;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0606060606060606;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0606060606060606;
++  *((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0001;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff003f003f00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff0101fd00010100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff003f003f00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff0101fd00010100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00ff003f003f00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff0101fd00010100;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00ff003f003f00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff0101fd00010100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff5fff7;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff5fff7;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001010000;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001400000014;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfbba01c0003f7e3f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfbd884e7003f7e3f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff874dc687870000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfbba01c0003f7e3f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_result[1]) = 0xfbd884e7003f7e3f;
++  *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x800000007fff0001;
++  *((unsigned long *)&__m256i_result[2]) = 0x80000000ff7f0001;
++  *((unsigned long *)&__m256i_result[1]) = 0x800000007fff0001;
++  *((unsigned long *)&__m256i_result[0]) = 0x80000000ff7f0001;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe01fe01fe;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010000080040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000080040;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003f3f0000400d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003f3f0000400d;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff88;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe98;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f80ffffff808000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f80ffffff808000;
++  *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f7fff;
++  __m256i_out = __lasx_xvabsd_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
+new file mode 100644
+index 000000000..5ce31ebbd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-1.c
+@@ -0,0 +1,680 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000edff00fffd;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fff10000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000cdff00fffd;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff320000ffff;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffe00f7ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff629d7;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffe00f7ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff629d7;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xd010101010101010;
++  *((unsigned long *)&__m256i_result[2]) = 0xd010101010103218;
++  *((unsigned long *)&__m256i_result[1]) = 0xd010101010101010;
++  *((unsigned long *)&__m256i_result[0]) = 0xd010101010103218;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000400100013;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000400100014;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000400100013;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavg_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010002;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0008000800080008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0008000800080008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008000800080008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0008000800080008;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x1010101010101013;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[1]) = 0x1010101010101013;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000006170;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000006170;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000030b8;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000202;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000500040005;
++  __m256i_out = __lasx_xvavg_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00d6c1c830160048;
++  *((unsigned long *)&__m256i_op1[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe3aebaf4df958004;
++  *((unsigned long *)&__m256i_result[3]) = 0xc58a0a0a07070706;
++  *((unsigned long *)&__m256i_result[2]) = 0x006b60e4180b0023;
++  *((unsigned long *)&__m256i_result[1]) = 0x1b39153f334b966a;
++  *((unsigned long *)&__m256i_result[0]) = 0xf1d75d79efcac002;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff00007fff;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffff00000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffff00000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x003fffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x003fffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavg_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fffffff;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0080808080808080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0080808080808080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080808100808080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0080808000808080;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0888888888888888;
++  *((unsigned long *)&__m256i_result[2]) = 0x0888888888888888;
++  *((unsigned long *)&__m256i_result[1]) = 0x0888888888888888;
++  *((unsigned long *)&__m256i_result[0]) = 0x0888888888888888;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4010000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3e6ce7d9cb7afb62;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4010000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3e6ce7d9cb7afb62;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2008000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1f3673ece5bd7db1;
++  *((unsigned long *)&__m256i_result[1]) = 0x2008000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1f3673ece5bd7db1;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000400000003fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000400000003fff;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000020202000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000020202000;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffe00fe00;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000001fe01dde;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffe00fe00;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000001fe01dde;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvavg_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
+new file mode 100644
+index 000000000..d04e42753
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavg-2.c
+@@ -0,0 +1,560 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x61d849f0c0794ced;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe75278c187b20039;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf90c0c0c00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0ca40c0c0c0c0cc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0c0c0c0c0cb60cc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfbe0b80c960c96d0;
++  *((unsigned long *)&__m256i_result[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_result[2]) = 0x146014141414146e;
++  *((unsigned long *)&__m256i_result[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_result[0]) = 0xf19998668e5f4b84;
++  __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff00fff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000007f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000007f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f007f78;
++  __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00fefffeff02ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00030006fa05f20e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00030081bd80f90e;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00010003fc827a86;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007f7f7f7f0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f017fc0ddbf7d86;
++  __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101000101010001;
++  __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x007f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x007f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f00000000;
++  __m256i_out = __lasx_xvavg_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000007f00000022;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000003f00000011;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fff;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffbfffafffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffbfffaffff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fe01fc01fe01fc;
++  *((unsigned long *)&__m256i_op1[2]) = 0x012c002c001c0006;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fe01fc01fe0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x012c002c001c000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x807e80fd80fe80fd;
++  *((unsigned long *)&__m256i_result[2]) = 0x80938013800d8002;
++  *((unsigned long *)&__m256i_result[1]) = 0x807e80fd80fe0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x80938013800d0005;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x3fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fff7fffffc08008;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fff7fffffc08008;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x800000007fff0001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff7f0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x800000007fff0001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff7f0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x800000007fff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x80000000ff7f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x800000007fff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x80000000ff7f0000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff0000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007fff00400011;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000008001ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007fff00400011;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a09080706050403;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a09080706050403;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0504840303028201;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0504840303028201;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200;
++  __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000005000000020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000005000000020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002800000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002800000010;
++  __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x40efffe000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x40efffe000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f;
++  __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x111ebb784f9c4100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1c386546809f3b50;
++  *((unsigned long *)&__m256i_op1[1]) = 0x111ebb784f9bf1ac;
++  *((unsigned long *)&__m256i_op1[0]) = 0x21f6050d955d3f68;
++  *((unsigned long *)&__m256i_result[3]) = 0x088f5dbc27ce2080;
++  *((unsigned long *)&__m256i_result[2]) = 0x161c32a2c04f9da7;
++  *((unsigned long *)&__m256i_result[1]) = 0x088f5dbc27cdf8d6;
++  *((unsigned long *)&__m256i_result[0]) = 0x10fb02864aae9fb4;
++  __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe0000fffe0012;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe0000fffe0012;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe0000fffe0012;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe0000fffe0012;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe0000fffe0012;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe0000fffe0012;
++  __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000800080008000;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004444;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007bbb0000f777;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004444;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007bbb0000f777;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003ddd80007bbb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003ddd80007bbb;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000f0f0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000f0f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007878;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007878;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001e00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000f00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavg_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+new file mode 100644
+index 000000000..37b78aa1b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-1.c
+@@ -0,0 +1,770 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x2b2b2b2b1bd5d5d6;
++  *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2af2d5d5d6;
++  *((unsigned long *)&__m256i_result[1]) = 0x2b2b2b2b1bd5d5d6;
++  *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2af2d5d5d6;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000c0;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000c0;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c0;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000200020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000200020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000200020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000200020;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_result[3]) = 0x40f23232330df9c8;
++  *((unsigned long *)&__m256i_result[2]) = 0x40f2323240f23232;
++  *((unsigned long *)&__m256i_result[1]) = 0x40f23232330df9c8;
++  *((unsigned long *)&__m256i_result[0]) = 0x40f2323240f23232;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100c00000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0ff000000000f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000f00f000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0ff000000000f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000f00f000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00f8000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x000800f800000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00f8000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x000800f800000000;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000090b0906;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000005060503;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000073737;
++  __m256i_out = __lasx_xvavgr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xdff8000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xdff8000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xdff8000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xdff8000000000000;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff7f0000ff7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff7f0000ff7f;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fd0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fd0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007f0000;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_result[2]) = 0xdfc2df80df80df87;
++  *((unsigned long *)&__m256i_result[1]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_result[0]) = 0xdfc2df80df80df87;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0xdf80ff20df80ff20;
++  *((unsigned long *)&__m256i_result[2]) = 0xdfc2ff20df80ffa7;
++  *((unsigned long *)&__m256i_result[1]) = 0xdf80ff20df80ff20;
++  *((unsigned long *)&__m256i_result[0]) = 0xdfc2ff20df80ffa7;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256i_result[2]) = 0x21f8c3c4c0000005;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256i_result[0]) = 0x21f8c3c4c0000005;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_result[2]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_result[1]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_result[0]) = 0x8848c848c848c848;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000080c000c080;
++  *((unsigned long *)&__m256i_result[1]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000080c000c080;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fffffff3fffc000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fffffff3fffc000;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x003fffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x003fffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x003fffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x003fffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffce;
++  __m256i_out = __lasx_xvavgr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000808081;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000808081;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000808081;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000808081;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000f18080010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000f18080010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000078c0c0008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000078c0c0008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1086658a18ba3594;
++  *((unsigned long *)&__m256i_op1[2]) = 0x160fe9f000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1086658a18ba3594;
++  *((unsigned long *)&__m256i_op1[0]) = 0x160fe9f000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x07a232640bfc1a73;
++  *((unsigned long *)&__m256i_result[2]) = 0x0a66f497ff9effa9;
++  *((unsigned long *)&__m256i_result[1]) = 0x07a232640bfc1a73;
++  *((unsigned long *)&__m256i_result[0]) = 0x0a66f497ff9effa9;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_result[3]) = 0x007f00f8ff7fff80;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fff6a9d8;
++  *((unsigned long *)&__m256i_result[1]) = 0x007f00f8ff7fff80;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fff6a9d8;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007ffe7ffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ffe7ffe7ffe8000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000807e7ffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f8080007f007f;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe37fe3001d001d;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe37fe3001d001d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff8000;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000f00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000700000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000081;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000004a00000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000004a0000002a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000004a00000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000004a0000002a;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fffffffefffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff7fffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000fffffffefffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002500000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x00008024ffff8014;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc0002500000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x00008024ffff8014;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000023a20000a121;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000179e0000951d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000023a20000a121;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000179e0000951d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000125100005111;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000c4f00004b0f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000125100005111;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000c4f00004b0f;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000080008001;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000457d607d;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457d607f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000457d607d;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457d607f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffa2beb040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffa2beb040;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000005858585a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000005858585a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000005858585a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000005858585a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000023a300003fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000023a300003fef;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000023a300003fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000023a300003fef;
++  *((unsigned long *)&__m256i_result[3]) = 0x000011d1ac2c4c2d;
++  *((unsigned long *)&__m256i_result[2]) = 0x000011d1ac2c4c25;
++  *((unsigned long *)&__m256i_result[1]) = 0x000011d1ac2c4c2d;
++  *((unsigned long *)&__m256i_result[0]) = 0x000011d1ac2c4c25;
++  __m256i_out = __lasx_xvavgr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
+new file mode 100644
+index 000000000..3944a6ac0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvavgr-2.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000018803100188;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000018803100188;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000086fe0000403e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000403e00004040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000086fe0000403e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000403e00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000437f0000201f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000201f00002020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000437f0000201f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000201f00002020;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x90007fff90008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0ffffffe90008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x87ffffff87ffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xc880bfffc880c080;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[0]) = 0x87ffffffc880c080;
++  __m256i_out = __lasx_xvavgr_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000082a54290;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000028aa700;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000082a54290;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54287;
++  *((unsigned long *)&__m256i_result[3]) = 0x007f00f841532148;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001a753c3;
++  *((unsigned long *)&__m256i_result[1]) = 0x007f00f841532148;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001b52187;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004444;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007bbb0000f777;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004444;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007bbb0000f777;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003dde00007bbc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003dde00007bbc;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[3]) = 0x4ffc3f783fc040c0;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fc03f803fc040c0;
++  *((unsigned long *)&__m256i_result[1]) = 0x4ffc3f783fc040c0;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fc03f803fc040c0;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0505070804040404;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0504070804040404;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0505070804040404;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0504070804040404;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0283038402020202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0282038402020202;
++  *((unsigned long *)&__m256i_result[1]) = 0x0283038402020202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0282038402020202;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1010101010001000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x101010100000000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0889088908810881;
++  *((unsigned long *)&__m256i_result[2]) = 0x0081010000810100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0889088900810088;
++  *((unsigned long *)&__m256i_result[0]) = 0x0081010000810100;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000001d001d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3e00000440004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3e000004400f400f;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3abb3abbbabababa;
++  *((unsigned long *)&__m256i_result[2]) = 0x0080000000800080;
++  *((unsigned long *)&__m256i_result[1]) = 0x3abb3abbbabababa;
++  *((unsigned long *)&__m256i_result[0]) = 0x0080000000800080;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xc0008000c0008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc0008000c0008000;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100020001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00d6c1c830160048;
++  *((unsigned long *)&__m256i_op1[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe3aebaf4df958004;
++  *((unsigned long *)&__m256i_result[3]) = 0xc5890a0a07070707;
++  *((unsigned long *)&__m256i_result[2]) = 0x006be0e4180b8024;
++  *((unsigned long *)&__m256i_result[1]) = 0x1b399540334c966c;
++  *((unsigned long *)&__m256i_result[0]) = 0x71d7dd7aefcac001;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000808080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_result[2]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_result[0]) = 0x000a800b000a800b;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00f9f9f900000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00f9f9f900000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007cfcfd80000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007cfcfd80000001;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff8001ffff8001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff8001ffff8001;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fff800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffc0017fffc001;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffc0017fffc001;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0080000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005;
++  __m256i_out = __lasx_xvavgr_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba;
++  *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000501e99b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000109973de7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001020f22;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001890b7a39;
++  *((unsigned long *)&__m256i_result[3]) = 0x1b974ebaf6d64d4e;
++  *((unsigned long *)&__m256i_result[2]) = 0x62e0429c1b48fed1;
++  *((unsigned long *)&__m256i_result[1]) = 0x18b985adf63f548c;
++  *((unsigned long *)&__m256i_result[0]) = 0x032c796ecbdecc3b;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3a2a3a2a3a2a3a2a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3a2a3a2a3aaa45aa;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3a553f7f7a2a3a2a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3a2a3a2a3aaa45aa;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x1d949d949d949d95;
++  *((unsigned long *)&__m256i_result[2]) = 0x1d949d949e1423d4;
++  *((unsigned long *)&__m256i_result[1]) = 0x1de9a03f3dd41d95;
++  *((unsigned long *)&__m256i_result[0]) = 0x1d949d949e1423d4;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001e001ea1bfa1bf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x001e001e83e5422e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001e001ea1bfa1bf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x011f011f0244420e;
++  *((unsigned long *)&__m256i_result[3]) = 0x000f000fd0dfd0df;
++  *((unsigned long *)&__m256i_result[2]) = 0x000f000f83ef4b4a;
++  *((unsigned long *)&__m256i_result[1]) = 0x000f000fd0dfd0df;
++  *((unsigned long *)&__m256i_result[0]) = 0x0110011001224b07;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000030000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000030000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000018002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000018002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x223d76f09f3881ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3870ca8d013e76a0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x223d76f09f37e357;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43ec0a1b2aba7ed0;
++  *((unsigned long *)&__m256i_result[3]) = 0x111ebb784f9c4100;
++  *((unsigned long *)&__m256i_result[2]) = 0x1c386546809f3b50;
++  *((unsigned long *)&__m256i_result[1]) = 0x111ebb784f9bf1ac;
++  *((unsigned long *)&__m256i_result[0]) = 0x21f6050d955d3f68;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000840100000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbffebffec0fe0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000840100000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbffebffec0fe0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x5fff5fff607f0000;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000457d607d;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457d607f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000457d607d;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457d607f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffa2beb040;
++  __m256i_out = __lasx_xvavgr_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+new file mode 100644
+index 000000000..3c1a8b8e6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsll_v.c
+@@ -0,0 +1,130 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00555555553f8000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00555555553f8000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000030000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000030000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020643100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020643100000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000b2673a90896a4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000b2673a90896a4;
++  *((unsigned long *)&__m256i_result[3]) = 0xa90896a400000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xa90896a400000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f0040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f0040;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003f003f003f00;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003f003f003f00;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsll_v (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+new file mode 100644
+index 000000000..340f7691b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbsrl_v.c
+@@ -0,0 +1,64 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000007d0d0d0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000007d0d0d0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000007d0d0d00000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000007d0d0d00000;
++  __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fffffffe000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fffffffe000000;
++  __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000018803100188;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000018803100188;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbsrl_v (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
+new file mode 100644
+index 000000000..3cd1626d4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvneg.c
+@@ -0,0 +1,526 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x002e4db200000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000315ac0000d658;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00735278007cf94c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0003ed8800031b38;
++  *((unsigned long *)&__m256i_result[3]) = 0xffd1b24e00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffcea54ffff29a8;
++  *((unsigned long *)&__m256i_result[1]) = 0xff8cad88ff8306b4;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffc1278fffce4c8;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x06f880008000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000010180000101;
++  *((unsigned long *)&__m256i_result[2]) = 0xfa08800080000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x800080008000480f;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010102;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010201010204;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010102;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010102;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010203;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000f1c00;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000800000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0081000100810001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0081000100810001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0081000100810001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0081000100810001;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x223d76f09f3881ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3870ca8d013e76a0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x223d76f09f37e357;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ec0a1b2aba7ed0;
++  *((unsigned long *)&__m256i_result[3]) = 0xdec38a1061c87f01;
++  *((unsigned long *)&__m256i_result[2]) = 0xc8903673ffc28a60;
++  *((unsigned long *)&__m256i_result[1]) = 0xdec38a1061c91da9;
++  *((unsigned long *)&__m256i_result[0]) = 0xbd14f6e5d6468230;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000007e8080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fdda7dc4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000007e8080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fdda7dc4;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff827f80;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0226823c;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff827f80;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0226823c;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000180000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000180000001;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff1000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff1000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000008000165a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000008000165a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff00017fff005d;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffe9a6;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff00017fff005d;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffe9a6;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff0100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff0100000001;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100004300000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100004300000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff0000bd00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff0000bd00000000;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000010000080040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000010000080040;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff00fff8ffc0;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000497fe0000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000683fe0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000497fe0000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000683fe0000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffb6811fffff80;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff97c120000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffb6811fffff80;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff97c120000000;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefdfdfdfd;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefdfdfdfd;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010202020203;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010201010102;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010202020203;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010201010102;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000032;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000032;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffce;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffce;
++  __m256i_out = __lasx_xvneg_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvneg_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00007fde00007fd4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007fe000007fe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00007fde00007fd4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fe000007fe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x000081220000812c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000812000008120;
++  *((unsigned long *)&__m256i_result[1]) = 0x000081220000812c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000812000008120;
++  __m256i_out = __lasx_xvneg_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffd880;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffd880;
++  __m256i_out = __lasx_xvneg_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
+new file mode 100644
+index 000000000..b4ac50271
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-1.c
+@@ -0,0 +1,537 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000800080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc9d8080067f50020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc70000020000c000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf000f00000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000f000f0000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf0f008000ff5000f;
++  *((unsigned long *)&__m256i_result[0]) = 0xf00000020000f000;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000fff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000e000e000e000e;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00003fea00013fec;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fe50001c013;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00003fea00013fec;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fe50001c013;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff0000ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff0000ff00;
++  __m256i_out = __lasx_xvsat_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000399400003994;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000399400003994;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000399400003994;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000399400003994;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000fff00000fff;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_result[3]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0xffc0ffc0ffc0ffc0;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00002df900001700;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffe05ffffe911;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00002df900001700;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffe05ffffe911;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffcfffffffc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffcfffffffc;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffdd97dc4;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffdd97dc4;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffdd97dc4;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffdd97dc4;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000007f007f007f;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000003fffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000003fffff;
++  __m256i_out = __lasx_xvsat_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0002000200000022;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0049004200000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000022;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffefd;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_result[3]) = 0x007fffff007fffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x007fffff007fffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x007fffff007fffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x007fffff007fffff;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000080000001000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000080000001000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000b8f81b8c840e4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000b8f81b8c840e4;
++  *((unsigned long *)&__m256i_result[3]) = 0x000007ff000007ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000007fffffff800;
++  *((unsigned long *)&__m256i_result[1]) = 0x000007ff000007ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000007fffffff800;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_w (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_op0[2]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_op0[1]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_op0[0]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffc00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffc00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffc00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffc00000000;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x22);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff605a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff605a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff605a;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff605a;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffff8d9ffa7103d;
++  *((unsigned long *)&__m256i_result[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256i_result[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffff8d9ffa7103d;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x39);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x21);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe000000000000;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x31);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001ffffffff;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x21);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0fffffffffffffff;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000002c21ffeff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc0000000c0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000002c21ffeff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc0000000c0000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_d (__m256i_op0, 0x32);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
+new file mode 100644
+index 000000000..e5ee89deb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsat-2.c
+@@ -0,0 +1,427 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3f3f3f3f3f3f3f3f;
++  *((unsigned long *)&__m256i_result[2]) = 0x3f3f3f3f3f3f3f3f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3f;
++  *((unsigned long *)&__m256i_result[0]) = 0x3f3f3f3f00000000;
++  __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000017f7f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000017f7f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f00000000000000;
++  __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0007ffff0007ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0007ffff0007ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x00071f1f00071f1f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x00071f1f00071f1f;
++  __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d;
++  __m256i_out = __lasx_xvsat_bu (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[1]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x001f001f001f001f;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000003f003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000003f003f;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc0090000c0200060;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc0090000c0200060;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f0000007f0060;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256i_op0[2]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256i_op0[0]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0003000300030000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0003000300030000;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000029170;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000029170;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000203ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000203ff;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_hu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffcfffc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffcfffc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffcfffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffcfffc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000003fff;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0fffffff0fffffff;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffff08;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffff08;
++  *((unsigned long *)&__m256i_result[3]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0fffffff0fffffff;
++  __m256i_out = __lasx_xvsat_wu (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000003ffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000003ffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000003ffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000003ffffffffff;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x29);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x34);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001ffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001ffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x30);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2475cef801f0ffdd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x419cd5b11c3c5654;
++  *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_result[2]) = 0x2475cef801f0ffdd;
++  *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_result[0]) = 0x419cd5b11c3c5654;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x22);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000001ff;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007fffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007fffff;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000017f00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f03030000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000017f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f7f03030000;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x37);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsat_du (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvadd-xvadda-xvad.patch b/LoongArch-Add-tests-for-ASX-vector-xvadd-xvadda-xvad.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6c06f9abe40adbdd058cf5bcbeaf65678e7a4523
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvadd-xvadda-xvad.patch
@@ -0,0 +1,6368 @@
+From a6d51c0d69572f800f63c3215b7de6665024104c Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:15:40 +0800
+Subject: [PATCH 099/124] LoongArch: Add tests for ASX vector
+ xvadd/xvadda/xvaddi/xvaddwev/ xvaddwodxvsadd instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvadd.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvadda.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddi.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvadd.c        | 725 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvadda.c       | 785 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvaddi.c       | 427 ++++++++++
+ .../loongarch/vector/lasx/lasx-xvaddwev-1.c   | 740 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvaddwev-2.c   | 485 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvaddwev-3.c   | 515 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvaddwod-1.c   | 530 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvaddwod-2.c   | 560 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvaddwod-3.c   | 485 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsadd-1.c     | 650 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsadd-2.c     | 350 ++++++++
+ 11 files changed, 6252 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+new file mode 100644
+index 000000000..293295723
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadd.c
+@@ -0,0 +1,725 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe;
++  __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x41cfe01dde000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x41cfe01dde000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x41cfe01dde000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x41cfe01dde000000;
++  __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040;
++  __m256i_out = __lasx_xvadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f8000007f7fffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f8000007f7fffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f8000007f7fffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f8000007f7fffff;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff900000800;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00e9a80014ff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00e9a80014ff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_result[2]) = 0x00e9a80014ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_result[0]) = 0x00e9a80014ff0000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000956a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000004efffe00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000956a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000004efffe00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xb500000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xb500000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x007fffffffff9569;
++  *((unsigned long *)&__m256i_result[2]) = 0xb50000004efffe00;
++  *((unsigned long *)&__m256i_result[1]) = 0x007fffffffff9569;
++  *((unsigned long *)&__m256i_result[0]) = 0xb50000004efffe00;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01;
++  __m256i_out = __lasx_xvadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x06f880008000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000010180000101;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfa08800080000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x800080008000480f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001010000010100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101000000010100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000000010100;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffff605a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffff605a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffff605a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffff605a;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_result[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_result[1]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_result[0]) = 0x55555555aaaaaaac;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102;
++  __m256i_out = __lasx_xvadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff4000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000403f3fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[3]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[1]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c2c2c2;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000006040190d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000006040190d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000860601934;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000860601934;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0005;
++  __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000c0000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000040000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020001f001f001e;
++  *((unsigned long *)&__m256i_result[2]) = 0x001f001fc01f001f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020001f001f001e;
++  *((unsigned long *)&__m256i_result[0]) = 0x001f001f401f001f;
++  __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7ffeffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7ffeffffffff;
++  __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff;
++  __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010000080040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010000080040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010000080040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000080040;
++  __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffeffff0000;
++  __m256i_out = __lasx_xvadd_q (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
+new file mode 100644
+index 000000000..d6b57d1cd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvadda.c
+@@ -0,0 +1,785 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101008000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101008000000080;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000402000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000402000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000402000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000402000000;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffeffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100010102;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000102;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xefefefefefefefef;
++  *((unsigned long *)&__m256i_op0[2]) = 0xefefefefefefefef;
++  *((unsigned long *)&__m256i_op0[1]) = 0xefefefefefefef6e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xeeeeeeeeeeeeeeee;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1010101010101012;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101010101012;
++  *((unsigned long *)&__m256i_result[1]) = 0x1010101010101093;
++  *((unsigned long *)&__m256i_result[0]) = 0x1111111111111113;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0110000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0110000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0110000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0110000000000080;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1000000000000000;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xce7ffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xce7ffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x327f010101010102;
++  *((unsigned long *)&__m256i_result[2]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x327f010101010102;
++  *((unsigned long *)&__m256i_result[0]) = 0x6300000000000000;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff5556aaaa;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff5556aaaa;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0006ffff0004ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0006ffff0004ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0006ffff0004ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00020000aaa95556;
++  *((unsigned long *)&__m256i_result[1]) = 0x0006ffff0004ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00020000aaa95556;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9a7f997fff01ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbe632a4f1c3c5653;
++  *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_result[2]) = 0x2475cef801f0ffdd;
++  *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_result[0]) = 0x419cd5b11c3c5654;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x017e01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0586060601fe0202;
++  *((unsigned long *)&__m256i_op0[1]) = 0x017e01fe01fe0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0586060601fe0004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100001;
++  *((unsigned long *)&__m256i_result[3]) = 0x017f01fe01ff01fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x05960616020e0203;
++  *((unsigned long *)&__m256i_result[1]) = 0x017f01fe01ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x05960616020e0005;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010146;
++  *((unsigned long *)&__m256i_result[2]) = 0x01010101010e0106;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010146;
++  *((unsigned long *)&__m256i_result[0]) = 0x01010101010e0106;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010000000100000;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffb79fb74;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffb79fb74;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000010486048c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000010486048c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000006;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000020000;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00010001000c4411;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100044411;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000018;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000019;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000200000001e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000019;
++  *((unsigned long *)&__m256i_op1[3]) = 0x223d76f09f3881ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3870ca8d013e76a0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x223d76f09f37e357;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43ec0a1b2aba7ed0;
++  *((unsigned long *)&__m256i_result[3]) = 0x223d771060c77e19;
++  *((unsigned long *)&__m256i_result[2]) = 0x3870caad013e76b9;
++  *((unsigned long *)&__m256i_result[1]) = 0x223d771060c81cc7;
++  *((unsigned long *)&__m256i_result[0]) = 0x43ec0a3b2aba7ee9;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op0[0]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2535253514141414;
++  *((unsigned long *)&__m256i_result[2]) = 0x2535253500002535;
++  *((unsigned long *)&__m256i_result[1]) = 0x2535253514141414;
++  *((unsigned long *)&__m256i_result[0]) = 0x2535253500002535;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010000f0000000f;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000504f00002361;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff8f81000040e4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000504f00002361;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff8f81000040e4;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000007ff000007ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000007ff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000007ff000007ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000007ff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000584e00002b60;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000787dffffbf1c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000584e00002b60;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000787dffffbf1c;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010200000000;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fef010000010100;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fef010000010100;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fef010000010100;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fef010000010100;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000001fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x40b2bf4d30313031;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fffa2bea2be;
++  *((unsigned long *)&__m256i_op0[1]) = 0x40b2bf4d30313031;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fffa2bea2be;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x40b240b330313031;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff5d425d42;
++  *((unsigned long *)&__m256i_result[1]) = 0x40b240b330313031;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff5d425d42;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000100040;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000100080;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff896099cbdbfff1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc987ffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff896099cbdbfff1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc987ffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00769f673424000f;
++  *((unsigned long *)&__m256i_result[2]) = 0x3678000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x00769f673424000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x3678000100000001;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadda_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000500000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000700000032;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000500000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000700000032;
++  __m256i_out = __lasx_xvadda_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003feec0108022;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fe9c015802c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003feec0108022;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fe9c015802c;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007f124010c022;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007f174015c02c;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007f124010c022;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f174015c02c;
++  __m256i_out = __lasx_xvadda_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x05ea05ea05ea05ec;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x05ea05ea05ea05ec;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000020202020202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000020202020202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101000000010000;
++  __m256i_out = __lasx_xvadda_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+new file mode 100644
+index 000000000..054bf6e55
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddi.c
+@@ -0,0 +1,427 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x44bb2cd3a35c2fd0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xca355ba46a95e31c;
++  *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_result[1]) = 0x61d849f0c0794ced;
++  *((unsigned long *)&__m256i_result[0]) = 0xe75278c187b20039;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffbf7f7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe651bfff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1ddd9d9d1d;
++  *((unsigned long *)&__m256i_result[1]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_result[0]) = 0x1d1d1d1d046fdd1d;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1515151515151515;
++  *((unsigned long *)&__m256i_result[2]) = 0x1515151515151515;
++  *((unsigned long *)&__m256i_result[1]) = 0x1515151515151515;
++  *((unsigned long *)&__m256i_result[0]) = 0x1515151515151515;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1818181818181818;
++  *((unsigned long *)&__m256i_result[2]) = 0x1818181818181818;
++  *((unsigned long *)&__m256i_result[1]) = 0x1818181818181818;
++  *((unsigned long *)&__m256i_result[0]) = 0x1818181818181818;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202810102020202;
++  *((unsigned long *)&__m256i_result[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202810102020202;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_result[2]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_result[1]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_result[0]) = 0x0909090909090909;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ffce20;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ffce20;
++  *((unsigned long *)&__m256i_result[3]) = 0x1514151415141514;
++  *((unsigned long *)&__m256i_result[2]) = 0x151415141514e335;
++  *((unsigned long *)&__m256i_result[1]) = 0x1514151415141514;
++  *((unsigned long *)&__m256i_result[0]) = 0x151415141514e335;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0606060606060606;
++  *((unsigned long *)&__m256i_result[2]) = 0x0606060606060606;
++  *((unsigned long *)&__m256i_result[1]) = 0x0606060606060606;
++  *((unsigned long *)&__m256i_result[0]) = 0x0606060606060606;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1212121212121212;
++  *((unsigned long *)&__m256i_result[2]) = 0x1212121212121212;
++  *((unsigned long *)&__m256i_result[1]) = 0x1212121212121212;
++  *((unsigned long *)&__m256i_result[0]) = 0x1212121212121212;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_result[3]) = 0x1a1a1a2c1a1a1a2c;
++  *((unsigned long *)&__m256i_result[2]) = 0x1a1a1a2c1a1a1a2c;
++  *((unsigned long *)&__m256i_result[1]) = 0x1a1a1a2c1a1a1a2c;
++  *((unsigned long *)&__m256i_result[0]) = 0x1a1a1a2c1a1a1a2c;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x1d1d1d1e1d1d1d1e;
++  *((unsigned long *)&__m256i_result[2]) = 0x1d1d1d1e1d1d1d1e;
++  *((unsigned long *)&__m256i_result[1]) = 0x1d1d1d1e1d1d1d1e;
++  *((unsigned long *)&__m256i_result[0]) = 0x1d1d1d1e1d1d1d1e;
++  __m256i_out = __lasx_xvaddi_bu (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[2]) = 0x5982000200020002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[0]) = 0x5982000200020002;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[3]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x001f001f02c442af;
++  *((unsigned long *)&__m256i_result[1]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x001f001f02c442af;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x807e80fd80fe80fd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80938013800d8002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x807e80fd80fe0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80938013800d0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x8091811081118110;
++  *((unsigned long *)&__m256i_result[2]) = 0x80a6802680208015;
++  *((unsigned long *)&__m256i_result[1]) = 0x8091811081110013;
++  *((unsigned long *)&__m256i_result[0]) = 0x80a6802680200018;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000003f00390035;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8015003f0006001f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000003f00390035;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8015003f0006001f;
++  *((unsigned long *)&__m256i_result[3]) = 0x000b004a00440040;
++  *((unsigned long *)&__m256i_result[2]) = 0x8020004a0011002a;
++  *((unsigned long *)&__m256i_result[1]) = 0x000b004a00440040;
++  *((unsigned long *)&__m256i_result[0]) = 0x8020004a0011002a;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0016001600160016;
++  *((unsigned long *)&__m256i_result[2]) = 0x0016001600160016;
++  *((unsigned long *)&__m256i_result[1]) = 0x0016001600160016;
++  *((unsigned long *)&__m256i_result[0]) = 0x0016001600160016;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
++  *((unsigned long *)&__m256i_result[3]) = 0xa1bfa1bfa1bfa1bf;
++  *((unsigned long *)&__m256i_result[2]) = 0xa1bfa1bf5e7c5e7c;
++  *((unsigned long *)&__m256i_result[1]) = 0xa1bfa1bfa1bfa1bf;
++  *((unsigned long *)&__m256i_result[0]) = 0xa1bfa1bf5e7c5e7c;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_result[3]) = 0x001a001a001a009a;
++  *((unsigned long *)&__m256i_result[2]) = 0x001a001a002a009a;
++  *((unsigned long *)&__m256i_result[1]) = 0x001a001a001a009a;
++  *((unsigned long *)&__m256i_result[0]) = 0x001a001a002a009a;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x001c001c001c001c;
++  *((unsigned long *)&__m256i_result[2]) = 0x001c001c001c001c;
++  *((unsigned long *)&__m256i_result[1]) = 0x001c001c001c001c;
++  *((unsigned long *)&__m256i_result[0]) = 0x001c001c001d001d;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x721e001e721e001e;
++  *((unsigned long *)&__m256i_result[2]) = 0x721e001e721e001e;
++  *((unsigned long *)&__m256i_result[1]) = 0x721e001e721e001e;
++  *((unsigned long *)&__m256i_result[0]) = 0x721e001e721e001e;
++  __m256i_out = __lasx_xvaddi_hu (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001900000019;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001a0000001a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001a0000001a;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001900000019;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001d0000001d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001d0000001d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001d0000001d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001d;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffd;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000700020004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000700020004;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008;
++  __m256i_out = __lasx_xvaddi_wu (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000019410000e69a;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf259905a0c126604;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000883a00000f20;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6d3c2d3aa1c82947;
++  *((unsigned long *)&__m256i_result[3]) = 0x000019410000e6aa;
++  *((unsigned long *)&__m256i_result[2]) = 0xf259905a0c126614;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000883a00000f30;
++  *((unsigned long *)&__m256i_result[0]) = 0x6d3c2d3aa1c82957;
++  __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000000d;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000d;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000000d;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000d;
++  __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff0fff0ff01ff14;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff10003;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff0fff0ff01ff14;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff10003;
++  __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff47b4ffff5879;
++  __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0fffffff10000006;
++  __m256i_out = __lasx_xvaddi_du (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+new file mode 100644
+index 000000000..70f3bf783
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-1.c
+@@ -0,0 +1,740 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op0[2]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op0[1]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op0[0]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffeffffff88;
++  *((unsigned long *)&__m256i_op1[2]) = 0x61e0000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffeffffff88;
++  *((unsigned long *)&__m256i_op1[0]) = 0x61e0000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010ffc80010ff52;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff1ffca0011ffcb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010ffc80010ff52;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff1ffca0011ffcb;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffe90ffffff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffe90ffffff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff90ffffff80;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000023;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01c601c6fe3afe3a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01c601c6fe3afe3a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000011;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x007d003e007d003e;
++  *((unsigned long *)&__m256i_result[2]) = 0x007d003effa80010;
++  *((unsigned long *)&__m256i_result[1]) = 0x007d003e007d003e;
++  *((unsigned long *)&__m256i_result[0]) = 0x007d003effa80010;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ca0000fff80000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ca0000fff80000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5464fbfc416b9f71;
++  *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0d8264202b8ea3f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x80c72fcd40fb3bc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x84bd087966d4ace0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x26aa68b274dc1322;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe072db2bb9d4cd40;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffcd42ffffecc0;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000475ffff4c51;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000740dffffad17;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003f4bffff7130;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff80;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000468600008078;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffff328ffffe021;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op0[2]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op0[1]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op0[0]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[3]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[2]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[1]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[0]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000399400003994;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000399400003994;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000399400003994;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000399400003994;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_op1[1]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000616100004f61;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000616100004f61;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000616100004f61;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000616100004f61;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000b8f81b8c840e4;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000b8f81b8c840e4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000504f00002361;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff8f81000040e4;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000504f00002361;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff8f81000040e4;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x40b240b330313031;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff5d425d42;
++  *((unsigned long *)&__m256i_op1[1]) = 0x40b240b330313031;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff5d425d42;
++  *((unsigned long *)&__m256i_result[3]) = 0x000040b200002fd4;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fff0000739c;
++  *((unsigned long *)&__m256i_result[1]) = 0x000040b200002fd4;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff0000739c;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007fde00007fd4;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fe000007fe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007fde00007fd4;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fe000007fe0;
++  __m256i_out = __lasx_xvaddwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000012e2110;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000583800;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000100000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000583800;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000100000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007bbbbbbb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007bbbbbbb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000073333333;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000073333333;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007f807f007e8080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f807f007e806f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007f807f007e8080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f807f007e806f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000007e8080;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007e8092;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000007e8080;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007e8092;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000062d4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000006338;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff80000000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfe01fe01fc01fc01;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfe01fe01fc01fc01;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000003fc03bbc;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1b9763952fc4c101;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe37affb42fc05f69;
++  *((unsigned long *)&__m256i_op1[1]) = 0x18b988e64facb558;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe5fb66c81da8e5bb;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xe37affb42fc05f69;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x65fb66c81da8e5ba;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1010101010101012;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1010101010101012;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1010101010101093;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1111111111111113;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101110101011;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1111111211111112;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x5980000000000000;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffefe00000000;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000002800000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000002800000010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff0127000c0010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff012700040010;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc0008000c0008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc0008000c0008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc0008000c0008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc0008000c0008000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8001000180010000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8001000180010000;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800200000002;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000020000000200;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
+new file mode 100644
+index 000000000..22528a14f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-2.c
+@@ -0,0 +1,485 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010000000000000;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffc0003fffa0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fb010201f900ff;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000005554;
++  *((unsigned long *)&__m256i_op1[2]) = 0xaaaa0000aaacfffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000005554;
++  *((unsigned long *)&__m256i_op1[0]) = 0xaaaa0000aaacfffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000054;
++  *((unsigned long *)&__m256i_result[2]) = 0x00aa000000ac00fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000054;
++  *((unsigned long *)&__m256i_result[0]) = 0x00aa000000ac00fe;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x017f01fe01ff01fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x05960616020e0203;
++  *((unsigned long *)&__m256i_op0[1]) = 0x017f01fe01ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x05960616020e0005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x017f01fe01ff01fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x05960616020e0203;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017f01fe01ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x05960616020e0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x00fe01fc01fe01fc;
++  *((unsigned long *)&__m256i_result[2]) = 0x012c002c001c0006;
++  *((unsigned long *)&__m256i_result[1]) = 0x00fe01fc01fe0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x012c002c001c000a;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0007000000fb00ef;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ea005600f90090;
++  *((unsigned long *)&__m256i_result[1]) = 0x0007000000fb00ef;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ea005600f90090;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffc03b1fc5e050;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6a9e3fa2603a2000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffc03b1fc5e050;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6a9e3fa2603a2000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc03fffffffc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffc00000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc03fffffffc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffc00000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_result[2]) = 0x019d00a2003a0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_result[0]) = 0x019d00a2003a0000;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00e30064001a008f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00e3006300e30063;
++  *((unsigned long *)&__m256i_result[1]) = 0x00e30064001a008f;
++  *((unsigned long *)&__m256i_result[0]) = 0x00e3006300e30063;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000013;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000a400ff004f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000a400ff004f;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0002ffff00020002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x04f504f104f504f5;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0002ffff00020002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x04f504f104f504f5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000200ff00020002;
++  *((unsigned long *)&__m256i_result[2]) = 0x00f500f100f500f5;
++  *((unsigned long *)&__m256i_result[1]) = 0x000200ff00020002;
++  *((unsigned long *)&__m256i_result[0]) = 0x00f500f100f500f5;
++  __m256i_out = __lasx_xvaddwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000019410000e69a;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf259905a0c126604;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000883a00000f20;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6d3c2d3aa1c82947;
++  *((unsigned long *)&__m256i_op1[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc5c085372cfabfba;
++  *((unsigned long *)&__m256i_op1[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0658f2dc0eb21e3c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000b6b60001979a;
++  *((unsigned long *)&__m256i_result[2]) = 0x00011591000125be;
++  *((unsigned long *)&__m256i_result[1]) = 0x000093950000a915;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001201600004783;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffff6ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffff6ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff000000ff000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff000000ff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffee0000ff4c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff050000ff3c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fff90000ff78;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffa80000ff31;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffc7f7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffc000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffc7f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffc000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000b0b100015d1e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001fffe0001bfff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000b0b100015d1e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001fffe0001bfff;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fe200000fe1f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fe200000fe1f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffc0ffc1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f00000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffc0ffc1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f00000000003f;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001fffe0001ffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0001003e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001fffe0001ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0001003e;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020010101610000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0061200000610000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020010101610000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0061200000610000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000101000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00011fff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000101000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00011fff0000ffff;
++  __m256i_out = __lasx_xvaddwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000013ffffffec;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000013ffffebd8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000013ffffffec;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000013ffffebd8;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffec;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffebd8;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffec;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffebd8;
++  __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000c0007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000c0007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3abb3abbbabababa;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3abb3abbbabababa;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800080;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000babababa;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000008c0087;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000babababa;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000008c0087;
++  __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvaddwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000010000000a;
++  __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8060000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8060000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x805f0000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x805f0000ffffffff;
++  __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe010000fd02;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe010000fd02;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfe01fe010000fd02;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfe01fe010000fd02;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f807f80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f807f80;
++  __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c;
++  __m256i_out = __lasx_xvaddwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
+new file mode 100644
+index 000000000..38a0a53d7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwev-3.c
+@@ -0,0 +1,515 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[3]) = 0x0036003200360032;
++  *((unsigned long *)&__m256i_result[2]) = 0x0036003200360032;
++  *((unsigned long *)&__m256i_result[1]) = 0x0036003200360032;
++  *((unsigned long *)&__m256i_result[0]) = 0x0036003200360032;
++  __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000170017;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000170017;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017;
++  __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000100fe000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000100fe00010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x000100fe000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000100fe00010001;
++  __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000bf6e0000c916;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000030000fff3;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000b004a00440040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8020004a0011002a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000b004a00440040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8020004a0011002a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000004a00000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004a0000002a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000004a00000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004a0000002a;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001a001a001a009a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001a001a002a009a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001a001a001a009a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001a001a002a009a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001a000000da;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001a000000da;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001a000000da;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001a000000da;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffce;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000001fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000001ce;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000001fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000001ce;
++  __m256i_out = __lasx_xvaddwev_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff000000010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8011ffae800c000c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00baff050083ff3c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x80b900b980380038;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0017ffa8008eff31;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff800c000c;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000084ff3c;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff80380038;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000008fff31;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001001f001e;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001001f001e;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100f000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100f000ff;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff0ffc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff0ffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff78ffc0;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff1cff1c;
++  __m256i_out = __lasx_xvaddwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010ffc80010ff52;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff1ffca0011ffcb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010ffc80010ff52;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff1ffca0011ffcb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff1ffca0011feca;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff1ffca0011feca;
++  __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000002;
++  __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000017fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000017fff;
++  __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040;
++  __m256i_out = __lasx_xvaddwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
+new file mode 100644
+index 000000000..a4dc565e9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-1.c
+@@ -0,0 +1,530 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9240000000008025;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffff24affff8025;
++  *((unsigned long *)&__m256i_op0[1]) = 0xb2c0000000008006;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffb341ffff8006;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9240000000008025;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffff24affff8025;
++  *((unsigned long *)&__m256i_op1[1]) = 0xb2c0000000008006;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffb341ffff8006;
++  *((unsigned long *)&__m256i_result[3]) = 0xff2400000000ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffeffe4fffeff00;
++  *((unsigned long *)&__m256i_result[1]) = 0xff6400000000ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffeff66fffeff00;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff04ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff04ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe0000fffe0002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe0000fffe0002;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000fffeffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffeffff;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffc0003fffc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffc0003fffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffeffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7ffeffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f007bfffffffb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f007bfffffffb;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000201220001011c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000201220001011c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe0ffe000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fa0001fff808000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe0ffe000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fa0001fff808000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f0000ffffff80;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f0000ffffff80;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004;
++  __m256i_out = __lasx_xvaddwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000001fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000001fe;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000d24;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000d24;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4ffc3f7800000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3fc03f6400000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4ffc3f7800000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3fc03f6400000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000050fd00000101;
++  *((unsigned long *)&__m256i_result[2]) = 0x000040c100000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x000050fd00000101;
++  *((unsigned long *)&__m256i_result[0]) = 0x000040c100000101;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fff;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000006d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000400008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000006d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000400008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000800080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc9d8080067f50020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc70000020000c000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe06df0d7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8b470f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007ffffffff7ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x49d8080067f4f81f;
++  __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff605a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff605a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffebeb8;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffebeb8;
++  __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1111111111111111;
++  __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffeffffffdd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d;
++  __m256i_out = __lasx_xvaddwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
+new file mode 100644
+index 000000000..a2fbe9ed0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-2.c
+@@ -0,0 +1,560 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x017e00ff017e00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff017e01fe;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00b7003600120000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00b7006200fc0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00b7004100190004;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000008e7c00;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000067751500;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000008e7c00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000067751500;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000007a00f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff01640092;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000007a00f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff01640092;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff000000ff0000;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff008000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff008000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff008000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff008000000000;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff008000ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff008000ff0000;
++  __m256i_out = __lasx_xvaddwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000804000004141;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00017fff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000004444;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007bbb0000f777;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000004444;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007bbb0000f777;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4010000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3e6ce7d9cb7afb62;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4010000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3e6ce7d9cb7afb62;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003e6c0000cb7a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003e6c0000cb7a;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3aadec4f6c7975b1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3abac5447fffca89;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3aadec4f6c7975b1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3abac5447fffca89;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3aadec4f6c7975b1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3abac5447fffca89;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3aadec4f6c7975b1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3abac5447fffca89;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000755a0000d8f2;
++  *((unsigned long *)&__m256i_result[2]) = 0x000075740000fffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000755a0000d8f2;
++  *((unsigned long *)&__m256i_result[0]) = 0x000075740000fffe;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffee00ba;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffee00ba;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffee;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffee;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9ffffd8020010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff9fffffff9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9ffffd8020010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff9fffffff9;
++  *((unsigned long *)&__m256i_result[3]) = 0x00009fff00002001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00009fff00002001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvaddwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000001a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000001a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100010000;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00153f1594ea02ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffff0100;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff15c1ea95ea02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000153f15;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff15c1ea;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100fe04ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100fe04ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00f9f9f900000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00f9f9f900000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000faf3f3f2;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000029170;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000029170;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc3f0c3f0c3f0c3f0;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op1[3]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_op1[1]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xe6e8e6e8e6e8d719;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xe6e8e6e8e6e8d719;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
+new file mode 100644
+index 000000000..8c98fc4be
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvaddwod-3.c
+@@ -0,0 +1,485 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000033e87ef1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000033007e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000021;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0020002000400040;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007fc0083fc7c007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007fc0083fc7c007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffc0003fffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffc0003fffc0;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffb8579f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffb8579f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00bb;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0057;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00bb;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0057;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000005060503;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000073737;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000050007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000039;
++  __m256i_out = __lasx_xvaddwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000007070707;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0102040000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000020100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0703020000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000707;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000070300000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007fffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007fffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000100640000ff92;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000100640000ff92;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007c0100007c01;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007c0100007c00;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007c0100007c01;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007c0100007c00;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffe0000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000048;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000048;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800000010;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fffe00009fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fffe00002001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffe00009fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fffe00002001;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010080;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_w_hu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffe7fffeffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffd84900000849;
++  *((unsigned long *)&__m256i_op0[0]) = 0x07fffc670800f086;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x311d9b643ec1fe01;
++  *((unsigned long *)&__m256i_op1[0]) = 0x344ade20fe00fd01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007f00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x311d73ad3ec2064a;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff80cbfffffdf8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000081500000104;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffa4fffffffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000002;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff80cbfffffdf8;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffa4fffffffd;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000008050501;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_op0[2]) = 0x019d00a20039fff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_op0[0]) = 0x019d00a2003a0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe007a01c40110;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001ffffe00200000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001ffffe00200000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0020001d001f;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000fef0ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000fef0ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080ff0080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080ff0080;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000400080ffc080;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007f010000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007f010000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvaddwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
+new file mode 100644
+index 000000000..2a4f29b50
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-1.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_op1[1]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_result[1]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0xa020202020206431;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffee0000ff4c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ff050000ff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fff90000ff78;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffa80000ff31;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffee0000ff4c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff050000ff3c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fff90000ff78;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffa80000ff31;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff810011;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000200000008;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000200000008;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7efefefe80ffffff;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00007fde00007fd4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007fe000007fe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00007fde00007fd4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fe000007fe0;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff7eddffff7ed3;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff7edfffff7edf;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff7eddffff7ed3;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff7edfffff7edf;
++  __m256i_out = __lasx_xvsadd_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffc81aca;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003a0a9512;
++  *((unsigned long *)&__m256i_op0[1]) = 0x280ac9da313863f4;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe032c739adcc6bbd;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100020001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000fffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffdffffffc81aca;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff3a0b9512;
++  *((unsigned long *)&__m256i_result[1]) = 0x280bc9db313a63f5;
++  *((unsigned long *)&__m256i_result[0]) = 0xe032c738adcb6bbb;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x1fe01e0100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffa;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf800d0d8ffffeecf;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf800d0d8ffffeecf;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf000f000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf000f000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xe800c0d8fffeeece;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff383efffedf0c;
++  *((unsigned long *)&__m256i_result[1]) = 0xe800c0d8fffeeece;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe0000fffe0002;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe0000fffe0002;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[2]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[0]) = 0x7575757575757575;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fc00fc00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fc00fc00;
++  *((unsigned long *)&__m256i_result[3]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fc00fc00;
++  *((unsigned long *)&__m256i_result[1]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fc00fc00;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000007b00f9007e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000007b00f9007e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000007b00f9007e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000007b00f9007e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000f601f200fc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000f601f200fc;
++  __m256i_out = __lasx_xvsadd_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000007f00000022;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000022;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000;
++  __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[0]) = 0x1c1b1a191c1b1a19;
++  __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffb7ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00010000002fff9e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffb5ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffb7ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_result[1]) = 0x00010000002fff9e;
++  *((unsigned long *)&__m256i_result[0]) = 0xffb5ff80ffd0ffd8;
++  __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000;
++  __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvsadd_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3fff3fff3fff4000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000403f3fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007ffe7ffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ffe7ffe7ffe8000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000807e7ffe;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[3]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[1]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c2c2c2;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000006040190d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000006040190d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000860601934;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000860601934;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvadd_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
+new file mode 100644
+index 000000000..a3afc9811
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsadd-2.c
+@@ -0,0 +1,350 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001700080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001700080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001700080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001700080;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x43d03bfff827ea21;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43dac1f2a3804ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x43d03bfff827e9f9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43e019c657c7d050;
++  *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21;
++  *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050;
++  *((unsigned long *)&__m256i_result[3]) = 0x86ff76ffff4eff42;
++  *((unsigned long *)&__m256i_result[2]) = 0x86ffffffffff9eff;
++  *((unsigned long *)&__m256i_result[1]) = 0x86ff76ffff4effff;
++  *((unsigned long *)&__m256i_result[0]) = 0x86ff32ffaeffffa0;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff8910ffff7e01;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff3573ffff8960;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff8910ffff1ca9;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffff5e5ffff8130;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff8910ffff7e01;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff3573ffff8960;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff8910ffff1ca9;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffff5e5ffff8130;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f90;
++  __m256i_out = __lasx_xvsadd_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffe200000020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffe200000020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[3]) = 0x7575ffff75757595;
++  *((unsigned long *)&__m256i_result[2]) = 0x7575ffff7575f575;
++  *((unsigned long *)&__m256i_result[1]) = 0x7575ffff75757595;
++  *((unsigned long *)&__m256i_result[0]) = 0x7575ffff7575f575;
++  __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000f0f0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000f0f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000f0f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000f0f0;
++  __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000001400;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000001400;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000003c01ff9;
++  __m256i_out = __lasx_xvsadd_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5;
++  __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000080;
++  __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsadd_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvand-xvandi-xvan.patch b/LoongArch-Add-tests-for-ASX-vector-xvand-xvandi-xvan.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3b8855b896519c258d162297a728eb2a85317627
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvand-xvandi-xvan.patch
@@ -0,0 +1,1854 @@
+From ceef99197d4db1d34e5c8aeae2b5492d831685d0 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:42:34 +0800
+Subject: [PATCH 105/124] LoongArch: Add tests for ASX vector
+ xvand/xvandi/xvandn/xvor/xvori/ xvnor/xvnori/xvxor/xvxori instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvand.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvandi.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvandn.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvnor.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvnori.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvor.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvori.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvorn.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvxor.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvxori.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvand.c        | 155 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvandi.c       | 196 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvandn.c       | 125 +++++++++
+ .../loongarch/vector/lasx/lasx-xvnor.c        | 170 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvnori.c       | 152 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvor.c         | 215 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvori.c        | 141 ++++++++++
+ .../loongarch/vector/lasx/lasx-xvorn.c        | 245 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvxor.c        | 185 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvxori.c       | 163 ++++++++++++
+ 10 files changed, 1747 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
+new file mode 100644
+index 000000000..e485786dd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvand.c
+@@ -0,0 +1,155 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfefee00000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfefee00000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefee00000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefee00000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000001c;
++  __m256i_out = __lasx_xvand_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
+new file mode 100644
+index 000000000..26cddc53a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandi.c
+@@ -0,0 +1,196 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xe2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000505;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffff8d9ffa7103d;
++  *((unsigned long *)&__m256i_result[3]) = 0x001151510a431048;
++  *((unsigned long *)&__m256i_result[2]) = 0x5b0b08425b09011a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x5b5b58595b031019;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x5b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff900000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff900000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x3f3f3f3900000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x3f3f3f3900000003;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[2]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[1]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[0]) = 0xbabababababababa;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xba);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000404040004040;
++  *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000404040004040;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x40);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff31;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x5e5e5e5e5e5e5e1c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x5e5e5e5e5e5e5e10;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x5e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x86);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f70000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x7f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xa3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0x98);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xd9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandi_b (__m256i_op0, 0xcc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
+new file mode 100644
+index 000000000..bc3590c21
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvandn.c
+@@ -0,0 +1,125 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1828f0e09bad7249;
++  *((unsigned long *)&__m256i_op0[2]) = 0x07ffc1b723953cec;
++  *((unsigned long *)&__m256i_op0[1]) = 0x61f2e9b333aab104;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6bf742aa0d7856a0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf259905a09c23be0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3a89167aeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000090100008492;
++  *((unsigned long *)&__m256i_result[2]) = 0xf000104808420300;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800000e20;
++  *((unsigned long *)&__m256i_result[0]) = 0x04082d108006284b;
++  __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffdfffdfffdfffd;
++  __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xefdfefdfefdfefdf;
++  __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d;
++  __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvandn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
+new file mode 100644
+index 000000000..3a491ecab
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnor.c
+@@ -0,0 +1,170 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xbf28b0686066be60;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffff6ff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffff6ff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000900ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000900ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8888888808888888;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0888888888888888;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8888888808888888;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0888888888888888;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x77777777f7777777;
++  *((unsigned long *)&__m256i_result[2]) = 0xf777777777777777;
++  *((unsigned long *)&__m256i_result[1]) = 0x77777777f7777777;
++  *((unsigned long *)&__m256i_result[0]) = 0xf777777777777777;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x40ff40ff40ff40ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x407b40ff40ff40f1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x40ff40ff40ff40ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x407b40ff40ff40f1;
++  *((unsigned long *)&__m256i_op1[3]) = 0x40ff40ff40ff40ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x407b40ff40ff40f1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x40ff40ff40ff40ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x407b40ff40ff40f1;
++  *((unsigned long *)&__m256i_result[3]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_result[2]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_result[1]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_result[0]) = 0xbf84bf00bf00bf0e;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffbdff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xa000a0009f80ffcc;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffbdff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xa000a0009f80ffcc;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[3]) = 0x6f6f6f6f6f6f6f6f;
++  *((unsigned long *)&__m256i_result[2]) = 0x6f6f6f6f6f6f6f6f;
++  *((unsigned long *)&__m256i_result[1]) = 0x6f6f6f6f6f6f6f6f;
++  *((unsigned long *)&__m256i_result[0]) = 0x6f6f6f6f6f6f6f6f;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff000300030000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffc000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff000300030000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffc000;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x800fffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x800fffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x800fffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x800fffffffffffff;
++  __m256i_out = __lasx_xvnor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
+new file mode 100644
+index 000000000..995a34c18
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvnori.c
+@@ -0,0 +1,152 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007773;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003373;
++  *((unsigned long *)&__m256i_result[3]) = 0xbbbbbbbbbbbbbbbb;
++  *((unsigned long *)&__m256i_result[2]) = 0xbbbbbbbbbbbb8888;
++  *((unsigned long *)&__m256i_result[1]) = 0xbbbbbbbbbbbbbbbb;
++  *((unsigned long *)&__m256i_result[0]) = 0xbbbbbbbbbbbb8888;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x44);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xdededededededede;
++  *((unsigned long *)&__m256i_result[2]) = 0xdededededededede;
++  *((unsigned long *)&__m256i_result[1]) = 0xdededededededede;
++  *((unsigned long *)&__m256i_result[0]) = 0xdededededededede;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x21);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x33);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x6f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xf7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x5858585858585858;
++  *((unsigned long *)&__m256i_result[2]) = 0x5858585858585858;
++  *((unsigned long *)&__m256i_result[1]) = 0x5858585858585858;
++  *((unsigned long *)&__m256i_result[0]) = 0x5858585858585858;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xa7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_result[1]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xc2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x9d9d9d9d9d9d9d8d;
++  *((unsigned long *)&__m256i_result[2]) = 0x9d9d9d9d9d9d9d9d;
++  *((unsigned long *)&__m256i_result[1]) = 0x9d9d9d9d9d9d9d8d;
++  *((unsigned long *)&__m256i_result[0]) = 0x9d9d9d9d9d9d9d9d;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x62);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_result[1]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2a2a2a2a2a;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0xd5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000081220000812c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000812000008120;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000081220000812c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000812000008120;
++  *((unsigned long *)&__m256i_result[3]) = 0xe9e968c9e9e968c1;
++  *((unsigned long *)&__m256i_result[2]) = 0xe9e968c9e9e968c9;
++  *((unsigned long *)&__m256i_result[1]) = 0xe9e968c9e9e968c1;
++  *((unsigned long *)&__m256i_result[0]) = 0xe9e968c9e9e968c9;
++  __m256i_out = __lasx_xvnori_b (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
+new file mode 100644
+index 000000000..27eef710d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvor.c
+@@ -0,0 +1,215 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000005e02;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000005e02;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000005e02;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000005e02;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fe37fff001fffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fe37fff001fffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fffffff;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x003f60041f636003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x003f60041f636003;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff1fff1fff1fff1;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x800080ff800080ff;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff80007fff0000;
++  __m256i_out = __lasx_xvor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
+new file mode 100644
+index 000000000..ee91af95f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvori.c
+@@ -0,0 +1,141 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x6c6c6c6c6c6c6c6c;
++  *((unsigned long *)&__m256i_result[2]) = 0x6c6c6c6c6c6c6c6c;
++  *((unsigned long *)&__m256i_result[1]) = 0x6c6c6c6c6c6c6c6c;
++  *((unsigned long *)&__m256i_result[0]) = 0x6c6c6c6c6c6c6c6c;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x9f9f9f9f9f9f9f9f;
++  *((unsigned long *)&__m256i_result[2]) = 0x9f9f9f9fffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x9f9f9f9f9f9f9f9f;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff9fffffffff;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x9f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff7effffff46;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff7effffff46;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x42);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[1]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0xbf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x2c2c2c2c2c2c2c2c;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x2c2c2c2c2c2c2c2c;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x5252525252525252;
++  *((unsigned long *)&__m256i_result[2]) = 0x5252525252525252;
++  *((unsigned long *)&__m256i_result[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m256i_result[0]) = 0x5252525252525252;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x52);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fe363637fe36363;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x63);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefeffe0e0e0;
++  *((unsigned long *)&__m256i_result[1]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefeffe0e0e0;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0xe0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_result[2]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_result[1]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_result[0]) = 0x6b6b6b6b6b6b6b6b;
++  __m256i_out = __lasx_xvori_b (__m256i_op0, 0x6b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
+new file mode 100644
+index 000000000..fa6cdff31
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvorn.c
+@@ -0,0 +1,245 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x40d74f979f99419f;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01480000052801a2;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffdcff64;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002555500000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002555500000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffdaaaaffffffff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00220021004a007e;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffdfffffffdffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffddffdeffb5ff8d;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffdfffffffdffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffddffdeffb5ff8d;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00feffff00fe81;
++  *((unsigned long *)&__m256i_result[2]) = 0xfe01fe51ff00ff40;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00feffff00fe81;
++  *((unsigned long *)&__m256i_result[0]) = 0xfe01fe51ff00ff40;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0df9f8e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0df9f8e;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0df9f8f;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0df9f8f;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff7fffffff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff7fffffff7fff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80dfff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffdf80dfff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x498100814843ffe1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4981008168410001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x498100814843ffe1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4981008168410001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff896099cbdbfff1;
++  *((unsigned long *)&__m256i_result[2]) = 0xc987ffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff896099cbdbfff1;
++  *((unsigned long *)&__m256i_result[0]) = 0xc987ffffffffffff;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffdf5b000041b0;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffdf5b000041b0;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_result[2]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_result[0]) = 0x000020a4ffffbe4f;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0xffbffffffffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_result[1]) = 0xffbffffffffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffa;
++  __m256i_out = __lasx_xvorn_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
+new file mode 100644
+index 000000000..18b36c873
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxor.c
+@@ -0,0 +1,185 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256i_result[2]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256i_result[1]) = 0x7be2468acf15f39c;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00c100c100c100c1;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00c100c100c100c1;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100000001000100;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000f91;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000f91;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f90;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6040190d20227a78;
++  *((unsigned long *)&__m256i_op0[1]) = 0x132feeabd2d33b38;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x9fe7fffffffff32e;
++  *((unsigned long *)&__m256i_result[2]) = 0x6040190ddfdd8587;
++  *((unsigned long *)&__m256i_result[1]) = 0xecd011542d2cc4c7;
++  *((unsigned long *)&__m256i_result[0]) = 0x6040190dffffffff;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvxor_v (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
+new file mode 100644
+index 000000000..8fd6298f7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvxori.c
+@@ -0,0 +1,163 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000005e02;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000005e02;
++  *((unsigned long *)&__m256i_result[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[2]) = 0xc2c2c2c2c2c29cc0;
++  *((unsigned long *)&__m256i_result[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[0]) = 0xc2c2c2c2c2c29cc0;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xc2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1616161616161616;
++  *((unsigned long *)&__m256i_op0[2]) = 0x161616167fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe16167f161616;
++  *((unsigned long *)&__m256i_op0[0]) = 0x161616167fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xc7c7c7c7c7c7c7c7;
++  *((unsigned long *)&__m256i_result[2]) = 0xc7c7c7c7ae2e2e2e;
++  *((unsigned long *)&__m256i_result[1]) = 0xae2fc7c7aec7c7c7;
++  *((unsigned long *)&__m256i_result[0]) = 0xc7c7c7c7ae2e2e2e;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xd1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x5353535353535353;
++  *((unsigned long *)&__m256i_result[2]) = 0x5353535353535353;
++  *((unsigned long *)&__m256i_result[1]) = 0x5353535353535353;
++  *((unsigned long *)&__m256i_result[0]) = 0x5353535353535353;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x53);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x6d6d6d6d6d6d6d6d;
++  *((unsigned long *)&__m256i_result[2]) = 0x6d6d6d6d6d6d6d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x6d6d6d6d6d6d6d6d;
++  *((unsigned long *)&__m256i_result[0]) = 0x6d6d6d6d6d6d6d6d;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x6d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_result[2]) = 0x8e8e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_result[1]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_result[0]) = 0x8e8e8e8e8e8e8e8e;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x71);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[2]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[0]) = 0x7575757575757575;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x75);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xa4a4a4a4a4a4a4a4;
++  *((unsigned long *)&__m256i_result[2]) = 0xa4a4a4a4a4a4a4a4;
++  *((unsigned long *)&__m256i_result[1]) = 0xa4a4a4a4a4a4a4a4;
++  *((unsigned long *)&__m256i_result[0]) = 0xa4a4a4a4a4a4a4a4;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_result[2]) = 0xa1a1a1a15e5e5e5e;
++  *((unsigned long *)&__m256i_result[1]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_result[0]) = 0xa1a1a1a15e5e5e5e;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8d8d72728d8d7272;
++  *((unsigned long *)&__m256i_result[2]) = 0x8d8d72728d8d8d8d;
++  *((unsigned long *)&__m256i_result[1]) = 0x8d8d72728d8d7272;
++  *((unsigned long *)&__m256i_result[0]) = 0x8d8d72728d8d8d8d;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x8d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xb3b3b3b3b3b3b3b3;
++  *((unsigned long *)&__m256i_result[2]) = 0xb3b3b3b3b3b3b3b3;
++  *((unsigned long *)&__m256i_result[1]) = 0xb3b3b3b3b3b3b3b3;
++  *((unsigned long *)&__m256i_result[0]) = 0xb3b3b3b3b3b3b3b3;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x4c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f0000ff807f81;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f0000ff807f81;
++  *((unsigned long *)&__m256i_result[3]) = 0x5d5d5d5d5d22a2a2;
++  *((unsigned long *)&__m256i_result[2]) = 0xa2dda2a25d22dd23;
++  *((unsigned long *)&__m256i_result[1]) = 0x5d5d5d5d5d22a2a2;
++  *((unsigned long *)&__m256i_result[0]) = 0xa2dda2a25d22dd23;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xa2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xd3d3d3d3d3d3d3d3;
++  *((unsigned long *)&__m256i_result[2]) = 0xd3d3d3d3d3d3d3d3;
++  *((unsigned long *)&__m256i_result[1]) = 0xd3d3d3d3d3d3d3d3;
++  *((unsigned long *)&__m256i_result[0]) = 0xd3d3d3d3d3d3d3d3;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0xd3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_result[3]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_result[2]) = 0x8768876887688769;
++  *((unsigned long *)&__m256i_result[1]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_result[0]) = 0x8768876887688769;
++  __m256i_out = __lasx_xvxori_b (__m256i_op0, 0x7d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvbitclr-xvbitclr.patch b/LoongArch-Add-tests-for-ASX-vector-xvbitclr-xvbitclr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8854a1369811b4daaf9190033036e786b0dcab84
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvbitclr-xvbitclr.patch
@@ -0,0 +1,5057 @@
+From a6390d1a6619b6bee4fc87b15ffd25936704eb21 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:57:18 +0800
+Subject: [PATCH 108/124] LoongArch: Add tests for ASX vector
+ xvbitclr/xvbitclri/xvbitrev/xvbitrevi/
+ xvbitsel/xvbitseli/xvbitset/xvbitseti/xvclo/xvclz/xvpcnt instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitset.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvclo.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvclz.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvbitclr.c     | 635 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvbitclri.c    | 515 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvbitrev.c     | 650 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvbitrevi.c    | 317 +++++++++
+ .../loongarch/vector/lasx/lasx-xvbitsel.c     | 134 ++++
+ .../loongarch/vector/lasx/lasx-xvbitseli.c    | 185 +++++
+ .../loongarch/vector/lasx/lasx-xvbitset.c     | 620 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvbitseti.c    | 405 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvclo.c        | 449 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvclz.c        | 504 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvpcnt.c       | 526 ++++++++++++++
+ 11 files changed, 4940 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
+new file mode 100644
+index 000000000..def7b588e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclr.c
+@@ -0,0 +1,635 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000105fffffefb;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff02000000fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000105fffffefb;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff02000000fe;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7ffffffffffff1f;
++  *((unsigned long *)&__m256i_result[2]) = 0xbffffffffffffeff;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7ffffffffffff1f;
++  *((unsigned long *)&__m256i_result[0]) = 0xbffffffffffffeff;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff7fffdefd;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0002555500000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002555500000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002555400000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002555400000000;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000002a542a;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000fff00004542;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000fff00004542;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000fff00004542;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000fff00004542;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00fe00feff02fe;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00fe00feff027f;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00fe00feff02fe;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff027f;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000023a20000a121;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000179e0000951d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000023a20000a121;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000179e0000951d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000165e0000480d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000165e0000480d;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fef;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fef;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fef;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fef;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fee;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fefe7f00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fefe7f00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000;
++  __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffefffe00000000;
++  __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fe70000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fe70000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fe70000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fe70000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007f7f80007fa3;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f670000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007f7f80007fa3;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f670000;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffeffff10000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffeffff10000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ffffffffffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ffffffffffffffe;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3e8000003e800000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3e8000003e800000;
++  *((unsigned long *)&__m256i_result[1]) = 0x3e8000003e800000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3e8000003e800000;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00001ff8d8d8c000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00001ff8d8d90000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00001ff8d8d8c000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00001ff8d8d90000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001ef8d8d8c000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00001ef8d8d80000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001ef8d8d8c000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001ef8d8d80000;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fffe0000000c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffe0000000c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefee00000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefee00000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fff000000000;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ffff88ff88;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ff007f007f00;
++  __m256i_out = __lasx_xvbitclr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffeffffff00;
++  __m256i_out = __lasx_xvbitclr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
+new file mode 100644
+index 000000000..713eb19d5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitclri.c
+@@ -0,0 +1,515 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df8d7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8b470f;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffe06df0d7;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffbe8b470f;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010ffc80010ff52;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff1ffca0011ffcb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010ffc80010ff52;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff1ffca0011ffcb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010bfc80010bf52;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff1bfca0011bfcb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010bfc80010bf52;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff1bfca0011bfcb;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000005536aaaaac;
++  *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000005536aaaaac;
++  *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000005136aaaaa8;
++  *((unsigned long *)&__m256i_result[2]) = 0x55515551aaaaaaa8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000005136aaaaa8;
++  *((unsigned long *)&__m256i_result[0]) = 0x55515551aaaaaaa8;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fdf000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fdf000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fdf7fff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fdf7fff00000000;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x35);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000fd0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000fd0000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f7e7f7e7f7e7f7e;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f7e7f7e;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f7e7f7e7f7e0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007e7f7e;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x24);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf01010153a10101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5b7f01ff5b7f10ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdf01010153a10101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5b7f01ff5b7f10ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xcf01010143a10101;
++  *((unsigned long *)&__m256i_result[2]) = 0x4b6f01ef4b6f00ef;
++  *((unsigned long *)&__m256i_result[1]) = 0xcf01010143a10101;
++  *((unsigned long *)&__m256i_result[0]) = 0x4b6f01ef4b6f00ef;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xdfffffffdfffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xdfffffffdfffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff7fff7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff7f027f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff7f0100;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00fe00fe7f027f;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31;
++  *((unsigned long *)&__m256i_result[3]) = 0x8011ffae800c000c;
++  *((unsigned long *)&__m256i_result[2]) = 0x00baff050083ff3c;
++  *((unsigned long *)&__m256i_result[1]) = 0x80b900b980380038;
++  *((unsigned long *)&__m256i_result[0]) = 0x0017ffa8008eff31;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_op0[2]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_op0[0]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_result[2]) = 0x23222120171e151c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_result[0]) = 0x23222120171e151c;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fefe0000fefe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fefe0000fefe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00fe00fe00fe00fe;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x26);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fe1ffe0ffe1ffe0;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_d (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffb;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffb;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800200028;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffee00ba;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffee00ba;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xefefefefefee00aa;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xefefefefefee00aa;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000f788f788;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000f788f788;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitclri_w (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffefffefffefffe;
++  __m256i_out = __lasx_xvbitclri_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
+new file mode 100644
+index 000000000..2b0e7f8d1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrev.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0501030102141923;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffd5020738b43ddb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x010200023b8e4174;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff4ff4e11410b40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01fa022a01a401e5;
++  *((unsigned long *)&__m256i_op1[2]) = 0x030d03aa0079029b;
++  *((unsigned long *)&__m256i_op1[1]) = 0x024c01f901950261;
++  *((unsigned long *)&__m256i_op1[0]) = 0x008102c2008a029f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101070102041903;
++  *((unsigned long *)&__m256i_result[2]) = 0xdfd506073ab435db;
++  *((unsigned long *)&__m256i_result[1]) = 0x110202023bae4176;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff6ff4a15418b40;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0edf8d7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8bc70f;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe0edf8d7;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8bc70f;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffe06df8d7;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffbe8b470f;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000010000ffe1;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000101001e18;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000010000ffe1;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000101001e18;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1d1a1b181d1a1b18;
++  *((unsigned long *)&__m256i_result[2]) = 0x9c9b9a999c9b9a99;
++  *((unsigned long *)&__m256i_result[1]) = 0x1d1a1b181d1a1b18;
++  *((unsigned long *)&__m256i_result[0]) = 0x9c9b9a999c9b9a99;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000033e87ef1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x80008000b3e8fef1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x80008000802ea100;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c80780000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c80780000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0200000200000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2c27000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0200000200000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2c27000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000400000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000400000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff00ff00ffff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff000000ff00ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffff00ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00000000ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000180000000;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x8001800180018001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x8001800180018001;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080000200000003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010002;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f6f7f7f7f6;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f6f7f7f7f6;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f6f7f7f7f6;
++  *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f6f7f7f7f6;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7eeefefefefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x7eeefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000010000fffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000010000fffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000010000fffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000010000fffe;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitrev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000008000b;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000008000b;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000008000a;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000008000a;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000a;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000100010001fffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000100010001fffe;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x40fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x40fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x40fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x40fe00fe00fe00fe;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffc0007ffe0002;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000400000018002;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffc0007ffe0002;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000400000018002;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefe01010101;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefe01010101;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000006d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000400008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000006d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000400008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x010101010101016c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101410128;
++  *((unsigned long *)&__m256i_result[1]) = 0x010101010101016c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101410128;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x800000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x800000ff000000ff;
++  __m256i_out = __lasx_xvbitrev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffb6811fffff80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff97c120000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffb6811fffff80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff97c120000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefe7f;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefe7f;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010081;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100018080;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010110;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010110;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitrev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvbitrev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
+new file mode 100644
+index 000000000..2b8327d91
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitrevi.c
+@@ -0,0 +1,317 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff00ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff00ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x01010101fe01fe01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x01010101fe01fe01;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_result[1]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_result[0]) = 0x2000200020002000;
++  __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7ff77fff7ff7;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7ff77fff7ff7;
++  __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020001;
++  *((unsigned long *)&__m256i_result[3]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010101010121011;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040;
++  __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000020000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000020000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000020000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000020000000000;
++  __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x29);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001c4e8ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001c4e8ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0081c4e8ff7fffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0081c4e8ff7fffff;
++  __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f017ffd;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f017ffd;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000;
++  __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002080100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002080100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000008000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000a080100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000008000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000a080100;
++  __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100010001000100;
++  __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_result[3]) = 0xfee1057c01e10581;
++  *((unsigned long *)&__m256i_result[2]) = 0x011ec1210161057b;
++  *((unsigned long *)&__m256i_result[1]) = 0xfee1057c01e10581;
++  *((unsigned long *)&__m256i_result[0]) = 0x011ec1210161057b;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0xfdfdfdfdfdfdfdfd;
++  *((unsigned long *)&__m256i_result[2]) = 0xe27fe2821d226278;
++  *((unsigned long *)&__m256i_result[1]) = 0xfdfdfdfdfdfdfdfd;
++  *((unsigned long *)&__m256i_result[0]) = 0xe27fe2821d226278;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800000008;
++  __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_result[3]) = 0x080808000828082f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080008280820;
++  *((unsigned long *)&__m256i_result[1]) = 0x080808000828082f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080008280820;
++  __m256i_out = __lasx_xvbitrevi_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000800000000000;
++  __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0200000002000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x02000000fdffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0200000002000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x02000000fdffffff;
++  __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffeffed;
++  __m256i_out = __lasx_xvbitrevi_d (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xc03b000200020002;
++  *((unsigned long *)&__m256i_result[2]) = 0xc03b000200020002;
++  *((unsigned long *)&__m256i_result[1]) = 0xc03b000200020002;
++  *((unsigned long *)&__m256i_result[0]) = 0xc03b000200020002;
++  __m256i_out = __lasx_xvbitrevi_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff81007fff0100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff81007fff0100;
++  __m256i_out = __lasx_xvbitrevi_w (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
+new file mode 100644
+index 000000000..c9847a615
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitsel.c
+@@ -0,0 +1,134 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xe9e9e9e9e9e9e9e9;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xe9e9e9e9e9e9e9e9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001f001f02c442af;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001f001f02c442af;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_result[3]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_result[1]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000c40086;
++  __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbe21000100000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000505300000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbe21000100000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000505300000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00005053000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00005053000000ff;
++  __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000040000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000040000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000040000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000040000;
++  __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvbitsel_v (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
+new file mode 100644
+index 000000000..1edb4fca2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseli.c
+@@ -0,0 +1,185 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xef);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xcd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffd10000006459;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000441000000004;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000040400000104;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000007fff01ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdb8e209d0cce025a;
++  *((unsigned long *)&__m256i_result[3]) = 0x88888a6d0962002e;
++  *((unsigned long *)&__m256i_result[2]) = 0xdb8a3109fe0f0020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000007fff01fffb;
++  *((unsigned long *)&__m256i_result[0]) = 0xdb8e20990cce025a;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x88);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000002b902b3e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000002b902b3e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000002a102a3a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000002a102a3a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xd9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000090909090;
++  *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000090909090;
++  *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x95);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4545454545454545;
++  *((unsigned long *)&__m256i_result[2]) = 0x4545454545454545;
++  *((unsigned long *)&__m256i_result[1]) = 0x4545454545454545;
++  *((unsigned long *)&__m256i_result[0]) = 0x4545454545454545;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x4d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x21bb481000ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01bf481000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x21bb481000ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01bf481000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xb1b3b1b1b1b7b1b1;
++  *((unsigned long *)&__m256i_result[2]) = 0xb1b7b1b1b1b1b1b1;
++  *((unsigned long *)&__m256i_result[1]) = 0xb1b3b1b1b1b7b1b1;
++  *((unsigned long *)&__m256i_result[0]) = 0xb1b7b1b1b1b1b1b1;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xb7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc03fc03fc03fc03f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc03fc03fc03fc03f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000002d;
++  *((unsigned long *)&__m256i_result[2]) = 0xc02dc02dc02dc02d;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000002d;
++  *((unsigned long *)&__m256i_result[0]) = 0xc02dc02dc02dc02d;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0xed);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x60600000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x6060000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x60600000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x6060000000000000;
++  __m256i_out = __lasx_xvbitseli_b (__m256i_op0, __m256i_op1, 0x60);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
+new file mode 100644
+index 000000000..c195cd91c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitset.c
+@@ -0,0 +1,620 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff000000010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000095120000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc9da000063f50000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc7387fff6bbfffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffdffffffc81aca;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff3a0b9512;
++  *((unsigned long *)&__m256i_op1[1]) = 0x280bc9db313a63f5;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe032c738adcb6bbb;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff800001010400;
++  *((unsigned long *)&__m256i_result[2]) = 0x000180009d120004;
++  *((unsigned long *)&__m256i_result[1]) = 0xc9da080067f50020;
++  *((unsigned long *)&__m256i_result[0]) = 0xc73c7fff6bbfffff;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffff8046867f79;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00010001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00010001;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00001f41ffffbf00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x010180068080fff9;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x3ff1808001020101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x3ff1808001020101;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0800000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010103;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000040000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000000010000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000040000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000040000010;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbea2e127c046721f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1729c073816edebe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xde91f010000006f9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5ef1f90efefaf30d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000060000108;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001060005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fef0001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xbfa3e127c147721f;
++  *((unsigned long *)&__m256i_result[2]) = 0x1729c173836edfbe;
++  *((unsigned long *)&__m256i_result[1]) = 0xdf91f111808007fb;
++  *((unsigned long *)&__m256i_result[0]) = 0x5ff1f90ffffbf30f;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_result[2]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_result[1]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_result[0]) = 0xe161616161614f61;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01010101010000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x8080808280808082;
++  *((unsigned long *)&__m256i_result[2]) = 0x8080808280808082;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808280808080;
++  *((unsigned long *)&__m256i_result[0]) = 0x8080808280808082;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000082f8989a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000d58f43c8;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010183f9999b;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x01010101d58f43c9;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ffe7ffd7ffe7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ffe7ffd7ffe8001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0707feb70707b7d1;
++  *((unsigned long *)&__m256i_result[2]) = 0x65baa7efea95a985;
++  *((unsigned long *)&__m256i_result[1]) = 0x0707feb70707b7d1;
++  *((unsigned long *)&__m256i_result[0]) = 0x65baa7ef6a95a987;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x41cc5bb8a95fd1eb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x41cc5bb8a95fd1eb;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7b7b7b7b80000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xcacacb1011040500;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7b7b7b7b80000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xcacacb1011040500;
++  *((unsigned long *)&__m256i_result[3]) = 0x49cc5bb8a95fd1eb;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ff4080102102001;
++  *((unsigned long *)&__m256i_result[1]) = 0x49cc5bb8a95fd1eb;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ff4080102102001;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010401;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010401;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010401;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010401;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0xdf01010153a10101;
++  *((unsigned long *)&__m256i_result[2]) = 0x5b7f01ff5b7f10ff;
++  *((unsigned long *)&__m256i_result[1]) = 0xdf01010153a10101;
++  *((unsigned long *)&__m256i_result[0]) = 0x5b7f01ff5b7f10ff;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000080000001000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000080000001000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_op0[2]) = 0x23222120171e151c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_op0[0]) = 0x23222120171e151c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x201fdfe0201fdfe0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x201fdfe0201fdfe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010127272525;
++  *((unsigned long *)&__m256i_result[2]) = 0x23a2a121179e951d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010127272525;
++  *((unsigned long *)&__m256i_result[0]) = 0x23a2a121179e951d;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x800080008000ffee;
++  *((unsigned long *)&__m256i_result[2]) = 0x800080008000ffee;
++  *((unsigned long *)&__m256i_result[1]) = 0x800080008000ffee;
++  *((unsigned long *)&__m256i_result[0]) = 0x800080008000ffee;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000100010001ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000100010001ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000100010001ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000100010001ffff;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00010000fffe0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00010000fffe0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00010000fffe0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00010000fffe0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x01010101010101c9;
++  __m256i_out = __lasx_xvbitset_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000affff800b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000affff800b;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000affff800b;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000affff800b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000800;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000400010004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000400010004;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000f0001000f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000f0001000d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000f0001000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000f0001000d;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f010000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f010000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f010100000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f010100000101;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvbitset_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x498100814843ffe1;
++  *((unsigned long *)&__m256i_result[2]) = 0x4981008168410001;
++  *((unsigned long *)&__m256i_result[1]) = 0x498100814843ffe1;
++  *((unsigned long *)&__m256i_result[0]) = 0x4981008168410001;
++  __m256i_out = __lasx_xvbitset_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000090b0906;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100002000;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op0[2]) = 0x03acfc5303260e80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op0[0]) = 0x03acfc5303260e80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_result[3]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_result[2]) = 0x03acfc5303260e81;
++  *((unsigned long *)&__m256i_result[1]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_result[0]) = 0x03acfc5303260e81;
++  __m256i_out = __lasx_xvbitset_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
+new file mode 100644
+index 000000000..47f37e4b3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvbitseti.c
+@@ -0,0 +1,405 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcf800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007f7f00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007f7f00007fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000040000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000040000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007fff;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_result[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202020202020202;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000800000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800000000;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000004000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000004000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000004000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000004000000;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000013;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001000000fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000013;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001000000fb;
++  *((unsigned long *)&__m256i_result[3]) = 0x8080808180808093;
++  *((unsigned long *)&__m256i_result[2]) = 0x80808081808080fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808180808093;
++  *((unsigned long *)&__m256i_result[0]) = 0x80808081808080fb;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010000000100000;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++  __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_result[3]) = 0x1000100054445443;
++  *((unsigned long *)&__m256i_result[2]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_result[1]) = 0x1000100054445443;
++  *((unsigned long *)&__m256i_result[0]) = 0x7bbbbbbbf7777778;
++  __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffa2078fffa2074;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffa2078fffa2074;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffeffebfb7afb62;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[3]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[2]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[1]) = 0xe7e7e7e7e7e7e7e7;
++  *((unsigned long *)&__m256i_result[0]) = 0xe7e7e7e7e7e7e7e7;
++  __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020206431;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0003030300000300;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0003030300000300;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0003030300000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0003030300000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0043030300400300;
++  *((unsigned long *)&__m256i_result[2]) = 0x0043030300400300;
++  *((unsigned long *)&__m256i_result[1]) = 0x0043030300400100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0043030300400100;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x223d76f09f3881ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3870ca8d013e76a0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x223d76f09f37e357;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ec0a1b2aba7ed0;
++  *((unsigned long *)&__m256i_result[3]) = 0x223d76f09f3881ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x3870ca9d013e76b0;
++  *((unsigned long *)&__m256i_result[1]) = 0x223d76f09f37e357;
++  *((unsigned long *)&__m256i_result[0]) = 0x43ec0a1b2aba7ed0;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f780000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f80000fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f780000ff80;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f8f7f8f800f800;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003f784000ff80;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f8f7f84000fff9;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003f784000ff80;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe1ffe0ffe1ffe0;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffefef800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffefef800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000008000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffefef800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000008000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffefef800;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x27);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000000020000;
++  __m256i_out = __lasx_xvbitseti_w (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[2]) = 0x00020002000230ba;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[0]) = 0x00020002000230ba;
++  __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x8100810081008100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x8100810081008100;
++  __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007878;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007878;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000107878;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000107878;
++  __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256i_result[3]) = 0x4000400140004001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffff2f640006f48;
++  *((unsigned long *)&__m256i_result[1]) = 0x4000400140004001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffff2f640006f48;
++  __m256i_out = __lasx_xvbitseti_h (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvbitseti_d (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_result[3]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_result[2]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_result[1]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_result[0]) = 0xfd12fd12fd12fd12;
++  __m256i_out = __lasx_xvbitseti_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+new file mode 100644
+index 000000000..dbc52f92b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclo.c
+@@ -0,0 +1,449 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffd1b24e00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcea54ffff29a8;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff8cad88ff8306b4;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffc1278fffce4c8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0802010000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0806030008080001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0801010108010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0806000008060302;
++  __m256i_out = __lasx_xvclo_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfafafafafafafafa;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fefefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0fff0fff00000020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0fff0fff00000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvclo_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01fc03fc01fc03fc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01fc03fc01fc03fc;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000200000001e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000200000001e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000808;
++  __m256i_out = __lasx_xvclo_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xd04752cdd5543b56;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6906e68064f3d78b;
++  *((unsigned long *)&__m256i_op0[1]) = 0xd04752cdd5543b56;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6906e68064f3d78b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000300000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000300000002;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc0000000c0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc000000080400000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc0000000c0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc000000080400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000000010000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010000100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010000100000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000004000000020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000004000000020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000201220001011c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000201220001011c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000100010;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclo_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
+new file mode 100644
+index 000000000..89191c467
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvclz.c
+@@ -0,0 +1,504 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x04481940fbb7e6bf;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf2781966e6991966;
++  *((unsigned long *)&__m256i_op0[1]) = 0x51258839aeda77c6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xcf25f0e00f1ff0e0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0501030100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001030100000301;
++  *((unsigned long *)&__m256i_result[1]) = 0x0102000200000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000004030000;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000f0000000f;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000900000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000900000000;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++  __m256i_out = __lasx_xvclz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080807;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080807;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100001;
++  __m256i_out = __lasx_xvclz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0008000000080000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0008000000080000;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000100010;
++  __m256i_out = __lasx_xvclz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000018;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000019;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000200000001e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000019;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0b085bfc00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0b004bc000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0b085bfc00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0b004bc000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0404010008080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0408010008080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0404010008080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0408010008080808;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0404010008080808;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0408010008080808;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0404010008080808;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0408010008080808;
++  *((unsigned long *)&__m256i_result[3]) = 0x0505070804040404;
++  *((unsigned long *)&__m256i_result[2]) = 0x0504070804040404;
++  *((unsigned long *)&__m256i_result[1]) = 0x0505070804040404;
++  *((unsigned long *)&__m256i_result[0]) = 0x0504070804040404;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006ffff0004ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0002ffff0000ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006ffff0004ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002ffff0000ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000000d;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000e;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000000d;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000e;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000032;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000003c000000032;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[2]) = 0x001000100010000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[0]) = 0x001000060010000a;
++  __m256i_out = __lasx_xvclz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000000e;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000000e;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000c;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000008080800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000008080800;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0004001000100004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0004000400100010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0004001000100004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000400100010;
++  __m256i_out = __lasx_xvclz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000029;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000029;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvclz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000027;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvclz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvclz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
+new file mode 100644
+index 000000000..d2e742e81
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpcnt.c
+@@ -0,0 +1,526 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x639c3fffb5dffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb8c7800094400001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008000e000c000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0009000100040001;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op0[2]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op0[0]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_result[3]) = 0x0004000400040805;
++  *((unsigned long *)&__m256i_result[2]) = 0x0004000400040805;
++  *((unsigned long *)&__m256i_result[1]) = 0x0004000400040805;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000400040805;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcf800fffcf800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0008000800000003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0806050008060500;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008000800000003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000000100;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002e2100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000040002;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x34000000fff00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff6e00000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3380000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x363c0000fff3c000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000030000000c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000500000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800000010;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00c100c100c100c1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00c100c100c100c1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000080800000808;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808080808;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_result[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100000100000001;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000020;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001555;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000015554001c003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000001555;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000015554001c003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000304;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000030401010202;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000304;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000030401010202;
++  __m256i_out = __lasx_xvpcnt_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000a0008;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010001000030000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010001000030000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010001000030000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000030000;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000040000001b;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000040000001b;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000b000b000b000b;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000b000b000b000b;
++  __m256i_out = __lasx_xvpcnt_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001f00000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001f00000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001200000012;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvpcnt_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvext2xv-xvexth-x.patch b/LoongArch-Add-tests-for-ASX-vector-xvext2xv-xvexth-x.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b633c301813295c0ccf9e2c44ee167cd489ac720
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvext2xv-xvexth-x.patch
@@ -0,0 +1,4600 @@
+From 5cf957f25df755431bc77845fecb5bec0624c097 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:51:19 +0800
+Subject: [PATCH 118/124] LoongArch: Add tests for ASX vector
+ xvext2xv/xvexth/xvextins/xvilvh/xvilvl/xvinsgr2vr/ xvinsve0/xvprem/xvpremi
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextrins.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvilvh.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvilvl.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvprem.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpremi.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvext2xv-1.c   | 515 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvext2xv-2.c   | 669 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvexth-1.c     | 350 +++++++++
+ .../loongarch/vector/lasx/lasx-xvexth-2.c     | 592 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvextrins.c    | 515 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvilvh.c       | 530 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvilvl.c       | 620 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvinsgr2vr.c   | 272 +++++++
+ .../loongarch/vector/lasx/lasx-xvinsve0.c     | 380 ++++++++++
+ .../loongarch/vector/lasx/lasx-xvprem.c       |  20 +
+ .../loongarch/vector/lasx/lasx-xvpremi.c      |  20 +
+ 11 files changed, 4483 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
+new file mode 100644
+index 000000000..94f31019c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-1.c
+@@ -0,0 +1,515 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2b2b2b2b1bd5d5d6;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2a2a2a2af2d5d5d6;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2b2b2b2b1bd5d5d6;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2a2a2a2af2d5d5d6;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002a0000002a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002a0000002a;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff2ffffffd5;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffd5ffffffd6;
++  __m256i_out = __lasx_vext2xv_w_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_w_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff0;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_d_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000017f;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff00fff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff00fffffff0;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe20;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001dfffffe1f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0200000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0200000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000020000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000498000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x00004843ffffffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000498000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000684000000000;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000017;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00020002ff820002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00020002ff820002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff82;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x03fbfffc03fc07fc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x03fbfffc03fc07fc;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_d_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xd100645944100004;
++  *((unsigned long *)&__m256i_op0[2]) = 0xd1908469108400d1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000404040104;
++  *((unsigned long *)&__m256i_op0[0]) = 0xd1108199714910f9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000004040104;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffd1108199;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000714910f9;
++  __m256i_out = __lasx_vext2xv_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_vext2xv_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
+new file mode 100644
+index 000000000..d93201bc4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvext2xv-2.c
+@@ -0,0 +1,669 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x639c3fffb5dffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb8c7800094400001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0063009c003f00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00b500df00ff00fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x00b800c700800000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0094004000000001;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00aa00ab00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00aa00ab00ff00ff;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01ff01ff01c0003e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01ff01ff01c0003e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000100ff000100ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000100c00000003e;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000f0001000f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000f0001000d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000f0001000f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000f0001000d;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000010000000f;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000010000000f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000010000000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000010000000d;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080;
++  __m256i_out = __lasx_vext2xv_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000005f000000f0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000f9;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000f3;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000029;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000029;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000029;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_vext2xv_wu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffefd;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fd;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff7fff7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff7f027f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff7f0100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00fe7f027f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000003fbfc04;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001fdfe02;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000003fbfc04;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fdfe02;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fd;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ef;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000002e0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000002e0000fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000002e;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000002e;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000002e;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcfee0fe00ffe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffcfee0fe00ffe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffc0000fee0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fe000000ffe0;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001b00fd0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001b00fd0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000001b;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000001b;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000fd00000000;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000017f7f7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000017f7f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000017f00007f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007f0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff0000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff0000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff00000000ff;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000498000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x000048430000ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000498000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000684000000000;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff7eddffff7ed3;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff7edfffff7edf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff7eddffff7ed3;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff7edfffff7edf;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00007edd;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00007ed3;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00007edf;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00007edf;
++  __m256i_out = __lasx_vext2xv_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000801380f380fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000801380f300fb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000008013;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000080f3;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fb;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_vext2xv_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
+new file mode 100644
+index 000000000..9fb4e3ff0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-1.c
+@@ -0,0 +1,350 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[3]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_result[2]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_result[1]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_result[0]) = 0x005500550055ffab;
++  __m256i_out = __lasx_xvexth_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff010ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff010ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_h_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffec;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffebd8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffec;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffebd8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffec;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffec;
++  __m256i_out = __lasx_xvexth_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvexth_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffff1cffffff1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff1cffffff1c;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffff1cffffff1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff1cffffff1c;
++  __m256i_out = __lasx_xvexth_w_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001010101;
++  __m256i_out = __lasx_xvexth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvexth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010;
++  __m256i_out = __lasx_xvexth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvexth_d_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000007f00340040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000007f000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffec;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffec;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00080000002c0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0008000000080000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00080000002c0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0008000000080000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00080000002c0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00080000002c0000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
+new file mode 100644
+index 000000000..fe6ff15d8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvexth-2.c
+@@ -0,0 +1,592 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000045f3fb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000045f3fb;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004500f300fb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004500f300fb;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x004100df00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00c000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x004100df00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00c000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f00ff007f00ff;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000200010002;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0080000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_hu_bu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffff8046867f79;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000f3280000dfff;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffa30000165a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000104000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffa30000165a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000104000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000165a;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001010600000106;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001010600000106;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007cfcfd80000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007cfcfd80000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000020ff790020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000020ff790020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffec75c2d209f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffec75c2d209f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000001ff000003fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000001ff000003fe;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010100000101;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvexth_wu_hu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_du_wu (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048;
++  *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x36722a7e66972cd6;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff5f5c;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000102;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000a400ff004f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000a400ff004f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000010000005e;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000;
++  __m256i_out = __lasx_xvexth_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
+new file mode 100644
+index 000000000..8e61f1c6d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextrins.c
+@@ -0,0 +1,515 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000020202;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000002020202;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000020200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe02fe02fee5fe22;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff49fe4200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbf28b0686066be60;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff49fe4200000000;
++  __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xbf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe;
++  __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xfe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x9f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xc4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x99);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000fffffefc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000fffffffe0;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000fffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000fffffffff;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x8f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op1[1]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000061;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000061;
++  __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x83);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007bbbbbbb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007bbbbbbb;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x8d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x66);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xda);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x87);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xa5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2b2a292827262524;
++  *((unsigned long *)&__m256i_op0[2]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2b2a292827262524;
++  *((unsigned long *)&__m256i_op0[0]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_result[2]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000027262524;
++  *((unsigned long *)&__m256i_result[0]) = 0x232221201f1e1d1c;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0xbd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000080000000;
++  __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x33);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0xb8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x54);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0xe7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x7e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00010001000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00010001000100;
++  __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x7b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x56);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x6f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000001010100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000405;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000001010100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000405;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0xf6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000007f8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x7b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff00fff8ffc0;
++  __m256i_out = __lasx_xvextrins_b (__m256i_op0, __m256i_op1, 0x82);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000002000000;
++  __m256i_out = __lasx_xvextrins_h (__m256i_op0, __m256i_op1, 0x43);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextrins_d (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe97a1df5b41b0;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe97a1df5b41b0;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff007ffd61;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff007ffd61;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001;
++  __m256i_out = __lasx_xvextrins_w (__m256i_op0, __m256i_op1, 0x62);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
+new file mode 100644
+index 000000000..5a047a508
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvh.c
+@@ -0,0 +1,530 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xbff0800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xbff0800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000307fffe72e800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020200008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0008010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5555555580000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5555555580000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x555555553f800000;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2475cef801f0ffdd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x419cd5b11c3c5654;
++  *((unsigned long *)&__m256i_op1[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2475cef801f0ffdd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x419cd5b11c3c5654;
++  *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_result[2]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_result[0]) = 0x6580668200fe0002;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf5f5f5f5f5f5f5f5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf5f5f5f5f5f5f5f5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000004000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000004000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff04ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff04ff00ff00ff00;
++  __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000003f00390035;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8015003f0006001f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000003f00390035;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8015003f0006001f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x80000000001529c1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80007073cadc3779;
++  *((unsigned long *)&__m256i_op1[1]) = 0x80000000001529c1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80007073cadc3779;
++  *((unsigned long *)&__m256i_result[3]) = 0x00008000003f0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00390015003529c1;
++  *((unsigned long *)&__m256i_result[1]) = 0x00008000003f0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00390015003529c1;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0020002000200020;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000002c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000002c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000002c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000002c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000002c0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000002c0000;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7eeefefefefefefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7eeefefefefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7e00ee00fe00fe00;
++  *((unsigned long *)&__m256i_result[2]) = 0xfe00fe00fe00fe00;
++  *((unsigned long *)&__m256i_result[1]) = 0x7e00ee00fe00fe00;
++  *((unsigned long *)&__m256i_result[0]) = 0xfe00fe00fe00fe00;
++  __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xaad5555500000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xaad5555500000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00001fff200007ef;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001fff200007ef;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff7bfffff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff80007fe9;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff7bfffff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff80007fe9;
++  *((unsigned long *)&__m256i_result[3]) = 0x40ff40ff40ff40ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x407b40ff40ff40f1;
++  *((unsigned long *)&__m256i_result[1]) = 0x40ff40ff40ff40ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x407b40ff40ff40f1;
++  __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff02000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff02000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffa;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00018069;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffa;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00018069;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff01fffffffeff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff01fffffffaff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff01fffffffeff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff01fffffffaff;
++  __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00001ff8d8d8c000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00001ff8d8d90000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00001ff8d8d8c000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00001ff8d8d90000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001ff800000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xd8d8c00000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001ff800000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xd8d8c00000000000;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000080c000c080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000080c000c080;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080ff0080;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080ff0080;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000203ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000203ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001ff03ff;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000019ffdf403;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000011ffd97c3;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000019ffdf403;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000011ffd97c3;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000019ffdf403;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000019ffdf403;
++  __m256i_out = __lasx_xvilvh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe0ffe000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe0ffe000000000;
++  __m256i_out = __lasx_xvilvh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
+new file mode 100644
+index 000000000..4393045c3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvilvl.c
+@@ -0,0 +1,620 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001a00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0000fffe0000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000fefc0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffe0000;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefdfffffefd;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f007f78;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f00007f7f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f00fffb7f78fffc;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8080808080808081;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8080808080808081;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000808000008080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000808000008081;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff01fffffffeff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff01fffffffeff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff01fffffffeff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff01fffffffeff;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x07efefefefefefee;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00f300ff00f3;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00f300ff00f3;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00f300ff00f3;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00f300ff00f3;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00fe00fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00fe00fe;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007c000000810081;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007c000000810081;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x007c7fff00007fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00817fff00810000;
++  *((unsigned long *)&__m256i_result[1]) = 0x007c7fff00007fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00817fff00810000;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000001d001d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001d0000001d;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000e0e0e0e0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000070007000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe070e000e070e000;
++  *((unsigned long *)&__m256i_result[2]) = 0xe070e000e070e000;
++  *((unsigned long *)&__m256i_result[1]) = 0xe070e000e070e000;
++  *((unsigned long *)&__m256i_result[0]) = 0xe070e000e070e000;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f0040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f0040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x003f003f003f0040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x003f003f003f0040;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003f3f00004040;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffe98;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000e000e;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000a0080000b00;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000a0080000b00;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000a0080000b00;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000a0080000b00;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fd02fd02;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fd02fd02;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3f00c0003f00c000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3f00c0003f00c000;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[0]) = 0x4980008068400000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf000f000f000f000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf000f010f000f010;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf000f000f000f000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf000f010f000f010;
++  *((unsigned long *)&__m256i_result[3]) = 0x00f0000000f00010;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff0ff00fff0ff10;
++  *((unsigned long *)&__m256i_result[1]) = 0x00f0000000f00010;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff0ff00fff0ff10;
++  __m256i_out = __lasx_xvilvl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvilvl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffed;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffed;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffed;
++  __m256i_out = __lasx_xvilvl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
+new file mode 100644
+index 000000000..ce28c4857
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsgr2vr.c
+@@ -0,0 +1,272 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x146014141414146e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf19998668e5f4b84;
++  long_op1 = 0x0000007942652524;
++  *((unsigned long *)&__m256i_result[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000007942652524;
++  *((unsigned long *)&__m256i_result[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_result[0]) = 0xf19998668e5f4b84;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000;
++  int_op1 = 0x00000045eef14fe8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202020201010000;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x012e2110012e2110;
++  int_op1 = 0x00000000000000ac;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000ac;
++  *((unsigned long *)&__m256i_result[0]) = 0x012e2110012e2110;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff800000ff800000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff80000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff0000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff0000ff;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff0000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff0000ff;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe800c000fffeeece;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff383efffedf0c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe800c000fffeeece;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff383efffedf0c;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xe800c000fffeeece;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff383e000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0xe800c000fffeeece;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0020000000200000;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000048;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000048;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000048;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  long_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff7fffffff7;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff700000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff7fffffff7;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_d (__m256i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  int_op1 = 0x00000000090b0906;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000090b0906;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x000000000000001e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001e00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000050005;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe;
++  __m256i_out = __lasx_xvinsgr2vr_w (__m256i_op0, int_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
+new file mode 100644
+index 000000000..644d2ce4b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvinsve0.c
+@@ -0,0 +1,380 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000050005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffefe00000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000170017;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000170017;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff000200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff000200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x001f00e0ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x001f00e0ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff80000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff000200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000200000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op0[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000040b200002fd4;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007fff0000739c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000040b200002fd4;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007fff0000739c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000739c;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op1[2]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op1[0]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_result[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x6040190d00000000;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x03fbfffc03fc07fc;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x03fbfffc03fc07fc;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000ffff0000ffff;
++  __m256i_out = __lasx_xvinsve0_d (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff8001ffff0001;
++  __m256i_out = __lasx_xvinsve0_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
+new file mode 100644
+index 000000000..9346f9bfb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvprem.c
+@@ -0,0 +1,20 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+new file mode 100644
+index 000000000..9346f9bfb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpremi.c
+@@ -0,0 +1,20 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvextl-xvsra-xvsr.patch b/LoongArch-Add-tests-for-ASX-vector-xvextl-xvsra-xvsr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..17c25a0a16573954b090ac75ba7332663e92e970
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvextl-xvsra-xvsr.patch
@@ -0,0 +1,4737 @@
+From bf5805833fc26d26a1fbbdc7dfe10109c0c676f9 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:49:41 +0800
+Subject: [PATCH 107/124] LoongArch: Add tests for ASX vector
+ xvextl/xvsra/xvsran/xvsrarn instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsra.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrai.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsran.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrani.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrar.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrari.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvextl-1.c     |  86 +++
+ .../loongarch/vector/lasx/lasx-xvextl-2.c     | 163 ++++
+ .../loongarch/vector/lasx/lasx-xvsra.c        | 545 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrai.c       | 504 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsran.c       | 455 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrani.c      | 545 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrar.c       | 725 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrari.c      | 471 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrarn.c      | 500 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrarni.c     | 636 +++++++++++++++
+ 10 files changed, 4630 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
+new file mode 100644
+index 000000000..c0d3e8e75
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-1.c
+@@ -0,0 +1,86 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009;
++  __m256i_out = __lasx_xvextl_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000201220001011c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000201220001011c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000201220001011c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000201220001011c;
++  __m256i_out = __lasx_xvextl_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_q_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
+new file mode 100644
+index 000000000..8c7ab4ed3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvextl-2.c
+@@ -0,0 +1,163 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000083f95466;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010100005400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000083f95466;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010100005400;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvextl_qu_du (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
+new file mode 100644
+index 000000000..2bf9ae9c3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsra.c
+@@ -0,0 +1,545 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc5890a0a07070707;
++  *((unsigned long *)&__m256i_op1[2]) = 0x006be0e4180b8024;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1b399540334c966c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x71d7dd7aefcac001;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffbf7f7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe651bfff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffbf7f7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffe651bfff;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0000000;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000bf6e0000c916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000030000fff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000bf6e0000c916;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000030000fff3;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffe40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1cfd000000000000;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000017e007ffe02;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000004500f300fb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000004500f300fb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_result[2]) = 0x6161616100000018;
++  *((unsigned long *)&__m256i_result[1]) = 0x6161616161616161;
++  *((unsigned long *)&__m256i_result[0]) = 0x6161616100000018;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f007f0081007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f007f0081007f;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe01ae00ff00ff;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fe1ffe0ffe1ffe0;
++  __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0007000700070007;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6040190d20227a78;
++  *((unsigned long *)&__m256i_op0[1]) = 0x132feeabd2d33b38;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000c0300000019a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0c08032100004044;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000265ffa5a6767;
++  *((unsigned long *)&__m256i_result[0]) = 0x0c08032100000000;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00feff0100feff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00feff0100feff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800300000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800300000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800300000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800300000000;
++  __m256i_out = __lasx_xvsra_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000017fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000017fff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsra_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000f00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000f00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsra_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
+new file mode 100644
+index 000000000..a51be899b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrai.c
+@@ -0,0 +1,504 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048;
++  *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x006be0e4180b0024;
++  *((unsigned long *)&__m256i_result[1]) = 0x1b39153f334b166b;
++  *((unsigned long *)&__m256i_result[0]) = 0xf1d7dd7aefcac002;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x36);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[3]) = 0x1555156a1555156a;
++  *((unsigned long *)&__m256i_result[2]) = 0x1555156a1555156a;
++  *((unsigned long *)&__m256i_result[1]) = 0x1555156a1555156a;
++  *((unsigned long *)&__m256i_result[0]) = 0x1555156a1555156a;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000bea20000e127;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000c0460000721f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000de910000f010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000006f9;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000bea20;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000c0460;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000de910;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffff800fffff800;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffff800fffff800;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffff800fffff800;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffff800fffff800;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007f017f01;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007f017f01;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000007f017f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000007f017f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0fff01800fff0181;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0fff01800fff0181;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0007ff800007ff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0007ff800007ff80;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000f91;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000f91;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ffff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffc03fffffffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffc00000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffc03fffffffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffc00000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000007ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000007ffffffff;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000fef0ff0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000fef0ff0;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc008fa01c0090000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3f804000c008f404;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc008fa01c0090000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3f804000c008f404;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_result[2]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_result[0]) = 0x001fc0200060047a;
++  __m256i_out = __lasx_xvsrai_d (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fffe00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fffe00000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_result[3]) = 0x1e9e1e9e1e9e1e9e;
++  *((unsigned long *)&__m256i_result[2]) = 0x1e9e1e9e1e9e1e9e;
++  *((unsigned long *)&__m256i_result[1]) = 0x1e9e1e9e1e9e1e9e;
++  *((unsigned long *)&__m256i_result[0]) = 0x1e9e1e9e1e9e1e9e;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrai_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffc0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0005fff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0x04f004f204f204f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0005fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x04f004f204f204f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000900000009;
++  __m256i_out = __lasx_xvsrai_w (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x761ed60b5d7f0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdc9938afafe904f1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x761ed60b5d7f0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xdc9938afafe904f1;
++  *((unsigned long *)&__m256i_result[3]) = 0x03b0feb002eb0000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfee401c5fd7f0027;
++  *((unsigned long *)&__m256i_result[1]) = 0x03b0feb002eb0000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfee401c5fd7f0027;
++  __m256i_out = __lasx_xvsrai_h (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
+new file mode 100644
+index 000000000..e08934b12
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsran.c
+@@ -0,0 +1,455 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000013ffffffec;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000013ffffebd8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000013ffffffec;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000013ffffebd8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfebdff3eff3dff52;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007ffe7ffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe7ffe7ffe8000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000807e7ffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8091811081118110;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80a6802680208015;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8091811081110013;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80a6802680200018;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffefffe0000feff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffeff0000007e7f;
++  __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000000000000;
++  __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001fe01fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fe01fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c8;
++  __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000440800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000440800;
++  __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfc01fc0101fe01dd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfc01fc0101fe01dd;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000055;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000054;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvsran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
+new file mode 100644
+index 000000000..44c20a954
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrani.c
+@@ -0,0 +1,545 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000003ffffffff;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe1e800002f03988d;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe1e800002f03988d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff0f400001781cc4;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff0f400001781cc4;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc5c4c5c5c5c5c5c5;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc5c545c545c545c5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc5c4c5c5c5c5c5c5;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc5c545c545c545c5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000f8;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbc8ff0ffffffcff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000f8;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbc8ff0ffffffcff8;
++  *((unsigned long *)&__m256i_result[3]) = 0xfcfcfcfcfc040404;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fbfffffc;
++  *((unsigned long *)&__m256i_result[1]) = 0xfcfcfcfcfc040404;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fbfffffc;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x14131211100f0e0d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0c0b0a0908070605;
++  *((unsigned long *)&__m256i_op0[1]) = 0x14131211100f0e0d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0c0b0a0908070605;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0a09080706050403;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0a09080706050403;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffefd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffefd;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x40);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000242;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000242;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0707feb608c9328b;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc237bd65fc892985;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0707feb608c9328b;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc237bd65fc892985;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00150015003a402f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x333568ce26dcd055;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00150015003a402f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x333568ce26dcd055;
++  *((unsigned long *)&__m256i_result[3]) = 0x0e0f1192846ff912;
++  *((unsigned long *)&__m256i_result[2]) = 0x002a0074666a4db9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0e0f1192846ff912;
++  *((unsigned long *)&__m256i_result[0]) = 0x002a0074666a4db9;
++  __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffdfffffffdff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffdfffffffdff;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x37);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8080808000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8080808000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3f7f7f7eff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3f7f7f7eff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007efeff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007efeff00;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff3eff3eff3eff3e;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff3e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff3e;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x70);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002000200020018;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0002000200020008;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00c0000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0040000000000000;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000f0f0003;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000f1003;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fefefe000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fefefe000000;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000781;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[3]) = 0x0008080808080808;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008080808080808;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000003c;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x45);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00f3009500db00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00f3009500db00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000003cc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000003cc0;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x6a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000400100013;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000400100014;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000400100013;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000020200000202;
++  *((unsigned long *)&__m256i_result[2]) = 0x4100004141410000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000020200000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x4100004141410000;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000956a00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000956a00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xb500000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xb500000000000000;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x29);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000001010100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000405;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000001010100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000405;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe00000ffe00000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe00000ffe00000;
++  __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0x34);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x66);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffc0;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffff80;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffff80;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_b_h (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_h_w (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffce;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x6b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000040e7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000040e7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000200000000000;
++  __m256i_out = __lasx_xvsrani_d_q (__m256i_op0, __m256i_op1, 0x21);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3ff9fffa3ff9fffa;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3ff9fffa3ff9fffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007ff3;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007ff3;
++  __m256i_out = __lasx_xvsrani_w_d (__m256i_op0, __m256i_op1, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
+new file mode 100644
+index 000000000..fb47385c0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrar.c
+@@ -0,0 +1,725 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x38a966b31be83ee9;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5f6108dc25b80001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf41a56e8a20878d7;
++  *((unsigned long *)&__m256i_op0[0]) = 0x683b8b67e20c0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000501e99b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000109973de7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001020f22;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001890b7a39;
++  *((unsigned long *)&__m256i_result[3]) = 0x38a966b301f41ffd;
++  *((unsigned long *)&__m256i_result[2]) = 0x5f6108ee13ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf41a56e8d10201f6;
++  *((unsigned long *)&__m256i_result[0]) = 0x683b8b34f1020001;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000707;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010200000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000070300000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01480000052801a2;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffdcff64;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0008000001010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101000001010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008000001010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101000001010000;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0020000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0020000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff01ff3400000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff83ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffcc8000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff82037dfd0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_result[2]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_result[1]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_result[0]) = 0x45baa7ef6a95a985;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000800;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff02ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00fefffeff02ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000100;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00feff00000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2b2a292827262524;
++  *((unsigned long *)&__m256i_op1[2]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2b2a292827262524;
++  *((unsigned long *)&__m256i_op1[0]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8e8e8e8e8f0e8e8e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8e8e8e8e8f0e8e8e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7171717171010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x8e8e8e8e8f00ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7171717171010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x8e8e8e8e8f00ffff;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvsrar_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001607f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000420080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001607f0000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00005053000000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00005053000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000800200027;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000800200028;
++  *((unsigned long *)&__m256i_result[3]) = 0x006018000000001a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0060401900000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x006018000000001a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0060401900000000;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrar_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ffffff1dff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff1dffffff1dff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ffffff1dff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff1dffffff1dff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff1dffffff1dff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff1dffffff1dff;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrar_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
+new file mode 100644
+index 000000000..63ba92ead
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrari.c
+@@ -0,0 +1,471 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x81f7f2599f0509c2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x51136d3c78388916;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffc0fcffffcf83;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000288a00003c1c;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00d6c1c830160048;
++  *((unsigned long *)&__m256i_op0[1]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe3aebaf4df958004;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100020001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fffffffffffe;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00001f41ffffbf00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000040000fff8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00007dfd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00007dfd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x22);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x35);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x20fc000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x20fc000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007f0000007f0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007f0000007f0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x10fbe1e2e0000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x10fbe1e2e0000002;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000040004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000040004;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff8000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0x26);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000400000004000;
++  __m256i_out = __lasx_xvsrari_w (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrari_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff81007fff0100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000010000000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff81007fff0100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000008000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0003fffc0803fff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000008000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0003fffc0803fff8;
++  __m256i_out = __lasx_xvsrari_d (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
+new file mode 100644
+index 000000000..c145f7ff3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarn.c
+@@ -0,0 +1,500 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00080000000cc916;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000006fff3;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6100000800060005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5ee1c073b800c916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5ff00007fff9fff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ffff00ff000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00080005c073c916;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000100000007fff3;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00050008000e0010;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0007000800100010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00050008000e0010;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0007000800100010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000002affaa;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff002affaa;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000002affaa;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffd50055;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x002affaa00000000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001f0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00007f7f00007f00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00007f7f00007fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000007fff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000007fff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f00ff00000000;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000abff0000abff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000abff0000abff;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff800000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000070007000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4040403fd03fd040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4040403fd03fd040;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffd03fd040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4040403fd03fd040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001010000010100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000010100;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8800c800c800c801;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8800c800c800c801;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000086000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00040ff288000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000086000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00040ff288000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_op1[1]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5555555555555555;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fc300000fc40;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc008fa01c0090000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3f804000c008f404;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc008fa01c0090000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3f804000c008f404;
++  *((unsigned long *)&__m256i_op1[3]) = 0x82ff902d83000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f80000082fe0bd9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x82ff902d83000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f80000082fe0bd9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xc0090000c0200060;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc0090000c0200060;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf3f3f3f3f3f3f4f3;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf3f3f3f3f3f3f4f3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000f3f3f4f3;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000f3f3f4f3;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fff8579f;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfefefefe01010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfefefefe01010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefe01010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefe01010101;
++  __m256i_out = __lasx_xvsrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000810001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000810001;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010110;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010110;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8768876887688769;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8768876887688769;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000104000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000104000200;
++  __m256i_out = __lasx_xvsrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
+new file mode 100644
+index 000000000..b5c0fca74
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrarni.c
+@@ -0,0 +1,636 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000007f007f5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x002e4db200000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000315ac0000d658;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00735278007cf94c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0003ed8800031b38;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff8fffffff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff8fc000000;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ff77fff7ff7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7ff77fff7ff7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000022;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000002000000022;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x22);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000016600000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000016600000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x7f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000055;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000055;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x50);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x20);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00550f0000550f00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000015c015c0;
++  *((unsigned long *)&__m256i_result[2]) = 0xc0c0c0cdc0c0c0cd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc0c0c0cdc0c0c0cd;
++  __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0003030300000300;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0003030300000300;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0003030300000100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0003030300000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x00f800f800f800f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0018181800181818;
++  *((unsigned long *)&__m256i_result[1]) = 0x00f800f800f800f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0018181800181818;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x43d03bfff827ea21;
++  *((unsigned long *)&__m256i_op1[2]) = 0x43dac1f2a3804ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x43d03bfff827e9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43e019c657c7d050;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xe8001411edf9c0f8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xe80014fdf0e3e428;
++  __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff14;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff10003;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff14;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff10003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefee0e3fefefe00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefee0e3fefefe00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000001fffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000001fffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000001fffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000001fffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x007f0000007f0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x007f0000007f0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f8f7f8f800f800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f780000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f8f7f80000fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f780000ff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001fff200007ef;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8e8e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8e8e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x01c601c6fe3afe3a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x01c601c6fe3afe3a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003f3f00004040;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f010700c70106;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f010700c70106;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000010211921;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000010211921;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x82ff902d83000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f80000082fe0bd9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x82ff902d83000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f80000082fe0bd9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_w_d (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000080ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000080ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x08000000000000f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x08000000000000f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020000000000000;
++  __m256i_out = __lasx_xvsrarni_d_q (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x4);
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_b_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op0[2]) = 0x03acfc5303260e81;
++  *((unsigned long *)&__m256i_op0[1]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op0[0]) = 0x03acfc5303260e81;
++  *((unsigned long *)&__m256i_op1[3]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op1[2]) = 0x03acfc5303260e81;
++  *((unsigned long *)&__m256i_op1[1]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op1[0]) = 0x03acfc5303260e81;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrarni_h_w (__m256i_op0, __m256i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvfcmp-caf-ceq-cl.patch b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-caf-ceq-cl.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c56e27e86a67f97f89566c6f2cb52a0eb4a2c739
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-caf-ceq-cl.patch
@@ -0,0 +1,4510 @@
+From ab8716fe8109c738ac02b641160350d2b351466b Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:45:33 +0800
+Subject: [PATCH 116/124] LoongArch: Add tests for ASX vector
+ xvfcmp{caf/ceq/cle/clt/cne/cor/cun} instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvfcmp_caf_s.c | 446 ++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c | 977 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_cle_s.c | 759 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_clt_s.c | 675 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_cne_s.c | 872 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_cor_s.c | 340 ++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_cun_s.c | 361 +++++++
+ 7 files changed, 4430 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
+new file mode 100644
+index 000000000..fa3372358
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_caf_s.c
+@@ -0,0 +1,446 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xff56ff55;
++  *((int *)&__m256_op0[4]) = 0xff01ff01;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xff56ff55;
++  *((int *)&__m256_op0[0]) = 0xff01ff01;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x0000abff;
++  *((int *)&__m256_op1[4]) = 0x0000abff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x0000abff;
++  *((int *)&__m256_op1[0]) = 0x0000abff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000001;
++  *((int *)&__m256_op0[4]) = 0x0000000a;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000001;
++  *((int *)&__m256_op0[0]) = 0x0000000a;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000040;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x5d20a0a1;
++  *((int *)&__m256_op1[6]) = 0x5d20a0a1;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x5d20a0a1;
++  *((int *)&__m256_op1[2]) = 0x5d20a0a1;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0003ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffff8000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffff8000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffff8000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffff8000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xe07de080;
++  *((int *)&__m256_op0[4]) = 0x1f20607a;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xe07de080;
++  *((int *)&__m256_op0[0]) = 0x1f20607a;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xe07de080;
++  *((int *)&__m256_op1[4]) = 0x1f20607a;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xe07de080;
++  *((int *)&__m256_op1[0]) = 0x1f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xe07de080;
++  *((int *)&__m256_op1[4]) = 0x1f20607a;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xe07de080;
++  *((int *)&__m256_op1[0]) = 0x1f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000010;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000010;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xa5a5a5a5a5a5a5a5;
++  *((unsigned long *)&__m256d_op1[2]) = 0xa5a5a5a5a5a99e03;
++  *((unsigned long *)&__m256d_op1[1]) = 0xa5a5a5a5a5a5a5a5;
++  *((unsigned long *)&__m256d_op1[0]) = 0xa5a5a5a5a5a99e03;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000045;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000045;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000045;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000045;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x24342434ffff2435;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x24342434ffff2435;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000013;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffe97a1df5b41b0;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffe97a1df5b41b0;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_caf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
+new file mode 100644
+index 000000000..6d6649f6f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_ceq_s.c
+@@ -0,0 +1,977 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00010101;
++  *((int *)&__m256_op0[6]) = 0x01010101;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00010100;
++  *((int *)&__m256_op0[1]) = 0x00010000;
++  *((int *)&__m256_op0[0]) = 0x01000100;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xbf7f7fff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xe651bfff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x000000ff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x000000ff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x0000ffff;
++  *((int *)&__m256_op1[6]) = 0xc0008001;
++  *((int *)&__m256_op1[5]) = 0x0000ffff;
++  *((int *)&__m256_op1[4]) = 0xc0008001;
++  *((int *)&__m256_op1[3]) = 0x0000ffff;
++  *((int *)&__m256_op1[2]) = 0xc0008001;
++  *((int *)&__m256_op1[1]) = 0x0000ffff;
++  *((int *)&__m256_op1[0]) = 0xc0008001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffc6ffc6;
++  *((int *)&__m256_op0[6]) = 0x003a003a;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffc6ffc6;
++  *((int *)&__m256_op0[2]) = 0x003a003a;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x71717171;
++  *((int *)&__m256_op1[6]) = 0x71010101;
++  *((int *)&__m256_op1[5]) = 0x8e8e8e8e;
++  *((int *)&__m256_op1[4]) = 0x8f00ffff;
++  *((int *)&__m256_op1[3]) = 0x71717171;
++  *((int *)&__m256_op1[2]) = 0x71010101;
++  *((int *)&__m256_op1[1]) = 0x8e8e8e8e;
++  *((int *)&__m256_op1[0]) = 0x8f00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x000e000e;
++  *((int *)&__m256_op1[4]) = 0x000e000e;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x000e000e;
++  *((int *)&__m256_op1[0]) = 0x000e000e;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000043;
++  *((int *)&__m256_op0[4]) = 0x0207f944;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000043;
++  *((int *)&__m256_op0[0]) = 0x0207f944;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000001;
++  *((int *)&__m256_op1[6]) = 0x9ffdf403;
++  *((int *)&__m256_op1[5]) = 0x00000001;
++  *((int *)&__m256_op1[4]) = 0x1ffd97c3;
++  *((int *)&__m256_op1[3]) = 0x00000001;
++  *((int *)&__m256_op1[2]) = 0x9ffdf403;
++  *((int *)&__m256_op1[1]) = 0x00000001;
++  *((int *)&__m256_op1[0]) = 0x1ffd97c3;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x7fff7fff;
++  *((int *)&__m256_op0[4]) = 0x7fff7fff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x7fff7fff;
++  *((int *)&__m256_op0[0]) = 0x7fff7fff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000808;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xbea2e127;
++  *((int *)&__m256_op1[6]) = 0xc046721f;
++  *((int *)&__m256_op1[5]) = 0x1729c073;
++  *((int *)&__m256_op1[4]) = 0x816edebe;
++  *((int *)&__m256_op1[3]) = 0xde91f010;
++  *((int *)&__m256_op1[2]) = 0x000006f9;
++  *((int *)&__m256_op1[1]) = 0x5ef1f90e;
++  *((int *)&__m256_op1[0]) = 0xfefaf30d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000200;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000200;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000200;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000200;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000009;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000009;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000009;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffb80000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffb80000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000ffff;
++  *((int *)&__m256_op0[6]) = 0x0000ffff;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x0000ffff;
++  *((int *)&__m256_op0[2]) = 0x0000ffff;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfff0fff0;
++  *((int *)&__m256_op0[6]) = 0xff01ff01;
++  *((int *)&__m256_op0[5]) = 0xfff0fff0;
++  *((int *)&__m256_op0[4]) = 0xfff0fff0;
++  *((int *)&__m256_op0[3]) = 0xfff0fff0;
++  *((int *)&__m256_op0[2]) = 0xff01ff01;
++  *((int *)&__m256_op0[1]) = 0xfff0fff0;
++  *((int *)&__m256_op0[0]) = 0xfff0fff0;
++  *((int *)&__m256_op1[7]) = 0xffefffef;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xffefffef;
++  *((int *)&__m256_op1[4]) = 0xffefffef;
++  *((int *)&__m256_op1[3]) = 0xffefffef;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xffefffef;
++  *((int *)&__m256_op1[0]) = 0xffefffef;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x0000ffb1;
++  *((int *)&__m256_op1[6]) = 0x0001ff8f;
++  *((int *)&__m256_op1[5]) = 0x0001004c;
++  *((int *)&__m256_op1[4]) = 0x0001ff87;
++  *((int *)&__m256_op1[3]) = 0x0000ffb1;
++  *((int *)&__m256_op1[2]) = 0x0001ff8f;
++  *((int *)&__m256_op1[1]) = 0x0001004c;
++  *((int *)&__m256_op1[0]) = 0x0001ff87;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00010001;
++  *((int *)&__m256_op1[6]) = 0x00010001;
++  *((int *)&__m256_op1[5]) = 0x00010001;
++  *((int *)&__m256_op1[4]) = 0x00010001;
++  *((int *)&__m256_op1[3]) = 0x00010001;
++  *((int *)&__m256_op1[2]) = 0x00010001;
++  *((int *)&__m256_op1[1]) = 0x00010001;
++  *((int *)&__m256_op1[0]) = 0x00010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffff0000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffff0000;
++  *((int *)&__m256_op0[4]) = 0xffff0000;
++  *((int *)&__m256_op0[3]) = 0xffff0000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffff0000;
++  *((int *)&__m256_op0[0]) = 0xffff0000;
++  *((int *)&__m256_op1[7]) = 0x007f8080;
++  *((int *)&__m256_op1[6]) = 0x007f007f;
++  *((int *)&__m256_op1[5]) = 0x007f8080;
++  *((int *)&__m256_op1[4]) = 0x007f007f;
++  *((int *)&__m256_op1[3]) = 0x007f8080;
++  *((int *)&__m256_op1[2]) = 0x007f007f;
++  *((int *)&__m256_op1[1]) = 0x007f8080;
++  *((int *)&__m256_op1[0]) = 0x007f007f;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000033;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000033;
++  *((int *)&__m256_op1[7]) = 0x00004200;
++  *((int *)&__m256_op1[6]) = 0x80000000;
++  *((int *)&__m256_op1[5]) = 0x5fff5fff;
++  *((int *)&__m256_op1[4]) = 0x607f0000;
++  *((int *)&__m256_op1[3]) = 0x00004200;
++  *((int *)&__m256_op1[2]) = 0x80000000;
++  *((int *)&__m256_op1[1]) = 0x5fff5fff;
++  *((int *)&__m256_op1[0]) = 0x607f0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x7fff8000;
++  *((int *)&__m256_op1[6]) = 0x7fff0000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00008000;
++  *((int *)&__m256_op1[3]) = 0x7fff8000;
++  *((int *)&__m256_op1[2]) = 0x7fff0000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00008000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0x00100010;
++  *((int *)&__m256_op1[6]) = 0x00030000;
++  *((int *)&__m256_op1[5]) = 0x00100010;
++  *((int *)&__m256_op1[4]) = 0x00030000;
++  *((int *)&__m256_op1[3]) = 0x00100010;
++  *((int *)&__m256_op1[2]) = 0x00030000;
++  *((int *)&__m256_op1[1]) = 0x00100010;
++  *((int *)&__m256_op1[0]) = 0x00030000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xf90c0c0c00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0ca40c0c0c0c0cc0;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0c0c0c0c0cb60cc0;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfbe0b80c960c96d0;
++  *((unsigned long *)&__m256d_op1[3]) = 0x1b9763952fc4c101;
++  *((unsigned long *)&__m256d_op1[2]) = 0xe37affb42fc05f69;
++  *((unsigned long *)&__m256d_op1[1]) = 0x18b988e64facb558;
++  *((unsigned long *)&__m256d_op1[0]) = 0xe5fb66c81da8e5bb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x001e001ea1bfa1bf;
++  *((unsigned long *)&__m256d_op0[2]) = 0x001e001e83e5422e;
++  *((unsigned long *)&__m256d_op0[1]) = 0x001e001ea1bfa1bf;
++  *((unsigned long *)&__m256d_op0[0]) = 0x011f011f0244420e;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffe00f7ffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffff629d7;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfffe00f7ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffff629d7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7ffffffffffffffe;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7ffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256d_op1[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256d_op1[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256d_op0[2]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256d_op0[0]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffe6ffe6e6800001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x19660019ff806680;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffe6ffe6e6800001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x19660019ff806680;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000010100000101;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000010100000101;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_ceq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m256d_op1[3]) = 0x45d5555545d55555;
++  *((unsigned long *)&__m256d_op1[2]) = 0x74555555e8aaaaaa;
++  *((unsigned long *)&__m256d_op1[1]) = 0x45d5555545d55555;
++  *((unsigned long *)&__m256d_op1[0]) = 0x74555555e8aaaaaa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffffff6;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0003030300000300;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0003030300000300;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0003030300000100;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0003030300000100;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256d_op1[2]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256d_op1[0]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff0007a861;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0007a861;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
+new file mode 100644
+index 000000000..a64dd7598
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cle_s.c
+@@ -0,0 +1,759 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0018796d;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00fffb04;
++  *((int *)&__m256_op0[6]) = 0x02fddf20;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00fffb04;
++  *((int *)&__m256_op0[2]) = 0x02fddf20;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x41dfffc0;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x41dfffdf;
++  *((int *)&__m256_op1[2]) = 0xffc00000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffee;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffee;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffee;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffee;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x01fe007a;
++  *((int *)&__m256_op0[6]) = 0x01c40110;
++  *((int *)&__m256_op0[5]) = 0x019d00a2;
++  *((int *)&__m256_op0[4]) = 0x0039fff9;
++  *((int *)&__m256_op0[3]) = 0x01fe007a;
++  *((int *)&__m256_op0[2]) = 0x01c40110;
++  *((int *)&__m256_op0[1]) = 0x019d00a2;
++  *((int *)&__m256_op0[0]) = 0x003a0000;
++  *((int *)&__m256_op1[7]) = 0x0000fffe;
++  *((int *)&__m256_op1[6]) = 0x00800022;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x0000fffe;
++  *((int *)&__m256_op1[2]) = 0x00800022;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x7fff7ffe;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x7fff7ffe;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000002;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000002;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000002;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000002;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x04000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x04000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x04000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x04000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000040;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000040;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00010001;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00010001;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00010001;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256d_op0[2]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256d_op0[1]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256d_op0[0]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000007773;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000003373;
++  *((unsigned long *)&__m256d_op1[3]) = 0x1616161616161616;
++  *((unsigned long *)&__m256d_op1[2]) = 0x161616167fffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7ffe16167f161616;
++  *((unsigned long *)&__m256d_op1[0]) = 0x161616167fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffcc8000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000007dfdff4b;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xbabababababababa;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010183f9999b;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[1]) = 0x01010101d58f43c9;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0100000001000100;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0100000001000100;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000002070145;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000002070145;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff0007a861;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0007a861;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x1b976395;
++  *((int *)&__m256_op0[6]) = 0x2fc4c101;
++  *((int *)&__m256_op0[5]) = 0xe37affb4;
++  *((int *)&__m256_op0[4]) = 0x2fc05f69;
++  *((int *)&__m256_op0[3]) = 0x18b988e6;
++  *((int *)&__m256_op0[2]) = 0x4facb558;
++  *((int *)&__m256_op0[1]) = 0xe5fb66c8;
++  *((int *)&__m256_op0[0]) = 0x1da8e5bb;
++  *((int *)&__m256_op1[7]) = 0x01a72334;
++  *((int *)&__m256_op1[6]) = 0xffff00ff;
++  *((int *)&__m256_op1[5]) = 0xff4f6838;
++  *((int *)&__m256_op1[4]) = 0xff937648;
++  *((int *)&__m256_op1[3]) = 0x00a2afb7;
++  *((int *)&__m256_op1[2]) = 0xfff00ecb;
++  *((int *)&__m256_op1[1]) = 0xffce110f;
++  *((int *)&__m256_op1[0]) = 0x004658c7;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x00001000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x00001000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000ff00;
++  *((int *)&__m256_op0[6]) = 0x0000ffff;
++  *((int *)&__m256_op0[5]) = 0x000000ff;
++  *((int *)&__m256_op0[4]) = 0x000000ff;
++  *((int *)&__m256_op0[3]) = 0x0000ff00;
++  *((int *)&__m256_op0[2]) = 0x0000ffff;
++  *((int *)&__m256_op0[1]) = 0x000000ff;
++  *((int *)&__m256_op0[0]) = 0x000000ff;
++  *((int *)&__m256_op1[7]) = 0x0000ffee;
++  *((int *)&__m256_op1[6]) = 0x0000ff4c;
++  *((int *)&__m256_op1[5]) = 0x0000ff05;
++  *((int *)&__m256_op1[4]) = 0x0000ff3c;
++  *((int *)&__m256_op1[3]) = 0x0000fff9;
++  *((int *)&__m256_op1[2]) = 0x0000ff78;
++  *((int *)&__m256_op1[1]) = 0x0000ffa8;
++  *((int *)&__m256_op1[0]) = 0x0000ff31;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffff0000;
++  *((int *)&__m256_op1[4]) = 0xffff0000;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffff0000;
++  *((int *)&__m256_op1[0]) = 0xffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000ff01;
++  *((int *)&__m256_op0[6]) = 0x00ff0000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000ff01;
++  *((int *)&__m256_op0[3]) = 0x0000ff01;
++  *((int *)&__m256_op0[2]) = 0x00ff0000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000ff01;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00010000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00010000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x02000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x02000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x01010000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x01010000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffff0101;
++  *((int *)&__m256_op1[4]) = 0x00000001;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffff0101;
++  *((int *)&__m256_op1[0]) = 0x00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffffffb;
++  *((int *)&__m256_op0[6]) = 0xfffffffb;
++  *((int *)&__m256_op0[5]) = 0xfffffffb;
++  *((int *)&__m256_op0[4]) = 0xfffffffb;
++  *((int *)&__m256_op0[3]) = 0xfffffffb;
++  *((int *)&__m256_op0[2]) = 0xfffffffb;
++  *((int *)&__m256_op0[1]) = 0xfffffffb;
++  *((int *)&__m256_op0[0]) = 0xfffffffb;
++  *((int *)&__m256_op1[7]) = 0x0000ffff;
++  *((int *)&__m256_op1[6]) = 0x0001000e;
++  *((int *)&__m256_op1[5]) = 0x0000ffff;
++  *((int *)&__m256_op1[4]) = 0x0000ffff;
++  *((int *)&__m256_op1[3]) = 0x0000ffff;
++  *((int *)&__m256_op1[2]) = 0x0000ffff;
++  *((int *)&__m256_op1[1]) = 0x0000ffff;
++  *((int *)&__m256_op1[0]) = 0x0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x8080808280808082;
++  *((unsigned long *)&__m256d_op0[2]) = 0x8080808280808082;
++  *((unsigned long *)&__m256d_op0[1]) = 0x8080808280808080;
++  *((unsigned long *)&__m256d_op0[0]) = 0x8080808280808082;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffeffff10000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffeffff10000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0f00204000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0f00204000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
+new file mode 100644
+index 000000000..733cc00ee
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_clt_s.c
+@@ -0,0 +1,675 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xfe02fe02;
++  *((int *)&__m256_op0[2]) = 0xfee5fe22;
++  *((int *)&__m256_op0[1]) = 0xff49fe42;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x0000ffff;
++  *((int *)&__m256_op1[6]) = 0x0000ff80;
++  *((int *)&__m256_op1[5]) = 0x00004686;
++  *((int *)&__m256_op1[4]) = 0x00007f79;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0x0000ffff;
++  *((int *)&__m256_op1[1]) = 0x0000f328;
++  *((int *)&__m256_op1[0]) = 0x0000dfff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x01000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x01000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffff80cb;
++  *((int *)&__m256_op1[6]) = 0xfffffdf8;
++  *((int *)&__m256_op1[5]) = 0x00000815;
++  *((int *)&__m256_op1[4]) = 0x00000104;
++  *((int *)&__m256_op1[3]) = 0xffffffa4;
++  *((int *)&__m256_op1[2]) = 0xfffffffd;
++  *((int *)&__m256_op1[1]) = 0x00000007;
++  *((int *)&__m256_op1[0]) = 0x00000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffff0000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00003f3f;
++  *((int *)&__m256_op1[4]) = 0xc6c68787;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00003f3f;
++  *((int *)&__m256_op1[0]) = 0x87870000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000002;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0x0101ffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0x0101ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x01000100;
++  *((int *)&__m256_op0[6]) = 0x01000100;
++  *((int *)&__m256_op0[5]) = 0x01000100;
++  *((int *)&__m256_op0[4]) = 0x01000100;
++  *((int *)&__m256_op0[3]) = 0x01000100;
++  *((int *)&__m256_op0[2]) = 0x01000100;
++  *((int *)&__m256_op0[1]) = 0x01000100;
++  *((int *)&__m256_op0[0]) = 0x01000100;
++  *((int *)&__m256_op1[7]) = 0x7f800000;
++  *((int *)&__m256_op1[6]) = 0x7f800000;
++  *((int *)&__m256_op1[5]) = 0x62d2acee;
++  *((int *)&__m256_op1[4]) = 0x7fc00000;
++  *((int *)&__m256_op1[3]) = 0x7f800000;
++  *((int *)&__m256_op1[2]) = 0x7f800000;
++  *((int *)&__m256_op1[1]) = 0x62d2acee;
++  *((int *)&__m256_op1[0]) = 0x7fc00000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000ff01;
++  *((int *)&__m256_op0[6]) = 0x00ff0000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000ff01;
++  *((int *)&__m256_op0[3]) = 0x0000ff01;
++  *((int *)&__m256_op0[2]) = 0x00ff0000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000ff01;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000808;
++  *((int *)&__m256_op1[4]) = 0x00000808;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000808;
++  *((int *)&__m256_op1[0]) = 0x00000808;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffff8000;
++  *((int *)&__m256_op0[5]) = 0x7efefefe;
++  *((int *)&__m256_op0[4]) = 0x80ffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x7efefefe;
++  *((int *)&__m256_op0[0]) = 0x80ffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_clt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x0001ffaa;
++  *((int *)&__m256_op1[6]) = 0x0000040e;
++  *((int *)&__m256_op1[5]) = 0x00007168;
++  *((int *)&__m256_op1[4]) = 0x00007bb6;
++  *((int *)&__m256_op1[3]) = 0x0001ffe8;
++  *((int *)&__m256_op1[2]) = 0x0001fe9c;
++  *((int *)&__m256_op1[1]) = 0x00002282;
++  *((int *)&__m256_op1[0]) = 0x00001680;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x01010101;
++  *((int *)&__m256_op0[5]) = 0x55555501;
++  *((int *)&__m256_op0[4]) = 0xfefefeab;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x01010101;
++  *((int *)&__m256_op0[1]) = 0x55555501;
++  *((int *)&__m256_op0[0]) = 0xfefefeab;
++  *((int *)&__m256_op1[7]) = 0x00000105;
++  *((int *)&__m256_op1[6]) = 0xfffffefb;
++  *((int *)&__m256_op1[5]) = 0xffffff02;
++  *((int *)&__m256_op1[4]) = 0x000000fe;
++  *((int *)&__m256_op1[3]) = 0x00000105;
++  *((int *)&__m256_op1[2]) = 0xfffffefb;
++  *((int *)&__m256_op1[1]) = 0xffffff02;
++  *((int *)&__m256_op1[0]) = 0x000000fe;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000080;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000080;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x0000ffce;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000fc7c;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x0000ffce;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000fc7c;
++  *((int *)&__m256_op1[7]) = 0xe7e7e7e7;
++  *((int *)&__m256_op1[6]) = 0xe7e7e7e7;
++  *((int *)&__m256_op1[5]) = 0xe7e7e7e7;
++  *((int *)&__m256_op1[4]) = 0xe7e7e7e7;
++  *((int *)&__m256_op1[3]) = 0xe7e7e7e7;
++  *((int *)&__m256_op1[2]) = 0xe7e7e7e7;
++  *((int *)&__m256_op1[1]) = 0xe7e7e7e7;
++  *((int *)&__m256_op1[0]) = 0xe7e7e7e7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0x0007a861;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0x0007a861;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00100010;
++  *((int *)&__m256_op1[5]) = 0x00100010;
++  *((int *)&__m256_op1[4]) = 0x00100010;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00100010;
++  *((int *)&__m256_op1[1]) = 0x00100010;
++  *((int *)&__m256_op1[0]) = 0x00100010;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x01010101;
++  *((int *)&__m256_op0[6]) = 0x01010101;
++  *((int *)&__m256_op0[5]) = 0x01010101;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x01010101;
++  *((int *)&__m256_op0[2]) = 0x01010101;
++  *((int *)&__m256_op0[1]) = 0x01010101;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_op1[7]) = 0x000001e0;
++  *((int *)&__m256_op1[6]) = 0x01e001e0;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x000001e0;
++  *((int *)&__m256_op1[2]) = 0x01e001e0;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000000000000007f;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256d_op1[3]) = 0xf800d0d8ffffeecf;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256d_op1[1]) = 0xf800d0d8ffffeecf;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x001ffffe00200000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x001ffffe00200000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_clt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x2020000020200000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x2020000020200000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0008000001010000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0101000001010000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfff1000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfff1000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+new file mode 100644
+index 000000000..190741070
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cne_s.c
+@@ -0,0 +1,872 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x7fff7fff;
++  *((int *)&__m256_op0[4]) = 0x7fff7fff;
++  *((int *)&__m256_op0[3]) = 0x7fff01fd;
++  *((int *)&__m256_op0[2]) = 0x7fff7fff;
++  *((int *)&__m256_op0[1]) = 0x00007fff;
++  *((int *)&__m256_op0[0]) = 0x7fff7fff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000001;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000001;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xdededede;
++  *((int *)&__m256_op0[6]) = 0xdededede;
++  *((int *)&__m256_op0[5]) = 0xdededede;
++  *((int *)&__m256_op0[4]) = 0xdededede;
++  *((int *)&__m256_op0[3]) = 0xdededede;
++  *((int *)&__m256_op0[2]) = 0xdededede;
++  *((int *)&__m256_op0[1]) = 0xdededede;
++  *((int *)&__m256_op0[0]) = 0xdededede;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000051;
++  *((int *)&__m256_op1[5]) = 0x00001010;
++  *((int *)&__m256_op1[4]) = 0x00000fff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000051;
++  *((int *)&__m256_op1[1]) = 0x00001010;
++  *((int *)&__m256_op1[0]) = 0x00000fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000051;
++  *((int *)&__m256_op0[5]) = 0x00001010;
++  *((int *)&__m256_op0[4]) = 0x00000fff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000051;
++  *((int *)&__m256_op0[1]) = 0x00001010;
++  *((int *)&__m256_op0[0]) = 0x00000fff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000ffff;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x0000ffff;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x000007c8;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x000007c8;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x80000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000001f;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000001f;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x0000001f;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x0000001f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xfff8ff40;
++  *((int *)&__m256_op0[5]) = 0x0000ff01;
++  *((int *)&__m256_op0[4]) = 0x00090040;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xfff8ff40;
++  *((int *)&__m256_op0[1]) = 0x0000ff01;
++  *((int *)&__m256_op0[0]) = 0x00090040;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xff1cff1c;
++  *((int *)&__m256_op1[6]) = 0xff1cff1c;
++  *((int *)&__m256_op1[5]) = 0xff1cff1c;
++  *((int *)&__m256_op1[4]) = 0xff1cff1c;
++  *((int *)&__m256_op1[3]) = 0xff1cff1c;
++  *((int *)&__m256_op1[2]) = 0xff1cff1c;
++  *((int *)&__m256_op1[1]) = 0xff1cff1c;
++  *((int *)&__m256_op1[0]) = 0xff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00fe01f0;
++  *((int *)&__m256_op0[6]) = 0x00010000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00c40086;
++  *((int *)&__m256_op0[3]) = 0x00fe01f0;
++  *((int *)&__m256_op0[2]) = 0x00010000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00c40086;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x0000ffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x0000ffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000001;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x0fff0180;
++  *((int *)&__m256_op0[4]) = 0x0fff0181;
++  *((int *)&__m256_op0[3]) = 0x00000001;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x0fff0180;
++  *((int *)&__m256_op0[0]) = 0x0fff0181;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0003ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xfffffe20;
++  *((int *)&__m256_op0[5]) = 0x0000001d;
++  *((int *)&__m256_op0[4]) = 0xfffffe1f;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x5fa00000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x5fa00000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000004;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00007f95;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000004;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00007f95;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x7f010000;
++  *((int *)&__m256_op0[5]) = 0x00010000;
++  *((int *)&__m256_op0[4]) = 0x00007f7f;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x7f010000;
++  *((int *)&__m256_op0[1]) = 0x00010000;
++  *((int *)&__m256_op0[0]) = 0x00007f7f;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x003f0200;
++  *((int *)&__m256_op0[6]) = 0x01400200;
++  *((int *)&__m256_op0[5]) = 0x003f00ff;
++  *((int *)&__m256_op0[4]) = 0x003f00c4;
++  *((int *)&__m256_op0[3]) = 0x003f0200;
++  *((int *)&__m256_op0[2]) = 0x01400200;
++  *((int *)&__m256_op0[1]) = 0x003f00ff;
++  *((int *)&__m256_op0[0]) = 0x003f00c4;
++  *((int *)&__m256_op1[7]) = 0x00000101;
++  *((int *)&__m256_op1[6]) = 0x01010101;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000101;
++  *((int *)&__m256_op1[2]) = 0x01010101;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x01fe000000ff00ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x01fe000001fe0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0101010101010102;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0101010201010204;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0101010101010102;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010102;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000e00ff00ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010127272525;
++  *((unsigned long *)&__m256d_op1[2]) = 0x23a2a121179e951d;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101010127272525;
++  *((unsigned long *)&__m256d_op1[0]) = 0x23a2a121179e951d;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256d_op1[3]) = 0xdff8000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xdff8000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xdff8000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xdff8000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000040002;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000000000000007f;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xc600000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xc600000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xff56ff55ff01ff01;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xff56ff55ff01ff01;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000800000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0004000400040004;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
+new file mode 100644
+index 000000000..8dd58f228
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cor_s.c
+@@ -0,0 +1,340 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x01fe007a;
++  *((int *)&__m256_op1[6]) = 0x01c40110;
++  *((int *)&__m256_op1[5]) = 0x019d00a2;
++  *((int *)&__m256_op1[4]) = 0x0039fff9;
++  *((int *)&__m256_op1[3]) = 0x01fe007a;
++  *((int *)&__m256_op1[2]) = 0x01c40110;
++  *((int *)&__m256_op1[1]) = 0x019d00a2;
++  *((int *)&__m256_op1[0]) = 0x003a0000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xfff10000;
++  *((int *)&__m256_op0[4]) = 0xfff10000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xfff10000;
++  *((int *)&__m256_op0[0]) = 0xfff10000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xfff10000;
++  *((int *)&__m256_op1[4]) = 0xfff10000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffff00ffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffff00ffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256d_op1[2]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256d_op1[0]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000020006;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000020006;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000020006;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x7);
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffff0100000001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffff0100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2);
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000050007;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000039;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
+new file mode 100644
+index 000000000..3230c101d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_cun_s.c
+@@ -0,0 +1,361 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x01ffffff;
++  *((int *)&__m256_op1[4]) = 0xfe000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x01ffffff;
++  *((int *)&__m256_op1[0]) = 0xfe000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000504f;
++  *((int *)&__m256_op0[6]) = 0xffff3271;
++  *((int *)&__m256_op0[5]) = 0xffff47b4;
++  *((int *)&__m256_op0[4]) = 0xffff5879;
++  *((int *)&__m256_op0[3]) = 0x0000504f;
++  *((int *)&__m256_op0[2]) = 0xffff3271;
++  *((int *)&__m256_op0[1]) = 0xffff47b4;
++  *((int *)&__m256_op0[0]) = 0xffff5879;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xde00fe00;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x0000fe01;
++  *((int *)&__m256_op0[4]) = 0x0000fe01;
++  *((int *)&__m256_op0[3]) = 0xde00fe00;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x0000fe01;
++  *((int *)&__m256_op0[0]) = 0x0000fe01;
++  *((int *)&__m256_op1[7]) = 0x0000ffff;
++  *((int *)&__m256_op1[6]) = 0x0000ffff;
++  *((int *)&__m256_op1[5]) = 0x00ff00fe;
++  *((int *)&__m256_op1[4]) = 0x00ff00fe;
++  *((int *)&__m256_op1[3]) = 0x0000ffff;
++  *((int *)&__m256_op1[2]) = 0x0000ffff;
++  *((int *)&__m256_op1[1]) = 0x00ff00fe;
++  *((int *)&__m256_op1[0]) = 0x00ff00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xf3f3f3f3;
++  *((int *)&__m256_op0[6]) = 0xf3f3f3f3;
++  *((int *)&__m256_op0[5]) = 0xf3f3f3f3;
++  *((int *)&__m256_op0[4]) = 0xf3f3f3f3;
++  *((int *)&__m256_op0[3]) = 0xf3f3f3f3;
++  *((int *)&__m256_op0[2]) = 0xf3f3f3f3;
++  *((int *)&__m256_op0[1]) = 0xf3f3f3f3;
++  *((int *)&__m256_op0[0]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[7]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[6]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[5]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[4]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[3]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[2]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[1]) = 0xf3f3f3f3;
++  *((int *)&__m256_op1[0]) = 0xf3f3f3f3;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0x0007a861;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0x0007a861;
++  *((int *)&__m256_op1[7]) = 0x80008000;
++  *((int *)&__m256_op1[6]) = 0x80008000;
++  *((int *)&__m256_op1[5]) = 0x80008000;
++  *((int *)&__m256_op1[4]) = 0xfff98000;
++  *((int *)&__m256_op1[3]) = 0x80008000;
++  *((int *)&__m256_op1[2]) = 0x80008000;
++  *((int *)&__m256_op1[1]) = 0x80008000;
++  *((int *)&__m256_op1[0]) = 0xfff98000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000015d050192cb;
++  *((unsigned long *)&__m256d_op0[2]) = 0x028e509508b16ee9;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000033ff01020e23;
++  *((unsigned long *)&__m256d_op0[0]) = 0x151196b58fd1114d;
++  *((unsigned long *)&__m256d_op1[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256d_op1[2]) = 0xc5c085372cfabfba;
++  *((unsigned long *)&__m256d_op1[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0658f2dc0eb21e3c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00fe01f000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00fe01f000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000b000b000b000b;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000b000b000b000b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_cun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvfcmp-saf-seq-sl.patch b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-saf-seq-sl.patch
new file mode 100644
index 0000000000000000000000000000000000000000..afc40a21757bd57974d7974ff292ba17785a6841
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvfcmp-saf-seq-sl.patch
@@ -0,0 +1,4824 @@
+From beaeb3f05a71c637d47a0e5f86f5781345e10f97 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:48:35 +0800
+Subject: [PATCH 117/124] LoongArch: Add tests for ASX vector
+ xvfcmp{saf/seq/sle/slt/sne/sor/sun} instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvfcmp_saf_s.c |  424 ++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_seq_s.c |  924 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_sle_s.c |  627 +++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_slt_s.c | 1212 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_sne_s.c |  756 ++++++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_sor_s.c |  438 ++++++
+ .../loongarch/vector/lasx/lasx-xvfcmp_sun_s.c |  363 +++++
+ 7 files changed, 4744 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
+new file mode 100644
+index 000000000..23cbc4bf0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_saf_s.c
+@@ -0,0 +1,424 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0x0eb7aaaa;
++  *((int *)&__m256_op1[6]) = 0xa6e6ac80;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x0eb7aaaa;
++  *((int *)&__m256_op1[2]) = 0xa6e6ac80;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x3fff3fff;
++  *((int *)&__m256_op0[6]) = 0x3fff3fff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x3fff3fff;
++  *((int *)&__m256_op0[3]) = 0x3fff3fff;
++  *((int *)&__m256_op0[2]) = 0x3fff3fff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x3fff3fff;
++  *((int *)&__m256_op1[7]) = 0x017e01fe;
++  *((int *)&__m256_op1[6]) = 0x01fe01fe;
++  *((int *)&__m256_op1[5]) = 0x05860606;
++  *((int *)&__m256_op1[4]) = 0x01fe0202;
++  *((int *)&__m256_op1[3]) = 0x017e01fe;
++  *((int *)&__m256_op1[2]) = 0x01fe0000;
++  *((int *)&__m256_op1[1]) = 0x05860606;
++  *((int *)&__m256_op1[0]) = 0x01fe0004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000003f;
++  *((int *)&__m256_op0[6]) = 0x00390035;
++  *((int *)&__m256_op0[5]) = 0x8015003f;
++  *((int *)&__m256_op0[4]) = 0x0006001f;
++  *((int *)&__m256_op0[3]) = 0x0000003f;
++  *((int *)&__m256_op0[2]) = 0x00390035;
++  *((int *)&__m256_op0[1]) = 0x8015003f;
++  *((int *)&__m256_op0[0]) = 0x0006001f;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xefdfefdf;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xefdfefdf;
++  *((int *)&__m256_op1[4]) = 0xefdfefdf;
++  *((int *)&__m256_op1[3]) = 0xefdfefdf;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xefdfefdf;
++  *((int *)&__m256_op1[0]) = 0xefdfefdf;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00ff00ff;
++  *((int *)&__m256_op1[6]) = 0x00ff00ff;
++  *((int *)&__m256_op1[5]) = 0x00ff00ff;
++  *((int *)&__m256_op1[4]) = 0x00ff00ff;
++  *((int *)&__m256_op1[3]) = 0x00ff00ff;
++  *((int *)&__m256_op1[2]) = 0x00ff00ff;
++  *((int *)&__m256_op1[1]) = 0x00ff00ff;
++  *((int *)&__m256_op1[0]) = 0x00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000001;
++  *((int *)&__m256_op0[6]) = 0x7bfffff0;
++  *((int *)&__m256_op0[5]) = 0x00000001;
++  *((int *)&__m256_op0[4]) = 0x80007fe8;
++  *((int *)&__m256_op0[3]) = 0x00000001;
++  *((int *)&__m256_op0[2]) = 0x7bfffff0;
++  *((int *)&__m256_op0[1]) = 0x00000001;
++  *((int *)&__m256_op0[0]) = 0x80007fe8;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x02020102;
++  *((int *)&__m256_op0[6]) = 0x02020102;
++  *((int *)&__m256_op0[5]) = 0x02020102;
++  *((int *)&__m256_op0[4]) = 0x02020102;
++  *((int *)&__m256_op0[3]) = 0x02020102;
++  *((int *)&__m256_op0[2]) = 0x02020102;
++  *((int *)&__m256_op0[1]) = 0x02020102;
++  *((int *)&__m256_op0[0]) = 0x02020102;
++  *((int *)&__m256_op1[7]) = 0x3e800000;
++  *((int *)&__m256_op1[6]) = 0x3e800000;
++  *((int *)&__m256_op1[5]) = 0x3e800000;
++  *((int *)&__m256_op1[4]) = 0x3e800000;
++  *((int *)&__m256_op1[3]) = 0x3e800000;
++  *((int *)&__m256_op1[2]) = 0x3e800000;
++  *((int *)&__m256_op1[1]) = 0x3e800000;
++  *((int *)&__m256_op1[0]) = 0x3e800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00ff00ff;
++  *((int *)&__m256_op0[6]) = 0x00ff00ff;
++  *((int *)&__m256_op0[5]) = 0x00ff00ff;
++  *((int *)&__m256_op0[4]) = 0x00ff00ff;
++  *((int *)&__m256_op0[3]) = 0x00ff00ff;
++  *((int *)&__m256_op0[2]) = 0x00ff00ff;
++  *((int *)&__m256_op0[1]) = 0x00ff00ff;
++  *((int *)&__m256_op0[0]) = 0x00ff00ff;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x80000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xff88ff88;
++  *((int *)&__m256_op0[3]) = 0x80000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xff88ff88;
++  *((int *)&__m256_op1[7]) = 0xfe01fe01;
++  *((int *)&__m256_op1[6]) = 0x0000fd02;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x3fc03fc0;
++  *((int *)&__m256_op1[3]) = 0xfe01fe01;
++  *((int *)&__m256_op1[2]) = 0x0000fd02;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x3fc03fc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000001;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0xffffb2f6;
++  *((int *)&__m256_op0[4]) = 0x00006f48;
++  *((int *)&__m256_op0[3]) = 0x00000001;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0xffffb2f6;
++  *((int *)&__m256_op0[0]) = 0x00006f48;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x000000ff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00100010;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00100010;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00100010;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00100010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0020000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0020000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000408080c111414;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000408080c111414;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000408080c111414;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffb8579f;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffb8579f;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000200000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000200000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x3fffbfff80000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00004000007f8000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x3fffbfff80000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00004000007f8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_saf_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
+new file mode 100644
+index 000000000..6641d2c58
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_seq_s.c
+@@ -0,0 +1,924 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x59800000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x59800000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x41d66000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x41d66000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xa41aa42e;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xa41aa42e;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x83f95466;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x01010101;
++  *((int *)&__m256_op0[0]) = 0x00005400;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xfefefeff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xff295329;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xfefefeff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xff295329;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x111ebb78;
++  *((int *)&__m256_op1[6]) = 0x4f9c4100;
++  *((int *)&__m256_op1[5]) = 0x1c386546;
++  *((int *)&__m256_op1[4]) = 0x809f3b50;
++  *((int *)&__m256_op1[3]) = 0x111ebb78;
++  *((int *)&__m256_op1[2]) = 0x4f9bf1ac;
++  *((int *)&__m256_op1[1]) = 0x21f6050d;
++  *((int *)&__m256_op1[0]) = 0x955d3f68;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffff0000;
++  *((int *)&__m256_op1[4]) = 0xffff0001;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffff0000;
++  *((int *)&__m256_op1[0]) = 0xffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000100;
++  *((int *)&__m256_op0[5]) = 0x00000002;
++  *((int *)&__m256_op0[4]) = 0xff910072;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000100;
++  *((int *)&__m256_op0[1]) = 0x00000002;
++  *((int *)&__m256_op0[0]) = 0xff910072;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffff97a2;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffff97a2;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_seq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x55555555;
++  *((int *)&__m256_op0[6]) = 0x3f800000;
++  *((int *)&__m256_op0[5]) = 0x55555555;
++  *((int *)&__m256_op0[4]) = 0x80000000;
++  *((int *)&__m256_op0[3]) = 0x55555555;
++  *((int *)&__m256_op0[2]) = 0x3f800000;
++  *((int *)&__m256_op0[1]) = 0x55555555;
++  *((int *)&__m256_op0[0]) = 0x80000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x0001fffe;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x0001fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00018002;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000002;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00018002;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000002;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00030000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00030000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xfff70156;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xfff70156;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xfff70156;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xfff70156;
++  *((int *)&__m256_op1[7]) = 0x7fefffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0x7fefffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x7fefffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0x7fefffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000ff70;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000ff70;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000100;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000100;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000002;
++  *((int *)&__m256_op1[4]) = 0x00000008;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000002;
++  *((int *)&__m256_op1[0]) = 0x00000008;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x4393a0a5;
++  *((int *)&__m256_op0[6]) = 0xbc606060;
++  *((int *)&__m256_op0[5]) = 0x43b32fee;
++  *((int *)&__m256_op0[4]) = 0xa9000000;
++  *((int *)&__m256_op0[3]) = 0x4393a0a5;
++  *((int *)&__m256_op0[2]) = 0xbc606060;
++  *((int *)&__m256_op0[1]) = 0x43b32fee;
++  *((int *)&__m256_op0[0]) = 0xa9000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000001;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000003;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000003;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000003;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000003;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfffeb664;
++  *((int *)&__m256_op0[6]) = 0x007ffd61;
++  *((int *)&__m256_op0[5]) = 0xfffe97a1;
++  *((int *)&__m256_op0[4]) = 0xdf5b41b0;
++  *((int *)&__m256_op0[3]) = 0xfffeb664;
++  *((int *)&__m256_op0[2]) = 0x007ffd61;
++  *((int *)&__m256_op0[1]) = 0xfffe97a1;
++  *((int *)&__m256_op0[0]) = 0xdf5b41b0;
++  *((int *)&__m256_op1[7]) = 0xfffeb683;
++  *((int *)&__m256_op1[6]) = 0x9ffffd80;
++  *((int *)&__m256_op1[5]) = 0xfffe97c0;
++  *((int *)&__m256_op1[4]) = 0x20010001;
++  *((int *)&__m256_op1[3]) = 0xfffeb683;
++  *((int *)&__m256_op1[2]) = 0x9ffffd80;
++  *((int *)&__m256_op1[1]) = 0xfffe97c0;
++  *((int *)&__m256_op1[0]) = 0x20010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sueq_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x053531f7c6334908;
++  *((unsigned long *)&__m256d_op0[2]) = 0x8e41dcbff87e7900;
++  *((unsigned long *)&__m256d_op0[1]) = 0x12eb8332e3e15093;
++  *((unsigned long *)&__m256d_op0[0]) = 0x9a7491f9e016ccd4;
++  *((unsigned long *)&__m256d_op1[3]) = 0x345947dcd192b5c4;
++  *((unsigned long *)&__m256d_op1[2]) = 0x182100c72280e687;
++  *((unsigned long *)&__m256d_op1[1]) = 0x4a1c80bb8e892e00;
++  *((unsigned long *)&__m256d_op1[0]) = 0x063ecfbd58abc4b7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffff0002fffeffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffff0002fffeffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000000010486048c;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000006;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000000010486048c;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256d_op0[2]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256d_op0[1]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256d_op0[0]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00ff00ff00ef0120;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00ff00ff00ef0120;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xff00ffff00000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op0[1]) = 0xff00ffff00000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x04e8296f08181818;
++  *((unsigned long *)&__m256d_op1[2]) = 0x032feea900000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x04e8296f08181818;
++  *((unsigned long *)&__m256d_op1[0]) = 0x032feea900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x1400080008000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x1400080008000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x1400080008000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x1400080008000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_seq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x41d8585858400000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x41d8585858400000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffff040000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffff040000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256d_op1[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256d_op1[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256d_op1[0]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffe045fffffeff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffff7d;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sueq_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
+new file mode 100644
+index 000000000..d25fc25da
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sle_s.c
+@@ -0,0 +1,627 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffff90;
++  *((int *)&__m256_op0[4]) = 0xffffff80;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffff90;
++  *((int *)&__m256_op0[0]) = 0xffffff80;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfefee0e3;
++  *((int *)&__m256_op0[6]) = 0xfefefe00;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0xfefee0e3;
++  *((int *)&__m256_op0[2]) = 0xfefefe00;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000ffff;
++  *((int *)&__m256_op0[6]) = 0x0000ffff;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x0000ffff;
++  *((int *)&__m256_op0[2]) = 0x0000ffff;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x8000000a;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x8000000a;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x01010101;
++  *((int *)&__m256_op0[6]) = 0x01010101;
++  *((int *)&__m256_op0[5]) = 0x01010101;
++  *((int *)&__m256_op0[4]) = 0x01010101;
++  *((int *)&__m256_op0[3]) = 0x01010101;
++  *((int *)&__m256_op0[2]) = 0x01010101;
++  *((int *)&__m256_op0[1]) = 0x01010101;
++  *((int *)&__m256_op0[0]) = 0x01010101;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffd8ffc7;
++  *((int *)&__m256_op0[4]) = 0xffdaff8a;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffd8ffc7;
++  *((int *)&__m256_op0[0]) = 0xffdaff8a;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0xffffb3b4;
++  *((int *)&__m256_op1[5]) = 0xfffffff5;
++  *((int *)&__m256_op1[4]) = 0xffff4738;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0xffffb3b4;
++  *((int *)&__m256_op1[1]) = 0xfffffff5;
++  *((int *)&__m256_op1[0]) = 0xffff4738;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xf7f7f7f7;
++  *((int *)&__m256_op1[6]) = 0xf7f7f7f8;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xf7f7f7f7;
++  *((int *)&__m256_op1[2]) = 0xf7f7f7f8;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x5fa00000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x5fa00000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256d_op0[2]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256d_op0[1]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256d_op0[0]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256d_op1[3]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256d_op1[2]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256d_op1[1]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256d_op1[0]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000105fffffefb;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffff02000000fe;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000105fffffefb;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffff02000000fe;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000020afefb1;
++  *((unsigned long *)&__m256d_op0[2]) = 0x7f350104f7ebffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000003fffc1;
++  *((unsigned long *)&__m256d_op0[0]) = 0x005c0003fff9ffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0209fefb08140000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000010100000101;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffe0ffe000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffe0ffe000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sle_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xf5f5f5f5f5f5f5f5;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xf5f5f5f5f5f5f5f5;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x8d8d72728d8d7272;
++  *((unsigned long *)&__m256d_op0[2]) = 0x8d8d72728d8d8d8d;
++  *((unsigned long *)&__m256d_op0[1]) = 0x8d8d72728d8d7272;
++  *((unsigned long *)&__m256d_op0[0]) = 0x8d8d72728d8d8d8d;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffff010100000001;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffff010100000001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sule_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
+new file mode 100644
+index 000000000..8210f749b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_slt_s.c
+@@ -0,0 +1,1212 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000101;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xc08f7800;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xfffffefd;
++  *((int *)&__m256_op0[3]) = 0xc08f7800;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000101;
++  *((int *)&__m256_op1[4]) = 0x00000102;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000101;
++  *((int *)&__m256_op1[0]) = 0x00000102;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x001f1f3e;
++  *((int *)&__m256_op1[6]) = 0x3e1f1f00;
++  *((int *)&__m256_op1[5]) = 0x00030609;
++  *((int *)&__m256_op1[4]) = 0x09060300;
++  *((int *)&__m256_op1[3]) = 0x001f1f3e;
++  *((int *)&__m256_op1[2]) = 0x3e1f1f00;
++  *((int *)&__m256_op1[1]) = 0x00030609;
++  *((int *)&__m256_op1[0]) = 0x09060300;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000001;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000001;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x7fffffff;
++  *((int *)&__m256_op0[6]) = 0x7fffffff;
++  *((int *)&__m256_op0[5]) = 0x7fffffff;
++  *((int *)&__m256_op0[4]) = 0x7fffffff;
++  *((int *)&__m256_op0[3]) = 0x7fffffff;
++  *((int *)&__m256_op0[2]) = 0x7fffffff;
++  *((int *)&__m256_op0[1]) = 0x7fffffff;
++  *((int *)&__m256_op0[0]) = 0x7fffffff;
++  *((int *)&__m256_op1[7]) = 0x20fc0000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x20fc0000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffff0400;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xffff0400;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x08050501;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x08050501;
++  *((int *)&__m256_op1[7]) = 0x90909090;
++  *((int *)&__m256_op1[6]) = 0x90909090;
++  *((int *)&__m256_op1[5]) = 0x90909090;
++  *((int *)&__m256_op1[4]) = 0x90909090;
++  *((int *)&__m256_op1[3]) = 0x90909090;
++  *((int *)&__m256_op1[2]) = 0x90909090;
++  *((int *)&__m256_op1[1]) = 0x90909090;
++  *((int *)&__m256_op1[0]) = 0x90909090;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00001ff8;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xd8d8c000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00001ff8;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xd8d8c000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x02020102;
++  *((int *)&__m256_op1[6]) = 0x02020102;
++  *((int *)&__m256_op1[5]) = 0x02020102;
++  *((int *)&__m256_op1[4]) = 0x02020102;
++  *((int *)&__m256_op1[3]) = 0x02020102;
++  *((int *)&__m256_op1[2]) = 0x02020102;
++  *((int *)&__m256_op1[1]) = 0x02020102;
++  *((int *)&__m256_op1[0]) = 0x02020102;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00ff00ff;
++  *((int *)&__m256_op0[6]) = 0x00ff00ff;
++  *((int *)&__m256_op0[5]) = 0x00ff00ff;
++  *((int *)&__m256_op0[4]) = 0x00ff00ff;
++  *((int *)&__m256_op0[3]) = 0x00ff00ff;
++  *((int *)&__m256_op0[2]) = 0x00ff00ff;
++  *((int *)&__m256_op0[1]) = 0x00ff00ff;
++  *((int *)&__m256_op0[0]) = 0x00ff00ff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000001;
++  *((int *)&__m256_op1[6]) = 0xffe00000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000001;
++  *((int *)&__m256_op1[2]) = 0xffe00000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x60000108;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x01060005;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x7fef0001;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xfffffff8;
++  *((int *)&__m256_op1[4]) = 0xfffffff8;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xfffffff8;
++  *((int *)&__m256_op1[0]) = 0xfc000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x327f0101;
++  *((int *)&__m256_op0[6]) = 0x01010102;
++  *((int *)&__m256_op0[5]) = 0x63000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x327f0101;
++  *((int *)&__m256_op0[2]) = 0x01010102;
++  *((int *)&__m256_op0[1]) = 0x63000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xce7fffff;
++  *((int *)&__m256_op1[6]) = 0xfffffffe;
++  *((int *)&__m256_op1[5]) = 0x63000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xce7fffff;
++  *((int *)&__m256_op1[2]) = 0xfffffffe;
++  *((int *)&__m256_op1[1]) = 0x63000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x59800000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x59800000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x0eb7aaaa;
++  *((int *)&__m256_op1[6]) = 0xa6e6ac80;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x0eb7aaaa;
++  *((int *)&__m256_op1[2]) = 0xa6e6ac80;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000007;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xdbc80000;
++  *((int *)&__m256_op1[6]) = 0x00003fff;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xdbc80000;
++  *((int *)&__m256_op1[2]) = 0x00003fff;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000002;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000007f;
++  *((int *)&__m256_op1[7]) = 0xfffffff3;
++  *((int *)&__m256_op1[6]) = 0x0000000b;
++  *((int *)&__m256_op1[5]) = 0xfffffff3;
++  *((int *)&__m256_op1[4]) = 0xfffffff3;
++  *((int *)&__m256_op1[3]) = 0xfffffff3;
++  *((int *)&__m256_op1[2]) = 0x0000000b;
++  *((int *)&__m256_op1[1]) = 0xfffffff3;
++  *((int *)&__m256_op1[0]) = 0xfffffff3;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x223d76f0;
++  *((int *)&__m256_op0[6]) = 0x9f3881ff;
++  *((int *)&__m256_op0[5]) = 0x3870ca8d;
++  *((int *)&__m256_op0[4]) = 0x013e76a0;
++  *((int *)&__m256_op0[3]) = 0x223d76f0;
++  *((int *)&__m256_op0[2]) = 0x9f37e357;
++  *((int *)&__m256_op0[1]) = 0x43ec0a1b;
++  *((int *)&__m256_op0[0]) = 0x2aba7ed0;
++  *((int *)&__m256_op1[7]) = 0x111ebb78;
++  *((int *)&__m256_op1[6]) = 0x4f9c4100;
++  *((int *)&__m256_op1[5]) = 0x1c386546;
++  *((int *)&__m256_op1[4]) = 0x809f3b50;
++  *((int *)&__m256_op1[3]) = 0x111ebb78;
++  *((int *)&__m256_op1[2]) = 0x4f9bf1ac;
++  *((int *)&__m256_op1[1]) = 0x21f6050d;
++  *((int *)&__m256_op1[0]) = 0x955d3f68;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x01010101;
++  *((int *)&__m256_op0[6]) = 0x27272525;
++  *((int *)&__m256_op0[5]) = 0x23a2a121;
++  *((int *)&__m256_op0[4]) = 0x179e951d;
++  *((int *)&__m256_op0[3]) = 0x01010101;
++  *((int *)&__m256_op0[2]) = 0x27272525;
++  *((int *)&__m256_op0[1]) = 0x23a2a121;
++  *((int *)&__m256_op0[0]) = 0x179e951d;
++  *((int *)&__m256_op1[7]) = 0x00001251;
++  *((int *)&__m256_op1[6]) = 0x00005111;
++  *((int *)&__m256_op1[5]) = 0x00000c4f;
++  *((int *)&__m256_op1[4]) = 0x00004b0f;
++  *((int *)&__m256_op1[3]) = 0x00001251;
++  *((int *)&__m256_op1[2]) = 0x00005111;
++  *((int *)&__m256_op1[1]) = 0x00000c4f;
++  *((int *)&__m256_op1[0]) = 0x00004b0f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x80000000;
++  *((int *)&__m256_op1[6]) = 0xff800000;
++  *((int *)&__m256_op1[5]) = 0x80000000;
++  *((int *)&__m256_op1[4]) = 0x80000000;
++  *((int *)&__m256_op1[3]) = 0x80000000;
++  *((int *)&__m256_op1[2]) = 0xff800000;
++  *((int *)&__m256_op1[1]) = 0x80000000;
++  *((int *)&__m256_op1[0]) = 0x80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_op1[7]) = 0x7ff00000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x7ff00000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x7ff00000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x7ff00000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x000000ff;
++  *((int *)&__m256_op1[6]) = 0x000000ff;
++  *((int *)&__m256_op1[5]) = 0x000000ff;
++  *((int *)&__m256_op1[4]) = 0x000000ff;
++  *((int *)&__m256_op1[3]) = 0x000000ff;
++  *((int *)&__m256_op1[2]) = 0x000000ff;
++  *((int *)&__m256_op1[1]) = 0x000000ff;
++  *((int *)&__m256_op1[0]) = 0x000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xfe01fe01;
++  *((int *)&__m256_op0[6]) = 0x7e81fd02;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x3fc001fe;
++  *((int *)&__m256_op0[3]) = 0xfe01fe01;
++  *((int *)&__m256_op0[2]) = 0x7e81fd02;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x3fc001fe;
++  *((int *)&__m256_op1[7]) = 0xfe01fe01;
++  *((int *)&__m256_op1[6]) = 0x7e81fd02;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x3fc001fe;
++  *((int *)&__m256_op1[3]) = 0xfe01fe01;
++  *((int *)&__m256_op1[2]) = 0x7e81fd02;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x3fc001fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x80000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x80000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x80000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x80000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0ff80100ffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0ff80100ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000017000000080;
++  *((unsigned long *)&__m256d_op1[2]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000017000000080;
++  *((unsigned long *)&__m256d_op1[0]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x01480000052801a2;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffdcff64;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffff000100000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000fe0100000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000fe0100000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000001900000019;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000001900000019;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000001900000019;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000fff00004542;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000fff00004542;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00aa00ab00ff00ff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00aa00ab00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffe37fe3001d001d;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffe37fe3001d001d;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffa30000165a;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000104000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffa30000165a;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000104000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfe01fe017e81fd02;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000003fc001fe;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfe01fe017e81fd02;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000003fc001fe;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_slt_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256d_op0[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffff8d9ffa7103d;
++  *((unsigned long *)&__m256d_op1[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256d_op1[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffff8d9ffa7103d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000080000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000b004a00440040;
++  *((unsigned long *)&__m256d_op0[2]) = 0x8020004a0011002a;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000b004a00440040;
++  *((unsigned long *)&__m256d_op0[0]) = 0x8020004a0011002a;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0fff0fff00000020;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0fff0fff00000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256d_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256d_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256d_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffefffefffeffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffefffefffeffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000860601934;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000800200028;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000860601934;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000800200028;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256d_op1[2]) = 0x4079808280057efe;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x007ffcfcfd020202;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000400000004;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sult_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
+new file mode 100644
+index 000000000..9d015a5c8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sne_s.c
+@@ -0,0 +1,756 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x80808080;
++  *((int *)&__m256_op0[6]) = 0x80808080;
++  *((int *)&__m256_op0[5]) = 0x80808080;
++  *((int *)&__m256_op0[4]) = 0x80808080;
++  *((int *)&__m256_op0[3]) = 0x80808080;
++  *((int *)&__m256_op0[2]) = 0x80808080;
++  *((int *)&__m256_op0[1]) = 0x80808080;
++  *((int *)&__m256_op0[0]) = 0x80808080;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xefefefef;
++  *((int *)&__m256_op0[6]) = 0xefefefef;
++  *((int *)&__m256_op0[5]) = 0xefefefef;
++  *((int *)&__m256_op0[4]) = 0xefefefef;
++  *((int *)&__m256_op0[3]) = 0xefefefef;
++  *((int *)&__m256_op0[2]) = 0xefefef6e;
++  *((int *)&__m256_op0[1]) = 0xeeeeeeee;
++  *((int *)&__m256_op0[0]) = 0xeeeeeeee;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x7f800000;
++  *((int *)&__m256_op0[6]) = 0x7f800000;
++  *((int *)&__m256_op0[5]) = 0x7f800000;
++  *((int *)&__m256_op0[4]) = 0x7f800000;
++  *((int *)&__m256_op0[3]) = 0x7f800000;
++  *((int *)&__m256_op0[2]) = 0x7f800000;
++  *((int *)&__m256_op0[1]) = 0x7f800000;
++  *((int *)&__m256_op0[0]) = 0x7f800000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0x00ff00ff;
++  *((int *)&__m256_op1[6]) = 0x00ff00ff;
++  *((int *)&__m256_op1[5]) = 0x00ff00ff;
++  *((int *)&__m256_op1[4]) = 0x00ff00ff;
++  *((int *)&__m256_op1[3]) = 0x00ff00ff;
++  *((int *)&__m256_op1[2]) = 0x00ff00ff;
++  *((int *)&__m256_op1[1]) = 0x00ff00ff;
++  *((int *)&__m256_op1[0]) = 0x00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x80000000;
++  *((int *)&__m256_op1[4]) = 0x80000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x80000000;
++  *((int *)&__m256_op1[0]) = 0x80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x40404040;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x40404040;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0xfefefefe;
++  *((int *)&__m256_op1[4]) = 0x3f800000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0xfefefefe;
++  *((int *)&__m256_op1[0]) = 0x3f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffff0101;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffff0101;
++  *((int *)&__m256_op0[0]) = 0x00000001;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x01010101;
++  *((int *)&__m256_op0[5]) = 0x55555501;
++  *((int *)&__m256_op0[4]) = 0xfefefeab;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x01010101;
++  *((int *)&__m256_op0[1]) = 0x55555501;
++  *((int *)&__m256_op0[0]) = 0xfefefeab;
++  *((int *)&__m256_op1[7]) = 0x0010bfc8;
++  *((int *)&__m256_op1[6]) = 0x0010bf52;
++  *((int *)&__m256_op1[5]) = 0xfff1bfca;
++  *((int *)&__m256_op1[4]) = 0x0011bfcb;
++  *((int *)&__m256_op1[3]) = 0x0010bfc8;
++  *((int *)&__m256_op1[2]) = 0x0010bf52;
++  *((int *)&__m256_op1[1]) = 0xfff1bfca;
++  *((int *)&__m256_op1[0]) = 0x0011bfcb;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x80008000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x80008000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x80008000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x80008000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00060000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00060000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000166;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000166;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0000004a;
++  *((int *)&__m256_op0[6]) = 0x557baac4;
++  *((int *)&__m256_op0[5]) = 0x556caad9;
++  *((int *)&__m256_op0[4]) = 0xaabbaa88;
++  *((int *)&__m256_op0[3]) = 0x0000004a;
++  *((int *)&__m256_op0[2]) = 0x557baac4;
++  *((int *)&__m256_op0[1]) = 0x556caad9;
++  *((int *)&__m256_op0[0]) = 0xaabbaa88;
++  *((int *)&__m256_op1[7]) = 0x09090909;
++  *((int *)&__m256_op1[6]) = 0x09090909;
++  *((int *)&__m256_op1[5]) = 0x09090909;
++  *((int *)&__m256_op1[4]) = 0x09090909;
++  *((int *)&__m256_op1[3]) = 0x09090909;
++  *((int *)&__m256_op1[2]) = 0x09090909;
++  *((int *)&__m256_op1[1]) = 0x09090909;
++  *((int *)&__m256_op1[0]) = 0x09090909;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x80000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x80000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x80000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x80000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x80000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x80000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x80000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x80000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000020;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000020;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sune_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xbabababababababa;
++  *((unsigned long *)&__m256d_op0[2]) = 0xbabababababababa;
++  *((unsigned long *)&__m256d_op0[1]) = 0xbabababababababa;
++  *((unsigned long *)&__m256d_op0[0]) = 0xbabababababababa;
++  *((unsigned long *)&__m256d_op1[3]) = 0x88888a6d0962002e;
++  *((unsigned long *)&__m256d_op1[2]) = 0xdb8a3109fe0f0020;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000007fff01fffb;
++  *((unsigned long *)&__m256d_op1[0]) = 0xdb8e20990cce025a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256d_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256d_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sne_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256d_op0[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffebeb8;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffebeb8;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xfafafafafafafafa;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000fefefe;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0101010101010203;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffee0000ff4c;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000ff050000ff3c;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000fff90000ff78;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000ffa80000ff31;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0100000001000100;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0f00204000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0f00204000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x000000010000685e;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256d_op0[1]) = 0x000000010000685e;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0087ff87f807ff87;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0087ff87f807ff87;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sune_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
+new file mode 100644
+index 000000000..a61681073
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sor_s.c
+@@ -0,0 +1,438 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000001;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000001;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x80000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x000000ff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x000000ff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000064;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000781;
++  *((int *)&__m256_op0[0]) = 0x00000064;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x0c6a2400;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x0f002040;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x0c6a2400;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x0f002040;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x0000000c;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x0000000c;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0feff00000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0feff00000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[2]) = 0x3ff1808001020101;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256d_op1[0]) = 0x3ff1808001020101;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000005;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256d_op0[2]) = 0xfefefefeffe0e0e0;
++  *((unsigned long *)&__m256d_op0[1]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256d_op0[0]) = 0xfefefefeffe0e0e0;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000040004000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op1[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op1[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256d_op1[0]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sor_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
+new file mode 100644
+index 000000000..41f274920
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfcmp_sun_s.c
+@@ -0,0 +1,363 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x1e180000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x1e180000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x1e180000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x1e180000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00802000;
++  *((int *)&__m256_op1[6]) = 0x00802000;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x00802000;
++  *((int *)&__m256_op1[2]) = 0x00802000;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000055;
++  *((int *)&__m256_op0[6]) = 0x36aaaaac;
++  *((int *)&__m256_op0[5]) = 0x55555555;
++  *((int *)&__m256_op0[4]) = 0xaaaaaaac;
++  *((int *)&__m256_op0[3]) = 0x00000055;
++  *((int *)&__m256_op0[2]) = 0x36aaaaac;
++  *((int *)&__m256_op0[1]) = 0x55555555;
++  *((int *)&__m256_op0[0]) = 0xaaaaaaac;
++  *((int *)&__m256_op1[7]) = 0x00060000;
++  *((int *)&__m256_op1[6]) = 0x00040000;
++  *((int *)&__m256_op1[5]) = 0x00025555;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00060000;
++  *((int *)&__m256_op1[2]) = 0x00040000;
++  *((int *)&__m256_op1[1]) = 0x00025555;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xff240000;
++  *((int *)&__m256_op0[6]) = 0x0000ff00;
++  *((int *)&__m256_op0[5]) = 0xfffeffe4;
++  *((int *)&__m256_op0[4]) = 0xfffeff00;
++  *((int *)&__m256_op0[3]) = 0xff640000;
++  *((int *)&__m256_op0[2]) = 0x0000ff00;
++  *((int *)&__m256_op0[1]) = 0xfffeff66;
++  *((int *)&__m256_op0[0]) = 0xfffeff00;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x80808082;
++  *((int *)&__m256_op0[6]) = 0x80808082;
++  *((int *)&__m256_op0[5]) = 0x80808082;
++  *((int *)&__m256_op0[4]) = 0x80808082;
++  *((int *)&__m256_op0[3]) = 0x80808082;
++  *((int *)&__m256_op0[2]) = 0x80808080;
++  *((int *)&__m256_op0[1]) = 0x80808082;
++  *((int *)&__m256_op0[0]) = 0x80808082;
++  *((int *)&__m256_op1[7]) = 0x55555555;
++  *((int *)&__m256_op1[6]) = 0x55555555;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x55555555;
++  *((int *)&__m256_op1[2]) = 0x55555555;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[6]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[5]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[4]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[3]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[2]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[1]) = 0x6d6d6d6d;
++  *((int *)&__m256_op0[0]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[7]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[6]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[5]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[4]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[3]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[2]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[1]) = 0x6d6d6d6d;
++  *((int *)&__m256_op1[0]) = 0x6d6d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_s (__m256_op0, __m256_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000118;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000118;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000027;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000027;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfcmp_sun_d (__m256d_op0, __m256d_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvfnmadd-xvfrstp-.patch b/LoongArch-Add-tests-for-ASX-vector-xvfnmadd-xvfrstp-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..48b3b297d685754b3f8b3a7cb71d9a12d41b012a
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvfnmadd-xvfrstp-.patch
@@ -0,0 +1,4991 @@
+From d0108f9375bd6eede5f7f4e289dce580b180848d Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:22:49 +0800
+Subject: [PATCH 114/124] LoongArch: Add tests for ASX vector
+ xvfnmadd/xvfrstp/xvfstpi/xvhsubw/ xvmsub/xvrotr/xvrotri/xvld/xvst
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvld.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmsub.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvrotr.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvrotri.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvst.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvfnmadd_d.c   | 324 +++++++
+ .../loongarch/vector/lasx/lasx-xvfnmadd_s.c   | 895 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvfrstp.c      | 381 ++++++++
+ .../loongarch/vector/lasx/lasx-xvfrstpi.c     | 350 +++++++
+ .../loongarch/vector/lasx/lasx-xvhsubw-1.c    | 620 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvhsubw-2.c    | 545 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvld.c         |  86 ++
+ .../loongarch/vector/lasx/lasx-xvmsub.c       | 647 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvrotr.c       | 530 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvrotri.c      | 394 ++++++++
+ .../loongarch/vector/lasx/lasx-xvst.c         | 102 ++
+ 11 files changed, 4874 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
+new file mode 100644
+index 000000000..d161c850c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_d.c
+@@ -0,0 +1,324 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0001010101010101;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000010100;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0001000001000100;
++  *((unsigned long *)&__m256d_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[2]) = 0xffffffffbf7f7fff;
++  *((unsigned long *)&__m256d_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[0]) = 0xffffffffe651bfff;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffbf7f7fff;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffe651bfff;
++  __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x3ff73ff83ff73ff8;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x3ff73ff83ff73ff8;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256d_op2[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256d_op2[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256d_op2[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256d_result[3]) = 0xa020202020202020;
++  *((unsigned long *)&__m256d_result[2]) = 0xa020202020206431;
++  *((unsigned long *)&__m256d_result[1]) = 0xa020202020202020;
++  *((unsigned long *)&__m256d_result[0]) = 0xa020202020206431;
++  __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256d_op0[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256d_op0[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256d_op0[0]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256d_op2[2]) = 0x7f7f7f5c8f374980;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256d_op2[0]) = 0x7f7f7f5c8f374980;
++  *((unsigned long *)&__m256d_result[3]) = 0x8001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x8001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xff21ff21ff21ff21;
++  *((unsigned long *)&__m256d_op0[2]) = 0xff21ff21ff21ff21;
++  *((unsigned long *)&__m256d_op0[1]) = 0xff21ff21ff21ff21;
++  *((unsigned long *)&__m256d_op0[0]) = 0xff21ff21ff21ff21;
++  *((unsigned long *)&__m256d_op1[3]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256d_op1[2]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256d_op1[1]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256d_op1[0]) = 0xff21c241ff21c241;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xfff0000000000000;
++  __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000;
++  __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x1080108010060002;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x1080108010060002;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m256d_op2[3]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256d_result[3]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256d_result[2]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256d_result[1]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256d_result[0]) = 0x7fff00017fff0000;
++  __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x1716151417161514;
++  *((unsigned long *)&__m256d_op0[2]) = 0x1716151417161514;
++  *((unsigned long *)&__m256d_op0[1]) = 0x1716151417161514;
++  *((unsigned long *)&__m256d_op0[0]) = 0x1716151417161514;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000002780;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000002780;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000002780;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000002780;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000002780;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000002780;
++  __m256d_out = __lasx_xvfnmadd_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0080200000802000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0080200000802000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0080200000802000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0080200000802000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffffffff;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256d_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256d_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256d_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffffffffffba0c05;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffffffffffba0c05;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffffba0c05;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffffba0c05;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000005000000020;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000005000000020;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000005000000020;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000005000000020;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000005000000020;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000005000000020;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0000005000000020;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0000005000000020;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0010000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0010000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0010000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0010000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x0008000000000000;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xff0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256d_op0[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256d_op0[1]) = 0xff0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256d_op0[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256d_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_op0[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_op1[3]) = 0xffff801000000010;
++  *((unsigned long *)&__m256d_op1[2]) = 0xffff800300000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0xffff801000000010;
++  *((unsigned long *)&__m256d_op1[0]) = 0xffff800300000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256d_result[0]) = 0xffffffffe0000000;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  *((unsigned long *)&__m256d_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[2]) = 0x00000000ffffffce;
++  *((unsigned long *)&__m256d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op0[0]) = 0x00000000ffffffce;
++  *((unsigned long *)&__m256d_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op1[2]) = 0x0000000700000000;
++  *((unsigned long *)&__m256d_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256d_op1[0]) = 0x0000000700000000;
++  *((unsigned long *)&__m256d_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256d_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256d_result[0]) = 0x8000000000000000;
++  __m256d_out = __lasx_xvfnmsub_d (__m256d_op0, __m256d_op1, __m256d_op2);
++  ASSERTEQ_64 (__LINE__, __m256d_result, __m256d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
+new file mode 100644
+index 000000000..c5e9576ea
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfnmadd_s.c
+@@ -0,0 +1,895 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffffff;
++  *((int *)&__m256_op1[6]) = 0xffff5f5c;
++  *((int *)&__m256_op1[5]) = 0xffffffff;
++  *((int *)&__m256_op1[4]) = 0xffff5f5c;
++  *((int *)&__m256_op1[3]) = 0xffffffff;
++  *((int *)&__m256_op1[2]) = 0xffff5f5c;
++  *((int *)&__m256_op1[1]) = 0xffffffff;
++  *((int *)&__m256_op1[0]) = 0xffff5f5c;
++  *((int *)&__m256_op2[7]) = 0x0000000f;
++  *((int *)&__m256_op2[6]) = 0x0000000f;
++  *((int *)&__m256_op2[5]) = 0xff00ff0f;
++  *((int *)&__m256_op2[4]) = 0xff005f0f;
++  *((int *)&__m256_op2[3]) = 0x0000000f;
++  *((int *)&__m256_op2[2]) = 0x0000000f;
++  *((int *)&__m256_op2[1]) = 0xff00ff0f;
++  *((int *)&__m256_op2[0]) = 0xff005f0f;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffff5f5c;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffff5f5c;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffff5f5c;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffff5f5c;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00010001;
++  *((int *)&__m256_op0[6]) = 0x00010000;
++  *((int *)&__m256_op0[5]) = 0x020afefb;
++  *((int *)&__m256_op0[4]) = 0x08140000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000001;
++  *((int *)&__m256_op0[1]) = 0x0003fffc;
++  *((int *)&__m256_op0[0]) = 0x00060000;
++  *((int *)&__m256_op1[7]) = 0x80000000;
++  *((int *)&__m256_op1[6]) = 0x40000000;
++  *((int *)&__m256_op1[5]) = 0x40000000;
++  *((int *)&__m256_op1[4]) = 0x10000010;
++  *((int *)&__m256_op1[3]) = 0x80000000;
++  *((int *)&__m256_op1[2]) = 0x40000000;
++  *((int *)&__m256_op1[1]) = 0x80000000;
++  *((int *)&__m256_op1[0]) = 0x40000010;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x000000ff;
++  *((int *)&__m256_op2[4]) = 0x0001ffff;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x0000ffff;
++  *((int *)&__m256_op2[0]) = 0x00010000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80020000;
++  *((int *)&__m256_result[5]) = 0x828aff0b;
++  *((int *)&__m256_result[4]) = 0x8001ffff;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000002;
++  *((int *)&__m256_result[1]) = 0x8000ffff;
++  *((int *)&__m256_result[0]) = 0x800d0002;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x1f3d2101;
++  *((int *)&__m256_op0[6]) = 0x1f3d2101;
++  *((int *)&__m256_op0[5]) = 0x1f3d2101;
++  *((int *)&__m256_op0[4]) = 0xd07dbf01;
++  *((int *)&__m256_op0[3]) = 0x9f1fd080;
++  *((int *)&__m256_op0[2]) = 0x1f3d2101;
++  *((int *)&__m256_op0[1]) = 0x1f3d2101;
++  *((int *)&__m256_op0[0]) = 0xd07dbf01;
++  *((int *)&__m256_op1[7]) = 0x1d949d94;
++  *((int *)&__m256_op1[6]) = 0x9d949d95;
++  *((int *)&__m256_op1[5]) = 0x1d949d94;
++  *((int *)&__m256_op1[4]) = 0x9e1423d4;
++  *((int *)&__m256_op1[3]) = 0x1de9a03f;
++  *((int *)&__m256_op1[2]) = 0x3dd41d95;
++  *((int *)&__m256_op1[1]) = 0x1d949d94;
++  *((int *)&__m256_op1[0]) = 0x9e1423d4;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x8001b72e;
++  *((int *)&__m256_result[6]) = 0x0001b72e;
++  *((int *)&__m256_result[5]) = 0x8001b72e;
++  *((int *)&__m256_result[4]) = 0xaf12d5f0;
++  *((int *)&__m256_result[3]) = 0x00024763;
++  *((int *)&__m256_result[2]) = 0x9d9cb530;
++  *((int *)&__m256_result[1]) = 0x8001b72e;
++  *((int *)&__m256_result[0]) = 0xaf12d5f0;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x1f0fdf7f;
++  *((int *)&__m256_op0[6]) = 0x3e3b31d4;
++  *((int *)&__m256_op0[5]) = 0x7ff80000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x1f0fdf7f;
++  *((int *)&__m256_op0[2]) = 0x3e3b31d4;
++  *((int *)&__m256_op0[1]) = 0x7ff80000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x7ff80000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x7ff80000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x80000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x80000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x80000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x80000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x0000ffff;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x0000ffff;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000001;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000001;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000001;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000001;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000001;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000001;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000001;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000200;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000200;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000200;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000200;
++  *((int *)&__m256_op2[7]) = 0xffffffa0;
++  *((int *)&__m256_op2[6]) = 0x00000001;
++  *((int *)&__m256_op2[5]) = 0xffffffe0;
++  *((int *)&__m256_op2[4]) = 0x00000001;
++  *((int *)&__m256_op2[3]) = 0xffffffa0;
++  *((int *)&__m256_op2[2]) = 0x00000001;
++  *((int *)&__m256_op2[1]) = 0xffffffe0;
++  *((int *)&__m256_op2[0]) = 0x00000001;
++  *((int *)&__m256_result[7]) = 0xffffffa0;
++  *((int *)&__m256_result[6]) = 0x80000001;
++  *((int *)&__m256_result[5]) = 0xffffffe0;
++  *((int *)&__m256_result[4]) = 0x80000001;
++  *((int *)&__m256_result[3]) = 0xffffffa0;
++  *((int *)&__m256_result[2]) = 0x80000001;
++  *((int *)&__m256_result[1]) = 0xffffffe0;
++  *((int *)&__m256_result[0]) = 0x80000001;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x49810081;
++  *((int *)&__m256_op1[6]) = 0x4843ffe1;
++  *((int *)&__m256_op1[5]) = 0x49810081;
++  *((int *)&__m256_op1[4]) = 0x68410001;
++  *((int *)&__m256_op1[3]) = 0x49810081;
++  *((int *)&__m256_op1[2]) = 0x4843ffe1;
++  *((int *)&__m256_op1[1]) = 0x49810081;
++  *((int *)&__m256_op1[0]) = 0x68410001;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00009fff;
++  *((int *)&__m256_op0[6]) = 0x00002001;
++  *((int *)&__m256_op0[5]) = 0x0000ffff;
++  *((int *)&__m256_op0[4]) = 0x0000ffff;
++  *((int *)&__m256_op0[3]) = 0x00009fff;
++  *((int *)&__m256_op0[2]) = 0x00002001;
++  *((int *)&__m256_op0[1]) = 0x0000ffff;
++  *((int *)&__m256_op0[0]) = 0x0000ffff;
++  *((int *)&__m256_op1[7]) = 0xfffeb683;
++  *((int *)&__m256_op1[6]) = 0x9ffffd80;
++  *((int *)&__m256_op1[5]) = 0xfffe97c0;
++  *((int *)&__m256_op1[4]) = 0x20010001;
++  *((int *)&__m256_op1[3]) = 0xfffeb683;
++  *((int *)&__m256_op1[2]) = 0x9ffffd80;
++  *((int *)&__m256_op1[1]) = 0xfffe97c0;
++  *((int *)&__m256_op1[0]) = 0x20010001;
++  *((int *)&__m256_op2[7]) = 0x00009fff;
++  *((int *)&__m256_op2[6]) = 0x00002001;
++  *((int *)&__m256_op2[5]) = 0x0000ffff;
++  *((int *)&__m256_op2[4]) = 0x0000ffff;
++  *((int *)&__m256_op2[3]) = 0x00009fff;
++  *((int *)&__m256_op2[2]) = 0x00002001;
++  *((int *)&__m256_op2[1]) = 0x0000ffff;
++  *((int *)&__m256_op2[0]) = 0x0000ffff;
++  *((int *)&__m256_result[7]) = 0xfffeb683;
++  *((int *)&__m256_result[6]) = 0x80002001;
++  *((int *)&__m256_result[5]) = 0xfffe97c0;
++  *((int *)&__m256_result[4]) = 0x8000ffff;
++  *((int *)&__m256_result[3]) = 0xfffeb683;
++  *((int *)&__m256_result[2]) = 0x80002001;
++  *((int *)&__m256_result[1]) = 0xfffe97c0;
++  *((int *)&__m256_result[0]) = 0x8000ffff;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x7fffffff;
++  *((int *)&__m256_op0[6]) = 0x80000000;
++  *((int *)&__m256_op0[5]) = 0x7fffffff;
++  *((int *)&__m256_op0[4]) = 0x80000000;
++  *((int *)&__m256_op0[3]) = 0x7fffffff;
++  *((int *)&__m256_op0[2]) = 0x80000000;
++  *((int *)&__m256_op0[1]) = 0x7fffffff;
++  *((int *)&__m256_op0[0]) = 0x80000000;
++  *((int *)&__m256_op1[7]) = 0xfd02fd02;
++  *((int *)&__m256_op1[6]) = 0xfd02fd02;
++  *((int *)&__m256_op1[5]) = 0xfd02fd02;
++  *((int *)&__m256_op1[4]) = 0xfd02fd02;
++  *((int *)&__m256_op1[3]) = 0xfd02fd02;
++  *((int *)&__m256_op1[2]) = 0xfd02fd02;
++  *((int *)&__m256_op1[1]) = 0xfd02fd02;
++  *((int *)&__m256_op1[0]) = 0xfd02fd02;
++  *((int *)&__m256_op2[7]) = 0xfd02fd02;
++  *((int *)&__m256_op2[6]) = 0xfd02fd02;
++  *((int *)&__m256_op2[5]) = 0xfd02fd02;
++  *((int *)&__m256_op2[4]) = 0xfd02fd02;
++  *((int *)&__m256_op2[3]) = 0xfd02fd02;
++  *((int *)&__m256_op2[2]) = 0xfd02fd02;
++  *((int *)&__m256_op2[1]) = 0xfd02fd02;
++  *((int *)&__m256_op2[0]) = 0xfd02fd02;
++  *((int *)&__m256_result[7]) = 0x7fffffff;
++  *((int *)&__m256_result[6]) = 0x7d02fd02;
++  *((int *)&__m256_result[5]) = 0x7fffffff;
++  *((int *)&__m256_result[4]) = 0x7d02fd02;
++  *((int *)&__m256_result[3]) = 0x7fffffff;
++  *((int *)&__m256_result[2]) = 0x7d02fd02;
++  *((int *)&__m256_result[1]) = 0x7fffffff;
++  *((int *)&__m256_result[0]) = 0x7d02fd02;
++  __m256_out = __lasx_xvfnmadd_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xffffffff;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xbf7f7fff;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xffffffff;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xe651bfff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0xffffffff;
++  *((int *)&__m256_op2[2]) = 0xf328dfff;
++  *((int *)&__m256_op2[1]) = 0x6651bfff;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0x00000000;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0x00000000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x07070707;
++  *((int *)&__m256_op0[5]) = 0x01020400;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00020100;
++  *((int *)&__m256_op0[1]) = 0x07030200;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0xffffff80;
++  *((int *)&__m256_op1[6]) = 0xfefeff00;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x01000400;
++  *((int *)&__m256_op1[3]) = 0xffffff80;
++  *((int *)&__m256_op1[2]) = 0xfeff0000;
++  *((int *)&__m256_op1[1]) = 0x02020080;
++  *((int *)&__m256_op1[0]) = 0x5c800400;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0xffffffff;
++  *((int *)&__m256_op2[2]) = 0xf328dfff;
++  *((int *)&__m256_op2[1]) = 0x6651bfff;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffff80;
++  *((int *)&__m256_result[6]) = 0x46867f79;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xf328dfff;
++  *((int *)&__m256_result[1]) = 0x6651bfff;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0xffffffff;
++  *((int *)&__m256_op0[6]) = 0xe0000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xe0000000;
++  *((int *)&__m256_op0[3]) = 0xffffffff;
++  *((int *)&__m256_op0[2]) = 0xe0000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xe0000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x80000000;
++  *((int *)&__m256_op1[4]) = 0x80000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x80000000;
++  *((int *)&__m256_op1[0]) = 0x80000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0x00000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0x00000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x7f800000;
++  *((int *)&__m256_op2[6]) = 0x7f800000;
++  *((int *)&__m256_op2[5]) = 0x7fc00000;
++  *((int *)&__m256_op2[4]) = 0x7fc00000;
++  *((int *)&__m256_op2[3]) = 0x7f800000;
++  *((int *)&__m256_op2[2]) = 0x7f800000;
++  *((int *)&__m256_op2[1]) = 0x7fc00000;
++  *((int *)&__m256_op2[0]) = 0x7fc00000;
++  *((int *)&__m256_result[7]) = 0x7f800000;
++  *((int *)&__m256_result[6]) = 0x7f800000;
++  *((int *)&__m256_result[5]) = 0x7fc00000;
++  *((int *)&__m256_result[4]) = 0x7fc00000;
++  *((int *)&__m256_result[3]) = 0x7f800000;
++  *((int *)&__m256_result[2]) = 0x7f800000;
++  *((int *)&__m256_result[1]) = 0x7fc00000;
++  *((int *)&__m256_result[0]) = 0x7fc00000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x7fefffff;
++  *((int *)&__m256_op1[6]) = 0xffffffff;
++  *((int *)&__m256_op1[5]) = 0x7fefffff;
++  *((int *)&__m256_op1[4]) = 0xffffffff;
++  *((int *)&__m256_op1[3]) = 0x7fefffff;
++  *((int *)&__m256_op1[2]) = 0xffffffff;
++  *((int *)&__m256_op1[1]) = 0x7fefffff;
++  *((int *)&__m256_op1[0]) = 0xffffffff;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x7fefffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0x7fefffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x7fefffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0x7fefffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0xf7f8f7f8;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00003f78;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0xf7f8f7f8;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00003f78;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0xf7f8f7f8;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00003f78;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0xf7f8f7f8;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00003f78;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0xff800000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x80000000;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0xff800000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x80000000;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0xffffffff;
++  *((int *)&__m256_op0[4]) = 0xffffffff;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0xffffffff;
++  *((int *)&__m256_op0[0]) = 0xffffffff;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0xffffffff;
++  *((int *)&__m256_op2[4]) = 0xffffffff;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0xffffffff;
++  *((int *)&__m256_op2[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x01010100;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000405;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x01010100;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000405;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x01010100;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000405;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x01010100;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000405;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x01010100;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0x00000405;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x01010100;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0x00000405;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00800080;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000202;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00800080;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000202;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0xff88ff88;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0xff88ff88;
++  *((int *)&__m256_op2[7]) = 0x00000000;
++  *((int *)&__m256_op2[6]) = 0x00000000;
++  *((int *)&__m256_op2[5]) = 0x00000000;
++  *((int *)&__m256_op2[4]) = 0x00000000;
++  *((int *)&__m256_op2[3]) = 0x00000000;
++  *((int *)&__m256_op2[2]) = 0x00000000;
++  *((int *)&__m256_op2[1]) = 0x00000000;
++  *((int *)&__m256_op2[0]) = 0x00000000;
++  *((int *)&__m256_result[7]) = 0x80000000;
++  *((int *)&__m256_result[6]) = 0x80000000;
++  *((int *)&__m256_result[5]) = 0x80000000;
++  *((int *)&__m256_result[4]) = 0xffc8ff88;
++  *((int *)&__m256_result[3]) = 0x80000000;
++  *((int *)&__m256_result[2]) = 0x80000000;
++  *((int *)&__m256_result[1]) = 0x80000000;
++  *((int *)&__m256_result[0]) = 0xffc8ff88;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x00000000;
++  *((int *)&__m256_op1[4]) = 0x00000000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x00000000;
++  *((int *)&__m256_op1[0]) = 0x00000000;
++  *((int *)&__m256_op2[7]) = 0x001fffff;
++  *((int *)&__m256_op2[6]) = 0xffffffff;
++  *((int *)&__m256_op2[5]) = 0xffffffff;
++  *((int *)&__m256_op2[4]) = 0xffffffff;
++  *((int *)&__m256_op2[3]) = 0x001fffff;
++  *((int *)&__m256_op2[2]) = 0xffffffff;
++  *((int *)&__m256_op2[1]) = 0xffffffff;
++  *((int *)&__m256_op2[0]) = 0xffffffff;
++  *((int *)&__m256_result[7]) = 0x001fffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffffff;
++  *((int *)&__m256_result[3]) = 0x001fffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffffff;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  *((int *)&__m256_op0[7]) = 0x00000000;
++  *((int *)&__m256_op0[6]) = 0x00000000;
++  *((int *)&__m256_op0[5]) = 0x00000000;
++  *((int *)&__m256_op0[4]) = 0x00000000;
++  *((int *)&__m256_op0[3]) = 0x00000000;
++  *((int *)&__m256_op0[2]) = 0x00000000;
++  *((int *)&__m256_op0[1]) = 0x00000000;
++  *((int *)&__m256_op0[0]) = 0x00000000;
++  *((int *)&__m256_op1[7]) = 0x00000000;
++  *((int *)&__m256_op1[6]) = 0x00000000;
++  *((int *)&__m256_op1[5]) = 0x7fff8000;
++  *((int *)&__m256_op1[4]) = 0x7fff0000;
++  *((int *)&__m256_op1[3]) = 0x00000000;
++  *((int *)&__m256_op1[2]) = 0x00000000;
++  *((int *)&__m256_op1[1]) = 0x7fff8000;
++  *((int *)&__m256_op1[0]) = 0x7fff0000;
++  *((int *)&__m256_op2[7]) = 0xffffffff;
++  *((int *)&__m256_op2[6]) = 0xffffffff;
++  *((int *)&__m256_op2[5]) = 0xffffffff;
++  *((int *)&__m256_op2[4]) = 0xffffff10;
++  *((int *)&__m256_op2[3]) = 0xffffffff;
++  *((int *)&__m256_op2[2]) = 0xffffffff;
++  *((int *)&__m256_op2[1]) = 0xffffffff;
++  *((int *)&__m256_op2[0]) = 0xffffff10;
++  *((int *)&__m256_result[7]) = 0xffffffff;
++  *((int *)&__m256_result[6]) = 0xffffffff;
++  *((int *)&__m256_result[5]) = 0xffffffff;
++  *((int *)&__m256_result[4]) = 0xffffff10;
++  *((int *)&__m256_result[3]) = 0xffffffff;
++  *((int *)&__m256_result[2]) = 0xffffffff;
++  *((int *)&__m256_result[1]) = 0xffffffff;
++  *((int *)&__m256_result[0]) = 0xffffff10;
++  __m256_out = __lasx_xvfnmsub_s (__m256_op0, __m256_op1, __m256_op2);
++  ASSERTEQ_32 (__LINE__, __m256_result, __m256_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
+new file mode 100644
+index 000000000..557f9f8b5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstp.c
+@@ -0,0 +1,381 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000080000;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007f7f00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f00007fff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000007f00340040;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000007f000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020200008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000008;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x03f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[2]) = 0x03f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x03f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[0]) = 0x03f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op2[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op2[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op2[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op2[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[3]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x007f007bfffffffb;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x007f007bfffffffb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000010000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000010000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffb8579f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffb8579f;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000c040c0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000c040c0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe000ffffffff08;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe000ffffffff08;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000;
++  __m256i_out = __lasx_xvfrstp_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffff10;
++  __m256i_out = __lasx_xvfrstp_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
+new file mode 100644
+index 000000000..cdb7b11aa
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvfrstpi.c
+@@ -0,0 +1,350 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x38a966b31be83ee9;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5f6108dc25b8e028;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf41a56e8a20878d7;
++  *((unsigned long *)&__m256i_op0[0]) = 0x683b8b67e20c8ee5;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffcd42ffffecc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000475ffff4c51;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000740dffffad17;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003f4bffff7130;
++  *((unsigned long *)&__m256i_result[3]) = 0x38a966b31be83ee9;
++  *((unsigned long *)&__m256i_result[2]) = 0x5f6108dc25b80001;
++  *((unsigned long *)&__m256i_result[1]) = 0xf41a56e8a20878d7;
++  *((unsigned long *)&__m256i_result[0]) = 0x683b8b67e20c0001;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1000000000000000;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0008;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0008;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0008ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0008ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ffffff1e9e9e9e;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff9e9eb09e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ffffff1e9e9e9e;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff9e9eb09e;
++  *((unsigned long *)&__m256i_result[3]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0xffc00000ffc0ffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0xffc00000ffc0ffc0;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000165e0000480d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000165e0000480d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_result[2]) = 0x000016000000480d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_result[0]) = 0x000016000000480d;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe800c0d8fffeeece;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff383efffedf0c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe800c0d8fffeeece;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff383efffedf0c;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xe800c000fffeeece;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff383efffedf0c;
++  *((unsigned long *)&__m256i_result[1]) = 0xe800c000fffeeece;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff383efffedf0c;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff000200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000200000000;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff00ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffffffff;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c007c0080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c007c0080008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7c00000880008000;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000f000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0008b03e457db03e;
++  *((unsigned long *)&__m256i_result[2]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_result[1]) = 0x0008b03e457db03e;
++  *((unsigned long *)&__m256i_result[0]) = 0x457db03e45a87310;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000008000b;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000008000b;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000b;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000dfffffff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000cfffffff3;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000dfffffff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000cfffffff3;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000001000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0008000000000000;
++  __m256i_out = __lasx_xvfrstpi_h (__m256i_op0, __m256i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01fffe00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01fffe00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x10ffffff10000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x10ffffff10000006;
++  __m256i_out = __lasx_xvfrstpi_b (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+new file mode 100644
+index 000000000..fa4d5fd6f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-1.c
+@@ -0,0 +1,620 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000fffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffb80000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffb80000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000012;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000f0f0003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000f1003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000f0001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000011;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffff08;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffff08;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001fffffff9;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100002000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvhsubw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00b7003600120000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00b7006200fc0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00b7004100190004;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdb801b6d0962003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdb8a3109fe0f0024;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9a7f997fff01ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbe632a4f1c3c5653;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffe54affffffd3;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffcfae000000d8;
++  *((unsigned long *)&__m256i_result[1]) = 0x00006681000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffd668ffffa9c6;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbff00000bff00000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbff00000bff00000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffbff1ffffbff1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffbff1ffffbff1;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff1fffffff1;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000051;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000101000000fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000051;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000101000000fff;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff8180ffff8181;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff8180ffff8181;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00feff0100feff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00feff0100feff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvhsubw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007efeff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007efeff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffd017d00;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe00;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe00;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffc0c0ffffbfc0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffc0c0ffffbfc0;
++  __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000010000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000010000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffeffff10000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffeffff10000000;
++  __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff8579f;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0007a861;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0007a861;
++  __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
+new file mode 100644
+index 000000000..87c3e25b1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhsubw-2.c
+@@ -0,0 +1,545 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff70ff01ff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff70ff01ff80;
++  __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_result[2]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_result[1]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_result[0]) = 0x00c200c200c200bb;
++  __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000f90;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff70;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff70;
++  __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00bb;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0057;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff00bb;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0057;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fffa003e;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffb009c;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fffa003e;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffb009c;
++  __m256i_out = __lasx_xvhsubw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffc001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000c000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffc001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000c000;
++  __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9ffffd8020010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff9fffffff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9ffffd8020010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff9fffffff9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x40f69fe73c26f4ee;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000018ffff2b13;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000018ffff2b13;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvhsubw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00800080ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00800080ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007fe268;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007fe268;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffbfffc;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff00fffffff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffff00;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000055;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000055;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffefefeff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff295329;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff01010101;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00d6acd7;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff01010101;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00d6acd7;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x120e120dedf1edf2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x120e120dedf1edf2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000120e120d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000120e120d;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000001;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000dfffffff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000cfffffff3;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000dfffffff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000cfffffff3;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000000d;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0048007f002f0028;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x004a007f002f0028;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x24342434ffff2435;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x24342434ffff2435;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffba8300004fc2;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffebffffffebfff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffebffffffebfff;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff7eddffff7ed3;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff7edfffff7edf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff7eddffff7ed3;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff7edfffff7edf;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003fef00003fea;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003ff000003ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff3eedffff3ee3;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff3eedffff3ee3;
++  __m256i_out = __lasx_xvhsubw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
+new file mode 100644
+index 000000000..c1eda6c6c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvld.c
+@@ -0,0 +1,86 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_result[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_result[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb;
++  __m256i_out = __lasx_xvld ((unsigned long *)&__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_result[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_result[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb;
++  __m256i_out = __lasx_xvldx ((unsigned long *)&__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb;
++  __m256i_out = __lasx_xvldrepl_b ((unsigned long *)&__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0xfeebfeebfeebfeeb;
++  *((unsigned long *)&__m256i_result[2]) = 0xfeebfeebfeebfeeb;
++  *((unsigned long *)&__m256i_result[1]) = 0xfeebfeebfeebfeeb;
++  *((unsigned long *)&__m256i_result[0]) = 0xfeebfeebfeebfeeb;
++  __m256i_out = __lasx_xvldrepl_h ((unsigned long *)&__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0xad72feebad72feeb;
++  *((unsigned long *)&__m256i_result[2]) = 0xad72feebad72feeb;
++  *((unsigned long *)&__m256i_result[1]) = 0xad72feebad72feeb;
++  *((unsigned long *)&__m256i_result[0]) = 0xad72feebad72feeb;
++  __m256i_out = __lasx_xvldrepl_w ((unsigned long *)&__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[2]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[0]) = 0x0ad152a5ad72feeb;
++  __m256i_out = __lasx_xvldrepl_d ((unsigned long *)&__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
+new file mode 100644
+index 000000000..8c8d4996b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsub.c
+@@ -0,0 +1,647 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x074132a240000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00007ffe81fdfe03;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op1[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op1[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op1[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op1[0]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000017f0000017d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000017f0000017f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000002e0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000002e0000fffe;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000002e0000ffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000002e0000fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000f7bc0001f7bd;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000f93b0000017c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000f7bc0001f7bd;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000f93b0000017b;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1410141014101410;
++  *((unsigned long *)&__m256i_result[2]) = 0x1410141014101410;
++  *((unsigned long *)&__m256i_result[1]) = 0x1410141014101410;
++  *((unsigned long *)&__m256i_result[0]) = 0x1410141014101410;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdb801b6d0962003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0024;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000007fff01ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xdb8e209d0cce025a;
++  *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffcc8000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000007dfdff4b;
++  *((unsigned long *)&__m256i_result[3]) = 0xdb801b6d0962003f;
++  *((unsigned long *)&__m256i_result[2]) = 0xdb8a3109fe0f0024;
++  *((unsigned long *)&__m256i_result[1]) = 0x9a7f997fff01ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xbe632a4f1c3c5653;
++  __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x01010101010000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000004800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000004500f300fb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000004800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000004500f300fb;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000004800000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000004500f300fb;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000004800000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000004500f300fb;
++  *((unsigned long *)&__m256i_result[3]) = 0x7b7b7b7b80000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xcacacb1011040500;
++  *((unsigned long *)&__m256i_result[1]) = 0x7b7b7b7b80000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xcacacb1011040500;
++  __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000001a00;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfe7fffecfe7fffec;
++  *((unsigned long *)&__m256i_result[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfe7fffecfe7fffec;
++  *((unsigned long *)&__m256i_result[0]) = 0xff80000000000000;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_result[1]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0xa020202020206431;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fff80fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff80fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff80007ffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff007fff80fe;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f;
++  __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00001fff200007ef;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff457db03f;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000fe200000fe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fe200000fe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x001ffffe00200000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x001ffffe00200000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fe200000fe1f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fe200000fe1f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000009e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000009e;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffff0078ffff0078;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffff0078ffff0078;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff8;
++  __m256i_out = __lasx_xvmsub_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d;
++  __m256i_out = __lasx_xvmsub_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
+new file mode 100644
+index 000000000..21446e55e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotr.c
+@@ -0,0 +1,530 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0001ff02;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff020afefc;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000003fefd;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffefffefff7fff7;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7ffffffbfffb;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0001ff02;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff020afefc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000003fefd;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0209fefb08140000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff0001ff04;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff02a0fefc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000cfefd;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff01ff010000fff9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff19;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff02ff020001fffa;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000100010001fffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x807f807f00000380;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380;
++  *((unsigned long *)&__m256i_result[1]) = 0xc03fc03f000001c0;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000001c0;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffe40;
++  __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000fedd;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000fedd;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000fedd;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000fedd;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x805f0000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x805f0000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x805f0000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x805f0000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x80be0000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x80be0000ffffffff;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000457d607d;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff457d607f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000457d607d;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff457d607f;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x07ffffff07ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x07ffffff07ffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x07ffffff07ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x07ffffff07ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0ffffffe0ffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0ffffffe0ffffffe;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_result[2]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_result[0]) = 0x001fc0200060047a;
++  __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ca0000fff80000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ca0000fff80000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x381800007af80000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x381800007af80000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000086fe0000403e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000403e00004040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000086fe0000403e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000403e00004040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000086fe0000403e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000403e00004040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000086fe0000403e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000403e00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001bfa000000f9;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000f900004040;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001bfa000000f9;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000f900004040;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0607ffff0383;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0607ffffc0c1;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0607ffff0383;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0607ffffc0c1;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvrotr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c79;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c79;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007f8000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007f8000;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001fff000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffdfff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffdfff80;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
+new file mode 100644
+index 000000000..c1b8e1752
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvrotri.c
+@@ -0,0 +1,394 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbea2e127c046721f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1729c073816edebe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xde91f010000006f9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5ef1f90efefaf30d;
++  *((unsigned long *)&__m256i_result[3]) = 0x515f93f023600fb9;
++  *((unsigned long *)&__m256i_result[2]) = 0x948b39e0b7405f6f;
++  *((unsigned long *)&__m256i_result[1]) = 0x48ef087800007c83;
++  *((unsigned long *)&__m256i_result[0]) = 0x78af877c7d7f86f9;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7fff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f007f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7fff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfff807f;
++  *((unsigned long *)&__m256i_result[1]) = 0xbf803fbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfff807f;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010;
++  __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000002a54290;
++  __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000007f0000007f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000007f0000007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff01ff80ff01ff80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff01ff800000007e;
++  *((unsigned long *)&__m256i_result[3]) = 0x003f8000003f8000;
++  *((unsigned long *)&__m256i_result[2]) = 0x003f8000003f8000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc07f80ffc07f80;
++  *((unsigned long *)&__m256i_result[0]) = 0xffc07f80003f0000;
++  __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x24);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff6f20;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff6f20;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x4343434343434343;
++  *((unsigned long *)&__m256i_result[2]) = 0x4343434343434343;
++  *((unsigned long *)&__m256i_result[1]) = 0x4343434343434343;
++  *((unsigned long *)&__m256i_result[0]) = 0x4343434343434343;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x38);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffdffd;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffdffd;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffdffd;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffdffd;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x35);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000f0000000f000;
++  __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000007fc00000400;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000040000000400;
++  *((unsigned long *)&__m256i_result[1]) = 0x000007fc00000400;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000040000000400;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x35);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_w (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f0000007f0060;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00f7000000f70006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00f7000000f70006;
++  __m256i_out = __lasx_xvrotri_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_d (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffbfffffffb;
++  __m256i_out = __lasx_xvrotri_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
+new file mode 100644
+index 000000000..3c5e775ff
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvst.c
+@@ -0,0 +1,102 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0;
++  __lasx_xvst (__m256i_op0, (unsigned long *)&__m256i_result, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_op0, __m256i_result);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0;
++  __lasx_xvstx (__m256i_op0, (unsigned long *)&__m256i_result, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_op0, __m256i_result);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0;
++  *((unsigned long *)&__m256i_result[0]) = 0x8d;
++  *((unsigned long *)&__m256i_out[3]) = 0x0;
++  *((unsigned long *)&__m256i_out[2]) = 0x0;
++  *((unsigned long *)&__m256i_out[1]) = 0x0;
++  *((unsigned long *)&__m256i_out[0]) = 0x0;
++  __lasx_xvstelm_b (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0;
++  *((unsigned long *)&__m256i_result[0]) = 0x9100;
++  *((unsigned long *)&__m256i_out[3]) = 0x0;
++  *((unsigned long *)&__m256i_out[2]) = 0x0;
++  *((unsigned long *)&__m256i_out[1]) = 0x0;
++  *((unsigned long *)&__m256i_out[0]) = 0x0;
++  __lasx_xvstelm_h (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0;
++  *((unsigned long *)&__m256i_result[0]) = 0xe9179100;
++  *((unsigned long *)&__m256i_out[3]) = 0x0;
++  *((unsigned long *)&__m256i_out[2]) = 0x0;
++  *((unsigned long *)&__m256i_out[1]) = 0x0;
++  *((unsigned long *)&__m256i_out[0]) = 0x0;
++  __lasx_xvstelm_w (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0;
++  *((unsigned long *)&__m256i_result[0]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_out[3]) = 0x0;
++  *((unsigned long *)&__m256i_out[2]) = 0x0;
++  *((unsigned long *)&__m256i_out[1]) = 0x0;
++  *((unsigned long *)&__m256i_out[0]) = 0x0;
++  __lasx_xvstelm_d (__m256i_op0, (unsigned long *)&__m256i_out, 0x0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvhadd-xvhaddw-xv.patch b/LoongArch-Add-tests-for-ASX-vector-xvhadd-xvhaddw-xv.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8bf92a09a03c53ea49f49192dc776da69eb9693a
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvhadd-xvhaddw-xv.patch
@@ -0,0 +1,6930 @@
+From 03f7a61fa5efb197cdd66014552aa8727677b891 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:19:28 +0800
+Subject: [PATCH 100/124] LoongArch: Add tests for ASX vector
+ xvhadd/xvhaddw/xvmaddwev/xvmaddwod instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmadd.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvhaddw-1.c    | 560 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvhaddw-2.c    | 650 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmadd.c       | 742 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaddwev-1.c  | 856 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaddwev-2.c  | 723 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaddwev-3.c  | 940 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaddwod-1.c  | 742 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaddwod-2.c  | 799 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaddwod-3.c  | 820 +++++++++++++++
+ 9 files changed, 6832 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
+new file mode 100644
+index 000000000..1cf0ec698
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-1.c
+@@ -0,0 +1,560 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7ffffffffffff1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbffffffffffffeff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7ffffffffffff1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbffffffffffffeff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff6fffefffe005b;
++  *((unsigned long *)&__m256i_result[2]) = 0xffbefffefffe005a;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff6fffefffe005b;
++  *((unsigned long *)&__m256i_result[0]) = 0xffbefffefffe005a;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000000000000;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000060000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000060000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000060000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000060000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fffffffefffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xff7fffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fffffffefffe;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000023;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000023;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff8fffffff8ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff8fffffff8ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff8fffffff8ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff8fffffff8ffff;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffefffffffe;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffc000400780087;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fe80fffc0183;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffc000400f8ff87;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff80ff00ff7c0183;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000800;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffc00000078;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffc;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffc000000f8;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff790000077c;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007ff000000000;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff1fffffff1;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000001ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000001ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000f6ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000f6ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000017f0000017d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000017f0000017f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000017f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000017f;
++  __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000017000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000017000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000017000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000017000000080;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffefef800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffefef800;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000400010004;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000400010004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000400010004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000400010004;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffffffffffff;
++  __m256i_out = __lasx_xvhaddw_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
+new file mode 100644
+index 000000000..14ec081a4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvhaddw-2.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000015d050192cb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x028e509508b16ee9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000033ff01020e23;
++  *((unsigned long *)&__m256i_op0[0]) = 0x151196b58fd1114d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001ffaa0000040e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000716800007bb6;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001ffe80001fe9c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000228200001680;
++  *((unsigned long *)&__m256i_result[3]) = 0x000100ab000500a0;
++  *((unsigned long *)&__m256i_result[2]) = 0x000200b800080124;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001011b000200aa;
++  *((unsigned long *)&__m256i_result[0]) = 0x00150118008f0091;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001341c4000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001000310000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007f7f00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007f7f00007fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000007f00340040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000007f000000ff;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000180007f7f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffafaf80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000180007f7f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffafaf80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe01ae00ff00ff;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0102;
++  *((unsigned long *)&__m256i_result[2]) = 0x007c000000810081;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0102;
++  *((unsigned long *)&__m256i_result[0]) = 0x007c000000810081;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffc0003fffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffc0003fffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007fc0083fc7c007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007fc0083fc7c007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f010700c70106;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f010700c70106;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0010000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0010000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000000010000;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_hu_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000002a5;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000002a5;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0e0e0e0e0e0e0e0e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000e0e0e0e0e0e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff8fff9000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff8fff9000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff8fff9000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00010e0d00009e0e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00009000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000e0e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00009000;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000300000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000300000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000300000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000300000004;
++  __m256i_out = __lasx_xvhaddw_wu_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0501030102141923;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffd5020738b43ddb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x010200023b8e4174;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff4ff4e11410b40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf259905a09c23be0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3a89167aeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000501e99b;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000109973de7;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001020f22;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001890b7a39;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007f000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff0000;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001fff9fff8;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001fff9fff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000001fff9fff8;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001fff9fff8;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffff81ffffeb2f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f6ee0570b4e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000018de;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffb4ffcec0f1;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffff81ffffeb2f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003f6ee0570b4e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000018de;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffb4ffcec0f1;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001ffffeab0;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000e0574abc;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000018de;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001ffcec0a5;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffe367cc82f8989a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4f90000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffc3aaa8d58f43c8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000082f8989a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000d58f43c8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000170017;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000170017;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000004411;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000236200005111;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000175e0000490d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000236200005111;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000175e0000490d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000002362;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000010000175d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000002362;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000010000175d;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000010000ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000010000ff00;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100003ffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100003fcd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100003ffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100003fcd;
++  __m256i_out = __lasx_xvhaddw_du_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffcfa;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x9090909090909090;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xf3f3f3f3f3f3f4f3;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xf3f3f3f3f3f3f4f3;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000800080008000;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000001fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000001ce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000001fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001ce;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000001fd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000001fd;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfa15fa15fa15fa14;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfa15fa15fa15fa14;
++  __m256i_out = __lasx_xvhaddw_qu_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
+new file mode 100644
+index 000000000..f9634b128
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmadd.c
+@@ -0,0 +1,742 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x042f0500cfea969a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x58569d7be9179100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa98d4f7a77c308ee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0ad152a5ad72feeb;
++  *((unsigned long *)&__m256i_op1[3]) = 0x34ec5670cd4b5ec0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4f111e4b8e0d7291;
++  *((unsigned long *)&__m256i_op1[1]) = 0xeaa81f47dc3bdd09;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0e0d5fde5df99830;
++  *((unsigned long *)&__m256i_op2[3]) = 0x80c72fcd40fb3bc0;
++  *((unsigned long *)&__m256i_op2[2]) = 0x84bd087966d4ace0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x26aa68b274dc1322;
++  *((unsigned long *)&__m256i_op2[0]) = 0xe072db2bb9d4cd40;
++  *((unsigned long *)&__m256i_result[3]) = 0x044819410d87e69a;
++  *((unsigned long *)&__m256i_result[2]) = 0x21d3905ae3e93be0;
++  *((unsigned long *)&__m256i_result[1]) = 0x5125883a30da0f20;
++  *((unsigned long *)&__m256i_result[0]) = 0x6d7b2d3ac2777aeb;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffeff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffeff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff1f;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffeff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff1f;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffeff;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fffe00010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000fffe00010001;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000607f700000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000607f700000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000002e0000ffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000002e0000002e;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000002e0000fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_result[2]) = 0x000607f700000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_result[0]) = 0x000607f700000001;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000003f00000000;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x370036db92c4007e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x371462137c1e0049;
++  *((unsigned long *)&__m256i_op0[1]) = 0x800000fe7e02fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x371c413b999d04b5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffff00ff00ffff00;
++  *((unsigned long *)&__m256i_op2[2]) = 0xff000000ff00ff00;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffff00ffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xff00000000ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x37fe365b920d007e;
++  *((unsigned long *)&__m256i_result[2]) = 0x381462137d1e0149;
++  *((unsigned long *)&__m256i_result[1]) = 0x80ff00fe7e020060;
++  *((unsigned long *)&__m256i_result[0]) = 0x381c413b99cd04dd;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_op2[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op2[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op2[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_result[3]) = 0xd100645944100004;
++  *((unsigned long *)&__m256i_result[2]) = 0xd1908469108400d1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000404040104;
++  *((unsigned long *)&__m256i_result[0]) = 0xd1108199714910f9;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x61f1000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0108000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x61f1a18100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0108000000000000;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000055555555;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000004;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000055555555;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000004;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2aaaaaaa2aaaaaab;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x2aaaaaaa2aaaaaab;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_result[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_result[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_result[0]) = 0x7c007c007c007c00;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fd00ffff02fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00007f7f00007f00;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00007f7f00007fff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0100;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffee0000ff4c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ff050000ff3c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000fff90000ff78;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffa80000ff31;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0101010127272525;
++  *((unsigned long *)&__m256i_op2[2]) = 0x23a2a121179e951d;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0101010127272525;
++  *((unsigned long *)&__m256i_op2[0]) = 0x23a2a121179e951d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fefffffffffffff;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x008e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x008e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0007ffff0007ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0007ffff0007ffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x008e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x008e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007000008e700000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007000008e700000;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op2[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op2[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op2[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_result[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100000100000001;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00009fff00002001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00009fff00002001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0001497c98ea4fca;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0001497c98ea4fca;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000006715b036;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000006715b036;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmadd_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007f80;
++  __m256i_out = __lasx_xvmadd_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d;
++  __m256i_out = __lasx_xvmadd_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f80ffffff808000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f80ffffff808000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffe0ffe000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffe0ffe000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe0ffe000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fa0001fff808000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe0ffe000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fa0001fff808000;
++  __m256i_out = __lasx_xvmadd_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
+new file mode 100644
+index 000000000..6238685bc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-1.c
+@@ -0,0 +1,856 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000100000000;
++  __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff8fff8fff8fff8;
++  __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000f7f8f7f8;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003f78;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000f7f8f7f8;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003f78;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0x805f0000ffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x805f0000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000f7f8f7f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000003f78;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000f7f8f7f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000003f78;
++  __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002001800ff0078;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01f8007001f80070;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0002001800ff0078;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01f8007001f80070;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0218ff78fc38fc38;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0218ff78fc38fc38;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256i_result[3]) = 0x00300b40fc001678;
++  *((unsigned long *)&__m256i_result[2]) = 0xfc00000000001f80;
++  *((unsigned long *)&__m256i_result[1]) = 0x00300b40fc001678;
++  *((unsigned long *)&__m256i_result[0]) = 0xfc00000000001f80;
++  __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe8440000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe8440000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffe8440000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffe8440000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffe8440000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffe8440000;
++  __m256i_out = __lasx_xvmaddwev_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef;
++  __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000e0e0e0e0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe0e0e0e0e0e0e0e0;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xebfd15f000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01700498ff8f1600;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf520c7c024221300;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00802fd0ff540a80;
++  *((unsigned long *)&__m256i_op1[3]) = 0xebfd15f000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01700498ff8f1600;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf520c7c024221300;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00802fd0ff540a80;
++  *((unsigned long *)&__m256i_op2[3]) = 0xf96d674800000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x44a4330e2c7116c0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x14187a7822b653c0;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfbe0b866962b96d0;
++  *((unsigned long *)&__m256i_result[3]) = 0xebfd15f000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x015c6a7facc39600;
++  *((unsigned long *)&__m256i_result[1]) = 0xfa070a51cbd95300;
++  *((unsigned long *)&__m256i_result[0]) = 0x00c7463075439280;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0555550000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0555550000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_result[3]) = 0x0555550000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0555550000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x807e80fd80fe80fd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80938013800d8002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x807e80fd80fe0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80938013800d0005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00001fff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00001fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x807e80fd80fe80fd;
++  *((unsigned long *)&__m256i_result[2]) = 0x80938013800d8002;
++  *((unsigned long *)&__m256i_result[1]) = 0x807e80fd80fe0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x80938013800d0005;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_op1[2]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_op1[0]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_result[2]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_result[0]) = 0x556caad9aabbaa88;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff7f7f7fff7fffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff7f7f7fff7fffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3f7f7f7eff800000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3f7f7f7eff800000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffeffffffdd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x002affaa00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffffdd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffdc;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001000b000b;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001000b000b;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op2[3]) = 0x2020080800000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000004044f4f;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0ef11ae55a5a6767;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_result[2]) = 0x6040190d20227a78;
++  *((unsigned long *)&__m256i_result[1]) = 0x132feeabd2d33b38;
++  *((unsigned long *)&__m256i_result[0]) = 0x6040190d00000000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x3);
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000fe0000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000fe0000000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000118;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000118;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1086658a18ba3594;
++  *((unsigned long *)&__m256i_op0[2]) = 0x160fe9f000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1086658a18ba3594;
++  *((unsigned long *)&__m256i_op0[0]) = 0x160fe9f000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_op1[1]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe161616161614f61;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000616100004f61;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000616100004f61;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000616100004f61;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000616100004f61;
++  *((unsigned long *)&__m256i_result[3]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_result[2]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_result[1]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_result[0]) = 0x4df5b1a3ed5e02c1;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff000100000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01fffffffe000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01fffffffe000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x01fffffffe000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x01fffffffe000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfe00000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x817f11ed81800ff0;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00000000ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00000000ffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000ffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000ffffff;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff80000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
+new file mode 100644
+index 000000000..5fa080375
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-2.c
+@@ -0,0 +1,723 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f7f7f80;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0200000002000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0200000002000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff01fb0408;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff01fb0408;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00003cfc0000006f;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00003cfc0000006f;
++  *((unsigned long *)&__m256i_result[3]) = 0x02007f8002000400;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000c5dc02005f64;
++  *((unsigned long *)&__m256i_result[1]) = 0x02007f8002000400;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000c5dc02005f64;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000700020004;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000700020004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0040000000000003;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000070002000a;
++  __m256i_out = __lasx_xvmaddwev_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x41dfffc000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x41dfffdfffc00000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0001fbf9fbe29f52;
++  *((unsigned long *)&__m256i_op2[2]) = 0x5b409c0000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0001fbf9fbe29f52;
++  *((unsigned long *)&__m256i_op2[0]) = 0x5b409c0000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfbba01c0003f7e3f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_result[1]) = 0xfbd884e7003f7e3f;
++  *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff4000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000403f3fff;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x40effc0000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x40effc0000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00010003fc827a86;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f7f7f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f017fc0ddbf7d86;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00153f1594ea02ff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffffffff0100;
++  *((unsigned long *)&__m256i_op2[0]) = 0xff15c1ea95ea02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xc06e7c817f7e8081;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000bd3f016f177a;
++  *((unsigned long *)&__m256i_result[1]) = 0xc06e7c8100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x60c485800178147a;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000011f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000011f;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000192540;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000192540;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff88ff88;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffdf5b000041b0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffdf5b000041b0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op2[2]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op2[0]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffdf5b000041b0;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffdf5b000041b0;
++  __m256i_out = __lasx_xvmaddwev_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000fb8000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000fb8000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x807f807f00000380;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007380;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc03fc03f000001c0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001c0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_result[3]) = 0x807f807f00000380;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007380;
++  *((unsigned long *)&__m256i_result[1]) = 0xc03fc03f000001c0;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000001c0;
++  __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff80ff00ff80ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff80ff00ff80ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x007f00ff007f00fe;
++  *((unsigned long *)&__m256i_op2[2]) = 0xf711ee11f711ee91;
++  *((unsigned long *)&__m256i_op2[1]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xf711ee11f711ee11;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff80ff00ff80ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff80ff00ff80ff01;
++  __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffff800300000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffff800300000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvmaddwev_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000045ff740023;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000045ff740023;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000000155b200;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000b70000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
+new file mode 100644
+index 000000000..40549448e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwev-3.c
+@@ -0,0 +1,940 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0003ff540000081c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0003ffd00003fd38;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001ffaa0000040e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000716800007bb6;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001ffe80001fe9c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000228200001680;
++  *((unsigned long *)&__m256i_op2[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256i_op2[2]) = 0xc5c085372cfabfba;
++  *((unsigned long *)&__m256i_op2[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0658f2dc0eb21e3c;
++  *((unsigned long *)&__m256i_result[3]) = 0x002e4db200000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000315ac0000d658;
++  *((unsigned long *)&__m256i_result[1]) = 0x00735278007cf94c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0003ed8800031b38;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0001ff04;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff02a0fefc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000cfefd;
++  *((unsigned long *)&__m256i_op1[3]) = 0x6100000800060005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5ee1c073b800c916;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5ff00007fff9fff3;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffbfefa;
++  *((unsigned long *)&__m256i_result[2]) = 0xff1eff1902a0fea4;
++  *((unsigned long *)&__m256i_result[1]) = 0xff10000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff10fff9ff13fd17;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfafafafafafafafa;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fefefe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xf9fbf9fbf9fbf9fb;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0xfdfffdfffdfffdff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff01fffffdff;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000627;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000627;
++  *((unsigned long *)&__m256i_op2[3]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000003fff3fff;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000400;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000400;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ef32;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfffffffffdd97dc4;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfffffffffdd97dc4;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010100f10100fd4;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op0[2]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op0[1]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op0[0]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[2]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[1]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_result[0]) = 0xebebebebebebebeb;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffe0ffe000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001f001fffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffe0ffe000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x34ec5670cd4b5ec0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4f111e4b8e0d7291;
++  *((unsigned long *)&__m256i_op0[1]) = 0xeaa81f47dc3bdd09;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0e0d5fde5df99830;
++  *((unsigned long *)&__m256i_op1[3]) = 0x67390c19e4b17547;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbacda0f96d2cec01;
++  *((unsigned long *)&__m256i_op1[1]) = 0xee20ad1adae2cc16;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5a2003c6a406fe53;
++  *((unsigned long *)&__m256i_op2[3]) = 0x80c72fcd40fb3bc0;
++  *((unsigned long *)&__m256i_op2[2]) = 0x84bd087966d4ace0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x26aa68b274dc1322;
++  *((unsigned long *)&__m256i_op2[0]) = 0xe072db2bb9d4cd40;
++  *((unsigned long *)&__m256i_result[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256i_result[2]) = 0x5464fbfc416b9f71;
++  *((unsigned long *)&__m256i_result[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256i_result[0]) = 0x0d8264202b8ea3f0;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff0000ffff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff000000ffffff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffffffff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01fa022a01a401e5;
++  *((unsigned long *)&__m256i_op1[2]) = 0x030d03aa0079029b;
++  *((unsigned long *)&__m256i_op1[1]) = 0x024c01f901950261;
++  *((unsigned long *)&__m256i_op1[0]) = 0x008102c2008a029f;
++  *((unsigned long *)&__m256i_op2[3]) = 0x002e4db200000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000315ac0000d658;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00735278007cf94c;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0003ed8800031b38;
++  *((unsigned long *)&__m256i_result[3]) = 0x01a72334ffff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xff4f6838ff937648;
++  *((unsigned long *)&__m256i_result[1]) = 0x00a2afb7fff00ecb;
++  *((unsigned long *)&__m256i_result[0]) = 0xffce110f004658c7;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003a099512;
++  *((unsigned long *)&__m256i_op0[1]) = 0x280ac9da313763f5;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe032c738adcc6bbf;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfffe000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000ffff00010000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0001000100020001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000fffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff0001;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003a099512;
++  *((unsigned long *)&__m256i_result[1]) = 0x280ac9da313763f5;
++  *((unsigned long *)&__m256i_result[0]) = 0xe032c738adcc6bbf;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000045f3fb;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000045f3fb;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00010003;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001f0000ffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000060008;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000000c005b;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfffffffffffe0000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000040053;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff0007fff7;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff005affa4;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffe100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000053ffac;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x5fff5fff607f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000420080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x5fff5fff607f0000;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100004300000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100004300000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op2[2]) = 0xff00010001000100;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op2[0]) = 0xff00010001000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x01ffff4300ffff00;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0607ffff0607;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffb8579f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffdbbbcf;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffb8579f;
++  __m256i_out
++      = __lasx_xvmaddwev_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out
++      = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00b200b300800080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00b200b300800080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_result[2]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_result[1]) = 0x00b200b300800080;
++  *((unsigned long *)&__m256i_result[0]) = 0x00b200b300800080;
++  __m256i_out
++      = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000404040;
++  __m256i_out
++      = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1fa0000000080000;
++  __m256i_out
++      = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001;
++  __m256i_out
++      = __lasx_xvmaddwev_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000eef14fe8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000eef14fe8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202020201010000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfe02fe02fee5fe22;
++  *((unsigned long *)&__m256i_op2[0]) = 0xff49fe4200000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffe928f1313c9cc;
++  *((unsigned long *)&__m256i_result[0]) = 0x4244020201010000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f;
++  *((unsigned long *)&__m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3f;
++  *((unsigned long *)&__m256i_op2[0]) = 0x3f3f3f3f00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_result[2]) = 0xc6c6c6c68787878a;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_result[0]) = 0x8787878a00000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op2[2]) = 0xc6c6c6c68787878a;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8787878a00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe3;
++  *((unsigned long *)&__m256i_result[2]) = 0x63636344c3c3c4f6;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffc3;
++  *((unsigned long *)&__m256i_result[0]) = 0xc3c3c500fffffff6;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffbfffcffeffff0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffbfffcffeffff0;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000b0cfffff4f3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000f9bb562f56c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000b0cfffff4f3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000f9bb562f56c80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op2[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op2[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0018761ed60b5d7f;
++  *((unsigned long *)&__m256i_result[2]) = 0xabdcdc9938afafe9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0018761ed60b5d7f;
++  *((unsigned long *)&__m256i_result[0]) = 0xabdcdc9938afafe9;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
+new file mode 100644
+index 000000000..683876933
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-1.c
+@@ -0,0 +1,742 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op2[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[3]) = 0x1031146010201020;
++  *((unsigned long *)&__m256i_result[2]) = 0x1020102010201020;
++  *((unsigned long *)&__m256i_result[1]) = 0x1031146010201020;
++  *((unsigned long *)&__m256i_result[0]) = 0x1020102010201020;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3d3d3d3d3d3d3d3d;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfff8fffffff8ffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfff8fffffff8ffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfff8fffffff8ffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfff8fffffff8ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x94d7fb5200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x94d7fb5200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x94d7fb5200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x94d7fb5200000000;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000020000010201;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000020000010201;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000020000010201;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000020000010201;
++  __m256i_out = __lasx_xvmaddwod_h_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op2[2]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op2[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op2[0]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000017e;
++  __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f3fc6c68787;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f87870000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003e3ec6c68686;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffeff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003e3e87870000;
++  __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_op0[2]) = 0x019d00a2003a0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_op0[0]) = 0x019d00a2003a0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_result[2]) = 0x019d00a20039fff9;
++  *((unsigned long *)&__m256i_result[1]) = 0x01fe007a01c40110;
++  *((unsigned long *)&__m256i_result[0]) = 0x019d00a2003a0000;
++  __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000003cc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000003cc0;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000003cc0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000003cc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_w_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000f3280000dfff;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff90ffffff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff90ffffff80;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3fffffffff7f0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3fffffffff7f0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000c7aff7c00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000c7aff7c00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000002030000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x030303670101fd90;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000002030000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x030303670101fd90;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3ffffffffc7bfc99;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3ffffffffc7bfc99;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0200000202000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0200000202000002;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000100080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0001000100010000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x020afefb08140000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0003fffc00060000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf800f7fff8ffc0ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xf8fff7fff7ffa000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800e000;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_q_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
+new file mode 100644
+index 000000000..f9f88b654
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-2.c
+@@ -0,0 +1,799 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000ffff8000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff80008000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800080008000b8f1;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x074132a240000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000ffff8000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x06f880008000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x800080008000b8f1;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000c0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000c0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000c0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000c0;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000012481e4950;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000001658166830;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1f60010000080100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1f60010000080100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x1fa0000000080000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1f60010000080100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1f60010000080100;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffeffed;
++  *((unsigned long *)&__m256i_op2[3]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xbf3ffffffffeffed;
++  *((unsigned long *)&__m256i_result[2]) = 0xbf3ffffffffeffed;
++  *((unsigned long *)&__m256i_result[1]) = 0xbf3ffffffffeffed;
++  *((unsigned long *)&__m256i_result[0]) = 0xbf3ffffffffeffed;
++  __m256i_out = __lasx_xvmaddwod_h_bu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0ff80100ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0ff80100ffffffff;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x34000000fff00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff6e00000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3380000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x363c0000fff3c000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffb7146213;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffc1e0049;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffb71c413b;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf3317da580000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x34000000fff00000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff6e00000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x3380000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x363c0000fff3c000;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe000ffffffffff;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01c03f8034c03200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3dc02b400a003400;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01c03f8034c03200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3dc02b400a003400;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01c03f8034c03200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3dc02b400a003400;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01c03f8034c03200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3dc02b400a003400;
++  *((unsigned long *)&__m256i_op2[3]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op2[2]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_op2[1]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op2[0]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_result[3]) = 0x01ce3c0050d32d40;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fadafc013acf600;
++  *((unsigned long *)&__m256i_result[1]) = 0x01ce3c0050d32d40;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fadafc013acf600;
++  __m256i_out = __lasx_xvmaddwod_w_hu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffecffffffec;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffecffffffec;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfffffefdfffffefd;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000100;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffff7d80000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000100;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000001fdfffffe02;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff01fefffeff02;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000001fdfffffe02;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000001fefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff01fefffeff02;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffdfffffffdffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffddffdeffb5ff8d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffdfffffffdffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffddffdeffb5ff8d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffeeffaf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffeeffaf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfffdfffffffdffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffddffdeffb5ff8d;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfffdfffffffdffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffddffdeffb5ff8d;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffcffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0febedc9bb95dd8f;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffcffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0febedc9bb95dd8f;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000545400;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000545400;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff7bfffff1;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff80007fe9;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff7bfffff1;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff80007fe9;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010511c54440437;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010511c54440437;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000103fca1bd;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000103fca1bd;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000103fca1bd;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000103fca1bd;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010511c54440438;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010511c54440438;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwod_d_wu (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000045ff740023;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000045ff740023;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000fffe00800022;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000000155b200;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000b70000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000016e00;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000001e001e001e0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000001e001e001e0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaddwev_q_du (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
+new file mode 100644
+index 000000000..5210e4cf9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaddwod-3.c
+@@ -0,0 +1,820 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01ff01ff01ff01;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000b8f81b8c850f4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000b8f81b8c850f4;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000b8f81b8c850f4;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000b8f81b8c850f4;
++  *((unsigned long *)&__m256i_result[3]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_result[2]) = 0x000b2673a90896a4;
++  *((unsigned long *)&__m256i_result[1]) = 0x000050504c4c2362;
++  *((unsigned long *)&__m256i_result[0]) = 0x000b2673a90896a4;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc03fffffffc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffc00000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc03fffffffc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffc00000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_result[2]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc600000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff000003c0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff000003c0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000fc300000fc40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7c030000ffc4;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7c030000ffc4;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00f7000000f70006;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00f7000000f70006;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0007a861;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0007a861;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_h_bu_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xbff0000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0002fffeffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0002fffeffff;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000505;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000627;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000627;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1f60000000c00000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1f60000000c00000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000627;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000627;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x437f201f201f2020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x037f201f001f2020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x437f201f201f2020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x037f201f001f2020;
++  *((unsigned long *)&__m256i_op2[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x21bb481000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01bf481000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x21bb481000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01bf481000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000ffffff1dff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffff1dffffff1dff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000ffffff1dff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffff1dffffff1dff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff8001ffff0001;
++  __m256i_out
++      = __lasx_xvmaddwod_w_hu_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffe40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00f9f90079f9f9f9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x79f9f9f900000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00f9f90079f9f9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x79f9f9f900000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff8c80;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffe40;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffeffffffdd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffdc;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000f9f9f9f9;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000faf3f3f2;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfbff0000ffff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfbff0000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfbff0000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfbff0000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0101010101010110;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0101010101010110;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwod_d_wu_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000eef14fe8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202020201010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000eef14fe8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202020201010000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfe02fe02fee5fe22;
++  *((unsigned long *)&__m256i_op2[0]) = 0xff49fe4200000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000eef14fe8;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffe928f1313c9cc;
++  *((unsigned long *)&__m256i_result[0]) = 0x4244020201010000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op2[3]) = 0x3f3f3f3f3f3f3f3f;
++  *((unsigned long *)&__m256i_op2[2]) = 0x3f3f3f3f3f3f3f3f;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3f;
++  *((unsigned long *)&__m256i_op2[0]) = 0x3f3f3f3f00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_result[2]) = 0xc6c6c6c68787878a;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_result[0]) = 0x8787878a00000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffff6;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op2[2]) = 0xc6c6c6c68787878a;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8787878a00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffe3;
++  *((unsigned long *)&__m256i_result[2]) = 0x63636344c3c3c4f6;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffc3;
++  *((unsigned long *)&__m256i_result[0]) = 0xc3c3c500fffffff6;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffbfffcffeffff0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffbfffcffeffff0;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000b0cfffff4f3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000f9bb562f56c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000b0cfffff4f3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000f9bb562f56c80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op2[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op2[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0018761ed60b5d7f;
++  *((unsigned long *)&__m256i_result[2]) = 0xabdcdc9938afafe9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0018761ed60b5d7f;
++  *((unsigned long *)&__m256i_result[0]) = 0xabdcdc9938afafe9;
++  __m256i_out
++      = __lasx_xvmaddwev_q_du_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvldi-xvmskgez-xv.patch b/LoongArch-Add-tests-for-ASX-vector-xvldi-xvmskgez-xv.patch
new file mode 100644
index 0000000000000000000000000000000000000000..100fe7e0db748f1b4fa5dc9ad6c3a62cfea33985
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvldi-xvmskgez-xv.patch
@@ -0,0 +1,2735 @@
+From 8d8564be4eaa8134acab6a184da36f3620a82f6f Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:39:31 +0800
+Subject: [PATCH 104/124] LoongArch: Add tests for ASX vector
+ xvldi/xvmskgez/xvmskltz/xvmsknz/xvmuh /xvsigncov instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvldi.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvldi.c        |  83 +++
+ .../loongarch/vector/lasx/lasx-xvmskgez.c     |  86 +++
+ .../loongarch/vector/lasx/lasx-xvmskltz.c     | 373 ++++++++++
+ .../loongarch/vector/lasx/lasx-xvmsknz.c      | 163 +++++
+ .../loongarch/vector/lasx/lasx-xvmuh-1.c      | 650 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmuh-2.c      | 635 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsigncov.c    | 665 ++++++++++++++++++
+ 7 files changed, 2655 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
+new file mode 100644
+index 000000000..84b3c6599
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldi.c
+@@ -0,0 +1,83 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvldi (-4080);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0xfebcfebcfebcfebc;
++  *((unsigned long *)&__m256i_result[2]) = 0xfebcfebcfebcfebc;
++  *((unsigned long *)&__m256i_result[1]) = 0xfebcfebcfebcfebc;
++  *((unsigned long *)&__m256i_result[0]) = 0xfebcfebcfebcfebc;
++  __m256i_out = __lasx_xvldi (1724);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fd1000000000000;
++  __m256i_out = __lasx_xvldi (-943);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c;
++  __m256i_out = __lasx_xvldi (1820);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7200000072000000;
++  __m256i_out = __lasx_xvldi (-3214);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0xffffff1dffffff1d;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff1dffffff1d;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffff1dffffff1d;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff1dffffff1d;
++  __m256i_out = __lasx_xvldi (2845);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvldi (-4080);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fd1000000000000;
++  __m256i_out = __lasx_xvldi (-943);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_result[3]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7200000072000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7200000072000000;
++  __m256i_out = __lasx_xvldi (-3214);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
+new file mode 100644
+index 000000000..15e66ae38
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskgez.c
+@@ -0,0 +1,86 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00;
++  __m256i_out = __lasx_xvmskgez_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvmskgez_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskgez_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskgez_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000203ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000203ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000fafe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000fafe;
++  __m256i_out = __lasx_xvmskgez_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvmskgez_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
+new file mode 100644
+index 000000000..53b21f98b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmskltz.c
+@@ -0,0 +1,373 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3922d40000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000c85221c0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7ebfab800000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f20;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000009f0;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000022;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1f9d9f9d1f9db29f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1f9d9f9d201cb39e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x201c9f9d201cb29f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1f9d9f9d201cb39e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007773;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000003373;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0049ffd2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00630064004bffd0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe0f02081c1c4ce2c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8008000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe0f02081c1c4ce2c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8008000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000b8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000b8;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fffc0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000022;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000088;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000088;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x296e000018170000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x296e000018170000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000404;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000404;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffc000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffeff000c057c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffc000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffeff000c057c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000f0f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000f0f0;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffb2f600006f48;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000008c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000008c;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800300000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff801000000010;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800300000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000cc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000cc;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5);
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000055;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000054;
++  __m256i_out = __lasx_xvmskltz_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmskltz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
+new file mode 100644
+index 000000000..81865fd32
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmsknz.c
+@@ -0,0 +1,163 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0020002000400040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000005555;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000005555;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000300000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000300000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004411;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000004411;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000033;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000f91;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000f91;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x006018000000001a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0060401900000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x006018000000001a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0060401900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000006170;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000006170;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000002ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000002ff;
++  __m256i_out = __lasx_xvmsknz_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
+new file mode 100644
+index 000000000..58ad8bfcd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-1.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0fff0ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000007ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000fdfcfda8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000e2821d20ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000fdfcfda8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000e2821d20ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ff8000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffff00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ff8000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x372e9d75e8aab100;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc5c085372cfabfba;
++  *((unsigned long *)&__m256i_op0[1]) = 0x31730b5beb7c99f5;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0658f2dc0eb21e3c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000019410000e69a;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf259905a0c126604;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000883a00000f20;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6d3c2d3aa1c82947;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000f647000007d6;
++  *((unsigned long *)&__m256i_result[2]) = 0x031b358c021ee663;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000faaf0000f9f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x02b4fdadfa9704df;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf7ffffffffffff1f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbffffffffffffeff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf7ffffffffffff1f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbffffffffffffeff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffe05fc47b400;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffe06003fc000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffe05fc47b400;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffe06003fc000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff8900000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff8900000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000aaaa0000aaaa;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000aaaa00008bfe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000aaaa0000aaaa;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff5556aaaa;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff5556aaaa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000001fff0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000feff0001ffb8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fff0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000feff0001ffb8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffe000ffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op1[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000007fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000036a37;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000007fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000004def9;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffe0001;
++  __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_result[2]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_result[1]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_result[0]) = 0x000408080c111414;
++  __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
+new file mode 100644
+index 000000000..85d24fe44
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmuh-2.c
+@@ -0,0 +1,635 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfe8bfe0efe8bfe12;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfe8bfe0efe8bfe12;
++  __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000027;
++  __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00020002ff820002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00020002ff820002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x40efffe09fa88260;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6b07ca8e013fbf01;
++  *((unsigned long *)&__m256i_op0[1]) = 0x40efffe09fa7e358;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80ce32be3e827f00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x86ff76ffff4eff42;
++  *((unsigned long *)&__m256i_op1[2]) = 0x86ffffffffff9eff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x86ff76ffff4effff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x86ff32ffaeffffa0;
++  *((unsigned long *)&__m256i_result[3]) = 0x223d76f09f3881ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x3870ca8d013e76a0;
++  *((unsigned long *)&__m256i_result[1]) = 0x223d76f09f37e357;
++  *((unsigned long *)&__m256i_result[0]) = 0x43ec0a1b2aba7ed0;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffc020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffc020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5fa0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5fa0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0f00204000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0c6a240000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0f00204000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x04a3000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x04a3000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff8000fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00017fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff8000fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00017fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000007f00fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000fe0000007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000007f00fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000fe0000007f;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffe00000ffe00000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffe00000ffe00000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fafe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fafe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff01c000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000f1000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000001341c4000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001000310000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000033e87ef1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000002e2100;
++  __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000045f3fb;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000045f3fb;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7575ffff75757595;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7575ffff7575f575;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7575ffff75757595;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7575ffff7575f575;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3aadec4f6c7975b1;
++  *((unsigned long *)&__m256i_result[2]) = 0x3abac5447fffca89;
++  *((unsigned long *)&__m256i_result[1]) = 0x3aadec4f6c7975b1;
++  *((unsigned long *)&__m256i_result[0]) = 0x3abac5447fffca89;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000010000f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000010000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmuh_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
+new file mode 100644
+index 000000000..2a6eee0fd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsigncov.c
+@@ -0,0 +1,665 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_op0[2]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_op0[1]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_op0[0]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffff605a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffff605a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0202810102020202;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202810102020202;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fefe0000fefe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fff0000fefe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fefe0000fefe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff0000fefe;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000017547fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000017547fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x807e80fd80fe80fd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80938013800d8002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x807e80fd80fe0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80938013800d0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000801380f380fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000801380f300fb;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffd5a98;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000101ff01;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff80ff00ff80ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff80ff00ff80ff01;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fd;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fd;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff000000010000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3880800037800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3901000039010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3880800037800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3901000039010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fc00000428a;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe0000fffe0012;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffeffee;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe0000fffe0012;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000001ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000001ffff;
++  __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80be0000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80be0000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000100000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff00000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdfc2df80df80df87;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xdfc2df80df80df87;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdfc2df80df80df87;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdf80df80df80df80;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdfc2df80df80df87;
++  *((unsigned long *)&__m256i_result[3]) = 0x2080208020802080;
++  *((unsigned long *)&__m256i_result[2]) = 0x203e208020802079;
++  *((unsigned long *)&__m256i_result[1]) = 0x2080208020802080;
++  *((unsigned long *)&__m256i_result[0]) = 0x203e208020802079;
++  __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f00004040;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffe05f8102;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000004e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffba8300004fc2;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffba8300004fc2;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x004100df00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00c000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x004100df00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00c000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_result[2]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc1d75053f0000000;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffa30000165a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000104000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffa30000165a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000104000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc1d75053f0000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xbe21000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000505300000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xbe21000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000505300000000;
++  __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000001880310877e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000001880310877e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000f788f788;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000f788f788;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000001faf19b60;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6c2905ae7c14c561;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000001faf19b60;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6c2905ae7c14c561;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x94d7fb5200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x94d7fb5200000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe97a1df5b41b0;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffeb664007ffd61;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe97a1df5b41b0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000180;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsigncov_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8768876887688769;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8768876887688769;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000003fffc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000003fffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffc00040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffc00040;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffdbff980038ffaf;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffafffe80004fff1;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffdbff980038ffaf;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffafffe80004fff1;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffc;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000fffd0003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffc;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000fffd0003;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0000fffd0004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0000fffd0004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0002fffd;
++  __m256i_out = __lasx_xvsigncov_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvmax-xvmaxi-xvmi.patch b/LoongArch-Add-tests-for-ASX-vector-xvmax-xvmaxi-xvmi.patch
new file mode 100644
index 0000000000000000000000000000000000000000..63f6aa074cf72a4a5b037c44b140b2d79a6fff84
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvmax-xvmaxi-xvmi.patch
@@ -0,0 +1,4124 @@
+From 00deb43164bce9740d6e2e103afce647bebc6ee3 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:31:02 +0800
+Subject: [PATCH 103/124] LoongArch: Add tests for ASX vector
+ xvmax/xvmaxi/xvmin/xvmini instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvmax-1.c      | 545 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmax-2.c      | 560 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaxi-1.c     | 471 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmaxi-2.c     | 504 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmin-1.c      | 575 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmin-2.c      | 680 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmini-1.c     | 416 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvmini-2.c     | 284 ++++++++
+ 8 files changed, 4035 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
+new file mode 100644
+index 000000000..96c6671f2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-1.c
+@@ -0,0 +1,545 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f0000007f000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f0000007f000000;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff000000;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff000000000000;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x5900000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x5900000000000000;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ffce20;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ffce20;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ee1100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000004560408;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ee1100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000004560408;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000004560420;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000004560420;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff800080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000400010004;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000400010004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000e0001000e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000e0001000e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000e0001000e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000e0001000e;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1090918800000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1090918800000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c80780000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c80780000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1c80780000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1c80780000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007f7f817f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007f7f817f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f807f007f7f817f;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffebeeaaefafb;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffebeeaaeeeeb;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffebeeaaefafb;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffebeeaaeeeeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op0[2]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op0[1]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op0[0]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff00007fff0000;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_op1[2]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_op1[0]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_result[2]) = 0x556caad9aabbaa88;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000004a557baac4;
++  *((unsigned long *)&__m256i_result[0]) = 0x556caad9aabbaa88;
++  __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006;
++  __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffe97c020010001;
++  __m256i_out = __lasx_xvmax_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
+new file mode 100644
+index 000000000..38f2c0afe
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmax-2.c
+@@ -0,0 +1,560 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffdfffffffdfffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffdfffffffdfffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffee0000ff4c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ff050000ff3c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000fff90000ff78;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffa80000ff31;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x817f11ed81800ff0;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff;
++  __m256i_out = __lasx_xvmax_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffeeffaf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffeeffaf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffeeffaf;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010100f10100fd4;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffeeffaf;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010100f10100fd4;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_result[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_result[0]) = 0xf0f0f0f0f0f0f0f0;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_result[2]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc600000000000000;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000d0d8ffffeecf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000d0d8ffffeecf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffd8ffc7ffffdf0d;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffd8ffc7ffffdf0d;
++  __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000003f8000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000014402080144;
++  __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f433c78;
++  __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff97a2;
++  __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000070002000a;
++  __m256i_out = __lasx_xvmax_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffcf800fffcf800;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcf800;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000005e02;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000005e02;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc7418a023680;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff8845bb954b00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffc7418a023680;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff8845bb954b00;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000002a5429;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003f800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000001400;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffff08a7de0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffff07c4170;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff08a7de0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffff07c4170;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffff08a7de0;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffff07c4170;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffff08a7de0;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffff07c4170;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmax_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
+new file mode 100644
+index 000000000..e804a0a45
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-1.c
+@@ -0,0 +1,471 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffd10000006459;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000441000000004;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000040400000104;
++  *((unsigned long *)&__m256i_result[3]) = 0x0f0f0f0f0f0f6459;
++  *((unsigned long *)&__m256i_result[2]) = 0x0f0f44100f0f0f0f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0f0f0f0f0f0f0f0f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0f0f0f0f0f0f0f0f;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8080808180808093;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80808081808080fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8080808180808093;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80808081808080fb;
++  *((unsigned long *)&__m256i_result[3]) = 0xf5f5f5f5f5f5f5f5;
++  *((unsigned long *)&__m256i_result[2]) = 0xf5f5f5f5f5f5f5fe;
++  *((unsigned long *)&__m256i_result[1]) = 0xf5f5f5f5f5f5f5f5;
++  *((unsigned long *)&__m256i_result[0]) = 0xf5f5f5f5f5f5f5fb;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_result[2]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_result[1]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_result[0]) = 0x0909090909090909;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a7f0a0a0a;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0707070707070707;
++  *((unsigned long *)&__m256i_result[2]) = 0x0707070707070707;
++  *((unsigned long *)&__m256i_result[1]) = 0x0707070707070707;
++  *((unsigned long *)&__m256i_result[0]) = 0x0707070707070707;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_result[3]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_result[2]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_result[1]) = 0x2a2a2a2a2a2a2a2a;
++  *((unsigned long *)&__m256i_result[0]) = 0x2a2a2a2a2a2a2a2a;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0c0c0c0c0c0c0c0c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0c0c0c0c0c0c0c0c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0c0c0c0c0c0c0c0c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0c0c0c0c0c0c0c0c;
++  __m256i_out = __lasx_xvmaxi_b (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[0]) = 0x0005000500050005;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffc00000ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffc00000ffc0ffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff90000fff9fff9;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00080008000801ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0008000800080008;
++  *((unsigned long *)&__m256i_result[1]) = 0x00080008000801ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0008000800080008;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000c9;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000008000165a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000008000165a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0009000900090009;
++  *((unsigned long *)&__m256i_result[2]) = 0x000900090009165a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0009000900090009;
++  *((unsigned long *)&__m256i_result[0]) = 0x000900090009165a;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_result[3]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_result[2]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m256i_result[0]) = 0x000a000a000a000a;
++  __m256i_out = __lasx_xvmaxi_h (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000401000000;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0110000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0110000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0110000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0110000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0110000000000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0110000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0110000000000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0110000000000080;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000e0000000e;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff400000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff400000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000900000009;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000081f20607a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000081f20607a;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, 8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmaxi_w (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000004560420;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000004560420;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000004560420;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000004560420;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7e00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7e00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_result[2]) = 0x7e00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007e1c7e1c;
++  *((unsigned long *)&__m256i_result[0]) = 0x7e00000000000000;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff5;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff5;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff5;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff5;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007b007e;
++  __m256i_out = __lasx_xvmaxi_d (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
+new file mode 100644
+index 000000000..b6b34063c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmaxi-2.c
+@@ -0,0 +1,504 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a0a0a0a0a;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_result[2]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_result[1]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_result[0]) = 0x1717171717171717;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe00007f000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1616161616161616;
++  *((unsigned long *)&__m256i_result[2]) = 0x161616167fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ffe16167f161616;
++  *((unsigned long *)&__m256i_result[0]) = 0x161616167fffffff;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000feb60000b7d0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000feb60000c7eb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000feb60000b7d0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000feb60000c7eb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0707feb60707c7eb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0707feb60707c7eb;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_result[2]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_result[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m256i_result[0]) = 0x1111111111111111;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffa3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000165a;
++  *((unsigned long *)&__m256i_result[3]) = 0x1818ffff1818ffa3;
++  *((unsigned long *)&__m256i_result[2]) = 0x181818181818185a;
++  *((unsigned long *)&__m256i_result[1]) = 0x1818ffff1818ffa3;
++  *((unsigned long *)&__m256i_result[0]) = 0x181818181818185a;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_result[2]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_result[0]) = 0x1c1c1c1c1c1c1c1c;
++  __m256i_out = __lasx_xvmaxi_bu (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xeffc000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf064c6098d214127;
++  *((unsigned long *)&__m256i_op0[1]) = 0xeffc000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf064c6098d214127;
++  *((unsigned long *)&__m256i_result[3]) = 0xeffc001800180018;
++  *((unsigned long *)&__m256i_result[2]) = 0xf064c6098d214127;
++  *((unsigned long *)&__m256i_result[1]) = 0xeffc001800180018;
++  *((unsigned long *)&__m256i_result[0]) = 0xf064c6098d214127;
++  __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_result[2]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0007000700070007;
++  __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0018001800180018;
++  *((unsigned long *)&__m256i_result[2]) = 0x0018001800180018;
++  *((unsigned long *)&__m256i_result[1]) = 0x0018001800180018;
++  *((unsigned long *)&__m256i_result[0]) = 0x0018001800180018;
++  __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0017001700176d6d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0017001700176d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0017001700176d6d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0017001700176d6d;
++  __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x001fffffffe00011;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x001fffffffe00011;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvmaxi_hu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001400000014;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000e00000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000e00000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000e00000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000e00000080;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fd0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000fd0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001b00fd0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001b00fd0000;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000c7aff7c00;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000c7aff7c00;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffd017d00;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001f0000ffff;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000300000003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000300000003;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1010101010001000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1010101000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1010101010001000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x101010100000000e;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000007ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001e0007ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001e0007ffff;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000004000000fd;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000004000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000001f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000001f;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001700000017;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001700000017;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001700000017;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_result[3]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_result[2]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_result[1]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_result[0]) = 0x07fed3c8f7ad28d0;
++  __m256i_out = __lasx_xvmaxi_wu (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000001e;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000001e;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[0]) = 0x1c1b1a191c1b1a19;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000001c;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000001c;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000001c;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000001c;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000600000006;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000012;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000b;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000013;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000014;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000014;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000014;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000014;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000014;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000014;
++  __m256i_out = __lasx_xvmaxi_du (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
+new file mode 100644
+index 000000000..7dbf335c1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-1.c
+@@ -0,0 +1,575 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8001000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000800080000728;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8001800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x800080008000b8f1;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000ffff8000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff80008000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x800080008000b8f1;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000180007fe8;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_result[3]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_result[2]) = 0x8800c800c800c801;
++  *((unsigned long *)&__m256i_result[1]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_result[0]) = 0x8800c800c800c801;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7070545438381c1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7070545438381c1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7070545438381c1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7070545438381c1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff00ffff8000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff00ffff8000;
++  __m256i_out = __lasx_xvmin_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x003ff18080010201;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0100000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x003ff18080010201;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000f18080010000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000f18080010000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000d24;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000fffe;
++  __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff81ff7d;
++  __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000017f7f7f7f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000017f7f7f7f;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmin_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000004040104;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffd1108199;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000714910f9;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffd10000006459;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000441000000004;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000040400000104;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffd10000000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffd1108199;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000104;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00010003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fd00ffff02ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff02ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0100;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff1f001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffe1ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff1f001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffe1ffe0;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf000f00000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf000f00000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xf000f00000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xf000f00000000001;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007c000000810081;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007c000000810081;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4545454545454545;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4545454545454545;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4545454545454545;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4545454545454545;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffbfffffffb;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffbfffffffb;
++  __m256i_out = __lasx_xvmin_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
+new file mode 100644
+index 000000000..9eaa0e9e7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmin-2.c
+@@ -0,0 +1,680 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lasxintrin.h&gt;
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f017f807f017d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f017f807f017f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000017f0000017d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000017f0000017f;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f70000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f70000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000002080100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000002080100;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff0000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff0000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000001de2dc20;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000001de2dc20;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000001000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffbf7f7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffe651bfff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000010100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000001000100;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000f3280000dfff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1d1d1d1ddd9d9d1d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1d1d1d1d1d1d1d1d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1d1d1d1d046fdd1d;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001d1d00001d1d;
++  *((unsigned long *)&__m256i_result[2]) = 0x00001d1d00007f79;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001d1d00001d1d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001d1d0000dd1d;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0106010601060106;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0106010601060106;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0106010601060106;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0106010601060106;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00011ffb0000bee1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00011ffb0000bee1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001010600000106;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001010600000106;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffd5d5ffffd5d6;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffd5d5ffffd5d6;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8b1414140e0e0e0e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x36722a7e66972cd6;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc58a0a0a07070706;
++  *((unsigned long *)&__m256i_op1[2]) = 0x006b60e4180b0023;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1b39153f334b966a;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf1d75d79efcac002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x006b60e40e0e0e0e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x36722a7e66972cd6;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000101ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00040000;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00010e0d00009e0e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00009000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000e0e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00009000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000033;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000033;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffe36780;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00f0000000f00010;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff0ff00fff0ff10;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00f0000000f00010;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff0ff00fff0ff10;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000400100004001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000400100004001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000400100004001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000400000003ffb;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000400100004001;
++  __m256i_out = __lasx_xvmin_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x60f02081c1c4ce2c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8008000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x60f02081c1c4ce2c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8008000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010183f9999b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01010101d58f43c9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010183f9999b;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x01010101d58f43c9;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ee;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ee;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f00ff007f00ff;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007f7f7f7f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007f7f7f7f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000001fffe;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmin_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
+new file mode 100644
+index 000000000..01aabada8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-1.c
+@@ -0,0 +1,416 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf96d674800000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x44a4330e2c7116c0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x14187a7822b653c0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfbe0b866962b96d0;
++  *((unsigned long *)&__m256i_result[3]) = 0xf90c0c0c00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0ca40c0c0c0c0cc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0c0c0c0c0cb60cc0;
++  *((unsigned long *)&__m256i_result[0]) = 0xfbe0b80c960c96d0;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010bfc80010bf52;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff1bfca0011bfcb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010bfc80010bf52;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff1bfca0011bfcb;
++  *((unsigned long *)&__m256i_result[3]) = 0xf5f5bfc8f5f5bff5;
++  *((unsigned long *)&__m256i_result[2]) = 0xf5f1bfcaf5f5bfcb;
++  *((unsigned long *)&__m256i_result[1]) = 0xf5f5bfc8f5f5bff5;
++  *((unsigned long *)&__m256i_result[0]) = 0xf5f1bfcaf5f5bfcb;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m256i_result[2]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m256i_result[1]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m256i_result[0]) = 0xf8f8f8f8f8f8f8f8;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000aaabffff;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff47b4ffff5878;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000b84b0000a787;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff47b4ffff5878;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000b84b0000a787;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff07b4ffff0707;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000b8070000a787;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff07b4ffff0707;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000b8070000a787;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[0]) = 0xf7f7f7f7f7f7f7f7;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_result[2]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_result[1]) = 0xf3f3f3f3f3f3f3f3;
++  *((unsigned long *)&__m256i_result[0]) = 0xf3f3f3f3f3f3f3f3;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f9f9f9f9f9;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc30e0000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc30e0000ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_result[2]) = 0xc3030000ff800000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_result[0]) = 0xc3030000ff800000;
++  __m256i_out = __lasx_xvmini_b (__m256i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff6fff6fff6fff6;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1fffffff1fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0383634303836343;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1fffffff1fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0383634303836343;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002ffff0002ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002ffff0002ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000f7bc0001f7bd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000f93b0000017c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000f7bc0001f7bd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000f93b0000017b;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff2f7bcfff2f7bd;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff2f93bfff2fff2;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff2f7bcfff2f7bd;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff2f93bfff2fff2;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff9fff9fff9fff9;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff3fff3fff3fff3;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff2fff2fff2fff2;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff2fff2fff2fff2;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff2fff2fff2fff2;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff2fff2fff2fff2;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_h (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff2fffffff2;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff2fffffff2;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff2fffffff2;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff2fffffff2;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000010000000a;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff8fffffff8;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff8fffffff8;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff8fffffff8;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff8fffffff8;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff7fffffff7;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff0fffffff0;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmini_w (__m256i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_d (__m256i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x327f010101010102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x327f010101010102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffff4;
++  __m256i_out = __lasx_xvmini_d (__m256i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000900000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000009;
++  __m256i_out = __lasx_xvmini_d (__m256i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_d (__m256i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvmini_d (__m256i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ff007f007f00;
++  __m256i_out = __lasx_xvmini_d (__m256i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_d (__m256i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
+new file mode 100644
+index 000000000..8eb7d9355
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmini-2.c
+@@ -0,0 +1,284 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op0[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1b1b1b1b1b1b1b1b;
++  *((unsigned long *)&__m256i_result[2]) = 0x1b1b1b1b1b1b1b1b;
++  *((unsigned long *)&__m256i_result[1]) = 0x1b1b1b1b1b1b1b1b;
++  *((unsigned long *)&__m256i_result[0]) = 0x1b1b1b1b1b1b1b1b;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1e1e1e0000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1e1e1e0000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1e1e1e0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1e1e1e0000000000;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6018000000000cd1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6040190d00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0a0a0a0a00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0a0a000000000a0a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0a0a0a0a00000000;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0008001c0010001c;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0008001c0010001c;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_bu (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0007000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0007000000000000;
++  __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003ddd80007bbb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003ddd80007bbb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001700170017;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000017;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001700170017;
++  __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_hu (__m256i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffe400000707;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000af100001455;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffe400000707;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000af100001455;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff61010380;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff61010380;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000006;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmini_du (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvmul-xvmod-xvdiv.patch b/LoongArch-Add-tests-for-ASX-vector-xvmul-xvmod-xvdiv.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c50e4292b0594174252ecaca1e8c8f08384df525
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvmul-xvmod-xvdiv.patch
@@ -0,0 +1,5766 @@
+From 95ce2bef98ebcebebcdb3a9411d1c9783935ac89 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:23:35 +0800
+Subject: [PATCH 102/124] LoongArch: Add tests for ASX vector xvmul/xvmod/xvdiv
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmul.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvdiv-1.c      | 485 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvdiv-2.c      | 500 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmod-1.c      | 395 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvmod-2.c      | 410 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmul.c        | 620 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmulwev-1.c   | 590 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmulwev-2.c   | 590 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmulwev-3.c   | 605 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmulwod-1.c   | 545 +++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmulwod-2.c   | 470 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvmulwod-3.c   | 440 +++++++++++++
+ 11 files changed, 5650 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
+new file mode 100644
+index 000000000..0d7c67703
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-1.c
+@@ -0,0 +1,485 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00080000000cc916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000006fff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00f8000000f41bfb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000fa0106;
++  __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000fe000000fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000fe000000fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000fe000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000fe000000fe;
++  __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fe8001b72e0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xb72e8001b72eaf12;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fe000247639d9c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb5308001b72eaf12;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffb7ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_result[1]) = 0x00010000002fff9e;
++  *((unsigned long *)&__m256i_result[0]) = 0xffb5ff80ffd0ffd8;
++  __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8091811081118110;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80a6802680208015;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8091811081110013;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80a6802680200018;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8091811081118110;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80a6802680208015;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8091811081110013;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80a6802680200018;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x1f831f80e0e09f86;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x1f831f80e0e09f86;
++  __m256i_out = __lasx_xvdiv_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdededededededede;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdededededededede;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdededededededede;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdededededededede;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffa080000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffe080000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffa080000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffe080000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fd00ffff02ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fffeff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff81ffffff00;
++  __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000000f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff0000000d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvdiv_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000d000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000d000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000583800;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000100000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000583800;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000100000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d0000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffff30000000b;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffff30000000b;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000007fef;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fef;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000007fef;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fef;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000008050501;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0100000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffe;
++  __m256i_out = __lasx_xvdiv_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
+new file mode 100644
+index 000000000..fd8b6d38c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvdiv-2.c
+@@ -0,0 +1,500 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000017e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0202810102020202;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202810102020202;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000003f;
++  __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffba0c05;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffba0c05;
++  *((unsigned long *)&__m256i_op1[3]) = 0x5353535353535353;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5353535353535353;
++  *((unsigned long *)&__m256i_op1[1]) = 0x5353535353535353;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5353535353535353;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0303030303020000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0303030303020000;
++  __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100010001000100;
++  __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000030b8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9fe7fffffffff32e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6040190ddfdd8587;
++  *((unsigned long *)&__m256i_op1[1]) = 0xecd011542d2cc4c7;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6040190dffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f7fff7f7f7fff7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7fff7f7f7fff7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f7fff7f7f7fff7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7fff7f7f7fff7f;
++  __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001010000;
++  __m256i_out = __lasx_xvdiv_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fee;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe2e2e202ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe2e2e202ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffc6ffc6003a003a;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010202020203;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010201010102;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010202020203;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010201010102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00020001ffb6ffe0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0049004200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ff80;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000468600007f79;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000f3280000dfff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffb7;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004c00000000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000003fb000003fb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000003fb000003fb;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1fe01e0100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1fe01e0100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1fe01e0100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1fe01e0100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff827f80;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0226823c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff827f80;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0226823c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007d003e007d003e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007d003effa80010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007d003e007d003e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007d003effa80010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000008000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000008000000100;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9cffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9cffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000045;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000d0005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000013b13380;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000013b13380;
++  __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000e2e20000e2e2;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00011d1c00011d9c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000e2e20000e2e2;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00011d1c00011d9c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000e2e20000e2e2;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00011d1c00011d9c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000e2e20000e2e2;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00011d1c00011d9c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1b1a191817161514;
++  *((unsigned long *)&__m256i_op1[1]) = 0x232221201f1e1d1c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1b1a191817161514;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000101;
++  __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010202020203;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010201010102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010202020203;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010201010102;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff0fffffff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvdiv_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
+new file mode 100644
+index 000000000..6f34f6ffc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-1.c
+@@ -0,0 +1,395 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000;
++  __m256i_out = __lasx_xvmod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8001b72e0001b72e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8001b72eaf12d5f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000247639d9cb530;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8001b72eaf12d5f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0xff81ffe50001ffe5;
++  *((unsigned long *)&__m256i_result[2]) = 0xff81ffe5ffa6ffc6;
++  *((unsigned long *)&__m256i_result[1]) = 0x000200aafe9affe5;
++  *((unsigned long *)&__m256i_result[0]) = 0xff81ffe5ffa6ffc6;
++  __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x80008000b70fb810;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3c0f3c0f3911b910;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80008000b70fb810;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3c0f3c0f3911b910;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff6f20;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000781e0000f221;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff6f20;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000781e0000f221;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8800c800c800c801;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8800c800c800c801;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fe363637fe36363;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101000101010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101000101010001;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc3030000ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc3030000ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000001000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000800080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc9d8080067f50020;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc70000020000c000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000010100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000001000100;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff3cff3cff3cff3c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001400000014;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c007c007c007c00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007efeff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007efeff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000008e7c00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000067751500;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000008e7c00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000067751500;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_result[1]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffefffefffefffef;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000e0001000e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000e0001000e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000e0001000e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000e0001000e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
+new file mode 100644
+index 000000000..d0a9e9d2f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmod-2.c
+@@ -0,0 +1,410 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1e18000000000000;
++  __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0909090909090909;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01010101010101c9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvmod_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffe000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffe000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000e000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000e000;
++  __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df8d7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffbe8b470f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe06df0d7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffbe8b470f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_result[3]) = 0x081abb9d36ee1037;
++  *((unsigned long *)&__m256i_result[2]) = 0x1617eb17129bfd38;
++  *((unsigned long *)&__m256i_result[1]) = 0x081abb9d36ee1037;
++  *((unsigned long *)&__m256i_result[0]) = 0x1617eb17129bfd38;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfe8bfe0efe8bfe12;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfe8bfe0efe8bfe12;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8e8e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7171717171717171;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8e8e8e8e8e8e8e8e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000005500000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001005500020000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000005500000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001005500020000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000100010001fffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000100010001fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000005500000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000005400000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000005500000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000005400000002;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007f0000ff807f81;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007f0000ff807f81;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff8000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff000000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000095120000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc9da000063f50000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc7387fff6bbfffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1fffffff1fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0383634303836343;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1fffffff1fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0383634303836343;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001000000;
++  __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmod_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
+new file mode 100644
+index 000000000..be3c8e718
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmul.c
+@@ -0,0 +1,620 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffd1b24e00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcea54ffff29a8;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff8cad88ff8306b4;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffc1278fffce4c8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0e2d5626ff75cdbc;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5db4b156e2002a78;
++  *((unsigned long *)&__m256i_op1[1]) = 0xeeffbeb03ba3e6b0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0c16e25eb28d27ea;
++  *((unsigned long *)&__m256i_result[3]) = 0xf96d674800000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x44a4330e2c7116c0;
++  *((unsigned long *)&__m256i_result[1]) = 0x14187a7822b653c0;
++  *((unsigned long *)&__m256i_result[0]) = 0xfbe0b866962b96d0;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffff01ffffff08;
++  *((unsigned long *)&__m256i_op1[2]) = 0x43700f0100003008;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffff01ffffff08;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43700f0100003008;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000f8;
++  *((unsigned long *)&__m256i_result[2]) = 0xbc8ff0ffffffcff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000f8;
++  *((unsigned long *)&__m256i_result[0]) = 0xbc8ff0ffffffcff8;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x353bb67af686ad9b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x353bb67af686ad9b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0200000200000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2c27000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0200000200000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2c27000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1cfd000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000180000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc080ffff0049ffd2;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000fffeffb9ff9d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00010000002fff9e;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffd2;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff8000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000080000000;
++  __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff003f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000627;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000627;
++  __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffd5a98;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffd5a98;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007f3a40;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000400;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000400;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x120e120dedf1edf2;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x120e120dedf1edf2;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1010000010100000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010000010100000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1010000010100000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010000010100000;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007fff00000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0040000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00007fff00000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffefffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffff30000000b;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffff30000000b;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbc30c40108a45423;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbc263e0e5d00e69f;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbc30c40108a4544b;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbc20e63aa8b9663f;
++  *((unsigned long *)&__m256i_result[3]) = 0x71860bf35f0f9d81;
++  *((unsigned long *)&__m256i_result[2]) = 0x720ed94a46f449ed;
++  *((unsigned long *)&__m256i_result[1]) = 0x71860bf35f0f9f39;
++  *((unsigned long *)&__m256i_result[0]) = 0x72544f0e6e95cecd;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x111ebb784f9c4100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c386546809f3b50;
++  *((unsigned long *)&__m256i_op0[1]) = 0x111ebb784f9bf1ac;
++  *((unsigned long *)&__m256i_op0[0]) = 0x21f6050d955d3f68;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbab0c4b000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xaa0ac09800000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00bf00bf00bf00bf;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00bf00bf00bf00bf;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00bf00bf00bf00bf;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00bf00bf00bf00bf;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000088;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000088;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc0008000c0008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc0008000c0008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffdf5b000041b0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffff97a1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffdf5b000041b0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00f8000000000008;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000800f800000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00f8000000000008;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000800f800000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe3f7fff7fffcbd08;
++  *((unsigned long *)&__m256i_result[2]) = 0x0dbfa28000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xe3f7fff7fffcbd08;
++  *((unsigned long *)&__m256i_result[0]) = 0x0dbfa28000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x7070545438381c1c;
++  *((unsigned long *)&__m256i_result[2]) = 0x7070545438381c1c;
++  *((unsigned long *)&__m256i_result[1]) = 0x7070545438381c1c;
++  *((unsigned long *)&__m256i_result[0]) = 0x7070545438381c1c;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmul_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
+new file mode 100644
+index 000000000..01ff71649
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-1.c
+@@ -0,0 +1,590 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf96d674800000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x44a4330e2c7116c0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x14187a7822b653c0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfbe0b866962b96d0;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffd1b24e00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffcea54ffff29a8;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff8cad88ff8306b4;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffc1278fffce4c8;
++  *((unsigned long *)&__m256i_result[3]) = 0xebfd15f000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01700498ff8f1600;
++  *((unsigned long *)&__m256i_result[1]) = 0xf520c7c024221300;
++  *((unsigned long *)&__m256i_result[0]) = 0x00802fd0ff540a80;
++  __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffbdff3cffbdff44;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000001dc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000001dc;
++  __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ff00ff00ef32;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ee;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ee;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffce;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000fc7c;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffce;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000fc7c;
++  __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff00ffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff8080000004000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000080000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff8080000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00001ff800000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xd8d8c00000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00001ff800000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xd8d8c00000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3f80000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3f80000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffff8ffffff08;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00f800ffcff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffff8ffffff08;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00f800ffcff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256i_result[2]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256i_result[0]) = 0x0045b8ae81bce1d8;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x6c6c6c6c6c6c6c6c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6c6c6c6c6c6c6c6c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x6c6c6c6c6c6c6c6c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6c6c6c6c6c6c6c6c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfd12fd12fd12fd12;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000060000108;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001060005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fef0001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x201fdfe0201fdfe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x201fdfe0201fdfe0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000017bfffff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000180007fe8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000fd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000062d4;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
+new file mode 100644
+index 000000000..32088f4ae
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-2.c
+@@ -0,0 +1,590 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4e5cba76cdbaaa78;
++  *((unsigned long *)&__m256i_op0[2]) = 0xce68fdeb4e33eaff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4e45cc2dcda41b30;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4ccb1e5c4d6b21e4;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x44bb2cd3a35c2fd0;
++  *((unsigned long *)&__m256i_result[0]) = 0xca355ba46a95e31c;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202000002020202;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202000002010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0202000002020202;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202000002020000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fe000000ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe000001fe0000;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000f788f788;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000f788f788;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff01ff68;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000070ff017de6;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff01ff68;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000070ff017de6;
++  *((unsigned long *)&__m256i_op1[3]) = 0x761ed60b5d7f0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdc9938afafe904f1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x761ed60b5d7f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdc9938afafe904f1;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00004c9000e9d886;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00004c9000e9d886;
++  __m256i_out = __lasx_xvmulwev_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffe0001c3fe4001;
++  *((unsigned long *)&__m256i_result[0]) = 0x8ffe800100000000;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff01ff010000fff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff19;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff02ff020001fffa;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000100010001fffa;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x00fe01ff0006ffcf;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000e62f8f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00fe02fe0006ffd6;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000006ffd6;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01010101010000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0100feff0100eeef;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000001010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0100feff00feef11;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000001010;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x201fdfe0201fdfe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x201fdfe0201fdfe0;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff47b4ffff5878;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000b84b0000a787;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff47b4ffff5878;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000b84b0000a787;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010100000101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010100000101;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff1b00e4;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m256i_result[3]) = 0x0807f7f80807f7f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0807f7f80807f7f8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0807f7f80807f7f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0807f7f80807f7f8;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc2c2c2c2c2c29cc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc2c2c2c2c2c29cc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000004e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001;
++  __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000000a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000a;
++  __m256i_out = __lasx_xvmulwev_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000007f000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0408040800008003;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0408040800008002;
++  *((unsigned long *)&__m256i_result[0]) = 0xfbf7fbf7ffff7ffd;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000009;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc192181230000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc192181230000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0x04080c1014182d35;
++  *((unsigned long *)&__m256i_result[2]) = 0x716d696573765161;
++  *((unsigned long *)&__m256i_result[1]) = 0x04080c1014182d35;
++  *((unsigned long *)&__m256i_result[0]) = 0x716d696573765161;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
+new file mode 100644
+index 000000000..19157f682
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwev-3.c
+@@ -0,0 +1,605 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01fe02;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01fe02;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000505;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000004fb;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fff0e400;
++  *((unsigned long *)&__m256i_op1[3]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80000000ffff8c80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x80000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80000000fff0e400;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff01c000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff01ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000f1000000;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01fe04;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01fe04;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000022ffdd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000022ffdd;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000f4b6ff23;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000f4b6ff23;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x386000003df80000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x5fa0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x5fa0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f20;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000009f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op1[3]) = 0x417e01f040800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x299d060000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x417e01f040800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x29108b0000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0707feb60707b7d0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x45baa7ef6a95a985;
++  *((unsigned long *)&__m256i_result[3]) = 0x0707b7cff8f84830;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000354ad4c28;
++  *((unsigned long *)&__m256i_result[1]) = 0x0707b7cff8f84830;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000354ad4c28;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00d5007f00ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00d5007f00ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ef;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000016e00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000155b200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000b70000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff03fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffec75c2d209f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff03fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffec75c2d209f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000008b;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff010000008b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[3]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op1[2]) = 0x03acfc5303260e80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x03af03af03af03af;
++  *((unsigned long *)&__m256i_op1[0]) = 0x03acfc5303260e80;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000b0cfffff4f3;
++  *((unsigned long *)&__m256i_result[2]) = 0x000f9bb562f56c80;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000b0cfffff4f3;
++  *((unsigned long *)&__m256i_result[0]) = 0x000f9bb562f56c80;
++  __m256i_out = __lasx_xvmulwev_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3ff1808001020101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3ff1808001020101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000ff7f1080ef8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0100000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000ff7f1080ef8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0100000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x003ff18080010201;
++  *((unsigned long *)&__m256i_result[2]) = 0x0100000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x003ff18080010201;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfbba01c0003f7e3f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfbd884e7003f7e3f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff874dc687870000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe367cc82f8989a;
++  *((unsigned long *)&__m256i_result[2]) = 0x4f90000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffc3aaa8d58f43c8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffdfffffffdfffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffdfffffffdfffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020000000200001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0020000000200001;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000aaabffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffff000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffff2;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1010101010001000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x101010100000000e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000fe;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff01feffff01ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff01feffff01ff;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff02000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff02000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x5fa0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x5fa0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwev_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
+new file mode 100644
+index 000000000..80fdcda63
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-1.c
+@@ -0,0 +1,545 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7a7cad6eca32ccc1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7a7cad6efe69abd1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7a7cad6eca32ccc1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7a7cad6efe69abd1;
++  *((unsigned long *)&__m256i_result[3]) = 0xff86005300360034;
++  *((unsigned long *)&__m256i_result[2]) = 0xff86005300020055;
++  *((unsigned long *)&__m256i_result[1]) = 0xff86005300360034;
++  *((unsigned long *)&__m256i_result[0]) = 0xff86005300020055;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2c27000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x2c27000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000007f3a40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f3a40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000d24;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000073333333;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000073333333;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff0020001d001f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fffe00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fffe00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ca0000fff80000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ca0000fff80000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010080;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000015d050192cb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x028e509508b16ee9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000033ff01020e23;
++  *((unsigned long *)&__m256i_op0[0]) = 0x151196b58fd1114d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff0000ffff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff000000ffffff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffffffff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fffffaff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffd7200fffff74f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000702f;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000808;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0408040800008003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x04080408fff87803;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0707b7cff8f84830;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000354ad4c28;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0707b7cff8f84830;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000354ad4c28;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffd5a98;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000007e8080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007e8092;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000007e8080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000007e8092;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe07de080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000001f20607a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe07de080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000001f20607a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000f6ff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010000000100000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00153f1594ea02ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffff0100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff15c1ea95ea02ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfe7ffffffeffffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfe7ffffffeffffc0;
++  __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000017fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000017fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
+new file mode 100644
+index 000000000..1a4b221fe
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-2.c
+@@ -0,0 +1,470 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01480000052801a2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffdcff64;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbea2e127c046721f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1729c073816edebe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xde91f010000006f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5ef1f90efefaf30d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00170000028500de;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fd02f20d;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4ffc3f7800000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3fc03f6400000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4ffc3f7800000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3fc03f6400000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x4eb13ec100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3ec13ec100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x4eb13ec100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3ec13ec100000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op0[1]) = 0x457db03e457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x457db03e45a87310;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfe01fe01fd02fd02;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_result[1]) = 0xfe01fe01fd02fd02;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000003fc03fc0;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc039000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x05ea05ea05ea05ec;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x05ea05ea05ea05ec;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x04f104f104f104f1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x04f104f104f104f1;
++  __m256i_out = __lasx_xvmulwod_h_bu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[2]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[1]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_op1[0]) = 0x9090909090909090;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_w_hu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0ff8010000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0ff8010000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020206431;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00f800f800f800f8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0018181800181818;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00f800f800f800f8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0018181800181818;
++  *((unsigned long *)&__m256i_result[3]) = 0x001f1f3e3e1f1f00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0003060909060300;
++  *((unsigned long *)&__m256i_result[1]) = 0x001f1f3e3e1f1f00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0003060909060300;
++  __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800000ff000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff0fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x90007fff90008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0ffffffe90008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x4800408ef07f7f01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0800000eeffffe02;
++  __m256i_out = __lasx_xvmulwod_d_wu (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fc03e000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fc03e000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00fffb0402fddf20;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00fffb0402fddf20;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001fbf9fbe29f52;
++  *((unsigned long *)&__m256i_result[2]) = 0x5b409c0000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001fbf9fbe29f52;
++  *((unsigned long *)&__m256i_result[0]) = 0x5b409c0000000000;
++  __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x21f8c3c4c0000005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000c0000005;
++  *((unsigned long *)&__m256i_op0[0]) = 0x21f8c3c4c0000005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000043efffff8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000043efffff8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbfffa004fffd8000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xbfffa004fffd8000;
++  __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff6361;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4d0a902890b800dc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000203ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ff03ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000203ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff03fe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffec75c2d209f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff03fe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffec75c2d209f;
++  __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0002fffc;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000fffd0003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0002fffc;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000fffd0003;
++  __m256i_out = __lasx_xvmulwod_q_du (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
+new file mode 100644
+index 000000000..9fcd3ce0c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvmulwod-3.c
+@@ -0,0 +1,440 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3f2c678e38d1104c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00aa000000ac00fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00aa000000ac00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf01010153a10101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5b7f01ff5b7f10ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdf01010153a10101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5b7f01ff5b7f10ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffd8ffc7ffdaff8a;
++  *((unsigned long *)&__m256i_result[3]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_h_bu_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007ffe81fdfe03;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_d_wu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000017f00007f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00007f0000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fd;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff810000000000;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x71860bf35f0f9d81;
++  *((unsigned long *)&__m256i_op0[2]) = 0x720ed94a46f449ed;
++  *((unsigned long *)&__m256i_op0[1]) = 0x71860bf35f0f9f39;
++  *((unsigned long *)&__m256i_op0[0]) = 0x72544f0e6e95cecd;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff8910ffff7e01;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff3573ffff8960;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff8910ffff1ca9;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffff5e5ffff8130;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffcb423a587053;
++  *((unsigned long *)&__m256i_result[2]) = 0x6d46f43e71141b81;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffcb423a584528;
++  *((unsigned long *)&__m256i_result[0]) = 0x9bdf36c8d78158a1;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x800000007fff0001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80000000ff7f0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x800000007fff0001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80000000ff7f0001;
++  *((unsigned long *)&__m256i_result[3]) = 0xbfffffffffff8000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbfff800080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xbfffffffffff8000;
++  *((unsigned long *)&__m256i_result[0]) = 0xbfff800080000000;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000007f8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000002de;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000007f8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000002de;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000007f7;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffff808;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000007f7;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffff808;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvmulwod_q_du_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvpackev-xvpackod.patch b/LoongArch-Add-tests-for-ASX-vector-xvpackev-xvpackod.patch
new file mode 100644
index 0000000000000000000000000000000000000000..efb9490f8527dba0776e79df28d64b0b294865ed
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvpackev-xvpackod.patch
@@ -0,0 +1,5364 @@
+From 9789698300a07a107bf78cd1c7fb9cf8fbddfca1 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 17:07:28 +0800
+Subject: [PATCH 119/124] LoongArch: Add tests for ASX vector
+ xvpackev/xvpackod/xvpickev/xvpickod/
+ xvpickve2gr/xvreplgr2vr/xvreplve/xvreplve0/xvreplvei/xvshuf4i/xvshuf
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvpackev.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpackod.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickev.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickod.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickve.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplve.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvpackev.c     | 501 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvpackod.c     | 575 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvpickev.c     | 515 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvpickod.c     | 530 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvpickve.c     | 130 +++
+ .../loongarch/vector/lasx/lasx-xvpickve2gr.c  | 388 +++++++++
+ .../loongarch/vector/lasx/lasx-xvreplgr2vr.c  | 380 +++++++++
+ .../loongarch/vector/lasx/lasx-xvreplve.c     | 536 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvreplve0.c    | 471 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvreplvei.c    |  20 +
+ .../loongarch/vector/lasx/lasx-xvshuf4i_b.c   | 430 ++++++++++
+ .../loongarch/vector/lasx/lasx-xvshuf_b.c     | 761 ++++++++++++++++++
+ 12 files changed, 5237 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
+new file mode 100644
+index 000000000..33b96d657
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackev.c
+@@ -0,0 +1,501 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x81f7f2599f0509c2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x51136d3c78388916;
++  *((unsigned long *)&__m256i_op1[3]) = 0x044819410d87e69a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x21d3905ae3e93be0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x5125883a30da0f20;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6d7b2d3ac2777aeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x000019410000e69a;
++  *((unsigned long *)&__m256i_result[2]) = 0xf259905a09c23be0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000883a00000f20;
++  *((unsigned long *)&__m256i_result[0]) = 0x6d3c2d3a89167aeb;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4f7fffbf0000fe00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000004f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4f7fffe64f7fffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfe02fe02fee5fe22;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff49fe4200000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffbf0000fe000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fe020000fe22;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe6fe42ffc00000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc06500550055ffab;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00550000ffab0001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00550000ffab0001;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000400000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000400000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000400000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000400000000;
++  __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x01fe01fe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe00000000;
++  __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ffffffffff;
++  __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000089;
++  __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200;
++  __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0080010000800100;
++  *((unsigned long *)&__m256i_result[2]) = 0x00c0000000c00000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080010000800100;
++  *((unsigned long *)&__m256i_result[0]) = 0x00c0000000c00000;
++  __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000001fdfffffe02;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000001fefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff01fefffeff02;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fd00ffff02ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001fffeff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff;
++  __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8011ffee804c004c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00faff0500c3ff3c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80f900f980780078;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0057ffa800ceff31;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffee0000004c0000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff050000ff3c0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00f9000000780000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffa80000ff310000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001d0000001d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001d0000001d00;
++  __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000;
++  __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x6);
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff890000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff790000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff890000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff790000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff790000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff790000;
++  __m256i_out = __lasx_xvpackev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfbff0000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfbff0000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffe700000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffe7007b007e;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffe700000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffe7007b007e;
++  __m256i_out = __lasx_xvpackev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000008000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0003fffc0803fff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000008000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0003fffc0803fff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fffc0000fff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fffc0000fff8;
++  __m256i_out = __lasx_xvpackev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
+new file mode 100644
+index 000000000..cdd20e881
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpackod.c
+@@ -0,0 +1,575 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f057f0b7f5b007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7fff7fff7fff00;
++  __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000fff00000fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ff0fff005f0f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ff0fff005f0f;
++  __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff000607f7;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000010017e7d1;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff000607f7;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001001807f1;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000e7;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0002555500000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002555500000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000005400;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000005400;
++  __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000007fff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000007fff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0007fff8000ffff0;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffefffef00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ff00ff00ff00;
++  __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000f0000000f000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000f0000000f000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvpackod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000c8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000c8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000022beb03f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffa2beb040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000022be22be;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fffa2bea2be;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000022be22be;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fffa2bea2be;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff10000fff10000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff1000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff1000000000000;
++  __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ff0000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ff0000000000;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000555500005555;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000555500005555;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000555500005555;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000555500005555;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000a0008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ffc0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ff00fff8ffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fff80000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fff80000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fff80000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fff80000;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpackod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff00007fff;
++  __m256i_out = __lasx_xvpackod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
+new file mode 100644
+index 000000000..66faa74d0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickev.c
+@@ -0,0 +1,515 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffff90ffffff81;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffff90ffffff81;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff90ff81;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ff90ff81;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000007f;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffe81;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe81;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001341c4000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001000310000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000033e87ef1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000002e2100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000011c00;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000e8f1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000103100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000002e00;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000004290;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004290;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000004290;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000004290;
++  __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xbfbfbfbfbfbfbfbf;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op1[2]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_op1[1]) = 0xa020202020202020;
++  *((unsigned long *)&__m256i_op1[0]) = 0xa020202020206431;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202031;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202031;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0004040404000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0004040404000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0004040404000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0004040404000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf800d0d8ffffeecf;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf800d0d8ffffeecf;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000383fffffdf0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[2]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0xd0d8eecf383fdf0d;
++  __m256i_out = __lasx_xvpickev_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf80ff20df80ff20;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdfc2ff20df80ffa7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdf80ff20df80ff20;
++  *((unsigned long *)&__m256i_op0[0]) = 0xdfc2ff20df80ffa7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x80208020c22080a7;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x80208020c22080a7;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000040000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000040000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000400;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000400;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op1[2]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op1[0]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xe07de0801f20607a;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000800080010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000800080010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000800080010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000800080010000;
++  __m256i_out = __lasx_xvpickev_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[3]) = 0x9ffffd8020010001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff9fffffff9;
++  *((unsigned long *)&__m256i_result[1]) = 0x9ffffd8020010001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff9fffffff9;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvpickev_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000060002000a;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000060002000a;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickev_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
+new file mode 100644
+index 000000000..a9778809f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickod.c
+@@ -0,0 +1,530 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0003f8040002f607;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002728b00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffff328dfff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6651bfff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003f8040002f607;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffff328dfff;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0080200000802000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0080200000802000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00200020ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x1e0000001e000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00200020ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x1e0000001e000000;
++  __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0080200000802000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080200000802000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00800080ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00800080ffffffff;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffffe40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000040004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xb70036db12c4007e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xb7146213fc1e0049;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000fefe02fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb71c413b199d04b5;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e01fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xb70012c4b714fc1e;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff017e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fe02b71c199d;
++  *((unsigned long *)&__m256i_result[0]) = 0x017e017e00ff017e;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc080ffff0049ffd2;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0049ffd2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffeffb9ff9d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01620133004b0032;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffb7ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00010000002fff9e;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffb5ff80ffd0ffd8;
++  *((unsigned long *)&__m256i_result[3]) = 0xc080ffff0049ffd2;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002ff80ffb70000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000fffeffb9ff9d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00010000002fff9e;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xbabababababababa;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc6c6c6c68787878a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000003f3f3f3c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8787878a00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003f3fc6c68787;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003f3f87870000;
++  __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fff003f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fff003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007fff;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000002467db99;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003e143852;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000002467db99;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003e143852;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000044444443;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7bbbbbbbf7777778;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000246700003e14;
++  *((unsigned long *)&__m256i_result[2]) = 0x000044447bbbf777;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000246700003e14;
++  *((unsigned long *)&__m256i_result[0]) = 0x000044447bbbf777;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0006000000020000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0006000000020000;
++  __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xbff00000bff00000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xbff00000bff00000;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x9ff87ef07f7f817f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x9ff87f7f7f807f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x9ff87f7f7f807f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffe98;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffe98;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000007f00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000007f00000000;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickod_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvpickod_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
+new file mode 100644
+index 000000000..a2edbb80a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve.c
+@@ -0,0 +1,130 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, long_out, long_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x010180068080fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00fe01f000010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000c40086;
++  __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff820002ff820002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff820002ff820002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002;
++  __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000000000000000;
++  __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvpickve_d (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvpickve_w (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
+new file mode 100644
+index 000000000..8bd3a8273
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvpickve2gr.c
+@@ -0,0 +1,388 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0cc08723ff900001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xcc9b89f2f6cef440;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x7);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  int_result = 0x000000000000ffff;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff90ff81;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff90ff81;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f;
++  int_result = 0x000000000000007f;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000ffffffff;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefdfffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000fffffefd;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5555555580000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5555555580000000;
++  int_result = 0x0000000055555555;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0002000400000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002000200020006;
++  unsigned_int_result = 0x0000000000020006;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000;
++  long_int_result = 0x1f0fdf7f3e3b31d4;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00fe01fc01fe01fc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x012c002c001c0006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00fe01fc01fe0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x012c002c001c000a;
++  long_int_result = 0xfe01fc01fe0000;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  unsigned_long_int_result = 0xffffffffffffffff;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  long_int_result = 0x00000000ffff0100;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  int_result = 0x000000007ff00000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  unsigned_long_int_result = 0x00000000ffffffff;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0xffffffffffffffff;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffff0100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff0100000001;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x7);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff0008;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  long_int_result = 0x000000000000ffff;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010;
++  unsigned_int_result = 0x0000000000100010;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000100040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000100040;
++  unsigned_int_result = 0x0000000000000040;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x6);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x6);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  long_int_result = 0xffffffffffffffff;
++  long_int_out = __lasx_xvpickve2gr_d (__m256i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  unsigned_int_result = 0x00000000ffffffff;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  unsigned_int_result = 0x00000000ffffffff;
++  unsigned_int_out = __lasx_xvpickve2gr_wu (__m256i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lasx_xvpickve2gr_du (__m256i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000ffffffff;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffd880;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffd880;
++  int_result = 0x0000000000000000;
++  int_out = __lasx_xvpickve2gr_w (__m256i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
+new file mode 100644
+index 000000000..81456bc1b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplgr2vr.c
+@@ -0,0 +1,380 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  int_op0 = 0x0000001b3c4c0a5c;
++  *((unsigned long *)&__m256i_result[3]) = 0x3c4c0a5c3c4c0a5c;
++  *((unsigned long *)&__m256i_result[2]) = 0x3c4c0a5c3c4c0a5c;
++  *((unsigned long *)&__m256i_result[1]) = 0x3c4c0a5c3c4c0a5c;
++  *((unsigned long *)&__m256i_result[0]) = 0x3c4c0a5c3c4c0a5c;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000397541c58;
++  *((unsigned long *)&__m256i_result[3]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_result[2]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_result[1]) = 0x97541c5897541c58;
++  *((unsigned long *)&__m256i_result[0]) = 0x97541c5897541c58;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_result[0]) = 0x0400040004000400;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000084;
++  *((unsigned long *)&__m256i_result[3]) = 0x0084008400840084;
++  *((unsigned long *)&__m256i_result[2]) = 0x0084008400840084;
++  *((unsigned long *)&__m256i_result[1]) = 0x0084008400840084;
++  *((unsigned long *)&__m256i_result[0]) = 0x0084008400840084;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000020202020;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  long_op0 = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000020006;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000020006;
++  __m256i_out = __lasx_xvreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  int_op0 = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
+new file mode 100644
+index 000000000..7aa76c2ba
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve.c
+@@ -0,0 +1,536 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000001b3c4c0a5c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefb;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe;
++  int_op1 = 0x0000000059815d00;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000fe;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op0[0]) = 0x555555ab555555ab;
++  int_op1 = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100010001;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000012e2110;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202020202020202;
++  *((unsigned long *)&__m256i_result[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  int_op1 = 0x0000000000000400;
++  *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[2]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[0]) = 0x003f003f003f003f;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f003f;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[2]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[0]) = 0x003f003f003f003f;
++  __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000003f0000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe161616161616161;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe161616161614e60;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_result[2]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_result[1]) = 0xe161616161614e60;
++  *((unsigned long *)&__m256i_result[0]) = 0xe161616161614e60;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  int_op1 = 0x00000000000000ac;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000080;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00d5007f00ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00d5007f00ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffff7fffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffff7fffff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc192181230000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc192181230000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff00ff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fef7fef7fef7fef;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fef7fef7fef7fef;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fef7fef7fef7fef;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fef7fef7fef7fef;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffff00ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffff00ffffffff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f010700c70106;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f010700c70106;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0106010601060106;
++  *((unsigned long *)&__m256i_result[2]) = 0x0106010601060106;
++  *((unsigned long *)&__m256i_result[1]) = 0x0106010601060106;
++  *((unsigned long *)&__m256i_result[0]) = 0x0106010601060106;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_h (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve_w (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000003fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000003fff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000404;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000404;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_result[2]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_result[1]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_result[0]) = 0x0404040404040404;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000202;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000202;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x000000003ddc5dac;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_d (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002;
++  int_op1 = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve_b (__m256i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
+new file mode 100644
+index 000000000..a2bc2da52
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplve0.c
+@@ -0,0 +1,471 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffeffffff88;
++  *((unsigned long *)&__m256i_op0[2]) = 0x61e0000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffeffffff88;
++  *((unsigned long *)&__m256i_op0[0]) = 0x61e0000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff80fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xd52aaaaa555555ab;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffff80fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xd52aaaaa555555ab;
++  *((unsigned long *)&__m256i_result[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_result[0]) = 0x555555ab555555ab;
++  __m256i_out = __lasx_xvreplve0_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fff3fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fff3fff3fff4000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000403f3fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fff;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000001;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202020;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_q (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0007fd00000f02ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fffeff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[1]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00fe00feff02ff;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfc00ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000100fe000100fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfc00ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000100fe000100fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[2]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[1]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m256i_result[0]) = 0x00fe00fe00fe00fe;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[0]) = 0x4040404040404040;
++  __m256i_out = __lasx_xvreplve0_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_q (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_q (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffe20001dfe1f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_q (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve0_q (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[2]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe0047d00e00480;
++  *((unsigned long *)&__m256i_op0[0]) = 0x001fc0200060047a;
++  *((unsigned long *)&__m256i_result[3]) = 0x047a047a047a047a;
++  *((unsigned long *)&__m256i_result[2]) = 0x047a047a047a047a;
++  *((unsigned long *)&__m256i_result[1]) = 0x047a047a047a047a;
++  *((unsigned long *)&__m256i_result[0]) = 0x047a047a047a047a;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x037fe01f001fe020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x037fe01f001fe020;
++  *((unsigned long *)&__m256i_result[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x2020202020202020;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0d0d0d0d0d;
++  __m256i_out = __lasx_xvreplve0_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff00ff00ff;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001;
++  __m256i_out = __lasx_xvreplve0_d (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x800080ff800080ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x800080ff800080ff;
++  __m256i_out = __lasx_xvreplve0_w (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_q (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvreplve0_q (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_result[3]) = 0x97a297a297a297a2;
++  *((unsigned long *)&__m256i_result[2]) = 0x97a297a297a297a2;
++  *((unsigned long *)&__m256i_result[1]) = 0x97a297a297a297a2;
++  *((unsigned long *)&__m256i_result[0]) = 0x97a297a297a297a2;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_h (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvreplve0_b (__m256i_op0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
+new file mode 100644
+index 000000000..9346f9bfb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvreplvei.c
+@@ -0,0 +1,20 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
+new file mode 100644
+index 000000000..c8a00ca89
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf4i_b.c
+@@ -0,0 +1,430 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007ffffffff7ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x49d8080067f4f81f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007f00fffff7ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xd8490849f467f867;
++  __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xb7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xdb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x95);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffb3b4;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff5ffff4738;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffb3b4;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff5ffff4738;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0xee);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x6f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_b (__m256i_op0, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00007ffffffff7ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x49d8080067f4f81f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7ffff7ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x080008000800f81f;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xa8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_result[3]) = 0xc5c4c5c5c5c5c5c5;
++  *((unsigned long *)&__m256i_result[2]) = 0xc5c545c545c545c5;
++  *((unsigned long *)&__m256i_result[1]) = 0xc5c4c5c5c5c5c5c5;
++  *((unsigned long *)&__m256i_result[0]) = 0xc5c545c545c545c5;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xf7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xa7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0xdc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff8001ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff8001ffff8001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff8001ffff8001;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x6e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x9f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002ffff00020002;
++  *((unsigned long *)&__m256i_result[2]) = 0x04f504f104f504f5;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002ffff00020002;
++  *((unsigned long *)&__m256i_result[0]) = 0x04f504f104f504f5;
++  __m256i_out = __lasx_xvshuf4i_h (__m256i_op0, 0x65);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1e18000000000000;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xfe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x64);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_result[3]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_result[2]) = 0x45c5c5c545c5c5c5;
++  *((unsigned long *)&__m256i_result[1]) = 0xc5c5c5c4c5c5c5c4;
++  *((unsigned long *)&__m256i_result[0]) = 0x45c5c5c545c5c5c5;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xb0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000f9f900;
++  *((unsigned long *)&__m256i_op0[2]) = 0x79f9f9f900f9f9e0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000f9f900;
++  *((unsigned long *)&__m256i_op0[0]) = 0x79f9f9f900f9f900;
++  *((unsigned long *)&__m256i_result[3]) = 0x00f9f90079f9f9f9;
++  *((unsigned long *)&__m256i_result[2]) = 0x79f9f9f900000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00f9f90079f9f9f9;
++  *((unsigned long *)&__m256i_result[0]) = 0x79f9f9f900000000;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x97);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007aff7c00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffd017d00;
++  *((unsigned long *)&__m256i_result[3]) = 0x7aff7c0000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfd017d0000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7aff7c0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfd017d0000000000;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xb3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_result[3]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_result[2]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_result[1]) = 0xc3f0c3f0c3f0c3f0;
++  *((unsigned long *)&__m256i_result[0]) = 0xc3f0c3f0c3f0c3f0;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xf4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_result[3]) = 0xff81ff7dffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_result[1]) = 0xff81ff7dffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff81ff7d;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0x28);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000020ff790020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000020ff790020;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000020;
++  __m256i_out = __lasx_xvshuf4i_w (__m256i_op0, 0xa5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010183f95466;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01010101d58efe94;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010183f95466;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x01010101d58efe94;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xa7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xd9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001fff00001fff;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80be0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000f0f0002;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80be0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000f1002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x80000000ff800000;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xdb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op1[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op1[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x43ef878780000009;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x36);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff00017fff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_result[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_result[0]) = 0x04f104f104f504ed;
++  __m256i_out = __lasx_xvshuf4i_d (__m256i_op0, __m256i_op1, 0x7e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+new file mode 100644
+index 000000000..641ea2315
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+@@ -0,0 +1,761 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000007070707;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0102040000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000020100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0703020000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfe02fe02fee5fe22;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff49fe4200000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfffffff8fffffff8;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfffffff8fc000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfafafafafafafafa;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000fefefe;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000003ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x67eee33567eee435;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x67eee33567eee435;
++  *((unsigned long *)&__m256i_op2[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7575ffff75757595;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7575ffff7575f575;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7575ffff75757595;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7575ffff7575f575;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op2[2]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op2[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_op2[0]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800f800;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000ffff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_result[1]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff88ff88ff880000;
++  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000010000ffe1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000101001e18;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000010000ffe1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000101001e18;
++  *((unsigned long *)&__m256i_op1[3]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[2]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[1]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op1[0]) = 0x98111cca98111cca;
++  *((unsigned long *)&__m256i_op2[3]) = 0x000000010000ffe1;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000101001e18;
++  *((unsigned long *)&__m256i_op2[1]) = 0x000000010000ffe1;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000101001e18;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000101001e18;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000101001e18;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80008000b3e8fef1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80008000802ea100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op2[0]) = 0x00000000012e2110;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x012e2110012e2110;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000082a54290;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000028aa700;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000082a54290;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54287;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000007fc00000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fc00000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fc00000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fc00000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xdfffffffdfffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0xdfffffffdfffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000104000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000104000200;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_result[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000500040005;
++  __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op1[2]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op1[1]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op1[0]) = 0x555555ab555555ab;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000080008000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000080008000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000fffffe01fe52;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff01ff02;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffffe01fe52;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff01ff02;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000080008001;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_op2[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000ff800000ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff800000ff;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000080040;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffeb8649d0d6250;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffeb8649d0d6250;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op2[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op2[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op2[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op2[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvsll-xvsrl-instr.patch b/LoongArch-Add-tests-for-ASX-vector-xvsll-xvsrl-instr.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ff6535633a07c9a4e72e340308860886498c89fe
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvsll-xvsrl-instr.patch
@@ -0,0 +1,5611 @@
+From e90910ab68c43259f898fb7b2cba02d4eb457428 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:44:49 +0800
+Subject: [PATCH 106/124] LoongArch: Add tests for ASX vector xvsll/xvsrl
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvsll.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvslli.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrl.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrli.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrln.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvsll.c        | 425 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvslli.c       | 416 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsllwil-1.c   | 339 +++++++++
+ .../loongarch/vector/lasx/lasx-xvsllwil-2.c   | 350 +++++++++
+ .../loongarch/vector/lasx/lasx-xvsrl.c        | 650 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrli.c       | 405 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrln.c       | 425 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrlni.c      | 680 ++++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrlr.c       | 515 +++++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrlri.c      | 416 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrlrn.c      | 410 +++++++++++
+ .../loongarch/vector/lasx/lasx-xvsrlrni.c     | 455 ++++++++++++
+ 12 files changed, 5486 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
+new file mode 100644
+index 000000000..7179e715c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsll.c
+@@ -0,0 +1,425 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00001f41ffffbf00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001f41ffffbf00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffe0000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000fffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000fffefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000808080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffcfa;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffcfa;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff2f7bcfff2f7bd;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff2f93bfff2fff2;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff2f7bcfff2f7bd;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff2f93bfff2fff2;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffcf800fffcfffc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffcfffc;
++  __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffffe40;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fff0e400;
++  __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x5980000000000000;
++  __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_result[0]) = 0x8080808080808080;
++  __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000800000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000001ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001ff8000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001ff8000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff0000ffff;
++  __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvsll_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000;
++  __m256i_out = __lasx_xvsll_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00010001000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00010001000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x800000ff800000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x800000ff800000ff;
++  __m256i_out = __lasx_xvsll_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsll_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
+new file mode 100644
+index 000000000..003e29b67
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvslli.c
+@@ -0,0 +1,416 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fa022a01a401e5;
++  *((unsigned long *)&__m256i_op0[2]) = 0x030d03aa0079029b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x024c01f901950261;
++  *((unsigned long *)&__m256i_op0[0]) = 0x008102c2008a029f;
++  *((unsigned long *)&__m256i_result[3]) = 0x54000000ca000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x5400000036000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf2000000c2000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x840000003e000000;
++  __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff1001100100000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff1001100100000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010100000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfcc4004400400000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0040400000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfcc4004400400000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0040400000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffef000004ea;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffef000004ea;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffefffffffef;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffbf4;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800c000;
++  *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800a000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800e000;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffefefffffefe;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0100010001000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100010001000000;
++  __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xf000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1616161616161616;
++  *((unsigned long *)&__m256i_op0[2]) = 0x161616167fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ffe16167f161616;
++  *((unsigned long *)&__m256i_op0[0]) = 0x161616167fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x2c2c2c2c2c2c2c2c;
++  *((unsigned long *)&__m256i_result[2]) = 0x2c2c2c2cfefefefe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefc2c2cfe2c2c2c;
++  *((unsigned long *)&__m256i_result[0]) = 0x2c2c2c2cfefefefe;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m256i_result[2]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m256i_result[1]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m256i_result[0]) = 0xf8f8f8f8f8f8f8f8;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1f60000000c00000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1f60000000c00000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x60000000c0000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x60000000c0000000;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff80ff80ff80ff80;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff80ff80ff80ff80;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00080008000801ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0008000800080008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00080008000801ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0008000800080008;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_result[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_result[0]) = 0xf0f0f0f0f0f0f0f0;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x03f0000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x03f0000000000000;
++  __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x34);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_w (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffff80000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffff80000;
++  __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_result[2]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_result[1]) = 0xf800f800f800f800;
++  *((unsigned long *)&__m256i_result[0]) = 0xf800f800f800f800;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0404000004040000;
++  __m256i_out = __lasx_xvslli_w (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000c040c0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000c040c0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff000000;
++  __m256i_out = __lasx_xvslli_d (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvslli_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
+new file mode 100644
+index 000000000..ef3a47da5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-1.c
+@@ -0,0 +1,339 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffe0ffe0ffe0ffe0;
++  *((unsigned long *)&__m256i_result[2]) = 0xffe0ffe0ffe0ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0xffe0ffe0ffe0ffe0;
++  *((unsigned long *)&__m256i_result[0]) = 0xffe0ffe0ffe0ffe0;
++  __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000003f0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000003f0;
++  __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[2]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000080000009;
++  *((unsigned long *)&__m256i_op0[0]) = 0x43ef878780000009;
++  *((unsigned long *)&__m256i_result[3]) = 0x0218ff78fc38fc38;
++  *((unsigned long *)&__m256i_result[2]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256i_result[1]) = 0x0218ff78fc38fc38;
++  *((unsigned long *)&__m256i_result[0]) = 0xfc00000000000048;
++  __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff0fff0fff0fc00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff0fff0fff0fc00;
++  __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_h_b (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfc00000000000000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffc00fffffc00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffc00fffffc00;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000a000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000a000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000400000004000;
++  __m256i_out = __lasx_xvsllwil_w_h (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffefffffefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffbf4;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffffffffffc;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdbc8000000003fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffbff1ffffbff1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffbff1ffffbff1;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffeffc4000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffeffc4000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffeffc4000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffeffc4000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffe05fc47b400;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffe06003fc000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffe05fc47b400;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffe06003fc000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000feccfecc;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000feccfecc;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fe363637fe36364;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fe36364661af18f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fe363637fe36364;
++  *((unsigned long *)&__m256i_result[3]) = 0x00001ff8d8d8c000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00001ff8d8d90000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001ff8d8d8c000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00001ff8d8d90000;
++  __m256i_out = __lasx_xvsllwil_d_w (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
+new file mode 100644
+index 000000000..76651af63
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsllwil-2.c
+@@ -0,0 +1,350 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x03fc03fc03f803f8;
++  *((unsigned long *)&__m256i_result[2]) = 0x03fc03fc03f803f8;
++  *((unsigned long *)&__m256i_result[1]) = 0x03fc03fc03f803f8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe01fe01fe;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1f0fdf7f3e3b31d4;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x01fc03e000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x01fc03e000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00fe01e000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00fe01e000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x07fee332883f86b0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x07fed3c8f7ad28d0;
++  *((unsigned long *)&__m256i_result[3]) = 0x01c03f8034c03200;
++  *((unsigned long *)&__m256i_result[2]) = 0x3dc02b400a003400;
++  *((unsigned long *)&__m256i_result[1]) = 0x01c03f8034c03200;
++  *((unsigned long *)&__m256i_result[0]) = 0x3dc02b400a003400;
++  __m256i_out = __lasx_xvsllwil_hu_bu (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000054;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00aa000000ac00fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000054;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00aa000000ac00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002a80000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002b0000003f800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002a80000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002b0000003f800;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x41d8585858400000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc1be9e9e9f000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x41d8585858400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1076000016160000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1610000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1076000016160000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1610000000000000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_wu_hu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007f00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x311d73ad3ec2064a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001fc000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000c475ceb40000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fb0819280000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff0000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0004040404000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0004040404000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0004040404000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004040404000000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000007c8;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000086000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00040ff288000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000086000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00040ff288000000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fff000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000001ffe00000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000001ffe00000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80000000ffc8ff88;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80000000ffc8ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001ff91ff100000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001ff91ff100000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000008c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000008c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001180000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001180000000;
++  __m256i_out = __lasx_xvsllwil_du_wu (__m256i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
+new file mode 100644
+index 000000000..1d591c35c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrl.c
+@@ -0,0 +1,650 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_result[2]) = 0x247fe49409620040;
++  *((unsigned long *)&__m256i_result[1]) = 0x6580668200fe0002;
++  *((unsigned long *)&__m256i_result[0]) = 0x6580668200fe0002;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff874dc687870000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffc6cc05c64d960e;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000003f7e3f;
++  *((unsigned long *)&__m256i_result[0]) = 0xff874dc687870000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffba0c05;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffba0c05;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000483800;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00ffffff00ffff;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000200;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000001fffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fe37fe3001d001d;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff0000;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010800;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000008e4bfc4eff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001ffee10000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000008e4bfc4eff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001ffee10000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0d0d0d000000000d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0d0d0d0000060d0d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0d0d0d000000000d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0d0d0d0000060d0d;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000e0000000d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffff03ffffff07;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffff03ffffff07;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x80008000fff98000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrl_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000040004000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000040404040;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fd02fd02;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fd02fd02;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03fc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000405;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000405;
++  *((unsigned long *)&__m256i_result[3]) = 0xfe01fe017e81fd02;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000003fc001fe;
++  *((unsigned long *)&__m256i_result[1]) = 0xfe01fe017e81fd02;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000003fc001fe;
++  __m256i_out = __lasx_xvsrl_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000010000685e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000020a4ffffbe4f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000003ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001ffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000003ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001ffffffffffff;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c1c1c1c1c1c1c1c;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000001ffff8000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000001ffff8000;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_result[2]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_result[1]) = 0xfd02fd02fd02fd02;
++  *((unsigned long *)&__m256i_result[0]) = 0xfd02fd02fd02fd02;
++  __m256i_out = __lasx_xvsrl_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0005fff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0x04f004f204f204f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0005fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0x04f004f204f204f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000002780;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000002780;
++  __m256i_out = __lasx_xvsrl_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
+new file mode 100644
+index 000000000..e8696701f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrli.c
+@@ -0,0 +1,405 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000050005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1010101110101011;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1111111211111112;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000004444;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffcc8000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007dfdff4b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x003ffff300000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000001f7f7f;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x9240f24a84b18025;
++  *((unsigned long *)&__m256i_op0[2]) = 0x9240f24a84b18025;
++  *((unsigned long *)&__m256i_op0[1]) = 0xb2c0b341807f8006;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb2c0b341807f8006;
++  *((unsigned long *)&__m256i_result[3]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_result[2]) = 0x009200f200840080;
++  *((unsigned long *)&__m256i_result[1]) = 0x00b200b300800080;
++  *((unsigned long *)&__m256i_result[0]) = 0x00b200b300800080;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffcb423a587053;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6d46f43e71141b81;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffcb423a584528;
++  *((unsigned long *)&__m256i_op0[0]) = 0x9bdf36c8d78158a1;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000007fffe;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000036a37;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000007fffe;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000004def9;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0889088908810881;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0081010000810100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0889088900810088;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0081010000810100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0004448444844084;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000408080004080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0004448444804080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000408080004080;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000001d001d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000001d001d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000030003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000030003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x22);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000307;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000014402080144;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000a0010400a;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000a0010400a;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000598;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000598;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff00;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001ffff0001ffff;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_d (__m256i_op0, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3fffffff3fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fffffff3fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fff00003fff;
++  __m256i_out = __lasx_xvsrli_w (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_result[3]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x3fff3fff3fff3fc4;
++  *((unsigned long *)&__m256i_result[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x3fff3fff3fff3fc4;
++  __m256i_out = __lasx_xvsrli_h (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
+new file mode 100644
+index 000000000..d54991051
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrln.c
+@@ -0,0 +1,425 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000003868686a20;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0045b8ae81bce1d8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00386a20b8aee1d8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00386a20b8aee1d8;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x2020000020200000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0008000001010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101000001010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x017e00ff017e00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff017e01fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x88888a6d0962002e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xdb8a3109fe0f0020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000007fff01fffb;
++  *((unsigned long *)&__m256i_op0[0]) = 0xdb8e20990cce025a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff01ff3400000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff83ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0962002efe0f0020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff01fffb8667012d;
++  __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffeffeb;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fb7afb62;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffeffeb;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fb7afb62;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffeffebfb7afb62;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffeffebfb7afb62;
++  __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000040;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff010000ff017e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01fe01ae00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000a00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000010000000a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff017e6b803fc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff017e6b803fc0;
++  __m256i_out = __lasx_xvsrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000781;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000078100000064;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xa1a1a1a1a1a15e5e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xa1a1a1a1a1a15e5e;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0080000000800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000027;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffff10;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1716151417161514;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1716151417161514;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1716151417161514;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1716151417161514;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0fff0fff0fff0fff;
++  __m256i_out = __lasx_xvsrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
+new file mode 100644
+index 000000000..0fb6483cf
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlni.c
+@@ -0,0 +1,680 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffc500000002d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000034;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbfa3e127c147721f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1729c173836edfbe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdf91f111808007fb;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5ff1f90ffffbf30f;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ff280016;
++  *((unsigned long *)&__m256i_result[2]) = 0xd193a30f94b9b7df;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000001001a;
++  *((unsigned long *)&__m256i_result[0]) = 0xc88840fdf887fd87;
++  __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffc5556aaa8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffc5556aaa8;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x555555553f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000007070205;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000002020100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000007070205;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000002020100;
++  __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x5980000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffefe00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000003ff000003ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x36);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x73);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffe01fe01f;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffe01fe01f;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffe01fe01f;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffe01fe01f;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fe01020b0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000fe01020b0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0fff0fff00000020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0fff0fff00000020;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_op0[1]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x01fb16ef98f97e90;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x01fb16ef98f97e90;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffa0078fffa0074;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffa2078fffa2074;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffa2078fffa2074;
++  *((unsigned long *)&__m256i_result[3]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x01ff01ff01ff01ff;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003e6c0000cb7a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000401000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003e6c0000cb7a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x40000000b000032d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x40000000b000032d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007f00ff007f00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fc03fc01fc03fc;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fc03fc01fc03fc;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ef0120;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ef0120;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000e9ece9ec;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000e9ece9ec;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000e9ece9ec;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000e9ece9ec;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ff00ff0120;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000e9ec0000e9ec;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ff00ff0120;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000e9ec0000e9ec;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffdd001dffe00020;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffdd001dffe00031;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffdd001dffe00020;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffdd001dffe00031;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x3ff73ff83ff73ff8;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x3ff73ff83ff73ff8;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0003000300030003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0600060000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0600060000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe00010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000007fff8;
++  *((unsigned long *)&__m256i_result[1]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000007fff8;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1e0000001e002000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1e0000001e002000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff3225;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff3225;
++  *((unsigned long *)&__m256i_op1[3]) = 0x2221201f1e1d1c1b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1a19181716151413;
++  *((unsigned long *)&__m256i_op1[1]) = 0x2221201f1e1d1c1b;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1a19181716151413;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000004442403;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000004442403;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x63);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fef0000ffff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fef0000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0xde00fe0000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000fe010000fe01;
++  *((unsigned long *)&__m256i_result[1]) = 0xde00fe0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000fe010000fe01;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00fe00ff00fe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000007070707;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff00ff07070707;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000007070707;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff00ff07070707;
++  __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x03ff000003ff03ff;
++  *((unsigned long *)&__m256i_result[2]) = 0x03ff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x03ff000003ff03ff;
++  *((unsigned long *)&__m256i_result[0]) = 0x03ff000000000000;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_result[2]) = 0x0007ffff0007ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000700000007;
++  *((unsigned long *)&__m256i_result[0]) = 0x0007ffff0007ffff;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x66);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000e000e;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000e0000000e00;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfc003802fc000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x03802fc000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x03802fc000000000;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x5a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x080808000828082f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0808080008280820;
++  *((unsigned long *)&__m256i_op0[1]) = 0x080808000828082f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0808080008280820;
++  *((unsigned long *)&__m256i_op1[3]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op1[2]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op1[0]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00828082f0808080;
++  *((unsigned long *)&__m256i_result[2]) = 0xf18181818132feea;
++  *((unsigned long *)&__m256i_result[1]) = 0x00828082f0808080;
++  *((unsigned long *)&__m256i_result[0]) = 0xf18181818132feea;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x24);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_w_d (__m256i_op0, __m256i_op1, 0x39);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x43);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe01fe01fc01fc01;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe01fe01fc01fc01;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfc01000000003fc0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfc01000000003fc0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000feff0001ffb8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000feff0001ffb8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_h_w (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000126000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2555205ea7bc4020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000126000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2555205ea7bc4020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op1[2]) = 0x10ffffff10000006;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0fffffff10000006;
++  *((unsigned long *)&__m256i_op1[0]) = 0x10ffffff10000006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000004980008;
++  *((unsigned long *)&__m256i_result[2]) = 0x003ffffffc400000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000004980008;
++  *((unsigned long *)&__m256i_result[0]) = 0x003ffffffc400000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x46);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00f0000000f00010;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0ff00fff0ff10;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00f0000000f00010;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0ff00fff0ff10;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0087ff87f807ff87;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0087ff87f807ff87;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x68);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_b_h (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x50);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000050005;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf007fe76f008fe19;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf08aff01f07cc291;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf007fe76f008fe19;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf08aff01f07cc291;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000001400;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000003c01ff9;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000003c01ff9;
++  __m256i_out = __lasx_xvsrlni_d_q (__m256i_op0, __m256i_op1, 0x66);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
+new file mode 100644
+index 000000000..22e62a3e7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlr.c
+@@ -0,0 +1,515 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x40d74f979f99419f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x40d74f979f99419f;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff8080000004000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000080000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff8080000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000200000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000000;
++  __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfff0000000000080;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ff0000000000000;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001020202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001020202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003ddd80007bbb;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000002222;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003ddd80007bbb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff800000000000;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f007f0081007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x009f00f8007e00f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f007f0081007f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0ea85f60984a8555;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00a21ef3246995f3;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1189ce8000fa14ed;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0e459089665f40f3;
++  *((unsigned long *)&__m256i_result[3]) = 0x000100f800000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000f800000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000000000010;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fc00000428a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffc040ffffc09d;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f7f000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f7f000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100010001;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x80000000ff800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7c00000880008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0100000001000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0100000001000100;
++  __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000064;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000064;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000008;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffff80;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffff80;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000430207f944;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000038000000268;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000038000000268;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff010ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff010ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000201;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000201;
++  __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff01fb0408;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff01fb0408;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[2]) = 0xf2b180c9fc1fefdc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_result[0]) = 0xf2b180c9fc1fefdc;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000080;
++  *((unsigned long *)&__m256i_result[3]) = 0xff1cff1cff1c3fc7;
++  *((unsigned long *)&__m256i_result[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_result[1]) = 0xff1cff1cff1c3fc7;
++  *((unsigned long *)&__m256i_result[0]) = 0xff1cff1cff1cff1c;
++  __m256i_out = __lasx_xvsrlr_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_op1[1]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6b6b6b6b6b6b6b6b;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000d6d6d;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000d6d6d;
++  __m256i_out = __lasx_xvsrlr_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff01ff01ff01f010;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff01ff01ff01f010;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff01ff01ff01f010;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff01ff01ff01f010;
++  *((unsigned long *)&__m256i_result[3]) = 0x000078780000f0f1;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000078780000f0f1;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlr_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffc00040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffc00040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x04f104f104f504ed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1080108010060002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1080108010060002;
++  __m256i_out = __lasx_xvsrlr_b (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
+new file mode 100644
+index 000000000..71f770aff
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlri.c
+@@ -0,0 +1,416 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x33);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001000000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x28);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000505;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff0002fffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0002ff7e8286;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff0002fffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0002ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0202000002020202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202000002010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0202000002020202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202000002020000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_result[3]) = 0x0703030307030203;
++  *((unsigned long *)&__m256i_result[2]) = 0x0703030307030203;
++  *((unsigned long *)&__m256i_result[1]) = 0x0703030307030203;
++  *((unsigned long *)&__m256i_result[0]) = 0x0703030307030203;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003f3fc6c68787;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003f3f87870000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003f3fc6c68787;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003f3f87870000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010183f95466;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01010101d58efe94;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000101000083f95;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_result[1]) = 0x00001010000d58f0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010002000100020;
++  __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0020000000200000;
++  __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000040000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000020000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000020000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000e000e000e000e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x39);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000040000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000040000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000040000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000040000000000;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op0[2]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_op0[0]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_result[2]) = 0x132feea900000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x04e8296f18181818;
++  *((unsigned long *)&__m256i_result[0]) = 0x132feea900000000;
++  __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000038000000268;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000038000000268;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000001200000011a;
++  *((unsigned long *)&__m256i_result[2]) = 0x2040204020402040;
++  *((unsigned long *)&__m256i_result[1]) = 0x000001200000011a;
++  *((unsigned long *)&__m256i_result[0]) = 0x2040204020402040;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_w (__m256i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffa003e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000fffb009c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffa003e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000fffb009c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvsrlri_d (__m256i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020004000400040;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020004000400040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020004000400040;
++  *((unsigned long *)&__m256i_result[0]) = 0x0020004000400040;
++  __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000800000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffbfffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffbfffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0102020202010202;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0102020202010202;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvsrlri_b (__m256i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0008000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0008000000000000;
++  __m256i_out = __lasx_xvsrlri_h (__m256i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
+new file mode 100644
+index 000000000..cbc1de371
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrn.c
+@@ -0,0 +1,410 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffff328dfff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6651bfff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202020201010000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000050005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000505;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000000000000;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000001a00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff820002ff820002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff820002ff820002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00020002ff820002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00020002ff820002;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00020421d7d41124;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00020421d7d41124;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff000200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff000200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff020000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff020000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001fe01fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000ff0100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001fe01fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000ff0100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000007c8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01fe01fe0000ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01fe01fe0000ff01;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf9f9f9f9f9f9f9f9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xf9f9f9f900000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xf9f9f9f900000002;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ff0100ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000004843ffdff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00043fff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00043fff00000000;
++  __m256i_out = __lasx_xvsrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff1cff1b00e300e4;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff1cff1b00e300e4;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff1cff1b00e300e4;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff1cff1b00e30100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x002000000020ffff;
++  __m256i_out = __lasx_xvsrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffdbff980038ffaf;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffafffe80004fff1;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffdbff980038ffaf;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffafffe80004fff1;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000020202020202;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000020202020202;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101000000010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000e3fec0004fff1;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000e3fec0004fff1;
++  __m256i_out = __lasx_xvsrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
+new file mode 100644
+index 000000000..8fc7a0029
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvsrlrni.c
+@@ -0,0 +1,455 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x7a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0100010001000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000808000008080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000808000008081;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000081;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x68);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000002a5429;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000801380f380fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000801380f300fb;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000007f3a40;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x42);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x56);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xf0000000f0000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xf0000000f0000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0404040404040404;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x817f11ed81800ff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000004fc480040;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000004fc480040;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000004fc480040;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000004fc480040;
++  __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0004000404040404;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000400000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0004000400000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000400000004;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x80208020c22080a7;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x80208020c22080a7;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdf80ff20df80ff20;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdfc2ff20df80ffa7;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdf80ff20df80ff20;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdfc2ff20df80ffa7;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000840100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xbffebffec0febfff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000840100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xbffebffec0febfff;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffc0c0ffffbfc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffc0c0ffffbfc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00003f3f0000400d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00003f3f0000400d;
++  *((unsigned long *)&__m256i_result[3]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x44);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffe00000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000048;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbfffa004fffd8000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbfffa004fffd8000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003f0000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00002fffe8013fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003f0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00002fffe8013fff;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000101000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000101000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00010001000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00010001000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x5a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00b2fe28e4420609;
++  *((unsigned long *)&__m256i_op0[2]) = 0x028da7fe15020000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00b2fe28e4420609;
++  *((unsigned long *)&__m256i_op0[0]) = 0x028da7fe15020000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000598;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000598;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0x6d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000800000010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000800000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000002000000;
++  __m256i_out = __lasx_xvsrlrni_d_q (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000003ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001ffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000003ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001ffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_w_d (__m256i_op0, __m256i_op1, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0040000000000000;
++  __m256i_out = __lasx_xvsrlrni_w_d (__m256i_op0, __m256i_op1, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001200000012;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0fff0fff0fc00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0fff0fff0fc00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000f880f87e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000f880f87e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000008000;
++  __m256i_out = __lasx_xvsrlrni_h_w (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000081220000812c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000812000008120;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000081220000812c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000812000008120;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_result[0]) = 0xfefefefefefefefe;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvsrlrni_b_h (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvssran-xvssrani-.patch b/LoongArch-Add-tests-for-ASX-vector-xvssran-xvssrani-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..534b17d51c718f31cc5b3bb5efc88e73b780b454
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvssran-xvssrani-.patch
@@ -0,0 +1,4258 @@
+From 445ae07ab55a647f7aec97c2334fb276a44f2af1 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Wed, 13 Sep 2023 12:37:41 +0800
+Subject: [PATCH 121/124] LoongArch: Add tests for ASX vector
+ xvssran/xvssrani/xvssrarn/xvssrarni instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvssran.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrani.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvssran.c      |  905 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvssrani.c     | 1235 +++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvssrarn.c     |  905 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvssrarni.c    | 1160 ++++++++++++++++
+ 4 files changed, 4205 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
+new file mode 100644
+index 000000000..fdb0c25f1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssran.c
+@@ -0,0 +1,905 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00007ffe81fdfe03;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7ffe800000000000;
++  __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffef000004ea;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000607f700000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1717171717171717;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000607f700000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffe81;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00f9f90079f9f9f9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x79f9f9f900000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00f9f90079f9f9f9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x79f9f9f900000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f7f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f007f78;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000033007e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000021;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007fff;
++  __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000080;
++  __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000002aaad555;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000002aaad555;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff00000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffc00000ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffc00000ffc0ffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffcfee0fe00ffe0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffcfee0fe00ffe0;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000001fff9fff8;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fff9fff8;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000001fff9fff8;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000001fff9fff8;
++  *((unsigned long *)&__m256i_op1[3]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op1[1]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe1616161e1614e60;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffff900000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffff900000003;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0000;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000100000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00000000;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff2400000000ff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffeffe4fffeff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff6400000000ff00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffeff66fffeff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fe0100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fe0100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc2c2c2c2c2c2c2c2;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op1[2]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_op1[1]) = 0xdbcbdbcbecececec;
++  *((unsigned long *)&__m256i_op1[0]) = 0xdbcbdbcb0000dbcb;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000016000000480d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000226200005111;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000016000000480d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1131288800000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1131288800000002;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0a0a0a0a7f0a0a0a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffff040000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007f3f7f007f1f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f3f7f007f1f;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x007f8080007f007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000000007ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8d8d72728d8d7272;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8d8d72728d8d8d8d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8d8d72728d8d7272;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8d8d72728d8d8d8d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000001010800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001010800;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001010800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001010800;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000430207f944;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffff0008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffff0008;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfdfdfdfdfdfdfdfd;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe27fe2821d226278;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfdfdfdfdfdfdfdfd;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe27fe2821d226278;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000000d;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080ff0080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000400080ffc080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080ff0080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00ff000000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00ff000000000080;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007f807f80;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007f807f80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000007f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000007f7f;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000001fff0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000feff0001ffb8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000001fff0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000feff0001ffb8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff1cff18;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffff1cff1c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff1cff18;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssran_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000070002000a;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffe7ffffffe7;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbf3ffffffffeffed;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbf3ffffffffeffed;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbf3ffffffffeffed;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbf3ffffffffeffed;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe013fcf2e015fc38;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe013fd00dff78420;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe013fcf2e015fc38;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe013fd00dff78420;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssran_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8768876887688769;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8282828282828282;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8768876887688769;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000003fffc0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000003fffc0;
++  __m256i_out = __lasx_xvssran_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
+new file mode 100644
+index 000000000..dd3c2c6f6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrani.c
+@@ -0,0 +1,1235 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f057f0b7f5b007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000007f007f5;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000001fc000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000c475ceb40000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fb0819280000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x074132a240000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000003a0200;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000000c9;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007fff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007fff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x37);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffff8001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffff0ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffff0ffff0000;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000080008000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000080008000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001ffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001ffffff;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x73);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0100010001000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0100010001000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0004000400040004;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff0000000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000f0000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1fe01e0000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x22);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x6b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xce7ffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xce7ffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6300000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff39ffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff39ffffff;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x5e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fe8001b72e0001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xb72e8001b72eaf12;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fe000247639d9c;
++  *((unsigned long *)&__m256i_op0[0]) = 0xb5308001b72eaf12;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000c40086;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00001fff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00001fff;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x38f7414938f7882f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x38f7414938f78830;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000801380f380fe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000801380f300fb;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000008;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0303030303020000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0303030303020000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x31);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000007;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x4d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x59);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xd04752cdd5543b56;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6906e68064f3d78b;
++  *((unsigned long *)&__m256i_op0[1]) = 0xd04752cdd5543b56;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6906e68064f3d78b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000004560420;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff1100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000004560420;
++  *((unsigned long *)&__m256i_result[3]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000fff00004542;
++  *((unsigned long *)&__m256i_result[1]) = 0x00ff00ffff00ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000fff00004542;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xdf00000052a00000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x5b7f00ff5b7f00ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00c0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0040000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000c0000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000040000000;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffffe02;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000300000005fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffff02;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000300000005fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0007fd00000f02ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001fffeff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00fe00feff02ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ffffffff00;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000007f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000018;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000002000000019;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000200000001e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000002000000019;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0004000000030000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000400000003c000;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x33);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x009c3e201e39e7e3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x87c1135043408bba;
++  *((unsigned long *)&__m256i_op0[1]) = 0x009c3e201e39e7e3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x87c1135043408bba;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f7f7f5c8f374980;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001b0b1b4b5dd9f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f7f7f5c8f374980;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xd0d8eecf383fdf0d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100007f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100007f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x39);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007c7fff00007fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00817fff00810000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007c7fff00007fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00817fff00810000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x7c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x4000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000457d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000b03f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000457d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000b03f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x2000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x2000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0f000f000f000f00;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0f000f000f000f00;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007fc0083fc7c007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007fc0083fc7c007;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x42);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00067fff00047fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00027fff000080fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00067fff00047fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00027fff000080fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x067f047f027f0080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x067f047f027f0080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0af57272788754ab;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000005e80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0af57272788754ab;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000005e80;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000000f0f0f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f0000007f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000000f0f0f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f0000007f;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000040004000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000100000000;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x4b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0004000f00100003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000400030010000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0400100004001000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0400100004001000;
++  __m256i_out = __lasx_xvssrani_hu_w (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256i_op0[2]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3eab77367fff4848;
++  *((unsigned long *)&__m256i_op0[0]) = 0x408480007fff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000700000008;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000700000008;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x55);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xc07f8000c07f8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xc07f8000c07f8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000fff01fe0;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000fff01fe0;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fe96fe95;
++  *((unsigned long *)&__m256i_op0[2]) = 0x6afc01000001ff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fe96fe95;
++  *((unsigned long *)&__m256i_op0[0]) = 0x6afc01000001ff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000010000ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000010000ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x7e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000404;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000404;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0404000004040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x4000400040004000;
++  *((unsigned long *)&__m256i_result[2]) = 0x4000400040004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x4000400040004000;
++  *((unsigned long *)&__m256i_result[0]) = 0x4000400040004000;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000020202000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000020202000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007f433c78;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000001ff1;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000001ff1;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x53);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x03fbfffc03fc07fc;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x03fbfffc03fc07fc;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff80000000;
++  __m256i_out = __lasx_xvssrani_w_d (__m256i_op0, __m256i_op1, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff003fffc0;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000003fffc0;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffc00fffffc00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffc00fffffc00;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00ff007f007f00;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[2]) = 0xc03fc03fc03fc03f;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[0]) = 0xc03fc03fc03fc03f;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000000ff;
++  __m256i_out = __lasx_xvssrani_b_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ff80;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_wu_d (__m256i_op0, __m256i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x6c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrani_h_w (__m256i_op0, __m256i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000700000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000700000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000005;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000005;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x60);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0004000500040005;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrani_du_q (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff80007fff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrani_bu_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffff0000fffd0004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff0000fffd0004;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffff0000fffd0004;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0002fffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff0000fffd0004;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000000f;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000000f;
++  __m256i_out = __lasx_xvssrani_d_q (__m256i_op0, __m256i_op1, 0x6c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
+new file mode 100644
+index 000000000..7848ddd41
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarn.c
+@@ -0,0 +1,905 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000100da000100fd;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001ffe20001fefd;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001009a000100fd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001ff640001fefd;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000100da000100fd;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001ffe20001fefd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001009a000100fd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001ff640001fefd;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007ff90000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000001ff60000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000001;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001b00fd0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001b00fd0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff00ffffff00ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0080000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00010002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00c200c200c200c2;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00c200c200c200bb;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffc2c2ffffc2c2;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffc2c2ffffc2c2;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffc2c2ffffc2c2;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffc2c2ffffc2c2;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x003100310031002f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x003100310031002f;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffefffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffe0001fffe0003;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001000000010000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001000000000002;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffff6f20;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000781e0000f221;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff6f20;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000781e0000f221;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000fffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op0[2]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op0[0]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbf00bf00bf00bf00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbf84bf00bf00bf0e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00007f7f80007fa3;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007f670000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00007f7f80007fa3;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007f670000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000008;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000008;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfff1fff1fff1fff1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffff000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffff000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffff000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffff000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffff0001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000408080c111414;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff88ff88;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000800400010006d;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000800400010006d;
++  __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0200000002000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x02000000fdffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0200000002000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x02000000fdffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000000000b7;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffefff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000004ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000004ffffffff;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3fd1000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff000000ff000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff000000ff000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffb6811fffff80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffff97c120000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffb6811fffff80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffff97c120000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000001fffffff9;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xdb410010cbe10010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xdb410010cbe10010;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000019ffdf403;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000011ffd97c3;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000019ffdf403;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000011ffd97c3;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x002000000020ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004000000040;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000004000000040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1a1a1a2c1a1a1a2c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1a1a1a2c1a1a1a2c;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1a1a1a2c1a1a1a2c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1a1a1a2c1a1a1a2c;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3838383838383838;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffdfffffe00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x3838383838383838;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffdfffffe00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00000000;
++  __m256i_out = __lasx_xvssrarn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000020002000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000020002000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffbffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffbffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc03b000200020002;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc03b000200020002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc03b000200020002;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc03b000200020002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000001ec020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000001ec020;
++  __m256i_out = __lasx_xvssrarn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
+new file mode 100644
+index 000000000..b1c16baf4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrarni.c
+@@ -0,0 +1,1160 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffc00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000020000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000020000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000f20;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000000009f0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00001f41ffffbf00;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000400000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000010000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000010000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000010000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000010000000;
++  __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000100;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x5d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf5f5bfbaf5f5bfbe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf5f0bfb8f5d8bfe8;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf5f5bfbaf5f5bfbe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf5f0bfb8f5d8bfe8;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf5f5bfbaf5f5bfbe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf5f0bfb8f5d8bfe8;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf5f5bfbaf5f5bfbe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf5f0bfb8f5d8bfe8;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff5f5c;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff5f5c;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x6c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op0[2]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op0[1]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op0[0]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op1[3]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op1[2]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op1[1]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_op1[0]) = 0x005500550055ffab;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fffff6ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fffff6ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0a09080706050403;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0a09080706050403;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0003000200000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003000200000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001010300010102;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000410041;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000df93f0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000077843;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000003800000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x73);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8001b72e0001b72e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8001b72eaf12d5f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000247639d9cb530;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8001b72eaf12d5f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffe056fd9d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffceba70;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00150015003a402f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x333568ce26dcd055;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00150015003a402f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x333568ce26dcd055;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000007d0d0d0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000007d0d0d0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000098;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000040000ffca;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000800000098;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000040000ff79;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff04ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff04ff00ff00ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000008000000a;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000008000000a;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x44);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000120e120d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000120e120d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000907;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0016001600160016;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0016001600160016;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0016001600160016;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0016001600160016;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010002000100020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000fffffffe;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffe000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffe000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x54);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00030006fa05f20e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00030081bd80f90e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000018;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000018;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_w_d (__m256i_op0, __m256i_op1, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x02407a3c00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0d0cf2f30d0cf2f3;
++  *((unsigned long *)&__m256i_op0[1]) = 0x02407a3c00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0d0cf2f30d0cf2f3;
++  *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0020000f0000000f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0010000f0000000f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000f0f0f0f0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf0f0f0f0f0f0f0f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000ff0fff0fff0f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000ff0fff0fff0f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffff70156;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x74);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xde00fe0000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000fe010000fe01;
++  *((unsigned long *)&__m256i_op0[1]) = 0xde00fe0000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fe010000fe01;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000100010001ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000100010001ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000100010001ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000100010001ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00007ff000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x79);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000070007000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7000700070007000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[3]) = 0x0e0e0e0e0e0e0e0e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000e0e0e0e0e0e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc848c848c848c848;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8848c848c848c848;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a1a1a15e5e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a1a1a15e5e;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fe000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fe000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x45);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001fffe0001fffa;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001fffe00018069;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001fffe0001fffa;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0001fffe00018069;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000002000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000002000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x64);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000004000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000004000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00e9a80014ff0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00b213171dff0606;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00e9a80014ff0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00000000ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00000000ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000038000000268;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000038000000268;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001010101;
++  __m256i_out = __lasx_xvssrarni_bu_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0400000004000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000400;
++  *((unsigned long *)&__m256i_result[1]) = 0x0400000004000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000400;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x5b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0080000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x08000000000000f8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x08000000000000f8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0200000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x2000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0200000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x2000000000000000;
++  __m256i_out = __lasx_xvssrarni_wu_d (__m256i_op0, __m256i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000013;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x6a);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x36);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x498000804843ffe0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4980008068400000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarni_hu_w (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000008;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000040000001b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000008;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000040000001b;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x41dffbffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffff00ff800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f80ffffff808000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f80ffffff808000;
++  __m256i_out = __lasx_xvssrarni_b_h (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001e00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrarni_h_w (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000500020002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000700020033;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000500020002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000700020033;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000500020002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000700020033;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000500020002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000700020033;
++  *((unsigned long *)&__m256i_result[3]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x1400080008000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x1400080008000000;
++  __m256i_out = __lasx_xvssrarni_d_q (__m256i_op0, __m256i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000001c;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000001de;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000001c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000001de;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000060000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000060000000;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x44);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00003fea0014734d;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00003fe900140d85;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00003fea0014734d;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00003fe900140d85;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff0000ff00;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff0000ff00;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrarni_du_q (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-vector-xvssrln-xvssrlni-.patch b/LoongArch-Add-tests-for-ASX-vector-xvssrln-xvssrlni-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..cd078c59c6b4c9bb4e1e01f63f698a0d360331e7
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-vector-xvssrln-xvssrlni-.patch
@@ -0,0 +1,4123 @@
+From 983fd43b599dd252bc7f869be27bf1677f8eeca7 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Wed, 13 Sep 2023 12:35:41 +0800
+Subject: [PATCH 120/124] LoongArch: Add tests for ASX vector
+ xvssrln/xvssrlni/xvssrlrn/xvssrlrni instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrln.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvssrln.c      |  965 ++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvssrlni.c     | 1130 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvssrlrn.c     |  815 ++++++++++++
+ .../loongarch/vector/lasx/lasx-xvssrlrni.c    | 1160 +++++++++++++++++
+ 4 files changed, 4070 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+new file mode 100644
+index 000000000..356eb2182
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrln.c
+@@ -0,0 +1,965 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x44bb2cd3a35c2fd0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xca355ba46a95e31c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000100ab000500a0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000200b800080124;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001011b000200aa;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00150118008f0091;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f057f0b7f5b007f;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000020000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000020000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000007f00;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fff7ffe7fffeffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffd84900000849;
++  *((unsigned long *)&__m256i_op0[0]) = 0x07fffc670800f086;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000000;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000017ffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff0ffff0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff0ffff0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000017000000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000017000000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001700080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001700080;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2000200020002000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffff8c80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff0e400;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000005536aaaaac;
++  *((unsigned long *)&__m256i_op0[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000005536aaaaac;
++  *((unsigned long *)&__m256i_op0[0]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfff9fffffffbffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffdaaaaffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000060102150101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000060102150101;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfe00000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1cfd000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff00000000000000;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000003f00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000003f0000;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f7f7f7f0000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000154dc84;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000089;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff00000089;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfe7fffecfe7fffec;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfe7fffecfe7fffec;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000001;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffff600000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff000009ec;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffff600000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff000009ec;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8060000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8060000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff000000010000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000008000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000003f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff00000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff00000001;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff81ff7dffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff81ff7dffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff81ff7d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f7f7f7f7f017ffd;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f7f7f7f7f017ffd;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000100000007;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000100000007;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000001;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x01ff0020ff1f001f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fe1ffe0ffe1ffe0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00011ffb0000bee1;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00011ffb0000bee1;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0100010001000100;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01ffff4300ffff00;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x04e8296f08181818;
++  *((unsigned long *)&__m256i_op0[2]) = 0x032feea900000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x04e8296f08181818;
++  *((unsigned long *)&__m256i_op0[0]) = 0x032feea900000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffff0000;
++  __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffcfffcfffc;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffc01fc01;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000003fc03bbc;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x41cfe01dde000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x41cfe01dde000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000013fc03bbc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000013fc03bbc;
++  __m256i_out = __lasx_xvssrln_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fff8ff40;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ff0100090040;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fff8ff40;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ff0100090040;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000017f00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f7f03030000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xdf80df80df80dfff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffdf80dfff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0101010101010101;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000017f7f7f7f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000017f7f7f7f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000017fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000017fff;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffff800000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff000000017fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff000000017fff;
++  __m256i_out = __lasx_xvssrln_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrln_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000003fffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000003fffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffff010100000001;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000000c;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrln_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
+new file mode 100644
+index 000000000..116bebbb6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlni.c
+@@ -0,0 +1,1130 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lasxintrin.h>
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7f7f7f7f00007f7f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3f28306860663e60;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x40d74f979f99419f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff01fd7fff7fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007fff7fff7fff;
++  __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffe0ffe0ffe0ffe0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffe0ffe0ffe0ffe0;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffe0ffe0ffe0ffe0;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffe0ffe0ffe0ffe0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1e1800001e180000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1e18000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000001e18;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ffe0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000001e18;
++  __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x70);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[1]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_op1[0]) = 0x1c1b1a191c1b1a19;
++  *((unsigned long *)&__m256i_result[3]) = 0x1fffffff1fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0383634303836343;
++  *((unsigned long *)&__m256i_result[1]) = 0x1fffffff1fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0383634303836343;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000401000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x68);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x6c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0036003200360032;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0036003200360032;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0036003200360032;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0036003200360032;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000800000004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000bf6e0000c916;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000030000fff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x001175f10e4330e8;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff8f0842ff29211e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffff8d9ffa7103d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000e00ff00ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000ff00ff;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x18);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f80780000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffff00001000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff00001000;
++  __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x39);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op1[2]) = 0x6aeaeaeaeaeaeaea;
++  *((unsigned long *)&__m256i_op1[1]) = 0xebebebebebebebeb;
++  *((unsigned long *)&__m256i_op1[0]) = 0x6aeaeaeaeaeaeaea;
++  *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000003f0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000003f0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffc0000fee0;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000fe000000ffe0;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffff900000003;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffff900000003;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7ffe00007f000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ffe00007f000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffff0000ffff;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe17cec8fe08008ac;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe0801f41e0800168;
++  *((unsigned long *)&__m256i_op1[3]) = 0x9240f24a84b18025;
++  *((unsigned long *)&__m256i_op1[2]) = 0x9240f24a84b18025;
++  *((unsigned long *)&__m256i_op1[1]) = 0xb2c0b341807f8006;
++  *((unsigned long *)&__m256i_op1[0]) = 0xb2c0b341807f8006;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000012481e4950;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001658166830;
++  __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x5b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x77777777f7777777;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf777777777777777;
++  *((unsigned long *)&__m256i_op0[1]) = 0x77777777f7777777;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf777777777777777;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ff24;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ff24;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003;
++  __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000040404240;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040404240;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000040404240;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000040404040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000040404240;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f7f00007f7f;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00010001000c4411;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100044411;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000002800000010;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000002800000010;
++  *((unsigned long *)&__m256i_result[3]) = 0x0002000200020018;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0002000200020008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000c0000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000c0000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000040000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0003030300000300;
++  *((unsigned long *)&__m256i_result[2]) = 0x0003030300000300;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003030300000100;
++  *((unsigned long *)&__m256i_result[0]) = 0x0003030300000100;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000800000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000002000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000800000;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x00003fff00003fff;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f00ff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0007fff8000ffff0;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000030007;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000003f0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000030007;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000007f7f817f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000007f7f817f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7f807f007f7f817f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4ffc3f783fc040c0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3fc03f803fc040c0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4ffc3f783fc040c0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3fc03f803fc040c0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0003fbfc0bfbfc03;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0003fbfc0bfbfc03;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff56ff55ff01ff01;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff56ff55ff01ff01;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007f7f7f7f;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xa90896a400000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa90896a400000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000504fffff3271;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff47b4ffff5879;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f7f000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x007f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f7f000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x007f7f7f7f7f7f7f;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff80017fff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff80017fff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fffffff;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf7f7f7f7f7f7f7f7;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffffffe00000000;
++  __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x80000000ff810011;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x80000000ff810011;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff8180ffff8181;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff8180ffff8181;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000008000ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff81ff81;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000008000ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff81ff81;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffebeeaaefafb;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffebeeaaeeeeb;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffebeeaaefafb;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffebeeaaeeeeb;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fefffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x01ffbfff00000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x03ffffff03ffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x01ffbfff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x03ffffff03ffffff;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x001f001f001f001f;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x61);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0200000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0200000000000000;
++  __m256i_out = __lasx_xvssrlni_du_q (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x1f001f00000007ef;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00001fff200007ef;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000003030000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000030400;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007000008e700000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007000008e700000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7171717171010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8e8e8e8e8f00ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7171717171010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8e8e8e8e8f00ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[2]) = 0xe2e2e202ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000000000ff00;
++  *((unsigned long *)&__m256i_result[0]) = 0xe2e2e202ffffffff;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_op0[2]) = 0x8800c800c800c801;
++  *((unsigned long *)&__m256i_op0[1]) = 0xc800c800c800c800;
++  *((unsigned long *)&__m256i_op0[0]) = 0x8800c800c800c801;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0003800400038004;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000a800b000a800b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000e0010000e;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000e0010000e;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x4e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x38);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xe07de0801f20607a;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x01ff01ff01c0003e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x01ff01ff01c0003e;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0707070707070707;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0707070707070707;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0018001800180018;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0018001800180018;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0018001800180018;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0018001800180018;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x3000300030003000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x3000300030003000;
++  __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[2]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op0[0]) = 0x01fe01fe01fe01fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_wu_d (__m256i_op0, __m256i_op1, 0x35);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000598;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000598;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000002cc0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000002cc0000;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x31);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff81001dff9d003b;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff81001dff9dff9e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff81001dff9d003b;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0002000200010002;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f1d7f7f7f1d7f3b;
++  *((unsigned long *)&__m256i_result[2]) = 0x0202010202020102;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f1d7f7f7f1d7f3b;
++  *((unsigned long *)&__m256i_result[0]) = 0x0202010202020102;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000dfffffff1;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000cfffffff3;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000dfffffff1;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000cfffffff3;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00003f3f00003f3f;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000080c000c080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000080c000c080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000004000;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x31);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000010006d;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000004000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000004000000080;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000118;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000118;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_w_d (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x007efffefffefffe;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff80fffffffffffe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x007efffefffefffe;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff80fffffffffffe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000e3ab0001352b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000e3ab0001352b;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000038ea4d4a;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff00007fff0000;
++  __m256i_out = __lasx_xvssrlni_h_w (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_bu_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000a400ff004f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000a400ff004f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000a400ff004f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000010000005e;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000a400ff004f;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_b_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x001fffffffe00011;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001fffffffe00011;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffffffffffff;
++  __m256i_out = __lasx_xvssrlni_d_q (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlni_hu_w (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
+new file mode 100644
+index 000000000..977061097
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrn.c
+@@ -0,0 +1,815 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x1515151515151515;
++  *((unsigned long *)&__m256i_op0[2]) = 0x1515151515151515;
++  *((unsigned long *)&__m256i_op0[1]) = 0x1515151515151515;
++  *((unsigned long *)&__m256i_op0[0]) = 0x1515151515151515;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xf800f800f800c000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf800f800f800a000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf800f800f800c000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf800f800f800a000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf800f800f800e000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffefefffffcfa;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op1[2]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_op1[1]) = 0x5555555536aaaaac;
++  *((unsigned long *)&__m256i_op1[0]) = 0x55555555aaaaaaac;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffc0000fffc0000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002000200020002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000200020002;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfff9fff9fff9fff9;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff90000fff9fff9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_op1[1]) = 0x108659e46485f7e1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4df5b1a3ed5e02c1;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffff0004ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffff0004ff;
++  __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000005be55bd2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbabababababababa;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffef;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0404ffff00000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0404040800000010;
++  __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x007f00f8ff7fff80;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000fff6a9d8;
++  *((unsigned long *)&__m256i_op1[1]) = 0x007f00f8ff7fff80;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000fff6a9d8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54290;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ffff;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000001b00fd0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000001b00fd0000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000019;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000019;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000070700000707;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000009091b1b1212;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000070700000707;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000009091b1b1212;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000027d00f8;
++  *((unsigned long *)&__m256i_op1[2]) = 0x040204660265fe22;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000027d00f8;
++  *((unsigned long *)&__m256i_op1[0]) = 0x040204660265fe22;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xe273e273e273e273;
++  *((unsigned long *)&__m256i_op0[2]) = 0xe273e273e273e273;
++  *((unsigned long *)&__m256i_op0[1]) = 0xe273e273e273e273;
++  *((unsigned long *)&__m256i_op0[0]) = 0xe273e273e273e273;
++  *((unsigned long *)&__m256i_op1[3]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_op1[1]) = 0xd207e90001fb16ef;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc8eab25698f97e90;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0001c4e8ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0001c4e8ffffffff;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000000000ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffff0000ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffff0000ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00007f0200007f02;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00007f0200007f02;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0097011900f4009f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003200d4010f0144;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0097011900f301cd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x010b008800f80153;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff810011;
++  *((unsigned long *)&__m256i_op1[3]) = 0x3fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x3fff8000ffa08004;
++  *((unsigned long *)&__m256i_op1[1]) = 0x3fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x3fff8000ffa08004;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000000ff01;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000000000ff01;
++  __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000fc38fc38;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfc00000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffbfffa0ffffff80;
++  *((unsigned long *)&__m256i_op1[1]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffbfffa0ffffff80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff02000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff02000000;
++  __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_op0[2]) = 0xa1a1a1a15e5e5e5e;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa1a1a1a1a1a1a1a1;
++  *((unsigned long *)&__m256i_op0[0]) = 0xa1a1a1a15e5e5e5e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000000457db03e;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff457db03f;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000ffff00020001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000ffff00020001;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000007f7f7f80;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffff00000080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000007f007f007f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000002;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00ff0000ffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00ff0000ffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000080c000c080;
++  *((unsigned long *)&__m256i_op0[1]) = 0x4000c08000000080;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000080c000c080;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000101000001010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000101000001010;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000404;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000404;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000ff88ffc0;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00000000ff78ffc0;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000002000000000;
++  __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0001000100800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000200a000020020;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000200a000020020;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_bu_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xff1cff1cff1c3fc7;
++  *((unsigned long *)&__m256i_op0[2]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op0[1]) = 0xff1cff1cff1c3fc7;
++  *((unsigned long *)&__m256i_op0[0]) = 0xff1cff1cff1cff1c;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000200000002;
++  __m256i_out = __lasx_xvssrlrn_w_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000100;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000100;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0002000200000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0002000200000000;
++  __m256i_out = __lasx_xvssrlrn_h_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000017f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000017f7f7f7f;
++  __m256i_out = __lasx_xvssrlrn_b_h (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00000005ffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000007ffffffce;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m256i_op1[3]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrn_wu_d (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xf5fffc00fc000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0001001900010019;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0a02041904010019;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0001001900010019;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0a02041904010019;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000007b007e;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000007b007e;
++  __m256i_out = __lasx_xvssrlrn_hu_w (__m256i_op0, __m256i_op1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
+new file mode 100644
+index 000000000..b55e388b1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvssrlrni.c
+@@ -0,0 +1,1160 @@
++/* { dg-do run } */
++/* { dg-options "-mlasx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m256i __m256i_op0, __m256i_op1, __m256i_op2, __m256i_out, __m256i_result;
++  __m256 __m256_op0, __m256_op1, __m256_op2, __m256_out, __m256_result;
++  __m256d __m256d_op0, __m256d_op1, __m256d_op2, __m256d_out, __m256d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x3133c6409eecf8b0;
++  *((unsigned long *)&__m256i_op0[2]) = 0xddf50db3c617a115;
++  *((unsigned long *)&__m256i_op0[1]) = 0xa432ea5a0913dc8e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x29d403af367b4545;
++  *((unsigned long *)&__m256i_op1[3]) = 0x38a966b31be83ee9;
++  *((unsigned long *)&__m256i_op1[2]) = 0x5f6108dc25b8e028;
++  *((unsigned long *)&__m256i_op1[1]) = 0xf41a56e8a20878d7;
++  *((unsigned long *)&__m256i_op1[0]) = 0x683b8b67e20c8ee5;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7ffffffffffff7ff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffe06df0d7;
++  *((unsigned long *)&__m256i_op0[1]) = 0x988eb37e000fb33d;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffed95be394b1e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x8000ffff8000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x06f880008000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x800080008000b8f1;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ff00ff00;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ff00ff00;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000040100000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000040100000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000040100000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000040100000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0080200000802000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0080200000802000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000f18080010000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000f18080010000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000808080;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000808;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffefffffefc;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000010;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000020afefb1;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f350104f7ebffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000003fffc1;
++  *((unsigned long *)&__m256i_op1[0]) = 0x005c0003fff9ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000000fe6a021;
++  *((unsigned long *)&__m256i_result[1]) = 0x2000000020000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000000b8000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000020001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff7fff7fff;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0020000000000000;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x4b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x33);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000100000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000002020000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000201eff0;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000002020000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000001fef010;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0010000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0010001000000000;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffefffefffefffd;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fff000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fff7fff00000000;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x29);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0e0f1192846ff912;
++  *((unsigned long *)&__m256i_op0[2]) = 0x002a0074666a4db9;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0e0f1192846ff912;
++  *((unsigned long *)&__m256i_op0[0]) = 0x002a0074666a4db9;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000100000018;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000100000018;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fff7fff05407fff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001900000019;
++  *((unsigned long *)&__m256i_op1[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0408040800000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0408040800000004;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[2]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[1]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_op1[0]) = 0x07efefefefefefee;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000001fbfbfc;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000001fbfbfc;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x62);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000fe01020b0001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000fe01020b0001;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m256i_op1[3]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000202020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000404040;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000202020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000404040;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x68);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000010486048c;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000010486048c;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x6f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfe7fffecfe7fffec;
++  *((unsigned long *)&__m256i_op1[2]) = 0xff800000ff800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfe7fffecfe7fffec;
++  *((unsigned long *)&__m256i_op1[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0808080808000800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0808080808000000;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff000c0000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00040000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xd010101010101010;
++  *((unsigned long *)&__m256i_op0[2]) = 0xd010101010103218;
++  *((unsigned long *)&__m256i_op0[1]) = 0xd010101010101010;
++  *((unsigned long *)&__m256i_op0[0]) = 0xd010101010103218;
++  *((unsigned long *)&__m256i_op1[3]) = 0xd010101010101010;
++  *((unsigned long *)&__m256i_op1[2]) = 0xd010101010103218;
++  *((unsigned long *)&__m256i_op1[1]) = 0xd010101010101010;
++  *((unsigned long *)&__m256i_op1[0]) = 0xd010101010103218;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x7fffffff7fffffff;
++  __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000001ff8000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000001ff8000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffffe0000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0020000000200000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xbc30c40108a45423;
++  *((unsigned long *)&__m256i_op1[2]) = 0xbc263e0e5d00e69f;
++  *((unsigned long *)&__m256i_op1[1]) = 0xbc30c40108a4544b;
++  *((unsigned long *)&__m256i_op1[0]) = 0xbc20e63aa8b9663f;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0504080804030405;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0504060904040305;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0504080804030405;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0504060904040305;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000141020;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000141020;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x66);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffffffe00000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000080000000800;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000080000000800;
++  __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x35);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000010101010;
++  *((unsigned long *)&__m256i_result[2]) = 0x1010101010101010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000010101010;
++  *((unsigned long *)&__m256i_result[0]) = 0x1010101010101010;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000fe;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0020000000200000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x1010101010001000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m256i_result[1]) = 0x1010101000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000ff000000ff;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fff800000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x7fff800000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000465;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000008d00000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000008d00000000;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_du_q (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000007fe70000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000ffff8000ffa3;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000007fe70000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0xc03ae000ffff6000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xc600000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000003;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000003;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfff10000fff10000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[2]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256i_result[2]) = 0x001fe020001fe020;
++  *((unsigned long *)&__m256i_result[1]) = 0x000000001ffe2000;
++  *((unsigned long *)&__m256i_result[0]) = 0x001fe020001fe020;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000002000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000002000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x38);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000004;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x7e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_result[2]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0007000700070007;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000ffffffff;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x007f010100000101;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x007f010100000101;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000200000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000004000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0008000000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0008000000000010;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfefefefe3f800000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x000000007fff7fff;
++  __m256i_out = __lasx_xvssrlrni_h_w (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000080040;
++  *((unsigned long *)&__m256i_result[3]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000008002d;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000008002d;
++  *((unsigned long *)&__m256i_op1[3]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x00000000007f0000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000010000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000010000000000;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x7fffffffffffbfff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x3f7f7f7f407fffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0x3f7f7f7f407fffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x7efefefe80ffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000fdfdfe;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x07ffffff07ffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0x07ffffff08000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x07ffffff08000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x207f207f207f2000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000207f2000;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb68380002001;
++  *((unsigned long *)&__m256i_op0[2]) = 0xfffe97c08000ffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb68380002001;
++  *((unsigned long *)&__m256i_op0[0]) = 0xfffe97c08000ffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_op1[0]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000007fff5b41c0;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000007fff5b41d0;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000007fff5b41c0;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000007fff5b41d0;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x59);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000001000000010;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000001000000010;
++  __m256i_out = __lasx_xvssrlrni_w_d (__m256i_op0, __m256i_op1, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ffff97a2;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_d_q (__m256i_op0, __m256i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x00ff00ff00c00040;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000008000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x00ff00ff00c00040;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000008000000001;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_bu_h (__m256i_op0, __m256i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0002000200000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0002000200000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000020002000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000020002000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffff010100000001;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000008000000080;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000008000000080;
++  __m256i_out = __lasx_xvssrlrni_wu_d (__m256i_op0, __m256i_op1, 0x39);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  __m256i_out = __lasx_xvssrlrni_hu_w (__m256i_op0, __m256i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[3]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[2]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m256i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m256i_out = __lasx_xvssrlrni_b_h (__m256i_op0, __m256i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-ASX-xvldrepl-xvstelm-instruc.patch b/LoongArch-Add-tests-for-ASX-xvldrepl-xvstelm-instruc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9f78bc7b50db0a418a00af298410f1681234a834
--- /dev/null
+++ b/LoongArch-Add-tests-for-ASX-xvldrepl-xvstelm-instruc.patch
@@ -0,0 +1,65 @@
+From 2ef90d604d7bae207d5b2067b4ce38d04d4835be Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 16:00:48 +0800
+Subject: [PATCH 110/124] LoongArch: Add tests for ASX xvldrepl/xvstelm
+ instruction generation.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c: New test.
+	* gcc.target/loongarch/vector/lasx/lasx-xvstelm.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvldrepl.c        | 16 ++++++++++++++++
+ .../loongarch/vector/lasx/lasx-xvstelm.c         | 14 ++++++++++++++
+ 2 files changed, 30 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c
+new file mode 100644
+index 000000000..105567951
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvldrepl.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -mlasx" } */
++/* { dg-final { scan-assembler-times "xvldrepl.w" 2} } */
++
++#define N 258
++
++float a[N], b[N], c[N];
++
++void
++test ()
++{
++  for (int i = 0; i < 256; i++)
++    {
++      a[i] = c[0] * b[i] + c[1];
++    }
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c
+new file mode 100644
+index 000000000..1a7b0e86f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvstelm.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O3 -mlasx" } */
++/* { dg-final { scan-assembler-times "xvstelm.w" 8} } */
++
++#define LEN 256
++
++float a[LEN], b[LEN], c[LEN];
++
++void
++test ()
++{
++  for (int i = 0; i < LEN; i += 2)
++    a[i] = b[i] + c[i];
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-Loongson-SX-builtin-function.patch b/LoongArch-Add-tests-for-Loongson-SX-builtin-function.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8502fe719e5c4256c164a3acdb2e0534ee86a35f
--- /dev/null
+++ b/LoongArch-Add-tests-for-Loongson-SX-builtin-function.patch
@@ -0,0 +1,4354 @@
+From 1e9d9ec99e65201d8d926fddc89b6176abe9a4e6 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 09:38:42 +0800
+Subject: [PATCH 078/124] LoongArch: Add tests for Loongson SX builtin
+ functions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-builtin.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-builtin.c        | 4328 +++++++++++++++++
+ 1 file changed, 4328 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c
+new file mode 100644
+index 000000000..13013114d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-builtin.c
+@@ -0,0 +1,4328 @@
++/* Test builtins for LOONGARCH LSX ASE instructions */
++/* { dg-do compile } */
++/* { dg-options "-mlsx" } */
++/* { dg-final { scan-assembler-times "lsx_vsll_b:.*vsll\\.b.*lsx_vsll_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsll_h:.*vsll\\.h.*lsx_vsll_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsll_w:.*vsll\\.w.*lsx_vsll_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsll_d:.*vsll\\.d.*lsx_vsll_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslli_b:.*vslli\\.b.*lsx_vslli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslli_h:.*vslli\\.h.*lsx_vslli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslli_w:.*vslli\\.w.*lsx_vslli_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslli_d:.*vslli\\.d.*lsx_vslli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsra_b:.*vsra\\.b.*lsx_vsra_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsra_h:.*vsra\\.h.*lsx_vsra_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsra_w:.*vsra\\.w.*lsx_vsra_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsra_d:.*vsra\\.d.*lsx_vsra_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrai_b:.*vsrai\\.b.*lsx_vsrai_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrai_h:.*vsrai\\.h.*lsx_vsrai_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrai_w:.*vsrai\\.w.*lsx_vsrai_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrai_d:.*vsrai\\.d.*lsx_vsrai_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrar_b:.*vsrar\\.b.*lsx_vsrar_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrar_h:.*vsrar\\.h.*lsx_vsrar_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrar_w:.*vsrar\\.w.*lsx_vsrar_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrar_d:.*vsrar\\.d.*lsx_vsrar_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrari_b:.*vsrari\\.b.*lsx_vsrari_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrari_h:.*vsrari\\.h.*lsx_vsrari_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrari_w:.*vsrari\\.w.*lsx_vsrari_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrari_d:.*vsrari\\.d.*lsx_vsrari_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrl_b:.*vsrl\\.b.*lsx_vsrl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrl_h:.*vsrl\\.h.*lsx_vsrl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrl_w:.*vsrl\\.w.*lsx_vsrl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrl_d:.*vsrl\\.d.*lsx_vsrl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrli_b:.*vsrli\\.b.*lsx_vsrli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrli_h:.*vsrli\\.h.*lsx_vsrli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrli_w:.*vsrli\\.w.*lsx_vsrli_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrli_d:.*vsrli\\.d.*lsx_vsrli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlr_b:.*vsrlr\\.b.*lsx_vsrlr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlr_h:.*vsrlr\\.h.*lsx_vsrlr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlr_w:.*vsrlr\\.w.*lsx_vsrlr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlr_d:.*vsrlr\\.d.*lsx_vsrlr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlri_b:.*vsrlri\\.b.*lsx_vsrlri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlri_h:.*vsrlri\\.h.*lsx_vsrlri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlri_w:.*vsrlri\\.w.*lsx_vsrlri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlri_d:.*vsrlri\\.d.*lsx_vsrlri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclr_b:.*vbitclr\\.b.*lsx_vbitclr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclr_h:.*vbitclr\\.h.*lsx_vbitclr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclr_w:.*vbitclr\\.w.*lsx_vbitclr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclr_d:.*vbitclr\\.d.*lsx_vbitclr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclri_b:.*vbitclri\\.b.*lsx_vbitclri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclri_h:.*vbitclri\\.h.*lsx_vbitclri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclri_w:.*vbitclri\\.w.*lsx_vbitclri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitclri_d:.*vbitclri\\.d.*lsx_vbitclri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitset_b:.*vbitset\\.b.*lsx_vbitset_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitset_h:.*vbitset\\.h.*lsx_vbitset_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitset_w:.*vbitset\\.w.*lsx_vbitset_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitset_d:.*vbitset\\.d.*lsx_vbitset_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitseti_b:.*vbitseti\\.b.*lsx_vbitseti_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitseti_h:.*vbitseti\\.h.*lsx_vbitseti_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitseti_w:.*vbitseti\\.w.*lsx_vbitseti_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitseti_d:.*vbitseti\\.d.*lsx_vbitseti_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrev_b:.*vbitrev\\.b.*lsx_vbitrev_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrev_h:.*vbitrev\\.h.*lsx_vbitrev_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrev_w:.*vbitrev\\.w.*lsx_vbitrev_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrev_d:.*vbitrev\\.d.*lsx_vbitrev_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrevi_b:.*vbitrevi\\.b.*lsx_vbitrevi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrevi_h:.*vbitrevi\\.h.*lsx_vbitrevi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrevi_w:.*vbitrevi\\.w.*lsx_vbitrevi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitrevi_d:.*vbitrevi\\.d.*lsx_vbitrevi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadd_b:.*vadd\\.b.*lsx_vadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadd_h:.*vadd\\.h.*lsx_vadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadd_w:.*vadd\\.w.*lsx_vadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadd_d:.*vadd\\.d.*lsx_vadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddi_bu:.*vaddi\\.bu.*lsx_vaddi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddi_hu:.*vaddi\\.hu.*lsx_vaddi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddi_wu:.*vaddi\\.wu.*lsx_vaddi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddi_du:.*vaddi\\.du.*lsx_vaddi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsub_b:.*vsub\\.b.*lsx_vsub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsub_h:.*vsub\\.h.*lsx_vsub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsub_w:.*vsub\\.w.*lsx_vsub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsub_d:.*vsub\\.d.*lsx_vsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubi_bu:.*vsubi\\.bu.*lsx_vsubi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubi_hu:.*vsubi\\.hu.*lsx_vsubi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubi_wu:.*vsubi\\.wu.*lsx_vsubi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubi_du:.*vsubi\\.du.*lsx_vsubi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_b:.*vmax\\.b.*lsx_vmax_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_h:.*vmax\\.h.*lsx_vmax_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_w:.*vmax\\.w.*lsx_vmax_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_d:.*vmax\\.d.*lsx_vmax_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_b:.*vmaxi\\.b.*lsx_vmaxi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_h:.*vmaxi\\.h.*lsx_vmaxi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_w:.*vmaxi\\.w.*lsx_vmaxi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_d:.*vmaxi\\.d.*lsx_vmaxi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_bu:.*vmax\\.bu.*lsx_vmax_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_hu:.*vmax\\.hu.*lsx_vmax_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_wu:.*vmax\\.wu.*lsx_vmax_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmax_du:.*vmax\\.du.*lsx_vmax_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_bu:.*vmaxi\\.bu.*lsx_vmaxi_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_hu:.*vmaxi\\.hu.*lsx_vmaxi_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_wu:.*vmaxi\\.wu.*lsx_vmaxi_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaxi_du:.*vmaxi\\.du.*lsx_vmaxi_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_b:.*vmin\\.b.*lsx_vmin_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_h:.*vmin\\.h.*lsx_vmin_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_w:.*vmin\\.w.*lsx_vmin_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_d:.*vmin\\.d.*lsx_vmin_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_b:.*vmini\\.b.*lsx_vmini_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_h:.*vmini\\.h.*lsx_vmini_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_w:.*vmini\\.w.*lsx_vmini_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_d:.*vmini\\.d.*lsx_vmini_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_bu:.*vmin\\.bu.*lsx_vmin_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_hu:.*vmin\\.hu.*lsx_vmin_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_wu:.*vmin\\.wu.*lsx_vmin_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmin_du:.*vmin\\.du.*lsx_vmin_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_bu:.*vmini\\.bu.*lsx_vmini_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_hu:.*vmini\\.hu.*lsx_vmini_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_wu:.*vmini\\.wu.*lsx_vmini_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmini_du:.*vmini\\.du.*lsx_vmini_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseq_b:.*vseq\\.b.*lsx_vseq_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseq_h:.*vseq\\.h.*lsx_vseq_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseq_w:.*vseq\\.w.*lsx_vseq_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseq_d:.*vseq\\.d.*lsx_vseq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseqi_b:.*vseqi\\.b.*lsx_vseqi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseqi_h:.*vseqi\\.h.*lsx_vseqi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseqi_w:.*vseqi\\.w.*lsx_vseqi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vseqi_d:.*vseqi\\.d.*lsx_vseqi_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_b:.*vslti\\.b.*lsx_vslti_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_b:.*vslt\\.b.*lsx_vslt_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_h:.*vslt\\.h.*lsx_vslt_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_w:.*vslt\\.w.*lsx_vslt_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_d:.*vslt\\.d.*lsx_vslt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_h:.*vslti\\.h.*lsx_vslti_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_w:.*vslti\\.w.*lsx_vslti_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_d:.*vslti\\.d.*lsx_vslti_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_bu:.*vslt\\.bu.*lsx_vslt_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_hu:.*vslt\\.hu.*lsx_vslt_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_wu:.*vslt\\.wu.*lsx_vslt_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslt_du:.*vslt\\.du.*lsx_vslt_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_bu:.*vslti\\.bu.*lsx_vslti_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_hu:.*vslti\\.hu.*lsx_vslti_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_wu:.*vslti\\.wu.*lsx_vslti_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslti_du:.*vslti\\.du.*lsx_vslti_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_b:.*vsle\\.b.*lsx_vsle_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_h:.*vsle\\.h.*lsx_vsle_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_w:.*vsle\\.w.*lsx_vsle_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_d:.*vsle\\.d.*lsx_vsle_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_b:.*vslei\\.b.*lsx_vslei_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_h:.*vslei\\.h.*lsx_vslei_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_w:.*vslei\\.w.*lsx_vslei_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_d:.*vslei\\.d.*lsx_vslei_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_bu:.*vsle\\.bu.*lsx_vsle_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_hu:.*vsle\\.hu.*lsx_vsle_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_wu:.*vsle\\.wu.*lsx_vsle_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsle_du:.*vsle\\.du.*lsx_vsle_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_bu:.*vslei\\.bu.*lsx_vslei_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_hu:.*vslei\\.hu.*lsx_vslei_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_wu:.*vslei\\.wu.*lsx_vslei_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vslei_du:.*vslei\\.du.*lsx_vslei_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_b:.*vsat\\.b.*lsx_vsat_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_h:.*vsat\\.h.*lsx_vsat_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_w:.*vsat\\.w.*lsx_vsat_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_d:.*vsat\\.d.*lsx_vsat_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_bu:.*vsat\\.bu.*lsx_vsat_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_hu:.*vsat\\.hu.*lsx_vsat_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_wu:.*vsat\\.wu.*lsx_vsat_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsat_du:.*vsat\\.du.*lsx_vsat_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadda_b:.*vadda\\.b.*lsx_vadda_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadda_h:.*vadda\\.h.*lsx_vadda_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadda_w:.*vadda\\.w.*lsx_vadda_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadda_d:.*vadda\\.d.*lsx_vadda_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_b:.*vsadd\\.b.*lsx_vsadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_h:.*vsadd\\.h.*lsx_vsadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_w:.*vsadd\\.w.*lsx_vsadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_d:.*vsadd\\.d.*lsx_vsadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_bu:.*vsadd\\.bu.*lsx_vsadd_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_hu:.*vsadd\\.hu.*lsx_vsadd_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_wu:.*vsadd\\.wu.*lsx_vsadd_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsadd_du:.*vsadd\\.du.*lsx_vsadd_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_b:.*vavg\\.b.*lsx_vavg_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_h:.*vavg\\.h.*lsx_vavg_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_w:.*vavg\\.w.*lsx_vavg_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_d:.*vavg\\.d.*lsx_vavg_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_bu:.*vavg\\.bu.*lsx_vavg_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_hu:.*vavg\\.hu.*lsx_vavg_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_wu:.*vavg\\.wu.*lsx_vavg_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavg_du:.*vavg\\.du.*lsx_vavg_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_b:.*vavgr\\.b.*lsx_vavgr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_h:.*vavgr\\.h.*lsx_vavgr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_w:.*vavgr\\.w.*lsx_vavgr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_d:.*vavgr\\.d.*lsx_vavgr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_bu:.*vavgr\\.bu.*lsx_vavgr_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_hu:.*vavgr\\.hu.*lsx_vavgr_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_wu:.*vavgr\\.wu.*lsx_vavgr_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vavgr_du:.*vavgr\\.du.*lsx_vavgr_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_b:.*vssub\\.b.*lsx_vssub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_h:.*vssub\\.h.*lsx_vssub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_w:.*vssub\\.w.*lsx_vssub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_d:.*vssub\\.d.*lsx_vssub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_bu:.*vssub\\.bu.*lsx_vssub_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_hu:.*vssub\\.hu.*lsx_vssub_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_wu:.*vssub\\.wu.*lsx_vssub_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssub_du:.*vssub\\.du.*lsx_vssub_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_b:.*vabsd\\.b.*lsx_vabsd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_h:.*vabsd\\.h.*lsx_vabsd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_w:.*vabsd\\.w.*lsx_vabsd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_d:.*vabsd\\.d.*lsx_vabsd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_bu:.*vabsd\\.bu.*lsx_vabsd_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_hu:.*vabsd\\.hu.*lsx_vabsd_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_wu:.*vabsd\\.wu.*lsx_vabsd_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vabsd_du:.*vabsd\\.du.*lsx_vabsd_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmul_b:.*vmul\\.b.*lsx_vmul_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmul_h:.*vmul\\.h.*lsx_vmul_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmul_w:.*vmul\\.w.*lsx_vmul_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmul_d:.*vmul\\.d.*lsx_vmul_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmadd_b:.*vmadd\\.b.*lsx_vmadd_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmadd_h:.*vmadd\\.h.*lsx_vmadd_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmadd_w:.*vmadd\\.w.*lsx_vmadd_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmadd_d:.*vmadd\\.d.*lsx_vmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmsub_b:.*vmsub\\.b.*lsx_vmsub_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmsub_h:.*vmsub\\.h.*lsx_vmsub_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmsub_w:.*vmsub\\.w.*lsx_vmsub_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmsub_d:.*vmsub\\.d.*lsx_vmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_b:.*vdiv\\.b.*lsx_vdiv_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_h:.*vdiv\\.h.*lsx_vdiv_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_w:.*vdiv\\.w.*lsx_vdiv_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_d:.*vdiv\\.d.*lsx_vdiv_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_bu:.*vdiv\\.bu.*lsx_vdiv_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_hu:.*vdiv\\.hu.*lsx_vdiv_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_wu:.*vdiv\\.wu.*lsx_vdiv_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vdiv_du:.*vdiv\\.du.*lsx_vdiv_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_h_b:.*vhaddw\\.h\\.b.*lsx_vhaddw_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_w_h:.*vhaddw\\.w\\.h.*lsx_vhaddw_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_d_w:.*vhaddw\\.d\\.w.*lsx_vhaddw_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_hu_bu:.*vhaddw\\.hu\\.bu.*lsx_vhaddw_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_wu_hu:.*vhaddw\\.wu\\.hu.*lsx_vhaddw_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_du_wu:.*vhaddw\\.du\\.wu.*lsx_vhaddw_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_h_b:.*vhsubw\\.h\\.b.*lsx_vhsubw_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_w_h:.*vhsubw\\.w\\.h.*lsx_vhsubw_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_d_w:.*vhsubw\\.d\\.w.*lsx_vhsubw_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_hu_bu:.*vhsubw\\.hu\\.bu.*lsx_vhsubw_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_wu_hu:.*vhsubw\\.wu\\.hu.*lsx_vhsubw_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_du_wu:.*vhsubw\\.du\\.wu.*lsx_vhsubw_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_b:.*vmod\\.b.*lsx_vmod_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_h:.*vmod\\.h.*lsx_vmod_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_w:.*vmod\\.w.*lsx_vmod_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_d:.*vmod\\.d.*lsx_vmod_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_bu:.*vmod\\.bu.*lsx_vmod_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_hu:.*vmod\\.hu.*lsx_vmod_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_wu:.*vmod\\.wu.*lsx_vmod_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmod_du:.*vmod\\.du.*lsx_vmod_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplve_b:.*vreplve\\.b.*lsx_vreplve_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplve_h:.*vreplve\\.h.*lsx_vreplve_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplve_w:.*vreplve\\.w.*lsx_vreplve_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplve_d:.*vreplve\\.d.*lsx_vreplve_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplvei_b:.*vreplvei\\.b.*lsx_vreplvei_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplvei_h:.*vreplvei\\.h.*lsx_vreplvei_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplvei_w:.*vreplvei\\.w.*lsx_vreplvei_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplvei_d:.*vreplvei\\.d.*lsx_vreplvei_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickev_b:.*vpickev\\.b.*lsx_vpickev_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickev_h:.*vpickev\\.h.*lsx_vpickev_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickev_w:.*vpickev\\.w.*lsx_vpickev_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickev_d:.*vilvl\\.d.*lsx_vpickev_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickod_b:.*vpickod\\.b.*lsx_vpickod_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickod_h:.*vpickod\\.h.*lsx_vpickod_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickod_w:.*vpickod\\.w.*lsx_vpickod_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickod_d:.*vilvh\\.d.*lsx_vpickod_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvh_b:.*vilvh\\.b.*lsx_vilvh_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvh_h:.*vilvh\\.h.*lsx_vilvh_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvh_w:.*vilvh\\.w.*lsx_vilvh_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvh_d:.*vilvh\\.d.*lsx_vilvh_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvl_b:.*vilvl\\.b.*lsx_vilvl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvl_h:.*vilvl\\.h.*lsx_vilvl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvl_w:.*vilvl\\.w.*lsx_vilvl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vilvl_d:.*vilvl\\.d.*lsx_vilvl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackev_b:.*vpackev\\.b.*lsx_vpackev_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackev_h:.*vpackev\\.h.*lsx_vpackev_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackev_w:.*vpackev\\.w.*lsx_vpackev_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackev_d:.*vilvl\\.d.*lsx_vpackev_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackod_b:.*vpackod\\.b.*lsx_vpackod_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackod_h:.*vpackod\\.h.*lsx_vpackod_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackod_w:.*vpackod\\.w.*lsx_vpackod_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpackod_d:.*vilvh\\.d.*lsx_vpackod_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf_h:.*vshuf\\.h.*lsx_vshuf_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf_w:.*vshuf\\.w.*lsx_vshuf_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf_d:.*vshuf\\.d.*lsx_vshuf_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vand_v:.*vand\\.v.*lsx_vand_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vandi_b:.*vandi\\.b.*lsx_vandi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vor_v:.*vor\\.v.*lsx_vor_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vori_b:.*vbitseti\\.b.*lsx_vori_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vnor_v:.*vnor\\.v.*lsx_vnor_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vnori_b:.*vnori\\.b.*lsx_vnori_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vxor_v:.*vxor\\.v.*lsx_vxor_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vxori_b:.*vbitrevi\\.b.*lsx_vxori_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitsel_v:.*vbitsel\\.v.*lsx_vbitsel_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbitseli_b:.*vbitseli\\.b.*lsx_vbitseli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf4i_b:.*vshuf4i\\.b.*lsx_vshuf4i_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf4i_h:.*vshuf4i\\.h.*lsx_vshuf4i_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf4i_w:.*vshuf4i\\.w.*lsx_vshuf4i_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_b:.*vreplgr2vr\\.b.*lsx_vreplgr2vr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_h:.*vreplgr2vr\\.h.*lsx_vreplgr2vr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_w:.*vreplgr2vr\\.w.*lsx_vreplgr2vr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vreplgr2vr_d:.*vreplgr2vr\\.d.*lsx_vreplgr2vr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpcnt_b:.*vpcnt\\.b.*lsx_vpcnt_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpcnt_h:.*vpcnt\\.h.*lsx_vpcnt_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpcnt_w:.*vpcnt\\.w.*lsx_vpcnt_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpcnt_d:.*vpcnt\\.d.*lsx_vpcnt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclo_b:.*vclo\\.b.*lsx_vclo_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclo_h:.*vclo\\.h.*lsx_vclo_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclo_w:.*vclo\\.w.*lsx_vclo_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclo_d:.*vclo\\.d.*lsx_vclo_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclz_b:.*vclz\\.b.*lsx_vclz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclz_h:.*vclz\\.h.*lsx_vclz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclz_w:.*vclz\\.w.*lsx_vclz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vclz_d:.*vclz\\.d.*lsx_vclz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_b:.*vpickve2gr\\.b.*lsx_vpickve2gr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_h:.*vpickve2gr\\.h.*lsx_vpickve2gr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_w:.*vpickve2gr\\.w.*lsx_vpickve2gr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_d:.*vpickve2gr\\.d.*lsx_vpickve2gr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_bu:.*vpickve2gr\\.bu.*lsx_vpickve2gr_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_hu:.*vpickve2gr\\.hu.*lsx_vpickve2gr_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_wu:.*vpickve2gr\\.wu.*lsx_vpickve2gr_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpickve2gr_du:.*vpickve2gr\\.du.*lsx_vpickve2gr_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_b:.*vinsgr2vr\\.b.*lsx_vinsgr2vr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_h:.*vinsgr2vr\\.h.*lsx_vinsgr2vr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_w:.*vinsgr2vr\\.w.*lsx_vinsgr2vr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vinsgr2vr_d:.*vinsgr2vr\\.d.*lsx_vinsgr2vr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfadd_s:.*vfadd\\.s.*lsx_vfadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfadd_d:.*vfadd\\.d.*lsx_vfadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfsub_s:.*vfsub\\.s.*lsx_vfsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfsub_d:.*vfsub\\.d.*lsx_vfsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmul_s:.*vfmul\\.s.*lsx_vfmul_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmul_d:.*vfmul\\.d.*lsx_vfmul_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfdiv_s:.*vfdiv\\.s.*lsx_vfdiv_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfdiv_d:.*vfdiv\\.d.*lsx_vfdiv_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcvt_h_s:.*vfcvt\\.h\\.s.*lsx_vfcvt_h_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcvt_s_d:.*vfcvt\\.s\\.d.*lsx_vfcvt_s_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmin_s:.*vfmin\\.s.*lsx_vfmin_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmin_d:.*vfmin\\.d.*lsx_vfmin_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmina_s:.*vfmina\\.s.*lsx_vfmina_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmina_d:.*vfmina\\.d.*lsx_vfmina_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmax_s:.*vfmax\\.s.*lsx_vfmax_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmax_d:.*vfmax\\.d.*lsx_vfmax_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmaxa_s:.*vfmaxa\\.s.*lsx_vfmaxa_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmaxa_d:.*vfmaxa\\.d.*lsx_vfmaxa_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfclass_s:.*vfclass\\.s.*lsx_vfclass_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfclass_d:.*vfclass\\.d.*lsx_vfclass_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfsqrt_s:.*vfsqrt\\.s.*lsx_vfsqrt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfsqrt_d:.*vfsqrt\\.d.*lsx_vfsqrt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrecip_s:.*vfrecip\\.s.*lsx_vfrecip_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrecip_d:.*vfrecip\\.d.*lsx_vfrecip_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrint_s:.*vfrint\\.s.*lsx_vfrint_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrint_d:.*vfrint\\.d.*lsx_vfrint_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrsqrt_s:.*vfrsqrt\\.s.*lsx_vfrsqrt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrsqrt_d:.*vfrsqrt\\.d.*lsx_vfrsqrt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vflogb_s:.*vflogb\\.s.*lsx_vflogb_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vflogb_d:.*vflogb\\.d.*lsx_vflogb_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcvth_s_h:.*vfcvth\\.s\\.h.*lsx_vfcvth_s_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcvth_d_s:.*vfcvth\\.d\\.s.*lsx_vfcvth_d_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcvtl_s_h:.*vfcvtl\\.s\\.h.*lsx_vfcvtl_s_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcvtl_d_s:.*vfcvtl\\.d\\.s.*lsx_vfcvtl_d_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftint_w_s:.*vftint\\.w\\.s.*lsx_vftint_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftint_l_d:.*vftint\\.l\\.d.*lsx_vftint_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftint_wu_s:.*vftint\\.wu\\.s.*lsx_vftint_wu_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftint_lu_d:.*vftint\\.lu\\.d.*lsx_vftint_lu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrz_w_s:.*vftintrz\\.w\\.s.*lsx_vftintrz_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrz_l_d:.*vftintrz\\.l\\.d.*lsx_vftintrz_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrz_wu_s:.*vftintrz\\.wu\\.s.*lsx_vftintrz_wu_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrz_lu_d:.*vftintrz\\.lu\\.d.*lsx_vftintrz_lu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vffint_s_w:.*vffint\\.s\\.w.*lsx_vffint_s_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vffint_d_l:.*vffint\\.d\\.l.*lsx_vffint_d_l" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vffint_s_wu:.*vffint\\.s\\.wu.*lsx_vffint_s_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vffint_d_lu:.*vffint\\.d\\.lu.*lsx_vffint_d_lu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vandn_v:.*vandn\\.v.*lsx_vandn_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vneg_b:.*vneg\\.b.*lsx_vneg_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vneg_h:.*vneg\\.h.*lsx_vneg_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vneg_w:.*vneg\\.w.*lsx_vneg_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vneg_d:.*vneg\\.d.*lsx_vneg_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_b:.*vmuh\\.b.*lsx_vmuh_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_h:.*vmuh\\.h.*lsx_vmuh_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_w:.*vmuh\\.w.*lsx_vmuh_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_d:.*vmuh\\.d.*lsx_vmuh_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_bu:.*vmuh\\.bu.*lsx_vmuh_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_hu:.*vmuh\\.hu.*lsx_vmuh_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_wu:.*vmuh\\.wu.*lsx_vmuh_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmuh_du:.*vmuh\\.du.*lsx_vmuh_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsllwil_h_b:.*vsllwil\\.h\\.b.*lsx_vsllwil_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsllwil_w_h:.*vsllwil\\.w\\.h.*lsx_vsllwil_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsllwil_d_w:.*vsllwil\\.d\\.w.*lsx_vsllwil_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsllwil_hu_bu:.*vsllwil\\.hu\\.bu.*lsx_vsllwil_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsllwil_wu_hu:.*vsllwil\\.wu\\.hu.*lsx_vsllwil_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsllwil_du_wu:.*vsllwil\\.du\\.wu.*lsx_vsllwil_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsran_b_h:.*vsran\\.b\\.h.*lsx_vsran_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsran_h_w:.*vsran\\.h\\.w.*lsx_vsran_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsran_w_d:.*vsran\\.w\\.d.*lsx_vsran_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssran_b_h:.*vssran\\.b\\.h.*lsx_vssran_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssran_h_w:.*vssran\\.h\\.w.*lsx_vssran_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssran_w_d:.*vssran\\.w\\.d.*lsx_vssran_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssran_bu_h:.*vssran\\.bu\\.h.*lsx_vssran_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssran_hu_w:.*vssran\\.hu\\.w.*lsx_vssran_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssran_wu_d:.*vssran\\.wu\\.d.*lsx_vssran_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrarn_b_h:.*vsrarn\\.b\\.h.*lsx_vsrarn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrarn_h_w:.*vsrarn\\.h\\.w.*lsx_vsrarn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrarn_w_d:.*vsrarn\\.w\\.d.*lsx_vsrarn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarn_b_h:.*vssrarn\\.b\\.h.*lsx_vssrarn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarn_h_w:.*vssrarn\\.h\\.w.*lsx_vssrarn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarn_w_d:.*vssrarn\\.w\\.d.*lsx_vssrarn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarn_bu_h:.*vssrarn\\.bu\\.h.*lsx_vssrarn_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarn_hu_w:.*vssrarn\\.hu\\.w.*lsx_vssrarn_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarn_wu_d:.*vssrarn\\.wu\\.d.*lsx_vssrarn_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrln_b_h:.*vsrln\\.b\\.h.*lsx_vsrln_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrln_h_w:.*vsrln\\.h\\.w.*lsx_vsrln_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrln_w_d:.*vsrln\\.w\\.d.*lsx_vsrln_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrln_bu_h:.*vssrln\\.bu\\.h.*lsx_vssrln_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrln_hu_w:.*vssrln\\.hu\\.w.*lsx_vssrln_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrln_wu_d:.*vssrln\\.wu\\.d.*lsx_vssrln_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlrn_b_h:.*vsrlrn\\.b\\.h.*lsx_vsrlrn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlrn_h_w:.*vsrlrn\\.h\\.w.*lsx_vsrlrn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlrn_w_d:.*vsrlrn\\.w\\.d.*lsx_vsrlrn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrn_bu_h:.*vssrlrn\\.bu\\.h.*lsx_vssrlrn_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrn_hu_w:.*vssrlrn\\.hu\\.w.*lsx_vssrlrn_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrn_wu_d:.*vssrlrn\\.wu\\.d.*lsx_vssrlrn_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrstpi_b:.*vfrstpi\\.b.*lsx_vfrstpi_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrstpi_h:.*vfrstpi\\.h.*lsx_vfrstpi_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrstp_b:.*vfrstp\\.b.*lsx_vfrstp_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrstp_h:.*vfrstp\\.h.*lsx_vfrstp_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf4i_d:.*vshuf4i\\.d.*lsx_vshuf4i_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbsrl_v:.*vbsrl\\.v.*lsx_vbsrl_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vbsll_v:.*vbsll\\.v.*lsx_vbsll_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vextrins_b:.*vextrins\\.b.*lsx_vextrins_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vextrins_h:.*vextrins\\.h.*lsx_vextrins_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vextrins_w:.*vextrins\\.w.*lsx_vextrins_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vextrins_d:.*vextrins\\.d.*lsx_vextrins_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmskltz_b:.*vmskltz\\.b.*lsx_vmskltz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmskltz_h:.*vmskltz\\.h.*lsx_vmskltz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmskltz_w:.*vmskltz\\.w.*lsx_vmskltz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmskltz_d:.*vmskltz\\.d.*lsx_vmskltz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsigncov_b:.*vsigncov\\.b.*lsx_vsigncov_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsigncov_h:.*vsigncov\\.h.*lsx_vsigncov_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsigncov_w:.*vsigncov\\.w.*lsx_vsigncov_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsigncov_d:.*vsigncov\\.d.*lsx_vsigncov_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmadd_s:.*vfmadd\\.s.*lsx_vfmadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmadd_d:.*vfmadd\\.d.*lsx_vfmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmsub_s:.*vfmsub\\.s.*lsx_vfmsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfmsub_d:.*vfmsub\\.d.*lsx_vfmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfnmadd_s:.*vfnmadd\\.s.*lsx_vfnmadd_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfnmadd_d:.*vfnmadd\\.d.*lsx_vfnmadd_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfnmsub_s:.*vfnmsub\\.s.*lsx_vfnmsub_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfnmsub_d:.*vfnmsub\\.d.*lsx_vfnmsub_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrne_w_s:.*vftintrne\\.w\\.s.*lsx_vftintrne_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrne_l_d:.*vftintrne\\.l\\.d.*lsx_vftintrne_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrp_w_s:.*vftintrp\\.w\\.s.*lsx_vftintrp_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrp_l_d:.*vftintrp\\.l\\.d.*lsx_vftintrp_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrm_w_s:.*vftintrm\\.w\\.s.*lsx_vftintrm_w_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrm_l_d:.*vftintrm\\.l\\.d.*lsx_vftintrm_l_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftint_w_d:.*vftint\\.w\\.d.*lsx_vftint_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vffint_s_l:.*vffint\\.s\\.l.*lsx_vffint_s_l" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrz_w_d:.*vftintrz\\.w\\.d.*lsx_vftintrz_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrp_w_d:.*vftintrp\\.w\\.d.*lsx_vftintrp_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrm_w_d:.*vftintrm\\.w\\.d.*lsx_vftintrm_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrne_w_d:.*vftintrne\\.w\\.d.*lsx_vftintrne_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintl_l_s:.*vftintl\\.l\\.s.*lsx_vftintl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftinth_l_s:.*vftinth\\.l\\.s.*lsx_vftinth_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vffinth_d_w:.*vffinth\\.d\\.w.*lsx_vffinth_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vffintl_d_w:.*vffintl\\.d\\.w.*lsx_vffintl_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrzl_l_s:.*vftintrzl\\.l\\.s.*lsx_vftintrzl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrzh_l_s:.*vftintrzh\\.l\\.s.*lsx_vftintrzh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrpl_l_s:.*vftintrpl\\.l\\.s.*lsx_vftintrpl_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrph_l_s:.*vftintrph\\.l\\.s.*lsx_vftintrph_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrml_l_s:.*vftintrml\\.l\\.s.*lsx_vftintrml_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrmh_l_s:.*vftintrmh\\.l\\.s.*lsx_vftintrmh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrnel_l_s:.*vftintrnel\\.l\\.s.*lsx_vftintrnel_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vftintrneh_l_s:.*vftintrneh\\.l\\.s.*lsx_vftintrneh_l_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrne_s:.*vfrintrne\\.s.*lsx_vfrintrne_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrne_d:.*vfrintrne\\.d.*lsx_vfrintrne_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrz_s:.*vfrintrz\\.s.*lsx_vfrintrz_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrz_d:.*vfrintrz\\.d.*lsx_vfrintrz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrp_s:.*vfrintrp\\.s.*lsx_vfrintrp_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrp_d:.*vfrintrp\\.d.*lsx_vfrintrp_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrm_s:.*vfrintrm\\.s.*lsx_vfrintrm_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfrintrm_d:.*vfrintrm\\.d.*lsx_vfrintrm_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vstelm_b:.*vstelm\\.b.*lsx_vstelm_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vstelm_h:.*vstelm\\.h.*lsx_vstelm_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vstelm_w:.*vstelm\\.w.*lsx_vstelm_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vstelm_d:.*vstelm\\.d.*lsx_vstelm_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_w:.*vaddwev\\.d\\.w.*lsx_vaddwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_h:.*vaddwev\\.w\\.h.*lsx_vaddwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_b:.*vaddwev\\.h\\.b.*lsx_vaddwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_w:.*vaddwod\\.d\\.w.*lsx_vaddwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_h:.*vaddwod\\.w\\.h.*lsx_vaddwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_b:.*vaddwod\\.h\\.b.*lsx_vaddwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu:.*vaddwev\\.d\\.wu.*lsx_vaddwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_hu:.*vaddwev\\.w\\.hu.*lsx_vaddwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu:.*vaddwev\\.h\\.bu.*lsx_vaddwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu:.*vaddwod\\.d\\.wu.*lsx_vaddwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu:.*vaddwod\\.w\\.hu.*lsx_vaddwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu:.*vaddwod\\.h\\.bu.*lsx_vaddwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_d_wu_w:.*vaddwev\\.d\\.wu\\.w.*lsx_vaddwev_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_w_hu_h:.*vaddwev\\.w\\.hu\\.h.*lsx_vaddwev_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_h_bu_b:.*vaddwev\\.h\\.bu\\.b.*lsx_vaddwev_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_d_wu_w:.*vaddwod\\.d\\.wu\\.w.*lsx_vaddwod_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_w_hu_h:.*vaddwod\\.w\\.hu\\.h.*lsx_vaddwod_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_h_bu_b:.*vaddwod\\.h\\.bu\\.b.*lsx_vaddwod_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_d_w:.*vsubwev\\.d\\.w.*lsx_vsubwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_w_h:.*vsubwev\\.w\\.h.*lsx_vsubwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_h_b:.*vsubwev\\.h\\.b.*lsx_vsubwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_d_w:.*vsubwod\\.d\\.w.*lsx_vsubwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_w_h:.*vsubwod\\.w\\.h.*lsx_vsubwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_h_b:.*vsubwod\\.h\\.b.*lsx_vsubwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_d_wu:.*vsubwev\\.d\\.wu.*lsx_vsubwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_w_hu:.*vsubwev\\.w\\.hu.*lsx_vsubwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_h_bu:.*vsubwev\\.h\\.bu.*lsx_vsubwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_d_wu:.*vsubwod\\.d\\.wu.*lsx_vsubwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_w_hu:.*vsubwod\\.w\\.hu.*lsx_vsubwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_h_bu:.*vsubwod\\.h\\.bu.*lsx_vsubwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_d:.*vaddwev\\.q\\.d.*lsx_vaddwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_d:.*vaddwod\\.q\\.d.*lsx_vaddwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du:.*vaddwev\\.q\\.du.*lsx_vaddwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du:.*vaddwod\\.q\\.du.*lsx_vaddwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_q_d:.*vsubwev\\.q\\.d.*lsx_vsubwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_q_d:.*vsubwod\\.q\\.d.*lsx_vsubwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwev_q_du:.*vsubwev\\.q\\.du.*lsx_vsubwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsubwod_q_du:.*vsubwod\\.q\\.du.*lsx_vsubwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwev_q_du_d:.*vaddwev\\.q\\.du\\.d.*lsx_vaddwev_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vaddwod_q_du_d:.*vaddwod\\.q\\.du\\.d.*lsx_vaddwod_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_w:.*vmulwev\\.d\\.w.*lsx_vmulwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_h:.*vmulwev\\.w\\.h.*lsx_vmulwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_b:.*vmulwev\\.h\\.b.*lsx_vmulwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_w:.*vmulwod\\.d\\.w.*lsx_vmulwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_h:.*vmulwod\\.w\\.h.*lsx_vmulwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_b:.*vmulwod\\.h\\.b.*lsx_vmulwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu:.*vmulwev\\.d\\.wu.*lsx_vmulwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_hu:.*vmulwev\\.w\\.hu.*lsx_vmulwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu:.*vmulwev\\.h\\.bu.*lsx_vmulwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu:.*vmulwod\\.d\\.wu.*lsx_vmulwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu:.*vmulwod\\.w\\.hu.*lsx_vmulwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu:.*vmulwod\\.h\\.bu.*lsx_vmulwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_d_wu_w:.*vmulwev\\.d\\.wu\\.w.*lsx_vmulwev_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_w_hu_h:.*vmulwev\\.w\\.hu\\.h.*lsx_vmulwev_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_h_bu_b:.*vmulwev\\.h\\.bu\\.b.*lsx_vmulwev_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_d_wu_w:.*vmulwod\\.d\\.wu\\.w.*lsx_vmulwod_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_w_hu_h:.*vmulwod\\.w\\.hu\\.h.*lsx_vmulwod_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_h_bu_b:.*vmulwod\\.h\\.bu\\.b.*lsx_vmulwod_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_d:.*vmulwev\\.q\\.d.*lsx_vmulwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_d:.*vmulwod\\.q\\.d.*lsx_vmulwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du:.*vmulwev\\.q\\.du.*lsx_vmulwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du:.*vmulwod\\.q\\.du.*lsx_vmulwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwev_q_du_d:.*vmulwev\\.q\\.du\\.d.*lsx_vmulwev_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmulwod_q_du_d:.*vmulwod\\.q\\.du\\.d.*lsx_vmulwod_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_q_d:.*vhaddw\\.q\\.d.*lsx_vhaddw_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhaddw_qu_du:.*vhaddw\\.qu\\.du.*lsx_vhaddw_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_q_d:.*vhsubw\\.q\\.d.*lsx_vhsubw_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vhsubw_qu_du:.*vhsubw\\.qu\\.du.*lsx_vhsubw_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_w:.*vmaddwev\\.d\\.w.*lsx_vmaddwev_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_h:.*vmaddwev\\.w\\.h.*lsx_vmaddwev_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_b:.*vmaddwev\\.h\\.b.*lsx_vmaddwev_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu:.*vmaddwev\\.d\\.wu.*lsx_vmaddwev_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu:.*vmaddwev\\.w\\.hu.*lsx_vmaddwev_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu:.*vmaddwev\\.h\\.bu.*lsx_vmaddwev_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_w:.*vmaddwod\\.d\\.w.*lsx_vmaddwod_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_h:.*vmaddwod\\.w\\.h.*lsx_vmaddwod_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_b:.*vmaddwod\\.h\\.b.*lsx_vmaddwod_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu:.*vmaddwod\\.d\\.wu.*lsx_vmaddwod_d_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu:.*vmaddwod\\.w\\.hu.*lsx_vmaddwod_w_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu:.*vmaddwod\\.h\\.bu.*lsx_vmaddwod_h_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_d_wu_w:.*vmaddwev\\.d\\.wu\\.w.*lsx_vmaddwev_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_w_hu_h:.*vmaddwev\\.w\\.hu\\.h.*lsx_vmaddwev_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_h_bu_b:.*vmaddwev\\.h\\.bu\\.b.*lsx_vmaddwev_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_d_wu_w:.*vmaddwod\\.d\\.wu\\.w.*lsx_vmaddwod_d_wu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_w_hu_h:.*vmaddwod\\.w\\.hu\\.h.*lsx_vmaddwod_w_hu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_h_bu_b:.*vmaddwod\\.h\\.bu\\.b.*lsx_vmaddwod_h_bu_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_d:.*vmaddwev\\.q\\.d.*lsx_vmaddwev_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_d:.*vmaddwod\\.q\\.d.*lsx_vmaddwod_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du:.*vmaddwev\\.q\\.du.*lsx_vmaddwev_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du:.*vmaddwod\\.q\\.du.*lsx_vmaddwod_q_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwev_q_du_d:.*vmaddwev\\.q\\.du\\.d.*lsx_vmaddwev_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmaddwod_q_du_d:.*vmaddwod\\.q\\.du\\.d.*lsx_vmaddwod_q_du_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotr_b:.*vrotr\\.b.*lsx_vrotr_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotr_h:.*vrotr\\.h.*lsx_vrotr_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotr_w:.*vrotr\\.w.*lsx_vrotr_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotr_d:.*vrotr\\.d.*lsx_vrotr_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vadd_q:.*vadd\\.q.*lsx_vadd_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsub_q:.*vsub\\.q.*lsx_vsub_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vldrepl_b:.*vldrepl\\.b.*lsx_vldrepl_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vldrepl_h:.*vldrepl\\.h.*lsx_vldrepl_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vldrepl_w:.*vldrepl\\.w.*lsx_vldrepl_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vldrepl_d:.*vldrepl\\.d.*lsx_vldrepl_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmskgez_b:.*vmskgez\\.b.*lsx_vmskgez_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vmsknz_b:.*vmsknz\\.b.*lsx_vmsknz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_h_b:.*vexth\\.h\\.b.*lsx_vexth_h_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_w_h:.*vexth\\.w\\.h.*lsx_vexth_w_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_d_w:.*vexth\\.d\\.w.*lsx_vexth_d_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_q_d:.*vexth\\.q\\.d.*lsx_vexth_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_hu_bu:.*vexth\\.hu\\.bu.*lsx_vexth_hu_bu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_wu_hu:.*vexth\\.wu\\.hu.*lsx_vexth_wu_hu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_du_wu:.*vexth\\.du\\.wu.*lsx_vexth_du_wu" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vexth_qu_du:.*vexth\\.qu\\.du.*lsx_vexth_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotri_b:.*vrotri\\.b.*lsx_vrotri_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotri_h:.*vrotri\\.h.*lsx_vrotri_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotri_w:.*vrotri\\.w.*lsx_vrotri_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrotri_d:.*vrotri\\.d.*lsx_vrotri_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vextl_q_d:.*vextl\\.q\\.d.*lsx_vextl_q_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlni_b_h:.*vsrlni\\.b\\.h.*lsx_vsrlni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlni_h_w:.*vsrlni\\.h\\.w.*lsx_vsrlni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlni_w_d:.*vsrlni\\.w\\.d.*lsx_vsrlni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlni_d_q:.*vsrlni\\.d\\.q.*lsx_vsrlni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlrni_b_h:.*vsrlrni\\.b\\.h.*lsx_vsrlrni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlrni_h_w:.*vsrlrni\\.h\\.w.*lsx_vsrlrni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlrni_w_d:.*vsrlrni\\.w\\.d.*lsx_vsrlrni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrlrni_d_q:.*vsrlrni\\.d\\.q.*lsx_vsrlrni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_b_h:.*vssrlni\\.b\\.h.*lsx_vssrlni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_h_w:.*vssrlni\\.h\\.w.*lsx_vssrlni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_w_d:.*vssrlni\\.w\\.d.*lsx_vssrlni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_d_q:.*vssrlni\\.d\\.q.*lsx_vssrlni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_bu_h:.*vssrlni\\.bu\\.h.*lsx_vssrlni_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_hu_w:.*vssrlni\\.hu\\.w.*lsx_vssrlni_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_wu_d:.*vssrlni\\.wu\\.d.*lsx_vssrlni_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlni_du_q:.*vssrlni\\.du\\.q.*lsx_vssrlni_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_b_h:.*vssrlrni\\.b\\.h.*lsx_vssrlrni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_h_w:.*vssrlrni\\.h\\.w.*lsx_vssrlrni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_w_d:.*vssrlrni\\.w\\.d.*lsx_vssrlrni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_d_q:.*vssrlrni\\.d\\.q.*lsx_vssrlrni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_bu_h:.*vssrlrni\\.bu\\.h.*lsx_vssrlrni_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_hu_w:.*vssrlrni\\.hu\\.w.*lsx_vssrlrni_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_wu_d:.*vssrlrni\\.wu\\.d.*lsx_vssrlrni_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrni_du_q:.*vssrlrni\\.du\\.q.*lsx_vssrlrni_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrani_b_h:.*vsrani\\.b\\.h.*lsx_vsrani_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrani_h_w:.*vsrani\\.h\\.w.*lsx_vsrani_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrani_w_d:.*vsrani\\.w\\.d.*lsx_vsrani_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrani_d_q:.*vsrani\\.d\\.q.*lsx_vsrani_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrarni_b_h:.*vsrarni\\.b\\.h.*lsx_vsrarni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrarni_h_w:.*vsrarni\\.h\\.w.*lsx_vsrarni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrarni_w_d:.*vsrarni\\.w\\.d.*lsx_vsrarni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vsrarni_d_q:.*vsrarni\\.d\\.q.*lsx_vsrarni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_b_h:.*vssrani\\.b\\.h.*lsx_vssrani_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_h_w:.*vssrani\\.h\\.w.*lsx_vssrani_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_w_d:.*vssrani\\.w\\.d.*lsx_vssrani_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_d_q:.*vssrani\\.d\\.q.*lsx_vssrani_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_bu_h:.*vssrani\\.bu\\.h.*lsx_vssrani_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_hu_w:.*vssrani\\.hu\\.w.*lsx_vssrani_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_wu_d:.*vssrani\\.wu\\.d.*lsx_vssrani_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrani_du_q:.*vssrani\\.du\\.q.*lsx_vssrani_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_b_h:.*vssrarni\\.b\\.h.*lsx_vssrarni_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_h_w:.*vssrarni\\.h\\.w.*lsx_vssrarni_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_w_d:.*vssrarni\\.w\\.d.*lsx_vssrarni_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_d_q:.*vssrarni\\.d\\.q.*lsx_vssrarni_d_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_bu_h:.*vssrarni\\.bu\\.h.*lsx_vssrarni_bu_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_hu_w:.*vssrarni\\.hu\\.w.*lsx_vssrarni_hu_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_wu_d:.*vssrarni\\.wu\\.d.*lsx_vssrarni_wu_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrarni_du_q:.*vssrarni\\.du\\.q.*lsx_vssrarni_du_q" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vpermi_w:.*vpermi\\.w.*lsx_vpermi_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vld:.*vld.*lsx_vld" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vst:.*vst.*lsx_vst" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrn_b_h:.*vssrlrn\\.b\\.h.*lsx_vssrlrn_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrn_h_w:.*vssrlrn\\.h\\.w.*lsx_vssrlrn_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrlrn_w_d:.*vssrlrn\\.w\\.d.*lsx_vssrlrn_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrln_b_h:.*vssrln\\.b\\.h.*lsx_vssrln_b_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrln_h_w:.*vssrln\\.h\\.w.*lsx_vssrln_h_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vssrln_w_d:.*vssrln\\.w\\.d.*lsx_vssrln_w_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vorn_v:.*vorn\\.v.*lsx_vorn_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vldi:.*vldi.*lsx_vldi" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vshuf_b:.*vshuf\\.b.*lsx_vshuf_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vldx:.*vldx.*lsx_vldx" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vstx:.*vstx.*lsx_vstx" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vextl_qu_du:.*vextl\\.qu\\.du.*lsx_vextl_qu_du" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bnz_b:.*vsetanyeqz\\.b.*lsx_bnz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bnz_d:.*vsetanyeqz\\.d.*lsx_bnz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bnz_h:.*vsetanyeqz\\.h.*lsx_bnz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bnz_v:.*vseteqz\\.v.*lsx_bnz_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bnz_w:.*vsetanyeqz\\.w.*lsx_bnz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bz_b:.*vsetallnez\\.b.*lsx_bz_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bz_d:.*vsetallnez\\.d.*lsx_bz_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bz_h:.*vsetallnez\\.h.*lsx_bz_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bz_v:.*vsetnez\\.v.*lsx_bz_v" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_bz_w:.*vsetallnez\\.w.*lsx_bz_w" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_d:.*vfcmp\\.caf\\.d.*lsx_vfcmp_caf_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_caf_s:.*vfcmp\\.caf\\.s.*lsx_vfcmp_caf_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_d:.*vfcmp\\.ceq\\.d.*lsx_vfcmp_ceq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_ceq_s:.*vfcmp\\.ceq\\.s.*lsx_vfcmp_ceq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cle_d:.*vfcmp\\.cle\\.d.*lsx_vfcmp_cle_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cle_s:.*vfcmp\\.cle\\.s.*lsx_vfcmp_cle_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_d:.*vfcmp\\.clt\\.d.*lsx_vfcmp_clt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_clt_s:.*vfcmp\\.clt\\.s.*lsx_vfcmp_clt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_d:.*vfcmp\\.cne\\.d.*lsx_vfcmp_cne_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cne_s:.*vfcmp\\.cne\\.s.*lsx_vfcmp_cne_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_d:.*vfcmp\\.cor\\.d.*lsx_vfcmp_cor_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cor_s:.*vfcmp\\.cor\\.s.*lsx_vfcmp_cor_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_d:.*vfcmp\\.cueq\\.d.*lsx_vfcmp_cueq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cueq_s:.*vfcmp\\.cueq\\.s.*lsx_vfcmp_cueq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cule_d:.*vfcmp\\.cule\\.d.*lsx_vfcmp_cule_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cule_s:.*vfcmp\\.cule\\.s.*lsx_vfcmp_cule_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_d:.*vfcmp\\.cult\\.d.*lsx_vfcmp_cult_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cult_s:.*vfcmp\\.cult\\.s.*lsx_vfcmp_cult_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_d:.*vfcmp\\.cun\\.d.*lsx_vfcmp_cun_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_d:.*vfcmp\\.cune\\.d.*lsx_vfcmp_cune_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cune_s:.*vfcmp\\.cune\\.s.*lsx_vfcmp_cune_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_cun_s:.*vfcmp\\.cun\\.s.*lsx_vfcmp_cun_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_d:.*vfcmp\\.saf\\.d.*lsx_vfcmp_saf_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_saf_s:.*vfcmp\\.saf\\.s.*lsx_vfcmp_saf_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_d:.*vfcmp\\.seq\\.d.*lsx_vfcmp_seq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_seq_s:.*vfcmp\\.seq\\.s.*lsx_vfcmp_seq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_d:.*vfcmp\\.sle\\.d.*lsx_vfcmp_sle_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sle_s:.*vfcmp\\.sle\\.s.*lsx_vfcmp_sle_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_d:.*vfcmp\\.slt\\.d.*lsx_vfcmp_slt_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_slt_s:.*vfcmp\\.slt\\.s.*lsx_vfcmp_slt_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_d:.*vfcmp\\.sne\\.d.*lsx_vfcmp_sne_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sne_s:.*vfcmp\\.sne\\.s.*lsx_vfcmp_sne_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_d:.*vfcmp\\.sor\\.d.*lsx_vfcmp_sor_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sor_s:.*vfcmp\\.sor\\.s.*lsx_vfcmp_sor_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_d:.*vfcmp\\.sueq\\.d.*lsx_vfcmp_sueq_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sueq_s:.*vfcmp\\.sueq\\.s.*lsx_vfcmp_sueq_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_d:.*vfcmp\\.sule\\.d.*lsx_vfcmp_sule_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sule_s:.*vfcmp\\.sule\\.s.*lsx_vfcmp_sule_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_d:.*vfcmp\\.sult\\.d.*lsx_vfcmp_sult_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sult_s:.*vfcmp\\.sult\\.s.*lsx_vfcmp_sult_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sun_d:.*vfcmp\\.sun\\.d.*lsx_vfcmp_sun_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_d:.*vfcmp\\.sune\\.d.*lsx_vfcmp_sune_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sune_s:.*vfcmp\\.sune\\.s.*lsx_vfcmp_sune_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vfcmp_sun_s:.*vfcmp\\.sun\\.s.*lsx_vfcmp_sun_s" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrepli_b:.*vrepli\\.b.*lsx_vrepli_b" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrepli_d:.*vrepli\\.d.*lsx_vrepli_d" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrepli_h:.*vrepli\\.h.*lsx_vrepli_h" 1 } } */
++/* { dg-final { scan-assembler-times "lsx_vrepli_w:.*vrepli\\.w.*lsx_vrepli_w" 1 } } */
++
++typedef signed char v16i8 __attribute__ ((vector_size (16), aligned (16)));
++typedef signed char v16i8_b __attribute__ ((vector_size (16), aligned (1)));
++typedef unsigned char v16u8 __attribute__ ((vector_size (16), aligned (16)));
++typedef unsigned char v16u8_b __attribute__ ((vector_size (16), aligned (1)));
++typedef short v8i16 __attribute__ ((vector_size (16), aligned (16)));
++typedef short v8i16_h __attribute__ ((vector_size (16), aligned (2)));
++typedef unsigned short v8u16 __attribute__ ((vector_size (16), aligned (16)));
++typedef unsigned short v8u16_h __attribute__ ((vector_size (16), aligned (2)));
++typedef int v4i32 __attribute__ ((vector_size (16), aligned (16)));
++typedef int v4i32_w __attribute__ ((vector_size (16), aligned (4)));
++typedef unsigned int v4u32 __attribute__ ((vector_size (16), aligned (16)));
++typedef unsigned int v4u32_w __attribute__ ((vector_size (16), aligned (4)));
++typedef long long v2i64 __attribute__ ((vector_size (16), aligned (16)));
++typedef long long v2i64_d __attribute__ ((vector_size (16), aligned (8)));
++typedef unsigned long long v2u64
++    __attribute__ ((vector_size (16), aligned (16)));
++typedef unsigned long long v2u64_d
++    __attribute__ ((vector_size (16), aligned (8)));
++typedef float v4f32 __attribute__ ((vector_size (16), aligned (16)));
++typedef float v4f32_w __attribute__ ((vector_size (16), aligned (4)));
++typedef double v2f64 __attribute__ ((vector_size (16), aligned (16)));
++typedef double v2f64_d __attribute__ ((vector_size (16), aligned (8)));
++
++typedef long long __m128i
++    __attribute__ ((__vector_size__ (16), __may_alias__));
++typedef float __m128 __attribute__ ((__vector_size__ (16), __may_alias__));
++typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
++
++v16i8
++__lsx_vsll_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsll_b (_1, _2);
++}
++v8i16
++__lsx_vsll_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsll_h (_1, _2);
++}
++v4i32
++__lsx_vsll_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsll_w (_1, _2);
++}
++v2i64
++__lsx_vsll_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsll_d (_1, _2);
++}
++v16i8
++__lsx_vslli_b (v16i8 _1)
++{
++  return __builtin_lsx_vslli_b (_1, 1);
++}
++v8i16
++__lsx_vslli_h (v8i16 _1)
++{
++  return __builtin_lsx_vslli_h (_1, 1);
++}
++v4i32
++__lsx_vslli_w (v4i32 _1)
++{
++  return __builtin_lsx_vslli_w (_1, 1);
++}
++v2i64
++__lsx_vslli_d (v2i64 _1)
++{
++  return __builtin_lsx_vslli_d (_1, 1);
++}
++v16i8
++__lsx_vsra_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsra_b (_1, _2);
++}
++v8i16
++__lsx_vsra_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsra_h (_1, _2);
++}
++v4i32
++__lsx_vsra_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsra_w (_1, _2);
++}
++v2i64
++__lsx_vsra_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsra_d (_1, _2);
++}
++v16i8
++__lsx_vsrai_b (v16i8 _1)
++{
++  return __builtin_lsx_vsrai_b (_1, 1);
++}
++v8i16
++__lsx_vsrai_h (v8i16 _1)
++{
++  return __builtin_lsx_vsrai_h (_1, 1);
++}
++v4i32
++__lsx_vsrai_w (v4i32 _1)
++{
++  return __builtin_lsx_vsrai_w (_1, 1);
++}
++v2i64
++__lsx_vsrai_d (v2i64 _1)
++{
++  return __builtin_lsx_vsrai_d (_1, 1);
++}
++v16i8
++__lsx_vsrar_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsrar_b (_1, _2);
++}
++v8i16
++__lsx_vsrar_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrar_h (_1, _2);
++}
++v4i32
++__lsx_vsrar_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrar_w (_1, _2);
++}
++v2i64
++__lsx_vsrar_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrar_d (_1, _2);
++}
++v16i8
++__lsx_vsrari_b (v16i8 _1)
++{
++  return __builtin_lsx_vsrari_b (_1, 1);
++}
++v8i16
++__lsx_vsrari_h (v8i16 _1)
++{
++  return __builtin_lsx_vsrari_h (_1, 1);
++}
++v4i32
++__lsx_vsrari_w (v4i32 _1)
++{
++  return __builtin_lsx_vsrari_w (_1, 1);
++}
++v2i64
++__lsx_vsrari_d (v2i64 _1)
++{
++  return __builtin_lsx_vsrari_d (_1, 1);
++}
++v16i8
++__lsx_vsrl_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsrl_b (_1, _2);
++}
++v8i16
++__lsx_vsrl_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrl_h (_1, _2);
++}
++v4i32
++__lsx_vsrl_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrl_w (_1, _2);
++}
++v2i64
++__lsx_vsrl_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrl_d (_1, _2);
++}
++v16i8
++__lsx_vsrli_b (v16i8 _1)
++{
++  return __builtin_lsx_vsrli_b (_1, 1);
++}
++v8i16
++__lsx_vsrli_h (v8i16 _1)
++{
++  return __builtin_lsx_vsrli_h (_1, 1);
++}
++v4i32
++__lsx_vsrli_w (v4i32 _1)
++{
++  return __builtin_lsx_vsrli_w (_1, 1);
++}
++v2i64
++__lsx_vsrli_d (v2i64 _1)
++{
++  return __builtin_lsx_vsrli_d (_1, 1);
++}
++v16i8
++__lsx_vsrlr_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsrlr_b (_1, _2);
++}
++v8i16
++__lsx_vsrlr_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrlr_h (_1, _2);
++}
++v4i32
++__lsx_vsrlr_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrlr_w (_1, _2);
++}
++v2i64
++__lsx_vsrlr_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrlr_d (_1, _2);
++}
++v16i8
++__lsx_vsrlri_b (v16i8 _1)
++{
++  return __builtin_lsx_vsrlri_b (_1, 1);
++}
++v8i16
++__lsx_vsrlri_h (v8i16 _1)
++{
++  return __builtin_lsx_vsrlri_h (_1, 1);
++}
++v4i32
++__lsx_vsrlri_w (v4i32 _1)
++{
++  return __builtin_lsx_vsrlri_w (_1, 1);
++}
++v2i64
++__lsx_vsrlri_d (v2i64 _1)
++{
++  return __builtin_lsx_vsrlri_d (_1, 1);
++}
++v16u8
++__lsx_vbitclr_b (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vbitclr_b (_1, _2);
++}
++v8u16
++__lsx_vbitclr_h (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vbitclr_h (_1, _2);
++}
++v4u32
++__lsx_vbitclr_w (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vbitclr_w (_1, _2);
++}
++v2u64
++__lsx_vbitclr_d (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vbitclr_d (_1, _2);
++}
++v16u8
++__lsx_vbitclri_b (v16u8 _1)
++{
++  return __builtin_lsx_vbitclri_b (_1, 1);
++}
++v8u16
++__lsx_vbitclri_h (v8u16 _1)
++{
++  return __builtin_lsx_vbitclri_h (_1, 1);
++}
++v4u32
++__lsx_vbitclri_w (v4u32 _1)
++{
++  return __builtin_lsx_vbitclri_w (_1, 1);
++}
++v2u64
++__lsx_vbitclri_d (v2u64 _1)
++{
++  return __builtin_lsx_vbitclri_d (_1, 1);
++}
++v16u8
++__lsx_vbitset_b (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vbitset_b (_1, _2);
++}
++v8u16
++__lsx_vbitset_h (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vbitset_h (_1, _2);
++}
++v4u32
++__lsx_vbitset_w (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vbitset_w (_1, _2);
++}
++v2u64
++__lsx_vbitset_d (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vbitset_d (_1, _2);
++}
++v16u8
++__lsx_vbitseti_b (v16u8 _1)
++{
++  return __builtin_lsx_vbitseti_b (_1, 1);
++}
++v8u16
++__lsx_vbitseti_h (v8u16 _1)
++{
++  return __builtin_lsx_vbitseti_h (_1, 1);
++}
++v4u32
++__lsx_vbitseti_w (v4u32 _1)
++{
++  return __builtin_lsx_vbitseti_w (_1, 1);
++}
++v2u64
++__lsx_vbitseti_d (v2u64 _1)
++{
++  return __builtin_lsx_vbitseti_d (_1, 1);
++}
++v16u8
++__lsx_vbitrev_b (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vbitrev_b (_1, _2);
++}
++v8u16
++__lsx_vbitrev_h (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vbitrev_h (_1, _2);
++}
++v4u32
++__lsx_vbitrev_w (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vbitrev_w (_1, _2);
++}
++v2u64
++__lsx_vbitrev_d (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vbitrev_d (_1, _2);
++}
++v16u8
++__lsx_vbitrevi_b (v16u8 _1)
++{
++  return __builtin_lsx_vbitrevi_b (_1, 1);
++}
++v8u16
++__lsx_vbitrevi_h (v8u16 _1)
++{
++  return __builtin_lsx_vbitrevi_h (_1, 1);
++}
++v4u32
++__lsx_vbitrevi_w (v4u32 _1)
++{
++  return __builtin_lsx_vbitrevi_w (_1, 1);
++}
++v2u64
++__lsx_vbitrevi_d (v2u64 _1)
++{
++  return __builtin_lsx_vbitrevi_d (_1, 1);
++}
++v16i8
++__lsx_vadd_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vadd_b (_1, _2);
++}
++v8i16
++__lsx_vadd_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vadd_h (_1, _2);
++}
++v4i32
++__lsx_vadd_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vadd_w (_1, _2);
++}
++v2i64
++__lsx_vadd_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vadd_d (_1, _2);
++}
++v16i8
++__lsx_vaddi_bu (v16i8 _1)
++{
++  return __builtin_lsx_vaddi_bu (_1, 1);
++}
++v8i16
++__lsx_vaddi_hu (v8i16 _1)
++{
++  return __builtin_lsx_vaddi_hu (_1, 1);
++}
++v4i32
++__lsx_vaddi_wu (v4i32 _1)
++{
++  return __builtin_lsx_vaddi_wu (_1, 1);
++}
++v2i64
++__lsx_vaddi_du (v2i64 _1)
++{
++  return __builtin_lsx_vaddi_du (_1, 1);
++}
++v16i8
++__lsx_vsub_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsub_b (_1, _2);
++}
++v8i16
++__lsx_vsub_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsub_h (_1, _2);
++}
++v4i32
++__lsx_vsub_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsub_w (_1, _2);
++}
++v2i64
++__lsx_vsub_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsub_d (_1, _2);
++}
++v16i8
++__lsx_vsubi_bu (v16i8 _1)
++{
++  return __builtin_lsx_vsubi_bu (_1, 1);
++}
++v8i16
++__lsx_vsubi_hu (v8i16 _1)
++{
++  return __builtin_lsx_vsubi_hu (_1, 1);
++}
++v4i32
++__lsx_vsubi_wu (v4i32 _1)
++{
++  return __builtin_lsx_vsubi_wu (_1, 1);
++}
++v2i64
++__lsx_vsubi_du (v2i64 _1)
++{
++  return __builtin_lsx_vsubi_du (_1, 1);
++}
++v16i8
++__lsx_vmax_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmax_b (_1, _2);
++}
++v8i16
++__lsx_vmax_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmax_h (_1, _2);
++}
++v4i32
++__lsx_vmax_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmax_w (_1, _2);
++}
++v2i64
++__lsx_vmax_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmax_d (_1, _2);
++}
++v16i8
++__lsx_vmaxi_b (v16i8 _1)
++{
++  return __builtin_lsx_vmaxi_b (_1, 1);
++}
++v8i16
++__lsx_vmaxi_h (v8i16 _1)
++{
++  return __builtin_lsx_vmaxi_h (_1, 1);
++}
++v4i32
++__lsx_vmaxi_w (v4i32 _1)
++{
++  return __builtin_lsx_vmaxi_w (_1, 1);
++}
++v2i64
++__lsx_vmaxi_d (v2i64 _1)
++{
++  return __builtin_lsx_vmaxi_d (_1, 1);
++}
++v16u8
++__lsx_vmax_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vmax_bu (_1, _2);
++}
++v8u16
++__lsx_vmax_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vmax_hu (_1, _2);
++}
++v4u32
++__lsx_vmax_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vmax_wu (_1, _2);
++}
++v2u64
++__lsx_vmax_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vmax_du (_1, _2);
++}
++v16u8
++__lsx_vmaxi_bu (v16u8 _1)
++{
++  return __builtin_lsx_vmaxi_bu (_1, 1);
++}
++v8u16
++__lsx_vmaxi_hu (v8u16 _1)
++{
++  return __builtin_lsx_vmaxi_hu (_1, 1);
++}
++v4u32
++__lsx_vmaxi_wu (v4u32 _1)
++{
++  return __builtin_lsx_vmaxi_wu (_1, 1);
++}
++v2u64
++__lsx_vmaxi_du (v2u64 _1)
++{
++  return __builtin_lsx_vmaxi_du (_1, 1);
++}
++v16i8
++__lsx_vmin_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmin_b (_1, _2);
++}
++v8i16
++__lsx_vmin_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmin_h (_1, _2);
++}
++v4i32
++__lsx_vmin_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmin_w (_1, _2);
++}
++v2i64
++__lsx_vmin_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmin_d (_1, _2);
++}
++v16i8
++__lsx_vmini_b (v16i8 _1)
++{
++  return __builtin_lsx_vmini_b (_1, 1);
++}
++v8i16
++__lsx_vmini_h (v8i16 _1)
++{
++  return __builtin_lsx_vmini_h (_1, 1);
++}
++v4i32
++__lsx_vmini_w (v4i32 _1)
++{
++  return __builtin_lsx_vmini_w (_1, 1);
++}
++v2i64
++__lsx_vmini_d (v2i64 _1)
++{
++  return __builtin_lsx_vmini_d (_1, 1);
++}
++v16u8
++__lsx_vmin_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vmin_bu (_1, _2);
++}
++v8u16
++__lsx_vmin_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vmin_hu (_1, _2);
++}
++v4u32
++__lsx_vmin_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vmin_wu (_1, _2);
++}
++v2u64
++__lsx_vmin_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vmin_du (_1, _2);
++}
++v16u8
++__lsx_vmini_bu (v16u8 _1)
++{
++  return __builtin_lsx_vmini_bu (_1, 1);
++}
++v8u16
++__lsx_vmini_hu (v8u16 _1)
++{
++  return __builtin_lsx_vmini_hu (_1, 1);
++}
++v4u32
++__lsx_vmini_wu (v4u32 _1)
++{
++  return __builtin_lsx_vmini_wu (_1, 1);
++}
++v2u64
++__lsx_vmini_du (v2u64 _1)
++{
++  return __builtin_lsx_vmini_du (_1, 1);
++}
++v16i8
++__lsx_vseq_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vseq_b (_1, _2);
++}
++v8i16
++__lsx_vseq_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vseq_h (_1, _2);
++}
++v4i32
++__lsx_vseq_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vseq_w (_1, _2);
++}
++v2i64
++__lsx_vseq_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vseq_d (_1, _2);
++}
++v16i8
++__lsx_vseqi_b (v16i8 _1)
++{
++  return __builtin_lsx_vseqi_b (_1, 1);
++}
++v8i16
++__lsx_vseqi_h (v8i16 _1)
++{
++  return __builtin_lsx_vseqi_h (_1, 1);
++}
++v4i32
++__lsx_vseqi_w (v4i32 _1)
++{
++  return __builtin_lsx_vseqi_w (_1, 1);
++}
++v2i64
++__lsx_vseqi_d (v2i64 _1)
++{
++  return __builtin_lsx_vseqi_d (_1, 1);
++}
++v16i8
++__lsx_vslti_b (v16i8 _1)
++{
++  return __builtin_lsx_vslti_b (_1, 1);
++}
++v16i8
++__lsx_vslt_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vslt_b (_1, _2);
++}
++v8i16
++__lsx_vslt_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vslt_h (_1, _2);
++}
++v4i32
++__lsx_vslt_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vslt_w (_1, _2);
++}
++v2i64
++__lsx_vslt_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vslt_d (_1, _2);
++}
++v8i16
++__lsx_vslti_h (v8i16 _1)
++{
++  return __builtin_lsx_vslti_h (_1, 1);
++}
++v4i32
++__lsx_vslti_w (v4i32 _1)
++{
++  return __builtin_lsx_vslti_w (_1, 1);
++}
++v2i64
++__lsx_vslti_d (v2i64 _1)
++{
++  return __builtin_lsx_vslti_d (_1, 1);
++}
++v16i8
++__lsx_vslt_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vslt_bu (_1, _2);
++}
++v8i16
++__lsx_vslt_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vslt_hu (_1, _2);
++}
++v4i32
++__lsx_vslt_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vslt_wu (_1, _2);
++}
++v2i64
++__lsx_vslt_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vslt_du (_1, _2);
++}
++v16i8
++__lsx_vslti_bu (v16u8 _1)
++{
++  return __builtin_lsx_vslti_bu (_1, 1);
++}
++v8i16
++__lsx_vslti_hu (v8u16 _1)
++{
++  return __builtin_lsx_vslti_hu (_1, 1);
++}
++v4i32
++__lsx_vslti_wu (v4u32 _1)
++{
++  return __builtin_lsx_vslti_wu (_1, 1);
++}
++v2i64
++__lsx_vslti_du (v2u64 _1)
++{
++  return __builtin_lsx_vslti_du (_1, 1);
++}
++v16i8
++__lsx_vsle_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsle_b (_1, _2);
++}
++v8i16
++__lsx_vsle_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsle_h (_1, _2);
++}
++v4i32
++__lsx_vsle_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsle_w (_1, _2);
++}
++v2i64
++__lsx_vsle_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsle_d (_1, _2);
++}
++v16i8
++__lsx_vslei_b (v16i8 _1)
++{
++  return __builtin_lsx_vslei_b (_1, 1);
++}
++v8i16
++__lsx_vslei_h (v8i16 _1)
++{
++  return __builtin_lsx_vslei_h (_1, 1);
++}
++v4i32
++__lsx_vslei_w (v4i32 _1)
++{
++  return __builtin_lsx_vslei_w (_1, 1);
++}
++v2i64
++__lsx_vslei_d (v2i64 _1)
++{
++  return __builtin_lsx_vslei_d (_1, 1);
++}
++v16i8
++__lsx_vsle_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vsle_bu (_1, _2);
++}
++v8i16
++__lsx_vsle_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vsle_hu (_1, _2);
++}
++v4i32
++__lsx_vsle_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vsle_wu (_1, _2);
++}
++v2i64
++__lsx_vsle_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vsle_du (_1, _2);
++}
++v16i8
++__lsx_vslei_bu (v16u8 _1)
++{
++  return __builtin_lsx_vslei_bu (_1, 1);
++}
++v8i16
++__lsx_vslei_hu (v8u16 _1)
++{
++  return __builtin_lsx_vslei_hu (_1, 1);
++}
++v4i32
++__lsx_vslei_wu (v4u32 _1)
++{
++  return __builtin_lsx_vslei_wu (_1, 1);
++}
++v2i64
++__lsx_vslei_du (v2u64 _1)
++{
++  return __builtin_lsx_vslei_du (_1, 1);
++}
++v16i8
++__lsx_vsat_b (v16i8 _1)
++{
++  return __builtin_lsx_vsat_b (_1, 1);
++}
++v8i16
++__lsx_vsat_h (v8i16 _1)
++{
++  return __builtin_lsx_vsat_h (_1, 1);
++}
++v4i32
++__lsx_vsat_w (v4i32 _1)
++{
++  return __builtin_lsx_vsat_w (_1, 1);
++}
++v2i64
++__lsx_vsat_d (v2i64 _1)
++{
++  return __builtin_lsx_vsat_d (_1, 1);
++}
++v16u8
++__lsx_vsat_bu (v16u8 _1)
++{
++  return __builtin_lsx_vsat_bu (_1, 1);
++}
++v8u16
++__lsx_vsat_hu (v8u16 _1)
++{
++  return __builtin_lsx_vsat_hu (_1, 1);
++}
++v4u32
++__lsx_vsat_wu (v4u32 _1)
++{
++  return __builtin_lsx_vsat_wu (_1, 1);
++}
++v2u64
++__lsx_vsat_du (v2u64 _1)
++{
++  return __builtin_lsx_vsat_du (_1, 1);
++}
++v16i8
++__lsx_vadda_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vadda_b (_1, _2);
++}
++v8i16
++__lsx_vadda_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vadda_h (_1, _2);
++}
++v4i32
++__lsx_vadda_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vadda_w (_1, _2);
++}
++v2i64
++__lsx_vadda_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vadda_d (_1, _2);
++}
++v16i8
++__lsx_vsadd_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsadd_b (_1, _2);
++}
++v8i16
++__lsx_vsadd_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsadd_h (_1, _2);
++}
++v4i32
++__lsx_vsadd_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsadd_w (_1, _2);
++}
++v2i64
++__lsx_vsadd_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsadd_d (_1, _2);
++}
++v16u8
++__lsx_vsadd_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vsadd_bu (_1, _2);
++}
++v8u16
++__lsx_vsadd_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vsadd_hu (_1, _2);
++}
++v4u32
++__lsx_vsadd_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vsadd_wu (_1, _2);
++}
++v2u64
++__lsx_vsadd_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vsadd_du (_1, _2);
++}
++v16i8
++__lsx_vavg_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vavg_b (_1, _2);
++}
++v8i16
++__lsx_vavg_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vavg_h (_1, _2);
++}
++v4i32
++__lsx_vavg_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vavg_w (_1, _2);
++}
++v2i64
++__lsx_vavg_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vavg_d (_1, _2);
++}
++v16u8
++__lsx_vavg_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vavg_bu (_1, _2);
++}
++v8u16
++__lsx_vavg_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vavg_hu (_1, _2);
++}
++v4u32
++__lsx_vavg_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vavg_wu (_1, _2);
++}
++v2u64
++__lsx_vavg_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vavg_du (_1, _2);
++}
++v16i8
++__lsx_vavgr_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vavgr_b (_1, _2);
++}
++v8i16
++__lsx_vavgr_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vavgr_h (_1, _2);
++}
++v4i32
++__lsx_vavgr_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vavgr_w (_1, _2);
++}
++v2i64
++__lsx_vavgr_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vavgr_d (_1, _2);
++}
++v16u8
++__lsx_vavgr_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vavgr_bu (_1, _2);
++}
++v8u16
++__lsx_vavgr_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vavgr_hu (_1, _2);
++}
++v4u32
++__lsx_vavgr_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vavgr_wu (_1, _2);
++}
++v2u64
++__lsx_vavgr_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vavgr_du (_1, _2);
++}
++v16i8
++__lsx_vssub_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssub_b (_1, _2);
++}
++v8i16
++__lsx_vssub_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssub_h (_1, _2);
++}
++v4i32
++__lsx_vssub_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssub_w (_1, _2);
++}
++v2i64
++__lsx_vssub_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssub_d (_1, _2);
++}
++v16u8
++__lsx_vssub_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vssub_bu (_1, _2);
++}
++v8u16
++__lsx_vssub_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vssub_hu (_1, _2);
++}
++v4u32
++__lsx_vssub_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vssub_wu (_1, _2);
++}
++v2u64
++__lsx_vssub_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vssub_du (_1, _2);
++}
++v16i8
++__lsx_vabsd_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vabsd_b (_1, _2);
++}
++v8i16
++__lsx_vabsd_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vabsd_h (_1, _2);
++}
++v4i32
++__lsx_vabsd_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vabsd_w (_1, _2);
++}
++v2i64
++__lsx_vabsd_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vabsd_d (_1, _2);
++}
++v16u8
++__lsx_vabsd_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vabsd_bu (_1, _2);
++}
++v8u16
++__lsx_vabsd_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vabsd_hu (_1, _2);
++}
++v4u32
++__lsx_vabsd_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vabsd_wu (_1, _2);
++}
++v2u64
++__lsx_vabsd_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vabsd_du (_1, _2);
++}
++v16i8
++__lsx_vmul_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmul_b (_1, _2);
++}
++v8i16
++__lsx_vmul_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmul_h (_1, _2);
++}
++v4i32
++__lsx_vmul_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmul_w (_1, _2);
++}
++v2i64
++__lsx_vmul_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmul_d (_1, _2);
++}
++v16i8
++__lsx_vmadd_b (v16i8 _1, v16i8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vmadd_b (_1, _2, _3);
++}
++v8i16
++__lsx_vmadd_h (v8i16 _1, v8i16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vmadd_h (_1, _2, _3);
++}
++v4i32
++__lsx_vmadd_w (v4i32 _1, v4i32 _2, v4i32 _3)
++{
++  return __builtin_lsx_vmadd_w (_1, _2, _3);
++}
++v2i64
++__lsx_vmadd_d (v2i64 _1, v2i64 _2, v2i64 _3)
++{
++  return __builtin_lsx_vmadd_d (_1, _2, _3);
++}
++v16i8
++__lsx_vmsub_b (v16i8 _1, v16i8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vmsub_b (_1, _2, _3);
++}
++v8i16
++__lsx_vmsub_h (v8i16 _1, v8i16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vmsub_h (_1, _2, _3);
++}
++v4i32
++__lsx_vmsub_w (v4i32 _1, v4i32 _2, v4i32 _3)
++{
++  return __builtin_lsx_vmsub_w (_1, _2, _3);
++}
++v2i64
++__lsx_vmsub_d (v2i64 _1, v2i64 _2, v2i64 _3)
++{
++  return __builtin_lsx_vmsub_d (_1, _2, _3);
++}
++v16i8
++__lsx_vdiv_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vdiv_b (_1, _2);
++}
++v8i16
++__lsx_vdiv_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vdiv_h (_1, _2);
++}
++v4i32
++__lsx_vdiv_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vdiv_w (_1, _2);
++}
++v2i64
++__lsx_vdiv_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vdiv_d (_1, _2);
++}
++v16u8
++__lsx_vdiv_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vdiv_bu (_1, _2);
++}
++v8u16
++__lsx_vdiv_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vdiv_hu (_1, _2);
++}
++v4u32
++__lsx_vdiv_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vdiv_wu (_1, _2);
++}
++v2u64
++__lsx_vdiv_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vdiv_du (_1, _2);
++}
++v8i16
++__lsx_vhaddw_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vhaddw_h_b (_1, _2);
++}
++v4i32
++__lsx_vhaddw_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vhaddw_w_h (_1, _2);
++}
++v2i64
++__lsx_vhaddw_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vhaddw_d_w (_1, _2);
++}
++v8u16
++__lsx_vhaddw_hu_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vhaddw_hu_bu (_1, _2);
++}
++v4u32
++__lsx_vhaddw_wu_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vhaddw_wu_hu (_1, _2);
++}
++v2u64
++__lsx_vhaddw_du_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vhaddw_du_wu (_1, _2);
++}
++v8i16
++__lsx_vhsubw_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vhsubw_h_b (_1, _2);
++}
++v4i32
++__lsx_vhsubw_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vhsubw_w_h (_1, _2);
++}
++v2i64
++__lsx_vhsubw_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vhsubw_d_w (_1, _2);
++}
++v8i16
++__lsx_vhsubw_hu_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vhsubw_hu_bu (_1, _2);
++}
++v4i32
++__lsx_vhsubw_wu_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vhsubw_wu_hu (_1, _2);
++}
++v2i64
++__lsx_vhsubw_du_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vhsubw_du_wu (_1, _2);
++}
++v16i8
++__lsx_vmod_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmod_b (_1, _2);
++}
++v8i16
++__lsx_vmod_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmod_h (_1, _2);
++}
++v4i32
++__lsx_vmod_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmod_w (_1, _2);
++}
++v2i64
++__lsx_vmod_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmod_d (_1, _2);
++}
++v16u8
++__lsx_vmod_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vmod_bu (_1, _2);
++}
++v8u16
++__lsx_vmod_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vmod_hu (_1, _2);
++}
++v4u32
++__lsx_vmod_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vmod_wu (_1, _2);
++}
++v2u64
++__lsx_vmod_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vmod_du (_1, _2);
++}
++v16i8
++__lsx_vreplve_b (v16i8 _1, int _2)
++{
++  return __builtin_lsx_vreplve_b (_1, _2);
++}
++v8i16
++__lsx_vreplve_h (v8i16 _1, int _2)
++{
++  return __builtin_lsx_vreplve_h (_1, _2);
++}
++v4i32
++__lsx_vreplve_w (v4i32 _1, int _2)
++{
++  return __builtin_lsx_vreplve_w (_1, _2);
++}
++v2i64
++__lsx_vreplve_d (v2i64 _1, int _2)
++{
++  return __builtin_lsx_vreplve_d (_1, _2);
++}
++v16i8
++__lsx_vreplvei_b (v16i8 _1)
++{
++  return __builtin_lsx_vreplvei_b (_1, 1);
++}
++v8i16
++__lsx_vreplvei_h (v8i16 _1)
++{
++  return __builtin_lsx_vreplvei_h (_1, 1);
++}
++v4i32
++__lsx_vreplvei_w (v4i32 _1)
++{
++  return __builtin_lsx_vreplvei_w (_1, 1);
++}
++v2i64
++__lsx_vreplvei_d (v2i64 _1)
++{
++  return __builtin_lsx_vreplvei_d (_1, 1);
++}
++v16i8
++__lsx_vpickev_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vpickev_b (_1, _2);
++}
++v8i16
++__lsx_vpickev_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vpickev_h (_1, _2);
++}
++v4i32
++__lsx_vpickev_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vpickev_w (_1, _2);
++}
++v2i64
++__lsx_vpickev_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vpickev_d (_1, _2);
++}
++v16i8
++__lsx_vpickod_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vpickod_b (_1, _2);
++}
++v8i16
++__lsx_vpickod_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vpickod_h (_1, _2);
++}
++v4i32
++__lsx_vpickod_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vpickod_w (_1, _2);
++}
++v2i64
++__lsx_vpickod_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vpickod_d (_1, _2);
++}
++v16i8
++__lsx_vilvh_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vilvh_b (_1, _2);
++}
++v8i16
++__lsx_vilvh_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vilvh_h (_1, _2);
++}
++v4i32
++__lsx_vilvh_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vilvh_w (_1, _2);
++}
++v2i64
++__lsx_vilvh_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vilvh_d (_1, _2);
++}
++v16i8
++__lsx_vilvl_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vilvl_b (_1, _2);
++}
++v8i16
++__lsx_vilvl_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vilvl_h (_1, _2);
++}
++v4i32
++__lsx_vilvl_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vilvl_w (_1, _2);
++}
++v2i64
++__lsx_vilvl_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vilvl_d (_1, _2);
++}
++v16i8
++__lsx_vpackev_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vpackev_b (_1, _2);
++}
++v8i16
++__lsx_vpackev_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vpackev_h (_1, _2);
++}
++v4i32
++__lsx_vpackev_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vpackev_w (_1, _2);
++}
++v2i64
++__lsx_vpackev_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vpackev_d (_1, _2);
++}
++v16i8
++__lsx_vpackod_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vpackod_b (_1, _2);
++}
++v8i16
++__lsx_vpackod_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vpackod_h (_1, _2);
++}
++v4i32
++__lsx_vpackod_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vpackod_w (_1, _2);
++}
++v2i64
++__lsx_vpackod_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vpackod_d (_1, _2);
++}
++v8i16
++__lsx_vshuf_h (v8i16 _1, v8i16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vshuf_h (_1, _2, _3);
++}
++v4i32
++__lsx_vshuf_w (v4i32 _1, v4i32 _2, v4i32 _3)
++{
++  return __builtin_lsx_vshuf_w (_1, _2, _3);
++}
++v2i64
++__lsx_vshuf_d (v2i64 _1, v2i64 _2, v2i64 _3)
++{
++  return __builtin_lsx_vshuf_d (_1, _2, _3);
++}
++v16u8
++__lsx_vand_v (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vand_v (_1, _2);
++}
++v16u8
++__lsx_vandi_b (v16u8 _1)
++{
++  return __builtin_lsx_vandi_b (_1, 1);
++}
++v16u8
++__lsx_vor_v (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vor_v (_1, _2);
++}
++v16u8
++__lsx_vori_b (v16u8 _1)
++{
++  return __builtin_lsx_vori_b (_1, 1);
++}
++v16u8
++__lsx_vnor_v (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vnor_v (_1, _2);
++}
++v16u8
++__lsx_vnori_b (v16u8 _1)
++{
++  return __builtin_lsx_vnori_b (_1, 1);
++}
++v16u8
++__lsx_vxor_v (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vxor_v (_1, _2);
++}
++v16u8
++__lsx_vxori_b (v16u8 _1)
++{
++  return __builtin_lsx_vxori_b (_1, 1);
++}
++v16u8
++__lsx_vbitsel_v (v16u8 _1, v16u8 _2, v16u8 _3)
++{
++  return __builtin_lsx_vbitsel_v (_1, _2, _3);
++}
++v16u8
++__lsx_vbitseli_b (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vbitseli_b (_1, _2, 1);
++}
++v16i8
++__lsx_vshuf4i_b (v16i8 _1)
++{
++  return __builtin_lsx_vshuf4i_b (_1, 1);
++}
++v8i16
++__lsx_vshuf4i_h (v8i16 _1)
++{
++  return __builtin_lsx_vshuf4i_h (_1, 1);
++}
++v4i32
++__lsx_vshuf4i_w (v4i32 _1)
++{
++  return __builtin_lsx_vshuf4i_w (_1, 1);
++}
++v16i8
++__lsx_vreplgr2vr_b (int _1)
++{
++  return __builtin_lsx_vreplgr2vr_b (_1);
++}
++v8i16
++__lsx_vreplgr2vr_h (int _1)
++{
++  return __builtin_lsx_vreplgr2vr_h (_1);
++}
++v4i32
++__lsx_vreplgr2vr_w (int _1)
++{
++  return __builtin_lsx_vreplgr2vr_w (_1);
++}
++v2i64
++__lsx_vreplgr2vr_d (long _1)
++{
++  return __builtin_lsx_vreplgr2vr_d (_1);
++}
++v16i8
++__lsx_vpcnt_b (v16i8 _1)
++{
++  return __builtin_lsx_vpcnt_b (_1);
++}
++v8i16
++__lsx_vpcnt_h (v8i16 _1)
++{
++  return __builtin_lsx_vpcnt_h (_1);
++}
++v4i32
++__lsx_vpcnt_w (v4i32 _1)
++{
++  return __builtin_lsx_vpcnt_w (_1);
++}
++v2i64
++__lsx_vpcnt_d (v2i64 _1)
++{
++  return __builtin_lsx_vpcnt_d (_1);
++}
++v16i8
++__lsx_vclo_b (v16i8 _1)
++{
++  return __builtin_lsx_vclo_b (_1);
++}
++v8i16
++__lsx_vclo_h (v8i16 _1)
++{
++  return __builtin_lsx_vclo_h (_1);
++}
++v4i32
++__lsx_vclo_w (v4i32 _1)
++{
++  return __builtin_lsx_vclo_w (_1);
++}
++v2i64
++__lsx_vclo_d (v2i64 _1)
++{
++  return __builtin_lsx_vclo_d (_1);
++}
++v16i8
++__lsx_vclz_b (v16i8 _1)
++{
++  return __builtin_lsx_vclz_b (_1);
++}
++v8i16
++__lsx_vclz_h (v8i16 _1)
++{
++  return __builtin_lsx_vclz_h (_1);
++}
++v4i32
++__lsx_vclz_w (v4i32 _1)
++{
++  return __builtin_lsx_vclz_w (_1);
++}
++v2i64
++__lsx_vclz_d (v2i64 _1)
++{
++  return __builtin_lsx_vclz_d (_1);
++}
++int
++__lsx_vpickve2gr_b (v16i8 _1)
++{
++  return __builtin_lsx_vpickve2gr_b (_1, 1);
++}
++int
++__lsx_vpickve2gr_h (v8i16 _1)
++{
++  return __builtin_lsx_vpickve2gr_h (_1, 1);
++}
++int
++__lsx_vpickve2gr_w (v4i32 _1)
++{
++  return __builtin_lsx_vpickve2gr_w (_1, 1);
++}
++long
++__lsx_vpickve2gr_d (v2i64 _1)
++{
++  return __builtin_lsx_vpickve2gr_d (_1, 1);
++}
++unsigned int
++__lsx_vpickve2gr_bu (v16i8 _1)
++{
++  return __builtin_lsx_vpickve2gr_bu (_1, 1);
++}
++unsigned int
++__lsx_vpickve2gr_hu (v8i16 _1)
++{
++  return __builtin_lsx_vpickve2gr_hu (_1, 1);
++}
++unsigned int
++__lsx_vpickve2gr_wu (v4i32 _1)
++{
++  return __builtin_lsx_vpickve2gr_wu (_1, 1);
++}
++unsigned long int
++__lsx_vpickve2gr_du (v2i64 _1)
++{
++  return __builtin_lsx_vpickve2gr_du (_1, 1);
++}
++v16i8
++__lsx_vinsgr2vr_b (v16i8 _1)
++{
++  return __builtin_lsx_vinsgr2vr_b (_1, 1, 1);
++}
++v8i16
++__lsx_vinsgr2vr_h (v8i16 _1)
++{
++  return __builtin_lsx_vinsgr2vr_h (_1, 1, 1);
++}
++v4i32
++__lsx_vinsgr2vr_w (v4i32 _1)
++{
++  return __builtin_lsx_vinsgr2vr_w (_1, 1, 1);
++}
++v2i64
++__lsx_vinsgr2vr_d (v2i64 _1)
++{
++  return __builtin_lsx_vinsgr2vr_d (_1, 1, 1);
++}
++v4f32
++__lsx_vfadd_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfadd_s (_1, _2);
++}
++v2f64
++__lsx_vfadd_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfadd_d (_1, _2);
++}
++v4f32
++__lsx_vfsub_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfsub_s (_1, _2);
++}
++v2f64
++__lsx_vfsub_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfsub_d (_1, _2);
++}
++v4f32
++__lsx_vfmul_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfmul_s (_1, _2);
++}
++v2f64
++__lsx_vfmul_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfmul_d (_1, _2);
++}
++v4f32
++__lsx_vfdiv_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfdiv_s (_1, _2);
++}
++v2f64
++__lsx_vfdiv_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfdiv_d (_1, _2);
++}
++v8i16
++__lsx_vfcvt_h_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcvt_h_s (_1, _2);
++}
++v4f32
++__lsx_vfcvt_s_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcvt_s_d (_1, _2);
++}
++v4f32
++__lsx_vfmin_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfmin_s (_1, _2);
++}
++v2f64
++__lsx_vfmin_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfmin_d (_1, _2);
++}
++v4f32
++__lsx_vfmina_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfmina_s (_1, _2);
++}
++v2f64
++__lsx_vfmina_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfmina_d (_1, _2);
++}
++v4f32
++__lsx_vfmax_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfmax_s (_1, _2);
++}
++v2f64
++__lsx_vfmax_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfmax_d (_1, _2);
++}
++v4f32
++__lsx_vfmaxa_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfmaxa_s (_1, _2);
++}
++v2f64
++__lsx_vfmaxa_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfmaxa_d (_1, _2);
++}
++v4i32
++__lsx_vfclass_s (v4f32 _1)
++{
++  return __builtin_lsx_vfclass_s (_1);
++}
++v2i64
++__lsx_vfclass_d (v2f64 _1)
++{
++  return __builtin_lsx_vfclass_d (_1);
++}
++v4f32
++__lsx_vfsqrt_s (v4f32 _1)
++{
++  return __builtin_lsx_vfsqrt_s (_1);
++}
++v2f64
++__lsx_vfsqrt_d (v2f64 _1)
++{
++  return __builtin_lsx_vfsqrt_d (_1);
++}
++v4f32
++__lsx_vfrecip_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrecip_s (_1);
++}
++v2f64
++__lsx_vfrecip_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrecip_d (_1);
++}
++v4f32
++__lsx_vfrint_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrint_s (_1);
++}
++v2f64
++__lsx_vfrint_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrint_d (_1);
++}
++v4f32
++__lsx_vfrsqrt_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrsqrt_s (_1);
++}
++v2f64
++__lsx_vfrsqrt_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrsqrt_d (_1);
++}
++v4f32
++__lsx_vflogb_s (v4f32 _1)
++{
++  return __builtin_lsx_vflogb_s (_1);
++}
++v2f64
++__lsx_vflogb_d (v2f64 _1)
++{
++  return __builtin_lsx_vflogb_d (_1);
++}
++v4f32
++__lsx_vfcvth_s_h (v8i16 _1)
++{
++  return __builtin_lsx_vfcvth_s_h (_1);
++}
++v2f64
++__lsx_vfcvth_d_s (v4f32 _1)
++{
++  return __builtin_lsx_vfcvth_d_s (_1);
++}
++v4f32
++__lsx_vfcvtl_s_h (v8i16 _1)
++{
++  return __builtin_lsx_vfcvtl_s_h (_1);
++}
++v2f64
++__lsx_vfcvtl_d_s (v4f32 _1)
++{
++  return __builtin_lsx_vfcvtl_d_s (_1);
++}
++v4i32
++__lsx_vftint_w_s (v4f32 _1)
++{
++  return __builtin_lsx_vftint_w_s (_1);
++}
++v2i64
++__lsx_vftint_l_d (v2f64 _1)
++{
++  return __builtin_lsx_vftint_l_d (_1);
++}
++v4u32
++__lsx_vftint_wu_s (v4f32 _1)
++{
++  return __builtin_lsx_vftint_wu_s (_1);
++}
++v2u64
++__lsx_vftint_lu_d (v2f64 _1)
++{
++  return __builtin_lsx_vftint_lu_d (_1);
++}
++v4i32
++__lsx_vftintrz_w_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrz_w_s (_1);
++}
++v2i64
++__lsx_vftintrz_l_d (v2f64 _1)
++{
++  return __builtin_lsx_vftintrz_l_d (_1);
++}
++v4u32
++__lsx_vftintrz_wu_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrz_wu_s (_1);
++}
++v2u64
++__lsx_vftintrz_lu_d (v2f64 _1)
++{
++  return __builtin_lsx_vftintrz_lu_d (_1);
++}
++v4f32
++__lsx_vffint_s_w (v4i32 _1)
++{
++  return __builtin_lsx_vffint_s_w (_1);
++}
++v2f64
++__lsx_vffint_d_l (v2i64 _1)
++{
++  return __builtin_lsx_vffint_d_l (_1);
++}
++v4f32
++__lsx_vffint_s_wu (v4u32 _1)
++{
++  return __builtin_lsx_vffint_s_wu (_1);
++}
++v2f64
++__lsx_vffint_d_lu (v2u64 _1)
++{
++  return __builtin_lsx_vffint_d_lu (_1);
++}
++v16u8
++__lsx_vandn_v (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vandn_v (_1, _2);
++}
++v16i8
++__lsx_vneg_b (v16i8 _1)
++{
++  return __builtin_lsx_vneg_b (_1);
++}
++v8i16
++__lsx_vneg_h (v8i16 _1)
++{
++  return __builtin_lsx_vneg_h (_1);
++}
++v4i32
++__lsx_vneg_w (v4i32 _1)
++{
++  return __builtin_lsx_vneg_w (_1);
++}
++v2i64
++__lsx_vneg_d (v2i64 _1)
++{
++  return __builtin_lsx_vneg_d (_1);
++}
++v16i8
++__lsx_vmuh_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmuh_b (_1, _2);
++}
++v8i16
++__lsx_vmuh_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmuh_h (_1, _2);
++}
++v4i32
++__lsx_vmuh_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmuh_w (_1, _2);
++}
++v2i64
++__lsx_vmuh_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmuh_d (_1, _2);
++}
++v16u8
++__lsx_vmuh_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vmuh_bu (_1, _2);
++}
++v8u16
++__lsx_vmuh_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vmuh_hu (_1, _2);
++}
++v4u32
++__lsx_vmuh_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vmuh_wu (_1, _2);
++}
++v2u64
++__lsx_vmuh_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vmuh_du (_1, _2);
++}
++v8i16
++__lsx_vsllwil_h_b (v16i8 _1)
++{
++  return __builtin_lsx_vsllwil_h_b (_1, 1);
++}
++v4i32
++__lsx_vsllwil_w_h (v8i16 _1)
++{
++  return __builtin_lsx_vsllwil_w_h (_1, 1);
++}
++v2i64
++__lsx_vsllwil_d_w (v4i32 _1)
++{
++  return __builtin_lsx_vsllwil_d_w (_1, 1);
++}
++v8u16
++__lsx_vsllwil_hu_bu (v16u8 _1)
++{
++  return __builtin_lsx_vsllwil_hu_bu (_1, 1);
++}
++v4u32
++__lsx_vsllwil_wu_hu (v8u16 _1)
++{
++  return __builtin_lsx_vsllwil_wu_hu (_1, 1);
++}
++v2u64
++__lsx_vsllwil_du_wu (v4u32 _1)
++{
++  return __builtin_lsx_vsllwil_du_wu (_1, 1);
++}
++v16i8
++__lsx_vsran_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsran_b_h (_1, _2);
++}
++v8i16
++__lsx_vsran_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsran_h_w (_1, _2);
++}
++v4i32
++__lsx_vsran_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsran_w_d (_1, _2);
++}
++v16i8
++__lsx_vssran_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssran_b_h (_1, _2);
++}
++v8i16
++__lsx_vssran_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssran_h_w (_1, _2);
++}
++v4i32
++__lsx_vssran_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssran_w_d (_1, _2);
++}
++v16u8
++__lsx_vssran_bu_h (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vssran_bu_h (_1, _2);
++}
++v8u16
++__lsx_vssran_hu_w (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vssran_hu_w (_1, _2);
++}
++v4u32
++__lsx_vssran_wu_d (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vssran_wu_d (_1, _2);
++}
++v16i8
++__lsx_vsrarn_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrarn_b_h (_1, _2);
++}
++v8i16
++__lsx_vsrarn_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrarn_h_w (_1, _2);
++}
++v4i32
++__lsx_vsrarn_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrarn_w_d (_1, _2);
++}
++v16i8
++__lsx_vssrarn_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrarn_b_h (_1, _2);
++}
++v8i16
++__lsx_vssrarn_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrarn_h_w (_1, _2);
++}
++v4i32
++__lsx_vssrarn_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrarn_w_d (_1, _2);
++}
++v16u8
++__lsx_vssrarn_bu_h (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vssrarn_bu_h (_1, _2);
++}
++v8u16
++__lsx_vssrarn_hu_w (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vssrarn_hu_w (_1, _2);
++}
++v4u32
++__lsx_vssrarn_wu_d (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vssrarn_wu_d (_1, _2);
++}
++v16i8
++__lsx_vsrln_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrln_b_h (_1, _2);
++}
++v8i16
++__lsx_vsrln_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrln_h_w (_1, _2);
++}
++v4i32
++__lsx_vsrln_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrln_w_d (_1, _2);
++}
++v16u8
++__lsx_vssrln_bu_h (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vssrln_bu_h (_1, _2);
++}
++v8u16
++__lsx_vssrln_hu_w (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vssrln_hu_w (_1, _2);
++}
++v4u32
++__lsx_vssrln_wu_d (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vssrln_wu_d (_1, _2);
++}
++v16i8
++__lsx_vsrlrn_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrlrn_b_h (_1, _2);
++}
++v8i16
++__lsx_vsrlrn_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrlrn_h_w (_1, _2);
++}
++v4i32
++__lsx_vsrlrn_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrlrn_w_d (_1, _2);
++}
++v16u8
++__lsx_vssrlrn_bu_h (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vssrlrn_bu_h (_1, _2);
++}
++v8u16
++__lsx_vssrlrn_hu_w (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vssrlrn_hu_w (_1, _2);
++}
++v4u32
++__lsx_vssrlrn_wu_d (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vssrlrn_wu_d (_1, _2);
++}
++v16i8
++__lsx_vfrstpi_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vfrstpi_b (_1, _2, 1);
++}
++v8i16
++__lsx_vfrstpi_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vfrstpi_h (_1, _2, 1);
++}
++v16i8
++__lsx_vfrstp_b (v16i8 _1, v16i8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vfrstp_b (_1, _2, _3);
++}
++v8i16
++__lsx_vfrstp_h (v8i16 _1, v8i16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vfrstp_h (_1, _2, _3);
++}
++v2i64
++__lsx_vshuf4i_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vshuf4i_d (_1, _2, 1);
++}
++v16i8
++__lsx_vbsrl_v (v16i8 _1)
++{
++  return __builtin_lsx_vbsrl_v (_1, 1);
++}
++v16i8
++__lsx_vbsll_v (v16i8 _1)
++{
++  return __builtin_lsx_vbsll_v (_1, 1);
++}
++v16i8
++__lsx_vextrins_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vextrins_b (_1, _2, 1);
++}
++v8i16
++__lsx_vextrins_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vextrins_h (_1, _2, 1);
++}
++v4i32
++__lsx_vextrins_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vextrins_w (_1, _2, 1);
++}
++v2i64
++__lsx_vextrins_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vextrins_d (_1, _2, 1);
++}
++v16i8
++__lsx_vmskltz_b (v16i8 _1)
++{
++  return __builtin_lsx_vmskltz_b (_1);
++}
++v8i16
++__lsx_vmskltz_h (v8i16 _1)
++{
++  return __builtin_lsx_vmskltz_h (_1);
++}
++v4i32
++__lsx_vmskltz_w (v4i32 _1)
++{
++  return __builtin_lsx_vmskltz_w (_1);
++}
++v2i64
++__lsx_vmskltz_d (v2i64 _1)
++{
++  return __builtin_lsx_vmskltz_d (_1);
++}
++v16i8
++__lsx_vsigncov_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsigncov_b (_1, _2);
++}
++v8i16
++__lsx_vsigncov_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsigncov_h (_1, _2);
++}
++v4i32
++__lsx_vsigncov_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsigncov_w (_1, _2);
++}
++v2i64
++__lsx_vsigncov_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsigncov_d (_1, _2);
++}
++v4f32
++__lsx_vfmadd_s (v4f32 _1, v4f32 _2, v4f32 _3)
++{
++  return __builtin_lsx_vfmadd_s (_1, _2, _3);
++}
++v2f64
++__lsx_vfmadd_d (v2f64 _1, v2f64 _2, v2f64 _3)
++{
++  return __builtin_lsx_vfmadd_d (_1, _2, _3);
++}
++v4f32
++__lsx_vfmsub_s (v4f32 _1, v4f32 _2, v4f32 _3)
++{
++  return __builtin_lsx_vfmsub_s (_1, _2, _3);
++}
++v2f64
++__lsx_vfmsub_d (v2f64 _1, v2f64 _2, v2f64 _3)
++{
++  return __builtin_lsx_vfmsub_d (_1, _2, _3);
++}
++v4f32
++__lsx_vfnmadd_s (v4f32 _1, v4f32 _2, v4f32 _3)
++{
++  return __builtin_lsx_vfnmadd_s (_1, _2, _3);
++}
++v2f64
++__lsx_vfnmadd_d (v2f64 _1, v2f64 _2, v2f64 _3)
++{
++  return __builtin_lsx_vfnmadd_d (_1, _2, _3);
++}
++v4f32
++__lsx_vfnmsub_s (v4f32 _1, v4f32 _2, v4f32 _3)
++{
++  return __builtin_lsx_vfnmsub_s (_1, _2, _3);
++}
++v2f64
++__lsx_vfnmsub_d (v2f64 _1, v2f64 _2, v2f64 _3)
++{
++  return __builtin_lsx_vfnmsub_d (_1, _2, _3);
++}
++v4i32
++__lsx_vftintrne_w_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrne_w_s (_1);
++}
++v2i64
++__lsx_vftintrne_l_d (v2f64 _1)
++{
++  return __builtin_lsx_vftintrne_l_d (_1);
++}
++v4i32
++__lsx_vftintrp_w_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrp_w_s (_1);
++}
++v2i64
++__lsx_vftintrp_l_d (v2f64 _1)
++{
++  return __builtin_lsx_vftintrp_l_d (_1);
++}
++v4i32
++__lsx_vftintrm_w_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrm_w_s (_1);
++}
++v2i64
++__lsx_vftintrm_l_d (v2f64 _1)
++{
++  return __builtin_lsx_vftintrm_l_d (_1);
++}
++v4i32
++__lsx_vftint_w_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vftint_w_d (_1, _2);
++}
++v4f32
++__lsx_vffint_s_l (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vffint_s_l (_1, _2);
++}
++v4i32
++__lsx_vftintrz_w_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vftintrz_w_d (_1, _2);
++}
++v4i32
++__lsx_vftintrp_w_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vftintrp_w_d (_1, _2);
++}
++v4i32
++__lsx_vftintrm_w_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vftintrm_w_d (_1, _2);
++}
++v4i32
++__lsx_vftintrne_w_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vftintrne_w_d (_1, _2);
++}
++v2i64
++__lsx_vftintl_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintl_l_s (_1);
++}
++v2i64
++__lsx_vftinth_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftinth_l_s (_1);
++}
++v2f64
++__lsx_vffinth_d_w (v4i32 _1)
++{
++  return __builtin_lsx_vffinth_d_w (_1);
++}
++v2f64
++__lsx_vffintl_d_w (v4i32 _1)
++{
++  return __builtin_lsx_vffintl_d_w (_1);
++}
++v2i64
++__lsx_vftintrzl_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrzl_l_s (_1);
++}
++v2i64
++__lsx_vftintrzh_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrzh_l_s (_1);
++}
++v2i64
++__lsx_vftintrpl_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrpl_l_s (_1);
++}
++v2i64
++__lsx_vftintrph_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrph_l_s (_1);
++}
++v2i64
++__lsx_vftintrml_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrml_l_s (_1);
++}
++v2i64
++__lsx_vftintrmh_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrmh_l_s (_1);
++}
++v2i64
++__lsx_vftintrnel_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrnel_l_s (_1);
++}
++v2i64
++__lsx_vftintrneh_l_s (v4f32 _1)
++{
++  return __builtin_lsx_vftintrneh_l_s (_1);
++}
++v4f32
++__lsx_vfrintrne_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrintrne_s (_1);
++}
++v2f64
++__lsx_vfrintrne_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrintrne_d (_1);
++}
++v4f32
++__lsx_vfrintrz_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrintrz_s (_1);
++}
++v2f64
++__lsx_vfrintrz_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrintrz_d (_1);
++}
++v4f32
++__lsx_vfrintrp_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrintrp_s (_1);
++}
++v2f64
++__lsx_vfrintrp_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrintrp_d (_1);
++}
++v4f32
++__lsx_vfrintrm_s (v4f32 _1)
++{
++  return __builtin_lsx_vfrintrm_s (_1);
++}
++v2f64
++__lsx_vfrintrm_d (v2f64 _1)
++{
++  return __builtin_lsx_vfrintrm_d (_1);
++}
++void
++__lsx_vstelm_b (v16i8 _1, void *_2)
++{
++  return __builtin_lsx_vstelm_b (_1, _2, 1, 1);
++}
++void
++__lsx_vstelm_h (v8i16 _1, void *_2)
++{
++  return __builtin_lsx_vstelm_h (_1, _2, 2, 1);
++}
++void
++__lsx_vstelm_w (v4i32 _1, void *_2)
++{
++  return __builtin_lsx_vstelm_w (_1, _2, 4, 1);
++}
++void
++__lsx_vstelm_d (v2i64 _1, void *_2)
++{
++  return __builtin_lsx_vstelm_d (_1, _2, 8, 1);
++}
++v2i64
++__lsx_vaddwev_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vaddwev_d_w (_1, _2);
++}
++v4i32
++__lsx_vaddwev_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vaddwev_w_h (_1, _2);
++}
++v8i16
++__lsx_vaddwev_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vaddwev_h_b (_1, _2);
++}
++v2i64
++__lsx_vaddwod_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vaddwod_d_w (_1, _2);
++}
++v4i32
++__lsx_vaddwod_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vaddwod_w_h (_1, _2);
++}
++v8i16
++__lsx_vaddwod_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vaddwod_h_b (_1, _2);
++}
++v2i64
++__lsx_vaddwev_d_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vaddwev_d_wu (_1, _2);
++}
++v4i32
++__lsx_vaddwev_w_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vaddwev_w_hu (_1, _2);
++}
++v8i16
++__lsx_vaddwev_h_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vaddwev_h_bu (_1, _2);
++}
++v2i64
++__lsx_vaddwod_d_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vaddwod_d_wu (_1, _2);
++}
++v4i32
++__lsx_vaddwod_w_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vaddwod_w_hu (_1, _2);
++}
++v8i16
++__lsx_vaddwod_h_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vaddwod_h_bu (_1, _2);
++}
++v2i64
++__lsx_vaddwev_d_wu_w (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vaddwev_d_wu_w (_1, _2);
++}
++v4i32
++__lsx_vaddwev_w_hu_h (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vaddwev_w_hu_h (_1, _2);
++}
++v8i16
++__lsx_vaddwev_h_bu_b (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vaddwev_h_bu_b (_1, _2);
++}
++v2i64
++__lsx_vaddwod_d_wu_w (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vaddwod_d_wu_w (_1, _2);
++}
++v4i32
++__lsx_vaddwod_w_hu_h (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vaddwod_w_hu_h (_1, _2);
++}
++v8i16
++__lsx_vaddwod_h_bu_b (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vaddwod_h_bu_b (_1, _2);
++}
++v2i64
++__lsx_vsubwev_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsubwev_d_w (_1, _2);
++}
++v4i32
++__lsx_vsubwev_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsubwev_w_h (_1, _2);
++}
++v8i16
++__lsx_vsubwev_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsubwev_h_b (_1, _2);
++}
++v2i64
++__lsx_vsubwod_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsubwod_d_w (_1, _2);
++}
++v4i32
++__lsx_vsubwod_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsubwod_w_h (_1, _2);
++}
++v8i16
++__lsx_vsubwod_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsubwod_h_b (_1, _2);
++}
++v2i64
++__lsx_vsubwev_d_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vsubwev_d_wu (_1, _2);
++}
++v4i32
++__lsx_vsubwev_w_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vsubwev_w_hu (_1, _2);
++}
++v8i16
++__lsx_vsubwev_h_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vsubwev_h_bu (_1, _2);
++}
++v2i64
++__lsx_vsubwod_d_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vsubwod_d_wu (_1, _2);
++}
++v4i32
++__lsx_vsubwod_w_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vsubwod_w_hu (_1, _2);
++}
++v8i16
++__lsx_vsubwod_h_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vsubwod_h_bu (_1, _2);
++}
++v2i64
++__lsx_vaddwev_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vaddwev_q_d (_1, _2);
++}
++v2i64
++__lsx_vaddwod_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vaddwod_q_d (_1, _2);
++}
++v2i64
++__lsx_vaddwev_q_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vaddwev_q_du (_1, _2);
++}
++v2i64
++__lsx_vaddwod_q_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vaddwod_q_du (_1, _2);
++}
++v2i64
++__lsx_vsubwev_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsubwev_q_d (_1, _2);
++}
++v2i64
++__lsx_vsubwod_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsubwod_q_d (_1, _2);
++}
++v2i64
++__lsx_vsubwev_q_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vsubwev_q_du (_1, _2);
++}
++v2i64
++__lsx_vsubwod_q_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vsubwod_q_du (_1, _2);
++}
++v2i64
++__lsx_vaddwev_q_du_d (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vaddwev_q_du_d (_1, _2);
++}
++v2i64
++__lsx_vaddwod_q_du_d (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vaddwod_q_du_d (_1, _2);
++}
++v2i64
++__lsx_vmulwev_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmulwev_d_w (_1, _2);
++}
++v4i32
++__lsx_vmulwev_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmulwev_w_h (_1, _2);
++}
++v8i16
++__lsx_vmulwev_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmulwev_h_b (_1, _2);
++}
++v2i64
++__lsx_vmulwod_d_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmulwod_d_w (_1, _2);
++}
++v4i32
++__lsx_vmulwod_w_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmulwod_w_h (_1, _2);
++}
++v8i16
++__lsx_vmulwod_h_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmulwod_h_b (_1, _2);
++}
++v2i64
++__lsx_vmulwev_d_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vmulwev_d_wu (_1, _2);
++}
++v4i32
++__lsx_vmulwev_w_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vmulwev_w_hu (_1, _2);
++}
++v8i16
++__lsx_vmulwev_h_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vmulwev_h_bu (_1, _2);
++}
++v2i64
++__lsx_vmulwod_d_wu (v4u32 _1, v4u32 _2)
++{
++  return __builtin_lsx_vmulwod_d_wu (_1, _2);
++}
++v4i32
++__lsx_vmulwod_w_hu (v8u16 _1, v8u16 _2)
++{
++  return __builtin_lsx_vmulwod_w_hu (_1, _2);
++}
++v8i16
++__lsx_vmulwod_h_bu (v16u8 _1, v16u8 _2)
++{
++  return __builtin_lsx_vmulwod_h_bu (_1, _2);
++}
++v2i64
++__lsx_vmulwev_d_wu_w (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmulwev_d_wu_w (_1, _2);
++}
++v4i32
++__lsx_vmulwev_w_hu_h (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmulwev_w_hu_h (_1, _2);
++}
++v8i16
++__lsx_vmulwev_h_bu_b (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmulwev_h_bu_b (_1, _2);
++}
++v2i64
++__lsx_vmulwod_d_wu_w (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vmulwod_d_wu_w (_1, _2);
++}
++v4i32
++__lsx_vmulwod_w_hu_h (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vmulwod_w_hu_h (_1, _2);
++}
++v8i16
++__lsx_vmulwod_h_bu_b (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vmulwod_h_bu_b (_1, _2);
++}
++v2i64
++__lsx_vmulwev_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmulwev_q_d (_1, _2);
++}
++v2i64
++__lsx_vmulwod_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmulwod_q_d (_1, _2);
++}
++v2i64
++__lsx_vmulwev_q_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vmulwev_q_du (_1, _2);
++}
++v2i64
++__lsx_vmulwod_q_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vmulwod_q_du (_1, _2);
++}
++v2i64
++__lsx_vmulwev_q_du_d (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmulwev_q_du_d (_1, _2);
++}
++v2i64
++__lsx_vmulwod_q_du_d (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vmulwod_q_du_d (_1, _2);
++}
++v2i64
++__lsx_vhaddw_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vhaddw_q_d (_1, _2);
++}
++v2u64
++__lsx_vhaddw_qu_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vhaddw_qu_du (_1, _2);
++}
++v2i64
++__lsx_vhsubw_q_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vhsubw_q_d (_1, _2);
++}
++v2u64
++__lsx_vhsubw_qu_du (v2u64 _1, v2u64 _2)
++{
++  return __builtin_lsx_vhsubw_qu_du (_1, _2);
++}
++v2i64
++__lsx_vmaddwev_d_w (v2i64 _1, v4i32 _2, v4i32 _3)
++{
++  return __builtin_lsx_vmaddwev_d_w (_1, _2, _3);
++}
++v4i32
++__lsx_vmaddwev_w_h (v4i32 _1, v8i16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vmaddwev_w_h (_1, _2, _3);
++}
++v8i16
++__lsx_vmaddwev_h_b (v8i16 _1, v16i8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vmaddwev_h_b (_1, _2, _3);
++}
++v2u64
++__lsx_vmaddwev_d_wu (v2u64 _1, v4u32 _2, v4u32 _3)
++{
++  return __builtin_lsx_vmaddwev_d_wu (_1, _2, _3);
++}
++v4u32
++__lsx_vmaddwev_w_hu (v4u32 _1, v8u16 _2, v8u16 _3)
++{
++  return __builtin_lsx_vmaddwev_w_hu (_1, _2, _3);
++}
++v8u16
++__lsx_vmaddwev_h_bu (v8u16 _1, v16u8 _2, v16u8 _3)
++{
++  return __builtin_lsx_vmaddwev_h_bu (_1, _2, _3);
++}
++v2i64
++__lsx_vmaddwod_d_w (v2i64 _1, v4i32 _2, v4i32 _3)
++{
++  return __builtin_lsx_vmaddwod_d_w (_1, _2, _3);
++}
++v4i32
++__lsx_vmaddwod_w_h (v4i32 _1, v8i16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vmaddwod_w_h (_1, _2, _3);
++}
++v8i16
++__lsx_vmaddwod_h_b (v8i16 _1, v16i8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vmaddwod_h_b (_1, _2, _3);
++}
++v2u64
++__lsx_vmaddwod_d_wu (v2u64 _1, v4u32 _2, v4u32 _3)
++{
++  return __builtin_lsx_vmaddwod_d_wu (_1, _2, _3);
++}
++v4u32
++__lsx_vmaddwod_w_hu (v4u32 _1, v8u16 _2, v8u16 _3)
++{
++  return __builtin_lsx_vmaddwod_w_hu (_1, _2, _3);
++}
++v8u16
++__lsx_vmaddwod_h_bu (v8u16 _1, v16u8 _2, v16u8 _3)
++{
++  return __builtin_lsx_vmaddwod_h_bu (_1, _2, _3);
++}
++v2i64
++__lsx_vmaddwev_d_wu_w (v2i64 _1, v4u32 _2, v4i32 _3)
++{
++  return __builtin_lsx_vmaddwev_d_wu_w (_1, _2, _3);
++}
++v4i32
++__lsx_vmaddwev_w_hu_h (v4i32 _1, v8u16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vmaddwev_w_hu_h (_1, _2, _3);
++}
++v8i16
++__lsx_vmaddwev_h_bu_b (v8i16 _1, v16u8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vmaddwev_h_bu_b (_1, _2, _3);
++}
++v2i64
++__lsx_vmaddwod_d_wu_w (v2i64 _1, v4u32 _2, v4i32 _3)
++{
++  return __builtin_lsx_vmaddwod_d_wu_w (_1, _2, _3);
++}
++v4i32
++__lsx_vmaddwod_w_hu_h (v4i32 _1, v8u16 _2, v8i16 _3)
++{
++  return __builtin_lsx_vmaddwod_w_hu_h (_1, _2, _3);
++}
++v8i16
++__lsx_vmaddwod_h_bu_b (v8i16 _1, v16u8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vmaddwod_h_bu_b (_1, _2, _3);
++}
++v2i64
++__lsx_vmaddwev_q_d (v2i64 _1, v2i64 _2, v2i64 _3)
++{
++  return __builtin_lsx_vmaddwev_q_d (_1, _2, _3);
++}
++v2i64
++__lsx_vmaddwod_q_d (v2i64 _1, v2i64 _2, v2i64 _3)
++{
++  return __builtin_lsx_vmaddwod_q_d (_1, _2, _3);
++}
++v2u64
++__lsx_vmaddwev_q_du (v2u64 _1, v2u64 _2, v2u64 _3)
++{
++  return __builtin_lsx_vmaddwev_q_du (_1, _2, _3);
++}
++v2u64
++__lsx_vmaddwod_q_du (v2u64 _1, v2u64 _2, v2u64 _3)
++{
++  return __builtin_lsx_vmaddwod_q_du (_1, _2, _3);
++}
++v2i64
++__lsx_vmaddwev_q_du_d (v2i64 _1, v2u64 _2, v2i64 _3)
++{
++  return __builtin_lsx_vmaddwev_q_du_d (_1, _2, _3);
++}
++v2i64
++__lsx_vmaddwod_q_du_d (v2i64 _1, v2u64 _2, v2i64 _3)
++{
++  return __builtin_lsx_vmaddwod_q_du_d (_1, _2, _3);
++}
++v16i8
++__lsx_vrotr_b (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vrotr_b (_1, _2);
++}
++v8i16
++__lsx_vrotr_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vrotr_h (_1, _2);
++}
++v4i32
++__lsx_vrotr_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vrotr_w (_1, _2);
++}
++v2i64
++__lsx_vrotr_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vrotr_d (_1, _2);
++}
++v2i64
++__lsx_vadd_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vadd_q (_1, _2);
++}
++v2i64
++__lsx_vsub_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsub_q (_1, _2);
++}
++v16i8
++__lsx_vldrepl_b (void *_1)
++{
++  return __builtin_lsx_vldrepl_b (_1, 1);
++}
++v8i16
++__lsx_vldrepl_h (void *_1)
++{
++  return __builtin_lsx_vldrepl_h (_1, 2);
++}
++v4i32
++__lsx_vldrepl_w (void *_1)
++{
++  return __builtin_lsx_vldrepl_w (_1, 4);
++}
++v2i64
++__lsx_vldrepl_d (void *_1)
++{
++  return __builtin_lsx_vldrepl_d (_1, 8);
++}
++v16i8
++__lsx_vmskgez_b (v16i8 _1)
++{
++  return __builtin_lsx_vmskgez_b (_1);
++}
++v16i8
++__lsx_vmsknz_b (v16i8 _1)
++{
++  return __builtin_lsx_vmsknz_b (_1);
++}
++v8i16
++__lsx_vexth_h_b (v16i8 _1)
++{
++  return __builtin_lsx_vexth_h_b (_1);
++}
++v4i32
++__lsx_vexth_w_h (v8i16 _1)
++{
++  return __builtin_lsx_vexth_w_h (_1);
++}
++v2i64
++__lsx_vexth_d_w (v4i32 _1)
++{
++  return __builtin_lsx_vexth_d_w (_1);
++}
++v2i64
++__lsx_vexth_q_d (v2i64 _1)
++{
++  return __builtin_lsx_vexth_q_d (_1);
++}
++v8u16
++__lsx_vexth_hu_bu (v16u8 _1)
++{
++  return __builtin_lsx_vexth_hu_bu (_1);
++}
++v4u32
++__lsx_vexth_wu_hu (v8u16 _1)
++{
++  return __builtin_lsx_vexth_wu_hu (_1);
++}
++v2u64
++__lsx_vexth_du_wu (v4u32 _1)
++{
++  return __builtin_lsx_vexth_du_wu (_1);
++}
++v2u64
++__lsx_vexth_qu_du (v2u64 _1)
++{
++  return __builtin_lsx_vexth_qu_du (_1);
++}
++v16i8
++__lsx_vrotri_b (v16i8 _1)
++{
++  return __builtin_lsx_vrotri_b (_1, 1);
++}
++v8i16
++__lsx_vrotri_h (v8i16 _1)
++{
++  return __builtin_lsx_vrotri_h (_1, 1);
++}
++v4i32
++__lsx_vrotri_w (v4i32 _1)
++{
++  return __builtin_lsx_vrotri_w (_1, 1);
++}
++v2i64
++__lsx_vrotri_d (v2i64 _1)
++{
++  return __builtin_lsx_vrotri_d (_1, 1);
++}
++v2i64
++__lsx_vextl_q_d (v2i64 _1)
++{
++  return __builtin_lsx_vextl_q_d (_1);
++}
++v16i8
++__lsx_vsrlni_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsrlni_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vsrlni_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrlni_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vsrlni_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrlni_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vsrlni_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrlni_d_q (_1, _2, 1);
++}
++v16i8
++__lsx_vsrlrni_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsrlrni_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vsrlrni_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrlrni_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vsrlrni_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrlrni_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vsrlrni_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrlrni_d_q (_1, _2, 1);
++}
++v16i8
++__lsx_vssrlni_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrlni_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vssrlni_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrlni_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vssrlni_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrlni_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vssrlni_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrlni_d_q (_1, _2, 1);
++}
++v16u8
++__lsx_vssrlni_bu_h (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrlni_bu_h (_1, _2, 1);
++}
++v8u16
++__lsx_vssrlni_hu_w (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrlni_hu_w (_1, _2, 1);
++}
++v4u32
++__lsx_vssrlni_wu_d (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrlni_wu_d (_1, _2, 1);
++}
++v2u64
++__lsx_vssrlni_du_q (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrlni_du_q (_1, _2, 1);
++}
++v16i8
++__lsx_vssrlrni_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrlrni_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vssrlrni_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrlrni_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vssrlrni_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrlrni_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vssrlrni_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrlrni_d_q (_1, _2, 1);
++}
++v16u8
++__lsx_vssrlrni_bu_h (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrlrni_bu_h (_1, _2, 1);
++}
++v8u16
++__lsx_vssrlrni_hu_w (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrlrni_hu_w (_1, _2, 1);
++}
++v4u32
++__lsx_vssrlrni_wu_d (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrlrni_wu_d (_1, _2, 1);
++}
++v2u64
++__lsx_vssrlrni_du_q (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrlrni_du_q (_1, _2, 1);
++}
++v16i8
++__lsx_vsrani_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsrani_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vsrani_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrani_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vsrani_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrani_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vsrani_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrani_d_q (_1, _2, 1);
++}
++v16i8
++__lsx_vsrarni_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vsrarni_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vsrarni_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vsrarni_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vsrarni_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vsrarni_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vsrarni_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vsrarni_d_q (_1, _2, 1);
++}
++v16i8
++__lsx_vssrani_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrani_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vssrani_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrani_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vssrani_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrani_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vssrani_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrani_d_q (_1, _2, 1);
++}
++v16u8
++__lsx_vssrani_bu_h (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrani_bu_h (_1, _2, 1);
++}
++v8u16
++__lsx_vssrani_hu_w (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrani_hu_w (_1, _2, 1);
++}
++v4u32
++__lsx_vssrani_wu_d (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrani_wu_d (_1, _2, 1);
++}
++v2u64
++__lsx_vssrani_du_q (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrani_du_q (_1, _2, 1);
++}
++v16i8
++__lsx_vssrarni_b_h (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrarni_b_h (_1, _2, 1);
++}
++v8i16
++__lsx_vssrarni_h_w (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrarni_h_w (_1, _2, 1);
++}
++v4i32
++__lsx_vssrarni_w_d (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrarni_w_d (_1, _2, 1);
++}
++v2i64
++__lsx_vssrarni_d_q (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrarni_d_q (_1, _2, 1);
++}
++v16u8
++__lsx_vssrarni_bu_h (v16u8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vssrarni_bu_h (_1, _2, 1);
++}
++v8u16
++__lsx_vssrarni_hu_w (v8u16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrarni_hu_w (_1, _2, 1);
++}
++v4u32
++__lsx_vssrarni_wu_d (v4u32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrarni_wu_d (_1, _2, 1);
++}
++v2u64
++__lsx_vssrarni_du_q (v2u64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrarni_du_q (_1, _2, 1);
++}
++v4i32
++__lsx_vpermi_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vpermi_w (_1, _2, 1);
++}
++v16i8
++__lsx_vld (void *_1)
++{
++  return __builtin_lsx_vld (_1, 1);
++}
++void
++__lsx_vst (v16i8 _1, void *_2)
++{
++  return __builtin_lsx_vst (_1, _2, 1);
++}
++v16i8
++__lsx_vssrlrn_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrlrn_b_h (_1, _2);
++}
++v8i16
++__lsx_vssrlrn_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrlrn_h_w (_1, _2);
++}
++v4i32
++__lsx_vssrlrn_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrlrn_w_d (_1, _2);
++}
++v16i8
++__lsx_vssrln_b_h (v8i16 _1, v8i16 _2)
++{
++  return __builtin_lsx_vssrln_b_h (_1, _2);
++}
++v8i16
++__lsx_vssrln_h_w (v4i32 _1, v4i32 _2)
++{
++  return __builtin_lsx_vssrln_h_w (_1, _2);
++}
++v4i32
++__lsx_vssrln_w_d (v2i64 _1, v2i64 _2)
++{
++  return __builtin_lsx_vssrln_w_d (_1, _2);
++}
++v16i8
++__lsx_vorn_v (v16i8 _1, v16i8 _2)
++{
++  return __builtin_lsx_vorn_v (_1, _2);
++}
++v2i64
++__lsx_vldi ()
++{
++  return __builtin_lsx_vldi (1);
++}
++v16i8
++__lsx_vshuf_b (v16i8 _1, v16i8 _2, v16i8 _3)
++{
++  return __builtin_lsx_vshuf_b (_1, _2, _3);
++}
++v16i8
++__lsx_vldx (void *_1)
++{
++  return __builtin_lsx_vldx (_1, 1);
++}
++void
++__lsx_vstx (v16i8 _1, void *_2)
++{
++  return __builtin_lsx_vstx (_1, _2, 1);
++}
++v2u64
++__lsx_vextl_qu_du (v2u64 _1)
++{
++  return __builtin_lsx_vextl_qu_du (_1);
++}
++int
++__lsx_bnz_b (v16u8 _1)
++{
++  return __builtin_lsx_bnz_b (_1);
++}
++int
++__lsx_bnz_d (v2u64 _1)
++{
++  return __builtin_lsx_bnz_d (_1);
++}
++int
++__lsx_bnz_h (v8u16 _1)
++{
++  return __builtin_lsx_bnz_h (_1);
++}
++int
++__lsx_bnz_v (v16u8 _1)
++{
++  return __builtin_lsx_bnz_v (_1);
++}
++int
++__lsx_bnz_w (v4u32 _1)
++{
++  return __builtin_lsx_bnz_w (_1);
++}
++int
++__lsx_bz_b (v16u8 _1)
++{
++  return __builtin_lsx_bz_b (_1);
++}
++int
++__lsx_bz_d (v2u64 _1)
++{
++  return __builtin_lsx_bz_d (_1);
++}
++int
++__lsx_bz_h (v8u16 _1)
++{
++  return __builtin_lsx_bz_h (_1);
++}
++int
++__lsx_bz_v (v16u8 _1)
++{
++  return __builtin_lsx_bz_v (_1);
++}
++int
++__lsx_bz_w (v4u32 _1)
++{
++  return __builtin_lsx_bz_w (_1);
++}
++v2i64
++__lsx_vfcmp_caf_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_caf_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_caf_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_caf_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_ceq_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_ceq_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_ceq_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_ceq_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cle_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cle_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cle_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cle_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_clt_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_clt_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_clt_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_clt_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cne_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cne_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cne_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cne_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cor_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cor_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cor_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cor_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cueq_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cueq_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cueq_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cueq_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cule_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cule_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cule_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cule_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cult_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cult_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cult_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cult_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cun_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cun_d (_1, _2);
++}
++v2i64
++__lsx_vfcmp_cune_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_cune_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cune_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cune_s (_1, _2);
++}
++v4i32
++__lsx_vfcmp_cun_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_cun_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_saf_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_saf_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_saf_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_saf_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_seq_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_seq_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_seq_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_seq_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sle_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sle_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sle_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sle_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_slt_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_slt_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_slt_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_slt_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sne_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sne_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sne_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sne_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sor_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sor_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sor_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sor_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sueq_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sueq_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sueq_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sueq_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sule_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sule_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sule_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sule_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sult_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sult_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sult_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sult_s (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sun_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sun_d (_1, _2);
++}
++v2i64
++__lsx_vfcmp_sune_d (v2f64 _1, v2f64 _2)
++{
++  return __builtin_lsx_vfcmp_sune_d (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sune_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sune_s (_1, _2);
++}
++v4i32
++__lsx_vfcmp_sun_s (v4f32 _1, v4f32 _2)
++{
++  return __builtin_lsx_vfcmp_sun_s (_1, _2);
++}
++v16i8
++__lsx_vrepli_b ()
++{
++  return __builtin_lsx_vrepli_b (1);
++}
++v2i64
++__lsx_vrepli_d ()
++{
++  return __builtin_lsx_vrepli_d (1);
++}
++v8i16
++__lsx_vrepli_h ()
++{
++  return __builtin_lsx_vrepli_h (1);
++}
++v4i32
++__lsx_vrepli_w ()
++{
++  return __builtin_lsx_vrepli_w (1);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-addition-instructi.patch b/LoongArch-Add-tests-for-SX-vector-addition-instructi.patch
new file mode 100644
index 0000000000000000000000000000000000000000..441b2c0b2556835ed09d77f119cd08bd01b619d6
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-addition-instructi.patch
@@ -0,0 +1,7181 @@
+From 2cb3122527add8fee54dca91824d82a02d5602e3 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 09:58:48 +0800
+Subject: [PATCH 080/124] LoongArch: Add tests for SX vector addition
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vadd.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vadda.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmadd.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vadd.c           | 416 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vadda.c          | 344 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vaddi.c          | 251 +++++++++
+ .../loongarch/vector/lsx/lsx-vaddwev-1.c      | 335 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vaddwev-2.c      | 344 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vaddwev-3.c      | 425 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vaddwod-1.c      | 408 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vaddwod-2.c      | 344 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vaddwod-3.c      | 237 +++++++++
+ .../loongarch/vector/lsx/lsx-vhaddw-1.c       | 488 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vhaddw-2.c       | 452 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmadd.c          | 450 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmaddwev-1.c     | 472 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmaddwev-2.c     | 383 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmaddwev-3.c     | 383 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmaddwod-1.c     | 372 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vmaddwod-2.c     | 438 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmaddwod-3.c     | 460 +++++++++++++++++
+ 18 files changed, 7002 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
+new file mode 100644
+index 000000000..7cfb989e4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadd.c
+@@ -0,0 +1,416 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002010000fc000b;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000017fda829;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000001fffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f00107f04;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f0000fd7f0000fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x7e7e7e7eff0f7f04;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f0000fd7f01fffb;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x195f307a5d04acbb;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6a1a3fbb3c90260e;
++  *((unsigned long *)&__m128i_result[1]) = 0x19df307a5d04acbb;
++  *((unsigned long *)&__m128i_result[0]) = 0x5ed032b06bde1ab6;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5555001400005111;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffabbeab55110140;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5555001400005111;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffabbeab55110140;
++  *((unsigned long *)&__m128i_result[1]) = 0xaaaa00280000a222;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe567c56aa220280;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf51cf8dad6040188;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0982e2daf234ed87;
++  *((unsigned long *)&__m128i_result[1]) = 0xf51cf8dad6040188;
++  *((unsigned long *)&__m128i_result[0]) = 0x0982e2daf234ed87;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000073;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000002a;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000049000000c0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffff29;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000bd30;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000d7fff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000007a6d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000dfefe0000;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefa000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0038000000051fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003c000000022021;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f370101ff04ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f3bffffa0226021;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1baf8eabd26bc629;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1c2640b9a8e9fb49;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002dab8746acf8e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00036dd1c5c15856;
++  *((unsigned long *)&__m128i_result[1]) = 0x1bb1686346d595b7;
++  *((unsigned long *)&__m128i_result[0]) = 0x1c29ad8a6daa539f;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffff0002;
++  __m128i_out = __lsx_vadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff800000c3080000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff81ffffc3080000;
++  __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x004200a000200001;
++  __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001f0000001f;
++  __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0029aeaca57d74e6;
++  *((unsigned long *)&__m128i_op0[0]) = 0xdbe332365392c686;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000056f64adb9464;
++  *((unsigned long *)&__m128i_op1[0]) = 0x29ca096f235819c2;
++  *((unsigned long *)&__m128i_result[1]) = 0x002a05a2f059094a;
++  *((unsigned long *)&__m128i_result[0]) = 0x05ad3ba576eae048;
++  __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000040d;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001000000ff;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000002fffffffb;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010000fffb;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000060000000e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001201fe01e9;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001201fe01e9;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000c0000001c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002403fc03d2;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff1000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff1000100010001;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa;
++  *((unsigned long *)&__m128i_result[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_result[0]) = 0xa352bfac9269e0aa;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001001100110068;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001001100110067;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x379674c000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3789f68000000000;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000555889;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000002580f01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00060fbf02040fbf;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00020fbf02000fbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x00060fbf02596848;
++  *((unsigned long *)&__m128i_result[0]) = 0x00020fbf04581ec0;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001388928513889;
++  *((unsigned long *)&__m128i_op0[0]) = 0x006938094a013889;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001388928513889;
++  *((unsigned long *)&__m128i_op1[0]) = 0x006938094a013889;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002711250a27112;
++  *((unsigned long *)&__m128i_result[0]) = 0x00d2701294027112;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_result[1]) = 0x202544f490f2de35;
++  *((unsigned long *)&__m128i_result[0]) = 0x202544f490f2de35;
++  __m128i_out = __lsx_vadd_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
+new file mode 100644
+index 000000000..4bb699eab
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vadda.c
+@@ -0,0 +1,344 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_result[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_result[0]) = 0x52527d7d52527d7d;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffc001f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010202050120;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010102020202;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0003000300030003;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000700020005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0003000300030003;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000700020005;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4f8000004f800000;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0003000300030004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000300030004;
++  __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf0fd800080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000a00028004000;
++  *((unsigned long *)&__m128i_result[1]) = 0x6b9fe3649c9d6363;
++  *((unsigned long *)&__m128i_result[0]) = 0x6363bc9e8b696363;
++  __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x1111113111111131;
++  *((unsigned long *)&__m128i_result[0]) = 0x1111113111111131;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000006a9a5c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000092444;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000006a9a5c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000092444;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000d4ccb8;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000124888;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff082f000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000f7d1000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x773324887fffffff;
++  __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5a6f5c53ebed3faa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa36aca4435b8b8e1;
++  *((unsigned long *)&__m128i_result[1]) = 0x5a6f61865d36d3aa;
++  *((unsigned long *)&__m128i_result[0]) = 0x7bea6962a0bfb621;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000008140c80;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000fffe0000ff45;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff000000b9;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffd5002affffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x343d8dc6b0ed5a08;
++  *((unsigned long *)&__m128i_result[1]) = 0x012b012c01010246;
++  *((unsigned long *)&__m128i_result[0]) = 0x353e743b50135a4f;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003c853c843c87e;
++  __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe000ffdf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000200000002001;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000001fff0021;
++  __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010109;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000005452505;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000004442403e4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000005452505;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000044525043c;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xbafebb00ffd500fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000208000002080;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003f0000003f0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f0000003f0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x803e0000803e0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x803e0000803e0000;
++  __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000008000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000008000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800000008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000008000;
++  __m128i_out = __lsx_vadda_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff9000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc000400000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0007001400000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004001000000000;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefeff00fefeff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefeff00fefeff00;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000024170000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000020300000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000044470000;
++  __m128i_out = __lsx_vadda_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff01ff01ac025c87;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff01ff01ac465ca1;
++  *((unsigned long *)&__m128i_result[1]) = 0x64616462b76106dc;
++  *((unsigned long *)&__m128i_result[0]) = 0x64616462b71d06c2;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffaeffaeffaeffae;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffaeffaeffaeffae;
++  *((unsigned long *)&__m128i_result[1]) = 0x0051005200510052;
++  *((unsigned long *)&__m128i_result[0]) = 0x0051005200510052;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4480000044800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x45c0000044800000;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[0]) = 0x6363636463636363;
++  __m128i_out = __lsx_vadda_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
+new file mode 100644
+index 000000000..77afabe92
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddi.c
+@@ -0,0 +1,251 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x1414141414141415;
++  *((unsigned long *)&__m128i_result[0]) = 0x1414141414141415;
++  __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0505050505050505;
++  *((unsigned long *)&__m128i_result[0]) = 0x0505050504040404;
++  __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80;
++  *((unsigned long *)&__m128i_result[1]) = 0x1f1f1f1f1f1f1f1f;
++  *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f27332b9f;
++  __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_result[0]) = 0x0303030303030304;
++  __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x8f8f8f8f8f8f8f8f;
++  *((unsigned long *)&__m128i_result[0]) = 0x8f8f8f8f8f8f8f8f;
++  __m128i_out = __lsx_vaddi_bu (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0018001800180018;
++  *((unsigned long *)&__m128i_result[0]) = 0x0018001800180018;
++  __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0019081900190019;
++  *((unsigned long *)&__m128i_result[0]) = 0x0019081900190019;
++  __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a;
++  __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc1000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffcc000b000b000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000b000b010a000b;
++  __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001f001f001f001f;
++  *((unsigned long *)&__m128i_result[0]) = 0x001f001f001f001f;
++  __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001c001c001c001c;
++  *((unsigned long *)&__m128i_result[0]) = 0x001c001c001c001c;
++  __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x680485c8b304b019;
++  *((unsigned long *)&__m128i_result[0]) = 0xc89d7f0fed582019;
++  __m128i_out = __lsx_vaddi_hu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000a0000000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000a0000000a;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000090100000a;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe009ffff2008;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000300000003;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01;
++  *((unsigned long *)&__m128i_result[1]) = 0xfc01fd13fc02fe0c;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe00fd14fe01fd16;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000c0000bd49;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000c7fff000c;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000500000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000005fffe0006;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000fffffeff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000009ffffff08;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000900000009;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8;
++  *((unsigned long *)&__m128i_result[1]) = 0x55aa55c355aa55c4;
++  *((unsigned long *)&__m128i_result[0]) = 0xaa55556f55aaaac1;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000002e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000004e;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x003f000400000003;
++  *((unsigned long *)&__m128i_result[0]) = 0x003f000400000003;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000900000009;
++  *((unsigned long *)&__m128i_result[0]) = 0xff80000a0f800009;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220;
++  *((unsigned long *)&__m128i_result[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_result[0]) = 0x020310edc003023d;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd7059f7fd70;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m128i_result[0]) = 0x59f7fd8759f7fd87;
++  __m128i_out = __lsx_vaddi_wu (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6420e0208400c4c4;
++  *((unsigned long *)&__m128i_op0[0]) = 0x20c4e0c4e0da647a;
++  *((unsigned long *)&__m128i_result[1]) = 0x6420e0208400c4e3;
++  *((unsigned long *)&__m128i_result[0]) = 0x20c4e0c4e0da6499;
++  __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x21201f1e1d001b25;
++  *((unsigned long *)&__m128i_result[0]) = 0x191817161514131d;
++  __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014;
++  __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007770ffff941d;
++  __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016;
++  __m128i_out = __lsx_vaddi_du (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000080000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b;
++  __m128i_out = __lsx_vaddi_du (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
+new file mode 100644
+index 000000000..b7b16a325
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-1.c
+@@ -0,0 +1,335 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fffffff80000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00003ffd000a4000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffcffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fffd000a0000;
++  __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff000000ff00ff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000049ffffff4d;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff01ffffffff;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000005e695e95;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5e695e96c396b402;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000005e94;
++  *((unsigned long *)&__m128i_result[0]) = 0x00005e96ffffb402;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffb;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffb;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000100000000fc;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000100000000fc;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000005d5d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000005d5d;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffe3636363;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000063692363;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0202020202020203;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0202020202020203;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000002020202;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000002020202;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1817161517161514;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1615141315141312;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x76f424887fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000017161515;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000095141311;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000fffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfdfef9ff0efff900;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffcfd000000fb00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001fe00f8000700;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000fb01;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000007000000;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000080806362;
++  *((unsigned long *)&__m128i_op1[0]) = 0x807f808000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff80806362;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000010002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff960015;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000010002;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffff960015;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000047e59090;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffb8145f50;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00bbfff7fffffff7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff008ff820;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00bbfff7fffffff7;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff008ff820;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff040;
++  __m128i_out = __lsx_vaddwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000100010001fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100010001fffd;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe700000007;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000d46cdc13;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000d46cdc13;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe813f00fe813f00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe813f00fe813f00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe813f00fe813f00;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe;
++  __m128i_out = __lsx_vaddwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
+new file mode 100644
+index 000000000..a407cadfb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-2.c
+@@ -0,0 +1,344 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x061006100613030c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4d6814ef9c77ce46;
++  *((unsigned long *)&__m128i_result[1]) = 0x010f010f0112010b;
++  *((unsigned long *)&__m128i_result[0]) = 0x016701ee01760145;
++  __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffac0a000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ac00000000;
++  __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf589caff5605f2fa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000eb00ab;
++  *((unsigned long *)&__m128i_result[0]) = 0x017400ff004500fa;
++  __m128i_out = __lsx_vaddwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000017d7000001e4;
++  *((unsigned long *)&__m128i_result[0]) = 0x000016d10000012b;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff3fbfffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100fe000100fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001c8520000c97d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001c8520001c87d;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffac0a000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000085af0000b000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00017ea200002000;
++  __m128i_out = __lsx_vaddwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000024;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000024;
++  __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00307028003f80b0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0040007fff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000003f80b0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000;
++  __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f;
++  __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff80ffffff80ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000018080807f;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001ffff80fe;
++  __m128i_out = __lsx_vaddwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff8000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff8000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff8000000000;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000180100100000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000b5207f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001801b5307f80;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff8007;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffd27db010d20fbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffa4fb6021a41f7e;
++  __m128i_out = __lsx_vaddwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
+new file mode 100644
+index 000000000..4d5c60998
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwev-3.c
+@@ -0,0 +1,425 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_result[1]) = 0x0007005200440062;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080005e007f00d8;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe6d4572c8a5835bc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe5017c2ac9ca9fd0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00d3012b015700bb;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001002affca0070;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363771163631745;
++  *((unsigned long *)&__m128i_op1[0]) = 0x636363ec6363636c;
++  *((unsigned long *)&__m128i_result[1]) = 0x006300fb00630143;
++  *((unsigned long *)&__m128i_result[0]) = 0x0063ffec0063006c;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080ffffffff8080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00008080ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xff80ffffffffff80;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff80ffffffff;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00197f26cb658837;
++  *((unsigned long *)&__m128i_op0[0]) = 0x01009aa4a301084b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_result[1]) = 0x0037ffd40083ffe5;
++  *((unsigned long *)&__m128i_result[0]) = 0x001e0052001ffff9;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff00ffffff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000090900000998;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff000900ffff98;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x10f881a20ffd02b0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff8ffa2fffdffb0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000;
++  __m128i_out = __lsx_vaddwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1e0200001e020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffdfffcfffd;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffcfffffffd;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffdfffffffd;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010100000101;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa2f54a1ea2f54a1e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x00004a1e00004a1e;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000868686868686;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000868600008785;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe363636363abdf16;
++  *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000cecd00004657;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000c90000011197;
++  __m128i_out = __lsx_vaddwev_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001000f000e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fff1000ffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000f000e;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000ffffe;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0c07e181ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3430af9effffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00ff;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00060012000e002b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000049ffffffaa;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000e002b;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffaa;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000bfffffffe0f6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff7a53;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff7f80ffff7f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff7f80ffff7f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff7f80ffff7f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff7f80ffff7f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fffeff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fffeff00;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000003dffc2;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0080006b0000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000055555555;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff7f810100001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff007fff810001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000400530050ffa6;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffff811001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000a1ff4c;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000008000001e;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x86dd8341b164f12b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9611c3985b3159f5;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000035697d4e;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000013ecaadf2;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ef00ff010f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff010f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc1f03e1042208410;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001000110;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000431f851f;
++  __m128i_out = __lsx_vaddwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f;
++  __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffbfffffffbe;
++  __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x06b1213ef1efa299;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8312f5424ca4a07f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1f1f1f1f1f1f1f00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f1f1f27332b9f00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xa23214697fd03f7f;
++  __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x80000000ffffd860;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000;
++  __m128i_out = __lsx_vaddwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
+new file mode 100644
+index 000000000..0ebe8c8a9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-1.c
+@@ -0,0 +1,408 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ca354688;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_result[1]) = 0x00040003ff83ff84;
++  *((unsigned long *)&__m128i_result[0]) = 0x00040003ff4dffca;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000040d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001f5400000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001f00000000;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000f80007;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xb);
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff0100ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffeffff;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x478b478b38031779;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6b769e690fa1e119;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001030103;
++  *((unsigned long *)&__m128i_result[1]) = 0x0047004700380017;
++  *((unsigned long *)&__m128i_result[0]) = 0x006bff9e0010ffe2;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0xff80ffa2fff0ff74;
++  *((unsigned long *)&__m128i_result[0]) = 0xff76ffd8ffe6ffaa;
++  __m128i_out = __lsx_vaddwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001f5400000000;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffd70b00006ea9;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffa352ffff9269;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffd70b00006ea9;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffa352ffff9269;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8144ffff01c820a4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9b2ee1a4034b4e34;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff80c400000148;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff80c1ffffe8de;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe;
++  __m128i_out = __lsx_vaddwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa486c90f6537b8d7;
++  *((unsigned long *)&__m128i_op0[0]) = 0x58bcc2013ea1cc1e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffa486c90f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000058bcc201;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00001802041b0014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000003004;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff02000200;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffdfff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffdfff;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fbf83468;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fbf83468;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff82bb9784;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc6bb97ac;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007ffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001000fbff9;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002ff9afef;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000004f804f81;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000004f804f80;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffe00029f9f6061;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x64e464e464e464e4;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffeffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000064e264e6;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0305030203020502;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0301030203020502;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000003050302;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000003010302;
++  __m128i_out = __lsx_vaddwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000ff0000;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00a6ffceffb60052;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xff84fff4ff84fff4;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000fefefe6a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fefefe6a;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5a57bacbd7e39680;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6bae051ffed76001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf3e6586b60d7b152;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf7077b934ac0e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4e3e133738bb47d2;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000117d00007f7b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000093d0000187f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7d7f027f7c7f7c79;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7e7f7e7f027f032f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7d7f13fc7c7ffbf4;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
+new file mode 100644
+index 000000000..379517f39
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-2.c
+@@ -0,0 +1,344 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x08fdc221bfdb1927;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4303c67e9b7fb213;
++  *((unsigned long *)&__m128i_op1[1]) = 0x08fdc221bfdb1927;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4303c67e9b7fb213;
++  *((unsigned long *)&__m128i_result[1]) = 0x00100184017e0032;
++  *((unsigned long *)&__m128i_result[0]) = 0x0086018c01360164;
++  __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff77777807777775;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe6eeef00eeeeeebf;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f00f;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff007700070077;
++  *((unsigned long *)&__m128i_result[0]) = 0x00e600ef00ee01de;
++  __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vaddwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4429146a7b4c88b2;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe22b3595efa4aa0c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000442900007b4c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000e22b0000efa4;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000600000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000636500006363;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080800000808;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001fefc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001fefc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff80000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff8000010f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff8000010f78;
++  __m128i_out = __lsx_vaddwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffc01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffc01;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f000400000003;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f000400000003;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000400004;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000003f0004;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000017f800001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000017f800001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007f800001;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007f800001;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000;
++  __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x379674c000000000;
++  __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
+new file mode 100644
+index 000000000..30dc83518
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vaddwod-3.c
+@@ -0,0 +1,237 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000a16316b0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x16161616a16316b0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ffffa10016;
++  *((unsigned long *)&__m128i_result[0]) = 0x01150115ffa10016;
++  __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007e007e007e007e;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000200020;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000003f;
++  __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000fe00fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fe00fe00fe00fe;
++  __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000011ffee;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000dfff2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff;
++  __m128i_out = __lsx_vaddwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00e0000000e00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000e0000000e0;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff7100fffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ffffa10016;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01150115ffa10016;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100fe000070a1;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000115ffffffa1;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0000fffe;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000001000f00fe00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000017fff00fe7f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff00;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x04faf60009f5f092;
++  *((unsigned long *)&__m128i_op0[0]) = 0x04fafa9200000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff9fffefff9ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000004fa000009f5;
++  *((unsigned long *)&__m128i_result[0]) = 0x000004f3fffffff9;
++  __m128i_out = __lsx_vaddwod_w_hu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000c2f90000bafa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000c2fa8000c2fa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000c2f90000bafa;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001fff00001fff;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000807bf0a1f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000800ecedee68;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5847b72626ce61ef;
++  *((unsigned long *)&__m128i_op1[0]) = 0x110053f401e7cced;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x5847bf2de5d8816f;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000155;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vaddwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
+new file mode 100644
+index 000000000..af75f8e4e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-1.c
+@@ -0,0 +1,488 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4ee85545068f3133;
++  *((unsigned long *)&__m128i_op0[0]) = 0x870968c1f56bb3cd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x004e005500060031;
++  *((unsigned long *)&__m128i_result[0]) = 0xff870068fff5ffb3;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff082f000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc04d600d3aded151;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x004cff8fffde0051;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x800000007fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x800000007fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x003f0000ffffffff;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0042003e0042002f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001fffc0001fffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0042003e0042002f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001fffc0001fffc;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000750500006541;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000100fffffefd;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00ffff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffe000000f6;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4050000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000f80007;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000f8;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80ffffffffff80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff80ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fffffffe;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xce9035c49ffff570;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0xce9035c49ffff574;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000454ffff9573;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80007fc000003f00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7d187e427c993f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7500000075000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7500000075000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007d1800007c99;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5555000054100000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5555000154100155;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000155;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffebe6ed565;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffebe6ed565;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffbe6ed563;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000078c00000;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7d3ac60000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007d3ac600;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff82bb9784;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffc6bb97ac;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff82bb9784;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffc6bb97ac;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000003effff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000003effff;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf359f359f359f359;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf359f359f359f359;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffff359f358;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffff359f358;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000e2e36363;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000063636363;
++  __m128i_out = __lsx_vhaddw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff02000200;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe00001ffe200;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000383;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe400000003ffc001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe000ffff2382;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03574e39e496cbc9;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x03574e38e496cbc9;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xabff54e911f71b07;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa9ec4882f216ea11;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfc01fcfefc02fdf7;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe00fcfffe01fd01;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xaa0051e90ff91808;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe00000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffff000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff9514;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c9c9c9c9c;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000400000001;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80ff807e017f01;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f3b7f3f7f3b7f21;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0a0000001e000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a000000f6000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0980ff8174017f01;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000ef0000000003b;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff040;
++  __m128i_out = __lsx_vhaddw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
+new file mode 100644
+index 000000000..37c769a2d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhaddw-2.c
+@@ -0,0 +1,452 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff80000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff0000;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0052005200520052;
++  *((unsigned long *)&__m128i_result[0]) = 0x0052005200520052;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fffd;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff000000ff;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00060012000e002b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000049ffffffaa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000127fffffea;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000060000000e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001201fe01e9;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007f008000ea007f;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00009f0000009f00;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000bd3d00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000007f00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000000;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long *)&__m128i_op1[0]) = 0x28bf0351ec69b5f2;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ad00007081;
++  *((unsigned long *)&__m128i_result[0]) = 0x000003510000b5f2;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5252adadadadadad;
++  *((unsigned long *)&__m128i_op1[0]) = 0xadad52525252adad;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000adad0000adad;
++  *((unsigned long *)&__m128i_result[0]) = 0x000052520000adad;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff8000010f78;
++  *((unsigned long *)&__m128i_op1[1]) = 0x002a001a001a000b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001a0000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7500000075007500;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007d1800007c99;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000f50000007500;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007e1600007d98;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff760386bdae46;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc1fc7941bc7e00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0802080408060803;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff000086bd;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ca000000c481;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000007fff9;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff2356fe165486;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5efeb3165bd7653d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000235600005486;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000b31600006544;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c83e21a22001818;
++  *((unsigned long *)&__m128i_op0[0]) = 0xdd3b8b02563b2d7b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128i_result[0]) = 0x00012c8a0000a58a;
++  __m128i_out = __lsx_vhaddw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001a8beed86;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010024d8f5;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000078c00000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6a57a30ff0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000f0000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000040d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffffe;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x80000000b57ec564;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000083ff0be0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001b57ec563;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000183ff0bdf;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000e2e3ffffd1d3;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000008000e2e3;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200010002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200010002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000010004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4ee85545ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x870968c1f56bb3cd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x870968c1f56bb3cd;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000013d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010001000030000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0006000200000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000001b0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000001b0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000001b001b;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000004870ba0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000010000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000010000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff8000010f800000;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000003e2;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhaddw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
+new file mode 100644
+index 000000000..3fade5157
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmadd.c
+@@ -0,0 +1,450 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffa486c90f;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1f52d710bf295626;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff7f01ff01;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfa31dfa21672e711;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1304db85e468073a;
++  *((unsigned long *)&__m128i_op2[1]) = 0x887c8beb969e00f2;
++  *((unsigned long *)&__m128i_op2[0]) = 0x101f8b680b6f8095;
++  *((unsigned long *)&__m128i_result[1]) = 0x7582ed22cb1c6e12;
++  *((unsigned long *)&__m128i_result[0]) = 0x35aaa61c944f34c2;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_result[0]) = 0x5252525252525252;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xc);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op2[1]) = 0xbfffbfffbfffbffe;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4000400040004002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe01fe01fe01fe01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe01fe01fe01fe01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe01fe01fe01fe01;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe01fe01fe01fe01;
++  *((unsigned long *)&__m128i_op2[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op2[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0xf10cf508f904fd01;
++  *((unsigned long *)&__m128i_result[0]) = 0xf10cf508f904fd01;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffb080ffffb080;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffb080ffffb080;
++  *((unsigned long *)&__m128i_op2[1]) = 0x004fcfcfd01f9f9f;
++  *((unsigned long *)&__m128i_op2[0]) = 0x9f4fcfcfcf800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3504b5fd2dee1f80;
++  *((unsigned long *)&__m128i_result[0]) = 0x4676f70fc0000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf7f7f7ff8e8c6d7e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf7f7f7f7f7f7fbff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xf7f7f7ff8e8c6d7e;
++  *((unsigned long *)&__m128i_result[0]) = 0xf7f7f7f7f7f7fbff;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0fbc1df53c1ae3f9;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff820f81;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xf144e32bc4e61d27;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000020017ef19f;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000004b01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a0;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000004b01;
++  __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf001f0010101f002;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007f41;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000fffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010000000000001;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x01ff020000ff03ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x01346b8d00b04c5a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x01ff020000ff03ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x01346b8d00b04c5a;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080808000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080808000;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000455555555;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007f00ff00ff00fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007f00ff00ff00fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xdcec560380000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x08ec7f7f80000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_op2[1]) = 0x32d8f0a905b6c59b;
++  *((unsigned long *)&__m128i_op2[0]) = 0x322a52fc2ba83b96;
++  *((unsigned long *)&__m128i_result[1]) = 0xaa14efac3bb62636;
++  *((unsigned long *)&__m128i_result[0]) = 0xd6c22c8353a80d2c;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op2[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xff000000001f1f00;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x9727b8499727b849;
++  *((unsigned long *)&__m128i_op2[0]) = 0x12755900b653f081;
++  *((unsigned long *)&__m128i_result[1]) = 0x00060fbf00040fbf;
++  *((unsigned long *)&__m128i_result[0]) = 0x00020fbf00000fbf;
++  __m128i_out = __lsx_vmadd_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000021100000211;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfb141d31fb141d31;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op2[1]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_op2[0]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0674c886fcba4e98;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffc0ffc0003f;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffc0ffc0003f003f;
++  *((unsigned long *)&__m128i_op2[1]) = 0x002a05a2f059094a;
++  *((unsigned long *)&__m128i_op2[0]) = 0x05ad3ba576eae048;
++  *((unsigned long *)&__m128i_result[1]) = 0xd4a6cc27d02397ce;
++  *((unsigned long *)&__m128i_result[0]) = 0x24b85f887e903abe;
++  __m128i_out = __lsx_vmadd_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0700f8ff0700f8ff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000007020701;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000007010701;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8000008680f1ff;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636463abdf17;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41f8e08016161198;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x6363636463abdf17;
++  *((unsigned long *)&__m128i_result[0]) = 0x41f8e08016161198;
++  __m128i_out = __lsx_vmadd_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x17c64aaef639f093;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_op2[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0x10f881a20ffd02b0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff800000;
++  __m128i_out = __lsx_vmadd_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
+new file mode 100644
+index 000000000..d3fd83da7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-1.c
+@@ -0,0 +1,472 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000036de0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000003be14000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffff7a53;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000001f0000;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000cdc1;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55;
++  *((unsigned long *)&__m128i_op2[1]) = 0x05d0ae6002e8748e;
++  *((unsigned long *)&__m128i_op2[0]) = 0xcd1de80217374041;
++  *((unsigned long *)&__m128i_result[1]) = 0xf490ee600180ce20;
++  *((unsigned long *)&__m128i_result[0]) = 0x063bff74fb46e356;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op2[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op2[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0fff0fff0fff0fff;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[0]) = 0x6363636363636363;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffe00029f9f6061;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f5ec0a0feefa0b0;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffe00029fb060b1;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8d78336c83652b86;
++  *((unsigned long *)&__m128i_op1[0]) = 0x39c51f389c0d6112;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffff0001ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ff9b0082;
++  *((unsigned long *)&__m128i_result[0]) = 0x003a0037fff2fff8;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_result[0]) = 0x05fafe0101fe000e;
++  __m128i_out = __lsx_vmaddwev_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff82bb9784;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc6bb97ac;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7fffffff82bb9784;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7fffffffc6bb97ac;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff82bb9784;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffc6bb97ac;
++  __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x05d0ba0002e8802e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd005e802174023d6;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc000c000c000ff81;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0ba00ba00ba00ba0;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0ba00ba00ba011eb;
++  *((unsigned long *)&__m128i_result[1]) = 0x05d0ae6002e8748e;
++  *((unsigned long *)&__m128i_result[0]) = 0xcd1de80217374041;
++  __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00fe00ff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000fff00000e36;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000fef01000e27ca;
++  __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x680485c8b304b019;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc89d7f0fed582019;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_op2[1]) = 0x67157b5100005000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x387c7e0a133f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x680485c8b304b019;
++  *((unsigned long *)&__m128i_result[0]) = 0xc89d7f0ff90da019;
++  __m128i_out = __lsx_vmaddwev_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01fc020000fe0100;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100fe000100fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa23214697fd03f7f;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7c7c9c0000007176;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7c7c9c0000007176;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x022002101b200203;
++  *((unsigned long *)&__m128i_op0[0]) = 0x022002101b200203;
++  *((unsigned long *)&__m128i_op1[1]) = 0x022002101b200203;
++  *((unsigned long *)&__m128i_op1[0]) = 0x022002101b200203;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000080c43b700;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x036caeeca7592703;
++  *((unsigned long *)&__m128i_result[0]) = 0x022002101b200203;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c83e21a22001818;
++  *((unsigned long *)&__m128i_op1[0]) = 0xdd3b8b02563b2d7b;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000009c83e21a;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000022001818;
++  *((unsigned long *)&__m128i_result[1]) = 0xf2c97aaa7d8fa270;
++  *((unsigned long *)&__m128i_result[0]) = 0x0b73e427f7cfcb88;
++  __m128i_out = __lsx_vmaddwev_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0010001000030000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0006000200000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7505445465593af1;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000030000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000780000007800;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0007000000040000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf047ef0000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80800001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80800001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff7fff7ef;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80808080ffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffbff8888080a;
++  *((unsigned long *)&__m128i_result[0]) = 0x080803ff807ff7f9;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f801fe000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fc03fc000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f801fe000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x01fe01fd01fd01fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x5d7f5d007f6a007f;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7f00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000500000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000060000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m128i_op2[0]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
+new file mode 100644
+index 000000000..839285685
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-2.c
+@@ -0,0 +1,383 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff00000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7fffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7e00fe0000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffbffffff85;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffc0000fdfc;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3941248880000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3941248880000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x40f3fa0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x76f4248880000000;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000e36400005253;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000035ed0000e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x400000003fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4000000040000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x400000003fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf1f1f1f149ed7273;
++  *((unsigned long *)&__m128i_op0[0]) = 0x78508ad4ec2ffcde;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffdfdc0d;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffdfdc0d;
++  *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f149ed7273;
++  *((unsigned long *)&__m128i_result[0]) = 0x78508ad4ae70fd87;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000440efffff000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x440ef000440ef000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x4400000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000440efffff000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000003b;
++  __m128i_out = __lsx_vmaddwev_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000ffc2f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00201df000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe700000007;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00010020fffeffde;
++  *((unsigned long *)&__m128i_op2[0]) = 0x011f57c100201a46;
++  *((unsigned long *)&__m128i_result[1]) = 0x001ffce00016fb41;
++  *((unsigned long *)&__m128i_result[0]) = 0x57cb857100001a46;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7c7c9c0000007176;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00ff000000001f1f;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7c7c9c0000007176;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc5c53492f25acbf2;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff000000001f1f00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_result[0]) = 0xc5c53492f25acbf2;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x34947b4b11684f92;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd73691661e5b68b4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000016f303dff6d2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000016f303dff6d2;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7fffffff00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x34947b4b11684f92;
++  *((unsigned long *)&__m128i_result[0]) = 0xee297a731e5c5f86;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0177fff0fffffff0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff8bc;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00bbfff7fffffff7;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff008ff820;
++  *((unsigned long *)&__m128i_result[1]) = 0xffe8008fffe7008f;
++  *((unsigned long *)&__m128i_result[0]) = 0x00010001f1153780;
++  __m128i_out = __lsx_vmaddwev_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000021;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op2[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001808281820102;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001808201018081;
++  __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000010100fe0101;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffff0200ffff01ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007f7f80807f7f80;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000020302030;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000020302030;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x3fffffffc0000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000006e17bfd8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000006e17bfd8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffff0100000001;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffff0100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x00010000fffffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000008000e2e3;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000008000e2e3;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080806362;
++  *((unsigned long *)&__m128i_result[0]) = 0x807f808000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8101010181010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8101010181010101;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000101010015;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffed00010001;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
+new file mode 100644
+index 000000000..bab2c6cf3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwev-3.c
+@@ -0,0 +1,383 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xc0c00000c0c00000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xc0c00c01c2cd0009;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f78787f00f7f700;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000f7f700f7f700;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000400;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000000000040d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000080003f80ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op2[1]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000080003f80ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xff81ff82ff810081;
++  *((unsigned long *)&__m128i_op2[0]) = 0xff82ff810081ff81;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x841f000fc28f801f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x841f000fc28f801f;
++  *((unsigned long *)&__m128i_op2[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xe593c8c4e593c8c4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x76ecfc8b85ac78db;
++  __m128i_out = __lsx_vmaddwev_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fff3;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000000000040d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010400;
++  __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0;
++  __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000002b0995850;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80005613;
++  *((unsigned long *)&__m128i_op1[0]) = 0x007f800000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffff80005613;
++  *((unsigned long *)&__m128i_op2[0]) = 0x007f800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff00011cf0c569;
++  *((unsigned long *)&__m128i_result[0]) = 0xc0000002b0995850;
++  __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ffffff81fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffff7e01;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000fffe01fd02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fe00fffe86f901;
++  __m128i_out = __lsx_vmaddwev_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbfffbfffbfffbffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xbfffbfffbfffbffe;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xbfffbfffbfffbffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffd3000000130000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffd3000000130000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffd3000000130000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffd3000000130000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffd3000000130000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffd3000000130000;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000f02e1f80f04;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000f02e1f80f04;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x5a57bacbd7e39680;
++  *((unsigned long *)&__m128i_op2[0]) = 0x6bae051ffed76001;
++  *((unsigned long *)&__m128i_result[1]) = 0xf3eb458161080000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffe9454286c0e000;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0051005200510052;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0051005200510052;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffaeffaeffaeffae;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffaeffaeffaeffae;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe65ecc1be5bc;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe65ecc1be5bc;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf1f1f1f149ed7273;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf1f1f1f1865e65a1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff941d;
++  *((unsigned long *)&__m128i_op2[1]) = 0xf1f1f1f149ed7273;
++  *((unsigned long *)&__m128i_op2[0]) = 0xf1f1f1f1865e65a1;
++  *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f149ed7273;
++  *((unsigned long *)&__m128i_result[0]) = 0x78508ad4ec2ffcde;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000120000000d;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000cfffffff2;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000dfffffff1;
++  __m128i_out = __lsx_vmaddwev_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op2[1]) = 0xff80ffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7ffffffeffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000002fe800000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ffffe0100000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
+new file mode 100644
+index 000000000..5875aa597
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-1.c
+@@ -0,0 +1,372 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000c7fff000c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfffff000f0008d3c;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfffff0016fff8d3d;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000100f8100002;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff0ff8006f0f950;
++  __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007ffd0001400840;
++  __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000;
++  __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010058;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffac0a000000;
++  __m128i_out = __lsx_vmaddwod_h_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000017fda829;
++  __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff8000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000800000000ffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x697eba2bedfa9c82;
++  *((unsigned long *)&__m128i_op2[0]) = 0xd705c77a7025c899;
++  *((unsigned long *)&__m128i_result[1]) = 0xffcb410000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffeb827ffffffff;
++  __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffc00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fffffc00;
++  __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000c5ac01015b;
++  *((unsigned long *)&__m128i_op1[0]) = 0xaaacac88a3a9a96a;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f;
++  __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ef4002d21fc7001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x28bf02d1ec6a35b2;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417;
++  *((unsigned long *)&__m128i_op2[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xff8000007fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long *)&__m128i_result[0]) = 0x28bf0351ec69b5f2;
++  __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001200100012001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xbf80000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1040400000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0961000100000001;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x03574e39e496cbc9;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3f77aab500000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x3f77aab500000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000ffc100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0fbc1df53c1ae3f9;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff820f81;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000ff801c9e;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000810000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000700000004e000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000000012020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000e00a18f5;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000002023dcdc;
++  *((unsigned long *)&__m128i_result[1]) = 0x000700000004e000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000000012020;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000120000000d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000011ffee;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000dfff2;
++  __m128i_out = __lsx_vmaddwod_d_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff7fffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff8000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffff7fffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffff8000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000003fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ff8010000000001;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x01fc020000fe0100;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x78c00000ff000000;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1000100012030e02;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x010105017878f8f6;
++  *((unsigned long *)&__m128i_op2[0]) = 0xf8f8fd0180810907;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080800000808;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010058;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x328e1080889415a0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3960b1a401811060;
++  *((unsigned long *)&__m128i_op1[1]) = 0x328e1080889415a0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3960b1a401811060;
++  *((unsigned long *)&__m128i_op2[1]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x32f3c7a38f9f4b8b;
++  *((unsigned long *)&__m128i_result[0]) = 0x2c9e5069f5d57780;
++  __m128i_out = __lsx_vmaddwod_q_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
+new file mode 100644
+index 000000000..4be7fce82
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-2.c
+@@ -0,0 +1,438 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0d0d0d0d0d0d0d0d;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xd8248069ffe78077;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xe31c86e90cda86f7;
++  __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00010020fffeffde;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100400100200e68;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00010020fffeffde;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0100400100200e68;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1ff85ffe2ae5d973;
++  *((unsigned long *)&__m128i_result[1]) = 0x00010020fffeffde;
++  *((unsigned long *)&__m128i_result[0]) = 0x011f57c100201a46;
++  __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffc0800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffc0800000;
++  __m128i_out = __lsx_vmaddwod_h_bu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_result[0]) = 0x003dc288077c7cc1;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffc0000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000455555555;
++  *((unsigned long *)&__m128i_result[1]) = 0xffc0000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000004;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000053a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000036280001;
++  *((unsigned long *)&__m128i_result[0]) = 0x42a0000042a02001;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000ff00fe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00ff;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff946c;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff946b;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3c992b2e;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff730f;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffff946c;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffff946b;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff946c;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffdffff946c;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000401000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffff800;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0006ffff0004ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002ffff0000ffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff7f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002fffefffd0001;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffe000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6fde000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xe000e0006080b040;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffe000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c6fde000;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x021b7d24c9678a35;
++  *((unsigned long *)&__m128i_op1[0]) = 0x030298a6a1030a49;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0a753500950fa306;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0a753500950fa306;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a753500a9fa0d06;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f;
++  __m128i_out = __lsx_vmaddwod_d_wu (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007f7f80807f7f80;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000020302030;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000020302030;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x3fffffffc0000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000006e17bfd8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000006e17bfd8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffff0100000001;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffff0100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000006e17bfd8;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000006e17bfd8;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x00010000fffffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000008000e2e3;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000008000e2e3;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080806362;
++  *((unsigned long *)&__m128i_result[0]) = 0x807f808000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8101010181010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8101010181010101;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000101010015;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffed00010001;
++  __m128i_out = __lsx_vmaddwev_q_du (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
+new file mode 100644
+index 000000000..8a4c39502
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaddwod-3.c
+@@ -0,0 +1,460 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00003fe00ffe3fe0;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000b5207f80;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfffff208fffffa02;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000bd3d00000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op2[0]) = 0x2020202020207f7f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000120002000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001021;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128i_op2[0]) = 0xc3818bffe7b7a7b8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000467fe000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000003ff8;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000003ff8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000467fef81;
++  __m128i_out = __lsx_vmaddwod_h_bu_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fc0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1e801ffc00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff020000fff4;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fc0000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1e801ffc00000000;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe80000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff03ffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00013fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000088500000f6a0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001fffd00000407;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000442900007b4c;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000e22b0000efa4;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffffff03ffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00013fff;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320176a4d2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x685670d37e80682a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320176a4d2;
++  *((unsigned long *)&__m128i_result[0]) = 0x685670d37e80682a;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc0411fe800000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x601fbfbeffffffff;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffc105d1aa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbc19ecca;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff3efa;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff43e6;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffa7;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00a975be00accf03;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000930400008a10;
++  *((unsigned long *)&__m128i_result[0]) = 0x00006f9100007337;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_w_hu_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op2[0]) = 0x001000100010c410;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff02ff1bff02ff23;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffff02fff4;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff02ff1bff02ff23;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffff02fff4;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1e801ffc7fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7e44bde9b842ff23;
++  *((unsigned long *)&__m128i_result[0]) = 0x00011e80007edff8;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffeffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe0d56a9774f3ea31;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe0dd268932a5edf9;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe0d56a9774f3ea31;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe0dd268932a5edf9;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xd8248069ffe78077;
++  *((unsigned long *)&__m128i_result[1]) = 0xe0d56a9774f3ea31;
++  *((unsigned long *)&__m128i_result[0]) = 0xbddaa86803e33c2a;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0028280000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0028280000282800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7505853d654185f5;
++  *((unsigned long *)&__m128i_op2[0]) = 0x01010000fefe0101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x012927ffff272800;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffff7f00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff007f0101017f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000020000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000183fffffe5;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000073;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000000000002a;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffff7f00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff007f0101017f;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128i_op2[1]) = 0x3f8000003f800001;
++  *((unsigned long *)&__m128i_op2[0]) = 0x3f8000003f800001;
++  *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800000;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000095896a760000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x006f0efe258ca851;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffff7fc8ffff8000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffff200000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000015516a768038;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff9ed2e1c000;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2000200000013fa0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000013fa0;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000120002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0080006b00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001b19b1c9c6da5a;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x001b19b1c9c6da5a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_result[0]) = 0x008003496dea0c61;
++  __m128i_out = __lsx_vmaddwod_d_wu_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000001ff000001ff;
++  *((unsigned long *)&__m128i_op2[1]) = 0xff80ffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7ffffffeffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000002fe800000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ffffe0100000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe80000000000001;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaddwev_q_du_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-addition-vsadd-ins.patch b/LoongArch-Add-tests-for-SX-vector-addition-vsadd-ins.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e7e02a17dce65fa467c36cf07645e10246524c51
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-addition-vsadd-ins.patch
@@ -0,0 +1,715 @@
+From 243656b5b87a3125c2a885d11f022a79cca98b39 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen <chenxiaolong@loongson.cn>
+Date: Mon, 11 Sep 2023 10:07:24 +0800
+Subject: [PATCH 082/124] LoongArch: Add tests for SX vector addition vsadd
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ .../loongarch/vector/lsx/lsx-vsadd-1.c        | 335 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vsadd-2.c        | 345 ++++++++++++++++++
+ 2 files changed, 680 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
+new file mode 100644
+index 000000000..1bc27c983
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-1.c
+@@ -0,0 +1,335 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3c992b2e;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff730f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff3c992b2e;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff730f;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128i_result[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001021;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001021;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x80808080806b000b;
++  __m128i_out = __lsx_vsadd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x3c5fffffff7fffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffeff00feff;
++  __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x40f3fa0000000000;
++  __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000008a0000008a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000008900000009;
++  *((unsigned long *)&__m128i_op1[1]) = 0x63637687636316bb;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x6363771163631745;
++  *((unsigned long *)&__m128i_result[0]) = 0x636363ec6363636c;
++  __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vsadd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fefefe68;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000fffefffefffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_result[0]) = 0x028c026bfff027af;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0007000000040000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f7fff003f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7fff003f800000;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000820202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00fe01fc0005fff4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003dbe88077c78c1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000820205a44;
++  *((unsigned long *)&__m128i_result[0]) = 0x013bc084078278b5;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000140001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000140001;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x67eb85b0b2ebb001;
++  *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff14eb54ab;
++  *((unsigned long *)&__m128i_result[0]) = 0x14ea6a002a406a00;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xce9035c49ffff570;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[0]) = 0xce9035c49ffff574;
++  __m128i_out = __lsx_vsadd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000040d;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001000000ff;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000002fffffffb;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010000fffb;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vadd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
+new file mode 100644
+index 000000000..67d189991
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsadd-2.c
+@@ -0,0 +1,345 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b;
++  *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffebd06fffe820c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7ffe7fff3506;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffebd06fffe820c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7ffe7fff3506;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff0cffffff18;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefffefffeff6a0c;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffff60ca7104649;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff790a15db63d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffff60ca710464a;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff790a15db63e;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00fe000100cf005f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5f675e96e29a5a60;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x5fff5e97e2ff5abf;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefffefffefffeff;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000100010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010058;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001001100110068;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffffffff;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffb81a6f70;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000d48eaa1a2;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffb81ae0bf;
++  *((unsigned long *)&__m128i_result[0]) = 0x00012c9748eaffff;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsadd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000d0000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8006000000040000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8002000000000007;
++  *((unsigned long *)&__m128i_result[1]) = 0x8006000000040000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8002000d00000014;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000600007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000008ffffa209;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000600007fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000008ffffa209;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x636363633f3e47c1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41f8e080f1ef4eaa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000807bf0a1f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000800ecedee68;
++  *((unsigned long *)&__m128i_result[1]) = 0x63636b6afe486741;
++  *((unsigned long *)&__m128i_result[0]) = 0x41f8e880ffffffff;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000e29e;
++  *((unsigned long *)&__m128i_result[0]) = 0x000259140000ffff;
++  __m128i_out = __lsx_vsadd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffeffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffeffffffff;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11;
++  *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be55700b5;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00040003ff83ff84;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00040003ff4dffca;
++  *((unsigned long *)&__m128i_result[1]) = 0x0c07e181ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x3430af9effffffff;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffa8ff9f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffabff99;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000100000002007d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000020001;
++  *((unsigned long *)&__m128i_result[1]) = 0x00010000ffab001c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001ffffffadff9a;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0800080008000800;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc110000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc00d060000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xc110000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff7fffffff;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfbfbfb17fbfb38ea;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfbfb47fbfbfb0404;
++  *((unsigned long *)&__m128i_result[1]) = 0xfbfbfb17fbfb3919;
++  *((unsigned long *)&__m128i_result[0]) = 0xfbfb47fbfbfb042d;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808081;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x80808080ffffffff;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00123fff00120012;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0012001200120012;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a;
++  *((unsigned long *)&__m128i_result[1]) = 0x00123fff00120012;
++  *((unsigned long *)&__m128i_result[0]) = 0x001200120017004c;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed;
++  *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_result[0]) = 0xc5c534920000c4ed;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a79308f6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000aa822a79308f6;
++  *((unsigned long *)&__m128i_op1[0]) = 0x03aa558e1d37b5a1;
++  *((unsigned long *)&__m128i_result[1]) = 0x00155044ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x03aa558e2584c86f;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x021b7d24c9678a35;
++  *((unsigned long *)&__m128i_op0[0]) = 0x030298a6a1030a49;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x021b7d24c9678a35;
++  *((unsigned long *)&__m128i_result[0]) = 0x030298a6a1030a49;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007a8000000480;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000485000004cc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00007a8000000480;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000485000004cc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000f50000000900;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000090a00000998;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x004eff6200d2ff76;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff70002800be00a0;
++  *((unsigned long *)&__m128i_result[1]) = 0x004eff6200d2ff76;
++  *((unsigned long *)&__m128i_result[0]) = 0xff70002800be00a0;
++  __m128i_out = __lsx_vsadd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-floating-point-ari.patch b/LoongArch-Add-tests-for-SX-vector-floating-point-ari.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5c4d6f361e5a592bacd982f9d0a51ca659aa188c
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-floating-point-ari.patch
@@ -0,0 +1,2928 @@
+From 4ccb21b6d2d23046c6a71c4540a1eb288609f041 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen <chenxiaolong@loongson.cn>
+Date: Tue, 12 Sep 2023 11:25:20 +0800
+Subject: [PATCH 093/124] LoongArch: Add tests for SX vector floating point
+ arithmetic instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vfadd_d.c        | 407 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfadd_s.c        | 470 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfclass_d.c      |  83 ++++
+ .../loongarch/vector/lsx/lsx-vfclass_s.c      |  74 +++
+ .../loongarch/vector/lsx/lsx-vflogb_d.c       |  76 +++
+ .../loongarch/vector/lsx/lsx-vflogb_s.c       | 185 +++++++
+ .../loongarch/vector/lsx/lsx-vfmax_d.c        | 200 ++++++++
+ .../loongarch/vector/lsx/lsx-vfmax_s.c        | 335 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vfmaxa_d.c       | 155 ++++++
+ .../loongarch/vector/lsx/lsx-vfmaxa_s.c       | 230 +++++++++
+ .../loongarch/vector/lsx/lsx-vfsqrt_d.c       | 216 ++++++++
+ .../loongarch/vector/lsx/lsx-vfsqrt_s.c       | 372 ++++++++++++++
+ 12 files changed, 2803 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
+new file mode 100644
+index 000000000..7ffbd385e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_d.c
+@@ -0,0 +1,407 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000fea8ff44;
++  *((unsigned long *)&__m128d_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128d_op1[0]) = 0x2020202020202020;
++  *((unsigned long *)&__m128d_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128d_result[0]) = 0x2020202020202020;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128d_result[0]) = 0x1000100010001000;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x000000000000000f;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000010100fe0101;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffff0200ffff01ff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x0001010100fe0100;
++  *((unsigned long *)&__m128d_result[0]) = 0xffff0200ffff01ff;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7fff0101ffffe000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7fffffffa0204000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7f370101ff04ffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7f3bffffa0226021;
++  *((unsigned long *)&__m128d_result[1]) = 0x7fff0101ffffe000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7fffffffa0204000;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128d_op1[1]) = 0xf654ad7447e59090;
++  *((unsigned long *)&__m128d_op1[0]) = 0x27b1b106b8145f50;
++  *((unsigned long *)&__m128d_result[1]) = 0xf654ad7447e59090;
++  *((unsigned long *)&__m128d_result[0]) = 0x27b1b106b8145f50;
++  __m128d_out = __lsx_vfadd_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000100000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x1000100000001000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000100000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x1000100000001000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000007000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x01533b5e7489ae24;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffab7e71e33848;
++  *((unsigned long *)&__m128d_op1[1]) = 0x01533b5e7489ae24;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffab7e71e33848;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffab7e71e33848;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmul_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128d_result[1]) = 0x800000ff000000ff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000fff80000;
++  *((unsigned long *)&__m128d_result[1]) = 0x80000000fff8fff8;
++  *((unsigned long *)&__m128d_result[0]) = 0x80000000fff80000;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128d_op1[1]) = 0xb55ccf30f52a6a68;
++  *((unsigned long *)&__m128d_op1[0]) = 0x4e0018eceb82c53a;
++  *((unsigned long *)&__m128d_result[1]) = 0x355ccf30f52a6a68;
++  *((unsigned long *)&__m128d_result[0]) = 0xce0018eceb82c53a;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00006c82;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00009b140000917b;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffff00006c82;
++  *((unsigned long *)&__m128d_result[0]) = 0x00009b140000917b;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000100000020;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000083b00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff0c170;
++  *((unsigned long *)&__m128d_op0[0]) = 0x5237c1bac9eadf55;
++  *((unsigned long *)&__m128d_op1[1]) = 0xe6d4572c8a5835bc;
++  *((unsigned long *)&__m128d_op1[0]) = 0xe5017c2ac9ca9fd0;
++  *((unsigned long *)&__m128d_result[1]) = 0xe93d0bd19ff07013;
++  *((unsigned long *)&__m128d_result[0]) = 0x65017c2ac9ca9fd0;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff07013;
++  *((unsigned long *)&__m128d_op0[0]) = 0x65017c2ac9ca9fd0;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00008bf700017052;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000f841000091aa;
++  *((unsigned long *)&__m128d_result[1]) = 0xe93d0bd19ff07013;
++  *((unsigned long *)&__m128d_result[0]) = 0x65017c2ac9ca9fd0;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000004000000002;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x5555410154551515;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0004455501500540;
++  *((unsigned long *)&__m128d_result[1]) = 0xd555410154551515;
++  *((unsigned long *)&__m128d_result[0]) = 0x8004455501500540;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000300037ff000ff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0003000300a10003;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000007ff000ff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0003000300000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0003000300a10003;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x56a09e662ab46b31;
++  *((unsigned long *)&__m128d_op1[0]) = 0xb4b8122ef4054bb3;
++  *((unsigned long *)&__m128d_result[1]) = 0xd6a09e662ab46b31;
++  *((unsigned long *)&__m128d_result[0]) = 0x34b8122ef4054bb3;
++  __m128d_out = __lsx_vfsub_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7f4000007f040000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7f0200007f020000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128d_result[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffff01018888;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000100007f01;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffefefffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0400000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffefefffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000ff801c9e;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000810000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x40eff02383e383e4;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000cd630000cd63;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffff00000000ffff;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000aa822a79308f6;
++  *((unsigned long *)&__m128d_op1[0]) = 0x03aa558e1d37b5a1;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffefffe011df03e;
++  *((unsigned long *)&__m128d_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128d_result[0]) = 0xfffffffefffffffe;
++  __m128d_out = __lsx_vfdiv_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
+new file mode 100644
+index 000000000..388430278
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfadd_s.c
+@@ -0,0 +1,470 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x05050505;
++  *((int *)&__m128_op0[2]) = 0x05050505;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x05050000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x03574e38;
++  *((int *)&__m128_op1[0]) = 0xe496cbc9;
++  *((int *)&__m128_result[3]) = 0x05050505;
++  *((int *)&__m128_result[2]) = 0x05050505;
++  *((int *)&__m128_result[1]) = 0x03574e38;
++  *((int *)&__m128_result[0]) = 0xe496cbc9;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000000f;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00077f88;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00077f97;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x000000ff;
++  *((int *)&__m128_op0[0]) = 0x000000ff;
++  *((int *)&__m128_op1[3]) = 0x370bdfec;
++  *((int *)&__m128_op1[2]) = 0xffecffec;
++  *((int *)&__m128_op1[1]) = 0x370bdfec;
++  *((int *)&__m128_op1[0]) = 0xffecffec;
++  *((int *)&__m128_result[3]) = 0x370bdfec;
++  *((int *)&__m128_result[2]) = 0xffecffec;
++  *((int *)&__m128_result[1]) = 0x370bdfec;
++  *((int *)&__m128_result[0]) = 0xffecffec;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x0000ff00;
++  *((int *)&__m128_op1[0]) = 0x00ff0000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffff0000;
++  *((int *)&__m128_op0[2]) = 0xffff0000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x40088040;
++  *((int *)&__m128_op1[2]) = 0x80040110;
++  *((int *)&__m128_op1[1]) = 0x40408010;
++  *((int *)&__m128_op1[0]) = 0x80200110;
++  *((int *)&__m128_result[3]) = 0xffff0000;
++  *((int *)&__m128_result[2]) = 0xffff0000;
++  *((int *)&__m128_result[1]) = 0x40408010;
++  *((int *)&__m128_result[0]) = 0x80200110;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xfffffffc;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xfffffffc;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xfffffffc;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xfffffffc;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000001b;
++  *((int *)&__m128_op0[2]) = 0x0000001b;
++  *((int *)&__m128_op0[1]) = 0x0000001b;
++  *((int *)&__m128_op0[0]) = 0x0000001b;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x0000001b;
++  *((int *)&__m128_result[2]) = 0x0000001b;
++  *((int *)&__m128_result[1]) = 0x0000001b;
++  *((int *)&__m128_result[0]) = 0x0000001b;
++  __m128_out = __lsx_vfadd_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x56411278;
++  *((int *)&__m128_op0[2]) = 0x43c0d41e;
++  *((int *)&__m128_op0[1]) = 0x0124d8f6;
++  *((int *)&__m128_op0[0]) = 0xa494006b;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x05010501;
++  *((int *)&__m128_op1[2]) = 0x05010501;
++  *((int *)&__m128_op1[1]) = 0x05010501;
++  *((int *)&__m128_op1[0]) = 0x0501050c;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x21f32eaf;
++  *((int *)&__m128_op0[2]) = 0x5b7a02c8;
++  *((int *)&__m128_op0[1]) = 0x407c2ca3;
++  *((int *)&__m128_op0[0]) = 0x2cbd0357;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00010400;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xfffe0001;
++  *((int *)&__m128_op0[2]) = 0xfffe0001;
++  *((int *)&__m128_op0[1]) = 0xfffe0001;
++  *((int *)&__m128_op0[0]) = 0xfffe0001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xfffe0001;
++  *((int *)&__m128_result[2]) = 0xfffe0001;
++  *((int *)&__m128_result[1]) = 0xfffe0001;
++  *((int *)&__m128_result[0]) = 0xfffe0001;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00002ebf;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x01000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00081f1f;
++  *((int *)&__m128_op0[2]) = 0x1f1f1f1f;
++  *((int *)&__m128_op0[1]) = 0x1f1f1f1f;
++  *((int *)&__m128_op0[0]) = 0x1f1f1f1f;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x021b7d24;
++  *((int *)&__m128_op0[2]) = 0x49678a35;
++  *((int *)&__m128_op0[1]) = 0x030298a6;
++  *((int *)&__m128_op0[0]) = 0x21030a49;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000002;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xf6548a17;
++  *((int *)&__m128_op0[2]) = 0x47e59090;
++  *((int *)&__m128_op0[1]) = 0x27b169bb;
++  *((int *)&__m128_op0[0]) = 0xb8145f50;
++  *((int *)&__m128_op1[3]) = 0x004eff62;
++  *((int *)&__m128_op1[2]) = 0x00d2ff76;
++  *((int *)&__m128_op1[1]) = 0xff700028;
++  *((int *)&__m128_op1[0]) = 0x00be00a0;
++  *((int *)&__m128_result[3]) = 0xb7032c34;
++  *((int *)&__m128_result[2]) = 0x093d35ab;
++  *((int *)&__m128_result[1]) = 0xe7a6533b;
++  *((int *)&__m128_result[0]) = 0x800001b8;
++  __m128_out = __lsx_vfmul_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x7fff0101;
++  *((int *)&__m128_op0[2]) = 0x81010102;
++  *((int *)&__m128_op0[1]) = 0x7fffffff;
++  *((int *)&__m128_op0[0]) = 0x81010102;
++  *((int *)&__m128_op1[3]) = 0x00000fff;
++  *((int *)&__m128_op1[2]) = 0xffffe000;
++  *((int *)&__m128_op1[1]) = 0x00001020;
++  *((int *)&__m128_op1[0]) = 0x20204000;
++  *((int *)&__m128_result[3]) = 0x7fff0101;
++  *((int *)&__m128_result[2]) = 0xffffe000;
++  *((int *)&__m128_result[1]) = 0x7fffffff;
++  *((int *)&__m128_result[0]) = 0xa0204000;
++  __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000fff;
++  *((int *)&__m128_op1[2]) = 0xffffe000;
++  *((int *)&__m128_op1[1]) = 0x00001020;
++  *((int *)&__m128_op1[0]) = 0x20204000;
++  *((int *)&__m128_result[3]) = 0x80000fff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0x80001020;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfsub_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x54feed87;
++  *((int *)&__m128_op0[2]) = 0xbc3f2be1;
++  *((int *)&__m128_op0[1]) = 0x8064d8f6;
++  *((int *)&__m128_op0[0]) = 0xa494afcb;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xff800000;
++  __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xd8248069;
++  *((int *)&__m128_op0[0]) = 0x7f678077;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xd8248069;
++  *((int *)&__m128_op1[0]) = 0x7f678077;
++  *((int *)&__m128_result[3]) = 0x7fc00000;
++  *((int *)&__m128_result[2]) = 0x7fc00000;
++  *((int *)&__m128_result[1]) = 0x3f800000;
++  *((int *)&__m128_result[0]) = 0x3f800000;
++  __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7fc00000;
++  *((int *)&__m128_result[2]) = 0x7fc00000;
++  *((int *)&__m128_result[1]) = 0x7fc00000;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00070000;
++  *((int *)&__m128_op0[2]) = 0x00040000;
++  *((int *)&__m128_op0[1]) = 0x00030000;
++  *((int *)&__m128_op0[0]) = 0x00010000;
++  *((int *)&__m128_op1[3]) = 0x00070000;
++  *((int *)&__m128_op1[2]) = 0x00040000;
++  *((int *)&__m128_op1[1]) = 0x00030000;
++  *((int *)&__m128_op1[0]) = 0x00010000;
++  *((int *)&__m128_result[3]) = 0x3f800000;
++  *((int *)&__m128_result[2]) = 0x3f800000;
++  *((int *)&__m128_result[1]) = 0x3f800000;
++  *((int *)&__m128_result[0]) = 0x3f800000;
++  __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00010001;
++  *((int *)&__m128_op1[2]) = 0x0001007c;
++  *((int *)&__m128_op1[1]) = 0x00010001;
++  *((int *)&__m128_op1[0]) = 0x00010001;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00001fff;
++  *((int *)&__m128_op0[2]) = 0x00001fff;
++  *((int *)&__m128_op0[1]) = 0x00000003;
++  *((int *)&__m128_op0[0]) = 0xfffffffc;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0xfffffffc;
++  __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7fc00000;
++  *((int *)&__m128_result[2]) = 0x7fc00000;
++  *((int *)&__m128_result[1]) = 0x7fc00000;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vfdiv_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
+new file mode 100644
+index 000000000..9706d7adc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_d.c
+@@ -0,0 +1,83 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2;
++  *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000080;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vfclass_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
+new file mode 100644
+index 000000000..7166f954b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfclass_s.c
+@@ -0,0 +1,74 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x7fff8000;
++  *((int *)&__m128_op0[1]) = 0x00010081;
++  *((int *)&__m128_op0[0]) = 0x00000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000020000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100;
++  __m128i_out = __lsx_vfclass_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfe02fe02;
++  *((int *)&__m128_op0[2]) = 0xfe02fe02;
++  *((int *)&__m128_op0[1]) = 0xfe02fe02;
++  *((int *)&__m128_op0[0]) = 0xfe02fe02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008;
++  __m128i_out = __lsx_vfclass_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000000c;
++  *((int *)&__m128_op0[2]) = 0x7fff000c;
++  *((int *)&__m128_op0[1]) = 0x10001000;
++  *((int *)&__m128_op0[0]) = 0x10001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080;
++  __m128i_out = __lsx_vfclass_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000020000000200;
++  __m128i_out = __lsx_vfclass_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000020000000200;
++  __m128i_out = __lsx_vfclass_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0c0b0a09;
++  *((int *)&__m128_op0[2]) = 0x0b0a0908;
++  *((int *)&__m128_op0[1]) = 0x0a090807;
++  *((int *)&__m128_op0[0]) = 0x09080706;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080;
++  __m128i_out = __lsx_vfclass_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
+new file mode 100644
+index 000000000..cc36bf136
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_d.c
+@@ -0,0 +1,76 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xc090c40000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000001000000048;
++  *((unsigned long *)&__m128d_result[1]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m128d_result[0]) = 0xc090380000000000;
++  __m128d_out = __lsx_vflogb_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
+new file mode 100644
+index 000000000..624589620
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vflogb_s.c
+@@ -0,0 +1,185 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00003004;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xc3080000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xff800000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x01010101;
++  *((int *)&__m128_op0[2]) = 0x01010101;
++  *((int *)&__m128_op0[1]) = 0x01010101;
++  *((int *)&__m128_op0[0]) = 0x01010101;
++  *((int *)&__m128_result[3]) = 0xc2fa0000;
++  *((int *)&__m128_result[2]) = 0xc2fa0000;
++  *((int *)&__m128_result[1]) = 0xc2fa0000;
++  *((int *)&__m128_result[0]) = 0xc2fa0000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x01ff01ff;
++  *((int *)&__m128_op0[2]) = 0x01ff01ff;
++  *((int *)&__m128_op0[1]) = 0x01ff01ff;
++  *((int *)&__m128_op0[0]) = 0x01ff01ff;
++  *((int *)&__m128_result[3]) = 0xc2f80000;
++  *((int *)&__m128_result[2]) = 0xc2f80000;
++  *((int *)&__m128_result[1]) = 0xc2f80000;
++  *((int *)&__m128_result[0]) = 0xc2f80000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xd46cdc13;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00fe00fe;
++  *((int *)&__m128_op0[2]) = 0x000200fe;
++  *((int *)&__m128_op0[1]) = 0x00fe00fe;
++  *((int *)&__m128_op0[0]) = 0x000200fe;
++  *((int *)&__m128_result[3]) = 0xc2fc0000;
++  *((int *)&__m128_result[2]) = 0xc3040000;
++  *((int *)&__m128_result[1]) = 0xc2fc0000;
++  *((int *)&__m128_result[0]) = 0xc3040000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x01010101;
++  *((int *)&__m128_op0[0]) = 0x00000100;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xc2fa0000;
++  *((int *)&__m128_result[0]) = 0xc30d0000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000014;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000014;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xc3110000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xc3110000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x4e3e1337;
++  *((int *)&__m128_op0[0]) = 0x38bb47d2;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0x41e80000;
++  *((int *)&__m128_result[0]) = 0xc1600000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xff800000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xff800000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xff800000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00003ff8;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0xc3080000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xf1f181a2;
++  *((int *)&__m128_op0[2]) = 0xf1f1f1b0;
++  *((int *)&__m128_op0[1]) = 0xf1f1f1f1;
++  *((int *)&__m128_op0[0]) = 0xf180f1f1;
++  *((int *)&__m128_result[3]) = 0x7fc00000;
++  *((int *)&__m128_result[2]) = 0x7fc00000;
++  *((int *)&__m128_result[1]) = 0x7fc00000;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vflogb_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
+new file mode 100644
+index 000000000..442473fb4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_d.c
+@@ -0,0 +1,200 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m128d_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128d_result[0]) = 0x0400040004000400;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m128d_result[0]) = 0x01ff01ff01ff01ff;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128d_result[0]) = 0xfffcfffcfffcfffc;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x000000000000ffff;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128d_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfdfef9ff0efff900;
++  *((unsigned long *)&__m128d_result[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128d_result[0]) = 0x6363636363636363;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128d_op0[0]) = 0xa352bfac9269e0aa;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128d_result[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128d_result[0]) = 0x377b810912c0e000;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128d_op0[0]) = 0xc3818bffe7b7a7b8;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x17c64aaef639f093;
++  *((unsigned long *)&__m128d_op0[0]) = 0xdb8f439722ec502d;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x17c64aaef639f093;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x10f881a20ffd02b0;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x10f881a20ffd02b0;
++  *((unsigned long *)&__m128d_result[0]) = 0x00000000ff800000;
++  __m128d_out = __lsx_vfmax_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000c000ffffc000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000958affff995d;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x1748c4f9ed1a5870;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmin_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
+new file mode 100644
+index 000000000..876588827
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmax_s.c
+@@ -0,0 +1,335 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x0000ffff;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x0000ffff;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xc2409eda;
++  *((int *)&__m128_op1[2]) = 0xb019323f;
++  *((int *)&__m128_op1[1]) = 0x460f3b39;
++  *((int *)&__m128_op1[0]) = 0x3ef4be3a;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x460f3b39;
++  *((int *)&__m128_result[0]) = 0x3ef4be3a;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000001;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000001;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xfefd7f7f;
++  *((int *)&__m128_op1[2]) = 0x7f7f7f7e;
++  *((int *)&__m128_op1[1]) = 0xdffdbffe;
++  *((int *)&__m128_op1[0]) = 0xba6f5543;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x7f7f7f7e;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xff84fff4;
++  *((int *)&__m128_op0[2]) = 0xff84fff4;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffff0;
++  *((int *)&__m128_op1[3]) = 0xff84fff4;
++  *((int *)&__m128_op1[2]) = 0xff84fff4;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xfffffff0;
++  *((int *)&__m128_result[3]) = 0xffc4fff4;
++  *((int *)&__m128_result[2]) = 0xffc4fff4;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xfffffff0;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00007fff;
++  *((int *)&__m128_op1[2]) = 0x00007fff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00007fff;
++  *((int *)&__m128_result[2]) = 0x00007fff;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000001;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x01010001;
++  *((int *)&__m128_op0[0]) = 0x01010001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00020000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00020000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00020000;
++  *((int *)&__m128_result[1]) = 0x01010001;
++  *((int *)&__m128_result[0]) = 0x01010001;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000020;
++  *((int *)&__m128_op1[2]) = 0x00000020;
++  *((int *)&__m128_op1[1]) = 0x0000001f;
++  *((int *)&__m128_op1[0]) = 0x0000001f;
++  *((int *)&__m128_result[3]) = 0x00000020;
++  *((int *)&__m128_result[2]) = 0x00000020;
++  *((int *)&__m128_result[1]) = 0x0000001f;
++  *((int *)&__m128_result[0]) = 0x0000001f;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xf3040705;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xf3040705;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0xf3040705;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000004;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000004;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000004;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000004;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000004;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000004;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmax_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000ffff;
++  *((int *)&__m128_op0[2]) = 0x0000ffff;
++  *((int *)&__m128_op0[1]) = 0x0000ffff;
++  *((int *)&__m128_op0[0]) = 0x0000fffe;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffe5;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffe5;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x13121110;
++  *((int *)&__m128_op0[2]) = 0x1211100f;
++  *((int *)&__m128_op0[1]) = 0x11100f0e;
++  *((int *)&__m128_op0[0]) = 0x100f0e0d;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffff3;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000008;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000088;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000008;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000088;
++  __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x52525252;
++  *((int *)&__m128_op0[2]) = 0xadadadad;
++  *((int *)&__m128_op0[1]) = 0x52525252;
++  *((int *)&__m128_op0[0]) = 0xadadadad;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0xadadadad;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0xadadadad;
++  __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000ffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x0000ffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x0000ffff;
++  __m128_out = __lsx_vfmin_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
+new file mode 100644
+index 000000000..c2766d5c6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_d.c
+@@ -0,0 +1,155 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000800000000000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x01203f1e3d1c3b1a;
++  *((unsigned long *)&__m128d_op0[0]) = 0x3918371635143312;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000af555555555;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000af555555555;
++  *((unsigned long *)&__m128d_result[1]) = 0x01203f1e3d1c3b1a;
++  *((unsigned long *)&__m128d_result[0]) = 0x3918371635143312;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000010000000000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x10f8000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfff8ffa2fffdffb0;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128d_result[1]) = 0x10f8000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x00000000ff800000;
++  __m128d_out = __lsx_vfmaxa_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x80000000fff6fc00;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000080000000;
++  __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000158;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffe0004fffe0004;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x002a001a001a000b;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x002a001a001a000b;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmina_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
+new file mode 100644
+index 000000000..5fcdedd3f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmaxa_s.c
+@@ -0,0 +1,230 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xf436f3f5;
++  *((int *)&__m128_op0[0]) = 0x2f4ef4a8;
++  *((int *)&__m128_op1[3]) = 0xff800000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xff800000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xff800000;
++  *((int *)&__m128_result[0]) = 0x2f4ef4a8;
++  __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000800;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000800;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000800;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000800;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xc0c0c000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00800080;
++  *((int *)&__m128_op1[2]) = 0x00800080;
++  *((int *)&__m128_op1[1]) = 0x0080006b;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00800080;
++  *((int *)&__m128_result[2]) = 0xc0c0c000;
++  *((int *)&__m128_result[1]) = 0x0080006b;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x80000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x80000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmaxa_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xff01ff01;
++  *((int *)&__m128_op1[2]) = 0x0000ff7d;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x0000fffc;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xdfa6e0c6;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xd46cdc13;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x01010101;
++  *((int *)&__m128_op0[2]) = 0x01010101;
++  *((int *)&__m128_op0[1]) = 0x010101fe;
++  *((int *)&__m128_op0[0]) = 0x0101fe87;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffff0000;
++  *((int *)&__m128_op1[2]) = 0xffff0000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmina_s (__m128_op0, __m128_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
+new file mode 100644
+index 000000000..8a35dfe24
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_d.c
+@@ -0,0 +1,216 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffa486c90f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000058bcc201;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffa486c90f;
++  *((unsigned long *)&__m128d_result[0]) = 0x1f52d710bf295626;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffff01ff01;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000be00be;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x1f1b917c9f3d5e05;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000001400000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x1f81e3779b97f4a8;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128d_op0[0]) = 0x001effae001effae;
++  *((unsigned long *)&__m128d_result[1]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128d_result[0]) = 0x2006454690d3de87;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128d_op0[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000;
++  __m128d_out = __lsx_vfsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000;
++  __m128d_out = __lsx_vfrsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0001ffff00000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x5ff6a0a40ea8f47c;
++  *((unsigned long *)&__m128d_result[0]) = 0x5ff6a0a40e9da42a;
++  __m128d_out = __lsx_vfrsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x61608654a2d4f6da;
++  __m128d_out = __lsx_vfrsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00fe000100cf005f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128d_result[1]) = 0x5f675e96e29a5a60;
++  *((unsigned long *)&__m128d_result[0]) = 0x7fff7fff7fff7fff;
++  __m128d_out = __lsx_vfrsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrsqrt_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff8000000000000;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00003f8000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00003f8000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000fffa0000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffa0000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xe593c8c4e593c8c4;
++  *((unsigned long *)&__m128d_result[1]) = 0x805ffffe01001fe0;
++  *((unsigned long *)&__m128d_result[0]) = 0x9a49e11102834d70;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128d_op0[0]) = 0x5252dcdcdcdcdcdc;
++  *((unsigned long *)&__m128d_result[1]) = 0x2d8bf1f8fc7e3f20;
++  *((unsigned long *)&__m128d_result[0]) = 0x2d8b24b936d1b24d;
++  __m128d_out = __lsx_vfrecip_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
+new file mode 100644
+index 000000000..ffd80540b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfsqrt_s.c
+@@ -0,0 +1,372 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0xfe07e5fe;
++  *((int *)&__m128_op0[2]) = 0xfefdddfe;
++  *((int *)&__m128_op0[1]) = 0x00020100;
++  *((int *)&__m128_op0[0]) = 0xfedd0c00;
++  *((int *)&__m128_result[3]) = 0x7fc00000;
++  *((int *)&__m128_result[2]) = 0x7fc00000;
++  *((int *)&__m128_result[1]) = 0x1e801ffc;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xff00ff00;
++  *((int *)&__m128_op0[2]) = 0xff00ff00;
++  *((int *)&__m128_op0[1]) = 0xff00ff00;
++  *((int *)&__m128_op0[0]) = 0xff00ff00;
++  *((int *)&__m128_result[3]) = 0x7fc00000;
++  *((int *)&__m128_result[2]) = 0x7fc00000;
++  *((int *)&__m128_result[1]) = 0x7fc00000;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x8c7fc73a;
++  *((int *)&__m128_op0[2]) = 0x137e54af;
++  *((int *)&__m128_op0[1]) = 0xbc84cf6f;
++  *((int *)&__m128_op0[0]) = 0x76208329;
++  *((int *)&__m128_result[3]) = 0x7fc00000;
++  *((int *)&__m128_result[2]) = 0x297f29fe;
++  *((int *)&__m128_result[1]) = 0x7fc00000;
++  *((int *)&__m128_result[0]) = 0x5acab5a5;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffff9727;
++  *((int *)&__m128_op0[2]) = 0xffff9727;
++  *((int *)&__m128_op0[1]) = 0xfffffe79;
++  *((int *)&__m128_op0[0]) = 0xffffba5f;
++  *((int *)&__m128_result[3]) = 0xffff9727;
++  *((int *)&__m128_result[2]) = 0xffff9727;
++  *((int *)&__m128_result[1]) = 0xfffffe79;
++  *((int *)&__m128_result[0]) = 0xffffba5f;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xfff8fff8;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xfff80000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0xfff8fff8;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0xfff80000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x1f1b917c;
++  *((int *)&__m128_op0[0]) = 0x9f3d5e05;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x4fa432d6;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x12835580;
++  *((int *)&__m128_op0[0]) = 0xb880eb98;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0x55fcbad1;
++  *((int *)&__m128_result[0]) = 0x7fc00000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x06070607;
++  *((int *)&__m128_op0[2]) = 0x00000807;
++  *((int *)&__m128_op0[1]) = 0x0707f8f8;
++  *((int *)&__m128_op0[0]) = 0x03e8157e;
++  *((int *)&__m128_result[3]) = 0x5c303f97;
++  *((int *)&__m128_result[2]) = 0x61ff9049;
++  *((int *)&__m128_result[1]) = 0x5bafa1dd;
++  *((int *)&__m128_result[0]) = 0x5d3e1e1d;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xfff7fffe;
++  *((int *)&__m128_op0[2]) = 0xfffa01ff;
++  *((int *)&__m128_op0[1]) = 0xfffbfffe;
++  *((int *)&__m128_op0[0]) = 0xfffe01ff;
++  *((int *)&__m128_result[3]) = 0xfff7fffe;
++  *((int *)&__m128_result[2]) = 0xfffa01ff;
++  *((int *)&__m128_result[1]) = 0xfffbfffe;
++  *((int *)&__m128_result[0]) = 0xfffe01ff;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x45000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x44000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x3cb504f3;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x3d3504f3;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00020001;
++  *((int *)&__m128_op0[0]) = 0x00020002;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x607fffc0;
++  *((int *)&__m128_result[0]) = 0x607fff80;
++  __m128_out = __lsx_vfrsqrt_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000002;
++  *((int *)&__m128_op0[2]) = 0x00000002;
++  *((int *)&__m128_op0[1]) = 0x00000003;
++  *((int *)&__m128_op0[0]) = 0x00000003;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xf6e91c00;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x51cfd7c0;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x880c91b8;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x2d1da85b;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xfffffffa;
++  *((int *)&__m128_op0[2]) = 0xfffffffa;
++  *((int *)&__m128_op0[1]) = 0xfffffffa;
++  *((int *)&__m128_op0[0]) = 0xfffffffa;
++  *((int *)&__m128_result[3]) = 0xfffffffa;
++  *((int *)&__m128_result[2]) = 0xfffffffa;
++  *((int *)&__m128_result[1]) = 0xfffffffa;
++  *((int *)&__m128_result[0]) = 0xfffffffa;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffff0001;
++  *((int *)&__m128_op0[2]) = 0xffff0001;
++  *((int *)&__m128_op0[1]) = 0xffff0001;
++  *((int *)&__m128_op0[0]) = 0xffff0001;
++  *((int *)&__m128_result[3]) = 0xffff0001;
++  *((int *)&__m128_result[2]) = 0xffff0001;
++  *((int *)&__m128_result[1]) = 0xffff0001;
++  *((int *)&__m128_result[0]) = 0xffff0001;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x0a000000;
++  *((int *)&__m128_op0[2]) = 0x0a000000;
++  *((int *)&__m128_op0[1]) = 0x0a000000;
++  *((int *)&__m128_op0[0]) = 0x0a000000;
++  *((int *)&__m128_result[3]) = 0x75000000;
++  *((int *)&__m128_result[2]) = 0x75000000;
++  *((int *)&__m128_result[1]) = 0x75000000;
++  *((int *)&__m128_result[0]) = 0x75000000;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfrecip_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-floating-point-ins.patch b/LoongArch-Add-tests-for-SX-vector-floating-point-ins.patch
new file mode 100644
index 0000000000000000000000000000000000000000..35f73308f6ac107d28704b3602511568c51e0363
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-floating-point-ins.patch
@@ -0,0 +1,4316 @@
+From f9098b58fe79ba960e41b7ec6a05ba2ea18ca02e Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 09:42:39 +0800
+Subject: [PATCH 079/124] LoongArch: Add tests for SX vector floating-point
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vffint-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vffint-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vffint-3.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-3.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vftint-4.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vfcvt-1.c        |  398 +++++++
+ .../loongarch/vector/lsx/lsx-vfcvt-2.c        |  278 +++++
+ .../loongarch/vector/lsx/lsx-vffint-1.c       |  161 +++
+ .../loongarch/vector/lsx/lsx-vffint-2.c       |  264 +++++
+ .../loongarch/vector/lsx/lsx-vffint-3.c       |  102 ++
+ .../loongarch/vector/lsx/lsx-vfrint_d.c       |  230 ++++
+ .../loongarch/vector/lsx/lsx-vfrint_s.c       |  350 ++++++
+ .../loongarch/vector/lsx/lsx-vftint-1.c       |  349 ++++++
+ .../loongarch/vector/lsx/lsx-vftint-2.c       |  695 +++++++++++
+ .../loongarch/vector/lsx/lsx-vftint-3.c       | 1028 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vftint-4.c       |  345 ++++++
+ 11 files changed, 4200 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
+new file mode 100644
+index 000000000..d4a86e262
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-1.c
+@@ -0,0 +1,398 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00e0000000e00000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002a55005501;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002a55000001;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x36280000;
++  *((int *)&__m128_result[1]) = 0x42a00000;
++  *((int *)&__m128_result[0]) = 0x42a02000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xf436f3f5;
++  *((int *)&__m128_op0[0]) = 0x2f4ef4a8;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcfb799f1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0282800002828282;
++  *((int *)&__m128_result[3]) = 0xffffe000;
++  *((int *)&__m128_result[2]) = 0xffffe000;
++  *((int *)&__m128_result[1]) = 0xc1f6e000;
++  *((int *)&__m128_result[0]) = 0xbb3e2000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000040004000100;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x36de0000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x3be14000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x41dfffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x403be000;
++  *((int *)&__m128_result[2]) = 0xffffe000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x63637687;
++  *((int *)&__m128_op0[2]) = 0x636316bb;
++  *((int *)&__m128_op0[1]) = 0x63636363;
++  *((int *)&__m128_op0[0]) = 0x63636363;
++  *((unsigned long *)&__m128d_result[1]) = 0x446c6ed0e0000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x446c62d760000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x000000ff;
++  *((int *)&__m128_op0[2]) = 0x000000ff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x371fe00000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x371fe00000000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff7fff7ef;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80808080ffffffff;
++  *((int *)&__m128_result[3]) = 0xffffe000;
++  *((int *)&__m128_result[2]) = 0xffffe000;
++  *((int *)&__m128_result[1]) = 0xc6ffe000;
++  *((int *)&__m128_result[0]) = 0xc6fde000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffe0000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffe1ffc100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000400000;
++  *((int *)&__m128_result[3]) = 0xfffc2000;
++  *((int *)&__m128_result[2]) = 0xfff82000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000b3a6;
++  *((int *)&__m128_op0[2]) = 0x000067da;
++  *((int *)&__m128_op0[1]) = 0x00004e42;
++  *((int *)&__m128_op0[0]) = 0x0000c26a;
++  *((unsigned long *)&__m128d_result[1]) = 0x379674c000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x3789f68000000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xffff0000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffe0000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001001001000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4195d926d8018000;
++  *((int *)&__m128_result[3]) = 0x33800000;
++  *((int *)&__m128_result[2]) = 0x35800000;
++  *((int *)&__m128_result[1]) = 0x37800000;
++  *((int *)&__m128_result[0]) = 0x37000000;
++  __m128_out = __lsx_vfcvth_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvth_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((int *)&__m128_result[3]) = 0xffffe000;
++  *((int *)&__m128_result[2]) = 0xffffe000;
++  *((int *)&__m128_result[1]) = 0xffffe000;
++  *((int *)&__m128_result[0]) = 0xffffe000;
++  __m128_out = __lsx_vfcvtl_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a;
++  *((int *)&__m128_result[3]) = 0x35200000;
++  *((int *)&__m128_result[2]) = 0x35200000;
++  *((int *)&__m128_result[1]) = 0x35200000;
++  *((int *)&__m128_result[0]) = 0x35200000;
++  __m128_out = __lsx_vfcvtl_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000100;
++  *((int *)&__m128_op0[2]) = 0x0f00fe00;
++  *((int *)&__m128_op0[1]) = 0x0000017f;
++  *((int *)&__m128_op0[0]) = 0xff00fe7f;
++  *((unsigned long *)&__m128d_result[1]) = 0x3727f00000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xc7e01fcfe0000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000020;
++  *((int *)&__m128_op0[0]) = 0x00000020;
++  *((unsigned long *)&__m128d_result[1]) = 0x36f0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x36f0000000000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xbd994889;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0a092444;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x3941248880000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x62cbf96e4acfaf40;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf0bc9a5278285a4a;
++  *((int *)&__m128_result[3]) = 0xc6178000;
++  *((int *)&__m128_result[2]) = 0xbb4a4000;
++  *((int *)&__m128_result[1]) = 0x47050000;
++  *((int *)&__m128_result[0]) = 0x43494000;
++  __m128_out = __lsx_vfcvtl_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00040004;
++  *((int *)&__m128_op0[2]) = 0x00040004;
++  *((int *)&__m128_op0[1]) = 0x00040004;
++  *((int *)&__m128_op0[0]) = 0x00040004;
++  *((unsigned long *)&__m128d_result[1]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x37c0001000000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00;
++  *((int *)&__m128_result[3]) = 0xffe00000;
++  *((int *)&__m128_result[2]) = 0xffe00000;
++  *((int *)&__m128_result[1]) = 0xffe00000;
++  *((int *)&__m128_result[0]) = 0xffe00000;
++  __m128_out = __lsx_vfcvtl_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvtl_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvtl_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xffffe000;
++  *((int *)&__m128_result[0]) = 0xffffe000;
++  __m128_out = __lsx_vfcvtl_s_h (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x007f7f7f;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x380fdfdfc0000000;
++  __m128d_out = __lsx_vfcvtl_d_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
+new file mode 100644
+index 000000000..e8f4f12b9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcvt-2.c
+@@ -0,0 +1,278 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x004200a0;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x004200a0;
++  *((int *)&__m128_op0[0]) = 0x00200001;
++  *((int *)&__m128_op1[3]) = 0x004200a0;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x004200a0;
++  *((int *)&__m128_op1[0]) = 0x00200000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00010001;
++  *((int *)&__m128_op1[2]) = 0x0001007c;
++  *((int *)&__m128_op1[1]) = 0x00010001;
++  *((int *)&__m128_op1[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x80808080;
++  *((int *)&__m128_op1[2]) = 0x80808080;
++  *((int *)&__m128_op1[1]) = 0x80808080;
++  *((int *)&__m128_op1[0]) = 0x80808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xfffffffc;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffffc;
++  *((int *)&__m128_op1[3]) = 0x00000001;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000103;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcvt_h_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000049000000c0;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffff29;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000100000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7ff0000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000002c002400;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long *)&__m128d_op1[0]) = 0x28bf0351ec69b5f2;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000dc300003ffb;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000dc300003ffb;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000ffff3fbfffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7fffffff7fffffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x7ffffffb;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xbba0c07b51230d5c;
++  *((unsigned long *)&__m128d_op0[0]) = 0xa15f3f9e8763c2b9;
++  *((unsigned long *)&__m128d_op1[1]) = 0xbba0c07b51230d5c;
++  *((unsigned long *)&__m128d_op1[0]) = 0xa15f3f9e8763c2b9;
++  *((int *)&__m128_result[3]) = 0x9d0603db;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x9d0603db;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128d_op1[1]) = 0x8101010181010101;
++  *((unsigned long *)&__m128d_op1[0]) = 0x8101010181010101;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffc00000ff800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffdfffe80008000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0xffeffff4;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000090;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000090;
++  *((unsigned long *)&__m128d_op1[1]) = 0x004eff6200d2ff76;
++  *((unsigned long *)&__m128d_op1[0]) = 0xff70002800be00a0;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0xff800000;
++  __m128_out = __lsx_vfcvt_s_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
+new file mode 100644
+index 000000000..85db95762
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-1.c
+@@ -0,0 +1,161 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m128d_result[0]) = 0xbff0000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x40cd120000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x4050000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0086000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0082000000000007;
++  *((unsigned long *)&__m128d_result[1]) = 0x4160c00000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x4110000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000051649b6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000003e0000003f;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x41945926d8000000;
++  __m128d_out = __lsx_vffinth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe82fe0200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe82fe0200000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xc177d01fe0000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128d_result[1]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x40f0001000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x40f3fa0000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xc0fffff000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffintl_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
+new file mode 100644
+index 000000000..f8839cfcd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-2.c
+@@ -0,0 +1,264 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03ff03ff03ff03ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x438ff81ff81ff820;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffint_d_l (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128d_result[1]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128d_result[0]) = 0x43d3e0000013e000;
++  __m128d_out = __lsx_vffint_d_l (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffint_d_l (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffint_d_l (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xbff0000000000000;
++  __m128d_out = __lsx_vffint_d_l (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0674c8868a74fc80;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906;
++  *((unsigned long *)&__m128d_result[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128d_result[0]) = 0xc3818bffe7b7a7b8;
++  __m128d_out = __lsx_vffint_d_l (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((int *)&__m128_result[3]) = 0x4b7f00ff;
++  *((int *)&__m128_result[2]) = 0x4b7f00ff;
++  *((int *)&__m128_result[1]) = 0x4b7f00ff;
++  *((int *)&__m128_result[0]) = 0x4b7f00ff;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004;
++  *((int *)&__m128_result[3]) = 0x40800000;
++  *((int *)&__m128_result[2]) = 0x4b800000;
++  *((int *)&__m128_result[1]) = 0x47800080;
++  *((int *)&__m128_result[0]) = 0x40800000;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x47000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x76f424887fffffff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x3f800000;
++  *((int *)&__m128_result[1]) = 0x4eede849;
++  *((int *)&__m128_result[0]) = 0x4f000000;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa352bfac9269e0aa;
++  *((int *)&__m128_result[3]) = 0xce23d33d;
++  *((int *)&__m128_result[2]) = 0x4edd53ea;
++  *((int *)&__m128_result[1]) = 0xceb95a81;
++  *((int *)&__m128_result[0]) = 0xcedb2c3f;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x3f800000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x467fe000;
++  __m128_out = __lsx_vffint_s_w (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0xbf800000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xcf000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x5eff0000;
++  *((int *)&__m128_result[2]) = 0x5eff0000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000e3;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((int *)&__m128_result[3]) = 0x43630000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xdc159371;
++  *((int *)&__m128_result[0]) = 0x4f7fff00;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x42800000;
++  *((int *)&__m128_result[0]) = 0x42800000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x43800000;
++  *((int *)&__m128_result[0]) = 0x43800000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x59f7fd70;
++  *((int *)&__m128_result[0]) = 0x59f7fd70;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b;
++  *((int *)&__m128_result[3]) = 0x577fff00;
++  *((int *)&__m128_result[2]) = 0x577fff00;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x596f0000;
++  __m128_out = __lsx_vffint_s_l (__m128i_op0, __m128i_op1);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
+new file mode 100644
+index 000000000..9150e27ca
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vffint-3.c
+@@ -0,0 +1,102 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8493941335f5cc0c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x625a7312befcb21e;
++  *((unsigned long *)&__m128d_result[1]) = 0x43e092728266beba;
++  *((unsigned long *)&__m128d_result[0]) = 0x43d8969cc4afbf2d;
++  __m128d_out = __lsx_vffint_d_lu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffint_d_lu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffint_d_lu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vffint_d_lu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_wu (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001600000016;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001600000016;
++  *((int *)&__m128_result[3]) = 0x41b00000;
++  *((int *)&__m128_result[2]) = 0x41b00000;
++  *((int *)&__m128_result[1]) = 0x41b00000;
++  *((int *)&__m128_result[0]) = 0x41b00000;
++  __m128_out = __lsx_vffint_s_wu (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((int *)&__m128_result[3]) = 0x4f800000;
++  *((int *)&__m128_result[2]) = 0x4f800000;
++  *((int *)&__m128_result[1]) = 0x4f800000;
++  *((int *)&__m128_result[0]) = 0x4f800000;
++  __m128_out = __lsx_vffint_s_wu (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000442800007b50;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0204;
++  *((int *)&__m128_result[3]) = 0x46885000;
++  *((int *)&__m128_result[2]) = 0x46f6a000;
++  *((int *)&__m128_result[1]) = 0x4f800000;
++  *((int *)&__m128_result[0]) = 0x4f7fff02;
++  __m128_out = __lsx_vffint_s_wu (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_wu (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vffint_s_wu (__m128i_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
+new file mode 100644
+index 000000000..c60ff2b46
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_d.c
+@@ -0,0 +1,230 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++/* { dg-timeout 500 } */
++#include "../simd_correctness_check.h"
++#include 
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrint_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0003000300030003;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0003000700020005;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrint_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrint_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfrint_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00ff000100ff00fe;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00ff003000ff00a0;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrint_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfd200ed2fd370775;
++  *((unsigned long *)&__m128d_op0[0]) = 0x96198318780e32c5;
++  *((unsigned long *)&__m128d_result[1]) = 0xfd200ed2fd370775;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfrint_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrne_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128d_op0[0]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128d_result[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128d_result[0]) = 0xe0404041e0404041;
++  __m128d_out = __lsx_vfrintrne_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000080800000808;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrne_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfrintrne_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000868686868686;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrne_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrp_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffc002000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfffc002000000000;
++  __m128d_out = __lsx_vfrintrp_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x9c9c9c9c00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfrintrp_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrp_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000007f00ff00ff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128d_result[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x3ff0000000000000;
++  __m128d_out = __lsx_vfrintrp_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000077af9450;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x3ff0000000000000;
++  __m128d_out = __lsx_vfrintrp_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff02ff1bff02ff23;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000ffffff02fff4;
++  *((unsigned long *)&__m128d_result[1]) = 0xff02ff1bff02ff23;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrm_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrm_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x6a57a30ff0000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x6a57a30ff0000000;
++  __m128d_out = __lsx_vfrintrm_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrm_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfrintrm_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffff02000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x1f81e3779b97f4a8;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffff02000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrm_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrm_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrz_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x98ff98ff220e220d;
++  *((unsigned long *)&__m128d_op0[0]) = 0xa2e1a2601ff01ff0;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfrintrz_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrz_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000abba7980;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ccf98000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrz_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128d_result[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128d_result[0]) = 0xfe03fe3ffe01fa21;
++  __m128d_out = __lsx_vfrintrz_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x5847b72626ce61ef;
++  *((unsigned long *)&__m128d_op0[0]) = 0x110053f401e7cced;
++  *((unsigned long *)&__m128d_result[1]) = 0x5847b72626ce61ef;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfrintrz_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+new file mode 100644
+index 000000000..12cb02303
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrint_s.c
+@@ -0,0 +1,350 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++/* { dg-timeout 500 } */
++#include "../simd_correctness_check.h"
++#include 
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00100010;
++  *((int *)&__m128_op0[2]) = 0x00030000;
++  *((int *)&__m128_op0[1]) = 0x00060002;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrint_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrint_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000001;
++  *((int *)&__m128_op0[2]) = 0xca02f854;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0x00013fa0;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0xca02f854;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrint_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x000000ad;
++  *((int *)&__m128_op0[2]) = 0x00007081;
++  *((int *)&__m128_op0[1]) = 0x00000351;
++  *((int *)&__m128_op0[0]) = 0x0000b5f2;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrint_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00ff00ef;
++  *((int *)&__m128_op0[2]) = 0x00ff010f;
++  *((int *)&__m128_op0[1]) = 0x00ff00ff;
++  *((int *)&__m128_op0[0]) = 0x00ff010f;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrint_s (__m128_op0);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00130013;
++  *((int *)&__m128_op0[2]) = 0x00130013;
++  *((int *)&__m128_op0[1]) = 0x00130013;
++  *((int *)&__m128_op0[0]) = 0x00130013;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x20202020;
++  *((int *)&__m128_op0[2]) = 0x20202020;
++  *((int *)&__m128_op0[1]) = 0x20202020;
++  *((int *)&__m128_op0[0]) = 0x20207fff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x01f50000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00020004;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xfffbfffb;
++  *((int *)&__m128_op0[2]) = 0xfffbfffb;
++  *((int *)&__m128_op0[1]) = 0xfffbfffb;
++  *((int *)&__m128_op0[0]) = 0xfffbfffb;
++  *((int *)&__m128_result[3]) = 0xfffbfffb;
++  *((int *)&__m128_result[2]) = 0xfffbfffb;
++  *((int *)&__m128_result[1]) = 0xfffbfffb;
++  *((int *)&__m128_result[0]) = 0xfffbfffb;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x0ff780a1;
++  *((int *)&__m128_op0[2]) = 0x0efc01af;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xfe7f0000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0xfe7f0000;
++  __m128_out = __lsx_vfrintrne_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xefffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0xefffffff;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffff00;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffff00;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffff00;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffff00;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffb96b;
++  *((int *)&__m128_op0[2]) = 0xffff57c9;
++  *((int *)&__m128_op0[1]) = 0xffff6080;
++  *((int *)&__m128_op0[0]) = 0xffff4417;
++  *((int *)&__m128_result[3]) = 0xffffb96b;
++  *((int *)&__m128_result[2]) = 0xffff57c9;
++  *((int *)&__m128_result[1]) = 0xffff6080;
++  *((int *)&__m128_result[0]) = 0xffff4417;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00ff00ff;
++  *((int *)&__m128_op0[2]) = 0x00ff00ff;
++  *((int *)&__m128_op0[1]) = 0x62cbf96e;
++  *((int *)&__m128_op0[0]) = 0x4acfaf40;
++  *((int *)&__m128_result[3]) = 0x3f800000;
++  *((int *)&__m128_result[2]) = 0x3f800000;
++  *((int *)&__m128_result[1]) = 0x62cbf96e;
++  *((int *)&__m128_result[0]) = 0x4acfaf40;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00002000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x1fe02000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x3f800000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x3f800000;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x63636363;
++  *((int *)&__m128_op0[2]) = 0x63abdf16;
++  *((int *)&__m128_op0[1]) = 0x41f8e080;
++  *((int *)&__m128_op0[0]) = 0x16161198;
++  *((int *)&__m128_result[3]) = 0x63636363;
++  *((int *)&__m128_result[2]) = 0x63abdf16;
++  *((int *)&__m128_result[1]) = 0x42000000;
++  *((int *)&__m128_result[0]) = 0x3f800000;
++  __m128_out = __lsx_vfrintrp_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrm_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xa5c4c774;
++  *((int *)&__m128_op0[2]) = 0x856ba83b;
++  *((int *)&__m128_op0[1]) = 0x8003caef;
++  *((int *)&__m128_op0[0]) = 0x54691124;
++  *((int *)&__m128_result[3]) = 0xbf800000;
++  *((int *)&__m128_result[2]) = 0xbf800000;
++  *((int *)&__m128_result[1]) = 0xbf800000;
++  *((int *)&__m128_result[0]) = 0x54691124;
++  __m128_out = __lsx_vfrintrm_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00010002;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xff960015;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffd60015;
++  __m128_out = __lsx_vfrintrm_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0x3c992b2e;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffff730f;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0xffff730f;
++  __m128_out = __lsx_vfrintrz_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000016;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrz_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x18171615;
++  *((int *)&__m128_op0[2]) = 0x17161514;
++  *((int *)&__m128_op0[1]) = 0x16151413;
++  *((int *)&__m128_op0[0]) = 0x15141312;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrz_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x62cbf96e;
++  *((int *)&__m128_op0[2]) = 0x4acfaf40;
++  *((int *)&__m128_op0[1]) = 0xf0bc9a52;
++  *((int *)&__m128_op0[0]) = 0x78285a4a;
++  *((int *)&__m128_result[3]) = 0x62cbf96e;
++  *((int *)&__m128_result[2]) = 0x4acfaf40;
++  *((int *)&__m128_result[1]) = 0xf0bc9a52;
++  *((int *)&__m128_result[0]) = 0x78285a4a;
++  __m128_out = __lsx_vfrintrz_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfrintrz_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
+new file mode 100644
+index 000000000..8d0d56632
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-1.c
+@@ -0,0 +1,349 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000040d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x004f1fcfd01f9f9f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x9f4fcfcfcf800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x9c7c266e3faa293c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000015d926c7;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000e41b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrp_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000777777777777;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffff7777ffff7777;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrp_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000004000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xf4b6f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff80ffffffffff80;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000ff80ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000b5207f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xbff0000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xc0f3fa0080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffec060;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_l_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+new file mode 100644
+index 000000000..5dba807f6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-2.c
+@@ -0,0 +1,695 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00d4ccb8;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00124888;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfff00000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xfff00000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x80000000;
++  *((int *)&__m128_op0[2]) = 0xffffd860;
++  *((int *)&__m128_op0[1]) = 0x7fffffff;
++  *((int *)&__m128_op0[0]) = 0x80000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00008000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00008000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff80ffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x7ffffffe;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x4f804f80;
++  *((int *)&__m128_op0[0]) = 0x4f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0000007b;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000600;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x3f800000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x04870ba0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00009c7c;
++  *((int *)&__m128_op0[0]) = 0x00007176;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0667ae56;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftinth_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x887c8beb;
++  *((int *)&__m128_op0[2]) = 0x969e00f2;
++  *((int *)&__m128_op0[1]) = 0x101f8b68;
++  *((int *)&__m128_op0[0]) = 0x0b6f8095;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00020000;
++  *((int *)&__m128_op0[2]) = 0x00020000;
++  *((int *)&__m128_op0[1]) = 0x000001fc;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00020000;
++  *((int *)&__m128_op0[0]) = 0xffff0001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0a752a55;
++  *((int *)&__m128_op0[1]) = 0x0a753500;
++  *((int *)&__m128_op0[0]) = 0xa9fa0d06;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrnel_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7fffffff;
++  *((int *)&__m128_op0[2]) = 0x7fffffff;
++  *((int *)&__m128_op0[1]) = 0x7fffffff;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x000d0254;
++  *((int *)&__m128_op0[2]) = 0x0000007e;
++  *((int *)&__m128_op0[1]) = 0x00000014;
++  *((int *)&__m128_op0[0]) = 0x00140014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x39412488;
++  *((int *)&__m128_op0[0]) = 0x80000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000014;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00010001;
++  *((int *)&__m128_op0[2]) = 0x00010001;
++  *((int *)&__m128_op0[1]) = 0x00010001;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x2e34594c;
++  *((int *)&__m128_op0[0]) = 0x3b000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrpl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrml_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrml_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x7ffffffe;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrml_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00010001;
++  *((int *)&__m128_op0[2]) = 0x00010001;
++  *((int *)&__m128_op0[1]) = 0x00010001;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrml_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x7ff000ff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrml_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrml_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00ff00ff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfffefffe;
++  *((int *)&__m128_op0[2]) = 0xfffeffff;
++  *((int *)&__m128_op0[1]) = 0xfffefffe;
++  *((int *)&__m128_op0[0]) = 0xfffeffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000033a;
++  *((int *)&__m128_op0[2]) = 0x0bde0853;
++  *((int *)&__m128_op0[1]) = 0x0a960e6b;
++  *((int *)&__m128_op0[0]) = 0x0a4f0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzl_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x7ffffffe;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrneh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xfffffffe;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrneh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrneh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000001;
++  *((int *)&__m128_op0[2]) = 0x7ffeffff;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0x7ffeffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrneh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrneh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrneh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x80808080;
++  *((int *)&__m128_op0[0]) = 0x80638063;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrph_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrph_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000080;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrph_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrph_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x80000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrmh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrmh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00ff00ff;
++  *((int *)&__m128_op0[2]) = 0x00ff00ff;
++  *((int *)&__m128_op0[1]) = 0x62cbf96e;
++  *((int *)&__m128_op0[0]) = 0x4acfaf40;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrmh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0x0000ac26;
++  *((int *)&__m128_op0[1]) = 0x00ff0000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrmh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x6420e020;
++  *((int *)&__m128_op0[2]) = 0x8400c4e3;
++  *((int *)&__m128_op0[1]) = 0x20c4e0c4;
++  *((int *)&__m128_op0[0]) = 0xe0da6499;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfbffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x7bffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x18171615;
++  *((int *)&__m128_op0[2]) = 0x17161514;
++  *((int *)&__m128_op0[1]) = 0x16151413;
++  *((int *)&__m128_op0[0]) = 0x15141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x046a09ec;
++  *((int *)&__m128_op0[0]) = 0x009c0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x4f4f4f4f;
++  *((int *)&__m128_op0[2]) = 0x4f4f4f4f;
++  *((int *)&__m128_op0[1]) = 0x4f4f4f4f;
++  *((int *)&__m128_op0[0]) = 0x4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000cf4f4f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000cf4f4f00;
++  __m128i_out = __lsx_vftintrzh_l_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+new file mode 100644
+index 000000000..7f6d2f4d1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-3.c
+@@ -0,0 +1,1028 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128d_op1[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000ffff;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x0000ffff;
++  *((int *)&__m128_op0[0]) = 0x0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000001;
++  *((int *)&__m128_op0[2]) = 0xfffffffe;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0xfffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00040100;
++  *((int *)&__m128_op0[1]) = 0x00010001;
++  *((int *)&__m128_op0[0]) = 0x00010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffffffffffff800;
++  *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000080;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000001;
++  *((int *)&__m128_op0[2]) = 0xfffffffe;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0xfffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000e0180000e810;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000f0080000f800;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000e0180000e810;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000f0080000f800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffd30000;
++  *((int *)&__m128_op0[2]) = 0x00130000;
++  *((int *)&__m128_op0[1]) = 0xffd30000;
++  *((int *)&__m128_op0[0]) = 0x00130000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xe1000000;
++  *((int *)&__m128_op0[2]) = 0x4deb2610;
++  *((int *)&__m128_op0[1]) = 0xe101e001;
++  *((int *)&__m128_op0[0]) = 0x4dec4089;
++  *((unsigned long *)&__m128i_result[1]) = 0x800000001d64c200;
++  *((unsigned long *)&__m128i_result[0]) = 0x800000001d881120;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x76f42488;
++  *((int *)&__m128_op0[0]) = 0x80000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000001f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0202f5f80000ff00;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x003fffc0;
++  *((int *)&__m128_op0[2]) = 0xffc0003f;
++  *((int *)&__m128_op0[1]) = 0xffc0ffc0;
++  *((int *)&__m128_op0[0]) = 0x003f003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffff7fffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffff8000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x42652524;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000003900000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff00ff7f;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0x7f800000;
++  *((int *)&__m128_op0[1]) = 0x2d1da85b;
++  *((int *)&__m128_op0[0]) = 0x7f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fffffff;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x80307028;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x8040007f;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000fefefe6a;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000c2bac2c2;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000004fc04f81;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000004fc04f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001f;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000003a0000003a;
++  *((unsigned long *)&__m128d_op1[1]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000068;
++  *((unsigned long *)&__m128d_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128d_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x4429146a7b4c88b2;
++  *((unsigned long *)&__m128d_op0[0]) = 0xe22b3595efa4aa0c;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff80000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000400000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffffff5;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0xe7e5560400010001;
++  *((unsigned long *)&__m128d_op1[0]) = 0xe7e5dabf00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x03050302;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x03010302;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000600007fff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000008ffffa209;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x046a09ec009c0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x000aa822;
++  *((int *)&__m128_op0[2]) = 0xa79308f6;
++  *((int *)&__m128_op0[1]) = 0x03aa355e;
++  *((int *)&__m128_op0[0]) = 0x1d37b5a1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffff00;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrne_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00001802;
++  *((int *)&__m128_op0[0]) = 0x041b0013;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vftintrp_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x004200a000200000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001000101fd01fe;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000c2f90000bafa;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff80ffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x7ffffffe;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0101080408040804;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0804080407040804;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0101080408040804;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0804080407040804;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00010001;
++  *((int *)&__m128_op0[2]) = 0x00010001;
++  *((int *)&__m128_op0[1]) = 0x00010001;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vftintrp_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000003ffda00f3;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000003ffda00f3;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfffffadf;
++  *((int *)&__m128_op0[2]) = 0xfedbfefe;
++  *((int *)&__m128_op0[1]) = 0x5f5f7bfe;
++  *((int *)&__m128_op0[0]) = 0xdefb5ada;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000;
++  __m128i_out = __lsx_vftintrp_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffa6ff91fdd8ef77;
++  *((unsigned long *)&__m128d_op0[0]) = 0x061202bffb141c38;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfefffffffed08f77;
++  *((unsigned long *)&__m128d_op1[0]) = 0x8160cdd2f365ed0d;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000001;
++  *((int *)&__m128_op0[2]) = 0x084314a6;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0x084314a6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vftintrp_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x3f413f4100000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7f801fe000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000017fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vftintrp_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x3a800000;
++  *((int *)&__m128_op0[2]) = 0x3a800000;
++  *((int *)&__m128_op0[1]) = 0x000ef000;
++  *((int *)&__m128_op0[0]) = 0x0000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vftintrp_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x10404000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x09610001;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000001a;
++  *((int *)&__m128_op0[2]) = 0xfffffff7;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0800080008000800;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000202fe02;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffff00fc0000ff02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00040004;
++  *((int *)&__m128_op0[2]) = 0x00040004;
++  *((int *)&__m128_op0[1]) = 0x00040004;
++  *((int *)&__m128_op0[0]) = 0x00040004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00ffff00;
++  *((int *)&__m128_op0[2]) = 0xff00ff00;
++  *((int *)&__m128_op0[1]) = 0x00ffff00;
++  *((int *)&__m128_op0[0]) = 0xff00ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x013ec13e;
++  *((int *)&__m128_op0[1]) = 0xc03fc03f;
++  *((int *)&__m128_op0[0]) = 0xc0ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffdfffffff8;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7ffffffb;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x43800000;
++  *((int *)&__m128_op0[0]) = 0x43800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000014;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrm_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffff7;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x80307028ffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x8040007fffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff84fff4;
++  *((int *)&__m128_op0[2]) = 0xff84fff4;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7fff0007e215b122;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7ffeffff7bfff828;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x07ffc000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffff0000;
++  *((int *)&__m128_op0[0]) = 0x0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xf039b8c0;
++  *((int *)&__m128_op0[2]) = 0xc61e81ef;
++  *((int *)&__m128_op0[1]) = 0x6db7da53;
++  *((int *)&__m128_op0[0]) = 0xfbd2e34b;
++  *((unsigned long *)&__m128i_result[1]) = 0x80000000ffffd860;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff80000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00020000;
++  *((int *)&__m128_op0[0]) = 0xffff0001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00027113;
++  *((int *)&__m128_op0[2]) = 0x50a27112;
++  *((int *)&__m128_op0[1]) = 0x00d57017;
++  *((int *)&__m128_op0[0]) = 0x94027113;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff80ff80;
++  *((int *)&__m128_op0[2]) = 0x7e017f01;
++  *((int *)&__m128_op0[1]) = 0x7f3b7f3f;
++  *((int *)&__m128_op0[0]) = 0x7f3b7f21;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vftintrz_w_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000011ff040;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000047fe2f0;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000047fe2f0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_w_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
+new file mode 100644
+index 000000000..9c5bb9131
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vftint-4.c
+@@ -0,0 +1,345 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x0000c77c;
++  *((int *)&__m128_op0[2]) = 0x000047cd;
++  *((int *)&__m128_op0[1]) = 0x0000c0f1;
++  *((int *)&__m128_op0[0]) = 0x00006549;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0xffffffee;
++  *((int *)&__m128_op0[0]) = 0x00000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0000ffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x63636363;
++  *((int *)&__m128_op0[2]) = 0x63636363;
++  *((int *)&__m128_op0[1]) = 0x63636363;
++  *((int *)&__m128_op0[0]) = 0x63636363;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vftint_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xfffffffe;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x02020004;
++  *((int *)&__m128_op0[2]) = 0x02020202;
++  *((int *)&__m128_op0[1]) = 0x00002000;
++  *((int *)&__m128_op0[0]) = 0x00010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7fff7fff;
++  *((int *)&__m128_op0[2]) = 0x7fff7fff;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0x0000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x000000ff;
++  *((int *)&__m128_op0[2]) = 0x808000ff;
++  *((int *)&__m128_op0[1]) = 0x000000ff;
++  *((int *)&__m128_op0[0]) = 0x808000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x7f7f7f7f;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0x00000010;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00002000;
++  *((int *)&__m128_op0[2]) = 0x00002000;
++  *((int *)&__m128_op0[1]) = 0x10000000;
++  *((int *)&__m128_op0[0]) = 0x10000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x67eb85af;
++  *((int *)&__m128_op0[2]) = 0xb2ebb000;
++  *((int *)&__m128_op0[1]) = 0xc8847ef6;
++  *((int *)&__m128_op0[0]) = 0xed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_wu_s (__m128_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000400000007004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x3c600000ff800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x6a57a30ff0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000009c83e21a;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000022001818;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftint_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x800000001d64c200;
++  *((unsigned long *)&__m128d_op0[0]) = 0x800000001d881120;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000f0009d3c;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000016fff9dff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000040a04000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000040a04000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x03fc03fc03fc03fc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vftintrz_lu_d (__m128d_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-handling-and-shuff.patch b/LoongArch-Add-tests-for-SX-vector-handling-and-shuff.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a65a289a028c6f79f65098f011e87c4207178dc8
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-handling-and-shuff.patch
@@ -0,0 +1,5411 @@
+From ab7f1db887733fabf41c7a39730c48376e29100c Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 11:34:56 +0800
+Subject: [PATCH 096/124] LoongArch: Add tests for SX vector handling and
+ shuffle instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vbsll.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbsrl.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vextrins.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vilvh.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vilvl.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vpackev.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vpackod.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vpickev.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vpickod.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vpremi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vreplve.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vreplvei.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vshuf.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vbsll.c          |  83 +++
+ .../loongarch/vector/lsx/lsx-vbsrl.c          |  55 ++
+ .../loongarch/vector/lsx/lsx-vextrins.c       | 479 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vilvh.c          | 353 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vilvl.c          | 327 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vinsgr2vr.c      | 278 ++++++++++
+ .../loongarch/vector/lsx/lsx-vpackev.c        | 452 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vpackod.c        | 461 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vpickev.c        | 362 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vpickod.c        | 336 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vpickve2gr.c     | 488 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vpremi.c         |  20 +
+ .../loongarch/vector/lsx/lsx-vreplgr2vr.c     | 212 ++++++++
+ .../loongarch/vector/lsx/lsx-vreplve.c        | 300 +++++++++++
+ .../loongarch/vector/lsx/lsx-vreplvei.c       | 293 +++++++++++
+ .../loongarch/vector/lsx/lsx-vshuf.c          | 394 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vshuf4i.c        | 348 +++++++++++++
+ 17 files changed, 5241 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
+new file mode 100644
+index 000000000..34246c551
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsll.c
+@@ -0,0 +1,83 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ffffff000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff000000ff00;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xff00000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0a00000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001580000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsll_v (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
+new file mode 100644
+index 000000000..986b7d566
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbsrl.c
+@@ -0,0 +1,55 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000040100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000;
++  __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x003fffffff000000;
++  __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0005fe0300010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe03000101010000;
++  __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000d3259a;
++  __m128i_out = __lsx_vbsrl_v (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
+new file mode 100644
+index 000000000..8d4158b57
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextrins.c
+@@ -0,0 +1,479 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x92);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0200020002000200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0200020002000200;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff02000200;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11;
++  *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be55700b5;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000040400000383;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0c03e17edd781b11;
++  *((unsigned long *)&__m128i_result[0]) = 0x342caf9bffff1fff;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xcc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000a16316b0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000063636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x16161616a16316b0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000a16316b0;
++  *((unsigned long *)&__m128i_result[0]) = 0x16161616a16316b0;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xa7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff489b693120950;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc45a851c40c18;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffc45a851c40c18;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x48);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xcc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000005d5d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x41);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fefefe6a;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x7c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ffffffeffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0xff80ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ffffffeffffffff;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xe6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000a0000000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000a00000009;
++  *((unsigned long *)&__m128i_result[1]) = 0x000a000a0000000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xaf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x67);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004fcfcfd01f9f9f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9f4fcfcfcf800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x004fcfcfd01f9f9f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9f4fcfcfcf800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x004f1fcfd01f9f9f;
++  *((unsigned long *)&__m128i_result[0]) = 0x9f4fcfcfcf800000;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xda);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x75b043c4d17db125;
++  *((unsigned long *)&__m128i_op0[0]) = 0xeef8227b596117b1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x75b043c4d17db125;
++  *((unsigned long *)&__m128i_result[0]) = 0xeef8227b4f8017b1;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000de32400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x77);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63996399;
++  *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x6363797c63990099;
++  *((unsigned long *)&__m128i_result[0]) = 0x171f0a1f6376441f;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x94);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bdfffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0bd80bd80bd80000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xf9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x41dfbe1f41e0ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffc2ffe000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffc100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x41dfbe1f41e0ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xec);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55;
++  *((unsigned long *)&__m128i_result[1]) = 0x5237c1baffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x7d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffbd994889;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000a092444;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000890000000000;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x58);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff8607db959f;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff0cff78ff96ff14;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000fea0000fffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0xc2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x01ef013f01e701f8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x35bb8d32b2625c00;
++  *((unsigned long *)&__m128i_result[1]) = 0x00008d3200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xea);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8003000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4040ffffc0400004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8003000000020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x64);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x74);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_result[0]) = 0xff000001ffff9515;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0x67);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xf4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x71);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x82);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xd5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xf3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbbe5560400010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe7e5dabf00010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbbe5560400010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe7e5dabf00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xe7e5560400010001;
++  *((unsigned long *)&__m128i_result[0]) = 0xe7e5dabf00010001;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xf3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x5d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x24);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xb6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x975ca6046e2e4889;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1748c4f9ed1a5870;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x6a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffc606ec5;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000014155445;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x76);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000024170000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000aa822a79308f6;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000024170000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0x56);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vextrins_b (__m128i_op0, __m128i_op1, 0xc5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034;
++  *((unsigned long *)&__m128i_op1[1]) = 0x01017f3c00000148;
++  *((unsigned long *)&__m128i_op1[0]) = 0x117d7f7b093d187f;
++  *((unsigned long *)&__m128i_result[1]) = 0x117d7f7b093d187f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x70);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x01533b5e7489ae24;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe519ab7e71e33848;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x01533b5e7489ae24;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffab7e71e33848;
++  __m128i_out = __lsx_vextrins_h (__m128i_op0, __m128i_op1, 0xbc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff760386bdae46;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc1fc7941bc7e00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff7603;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0xc3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff2356fe165486;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000003b0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff2356fe165486;
++  __m128i_out = __lsx_vextrins_w (__m128i_op0, __m128i_op1, 0x70);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextrins_d (__m128i_op0, __m128i_op1, 0x8a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
+new file mode 100644
+index 000000000..aa802b295
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvh.c
+@@ -0,0 +1,353 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007fffff00000000;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6a1a3fbb3c90260e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x195f307a5d04acbb;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8644000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaed495f03343a685;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbe6ed563;
++  *((unsigned long *)&__m128i_result[1]) = 0x8644ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000fffe;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000e13;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000e13;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000a000a00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000a000a00000000;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000004f804f80;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000004f804f80;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x004f0080004f0080;
++  *((unsigned long *)&__m128i_result[0]) = 0x004f0080004f0080;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ffa7f8ff81;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000003f0080ffc0;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007fff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000a7f87fffff81;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00003f8000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00003f8000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000ffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000080003f80ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x202020202020ff20;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x2000200020002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x2000200020002000;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808ffff0808ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808ffff0808ffff;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000157;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010058;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000200;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002008360500088;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000f3040705;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00;
++  __m128i_out = __lsx_vilvh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vilvh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
+new file mode 100644
+index 000000000..88c66f220
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vilvl.c
+@@ -0,0 +1,327 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000201000000000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b;
++  __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffcff;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7404443064403aec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000d6eefefc0498;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff7f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2d1da85b7f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x002d001dd6a8ee5b;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe7ffc8004009800;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000c7fff000c;
++  *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000;
++  __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000007f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001e8e1d8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000e400000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000001e8e1d8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000e400000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000e4e4;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000101;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000;
++  __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffe0;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80005613;
++  *((unsigned long *)&__m128i_op1[0]) = 0x007f800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000807f80808000;
++  *((unsigned long *)&__m128i_result[0]) = 0x80006b0000000b00;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000080808000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080006b0000000b;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc0808000c0808000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xc080800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc080800000000000;
++  __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007ffff001000300;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff0001000300;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007f00ff00ff00fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x8);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0014001400140000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001400000000;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000009c007c00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000071007600;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000060002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000060002;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe4c8b96e2560afe9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc001a1867fffa207;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000c0010000a186;
++  *((unsigned long *)&__m128i_result[0]) = 0x00067fff0002a207;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000014414104505;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1011050040004101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000014414104505;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1011050040004101;
++  *((unsigned long *)&__m128i_result[1]) = 0x1010111105050000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4040000041410101;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffac5cffffac5c;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffac5cffffac5c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x010169d9010169d9;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01010287010146a1;
++  *((unsigned long *)&__m128i_result[1]) = 0xff01ff01ac025c87;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01ff01ac465ca1;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff01ff01ac025c87;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01ff01ac465ca1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff01ff0100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xac465ca100000000;
++  __m128i_out = __lsx_vilvl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee;
++  *((unsigned long *)&__m128i_result[1]) = 0xf8e10000a03a0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff2427e3e2c2ee;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffe4ffe4ffe4ffe4;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffe4ffe4ffe4ffe4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01e41ffff0e440;
++  __m128i_out = __lsx_vilvl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff01ffffe41f0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff00000ffff0000;
++  __m128i_out = __lsx_vilvl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
+new file mode 100644
+index 000000000..2b9dcc0b5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vinsgr2vr.c
+@@ -0,0 +1,278 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000007942652524;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4265252400000000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  int_op1 = 0x0000007942652524;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff2524ffffffff;
++  __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0080000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5d5d5d5d5d5d5d55;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x5d5d5d005d5d5d55;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1);
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2020202020202020;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_result[0]) = 0x202020202020ff20;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00fe01fc0005fff4;
++  int_op1 = 0x0000000020202020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000820202020;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fe01fc0005fff4;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf;
++  long_op1 = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000003a24;
++  __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ef8000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ef8000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ef8000000000000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  long_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000;
++  int_op1 = 0x000000007ff00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000;
++  __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000020006;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000060000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000020006;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000600;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f1f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff000000001f1f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  long_op1 = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vinsgr2vr_d (__m128i_op0, long_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ffffff0000;
++  __m128i_out = __lsx_vinsgr2vr_w (__m128i_op0, int_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vinsgr2vr_h (__m128i_op0, int_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x04faf60009f5f092;
++  *((unsigned long *)&__m128i_op0[0]) = 0x04fafa9200000000;
++  int_op1 = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x04faf600fff5f092;
++  *((unsigned long *)&__m128i_result[0]) = 0x04fafa9200000000;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vinsgr2vr_b (__m128i_op0, int_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
+new file mode 100644
+index 000000000..030e87fd8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackev.c
+@@ -0,0 +1,452 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00001802041b0013;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf4b6f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_result[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf4b6f3f52f4ef4a8;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b;
++  *((unsigned long *)&__m128i_result[1]) = 0x00f900d7003d00e4;
++  *((unsigned long *)&__m128i_result[0]) = 0x003e00d100de002b;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc2f9bafac2fac2fa;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbdf077eee7e20468;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe3b1cc6953e7db29;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000e7e20468;
++  *((unsigned long *)&__m128i_result[0]) = 0xc2fac2fa53e7db29;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf8f8e018f8f8e810;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf8f8f008f8f8f800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000e0180000e810;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000f0080000f800;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1211100f11100f0e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x100f0e0d0f0e0d0c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xe3e3e3e3e3e3e3e3;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00007ffe00007ffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001c00ffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00007f7f;
++  *((unsigned long *)&__m128i_result[1]) = 0x000001000f00fe00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000017fff00fe7f;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000f0009d3c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000016fff9d3d;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffff000f0008d3c;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff0016fff8d3d;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff000000003c3c;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff0101ffff3d3d;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000958affff995d;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffefffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffefefffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002fffefffd0001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1202120212021202;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1202120212021202;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0202fe02fd020102;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5a6f5c53ebed3faa;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa36aca4435b8b8e1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5a6f5c53ebed3faa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa36aca4435b8b8e1;
++  *((unsigned long *)&__m128i_result[1]) = 0x5c535c533faa3faa;
++  *((unsigned long *)&__m128i_result[0]) = 0xca44ca44b8e1b8e1;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x77c0404a4000403a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x77c03fd640003fc6;
++  *((unsigned long *)&__m128i_result[1]) = 0x04c0044a0400043a;
++  *((unsigned long *)&__m128i_result[0]) = 0x04c004d6040004c6;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000006362ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000d0000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000dffff000d;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_result[1]) = 0x2080208020802080;
++  *((unsigned long *)&__m128i_result[0]) = 0x2080208020802080;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000001b0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000001b0000;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000053a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff9000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc000400000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffc000400000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001f00000000;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080000080800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x9380c4009380c400;
++  __m128i_out = __lsx_vpackev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc2007aff230027;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0080005eff600001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x01017f3c00000148;
++  *((unsigned long *)&__m128i_op1[0]) = 0x117d7f7b093d187f;
++  *((unsigned long *)&__m128i_result[1]) = 0xff23002700000148;
++  *((unsigned long *)&__m128i_result[0]) = 0xff600001093d187f;
++  __m128i_out = __lsx_vpackev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002711250a27112;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00d2701294027112;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff7112ffff7112;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff7012ffff7112;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220;
++  *((unsigned long *)&__m128i_op1[1]) = 0x30eb020302101b03;
++  *((unsigned long *)&__m128i_op1[0]) = 0x020310d0c0030220;
++  *((unsigned long *)&__m128i_result[1]) = 0x020310d0c0030220;
++  *((unsigned long *)&__m128i_result[0]) = 0x020310d0c0030220;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000eefff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf8e1a03affffe3e2;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000efffefff;
++  *((unsigned long *)&__m128i_result[0]) = 0xa03aa03ae3e2e3e2;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8140001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000010f8000081a2;
++  *((unsigned long *)&__m128i_result[0]) = 0x000069bb00000001;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
+new file mode 100644
+index 000000000..783eedae1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpackod.c
+@@ -0,0 +1,461 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000201000000000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000020100;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00003ff000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fffc00000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x03574e3a03574e3a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001fe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000015;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128i_op1[1]) = 0x803f800080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe0404041c0404040;
++  *((unsigned long *)&__m128i_result[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128i_result[0]) = 0x803f800080000000;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fe000000000000;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe80ff80ffff0000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x11000f2000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f000d2000000000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000c000ffffc000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000006f00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000c00000000000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2222272011111410;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2222272011111410;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffef8;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffdfffdfffdffee0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffdfffdf;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010100000100000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100000101000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000000010;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x01203f1e3d1c3b1a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3918371635143312;
++  *((unsigned long *)&__m128i_result[1]) = 0x21011f3f193d173b;
++  *((unsigned long *)&__m128i_result[0]) = 0xff39ff37ff35ff33;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff3fbfffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80806362;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00008080;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000100000000fc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000100000000fc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0404050404040404;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0404050404040404;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000004040504;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000004040504;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000807f80808000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80006b0000000b00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000807f00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x80006b0080808080;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000400000004000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00004000ffffffff;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080008;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffefffe00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffe00000000;
++  __m128i_out = __lsx_vpackod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x00cd006300cd0063;
++  *((unsigned long *)&__m128i_result[0]) = 0x00cd006300cd0063;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a79308f6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03aa558e1d37b5a1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff80fd820000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000aa822a79308f6;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000084d12ce;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2e34594c3b000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x002e0059003b0000;
++  __m128i_out = __lsx_vpackod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001e001e001e001e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001e001e001e001e;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffaeffaeffaeffae;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffaeffaeffaeffae;
++  *((unsigned long *)&__m128i_result[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_result[0]) = 0x001effae001effae;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000440efffff000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000440efffff000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff2356fe165486;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5efeb3165bd7653d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff2356fe165486;
++  __m128i_out = __lsx_vpackod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000cecd00004657;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000c90000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00019d9a00008cae;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vpackod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
+new file mode 100644
+index 000000000..58591f1bb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickev.c
+@@ -0,0 +1,362 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc2409edab019323f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x460f3b393ef4be3a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x460f3b393ef4be3a;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0004007c00fc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000fc0000;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xedfaedfaedfaedfa;
++  *((unsigned long *)&__m128i_op0[0]) = 0xedfaedfaedfaedfa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xedfaedfaedfaedfa;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4811fda96793b23a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8f10624016be82fd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfda9b23a624082fd;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xaaaaffebcfb748e0;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfd293eab528e7ebe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffeb48e03eab7ebe;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff00010000fff;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000120002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x2000200000013fa0;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000013fa0;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000f7d1000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x773324887fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000017161515;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000095141311;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000017fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x1716151595141311;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long *)&__m128i_op1[0]) = 0x28bf0351ec69b5f2;
++  *((unsigned long *)&__m128i_result[1]) = 0xdfa6e0c6d46cdc13;
++  *((unsigned long *)&__m128i_result[0]) = 0x21fc7081ec69b5f2;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x04c0044a0400043a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x04c004d6040004c6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[1]) = 0x044a043a04d604c6;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001b4a00007808;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808;
++  *((unsigned long *)&__m128i_result[1]) = 0x00001b4a00007808;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001b4a00007808;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f801fe000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fc03fc000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f801fe000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fc03fc000000004;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000103030102ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010102ffff;
++  __m128i_out = __lsx_vpickev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x01533b5e7489ae24;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffab7e71e33848;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x3b5eae24ab7e3848;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000009c83e21a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000022001818;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000e21a00001818;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f;
++  __m128i_out = __lsx_vpickev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
+new file mode 100644
+index 000000000..74269e319
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickod.c
+@@ -0,0 +1,336 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000401000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000401000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000001;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf436f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf4b6f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080000000000000;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff51cf8da;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffd6040188;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000101fffff8b68;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000b6fffff8095;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffff51cffffd604;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xa);
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff0cffffff18;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefffefffeff6a0c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc2f9bafac2fac2fa;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffefefe6a;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0f000d200e000c20;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x11000f200f000d20;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000805;
++  *((unsigned long *)&__m128i_op0[0]) = 0x978d95ac768d8784;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000897957687;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000408;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ff91fffffff5;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff00650001ffb0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffff0001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ca02f854;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ca02f854;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ca0200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ca0200000000;
++  __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000c6c6ee22;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6c62e8a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6ee22;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c62e8a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x21201f1e19181716;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0006000000040000;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x6363636363636363;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f801fe000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3fc03fc000000003;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f7f1fd800000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f1f00003f3f0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3f3f00007f1f0000;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff9f017f1fa0b199;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1197817fd839ea3e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000033;
++  *((unsigned long *)&__m128i_result[1]) = 0xff011fb11181d8ea;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080808000;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_result[1]) = 0x67ebb2ebc884ed3f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ddc;
++  __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000003e2;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000;
++  __m128i_out = __lsx_vpickod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpickod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
+new file mode 100644
+index 000000000..acca2bee9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpickve2gr.c
+@@ -0,0 +1,488 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x4);
++  int_result = 0x0000000000000000;
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000463fd2902d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5ccd54bbfcac806c;
++  unsigned_int_result = 0x00000000000000ac;
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x697eba2bedfa9c82;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd705c77a7025c899;
++  unsigned_int_result = 0x000000000000edfa;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0400040004000400;
++  unsigned_int_result = 0x0000000000000400;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007d3ac600;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x7);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dffbfff00000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0200400000000001;
++  unsigned_int_result = 0x0000000000000001;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffffff;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000490000004d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffffff;
++  long_int_result = 0x00000001ffffffff;
++  long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00a6ffceffb60052;
++  unsigned_int_result = 0x0000000000000084;
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xa);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  int_result = 0x00000000ffffffff;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xc);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  unsigned_int_result = 0x00000000ffffffff;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  long_int_result = 0xffffffffffffffff;
++  long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000;
++  unsigned_long_int_result = 0x3f8000003f800000;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bd80bd8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80bd8;
++  unsigned_long_int_result = 0x0bd80bd80bd80bd8;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x8);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xb);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000100c6ffef10c;
++  unsigned_int_result = 0x00000000000000ff;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f;
++  unsigned_int_result = 0x0000000020202020;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1;
++  int_result = 0x0000000000003a24;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000ffffffff;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  unsigned_int_result = 0x00000000000000ff;
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x9);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xb);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffe080f6efc100f7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xefd32176ffe100f7;
++  int_result = 0x0000000000002176;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  long_int_result = 0x0000000000000000;
++  long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  int_result = 0x0000000000000002;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80008000ec82ab51;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000800089e08000;
++  int_result = 0x0000000089e08000;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000ffffffff;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000;
++  int_result = 0x000000001d6e5000;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000;
++  int_result = 0x000000001d6e5000;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000ffffffff;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86;
++  *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112;
++  int_result = 0x000000009c0d6112;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e;
++  unsigned_int_result = 0x000000000000857a;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35;
++  *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49;
++  int_result = 0x00000000ffff8a35;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae;
++  unsigned_int_result = 0x000000000000001e;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000ffffffff;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86;
++  *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112;
++  int_result = 0x000000009c0d6112;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e;
++  unsigned_int_result = 0x000000000000857a;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35;
++  *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49;
++  int_result = 0x00000000ffff8a35;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae;
++  unsigned_int_result = 0x000000000000001e;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x8);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_int_result = 0x0000000000000000;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac;
++  long_int_result = 0x000000003ddc5dac;
++  long_int_out = __lsx_vpickve2gr_d (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, long_int_result, long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6453f5e01d6e5000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fdec000000000;
++  int_result = 0x000000001d6e5000;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  int_result = 0x00000000ffffffff;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86;
++  *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112;
++  int_result = 0x000000009c0d6112;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e;
++  unsigned_int_result = 0x000000000000857a;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x1);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35;
++  *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49;
++  int_result = 0x00000000ffff8a35;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x4);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae;
++  unsigned_int_result = 0x000000000000001e;
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0674c8868a74fc80;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906;
++  int_result = 0x00000000090b0906;
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_result = 0x0000000000000000;
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x3);
++  ASSERTEQ_int (__LINE__, int_result, int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000;
++  unsigned_int_result = 0x00000000000000ff;
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xc);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  unsigned_long_int_result = 0x0000000000000000;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f4f00004f4f0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f4f00004f4f0000;
++  unsigned_int_result = 0x000000004f4f0000;
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_int_result, unsigned_int_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000120000000d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e;
++  unsigned_long_int_result = 0x0000000e0000000e;
++  unsigned_long_int_out = __lsx_vpickve2gr_du (__m128i_op0, 0x0);
++  ASSERTEQ_int (__LINE__, unsigned_long_int_result, unsigned_long_int_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
+new file mode 100644
+index 000000000..ef0ad676e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpremi.c
+@@ -0,0 +1,20 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
+new file mode 100644
+index 000000000..a5f02b1b1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplgr2vr.c
+@@ -0,0 +1,212 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  int_op0 = 0x0000000059815d00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400;
++  __m128i_out = __lsx_vreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  long_op0 = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000400;
++  __m128i_out = __lsx_vreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  long_op0 = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800000;
++  __m128i_out = __lsx_vreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000020202020;
++  *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_result[0]) = 0x2020202020202020;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff;
++  __m128i_out = __lsx_vreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  long_op0 = 0x000000007ff00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007ff00000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007ff00000;
++  __m128i_out = __lsx_vreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  long_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_d (long_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x000000000000001e;
++  *((unsigned long *)&__m128i_result[1]) = 0x1e1e1e1e1e1e1e1e;
++  *((unsigned long *)&__m128i_result[0]) = 0x1e1e1e1e1e1e1e1e;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_w (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_h (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_op0 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplgr2vr_b (int_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
+new file mode 100644
+index 000000000..463adb48e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplve.c
+@@ -0,0 +1,300 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000045eef14fe8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000000000ac;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x78c00000ff000000;
++  int_op1 = 0x0000000000000400;
++  *((unsigned long *)&__m128i_result[1]) = 0xff000000ff000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x803f800080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe0404041c0404040;
++  int_op1 = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128i_result[0]) = 0xe0404041e0404041;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffff0001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  int_op1 = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000020006;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffb4ff;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffb4ff;
++  __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000020202020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x000000007ff00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000020006;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff4;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffff00ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ffff00ff00ff00;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  int_op1 = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b;
++  int_op1 = 0xffffffff89e08000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001b0000001b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001b0000001b;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7fefe;
++  int_op1 = 0xffffffff9c0d6112;
++  *((unsigned long *)&__m128i_result[1]) = 0xbffefdfebffefdfe;
++  *((unsigned long *)&__m128i_result[0]) = 0xbffefdfebffefdfe;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff800000ff800000;
++  __m128i_out = __lsx_vreplve_w (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf;
++  int_op1 = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0fbf0fbf0fbf0fbf;
++  *((unsigned long *)&__m128i_result[0]) = 0x0fbf0fbf0fbf0fbf;
++  __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0x00000000090b0906;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_b (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  int_op1 = 0xffffffffffff8a35;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x05dfffc3ffffffc0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000047fe2f0;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000047fe2f0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000047fe2f0;
++  __m128i_out = __lsx_vreplve_d (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffe011df03e;
++  int_op1 = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf03ef03ef03ef03e;
++  *((unsigned long *)&__m128i_result[0]) = 0xf03ef03ef03ef03e;
++  __m128i_out = __lsx_vreplve_h (__m128i_op0, int_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
+new file mode 100644
+index 000000000..a81be76f1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vreplvei.c
+@@ -0,0 +1,293 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000055555501;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000005555555554;
++  __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x697eba2bedfa9c82;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd705c77a7025c899;
++  *((unsigned long *)&__m128i_result[1]) = 0xedfaedfaedfaedfa;
++  *((unsigned long *)&__m128i_result[0]) = 0xedfaedfaedfaedfa;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000300000003;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000a0a08000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5350a08000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80010009816ac5de;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8001000184000bd8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0bd80bd80bd80bd8;
++  *((unsigned long *)&__m128i_result[0]) = 0x0bd80bd80bd80bd8;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1149a96eb1a08000;
++  *((unsigned long *)&__m128i_result[1]) = 0xb1a08000b1a08000;
++  *((unsigned long *)&__m128i_result[0]) = 0xb1a08000b1a08000;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffcc9a989a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_result[1]) = 0xadadadadadadadad;
++  *((unsigned long *)&__m128i_result[0]) = 0xadadadadadadadad;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_d (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0a753500a9fa0d06;
++  *((unsigned long *)&__m128i_result[1]) = 0x0d060d060d060d06;
++  *((unsigned long *)&__m128i_result[0]) = 0x0d060d060d060d06;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_b (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vreplvei_w (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vreplvei_h (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+new file mode 100644
+index 000000000..4e7fcc02b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+@@ -0,0 +1,394 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000007f00000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000401000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000000007f0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404;
++  *((unsigned long *)&__m128i_result[0]) = 0x0404040404000404;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000029;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x418181017dfefdff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff81;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_op1[0]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_op2[1]) = 0x800000007fffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x800000007fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00adadad00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00adadad00000000;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xedededededededed;
++  *((unsigned long *)&__m128i_result[0]) = 0xedededededededed;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x04040403fafafafc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff80;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000001a0000000b;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000080000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff6cffb5ff98ff6e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffd7ff8dffa4ff7a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92;
++  *((unsigned long *)&__m128i_op1[0]) = 0xee297a731e5c5f86;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffc0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000868686868686;
++  __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000d000d000d000d;
++  *((unsigned long *)&__m128i_result[0]) = 0x000d000d000d000d;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000300037ff000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000300a10003;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000300037ff000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000007ff000ff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0909000009090000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0909000009090000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0909000009090000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0909000009090000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x002a05a2f059094a;
++  *((unsigned long *)&__m128i_op2[0]) = 0x05ad3ba576eae048;
++  *((unsigned long *)&__m128i_result[1]) = 0x0909e0480909e048;
++  *((unsigned long *)&__m128i_result[0]) = 0x0909e0480909e048;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000c0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffff29;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00000000000000c0;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000001ffffff29;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff2900000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op2[0]) = 0x010101fe0101fe87;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101fe870101fe87;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101fe8700000000;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x2000002000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x2000002020000020;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op2[1]) = 0x8000000100000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x8000000000000103;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010300000103;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010300000000;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xada4808924882588;
++  *((unsigned long *)&__m128i_op0[0]) = 0xacad25090caca5a4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x021b7d24c9678a35;
++  *((unsigned long *)&__m128i_op1[0]) = 0x030298a6a1030a49;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xdfa6e0c6d46cdc13;
++  *((unsigned long *)&__m128i_op0[0]) = 0x21fc7081ec69b5f2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002c002400;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffff6080ffff4417;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff0015172b;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff0015172b;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff0015172b;
++  __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf0003000f0003000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35;
++  *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x021b7d24c9678a35;
++  *((unsigned long *)&__m128i_op2[0]) = 0x030298a6a1030a49;
++  *((unsigned long *)&__m128i_result[1]) = 0x021b7d24c9678a35;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f7f00007f7f0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f7f80807f7f8080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000fffe0000fffe;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffff10000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
+new file mode 100644
+index 000000000..cd441b841
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf4i.c
+@@ -0,0 +1,348 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xc9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004007c00fc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x047c0404fc00fcfc;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x8a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00ff7f00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x85);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff51cf8da;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffd6040188;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff01018888;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x50);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007d00c50177ac5b;
++  *((unsigned long *)&__m128i_op0[0]) = 0xac82aa88a972a36a;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000c5ac01015b;
++  *((unsigned long *)&__m128i_result[0]) = 0xaaacac88a3a9a96a;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x7c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000a0000000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000a00000009;
++  *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a000a0a0a00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0009090900;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00003f8000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00003f8000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x003f800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x003f800000000000;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xd2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x6c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x81);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000dffff000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x6b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5f675e96e29a5a60;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x965f5e9660e25a60;
++  *((unsigned long *)&__m128i_result[0]) = 0xff7f7fffff7f7fff;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x34);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x131211101211100f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x11100f0e100f0e0d;
++  *((unsigned long *)&__m128i_result[1]) = 0x13101213120f1112;
++  *((unsigned long *)&__m128i_result[0]) = 0x110e1011100d0f10;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xcb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000110;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000431f851f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001011010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000043431f1f;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xf0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0xd1c0c0a5baf8f8d3;
++  *((unsigned long *)&__m128i_result[0]) = 0xecbbbbc5d5f3f3f3;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0x7c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000454ffff9573;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000454ffff9573;
++  __m128i_out = __lsx_vshuf4i_b (__m128i_op0, 0xa4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xf3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xd2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x007c000d00400000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000003f00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000007c00000040;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x31);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xb9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff0000;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0xcd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x93);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f7f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007f007f00007f7f;
++  __m128i_out = __lsx_vshuf4i_h (__m128i_op0, 0x58);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080808000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080808000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x8b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x7e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7fefe;
++  *((unsigned long *)&__m128i_result[1]) = 0xfef7fefebffefdfe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefefefdfefefeef;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x002a001a001a000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000002a001a;
++  *((unsigned long *)&__m128i_result[0]) = 0x001a000b00000000;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x78);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x98);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000010f8000081a2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000069bb00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001000010f8;
++  __m128i_out = __lsx_vshuf4i_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x44);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fffff800;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fffff800;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x8a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x36);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffda6e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffe3d6;
++  *((unsigned long *)&__m128i_op1[1]) = 0xeeb1e4f4bc3763f3;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6f5edf5ada6fe3d7;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffe3d6;
++  *((unsigned long *)&__m128i_result[0]) = 0xeeb1e4f4bc3763f3;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100200001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100200001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xce23d33e43d9736c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x63b2ac27aa076aeb;
++  *((unsigned long *)&__m128i_result[1]) = 0x63b2ac27aa076aeb;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xc8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xc9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0xbf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x801d5de0000559e0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x77eb86788eebaf00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x89582bf870006860;
++  *((unsigned long *)&__m128i_op1[0]) = 0x89582bf870006860;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vshuf4i_d (__m128i_op0, __m128i_op1, 0x94);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-subtraction-instru.patch b/LoongArch-Add-tests-for-SX-vector-subtraction-instru.patch
new file mode 100644
index 0000000000000000000000000000000000000000..165df9370185196d120453a5145434c84d919ad7
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-subtraction-instru.patch
@@ -0,0 +1,4150 @@
+From dc800193eb03dc87e702d4f3aeb886337b6be870 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 10:05:37 +0800
+Subject: [PATCH 081/124] LoongArch: Add tests for SX vector subtraction
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmsub.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssub-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssub-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsub.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vhsubw-1.c       | 327 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vhsubw-2.c       | 353 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmsub.c          | 461 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vssub-1.c        | 398 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vssub-2.c        | 408 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vsub.c           | 381 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vsubi.c          | 329 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vsubwev-1.c      | 326 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vsubwev-2.c      | 417 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vsubwod-1.c      | 326 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vsubwod-2.c      | 308 ++++++++++++
+ 11 files changed, 4034 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
+new file mode 100644
+index 000000000..0b51cb8cf
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-1.c
+@@ -0,0 +1,327 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffc00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff07effffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100110002;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffeff400000df4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ff91fffffff5;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00650001ffb0;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000bfffffffe0f6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010001000a;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x41dfffffffc00000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0039ffffffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffbeffffffffffff;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0037ffdfffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0037ffdfffeb007f;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4e3e133738bb47d2;
++  *((unsigned long *)&__m128i_result[1]) = 0xff98007a004d0050;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff9ff4a0057000e;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000501ffff0005;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000600000001;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020000ffff0001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100000001;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffae001effae;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_result[1]) = 0xffaeffadffaeffad;
++  *((unsigned long *)&__m128i_result[0]) = 0xffaeffadffaeffad;
++  __m128i_out = __lsx_vhsubw_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000;
++  __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff02;
++  __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffff01;
++  __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9727b8499727b849;
++  *((unsigned long *)&__m128i_op0[0]) = 0x12755900b653f081;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7d7f13fc7c7ffbf4;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff9727ffff9727;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffe79ffffba5f;
++  __m128i_out = __lsx_vhsubw_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000100010;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000100c6ffef10c;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff70;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff9001a47e;
++  __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a6;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff59;
++  __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc;
++  __m128i_out = __lsx_vhsubw_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x343d8dc5b0ed5a08;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fffe00006aea;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000003fe0000141e;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffc01ffffebe2;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x67eb8590b2ebafe1;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x56a09e662ab46b31;
++  *((unsigned long *)&__m128i_op1[0]) = 0xb4b8122ef4054bb3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4b47edd10bfab44d;
++  __m128i_out = __lsx_vhsubw_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
+new file mode 100644
+index 000000000..26b51ee14
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vhsubw-2.c
+@@ -0,0 +1,353 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffff0000010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe00fe00fe00fd01;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe00fffefe0100f6;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff0001ffffff0a;
++  __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000017161515;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000095141311;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x76f424887fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000170014;
++  *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14;
++  __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff7cffd6ffc700b0;
++  __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vhsubw_hu_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff0001;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010100000101;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefeff00fefeff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefeff00fefeff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x00007e7e00007e7e;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007e7e00007e7e;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa2e3a36363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa2e3a36463636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000a2e300006363;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000a2e300006363;
++  __m128i_out = __lsx_vhsubw_wu_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000052527d7d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000052527d7d;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002400180004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000024;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffff0000010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xabff54f1ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa5f7458b000802ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fff7fc01;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000002;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000002;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff80008a7555aa;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a7535006af05cf9;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff758aaa56;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffa9fb0d07;
++  __m128i_out = __lsx_vhsubw_du_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0f180000ffe00000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001ca02f854;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004b01;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffb4ff;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe4b5ffff87f8;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fc03fc000000003;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f7f1fd800000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fc03fc000000004;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc080800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc080800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7efefefe82010201;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x418181017dfefdff;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x67eb85afb2ebb000;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe593c8c4e593c8c4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff8000010f78;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff7f0080ff7ef088;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000155;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000f0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffff10000;
++  __m128i_out = __lsx_vhsubw_qu_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
+new file mode 100644
+index 000000000..47cf33cfd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsub.c
+@@ -0,0 +1,461 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffff0002;
++  *((unsigned long *)&__m128i_op2[1]) = 0x54beed87bc3f2be1;
++  *((unsigned long *)&__m128i_op2[0]) = 0x8024d8f6a494afcb;
++  *((unsigned long *)&__m128i_result[1]) = 0xa8beed87bc3f2be1;
++  *((unsigned long *)&__m128i_result[0]) = 0x0024d8f6a494006a;
++  __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000fc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001ffff;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfffffff0ffe04000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001fc0000;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x040004000400040d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x040004000400040d;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xb327b9363c99d32e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa1e7b475d925730f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000003f80b0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m128i_result[1]) = 0xb327b9363c992b2e;
++  *((unsigned long *)&__m128i_result[0]) = 0xa1e7b475d925730f;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffff800;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000;
++  __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op2[1]) = 0x000000004c7f4c7f;
++  *((unsigned long *)&__m128i_op2[0]) = 0xe0c0c0c0d1c7d1c6;
++  *((unsigned long *)&__m128i_result[1]) = 0x061006100613030c;
++  *((unsigned long *)&__m128i_result[0]) = 0x4d6814ef9c77ce46;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7ffe7ffe7ffe7ffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000f00;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3727f00000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc7e01fcfe0000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3727112c00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x39201f7120000040;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xe5b9012c00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc7e01fcfe0000000;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0204;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000442900007b4c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000e22b0000efa4;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000442800007b50;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0204;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_op2[0]) = 0x003dbe88077c78c1;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000029;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff0000007f800000;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0fff0fff0fff0fff;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000003f0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffc3ffff003e;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000003f0000ffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffc3ffff003e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000f07f0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff177fffff0fc;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffbfffefffc9510;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffbfffefffc9510;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfffbfffefffc9510;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfffbfffefffc9510;
++  *((unsigned long *)&__m128i_result[1]) = 0x29c251319c3a5c90;
++  *((unsigned long *)&__m128i_result[0]) = 0x62fb9272df7da6b0;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8f8f8f8f8f8f8f8f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8f8f8f8f8f8f8f8f;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x800000007fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x800000007fffffff;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001400000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000053a4f452;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001400000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001400000000;
++  __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00680486ffffffda;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff913bfffffffd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00680486ffffffda;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff913bfffffffd;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_result[1]) = 0x00680486ffffffda;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff913bb9951901;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0021b761002c593c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x002584710016cc56;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000001e03;
++  *((unsigned long *)&__m128i_result[1]) = 0x0021b761002c593c;
++  *((unsigned long *)&__m128i_result[0]) = 0x002584710016ea59;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000290;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000290;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0002000400000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000500000001;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffae001effae;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m128i_op2[0]) = 0x59f7fd8759f7fd87;
++  *((unsigned long *)&__m128i_result[1]) = 0xfd200ed2fd370775;
++  *((unsigned long *)&__m128i_result[0]) = 0x96198318780e32c5;
++  __m128i_out = __lsx_vmsub_d (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsub_w (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
+new file mode 100644
+index 000000000..fc4cbb4e5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-1.c
+@@ -0,0 +1,398 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001801f0307f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001801f0307f80;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010108082626;
++  *((unsigned long *)&__m128i_result[0]) = 0x01010101ffff7878;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00fe000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x027e0000000000ff;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffff98dea;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x40f3fa0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xc00fffffffffb4ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xbf0c05fffff98dea;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x010101010101012f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010129;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_result[1]) = 0x1202120212021202;
++  *((unsigned long *)&__m128i_result[0]) = 0x1202120212021202;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x41957fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0xbf6b810181018101;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff8ffa2fffdffb0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0108015e01030150;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000017f0000;
++  __m128i_out = __lsx_vssub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf436f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf4b6f3f52f4ef4a8;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000101fd01fe;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_result[1]) = 0xffc0ffc0ffc0ffc0;
++  *((unsigned long *)&__m128i_result[0]) = 0xffc0ffc0ffc0ffc0;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fc0010181020103;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fc0ffff81020103;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001e03;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000011e04;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363abdf16;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41f8e08016161198;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee;
++  *((unsigned long *)&__m128i_result[1]) = 0x636363633f3e47c1;
++  *((unsigned long *)&__m128i_result[0]) = 0x41f8e080f1ef4eaa;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001fffe00014b41;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001ffde;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0002ffffb4bf;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0002ffff0022;
++  __m128i_out = __lsx_vssub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001fc0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000002010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001fbdff0;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000001d5d4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000150d707009;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fffe2a2c;
++  *((unsigned long *)&__m128i_result[0]) = 0x03f1e3bd80000000;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffd5002affffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x343d8dc6b0ed5a08;
++  *((unsigned long *)&__m128i_result[1]) = 0x002affd600000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xcbc2723a4f12a5f8;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffdfffffffe0;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffdfffffffe0;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000c2f90000bafa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000c2f90000bafa;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000c2fa8000c2fa;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff3d06ffff4506;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ffffffe7ffff800;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff3fffffff4;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff3fffffff4;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ef8000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8108000000000000;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000063b2ac27;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffaa076aeb;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff63b3584e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fffdaa07d5d6;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff81;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff7c;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff7cffd6ffc700b0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x008300290038ff50;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4d1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff3f213b2f;
++  __m128i_out = __lsx_vssub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
+new file mode 100644
+index 000000000..0d5987567
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssub-2.c
+@@ -0,0 +1,408 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lsxintrin.h&gt;
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f801fa06451ef11;
++  *((unsigned long *)&__m128i_op0[0]) = 0x68bcf93435ed25ed;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffb64c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000003900;
++  *((unsigned long *)&__m128i_result[0]) = 0x68bcf93435ed25ed;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x460f3b393ef4be3a;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x04e00060ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x04e00060ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x04e00060ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x04e00060ffffffff;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001c;
++  *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x004200a000200000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11;
++  *((unsigned long *)&__m128i_op0[0]) = 0x342caf9be5579ebe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f909;
++  *((unsigned long *)&__m128i_result[1]) = 0x0c03e17edd781b11;
++  *((unsigned long *)&__m128i_result[0]) = 0x342caf9be55700b5;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4e3e13368c17f6e6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111311111114111;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111311111112111;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x1111311111114111;
++  *((unsigned long *)&__m128i_result[0]) = 0x1111311111110000;
++  __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0202fe02fd020102;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefcfefcfefcfefc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfcfc00fc01fcfdfc;
++  __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00004000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc5c53492f25acbf2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_result[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_result[0]) = 0xc5c534920000c4ed;
++  __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff000000000000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc14eef7fc14ea000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000ea000010fa101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xb);
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000c7fff000c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000006ffef000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc2ffe700000007;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffc100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x41dfffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xbde2ffe800000007;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffc100010001;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xa000308000008002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0500847b00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x006f0efe258ca851;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff90ffffe0f5;
++  *((unsigned long *)&__m128i_result[0]) = 0x006e7973258d0ef4;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ca02f854;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000d0000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x6363635663636356;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssub_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
+new file mode 100644
+index 000000000..f5c82bc74
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsub.c
+@@ -0,0 +1,381 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lsxintrin.h&gt;
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000001fe;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc6ffe000c6fde000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808081;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_result[0]) = 0x467f6080467d607f;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00fe00fe00ff;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff0000;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4d1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000040223c2e;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfd200ed2fd370775;
++  *((unsigned long *)&__m128i_op0[0]) = 0x96198318780e32c5;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe65ecc1be5bc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe65ecc1be5bc;
++  *((unsigned long *)&__m128i_result[1]) = 0xfe212874311c22b9;
++  *((unsigned long *)&__m128i_result[0]) = 0x971a9dbaacf34d09;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x0);
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[0]) = 0x4f4f4f4f4f4f4f4f;
++  __m128i_out = __lsx_vsub_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf000e001bf84df83;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff8e001ff84e703;
++  *((unsigned long *)&__m128i_result[1]) = 0x14042382c3ffa481;
++  *((unsigned long *)&__m128i_result[0]) = 0x040c238283ff9d01;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_result[1]) = 0xfebffefffebffeff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfebffefffebffeff;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000700000004fdff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000300000000fdff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff7fffefffa01ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffbfffefffe01ff;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000cd630000cd63;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000329d0000329d;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x08080807f7f7f7f8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x08080805f5f5f5f8;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00060eb000000006;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000075c00000cf0;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffaf1500000fffa;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000f8a40000f310;
++  __m128i_out = __lsx_vsub_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff100fffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffdf100fffc;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000007f7f7f7f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007f7f7f7f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000010;
++  __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000;
++  __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001802041b0014;
++  __m128i_out = __lsx_vsub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000f7d1000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x773324887fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff082efffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x88cbdb7780000001;
++  __m128i_out = __lsx_vsub_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001f50000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffe0b0000;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000fffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010000000000001;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffeb;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffeb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000015;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001fffe0001fefc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0006000100040001;
++  *((unsigned long *)&__m128i_result[0]) = 0x00010002ffff0105;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000003fffffffd;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffd;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000003fffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000003fffffffd;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363abdf16;
++  *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198;
++  *((unsigned long *)&__m128i_result[1]) = 0x9c9d9b9bbfaa20e9;
++  *((unsigned long *)&__m128i_result[0]) = 0xbe081c963e6fee68;
++  __m128i_out = __lsx_vsub_q (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
+new file mode 100644
+index 000000000..37e0ccf4d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubi.c
+@@ -0,0 +1,329 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18;
++  *((unsigned long *)&__m128i_result[1]) = 0xe0d56a9774f3ea31;
++  *((unsigned long *)&__m128i_result[0]) = 0xe0dd268932a5edf9;
++  __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff88;
++  *((unsigned long *)&__m128i_result[1]) = 0xe5e5e5e5e5e5e5e5;
++  *((unsigned long *)&__m128i_result[0]) = 0xe5e5e5e5e4e4e46d;
++  __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408;
++  *((unsigned long *)&__m128i_result[1]) = 0xf7f7f7ff8e8c6d7e;
++  *((unsigned long *)&__m128i_result[0]) = 0xf7f7f7f7f7f7fbff;
++  __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x1);
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xe6e6e6e6e6e6e6e6;
++  *((unsigned long *)&__m128i_result[0]) = 0xe6e6e6e6e6e6e6e6;
++  __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m128i_result[0]) = 0xf8f8f8f8f8f8f8f8;
++  __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xe9e9e9e9e9e9e9e9;
++  *((unsigned long *)&__m128i_result[0]) = 0x171d423524e9e9e9;
++  __m128i_out = __lsx_vsubi_bu (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffe2ffe2ffe2ffe2;
++  *((unsigned long *)&__m128i_result[0]) = 0xffe2ffe2ffe2ffe2;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9795698585057dec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x87f82867431a1d08;
++  *((unsigned long *)&__m128i_result[1]) = 0x9780697084f07dd7;
++  *((unsigned long *)&__m128i_result[0]) = 0x87e3285243051cf3;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffc00fd;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x371fe00000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x371fe00000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_result[0]) = 0x370bdfecffecffec;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000040600000406;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020202020202fe02;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff503fbfff503fb;
++  *((unsigned long *)&__m128i_result[0]) = 0x01f701f701f7fdf7;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x803e0000803e0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x803e0000803e0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x803bfffd803bfffd;
++  *((unsigned long *)&__m128i_result[0]) = 0x803bfffd803bfffd;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffdfffdfffdfffd;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffedffedffedffed;
++  *((unsigned long *)&__m128i_result[0]) = 0xffedffedffedffed;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffe4ffe4ffe4ffe4;
++  *((unsigned long *)&__m128i_result[0]) = 0xffe4ffe4ffe4ffe4;
++  __m128i_out = __lsx_vsubi_hu (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffe6ffffffe6;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffe6ffffffe6;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff1fffffff1;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff6fffffff6;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff6fffffff6;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffe4ffffffe4;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffe4ffffffe4;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffe1ffffffe1;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffe1ffffffe1;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff1fffffff1;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff1fffffff1;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffab7e71e33848;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffe1ffffffe1;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffab5f71e33829;
++  __m128i_out = __lsx_vsubi_wu (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a;
++  *((unsigned long *)&__m128i_result[1]) = 0xa8beed87bc3f2bd3;
++  *((unsigned long *)&__m128i_result[0]) = 0x0024d8f6a494005c;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffeb;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffeb;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe1;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe5;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf2f2e5e5e5e5e5e5;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf2f2e5e5e5e5e5dc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3ffffeffffffffe5;
++  *((unsigned long *)&__m128i_result[0]) = 0x3ffffeffffffffe5;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000070;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff5;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe6;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffe6;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100010000fffb;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100010000fffb;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffeb;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffeb;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffa;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffe80008000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe2;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffdfffe80007fe2;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001a001a001a001a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001a001a001a001a;
++  *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a000b;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000234545b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4d1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000002345454;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c0dec4ca;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0f8d33000f8d3300;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003b80000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0f8d33000f8d32fd;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003b7fffffffffd;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubi_du (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
+new file mode 100644
+index 000000000..f0d391a09
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-1.c
+@@ -0,0 +1,326 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffd3000000130000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffd3000000130000;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100010000ffda;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000016;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffbfbfbfc0;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbfbfbfc0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_result[1]) = 0xffbfffbfff7fff80;
++  *((unsigned long *)&__m128i_result[0]) = 0xffbfffbfff7fff80;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000808000020200;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff8000020000;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x413e276583869d79;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f7f017f9d8726d3;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7c7cd2eb63637c52;
++  *((unsigned long *)&__m128i_op1[0]) = 0x82ffd2210127add2;
++  *((unsigned long *)&__m128i_result[1]) = 0xffc2007aff230027;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080005eff600001;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010012;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe1ffc0;
++  __m128i_out = __lsx_vsubwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000004000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffc000000000;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ffff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000100c6ffef10c;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffff01;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffeff400000df4;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000320;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000007730;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000103;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x33eac9fdca42f660;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaa472d26fe867091;
++  *((unsigned long *)&__m128i_op1[1]) = 0x33eac9fdca42f660;
++  *((unsigned long *)&__m128i_op1[0]) = 0xaa472d26fe867091;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff7a86;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffe01fff2;
++  __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf654ad7447e59090;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b1b106b8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffb81a6f70;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000047eba0b0;
++  __m128i_out = __lsx_vsubwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000c01020d8009;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000003004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000c01020d5005;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffff01ff01;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18;
++  *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffb9fe00003640;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe4eb00001b18;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x80001b155b4b0000;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100080000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffefff80000;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3fc03fc000000003;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f7f1fd800000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xc0411fe800000000;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0e440;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01e420fff0e442;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
+new file mode 100644
+index 000000000..3b18bc13c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwev-2.c
+@@ -0,0 +1,417 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00000083;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xff01ff010000ff7d;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000fffc;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff00fc0000ff02;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01ff040000fffe;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x21011f3f193d173b;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff39ff37ff35ff33;
++  *((unsigned long *)&__m128i_result[1]) = 0x00fe008e009e0071;
++  *((unsigned long *)&__m128i_result[0]) = 0x001c006f00c4008d;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9ca19d509ae734;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd1b09480f2123460;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001fffeff98;
++  *((unsigned long *)&__m128i_result[0]) = 0x0014ffe4ff76ffc4;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92;
++  *((unsigned long *)&__m128i_op1[0]) = 0xee297a731e5c5f86;
++  *((unsigned long *)&__m128i_result[1]) = 0xff6cffb5ff98ff6e;
++  *((unsigned long *)&__m128i_result[0]) = 0xffd7ff8dffa4ff7a;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff3ea5016b;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffefffe3f6fb04d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000d96f;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffd83b;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000f0009d3c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000016fff9d3d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000bd0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000007f0;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000916c;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010000954d;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000050000007b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000500000005;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffbffffff85;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffc0000fdfc;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000032;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000032;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xa5c4c774856ba837;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2a569f8081c3bbe9;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff6080ffff4417;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000063b2ac27;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffaa076aeb;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0001ffff9515;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffac5cffffac5c;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffac5cffffac5c;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffaefffbffaefffb;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffaefffbffaefffb;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0005ffff0005;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000500000004;
++  __m128i_out = __lsx_vsubwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000a1630000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000a1630000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001fd0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001fd0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff7ffffef77fffdd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf77edf9cffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000008800022;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000001;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffda6f;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffe3d7;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffda6e;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffe3d6;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000807f00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80006b0080808080;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff00011cf0c569;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc0000002b0995850;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffe30f3a97;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffcfe72830;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff9f5c25;
++  *((unsigned long *)&__m128i_op0[0]) = 0x58fa6b4000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ff9f5c25;
++  *((unsigned long *)&__m128i_op1[0]) = 0x58fa6b4000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffc4cdfd16;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x801dd5cb0004e058;
++  *((unsigned long *)&__m128i_op0[0]) = 0x77eb15638eeb5fc2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000004e03d;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000008eeb5fc2;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000100c6ffef00d;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000c00000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000bfffffffe0f6;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffd;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffdfffcfffd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffcfffdfffcfffd;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff7e00000081;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a0aa9890a0ac5f3;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x36fbdfdcffdcffdc;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffeffff;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a753500a9fa0d06;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf589caff5605f2fa;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x087c000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000087c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x10f8000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001000010f8;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffff784;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000000000000;
++  __m128i_out = __lsx_vsubwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
+new file mode 100644
+index 000000000..39ebff154
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-1.c
+@@ -0,0 +1,326 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc485edbcc0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x003f000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007c000d00400000;
++  __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x841f000fc28f801f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007c0000003e0080;
++  __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vsubwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001001;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff8000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff8000000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffc2ba;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000027f000000fe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000018000000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff7a53;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff0000ff86;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffa6ff91fdd8ef77;
++  *((unsigned long *)&__m128i_op1[0]) = 0x061202bffb141c38;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000005a00000228;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff9ee000004ec;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000001fe02000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000001fe02000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000002345454;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0dec4ca;
++  *((unsigned long *)&__m128i_result[1]) = 0x000030ebffffffdc;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000203ffffff25;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x380fdfdfc0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffc7f100004000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00005dcbe7e830c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000005dcb;
++  __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00f0008100800080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00f000807000009e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000ec382e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ec382d;
++  __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcfcfcfc0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffcfcfcfc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffcfc6080;
++  __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffaefffbffaefffb;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffaefffbffaefffb;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffc105d1aa;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffbc19ecca;
++  __m128i_out = __lsx_vsubwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000101fd01fe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffff0000000ad3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff000fffff000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xefffdffff0009d3d;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff01;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100010001007c;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5f675e96e29a5a60;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x5e695e95e1cb5a01;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
+new file mode 100644
+index 000000000..62837f1ac
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsubwod-2.c
+@@ -0,0 +1,308 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe07e5fefefdddfe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00020100fedd0c00;
++  *((unsigned long *)&__m128i_result[1]) = 0xff02ff1bff02ff23;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffffff02fff4;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefff6fff80002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x82c53a0000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc72ef153fc02fdf7;
++  *((unsigned long *)&__m128i_result[1]) = 0x007d00c500ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0038000e0003ff03;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007f000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007f000000000000;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000040000000400;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff800000000000;
++  __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd1300000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd1400010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe813f00fe813f00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe813f00fe813f00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff017fffff017f;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff017fffff017f;
++  __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176;
++  __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vsubwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000009;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff0007e215b122;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ffeffff7bfff828;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff80010001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff80010001;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000af555555555;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000af555555555;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000af5;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000af5;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000002e34594c;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsubwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000036280001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x42a0000042a02001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000036280001;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd0b1ffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9d519ee8d2d84f1d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8644ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4a6d0000ffff0000;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7d3ac60000000000;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000fffffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000102020204000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefff00000001fff;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0003000300000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffcfffd00000000;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x6363636163636363;
++  __m128i_out = __lsx_vsubwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vabsd-vmskgez-vmsk.patch b/LoongArch-Add-tests-for-SX-vector-vabsd-vmskgez-vmsk.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c99d450095b8eed6a0f173683016860ed8cbdcac
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vabsd-vmskgez-vmsk.patch
@@ -0,0 +1,1710 @@
+From 7fc7953897e6ff488eebd5ea769447b7a1a7a0ed Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 18:48:08 +0800
+Subject: [PATCH 087/124] LoongArch: Add tests for SX vector
+ vabsd/vmskgez/vmskltz/vmsknz/vsigncov instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmskgez.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmskltz.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmsknz.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsigncov.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vabsd-1.c        | 272 +++++++++++
+ .../loongarch/vector/lsx/lsx-vabsd-2.c        | 398 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmskgez.c        | 119 +++++
+ .../loongarch/vector/lsx/lsx-vmskltz.c        | 321 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vmsknz.c         | 104 +++++
+ .../loongarch/vector/lsx/lsx-vsigncov.c       | 425 ++++++++++++++++++
+ 6 files changed, 1639 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
+new file mode 100644
+index 000000000..e336581f3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-1.c
+@@ -0,0 +1,272 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffd000700000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0014fff500000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f03000780000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f15000a7f010101;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000060000000e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000127fffffea;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f0101070101010f;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000127f010116;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x67eb85af0000b000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x67157b5100005000;
++  *((unsigned long *)&__m128i_result[0]) = 0x387c7e0a133f2000;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff7fffefffa01ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffbfffefffe01ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0305030203020502;
++  *((unsigned long *)&__m128i_result[0]) = 0x0301030203020502;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4ee376188658d85f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5728dcc85ac760d2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4e1d76187a58285f;
++  *((unsigned long *)&__m128i_result[0]) = 0x572824385a39602e;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf654ad7447e59090;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b1b106b8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0x0a545374471b7070;
++  *((unsigned long *)&__m128i_result[0]) = 0x274f4f0648145f50;
++  __m128i_out = __lsx_vabsd_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21f32eafa486fd38;
++  *((unsigned long *)&__m128i_op0[0]) = 0x407c2ca3d3430357;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x21f32eaf5b7a02c8;
++  *((unsigned long *)&__m128i_result[0]) = 0x407c2ca32cbd0357;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003bfb4000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000003bfb4000;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100010001;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffdf;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000021;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000700000004e000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003000000012020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0038000000051fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x003c000000022021;
++  __m128i_out = __lsx_vabsd_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9d9b9bbfaa20e9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbe081c963e6fee68;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000feff23560000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000fd1654860000;
++  *((unsigned long *)&__m128i_result[1]) = 0x6363636463abdf17;
++  *((unsigned long *)&__m128i_result[0]) = 0x41f8e08016161198;
++  __m128i_out = __lsx_vabsd_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01fe0400000006;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000005fffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fe01fc0005fff4;
++  __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x010003f00000ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x017f03000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x010003f00000ff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x017f03000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000001fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000001ffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffac0a000000;
++  __m128i_out = __lsx_vabsd_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
+new file mode 100644
+index 000000000..c1af80e14
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vabsd-2.c
+@@ -0,0 +1,398 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa8beed87bc3f2be1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0024d8f6a494006a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x5641127843c0d41e;
++  *((unsigned long *)&__m128i_result[0]) = 0xfedb27095b6bff95;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000383ffff1fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ca354688;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000038335ca2777;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fff80000;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001fd0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001fd0;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ff08ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ff08ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefff00000001fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffe1ffc100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000400000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffe1ffc100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefff00000401fff;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff000000ff000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff000000ff000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff000000ff000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000;
++  __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x13f9c5b60028a415;
++  *((unsigned long *)&__m128i_op0[0]) = 0x545cab1d7e57c415;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x13f9c5b60028a415;
++  *((unsigned long *)&__m128i_result[0]) = 0x545cab1d81a83bea;
++  __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcfb799f1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0282800002828282;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5555001400005111;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffabbeab55110140;
++  *((unsigned long *)&__m128i_result[1]) = 0xaaaaffebcfb748e0;
++  *((unsigned long *)&__m128i_result[0]) = 0xfd293eab528e7ebe;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7505443065413aed;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x56a09e662ab46b31;
++  *((unsigned long *)&__m128i_op0[0]) = 0xb4b8122ef4054bb3;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x56a09e662ab46b31;
++  *((unsigned long *)&__m128i_result[0]) = 0xb4b8122ef4054bb3;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3c600000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xc39fffff007fffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00fd;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8006000080020000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8004000080020000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8006000080020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8004000080020000;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0015172b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffb00151727;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffbfffffff8;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffbfffffff8;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffdc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffbffffffd8;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffbfffffff8;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000490000004d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000490000004d;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001fffffff9;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x64b680a2ae3af8c8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x161c0c363c200824;
++  *((unsigned long *)&__m128i_result[1]) = 0x23b57fa16d39f7c8;
++  *((unsigned long *)&__m128i_result[0]) = 0x161c0c363c200824;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fffff0000000000;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1ffffffff8001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf0bd80bd80bd8000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1ffffffff8001000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf0bd80bd80bd8000;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffef8;
++  *((unsigned long *)&__m128i_result[0]) = 0xffdfffdfffdffee0;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vabsd_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
+new file mode 100644
+index 000000000..64a950f81
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskgez.c
+@@ -0,0 +1,119 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000cb4a;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe813f00fe813f00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000033;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe00006aea;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffce;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskgez_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
+new file mode 100644
+index 000000000..8f743ec2e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmskltz.c
+@@ -0,0 +1,321 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x85bd6b0e94d89998;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd83c8081ffff8080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000013d;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000f0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000100010001fffd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000001007c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9780697084f07dd7;
++  *((unsigned long *)&__m128i_op0[0]) = 0x87e3285243051cf3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000cdc1;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x05d0ae6002e8748e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcd1de80217374041;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000065a0;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000004b01;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d1c1b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff08ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000003f3f;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000022;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000008080600;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0018;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000035697d4e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000013ecaadf2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000006de1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5f9ccf33cf600000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003ffffe00800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034;
++  __m128i_out = __lsx_vmskltz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc3818bffe7b7a7b8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000027;
++  __m128i_out = __lsx_vmskltz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmskltz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
+new file mode 100644
+index 000000000..d547af0d3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmsknz.c
+@@ -0,0 +1,104 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001e1f;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x009500b10113009c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x009500b10113009c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000005d5d;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffff000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000fe;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000007f41;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0014001400140000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000554;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x202544f490f2de35;
++  *((unsigned long *)&__m128i_op0[0]) = 0x202544f490f2de35;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ff8;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00;
++  __m128i_out = __lsx_vmsknz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
+new file mode 100644
+index 000000000..0fb1bc18f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsigncov.c
+@@ -0,0 +1,425 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00003f803f800100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x870968c1f56bb3cd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf000e001bf84df83;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff8e001ff84e703;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ca354688;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff35cab978;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6a57a30ff0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe00fe00fe00fd01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe00fffefe0100f6;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0100010000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100010000010000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000183fffffe5;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000400000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000400000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff3d06ffff4506;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe7ffff800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff6fff6fff6fff6;
++  __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3f80000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3f80000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x52525252525252cb;
++  *((unsigned long *)&__m128i_op1[0]) = 0x52525252525252cb;
++  *((unsigned long *)&__m128i_result[1]) = 0xaeaeaeaeaeaeae35;
++  *((unsigned long *)&__m128i_result[0]) = 0xaeaeaeaeaeaeae35;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_result[1]) = 0x370bdfec00130014;
++  *((unsigned long *)&__m128i_result[0]) = 0x370bdfec00130014;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002020002020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x021f3b0205150600;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000300400002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000100010040fffb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000300400002;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100010040fffb;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff801c9e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000810000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x008003496dea0c61;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101030100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1ab6021f72496458;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7750af4954c29940;
++  *((unsigned long *)&__m128i_result[1]) = 0xe64afee18eb79ca8;
++  *((unsigned long *)&__m128i_result[0]) = 0x89b051b7ac3e67c0;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff81010102;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000045340a6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000028404044;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0010001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x67eb85af0000b000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000;
++  __m128i_out = __lsx_vsigncov_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000103;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003ffffe00800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004001be00dc008e;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffff0100010001;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff9fffefff9ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x04faf60009f5f092;
++  *((unsigned long *)&__m128i_op1[0]) = 0x04fafa9200000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfc06066e00000000;
++  __m128i_out = __lsx_vsigncov_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000667ae56;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000100020002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000100020002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000100020002;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffe1ffc0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010012;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffe1ffc0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010012;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe1ffc0;
++  __m128i_out = __lsx_vsigncov_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vand-vandi-vandn-v.patch b/LoongArch-Add-tests-for-SX-vector-vand-vandi-vandn-v.patch
new file mode 100644
index 0000000000000000000000000000000000000000..82ff843e32f6147d086cfa8cbbc45398d98cedb3
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vand-vandi-vandn-v.patch
@@ -0,0 +1,1209 @@
+From ea0d56b6569735448905780fe8468c9b3c6aad14 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 18:58:17 +0800
+Subject: [PATCH 097/124] LoongArch: Add tests for SX vector
+ vand/vandi/vandn/vor/vori/vnor/ vnori/vxor/vxori instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vand.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vandi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vandn.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vnor.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vnori.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vor.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vori.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vorn.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vxor.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vxori.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vand.c           | 159 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vandi.c          |  67 +++++++
+ .../loongarch/vector/lsx/lsx-vandn.c          | 129 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vnor.c           | 109 +++++++++++
+ .../loongarch/vector/lsx/lsx-vnori.c          |  91 ++++++++++
+ .../gcc.target/loongarch/vector/lsx/lsx-vor.c | 169 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vori.c           | 123 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vorn.c           | 109 +++++++++++
+ .../loongarch/vector/lsx/lsx-vxor.c           |  79 ++++++++
+ .../loongarch/vector/lsx/lsx-vxori.c          |  67 +++++++
+ 10 files changed, 1102 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
+new file mode 100644
+index 000000000..1597749b5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vand.c
+@@ -0,0 +1,159 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x03574e3a62407e03;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000001010000;
++  *((unsigned long*)& __m128i_result[1]) = 0x03574e3a62407e03;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[1]) = 0x001fffff001fffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0x001fffff001fffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x001fffff001fffff;
++  *((unsigned long*)& __m128i_result[0]) = 0x001fffff001fffff;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000003dffc2;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000003dffc2;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0008000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffff0001ffff9515;
++  *((unsigned long*)& __m128i_op1[1]) = 0x00000000ffff53d9;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9515;
++  *((unsigned long*)& __m128i_result[1]) = 0x00000000ffff53d9;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff0001ffff9515;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x67eb85af0000b000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x67eb85af0000b000;
++  *((unsigned long*)& __m128i_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long*)& __m128i_result[1]) = 0x67eb85af0000b000;
++  *((unsigned long*)& __m128i_result[0]) = 0xc8847ef6ed3f2000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0313100003131000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0313100003131000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0007000000050000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0003000100010001;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00007a8000000480;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000485000004cc;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0a0000000a000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0a0000000a000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vand_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
+new file mode 100644
+index 000000000..906da69ca
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandi.c
+@@ -0,0 +1,67 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandi_b(__m128i_op0,0x36);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000800;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandi_b(__m128i_op0,0x39);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandi_b(__m128i_op0,0x27);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandi_b(__m128i_op0,0x25);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandi_b(__m128i_op0,0xbd);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000a95afc60a5c5;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000b6e414157f84;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000204264602444;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000266404046604;
++  __m128i_out = __lsx_vandi_b(__m128i_op0,0x66);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
+new file mode 100644
+index 000000000..3ae2d7694
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vandn.c
+@@ -0,0 +1,129 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x000000017fff9000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000210011084;
++  *((unsigned long*)& __m128i_result[1]) = 0x000000017fff9000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000210011084;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x00000049000000c0;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00000001ffffff29;
++  *((unsigned long*)& __m128i_result[1]) = 0x00000049000000c0;
++  *((unsigned long*)& __m128i_result[0]) = 0x00000000ffffff29;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x010f00000111fffc;
++  *((unsigned long*)& __m128i_op0[0]) = 0x016700dc0176003a;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0003000000010000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0002000000010000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffff000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x8000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0xfffffffffffff000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x62cbf96e4acfaf40;
++  *((unsigned long*)& __m128i_op1[0]) = 0xf0bc9a5278285a4a;
++  *((unsigned long*)& __m128i_result[1]) = 0x62cbf96e4acfaf40;
++  *((unsigned long*)& __m128i_result[0]) = 0xf0bc9a5278285a4a;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfffe0004fffe0004;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x9c7c266e71768fa4;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x9c7c266e71768fa4;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vandn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
+new file mode 100644
+index 000000000..a7a3acce9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnor.c
+@@ -0,0 +1,109 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000fff8fff8;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000fff80000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x00000000fff8fff8;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00000000fff80000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffff00070007;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffff0007ffff;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xce23d33e43d9736c;
++  *((unsigned long*)& __m128i_op0[0]) = 0x63b2ac27aa076aeb;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x31dc2cc1bc268c93;
++  *((unsigned long*)& __m128i_result[0]) = 0x9c4d53d855f89514;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x000000000000000c;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffffff3;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000400080003fff;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000bc2000007e04;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000400080003fff;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000bc2000007e04;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffbfff7fffc000;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff43dfffff81fb;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x021b7d24c9678a35;
++  *((unsigned long*)& __m128i_op0[0]) = 0x030298a6a1030a49;
++  *((unsigned long*)& __m128i_op1[1]) = 0x5252525252525252;
++  *((unsigned long*)& __m128i_op1[0]) = 0x5252525252525252;
++  *((unsigned long*)& __m128i_result[1]) = 0xada4808924882588;
++  *((unsigned long*)& __m128i_result[0]) = 0xacad25090caca5a4;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfffefffe0000ff18;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffff000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0x0001000100000000;
++  __m128i_out = __lsx_vnor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
+new file mode 100644
+index 000000000..a07a02ab2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vnori.c
+@@ -0,0 +1,91 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff0000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffff0000;
++  *((unsigned long*)& __m128i_result[1]) = 0xcccccccc0000cccc;
++  *((unsigned long*)& __m128i_result[0]) = 0xcccccccc0000cccc;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0x33);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0xa6);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799;
++  *((unsigned long*)& __m128i_result[1]) = 0x9292929292929292;
++  *((unsigned long*)& __m128i_result[0]) = 0x8090808280909002;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0x6d);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000000ffc2f;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00201df000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x3838383838300010;
++  *((unsigned long*)& __m128i_result[0]) = 0x3818200838383838;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0xc7);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long*)& __m128i_op0[0]) = 0x2020202020207f7f;
++  *((unsigned long*)& __m128i_result[1]) = 0x5d5d5d5d5d5d5d5d;
++  *((unsigned long*)& __m128i_result[0]) = 0x5d5d5d5d5d5d0000;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0xa2);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long*)& __m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0x7f);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long*)& __m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long*)& __m128i_result[1]) = 0x1313131313131313;
++  *((unsigned long*)& __m128i_result[0]) = 0x1313131313131313;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0xec);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x9d9d9d9d9d9d9d9d;
++  *((unsigned long*)& __m128i_result[0]) = 0x9d9d9d9d9d9d9d9d;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0x62);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00f525682ffd27f2;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00365c60317ff930;
++  *((unsigned long*)& __m128i_result[1]) = 0xe500c085c000c005;
++  *((unsigned long*)& __m128i_result[0]) = 0xe5c1a185c48004c5;
++  __m128i_out = __lsx_vnori_b(__m128i_op0,0x1a);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
+new file mode 100644
+index 000000000..537a1bb3b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vor.c
+@@ -0,0 +1,169 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[1]) = 0x7e44bde9b842ff23;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00011e80007edff8;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long*)& __m128i_op1[0]) = 0xfffc001fffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xfffc001fffffffff;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000200010;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000200010;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long*)& __m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long*)& __m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long*)& __m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long*)& __m128i_result[1]) = 0x4f804f804f804f80;
++  *((unsigned long*)& __m128i_result[0]) = 0x4f804f804f804f80;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x3e035e51522f0799;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x3e035e51522f0799;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x3e035e51522f0799;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xfffb00fdfdf7ffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfff8000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xfffb00fdfdf7ffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xfff8000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xfffb00fdfdf7ffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xfff8000000000000;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffff80005613;
++  *((unsigned long*)& __m128i_op0[0]) = 0x81000080806b000b;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffff00011cf0c569;
++  *((unsigned long*)& __m128i_op1[0]) = 0xc0000002b0995850;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffff9cf0d77b;
++  *((unsigned long*)& __m128i_result[0]) = 0xc1000082b0fb585b;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfffffffffffbfff8;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x80808080806b000b;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0xfffffffffffbfffb;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0001000101010001;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x0001ffff0101ffff;
++  *((unsigned long*)& __m128i_result[0]) = 0x0001ffff0001ffff;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffc105d1aa;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffbc19ecca;
++  *((unsigned long*)& __m128i_op1[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long*)& __m128i_op1[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long*)& __m128i_result[1]) = 0xfffffffff9bffbfb;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffdffdfb;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
+new file mode 100644
+index 000000000..8a6e035c9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vori.c
+@@ -0,0 +1,123 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x8282828282828282;
++  *((unsigned long*)& __m128i_result[0]) = 0x8282828282828282;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x82);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x7505853d654185f5;
++  *((unsigned long*)& __m128i_op0[0]) = 0x01010000fefe0101;
++  *((unsigned long*)& __m128i_result[1]) = 0x7545c57d6541c5f5;
++  *((unsigned long*)& __m128i_result[0]) = 0x41414040fefe4141;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x40);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000c2f90000bafa;
++  *((unsigned long*)& __m128i_op0[0]) = 0x8000c2fa8000c2fa;
++  *((unsigned long*)& __m128i_result[1]) = 0x7474f6fd7474fefe;
++  *((unsigned long*)& __m128i_result[0]) = 0xf474f6fef474f6fe;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x74);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long*)& __m128i_result[0]) = 0x3d3d3d3d3d3d3d3d;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x3d);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long*)& __m128i_op0[0]) = 0x1f5533a694f902c0;
++  *((unsigned long*)& __m128i_result[1]) = 0xfffffadffedbfefe;
++  *((unsigned long*)& __m128i_result[0]) = 0x5f5f7bfedefb5ada;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x5a);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x38);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0d1202e19235e2bc;
++  *((unsigned long*)& __m128i_op0[0]) = 0xea38e0f75f6e56d1;
++  *((unsigned long*)& __m128i_result[1]) = 0x2f3626e7b637e6be;
++  *((unsigned long*)& __m128i_result[0]) = 0xee3ee6f77f6e76f7;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x26);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0003c853c843c87e;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0003c853c843c87e;
++  *((unsigned long*)& __m128i_result[1]) = 0xd6d7ded7ded7defe;
++  *((unsigned long*)& __m128i_result[0]) = 0xd6d7ded7ded7defe;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0xd6);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0xfffe0000fffe0000;
++  *((unsigned long*)& __m128i_result[1]) = 0x7777777777777777;
++  *((unsigned long*)& __m128i_result[0]) = 0xffff7777ffff7777;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x77);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x55);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xd454545454545454;
++  *((unsigned long*)& __m128i_result[0]) = 0xd454545454545454;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x54);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long*)& __m128i_result[0]) = 0x4f4f4f4f4f4f4f4f;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x4f);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x8a8a8a8a8a8a8a8a;
++  *((unsigned long*)& __m128i_result[0]) = 0x8a8a8a8a8a8a8a8a;
++  __m128i_out = __lsx_vori_b(__m128i_op0,0x8a);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
+new file mode 100644
+index 000000000..bb59bc312
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vorn.c
+@@ -0,0 +1,109 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long*)& __m128i_op0[0]) = 0x000100010001fffe;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x00d3012b015700bb;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00010000ffca0070;
++  *((unsigned long*)& __m128i_result[1]) = 0xff2cfed4fea8ff44;
++  *((unsigned long*)& __m128i_result[0]) = 0xfffeffff0035ff8f;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00fe00fe00fe00fe;
++  *((unsigned long*)& __m128i_op0[0]) = 0x00fe00fe00fe0045;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0x00fe00fe00fe00fe;
++  *((unsigned long*)& __m128i_result[0]) = 0x00fe00fe00fe0045;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000010000010101;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0101000001000100;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000010000010101;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0101000001000100;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000002;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000002;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x33f5c2d7d975d7fe;
++  *((unsigned long*)& __m128i_op0[0]) = 0xe4423f7b769f8ffe;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x33f5c2d7d975d7fe;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00050eb00000fffa;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000f8a50000f310;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00bbfff7fffffff7;
++  *((unsigned long*)& __m128i_op0[0]) = 0xffffffff008ff820;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0001000100010012;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00000000ffe1ffc0;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffff009ff83f;
++  __m128i_out = __lsx_vorn_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
+new file mode 100644
+index 000000000..72fa97174
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxor.c
+@@ -0,0 +1,79 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x00000000f4012ceb;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x00000000f4012ceb;
++  __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x40f3fa0000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x3ff0000000000001;
++  *((unsigned long*)& __m128i_result[0]) = 0x40f3fa0000000000;
++  __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op1[0]) = 0x0000000080000068;
++  *((unsigned long*)& __m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[0]) = 0x0000000080000068;
++  __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000ffff53d9;
++  *((unsigned long*)& __m128i_op0[0]) = 0xff000001ffff9515;
++  *((unsigned long*)& __m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long*)& __m128i_op1[0]) = 0xffff0001ffff9514;
++  *((unsigned long*)& __m128i_result[1]) = 0xffffffff0000ac26;
++  *((unsigned long*)& __m128i_result[0]) = 0x00ff000000000001;
++  __m128i_out = __lsx_vxor_v(__m128i_op0,__m128i_op1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
+new file mode 100644
+index 000000000..cc823d4ba
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vxori.c
+@@ -0,0 +1,67 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i=1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x0404040404040404;
++  *((unsigned long*)& __m128i_result[0]) = 0x0404040404040404;
++  __m128i_out = __lsx_vxori_b(__m128i_op0,0x4);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000001000100;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000001000100;
++  *((unsigned long*)& __m128i_result[1]) = 0x5a5a5a5a5b5a5b5a;
++  *((unsigned long*)& __m128i_result[0]) = 0x5a5a5a5a5b5a5b5a;
++  __m128i_out = __lsx_vxori_b(__m128i_op0,0x5a);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0xe3e3e3e3e3e3e3e3;
++  *((unsigned long*)& __m128i_result[0]) = 0xe3e3e3e3e3e3e3e3;
++  __m128i_out = __lsx_vxori_b(__m128i_op0,0xe3);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0020002000200020;
++  *((unsigned long*)& __m128i_result[1]) = 0x9a9a9a9a9a9a9a9a;
++  *((unsigned long*)& __m128i_result[0]) = 0x9aba9aba9aba9aba;
++  __m128i_out = __lsx_vxori_b(__m128i_op0,0x9a);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long*)& __m128i_result[1]) = 0x9090909090909090;
++  *((unsigned long*)& __m128i_result[0]) = 0x9090909090909090;
++  __m128i_out = __lsx_vxori_b(__m128i_op0,0x90);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  *((unsigned long*)& __m128i_op0[1]) = 0x00000000b81c8382;
++  *((unsigned long*)& __m128i_op0[0]) = 0x0000000077af9450;
++  *((unsigned long*)& __m128i_result[1]) = 0xf1f1f1f149ed7273;
++  *((unsigned long*)& __m128i_result[0]) = 0xf1f1f1f1865e65a1;
++  __m128i_out = __lsx_vxori_b(__m128i_op0,0xf1);
++  ASSERTEQ_64(__LINE__, __m128i_result, __m128i_out);
++
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vavg-vavgr-instruc.patch b/LoongArch-Add-tests-for-SX-vector-vavg-vavgr-instruc.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2d03fdd914af4d755b975e7a91d273df13fa0744
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vavg-vavgr-instruc.patch
@@ -0,0 +1,1375 @@
+From 4fba531ee417a29234e8be84e17cddc7dd9ec343 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 18:35:55 +0800
+Subject: [PATCH 084/124] LoongArch: Add tests for SX vector vavg/vavgr
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vavg-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vavg-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vavg-1.c         | 398 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vavg-2.c         | 308 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vavgr-1.c        | 299 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vavgr-2.c        | 317 ++++++++++++++
+ 4 files changed, 1322 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
+new file mode 100644
+index 000000000..2177ca3f6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-1.c
+@@ -0,0 +1,398 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff8fff8fff8fff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4050000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x2028000000000000;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000014155445;
++  *((unsigned long *)&__m128i_result[1]) = 0x33f5c2d7d9f5d800;
++  *((unsigned long *)&__m128i_result[0]) = 0xe4c23ffb002a3a22;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000f000e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000ffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x003fffff00070007;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000007ffff;
++  __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400028000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000020001c020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000022;
++  __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x08080807f5f5f5f8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x04040403fafafafc;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff80;
++  __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x10f8000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001000010f8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x087c000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000087c;
++  __m128i_out = __lsx_vavg_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5641127843c0d41e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfedb27095b6bff95;
++  *((unsigned long *)&__m128i_op1[1]) = 0xa8beed87bc3f2be1;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0024d8f6a494006a;
++  *((unsigned long *)&__m128i_result[1]) = 0xff7fffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xff7fffffffffffff;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00007fff;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0010ff06;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xedfaedfaedfaedfa;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf6fd377cf705f680;
++  *((unsigned long *)&__m128i_result[0]) = 0xc0000000bfff8000;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000f00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x1ff800000000477f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000015fec9b0;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000037;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x003fffff00000000;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800000008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000008000;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31;
++  *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xeb504f33155a3598;
++  *((unsigned long *)&__m128i_result[0]) = 0x1a5c0917fa02a5d9;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffefffff784;
++  *((unsigned long *)&__m128i_result[1]) = 0x00bbfff7fffffff7;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff008ff820;
++  __m128i_out = __lsx_vavg_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0014;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000c01020d8009;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007fff8000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001008100000005;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd1300000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd1400010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff7fc01;
++  *((unsigned long *)&__m128i_result[1]) = 0xfe00fe8980000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff007e8a7ffc7e00;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff46000000ba;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffa30000005c;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000070007;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000007ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000068;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000038003;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000040033;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff0000ac26;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff80005613;
++  *((unsigned long *)&__m128i_result[0]) = 0x007f800000000000;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000040000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000040000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fc000005fc00000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fc000005fc00000;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0001fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff;
++  __m128i_out = __lsx_vavg_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
+new file mode 100644
+index 000000000..1b0d879e4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavg-2.c
+@@ -0,0 +1,308 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000100000001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x37b951002d81a921;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000047404f4f040d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000082000000826;
++  *((unsigned long *)&__m128i_result[0]) = 0x1b5c4c203e685617;
++  __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00a975be00accf03;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03;
++  *((unsigned long *)&__m128i_result[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128i_result[0]) = 0x00a975be00accf03;
++  __m128i_out = __lsx_vavg_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff732a;
++  *((unsigned long *)&__m128i_result[1]) = 0x807f7fff807f807f;
++  *((unsigned long *)&__m128i_result[0]) = 0x807f807f7fff3995;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000007f7f7f7f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000003fbf3fbf;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7ff8;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x353c8cc4b1ec5b09;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080008000808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x1a9e466258f62d84;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ac;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x4e4e4e4e00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000868686868686;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1e1e1e1e1e1e1e1e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1e1e1e1e1e1e1e1e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0f0f0f0f0f0f0f0f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f0f525252525252;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffdfdc0d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a753500950fa306;
++  __m128i_out = __lsx_vavg_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff00010000fff;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000002ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000017fffffff;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0101000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101030100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080800000008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080818000008000;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0017004800c400f9;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ed001a00580070;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x800b7fe38062007b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0076800d802c0037;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80ffa2fff0ff74;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff76ffd8ffe6ffaa;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0xe01ae8a3fc55dd23;
++  *((unsigned long *)&__m128i_result[0]) = 0xdd9ff64ef9daeace;
++  __m128i_out = __lsx_vavg_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fffffff;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f80000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f80000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x1fc0000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1fc07f8000007f80;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000043cf26c7;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000e31d4cae8636;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000021e79364;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000718ea657431b;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ff8000000000000;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff80ffff7e02;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf931fd04f832fe02;
++  *((unsigned long *)&__m128i_result[1]) = 0x80007fc000003f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x7d187e427c993f80;
++  __m128i_out = __lsx_vavg_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
+new file mode 100644
+index 000000000..4b7262537
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-1.c
+@@ -0,0 +1,299 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0040000000ff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0040000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0020000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0020c00000000000;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18;
++  *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xdcfe1b20f2f60e0c;
++  *((unsigned long *)&__m128i_result[0]) = 0xc00000002e260e0c;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x060808ff08080820;
++  *((unsigned long *)&__m128i_result[0]) = 0x4608081808080810;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ac26;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000060000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000003000000d613;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c0000000;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff2;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff2;
++  __m128i_out = __lsx_vavgr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000002a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000003a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000015;
++  __m128i_out = __lsx_vavgr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffc002000000000;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffc002000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffc002000000000;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000007fff0018;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000003fff800c;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0280000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7500000075000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7500000075000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3bc000003a800000;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007d1800007c99;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0a0000001e000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a621b3ebe5e1c02;
++  *((unsigned long *)&__m128i_result[1]) = 0x04ffc0000f000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x05314c2bdf2f4c4e;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000;
++  __m128i_out = __lsx_vavgr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fc000003fc00000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fc000003fc00000;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fffffffc0000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0xff807f807f807f80;
++  *((unsigned long *)&__m128i_result[0]) = 0xff807f807f807f80;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000280000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000140001;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe00fe0045;
++  *((unsigned long *)&__m128i_result[1]) = 0x007f007f007f007e;
++  *((unsigned long *)&__m128i_result[0]) = 0x007f007f007effc6;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x33f5c2d7d975d7fe;
++  *((unsigned long *)&__m128i_result[0]) = 0xe4423f7b769f8ffe;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff9dff9dff9dff9d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffceffceffcf1fcb;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x1d4000001d400000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1e5f007f5d400000;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000400000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000007f80;
++  __m128i_out = __lsx_vavgr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
+new file mode 100644
+index 000000000..22908b1ea
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vavgr-2.c
+@@ -0,0 +1,317 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff01018888;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4080808080808080;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000010000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x0c0c8b8a8b8b0b0a;
++  *((unsigned long *)&__m128i_result[0]) = 0x8b8a8a898a8a8909;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000208000002080;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffd60015;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x80808080806b000b;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff81010102;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242071db;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa578;
++  *((unsigned long *)&__m128i_result[1]) = 0xe0dee7779210b8ed;
++  *((unsigned long *)&__m128i_result[0]) = 0xf463dbabebb5d2bc;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff80000000000000;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000400400004004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000015ff4a31;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2a7b7c9260f90ee2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1b1c6cdfd57f5736;
++  *((unsigned long *)&__m128i_result[1]) = 0x153e3e49307d0771;
++  *((unsigned long *)&__m128i_result[0]) = 0x0d8e36706ac02b9b;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xdd6156076967d8c9;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2e3ab5266375e71b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x6eb12b0634b46c67;
++  *((unsigned long *)&__m128i_result[0]) = 0x171d5a9531bb7390;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000000900;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000090900000998;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00007a8000000480;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000485000004cc;
++  __m128i_out = __lsx_vavgr_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc001fffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00001ff800000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ffe800e80000000;
++  __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff000001ffff9515;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007fffa9ed;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8000017fffca8b;
++  __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffdfffffff8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7ffffffc;
++  __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffeff98;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0014ffe4ff76ffc4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3131313131313131;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000017fff7fcc;
++  *((unsigned long *)&__m128i_result[0]) = 0x18a3188b9854187b;
++  __m128i_out = __lsx_vavgr_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001c88bf0;
++  __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x807fffff80800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_result[1]) = 0x8003000000020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4040ffffc0400004;
++  __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010000010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101000001000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000008000008080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080800000800080;
++  __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vavgr_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vbitclr-vbitclri-v.patch b/LoongArch-Add-tests-for-SX-vector-vbitclr-vbitclri-v.patch
new file mode 100644
index 0000000000000000000000000000000000000000..10f72c8676991859e2fdd4872588609ba99df5ee
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vbitclr-vbitclri-v.patch
@@ -0,0 +1,3324 @@
+From 0b75b581703b0eb1eb9ca9e898255de7f4cb51ad Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 11:20:44 +0800
+Subject: [PATCH 092/124] LoongArch: Add tests for SX vector
+ vbitclr/vbitclri/vbitrev/vbitrevi/
+ vbitsel/vbitseli/vbitset/vbitseti/vclo/vclz/vpcnt instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vbitclr.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitclri.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitrev.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitsel.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitseli.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitset.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vbitseti.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vclo.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vclz.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vpcnt.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vbitclr.c        | 461 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vbitclri.c       | 279 +++++++++++
+ .../loongarch/vector/lsx/lsx-vbitrev.c        | 407 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vbitrevi.c       | 336 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vbitsel.c        | 109 +++++
+ .../loongarch/vector/lsx/lsx-vbitseli.c       |  84 ++++
+ .../loongarch/vector/lsx/lsx-vbitset.c        | 371 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vbitseti.c       | 279 +++++++++++
+ .../loongarch/vector/lsx/lsx-vclo.c           | 266 ++++++++++
+ .../loongarch/vector/lsx/lsx-vclz.c           | 265 ++++++++++
+ .../loongarch/vector/lsx/lsx-vpcnt.c          | 350 +++++++++++++
+ 11 files changed, 3207 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
+new file mode 100644
+index 000000000..411dcaa40
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclr.c
+@@ -0,0 +1,461 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00e0000000e00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000e0000000e0;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000004000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff8004000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x19df307a5d04acbb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5ed032b06bde1ab6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x19de307a5d04acba;
++  *((unsigned long *)&__m128i_result[0]) = 0x5ed032b06bde1ab6;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0018001800180018;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0018001800180018;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077;
++  *((unsigned long *)&__m128i_op1[1]) = 0x85bd6b0e94d89998;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd83c8081ffff808f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xd82480697f678077;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000006597cc3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000006595cc1d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffe0000fffe0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffe0000fffe0000;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff7fc01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x80000000fff6fc00;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffff800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fffef800;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffefffffffe;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x23b57fa16d39f7c8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x161c0c363c200824;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000ffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000ffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000fefe00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fefe00000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1ffffffff8001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf0bd80bd80bd8000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7ffffffefffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xdfffdfffdffffffe;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000037;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000036;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100000001007c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000000010000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefa000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefa000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67b7cf643c9d636a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x39d70e366f547977;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x66b34f643c9c626a;
++  *((unsigned long *)&__m128i_result[0]) = 0x38d60e366e547876;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_result[0]) = 0x2020202020207f7f;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x7ef8000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ef8000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000077f97;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffeff7f0000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320176a4d2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x685670d37e80682a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320076a4d2;
++  *((unsigned long *)&__m128i_result[0]) = 0x685670d27e00682a;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00001b4a00007808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100010001000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x5d7f5d007f6a007f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fffefffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fffefffe;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x207fffff22bd04fb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x207fffff22bd04fb;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000002000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000002000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x207fffff22bd04fa;
++  *((unsigned long *)&__m128i_result[0]) = 0x207fffff22bd04fa;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffefffe;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000b81c8382;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000077af9450;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007efe7f7f8000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000667ae56;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000004ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020;
++  __m128i_out = __lsx_vbitclr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
+new file mode 100644
+index 000000000..5d7d66e06
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitclri.c
+@@ -0,0 +1,279 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{ /* Auto-generated checks for __lsx_vbitclri_{b,h,w,d}: each case loads op0, the expected vector, runs the intrinsic, and compares both 64-bit halves. */
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result; /* operand, actual output, expected output vectors */
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; /* scalar temporaries emitted by the generator; unused in this test */
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200000;
++  *((unsigned long *)&__m128i_result[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x004200a000200000;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000efffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002ff5;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc2cf2471e9b7d7a4;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000027f5;
++  *((unsigned long *)&__m128i_result[0]) = 0xc2cf2471e9b7d7a4;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_result[1]) = 0x7404443064403aec;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000d6eefefc0498;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x36);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x64b680a2ae3af8ca;
++  *((unsigned long *)&__m128i_op0[0]) = 0x161c0c363c200826;
++  *((unsigned long *)&__m128i_result[1]) = 0x64b680a2ae3af8c8;
++  *((unsigned long *)&__m128i_result[0]) = 0x161c0c363c200824;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xbfffbfffbfffbffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff807f807f807f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff807f807f807f80;
++  *((unsigned long *)&__m128i_result[1]) = 0xfb807b807b807b80;
++  *((unsigned long *)&__m128i_result[0]) = 0xfb807b807b807b80;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfbffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfbffffffffffffff;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9941d1d5f4ba9d08;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x9941d155f43a9d08;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffbfffffffbf;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x03f1e3d28b1a8a1a;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffda6f;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffe3d7;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefd7f7f7f7f7f7e;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefffffffeffda6f;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefffffffeffe3d7;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x26);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x30);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080638063;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080638063;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_d (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000200008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000200000;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200000001;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xdfdfdfdfdfdfdfdf;
++  *((unsigned long *)&__m128i_result[0]) = 0xdfdfdfdfdfdfdfdf;
++  __m128i_out = __lsx_vbitclri_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitclri_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
+new file mode 100644
+index 000000000..ba4f4b6dc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrev.c
+@@ -0,0 +1,407 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1b71a083b3dec3cd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x373a13323b4cdbc1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0802010808400820;
++  *((unsigned long *)&__m128i_result[0]) = 0x8004080408100802;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000501000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000008;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040100;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010400100203;
++  *((unsigned long *)&__m128i_result[0]) = 0x0103010301020109;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbe6ed563;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd0b1ffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9d519ee8d2d84f1d;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefd7f7f7f7f7f7e;
++  *((unsigned long *)&__m128i_result[0]) = 0xdffdbffeba6f5543;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x2002040404010420;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010180800101;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001fffe;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0e7ffffc01fffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000003f803f4;
++  *((unsigned long *)&__m128i_result[1]) = 0x1000000010000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100100000;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x040004000400040d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0501050105010501;
++  *((unsigned long *)&__m128i_result[0]) = 0x050105010501050c;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100010001fffe;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000007f00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7ffffffeffffffff;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0040000000400000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0040000000400000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0141010101410101;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x65b780a3ae3bf8cb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x161d0c363c200826;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x65b780a2ae3bf8ca;
++  *((unsigned long *)&__m128i_result[0]) = 0x161d0c373c200827;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfe01fe01fe01fe01;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe01fe01fe01fe01;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000003bfb4000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000021ffffffdf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000e60;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1ff85ffe2ae5d973;
++  *((unsigned long *)&__m128i_result[1]) = 0x00010020fffeffde;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100400100200e68;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001021;
++  *((unsigned long *)&__m128i_result[1]) = 0x0108020410400208;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010102;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff0000ff86;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x010101fe0101fe87;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x343d8dc5b0ed5a08;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x353c8cc4b1ec5b09;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ffffff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0038d800ff000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fffe00fffffe00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0137ffc9d7fe2801;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f00ff017fffff01;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001200100012001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffe7fffffff;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000010000000;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffdfffcfffdfffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffdfffcfffdfffc;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001ffff0101ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0103fefd0303fefd;
++  *((unsigned long *)&__m128i_result[0]) = 0x0103fefd0103fefd;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978;
++  *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002001000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000008000020000;
++  __m128i_out = __lsx_vbitrev_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffefffe;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001ce28f9c0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000004e06b0890;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefefefdbffefdfe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefefeeffef7fefe;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003ffffe00800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff810001ff810002;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f804000ff810001;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff1affff01001fe0;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d02834d70;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000034;
++  *((unsigned long *)&__m128i_result[1]) = 0xfe1bfefe00011ee1;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe1bfe6c03824c60;
++  __m128i_out = __lsx_vbitrev_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41945926d8000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00001e5410082727;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007f7f00107f7f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001001001000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x4195d926d8018000;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f8100017f810001;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8100017f810001;
++  __m128i_out = __lsx_vbitrev_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x545501550001113a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xd45501550001113a;
++  __m128i_out = __lsx_vbitrev_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
+new file mode 100644
+index 000000000..9739182cd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitrevi.c
+@@ -0,0 +1,336 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000400000007004;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfeffffffffffffff;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x38);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4000400040004000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4000400040004000;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000007fff8000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001008100000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0800080077ff8800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0801088108000805;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m128i_result[0]) = 0x0202020202020202;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe86ce7eb5e9ce950;
++  *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404;
++  *((unsigned long *)&__m128i_result[0]) = 0xec68e3ef5a98ed54;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000400000204010;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x04000400fbfffb02;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010000000100000;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x040004000400040d;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000004f804f81;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000004f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000004fc04f81;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000004fc04f80;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0040004000400040;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m128i_result[0]) = 0xefefefefefefefef;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d1c1b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x01203f1e3d1c3b1a;
++  *((unsigned long *)&__m128i_result[0]) = 0x3918371635143312;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x61608654a2d4f6da;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff0800080008000;
++  *((unsigned long *)&__m128i_result[0]) = 0xe160065422d476da;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x77c0401040004000;
++  *((unsigned long *)&__m128i_result[0]) = 0x77c0401040004000;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6;
++  *((unsigned long *)&__m128i_result[1]) = 0x75c0404a4200403a;
++  *((unsigned long *)&__m128i_result[0]) = 0x75c03fd642003fc6;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808280808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808280808;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000100fffffeff;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0404050404040404;
++  *((unsigned long *)&__m128i_result[0]) = 0x0404050404040404;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xbfbfbfbfbfbfbfbf;
++  *((unsigned long *)&__m128i_result[0]) = 0xbfbfbfbfbfbfbfbf;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000040000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000040000000;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000020000;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x2000200020002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x2000200020002000;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x401fadf8fbfbfbfb;
++  *((unsigned long *)&__m128i_result[0]) = 0x1c1f2145fbfbfbfb;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffefff00001000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffefff00001000;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x21);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010000000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31;
++  *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3;
++  *((unsigned long *)&__m128i_result[1]) = 0xd6e09e262af46b71;
++  *((unsigned long *)&__m128i_result[0]) = 0x34f8126ef4454bf3;
++  __m128i_out = __lsx_vbitrevi_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000200008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000200000;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefefefdbffefdfe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefefeeffef7feff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcffbdfcfffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcedfcf5fcfd;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000555889;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000002580f01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010000000455889;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010000002480f01;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf00040fbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf00000fbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x00060fbf02040fbf;
++  *((unsigned long *)&__m128i_result[0]) = 0x00020fbf02000fbf;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x400000003fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x021b7d24c9678a35;
++  *((unsigned long *)&__m128i_op0[0]) = 0x030298a6a1030a49;
++  *((unsigned long *)&__m128i_result[1]) = 0x00197f26cb658837;
++  *((unsigned long *)&__m128i_result[0]) = 0x01009aa4a301084b;
++  __m128i_out = __lsx_vbitrevi_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x3);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000c6c60000c6c6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000c6c58000c6b2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000c6c40000c6c6;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000c6c78000c6b2;
++  __m128i_out = __lsx_vbitrevi_d (__m128i_op0, 0x21);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff7fffffff7f;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff7fffffff7f;
++  __m128i_out = __lsx_vbitrevi_w (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
+new file mode 100644
+index 000000000..52ac9939f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitsel.c
+@@ -0,0 +1,109 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000005050000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0505000005050505;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000d02540000007e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001400140014;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0505050505050505;
++  *((unsigned long *)&__m128i_op2[0]) = 0x03574e38e496cbc9;
++  *((unsigned long *)&__m128i_result[1]) = 0x0005000400000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0400001001150404;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080001300000013;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0080001300000013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080001300000013;
++  *((unsigned long *)&__m128i_result[0]) = 0x0080001300000013;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_op2[0]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x62cbf96e4acfaf40;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf0bc9a5278285a4a;
++  *((unsigned long *)&__m128i_op2[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x62cbf84c02cbac00;
++  *((unsigned long *)&__m128i_result[0]) = 0x1014120210280240;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff59;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff59;
++  __m128i_out = __lsx_vbitsel_v (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
+new file mode 100644
+index 000000000..f2d6fb042
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseli.c
+@@ -0,0 +1,84 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x6664666466646664;
++  *((unsigned long *)&__m128i_result[0]) = 0x6664666466646664;
++  __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x66);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x5d5d5d5d5d5d5d55;
++  __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x5d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x5959595959595959;
++  *((unsigned long *)&__m128i_result[0]) = 0x5959595959595959;
++  __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x59);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0xaa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0b4c600000000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004280808080808;
++  __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0xa4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_result[0]) = 0x000047404f4f040d;
++  __m128i_out = __lsx_vbitseli_b (__m128i_op0, __m128i_op1, 0x4f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
+new file mode 100644
+index 000000000..e05af675e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitset.c
+@@ -0,0 +1,371 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe001ffffe001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe001ffffe001;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000038335ca2777;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000800800000;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf51df8dbd6050189;
++  *((unsigned long *)&__m128i_result[0]) = 0x0983e2dbf235ed87;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d5d55;
++  *((unsigned long *)&__m128i_result[1]) = 0xfc01fcfefc02fdf7;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe00fcfffe21fd01;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x80000000fff7fc01;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe00000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff01010105;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001c00ffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010201808040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010280808040;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3f8000003f800001;
++  *((unsigned long *)&__m128i_result[0]) = 0x3f8000003f800001;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000010a000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000897957687;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000408;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000100;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff994cb09c;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc3639d96;
++  *((unsigned long *)&__m128i_op1[1]) = 0x20de27761210386d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x34632935195a123c;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff994db09c;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffc7639d96;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000545cab1d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000081a83bea;
++  *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415;
++  *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea;
++  *((unsigned long *)&__m128i_result[1]) = 0x00400000547cab1d;
++  *((unsigned long *)&__m128i_result[0]) = 0x2000000081a83fea;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000038003;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040033;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100080000;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0909090909090909;
++  *((unsigned long *)&__m128i_result[0]) = 0x0909090909090909;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00a600e000a600e0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01500178010000f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfefbff06fffa0004;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfefeff04fffd0004;
++  *((unsigned long *)&__m128i_result[1]) = 0x4008804080040110;
++  *((unsigned long *)&__m128i_result[0]) = 0x4040801080200110;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8101010181010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x8101010181010101;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101030101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101030101;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xd78cfd70b5f65d77;
++  *((unsigned long *)&__m128i_result[0]) = 0x5779108fdedda7e5;
++  __m128i_out = __lsx_vbitset_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00004a1e00004a1e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x4000000040000000;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0007000000050000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080000100200001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0008000200020002;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff80ffff7e02;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00feff8000ff80ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0280000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff81ffff7f03;
++  *((unsigned long *)&__m128i_result[0]) = 0x04ffff8101ff81ff;
++  __m128i_out = __lsx_vbitset_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4480000044800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x45c0000044800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x4481000144810001;
++  *((unsigned long *)&__m128i_result[0]) = 0x45c04000c4808000;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x3a8100013a810001;
++  *((unsigned long *)&__m128i_result[0]) = 0x7bc04000ba808000;
++  __m128i_out = __lsx_vbitset_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000cecd00004657;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000c90000011197;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000200000800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100800000;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f8000017f800001;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8000017f800001;
++  __m128i_out = __lsx_vbitset_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
+new file mode 100644
+index 000000000..540a724a7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vbitseti.c
+@@ -0,0 +1,279 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0020002000200020;
++  __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0040000000ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0040000000000000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x36);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x54beed87bc3f2be1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8024d8f6a494afcb;
++  *((unsigned long *)&__m128i_result[1]) = 0x54feed87bc3f2be1;
++  *((unsigned long *)&__m128i_result[0]) = 0x8064d8f6a494afcb;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x36);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000c400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x001000100010c410;
++  __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2e2b34ca59fa4c88;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966;
++  *((unsigned long *)&__m128i_result[1]) = 0x3e2b34ca59fa4c88;
++  *((unsigned long *)&__m128i_result[0]) = 0x3b2c8aefd44be966;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0040004017fda869;
++  __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x800000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x800000ff080000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000000010000;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0004000000040000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004000000040000;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87;
++  *((unsigned long *)&__m128i_result[1]) = 0xf51cf8dad6040188;
++  *((unsigned long *)&__m128i_result[0]) = 0x0982eadaf234ed87;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000000000000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x31);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000006;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000030000003f;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe5e5e5e5e5e5e5e5;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe5e5e5e5e4e4e46d;
++  *((unsigned long *)&__m128i_result[1]) = 0xe5e5e5e5e5e5e5e5;
++  *((unsigned long *)&__m128i_result[0]) = 0xe5e5e5e5e4e4e46d;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000;
++  __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0800080008000800;
++  __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0100000001000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_result[0]) = 0x2020202020207fff;
++  __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000900000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000900013fa0;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x23);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3ff0008000800080;
++  *((unsigned long *)&__m128i_result[0]) = 0x40f3fa8000800080;
++  __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000040000000000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404;
++  *((unsigned long *)&__m128i_result[0]) = 0xc404040404040404;
++  __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000040804000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000040804000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000040a04000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000040a04000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1f81e3779b97f4a8;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff02000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1f81e3779b97f4a8;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000008000000080;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0100010001000101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100010001000101;
++  __m128i_out = __lsx_vbitseti_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000010000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000010000000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002711250a27112;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00d2701294027112;
++  *((unsigned long *)&__m128i_result[1]) = 0x080a791a58aa791a;
++  *((unsigned long *)&__m128i_result[0]) = 0x08da781a9c0a791a;
++  __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_result[1]) = 0x1313131313131313;
++  *((unsigned long *)&__m128i_result[0]) = 0x1313131313131313;
++  __m128i_out = __lsx_vbitseti_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000000000000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x30);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000000;
++  __m128i_out = __lsx_vbitseti_d (__m128i_op0, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff0008000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff0008000000080;
++  __m128i_out = __lsx_vbitseti_w (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
+new file mode 100644
+index 000000000..2c1099a04
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclo.c
+@@ -0,0 +1,266 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vclo_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe2ecd48adedc7c82;
++  *((unsigned long *)&__m128i_op0[0]) = 0x25d666472b01d18d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0303020102020001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000000000201;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000fefefe6a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000007070700;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000002010202;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3d3d3d3d3d3d3d3d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000007e8a60;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000001edde;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x05d0ae6002e8748e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcd1de80217374041;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000001fffff59;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000aaaa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe500ffffc085;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001300000012;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001200000012;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000a00000009;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x413e276583869d79;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f7f017f9d8726d3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc090380000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000200000000d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fec20704;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclo_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000200000001c;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000200000001c;
++  __m128i_out = __lsx_vclo_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
+new file mode 100644
+index 000000000..12df2c670
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vclz.c
+@@ -0,0 +1,265 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010000800100008;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000001fc1a568;
++  *((unsigned long *)&__m128i_op0[0]) = 0x02693fe0e7beb077;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000030000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0006000200000000;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f7f000b000b000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000b000b010a000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101080408040804;
++  *((unsigned long *)&__m128i_result[0]) = 0x0804080407040804;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1ffffffff8001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf0bd80bd80bd8000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010000fe7c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01;
++  *((unsigned long *)&__m128i_result[1]) = 0x000f000f00100000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000f000f00100000;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x41dfffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0100000008080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vclz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000039;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000039;
++  __m128i_out = __lsx_vclz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff000100ff00fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff003000ff00a0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0008000f00080008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0008000a00080008;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe813f00fe813f00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe813f00fe813f00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000bffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vclz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000c0c00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vclz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x687a8373f249bc44;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7861145d9241a14a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101030100010001;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vclz_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080700000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000f0000000f;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000008000001e;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000200000001b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000000;
++  __m128i_out = __lsx_vclz_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080805;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080805;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vclz_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000000000;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vclz_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
+new file mode 100644
+index 000000000..66982d89f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vpcnt.c
+@@ -0,0 +1,350 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x7);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefefe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000003c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0701000007010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0701000000000000;
++  __m128i_out = __lsx_vpcnt_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x807f7f8000ffff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00feff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0107070100080800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080800070800;
++  __m128i_out = __lsx_vpcnt_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303;
++  __m128i_out = __lsx_vpcnt_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000100010;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128i_op0[0]) = 0x803f800080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000009;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0003000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000000010000;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0ba00ba00ba00ba0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0ba00ba00ba011eb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000a0000000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000a0000000d;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfbfbfb17fbfb38ea;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfbfb47fbfbfb0404;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000029;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffbfc0ffffbfc0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000032;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0003000900050007;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff0800080008000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe160065422d476da;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000d00000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000b00000010;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001000000000;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vpcnt_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0103000201030002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000200000001e;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000200000001e;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbbe5560400010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe7e5dabf00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000b000500010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x000b000c00010001;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001f0000001f;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000600007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000008ffffa209;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000467fef81;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000013;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000fe03fe01;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fe01fe01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000007020701;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000007010701;
++  __m128i_out = __lsx_vpcnt_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f80000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf654ad7447e59090;
++  *((unsigned long *)&__m128i_op0[0]) = 0x27b1b106b8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000120000000d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e;
++  __m128i_out = __lsx_vpcnt_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vdiv-vmod-instruct.patch b/LoongArch-Add-tests-for-SX-vector-vdiv-vmod-instruct.patch
new file mode 100644
index 0000000000000000000000000000000000000000..447db395cf222980e673a2b3e1a1b0014fb4e814
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vdiv-vmod-instruct.patch
@@ -0,0 +1,1114 @@
+From 1a3f6886143b0fd334d1d7530bce0a746b106b27 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 18:51:44 +0800
+Subject: [PATCH 088/124] LoongArch: Add tests for SX vector vdiv/vmod
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmod-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmod-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vdiv-1.c         | 299 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vdiv-2.c         | 254 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmod-1.c         | 254 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmod-2.c         | 254 +++++++++++++++
+ 4 files changed, 1061 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
+new file mode 100644
+index 000000000..cb4be0475
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-1.c
+@@ -0,0 +1,299 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40f3fa0000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xc110000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc00d060000000000;
++  __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101000101010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000fe0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00ffffff00ff;
++  __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010100000000;
++  __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff9727ffff9727;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffe79ffffba5f;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x010169d9010169d9;
++  *((unsigned long *)&__m128i_result[0]) = 0x01010287010146a1;
++  __m128i_out = __lsx_vdiv_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408;
++  *((unsigned long *)&__m128i_op1[1]) = 0x80010001b57fc565;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8001000184000be0;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000080001fffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff9cf0d77b;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc1000082b0fb585b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x33f5c2d7d975d7fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff010000ff01;
++  __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363abdf16;
++  *((unsigned long *)&__m128i_op1[0]) = 0x41f8e08016161198;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000;
++  __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00001ff800000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ffe800e80000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6a1a3fbb3c90260e;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xe6a0cf86a2fb5345;
++  *((unsigned long *)&__m128i_result[0]) = 0x95e5c045c36fd9f2;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000fffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa2e3a36363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa2e3a36463636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f80000000000007;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000700000007;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000e32c50e;
++  *((unsigned long *)&__m128i_result[0]) = 0xf2b2ce330e32c50e;
++  __m128i_out = __lsx_vdiv_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000001;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001084314a6;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001084314a6;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000ffef0010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000010000010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101000001000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4280000042800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xbd7fffffbd800000;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_op1[0]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000004ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000667ae56;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020;
++  __m128i_out = __lsx_vdiv_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
+new file mode 100644
+index 000000000..f2bc7df27
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vdiv-2.c
+@@ -0,0 +1,254 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe3e3e3e3e3e3e3e3;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe3e3e3e3e3e3e3e3;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe3e3e3e3e3e3e3e3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_op1[1]) = 0x31b1777777777776;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6eee282828282829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0effeffefdffa1e0;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe6004c5f64284224;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfeffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x203e16d116de012b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000002a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ffffff00ff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000003f200001e01;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000014bf000019da;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c9c99aed5b88fcf;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7c3650c5f79a61a3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080800008;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffd700;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffbfff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0080006b0000000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000001ff1745745c;
++  __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff14eb54ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0x14ea6a002a406a00;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff80008a7555aa;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a7535006af05cf9;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000000;
++  __m128i_out = __lsx_vdiv_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfebffefffebffeff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfebffefffebffeff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63996399;
++  *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363797c63996399;
++  *((unsigned long *)&__m128i_op1[0]) = 0x171f0a1f6376441f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000036de0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003be14000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000007e8a60;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000001edde;
++  __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0042003e0042002f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001fffc0001fffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000feff2356;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fd165486;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000007;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000246d9755;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000002427c2ee;
++  __m128i_out = __lsx_vdiv_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
+new file mode 100644
+index 000000000..5470d40dd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-1.c
+@@ -0,0 +1,254 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x82c539ffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc72df14afbfafdf9;
++  *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff994cb09c;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc3639d96;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c844;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c844;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001808281820102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001808201018081;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001008281820102;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001008201010081;
++  __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010240010202;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0101080408040804;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0804080407040804;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800;
++  __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1202120212021202;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1202120212021202;
++  *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000;
++  __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfeca2eb9931;
++  *((unsigned long *)&__m128i_op1[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op1[0]) = 0x370bdfeca2eb9931;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x805ffffe01001fe0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9a49e11102834d70;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8144ffff01c820a4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9b2ee1a4034b4e34;
++  *((unsigned long *)&__m128i_result[1]) = 0xff1affff01001fe0;
++  *((unsigned long *)&__m128i_result[0]) = 0xff1aff6d02834d70;
++  __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001d001d001d001d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001d001d001d0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001d001d001d001d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001d001d001d0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ffff000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffff000000ff00;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80000000307d0771;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0d8e36706ac02b9b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x80000000307d0771;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0d8e36706ac02b9b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x413e276583869d79;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f7f017f9d8726d3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000011ffee;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000dfff2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
+new file mode 100644
+index 000000000..8deb04427
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmod-2.c
+@@ -0,0 +1,254 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x16161616a16316b0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x16161616a16316b0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5a5a5a5a5b5a5b5a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5a5a5a5a5b5a5b5a;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001494b494a;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001494b494a;
++  __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa352bfac9269e0aa;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffeb;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffeb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f801fa06451ef11;
++  *((unsigned long *)&__m128i_op1[0]) = 0x68bcf93435ed25ed;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000022666621;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffdd9999da;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f00107f04;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f0000fd7f0000fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000066621;
++  *((unsigned long *)&__m128i_result[0]) = 0x01ff00085e9900ab;
++  __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000bd3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xefffdffff0009d3d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000bd3d;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000;
++  __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x478b478b38031779;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6b769e690fa1e119;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0;
++  __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002;
++  __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff100000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000f000000000000;
++  __m128i_out = __lsx_vmod_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fffe0000fffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0000fffe;
++  __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000101fd01fe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff80ff80ff80ff80;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff80ff8080008000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000101fd01fe;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff2cfed4fea8ff44;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffeffff0035ff8f;
++  *((unsigned long *)&__m128i_result[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000a0;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0003c853c843c844;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003c853c843c844;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000003ddc5dac;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffefffff784;
++  *((unsigned long *)&__m128i_op1[1]) = 0x10f8000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001000010f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0177fff0fffffff0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000011ff8bc;
++  __m128i_out = __lsx_vmod_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vexth-vextl-vldi-v.patch b/LoongArch-Add-tests-for-SX-vector-vexth-vextl-vldi-v.patch
new file mode 100644
index 0000000000000000000000000000000000000000..17909c03c9ba667ae498b57b73b1502926dc59dc
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vexth-vextl-vldi-v.patch
@@ -0,0 +1,1664 @@
+From ed55869f2ae380ac36d09746e7e04ce675e197b0 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen <chenxiaolong@loongson.cn>
+Date: Mon, 11 Sep 2023 18:44:16 +0800
+Subject: [PATCH 086/124] LoongArch: Add tests for SX vector
+ vexth/vextl/vldi/vneg/vsat instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vexth-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vexth-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vextl-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vextl-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vldi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vneg.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsat-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsat-2.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ .../loongarch/vector/lsx/lsx-vexth-1.c        | 342 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vexth-2.c        | 182 ++++++++++
+ .../loongarch/vector/lsx/lsx-vextl-1.c        |  83 +++++
+ .../loongarch/vector/lsx/lsx-vextl-2.c        |  83 +++++
+ .../loongarch/vector/lsx/lsx-vldi.c           |  61 ++++
+ .../loongarch/vector/lsx/lsx-vneg.c           | 321 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vsat-1.c         | 231 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vsat-2.c         | 272 ++++++++++++++
+ 8 files changed, 1575 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
+new file mode 100644
+index 000000000..f6390800d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-1.c
+@@ -0,0 +1,342 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f909;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1010111105050000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4040000041410101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000110011;
++  *((unsigned long *)&__m128i_result[0]) = 0x0005000500000000;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000003ffe2;
++  __m128i_out = __lsx_vexth_h_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00003ff000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03c0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03c0038000000380;
++  *((unsigned long *)&__m128i_result[1]) = 0x000003c000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xb9fe3640e4eb1b18;
++  *((unsigned long *)&__m128i_op0[0]) = 0x800000005b4b1b18;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffb9fe00003640;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe4eb00001b18;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfec00130014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfec00130014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000370bffffdfec;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001300000014;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe500c085c000c005;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe5c1a185c48004c5;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe500ffffc085;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffc000ffffc005;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_w_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5c9c9c9ce3636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x63635c9e63692363;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000005c9c9c9c;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffe3636363;
++  __m128i_out = __lsx_vexth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x63b2ac27aa076aeb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000063b2ac27;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffaa076aeb;
++  __m128i_out = __lsx_vexth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x002a001a001a000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000002a001a;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000001a000b;
++  __m128i_out = __lsx_vexth_d_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0028280000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x012927ffff272800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0028280000000000;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff000000ff;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000020000020;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000080;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3a8000003a800000;
++  __m128i_out = __lsx_vexth_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
+new file mode 100644
+index 000000000..6ab217e97
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vexth-2.c
+@@ -0,0 +1,182 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x004f0080004f0080;
++  *((unsigned long *)&__m128i_result[0]) = 0x004f0080004f0080;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff0000007f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x5);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x002cffacffacffab;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000007f00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vexth_hu_bu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000082020201;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000820200000201;
++  __m128i_out = __lsx_vexth_wu_hu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010012;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fec20704;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000012;
++  __m128i_out = __lsx_vexth_wu_hu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vexth_du_wu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_du_wu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_du_wu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_du_wu (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000001;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000000020000;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000b4a00008808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080800000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000b4a00008808;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000400080003fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000bc2000007e10;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000400080003fff;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vexth_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
+new file mode 100644
+index 000000000..99854dbd8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-1.c
+@@ -0,0 +1,83 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000170014;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xff0cff78ff96ff14;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe500ffffc085;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffc000ffffc005;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3131313131313131;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3131313131313131;
++  __m128i_out = __lsx_vextl_q_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
+new file mode 100644
+index 000000000..73bb530c9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vextl-2.c
+@@ -0,0 +1,83 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000101fffff8b68;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000b6fffff8095;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000b6fffff8095;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000104000800;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010000fe7c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000100010000fe01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100010000fe01;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x33f5c2d7d975d7fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000d82;
++  *((unsigned long *)&__m128i_op0[0]) = 0x046a09ec009c0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x046a09ec009c0000;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vextl_qu_du (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
+new file mode 100644
+index 000000000..089500ea9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vldi.c
+@@ -0,0 +1,61 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_result[1]) = 0x00a300a300a300a3;
++  *((unsigned long *)&__m128i_result[0]) = 0x00a300a300a300a3;
++  __m128i_out = __lsx_vldi (1187);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffe15;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe15;
++  __m128i_out = __lsx_vldi (3605);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_result[1]) = 0xecececececececec;
++  *((unsigned long *)&__m128i_result[0]) = 0xecececececececec;
++  __m128i_out = __lsx_vldi (1004);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffff00ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffff00ff00ff00;
++  __m128i_out = __lsx_vldi (-1686);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x3);
++  *((unsigned long *)&__m128i_result[1]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_result[0]) = 0x004d004d004d004d;
++  __m128i_out = __lsx_vldi (1101);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_result[1]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a0000000a000000;
++  __m128i_out = __lsx_vldi (-3318);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffff00ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffff00ff00ff00;
++  __m128i_out = __lsx_vldi (-1686);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_result[1]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a0000000a000000;
++  __m128i_out = __lsx_vldi (-3318);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
+new file mode 100644
+index 000000000..9441ba50e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vneg.c
+@@ -0,0 +1,321 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffeffffffff;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffffffc;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff01;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000fff3;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff0001ffffff0a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100000101;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100ff010101f6;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff000000ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0100000001000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100010000000000;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000fffffeff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffbff8888080a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x080803ff807ff7f9;
++  *((unsigned long *)&__m128i_result[1]) = 0x010105017878f8f6;
++  *((unsigned long *)&__m128i_result[0]) = 0xf8f8fd0180810907;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000300000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffdffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffeffff;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x441ba9fcffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x181b2541ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xbbe5560400010001;
++  *((unsigned long *)&__m128i_result[0]) = 0xe7e5dabf00010001;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000060a3db;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa70594c000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ff9f5c25;
++  *((unsigned long *)&__m128i_result[0]) = 0x58fa6b4000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vneg_b (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000008000001e;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffe1;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff7fffffe2;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_result[0]) = 0x377b810912c0e000;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffc00001ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x003ffffe00800000;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vneg_h (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x087c000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000087c;
++  *((unsigned long *)&__m128i_result[1]) = 0xf784000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffff784;
++  __m128i_out = __lsx_vneg_d (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vneg_w (__m128i_op0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
+new file mode 100644
+index 000000000..cd8eefb47
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-1.c
+@@ -0,0 +1,231 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf000000000000000;
++  __m128i_out = __lsx_vsat_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsat_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_result[1]) = 0x03ff0101fc010102;
++  *((unsigned long *)&__m128i_result[0]) = 0x03fffffffc010102;
++  __m128i_out = __lsx_vsat_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsat_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff8383ffff7d0d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000040400000383;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe000ffff1fff;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8da00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00ffff00;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000000010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000000010001;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000c000ffffc000;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0038d800ff000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00fffe00fffffe00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0038f000ff000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fffe00fffffe00;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x003f0000003f0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x003f0000003f0000;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0674c886fcba4e98;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfdce8003090b0906;
++  *((unsigned long *)&__m128i_result[1]) = 0x003fffc0ffc0003f;
++  *((unsigned long *)&__m128i_result[0]) = 0xffc0ffc0003f003f;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x04e00060ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x04e00060ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x007fffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x007fffffffffffff;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000017f0a82;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000003f;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x3);
++  *((unsigned long *)&__m128i_op0[1]) = 0x8006000080020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8004000080020000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff8fffffff8;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff8fffffff8;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff00000000f;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000003ff8;
++  __m128i_out = __lsx_vsat_w (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_d (__m128i_op0, 0x35);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
+new file mode 100644
+index 000000000..31e3919bf
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsat-2.c
+@@ -0,0 +1,272 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff1739ffff48aa;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff2896ffff5b88;
++  *((unsigned long *)&__m128i_result[1]) = 0x3f3f17393f3f3f3f;
++  *((unsigned long *)&__m128i_result[0]) = 0x3f3f283f3f3f3f3f;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010100000000;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffcc000b000b000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000b000b010a000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f7f000b000b000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000b000b010a000b;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000068;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffcd63ffffcd63;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffd765ffffd765;
++  *((unsigned long *)&__m128i_result[1]) = 0x1f1f1f1f1f1f1f1f;
++  *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f1f1f1f1f;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000120000000d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vsat_bu (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x003f00000000003f;
++  *((unsigned long *)&__m128i_result[0]) = 0x003f000000000000;
++  __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000007f8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000007f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff;
++  __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_hu (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_hu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000006de1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5f9ccf33cf600000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m128i_result[0]) = 0x0007000700070000;
++  __m128i_out = __lsx_vsat_hu (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000f;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000bd3d00000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00003fff00003fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00003fff00003fff;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001a323b5430048c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x008f792cab1cb915;
++  *((unsigned long *)&__m128i_result[1]) = 0x001a323b00ffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x008f792c00ffffff;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_wu (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x20);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636389038903;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636389038903;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000001ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000001ffff;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x22);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x36);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000001fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x34);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000001fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa8a74bff9e9e0070;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9e9e72ff9e9ff9ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff;
++  __m128i_out = __lsx_vsat_du (__m128i_op0, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vfcmp-instructions.patch b/LoongArch-Add-tests-for-SX-vector-vfcmp-instructions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4a64bb81151984addc33a3ab1d359450fa8f0453
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vfcmp-instructions.patch
@@ -0,0 +1,5295 @@
+From 8cea23eb3f7e7aee77d0cf87581754c017691b91 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 11:31:16 +0800
+Subject: [PATCH 095/124] LoongArch: Add tests for SX vector vfcmp
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vfcmp_caf.c      | 244 ++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_ceq.c      | 516 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_cle.c      | 530 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_clt.c      | 476 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_cne.c      | 378 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_cor.c      | 170 ++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_cun.c      | 253 +++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_saf.c      | 214 +++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_seq.c      | 450 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_sle.c      | 407 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_slt.c      | 512 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_sne.c      | 398 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_sor.c      | 269 +++++++++
+ .../loongarch/vector/lsx/lsx-vfcmp_sun.c      | 335 +++++++++++
+ 14 files changed, 5152 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
+new file mode 100644
+index 000000000..b448c2076
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_caf.c
+@@ -0,0 +1,244 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x01010101;
++  *((int *)&__m128_op0[0]) = 0x01010101;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7ef400ad;
++  *((int *)&__m128_op0[2]) = 0x21fc7081;
++  *((int *)&__m128_op0[1]) = 0x28bf0351;
++  *((int *)&__m128_op0[0]) = 0xec69b5f2;
++  *((int *)&__m128_op1[3]) = 0xff800000;
++  *((int *)&__m128_op1[2]) = 0xff800000;
++  *((int *)&__m128_op1[1]) = 0xff800000;
++  *((int *)&__m128_op1[0]) = 0x7fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x01000100;
++  *((int *)&__m128_op0[0]) = 0x01000100;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x64e464e4;
++  *((int *)&__m128_op1[0]) = 0x64e464e4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffc0ff80;
++  *((int *)&__m128_op1[2]) = 0xff800000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xc0800000;
++  *((int *)&__m128_op1[3]) = 0x0000001b;
++  *((int *)&__m128_op1[2]) = 0x0000001b;
++  *((int *)&__m128_op1[1]) = 0x0000001b;
++  *((int *)&__m128_op1[0]) = 0x0000001b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000002;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000002;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x34500292;
++  *((int *)&__m128_op1[0]) = 0x0f3017d6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00830029;
++  *((int *)&__m128_op0[0]) = 0x0038ff50;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff7fff80;
++  *((int *)&__m128_op0[2]) = 0xff800001;
++  *((int *)&__m128_op0[1]) = 0xe593d844;
++  *((int *)&__m128_op0[0]) = 0xe593c8c4;
++  *((int *)&__m128_op1[3]) = 0xff800000;
++  *((int *)&__m128_op1[2]) = 0xff800000;
++  *((int *)&__m128_op1[1]) = 0xe593c8c4;
++  *((int *)&__m128_op1[0]) = 0xe593c8c4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x8a8a8a8a;
++  *((int *)&__m128_op1[2]) = 0x8a8a8a8a;
++  *((int *)&__m128_op1[1]) = 0x8a8a8a8a;
++  *((int *)&__m128_op1[0]) = 0x8a8a8a8a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x66b34f643c9c626a;
++  *((unsigned long *)&__m128d_op0[0]) = 0x38d60e366e547876;
++  *((unsigned long *)&__m128d_op1[1]) = 0x66b34f643c9c626a;
++  *((unsigned long *)&__m128d_op1[0]) = 0x38d60e366e547876;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000700000004fdff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000300000000fdff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xf2f444429d96dbe1;
++  *((unsigned long *)&__m128d_op0[0]) = 0xddd76c75f2f44442;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128d_op1[0]) = 0xc1f03e1042208410;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffbfff7fffc000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffff43dfffff81fb;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_caf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
+new file mode 100644
+index 000000000..98941b47d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_ceq.c
+@@ -0,0 +1,516 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00007f00;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x01000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x08fdc221;
++  *((int *)&__m128_op0[2]) = 0xbfdb1927;
++  *((int *)&__m128_op0[1]) = 0x4303c67e;
++  *((int *)&__m128_op0[0]) = 0x9b7fb213;
++  *((int *)&__m128_op1[3]) = 0x0000800c;
++  *((int *)&__m128_op1[2]) = 0x0004300c;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000800;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00007fff;
++  *((int *)&__m128_op0[2]) = 0x00007fff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x2bfd9461;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x2bfd9461;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x01000000;
++  *((int *)&__m128_op0[0]) = 0x01000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xcd636363;
++  *((int *)&__m128_op1[2]) = 0xcd636363;
++  *((int *)&__m128_op1[1]) = 0xcd636363;
++  *((int *)&__m128_op1[0]) = 0xcd636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x007fffff;
++  *((int *)&__m128_op0[1]) = 0x007fffff;
++  *((int *)&__m128_op0[0]) = 0xff800000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x0000cecd;
++  *((int *)&__m128_op1[2]) = 0x00004657;
++  *((int *)&__m128_op1[1]) = 0x0000c900;
++  *((int *)&__m128_op1[0]) = 0x00011197;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xf51df8db;
++  *((int *)&__m128_op0[2]) = 0xd6050189;
++  *((int *)&__m128_op0[1]) = 0x0983e2db;
++  *((int *)&__m128_op0[0]) = 0xf235ed87;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0x3ea5016b;
++  *((int *)&__m128_op1[1]) = 0xfffefffe;
++  *((int *)&__m128_op1[0]) = 0x3f6fb04d;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xffa8ff9f;
++  *((int *)&__m128_op1[1]) = 0x0000ffff;
++  *((int *)&__m128_op1[0]) = 0xffabff99;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000ff00;
++  *((int *)&__m128_op1[3]) = 0x40404040;
++  *((int *)&__m128_op1[2]) = 0x40404040;
++  *((int *)&__m128_op1[1]) = 0x40404040;
++  *((int *)&__m128_op1[0]) = 0x40404040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x3bcc5098;
++  *((int *)&__m128_op1[2]) = 0x703fa5f0;
++  *((int *)&__m128_op1[1]) = 0xab7b3134;
++  *((int *)&__m128_op1[0]) = 0x9703f605;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x000000ff;
++  *((int *)&__m128_op0[0]) = 0xfe01fd02;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x0001fe01;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x000000ff;
++  *((int *)&__m128_op0[0]) = 0xfe01fd02;
++  *((int *)&__m128_op1[3]) = 0x00000001;
++  *((int *)&__m128_op1[2]) = 0x00000100;
++  *((int *)&__m128_op1[1]) = 0x00000001;
++  *((int *)&__m128_op1[0]) = 0x00000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00800000;
++  *((int *)&__m128_op0[0]) = 0x00800000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00800000;
++  *((int *)&__m128_op1[0]) = 0x00800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xc2409edab019323f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x460f3b393ef4be3a;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0100000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128d_op0[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128d_op1[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_ceq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0100000001000100;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0100010000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000c000ffffc000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffe000ffdf;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xd78cfd70b5f65d77;
++  *((unsigned long *)&__m128d_op1[0]) = 0x5779108fdedda7e5;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000ff0000ffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
+new file mode 100644
+index 000000000..409bce0ec
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cle.c
+@@ -0,0 +1,530 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00001802;
++  *((int *)&__m128_op0[0]) = 0x041b0013;
++  *((int *)&__m128_op1[3]) = 0xff800000;
++  *((int *)&__m128_op1[2]) = 0xff800000;
++  *((int *)&__m128_op1[1]) = 0xff800000;
++  *((int *)&__m128_op1[0]) = 0xc3080000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x17fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000404;
++  *((int *)&__m128_op1[2]) = 0x00000383;
++  *((int *)&__m128_op1[1]) = 0xffffe000;
++  *((int *)&__m128_op1[0]) = 0xffff1fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x000000fe;
++  *((int *)&__m128_op0[2]) = 0x808000ff;
++  *((int *)&__m128_op0[1]) = 0x000000fe;
++  *((int *)&__m128_op0[0]) = 0x808000fe;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000020;
++  *((int *)&__m128_op0[2]) = 0x00000020;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x0000ffc1;
++  *((int *)&__m128_op1[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000004;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xe0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x01010001;
++  *((int *)&__m128_op1[2]) = 0x00010001;
++  *((int *)&__m128_op1[1]) = 0x01010301;
++  *((int *)&__m128_op1[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffff00;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000001;
++  *((int *)&__m128_op1[2]) = 0x00000001;
++  *((int *)&__m128_op1[1]) = 0x00000001;
++  *((int *)&__m128_op1[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000001;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00010001;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00060fbf;
++  *((int *)&__m128_op1[2]) = 0x02040fbf;
++  *((int *)&__m128_op1[1]) = 0x00020fbf;
++  *((int *)&__m128_op1[0]) = 0x02000fbf;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0a752a55;
++  *((int *)&__m128_op0[1]) = 0x0a753500;
++  *((int *)&__m128_op0[0]) = 0x950fa306;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x380fdfdf;
++  *((int *)&__m128_op1[0]) = 0xc0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000007fff800000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000080007f80800;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000080800000808;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffff80800001;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffff80800001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x5f675e96a8d359f5;
++  *((unsigned long *)&__m128d_op0[0]) = 0x46387f95d9a68001;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x131211101211100f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x11100f0e100f0e0d;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000000002a000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffff7f8c;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfcfcfcfcfcfc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffffffff359f358;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffffffff359f358;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000003ff8;
++  *((unsigned long *)&__m128d_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128d_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000001;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x4f804f81;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x4f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7fff0007;
++  *((int *)&__m128_op0[2]) = 0xe215b122;
++  *((int *)&__m128_op0[1]) = 0x7ffeffff;
++  *((int *)&__m128_op0[0]) = 0x7bfff828;
++  *((int *)&__m128_op1[3]) = 0x80010009;
++  *((int *)&__m128_op1[2]) = 0x816ac5de;
++  *((int *)&__m128_op1[1]) = 0x80010001;
++  *((int *)&__m128_op1[0]) = 0x84000bd8;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xfefa0000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x9c9c9c9c;
++  *((int *)&__m128_op1[2]) = 0x9c9c9c9c;
++  *((int *)&__m128_op1[1]) = 0x9c9c9c9c;
++  *((int *)&__m128_op1[0]) = 0x9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0c0b0a09;
++  *((int *)&__m128_op0[2]) = 0x0b0a0908;
++  *((int *)&__m128_op0[1]) = 0x0a090807;
++  *((int *)&__m128_op0[0]) = 0x09080706;
++  *((int *)&__m128_op1[3]) = 0x0c0b0a09;
++  *((int *)&__m128_op1[2]) = 0x0b0a0908;
++  *((int *)&__m128_op1[1]) = 0x0a090807;
++  *((int *)&__m128_op1[0]) = 0x09080706;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000020;
++  *((int *)&__m128_op1[2]) = 0x00000020;
++  *((int *)&__m128_op1[1]) = 0x0000001f;
++  *((int *)&__m128_op1[0]) = 0x0000001f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7ff80000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x7ff80000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff80ff0010ff06;
++  *((unsigned long *)&__m128d_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xedfaedfaedfaedfa;
++  *((unsigned long *)&__m128d_op1[0]) = 0xedfaedfaedfaedfa;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
+new file mode 100644
+index 000000000..39c9cf7a7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_clt.c
+@@ -0,0 +1,476 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x56411278;
++  *((int *)&__m128_op0[2]) = 0x43c0d41e;
++  *((int *)&__m128_op0[1]) = 0x0124d8f6;
++  *((int *)&__m128_op0[0]) = 0xa494006b;
++  *((int *)&__m128_op1[3]) = 0x7f800000;
++  *((int *)&__m128_op1[2]) = 0xff800000;
++  *((int *)&__m128_op1[1]) = 0xff800000;
++  *((int *)&__m128_op1[0]) = 0xff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x84939413;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000002;
++  *((int *)&__m128_op0[0]) = 0xbefcb21e;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xfffefff6;
++  *((int *)&__m128_op0[0]) = 0xfff80002;
++  *((int *)&__m128_op1[3]) = 0x000000c5;
++  *((int *)&__m128_op1[2]) = 0xac01015b;
++  *((int *)&__m128_op1[1]) = 0xaaacac88;
++  *((int *)&__m128_op1[0]) = 0xa3a9a96a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff84fff4;
++  *((int *)&__m128_op0[2]) = 0xff84fff4;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffff0;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x11000f20;
++  *((int *)&__m128_op0[2]) = 0x10000e20;
++  *((int *)&__m128_op0[1]) = 0x0f000d20;
++  *((int *)&__m128_op0[0]) = 0x0e000c20;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000c00;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00bd003d;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000005;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000005;
++  *((int *)&__m128_op1[3]) = 0xfffefffe;
++  *((int *)&__m128_op1[2]) = 0xfffefffe;
++  *((int *)&__m128_op1[1]) = 0xfffefffe;
++  *((int *)&__m128_op1[0]) = 0xfffefffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xff800001;
++  *((int *)&__m128_op0[0]) = 0x0f800000;
++  *((int *)&__m128_op1[3]) = 0x00000009;
++  *((int *)&__m128_op1[2]) = 0x00000009;
++  *((int *)&__m128_op1[1]) = 0xff80000a;
++  *((int *)&__m128_op1[0]) = 0x0f800009;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0000ffff;
++  *((int *)&__m128_op0[1]) = 0x3b5eae24;
++  *((int *)&__m128_op0[0]) = 0xab7e3848;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00003f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x63636363;
++  *((int *)&__m128_op0[2]) = 0x3f3e47c1;
++  *((int *)&__m128_op0[1]) = 0x41f8e080;
++  *((int *)&__m128_op0[0]) = 0xf1ef4eaa;
++  *((int *)&__m128_op1[3]) = 0x0000cecd;
++  *((int *)&__m128_op1[2]) = 0x00004657;
++  *((int *)&__m128_op1[1]) = 0x0000c900;
++  *((int *)&__m128_op1[0]) = 0x00011197;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_clt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x454c2996;
++  *((int *)&__m128_op0[2]) = 0x0ffe354e;
++  *((int *)&__m128_op0[1]) = 0x9e063f80;
++  *((int *)&__m128_op0[0]) = 0x2742ba3e;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x42652524;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00070000;
++  *((int *)&__m128_op0[2]) = 0x00050000;
++  *((int *)&__m128_op0[1]) = 0x00030000;
++  *((int *)&__m128_op0[0]) = 0x00010000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xff81007c;
++  *((int *)&__m128_op1[1]) = 0xffb7005f;
++  *((int *)&__m128_op1[0]) = 0x0070007c;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0000006f;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfbffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x7bffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xfbffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x7bffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0002a000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x0002a000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xfc606ec5;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x14155445;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x01030103;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00060fbf;
++  *((int *)&__m128_op0[2]) = 0x02040fbf;
++  *((int *)&__m128_op0[1]) = 0x00020fbf;
++  *((int *)&__m128_op0[0]) = 0x02000fbf;
++  *((int *)&__m128_op1[3]) = 0x63636363;
++  *((int *)&__m128_op1[2]) = 0x63636363;
++  *((int *)&__m128_op1[1]) = 0xffd27db0;
++  *((int *)&__m128_op1[0]) = 0x10d20fbf;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7f800000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000008;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000010000003f;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000010000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000036de0000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000003be14000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x1111113111111141;
++  *((unsigned long *)&__m128d_op0[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0032000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xe93d0bd19ff07013;
++  *((unsigned long *)&__m128d_op1[0]) = 0x65017c2ac9ca9fd0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x007f007f007f007e;
++  *((unsigned long *)&__m128d_op1[0]) = 0x007f007f007effc6;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000015800000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7f8100017f810001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7f8100017f810001;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_clt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x004200a000200001;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7fff00007fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128d_op1[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001e;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000455555555;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000001580000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
+new file mode 100644
+index 000000000..c3da43bb4
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cne.c
+@@ -0,0 +1,378 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x7ff80000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x0bd80bd8;
++  *((int *)&__m128_op1[2]) = 0x0bdfffff;
++  *((int *)&__m128_op1[1]) = 0x0bd80bd8;
++  *((int *)&__m128_op1[0]) = 0x0bd80000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00ff0077;
++  *((int *)&__m128_op0[2]) = 0x00070077;
++  *((int *)&__m128_op0[1]) = 0x00e600ef;
++  *((int *)&__m128_op0[0]) = 0x00ee01de;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00007fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00100010;
++  *((int *)&__m128_op0[2]) = 0x00100010;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x000000ff;
++  *((int *)&__m128_op1[0]) = 0xfe01fd02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xbf800000;
++  *((int *)&__m128_op0[2]) = 0x0000ffff;
++  *((int *)&__m128_op0[1]) = 0xcf000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x003f0000;
++  *((int *)&__m128_op1[2]) = 0x0000003f;
++  *((int *)&__m128_op1[1]) = 0x003f0000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x01ff01ff;
++  *((int *)&__m128_op0[2]) = 0x01ff01ff;
++  *((int *)&__m128_op0[1]) = 0x01ff01ff;
++  *((int *)&__m128_op0[0]) = 0x01ff01ff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x20202020;
++  *((int *)&__m128_op0[2]) = 0x20202020;
++  *((int *)&__m128_op0[1]) = 0x20202020;
++  *((int *)&__m128_op0[0]) = 0x20207fff;
++  *((int *)&__m128_op1[3]) = 0x32d3f35e;
++  *((int *)&__m128_op1[2]) = 0xcd509d13;
++  *((int *)&__m128_op1[1]) = 0x3e081b3c;
++  *((int *)&__m128_op1[0]) = 0x93f6b356;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffff0000;
++  *((int *)&__m128_op0[2]) = 0xffff0000;
++  *((int *)&__m128_op0[1]) = 0x40408010;
++  *((int *)&__m128_op0[0]) = 0x80200110;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x80000000;
++  *((int *)&__m128_op0[2]) = 0x80000008;
++  *((int *)&__m128_op0[1]) = 0xa2f54a1e;
++  *((int *)&__m128_op0[0]) = 0xa2f54a1e;
++  *((int *)&__m128_op1[3]) = 0x80000000;
++  *((int *)&__m128_op1[2]) = 0x80000008;
++  *((int *)&__m128_op1[1]) = 0xa2f54a1e;
++  *((int *)&__m128_op1[0]) = 0xa2f54a1e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000000000fc00;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000fc00;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001000100000004;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000020302030;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000020302030;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000100010;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000100010;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x5d7f5d007f6a007f;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7fff7fff7fff7f00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7ff000ff6220c0c1;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffe8081000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x1c083b1f3b1f3b1f;
++  *((unsigned long *)&__m128d_op0[0]) = 0xf244b948a323ab42;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000100fe000070a1;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000115ffffffa1;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffffffff8f8da00;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000003ea5016c;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffefefd3f7027c5;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000ffce;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x400000003fffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x4000000040000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128d_op0[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00007fff7fff8000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
+new file mode 100644
+index 000000000..5228dbede
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cor.c
+@@ -0,0 +1,170 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfffffeff;
++  *((int *)&__m128_op0[2]) = 0xfffffeff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffcff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00800000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xf4b6f3f5;
++  *((int *)&__m128_op0[0]) = 0x2f4ef4a8;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x08080808;
++  *((int *)&__m128_op1[2]) = 0x08080808;
++  *((int *)&__m128_op1[1]) = 0x08080808;
++  *((int *)&__m128_op1[0]) = 0x08080808;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000ffce;
++  *((int *)&__m128_op1[3]) = 0xffff0001;
++  *((int *)&__m128_op1[2]) = 0x1cf0c569;
++  *((int *)&__m128_op1[1]) = 0xc0000002;
++  *((int *)&__m128_op1[0]) = 0xb0995850;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x0a752a55;
++  *((int *)&__m128_op0[1]) = 0x0a753500;
++  *((int *)&__m128_op0[0]) = 0x950fa306;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x0a752a55;
++  *((int *)&__m128_op1[1]) = 0x0a753500;
++  *((int *)&__m128_op1[0]) = 0x950fa306;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000ffffffdfffdf;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000ffffffdfffdf;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128d_op0[0]) = 0xa352bfac9269e0aa;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
+new file mode 100644
+index 000000000..a2beff53f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_cun.c
+@@ -0,0 +1,253 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x0000ffff;
++  *((int *)&__m128_op0[2]) = 0x0000ffff;
++  *((int *)&__m128_op0[1]) = 0x0000ffff;
++  *((int *)&__m128_op0[0]) = 0x0000fffe;
++  *((int *)&__m128_op1[3]) = 0x0000ffff;
++  *((int *)&__m128_op1[2]) = 0x0000ffff;
++  *((int *)&__m128_op1[1]) = 0x0000ffff;
++  *((int *)&__m128_op1[0]) = 0x0000fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00200010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7f800000;
++  *((int *)&__m128_op0[2]) = 0x7f800000;
++  *((int *)&__m128_op0[1]) = 0x7f800000;
++  *((int *)&__m128_op0[0]) = 0x7f800000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x0000fffe;
++  *((int *)&__m128_op0[0]) = 0x0000ffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffff0008;
++  *((int *)&__m128_op1[3]) = 0xffc2ffe0;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x0000ffc1;
++  *((int *)&__m128_op1[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000008;
++  *((int *)&__m128_op0[1]) = 0x00200020;
++  *((int *)&__m128_op0[0]) = 0x00200020;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfffefffe;
++  *((int *)&__m128_op0[2]) = 0xfffefffe;
++  *((int *)&__m128_op0[1]) = 0xfffefffe;
++  *((int *)&__m128_op0[0]) = 0xfffefffe;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xf001f001;
++  *((int *)&__m128_op1[0]) = 0x0101f002;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xfeffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xfeffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000fff;
++  *((int *)&__m128_op1[2]) = 0xffffe000;
++  *((int *)&__m128_op1[1]) = 0x00001020;
++  *((int *)&__m128_op1[0]) = 0x20204000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfefd7f7f7f7f7f7e;
++  *((unsigned long *)&__m128d_op0[0]) = 0xdffdbffeba6f5543;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff3d06ffff4506;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7ffffffe7ffff800;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000003bfb4000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000100010100;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff00011cf0c569;
++  *((unsigned long *)&__m128d_op0[0]) = 0xc0000002b0995850;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128d_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000024170000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000044470000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_cun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
+new file mode 100644
+index 000000000..bfa4914be
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_saf.c
+@@ -0,0 +1,214 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0x80000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00a300a3;
++  *((int *)&__m128_op1[2]) = 0x00a300a3;
++  *((int *)&__m128_op1[1]) = 0x00a300a3;
++  *((int *)&__m128_op1[0]) = 0x00a300a3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xb8ec43be;
++  *((int *)&__m128_op1[2]) = 0xfe38e64b;
++  *((int *)&__m128_op1[1]) = 0x6477d042;
++  *((int *)&__m128_op1[0]) = 0x343cce24;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000010;
++  *((int *)&__m128_op0[2]) = 0x00100010;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00020000;
++  *((int *)&__m128_op0[0]) = 0xffff0001;
++  *((int *)&__m128_op1[3]) = 0x63636363;
++  *((int *)&__m128_op1[2]) = 0x63636363;
++  *((int *)&__m128_op1[1]) = 0x63636363;
++  *((int *)&__m128_op1[0]) = 0x63636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x03080401;
++  *((int *)&__m128_op0[2]) = 0x0d090107;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0a0a0a000a0a0a00;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0a0a0a0009090900;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffff01;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000007ff000ff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x67157b5100005000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x387c7e0a133f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xf359f359f359f359;
++  *((unsigned long *)&__m128d_op0[0]) = 0xf359f359f359f359;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0177fff0fffffff0;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000011ff8bc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_saf_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
+new file mode 100644
+index 000000000..bc573936d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_seq.c
+@@ -0,0 +1,450 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xf2f2e5e5;
++  *((int *)&__m128_op0[2]) = 0xe5e5e5e5;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xe5e5e5e5;
++  *((int *)&__m128_op1[2]) = 0xe5e5e5e5;
++  *((int *)&__m128_op1[1]) = 0xe5e5e5e5;
++  *((int *)&__m128_op1[0]) = 0xe4e4e46d;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00800000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x1f400000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x14ccc632;
++  *((int *)&__m128_op0[2]) = 0x0076a4d2;
++  *((int *)&__m128_op0[1]) = 0x685670d2;
++  *((int *)&__m128_op0[0]) = 0x7e00682a;
++  *((int *)&__m128_op1[3]) = 0x14ccc632;
++  *((int *)&__m128_op1[2]) = 0x0076a4d2;
++  *((int *)&__m128_op1[1]) = 0x685670d2;
++  *((int *)&__m128_op1[0]) = 0x7e00682a;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00010001;
++  *((int *)&__m128_op0[2]) = 0x00010001;
++  *((int *)&__m128_op0[1]) = 0x00010001;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xc6c6c6c6;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xc6c6c6c6;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xc6c6c6c6;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0xc6c6c6c6;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000006;
++  *((int *)&__m128_op1[2]) = 0x00007fff;
++  *((int *)&__m128_op1[1]) = 0x00000008;
++  *((int *)&__m128_op1[0]) = 0xffffa209;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_seq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00fc0000;
++  *((int *)&__m128_op1[3]) = 0xfe07e5fe;
++  *((int *)&__m128_op1[2]) = 0xfefdddfe;
++  *((int *)&__m128_op1[1]) = 0x00020100;
++  *((int *)&__m128_op1[0]) = 0xfedd0c00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffff0000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000fffd;
++  *((int *)&__m128_op1[3]) = 0x7fffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00008000;
++  *((int *)&__m128_op1[2]) = 0x3f80ffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x370bdfec;
++  *((int *)&__m128_op0[2]) = 0xffecffec;
++  *((int *)&__m128_op0[1]) = 0x370bdfec;
++  *((int *)&__m128_op0[0]) = 0xa2eb9931;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000040;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000040;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xc2fc0000;
++  *((int *)&__m128_op1[2]) = 0xc3040000;
++  *((int *)&__m128_op1[1]) = 0xc2fc0000;
++  *((int *)&__m128_op1[0]) = 0xc3040000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00fe00fe;
++  *((int *)&__m128_op0[2]) = 0x000200fe;
++  *((int *)&__m128_op0[1]) = 0x00fe00fe;
++  *((int *)&__m128_op0[0]) = 0x000200fe;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000004;
++  *((int *)&__m128_op1[0]) = 0x55555555;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000158;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffffffa8;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xf3e6586b;
++  *((int *)&__m128_op0[2]) = 0x60d7b152;
++  *((int *)&__m128_op0[1]) = 0xf7077b93;
++  *((int *)&__m128_op0[0]) = 0x4ac0e000;
++  *((int *)&__m128_op1[3]) = 0x1498507a;
++  *((int *)&__m128_op1[2]) = 0x144d0050;
++  *((int *)&__m128_op1[1]) = 0x7b370981;
++  *((int *)&__m128_op1[0]) = 0xc01200e0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffc2007a;
++  *((int *)&__m128_op0[2]) = 0xff230027;
++  *((int *)&__m128_op0[1]) = 0x0080005e;
++  *((int *)&__m128_op0[0]) = 0xff600001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000100010001fffd;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000004fc04f81;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000004fc04f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x13f9c5b60028a415;
++  *((unsigned long *)&__m128d_op1[0]) = 0x545cab1d81a83bea;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_seq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfefd7f7f7f7f7f7e;
++  *((unsigned long *)&__m128d_op0[0]) = 0xdffdbffeba6f5543;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfefd7f7f7f7f7f7e;
++  *((unsigned long *)&__m128d_op1[0]) = 0xdffdbffeba6f5543;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffffff700000009;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffffff700000009;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x4fa432d67fc00000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0141010101410101;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffcffff00000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000fffd000a0000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xf0fd800080000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000a00028004000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00820082ff81ff81;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff81ff81ff81ff81;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000001000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000120002000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0007000100040102;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0003000100010101;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0007000100040102;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0003000100010101;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sueq_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
+new file mode 100644
+index 000000000..87cb8da7c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sle.c
+@@ -0,0 +1,407 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <stdio.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffdfff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffdfff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffe000;
++  *((int *)&__m128_op1[0]) = 0x01ffe200;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00010002;
++  *((int *)&__m128_op0[2]) = 0x0000fe7d;
++  *((int *)&__m128_op0[1]) = 0x00010002;
++  *((int *)&__m128_op0[0]) = 0x0000fe02;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x0000007b;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x1223dabf;
++  *((int *)&__m128_op0[2]) = 0x4c3b3549;
++  *((int *)&__m128_op0[1]) = 0x8e8f8626;
++  *((int *)&__m128_op0[0]) = 0xf15be124;
++  *((int *)&__m128_op1[3]) = 0xfffffacd;
++  *((int *)&__m128_op1[2]) = 0xb6dbecac;
++  *((int *)&__m128_op1[1]) = 0x1f5533a6;
++  *((int *)&__m128_op1[0]) = 0x94f902c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xfbffffff;
++  *((int *)&__m128_op0[0]) = 0x27001517;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x0000ffff;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffff53d9;
++  *((int *)&__m128_op0[1]) = 0xffff0001;
++  *((int *)&__m128_op0[0]) = 0xffff9515;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00010001;
++  *((int *)&__m128_op1[2]) = 0x00010001;
++  *((int *)&__m128_op1[1]) = 0x00010001;
++  *((int *)&__m128_op1[0]) = 0x00010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000080;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00df020f;
++  *((int *)&__m128_op0[0]) = 0x0078007f;
++  *((int *)&__m128_op1[3]) = 0x0037ffd4;
++  *((int *)&__m128_op1[2]) = 0x0083ffe5;
++  *((int *)&__m128_op1[1]) = 0x001e0052;
++  *((int *)&__m128_op1[0]) = 0x001ffff9;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00ff00ff;
++  *((int *)&__m128_op0[0]) = 0x00ff00ff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x05452505;
++  *((int *)&__m128_op0[1]) = 0x00000004;
++  *((int *)&__m128_op0[0]) = 0x442403e4;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000101;
++  *((int *)&__m128_op0[2]) = 0x00000101;
++  *((int *)&__m128_op0[1]) = 0x00000101;
++  *((int *)&__m128_op0[0]) = 0x00000101;
++  *((int *)&__m128_op1[3]) = 0x00000002;
++  *((int *)&__m128_op1[2]) = 0x00000002;
++  *((int *)&__m128_op1[1]) = 0x00000002;
++  *((int *)&__m128_op1[0]) = 0x00000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00010000;
++  *((int *)&__m128_op0[2]) = 0x00010000;
++  *((int *)&__m128_op0[1]) = 0x0000cd63;
++  *((int *)&__m128_op0[0]) = 0x0000cd63;
++  *((int *)&__m128_op1[3]) = 0xffffcd63;
++  *((int *)&__m128_op1[2]) = 0xffffcd63;
++  *((int *)&__m128_op1[1]) = 0xffffd765;
++  *((int *)&__m128_op1[0]) = 0xffffd765;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffff00ff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffff0000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0x0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffff0c8000c212;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfefffeff7f002d06;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00f0008100800080;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00f000807000009e;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfffe00029f9f6061;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffc0800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000c0010000a186;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00067fff0002a207;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sle_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffff80000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x3fbf3fbf00007fff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000003fbf3fbf;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7fff7fff7fff7ff8;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000013ec13e;
++  *((unsigned long *)&__m128d_op1[0]) = 0xc03fc03fc0ff00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffff00018d8b;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x67eb85af0000b000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sule_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
+new file mode 100644
+index 000000000..3845e8ec3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_slt.c
+@@ -0,0 +1,512 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;stdio.h&gt;
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x0000ffff;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x0000ffff;
++  *((int *)&__m128_op1[0]) = 0x0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7f800000;
++  *((int *)&__m128_op0[2]) = 0x7f800000;
++  *((int *)&__m128_op0[1]) = 0x7f800000;
++  *((int *)&__m128_op0[0]) = 0x7f800000;
++  *((int *)&__m128_op1[3]) = 0x7f800000;
++  *((int *)&__m128_op1[2]) = 0x7f800000;
++  *((int *)&__m128_op1[1]) = 0x7f800000;
++  *((int *)&__m128_op1[0]) = 0x7f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000001;
++  *((int *)&__m128_op0[2]) = 0x00007f01;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffff02;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x05452505;
++  *((int *)&__m128_op1[1]) = 0x00000004;
++  *((int *)&__m128_op1[0]) = 0x442403e4;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x5f675e96;
++  *((int *)&__m128_op0[2]) = 0xe29a5a60;
++  *((int *)&__m128_op0[1]) = 0x7fff7fff;
++  *((int *)&__m128_op0[0]) = 0x7fff7fff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x5e695e95;
++  *((int *)&__m128_op1[0]) = 0xe1cb5a01;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00800080;
++  *((int *)&__m128_op0[2]) = 0x00800080;
++  *((int *)&__m128_op0[1]) = 0x0080006b;
++  *((int *)&__m128_op0[0]) = 0x0000000b;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x80808080;
++  *((int *)&__m128_op1[0]) = 0x806b000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7f800000;
++  *((int *)&__m128_op0[2]) = 0x7f800000;
++  *((int *)&__m128_op0[1]) = 0x7f800000;
++  *((int *)&__m128_op0[0]) = 0x7f800000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff800000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xff800000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xfffefffe;
++  *((int *)&__m128_op0[0]) = 0xfffffffc;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xfffefffe;
++  *((int *)&__m128_op1[0]) = 0xfffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffc2ffe7;
++  *((int *)&__m128_op0[2]) = 0x00000007;
++  *((int *)&__m128_op0[1]) = 0x0000ffc1;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0xfffff1a0;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x153e3e49;
++  *((int *)&__m128_op0[2]) = 0x307d0771;
++  *((int *)&__m128_op0[1]) = 0x0d8e3670;
++  *((int *)&__m128_op0[0]) = 0x6ac02b9b;
++  *((int *)&__m128_op1[3]) = 0x55aa55c3;
++  *((int *)&__m128_op1[2]) = 0xd5aa55c4;
++  *((int *)&__m128_op1[1]) = 0xaa55556f;
++  *((int *)&__m128_op1[0]) = 0xd5aaaac1;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0x0015172b;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xfffffffe;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xfffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffff0000;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00010001;
++  *((int *)&__m128_op0[2]) = 0x00010001;
++  *((int *)&__m128_op0[1]) = 0x00010001;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x86dd8341;
++  *((int *)&__m128_op1[2]) = 0xb164f12b;
++  *((int *)&__m128_op1[1]) = 0x9611c398;
++  *((int *)&__m128_op1[0]) = 0x5b3159f5;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_sult_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000001f;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000300000003;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xe93d0bd19ff07013;
++  *((unsigned long *)&__m128d_op0[0]) = 0x65017c2ac9ca9fd0;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00d3012acc56f9bb;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000001021;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2;
++  *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0080000000800000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x345002920f3017d6;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_slt_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffff8607db959f;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff0cff78ff96ff14;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7ef8000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x14ccc6320176a4d2;
++  *((unsigned long *)&__m128d_op0[0]) = 0x685670d37e80682a;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffee00000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sult_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
+new file mode 100644
+index 000000000..964eff79f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sne.c
+@@ -0,0 +1,398 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;stdio.h&gt;
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00003fee;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000004;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000002;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x03574e3a;
++  *((int *)&__m128_op1[2]) = 0x03574e3a;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00010400;
++  *((int *)&__m128_op1[3]) = 0x10f917d7;
++  *((int *)&__m128_op1[2]) = 0x2d3d01e4;
++  *((int *)&__m128_op1[1]) = 0x203e16d1;
++  *((int *)&__m128_op1[0]) = 0x16de012b;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000101f;
++  *((int *)&__m128_op0[2]) = 0xffff8b68;
++  *((int *)&__m128_op0[1]) = 0x00000b6f;
++  *((int *)&__m128_op0[0]) = 0xffff8095;
++  *((int *)&__m128_op1[3]) = 0x10f917d7;
++  *((int *)&__m128_op1[2]) = 0x2d3d01e4;
++  *((int *)&__m128_op1[1]) = 0x203e16d1;
++  *((int *)&__m128_op1[0]) = 0x16de012b;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x11000f20;
++  *((int *)&__m128_op0[2]) = 0x10000e20;
++  *((int *)&__m128_op0[1]) = 0x0f000d20;
++  *((int *)&__m128_op0[0]) = 0x0e000c20;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00050005;
++  *((int *)&__m128_op0[2]) = 0x00050005;
++  *((int *)&__m128_op0[1]) = 0x00050005;
++  *((int *)&__m128_op0[0]) = 0x00050005;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x15d926c7;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000e41b;
++  *((int *)&__m128_op1[3]) = 0xfffffacd;
++  *((int *)&__m128_op1[2]) = 0xb6dbecac;
++  *((int *)&__m128_op1[1]) = 0x1f5533a6;
++  *((int *)&__m128_op1[0]) = 0x94f902c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x04040504;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x04040504;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0001000c;
++  *((int *)&__m128_op0[2]) = 0xfffffff2;
++  *((int *)&__m128_op0[1]) = 0x0001000d;
++  *((int *)&__m128_op0[0]) = 0xfffffff1;
++  *((int *)&__m128_op1[3]) = 0xffff8a17;
++  *((int *)&__m128_op1[2]) = 0xffffc758;
++  *((int *)&__m128_op1[1]) = 0xffff69bb;
++  *((int *)&__m128_op1[0]) = 0xffffad3b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xff800000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xff800000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffff1739;
++  *((int *)&__m128_op1[2]) = 0xffff48aa;
++  *((int *)&__m128_op1[1]) = 0xffff2896;
++  *((int *)&__m128_op1[0]) = 0xffff5b88;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000003;
++  *((int *)&__m128_op0[0]) = 0x0000003f;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000003;
++  *((int *)&__m128_op1[0]) = 0x0000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x084d12ce;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x24170000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7474f6fd7474fefe;
++  *((unsigned long *)&__m128d_op0[0]) = 0xf474f6fef474f6fe;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x1817161517161514;
++  *((unsigned long *)&__m128d_op1[0]) = 0x1615141315141312;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0101fe870101fe87;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0101fe8700000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x62cbf96e4acfaf40;
++  *((unsigned long *)&__m128d_op1[0]) = 0xf0bc9a5278285a4a;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000007fffa9ed;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7f8000017fffca8b;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffff7603;
++  *((unsigned long *)&__m128d_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sne_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x1111113111111141;
++  *((unsigned long *)&__m128d_op1[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00ff000100ff00fe;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00ff003000ff00a0;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000000005e695e95;
++  *((unsigned long *)&__m128d_op1[0]) = 0x5e695e96c396b402;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x000300037ff000ff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0003000300a10003;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000003ff8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x01533b5e7489ae24;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffab7e71e33848;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sune_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
+new file mode 100644
+index 000000000..ea47baf40
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sor.c
+@@ -0,0 +1,269 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0000007f;
++  *((int *)&__m128_op0[2]) = 0x0000007f;
++  *((int *)&__m128_op0[1]) = 0x0000007f;
++  *((int *)&__m128_op0[0]) = 0x0000007f;
++  *((int *)&__m128_op1[3]) = 0x3ff00000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xfffc0020;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x2757de72;
++  *((int *)&__m128_op0[2]) = 0x33d771a3;
++  *((int *)&__m128_op0[1]) = 0x166891d5;
++  *((int *)&__m128_op0[0]) = 0x1e8b7eff;
++  *((int *)&__m128_op1[3]) = 0x2757de72;
++  *((int *)&__m128_op1[2]) = 0x33d771a3;
++  *((int *)&__m128_op1[1]) = 0x166891d5;
++  *((int *)&__m128_op1[0]) = 0x1e8b7eff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00fe00ff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000001;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xfffffffe;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0xffffff02;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x0000000d;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xfffffe03;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xfffffe03;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xbafebb00;
++  *((int *)&__m128_op1[2]) = 0xffd500fe;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xbffffffe;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x80000000;
++  *((int *)&__m128_op0[2]) = 0x80000000;
++  *((int *)&__m128_op0[1]) = 0x80000000;
++  *((int *)&__m128_op0[0]) = 0x80000000;
++  *((int *)&__m128_op1[3]) = 0x000000ff;
++  *((int *)&__m128_op1[2]) = 0x0000857a;
++  *((int *)&__m128_op1[1]) = 0x05fafe01;
++  *((int *)&__m128_op1[0]) = 0x01fe000e;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x7fff7fff;
++  *((int *)&__m128_op0[2]) = 0x7fff7fff;
++  *((int *)&__m128_op0[1]) = 0xbf6b8101;
++  *((int *)&__m128_op0[0]) = 0x81018101;
++  *((int *)&__m128_op1[3]) = 0xe3636363;
++  *((int *)&__m128_op1[2]) = 0x63abdf16;
++  *((int *)&__m128_op1[1]) = 0x41f8e080;
++  *((int *)&__m128_op1[0]) = 0x16161198;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000005d5d;
++  *((unsigned long *)&__m128d_op1[1]) = 0x08fdc221bfdb1927;
++  *((unsigned long *)&__m128d_op1[0]) = 0x4303c67e9b7fb213;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7fffffff7ffffffb;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000040002;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffffff000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000d00000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sor_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
+new file mode 100644
+index 000000000..68cb5a52f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfcmp_sun.c
+@@ -0,0 +1,335 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xe17fe003;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0x0000ffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffffff;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00190819;
++  *((int *)&__m128_op1[2]) = 0x00190019;
++  *((int *)&__m128_op1[1]) = 0x00190819;
++  *((int *)&__m128_op1[0]) = 0x00190019;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xfe800000;
++  *((int *)&__m128_op0[0]) = 0x00000001;
++  *((int *)&__m128_op1[3]) = 0x7fffffff;
++  *((int *)&__m128_op1[2]) = 0x82bb9784;
++  *((int *)&__m128_op1[1]) = 0x7fffffff;
++  *((int *)&__m128_op1[0]) = 0xc6bb97ac;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x7f3f0180;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0xa2321469;
++  *((int *)&__m128_op0[0]) = 0x7fd03f7f;
++  *((int *)&__m128_op1[3]) = 0x00000406;
++  *((int *)&__m128_op1[2]) = 0x00000406;
++  *((int *)&__m128_op1[1]) = 0x02020202;
++  *((int *)&__m128_op1[0]) = 0x0202fe02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0xfffffff5;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000014;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000014;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0xfffc0004;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x67eb85b0;
++  *((int *)&__m128_op0[2]) = 0xb2ebb001;
++  *((int *)&__m128_op0[1]) = 0xc8847ef6;
++  *((int *)&__m128_op0[0]) = 0xed3f2000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0x0002de46;
++  *((int *)&__m128_op0[2]) = 0x682de060;
++  *((int *)&__m128_op0[1]) = 0x09b50da6;
++  *((int *)&__m128_op0[0]) = 0xe67b8fc0;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x084d12ce;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x24170000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0x00ffffff;
++  *((int *)&__m128_op0[0]) = 0x00ffffff;
++  *((int *)&__m128_op1[3]) = 0x0000feff;
++  *((int *)&__m128_op1[2]) = 0x23560000;
++  *((int *)&__m128_op1[1]) = 0x0000fd16;
++  *((int *)&__m128_op1[0]) = 0x54860000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xfffffffe;
++  *((int *)&__m128_op0[0]) = 0xfffff784;
++  *((int *)&__m128_op1[3]) = 0x0177fff0;
++  *((int *)&__m128_op1[2]) = 0xfffffff0;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x011ff8bc;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_s (__m128_op0, __m128_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0002000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00ff00ff00ff00fe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffffffffffff800;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfffebd06fffe820c;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7fff7ffe7fff3506;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7ffffffeffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128d_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000000000002f;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000029;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128d_op1[0]) = 0x003dbe88077c78c1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0001fffe0001fefc;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0007000000050000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0003000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfcmp_sun_d (__m128d_op0, __m128d_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vfmadd-vfnmadd-vld.patch b/LoongArch-Add-tests-for-SX-vector-vfmadd-vfnmadd-vld.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b639554078cc7270f9b00da6fa79a5c50d4fdf27
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vfmadd-vfnmadd-vld.patch
@@ -0,0 +1,1412 @@
+From 5cc6bce7753e1029149839d58ed81f046087ad31 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 15:05:09 +0800
+Subject: [PATCH 098/124] LoongArch: Add tests for SX vector
+ vfmadd/vfnmadd/vld/vst instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vld.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vst.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vfmadd_d.c       | 251 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vfmadd_s.c       | 381 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vfnmadd_d.c      | 196 +++++++++
+ .../loongarch/vector/lsx/lsx-vfnmadd_s.c      | 381 ++++++++++++++++++
+ .../gcc.target/loongarch/vector/lsx/lsx-vld.c |  62 +++
+ .../gcc.target/loongarch/vector/lsx/lsx-vst.c |  70 ++++
+ 6 files changed, 1341 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
+new file mode 100644
+index 000000000..c5de1ac7a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_d.c
+@@ -0,0 +1,251 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x8a228acac14e440a;
++  *((unsigned long *)&__m128d_op1[0]) = 0xc77c47cdc0f16549;
++  *((unsigned long *)&__m128d_op2[1]) = 0xffffffffd24271c4;
++  *((unsigned long *)&__m128d_op2[0]) = 0x2711bad1e8e309ed;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffd24271c4;
++  *((unsigned long *)&__m128d_result[0]) = 0x2711bad1e8e309ed;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000040400000383;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000040400000383;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffe000ffff1fff;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x00000000003f80b0;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0080200000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000401000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000080000000000;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000000000001e;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[0]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m128d_op2[1]) = 0xfffb00fdfdf7ffff;
++  *((unsigned long *)&__m128d_op2[0]) = 0xfff8000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfffb00fdfdf7ffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff8000000000000;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000009000900;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000009000900;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000009000900;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000009000900;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x9c83e21a22001818;
++  *((unsigned long *)&__m128d_op0[0]) = 0xdd3b8b02563b2d7b;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x7f7f7f007f7f7f00;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x7f7f7f007f7f7f00;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff01e41ffff0ffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x5555000054100000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x5555000154100155;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vfmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000010;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfc01fcfefc02fdf7;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfe00fcfffe01fd01;
++  *((unsigned long *)&__m128d_op1[1]) = 0xfc01fd1300000001;
++  *((unsigned long *)&__m128d_op1[0]) = 0xfe00fd1400010000;
++  *((unsigned long *)&__m128d_op2[1]) = 0xfc01fcfefc02fdf7;
++  *((unsigned long *)&__m128d_op2[0]) = 0xfe00fcfffe01fd01;
++  *((unsigned long *)&__m128d_result[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x7ff0000000000000;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000bd3d00000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0038d800ff000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00fffe00fffffe00;
++  *((unsigned long *)&__m128d_op2[1]) = 0x8000008000008080;
++  *((unsigned long *)&__m128d_op2[0]) = 0x8080800000800080;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000008000008080;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00ff80ff00ff80ff;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000900000009;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x000000007ff000ff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128d_result[0]) = 0x0000000000000000;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000103;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000100000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000103;
++  __m128d_out = __lsx_vfmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
+new file mode 100644
+index 000000000..6b85e87bd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfmadd_s.c
+@@ -0,0 +1,381 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000002;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000002;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x0028e0a1;
++  *((int *)&__m128_op0[2]) = 0xa000a041;
++  *((int *)&__m128_op0[1]) = 0x01000041;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x01000001;
++  *((int *)&__m128_op1[1]) = 0x00010001;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x01000001;
++  *((int *)&__m128_op2[1]) = 0xffffe000;
++  *((int *)&__m128_op2[0]) = 0xffff1fff;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x01000001;
++  *((int *)&__m128_result[1]) = 0xffffe000;
++  *((int *)&__m128_result[0]) = 0xffff1fff;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x7f800000;
++  *((int *)&__m128_op0[2]) = 0x7f800000;
++  *((int *)&__m128_op0[1]) = 0x7f800000;
++  *((int *)&__m128_op0[0]) = 0x7f800000;
++  *((int *)&__m128_op1[3]) = 0x00000002;
++  *((int *)&__m128_op1[2]) = 0x00000002;
++  *((int *)&__m128_op1[1]) = 0x00000003;
++  *((int *)&__m128_op1[0]) = 0x00000003;
++  *((int *)&__m128_op2[3]) = 0x3fc00000;
++  *((int *)&__m128_op2[2]) = 0x3fc00000;
++  *((int *)&__m128_op2[1]) = 0x3fc00000;
++  *((int *)&__m128_op2[0]) = 0x3fc00000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xc1bdceee;
++  *((int *)&__m128_op0[2]) = 0x242070db;
++  *((int *)&__m128_op0[1]) = 0xe8c7b756;
++  *((int *)&__m128_op0[0]) = 0xd76aa478;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x7f400000;
++  *((int *)&__m128_op0[2]) = 0x7f040000;
++  *((int *)&__m128_op0[1]) = 0x7f020000;
++  *((int *)&__m128_op0[0]) = 0x7f020000;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0x0014002c;
++  *((int *)&__m128_op1[1]) = 0xfffefffe;
++  *((int *)&__m128_op1[0]) = 0x003b0013;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0x3ea5016b;
++  *((int *)&__m128_result[1]) = 0xfffefffe;
++  *((int *)&__m128_result[0]) = 0x3f6fb04d;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x004f0080;
++  *((int *)&__m128_op0[2]) = 0x004f0080;
++  *((int *)&__m128_op0[1]) = 0x004f0080;
++  *((int *)&__m128_op0[0]) = 0x004f0080;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x7fff7fff;
++  *((int *)&__m128_op2[2]) = 0x7fff7fff;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7fff7fff;
++  *((int *)&__m128_result[2]) = 0x7fff7fff;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x3d3d3d3d;
++  *((int *)&__m128_op0[2]) = 0x3d3d3d3d;
++  *((int *)&__m128_op0[1]) = 0x3d3d3d3d;
++  *((int *)&__m128_op0[0]) = 0x3d3d3d3d;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00100000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x0000bd3d;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00050005;
++  *((int *)&__m128_op1[2]) = 0x00050005;
++  *((int *)&__m128_op1[1]) = 0x00050005;
++  *((int *)&__m128_op1[0]) = 0x00050005;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xe500c085;
++  *((int *)&__m128_op0[2]) = 0xc000c005;
++  *((int *)&__m128_op0[1]) = 0xe5c1a185;
++  *((int *)&__m128_op0[0]) = 0xc48004c5;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffc000;
++  *((int *)&__m128_op1[0]) = 0xffffc005;
++  *((int *)&__m128_op2[3]) = 0xff550025;
++  *((int *)&__m128_op2[2]) = 0x002a004b;
++  *((int *)&__m128_op2[1]) = 0x00590013;
++  *((int *)&__m128_op2[0]) = 0x005cffca;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xffffc000;
++  *((int *)&__m128_result[0]) = 0xffffc005;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00fe0001;
++  *((int *)&__m128_op1[2]) = 0x00cf005f;
++  *((int *)&__m128_op1[1]) = 0x7fff7fff;
++  *((int *)&__m128_op1[0]) = 0x7fff7f00;
++  *((int *)&__m128_op2[3]) = 0x5d7f5d00;
++  *((int *)&__m128_op2[2]) = 0x7f6a007f;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x5d7f5d00;
++  *((int *)&__m128_result[2]) = 0x7f6a007f;
++  *((int *)&__m128_result[1]) = 0x7fff7fff;
++  *((int *)&__m128_result[0]) = 0x7fff7f00;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00680486;
++  *((int *)&__m128_op0[2]) = 0xffffffda;
++  *((int *)&__m128_op0[1]) = 0xffff913b;
++  *((int *)&__m128_op0[0]) = 0xb9951901;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x01030103;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00200060;
++  *((int *)&__m128_op2[0]) = 0x00200060;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0xffffffda;
++  *((int *)&__m128_result[1]) = 0xffff913b;
++  *((int *)&__m128_result[0]) = 0x001fed4d;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x001a001a;
++  *((int *)&__m128_op0[2]) = 0x001a0008;
++  *((int *)&__m128_op0[1]) = 0x001a001a;
++  *((int *)&__m128_op0[0]) = 0x001a000b;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xff800001;
++  *((int *)&__m128_op1[0]) = 0x0f800000;
++  *((int *)&__m128_op2[3]) = 0xff800000;
++  *((int *)&__m128_op2[2]) = 0xff800000;
++  *((int *)&__m128_op2[1]) = 0xff800000;
++  *((int *)&__m128_op2[0]) = 0xff800000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xffc00001;
++  *((int *)&__m128_result[0]) = 0xff800000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xfe3bfb01;
++  *((int *)&__m128_op0[2]) = 0xfe3bfe01;
++  *((int *)&__m128_op0[1]) = 0xfe03fe3f;
++  *((int *)&__m128_op0[0]) = 0xfe01fa21;
++  *((int *)&__m128_op1[3]) = 0xfe3bfb01;
++  *((int *)&__m128_op1[2]) = 0xfe3bfe01;
++  *((int *)&__m128_op1[1]) = 0xfe03fe3f;
++  *((int *)&__m128_op1[0]) = 0xfe01fa21;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x7f800000;
++  *((int *)&__m128_result[2]) = 0x7f800000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x7f800000;
++  __m128_out = __lsx_vfmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffe001;
++  *((int *)&__m128_op0[2]) = 0xffffe001;
++  *((int *)&__m128_op0[1]) = 0xffffe001;
++  *((int *)&__m128_op0[0]) = 0xffffe001;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0xffffe000;
++  *((int *)&__m128_op1[0]) = 0x01ffe200;
++  *((int *)&__m128_op2[3]) = 0x04040383;
++  *((int *)&__m128_op2[2]) = 0x83838404;
++  *((int *)&__m128_op2[1]) = 0x04040383;
++  *((int *)&__m128_op2[0]) = 0x83838404;
++  *((int *)&__m128_result[3]) = 0xffffe001;
++  *((int *)&__m128_result[2]) = 0xffffe001;
++  *((int *)&__m128_result[1]) = 0xffffe001;
++  *((int *)&__m128_result[0]) = 0xffffe001;
++  __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x18171615;
++  *((int *)&__m128_op0[2]) = 0x17161514;
++  *((int *)&__m128_op0[1]) = 0x16151413;
++  *((int *)&__m128_op0[0]) = 0x151d3756;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x39412488;
++  *((int *)&__m128_op1[0]) = 0x80000000;
++  *((int *)&__m128_op2[3]) = 0x3ff00000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x40f3fa00;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xbff00000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0xc0f3fa00;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000005;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x00000000;
++  *((int *)&__m128_result[2]) = 0x00000000;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x3ddc5dac;
++  *((int *)&__m128_op1[3]) = 0xffffffff;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0x00000000;
++  *((int *)&__m128_result[0]) = 0x00000000;
++  __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x63636b6a;
++  *((int *)&__m128_op0[2]) = 0xfe486741;
++  *((int *)&__m128_op0[1]) = 0x41f8e880;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0xe3636363;
++  *((int *)&__m128_op1[2]) = 0x63abdf16;
++  *((int *)&__m128_op1[1]) = 0x41f8e080;
++  *((int *)&__m128_op1[0]) = 0x16161198;
++  *((int *)&__m128_op2[3]) = 0x00c27580;
++  *((int *)&__m128_op2[2]) = 0x00bccf42;
++  *((int *)&__m128_op2[1]) = 0x00a975be;
++  *((int *)&__m128_op2[0]) = 0x00accf03;
++  *((int *)&__m128_result[3]) = 0xff800000;
++  *((int *)&__m128_result[2]) = 0xff800000;
++  *((int *)&__m128_result[1]) = 0x4471fb84;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
+new file mode 100644
+index 000000000..96b14aad6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_d.c
+@@ -0,0 +1,196 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xef0179a47c793879;
++  *((unsigned long *)&__m128d_op0[0]) = 0x9f9e7e3e9ea3ff41;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x1e801ffc7fc00000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffc000007fc00000;
++  *((unsigned long *)&__m128d_result[0]) = 0x9e801ffc7fc00000;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000ffff00000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000ffff00000000;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128d_op0[0]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000008800022;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128d_op2[1]) = 0xb8ec43befe38e64b;
++  *((unsigned long *)&__m128d_op2[0]) = 0x6477d042343cce24;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffbfffffffbf;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffff000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000060000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfffffffffffff000;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xfffffffafffffffa;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffffffafffffffa;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m128d_op1[0]) = 0xf8f8f8f8f8f8f8f8;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xff80ffa2fff0ff74;
++  *((unsigned long *)&__m128d_op0[0]) = 0xff76ffd8ffe6ffaa;
++  *((unsigned long *)&__m128d_op1[1]) = 0xff80ffa2fff0ff74;
++  *((unsigned long *)&__m128d_op1[0]) = 0xff76ffd8ffe6ffaa;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0303030303030303;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0303030303030303;
++  *((unsigned long *)&__m128d_result[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfff0000000000000;
++  __m128d_out = __lsx_vfnmadd_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0001ffff00000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128d_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128d_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128d_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x3c600000ff800000;
++  *((unsigned long *)&__m128d_result[0]) = 0xfffffffffffffffe;
++  __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x00000000b5207f80;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x00000000b5207f80;
++  __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0xffffffffffffffff;
++  __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000009000900;
++  *((unsigned long *)&__m128d_op1[0]) = 0x0000000009000900;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x8000000000000000;
++  __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  *((unsigned long *)&__m128d_op0[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128d_op0[0]) = 0x00a975be00accf03;
++  *((unsigned long *)&__m128d_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128d_op2[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128d_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128d_result[0]) = 0x00000000ffffffff;
++  __m128d_out = __lsx_vfnmsub_d (__m128d_op0, __m128d_op1, __m128d_op2);
++  ASSERTEQ_64 (__LINE__, __m128d_result, __m128d_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
+new file mode 100644
+index 000000000..bf8414b49
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfnmadd_s.c
+@@ -0,0 +1,381 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xfffffffe;
++  *((int *)&__m128_op0[0]) = 0xbe6ed565;
++  *((int *)&__m128_op1[3]) = 0x195f307a;
++  *((int *)&__m128_op1[2]) = 0x5d04acbb;
++  *((int *)&__m128_op1[1]) = 0x6a1a3fbb;
++  *((int *)&__m128_op1[0]) = 0x3c90260e;
++  *((int *)&__m128_op2[3]) = 0xffffffff;
++  *((int *)&__m128_op2[2]) = 0xffffffff;
++  *((int *)&__m128_op2[1]) = 0xfffffffe;
++  *((int *)&__m128_op2[0]) = 0xbe6ed565;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xfffffffe;
++  *((int *)&__m128_result[0]) = 0x3e730941;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffffffff;
++  *((int *)&__m128_op0[1]) = 0xffffffff;
++  *((int *)&__m128_op0[0]) = 0xff01ff01;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0xffffffff;
++  *((int *)&__m128_op2[2]) = 0xffffffff;
++  *((int *)&__m128_op2[1]) = 0xffffffff;
++  *((int *)&__m128_op2[0]) = 0xff01ff01;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0xffffffff;
++  *((int *)&__m128_result[0]) = 0x7f01ff01;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0xffffffff;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0xffffffff;
++  *((int *)&__m128_op2[3]) = 0x00307028;
++  *((int *)&__m128_op2[2]) = 0x003f80b0;
++  *((int *)&__m128_op2[1]) = 0x0040007f;
++  *((int *)&__m128_op2[0]) = 0xff800000;
++  *((int *)&__m128_result[3]) = 0x80307028;
++  *((int *)&__m128_result[2]) = 0xffffffff;
++  *((int *)&__m128_result[1]) = 0x8040007f;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000049;
++  *((int *)&__m128_op0[2]) = 0x0000004d;
++  *((int *)&__m128_op0[1]) = 0x00000001;
++  *((int *)&__m128_op0[0]) = 0xffffffff;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000001;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000001;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x80000001;
++  *((int *)&__m128_result[0]) = 0xffffffff;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffffffff;
++  *((int *)&__m128_op0[2]) = 0xffff0000;
++  *((int *)&__m128_op0[1]) = 0x00ff0000;
++  *((int *)&__m128_op0[0]) = 0x00ff0000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000800;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0xffffffff;
++  *((int *)&__m128_op2[2]) = 0xfffff800;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xfffff800;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00030000;
++  *((int *)&__m128_op0[2]) = 0x00010000;
++  *((int *)&__m128_op0[1]) = 0x00020000;
++  *((int *)&__m128_op0[0]) = 0x00010000;
++  *((int *)&__m128_op1[3]) = 0x3f800000;
++  *((int *)&__m128_op1[2]) = 0x3f800000;
++  *((int *)&__m128_op1[1]) = 0x3f800000;
++  *((int *)&__m128_op1[0]) = 0x3f800000;
++  *((int *)&__m128_op2[3]) = 0x00030000;
++  *((int *)&__m128_op2[2]) = 0x00010000;
++  *((int *)&__m128_op2[1]) = 0x00020000;
++  *((int *)&__m128_op2[0]) = 0x00010000;
++  *((int *)&__m128_result[3]) = 0x80060000;
++  *((int *)&__m128_result[2]) = 0x80020000;
++  *((int *)&__m128_result[1]) = 0x80040000;
++  *((int *)&__m128_result[0]) = 0x80020000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000008;
++  *((int *)&__m128_op0[2]) = 0x97957687;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000408;
++  *((int *)&__m128_op1[3]) = 0x00000008;
++  *((int *)&__m128_op1[2]) = 0x97957687;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000408;
++  *((int *)&__m128_op2[3]) = 0x00010001;
++  *((int *)&__m128_op2[2]) = 0x00010001;
++  *((int *)&__m128_op2[1]) = 0x00010001;
++  *((int *)&__m128_op2[0]) = 0x04000800;
++  *((int *)&__m128_result[3]) = 0x80010001;
++  *((int *)&__m128_result[2]) = 0x80010001;
++  *((int *)&__m128_result[1]) = 0x80010001;
++  *((int *)&__m128_result[0]) = 0x84000800;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xffc2ffe7;
++  *((int *)&__m128_op0[2]) = 0x00000007;
++  *((int *)&__m128_op0[1]) = 0x0000ffc1;
++  *((int *)&__m128_op0[0]) = 0x00010001;
++  *((int *)&__m128_op1[3]) = 0xffc2ffe7;
++  *((int *)&__m128_op1[2]) = 0x00000007;
++  *((int *)&__m128_op1[1]) = 0x0000ffc1;
++  *((int *)&__m128_op1[0]) = 0x00010001;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x000ffc2f;
++  *((int *)&__m128_op2[1]) = 0x00201df0;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0xffc2ffe7;
++  *((int *)&__m128_result[2]) = 0x800ffc2f;
++  *((int *)&__m128_result[1]) = 0x80201df0;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000005;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x80808080;
++  *((int *)&__m128_op0[2]) = 0x80808080;
++  *((int *)&__m128_op0[1]) = 0x80808080;
++  *((int *)&__m128_op0[0]) = 0x80800008;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x31313131;
++  *((int *)&__m128_op0[0]) = 0x31313131;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x31313131;
++  *((int *)&__m128_op1[0]) = 0x31313131;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000008;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000008;
++  *((int *)&__m128_result[1]) = 0xa2f54a1e;
++  *((int *)&__m128_result[0]) = 0xa2f54a1e;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmadd_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0xa486c90f;
++  *((int *)&__m128_op0[2]) = 0x157ca12e;
++  *((int *)&__m128_op0[1]) = 0x58bcc201;
++  *((int *)&__m128_op0[0]) = 0x2e635d65;
++  *((int *)&__m128_op1[3]) = 0x6d564875;
++  *((int *)&__m128_op1[2]) = 0xf8760005;
++  *((int *)&__m128_op1[1]) = 0x8dc5a4d1;
++  *((int *)&__m128_op1[0]) = 0x79ffa22f;
++  *((int *)&__m128_op2[3]) = 0xffffffff;
++  *((int *)&__m128_op2[2]) = 0xd2436487;
++  *((int *)&__m128_op2[1]) = 0x0fa96b88;
++  *((int *)&__m128_op2[0]) = 0x5f94ab13;
++  *((int *)&__m128_result[3]) = 0xffffffff;
++  *((int *)&__m128_result[2]) = 0xd24271c4;
++  *((int *)&__m128_result[1]) = 0x2711bad1;
++  *((int *)&__m128_result[0]) = 0xe8e309ed;
++  __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x00000000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x80000000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x007ffd00;
++  *((int *)&__m128_op2[0]) = 0x01400840;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x007ffd00;
++  *((int *)&__m128_result[0]) = 0x01400840;
++  __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0x00000000;
++  *((int *)&__m128_op2[2]) = 0x00000000;
++  *((int *)&__m128_op2[1]) = 0x7f800000;
++  *((int *)&__m128_op2[0]) = 0x00000000;
++  *((int *)&__m128_result[3]) = 0x80000000;
++  *((int *)&__m128_result[2]) = 0x80000000;
++  *((int *)&__m128_result[1]) = 0x7f800000;
++  *((int *)&__m128_result[0]) = 0x80000000;
++  __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  *((int *)&__m128_op0[3]) = 0x00000000;
++  *((int *)&__m128_op0[2]) = 0x00000000;
++  *((int *)&__m128_op0[1]) = 0x00000000;
++  *((int *)&__m128_op0[0]) = 0x00000000;
++  *((int *)&__m128_op1[3]) = 0x00000000;
++  *((int *)&__m128_op1[2]) = 0x00000000;
++  *((int *)&__m128_op1[1]) = 0x00000000;
++  *((int *)&__m128_op1[0]) = 0x00000000;
++  *((int *)&__m128_op2[3]) = 0xcd636363;
++  *((int *)&__m128_op2[2]) = 0xcd636363;
++  *((int *)&__m128_op2[1]) = 0xcd636363;
++  *((int *)&__m128_op2[0]) = 0xcd636363;
++  *((int *)&__m128_result[3]) = 0xcd636363;
++  *((int *)&__m128_result[2]) = 0xcd636363;
++  *((int *)&__m128_result[1]) = 0xcd636363;
++  *((int *)&__m128_result[0]) = 0xcd636363;
++  __m128_out = __lsx_vfnmsub_s (__m128_op0, __m128_op1, __m128_op2);
++  ASSERTEQ_32 (__LINE__, __m128_result, __m128_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
+new file mode 100644
+index 000000000..7cd9abb7c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vld.c
+@@ -0,0 +1,62 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a;
++  __m128i_out = __lsx_vld ((unsigned long *)&__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a;
++  __m128i_out = __lsx_vldx ((unsigned long *)&__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0xc3c3c3c3c3c3c3c3;
++  *((unsigned long *)&__m128i_result[0]) = 0xc3c3c3c3c3c3c3c3;
++  __m128i_out = __lsx_vldrepl_b ((unsigned long *)&__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0xc31ac31ac31ac31a;
++  *((unsigned long *)&__m128i_result[0]) = 0xc31ac31ac31ac31a;
++  __m128i_out = __lsx_vldrepl_h ((unsigned long *)&__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x47a5c31a47a5c31a;
++  *((unsigned long *)&__m128i_result[0]) = 0x47a5c31a47a5c31a;
++  __m128i_out = __lsx_vldrepl_w ((unsigned long *)&__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[0]) = 0x3ab7a3fc47a5c31a;
++  __m128i_out = __lsx_vldrepl_d ((unsigned long *)&__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
+new file mode 100644
+index 000000000..8afdffa50
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vst.c
+@@ -0,0 +1,70 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0;
++  *((unsigned long *)&__m128i_result[0]) = 0x0;
++  __lsx_vst (__m128i_op0, (unsigned long *)&__m128i_result, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_op0, __m128i_result);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0;
++  *((unsigned long *)&__m128i_result[0]) = 0x0;
++  __lsx_vstx (__m128i_op0, (unsigned long *)&__m128i_result, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_op0, __m128i_result);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0;
++  *((unsigned long *)&__m128i_result[0]) = 0x05;
++  *((unsigned long *)&__m128i_out[1]) = 0x0;
++  *((unsigned long *)&__m128i_out[0]) = 0x0;
++  __lsx_vstelm_b (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0;
++  *((unsigned long *)&__m128i_result[0]) = 0x5c05;
++  *((unsigned long *)&__m128i_out[1]) = 0x0;
++  *((unsigned long *)&__m128i_out[0]) = 0x0;
++  __lsx_vstelm_h (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0;
++  *((unsigned long *)&__m128i_result[0]) = 0xc9d85c05;
++  *((unsigned long *)&__m128i_out[1]) = 0x0;
++  *((unsigned long *)&__m128i_out[0]) = 0x0;
++  __lsx_vstelm_w (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0;
++  *((unsigned long *)&__m128i_result[0]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_out[1]) = 0x0;
++  *((unsigned long *)&__m128i_out[0]) = 0x0;
++  __lsx_vstelm_d (__m128i_op0, (unsigned long *)&__m128i_out, 0x0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vfrstp-vfrstpi-vse.patch b/LoongArch-Add-tests-for-SX-vector-vfrstp-vfrstpi-vse.patch
new file mode 100644
index 0000000000000000000000000000000000000000..59ba09c7a475a2f3136654d01c291d14e4b4c1b3
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vfrstp-vfrstpi-vse.patch
@@ -0,0 +1,3926 @@
+From 06a477566d282d87ce187901904c4bae2c2c4aaf Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 11:28:29 +0800
+Subject: [PATCH 094/124] LoongArch: Add tests for SX vector
+ vfrstp/vfrstpi/vseq/vseqi/vsle /vslei/vslt/vslti instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vfrstp.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vseq.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vseqi.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsle-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsle-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vslei-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vslei-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vslt-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vslt-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vslti-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vslti-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vfrstp.c         | 218 ++++++++
+ .../loongarch/vector/lsx/lsx-vfrstpi.c        | 209 ++++++++
+ .../loongarch/vector/lsx/lsx-vseq.c           | 470 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vseqi.c          | 328 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vsle-1.c         | 290 +++++++++++
+ .../loongarch/vector/lsx/lsx-vsle-2.c         | 444 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vslei-1.c        | 258 ++++++++++
+ .../loongarch/vector/lsx/lsx-vslei-2.c        | 293 +++++++++++
+ .../loongarch/vector/lsx/lsx-vslt-1.c         | 434 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vslt-2.c         | 236 +++++++++
+ .../loongarch/vector/lsx/lsx-vslti-1.c        | 328 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vslti-2.c        | 293 +++++++++++
+ 12 files changed, 3801 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
+new file mode 100644
+index 000000000..ac0ade8b1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstp.c
+@@ -0,0 +1,218 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe07e5fefefdddfe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020100fedd0c00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0005000501800005;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfe07e5fefefdddfe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00020100fedd0008;
++  __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op2[1]) = 0x03ff03ff03ff03ff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0e7ffffc01fffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000003f803f4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0e7ffffc01fffffc;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000003f803f4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0e7ffffc01fffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001003f803f4;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000020000007d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000746400016388;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000586100015567;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0800000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000020000007d;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0008;
++  __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x61608654a2d4f6da;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ff08ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x36fbdfdcffdcffdc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80;
++  *((unsigned long *)&__m128i_op2[1]) = 0x1f1f1f1f1f1f1f00;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1f1f1f27332b9f00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x36fbdfdcffdc0008;
++  __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000aaaa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000545cab1d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000081a83bea;
++  *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00d3007c014e00bd;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000aaaa;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000003a0000003a;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x37c0001000000008;
++  __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080800008;
++  __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1f1f1f1f1f1f1f1f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1f1f1f1f1f1f1f1f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x1f1f1f1f1f1f1f1f;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1f1f1f1f1f1f1f1f;
++  *((unsigned long *)&__m128i_result[1]) = 0x00081f1f1f1f1f1f;
++  *((unsigned long *)&__m128i_result[0]) = 0x1f1f1f1f1f1f1f1f;
++  __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000400080003fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000bc2000007e10;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000400080003fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000bc2000007e04;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0a753500950fa306;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff14eb54ab;
++  *((unsigned long *)&__m128i_op1[0]) = 0x14ea6a002a406a00;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00007fff7fff8000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000a752a55;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a753500950fa306;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x02b010f881a281a2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_op1[1]) = 0x02b010f881a281a2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x02b010f881a281a2;
++  *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8140001;
++  __m128i_out = __lsx_vfrstp_h (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000155;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff100000000000;
++  __m128i_out = __lsx_vfrstp_b (__m128i_op0, __m128i_op1, __m128i_op2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
+new file mode 100644
+index 000000000..a2b110f21
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vfrstpi.c
+@@ -0,0 +1,209 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0027002a00030018;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f4300177f7a7f59;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0027002a00080018;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f4300177f7a7f59;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000007f00000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000401000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000110000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000004;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000800000000;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x75b043c4d17db125;
++  *((unsigned long *)&__m128i_op0[0]) = 0xeef8227b4f8017b1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x027c027c000027c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x75b043c4007db125;
++  *((unsigned long *)&__m128i_result[0]) = 0xeef8227b4f8017b1;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000010a000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ffff000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000010a000b;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5b35342c979955da;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000009;
++  *((unsigned long *)&__m128i_result[0]) = 0x5b35342c970455da;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010000000000000;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00d3012b015700bb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001002affca0070;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00d3012b015700bb;
++  *((unsigned long *)&__m128i_result[0]) = 0x00010000ffca0070;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00080000fffe0001;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000545cffffab1d;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff81a800003bea;
++  *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415;
++  *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000545cffff0001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff81a800003bea;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001b;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x379674c000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x379674c000000000;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001a001a001a000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001a001a001a000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001a001a001a000b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001a001a001a000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a0008;
++  *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a000b;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x02f3030303030303;
++  *((unsigned long *)&__m128i_op1[1]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x02f3030303100303;
++  __m128i_out = __lsx_vfrstpi_b (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff941d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007770ffff941d;
++  __m128i_out = __lsx_vfrstpi_h (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
+new file mode 100644
+index 000000000..4362941ab
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseq.c
+@@ -0,0 +1,470 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ed0008005e00a2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x007a007600150077;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ed0008005e00a2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x007a007600150077;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff489b693120950;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc45a851c40c18;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfda9b23a624082fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff7f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2d1da85b7f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1fc000001fc00000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1fc000001fc00000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000067400002685;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9795698585057dec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x87f82867431a1d08;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1149a96eb1a08000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffe1ffffffe1;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffe1ffffffe1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m128i_op1[0]) = 0x363d753d50155c0a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0f0f0f0f00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5b5b5b5aadadadad;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000052525253;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff00ffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00ffffffffff;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x33f5c2d7d9f5d800;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe4c23ffb002a3a22;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000004870ba0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000044470000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000404040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000005c000000b2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000007600000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4d1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vseq_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000048;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffeffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000090900000998;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff00ffffff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f7f7f007f7f7f00;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf2c97aaa7d8fa270;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0b73e427f7cfcb88;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff01fe03ff01fe03;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseq_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vseq_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
+new file mode 100644
+index 000000000..c16a291de
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vseqi.c
+@@ -0,0 +1,328 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff800000c3080002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfedb27095b6bff95;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000040000000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0010000000100000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010000000100000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000001000f00fe00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000017fff00fe7f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff01ff010000ff7d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000fffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffa6ff91fdd8ef77;
++  *((unsigned long *)&__m128i_op0[0]) = 0x061202bffb141c38;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000f27ca;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2a29282726252423;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2221201f1e1d1c1b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff80ff00ff80ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0313100003131000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0313100003131000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001a0000000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_b (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000002a001a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000001a000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x05f5e2320605e1e2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_d (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0d060d060d060d06;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0d060d060d060d06;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff2356fe165486;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5efeb3165bd7653d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vseqi_w (__m128i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vseqi_h (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
+new file mode 100644
+index 000000000..a26eb0a3d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-1.c
+@@ -0,0 +1,290 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x004200a000200000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffffff;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000aaaaaaaa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000aaab555b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000aaaaaaaa;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000aaab555b;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000897957687;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000408;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000ed0e0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004080;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004cff8fffde0051;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000040400000404;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000040400000404;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000501000002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000008;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6b6c4beb636443e3;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0507070805070708;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000085af0000b000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00017ea200002000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100000001000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0040004000400040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
+new file mode 100644
+index 000000000..15c6cedc2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsle-2.c
+@@ -0,0 +1,444 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0005000400000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0400001001150404;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0005000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0400001001150404;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000490000004d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ffffffffff;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000020000000200;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000008680f1ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0280000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffffff00000000;
++  __m128i_out = __lsx_vsle_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000036280000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x42a0000042a02000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff80000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff0600d50e9ef518;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffefffa8007c000f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000001faea9ec;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100007f01;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfbfbfb17fbfb38ea;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfbfb47fbfbfb0404;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000005fffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000003fffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000bffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f7f;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff040;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001000100010c410;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffcafff8ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a0;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffcafff8ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000a0;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000007f0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000ed0e0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000004080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000ed0e0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000004080;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x8);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003030000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00fffbfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01ff1100000048;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsle_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
+new file mode 100644
+index 000000000..0e72a33dd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-1.c
+@@ -0,0 +1,258 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_b (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_b (__m128i_op0, -6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_b (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffff00ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffff00ffff;
++  __m128i_out = __lsx_vslei_b (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffffff00ffffff;
++  __m128i_out = __lsx_vslei_b (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00008080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x31dc2cc1bc268c93;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c4d53d855f89514;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_h (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_w (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_w (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fc000003fc00000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3fc000003fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_w (__m128i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_w (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff6080ffff4417;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_w (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslei_w (__m128i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_w (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000001fc1a568;
++  *((unsigned long *)&__m128i_op0[0]) = 0x02693fe0e7beb077;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, -6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf1819b7c0732a6b6;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffb9917a6e7fffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf03ef03ef03ef03e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf03ef03ef03ef03e;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_d (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
+new file mode 100644
+index 000000000..685a1bb36
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslei-2.c
+@@ -0,0 +1,293 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd82480697f678077;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1268f057137a0267;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0048137ef886fae0;
++  *((unsigned long *)&__m128i_result[1]) = 0xff000000ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xff00ff0000000000;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000202fe02;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff00ff;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_bu (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff7a53;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff0000;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001b4a00007808;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_hu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003900;
++  *((unsigned long *)&__m128i_op0[0]) = 0x68bcf93435ed25ed;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000f0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_wu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00250023001c001d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x309d2f342a5d2b34;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslei_du (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
+new file mode 100644
+index 000000000..15c96ccfe
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-1.c
+@@ -0,0 +1,434 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007658000115de0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001a8960001d2cc0;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffc000007fc00000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9e801ffc7fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ffff0000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff00ff0000ff;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000040100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000384;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe3f0200004003ffd;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff00ff00ff00;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f0101070101010f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000127f010116;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffffff;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000ffef0010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000ff0000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff02000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000002a001a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001a000b00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff001a00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffc0ffc0003f;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffc0ffc0003f003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff0000000000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff;
++  __m128i_out = __lsx_vslt_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0403cfcf01c1595e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x837cd5db43fc55d4;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0004000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004000000040000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffefefffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffefefffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000800;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00000000ffff;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff000086bd;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ca000000c481;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003e2;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00050eb00000fffa;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000f8a50000f310;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000011f0000f040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0177fff0fffffff0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000011ff8bc;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vslt_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001000100010c410;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x800000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ffffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128i_op0[0]) = 0x803f800080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001f5400000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002008360500088;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000400028000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003ff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000467fef81;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c63636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000003f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4eede8494f000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0fff0fff7f800fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000fffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
+new file mode 100644
+index 000000000..e8d69f0e9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslt-2.c
+@@ -0,0 +1,236 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0007658000115de0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001a8960001d2cc0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff;
++  __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00;
++  __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff359f358;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffff359f358;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffff00ff00;
++  __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000fffe0000fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vslt_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007658000115de0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001a8960001d2cc0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000320;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007730;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4050000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636163636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslt_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff8f8dada;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff01018888;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000145ad;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000300003e6e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00005dcbe7e830c0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x03f21e0114bf19da;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5e695e95e1cb5a01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0313100003131000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0313100003131000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000010a7;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000046ebaa2c;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf1f1f1f149ed7273;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf1f1f1f1865e65a1;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00050eb00000fffa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000f8a50000f310;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslt_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
+new file mode 100644
+index 000000000..5bf3ce6e8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-1.c
+@@ -0,0 +1,328 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslti_b (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00feff0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00feff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffff0000000000;
++  __m128i_out = __lsx_vslti_b (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffefffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffefffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_b (__m128i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_b (__m128i_op0, 8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_b (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_b (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_b (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x807f7f8000ffff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00feff00;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff0000ffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x195f307a5d04acbb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a3a3a3b3a3a3a3a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3a3a00003a3a0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_h (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5ff6a0a40ea8f47c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5ff6a0a40e9da42a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd82480697f678077;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffe15;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffe15;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffc000ffffc005;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0006000100040001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00010002ffff0105;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa000308000008002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0500847b00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vslti_w (__m128i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000005e695e95;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5e695e96c396b402;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0103000201030002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000455555555;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_d (__m128i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
+new file mode 100644
+index 000000000..768df528f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslti-2.c
+@@ -0,0 +1,293 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff0000ffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xff0000ffffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000008a0000008a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000008900000009;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x371fe00000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x371fe00000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbe8282a0793636d3;
++  *((unsigned long *)&__m128i_op0[0]) = 0x793636d3793636d3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_bu (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2e2b34ca59fa4c88;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000100000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_hu (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7505445465593af1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00010000ffab001c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001ffffffadff9a;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vslti_hu (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff7300000ca00430;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001a00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000009c83e21a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000022001818;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslti_hu (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00001802041b0013;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007000000050000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100010001000100;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x975ca6046e2e4889;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1748c4f9ed1a5870;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000235600005486;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000b31600006544;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_wu (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000007e8a60;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000001edde;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0aa077b7054c9554;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40c7ee1f38e4c4e8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6b75948a91407a42;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0b5471b633e54fde;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslti_du (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vmax-vmaxi-vmin-vm.patch b/LoongArch-Add-tests-for-SX-vector-vmax-vmaxi-vmin-vm.patch
new file mode 100644
index 0000000000000000000000000000000000000000..727fdc45746369d277a8142a6d8427d0680196ab
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vmax-vmaxi-vmin-vm.patch
@@ -0,0 +1,2578 @@
+From dd0b9d05c2e18dc8082931dbfe612bb1acf9e5e9 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 18:38:46 +0800
+Subject: [PATCH 085/124] LoongArch: Add tests for SX vector
+ vmax/vmaxi/vmin/vmini instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vmax-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmax-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmin-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmin-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmini-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmini-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vmax-1.c         | 317 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vmax-2.c         | 362 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmaxi-1.c        | 279 +++++++++++
+ .../loongarch/vector/lsx/lsx-vmaxi-2.c        | 223 +++++++++
+ .../loongarch/vector/lsx/lsx-vmin-1.c         | 434 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmin-2.c         | 344 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmini-1.c        | 314 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vmini-2.c        | 216 +++++++++
+ 8 files changed, 2489 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
+new file mode 100644
+index 000000000..b0e22f955
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-1.c
+@@ -0,0 +1,317 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f007f007f007f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000010000f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000010000f01;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffd;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffdfffcfffd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff80df00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010100000100000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100000101000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010100000100000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1000100000101000;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0040000000ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0040000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0040000000ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0040000000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xb327b9363c992b2e;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa1e7b475d925730f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff3c992b2e;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffff730f;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x4101010141010100;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000001ff;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000003d0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000003d0000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0007001400000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000053a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000700140000053a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004001000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff80c400000148;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff80c1ffffe8de;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000148;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000034;
++  __m128i_out = __lsx_vmax_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000b3a6000067da;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00004e420000c26a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd78cfd70b5f65d76;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5779108fdedda7e4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000b3a6000067da;
++  *((unsigned long *)&__m128i_result[0]) = 0x5779108f0000c26a;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x30eb020302101b03;
++  *((unsigned long *)&__m128i_op1[0]) = 0x020310d0c0030220;
++  *((unsigned long *)&__m128i_result[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_result[0]) = 0x020310edc003023d;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe03fe01fe01fe01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe3bfa3ffe3bfb21;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001d001d001d001d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001d001d001d0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001d001d001d001d;
++  *((unsigned long *)&__m128i_result[0]) = 0x001d001d001d0000;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000155;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000155;
++  __m128i_out = __lsx_vmax_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000051649b6;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd2f005e44bb43416;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000003e0000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000051649b6;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000003e0000003f;
++  __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00012c8a0000a58a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ebd20000714f;
++  *((unsigned long *)&__m128i_result[0]) = 0x00012c8a0000a58a;
++  __m128i_out = __lsx_vmax_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
+new file mode 100644
+index 000000000..51a9a92e8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmax-2.c
+@@ -0,0 +1,362 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000007f0000;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000003a0000003a;
++  *((unsigned long *)&__m128i_result[1]) = 0x77c0404a4000403a;
++  *((unsigned long *)&__m128i_result[0]) = 0x77c03fd640003fc6;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbafebb00ffd500fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbafebb00ffd500fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xbafebb00ffd500fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_op0[0]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5b5b5b5aa4a4a4a6;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x5b5b5b5aadadadad;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128i_result[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe03fe3ffe01fa21;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000007500;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007e1600007d98;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000fe00fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000f50000fe75fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fe7efe00fe7dfe;
++  __m128i_out = __lsx_vmax_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2002040404010420;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101010180800101;
++  *((unsigned long *)&__m128i_result[1]) = 0x2002040404010420;
++  *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c80800101;
++  __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0;
++  __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffdf;
++  __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf001f0010101f002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x14ccc6320076a4d2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x685670d27e00682a;
++  *((unsigned long *)&__m128i_result[1]) = 0x14ccc6320076a4d2;
++  *((unsigned long *)&__m128i_result[0]) = 0x685670d27e00682a;
++  __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8145f50;
++  __m128i_out = __lsx_vmax_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff80ff0010ff06;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007f01000eff0a;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff80ff0010ff06;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000002bfd9461;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3ff0000000007fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000002bfd9461;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffff0000000ad3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff000fffff000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffff00010001000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff000fffff000;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000001f;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x09e8e9012fded7fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x479f64b03373df61;
++  *((unsigned long *)&__m128i_result[1]) = 0x09e8e9012fded7fd;
++  *((unsigned long *)&__m128i_result[0]) = 0x479f64b03373df61;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_op1[0]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_result[1]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_result[0]) = 0x52525252adadadad;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080700000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080808080808;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vmax_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
+new file mode 100644
+index 000000000..7cff1d848
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-1.c
+@@ -0,0 +1,279 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0a0a0a0a0a;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1000100010001000;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003be14000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000003bfb4000;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0b0b0b0b0b0b0b0b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0b0b0b0b0b0b0b0b;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007ffffffb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x010101017f010101;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000007f8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000007f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0b0b0b0b0b0b0b0b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0b0b0b0b0b0b0b0b;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000c;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0606060606060606;
++  *((unsigned long *)&__m128i_result[0]) = 0x0606060606060606;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaxi_b (__m128i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, -11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, -6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000001fc00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000001fc00000000;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000fff;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, -14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000007ff000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000a1ff4c;
++  *((unsigned long *)&__m128i_result[1]) = 0x000300037ff000ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000300a10003;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x000b000b000b000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000b000b000b000b;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004;
++  __m128i_out = __lsx_vmaxi_h (__m128i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfe07e5fefefdddfe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020100fedd0c00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000201000000000b;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000401000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100000004;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000050000007b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000500000005;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000400000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001fffff001fffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001fffff001fffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x001fffff001fffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x001fffff001fffff;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000b0000000b;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000900000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000900000009;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000600000006;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000600000006;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f80000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f80000000000007;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000700000007;
++  __m128i_out = __lsx_vmaxi_w (__m128i_op0, 7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002;
++  __m128i_out = __lsx_vmaxi_d (__m128i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000007f00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000007f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000;
++  __m128i_out = __lsx_vmaxi_d (__m128i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff489b693120950;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffc45a851c40c18;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000a;
++  __m128i_out = __lsx_vmaxi_d (__m128i_op0, 10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmaxi_d (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x63636b6afe486741;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41f8e880ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x63636b6afe486741;
++  *((unsigned long *)&__m128i_result[0]) = 0x41f8e880ffffffff;
++  __m128i_out = __lsx_vmaxi_d (__m128i_op0, -2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
+new file mode 100644
+index 000000000..b79af2228
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmaxi-2.c
+@@ -0,0 +1,223 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000020002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000020002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303;
++  __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111;
++  __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1111111111111111;
++  *((unsigned long *)&__m128i_result[0]) = 0x1111111111111111;
++  __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0a0a0a0a0a0a0a0a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a0a0a0a0a0a0a0a;
++  __m128i_out = __lsx_vmaxi_bu (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0011001100110011;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20;
++  *((unsigned long *)&__m128i_result[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f000d200e000c20;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0005000500050005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0005000500050005;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x001d001d20000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x001d001d20000020;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00003fff00010000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00123fff00120012;
++  *((unsigned long *)&__m128i_result[0]) = 0x0012001200120012;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001a001a001a001a;
++  *((unsigned long *)&__m128i_result[0]) = 0x001a001a001a001a;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001e001e001e001e;
++  *((unsigned long *)&__m128i_result[0]) = 0x001e001e001e001e;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001d001d001d001d;
++  *((unsigned long *)&__m128i_result[0]) = 0x001d001d001d001d;
++  __m128i_out = __lsx_vmaxi_hu (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000800000008;
++  __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001600000016;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001600000016;
++  __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_result[0]) = 0x1f5533a694f902c0;
++  __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x37c0001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x37c0001000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x37c0001000000001;
++  __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vmaxi_wu (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xcf00000000000000;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000011;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000001c;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001c;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x43d3e0000013e000;
++  *((unsigned long *)&__m128i_result[0]) = 0x43d3e0000013e000;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000100010001007c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000001d;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001d;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001b;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000004;
++  __m128i_out = __lsx_vmaxi_du (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
+new file mode 100644
+index 000000000..b2a7a35bd
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-1.c
+@@ -0,0 +1,434 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fffff0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffff000000ff00;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ff91fffffff5;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff00650001ffb0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000067400002685;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ff91fffffff5;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00650000ff85;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffcafff8ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000a0;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000008680f1ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xff80ffffff80ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xff80ffff8680f1ff;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0e440;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe4ffffffe4ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe4fffff0e4ff;
++  __m128i_out = __lsx_vmin_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000a16316b0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000063636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000a1630000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffc0ff81000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000600000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffc0ff81000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000401000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00fdffffffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe80000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe80ffffffffff02;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x027e0000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe80ffffffffff02;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffe0000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op1[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc090380000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc090380000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffbfffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xc090380000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8493941335f5cc0c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x625a7312befcb21e;
++  *((unsigned long *)&__m128i_result[1]) = 0x8493941300000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000002befcb21e;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000078c00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000d;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x000a000a000a000a;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xa2a2a2a3a2a2a2a3;
++  *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000003000000d613;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c0000000;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000200000001b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff800000ff800000;
++  __m128i_out = __lsx_vmin_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000210011084;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000017f0a82;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5a5a5a5a5b5a5b5a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5a5a5a5a5b5a5b5a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x027c027c000027c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000010000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3e25c8317394dae6;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcda585aebbb2836a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xcda585aebbb2836a;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_result[0]) = 0x377b810912c0e000;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcfcfcfcfcfd;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_result[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_result[0]) = 0xf9796558e39953fd;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmin_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
+new file mode 100644
+index 000000000..c90cae75e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmin-2.c
+@@ -0,0 +1,344 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000300000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffe0004fffe0004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_result[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_result[0]) = 0xf9796558e39953fd;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_op1[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0xf6548a1747e59090;
++  *((unsigned long *)&__m128i_result[0]) = 0x27b169bbb8145f50;
++  __m128i_out = __lsx_vmin_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c63636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x9c9c9c9c00000000;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f417f417f027e03;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m128i_result[0]) = 0x2020202020207e03;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00008d3200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x09e8e9012fded7fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x479f64b03373df61;
++  *((unsigned long *)&__m128i_result[1]) = 0x00008d3200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0c0b0a090b0a0908;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0a09080709080706;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706;
++  *((unsigned long *)&__m128i_result[1]) = 0x0c0b0a090b0a0908;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a09080709080706;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xa87745dbd93e4ea1;
++  *((unsigned long *)&__m128i_op1[0]) = 0xaa49601e26d39860;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2006454690d3de87;
++  *((unsigned long *)&__m128i_result[1]) = 0x2006454652525252;
++  *((unsigned long *)&__m128i_result[0]) = 0x2006454652525252;
++  __m128i_out = __lsx_vmin_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000040100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffff2382;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000040100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010000;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7da9b23a624082fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001010000;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0505050505050505;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000005050000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0028280000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0028280000282800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000282800;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfc01fd13fc02fe0c;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe00fd14fe01fd16;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfc01fd1300000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe00fd1400010000;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001ca02f854;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100013fa0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2000200020002000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2000200020002000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000120002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100013fa0;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000005003a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x86dd8341b164f12b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9611c3985b3159f5;
++  *((unsigned long *)&__m128i_result[1]) = 0x86dd8341b164f12b;
++  *((unsigned long *)&__m128i_result[0]) = 0x9611c3985b3159f5;
++  __m128i_out = __lsx_vmin_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000de0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000006f00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000fea0000fffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff007fff810001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff7f810100001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001fffc0ffffe001;
++  *((unsigned long *)&__m128i_result[1]) = 0xff7f810100001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000400530050ffa6;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007efe7f7f8000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000b81c8382;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000077af9450;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000077af9450;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmin_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
+new file mode 100644
+index 000000000..772d040c3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-1.c
+@@ -0,0 +1,314 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lsxintrin.h&gt;
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffefffefffffffc;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, 4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00002f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958aefff895e;
++  *((unsigned long *)&__m128i_result[1]) = 0xfafafafafafafafa;
++  *((unsigned long *)&__m128i_result[0]) = 0xfafa958aeffa89fa;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, -6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000adadadad;
++  *((unsigned long *)&__m128i_result[1]) = 0xfbfbfbfbadadadad;
++  *((unsigned long *)&__m128i_result[0]) = 0xfbfbfbfbadadadad;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf1f1f1f1f1f1f1f1;
++  *((unsigned long *)&__m128i_result[0]) = 0xf1f1f1f1f1f1f1f1;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000007500;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007e1600007d98;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000f50000000900;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000090900000998;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, 9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x10f881a20ffd02b0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf1f181a2f1f1f1b0;
++  *((unsigned long *)&__m128i_result[0]) = 0xf1f1f1f1f180f1f1;
++  __m128i_out = __lsx_vmini_b (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff6fff6fff6fff6;
++  __m128i_out = __lsx_vmini_h (__m128i_op0, -10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1716151416151413;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1514131214131211;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff3fff3fff3fff3;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff3fff3fff3fff3;
++  __m128i_out = __lsx_vmini_h (__m128i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_result[1]) = 0xfefefefefefefefe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefefefefefefefe;
++  __m128i_out = __lsx_vmini_h (__m128i_op0, 2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_h (__m128i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_h (__m128i_op0, 11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_h (__m128i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff4fffffff4;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff4fffffff4;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff3fffffff3;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff3fffffff3;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, -13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffefffef;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, 0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01fe0400000006;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000500000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01fe0400000005;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, 5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffafffffffa;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffafffffffa;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, -6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000d0000000d;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, 13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, 8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x345002920f3017d6;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff7fffffff7;
++  __m128i_out = __lsx_vmini_w (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, 1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03574e3a62407e03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100100000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff1;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff1;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000006;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000006;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, 6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00a6ffceffb60052;
++  *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff0;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff9;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff9;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111100;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x55aa55c3d5aa55c4;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaa55556fd5aaaac1;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000c;
++  *((unsigned long *)&__m128i_result[0]) = 0xaa55556fd5aaaac1;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, 12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff4;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffb;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffb;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcdcfcfcfcdc;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, 3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffc;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000085af0000b000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00017ea200002000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff7;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffff4;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffff4;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, -12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01e41ffff0ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01e41ffff0ffff;
++  __m128i_out = __lsx_vmini_d (__m128i_op0, 14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
+new file mode 100644
+index 000000000..6eaae2134
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmini-2.c
+@@ -0,0 +1,216 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lsxintrin.h&gt;
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0001ffff0001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000a163000016b0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0303000103030001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000030300000303;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd8248069ffe78077;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0d0d0d0d0d0d0d0d;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7da9b23a624082fd;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0505050505050505;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000005050000;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000e0000000e;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000001fffdfffdff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000001fffdfffdff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010101010101;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000009c007c00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000071007600;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000009000900;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000009000900;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_result[0]) = 0x0303030303030303;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd3220000d3f20000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8bff0000a7b80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0909000009090000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0909000009090000;
++  __m128i_out = __lsx_vmini_bu (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_hu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80000000b57ec564;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000083ff0be0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0014000000140014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0014000000140014;
++  __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0013001300130013;
++  *((unsigned long *)&__m128i_result[0]) = 0x0013001300130013;
++  __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x02b010f881a281a2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x27b169bbb8145f50;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002;
++  __m128i_out = __lsx_vmini_hu (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000040004000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff0000007f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000003fc00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001fe01fe00;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000a;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000a;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000000b;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000d3460001518a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000084300000e55f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000016;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff2356fe165486;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5efeb3165bd7653d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000007;
++  __m128i_out = __lsx_vmini_du (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vrotr-vrotri-vsra-.patch b/LoongArch-Add-tests-for-SX-vector-vrotr-vrotri-vsra-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..a46d041be2163b7b3fec57430aa0919043e062cb
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vrotr-vrotri-vsra-.patch
@@ -0,0 +1,3173 @@
+From 67c36add58d634551a200f1473be3c7368530da1 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 11:13:32 +0800
+Subject: [PATCH 090/124] LoongArch: Add tests for SX vector
+ vrotr/vrotri/vsra/vsrai/vsran/vsrani /vsrarn/vsrarni instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vrotr.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vrotri.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsra.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrai.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsran.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrani.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrar.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrari.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrarn.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrarni.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vrotr.c          | 381 +++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vrotri.c         | 294 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vsra.c           | 344 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vsrai.c          | 258 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vsran.c          | 290 +++++++++++++
+ .../loongarch/vector/lsx/lsx-vsrani.c         | 246 +++++++++++
+ .../loongarch/vector/lsx/lsx-vsrar.c          | 354 ++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vsrari.c         | 265 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vsrarn.c         | 236 +++++++++++
+ .../loongarch/vector/lsx/lsx-vsrarni.c        | 398 ++++++++++++++++++
+ 10 files changed, 3066 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
+new file mode 100644
+index 000000000..c42440cea
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotr.c
+@@ -0,0 +1,381 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2001240128032403;
++  *((unsigned long *)&__m128i_op1[0]) = 0x288b248c00010401;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffdfffefffff7ffe;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x5);
++  *((unsigned long *)&__m128i_op0[1]) = 0x2700000000002727;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000002727;
++  *((unsigned long *)&__m128i_op1[1]) = 0x697eba2bedfa9c82;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd705c77a7025c899;
++  *((unsigned long *)&__m128i_result[1]) = 0xc9c00000000009c9;
++  *((unsigned long *)&__m128i_result[0]) = 0x0013938000000000;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000000010000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100100000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x2000000020000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200200000;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x9f009f009f009f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x9f009f009f009f00;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000004fc04f81;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000004fc04f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000004fc04f81;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000004fc04f80;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000958affff995d;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000de0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001f0a;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x41dfffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfbffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7bffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xf7ffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xf7feffffffffffff;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0ba00ba00ba00ba0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0ba00ba00ba011eb;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf1819b7c0732a6b6;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffb9917a6e7fffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x05d0ba0002e8802e;
++  *((unsigned long *)&__m128i_result[0]) = 0xd005e802174023d6;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000691a6c843c8fc;
++  *((unsigned long *)&__m128i_result[0]) = 0x000691a6918691fc;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000003f0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffc3ffff003e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000003dffc2;
++  *((unsigned long *)&__m128i_result[1]) = 0xc000000fc0003fff;
++  *((unsigned long *)&__m128i_result[0]) = 0xbffffff0ffffc00f;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffdfffdfffdfffd;
++  *((unsigned long *)&__m128i_result[1]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m128i_result[0]) = 0xffefffefffefffef;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001010002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010002;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  __m128i_out = __lsx_vrotr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4e3e133738bb47d2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x9c7c266e71768fa4;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001a64b345308091;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001f2f2cab1c732a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000014414104505;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1011050040004101;
++  *((unsigned long *)&__m128i_result[1]) = 0x001a323b5430048c;
++  *((unsigned long *)&__m128i_result[0]) = 0x008f792cab1cb915;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000001e03;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001a64b345308091;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001f2f2cab1c732a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000780c00000;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00020000ffff0001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000b000b000b000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000b000b000b000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000b000b000b000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x000b000b000b000b;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0005840100000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0005847b00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x636363633f3e47c1;
++  *((unsigned long *)&__m128i_op1[0]) = 0x41f8e080f1ef4eaa;
++  *((unsigned long *)&__m128i_result[1]) = 0xa000308000008002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0500847b00000000;
++  __m128i_out = __lsx_vrotr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
+new file mode 100644
+index 000000000..4ae4dbf8b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vrotri.c
+@@ -0,0 +1,294 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000000020000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0d1bffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd915e98e2d8df4d1;
++  *((unsigned long *)&__m128i_result[1]) = 0xd0b1ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x9d519ee8d2d84f1d;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_op0[0]) = 0x203e16d116de012b;
++  *((unsigned long *)&__m128i_result[1]) = 0x887c8beb969e00f2;
++  *((unsigned long *)&__m128i_result[0]) = 0x101f8b680b6f8095;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0800000008000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0800000008000000;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000c00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffff01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffeff400000df4;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff03fe;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe9df0000e81b;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000de00003e14;
++  *((unsigned long *)&__m128i_result[0]) = 0x00012b15ffff32ba;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80001b155b4b0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x80001b155b4b0000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffefffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffefffff;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_result[1]) = 0x1111311111114111;
++  *((unsigned long *)&__m128i_result[0]) = 0x1111311111112111;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x35);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0008000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff800000003;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000003f0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffc3ffff003e;
++  *((unsigned long *)&__m128i_result[1]) = 0x00001f80007fff80;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe1ffff801f7f;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff0000ffff0000f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff02d060;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff02d060;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x27b9331b8e77ead9;
++  *((unsigned long *)&__m128i_op0[0]) = 0x58d6bf1867ace738;
++  *((unsigned long *)&__m128i_result[1]) = 0xe4cc6c9edfab6639;
++  *((unsigned long *)&__m128i_result[0]) = 0x5afc6163b39ce19e;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_w (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vrotri_d (__m128i_op0, 0x21);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vrotri_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
+new file mode 100644
+index 000000000..fd7c22a82
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsra.c
+@@ -0,0 +1,344 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1e801ffc7fc00000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ed0008005e00a2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x007a007600150077;
++  *((unsigned long *)&__m128i_result[1]) = 0x0003000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0007007f03fe0000;
++  __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe001ffffe001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe001ffffe001;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fc000003fc00000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3fc000003fc00000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fc000003fc00000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fc000003fc00000;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00003ff000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000fffc00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x85bd6b0e94d89998;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd83c8081ffff8080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x85bd6b0e94d89998;
++  *((unsigned long *)&__m128i_result[0]) = 0xd83c8081ffff8080;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe0d56a9774f3ea31;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbddaa86803e33c2a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe0d56a9774f3ea31;
++  *((unsigned long *)&__m128i_op1[0]) = 0xbddaa86803e33c2a;
++  *((unsigned long *)&__m128i_result[1]) = 0xff0600d50e9ef518;
++  *((unsigned long *)&__m128i_result[0]) = 0xffefffa8007c000f;
++  __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xaaaaffebcfb748e0;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfd293eab528e7ebe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffefff6fff80002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe0404041e0404041;
++  *((unsigned long *)&__m128i_op1[0]) = 0x803f800080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000700ff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000040004000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000700ff00000000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000820000ff81;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff810000ff81;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000820000ff81;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff810000ff81;
++  __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x800080007f008000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a0aa9890a0ac5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffff000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312;
++  *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d001b25;
++  *((unsigned long *)&__m128i_op1[0]) = 0x191817161514131d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001e8e1d8;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000e400000001;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000080008;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000040002;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffac0a000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x801d5de0000559e0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x77eb86788eebafe1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffac00000000;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcfcfcfcfcfd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcfcfcfc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_result[0]) = 0x5252525252525252;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2e2b34ca59fa4c88;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3b2c8aefd44be966;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0802080408060803;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001fffe0001fff;
++  __m128i_out = __lsx_vsra_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000;
++  __m128i_out = __lsx_vsra_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000047fe2f0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000047fe2f0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010012;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fec20704;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000043fe2fc;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000001fffff;
++  __m128i_out = __lsx_vsra_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
+new file mode 100644
+index 000000000..2ca4f0b7a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrai.c
+@@ -0,0 +1,258 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x21);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001ffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ca354688;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000007;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffc0ffff003f;
++  __m128i_out = __lsx_vsrai_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf6e91c0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x51cfd7c000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffd000700000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0014fff500000000;
++  __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3c600000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0f180000ffe00000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21f32eaf5b7a02c8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x407c2ca32cbd0357;
++  *((unsigned long *)&__m128i_result[1]) = 0x10f917d72d3d01e4;
++  *((unsigned long *)&__m128i_result[0]) = 0x203e16d116de012b;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_w (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x01ff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x01ff000000000000;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1268f057137a0267;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0048137ef886fae0;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000490000004d;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffffe2;
++  __m128i_out = __lsx_vsrai_w (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffffff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ffffffffff;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe80;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x30);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001800000039;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000049ffffffaa;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000060000000e;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000127fffffea;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0aa077b7054c9554;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40c7ee1f38e4c4e8;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000ffff;
++  __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_w (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fff3fff3fff3fff;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002ebf;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x31);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x31);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_w (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000190;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010058;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000100010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00f0001000000010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x00f0001000000010;
++  __m128i_out = __lsx_vsrai_h (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrai_d (__m128i_op0, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vsrai_h (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
+new file mode 100644
+index 000000000..4e7c7ab7e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsran.c
+@@ -0,0 +1,290 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fffe0001fffe;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0303020102020001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000201;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd82480697f678077;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0301020100000004;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff02;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3c5fffffff7fffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffefffeff00feff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000e0180000e810;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000f0080000f800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000e0180000e810;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000f0080000f800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000f0f800;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff00000000;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100089bde;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x80044def00000001;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000100f8100002;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff0ff8006f0f950;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000001f0a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff7a53;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000021e79364;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000718ea657431b;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfefffffffeffda6f;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfefffffffeffe3d7;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff0000ff86;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0101fe870101fe87;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101fe8700000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x353c8cc4b1ec5b09;
++  *((unsigned long *)&__m128i_op1[1]) = 0x002affd600000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcbc2723a4f12a5f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808000000035;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff80ff00ff80ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff000ff6220c0c1;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffe8081000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ff000ff6220c0c1;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffe8081000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xb110606000000000;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0037ffd40083ffe5;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001e0052001ffff9;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001effae001effae;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00df020f0078007f;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80ffa2fff0ff74;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff76ffd8ffe6ffaa;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffc105d1aa;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffbc19ecca;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffe03ff63ff9bf;
++  __m128i_out = __lsx_vsran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x06d9090909090909;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0039d21e3229d4e8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6d339b4f3b439885;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000db24848;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe3bfb01fe3bfe01;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe03fe3ffe01fa21;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vsran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
+new file mode 100644
+index 000000000..92988035d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrani.c
+@@ -0,0 +1,246 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000b0000000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000201000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0005000501800005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x438ff81ff81ff820;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x03ff03ff03ff03ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000043;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x78);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000002020202;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x5b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000009;
++  *((unsigned long *)&__m128i_op1[1]) = 0x697eba2bedfa9c82;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd705c77a7025c899;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x03fdfffcfefe03fe;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ffffff00ff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000010001000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff00ff00ffffff;
++  __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x40f0001000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1e0200001e020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0800080008000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0040004000400040;
++  __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000040000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001ffce00016fb41;
++  *((unsigned long *)&__m128i_op0[0]) = 0x57cb857100001a46;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfbffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7bffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000150000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffeffff001effff;
++  __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_wu (__m128i_op0, 0x1);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2020202020207fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x01010101010101ff;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff082f000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff000000000000;
++  __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00005dcbe7e830c0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03f21e0114bf19da;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000003f200001e01;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000014bf000019da;
++  *((unsigned long *)&__m128i_result[1]) = 0x0005fe0300010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100010001;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x62cbf96e4acfaf40;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf0bc9a5278285a4a;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x62cbf96e4acfaf40;
++  __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x40);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffb6d01f5f94f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001f50000;
++  __m128i_out = __lsx_vsrani_h_w (__m128i_op0, __m128i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x808080e280808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080636380806363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080808080638063;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x63);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0f07697100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000076971000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000003020302;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff81;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000c0c00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_d_q (__m128i_op0, __m128i_op1, 0x58);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrani_b_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5847b72626ce61ef;
++  *((unsigned long *)&__m128i_op0[0]) = 0x110053f401e7cced;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5847b72626ce61ef;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0005847b00011005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0005847b00000000;
++  __m128i_out = __lsx_vsrani_w_d (__m128i_op0, __m128i_op1, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
+new file mode 100644
+index 000000000..6a842d9ce
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrar.c
+@@ -0,0 +1,354 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff02ff1bff02ff23;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffff02fff4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001300000013;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xff00ff00ff00ff00;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000400000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffefefe6a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c2bac2c2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c2bac2c2;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000010000003f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000010000003f;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x4f804f804f804f80;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80010001b57fc565;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8001000184000be0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x80010001b57fc565;
++  *((unsigned long *)&__m128i_result[0]) = 0x8001000184000be0;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000006f00001f0a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000958affff995d;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc0fffff000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000bf;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000002bb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc0fffff000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff6080ffff4417;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff6080ffff4417;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fbf3fbf00007fff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000003a0000003a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000003a0000003a;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0086000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0082000000000007;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0086000000040000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0082000000000007;
++  __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x467f6080467d607f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0037ffc8d7ff2800;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[1]) = 0x001bffe4ebff9400;
++  *((unsigned long *)&__m128i_result[0]) = 0xff80000000000000;
++  __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2a29282726252423;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2221201f1e1d1c1b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000005452505;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000004442403e4;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100010001000100;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffc00000ff800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffe4866c86;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe4866c86;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000002000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000002000000;
++  __m128i_out = __lsx_vsrar_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1748c4f9ed1a5870;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1748c4f9ed1a5870;
++  __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x680485c8b304b019;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc89d7f0ff90da019;
++  *((unsigned long *)&__m128i_op1[1]) = 0x680485c8b304b019;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc89d7f0ff90da019;
++  *((unsigned long *)&__m128i_result[1]) = 0x00680486ffffffda;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff913bfffffffd;
++  __m128i_out = __lsx_vsrar_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrar_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
+new file mode 100644
+index 000000000..2a353d65a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrari.c
+@@ -0,0 +1,265 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{ /* Machine-generated cases: each stores operand/expected bit patterns into the vectors lane-by-lane, runs one __lsx_vsrari_* intrinsic (shift-right-arithmetic, rounded, immediate count -- per LSX naming), then compares both 64-bit lanes via ASSERTEQ_64.  */
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; /* unused here */
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; /* unused here */
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; /* generator scratch; unused (warnings silenced by -w in dg-options) */
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000; /* lane stores type-pun through unsigned long*; relies on -fno-strict-aliasing from dg-options */
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000cb4a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000f909;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf4b6f3f52f4ef4a8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff5fff4002ffff5;
++  __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffc0ff81000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff0ffe04000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000000f3;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000f3;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000958affff995d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000fdfc0000fd03;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000017161515;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000095141311;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x34);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e19181716;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000109000000c9;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x77c0404a4000403a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x77c03fd640003fc6;
++  *((unsigned long *)&__m128i_result[1]) = 0x00f0008100800080;
++  *((unsigned long *)&__m128i_result[0]) = 0x00f0008000800080;
++  __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000006c80031;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000a6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001200100012001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000080000000800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000404040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb020302101b03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310d0c0030220;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001340134013401;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001340134013401;
++  __m128i_out = __lsx_vsrari_d (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_w (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrari_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
+new file mode 100644
+index 000000000..60d474203
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarn.c
+@@ -0,0 +1,236 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{ /* Machine-generated cases: each stores operand/expected bit patterns lane-by-lane, runs one __lsx_vsrarn_* intrinsic (shift-right-arithmetic, rounded, narrowing to the low half -- per LSX naming), then compares both 64-bit lanes via ASSERTEQ_64.  */
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result; /* unused here */
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result; /* unused here */
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail; /* generator scratch; unused (warnings silenced by -w in dg-options) */
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffef; /* lane stores type-pun through unsigned long*; relies on -fno-strict-aliasing from dg-options */
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffff1;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefff6fff80002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff000000fefb0000;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000200;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000c2f90000bafa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000c2fa8000c2fa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc2f9bafac2fac2fa;
++  __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0204;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000001d5d4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000150d707009;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x03f1e3d28b1a8a1a;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffefffefffeffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffefffefffeffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff7f810100001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001fffc0ffffe001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000002259662;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc4dbe60354005d25;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f01000000f8ff00;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff6ff4ffff8db8;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffbaf4ffffb805;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9c7c266e71768fa4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff4ffb800ff0080;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000044470000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00004dce00004700;
++  __m128i_out = __lsx_vsrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0b4c600000000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x08080807f5f5f5f8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0202f5f80000ff00;
++  __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0d060d060d060d06;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0d060d060d060d06;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0d060d060d060d06;
++  __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffee;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff040;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff01fe03ff01fe03;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff01fe03ff01fe03;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01fe03ff01fe03;
++  __m128i_out = __lsx_vsrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
+new file mode 100644
+index 000000000..3aa23bdc8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrarni.c
+@@ -0,0 +1,398 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff020000fff4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff020000fff4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fc0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000080007f80800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000001e5;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x5000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff80000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff8000002f4ef4a8;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000f4a8;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00100184017e0032;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0086018c01360164;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffff33c4b1e67;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800c0004300c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000e0000000e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x66);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4101010141010100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000001ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0020808100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x29);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x64);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x028c026bfff027af;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000003fc03fc00;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffc00a3009b000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ffa7f8ff81;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000003f0080ffc0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000007fff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000a7f87fffff81;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffd400000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000004000000040;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000080003f80ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000001fc00000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff80010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff80010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1ffffffff8001000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf0bd80bd80bd8000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x24);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xecec006c00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xecec006c00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff007f00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001ff85ffdc0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000332ae5d97330;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1ff85ffe2ae5d973;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000043c5ea7b6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000008fc4ef7b4;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000fea0000fffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x48);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x64);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x09e8e9012fded7fd;
++  *((unsigned long *)&__m128i_op0[0]) = 0x479f64b03373df61;
++  *((unsigned long *)&__m128i_op1[1]) = 0x04c0044a0400043a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x04c004d6040004c6;
++  *((unsigned long *)&__m128i_result[1]) = 0x1d20db00ec967bec;
++  *((unsigned long *)&__m128i_result[0]) = 0x00890087009b0099;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000080800000808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080000180800001;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000003e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe000200fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe000200fe;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000003e;
++  *((unsigned long *)&__m128i_result[0]) = 0xfefe02fefefe02fe;
++  __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000000010000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0103000201030002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffc000400000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00003fff00010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x6d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff010000ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_result[1]) = 0xf359f359f359f359;
++  *((unsigned long *)&__m128i_result[0]) = 0xf359f359f359f359;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_b_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x01533b5e7489ae24;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffab7e71e33848;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xce9135c49ffff570;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_d_q (__m128i_op0, __m128i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000807bf0a1f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000800ecedee68;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0005840100000005;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0005847b00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001f0a20001cedf;
++  *((unsigned long *)&__m128i_result[0]) = 0x0058000000580000;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffb1fb1000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf2c97aaa7d8fa270;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0b73e427f7cfcb88;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrarni_w_d (__m128i_op0, __m128i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0a545374471b7070;
++  *((unsigned long *)&__m128i_op0[0]) = 0x274f4f0648145f50;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0xa8a736e19e9e28bf;
++  *((unsigned long *)&__m128i_result[0]) = 0x9e9f9e9f9e9f9e9f;
++  __m128i_out = __lsx_vsrarni_h_w (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vsll-vslli-vsrl-vs.patch b/LoongArch-Add-tests-for-SX-vector-vsll-vslli-vsrl-vs.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e0e8d11e59b8ec0056f56f7988ad005681a242b5
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vsll-vslli-vsrl-vs.patch
@@ -0,0 +1,4023 @@
+From 64d3c9507fdf2829659affdb7d0490e7b2888787 Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 10:55:35 +0800
+Subject: [PATCH 089/124] LoongArch: Add tests for SX vector
+ vsll/vslli/vsrl/vsrli/vsrln/vsrlni/vsrlr /vsrlri/vslrlrn/vsrlrni
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vsll.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vslli.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrl.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrli.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrln.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlni.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlr.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlri.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vsll.c           | 254 +++++++
+ .../loongarch/vector/lsx/lsx-vslli.c          | 293 ++++++++
+ .../loongarch/vector/lsx/lsx-vsllwil-1.c      | 244 +++++++
+ .../loongarch/vector/lsx/lsx-vsllwil-2.c      | 189 +++++
+ .../loongarch/vector/lsx/lsx-vsrl.c           | 389 ++++++++++
+ .../loongarch/vector/lsx/lsx-vsrli.c          | 328 +++++++++
+ .../loongarch/vector/lsx/lsx-vsrln.c          | 335 +++++++++
+ .../loongarch/vector/lsx/lsx-vsrlni.c         | 281 +++++++
+ .../loongarch/vector/lsx/lsx-vsrlr.c          | 434 +++++++++++
+ .../loongarch/vector/lsx/lsx-vsrlri.c         | 300 ++++++++
+ .../loongarch/vector/lsx/lsx-vsrlrn.c         | 164 +++++
+ .../loongarch/vector/lsx/lsx-vsrlrni.c        | 686 ++++++++++++++++++
+ 12 files changed, 3897 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
+new file mode 100644
+index 000000000..7b8ad7d5a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsll.c
+@@ -0,0 +1,254 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1dcc4255c9d85c05;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3ab7a3fc47a5c31a;
++  *((unsigned long *)&__m128i_result[1]) = 0xb9884ab93b0b80a0;
++  *((unsigned long *)&__m128i_result[0]) = 0xf11e970c68000000;
++  __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100000100010001;
++  __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00307028003f80b0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0040007fff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffc0ffffff81;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff008000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0060e050007f0160;
++  *((unsigned long *)&__m128i_result[0]) = 0x0040007fff800000;
++  __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000401000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000401000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3fffffff80000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00003ffd000a4000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffcffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000fffd000a0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf000800080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000a00028004000;
++  __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6b9fe3649c9d6363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363bc9e8b696363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6b9fe3649c9d6363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363bc9e8b696363;
++  *((unsigned long *)&__m128i_result[1]) = 0xb9fe3640e4eb1b18;
++  *((unsigned long *)&__m128i_result[0]) = 0x800000005b4b1b18;
++  __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80001b155b4b0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00006c82;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00009b140000917b;
++  *((unsigned long *)&__m128i_result[1]) = 0x80000000fffffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xb150000000000000;
++  __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff7e00000081;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000008000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03f1e3d28b1a8a1a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x03f1e3d28b1a8a1a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x18e2184858682868;
++  __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff02d060;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff02d060;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff02d06000000000;
++  __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100010001;
++  __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vsll_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000200000001c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000200000001c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000200000001c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000200000001c;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000020000000c0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000020000000c0;
++  __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsll_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
+new file mode 100644
+index 000000000..7a77e80c0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vslli.c
+@@ -0,0 +1,293 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x35);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xaaaaffebcfb748e0;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfd293eab528e7ebe;
++  *((unsigned long *)&__m128i_result[1]) = 0xf6e91c0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x51cfd7c000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffff0ffe04000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffcfffcfffcfffc;
++  __m128i_out = __lsx_vslli_h (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc39fffff007fffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0e7ffffc01fffffc;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000003f803f4;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vslli_h (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000040;
++  __m128i_out = __lsx_vslli_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff00ffff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0xfcfcfc00fcfc00fc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcfcfcfcfc00;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000060;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x38);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f00f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000f00f;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000060000000;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x61608654a2d4f6da;
++  *((unsigned long *)&__m128i_result[1]) = 0xfee0000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc2c00ca844a8ecb4;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_h (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0100000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100000000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x36);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf0003000f0003000;
++  __m128i_out = __lsx_vslli_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff800fff01;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff001ffe02;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd78cfd70b5f65d76;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5779108fdedda7e4;
++  *((unsigned long *)&__m128i_result[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_result[0]) = 0xc8847ef6ed3f2000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffff7fffffff7;
++  *((unsigned long *)&__m128i_result[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_result[0]) = 0xfcfcfcdcfcfcfcdc;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xc0c0c0c0c0c0c0c0;
++  *((unsigned long *)&__m128i_result[0]) = 0xc0c0c0c0c0c0c0c0;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe2560afe9c001a18;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe2560afe9c001a18;
++  *((unsigned long *)&__m128i_result[1]) = 0x89582bf870006860;
++  *((unsigned long *)&__m128i_result[0]) = 0x89582bf870006860;
++  __m128i_out = __lsx_vslli_w (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x841f000fc28f801f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x107c003c083c007c;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff9727ffff9727;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffe79ffffba5f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff972700000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffba5f00000000;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x20);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x101b0330eb022002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x030220020310edc0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0080800080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080008000;
++  __m128i_out = __lsx_vslli_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x317fce80317fce80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf0000000f0000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vslli_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0177fff0fffffff0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000011ff8bc;
++  *((unsigned long *)&__m128i_result[1]) = 0x05dfffc3ffffffc0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000047fe2f0;
++  __m128i_out = __lsx_vslli_d (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
+new file mode 100644
+index 000000000..796e88cad
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-1.c
+@@ -0,0 +1,244 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002000000020;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000fc00;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000fc00;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffeb48e03eab7ebe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffc0fac01200f800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f80eac01f80ef80;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000e7e20468;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc2fac2fa53e7db29;
++  *((unsigned long *)&__m128i_result[1]) = 0xff84fff4ff84fff4;
++  *((unsigned long *)&__m128i_result[0]) = 0x00a6ffceffb60052;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x002e0059003b0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000005c000000b2;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000007600000000;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2e34594c3b000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x017001a002c80260;
++  *((unsigned long *)&__m128i_result[0]) = 0x01d8000000000000;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f4f4f;
++  *((unsigned long *)&__m128i_result[1]) = 0x09e009e009e009e0;
++  *((unsigned long *)&__m128i_result[0]) = 0x09e009e009e009e0;
++  __m128i_out = __lsx_vsllwil_h_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000040000000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000005050000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0505000005050505;
++  *((unsigned long *)&__m128i_result[1]) = 0x0028280000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0028280000282800;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffc0000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000000;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff00ffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff00ffffff00;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf10cf508f904fd01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf10cf508f904fd01;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21201f1e1d001b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1918171615141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001918000017160;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001514000013120;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffff60ca7104649;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff790a15db63d;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffc00ffde4000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfe857400fed8f400;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1c6c80007fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0038d800ff000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fffe00fffffe00;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff800000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff800000000000;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsllwil_w_h (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000007fff800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff80ff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff80000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000001fffe;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000040004000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010002000000000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000017fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x003fffffff800000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x57f160c4a1750eda;
++  *((unsigned long *)&__m128i_result[1]) = 0x000002bf8b062000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffd0ba876d000;
++  __m128i_out = __lsx_vsllwil_d_w (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
+new file mode 100644
+index 000000000..5f46293dc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsllwil-2.c
+@@ -0,0 +1,189 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f7f02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00003f803f800100;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0014000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f807f807f807f80;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001030103;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0020006000200060;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0808080808080805;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080808080805;
++  *((unsigned long *)&__m128i_result[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0020002000200014;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000201fe01fc;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000201fe01fc;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff1affff01001fe0;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d02834d70;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f800d007f803680;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100418026803800;
++  __m128i_out = __lsx_vsllwil_hu_bu (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3e2b34ca59fa4c88;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3b2c8aefd44be966;
++  *((unsigned long *)&__m128i_result[1]) = 0x0007658000115de0;
++  *((unsigned long *)&__m128i_result[0]) = 0x001a8960001d2cc0;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffff000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ff00;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000040600000406;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020202020202fe02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0020200000202000;
++  *((unsigned long *)&__m128i_result[0]) = 0x002020000fe02000;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000001ffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3131313131313131;
++  *((unsigned long *)&__m128i_result[1]) = 0x0313100003131000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0313100003131000;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000900000009;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000900000009;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000090;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000090;
++  __m128i_out = __lsx_vsllwil_wu_hu (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000020000007d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000001f400000;
++  __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000280000;
++  __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000e27ca;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001fde020000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001c4f940000;
++  __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ffffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ffffffff00;
++  __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000fffffffe000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000102020204000;
++  __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  *((unsigned long *)&__m128i_op0[1]) = 0x8d78336c83652b86;
++  *((unsigned long *)&__m128i_op0[0]) = 0x39c51f389c0d6112;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001ce28f9c0;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000004e06b0890;
++  __m128i_out = __lsx_vsllwil_du_wu (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
+new file mode 100644
+index 000000000..f9c789855
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrl.c
+@@ -0,0 +1,389 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffefffffffef;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffefffffffef;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101010100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000005555555554;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001000f000e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000fff1000ffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002a55005501;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000002a55000001;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80000000fff8fff8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f800000fff8fff8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f800000fff80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x80000000fff80000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0004000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0004000000040000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff6fff6fff6fff6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000750500006541;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000100fffffefd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00f900d7003d00e4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003e00d100de002b;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f4000007f040000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f0200007f020000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe000000f6;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x01010101ffffff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x01010101000000f6;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000049000000c0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffff29;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffff7f00ff00ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff007f0101017f;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff2900000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000401000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff2900000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc2f9bafac2fac2fa;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101080408040804;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0804080407040804;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000010a000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0101080408040804;
++  *((unsigned long *)&__m128i_result[0]) = 0x000100810080e081;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4688500046f6a000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f8000004f7fff02;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ffffff03ffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00013fff;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000021ffffffdf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000e60;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0202fe02fd020102;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0400040004000400;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101fe870101fe87;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101fe8700000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x61608654a2d4f6da;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fb01;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000007000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000fb01;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000e0000;
++  __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000000000;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff000100ff00fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff003000ff00a0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff000100ff00fe;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff003000ff00a0;
++  __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100010100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffe0000000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ff00ff;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe7fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000001fd02;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffe1fffffff;
++  __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000900000009;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff7fffffff7f;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff007fff810001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000400530050ffa6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff800fff01;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000007ff000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000056f64adb9464;
++  *((unsigned long *)&__m128i_op1[0]) = 0x29ca096f235819c2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000004399d32;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffffffffffff;
++  __m128i_out = __lsx_vsrl_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrl_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
+new file mode 100644
+index 000000000..7b5e9a7bf
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrli.c
+@@ -0,0 +1,328 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001ffff0001ffff;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000020000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000100000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000080000;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000017f0a82;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x03ff03ff03ff03ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000400000004000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000400000204010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000020000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000020000010200;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000003fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000003fffffff;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x37);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0020002000200020;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffefffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffefffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0007000700070007;
++  *((unsigned long *)&__m128i_result[0]) = 0x0007000700070007;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000c000c000c000c;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000003d0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000003d0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000030000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000030000;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00fe00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000400000000;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xaa14efac3bb62636;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd6c22c8353a80d2c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000300000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003000000010000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fff3fff3fff3fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080000700000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffbffda;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001010101;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffefffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x000001fffdfffdff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000001fffdfffdff;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x001a64b345308091;
++  *((unsigned long *)&__m128i_result[0]) = 0x001f2f2cab1c732a;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000290;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000290;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020000ffff0001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000003030000;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000002345454;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0dec4ca;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000060006;
++  __m128i_out = __lsx_vsrli_h (__m128i_op0, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000200000000d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_b (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000003e0000003f;
++  __m128i_out = __lsx_vsrli_w (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrli_d (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
+new file mode 100644
+index 000000000..5a8f4f70a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrln.c
+@@ -0,0 +1,335 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000c77c000047cd;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000c0f100006549;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffdfff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffdfff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffe00001ffe200;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffdfff;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff35cab978;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff35cab978;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010035;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000020;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x80307028ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8040007fffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0101ff010101;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4180418041804180;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000fffefffefffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00000000;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00008bf700017052;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000f841000091aa;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe6d4572c8a5835bc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe5017c2ac9ca9fd0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000f8410000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001010001;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000100000001000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0ed5ced7e51023e5;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001000e51023e5;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffbfff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000020002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000017ffeffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000017ffeffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x379674c000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3789f68000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfefeff00fefeff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfefeff00fefeff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00c0000000800000;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e71768fa4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000071768fa4;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffdfdc0d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffdfdc0d;
++  __m128i_out = __lsx_vsrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000002427c2ee;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
+new file mode 100644
+index 000000000..ca462c834
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlni.c
+@@ -0,0 +1,281 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00003fe00ffe3fe0;
++  __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff00ff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000001f;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x7b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc39fffff007fffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000fe00fd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x78c00000ff000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x61cf003f0000007f;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000003c607f80;
++  __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff7f01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff7f01ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffe03;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffe03;
++  __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff8001ffff8001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_result[0]) = 0x000fffefffefffef;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x4b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363797c63990099;
++  *((unsigned long *)&__m128i_op0[0]) = 0x171f0a1f6376441f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363797c63990099;
++  *((unsigned long *)&__m128i_op1[0]) = 0x171f0a1f6376441f;
++  *((unsigned long *)&__m128i_result[1]) = 0x181e180005021811;
++  *((unsigned long *)&__m128i_result[0]) = 0x181e180005021811;
++  __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00003fff00003fff;
++  __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf0fd800080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000a00028004000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000f000800000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x000f000000000000;
++  __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xaeaeaeaeaeaeae35;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaeaeaeaeaeaeae35;
++  *((unsigned long *)&__m128i_op1[1]) = 0xaeaeaeaeaeaeae35;
++  *((unsigned long *)&__m128i_op1[0]) = 0xaeaeaeaeaeaeae35;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000002;
++  __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00000000;
++  __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000008140c80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000008140c80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000002050320;
++  __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000002050320;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x010101017f010101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000040600000406;
++  *((unsigned long *)&__m128i_result[0]) = 0x020202020202fe02;
++  __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe364525335ede000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000fff00000e36;
++  __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x34);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x601fbfbeffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffb00fdfdf7ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff8000000000000;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrlni_h_w (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000455555555;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000008;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_w_d (__m128i_op0, __m128i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7c7c000000007176;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000001f1f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000bffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x6d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe4c8b96e2560afe9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc001a1867fffa207;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe4c8b96e2560afe9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc001a1867fffa207;
++  *((unsigned long *)&__m128i_result[1]) = 0xe2560afe9c001a18;
++  *((unsigned long *)&__m128i_result[0]) = 0xe2560afe9c001a18;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x24);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000042ab41;
++  *((unsigned long *)&__m128i_op0[0]) = 0xb1b1b1b1b16f0670;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000044470000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000100;
++  __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000080c43b700;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x56);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_op1[0]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_result[1]) = 0x022002101b200203;
++  *((unsigned long *)&__m128i_result[0]) = 0x022002101b200203;
++  __m128i_out = __lsx_vsrlni_d_q (__m128i_op0, __m128i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlni_b_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
+new file mode 100644
+index 000000000..211339bb8
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlr.c
+@@ -0,0 +1,434 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x43e092728266beba;
++  *((unsigned long *)&__m128i_op1[0]) = 0x43d8969cc4afbf2d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc001fffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffff8000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010000200020002;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffff0ffe04000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff01ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000101fd01fe;
++  *((unsigned long *)&__m128i_result[1]) = 0xff80ff80ff80ff80;
++  *((unsigned long *)&__m128i_result[0]) = 0xff80ff8080008000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff51cf8da;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffd6040188;
++  *((unsigned long *)&__m128i_result[1]) = 0x00020002000d0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000020f2300ee;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007f8000007f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000003fc;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000003fc;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000006;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0040000000400000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0040000000400000;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0020808100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe218ffffea10;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff208fffffa02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffe218ffffea10;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff208fffffa02;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x111110ff11111141;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000f00f;
++  *((unsigned long *)&__m128i_result[1]) = 0x111110ff11111141;
++  *((unsigned long *)&__m128i_result[0]) = 0x1111113111111100;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000003fbf3fbf;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7ff8;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000200000100;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfeca2eb9931;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00d3007c014e00bd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x06e1000e00030005;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0202020202020202;
++  *((unsigned long *)&__m128i_op0[0]) = 0x363d753d50155c0a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe500c085c000c005;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe5c1a185c48004c5;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002020002020200;
++  *((unsigned long *)&__m128i_result[0]) = 0x021f3b0205150600;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe000ffdf;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffe000ffdf;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffe080f6efc100f7;
++  *((unsigned long *)&__m128i_op1[0]) = 0xefd32176ffe100f7;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000040000000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000040000000000;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffdfe01;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffdfe0200000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4000000000000000;
++  __m128i_out = __lsx_vsrlr_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa352bfac9269e0aa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xd70b30c96ea9f4e8;
++  *((unsigned long *)&__m128i_result[0]) = 0xa352bfac9269e0aa;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000005;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000158;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00009c7c00007176;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000001fffeff98;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0014ffe4ff76ffc4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000010;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x084d1a0907151a3d;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff9fffefff9ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0280000000000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0700f8ff0700f8ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0700f8ff0700f8ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3bc000003a800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000000900;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000090a00000998;
++  *((unsigned long *)&__m128i_result[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000ef0000000003b;
++  __m128i_out = __lsx_vsrlr_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0005847b00011005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0005847b00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000807bf0a1f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000800ecedee68;
++  *((unsigned long *)&__m128i_result[1]) = 0x0005840100000005;
++  *((unsigned long *)&__m128i_result[0]) = 0x0005847b00000000;
++  __m128i_out = __lsx_vsrlr_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00a975be00accf03;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00250023001c001d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x309d2f342a5d2b34;
++  *((unsigned long *)&__m128i_result[1]) = 0x00060eb000000006;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000075c00000cf0;
++  __m128i_out = __lsx_vsrlr_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
+new file mode 100644
+index 000000000..2c3a53416
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlri.c
+@@ -0,0 +1,300 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0005252800052528;
++  *((unsigned long *)&__m128i_result[0]) = 0x0005252800052528;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0200020002000200;
++  *((unsigned long *)&__m128i_result[0]) = 0x0200020002000200;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffc001fffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000200000;
++  *((unsigned long *)&__m128i_result[0]) = 0x001fff8004000000;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0010001000030000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00060001fffe8003;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000200010;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000078c00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000078c00000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x4000400000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000040004000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001800390049ffaa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0029ff96005cff88;
++  *((unsigned long *)&__m128i_result[1]) = 0x001800390049ffaa;
++  *((unsigned long *)&__m128i_result[0]) = 0x0029ff96005cff88;
++  __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03c0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03c0038000000380;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xa2a2a2a3a2a2a2a3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc605c000aedd0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000005151515;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000006302e00;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2000200000013fa0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000013fa0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000020000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000020000000;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x23);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_op0[0]) = 0x370bdfecffecffec;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000dc300003ffb;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000dc300003ffb;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808000000035;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200000000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00018d8e00018d8e;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f801fe000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3fc03fc000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000003fc00ff00;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001fe01fe00;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002;
++  __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x045340a628404044;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001030103;
++  __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlri_h (__m128i_op0, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x86dd8341b164f12b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9611c3985b3159f5;
++  *((unsigned long *)&__m128i_result[1]) = 0x0021b761002c593c;
++  *((unsigned long *)&__m128i_result[0]) = 0x002584710016cc56;
++  __m128i_out = __lsx_vsrlri_w (__m128i_op0, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbbc8ecc5f3ced5f3;
++  *((unsigned long *)&__m128i_result[1]) = 0xc0b4d1a5f8babad3;
++  *((unsigned long *)&__m128i_result[0]) = 0xbbc8ecc5f3ced5f3;
++  __m128i_out = __lsx_vsrlri_d (__m128i_op0, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000feff23560000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fd1654860000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000080801030000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080103040000;
++  __m128i_out = __lsx_vsrlri_b (__m128i_op0, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
+new file mode 100644
+index 000000000..c630b4261
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrn.c
+@@ -0,0 +1,164 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001ffff0001ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000040400000383;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffe000ffff1fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000383ffff1fff;
++  __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000003fc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000003fc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x002affd600000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcbc2723a4f12a5f8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffd60001723aa5f8;
++  __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x467f6080467d607f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808081;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xe000e0006080b040;
++  __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010101030101;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101030101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000fffa0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffa0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101000101010001;
++  __m128i_out = __lsx_vsrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff80ffffffffff80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff80ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978;
++  *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0800010001ff8000;
++  __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff01ff01ac025c87;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff01ff01ac465ca1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
+new file mode 100644
+index 000000000..468a17c15
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vsrlrni.c
+@@ -0,0 +1,686 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff8969ffffd7e2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000d688ffffbd95;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xf12dfafc1ad1f7b3;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x34);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000100;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000c0002000c0002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000400c600700153;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000c0002000c0002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000400c600700153;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000010000007f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0800000400000800;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001515151500;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001515151500;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001515000015150;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fdfd0404;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3fffffff3fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fffffff3fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000fc08;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000800080008000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fc08;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000800080008000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffba420000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x000007e044000400;
++  *((unsigned long *)&__m128i_result[0]) = 0xfdd2100000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000081e003f3f3f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f3f3f0e00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000081e003f3f3f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f3f3f0e00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000103c007e7e8;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000103c007e7e8;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x43);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0202022302023212;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0202ff3f02022212;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000002100003010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff3f00002010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x79);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe2bb5ff00e20aceb;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe2bb5ff00e20aceb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0100010000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00e3000e00e3000e;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf58df7841423142a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f7477f8ff4e2152;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3d3e0505101e4008;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2bd5d429e34a1efb;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfc0203fccbedbba7;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc9f66947f077afd0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x89fed7c07fdf5d00;
++  *((unsigned long *)&__m128i_result[1]) = 0x14f1a50ffe65f6de;
++  *((unsigned long *)&__m128i_result[0]) = 0xa3f83bd8e03fefaf;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6ed694e00e0355db;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000010600000106;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xe00e035606000001;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xe739e7ade77ae725;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbb9013bd049bc9ec;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x56aca41400000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7ade77ae3bd049bd;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000041400000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1010101010101010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8081808180818081;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000006ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0037f80000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x69);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0080808080c04040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101010001808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000202000008081;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001010100010101;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00fff00000001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x6b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000adf0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001e00;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0040000000400040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000020002020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808102;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001010102;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001000100010000b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03fc03fc03fc03fc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x04000400ff01ff01;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1010101010101010;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000fff800000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000001ed68;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1ff6a09e667f3bd8;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000007b5a;
++  *((unsigned long *)&__m128i_result[0]) = 0x999fcef600000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffe5c8000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x91f80badc162a0c4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x99d1ffff0101ff01;
++  *((unsigned long *)&__m128i_result[1]) = 0x00ff400000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x905d0b06cf0008f8;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3802f4fd025800f7;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc8ff0bffff00ffae;
++  *((unsigned long *)&__m128i_op1[0]) = 0x91ff40fffff8ff50;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000200000000700;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000192000001240;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x33);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff0ffd0ffd;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff0ffc0001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbb7743ca4c78461f;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd9743eb5fb4deb3a;
++  *((unsigned long *)&__m128i_result[1]) = 0x003fffffffc3ff44;
++  *((unsigned long *)&__m128i_result[0]) = 0x002eddd0f2931e12;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x4a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbb7743ca4c78461f;
++  *((unsigned long *)&__m128i_op0[0]) = 0xd9743eb5fb4deb3a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x22445e1ad9c3e4f0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1b43e8a30a570a63;
++  *((unsigned long *)&__m128i_result[1]) = 0x743ca4c843eb5fb5;
++  *((unsigned long *)&__m128i_result[0]) = 0x45e1ad9c3e8a30a5;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1204900f62f72565;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x4901725600000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x6a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000400000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000300000003;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f3f3f7fbf3fffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x47);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000040804080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000020100000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffe8ffff28fc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffa;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00007fff0000803e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000006ffff81e1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0ffffffe8ffff290;
++  *((unsigned long *)&__m128i_result[0]) = 0x000007fff0000804;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x44);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000418200000008e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000002100047;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636362;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636362;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636362;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636362;
++  *((unsigned long *)&__m128i_result[1]) = 0x0032003200320032;
++  *((unsigned long *)&__m128i_result[0]) = 0x0032003200320032;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff01010102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ffdf87f0b0c7f7f;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf6b3eb63f6b3f6b3;
++  *((unsigned long *)&__m128i_op1[0]) = 0x363953e42b56432e;
++  *((unsigned long *)&__m128i_result[1]) = 0x010000010080000b;
++  *((unsigned long *)&__m128i_result[0]) = 0x00f700f70036002b;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xed67d6c7ed67ed67;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6c72a7c856ac865c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000700000003;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff40ff83;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1010101010101010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000003030103;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000003030103;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000006060;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000006060;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000002408beb26c8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000706e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000028c27;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000070;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80000b0b80000b0b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000101080001010;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffefefffffeff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0061006100020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000fe00fe;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000078087f08;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000078087f08;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000e0fc0000e0fc;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff0bff76;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x75);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x33);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff00ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000ff00ffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8282828282828282;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000828282828282;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0008000800000008;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00f7000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000005150;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000005150;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000f7000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x24);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x41afddcb1c000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd09e1bd99a2c6eb1;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe82f7c27bb0778af;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000040002;
++  *((unsigned long *)&__m128i_result[0]) = 0x000d000a000f000c;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff8000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffdff0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0144329880000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x007fffc0007ffff0;
++  *((unsigned long *)&__m128i_result[0]) = 0x004000004c400000;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x17);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001e0000001e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffafff0fff9ff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000d800cff8;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vsrlrni_h_w (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000002000007d7;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000300000ff1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x000007d700000ff1;
++  __m128i_out = __lsx_vsrlrni_w_d (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff00ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff00ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000ff8;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000;
++  __m128i_out = __lsx_vsrlrni_d_q (__m128i_op0, __m128i_op1, 0x74);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000f08;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x2020202020202020;
++  __m128i_out = __lsx_vsrlrni_b_h (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-SX-vector-vssran-vssrani-vss.patch b/LoongArch-Add-tests-for-SX-vector-vssran-vssrani-vss.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3618a4f6ef2c6c02f2bec7134d0b2e9af440f90b
--- /dev/null
+++ b/LoongArch-Add-tests-for-SX-vector-vssran-vssrani-vss.patch
@@ -0,0 +1,4954 @@
+From 1009120c617c050d02a6d2abe786728dccf5cb5b Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Tue, 12 Sep 2023 11:17:38 +0800
+Subject: [PATCH 091/124] LoongArch: Add tests for SX vector
+ vssran/vssrani/vssrarn/vssrarni/vssrln /vssrlni/vssrlrn/vssrlrni
+ instructions.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vssran.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrani.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrarn.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrarni.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrln.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrlni.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vssran.c         | 390 ++++++++
+ .../loongarch/vector/lsx/lsx-vssrani.c        | 679 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vssrarn.c        | 669 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vssrarni.c       | 848 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vssrln.c         | 543 +++++++++++
+ .../loongarch/vector/lsx/lsx-vssrlni.c        | 668 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vssrlrn.c        | 470 ++++++++++
+ .../loongarch/vector/lsx/lsx-vssrlrni.c       | 597 ++++++++++++
+ 8 files changed, 4864 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
+new file mode 100644
+index 000000000..e45ca36f0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssran.c
+@@ -0,0 +1,390 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x003f00000000003f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003f000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000001000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffc000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffefffffffeff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffcff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x02b504f305a5c091;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x02b504f305a5c091;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000005602d2;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000003f80b0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xb327b9363c992b2e;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa1e7b475d925730f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000001ff00;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0060e050007f0160;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0040007fff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ffffff00ff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1268f057137a0267;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0048137ef886fae0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000000;
++  __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0141010101410101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x75b043c4d17db125;
++  *((unsigned long *)&__m128i_op1[0]) = 0xeef8227b4f8017b1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x027c027c000027c0;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000006f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000006f00000000;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffd000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff994db09c;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc7639d96;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0fff0fff0fff0fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x9);
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f80000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x800080007f008000;
++  __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000695d00009b8f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000074f20000d272;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00001f5400000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000;
++  __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00010000fffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00010000fffffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff00000000;
++  __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x31b1777777777776;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6eee282828282829;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000006362ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff801c9e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000810000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x40eff02383e383e4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000800000007fff;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffb00fdfdf7ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff8000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000c0c00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a7480007fff8000;
++  __m128i_out = __lsx_vssran_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000fe00fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00fe00fe00fe00fe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000f50000007500;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007e1600007d98;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00fe00fe7fffffff;
++  __m128i_out = __lsx_vssran_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4f4f4f4f4f4f0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4f4f4f4f4f4f0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f007f7f7f00;
++  __m128i_out = __lsx_vssran_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
+new file mode 100644
+index 000000000..7ffcecde7
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrani.c
+@@ -0,0 +1,679 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00001802041b0013;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000007f7f02;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff7fffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff7fffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffff7ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x64);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x47);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0004007c00fc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f7f7f7f00107f04;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f0000fd7f0000fd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00cf01fe01fe01fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000301de01fe01fe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc002000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f00000000000000;
++  __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe31c86e90cda86f7;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000000e3;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x38);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc39fffff007fffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffff0e700000000;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffff0000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f0000007f000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080000180800100;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fff7fc01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x82c539ffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc72df14afbfafdf9;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x23);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000c0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001ffffff29;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000020000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000183fffffe5;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000080000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff000000ff0000;
++  __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fefefe6a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000fbf9;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000007f8;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0a000a000a000a00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f007f007f007f00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000030000003f;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff0003003f;
++  __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x11000f2010000e20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0f000d200e000c20;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x007b01ec007b3a9e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fff9fff9;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fff9fffa;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007ffe7ffe400000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x2a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc485edbcc0000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000c485;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x21011f3f193d173b;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff39ff37ff35ff33;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000015d926c7;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000e41b;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007f7f7f7f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c0c0c0c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0014000100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x35);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00003f80000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffff46;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x4c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffee00000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3a3a3a3b3a3a3a3a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3a3a00003a3a0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000003a0000003a;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x38);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000080000068;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000038003;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000040033;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000007ffc000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000fff0;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000005e94;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00005e96ffffb402;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000000bd;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001fc0000fffeff;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000002fffffffb;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000010000fffb;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000bffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x42);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x79);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000777777777777;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff7777ffff7777;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000003bbbbbbbbbb;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x45);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0007fff800000000;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6a5d5b056f2f4978;
++  *((unsigned long *)&__m128i_op1[0]) = 0x17483c07141b5971;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xd4bade5e2e902836;
++  __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0010001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1000000010001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_hu_w (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00680486ffffffda;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff913bb9951901;
++  *((unsigned long *)&__m128i_op1[1]) = 0x67157b5100005000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x387c7e0a133f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_result[0]) = 0x0c0f000a070f0204;
++  __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x98147a504d145000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x377b810912c0e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0xe);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c7c266e3faa293c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000f3040705;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x86dd8341b164f12b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9611c3985b3159f5;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xff86dd83ff9611c3;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_w_d (__m128i_op0, __m128i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1010111105050000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4040000041410101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000808000020200;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2e2b34ca59fa4c88;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3b2c8aefd44be966;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x2e34594c3b000000;
++  __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff1afffefec0ec85;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff1aff6d48ce567f;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff80c400000148;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff80c1ffffe8de;
++  *((unsigned long *)&__m128i_result[1]) = 0xffe3ffd8ffe30919;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffffffff;
++  __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1313131313131313;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1313131313131313;
++  *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd73691661e5b68b4;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x084d1a0907151a3d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000007d07fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff;
++  __m128i_out = __lsx_vssrani_b_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000014eb54ab;
++  *((unsigned long *)&__m128i_op1[0]) = 0x14eb6a002a406a00;
++  *((unsigned long *)&__m128i_result[1]) = 0xe0001fffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_d_q (__m128i_op0, __m128i_op1, 0x60);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffaf1500000fffa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000f8a40000f310;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000003e2;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_h_w (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf654ad7447e59090;
++  *((unsigned long *)&__m128i_op0[0]) = 0x27b1b106b8145f50;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_wu_d (__m128i_op0, __m128i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff8ffa2fffdffb0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_du_q (__m128i_op0, __m128i_op1, 0x50);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrani_bu_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
+new file mode 100644
+index 000000000..a23ad7cd2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarn.c
+@@ -0,0 +1,669 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include <lsxintrin.h>
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffd24271c4;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2711bad1e8e309ed;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0403cfcf01c1595e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x837cd5db43fc55d4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0404038383838404;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff80007fff;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffcb410000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffeb827ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000800000008;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242070db;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa478;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefd7f7f7f7f7f7e;
++  *((unsigned long *)&__m128i_op0[0]) = 0xdffdbffeba6f5543;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ffffff000000ff;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ffffff000000ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000002010;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff00000000000001;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc1bdceee242070db;
++  *((unsigned long *)&__m128i_op1[0]) = 0xe8c7b756d76aa478;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4f804f804f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000003fffff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003fffff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff000000ff00;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000007ae567a3e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000700ff00000000;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0bd80bd80bdfffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0bd80bd80bd80000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x006f0efe258ca851;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff00010000fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffff00;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000f00f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000007fff;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207f7f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff0000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111311111114111;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111311111110000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2020202020202020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2020202020207fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f417f417f027e03;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9780697084f07dd7;
++  *((unsigned long *)&__m128i_op1[0]) = 0x87e3285243051cf3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fea8ff44;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fea8ff44;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000008000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x13f9c5b60028a415;
++  *((unsigned long *)&__m128i_op0[0]) = 0x545cab1d81a83bea;
++  *((unsigned long *)&__m128i_op1[1]) = 0x13f9c5b60028a415;
++  *((unsigned long *)&__m128i_op1[0]) = 0x545cab1d81a83bea;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff0015172b;
++  __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x14ccc631eb3339ce;
++  *((unsigned long *)&__m128i_op0[0]) = 0x685670d197a98f2e;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003c853c843c87e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff0000ffff0000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000e36400015253;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000035ed0001e000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000e36400015253;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000035ed0001e000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1c6c80007fffffff;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000b4a00008808;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0808080800000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc2fc0000c3040000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc2fc0000c3040000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000060000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0600000100000001;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080008000800080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0080006b00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000500000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7efefefe82010201;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff0000ff;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffff00;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000002ffffffff;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000045340a6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000028404044;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000fffffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000102020204000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x045340a628404044;
++  __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000014;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001400000014;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000adad0000adad;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000052520000adad;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xd6a09e662ab46b31;
++  *((unsigned long *)&__m128i_op0[0]) = 0x34b8122ef4054bb3;
++  *((unsigned long *)&__m128i_op1[1]) = 0x9c9c9c9b509be72f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3513f2e3a1774d2c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000501ffff0005;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0021b761002c593c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x002584710016cc56;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff0000ffff;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000200000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00020000ffff0001;
++  __m128i_out = __lsx_vssrarn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x004001be00dc008e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1f3f06d4fcba4e98;
++  *((unsigned long *)&__m128i_op0[0]) = 0x2e1135681fa8d951;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000007d07fffffff;
++  __m128i_out = __lsx_vssrarn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_w (__m128i_op0, 0x0);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000008686;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00008e5680008685;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007fff7fff8000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffc7f100004000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000c7f14000;
++  __m128i_out = __lsx_vssrarn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4500000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4400000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff000000ff000000;
++  __m128i_out = __lsx_vssrarn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8a8a8a8a8a8a8a8a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8a8a8a8a8a8a8a8a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
+new file mode 100644
+index 000000000..76fac97be
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrarni.c
+@@ -0,0 +1,848 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0020002000200020;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff60090958;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0fa96b88d9944d42;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001802041b0013;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x72);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0200020002000200;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x3f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x5c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0002000000020000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xda4643d5301c4000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc1fc0d3bf55c4000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7505853d654185f5;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01010000fefe0101;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000fe00ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffff;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00020002000d0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000020f2300ee;
++  *((unsigned long *)&__m128i_result[1]) = 0x0400040004000400;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000000f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x79);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000073;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000010000002b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000400000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01ff01ff01ff01ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x59);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000000f0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001800390049ffaa;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0029ff96005cff88;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x03c0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x03c0038000000380;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f0000000f000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0bef0b880bd80bd8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0bd80bd80bdfffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0bd80bd80bd80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000017b017b01;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x5b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffe0001fffe0001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x30);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf0800320fff1fa20;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0032000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1111113111111141;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7f417f417f027e03;
++  *((unsigned long *)&__m128i_op1[1]) = 0xe93d0bd19ff0c170;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5237c1bac9eadf55;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x60);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000065a0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9941d155f43a9d08;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0c0c8b8a8b8b0b0a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8b8a8a898a8a8909;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1817161517161514;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1615141315141312;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc0fffff000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffe00000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x19);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x29);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010001000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000080000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x58);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100fe000100fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x31);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0d1202e19235e2bc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xea38e0f75f6e56d1;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe500ffffc085;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffc000ffffc005;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff00000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100080000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0400400204004002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffe080f6efc100f7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xefd32176ffe100f7;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffe080f6efc100f7;
++  *((unsigned long *)&__m128i_op1[0]) = 0xefd32176ffe100f7;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x2c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000005452505;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000004442403e4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x03fc03fc03fc03fc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000b4a00008808;
++  *((unsigned long *)&__m128i_result[0]) = 0x0808080800000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffff01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x71);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x2ea268972ea2966a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4026f4ffbc175bff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x5d7f5d807fea807f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfff0fffffff00001;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff0fffffff09515;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000ff00000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000003000000d612;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000bfffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000500000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80808080806b000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000c0c0c000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffefffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffe1fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffb;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000080008;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1ab6021f72496458;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7750af4954c29940;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1ab6021f72496458;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7750af4954c29940;
++  *((unsigned long *)&__m128i_result[1]) = 0x6ad8ffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x6ad8ffffffffffff;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002008300500088;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000088;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000020000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1200091212121212;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x51);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_du_q (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_h_w (__m128i_op0, __m128i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000c6c6c6c6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000c6c6c6c6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffeff98;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0014ffe4ff76ffc4;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff86dd83ff9611c3;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000035697d4e;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000013ecaadf2;
++  *((unsigned long *)&__m128i_result[1]) = 0xe280e67f00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f80;
++  __m128i_out = __lsx_vssrarni_b_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x8000000080000000;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x017001a002c80260;
++  *((unsigned long *)&__m128i_op0[0]) = 0x01d8000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2e34594c3b000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssrarni_wu_d (__m128i_op0, __m128i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00060fbf02596848;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00020fbf04581ec0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x010169d9010169d9;
++  *((unsigned long *)&__m128i_op1[0]) = 0x01010287010146a1;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200000001;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op1[1]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x004d004d004d004d;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x06d9090909090909;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x48);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0039d21e3229d4e8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6d339b4f3b439885;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffff000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000d00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffc0000000000000;
++  __m128i_out = __lsx_vssrarni_d_q (__m128i_op0, __m128i_op1, 0x2e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000100000001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x37b951002d81a921;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000075dbe982;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000071e48cca;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0ebb7d300e3c9199;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrarni_w_d (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000930400008a10;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00006f9100007337;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00c2758000bccf42;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00a975be00accf03;
++  *((unsigned long *)&__m128i_result[1]) = 0x00250023001c001d;
++  *((unsigned long *)&__m128i_result[0]) = 0x309d2f342a5d2b34;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff01ffffe41f0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff00000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000155;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000002b;
++  __m128i_out = __lsx_vssrarni_bu_h (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00e400ff00e400;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfee1f6f18800ff7f;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrarni_hu_w (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
+new file mode 100644
+index 000000000..ed600c72d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrln.c
+@@ -0,0 +1,543 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808000008080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080000080800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffff0000;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5ff6a0a40ea8f47c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5ff6a0a40e9da42a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00003ff000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000fffc00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001afffffff7;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000750500006541;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000100fffffefd;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff6fc00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f0000007f000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080000180800100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff00ffff;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_b (__m128i_op0, 0x7);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffefff6fff80002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101017f0101017f;
++  __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00005a5a00005a5a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00005b5a00005b5a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00ff00ff00ff00ff;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x65b780a2ae3bf8ca;
++  *((unsigned long *)&__m128i_op1[0]) = 0x161d0c373c200827;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000001ff;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf10cf508f904fd01;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf10cf508f904fd01;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xf8f8e018f8f8e810;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf8f8f008f8f8f800;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001ffff0003ffff0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fffefffefffef;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffefffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00007fff00007fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000f00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000c0000bd49;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000c7fff000c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000f0009d3c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000016fff9d3d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000c000000060003;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000003a24;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003dbe88077c78c1;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffe0001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00003a247fff7fff;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000003fbf3fbf;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff7fff7fff7ff8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffff0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fbf3fbf00007fff;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000fff00000e36;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000fff0e36;
++  __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffe000ffdf;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffff53d9;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0001ffff9515;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010101010101;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffc0800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000001b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0018;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffefffefffefffef;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0080000700000014;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffbffda;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3e25c8317394dae6;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000ac00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x9c9c9c9c9c9c9c9c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6c6c6;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c6c6c6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssrln_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x64616462b76106dc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x64616462b71d06c2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00c0c000c0000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc0000000c000c000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00c0c000c0000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc0000000c000c000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001e001e001e001e;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001e001e001e001e;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m128i_op1[0]) = 0x59f7fd8759f7fd87;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff;
++  __m128i_out = __lsx_vssrln_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffc0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000001;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007fff7fff8000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000007f7f7f;
++  __m128i_out = __lsx_vssrln_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf589caff5605f2fa;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000a74aa8a55ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0x6adeb5dfcb000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrln_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7f8000007f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssrln_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
+new file mode 100644
+index 000000000..613668143
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlni.c
+@@ -0,0 +1,668 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001ffff00000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x4f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004e005500060031;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff870068fff5ffb3;
++  *((unsigned long *)&__m128i_op1[1]) = 0x004e005500060031;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff870068fff5ffb3;
++  *((unsigned long *)&__m128i_result[1]) = 0x04e00060ffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x04e00060ffffffff;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x52527d7d52527d7d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808000008080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080000080800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001010100010100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x2f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000080007f80800;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00047fff00007fff;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ff0000ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x01fc020000fe0100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000003fc0003;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x56);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0c03e17edd781b11;
++  *((unsigned long *)&__m128i_op0[0]) = 0x342caf9bffff1fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000040000000400;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0c037fff342c7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000fff8fff8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fff80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x37);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff100fffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff00000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x21);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffff100fffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffff100fffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x38);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffff800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001fffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x4b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a000a000a000a00;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf2f2e5e5e5e5e5dc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000003fc0;
++  __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x22);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x35);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x35);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x10);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0008000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[1]) = 0x41dfffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000083b00000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x33);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000003;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x7e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1ff85ffe2ae5d973;
++  *((unsigned long *)&__m128i_op1[1]) = 0x403be000ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000ffc2f;
++  *((unsigned long *)&__m128i_result[0]) = 0x00201df000000000;
++  __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x29);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000005151515;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000006302e00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000000000003f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f417f417f027e03;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001fd0;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x32);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x1b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffbfffffffbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000001ffffff7f;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x5f);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000202fe02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x11);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x01203f1e3d1c3b1a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3918371635143312;
++  *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d1c1b1a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1918171615141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x480f7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00005dcbe7e830c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000001fffff59;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x63);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000007f41;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000002000;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x39);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320076a4d2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x685670d27e00682a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x14ccc6320076a4d2;
++  *((unsigned long *)&__m128i_op1[0]) = 0x685670d27e00682a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0001000100000000;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc000000fc0003fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xbffffff0ffffc00f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000003f0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffc3ffff003e;
++  *((unsigned long *)&__m128i_result[1]) = 0x00c0000000bfffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000ffffff;
++  __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x800000810000807f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x808080010080007f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x800000810000807f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x808080010080007f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000020000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000020000020;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x62);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0400400204004002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000200000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000002002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x6d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000002002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2a29282726252423;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2221201f1e1d1c1b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00a8009800880078;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000807f00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80006b0080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff7fff;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x3);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001010101;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000001fe01;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000001fe01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000f0000000f;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f0f0f0f00000000;
++  __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff010300ff0103;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x555500adfffc5cab;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0101010100000100;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03ff0101fc010102;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03fffffffc010102;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff010181010102;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff81010102;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x0);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000300037ff000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_wu_d (__m128i_op0, __m128i_op1, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000007070707;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x45);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffdfffcfffdfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffdfffcfffdfffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x13);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000053a4f452;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000053a;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x14);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_b_h (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000b3a6000067da;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00004e420000c26a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x7a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x38);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7c7c000000007176;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x3e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000c6c7;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8d8d8d8d8d8cc6c6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_w_d (__m128i_op0, __m128i_op1, 0x3c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000aa822a8228222;
++  *((unsigned long *)&__m128i_op0[0]) = 0x03aa558ec8546eb6;
++  *((unsigned long *)&__m128i_op1[1]) = 0x001a64b345308091;
++  *((unsigned long *)&__m128i_op1[0]) = 0x001f2f2cab1c732a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0155ffff754affff;
++  *((unsigned long *)&__m128i_result[0]) = 0x034cffff03e5ffff;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0xb);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc1bdceee242070dc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe907b754d7eaa478;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_h_w (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_hu_w (__m128i_op0, __m128i_op1, 0x5);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002711350a27112;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00d5701794027113;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_du_q (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000203000010d0;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffc00300000220;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000f50000000900;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000090900000998;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vssrlni_d_q (__m128i_op0, __m128i_op1, 0x20);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000001000010f8;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff8ffa2fffdffb0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0f0f0f0f00000f00;
++  __m128i_out = __lsx_vssrlni_bu_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
+new file mode 100644
+index 000000000..ec688bb12
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrn.c
+@@ -0,0 +1,470 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000200020002;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x004200a000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x004200a000200001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff00007fff7fff;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00040003ff83ff84;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00040003ff4dffca;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffefffefffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000002020202;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffbe6ed563;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000008;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0100000001000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff732a;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000fbf9;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000007f00000000;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000a000a000a000a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000004fc04f81;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000004fc04f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f7f;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc1000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffc1000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff000000007fff;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000bd3d00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffff0000000ad3d;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff000fffff000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fff0000;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf001f0010101f002;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1000100010001000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000dfa6e0c6;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000d46cdc13;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff80df00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00007f7f;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff3fbfffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fbf3fbf00007fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007f7f7f01027f02;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000400000004;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffe0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3f413f4100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f801fe000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000010000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000100000000fc;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000100000000fc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0100000001000000;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0c0b0a090b0a0908;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a09080709080706;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000040a04000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000040a04000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00123fff00120012;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0012001200120012;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00003fff00010000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1200091212121212;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_b_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0800010001ff8000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2e9028362e902836;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2e9028362e902836;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_h_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010;
++  __m128i_out = __lsx_vssrlrn_wu_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x33f5c2d7d975d7fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000084d12ce;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000024170000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrn_w_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002711350a27112;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00d5701794027113;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x4399d3221a29d3f2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0674c886fcba4e98;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfdce8003090b0906;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffff001a00000000;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001000000010;
++  __m128i_out = __lsx_vssrlrn_hu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001fffe00014b41;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fffe0001ffde;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0002000100020002;
++  __m128i_out = __lsx_vssrlrn_bu_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
+new file mode 100644
+index 000000000..02f7ca08b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vssrlrni.c
+@@ -0,0 +1,597 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include &lt;lsxintrin.h&gt;
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x3d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x8080808000008080;
++  *((unsigned long *)&__m128i_result[0]) = 0x8080000080800000;
++  __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000080000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000080000000;
++  __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000017fff9000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000210011084;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000007f0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000007f00;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000000;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x8);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010400100203;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0103010301020109;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000110000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000007f00000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0202000402020202;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000200000010000;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x56);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x6d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0001ffff8002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010000400020004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffff20ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffc0020ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x07fff80000008000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000007ffe001;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x7c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x03574e3b94f2ca31;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000001f807b89;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000005050000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0505000005050505;
++  *((unsigned long *)&__m128i_result[1]) = 0x000d02540000007e;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001400140014;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x41);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x3b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x56a09e662ab46b31;
++  *((unsigned long *)&__m128i_op1[0]) = 0xb4b8122ef4054bb3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x02b504f305a5c091;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x37);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000d000d000d000d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000d000d000d000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000680000006800;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x25);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000400;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xe);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00005555aaabfffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x003fffffff000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000000ab;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000000000ff;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x43);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000080;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x18);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000;
++  __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x34);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x027c027c000027c0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000004f804f81;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000004f804f80;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000010000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001400000014;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff81007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffb7005f0070007c;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff80007e028401;
++  *((unsigned long *)&__m128i_op1[0]) = 0x9a10144000400000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000001ffff00010;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x5b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x29);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000040000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000080000000000;
++  __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x7);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffff9cff05;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff9cfebd;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000002;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff7ffffef77fffdd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf77edf9cffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001fffff001fffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001fffff001fffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x21201f1e1d1c1b1a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1918171615141312;
++  *((unsigned long *)&__m128i_result[1]) = 0x10ff10ff10ff10ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffa6ff91fdd8ef77;
++  *((unsigned long *)&__m128i_op0[0]) = 0x061202bffb141c38;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0101010101010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x010101fe0101fe87;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000004000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffd60001723aa5f8;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000007f007f7f;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f7f7f7f7f7f7f;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x808080e280808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080636380806363;
++  *((unsigned long *)&__m128i_op1[1]) = 0x808080e280808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080636380806363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0004000400040004;
++  *((unsigned long *)&__m128i_result[0]) = 0x0004000400040004;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1d);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000d0000000d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000dffff000d;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000070007;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000007ffff;
++  __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000800c00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000800000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000007fff7fff;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x9);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffff0100ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0607060700000807;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0707f8f803e8157e;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x31);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x21);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xc);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op0[0]) = 0x5252525252525252;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc0808000c0808000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000003020302;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x16);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffc0ff80ff800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_wu_d (__m128i_op0, __m128i_op1, 0x15);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff0000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x4);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ffffffe00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x3a);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffc0800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000008080600;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_op0[0]) = 0x52525252adadadad;
++  *((unsigned long *)&__m128i_op1[1]) = 0x800000007fffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x800000007fffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
++  __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x6);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003ef89df07f0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003ec0fc0fbfe001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3ff800ff2fe6c00d;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfff40408ece0e0de;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0xa);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4000400040004000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x12);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ff960001005b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ffa500010003;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_result[0]) = 0x0020000000000000;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x2b);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1748c4f9ed1a5870;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffffffffffff;
++  __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff7ffffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000fffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfcfcfcdcfcfcfcdc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010001000000010;
++  *((unsigned long *)&__m128i_result[0]) = 0x0010001000100010;
++  __m128i_out = __lsx_vssrlrni_hu_w (__m128i_op0, __m128i_op1, 0x1c);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001f0000001f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x4000000040000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x27);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0x28);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_du_q (__m128i_op0, __m128i_op1, 0x26);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x117d7f7b093d187f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000034;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfe1bfefe00011ee1;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe1bfe6c03824c60;
++  *((unsigned long *)&__m128i_result[1]) = 0x7f7f7f7f0000001a;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f7f017f7f7f7f7f;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffff3a81ffff89fd;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffb3c3ffff51ba;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0802080408060803;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0xd);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffff00ffffff00ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff000900ffff98;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fffffff7fffffff;
++  __m128i_out = __lsx_vssrlrni_w_d (__m128i_op0, __m128i_op1, 0xf);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0xc);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000056000056;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffefff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xa03aa03ae3e2e3e2;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_d_q (__m128i_op0, __m128i_op1, 0x75);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000760151;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003e0021009a009a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000246d9755;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000003e2427c2ee;
++  *((unsigned long *)&__m128i_result[1]) = 0x00001e5410082727;
++  *((unsigned long *)&__m128i_result[0]) = 0x00007f7f00107f7f;
++  __m128i_out = __lsx_vssrlrni_b_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000f1384;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000004ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ffffffff;
++  __m128i_out = __lsx_vssrlrni_bu_h (__m128i_op0, __m128i_op1, 0x2);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x10f8000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vssrlrni_h_w (__m128i_op0, __m128i_op1, 0x1e);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-for-the-SX-vector-multiplication.patch b/LoongArch-Add-tests-for-the-SX-vector-multiplication.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b39a8e32e2adc8e6ba6d07cc6fe7f4a76a4d9004
--- /dev/null
+++ b/LoongArch-Add-tests-for-the-SX-vector-multiplication.patch
@@ -0,0 +1,2990 @@
+From 239d4bdbbc72f83efba3830203443b0b2ba4f2ca Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen 
+Date: Mon, 11 Sep 2023 10:15:12 +0800
+Subject: [PATCH 083/124] LoongArch: Add tests for the SX vector multiplication
+ instruction.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmul.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c: New test.
+	* gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lsx/lsx-vmuh-1.c         | 353 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmuh-2.c         | 372 +++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmul.c           | 282 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vmulwev-1.c      | 434 ++++++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmulwev-2.c      | 344 ++++++++++++++
+ .../loongarch/vector/lsx/lsx-vmulwev-3.c      | 245 ++++++++++
+ .../loongarch/vector/lsx/lsx-vmulwod-1.c      | 272 +++++++++++
+ .../loongarch/vector/lsx/lsx-vmulwod-2.c      | 282 ++++++++++++
+ .../loongarch/vector/lsx/lsx-vmulwod-3.c      | 308 +++++++++++++
+ 9 files changed, 2892 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
+new file mode 100644
+index 000000000..ab650a024
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-1.c
+@@ -0,0 +1,353 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x059a35ef139a8e00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_result[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_result[0]) = 0x4040404040404040;
++  __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xc0c00000c0c00000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc0c00c01c2cd0009;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc0fffff000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffe00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff0000ac26;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00ff000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ffffff81fe;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffff00ffff7e01;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000fffe01fd02;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ffff0000fe86;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7f7f7f7f7f7f7f7f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff8000010f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff800000ff800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fff80000;
++  __m128i_out = __lsx_vmuh_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbf3efff536d5169b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ebdfffffddf3f40;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3f5ec0a0feefa0b0;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffff7ffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fffffff3ffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x3fffffff3ffffffe;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003f0000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000;
++  *((unsigned long *)&__m128i_result[1]) = 0x001f7fc100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x001f7fff00000000;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000000010000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000cd630000cd63;
++  *((unsigned long *)&__m128i_op1[1]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcd636363cd636363;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffcd63ffffcd63;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffd765ffffd765;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xff7fffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0040000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000015516a768038;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffff9ed2e1c000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x007ffd0001400840;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x007ffd0001400840;
++  *((unsigned long *)&__m128i_result[1]) = 0x3fffffff80000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00003ffd000a4000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0032000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0032000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000009c400000000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0002000200020002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0202fe02fd020102;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000202fe02;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000006362ffff;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fffe0002;
++  __m128i_out = __lsx_vmuh_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
+new file mode 100644
+index 000000000..60b6e3503
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmuh-2.c
+@@ -0,0 +1,372 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000011;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000c5ac01015b;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaaacac88a3a9a96a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000001300000013;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000038003;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000040033;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000080000068;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000014;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x10f881a20ffd02b0;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0ff780a10efc01af;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fe7f0000;
++  __m128i_out = __lsx_vmuh_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000efffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001001100110068;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x1d8000001d800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1d8000001d800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1d8000001d800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1d8000001d800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0366000003660000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0366000003660000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000800;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ef400ad21fc7081;
++  *((unsigned long *)&__m128i_op0[0]) = 0x28bf0351ec69b5f2;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffff6080ffff4417;
++  *((unsigned long *)&__m128i_result[1]) = 0x7ef3ddac21fc5a2c;
++  *((unsigned long *)&__m128i_result[0]) = 0x28bee9edec690869;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  int_out = __lsx_vpickve2gr_h (__m128i_op0, 0x0);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000200000002000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe000ffdf;
++  *((unsigned long *)&__m128i_result[1]) = 0x00001fff00001fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000800000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000214f;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc31b63d846ebc810;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00ff0000800000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffff941d;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000010a7;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000046ebaa2c;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000cf4f4f00;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000cf4f4f00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000005f0003e000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000897957687;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000408;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000003397dd140;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000004bd7cdd20;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0016ffb00016ffb0;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0016ffb00016ffb0;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000004a294b;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000006d04bc;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x007ffe7ffe400000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x007ffd0001400840;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffcfffcfffcfffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffffffffe;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffa800000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000157;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001a64b345308091;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001f2f2cab1c732a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1baf8eabd26bc629;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1c2640b9a8e9fb49;
++  *((unsigned long *)&__m128i_result[1]) = 0x0002dab8746acf8e;
++  *((unsigned long *)&__m128i_result[0]) = 0x00036dd1c5c15856;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x00003a7fc58074ff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000eeff1100e;
++  __m128i_out = __lsx_vmuh_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
+new file mode 100644
+index 000000000..8ba666275
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmul.c
+@@ -0,0 +1,282 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x54feed87bc3f2be1;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8064d8f6a494afcb;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffffe003c1f0077;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffff0074230438;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000ff0000000438;
++  __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_bu (__m128i_op0, 0x2);
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000800800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000800800000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000004000000000;
++  __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfff5fff4002ffff5;
++  *((unsigned long *)&__m128i_op1[1]) = 0xaa858644fb8b3d49;
++  *((unsigned long *)&__m128i_op1[0]) = 0x18499e2cee2cc251;
++  *((unsigned long *)&__m128i_result[1]) = 0x8644000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xaed495f03343a685;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7505443065413aed;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7505443065413aed;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0100d6effefd0498;
++  *((unsigned long *)&__m128i_result[1]) = 0xb71289fdfbea3f69;
++  *((unsigned long *)&__m128i_result[0]) = 0x4e17c2ffb4851a40;
++  __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfc01fcfefc02fdf7;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe00fcfffe01fd01;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfc01fd1300000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfe00fd1400010000;
++  *((unsigned long *)&__m128i_result[1]) = 0xc72ef153fc02fdf7;
++  *((unsigned long *)&__m128i_result[0]) = 0xca31bf15fd010000;
++  __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff00ff00ff00ff00;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc000c000c000ff81;
++  *((unsigned long *)&__m128i_op1[1]) = 0x5d5d5d5d5d5d5d5d;
++  *((unsigned long *)&__m128i_op1[0]) = 0x5d5d5d5d5d5d0000;
++  *((unsigned long *)&__m128i_result[1]) = 0xa2a2a2a3a2a2a2a3;
++  *((unsigned long *)&__m128i_result[0]) = 0xc605c000aedd0000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xacc8c794af2caf01;
++  *((unsigned long *)&__m128i_op0[0]) = 0xa91e2048938c40f0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xeeb1e4f43c3763f3;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff5a6fe3d7;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000021e79364;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000718ea657431b;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000006ca193ec;
++  *((unsigned long *)&__m128i_result[0]) = 0x00008e72b5b94cad;
++  __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffe000ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x467f6080467d607f;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000001000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x007f008000ea007f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00ff00ff00ff00ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffe0004fffe0004;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc1bdceee242070db;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe8c7b756d76aa478;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3f433212dce09025;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf359f359f359f359;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf359f359f359f359;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd3259a2984048c23;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf9796558e39953fd;
++  *((unsigned long *)&__m128i_result[1]) = 0x86dd8341b164f12b;
++  *((unsigned long *)&__m128i_result[0]) = 0x9611c3985b3159f5;
++  __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffd27db010d20fbf;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffd27db010d20fbf;
++  *((unsigned long *)&__m128i_result[1]) = 0x9727b8499727b849;
++  *((unsigned long *)&__m128i_result[0]) = 0x12755900b653f081;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0303030303030303;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x02f3030303030303;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x06d9090909090909;
++  __m128i_out = __lsx_vmul_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff81ffff7f03;
++  *((unsigned long *)&__m128i_op0[0]) = 0x04ffff8101ff81ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0a0000001e000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0a000000f6000000;
++  __m128i_out = __lsx_vmul_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x317fce80317fce80;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmul_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
+new file mode 100644
+index 000000000..8357f4e80
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-1.c
+@@ -0,0 +1,434 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000158;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x001f7fc100000404;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000002a000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff0101ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fffffffa0204000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffe1ffc100000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000400000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000009000900;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000009000900;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x30eb022002101b20;
++  *((unsigned long *)&__m128i_op0[0]) = 0x020310edc003023d;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000ffc3;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6363636363636363;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xff9dff9dff9dff9d;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000efffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffe50000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000ff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffe020;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fc00000010a000b;
++  *((unsigned long *)&__m128i_result[1]) = 0x00001b0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ff81007c;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffb7005f0070007c;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000104000800;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000007c;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000005f0003e000;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x4040404040404040;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffbfc0ffffbfc0;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff0100000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff0100000001;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000208000002080;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x478b478b38031779;
++  *((unsigned long *)&__m128i_op1[0]) = 0x6b769e690fa1e119;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fe98c2a0;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00007770ffff9411;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000004000000040;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff9411;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000100000001000;
++  *((unsigned long *)&__m128i_result[0]) = 0x37b951002d81a921;
++  __m128i_out = __lsx_vmulwev_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000e0000000e0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000e0000000e0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000c400;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000ffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff80000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000016;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffb4ff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffffffff98dea;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0006000000040000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000000000007;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000f80007;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000006c80031;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000101010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000010001;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000202020200;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0808080808080808;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0004280808080808;
++  *((unsigned long *)&__m128i_result[1]) = 0x0010203030201000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000808080800;
++  __m128i_out = __lsx_vmulwev_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000b5207f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x2000000020000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000200200000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0x6a57a30ff0000000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffff7;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffffff700000009;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000104000800;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8001000180010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8001000184000800;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff80007e028401;
++  *((unsigned long *)&__m128i_result[0]) = 0x9a10144000400000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000bd003d;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000fffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0010000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000077af9450;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000047404f4f040d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000214f;
++  *((unsigned long *)&__m128i_result[0]) = 0xc31b63d846ebc810;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
+new file mode 100644
+index 000000000..e4afc8247
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-2.c
+@@ -0,0 +1,344 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xfe01fe01fe01fe01;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000fe01fe01;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000200020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x8000000080000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x6a5d5b056f2f4978;
++  *((unsigned long *)&__m128i_op0[0]) = 0x17483c07141b5971;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd4bade5e2e902836;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x345002920f3017d6;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000c0010000a186;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00067fff0002a207;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffff0002;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_result[0]) = 0x05fafe0101fe000e;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc1f03e1042208410;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_result[0]) = 0x00f0001000000010;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000eefff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf8e1a03affffe3e2;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3a80613fda5dcb4a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x93f0b81a914c003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000051649b6;
++  *((unsigned long *)&__m128i_result[0]) = 0xd2f005e44bb43416;
++  __m128i_out = __lsx_vmulwev_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001fc0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fffffff7fffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000200000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0001fffe00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000001fffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffff000f0008d3c;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff0016fff8d3d;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffff000f0008d3c;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffff0016fff8d3d;
++  *((unsigned long *)&__m128i_result[1]) = 0xe10000004deb2610;
++  *((unsigned long *)&__m128i_result[0]) = 0xe101e0014dec4089;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x111110ff11111141;
++  *((unsigned long *)&__m128i_op1[0]) = 0x11111131111116a6;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x2028000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001001100110068;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xd400c02000002acf;
++  *((unsigned long *)&__m128i_op1[0]) = 0xf4000020c4000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x6453f5e01d6e5000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000fdec000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xbfd10d0d7b6b6b73;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc5c534920000c4ed;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m128i_op0[0]) = 0x59f7fd8759f7fd87;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000001700000017;
++  *((unsigned long *)&__m128i_op1[0]) = 0x59f7fd8759f7fd87;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000021100000211;
++  *((unsigned long *)&__m128i_result[0]) = 0xfb141d31fb141d31;
++  __m128i_out = __lsx_vmulwev_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f800000976801fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x837c1ae57f8012ed;
++  *((unsigned long *)&__m128i_result[1]) = 0x976801fd6897fe02;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8012ec807fed13;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000008000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0080000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000100010001fffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000800080;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0002ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff8000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0909090900000909;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0909090909090909;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000100;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a80613fda5dcb4a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x93f0b81a914c003b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000feff23560000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000fd1654860000;
++  *((unsigned long *)&__m128i_result[1]) = 0x1e242e4d68dc0000;
++  *((unsigned long *)&__m128i_result[0]) = 0x2ff8fddb7ae20000;
++  __m128i_out = __lsx_vmulwev_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000060000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000060000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff000ff6220c0c1;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffe8081000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000007ff000ff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
+new file mode 100644
+index 000000000..346f0316a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwev-3.c
+@@ -0,0 +1,245 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000017fda829;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0xff01ff01ff01ff01;
++  *((unsigned long *)&__m128i_result[0]) = 0xff01ff01ff01fc10;
++  __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0042003e0042002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fffc0001fffc;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffbeffc2ffbeffd1;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000003f80;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x80000000fff80000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000004000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfff8004000000000;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffff8607db959f;
++  *((unsigned long *)&__m128i_op0[0]) = 0xff0cff78ff96ff14;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000008a0000008a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000008900000009;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000043c5ea7b6;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000008fc4ef7b4;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffff46;
++  *((unsigned long *)&__m128i_result[1]) = 0xfffffffe00000002;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffff46000000ba;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffc;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf8f8372f752402ee;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffc0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80044def00000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ff000000ff00;
++  *((unsigned long *)&__m128i_result[1]) = 0x00007f8449a19084;
++  *((unsigned long *)&__m128i_result[0]) = 0x49a210000000ff00;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfffcfd000000fb00;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001fe00f8000700;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xfdfef9ff0efff900;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7efefefe82010201;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[0]) = 0x7afafaf88a050a05;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcda585aebbb2836a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xcda585aebbb2836a;
++  *((unsigned long *)&__m128i_result[1]) = 0xd78cfd70b5f65d76;
++  *((unsigned long *)&__m128i_result[0]) = 0x5779108fdedda7e4;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x67eb85afb2ebb000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xc8847ef6ed3f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0xd48acbfe13102acf;
++  *((unsigned long *)&__m128i_result[0]) = 0xf4af70d0c4000000;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffe0000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000056;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000ffffff86;
++  *((unsigned long *)&__m128i_result[1]) = 0x00000000000eefff;
++  *((unsigned long *)&__m128i_result[0]) = 0xf8e1a03affffe3e2;
++  __m128i_out = __lsx_vmulwev_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
+new file mode 100644
+index 000000000..6eea49a61
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-1.c
+@@ -0,0 +1,272 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000006;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0100010000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0100010000010000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000003ddc5dac;
++  *((unsigned long *)&__m128i_op1[1]) = 0x67157b5100005000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x387c7e0a133f2000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000004870ba0;
++  __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfefe000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff000000000155;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffff8001ffff8001;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7ff0000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x3ff0010000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x3ff0010000000000;
++  __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00009c7c00007176;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x440ef000440ef000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x4400000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3a8000003a800000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000ef0000000003b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0f8d33000f8d3300;
++  *((unsigned long *)&__m128i_result[0]) = 0x0003b80000000000;
++  __m128i_out = __lsx_vmulwod_w_h (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0018001800180018;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0018001800180018;
++  *((unsigned long *)&__m128i_op1[1]) = 0x85bd6b0e94d89998;
++  *((unsigned long *)&__m128i_op1[0]) = 0xd83c8081ffff808f;
++  *((unsigned long *)&__m128i_result[1]) = 0xfff489b693120950;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffc45a851c40c18;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffe5;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000001;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3e1f321529232736;
++  *((unsigned long *)&__m128i_op1[0]) = 0x161d0c373c200826;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x3f8000003f800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0001000000010001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0001000000010001;
++  *((unsigned long *)&__m128i_result[1]) = 0x00003f8000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00003f8000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff000000007fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffffffdfffdf;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffe000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6fde000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000fef01000f27ca;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000010000010101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0101000001000100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000ffef0010000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000ffffffe0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000005452505;
++  *((unsigned long *)&__m128i_op1[0]) = 0x000000044525043c;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3fc03fc000000004;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffc03fc040;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000000a;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000000a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00fe000100cf005f;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7f00;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000400028000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000004;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xc110000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xc00d060000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x3ff0000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x40f3fa0000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xf047ef0000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007fff7fff8000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffff100000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000f0000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
+new file mode 100644
+index 000000000..f3e4e0390
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-2.c
+@@ -0,0 +1,282 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x004e005500060031;
++  *((unsigned long *)&__m128i_op1[0]) = 0xff870068fff5ffb3;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000200010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xfa31dfa21672e711;
++  *((unsigned long *)&__m128i_op0[0]) = 0x1304db85e468073a;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000150000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffeffff001effff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000ffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000fffff1a0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000000000000f00f;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000080;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  unsigned_int_out = __lsx_vpickve2gr_hu (__m128i_op0, 0x4);
++  *((unsigned long *)&__m128i_op0[1]) = 0xe2560afe9c001a18;
++  *((unsigned long *)&__m128i_op0[0]) = 0xe2560afe9c001a18;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000ff0000857a;
++  *((unsigned long *)&__m128i_op1[0]) = 0x05fafe0101fe000e;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000d82;
++  *((unsigned long *)&__m128i_result[0]) = 0x046a09ec009c0000;
++  __m128i_out = __lsx_vmulwod_h_bu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfe80ffffffffff02;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff00000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f3f018000000000;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf0fd800080000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000a00028004000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000005a00000228;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffff9ee000004ec;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffacdb6dbecac;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1f5533a694f902c0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x1f54e0ab00000000;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00e4880080000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0080810080808100;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xff011fb11181d8ea;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80ff800000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00fe00fe000200fe;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00fe00fe000200fe;
++  *((unsigned long *)&__m128i_result[1]) = 0x00fd02fe00002302;
++  *((unsigned long *)&__m128i_result[0]) = 0x007ffd0200000000;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfffe0001fffe0001;
++  __m128i_out = __lsx_vmulwod_w_hu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x14ccc6320076a4d2;
++  *((unsigned long *)&__m128i_op0[0]) = 0x685670d27e00682a;
++  *((unsigned long *)&__m128i_op1[1]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_op1[0]) = 0xfffffffffffffffe;
++  *((unsigned long *)&__m128i_result[1]) = 0x14ccc631eb3339ce;
++  *((unsigned long *)&__m128i_result[0]) = 0x685670d197a98f2e;
++  __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xf000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffc0ffc0003f;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffc0ffc0003f003f;
++  *((unsigned long *)&__m128i_op1[1]) = 0x000000400000004c;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00007770ffff941d;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000ffff000;
++  *((unsigned long *)&__m128i_result[0]) = 0x000077529b522400;
++  __m128i_out = __lsx_vmulwod_d_wu (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1111113111111141;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1111113111111121;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000111111312;
++  *((unsigned long *)&__m128i_result[0]) = 0x2222272111111410;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000001c88bf0;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000001c88bf0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xfffffff800000003;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffff0015172b;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000800000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffffff800000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0001000100010001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0001000600000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000c6c6c6c6;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000c6c6c6c6;
++  *((unsigned long *)&__m128i_result[1]) = 0x000000000000c6c7;
++  *((unsigned long *)&__m128i_result[0]) = 0x8d8d8d8d8d8cc6c6;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0a0000000a000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7f7f00007f7f7500;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3b42017f3a7f7f01;
++  *((unsigned long *)&__m128i_result[1]) = 0x04faf60009f5f092;
++  *((unsigned long *)&__m128i_result[0]) = 0x04fafa9200000000;
++  __m128i_out = __lsx_vmulwod_q_du (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
+new file mode 100644
+index 000000000..9f5702e2c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vmulwod-3.c
+@@ -0,0 +1,308 @@
++/* { dg-do run } */
++/* { dg-options "-mlsx -w -fno-strict-aliasing" } */
++#include "../simd_correctness_check.h"
++#include 
++
++int
++main ()
++{
++  __m128i __m128i_op0, __m128i_op1, __m128i_op2, __m128i_out, __m128i_result;
++  __m128 __m128_op0, __m128_op1, __m128_op2, __m128_out, __m128_result;
++  __m128d __m128d_op0, __m128d_op1, __m128d_op2, __m128d_out, __m128d_result;
++
++  int int_op0, int_op1, int_op2, int_out, int_result, i = 1, fail;
++  long int long_op0, long_op1, long_op2, lont_out, lont_result;
++  long int long_int_out, long_int_result;
++  unsigned int unsigned_int_out, unsigned_int_result;
++  unsigned long int unsigned_long_int_out, unsigned_long_int_result;
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xbf8000000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xcf00000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000000000ffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000ff020000fff4;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7fc000007fc00000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1e801ffc7fc00000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00001ee100000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000007fff7fff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x3f5ec0a0feefa0b0;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x00000000ff02d060;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_h_bu_b (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x80000000fff8fff8;
++  *((unsigned long *)&__m128i_op0[0]) = 0x80000000fff80000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000010;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0010001000100010;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000001000100;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000001000100;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00000000004a294b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x00000000006d04bc;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x55aa55aa55aa55ab;
++  *((unsigned long *)&__m128i_op0[0]) = 0xaa55555655aaaaa8;
++  *((unsigned long *)&__m128i_op1[1]) = 0x7ef4002d21fc7001;
++  *((unsigned long *)&__m128i_op1[0]) = 0x28bf02d1ec6a35b2;
++  *((unsigned long *)&__m128i_result[1]) = 0x2a7b7c9260f90ee2;
++  *((unsigned long *)&__m128i_result[0]) = 0x1b1c6cdfd57f5736;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x003fffff00000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000004040504;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000004040504;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000010100000101;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x7fff00007fff0000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_d_wu_w (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000ffff00000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000feff01;
++  *((unsigned long *)&__m128i_result[0]) = 0x00feff0100000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0101010202050120;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0101010102020202;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xf51cf8dad6040188;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0982e2daf234ed87;
++  *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_result[0]) = 0x0ae3072529fbfe78;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x00000000000007f8;
++  *((unsigned long *)&__m128i_op1[0]) = 0x00000000000007f8;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x00ff000000ff0000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000005;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  *((unsigned long *)&__m128i_op0[1]) = 0x030804010d090107;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op1[1]) = 0x1313131313131313;
++  *((unsigned long *)&__m128i_op1[0]) = 0x1313131313131313;
++  *((unsigned long *)&__m128i_result[1]) = 0x0039d21e3229d4e8;
++  *((unsigned long *)&__m128i_result[0]) = 0x6d339b4f3b439885;
++  __m128i_out = __lsx_vmulwod_q_du_d (__m128i_op0, __m128i_op1);
++  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
++
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-tests-of-mstrict-align-option.patch b/LoongArch-Add-tests-of-mstrict-align-option.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f35b146de43582e6c50d7da0195625a3a86ecd25
--- /dev/null
+++ b/LoongArch-Add-tests-of-mstrict-align-option.patch
@@ -0,0 +1,37 @@
+From f07b91862055533d779fbf76c12cb7c0ae75b53d Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen <chenxiaolong@loongson.cn>
+Date: Mon, 11 Sep 2023 09:35:24 +0800
+Subject: [PATCH 076/124] LoongArch: Add tests of -mstrict-align option.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/strict-align.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/testsuite/gcc.target/loongarch/strict-align.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/strict-align.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/strict-align.c b/gcc/testsuite/gcc.target/loongarch/strict-align.c
+new file mode 100644
+index 000000000..040d84958
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/strict-align.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-Ofast -mstrict-align -mlasx" } */
++/* { dg-final { scan-assembler-not "vfadd.s" } } */
++
++void
++foo (float *restrict x, float *restrict y)
++{
++  x[0] = x[0] + y[0];
++  x[1] = x[1] + y[1];
++  x[2] = x[2] + y[2];
++  x[3] = x[3] + y[3];
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Add-testsuite-framework-for-Loongson-SX-AS.patch b/LoongArch-Add-testsuite-framework-for-Loongson-SX-AS.patch
new file mode 100644
index 0000000000000000000000000000000000000000..23e5f38a96c08b93ac5363cc030c57391fda7987
--- /dev/null
+++ b/LoongArch-Add-testsuite-framework-for-Loongson-SX-AS.patch
@@ -0,0 +1,131 @@
+From aebd03c944312be767f03d129eeebc0c4cdf5b4a Mon Sep 17 00:00:00 2001
+From: Xiaolong Chen <chenxiaolong@loongson.cn>
+Date: Mon, 11 Sep 2023 09:36:35 +0800
+Subject: [PATCH 077/124] LoongArch: Add testsuite framework for Loongson
+ SX/ASX.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/vector/loongarch-vector.exp: New test.
+	* gcc.target/loongarch/vector/simd_correctness_check.h: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ .../loongarch/vector/loongarch-vector.exp     | 42 +++++++++++++++
+ .../loongarch/vector/simd_correctness_check.h | 54 +++++++++++++++++++
+ 2 files changed, 96 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
+new file mode 100644
+index 000000000..2c37aa91d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/loongarch-vector.exp
+@@ -0,0 +1,42 @@
++#Copyright(C) 2023 Free Software Foundation, Inc.
++
++#This program is free software; you can redistribute it and / or modify
++#it under the terms of the GNU General Public License as published by
++#the Free Software Foundation; either version 3 of the License, or
++#(at your option) any later version.
++#
++#This program is distributed in the hope that it will be useful,
++#but WITHOUT ANY WARRANTY; without even the implied warranty of
++#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
++#GNU General Public License for more details.
++#
++#You should have received a copy of the GNU General Public License
++#along with GCC; see the file COPYING3.If not see
++# <http://www.gnu.org/licenses/>.
++
++#GCC testsuite that uses the `dg.exp' driver.
++
++#Exit immediately if this isn't a LoongArch target.
++if ![istarget loongarch*-*-*] then {
++    return
++}
++
++#Load support procs.
++load_lib gcc-dg.exp
++
++#If a testcase doesn't have special options, use these.
++global DEFAULT_CFLAGS
++if ![info exists DEFAULT_CFLAGS] then {
++    set DEFAULT_CFLAGS " "
++}
++
++#Initialize `dg'.
++dg-init
++
++#Main loop.
++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lsx/*.\[cS\]]] \
++	" -mlsx" $DEFAULT_CFLAGS
++dg-runtest [lsort [glob -nocomplain $srcdir/$subdir/lasx/*.\[cS\]]] \
++	" -mlasx" $DEFAULT_CFLAGS
++# All done.
++dg-finish
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
+new file mode 100644
+index 000000000..eb7fbd59c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/vector/simd_correctness_check.h
+@@ -0,0 +1,54 @@
++#include <stdio.h>
++#include <stdint.h>
++#include <stdlib.h>
++
++#define ASSERTEQ_64(line, ref, res)                                           \
++  do                                                                          \
++    {                                                                         \
++      int fail = 0;                                                           \
++      for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i)             \
++        {                                                                     \
++          long *temp_ref = &ref[i], *temp_res = &res[i];                      \
++          if (abs (*temp_ref - *temp_res) > 0)                                \
++            {                                                                 \
++              printf (" error: %s at line %ld , expected " #ref               \
++                      "[%ld]:0x%lx, got: 0x%lx\n",                            \
++                      __FILE__, line, i, *temp_ref, *temp_res);               \
++              fail = 1;                                                       \
++            }                                                                 \
++        }                                                                     \
++      if (fail == 1)                                                          \
++        abort ();                                                             \
++    }                                                                         \
++  while (0)
++
++#define ASSERTEQ_32(line, ref, res)                                           \
++  do                                                                          \
++    {                                                                         \
++      int fail = 0;                                                           \
++      for (size_t i = 0; i < sizeof (res) / sizeof (res[0]); ++i)             \
++        {                                                                     \
++          int *temp_ref = &ref[i], *temp_res = &res[i];                       \
++          if (abs (*temp_ref - *temp_res) > 0)                                \
++            {                                                                 \
++              printf (" error: %s at line %ld , expected " #ref               \
++                      "[%ld]:0x%x, got: 0x%x\n",                              \
++                      __FILE__, line, i, *temp_ref, *temp_res);               \
++              fail = 1;                                                       \
++            }                                                                 \
++        }                                                                     \
++      if (fail == 1)                                                          \
++        abort ();                                                             \
++    }                                                                         \
++  while (0)
++
++#define ASSERTEQ_int(line, ref, res)                                          \
++  do                                                                          \
++    {                                                                         \
++      if (ref != res)                                                         \
++        {                                                                     \
++          printf (" error: %s at line %ld , expected %d, got %d\n", __FILE__, \
++                  line, ref, res);                                            \
++        }                                                                     \
++    }                                                                         \
++  while (0)
+-- 
+2.33.0
+
diff --git a/LoongArch-Adjust-C-multilib-header-layout.patch b/LoongArch-Adjust-C-multilib-header-layout.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8fc16d8c6e505784cf7b95ca7d5e9e10687bcea1
--- /dev/null
+++ b/LoongArch-Adjust-C-multilib-header-layout.patch
@@ -0,0 +1,53 @@
+From a4bf17e87a965ed7f2bb1d2921fb9dd820c79a96 Mon Sep 17 00:00:00 2001
+From: Yang Yujie <yangyujie@loongson.cn>
+Date: Thu, 7 Sep 2023 14:50:10 +0800
+Subject: [PATCH 069/124] LoongArch: Adjust C++ multilib header layout.
+
+For LoongArch, the toplevel library build is always aliased to
+one of the multilib variants.  This patch installs it with the
+actual MULTISUBDIR (instead of ".") so that the headers can be
+reached by the compiler.
+
+This patch is an update of
+https://gcc.gnu.org/pipermail/gcc-patches/2023-September/629435.html
+
+libstdc++-v3/ChangeLog:
+
+	* configure.host: Register t-loongarch in tmake_file.
+	* config/cpu/loongarch/t-loongarch: New file.  Manually refresh
+	MULTISUBDIR with $(shell $(CXX) --print-multi-directory).
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ libstdc++-v3/config/cpu/loongarch/t-loongarch | 1 +
+ libstdc++-v3/configure.host                   | 5 ++++-
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+ create mode 100644 libstdc++-v3/config/cpu/loongarch/t-loongarch
+
+diff --git a/libstdc++-v3/config/cpu/loongarch/t-loongarch b/libstdc++-v3/config/cpu/loongarch/t-loongarch
+new file mode 100644
+index 000000000..adfc8ebb9
+--- /dev/null
++++ b/libstdc++-v3/config/cpu/loongarch/t-loongarch
+@@ -0,0 +1 @@
++AM_MAKEFLAGS += " MULTISUBDIR=/$(shell $(CXX) --print-multi-directory)"
+diff --git a/libstdc++-v3/configure.host b/libstdc++-v3/configure.host
+index ec32980aa..592160e6d 100644
+--- a/libstdc++-v3/configure.host
++++ b/libstdc++-v3/configure.host
+@@ -315,7 +315,10 @@ esac
+ # Set any OS-dependent and CPU-dependent bits.
+ # THIS TABLE IS SORTED.  KEEP IT THAT WAY.
+ case "${host}" in
+-  *-*-linux* | *-*-uclinux*)
++ loongarch*)
++    tmake_file="cpu/loongarch/t-loongarch"
++    ;;
++ *-*-linux* | *-*-uclinux*)
+     case "${host_cpu}" in
+       i[567]86)
+         abi_baseline_pair=i486-linux-gnu
+-- 
+2.33.0
+
diff --git a/LoongArch-Allow-attributes-in-non-gnu-namespaces.diff b/LoongArch-Allow-attributes-in-non-gnu-namespaces.diff
new file mode 100644
index 0000000000000000000000000000000000000000..ae7ac9be1055138d4c4d70b97e0283df2f5f715d
--- /dev/null
+++ b/LoongArch-Allow-attributes-in-non-gnu-namespaces.diff
@@ -0,0 +1,23 @@
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 6be0d80b3..12af95f70 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -7917,15 +7917,13 @@ loongarch_handle_model_attribute (tree *node, tree name, tree arg, int,
+   return NULL_TREE;
+ }
+
+-static const struct attribute_spec loongarch_attribute_table[] =
++TARGET_GNU_ATTRIBUTES (loongarch_attribute_table,
+ {
+   /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
+        affects_type_identity, handler, exclude } */
+   { "model", 1, 1, true, false, false, false,
+-    loongarch_handle_model_attribute, NULL },
+-  /* The last attribute spec is set to be NULL.  */
+-  {}
+-};
++    loongarch_handle_model_attribute, NULL }
++});
+
+ bool
+ loongarch_use_anchors_for_symbol_p (const_rtx symbol)
diff --git a/LoongArch-Avoid-RTL-flag-check-failure-in-loongarch_.patch b/LoongArch-Avoid-RTL-flag-check-failure-in-loongarch_.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d034f41c68d9b8cfe65f7ef95afb71c6b842ce0f
--- /dev/null
+++ b/LoongArch-Avoid-RTL-flag-check-failure-in-loongarch_.patch
@@ -0,0 +1,55 @@
+From e82403e918e18fa8e8ecd0c9e26f2657cc814e12 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao <xry111@xry111.site>
+Date: Wed, 24 Aug 2022 21:31:34 +0800
+Subject: [PATCH 013/124] LoongArch: Avoid RTL flag check failure in
+ loongarch_classify_symbol
+
+SYMBOL_REF_TLS_MODEL invokes SYMBOL_REF_FLAGS, and SYMBOL_REF_FLAGS
+invokes RTL_FLAG_CHECK1 and aborts when RTL code is not SYMBOL_REF.
+
+r13-1833 removed "gcc_assert (SYMBOL_REF_P (x))" before invoking
+"SYMBOL_REF_TLS_MODEL (x)", indicating that it's now possible that "x"
+is not a SYMBOL_REF.  So we need to check if "x" is SYMBOL_REF first.
+
+This fixes a test failure happening with r13-2173 with RTL flag
+checking enabled:
+
+    pr106096.C:26:1: internal compiler error: RTL flag check:
+    SYMBOL_REF_FLAGS used with unexpected rtx code 'const' in
+    loongarch_classify_symbol
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_classify_symbol):
+	Return early if the rtx is not SYMBOL_REF.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch.cc | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 04c4ddaed..452aba9d4 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1633,14 +1633,13 @@ loongarch_rtx_constant_in_small_data_p (machine_mode mode)
+ static enum loongarch_symbol_type
+ loongarch_classify_symbol (const_rtx x)
+ {
+-  if (LABEL_REF_P (x))
++  if (!SYMBOL_REF_P (x))
+     return SYMBOL_PCREL;
+ 
+   if (SYMBOL_REF_TLS_MODEL (x))
+     return SYMBOL_TLS;
+ 
+-  if (SYMBOL_REF_P (x)
+-      && !loongarch_symbol_binds_local_p (x))
++  if (!loongarch_symbol_binds_local_p (x))
+     return SYMBOL_GOT_DISP;
+ 
+   return SYMBOL_PCREL;
+-- 
+2.33.0
+
diff --git a/LoongArch-Avoid-non-returning-indirect-jumps-through.patch b/LoongArch-Avoid-non-returning-indirect-jumps-through.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3fa6556c964758a3b0ce09f0ae207dbcb521a741
--- /dev/null
+++ b/LoongArch-Avoid-non-returning-indirect-jumps-through.patch
@@ -0,0 +1,62 @@
+From 7e759740048ee6f24c1055c32868fa21cabb4f75 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Wed, 7 Jun 2023 10:21:58 +0800
+Subject: [PATCH 048/124] LoongArch: Avoid non-returning indirect jumps through
+ $ra [PR110136]
+
+Micro-architecture unconditionally treats a "jr $ra" as "return from subroutine",
+hence doing "jr $ra" would interfere with both subroutine return prediction and
+the more general indirect branch prediction.
+
+Therefore, a problem like PR110136 can cause a significant increase in branch error
+prediction rate and affect performance. The same problem exists with "indirect_jump".
+
+gcc/ChangeLog:
+
+	PR target/110136
+	* config/loongarch/loongarch.md: Modify the register constraints for template
+	"jumptable" and "indirect_jump" from "r" to "e".
+
+Co-authored-by: Andrew Pinski <apinski@marvell.com>
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch.md | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index b23248c33..c79951c1d 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2895,6 +2895,10 @@
+ }
+   [(set_attr "type" "branch")])
+ 
++;; Micro-architecture unconditionally treats a "jr $ra" as "return from subroutine",
++;; non-returning indirect jumps through $ra would interfere with both subroutine
++;; return prediction and the more general indirect branch prediction.
++
+ (define_expand "indirect_jump"
+   [(set (pc) (match_operand 0 "register_operand"))]
+   ""
+@@ -2905,7 +2909,7 @@
+ })
+ 
+ (define_insn "@indirect_jump"
+-  [(set (pc) (match_operand:P 0 "register_operand" "r"))]
++  [(set (pc) (match_operand:P 0 "register_operand" "e"))]
+   ""
+   "jr\t%0"
+   [(set_attr "type" "jump")
+@@ -2928,7 +2932,7 @@
+ 
+ (define_insn "@tablejump"
+   [(set (pc)
+-	(match_operand:P 0 "register_operand" "r"))
++	(match_operand:P 0 "register_operand" "e"))
+    (use (label_ref (match_operand 1 "" "")))]
+   ""
+   "jr\t%0"
+-- 
+2.33.0
+
diff --git a/LoongArch-Change-the-default-value-of-LARCH_CALL_RAT.patch b/LoongArch-Change-the-default-value-of-LARCH_CALL_RAT.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f18f741064c68082620c224da9595d7b1f736c3c
--- /dev/null
+++ b/LoongArch-Change-the-default-value-of-LARCH_CALL_RAT.patch
@@ -0,0 +1,41 @@
+From 59824f1062d77d0e02ea82d47415bf95c235de87 Mon Sep 17 00:00:00 2001
+From: chenxiaolong <chenxiaolong@loongson.cn>
+Date: Thu, 15 Jun 2023 02:46:24 +0000
+Subject: [PATCH 046/124] LoongArch: Change the default value of
+ LARCH_CALL_RATIO to 6.
+
+During the regression testing of the LoongArch architecture GCC, it was found
+that the tests in the pr90883.C file failed. The problem was modulated and
+found that the error was caused by setting the macro LARCH_CALL_RATIO to a too
+large value. Combined with the actual LoongArch architecture, the different
+thresholds for meeting the test conditions were tested using the engineering method
+(SPEC CPU 2006), and the results showed that its optimal threshold should be set
+to 6.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (LARCH_CALL_RATIO): Modify the value
+	of macro LARCH_CALL_RATIO on LoongArch to make it perform optimally.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 44ebadfaa..0e35d4dec 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -1073,7 +1073,7 @@ typedef struct {
+ /* The base cost of a memcpy call, for MOVE_RATIO and friends.  These
+    values were determined experimentally by benchmarking with CSiBE.
+ */
+-#define LARCH_CALL_RATIO 8
++#define LARCH_CALL_RATIO 6
+ 
+ /* Any loop-based implementation of cpymemsi will have at least
+    LARCH_MAX_MOVE_OPS_PER_LOOP_ITER memory-to-memory
+-- 
+2.33.0
+
diff --git a/LoongArch-Change-the-value-of-branch_cost-from-2-to-.patch b/LoongArch-Change-the-value-of-branch_cost-from-2-to-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7b92012534ad57a905f10ffc49236411d5c279e4
--- /dev/null
+++ b/LoongArch-Change-the-value-of-branch_cost-from-2-to-.patch
@@ -0,0 +1,69 @@
+From 7e843ed8da168a05eb04eee0b14cbe681bf798fe Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Wed, 13 Sep 2023 11:01:34 +0800
+Subject: [PATCH 123/124] LoongArch: Change the value of branch_cost from 2 to
+ 6.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-def.c: Modify the default value of
+	branch_cost.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/cmov_ii.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch-def.c         |  4 ++--
+ gcc/testsuite/gcc.target/loongarch/cmov_ii.c | 15 +++++++++++++++
+ 2 files changed, 17 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/cmov_ii.c
+
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+index d29d5f001..eeb32dbf6 100644
+--- a/gcc/config/loongarch/loongarch-def.c
++++ b/gcc/config/loongarch/loongarch-def.c
+@@ -85,7 +85,7 @@ loongarch_cpu_align[N_TUNE_TYPES] = {
+     .int_mult_di	= COSTS_N_INSNS (1),	\
+     .int_div_si		= COSTS_N_INSNS (4),	\
+     .int_div_di		= COSTS_N_INSNS (6),	\
+-    .branch_cost	= 2,			\
++    .branch_cost	= 6,			\
+     .memory_latency	= 4
+ 
+ /* The following properties cannot be looked up directly using "cpucfg".
+@@ -118,7 +118,7 @@ loongarch_rtx_cost_optimize_size = {
+     .int_mult_di      = 4,
+     .int_div_si	      = 4,
+     .int_div_di	      = 4,
+-    .branch_cost      = 2,
++    .branch_cost      = 6,
+     .memory_latency   = 4,
+ };
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/cmov_ii.c b/gcc/testsuite/gcc.target/loongarch/cmov_ii.c
+new file mode 100644
+index 000000000..21b468e8a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/cmov_ii.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++/* { dg-final { scan-assembler "test:.*xor.*maskeqz.*masknez.*or.*" } } */
++
++extern void foo_ii (int *, int *, int *, int *);
++
++int
++test (void)
++{
++  int a, b;
++  int c, d, out;
++  foo_ii (&a, &b, &c, &d);
++  out = a == b ? c : d;
++  return out;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Change-the-value-of-macro-TRY_EMPTY_VM_SPA.patch b/LoongArch-Change-the-value-of-macro-TRY_EMPTY_VM_SPA.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c8840b82d9b2daa640fe13d5d9caf79203cd4efc
--- /dev/null
+++ b/LoongArch-Change-the-value-of-macro-TRY_EMPTY_VM_SPA.patch
@@ -0,0 +1,49 @@
+From 6e9265e571a63deb2584704a0b088a6d67ec8af5 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Mon, 20 Feb 2023 16:47:11 +0800
+Subject: [PATCH 037/124] LoongArch: Change the value of macro
+ TRY_EMPTY_VM_SPACE from 0x8000000000 to 0x1000000000.
+
+The PCH mechanism first tries to map the .gch file to the virtual memory
+space pointed to by TRY_EMPTY_VM_SPACE during the compilation process.
+
+The original value of TRY_EMPTY_VM_SPACE macro is 0x8000000000,
+but like la464 only has 40 bits of virtual address space, this value
+just exceeds the address range.
+
+If we want to support chips with less than 40 bits virtual addresses,
+then the value of this macro needs to be set small. I think setting
+this value small will increase the probability of virtual address
+mapping failure. And the purpose of pch is to make compilation faster,
+but I think we rarely compile on embedded systems. So this situation
+may not be within our consideration.
+
+So change the value of this macro to 0x1000000000.
+
+gcc/ChangeLog:
+
+	* config/host-linux.cc (TRY_EMPTY_VM_SPACE): Modify the value of
+	the macro to 0x1000000000.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/host-linux.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/host-linux.cc b/gcc/config/host-linux.cc
+index 817d3c087..d93cfc064 100644
+--- a/gcc/config/host-linux.cc
++++ b/gcc/config/host-linux.cc
+@@ -99,7 +99,7 @@
+ #elif defined(__riscv) && defined (__LP64__)
+ # define TRY_EMPTY_VM_SPACE	0x1000000000
+ #elif defined(__loongarch__) && defined(__LP64__)
+-# define TRY_EMPTY_VM_SPACE	0x8000000000
++# define TRY_EMPTY_VM_SPACE	0x1000000000
+ #else
+ # define TRY_EMPTY_VM_SPACE	0
+ #endif
+-- 
+2.33.0
+
diff --git a/LoongArch-Define-the-macro-ASM_PREFERRED_EH_DATA_FOR.patch b/LoongArch-Define-the-macro-ASM_PREFERRED_EH_DATA_FOR.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f5b0e6135e2dee7a2ed137b6ada7c7445fac8d43
--- /dev/null
+++ b/LoongArch-Define-the-macro-ASM_PREFERRED_EH_DATA_FOR.patch
@@ -0,0 +1,139 @@
+From 05c1df09c70cd0ed48f0644890f69a0128b17a98 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Fri, 29 Jul 2022 09:44:52 +0800
+Subject: [PATCH 008/124] LoongArch: Define the macro
+ ASM_PREFERRED_EH_DATA_FORMAT by checking the assembler's support for eh_frame
+ encoding.
+
+.eh_frame DW_EH_PE_pcrel encoding format is not supported by gas <= 2.39.
+Check if the assembler support DW_EH_PE_PCREL encoding and define .eh_frame
+encoding type.
+
+gcc/ChangeLog:
+
+	* config.in: Regenerate.
+	* config/loongarch/loongarch.h (ASM_PREFERRED_EH_DATA_FORMAT):
+	Select the value of the macro definition according to whether
+	HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT is defined.
+	* configure: Regenerate.
+	* configure.ac: Reinstate HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config.in                    |  8 +++++++-
+ gcc/config/loongarch/loongarch.h |  5 +++++
+ gcc/configure                    | 34 ++++++++++++++++++++++++++++++++
+ gcc/configure.ac                 |  8 ++++++++
+ 4 files changed, 54 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config.in b/gcc/config.in
+index 64c27c9cf..67ce422f2 100644
+--- a/gcc/config.in
++++ b/gcc/config.in
+@@ -404,13 +404,19 @@
+ #endif
+ 
+ 
++/* Define if your assembler supports eh_frame pcrel encoding. */
++#ifndef USED_FOR_TARGET
++#undef HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT
++#endif
++
++
+ /* Define if your assembler supports the R_PPC64_ENTRY relocation. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_AS_ENTRY_MARKERS
+ #endif
+ 
+ 
+-/* Define if your assembler supports explicit relocations. */
++/* Define if your assembler supports explicit relocation. */
+ #ifndef USED_FOR_TARGET
+ #undef HAVE_AS_EXPLICIT_RELOCS
+ #endif
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 12f209047..a52a81adf 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -1130,8 +1130,13 @@ struct GTY (()) machine_function
+ };
+ #endif
+ 
++#ifdef HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT
++#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
++  (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_pcrel | DW_EH_PE_sdata4)
++#else
+ #define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
+   (((GLOBAL) ? DW_EH_PE_indirect : 0) | DW_EH_PE_absptr)
++#endif
+ 
+ /* Do emit .note.GNU-stack by default.  */
+ #ifndef NEED_INDICATE_EXEC_STACK
+diff --git a/gcc/configure b/gcc/configure
+index 840eddc7c..3788e240a 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -28857,6 +28857,40 @@ if test $gcc_cv_as_loongarch_explicit_relocs = yes; then
+ 
+ $as_echo "#define HAVE_AS_EXPLICIT_RELOCS 1" >>confdefs.h
+ 
++fi
++
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for eh_frame pcrel encoding support" >&5
++$as_echo_n "checking assembler for eh_frame pcrel encoding support... " >&6; }
++if ${gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support=no
++  if test x$gcc_cv_as != x; then
++    $as_echo '.cfi_startproc
++       .cfi_personality 0x9b,a
++       .cfi_lsda 0x1b,b
++       .cfi_endproc' > conftest.s
++    if { ac_try='$gcc_cv_as $gcc_cv_as_flags  -o conftest.o conftest.s >&5'
++  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
++  (eval $ac_try) 2>&5
++  ac_status=$?
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; }
++    then
++	gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support=yes
++    else
++      echo "configure: failed program was" >&5
++      cat conftest.s >&5
++    fi
++    rm -f conftest.o conftest.s
++  fi
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support" >&5
++$as_echo "$gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support" >&6; }
++if test $gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support = yes; then
++
++$as_echo "#define HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT 1" >>confdefs.h
++
+ fi
+ 
+     ;;
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index 975c852c6..1c376e0d4 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -5324,6 +5324,14 @@ x:
+       [a:pcalau12i $t0,%pc_hi20(a)],,
+       [AC_DEFINE(HAVE_AS_EXPLICIT_RELOCS, 1,
+ 	  [Define if your assembler supports explicit relocation.])])
++    gcc_GAS_CHECK_FEATURE([eh_frame pcrel encoding support],
++      gcc_cv_as_loongarch_eh_frame_pcrel_encoding_support,,
++      [.cfi_startproc
++       .cfi_personality 0x9b,a
++       .cfi_lsda 0x1b,b
++       .cfi_endproc],,
++      [AC_DEFINE(HAVE_AS_EH_FRAME_PCREL_ENCODING_SUPPORT, 1,
++	  [Define if your assembler supports eh_frame pcrel encoding.])])
+     ;;
+     s390*-*-*)
+     gcc_GAS_CHECK_FEATURE([.gnu_attribute support],
+-- 
+2.33.0
+
diff --git a/LoongArch-Don-t-add-crtfastmath.o-for-shared.patch b/LoongArch-Don-t-add-crtfastmath.o-for-shared.patch
new file mode 100644
index 0000000000000000000000000000000000000000..88250406e851e01279d00031d38cd7706dcc469a
--- /dev/null
+++ b/LoongArch-Don-t-add-crtfastmath.o-for-shared.patch
@@ -0,0 +1,34 @@
+From 2e19311d1bf4f932f5e67f6866123b895b12c97f Mon Sep 17 00:00:00 2001
+From: Richard Biener <rguenther@suse.de>
+Date: Fri, 13 Jan 2023 09:01:12 +0100
+Subject: [PATCH 035/124] LoongArch: Don't add crtfastmath.o for -shared
+
+Don't add crtfastmath.o for -shared to avoid altering the FP
+environment when loading a shared library.
+
+	PR target/55522
+	* config/loongarch/gnu-user.h (GNU_USER_TARGET_MATHFILE_SPEC):
+	Don't add crtfastmath.o for -shared.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/gnu-user.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h
+index c5b1afe53..1dc6add62 100644
+--- a/gcc/config/loongarch/gnu-user.h
++++ b/gcc/config/loongarch/gnu-user.h
+@@ -49,7 +49,7 @@ along with GCC; see the file COPYING3.  If not see
+ /* Similar to standard Linux, but adding -ffast-math support.  */
+ #undef GNU_USER_TARGET_MATHFILE_SPEC
+ #define GNU_USER_TARGET_MATHFILE_SPEC \
+-  "%{Ofast|ffast-math|funsafe-math-optimizations:crtfastmath.o%s}"
++  "%{Ofast|ffast-math|funsafe-math-optimizations:%{!shared:crtfastmath.o%s}}"
+ 
+ #undef LIB_SPEC
+ #define LIB_SPEC GNU_USER_TARGET_LIB_SPEC
+-- 
+2.33.0
+
diff --git a/LoongArch-Enable-free-starting-at-O2.patch b/LoongArch-Enable-free-starting-at-O2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7e6bbfb7d69f2c44bf6d1b919a57f3e4d1c00ffb
--- /dev/null
+++ b/LoongArch-Enable-free-starting-at-O2.patch
@@ -0,0 +1,71 @@
+From 0369836718ffb25ac64c135e748f409302068a56 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Mon, 28 Aug 2023 11:30:21 +0800
+Subject: [PATCH 052/124] LoongArch: Enable '-free' starting at -O2.
+
+gcc/ChangeLog:
+
+	* common/config/loongarch/loongarch-common.cc:
+	Enable '-free' on O2 and above.
+	* doc/invoke.texi: Modify the description information
+	of the '-free' compilation option and add the LoongArch
+	description.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/sign-extend.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ .../config/loongarch/loongarch-common.cc      |  1 +
+ .../gcc.target/loongarch/sign-extend.c        | 25 +++++++++++++++++++
+ 2 files changed, 26 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/sign-extend.c
+
+diff --git a/gcc/common/config/loongarch/loongarch-common.cc b/gcc/common/config/loongarch/loongarch-common.cc
+index f8b4660fa..309fcb280 100644
+--- a/gcc/common/config/loongarch/loongarch-common.cc
++++ b/gcc/common/config/loongarch/loongarch-common.cc
+@@ -35,6 +35,7 @@ static const struct default_options loongarch_option_optimization_table[] =
+ {
+   { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 },
+   { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
++  { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 },
+   { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/sign-extend.c b/gcc/testsuite/gcc.target/loongarch/sign-extend.c
+new file mode 100644
+index 000000000..3f339d06b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/sign-extend.c
+@@ -0,0 +1,25 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2" } */
++/* { dg-final { scan-assembler-times "slli.w" 1 } } */
++
++extern int PL_savestack_ix;
++extern int PL_regsize;
++extern int PL_savestack_max;
++void Perl_savestack_grow_cnt (int need);
++extern void Perl_croak (char *);
++
++int
++S_regcppush(int parenfloor)
++{
++  int retval = PL_savestack_ix;
++  int paren_elems_to_push = (PL_regsize - parenfloor) * 4;
++  int p;
++
++  if (paren_elems_to_push < 0)
++    Perl_croak ("panic: paren_elems_to_push < 0");
++
++  if (PL_savestack_ix + (paren_elems_to_push + 6) > PL_savestack_max)
++    Perl_savestack_grow_cnt (paren_elems_to_push + 6);
++
++  return retval;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Enable-fsched-pressure-by-default-at-O1-an.patch b/LoongArch-Enable-fsched-pressure-by-default-at-O1-an.patch
new file mode 100644
index 0000000000000000000000000000000000000000..52641c5d12d5b675149903980745d9a2b7df8fb5
--- /dev/null
+++ b/LoongArch-Enable-fsched-pressure-by-default-at-O1-an.patch
@@ -0,0 +1,33 @@
+From a9f72e237d5c176e4ef8ba03a8b4ee5c5daa25fb Mon Sep 17 00:00:00 2001
+From: Guo Jie 
+Date: Fri, 8 Sep 2023 10:00:21 +0800
+Subject: [PATCH 071/124] LoongArch: Enable -fsched-pressure by default at -O1
+ and higher.
+
+gcc/ChangeLog:
+
+	* common/config/loongarch/loongarch-common.cc:
+	(default_options loongarch_option_optimization_table):
+	Default to -fsched-pressure.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/common/config/loongarch/loongarch-common.cc | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/gcc/common/config/loongarch/loongarch-common.cc b/gcc/common/config/loongarch/loongarch-common.cc
+index 309fcb280..c8bc5718d 100644
+--- a/gcc/common/config/loongarch/loongarch-common.cc
++++ b/gcc/common/config/loongarch/loongarch-common.cc
+@@ -36,6 +36,7 @@ static const struct default_options loongarch_option_optimization_table[] =
+   { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 },
+   { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
+   { OPT_LEVELS_2_PLUS, OPT_free, NULL, 1 },
++  { OPT_LEVELS_1_PLUS, OPT_fsched_pressure, NULL, 1 },
+   { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+ 
+-- 
+2.33.0
+
diff --git a/LoongArch-Enable-shrink-wrapping.patch b/LoongArch-Enable-shrink-wrapping.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7d8302559d8671b7f96b1d2d704722685440ca4f
--- /dev/null
+++ b/LoongArch-Enable-shrink-wrapping.patch
@@ -0,0 +1,309 @@
+From e86c9ece7ae922fe80017ba2ffe22f6267531682 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 23 Apr 2023 20:52:22 +0800
+Subject: [PATCH 045/124] LoongArch: Enable shrink wrapping
+
+This commit implements the target macros for shrink wrapping of function
+prologues/epilogues shrink wrapping on LoongArch.
+
+Bootstrapped and regtested on loongarch64-linux-gnu.  I don't have an
+access to SPEC CPU so I hope the reviewer can perform a benchmark to see
+if there is real benefit.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (struct machine_function): Add
+	reg_is_wrapped_separately array for register wrapping
+	information.
+	* config/loongarch/loongarch.cc
+	(loongarch_get_separate_components): New function.
+	(loongarch_components_for_bb): Likewise.
+	(loongarch_disqualify_components): Likewise.
+	(loongarch_process_components): Likewise.
+	(loongarch_emit_prologue_components): Likewise.
+	(loongarch_emit_epilogue_components): Likewise.
+	(loongarch_set_handled_components): Likewise.
+	(TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS): Define.
+	(TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB): Likewise.
+	(TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS): Likewise.
+	(TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS): Likewise.
+	(TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS): Likewise.
+	(TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS): Likewise.
+	(loongarch_for_each_saved_reg): Skip registers that are wrapped
+	separately.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/shrink-wrap.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc             | 179 +++++++++++++++++-
+ gcc/config/loongarch/loongarch.h              |   2 +
+ .../gcc.target/loongarch/shrink-wrap.c        |  19 ++
+ 3 files changed, 197 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/shrink-wrap.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index d3c6f22ad..4c0f393b6 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -64,6 +64,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "builtins.h"
+ #include "rtl-iter.h"
+ #include "opts.h"
++#include "function-abi.h"
+ 
+ /* This file should be included last.  */
+ #include "target-def.h"
+@@ -1014,19 +1015,23 @@ loongarch_for_each_saved_reg (HOST_WIDE_INT sp_offset,
+   for (int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
+     if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
+       {
+-	loongarch_save_restore_reg (word_mode, regno, offset, fn);
++	if (!cfun->machine->reg_is_wrapped_separately[regno])
++	  loongarch_save_restore_reg (word_mode, regno, offset, fn);
++
+ 	offset -= UNITS_PER_WORD;
+       }
+ 
+   /* This loop must iterate over the same space as its companion in
+      loongarch_compute_frame_info.  */
+   offset = cfun->machine->frame.fp_sp_offset - sp_offset;
++  machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
++
+   for (int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
+     if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
+       {
+-	machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
++	if (!cfun->machine->reg_is_wrapped_separately[regno])
++	  loongarch_save_restore_reg (word_mode, regno, offset, fn);
+ 
+-	loongarch_save_restore_reg (mode, regno, offset, fn);
+ 	offset -= GET_MODE_SIZE (mode);
+       }
+ }
+@@ -6630,6 +6635,151 @@ loongarch_asan_shadow_offset (void)
+   return TARGET_64BIT ? (HOST_WIDE_INT_1 << 46) : 0;
+ }
+ 
++static sbitmap
++loongarch_get_separate_components (void)
++{
++  HOST_WIDE_INT offset;
++  sbitmap components = sbitmap_alloc (FIRST_PSEUDO_REGISTER);
++  bitmap_clear (components);
++  offset = cfun->machine->frame.gp_sp_offset;
++
++  /* The stack should be aligned to 16-bytes boundary, so we can make the use
++     of ldptr instructions.  */
++  gcc_assert (offset % UNITS_PER_WORD == 0);
++
++  for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++      {
++	/* We can wrap general registers saved at [sp, sp + 32768) using the
++	   ldptr/stptr instructions.  For large offsets a pseudo register
++	   might be needed which cannot be created during the shrink
++	   wrapping pass.
++
++	   TODO: This may need a revise when we add LA32 as ldptr.w is not
++	   guaranteed available by the manual.  */
++	if (offset < 32768)
++	  bitmap_set_bit (components, regno);
++
++	offset -= UNITS_PER_WORD;
++      }
++
++  offset = cfun->machine->frame.fp_sp_offset;
++  for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
++      {
++	/* We can only wrap FP registers with imm12 offsets.  For large
++	   offsets a pseudo register might be needed which cannot be
++	   created during the shrink wrapping pass.  */
++	if (IMM12_OPERAND (offset))
++	  bitmap_set_bit (components, regno);
++
++	offset -= UNITS_PER_FPREG;
++      }
++
++  /* Don't mess with the hard frame pointer.  */
++  if (frame_pointer_needed)
++    bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
++
++  bitmap_clear_bit (components, RETURN_ADDR_REGNUM);
++
++  return components;
++}
++
++static sbitmap
++loongarch_components_for_bb (basic_block bb)
++{
++  /* Registers are used in a bb if they are in the IN, GEN, or KILL sets.  */
++  auto_bitmap used;
++  bitmap_copy (used, DF_LIVE_IN (bb));
++  bitmap_ior_into (used, &DF_LIVE_BB_INFO (bb)->gen);
++  bitmap_ior_into (used, &DF_LIVE_BB_INFO (bb)->kill);
++
++  sbitmap components = sbitmap_alloc (FIRST_PSEUDO_REGISTER);
++  bitmap_clear (components);
++
++  function_abi_aggregator callee_abis;
++  rtx_insn *insn;
++  FOR_BB_INSNS (bb, insn)
++    if (CALL_P (insn))
++      callee_abis.note_callee_abi (insn_callee_abi (insn));
++
++  HARD_REG_SET extra_caller_saves =
++    callee_abis.caller_save_regs (*crtl->abi);
++
++  for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++    if (!fixed_regs[regno]
++	&& !crtl->abi->clobbers_full_reg_p (regno)
++	&& (TEST_HARD_REG_BIT (extra_caller_saves, regno) ||
++	    bitmap_bit_p (used, regno)))
++      bitmap_set_bit (components, regno);
++
++  for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++    if (!fixed_regs[regno]
++	&& !crtl->abi->clobbers_full_reg_p (regno)
++	&& (TEST_HARD_REG_BIT (extra_caller_saves, regno) ||
++	    bitmap_bit_p (used, regno)))
++      bitmap_set_bit (components, regno);
++
++  return components;
++}
++
++static void
++loongarch_disqualify_components (sbitmap, edge, sbitmap, bool)
++{
++  /* Do nothing.  */
++}
++
++static void
++loongarch_process_components (sbitmap components, loongarch_save_restore_fn fn)
++{
++  HOST_WIDE_INT offset = cfun->machine->frame.gp_sp_offset;
++
++  for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
++      {
++	if (bitmap_bit_p (components, regno))
++	  loongarch_save_restore_reg (word_mode, regno, offset, fn);
++
++	offset -= UNITS_PER_WORD;
++      }
++
++  offset = cfun->machine->frame.fp_sp_offset;
++  machine_mode mode = TARGET_DOUBLE_FLOAT ? DFmode : SFmode;
++
++  for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
++      {
++	if (bitmap_bit_p (components, regno))
++	  loongarch_save_restore_reg (mode, regno, offset, fn);
++
++	offset -= UNITS_PER_FPREG;
++      }
++}
++
++static void
++loongarch_emit_prologue_components (sbitmap components)
++{
++  loongarch_process_components (components, loongarch_save_reg);
++}
++
++static void
++loongarch_emit_epilogue_components (sbitmap components)
++{
++  loongarch_process_components (components, loongarch_restore_reg);
++}
++
++static void
++loongarch_set_handled_components (sbitmap components)
++{
++    for (unsigned int regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
++      if (bitmap_bit_p (components, regno))
++	cfun->machine->reg_is_wrapped_separately[regno] = true;
++
++    for (unsigned int regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
++      if (bitmap_bit_p (components, regno))
++	cfun->machine->reg_is_wrapped_separately[regno] = true;
++}
++
+ /* Initialize the GCC target structure.  */
+ #undef TARGET_ASM_ALIGNED_HI_OP
+ #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+@@ -6827,6 +6977,29 @@ loongarch_asan_shadow_offset (void)
+ #undef TARGET_ASAN_SHADOW_OFFSET
+ #define TARGET_ASAN_SHADOW_OFFSET loongarch_asan_shadow_offset
+ 
++#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
++#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS \
++  loongarch_get_separate_components
++
++#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
++#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB loongarch_components_for_bb
++
++#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
++#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS \
++  loongarch_disqualify_components
++
++#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
++#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS \
++  loongarch_emit_prologue_components
++
++#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
++#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS \
++  loongarch_emit_epilogue_components
++
++#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
++#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS \
++  loongarch_set_handled_components
++
+ struct gcc_target targetm = TARGET_INITIALIZER;
+ 
+ #include "gt-loongarch.h"
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index af24bfa01..44ebadfaa 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -1147,6 +1147,8 @@ struct GTY (()) machine_function
+   /* The current frame information, calculated by loongarch_compute_frame_info.
+    */
+   struct loongarch_frame_info frame;
++
++  bool reg_is_wrapped_separately[FIRST_PSEUDO_REGISTER];
+ };
+ #endif
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/shrink-wrap.c b/gcc/testsuite/gcc.target/loongarch/shrink-wrap.c
+new file mode 100644
+index 000000000..1431536c5
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/shrink-wrap.c
+@@ -0,0 +1,19 @@
++/* { dg-do compile } */
++/* { dg-options "-O -fshrink-wrap" } */
++
++/* We should not save anything before checking the value of x.  */
++/* { dg-final { scan-assembler-not "st(ptr)?\\\.\[dw\].*b(eq|ne)z" } } */
++
++int
++foo (int x)
++{
++  __asm__ ("nop" :);
++  if (x)
++    {
++      __asm__ ("" ::: "s0", "s1");
++      return x;
++    }
++
++  __asm__ ("" ::: "s2", "s3");
++  return 0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-MUSL_DYNAMIC_LINKER.patch b/LoongArch-Fix-MUSL_DYNAMIC_LINKER.patch
new file mode 100644
index 0000000000000000000000000000000000000000..53d5dfeabf1a806dd18f105b9742fb01b49e45ad
--- /dev/null
+++ b/LoongArch-Fix-MUSL_DYNAMIC_LINKER.patch
@@ -0,0 +1,43 @@
+From 3db61acfbaa773568fad2bc31d950c6d9b3729b0 Mon Sep 17 00:00:00 2001
+From: Peng Fan 
+Date: Wed, 19 Apr 2023 16:23:42 +0800
+Subject: [PATCH 044/124] LoongArch: Fix MUSL_DYNAMIC_LINKER
+
+The system based on musl has no '/lib64', so change it.
+
+https://wiki.musl-libc.org/guidelines-for-distributions.html,
+"Multilib/multi-arch" section of this introduces it.
+
+gcc/
+	* config/loongarch/gnu-user.h (MUSL_DYNAMIC_LINKER): Redefine.
+
+Signed-off-by: Peng Fan 
+Suggested-by: Xi Ruoyao 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/gnu-user.h | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h
+index 1dc6add62..44e4f2575 100644
+--- a/gcc/config/loongarch/gnu-user.h
++++ b/gcc/config/loongarch/gnu-user.h
+@@ -33,9 +33,14 @@ along with GCC; see the file COPYING3.  If not see
+ #define GLIBC_DYNAMIC_LINKER \
+   "/lib" ABI_GRLEN_SPEC "/ld-linux-loongarch-" ABI_SPEC ".so.1"
+ 
++#define MUSL_ABI_SPEC \
++  "%{mabi=lp64d:-lp64d}" \
++  "%{mabi=lp64f:-lp64f}" \
++  "%{mabi=lp64s:-lp64s}"
++
+ #undef MUSL_DYNAMIC_LINKER
+ #define MUSL_DYNAMIC_LINKER \
+-  "/lib" ABI_GRLEN_SPEC "/ld-musl-loongarch-" ABI_SPEC ".so.1"
++  "/lib/ld-musl-loongarch" ABI_GRLEN_SPEC MUSL_ABI_SPEC ".so.1"
+ 
+ #undef GNU_USER_TARGET_LINK_SPEC
+ #define GNU_USER_TARGET_LINK_SPEC \
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-bug-in-loongarch_emit_stack_tie-PR1104.patch b/LoongArch-Fix-bug-in-loongarch_emit_stack_tie-PR1104.patch
new file mode 100644
index 0000000000000000000000000000000000000000..23f46aec0fd6304c89dca9a6dc5fcaefd104273c
--- /dev/null
+++ b/LoongArch-Fix-bug-in-loongarch_emit_stack_tie-PR1104.patch
@@ -0,0 +1,43 @@
+From 7c8fc6b414dc1718e71e0d05c7a78498e06eb499 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 29 Jun 2023 19:30:59 +0800
+Subject: [PATCH 053/124] LoongArch: Fix bug in loongarch_emit_stack_tie
+ [PR110484].
+
+Which may result in implicit references to $fp when frame_pointer_needed is false,
+causing regs_ever_live[$fp] to be true when $fp is not explicitly used,
+resulting in $fp being used as the target replacement register in the rnreg pass.
+
+The bug originates from SPEC2017 541.leela_r(-flto).
+
+gcc/ChangeLog:
+
+	PR target/110484
+	* config/loongarch/loongarch.cc (loongarch_emit_stack_tie): Use the
+	frame_pointer_needed to determine whether to use the $fp register.
+
+Co-authored-by: Guo Jie 
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index caacfa8a3..7b48e3216 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1109,7 +1109,9 @@ loongarch_first_stack_step (struct loongarch_frame_info *frame)
+ static void
+ loongarch_emit_stack_tie (void)
+ {
+-  emit_insn (gen_stack_tie (Pmode, stack_pointer_rtx, hard_frame_pointer_rtx));
++  emit_insn (gen_stack_tie (Pmode, stack_pointer_rtx,
++			    frame_pointer_needed ? hard_frame_pointer_rtx
++			    : stack_pointer_rtx));
+ }
+ 
+ #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-bug-of-optab-di3_fake.patch b/LoongArch-Fix-bug-of-optab-di3_fake.patch
new file mode 100644
index 0000000000000000000000000000000000000000..df1874aec9377b934a031500be86e2e9e4714e93
--- /dev/null
+++ b/LoongArch-Fix-bug-of-optab-di3_fake.patch
@@ -0,0 +1,123 @@
+From df1df2e7b7e27bd9fba77f572d74d833aff4a202 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Mon, 11 Sep 2023 16:20:29 +0800
+Subject: [PATCH 122/124] LoongArch: Fix bug of 'di3_fake'.
+
+	PR target/111334
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md: Fix bug of 'di3_fake'.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/pr111334.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md             | 20 ++++++----
+ gcc/testsuite/gcc.target/loongarch/pr111334.c | 39 +++++++++++++++++++
+ 2 files changed, 52 insertions(+), 7 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr111334.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 264cd325c..7746116e6 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -72,6 +72,9 @@
+   UNSPEC_LUI_H_HI12
+   UNSPEC_TLS_LOW
+ 
++  ;; Fake div.w[u] mod.w[u]
++  UNSPEC_FAKE_ANY_DIV
++
+   UNSPEC_SIBCALL_VALUE_MULTIPLE_INTERNAL_1
+   UNSPEC_CALL_VALUE_MULTIPLE_INTERNAL_1
+ ])
+@@ -900,7 +903,7 @@
+ 		     (match_operand:GPR 2 "register_operand")))]
+   ""
+ {
+- if (GET_MODE (operands[0]) == SImode)
++ if (GET_MODE (operands[0]) == SImode && TARGET_64BIT)
+   {
+     rtx reg1 = gen_reg_rtx (DImode);
+     rtx reg2 = gen_reg_rtx (DImode);
+@@ -920,9 +923,9 @@
+ })
+ 
+ (define_insn "*3"
+-  [(set (match_operand:GPR 0 "register_operand" "=r,&r,&r")
+-	(any_div:GPR (match_operand:GPR 1 "register_operand" "r,r,0")
+-		     (match_operand:GPR 2 "register_operand" "r,r,r")))]
++  [(set (match_operand:X 0 "register_operand" "=r,&r,&r")
++	(any_div:X (match_operand:X 1 "register_operand" "r,r,0")
++		   (match_operand:X 2 "register_operand" "r,r,r")))]
+   ""
+ {
+   return loongarch_output_division (".\t%0,%1,%2", operands);
+@@ -938,9 +941,12 @@
+ (define_insn "di3_fake"
+   [(set (match_operand:DI 0 "register_operand" "=r,&r,&r")
+ 	(sign_extend:DI
+-	  (any_div:SI (match_operand:DI 1 "register_operand" "r,r,0")
+-		      (match_operand:DI 2 "register_operand" "r,r,r"))))]
+-  ""
++	  (unspec:SI
++	   [(subreg:SI
++	     (any_div:DI (match_operand:DI 1 "register_operand" "r,r,0")
++			 (match_operand:DI 2 "register_operand" "r,r,r")) 0)]
++	  UNSPEC_FAKE_ANY_DIV)))]
++  "TARGET_64BIT"
+ {
+   return loongarch_output_division (".w\t%0,%1,%2", operands);
+ }
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr111334.c b/gcc/testsuite/gcc.target/loongarch/pr111334.c
+new file mode 100644
+index 000000000..47366afcb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr111334.c
+@@ -0,0 +1,39 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++
++unsigned
++util_next_power_of_two (unsigned x)
++{
++  return (1 << __builtin_clz (x - 1));
++}
++
++extern int create_vec_from_array (void);
++
++struct ac_shader_args {
++    struct {
++	unsigned char offset;
++	unsigned char size;
++    } args[384];
++};
++
++struct isel_context {
++    const struct ac_shader_args* args;
++    int arg_temps[384];
++};
++
++
++void
++add_startpgm (struct isel_context* ctx, unsigned short arg_count)
++{
++
++  for (unsigned i = 0, arg = 0; i < arg_count; i++)
++    {
++      unsigned size = ctx->args->args[i].size;
++      unsigned reg = ctx->args->args[i].offset;
++
++      if (reg % ( 4 < util_next_power_of_two (size)
++		 ? 4 : util_next_power_of_two (size)))
++	  ctx->arg_temps[i] = create_vec_from_array ();
++    }
++}
++
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-internal-error-running-gcc-march-nativ.patch b/LoongArch-Fix-internal-error-running-gcc-march-nativ.patch
new file mode 100644
index 0000000000000000000000000000000000000000..84c3b91b288708aec9012f94ad1767e59d265e14
--- /dev/null
+++ b/LoongArch-Fix-internal-error-running-gcc-march-nativ.patch
@@ -0,0 +1,106 @@
+From 56752a6bbfb3d3501d0899b23020c3e2eb58882c Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 17 Nov 2023 20:44:17 +0800
+Subject: [PATCH] LoongArch: Fix internal error running "gcc -march=native" on
+ LA664
+
+On LA664, the PRID preset is ISA_BASE_LA64V110 but the base architecture
+is guessed ISA_BASE_LA64V100.  This causes a warning to be outputed:
+
+    cc1: warning: base architecture 'la64' differs from PRID preset '?'
+
+But we've not set the "?" above in loongarch_isa_base_strings, thus it's
+a nullptr and then an ICE is triggered.
+
+Add ISA_BASE_LA64V110 to genopts and initialize
+loongarch_isa_base_strings[ISA_BASE_LA64V110] correctly to fix the ICE.
+The warning itself will be fixed later.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch-strings:
+	(STR_ISA_BASE_LA64V110): Add.
+	* config/loongarch/genopts/loongarch.opt.in:
+	(ISA_BASE_LA64V110): Add.
+	* config/loongarch/loongarch-def.c
+	(loongarch_isa_base_strings): Initialize [ISA_BASE_LA64V110]
+	to STR_ISA_BASE_LA64V110.
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/loongarch-str.h: Regenerate.
+
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/genopts/loongarch-strings | 1 +
+ gcc/config/loongarch/genopts/loongarch.opt.in  | 3 +++
+ gcc/config/loongarch/loongarch-def.c           | 1 +
+ gcc/config/loongarch/loongarch-str.h           | 1 +
+ gcc/config/loongarch/loongarch.opt             | 3 +++
+ 5 files changed, 9 insertions(+)
+
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index 7bc4824007e..b2070c83ed0 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -30,6 +30,7 @@ STR_CPU_LA664	      la664
+ 
+ # Base architecture
+ STR_ISA_BASE_LA64V100 la64
++STR_ISA_BASE_LA64V110 la64v1.1
+ 
+ # -mfpu
+ OPTSTR_ISA_EXT_FPU    fpu
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 00b4733d75b..b274b3fb21e 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -32,6 +32,9 @@ Basic ISAs of LoongArch:
+ EnumValue
+ Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100)
+ 
++EnumValue
++Enum(isa_base) String(@@STR_ISA_BASE_LA64V110@@) Value(ISA_BASE_LA64V110)
++
+ ;; ISA extensions / adjustments
+ Enum
+ Name(isa_ext_fpu) Type(int)
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+index 067629141b6..f22d488acb2 100644
+--- a/gcc/config/loongarch/loongarch-def.c
++++ b/gcc/config/loongarch/loongarch-def.c
+@@ -165,6 +165,7 @@ loongarch_cpu_multipass_dfa_lookahead[N_TUNE_TYPES] = {
+ const char*
+ loongarch_isa_base_strings[N_ISA_BASE_TYPES] = {
+   [ISA_BASE_LA64V100] = STR_ISA_BASE_LA64V100,
++  [ISA_BASE_LA64V110] = STR_ISA_BASE_LA64V110,
+ };
+ 
+ const char*
+diff --git a/gcc/config/loongarch/loongarch-str.h b/gcc/config/loongarch/loongarch-str.h
+index fc4f41bfc1e..114dbc692d7 100644
+--- a/gcc/config/loongarch/loongarch-str.h
++++ b/gcc/config/loongarch/loongarch-str.h
+@@ -33,6 +33,7 @@ along with GCC; see the file COPYING3.  If not see
+ #define STR_CPU_LA664 "la664"
+ 
+ #define STR_ISA_BASE_LA64V100 "la64"
++#define STR_ISA_BASE_LA64V110 "la64v1.1"
+ 
+ #define OPTSTR_ISA_EXT_FPU "fpu"
+ #define STR_NONE "none"
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 7f129e53ba5..350ca30d232 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -39,6 +39,9 @@ Basic ISAs of LoongArch:
+ EnumValue
+ Enum(isa_base) String(la64) Value(ISA_BASE_LA64V100)
+ 
++EnumValue
++Enum(isa_base) String(la64v1.1) Value(ISA_BASE_LA64V110)
++
+ ;; ISA extensions / adjustments
+ Enum
+ Name(isa_ext_fpu) Type(int)
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-lsx-vshuf.c-and-lasx-xvshuf_b.c-tests-.patch b/LoongArch-Fix-lsx-vshuf.c-and-lasx-xvshuf_b.c-tests-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e0bcc7c9695be8bfaa3bf307a9cc0c9a0274c003
--- /dev/null
+++ b/LoongArch-Fix-lsx-vshuf.c-and-lasx-xvshuf_b.c-tests-.patch
@@ -0,0 +1,907 @@
+From 40366b89e9c8e727af70ecf7007cba6c51e4b7d2 Mon Sep 17 00:00:00 2001
+From: Jiahao Xu 
+Date: Wed, 29 Nov 2023 11:16:59 +0800
+Subject: [PATCH] LoongArch: Fix lsx-vshuf.c and lasx-xvshuf_b.c tests fail on
+ LA664 [PR112611]
+
+For [x]vshuf instructions, if the index value in the selector exceeds 63, it triggers
+undefined behavior on LA464, but not on LA664. To ensure compatibility of these two
+tests on both LA464 and LA664, we have modified both tests to ensure that the index
+value in the selector does not exceed 63.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/112611
+	* gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c: Sure index less than 64.
+	* gcc.target/loongarch/vector/lsx/lsx-vshuf.c: Ditto.
+
+Signed-off-by: ticat_fp 
+---
+ .../loongarch/vector/lasx/lasx-xvshuf_b.c     | 343 ++++++------------
+ .../loongarch/vector/lsx/lsx-vshuf.c          | 162 +++------
+ 2 files changed, 164 insertions(+), 341 deletions(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+index d8a29dbd225..b8ab387118a 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lasx/lasx-xvshuf_b.c
+@@ -43,9 +43,9 @@ main ()
+   *((unsigned long *)&__m256i_op1[1]) = 0xfffffefefffffefe;
+   *((unsigned long *)&__m256i_op1[0]) = 0xfffffefefffffefe;
+   *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0xfffffff8fffffff8;
++  *((unsigned long *)&__m256i_op2[2]) = 0x3f3f3f383f3f3f38;
+   *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[0]) = 0xfffffff8fc000000;
++  *((unsigned long *)&__m256i_op2[0]) = 0x3f3f3f383c000000;
+   *((unsigned long *)&__m256i_result[3]) = 0xfafafafafafafafa;
+   *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[1]) = 0xfefefefefefefefe;
+@@ -137,33 +137,14 @@ main ()
+   *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[0]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x0000ffffffffffff;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000ffff0000ffff;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x0000ffffffffffff;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x0000ffff0000ffff;
++  *((unsigned long *)&__m256i_op2[3]) = 0x0000111111111111;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000222200002222;
++  *((unsigned long *)&__m256i_op2[1]) = 0x0000111111111111;
++  *((unsigned long *)&__m256i_op2[0]) = 0x0000222200002222;
+   *((unsigned long *)&__m256i_result[3]) = 0xffff000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0xffff0000ffff0000;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_result[1]) = 0xffff000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0xffff0000ffff0000;
+-  __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
+-  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+-
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x000000000000ffff;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x000000000000ffff;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
+   __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+@@ -176,7 +157,7 @@ main ()
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000077fff;
++  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000032f1f;
+   *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
+@@ -186,9 +167,9 @@ main ()
+   __m256i_out = __lasx_xvshuf_b (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0xfffffffffffffefe;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000101;
+-  *((unsigned long *)&__m256i_op0[1]) = 0xfffffffffffffefe;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0011001100110011;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0011001100110011;
+   *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000101;
+   *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op1[2]) = 0x67eee33567eee435;
+@@ -198,35 +179,16 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op2[1]) = 0x00000000ffffffff;
+   *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0022002200000000;
+   *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+-  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+-  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+-
+-  *((unsigned long *)&__m256i_op0[3]) = 0xffffffff80000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0xffffffff80000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x001f001f00000000;
+   *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
+@@ -243,10 +205,10 @@ main ()
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0011001100110011;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0011001100110011;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0011001100110011;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0011001100110011;
+   *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op1[2]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
+@@ -255,17 +217,17 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[2]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[1]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_result[0]) = 0xffffffffffffffff;
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[3]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[2]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[1]) = 0x003f003f003f003f;
++  *((unsigned long *)&__m256i_op0[0]) = 0x003f003f003f003f;
+   *((unsigned long *)&__m256i_op1[3]) = 0xefdfefdf00000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0xefdfefdfefdfefdf;
+   *((unsigned long *)&__m256i_op1[1]) = 0xefdfefdf00000000;
+@@ -274,36 +236,17 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op2[1]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op2[0]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+-  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+-  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+-
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[2]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[1]) = 0xefdfefdfefdfefdf;
++  *((unsigned long *)&__m256i_result[0]) = 0xefdfefdfefdfefdf;
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x7575ffff75757595;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x7575ffff7575f575;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x7575ffff75757595;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x7575ffff7575f575;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0035000000350005;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0035000000350015;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0035000000350025;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0035000000350035;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000003;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000003;
+@@ -312,10 +255,10 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0x7575757575757575;
+   *((unsigned long *)&__m256i_op2[1]) = 0x7575757575757575;
+   *((unsigned long *)&__m256i_op2[0]) = 0x7575757575757575;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[2]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[1]) = 0x7575757575757575;
++  *((unsigned long *)&__m256i_result[0]) = 0x7575757575757575;
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+@@ -357,29 +300,10 @@ main ()
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+-  __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+-  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+-
+-  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000fffe;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x00000000000000f0;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000fffe;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x00000000000000f0;
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000003e;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000003e;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000010;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+@@ -389,16 +313,16 @@ main ()
+   *((unsigned long *)&__m256i_op2[1]) = 0x8000000000000000;
+   *((unsigned long *)&__m256i_op2[0]) = 0x000000ffff88ff88;
+   *((unsigned long *)&__m256i_result[3]) = 0xff88ff88ff880000;
+-  *((unsigned long *)&__m256i_result[2]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_result[2]) = 0xff88ff88ff88ff88;
+   *((unsigned long *)&__m256i_result[1]) = 0xff88ff88ff880000;
+-  *((unsigned long *)&__m256i_result[0]) = 0xff88ff88ff880000;
++  *((unsigned long *)&__m256i_result[0]) = 0xff88ff88ff88ff88;
+   __m256i_out = __lasx_xvshuf_h (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x000000010000ffe1;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000101001e18;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x000000010000ffe1;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000101001e18;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000011;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000018;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000001;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000008;
+   *((unsigned long *)&__m256i_op1[3]) = 0x98111cca98111cca;
+   *((unsigned long *)&__m256i_op1[2]) = 0x98111cca98111cca;
+   *((unsigned long *)&__m256i_op1[1]) = 0x98111cca98111cca;
+@@ -407,17 +331,17 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0x0000000101001e18;
+   *((unsigned long *)&__m256i_op2[1]) = 0x000000010000ffe1;
+   *((unsigned long *)&__m256i_op2[0]) = 0x0000000101001e18;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000100000001;
+   *((unsigned long *)&__m256i_result[2]) = 0x0000000101001e18;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000100000001;
+   *((unsigned long *)&__m256i_result[0]) = 0x0000000101001e18;
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x80008000b3e8fef1;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x80008000802ea100;
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000010000001a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001100000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000002100000010;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000310000001f;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+@@ -426,17 +350,17 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000002;
+   *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000001;
+   *((unsigned long *)&__m256i_op2[0]) = 0x00000000012e2110;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000001;
+   *((unsigned long *)&__m256i_result[2]) = 0x0000000200000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x012e2110012e2110;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000012e2110;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000082a54290;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x00000000028aa700;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000082a54290;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000002a54287;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000002f00000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000001a00000000;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000010000001c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000e0000000c;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x00000000002a542a;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+@@ -447,8 +371,8 @@ main ()
+   *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x00000000002a542a;
++  *((unsigned long *)&__m256i_result[0]) = 0x00000000002a542a;
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+@@ -471,10 +395,10 @@ main ()
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[2]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[1]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m256i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000100000031;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000100000031;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000100000031;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000100000031;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+@@ -490,10 +414,10 @@ main ()
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0001000100010001;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0001000100010001;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0001000100010001;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0001000100010001;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000200000001;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000400000003;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000600000005;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000800000007;
+   *((unsigned long *)&__m256i_op1[3]) = 0x000000007fc00000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x000000007fc00000;
+   *((unsigned long *)&__m256i_op1[1]) = 0x000000007fc00000;
+@@ -503,7 +427,7 @@ main ()
+   *((unsigned long *)&__m256i_op2[1]) = 0xdfffffffdfffffff;
+   *((unsigned long *)&__m256i_op2[0]) = 0x8000000080000000;
+   *((unsigned long *)&__m256i_result[3]) = 0x8000000080000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x8000000080000000;
++  *((unsigned long *)&__m256i_result[2]) = 0x7fc00000dfffffff;
+   *((unsigned long *)&__m256i_result[1]) = 0x8000000080000000;
+   *((unsigned long *)&__m256i_result[0]) = 0x8000000080000000;
+   __m256i_out = __lasx_xvshuf_w (__m256i_op0, __m256i_op1, __m256i_op2);
+@@ -529,9 +453,9 @@ main ()
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+   *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0001000104000200;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000030;
+   *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0001000104000200;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000001000000000;
+   *((unsigned long *)&__m256i_op1[3]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m256i_op1[2]) = 0xffff0000ffff0000;
+   *((unsigned long *)&__m256i_op1[1]) = 0xffffffffffffffff;
+@@ -585,10 +509,10 @@ main ()
+   __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000fffffe01fe52;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x00000000ff01ff02;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000fffffe01fe52;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x00000000ff01ff02;
++  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000001;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000003;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000800000000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000080008001;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000800000000000;
+@@ -597,36 +521,17 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0x000000000000ffff;
+   *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op2[0]) = 0x000000000000ffff;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000080008001;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000080008001;
+-  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+-  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+-
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x000000000000ffff;
+   *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000080008001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000800000000000;
+   __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+   *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000011;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000022;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000033;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+@@ -642,44 +547,6 @@ main ()
+   __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+-  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+-  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+-
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[2]) = 0x0008000000000000;
+-  *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op1[0]) = 0x0008000000000000;
+-  *((unsigned long *)&__m256i_op2[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
+-  __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+-  ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+-
+   *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+@@ -700,9 +567,9 @@ main ()
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+   *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000002000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000002000000000;
++  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000010;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000030;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000000000;
+@@ -718,10 +585,10 @@ main ()
+   __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0xfffeb6839ffffd80;
+-  *((unsigned long *)&__m256i_op0[2]) = 0xfffeb8649d0d6250;
+-  *((unsigned long *)&__m256i_op0[1]) = 0xfffeb6839ffffd80;
+-  *((unsigned long *)&__m256i_op0[0]) = 0xfffeb8649d0d6250;
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000000a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x000000000000002c;
++  *((unsigned long *)&__m256i_op0[0]) = 0x000000000000003d;
+   *((unsigned long *)&__m256i_op1[3]) = 0xfffeb6839ffffd80;
+   *((unsigned long *)&__m256i_op1[2]) = 0xfffe97c020010001;
+   *((unsigned long *)&__m256i_op1[1]) = 0xfffeb6839ffffd80;
+@@ -730,17 +597,17 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0xfffe97c020010001;
+   *((unsigned long *)&__m256i_op2[1]) = 0xfffeb6839ffffd80;
+   *((unsigned long *)&__m256i_op2[0]) = 0xfffe97c020010001;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[2]) = 0xfffeb6839ffffd80;
++  *((unsigned long *)&__m256i_result[1]) = 0xfffe97c020010001;
++  *((unsigned long *)&__m256i_result[0]) = 0xfffeb6839ffffd80;
+   __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+-  *((unsigned long *)&__m256i_op0[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_op0[3]) = 0x000000000000001a;
++  *((unsigned long *)&__m256i_op0[2]) = 0x000000000000001b;
++  *((unsigned long *)&__m256i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m256i_op0[0]) = 0x0000000000000007;
+   *((unsigned long *)&__m256i_op1[3]) = 0x0000000000010001;
+   *((unsigned long *)&__m256i_op1[2]) = 0x0000000000010001;
+   *((unsigned long *)&__m256i_op1[1]) = 0x0000000000010001;
+@@ -749,10 +616,10 @@ main ()
+   *((unsigned long *)&__m256i_op2[2]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op2[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m256i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[3]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[2]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m256i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m256i_result[3]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[2]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[1]) = 0x0000000000010001;
++  *((unsigned long *)&__m256i_result[0]) = 0x0000000000010001;
+   __m256i_out = __lasx_xvshuf_d (__m256i_op0, __m256i_op1, __m256i_op2);
+   ASSERTEQ_64 (__LINE__, __m256i_result, __m256i_out);
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+index 8153964cf1d..f3b800f8804 100644
+--- a/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
++++ b/gcc/testsuite/gcc.target/loongarch/vector/lsx/lsx-vshuf.c
+@@ -20,7 +20,7 @@ main ()
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000401000001;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0001000100000004;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[0]) = 0x00000000007f0000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x00000000003f0000;
+   *((unsigned long *)&__m128i_result[1]) = 0x0404040404040404;
+   *((unsigned long *)&__m128i_result[0]) = 0x0404040404000404;
+   __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
+@@ -31,7 +31,7 @@ main ()
+   *((unsigned long *)&__m128i_op1[1]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_op2[0]) = 0x3f2f1f0f00000000;
+   *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
+   __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
+@@ -63,10 +63,10 @@ main ()
+   *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m128i_op1[1]) = 0x52525252adadadad;
+   *((unsigned long *)&__m128i_op1[0]) = 0x52525252adadadad;
+-  *((unsigned long *)&__m128i_op2[1]) = 0x800000007fffffff;
+-  *((unsigned long *)&__m128i_op2[0]) = 0x800000007fffffff;
+-  *((unsigned long *)&__m128i_result[1]) = 0x00adadad00000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x00adadad00000000;
++  *((unsigned long *)&__m128i_op2[1]) = 0x2000000004030201;
++  *((unsigned long *)&__m128i_op2[0]) = 0x2000000014131211;
++  *((unsigned long *)&__m128i_result[1]) = 0xadadadad52adadad;
++  *((unsigned long *)&__m128i_result[0]) = 0xadadadadffffffff;
+   __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+@@ -96,10 +96,10 @@ main ()
+   *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000100;
+   *((unsigned long *)&__m128i_op1[1]) = 0x04040403fafafafc;
+   *((unsigned long *)&__m128i_op1[0]) = 0x000000000000ff80;
+-  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x8080808080808080;
+-  *((unsigned long *)&__m128i_result[0]) = 0x8080808080808080;
++  *((unsigned long *)&__m128i_op2[1]) = 0x00101a1b1c1d1e1f;
++  *((unsigned long *)&__m128i_op2[0]) = 0x0807060504030201;
++  *((unsigned long *)&__m128i_result[1]) = 0x8000020202000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xfc000000000000ff;
+   __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+@@ -118,10 +118,10 @@ main ()
+   *((unsigned long *)&__m128i_op0[0]) = 0xffd7ff8dffa4ff7a;
+   *((unsigned long *)&__m128i_op1[1]) = 0x34947b4b11684f92;
+   *((unsigned long *)&__m128i_op1[0]) = 0xee297a731e5c5f86;
+-  *((unsigned long *)&__m128i_op2[1]) = 0x7fffffffffffffff;
+-  *((unsigned long *)&__m128i_op2[0]) = 0xffc0000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000868686868686;
++  *((unsigned long *)&__m128i_op2[1]) = 0x1f0710301a2b332d;
++  *((unsigned long *)&__m128i_op2[0]) = 0x1f20000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffee7a7a9811ff7b;
++  *((unsigned long *)&__m128i_result[0]) = 0xff86868686868686;
+   __m128i_out = __lsx_vshuf_b (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+@@ -136,19 +136,19 @@ main ()
+   __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[1]) = 0x001f002f003f000f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x001f002f003f000f;
+   *((unsigned long *)&__m128i_op1[1]) = 0x7fffffffffffffff;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x7fff7fff7fff7fff;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff;
+   __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[1]) = 0x000100040010001f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0002000300110012;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x000000002bfd9461;
+   *((unsigned long *)&__m128i_op2[1]) = 0x00007fff00007fff;
+@@ -169,74 +169,41 @@ main ()
+   __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
+-  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+-  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+-
+-  *((unsigned long *)&__m128i_op0[1]) = 0x000300037ff000ff;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0003000300a10003;
++  *((unsigned long *)&__m128i_op0[1]) = 0x000300030000001f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0003000300000003;
+   *((unsigned long *)&__m128i_op1[1]) = 0x000300037ff000ff;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0003000300a10003;
+   *((unsigned long *)&__m128i_op2[1]) = 0x000000007ff000ff;
+   *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000003;
+   *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
+   __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0909000009090000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0909000009090000;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0019000000090000;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0019000000090000;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0909000009090000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0909000009090000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x002a05a2f059094a;
+   *((unsigned long *)&__m128i_op2[0]) = 0x05ad3ba576eae048;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0909e0480909e048;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0909e0480909e048;
+-  __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+-  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+-
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x909e0480909e048;
++  *((unsigned long *)&__m128i_result[0]) = 0x909e0480909e048;
+   __m128i_out = __lsx_vshuf_h (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x00000000000000c0;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x00000001ffffff29;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000030;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000029;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x00000000000000c0;
+   *((unsigned long *)&__m128i_op2[0]) = 0x00000001ffffff29;
+-  *((unsigned long *)&__m128i_result[1]) = 0xffffff2900000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffff29ffffff29;
+   *((unsigned long *)&__m128i_result[0]) = 0x0000000100000001;
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+   *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
+-  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+-  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+-
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x00000000000000ff;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001f;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x1f54e0ab00000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0101010101010101;
+@@ -246,19 +213,8 @@ main ()
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op2[0]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
+-  __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+-  ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+-
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000007fff;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000002f0000002f;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000001000000000;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000020000020;
+@@ -279,30 +235,30 @@ main ()
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0000000004870ba0;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000900000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000003;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000001000000010;
+   *((unsigned long *)&__m128i_op2[1]) = 0x8000000100000000;
+   *((unsigned long *)&__m128i_op2[0]) = 0x8000000000000103;
+   *((unsigned long *)&__m128i_result[1]) = 0x0000010300000103;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000010300000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x0000010380000001;
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x000000ff0000857a;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x05fafe0101fe000e;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001000000007;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000002000000001;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m128i_op2[0]) = 0xffffffffffffffff;
+   *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0xffffffff00000000;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffffffffffffff;
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0xada4808924882588;
+-  *((unsigned long *)&__m128i_op0[0]) = 0xacad25090caca5a4;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000001a0000001b;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000a0000000b;
+   *((unsigned long *)&__m128i_op1[1]) = 0x021b7d24c9678a35;
+   *((unsigned long *)&__m128i_op1[0]) = 0x030298a6a1030a49;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+@@ -312,8 +268,8 @@ main ()
+   __m128i_out = __lsx_vshuf_w (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x00000000ffffffff;
+-  *((unsigned long *)&__m128i_op0[0]) = 0xffffffffffffffff;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000003;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000013;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+@@ -323,14 +279,14 @@ main ()
+   __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0xdfa6e0c6d46cdc13;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x21fc7081ec69b5f2;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000001;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000011;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x000000002c002400;
+   *((unsigned long *)&__m128i_op2[1]) = 0xffffb96bffff57c9;
+   *((unsigned long *)&__m128i_op2[0]) = 0xffff6080ffff4417;
+-  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0xffffb96bffff57c9;
++  *((unsigned long *)&__m128i_result[0]) = 0xffffb96bffff57c9;
+   __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+@@ -345,8 +301,8 @@ main ()
+   __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000020;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000010;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000002000;
+   *((unsigned long *)&__m128i_op1[0]) = 0xf0003000f0003000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+@@ -356,30 +312,30 @@ main ()
+   __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x021b7d2449678a35;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x030298a621030a49;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000007;
++  *((unsigned long *)&__m128i_op0[0]) = 0x000000000000001a;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x7fff7fff7fff7fff;
+   *((unsigned long *)&__m128i_op2[1]) = 0x021b7d24c9678a35;
+   *((unsigned long *)&__m128i_op2[0]) = 0x030298a6a1030a49;
+-  *((unsigned long *)&__m128i_result[1]) = 0x021b7d24c9678a35;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[0]) = 0x7fff7fff7fff7fff;
+   __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0x7f7f00007f7f0000;
+-  *((unsigned long *)&__m128i_op0[0]) = 0x7f7f80807f7f8080;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000002;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000001;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000fffe0000fffe;
+   *((unsigned long *)&__m128i_op2[1]) = 0x7f8000007f800000;
+   *((unsigned long *)&__m128i_op2[0]) = 0x7f8000007f800000;
+-  *((unsigned long *)&__m128i_result[1]) = 0x7f8000007f800000;
+-  *((unsigned long *)&__m128i_result[0]) = 0x0000000000000000;
++  *((unsigned long *)&__m128i_result[1]) = 0x0000fffe0000fffe;
++  *((unsigned long *)&__m128i_result[0]) = 0x7f8000007f800000;
+   __m128i_out = __lsx_vshuf_d (__m128i_op0, __m128i_op1, __m128i_op2);
+   ASSERTEQ_64 (__LINE__, __m128i_result, __m128i_out);
+ 
+-  *((unsigned long *)&__m128i_op0[1]) = 0xffffffffffffffff;
+-  *((unsigned long *)&__m128i_op0[0]) = 0xfffffffffff10000;
++  *((unsigned long *)&__m128i_op0[1]) = 0x0000000000000010;
++  *((unsigned long *)&__m128i_op0[0]) = 0x0000000000000020;
+   *((unsigned long *)&__m128i_op1[1]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op1[0]) = 0x0000000000000000;
+   *((unsigned long *)&__m128i_op2[1]) = 0x0000000000000000;
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-pr106828-by-define-hook-TARGET_ASAN_SH.patch b/LoongArch-Fix-pr106828-by-define-hook-TARGET_ASAN_SH.patch
new file mode 100644
index 0000000000000000000000000000000000000000..20cbb0479f0349ba7a21826498b9a496b9230526
--- /dev/null
+++ b/LoongArch-Fix-pr106828-by-define-hook-TARGET_ASAN_SH.patch
@@ -0,0 +1,69 @@
+From a70fe51d9813d490a89cbc8da1ae4b040bf8b37e Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Wed, 7 Sep 2022 11:25:45 +0800
+Subject: [PATCH 017/124] LoongArch: Fix pr106828 by define hook
+ TARGET_ASAN_SHADOW_OFFSET in loongarch backend [PR106828].
+
+gcc/ChangeLog:
+
+	PR target/106828
+	* config/loongarch/loongarch.cc (loongarch_asan_shadow_offset): New.
+	(TARGET_ASAN_SHADOW_OFFSET): New.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/106828
+	* g++.target/loongarch/pr106828.C: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc             | 13 +++++++++++++
+ gcc/testsuite/g++.target/loongarch/pr106828.C |  4 ++++
+ 2 files changed, 17 insertions(+)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/pr106828.C
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index c9187bf81..98c0e26cd 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6466,6 +6466,16 @@ loongarch_use_anchors_for_symbol_p (const_rtx symbol)
+   return default_use_anchors_for_symbol_p (symbol);
+ }
+ 
++/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */
++
++static unsigned HOST_WIDE_INT
++loongarch_asan_shadow_offset (void)
++{
++  /* We only have libsanitizer support for LOONGARCH64 at present.
++     This value is taken from the file libsanitizer/asan/asan_mappint.h.  */
++  return TARGET_64BIT ? (HOST_WIDE_INT_1 << 46) : 0;
++}
++
+ /* Initialize the GCC target structure.  */
+ #undef TARGET_ASM_ALIGNED_HI_OP
+ #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+@@ -6660,6 +6670,9 @@ loongarch_use_anchors_for_symbol_p (const_rtx symbol)
+ #undef  TARGET_USE_ANCHORS_FOR_SYMBOL_P
+ #define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p
+ 
++#undef TARGET_ASAN_SHADOW_OFFSET
++#define TARGET_ASAN_SHADOW_OFFSET loongarch_asan_shadow_offset
++
+ struct gcc_target targetm = TARGET_INITIALIZER;
+ 
+ #include "gt-loongarch.h"
+diff --git a/gcc/testsuite/g++.target/loongarch/pr106828.C b/gcc/testsuite/g++.target/loongarch/pr106828.C
+new file mode 100644
+index 000000000..190c1db71
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/pr106828.C
+@@ -0,0 +1,4 @@
++/* { dg-do-preprocess } */
++/* { dg-options "-mabi=lp64d -fsanitize=address" } */
++
++/* Tests whether the compiler supports compile option '-fsanitize=address'.  */
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-unintentional-bash-ism-in-r14-3665.patch b/LoongArch-Fix-unintentional-bash-ism-in-r14-3665.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7d6b160d5208cb79800a4e5f9e6035c6a3eb62c6
--- /dev/null
+++ b/LoongArch-Fix-unintentional-bash-ism-in-r14-3665.patch
@@ -0,0 +1,31 @@
+From 8e5c9f349877af07dde4804974d47625c1292956 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Wed, 6 Sep 2023 17:57:47 +0800
+Subject: [PATCH 070/124] LoongArch: Fix unintentional bash-ism in r14-3665.
+
+gcc/ChangeLog:
+
+	* config.gcc: remove non-POSIX syntax "<<<".
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config.gcc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 19f584344..57e724080 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -5263,7 +5263,7 @@ case "${target}" in
+ 				if test x${parse_state} = x"abi-base"; then
+ 					# Base ABI type
+ 					case ${component} in
+-					lp64d | lp64f | lp64s) elem_tmp="ABI_BASE_$(tr a-z A-Z <<< ${component}),";;
++					lp64d | lp64f | lp64s) elem_tmp="ABI_BASE_$(echo ${component} | tr a-z A-Z),";;
+ 					*)
+ 						echo "Unknown base ABI \"${component}\" in --with-multilib-list." 1>&2
+ 						exit 1
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-unintentionally-breakage-in-r14-3665.patch b/LoongArch-Fix-unintentionally-breakage-in-r14-3665.patch
new file mode 100644
index 0000000000000000000000000000000000000000..93c427d1ea4be4869e2417fd0180664f80e048da
--- /dev/null
+++ b/LoongArch-Fix-unintentionally-breakage-in-r14-3665.patch
@@ -0,0 +1,34 @@
+From 8de6f5e1aad2a1ff85ff3a4b732055d625c61139 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 5 Sep 2023 20:02:51 +0800
+Subject: [PATCH 067/124] LoongArch: Fix unintentionally breakage in r14-3665
+
+Fix a build failure with no system assembler or system old assembler.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-opts.h (HAVE_AS_EXPLICIT_RELOCS):
+	Define to 0 if not defined yet.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-opts.h | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index e3f9b6f99..0d148e43b 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -93,4 +93,8 @@ loongarch_update_gcc_opt_status (struct loongarch_target *target,
+    while -m[no]-memcpy imposes a global constraint.  */
+ #define TARGET_DO_OPTIMIZE_BLOCK_MOVE_P  loongarch_do_optimize_block_move_p()
+ 
++#ifndef HAVE_AS_EXPLICIT_RELOCS
++#define HAVE_AS_EXPLICIT_RELOCS 0
++#endif
++
+ #endif /* LOONGARCH_OPTS_H */
+-- 
+2.33.0
+
diff --git a/LoongArch-Fix-up-memcpy-vec-3.c-test-case.patch b/LoongArch-Fix-up-memcpy-vec-3.c-test-case.patch
new file mode 100644
index 0000000000000000000000000000000000000000..492f37f47ba0113cc310a402d536494533974b19
--- /dev/null
+++ b/LoongArch-Fix-up-memcpy-vec-3.c-test-case.patch
@@ -0,0 +1,33 @@
+From 78896e68f50164af7827e8da01a7220764d1e296 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 9 Sep 2023 16:18:06 +0800
+Subject: [PATCH 075/124] LoongArch: Fix up memcpy-vec-3.c test case
+
+The generic code will split 16-byte copy into two 8-byte copies, so the
+vector code wouldn't be used even if -mno-strict-align.  This
+contradicted with the purpose of this test case.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/memcpy-vec-3.c: Increase the amount of
+	copied bytes to 32.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c
+index 233ed2150..db2ea510b 100644
+--- a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c
+@@ -3,4 +3,4 @@
+ /* { dg-final { scan-assembler-not "vst" } } */
+ 
+ extern char a[], b[];
+-void test() { __builtin_memcpy(a, b, 16); }
++void test() { __builtin_memcpy(a, b, 32); }
+-- 
+2.33.0
+
diff --git a/LoongArch-Fixed-a-bug-in-the-loongarch-architecture-.patch b/LoongArch-Fixed-a-bug-in-the-loongarch-architecture-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3f1a351173706f6c4330fa300bb3fa1969005c28
--- /dev/null
+++ b/LoongArch-Fixed-a-bug-in-the-loongarch-architecture-.patch
@@ -0,0 +1,43 @@
+From 80ed9ab39d9b1b08ad9d054f16d65b2a249a89e5 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Wed, 12 Oct 2022 11:02:11 +0800
+Subject: [PATCH 022/124] LoongArch: Fixed a bug in the loongarch architecture
+ of libitm package.
+
+Add a soft floating point condition to the register recovery part of the code.
+
+libitm/ChangeLog:
+
+	* config/loongarch/sjlj.S: Add a soft floating point condition to the
+	register recovery part of the code.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ libitm/config/loongarch/sjlj.S | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/libitm/config/loongarch/sjlj.S b/libitm/config/loongarch/sjlj.S
+index a5f9fadde..f896e400e 100644
+--- a/libitm/config/loongarch/sjlj.S
++++ b/libitm/config/loongarch/sjlj.S
+@@ -104,6 +104,8 @@ GTM_longjmp:
+         GPR_L  $s7, $r5, 10*SZ_GPR
+         GPR_L  $s8, $r5, 11*SZ_GPR
+ 
++#if !defined(__loongarch_soft_float)
++        /* Callee-saved scratch FPRs (f24-f31) */
+         FPR_L  $f24, $r5, 12*SZ_GPR + 0*SZ_FPR
+         FPR_L  $f25, $r5, 12*SZ_GPR + 1*SZ_FPR
+         FPR_L  $f26, $r5, 12*SZ_GPR + 2*SZ_FPR
+@@ -112,6 +114,7 @@ GTM_longjmp:
+         FPR_L  $f29, $r5, 12*SZ_GPR + 5*SZ_FPR
+         FPR_L  $f30, $r5, 12*SZ_GPR + 6*SZ_FPR
+         FPR_L  $f31, $r5, 12*SZ_GPR + 7*SZ_FPR
++#endif
+ 
+         GPR_L  $r7, $r5, 2*SZ_GPR
+         GPR_L  $fp, $r5, 0*SZ_GPR
+-- 
+2.33.0
+
diff --git a/LoongArch-Fixed-a-compilation-failure-with-c-in-inli.patch b/LoongArch-Fixed-a-compilation-failure-with-c-in-inli.patch
new file mode 100644
index 0000000000000000000000000000000000000000..52ca4bc01473d243acc95abd3f6c358925d40111
--- /dev/null
+++ b/LoongArch-Fixed-a-compilation-failure-with-c-in-inli.patch
@@ -0,0 +1,182 @@
+From 49a63dbaf3b4296f0b1f8a0e11790cc3455aeec7 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Wed, 18 Jan 2023 11:06:56 +0800
+Subject: [PATCH 034/124] LoongArch: Fixed a compilation failure with '%c' in
+ inline assembly [PR107731].
+
+Co-authored-by: Yang Yujie 
+
+	PR target/107731
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_classify_address):
+	Add precessint for CONST_INT.
+	(loongarch_print_operand_reloc): Operand modifier 'c' is supported.
+	(loongarch_print_operand): Increase the processing of '%c'.
+	* doc/extend.texi: Adds documents for LoongArch operand modifiers.
+	And port the public operand modifiers information to this document.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/tst-asm-const.c: Moved to...
+	* gcc.target/loongarch/pr107731.c: ...here.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc             | 14 +++++
+ gcc/doc/extend.texi                           | 51 +++++++++++++++++--
+ .../loongarch/{tst-asm-const.c => pr107731.c} |  6 +--
+ 3 files changed, 64 insertions(+), 7 deletions(-)
+ rename gcc/testsuite/gcc.target/loongarch/{tst-asm-const.c => pr107731.c} (78%)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index e59edc4cd..1a4686f03 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2074,6 +2074,11 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x,
+       return (loongarch_valid_base_register_p (info->reg, mode, strict_p)
+ 	      && loongarch_valid_lo_sum_p (info->symbol_type, mode,
+ 					   info->offset));
++    case CONST_INT:
++      /* Small-integer addresses don't occur very often, but they
++	 are legitimate if $r0 is a valid base register.  */
++      info->type = ADDRESS_CONST_INT;
++      return IMM12_OPERAND (INTVAL (x));
+ 
+     default:
+       return false;
+@@ -4932,6 +4937,7 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+ 
+    'A'	Print a _DB suffix if the memory model requires a release.
+    'b'	Print the address of a memory operand, without offset.
++   'c'  Print an integer.
+    'C'	Print the integer branch condition for comparison OP.
+    'd'	Print CONST_INT OP in decimal.
+    'F'	Print the FPU branch condition for comparison OP.
+@@ -4978,6 +4984,14 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+        fputs ("_db", file);
+       break;
+ 
++    case 'c':
++      if (CONST_INT_P (op))
++	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op));
++      else
++	output_operand_lossage ("unsupported operand for code '%c'", letter);
++
++      break;
++
+     case 'C':
+       loongarch_print_int_branch_condition (file, code, letter);
+       break;
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index da2840c23..3c101ca89 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -10414,8 +10414,10 @@ ensures that modifying @var{a} does not affect the address referenced by
+ is undefined if @var{a} is modified before using @var{b}.
+ 
+ @code{asm} supports operand modifiers on operands (for example @samp{%k2} 
+-instead of simply @samp{%2}). Typically these qualifiers are hardware 
+-dependent. The list of supported modifiers for x86 is found at 
++instead of simply @samp{%2}). @ref{GenericOperandmodifiers,
++Generic Operand modifiers} lists the modifiers that are available
++on all targets.  Other modifiers are hardware dependent.
++For example, the list of supported modifiers for x86 is found at
+ @ref{x86Operandmodifiers,x86 Operand modifiers}.
+ 
+ If the C code that follows the @code{asm} makes no use of any of the output 
+@@ -10683,8 +10685,10 @@ optimizers may discard the @code{asm} statement as unneeded
+ (see @ref{Volatile}).
+ 
+ @code{asm} supports operand modifiers on operands (for example @samp{%k2} 
+-instead of simply @samp{%2}). Typically these qualifiers are hardware 
+-dependent. The list of supported modifiers for x86 is found at 
++instead of simply @samp{%2}). @ref{GenericOperandmodifiers,
++Generic Operand modifiers} lists the modifiers that are available
++on all targets.  Other modifiers are hardware dependent.
++For example, the list of supported modifiers for x86 is found at
+ @ref{x86Operandmodifiers,x86 Operand modifiers}.
+ 
+ In this example using the fictitious @code{combine} instruction, the 
+@@ -11036,6 +11040,30 @@ lab:
+ @}
+ @end example
+ 
++@anchor{GenericOperandmodifiers}
++@subsubsection Generic Operand Modifiers
++@noindent
++The following table shows the modifiers supported by all targets and their effects:
++
++@multitable {Modifier} {Description} {Example}
++@headitem Modifier @tab Description @tab Example
++@item @code{c}
++@tab Require a constant operand and print the constant expression with no punctuation.
++@tab @code{%c0}
++@item @code{n}
++@tab Like @samp{%c} except that the value of the constant is negated before printing.
++@tab @code{%n0}
++@item @code{a}
++@tab Substitute a memory reference, with the actual operand treated as the address.
++This may be useful when outputting a ``load address'' instruction, because
++often the assembler syntax for such an instruction requires you to write the
++operand as if it were a memory reference.
++@tab @code{%a0}
++@item @code{l}
++@tab Print the label name with no punctuation.
++@tab @code{%l0}
++@end multitable
++
+ @anchor{x86Operandmodifiers}
+ @subsubsection x86 Operand Modifiers
+ 
+@@ -11386,6 +11414,21 @@ constant.  Used to select the specified bit position.
+ @item @code{x} @tab Equivialent to @code{X}, but only for pointers.
+ @end multitable
+ 
++@anchor{loongarchOperandmodifiers}
++@subsubsection LoongArch Operand Modifiers
++
++The list below describes the supported modifiers and their effects for LoongArch.
++
++@multitable @columnfractions .10 .90
++@headitem Modifier @tab Description
++@item @code{d} @tab Same as @code{c}.
++@item @code{i} @tab Print the character ''@code{i}'' if the operand is not a register.
++@item @code{m} @tab Same as @code{c}, but the printed value is @code{operand - 1}.
++@item @code{X} @tab Print a constant integer operand in hexadecimal.
++@item @code{z} @tab Print the operand in its unmodified form, followed by a comma.
++@end multitable
++
++
+ @lowersections
+ @include md.texi
+ @raisesections
+diff --git a/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c b/gcc/testsuite/gcc.target/loongarch/pr107731.c
+similarity index 78%
+rename from gcc/testsuite/gcc.target/loongarch/tst-asm-const.c
+rename to gcc/testsuite/gcc.target/loongarch/pr107731.c
+index 2e04b99e3..80d84c48c 100644
+--- a/gcc/testsuite/gcc.target/loongarch/tst-asm-const.c
++++ b/gcc/testsuite/gcc.target/loongarch/pr107731.c
+@@ -1,13 +1,13 @@
+-/* Test asm const. */
+ /* { dg-do compile } */
+ /* { dg-final { scan-assembler-times "foo:.*\\.long 1061109567.*\\.long 52" 1 } } */
++
+ int foo ()
+ {
+   __asm__ volatile (
+           "foo:"
+           "\n\t"
+-	  ".long %a0\n\t"
+-	  ".long %a1\n\t"
++	  ".long %c0\n\t"
++	  ".long %c1\n\t"
+ 	  :
+ 	  :"i"(0x3f3f3f3f), "i"(52)
+ 	  :
+-- 
+2.33.0
+
diff --git a/LoongArch-Fixed-a-typo-in-the-comment-information-of.patch b/LoongArch-Fixed-a-typo-in-the-comment-information-of.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7013c90fbd4752e8f9dc52c455a90d12f47b7613
--- /dev/null
+++ b/LoongArch-Fixed-a-typo-in-the-comment-information-of.patch
@@ -0,0 +1,33 @@
+From cbb5f181544e35b119fee4ed150bec24eee7179c Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Wed, 28 Sep 2022 16:35:06 +0800
+Subject: [PATCH 020/124] LoongArch: Fixed a typo in the comment information of
+ the function loongarch_asan_shadow_offset.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_asan_shadow_offset):
+	Fixed typo in "asan_mapping.h".
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 98c0e26cd..e9ba3374e 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6472,7 +6472,7 @@ static unsigned HOST_WIDE_INT
+ loongarch_asan_shadow_offset (void)
+ {
+   /* We only have libsanitizer support for LOONGARCH64 at present.
+-     This value is taken from the file libsanitizer/asan/asan_mappint.h.  */
++     This value is taken from the file libsanitizer/asan/asan_mapping.h.  */
+   return TARGET_64BIT ? (HOST_WIDE_INT_1 << 46) : 0;
+ }
+ 
+-- 
+2.33.0
+
diff --git a/LoongArch-Generate-bytepick.-wd-for-suitable-bit-ope.patch b/LoongArch-Generate-bytepick.-wd-for-suitable-bit-ope.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1b870196851fa57f1248c7fa352d27776348e156
--- /dev/null
+++ b/LoongArch-Generate-bytepick.-wd-for-suitable-bit-ope.patch
@@ -0,0 +1,196 @@
+From 9311c0f56086e38fe5e9bf4bbfc2e37d0f18347c Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 3 Feb 2023 17:06:06 +0800
+Subject: [PATCH 036/124] LoongArch: Generate bytepick.[wd] for suitable bit
+ operation pattern
+
+We can use bytepick.[wd] for
+
+    a << (8 * x) | b >> (8 * (sizeof(a) - x))
+
+while a and b are uint32_t or uint64_t.  This is useful for some cases,
+for example:
+https://sourceware.org/pipermail/libc-alpha/2023-February/145203.html
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (bytepick_w_ashift_amount):
+	New define_int_iterator.
+	(bytepick_d_ashift_amount): Likewise.
+	(bytepick_imm): New define_int_attr.
+	(bytepick_w_lshiftrt_amount): Likewise.
+	(bytepick_d_lshiftrt_amount): Likewise.
+	(bytepick_w_): New define_insn template.
+	(bytepick_w__extend): Likewise.
+	(bytepick_d_): Likewise.
+	(bytepick_w): Remove unused define_insn.
+	(bytepick_d): Likewise.
+	(UNSPEC_BYTEPICK_W): Remove unused unspec.
+	(UNSPEC_BYTEPICK_D): Likewise.
+	* config/loongarch/predicates.md (const_0_to_3_operand):
+	Remove unused define_predicate.
+	(const_0_to_7_operand): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/loongarch/bytepick.C: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md             | 60 ++++++++++++++-----
+ gcc/config/loongarch/predicates.md            |  8 ---
+ gcc/testsuite/g++.target/loongarch/bytepick.C | 32 ++++++++++
+ 3 files changed, 77 insertions(+), 23 deletions(-)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/bytepick.C
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index f61db66d5..833b94753 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -48,8 +48,6 @@
+   UNSPEC_EH_RETURN
+ 
+   ;; Bit operation
+-  UNSPEC_BYTEPICK_W
+-  UNSPEC_BYTEPICK_D
+   UNSPEC_BITREV_4B
+   UNSPEC_BITREV_8B
+ 
+@@ -544,6 +542,27 @@
+ 				      (UNSPEC_FTINTRM "0")
+ 				      (UNSPEC_FTINTRP "0")])
+ 
++;; Iterator and attributes for bytepick.d
++(define_int_iterator bytepick_w_ashift_amount [8 16 24])
++(define_int_attr bytepick_w_lshiftrt_amount [(8 "24")
++					     (16 "16")
++					     (24 "8")])
++(define_int_iterator bytepick_d_ashift_amount [8 16 24 32 40 48 56])
++(define_int_attr bytepick_d_lshiftrt_amount [(8 "56")
++					     (16 "48")
++					     (24 "40")
++					     (32 "32")
++					     (40 "24")
++					     (48 "16")
++					     (56 "8")])
++(define_int_attr bytepick_imm [(8 "1")
++				 (16 "2")
++				 (24 "3")
++				 (32 "4")
++				 (40 "5")
++				 (48 "6")
++				 (56 "7")])
++
+ ;;
+ ;;  ....................
+ ;;
+@@ -3364,24 +3383,35 @@
+   [(set_attr "type" "unknown")
+    (set_attr "mode" "")])
+ 
+-(define_insn "bytepick_w"
++(define_insn "bytepick_w_"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+-	(unspec:SI [(match_operand:SI 1 "register_operand" "r")
+-		    (match_operand:SI 2 "register_operand" "r")
+-		    (match_operand:SI 3 "const_0_to_3_operand" "n")]
+-		    UNSPEC_BYTEPICK_W))]
++	(ior:SI (lshiftrt (match_operand:SI 1 "register_operand" "r")
++			  (const_int ))
++		(ashift (match_operand:SI 2 "register_operand" "r")
++			(const_int bytepick_w_ashift_amount))))]
+   ""
+-  "bytepick.w\t%0,%1,%2,%z3"
++  "bytepick.w\t%0,%1,%2,"
+   [(set_attr "mode" "SI")])
+ 
+-(define_insn "bytepick_d"
++(define_insn "bytepick_w__extend"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+-	(unspec:DI [(match_operand:DI 1 "register_operand" "r")
+-		    (match_operand:DI 2 "register_operand" "r")
+-		    (match_operand:DI 3 "const_0_to_7_operand" "n")]
+-		    UNSPEC_BYTEPICK_D))]
+-  ""
+-  "bytepick.d\t%0,%1,%2,%z3"
++	(sign_extend:DI
++	  (ior:SI (lshiftrt (match_operand:SI 1 "register_operand" "r")
++			    (const_int ))
++		  (ashift (match_operand:SI 2 "register_operand" "r")
++			  (const_int bytepick_w_ashift_amount)))))]
++  "TARGET_64BIT"
++  "bytepick.w\t%0,%1,%2,"
++  [(set_attr "mode" "SI")])
++
++(define_insn "bytepick_d_"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(ior:DI (lshiftrt (match_operand:DI 1 "register_operand" "r")
++			  (const_int ))
++		(ashift (match_operand:DI 2 "register_operand" "r")
++			(const_int bytepick_d_ashift_amount))))]
++  "TARGET_64BIT"
++  "bytepick.d\t%0,%1,%2,"
+   [(set_attr "mode" "DI")])
+ 
+ (define_insn "bitrev_4b"
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 58c3dc226..3c32b2987 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -91,14 +91,6 @@
+   (ior (match_operand 0 "const_1_operand")
+        (match_operand 0 "register_operand")))
+ 
+-(define_predicate "const_0_to_3_operand"
+-  (and (match_code "const_int")
+-       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))
+-
+-(define_predicate "const_0_to_7_operand"
+-  (and (match_code "const_int")
+-       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))
+-
+ (define_predicate "lu52i_mask_operand"
+   (and (match_code "const_int")
+        (match_test "UINTVAL (op) == 0xfffffffffffff")))
+diff --git a/gcc/testsuite/g++.target/loongarch/bytepick.C b/gcc/testsuite/g++.target/loongarch/bytepick.C
+new file mode 100644
+index 000000000..a39e2fa65
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/bytepick.C
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d" } */
++/* { dg-final { scan-assembler-times "bytepick.w\t\\\$r4,\\\$r5,\\\$r4" 3 } } */
++/* { dg-final { scan-assembler-times "bytepick.d\t\\\$r4,\\\$r5,\\\$r4" 7 } } */
++/* { dg-final { scan-assembler-not "slli.w" } } */
++
++template 
++T
++merge (T a, T b)
++{
++  return a << offs | b >> (8 * sizeof (T) - offs);
++}
++
++using u32 = __UINT32_TYPE__;
++using u64 = __UINT64_TYPE__;
++using i64 = __INT64_TYPE__;
++
++template u32 merge (u32, u32);
++template u32 merge (u32, u32);
++template u32 merge (u32, u32);
++
++template u64 merge (u64, u64);
++template u64 merge (u64, u64);
++template u64 merge (u64, u64);
++template u64 merge (u64, u64);
++template u64 merge (u64, u64);
++template u64 merge (u64, u64);
++template u64 merge (u64, u64);
++
++/* we cannot use bytepick for the following cases */
++template i64 merge (i64, i64);
++template u64 merge (u64, u64);
+-- 
+2.33.0
+
diff --git a/LoongArch-Get-__tls_get_addr-address-through-got-tab.patch b/LoongArch-Get-__tls_get_addr-address-through-got-tab.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e1879f7725bbee9b640c870dd2f147bbc1994674
--- /dev/null
+++ b/LoongArch-Get-__tls_get_addr-address-through-got-tab.patch
@@ -0,0 +1,71 @@
+From a96dee6ba3c916f9a4329b196a0c5a1652fe294f Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 18 Aug 2022 09:57:14 +0800
+Subject: [PATCH 010/124] LoongArch: Get __tls_get_addr address through got
+ table when disable plt.
+
+Fix bug, ICE with tls gd/ld var with -fno-plt.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_call_tls_get_addr):
+	Get __tls_get_addr address through got table when disable plt.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/tls-gd-noplt.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc                 | 14 ++++++++++++--
+ gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c | 12 ++++++++++++
+ 2 files changed, 24 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 1b5af2c7d..76bf55ea4 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -2448,8 +2448,18 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 	gcc_unreachable ();
+     }
+ 
+-  insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol,
+-						  const0_rtx));
++  if (flag_plt)
++    insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol,
++						    const0_rtx));
++  else
++    {
++      rtx dest = gen_reg_rtx (Pmode);
++      rtx high = gen_reg_rtx (Pmode);
++      loongarch_emit_move (high, gen_rtx_HIGH (Pmode, loongarch_tls_symbol));
++      emit_insn (gen_ld_from_got (Pmode, dest, high, loongarch_tls_symbol));
++      insn = emit_call_insn (gen_call_value_internal (v0, dest, const0_rtx));
++    }
++
+   RTL_CONST_CALL_P (insn) = 1;
+   use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
+   insn = get_insns ();
+diff --git a/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
+new file mode 100644
+index 000000000..32a0acf9b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/tls-gd-noplt.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fno-plt -mcmodel=normal" } */
++/* { dg-final { scan-assembler "pcalau12i\t.*%got_pc_hi20\\(__tls_get_addr\\)" } } */
++
++__attribute__ ((tls_model ("global-dynamic"))) __thread int a;
++
++void
++test (void)
++{
++  a = 10;
++}
++
+-- 
+2.33.0
+
diff --git a/LoongArch-Implement-128-bit-floating-point-functions.patch b/LoongArch-Implement-128-bit-floating-point-functions.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5d23401b8bd4cdbe9daa41c91edececeedd49676
--- /dev/null
+++ b/LoongArch-Implement-128-bit-floating-point-functions.patch
@@ -0,0 +1,204 @@
+From 12ab9eae9e8a5b83c778182f15c6216bcbc3dc36 Mon Sep 17 00:00:00 2001
+From: chenxiaolong 
+Date: Fri, 1 Sep 2023 11:22:42 +0800
+Subject: [PATCH 054/124] LoongArch: Implement 128-bit floating point functions
+ in gcc.
+
+During implementation, float128_type_node is bound with the type "__float128"
+so that the compiler can correctly identify the type   of the function. The
+"q" suffix is associated with the "f128" function, which makes GCC more
+flexible to support different user input cases, implementing functions such
+as __builtin_{huge_valq, infq, fabsq, copysignq, nanq, nansq}.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-builtins.cc (loongarch_init_builtins):
+	Associate the __float128 type to float128_type_node so that it can
+	be recognized by the compiler.
+	* config/loongarch/loongarch-c.cc (loongarch_cpu_cpp_builtins):
+	Add the flag "FLOAT128_TYPE" to gcc and associate a function
+	with the suffix "q" to "f128".
+	* doc/extend.texi: Added support for 128-bit floating-point functions on
+	the LoongArch architecture.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/math-float-128.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-builtins.cc    |  5 ++
+ gcc/config/loongarch/loongarch-c.cc           | 11 +++
+ gcc/doc/extend.texi                           | 20 ++++-
+ .../gcc.target/loongarch/math-float-128.c     | 81 +++++++++++++++++++
+ 4 files changed, 114 insertions(+), 3 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/math-float-128.c
+
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index 64fe11168..cb0ea1664 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -256,6 +256,11 @@ loongarch_init_builtins (void)
+   unsigned int i;
+   tree type;
+ 
++  /* Register the type float128_type_node as a built-in type and
++     give it an alias "__float128".  */
++  (*lang_hooks.types.register_builtin_type) (float128_type_node,
++					    "__float128");
++
+   /* Iterate through all of the bdesc arrays, initializing all of the
+      builtin functions.  */
+   for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++)
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index d6e3e19f0..f779a7355 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -99,6 +99,17 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+   else
+     builtin_define ("__loongarch_frlen=0");
+ 
++  /* Add support for FLOAT128_TYPE on the LoongArch architecture.  */
++  builtin_define ("__FLOAT128_TYPE__");
++
++  /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
++  builtin_define ("__builtin_fabsq=__builtin_fabsf128");
++  builtin_define ("__builtin_copysignq=__builtin_copysignf128");
++  builtin_define ("__builtin_nanq=__builtin_nanf128");
++  builtin_define ("__builtin_nansq=__builtin_nansf128");
++  builtin_define ("__builtin_infq=__builtin_inff128");
++  builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
++
+   /* Native Data Sizes.  */
+   builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE);
+   builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE);
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index 1d1bac255..bb19d0f27 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -1085,10 +1085,10 @@ types.
+ As an extension, GNU C and GNU C++ support additional floating
+ types, which are not supported by all targets.
+ @itemize @bullet
+-@item @code{__float128} is available on i386, x86_64, IA-64, and
+-hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable
++@item @code{__float128} is available on i386, x86_64, IA-64, LoongArch
++and hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable
+ the vector scalar (VSX) instruction set.  @code{__float128} supports
+-the 128-bit floating type.  On i386, x86_64, PowerPC, and IA-64
++the 128-bit floating type.  On i386, x86_64, PowerPC, LoongArch and IA-64,
+ other than HP-UX, @code{__float128} is an alias for @code{_Float128}.
+ On hppa and IA-64 HP-UX, @code{__float128} is an alias for @code{long
+ double}.
+@@ -16257,6 +16257,20 @@ function you need to include @code{larchintrin.h}.
+     void __break (imm0_32767)
+ @end smallexample
+ 
++Additional built-in functions are available for LoongArch family
++processors to efficiently use 128-bit floating-point (__float128)
++values.
++
++The following are the basic built-in functions supported.
++@smallexample
++__float128 __builtin_fabsq (__float128);
++__float128 __builtin_copysignq (__float128, __float128);
++__float128 __builtin_infq (void);
++__float128 __builtin_huge_valq (void);
++__float128 __builtin_nanq (void);
++__float128 __builtin_nansq (void);
++@end smallexample
++
+ @node MIPS DSP Built-in Functions
+ @subsection MIPS DSP Built-in Functions
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/math-float-128.c b/gcc/testsuite/gcc.target/loongarch/math-float-128.c
+new file mode 100644
+index 000000000..387566a57
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/math-float-128.c
+@@ -0,0 +1,81 @@
++/* { dg-do compile } */
++/* { dg-options " -march=loongarch64 -O2 " } */
++/* { dg-final { scan-assembler-not "my_fabsq2:.*\\bl\t%plt\\(__builtin_fabsq\\).*my_fabsq2" } } */
++/* { dg-final { scan-assembler-not "my_copysignq2:.*\\bl\t%plt\\(__builtin_copysignq\\).*my_copysignq2" } } */
++/* { dg-final { scan-assembler-not "my_infq2:.*\\bl\t%plt\\(__builtin_infq\\).*my_infq2" } } */
++/* { dg-final { scan-assembler-not "my_huge_valq2:.*\\bl\t%plt\\(__builtin_huge_valq\\).*my_huge_valq2" } } */
++/* { dg-final { scan-assembler-not "my_nanq2:.*\\bl\t%plt\\(__builtin_nanq\\).*my_nanq2" } } */
++/* { dg-final { scan-assembler-not "my_nansq2:.*\\bl\t%plt\\(__builtin_nansq\\).*my_nansq2" } } */
++
++__float128
++my_fabsq1 (__float128 a)
++{
++  return __builtin_fabsq (a);
++}
++
++_Float128
++my_fabsq2 (_Float128 a)
++{
++  return __builtin_fabsq (a);
++}
++
++__float128
++my_copysignq1 (__float128 a, __float128 b)
++{
++  return __builtin_copysignq (a, b);
++}
++
++_Float128
++my_copysignq2 (_Float128 a, _Float128 b)
++{
++  return __builtin_copysignq (a, b);
++}
++
++__float128
++my_infq1 (void)
++{
++  return __builtin_infq ();
++}
++
++_Float128
++my_infq2 (void)
++{
++  return __builtin_infq ();
++}
++
++__float128
++my_huge_valq1 (void)
++{
++  return __builtin_huge_valq ();
++}
++
++_Float128
++my_huge_valq2 (void)
++{
++  return __builtin_huge_valq ();
++}
++
++__float128
++my_nanq1 (void)
++{
++  return __builtin_nanq ("");
++}
++
++_Float128
++my_nanq2 (void)
++{
++  return __builtin_nanq ("");
++}
++
++__float128
++my_nansq1 (void)
++{
++  return __builtin_nansq ("");
++}
++
++_Float128
++my_nansq2 (void)
++{
++  return __builtin_nansq ("");
++}
++
+-- 
+2.33.0
+
diff --git a/LoongArch-Improve-GAR-store-for-va_list.patch b/LoongArch-Improve-GAR-store-for-va_list.patch
new file mode 100644
index 0000000000000000000000000000000000000000..af418a954e91da7a3cbd5532705b108ba509a7ac
--- /dev/null
+++ b/LoongArch-Improve-GAR-store-for-va_list.patch
@@ -0,0 +1,83 @@
+From 4075f299ca6a5d15fdb46f877cbe11b7166a19ff Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 29 Mar 2023 01:36:09 +0800
+Subject: [PATCH 042/124] LoongArch: Improve GAR store for va_list
+
+LoongArch backend used to save all GARs for a function with variable
+arguments.  But sometimes a function only accepts variable arguments for
+a purpose like C++ function overloading.  For example, POSIX defines
+open() as:
+
+    int open(const char *path, int oflag, ...);
+
+But only two forms are actually used:
+
+    int open(const char *pathname, int flags);
+    int open(const char *pathname, int flags, mode_t mode);
+
+So it's obviously a waste to save all 8 GARs in open().  We can use the
+cfun->va_list_gpr_size field set by the stdarg pass to only save the
+GARs necessary to be saved.
+
+If the va_list escapes (for example, in fprintf() we pass it to
+vfprintf()), stdarg would set cfun->va_list_gpr_size to 255 so we
+don't need a special case.
+
+With this patch, only one GAR ($a2/$r6) is saved in open().  Ideally
+even this stack store should be omitted too, but doing so is not trivial
+and AFAIK there are no compilers (for any target) performing the "ideal"
+optimization here, see https://godbolt.org/z/n1YqWq9c9.
+
+Bootstrapped and regtested on loongarch64-linux-gnu.  Ok for trunk
+(GCC 14 or now)?
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc
+	(loongarch_setup_incoming_varargs): Don't save more GARs than
+	cfun->va_list_gpr_size / UNITS_PER_WORD.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/va_arg.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/testsuite/gcc.target/loongarch/va_arg.c | 24 +++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/va_arg.c
+
+diff --git a/gcc/testsuite/gcc.target/loongarch/va_arg.c b/gcc/testsuite/gcc.target/loongarch/va_arg.c
+new file mode 100644
+index 000000000..980c96d0e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/va_arg.c
+@@ -0,0 +1,24 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++
++/* Technically we shouldn't save any register for this function: it should be
++   compiled as if it accepts 3 named arguments.  But AFAIK no compilers can
++   achieve this "perfect" optimization now, so just ensure we are using the
++   knowledge provided by stdarg pass and we won't save GARs impossible to be
++   accessed with __builtin_va_arg () when the va_list does not escape.  */
++
++/* { dg-final { scan-assembler-not "st.*r7" } } */
++
++int
++test (int a0, ...)
++{
++  void *arg;
++  int a1, a2;
++
++  __builtin_va_start (arg, a0);
++  a1 = __builtin_va_arg (arg, int);
++  a2 = __builtin_va_arg (arg, int);
++  __builtin_va_end (arg);
++
++  return a0 + a1 + a2;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Improve-cpymemsi-expansion-PR109465.patch b/LoongArch-Improve-cpymemsi-expansion-PR109465.patch
new file mode 100644
index 0000000000000000000000000000000000000000..3c8d3aa884c6ad41ca67caa9c0dfa9004966ee47
--- /dev/null
+++ b/LoongArch-Improve-cpymemsi-expansion-PR109465.patch
@@ -0,0 +1,339 @@
+From 33fff578e7df7aa7e236efc6c9c85c595918d86a Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 12 Apr 2023 11:45:48 +0000
+Subject: [PATCH 043/124] LoongArch: Improve cpymemsi expansion [PR109465]
+
+We'd been generating really bad block move sequences which is recently
+complained by kernel developers who tried __builtin_memcpy.  To improve
+it:
+
+1. Take the advantage of -mno-strict-align.  When it is set, set mode
+   size to UNITS_PER_WORD regardless of the alignment.
+2. Half the mode size when (block size) % (mode size) != 0, instead of
+   falling back to ld.bu/st.b at once.
+3. Limit the length of block move sequence considering the number of
+   instructions, not the size of block.  When -mstrict-align is set and
+   the block is not aligned, the old size limit for straight-line
+   implementation (64 bytes) was definitely too large (we don't have 64
+   registers anyway).
+
+Change since v1: add a comment about the calculation of num_reg.
+
+gcc/ChangeLog:
+
+	PR target/109465
+	* config/loongarch/loongarch-protos.h
+	(loongarch_expand_block_move): Add a parameter as alignment RTX.
+	* config/loongarch/loongarch.h:
+	(LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER): Remove.
+	(LARCH_MAX_MOVE_BYTES_STRAIGHT): Remove.
+	(LARCH_MAX_MOVE_OPS_PER_LOOP_ITER): Define.
+	(LARCH_MAX_MOVE_OPS_STRAIGHT): Define.
+	(MOVE_RATIO): Use LARCH_MAX_MOVE_OPS_PER_LOOP_ITER instead of
+	LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER.
+	* config/loongarch/loongarch.cc (loongarch_expand_block_move):
+	Take the alignment from the parameter, but set it to
+	UNITS_PER_WORD if !TARGET_STRICT_ALIGN.  Limit the length of
+	straight-line implementation with LARCH_MAX_MOVE_OPS_STRAIGHT
+	instead of LARCH_MAX_MOVE_BYTES_STRAIGHT.
+	(loongarch_block_move_straight): When there are left-over bytes,
+	half the mode size instead of falling back to byte mode at once.
+	(loongarch_block_move_loop): Limit the length of loop body with
+	LARCH_MAX_MOVE_OPS_PER_LOOP_ITER instead of
+	LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER.
+	* config/loongarch/loongarch.md (cpymemsi): Pass the alignment
+	to loongarch_expand_block_move.
+
+gcc/testsuite/ChangeLog:
+
+	PR target/109465
+	* gcc.target/loongarch/pr109465-1.c: New test.
+	* gcc.target/loongarch/pr109465-2.c: New test.
+	* gcc.target/loongarch/pr109465-3.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-protos.h       |  2 +-
+ gcc/config/loongarch/loongarch.cc             | 95 +++++++++++--------
+ gcc/config/loongarch/loongarch.h              | 10 +-
+ gcc/config/loongarch/loongarch.md             |  3 +-
+ .../gcc.target/loongarch/pr109465-1.c         |  9 ++
+ .../gcc.target/loongarch/pr109465-2.c         |  9 ++
+ .../gcc.target/loongarch/pr109465-3.c         | 12 +++
+ 7 files changed, 91 insertions(+), 49 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/pr109465-3.c
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 0a9b47722..3ac3b5e19 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -95,7 +95,7 @@ extern void loongarch_expand_conditional_trap (rtx);
+ #endif
+ extern void loongarch_set_return_address (rtx, rtx);
+ extern bool loongarch_move_by_pieces_p (unsigned HOST_WIDE_INT, unsigned int);
+-extern bool loongarch_expand_block_move (rtx, rtx, rtx);
++extern bool loongarch_expand_block_move (rtx, rtx, rtx, rtx);
+ extern bool loongarch_do_optimize_block_move_p (void);
+ 
+ extern bool loongarch_expand_ext_as_unaligned_load (rtx, rtx, HOST_WIDE_INT,
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 233dddbac..d3c6f22ad 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4456,41 +4456,46 @@ loongarch_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
+    Assume that the areas do not overlap.  */
+ 
+ static void
+-loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
++loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length,
++			       HOST_WIDE_INT delta)
+ {
+-  HOST_WIDE_INT offset, delta;
+-  unsigned HOST_WIDE_INT bits;
++  HOST_WIDE_INT offs, delta_cur;
+   int i;
+   machine_mode mode;
+   rtx *regs;
+ 
+-  bits = MIN (BITS_PER_WORD, MIN (MEM_ALIGN (src), MEM_ALIGN (dest)));
+-
+-  mode = int_mode_for_size (bits, 0).require ();
+-  delta = bits / BITS_PER_UNIT;
++  /* Calculate how many registers we'll need for the block move.
++     We'll emit length / delta move operations with delta as the size
++     first.  Then we may still have length % delta bytes not copied.
++     We handle these remaining bytes by move operations with smaller
++     (halfed) sizes.  For example, if length = 21 and delta = 8, we'll
++     emit two ld.d/st.d pairs, one ld.w/st.w pair, and one ld.b/st.b
++     pair.  For each load/store pair we use a dedicated register to keep
++     the pipeline as populated as possible.  */
++  HOST_WIDE_INT num_reg = length / delta;
++  for (delta_cur = delta / 2; delta_cur != 0; delta_cur /= 2)
++    num_reg += !!(length & delta_cur);
+ 
+   /* Allocate a buffer for the temporary registers.  */
+-  regs = XALLOCAVEC (rtx, length / delta);
++  regs = XALLOCAVEC (rtx, num_reg);
+ 
+-  /* Load as many BITS-sized chunks as possible.  Use a normal load if
+-     the source has enough alignment, otherwise use left/right pairs.  */
+-  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
++  for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2)
+     {
+-      regs[i] = gen_reg_rtx (mode);
+-      loongarch_emit_move (regs[i], adjust_address (src, mode, offset));
+-    }
++      mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require ();
+ 
+-  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
+-    loongarch_emit_move (adjust_address (dest, mode, offset), regs[i]);
++      for (; offs + delta_cur <= length; offs += delta_cur, i++)
++	{
++	  regs[i] = gen_reg_rtx (mode);
++	  loongarch_emit_move (regs[i], adjust_address (src, mode, offs));
++	}
++    }
+ 
+-  /* Mop up any left-over bytes.  */
+-  if (offset < length)
++  for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2)
+     {
+-      src = adjust_address (src, BLKmode, offset);
+-      dest = adjust_address (dest, BLKmode, offset);
+-      move_by_pieces (dest, src, length - offset,
+-		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)),
+-		      (enum memop_ret) 0);
++      mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require ();
++
++      for (; offs + delta_cur <= length; offs += delta_cur, i++)
++	loongarch_emit_move (adjust_address (dest, mode, offs), regs[i]);
+     }
+ }
+ 
+@@ -4520,10 +4525,11 @@ loongarch_adjust_block_mem (rtx mem, HOST_WIDE_INT length, rtx *loop_reg,
+ 
+ static void
+ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
+-			   HOST_WIDE_INT bytes_per_iter)
++			   HOST_WIDE_INT align)
+ {
+   rtx_code_label *label;
+   rtx src_reg, dest_reg, final_src, test;
++  HOST_WIDE_INT bytes_per_iter = align * LARCH_MAX_MOVE_OPS_PER_LOOP_ITER;
+   HOST_WIDE_INT leftover;
+ 
+   leftover = length % bytes_per_iter;
+@@ -4543,7 +4549,7 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
+   emit_label (label);
+ 
+   /* Emit the loop body.  */
+-  loongarch_block_move_straight (dest, src, bytes_per_iter);
++  loongarch_block_move_straight (dest, src, bytes_per_iter, align);
+ 
+   /* Move on to the next block.  */
+   loongarch_emit_move (src_reg,
+@@ -4560,7 +4566,7 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
+ 
+   /* Mop up any left-over bytes.  */
+   if (leftover)
+-    loongarch_block_move_straight (dest, src, leftover);
++    loongarch_block_move_straight (dest, src, leftover, align);
+   else
+     /* Temporary fix for PR79150.  */
+     emit_insn (gen_nop ());
+@@ -4570,25 +4576,32 @@ loongarch_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length,
+    memory reference SRC to memory reference DEST.  */
+ 
+ bool
+-loongarch_expand_block_move (rtx dest, rtx src, rtx length)
++loongarch_expand_block_move (rtx dest, rtx src, rtx r_length, rtx r_align)
+ {
+-  int max_move_bytes = LARCH_MAX_MOVE_BYTES_STRAIGHT;
++  if (!CONST_INT_P (r_length))
++    return false;
++
++  HOST_WIDE_INT length = INTVAL (r_length);
++  if (length > loongarch_max_inline_memcpy_size)
++    return false;
++
++  HOST_WIDE_INT align = INTVAL (r_align);
++
++  if (!TARGET_STRICT_ALIGN || align > UNITS_PER_WORD)
++    align = UNITS_PER_WORD;
+ 
+-  if (CONST_INT_P (length)
+-      && INTVAL (length) <= loongarch_max_inline_memcpy_size)
++  if (length <= align * LARCH_MAX_MOVE_OPS_STRAIGHT)
+     {
+-      if (INTVAL (length) <= max_move_bytes)
+-	{
+-	  loongarch_block_move_straight (dest, src, INTVAL (length));
+-	  return true;
+-	}
+-      else if (optimize)
+-	{
+-	  loongarch_block_move_loop (dest, src, INTVAL (length),
+-				     LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER);
+-	  return true;
+-	}
++      loongarch_block_move_straight (dest, src, length, align);
++      return true;
++    }
++
++  if (optimize)
++    {
++      loongarch_block_move_loop (dest, src, length, align);
++      return true;
+     }
++
+   return false;
+ }
+ 
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 9d3cd9ca0..af24bfa01 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -1062,13 +1062,13 @@ typedef struct {
+ 
+ /* The maximum number of bytes that can be copied by one iteration of
+    a cpymemsi loop; see loongarch_block_move_loop.  */
+-#define LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER (UNITS_PER_WORD * 4)
++#define LARCH_MAX_MOVE_OPS_PER_LOOP_ITER 4
+ 
+ /* The maximum number of bytes that can be copied by a straight-line
+    implementation of cpymemsi; see loongarch_block_move_straight.  We want
+    to make sure that any loop-based implementation will iterate at
+    least twice.  */
+-#define LARCH_MAX_MOVE_BYTES_STRAIGHT (LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER * 2)
++#define LARCH_MAX_MOVE_OPS_STRAIGHT (LARCH_MAX_MOVE_OPS_PER_LOOP_ITER * 2)
+ 
+ /* The base cost of a memcpy call, for MOVE_RATIO and friends.  These
+    values were determined experimentally by benchmarking with CSiBE.
+@@ -1076,7 +1076,7 @@ typedef struct {
+ #define LARCH_CALL_RATIO 8
+ 
+ /* Any loop-based implementation of cpymemsi will have at least
+-   LARCH_MAX_MOVE_BYTES_STRAIGHT / UNITS_PER_WORD memory-to-memory
++   LARCH_MAX_MOVE_OPS_PER_LOOP_ITER memory-to-memory
+    moves, so allow individual copies of fewer elements.
+ 
+    When cpymemsi is not available, use a value approximating
+@@ -1087,9 +1087,7 @@ typedef struct {
+    value of LARCH_CALL_RATIO to take that into account.  */
+ 
+ #define MOVE_RATIO(speed) \
+-  (HAVE_cpymemsi \
+-   ? LARCH_MAX_MOVE_BYTES_PER_LOOP_ITER / UNITS_PER_WORD \
+-   : CLEAR_RATIO (speed) / 2)
++  (HAVE_cpymemsi ? LARCH_MAX_MOVE_OPS_PER_LOOP_ITER : CLEAR_RATIO (speed) / 2)
+ 
+ /* For CLEAR_RATIO, when optimizing for size, give a better estimate
+    of the length of a memset call, but use the default otherwise.  */
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index b2f7c7f78..b23248c33 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2488,7 +2488,8 @@
+   ""
+ {
+   if (TARGET_DO_OPTIMIZE_BLOCK_MOVE_P
+-      && loongarch_expand_block_move (operands[0], operands[1], operands[2]))
++      && loongarch_expand_block_move (operands[0], operands[1],
++				      operands[2], operands[3]))
+     DONE;
+   else
+     FAIL;
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr109465-1.c b/gcc/testsuite/gcc.target/loongarch/pr109465-1.c
+new file mode 100644
+index 000000000..4cd35d139
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr109465-1.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mno-strict-align" } */
++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */
++
++extern char a[], b[];
++void test() { __builtin_memcpy(a, b, 15); }
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr109465-2.c b/gcc/testsuite/gcc.target/loongarch/pr109465-2.c
+new file mode 100644
+index 000000000..703eb951c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr109465-2.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mstrict-align" } */
++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */
++
++extern long a[], b[];
++void test() { __builtin_memcpy(a, b, 15); }
+diff --git a/gcc/testsuite/gcc.target/loongarch/pr109465-3.c b/gcc/testsuite/gcc.target/loongarch/pr109465-3.c
+new file mode 100644
+index 000000000..d6a80659b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/pr109465-3.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -mstrict-align" } */
++
++/* Three loop iterations each contains 4 st.b, and 3 st.b after the loop */
++/* { dg-final { scan-assembler-times "st\\.b" 7 } } */
++
++/* { dg-final { scan-assembler-not "st\\.h" } } */
++/* { dg-final { scan-assembler-not "st\\.w|stptr\\.w" } } */
++/* { dg-final { scan-assembler-not "st\\.d|stptr\\.d" } } */
++
++extern char a[], b[];
++void test() { __builtin_memcpy(a, b, 15); }
+-- 
+2.33.0
+
diff --git a/LoongArch-Libitm-add-LoongArch-support.patch b/LoongArch-Libitm-add-LoongArch-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f202ea7ed4d662b822409d65c58bb6d7670d54fa
--- /dev/null
+++ b/LoongArch-Libitm-add-LoongArch-support.patch
@@ -0,0 +1,291 @@
+From 7f9f1dd3c87cffeab58150997e22e8fff707646b Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Mon, 26 Sep 2022 09:42:51 +0800
+Subject: [PATCH 019/124] LoongArch: Libitm add LoongArch support.
+
+Co-Authored-By: Yang Yujie 
+
+libitm/ChangeLog:
+
+	* configure.tgt: Add loongarch support.
+	* config/loongarch/asm.h: New file.
+	* config/loongarch/sjlj.S: New file.
+	* config/loongarch/target.h: New file.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ libitm/config/loongarch/asm.h    |  54 +++++++++++++
+ libitm/config/loongarch/sjlj.S   | 127 +++++++++++++++++++++++++++++++
+ libitm/config/loongarch/target.h |  50 ++++++++++++
+ libitm/configure.tgt             |   2 +
+ 4 files changed, 233 insertions(+)
+ create mode 100644 libitm/config/loongarch/asm.h
+ create mode 100644 libitm/config/loongarch/sjlj.S
+ create mode 100644 libitm/config/loongarch/target.h
+
+diff --git a/libitm/config/loongarch/asm.h b/libitm/config/loongarch/asm.h
+new file mode 100644
+index 000000000..a8e3304bb
+--- /dev/null
++++ b/libitm/config/loongarch/asm.h
+@@ -0,0 +1,54 @@
++/* Copyright (C) 2022 Free Software Foundation, Inc.
++   Contributed by Loongson Co. Ltd.
++
++   This file is part of the GNU Transactional Memory Library (libitm).
++
++   Libitm is free software; you can redistribute it and/or modify it
++   under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
++   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++   more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   .  */
++
++#ifndef _LA_ASM_H
++#define _LA_ASM_H
++
++#if defined(__loongarch_lp64)
++#  define GPR_L ld.d
++#  define GPR_S st.d
++#  define SZ_GPR 8
++#  define ADDSP(si)   addi.d  $sp, $sp, si
++#elif defined(__loongarch64_ilp32)
++#  define GPR_L ld.w
++#  define GPR_S st.w
++#  define SZ_GPR 4
++#  define ADDSP(si)   addi.w  $sp, $sp, si
++#else
++#  error Unsupported GPR size (must be 64-bit or 32-bit).
++#endif
++
++#if defined(__loongarch_double_float)
++#  define FPR_L fld.d
++#  define FPR_S fst.d
++#  define SZ_FPR 8
++#elif defined(__loongarch_single_float)
++#  define FPR_L fld.s
++#  define FPR_S fst.s
++#  define SZ_FPR 4
++#else
++#  define SZ_FPR 0
++#endif
++
++#endif  /* _LA_ASM_H */
+diff --git a/libitm/config/loongarch/sjlj.S b/libitm/config/loongarch/sjlj.S
+new file mode 100644
+index 000000000..a5f9fadde
+--- /dev/null
++++ b/libitm/config/loongarch/sjlj.S
+@@ -0,0 +1,127 @@
++/* Copyright (C) 2022 Free Software Foundation, Inc.
++   Contributed by Loongson Co. Ltd.
++
++   This file is part of the GNU Transactional Memory Library (libitm).
++
++   Libitm is free software; you can redistribute it and/or modify it
++   under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
++   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   .  */
++
++#include "asmcfi.h"
++#include "asm.h"
++
++	.text
++	.align	2
++	.global	_ITM_beginTransaction
++	.type	_ITM_beginTransaction, @function
++
++_ITM_beginTransaction:
++        cfi_startproc
++        move   $r5, $sp
++        ADDSP(-(12*SZ_GPR+8*SZ_FPR))
++        cfi_adjust_cfa_offset(12*SZ_GPR+8*SZ_FPR)
++
++        /* Frame Pointer */
++        GPR_S  $fp, $sp, 0*SZ_GPR
++        cfi_rel_offset(22, 0)
++
++        /* Return Address */
++        GPR_S  $r1, $sp, 1*SZ_GPR
++        cfi_rel_offset(1, SZ_GPR)
++
++        /* Caller's $sp */
++        GPR_S  $r5, $sp, 2*SZ_GPR
++
++        /* Callee-saved scratch GPRs (r23-r31) */
++        GPR_S  $s0, $sp, 3*SZ_GPR
++        GPR_S  $s1, $sp, 4*SZ_GPR
++        GPR_S  $s2, $sp, 5*SZ_GPR
++        GPR_S  $s3, $sp, 6*SZ_GPR
++        GPR_S  $s4, $sp, 7*SZ_GPR
++        GPR_S  $s5, $sp, 8*SZ_GPR
++        GPR_S  $s6, $sp, 9*SZ_GPR
++        GPR_S  $s7, $sp, 10*SZ_GPR
++        GPR_S  $s8, $sp, 11*SZ_GPR
++
++#if !defined(__loongarch_soft_float)
++        /* Callee-saved scratch FPRs (f24-f31) */
++        FPR_S  $f24, $sp, 12*SZ_GPR + 0*SZ_FPR
++        FPR_S  $f25, $sp, 12*SZ_GPR + 1*SZ_FPR
++        FPR_S  $f26, $sp, 12*SZ_GPR + 2*SZ_FPR
++        FPR_S  $f27, $sp, 12*SZ_GPR + 3*SZ_FPR
++        FPR_S  $f28, $sp, 12*SZ_GPR + 4*SZ_FPR
++        FPR_S  $f29, $sp, 12*SZ_GPR + 5*SZ_FPR
++        FPR_S  $f30, $sp, 12*SZ_GPR + 6*SZ_FPR
++        FPR_S  $f31, $sp, 12*SZ_GPR + 7*SZ_FPR
++#endif
++        move   $fp, $sp
++
++        /* Invoke GTM_begin_transaction with the struct we've just built.  */
++        move   $r5, $sp
++        bl     %plt(GTM_begin_transaction)
++
++        /* Return. (no call-saved scratch reg needs to be restored here)  */
++        GPR_L  $fp, $sp, 0*SZ_GPR
++        cfi_restore(22)
++        GPR_L  $r1, $sp, 1*SZ_GPR
++        cfi_restore(1)
++
++        ADDSP(12*SZ_GPR+8*SZ_FPR)
++        cfi_adjust_cfa_offset(-(12*SZ_GPR+8*SZ_FPR))
++
++        jr     $r1
++        cfi_endproc
++	.size	_ITM_beginTransaction, . - _ITM_beginTransaction
++
++	.align	2
++	.global	GTM_longjmp
++	.hidden	GTM_longjmp
++	.type	GTM_longjmp, @function
++
++GTM_longjmp:
++        cfi_startproc
++        GPR_L  $s0, $r5, 3*SZ_GPR
++        GPR_L  $s1, $r5, 4*SZ_GPR
++        GPR_L  $s2, $r5, 5*SZ_GPR
++        GPR_L  $s3, $r5, 6*SZ_GPR
++        GPR_L  $s4, $r5, 7*SZ_GPR
++        GPR_L  $s5, $r5, 8*SZ_GPR
++        GPR_L  $s6, $r5, 9*SZ_GPR
++        GPR_L  $s7, $r5, 10*SZ_GPR
++        GPR_L  $s8, $r5, 11*SZ_GPR
++
++        FPR_L  $f24, $r5, 12*SZ_GPR + 0*SZ_FPR
++        FPR_L  $f25, $r5, 12*SZ_GPR + 1*SZ_FPR
++        FPR_L  $f26, $r5, 12*SZ_GPR + 2*SZ_FPR
++        FPR_L  $f27, $r5, 12*SZ_GPR + 3*SZ_FPR
++        FPR_L  $f28, $r5, 12*SZ_GPR + 4*SZ_FPR
++        FPR_L  $f29, $r5, 12*SZ_GPR + 5*SZ_FPR
++        FPR_L  $f30, $r5, 12*SZ_GPR + 6*SZ_FPR
++        FPR_L  $f31, $r5, 12*SZ_GPR + 7*SZ_FPR
++
++        GPR_L  $r7, $r5, 2*SZ_GPR
++        GPR_L  $fp, $r5, 0*SZ_GPR
++        GPR_L  $r1, $r5, 1*SZ_GPR
++        cfi_def_cfa(5, 0)
++        move   $sp, $r7
++        jr     $r1
++        cfi_endproc
++	.size	GTM_longjmp, . - GTM_longjmp
++
++#ifdef __linux__
++.section .note.GNU-stack, "", @progbits
++#endif
+diff --git a/libitm/config/loongarch/target.h b/libitm/config/loongarch/target.h
+new file mode 100644
+index 000000000..0c5cf3ada
+--- /dev/null
++++ b/libitm/config/loongarch/target.h
+@@ -0,0 +1,50 @@
++/* Copyright (C) 2022 Free Software Foundation, Inc.
++   Contributed by Loongson Co. Ltd.
++
++   This file is part of the GNU Transactional Memory Library (libitm).
++
++   Libitm is free software; you can redistribute it and/or modify it
++   under the terms of the GNU General Public License as published by
++   the Free Software Foundation; either version 3 of the License, or
++   (at your option) any later version.
++
++   Libitm is distributed in the hope that it will be useful, but WITHOUT ANY
++   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
++   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
++   more details.
++
++   Under Section 7 of GPL version 3, you are granted additional
++   permissions described in the GCC Runtime Library Exception, version
++   3.1, as published by the Free Software Foundation.
++
++   You should have received a copy of the GNU General Public License and
++   a copy of the GCC Runtime Library Exception along with this program;
++   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
++   .  */
++
++namespace GTM HIDDEN {
++
++typedef struct gtm_jmpbuf
++  {
++    long int fp;        /* Frame Pointer: r22 */
++    long int pc;        /* Return Address: r1 */
++    void *cfa;          /* CFA: r3 */
++    long int gpr[9];	/* Callee-saved scratch GPRs: r23(s0)-r31(s8) */
++
++    /* Callee-saved scratch FPRs: f24-f31 */
++#if defined(__loongarch_double_float)
++    double fpr[8];
++#elif defined(__loongarch_single_float)
++    float fpr[8];
++#endif
++  } gtm_jmpbuf;
++
++#define HW_CACHELINE_SIZE 128
++
++static inline void
++cpu_relax (void)
++{
++    __asm__ volatile ("" : : : "memory");
++}
++
++} // namespace GTM
+diff --git a/libitm/configure.tgt b/libitm/configure.tgt
+index 06e90973e..4c0e78cff 100644
+--- a/libitm/configure.tgt
++++ b/libitm/configure.tgt
+@@ -80,6 +80,8 @@ EOF
+ 	ARCH=x86
+ 	;;
+ 
++  loongarch*)   ARCH=loongarch ;;
++
+   sh*)		ARCH=sh ;;
+ 
+   sparc)
+-- 
+2.33.0
+
diff --git a/LoongArch-Modify-the-output-message-string-of-the-wa.patch b/LoongArch-Modify-the-output-message-string-of-the-wa.patch
new file mode 100644
index 0000000000000000000000000000000000000000..392f935a87dbb2463e1a3e779ea3e67ed4bf73ad
--- /dev/null
+++ b/LoongArch-Modify-the-output-message-string-of-the-wa.patch
@@ -0,0 +1,37 @@
+From 83d6cfbbdc41766af9d7941d00204cc0f26ff40c Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 26 Jul 2022 21:03:52 +0800
+Subject: [PATCH 005/124] LoongArch: Modify the output message string of the
+ warning.
+
+Fix bug for "error: spurious trailing punctuation sequence '.' in format [-Werror=format-diag]".
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-opts.cc: Modify the output message string
+	of the warning.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-opts.cc | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index fc477bfd4..3f70943de 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -378,8 +378,8 @@ fallback:
+   t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL;
+   if (t.cmodel != CMODEL_NORMAL)
+     {
+-      warning (0, "%qs is not supported, now cmodel is set to 'normal'.",
+-	       loongarch_cmodel_strings[t.cmodel]);
++      warning (0, "%qs is not supported, now cmodel is set to %qs",
++	       loongarch_cmodel_strings[t.cmodel], "normal");
+       t.cmodel = CMODEL_NORMAL;
+     }
+ 
+-- 
+2.33.0
+
diff --git a/LoongArch-Optimize-additions-with-immediates.patch b/LoongArch-Optimize-additions-with-immediates.patch
new file mode 100644
index 0000000000000000000000000000000000000000..69acae4cf336d00d38f93a6d0024805ff0d20c1d
--- /dev/null
+++ b/LoongArch-Optimize-additions-with-immediates.patch
@@ -0,0 +1,445 @@
+From a31baa1e437fa4acedfaf03db91c1d6e5ce78013 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sun, 2 Apr 2023 21:37:49 +0800
+Subject: [PATCH 041/124] LoongArch: Optimize additions with immediates
+
+1. Use addu16i.d for TARGET_64BIT and suitable immediates.
+2. Split one addition with immediate into two addu16i.d or addi.{d/w}
+   instructions if possible.  This can avoid using a temp register w/o
+   increase the count of instructions.
+
+Inspired by https://reviews.llvm.org/D143710 and
+https://reviews.llvm.org/D147222.
+
+Bootstrapped and regtested on loongarch64-linux-gnu.  Ok for GCC 14?
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-protos.h
+	(loongarch_addu16i_imm12_operand_p): New function prototype.
+	(loongarch_split_plus_constant): Likewise.
+	* config/loongarch/loongarch.cc
+	(loongarch_addu16i_imm12_operand_p): New function.
+	(loongarch_split_plus_constant): Likewise.
+	* config/loongarch/loongarch.h (ADDU16I_OPERAND): New macro.
+	(DUAL_IMM12_OPERAND): Likewise.
+	(DUAL_ADDU16I_OPERAND): Likewise.
+	* config/loongarch/constraints.md (La, Lb, Lc, Ld, Le): New
+	constraint.
+	* config/loongarch/predicates.md (const_dual_imm12_operand): New
+	predicate.
+	(const_addu16i_operand): Likewise.
+	(const_addu16i_imm12_di_operand): Likewise.
+	(const_addu16i_imm12_si_operand): Likewise.
+	(plus_di_operand): Likewise.
+	(plus_si_operand): Likewise.
+	(plus_si_extend_operand): Likewise.
+	* config/loongarch/loongarch.md (add3): Convert to
+	define_insn_and_split.  Use plus__operand predicate
+	instead of arith_operand.  Add alternatives for La, Lb, Lc, Ld,
+	and Le constraints.
+	(*addsi3_extended): Convert to define_insn_and_split.  Use
+	plus_si_extend_operand instead of arith_operand.  Add
+	alternatives for La and Le alternatives.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/add-const.c: New test.
+	* gcc.target/loongarch/stack-check-cfa-1.c: Adjust for stack
+	frame size change.
+	* gcc.target/loongarch/stack-check-cfa-2.c: Likewise.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/constraints.md           | 46 ++++++++++++-
+ gcc/config/loongarch/loongarch-protos.h       |  2 +
+ gcc/config/loongarch/loongarch.cc             | 44 +++++++++++++
+ gcc/config/loongarch/loongarch.h              | 19 ++++++
+ gcc/config/loongarch/loongarch.md             | 66 +++++++++++++++----
+ gcc/config/loongarch/predicates.md            | 36 ++++++++++
+ .../gcc.target/loongarch/add-const.c          | 45 +++++++++++++
+ .../gcc.target/loongarch/stack-check-cfa-1.c  |  2 +-
+ .../gcc.target/loongarch/stack-check-cfa-2.c  |  2 +-
+ 9 files changed, 246 insertions(+), 16 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/add-const.c
+
+diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md
+index 46f7f63ae..25f3cda35 100644
+--- a/gcc/config/loongarch/constraints.md
++++ b/gcc/config/loongarch/constraints.md
+@@ -60,7 +60,22 @@
+ ;; "I" "A signed 12-bit constant (for arithmetic instructions)."
+ ;; "J" "Integer zero."
+ ;; "K" "An unsigned 12-bit constant (for logic instructions)."
+-;; "L" <-----unused
++;; "L" -
++;;     "La"
++;;	 "A signed constant in [-4096, 2048) or (2047, 4094]."
++;;     "Lb"
++;;	 "A signed 32-bit constant and low 16-bit is zero, which can be
++;;	  added onto a register with addu16i.d.  It matches nothing if
++;;	  the addu16i.d instruction is not available."
++;;     "Lc"
++;;	 "A signed 64-bit constant can be expressed as Lb + I, but not a
++;;	  single Lb or I."
++;;     "Ld"
++;;	 "A signed 64-bit constant can be expressed as Lb + Lb, but not a
++;;	  single Lb."
++;;     "Le"
++;;	 "A signed 32-bit constant can be expressed as Lb + I, but not a
++;;	  single Lb or I."
+ ;; "M" <-----unused
+ ;; "N" <-----unused
+ ;; "O" <-----unused
+@@ -170,6 +185,35 @@
+   (and (match_code "const_int")
+        (match_test "IMM12_OPERAND_UNSIGNED (ival)")))
+ 
++(define_constraint "La"
++  "A signed constant in [-4096, 2048) or (2047, 4094]."
++  (and (match_code "const_int")
++       (match_test "DUAL_IMM12_OPERAND (ival)")))
++
++(define_constraint "Lb"
++  "A signed 32-bit constant and low 16-bit is zero, which can be added
++   onto a register with addu16i.d."
++  (and (match_code "const_int")
++       (match_test "ADDU16I_OPERAND (ival)")))
++
++(define_constraint "Lc"
++  "A signed 64-bit constant can be expressed as Lb + I, but not a single Lb
++   or I."
++  (and (match_code "const_int")
++       (match_test "loongarch_addu16i_imm12_operand_p (ival, DImode)")))
++
++(define_constraint "Ld"
++  "A signed 64-bit constant can be expressed as Lb + Lb, but not a single
++   Lb."
++  (and (match_code "const_int")
++       (match_test "DUAL_ADDU16I_OPERAND (ival)")))
++
++(define_constraint "Le"
++  "A signed 32-bit constant can be expressed as Lb + I, but not a single Lb
++   or I."
++  (and (match_code "const_int")
++       (match_test "loongarch_addu16i_imm12_operand_p (ival, SImode)")))
++
+ (define_constraint "Yd"
+   "@internal
+    A constant @code{move_operand} that can be safely loaded using
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 77b221724..0a9b47722 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -83,6 +83,8 @@ extern rtx loongarch_legitimize_call_address (rtx);
+ extern rtx loongarch_subword (rtx, bool);
+ extern bool loongarch_split_move_p (rtx, rtx);
+ extern void loongarch_split_move (rtx, rtx, rtx);
++extern bool loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT, machine_mode);
++extern void loongarch_split_plus_constant (rtx *, machine_mode);
+ extern const char *loongarch_output_move (rtx, rtx);
+ extern bool loongarch_cfun_has_cprestore_slot_p (void);
+ #ifdef RTX_CODE
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 1a4686f03..233dddbac 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -3753,6 +3753,50 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_)
+     }
+ }
+ 
++/* Check if adding an integer constant value for a specific mode can be
++   performed with an addu16i.d instruction and an addi.{w/d}
++   instruction.  */
++
++bool
++loongarch_addu16i_imm12_operand_p (HOST_WIDE_INT value, machine_mode mode)
++{
++  /* Not necessary, but avoid unnecessary calculation if !TARGET_64BIT.  */
++  if (!TARGET_64BIT)
++    return false;
++
++  if ((value & 0xffff) == 0)
++    return false;
++
++  if (IMM12_OPERAND (value))
++    return false;
++
++  value = (value & ~HWIT_UC_0xFFF) + ((value & 0x800) << 1);
++  return ADDU16I_OPERAND (trunc_int_for_mode (value, mode));
++}
++
++/* Split one integer constant op[0] into two (op[1] and op[2]) for constant
++   plus operation in a specific mode.  The splitted constants can be added
++   onto a register with a single instruction (addi.{d/w} or addu16i.d).  */
++
++void
++loongarch_split_plus_constant (rtx *op, machine_mode mode)
++{
++  HOST_WIDE_INT v = INTVAL (op[0]), a;
++
++  if (DUAL_IMM12_OPERAND (v))
++    a = (v > 0 ? 2047 : -2048);
++  else if (loongarch_addu16i_imm12_operand_p (v, mode))
++    a = (v & ~HWIT_UC_0xFFF) + ((v & 0x800) << 1);
++  else if (mode == DImode && DUAL_ADDU16I_OPERAND (v))
++    a = (v > 0 ? 0x7fff : -0x8000) << 16;
++  else
++    gcc_unreachable ();
++
++  op[1] = gen_int_mode (a, mode);
++  v = v - (unsigned HOST_WIDE_INT) a;
++  op[2] = gen_int_mode (v, mode);
++}
++
+ /* Return true if a move from SRC to DEST in INSN should be split.  */
+ 
+ static bool
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index c6e37b1b4..9d3cd9ca0 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -612,6 +612,25 @@ enum reg_class
+ 
+ #define CONST_LOW_PART(VALUE) ((VALUE) - CONST_HIGH_PART (VALUE))
+ 
++/* True if VALUE can be added onto a register with one addu16i.d
++   instruction.  */
++
++#define ADDU16I_OPERAND(VALUE)			\
++  (TARGET_64BIT && (((VALUE) & 0xffff) == 0	\
++   && IMM16_OPERAND ((HOST_WIDE_INT) (VALUE) / 65536)))
++
++/* True if VALUE can be added onto a register with two addi.{d/w}
++   instructions, but not one addi.{d/w} instruction.  */
++#define DUAL_IMM12_OPERAND(VALUE) \
++  (IN_RANGE ((VALUE), -4096, 4094) && !IMM12_OPERAND (VALUE))
++
++/* True if VALUE can be added onto a register with two addu16i.d
++   instruction, but not one addu16i.d instruction.  */
++#define DUAL_ADDU16I_OPERAND(VALUE)		\
++  (TARGET_64BIT && (((VALUE) & 0xffff) == 0	\
++   && !ADDU16I_OPERAND (VALUE)			\
++   && IN_RANGE ((VALUE) / 65536, -0x10000, 0xfffe)))
++
+ #define IMM12_INT(X) IMM12_OPERAND (INTVAL (X))
+ #define IMM12_INT_UNSIGNED(X) IMM12_OPERAND_UNSIGNED (INTVAL (X))
+ #define LU12I_INT(X) LU12I_OPERAND (INTVAL (X))
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 833b94753..b2f7c7f78 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -598,24 +598,64 @@
+   [(set_attr "type" "fadd")
+    (set_attr "mode" "")])
+ 
+-(define_insn "add3"
+-  [(set (match_operand:GPR 0 "register_operand" "=r,r")
+-	(plus:GPR (match_operand:GPR 1 "register_operand" "r,r")
+-		  (match_operand:GPR 2 "arith_operand" "r,I")))]
++(define_insn_and_split "add3"
++  [(set (match_operand:GPR 0 "register_operand" "=r,r,r,r,r,r,r")
++	(plus:GPR (match_operand:GPR 1 "register_operand" "r,r,r,r,r,r,r")
++		  (match_operand:GPR 2 "plus__operand"
++				       "r,I,La,Lb,Lc,Ld,Le")))]
+   ""
+-  "add%i2.\t%0,%1,%2";
++  "@
++   add.\t%0,%1,%2
++   addi.\t%0,%1,%2
++   #
++   * operands[2] = GEN_INT (INTVAL (operands[2]) / 65536); \
++     return \"addu16i.d\t%0,%1,%2\";
++   #
++   #
++   #"
++  "CONST_INT_P (operands[2]) && !IMM12_INT (operands[2]) \
++   && !ADDU16I_OPERAND (INTVAL (operands[2]))"
++  [(set (match_dup 0) (plus:GPR (match_dup 1) (match_dup 3)))
++   (set (match_dup 0) (plus:GPR (match_dup 0) (match_dup 4)))]
++  {
++    loongarch_split_plus_constant (&operands[2], mode);
++  }
+   [(set_attr "alu_type" "add")
+-   (set_attr "mode" "")])
+-
+-(define_insn "*addsi3_extended"
+-  [(set (match_operand:DI 0 "register_operand" "=r,r")
++   (set_attr "mode" "")
++   (set_attr "insn_count" "1,1,2,1,2,2,2")
++   (set (attr "enabled")
++      (cond
++	[(match_test "mode != DImode && which_alternative == 4")
++	 (const_string "no")
++	 (match_test "mode != DImode && which_alternative == 5")
++	 (const_string "no")
++	 (match_test "mode != SImode && which_alternative == 6")
++	 (const_string "no")]
++	(const_string "yes")))])
++
++(define_insn_and_split "*addsi3_extended"
++  [(set (match_operand:DI 0 "register_operand" "=r,r,r,r")
+ 	(sign_extend:DI
+-	     (plus:SI (match_operand:SI 1 "register_operand" "r,r")
+-		      (match_operand:SI 2 "arith_operand" "r,I"))))]
++	     (plus:SI (match_operand:SI 1 "register_operand" "r,r,r,r")
++		      (match_operand:SI 2 "plus_si_extend_operand"
++					  "r,I,La,Le"))))]
+   "TARGET_64BIT"
+-  "add%i2.w\t%0,%1,%2"
++  "@
++   add.w\t%0,%1,%2
++   addi.w\t%0,%1,%2
++   #
++   #"
++  "CONST_INT_P (operands[2]) && !IMM12_INT (operands[2])"
++  [(set (subreg:SI (match_dup 0) 0) (plus:SI (match_dup 1) (match_dup 3)))
++   (set (match_dup 0)
++	(sign_extend:DI (plus:SI (subreg:SI (match_dup 0) 0)
++				 (match_dup 4))))]
++  {
++    loongarch_split_plus_constant (&operands[2], SImode);
++  }
+   [(set_attr "alu_type" "add")
+-   (set_attr "mode" "SI")])
++   (set_attr "mode" "SI")
++   (set_attr "insn_count" "1,1,2,2")])
+ 
+ 
+ ;;
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 3c32b2987..4966d5569 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -39,14 +39,50 @@
+   (and (match_code "const_int")
+        (match_test "IMM12_OPERAND (INTVAL (op))")))
+ 
++(define_predicate "const_dual_imm12_operand"
++  (and (match_code "const_int")
++       (match_test "DUAL_IMM12_OPERAND (INTVAL (op))")))
++
+ (define_predicate "const_imm16_operand"
+   (and (match_code "const_int")
+        (match_test "IMM16_OPERAND (INTVAL (op))")))
+ 
++(define_predicate "const_addu16i_operand"
++  (and (match_code "const_int")
++       (match_test "ADDU16I_OPERAND (INTVAL (op))")))
++
++(define_predicate "const_addu16i_imm12_di_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_addu16i_imm12_operand_p (INTVAL (op), DImode)")))
++
++(define_predicate "const_addu16i_imm12_si_operand"
++  (and (match_code "const_int")
++       (match_test "loongarch_addu16i_imm12_operand_p (INTVAL (op), SImode)")))
++
++(define_predicate "const_dual_addu16i_operand"
++  (and (match_code "const_int")
++       (match_test "DUAL_ADDU16I_OPERAND (INTVAL (op))")))
++
+ (define_predicate "arith_operand"
+   (ior (match_operand 0 "const_arith_operand")
+        (match_operand 0 "register_operand")))
+ 
++(define_predicate "plus_di_operand"
++  (ior (match_operand 0 "arith_operand")
++       (match_operand 0 "const_dual_imm12_operand")
++       (match_operand 0 "const_addu16i_operand")
++       (match_operand 0 "const_addu16i_imm12_di_operand")
++       (match_operand 0 "const_dual_addu16i_operand")))
++
++(define_predicate "plus_si_extend_operand"
++  (ior (match_operand 0 "arith_operand")
++       (match_operand 0 "const_dual_imm12_operand")
++       (match_operand 0 "const_addu16i_imm12_si_operand")))
++
++(define_predicate "plus_si_operand"
++  (ior (match_operand 0 "plus_si_extend_operand")
++       (match_operand 0 "const_addu16i_operand")))
++
+ (define_predicate "const_immalsl_operand"
+   (and (match_code "const_int")
+        (match_test "IN_RANGE (INTVAL (op), 1, 4)")))
+diff --git a/gcc/testsuite/gcc.target/loongarch/add-const.c b/gcc/testsuite/gcc.target/loongarch/add-const.c
+new file mode 100644
+index 000000000..7b6a7cb92
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/add-const.c
+@@ -0,0 +1,45 @@
++/* { dg-do compile } */
++/* { dg-options "-O -mabi=lp64d" } */
++
++/* None of these functions should load the const operand into a temp
++   register.  */
++
++/* { dg-final { scan-assembler-not "add\\.[dw]" } } */
++
++unsigned long f01 (unsigned long x) { return x + 1; }
++unsigned long f02 (unsigned long x) { return x - 1; }
++unsigned long f03 (unsigned long x) { return x + 2047; }
++unsigned long f04 (unsigned long x) { return x + 4094; }
++unsigned long f05 (unsigned long x) { return x - 2048; }
++unsigned long f06 (unsigned long x) { return x - 4096; }
++unsigned long f07 (unsigned long x) { return x + 0x7fff0000; }
++unsigned long f08 (unsigned long x) { return x - 0x80000000l; }
++unsigned long f09 (unsigned long x) { return x + 0x7fff0000l * 2; }
++unsigned long f10 (unsigned long x) { return x - 0x80000000l * 2; }
++unsigned long f11 (unsigned long x) { return x + 0x7fff0000 + 0x1; }
++unsigned long f12 (unsigned long x) { return x + 0x7fff0000 - 0x1; }
++unsigned long f13 (unsigned long x) { return x + 0x7fff0000 + 0x7ff; }
++unsigned long f14 (unsigned long x) { return x + 0x7fff0000 - 0x800; }
++unsigned long f15 (unsigned long x) { return x - 0x80000000l - 1; }
++unsigned long f16 (unsigned long x) { return x - 0x80000000l + 1; }
++unsigned long f17 (unsigned long x) { return x - 0x80000000l - 0x800; }
++unsigned long f18 (unsigned long x) { return x - 0x80000000l + 0x7ff; }
++
++unsigned int g01 (unsigned int x) { return x + 1; }
++unsigned int g02 (unsigned int x) { return x - 1; }
++unsigned int g03 (unsigned int x) { return x + 2047; }
++unsigned int g04 (unsigned int x) { return x + 4094; }
++unsigned int g05 (unsigned int x) { return x - 2048; }
++unsigned int g06 (unsigned int x) { return x - 4096; }
++unsigned int g07 (unsigned int x) { return x + 0x7fff0000; }
++unsigned int g08 (unsigned int x) { return x - 0x80000000l; }
++unsigned int g09 (unsigned int x) { return x + 0x7fff0000l * 2; }
++unsigned int g10 (unsigned int x) { return x - 0x80000000l * 2; }
++unsigned int g11 (unsigned int x) { return x + 0x7fff0000 + 0x1; }
++unsigned int g12 (unsigned int x) { return x + 0x7fff0000 - 0x1; }
++unsigned int g13 (unsigned int x) { return x + 0x7fff0000 + 0x7ff; }
++unsigned int g14 (unsigned int x) { return x + 0x7fff0000 - 0x800; }
++unsigned int g15 (unsigned int x) { return x - 0x80000000l - 1; }
++unsigned int g16 (unsigned int x) { return x - 0x80000000l + 1; }
++unsigned int g17 (unsigned int x) { return x - 0x80000000l - 0x800; }
++unsigned int g18 (unsigned int x) { return x - 0x80000000l + 0x7ff; }
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c
+index 3533fe7b6..cd72154f4 100644
+--- a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c
+@@ -6,7 +6,7 @@
+ #define SIZE 128*1024
+ #include "stack-check-prologue.h"
+ 
+-/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 131088} 1 } } */
++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 131072} 1 } } */
+ /* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */
+ 
+ /* Checks that the CFA notes are correct for every sp adjustment.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c
+index e5e711105..3e5ca05b2 100644
+--- a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c
+@@ -6,7 +6,7 @@
+ #define SIZE 1280*1024 + 512
+ #include "stack-check-prologue.h"
+ 
+-/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 1311248} 1 } } */
++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 1311232} 1 } } */
+ /* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */
+ 
+ /* Checks that the CFA notes are correct for every sp adjustment.  */
+-- 
+2.33.0
+
diff --git a/LoongArch-Optimize-immediate-load.patch b/LoongArch-Optimize-immediate-load.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b4d9538cb5589a84c1f4afac3773796ebd3db17d
--- /dev/null
+++ b/LoongArch-Optimize-immediate-load.patch
@@ -0,0 +1,338 @@
+From b533b615ae47b97d51eeb83e1a63f7c72407430f Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 17 Nov 2022 17:08:36 +0800
+Subject: [PATCH 032/124] LoongArch: Optimize immediate load.
+
+The immediate number is split in the Split pass, not in the expand pass.
+
+Because the loop2_invariant pass hoists instructions that do not change
+in the loop out of the loop, some instructions would fail to meet the
+hoisting conditions if the machine split immediates during the expand
+pass, so the immediate decomposition is deferred to the split pass.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (enum loongarch_load_imm_method):
+	Remove the member METHOD_INSV that is not currently used.
+	(struct loongarch_integer_op): Define a new member curr_value,
+	that records the value of the number stored in the destination
+	register immediately after the current instruction has run.
+	(loongarch_build_integer): Assign a value to the curr_value member variable.
+	(loongarch_move_integer): Adds information for the immediate load instruction.
+	* config/loongarch/loongarch.md (*movdi_32bit): Redefine as define_insn_and_split.
+	(*movdi_64bit): Likewise.
+	(*movsi_internal): Likewise.
+	(*movhi_internal): Likewise.
+	* config/loongarch/predicates.md: Return true as long as it is CONST_INT, ensure
+	that the immediate number is not optimized by decomposition during expand
+	optimization loop.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/imm-load.c: New test.
+	* gcc.target/loongarch/imm-load1.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc             | 62 ++++++++++---------
+ gcc/config/loongarch/loongarch.md             | 44 +++++++++++--
+ gcc/config/loongarch/predicates.md            |  2 +-
+ gcc/testsuite/gcc.target/loongarch/imm-load.c | 10 +++
+ .../gcc.target/loongarch/imm-load1.c          | 26 ++++++++
+ 5 files changed, 110 insertions(+), 34 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/imm-load.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/imm-load1.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 622c9435b..f45a49f90 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -139,22 +139,21 @@ struct loongarch_address_info
+ 
+    METHOD_LU52I:
+      Load 52-63 bit of the immediate number.
+-
+-   METHOD_INSV:
+-     immediate like 0xfff00000fffffxxx
+-   */
++*/
+ enum loongarch_load_imm_method
+ {
+   METHOD_NORMAL,
+   METHOD_LU32I,
+-  METHOD_LU52I,
+-  METHOD_INSV
++  METHOD_LU52I
+ };
+ 
+ struct loongarch_integer_op
+ {
+   enum rtx_code code;
+   HOST_WIDE_INT value;
++  /* Represent the result of the immediate count of the load instruction at
++     each step.  */
++  HOST_WIDE_INT curr_value;
+   enum loongarch_load_imm_method method;
+ };
+ 
+@@ -1474,24 +1473,27 @@ loongarch_build_integer (struct loongarch_integer_op *codes,
+     {
+       /* The value of the lower 32 bit be loaded with one instruction.
+ 	 lu12i.w.  */
+-      codes[0].code = UNKNOWN;
+-      codes[0].method = METHOD_NORMAL;
+-      codes[0].value = low_part;
++      codes[cost].code = UNKNOWN;
++      codes[cost].method = METHOD_NORMAL;
++      codes[cost].value = low_part;
++      codes[cost].curr_value = low_part;
+       cost++;
+     }
+   else
+     {
+       /* lu12i.w + ior.  */
+-      codes[0].code = UNKNOWN;
+-      codes[0].method = METHOD_NORMAL;
+-      codes[0].value = low_part & ~(IMM_REACH - 1);
++      codes[cost].code = UNKNOWN;
++      codes[cost].method = METHOD_NORMAL;
++      codes[cost].value = low_part & ~(IMM_REACH - 1);
++      codes[cost].curr_value = codes[cost].value;
+       cost++;
+       HOST_WIDE_INT iorv = low_part & (IMM_REACH - 1);
+       if (iorv != 0)
+ 	{
+-	  codes[1].code = IOR;
+-	  codes[1].method = METHOD_NORMAL;
+-	  codes[1].value = iorv;
++	  codes[cost].code = IOR;
++	  codes[cost].method = METHOD_NORMAL;
++	  codes[cost].value = iorv;
++	  codes[cost].curr_value = low_part;
+ 	  cost++;
+ 	}
+     }
+@@ -1514,11 +1516,14 @@ loongarch_build_integer (struct loongarch_integer_op *codes,
+ 	{
+ 	  codes[cost].method = METHOD_LU52I;
+ 	  codes[cost].value = value & LU52I_B;
++	  codes[cost].curr_value = value;
+ 	  return cost + 1;
+ 	}
+ 
+       codes[cost].method = METHOD_LU32I;
+       codes[cost].value = (value & LU32I_B) | (sign51 ? LU52I_B : 0);
++      codes[cost].curr_value = (value & 0xfffffffffffff)
++	| (sign51 ? LU52I_B : 0);
+       cost++;
+ 
+       /* Determine whether the 52-61 bits are sign-extended from the low order,
+@@ -1527,6 +1532,7 @@ loongarch_build_integer (struct loongarch_integer_op *codes,
+ 	{
+ 	  codes[cost].method = METHOD_LU52I;
+ 	  codes[cost].value = value & LU52I_B;
++	  codes[cost].curr_value = value;
+ 	  cost++;
+ 	}
+     }
+@@ -2910,6 +2916,9 @@ loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
+       else
+ 	x = force_reg (mode, x);
+ 
++      set_unique_reg_note (get_last_insn (), REG_EQUAL,
++			   GEN_INT (codes[i-1].curr_value));
++
+       switch (codes[i].method)
+ 	{
+ 	case METHOD_NORMAL:
+@@ -2917,22 +2926,17 @@ loongarch_move_integer (rtx temp, rtx dest, unsigned HOST_WIDE_INT value)
+ 			      GEN_INT (codes[i].value));
+ 	  break;
+ 	case METHOD_LU32I:
+-	  emit_insn (
+-	    gen_rtx_SET (x,
+-			 gen_rtx_IOR (DImode,
+-				      gen_rtx_ZERO_EXTEND (
+-					DImode, gen_rtx_SUBREG (SImode, x, 0)),
+-				      GEN_INT (codes[i].value))));
++	  gcc_assert (mode == DImode);
++	  x = gen_rtx_IOR (DImode,
++			   gen_rtx_ZERO_EXTEND (DImode,
++						gen_rtx_SUBREG (SImode, x, 0)),
++			   GEN_INT (codes[i].value));
+ 	  break;
+ 	case METHOD_LU52I:
+-	  emit_insn (gen_lu52i_d (x, x, GEN_INT (0xfffffffffffff),
+-				  GEN_INT (codes[i].value)));
+-	  break;
+-	case METHOD_INSV:
+-	  emit_insn (
+-	    gen_rtx_SET (gen_rtx_ZERO_EXTRACT (DImode, x, GEN_INT (20),
+-					       GEN_INT (32)),
+-			 gen_rtx_REG (DImode, 0)));
++	  gcc_assert (mode == DImode);
++	  x = gen_rtx_IOR (DImode,
++			   gen_rtx_AND (DImode, x, GEN_INT (0xfffffffffffff)),
++			   GEN_INT (codes[i].value));
+ 	  break;
+ 	default:
+ 	  gcc_unreachable ();
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 2fda53819..f61db66d5 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1718,23 +1718,41 @@
+     DONE;
+ })
+ 
+-(define_insn "*movdi_32bit"
++(define_insn_and_split "*movdi_32bit"
+   [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m")
+        (match_operand:DI 1 "move_operand" "r,i,w,r,*J*r,*m,*f,*f"))]
+   "!TARGET_64BIT
+    && (register_operand (operands[0], DImode)
+        || reg_or_0_operand (operands[1], DImode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
++  "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO
++  (operands[0]))"
++  [(const_int 0)]
++  "
++{
++  loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1]));
++  DONE;
++}
++  "
+   [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore")
+    (set_attr "mode" "DI")])
+ 
+-(define_insn "*movdi_64bit"
++(define_insn_and_split "*movdi_64bit"
+   [(set (match_operand:DI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m")
+ 	(match_operand:DI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f"))]
+   "TARGET_64BIT
+    && (register_operand (operands[0], DImode)
+        || reg_or_0_operand (operands[1], DImode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
++  "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO
++  (operands[0]))"
++  [(const_int 0)]
++  "
++{
++  loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1]));
++  DONE;
++}
++  "
+   [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore")
+    (set_attr "mode" "DI")])
+ 
+@@ -1749,12 +1767,21 @@
+     DONE;
+ })
+ 
+-(define_insn "*movsi_internal"
++(define_insn_and_split "*movsi_internal"
+   [(set (match_operand:SI 0 "nonimmediate_operand" "=r,r,r,w,*f,*f,*r,*m,*r,*z")
+ 	(match_operand:SI 1 "move_operand" "r,Yd,w,rJ,*r*J,*m,*f,*f,*z,*r"))]
+   "(register_operand (operands[0], SImode)
+     || reg_or_0_operand (operands[1], SImode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
++  "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO
++  (operands[0]))"
++  [(const_int 0)]
++  "
++{
++  loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1]));
++  DONE;
++}
++  "
+   [(set_attr "move_type" "move,const,load,store,mgtf,fpload,mftg,fpstore,mftg,mgtf")
+    (set_attr "mode" "SI")])
+ 
+@@ -1774,12 +1801,21 @@
+     DONE;
+ })
+ 
+-(define_insn "*movhi_internal"
++(define_insn_and_split "*movhi_internal"
+   [(set (match_operand:HI 0 "nonimmediate_operand" "=r,r,r,r,m,r,k")
+ 	(match_operand:HI 1 "move_operand" "r,Yd,I,m,rJ,k,rJ"))]
+   "(register_operand (operands[0], HImode)
+        || reg_or_0_operand (operands[1], HImode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
++  "CONST_INT_P (operands[1]) && REG_P (operands[0]) && GP_REG_P (REGNO
++  (operands[0]))"
++  [(const_int 0)]
++  "
++{
++  loongarch_move_integer (operands[0], operands[0], INTVAL (operands[1]));
++  DONE;
++}
++  "
+   [(set_attr "move_type" "move,const,const,load,store,load,store")
+    (set_attr "mode" "HI")])
+ 
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 8bd0c1376..58c3dc226 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -226,7 +226,7 @@
+   switch (GET_CODE (op))
+     {
+     case CONST_INT:
+-      return !splittable_const_int_operand (op, mode);
++      return true;
+ 
+     case CONST:
+     case SYMBOL_REF:
+diff --git a/gcc/testsuite/gcc.target/loongarch/imm-load.c b/gcc/testsuite/gcc.target/loongarch/imm-load.c
+new file mode 100644
+index 000000000..c04ca3399
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/imm-load.c
+@@ -0,0 +1,10 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2 -fdump-rtl-split1" } */
++
++long int
++test (void)
++{
++  return 0x1234567890abcdef;
++}
++/* { dg-final { scan-rtl-dump-times "scanning new insn with uid" 6 "split1" } } */
++
+diff --git a/gcc/testsuite/gcc.target/loongarch/imm-load1.c b/gcc/testsuite/gcc.target/loongarch/imm-load1.c
+new file mode 100644
+index 000000000..2ff029712
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/imm-load1.c
+@@ -0,0 +1,26 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2" } */
++/* { dg-final { scan-assembler "test:.*lu52i\.d.*\n\taddi\.w.*\n\.L2:" } } */
++
++
++extern long long b[10];
++static inline long long
++repeat_bytes (void)
++{
++  long long r = 0x0101010101010101;
++
++  return r;
++}
++
++static inline long long
++highbit_mask (long long m)
++{
++  return m & repeat_bytes ();
++}
++
++void test(long long *a)
++{
++  for (int i = 0; i < 10; i++)
++    b[i] = highbit_mask (a[i]);
++
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Optimize-switch-with-sign-extended-index.patch b/LoongArch-Optimize-switch-with-sign-extended-index.patch
new file mode 100644
index 0000000000000000000000000000000000000000..c80a21764cbb229fffaa2c0bc7d49c48ac0db382
--- /dev/null
+++ b/LoongArch-Optimize-switch-with-sign-extended-index.patch
@@ -0,0 +1,114 @@
+From c6d4efda0fa064dfe1d3cc1b9abf8051a82cd74f Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Sat, 2 Sep 2023 10:59:55 +0800
+Subject: [PATCH 056/124] LoongArch: Optimize switch with sign-extended index.
+
+The patch refers to the submission of RISCV
+7bbce9b50302959286381d9177818642bceaf301.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_extend_comparands):
+	In unsigned QImode test, check for sign extended subreg and/or
+	constant operands, and do a sign extension in that case.
+	* config/loongarch/loongarch.md (TARGET_64BIT): Define
+	template cbranchqi4.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/switch-qi.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc              | 16 +++++++++++++---
+ gcc/config/loongarch/loongarch.md              | 10 +++++++---
+ gcc/testsuite/gcc.target/loongarch/switch-qi.c | 16 ++++++++++++++++
+ 3 files changed, 36 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/switch-qi.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 7b48e3216..41d344b82 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4221,11 +4221,21 @@ loongarch_allocate_fcc (machine_mode mode)
+ static void
+ loongarch_extend_comparands (rtx_code code, rtx *op0, rtx *op1)
+ {
+-  /* Comparisons consider all XLEN bits, so extend sub-XLEN values.  */
++  /* Comparisons consider all GRLEN bits, so extend sub-GRLEN values.  */
+   if (GET_MODE_SIZE (word_mode) > GET_MODE_SIZE (GET_MODE (*op0)))
+     {
+-      /* TODO: checkout It is more profitable to zero-extend QImode values.  */
+-      if (unsigned_condition (code) == code && GET_MODE (*op0) == QImode)
++      /* It is more profitable to zero-extend QImode values.  But not if the
++	 first operand has already been sign-extended, and the second one
++	 is a constant or has already been sign-extended also.  */
++      if (unsigned_condition (code) == code
++	  && (GET_MODE (*op0) == QImode
++	      && ! (GET_CODE (*op0) == SUBREG
++		    && SUBREG_PROMOTED_VAR_P (*op0)
++		    && SUBREG_PROMOTED_SIGNED_P (*op0)
++		    && (CONST_INT_P (*op1)
++			|| (GET_CODE (*op1) == SUBREG
++			    && SUBREG_PROMOTED_VAR_P (*op1)
++			    && SUBREG_PROMOTED_SIGNED_P (*op1))))))
+ 	{
+ 	  *op0 = gen_rtx_ZERO_EXTEND (word_mode, *op0);
+ 	  if (CONST_INT_P (*op1))
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index cf7441e0b..a5e9352ca 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -357,7 +357,7 @@
+ ;; pointer-sized quantities.  Exactly one of the two alternatives will match.
+ (define_mode_iterator P [(SI "Pmode == SImode") (DI "Pmode == DImode")])
+ 
+-;; Likewise, but for XLEN-sized quantities.
++;; Likewise, but for GRLEN-sized quantities.
+ (define_mode_iterator X [(SI "!TARGET_64BIT") (DI "TARGET_64BIT")])
+ 
+ ;; 64-bit modes for which we provide move patterns.
+@@ -2733,11 +2733,15 @@
+   [(set_attr "type" "branch")])
+ 
+ 
++;; Branches operate on GRLEN-sized quantities, but for LoongArch64 we accept
++;; QImode values so we can force zero-extension.
++(define_mode_iterator BR [(QI "TARGET_64BIT") SI (DI "TARGET_64BIT")])
++
+ (define_expand "cbranch4"
+   [(set (pc)
+ 	(if_then_else (match_operator 0 "comparison_operator"
+-			[(match_operand:GPR 1 "register_operand")
+-			 (match_operand:GPR 2 "nonmemory_operand")])
++			[(match_operand:BR 1 "register_operand")
++			 (match_operand:BR 2 "nonmemory_operand")])
+ 		      (label_ref (match_operand 3 ""))
+ 		      (pc)))]
+   ""
+diff --git a/gcc/testsuite/gcc.target/loongarch/switch-qi.c b/gcc/testsuite/gcc.target/loongarch/switch-qi.c
+new file mode 100644
+index 000000000..dd192fd49
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/switch-qi.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-march=loongarch64 -mabi=lp64d" } */
++/* { dg-final { scan-assembler-not "bstrpick" } } */
++
++/* Test for loongarch_extend_comparands patch.  */
++extern void asdf (int);
++void
++foo (signed char x) {
++    switch (x) {
++      case 0: asdf (10); break;
++      case 1: asdf (11); break;
++      case 2: asdf (12); break;
++      case 3: asdf (13); break;
++      case 4: asdf (14); break;
++    }
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Optimize-the-implementation-of-stack-check.patch b/LoongArch-Optimize-the-implementation-of-stack-check.patch
new file mode 100644
index 0000000000000000000000000000000000000000..f365ff257b50caf498fbb8aaabc9d2fc43e2e983
--- /dev/null
+++ b/LoongArch-Optimize-the-implementation-of-stack-check.patch
@@ -0,0 +1,810 @@
+From d3615b555d6885dba298f7b339740be11cb65a8f Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 29 Nov 2022 16:06:12 +0800
+Subject: [PATCH 033/124] LoongArch: Optimize the implementation of stack
+ check.
+
+The old stack check was performed before the stack was dropped,
+which would cause the detection tool to report a memory leak.
+
+The current stack check scheme is as follows:
+
+'-fstack-clash-protection':
+1. When the frame->total_size is smaller than the guard page size,
+   the stack is dropped according to the original scheme, and there
+   is no need to perform stack detection in the prologue.
+2. When frame->total_size is greater than or equal to guard page size,
+   the first step to drop the stack is to drop the space required by
+   the caller-save registers. This space needs to save the caller-save
+   registers, so an implicit stack check is performed.
+   So just need to check the rest of the stack space.
+
+'-fstack-check':
+There is no one-time stack drop and then page-by-page detection as
+described in the document. It is also the same as
+'-fstack-clash-protection', which is detected immediately after page drop.
+
+It is judged that when frame->total_size is not 0, only the size required
+to save the s register is dropped for the first stack down.
+
+The test cases are referenced from aarch64.
+
+gcc/ChangeLog:
+
+	* config/loongarch/linux.h (STACK_CHECK_MOVING_SP):
+	Define this macro to 1.
+	* config/loongarch/loongarch.cc (STACK_CLASH_PROTECTION_GUARD_SIZE):
+	Size of guard page.
+	(loongarch_first_stack_step): Return the size of the first drop stack
+	according to whether stack checking is performed.
+	(loongarch_emit_probe_stack_range): Adjust the method of stack checking in prologue.
+	(loongarch_output_probe_stack_range): Delete useless code.
+	(loongarch_expand_prologue): Adjust the method of stack checking in prologue.
+	(loongarch_option_override_internal): Enforce that interval is the same
+	size as size so the mid-end does the right thing.
+	* config/loongarch/loongarch.h (STACK_CLASH_MAX_UNROLL_PAGES):
+	New macro decide whether to loop stack detection.
+
+gcc/testsuite/ChangeLog:
+
+	* lib/target-supports.exp:
+	* gcc.target/loongarch/stack-check-alloca-1.c: New test.
+	* gcc.target/loongarch/stack-check-alloca-2.c: New test.
+	* gcc.target/loongarch/stack-check-alloca-3.c: New test.
+	* gcc.target/loongarch/stack-check-alloca-4.c: New test.
+	* gcc.target/loongarch/stack-check-alloca-5.c: New test.
+	* gcc.target/loongarch/stack-check-alloca-6.c: New test.
+	* gcc.target/loongarch/stack-check-alloca.h: New test.
+	* gcc.target/loongarch/stack-check-cfa-1.c: New test.
+	* gcc.target/loongarch/stack-check-cfa-2.c: New test.
+	* gcc.target/loongarch/stack-check-prologue-1.c: New test.
+	* gcc.target/loongarch/stack-check-prologue-2.c: New test.
+	* gcc.target/loongarch/stack-check-prologue-3.c: New test.
+	* gcc.target/loongarch/stack-check-prologue-4.c: New test.
+	* gcc.target/loongarch/stack-check-prologue-5.c: New test.
+	* gcc.target/loongarch/stack-check-prologue-6.c: New test.
+	* gcc.target/loongarch/stack-check-prologue-7.c: New test.
+	* gcc.target/loongarch/stack-check-prologue.h: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/linux.h                  |   3 +
+ gcc/config/loongarch/loongarch.cc             | 248 +++++++++++-------
+ gcc/config/loongarch/loongarch.h              |   4 +
+ .../loongarch/stack-check-alloca-1.c          |  15 ++
+ .../loongarch/stack-check-alloca-2.c          |  12 +
+ .../loongarch/stack-check-alloca-3.c          |  12 +
+ .../loongarch/stack-check-alloca-4.c          |  12 +
+ .../loongarch/stack-check-alloca-5.c          |  13 +
+ .../loongarch/stack-check-alloca-6.c          |  13 +
+ .../gcc.target/loongarch/stack-check-alloca.h |  15 ++
+ .../gcc.target/loongarch/stack-check-cfa-1.c  |  12 +
+ .../gcc.target/loongarch/stack-check-cfa-2.c  |  12 +
+ .../loongarch/stack-check-prologue-1.c        |  11 +
+ .../loongarch/stack-check-prologue-2.c        |  11 +
+ .../loongarch/stack-check-prologue-3.c        |  11 +
+ .../loongarch/stack-check-prologue-4.c        |  11 +
+ .../loongarch/stack-check-prologue-5.c        |  12 +
+ .../loongarch/stack-check-prologue-6.c        |  11 +
+ .../loongarch/stack-check-prologue-7.c        |  12 +
+ .../loongarch/stack-check-prologue.h          |   5 +
+ gcc/testsuite/lib/target-supports.exp         |   7 +-
+ 21 files changed, 361 insertions(+), 101 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h
+
+diff --git a/gcc/config/loongarch/linux.h b/gcc/config/loongarch/linux.h
+index 110d0fab9..00039ac18 100644
+--- a/gcc/config/loongarch/linux.h
++++ b/gcc/config/loongarch/linux.h
+@@ -48,3 +48,6 @@ along with GCC; see the file COPYING3.  If not see
+ #define STACK_CHECK_PROTECT (TARGET_64BIT ? 16 * 1024 : 12 * 1024)
+ 
+ #define TARGET_ASM_FILE_END file_end_indicate_exec_stack
++
++/* The stack pointer needs to be moved while checking the stack.  */
++#define STACK_CHECK_MOVING_SP 1
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index f45a49f90..e59edc4cd 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -257,6 +257,10 @@ const char *const
+ loongarch_fp_conditions[16]= {LARCH_FP_CONDITIONS (STRINGIFY)};
+ #undef STRINGIFY
+ 
++/* Size of guard page.  */
++#define STACK_CLASH_PROTECTION_GUARD_SIZE \
++  (1 << param_stack_clash_protection_guard_size)
++
+ /* Implement TARGET_FUNCTION_ARG_BOUNDARY.  Every parameter gets at
+    least PARM_BOUNDARY bits of alignment, but will be given anything up
+    to PREFERRED_STACK_BOUNDARY bits if the type requires it.  */
+@@ -1069,11 +1073,20 @@ loongarch_restore_reg (rtx reg, rtx mem)
+ static HOST_WIDE_INT
+ loongarch_first_stack_step (struct loongarch_frame_info *frame)
+ {
++  HOST_WIDE_INT min_first_step
++    = LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset);
++
++  /* When stack checking is required, if the sum of frame->total_size
++     and stack_check_protect is greater than stack clash protection guard
++     size, then return min_first_step.  */
++  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
++      || (flag_stack_clash_protection
++	  && frame->total_size > STACK_CLASH_PROTECTION_GUARD_SIZE))
++    return min_first_step;
++
+   if (IMM12_OPERAND (frame->total_size))
+     return frame->total_size;
+ 
+-  HOST_WIDE_INT min_first_step
+-    = LARCH_STACK_ALIGN (frame->total_size - frame->fp_sp_offset);
+   HOST_WIDE_INT max_first_step = IMM_REACH / 2 - PREFERRED_STACK_BOUNDARY / 8;
+   HOST_WIDE_INT min_second_step = frame->total_size - max_first_step;
+   gcc_assert (min_first_step <= max_first_step);
+@@ -1106,103 +1119,109 @@ loongarch_emit_stack_tie (void)
+ static void
+ loongarch_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
+ {
+-  /* See if we have a constant small number of probes to generate.  If so,
+-     that's the easy case.  */
+-  if ((TARGET_64BIT && (first + size <= 32768))
+-      || (!TARGET_64BIT && (first + size <= 2048)))
+-    {
+-      HOST_WIDE_INT i;
++  HOST_WIDE_INT rounded_size;
++  HOST_WIDE_INT interval;
+ 
+-      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
+-	 it exceeds SIZE.  If only one probe is needed, this will not
+-	 generate any code.  Then probe at FIRST + SIZE.  */
+-      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
+-	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+-					 -(first + i)));
++  if (flag_stack_clash_protection)
++    interval = STACK_CLASH_PROTECTION_GUARD_SIZE;
++  else
++    interval = PROBE_INTERVAL;
+ 
+-      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
+-				       -(first + size)));
+-    }
++  rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode);
++  rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode);
+ 
+-  /* Otherwise, do the same as above, but in a loop.  Note that we must be
+-     extra careful with variables wrapping around because we might be at
+-     the very top (or the very bottom) of the address space and we have
+-     to be able to handle this case properly; in particular, we use an
+-     equality test for the loop condition.  */
+-  else
+-    {
+-      HOST_WIDE_INT rounded_size;
+-      rtx r13 = LARCH_PROLOGUE_TEMP (Pmode);
+-      rtx r12 = LARCH_PROLOGUE_TEMP2 (Pmode);
+-      rtx r14 = LARCH_PROLOGUE_TEMP3 (Pmode);
++  size = size + first;
+ 
+-      /* Sanity check for the addressing mode we're going to use.  */
+-      gcc_assert (first <= 16384);
++  /* Sanity check for the addressing mode we're going to use.  */
++  gcc_assert (first <= 16384);
+ 
++  /* Step 1: round SIZE to the previous multiple of the interval.  */
+ 
+-      /* Step 1: round SIZE to the previous multiple of the interval.  */
++  rounded_size = ROUND_DOWN (size, interval);
+ 
+-      rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
++  /* Step 2: compute initial and final value of the loop counter.  */
+ 
+-      /* TEST_ADDR = SP + FIRST */
+-      if (first != 0)
+-	{
+-	  emit_move_insn (r14, GEN_INT (first));
+-	  emit_insn (gen_rtx_SET (r13, gen_rtx_MINUS (Pmode,
+-						      stack_pointer_rtx,
+-						      r14)));
+-	}
+-      else
+-	emit_move_insn (r13, stack_pointer_rtx);
++  emit_move_insn (r14, GEN_INT (interval));
++
++  /* If rounded_size is zero, it means that the space requested by
++     the local variable is less than the interval, and there is no
++     need to explicitly probe the allocated space.  */
++  if (rounded_size != 0)
++    {
++      /* Step 3: the loop
++
++	 do
++	 {
++	 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
++	 probe at TEST_ADDR
++	 }
++	 while (TEST_ADDR != LAST_ADDR)
+ 
+-      /* Step 2: compute initial and final value of the loop counter.  */
++	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
++	 until it is equal to ROUNDED_SIZE.  */
+ 
+-      emit_move_insn (r14, GEN_INT (PROBE_INTERVAL));
+-      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
+-      if (rounded_size == 0)
+-	emit_move_insn (r12, r13);
++      if (rounded_size <= STACK_CLASH_MAX_UNROLL_PAGES * interval)
++	{
++	  for (HOST_WIDE_INT i = 0; i < rounded_size; i += interval)
++	    {
++	      emit_insn (gen_rtx_SET (stack_pointer_rtx,
++				      gen_rtx_MINUS (Pmode,
++						     stack_pointer_rtx,
++						     r14)));
++	      emit_move_insn (gen_rtx_MEM (Pmode,
++					   gen_rtx_PLUS (Pmode,
++							 stack_pointer_rtx,
++							 const0_rtx)),
++			      const0_rtx);
++	      emit_insn (gen_blockage ());
++	    }
++	  dump_stack_clash_frame_info (PROBE_INLINE, size != rounded_size);
++	}
+       else
+ 	{
+ 	  emit_move_insn (r12, GEN_INT (rounded_size));
+-	  emit_insn (gen_rtx_SET (r12, gen_rtx_MINUS (Pmode, r13, r12)));
+-	  /* Step 3: the loop
+-
+-	     do
+-	     {
+-	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
+-	     probe at TEST_ADDR
+-	     }
+-	     while (TEST_ADDR != LAST_ADDR)
+-
+-	     probes at FIRST + N * PROBE_INTERVAL for values of N from 1
+-	     until it is equal to ROUNDED_SIZE.  */
+-
+-	  emit_insn (gen_probe_stack_range (Pmode, r13, r13, r12, r14));
++	  emit_insn (gen_rtx_SET (r12,
++				  gen_rtx_MINUS (Pmode,
++						 stack_pointer_rtx,
++						 r12)));
++
++	  emit_insn (gen_probe_stack_range (Pmode, stack_pointer_rtx,
++					    stack_pointer_rtx, r12, r14));
++	  emit_insn (gen_blockage ());
++	  dump_stack_clash_frame_info (PROBE_LOOP, size != rounded_size);
+ 	}
++    }
++  else
++    dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
++
+ 
+-      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
+-	 that SIZE is equal to ROUNDED_SIZE.  */
++  /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
++     that SIZE is equal to ROUNDED_SIZE.  */
+ 
+-      if (size != rounded_size)
++  if (size != rounded_size)
++    {
++      if (size - rounded_size >= 2048)
+ 	{
+-	  if (TARGET_64BIT)
+-	    emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
+-	  else
+-	    {
+-	      HOST_WIDE_INT i;
+-	      for (i = 2048; i < (size - rounded_size); i += 2048)
+-		{
+-		  emit_stack_probe (plus_constant (Pmode, r12, -i));
+-		  emit_insn (gen_rtx_SET (r12,
+-					  plus_constant (Pmode, r12, -2048)));
+-		}
+-	      rtx r1 = plus_constant (Pmode, r12,
+-				      -(size - rounded_size - i + 2048));
+-	      emit_stack_probe (r1);
+-	    }
++	  emit_move_insn (r14, GEN_INT (size - rounded_size));
++	  emit_insn (gen_rtx_SET (stack_pointer_rtx,
++				  gen_rtx_MINUS (Pmode,
++						 stack_pointer_rtx,
++						 r14)));
+ 	}
++      else
++	emit_insn (gen_rtx_SET (stack_pointer_rtx,
++				gen_rtx_PLUS (Pmode,
++					      stack_pointer_rtx,
++					      GEN_INT (rounded_size - size))));
+     }
+ 
++  if (first)
++    {
++      emit_move_insn (r12, GEN_INT (first));
++      emit_insn (gen_rtx_SET (stack_pointer_rtx,
++			      gen_rtx_PLUS (Pmode,
++					    stack_pointer_rtx, r12)));
++    }
+   /* Make sure nothing is scheduled before we are done.  */
+   emit_insn (gen_blockage ());
+ }
+@@ -1223,7 +1242,6 @@ loongarch_output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
+ 
+   /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
+   xops[0] = reg1;
+-  xops[1] = GEN_INT (-PROBE_INTERVAL);
+   xops[2] = reg3;
+   if (TARGET_64BIT)
+     output_asm_insn ("sub.d\t%0,%0,%2", xops);
+@@ -1249,28 +1267,11 @@ loongarch_expand_prologue (void)
+ {
+   struct loongarch_frame_info *frame = &cfun->machine->frame;
+   HOST_WIDE_INT size = frame->total_size;
+-  HOST_WIDE_INT tmp;
+   rtx insn;
+ 
+   if (flag_stack_usage_info)
+     current_function_static_stack_size = size;
+ 
+-  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
+-      || flag_stack_clash_protection)
+-    {
+-      if (crtl->is_leaf && !cfun->calls_alloca)
+-	{
+-	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
+-	    {
+-	      tmp = size - get_stack_check_protect ();
+-	      loongarch_emit_probe_stack_range (get_stack_check_protect (),
+-						tmp);
+-	    }
+-	}
+-      else if (size > 0)
+-	loongarch_emit_probe_stack_range (get_stack_check_protect (), size);
+-    }
+-
+   /* Save the registers.  */
+   if ((frame->mask | frame->fmask) != 0)
+     {
+@@ -1283,7 +1284,6 @@ loongarch_expand_prologue (void)
+       loongarch_for_each_saved_reg (size, loongarch_save_reg);
+     }
+ 
+-
+   /* Set up the frame pointer, if we're using one.  */
+   if (frame_pointer_needed)
+     {
+@@ -1294,7 +1294,45 @@ loongarch_expand_prologue (void)
+       loongarch_emit_stack_tie ();
+     }
+ 
+-  /* Allocate the rest of the frame.  */
++  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK
++       || flag_stack_clash_protection)
++    {
++      HOST_WIDE_INT first = get_stack_check_protect ();
++
++      if (frame->total_size == 0)
++	{
++	  /* do nothing.  */
++	  dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);
++	  return;
++	}
++
++      if (crtl->is_leaf && !cfun->calls_alloca)
++	{
++	  HOST_WIDE_INT interval;
++
++	  if (flag_stack_clash_protection)
++	    interval = STACK_CLASH_PROTECTION_GUARD_SIZE;
++	  else
++	    interval = PROBE_INTERVAL;
++
++	  if (size > interval && size > first)
++	    loongarch_emit_probe_stack_range (first, size - first);
++	  else
++	    loongarch_emit_probe_stack_range (first, size);
++	}
++      else
++	loongarch_emit_probe_stack_range (first, size);
++
++      if (size > 0)
++	{
++	  /* Describe the effect of the previous instructions.  */
++	  insn = plus_constant (Pmode, stack_pointer_rtx, -size);
++	  insn = gen_rtx_SET (stack_pointer_rtx, insn);
++	  loongarch_set_frame_expr (insn);
++	}
++      return;
++    }
++
+   if (size > 0)
+     {
+       if (IMM12_OPERAND (-size))
+@@ -1305,7 +1343,8 @@ loongarch_expand_prologue (void)
+ 	}
+       else
+ 	{
+-	  loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode), GEN_INT (-size));
++	  loongarch_emit_move (LARCH_PROLOGUE_TEMP (Pmode),
++			       GEN_INT (-size));
+ 	  emit_insn (gen_add3_insn (stack_pointer_rtx, stack_pointer_rtx,
+ 				    LARCH_PROLOGUE_TEMP (Pmode)));
+ 
+@@ -6162,6 +6201,15 @@ loongarch_option_override_internal (struct gcc_options *opts)
+ 	gcc_unreachable ();
+     }
+ 
++  /* Validate the guard size.  */
++  int guard_size = param_stack_clash_protection_guard_size;
++
++  /* Enforce that interval is the same size as size so the mid-end does the
++     right thing.  */
++  SET_OPTION_IF_UNSET (opts, &global_options_set,
++		       param_stack_clash_protection_probe_interval,
++		       guard_size);
++
+   loongarch_init_print_operand_punct ();
+ 
+   /* Set up array to map GCC register number to debug register number.
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index a52a81adf..392597943 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -668,6 +668,10 @@ enum reg_class
+ 
+ #define STACK_BOUNDARY (TARGET_ABI_LP64 ? 128 : 64)
+ 
++/* This value controls how many pages we manually unroll the loop for when
++   generating stack clash probes.  */
++#define STACK_CLASH_MAX_UNROLL_PAGES 4
++
+ /* Symbolic macros for the registers used to return integer and floating
+    point values.  */
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c
+new file mode 100644
+index 000000000..6ee589c4b
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-1.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-require-effective-target alloca } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE y
++#include "stack-check-alloca.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */
++/* { dg-final { scan-assembler-times {stx\.d\t\$r0,\$r3,\$r12} 1 } } */
++
++/* Dynamic alloca, expect loop, and 1 probe with top at sp.
++   1st probe is inside the loop for the full guard-size allocations, second
++   probe is for the case where residual is zero.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c
+new file mode 100644
+index 000000000..8deaa5873
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-2.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-require-effective-target alloca } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 0
++#include "stack-check-alloca.h"
++
++/* { dg-final { scan-assembler-not {stp*t*r*\.d\t\$r0,\$r3,4088} } } */
++
++/* Alloca of 0 should emit no probes, boundary condition.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c
+new file mode 100644
+index 000000000..e326ba9a0
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-3.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-require-effective-target alloca } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 100
++#include "stack-check-alloca.h"
++
++/* { dg-final { scan-assembler-times {st\.d\t\$r0,\$r3,104} 1 } } */
++
++/* Alloca is less than guard-size, 1 probe at the top of the new allocation.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c
+new file mode 100644
+index 000000000..b9f7572de
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-4.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-require-effective-target alloca } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 64 * 1024
++#include "stack-check-alloca.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */
++
++/* Alloca is exactly one guard-size, 1 probe expected at top.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c
+new file mode 100644
+index 000000000..0ff6e493f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-5.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-require-effective-target alloca } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 65 * 1024
++#include "stack-check-alloca.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,1016} 1 } } */
++
++/* Alloca is more than one guard-page. 2 probes expected.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c
+new file mode 100644
+index 000000000..c5cf74fcb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca-6.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-require-effective-target alloca } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 127 * 64 * 1024
++#include "stack-check-alloca.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r\d{1,2},-8} 1 } } */
++
++/* Large alloca of a constant amount which is a multiple of a guard-size.
++   Loop expected with top probe.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h
+new file mode 100644
+index 000000000..8c75f6c0f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-alloca.h
+@@ -0,0 +1,15 @@
++
++/* Avoid inclusion of alloca.h, unavailable on some systems.  */
++#define alloca __builtin_alloca
++
++__attribute__((noinline, noipa))
++void g (char* ptr, int y)
++{
++  ptr[y] = '\0';
++}
++
++void f_caller (int y)
++{
++  char* pStr = alloca(SIZE);
++  g (pStr, y);
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c
+new file mode 100644
+index 000000000..f0c6877fc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-1.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 128*1024
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 131088} 1 } } */
++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */
++
++/* Checks that the CFA notes are correct for every sp adjustment.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c
+new file mode 100644
+index 000000000..c6e07bc56
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-cfa-2.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16 -funwind-tables" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 1280*1024 + 512
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 1311248} 1 } } */
++/* { dg-final { scan-assembler-times {\.cfi_def_cfa_offset 0} 1 } } */
++
++/* Checks that the CFA notes are correct for every sp adjustment.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c
+new file mode 100644
+index 000000000..351bc1f61
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-1.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 128
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 0 } } */
++
++/* SIZE is smaller than guard-size so no probe expected.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c
+new file mode 100644
+index 000000000..6bba659a3
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-2.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 63 * 1024
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*.d\t\$r0,\$r3,0} 0 } } */
++
++/* SIZE is smaller than guard-size so no probe expected.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c
+new file mode 100644
+index 000000000..164956c37
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-3.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 64 * 1024
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 1 } } */
++
++/* SIZE is equal to guard-size, 1 probe expected, boundary condition.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c
+new file mode 100644
+index 000000000..f53da6b0d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-4.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 65 * 1024
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 1 } } */
++
++/* SIZE is more than guard-size, 1 probe expected.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c
+new file mode 100644
+index 000000000..c092317ea
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-5.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 127 * 1024
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 1 } } */
++
++/* SIZE is more than 1x guard-size and remainder smaller than guard-size,
++   1 probe expected, unrolled, no loop.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c
+new file mode 100644
+index 000000000..70a2f53f6
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-6.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 128 * 1024
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*\.d\t\$r0,\$r3,0} 2 } } */
++
++/* SIZE is more than 2x guard-size and no remainder, unrolled, no loop.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c
+new file mode 100644
+index 000000000..e2df89acc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue-7.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fstack-clash-protection --param stack-clash-protection-guard-size=16" } */
++/* { dg-require-effective-target supports_stack_clash_protection } */
++/* { dg-skip-if "" { *-*-* } { "-fstack-check" } { "" } } */
++
++#define SIZE 6 * 64 * 1024
++#include "stack-check-prologue.h"
++
++/* { dg-final { scan-assembler-times {stp*t*r*.d\t\$r0,\$r3,0} 1 } } */
++
++/* SIZE is more than 4x guard-size and no remainder, 1 probe expected in a loop
++   and no residual probe.  */
+diff --git a/gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h
+new file mode 100644
+index 000000000..b7e06aedb
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/stack-check-prologue.h
+@@ -0,0 +1,5 @@
++int f_test (int x)
++{
++  char arr[SIZE];
++  return arr[x];
++}
+diff --git a/gcc/testsuite/lib/target-supports.exp b/gcc/testsuite/lib/target-supports.exp
+index c858bd93b..3a326ea1c 100644
+--- a/gcc/testsuite/lib/target-supports.exp
++++ b/gcc/testsuite/lib/target-supports.exp
+@@ -11292,7 +11292,8 @@ proc check_effective_target_supports_stack_clash_protection { } {
+ 
+     if { [istarget x86_64-*-*] || [istarget i?86-*-*] 
+ 	  || [istarget powerpc*-*-*] || [istarget rs6000*-*-*]
+-	  || [istarget aarch64*-**] || [istarget s390*-*-*] } {
++	  || [istarget aarch64*-**] || [istarget s390*-*-*]
++	  || [istarget loongarch64*-**] } {
+ 	return 1
+     }
+   return 0
+@@ -11343,6 +11344,10 @@ proc check_effective_target_caller_implicit_probes { } {
+ 	return 1;
+   }
+ 
++  if { [istarget loongarch64*-*-*] } {
++	return 1;
++  }
++
+   return 0
+ }
+ 
+-- 
+2.33.0
+
diff --git a/LoongArch-Optimized-multiply-instruction-generation.patch b/LoongArch-Optimized-multiply-instruction-generation.patch
new file mode 100644
index 0000000000000000000000000000000000000000..1bd00c15ec2a636961b3aefb7a7fa36f596334aa
--- /dev/null
+++ b/LoongArch-Optimized-multiply-instruction-generation.patch
@@ -0,0 +1,232 @@
+From aa1dc79c9a5ff3df241a94cbfb1c857cfa89c686 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Tue, 5 Sep 2023 11:09:03 +0800
+Subject: [PATCH 074/124] LoongArch: Optimized multiply instruction generation.
+
+	1. Can generate mulh.w[u] instruction.
+	2. Can generate mulw.d.wu instruction.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (mulsidi3_64bit):
+	Field unsigned extension support.
+	(muldi3_highpart): Modify template name.
+	(mulsi3_highpart): Likewise.
+	(mulsidi3_64bit): Field unsigned extension support.
+	(muldi3_highpart): Modify muldi3_highpart to
+	smuldi3_highpart.
+	(mulsi3_highpart): Modify mulsi3_highpart to
+	smulsi3_highpart.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/mulw_d_wu.c: New test.
+	* gcc.target/loongarch/smuldi3_highpart.c: New test.
+	* gcc.target/loongarch/smulsi3_highpart.c: New test.
+	* gcc.target/loongarch/umulsi3_highpart.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md             | 66 ++++++++++++-------
+ .../gcc.target/loongarch/mulw_d_wu.c          |  9 +++
+ .../gcc.target/loongarch/smuldi3_highpart.c   | 13 ++++
+ .../gcc.target/loongarch/smulsi3_highpart.c   | 15 +++++
+ .../gcc.target/loongarch/umulsi3_highpart.c   | 14 ++++
+ 5 files changed, 94 insertions(+), 23 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 11c18bf15..264cd325c 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -750,15 +750,6 @@
+   [(set_attr "type" "imul")
+    (set_attr "mode" "")])
+ 
+-(define_insn "mulsidi3_64bit"
+-  [(set (match_operand:DI 0 "register_operand" "=r")
+-	(mult:DI (sign_extend:DI (match_operand:SI 1 "register_operand" "r"))
+-		 (sign_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
+-  "TARGET_64BIT"
+-  "mulw.d.w\t%0,%1,%2"
+-  [(set_attr "type" "imul")
+-   (set_attr "mode" "DI")])
+-
+ (define_insn "*mulsi3_extended"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(sign_extend:DI
+@@ -787,14 +778,14 @@
+   emit_insn (gen_muldi3 (low, operands[1], operands[2]));
+ 
+   rtx high = gen_reg_rtx (DImode);
+-  emit_insn (gen_muldi3_highpart (high, operands[1], operands[2]));
++  emit_insn (gen_muldi3_highpart (high, operands[1], operands[2]));
+ 
+   emit_move_insn (gen_lowpart (DImode, operands[0]), low);
+   emit_move_insn (gen_highpart (DImode, operands[0]), high);
+   DONE;
+ })
+ 
+-(define_insn "muldi3_highpart"
++(define_insn "muldi3_highpart"
+   [(set (match_operand:DI 0 "register_operand" "=r")
+ 	(truncate:DI
+ 	  (lshiftrt:TI
+@@ -809,22 +800,34 @@
+    (set_attr "mode" "DI")])
+ 
+ (define_expand "mulsidi3"
+-  [(set (match_operand:DI 0 "register_operand" "=r")
++  [(set (match_operand:DI 0 "register_operand")
+ 	(mult:DI (any_extend:DI
+-		   (match_operand:SI 1 "register_operand" " r"))
++		   (match_operand:SI 1 "register_operand"))
+ 		 (any_extend:DI
+-		   (match_operand:SI 2 "register_operand" " r"))))]
+-  "!TARGET_64BIT"
++		   (match_operand:SI 2 "register_operand"))))]
++  ""
+ {
+-  rtx temp = gen_reg_rtx (SImode);
+-  emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
+-  emit_insn (gen_mulsi3_highpart (loongarch_subword (operands[0], true),
+-				     operands[1], operands[2]));
+-  emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp));
+-  DONE;
++  if (!TARGET_64BIT)
++  {
++    rtx temp = gen_reg_rtx (SImode);
++    emit_insn (gen_mulsi3 (temp, operands[1], operands[2]));
++    emit_insn (gen_mulsi3_highpart (loongarch_subword (operands[0], true),
++				       operands[1], operands[2]));
++    emit_insn (gen_movsi (loongarch_subword (operands[0], false), temp));
++    DONE;
++  }
+ })
+ 
+-(define_insn "mulsi3_highpart"
++(define_insn "mulsidi3_64bit"
++  [(set (match_operand:DI 0 "register_operand" "=r")
++	(mult:DI (any_extend:DI (match_operand:SI 1 "register_operand" "r"))
++		 (any_extend:DI (match_operand:SI 2 "register_operand" "r"))))]
++  "TARGET_64BIT"
++  "mulw.d.w\t%0,%1,%2"
++  [(set_attr "type" "imul")
++   (set_attr "mode" "DI")])
++
++(define_insn "mulsi3_highpart"
+   [(set (match_operand:SI 0 "register_operand" "=r")
+ 	(truncate:SI
+ 	  (lshiftrt:DI
+@@ -833,11 +836,28 @@
+ 		     (any_extend:DI
+ 		       (match_operand:SI 2 "register_operand" " r")))
+ 	    (const_int 32))))]
+-  "!TARGET_64BIT"
++  ""
+   "mulh.w\t%0,%1,%2"
+   [(set_attr "type" "imul")
+    (set_attr "mode" "SI")])
+ 
++;; Under the LoongArch architecture, the mulh.w[u] instruction performs
++;; sign extension by default, so the sign extension instruction can be
++;; eliminated.
++(define_peephole
++  [(set (match_operand:SI 0 "register_operand")
++	(truncate:SI
++	  (lshiftrt:DI
++	    (mult:DI (any_extend:DI
++		       (match_operand:SI 1 "register_operand"))
++		     (any_extend:DI
++		       (match_operand:SI 2 "register_operand")))
++	    (const_int 32))))
++   (set (match_operand:DI 3 "register_operand")
++	(sign_extend:DI (match_dup 0)))]
++   "TARGET_64BIT && REGNO (operands[0]) == REGNO (operands[3])"
++   "mulh.w\t%0,%1,%2")
++
+ ;;
+ ;;  ....................
+ ;;
+diff --git a/gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c b/gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c
+new file mode 100644
+index 000000000..16163d667
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/mulw_d_wu.c
+@@ -0,0 +1,9 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "mulw.d.wu" } } */
++
++__attribute__((noipa, noinline)) unsigned long
++f(unsigned long a, unsigned long b)
++{
++  return (unsigned long)(unsigned int)a * (unsigned long)(unsigned int)b;
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c b/gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c
+new file mode 100644
+index 000000000..6f5c686ca
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/smuldi3_highpart.c
+@@ -0,0 +1,13 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2 -fdump-rtl-expand-all" } */
++
++typedef int TI __attribute ((mode(TI)));
++typedef int DI __attribute__((mode(DI)));
++
++DI
++test (DI x, DI y)
++{
++  return ((TI)x * y) >> 64;
++}
++
++/* { dg-final { scan-rtl-dump "highparttmp" "expand" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c b/gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c
+new file mode 100644
+index 000000000..c4dbf8afc
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/smulsi3_highpart.c
+@@ -0,0 +1,15 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -fdump-rtl-expand-all" } */
++
++typedef unsigned int DI __attribute__((mode(DI)));
++typedef unsigned int SI __attribute__((mode(SI)));
++
++SI
++f (SI x, SI y)
++{
++  return ((DI) x * y) >> 32;
++}
++
++/* { dg-final { scan-rtl-dump "highparttmp" "expand" } } */
++/* { dg-final { scan-assembler "mulh\\.w" } } */
++/* { dg-final { scan-assembler-not "slli\\.w" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c b/gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c
+new file mode 100644
+index 000000000..e208803e2
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/umulsi3_highpart.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++
++typedef unsigned int DI __attribute__((mode(DI)));
++typedef unsigned int SI __attribute__((mode(SI)));
++
++SI
++f (SI x, SI y)
++{
++  return ((DI) x * y) >> 32;
++}
++
++/* { dg-final { scan-assembler "mulh\\.wu" } } */
++/* { dg-final { scan-assembler-not "slli\\.w" } } */
+-- 
+2.33.0
+
diff --git a/LoongArch-Prepare-static-PIE-support.patch b/LoongArch-Prepare-static-PIE-support.patch
new file mode 100644
index 0000000000000000000000000000000000000000..325525ab1d6b1158494754f8137480e7a36418e0
--- /dev/null
+++ b/LoongArch-Prepare-static-PIE-support.patch
@@ -0,0 +1,44 @@
+From aa2d9e0e1dc4bf0b612618cf0e3fcea514f92f95 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 13 Sep 2022 23:21:39 +0800
+Subject: [PATCH 018/124] LoongArch: Prepare static PIE support
+
+Static PIE allows us to extend the ASLR to cover static executables and
+it's not too difficult to support it.  On GCC side, we just pass a group
+of options to the linker, like other ports with static PIE support.
+
+The real implementation of static PIE (rcrt1.o) will be added into Glibc
+later.
+
+gcc/ChangeLog:
+
+	* config/loongarch/gnu-user.h (GNU_USER_TARGET_LINK_SPEC): For
+	-static-pie, pass -static -pie --no-dynamic-linker -z text to
+	the linker, and do not pass --dynamic-linker.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/gnu-user.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/gnu-user.h b/gcc/config/loongarch/gnu-user.h
+index 664dc9206..c5b1afe53 100644
+--- a/gcc/config/loongarch/gnu-user.h
++++ b/gcc/config/loongarch/gnu-user.h
+@@ -40,8 +40,10 @@ along with GCC; see the file COPYING3.  If not see
+ #undef GNU_USER_TARGET_LINK_SPEC
+ #define GNU_USER_TARGET_LINK_SPEC \
+   "%{G*} %{shared} -m " GNU_USER_LINK_EMULATION \
+-  "%{!shared: %{static} %{!static: %{rdynamic:-export-dynamic} " \
+-  "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}}"
++  "%{!shared: %{static} " \
++  "%{!static: %{!static-pie: %{rdynamic:-export-dynamic} " \
++  "-dynamic-linker " GNU_USER_DYNAMIC_LINKER "}} " \
++  "%{static-pie: -static -pie --no-dynamic-linker -z text}}"
+ 
+ 
+ /* Similar to standard Linux, but adding -ffast-math support.  */
+-- 
+2.33.0
+
diff --git a/LoongArch-Provide-fmin-fmax-RTL-pattern.patch b/LoongArch-Provide-fmin-fmax-RTL-pattern.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2200e0b2dca2d837d9d9f5f461418385d2996899
--- /dev/null
+++ b/LoongArch-Provide-fmin-fmax-RTL-pattern.patch
@@ -0,0 +1,100 @@
+From b065c84206cdf463a377ca28f719dae7acbed0f7 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 16 Aug 2022 15:34:36 +0800
+Subject: [PATCH 009/124] LoongArch: Provide fmin/fmax RTL pattern
+
+We already had smin/smax RTL pattern using fmin/fmax instruction.  But
+for smin/smax, it's unspecified what will happen if either operand is
+NaN.  So we would generate calls to libc fmin/fmax functions with
+-fno-finite-math-only (the default for all optimization levels except
+-Ofast).
+
+But, LoongArch fmin/fmax instruction is IEEE-754-2008 conformant so we
+can also use the instruction for fmin/fmax pattern and avoid the library
+function call.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (fmax3): New RTL pattern.
+	(fmin3): Likewise.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/fmax-fmin.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md             | 18 +++++++++++
+ .../gcc.target/loongarch/fmax-fmin.c          | 30 +++++++++++++++++++
+ 2 files changed, 48 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/fmax-fmin.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 6b6df22a5..8e8868de9 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1023,6 +1023,24 @@
+   [(set_attr "type" "fmove")
+    (set_attr "mode" "")])
+ 
++(define_insn "fmax3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		   (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++  "fmax.\t%0,%1,%2"
++  [(set_attr "type" "fmove")
++   (set_attr "mode" "")])
++
++(define_insn "fmin3"
++  [(set (match_operand:ANYF 0 "register_operand" "=f")
++	(smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
++		   (match_operand:ANYF 2 "register_operand" "f")))]
++  ""
++  "fmin.\t%0,%1,%2"
++  [(set_attr "type" "fmove")
++   (set_attr "mode" "")])
++
+ (define_insn "smaxa3"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+ 	(if_then_else:ANYF
+diff --git a/gcc/testsuite/gcc.target/loongarch/fmax-fmin.c b/gcc/testsuite/gcc.target/loongarch/fmax-fmin.c
+new file mode 100644
+index 000000000..92cf8a150
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/fmax-fmin.c
+@@ -0,0 +1,30 @@
++/* { dg-do compile } */
++/* { dg-options "-mdouble-float -fno-finite-math-only" } */
++/* { dg-final { scan-assembler "fmin\\.s" } } */
++/* { dg-final { scan-assembler "fmin\\.d" } } */
++/* { dg-final { scan-assembler "fmax\\.s" } } */
++/* { dg-final { scan-assembler "fmax\\.d" } } */
++
++double
++_fmax(double a, double b)
++{
++  return __builtin_fmax(a, b);
++}
++
++float
++_fmaxf(float a, float b)
++{
++  return __builtin_fmaxf(a, b);
++}
++
++double
++_fmin(double a, double b)
++{
++  return __builtin_fmin(a, b);
++}
++
++float
++_fminf(float a, float b)
++{
++  return __builtin_fminf(a, b);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Remove-redundant-sign-extension-instructio.patch b/LoongArch-Remove-redundant-sign-extension-instructio.patch
new file mode 100644
index 0000000000000000000000000000000000000000..7f9d62e34c6dd047c9916d318c554a65c3913212
--- /dev/null
+++ b/LoongArch-Remove-redundant-sign-extension-instructio.patch
@@ -0,0 +1,180 @@
+From fbe6421c5600a151dbae96d18db2fd31aca2fe7c Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Thu, 24 Aug 2023 16:44:56 +0800
+Subject: [PATCH 051/124] LoongArch: Remove redundant sign extension
+ instructions caused by SLT instructions.
+
+Since the SLT instruction does not distinguish between 64-bit operations and 32-bit
+operations under the 64-bit LoongArch architecture, if the operand of slt is SImode,
+the sign extension of the operand needs to be done explicitly.
+
+But similar to the test case below, the sign extension is redundant:
+
+	extern int src1, src2, src3;
+
+	int
+	test (void)
+	{
+	  int data1 = src1 + src2;
+	  int data2 = src1 + src3;
+	  return data1 > data2 ? data1 : data2;
+	}
+Assembly code before optimization:
+ 	...
+	add.w	$r4,$r4,$r14
+	add.w	$r13,$r13,$r14
+	slli.w	$r12,$r4,0
+	slli.w	$r14,$r13,0
+	slt	$r12,$r12,$r14
+	masknez	$r4,$r4,$r12
+	maskeqz	$r12,$r13,$r12
+	or	$r4,$r4,$r12
+	slli.w	$r4,$r4,0
+	...
+
+After optimization:
+	...
+	add.w	$r12,$r12,$r14
+	add.w	$r13,$r13,$r14
+	slt	$r4,$r12,$r13
+	masknez	$r12,$r12,$r4
+	maskeqz	$r4,$r13,$r4
+	or	$r4,$r12,$r4
+	...
+
+Similar to this test example, the two operands of SLT are obtained by the
+addition operation, and add.w implicitly sign-extends, so the two operands
+of SLT do not require sign-extend.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_expand_conditional_move):
+	Optimize the function implementation.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/slt-sign-extend.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch.cc             | 53 +++++++++++++++++--
+ .../gcc.target/loongarch/slt-sign-extend.c    | 14 +++++
+ 2 files changed, 63 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index f14de5cce..caacfa8a3 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4380,14 +4380,30 @@ loongarch_expand_conditional_move (rtx *operands)
+   enum rtx_code code = GET_CODE (operands[1]);
+   rtx op0 = XEXP (operands[1], 0);
+   rtx op1 = XEXP (operands[1], 1);
++  rtx op0_extend = op0;
++  rtx op1_extend = op1;
++
++  /* Record whether operands[2] and operands[3] modes are promoted to word_mode.  */
++  bool promote_p = false;
++  machine_mode mode = GET_MODE (operands[0]);
+ 
+   if (FLOAT_MODE_P (GET_MODE (op1)))
+     loongarch_emit_float_compare (&code, &op0, &op1);
+   else
+     {
++      if ((REGNO (op0) == REGNO (operands[2])
++	   || (REGNO (op1) == REGNO (operands[3]) && (op1 != const0_rtx)))
++	  && (GET_MODE_SIZE (GET_MODE (op0)) < word_mode))
++	{
++	  mode = word_mode;
++	  promote_p = true;
++	}
++
+       loongarch_extend_comparands (code, &op0, &op1);
+ 
+       op0 = force_reg (word_mode, op0);
++      op0_extend = op0;
++      op1_extend = force_reg (word_mode, op1);
+ 
+       if (code == EQ || code == NE)
+ 	{
+@@ -4414,23 +4430,52 @@ loongarch_expand_conditional_move (rtx *operands)
+       && register_operand (operands[2], VOIDmode)
+       && register_operand (operands[3], VOIDmode))
+     {
+-      machine_mode mode = GET_MODE (operands[0]);
++      rtx op2 = operands[2];
++      rtx op3 = operands[3];
++
++      if (promote_p)
++	{
++	  if (REGNO (XEXP (operands[1], 0)) == REGNO (operands[2]))
++	    op2 = op0_extend;
++	  else
++	    {
++	      loongarch_extend_comparands (code, &op2, &const0_rtx);
++	      op2 = force_reg (mode, op2);
++	    }
++
++	  if (REGNO (XEXP (operands[1], 1)) == REGNO (operands[3]))
++	    op3 = op1_extend;
++	  else
++	    {
++	      loongarch_extend_comparands (code, &op3, &const0_rtx);
++	      op3 = force_reg (mode, op3);
++	    }
++	}
++
+       rtx temp = gen_reg_rtx (mode);
+       rtx temp2 = gen_reg_rtx (mode);
+ 
+       emit_insn (gen_rtx_SET (temp,
+ 			      gen_rtx_IF_THEN_ELSE (mode, cond,
+-						    operands[2], const0_rtx)));
++						    op2, const0_rtx)));
+ 
+       /* Flip the test for the second operand.  */
+       cond = gen_rtx_fmt_ee ((code == EQ) ? NE : EQ, GET_MODE (op0), op0, op1);
+ 
+       emit_insn (gen_rtx_SET (temp2,
+ 			      gen_rtx_IF_THEN_ELSE (mode, cond,
+-						    operands[3], const0_rtx)));
++						    op3, const0_rtx)));
+ 
+       /* Merge the two results, at least one is guaranteed to be zero.  */
+-      emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2)));
++      if (promote_p)
++	{
++	  rtx temp3 = gen_reg_rtx (mode);
++	  emit_insn (gen_rtx_SET (temp3, gen_rtx_IOR (mode, temp, temp2)));
++	  temp3 = gen_lowpart (GET_MODE (operands[0]), temp3);
++	  loongarch_emit_move (operands[0], temp3);
++	}
++      else
++	emit_insn (gen_rtx_SET (operands[0], gen_rtx_IOR (mode, temp, temp2)));
+     }
+   else
+     emit_insn (gen_rtx_SET (operands[0],
+diff --git a/gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c b/gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c
+new file mode 100644
+index 000000000..ea6b28b7c
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/slt-sign-extend.c
+@@ -0,0 +1,14 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O2" } */
++/* { dg-final { scan-assembler-not "slli.w" } } */
++
++extern int src1, src2, src3;
++
++int
++test (void)
++{
++  int data1 = src1 + src2;
++  int data2 = src1 + src3;
++
++  return data1 > data2 ? data1 : data2;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Remove-the-definition-of-the-macro-LOGICAL.patch b/LoongArch-Remove-the-definition-of-the-macro-LOGICAL.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6ca420c106185d3a98a451ff11d54185aee631fe
--- /dev/null
+++ b/LoongArch-Remove-the-definition-of-the-macro-LOGICAL.patch
@@ -0,0 +1,36 @@
+From 297b8c5770ad85bf468526602e28aff8a66dc01a Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Thu, 13 Apr 2023 19:24:38 +0800
+Subject: [PATCH 040/124] LoongArch: Remove the definition of the macro
+ LOGICAL_OP_NON_SHORT_CIRCUIT under the architecture and use the default
+ definition instead.
+
+In some cases, setting this macro as the default can reduce the number of conditional
+branch instructions.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (LOGICAL_OP_NON_SHORT_CIRCUIT): Remove the macro
+	definition.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch.h | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 392597943..c6e37b1b4 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -836,7 +836,6 @@ typedef struct {
+    1 is the default; other values are interpreted relative to that.  */
+ 
+ #define BRANCH_COST(speed_p, predictable_p) loongarch_branch_cost
+-#define LOGICAL_OP_NON_SHORT_CIRCUIT 0
+ 
+ /* Return the asm template for a conditional branch instruction.
+    OPCODE is the opcode's mnemonic and OPERANDS is the asm template for
+-- 
+2.33.0
+
diff --git a/LoongArch-Rename-frint_-fmt-to-rint-mode-2.patch b/LoongArch-Rename-frint_-fmt-to-rint-mode-2.patch
new file mode 100644
index 0000000000000000000000000000000000000000..21c7f9a0b20fa47182353acf2ff966b17b107e97
--- /dev/null
+++ b/LoongArch-Rename-frint_-fmt-to-rint-mode-2.patch
@@ -0,0 +1,65 @@
+From 7584716b03b13c06b8bb9956b9f49e0cfc29c6b3 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao <xry111@xry111.site>
+Date: Sun, 6 Nov 2022 20:41:38 +0800
+Subject: [PATCH 027/124] LoongArch: Rename frint_<fmt> to rint<mode>2
+
+Use standard name so __builtin_rint{,f} can be expanded to one
+instruction.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md (frint_<fmt>): Rename to ..
+	(rint<mode>2): .. this.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/frint.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch.md          |  4 ++--
+ gcc/testsuite/gcc.target/loongarch/frint.c | 16 ++++++++++++++++
+ 2 files changed, 18 insertions(+), 2 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/frint.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index bda34d0f3..a14ab14ac 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2012,8 +2012,8 @@
+   [(set_attr "type" "move")]
+ )
+ 
+-;; Convert floating-point numbers to integers
+-(define_insn "frint_<fmt>"
++;; Round floating-point numbers to integers
++(define_insn "rint<mode>2"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+ 	(unspec:ANYF [(match_operand:ANYF 1 "register_operand" "f")]
+ 		      UNSPEC_FRINT))]
+diff --git a/gcc/testsuite/gcc.target/loongarch/frint.c b/gcc/testsuite/gcc.target/loongarch/frint.c
+new file mode 100644
+index 000000000..3ee6a8f97
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/frint.c
+@@ -0,0 +1,16 @@
++/* { dg-do compile } */
++/* { dg-options "-mdouble-float" } */
++/* { dg-final { scan-assembler "frint\\.s" } } */
++/* { dg-final { scan-assembler "frint\\.d" } } */
++
++double
++my_rint (double a)
++{
++  return __builtin_rint (a);
++}
++
++float
++my_rintf (float a)
++{
++  return __builtin_rintf (a);
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Set-default-alignment-for-functions-and-la.patch b/LoongArch-Set-default-alignment-for-functions-and-la.patch
new file mode 100644
index 0000000000000000000000000000000000000000..40a01cfaa4329af0ec9de89ec97000774367c08d
--- /dev/null
+++ b/LoongArch-Set-default-alignment-for-functions-and-la.patch
@@ -0,0 +1,113 @@
+From 129d96b9ab5a2445d0fc2c3f7b72baa0453bd93f Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao <xry111@xry111.site>
+Date: Wed, 14 Jun 2023 08:24:05 +0800
+Subject: [PATCH 047/124] LoongArch: Set default alignment for functions and
+ labels with -mtune
+
+The LA464 micro-architecture is sensitive to alignment of code.  The
+Loongson team has benchmarked various combinations of function, the
+results [1] show that 16-byte label alignment together with 32-byte
+function alignment gives best results in terms of SPEC score.
+
+Add a mtune-based table-driven mechanism to set the default of
+-falign-{functions,labels}.  As LA464 is the first (and the only for
+now) uarch supported by GCC, the same setting is also used for
+the "generic" -mtune=loongarch64.  In the future we may set different
+settings for LA{2,3,6}64 once we add the support for them.
+
+Bootstrapped and regtested on loongarch64-linux-gnu.  Ok for trunk?
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-tune.h (loongarch_align): New
+	struct.
+	* config/loongarch/loongarch-def.h (loongarch_cpu_align): New
+	array.
+	* config/loongarch/loongarch-def.c (loongarch_cpu_align): Define
+	the array.
+	* config/loongarch/loongarch.cc
+	(loongarch_option_override_internal): Set the value of
+	-falign-functions= if -falign-functions is enabled but no value
+	is given.  Likewise for -falign-labels=.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch-def.c  | 12 ++++++++++++
+ gcc/config/loongarch/loongarch-def.h  |  1 +
+ gcc/config/loongarch/loongarch-tune.h |  8 ++++++++
+ gcc/config/loongarch/loongarch.cc     |  6 ++++++
+ 4 files changed, 27 insertions(+)
+
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+index 80ab10a52..74d422ce0 100644
+--- a/gcc/config/loongarch/loongarch-def.c
++++ b/gcc/config/loongarch/loongarch-def.c
+@@ -72,6 +72,18 @@ loongarch_cpu_cache[N_TUNE_TYPES] = {
+   },
+ };
+ 
++struct loongarch_align
++loongarch_cpu_align[N_TUNE_TYPES] = {
++  [CPU_LOONGARCH64] = {
++    .function = "32",
++    .label = "16",
++  },
++  [CPU_LA464] = {
++    .function = "32",
++    .label = "16",
++  },
++};
++
+ /* The following properties cannot be looked up directly using "cpucfg".
+  So it is necessary to provide a default value for "unknown native"
+  tune targets (i.e. -mtune=native while PRID does not correspond to
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index b5985f070..eb87a79a5 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -144,6 +144,7 @@ extern int loongarch_cpu_issue_rate[];
+ extern int loongarch_cpu_multipass_dfa_lookahead[];
+ 
+ extern struct loongarch_cache loongarch_cpu_cache[];
++extern struct loongarch_align loongarch_cpu_align[];
+ extern struct loongarch_rtx_cost_data loongarch_cpu_rtx_cost_data[];
+ 
+ #ifdef __cplusplus
+diff --git a/gcc/config/loongarch/loongarch-tune.h b/gcc/config/loongarch/loongarch-tune.h
+index 8e3eb2947..d961963f0 100644
+--- a/gcc/config/loongarch/loongarch-tune.h
++++ b/gcc/config/loongarch/loongarch-tune.h
+@@ -48,4 +48,12 @@ struct loongarch_cache {
+     int simultaneous_prefetches; /* number of parallel prefetch */
+ };
+ 
++/* Alignment for functions and labels for best performance.  For new uarchs
++   the value should be measured via benchmarking.  See the documentation for
++   -falign-functions and -falign-labels in invoke.texi for the format.  */
++struct loongarch_align {
++  const char *function;	/* default value for -falign-functions */
++  const char *label;	/* default value for -falign-labels */
++};
++
+ #endif /* LOONGARCH_TUNE_H */
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 4c0f393b6..f14de5cce 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -6246,6 +6246,12 @@ loongarch_option_override_internal (struct gcc_options *opts)
+       && !opts->x_optimize_size)
+     opts->x_flag_prefetch_loop_arrays = 1;
+ 
++  if (opts->x_flag_align_functions && !opts->x_str_align_functions)
++    opts->x_str_align_functions = loongarch_cpu_align[LARCH_ACTUAL_TUNE].function;
++
++  if (opts->x_flag_align_labels && !opts->x_str_align_labels)
++    opts->x_str_align_labels = loongarch_cpu_align[LARCH_ACTUAL_TUNE].label;
++
+   if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib)
+     error ("%qs cannot be used for compiling a shared library",
+ 	   "-mdirect-extern-access");
+-- 
+2.33.0
+
diff --git a/LoongArch-Slightly-simplify-loongarch_block_move_str.patch b/LoongArch-Slightly-simplify-loongarch_block_move_str.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5ff00e0ec79295ec9da1318396d488d812ee5927
--- /dev/null
+++ b/LoongArch-Slightly-simplify-loongarch_block_move_str.patch
@@ -0,0 +1,37 @@
+From f4a0248c80fedff3a6841407ff95b732dfbb93a1 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao <xry111@xry111.site>
+Date: Fri, 8 Sep 2023 00:29:57 +0800
+Subject: [PATCH 073/124] LoongArch: Slightly simplify
+ loongarch_block_move_straight
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_block_move_straight):
+	Check precondition (delta must be a power of 2) and use
+	popcount_hwi instead of a homebrew loop.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/loongarch.cc | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index baa5c2354..baa9831aa 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5221,9 +5221,8 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length,
+      emit two ld.d/st.d pairs, one ld.w/st.w pair, and one ld.b/st.b
+      pair.  For each load/store pair we use a dedicated register to keep
+      the pipeline as populated as possible.  */
+-  HOST_WIDE_INT num_reg = length / delta;
+-  for (delta_cur = delta / 2; delta_cur != 0; delta_cur /= 2)
+-    num_reg += !!(length & delta_cur);
++  gcc_assert (pow2p_hwi (delta));
++  HOST_WIDE_INT num_reg = length / delta + popcount_hwi (length % delta);
+ 
+   /* Allocate a buffer for the temporary registers.  */
+   regs = XALLOCAVEC (rtx, num_reg);
+-- 
+2.33.0
+
diff --git a/LoongArch-Subdivision-symbol-type-add-SYMBOL_PCREL-s.patch b/LoongArch-Subdivision-symbol-type-add-SYMBOL_PCREL-s.patch
new file mode 100644
index 0000000000000000000000000000000000000000..4c811fbffd62b3cbcae608ef143b062a5fdadb40
--- /dev/null
+++ b/LoongArch-Subdivision-symbol-type-add-SYMBOL_PCREL-s.patch
@@ -0,0 +1,1234 @@
+From 68bb2a2d0b94b9bde3c22ff1dfe08abb6f036e7f Mon Sep 17 00:00:00 2001
+From: Lulu Cheng <chenglulu@loongson.cn>
+Date: Thu, 21 Jul 2022 10:32:51 +0800
+Subject: [PATCH 003/124] LoongArch: Subdivision symbol type, add SYMBOL_PCREL
+ support.
+
+1. Remove cModel type support other than normal.
+2. The method for calling global functions changed from 'la.global + jirl' to 'bl'
+   when complied add '-fplt'.
+
+gcc/ChangeLog:
+
+	* config/loongarch/constraints.md (a): Delete the constraint.
+	(b): A constant call not local address.
+	(h): Delete the constraint.
+	(t): Delete the constraint.
+	* config/loongarch/loongarch-opts.cc (loongarch_config_target):
+	Remove cModel type support other than normal.
+	* config/loongarch/loongarch-protos.h (enum loongarch_symbol_type):
+	Add new symbol type 'SYMBOL_PCREL', 'SYMBOL_TLS_IE' and 'SYMBOL_TLS_LE'.
+	(loongarch_split_symbol): Delete useless function declarations.
+	(loongarch_split_symbol_type): Delete useless function declarations.
+	* config/loongarch/loongarch.cc (enum loongarch_address_type):
+	Delete unnecessary comment information.
+	(loongarch_symbol_binds_local_p): Modified the judgment order of label
+	and symbol.
+	(loongarch_classify_symbol): Return symbol type. If symbol is a label,
+	or symbol is a local symbol return SYMBOL_PCREL. If is a tls symbol,
+	return SYMBOL_TLS. If is a not local symbol return SYMBOL_GOT_DISP.
+	(loongarch_symbolic_constant_p): Add handling of 'SYMBOL_TLS_IE'
+	'SYMBOL_TLS_LE' and 'SYMBOL_PCREL'.
+	(loongarch_symbol_insns): Add handling of 'SYMBOL_TLS_IE' 'SYMBOL_TLS_LE'
+	and 'SYMBOL_PCREL'.
+	(loongarch_address_insns): Sort code.
+	(loongarch_12bit_offset_address_p): Sort code.
+	(loongarch_14bit_shifted_offset_address_p): Sort code.
+	(loongarch_call_tls_get_addr): Sort code.
+	(loongarch_legitimize_tls_address): Sort code.
+	(loongarch_output_move): Remove schema support for cmodel other than normal.
+	(loongarch_memmodel_needs_release_fence): Sort code.
+	(loongarch_print_operand): Sort code.
+	* config/loongarch/loongarch.h (LARCH_U12BIT_OFFSET_P):
+	Rename to LARCH_12BIT_OFFSET_P.
+	(LARCH_12BIT_OFFSET_P): New macro.
+	* config/loongarch/loongarch.md: Reimplement the function call. Remove schema
+	support for cmodel other than normal.
+	* config/loongarch/predicates.md (is_const_call_weak_symbol): Delete this predicate.
+	(is_const_call_plt_symbol): Delete this predicate.
+	(is_const_call_global_noplt_symbol): Delete this predicate.
+	(is_const_call_no_local_symbol): New predicate, determines whether it is a local
+	symbol or label.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/func-call-1.c: New test.
+	* gcc.target/loongarch/func-call-2.c: New test.
+	* gcc.target/loongarch/func-call-3.c: New test.
+	* gcc.target/loongarch/func-call-4.c: New test.
+
+Signed-off-by: Peng Fan <fanpeng@loongson.cn>
+Signed-off-by: ticat_fp <fanpeng@loongson.cn>
+---
+ gcc/config/loongarch/constraints.md           |  24 +-
+ gcc/config/loongarch/loongarch-opts.cc        |   7 +
+ gcc/config/loongarch/loongarch-protos.h       |   9 +-
+ gcc/config/loongarch/loongarch.cc             | 256 +++++++---------
+ gcc/config/loongarch/loongarch.h              |   2 +-
+ gcc/config/loongarch/loongarch.md             | 279 +++---------------
+ gcc/config/loongarch/predicates.md            |  40 ++-
+ .../gcc.target/loongarch/func-call-1.c        |  32 ++
+ .../gcc.target/loongarch/func-call-2.c        |  32 ++
+ .../gcc.target/loongarch/func-call-3.c        |  32 ++
+ .../gcc.target/loongarch/func-call-4.c        |  32 ++
+ 11 files changed, 312 insertions(+), 433 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-3.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-4.c
+
+diff --git a/gcc/config/loongarch/constraints.md b/gcc/config/loongarch/constraints.md
+index d0bfddbd5..43cb7b5f0 100644
+--- a/gcc/config/loongarch/constraints.md
++++ b/gcc/config/loongarch/constraints.md
+@@ -20,14 +20,14 @@
+ 
+ ;; Register constraints
+ 
+-;; "a" "A constant call global and noplt address."
+-;; "b" <-----unused
++;; "a" <-----unused
++;; "b" "A constant call not local address."
+ ;; "c" "A constant call local address."
+ ;; "d" <-----unused
+ ;; "e" JIRL_REGS
+ ;; "f" FP_REGS
+ ;; "g" <-----unused
+-;; "h" "A constant call plt address."
++;; "h" <-----unused
+ ;; "i" "Matches a general integer constant." (Global non-architectural)
+ ;; "j" SIBCALL_REGS
+ ;; "k" "A memory operand whose address is formed by a base register and
+@@ -42,7 +42,7 @@
+ ;; "q" CSR_REGS
+ ;; "r" GENERAL_REGS (Global non-architectural)
+ ;; "s" "Matches a symbolic integer constant." (Global non-architectural)
+-;; "t" "A constant call weak address"
++;; "t" <-----unused
+ ;; "u" "A signed 52bit constant and low 32-bit is zero (for logic instructions)"
+ ;; "v" "A signed 64-bit constant and low 44-bit is zero (for logic instructions)."
+ ;; "w" "Matches any valid memory."
+@@ -89,10 +89,10 @@
+ ;; "<" "Matches a pre-dec or post-dec operand." (Global non-architectural)
+ ;; ">" "Matches a pre-inc or post-inc operand." (Global non-architectural)
+ 
+-(define_constraint "a"
++(define_constraint "b"
+   "@internal
+-   A constant call global and noplt address."
+-  (match_operand 0 "is_const_call_global_noplt_symbol"))
++   A constant call no local address."
++  (match_operand 0 "is_const_call_no_local_symbol"))
+ 
+ (define_constraint "c"
+   "@internal
+@@ -105,11 +105,6 @@
+ (define_register_constraint "f" "TARGET_HARD_FLOAT ? FP_REGS : NO_REGS"
+   "A floating-point register (if available).")
+ 
+-(define_constraint "h"
+-  "@internal
+-   A constant call plt address."
+-  (match_operand 0 "is_const_call_plt_symbol"))
+-
+ (define_register_constraint "j" "SIBCALL_REGS"
+   "@internal")
+ 
+@@ -134,11 +129,6 @@
+ (define_register_constraint "q" "CSR_REGS"
+   "A general-purpose register except for $r0 and $r1 for lcsr.")
+ 
+-(define_constraint "t"
+-  "@internal
+-   A constant call weak address."
+-  (match_operand 0 "is_const_call_weak_symbol"))
+-
+ (define_constraint "u"
+   "A signed 52bit constant and low 32-bit is zero (for logic instructions)."
+   (and (match_code "const_int")
+diff --git a/gcc/config/loongarch/loongarch-opts.cc b/gcc/config/loongarch/loongarch-opts.cc
+index eb9c2a52f..fc477bfd4 100644
+--- a/gcc/config/loongarch/loongarch-opts.cc
++++ b/gcc/config/loongarch/loongarch-opts.cc
+@@ -376,6 +376,13 @@ fallback:
+ 
+   /* 5.  Target code model */
+   t.cmodel = constrained.cmodel ? opt_cmodel : CMODEL_NORMAL;
++  if (t.cmodel != CMODEL_NORMAL)
++    {
++      warning (0, "%qs is not supported, now cmodel is set to 'normal'.",
++	       loongarch_cmodel_strings[t.cmodel]);
++      t.cmodel = CMODEL_NORMAL;
++    }
++
+ 
+   /* Cleanup and return.  */
+   obstack_free (&msg_obstack, NULL);
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 2287fd376..080766250 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -27,9 +27,13 @@ along with GCC; see the file COPYING3.  If not see
+    SYMBOL_GOT_DISP
+        The symbol's value will be loaded directly from the GOT.
+ 
++   SYMBOL_PCREL
++       The symbol's value will be loaded directly from data section.
++
+    SYMBOL_TLS
+        A thread-local symbol.
+ 
++   SYMBOL_TLS_IE
+    SYMBOL_TLSGD
+    SYMBOL_TLSLDM
+        UNSPEC wrappers around SYMBOL_TLS, corresponding to the
+@@ -37,7 +41,10 @@ along with GCC; see the file COPYING3.  If not see
+    */
+ enum loongarch_symbol_type {
+   SYMBOL_GOT_DISP,
++  SYMBOL_PCREL,
+   SYMBOL_TLS,
++  SYMBOL_TLS_IE,
++  SYMBOL_TLS_LE,
+   SYMBOL_TLSGD,
+   SYMBOL_TLSLDM,
+ };
+@@ -61,7 +68,6 @@ extern int loongarch_idiv_insns (machine_mode);
+ #ifdef RTX_CODE
+ extern void loongarch_emit_binary (enum rtx_code, rtx, rtx, rtx);
+ #endif
+-extern bool loongarch_split_symbol (rtx, rtx, machine_mode, rtx *);
+ extern rtx loongarch_unspec_address (rtx, enum loongarch_symbol_type);
+ extern rtx loongarch_strip_unspec_address (rtx);
+ extern void loongarch_move_integer (rtx, rtx, unsigned HOST_WIDE_INT);
+@@ -154,7 +160,6 @@ extern rtx loongarch_expand_thread_pointer (rtx);
+ extern bool loongarch_eh_uses (unsigned int);
+ extern bool loongarch_epilogue_uses (unsigned int);
+ extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool);
+-extern bool loongarch_split_symbol_type (enum loongarch_symbol_type);
+ 
+ typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx);
+ 
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 750d53bbe..2e2f16e72 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -114,19 +114,7 @@ enum loongarch_address_type
+ };
+ 
+ 
+-/* Information about an address described by loongarch_address_type.
+-
+-   ADDRESS_CONST_INT
+-       No fields are used.
+-
+-   ADDRESS_REG
+-       REG is the base register and OFFSET is the constant offset.
+-
+-   ADDRESS_REG_REG
+-       A base register indexed by (optionally scaled) register.
+-
+-   ADDRESS_SYMBOLIC
+-       SYMBOL_TYPE is the type of symbol that the address references.  */
++/* Information about an address described by loongarch_address_type.  */
+ struct loongarch_address_info
+ {
+   enum loongarch_address_type type;
+@@ -1617,11 +1605,12 @@ loongarch_weak_symbol_p (const_rtx x)
+ bool
+ loongarch_symbol_binds_local_p (const_rtx x)
+ {
+-  if (LABEL_REF_P (x))
++  if (SYMBOL_REF_P (x))
++    return (SYMBOL_REF_DECL (x)
++	    ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
++	    : SYMBOL_REF_LOCAL_P (x));
++  else
+     return false;
+-
+-  return (SYMBOL_REF_DECL (x) ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
+-			      : SYMBOL_REF_LOCAL_P (x));
+ }
+ 
+ /* Return true if rtx constants of mode MODE should be put into a small
+@@ -1640,17 +1629,16 @@ static enum loongarch_symbol_type
+ loongarch_classify_symbol (const_rtx x)
+ {
+   if (LABEL_REF_P (x))
+-    return SYMBOL_GOT_DISP;
+-
+-  gcc_assert (SYMBOL_REF_P (x));
++    return SYMBOL_PCREL;
+ 
+   if (SYMBOL_REF_TLS_MODEL (x))
+     return SYMBOL_TLS;
+ 
+-  if (SYMBOL_REF_P (x))
++  if (SYMBOL_REF_P (x)
++      && !loongarch_symbol_binds_local_p (x))
+     return SYMBOL_GOT_DISP;
+ 
+-  return SYMBOL_GOT_DISP;
++  return SYMBOL_PCREL;
+ }
+ 
+ /* Return true if X is a symbolic constant.  If it is,
+@@ -1683,9 +1671,15 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type)
+      relocations.  */
+   switch (*symbol_type)
+     {
+-    case SYMBOL_GOT_DISP:
++    case SYMBOL_TLS_IE:
++    case SYMBOL_TLS_LE:
+     case SYMBOL_TLSGD:
+     case SYMBOL_TLSLDM:
++    case SYMBOL_PCREL:
++      /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
++      return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);
++
++    case SYMBOL_GOT_DISP:
+     case SYMBOL_TLS:
+       return false;
+     }
+@@ -1707,9 +1701,14 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
+ 
+       return 3;
+ 
++    case SYMBOL_PCREL:
++    case SYMBOL_TLS_IE:
++    case SYMBOL_TLS_LE:
++      return 2;
++
+     case SYMBOL_TLSGD:
+     case SYMBOL_TLSLDM:
+-      return 1;
++      return 3;
+ 
+     case SYMBOL_TLS:
+       /* We don't treat a bare TLS symbol as a constant.  */
+@@ -1937,11 +1936,7 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p)
+     switch (addr.type)
+       {
+       case ADDRESS_REG:
+-	return factor;
+-
+       case ADDRESS_REG_REG:
+-	return factor;
+-
+       case ADDRESS_CONST_INT:
+ 	return factor;
+ 
+@@ -1983,7 +1978,7 @@ loongarch_12bit_offset_address_p (rtx x, machine_mode mode)
+   return (loongarch_classify_address (&addr, x, mode, false)
+ 	  && addr.type == ADDRESS_REG
+ 	  && CONST_INT_P (addr.offset)
+-	  && LARCH_U12BIT_OFFSET_P (INTVAL (addr.offset)));
++	  && LARCH_12BIT_OFFSET_P (INTVAL (addr.offset)));
+ }
+ 
+ /* Return true if X is a legitimate address with a 14-bit offset shifted 2.
+@@ -2001,6 +1996,9 @@ loongarch_14bit_shifted_offset_address_p (rtx x, machine_mode mode)
+ 	  && LARCH_SHIFT_2_OFFSET_P (INTVAL (addr.offset)));
+ }
+ 
++/* Return true if X is a legitimate address with base and index.
++   MODE is the mode of the value being accessed.  */
++
+ bool
+ loongarch_base_index_address_p (rtx x, machine_mode mode)
+ {
+@@ -2310,7 +2308,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+ /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
+    its address.  The return value will be both a valid address and a valid
+-   SET_SRC (either a REG or a LO_SUM).  */
++   SET_SRC.  */
+ 
+ static rtx
+ loongarch_legitimize_tls_address (rtx loc)
+@@ -2336,7 +2334,7 @@ loongarch_legitimize_tls_address (rtx loc)
+       break;
+ 
+     case TLS_MODEL_INITIAL_EXEC:
+-      /* la.tls.ie; tp-relative add  */
++      /* la.tls.ie; tp-relative add.  */
+       tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+       tmp = gen_reg_rtx (Pmode);
+       emit_insn (loongarch_got_load_tls_ie (tmp, loc));
+@@ -2345,7 +2343,7 @@ loongarch_legitimize_tls_address (rtx loc)
+       break;
+ 
+     case TLS_MODEL_LOCAL_EXEC:
+-      /* la.tls.le; tp-relative add  */
++      /* la.tls.le; tp-relative add.  */
+       tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+       tmp = gen_reg_rtx (Pmode);
+       emit_insn (loongarch_got_load_tls_le (tmp, loc));
+@@ -3371,6 +3369,7 @@ loongarch_output_move (rtx dest, rtx src)
+ 	    case 2:
+ 	      return "st.h\t%z1,%0";
+ 	    case 4:
++	      /* Matching address type with a 12bit offset.  */
+ 	      if (const_arith_operand (offset, Pmode))
+ 		return "st.w\t%z1,%0";
+ 	      else
+@@ -3409,6 +3408,7 @@ loongarch_output_move (rtx dest, rtx src)
+ 	    case 2:
+ 	      return "ld.hu\t%0,%1";
+ 	    case 4:
++	      /* Matching address type with a 12bit offset.  */
+ 	      if (const_arith_operand (offset, Pmode))
+ 		return "ld.w\t%0,%1";
+ 	      else
+@@ -3436,56 +3436,16 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  else
+ 	    gcc_unreachable ();
+ 	}
++    }
+ 
+-      if (symbolic_operand (src, VOIDmode))
+-	{
+-	  if ((TARGET_CMODEL_TINY && (!loongarch_global_symbol_p (src)
+-				      || loongarch_symbol_binds_local_p (src)))
+-	      || (TARGET_CMODEL_TINY_STATIC && !loongarch_weak_symbol_p (src)))
+-	    {
+-	      /* The symbol must be aligned to 4 byte.  */
+-	      unsigned int align;
+-
+-	      if (LABEL_REF_P (src))
+-		align = 32 /* Whatever.  */;
+-	      else if (CONSTANT_POOL_ADDRESS_P (src))
+-		align = GET_MODE_ALIGNMENT (get_pool_mode (src));
+-	      else if (TREE_CONSTANT_POOL_ADDRESS_P (src))
+-		{
+-		  tree exp = SYMBOL_REF_DECL (src);
+-		  align = TYPE_ALIGN (TREE_TYPE (exp));
+-		  align = loongarch_constant_alignment (exp, align);
+-		}
+-	      else if (SYMBOL_REF_DECL (src))
+-		align = DECL_ALIGN (SYMBOL_REF_DECL (src));
+-	      else if (SYMBOL_REF_HAS_BLOCK_INFO_P (src)
+-		       && SYMBOL_REF_BLOCK (src) != NULL)
+-		align = SYMBOL_REF_BLOCK (src)->alignment;
+-	      else
+-		align = BITS_PER_UNIT;
+-
+-	      if (align % (4 * 8) == 0)
+-		return "pcaddi\t%0,%%pcrel(%1)>>2";
+-	    }
+-	  if (TARGET_CMODEL_TINY
+-	      || TARGET_CMODEL_TINY_STATIC
+-	      || TARGET_CMODEL_NORMAL
+-	      || TARGET_CMODEL_LARGE)
+-	    {
+-	      if (!loongarch_global_symbol_p (src)
+-		  || loongarch_symbol_binds_local_p (src))
+-		return "la.local\t%0,%1";
+-	      else
+-		return "la.global\t%0,%1";
+-	    }
+-	  if (TARGET_CMODEL_EXTREME)
+-	    {
+-	      sorry ("Normal symbol loading not implemented in extreme mode.");
+-	      gcc_unreachable ();
+-	    }
+-
+-	}
++  if (dest_code == REG && symbolic_operand (src, VOIDmode))
++    {
++      if (loongarch_classify_symbol (src) == SYMBOL_PCREL)
++	return "la.local\t%0,%1";
++      else
++	return "la.global\t%0,%1";
+     }
++
+   if (src_code == REG && FP_REG_P (REGNO (src)))
+     {
+       if (dest_code == REG && FP_REG_P (REGNO (dest)))
+@@ -3503,6 +3463,7 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  return dbl_p ? "fst.d\t%1,%0" : "fst.s\t%1,%0";
+ 	}
+     }
++
+   if (dest_code == REG && FP_REG_P (REGNO (dest)))
+     {
+       if (src_code == MEM)
+@@ -3517,6 +3478,7 @@ loongarch_output_move (rtx dest, rtx src)
+ 	  return dbl_p ? "fld.d\t%0,%1" : "fld.s\t%0,%1";
+ 	}
+     }
++
+   gcc_unreachable ();
+ }
+ 
+@@ -4347,27 +4309,27 @@ loongarch_memmodel_needs_release_fence (enum memmodel model)
+ 
+ /* Implement TARGET_PRINT_OPERAND.  The LoongArch-specific operand codes are:
+ 
+-   'X'	Print CONST_INT OP in hexadecimal format.
+-   'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format.
++   'A'	Print a _DB suffix if the memory model requires a release.
++   'b'	Print the address of a memory operand, without offset.
++   'C'	Print the integer branch condition for comparison OP.
+    'd'	Print CONST_INT OP in decimal.
++   'F'	Print the FPU branch condition for comparison OP.
++   'G'	Print a DBAR insn if the memory model requires a release.
++   'i'	Print i if the operand is not a register.
+    'm'	Print one less than CONST_INT OP in decimal.
+-   'y'	Print exact log2 of CONST_INT OP in decimal.
+-   'C'	Print the integer branch condition for comparison OP.
+    'N'	Print the inverse of the integer branch condition for comparison OP.
+-   'F'	Print the FPU branch condition for comparison OP.
+-   'W'	Print the inverse of the FPU branch condition for comparison OP.
+    'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+ 	      'z' for (eq:?I ...), 'n' for (ne:?I ...).
+    't'	Like 'T', but with the EQ/NE cases reversed
+-   'Y'	Print loongarch_fp_conditions[INTVAL (OP)]
+-   'Z'	Print OP and a comma for 8CC, otherwise print nothing.
+-   'z'	Print $0 if OP is zero, otherwise print OP normally.
+-   'b'	Print the address of a memory operand, without offset.
+    'V'	Print exact log2 of CONST_INT OP element 0 of a replicated
+ 	  CONST_VECTOR in decimal.
+-   'A'	Print a _DB suffix if the memory model requires a release.
+-   'G'	Print a DBAR insn if the memory model requires a release.
+-   'i'	Print i if the operand is not a register.  */
++   'W'	Print the inverse of the FPU branch condition for comparison OP.
++   'X'	Print CONST_INT OP in hexadecimal format.
++   'x'	Print the low 16 bits of CONST_INT OP in hexadecimal format.
++   'Y'	Print loongarch_fp_conditions[INTVAL (OP)]
++   'y'	Print exact log2 of CONST_INT OP in decimal.
++   'Z'	Print OP and a comma for 8CC, otherwise print nothing.
++   'z'	Print $0 if OP is zero, otherwise print OP normally.  */
+ 
+ static void
+ loongarch_print_operand (FILE *file, rtx op, int letter)
+@@ -4385,18 +4347,13 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 
+   switch (letter)
+     {
+-    case 'X':
+-      if (CONST_INT_P (op))
+-	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
+-      else
+-	output_operand_lossage ("invalid use of '%%%c'", letter);
++    case 'A':
++      if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op)))
++       fputs ("_db", file);
+       break;
+ 
+-    case 'x':
+-      if (CONST_INT_P (op))
+-	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
+-      else
+-	output_operand_lossage ("invalid use of '%%%c'", letter);
++    case 'C':
++      loongarch_print_int_branch_condition (file, code, letter);
+       break;
+ 
+     case 'd':
+@@ -4406,6 +4363,20 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
++    case 'F':
++      loongarch_print_float_branch_condition (file, code, letter);
++      break;
++
++    case 'G':
++      if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
++	fputs ("dbar\t0", file);
++      break;
++
++    case 'i':
++      if (code != REG)
++	fputs ("i", file);
++      break;
++
+     case 'm':
+       if (CONST_INT_P (op))
+ 	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
+@@ -4413,17 +4384,17 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
+-    case 'y':
+-      if (CONST_INT_P (op))
+-	{
+-	  int val = exact_log2 (INTVAL (op));
+-	  if (val != -1)
+-	    fprintf (file, "%d", val);
+-	  else
+-	    output_operand_lossage ("invalid use of '%%%c'", letter);
+-	}
+-      else
+-	output_operand_lossage ("invalid use of '%%%c'", letter);
++    case 'N':
++      loongarch_print_int_branch_condition (file, reverse_condition (code),
++					    letter);
++      break;
++
++    case 't':
++    case 'T':
++      {
++	int truth = (code == NE) == (letter == 'T');
++	fputc ("zfnt"[truth * 2 + FCC_REG_P (REGNO (XEXP (op, 0)))], file);
++      }
+       break;
+ 
+     case 'V':
+@@ -4441,30 +4412,36 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
+-    case 'C':
+-      loongarch_print_int_branch_condition (file, code, letter);
+-      break;
+-
+-    case 'N':
+-      loongarch_print_int_branch_condition (file, reverse_condition (code),
+-					    letter);
++    case 'W':
++      loongarch_print_float_branch_condition (file, reverse_condition (code),
++					      letter);
+       break;
+ 
+-    case 'F':
+-      loongarch_print_float_branch_condition (file, code, letter);
++    case 'x':
++      if (CONST_INT_P (op))
++	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op) & 0xffff);
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
+-    case 'W':
+-      loongarch_print_float_branch_condition (file, reverse_condition (code),
+-					      letter);
++    case 'X':
++      if (CONST_INT_P (op))
++	fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
+-    case 'T':
+-    case 't':
+-      {
+-	int truth = (code == NE) == (letter == 'T');
+-	fputc ("zfnt"[truth * 2 + FCC_REG_P (REGNO (XEXP (op, 0)))], file);
+-      }
++    case 'y':
++      if (CONST_INT_P (op))
++	{
++	  int val = exact_log2 (INTVAL (op));
++	  if (val != -1)
++	    fprintf (file, "%d", val);
++	  else
++	    output_operand_lossage ("invalid use of '%%%c'", letter);
++	}
++      else
++	output_operand_lossage ("invalid use of '%%%c'", letter);
+       break;
+ 
+     case 'Y':
+@@ -4481,21 +4458,6 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+       fputc (',', file);
+       break;
+ 
+-    case 'A':
+-      if (loongarch_memmodel_needs_rel_acq_fence ((enum memmodel) INTVAL (op)))
+-	fputs ("_db", file);
+-      break;
+-
+-    case 'G':
+-      if (loongarch_memmodel_needs_release_fence ((enum memmodel) INTVAL (op)))
+-	fputs ("dbar\t0", file);
+-      break;
+-
+-    case 'i':
+-      if (code != REG)
+-	fputs ("i", file);
+-      break;
+-
+     default:
+       switch (code)
+ 	{
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index 714401f2d..12f209047 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -617,7 +617,7 @@ enum reg_class
+ #define LU12I_INT(X) LU12I_OPERAND (INTVAL (X))
+ #define LU32I_INT(X) LU32I_OPERAND (INTVAL (X))
+ #define LU52I_INT(X) LU52I_OPERAND (INTVAL (X))
+-#define LARCH_U12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047))
++#define LARCH_12BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -2048, 2047))
+ #define LARCH_9BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -256, 255))
+ #define LARCH_16BIT_OFFSET_P(OFFSET) (IN_RANGE (OFFSET, -32768, 32767))
+ #define LARCH_SHIFT_2_OFFSET_P(OFFSET) (((OFFSET) & 0x3) == 0)
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 5c0445dd8..376879fbc 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -2844,48 +2844,14 @@
+ })
+ 
+ (define_insn "sibcall_internal"
+-  [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,a,t,h"))
++  [(call (mem:SI (match_operand 0 "call_insn_operand" "j,c,b"))
+ 	 (match_operand 1 "" ""))]
+   "SIBLING_CALL_P (insn)"
+-{
+-  switch (which_alternative)
+-    {
+-    case 0:
+-      return "jr\t%0";
+-    case 1:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r12,(%%pcrel(%0+0x20000))>>18\n\t"
+-	       "jirl\t$r0,$r12,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.local\t$r12,$r13,%0\n\tjr\t$r12";
+-      else
+-	return "b\t%0";
+-    case 2:
+-      if (TARGET_CMODEL_TINY_STATIC)
+-	return "b\t%0";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r12,$r13,%0\n\tjr\t$r12";
+-      else
+-	return "la.global\t$r12,%0\n\tjr\t$r12";
+-    case 3:
+-      if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r12,$r13,%0\n\tjr\t$r12";
+-      else
+-	return "la.global\t$r12,%0\n\tjr\t$r12";
+-    case 4:
+-      if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY)
+-	return "b\t%%plt(%0)";
+-      else if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r12,(%%plt(%0)+0x20000)>>18\n\t"
+-	       "jirl\t$r0,$r12,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)";
+-      else
+-	/* Cmodel extreme and tiny static not support plt.  */
+-	gcc_unreachable ();
+-    default:
+-      gcc_unreachable ();
+-    }
+-}
+-  [(set_attr "jirl" "indirect,direct,direct,direct,direct")])
++  "@
++   jr\t%0
++   b\t%0
++   b\t%%plt(%0)"
++  [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_expand "sibcall_value"
+   [(parallel [(set (match_operand 0 "")
+@@ -2920,96 +2886,28 @@
+ 
+ (define_insn "sibcall_value_internal"
+   [(set (match_operand 0 "register_operand" "")
+-	(call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h"))
++	(call (mem:SI (match_operand 1 "call_insn_operand" "j,c,b"))
+ 	      (match_operand 2 "" "")))]
+   "SIBLING_CALL_P (insn)"
+-{
+-  switch (which_alternative)
+-  {
+-    case 0:
+-      return "jr\t%1";
+-    case 1:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t"
+-	       "jirl\t$r0,$r12,%%pcrel(%1+4)-((%%pcrel(%1+4+0x20000))>>18<<18)";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.local\t$r12,$r13,%1\n\tjr\t$r12";
+-      else
+-	return "b\t%1";
+-    case 2:
+-      if (TARGET_CMODEL_TINY_STATIC)
+-	return "b\t%1";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r12,$r13,%1\n\tjr\t$r12";
+-      else
+-	return "la.global\t$r12,%1\n\tjr\t$r12";
+-    case 3:
+-      if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r12,$r13,%1\n\tjr\t$r12";
+-      else
+-	return "la.global\t$r12,%1\n\tjr\t$r12";
+-    case 4:
+-      if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY)
+-	return " b\t%%plt(%1)";
+-      else if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t"
+-	       "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)";
+-      else
+-	/* Cmodel extreme and tiny static not support plt.  */
+-	gcc_unreachable ();
+-    default:
+-      gcc_unreachable ();
+-  }
+-}
+-  [(set_attr "jirl" "indirect,direct,direct,direct,direct")])
++  "@
++   jr\t%1
++   b\t%1
++   b\t%%plt(%1)"
++  [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "sibcall_value_multiple_internal"
+   [(set (match_operand 0 "register_operand" "")
+-	(call (mem:SI (match_operand 1 "call_insn_operand" "j,c,a,t,h"))
++	(call (mem:SI (match_operand 1 "call_insn_operand" "j,c,b"))
+ 	      (match_operand 2 "" "")))
+    (set (match_operand 3 "register_operand" "")
+ 	(call (mem:SI (match_dup 1))
+ 	      (match_dup 2)))]
+   "SIBLING_CALL_P (insn)"
+-{
+-  switch (which_alternative)
+-  {
+-    case 0:
+-      return "jr\t%1";
+-    case 1:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r12,%%pcrel(%1+0x20000)>>18\n\t"
+-	       "jirl\t$r0,$r12,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.local\t$r12,$r13,%1\n\tjr\t$r12";
+-      else
+-	return "b\t%1";
+-    case 2:
+-      if (TARGET_CMODEL_TINY_STATIC)
+-	return "b\t%1";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r12,$r13,%1\n\tjr\t$r12";
+-      else
+-	return "la.global\t$r12,%1\n\tjr\t$r12";
+-    case 3:
+-      if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r12,$r13,%1\n\tjr\t$r12";
+-      else
+-	return "la.global\t$r12,%1\n\tjr\t$r12";
+-    case 4:
+-      if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY)
+-	return "b\t%%plt(%1)";
+-      else if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r12,(%%plt(%1)+0x20000)>>18\n\t"
+-	       "jirl\t$r0,$r12,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)";
+-      else
+-	/* Cmodel extreme and tiny static not support plt.  */
+-	gcc_unreachable ();
+-    default:
+-      gcc_unreachable ();
+-  }
+-}
+-  [(set_attr "jirl" "indirect,direct,direct,direct,direct")])
++  "@
++   jr\t%1
++   b\t%1
++   b\t%%plt(%1)"
++  [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_expand "call"
+   [(parallel [(call (match_operand 0 "")
+@@ -3025,50 +2923,15 @@
+ })
+ 
+ (define_insn "call_internal"
+-  [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,a,t,h"))
++  [(call (mem:SI (match_operand 0 "call_insn_operand" "e,c,b"))
+ 	 (match_operand 1 "" ""))
+    (clobber (reg:SI RETURN_ADDR_REGNUM))]
+   ""
+-{
+-  switch (which_alternative)
+-    {
+-    case 0:
+-      return "jirl\t$r1,%0,0";
+-    case 1:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r1,%%pcrel(%0+0x20000)>>18\n\t"
+-	       "jirl\t$r1,$r1,%%pcrel(%0+4)-(%%pcrel(%0+4+0x20000)>>18<<18)";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.local\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "bl\t%0";
+-    case 2:
+-      if (TARGET_CMODEL_TINY_STATIC)
+-	return "bl\t%0";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0";
+-    case 3:
+-      if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r1,$r12,%0\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "la.global\t$r1,%0\n\tjirl\t$r1,$r1,0";
+-    case 4:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r1,(%%plt(%0)+0x20000)>>18\n\t"
+-	       "jirl\t$r1,$r1,%%plt(%0)+4-((%%plt(%0)+(4+0x20000))>>18<<18)";
+-      else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY)
+-	return "bl\t%%plt(%0)";
+-      else
+-	/* Cmodel extreme and tiny static not support plt.  */
+-	gcc_unreachable ();
+-    default:
+-      gcc_unreachable ();
+-    }
+-}
+-  [(set_attr "jirl" "indirect,direct,direct,direct,direct")
+-   (set_attr "insn_count" "1,2,3,3,2")])
++  "@
++   jirl\t$r1,%0,0
++   bl\t%0
++   bl\t%%plt(%0)"
++  [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_expand "call_value"
+   [(parallel [(set (match_operand 0 "")
+@@ -3101,100 +2964,30 @@
+ 
+ (define_insn "call_value_internal"
+   [(set (match_operand 0 "register_operand" "")
+-	(call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h"))
++	(call (mem:SI (match_operand 1 "call_insn_operand" "e,c,b"))
+ 	      (match_operand 2 "" "")))
+    (clobber (reg:SI RETURN_ADDR_REGNUM))]
+   ""
+-{
+-  switch (which_alternative)
+-    {
+-    case 0:
+-      return "jirl\t$r1,%1,0";
+-    case 1:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t"
+-	       "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "bl\t%1";
+-    case 2:
+-      if (TARGET_CMODEL_TINY_STATIC)
+-	return "bl\t%1";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0";
+-    case 3:
+-      if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0";
+-    case 4:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t"
+-	       "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)";
+-      else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY)
+-	return "bl\t%%plt(%1)";
+-      else
+-	/* Cmodel extreme and tiny static not support plt.  */
+-	gcc_unreachable ();
+-    default:
+-      gcc_unreachable ();
+-    }
+-}
+-  [(set_attr "jirl" "indirect,direct,direct,direct,direct")
+-   (set_attr "insn_count" "1,2,3,3,2")])
++  "@
++   jirl\t$r1,%1,0
++   bl\t%1
++   bl\t%%plt(%1)"
++  [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ (define_insn "call_value_multiple_internal"
+   [(set (match_operand 0 "register_operand" "")
+-	(call (mem:SI (match_operand 1 "call_insn_operand" "e,c,a,t,h"))
++	(call (mem:SI (match_operand 1 "call_insn_operand" "e,c,b"))
+ 	      (match_operand 2 "" "")))
+    (set (match_operand 3 "register_operand" "")
+ 	(call (mem:SI (match_dup 1))
+ 	      (match_dup 2)))
+    (clobber (reg:SI RETURN_ADDR_REGNUM))]
+   ""
+-{
+-  switch (which_alternative)
+-    {
+-    case 0:
+-      return "jirl\t$r1,%1,0";
+-    case 1:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r1,%%pcrel(%1+0x20000)>>18\n\t"
+-	       "jirl\t$r1,$r1,%%pcrel(%1+4)-(%%pcrel(%1+4+0x20000)>>18<<18)";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.local\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "bl\t%1";
+-    case 2:
+-      if (TARGET_CMODEL_TINY_STATIC)
+-	return "bl\t%1";
+-      else if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0 ";
+-      else
+-	return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0";
+-    case 3:
+-      if (TARGET_CMODEL_EXTREME)
+-	return "la.global\t$r1,$r12,%1\n\tjirl\t$r1,$r1,0";
+-      else
+-	return "la.global\t$r1,%1\n\tjirl\t$r1,$r1,0";
+-    case 4:
+-      if (TARGET_CMODEL_LARGE)
+-	return "pcaddu18i\t$r1,(%%plt(%1)+0x20000)>>18\n\t"
+-	       "jirl\t$r1,$r1,%%plt(%1)+4-((%%plt(%1)+(4+0x20000))>>18<<18)";
+-      else if (TARGET_CMODEL_NORMAL || TARGET_CMODEL_TINY)
+-	return "bl\t%%plt(%1)";
+-      else
+-	/* Cmodel extreme and tiny static not support plt.  */
+-	gcc_unreachable ();
+-    default:
+-      gcc_unreachable ();
+-    }
+-}
+-  [(set_attr "jirl" "indirect,direct,direct,direct,direct")
+-   (set_attr "insn_count" "1,2,3,3,2")])
++  "@
++   jirl\t$r1,%1,0
++   bl\t%1
++   bl\t%%plt(%1)"
++  [(set_attr "jirl" "indirect,direct,direct")])
+ 
+ 
+ ;; Call subroutine returning any type.
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index edd74d478..2243ef71c 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -111,20 +111,25 @@
+   (match_code "const,symbol_ref,label_ref")
+ {
+   enum loongarch_symbol_type symbol_type;
++  loongarch_symbolic_constant_p (op, &symbol_type);
+ 
+-  if (!loongarch_symbolic_constant_p (op, &symbol_type))
++  rtx offset, x = op;
++  split_const (x, &x, &offset);
++
++  if (offset != const0_rtx)
+     return false;
+ 
+   switch (symbol_type)
+     {
+-    case SYMBOL_GOT_DISP:
+-      /* Without explicit relocs, there is no special syntax for
+-	 loading the address of a call destination into a register.
+-	 Using "la.global JIRL_REGS,foo; jirl JIRL_REGS" would prevent the lazy
+-	 binding of "foo", so keep the address of global symbols with the jirl
+-	 macro.  */
++    case SYMBOL_PCREL:
+       return 1;
+ 
++    case SYMBOL_GOT_DISP:
++      if (!flag_plt)
++	return false;
++      else
++	return 1;
++
+     default:
+       return false;
+     }
+@@ -140,22 +145,11 @@
+ 	    (match_test "loongarch_symbol_binds_local_p (op) != 0"))
+        (match_test "CONSTANT_P (op)")))
+ 
+-(define_predicate "is_const_call_weak_symbol"
+-  (and (match_operand 0 "const_call_insn_operand")
+-       (not (match_operand 0 "is_const_call_local_symbol"))
+-       (match_test "loongarch_weak_symbol_p (op) != 0")
+-       (match_test "CONSTANT_P (op)")))
+-
+-(define_predicate "is_const_call_plt_symbol"
+-  (and (match_operand 0 "const_call_insn_operand")
+-       (match_test "flag_plt != 0")
+-       (match_test "loongarch_global_symbol_noweak_p (op) != 0")
+-       (match_test "CONSTANT_P (op)")))
+-
+-(define_predicate "is_const_call_global_noplt_symbol"
++(define_predicate "is_const_call_no_local_symbol"
+   (and (match_operand 0 "const_call_insn_operand")
+-       (match_test "flag_plt == 0")
+-       (match_test "loongarch_global_symbol_noweak_p (op) != 0")
++       (ior (match_test "loongarch_global_symbol_p (op) != 0")
++	    (match_test "loongarch_symbol_binds_local_p (op) == 0")
++       (match_test "loongarch_weak_symbol_p (op) != 0"))
+        (match_test "CONSTANT_P (op)")))
+ 
+ ;; A legitimate CONST_INT operand that takes more than one instruction
+@@ -219,7 +213,7 @@
+     case CONST:
+     case SYMBOL_REF:
+     case LABEL_REF:
+-      return (loongarch_symbolic_constant_p (op, &symbol_type));
++      return loongarch_symbolic_constant_p (op, &symbol_type);
+     default:
+       return true;
+     }
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-1.c
+new file mode 100644
+index 000000000..b0482761a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-1.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt" } */
++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
++/* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-2.c
+new file mode 100644
+index 000000000..f5e061c29
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-2.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt" } */
++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-3.c
+new file mode 100644
+index 000000000..75082c574
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-3.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt" } */
++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-4.c
+new file mode 100644
+index 000000000..e8a839549
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-4.c
+@@ -0,0 +1,32 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt" } */
++/* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++extern void g (void);
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Support-split-symbol.patch b/LoongArch-Support-split-symbol.patch
new file mode 100644
index 0000000000000000000000000000000000000000..97fa5f9f0e98f7904bd592ec3e4a142beb3017a7
--- /dev/null
+++ b/LoongArch-Support-split-symbol.patch
@@ -0,0 +1,1238 @@
+From 078261cabef370e7f3201980d03bd54a049290e9 Mon Sep 17 00:00:00 2001
+From: Lulu Cheng 
+Date: Thu, 21 Jul 2022 11:04:08 +0800
+Subject: [PATCH 004/124] LoongArch: Support split symbol.
+
+Add the compilation option '-mexplicit-relocs'.  When it is enabled,
+the symbolic address load instruction 'la.*' will be split into two instructions.
+This compilation option is enabled by default.
+
+gcc/ChangeLog:
+
+	* common/config/loongarch/loongarch-common.cc:
+	Enable '-fsection-anchors' at -O1 and higher optimization levels.
+	* config/loongarch/genopts/loongarch.opt.in: Add new option
+	'-mexplicit-relocs', and enable by default.
+	* config/loongarch/loongarch-protos.h (loongarch_split_move_insn_p):
+	Delete function declaration.
+	(loongarch_split_move_insn): Delete function declaration.
+	(loongarch_split_symbol_type): Add function declaration.
+	* config/loongarch/loongarch.cc (enum loongarch_address_type):
+	Add new address type 'ADDRESS_LO_SUM'.
+	(loongarch_classify_symbolic_expression): New function definitions.
+	Classify the base of symbolic expression X, given that X appears in
+	context CONTEXT.
+	(loongarch_symbol_insns): Add a judgment condition TARGET_EXPLICIT_RELOCS.
+	(loongarch_split_symbol_type): New function definitions.
+	Determines whether the symbol load should be split into two instructions.
+	(loongarch_valid_lo_sum_p): New function definitions.
+	Return true if a LO_SUM can address a value of mode MODE when the LO_SUM
+	symbol has type SYMBOL_TYPE.
+	(loongarch_classify_address): Add handling of 'LO_SUM'.
+	(loongarch_address_insns): Add handling of 'ADDRESS_LO_SUM'.
+	(loongarch_signed_immediate_p): Sort code.
+	(loongarch_12bit_offset_address_p): Return true if address type is ADDRESS_LO_SUM.
+	(loongarch_const_insns): Add handling of 'HIGH'.
+	(loongarch_split_move_insn_p): Add the static attribute to the function.
+	(loongarch_emit_set): New function definitions.
+	(loongarch_call_tls_get_addr): Add symbol handling when defining TARGET_EXPLICIT_RELOCS.
+	(loongarch_legitimize_tls_address): Add symbol handling when defining the
+	TARGET_EXPLICIT_RELOCS macro.
+	(loongarch_split_symbol): New function definitions. Split symbol.
+	(loongarch_legitimize_address): Add codes see if the address can split into a high part
+	and a LO_SUM.
+	(loongarch_legitimize_const_move): Add codes split moves of symbolic constants into
+	high and low.
+	(loongarch_split_move_insn): Delete function definitions.
+	(loongarch_output_move): Add support for HIGH and LO_SUM.
+	(loongarch_print_operand_reloc): New function definitions.
+	Print symbolic operand OP, which is part of a HIGH or LO_SUM in context CONTEXT.
+	(loongarch_memmodel_needs_release_fence): Sort code.
+	(loongarch_print_operand): Rearrange alphabetical order and add H and L to support HIGH
+	and LOW output.
+	(loongarch_print_operand_address): Add handling of 'ADDRESS_LO_SUM'.
+	(TARGET_MIN_ANCHOR_OFFSET): Define macro to -IMM_REACH/2.
+	(TARGET_MAX_ANCHOR_OFFSET): Define macro to IMM_REACH/2-1.
+	* config/loongarch/loongarch.md (movti): Delete the template.
+	(*movti): Delete the template.
+	(movtf): Delete the template.
+	(*movtf): Delete the template.
+	(*low): New template of normal symbol low address.
+	(@tls_low): New template of tls symbol low address.
+	(@ld_from_got): New template load address from got table.
+	(@ori_l_lo12): New template.
+	* config/loongarch/loongarch.opt: Update from loongarch.opt.in.
+	* config/loongarch/predicates.md: Add support for symbol_type HIGH.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/func-call-1.c: Add build option '-mno-explicit-relocs'.
+	* gcc.target/loongarch/func-call-2.c: Add build option '-mno-explicit-relocs'.
+	* gcc.target/loongarch/func-call-3.c: Add build option '-mno-explicit-relocs'.
+	* gcc.target/loongarch/func-call-4.c: Add build option '-mno-explicit-relocs'.
+	* gcc.target/loongarch/func-call-5.c: New test.
+	* gcc.target/loongarch/func-call-6.c: New test.
+	* gcc.target/loongarch/func-call-7.c: New test.
+	* gcc.target/loongarch/func-call-8.c: New test.
+	* gcc.target/loongarch/relocs-symbol-noaddend.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ .../config/loongarch/loongarch-common.cc      |   1 +
+ gcc/config/loongarch/genopts/loongarch.opt.in |   4 +
+ gcc/config/loongarch/loongarch-protos.h       |   3 +-
+ gcc/config/loongarch/loongarch.cc             | 412 ++++++++++++++++--
+ gcc/config/loongarch/loongarch.md             | 122 +++---
+ gcc/config/loongarch/loongarch.opt            |   4 +
+ gcc/config/loongarch/predicates.md            |  20 +-
+ .../gcc.target/loongarch/func-call-1.c        |   2 +-
+ .../gcc.target/loongarch/func-call-2.c        |   2 +-
+ .../gcc.target/loongarch/func-call-3.c        |   2 +-
+ .../gcc.target/loongarch/func-call-4.c        |   2 +-
+ .../gcc.target/loongarch/func-call-5.c        |  33 ++
+ .../gcc.target/loongarch/func-call-6.c        |  33 ++
+ .../gcc.target/loongarch/func-call-7.c        |  34 ++
+ .../gcc.target/loongarch/func-call-8.c        |  33 ++
+ .../loongarch/relocs-symbol-noaddend.c        |  23 +
+ 16 files changed, 614 insertions(+), 116 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-5.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-6.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-7.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/func-call-8.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c
+
+diff --git a/gcc/common/config/loongarch/loongarch-common.cc b/gcc/common/config/loongarch/loongarch-common.cc
+index ed3730fce..f8b4660fa 100644
+--- a/gcc/common/config/loongarch/loongarch-common.cc
++++ b/gcc/common/config/loongarch/loongarch-common.cc
+@@ -34,6 +34,7 @@ along with GCC; see the file COPYING3.  If not see
+ static const struct default_options loongarch_option_optimization_table[] =
+ {
+   { OPT_LEVELS_ALL, OPT_fasynchronous_unwind_tables, NULL, 1 },
++  { OPT_LEVELS_1_PLUS, OPT_fsection_anchors, NULL, 1 },
+   { OPT_LEVELS_NONE, 0, NULL, 0 }
+ };
+ 
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 61e7d72a0..6f3950093 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -154,6 +154,10 @@ mmax-inline-memcpy-size=
+ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024)
+ -mmax-inline-memcpy-size=SIZE	Set the max size of memcpy to inline, default is 1024.
+ 
++mexplicit-relocs
++Target Var(TARGET_EXPLICIT_RELOCS) Init(1)
++Use %reloc() assembly operators.
++
+ ; The code model option names for -mcmodel.
+ Enum
+ Name(cmodel) Type(int)
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 080766250..cadaad751 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -77,8 +77,6 @@ extern rtx loongarch_legitimize_call_address (rtx);
+ extern rtx loongarch_subword (rtx, bool);
+ extern bool loongarch_split_move_p (rtx, rtx);
+ extern void loongarch_split_move (rtx, rtx, rtx);
+-extern bool loongarch_split_move_insn_p (rtx, rtx);
+-extern void loongarch_split_move_insn (rtx, rtx, rtx);
+ extern const char *loongarch_output_move (rtx, rtx);
+ extern bool loongarch_cfun_has_cprestore_slot_p (void);
+ #ifdef RTX_CODE
+@@ -160,6 +158,7 @@ extern rtx loongarch_expand_thread_pointer (rtx);
+ extern bool loongarch_eh_uses (unsigned int);
+ extern bool loongarch_epilogue_uses (unsigned int);
+ extern bool loongarch_load_store_bonding_p (rtx *, machine_mode, bool);
++extern bool loongarch_split_symbol_type (enum loongarch_symbol_type);
+ 
+ typedef rtx (*mulsidi3_gen_fn) (rtx, rtx, rtx);
+ 
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 2e2f16e72..1b5af2c7d 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -100,6 +100,10 @@ along with GCC; see the file COPYING3.  If not see
+    ADDRESS_REG_REG
+        A base register indexed by (optionally scaled) register.
+ 
++   ADDRESS_LO_SUM
++       A LO_SUM rtx.  The first operand is a valid base register and the second
++       operand is a symbolic address.
++
+    ADDRESS_CONST_INT
+        A signed 16-bit constant address.
+ 
+@@ -109,6 +113,7 @@ enum loongarch_address_type
+ {
+   ADDRESS_REG,
+   ADDRESS_REG_REG,
++  ADDRESS_LO_SUM,
+   ADDRESS_CONST_INT,
+   ADDRESS_SYMBOLIC
+ };
+@@ -1641,6 +1646,21 @@ loongarch_classify_symbol (const_rtx x)
+   return SYMBOL_PCREL;
+ }
+ 
++/* Classify the base of symbolic expression X, given that X appears in
++   context CONTEXT.  */
++
++static enum loongarch_symbol_type
++loongarch_classify_symbolic_expression (rtx x)
++{
++  rtx offset;
++
++  split_const (x, &x, &offset);
++  if (UNSPEC_ADDRESS_P (x))
++    return UNSPEC_ADDRESS_TYPE (x);
++
++  return loongarch_classify_symbol (x);
++}
++
+ /* Return true if X is a symbolic constant.  If it is,
+    store the type of the symbol in *SYMBOL_TYPE.  */
+ 
+@@ -1696,7 +1716,7 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
+     case SYMBOL_GOT_DISP:
+       /* The constant will have to be loaded from the GOT before it
+ 	 is used in an address.  */
+-      if (mode != MAX_MACHINE_MODE)
++      if (!TARGET_EXPLICIT_RELOCS && mode != MAX_MACHINE_MODE)
+ 	return 0;
+ 
+       return 3;
+@@ -1814,6 +1834,84 @@ loongarch_valid_offset_p (rtx x, machine_mode mode)
+   return true;
+ }
+ 
++/* Should a symbol of type SYMBOL_TYPE should be split in two?  */
++
++bool
++loongarch_split_symbol_type (enum loongarch_symbol_type symbol_type)
++{
++  switch (symbol_type)
++    {
++    case SYMBOL_PCREL:
++    case SYMBOL_GOT_DISP:
++    case SYMBOL_TLS_IE:
++    case SYMBOL_TLS_LE:
++    case SYMBOL_TLSGD:
++    case SYMBOL_TLSLDM:
++      return true;
++
++    case SYMBOL_TLS:
++      return false;
++
++    default:
++      gcc_unreachable ();
++    }
++}
++
++/* Return true if a LO_SUM can address a value of mode MODE when the
++   LO_SUM symbol has type SYMBOL_TYPE.  */
++
++static bool
++loongarch_valid_lo_sum_p (enum loongarch_symbol_type symbol_type,
++			  machine_mode mode, rtx x)
++{
++  int align, size;
++
++  /* Check that symbols of type SYMBOL_TYPE can be used to access values
++     of mode MODE.  */
++  if (loongarch_symbol_insns (symbol_type, mode) == 0)
++    return false;
++
++  /* Check that there is a known low-part relocation.  */
++  if (!loongarch_split_symbol_type (symbol_type))
++    return false;
++
++  /* We can't tell size or alignment when we have BLKmode, so try extracing a
++     decl from the symbol if possible.  */
++  if (mode == BLKmode)
++    {
++      rtx offset;
++
++      /* Extract the symbol from the LO_SUM operand, if any.  */
++      split_const (x, &x, &offset);
++
++      /* Might be a CODE_LABEL.  We can compute align but not size for that,
++	 so don't bother trying to handle it.  */
++      if (!SYMBOL_REF_P (x))
++	return false;
++
++      /* Use worst case assumptions if we don't have a SYMBOL_REF_DECL.  */
++      align = (SYMBOL_REF_DECL (x)
++	       ? DECL_ALIGN (SYMBOL_REF_DECL (x))
++	       : 1);
++      size = (SYMBOL_REF_DECL (x) && DECL_SIZE (SYMBOL_REF_DECL (x))
++	      ? tree_to_uhwi (DECL_SIZE (SYMBOL_REF_DECL (x)))
++	      : 2*BITS_PER_WORD);
++    }
++  else
++    {
++      align = GET_MODE_ALIGNMENT (mode);
++      size = GET_MODE_BITSIZE (mode);
++    }
++
++  /* We may need to split multiword moves, so make sure that each word
++     can be accessed without inducing a carry.  */
++  if (size > BITS_PER_WORD
++      && (!TARGET_STRICT_ALIGN || size > align))
++    return false;
++
++  return true;
++}
++
+ static bool
+ loongarch_valid_index_p (struct loongarch_address_info *info, rtx x,
+ 			  machine_mode mode, bool strict_p)
+@@ -1880,6 +1978,26 @@ loongarch_classify_address (struct loongarch_address_info *info, rtx x,
+       info->offset = XEXP (x, 1);
+       return (loongarch_valid_base_register_p (info->reg, mode, strict_p)
+ 	      && loongarch_valid_offset_p (info->offset, mode));
++
++    case LO_SUM:
++      info->type = ADDRESS_LO_SUM;
++      info->reg = XEXP (x, 0);
++      info->offset = XEXP (x, 1);
++      /* We have to trust the creator of the LO_SUM to do something vaguely
++	 sane.  Target-independent code that creates a LO_SUM should also
++	 create and verify the matching HIGH.  Target-independent code that
++	 adds an offset to a LO_SUM must prove that the offset will not
++	 induce a carry.  Failure to do either of these things would be
++	 a bug, and we are not required to check for it here.  The MIPS
++	 backend itself should only create LO_SUMs for valid symbolic
++	 constants, with the high part being either a HIGH or a copy
++	 of _gp. */
++      info->symbol_type
++	= loongarch_classify_symbolic_expression (info->offset);
++      return (loongarch_valid_base_register_p (info->reg, mode, strict_p)
++	      && loongarch_valid_lo_sum_p (info->symbol_type, mode,
++					   info->offset));
++
+     default:
+       return false;
+     }
+@@ -1940,6 +2058,9 @@ loongarch_address_insns (rtx x, machine_mode mode, bool might_split_p)
+       case ADDRESS_CONST_INT:
+ 	return factor;
+ 
++      case ADDRESS_LO_SUM:
++	return factor + 1;
++
+       case ADDRESS_SYMBOLIC:
+ 	return factor * loongarch_symbol_insns (addr.symbol_type, mode);
+       }
+@@ -1967,7 +2088,8 @@ loongarch_signed_immediate_p (unsigned HOST_WIDE_INT x, int bits,
+   return loongarch_unsigned_immediate_p (x, bits, shift);
+ }
+ 
+-/* Return true if X is a legitimate address with a 12-bit offset.
++/* Return true if X is a legitimate address with a 12-bit offset
++   or addr.type is ADDRESS_LO_SUM.
+    MODE is the mode of the value being accessed.  */
+ 
+ bool
+@@ -1976,9 +2098,10 @@ loongarch_12bit_offset_address_p (rtx x, machine_mode mode)
+   struct loongarch_address_info addr;
+ 
+   return (loongarch_classify_address (&addr, x, mode, false)
+-	  && addr.type == ADDRESS_REG
+-	  && CONST_INT_P (addr.offset)
+-	  && LARCH_12BIT_OFFSET_P (INTVAL (addr.offset)));
++	  && ((addr.type == ADDRESS_REG
++	       && CONST_INT_P (addr.offset)
++	       && LARCH_12BIT_OFFSET_P (INTVAL (addr.offset)))
++	      || addr.type == ADDRESS_LO_SUM));
+ }
+ 
+ /* Return true if X is a legitimate address with a 14-bit offset shifted 2.
+@@ -2020,6 +2143,14 @@ loongarch_const_insns (rtx x)
+ 
+   switch (GET_CODE (x))
+     {
++    case HIGH:
++      if (!loongarch_symbolic_constant_p (XEXP (x, 0), &symbol_type)
++	  || !loongarch_split_symbol_type (symbol_type))
++	return 0;
++
++      /* This is simply a PCALAU12I.  */
++      return 1;
++
+     case CONST_INT:
+       return loongarch_integer_cost (INTVAL (x));
+ 
+@@ -2080,6 +2211,8 @@ loongarch_split_const_insns (rtx x)
+   return low + high;
+ }
+ 
++static bool loongarch_split_move_insn_p (rtx dest, rtx src);
++
+ /* Return the number of instructions needed to implement INSN,
+    given that it loads from or stores to MEM.  */
+ 
+@@ -2197,6 +2330,15 @@ loongarch_unspec_address (rtx address, enum loongarch_symbol_type symbol_type)
+   return loongarch_unspec_address_offset (base, offset, symbol_type);
+ }
+ 
++/* Emit an instruction of the form (set TARGET SRC).  */
++
++static rtx
++loongarch_emit_set (rtx target, rtx src)
++{
++  emit_insn (gen_rtx_SET (target, src));
++  return target;
++}
++
+ /* If OP is an UNSPEC address, return the address to which it refers,
+    otherwise return OP itself.  */
+ 
+@@ -2278,6 +2420,7 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ {
+   rtx loc, a0;
+   rtx_insn *insn;
++  rtx tmp = gen_reg_rtx (Pmode);
+ 
+   a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
+ 
+@@ -2288,12 +2431,22 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+   start_sequence ();
+ 
+-  if (type == SYMBOL_TLSLDM)
+-    emit_insn (loongarch_got_load_tls_ld (a0, loc));
+-  else if (type == SYMBOL_TLSGD)
+-    emit_insn (loongarch_got_load_tls_gd (a0, loc));
++  if (TARGET_EXPLICIT_RELOCS)
++    {
++      /* Split tls symbol to high and low.  */
++      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (loc));
++      high = loongarch_force_temporary (tmp, high);
++      emit_insn (gen_tls_low (Pmode, a0, high, loc));
++    }
+   else
+-    gcc_unreachable ();
++    {
++      if (type == SYMBOL_TLSLDM)
++	emit_insn (loongarch_got_load_tls_ld (a0, loc));
++      else if (type == SYMBOL_TLSGD)
++	emit_insn (loongarch_got_load_tls_gd (a0, loc));
++      else
++	gcc_unreachable ();
++    }
+ 
+   insn = emit_call_insn (gen_call_value_internal (v0, loongarch_tls_symbol,
+ 						  const0_rtx));
+@@ -2308,12 +2461,12 @@ loongarch_call_tls_get_addr (rtx sym, enum loongarch_symbol_type type, rtx v0)
+ 
+ /* Generate the code to access LOC, a thread-local SYMBOL_REF, and return
+    its address.  The return value will be both a valid address and a valid
+-   SET_SRC.  */
++   SET_SRC (either a REG or a LO_SUM).  */
+ 
+ static rtx
+ loongarch_legitimize_tls_address (rtx loc)
+ {
+-  rtx dest, tp, tmp;
++  rtx dest, tp, tmp, tmp1, tmp2, tmp3;
+   enum tls_model model = SYMBOL_REF_TLS_MODEL (loc);
+   rtx_insn *insn;
+ 
+@@ -2334,21 +2487,45 @@ loongarch_legitimize_tls_address (rtx loc)
+       break;
+ 
+     case TLS_MODEL_INITIAL_EXEC:
+-      /* la.tls.ie; tp-relative add.  */
+-      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+-      tmp = gen_reg_rtx (Pmode);
+-      emit_insn (loongarch_got_load_tls_ie (tmp, loc));
+-      dest = gen_reg_rtx (Pmode);
+-      emit_insn (gen_add3_insn (dest, tmp, tp));
++	{
++	  /* la.tls.ie; tp-relative add.  */
++	  tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++	  tmp1 = gen_reg_rtx (Pmode);
++	  dest = gen_reg_rtx (Pmode);
++	  if (TARGET_EXPLICIT_RELOCS)
++	    {
++	      tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_IE);
++	      tmp3 = gen_reg_rtx (Pmode);
++	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
++	      high = loongarch_force_temporary (tmp3, high);
++	      emit_insn (gen_ld_from_got (Pmode, tmp1, high, tmp2));
++	    }
++	  else
++	    emit_insn (loongarch_got_load_tls_ie (tmp1, loc));
++	  emit_insn (gen_add3_insn (dest, tmp1, tp));
++	}
+       break;
+ 
+     case TLS_MODEL_LOCAL_EXEC:
+-      /* la.tls.le; tp-relative add.  */
+-      tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
+-      tmp = gen_reg_rtx (Pmode);
+-      emit_insn (loongarch_got_load_tls_le (tmp, loc));
+-      dest = gen_reg_rtx (Pmode);
+-      emit_insn (gen_add3_insn (dest, tmp, tp));
++	{
++	  /* la.tls.le; tp-relative add.  */
++	  tp = gen_rtx_REG (Pmode, THREAD_POINTER_REGNUM);
++	  tmp1 = gen_reg_rtx (Pmode);
++	  dest = gen_reg_rtx (Pmode);
++
++	  if (TARGET_EXPLICIT_RELOCS)
++	    {
++	      tmp2 = loongarch_unspec_address (loc, SYMBOL_TLS_LE);
++	      tmp3 = gen_reg_rtx (Pmode);
++	      rtx high = gen_rtx_HIGH (Pmode, copy_rtx (tmp2));
++	      high = loongarch_force_temporary (tmp3, high);
++	      emit_insn (gen_ori_l_lo12 (Pmode, tmp1, high, tmp2));
++	    }
++	  else
++	    emit_insn (loongarch_got_load_tls_le (tmp1, loc));
++	  emit_insn (gen_add3_insn (dest, tmp1, tp));
++
++	}
+       break;
+ 
+     default:
+@@ -2397,6 +2574,68 @@ loongarch_force_address (rtx x, machine_mode mode)
+   return x;
+ }
+ 
++/* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
++   it appears in a MEM of that mode.  Return true if ADDR is a legitimate
++   constant in that context and can be split into high and low parts.
++   If so, and if LOW_OUT is nonnull, emit the high part and store the
++   low part in *LOW_OUT.  Leave *LOW_OUT unchanged otherwise.
++
++   Return false if build with '-mno-explicit-relocs'.
++
++   TEMP is as for loongarch_force_temporary and is used to load the high
++   part into a register.
++
++   When MODE is MAX_MACHINE_MODE, the low part is guaranteed to be
++   a legitimize SET_SRC for an .md pattern, otherwise the low part
++   is guaranteed to be a legitimate address for mode MODE.  */
++
++bool
++loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
++{
++  enum loongarch_symbol_type symbol_type;
++  rtx high;
++
++  /* If build with '-mno-explicit-relocs', don't split symbol.  */
++  if (!TARGET_EXPLICIT_RELOCS)
++    return false;
++
++  if ((GET_CODE (addr) == HIGH && mode == MAX_MACHINE_MODE)
++      || !loongarch_symbolic_constant_p (addr, &symbol_type)
++      || loongarch_symbol_insns (symbol_type, mode) == 0
++      || !loongarch_split_symbol_type (symbol_type))
++    return false;
++
++  if (temp == NULL)
++    temp = gen_reg_rtx (Pmode);
++
++  /* Get the 12-31 bits of the address.  */
++  high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
++  high = loongarch_force_temporary (temp, high);
++
++  if (low_out)
++    switch (symbol_type)
++      {
++      case SYMBOL_PCREL:
++	*low_out = gen_rtx_LO_SUM (Pmode, high, addr);
++	break;
++
++      case SYMBOL_GOT_DISP:
++	/* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
++	{
++	  rtx low = gen_rtx_LO_SUM (Pmode, high, addr);
++	  rtx mem = gen_rtx_MEM (Pmode, low);
++	  *low_out = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, mem),
++				     UNSPEC_LOAD_FROM_GOT);
++	  break;
++	}
++
++      default:
++	gcc_unreachable ();
++      }
++
++  return true;
++}
++
+ /* This function is used to implement LEGITIMIZE_ADDRESS.  If X can
+    be legitimized in a way that the generic machinery might not expect,
+    return a new address, otherwise return NULL.  MODE is the mode of
+@@ -2412,6 +2651,10 @@ loongarch_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
+   if (loongarch_tls_symbol_p (x))
+     return loongarch_legitimize_tls_address (x);
+ 
++  /* See if the address can split into a high part and a LO_SUM.  */
++  if (loongarch_split_symbol (NULL, x, mode, &addr))
++    return loongarch_force_address (addr, mode);
++
+   /* Handle BASE + OFFSET using loongarch_add_offset.  */
+   loongarch_split_plus (x, &base, &offset);
+   if (offset != 0)
+@@ -2499,6 +2742,13 @@ loongarch_legitimize_const_move (machine_mode mode, rtx dest, rtx src)
+       return;
+     }
+ 
++  /* Split moves of symbolic constants into high and low.  */
++  if (loongarch_split_symbol (dest, src, MAX_MACHINE_MODE, &src))
++    {
++      loongarch_emit_set (dest, src);
++      return;
++    }
++
+   /* Generate the appropriate access sequences for TLS symbols.  */
+   if (loongarch_tls_symbol_p (src))
+     {
+@@ -3241,21 +3491,12 @@ loongarch_split_move (rtx dest, rtx src, rtx insn_)
+ 
+ /* Return true if a move from SRC to DEST in INSN should be split.  */
+ 
+-bool
++static bool
+ loongarch_split_move_insn_p (rtx dest, rtx src)
+ {
+   return loongarch_split_move_p (dest, src);
+ }
+ 
+-/* Split a move from SRC to DEST in INSN, given that
+-   loongarch_split_move_insn_p holds.  */
+-
+-void
+-loongarch_split_move_insn (rtx dest, rtx src, rtx insn)
+-{
+-  loongarch_split_move (dest, src, insn);
+-}
+-
+ /* Implement TARGET_CONSTANT_ALIGNMENT.  */
+ 
+ static HOST_WIDE_INT
+@@ -3369,13 +3610,16 @@ loongarch_output_move (rtx dest, rtx src)
+ 	    case 2:
+ 	      return "st.h\t%z1,%0";
+ 	    case 4:
+-	      /* Matching address type with a 12bit offset.  */
+-	      if (const_arith_operand (offset, Pmode))
++	      /* Matching address type with a 12bit offset and
++		 ADDRESS_LO_SUM.  */
++	      if (const_arith_operand (offset, Pmode)
++		  || GET_CODE (offset) == LO_SUM)
+ 		return "st.w\t%z1,%0";
+ 	      else
+ 		return "stptr.w\t%z1,%0";
+ 	    case 8:
+-	      if (const_arith_operand (offset, Pmode))
++	      if (const_arith_operand (offset, Pmode)
++		  || GET_CODE (offset) == LO_SUM)
+ 		return "st.d\t%z1,%0";
+ 	      else
+ 		return "stptr.d\t%z1,%0";
+@@ -3408,13 +3652,16 @@ loongarch_output_move (rtx dest, rtx src)
+ 	    case 2:
+ 	      return "ld.hu\t%0,%1";
+ 	    case 4:
+-	      /* Matching address type with a 12bit offset.  */
+-	      if (const_arith_operand (offset, Pmode))
++	      /* Matching address type with a 12bit offset and
++		 ADDRESS_LO_SUM.  */
++	      if (const_arith_operand (offset, Pmode)
++		  || GET_CODE (offset) == LO_SUM)
+ 		return "ld.w\t%0,%1";
+ 	      else
+ 		return "ldptr.w\t%0,%1";
+ 	    case 8:
+-	      if (const_arith_operand (offset, Pmode))
++	      if (const_arith_operand (offset, Pmode)
++		  || GET_CODE (offset) == LO_SUM)
+ 		return "ld.d\t%0,%1";
+ 	      else
+ 		return "ldptr.d\t%0,%1";
+@@ -3423,6 +3670,21 @@ loongarch_output_move (rtx dest, rtx src)
+ 	    }
+ 	}
+ 
++      if (src_code == HIGH)
++	{
++	  rtx offset, x;
++	  split_const (XEXP (src, 0), &x, &offset);
++	  enum loongarch_symbol_type type = SYMBOL_PCREL;
++
++	  if (UNSPEC_ADDRESS_P (x))
++	     type = UNSPEC_ADDRESS_TYPE (x);
++
++	  if (type == SYMBOL_TLS_LE)
++	    return "lu12i.w\t%0,%h1";
++	  else
++	    return "pcalau12i\t%0,%h1";
++	}
++
+       if (src_code == CONST_INT)
+ 	{
+ 	  if (LU12I_INT (src))
+@@ -3438,7 +3700,8 @@ loongarch_output_move (rtx dest, rtx src)
+ 	}
+     }
+ 
+-  if (dest_code == REG && symbolic_operand (src, VOIDmode))
++  if (!TARGET_EXPLICIT_RELOCS
++      && dest_code == REG && symbolic_operand (src, VOIDmode))
+     {
+       if (loongarch_classify_symbol (src) == SYMBOL_PCREL)
+ 	return "la.local\t%0,%1";
+@@ -4307,6 +4570,49 @@ loongarch_memmodel_needs_release_fence (enum memmodel model)
+     }
+ }
+ 
++/* Print symbolic operand OP, which is part of a HIGH or LO_SUM
++   in context CONTEXT.  HI_RELOC indicates a high-part reloc.  */
++
++static void
++loongarch_print_operand_reloc (FILE *file, rtx op, bool hi_reloc)
++{
++  const char *reloc;
++
++  switch (loongarch_classify_symbolic_expression (op))
++    {
++    case SYMBOL_PCREL:
++      reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12";
++      break;
++
++    case SYMBOL_GOT_DISP:
++      reloc = hi_reloc ? "%got_pc_hi20" : "%got_pc_lo12";
++      break;
++
++    case SYMBOL_TLS_IE:
++      reloc = hi_reloc ? "%ie_pc_hi20" : "%ie_pc_lo12";
++      break;
++
++    case SYMBOL_TLS_LE:
++      reloc = hi_reloc ? "%le_hi20" : "%le_lo12";
++      break;
++
++    case SYMBOL_TLSGD:
++      reloc = hi_reloc ? "%gd_pc_hi20" : "%got_pc_lo12";
++      break;
++
++    case SYMBOL_TLSLDM:
++      reloc = hi_reloc ? "%ld_pc_hi20" : "%got_pc_lo12";
++      break;
++
++    default:
++      gcc_unreachable ();
++    }
++
++  fprintf (file, "%s(", reloc);
++  output_addr_const (file, loongarch_strip_unspec_address (op));
++  fputc (')', file);
++}
++
+ /* Implement TARGET_PRINT_OPERAND.  The LoongArch-specific operand codes are:
+ 
+    'A'	Print a _DB suffix if the memory model requires a release.
+@@ -4315,7 +4621,10 @@ loongarch_memmodel_needs_release_fence (enum memmodel model)
+    'd'	Print CONST_INT OP in decimal.
+    'F'	Print the FPU branch condition for comparison OP.
+    'G'	Print a DBAR insn if the memory model requires a release.
++   'H'  Print address 52-61bit relocation associated with OP.
++   'h'  Print the high-part relocation associated with OP.
+    'i'	Print i if the operand is not a register.
++   'L'  Print the low-part relocation associated with OP.
+    'm'	Print one less than CONST_INT OP in decimal.
+    'N'	Print the inverse of the integer branch condition for comparison OP.
+    'T'	Print 'f' for (eq:CC ...), 't' for (ne:CC ...),
+@@ -4372,11 +4681,21 @@ loongarch_print_operand (FILE *file, rtx op, int letter)
+ 	fputs ("dbar\t0", file);
+       break;
+ 
++    case 'h':
++      if (code == HIGH)
++	op = XEXP (op, 0);
++      loongarch_print_operand_reloc (file, op, true /* hi_reloc */);
++      break;
++
+     case 'i':
+       if (code != REG)
+ 	fputs ("i", file);
+       break;
+ 
++    case 'L':
++      loongarch_print_operand_reloc (file, op, false /* lo_reloc */);
++      break;
++
+     case 'm':
+       if (CONST_INT_P (op))
+ 	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (op) - 1);
+@@ -4517,6 +4836,11 @@ loongarch_print_operand_address (FILE *file, machine_mode /* mode  */, rtx x)
+ 				reg_names[REGNO (addr.offset)]);
+ 	return;
+ 
++      case ADDRESS_LO_SUM:
++	fprintf (file, "%s,", reg_names[REGNO (addr.reg)]);
++	loongarch_print_operand_reloc (file, addr.offset, false /* hi_reloc */);
++	return;
++
+       case ADDRESS_CONST_INT:
+ 	fprintf (file, "%s,", reg_names[GP_REG_FIRST]);
+ 	output_addr_const (file, x);
+@@ -5891,6 +6215,12 @@ loongarch_starting_frame_offset (void)
+ #undef TARGET_TRAMPOLINE_INIT
+ #define TARGET_TRAMPOLINE_INIT loongarch_trampoline_init
+ 
++#undef TARGET_MIN_ANCHOR_OFFSET
++#define TARGET_MIN_ANCHOR_OFFSET (-IMM_REACH/2)
++
++#undef TARGET_MAX_ANCHOR_OFFSET
++#define TARGET_MAX_ANCHOR_OFFSET (IMM_REACH/2-1)
++
+ #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
+ #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV loongarch_atomic_assign_expand_fenv
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 376879fbc..6b6df22a5 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -57,6 +57,10 @@
+   ;; CRC
+   UNSPEC_CRC
+   UNSPEC_CRCC
++
++  UNSPEC_LOAD_FROM_GOT
++  UNSPEC_ORI_L_LO12
++  UNSPEC_TLS_LOW
+ ])
+ 
+ (define_c_enum "unspecv" [
+@@ -1743,73 +1747,6 @@
+   [(set_attr "move_type" "move,load,store")
+    (set_attr "mode" "DF")])
+ 
+-
+-;; 128-bit integer moves
+-
+-(define_expand "movti"
+-  [(set (match_operand:TI 0)
+-	(match_operand:TI 1))]
+-  "TARGET_64BIT"
+-{
+-  if (loongarch_legitimize_move (TImode, operands[0], operands[1]))
+-    DONE;
+-})
+-
+-(define_insn "*movti"
+-  [(set (match_operand:TI 0 "nonimmediate_operand" "=r,r,r,m")
+-	(match_operand:TI 1 "move_operand" "r,i,m,rJ"))]
+-  "TARGET_64BIT
+-   && (register_operand (operands[0], TImode)
+-       || reg_or_0_operand (operands[1], TImode))"
+-  { return loongarch_output_move (operands[0], operands[1]); }
+-  [(set_attr "move_type" "move,const,load,store")
+-   (set (attr "mode")
+-    (if_then_else (eq_attr "move_type" "imul")
+-		      (const_string "SI")
+-		      (const_string "TI")))])
+-
+-;; 128-bit floating point moves
+-
+-(define_expand "movtf"
+-  [(set (match_operand:TF 0)
+-	(match_operand:TF 1))]
+-  "TARGET_64BIT"
+-{
+-  if (loongarch_legitimize_move (TFmode, operands[0], operands[1]))
+-    DONE;
+-})
+-
+-;; This pattern handles both hard- and soft-float cases.
+-(define_insn "*movtf"
+-  [(set (match_operand:TF 0 "nonimmediate_operand" "=r,r,m,f,r,f,m")
+-	(match_operand:TF 1 "move_operand" "rG,m,rG,rG,f,m,f"))]
+-  "TARGET_64BIT
+-   && (register_operand (operands[0], TFmode)
+-       || reg_or_0_operand (operands[1], TFmode))"
+-  "#"
+-  [(set_attr "move_type" "move,load,store,mgtf,mftg,fpload,fpstore")
+-   (set_attr "mode" "TF")])
+-
+-(define_split
+-  [(set (match_operand:MOVE64 0 "nonimmediate_operand")
+-	(match_operand:MOVE64 1 "move_operand"))]
+-  "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])"
+-  [(const_int 0)]
+-{
+-  loongarch_split_move_insn (operands[0], operands[1], curr_insn);
+-  DONE;
+-})
+-
+-(define_split
+-  [(set (match_operand:MOVE128 0 "nonimmediate_operand")
+-	(match_operand:MOVE128 1 "move_operand"))]
+-  "reload_completed && loongarch_split_move_insn_p (operands[0], operands[1])"
+-  [(const_int 0)]
+-{
+-  loongarch_split_move_insn (operands[0], operands[1], curr_insn);
+-  DONE;
+-})
+-
+ ;; Emit a doubleword move in which exactly one of the operands is
+ ;; a floating-point register.  We can't just emit two normal moves
+ ;; because of the constraints imposed by the FPU register model;
+@@ -1938,6 +1875,57 @@
+   [(set_attr "type" "arith")
+    (set_attr "mode" "DI")])
+ 
++;; Instructions for adding the low 12 bits of an address to a register.
++;; Operand 2 is the address: loongarch_print_operand works out which relocation
++;; should be applied.
++
++(define_insn "*low"
++  [(set (match_operand:P 0 "register_operand" "=r")
++ (lo_sum:P (match_operand:P 1 "register_operand" " r")
++     (match_operand:P 2 "symbolic_operand" "")))]
++  "TARGET_EXPLICIT_RELOCS"
++  "addi.\t%0,%1,%L2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "")])
++
++(define_insn "@tls_low"
++  [(set (match_operand:P 0 "register_operand" "=r")
++	(unspec:P [(mem:P (lo_sum:P (match_operand:P 1 "register_operand" "r")
++				    (match_operand:P 2 "symbolic_operand" "")))]
++	UNSPEC_TLS_LOW))]
++  "TARGET_EXPLICIT_RELOCS"
++  "addi.\t%0,%1,%L2"
++  [(set_attr "type" "arith")
++   (set_attr "mode" "")])
++
++;; Instructions for loading address from GOT entry.
++;; operands[1] is pc plus the high half of the address difference with the got
++;; entry;
++;; operands[2] is low 12 bits for low 12 bit of the address difference with the
++;; got entry.
++;; loongarch_print_operand works out which relocation should be applied.
++
++(define_insn "@ld_from_got"
++  [(set (match_operand:P 0 "register_operand" "=r")
++	(unspec:P [(mem:P (lo_sum:P
++				(match_operand:P 1 "register_operand" "r")
++				(match_operand:P 2 "symbolic_operand")))]
++	UNSPEC_LOAD_FROM_GOT))]
++  "TARGET_EXPLICIT_RELOCS"
++  "ld.\t%0,%1,%L2"
++  [(set_attr "type" "move")]
++)
++
++(define_insn "@ori_l_lo12"
++  [(set (match_operand:P 0 "register_operand" "=r")
++	(unspec:P [(match_operand:P 1 "register_operand" "r")
++		    (match_operand:P 2 "symbolic_operand")]
++	UNSPEC_ORI_L_LO12))]
++  ""
++  "ori\t%0,%1,%L2"
++  [(set_attr "type" "move")]
++)
++
+ ;; Convert floating-point numbers to integers
+ (define_insn "frint_"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 3ff0d8604..7a8c5b444 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -161,6 +161,10 @@ mmax-inline-memcpy-size=
+ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init(1024)
+ -mmax-inline-memcpy-size=SIZE	Set the max size of memcpy to inline, default is 1024.
+ 
++mexplicit-relocs
++Target Var(TARGET_EXPLICIT_RELOCS) Init(1)
++Use %reloc() assembly operators.
++
+ ; The code model option names for -mcmodel.
+ Enum
+ Name(cmodel) Type(int)
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index 2243ef71c..cd3528c7c 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -110,6 +110,10 @@
+ (define_predicate "const_call_insn_operand"
+   (match_code "const,symbol_ref,label_ref")
+ {
++  /* Split symbol to high and low if return false.
++     If defined TARGET_CMODEL_LARGE, all symbol would be splited,
++     else if offset is not zero, the symbol would be splited.  */
++
+   enum loongarch_symbol_type symbol_type;
+   loongarch_symbolic_constant_p (op, &symbol_type);
+ 
+@@ -125,7 +129,7 @@
+       return 1;
+ 
+     case SYMBOL_GOT_DISP:
+-      if (!flag_plt)
++      if (TARGET_CMODEL_LARGE || !flag_plt)
+ 	return false;
+       else
+ 	return 1;
+@@ -213,7 +217,19 @@
+     case CONST:
+     case SYMBOL_REF:
+     case LABEL_REF:
+-      return loongarch_symbolic_constant_p (op, &symbol_type);
++      return (loongarch_symbolic_constant_p (op, &symbol_type)
++	      && (!TARGET_EXPLICIT_RELOCS
++		  || !loongarch_split_symbol_type (symbol_type)));
++
++    case HIGH:
++      /* '-mno-explicit-relocs' don't generate high/low pairs.  */
++      if (!TARGET_EXPLICIT_RELOCS)
++	return false;
++
++      op = XEXP (op, 0);
++      return (loongarch_symbolic_constant_p (op, &symbol_type)
++	      && loongarch_split_symbol_type (symbol_type));
++
+     default:
+       return true;
+     }
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-1.c b/gcc/testsuite/gcc.target/loongarch/func-call-1.c
+index b0482761a..01b8ea23f 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-1.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-1.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fplt" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mno-explicit-relocs" } */
+ /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-2.c b/gcc/testsuite/gcc.target/loongarch/func-call-2.c
+index f5e061c29..4565baaec 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-2.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-2.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mno-explicit-relocs" } */
+ /* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-3.c b/gcc/testsuite/gcc.target/loongarch/func-call-3.c
+index 75082c574..4f669a029 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-3.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-3.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt" } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mno-explicit-relocs" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*la\.global\t.*f\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-4.c b/gcc/testsuite/gcc.target/loongarch/func-call-4.c
+index e8a839549..943adb640 100644
+--- a/gcc/testsuite/gcc.target/loongarch/func-call-4.c
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-4.c
+@@ -1,5 +1,5 @@
+ /* { dg-do compile } */
+-/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt" } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mno-explicit-relocs" } */
+ /* { dg-final { scan-assembler "test:.*la\.global\t.*g\n\tjirl" } } */
+ /* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
+ /* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-5.c b/gcc/testsuite/gcc.target/loongarch/func-call-5.c
+new file mode 100644
+index 000000000..2c2a1c8a1
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-5.c
+@@ -0,0 +1,33 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fplt -mexplicit-relocs" } */
++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
++/* { dg-final { scan-assembler "test1:.*bl\t%plt\\(f\\)\n" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-6.c b/gcc/testsuite/gcc.target/loongarch/func-call-6.c
+new file mode 100644
+index 000000000..4b0e4266e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-6.c
+@@ -0,0 +1,33 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fplt -mexplicit-relocs" } */
++/* { dg-final { scan-assembler "test:.*bl\t%plt\\(g\\)\n" } } */
++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-7.c b/gcc/testsuite/gcc.target/loongarch/func-call-7.c
+new file mode 100644
+index 000000000..51792711f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-7.c
+@@ -0,0 +1,34 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fpic -fno-plt -mexplicit-relocs" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*pcalau12i\t.*%got_pc_hi20\\(f\\)\n\tld\.d\t.*%got_pc_lo12\\(f\\)\n\tjirl" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/func-call-8.c b/gcc/testsuite/gcc.target/loongarch/func-call-8.c
+new file mode 100644
+index 000000000..330140d88
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/func-call-8.c
+@@ -0,0 +1,33 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -O0 -fno-pic -fno-plt -mexplicit-relocs" } */
++/* { dg-final { scan-assembler "test:.*pcalau12i\t.*%got_pc_hi20\\(g\\)\n\tld\.d\t.*%got_pc_lo12\\(g\\)\n\tjirl" } } */
++/* { dg-final { scan-assembler "test1:.*bl\tf\n" } } */
++/* { dg-final { scan-assembler "test2:.*bl\tl\n" } } */
++
++extern void g (void);
++
++void
++f (void)
++{}
++
++static void
++l (void)
++{}
++
++void
++test (void)
++{
++  g ();
++}
++
++void
++test1 (void)
++{
++  f ();
++}
++
++void
++test2 (void)
++{
++  l ();
++}
+diff --git a/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c
+new file mode 100644
+index 000000000..bfcc9bc33
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/relocs-symbol-noaddend.c
+@@ -0,0 +1,23 @@
++/* { dg-do compile } */
++/* { dg-options "-mabi=lp64d -mexplicit-relocs -fno-pic -O2" } */
++/* { dg-final { scan-assembler "pcalau12i.*%pc_hi20\\(\.LANCHOR0\\)\n" } } */
++/* { dg-final { scan-assembler "addi\.d.*%pc_lo12\\(\.LANCHOR0\\)\n" } } */
++/* { dg-final { scan-assembler "ldptr.d\t\\\$r4,.*,0\n" } } */
++/* { dg-final { scan-assembler "ld.d\t\\\$r5,.*,8\n" } } */
++/* { dg-final { scan-assembler-not  "\.LANCHOR0+8" } } */
++
++
++struct S
++{
++  char *a;
++  unsigned short int b;
++};
++
++struct S s1;
++
++void test(struct S);
++void test1(void)
++{
++  test(s1);
++}
++
+-- 
+2.33.0
+
diff --git a/LoongArch-Support-storing-floating-point-zero-into-M.patch b/LoongArch-Support-storing-floating-point-zero-into-M.patch
new file mode 100644
index 0000000000000000000000000000000000000000..360c71fae6d30abaa7b059f9c7c0d1ea6486b590
--- /dev/null
+++ b/LoongArch-Support-storing-floating-point-zero-into-M.patch
@@ -0,0 +1,90 @@
+From de803130fa7d33afaf6e2fc42ef1cd97e45edf96 Mon Sep 17 00:00:00 2001
+From: Guo Jie 
+Date: Fri, 1 Sep 2023 16:35:05 +0800
+Subject: [PATCH 057/124] LoongArch: Support storing floating-point zero into
+ MEM[base + index].
+
+v2: Modify commit message.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.md: Support 'G' -> 'k' in
+	movsf_hardfloat and movdf_hardfloat.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/const-double-zero-stx.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md              | 12 ++++++------
+ .../loongarch/const-double-zero-stx.c          | 18 ++++++++++++++++++
+ 2 files changed, 24 insertions(+), 6 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index a5e9352ca..2d269794f 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1915,13 +1915,13 @@
+ })
+ 
+ (define_insn "*movsf_hardfloat"
+-  [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m")
+-	(match_operand:SF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*G*r,*m,*r"))]
++  [(set (match_operand:SF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,k,*f,*r,*r,*r,*m")
++	(match_operand:SF 1 "move_operand" "f,G,m,f,k,f,G,G,*r,*f,*G*r,*m,*r"))]
+   "TARGET_HARD_FLOAT
+    && (register_operand (operands[0], SFmode)
+        || reg_or_0_operand (operands[1], SFmode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
+-  [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store")
++  [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,store,mgtf,mftg,move,load,store")
+    (set_attr "mode" "SF")])
+ 
+ (define_insn "*movsf_softfloat"
+@@ -1946,13 +1946,13 @@
+ })
+ 
+ (define_insn "*movdf_hardfloat"
+-  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,*f,*r,*r,*r,*m")
+-	(match_operand:DF 1 "move_operand" "f,G,m,f,k,f,G,*r,*f,*r*G,*m,*r"))]
++  [(set (match_operand:DF 0 "nonimmediate_operand" "=f,f,f,m,f,k,m,k,*f,*r,*r,*r,*m")
++	(match_operand:DF 1 "move_operand" "f,G,m,f,k,f,G,G,*r,*f,*r*G,*m,*r"))]
+   "TARGET_DOUBLE_FLOAT
+    && (register_operand (operands[0], DFmode)
+        || reg_or_0_operand (operands[1], DFmode))"
+   { return loongarch_output_move (operands[0], operands[1]); }
+-  [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,mgtf,mftg,move,load,store")
++  [(set_attr "move_type" "fmove,mgtf,fpload,fpstore,fpload,fpstore,store,store,mgtf,mftg,move,load,store")
+    (set_attr "mode" "DF")])
+ 
+ (define_insn "*movdf_softfloat"
+diff --git a/gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c b/gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c
+new file mode 100644
+index 000000000..8fb04be8f
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/const-double-zero-stx.c
+@@ -0,0 +1,18 @@
++/* { dg-do compile } */
++/* { dg-options "-O2" } */
++/* { dg-final { scan-assembler-times {stx\..\t\$r0} 2 } } */
++
++extern float arr_f[];
++extern double arr_d[];
++
++void
++test_f (int base, int index)
++{
++  arr_f[base + index] = 0.0;
++}
++
++void
++test_d (int base, int index)
++{
++  arr_d[base + index] = 0.0;
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Use-LSX-and-LASX-for-block-move.patch b/LoongArch-Use-LSX-and-LASX-for-block-move.patch
new file mode 100644
index 0000000000000000000000000000000000000000..77ead5f5f6ea8130d42f51bf4910396369b57a8a
--- /dev/null
+++ b/LoongArch-Use-LSX-and-LASX-for-block-move.patch
@@ -0,0 +1,154 @@
+From 01b932dead0e7bcc05aae2ac742c76b5fcac5ae7 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 5 Sep 2023 21:02:38 +0800
+Subject: [PATCH 072/124] LoongArch: Use LSX and LASX for block move
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.h (LARCH_MAX_MOVE_PER_INSN):
+	Define to the maximum amount of bytes able to be loaded or
+	stored with one machine instruction.
+	* config/loongarch/loongarch.cc (loongarch_mode_for_move_size):
+	New static function.
+	(loongarch_block_move_straight): Call
+	loongarch_mode_for_move_size for machine_mode to be moved.
+	(loongarch_expand_block_move): Use LARCH_MAX_MOVE_PER_INSN
+	instead of UNITS_PER_WORD.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/memcpy-vec-1.c: New test.
+	* gcc.target/loongarch/memcpy-vec-2.c: New test.
+	* gcc.target/loongarch/memcpy-vec-3.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc             | 22 +++++++++++++++----
+ gcc/config/loongarch/loongarch.h              |  3 +++
+ .../gcc.target/loongarch/memcpy-vec-1.c       | 11 ++++++++++
+ .../gcc.target/loongarch/memcpy-vec-2.c       | 12 ++++++++++
+ .../gcc.target/loongarch/memcpy-vec-3.c       |  6 +++++
+ 5 files changed, 50 insertions(+), 4 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 4b0944d56..baa5c2354 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5187,6 +5187,20 @@ loongarch_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
+   return true;
+ }
+ 
++static machine_mode
++loongarch_mode_for_move_size (HOST_WIDE_INT size)
++{
++  switch (size)
++    {
++    case 32:
++      return V32QImode;
++    case 16:
++      return V16QImode;
++    }
++
++  return int_mode_for_size (size * BITS_PER_UNIT, 0).require ();
++}
++
+ /* Emit straight-line code to move LENGTH bytes from SRC to DEST.
+    Assume that the areas do not overlap.  */
+ 
+@@ -5216,7 +5230,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length,
+ 
+   for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2)
+     {
+-      mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require ();
++      mode = loongarch_mode_for_move_size (delta_cur);
+ 
+       for (; offs + delta_cur <= length; offs += delta_cur, i++)
+ 	{
+@@ -5227,7 +5241,7 @@ loongarch_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length,
+ 
+   for (delta_cur = delta, i = 0, offs = 0; offs < length; delta_cur /= 2)
+     {
+-      mode = int_mode_for_size (delta_cur * BITS_PER_UNIT, 0).require ();
++      mode = loongarch_mode_for_move_size (delta_cur);
+ 
+       for (; offs + delta_cur <= length; offs += delta_cur, i++)
+ 	loongarch_emit_move (adjust_address (dest, mode, offs), regs[i]);
+@@ -5322,8 +5336,8 @@ loongarch_expand_block_move (rtx dest, rtx src, rtx r_length, rtx r_align)
+ 
+   HOST_WIDE_INT align = INTVAL (r_align);
+ 
+-  if (!TARGET_STRICT_ALIGN || align > UNITS_PER_WORD)
+-    align = UNITS_PER_WORD;
++  if (!TARGET_STRICT_ALIGN || align > LARCH_MAX_MOVE_PER_INSN)
++    align = LARCH_MAX_MOVE_PER_INSN;
+ 
+   if (length <= align * LARCH_MAX_MOVE_OPS_STRAIGHT)
+     {
+diff --git a/gcc/config/loongarch/loongarch.h b/gcc/config/loongarch/loongarch.h
+index b2295c589..c7e91a06d 100644
+--- a/gcc/config/loongarch/loongarch.h
++++ b/gcc/config/loongarch/loongarch.h
+@@ -1181,6 +1181,9 @@ typedef struct {
+    least twice.  */
+ #define LARCH_MAX_MOVE_OPS_STRAIGHT (LARCH_MAX_MOVE_OPS_PER_LOOP_ITER * 2)
+ 
++#define LARCH_MAX_MOVE_PER_INSN \
++  (ISA_HAS_LASX ? 32 : (ISA_HAS_LSX ? 16 : UNITS_PER_WORD))
++
+ /* The base cost of a memcpy call, for MOVE_RATIO and friends.  These
+    values were determined experimentally by benchmarking with CSiBE.
+ */
+diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c
+new file mode 100644
+index 000000000..8d9fedc9e
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-1.c
+@@ -0,0 +1,11 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -march=la464 -mno-strict-align" } */
++/* { dg-final { scan-assembler-times "xvst" 2 } } */
++/* { dg-final { scan-assembler-times "\tvst" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */
++
++extern char a[], b[];
++void test() { __builtin_memcpy(a, b, 95); }
+diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c
+new file mode 100644
+index 000000000..6b28b884d
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-2.c
+@@ -0,0 +1,12 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -mabi=lp64d -march=la464 -mno-strict-align" } */
++/* { dg-final { scan-assembler-times "xvst" 2 } } */
++/* { dg-final { scan-assembler-times "\tvst" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.d|stptr\\.d" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.w|stptr\\.w" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.h" 1 } } */
++/* { dg-final { scan-assembler-times "st\\.b" 1 } } */
++
++typedef char __attribute__ ((vector_size (32), aligned (32))) vec;
++extern vec a[], b[];
++void test() { __builtin_memcpy(a, b, 95); }
+diff --git a/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c
+new file mode 100644
+index 000000000..233ed2150
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/memcpy-vec-3.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-O2 -march=la464 -mabi=lp64d -mstrict-align" } */
++/* { dg-final { scan-assembler-not "vst" } } */
++
++extern char a[], b[];
++void test() { __builtin_memcpy(a, b, 16); }
+-- 
+2.33.0
+
diff --git a/LoongArch-Use-UNSPEC-for-fmin-fmax-RTL-pattern-PR105.patch b/LoongArch-Use-UNSPEC-for-fmin-fmax-RTL-pattern-PR105.patch
new file mode 100644
index 0000000000000000000000000000000000000000..105d2bc033381dbb333c6b4d94860058f4f05bfd
--- /dev/null
+++ b/LoongArch-Use-UNSPEC-for-fmin-fmax-RTL-pattern-PR105.patch
@@ -0,0 +1,68 @@
+From d6c7fe54314f7633d779da1a635a8f0832b8f4f6 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Sat, 24 Sep 2022 20:47:22 +0800
+Subject: [PATCH 021/124] LoongArch: Use UNSPEC for fmin/fmax RTL pattern
+ [PR105414]
+
+I made a mistake defining fmin/fmax RTL patterns in r13-2085: I used
+smin and smax in the definition mistakenly.  This causes the optimizer
+to perform constant folding as if fmin/fmax was "really" smin/smax
+operations even with -fsignaling-nans.  Then pr105414.c fails.
+
+We don't have fmin/fmax RTL codes for now (PR107013) so we can only use
+an UNSPEC for fmin and fmax patterns.
+
+gcc/ChangeLog:
+
+	PR tree-optimization/105414
+	* config/loongarch/loongarch.md (UNSPEC_FMAX): New unspec.
+	(UNSPEC_FMIN): Likewise.
+	(fmax3): Use UNSPEC_FMAX instead of smax.
+	(fmin3): Use UNSPEC_FMIN instead of smin.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.md | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 3787fd823..214b14bdd 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -35,6 +35,8 @@
+   ;; Floating point unspecs.
+   UNSPEC_FRINT
+   UNSPEC_FCLASS
++  UNSPEC_FMAX
++  UNSPEC_FMIN
+ 
+   ;; Override return address for exception handling.
+   UNSPEC_EH_RETURN
+@@ -1032,8 +1034,9 @@
+ 
+ (define_insn "fmax3"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+-	(smax:ANYF (match_operand:ANYF 1 "register_operand" "f")
+-		   (match_operand:ANYF 2 "register_operand" "f")))]
++	(unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" "f"))
++		      (use (match_operand:ANYF 2 "register_operand" "f"))]
++		     UNSPEC_FMAX))]
+   ""
+   "fmax.\t%0,%1,%2"
+   [(set_attr "type" "fmove")
+@@ -1041,8 +1044,9 @@
+ 
+ (define_insn "fmin3"
+   [(set (match_operand:ANYF 0 "register_operand" "=f")
+-	(smin:ANYF (match_operand:ANYF 1 "register_operand" "f")
+-		   (match_operand:ANYF 2 "register_operand" "f")))]
++	(unspec:ANYF [(use (match_operand:ANYF 1 "register_operand" "f"))
++		      (use (match_operand:ANYF 2 "register_operand" "f"))]
++		     UNSPEC_FMIN))]
+   ""
+   "fmin.\t%0,%1,%2"
+   [(set_attr "type" "fmove")
+-- 
+2.33.0
+
diff --git a/LoongArch-Use-bstrins-instruction-for-a-mask-and-a-m.patch b/LoongArch-Use-bstrins-instruction-for-a-mask-and-a-m.patch
new file mode 100644
index 0000000000000000000000000000000000000000..46f1dee2bea4502ada876cbd0ad8b5bbc4a2c14c
--- /dev/null
+++ b/LoongArch-Use-bstrins-instruction-for-a-mask-and-a-m.patch
@@ -0,0 +1,336 @@
+From 1c63c61f6508e3c718be79dd27dda25db2b291ee Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 5 Sep 2023 19:42:30 +0800
+Subject: [PATCH 068/124] LoongArch: Use bstrins instruction for (a & ~mask)
+ and (a & mask) | (b & ~mask) [PR111252]
+
+If mask is a constant with value ((1 << N) - 1) << M we can perform this
+optimization.
+
+gcc/ChangeLog:
+
+	PR target/111252
+	* config/loongarch/loongarch-protos.h
+	(loongarch_pre_reload_split): Declare new function.
+	(loongarch_use_bstrins_for_ior_with_mask): Likewise.
+	* config/loongarch/loongarch.cc
+	(loongarch_pre_reload_split): Implement.
+	(loongarch_use_bstrins_for_ior_with_mask): Likewise.
+	* config/loongarch/predicates.md (ins_zero_bitmask_operand):
+	New predicate.
+	* config/loongarch/loongarch.md (bstrins__for_mask):
+	New define_insn_and_split.
+	(bstrins__for_ior_mask): Likewise.
+	(define_peephole2): Further optimize code sequence produced by
+	bstrins__for_ior_mask if possible.
+
+gcc/testsuite/ChangeLog:
+
+	* g++.target/loongarch/bstrins-compile.C: New test.
+	* g++.target/loongarch/bstrins-run.C: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-protos.h       |  4 +-
+ gcc/config/loongarch/loongarch.cc             | 36 ++++++++
+ gcc/config/loongarch/loongarch.md             | 91 +++++++++++++++++++
+ gcc/config/loongarch/predicates.md            |  8 ++
+ .../g++.target/loongarch/bstrins-compile.C    | 22 +++++
+ .../g++.target/loongarch/bstrins-run.C        | 65 +++++++++++++
+ 6 files changed, 225 insertions(+), 1 deletion(-)
+ create mode 100644 gcc/testsuite/g++.target/loongarch/bstrins-compile.C
+ create mode 100644 gcc/testsuite/g++.target/loongarch/bstrins-run.C
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index 133ec9fa8..ea61cf567 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -56,7 +56,7 @@ enum loongarch_symbol_type {
+ };
+ #define NUM_SYMBOL_TYPES (SYMBOL_TLSLDM + 1)
+ 
+-/* Routines implemented in loongarch.c.  */
++/* Routines implemented in loongarch.cc.  */
+ extern rtx loongarch_emit_move (rtx, rtx);
+ extern HOST_WIDE_INT loongarch_initial_elimination_offset (int, int);
+ extern void loongarch_expand_prologue (void);
+@@ -163,6 +163,8 @@ extern const char *current_section_name (void);
+ extern unsigned int current_section_flags (void);
+ extern bool loongarch_use_ins_ext_p (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
+ extern bool loongarch_check_zero_div_p (void);
++extern bool loongarch_pre_reload_split (void);
++extern int loongarch_use_bstrins_for_ior_with_mask (machine_mode, rtx *);
+ 
+ union loongarch_gen_fn_ptrs
+ {
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index dae35a479..4b0944d56 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -5478,6 +5478,42 @@ loongarch_use_ins_ext_p (rtx op, HOST_WIDE_INT width, HOST_WIDE_INT bitpos)
+   return true;
+ }
+ 
++/* Predicate for pre-reload splitters with associated instructions,
++   which can match any time before the split1 pass (usually combine),
++   then are unconditionally split in that pass and should not be
++   matched again afterwards.  */
++
++bool loongarch_pre_reload_split (void)
++{
++  return (can_create_pseudo_p ()
++	  && !(cfun->curr_properties & PROP_rtl_split_insns));
++}
++
++/* Check if we can use bstrins. for
++   op0 = (op1 & op2) | (op3 & op4)
++   where op0, op1, op3 are regs, and op2, op4 are integer constants.  */
++int
++loongarch_use_bstrins_for_ior_with_mask (machine_mode mode, rtx *op)
++{
++  unsigned HOST_WIDE_INT mask1 = UINTVAL (op[2]);
++  unsigned HOST_WIDE_INT mask2 = UINTVAL (op[4]);
++
++  if (mask1 != ~mask2 || !mask1 || !mask2)
++    return 0;
++
++  /* Try to avoid a right-shift.  */
++  if (low_bitmask_len (mode, mask1) != -1)
++    return -1;
++
++  if (low_bitmask_len (mode, mask2 >> (ffs_hwi (mask2) - 1)) != -1)
++    return 1;
++
++  if (low_bitmask_len (mode, mask1 >> (ffs_hwi (mask1) - 1)) != -1)
++    return -1;
++
++  return 0;
++}
++
+ /* Print the text for PRINT_OPERAND punctation character CH to FILE.
+    The punctuation characters are:
+ 
+diff --git a/gcc/config/loongarch/loongarch.md b/gcc/config/loongarch/loongarch.md
+index 3dde0ceb1..11c18bf15 100644
+--- a/gcc/config/loongarch/loongarch.md
++++ b/gcc/config/loongarch/loongarch.md
+@@ -1322,6 +1322,97 @@
+   [(set_attr "move_type" "pick_ins")
+    (set_attr "mode" "")])
+ 
++(define_insn_and_split "*bstrins__for_mask"
++  [(set (match_operand:GPR 0 "register_operand")
++	(and:GPR (match_operand:GPR 1 "register_operand")
++		 (match_operand:GPR 2 "ins_zero_bitmask_operand")))]
++  ""
++  "#"
++  ""
++  [(set (match_dup 0) (match_dup 1))
++   (set (zero_extract:GPR (match_dup 0) (match_dup 2) (match_dup 3))
++	(const_int 0))]
++  {
++    unsigned HOST_WIDE_INT mask = ~UINTVAL (operands[2]);
++    int lo = ffs_hwi (mask) - 1;
++    int len = low_bitmask_len (mode, mask >> lo);
++
++    len = MIN (len, GET_MODE_BITSIZE (mode) - lo);
++    operands[2] = GEN_INT (len);
++    operands[3] = GEN_INT (lo);
++  })
++
++(define_insn_and_split "*bstrins__for_ior_mask"
++  [(set (match_operand:GPR 0 "register_operand")
++	(ior:GPR (and:GPR (match_operand:GPR 1 "register_operand")
++                          (match_operand:GPR 2 "const_int_operand"))
++		 (and:GPR (match_operand:GPR 3 "register_operand")
++			  (match_operand:GPR 4 "const_int_operand"))))]
++  "loongarch_pre_reload_split () && \
++   loongarch_use_bstrins_for_ior_with_mask (mode, operands)"
++  "#"
++  ""
++  [(set (match_dup 0) (match_dup 1))
++   (set (zero_extract:GPR (match_dup 0) (match_dup 2) (match_dup 4))
++	(match_dup 3))]
++  {
++    if (loongarch_use_bstrins_for_ior_with_mask (mode, operands) < 0)
++      {
++	std::swap (operands[1], operands[3]);
++	std::swap (operands[2], operands[4]);
++      }
++
++    unsigned HOST_WIDE_INT mask = ~UINTVAL (operands[2]);
++    int lo = ffs_hwi (mask) - 1;
++    int len = low_bitmask_len (mode, mask >> lo);
++
++    len = MIN (len, GET_MODE_BITSIZE (mode) - lo);
++    operands[2] = GEN_INT (len);
++    operands[4] = GEN_INT (lo);
++
++    if (lo)
++      {
++	rtx tmp = gen_reg_rtx (mode);
++	emit_move_insn (tmp, gen_rtx_ASHIFTRT(mode, operands[3],
++					      GEN_INT (lo)));
++	operands[3] = tmp;
++      }
++  })
++
++;; We always avoid the shift operation in bstrins__for_ior_mask
++;; if possible, but the result may be sub-optimal when one of the masks
++;; is (1 << N) - 1 and one of the src register is the dest register.
++;; For example:
++;;     move		t0, a0
++;;     move		a0, a1
++;;     bstrins.d	a0, t0, 42, 0
++;;     ret
++;; using a shift operation would be better:
++;;     srai.d		t0, a1, 43
++;;     bstrins.d	a0, t0, 63, 43
++;;     ret
++;; unfortunately we cannot figure it out in split1: before reload we cannot
++;; know if the dest register is one of the src register.  Fix it up in
++;; peephole2.
++(define_peephole2
++  [(set (match_operand:GPR 0 "register_operand")
++	(match_operand:GPR 1 "register_operand"))
++   (set (match_dup 1) (match_operand:GPR 2 "register_operand"))
++   (set (zero_extract:GPR (match_dup 1)
++			  (match_operand:SI 3 "const_int_operand")
++			  (const_int 0))
++	(match_dup 0))]
++  "peep2_reg_dead_p (3, operands[0])"
++  [(const_int 0)]
++  {
++    int len = GET_MODE_BITSIZE (mode) - INTVAL (operands[3]);
++
++    emit_insn (gen_ashr3 (operands[0], operands[2], operands[3]));
++    emit_insn (gen_insv (operands[1], GEN_INT (len), operands[3],
++			       operands[0]));
++    DONE;
++  })
++
+ (define_insn "*iorhi3"
+   [(set (match_operand:HI 0 "register_operand" "=r,r")
+ 	(ior:HI (match_operand:HI 1 "register_operand" "%r,r")
+diff --git a/gcc/config/loongarch/predicates.md b/gcc/config/loongarch/predicates.md
+index cf9361b73..ad6cee5c4 100644
+--- a/gcc/config/loongarch/predicates.md
++++ b/gcc/config/loongarch/predicates.md
+@@ -408,6 +408,14 @@
+ (define_predicate "muldiv_target_operand"
+ 		(match_operand 0 "register_operand"))
+ 
++(define_predicate "ins_zero_bitmask_operand"
++  (and (match_code "const_int")
++       (match_test "INTVAL (op) != -1")
++       (match_test "INTVAL (op) & 1")
++       (match_test "low_bitmask_len (mode, \
++				     ~UINTVAL (op) | (~UINTVAL(op) - 1)) \
++		    > 12")))
++
+ (define_predicate "const_call_insn_operand"
+   (match_code "const,symbol_ref,label_ref")
+ {
+diff --git a/gcc/testsuite/g++.target/loongarch/bstrins-compile.C b/gcc/testsuite/g++.target/loongarch/bstrins-compile.C
+new file mode 100644
+index 000000000..3c0db1de4
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/bstrins-compile.C
+@@ -0,0 +1,22 @@
++/* { dg-do compile } */
++/* { dg-options "-std=c++14 -O2 -march=loongarch64 -mabi=lp64d" } */
++/* { dg-final { scan-assembler "bstrins\\.d.*7,4" } } */
++/* { dg-final { scan-assembler "bstrins\\.d.*15,4" } } */
++/* { dg-final { scan-assembler "bstrins\\.d.*31,4" } } */
++/* { dg-final { scan-assembler "bstrins\\.d.*47,4" } } */
++/* { dg-final { scan-assembler "bstrins\\.d.*3,0" } } */
++
++typedef unsigned long u64;
++
++template 
++u64
++test (u64 a, u64 b)
++{
++  return (a & mask) | (b & ~mask);
++}
++
++template u64 test<0x0000'0000'0000'00f0l> (u64, u64);
++template u64 test<0x0000'0000'0000'fff0l> (u64, u64);
++template u64 test<0x0000'0000'ffff'fff0l> (u64, u64);
++template u64 test<0x0000'ffff'ffff'fff0l> (u64, u64);
++template u64 test<0xffff'ffff'ffff'fff0l> (u64, u64);
+diff --git a/gcc/testsuite/g++.target/loongarch/bstrins-run.C b/gcc/testsuite/g++.target/loongarch/bstrins-run.C
+new file mode 100644
+index 000000000..68913d5e0
+--- /dev/null
++++ b/gcc/testsuite/g++.target/loongarch/bstrins-run.C
+@@ -0,0 +1,65 @@
++/* { dg-do run } */
++/* { dg-options "-O2" } */
++
++typedef unsigned long gr;
++
++template 
++struct mask {
++  enum { value = (1ul << r) - (1ul << l) };
++};
++
++template 
++struct mask {
++  enum { value = -(1ul << l) };
++};
++
++__attribute__ ((noipa)) void
++test (gr a, gr b, gr mask, gr out)
++{
++  if (((a & mask) | (b & ~mask)) != out)
++    __builtin_abort ();
++}
++
++__attribute__ ((noipa)) gr
++no_optimize (gr x)
++{
++  return x;
++}
++
++template 
++struct test1 {
++  static void
++  run (void)
++  {
++    gr m = mask::value;
++    gr a = no_optimize (-1ul);
++    gr b = no_optimize (0);
++
++    test (a, b, m, (a & m) | (b & ~m));
++    test (a, b, ~m, (a & ~m) | (b & m));
++    test (a, 0, ~m, a & ~m);
++
++    test1::run ();
++  }
++};
++
++template 
++struct test1 {
++  static void run (void) {}
++};
++
++template 
++void
++test2 (void)
++{
++  test1::run ();
++  test2 ();
++}
++
++template <> void test2 (void) {}
++
++int
++main ()
++{
++  test2<0> ();
++}
+-- 
+2.33.0
+
diff --git a/LoongArch-Use-finer-grained-DBAR-hints.patch b/LoongArch-Use-finer-grained-DBAR-hints.patch
new file mode 100644
index 0000000000000000000000000000000000000000..ad549c4a0431a10de13e49e02409623bd70f1a04
--- /dev/null
+++ b/LoongArch-Use-finer-grained-DBAR-hints.patch
@@ -0,0 +1,137 @@
+From 4a70bfbf686c2b6a1ecd83fe851de826c612c3e0 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 14 Nov 2023 05:32:38 +0800
+Subject: [PATCH] LoongArch: Use finer-grained DBAR hints
+
+LA664 defines DBAR hints 0x1 - 0x1f (except 0xf and 0x1f) as follows [1-2]:
+
+- Bit 4: kind of constraint (0: completion, 1: ordering)
+- Bit 3: barrier for previous read (0: true, 1: false)
+- Bit 2: barrier for previous write (0: true, 1: false)
+- Bit 1: barrier for succeeding read (0: true, 1: false)
+- Bit 0: barrier for succeeding write (0: true, 1: false)
+
+LLVM has already utilized them for different memory orders [3]:
+
+- Bit 4 is always set to one because it's only intended to be zero for
+  things like MMIO devices, which are out of the scope of memory orders.
+- An acquire barrier is used to implement acquire loads like
+
+    ld.d $a1, $t0, 0
+    dbar acquire_hint
+
+  where the load operation (ld.d) should not be reordered with any load
+  or store operation after the acquire load.  To accomplish this
+  constraint, we need to prevent the load operation from being reordered
+  after the barrier, and also prevent any following load/store operation
+  from being reordered before the barrier.  Thus bits 0, 1, and 3 must
+  be zero, and bit 2 can be one, so acquire_hint should be 0b10100.
+- An release barrier is used to implement release stores like
+
+    dbar release_hint
+    st.d $a1, $t0, 0
+
+  where the store operation (st.d) should not be reordered with any load
+  or store operation before the release store.  So we need to prevent
+  the store operation from being reordered before the barrier, and also
+  prevent any preceding load/store operation from being reordered after
+  the barrier.  So bits 0, 2, 3 must be zero, and bit 1 can be one.  So
+  release_hint should be 0b10010.
+
+A similar mapping has been utilized for RISC-V GCC [4], LoongArch Linux
+kernel [1], and LoongArch LLVM [3].  So the mapping should be correct.
+And I've also bootstrapped & regtested GCC on a LA664 with this patch.
+
+The LoongArch CPUs should treat "unknown" hints as dbar 0, so we can
+unconditionally emit the new hints without a compiler switch.
+
+[1]: https://git.kernel.org/torvalds/c/e031a5f3f1ed
+[2]: https://github.com/loongson-community/docs/pull/12
+[3]: https://github.com/llvm/llvm-project/pull/68787
+[4]: https://gcc.gnu.org/r14-406
+
+gcc/ChangeLog:
+
+	* config/loongarch/sync.md (mem_thread_fence): Remove redundant
+	check.
+	(mem_thread_fence_1): Emit finer-grained DBAR hints for
+	different memory models, instead of 0.
+
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/sync.md | 51 +++++++++++++++++++++++++++++-------
+ 1 file changed, 42 insertions(+), 9 deletions(-)
+
+diff --git a/gcc/config/loongarch/sync.md b/gcc/config/loongarch/sync.md
+index 9924d522bcd..1ad0c63e0d9 100644
+--- a/gcc/config/loongarch/sync.md
++++ b/gcc/config/loongarch/sync.md
+@@ -50,23 +50,56 @@
+   [(match_operand:SI 0 "const_int_operand" "")] ;; model
+   ""
+ {
+-  if (INTVAL (operands[0]) != MEMMODEL_RELAXED)
+-    {
+-      rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
+-      MEM_VOLATILE_P (mem) = 1;
+-      emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
+-    }
++  rtx mem = gen_rtx_MEM (BLKmode, gen_rtx_SCRATCH (Pmode));
++  MEM_VOLATILE_P (mem) = 1;
++  emit_insn (gen_mem_thread_fence_1 (mem, operands[0]));
++
+   DONE;
+ })
+ 
+-;; Until the LoongArch memory model (hence its mapping from C++) is finalized,
+-;; conservatively emit a full FENCE.
++;; DBAR hint encoding for LA664 and later micro-architectures, paraphrased from
++;; the Linux patch revealing it [1]:
++;;
++;; - Bit 4: kind of constraint (0: completion, 1: ordering)
++;; - Bit 3: barrier for previous read (0: true, 1: false)
++;; - Bit 2: barrier for previous write (0: true, 1: false)
++;; - Bit 1: barrier for succeeding read (0: true, 1: false)
++;; - Bit 0: barrier for succeeding write (0: true, 1: false)
++;;
++;; [1]: https://git.kernel.org/torvalds/c/e031a5f3f1ed
++;;
++;; Implementations without support for the finer-granularity hints simply treat
++;; all as the full barrier (DBAR 0), so we can unconditionally start emiting the
++;; more precise hints right away.
+ (define_insn "mem_thread_fence_1"
+   [(set (match_operand:BLK 0 "" "")
+ 	(unspec:BLK [(match_dup 0)] UNSPEC_MEMORY_BARRIER))
+    (match_operand:SI 1 "const_int_operand" "")] ;; model
+   ""
+-  "dbar\t0")
++  {
++    enum memmodel model = memmodel_base (INTVAL (operands[1]));
++
++    switch (model)
++      {
++      case MEMMODEL_ACQUIRE:
++	return "dbar\t0b10100";
++      case MEMMODEL_RELEASE:
++	return "dbar\t0b10010";
++      case MEMMODEL_ACQ_REL:
++      case MEMMODEL_SEQ_CST:
++	return "dbar\t0b10000";
++      default:
++	/* GCC internal: "For the '__ATOMIC_RELAXED' model no instructions
++	   need to be issued and this expansion is not invoked."
++
++	   __atomic builtins doc: "Consume is implemented using the
++	   stronger acquire memory order because of a deficiency in C++11's
++	   semantics."  See PR 59448 and get_memmodel in builtins.cc.
++
++	   Other values should not be returned by memmodel_base.  */
++	gcc_unreachable ();
++      }
++  })
+ 
+ ;; Atomic memory operations.
+ 
+-- 
+2.33.0
+
diff --git a/LoongArch-add-mdirect-extern-access-option.patch b/LoongArch-add-mdirect-extern-access-option.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0ed7acea9227579d712002f36500304f1d2e8035
--- /dev/null
+++ b/LoongArch-add-mdirect-extern-access-option.patch
@@ -0,0 +1,157 @@
+From 22f6d3fad184d87f3dac7634537fdbc24846bab9 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Thu, 1 Sep 2022 18:38:14 +0800
+Subject: [PATCH 016/124] LoongArch: add -mdirect-extern-access option
+
+As a new target, LoongArch does not use copy relocation as it's
+problematic in some circumstances.  One bad consequence is we are
+emitting GOT for all accesses to all extern objects with default
+visibility.  The use of GOT is not needed in statically linked
+executables, OS kernels etc.  The GOT entry just wastes space, and the
+GOT access just slow down the execution in those environments.
+
+Before -mexplicit-relocs, we used "-Wa,-mla-global-with-pcrel" to tell
+the assembler not to use GOT for extern access.  But with
+-mexplicit-relocs, we have to opt the logic in GCC.
+
+The name "-mdirect-extern-access" is learnt from x86 port.
+
+gcc/ChangeLog:
+
+	* config/loongarch/genopts/loongarch.opt.in: Add
+	-mdirect-extern-access option.
+	* config/loongarch/loongarch.opt: Regenerate.
+	* config/loongarch/loongarch.cc
+	(loongarch_symbol_binds_local_p): Return true if
+	TARGET_DIRECT_EXTERN_ACCESS.
+	(loongarch_option_override_internal): Complain if
+	-mdirect-extern-access is used with -fPIC or -fpic.
+	* doc/invoke.texi: Document -mdirect-extern-access for
+	LoongArch.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/direct-extern-1.c: New test.
+	* gcc.target/loongarch/direct-extern-2.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/genopts/loongarch.opt.in     |  4 ++++
+ gcc/config/loongarch/loongarch.cc                 |  6 ++++++
+ gcc/config/loongarch/loongarch.opt                |  4 ++++
+ gcc/doc/invoke.texi                               | 15 +++++++++++++++
+ .../gcc.target/loongarch/direct-extern-1.c        |  6 ++++++
+ .../gcc.target/loongarch/direct-extern-2.c        |  6 ++++++
+ 6 files changed, 41 insertions(+)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/direct-extern-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/direct-extern-2.c
+
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index ebdd9538d..e10618777 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -184,3 +184,7 @@ Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME)
+ mcmodel=
+ Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(CMODEL_NORMAL)
+ Specify the code model.
++
++mdirect-extern-access
++Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0)
++Avoid using the GOT to access external symbols.
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 77e3a1053..c9187bf81 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1610,6 +1610,9 @@ loongarch_weak_symbol_p (const_rtx x)
+ bool
+ loongarch_symbol_binds_local_p (const_rtx x)
+ {
++  if (TARGET_DIRECT_EXTERN_ACCESS)
++    return true;
++
+   if (SYMBOL_REF_P (x))
+     return (SYMBOL_REF_DECL (x)
+ 	    ? targetm.binds_local_p (SYMBOL_REF_DECL (x))
+@@ -6093,6 +6096,9 @@ loongarch_option_override_internal (struct gcc_options *opts)
+   if (loongarch_branch_cost == 0)
+     loongarch_branch_cost = loongarch_cost->branch_cost;
+ 
++  if (TARGET_DIRECT_EXTERN_ACCESS && flag_shlib)
++    error ("%qs cannot be used for compiling a shared library",
++	   "-mdirect-extern-access");
+ 
+   switch (la_target.cmodel)
+     {
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 639523421..96c811c85 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -191,3 +191,7 @@ Enum(cmodel) String(extreme) Value(CMODEL_EXTREME)
+ mcmodel=
+ Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(CMODEL_NORMAL)
+ Specify the code model.
++
++mdirect-extern-access
++Target Var(TARGET_DIRECT_EXTERN_ACCESS) Init(0)
++Avoid using the GOT to access external symbols.
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 2a5592516..9ec937b84 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -1007,6 +1007,7 @@ Objective-C and Objective-C++ Dialects}.
+ -memcpy  -mno-memcpy -mstrict-align -mno-strict-align @gol
+ -mmax-inline-memcpy-size=@var{n} @gol
+ -mexplicit-relocs -mno-explicit-relocs @gol
++-mdirect-extern-access -mno-direct-extern-access @gol
+ -mcmodel=@var{code-model}}
+ 
+ @emph{M32R/D Options}
+@@ -24649,6 +24650,20 @@ GCC build-time by detecting corresponding assembler support:
+ @code{-mno-explicit-relocs} otherwise.  This option is mostly useful for
+ debugging, or interoperation with assemblers different from the build-time
+ one.
++
++@item -mdirect-extern-access
++@itemx -mno-direct-extern-access
++@opindex mdirect-extern-access
++Do not use or use GOT to access external symbols.  The default is
++@option{-mno-direct-extern-access}: GOT is used for external symbols with
++default visibility, but not used for other external symbols.
++
++With @option{-mdirect-extern-access}, GOT is not used and all external
++symbols are PC-relatively addressed.  It is @strong{only} suitable for
++environments where no dynamic link is performed, like firmwares, OS
++kernels, executables linked with @option{-static} or @option{-static-pie}.
++@option{-mdirect-extern-access} is not compatible with @option{-fPIC} or
++@option{-fpic}.
+ @end table
+ 
+ @node M32C Options
+diff --git a/gcc/testsuite/gcc.target/loongarch/direct-extern-1.c b/gcc/testsuite/gcc.target/loongarch/direct-extern-1.c
+new file mode 100644
+index 000000000..85c6c1e8a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/direct-extern-1.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-mexplicit-relocs -mdirect-extern-access" } */
++/* { dg-final { scan-assembler-not "got" } } */
++
++extern int x;
++int f() { return x; }
+diff --git a/gcc/testsuite/gcc.target/loongarch/direct-extern-2.c b/gcc/testsuite/gcc.target/loongarch/direct-extern-2.c
+new file mode 100644
+index 000000000..58d8bd68a
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/direct-extern-2.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-mno-explicit-relocs -mdirect-extern-access" } */
++/* { dg-final { scan-assembler-not "la.global" } } */
++
++extern int x;
++int f() { return x; }
+-- 
+2.33.0
+
diff --git a/LoongArch-add-model-attribute.patch b/LoongArch-add-model-attribute.patch
new file mode 100644
index 0000000000000000000000000000000000000000..d8161a3682e7915a21dc06cae702c9bd294f0104
--- /dev/null
+++ b/LoongArch-add-model-attribute.patch
@@ -0,0 +1,477 @@
+From 859ed9ee2dc28b98e11b2bfdeabb0bda7dc921b0 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 29 Jul 2022 21:45:40 +0800
+Subject: [PATCH 014/124] LoongArch: add model attribute
+
+A linker script and/or a section attribute may locate some object
+specially, so we need to handle the code model for such objects
+differently than the -mcmodel setting. This happens when the Linux
+kernel loads a module with per-CPU variables.
+
+Add an attribute to override the code model for a specific variable.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-protos.h (loongarch_symbol_type):
+	Add SYMBOL_PCREL64 and change the description for SYMBOL_PCREL.
+	* config/loongarch/loongarch.cc (loongarch_attribute_table):
+	New attribute table.
+	(TARGET_ATTRIBUTE_TABLE): Define the target hook.
+	(loongarch_handle_model_attribute): New static function.
+	(loongarch_classify_symbol): Take TARGET_CMODEL_EXTREME and the
+	model attribute of SYMBOL_REF_DECL into account returning
+	SYMBOL_PCREL or SYMBOL_PCREL64.
+	(loongarch_use_anchors_for_symbol_p): New static function.
+	(TARGET_USE_ANCHORS_FOR_SYMBOL_P): Define the target hook.
+	(loongarch_symbol_extreme_p): New static function.
+	(loongarch_symbolic_constant_p): Handle SYMBOL_PCREL64.
+	(loongarch_symbol_insns): Likewise.
+	(loongarch_split_symbol_type): Likewise.
+	(loongarch_split_symbol): Check SYMBOL_PCREL64 instead of
+	TARGET_CMODEL_EXTREME for PC-relative addressing.
+	(loongarch_print_operand_reloc): Likewise.
+	* doc/extend.texi (Variable Attributes): Document new
+	LoongArch specific attribute.
+
+gcc/testsuite/ChangeLog:
+
+	* gcc.target/loongarch/attr-model-test.c: New test.
+	* gcc.target/loongarch/attr-model-1.c: New test.
+	* gcc.target/loongarch/attr-model-2.c: New test.
+	* gcc.target/loongarch/attr-model-diag.c: New test.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-protos.h       |   8 +-
+ gcc/config/loongarch/loongarch.cc             | 190 ++++++++++++++++--
+ gcc/doc/extend.texi                           |  17 ++
+ .../gcc.target/loongarch/attr-model-1.c       |   6 +
+ .../gcc.target/loongarch/attr-model-2.c       |   6 +
+ .../gcc.target/loongarch/attr-model-diag.c    |   7 +
+ .../gcc.target/loongarch/attr-model-test.c    |  25 +++
+ 7 files changed, 238 insertions(+), 21 deletions(-)
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-1.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-2.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-diag.c
+ create mode 100644 gcc/testsuite/gcc.target/loongarch/attr-model-test.c
+
+diff --git a/gcc/config/loongarch/loongarch-protos.h b/gcc/config/loongarch/loongarch-protos.h
+index cadaad751..77b221724 100644
+--- a/gcc/config/loongarch/loongarch-protos.h
++++ b/gcc/config/loongarch/loongarch-protos.h
+@@ -28,7 +28,12 @@ along with GCC; see the file COPYING3.  If not see
+        The symbol's value will be loaded directly from the GOT.
+ 
+    SYMBOL_PCREL
+-       The symbol's value will be loaded directly from data section.
++       The symbol's value will be loaded directly from data section within
++       +/- 2GiB range.
++
++   SYMBOL_PCREL64
++       The symbol's value will be loaded directly from data section within
++       +/- 8EiB range.
+ 
+    SYMBOL_TLS
+        A thread-local symbol.
+@@ -42,6 +47,7 @@ along with GCC; see the file COPYING3.  If not see
+ enum loongarch_symbol_type {
+   SYMBOL_GOT_DISP,
+   SYMBOL_PCREL,
++  SYMBOL_PCREL64,
+   SYMBOL_TLS,
+   SYMBOL_TLS_IE,
+   SYMBOL_TLS_LE,
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index 452aba9d4..77e3a1053 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -1633,8 +1633,11 @@ loongarch_rtx_constant_in_small_data_p (machine_mode mode)
+ static enum loongarch_symbol_type
+ loongarch_classify_symbol (const_rtx x)
+ {
++  enum loongarch_symbol_type pcrel =
++    TARGET_CMODEL_EXTREME ? SYMBOL_PCREL64 : SYMBOL_PCREL;
++
+   if (!SYMBOL_REF_P (x))
+-    return SYMBOL_PCREL;
++    return pcrel;
+ 
+   if (SYMBOL_REF_TLS_MODEL (x))
+     return SYMBOL_TLS;
+@@ -1642,7 +1645,28 @@ loongarch_classify_symbol (const_rtx x)
+   if (!loongarch_symbol_binds_local_p (x))
+     return SYMBOL_GOT_DISP;
+ 
+-  return SYMBOL_PCREL;
++  tree t = SYMBOL_REF_DECL (x);
++  if (!t)
++    return pcrel;
++
++  t = lookup_attribute ("model", DECL_ATTRIBUTES (t));
++  if (!t)
++    return pcrel;
++
++  t = TREE_VALUE (TREE_VALUE (t));
++
++  /* loongarch_handle_model_attribute should reject other values.  */
++  gcc_assert (TREE_CODE (t) == STRING_CST);
++
++  const char *model = TREE_STRING_POINTER (t);
++  if (strcmp (model, "normal") == 0)
++    return SYMBOL_PCREL;
++  if (strcmp (model, "extreme") == 0)
++    return SYMBOL_PCREL64;
++
++  /* loongarch_handle_model_attribute should reject unknown model
++     name.  */
++  gcc_unreachable ();
+ }
+ 
+ /* Classify the base of symbolic expression X, given that X appears in
+@@ -1695,6 +1719,7 @@ loongarch_symbolic_constant_p (rtx x, enum loongarch_symbol_type *symbol_type)
+     case SYMBOL_TLSGD:
+     case SYMBOL_TLSLDM:
+     case SYMBOL_PCREL:
++    case SYMBOL_PCREL64:
+       /* GAS rejects offsets outside the range [-2^31, 2^31-1].  */
+       return sext_hwi (INTVAL (offset), 32) == INTVAL (offset);
+ 
+@@ -1729,6 +1754,9 @@ loongarch_symbol_insns (enum loongarch_symbol_type type, machine_mode mode)
+     case SYMBOL_TLSLDM:
+       return 3;
+ 
++    case SYMBOL_PCREL64:
++      return 5;
++
+     case SYMBOL_TLS:
+       /* We don't treat a bare TLS symbol as a constant.  */
+       return 0;
+@@ -1833,7 +1861,7 @@ loongarch_valid_offset_p (rtx x, machine_mode mode)
+   return true;
+ }
+ 
+-/* Should a symbol of type SYMBOL_TYPE should be split in two?  */
++/* Should a symbol of type SYMBOL_TYPE should be split in two or more?  */
+ 
+ bool
+ loongarch_split_symbol_type (enum loongarch_symbol_type symbol_type)
+@@ -1841,6 +1869,7 @@ loongarch_split_symbol_type (enum loongarch_symbol_type symbol_type)
+   switch (symbol_type)
+     {
+     case SYMBOL_PCREL:
++    case SYMBOL_PCREL64:
+     case SYMBOL_GOT_DISP:
+     case SYMBOL_TLS_IE:
+     case SYMBOL_TLS_LE:
+@@ -2718,6 +2747,20 @@ loongarch_force_address (rtx x, machine_mode mode)
+   return x;
+ }
+ 
++static bool
++loongarch_symbol_extreme_p (enum loongarch_symbol_type type)
++{
++  switch (type)
++    {
++      case SYMBOL_PCREL:
++	return false;
++      case SYMBOL_PCREL64:
++	return true;
++      default:
++	return TARGET_CMODEL_EXTREME;
++    }
++}
++
+ /* If MODE is MAX_MACHINE_MODE, ADDR appears as a move operand, otherwise
+    it appears in a MEM of that mode.  Return true if ADDR is a legitimate
+    constant in that context and can be split into high and low parts.
+@@ -2757,7 +2800,7 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+   high = gen_rtx_HIGH (Pmode, copy_rtx (addr));
+   high = loongarch_force_temporary (temp, high);
+ 
+-  if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ())
++  if (loongarch_symbol_extreme_p (symbol_type) && can_create_pseudo_p ())
+     {
+       gcc_assert (TARGET_EXPLICIT_RELOCS);
+ 
+@@ -2771,14 +2814,16 @@ loongarch_split_symbol (rtx temp, rtx addr, machine_mode mode, rtx *low_out)
+   if (low_out)
+     switch (symbol_type)
+       {
+-      case SYMBOL_PCREL:
+-	{
+-	  if (TARGET_CMODEL_EXTREME && can_create_pseudo_p ())
++      case SYMBOL_PCREL64:
++	if (can_create_pseudo_p ())
++	  {
+ 	    *low_out = gen_rtx_PLUS (Pmode, high, temp1);
+-	  else
+-	    *low_out = gen_rtx_LO_SUM (Pmode, high, addr);
+-	  break;
+-	}
++	    break;
++	  }
++	/* fall through */
++      case SYMBOL_PCREL:
++	*low_out = gen_rtx_LO_SUM (Pmode, high, addr);
++	break;
+ 
+       case SYMBOL_GOT_DISP:
+ 	/* SYMBOL_GOT_DISP symbols are loaded from the GOT.  */
+@@ -4745,22 +4790,23 @@ loongarch_print_operand_reloc (FILE *file, rtx op, bool hi64_part,
+ 			       bool hi_reloc)
+ {
+   const char *reloc;
++  enum loongarch_symbol_type symbol_type =
++    loongarch_classify_symbolic_expression (op);
+ 
+-  if (TARGET_CMODEL_EXTREME)
++  if (loongarch_symbol_extreme_p (symbol_type))
+     gcc_assert (TARGET_EXPLICIT_RELOCS);
+ 
+-  switch (loongarch_classify_symbolic_expression (op))
++  switch (symbol_type)
+     {
+-    case SYMBOL_PCREL:
++    case SYMBOL_PCREL64:
+       if (hi64_part)
+ 	{
+-	  if (TARGET_CMODEL_EXTREME)
+-	    reloc = hi_reloc ? "%pc64_hi12" : "%pc64_lo20";
+-	  else
+-	    gcc_unreachable ();
++	  reloc = hi_reloc ? "%pc64_hi12" : "%pc64_lo20";
++	  break;
+ 	}
+-      else
+-	reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12";
++      /* fall through */
++    case SYMBOL_PCREL:
++      reloc = hi_reloc ? "%pc_hi20" : "%pc_lo12";
+       break;
+ 
+     case SYMBOL_GOT_DISP:
+@@ -6316,6 +6362,104 @@ loongarch_starting_frame_offset (void)
+   return crtl->outgoing_args_size;
+ }
+ 
++static tree
++loongarch_handle_model_attribute (tree *node, tree name, tree arg, int,
++				  bool *no_add_attrs)
++{
++  tree decl = *node;
++  if (TREE_CODE (decl) == VAR_DECL)
++    {
++      if (DECL_THREAD_LOCAL_P (decl))
++	{
++	  error_at (DECL_SOURCE_LOCATION (decl),
++		    "%qE attribute cannot be specified for thread-local "
++		    "variables", name);
++	  *no_add_attrs = true;
++	  return NULL_TREE;
++	}
++      if (DECL_CONTEXT (decl)
++	  && TREE_CODE (DECL_CONTEXT (decl)) == FUNCTION_DECL
++	  && !TREE_STATIC (decl))
++	{
++	  error_at (DECL_SOURCE_LOCATION (decl),
++		    "%qE attribute cannot be specified for local "
++		    "variables", name);
++	  *no_add_attrs = true;
++	  return NULL_TREE;
++	}
++      if (DECL_REGISTER (decl))
++	{
++	  error_at (DECL_SOURCE_LOCATION (decl),
++		    "%qE attribute cannot be specified for register "
++		    "variables", name);
++	  *no_add_attrs = true;
++	  return NULL_TREE;
++	}
++      if (!TARGET_EXPLICIT_RELOCS)
++	{
++	  error_at (DECL_SOURCE_LOCATION (decl),
++		    "%qE attribute requires %s", name, "-mexplicit-relocs");
++	  *no_add_attrs = true;
++	  return NULL_TREE;
++	}
++
++      arg = TREE_VALUE (arg);
++      if (TREE_CODE (arg) != STRING_CST)
++	{
++	  error_at (DECL_SOURCE_LOCATION (decl),
++		    "invalid argument of %qE attribute", name);
++	  *no_add_attrs = true;
++	  return NULL_TREE;
++	}
++
++      const char *model = TREE_STRING_POINTER (arg);
++      if (strcmp (model, "normal") != 0
++	  && strcmp (model, "extreme") != 0)
++	{
++	  error_at (DECL_SOURCE_LOCATION (decl),
++		    "invalid argument of %qE attribute", name);
++	  *no_add_attrs = true;
++	  return NULL_TREE;
++	}
++
++      if (lookup_attribute ("model", DECL_ATTRIBUTES (decl)))
++	{
++	  error_at (DECL_SOURCE_LOCATION (decl),
++		    "multiple %qE attribute", name);
++	  *no_add_attrs = true;
++	  return NULL_TREE;
++	}
++    }
++  else
++    {
++      warning (OPT_Wattributes, "%qE attribute ignored", name);
++      *no_add_attrs = true;
++    }
++  return NULL_TREE;
++}
++
++static const struct attribute_spec loongarch_attribute_table[] =
++{
++  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
++       affects_type_identity, handler, exclude } */
++  { "model", 1, 1, true, false, false, false,
++    loongarch_handle_model_attribute, NULL },
++  /* The last attribute spec is set to be NULL.  */
++  {}
++};
++
++bool
++loongarch_use_anchors_for_symbol_p (const_rtx symbol)
++{
++  tree decl = SYMBOL_REF_DECL (symbol);
++
++  /* The section anchor optimization may break custom address model.  */
++  if (decl && lookup_attribute ("model", DECL_ATTRIBUTES (decl)))
++    return false;
++
++  return default_use_anchors_for_symbol_p (symbol);
++}
++
+ /* Initialize the GCC target structure.  */
+ #undef TARGET_ASM_ALIGNED_HI_OP
+ #define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
+@@ -6504,6 +6648,12 @@ loongarch_starting_frame_offset (void)
+ #undef  TARGET_HAVE_SPECULATION_SAFE_VALUE
+ #define TARGET_HAVE_SPECULATION_SAFE_VALUE speculation_safe_value_not_needed
+ 
++#undef  TARGET_ATTRIBUTE_TABLE
++#define TARGET_ATTRIBUTE_TABLE loongarch_attribute_table
++
++#undef  TARGET_USE_ANCHORS_FOR_SYMBOL_P
++#define TARGET_USE_ANCHORS_FOR_SYMBOL_P loongarch_use_anchors_for_symbol_p
++
+ struct gcc_target targetm = TARGET_INITIALIZER;
+ 
+ #include "gt-loongarch.h"
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index 33a776a79..da2840c23 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -7277,6 +7277,7 @@ attributes.
+ * Blackfin Variable Attributes::
+ * H8/300 Variable Attributes::
+ * IA-64 Variable Attributes::
++* LoongArch Variable Attributes::
+ * M32R/D Variable Attributes::
+ * MeP Variable Attributes::
+ * Microsoft Windows Variable Attributes::
+@@ -8061,6 +8062,22 @@ defined by shared libraries.
+ 
+ @end table
+ 
++@node LoongArch Variable Attributes
++@subsection LoongArch Variable Attributes
++
++One attribute is currently defined for the LoongArch.
++
++@table @code
++@item model("@var{name}")
++@cindex @code{model} variable attribute, LoongArch
++Use this attribute on the LoongArch to use a different code model for
++addressing this variable, than the code model specified by the global
++@option{-mcmodel} option.  This attribute is mostly useful if a
++@code{section} attribute and/or a linker script will locate this object
++specially.  Currently the only supported values of @var{name} are
++@code{normal} and @code{extreme}.
++@end table
++
+ @node M32R/D Variable Attributes
+ @subsection M32R/D Variable Attributes
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-1.c b/gcc/testsuite/gcc.target/loongarch/attr-model-1.c
+new file mode 100644
+index 000000000..916d715b9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-1.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-mexplicit-relocs -mcmodel=normal -O2" } */
++/* { dg-final { scan-assembler-times "%pc64_hi12" 2 } } */
++
++#define ATTR_MODEL_TEST
++#include "attr-model-test.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-2.c b/gcc/testsuite/gcc.target/loongarch/attr-model-2.c
+new file mode 100644
+index 000000000..a74c795ac
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-2.c
+@@ -0,0 +1,6 @@
++/* { dg-do compile } */
++/* { dg-options "-mexplicit-relocs -mcmodel=extreme -O2" } */
++/* { dg-final { scan-assembler-times "%pc64_hi12" 3 } } */
++
++#define ATTR_MODEL_TEST
++#include "attr-model-test.c"
+diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-diag.c b/gcc/testsuite/gcc.target/loongarch/attr-model-diag.c
+new file mode 100644
+index 000000000..88beede74
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-diag.c
+@@ -0,0 +1,7 @@
++/* { dg-do compile } */
++/* { dg-options "-mexplicit-relocs" } */
++
++__thread int x __attribute__((model("extreme"))); /* { dg-error "attribute cannot be specified for thread-local variables" } */
++register int y __asm__("tp") __attribute__((model("extreme"))); /* { dg-error "attribute cannot be specified for register variables" } */
++int z __attribute__((model(114))); /* { dg-error "invalid argument" } */
++int t __attribute__((model("good"))); /* { dg-error "invalid argument" } */
+diff --git a/gcc/testsuite/gcc.target/loongarch/attr-model-test.c b/gcc/testsuite/gcc.target/loongarch/attr-model-test.c
+new file mode 100644
+index 000000000..5b61a7af9
+--- /dev/null
++++ b/gcc/testsuite/gcc.target/loongarch/attr-model-test.c
+@@ -0,0 +1,25 @@
++#ifdef ATTR_MODEL_TEST
++int x __attribute__((model("extreme")));
++int y __attribute__((model("normal")));
++int z;
++
++int
++test(void)
++{
++  return x + y + z;
++}
++
++/* The following will be used for kernel per-cpu storage implemention. */
++
++register char *per_cpu_base __asm__("r21");
++static int counter __attribute__((section(".data..percpu"), model("extreme")));
++
++void
++inc_counter(void)
++{
++  int *ptr = (int *)(per_cpu_base + (long)&counter);
++  (*ptr)++;
++}
++#endif
++
++int dummy;
+-- 
+2.33.0
+
diff --git a/LoongArch-add-new-configure-option-with-strict-align.patch b/LoongArch-add-new-configure-option-with-strict-align.patch
new file mode 100644
index 0000000000000000000000000000000000000000..0859a8f41d68a24cdd8297e3cad1bd5b353fbccb
--- /dev/null
+++ b/LoongArch-add-new-configure-option-with-strict-align.patch
@@ -0,0 +1,86 @@
+From da22606529688b125e6e08589a6dfe741b8dd18d Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 28 Aug 2023 10:20:12 +0800
+Subject: [PATCH 060/124] LoongArch: add new configure option
+ --with-strict-align-lib
+
+LoongArch processors may not support memory accesses without natural
+alignments.  Building libraries with -mstrict-align may help with
+toolchain binary compatiblity and performance on these implementations
+(e.g. Loongson 2K1000LA).
+
+No significant performance degradation is observed on current mainstream
+LoongArch processors when the option is enabled.
+
+gcc/ChangeLog:
+
+	* config.gcc: use -mstrict-align for building libraries
+	if --with-strict-align-lib is given.
+	* doc/install.texi: likewise.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config.gcc       | 16 +++++++++++++++-
+ gcc/doc/install.texi |  4 ++++
+ 2 files changed, 19 insertions(+), 1 deletion(-)
+
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 62525c296..16bbaea45 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -4966,7 +4966,7 @@ case "${target}" in
+ 		;;
+ 
+ 	loongarch*-*)
+-		supported_defaults="abi arch tune fpu simd multilib-default"
++		supported_defaults="abi arch tune fpu simd multilib-default strict-align-lib"
+ 
+ 		# Local variables
+ 		unset \
+@@ -5163,6 +5163,17 @@ case "${target}" in
+ 			;;
+ 		esac
+ 
++		# Build libraries with -mstrict-align if --with-strict-align-lib is given.
++		case ${with_strict_align_lib} in
++		yes) strict_align_opt="/mstrict-align" ;;
++		""|no)  ;;
++		*)
++			echo "Unknown option: --with-strict-align-lib=${with_strict_align_lib}" 1>&2
++			exit 1
++			;;
++		esac
++
++
+ 		# Handle --with-multilib-default
+ 		if echo "${with_multilib_default}" \
+ 		| grep -E -e '[[:space:]]' -e '//' -e '/$' -e '^/' > /dev/null 2>&1; then
+@@ -5324,6 +5335,9 @@ case "${target}" in
+ 					;;
+ 			esac
+ 
++			# Use mstrict-align for building libraries if --with-strict-align-lib is given.
++			loongarch_multilib_list_make="${loongarch_multilib_list_make}${strict_align_opt}"
++
+ 			# Check for repeated configuration of the same multilib variant.
+ 			if echo "${elem_abi_base}/${elem_abi_ext}" \
+ 			 | grep -E "^(${all_abis%|})$" >/dev/null 2>&1; then
+diff --git a/gcc/doc/install.texi b/gcc/doc/install.texi
+index 1fc5f0bfa..a8851e8bd 100644
+--- a/gcc/doc/install.texi
++++ b/gcc/doc/install.texi
+@@ -1353,6 +1353,10 @@ Multiple @var{option}s may appear consecutively while @var{arch} may only
+ appear in the beginning or be omitted (which means @option{-march=abi-default}
+ is applied when building the libraries).
+ 
++@item --with-strict-align-lib
++On LoongArch targets, build all enabled multilibs with @option{-mstrict-align}
++(Not enabled by default).
++
+ @item --with-multilib-generator=@var{config}
+ Specify what multilibs to build.  @var{config} is a semicolon separated list of
+ values, possibly consisting of a single value.  Currently only implemented
+-- 
+2.33.0
+
diff --git a/LoongArch-adjust-the-default-of-mexplicit-relocs-by-.patch b/LoongArch-adjust-the-default-of-mexplicit-relocs-by-.patch
new file mode 100644
index 0000000000000000000000000000000000000000..5874c7e591efffb38e29bcb47128b7d7cc03975b
--- /dev/null
+++ b/LoongArch-adjust-the-default-of-mexplicit-relocs-by-.patch
@@ -0,0 +1,149 @@
+From aa10a2949c86e46b7952acbb58599e9bfdeabdfb Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Tue, 26 Jul 2022 21:46:20 +0800
+Subject: [PATCH 006/124] LoongArch: adjust the default of -mexplicit-relocs by
+ checking gas feature
+
+The assembly produced with -mexplicit-relocs is not supported by gas <=
+2.39.  Check if the assembler supports explicit relocations and set the
+default accordingly.
+
+gcc/ChangeLog:
+
+	* configure.ac (HAVE_AS_EXPLICIT_RELOCS): Define to 1 if the
+	assembler supports explicit relocation for LoongArch.
+	* configure: Regenerate.
+	* config/loongarch/loongarch-opts.h (HAVE_AS_EXPLICIT_RELOCS):
+	Define to 0 if not defined.
+	* config/loongarch/genopts/loongarch.opt.in
+	(TARGET_EXPLICIT_RELOCS): Default to HAVE_AS_EXPLICIT_RELOCS.
+	* config/loongarch/loongarch.opt: Regenerate.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/genopts/loongarch.opt.in |  2 +-
+ gcc/config/loongarch/loongarch-opts.h         |  4 +++
+ gcc/config/loongarch/loongarch.opt            |  2 +-
+ gcc/configure                                 | 33 ++++++++++++++++++-
+ gcc/configure.ac                              |  7 +++-
+ 5 files changed, 44 insertions(+), 4 deletions(-)
+
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index 6f3950093..a571b6b75 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -155,7 +155,7 @@ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init
+ -mmax-inline-memcpy-size=SIZE	Set the max size of memcpy to inline, default is 1024.
+ 
+ mexplicit-relocs
+-Target Var(TARGET_EXPLICIT_RELOCS) Init(1)
++Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS)
+ Use %reloc() assembly operators.
+ 
+ ; The code model option names for -mcmodel.
+diff --git a/gcc/config/loongarch/loongarch-opts.h b/gcc/config/loongarch/loongarch-opts.h
+index eaa6fc074..da24ecd2b 100644
+--- a/gcc/config/loongarch/loongarch-opts.h
++++ b/gcc/config/loongarch/loongarch-opts.h
+@@ -87,4 +87,8 @@ loongarch_config_target (struct loongarch_target *target,
+    while -m[no]-memcpy imposes a global constraint.  */
+ #define TARGET_DO_OPTIMIZE_BLOCK_MOVE_P  loongarch_do_optimize_block_move_p()
+ 
++#ifndef HAVE_AS_EXPLICIT_RELOCS
++#define HAVE_AS_EXPLICIT_RELOCS 0
++#endif
++
+ #endif /* LOONGARCH_OPTS_H */
+diff --git a/gcc/config/loongarch/loongarch.opt b/gcc/config/loongarch/loongarch.opt
+index 7a8c5b444..9df7e1872 100644
+--- a/gcc/config/loongarch/loongarch.opt
++++ b/gcc/config/loongarch/loongarch.opt
+@@ -162,7 +162,7 @@ Target Joined RejectNegative UInteger Var(loongarch_max_inline_memcpy_size) Init
+ -mmax-inline-memcpy-size=SIZE	Set the max size of memcpy to inline, default is 1024.
+ 
+ mexplicit-relocs
+-Target Var(TARGET_EXPLICIT_RELOCS) Init(1)
++Target Var(TARGET_EXPLICIT_RELOCS) Init(HAVE_AS_EXPLICIT_RELOCS)
+ Use %reloc() assembly operators.
+ 
+ ; The code model option names for -mcmodel.
+diff --git a/gcc/configure b/gcc/configure
+index 98bbf0f85..840eddc7c 100755
+--- a/gcc/configure
++++ b/gcc/configure
+@@ -28792,7 +28792,7 @@ $as_echo "#define HAVE_AS_MARCH_ZIFENCEI 1" >>confdefs.h
+ fi
+ 
+     ;;
+-  loongarch*-*-*)
++    loongarch*-*-*)
+     { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .dtprelword support" >&5
+ $as_echo_n "checking assembler for .dtprelword support... " >&6; }
+ if ${gcc_cv_as_loongarch_dtprelword+:} false; then :
+@@ -28828,6 +28828,37 @@ if test $gcc_cv_as_loongarch_dtprelword != yes; then
+ $as_echo "#define HAVE_AS_DTPRELWORD 1" >>confdefs.h
+ 
+ fi
++    { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for explicit relocation support" >&5
++$as_echo_n "checking assembler for explicit relocation support... " >&6; }
++if ${gcc_cv_as_loongarch_explicit_relocs+:} false; then :
++  $as_echo_n "(cached) " >&6
++else
++  gcc_cv_as_loongarch_explicit_relocs=no
++  if test x$gcc_cv_as != x; then
++    $as_echo 'a:pcalau12i $t0,%pc_hi20(a)' > conftest.s
++    if { ac_try='$gcc_cv_as $gcc_cv_as_flags  -o conftest.o conftest.s >&5'
++  { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_try\""; } >&5
++  (eval $ac_try) 2>&5
++  ac_status=$?
++  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
++  test $ac_status = 0; }; }
++    then
++	gcc_cv_as_loongarch_explicit_relocs=yes
++    else
++      echo "configure: failed program was" >&5
++      cat conftest.s >&5
++    fi
++    rm -f conftest.o conftest.s
++  fi
++fi
++{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $gcc_cv_as_loongarch_explicit_relocs" >&5
++$as_echo "$gcc_cv_as_loongarch_explicit_relocs" >&6; }
++if test $gcc_cv_as_loongarch_explicit_relocs = yes; then
++
++$as_echo "#define HAVE_AS_EXPLICIT_RELOCS 1" >>confdefs.h
++
++fi
++
+     ;;
+     s390*-*-*)
+     { $as_echo "$as_me:${as_lineno-$LINENO}: checking assembler for .gnu_attribute support" >&5
+diff --git a/gcc/configure.ac b/gcc/configure.ac
+index c74f4b555..975c852c6 100644
+--- a/gcc/configure.ac
++++ b/gcc/configure.ac
+@@ -5309,7 +5309,7 @@ configured with --enable-newlib-nano-formatted-io.])
+       [AC_DEFINE(HAVE_AS_MARCH_ZIFENCEI, 1,
+ 		 [Define if the assembler understands -march=rv*_zifencei.])])
+     ;;
+-  loongarch*-*-*)
++    loongarch*-*-*)
+     gcc_GAS_CHECK_FEATURE([.dtprelword support],
+       gcc_cv_as_loongarch_dtprelword, [2,18,0],,
+       [.section .tdata,"awT",@progbits
+@@ -5319,6 +5319,11 @@ x:
+ 	.dtprelword x+0x8000],,
+       [AC_DEFINE(HAVE_AS_DTPRELWORD, 1,
+ 	  [Define if your assembler supports .dtprelword.])])
++    gcc_GAS_CHECK_FEATURE([explicit relocation support],
++      gcc_cv_as_loongarch_explicit_relocs,,
++      [a:pcalau12i $t0,%pc_hi20(a)],,
++      [AC_DEFINE(HAVE_AS_EXPLICIT_RELOCS, 1,
++	  [Define if your assembler supports explicit relocation.])])
+     ;;
+     s390*-*-*)
+     gcc_GAS_CHECK_FEATURE([.gnu_attribute support],
+-- 
+2.33.0
+
diff --git a/LoongArch-define-preprocessing-macros-__loongarch_-a.patch b/LoongArch-define-preprocessing-macros-__loongarch_-a.patch
new file mode 100644
index 0000000000000000000000000000000000000000..53ab84bb043078d6576a799ed6d94faaca41deda
--- /dev/null
+++ b/LoongArch-define-preprocessing-macros-__loongarch_-a.patch
@@ -0,0 +1,42 @@
+From 41b01fb34126d8b40635af1847b21716f62e5388 Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Mon, 28 Aug 2023 09:32:16 +0800
+Subject: [PATCH 059/124] LoongArch: define preprocessing macros
+ "__loongarch_{arch,tune}"
+
+These are exported according to the LoongArch Toolchain Conventions[1]
+as a replacement of the obsolete "_LOONGARCH_{ARCH,TUNE}" macros,
+which are expanded to strings representing the actual architecture
+and microarchitecture of the target.
+
+[1] currently released at https://github.com/loongson/LoongArch-Documentation
+    /blob/main/docs/LoongArch-toolchain-conventions-EN.adoc
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch-c.cc: Export macros
+	"__loongarch_{arch,tune}" in the preprocessor.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch-c.cc | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index 2cf84eec7..c9b11a042 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -64,6 +64,9 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+   LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch);
+   LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune);
+ 
++  LARCH_CPP_SET_PROCESSOR ("__loongarch_arch", la_target.cpu_arch);
++  LARCH_CPP_SET_PROCESSOR ("__loongarch_tune", la_target.cpu_tune);
++
+   /* Base architecture / ABI.  */
+   if (TARGET_64BIT)
+     {
+-- 
+2.33.0
+
diff --git a/LoongArch-document-m-no-explicit-relocs.patch b/LoongArch-document-m-no-explicit-relocs.patch
new file mode 100644
index 0000000000000000000000000000000000000000..9e025246778790668f038807fb4260d556cbf3dd
--- /dev/null
+++ b/LoongArch-document-m-no-explicit-relocs.patch
@@ -0,0 +1,43 @@
+From 3742550e00bf0401ead01cde64fc1571ffa075fc Mon Sep 17 00:00:00 2001
+From: WANG Xuerui 
+Date: Wed, 27 Jul 2022 15:01:17 +0800
+Subject: [PATCH 007/124] LoongArch: document -m[no-]explicit-relocs
+
+gcc/ChangeLog:
+
+	* doc/invoke.texi: Document -m[no-]explicit-relocs for
+	LoongArch.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/doc/invoke.texi | 13 +++++++++++++
+ 1 file changed, 13 insertions(+)
+
+diff --git a/gcc/doc/invoke.texi b/gcc/doc/invoke.texi
+index 2b376e0e9..1de2b2bd4 100644
+--- a/gcc/doc/invoke.texi
++++ b/gcc/doc/invoke.texi
+@@ -24663,6 +24663,19 @@ global symbol: The data got table must be within +/-8EiB addressing space.
+ @end itemize
+ @end table
+ The default code model is @code{normal}.
++
++@item -mexplicit-relocs
++@itemx -mno-explicit-relocs
++@opindex mexplicit-relocs
++@opindex mno-explicit-relocs
++Use or do not use assembler relocation operators when dealing with symbolic
++addresses.  The alternative is to use assembler macros instead, which may
++limit optimization.  The default value for the option is determined during
++GCC build-time by detecting corresponding assembler support:
++@code{-mexplicit-relocs} if said support is present,
++@code{-mno-explicit-relocs} otherwise.  This option is mostly useful for
++debugging, or interoperation with assemblers different from the build-time
++one.
+ @end table
+ 
+ @node M32C Options
+-- 
+2.33.0
+
diff --git a/LoongArch-fix-error-building.patch b/LoongArch-fix-error-building.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e1d80e09c1dd3869dcc658407789d85b23a338fd
--- /dev/null
+++ b/LoongArch-fix-error-building.patch
@@ -0,0 +1,183 @@
+diff --git a/gcc/config/loongarch/loongarch-builtins.cc b/gcc/config/loongarch/loongarch-builtins.cc
+index a4a7dbec9..2d9743d86 100644
+--- a/gcc/config/loongarch/loongarch-builtins.cc
++++ b/gcc/config/loongarch/loongarch-builtins.cc
+@@ -2440,11 +2440,6 @@ loongarch_init_builtins (void)
+   unsigned int i;
+   tree type;
+ 
+-  /* Register the type float128_type_node as a built-in type and
+-     give it an alias "__float128".  */
+-  (*lang_hooks.types.register_builtin_type) (float128_type_node,
+-					    "__float128");
+-
+   /* Iterate through all of the bdesc arrays, initializing all of the
+      builtin functions.  */
+   for (i = 0; i < ARRAY_SIZE (loongarch_builtins); i++)
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index c9b11a042..76c8ea8db 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -117,17 +117,6 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+       builtin_define ("__loongarch_simd_width=256");
+     }
+ 
+-  /* Add support for FLOAT128_TYPE on the LoongArch architecture.  */
+-  builtin_define ("__FLOAT128_TYPE__");
+-
+-  /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
+-  builtin_define ("__builtin_fabsq=__builtin_fabsf128");
+-  builtin_define ("__builtin_copysignq=__builtin_copysignf128");
+-  builtin_define ("__builtin_nanq=__builtin_nanf128");
+-  builtin_define ("__builtin_nansq=__builtin_nansf128");
+-  builtin_define ("__builtin_infq=__builtin_inff128");
+-  builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
+-
+   /* Native Data Sizes.  */
+   builtin_define_with_int_value ("_LOONGARCH_SZINT", INT_TYPE_SIZE);
+   builtin_define_with_int_value ("_LOONGARCH_SZLONG", LONG_TYPE_SIZE);
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index baa9831aa..ae074edbd 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -9712,13 +9712,10 @@ expand_perm_const_2_end:
+ /* Implement TARGET_VECTORIZE_VEC_PERM_CONST.  */
+ 
+ static bool
+-loongarch_vectorize_vec_perm_const (machine_mode vmode, machine_mode op_mode,
++loongarch_vectorize_vec_perm_const (machine_mode vmode,
+ 				    rtx target, rtx op0, rtx op1,
+ 				    const vec_perm_indices &sel)
+ {
+-  if (vmode != op_mode)
+-    return false;
+-
+   struct expand_vec_perm_d d;
+   int i, nelt, which;
+   unsigned char orig_perm[MAX_VECT_LEN];
+diff --git a/gcc/doc/extend.texi b/gcc/doc/extend.texi
+index bb19d0f27..1d1bac255 100644
+--- a/gcc/doc/extend.texi
++++ b/gcc/doc/extend.texi
+@@ -1085,10 +1085,10 @@ types.
+ As an extension, GNU C and GNU C++ support additional floating
+ types, which are not supported by all targets.
+ @itemize @bullet
+-@item @code{__float128} is available on i386, x86_64, IA-64, LoongArch
+-and hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable
++@item @code{__float128} is available on i386, x86_64, IA-64, and
++hppa HP-UX, as well as on PowerPC GNU/Linux targets that enable
+ the vector scalar (VSX) instruction set.  @code{__float128} supports
+-the 128-bit floating type.  On i386, x86_64, PowerPC, LoongArch and IA-64,
++the 128-bit floating type.  On i386, x86_64, PowerPC, and IA-64
+ other than HP-UX, @code{__float128} is an alias for @code{_Float128}.
+ On hppa and IA-64 HP-UX, @code{__float128} is an alias for @code{long
+ double}.
+@@ -16257,20 +16257,6 @@ function you need to include @code{larchintrin.h}.
+     void __break (imm0_32767)
+ @end smallexample
+ 
+-Additional built-in functions are available for LoongArch family
+-processors to efficiently use 128-bit floating-point (__float128)
+-values.
+-
+-The following are the basic built-in functions supported.
+-@smallexample
+-__float128 __builtin_fabsq (__float128);
+-__float128 __builtin_copysignq (__float128, __float128);
+-__float128 __builtin_infq (void);
+-__float128 __builtin_huge_valq (void);
+-__float128 __builtin_nanq (void);
+-__float128 __builtin_nansq (void);
+-@end smallexample
+-
+ @node MIPS DSP Built-in Functions
+ @subsection MIPS DSP Built-in Functions
+ 
+diff --git a/gcc/testsuite/gcc.target/loongarch/math-float-128.c b/gcc/testsuite/gcc.target/loongarch/math-float-128.c
+deleted file mode 100644
+index 387566a57..000000000
+--- a/gcc/testsuite/gcc.target/loongarch/math-float-128.c
++++ /dev/null
+@@ -1,81 +0,0 @@
+-/* { dg-do compile } */
+-/* { dg-options " -march=loongarch64 -O2 " } */
+-/* { dg-final { scan-assembler-not "my_fabsq2:.*\\bl\t%plt\\(__builtin_fabsq\\).*my_fabsq2" } } */
+-/* { dg-final { scan-assembler-not "my_copysignq2:.*\\bl\t%plt\\(__builtin_copysignq\\).*my_copysignq2" } } */
+-/* { dg-final { scan-assembler-not "my_infq2:.*\\bl\t%plt\\(__builtin_infq\\).*my_infq2" } } */
+-/* { dg-final { scan-assembler-not "my_huge_valq2:.*\\bl\t%plt\\(__builtin_huge_valq\\).*my_huge_valq2" } } */
+-/* { dg-final { scan-assembler-not "my_nanq2:.*\\bl\t%plt\\(__builtin_nanq\\).*my_nanq2" } } */
+-/* { dg-final { scan-assembler-not "my_nansq2:.*\\bl\t%plt\\(__builtin_nansq\\).*my_nansq2" } } */
+-
+-__float128
+-my_fabsq1 (__float128 a)
+-{
+-  return __builtin_fabsq (a);
+-}
+-
+-_Float128
+-my_fabsq2 (_Float128 a)
+-{
+-  return __builtin_fabsq (a);
+-}
+-
+-__float128
+-my_copysignq1 (__float128 a, __float128 b)
+-{
+-  return __builtin_copysignq (a, b);
+-}
+-
+-_Float128
+-my_copysignq2 (_Float128 a, _Float128 b)
+-{
+-  return __builtin_copysignq (a, b);
+-}
+-
+-__float128
+-my_infq1 (void)
+-{
+-  return __builtin_infq ();
+-}
+-
+-_Float128
+-my_infq2 (void)
+-{
+-  return __builtin_infq ();
+-}
+-
+-__float128
+-my_huge_valq1 (void)
+-{
+-  return __builtin_huge_valq ();
+-}
+-
+-_Float128
+-my_huge_valq2 (void)
+-{
+-  return __builtin_huge_valq ();
+-}
+-
+-__float128
+-my_nanq1 (void)
+-{
+-  return __builtin_nanq ("");
+-}
+-
+-_Float128
+-my_nanq2 (void)
+-{
+-  return __builtin_nanq ("");
+-}
+-
+-__float128
+-my_nansq1 (void)
+-{
+-  return __builtin_nansq ("");
+-}
+-
+-_Float128
+-my_nansq2 (void)
+-{
+-  return __builtin_nansq ("");
+-}
+-
diff --git a/LoongArch-fix-signed-overflow-in-loongarch_emit_int_.patch b/LoongArch-fix-signed-overflow-in-loongarch_emit_int_.patch
new file mode 100644
index 0000000000000000000000000000000000000000..41eeff91d097b6ffd9c12538820e1d7382c85af6
--- /dev/null
+++ b/LoongArch-fix-signed-overflow-in-loongarch_emit_int_.patch
@@ -0,0 +1,43 @@
+From 15f1e94ddd7128f407ada43fd9e4b26d4a8bba8d Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Fri, 4 Nov 2022 01:35:25 +0800
+Subject: [PATCH 026/124] LoongArch: fix signed overflow in
+ loongarch_emit_int_compare
+
+Signed overflow is an undefined behavior, so we need to prevent it from
+happening, instead of "checking" the result.
+
+gcc/ChangeLog:
+
+	* config/loongarch/loongarch.cc (loongarch_emit_int_compare):
+	Avoid signed overflow.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ gcc/config/loongarch/loongarch.cc | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/gcc/config/loongarch/loongarch.cc b/gcc/config/loongarch/loongarch.cc
+index e9ba3374e..d552b162a 100644
+--- a/gcc/config/loongarch/loongarch.cc
++++ b/gcc/config/loongarch/loongarch.cc
+@@ -4177,10 +4177,13 @@ loongarch_emit_int_compare (enum rtx_code *code, rtx *op0, rtx *op1)
+ 	      if (!increment && !decrement)
+ 		continue;
+ 
++	      if ((increment && rhs == HOST_WIDE_INT_MAX)
++		  || (decrement && rhs == HOST_WIDE_INT_MIN))
++		break;
++
+ 	      new_rhs = rhs + (increment ? 1 : -1);
+ 	      if (loongarch_integer_cost (new_rhs)
+-		    < loongarch_integer_cost (rhs)
+-		  && (rhs < 0) == (new_rhs < 0))
++		    < loongarch_integer_cost (rhs))
+ 		{
+ 		  *op1 = GEN_INT (new_rhs);
+ 		  *code = mag_comparisons[i][increment];
+-- 
+2.33.0
+
diff --git a/LoongArch-implement-count_-leading-trailing-_zeros.patch b/LoongArch-implement-count_-leading-trailing-_zeros.patch
new file mode 100644
index 0000000000000000000000000000000000000000..6efc57f3d3aabfcb4836ad46403cfd80a387df9c
--- /dev/null
+++ b/LoongArch-implement-count_-leading-trailing-_zeros.patch
@@ -0,0 +1,50 @@
+From ad6541a22dd909a04d745b42c8da5e04733efec4 Mon Sep 17 00:00:00 2001
+From: Xi Ruoyao 
+Date: Wed, 12 Oct 2022 22:06:07 +0800
+Subject: [PATCH 023/124] LoongArch: implement count_{leading,trailing}_zeros
+
+LoongArch always support clz and ctz instructions, so we can always use
+__builtin_{clz,ctz} for count_{leading,trailing}_zeros.  This improves
+the code of libgcc, and also benefits Glibc once we merge longlong.h
+there.
+
+Bootstrapped and regtested on loongarch64-linux-gnu.
+
+include/ChangeLog:
+
+	* longlong.h [__loongarch__] (count_leading_zeros): Define.
+	[__loongarch__] (count_trailing_zeros): Likewise.
+	[__loongarch__] (COUNT_LEADING_ZEROS_0): Likewise.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ include/longlong.h | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/include/longlong.h b/include/longlong.h
+index 64a7b10f9..c3a6f1e7e 100644
+--- a/include/longlong.h
++++ b/include/longlong.h
+@@ -593,6 +593,18 @@ extern UDItype __umulsidi3 (USItype, USItype);
+ #define UMUL_TIME 14
+ #endif
+ 
++#ifdef __loongarch__
++# if W_TYPE_SIZE == 32
++#  define count_leading_zeros(count, x)  ((count) = __builtin_clz (x))
++#  define count_trailing_zeros(count, x) ((count) = __builtin_ctz (x))
++#  define COUNT_LEADING_ZEROS_0 32
++# elif W_TYPE_SIZE == 64
++#  define count_leading_zeros(count, x)  ((count) = __builtin_clzll (x))
++#  define count_trailing_zeros(count, x) ((count) = __builtin_ctzll (x))
++#  define COUNT_LEADING_ZEROS_0 64
++# endif
++#endif
++
+ #if defined (__M32R__) && W_TYPE_SIZE == 32
+ #define add_ssaaaa(sh, sl, ah, al, bh, bl) \
+   /* The cmp clears the condition bit.  */ \
+-- 
+2.33.0
+
diff --git a/LoongArch-improved-target-configuration-interface.patch b/LoongArch-improved-target-configuration-interface.patch
new file mode 100644
index 0000000000000000000000000000000000000000..2c8b7ad2ce700334c3badbf7e433f3ac94a25cc2
--- /dev/null
+++ b/LoongArch-improved-target-configuration-interface.patch
@@ -0,0 +1,3024 @@
+From b980a32eabcbd34e8f8e6a245dbba1898256555e Mon Sep 17 00:00:00 2001
+From: Yang Yujie 
+Date: Wed, 23 Aug 2023 15:16:21 +0800
+Subject: [PATCH 058/124] LoongArch: improved target configuration interface
+
+The configure script and the GCC driver are updated so that
+it is easier to customize and control GCC builds for targeting
+different LoongArch implementations.
+
+* Make --with-abi obsolete, since it might cause different default ABI
+  under the same target triplet, which is undesirable.  The default ABI
+  is now purely decided by the target triplet.
+
+* Support options for LoongArch SIMD extensions:
+  new configure options --with-simd={none,lsx,lasx};
+  new compiler option -msimd={none,lsx,lasx};
+  new driver options -m[no]-l[a]sx.
+
+* Enforce the priority of configuration paths (for ={fpu,tune,simd}):
+  -m > -march-implied > --with- > --with-arch-implied.
+
+* Allow the user to control the compiler options used when building
+  GCC libraries for each multilib variant via --with-multilib-list
+  and --with-multilib-default.  This could become more useful when
+  we have 32-bit support later.
+
+  Example 1: the following configure option
+    --with-multilib-list=lp64d/la464/mno-strict-align/msimd=lsx,lp64s/mfpu=32
+                          |     |            |         |
+                    -mabi=ABI  -march=ARCH  a list of other options
+                  (mandatory)  (optional)     (optional)
+
+     builds two sets of libraries:
+     1. lp64d/base ABI (built with "-march=la464 -mno-strict-align -msimd=lsx")
+     2. lp64s/base ABI (built with "-march=abi-default -mfpu=32")
+
+  Example 2: the following 3 configure options
+
+    --with-arch=loongarch64
+    --with-multilib-list=lp64d,lp64f,lp64s/la464
+    --with-multilib-default=fixed/mno-strict-align/mfpu=64
+                             |            |           |
+                        -march=ARCH   a list of other options
+                         (optional)        (optional)
+
+    is equivalent to (in terms of building libraries):
+
+    --with-multilib-list=\
+    lp64d/loongarch64/mno-strict-align/mfpu=64,\
+    lp64f/loongarch64/mno-strict-align/mfpu=64,\
+    lp64s/la464
+
+  Note:
+    1. the GCC driver and compiler proper do not support
+       "-march=fixed". The "fixed" appearing here acts as a placeholder for
+       "use whatever ARCH in --with-arch=ARCH" (or the default value
+       of --with-arch=ARCH if --with-arch is not explicitly configured).
+
+    2. if the ARCH part is omitted, "-march=abi-default"
+       is used for building all library variants, which
+       practically means enabling the minimal ISA features
+       that can support the given ABI.
+
+ChangeLog:
+
+	* config-ml.in: Do not build the multilib library variant
+	that is duplicate with the toplevel one.
+
+gcc/ChangeLog:
+
+	* config.gcc: Make --with-abi= obsolete, decide the default ABI
+	with target triplet.  Allow specifying multilib library build
+	options with --with-multilib-list and --with-multilib-default.
+	* config/loongarch/t-linux: Likewise.
+	* config/loongarch/genopts/loongarch-strings: Likewise.
+	* config/loongarch/loongarch-str.h: Likewise.
+	* doc/install.texi: Likewise.
+	* config/loongarch/genopts/loongarch.opt.in: Introduce
+	-m[no-]l[a]sx options.  Only process -m*-float and
+	-m[no-]l[a]sx in the GCC driver.
+	* config/loongarch/loongarch.opt: Likewise.
+	* config/loongarch/la464.md: Likewise.
+	* config/loongarch/loongarch-c.cc: Likewise.
+	* config/loongarch/loongarch-cpu.cc: Likewise.
+	* config/loongarch/loongarch-cpu.h: Likewise.
+	* config/loongarch/loongarch-def.c: Likewise.
+	* config/loongarch/loongarch-def.h: Likewise.
+	* config/loongarch/loongarch-driver.cc: Likewise.
+	* config/loongarch/loongarch-driver.h: Likewise.
+	* config/loongarch/loongarch-opts.cc: Likewise.
+	* config/loongarch/loongarch-opts.h: Likewise.
+	* config/loongarch/loongarch.cc: Likewise.
+	* doc/invoke.texi: Likewise.
+
+Signed-off-by: Peng Fan 
+Signed-off-by: ticat_fp 
+---
+ config-ml.in                                  |  10 +
+ gcc/config.gcc                                | 379 ++++++++++--------
+ .../loongarch/genopts/loongarch-strings       |   8 +-
+ gcc/config/loongarch/genopts/loongarch.opt.in |  62 +--
+ gcc/config/loongarch/la464.md                 |  32 +-
+ gcc/config/loongarch/loongarch-c.cc           |  19 +-
+ gcc/config/loongarch/loongarch-cpu.cc         | 263 +++++++-----
+ gcc/config/loongarch/loongarch-cpu.h          |   3 +-
+ gcc/config/loongarch/loongarch-def.c          |  67 ++--
+ gcc/config/loongarch/loongarch-def.h          |  57 +--
+ gcc/config/loongarch/loongarch-driver.cc      | 208 +++++-----
+ gcc/config/loongarch/loongarch-driver.h       |  40 +-
+ gcc/config/loongarch/loongarch-opts.cc        | 372 ++++++++++++-----
+ gcc/config/loongarch/loongarch-opts.h         |  59 +--
+ gcc/config/loongarch/loongarch-str.h          |   7 +-
+ gcc/config/loongarch/loongarch.cc             |  87 ++--
+ gcc/config/loongarch/loongarch.opt            |  60 ++-
+ gcc/config/loongarch/t-linux                  |  32 +-
+ gcc/doc/install.texi                          |  52 ++-
+ 19 files changed, 1136 insertions(+), 681 deletions(-)
+
+diff --git a/config-ml.in b/config-ml.in
+index 68854a4f1..ad0db0781 100644
+--- a/config-ml.in
++++ b/config-ml.in
+@@ -301,6 +301,16 @@ arm-*-*)
+ 	  done
+ 	fi
+ 	;;
++loongarch*-*)
++	old_multidirs="${multidirs}"
++	multidirs=""
++	for x in ${old_multidirs}; do
++	case "$x" in
++	`${CC-gcc} --print-multi-directory`) : ;;
++	*) multidirs="${multidirs} ${x}" ;;
++	esac
++	done
++	;;
+ m68*-*-*)
+ 	if [ x$enable_softfloat = xno ]
+ 	then
+diff --git a/gcc/config.gcc b/gcc/config.gcc
+index 5c378c698..62525c296 100644
+--- a/gcc/config.gcc
++++ b/gcc/config.gcc
+@@ -4965,43 +4965,46 @@ case "${target}" in
+ 		esac
+ 		;;
+ 
+-	loongarch*-*-*)
+-		supported_defaults="abi arch tune fpu"
++	loongarch*-*)
++		supported_defaults="abi arch tune fpu simd multilib-default"
+ 
+ 		# Local variables
+ 		unset \
+-			abi_pattern      abi_default    \
+-			abiext_pattern   abiext_default \
+-			arch_pattern     arch_default   \
+-			fpu_pattern      fpu_default    \
+-			tune_pattern     tune_default   \
+-			triplet_os       triplet_abi
++			abi_base	abi_ext \
++			arch_pattern	arch_default \
++			fpu_pattern	fpu_default \
++			triplet_os	triplet_abi \
++			strict_align_opt
++
++		# --with-abi is now obsolete, emit a warning if given.
++		case ${with_abi} in
++		"") ;;
++		*)
++			echo "warning: --with-abi= is now obsolete," \
++			"the default ABI is derived from your target" \
++			"triplet ${target}" 1>&2
++			;;
++		esac
+ 
+ 		# Infer ABI from the triplet.
+ 		case ${target} in
+-		loongarch64-*-*-*f64)
+-			abi_pattern="lp64d"
+-			;;
+-		loongarch64-*-*-*f32)
+-			abi_pattern="lp64f"
+-			;;
+-		loongarch64-*-*-*sf)
+-			abi_pattern="lp64s"
+-			;;
+-		loongarch64-*-*-*)
+-			abi_pattern="lp64[dfs]"
+-			abi_default="lp64d"
+-			;;
++		loongarch64-*f64) abi_base="lp64d"; abi_ext="base" ;;
++		loongarch64-*f32) abi_base="lp64f"; abi_ext="base" ;;
++		loongarch64-*sf)  abi_base="lp64s"; abi_ext="base" ;;
++		loongarch64-*)    abi_base="lp64d"; abi_ext="base" ;;
+ 		*)
+ 			echo "Unsupported target ${target}." 1>&2
+ 			exit 1
+ 			;;
+ 		esac
+ 
+-		abiext_pattern="*"
+-		abiext_default="base"
+-
+ 		# Get the canonical triplet (multiarch specifier).
++		case ${abi_base},${abi_ext} in
++		lp64d,base) triplet_abi="";;
++		lp64f,base) triplet_abi="f32";;
++		lp64s,base) triplet_abi="sf";;
++		esac
++
+ 		case ${target} in
+ 		  *-linux-gnu*)  triplet_os="linux-gnu";;
+ 		  *-linux-musl*) triplet_os="linux-musl";;
+@@ -5010,42 +5013,24 @@ case "${target}" in
+ 			  exit 1
+ 			  ;;
+ 		esac
++		la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}"
++
+ 
+ 		# Perform initial sanity checks on --with-* options.
+ 		case ${with_arch} in
+-		"" | loongarch64 | la464) ;; # OK, append here.
++		"" | abi-default | loongarch64 | la464) ;; # OK, append here.
+ 		native)
+ 			if test x${host} != x${target}; then
+ 				echo "--with-arch=native is illegal for cross-compiler." 1>&2
+ 				exit 1
+ 			fi
+ 			;;
+-		"")
+-			echo "Please set a default value for \${with_arch}" \
+-			     "according to your target triplet \"${target}\"." 1>&2
+-			exit 1
+-			;;
+ 		*)
+ 			echo "Unknown arch in --with-arch=$with_arch" 1>&2
+ 			exit 1
+ 			;;
+ 		esac
+ 
+-		case ${with_abi} in
+-		"" | lp64d | lp64f | lp64s) ;; # OK, append here.
+-		*)
+-			echo "Unsupported ABI given in --with-abi=$with_abi" 1>&2
+-			exit 1
+-			;;
+-		esac
+-
+-		case ${with_abiext} in
+-		"" | base) ;; # OK, append here.
+-		*)
+-			echo "Unsupported ABI extention type $with_abiext" 1>&2
+-			exit 1
+-			;;
+-		esac
+ 
+ 		case ${with_fpu} in
+ 		"" | none | 32 | 64) ;; # OK, append here.
+@@ -5059,73 +5044,41 @@ case "${target}" in
+ 			;;
+ 		esac
+ 
+-
+-		# Set default value for with_abi.
+-		case ${with_abi} in
+-		"")
+-			if test x${abi_default} != x; then
+-				with_abi=${abi_default}
+-			else
+-				with_abi=${abi_pattern}
+-			fi
+-			;;
+-
+-		*)
+-			if echo "${with_abi}" | grep -E "^${abi_pattern}$" > /dev/null; then
+-				: # OK
+-			else
+-				echo "Incompatible options:" \
+-				"--with-abi=${with_abi} and --target=${target}." 1>&2
++		case ${with_simd} in
++		"" | none) ;;
++		lsx | lasx)  # OK, append here.
++			case ${with_fpu} in
++			64) ;;
++			"") with_fpu=64 ;;
++			*)
++				echo "--with-simd=${with_simd} conflicts with --with-fpu=${with_fpu}" 1>&2
+ 				exit 1
+-			fi
+-			;;
+-		esac
+-
+-		case ${with_abi} in
+-		  "lp64d") triplet_abi="";;
+-		  "lp64f") triplet_abi="f32";;
+-		  "lp64s") triplet_abi="sf";;
+-		esac
+-		la_canonical_triplet="loongarch64-${triplet_os}${triplet_abi}"
+-
+-		# Set default value for with_abiext (internal)
+-		case ${with_abiext} in
+-		"")
+-			if test x${abiext_default} != x; then
+-				with_abiext=${abiext_default}
+-			else
+-				with_abiext=${abiext_pattern}
+-			fi
++				;;
++			esac
+ 			;;
+ 
+ 		*)
+-			if echo "${with_abiext}" | grep -E "^${abiext_pattern}$" > /dev/null; then
+-				: # OK
+-			else
+-				echo "The ABI extension type \"${with_abiext}\"" \
+-				"is incompatible with --target=${target}." 1>&2
+-				exit 1
+-			fi
+-
++			echo "Unknown SIMD extension in --with-simd=$with_simd" 1>&2
++			exit 1
+ 			;;
+ 		esac
+ 
+ 		# Infer ISA-related default options from the ABI: pass 1
+-		case ${with_abi}/${with_abiext} in
++		case ${abi_base}/${abi_ext} in
+ 		lp64*/base)
+ 			# architectures that support lp64* ABI
+-			arch_pattern="native|loongarch64|la464"
++			arch_pattern="native|abi-default|loongarch64|la464"
+ 			# default architecture for lp64* ABI
+-			arch_default="loongarch64"
++			arch_default="abi-default"
+ 			;;
+ 		*)
+-			echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2
++			echo "Unsupported ABI type ${abi_base}/${abi_ext}." 1>&2
+ 			exit 1
+ 			;;
+ 		esac
+ 
+ 		# Infer ISA-related default options from the ABI: pass 2
+-		case ${with_abi}/${with_abiext} in
++		case ${abi_base}/${abi_ext} in
+ 		lp64d/base)
+ 			fpu_pattern="64"
+ 			;;
+@@ -5138,7 +5091,7 @@ case "${target}" in
+ 			fpu_default="none"
+ 			;;
+ 		*)
+-			echo "Unsupported ABI type ${with_abi}/${with_abiext}." 1>&2
++			echo "Unsupported ABI type ${abi_base}/${abi_ext}." 1>&2
+ 			exit 1
+ 			;;
+ 		esac
+@@ -5157,7 +5110,7 @@ case "${target}" in
+ 			if echo "${with_arch}" | grep -E "^${arch_pattern}$" > /dev/null; then
+ 				: # OK
+ 			else
+-				echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \
++				echo "${abi_base}/${abi_ext} ABI cannot be implemented with" \
+ 				"--with-arch=${with_arch}." 1>&2
+ 				exit 1
+ 			fi
+@@ -5178,7 +5131,7 @@ case "${target}" in
+ 			if echo "${with_fpu}" | grep -E "^${fpu_pattern}$" > /dev/null; then
+ 				: # OK
+ 			else
+-				echo "${with_abi}/${with_abiext} ABI cannot be implemented with" \
++				echo "${abi_base}/${abi_ext} ABI cannot be implemented with" \
+ 				"--with-fpu=${with_fpu}." 1>&2
+ 				exit 1
+ 			fi
+@@ -5186,32 +5139,19 @@ case "${target}" in
+ 		esac
+ 
+ 
+-		# Infer default with_tune from with_arch: pass 1
++		# Check default with_tune configuration using with_arch.
+ 		case ${with_arch} in
+-		native)
+-			tune_pattern="*"
+-			tune_default="native"
+-			;;
+ 		loongarch64)
+-			tune_pattern="loongarch64|la464"
+-			tune_default="la464"
++			tune_pattern="native|abi-default|loongarch64|la464"
+ 			;;
+ 		*)
+ 			# By default, $with_tune == $with_arch
+-			tune_pattern="$with_arch"
++			tune_pattern="*"
+ 			;;
+ 		esac
+ 
+-		## Set default value for with_tune.
+ 		case ${with_tune} in
+-		"")
+-			if test x${tune_default} != x; then
+-				with_tune=${tune_default}
+-			else
+-				with_tune=${tune_pattern}
+-			fi
+-			;;
+-
++		"") ;; # OK
+ 		*)
+ 			if echo "${with_tune}" | grep -E "^${tune_pattern}$" > /dev/null; then
+ 				: # OK
+@@ -5223,13 +5163,53 @@ case "${target}" in
+ 			;;
+ 		esac
+ 
++		# Handle --with-multilib-default
++		if echo "${with_multilib_default}" \
++		| grep -E -e '[[:space:]]' -e '//' -e '/$' -e '^/' > /dev/null 2>&1; then
++			echo "Invalid argument to --with-multilib-default." 1>&2
++			exit 1
++		fi
++
++		if test x${with_multilib_default} = x; then
++			# Use -march=abi-default by default when building libraries.
++			with_multilib_default="/march=abi-default"
++		else
++			unset parse_state component
++			parse_state=arch
++			for component in $(echo "${with_multilib_default}" | tr '/' ' '); do
++				case ${parse_state},${component} in
++				arch,|arch,abi-default)
++					# ABI-default: use the ABI's default ARCH configuration for
++					# multilib library builds, unless otherwise specified
++					# in --with-multilib-list.
++					with_multilib_default="/march=abi-default" ;;
++				arch,fixed)
++					# Fixed: use the default gcc configuration for all multilib
++					# builds by default.
++					with_multilib_default="" ;;
++				arch,native|arch,loongarch64|arch,la464) # OK, append here.
++					with_multilib_default="/march=${component}" ;;
++				arch,*)
++					with_multilib_default="/march=abi-default"
++					with_multilib_default="${with_multilib_default}/${component}" ;;
++				opts,*)
++					with_multilib_default="${with_multilib_default}/${component}" ;;
++				esac
++
++				if test x${parse_state} = xarch; then
++					parse_state=opt;
++				fi
++			done
++			unset parse_state component
++		fi
++
+ 		# Handle --with-multilib-list.
+ 		if test x"${with_multilib_list}" = x \
+ 		   || test x"${with_multilib_list}" = xno \
+ 		   || test x"${with_multilib_list}" = xdefault \
+ 		   || test x"${enable_multilib}" != xyes; then
+ 
+-			with_multilib_list="${with_abi}/${with_abiext}"
++			with_multilib_list="${abi_base}/${abi_ext}"
+ 		fi
+ 
+ 		# Check if the configured default ABI combination is included in
+@@ -5245,25 +5225,21 @@ case "${target}" in
+ 		# ${with_multilib_list} should not contain whitespaces,
+ 		# consecutive commas or slashes.
+ 		if echo "${with_multilib_list}" \
+-		| grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null; then
++		| grep -E -e "[[:space:]]" -e '[,/][,/]' -e '[,/]$' -e '^[,/]' > /dev/null 2>&1; then
+ 			echo "Invalid argument to --with-multilib-list." 1>&2
+ 			exit 1
+ 		fi
+ 
+-		unset component idx elem_abi_base elem_abi_ext elem_tmp
++		unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis
+ 		for elem in $(echo "${with_multilib_list}" | tr ',' ' '); do
+-			idx=0
+-			while true; do
+-				idx=$((idx + 1))
+-				component=$(echo "${elem}" | awk -F'/' '{print $'"${idx}"'}')
+-
+-				case ${idx} in
+-				1)
+-					# Component 1: Base ABI type
++			unset elem_abi_base elem_abi_ext
++			parse_state="abi-base"
++
++			for component in $(echo "${elem}" | tr '/' ' '); do
++				if test x${parse_state} = x"abi-base"; then
++					# Base ABI type
+ 					case ${component} in
+-					lp64d) elem_tmp="ABI_BASE_LP64D,";;
+-					lp64f) elem_tmp="ABI_BASE_LP64F,";;
+-					lp64s) elem_tmp="ABI_BASE_LP64S,";;
++					lp64d | lp64f | lp64s) elem_tmp="ABI_BASE_$(tr a-z A-Z <<< ${component}),";;
+ 					*)
+ 						echo "Unknown base ABI \"${component}\" in --with-multilib-list." 1>&2
+ 						exit 1
+@@ -5272,57 +5248,111 @@ case "${target}" in
+ 					loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}"
+ 					loongarch_multilib_list_make="${loongarch_multilib_list_make}mabi=${component}"
+ 					elem_abi_base="${component}"
+-					;;
+ 
+-				2)
+-					# Component 2: ABI extension type
++					parse_state="abi-ext"
++					continue
++				fi
++
++				if test x${parse_state} = x"abi-ext"; then
++					# ABI extension type
+ 					case ${component} in
+-					"" | base)
+-						component="base"
+-						elem_tmp="ABI_EXT_BASE,"
+-						;;
+-					*)
+-						echo "Unknown ABI extension \"${component}\" in --with-multilib-list." 1>&2
+-						exit 1
++					base)
++						elem_abi_ext="base"
++						loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE,"
++						loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now.
++						parse_state="arch"
++						continue;
+ 						;;
+ 					esac
+-					loongarch_multilib_list_c="${loongarch_multilib_list_c}${elem_tmp}"
++
++					# The default ABI extension is "base" if unspecified.
++					elem_abi_ext="base"
++					loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE,"
+ 					loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now.
+-					elem_abi_ext="${component}"
+-					;;
++					parse_state="arch"
++				fi
+ 
+-				*)
+-					# Component 3 and on: optional stuff
++				if test x${parse_state} = x"arch"; then
++					# -march option
+ 					case ${component} in
+-					"")
+-						# End of component list.
+-						break
++					native | abi-default | loongarch64 | la464) # OK, append here.
++						# Append -march spec for each multilib variant.
++						loongarch_multilib_list_make="${loongarch_multilib_list_make}/march=${component}"
++						parse_state="opts"
++						continue
++						;;
++
++					default)
++						# "/default" is equivalent to --with-multilib-default=fixed
++						parse_state="opts"
++						continue
+ 						;;
++					esac
++
++					# If ARCH is unspecified for this multilib variant, use ${with_multilib_default}.
++					loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}"
++					parse_state="opts"
++				fi
++
++				if test x${parse_state} = x"opts"; then
++					# Other compiler options for building libraries.
++					# (no static sanity check performed)
++					case ${component} in
+ 					*)
+-						echo "Unknown ABI \"${elem}\" in --with-multilib-list." 1>&2
+-						exit 1
++						# Append other components as additional build options
++						# (without the prepending dash).
++						# Their validity should be examined by the compiler.
++						loongarch_multilib_list_make="${loongarch_multilib_list_make}/${component}"
+ 						;;
+ 					esac
+-					;;
+-				esac
++				fi
+ 			done
+ 
+-			if test x${elem_abi_base} = x${with_abi} \
+-			&& test x${elem_abi_ext} = x${with_abiext}; then
++			case ${parse_state} in
++			    "abi-ext")
++					elem_abi_ext="base"
++					loongarch_multilib_list_c="${loongarch_multilib_list_c}ABI_EXT_BASE,"
++					loongarch_multilib_list_make="${loongarch_multilib_list_make}" # Add nothing for now.
++					loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}"
++					;;
++			    "arch")
++					# If ARCH is unspecified for this multilib variant, use ${with_multilib_default}.
++					loongarch_multilib_list_make="${loongarch_multilib_list_make}${with_multilib_default}"
++					;;
++			    "opts")
++					:
++					;;
++			esac
++
++			# Check for repeated configuration of the same multilib variant.
++			if echo "${elem_abi_base}/${elem_abi_ext}" \
++			 | grep -E "^(${all_abis%|})$" >/dev/null 2>&1; then
++				echo "Repeated multilib config of \"${elem_abi_base}/${elem_abi_ext}\" in --with-multilib-list."
++				exit 1
++			fi
++			all_abis="${all_abis}${elem_abi_base}/${elem_abi_ext}|"
++
++
++			# Check if the default ABI configuration of the GCC binary
++			# is included in the enabled multilib variants.
++			if test x${elem_abi_base} = x${abi_base} \
++			&& test x${elem_abi_ext} = x${abi_ext}; then
+ 				loongarch_multilib_list_sane=yes
+ 			fi
+ 			loongarch_multilib_list_make="${loongarch_multilib_list_make},"
+ 		done
++		unset component elem_abi_base elem_abi_ext elem_tmp parse_state all_abis
++
+ 
+ 		# Check if the default ABI combination is in the default list.
+ 		if test x${loongarch_multilib_list_sane} = xno; then
+-			if test x${with_abiext} = xbase; then
+-				with_abiext=""
++			if test x${abi_ext} = xbase; then
++				abi_ext=""
+ 			else
+-				with_abiext="/${with_abiext}"
++				abi_ext="/${abi_ext}"
+ 			fi
+ 
+-			echo "Default ABI combination (${with_abi}${with_abiext})" \
++			echo "Default ABI combination (${abi_base}${abi_ext})" \
+ 			"not found in --with-multilib-list." 1>&2
+ 			exit 1
+ 		fi
+@@ -5783,34 +5813,37 @@ case ${target} in
+ 
+ 		# Let --with- flags initialize the enum variables from loongarch.opt.
+ 		# See macro definitions from loongarch-opts.h and loongarch-cpu.h.
+-		case ${with_arch} in
+-		native)		tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_NATIVE" ;;
+-		la464)		tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_LA464" ;;
+-		loongarch64)	tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_LOONGARCH64" ;;
+-		esac
+ 
+-		case ${with_tune} in
+-		native)		tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_NATIVE" ;;
+-		la464)		tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_LA464" ;;
+-		loongarch64)	tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_LOONGARCH64" ;;
+-		esac
++		# Architecture
++		tm_defines="${tm_defines} DEFAULT_CPU_ARCH=CPU_$(echo ${with_arch} | tr a-z- A-Z_)"
+ 
+-		case ${with_abi} in
+-		lp64d)     tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64D" ;;
+-		lp64f)     tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64F" ;;
+-		lp64s)     tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_LP64S" ;;
+-		esac
++		# Base ABI type
++		tm_defines="${tm_defines} DEFAULT_ABI_BASE=ABI_BASE_$(echo ${abi_base} | tr a-z- A-Z_)"
+ 
+-		case ${with_abiext} in
++		# ABI Extension
++		case ${abi_ext} in
+ 		base)      tm_defines="${tm_defines} DEFAULT_ABI_EXT=ABI_EXT_BASE" ;;
+ 		esac
+ 
++		# Microarchitecture
++		if test x${with_tune} != x; then
++		  tm_defines="${tm_defines} DEFAULT_CPU_TUNE=CPU_$(echo ${with_tune} | tr a-z- A-Z_)"
++		fi
++
++		# FPU adjustment
+ 		case ${with_fpu} in
+-		none)    tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NOFPU" ;;
++		none)    tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_NONE" ;;
+ 		32)      tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU32" ;;
+ 		64)      tm_defines="$tm_defines DEFAULT_ISA_EXT_FPU=ISA_EXT_FPU64" ;;
+ 		esac
+ 
++		# SIMD extensions
++		case ${with_simd} in
++		none)    tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_NONE" ;;
++		lsx)     tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LSX" ;;
++		lasx)    tm_defines="$tm_defines DEFAULT_ISA_EXT_SIMD=ISA_EXT_SIMD_LASX" ;;
++		esac
++
+ 		tmake_file="loongarch/t-loongarch $tmake_file"
+ 		;;
+ 
+diff --git a/gcc/config/loongarch/genopts/loongarch-strings b/gcc/config/loongarch/genopts/loongarch-strings
+index 44ebb7ab1..21245f52a 100644
+--- a/gcc/config/loongarch/genopts/loongarch-strings
++++ b/gcc/config/loongarch/genopts/loongarch-strings
+@@ -23,6 +23,7 @@ OPTSTR_ARCH	      arch
+ OPTSTR_TUNE	      tune
+ 
+ STR_CPU_NATIVE	      native
++STR_CPU_ABI_DEFAULT   abi-default
+ STR_CPU_LOONGARCH64   loongarch64
+ STR_CPU_LA464	      la464
+ 
+@@ -31,7 +32,7 @@ STR_ISA_BASE_LA64V100 la64
+ 
+ # -mfpu
+ OPTSTR_ISA_EXT_FPU    fpu
+-STR_ISA_EXT_NOFPU     none
++STR_NONE	      none
+ STR_ISA_EXT_FPU0      0
+ STR_ISA_EXT_FPU32     32
+ STR_ISA_EXT_FPU64     64
+@@ -40,6 +41,11 @@ OPTSTR_SOFT_FLOAT     soft-float
+ OPTSTR_SINGLE_FLOAT   single-float
+ OPTSTR_DOUBLE_FLOAT   double-float
+ 
++# SIMD extensions
++OPTSTR_ISA_EXT_SIMD   simd
++STR_ISA_EXT_LSX       lsx
++STR_ISA_EXT_LASX      lasx
++
+ # -mabi=
+ OPTSTR_ABI_BASE	      abi
+ STR_ABI_BASE_LP64D    lp64d
+diff --git a/gcc/config/loongarch/genopts/loongarch.opt.in b/gcc/config/loongarch/genopts/loongarch.opt.in
+index e10618777..c6e337d05 100644
+--- a/gcc/config/loongarch/genopts/loongarch.opt.in
++++ b/gcc/config/loongarch/genopts/loongarch.opt.in
+@@ -17,22 +17,12 @@
+ ; .
+ ;
+ 
+-; Variables (macros) that should be exported by loongarch.opt:
+-;   la_opt_switches,
+-;   la_opt_abi_base, la_opt_abi_ext,
+-;   la_opt_cpu_arch, la_opt_cpu_tune,
+-;   la_opt_fpu,
+-;   la_cmodel.
+-
+ HeaderInclude
+ config/loongarch/loongarch-opts.h
+ 
+ HeaderInclude
+ config/loongarch/loongarch-str.h
+ 
+-Variable
+-HOST_WIDE_INT la_opt_switches = 0
+-
+ ; ISA related options
+ ;; Base ISA
+ Enum
+@@ -42,14 +32,13 @@ Basic ISAs of LoongArch:
+ EnumValue
+ Enum(isa_base) String(@@STR_ISA_BASE_LA64V100@@) Value(ISA_BASE_LA64V100)
+ 
+-
+ ;; ISA extensions / adjustments
+ Enum
+ Name(isa_ext_fpu) Type(int)
+ FPU types of LoongArch:
+ 
+ EnumValue
+-Enum(isa_ext_fpu) String(@@STR_ISA_EXT_NOFPU@@) Value(ISA_EXT_NOFPU)
++Enum(isa_ext_fpu) String(@@STR_NONE@@) Value(ISA_EXT_NONE)
+ 
+ EnumValue
+ Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU32@@) Value(ISA_EXT_FPU32)
+@@ -58,24 +47,48 @@ EnumValue
+ Enum(isa_ext_fpu) String(@@STR_ISA_EXT_FPU64@@) Value(ISA_EXT_FPU64)
+ 
+ m@@OPTSTR_ISA_EXT_FPU@@=
+-Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPTION_NOT_SEEN)
++Target RejectNegative Joined ToLower Enum(isa_ext_fpu) Var(la_opt_fpu) Init(M_OPT_UNSET)
+ -m@@OPTSTR_ISA_EXT_FPU@@=FPU	Generate code for the given FPU.
+ 
+ m@@OPTSTR_ISA_EXT_FPU@@=@@STR_ISA_EXT_FPU0@@
+-Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_ISA_EXT_NOFPU@@)
++Target RejectNegative Alias(m@@OPTSTR_ISA_EXT_FPU@@=,@@STR_NONE@@)
+ 
+ m@@OPTSTR_SOFT_FLOAT@@
+-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_SOFTF) Negative(m@@OPTSTR_SINGLE_FLOAT@@)
++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SINGLE_FLOAT@@)
+ Prevent the use of all hardware floating-point instructions.
+ 
+ m@@OPTSTR_SINGLE_FLOAT@@
+-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_F32) Negative(m@@OPTSTR_DOUBLE_FLOAT@@)
++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_DOUBLE_FLOAT@@)
+ Restrict the use of hardware floating-point instructions to 32-bit operations.
+ 
+ m@@OPTSTR_DOUBLE_FLOAT@@
+-Target Driver RejectNegative Var(la_opt_switches) Mask(FORCE_F64) Negative(m@@OPTSTR_SOFT_FLOAT@@)
++Target Driver Defer Var(la_deferred_options) RejectNegative Negative(m@@OPTSTR_SOFT_FLOAT@@)
+ Allow hardware floating-point instructions to cover both 32-bit and 64-bit operations.
+ 
++Enum
++Name(isa_ext_simd) Type(int)
++SIMD extension levels of LoongArch:
++
++EnumValue
++Enum(isa_ext_simd) String(@@STR_NONE@@) Value(ISA_EXT_NONE)
++
++EnumValue
++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LSX@@) Value(ISA_EXT_SIMD_LSX)
++
++EnumValue
++Enum(isa_ext_simd) String(@@STR_ISA_EXT_LASX@@) Value(ISA_EXT_SIMD_LASX)
++
++m@@OPTSTR_ISA_EXT_SIMD@@=
++Target RejectNegative Joined ToLower Enum(isa_ext_simd) Var(la_opt_simd) Init(M_OPT_UNSET)
++-m@@OPTSTR_ISA_EXT_SIMD@@=SIMD	Generate code for the given SIMD extension.
++
++m@@STR_ISA_EXT_LSX@@
++Target Driver Defer Var(la_deferred_options)
++Enable LoongArch SIMD Extension (LSX, 128-bit).
++
++m@@STR_ISA_EXT_LASX@@
++Target Driver Defer Var(la_deferred_options)
++Enable LoongArch Advanced SIMD Extension (LASX, 256-bit).
+ 
+ ;; Base target models (implies ISA & tune parameters)
+ Enum
+@@ -85,6 +98,9 @@ LoongArch CPU types:
+ EnumValue
+ Enum(cpu_type) String(@@STR_CPU_NATIVE@@) Value(CPU_NATIVE)
+ 
++EnumValue
++Enum(cpu_type) String(@@STR_CPU_ABI_DEFAULT@@) Value(CPU_ABI_DEFAULT)
++
+ EnumValue
+ Enum(cpu_type) String(@@STR_CPU_LOONGARCH64@@) Value(CPU_LOONGARCH64)
+ 
+@@ -92,11 +108,11 @@ EnumValue
+ Enum(cpu_type) String(@@STR_CPU_LA464@@) Value(CPU_LA464)
+ 
+ m@@OPTSTR_ARCH@@=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPTION_NOT_SEEN)
++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_arch) Init(M_OPT_UNSET)
+ -m@@OPTSTR_ARCH@@=PROCESSOR	Generate code for the given PROCESSOR ISA.
+ 
+ m@@OPTSTR_TUNE@@=
+-Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPTION_NOT_SEEN)
++Target RejectNegative Joined Enum(cpu_type) Var(la_opt_cpu_tune) Init(M_OPT_UNSET)
+ -m@@OPTSTR_TUNE@@=PROCESSOR	Generate optimized code for PROCESSOR.
+ 
+ 
+@@ -118,13 +134,13 @@ EnumValue
+ Enum(abi_base) String(@@STR_ABI_BASE_LP64S@@) Value(ABI_BASE_LP64S)
+ 
+ m@@OPTSTR_ABI_BASE@@=
+-Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPTION_NOT_SEEN)
++Target RejectNegative Joined ToLower Enum(abi_base) Var(la_opt_abi_base) Init(M_OPT_UNSET)
+ -m@@OPTSTR_ABI_BASE@@=BASEABI	Generate code that conforms to the given BASEABI.
+ 
++
+ ;; ABI Extension
+ Variable
+-int la_opt_abi_ext = M_OPTION_NOT_SEEN
+-
++int la_opt_abi_ext = M_OPT_UNSET
+ 
+ mbranch-cost=
+ Target RejectNegative Joined UInteger Var(loongarch_branch_cost)
+@@ -182,7 +198,7 @@ EnumValue
+ Enum(cmodel) String(@@STR_CMODEL_EXTREME@@) Value(CMODEL_EXTREME)
+ 
+ mcmodel=
+-Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(CMODEL_NORMAL)
++Target RejectNegative Joined Enum(cmodel) Var(la_opt_cmodel) Init(M_OPT_UNSET)
+ Specify the code model.
+ 
+ mdirect-extern-access
+diff --git a/gcc/config/loongarch/la464.md b/gcc/config/loongarch/la464.md
+index 0ae177610..89d61bf58 100644
+--- a/gcc/config/loongarch/la464.md
++++ b/gcc/config/loongarch/la464.md
+@@ -43,88 +43,88 @@
+ ;; Describe instruction reservations.
+ 
+ (define_insn_reservation "la464_arith" 1
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "arith,clz,const,logical,
+ 			move,nop,shift,signext,slt"))
+   "la464_alu1 | la464_alu2")
+ 
+ (define_insn_reservation "la464_branch" 1
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "branch,jump,call,condmove,trap"))
+   "la464_alu1 | la464_alu2")
+ 
+ (define_insn_reservation "la464_imul" 7
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "imul"))
+   "la464_alu1 | la464_alu2")
+ 
+ (define_insn_reservation "la464_idiv_si" 12
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (and (eq_attr "type" "idiv")
+ 	    (eq_attr "mode" "SI")))
+   "la464_alu1 | la464_alu2")
+ 
+ (define_insn_reservation "la464_idiv_di" 25
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (and (eq_attr "type" "idiv")
+ 	    (eq_attr "mode" "DI")))
+   "la464_alu1 | la464_alu2")
+ 
+ (define_insn_reservation "la464_load" 4
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "load"))
+   "la464_mem1 | la464_mem2")
+ 
+ (define_insn_reservation "la464_gpr_fp" 16
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "mftg,mgtf"))
+   "la464_mem1")
+ 
+ (define_insn_reservation "la464_fpload" 4
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "fpload"))
+   "la464_mem1 | la464_mem2")
+ 
+ (define_insn_reservation "la464_prefetch" 0
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "prefetch,prefetchx"))
+   "la464_mem1 | la464_mem2")
+ 
+ (define_insn_reservation "la464_store" 0
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "store,fpstore,fpidxstore"))
+   "la464_mem1 | la464_mem2")
+ 
+ (define_insn_reservation "la464_fadd" 4
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "fadd,fmul,fmadd"))
+   "la464_falu1 | la464_falu2")
+ 
+ (define_insn_reservation "la464_fcmp" 2
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "fabs,fcmp,fmove,fneg"))
+   "la464_falu1 | la464_falu2")
+ 
+ (define_insn_reservation "la464_fcvt" 4
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "fcvt"))
+   "la464_falu1 | la464_falu2")
+ 
+ (define_insn_reservation "la464_fdiv_sf" 12
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ 	    (eq_attr "mode" "SF")))
+   "la464_falu1 | la464_falu2")
+ 
+ (define_insn_reservation "la464_fdiv_df" 19
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (and (eq_attr "type" "fdiv,frdiv,fsqrt,frsqrt")
+ 	    (eq_attr "mode" "DF")))
+   "la464_falu1 | la464_falu2")
+ 
+ ;; Force single-dispatch for unknown or multi.
+ (define_insn_reservation "la464_unknown" 1
+-  (and (match_test "TARGET_TUNE_LA464")
++  (and (match_test "TARGET_uARCH_LA464")
+        (eq_attr "type" "unknown,multi,atomic,syncloop"))
+   "la464_alu1 + la464_alu2 + la464_falu1
+    + la464_falu2 + la464_mem1 + la464_mem2")
+diff --git a/gcc/config/loongarch/loongarch-c.cc b/gcc/config/loongarch/loongarch-c.cc
+index f779a7355..2cf84eec7 100644
+--- a/gcc/config/loongarch/loongarch-c.cc
++++ b/gcc/config/loongarch/loongarch-c.cc
+@@ -61,8 +61,8 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+   builtin_assert ("cpu=loongarch");
+   builtin_define ("__loongarch__");
+ 
+-  LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", LARCH_ACTUAL_ARCH);
+-  LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", LARCH_ACTUAL_TUNE);
++  LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_ARCH", la_target.cpu_arch);
++  LARCH_CPP_SET_PROCESSOR ("_LOONGARCH_TUNE", la_target.cpu_tune);
+ 
+   /* Base architecture / ABI.  */
+   if (TARGET_64BIT)
+@@ -99,6 +99,21 @@ loongarch_cpu_cpp_builtins (cpp_reader *pfile)
+   else
+     builtin_define ("__loongarch_frlen=0");
+ 
++  if (ISA_HAS_LSX)
++    {
++      builtin_define ("__loongarch_simd");
++      builtin_define ("__loongarch_sx");
++
++      if (!ISA_HAS_LASX)
++	builtin_define ("__loongarch_simd_width=128");
++    }
++
++  if (ISA_HAS_LASX)
++    {
++      builtin_define ("__loongarch_asx");
++      builtin_define ("__loongarch_simd_width=256");
++    }
++
+   /* Add support for FLOAT128_TYPE on the LoongArch architecture.  */
+   builtin_define ("__FLOAT128_TYPE__");
+ 
+diff --git a/gcc/config/loongarch/loongarch-cpu.cc b/gcc/config/loongarch/loongarch-cpu.cc
+index a886dd932..ea05526d7 100644
+--- a/gcc/config/loongarch/loongarch-cpu.cc
++++ b/gcc/config/loongarch/loongarch-cpu.cc
+@@ -26,6 +26,7 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tm.h"
+ #include "diagnostic-core.h"
+ 
++#include "loongarch-def.h"
+ #include "loongarch-opts.h"
+ #include "loongarch-cpu.h"
+ #include "loongarch-str.h"
+@@ -80,127 +81,191 @@ get_native_prid_str (void)
+ }
+ 
+ /* Fill property tables for CPU_NATIVE.  */
+-unsigned int
+-fill_native_cpu_config (int p_arch_native, int p_tune_native)
++void
++fill_native_cpu_config (struct loongarch_target *tgt)
+ {
+-  int ret_cpu_type;
++  int arch_native_p = tgt->cpu_arch == CPU_NATIVE;
++  int tune_native_p = tgt->cpu_tune == CPU_NATIVE;
++  int native_cpu_type = CPU_NATIVE;
+ 
+   /* Nothing needs to be done unless "-march/tune=native"
+      is given or implied.  */
+-  if (!(p_arch_native || p_tune_native))
+-    return CPU_NATIVE;
++  if (!arch_native_p && !tune_native_p)
++    return;
+ 
+   /* Fill cpucfg_cache with the "cpucfg" instruction.  */
+   cache_cpucfg ();
+ 
+-
+-  /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].base
+-     With: base architecture (ARCH)
+-     At:   cpucfg_words[1][1:0] */
+-
+-  #define NATIVE_BASE_ISA (loongarch_cpu_default_isa[CPU_NATIVE].base)
+-  switch (cpucfg_cache[1] & 0x3)
+-    {
+-      case 0x02:
+-	NATIVE_BASE_ISA = ISA_BASE_LA64V100;
+-	break;
+-
+-      default:
+-	if (p_arch_native)
+-	  fatal_error (UNKNOWN_LOCATION,
+-		       "unknown base architecture %<0x%x%>, %qs failed",
+-		       (unsigned int) (cpucfg_cache[1] & 0x3),
+-		       "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE);
+-    }
+-
+-  /* Fill: loongarch_cpu_default_isa[CPU_NATIVE].fpu
+-     With: FPU type (FP, FP_SP, FP_DP)
+-     At:   cpucfg_words[2][2:0] */
+-
+-  #define NATIVE_FPU (loongarch_cpu_default_isa[CPU_NATIVE].fpu)
+-  switch (cpucfg_cache[2] & 0x7)
+-    {
+-      case 0x07:
+-	NATIVE_FPU = ISA_EXT_FPU64;
+-	break;
+-
+-      case 0x03:
+-	NATIVE_FPU = ISA_EXT_FPU32;
+-	break;
+-
+-      case 0x00:
+-	NATIVE_FPU = ISA_EXT_NOFPU;
+-	break;
+-
+-      default:
+-	if (p_arch_native)
+-	  fatal_error (UNKNOWN_LOCATION,
+-		       "unknown FPU type %<0x%x%>, %qs failed",
+-		       (unsigned int) (cpucfg_cache[2] & 0x7),
+-		       "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE);
+-    }
+-
+-  /* Fill: loongarch_cpu_cache[CPU_NATIVE]
+-     With: cache size info
+-     At:   cpucfg_words[16:20][31:0] */
+-
+-  int l1d_present = 0, l1u_present = 0;
+-  int l2d_present = 0;
+-  uint32_t l1_szword, l2_szword;
+-
+-  l1u_present |= cpucfg_cache[16] & 3;	      /* bit[1:0]: unified l1 cache */
+-  l1d_present |= cpucfg_cache[16] & 4;	      /* bit[2:2]: l1 dcache */
+-  l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0);
+-  l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0;
+-
+-  l2d_present |= cpucfg_cache[16] & 24;	      /* bit[4:3]: unified l2 cache */
+-  l2d_present |= cpucfg_cache[16] & 128;      /* bit[7:7]: l2 dcache */
+-  l2_szword = l2d_present ? cpucfg_cache[19]: 0;
+-
+-  loongarch_cpu_cache[CPU_NATIVE].l1d_line_size
+-    = 1 << ((l1_szword & 0x7f000000) >> 24);  /* bit[30:24]: log2(linesize) */
+-
+-  loongarch_cpu_cache[CPU_NATIVE].l1d_size
+-    = (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */
+-    * ((l1_szword & 0x0000ffff) + 1)	      /* bit[15:0]:  sets - 1 */
+-    * (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesize) */
+-    >> 10;				      /* in kilobytes */
+-
+-  loongarch_cpu_cache[CPU_NATIVE].l2d_size
+-    = (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */
+-    * ((l2_szword & 0x0000ffff) + 1)	      /* bit[15:0]:  sets - 1 */
+-    * (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesize) */
+-    >> 10;				      /* in kilobytes */
+-
+-  /* Fill: ret_cpu_type
++  /* Fill: tgt->cpu_arch | tgt->cpu_tune
+      With: processor ID (PRID)
+      At:   cpucfg_words[0][31:0] */
+ 
+   switch (cpucfg_cache[0] & 0x00ffff00)
+   {
+     case 0x0014c000:   /* LA464 */
+-      ret_cpu_type = CPU_LA464;
++      native_cpu_type = CPU_LA464;
+       break;
+ 
+     default:
+-      /* Unknown PRID.  This is generally harmless as long as
+-	 the properties above can be obtained via "cpucfg".  */
+-      if (p_tune_native)
++      /* Unknown PRID.  */
++      if (tune_native_p)
+ 	inform (UNKNOWN_LOCATION, "unknown processor ID %<0x%x%>, "
+ 		"some tuning parameters will fall back to default",
+ 		cpucfg_cache[0]);
+       break;
+   }
+ 
+-  /* Properties that cannot be looked up directly using cpucfg.  */
+-  loongarch_cpu_issue_rate[CPU_NATIVE]
+-    = loongarch_cpu_issue_rate[ret_cpu_type];
+-
+-  loongarch_cpu_multipass_dfa_lookahead[CPU_NATIVE]
+-    = loongarch_cpu_multipass_dfa_lookahead[ret_cpu_type];
+-
+-  loongarch_cpu_rtx_cost_data[CPU_NATIVE]
+-    = loongarch_cpu_rtx_cost_data[ret_cpu_type];
++  /* if -march=native */
++  if (arch_native_p)
++    {
++      int tmp;
++      tgt->cpu_arch = native_cpu_type;
++
++      /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].base
++	 With: base architecture (ARCH)
++	 At:   cpucfg_words[1][1:0] */
++
++      #define PRESET_ARCH (loongarch_cpu_default_isa[tgt->cpu_arch].base)
++      switch (cpucfg_cache[1] & 0x3)
++	{
++	  case 0x02:
++	    tmp = ISA_BASE_LA64V100;
++	    break;
++
++	  default:
++	    fatal_error (UNKNOWN_LOCATION,
++			 "unknown native base architecture %<0x%x%>, "
++			 "%qs failed", (unsigned int) (cpucfg_cache[1] & 0x3),
++			 "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE);
++	}
++
++      /* Check consistency with PRID presets.  */
++      if (native_cpu_type != CPU_NATIVE && tmp != PRESET_ARCH)
++	warning (0, "base architecture %qs differs from PRID preset %qs",
++		 loongarch_isa_base_strings[tmp],
++		 loongarch_isa_base_strings[PRESET_ARCH]);
++
++      /* Use the native value anyways.  */
++      PRESET_ARCH = tmp;
++
++      /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].fpu
++	 With: FPU type (FP, FP_SP, FP_DP)
++	 At:   cpucfg_words[2][2:0] */
++
++      #define PRESET_FPU (loongarch_cpu_default_isa[tgt->cpu_arch].fpu)
++      switch (cpucfg_cache[2] & 0x7)
++	{
++	  case 0x07:
++	    tmp = ISA_EXT_FPU64;
++	    break;
++
++	  case 0x03:
++	    tmp = ISA_EXT_FPU32;
++	    break;
++
++	  case 0x00:
++	    tmp = ISA_EXT_NONE;
++	    break;
++
++	  default:
++	    fatal_error (UNKNOWN_LOCATION,
++			 "unknown native FPU type %<0x%x%>, %qs failed",
++			 (unsigned int) (cpucfg_cache[2] & 0x7),
++			 "-m" OPTSTR_ARCH "=" STR_CPU_NATIVE);
++	}
++
++      /* Check consistency with PRID presets.  */
++      if (native_cpu_type != CPU_NATIVE && tmp != PRESET_FPU)
++	warning (0, "floating-point unit %qs differs from PRID preset %qs",
++		 loongarch_isa_ext_strings[tmp],
++		 loongarch_isa_ext_strings[PRESET_FPU]);
++
++      /* Use the native value anyways.  */
++      PRESET_FPU = tmp;
++
++
++      /* Fill: loongarch_cpu_default_isa[tgt->cpu_arch].simd
++	 With: SIMD extension type (LSX, LASX)
++	 At:   cpucfg_words[2][7:6] */
++
++      #define PRESET_SIMD (loongarch_cpu_default_isa[tgt->cpu_arch].simd)
++      switch (cpucfg_cache[2] & 0xc0)
++	{
++	  case 0xc0:
++	    tmp = ISA_EXT_SIMD_LASX;
++	    break;
++
++	  case 0x40:
++	    tmp = ISA_EXT_SIMD_LSX;
++	    break;
++
++	  case 0x80:
++	    tmp = 0;
++	    warning (0, "unknown SIMD extension "
++			"(%qs disabled while %qs is enabled), disabling SIMD",
++			loongarch_isa_ext_strings[ISA_EXT_SIMD_LSX],
++			loongarch_isa_ext_strings[ISA_EXT_SIMD_LASX]);
++	    break;
++
++	  case 0x00:
++	    tmp = 0;
++	    break;
++	}
++
++      /* Check consistency with PRID presets.  */
++
++      /*
++      if (native_cpu_type != CPU_NATIVE && tmp != PRESET_SIMD)
++	warning (0, "SIMD extension %qs differs from PRID preset %qs",
++		 loongarch_isa_ext_strings[tmp],
++		 loongarch_isa_ext_strings[PRESET_SIMD]);
++      */
++
++      /* Use the native value anyways.  */
++      PRESET_SIMD = tmp;
++    }
+ 
+-  return ret_cpu_type;
++  if (tune_native_p)
++    {
++      tgt->cpu_tune = native_cpu_type;
++
++      /* Fill: loongarch_cpu_cache[tgt->cpu_tune]
++	 With: cache size info
++	 At:   cpucfg_words[16:20][31:0] */
++
++      #define PRESET_CACHE (loongarch_cpu_cache[tgt->cpu_tune])
++      struct loongarch_cache native_cache;
++      int l1d_present = 0, l1u_present = 0;
++      int l2d_present = 0;
++      uint32_t l1_szword, l2_szword;
++
++      l1u_present |= cpucfg_cache[16] & 3;	  /* bit[1:0]: unified l1 */
++      l1d_present |= cpucfg_cache[16] & 4;	  /* bit[2:2]: l1d */
++      l1_szword = l1d_present ? 18 : (l1u_present ? 17 : 0);
++      l1_szword = l1_szword ? cpucfg_cache[l1_szword]: 0;
++
++      l2d_present |= cpucfg_cache[16] & 24;	  /* bit[4:3]: unified l2 */
++      l2d_present |= cpucfg_cache[16] & 128;	  /* bit[7:7]: l2d */
++      l2_szword = l2d_present ? cpucfg_cache[19]: 0;
++
++      native_cache.l1d_line_size
++	= 1 << ((l1_szword & 0x7f000000) >> 24);  /* bit[30:24]: log2(line) */
++
++      native_cache.l1d_size
++	= (1 << ((l1_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */
++	* ((l1_szword & 0x0000ffff) + 1)	  /* bit[15:0]:  sets - 1 */
++	* (1 << ((l1_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(line) */
++	>> 10;					  /* in kibibytes */
++
++      native_cache.l2d_size
++	= (1 << ((l2_szword & 0x00ff0000) >> 16)) /* bit[23:16]: log2(idx) */
++	* ((l2_szword & 0x0000ffff) + 1)	  /* bit[15:0]:  sets - 1 */
++	* (1 << ((l2_szword & 0x7f000000) >> 24)) /* bit[30:24]: log2(linesz) */
++	>> 10;					  /* in kibibytes */
++
++      /* Use the native value anyways.  */
++      PRESET_CACHE.l1d_line_size = native_cache.l1d_line_size;
++      PRESET_CACHE.l1d_size = native_cache.l1d_size;
++      PRESET_CACHE.l2d_size = native_cache.l2d_size;
++    }
+ }
+diff --git a/gcc/config/loongarch/loongarch-cpu.h b/gcc/config/loongarch/loongarch-cpu.h
+index 93d656f70..eacb38774 100644
+--- a/gcc/config/loongarch/loongarch-cpu.h
++++ b/gcc/config/loongarch/loongarch-cpu.h
+@@ -21,9 +21,10 @@ along with GCC; see the file COPYING3.  If not see
+ #define LOONGARCH_CPU_H
+ 
+ #include "system.h"
++#include "loongarch-def.h"
+ 
+ void cache_cpucfg (void);
+-unsigned int fill_native_cpu_config (int p_arch_native, int p_tune_native);
++void fill_native_cpu_config (struct loongarch_target *tgt);
+ uint32_t get_native_prid (void);
+ const char* get_native_prid_str (void);
+ 
+diff --git a/gcc/config/loongarch/loongarch-def.c b/gcc/config/loongarch/loongarch-def.c
+index 74d422ce0..d29d5f001 100644
+--- a/gcc/config/loongarch/loongarch-def.c
++++ b/gcc/config/loongarch/loongarch-def.c
+@@ -21,25 +21,11 @@ along with GCC; see the file COPYING3.  If not see
+ #include "loongarch-def.h"
+ #include "loongarch-str.h"
+ 
+-/* Default RTX cost initializer.  */
+-#define COSTS_N_INSNS(N) ((N) * 4)
+-#define DEFAULT_COSTS				\
+-    .fp_add		= COSTS_N_INSNS (1),	\
+-    .fp_mult_sf		= COSTS_N_INSNS (2),	\
+-    .fp_mult_df		= COSTS_N_INSNS (4),	\
+-    .fp_div_sf		= COSTS_N_INSNS (6),	\
+-    .fp_div_df		= COSTS_N_INSNS (8),	\
+-    .int_mult_si	= COSTS_N_INSNS (1),	\
+-    .int_mult_di	= COSTS_N_INSNS (1),	\
+-    .int_div_si		= COSTS_N_INSNS (4),	\
+-    .int_div_di		= COSTS_N_INSNS (6),	\
+-    .branch_cost	= 2,			\
+-    .memory_latency	= 4
+-
+ /* CPU property tables.  */
+ const char*
+ loongarch_cpu_strings[N_TUNE_TYPES] = {
+   [CPU_NATIVE]		  = STR_CPU_NATIVE,
++  [CPU_ABI_DEFAULT]	  = STR_CPU_ABI_DEFAULT,
+   [CPU_LOONGARCH64]	  = STR_CPU_LOONGARCH64,
+   [CPU_LA464]		  = STR_CPU_LA464,
+ };
+@@ -49,10 +35,12 @@ loongarch_cpu_default_isa[N_ARCH_TYPES] = {
+   [CPU_LOONGARCH64] = {
+       .base = ISA_BASE_LA64V100,
+       .fpu = ISA_EXT_FPU64,
++      .simd = 0,
+   },
+   [CPU_LA464] = {
+       .base = ISA_BASE_LA64V100,
+       .fpu = ISA_EXT_FPU64,
++      .simd = ISA_EXT_SIMD_LASX,
+   },
+ };
+ 
+@@ -84,6 +72,22 @@ loongarch_cpu_align[N_TUNE_TYPES] = {
+   },
+ };
+ 
++
++/* Default RTX cost initializer.  */
++#define COSTS_N_INSNS(N) ((N) * 4)
++#define DEFAULT_COSTS				\
++    .fp_add		= COSTS_N_INSNS (1),	\
++    .fp_mult_sf		= COSTS_N_INSNS (2),	\
++    .fp_mult_df		= COSTS_N_INSNS (4),	\
++    .fp_div_sf		= COSTS_N_INSNS (6),	\
++    .fp_div_df		= COSTS_N_INSNS (8),	\
++    .int_mult_si	= COSTS_N_INSNS (1),	\
++    .int_mult_di	= COSTS_N_INSNS (1),	\
++    .int_div_si		= COSTS_N_INSNS (4),	\
++    .int_div_di		= COSTS_N_INSNS (6),	\
++    .branch_cost	= 2,			\
++    .memory_latency	= 4
++
+ /* The following properties cannot be looked up directly using "cpucfg".
+  So it is necessary to provide a default value for "unknown native"
+  tune targets (i.e. -mtune=native while PRID does not correspond to
+@@ -103,7 +107,7 @@ loongarch_cpu_rtx_cost_data[N_TUNE_TYPES] = {
+ };
+ 
+ /* RTX costs to use when optimizing for size.  */
+-extern const struct loongarch_rtx_cost_data
++const struct loongarch_rtx_cost_data
+ loongarch_rtx_cost_optimize_size = {
+     .fp_add	      = 4,
+     .fp_mult_sf	      = 4,
+@@ -144,9 +148,11 @@ loongarch_isa_base_strings[N_ISA_BASE_TYPES] = {
+ 
+ const char*
+ loongarch_isa_ext_strings[N_ISA_EXT_TYPES] = {
+-  [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64,
++  [ISA_EXT_NONE] = STR_NONE,
+   [ISA_EXT_FPU32] = STR_ISA_EXT_FPU32,
+-  [ISA_EXT_NOFPU] = STR_ISA_EXT_NOFPU,
++  [ISA_EXT_FPU64] = STR_ISA_EXT_FPU64,
++  [ISA_EXT_SIMD_LSX] = STR_ISA_EXT_LSX,
++  [ISA_EXT_SIMD_LASX] = STR_ISA_EXT_LASX,
+ };
+ 
+ const char*
+@@ -171,24 +177,29 @@ loongarch_cmodel_strings[] = {
+   [CMODEL_EXTREME]	  = STR_CMODEL_EXTREME,
+ };
+ 
+-const char*
+-loongarch_switch_strings[] = {
+-  [SW_SOFT_FLOAT]	  = OPTSTR_SOFT_FLOAT,
+-  [SW_SINGLE_FLOAT]	  = OPTSTR_SINGLE_FLOAT,
+-  [SW_DOUBLE_FLOAT]	  = OPTSTR_DOUBLE_FLOAT,
+-};
+-
+ 
+ /* ABI-related definitions.  */
+ const struct loongarch_isa
+ abi_minimal_isa[N_ABI_BASE_TYPES][N_ABI_EXT_TYPES] = {
+   [ABI_BASE_LP64D] = {
+-      [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU64},
++      [ABI_EXT_BASE] = {
++	  .base = ISA_BASE_LA64V100,
++	  .fpu = ISA_EXT_FPU64,
++	  .simd = 0
++      },
+   },
+   [ABI_BASE_LP64F] = {
+-      [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_FPU32},
++      [ABI_EXT_BASE] = {
++	  .base = ISA_BASE_LA64V100,
++	  .fpu = ISA_EXT_FPU32,
++	  .simd = 0
++      },
+   },
+   [ABI_BASE_LP64S] = {
+-      [ABI_EXT_BASE] = {.base = ISA_BASE_LA64V100, .fpu = ISA_EXT_NOFPU},
++      [ABI_EXT_BASE] = {
++	  .base = ISA_BASE_LA64V100,
++	  .fpu = ISA_EXT_NONE,
++	  .simd = 0
++      },
+   },
+ };
+diff --git a/gcc/config/loongarch/loongarch-def.h b/gcc/config/loongarch/loongarch-def.h
+index eb87a79a5..0aee7dc19 100644
+--- a/gcc/config/loongarch/loongarch-def.h
++++ b/gcc/config/loongarch/loongarch-def.h
+@@ -59,11 +59,13 @@ extern const char* loongarch_isa_base_strings[];
+ 
+ /* enum isa_ext_* */
+ extern const char* loongarch_isa_ext_strings[];
+-#define ISA_EXT_NOFPU	      0
++#define ISA_EXT_NONE	      0
+ #define ISA_EXT_FPU32	      1
+ #define ISA_EXT_FPU64	      2
+ #define N_ISA_EXT_FPU_TYPES   3
+-#define N_ISA_EXT_TYPES	      3
++#define ISA_EXT_SIMD_LSX      3
++#define ISA_EXT_SIMD_LASX     4
++#define N_ISA_EXT_TYPES	      5
+ 
+ /* enum abi_base */
+ extern const char* loongarch_abi_base_strings[];
+@@ -72,6 +74,16 @@ extern const char* loongarch_abi_base_strings[];
+ #define ABI_BASE_LP64S	      2
+ #define N_ABI_BASE_TYPES      3
+ 
++#define TO_LP64_ABI_BASE(C) (C)
++
++#define ABI_FPU_64(abi_base) \
++  (abi_base == ABI_BASE_LP64D)
++#define ABI_FPU_32(abi_base) \
++  (abi_base == ABI_BASE_LP64F)
++#define ABI_FPU_NONE(abi_base) \
++  (abi_base == ABI_BASE_LP64S)
++
++
+ /* enum abi_ext */
+ extern const char* loongarch_abi_ext_strings[];
+ #define ABI_EXT_BASE	      0
+@@ -87,55 +99,44 @@ extern const char* loongarch_cmodel_strings[];
+ #define CMODEL_EXTREME	      5
+ #define N_CMODEL_TYPES	      6
+ 
+-/* enum switches */
+-/* The "SW_" codes represent command-line switches (options that
+-   accept no parameters). Definition for other switches that affects
+-   the target ISA / ABI configuration will also be appended here
+-   in the future.  */
+-
+-extern const char* loongarch_switch_strings[];
+-#define SW_SOFT_FLOAT	      0
+-#define SW_SINGLE_FLOAT	      1
+-#define SW_DOUBLE_FLOAT	      2
+-#define N_SWITCH_TYPES	      3
+-
+ /* The common default value for variables whose assignments
+    are triggered by command-line options.  */
+ 
+-#define M_OPTION_NOT_SEEN -1
+-#define M_OPT_ABSENT(opt_enum)  ((opt_enum) == M_OPTION_NOT_SEEN)
++#define M_OPT_UNSET -1
++#define M_OPT_ABSENT(opt_enum)  ((opt_enum) == M_OPT_UNSET)
+ 
+ 
+ /* Internal representation of the target.  */
+ struct loongarch_isa
+ {
+-  unsigned char base;	    /* ISA_BASE_ */
+-  unsigned char fpu;	    /* ISA_EXT_FPU_ */
++  int base;	    /* ISA_BASE_ */
++  int fpu;	    /* ISA_EXT_FPU_ */
++  int simd;	    /* ISA_EXT_SIMD_ */
+ };
+ 
+ struct loongarch_abi
+ {
+-  unsigned char base;	    /* ABI_BASE_ */
+-  unsigned char ext;	    /* ABI_EXT_ */
++  int base;	    /* ABI_BASE_ */
++  int ext;	    /* ABI_EXT_ */
+ };
+ 
+ struct loongarch_target
+ {
+   struct loongarch_isa isa;
+   struct loongarch_abi abi;
+-  unsigned char cpu_arch;   /* CPU_ */
+-  unsigned char cpu_tune;   /* same */
+-  unsigned char cpu_native; /* same */
+-  unsigned char cmodel;	    /* CMODEL_ */
++  int cpu_arch;	    /* CPU_ */
++  int cpu_tune;	    /* same */
++  int cmodel;	    /* CMODEL_ */
+ };
+ 
+ /* CPU properties.  */
+ /* index */
+ #define CPU_NATIVE	  0
+-#define CPU_LOONGARCH64	  1
+-#define CPU_LA464	  2
+-#define N_ARCH_TYPES	  3
+-#define N_TUNE_TYPES	  3
++#define CPU_ABI_DEFAULT   1
++#define CPU_LOONGARCH64	  2
++#define CPU_LA464	  3
++#define N_ARCH_TYPES	  4
++#define N_TUNE_TYPES	  4
+ 
+ /* parallel tables.  */
+ extern const char* loongarch_cpu_strings[];
+diff --git a/gcc/config/loongarch/loongarch-driver.cc b/gcc/config/loongarch/loongarch-driver.cc
+index 0adcc923b..b3626984d 100644
+--- a/gcc/config/loongarch/loongarch-driver.cc
++++ b/gcc/config/loongarch/loongarch-driver.cc
+@@ -26,122 +26,137 @@ along with GCC; see the file COPYING3.  If not see
+ #include "tm.h"
+ #include "obstack.h"
+ #include "diagnostic-core.h"
++#include "opts.h"
+ 
+ #include "loongarch-opts.h"
+ #include "loongarch-driver.h"
+ 
+-static int
+-  opt_arch_driver = M_OPTION_NOT_SEEN,
+-  opt_tune_driver = M_OPTION_NOT_SEEN,
+-  opt_fpu_driver = M_OPTION_NOT_SEEN,
+-  opt_abi_base_driver = M_OPTION_NOT_SEEN,
+-  opt_abi_ext_driver = M_OPTION_NOT_SEEN,
+-  opt_cmodel_driver = M_OPTION_NOT_SEEN;
+-
+-int opt_switches = 0;
+-
+ /* This flag is set to 1 if we believe that the user might be avoiding
+    linking (implicitly) against something from the startfile search paths.  */
+ static int no_link = 0;
+ 
+-#define LARCH_DRIVER_SET_M_FLAG(OPTS_ARRAY, N_OPTS, FLAG, STR)	\
+-  for (int i = 0; i < (N_OPTS); i++)				\
+-  {								\
+-    if ((OPTS_ARRAY)[i] != 0)					\
+-      if (strcmp ((STR), (OPTS_ARRAY)[i]) == 0)			\
+-	(FLAG) = i;						\
+-  }
+-
+ /* Use the public obstack from the gcc driver (defined in gcc.c).
+    This is for allocating space for the returned string.  */
+ extern struct obstack opts_obstack;
+ 
+-#define APPEND_LTR(S)				      \
+-  obstack_grow (&opts_obstack, (const void*) (S),     \
+-		sizeof ((S)) / sizeof (char) -1)
+-
+-#define APPEND_VAL(S) \
+-  obstack_grow (&opts_obstack, (const void*) (S), strlen ((S)))
++const char*
++la_driver_init (int argc ATTRIBUTE_UNUSED, const char **argv ATTRIBUTE_UNUSED)
++{
++  /* Initialize all fields of la_target to -1 */
++  loongarch_init_target (&la_target, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET,
++			 M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET, M_OPT_UNSET);
++  return "";
++}
+ 
++const char*
++driver_set_no_link (int argc ATTRIBUTE_UNUSED,
++		    const char **argv ATTRIBUTE_UNUSED)
++{
++  no_link = 1;
++  return "";
++}
+ 
+ const char*
+-driver_set_m_flag (int argc, const char **argv)
++driver_set_m_parm (int argc, const char **argv)
+ {
+-  int parm_off = 0;
++  gcc_assert (argc == 2);
++
++#define LARCH_DRIVER_PARSE_PARM(OPT_IDX, NAME, OPTSTR_LIST, \
++				OPT_IDX_LO, OPT_IDX_HI)	    \
++  if (strcmp (argv[0], OPTSTR_##NAME) == 0)		    \
++    for (int i = (OPT_IDX_LO); i < (OPT_IDX_HI); i++)	    \
++    {							    \
++      if ((OPTSTR_LIST)[i] != 0)			    \
++	if (strcmp (argv[1], (OPTSTR_LIST)[i]) == 0)	    \
++	  {						    \
++	    (OPT_IDX) = i;				    \
++	    return 0;					    \
++	  }						    \
++    }
+ 
+-  if (argc != 1)
+-    return "%eset_m_flag requires exactly 1 argument.";
++  LARCH_DRIVER_PARSE_PARM (la_target.abi.base, ABI_BASE, \
++			   loongarch_abi_base_strings, 0, N_ABI_BASE_TYPES)
+ 
+-#undef PARM
+-#define PARM (argv[0] + parm_off)
++  LARCH_DRIVER_PARSE_PARM (la_target.isa.fpu, ISA_EXT_FPU, \
++			   loongarch_isa_ext_strings, 0, N_ISA_EXT_FPU_TYPES)
+ 
+-/* Note: sizeof (OPTSTR_##NAME) equals the length of "